Diffstat (limited to 'drivers/gpu/drm')
-rw-r--r--  drivers/gpu/drm/drm_agpsupport.c | 6
-rw-r--r--  drivers/gpu/drm/drm_crtc_helper.c | 18
-rw-r--r--  drivers/gpu/drm/drm_fb_helper.c | 61
-rw-r--r--  drivers/gpu/drm/drm_fops.c | 2
-rw-r--r--  drivers/gpu/drm/drm_irq.c | 566
-rw-r--r--  drivers/gpu/drm/drm_mm.c | 40
-rw-r--r--  drivers/gpu/drm/drm_stub.c | 10
-rw-r--r--  drivers/gpu/drm/i915/Makefile | 2
-rw-r--r--  drivers/gpu/drm/i915/i915_debugfs.c | 471
-rw-r--r--  drivers/gpu/drm/i915/i915_dma.c | 794
-rw-r--r--  drivers/gpu/drm/i915/i915_drv.c | 80
-rw-r--r--  drivers/gpu/drm/i915/i915_drv.h | 605
-rw-r--r--  drivers/gpu/drm/i915/i915_gem.c | 3764
-rw-r--r--  drivers/gpu/drm/i915/i915_gem_debug.c | 23
-rw-r--r--  drivers/gpu/drm/i915/i915_gem_evict.c | 125
-rw-r--r--  drivers/gpu/drm/i915/i915_gem_execbuffer.c | 1343
-rw-r--r--  drivers/gpu/drm/i915/i915_gem_gtt.c | 99
-rw-r--r--  drivers/gpu/drm/i915/i915_gem_tiling.c | 139
-rw-r--r--  drivers/gpu/drm/i915/i915_irq.c | 724
-rw-r--r--  drivers/gpu/drm/i915/i915_reg.h | 193
-rw-r--r--  drivers/gpu/drm/i915/i915_suspend.c | 104
-rw-r--r--  drivers/gpu/drm/i915/i915_trace.h | 91
-rw-r--r--  drivers/gpu/drm/i915/intel_display.c | 999
-rw-r--r--  drivers/gpu/drm/i915/intel_dp.c | 3
-rw-r--r--  drivers/gpu/drm/i915/intel_drv.h | 22
-rw-r--r--  drivers/gpu/drm/i915/intel_fb.c | 32
-rw-r--r--  drivers/gpu/drm/i915/intel_i2c.c | 21
-rw-r--r--  drivers/gpu/drm/i915/intel_lvds.c | 43
-rw-r--r--  drivers/gpu/drm/i915/intel_opregion.c | 8
-rw-r--r--  drivers/gpu/drm/i915/intel_overlay.c | 116
-rw-r--r--  drivers/gpu/drm/i915/intel_panel.c | 52
-rw-r--r--  drivers/gpu/drm/i915/intel_ringbuffer.c | 1007
-rw-r--r--  drivers/gpu/drm/i915/intel_ringbuffer.h | 136
-rw-r--r--  drivers/gpu/drm/i915/intel_sdvo.c | 7
-rw-r--r--  drivers/gpu/drm/i915/intel_tv.c | 14
-rw-r--r--  drivers/gpu/drm/nouveau/Kconfig | 2
-rw-r--r--  drivers/gpu/drm/nouveau/Makefile | 17
-rw-r--r--  drivers/gpu/drm/nouveau/nouveau_acpi.c | 11
-rw-r--r--  drivers/gpu/drm/nouveau/nouveau_bios.c | 102
-rw-r--r--  drivers/gpu/drm/nouveau/nouveau_bo.c | 319
-rw-r--r--  drivers/gpu/drm/nouveau/nouveau_channel.c | 383
-rw-r--r--  drivers/gpu/drm/nouveau/nouveau_connector.c | 54
-rw-r--r--  drivers/gpu/drm/nouveau/nouveau_display.c | 207
-rw-r--r--  drivers/gpu/drm/nouveau/nouveau_dma.c | 32
-rw-r--r--  drivers/gpu/drm/nouveau/nouveau_dma.h | 9
-rw-r--r--  drivers/gpu/drm/nouveau/nouveau_dp.c | 6
-rw-r--r--  drivers/gpu/drm/nouveau/nouveau_drv.c | 58
-rw-r--r--  drivers/gpu/drm/nouveau/nouveau_drv.h | 425
-rw-r--r--  drivers/gpu/drm/nouveau/nouveau_fbcon.c | 189
-rw-r--r--  drivers/gpu/drm/nouveau/nouveau_fbcon.h | 18
-rw-r--r--  drivers/gpu/drm/nouveau/nouveau_fence.c | 117
-rw-r--r--  drivers/gpu/drm/nouveau/nouveau_gem.c | 171
-rw-r--r--  drivers/gpu/drm/nouveau/nouveau_hw.c | 11
-rw-r--r--  drivers/gpu/drm/nouveau/nouveau_irq.c | 1210
-rw-r--r--  drivers/gpu/drm/nouveau/nouveau_mem.c | 426
-rw-r--r--  drivers/gpu/drm/nouveau/nouveau_mm.c | 271
-rw-r--r--  drivers/gpu/drm/nouveau/nouveau_mm.h | 67
-rw-r--r--  drivers/gpu/drm/nouveau/nouveau_notifier.c | 44
-rw-r--r--  drivers/gpu/drm/nouveau/nouveau_object.c | 754
-rw-r--r--  drivers/gpu/drm/nouveau/nouveau_pm.c | 33
-rw-r--r--  drivers/gpu/drm/nouveau/nouveau_ramht.c | 11
-rw-r--r--  drivers/gpu/drm/nouveau/nouveau_ramht.h | 2
-rw-r--r--  drivers/gpu/drm/nouveau/nouveau_reg.h | 75
-rw-r--r--  drivers/gpu/drm/nouveau/nouveau_sgdma.c | 212
-rw-r--r--  drivers/gpu/drm/nouveau/nouveau_state.c | 300
-rw-r--r--  drivers/gpu/drm/nouveau/nouveau_util.c | 69
-rw-r--r--  drivers/gpu/drm/nouveau/nouveau_util.h | 45
-rw-r--r--  drivers/gpu/drm/nouveau/nouveau_vm.c | 439
-rw-r--r--  drivers/gpu/drm/nouveau/nouveau_vm.h | 113
-rw-r--r--  drivers/gpu/drm/nouveau/nv04_crtc.c | 8
-rw-r--r--  drivers/gpu/drm/nouveau/nv04_dac.c | 12
-rw-r--r--  drivers/gpu/drm/nouveau/nv04_display.c | 21
-rw-r--r--  drivers/gpu/drm/nouveau/nv04_fbcon.c | 102
-rw-r--r--  drivers/gpu/drm/nouveau/nv04_fifo.c | 240
-rw-r--r--  drivers/gpu/drm/nouveau/nv04_graph.c | 645
-rw-r--r--  drivers/gpu/drm/nouveau/nv04_instmem.c | 50
-rw-r--r--  drivers/gpu/drm/nouveau/nv10_fb.c | 124
-rw-r--r--  drivers/gpu/drm/nouveau/nv10_fifo.c | 19
-rw-r--r--  drivers/gpu/drm/nouveau/nv10_graph.c | 203
-rw-r--r--  drivers/gpu/drm/nouveau/nv20_graph.c | 244
-rw-r--r--  drivers/gpu/drm/nouveau/nv30_fb.c | 23
-rw-r--r--  drivers/gpu/drm/nouveau/nv40_fb.c | 22
-rw-r--r--  drivers/gpu/drm/nouveau/nv40_fifo.c | 20
-rw-r--r--  drivers/gpu/drm/nouveau/nv40_graph.c | 205
-rw-r--r--  drivers/gpu/drm/nouveau/nv50_crtc.c | 27
-rw-r--r--  drivers/gpu/drm/nouveau/nv50_display.c | 422
-rw-r--r--  drivers/gpu/drm/nouveau/nv50_display.h | 2
-rw-r--r--  drivers/gpu/drm/nouveau/nv50_evo.c | 345
-rw-r--r--  drivers/gpu/drm/nouveau/nv50_evo.h | 10
-rw-r--r--  drivers/gpu/drm/nouveau/nv50_fb.c | 71
-rw-r--r--  drivers/gpu/drm/nouveau/nv50_fbcon.c | 114
-rw-r--r--  drivers/gpu/drm/nouveau/nv50_fifo.c | 42
-rw-r--r--  drivers/gpu/drm/nouveau/nv50_gpio.c | 198
-rw-r--r--  drivers/gpu/drm/nouveau/nv50_graph.c | 677
-rw-r--r--  drivers/gpu/drm/nouveau/nv50_instmem.c | 375
-rw-r--r--  drivers/gpu/drm/nouveau/nv50_vm.c | 180
-rw-r--r--  drivers/gpu/drm/nouveau/nv50_vram.c | 190
-rw-r--r--  drivers/gpu/drm/nouveau/nv84_crypt.c | 140
-rw-r--r--  drivers/gpu/drm/nouveau/nvc0_fbcon.c | 269
-rw-r--r--  drivers/gpu/drm/nouveau/nvc0_fifo.c | 365
-rw-r--r--  drivers/gpu/drm/nouveau/nvc0_graph.c | 705
-rw-r--r--  drivers/gpu/drm/nouveau/nvc0_graph.h | 64
-rw-r--r--  drivers/gpu/drm/nouveau/nvc0_grctx.c | 2874
-rw-r--r--  drivers/gpu/drm/nouveau/nvc0_instmem.c | 317
-rw-r--r--  drivers/gpu/drm/nouveau/nvc0_vm.c | 123
-rw-r--r--  drivers/gpu/drm/nouveau/nvc0_vram.c | 99
-rw-r--r--  drivers/gpu/drm/nouveau/nvreg.h | 3
-rw-r--r--  drivers/gpu/drm/radeon/Makefile | 5
-rw-r--r--  drivers/gpu/drm/radeon/ObjectID.h | 48
-rw-r--r--  drivers/gpu/drm/radeon/atom.c | 14
-rw-r--r--  drivers/gpu/drm/radeon/atombios.h | 997
-rw-r--r--  drivers/gpu/drm/radeon/atombios_crtc.c | 57
-rw-r--r--  drivers/gpu/drm/radeon/evergreen.c | 806
-rw-r--r--  drivers/gpu/drm/radeon/evergreen_blit_kms.c | 92
-rw-r--r--  drivers/gpu/drm/radeon/evergreen_reg.h | 6
-rw-r--r--  drivers/gpu/drm/radeon/evergreend.h | 53
-rw-r--r--  drivers/gpu/drm/radeon/ni.c | 316
-rw-r--r--  drivers/gpu/drm/radeon/ni_reg.h | 86
-rw-r--r--  drivers/gpu/drm/radeon/nid.h | 41
-rw-r--r--  drivers/gpu/drm/radeon/r100.c | 78
-rw-r--r--  drivers/gpu/drm/radeon/r100d.h | 2
-rw-r--r--  drivers/gpu/drm/radeon/r300.c | 21
-rw-r--r--  drivers/gpu/drm/radeon/r300d.h | 1
-rw-r--r--  drivers/gpu/drm/radeon/r500_reg.h | 4
-rw-r--r--  drivers/gpu/drm/radeon/r600.c | 357
-rw-r--r--  drivers/gpu/drm/radeon/r600d.h | 48
-rw-r--r--  drivers/gpu/drm/radeon/radeon.h | 151
-rw-r--r--  drivers/gpu/drm/radeon/radeon_asic.c | 153
-rw-r--r--  drivers/gpu/drm/radeon/radeon_asic.h | 65
-rw-r--r--  drivers/gpu/drm/radeon/radeon_atombios.c | 1246
-rw-r--r--  drivers/gpu/drm/radeon/radeon_bios.c | 41
-rw-r--r--  drivers/gpu/drm/radeon/radeon_combios.c | 3
-rw-r--r--  drivers/gpu/drm/radeon/radeon_connectors.c | 9
-rw-r--r--  drivers/gpu/drm/radeon/radeon_cs.c | 17
-rw-r--r--  drivers/gpu/drm/radeon/radeon_device.c | 35
-rw-r--r--  drivers/gpu/drm/radeon/radeon_display.c | 391
-rw-r--r--  drivers/gpu/drm/radeon/radeon_drv.c | 11
-rw-r--r--  drivers/gpu/drm/radeon/radeon_encoders.c | 205
-rw-r--r--  drivers/gpu/drm/radeon/radeon_family.h | 4
-rw-r--r--  drivers/gpu/drm/radeon/radeon_fb.c | 4
-rw-r--r--  drivers/gpu/drm/radeon/radeon_fence.c | 4
-rw-r--r--  drivers/gpu/drm/radeon/radeon_irq_kms.c | 46
-rw-r--r--  drivers/gpu/drm/radeon/radeon_kms.c | 64
-rw-r--r--  drivers/gpu/drm/radeon/radeon_mode.h | 16
-rw-r--r--  drivers/gpu/drm/radeon/radeon_object.c | 57
-rw-r--r--  drivers/gpu/drm/radeon/radeon_object.h | 7
-rw-r--r--  drivers/gpu/drm/radeon/radeon_pm.c | 94
-rw-r--r--  drivers/gpu/drm/radeon/radeon_reg.h | 13
-rw-r--r--  drivers/gpu/drm/radeon/radeon_trace.h | 82
-rw-r--r--  drivers/gpu/drm/radeon/radeon_trace_points.c | 9
-rw-r--r--  drivers/gpu/drm/radeon/reg_srcs/rv515 | 16
-rw-r--r--  drivers/gpu/drm/radeon/rs600.c | 118
-rw-r--r--  drivers/gpu/drm/radeon/rv770.c | 196
-rw-r--r--  drivers/gpu/drm/radeon/rv770d.h | 47
-rw-r--r--  drivers/gpu/drm/ttm/ttm_bo.c | 156
-rw-r--r--  drivers/gpu/drm/ttm/ttm_bo_util.c | 138
-rw-r--r--  drivers/gpu/drm/ttm/ttm_bo_vm.c | 29
-rw-r--r--  drivers/gpu/drm/ttm/ttm_execbuf_util.c | 169
-rw-r--r--  drivers/gpu/drm/vmwgfx/vmwgfx_drv.h | 1
-rw-r--r--  drivers/gpu/drm/vmwgfx/vmwgfx_execbuf.c | 3
-rw-r--r--  drivers/gpu/drm/vmwgfx/vmwgfx_fb.c | 3
161 files changed, 23969 insertions, 10138 deletions
diff --git a/drivers/gpu/drm/drm_agpsupport.c b/drivers/gpu/drm/drm_agpsupport.c
index 252fdb98b73..0cb2ba50af5 100644
--- a/drivers/gpu/drm/drm_agpsupport.c
+++ b/drivers/gpu/drm/drm_agpsupport.c
@@ -466,10 +466,4 @@ drm_agp_bind_pages(struct drm_device *dev,
}
EXPORT_SYMBOL(drm_agp_bind_pages);
-void drm_agp_chipset_flush(struct drm_device *dev)
-{
- agp_flush_chipset(dev->agp->bridge);
-}
-EXPORT_SYMBOL(drm_agp_chipset_flush);
-
#endif /* __OS_HAS_AGP */
diff --git a/drivers/gpu/drm/drm_crtc_helper.c b/drivers/gpu/drm/drm_crtc_helper.c
index 2d4e17a004d..952b3d4fb2a 100644
--- a/drivers/gpu/drm/drm_crtc_helper.c
+++ b/drivers/gpu/drm/drm_crtc_helper.c
@@ -336,7 +336,7 @@ bool drm_crtc_helper_set_mode(struct drm_crtc *crtc,
struct drm_framebuffer *old_fb)
{
struct drm_device *dev = crtc->dev;
- struct drm_display_mode *adjusted_mode, saved_mode;
+ struct drm_display_mode *adjusted_mode, saved_mode, saved_hwmode;
struct drm_crtc_helper_funcs *crtc_funcs = crtc->helper_private;
struct drm_encoder_helper_funcs *encoder_funcs;
int saved_x, saved_y;
@@ -350,6 +350,7 @@ bool drm_crtc_helper_set_mode(struct drm_crtc *crtc,
if (!crtc->enabled)
return true;
+ saved_hwmode = crtc->hwmode;
saved_mode = crtc->mode;
saved_x = crtc->x;
saved_y = crtc->y;
@@ -427,11 +428,21 @@ bool drm_crtc_helper_set_mode(struct drm_crtc *crtc,
}
+ /* Store real post-adjustment hardware mode. */
+ crtc->hwmode = *adjusted_mode;
+
+ /* Calculate and store various constants which
+ * are later needed by vblank and swap-completion
+ * timestamping. They are derived from true hwmode.
+ */
+ drm_calc_timestamping_constants(crtc);
+
/* XXX free adjustedmode */
drm_mode_destroy(dev, adjusted_mode);
/* FIXME: add subpixel order */
done:
if (!ret) {
+ crtc->hwmode = saved_hwmode;
crtc->mode = saved_mode;
crtc->x = saved_x;
crtc->y = saved_y;
@@ -650,6 +661,7 @@ int drm_crtc_helper_set_config(struct drm_mode_set *set)
old_fb)) {
DRM_ERROR("failed to set mode on [CRTC:%d]\n",
set->crtc->base.id);
+ set->crtc->fb = old_fb;
ret = -EINVAL;
goto fail;
}
@@ -664,8 +676,10 @@ int drm_crtc_helper_set_config(struct drm_mode_set *set)
set->crtc->fb = set->fb;
ret = crtc_funcs->mode_set_base(set->crtc,
set->x, set->y, old_fb);
- if (ret != 0)
+ if (ret != 0) {
+ set->crtc->fb = old_fb;
goto fail;
+ }
}
DRM_DEBUG_KMS("Setting connector DPMS state to on\n");
for (i = 0; i < set->num_connectors; i++) {
diff --git a/drivers/gpu/drm/drm_fb_helper.c b/drivers/gpu/drm/drm_fb_helper.c
index d2849e4ea4d..0307d601f5e 100644
--- a/drivers/gpu/drm/drm_fb_helper.c
+++ b/drivers/gpu/drm/drm_fb_helper.c
@@ -607,6 +607,25 @@ void drm_fb_helper_fini(struct drm_fb_helper *fb_helper)
}
EXPORT_SYMBOL(drm_fb_helper_fini);
+void drm_fb_helper_fill_fix(struct fb_info *info, struct drm_framebuffer *fb)
+{
+ info->fix.type = FB_TYPE_PACKED_PIXELS;
+ info->fix.visual = fb->depth == 8 ? FB_VISUAL_PSEUDOCOLOR :
+ FB_VISUAL_TRUECOLOR;
+ info->fix.mmio_start = 0;
+ info->fix.mmio_len = 0;
+ info->fix.type_aux = 0;
+ info->fix.xpanstep = 1; /* doing it in hw */
+ info->fix.ypanstep = 1; /* doing it in hw */
+ info->fix.ywrapstep = 0;
+ info->fix.accel = FB_ACCEL_NONE;
+ info->fix.type_aux = 0;
+
+ info->fix.line_length = fb->pitch;
+ return;
+}
+EXPORT_SYMBOL(drm_fb_helper_fill_fix);
+
static int setcolreg(struct drm_crtc *crtc, u16 red, u16 green,
u16 blue, u16 regno, struct fb_info *info)
{
@@ -816,6 +835,7 @@ int drm_fb_helper_set_par(struct fb_info *info)
mutex_unlock(&dev->mode_config.mutex);
return ret;
}
+ drm_fb_helper_fill_fix(info, fb_helper->fb);
}
mutex_unlock(&dev->mode_config.mutex);
@@ -953,6 +973,7 @@ int drm_fb_helper_single_fb_probe(struct drm_fb_helper *fb_helper,
if (new_fb) {
info->var.pixclock = 0;
+ drm_fb_helper_fill_fix(info, fb_helper->fb);
if (register_framebuffer(info) < 0) {
return -EINVAL;
}
@@ -979,24 +1000,6 @@ int drm_fb_helper_single_fb_probe(struct drm_fb_helper *fb_helper,
}
EXPORT_SYMBOL(drm_fb_helper_single_fb_probe);
-void drm_fb_helper_fill_fix(struct fb_info *info, uint32_t pitch,
- uint32_t depth)
-{
- info->fix.type = FB_TYPE_PACKED_PIXELS;
- info->fix.visual = depth == 8 ? FB_VISUAL_PSEUDOCOLOR :
- FB_VISUAL_TRUECOLOR;
- info->fix.type_aux = 0;
- info->fix.xpanstep = 1; /* doing it in hw */
- info->fix.ypanstep = 1; /* doing it in hw */
- info->fix.ywrapstep = 0;
- info->fix.accel = FB_ACCEL_NONE;
- info->fix.type_aux = 0;
-
- info->fix.line_length = pitch;
- return;
-}
-EXPORT_SYMBOL(drm_fb_helper_fill_fix);
-
void drm_fb_helper_fill_var(struct fb_info *info, struct drm_fb_helper *fb_helper,
uint32_t fb_width, uint32_t fb_height)
{
@@ -1005,6 +1008,7 @@ void drm_fb_helper_fill_var(struct fb_info *info, struct drm_fb_helper *fb_helpe
info->var.xres_virtual = fb->width;
info->var.yres_virtual = fb->height;
info->var.bits_per_pixel = fb->bits_per_pixel;
+ info->var.accel_flags = FB_ACCELF_TEXT;
info->var.xoffset = 0;
info->var.yoffset = 0;
info->var.activate = FB_ACTIVATE_NOW;
@@ -1530,3 +1534,24 @@ bool drm_fb_helper_hotplug_event(struct drm_fb_helper *fb_helper)
}
EXPORT_SYMBOL(drm_fb_helper_hotplug_event);
+/* The Kconfig DRM_KMS_HELPER selects FRAMEBUFFER_CONSOLE (if !EMBEDDED)
+ * but the module doesn't depend on any fb console symbols. At least
+ * attempt to load fbcon to avoid leaving the system without a usable console.
+ */
+#if defined(CONFIG_FRAMEBUFFER_CONSOLE_MODULE) && !defined(CONFIG_EMBEDDED)
+static int __init drm_fb_helper_modinit(void)
+{
+ const char *name = "fbcon";
+ struct module *fbcon;
+
+ mutex_lock(&module_mutex);
+ fbcon = find_module(name);
+ mutex_unlock(&module_mutex);
+
+ if (!fbcon)
+ request_module_nowait(name);
+ return 0;
+}
+
+module_init(drm_fb_helper_modinit);
+#endif
diff --git a/drivers/gpu/drm/drm_fops.c b/drivers/gpu/drm/drm_fops.c
index a39794bac04..2ec7d48fc4a 100644
--- a/drivers/gpu/drm/drm_fops.c
+++ b/drivers/gpu/drm/drm_fops.c
@@ -236,6 +236,8 @@ static int drm_open_helper(struct inode *inode, struct file *filp,
return -EBUSY; /* No exclusive opens */
if (!drm_cpu_valid())
return -EINVAL;
+ if (dev->switch_power_state != DRM_SWITCH_POWER_ON)
+ return -EINVAL;
DRM_DEBUG("pid = %d, minor = %d\n", task_pid_nr(current), minor_id);
diff --git a/drivers/gpu/drm/drm_irq.c b/drivers/gpu/drm/drm_irq.c
index 16d5155edad..0054e957203 100644
--- a/drivers/gpu/drm/drm_irq.c
+++ b/drivers/gpu/drm/drm_irq.c
@@ -40,6 +40,22 @@
#include <linux/slab.h>
#include <linux/vgaarb.h>
+
+/* Access macro for slots in vblank timestamp ringbuffer. */
+#define vblanktimestamp(dev, crtc, count) ( \
+ (dev)->_vblank_time[(crtc) * DRM_VBLANKTIME_RBSIZE + \
+ ((count) % DRM_VBLANKTIME_RBSIZE)])
+
+/* Retry timestamp calculation up to 3 times to satisfy
+ * drm_timestamp_precision before giving up.
+ */
+#define DRM_TIMESTAMP_MAXRETRIES 3
+
+/* Threshold in nanoseconds for detection of redundant
+ * vblank irq in drm_handle_vblank(). 1 msec should be ok.
+ */
+#define DRM_REDUNDANT_VBLIRQ_THRESH_NS 1000000
+
/**
* Get interrupt from bus id.
*
@@ -77,6 +93,87 @@ int drm_irq_by_busid(struct drm_device *dev, void *data,
return 0;
}
+/*
+ * Clear vblank timestamp buffer for a crtc.
+ */
+static void clear_vblank_timestamps(struct drm_device *dev, int crtc)
+{
+ memset(&dev->_vblank_time[crtc * DRM_VBLANKTIME_RBSIZE], 0,
+ DRM_VBLANKTIME_RBSIZE * sizeof(struct timeval));
+}
+
+/*
+ * Disable vblank irq's on crtc, make sure that last vblank count
+ * of hardware and corresponding consistent software vblank counter
+ * are preserved, even if there are any spurious vblank irq's after
+ * disable.
+ */
+static void vblank_disable_and_save(struct drm_device *dev, int crtc)
+{
+ unsigned long irqflags;
+ u32 vblcount;
+ s64 diff_ns;
+ int vblrc;
+ struct timeval tvblank;
+
+ /* Prevent vblank irq processing while disabling vblank irqs,
+ * so no updates of timestamps or count can happen after we've
+ * disabled. Needed to prevent races in case of delayed irq's.
+ * Disable preemption, so vblank_time_lock is held as short as
+ * possible, even under a kernel with PREEMPT_RT patches.
+ */
+ preempt_disable();
+ spin_lock_irqsave(&dev->vblank_time_lock, irqflags);
+
+ dev->driver->disable_vblank(dev, crtc);
+ dev->vblank_enabled[crtc] = 0;
+
+ /* No further vblank irq's will be processed after
+ * this point. Get current hardware vblank count and
+ * vblank timestamp, repeat until they are consistent.
+ *
+ * FIXME: There is still a race condition here and in
+ * drm_update_vblank_count() which can cause off-by-one
+ * reinitialization of software vblank counter. If gpu
+ * vblank counter doesn't increment exactly at the leading
+ * edge of a vblank interval, then we can lose 1 count if
+ * we happen to execute between start of vblank and the
+ * delayed gpu counter increment.
+ */
+ do {
+ dev->last_vblank[crtc] = dev->driver->get_vblank_counter(dev, crtc);
+ vblrc = drm_get_last_vbltimestamp(dev, crtc, &tvblank, 0);
+ } while (dev->last_vblank[crtc] != dev->driver->get_vblank_counter(dev, crtc));
+
+ /* Compute time difference to stored timestamp of last vblank
+ * as updated by last invocation of drm_handle_vblank() in vblank irq.
+ */
+ vblcount = atomic_read(&dev->_vblank_count[crtc]);
+ diff_ns = timeval_to_ns(&tvblank) -
+ timeval_to_ns(&vblanktimestamp(dev, crtc, vblcount));
+
+ /* If there is at least 1 msec difference between the last stored
+ * timestamp and tvblank, then we are currently executing our
+ * disable inside a new vblank interval, the tvblank timestamp
+ * corresponds to this new vblank interval and the irq handler
+ * for this vblank didn't run yet and won't run due to our disable.
+ * Therefore we need to do the job of drm_handle_vblank() and
+ * increment the vblank counter by one to account for this vblank.
+ *
+ * Skip this step if there isn't any high precision timestamp
+ * available. In that case we can't account for this and just
+ * hope for the best.
+ */
+ if ((vblrc > 0) && (abs(diff_ns) > 1000000))
+ atomic_inc(&dev->_vblank_count[crtc]);
+
+ /* Invalidate all timestamps while vblank irq's are off. */
+ clear_vblank_timestamps(dev, crtc);
+
+ spin_unlock_irqrestore(&dev->vblank_time_lock, irqflags);
+ preempt_enable();
+}
+
static void vblank_disable_fn(unsigned long arg)
{
struct drm_device *dev = (struct drm_device *)arg;
@@ -91,10 +188,7 @@ static void vblank_disable_fn(unsigned long arg)
if (atomic_read(&dev->vblank_refcount[i]) == 0 &&
dev->vblank_enabled[i]) {
DRM_DEBUG("disabling vblank on crtc %d\n", i);
- dev->last_vblank[i] =
- dev->driver->get_vblank_counter(dev, i);
- dev->driver->disable_vblank(dev, i);
- dev->vblank_enabled[i] = 0;
+ vblank_disable_and_save(dev, i);
}
spin_unlock_irqrestore(&dev->vbl_lock, irqflags);
}
@@ -117,6 +211,7 @@ void drm_vblank_cleanup(struct drm_device *dev)
kfree(dev->last_vblank);
kfree(dev->last_vblank_wait);
kfree(dev->vblank_inmodeset);
+ kfree(dev->_vblank_time);
dev->num_crtcs = 0;
}
@@ -129,6 +224,8 @@ int drm_vblank_init(struct drm_device *dev, int num_crtcs)
setup_timer(&dev->vblank_disable_timer, vblank_disable_fn,
(unsigned long)dev);
spin_lock_init(&dev->vbl_lock);
+ spin_lock_init(&dev->vblank_time_lock);
+
dev->num_crtcs = num_crtcs;
dev->vbl_queue = kmalloc(sizeof(wait_queue_head_t) * num_crtcs,
@@ -161,6 +258,19 @@ int drm_vblank_init(struct drm_device *dev, int num_crtcs)
if (!dev->vblank_inmodeset)
goto err;
+ dev->_vblank_time = kcalloc(num_crtcs * DRM_VBLANKTIME_RBSIZE,
+ sizeof(struct timeval), GFP_KERNEL);
+ if (!dev->_vblank_time)
+ goto err;
+
+ DRM_INFO("Supports vblank timestamp caching Rev 1 (10.10.2010).\n");
+
+ /* Driver specific high-precision vblank timestamping supported? */
+ if (dev->driver->get_vblank_timestamp)
+ DRM_INFO("Driver supports precise vblank timestamp query.\n");
+ else
+ DRM_INFO("No driver support for vblank timestamp query.\n");
+
/* Zero per-crtc vblank stuff */
for (i = 0; i < num_crtcs; i++) {
init_waitqueue_head(&dev->vbl_queue[i]);
@@ -279,7 +389,7 @@ EXPORT_SYMBOL(drm_irq_install);
*
* Calls the driver's \c drm_driver_irq_uninstall() function, and stops the irq.
*/
-int drm_irq_uninstall(struct drm_device * dev)
+int drm_irq_uninstall(struct drm_device *dev)
{
unsigned long irqflags;
int irq_enabled, i;
@@ -335,7 +445,9 @@ int drm_control(struct drm_device *dev, void *data,
{
struct drm_control *ctl = data;
- /* if we haven't irq we fallback for compatibility reasons - this used to be a separate function in drm_dma.h */
+ /* If we don't have an irq, we fall back for compatibility reasons -
+ * this used to be a separate function in drm_dma.h
+ */
switch (ctl->func) {
@@ -360,6 +472,287 @@ int drm_control(struct drm_device *dev, void *data,
}
/**
+ * drm_calc_timestamping_constants - Calculate and
+ * store various constants which are later needed by
+ * vblank and swap-completion timestamping, e.g, by
+ * drm_calc_vbltimestamp_from_scanoutpos().
+ * They are derived from crtc's true scanout timing,
+ * so they take things like panel scaling or other
+ * adjustments into account.
+ *
+ * @crtc drm_crtc whose timestamp constants should be updated.
+ *
+ */
+void drm_calc_timestamping_constants(struct drm_crtc *crtc)
+{
+ s64 linedur_ns = 0, pixeldur_ns = 0, framedur_ns = 0;
+ u64 dotclock;
+
+ /* Dot clock in Hz: */
+ dotclock = (u64) crtc->hwmode.clock * 1000;
+
+ /* Valid dotclock? */
+ if (dotclock > 0) {
+ /* Convert scanline length in pixels and video dot clock to
+ * line duration, frame duration and pixel duration in
+ * nanoseconds:
+ */
+ pixeldur_ns = (s64) div64_u64(1000000000, dotclock);
+ linedur_ns = (s64) div64_u64(((u64) crtc->hwmode.crtc_htotal *
+ 1000000000), dotclock);
+ framedur_ns = (s64) crtc->hwmode.crtc_vtotal * linedur_ns;
+ } else
+ DRM_ERROR("crtc %d: Can't calculate constants, dotclock = 0!\n",
+ crtc->base.id);
+
+ crtc->pixeldur_ns = pixeldur_ns;
+ crtc->linedur_ns = linedur_ns;
+ crtc->framedur_ns = framedur_ns;
+
+ DRM_DEBUG("crtc %d: hwmode: htotal %d, vtotal %d, vdisplay %d\n",
+ crtc->base.id, crtc->hwmode.crtc_htotal,
+ crtc->hwmode.crtc_vtotal, crtc->hwmode.crtc_vdisplay);
+ DRM_DEBUG("crtc %d: clock %d kHz framedur %d linedur %d, pixeldur %d\n",
+ crtc->base.id, (int) dotclock/1000, (int) framedur_ns,
+ (int) linedur_ns, (int) pixeldur_ns);
+}
+EXPORT_SYMBOL(drm_calc_timestamping_constants);
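For a feel for the magnitudes, a worked example (the mode numbers are illustrative, not taken from this patch): a CEA 1080p60 mode has hwmode.clock = 148500 kHz, crtc_htotal = 2200 and crtc_vtotal = 1125, so dotclock = 148,500,000 Hz, pixeldur_ns = 10^9 / 148500000 = 6 ns (truncating integer division), linedur_ns = 2200 * 10^9 / 148500000 = 14814 ns, and framedur_ns = 1125 * 14814 ns = 16665750 ns, i.e. the expected ~16.67 ms / 60 Hz frame period.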
+
+/**
+ * drm_calc_vbltimestamp_from_scanoutpos - helper routine for kms
+ * drivers. Implements calculation of exact vblank timestamps from
+ * given drm_display_mode timings and current video scanout position
+ * of a crtc. This can be called from within get_vblank_timestamp()
+ * implementation of a kms driver to implement the actual timestamping.
+ *
+ * Should return timestamps conforming to the OML_sync_control OpenML
+ * extension specification. The timestamp corresponds to the end of
+ * the vblank interval, aka start of scanout of topmost-leftmost display
+ * pixel in the following video frame.
+ *
+ * Requires support for optional dev->driver->get_scanout_position()
+ * in kms driver, plus a bit of setup code to provide a drm_display_mode
+ * that corresponds to the true scanout timing.
+ *
+ * The current implementation only handles standard video modes. It
+ * returns as a no-op if a doublescan or interlaced video mode is
+ * active. Higher level code is expected to handle this.
+ *
+ * @dev: DRM device.
+ * @crtc: Which crtc's vblank timestamp to retrieve.
+ * @max_error: Desired maximum allowable error in timestamps (nanosecs).
+ * On return contains true maximum error of timestamp.
+ * @vblank_time: Pointer to struct timeval which should receive the timestamp.
+ * @flags: Flags to pass to driver:
+ * 0 = Default.
+ * DRM_CALLED_FROM_VBLIRQ = If function is called from vbl irq handler.
+ * @refcrtc: drm_crtc* of crtc which defines scanout timing.
+ *
+ * Returns negative value on error, failure or if not supported in current
+ * video mode:
+ *
+ * -EINVAL - Invalid crtc.
+ * -EAGAIN - Temporarily unavailable, e.g., called before initial modeset.
+ * -ENOTSUPP - Function not supported in current display mode.
+ * -EIO - Failed, e.g., due to failed scanout position query.
+ *
+ * Returns or'ed positive status flags on success:
+ *
+ * DRM_VBLANKTIME_SCANOUTPOS_METHOD - Signal this method used for timestamping.
+ * DRM_VBLANKTIME_INVBL - Timestamp taken while scanout was in vblank interval.
+ *
+ */
+int drm_calc_vbltimestamp_from_scanoutpos(struct drm_device *dev, int crtc,
+ int *max_error,
+ struct timeval *vblank_time,
+ unsigned flags,
+ struct drm_crtc *refcrtc)
+{
+ struct timeval stime, raw_time;
+ struct drm_display_mode *mode;
+ int vbl_status, vtotal, vdisplay;
+ int vpos, hpos, i;
+ s64 framedur_ns, linedur_ns, pixeldur_ns, delta_ns, duration_ns;
+ bool invbl;
+
+ if (crtc < 0 || crtc >= dev->num_crtcs) {
+ DRM_ERROR("Invalid crtc %d\n", crtc);
+ return -EINVAL;
+ }
+
+ /* Scanout position query not supported? Should not happen. */
+ if (!dev->driver->get_scanout_position) {
+ DRM_ERROR("Called from driver w/o get_scanout_position()!?\n");
+ return -EIO;
+ }
+
+ mode = &refcrtc->hwmode;
+ vtotal = mode->crtc_vtotal;
+ vdisplay = mode->crtc_vdisplay;
+
+ /* Durations of frames, lines, pixels in nanoseconds. */
+ framedur_ns = refcrtc->framedur_ns;
+ linedur_ns = refcrtc->linedur_ns;
+ pixeldur_ns = refcrtc->pixeldur_ns;
+
+ /* If mode timing undefined, just return as no-op:
+ * Happens during initial modesetting of a crtc.
+ */
+ if (vtotal <= 0 || vdisplay <= 0 || framedur_ns == 0) {
+ DRM_DEBUG("crtc %d: Noop due to uninitialized mode.\n", crtc);
+ return -EAGAIN;
+ }
+
+ /* Don't know yet how to handle interlaced or
+ * double scan modes. Just no-op for now.
+ */
+ if (mode->flags & (DRM_MODE_FLAG_INTERLACE | DRM_MODE_FLAG_DBLSCAN)) {
+ DRM_DEBUG("crtc %d: Noop due to unsupported mode.\n", crtc);
+ return -ENOTSUPP;
+ }
+
+ /* Get current scanout position with system timestamp.
+ * Repeat query up to DRM_TIMESTAMP_MAXRETRIES times
+ * if single query takes longer than max_error nanoseconds.
+ *
+ * This guarantees a tight bound on maximum error if
+ * code gets preempted or delayed for some reason.
+ */
+ for (i = 0; i < DRM_TIMESTAMP_MAXRETRIES; i++) {
+ /* Disable preemption to make it very likely to
+ * succeed in the first iteration even on PREEMPT_RT kernel.
+ */
+ preempt_disable();
+
+ /* Get system timestamp before query. */
+ do_gettimeofday(&stime);
+
+ /* Get vertical and horizontal scanout pos. vpos, hpos. */
+ vbl_status = dev->driver->get_scanout_position(dev, crtc, &vpos, &hpos);
+
+ /* Get system timestamp after query. */
+ do_gettimeofday(&raw_time);
+
+ preempt_enable();
+
+ /* Return as no-op if scanout query unsupported or failed. */
+ if (!(vbl_status & DRM_SCANOUTPOS_VALID)) {
+ DRM_DEBUG("crtc %d : scanoutpos query failed [%d].\n",
+ crtc, vbl_status);
+ return -EIO;
+ }
+
+ duration_ns = timeval_to_ns(&raw_time) - timeval_to_ns(&stime);
+
+ /* Accept result with < max_error nsecs timing uncertainty. */
+ if (duration_ns <= (s64) *max_error)
+ break;
+ }
+
+ /* Noisy system timing? */
+ if (i == DRM_TIMESTAMP_MAXRETRIES) {
+ DRM_DEBUG("crtc %d: Noisy timestamp %d us > %d us [%d reps].\n",
+ crtc, (int) duration_ns/1000, *max_error/1000, i);
+ }
+
+ /* Return upper bound of timestamp precision error. */
+ *max_error = (int) duration_ns;
+
+ /* Check if in vblank area:
+ * vpos is >=0 in video scanout area, but negative
+ * within vblank area, counting down the number of lines until
+ * start of scanout.
+ */
+ invbl = vbl_status & DRM_SCANOUTPOS_INVBL;
+
+ /* Convert scanout position into elapsed time at raw_time query
+ * since start of scanout at first display scanline. delta_ns
+ * can be negative if start of scanout hasn't happened yet.
+ */
+ delta_ns = (s64) vpos * linedur_ns + (s64) hpos * pixeldur_ns;
+
+ /* Is vpos outside nominal vblank area, but less than
+ * 1/100 of a frame height away from start of vblank?
+ * If so, assume this isn't a massively delayed vblank
+ * interrupt, but a vblank interrupt that fired a few
+ * microseconds before true start of vblank. Compensate
+ * by adding a full frame duration to the final timestamp.
+ * Happens, e.g., on ATI R500, R600.
+ *
+ * We only do this if DRM_CALLED_FROM_VBLIRQ.
+ */
+ if ((flags & DRM_CALLED_FROM_VBLIRQ) && !invbl &&
+ ((vdisplay - vpos) < vtotal / 100)) {
+ delta_ns = delta_ns - framedur_ns;
+
+ /* Signal this correction as "applied". */
+ vbl_status |= 0x8;
+ }
+
+ /* Subtract time delta from raw timestamp to get final
+ * vblank_time timestamp for end of vblank.
+ */
+ *vblank_time = ns_to_timeval(timeval_to_ns(&raw_time) - delta_ns);
+
+ DRM_DEBUG("crtc %d : v %d p(%d,%d)@ %d.%d -> %d.%d [e %d us, %d rep]\n",
+ crtc, (int) vbl_status, hpos, vpos, raw_time.tv_sec,
+ raw_time.tv_usec, vblank_time->tv_sec, vblank_time->tv_usec,
+ (int) duration_ns/1000, i);
+
+ vbl_status = DRM_VBLANKTIME_SCANOUTPOS_METHOD;
+ if (invbl)
+ vbl_status |= DRM_VBLANKTIME_INVBL;
+
+ return vbl_status;
+}
+EXPORT_SYMBOL(drm_calc_vbltimestamp_from_scanoutpos);
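As a usage illustration, a kms driver can satisfy the new dev->driver->get_vblank_timestamp hook by delegating to this helper. A minimal sketch, where foo_crtc_from_pipe() is a hypothetical driver-internal lookup (this wiring is not part of the patch itself):

	static int foo_get_vblank_timestamp(struct drm_device *dev, int crtc,
					    int *max_error,
					    struct timeval *vblank_time,
					    unsigned flags)
	{
		/* Hypothetical helper mapping the crtc index to its drm_crtc. */
		struct drm_crtc *refcrtc = foo_crtc_from_pipe(dev, crtc);

		if (refcrtc == NULL || !refcrtc->enabled)
			return -EINVAL;

		/* refcrtc supplies hwmode and the constants computed by
		 * drm_calc_timestamping_constants().
		 */
		return drm_calc_vbltimestamp_from_scanoutpos(dev, crtc, max_error,
							     vblank_time, flags,
							     refcrtc);
	}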
+
+/**
+ * drm_get_last_vbltimestamp - retrieve raw timestamp for the most recent
+ * vblank interval.
+ *
+ * @dev: DRM device
+ * @crtc: which crtc's vblank timestamp to retrieve
+ * @tvblank: Pointer to target struct timeval which should receive the timestamp
+ * @flags: Flags to pass to driver:
+ * 0 = Default.
+ * DRM_CALLED_FROM_VBLIRQ = If function is called from vbl irq handler.
+ *
+ * Fetches the system timestamp corresponding to the time of the most recent
+ * vblank interval on specified crtc. May call into kms-driver to
+ * compute the timestamp with a high-precision GPU specific method.
+ *
+ * Returns zero if timestamp originates from uncorrected do_gettimeofday()
+ * call, i.e., it isn't very precisely locked to the true vblank.
+ *
+ * Returns non-zero if timestamp is considered to be very precise.
+ */
+u32 drm_get_last_vbltimestamp(struct drm_device *dev, int crtc,
+ struct timeval *tvblank, unsigned flags)
+{
+ int ret = 0;
+
+ /* Define requested maximum error on timestamps (nanoseconds). */
+ int max_error = (int) drm_timestamp_precision * 1000;
+
+ /* Query driver if possible and precision timestamping enabled. */
+ if (dev->driver->get_vblank_timestamp && (max_error > 0)) {
+ ret = dev->driver->get_vblank_timestamp(dev, crtc, &max_error,
+ tvblank, flags);
+ if (ret > 0)
+ return (u32) ret;
+ }
+
+ /* GPU high precision timestamp query unsupported or failed.
+ * Return gettimeofday timestamp as best estimate.
+ */
+ do_gettimeofday(tvblank);
+
+ return 0;
+}
+EXPORT_SYMBOL(drm_get_last_vbltimestamp);
+
+/**
* drm_vblank_count - retrieve "cooked" vblank counter value
* @dev: DRM device
* @crtc: which counter to retrieve
@@ -375,6 +768,40 @@ u32 drm_vblank_count(struct drm_device *dev, int crtc)
EXPORT_SYMBOL(drm_vblank_count);
/**
+ * drm_vblank_count_and_time - retrieve "cooked" vblank counter value
+ * and the system timestamp corresponding to that vblank counter value.
+ *
+ * @dev: DRM device
+ * @crtc: which counter to retrieve
+ * @vblanktime: Pointer to struct timeval to receive the vblank timestamp.
+ *
+ * Fetches the "cooked" vblank count value that represents the number of
+ * vblank events since the system was booted, including lost events due to
+ * modesetting activity. Returns corresponding system timestamp of the time
+ * of the vblank interval that corresponds to the current value vblank counter
+ * value.
+ */
+u32 drm_vblank_count_and_time(struct drm_device *dev, int crtc,
+ struct timeval *vblanktime)
+{
+ u32 cur_vblank;
+
+ /* Read timestamp from slot of _vblank_time ringbuffer
+ * that corresponds to current vblank count. Retry if
+ * count has incremented during readout. This works like
+ * a seqlock.
+ */
+ do {
+ cur_vblank = atomic_read(&dev->_vblank_count[crtc]);
+ *vblanktime = vblanktimestamp(dev, crtc, cur_vblank);
+ smp_rmb();
+ } while (cur_vblank != atomic_read(&dev->_vblank_count[crtc]));
+
+ return cur_vblank;
+}
+EXPORT_SYMBOL(drm_vblank_count_and_time);
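The retry loop above is the reader half of a lockless publication scheme; the writer half lives in drm_handle_vblank() further down in this patch. Condensed side by side (extracted from the two functions for illustration, not additional patch content):

	/* writer, in the vblank irq handler */
	vblanktimestamp(dev, crtc, vblcount + 1) = tvblank;
	smp_wmb();	/* publish the timestamp before the count */
	atomic_inc(&dev->_vblank_count[crtc]);

	/* reader */
	do {
		cur_vblank = atomic_read(&dev->_vblank_count[crtc]);
		*vblanktime = vblanktimestamp(dev, crtc, cur_vblank);
		smp_rmb();
	} while (cur_vblank != atomic_read(&dev->_vblank_count[crtc]));

Because the count is only incremented after the new ringbuffer slot is written, a reader that observes the same count before and after fetching the slot is guaranteed a consistent count/timestamp pair without taking any lock.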
+
+/**
* drm_update_vblank_count - update the master vblank counter
* @dev: DRM device
* @crtc: counter to update
@@ -392,7 +819,8 @@ EXPORT_SYMBOL(drm_vblank_count);
*/
static void drm_update_vblank_count(struct drm_device *dev, int crtc)
{
- u32 cur_vblank, diff;
+ u32 cur_vblank, diff, tslot, rc;
+ struct timeval t_vblank;
/*
* Interrupts were disabled prior to this call, so deal with counter
@@ -400,8 +828,18 @@ static void drm_update_vblank_count(struct drm_device *dev, int crtc)
* NOTE! It's possible we lost a full dev->max_vblank_count events
* here if the register is small or we had vblank interrupts off for
* a long time.
+ *
+ * We repeat the hardware vblank counter & timestamp query until
+ * we get consistent results. This to prevent races between gpu
+ * updating its hardware counter while we are retrieving the
+ * corresponding vblank timestamp.
*/
- cur_vblank = dev->driver->get_vblank_counter(dev, crtc);
+ do {
+ cur_vblank = dev->driver->get_vblank_counter(dev, crtc);
+ rc = drm_get_last_vbltimestamp(dev, crtc, &t_vblank, 0);
+ } while (cur_vblank != dev->driver->get_vblank_counter(dev, crtc));
+
+ /* Deal with counter wrap */
diff = cur_vblank - dev->last_vblank[crtc];
if (cur_vblank < dev->last_vblank[crtc]) {
diff += dev->max_vblank_count;
@@ -413,6 +851,16 @@ static void drm_update_vblank_count(struct drm_device *dev, int crtc)
DRM_DEBUG("enabling vblank interrupts on crtc %d, missed %d\n",
crtc, diff);
+ /* Reinitialize corresponding vblank timestamp if high-precision query
+ * available. Skip this step if query unsupported or failed. Will
+ * reinitialize delayed at next vblank interrupt in that case.
+ */
+ if (rc) {
+ tslot = atomic_read(&dev->_vblank_count[crtc]) + diff;
+ vblanktimestamp(dev, crtc, tslot) = t_vblank;
+ smp_wmb();
+ }
+
atomic_add(diff, &dev->_vblank_count[crtc]);
}
@@ -429,15 +877,27 @@ static void drm_update_vblank_count(struct drm_device *dev, int crtc)
*/
int drm_vblank_get(struct drm_device *dev, int crtc)
{
- unsigned long irqflags;
+ unsigned long irqflags, irqflags2;
int ret = 0;
spin_lock_irqsave(&dev->vbl_lock, irqflags);
/* Going from 0->1 means we have to enable interrupts again */
if (atomic_add_return(1, &dev->vblank_refcount[crtc]) == 1) {
+ /* Disable preemption while holding vblank_time_lock. Do
+ * it explicitely to guard against PREEMPT_RT kernel.
+ */
+ preempt_disable();
+ spin_lock_irqsave(&dev->vblank_time_lock, irqflags2);
if (!dev->vblank_enabled[crtc]) {
+ /* Enable vblank irqs under vblank_time_lock protection.
+ * All vblank count & timestamp updates are held off
+ * until we are done reinitializing master counter and
+ * timestamps. Filtercode in drm_handle_vblank() will
+ * prevent double-accounting of same vblank interval.
+ */
ret = dev->driver->enable_vblank(dev, crtc);
- DRM_DEBUG("enabling vblank on crtc %d, ret: %d\n", crtc, ret);
+ DRM_DEBUG("enabling vblank on crtc %d, ret: %d\n",
+ crtc, ret);
if (ret)
atomic_dec(&dev->vblank_refcount[crtc]);
else {
@@ -445,6 +905,8 @@ int drm_vblank_get(struct drm_device *dev, int crtc)
drm_update_vblank_count(dev, crtc);
}
}
+ spin_unlock_irqrestore(&dev->vblank_time_lock, irqflags2);
+ preempt_enable();
} else {
if (!dev->vblank_enabled[crtc]) {
atomic_dec(&dev->vblank_refcount[crtc]);
@@ -463,15 +925,17 @@ EXPORT_SYMBOL(drm_vblank_get);
* @crtc: which counter to give up
*
* Release ownership of a given vblank counter, turning off interrupts
- * if possible.
+ * if possible. Disable interrupts after drm_vblank_offdelay milliseconds.
*/
void drm_vblank_put(struct drm_device *dev, int crtc)
{
- BUG_ON (atomic_read (&dev->vblank_refcount[crtc]) == 0);
+ BUG_ON(atomic_read(&dev->vblank_refcount[crtc]) == 0);
/* Last user schedules interrupt disable */
- if (atomic_dec_and_test(&dev->vblank_refcount[crtc]))
- mod_timer(&dev->vblank_disable_timer, jiffies + 5*DRM_HZ);
+ if (atomic_dec_and_test(&dev->vblank_refcount[crtc]) &&
+ (drm_vblank_offdelay > 0))
+ mod_timer(&dev->vblank_disable_timer,
+ jiffies + ((drm_vblank_offdelay * DRM_HZ)/1000));
}
EXPORT_SYMBOL(drm_vblank_put);
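Worked numbers for the new timer arithmetic (my own arithmetic, not from the patch): with the default drm_vblank_offdelay of 5000 msec, jiffies + (5000 * DRM_HZ) / 1000 equals jiffies + 5 * DRM_HZ, i.e. exactly the five-second delay the removed hard-coded expression used. A non-positive drm_vblank_offdelay skips mod_timer() entirely, so vblank irqs then stay enabled until drm_vblank_off().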
@@ -480,10 +944,8 @@ void drm_vblank_off(struct drm_device *dev, int crtc)
unsigned long irqflags;
spin_lock_irqsave(&dev->vbl_lock, irqflags);
- dev->driver->disable_vblank(dev, crtc);
+ vblank_disable_and_save(dev, crtc);
DRM_WAKEUP(&dev->vbl_queue[crtc]);
- dev->vblank_enabled[crtc] = 0;
- dev->last_vblank[crtc] = dev->driver->get_vblank_counter(dev, crtc);
spin_unlock_irqrestore(&dev->vbl_lock, irqflags);
}
EXPORT_SYMBOL(drm_vblank_off);
@@ -602,7 +1064,6 @@ static int drm_queue_vblank_event(struct drm_device *dev, int pipe,
e->base.file_priv = file_priv;
e->base.destroy = (void (*) (struct drm_pending_event *)) kfree;
- do_gettimeofday(&now);
spin_lock_irqsave(&dev->event_lock, flags);
if (file_priv->event_space < sizeof e->event) {
@@ -611,7 +1072,8 @@ static int drm_queue_vblank_event(struct drm_device *dev, int pipe,
}
file_priv->event_space -= sizeof e->event;
- seq = drm_vblank_count(dev, pipe);
+ seq = drm_vblank_count_and_time(dev, pipe, &now);
+
if ((vblwait->request.type & _DRM_VBLANK_NEXTONMISS) &&
(seq - vblwait->request.sequence) <= (1 << 23)) {
vblwait->request.sequence = seq + 1;
@@ -626,15 +1088,18 @@ static int drm_queue_vblank_event(struct drm_device *dev, int pipe,
e->event.sequence = vblwait->request.sequence;
if ((seq - vblwait->request.sequence) <= (1 << 23)) {
+ e->event.sequence = seq;
e->event.tv_sec = now.tv_sec;
e->event.tv_usec = now.tv_usec;
drm_vblank_put(dev, pipe);
list_add_tail(&e->base.link, &e->base.file_priv->event_list);
wake_up_interruptible(&e->base.file_priv->event_wait);
+ vblwait->reply.sequence = seq;
trace_drm_vblank_event_delivered(current->pid, pipe,
vblwait->request.sequence);
} else {
list_add_tail(&e->base.link, &dev->vblank_event_list);
+ vblwait->reply.sequence = vblwait->request.sequence;
}
spin_unlock_irqrestore(&dev->event_lock, flags);
@@ -727,11 +1192,10 @@ int drm_wait_vblank(struct drm_device *dev, void *data,
if (ret != -EINTR) {
struct timeval now;
- do_gettimeofday(&now);
-
+ vblwait->reply.sequence = drm_vblank_count_and_time(dev, crtc, &now);
vblwait->reply.tval_sec = now.tv_sec;
vblwait->reply.tval_usec = now.tv_usec;
- vblwait->reply.sequence = drm_vblank_count(dev, crtc);
+
DRM_DEBUG("returning %d to client\n",
vblwait->reply.sequence);
} else {
@@ -750,8 +1214,7 @@ void drm_handle_vblank_events(struct drm_device *dev, int crtc)
unsigned long flags;
unsigned int seq;
- do_gettimeofday(&now);
- seq = drm_vblank_count(dev, crtc);
+ seq = drm_vblank_count_and_time(dev, crtc, &now);
spin_lock_irqsave(&dev->event_lock, flags);
@@ -789,11 +1252,64 @@ void drm_handle_vblank_events(struct drm_device *dev, int crtc)
*/
void drm_handle_vblank(struct drm_device *dev, int crtc)
{
+ u32 vblcount;
+ s64 diff_ns;
+ struct timeval tvblank;
+ unsigned long irqflags;
+
if (!dev->num_crtcs)
return;
- atomic_inc(&dev->_vblank_count[crtc]);
+ /* Need timestamp lock to prevent concurrent execution with
+ * vblank enable/disable, as this would cause inconsistent
+ * or corrupted timestamps and vblank counts.
+ */
+ spin_lock_irqsave(&dev->vblank_time_lock, irqflags);
+
+ /* Vblank irq handling disabled. Nothing to do. */
+ if (!dev->vblank_enabled[crtc]) {
+ spin_unlock_irqrestore(&dev->vblank_time_lock, irqflags);
+ return;
+ }
+
+ /* Fetch corresponding timestamp for this vblank interval from
+ * driver and store it in proper slot of timestamp ringbuffer.
+ */
+
+ /* Get current timestamp and count. */
+ vblcount = atomic_read(&dev->_vblank_count[crtc]);
+ drm_get_last_vbltimestamp(dev, crtc, &tvblank, DRM_CALLED_FROM_VBLIRQ);
+
+ /* Compute time difference to timestamp of last vblank */
+ diff_ns = timeval_to_ns(&tvblank) -
+ timeval_to_ns(&vblanktimestamp(dev, crtc, vblcount));
+
+ /* Update vblank timestamp and count if at least
+ * DRM_REDUNDANT_VBLIRQ_THRESH_NS nanoseconds
+ * difference between last stored timestamp and current
+ * timestamp. A smaller difference means basically
+ * identical timestamps. Happens if this vblank has
+ * been already processed and this is a redundant call,
+ * e.g., due to spurious vblank interrupts. We need to
+ * ignore those for accounting.
+ */
+ if (abs(diff_ns) > DRM_REDUNDANT_VBLIRQ_THRESH_NS) {
+ /* Store new timestamp in ringbuffer. */
+ vblanktimestamp(dev, crtc, vblcount + 1) = tvblank;
+ smp_wmb();
+
+ /* Increment cooked vblank count. This also atomically commits
+ * the timestamp computed above.
+ */
+ atomic_inc(&dev->_vblank_count[crtc]);
+ } else {
+ DRM_DEBUG("crtc %d: Redundant vblirq ignored. diff_ns = %d\n",
+ crtc, (int) diff_ns);
+ }
+
DRM_WAKEUP(&dev->vbl_queue[crtc]);
drm_handle_vblank_events(dev, crtc);
+
+ spin_unlock_irqrestore(&dev->vblank_time_lock, irqflags);
}
EXPORT_SYMBOL(drm_handle_vblank);
diff --git a/drivers/gpu/drm/drm_mm.c b/drivers/gpu/drm/drm_mm.c
index a6bfc302ed9..c59515ba7e6 100644
--- a/drivers/gpu/drm/drm_mm.c
+++ b/drivers/gpu/drm/drm_mm.c
@@ -392,10 +392,36 @@ void drm_mm_init_scan(struct drm_mm *mm, unsigned long size,
mm->scanned_blocks = 0;
mm->scan_hit_start = 0;
mm->scan_hit_size = 0;
+ mm->scan_check_range = 0;
}
EXPORT_SYMBOL(drm_mm_init_scan);
/**
+ * Initialize lru scanning.
+ *
+ * This simply sets up the scanning routines with the parameters for the desired
+ * hole. This version is for range-restricted scans.
+ *
+ * Warning: As long as the scan list is non-empty, no other operations than
+ * adding/removing nodes to/from the scan list are allowed.
+ */
+void drm_mm_init_scan_with_range(struct drm_mm *mm, unsigned long size,
+ unsigned alignment,
+ unsigned long start,
+ unsigned long end)
+{
+ mm->scan_alignment = alignment;
+ mm->scan_size = size;
+ mm->scanned_blocks = 0;
+ mm->scan_hit_start = 0;
+ mm->scan_hit_size = 0;
+ mm->scan_start = start;
+ mm->scan_end = end;
+ mm->scan_check_range = 1;
+}
+EXPORT_SYMBOL(drm_mm_init_scan_with_range);
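An illustrative caller of the new range-restricted scan, modeled on the eviction-roster pattern the scan API is designed for (the foo_* names and the LRU list are hypothetical placeholders, not part of this patch):

	struct foo_buffer *buf;
	bool found = false;

	drm_mm_init_scan_with_range(mm, size, alignment, start, end);

	/* Feed LRU-ordered candidates to the scanner until it reports
	 * that freeing the blocks scanned so far would open a suitable
	 * hole inside [start, end).
	 */
	list_for_each_entry(buf, &lru_list, lru_link) {
		if (drm_mm_scan_add_block(buf->mm_node)) {
			found = true;
			break;
		}
	}

	/* All scanned blocks must then be taken off the scan list again
	 * with drm_mm_scan_remove_block(); the blocks for which it
	 * returns true lie inside the hole and are the ones to evict.
	 */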
+
+/**
* Add a node to the scan list that might be freed to make space for the desired
* hole.
*
@@ -406,6 +432,8 @@ int drm_mm_scan_add_block(struct drm_mm_node *node)
struct drm_mm *mm = node->mm;
struct list_head *prev_free, *next_free;
struct drm_mm_node *prev_node, *next_node;
+ unsigned long adj_start;
+ unsigned long adj_end;
mm->scanned_blocks++;
@@ -452,7 +480,17 @@ int drm_mm_scan_add_block(struct drm_mm_node *node)
node->free_stack.prev = prev_free;
node->free_stack.next = next_free;
- if (check_free_hole(node->start, node->start + node->size,
+ if (mm->scan_check_range) {
+ adj_start = node->start < mm->scan_start ?
+ mm->scan_start : node->start;
+ adj_end = node->start + node->size > mm->scan_end ?
+ mm->scan_end : node->start + node->size;
+ } else {
+ adj_start = node->start;
+ adj_end = node->start + node->size;
+ }
+
+ if (check_free_hole(adj_start, adj_end,
mm->scan_size, mm->scan_alignment)) {
mm->scan_hit_start = node->start;
mm->scan_hit_size = node->size;
diff --git a/drivers/gpu/drm/drm_stub.c b/drivers/gpu/drm/drm_stub.c
index cdc89ee042c..d59edc18301 100644
--- a/drivers/gpu/drm/drm_stub.c
+++ b/drivers/gpu/drm/drm_stub.c
@@ -40,12 +40,22 @@
unsigned int drm_debug = 0; /* 1 to enable debug output */
EXPORT_SYMBOL(drm_debug);
+unsigned int drm_vblank_offdelay = 5000; /* Default to 5000 msecs. */
+EXPORT_SYMBOL(drm_vblank_offdelay);
+
+unsigned int drm_timestamp_precision = 20; /* Default to 20 usecs. */
+EXPORT_SYMBOL(drm_timestamp_precision);
+
MODULE_AUTHOR(CORE_AUTHOR);
MODULE_DESCRIPTION(CORE_DESC);
MODULE_LICENSE("GPL and additional rights");
MODULE_PARM_DESC(debug, "Enable debug output");
+MODULE_PARM_DESC(vblankoffdelay, "Delay until vblank irq auto-disable [msecs]");
+MODULE_PARM_DESC(timestamp_precision_usec, "Max. error on timestamps [usecs]");
module_param_named(debug, drm_debug, int, 0600);
+module_param_named(vblankoffdelay, drm_vblank_offdelay, int, 0600);
+module_param_named(timestamp_precision_usec, drm_timestamp_precision, int, 0600);
struct idr drm_minors_idr;
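Since both knobs are declared with module_param_named() and mode 0600, they can be set at load time (e.g. drm.vblankoffdelay=... on the kernel command line) or changed at runtime through sysfs, under /sys/module/drm/parameters/vblankoffdelay and /sys/module/drm/parameters/timestamp_precision_usec; this follows from the kernel's generic module-parameter plumbing rather than from anything added in this patch.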
diff --git a/drivers/gpu/drm/i915/Makefile b/drivers/gpu/drm/i915/Makefile
index fdc833d5cc7..0ae6a7c5020 100644
--- a/drivers/gpu/drm/i915/Makefile
+++ b/drivers/gpu/drm/i915/Makefile
@@ -9,6 +9,8 @@ i915-y := i915_drv.o i915_dma.o i915_irq.o i915_mem.o \
i915_gem.o \
i915_gem_debug.o \
i915_gem_evict.o \
+ i915_gem_execbuffer.o \
+ i915_gem_gtt.o \
i915_gem_tiling.o \
i915_trace_points.o \
intel_display.o \
diff --git a/drivers/gpu/drm/i915/i915_debugfs.c b/drivers/gpu/drm/i915/i915_debugfs.c
index 1f4f3ceb63c..92f75782c33 100644
--- a/drivers/gpu/drm/i915/i915_debugfs.c
+++ b/drivers/gpu/drm/i915/i915_debugfs.c
@@ -32,6 +32,7 @@
#include "drmP.h"
#include "drm.h"
#include "intel_drv.h"
+#include "intel_ringbuffer.h"
#include "i915_drm.h"
#include "i915_drv.h"
@@ -72,7 +73,6 @@ static int i915_capabilities(struct seq_file *m, void *data)
B(is_broadwater);
B(is_crestline);
B(has_fbc);
- B(has_rc6);
B(has_pipe_cxsr);
B(has_hotplug);
B(cursor_needs_physical);
@@ -86,19 +86,19 @@ static int i915_capabilities(struct seq_file *m, void *data)
return 0;
}
-static const char *get_pin_flag(struct drm_i915_gem_object *obj_priv)
+static const char *get_pin_flag(struct drm_i915_gem_object *obj)
{
- if (obj_priv->user_pin_count > 0)
+ if (obj->user_pin_count > 0)
return "P";
- else if (obj_priv->pin_count > 0)
+ else if (obj->pin_count > 0)
return "p";
else
return " ";
}
-static const char *get_tiling_flag(struct drm_i915_gem_object *obj_priv)
+static const char *get_tiling_flag(struct drm_i915_gem_object *obj)
{
- switch (obj_priv->tiling_mode) {
+ switch (obj->tiling_mode) {
default:
case I915_TILING_NONE: return " ";
case I915_TILING_X: return "X";
@@ -109,7 +109,7 @@ static const char *get_tiling_flag(struct drm_i915_gem_object *obj_priv)
static void
describe_obj(struct seq_file *m, struct drm_i915_gem_object *obj)
{
- seq_printf(m, "%p: %s%s %8zd %08x %08x %d%s%s",
+ seq_printf(m, "%p: %s%s %8zd %04x %04x %d %d%s%s",
&obj->base,
get_pin_flag(obj),
get_tiling_flag(obj),
@@ -117,6 +117,7 @@ describe_obj(struct seq_file *m, struct drm_i915_gem_object *obj)
obj->base.read_domains,
obj->base.write_domain,
obj->last_rendering_seqno,
+ obj->last_fenced_seqno,
obj->dirty ? " dirty" : "",
obj->madv == I915_MADV_DONTNEED ? " purgeable" : "");
if (obj->base.name)
@@ -124,7 +125,17 @@ describe_obj(struct seq_file *m, struct drm_i915_gem_object *obj)
if (obj->fence_reg != I915_FENCE_REG_NONE)
seq_printf(m, " (fence: %d)", obj->fence_reg);
if (obj->gtt_space != NULL)
- seq_printf(m, " (gtt_offset: %08x)", obj->gtt_offset);
+ seq_printf(m, " (gtt offset: %08x, size: %08x)",
+ obj->gtt_offset, (unsigned int)obj->gtt_space->size);
+ if (obj->pin_mappable || obj->fault_mappable) {
+ char s[3], *t = s;
+ if (obj->pin_mappable)
+ *t++ = 'p';
+ if (obj->fault_mappable)
+ *t++ = 'f';
+ *t = '\0';
+ seq_printf(m, " (%s mappable)", s);
+ }
if (obj->ring != NULL)
seq_printf(m, " (%s)", obj->ring->name);
}
@@ -136,7 +147,7 @@ static int i915_gem_object_list_info(struct seq_file *m, void *data)
struct list_head *head;
struct drm_device *dev = node->minor->dev;
drm_i915_private_t *dev_priv = dev->dev_private;
- struct drm_i915_gem_object *obj_priv;
+ struct drm_i915_gem_object *obj;
size_t total_obj_size, total_gtt_size;
int count, ret;
@@ -171,12 +182,12 @@ static int i915_gem_object_list_info(struct seq_file *m, void *data)
}
total_obj_size = total_gtt_size = count = 0;
- list_for_each_entry(obj_priv, head, mm_list) {
+ list_for_each_entry(obj, head, mm_list) {
seq_printf(m, " ");
- describe_obj(m, obj_priv);
+ describe_obj(m, obj);
seq_printf(m, "\n");
- total_obj_size += obj_priv->base.size;
- total_gtt_size += obj_priv->gtt_space->size;
+ total_obj_size += obj->base.size;
+ total_gtt_size += obj->gtt_space->size;
count++;
}
mutex_unlock(&dev->struct_mutex);
@@ -186,24 +197,79 @@ static int i915_gem_object_list_info(struct seq_file *m, void *data)
return 0;
}
+#define count_objects(list, member) do { \
+ list_for_each_entry(obj, list, member) { \
+ size += obj->gtt_space->size; \
+ ++count; \
+ if (obj->map_and_fenceable) { \
+ mappable_size += obj->gtt_space->size; \
+ ++mappable_count; \
+ } \
+ } \
+} while(0)
+
static int i915_gem_object_info(struct seq_file *m, void* data)
{
struct drm_info_node *node = (struct drm_info_node *) m->private;
struct drm_device *dev = node->minor->dev;
struct drm_i915_private *dev_priv = dev->dev_private;
+ u32 count, mappable_count;
+ size_t size, mappable_size;
+ struct drm_i915_gem_object *obj;
int ret;
ret = mutex_lock_interruptible(&dev->struct_mutex);
if (ret)
return ret;
- seq_printf(m, "%u objects\n", dev_priv->mm.object_count);
- seq_printf(m, "%zu object bytes\n", dev_priv->mm.object_memory);
- seq_printf(m, "%u pinned\n", dev_priv->mm.pin_count);
- seq_printf(m, "%zu pin bytes\n", dev_priv->mm.pin_memory);
- seq_printf(m, "%u objects in gtt\n", dev_priv->mm.gtt_count);
- seq_printf(m, "%zu gtt bytes\n", dev_priv->mm.gtt_memory);
- seq_printf(m, "%zu gtt total\n", dev_priv->mm.gtt_total);
+ seq_printf(m, "%u objects, %zu bytes\n",
+ dev_priv->mm.object_count,
+ dev_priv->mm.object_memory);
+
+ size = count = mappable_size = mappable_count = 0;
+ count_objects(&dev_priv->mm.gtt_list, gtt_list);
+ seq_printf(m, "%u [%u] objects, %zu [%zu] bytes in gtt\n",
+ count, mappable_count, size, mappable_size);
+
+ size = count = mappable_size = mappable_count = 0;
+ count_objects(&dev_priv->mm.active_list, mm_list);
+ count_objects(&dev_priv->mm.flushing_list, mm_list);
+ seq_printf(m, " %u [%u] active objects, %zu [%zu] bytes\n",
+ count, mappable_count, size, mappable_size);
+
+ size = count = mappable_size = mappable_count = 0;
+ count_objects(&dev_priv->mm.pinned_list, mm_list);
+ seq_printf(m, " %u [%u] pinned objects, %zu [%zu] bytes\n",
+ count, mappable_count, size, mappable_size);
+
+ size = count = mappable_size = mappable_count = 0;
+ count_objects(&dev_priv->mm.inactive_list, mm_list);
+ seq_printf(m, " %u [%u] inactive objects, %zu [%zu] bytes\n",
+ count, mappable_count, size, mappable_size);
+
+ size = count = mappable_size = mappable_count = 0;
+ count_objects(&dev_priv->mm.deferred_free_list, mm_list);
+ seq_printf(m, " %u [%u] freed objects, %zu [%zu] bytes\n",
+ count, mappable_count, size, mappable_size);
+
+ size = count = mappable_size = mappable_count = 0;
+ list_for_each_entry(obj, &dev_priv->mm.gtt_list, gtt_list) {
+ if (obj->fault_mappable) {
+ size += obj->gtt_space->size;
+ ++count;
+ }
+ if (obj->pin_mappable) {
+ mappable_size += obj->gtt_space->size;
+ ++mappable_count;
+ }
+ }
+ seq_printf(m, "%u pinned mappable objects, %zu bytes\n",
+ mappable_count, mappable_size);
+ seq_printf(m, "%u fault mappable objects, %zu bytes\n",
+ count, size);
+
+ seq_printf(m, "%zu [%zu] gtt total\n",
+ dev_priv->mm.gtt_total, dev_priv->mm.mappable_gtt_total);
mutex_unlock(&dev->struct_mutex);
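For illustration, one line of the reworked summary might read (values hypothetical):

	120 [45] objects, 251658240 [62914560] bytes in gtt

where the bracketed figures are the mappable subset accumulated by the count_objects() macro above, i.e. objects with map_and_fenceable set.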
@@ -243,14 +309,14 @@ static int i915_gem_pageflip_info(struct seq_file *m, void *data)
seq_printf(m, "%d prepares\n", work->pending);
if (work->old_fb_obj) {
- struct drm_i915_gem_object *obj_priv = to_intel_bo(work->old_fb_obj);
- if(obj_priv)
- seq_printf(m, "Old framebuffer gtt_offset 0x%08x\n", obj_priv->gtt_offset );
+ struct drm_i915_gem_object *obj = work->old_fb_obj;
+ if (obj)
+ seq_printf(m, "Old framebuffer gtt_offset 0x%08x\n", obj->gtt_offset);
}
if (work->pending_flip_obj) {
- struct drm_i915_gem_object *obj_priv = to_intel_bo(work->pending_flip_obj);
- if(obj_priv)
- seq_printf(m, "New framebuffer gtt_offset 0x%08x\n", obj_priv->gtt_offset );
+ struct drm_i915_gem_object *obj = work->pending_flip_obj;
+ if (obj)
+ seq_printf(m, "New framebuffer gtt_offset 0x%08x\n", obj->gtt_offset);
}
}
spin_unlock_irqrestore(&dev->event_lock, flags);
@@ -265,44 +331,80 @@ static int i915_gem_request_info(struct seq_file *m, void *data)
struct drm_device *dev = node->minor->dev;
drm_i915_private_t *dev_priv = dev->dev_private;
struct drm_i915_gem_request *gem_request;
- int ret;
+ int ret, count;
ret = mutex_lock_interruptible(&dev->struct_mutex);
if (ret)
return ret;
- seq_printf(m, "Request:\n");
- list_for_each_entry(gem_request, &dev_priv->render_ring.request_list,
- list) {
- seq_printf(m, " %d @ %d\n",
- gem_request->seqno,
- (int) (jiffies - gem_request->emitted_jiffies));
+ count = 0;
+ if (!list_empty(&dev_priv->ring[RCS].request_list)) {
+ seq_printf(m, "Render requests:\n");
+ list_for_each_entry(gem_request,
+ &dev_priv->ring[RCS].request_list,
+ list) {
+ seq_printf(m, " %d @ %d\n",
+ gem_request->seqno,
+ (int) (jiffies - gem_request->emitted_jiffies));
+ }
+ count++;
+ }
+ if (!list_empty(&dev_priv->ring[VCS].request_list)) {
+ seq_printf(m, "BSD requests:\n");
+ list_for_each_entry(gem_request,
+ &dev_priv->ring[VCS].request_list,
+ list) {
+ seq_printf(m, " %d @ %d\n",
+ gem_request->seqno,
+ (int) (jiffies - gem_request->emitted_jiffies));
+ }
+ count++;
+ }
+ if (!list_empty(&dev_priv->ring[BCS].request_list)) {
+ seq_printf(m, "BLT requests:\n");
+ list_for_each_entry(gem_request,
+ &dev_priv->ring[BCS].request_list,
+ list) {
+ seq_printf(m, " %d @ %d\n",
+ gem_request->seqno,
+ (int) (jiffies - gem_request->emitted_jiffies));
+ }
+ count++;
}
mutex_unlock(&dev->struct_mutex);
+ if (count == 0)
+ seq_printf(m, "No requests\n");
+
return 0;
}
+static void i915_ring_seqno_info(struct seq_file *m,
+ struct intel_ring_buffer *ring)
+{
+ if (ring->get_seqno) {
+ seq_printf(m, "Current sequence (%s): %d\n",
+ ring->name, ring->get_seqno(ring));
+ seq_printf(m, "Waiter sequence (%s): %d\n",
+ ring->name, ring->waiting_seqno);
+ seq_printf(m, "IRQ sequence (%s): %d\n",
+ ring->name, ring->irq_seqno);
+ }
+}
+
static int i915_gem_seqno_info(struct seq_file *m, void *data)
{
struct drm_info_node *node = (struct drm_info_node *) m->private;
struct drm_device *dev = node->minor->dev;
drm_i915_private_t *dev_priv = dev->dev_private;
- int ret;
+ int ret, i;
ret = mutex_lock_interruptible(&dev->struct_mutex);
if (ret)
return ret;
- if (dev_priv->render_ring.status_page.page_addr != NULL) {
- seq_printf(m, "Current sequence: %d\n",
- dev_priv->render_ring.get_seqno(dev, &dev_priv->render_ring));
- } else {
- seq_printf(m, "Current sequence: hws uninitialized\n");
- }
- seq_printf(m, "Waiter sequence: %d\n",
- dev_priv->mm.waiting_gem_seqno);
- seq_printf(m, "IRQ sequence: %d\n", dev_priv->mm.irq_gem_seqno);
+ for (i = 0; i < I915_NUM_RINGS; i++)
+ i915_ring_seqno_info(m, &dev_priv->ring[i]);
mutex_unlock(&dev->struct_mutex);
@@ -315,7 +417,7 @@ static int i915_interrupt_info(struct seq_file *m, void *data)
struct drm_info_node *node = (struct drm_info_node *) m->private;
struct drm_device *dev = node->minor->dev;
drm_i915_private_t *dev_priv = dev->dev_private;
- int ret;
+ int ret, i;
ret = mutex_lock_interruptible(&dev->struct_mutex);
if (ret)
@@ -354,16 +456,8 @@ static int i915_interrupt_info(struct seq_file *m, void *data)
}
seq_printf(m, "Interrupts received: %d\n",
atomic_read(&dev_priv->irq_received));
- if (dev_priv->render_ring.status_page.page_addr != NULL) {
- seq_printf(m, "Current sequence: %d\n",
- dev_priv->render_ring.get_seqno(dev, &dev_priv->render_ring));
- } else {
- seq_printf(m, "Current sequence: hws uninitialized\n");
- }
- seq_printf(m, "Waiter sequence: %d\n",
- dev_priv->mm.waiting_gem_seqno);
- seq_printf(m, "IRQ sequence: %d\n",
- dev_priv->mm.irq_gem_seqno);
+ for (i = 0; i < I915_NUM_RINGS; i++)
+ i915_ring_seqno_info(m, &dev_priv->ring[i]);
mutex_unlock(&dev->struct_mutex);
return 0;
@@ -383,29 +477,17 @@ static int i915_gem_fence_regs_info(struct seq_file *m, void *data)
seq_printf(m, "Reserved fences = %d\n", dev_priv->fence_reg_start);
seq_printf(m, "Total fences = %d\n", dev_priv->num_fence_regs);
for (i = 0; i < dev_priv->num_fence_regs; i++) {
- struct drm_gem_object *obj = dev_priv->fence_regs[i].obj;
+ struct drm_i915_gem_object *obj = dev_priv->fence_regs[i].obj;
- if (obj == NULL) {
- seq_printf(m, "Fenced object[%2d] = unused\n", i);
- } else {
- struct drm_i915_gem_object *obj_priv;
-
- obj_priv = to_intel_bo(obj);
- seq_printf(m, "Fenced object[%2d] = %p: %s "
- "%08x %08zx %08x %s %08x %08x %d",
- i, obj, get_pin_flag(obj_priv),
- obj_priv->gtt_offset,
- obj->size, obj_priv->stride,
- get_tiling_flag(obj_priv),
- obj->read_domains, obj->write_domain,
- obj_priv->last_rendering_seqno);
- if (obj->name)
- seq_printf(m, " (name: %d)", obj->name);
- seq_printf(m, "\n");
- }
+ seq_printf(m, "Fenced object[%2d] = ", i);
+ if (obj == NULL)
+ seq_printf(m, "unused");
+ else
+ describe_obj(m, obj);
+ seq_printf(m, "\n");
}
- mutex_unlock(&dev->struct_mutex);
+ mutex_unlock(&dev->struct_mutex);
return 0;
}
@@ -414,10 +496,12 @@ static int i915_hws_info(struct seq_file *m, void *data)
struct drm_info_node *node = (struct drm_info_node *) m->private;
struct drm_device *dev = node->minor->dev;
drm_i915_private_t *dev_priv = dev->dev_private;
- int i;
+ struct intel_ring_buffer *ring;
volatile u32 *hws;
+ int i;
- hws = (volatile u32 *)dev_priv->render_ring.status_page.page_addr;
+ ring = &dev_priv->ring[(uintptr_t)node->info_ent->data];
+ hws = (volatile u32 *)ring->status_page.page_addr;
if (hws == NULL)
return 0;
@@ -431,14 +515,14 @@ static int i915_hws_info(struct seq_file *m, void *data)
static void i915_dump_object(struct seq_file *m,
struct io_mapping *mapping,
- struct drm_i915_gem_object *obj_priv)
+ struct drm_i915_gem_object *obj)
{
int page, page_count, i;
- page_count = obj_priv->base.size / PAGE_SIZE;
+ page_count = obj->base.size / PAGE_SIZE;
for (page = 0; page < page_count; page++) {
u32 *mem = io_mapping_map_wc(mapping,
- obj_priv->gtt_offset + page * PAGE_SIZE);
+ obj->gtt_offset + page * PAGE_SIZE);
for (i = 0; i < PAGE_SIZE; i += 4)
seq_printf(m, "%08x : %08x\n", i, mem[i / 4]);
io_mapping_unmap(mem);
@@ -450,25 +534,21 @@ static int i915_batchbuffer_info(struct seq_file *m, void *data)
struct drm_info_node *node = (struct drm_info_node *) m->private;
struct drm_device *dev = node->minor->dev;
drm_i915_private_t *dev_priv = dev->dev_private;
- struct drm_gem_object *obj;
- struct drm_i915_gem_object *obj_priv;
+ struct drm_i915_gem_object *obj;
int ret;
ret = mutex_lock_interruptible(&dev->struct_mutex);
if (ret)
return ret;
- list_for_each_entry(obj_priv, &dev_priv->mm.active_list, mm_list) {
- obj = &obj_priv->base;
- if (obj->read_domains & I915_GEM_DOMAIN_COMMAND) {
- seq_printf(m, "--- gtt_offset = 0x%08x\n",
- obj_priv->gtt_offset);
- i915_dump_object(m, dev_priv->mm.gtt_mapping, obj_priv);
+ list_for_each_entry(obj, &dev_priv->mm.active_list, mm_list) {
+ if (obj->base.read_domains & I915_GEM_DOMAIN_COMMAND) {
+ seq_printf(m, "--- gtt_offset = 0x%08x\n", obj->gtt_offset);
+ i915_dump_object(m, dev_priv->mm.gtt_mapping, obj);
}
}
mutex_unlock(&dev->struct_mutex);
-
return 0;
}
@@ -477,19 +557,21 @@ static int i915_ringbuffer_data(struct seq_file *m, void *data)
struct drm_info_node *node = (struct drm_info_node *) m->private;
struct drm_device *dev = node->minor->dev;
drm_i915_private_t *dev_priv = dev->dev_private;
+ struct intel_ring_buffer *ring;
int ret;
ret = mutex_lock_interruptible(&dev->struct_mutex);
if (ret)
return ret;
- if (!dev_priv->render_ring.gem_object) {
+ ring = &dev_priv->ring[(uintptr_t)node->info_ent->data];
+ if (!ring->obj) {
seq_printf(m, "No ringbuffer setup\n");
} else {
- u8 *virt = dev_priv->render_ring.virtual_start;
+ u8 *virt = ring->virtual_start;
uint32_t off;
- for (off = 0; off < dev_priv->render_ring.size; off += 4) {
+ for (off = 0; off < ring->size; off += 4) {
uint32_t *ptr = (uint32_t *)(virt + off);
seq_printf(m, "%08x : %08x\n", off, *ptr);
}
@@ -504,19 +586,38 @@ static int i915_ringbuffer_info(struct seq_file *m, void *data)
struct drm_info_node *node = (struct drm_info_node *) m->private;
struct drm_device *dev = node->minor->dev;
drm_i915_private_t *dev_priv = dev->dev_private;
- unsigned int head, tail;
+ struct intel_ring_buffer *ring;
- head = I915_READ(PRB0_HEAD) & HEAD_ADDR;
- tail = I915_READ(PRB0_TAIL) & TAIL_ADDR;
+ ring = &dev_priv->ring[(uintptr_t)node->info_ent->data];
+ if (ring->size == 0)
+ return 0;
- seq_printf(m, "RingHead : %08x\n", head);
- seq_printf(m, "RingTail : %08x\n", tail);
- seq_printf(m, "RingSize : %08lx\n", dev_priv->render_ring.size);
- seq_printf(m, "Acthd : %08x\n", I915_READ(INTEL_INFO(dev)->gen >= 4 ? ACTHD_I965 : ACTHD));
+ seq_printf(m, "Ring %s:\n", ring->name);
+ seq_printf(m, " Head : %08x\n", I915_READ_HEAD(ring) & HEAD_ADDR);
+ seq_printf(m, " Tail : %08x\n", I915_READ_TAIL(ring) & TAIL_ADDR);
+ seq_printf(m, " Size : %08x\n", ring->size);
+ seq_printf(m, " Active : %08x\n", intel_ring_get_active_head(ring));
+ seq_printf(m, " NOPID : %08x\n", I915_READ_NOPID(ring));
+ if (IS_GEN6(dev)) {
+ seq_printf(m, " Sync 0 : %08x\n", I915_READ_SYNC_0(ring));
+ seq_printf(m, " Sync 1 : %08x\n", I915_READ_SYNC_1(ring));
+ }
+ seq_printf(m, " Control : %08x\n", I915_READ_CTL(ring));
+ seq_printf(m, " Start : %08x\n", I915_READ_START(ring));
return 0;
}
+static const char *ring_str(int ring)
+{
+ switch (ring) {
+ case RING_RENDER: return " render";
+ case RING_BSD: return " bsd";
+ case RING_BLT: return " blt";
+ default: return "";
+ }
+}
+
static const char *pin_flag(int pinned)
{
if (pinned > 0)
@@ -547,6 +648,36 @@ static const char *purgeable_flag(int purgeable)
return purgeable ? " purgeable" : "";
}
+static void print_error_buffers(struct seq_file *m,
+ const char *name,
+ struct drm_i915_error_buffer *err,
+ int count)
+{
+ seq_printf(m, "%s [%d]:\n", name, count);
+
+ while (count--) {
+ seq_printf(m, " %08x %8zd %04x %04x %08x%s%s%s%s%s",
+ err->gtt_offset,
+ err->size,
+ err->read_domains,
+ err->write_domain,
+ err->seqno,
+ pin_flag(err->pinned),
+ tiling_flag(err->tiling),
+ dirty_flag(err->dirty),
+ purgeable_flag(err->purgeable),
+ ring_str(err->ring));
+
+ if (err->name)
+ seq_printf(m, " (name: %d)", err->name);
+ if (err->fence_reg != I915_FENCE_REG_NONE)
+ seq_printf(m, " (fence: %d)", err->fence_reg);
+
+ seq_printf(m, "\n");
+ err++;
+ }
+}
+
static int i915_error_state(struct seq_file *m, void *unused)
{
struct drm_info_node *node = (struct drm_info_node *) m->private;
@@ -568,41 +699,46 @@ static int i915_error_state(struct seq_file *m, void *unused)
error->time.tv_usec);
seq_printf(m, "PCI ID: 0x%04x\n", dev->pci_device);
seq_printf(m, "EIR: 0x%08x\n", error->eir);
- seq_printf(m, " PGTBL_ER: 0x%08x\n", error->pgtbl_er);
- seq_printf(m, " INSTPM: 0x%08x\n", error->instpm);
+ seq_printf(m, "PGTBL_ER: 0x%08x\n", error->pgtbl_er);
+ if (INTEL_INFO(dev)->gen >= 6) {
+ seq_printf(m, "ERROR: 0x%08x\n", error->error);
+ seq_printf(m, "Blitter command stream:\n");
+ seq_printf(m, " ACTHD: 0x%08x\n", error->bcs_acthd);
+ seq_printf(m, " IPEIR: 0x%08x\n", error->bcs_ipeir);
+ seq_printf(m, " IPEHR: 0x%08x\n", error->bcs_ipehr);
+ seq_printf(m, " INSTDONE: 0x%08x\n", error->bcs_instdone);
+ seq_printf(m, " seqno: 0x%08x\n", error->bcs_seqno);
+ seq_printf(m, "Video (BSD) command stream:\n");
+ seq_printf(m, " ACTHD: 0x%08x\n", error->vcs_acthd);
+ seq_printf(m, " IPEIR: 0x%08x\n", error->vcs_ipeir);
+ seq_printf(m, " IPEHR: 0x%08x\n", error->vcs_ipehr);
+ seq_printf(m, " INSTDONE: 0x%08x\n", error->vcs_instdone);
+ seq_printf(m, " seqno: 0x%08x\n", error->vcs_seqno);
+ }
+ seq_printf(m, "Render command stream:\n");
+ seq_printf(m, " ACTHD: 0x%08x\n", error->acthd);
seq_printf(m, " IPEIR: 0x%08x\n", error->ipeir);
seq_printf(m, " IPEHR: 0x%08x\n", error->ipehr);
seq_printf(m, " INSTDONE: 0x%08x\n", error->instdone);
- seq_printf(m, " ACTHD: 0x%08x\n", error->acthd);
if (INTEL_INFO(dev)->gen >= 4) {
- seq_printf(m, " INSTPS: 0x%08x\n", error->instps);
seq_printf(m, " INSTDONE1: 0x%08x\n", error->instdone1);
+ seq_printf(m, " INSTPS: 0x%08x\n", error->instps);
}
- seq_printf(m, "seqno: 0x%08x\n", error->seqno);
-
- if (error->active_bo_count) {
- seq_printf(m, "Buffers [%d]:\n", error->active_bo_count);
-
- for (i = 0; i < error->active_bo_count; i++) {
- seq_printf(m, " %08x %8zd %08x %08x %08x%s%s%s%s",
- error->active_bo[i].gtt_offset,
- error->active_bo[i].size,
- error->active_bo[i].read_domains,
- error->active_bo[i].write_domain,
- error->active_bo[i].seqno,
- pin_flag(error->active_bo[i].pinned),
- tiling_flag(error->active_bo[i].tiling),
- dirty_flag(error->active_bo[i].dirty),
- purgeable_flag(error->active_bo[i].purgeable));
-
- if (error->active_bo[i].name)
- seq_printf(m, " (name: %d)", error->active_bo[i].name);
- if (error->active_bo[i].fence_reg != I915_FENCE_REG_NONE)
- seq_printf(m, " (fence: %d)", error->active_bo[i].fence_reg);
-
- seq_printf(m, "\n");
- }
- }
+ seq_printf(m, " INSTPM: 0x%08x\n", error->instpm);
+ seq_printf(m, " seqno: 0x%08x\n", error->seqno);
+
+ for (i = 0; i < 16; i++)
+ seq_printf(m, " fence[%d] = %08llx\n", i, error->fence[i]);
+
+ if (error->active_bo)
+ print_error_buffers(m, "Active",
+ error->active_bo,
+ error->active_bo_count);
+
+ if (error->pinned_bo)
+ print_error_buffers(m, "Pinned",
+ error->pinned_bo,
+ error->pinned_bo_count);
for (i = 0; i < ARRAY_SIZE(error->batchbuffer); i++) {
if (error->batchbuffer[i]) {
@@ -635,6 +771,9 @@ static int i915_error_state(struct seq_file *m, void *unused)
if (error->overlay)
intel_overlay_print_error_state(m, error->overlay);
+ if (error->display)
+ intel_display_print_error_state(m, dev, error->display);
+
out:
spin_unlock_irqrestore(&dev_priv->error_lock, flags);
@@ -658,15 +797,51 @@ static int i915_cur_delayinfo(struct seq_file *m, void *unused)
struct drm_info_node *node = (struct drm_info_node *) m->private;
struct drm_device *dev = node->minor->dev;
drm_i915_private_t *dev_priv = dev->dev_private;
- u16 rgvswctl = I915_READ16(MEMSWCTL);
- u16 rgvstat = I915_READ16(MEMSTAT_ILK);
- seq_printf(m, "Requested P-state: %d\n", (rgvswctl >> 8) & 0xf);
- seq_printf(m, "Requested VID: %d\n", rgvswctl & 0x3f);
- seq_printf(m, "Current VID: %d\n", (rgvstat & MEMSTAT_VID_MASK) >>
- MEMSTAT_VID_SHIFT);
- seq_printf(m, "Current P-state: %d\n",
- (rgvstat & MEMSTAT_PSTATE_MASK) >> MEMSTAT_PSTATE_SHIFT);
+ if (IS_GEN5(dev)) {
+ u16 rgvswctl = I915_READ16(MEMSWCTL);
+ u16 rgvstat = I915_READ16(MEMSTAT_ILK);
+
+ seq_printf(m, "Requested P-state: %d\n", (rgvswctl >> 8) & 0xf);
+ seq_printf(m, "Requested VID: %d\n", rgvswctl & 0x3f);
+ seq_printf(m, "Current VID: %d\n", (rgvstat & MEMSTAT_VID_MASK) >>
+ MEMSTAT_VID_SHIFT);
+ seq_printf(m, "Current P-state: %d\n",
+ (rgvstat & MEMSTAT_PSTATE_MASK) >> MEMSTAT_PSTATE_SHIFT);
+ } else if (IS_GEN6(dev)) {
+ u32 gt_perf_status = I915_READ(GEN6_GT_PERF_STATUS);
+ u32 rp_state_limits = I915_READ(GEN6_RP_STATE_LIMITS);
+ u32 rp_state_cap = I915_READ(GEN6_RP_STATE_CAP);
+ int max_freq;
+
+ /* RPSTAT1 is in the GT power well */
+ __gen6_force_wake_get(dev_priv);
+
+ seq_printf(m, "GT_PERF_STATUS: 0x%08x\n", gt_perf_status);
+ seq_printf(m, "RPSTAT1: 0x%08x\n", I915_READ(GEN6_RPSTAT1));
+ seq_printf(m, "Render p-state ratio: %d\n",
+ (gt_perf_status & 0xff00) >> 8);
+ seq_printf(m, "Render p-state VID: %d\n",
+ gt_perf_status & 0xff);
+ seq_printf(m, "Render p-state limit: %d\n",
+ rp_state_limits & 0xff);
+
+ max_freq = (rp_state_cap & 0xff0000) >> 16;
+ seq_printf(m, "Lowest (RPN) frequency: %dMHz\n",
+ max_freq * 100);
+
+ max_freq = (rp_state_cap & 0xff00) >> 8;
+ seq_printf(m, "Nominal (RP1) frequency: %dMHz\n",
+ max_freq * 100);
+
+ max_freq = rp_state_cap & 0xff;
+ seq_printf(m, "Max non-overclocked (RP0) frequency: %dMHz\n",
+ max_freq * 100);
+
+ __gen6_force_wake_put(dev_priv);
+ } else {
+ seq_printf(m, "no P-state info available\n");
+ }
return 0;
}
@@ -794,7 +969,7 @@ static int i915_sr_status(struct seq_file *m, void *unused)
drm_i915_private_t *dev_priv = dev->dev_private;
bool sr_enabled = false;
- if (IS_GEN5(dev))
+ if (HAS_PCH_SPLIT(dev))
sr_enabled = I915_READ(WM1_LP_ILK) & WM1_LP_SR_EN;
else if (IS_CRESTLINE(dev) || IS_I945G(dev) || IS_I945GM(dev))
sr_enabled = I915_READ(FW_BLC_SELF) & FW_BLC_SELF_EN;
@@ -886,7 +1061,7 @@ static int i915_gem_framebuffer_info(struct seq_file *m, void *data)
fb->base.height,
fb->base.depth,
fb->base.bits_per_pixel);
- describe_obj(m, to_intel_bo(fb->obj));
+ describe_obj(m, fb->obj);
seq_printf(m, "\n");
list_for_each_entry(fb, &dev->mode_config.fb_list, base.head) {
@@ -898,7 +1073,7 @@ static int i915_gem_framebuffer_info(struct seq_file *m, void *data)
fb->base.height,
fb->base.depth,
fb->base.bits_per_pixel);
- describe_obj(m, to_intel_bo(fb->obj));
+ describe_obj(m, fb->obj);
seq_printf(m, "\n");
}
@@ -943,7 +1118,6 @@ i915_wedged_write(struct file *filp,
loff_t *ppos)
{
struct drm_device *dev = filp->private_data;
- drm_i915_private_t *dev_priv = dev->dev_private;
char buf[20];
int val = 1;
@@ -959,12 +1133,7 @@ i915_wedged_write(struct file *filp,
}
DRM_INFO("Manually setting wedged to %d\n", val);
-
- atomic_set(&dev_priv->mm.wedged, val);
- if (val) {
- wake_up_all(&dev_priv->irq_queue);
- queue_work(dev_priv->wq, &dev_priv->error_work);
- }
+ i915_handle_error(dev, val);
return cnt;
}
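/* i915_handle_error() lives in i915_irq.c; with a non-zero wedged
 * argument it performs the waiter wake-up and error-work queueing that
 * used to be open-coded here, so the debugfs hook no longer pokes at
 * dev_priv->mm.wedged directly. */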
@@ -1028,9 +1197,15 @@ static struct drm_info_list i915_debugfs_list[] = {
{"i915_gem_seqno", i915_gem_seqno_info, 0},
{"i915_gem_fence_regs", i915_gem_fence_regs_info, 0},
{"i915_gem_interrupt", i915_interrupt_info, 0},
- {"i915_gem_hws", i915_hws_info, 0},
- {"i915_ringbuffer_data", i915_ringbuffer_data, 0},
- {"i915_ringbuffer_info", i915_ringbuffer_info, 0},
+ {"i915_gem_hws", i915_hws_info, 0, (void *)RCS},
+ {"i915_gem_hws_blt", i915_hws_info, 0, (void *)BCS},
+ {"i915_gem_hws_bsd", i915_hws_info, 0, (void *)VCS},
+ {"i915_ringbuffer_data", i915_ringbuffer_data, 0, (void *)RCS},
+ {"i915_ringbuffer_info", i915_ringbuffer_info, 0, (void *)RCS},
+ {"i915_bsd_ringbuffer_data", i915_ringbuffer_data, 0, (void *)VCS},
+ {"i915_bsd_ringbuffer_info", i915_ringbuffer_info, 0, (void *)VCS},
+ {"i915_blt_ringbuffer_data", i915_ringbuffer_data, 0, (void *)BCS},
+ {"i915_blt_ringbuffer_info", i915_ringbuffer_info, 0, (void *)BCS},
{"i915_batchbuffers", i915_batchbuffer_info, 0},
{"i915_error_state", i915_error_state, 0},
{"i915_rstdby_delays", i915_rstdby_delays, 0},
diff --git a/drivers/gpu/drm/i915/i915_dma.c b/drivers/gpu/drm/i915/i915_dma.c
index cb900dc83d9..0568dbdc10e 100644
--- a/drivers/gpu/drm/i915/i915_dma.c
+++ b/drivers/gpu/drm/i915/i915_dma.c
@@ -50,6 +50,8 @@
static int i915_init_phys_hws(struct drm_device *dev)
{
drm_i915_private_t *dev_priv = dev->dev_private;
+ struct intel_ring_buffer *ring = LP_RING(dev_priv);
+
/* Program Hardware Status Page */
dev_priv->status_page_dmah =
drm_pci_alloc(dev, PAGE_SIZE, PAGE_SIZE);
@@ -58,11 +60,10 @@ static int i915_init_phys_hws(struct drm_device *dev)
DRM_ERROR("Can not allocate hardware status page\n");
return -ENOMEM;
}
- dev_priv->render_ring.status_page.page_addr
- = dev_priv->status_page_dmah->vaddr;
+ ring->status_page.page_addr = dev_priv->status_page_dmah->vaddr;
dev_priv->dma_status_page = dev_priv->status_page_dmah->busaddr;
- memset(dev_priv->render_ring.status_page.page_addr, 0, PAGE_SIZE);
+ memset(ring->status_page.page_addr, 0, PAGE_SIZE);
if (INTEL_INFO(dev)->gen >= 4)
dev_priv->dma_status_page |= (dev_priv->dma_status_page >> 28) &
@@ -80,13 +81,15 @@ static int i915_init_phys_hws(struct drm_device *dev)
static void i915_free_hws(struct drm_device *dev)
{
drm_i915_private_t *dev_priv = dev->dev_private;
+ struct intel_ring_buffer *ring = LP_RING(dev_priv);
+
if (dev_priv->status_page_dmah) {
drm_pci_free(dev, dev_priv->status_page_dmah);
dev_priv->status_page_dmah = NULL;
}
- if (dev_priv->render_ring.status_page.gfx_addr) {
- dev_priv->render_ring.status_page.gfx_addr = 0;
+ if (ring->status_page.gfx_addr) {
+ ring->status_page.gfx_addr = 0;
drm_core_ioremapfree(&dev_priv->hws_map, dev);
}
@@ -98,7 +101,7 @@ void i915_kernel_lost_context(struct drm_device * dev)
{
drm_i915_private_t *dev_priv = dev->dev_private;
struct drm_i915_master_private *master_priv;
- struct intel_ring_buffer *ring = &dev_priv->render_ring;
+ struct intel_ring_buffer *ring = LP_RING(dev_priv);
/*
* We should never lose context on the ring with modesetting
@@ -107,8 +110,8 @@ void i915_kernel_lost_context(struct drm_device * dev)
if (drm_core_check_feature(dev, DRIVER_MODESET))
return;
- ring->head = I915_READ(PRB0_HEAD) & HEAD_ADDR;
- ring->tail = I915_READ(PRB0_TAIL) & TAIL_ADDR;
+ ring->head = I915_READ_HEAD(ring) & HEAD_ADDR;
+ ring->tail = I915_READ_TAIL(ring) & TAIL_ADDR;
ring->space = ring->head - (ring->tail + 8);
if (ring->space < 0)
ring->space += ring->size;
@@ -124,6 +127,8 @@ void i915_kernel_lost_context(struct drm_device * dev)
static int i915_dma_cleanup(struct drm_device * dev)
{
drm_i915_private_t *dev_priv = dev->dev_private;
+ int i;
+
/* Make sure interrupts are disabled here because the uninstall ioctl
* may not have been called from userspace and after dev_private
* is freed, it's too late.
@@ -132,9 +137,8 @@ static int i915_dma_cleanup(struct drm_device * dev)
drm_irq_uninstall(dev);
mutex_lock(&dev->struct_mutex);
- intel_cleanup_ring_buffer(dev, &dev_priv->render_ring);
- intel_cleanup_ring_buffer(dev, &dev_priv->bsd_ring);
- intel_cleanup_ring_buffer(dev, &dev_priv->blt_ring);
+ for (i = 0; i < I915_NUM_RINGS; i++)
+ intel_cleanup_ring_buffer(&dev_priv->ring[i]);
mutex_unlock(&dev->struct_mutex);
/* Clear the HWS virtual address at teardown */
@@ -148,6 +152,7 @@ static int i915_initialize(struct drm_device * dev, drm_i915_init_t * init)
{
drm_i915_private_t *dev_priv = dev->dev_private;
struct drm_i915_master_private *master_priv = dev->primary->master->driver_priv;
+ struct intel_ring_buffer *ring = LP_RING(dev_priv);
master_priv->sarea = drm_getsarea(dev);
if (master_priv->sarea) {
@@ -158,24 +163,24 @@ static int i915_initialize(struct drm_device * dev, drm_i915_init_t * init)
}
if (init->ring_size != 0) {
- if (dev_priv->render_ring.gem_object != NULL) {
+ if (ring->obj != NULL) {
i915_dma_cleanup(dev);
DRM_ERROR("Client tried to initialize ringbuffer in "
"GEM mode\n");
return -EINVAL;
}
- dev_priv->render_ring.size = init->ring_size;
+ ring->size = init->ring_size;
- dev_priv->render_ring.map.offset = init->ring_start;
- dev_priv->render_ring.map.size = init->ring_size;
- dev_priv->render_ring.map.type = 0;
- dev_priv->render_ring.map.flags = 0;
- dev_priv->render_ring.map.mtrr = 0;
+ ring->map.offset = init->ring_start;
+ ring->map.size = init->ring_size;
+ ring->map.type = 0;
+ ring->map.flags = 0;
+ ring->map.mtrr = 0;
- drm_core_ioremap_wc(&dev_priv->render_ring.map, dev);
+ drm_core_ioremap_wc(&ring->map, dev);
- if (dev_priv->render_ring.map.handle == NULL) {
+ if (ring->map.handle == NULL) {
i915_dma_cleanup(dev);
DRM_ERROR("can not ioremap virtual address for"
" ring buffer\n");
@@ -183,7 +188,7 @@ static int i915_initialize(struct drm_device * dev, drm_i915_init_t * init)
}
}
- dev_priv->render_ring.virtual_start = dev_priv->render_ring.map.handle;
+ ring->virtual_start = ring->map.handle;
dev_priv->cpp = init->cpp;
dev_priv->back_offset = init->back_offset;
@@ -202,12 +207,10 @@ static int i915_initialize(struct drm_device * dev, drm_i915_init_t * init)
static int i915_dma_resume(struct drm_device * dev)
{
drm_i915_private_t *dev_priv = (drm_i915_private_t *) dev->dev_private;
+ struct intel_ring_buffer *ring = LP_RING(dev_priv);
- struct intel_ring_buffer *ring;
DRM_DEBUG_DRIVER("%s\n", __func__);
- ring = &dev_priv->render_ring;
-
if (ring->map.handle == NULL) {
DRM_ERROR("can not ioremap virtual address for"
" ring buffer\n");
@@ -222,7 +225,7 @@ static int i915_dma_resume(struct drm_device * dev)
DRM_DEBUG_DRIVER("hw status page @ %p\n",
ring->status_page.page_addr);
if (ring->status_page.gfx_addr != 0)
- intel_ring_setup_status_page(dev, ring);
+ intel_ring_setup_status_page(ring);
else
I915_WRITE(HWS_PGA, dev_priv->dma_status_page);
@@ -264,7 +267,7 @@ static int i915_dma_init(struct drm_device *dev, void *data,
* instruction detected will be given a size of zero, which is a
* signal to abort the rest of the buffer.
*/
-static int do_validate_cmd(int cmd)
+static int validate_cmd(int cmd)
{
switch (((cmd >> 29) & 0x7)) {
case 0x0:
@@ -322,40 +325,27 @@ static int do_validate_cmd(int cmd)
return 0;
}
-static int validate_cmd(int cmd)
-{
- int ret = do_validate_cmd(cmd);
-
-/* printk("validate_cmd( %x ): %d\n", cmd, ret); */
-
- return ret;
-}
-
static int i915_emit_cmds(struct drm_device * dev, int *buffer, int dwords)
{
drm_i915_private_t *dev_priv = dev->dev_private;
- int i;
+ int i, ret;
- if ((dwords+1) * sizeof(int) >= dev_priv->render_ring.size - 8)
+ if ((dwords+1) * sizeof(int) >= LP_RING(dev_priv)->size - 8)
return -EINVAL;
- BEGIN_LP_RING((dwords+1)&~1);
-
for (i = 0; i < dwords;) {
- int cmd, sz;
-
- cmd = buffer[i];
-
- if ((sz = validate_cmd(cmd)) == 0 || i + sz > dwords)
+ int sz = validate_cmd(buffer[i]);
+ if (sz == 0 || i + sz > dwords)
return -EINVAL;
-
- OUT_RING(cmd);
-
- while (++i, --sz) {
- OUT_RING(buffer[i]);
- }
+ i += sz;
}
+ ret = BEGIN_LP_RING((dwords+1)&~1);
+ if (ret)
+ return ret;
+
+ for (i = 0; i < dwords; i++)
+ OUT_RING(buffer[i]);
if (dwords & 1)
OUT_RING(0);
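/* Design note: the command buffer is now validated in full before any
 * ring space is reserved, so a malformed stream is rejected without
 * consuming ring space; emission then happens in one reserved block. */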
@@ -366,34 +356,41 @@ static int i915_emit_cmds(struct drm_device * dev, int *buffer, int dwords)
int
i915_emit_box(struct drm_device *dev,
- struct drm_clip_rect *boxes,
- int i, int DR1, int DR4)
+ struct drm_clip_rect *box,
+ int DR1, int DR4)
{
- struct drm_clip_rect box = boxes[i];
+ struct drm_i915_private *dev_priv = dev->dev_private;
+ int ret;
- if (box.y2 <= box.y1 || box.x2 <= box.x1 || box.y2 <= 0 || box.x2 <= 0) {
+ if (box->y2 <= box->y1 || box->x2 <= box->x1 ||
+ box->y2 <= 0 || box->x2 <= 0) {
DRM_ERROR("Bad box %d,%d..%d,%d\n",
- box.x1, box.y1, box.x2, box.y2);
+ box->x1, box->y1, box->x2, box->y2);
return -EINVAL;
}
if (INTEL_INFO(dev)->gen >= 4) {
- BEGIN_LP_RING(4);
+ ret = BEGIN_LP_RING(4);
+ if (ret)
+ return ret;
+
OUT_RING(GFX_OP_DRAWRECT_INFO_I965);
- OUT_RING((box.x1 & 0xffff) | (box.y1 << 16));
- OUT_RING(((box.x2 - 1) & 0xffff) | ((box.y2 - 1) << 16));
+ OUT_RING((box->x1 & 0xffff) | (box->y1 << 16));
+ OUT_RING(((box->x2 - 1) & 0xffff) | ((box->y2 - 1) << 16));
OUT_RING(DR4);
- ADVANCE_LP_RING();
} else {
- BEGIN_LP_RING(6);
+ ret = BEGIN_LP_RING(6);
+ if (ret)
+ return ret;
+
OUT_RING(GFX_OP_DRAWRECT_INFO);
OUT_RING(DR1);
- OUT_RING((box.x1 & 0xffff) | (box.y1 << 16));
- OUT_RING(((box.x2 - 1) & 0xffff) | ((box.y2 - 1) << 16));
+ OUT_RING((box->x1 & 0xffff) | (box->y1 << 16));
+ OUT_RING(((box->x2 - 1) & 0xffff) | ((box->y2 - 1) << 16));
OUT_RING(DR4);
OUT_RING(0);
- ADVANCE_LP_RING();
}
+ ADVANCE_LP_RING();
return 0;
}
@@ -413,12 +410,13 @@ static void i915_emit_breadcrumb(struct drm_device *dev)
if (master_priv->sarea_priv)
master_priv->sarea_priv->last_enqueue = dev_priv->counter;
- BEGIN_LP_RING(4);
- OUT_RING(MI_STORE_DWORD_INDEX);
- OUT_RING(I915_BREADCRUMB_INDEX << MI_STORE_DWORD_INDEX_SHIFT);
- OUT_RING(dev_priv->counter);
- OUT_RING(0);
- ADVANCE_LP_RING();
+ if (BEGIN_LP_RING(4) == 0) {
+ OUT_RING(MI_STORE_DWORD_INDEX);
+ OUT_RING(I915_BREADCRUMB_INDEX << MI_STORE_DWORD_INDEX_SHIFT);
+ OUT_RING(dev_priv->counter);
+ OUT_RING(0);
+ ADVANCE_LP_RING();
+ }
}
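/* With this patch BEGIN_LP_RING() can fail (e.g. a wedged GPU or a
 * ring that never drains), so callers are converted to this pattern:
 *
 *	ret = BEGIN_LP_RING(n);
 *	if (ret)
 *		return ret;
 *	OUT_RING(...);			// n dwords
 *	ADVANCE_LP_RING();
 *
 * Best-effort emitters such as the breadcrumb above instead skip the
 * write entirely when the reservation fails. */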
static int i915_dispatch_cmdbuffer(struct drm_device * dev,
@@ -440,7 +438,7 @@ static int i915_dispatch_cmdbuffer(struct drm_device * dev,
for (i = 0; i < count; i++) {
if (i < nbox) {
- ret = i915_emit_box(dev, cliprects, i,
+ ret = i915_emit_box(dev, &cliprects[i],
cmd->DR1, cmd->DR4);
if (ret)
return ret;
@@ -459,8 +457,9 @@ static int i915_dispatch_batchbuffer(struct drm_device * dev,
drm_i915_batchbuffer_t * batch,
struct drm_clip_rect *cliprects)
{
+ struct drm_i915_private *dev_priv = dev->dev_private;
int nbox = batch->num_cliprects;
- int i = 0, count;
+ int i, count, ret;
if ((batch->start | batch->used) & 0x7) {
DRM_ERROR("alignment");
@@ -470,17 +469,19 @@ static int i915_dispatch_batchbuffer(struct drm_device * dev,
i915_kernel_lost_context(dev);
count = nbox ? nbox : 1;
-
for (i = 0; i < count; i++) {
if (i < nbox) {
- int ret = i915_emit_box(dev, cliprects, i,
- batch->DR1, batch->DR4);
+ ret = i915_emit_box(dev, &cliprects[i],
+ batch->DR1, batch->DR4);
if (ret)
return ret;
}
if (!IS_I830(dev) && !IS_845G(dev)) {
- BEGIN_LP_RING(2);
+ ret = BEGIN_LP_RING(2);
+ if (ret)
+ return ret;
+
if (INTEL_INFO(dev)->gen >= 4) {
OUT_RING(MI_BATCH_BUFFER_START | (2 << 6) | MI_BATCH_NON_SECURE_I965);
OUT_RING(batch->start);
@@ -488,26 +489,29 @@ static int i915_dispatch_batchbuffer(struct drm_device * dev,
OUT_RING(MI_BATCH_BUFFER_START | (2 << 6));
OUT_RING(batch->start | MI_BATCH_NON_SECURE);
}
- ADVANCE_LP_RING();
} else {
- BEGIN_LP_RING(4);
+ ret = BEGIN_LP_RING(4);
+ if (ret)
+ return ret;
+
OUT_RING(MI_BATCH_BUFFER);
OUT_RING(batch->start | MI_BATCH_NON_SECURE);
OUT_RING(batch->start + batch->used - 4);
OUT_RING(0);
- ADVANCE_LP_RING();
}
+ ADVANCE_LP_RING();
}
if (IS_G4X(dev) || IS_GEN5(dev)) {
- BEGIN_LP_RING(2);
- OUT_RING(MI_FLUSH | MI_NO_WRITE_FLUSH | MI_INVALIDATE_ISP);
- OUT_RING(MI_NOOP);
- ADVANCE_LP_RING();
+ if (BEGIN_LP_RING(2) == 0) {
+ OUT_RING(MI_FLUSH | MI_NO_WRITE_FLUSH | MI_INVALIDATE_ISP);
+ OUT_RING(MI_NOOP);
+ ADVANCE_LP_RING();
+ }
}
- i915_emit_breadcrumb(dev);
+ i915_emit_breadcrumb(dev);
return 0;
}
@@ -516,6 +520,7 @@ static int i915_dispatch_flip(struct drm_device * dev)
drm_i915_private_t *dev_priv = dev->dev_private;
struct drm_i915_master_private *master_priv =
dev->primary->master->driver_priv;
+ int ret;
if (!master_priv->sarea_priv)
return -EINVAL;
@@ -527,12 +532,13 @@ static int i915_dispatch_flip(struct drm_device * dev)
i915_kernel_lost_context(dev);
- BEGIN_LP_RING(2);
+ ret = BEGIN_LP_RING(10);
+ if (ret)
+ return ret;
+
OUT_RING(MI_FLUSH | MI_READ_FLUSH);
OUT_RING(0);
- ADVANCE_LP_RING();
- BEGIN_LP_RING(6);
OUT_RING(CMD_OP_DISPLAYBUFFER_INFO | ASYNC_FLIP);
OUT_RING(0);
if (dev_priv->current_page == 0) {
@@ -543,33 +549,32 @@ static int i915_dispatch_flip(struct drm_device * dev)
dev_priv->current_page = 0;
}
OUT_RING(0);
- ADVANCE_LP_RING();
- BEGIN_LP_RING(2);
OUT_RING(MI_WAIT_FOR_EVENT | MI_WAIT_FOR_PLANE_A_FLIP);
OUT_RING(0);
+
ADVANCE_LP_RING();
master_priv->sarea_priv->last_enqueue = dev_priv->counter++;
- BEGIN_LP_RING(4);
- OUT_RING(MI_STORE_DWORD_INDEX);
- OUT_RING(I915_BREADCRUMB_INDEX << MI_STORE_DWORD_INDEX_SHIFT);
- OUT_RING(dev_priv->counter);
- OUT_RING(0);
- ADVANCE_LP_RING();
+ if (BEGIN_LP_RING(4) == 0) {
+ OUT_RING(MI_STORE_DWORD_INDEX);
+ OUT_RING(I915_BREADCRUMB_INDEX << MI_STORE_DWORD_INDEX_SHIFT);
+ OUT_RING(dev_priv->counter);
+ OUT_RING(0);
+ ADVANCE_LP_RING();
+ }
master_priv->sarea_priv->pf_current_page = dev_priv->current_page;
return 0;
}
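/* The flip formerly used three back-to-back reservations (2+6+2
 * dwords); merging them into a single BEGIN_LP_RING(10) means a
 * failure can no longer leave a partially emitted flip sequence in the
 * ring. Reserving slightly more than is written is harmless: each
 * OUT_RING() advances the tail one dword at a time and
 * ADVANCE_LP_RING() commits only what was actually emitted. */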
-static int i915_quiescent(struct drm_device * dev)
+static int i915_quiescent(struct drm_device *dev)
{
- drm_i915_private_t *dev_priv = dev->dev_private;
+ struct intel_ring_buffer *ring = LP_RING(dev->dev_private);
i915_kernel_lost_context(dev);
- return intel_wait_ring_buffer(dev, &dev_priv->render_ring,
- dev_priv->render_ring.size - 8);
+ return intel_wait_ring_buffer(ring, ring->size - 8);
}
static int i915_flush_ioctl(struct drm_device *dev, void *data,
@@ -768,9 +773,15 @@ static int i915_getparam(struct drm_device *dev, void *data,
case I915_PARAM_HAS_BLT:
value = HAS_BLT(dev);
break;
+ case I915_PARAM_HAS_RELAXED_FENCING:
+ value = 1;
+ break;
case I915_PARAM_HAS_COHERENT_RINGS:
value = 1;
break;
+ case I915_PARAM_HAS_EXEC_CONSTANTS:
+ value = INTEL_INFO(dev)->gen >= 4;
+ break;
default:
DRM_DEBUG_DRIVER("Unknown parameter %d\n",
param->param);
@@ -826,7 +837,7 @@ static int i915_set_status_page(struct drm_device *dev, void *data,
{
drm_i915_private_t *dev_priv = dev->dev_private;
drm_i915_hws_addr_t *hws = data;
- struct intel_ring_buffer *ring = &dev_priv->render_ring;
+ struct intel_ring_buffer *ring = LP_RING(dev_priv);
if (!I915_NEED_GFX_HWS(dev))
return -EINVAL;
@@ -1005,73 +1016,47 @@ intel_teardown_mchbar(struct drm_device *dev)
#define PTE_VALID (1 << 0)
/**
- * i915_gtt_to_phys - take a GTT address and turn it into a physical one
+ * i915_stolen_to_phys - take an offset into stolen memory and turn it into
+ * a physical one
* @dev: drm device
- * @gtt_addr: address to translate
+ * @offset: address to translate
*
- * Some chip functions require allocations from stolen space but need the
- * physical address of the memory in question. We use this routine
- * to get a physical address suitable for register programming from a given
- * GTT address.
+ * Some chip functions require allocations from stolen space and need the
+ * physical address of the memory in question.
*/
-static unsigned long i915_gtt_to_phys(struct drm_device *dev,
- unsigned long gtt_addr)
+static unsigned long i915_stolen_to_phys(struct drm_device *dev, u32 offset)
{
- unsigned long *gtt;
- unsigned long entry, phys;
- int gtt_bar = IS_GEN2(dev) ? 1 : 0;
- int gtt_offset, gtt_size;
-
- if (INTEL_INFO(dev)->gen >= 4) {
- if (IS_G4X(dev) || INTEL_INFO(dev)->gen > 4) {
- gtt_offset = 2*1024*1024;
- gtt_size = 2*1024*1024;
- } else {
- gtt_offset = 512*1024;
- gtt_size = 512*1024;
- }
+ struct drm_i915_private *dev_priv = dev->dev_private;
+ struct pci_dev *pdev = dev_priv->bridge_dev;
+ u32 base;
+
+#if 0
+ /* On the machines I have tested the Graphics Base of Stolen Memory
+ * is unreliable, so compute the base by subtracting the stolen memory
+ * from the Top of Low Usable DRAM which is where the BIOS places
+ * the graphics stolen memory.
+ */
+ if (INTEL_INFO(dev)->gen > 3 || IS_G33(dev)) {
+ /* top 32bits are reserved = 0 */
+ pci_read_config_dword(pdev, 0xA4, &base);
} else {
- gtt_bar = 3;
- gtt_offset = 0;
- gtt_size = pci_resource_len(dev->pdev, gtt_bar);
- }
-
- gtt = ioremap_wc(pci_resource_start(dev->pdev, gtt_bar) + gtt_offset,
- gtt_size);
- if (!gtt) {
- DRM_ERROR("ioremap of GTT failed\n");
- return 0;
- }
-
- entry = *(volatile u32 *)(gtt + (gtt_addr / 1024));
-
- DRM_DEBUG_DRIVER("GTT addr: 0x%08lx, PTE: 0x%08lx\n", gtt_addr, entry);
-
- /* Mask out these reserved bits on this hardware. */
- if (INTEL_INFO(dev)->gen < 4 && !IS_G33(dev))
- entry &= ~PTE_ADDRESS_MASK_HIGH;
-
- /* If it's not a mapping type we know, then bail. */
- if ((entry & PTE_MAPPING_TYPE_MASK) != PTE_MAPPING_TYPE_UNCACHED &&
- (entry & PTE_MAPPING_TYPE_MASK) != PTE_MAPPING_TYPE_CACHED) {
- iounmap(gtt);
- return 0;
- }
-
- if (!(entry & PTE_VALID)) {
- DRM_ERROR("bad GTT entry in stolen space\n");
- iounmap(gtt);
- return 0;
+ /* XXX presume 8xx is the same as i915 */
+ pci_bus_read_config_dword(pdev->bus, 2, 0x5C, &base);
+ }
+#else
+ if (INTEL_INFO(dev)->gen > 3 || IS_G33(dev)) {
+ u16 val;
+ pci_read_config_word(pdev, 0xb0, &val);
+ base = val >> 4 << 20;
+ } else {
+ u8 val;
+ pci_read_config_byte(pdev, 0x9c, &val);
+ base = val >> 3 << 27;
}
+ base -= dev_priv->mm.gtt->stolen_size;
+#endif
- iounmap(gtt);
-
- phys =(entry & PTE_ADDRESS_MASK) |
- ((uint64_t)(entry & PTE_ADDRESS_MASK_HIGH) << (32 - 4));
-
- DRM_DEBUG_DRIVER("GTT addr: 0x%08lx, phys addr: 0x%08lx\n", gtt_addr, phys);
-
- return phys;
+ return base + offset;
}
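/* Per the rationale kept under "#if 0": the Graphics Base of Stolen
 * Memory proved unreliable on tested machines, so the live branch
 * instead reads what is taken to be the bridge's Top of Low Usable
 * DRAM (assumed to sit at config offset 0xb0 on gen4+/G33 and 0x9c on
 * older parts) and subtracts the stolen size, since the BIOS places
 * the graphics stolen memory directly below TOLUD. */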
static void i915_warn_stolen(struct drm_device *dev)
@@ -1087,54 +1072,35 @@ static void i915_setup_compression(struct drm_device *dev, int size)
unsigned long cfb_base;
unsigned long ll_base = 0;
- /* Leave 1M for line length buffer & misc. */
- compressed_fb = drm_mm_search_free(&dev_priv->mm.vram, size, 4096, 0);
- if (!compressed_fb) {
- dev_priv->no_fbc_reason = FBC_STOLEN_TOO_SMALL;
- i915_warn_stolen(dev);
- return;
- }
+ compressed_fb = drm_mm_search_free(&dev_priv->mm.stolen, size, 4096, 0);
+ if (compressed_fb)
+ compressed_fb = drm_mm_get_block(compressed_fb, size, 4096);
+ if (!compressed_fb)
+ goto err;
- compressed_fb = drm_mm_get_block(compressed_fb, size, 4096);
- if (!compressed_fb) {
- i915_warn_stolen(dev);
- dev_priv->no_fbc_reason = FBC_STOLEN_TOO_SMALL;
- return;
- }
+ cfb_base = i915_stolen_to_phys(dev, compressed_fb->start);
+ if (!cfb_base)
+ goto err_fb;
- cfb_base = i915_gtt_to_phys(dev, compressed_fb->start);
- if (!cfb_base) {
- DRM_ERROR("failed to get stolen phys addr, disabling FBC\n");
- drm_mm_put_block(compressed_fb);
- }
+ if (!(IS_GM45(dev) || HAS_PCH_SPLIT(dev))) {
+ compressed_llb = drm_mm_search_free(&dev_priv->mm.stolen,
+ 4096, 4096, 0);
+ if (compressed_llb)
+ compressed_llb = drm_mm_get_block(compressed_llb,
+ 4096, 4096);
+ if (!compressed_llb)
+ goto err_fb;
- if (!(IS_GM45(dev) || IS_IRONLAKE_M(dev))) {
- compressed_llb = drm_mm_search_free(&dev_priv->mm.vram, 4096,
- 4096, 0);
- if (!compressed_llb) {
- i915_warn_stolen(dev);
- return;
- }
-
- compressed_llb = drm_mm_get_block(compressed_llb, 4096, 4096);
- if (!compressed_llb) {
- i915_warn_stolen(dev);
- return;
- }
-
- ll_base = i915_gtt_to_phys(dev, compressed_llb->start);
- if (!ll_base) {
- DRM_ERROR("failed to get stolen phys addr, disabling FBC\n");
- drm_mm_put_block(compressed_fb);
- drm_mm_put_block(compressed_llb);
- }
+ ll_base = i915_stolen_to_phys(dev, compressed_llb->start);
+ if (!ll_base)
+ goto err_llb;
}
dev_priv->cfb_size = size;
intel_disable_fbc(dev);
dev_priv->compressed_fb = compressed_fb;
- if (IS_IRONLAKE_M(dev))
+ if (HAS_PCH_SPLIT(dev))
I915_WRITE(ILK_DPFC_CB_BASE, compressed_fb->start);
else if (IS_GM45(dev)) {
I915_WRITE(DPFC_CB_BASE, compressed_fb->start);
@@ -1144,8 +1110,17 @@ static void i915_setup_compression(struct drm_device *dev, int size)
dev_priv->compressed_llb = compressed_llb;
}
- DRM_DEBUG_KMS("FBC base 0x%08lx, ll base 0x%08lx, size %dM\n", cfb_base,
- ll_base, size >> 20);
+ DRM_DEBUG_KMS("FBC base 0x%08lx, ll base 0x%08lx, size %dM\n",
+ cfb_base, ll_base, size >> 20);
+ return;
+
+err_llb:
+ drm_mm_put_block(compressed_llb);
+err_fb:
+ drm_mm_put_block(compressed_fb);
+err:
+ dev_priv->no_fbc_reason = FBC_STOLEN_TOO_SMALL;
+ i915_warn_stolen(dev);
}
static void i915_cleanup_compression(struct drm_device *dev)
@@ -1176,12 +1151,16 @@ static void i915_switcheroo_set_state(struct pci_dev *pdev, enum vga_switcheroo_
pm_message_t pmm = { .event = PM_EVENT_SUSPEND };
if (state == VGA_SWITCHEROO_ON) {
printk(KERN_INFO "i915: switched on\n");
+ dev->switch_power_state = DRM_SWITCH_POWER_CHANGING;
/* i915 resume handler doesn't set to D0 */
pci_set_power_state(dev->pdev, PCI_D0);
i915_resume(dev);
+ dev->switch_power_state = DRM_SWITCH_POWER_ON;
} else {
printk(KERN_ERR "i915: switched off\n");
+ dev->switch_power_state = DRM_SWITCH_POWER_CHANGING;
i915_suspend(dev, pmm);
+ dev->switch_power_state = DRM_SWITCH_POWER_OFF;
}
}
@@ -1196,17 +1175,20 @@ static bool i915_switcheroo_can_switch(struct pci_dev *pdev)
return can_switch;
}
-static int i915_load_modeset_init(struct drm_device *dev,
- unsigned long prealloc_size,
- unsigned long agp_size)
+static int i915_load_modeset_init(struct drm_device *dev)
{
struct drm_i915_private *dev_priv = dev->dev_private;
+ unsigned long prealloc_size, gtt_size, mappable_size;
int ret = 0;
- /* Basic memrange allocator for stolen space (aka mm.vram) */
- drm_mm_init(&dev_priv->mm.vram, 0, prealloc_size);
+ prealloc_size = dev_priv->mm.gtt->stolen_size;
+ gtt_size = dev_priv->mm.gtt->gtt_total_entries << PAGE_SHIFT;
+ mappable_size = dev_priv->mm.gtt->gtt_mappable_entries << PAGE_SHIFT;
- /* Let GEM Manage from end of prealloc space to end of aperture.
+ /* Basic memrange allocator for stolen space */
+ drm_mm_init(&dev_priv->mm.stolen, 0, prealloc_size);
+
+ /* Let GEM Manage all of the aperture.
*
* However, leave one page at the end still bound to the scratch page.
* There are a number of places where the hardware apparently
@@ -1215,7 +1197,7 @@ static int i915_load_modeset_init(struct drm_device *dev,
* at the last page of the aperture. One page should be enough to
* keep any prefetching inside of the aperture.
*/
- i915_gem_do_init(dev, prealloc_size, agp_size - 4096);
+ i915_gem_do_init(dev, 0, mappable_size, gtt_size - PAGE_SIZE);
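/* i915_gem_do_init() gains a mappable_end argument in this series; a
 * sketch of the assumed signature, inferred from the call above:
 *
 *	int i915_gem_do_init(struct drm_device *dev,
 *			     unsigned long start,
 *			     unsigned long mappable_end,
 *			     unsigned long end);
 */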
mutex_lock(&dev->struct_mutex);
ret = i915_gem_init_ringbuffer(dev);
@@ -1227,16 +1209,17 @@ static int i915_load_modeset_init(struct drm_device *dev,
if (I915_HAS_FBC(dev) && i915_powersave) {
int cfb_size;
- /* Try to get an 8M buffer... */
- if (prealloc_size > (9*1024*1024))
- cfb_size = 8*1024*1024;
+ /* Leave 1M for line length buffer & misc. */
+
+ /* Try to get a 32M buffer... */
+ if (prealloc_size > (36*1024*1024))
+ cfb_size = 32*1024*1024;
else /* fall back to 7/8 of the stolen space */
cfb_size = prealloc_size * 7 / 8;
i915_setup_compression(dev, cfb_size);
}
- /* Allow hardware batchbuffers unless told otherwise.
- */
+ /* Allow hardware batchbuffers unless told otherwise. */
dev_priv->allow_batchbuffer = 1;
ret = intel_parse_bios(dev);
@@ -1252,6 +1235,7 @@ static int i915_load_modeset_init(struct drm_device *dev,
ret = vga_switcheroo_register_client(dev->pdev,
i915_switcheroo_set_state,
+ NULL,
i915_switcheroo_can_switch);
if (ret)
goto cleanup_vga_client;
@@ -1426,152 +1410,12 @@ static void i915_ironlake_get_mem_freq(struct drm_device *dev)
}
}
-struct v_table {
- u8 vid;
- unsigned long vd; /* in .1 mil */
- unsigned long vm; /* in .1 mil */
- u8 pvid;
-};
-
-static struct v_table v_table[] = {
- { 0, 16125, 15000, 0x7f, },
- { 1, 16000, 14875, 0x7e, },
- { 2, 15875, 14750, 0x7d, },
- { 3, 15750, 14625, 0x7c, },
- { 4, 15625, 14500, 0x7b, },
- { 5, 15500, 14375, 0x7a, },
- { 6, 15375, 14250, 0x79, },
- { 7, 15250, 14125, 0x78, },
- { 8, 15125, 14000, 0x77, },
- { 9, 15000, 13875, 0x76, },
- { 10, 14875, 13750, 0x75, },
- { 11, 14750, 13625, 0x74, },
- { 12, 14625, 13500, 0x73, },
- { 13, 14500, 13375, 0x72, },
- { 14, 14375, 13250, 0x71, },
- { 15, 14250, 13125, 0x70, },
- { 16, 14125, 13000, 0x6f, },
- { 17, 14000, 12875, 0x6e, },
- { 18, 13875, 12750, 0x6d, },
- { 19, 13750, 12625, 0x6c, },
- { 20, 13625, 12500, 0x6b, },
- { 21, 13500, 12375, 0x6a, },
- { 22, 13375, 12250, 0x69, },
- { 23, 13250, 12125, 0x68, },
- { 24, 13125, 12000, 0x67, },
- { 25, 13000, 11875, 0x66, },
- { 26, 12875, 11750, 0x65, },
- { 27, 12750, 11625, 0x64, },
- { 28, 12625, 11500, 0x63, },
- { 29, 12500, 11375, 0x62, },
- { 30, 12375, 11250, 0x61, },
- { 31, 12250, 11125, 0x60, },
- { 32, 12125, 11000, 0x5f, },
- { 33, 12000, 10875, 0x5e, },
- { 34, 11875, 10750, 0x5d, },
- { 35, 11750, 10625, 0x5c, },
- { 36, 11625, 10500, 0x5b, },
- { 37, 11500, 10375, 0x5a, },
- { 38, 11375, 10250, 0x59, },
- { 39, 11250, 10125, 0x58, },
- { 40, 11125, 10000, 0x57, },
- { 41, 11000, 9875, 0x56, },
- { 42, 10875, 9750, 0x55, },
- { 43, 10750, 9625, 0x54, },
- { 44, 10625, 9500, 0x53, },
- { 45, 10500, 9375, 0x52, },
- { 46, 10375, 9250, 0x51, },
- { 47, 10250, 9125, 0x50, },
- { 48, 10125, 9000, 0x4f, },
- { 49, 10000, 8875, 0x4e, },
- { 50, 9875, 8750, 0x4d, },
- { 51, 9750, 8625, 0x4c, },
- { 52, 9625, 8500, 0x4b, },
- { 53, 9500, 8375, 0x4a, },
- { 54, 9375, 8250, 0x49, },
- { 55, 9250, 8125, 0x48, },
- { 56, 9125, 8000, 0x47, },
- { 57, 9000, 7875, 0x46, },
- { 58, 8875, 7750, 0x45, },
- { 59, 8750, 7625, 0x44, },
- { 60, 8625, 7500, 0x43, },
- { 61, 8500, 7375, 0x42, },
- { 62, 8375, 7250, 0x41, },
- { 63, 8250, 7125, 0x40, },
- { 64, 8125, 7000, 0x3f, },
- { 65, 8000, 6875, 0x3e, },
- { 66, 7875, 6750, 0x3d, },
- { 67, 7750, 6625, 0x3c, },
- { 68, 7625, 6500, 0x3b, },
- { 69, 7500, 6375, 0x3a, },
- { 70, 7375, 6250, 0x39, },
- { 71, 7250, 6125, 0x38, },
- { 72, 7125, 6000, 0x37, },
- { 73, 7000, 5875, 0x36, },
- { 74, 6875, 5750, 0x35, },
- { 75, 6750, 5625, 0x34, },
- { 76, 6625, 5500, 0x33, },
- { 77, 6500, 5375, 0x32, },
- { 78, 6375, 5250, 0x31, },
- { 79, 6250, 5125, 0x30, },
- { 80, 6125, 5000, 0x2f, },
- { 81, 6000, 4875, 0x2e, },
- { 82, 5875, 4750, 0x2d, },
- { 83, 5750, 4625, 0x2c, },
- { 84, 5625, 4500, 0x2b, },
- { 85, 5500, 4375, 0x2a, },
- { 86, 5375, 4250, 0x29, },
- { 87, 5250, 4125, 0x28, },
- { 88, 5125, 4000, 0x27, },
- { 89, 5000, 3875, 0x26, },
- { 90, 4875, 3750, 0x25, },
- { 91, 4750, 3625, 0x24, },
- { 92, 4625, 3500, 0x23, },
- { 93, 4500, 3375, 0x22, },
- { 94, 4375, 3250, 0x21, },
- { 95, 4250, 3125, 0x20, },
- { 96, 4125, 3000, 0x1f, },
- { 97, 4125, 3000, 0x1e, },
- { 98, 4125, 3000, 0x1d, },
- { 99, 4125, 3000, 0x1c, },
- { 100, 4125, 3000, 0x1b, },
- { 101, 4125, 3000, 0x1a, },
- { 102, 4125, 3000, 0x19, },
- { 103, 4125, 3000, 0x18, },
- { 104, 4125, 3000, 0x17, },
- { 105, 4125, 3000, 0x16, },
- { 106, 4125, 3000, 0x15, },
- { 107, 4125, 3000, 0x14, },
- { 108, 4125, 3000, 0x13, },
- { 109, 4125, 3000, 0x12, },
- { 110, 4125, 3000, 0x11, },
- { 111, 4125, 3000, 0x10, },
- { 112, 4125, 3000, 0x0f, },
- { 113, 4125, 3000, 0x0e, },
- { 114, 4125, 3000, 0x0d, },
- { 115, 4125, 3000, 0x0c, },
- { 116, 4125, 3000, 0x0b, },
- { 117, 4125, 3000, 0x0a, },
- { 118, 4125, 3000, 0x09, },
- { 119, 4125, 3000, 0x08, },
- { 120, 1125, 0, 0x07, },
- { 121, 1000, 0, 0x06, },
- { 122, 875, 0, 0x05, },
- { 123, 750, 0, 0x04, },
- { 124, 625, 0, 0x03, },
- { 125, 500, 0, 0x02, },
- { 126, 375, 0, 0x01, },
- { 127, 0, 0, 0x00, },
-};
-
-struct cparams {
- int i;
- int t;
- int m;
- int c;
-};
-
-static struct cparams cparams[] = {
+static const struct cparams {
+ u16 i;
+ u16 t;
+ u16 m;
+ u16 c;
+} cparams[] = {
{ 1, 1333, 301, 28664 },
{ 1, 1066, 294, 24460 },
{ 1, 800, 294, 25192 },
@@ -1637,21 +1481,145 @@ unsigned long i915_mch_val(struct drm_i915_private *dev_priv)
return ((m * x) / 127) - b;
}
-static unsigned long pvid_to_extvid(struct drm_i915_private *dev_priv, u8 pxvid)
+static u16 pvid_to_extvid(struct drm_i915_private *dev_priv, u8 pxvid)
{
- unsigned long val = 0;
- int i;
-
- for (i = 0; i < ARRAY_SIZE(v_table); i++) {
- if (v_table[i].pvid == pxvid) {
- if (IS_MOBILE(dev_priv->dev))
- val = v_table[i].vm;
- else
- val = v_table[i].vd;
- }
- }
-
- return val;
+ static const struct v_table {
+ u16 vd; /* in .1 mil */
+ u16 vm; /* in .1 mil */
+ } v_table[] = {
+ { 0, 0, },
+ { 375, 0, },
+ { 500, 0, },
+ { 625, 0, },
+ { 750, 0, },
+ { 875, 0, },
+ { 1000, 0, },
+ { 1125, 0, },
+ { 4125, 3000, },
+ { 4125, 3000, },
+ { 4125, 3000, },
+ { 4125, 3000, },
+ { 4125, 3000, },
+ { 4125, 3000, },
+ { 4125, 3000, },
+ { 4125, 3000, },
+ { 4125, 3000, },
+ { 4125, 3000, },
+ { 4125, 3000, },
+ { 4125, 3000, },
+ { 4125, 3000, },
+ { 4125, 3000, },
+ { 4125, 3000, },
+ { 4125, 3000, },
+ { 4125, 3000, },
+ { 4125, 3000, },
+ { 4125, 3000, },
+ { 4125, 3000, },
+ { 4125, 3000, },
+ { 4125, 3000, },
+ { 4125, 3000, },
+ { 4125, 3000, },
+ { 4250, 3125, },
+ { 4375, 3250, },
+ { 4500, 3375, },
+ { 4625, 3500, },
+ { 4750, 3625, },
+ { 4875, 3750, },
+ { 5000, 3875, },
+ { 5125, 4000, },
+ { 5250, 4125, },
+ { 5375, 4250, },
+ { 5500, 4375, },
+ { 5625, 4500, },
+ { 5750, 4625, },
+ { 5875, 4750, },
+ { 6000, 4875, },
+ { 6125, 5000, },
+ { 6250, 5125, },
+ { 6375, 5250, },
+ { 6500, 5375, },
+ { 6625, 5500, },
+ { 6750, 5625, },
+ { 6875, 5750, },
+ { 7000, 5875, },
+ { 7125, 6000, },
+ { 7250, 6125, },
+ { 7375, 6250, },
+ { 7500, 6375, },
+ { 7625, 6500, },
+ { 7750, 6625, },
+ { 7875, 6750, },
+ { 8000, 6875, },
+ { 8125, 7000, },
+ { 8250, 7125, },
+ { 8375, 7250, },
+ { 8500, 7375, },
+ { 8625, 7500, },
+ { 8750, 7625, },
+ { 8875, 7750, },
+ { 9000, 7875, },
+ { 9125, 8000, },
+ { 9250, 8125, },
+ { 9375, 8250, },
+ { 9500, 8375, },
+ { 9625, 8500, },
+ { 9750, 8625, },
+ { 9875, 8750, },
+ { 10000, 8875, },
+ { 10125, 9000, },
+ { 10250, 9125, },
+ { 10375, 9250, },
+ { 10500, 9375, },
+ { 10625, 9500, },
+ { 10750, 9625, },
+ { 10875, 9750, },
+ { 11000, 9875, },
+ { 11125, 10000, },
+ { 11250, 10125, },
+ { 11375, 10250, },
+ { 11500, 10375, },
+ { 11625, 10500, },
+ { 11750, 10625, },
+ { 11875, 10750, },
+ { 12000, 10875, },
+ { 12125, 11000, },
+ { 12250, 11125, },
+ { 12375, 11250, },
+ { 12500, 11375, },
+ { 12625, 11500, },
+ { 12750, 11625, },
+ { 12875, 11750, },
+ { 13000, 11875, },
+ { 13125, 12000, },
+ { 13250, 12125, },
+ { 13375, 12250, },
+ { 13500, 12375, },
+ { 13625, 12500, },
+ { 13750, 12625, },
+ { 13875, 12750, },
+ { 14000, 12875, },
+ { 14125, 13000, },
+ { 14250, 13125, },
+ { 14375, 13250, },
+ { 14500, 13375, },
+ { 14625, 13500, },
+ { 14750, 13625, },
+ { 14875, 13750, },
+ { 15000, 13875, },
+ { 15125, 14000, },
+ { 15250, 14125, },
+ { 15375, 14250, },
+ { 15500, 14375, },
+ { 15625, 14500, },
+ { 15750, 14625, },
+ { 15875, 14750, },
+ { 16000, 14875, },
+ { 16125, 15000, },
+ };
+ if (dev_priv->info->is_mobile)
+ return v_table[pxvid].vm;
+ else
+ return v_table[pxvid].vd;
}
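/* Design note: the old table carried explicit vid/pvid keys and was
 * scanned linearly for a matching PVID; the replacement is ordered so
 * that the PXVID register value is itself the index, turning the
 * lookup into a direct array access (and shrinking each entry to two
 * u16 values). */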
void i915_update_gfx_val(struct drm_i915_private *dev_priv)
@@ -1905,9 +1873,9 @@ ips_ping_for_i915_load(void)
int i915_driver_load(struct drm_device *dev, unsigned long flags)
{
struct drm_i915_private *dev_priv;
- resource_size_t base, size;
int ret = 0, mmio_bar;
- uint32_t agp_size, prealloc_size;
+ uint32_t agp_size;
+
/* i915 has 4 more counters */
dev->counters += 4;
dev->types[6] = _DRM_STAT_IRQ;
@@ -1923,11 +1891,6 @@ int i915_driver_load(struct drm_device *dev, unsigned long flags)
dev_priv->dev = dev;
dev_priv->info = (struct intel_device_info *) flags;
- /* Add register map (needed for suspend/resume) */
- mmio_bar = IS_GEN2(dev) ? 1 : 0;
- base = pci_resource_start(dev->pdev, mmio_bar);
- size = pci_resource_len(dev->pdev, mmio_bar);
-
if (i915_get_bridge_dev(dev)) {
ret = -EIO;
goto free_priv;
@@ -1937,16 +1900,25 @@ int i915_driver_load(struct drm_device *dev, unsigned long flags)
if (IS_GEN2(dev))
dma_set_coherent_mask(&dev->pdev->dev, DMA_BIT_MASK(30));
- dev_priv->regs = ioremap(base, size);
+ mmio_bar = IS_GEN2(dev) ? 1 : 0;
+ dev_priv->regs = pci_iomap(dev->pdev, mmio_bar, 0);
if (!dev_priv->regs) {
DRM_ERROR("failed to map registers\n");
ret = -EIO;
goto put_bridge;
}
+ dev_priv->mm.gtt = intel_gtt_get();
+ if (!dev_priv->mm.gtt) {
+ DRM_ERROR("Failed to initialize GTT\n");
+ ret = -ENODEV;
+ goto out_iomapfree;
+ }
+
+ agp_size = dev_priv->mm.gtt->gtt_mappable_entries << PAGE_SHIFT;
+
dev_priv->mm.gtt_mapping =
- io_mapping_create_wc(dev->agp->base,
- dev->agp->agp_info.aper_size * 1024*1024);
+ io_mapping_create_wc(dev->agp->base, agp_size);
if (dev_priv->mm.gtt_mapping == NULL) {
ret = -EIO;
goto out_rmmap;
@@ -1958,24 +1930,13 @@ int i915_driver_load(struct drm_device *dev, unsigned long flags)
* MTRR if present. Even if a UC MTRR isn't present.
*/
dev_priv->mm.gtt_mtrr = mtrr_add(dev->agp->base,
- dev->agp->agp_info.aper_size *
- 1024 * 1024,
+ agp_size,
MTRR_TYPE_WRCOMB, 1);
if (dev_priv->mm.gtt_mtrr < 0) {
DRM_INFO("MTRR allocation failed. Graphics "
"performance may suffer.\n");
}
- dev_priv->mm.gtt = intel_gtt_get();
- if (!dev_priv->mm.gtt) {
- DRM_ERROR("Failed to initialize GTT\n");
- ret = -ENODEV;
- goto out_iomapfree;
- }
-
- prealloc_size = dev_priv->mm.gtt->gtt_stolen_entries << PAGE_SHIFT;
- agp_size = dev_priv->mm.gtt->gtt_mappable_entries << PAGE_SHIFT;
-
/* The i915 workqueue is primarily used for batched retirement of
* requests (and thus managing bo) once the task has been completed
* by the GPU. i915_gem_retire_requests() is called directly when we
@@ -1983,7 +1944,7 @@ int i915_driver_load(struct drm_device *dev, unsigned long flags)
* bo.
*
* It is also used for periodic low-priority events, such as
- * idle-timers and hangcheck.
+ * idle-timers and recording error state.
*
* All tasks on the workqueue are expected to acquire the dev mutex
* so there is no point in running more than one instance of the
@@ -2001,20 +1962,11 @@ int i915_driver_load(struct drm_device *dev, unsigned long flags)
/* enable GEM by default */
dev_priv->has_gem = 1;
- if (prealloc_size > agp_size * 3 / 4) {
- DRM_ERROR("Detected broken video BIOS with %d/%dkB of video "
- "memory stolen.\n",
- prealloc_size / 1024, agp_size / 1024);
- DRM_ERROR("Disabling GEM. (try reducing stolen memory or "
- "updating the BIOS to fix).\n");
- dev_priv->has_gem = 0;
- }
-
if (dev_priv->has_gem == 0 &&
drm_core_check_feature(dev, DRIVER_MODESET)) {
DRM_ERROR("kernel modesetting requires GEM, disabling driver.\n");
ret = -ENODEV;
- goto out_iomapfree;
+ goto out_workqueue_free;
}
dev->driver->get_vblank_counter = i915_get_vblank_counter;
@@ -2037,8 +1989,8 @@ int i915_driver_load(struct drm_device *dev, unsigned long flags)
/* Init HWS */
if (!I915_NEED_GFX_HWS(dev)) {
ret = i915_init_phys_hws(dev);
- if (ret != 0)
- goto out_workqueue_free;
+ if (ret)
+ goto out_gem_unload;
}
if (IS_PINEVIEW(dev))
@@ -2060,16 +2012,13 @@ int i915_driver_load(struct drm_device *dev, unsigned long flags)
if (!IS_I945G(dev) && !IS_I945GM(dev))
pci_enable_msi(dev->pdev);
- spin_lock_init(&dev_priv->user_irq_lock);
+ spin_lock_init(&dev_priv->irq_lock);
spin_lock_init(&dev_priv->error_lock);
dev_priv->trace_irq_seqno = 0;
ret = drm_vblank_init(dev, I915_NUM_PIPE);
-
- if (ret) {
- (void) i915_driver_unload(dev);
- return ret;
- }
+ if (ret)
+ goto out_gem_unload;
/* Start out suspended */
dev_priv->mm.suspended = 1;
@@ -2077,10 +2026,10 @@ int i915_driver_load(struct drm_device *dev, unsigned long flags)
intel_detect_pch(dev);
if (drm_core_check_feature(dev, DRIVER_MODESET)) {
- ret = i915_load_modeset_init(dev, prealloc_size, agp_size);
+ ret = i915_load_modeset_init(dev);
if (ret < 0) {
DRM_ERROR("failed to init modeset\n");
- goto out_workqueue_free;
+ goto out_gem_unload;
}
}
@@ -2100,12 +2049,18 @@ int i915_driver_load(struct drm_device *dev, unsigned long flags)
return 0;
+out_gem_unload:
+ if (dev->pdev->msi_enabled)
+ pci_disable_msi(dev->pdev);
+
+ intel_teardown_gmbus(dev);
+ intel_teardown_mchbar(dev);
out_workqueue_free:
destroy_workqueue(dev_priv->wq);
out_iomapfree:
io_mapping_free(dev_priv->mm.gtt_mapping);
out_rmmap:
- iounmap(dev_priv->regs);
+ pci_iounmap(dev->pdev, dev_priv->regs);
put_bridge:
pci_dev_put(dev_priv->bridge_dev);
free_priv:
@@ -2122,6 +2077,9 @@ int i915_driver_unload(struct drm_device *dev)
i915_mch_dev = NULL;
spin_unlock(&mchdev_lock);
+ if (dev_priv->mm.inactive_shrinker.shrink)
+ unregister_shrinker(&dev_priv->mm.inactive_shrinker);
+
mutex_lock(&dev->struct_mutex);
ret = i915_gpu_idle(dev);
if (ret)
@@ -2179,7 +2137,7 @@ int i915_driver_unload(struct drm_device *dev)
mutex_unlock(&dev->struct_mutex);
if (I915_HAS_FBC(dev) && i915_powersave)
i915_cleanup_compression(dev);
- drm_mm_takedown(&dev_priv->mm.vram);
+ drm_mm_takedown(&dev_priv->mm.stolen);
intel_cleanup_overlay(dev);
@@ -2188,7 +2146,7 @@ int i915_driver_unload(struct drm_device *dev)
}
if (dev_priv->regs != NULL)
- iounmap(dev_priv->regs);
+ pci_iounmap(dev->pdev, dev_priv->regs);
intel_teardown_gmbus(dev);
intel_teardown_mchbar(dev);
diff --git a/drivers/gpu/drm/i915/i915_drv.c b/drivers/gpu/drm/i915/i915_drv.c
index f737960712e..87249333198 100644
--- a/drivers/gpu/drm/i915/i915_drv.c
+++ b/drivers/gpu/drm/i915/i915_drv.c
@@ -111,7 +111,7 @@ static const struct intel_device_info intel_i965g_info = {
static const struct intel_device_info intel_i965gm_info = {
.gen = 4, .is_crestline = 1,
- .is_mobile = 1, .has_fbc = 1, .has_rc6 = 1, .has_hotplug = 1,
+ .is_mobile = 1, .has_fbc = 1, .has_hotplug = 1,
.has_overlay = 1,
.supports_tv = 1,
};
@@ -130,7 +130,7 @@ static const struct intel_device_info intel_g45_info = {
static const struct intel_device_info intel_gm45_info = {
.gen = 4, .is_g4x = 1,
- .is_mobile = 1, .need_gfx_hws = 1, .has_fbc = 1, .has_rc6 = 1,
+ .is_mobile = 1, .need_gfx_hws = 1, .has_fbc = 1,
.has_pipe_cxsr = 1, .has_hotplug = 1,
.supports_tv = 1,
.has_bsd_ring = 1,
@@ -150,7 +150,7 @@ static const struct intel_device_info intel_ironlake_d_info = {
static const struct intel_device_info intel_ironlake_m_info = {
.gen = 5, .is_mobile = 1,
- .need_gfx_hws = 1, .has_rc6 = 1, .has_hotplug = 1,
+ .need_gfx_hws = 1, .has_hotplug = 1,
.has_fbc = 0, /* disabled due to buggy hardware */
.has_bsd_ring = 1,
};
@@ -165,6 +165,7 @@ static const struct intel_device_info intel_sandybridge_d_info = {
static const struct intel_device_info intel_sandybridge_m_info = {
.gen = 6, .is_mobile = 1,
.need_gfx_hws = 1, .has_hotplug = 1,
+ .has_fbc = 1,
.has_bsd_ring = 1,
.has_blt_ring = 1,
};
@@ -244,10 +245,34 @@ void intel_detect_pch (struct drm_device *dev)
}
}
+void __gen6_force_wake_get(struct drm_i915_private *dev_priv)
+{
+ int count;
+
+ count = 0;
+ while (count++ < 50 && (I915_READ_NOTRACE(FORCEWAKE_ACK) & 1))
+ udelay(10);
+
+ I915_WRITE_NOTRACE(FORCEWAKE, 1);
+ POSTING_READ(FORCEWAKE);
+
+ count = 0;
+ while (count++ < 50 && (I915_READ_NOTRACE(FORCEWAKE_ACK) & 1) == 0)
+ udelay(10);
+}
+
+void __gen6_force_wake_put(struct drm_i915_private *dev_priv)
+{
+ I915_WRITE_NOTRACE(FORCEWAKE, 0);
+ POSTING_READ(FORCEWAKE);
+}
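/* The get/put pair brackets any access to registers in the GT power
 * well, as in the debugfs p-state dump above:
 *
 *	__gen6_force_wake_get(dev_priv);
 *	... read RPSTAT1 and friends ...
 *	__gen6_force_wake_put(dev_priv);
 *
 * get waits (up to 50 x 10us) for FORCEWAKE_ACK to go idle, asserts
 * FORCEWAKE, then waits for the ack; put simply releases it. */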
+
static int i915_drm_freeze(struct drm_device *dev)
{
struct drm_i915_private *dev_priv = dev->dev_private;
+ drm_kms_helper_poll_disable(dev);
+
pci_save_state(dev->pdev);
/* If KMS is active, we do the leavevt stuff here */
@@ -284,7 +309,9 @@ int i915_suspend(struct drm_device *dev, pm_message_t state)
if (state.event == PM_EVENT_PRETHAW)
return 0;
- drm_kms_helper_poll_disable(dev);
+
+ if (dev->switch_power_state == DRM_SWITCH_POWER_OFF)
+ return 0;
error = i915_drm_freeze(dev);
if (error)
@@ -304,6 +331,12 @@ static int i915_drm_thaw(struct drm_device *dev)
struct drm_i915_private *dev_priv = dev->dev_private;
int error = 0;
+ if (drm_core_check_feature(dev, DRIVER_MODESET)) {
+ mutex_lock(&dev->struct_mutex);
+ i915_gem_restore_gtt_mappings(dev);
+ mutex_unlock(&dev->struct_mutex);
+ }
+
i915_restore_state(dev);
intel_opregion_setup(dev);
@@ -332,6 +365,9 @@ int i915_resume(struct drm_device *dev)
{
int ret;
+ if (dev->switch_power_state == DRM_SWITCH_POWER_OFF)
+ return 0;
+
if (pci_enable_device(dev->pdev))
return -EIO;
@@ -405,6 +441,14 @@ static int ironlake_do_reset(struct drm_device *dev, u8 flags)
return wait_for(I915_READ(MCHBAR_MIRROR_BASE + ILK_GDSR) & 0x1, 500);
}
+static int gen6_do_reset(struct drm_device *dev, u8 flags)
+{
+ struct drm_i915_private *dev_priv = dev->dev_private;
+
+ I915_WRITE(GEN6_GDRST, GEN6_GRDOM_FULL);
+ return wait_for((I915_READ(GEN6_GDRST) & GEN6_GRDOM_FULL) == 0, 500);
+}
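/* GEN6_GRDOM_FULL requests a full graphics-domain reset; wait_for()
 * then polls GEN6_GDRST until the hardware clears the bit, giving up
 * after 500 ms. */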
+
/**
* i965_reset - reset chip after a hang
* @dev: drm device to reset
@@ -431,7 +475,8 @@ int i915_reset(struct drm_device *dev, u8 flags)
bool need_display = true;
int ret;
- mutex_lock(&dev->struct_mutex);
+ if (!mutex_trylock(&dev->struct_mutex))
+ return -EBUSY;
i915_gem_reset(dev);
@@ -439,6 +484,9 @@ int i915_reset(struct drm_device *dev, u8 flags)
if (get_seconds() - dev_priv->last_gpu_reset < 5) {
DRM_ERROR("GPU hanging too fast, declaring wedged!\n");
} else switch (INTEL_INFO(dev)->gen) {
+ case 6:
+ ret = gen6_do_reset(dev, flags);
+ break;
case 5:
ret = ironlake_do_reset(dev, flags);
break;
@@ -472,9 +520,14 @@ int i915_reset(struct drm_device *dev, u8 flags)
*/
if (drm_core_check_feature(dev, DRIVER_MODESET) ||
!dev_priv->mm.suspended) {
- struct intel_ring_buffer *ring = &dev_priv->render_ring;
dev_priv->mm.suspended = 0;
- ring->init(dev, ring);
+
+ dev_priv->ring[RCS].init(&dev_priv->ring[RCS]);
+ if (HAS_BSD(dev))
+ dev_priv->ring[VCS].init(&dev_priv->ring[VCS]);
+ if (HAS_BLT(dev))
+ dev_priv->ring[BCS].init(&dev_priv->ring[BCS]);
+
mutex_unlock(&dev->struct_mutex);
drm_irq_uninstall(dev);
drm_irq_install(dev);
@@ -523,6 +576,9 @@ static int i915_pm_suspend(struct device *dev)
return -ENODEV;
}
+ if (drm_dev->switch_power_state == DRM_SWITCH_POWER_OFF)
+ return 0;
+
error = i915_drm_freeze(drm_dev);
if (error)
return error;
@@ -606,6 +662,8 @@ static struct drm_driver driver = {
.device_is_agp = i915_driver_device_is_agp,
.enable_vblank = i915_enable_vblank,
.disable_vblank = i915_disable_vblank,
+ .get_vblank_timestamp = i915_get_vblank_timestamp,
+ .get_scanout_position = i915_get_crtc_scanoutpos,
.irq_preinstall = i915_driver_irq_preinstall,
.irq_postinstall = i915_driver_irq_postinstall,
.irq_uninstall = i915_driver_irq_uninstall,
@@ -661,8 +719,6 @@ static int __init i915_init(void)
driver.num_ioctls = i915_max_ioctl;
- i915_gem_shrinker_init();
-
/*
* If CONFIG_DRM_I915_KMS is set, default to KMS unless
* explicitly disabled with the module parameter.
@@ -684,17 +740,11 @@ static int __init i915_init(void)
driver.driver_features &= ~DRIVER_MODESET;
#endif
- if (!(driver.driver_features & DRIVER_MODESET)) {
- driver.suspend = i915_suspend;
- driver.resume = i915_resume;
- }
-
return drm_init(&driver);
}
static void __exit i915_exit(void)
{
- i915_gem_shrinker_exit();
drm_exit(&driver);
}
diff --git a/drivers/gpu/drm/i915/i915_drv.h b/drivers/gpu/drm/i915/i915_drv.h
index 409826da309..aac1bf332f7 100644
--- a/drivers/gpu/drm/i915/i915_drv.h
+++ b/drivers/gpu/drm/i915/i915_drv.h
@@ -89,7 +89,7 @@ struct drm_i915_gem_phys_object {
int id;
struct page **page_list;
drm_dma_handle_t *handle;
- struct drm_gem_object *cur_obj;
+ struct drm_i915_gem_object *cur_obj;
};
struct mem_block {
@@ -124,9 +124,9 @@ struct drm_i915_master_private {
#define I915_FENCE_REG_NONE -1
struct drm_i915_fence_reg {
- struct drm_gem_object *obj;
struct list_head lru_list;
- bool gpu;
+ struct drm_i915_gem_object *obj;
+ uint32_t setup_seqno;
};
struct sdvo_device_mapping {
@@ -139,6 +139,8 @@ struct sdvo_device_mapping {
u8 ddc_pin;
};
+struct intel_display_error_state;
+
struct drm_i915_error_state {
u32 eir;
u32 pgtbl_er;
@@ -148,11 +150,23 @@ struct drm_i915_error_state {
u32 ipehr;
u32 instdone;
u32 acthd;
+ u32 error; /* gen6+ */
+ u32 bcs_acthd; /* gen6+ blt engine */
+ u32 bcs_ipehr;
+ u32 bcs_ipeir;
+ u32 bcs_instdone;
+ u32 bcs_seqno;
+ u32 vcs_acthd; /* gen6+ bsd engine */
+ u32 vcs_ipehr;
+ u32 vcs_ipeir;
+ u32 vcs_instdone;
+ u32 vcs_seqno;
u32 instpm;
u32 instps;
u32 instdone1;
u32 seqno;
u64 bbaddr;
+ u64 fence[16];
struct timeval time;
struct drm_i915_error_object {
int page_count;
@@ -171,9 +185,11 @@ struct drm_i915_error_state {
u32 tiling:2;
u32 dirty:1;
u32 purgeable:1;
- } *active_bo;
- u32 active_bo_count;
+ u32 ring:4;
+ } *active_bo, *pinned_bo;
+ u32 active_bo_count, pinned_bo_count;
struct intel_overlay_error_state *overlay;
+ struct intel_display_error_state *display;
};
struct drm_i915_display_funcs {
@@ -207,7 +223,6 @@ struct intel_device_info {
u8 is_broadwater : 1;
u8 is_crestline : 1;
u8 has_fbc : 1;
- u8 has_rc6 : 1;
u8 has_pipe_cxsr : 1;
u8 has_hotplug : 1;
u8 cursor_needs_physical : 1;
@@ -243,6 +258,7 @@ typedef struct drm_i915_private {
const struct intel_device_info *info;
int has_gem;
+ int relative_constants_mode;
void __iomem *regs;
@@ -253,20 +269,15 @@ typedef struct drm_i915_private {
} *gmbus;
struct pci_dev *bridge_dev;
- struct intel_ring_buffer render_ring;
- struct intel_ring_buffer bsd_ring;
- struct intel_ring_buffer blt_ring;
+ struct intel_ring_buffer ring[I915_NUM_RINGS];
uint32_t next_seqno;
drm_dma_handle_t *status_page_dmah;
- void *seqno_page;
dma_addr_t dma_status_page;
uint32_t counter;
- unsigned int seqno_gfx_addr;
drm_local_map_t hws_map;
- struct drm_gem_object *seqno_obj;
- struct drm_gem_object *pwrctx;
- struct drm_gem_object *renderctx;
+ struct drm_i915_gem_object *pwrctx;
+ struct drm_i915_gem_object *renderctx;
struct resource mch_res;
@@ -275,25 +286,17 @@ typedef struct drm_i915_private {
int front_offset;
int current_page;
int page_flipping;
-#define I915_DEBUG_READ (1<<0)
-#define I915_DEBUG_WRITE (1<<1)
- unsigned long debug_flags;
- wait_queue_head_t irq_queue;
atomic_t irq_received;
- /** Protects user_irq_refcount and irq_mask_reg */
- spinlock_t user_irq_lock;
u32 trace_irq_seqno;
+
+ /* protects the irq masks */
+ spinlock_t irq_lock;
/** Cached value of IMR to avoid reads in updating the bitfield */
- u32 irq_mask_reg;
u32 pipestat[2];
- /** splitted irq regs for graphics and display engine on Ironlake,
- irq_mask_reg is still used for display irq. */
- u32 gt_irq_mask_reg;
- u32 gt_irq_enable_reg;
- u32 de_irq_enable_reg;
- u32 pch_irq_mask_reg;
- u32 pch_irq_enable_reg;
+ u32 irq_mask;
+ u32 gt_irq_mask;
+ u32 pch_irq_mask;
u32 hotplug_supported_mask;
struct work_struct hotplug_work;
@@ -306,7 +309,7 @@ typedef struct drm_i915_private {
int num_pipe;
/* For hangcheck timer */
-#define DRM_I915_HANGCHECK_PERIOD 250 /* in ms */
+#define DRM_I915_HANGCHECK_PERIOD 1500 /* in ms */
struct timer_list hangcheck_timer;
int hangcheck_count;
uint32_t last_acthd;
@@ -530,23 +533,21 @@ typedef struct drm_i915_private {
struct {
/** Bridge to intel-gtt-ko */
- struct intel_gtt *gtt;
+ const struct intel_gtt *gtt;
/** Memory allocator for GTT stolen memory */
- struct drm_mm vram;
+ struct drm_mm stolen;
/** Memory allocator for GTT */
struct drm_mm gtt_space;
+ /** List of all objects in gtt_space. Used to restore gtt
+ * mappings on resume */
+ struct list_head gtt_list;
+ /** End of mappable part of GTT */
+ unsigned long gtt_mappable_end;
struct io_mapping *gtt_mapping;
int gtt_mtrr;
- /**
- * Membership on list of all loaded devices, used to evict
- * inactive buffers under memory pressure.
- *
- * Modifications should only be done whilst holding the
- * shrink_list_lock spinlock.
- */
- struct list_head shrink_list;
+ struct shrinker inactive_shrinker;
/**
* List of objects currently involved in rendering.
@@ -609,16 +610,6 @@ typedef struct drm_i915_private {
struct delayed_work retire_work;
/**
- * Waiting sequence number, if any
- */
- uint32_t waiting_gem_seqno;
-
- /**
- * Last seq seen at irq time
- */
- uint32_t irq_gem_seqno;
-
- /**
* Flag if the X Server, and thus DRM, is not currently in
* control of the device.
*
@@ -645,16 +636,11 @@ typedef struct drm_i915_private {
/* storage for physical objects */
struct drm_i915_gem_phys_object *phys_objs[I915_MAX_PHYS_OBJECT];
- uint32_t flush_rings;
-
/* accounting, useful for userland debugging */
- size_t object_memory;
- size_t pin_memory;
- size_t gtt_memory;
size_t gtt_total;
+ size_t mappable_gtt_total;
+ size_t object_memory;
u32 object_count;
- u32 pin_count;
- u32 gtt_count;
} mm;
struct sdvo_device_mapping sdvo_mappings[2];
/* indicate whether the LVDS_BORDER should be enabled or not */
@@ -688,14 +674,14 @@ typedef struct drm_i915_private {
u8 fmax;
u8 fstart;
- u64 last_count1;
- unsigned long last_time1;
- u64 last_count2;
- struct timespec last_time2;
- unsigned long gfx_power;
- int c_m;
- int r_t;
- u8 corr;
+ u64 last_count1;
+ unsigned long last_time1;
+ u64 last_count2;
+ struct timespec last_time2;
+ unsigned long gfx_power;
+ int c_m;
+ int r_t;
+ u8 corr;
spinlock_t *mchdev_lock;
enum no_fbc_reason no_fbc_reason;
@@ -709,20 +695,20 @@ typedef struct drm_i915_private {
struct intel_fbdev *fbdev;
} drm_i915_private_t;
-/** driver private structure attached to each drm_gem_object */
struct drm_i915_gem_object {
struct drm_gem_object base;
/** Current space allocated to this object in the GTT, if any. */
struct drm_mm_node *gtt_space;
+ struct list_head gtt_list;
/** This object's place on the active/flushing/inactive lists */
struct list_head ring_list;
struct list_head mm_list;
/** This object's place on GPU write list */
struct list_head gpu_write_list;
- /** This object's place on eviction list */
- struct list_head evict_list;
+ /** This object's place in the batchbuffer or on the eviction list */
+ struct list_head exec_list;
/**
* This is set if the object is on the active or flushing lists
@@ -738,6 +724,12 @@ struct drm_i915_gem_object {
unsigned int dirty : 1;
/**
+ * This is set if the object has been written to since the last
+ * GPU flush.
+ */
+ unsigned int pending_gpu_write : 1;
+
+ /**
* Fence register bits (if any) for this object. Will be set
* as needed when mapped into the GTT.
* Protected by dev->struct_mutex.
@@ -747,29 +739,15 @@ struct drm_i915_gem_object {
signed int fence_reg : 5;
/**
- * Used for checking the object doesn't appear more than once
- * in an execbuffer object list.
- */
- unsigned int in_execbuffer : 1;
-
- /**
* Advice: are the backing pages purgeable?
*/
unsigned int madv : 2;
/**
- * Refcount for the pages array. With the current locking scheme, there
- * are at most two concurrent users: Binding a bo to the gtt and
- * pwrite/pread using physical addresses. So two bits for a maximum
- * of two users are enough.
- */
- unsigned int pages_refcount : 2;
-#define DRM_I915_GEM_OBJECT_MAX_PAGES_REFCOUNT 0x3
-
- /**
* Current tiling mode for the object.
*/
unsigned int tiling_mode : 2;
+ unsigned int tiling_changed : 1;
/** How many users have pinned this object in GTT space. The following
* users can each hold at most one reference: pwrite/pread, pin_ioctl
@@ -783,28 +761,54 @@ struct drm_i915_gem_object {
unsigned int pin_count : 4;
#define DRM_I915_GEM_OBJECT_MAX_PIN_COUNT 0xf
- /** AGP memory structure for our GTT binding. */
- DRM_AGP_MEM *agp_mem;
+ /**
+ * Is the object at the current location in the gtt mappable and
+ * fenceable? Used to avoid costly recalculations.
+ */
+ unsigned int map_and_fenceable : 1;
+
+ /**
+ * Whether the current gtt mapping needs to be mappable (and isn't just
+ * mappable by accident). Track pin and fault separately for a more
+ * accurate mappable working set.
+ */
+ unsigned int fault_mappable : 1;
+ unsigned int pin_mappable : 1;
+
+ /*
+ * Is the GPU currently using a fence to access this buffer?
+ */
+ unsigned int pending_fenced_gpu_access:1;
+ unsigned int fenced_gpu_access:1;
struct page **pages;
/**
- * Current offset of the object in GTT space.
- *
- * This is the same as gtt_space->start
+ * DMAR support
*/
- uint32_t gtt_offset;
+ struct scatterlist *sg_list;
+ int num_sg;
- /* Which ring is refering to is this object */
- struct intel_ring_buffer *ring;
+ /**
+ * Used for performing relocations during execbuffer insertion.
+ */
+ struct hlist_node exec_node;
+ unsigned long exec_handle;
/**
- * Fake offset for use by mmap(2)
+ * Current offset of the object in GTT space.
+ *
+ * This is the same as gtt_space->start
*/
- uint64_t mmap_offset;
+ uint32_t gtt_offset;
/** Breadcrumb of last rendering to the buffer. */
uint32_t last_rendering_seqno;
+ struct intel_ring_buffer *ring;
+
+ /** Breadcrumb of last fenced GPU access to the buffer. */
+ uint32_t last_fenced_seqno;
+ struct intel_ring_buffer *last_fenced_ring;
/** Current tiling stride for the object, if it's tiled. */
uint32_t stride;
@@ -880,6 +884,68 @@ enum intel_chip_family {
CHIP_I965 = 0x08,
};
+#define INTEL_INFO(dev) (((struct drm_i915_private *) (dev)->dev_private)->info)
+
+#define IS_I830(dev) ((dev)->pci_device == 0x3577)
+#define IS_845G(dev) ((dev)->pci_device == 0x2562)
+#define IS_I85X(dev) (INTEL_INFO(dev)->is_i85x)
+#define IS_I865G(dev) ((dev)->pci_device == 0x2572)
+#define IS_I915G(dev) (INTEL_INFO(dev)->is_i915g)
+#define IS_I915GM(dev) ((dev)->pci_device == 0x2592)
+#define IS_I945G(dev) ((dev)->pci_device == 0x2772)
+#define IS_I945GM(dev) (INTEL_INFO(dev)->is_i945gm)
+#define IS_BROADWATER(dev) (INTEL_INFO(dev)->is_broadwater)
+#define IS_CRESTLINE(dev) (INTEL_INFO(dev)->is_crestline)
+#define IS_GM45(dev) ((dev)->pci_device == 0x2A42)
+#define IS_G4X(dev) (INTEL_INFO(dev)->is_g4x)
+#define IS_PINEVIEW_G(dev) ((dev)->pci_device == 0xa001)
+#define IS_PINEVIEW_M(dev) ((dev)->pci_device == 0xa011)
+#define IS_PINEVIEW(dev) (INTEL_INFO(dev)->is_pineview)
+#define IS_G33(dev) (INTEL_INFO(dev)->is_g33)
+#define IS_IRONLAKE_D(dev) ((dev)->pci_device == 0x0042)
+#define IS_IRONLAKE_M(dev) ((dev)->pci_device == 0x0046)
+#define IS_MOBILE(dev) (INTEL_INFO(dev)->is_mobile)
+
+#define IS_GEN2(dev) (INTEL_INFO(dev)->gen == 2)
+#define IS_GEN3(dev) (INTEL_INFO(dev)->gen == 3)
+#define IS_GEN4(dev) (INTEL_INFO(dev)->gen == 4)
+#define IS_GEN5(dev) (INTEL_INFO(dev)->gen == 5)
+#define IS_GEN6(dev) (INTEL_INFO(dev)->gen == 6)
+
+#define HAS_BSD(dev) (INTEL_INFO(dev)->has_bsd_ring)
+#define HAS_BLT(dev) (INTEL_INFO(dev)->has_blt_ring)
+#define I915_NEED_GFX_HWS(dev) (INTEL_INFO(dev)->need_gfx_hws)
+
+#define HAS_OVERLAY(dev) (INTEL_INFO(dev)->has_overlay)
+#define OVERLAY_NEEDS_PHYSICAL(dev) (INTEL_INFO(dev)->overlay_needs_physical)
+
+/* With the 945 and later, Y tiling got adjusted so that it was 32 128-byte
+ * rows, which changed the alignment requirements and fence programming.
+ */
+#define HAS_128_BYTE_Y_TILING(dev) (!IS_GEN2(dev) && !(IS_I915G(dev) || \
+ IS_I915GM(dev)))
+#define SUPPORTS_DIGITAL_OUTPUTS(dev) (!IS_GEN2(dev) && !IS_PINEVIEW(dev))
+#define SUPPORTS_INTEGRATED_HDMI(dev) (IS_G4X(dev) || IS_GEN5(dev))
+#define SUPPORTS_INTEGRATED_DP(dev) (IS_G4X(dev) || IS_GEN5(dev))
+#define SUPPORTS_EDP(dev) (IS_IRONLAKE_M(dev))
+#define SUPPORTS_TV(dev) (INTEL_INFO(dev)->supports_tv)
+#define I915_HAS_HOTPLUG(dev) (INTEL_INFO(dev)->has_hotplug)
+/* dsparb controlled by hw only */
+#define DSPARB_HWCONTROL(dev) (IS_G4X(dev) || IS_IRONLAKE(dev))
+
+#define HAS_FW_BLC(dev) (INTEL_INFO(dev)->gen > 2)
+#define HAS_PIPE_CXSR(dev) (INTEL_INFO(dev)->has_pipe_cxsr)
+#define I915_HAS_FBC(dev) (INTEL_INFO(dev)->has_fbc)
+
+#define HAS_PCH_SPLIT(dev) (IS_GEN5(dev) || IS_GEN6(dev))
+#define HAS_PIPE_CONTROL(dev) (IS_GEN5(dev) || IS_GEN6(dev))
+
+#define INTEL_PCH_TYPE(dev) (((struct drm_i915_private *)(dev)->dev_private)->pch_type)
+#define HAS_PCH_CPT(dev) (INTEL_PCH_TYPE(dev) == PCH_CPT)
+#define HAS_PCH_IBX(dev) (INTEL_PCH_TYPE(dev) == PCH_IBX)
+
+#include "i915_trace.h"
+
extern struct drm_ioctl_desc i915_ioctls[];
extern int i915_max_ioctl;
extern unsigned int i915_fbpercrtc;
@@ -907,8 +973,8 @@ extern int i915_driver_device_is_agp(struct drm_device * dev);
extern long i915_compat_ioctl(struct file *filp, unsigned int cmd,
unsigned long arg);
extern int i915_emit_box(struct drm_device *dev,
- struct drm_clip_rect *boxes,
- int i, int DR1, int DR4);
+ struct drm_clip_rect *box,
+ int DR1, int DR4);
extern int i915_reset(struct drm_device *dev, u8 flags);
extern unsigned long i915_chipset_val(struct drm_i915_private *dev_priv);
extern unsigned long i915_mch_val(struct drm_i915_private *dev_priv);
@@ -918,6 +984,7 @@ extern void i915_update_gfx_val(struct drm_i915_private *dev_priv);
/* i915_irq.c */
void i915_hangcheck_elapsed(unsigned long data);
+void i915_handle_error(struct drm_device *dev, bool wedged);
extern int i915_irq_emit(struct drm_device *dev, void *data,
struct drm_file *file_priv);
extern int i915_irq_wait(struct drm_device *dev, void *data,
@@ -953,6 +1020,13 @@ void
i915_disable_pipestat(drm_i915_private_t *dev_priv, int pipe, u32 mask);
void intel_enable_asle (struct drm_device *dev);
+int i915_get_vblank_timestamp(struct drm_device *dev, int crtc,
+ int *max_error,
+ struct timeval *vblank_time,
+ unsigned flags);
+
+int i915_get_crtc_scanoutpos(struct drm_device *dev, int pipe,
+ int *vpos, int *hpos);
#ifdef CONFIG_DEBUG_FS
extern void i915_destroy_error_state(struct drm_device *dev);
@@ -1017,15 +1091,28 @@ int i915_gem_get_aperture_ioctl(struct drm_device *dev, void *data,
struct drm_file *file_priv);
void i915_gem_load(struct drm_device *dev);
int i915_gem_init_object(struct drm_gem_object *obj);
-struct drm_gem_object * i915_gem_alloc_object(struct drm_device *dev,
- size_t size);
+void i915_gem_flush_ring(struct drm_device *dev,
+ struct intel_ring_buffer *ring,
+ uint32_t invalidate_domains,
+ uint32_t flush_domains);
+struct drm_i915_gem_object *i915_gem_alloc_object(struct drm_device *dev,
+ size_t size);
void i915_gem_free_object(struct drm_gem_object *obj);
-int i915_gem_object_pin(struct drm_gem_object *obj, uint32_t alignment);
-void i915_gem_object_unpin(struct drm_gem_object *obj);
-int i915_gem_object_unbind(struct drm_gem_object *obj);
-void i915_gem_release_mmap(struct drm_gem_object *obj);
+int __must_check i915_gem_object_pin(struct drm_i915_gem_object *obj,
+ uint32_t alignment,
+ bool map_and_fenceable);
+void i915_gem_object_unpin(struct drm_i915_gem_object *obj);
+int __must_check i915_gem_object_unbind(struct drm_i915_gem_object *obj);
+void i915_gem_release_mmap(struct drm_i915_gem_object *obj);
void i915_gem_lastclose(struct drm_device *dev);
+int __must_check i915_mutex_lock_interruptible(struct drm_device *dev);
+int __must_check i915_gem_object_wait_rendering(struct drm_i915_gem_object *obj,
+ bool interruptible);
+void i915_gem_object_move_to_active(struct drm_i915_gem_object *obj,
+ struct intel_ring_buffer *ring,
+ u32 seqno);
+
/**
* Returns true if seq1 is later than seq2.
*/
@@ -1035,73 +1122,88 @@ i915_seqno_passed(uint32_t seq1, uint32_t seq2)
return (int32_t)(seq1 - seq2) >= 0;
}
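/*
 * Illustrative sketch, not part of the patch: the signed-difference
 * comparison above stays correct across u32 wraparound provided the
 * two seqnos are less than 2^31 apart, e.g.
 *
 *	i915_seqno_passed(2, 1);           // true, 2 is later
 *	i915_seqno_passed(1, 2);           // false
 *	i915_seqno_passed(5, 0xfffffffb);  // true: (int32_t)(5 - 0xfffffffb) == 10
 */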
-int i915_gem_object_get_fence_reg(struct drm_gem_object *obj,
- bool interruptible);
-int i915_gem_object_put_fence_reg(struct drm_gem_object *obj,
- bool interruptible);
+static inline u32
+i915_gem_next_request_seqno(struct drm_device *dev,
+ struct intel_ring_buffer *ring)
+{
+ drm_i915_private_t *dev_priv = dev->dev_private;
+ return ring->outstanding_lazy_request = dev_priv->next_seqno;
+}
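/*
 * Illustrative sketch, not part of the patch: a caller pairs the lazy
 * seqno with i915_gem_object_move_to_active(), whose prototype is
 * declared above, e.g.
 *
 *	u32 seqno = i915_gem_next_request_seqno(dev, ring);
 *	i915_gem_object_move_to_active(obj, ring, seqno);
 */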
+
+int __must_check i915_gem_object_get_fence(struct drm_i915_gem_object *obj,
+ struct intel_ring_buffer *pipelined,
+ bool interruptible);
+int __must_check i915_gem_object_put_fence(struct drm_i915_gem_object *obj);
+
void i915_gem_retire_requests(struct drm_device *dev);
void i915_gem_reset(struct drm_device *dev);
-void i915_gem_clflush_object(struct drm_gem_object *obj);
-int i915_gem_object_set_domain(struct drm_gem_object *obj,
- uint32_t read_domains,
- uint32_t write_domain);
-int i915_gem_object_flush_gpu(struct drm_i915_gem_object *obj,
- bool interruptible);
-int i915_gem_init_ringbuffer(struct drm_device *dev);
+void i915_gem_clflush_object(struct drm_i915_gem_object *obj);
+int __must_check i915_gem_object_set_domain(struct drm_i915_gem_object *obj,
+ uint32_t read_domains,
+ uint32_t write_domain);
+int __must_check i915_gem_object_flush_gpu(struct drm_i915_gem_object *obj,
+ bool interruptible);
+int __must_check i915_gem_init_ringbuffer(struct drm_device *dev);
void i915_gem_cleanup_ringbuffer(struct drm_device *dev);
-int i915_gem_do_init(struct drm_device *dev, unsigned long start,
- unsigned long end);
-int i915_gpu_idle(struct drm_device *dev);
-int i915_gem_idle(struct drm_device *dev);
-uint32_t i915_add_request(struct drm_device *dev,
- struct drm_file *file_priv,
- struct drm_i915_gem_request *request,
- struct intel_ring_buffer *ring);
-int i915_do_wait_request(struct drm_device *dev,
- uint32_t seqno,
- bool interruptible,
- struct intel_ring_buffer *ring);
+void i915_gem_do_init(struct drm_device *dev,
+ unsigned long start,
+ unsigned long mappable_end,
+ unsigned long end);
+int __must_check i915_gpu_idle(struct drm_device *dev);
+int __must_check i915_gem_idle(struct drm_device *dev);
+int __must_check i915_add_request(struct drm_device *dev,
+ struct drm_file *file_priv,
+ struct drm_i915_gem_request *request,
+ struct intel_ring_buffer *ring);
+int __must_check i915_do_wait_request(struct drm_device *dev,
+ uint32_t seqno,
+ bool interruptible,
+ struct intel_ring_buffer *ring);
int i915_gem_fault(struct vm_area_struct *vma, struct vm_fault *vmf);
-int i915_gem_object_set_to_gtt_domain(struct drm_gem_object *obj,
- int write);
-int i915_gem_object_set_to_display_plane(struct drm_gem_object *obj,
- bool pipelined);
+int __must_check
+i915_gem_object_set_to_gtt_domain(struct drm_i915_gem_object *obj,
+ bool write);
+int __must_check
+i915_gem_object_set_to_display_plane(struct drm_i915_gem_object *obj,
+ struct intel_ring_buffer *pipelined);
int i915_gem_attach_phys_object(struct drm_device *dev,
- struct drm_gem_object *obj,
+ struct drm_i915_gem_object *obj,
int id,
int align);
void i915_gem_detach_phys_object(struct drm_device *dev,
- struct drm_gem_object *obj);
+ struct drm_i915_gem_object *obj);
void i915_gem_free_all_phys_object(struct drm_device *dev);
-void i915_gem_release(struct drm_device * dev, struct drm_file *file_priv);
+void i915_gem_release(struct drm_device *dev, struct drm_file *file);
-void i915_gem_shrinker_init(void);
-void i915_gem_shrinker_exit(void);
+/* i915_gem_gtt.c */
+void i915_gem_restore_gtt_mappings(struct drm_device *dev);
+int __must_check i915_gem_gtt_bind_object(struct drm_i915_gem_object *obj);
+void i915_gem_gtt_unbind_object(struct drm_i915_gem_object *obj);
/* i915_gem_evict.c */
-int i915_gem_evict_something(struct drm_device *dev, int min_size, unsigned alignment);
-int i915_gem_evict_everything(struct drm_device *dev);
-int i915_gem_evict_inactive(struct drm_device *dev);
+int __must_check i915_gem_evict_something(struct drm_device *dev, int min_size,
+ unsigned alignment, bool mappable);
+int __must_check i915_gem_evict_everything(struct drm_device *dev,
+ bool purgeable_only);
+int __must_check i915_gem_evict_inactive(struct drm_device *dev,
+ bool purgeable_only);
/* i915_gem_tiling.c */
void i915_gem_detect_bit_6_swizzle(struct drm_device *dev);
-void i915_gem_object_do_bit_17_swizzle(struct drm_gem_object *obj);
-void i915_gem_object_save_bit_17_swizzle(struct drm_gem_object *obj);
-bool i915_tiling_ok(struct drm_device *dev, int stride, int size,
- int tiling_mode);
-bool i915_gem_object_fence_offset_ok(struct drm_gem_object *obj,
- int tiling_mode);
+void i915_gem_object_do_bit_17_swizzle(struct drm_i915_gem_object *obj);
+void i915_gem_object_save_bit_17_swizzle(struct drm_i915_gem_object *obj);
/* i915_gem_debug.c */
-void i915_gem_dump_object(struct drm_gem_object *obj, int len,
+void i915_gem_dump_object(struct drm_i915_gem_object *obj, int len,
const char *where, uint32_t mark);
#if WATCH_LISTS
int i915_verify_lists(struct drm_device *dev);
#else
#define i915_verify_lists(dev) 0
#endif
-void i915_gem_object_check_coherency(struct drm_gem_object *obj, int handle);
-void i915_gem_dump_object(struct drm_gem_object *obj, int len,
+void i915_gem_object_check_coherency(struct drm_i915_gem_object *obj,
+ int handle);
+void i915_gem_dump_object(struct drm_i915_gem_object *obj, int len,
const char *where, uint32_t mark);
/* i915_debugfs.c */
@@ -1163,6 +1265,7 @@ extern void intel_disable_fbc(struct drm_device *dev);
extern void intel_enable_fbc(struct drm_crtc *crtc, unsigned long interval);
extern bool intel_fbc_enabled(struct drm_device *dev);
extern bool ironlake_set_drps(struct drm_device *dev, u8 val);
+extern void gen6_set_rps(struct drm_device *dev, u8 val);
extern void intel_detect_pch (struct drm_device *dev);
extern int intel_trans_dp_port_sel (struct drm_crtc *crtc);
@@ -1170,79 +1273,120 @@ extern int intel_trans_dp_port_sel (struct drm_crtc *crtc);
#ifdef CONFIG_DEBUG_FS
extern struct intel_overlay_error_state *intel_overlay_capture_error_state(struct drm_device *dev);
extern void intel_overlay_print_error_state(struct seq_file *m, struct intel_overlay_error_state *error);
+
+extern struct intel_display_error_state *intel_display_capture_error_state(struct drm_device *dev);
+extern void intel_display_print_error_state(struct seq_file *m,
+ struct drm_device *dev,
+ struct intel_display_error_state *error);
#endif
+#define LP_RING(d) (&((struct drm_i915_private *)(d))->ring[RCS])
+
+#define BEGIN_LP_RING(n) \
+ intel_ring_begin(LP_RING(dev_priv), (n))
+
+#define OUT_RING(x) \
+ intel_ring_emit(LP_RING(dev_priv), x)
+
+#define ADVANCE_LP_RING() \
+ intel_ring_advance(LP_RING(dev_priv))
+
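/*
 * Illustrative sketch, not part of the patch: emitting two dwords on
 * the legacy render ring with the macros above (MI_FLUSH and MI_NOOP
 * come from i915_reg.h); intel_ring_begin() returns 0 on success:
 *
 *	if (BEGIN_LP_RING(2) == 0) {
 *		OUT_RING(MI_FLUSH);
 *		OUT_RING(MI_NOOP);
 *		ADVANCE_LP_RING();
 *	}
 */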
/**
* Lock test for when it's just for synchronization of ring access.
*
* In that case, we don't need to do it when GEM is initialized as nobody else
* has access to the ring.
*/
-#define RING_LOCK_TEST_WITH_RETURN(dev, file_priv) do { \
- if (((drm_i915_private_t *)dev->dev_private)->render_ring.gem_object \
- == NULL) \
- LOCK_TEST_WITH_RETURN(dev, file_priv); \
+#define RING_LOCK_TEST_WITH_RETURN(dev, file) do { \
+ if (LP_RING(dev->dev_private)->obj == NULL) \
+ LOCK_TEST_WITH_RETURN(dev, file); \
} while (0)
-static inline u32 i915_read(struct drm_i915_private *dev_priv, u32 reg)
+
+#define __i915_read(x, y) \
+static inline u##x i915_read##x(struct drm_i915_private *dev_priv, u32 reg) { \
+ u##x val = read##y(dev_priv->regs + reg); \
+ trace_i915_reg_rw('R', reg, val, sizeof(val)); \
+ return val; \
+}
+__i915_read(8, b)
+__i915_read(16, w)
+__i915_read(32, l)
+__i915_read(64, q)
+#undef __i915_read
+
+#define __i915_write(x, y) \
+static inline void i915_write##x(struct drm_i915_private *dev_priv, u32 reg, u##x val) { \
+ trace_i915_reg_rw('W', reg, val, sizeof(val)); \
+ write##y(val, dev_priv->regs + reg); \
+}
+__i915_write(8, b)
+__i915_write(16, w)
+__i915_write(32, l)
+__i915_write(64, q)
+#undef __i915_write
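/*
 * Illustrative sketch, not part of the patch: __i915_read(32, l) above
 * expands to roughly
 *
 *	static inline u32 i915_read32(struct drm_i915_private *dev_priv,
 *				      u32 reg)
 *	{
 *		u32 val = readl(dev_priv->regs + reg);
 *		trace_i915_reg_rw('R', reg, val, sizeof(val));
 *		return val;
 *	}
 *
 * so all four MMIO widths share one traced definition per direction.
 */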
+
+#define I915_READ8(reg) i915_read8(dev_priv, (reg))
+#define I915_WRITE8(reg, val) i915_write8(dev_priv, (reg), (val))
+
+#define I915_READ16(reg) i915_read16(dev_priv, (reg))
+#define I915_WRITE16(reg, val) i915_write16(dev_priv, (reg), (val))
+#define I915_READ16_NOTRACE(reg) readw(dev_priv->regs + (reg))
+#define I915_WRITE16_NOTRACE(reg, val) writew(val, dev_priv->regs + (reg))
+
+#define I915_READ(reg) i915_read32(dev_priv, (reg))
+#define I915_WRITE(reg, val) i915_write32(dev_priv, (reg), (val))
+#define I915_READ_NOTRACE(reg) readl(dev_priv->regs + (reg))
+#define I915_WRITE_NOTRACE(reg, val) writel(val, dev_priv->regs + (reg))
+
+#define I915_WRITE64(reg, val) i915_write64(dev_priv, (reg), (val))
+#define I915_READ64(reg) i915_read64(dev_priv, (reg))
+
+#define POSTING_READ(reg) (void)I915_READ_NOTRACE(reg)
+#define POSTING_READ16(reg) (void)I915_READ16_NOTRACE(reg)
+
+
+/* On the SNB platform, the forcewake bit must be set before reading ring
+ * registers, to prevent the GT core from powering down and returning
+ * stale values.
+ */
+void __gen6_force_wake_get(struct drm_i915_private *dev_priv);
+void __gen6_force_wake_put(struct drm_i915_private *dev_priv);
+static inline u32 i915_safe_read(struct drm_i915_private *dev_priv, u32 reg)
{
u32 val;
- val = readl(dev_priv->regs + reg);
- if (dev_priv->debug_flags & I915_DEBUG_READ)
- printk(KERN_ERR "read 0x%08x from 0x%08x\n", val, reg);
+ if (dev_priv->info->gen >= 6) {
+ __gen6_force_wake_get(dev_priv);
+ val = I915_READ(reg);
+ __gen6_force_wake_put(dev_priv);
+ } else
+ val = I915_READ(reg);
+
return val;
}
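/*
 * Illustrative sketch, not part of the patch: a gen6 caller doing
 * several ring-register reads can hold forcewake across the batch
 * rather than paying the get/put per register (RING_HEAD/RING_TAIL
 * are assumed from intel_ringbuffer.h):
 *
 *	__gen6_force_wake_get(dev_priv);
 *	head = I915_READ(RING_HEAD(ring->mmio_base));
 *	tail = I915_READ(RING_TAIL(ring->mmio_base));
 *	__gen6_force_wake_put(dev_priv);
 */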
-static inline void i915_write(struct drm_i915_private *dev_priv, u32 reg,
- u32 val)
+static inline void
+i915_write(struct drm_i915_private *dev_priv, u32 reg, u64 val, int len)
{
- writel(val, dev_priv->regs + reg);
- if (dev_priv->debug_flags & I915_DEBUG_WRITE)
- printk(KERN_ERR "wrote 0x%08x to 0x%08x\n", val, reg);
+ /* Trace the write operation before the real write */
+ trace_i915_reg_rw('W', reg, val, len);
+ switch (len) {
+ case 8:
+ writeq(val, dev_priv->regs + reg);
+ break;
+ case 4:
+ writel(val, dev_priv->regs + reg);
+ break;
+ case 2:
+ writew(val, dev_priv->regs + reg);
+ break;
+ case 1:
+ writeb(val, dev_priv->regs + reg);
+ break;
+ }
}
-#define I915_READ(reg) i915_read(dev_priv, (reg))
-#define I915_WRITE(reg, val) i915_write(dev_priv, (reg), (val))
-#define I915_READ16(reg) readw(dev_priv->regs + (reg))
-#define I915_WRITE16(reg, val) writel(val, dev_priv->regs + (reg))
-#define I915_READ8(reg) readb(dev_priv->regs + (reg))
-#define I915_WRITE8(reg, val) writeb(val, dev_priv->regs + (reg))
-#define I915_WRITE64(reg, val) writeq(val, dev_priv->regs + (reg))
-#define I915_READ64(reg) readq(dev_priv->regs + (reg))
-#define POSTING_READ(reg) (void)I915_READ(reg)
-#define POSTING_READ16(reg) (void)I915_READ16(reg)
-
-#define I915_DEBUG_ENABLE_IO() (dev_priv->debug_flags |= I915_DEBUG_READ | \
- I915_DEBUG_WRITE)
-#define I915_DEBUG_DISABLE_IO() (dev_priv->debug_flags &= ~(I915_DEBUG_READ | \
- I915_DEBUG_WRITE))
-
-#define I915_VERBOSE 0
-
-#define BEGIN_LP_RING(n) do { \
- drm_i915_private_t *dev_priv__ = dev->dev_private; \
- if (I915_VERBOSE) \
- DRM_DEBUG(" BEGIN_LP_RING %x\n", (int)(n)); \
- intel_ring_begin(dev, &dev_priv__->render_ring, (n)); \
-} while (0)
-
-
-#define OUT_RING(x) do { \
- drm_i915_private_t *dev_priv__ = dev->dev_private; \
- if (I915_VERBOSE) \
- DRM_DEBUG(" OUT_RING %x\n", (int)(x)); \
- intel_ring_emit(dev, &dev_priv__->render_ring, x); \
-} while (0)
-
-#define ADVANCE_LP_RING() do { \
- drm_i915_private_t *dev_priv__ = dev->dev_private; \
- if (I915_VERBOSE) \
- DRM_DEBUG("ADVANCE_LP_RING %x\n", \
- dev_priv__->render_ring.tail); \
- intel_ring_advance(dev, &dev_priv__->render_ring); \
-} while(0)
-
/**
* Reads a dword out of the status page, which is written to from the command
* queue by automatic updates, MI_REPORT_HEAD, MI_STORE_DATA_INDEX, or
@@ -1259,72 +1403,9 @@ static inline void i915_write(struct drm_i915_private *dev_priv, u32 reg,
* The area from dword 0x20 to 0x3ff is available for driver usage.
*/
#define READ_HWSP(dev_priv, reg) (((volatile u32 *)\
- (dev_priv->render_ring.status_page.page_addr))[reg])
+ (LP_RING(dev_priv)->status_page.page_addr))[reg])
#define READ_BREADCRUMB(dev_priv) READ_HWSP(dev_priv, I915_BREADCRUMB_INDEX)
#define I915_GEM_HWS_INDEX 0x20
#define I915_BREADCRUMB_INDEX 0x21
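/*
 * Illustrative sketch, not part of the patch: the legacy wait path
 * compares the breadcrumb against a target sequence number, roughly
 *
 *	ret = wait_event_interruptible(ring->irq_queue,
 *			READ_BREADCRUMB(dev_priv) >= irq_nr);
 *
 * where irq_nr is the value previously emitted with an
 * MI_STORE_DWORD_INDEX into I915_BREADCRUMB_INDEX.
 */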
-#define INTEL_INFO(dev) (((struct drm_i915_private *) (dev)->dev_private)->info)
-
-#define IS_I830(dev) ((dev)->pci_device == 0x3577)
-#define IS_845G(dev) ((dev)->pci_device == 0x2562)
-#define IS_I85X(dev) (INTEL_INFO(dev)->is_i85x)
-#define IS_I865G(dev) ((dev)->pci_device == 0x2572)
-#define IS_I915G(dev) (INTEL_INFO(dev)->is_i915g)
-#define IS_I915GM(dev) ((dev)->pci_device == 0x2592)
-#define IS_I945G(dev) ((dev)->pci_device == 0x2772)
-#define IS_I945GM(dev) (INTEL_INFO(dev)->is_i945gm)
-#define IS_BROADWATER(dev) (INTEL_INFO(dev)->is_broadwater)
-#define IS_CRESTLINE(dev) (INTEL_INFO(dev)->is_crestline)
-#define IS_GM45(dev) ((dev)->pci_device == 0x2A42)
-#define IS_G4X(dev) (INTEL_INFO(dev)->is_g4x)
-#define IS_PINEVIEW_G(dev) ((dev)->pci_device == 0xa001)
-#define IS_PINEVIEW_M(dev) ((dev)->pci_device == 0xa011)
-#define IS_PINEVIEW(dev) (INTEL_INFO(dev)->is_pineview)
-#define IS_G33(dev) (INTEL_INFO(dev)->is_g33)
-#define IS_IRONLAKE_D(dev) ((dev)->pci_device == 0x0042)
-#define IS_IRONLAKE_M(dev) ((dev)->pci_device == 0x0046)
-#define IS_MOBILE(dev) (INTEL_INFO(dev)->is_mobile)
-
-#define IS_GEN2(dev) (INTEL_INFO(dev)->gen == 2)
-#define IS_GEN3(dev) (INTEL_INFO(dev)->gen == 3)
-#define IS_GEN4(dev) (INTEL_INFO(dev)->gen == 4)
-#define IS_GEN5(dev) (INTEL_INFO(dev)->gen == 5)
-#define IS_GEN6(dev) (INTEL_INFO(dev)->gen == 6)
-
-#define HAS_BSD(dev) (INTEL_INFO(dev)->has_bsd_ring)
-#define HAS_BLT(dev) (INTEL_INFO(dev)->has_blt_ring)
-#define I915_NEED_GFX_HWS(dev) (INTEL_INFO(dev)->need_gfx_hws)
-
-#define HAS_OVERLAY(dev) (INTEL_INFO(dev)->has_overlay)
-#define OVERLAY_NEEDS_PHYSICAL(dev) (INTEL_INFO(dev)->overlay_needs_physical)
-
-/* With the 945 and later, Y tiling got adjusted so that it was 32 128-byte
- * rows, which changed the alignment requirements and fence programming.
- */
-#define HAS_128_BYTE_Y_TILING(dev) (!IS_GEN2(dev) && !(IS_I915G(dev) || \
- IS_I915GM(dev)))
-#define SUPPORTS_DIGITAL_OUTPUTS(dev) (!IS_GEN2(dev) && !IS_PINEVIEW(dev))
-#define SUPPORTS_INTEGRATED_HDMI(dev) (IS_G4X(dev) || IS_GEN5(dev))
-#define SUPPORTS_INTEGRATED_DP(dev) (IS_G4X(dev) || IS_GEN5(dev))
-#define SUPPORTS_EDP(dev) (IS_IRONLAKE_M(dev))
-#define SUPPORTS_TV(dev) (INTEL_INFO(dev)->supports_tv)
-#define I915_HAS_HOTPLUG(dev) (INTEL_INFO(dev)->has_hotplug)
-/* dsparb controlled by hw only */
-#define DSPARB_HWCONTROL(dev) (IS_G4X(dev) || IS_IRONLAKE(dev))
-
-#define HAS_FW_BLC(dev) (INTEL_INFO(dev)->gen > 2)
-#define HAS_PIPE_CXSR(dev) (INTEL_INFO(dev)->has_pipe_cxsr)
-#define I915_HAS_FBC(dev) (INTEL_INFO(dev)->has_fbc)
-#define I915_HAS_RC6(dev) (INTEL_INFO(dev)->has_rc6)
-
-#define HAS_PCH_SPLIT(dev) (IS_GEN5(dev) || IS_GEN6(dev))
-#define HAS_PIPE_CONTROL(dev) (IS_GEN5(dev) || IS_GEN6(dev))
-
-#define INTEL_PCH_TYPE(dev) (((struct drm_i915_private *)(dev)->dev_private)->pch_type)
-#define HAS_PCH_CPT(dev) (INTEL_PCH_TYPE(dev) == PCH_CPT)
-#define HAS_PCH_IBX(dev) (INTEL_PCH_TYPE(dev) == PCH_IBX)
-
-#define PRIMARY_RINGBUFFER_SIZE (128*1024)
-
#endif
diff --git a/drivers/gpu/drm/i915/i915_gem.c b/drivers/gpu/drm/i915/i915_gem.c
index 275ec6ed43a..c79c0b62ef6 100644
--- a/drivers/gpu/drm/i915/i915_gem.c
+++ b/drivers/gpu/drm/i915/i915_gem.c
@@ -34,38 +34,31 @@
#include <linux/slab.h>
#include <linux/swap.h>
#include <linux/pci.h>
-#include <linux/intel-gtt.h>
-static uint32_t i915_gem_get_gtt_alignment(struct drm_gem_object *obj);
-
-static int i915_gem_object_flush_gpu_write_domain(struct drm_gem_object *obj);
-static void i915_gem_object_flush_gtt_write_domain(struct drm_gem_object *obj);
-static void i915_gem_object_flush_cpu_write_domain(struct drm_gem_object *obj);
-static int i915_gem_object_set_to_cpu_domain(struct drm_gem_object *obj,
- int write);
-static int i915_gem_object_set_cpu_read_domain_range(struct drm_gem_object *obj,
+static void i915_gem_object_flush_gpu_write_domain(struct drm_i915_gem_object *obj);
+static void i915_gem_object_flush_gtt_write_domain(struct drm_i915_gem_object *obj);
+static void i915_gem_object_flush_cpu_write_domain(struct drm_i915_gem_object *obj);
+static int i915_gem_object_set_to_cpu_domain(struct drm_i915_gem_object *obj,
+ bool write);
+static int i915_gem_object_set_cpu_read_domain_range(struct drm_i915_gem_object *obj,
uint64_t offset,
uint64_t size);
-static void i915_gem_object_set_to_full_cpu_read_domain(struct drm_gem_object *obj);
-static int i915_gem_object_wait_rendering(struct drm_gem_object *obj,
- bool interruptible);
-static int i915_gem_object_bind_to_gtt(struct drm_gem_object *obj,
- unsigned alignment);
-static void i915_gem_clear_fence_reg(struct drm_gem_object *obj);
-static int i915_gem_phys_pwrite(struct drm_device *dev, struct drm_gem_object *obj,
+static void i915_gem_object_set_to_full_cpu_read_domain(struct drm_i915_gem_object *obj);
+static int i915_gem_object_bind_to_gtt(struct drm_i915_gem_object *obj,
+ unsigned alignment,
+ bool map_and_fenceable);
+static void i915_gem_clear_fence_reg(struct drm_device *dev,
+ struct drm_i915_fence_reg *reg);
+static int i915_gem_phys_pwrite(struct drm_device *dev,
+ struct drm_i915_gem_object *obj,
struct drm_i915_gem_pwrite *args,
- struct drm_file *file_priv);
-static void i915_gem_free_object_tail(struct drm_gem_object *obj);
+ struct drm_file *file);
+static void i915_gem_free_object_tail(struct drm_i915_gem_object *obj);
-static int
-i915_gem_object_get_pages(struct drm_gem_object *obj,
- gfp_t gfpmask);
+static int i915_gem_inactive_shrink(struct shrinker *shrinker,
+ int nr_to_scan,
+ gfp_t gfp_mask);
-static void
-i915_gem_object_put_pages(struct drm_gem_object *obj);
-
-static LIST_HEAD(shrink_list);
-static DEFINE_SPINLOCK(shrink_list_lock);
/* some bookkeeping */
static void i915_gem_info_add_obj(struct drm_i915_private *dev_priv,
@@ -82,34 +75,6 @@ static void i915_gem_info_remove_obj(struct drm_i915_private *dev_priv,
dev_priv->mm.object_memory -= size;
}
-static void i915_gem_info_add_gtt(struct drm_i915_private *dev_priv,
- size_t size)
-{
- dev_priv->mm.gtt_count++;
- dev_priv->mm.gtt_memory += size;
-}
-
-static void i915_gem_info_remove_gtt(struct drm_i915_private *dev_priv,
- size_t size)
-{
- dev_priv->mm.gtt_count--;
- dev_priv->mm.gtt_memory -= size;
-}
-
-static void i915_gem_info_add_pin(struct drm_i915_private *dev_priv,
- size_t size)
-{
- dev_priv->mm.pin_count++;
- dev_priv->mm.pin_memory += size;
-}
-
-static void i915_gem_info_remove_pin(struct drm_i915_private *dev_priv,
- size_t size)
-{
- dev_priv->mm.pin_count--;
- dev_priv->mm.pin_memory -= size;
-}
-
int
i915_gem_check_is_wedged(struct drm_device *dev)
{
@@ -140,7 +105,7 @@ i915_gem_check_is_wedged(struct drm_device *dev)
return -EIO;
}
-static int i915_mutex_lock_interruptible(struct drm_device *dev)
+int i915_mutex_lock_interruptible(struct drm_device *dev)
{
struct drm_i915_private *dev_priv = dev->dev_private;
int ret;
@@ -163,75 +128,76 @@ static int i915_mutex_lock_interruptible(struct drm_device *dev)
}
static inline bool
-i915_gem_object_is_inactive(struct drm_i915_gem_object *obj_priv)
+i915_gem_object_is_inactive(struct drm_i915_gem_object *obj)
{
- return obj_priv->gtt_space &&
- !obj_priv->active &&
- obj_priv->pin_count == 0;
+ return obj->gtt_space && !obj->active && obj->pin_count == 0;
}
-int i915_gem_do_init(struct drm_device *dev,
- unsigned long start,
- unsigned long end)
+void i915_gem_do_init(struct drm_device *dev,
+ unsigned long start,
+ unsigned long mappable_end,
+ unsigned long end)
{
drm_i915_private_t *dev_priv = dev->dev_private;
- if (start >= end ||
- (start & (PAGE_SIZE - 1)) != 0 ||
- (end & (PAGE_SIZE - 1)) != 0) {
- return -EINVAL;
- }
-
drm_mm_init(&dev_priv->mm.gtt_space, start,
end - start);
dev_priv->mm.gtt_total = end - start;
-
- return 0;
+ dev_priv->mm.mappable_gtt_total = min(end, mappable_end) - start;
+ dev_priv->mm.gtt_mappable_end = mappable_end;
}
int
i915_gem_init_ioctl(struct drm_device *dev, void *data,
- struct drm_file *file_priv)
+ struct drm_file *file)
{
struct drm_i915_gem_init *args = data;
- int ret;
+
+ if (args->gtt_start >= args->gtt_end ||
+ (args->gtt_end | args->gtt_start) & (PAGE_SIZE - 1))
+ return -EINVAL;
mutex_lock(&dev->struct_mutex);
- ret = i915_gem_do_init(dev, args->gtt_start, args->gtt_end);
+ i915_gem_do_init(dev, args->gtt_start, args->gtt_end, args->gtt_end);
mutex_unlock(&dev->struct_mutex);
- return ret;
+ return 0;
}
int
i915_gem_get_aperture_ioctl(struct drm_device *dev, void *data,
- struct drm_file *file_priv)
+ struct drm_file *file)
{
struct drm_i915_private *dev_priv = dev->dev_private;
struct drm_i915_gem_get_aperture *args = data;
+ struct drm_i915_gem_object *obj;
+ size_t pinned;
if (!(dev->driver->driver_features & DRIVER_GEM))
return -ENODEV;
+ pinned = 0;
mutex_lock(&dev->struct_mutex);
- args->aper_size = dev_priv->mm.gtt_total;
- args->aper_available_size = args->aper_size - dev_priv->mm.pin_memory;
+ list_for_each_entry(obj, &dev_priv->mm.pinned_list, mm_list)
+ pinned += obj->gtt_space->size;
mutex_unlock(&dev->struct_mutex);
+ args->aper_size = dev_priv->mm.gtt_total;
+ args->aper_available_size = args->aper_size - pinned;
+
return 0;
}
-
/**
* Creates a new mm object and returns a handle to it.
*/
int
i915_gem_create_ioctl(struct drm_device *dev, void *data,
- struct drm_file *file_priv)
+ struct drm_file *file)
{
struct drm_i915_gem_create *args = data;
- struct drm_gem_object *obj;
+ struct drm_i915_gem_object *obj;
int ret;
u32 handle;
@@ -242,45 +208,28 @@ i915_gem_create_ioctl(struct drm_device *dev, void *data,
if (obj == NULL)
return -ENOMEM;
- ret = drm_gem_handle_create(file_priv, obj, &handle);
+ ret = drm_gem_handle_create(file, &obj->base, &handle);
if (ret) {
- drm_gem_object_release(obj);
- i915_gem_info_remove_obj(dev->dev_private, obj->size);
+ drm_gem_object_release(&obj->base);
+ i915_gem_info_remove_obj(dev->dev_private, obj->base.size);
kfree(obj);
return ret;
}
/* drop reference from allocate - handle holds it now */
- drm_gem_object_unreference(obj);
+ drm_gem_object_unreference(&obj->base);
trace_i915_gem_object_create(obj);
args->handle = handle;
return 0;
}
-static inline int
-fast_shmem_read(struct page **pages,
- loff_t page_base, int page_offset,
- char __user *data,
- int length)
-{
- char *vaddr;
- int ret;
-
- vaddr = kmap_atomic(pages[page_base >> PAGE_SHIFT]);
- ret = __copy_to_user_inatomic(data, vaddr + page_offset, length);
- kunmap_atomic(vaddr);
-
- return ret;
-}
-
-static int i915_gem_object_needs_bit17_swizzle(struct drm_gem_object *obj)
+static int i915_gem_object_needs_bit17_swizzle(struct drm_i915_gem_object *obj)
{
- drm_i915_private_t *dev_priv = obj->dev->dev_private;
- struct drm_i915_gem_object *obj_priv = to_intel_bo(obj);
+ drm_i915_private_t *dev_priv = obj->base.dev->dev_private;
return dev_priv->mm.bit_6_swizzle_x == I915_BIT_6_SWIZZLE_9_10_17 &&
- obj_priv->tiling_mode != I915_TILING_NONE;
+ obj->tiling_mode != I915_TILING_NONE;
}
static inline void
@@ -356,38 +305,51 @@ slow_shmem_bit17_copy(struct page *gpu_page,
* fault, it fails so we can fall back to i915_gem_shmem_pwrite_slow().
*/
static int
-i915_gem_shmem_pread_fast(struct drm_device *dev, struct drm_gem_object *obj,
+i915_gem_shmem_pread_fast(struct drm_device *dev,
+ struct drm_i915_gem_object *obj,
struct drm_i915_gem_pread *args,
- struct drm_file *file_priv)
+ struct drm_file *file)
{
- struct drm_i915_gem_object *obj_priv = to_intel_bo(obj);
+ struct address_space *mapping = obj->base.filp->f_path.dentry->d_inode->i_mapping;
ssize_t remain;
- loff_t offset, page_base;
+ loff_t offset;
char __user *user_data;
int page_offset, page_length;
user_data = (char __user *) (uintptr_t) args->data_ptr;
remain = args->size;
- obj_priv = to_intel_bo(obj);
offset = args->offset;
while (remain > 0) {
+ struct page *page;
+ char *vaddr;
+ int ret;
+
/* Operation in this page
*
- * page_base = page offset within aperture
* page_offset = offset within page
* page_length = bytes to copy for this page
*/
- page_base = (offset & ~(PAGE_SIZE-1));
page_offset = offset & (PAGE_SIZE-1);
page_length = remain;
if ((page_offset + remain) > PAGE_SIZE)
page_length = PAGE_SIZE - page_offset;
- if (fast_shmem_read(obj_priv->pages,
- page_base, page_offset,
- user_data, page_length))
+ page = read_cache_page_gfp(mapping, offset >> PAGE_SHIFT,
+ GFP_HIGHUSER | __GFP_RECLAIMABLE);
+ if (IS_ERR(page))
+ return PTR_ERR(page);
+
+ vaddr = kmap_atomic(page);
+ ret = __copy_to_user_inatomic(user_data,
+ vaddr + page_offset,
+ page_length);
+ kunmap_atomic(vaddr);
+
+ mark_page_accessed(page);
+ page_cache_release(page);
+ if (ret)
return -EFAULT;
remain -= page_length;
@@ -398,30 +360,6 @@ i915_gem_shmem_pread_fast(struct drm_device *dev, struct drm_gem_object *obj,
return 0;
}
-static int
-i915_gem_object_get_pages_or_evict(struct drm_gem_object *obj)
-{
- int ret;
-
- ret = i915_gem_object_get_pages(obj, __GFP_NORETRY | __GFP_NOWARN);
-
- /* If we've insufficient memory to map in the pages, attempt
- * to make some space by throwing out some old buffers.
- */
- if (ret == -ENOMEM) {
- struct drm_device *dev = obj->dev;
-
- ret = i915_gem_evict_something(dev, obj->size,
- i915_gem_get_gtt_alignment(obj));
- if (ret)
- return ret;
-
- ret = i915_gem_object_get_pages(obj, 0);
- }
-
- return ret;
-}
-
/**
* This is the fallback shmem pread path, which allocates temporary storage
* in kernel space to copy_to_user into outside of the struct_mutex, so we
@@ -429,18 +367,19 @@ i915_gem_object_get_pages_or_evict(struct drm_gem_object *obj)
* and not take page faults.
*/
static int
-i915_gem_shmem_pread_slow(struct drm_device *dev, struct drm_gem_object *obj,
+i915_gem_shmem_pread_slow(struct drm_device *dev,
+ struct drm_i915_gem_object *obj,
struct drm_i915_gem_pread *args,
- struct drm_file *file_priv)
+ struct drm_file *file)
{
- struct drm_i915_gem_object *obj_priv = to_intel_bo(obj);
+ struct address_space *mapping = obj->base.filp->f_path.dentry->d_inode->i_mapping;
struct mm_struct *mm = current->mm;
struct page **user_pages;
ssize_t remain;
loff_t offset, pinned_pages, i;
loff_t first_data_page, last_data_page, num_pages;
- int shmem_page_index, shmem_page_offset;
- int data_page_index, data_page_offset;
+ int shmem_page_offset;
+ int data_page_index, data_page_offset;
int page_length;
int ret;
uint64_t data_ptr = args->data_ptr;
@@ -479,19 +418,18 @@ i915_gem_shmem_pread_slow(struct drm_device *dev, struct drm_gem_object *obj,
do_bit17_swizzling = i915_gem_object_needs_bit17_swizzle(obj);
- obj_priv = to_intel_bo(obj);
offset = args->offset;
while (remain > 0) {
+ struct page *page;
+
/* Operation in this page
*
- * shmem_page_index = page number within shmem file
* shmem_page_offset = offset within page in shmem file
* data_page_index = page number in get_user_pages return
* data_page_offset = offset within data_page_index page.
* page_length = bytes to copy for this page
*/
- shmem_page_index = offset / PAGE_SIZE;
shmem_page_offset = offset & ~PAGE_MASK;
data_page_index = data_ptr / PAGE_SIZE - first_data_page;
data_page_offset = data_ptr & ~PAGE_MASK;
@@ -502,8 +440,13 @@ i915_gem_shmem_pread_slow(struct drm_device *dev, struct drm_gem_object *obj,
if ((data_page_offset + page_length) > PAGE_SIZE)
page_length = PAGE_SIZE - data_page_offset;
+ page = read_cache_page_gfp(mapping, offset >> PAGE_SHIFT,
+ GFP_HIGHUSER | __GFP_RECLAIMABLE);
+ if (IS_ERR(page))
+ return PTR_ERR(page);
+
if (do_bit17_swizzling) {
- slow_shmem_bit17_copy(obj_priv->pages[shmem_page_index],
+ slow_shmem_bit17_copy(page,
shmem_page_offset,
user_pages[data_page_index],
data_page_offset,
@@ -512,11 +455,14 @@ i915_gem_shmem_pread_slow(struct drm_device *dev, struct drm_gem_object *obj,
} else {
slow_shmem_copy(user_pages[data_page_index],
data_page_offset,
- obj_priv->pages[shmem_page_index],
+ page,
shmem_page_offset,
page_length);
}
+ mark_page_accessed(page);
+ page_cache_release(page);
+
remain -= page_length;
data_ptr += page_length;
offset += page_length;
@@ -525,6 +471,7 @@ i915_gem_shmem_pread_slow(struct drm_device *dev, struct drm_gem_object *obj,
out:
for (i = 0; i < pinned_pages; i++) {
SetPageDirty(user_pages[i]);
+ mark_page_accessed(user_pages[i]);
page_cache_release(user_pages[i]);
}
drm_free_large(user_pages);
@@ -539,11 +486,10 @@ out:
*/
int
i915_gem_pread_ioctl(struct drm_device *dev, void *data,
- struct drm_file *file_priv)
+ struct drm_file *file)
{
struct drm_i915_gem_pread *args = data;
- struct drm_gem_object *obj;
- struct drm_i915_gem_object *obj_priv;
+ struct drm_i915_gem_object *obj;
int ret = 0;
if (args->size == 0)
@@ -563,39 +509,33 @@ i915_gem_pread_ioctl(struct drm_device *dev, void *data,
if (ret)
return ret;
- obj = drm_gem_object_lookup(dev, file_priv, args->handle);
+ obj = to_intel_bo(drm_gem_object_lookup(dev, file, args->handle));
if (obj == NULL) {
ret = -ENOENT;
goto unlock;
}
- obj_priv = to_intel_bo(obj);
/* Bounds check source. */
- if (args->offset > obj->size || args->size > obj->size - args->offset) {
+ if (args->offset > obj->base.size ||
+ args->size > obj->base.size - args->offset) {
ret = -EINVAL;
goto out;
}
- ret = i915_gem_object_get_pages_or_evict(obj);
- if (ret)
- goto out;
-
ret = i915_gem_object_set_cpu_read_domain_range(obj,
args->offset,
args->size);
if (ret)
- goto out_put;
+ goto out;
ret = -EFAULT;
if (!i915_gem_object_needs_bit17_swizzle(obj))
- ret = i915_gem_shmem_pread_fast(dev, obj, args, file_priv);
+ ret = i915_gem_shmem_pread_fast(dev, obj, args, file);
if (ret == -EFAULT)
- ret = i915_gem_shmem_pread_slow(dev, obj, args, file_priv);
+ ret = i915_gem_shmem_pread_slow(dev, obj, args, file);
-out_put:
- i915_gem_object_put_pages(obj);
out:
- drm_gem_object_unreference(obj);
+ drm_gem_object_unreference(&obj->base);
unlock:
mutex_unlock(&dev->struct_mutex);
return ret;
@@ -645,32 +585,16 @@ slow_kernel_write(struct io_mapping *mapping,
io_mapping_unmap(dst_vaddr);
}
-static inline int
-fast_shmem_write(struct page **pages,
- loff_t page_base, int page_offset,
- char __user *data,
- int length)
-{
- char *vaddr;
- int ret;
-
- vaddr = kmap_atomic(pages[page_base >> PAGE_SHIFT]);
- ret = __copy_from_user_inatomic(vaddr + page_offset, data, length);
- kunmap_atomic(vaddr);
-
- return ret;
-}
-
/**
* This is the fast pwrite path, where we copy the data directly from the
* user into the GTT, uncached.
*/
static int
-i915_gem_gtt_pwrite_fast(struct drm_device *dev, struct drm_gem_object *obj,
+i915_gem_gtt_pwrite_fast(struct drm_device *dev,
+ struct drm_i915_gem_object *obj,
struct drm_i915_gem_pwrite *args,
- struct drm_file *file_priv)
+ struct drm_file *file)
{
- struct drm_i915_gem_object *obj_priv = to_intel_bo(obj);
drm_i915_private_t *dev_priv = dev->dev_private;
ssize_t remain;
loff_t offset, page_base;
@@ -680,8 +604,7 @@ i915_gem_gtt_pwrite_fast(struct drm_device *dev, struct drm_gem_object *obj,
user_data = (char __user *) (uintptr_t) args->data_ptr;
remain = args->size;
- obj_priv = to_intel_bo(obj);
- offset = obj_priv->gtt_offset + args->offset;
+ offset = obj->gtt_offset + args->offset;
while (remain > 0) {
/* Operation in this page
@@ -721,11 +644,11 @@ i915_gem_gtt_pwrite_fast(struct drm_device *dev, struct drm_gem_object *obj,
* than using i915_gem_gtt_pwrite_fast on a G45 (32-bit).
*/
static int
-i915_gem_gtt_pwrite_slow(struct drm_device *dev, struct drm_gem_object *obj,
+i915_gem_gtt_pwrite_slow(struct drm_device *dev,
+ struct drm_i915_gem_object *obj,
struct drm_i915_gem_pwrite *args,
- struct drm_file *file_priv)
+ struct drm_file *file)
{
- struct drm_i915_gem_object *obj_priv = to_intel_bo(obj);
drm_i915_private_t *dev_priv = dev->dev_private;
ssize_t remain;
loff_t gtt_page_base, offset;
@@ -762,12 +685,15 @@ i915_gem_gtt_pwrite_slow(struct drm_device *dev, struct drm_gem_object *obj,
goto out_unpin_pages;
}
- ret = i915_gem_object_set_to_gtt_domain(obj, 1);
+ ret = i915_gem_object_set_to_gtt_domain(obj, true);
if (ret)
goto out_unpin_pages;
- obj_priv = to_intel_bo(obj);
- offset = obj_priv->gtt_offset + args->offset;
+ ret = i915_gem_object_put_fence(obj);
+ if (ret)
+ goto out_unpin_pages;
+
+ offset = obj->gtt_offset + args->offset;
while (remain > 0) {
/* Operation in this page
@@ -813,39 +739,58 @@ out_unpin_pages:
* copy_from_user into the kmapped pages backing the object.
*/
static int
-i915_gem_shmem_pwrite_fast(struct drm_device *dev, struct drm_gem_object *obj,
+i915_gem_shmem_pwrite_fast(struct drm_device *dev,
+ struct drm_i915_gem_object *obj,
struct drm_i915_gem_pwrite *args,
- struct drm_file *file_priv)
+ struct drm_file *file)
{
- struct drm_i915_gem_object *obj_priv = to_intel_bo(obj);
+ struct address_space *mapping = obj->base.filp->f_path.dentry->d_inode->i_mapping;
ssize_t remain;
- loff_t offset, page_base;
+ loff_t offset;
char __user *user_data;
int page_offset, page_length;
user_data = (char __user *) (uintptr_t) args->data_ptr;
remain = args->size;
- obj_priv = to_intel_bo(obj);
offset = args->offset;
- obj_priv->dirty = 1;
+ obj->dirty = 1;
while (remain > 0) {
+ struct page *page;
+ char *vaddr;
+ int ret;
+
/* Operation in this page
*
- * page_base = page offset within aperture
* page_offset = offset within page
* page_length = bytes to copy for this page
*/
- page_base = (offset & ~(PAGE_SIZE-1));
page_offset = offset & (PAGE_SIZE-1);
page_length = remain;
if ((page_offset + remain) > PAGE_SIZE)
page_length = PAGE_SIZE - page_offset;
- if (fast_shmem_write(obj_priv->pages,
- page_base, page_offset,
- user_data, page_length))
+ page = read_cache_page_gfp(mapping, offset >> PAGE_SHIFT,
+ GFP_HIGHUSER | __GFP_RECLAIMABLE);
+ if (IS_ERR(page))
+ return PTR_ERR(page);
+
+ vaddr = kmap_atomic(page, KM_USER0);
+ ret = __copy_from_user_inatomic(vaddr + page_offset,
+ user_data,
+ page_length);
+ kunmap_atomic(vaddr, KM_USER0);
+
+ set_page_dirty(page);
+ mark_page_accessed(page);
+ page_cache_release(page);
+
+ /* If we get a fault while copying data, then (presumably) our
+ * source page isn't available. Return the error and we'll
+ * retry in the slow path.
+ */
+ if (ret)
return -EFAULT;
remain -= page_length;
@@ -864,17 +809,18 @@ i915_gem_shmem_pwrite_fast(struct drm_device *dev, struct drm_gem_object *obj,
* struct_mutex is held.
*/
static int
-i915_gem_shmem_pwrite_slow(struct drm_device *dev, struct drm_gem_object *obj,
+i915_gem_shmem_pwrite_slow(struct drm_device *dev,
+ struct drm_i915_gem_object *obj,
struct drm_i915_gem_pwrite *args,
- struct drm_file *file_priv)
+ struct drm_file *file)
{
- struct drm_i915_gem_object *obj_priv = to_intel_bo(obj);
+ struct address_space *mapping = obj->base.filp->f_path.dentry->d_inode->i_mapping;
struct mm_struct *mm = current->mm;
struct page **user_pages;
ssize_t remain;
loff_t offset, pinned_pages, i;
loff_t first_data_page, last_data_page, num_pages;
- int shmem_page_index, shmem_page_offset;
+ int shmem_page_offset;
int data_page_index, data_page_offset;
int page_length;
int ret;
@@ -912,20 +858,19 @@ i915_gem_shmem_pwrite_slow(struct drm_device *dev, struct drm_gem_object *obj,
do_bit17_swizzling = i915_gem_object_needs_bit17_swizzle(obj);
- obj_priv = to_intel_bo(obj);
offset = args->offset;
- obj_priv->dirty = 1;
+ obj->dirty = 1;
while (remain > 0) {
+ struct page *page;
+
/* Operation in this page
*
- * shmem_page_index = page number within shmem file
* shmem_page_offset = offset within page in shmem file
* data_page_index = page number in get_user_pages return
* data_page_offset = offset within data_page_index page.
* page_length = bytes to copy for this page
*/
- shmem_page_index = offset / PAGE_SIZE;
shmem_page_offset = offset & ~PAGE_MASK;
data_page_index = data_ptr / PAGE_SIZE - first_data_page;
data_page_offset = data_ptr & ~PAGE_MASK;
@@ -936,21 +881,32 @@ i915_gem_shmem_pwrite_slow(struct drm_device *dev, struct drm_gem_object *obj,
if ((data_page_offset + page_length) > PAGE_SIZE)
page_length = PAGE_SIZE - data_page_offset;
+ page = read_cache_page_gfp(mapping, offset >> PAGE_SHIFT,
+ GFP_HIGHUSER | __GFP_RECLAIMABLE);
+ if (IS_ERR(page)) {
+ ret = PTR_ERR(page);
+ goto out;
+ }
+
if (do_bit17_swizzling) {
- slow_shmem_bit17_copy(obj_priv->pages[shmem_page_index],
+ slow_shmem_bit17_copy(page,
shmem_page_offset,
user_pages[data_page_index],
data_page_offset,
page_length,
0);
} else {
- slow_shmem_copy(obj_priv->pages[shmem_page_index],
+ slow_shmem_copy(page,
shmem_page_offset,
user_pages[data_page_index],
data_page_offset,
page_length);
}
+ set_page_dirty(page);
+ mark_page_accessed(page);
+ page_cache_release(page);
+
remain -= page_length;
data_ptr += page_length;
offset += page_length;
@@ -974,8 +930,7 @@ i915_gem_pwrite_ioctl(struct drm_device *dev, void *data,
struct drm_file *file)
{
struct drm_i915_gem_pwrite *args = data;
- struct drm_gem_object *obj;
- struct drm_i915_gem_object *obj_priv;
+ struct drm_i915_gem_object *obj;
int ret;
if (args->size == 0)
@@ -995,15 +950,15 @@ i915_gem_pwrite_ioctl(struct drm_device *dev, void *data,
if (ret)
return ret;
- obj = drm_gem_object_lookup(dev, file, args->handle);
+ obj = to_intel_bo(drm_gem_object_lookup(dev, file, args->handle));
if (obj == NULL) {
ret = -ENOENT;
goto unlock;
}
- obj_priv = to_intel_bo(obj);
/* Bounds check destination. */
- if (args->offset > obj->size || args->size > obj->size - args->offset) {
+ if (args->offset > obj->base.size ||
+ args->size > obj->base.size - args->offset) {
ret = -EINVAL;
goto out;
}
@@ -1014,16 +969,19 @@ i915_gem_pwrite_ioctl(struct drm_device *dev, void *data,
* pread/pwrite currently are reading and writing from the CPU
* perspective, requiring manual detiling by the client.
*/
- if (obj_priv->phys_obj)
+ if (obj->phys_obj)
ret = i915_gem_phys_pwrite(dev, obj, args, file);
- else if (obj_priv->tiling_mode == I915_TILING_NONE &&
- obj_priv->gtt_space &&
- obj->write_domain != I915_GEM_DOMAIN_CPU) {
- ret = i915_gem_object_pin(obj, 0);
+ else if (obj->gtt_space &&
+ obj->base.write_domain != I915_GEM_DOMAIN_CPU) {
+ ret = i915_gem_object_pin(obj, 0, true);
if (ret)
goto out;
- ret = i915_gem_object_set_to_gtt_domain(obj, 1);
+ ret = i915_gem_object_set_to_gtt_domain(obj, true);
+ if (ret)
+ goto out_unpin;
+
+ ret = i915_gem_object_put_fence(obj);
if (ret)
goto out_unpin;
@@ -1034,26 +992,19 @@ i915_gem_pwrite_ioctl(struct drm_device *dev, void *data,
out_unpin:
i915_gem_object_unpin(obj);
} else {
- ret = i915_gem_object_get_pages_or_evict(obj);
- if (ret)
- goto out;
-
ret = i915_gem_object_set_to_cpu_domain(obj, 1);
if (ret)
- goto out_put;
+ goto out;
ret = -EFAULT;
if (!i915_gem_object_needs_bit17_swizzle(obj))
ret = i915_gem_shmem_pwrite_fast(dev, obj, args, file);
if (ret == -EFAULT)
ret = i915_gem_shmem_pwrite_slow(dev, obj, args, file);
-
-out_put:
- i915_gem_object_put_pages(obj);
}
out:
- drm_gem_object_unreference(obj);
+ drm_gem_object_unreference(&obj->base);
unlock:
mutex_unlock(&dev->struct_mutex);
return ret;
@@ -1065,12 +1016,10 @@ unlock:
*/
int
i915_gem_set_domain_ioctl(struct drm_device *dev, void *data,
- struct drm_file *file_priv)
+ struct drm_file *file)
{
- struct drm_i915_private *dev_priv = dev->dev_private;
struct drm_i915_gem_set_domain *args = data;
- struct drm_gem_object *obj;
- struct drm_i915_gem_object *obj_priv;
+ struct drm_i915_gem_object *obj;
uint32_t read_domains = args->read_domains;
uint32_t write_domain = args->write_domain;
int ret;
@@ -1095,28 +1044,15 @@ i915_gem_set_domain_ioctl(struct drm_device *dev, void *data,
if (ret)
return ret;
- obj = drm_gem_object_lookup(dev, file_priv, args->handle);
+ obj = to_intel_bo(drm_gem_object_lookup(dev, file, args->handle));
if (obj == NULL) {
ret = -ENOENT;
goto unlock;
}
- obj_priv = to_intel_bo(obj);
-
- intel_mark_busy(dev, obj);
if (read_domains & I915_GEM_DOMAIN_GTT) {
ret = i915_gem_object_set_to_gtt_domain(obj, write_domain != 0);
- /* Update the LRU on the fence for the CPU access that's
- * about to occur.
- */
- if (obj_priv->fence_reg != I915_FENCE_REG_NONE) {
- struct drm_i915_fence_reg *reg =
- &dev_priv->fence_regs[obj_priv->fence_reg];
- list_move_tail(&reg->lru_list,
- &dev_priv->mm.fence_list);
- }
-
/* Silently promote "you're not bound, there was nothing to do"
* to success, since the client was just asking us to
* make sure everything was done.
@@ -1127,11 +1063,7 @@ i915_gem_set_domain_ioctl(struct drm_device *dev, void *data,
ret = i915_gem_object_set_to_cpu_domain(obj, write_domain != 0);
}
- /* Maintain LRU order of "inactive" objects */
- if (ret == 0 && i915_gem_object_is_inactive(obj_priv))
- list_move_tail(&obj_priv->mm_list, &dev_priv->mm.inactive_list);
-
- drm_gem_object_unreference(obj);
+ drm_gem_object_unreference(&obj->base);
unlock:
mutex_unlock(&dev->struct_mutex);
return ret;
@@ -1142,10 +1074,10 @@ unlock:
*/
int
i915_gem_sw_finish_ioctl(struct drm_device *dev, void *data,
- struct drm_file *file_priv)
+ struct drm_file *file)
{
struct drm_i915_gem_sw_finish *args = data;
- struct drm_gem_object *obj;
+ struct drm_i915_gem_object *obj;
int ret = 0;
if (!(dev->driver->driver_features & DRIVER_GEM))
@@ -1155,17 +1087,17 @@ i915_gem_sw_finish_ioctl(struct drm_device *dev, void *data,
if (ret)
return ret;
- obj = drm_gem_object_lookup(dev, file_priv, args->handle);
+ obj = to_intel_bo(drm_gem_object_lookup(dev, file, args->handle));
if (obj == NULL) {
ret = -ENOENT;
goto unlock;
}
/* Pinned buffers may be scanout, so flush the cache */
- if (to_intel_bo(obj)->pin_count)
+ if (obj->pin_count)
i915_gem_object_flush_cpu_write_domain(obj);
- drm_gem_object_unreference(obj);
+ drm_gem_object_unreference(&obj->base);
unlock:
mutex_unlock(&dev->struct_mutex);
return ret;
@@ -1180,8 +1112,9 @@ unlock:
*/
int
i915_gem_mmap_ioctl(struct drm_device *dev, void *data,
- struct drm_file *file_priv)
+ struct drm_file *file)
{
+ struct drm_i915_private *dev_priv = dev->dev_private;
struct drm_i915_gem_mmap *args = data;
struct drm_gem_object *obj;
loff_t offset;
@@ -1190,10 +1123,15 @@ i915_gem_mmap_ioctl(struct drm_device *dev, void *data,
if (!(dev->driver->driver_features & DRIVER_GEM))
return -ENODEV;
- obj = drm_gem_object_lookup(dev, file_priv, args->handle);
+ obj = drm_gem_object_lookup(dev, file, args->handle);
if (obj == NULL)
return -ENOENT;
+ if (obj->size > dev_priv->mm.gtt_mappable_end) {
+ drm_gem_object_unreference_unlocked(obj);
+ return -E2BIG;
+ }
+
offset = args->offset;
down_write(&current->mm->mmap_sem);
@@ -1228,10 +1166,9 @@ i915_gem_mmap_ioctl(struct drm_device *dev, void *data,
*/
int i915_gem_fault(struct vm_area_struct *vma, struct vm_fault *vmf)
{
- struct drm_gem_object *obj = vma->vm_private_data;
- struct drm_device *dev = obj->dev;
+ struct drm_i915_gem_object *obj = to_intel_bo(vma->vm_private_data);
+ struct drm_device *dev = obj->base.dev;
drm_i915_private_t *dev_priv = dev->dev_private;
- struct drm_i915_gem_object *obj_priv = to_intel_bo(obj);
pgoff_t page_offset;
unsigned long pfn;
int ret = 0;
@@ -1243,27 +1180,35 @@ int i915_gem_fault(struct vm_area_struct *vma, struct vm_fault *vmf)
/* Now bind it into the GTT if needed */
mutex_lock(&dev->struct_mutex);
- if (!obj_priv->gtt_space) {
- ret = i915_gem_object_bind_to_gtt(obj, 0);
- if (ret)
- goto unlock;
- ret = i915_gem_object_set_to_gtt_domain(obj, write);
+ if (!obj->map_and_fenceable) {
+ ret = i915_gem_object_unbind(obj);
if (ret)
goto unlock;
}
-
- /* Need a new fence register? */
- if (obj_priv->tiling_mode != I915_TILING_NONE) {
- ret = i915_gem_object_get_fence_reg(obj, true);
+ if (!obj->gtt_space) {
+ ret = i915_gem_object_bind_to_gtt(obj, 0, true);
if (ret)
goto unlock;
}
- if (i915_gem_object_is_inactive(obj_priv))
- list_move_tail(&obj_priv->mm_list, &dev_priv->mm.inactive_list);
+ ret = i915_gem_object_set_to_gtt_domain(obj, write);
+ if (ret)
+ goto unlock;
+
+ if (obj->tiling_mode == I915_TILING_NONE)
+ ret = i915_gem_object_put_fence(obj);
+ else
+ ret = i915_gem_object_get_fence(obj, NULL, true);
+ if (ret)
+ goto unlock;
+
+ if (i915_gem_object_is_inactive(obj))
+ list_move_tail(&obj->mm_list, &dev_priv->mm.inactive_list);
+
+ obj->fault_mappable = true;
- pfn = ((dev->agp->base + obj_priv->gtt_offset) >> PAGE_SHIFT) +
+ pfn = ((dev->agp->base + obj->gtt_offset) >> PAGE_SHIFT) +
page_offset;
/* Finally, remap it using the new GTT offset */
@@ -1272,11 +1217,12 @@ unlock:
mutex_unlock(&dev->struct_mutex);
switch (ret) {
+ case -EAGAIN:
+ set_need_resched();
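+ /* fall through: VM_FAULT_NOPAGE retries the access after rescheduling */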
case 0:
case -ERESTARTSYS:
return VM_FAULT_NOPAGE;
case -ENOMEM:
- case -EAGAIN:
return VM_FAULT_OOM;
default:
return VM_FAULT_SIGBUS;
@@ -1295,37 +1241,39 @@ unlock:
* This routine allocates and attaches a fake offset for @obj.
*/
static int
-i915_gem_create_mmap_offset(struct drm_gem_object *obj)
+i915_gem_create_mmap_offset(struct drm_i915_gem_object *obj)
{
- struct drm_device *dev = obj->dev;
+ struct drm_device *dev = obj->base.dev;
struct drm_gem_mm *mm = dev->mm_private;
- struct drm_i915_gem_object *obj_priv = to_intel_bo(obj);
struct drm_map_list *list;
struct drm_local_map *map;
int ret = 0;
/* Set the object up for mmap'ing */
- list = &obj->map_list;
+ list = &obj->base.map_list;
list->map = kzalloc(sizeof(struct drm_map_list), GFP_KERNEL);
if (!list->map)
return -ENOMEM;
map = list->map;
map->type = _DRM_GEM;
- map->size = obj->size;
+ map->size = obj->base.size;
map->handle = obj;
/* Get a DRM GEM mmap offset allocated... */
list->file_offset_node = drm_mm_search_free(&mm->offset_manager,
- obj->size / PAGE_SIZE, 0, 0);
+ obj->base.size / PAGE_SIZE,
+ 0, 0);
if (!list->file_offset_node) {
- DRM_ERROR("failed to allocate offset for bo %d\n", obj->name);
+ DRM_ERROR("failed to allocate offset for bo %d\n",
+ obj->base.name);
ret = -ENOSPC;
goto out_free_list;
}
list->file_offset_node = drm_mm_get_block(list->file_offset_node,
- obj->size / PAGE_SIZE, 0);
+ obj->base.size / PAGE_SIZE,
+ 0);
if (!list->file_offset_node) {
ret = -ENOMEM;
goto out_free_list;
@@ -1338,16 +1286,13 @@ i915_gem_create_mmap_offset(struct drm_gem_object *obj)
goto out_free_mm;
}
- /* By now we should be all set, any drm_mmap request on the offset
- * below will get to our mmap & fault handler */
- obj_priv->mmap_offset = ((uint64_t) list->hash.key) << PAGE_SHIFT;
-
return 0;
out_free_mm:
drm_mm_put_block(list->file_offset_node);
out_free_list:
kfree(list->map);
+ list->map = NULL;
return ret;
}
@@ -1367,38 +1312,51 @@ out_free_list:
* fixup by i915_gem_fault().
*/
void
-i915_gem_release_mmap(struct drm_gem_object *obj)
+i915_gem_release_mmap(struct drm_i915_gem_object *obj)
{
- struct drm_device *dev = obj->dev;
- struct drm_i915_gem_object *obj_priv = to_intel_bo(obj);
+ if (!obj->fault_mappable)
+ return;
- if (dev->dev_mapping)
- unmap_mapping_range(dev->dev_mapping,
- obj_priv->mmap_offset, obj->size, 1);
+ unmap_mapping_range(obj->base.dev->dev_mapping,
+ (loff_t)obj->base.map_list.hash.key << PAGE_SHIFT,
+ obj->base.size, 1);
+
+ obj->fault_mappable = false;
}
static void
-i915_gem_free_mmap_offset(struct drm_gem_object *obj)
+i915_gem_free_mmap_offset(struct drm_i915_gem_object *obj)
{
- struct drm_device *dev = obj->dev;
- struct drm_i915_gem_object *obj_priv = to_intel_bo(obj);
+ struct drm_device *dev = obj->base.dev;
struct drm_gem_mm *mm = dev->mm_private;
- struct drm_map_list *list;
+ struct drm_map_list *list = &obj->base.map_list;
- list = &obj->map_list;
drm_ht_remove_item(&mm->offset_hash, &list->hash);
+ drm_mm_put_block(list->file_offset_node);
+ kfree(list->map);
+ list->map = NULL;
+}
- if (list->file_offset_node) {
- drm_mm_put_block(list->file_offset_node);
- list->file_offset_node = NULL;
- }
+static uint32_t
+i915_gem_get_gtt_size(struct drm_i915_gem_object *obj)
+{
+ struct drm_device *dev = obj->base.dev;
+ uint32_t size;
- if (list->map) {
- kfree(list->map);
- list->map = NULL;
- }
+ if (INTEL_INFO(dev)->gen >= 4 ||
+ obj->tiling_mode == I915_TILING_NONE)
+ return obj->base.size;
+
+ /* Previous chips need a power-of-two fence region when tiling */
+ if (INTEL_INFO(dev)->gen == 3)
+ size = 1024*1024;
+ else
+ size = 512*1024;
+
+ while (size < obj->base.size)
+ size <<= 1;
- obj_priv->mmap_offset = 0;
+ return size;
}
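Pre-gen4 fences must cover a power-of-two region with a generation-dependent floor; a minimal standalone sketch of the rounding above (hypothetical helper, not part of the patch):

	static u32 fence_region_size(u32 obj_size, int gen)
	{
		/* gen3 fences start at 1MiB, gen2 at 512KiB */
		u32 size = (gen == 3) ? 1024*1024 : 512*1024;

		/* grow by powers of two until the object fits */
		while (size < obj_size)
			size <<= 1;

		return size;	/* e.g. a 700KiB gen3 object takes a 1MiB fence */
	}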
/**
@@ -1406,42 +1364,68 @@ i915_gem_free_mmap_offset(struct drm_gem_object *obj)
* @obj: object to check
*
* Return the required GTT alignment for an object, taking into account
- * potential fence register mapping if needed.
+ * potential fence register mapping.
*/
static uint32_t
-i915_gem_get_gtt_alignment(struct drm_gem_object *obj)
+i915_gem_get_gtt_alignment(struct drm_i915_gem_object *obj)
{
- struct drm_device *dev = obj->dev;
- struct drm_i915_gem_object *obj_priv = to_intel_bo(obj);
- int start, i;
+ struct drm_device *dev = obj->base.dev;
/*
* Minimum alignment is 4k (GTT page size), but might be greater
* if a fence register is needed for the object.
*/
- if (INTEL_INFO(dev)->gen >= 4 || obj_priv->tiling_mode == I915_TILING_NONE)
+ if (INTEL_INFO(dev)->gen >= 4 ||
+ obj->tiling_mode == I915_TILING_NONE)
return 4096;
/*
* Previous chips need to be aligned to the size of the smallest
* fence register that can contain the object.
*/
- if (INTEL_INFO(dev)->gen == 3)
- start = 1024*1024;
- else
- start = 512*1024;
+ return i915_gem_get_gtt_size(obj);
+}
+
+/**
+ * i915_gem_get_unfenced_gtt_alignment - return required GTT alignment for an
+ * unfenced object
+ * @obj: object to check
+ *
+ * Return the required GTT alignment for an object, only taking into account
+ * unfenced tiled surface requirements.
+ */
+static uint32_t
+i915_gem_get_unfenced_gtt_alignment(struct drm_i915_gem_object *obj)
+{
+ struct drm_device *dev = obj->base.dev;
+ int tile_height;
- for (i = start; i < obj->size; i <<= 1)
- ;
+ /*
+ * Minimum alignment is 4k (GTT page size) for sane hw.
+ */
+ if (INTEL_INFO(dev)->gen >= 4 || IS_G33(dev) ||
+ obj->tiling_mode == I915_TILING_NONE)
+ return 4096;
- return i;
+ /*
+ * Older chips need unfenced tiled buffers to be aligned to the left
+ * edge of an even tile row (where tile rows are counted as if the bo is
+ * placed in a fenced gtt region).
+ */
+ if (IS_GEN2(dev) ||
+ (obj->tiling_mode == I915_TILING_Y && HAS_128_BYTE_Y_TILING(dev)))
+ tile_height = 32;
+ else
+ tile_height = 8;
+
+ return tile_height * obj->stride * 2;
}
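Concretely, the rule above pads an unfenced tiled bo to an even tile-row boundary: with illustrative numbers, an X-tiled gen3 surface with a 2048-byte stride has tile_height = 8, so it must be bound at a multiple of 8 * 2048 * 2 = 32KiB.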
/**
* i915_gem_mmap_gtt_ioctl - prepare an object for GTT mmap'ing
* @dev: DRM device
* @data: GTT mapping ioctl data
- * @file_priv: GEM object info
+ * @file: GEM object info
*
* Simply returns the fake offset to userspace so it can mmap it.
* The mmap call will end up in drm_gem_mmap(), which will set things
@@ -1454,11 +1438,11 @@ i915_gem_get_gtt_alignment(struct drm_gem_object *obj)
*/
int
i915_gem_mmap_gtt_ioctl(struct drm_device *dev, void *data,
- struct drm_file *file_priv)
+ struct drm_file *file)
{
+ struct drm_i915_private *dev_priv = dev->dev_private;
struct drm_i915_gem_mmap_gtt *args = data;
- struct drm_gem_object *obj;
- struct drm_i915_gem_object *obj_priv;
+ struct drm_i915_gem_object *obj;
int ret;
if (!(dev->driver->driver_features & DRIVER_GEM))
@@ -1468,130 +1452,196 @@ i915_gem_mmap_gtt_ioctl(struct drm_device *dev, void *data,
if (ret)
return ret;
- obj = drm_gem_object_lookup(dev, file_priv, args->handle);
+ obj = to_intel_bo(drm_gem_object_lookup(dev, file, args->handle));
if (obj == NULL) {
ret = -ENOENT;
goto unlock;
}
- obj_priv = to_intel_bo(obj);
- if (obj_priv->madv != I915_MADV_WILLNEED) {
+ if (obj->base.size > dev_priv->mm.gtt_mappable_end) {
+ ret = -E2BIG;
+ goto unlock;
+ }
+
+ if (obj->madv != I915_MADV_WILLNEED) {
DRM_ERROR("Attempting to mmap a purgeable buffer\n");
ret = -EINVAL;
goto out;
}
- if (!obj_priv->mmap_offset) {
+ if (!obj->base.map_list.map) {
ret = i915_gem_create_mmap_offset(obj);
if (ret)
goto out;
}
- args->offset = obj_priv->mmap_offset;
-
- /*
- * Pull it into the GTT so that we have a page list (makes the
- * initial fault faster and any subsequent flushing possible).
- */
- if (!obj_priv->agp_mem) {
- ret = i915_gem_object_bind_to_gtt(obj, 0);
- if (ret)
- goto out;
- }
+ args->offset = (u64)obj->base.map_list.hash.key << PAGE_SHIFT;
out:
- drm_gem_object_unreference(obj);
+ drm_gem_object_unreference(&obj->base);
unlock:
mutex_unlock(&dev->struct_mutex);
return ret;
}
+static int
+i915_gem_object_get_pages_gtt(struct drm_i915_gem_object *obj,
+ gfp_t gfpmask)
+{
+ int page_count, i;
+ struct address_space *mapping;
+ struct inode *inode;
+ struct page *page;
+
+ /* Get the list of pages out of our struct file. They'll be pinned
+ * at this point until we release them.
+ */
+ page_count = obj->base.size / PAGE_SIZE;
+ BUG_ON(obj->pages != NULL);
+ obj->pages = drm_malloc_ab(page_count, sizeof(struct page *));
+ if (obj->pages == NULL)
+ return -ENOMEM;
+
+ inode = obj->base.filp->f_path.dentry->d_inode;
+ mapping = inode->i_mapping;
+ for (i = 0; i < page_count; i++) {
+ page = read_cache_page_gfp(mapping, i,
+ GFP_HIGHUSER |
+ __GFP_COLD |
+ __GFP_RECLAIMABLE |
+ gfpmask);
+ if (IS_ERR(page))
+ goto err_pages;
+
+ obj->pages[i] = page;
+ }
+
+ if (obj->tiling_mode != I915_TILING_NONE)
+ i915_gem_object_do_bit_17_swizzle(obj);
+
+ return 0;
+
+err_pages:
+ while (i--)
+ page_cache_release(obj->pages[i]);
+
+ drm_free_large(obj->pages);
+ obj->pages = NULL;
+ return PTR_ERR(page);
+}
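drm_malloc_ab() is used for the page array because nmemb * sizeof(struct page *) can exceed what the slab comfortably serves; roughly, it behaves like the overflow-checked sketch below (simplified from this era's drm_mem_util.h; exact flags assumed):

	static void *malloc_ab_sketch(size_t nmemb, size_t size)
	{
		/* reject multiplication overflow before computing the total */
		if (size != 0 && nmemb > ULONG_MAX / size)
			return NULL;

		/* small arrays come from the slab, large ones from vmalloc space */
		if (nmemb * size <= PAGE_SIZE)
			return kmalloc(nmemb * size, GFP_KERNEL);

		return vmalloc(nmemb * size);
	}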
+
static void
-i915_gem_object_put_pages(struct drm_gem_object *obj)
+i915_gem_object_put_pages_gtt(struct drm_i915_gem_object *obj)
{
- struct drm_i915_gem_object *obj_priv = to_intel_bo(obj);
- int page_count = obj->size / PAGE_SIZE;
+ int page_count = obj->base.size / PAGE_SIZE;
int i;
- BUG_ON(obj_priv->pages_refcount == 0);
- BUG_ON(obj_priv->madv == __I915_MADV_PURGED);
-
- if (--obj_priv->pages_refcount != 0)
- return;
+ BUG_ON(obj->madv == __I915_MADV_PURGED);
- if (obj_priv->tiling_mode != I915_TILING_NONE)
+ if (obj->tiling_mode != I915_TILING_NONE)
i915_gem_object_save_bit_17_swizzle(obj);
- if (obj_priv->madv == I915_MADV_DONTNEED)
- obj_priv->dirty = 0;
+ if (obj->madv == I915_MADV_DONTNEED)
+ obj->dirty = 0;
for (i = 0; i < page_count; i++) {
- if (obj_priv->dirty)
- set_page_dirty(obj_priv->pages[i]);
+ if (obj->dirty)
+ set_page_dirty(obj->pages[i]);
- if (obj_priv->madv == I915_MADV_WILLNEED)
- mark_page_accessed(obj_priv->pages[i]);
+ if (obj->madv == I915_MADV_WILLNEED)
+ mark_page_accessed(obj->pages[i]);
- page_cache_release(obj_priv->pages[i]);
+ page_cache_release(obj->pages[i]);
}
- obj_priv->dirty = 0;
+ obj->dirty = 0;
- drm_free_large(obj_priv->pages);
- obj_priv->pages = NULL;
+ drm_free_large(obj->pages);
+ obj->pages = NULL;
}
-static uint32_t
-i915_gem_next_request_seqno(struct drm_device *dev,
- struct intel_ring_buffer *ring)
-{
- drm_i915_private_t *dev_priv = dev->dev_private;
-
- ring->outstanding_lazy_request = true;
- return dev_priv->next_seqno;
-}
-
-static void
-i915_gem_object_move_to_active(struct drm_gem_object *obj,
- struct intel_ring_buffer *ring)
+void
+i915_gem_object_move_to_active(struct drm_i915_gem_object *obj,
+ struct intel_ring_buffer *ring,
+ u32 seqno)
{
- struct drm_device *dev = obj->dev;
+ struct drm_device *dev = obj->base.dev;
struct drm_i915_private *dev_priv = dev->dev_private;
- struct drm_i915_gem_object *obj_priv = to_intel_bo(obj);
- uint32_t seqno = i915_gem_next_request_seqno(dev, ring);
BUG_ON(ring == NULL);
- obj_priv->ring = ring;
+ obj->ring = ring;
/* Add a reference if we're newly entering the active list. */
- if (!obj_priv->active) {
- drm_gem_object_reference(obj);
- obj_priv->active = 1;
+ if (!obj->active) {
+ drm_gem_object_reference(&obj->base);
+ obj->active = 1;
}
/* Move from whatever list we were on to the tail of execution. */
- list_move_tail(&obj_priv->mm_list, &dev_priv->mm.active_list);
- list_move_tail(&obj_priv->ring_list, &ring->active_list);
- obj_priv->last_rendering_seqno = seqno;
+ list_move_tail(&obj->mm_list, &dev_priv->mm.active_list);
+ list_move_tail(&obj->ring_list, &ring->active_list);
+
+ obj->last_rendering_seqno = seqno;
+ if (obj->fenced_gpu_access) {
+ struct drm_i915_fence_reg *reg;
+
+ BUG_ON(obj->fence_reg == I915_FENCE_REG_NONE);
+
+ obj->last_fenced_seqno = seqno;
+ obj->last_fenced_ring = ring;
+
+ reg = &dev_priv->fence_regs[obj->fence_reg];
+ list_move_tail(&reg->lru_list, &dev_priv->mm.fence_list);
+ }
}
static void
-i915_gem_object_move_to_flushing(struct drm_gem_object *obj)
+i915_gem_object_move_off_active(struct drm_i915_gem_object *obj)
{
- struct drm_device *dev = obj->dev;
+ list_del_init(&obj->ring_list);
+ obj->last_rendering_seqno = 0;
+}
+
+static void
+i915_gem_object_move_to_flushing(struct drm_i915_gem_object *obj)
+{
+ struct drm_device *dev = obj->base.dev;
drm_i915_private_t *dev_priv = dev->dev_private;
- struct drm_i915_gem_object *obj_priv = to_intel_bo(obj);
- BUG_ON(!obj_priv->active);
- list_move_tail(&obj_priv->mm_list, &dev_priv->mm.flushing_list);
- list_del_init(&obj_priv->ring_list);
- obj_priv->last_rendering_seqno = 0;
+ BUG_ON(!obj->active);
+ list_move_tail(&obj->mm_list, &dev_priv->mm.flushing_list);
+
+ i915_gem_object_move_off_active(obj);
+}
+
+static void
+i915_gem_object_move_to_inactive(struct drm_i915_gem_object *obj)
+{
+ struct drm_device *dev = obj->base.dev;
+ struct drm_i915_private *dev_priv = dev->dev_private;
+
+ if (obj->pin_count != 0)
+ list_move_tail(&obj->mm_list, &dev_priv->mm.pinned_list);
+ else
+ list_move_tail(&obj->mm_list, &dev_priv->mm.inactive_list);
+
+ BUG_ON(!list_empty(&obj->gpu_write_list));
+ BUG_ON(!obj->active);
+ obj->ring = NULL;
+
+ i915_gem_object_move_off_active(obj);
+ obj->fenced_gpu_access = false;
+
+ obj->active = 0;
+ obj->pending_gpu_write = false;
+ drm_gem_object_unreference(&obj->base);
+
+ WARN_ON(i915_verify_lists(dev));
}
/* Immediately discard the backing storage */
static void
-i915_gem_object_truncate(struct drm_gem_object *obj)
+i915_gem_object_truncate(struct drm_i915_gem_object *obj)
{
- struct drm_i915_gem_object *obj_priv = to_intel_bo(obj);
struct inode *inode;
/* Our goal here is to return as much of the memory as
@@ -1600,42 +1650,18 @@ i915_gem_object_truncate(struct drm_gem_object *obj)
* backing pages, *now*. Here we mirror the actions taken
* by shmem_delete_inode() to release the backing store.
*/
- inode = obj->filp->f_path.dentry->d_inode;
+ inode = obj->base.filp->f_path.dentry->d_inode;
truncate_inode_pages(inode->i_mapping, 0);
if (inode->i_op->truncate_range)
inode->i_op->truncate_range(inode, 0, (loff_t)-1);
- obj_priv->madv = __I915_MADV_PURGED;
+ obj->madv = __I915_MADV_PURGED;
}
static inline int
-i915_gem_object_is_purgeable(struct drm_i915_gem_object *obj_priv)
+i915_gem_object_is_purgeable(struct drm_i915_gem_object *obj)
{
- return obj_priv->madv == I915_MADV_DONTNEED;
-}
-
-static void
-i915_gem_object_move_to_inactive(struct drm_gem_object *obj)
-{
- struct drm_device *dev = obj->dev;
- drm_i915_private_t *dev_priv = dev->dev_private;
- struct drm_i915_gem_object *obj_priv = to_intel_bo(obj);
-
- if (obj_priv->pin_count != 0)
- list_move_tail(&obj_priv->mm_list, &dev_priv->mm.pinned_list);
- else
- list_move_tail(&obj_priv->mm_list, &dev_priv->mm.inactive_list);
- list_del_init(&obj_priv->ring_list);
-
- BUG_ON(!list_empty(&obj_priv->gpu_write_list));
-
- obj_priv->last_rendering_seqno = 0;
- obj_priv->ring = NULL;
- if (obj_priv->active) {
- obj_priv->active = 0;
- drm_gem_object_unreference(obj);
- }
- WARN_ON(i915_verify_lists(dev));
+ return obj->madv == I915_MADV_DONTNEED;
}
static void
@@ -1643,37 +1669,27 @@ i915_gem_process_flushing_list(struct drm_device *dev,
uint32_t flush_domains,
struct intel_ring_buffer *ring)
{
- drm_i915_private_t *dev_priv = dev->dev_private;
- struct drm_i915_gem_object *obj_priv, *next;
+ struct drm_i915_gem_object *obj, *next;
- list_for_each_entry_safe(obj_priv, next,
+ list_for_each_entry_safe(obj, next,
&ring->gpu_write_list,
gpu_write_list) {
- struct drm_gem_object *obj = &obj_priv->base;
+ if (obj->base.write_domain & flush_domains) {
+ uint32_t old_write_domain = obj->base.write_domain;
- if (obj->write_domain & flush_domains) {
- uint32_t old_write_domain = obj->write_domain;
-
- obj->write_domain = 0;
- list_del_init(&obj_priv->gpu_write_list);
- i915_gem_object_move_to_active(obj, ring);
-
- /* update the fence lru list */
- if (obj_priv->fence_reg != I915_FENCE_REG_NONE) {
- struct drm_i915_fence_reg *reg =
- &dev_priv->fence_regs[obj_priv->fence_reg];
- list_move_tail(&reg->lru_list,
- &dev_priv->mm.fence_list);
- }
+ obj->base.write_domain = 0;
+ list_del_init(&obj->gpu_write_list);
+ i915_gem_object_move_to_active(obj, ring,
+ i915_gem_next_request_seqno(dev, ring));
trace_i915_gem_object_change_domain(obj,
- obj->read_domains,
+ obj->base.read_domains,
old_write_domain);
}
}
}
-uint32_t
+int
i915_add_request(struct drm_device *dev,
struct drm_file *file,
struct drm_i915_gem_request *request,
@@ -1683,17 +1699,17 @@ i915_add_request(struct drm_device *dev,
struct drm_i915_file_private *file_priv = NULL;
uint32_t seqno;
int was_empty;
+ int ret;
+
+ BUG_ON(request == NULL);
if (file != NULL)
file_priv = file->driver_priv;
- if (request == NULL) {
- request = kzalloc(sizeof(*request), GFP_KERNEL);
- if (request == NULL)
- return 0;
- }
+ ret = ring->add_request(ring, &seqno);
+ if (ret)
+ return ret;
- seqno = ring->add_request(dev, ring, 0);
ring->outstanding_lazy_request = false;
request->seqno = seqno;
@@ -1717,26 +1733,7 @@ i915_add_request(struct drm_device *dev,
queue_delayed_work(dev_priv->wq,
&dev_priv->mm.retire_work, HZ);
}
- return seqno;
-}
-
-/**
- * Command execution barrier
- *
- * Ensures that all commands in the ring are finished
- * before signalling the CPU
- */
-static void
-i915_retire_commands(struct drm_device *dev, struct intel_ring_buffer *ring)
-{
- uint32_t flush_domains = 0;
-
- /* The sampler always gets flushed on i965 (sigh) */
- if (INTEL_INFO(dev)->gen >= 4)
- flush_domains |= I915_GEM_DOMAIN_SAMPLER;
-
- ring->flush(dev, ring,
- I915_GEM_DOMAIN_COMMAND, flush_domains);
+ return 0;
}
static inline void
@@ -1769,62 +1766,76 @@ static void i915_gem_reset_ring_lists(struct drm_i915_private *dev_priv,
}
while (!list_empty(&ring->active_list)) {
- struct drm_i915_gem_object *obj_priv;
+ struct drm_i915_gem_object *obj;
- obj_priv = list_first_entry(&ring->active_list,
- struct drm_i915_gem_object,
- ring_list);
+ obj = list_first_entry(&ring->active_list,
+ struct drm_i915_gem_object,
+ ring_list);
- obj_priv->base.write_domain = 0;
- list_del_init(&obj_priv->gpu_write_list);
- i915_gem_object_move_to_inactive(&obj_priv->base);
+ obj->base.write_domain = 0;
+ list_del_init(&obj->gpu_write_list);
+ i915_gem_object_move_to_inactive(obj);
+ }
+}
+
+static void i915_gem_reset_fences(struct drm_device *dev)
+{
+ struct drm_i915_private *dev_priv = dev->dev_private;
+ int i;
+
+ for (i = 0; i < 16; i++) {
+ struct drm_i915_fence_reg *reg = &dev_priv->fence_regs[i];
+ struct drm_i915_gem_object *obj = reg->obj;
+
+ if (!obj)
+ continue;
+
+ if (obj->tiling_mode)
+ i915_gem_release_mmap(obj);
+
+ reg->obj->fence_reg = I915_FENCE_REG_NONE;
+ reg->obj->fenced_gpu_access = false;
+ reg->obj->last_fenced_seqno = 0;
+ reg->obj->last_fenced_ring = NULL;
+ i915_gem_clear_fence_reg(dev, reg);
}
}
void i915_gem_reset(struct drm_device *dev)
{
struct drm_i915_private *dev_priv = dev->dev_private;
- struct drm_i915_gem_object *obj_priv;
+ struct drm_i915_gem_object *obj;
int i;
- i915_gem_reset_ring_lists(dev_priv, &dev_priv->render_ring);
- i915_gem_reset_ring_lists(dev_priv, &dev_priv->bsd_ring);
- i915_gem_reset_ring_lists(dev_priv, &dev_priv->blt_ring);
+ for (i = 0; i < I915_NUM_RINGS; i++)
+ i915_gem_reset_ring_lists(dev_priv, &dev_priv->ring[i]);
/* Remove anything from the flushing lists. The GPU cache is likely
* to be lost on reset along with the data, so simply move the
* lost bo to the inactive list.
*/
while (!list_empty(&dev_priv->mm.flushing_list)) {
- obj_priv = list_first_entry(&dev_priv->mm.flushing_list,
- struct drm_i915_gem_object,
- mm_list);
+ obj = list_first_entry(&dev_priv->mm.flushing_list,
+ struct drm_i915_gem_object,
+ mm_list);
- obj_priv->base.write_domain = 0;
- list_del_init(&obj_priv->gpu_write_list);
- i915_gem_object_move_to_inactive(&obj_priv->base);
+ obj->base.write_domain = 0;
+ list_del_init(&obj->gpu_write_list);
+ i915_gem_object_move_to_inactive(obj);
}
/* Move everything out of the GPU domains to ensure we do any
* necessary invalidation upon reuse.
*/
- list_for_each_entry(obj_priv,
+ list_for_each_entry(obj,
&dev_priv->mm.inactive_list,
mm_list)
{
- obj_priv->base.read_domains &= ~I915_GEM_GPU_DOMAINS;
+ obj->base.read_domains &= ~I915_GEM_GPU_DOMAINS;
}
/* The fence registers are invalidated so clear them out */
- for (i = 0; i < 16; i++) {
- struct drm_i915_fence_reg *reg;
-
- reg = &dev_priv->fence_regs[i];
- if (!reg->obj)
- continue;
-
- i915_gem_clear_fence_reg(reg->obj);
- }
+ i915_gem_reset_fences(dev);
}
/**
@@ -1836,6 +1847,7 @@ i915_gem_retire_requests_ring(struct drm_device *dev,
{
drm_i915_private_t *dev_priv = dev->dev_private;
uint32_t seqno;
+ int i;
if (!ring->status_page.page_addr ||
list_empty(&ring->request_list))
@@ -1843,7 +1855,12 @@ i915_gem_retire_requests_ring(struct drm_device *dev,
WARN_ON(i915_verify_lists(dev));
- seqno = ring->get_seqno(dev, ring);
+ seqno = ring->get_seqno(ring);
+
+ for (i = 0; i < I915_NUM_RINGS; i++)
+ if (seqno >= ring->sync_seqno[i])
+ ring->sync_seqno[i] = 0;
+
while (!list_empty(&ring->request_list)) {
struct drm_i915_gem_request *request;
@@ -1865,18 +1882,16 @@ i915_gem_retire_requests_ring(struct drm_device *dev,
* by the ringbuffer to the flushing/inactive lists as appropriate.
*/
while (!list_empty(&ring->active_list)) {
- struct drm_gem_object *obj;
- struct drm_i915_gem_object *obj_priv;
+ struct drm_i915_gem_object *obj;
- obj_priv = list_first_entry(&ring->active_list,
- struct drm_i915_gem_object,
- ring_list);
+ obj = list_first_entry(&ring->active_list,
+ struct drm_i915_gem_object,
+ ring_list);
- if (!i915_seqno_passed(seqno, obj_priv->last_rendering_seqno))
+ if (!i915_seqno_passed(seqno, obj->last_rendering_seqno))
break;
- obj = &obj_priv->base;
- if (obj->write_domain != 0)
+ if (obj->base.write_domain != 0)
i915_gem_object_move_to_flushing(obj);
else
i915_gem_object_move_to_inactive(obj);
@@ -1884,7 +1899,7 @@ i915_gem_retire_requests_ring(struct drm_device *dev,
if (unlikely(dev_priv->trace_irq_seqno &&
i915_seqno_passed(dev_priv->trace_irq_seqno, seqno))) {
- ring->user_irq_put(dev, ring);
+ ring->irq_put(ring);
dev_priv->trace_irq_seqno = 0;
}
@@ -1895,24 +1910,24 @@ void
i915_gem_retire_requests(struct drm_device *dev)
{
drm_i915_private_t *dev_priv = dev->dev_private;
+ int i;
if (!list_empty(&dev_priv->mm.deferred_free_list)) {
- struct drm_i915_gem_object *obj_priv, *tmp;
+ struct drm_i915_gem_object *obj, *next;
/* We must be careful that during unbind() we do not
* accidentally infinitely recurse into retire requests.
* Currently:
* retire -> free -> unbind -> wait -> retire_ring
*/
- list_for_each_entry_safe(obj_priv, tmp,
+ list_for_each_entry_safe(obj, next,
&dev_priv->mm.deferred_free_list,
mm_list)
- i915_gem_free_object_tail(&obj_priv->base);
+ i915_gem_free_object_tail(obj);
}
- i915_gem_retire_requests_ring(dev, &dev_priv->render_ring);
- i915_gem_retire_requests_ring(dev, &dev_priv->bsd_ring);
- i915_gem_retire_requests_ring(dev, &dev_priv->blt_ring);
+ for (i = 0; i < I915_NUM_RINGS; i++)
+ i915_gem_retire_requests_ring(dev, &dev_priv->ring[i]);
}
static void
@@ -1934,9 +1949,9 @@ i915_gem_retire_work_handler(struct work_struct *work)
i915_gem_retire_requests(dev);
if (!dev_priv->mm.suspended &&
- (!list_empty(&dev_priv->render_ring.request_list) ||
- !list_empty(&dev_priv->bsd_ring.request_list) ||
- !list_empty(&dev_priv->blt_ring.request_list)))
+ (!list_empty(&dev_priv->ring[RCS].request_list) ||
+ !list_empty(&dev_priv->ring[VCS].request_list) ||
+ !list_empty(&dev_priv->ring[BCS].request_list)))
queue_delayed_work(dev_priv->wq, &dev_priv->mm.retire_work, HZ);
mutex_unlock(&dev->struct_mutex);
}
@@ -1954,14 +1969,23 @@ i915_do_wait_request(struct drm_device *dev, uint32_t seqno,
if (atomic_read(&dev_priv->mm.wedged))
return -EAGAIN;
- if (ring->outstanding_lazy_request) {
- seqno = i915_add_request(dev, NULL, NULL, ring);
- if (seqno == 0)
+ if (seqno == ring->outstanding_lazy_request) {
+ struct drm_i915_gem_request *request;
+
+ request = kzalloc(sizeof(*request), GFP_KERNEL);
+ if (request == NULL)
return -ENOMEM;
+
+ ret = i915_add_request(dev, NULL, request, ring);
+ if (ret) {
+ kfree(request);
+ return ret;
+ }
+
+ seqno = request->seqno;
}
- BUG_ON(seqno == dev_priv->next_seqno);
- if (!i915_seqno_passed(ring->get_seqno(dev, ring), seqno)) {
+ if (!i915_seqno_passed(ring->get_seqno(ring), seqno)) {
if (HAS_PCH_SPLIT(dev))
ier = I915_READ(DEIER) | I915_READ(GTIER);
else
@@ -1975,21 +1999,23 @@ i915_do_wait_request(struct drm_device *dev, uint32_t seqno,
trace_i915_gem_request_wait_begin(dev, seqno);
- ring->waiting_gem_seqno = seqno;
- ring->user_irq_get(dev, ring);
- if (interruptible)
- ret = wait_event_interruptible(ring->irq_queue,
- i915_seqno_passed(
- ring->get_seqno(dev, ring), seqno)
- || atomic_read(&dev_priv->mm.wedged));
- else
- wait_event(ring->irq_queue,
- i915_seqno_passed(
- ring->get_seqno(dev, ring), seqno)
- || atomic_read(&dev_priv->mm.wedged));
+ ring->waiting_seqno = seqno;
+ if (ring->irq_get(ring)) {
+ if (interruptible)
+ ret = wait_event_interruptible(ring->irq_queue,
+ i915_seqno_passed(ring->get_seqno(ring), seqno)
+ || atomic_read(&dev_priv->mm.wedged));
+ else
+ wait_event(ring->irq_queue,
+ i915_seqno_passed(ring->get_seqno(ring), seqno)
+ || atomic_read(&dev_priv->mm.wedged));
- ring->user_irq_put(dev, ring);
- ring->waiting_gem_seqno = 0;
+ ring->irq_put(ring);
+ } else if (wait_for(i915_seqno_passed(ring->get_seqno(ring),
+ seqno) ||
+ atomic_read(&dev_priv->mm.wedged), 3000))
+ ret = -EBUSY;
+ ring->waiting_seqno = 0;
trace_i915_gem_request_wait_end(dev, seqno);
}
@@ -1998,7 +2024,7 @@ i915_do_wait_request(struct drm_device *dev, uint32_t seqno,
if (ret && ret != -ERESTARTSYS)
DRM_ERROR("%s returns %d (awaiting %d at %d, next %d)\n",
- __func__, ret, seqno, ring->get_seqno(dev, ring),
+ __func__, ret, seqno, ring->get_seqno(ring),
dev_priv->next_seqno);
/* Directly dispatch request retiring. While we have the work queue
@@ -2023,70 +2049,30 @@ i915_wait_request(struct drm_device *dev, uint32_t seqno,
return i915_do_wait_request(dev, seqno, 1, ring);
}
-static void
-i915_gem_flush_ring(struct drm_device *dev,
- struct drm_file *file_priv,
- struct intel_ring_buffer *ring,
- uint32_t invalidate_domains,
- uint32_t flush_domains)
-{
- ring->flush(dev, ring, invalidate_domains, flush_domains);
- i915_gem_process_flushing_list(dev, flush_domains, ring);
-}
-
-static void
-i915_gem_flush(struct drm_device *dev,
- struct drm_file *file_priv,
- uint32_t invalidate_domains,
- uint32_t flush_domains,
- uint32_t flush_rings)
-{
- drm_i915_private_t *dev_priv = dev->dev_private;
-
- if (flush_domains & I915_GEM_DOMAIN_CPU)
- drm_agp_chipset_flush(dev);
-
- if ((flush_domains | invalidate_domains) & I915_GEM_GPU_DOMAINS) {
- if (flush_rings & RING_RENDER)
- i915_gem_flush_ring(dev, file_priv,
- &dev_priv->render_ring,
- invalidate_domains, flush_domains);
- if (flush_rings & RING_BSD)
- i915_gem_flush_ring(dev, file_priv,
- &dev_priv->bsd_ring,
- invalidate_domains, flush_domains);
- if (flush_rings & RING_BLT)
- i915_gem_flush_ring(dev, file_priv,
- &dev_priv->blt_ring,
- invalidate_domains, flush_domains);
- }
-}
-
/**
* Ensures that all rendering to the object has completed and the object is
* safe to unbind from the GTT or access from the CPU.
*/
-static int
-i915_gem_object_wait_rendering(struct drm_gem_object *obj,
+int
+i915_gem_object_wait_rendering(struct drm_i915_gem_object *obj,
bool interruptible)
{
- struct drm_device *dev = obj->dev;
- struct drm_i915_gem_object *obj_priv = to_intel_bo(obj);
+ struct drm_device *dev = obj->base.dev;
int ret;
/* This function only exists to support waiting for existing rendering,
* not for emitting required flushes.
*/
- BUG_ON((obj->write_domain & I915_GEM_GPU_DOMAINS) != 0);
+ BUG_ON((obj->base.write_domain & I915_GEM_GPU_DOMAINS) != 0);
/* If there is rendering queued on the buffer being evicted, wait for
* it.
*/
- if (obj_priv->active) {
+ if (obj->active) {
ret = i915_do_wait_request(dev,
- obj_priv->last_rendering_seqno,
+ obj->last_rendering_seqno,
interruptible,
- obj_priv->ring);
+ obj->ring);
if (ret)
return ret;
}
@@ -2098,17 +2084,14 @@ i915_gem_object_wait_rendering(struct drm_gem_object *obj,
* Unbinds an object from the GTT aperture.
*/
int
-i915_gem_object_unbind(struct drm_gem_object *obj)
+i915_gem_object_unbind(struct drm_i915_gem_object *obj)
{
- struct drm_device *dev = obj->dev;
- struct drm_i915_private *dev_priv = dev->dev_private;
- struct drm_i915_gem_object *obj_priv = to_intel_bo(obj);
int ret = 0;
- if (obj_priv->gtt_space == NULL)
+ if (obj->gtt_space == NULL)
return 0;
- if (obj_priv->pin_count != 0) {
+ if (obj->pin_count != 0) {
DRM_ERROR("Attempting to unbind pinned buffer\n");
return -EINVAL;
}
@@ -2131,27 +2114,27 @@ i915_gem_object_unbind(struct drm_gem_object *obj)
*/
if (ret) {
i915_gem_clflush_object(obj);
- obj->read_domains = obj->write_domain = I915_GEM_DOMAIN_CPU;
+ obj->base.read_domains = obj->base.write_domain = I915_GEM_DOMAIN_CPU;
}
/* release the fence reg _after_ flushing */
- if (obj_priv->fence_reg != I915_FENCE_REG_NONE)
- i915_gem_clear_fence_reg(obj);
-
- drm_unbind_agp(obj_priv->agp_mem);
- drm_free_agp(obj_priv->agp_mem, obj->size / PAGE_SIZE);
+ ret = i915_gem_object_put_fence(obj);
+ if (ret == -ERESTARTSYS)
+ return ret;
- i915_gem_object_put_pages(obj);
- BUG_ON(obj_priv->pages_refcount);
+ i915_gem_gtt_unbind_object(obj);
+ i915_gem_object_put_pages_gtt(obj);
- i915_gem_info_remove_gtt(dev_priv, obj->size);
- list_del_init(&obj_priv->mm_list);
+ list_del_init(&obj->gtt_list);
+ list_del_init(&obj->mm_list);
+ /* Avoid an unnecessary call to unbind on rebind. */
+ obj->map_and_fenceable = true;
- drm_mm_put_block(obj_priv->gtt_space);
- obj_priv->gtt_space = NULL;
- obj_priv->gtt_offset = 0;
+ drm_mm_put_block(obj->gtt_space);
+ obj->gtt_space = NULL;
+ obj->gtt_offset = 0;
- if (i915_gem_object_is_purgeable(obj_priv))
+ if (i915_gem_object_is_purgeable(obj))
i915_gem_object_truncate(obj);
trace_i915_gem_object_unbind(obj);
@@ -2159,14 +2142,25 @@ i915_gem_object_unbind(struct drm_gem_object *obj)
return ret;
}
+void
+i915_gem_flush_ring(struct drm_device *dev,
+ struct intel_ring_buffer *ring,
+ uint32_t invalidate_domains,
+ uint32_t flush_domains)
+{
+ ring->flush(ring, invalidate_domains, flush_domains);
+ i915_gem_process_flushing_list(dev, flush_domains, ring);
+}
+
static int i915_ring_idle(struct drm_device *dev,
struct intel_ring_buffer *ring)
{
if (list_empty(&ring->gpu_write_list) && list_empty(&ring->active_list))
return 0;
- i915_gem_flush_ring(dev, NULL, ring,
- I915_GEM_GPU_DOMAINS, I915_GEM_GPU_DOMAINS);
+ if (!list_empty(&ring->gpu_write_list))
+ i915_gem_flush_ring(dev, ring,
+ I915_GEM_GPU_DOMAINS, I915_GEM_GPU_DOMAINS);
return i915_wait_request(dev,
i915_gem_next_request_seqno(dev, ring),
ring);
@@ -2177,7 +2171,7 @@ i915_gpu_idle(struct drm_device *dev)
{
drm_i915_private_t *dev_priv = dev->dev_private;
bool lists_empty;
- int ret;
+ int ret, i;
lists_empty = (list_empty(&dev_priv->mm.flushing_list) &&
list_empty(&dev_priv->mm.active_list));
@@ -2185,258 +2179,296 @@ i915_gpu_idle(struct drm_device *dev)
return 0;
/* Flush everything onto the inactive list. */
- ret = i915_ring_idle(dev, &dev_priv->render_ring);
- if (ret)
- return ret;
-
- ret = i915_ring_idle(dev, &dev_priv->bsd_ring);
- if (ret)
- return ret;
-
- ret = i915_ring_idle(dev, &dev_priv->blt_ring);
- if (ret)
- return ret;
-
- return 0;
-}
-
-static int
-i915_gem_object_get_pages(struct drm_gem_object *obj,
- gfp_t gfpmask)
-{
- struct drm_i915_gem_object *obj_priv = to_intel_bo(obj);
- int page_count, i;
- struct address_space *mapping;
- struct inode *inode;
- struct page *page;
-
- BUG_ON(obj_priv->pages_refcount
- == DRM_I915_GEM_OBJECT_MAX_PAGES_REFCOUNT);
-
- if (obj_priv->pages_refcount++ != 0)
- return 0;
-
- /* Get the list of pages out of our struct file. They'll be pinned
- * at this point until we release them.
- */
- page_count = obj->size / PAGE_SIZE;
- BUG_ON(obj_priv->pages != NULL);
- obj_priv->pages = drm_calloc_large(page_count, sizeof(struct page *));
- if (obj_priv->pages == NULL) {
- obj_priv->pages_refcount--;
- return -ENOMEM;
- }
-
- inode = obj->filp->f_path.dentry->d_inode;
- mapping = inode->i_mapping;
- for (i = 0; i < page_count; i++) {
- page = read_cache_page_gfp(mapping, i,
- GFP_HIGHUSER |
- __GFP_COLD |
- __GFP_RECLAIMABLE |
- gfpmask);
- if (IS_ERR(page))
- goto err_pages;
-
- obj_priv->pages[i] = page;
+ for (i = 0; i < I915_NUM_RINGS; i++) {
+ ret = i915_ring_idle(dev, &dev_priv->ring[i]);
+ if (ret)
+ return ret;
}
- if (obj_priv->tiling_mode != I915_TILING_NONE)
- i915_gem_object_do_bit_17_swizzle(obj);
-
return 0;
-
-err_pages:
- while (i--)
- page_cache_release(obj_priv->pages[i]);
-
- drm_free_large(obj_priv->pages);
- obj_priv->pages = NULL;
- obj_priv->pages_refcount--;
- return PTR_ERR(page);
}
-static void sandybridge_write_fence_reg(struct drm_i915_fence_reg *reg)
+static int sandybridge_write_fence_reg(struct drm_i915_gem_object *obj,
+ struct intel_ring_buffer *pipelined)
{
- struct drm_gem_object *obj = reg->obj;
- struct drm_device *dev = obj->dev;
+ struct drm_device *dev = obj->base.dev;
drm_i915_private_t *dev_priv = dev->dev_private;
- struct drm_i915_gem_object *obj_priv = to_intel_bo(obj);
- int regnum = obj_priv->fence_reg;
+ u32 size = obj->gtt_space->size;
+ int regnum = obj->fence_reg;
uint64_t val;
- val = (uint64_t)((obj_priv->gtt_offset + obj->size - 4096) &
- 0xfffff000) << 32;
- val |= obj_priv->gtt_offset & 0xfffff000;
- val |= (uint64_t)((obj_priv->stride / 128) - 1) <<
+ val = (uint64_t)((obj->gtt_offset + size - 4096) &
+ 0xfffff000) << 32;
+ val |= obj->gtt_offset & 0xfffff000;
+ val |= (uint64_t)((obj->stride / 128) - 1) <<
SANDYBRIDGE_FENCE_PITCH_SHIFT;
- if (obj_priv->tiling_mode == I915_TILING_Y)
+ if (obj->tiling_mode == I915_TILING_Y)
val |= 1 << I965_FENCE_TILING_Y_SHIFT;
val |= I965_FENCE_REG_VALID;
- I915_WRITE64(FENCE_REG_SANDYBRIDGE_0 + (regnum * 8), val);
+ if (pipelined) {
+ int ret = intel_ring_begin(pipelined, 6);
+ if (ret)
+ return ret;
+
+ intel_ring_emit(pipelined, MI_NOOP);
+ intel_ring_emit(pipelined, MI_LOAD_REGISTER_IMM(2));
+ intel_ring_emit(pipelined, FENCE_REG_SANDYBRIDGE_0 + regnum*8);
+ intel_ring_emit(pipelined, (u32)val);
+ intel_ring_emit(pipelined, FENCE_REG_SANDYBRIDGE_0 + regnum*8 + 4);
+ intel_ring_emit(pipelined, (u32)(val >> 32));
+ intel_ring_advance(pipelined);
+ } else
+ I915_WRITE64(FENCE_REG_SANDYBRIDGE_0 + regnum * 8, val);
+
+ return 0;
}
-static void i965_write_fence_reg(struct drm_i915_fence_reg *reg)
+static int i965_write_fence_reg(struct drm_i915_gem_object *obj,
+ struct intel_ring_buffer *pipelined)
{
- struct drm_gem_object *obj = reg->obj;
- struct drm_device *dev = obj->dev;
+ struct drm_device *dev = obj->base.dev;
drm_i915_private_t *dev_priv = dev->dev_private;
- struct drm_i915_gem_object *obj_priv = to_intel_bo(obj);
- int regnum = obj_priv->fence_reg;
+ u32 size = obj->gtt_space->size;
+ int regnum = obj->fence_reg;
uint64_t val;
- val = (uint64_t)((obj_priv->gtt_offset + obj->size - 4096) &
+ val = (uint64_t)((obj->gtt_offset + size - 4096) &
0xfffff000) << 32;
- val |= obj_priv->gtt_offset & 0xfffff000;
- val |= ((obj_priv->stride / 128) - 1) << I965_FENCE_PITCH_SHIFT;
- if (obj_priv->tiling_mode == I915_TILING_Y)
+ val |= obj->gtt_offset & 0xfffff000;
+ val |= ((obj->stride / 128) - 1) << I965_FENCE_PITCH_SHIFT;
+ if (obj->tiling_mode == I915_TILING_Y)
val |= 1 << I965_FENCE_TILING_Y_SHIFT;
val |= I965_FENCE_REG_VALID;
- I915_WRITE64(FENCE_REG_965_0 + (regnum * 8), val);
+ if (pipelined) {
+ int ret = intel_ring_begin(pipelined, 6);
+ if (ret)
+ return ret;
+
+ intel_ring_emit(pipelined, MI_NOOP);
+ intel_ring_emit(pipelined, MI_LOAD_REGISTER_IMM(2));
+ intel_ring_emit(pipelined, FENCE_REG_965_0 + regnum*8);
+ intel_ring_emit(pipelined, (u32)val);
+ intel_ring_emit(pipelined, FENCE_REG_965_0 + regnum*8 + 4);
+ intel_ring_emit(pipelined, (u32)(val >> 32));
+ intel_ring_advance(pipelined);
+ } else
+ I915_WRITE64(FENCE_REG_965_0 + regnum * 8, val);
+
+ return 0;
}
-static void i915_write_fence_reg(struct drm_i915_fence_reg *reg)
+static int i915_write_fence_reg(struct drm_i915_gem_object *obj,
+ struct intel_ring_buffer *pipelined)
{
- struct drm_gem_object *obj = reg->obj;
- struct drm_device *dev = obj->dev;
+ struct drm_device *dev = obj->base.dev;
drm_i915_private_t *dev_priv = dev->dev_private;
- struct drm_i915_gem_object *obj_priv = to_intel_bo(obj);
- int regnum = obj_priv->fence_reg;
+ u32 size = obj->gtt_space->size;
+ u32 fence_reg, val, pitch_val;
int tile_width;
- uint32_t fence_reg, val;
- uint32_t pitch_val;
- if ((obj_priv->gtt_offset & ~I915_FENCE_START_MASK) ||
- (obj_priv->gtt_offset & (obj->size - 1))) {
- WARN(1, "%s: object 0x%08x not 1M or size (0x%zx) aligned\n",
- __func__, obj_priv->gtt_offset, obj->size);
- return;
- }
+ if (WARN((obj->gtt_offset & ~I915_FENCE_START_MASK) ||
+ (size & -size) != size ||
+ (obj->gtt_offset & (size - 1)),
+ "object 0x%08x [fenceable? %d] not 1M or pot-size (0x%08x) aligned\n",
+ obj->gtt_offset, obj->map_and_fenceable, size))
+ return -EINVAL;
- if (obj_priv->tiling_mode == I915_TILING_Y &&
- HAS_128_BYTE_Y_TILING(dev))
+ if (obj->tiling_mode == I915_TILING_Y && HAS_128_BYTE_Y_TILING(dev))
tile_width = 128;
else
tile_width = 512;
/* Note: pitch better be a power of two tile widths */
- pitch_val = obj_priv->stride / tile_width;
+ pitch_val = obj->stride / tile_width;
pitch_val = ffs(pitch_val) - 1;
- if (obj_priv->tiling_mode == I915_TILING_Y &&
- HAS_128_BYTE_Y_TILING(dev))
- WARN_ON(pitch_val > I830_FENCE_MAX_PITCH_VAL);
- else
- WARN_ON(pitch_val > I915_FENCE_MAX_PITCH_VAL);
-
- val = obj_priv->gtt_offset;
- if (obj_priv->tiling_mode == I915_TILING_Y)
+ val = obj->gtt_offset;
+ if (obj->tiling_mode == I915_TILING_Y)
val |= 1 << I830_FENCE_TILING_Y_SHIFT;
- val |= I915_FENCE_SIZE_BITS(obj->size);
+ val |= I915_FENCE_SIZE_BITS(size);
val |= pitch_val << I830_FENCE_PITCH_SHIFT;
val |= I830_FENCE_REG_VALID;
- if (regnum < 8)
- fence_reg = FENCE_REG_830_0 + (regnum * 4);
+ fence_reg = obj->fence_reg;
+ if (fence_reg < 8)
+ fence_reg = FENCE_REG_830_0 + fence_reg * 4;
else
- fence_reg = FENCE_REG_945_8 + ((regnum - 8) * 4);
- I915_WRITE(fence_reg, val);
+ fence_reg = FENCE_REG_945_8 + (fence_reg - 8) * 4;
+
+ if (pipelined) {
+ int ret = intel_ring_begin(pipelined, 4);
+ if (ret)
+ return ret;
+
+ intel_ring_emit(pipelined, MI_NOOP);
+ intel_ring_emit(pipelined, MI_LOAD_REGISTER_IMM(1));
+ intel_ring_emit(pipelined, fence_reg);
+ intel_ring_emit(pipelined, val);
+ intel_ring_advance(pipelined);
+ } else
+ I915_WRITE(fence_reg, val);
+
+ return 0;
}
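The pitch field on these parts encodes log2 of the stride in tiles; as a worked example with illustrative values, an X-tiled bo with a 2048-byte stride uses tile_width = 512, so pitch_val = ffs(2048 / 512) - 1 = ffs(4) - 1 = 2.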
-static void i830_write_fence_reg(struct drm_i915_fence_reg *reg)
+static int i830_write_fence_reg(struct drm_i915_gem_object *obj,
+ struct intel_ring_buffer *pipelined)
{
- struct drm_gem_object *obj = reg->obj;
- struct drm_device *dev = obj->dev;
+ struct drm_device *dev = obj->base.dev;
drm_i915_private_t *dev_priv = dev->dev_private;
- struct drm_i915_gem_object *obj_priv = to_intel_bo(obj);
- int regnum = obj_priv->fence_reg;
+ u32 size = obj->gtt_space->size;
+ int regnum = obj->fence_reg;
uint32_t val;
uint32_t pitch_val;
- uint32_t fence_size_bits;
- if ((obj_priv->gtt_offset & ~I830_FENCE_START_MASK) ||
- (obj_priv->gtt_offset & (obj->size - 1))) {
- WARN(1, "%s: object 0x%08x not 512K or size aligned\n",
- __func__, obj_priv->gtt_offset);
- return;
- }
+ if (WARN((obj->gtt_offset & ~I830_FENCE_START_MASK) ||
+ (size & -size) != size ||
+ (obj->gtt_offset & (size - 1)),
+ "object 0x%08x not 512K or pot-size 0x%08x aligned\n",
+ obj->gtt_offset, size))
+ return -EINVAL;
- pitch_val = obj_priv->stride / 128;
+ pitch_val = obj->stride / 128;
pitch_val = ffs(pitch_val) - 1;
- WARN_ON(pitch_val > I830_FENCE_MAX_PITCH_VAL);
- val = obj_priv->gtt_offset;
- if (obj_priv->tiling_mode == I915_TILING_Y)
+ val = obj->gtt_offset;
+ if (obj->tiling_mode == I915_TILING_Y)
val |= 1 << I830_FENCE_TILING_Y_SHIFT;
- fence_size_bits = I830_FENCE_SIZE_BITS(obj->size);
- WARN_ON(fence_size_bits & ~0x00000f00);
- val |= fence_size_bits;
+ val |= I830_FENCE_SIZE_BITS(size);
val |= pitch_val << I830_FENCE_PITCH_SHIFT;
val |= I830_FENCE_REG_VALID;
- I915_WRITE(FENCE_REG_830_0 + (regnum * 4), val);
+ if (pipelined) {
+ int ret = intel_ring_begin(pipelined, 4);
+ if (ret)
+ return ret;
+
+ intel_ring_emit(pipelined, MI_NOOP);
+ intel_ring_emit(pipelined, MI_LOAD_REGISTER_IMM(1));
+ intel_ring_emit(pipelined, FENCE_REG_830_0 + regnum*4);
+ intel_ring_emit(pipelined, val);
+ intel_ring_advance(pipelined);
+ } else
+ I915_WRITE(FENCE_REG_830_0 + regnum * 4, val);
+
+ return 0;
}
-static int i915_find_fence_reg(struct drm_device *dev,
- bool interruptible)
+static bool ring_passed_seqno(struct intel_ring_buffer *ring, u32 seqno)
+{
+ return i915_seqno_passed(ring->get_seqno(ring), seqno);
+}
+
+static int
+i915_gem_object_flush_fence(struct drm_i915_gem_object *obj,
+ struct intel_ring_buffer *pipelined,
+ bool interruptible)
+{
+ int ret;
+
+ if (obj->fenced_gpu_access) {
+ if (obj->base.write_domain & I915_GEM_GPU_DOMAINS)
+ i915_gem_flush_ring(obj->base.dev,
+ obj->last_fenced_ring,
+ 0, obj->base.write_domain);
+
+ obj->fenced_gpu_access = false;
+ }
+
+ if (obj->last_fenced_seqno && pipelined != obj->last_fenced_ring) {
+ if (!ring_passed_seqno(obj->last_fenced_ring,
+ obj->last_fenced_seqno)) {
+ ret = i915_do_wait_request(obj->base.dev,
+ obj->last_fenced_seqno,
+ interruptible,
+ obj->last_fenced_ring);
+ if (ret)
+ return ret;
+ }
+
+ obj->last_fenced_seqno = 0;
+ obj->last_fenced_ring = NULL;
+ }
+
+ return 0;
+}
+
+int
+i915_gem_object_put_fence(struct drm_i915_gem_object *obj)
+{
+ int ret;
+
+ if (obj->tiling_mode)
+ i915_gem_release_mmap(obj);
+
+ ret = i915_gem_object_flush_fence(obj, NULL, true);
+ if (ret)
+ return ret;
+
+ if (obj->fence_reg != I915_FENCE_REG_NONE) {
+ struct drm_i915_private *dev_priv = obj->base.dev->dev_private;
+ i915_gem_clear_fence_reg(obj->base.dev,
+ &dev_priv->fence_regs[obj->fence_reg]);
+
+ obj->fence_reg = I915_FENCE_REG_NONE;
+ }
+
+ return 0;
+}
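The caller pattern under struct_mutex mirrors the fault handler earlier in this patch (sketch, error handling elided):

	/* drop the fence before untiled GTT access, (re)acquire it for tiled */
	if (obj->tiling_mode == I915_TILING_NONE)
		ret = i915_gem_object_put_fence(obj);
	else
		ret = i915_gem_object_get_fence(obj, NULL, true);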
+
+static struct drm_i915_fence_reg *
+i915_find_fence_reg(struct drm_device *dev,
+ struct intel_ring_buffer *pipelined)
{
- struct drm_i915_fence_reg *reg = NULL;
- struct drm_i915_gem_object *obj_priv = NULL;
struct drm_i915_private *dev_priv = dev->dev_private;
- struct drm_gem_object *obj = NULL;
- int i, avail, ret;
+ struct drm_i915_fence_reg *reg, *first, *avail;
+ int i;
/* First try to find a free reg */
- avail = 0;
+ avail = NULL;
for (i = dev_priv->fence_reg_start; i < dev_priv->num_fence_regs; i++) {
reg = &dev_priv->fence_regs[i];
if (!reg->obj)
- return i;
+ return reg;
- obj_priv = to_intel_bo(reg->obj);
- if (!obj_priv->pin_count)
- avail++;
+ if (!reg->obj->pin_count)
+ avail = reg;
}
- if (avail == 0)
- return -ENOSPC;
+ if (avail == NULL)
+ return NULL;
/* None available, try to steal one or wait for a user to finish */
- i = I915_FENCE_REG_NONE;
- list_for_each_entry(reg, &dev_priv->mm.fence_list,
- lru_list) {
- obj = reg->obj;
- obj_priv = to_intel_bo(obj);
-
- if (obj_priv->pin_count)
+ avail = first = NULL;
+ list_for_each_entry(reg, &dev_priv->mm.fence_list, lru_list) {
+ if (reg->obj->pin_count)
continue;
- /* found one! */
- i = obj_priv->fence_reg;
- break;
- }
+ if (first == NULL)
+ first = reg;
- BUG_ON(i == I915_FENCE_REG_NONE);
+ if (!pipelined ||
+ !reg->obj->last_fenced_ring ||
+ reg->obj->last_fenced_ring == pipelined) {
+ avail = reg;
+ break;
+ }
+ }
- /* We only have a reference on obj from the active list. put_fence_reg
- * might drop that one, causing a use-after-free in it. So hold a
- * private reference to obj like the other callers of put_fence_reg
- * (set_tiling ioctl) do. */
- drm_gem_object_reference(obj);
- ret = i915_gem_object_put_fence_reg(obj, interruptible);
- drm_gem_object_unreference(obj);
- if (ret != 0)
- return ret;
+ if (avail == NULL)
+ avail = first;
- return i;
+ return avail;
}
/**
- * i915_gem_object_get_fence_reg - set up a fence reg for an object
+ * i915_gem_object_get_fence - set up a fence reg for an object
* @obj: object to map through a fence reg
+ * @pipelined: ring on which to queue the change, or NULL for CPU access
+ * @interruptible: may waits for the register to retire be interrupted?
*
* When mapping objects through the GTT, userspace wants to be able to write
* to them without having to worry about swizzling if the object is tiled.
@@ -2448,72 +2480,138 @@ static int i915_find_fence_reg(struct drm_device *dev,
* and tiling format.
*/
int
-i915_gem_object_get_fence_reg(struct drm_gem_object *obj,
- bool interruptible)
+i915_gem_object_get_fence(struct drm_i915_gem_object *obj,
+ struct intel_ring_buffer *pipelined,
+ bool interruptible)
{
- struct drm_device *dev = obj->dev;
+ struct drm_device *dev = obj->base.dev;
struct drm_i915_private *dev_priv = dev->dev_private;
- struct drm_i915_gem_object *obj_priv = to_intel_bo(obj);
- struct drm_i915_fence_reg *reg = NULL;
+ struct drm_i915_fence_reg *reg;
int ret;
- /* Just update our place in the LRU if our fence is getting used. */
- if (obj_priv->fence_reg != I915_FENCE_REG_NONE) {
- reg = &dev_priv->fence_regs[obj_priv->fence_reg];
+ /* XXX disable pipelining. There are bugs. Shocking. */
+ pipelined = NULL;
+
+ /* Just update our place in the LRU if our fence is getting reused. */
+ if (obj->fence_reg != I915_FENCE_REG_NONE) {
+ reg = &dev_priv->fence_regs[obj->fence_reg];
list_move_tail(&reg->lru_list, &dev_priv->mm.fence_list);
+
+ if (!obj->fenced_gpu_access && !obj->last_fenced_seqno)
+ pipelined = NULL;
+
+ if (!pipelined) {
+ if (reg->setup_seqno) {
+ if (!ring_passed_seqno(obj->last_fenced_ring,
+ reg->setup_seqno)) {
+ ret = i915_do_wait_request(obj->base.dev,
+ reg->setup_seqno,
+ interruptible,
+ obj->last_fenced_ring);
+ if (ret)
+ return ret;
+ }
+
+ reg->setup_seqno = 0;
+ }
+ } else if (obj->last_fenced_ring &&
+ obj->last_fenced_ring != pipelined) {
+ ret = i915_gem_object_flush_fence(obj,
+ pipelined,
+ interruptible);
+ if (ret)
+ return ret;
+ } else if (obj->tiling_changed) {
+ if (obj->fenced_gpu_access) {
+ if (obj->base.write_domain & I915_GEM_GPU_DOMAINS)
+ i915_gem_flush_ring(obj->base.dev, obj->ring,
+ 0, obj->base.write_domain);
+
+ obj->fenced_gpu_access = false;
+ }
+ }
+
+ if (!obj->fenced_gpu_access && !obj->last_fenced_seqno)
+ pipelined = NULL;
+ BUG_ON(!pipelined && reg->setup_seqno);
+
+ if (obj->tiling_changed) {
+ if (pipelined) {
+ reg->setup_seqno =
+ i915_gem_next_request_seqno(dev, pipelined);
+ obj->last_fenced_seqno = reg->setup_seqno;
+ obj->last_fenced_ring = pipelined;
+ }
+ goto update;
+ }
+
return 0;
}
- switch (obj_priv->tiling_mode) {
- case I915_TILING_NONE:
- WARN(1, "allocating a fence for non-tiled object?\n");
- break;
- case I915_TILING_X:
- if (!obj_priv->stride)
- return -EINVAL;
- WARN((obj_priv->stride & (512 - 1)),
- "object 0x%08x is X tiled but has non-512B pitch\n",
- obj_priv->gtt_offset);
- break;
- case I915_TILING_Y:
- if (!obj_priv->stride)
- return -EINVAL;
- WARN((obj_priv->stride & (128 - 1)),
- "object 0x%08x is Y tiled but has non-128B pitch\n",
- obj_priv->gtt_offset);
- break;
- }
+ reg = i915_find_fence_reg(dev, pipelined);
+ if (reg == NULL)
+ return -ENOSPC;
- ret = i915_find_fence_reg(dev, interruptible);
- if (ret < 0)
+ ret = i915_gem_object_flush_fence(obj, pipelined, interruptible);
+ if (ret)
return ret;
- obj_priv->fence_reg = ret;
- reg = &dev_priv->fence_regs[obj_priv->fence_reg];
- list_add_tail(&reg->lru_list, &dev_priv->mm.fence_list);
+ if (reg->obj) {
+ struct drm_i915_gem_object *old = reg->obj;
+
+ drm_gem_object_reference(&old->base);
+
+ if (old->tiling_mode)
+ i915_gem_release_mmap(old);
+
+ ret = i915_gem_object_flush_fence(old,
+ pipelined,
+ interruptible);
+ if (ret) {
+ drm_gem_object_unreference(&old->base);
+ return ret;
+ }
+
+ if (old->last_fenced_seqno == 0 && obj->last_fenced_seqno == 0)
+ pipelined = NULL;
+
+ old->fence_reg = I915_FENCE_REG_NONE;
+ old->last_fenced_ring = pipelined;
+ old->last_fenced_seqno =
+ pipelined ? i915_gem_next_request_seqno(dev, pipelined) : 0;
+
+ drm_gem_object_unreference(&old->base);
+ } else if (obj->last_fenced_seqno == 0)
+ pipelined = NULL;
reg->obj = obj;
+ list_move_tail(&reg->lru_list, &dev_priv->mm.fence_list);
+ obj->fence_reg = reg - dev_priv->fence_regs;
+ obj->last_fenced_ring = pipelined;
+ reg->setup_seqno =
+ pipelined ? i915_gem_next_request_seqno(dev, pipelined) : 0;
+ obj->last_fenced_seqno = reg->setup_seqno;
+
+update:
+ obj->tiling_changed = false;
switch (INTEL_INFO(dev)->gen) {
case 6:
- sandybridge_write_fence_reg(reg);
+ ret = sandybridge_write_fence_reg(obj, pipelined);
break;
case 5:
case 4:
- i965_write_fence_reg(reg);
+ ret = i965_write_fence_reg(obj, pipelined);
break;
case 3:
- i915_write_fence_reg(reg);
+ ret = i915_write_fence_reg(obj, pipelined);
break;
case 2:
- i830_write_fence_reg(reg);
+ ret = i830_write_fence_reg(obj, pipelined);
break;
}
- trace_i915_gem_object_get_fence(obj, obj_priv->fence_reg,
- obj_priv->tiling_mode);
-
- return 0;
+ return ret;
}
/**
@@ -2521,154 +2619,127 @@ i915_gem_object_get_fence_reg(struct drm_gem_object *obj,
* @obj: object to clear
*
* Zeroes out the fence register itself and clears out the associated
- * data structures in dev_priv and obj_priv.
+ * data structures in dev_priv and obj.
*/
static void
-i915_gem_clear_fence_reg(struct drm_gem_object *obj)
+i915_gem_clear_fence_reg(struct drm_device *dev,
+ struct drm_i915_fence_reg *reg)
{
- struct drm_device *dev = obj->dev;
drm_i915_private_t *dev_priv = dev->dev_private;
- struct drm_i915_gem_object *obj_priv = to_intel_bo(obj);
- struct drm_i915_fence_reg *reg =
- &dev_priv->fence_regs[obj_priv->fence_reg];
- uint32_t fence_reg;
+ uint32_t fence_reg = reg - dev_priv->fence_regs;
switch (INTEL_INFO(dev)->gen) {
case 6:
- I915_WRITE64(FENCE_REG_SANDYBRIDGE_0 +
- (obj_priv->fence_reg * 8), 0);
+ I915_WRITE64(FENCE_REG_SANDYBRIDGE_0 + fence_reg*8, 0);
break;
case 5:
case 4:
- I915_WRITE64(FENCE_REG_965_0 + (obj_priv->fence_reg * 8), 0);
+ I915_WRITE64(FENCE_REG_965_0 + fence_reg*8, 0);
break;
case 3:
- if (obj_priv->fence_reg >= 8)
- fence_reg = FENCE_REG_945_8 + (obj_priv->fence_reg - 8) * 4;
+ if (fence_reg >= 8)
+ fence_reg = FENCE_REG_945_8 + (fence_reg - 8) * 4;
else
case 2:
- fence_reg = FENCE_REG_830_0 + obj_priv->fence_reg * 4;
+ fence_reg = FENCE_REG_830_0 + fence_reg * 4;
I915_WRITE(fence_reg, 0);
break;
}
- reg->obj = NULL;
- obj_priv->fence_reg = I915_FENCE_REG_NONE;
list_del_init(&reg->lru_list);
-}
-
-/**
- * i915_gem_object_put_fence_reg - waits on outstanding fenced access
- * to the buffer to finish, and then resets the fence register.
- * @obj: tiled object holding a fence register.
- * @bool: whether the wait upon the fence is interruptible
- *
- * Zeroes out the fence register itself and clears out the associated
- * data structures in dev_priv and obj_priv.
- */
-int
-i915_gem_object_put_fence_reg(struct drm_gem_object *obj,
- bool interruptible)
-{
- struct drm_device *dev = obj->dev;
- struct drm_i915_private *dev_priv = dev->dev_private;
- struct drm_i915_gem_object *obj_priv = to_intel_bo(obj);
- struct drm_i915_fence_reg *reg;
-
- if (obj_priv->fence_reg == I915_FENCE_REG_NONE)
- return 0;
-
- /* If we've changed tiling, GTT-mappings of the object
- * need to re-fault to ensure that the correct fence register
- * setup is in place.
- */
- i915_gem_release_mmap(obj);
-
- /* On the i915, GPU access to tiled buffers is via a fence,
- * therefore we must wait for any outstanding access to complete
- * before clearing the fence.
- */
- reg = &dev_priv->fence_regs[obj_priv->fence_reg];
- if (reg->gpu) {
- int ret;
-
- ret = i915_gem_object_flush_gpu_write_domain(obj);
- if (ret)
- return ret;
-
- ret = i915_gem_object_wait_rendering(obj, interruptible);
- if (ret)
- return ret;
-
- reg->gpu = false;
- }
-
- i915_gem_object_flush_gtt_write_domain(obj);
- i915_gem_clear_fence_reg(obj);
-
- return 0;
+ reg->obj = NULL;
+ reg->setup_seqno = 0;
}
/**
* Finds free space in the GTT aperture and binds the object there.
*/
static int
-i915_gem_object_bind_to_gtt(struct drm_gem_object *obj, unsigned alignment)
+i915_gem_object_bind_to_gtt(struct drm_i915_gem_object *obj,
+ unsigned alignment,
+ bool map_and_fenceable)
{
- struct drm_device *dev = obj->dev;
+ struct drm_device *dev = obj->base.dev;
drm_i915_private_t *dev_priv = dev->dev_private;
- struct drm_i915_gem_object *obj_priv = to_intel_bo(obj);
struct drm_mm_node *free_space;
- gfp_t gfpmask = __GFP_NORETRY | __GFP_NOWARN;
+ gfp_t gfpmask = __GFP_NORETRY | __GFP_NOWARN;
+ u32 size, fence_size, fence_alignment, unfenced_alignment;
+ bool mappable, fenceable;
int ret;
- if (obj_priv->madv != I915_MADV_WILLNEED) {
+ if (obj->madv != I915_MADV_WILLNEED) {
DRM_ERROR("Attempting to bind a purgeable object\n");
return -EINVAL;
}
+ fence_size = i915_gem_get_gtt_size(obj);
+ fence_alignment = i915_gem_get_gtt_alignment(obj);
+ unfenced_alignment = i915_gem_get_unfenced_gtt_alignment(obj);
+
if (alignment == 0)
- alignment = i915_gem_get_gtt_alignment(obj);
- if (alignment & (i915_gem_get_gtt_alignment(obj) - 1)) {
+ alignment = map_and_fenceable ? fence_alignment :
+ unfenced_alignment;
+ if (map_and_fenceable && alignment & (fence_alignment - 1)) {
DRM_ERROR("Invalid object alignment requested %u\n", alignment);
return -EINVAL;
}
+ size = map_and_fenceable ? fence_size : obj->base.size;
+
/* If the object is bigger than the entire aperture, reject it early
* before evicting everything in a vain attempt to find space.
*/
- if (obj->size > dev_priv->mm.gtt_total) {
+ if (obj->base.size >
+ (map_and_fenceable ? dev_priv->mm.gtt_mappable_end : dev_priv->mm.gtt_total)) {
DRM_ERROR("Attempting to bind an object larger than the aperture\n");
return -E2BIG;
}
search_free:
- free_space = drm_mm_search_free(&dev_priv->mm.gtt_space,
- obj->size, alignment, 0);
- if (free_space != NULL)
- obj_priv->gtt_space = drm_mm_get_block(free_space, obj->size,
- alignment);
- if (obj_priv->gtt_space == NULL) {
+ if (map_and_fenceable)
+ free_space =
+ drm_mm_search_free_in_range(&dev_priv->mm.gtt_space,
+ size, alignment, 0,
+ dev_priv->mm.gtt_mappable_end,
+ 0);
+ else
+ free_space = drm_mm_search_free(&dev_priv->mm.gtt_space,
+ size, alignment, 0);
+
+ if (free_space != NULL) {
+ if (map_and_fenceable)
+ obj->gtt_space =
+ drm_mm_get_block_range_generic(free_space,
+ size, alignment, 0,
+ dev_priv->mm.gtt_mappable_end,
+ 0);
+ else
+ obj->gtt_space =
+ drm_mm_get_block(free_space, size, alignment);
+ }
+ if (obj->gtt_space == NULL) {
/* If the gtt is empty and we're still having trouble
* fitting our object in, we're out of memory.
*/
- ret = i915_gem_evict_something(dev, obj->size, alignment);
+ ret = i915_gem_evict_something(dev, size, alignment,
+ map_and_fenceable);
if (ret)
return ret;
goto search_free;
}
- ret = i915_gem_object_get_pages(obj, gfpmask);
+ ret = i915_gem_object_get_pages_gtt(obj, gfpmask);
if (ret) {
- drm_mm_put_block(obj_priv->gtt_space);
- obj_priv->gtt_space = NULL;
+ drm_mm_put_block(obj->gtt_space);
+ obj->gtt_space = NULL;
if (ret == -ENOMEM) {
/* first try to clear up some space from the GTT */
- ret = i915_gem_evict_something(dev, obj->size,
- alignment);
+ ret = i915_gem_evict_something(dev, size,
+ alignment,
+ map_and_fenceable);
if (ret) {
/* now try to shrink everyone else */
if (gfpmask) {
@@ -2685,122 +2756,113 @@ i915_gem_object_bind_to_gtt(struct drm_gem_object *obj, unsigned alignment)
return ret;
}
- /* Create an AGP memory structure pointing at our pages, and bind it
- * into the GTT.
- */
- obj_priv->agp_mem = drm_agp_bind_pages(dev,
- obj_priv->pages,
- obj->size >> PAGE_SHIFT,
- obj_priv->gtt_space->start,
- obj_priv->agp_type);
- if (obj_priv->agp_mem == NULL) {
- i915_gem_object_put_pages(obj);
- drm_mm_put_block(obj_priv->gtt_space);
- obj_priv->gtt_space = NULL;
-
- ret = i915_gem_evict_something(dev, obj->size, alignment);
+ ret = i915_gem_gtt_bind_object(obj);
+ if (ret) {
+ i915_gem_object_put_pages_gtt(obj);
+ drm_mm_put_block(obj->gtt_space);
+ obj->gtt_space = NULL;
+
+ ret = i915_gem_evict_something(dev, size,
+ alignment, map_and_fenceable);
if (ret)
return ret;
goto search_free;
}
- /* keep track of bounds object by adding it to the inactive list */
- list_add_tail(&obj_priv->mm_list, &dev_priv->mm.inactive_list);
- i915_gem_info_add_gtt(dev_priv, obj->size);
+ list_add_tail(&obj->gtt_list, &dev_priv->mm.gtt_list);
+ list_add_tail(&obj->mm_list, &dev_priv->mm.inactive_list);
/* Assert that the object is not currently in any GPU domain. As it
* wasn't in the GTT, there shouldn't be any way it could have been in
* a GPU cache
*/
- BUG_ON(obj->read_domains & I915_GEM_GPU_DOMAINS);
- BUG_ON(obj->write_domain & I915_GEM_GPU_DOMAINS);
+ BUG_ON(obj->base.read_domains & I915_GEM_GPU_DOMAINS);
+ BUG_ON(obj->base.write_domain & I915_GEM_GPU_DOMAINS);
+
- obj_priv->gtt_offset = obj_priv->gtt_space->start;
- trace_i915_gem_object_bind(obj, obj_priv->gtt_offset);
+ obj->gtt_offset = obj->gtt_space->start;
+ fenceable =
+ obj->gtt_space->size == fence_size &&
+ (obj->gtt_space->start & (fence_alignment - 1)) == 0;
+ mappable =
+ obj->gtt_offset + obj->base.size <= dev_priv->mm.gtt_mappable_end;
+
+ obj->map_and_fenceable = mappable && fenceable;
+
+ trace_i915_gem_object_bind(obj, obj->gtt_offset, map_and_fenceable);
return 0;
}
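The fenceable/mappable bookkeeping above is what callers consume through the new map_and_fenceable flag: a mappable binding must land below gtt_mappable_end and satisfy the fence size/alignment rules, while a GPU-only binding merely needs the unfenced alignment. A minimal caller-side sketch under those assumptions (example_bind is illustrative, not part of the patch):

/* Sketch: pick binding constraints the way the code above expects.
 * CPU access through the GTT aperture needs map_and_fenceable;
 * pure GPU access does not and may use the whole GTT. */
static int example_bind(struct drm_i915_gem_object *obj, bool cpu_access)
{
	return i915_gem_object_pin(obj, 0 /* default alignment */,
				   cpu_access /* map_and_fenceable */);
}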
void
-i915_gem_clflush_object(struct drm_gem_object *obj)
+i915_gem_clflush_object(struct drm_i915_gem_object *obj)
{
- struct drm_i915_gem_object *obj_priv = to_intel_bo(obj);
-
/* If we don't have a page list set up, then we're not pinned
* to GPU, and we can ignore the cache flush because it'll happen
* again at bind time.
*/
- if (obj_priv->pages == NULL)
+ if (obj->pages == NULL)
return;
trace_i915_gem_object_clflush(obj);
- drm_clflush_pages(obj_priv->pages, obj->size / PAGE_SIZE);
+ drm_clflush_pages(obj->pages, obj->base.size / PAGE_SIZE);
}
/** Flushes any GPU write domain for the object if it's dirty. */
-static int
-i915_gem_object_flush_gpu_write_domain(struct drm_gem_object *obj)
+static void
+i915_gem_object_flush_gpu_write_domain(struct drm_i915_gem_object *obj)
{
- struct drm_device *dev = obj->dev;
- uint32_t old_write_domain;
+ struct drm_device *dev = obj->base.dev;
- if ((obj->write_domain & I915_GEM_GPU_DOMAINS) == 0)
- return 0;
+ if ((obj->base.write_domain & I915_GEM_GPU_DOMAINS) == 0)
+ return;
/* Queue the GPU write cache flushing we need. */
- old_write_domain = obj->write_domain;
- i915_gem_flush_ring(dev, NULL,
- to_intel_bo(obj)->ring,
- 0, obj->write_domain);
- BUG_ON(obj->write_domain);
-
- trace_i915_gem_object_change_domain(obj,
- obj->read_domains,
- old_write_domain);
-
- return 0;
+ i915_gem_flush_ring(dev, obj->ring, 0, obj->base.write_domain);
+ BUG_ON(obj->base.write_domain);
}
/** Flushes the GTT write domain for the object if it's dirty. */
static void
-i915_gem_object_flush_gtt_write_domain(struct drm_gem_object *obj)
+i915_gem_object_flush_gtt_write_domain(struct drm_i915_gem_object *obj)
{
uint32_t old_write_domain;
- if (obj->write_domain != I915_GEM_DOMAIN_GTT)
+ if (obj->base.write_domain != I915_GEM_DOMAIN_GTT)
return;
/* No actual flushing is required for the GTT write domain. Writes
* to it immediately go to main memory as far as we know, so there's
* no chipset flush. It also doesn't land in render cache.
*/
- old_write_domain = obj->write_domain;
- obj->write_domain = 0;
+ i915_gem_release_mmap(obj);
+
+ old_write_domain = obj->base.write_domain;
+ obj->base.write_domain = 0;
trace_i915_gem_object_change_domain(obj,
- obj->read_domains,
+ obj->base.read_domains,
old_write_domain);
}
/** Flushes the CPU write domain for the object if it's dirty. */
static void
-i915_gem_object_flush_cpu_write_domain(struct drm_gem_object *obj)
+i915_gem_object_flush_cpu_write_domain(struct drm_i915_gem_object *obj)
{
- struct drm_device *dev = obj->dev;
uint32_t old_write_domain;
- if (obj->write_domain != I915_GEM_DOMAIN_CPU)
+ if (obj->base.write_domain != I915_GEM_DOMAIN_CPU)
return;
i915_gem_clflush_object(obj);
- drm_agp_chipset_flush(dev);
- old_write_domain = obj->write_domain;
- obj->write_domain = 0;
+ intel_gtt_chipset_flush();
+ old_write_domain = obj->base.write_domain;
+ obj->base.write_domain = 0;
trace_i915_gem_object_change_domain(obj,
- obj->read_domains,
+ obj->base.read_domains,
old_write_domain);
}
@@ -2811,37 +2873,36 @@ i915_gem_object_flush_cpu_write_domain(struct drm_gem_object *obj)
* flushes to occur.
*/
int
-i915_gem_object_set_to_gtt_domain(struct drm_gem_object *obj, int write)
+i915_gem_object_set_to_gtt_domain(struct drm_i915_gem_object *obj, bool write)
{
- struct drm_i915_gem_object *obj_priv = to_intel_bo(obj);
uint32_t old_write_domain, old_read_domains;
int ret;
/* Not valid to be called on unbound objects. */
- if (obj_priv->gtt_space == NULL)
+ if (obj->gtt_space == NULL)
return -EINVAL;
- ret = i915_gem_object_flush_gpu_write_domain(obj);
- if (ret != 0)
- return ret;
- ret = i915_gem_object_wait_rendering(obj, true);
- if (ret)
- return ret;
+ i915_gem_object_flush_gpu_write_domain(obj);
+ if (obj->pending_gpu_write || write) {
+ ret = i915_gem_object_wait_rendering(obj, true);
+ if (ret)
+ return ret;
+ }
i915_gem_object_flush_cpu_write_domain(obj);
- old_write_domain = obj->write_domain;
- old_read_domains = obj->read_domains;
+ old_write_domain = obj->base.write_domain;
+ old_read_domains = obj->base.read_domains;
/* It should now be out of any other write domains, and we can update
* the domain values for our changes.
*/
- BUG_ON((obj->write_domain & ~I915_GEM_DOMAIN_GTT) != 0);
- obj->read_domains |= I915_GEM_DOMAIN_GTT;
+ BUG_ON((obj->base.write_domain & ~I915_GEM_DOMAIN_GTT) != 0);
+ obj->base.read_domains |= I915_GEM_DOMAIN_GTT;
if (write) {
- obj->read_domains = I915_GEM_DOMAIN_GTT;
- obj->write_domain = I915_GEM_DOMAIN_GTT;
- obj_priv->dirty = 1;
+ obj->base.read_domains = I915_GEM_DOMAIN_GTT;
+ obj->base.write_domain = I915_GEM_DOMAIN_GTT;
+ obj->dirty = 1;
}
trace_i915_gem_object_change_domain(obj,
@@ -2856,23 +2917,20 @@ i915_gem_object_set_to_gtt_domain(struct drm_gem_object *obj, int write)
* wait, as in modesetting process we're not supposed to be interrupted.
*/
int
-i915_gem_object_set_to_display_plane(struct drm_gem_object *obj,
- bool pipelined)
+i915_gem_object_set_to_display_plane(struct drm_i915_gem_object *obj,
+ struct intel_ring_buffer *pipelined)
{
- struct drm_i915_gem_object *obj_priv = to_intel_bo(obj);
uint32_t old_read_domains;
int ret;
/* Not valid to be called on unbound objects. */
- if (obj_priv->gtt_space == NULL)
+ if (obj->gtt_space == NULL)
return -EINVAL;
- ret = i915_gem_object_flush_gpu_write_domain(obj);
- if (ret)
- return ret;
+ i915_gem_object_flush_gpu_write_domain(obj);
/* Currently, we are always called from a non-interruptible context. */
- if (!pipelined) {
+ if (pipelined != obj->ring) {
ret = i915_gem_object_wait_rendering(obj, false);
if (ret)
return ret;
@@ -2880,12 +2938,12 @@ i915_gem_object_set_to_display_plane(struct drm_gem_object *obj,
i915_gem_object_flush_cpu_write_domain(obj);
- old_read_domains = obj->read_domains;
- obj->read_domains |= I915_GEM_DOMAIN_GTT;
+ old_read_domains = obj->base.read_domains;
+ obj->base.read_domains |= I915_GEM_DOMAIN_GTT;
trace_i915_gem_object_change_domain(obj,
old_read_domains,
- obj->write_domain);
+ obj->base.write_domain);
return 0;
}
@@ -2898,10 +2956,10 @@ i915_gem_object_flush_gpu(struct drm_i915_gem_object *obj,
return 0;
if (obj->base.write_domain & I915_GEM_GPU_DOMAINS)
- i915_gem_flush_ring(obj->base.dev, NULL, obj->ring,
+ i915_gem_flush_ring(obj->base.dev, obj->ring,
0, obj->base.write_domain);
- return i915_gem_object_wait_rendering(&obj->base, interruptible);
+ return i915_gem_object_wait_rendering(obj, interruptible);
}
/**
@@ -2911,14 +2969,12 @@ i915_gem_object_flush_gpu(struct drm_i915_gem_object *obj,
* flushes to occur.
*/
static int
-i915_gem_object_set_to_cpu_domain(struct drm_gem_object *obj, int write)
+i915_gem_object_set_to_cpu_domain(struct drm_i915_gem_object *obj, bool write)
{
uint32_t old_write_domain, old_read_domains;
int ret;
- ret = i915_gem_object_flush_gpu_write_domain(obj);
- if (ret != 0)
- return ret;
+ i915_gem_object_flush_gpu_write_domain(obj);
ret = i915_gem_object_wait_rendering(obj, true);
if (ret)
return ret;
@@ -2930,27 +2986,27 @@ i915_gem_object_set_to_cpu_domain(struct drm_gem_object *obj, int write)
*/
i915_gem_object_set_to_full_cpu_read_domain(obj);
- old_write_domain = obj->write_domain;
- old_read_domains = obj->read_domains;
+ old_write_domain = obj->base.write_domain;
+ old_read_domains = obj->base.read_domains;
/* Flush the CPU cache if it's still invalid. */
- if ((obj->read_domains & I915_GEM_DOMAIN_CPU) == 0) {
+ if ((obj->base.read_domains & I915_GEM_DOMAIN_CPU) == 0) {
i915_gem_clflush_object(obj);
- obj->read_domains |= I915_GEM_DOMAIN_CPU;
+ obj->base.read_domains |= I915_GEM_DOMAIN_CPU;
}
/* It should now be out of any other write domains, and we can update
* the domain values for our changes.
*/
- BUG_ON((obj->write_domain & ~I915_GEM_DOMAIN_CPU) != 0);
+ BUG_ON((obj->base.write_domain & ~I915_GEM_DOMAIN_CPU) != 0);
/* If we're writing through the CPU, then the GPU read domains will
* need to be invalidated at next use.
*/
if (write) {
- obj->read_domains = I915_GEM_DOMAIN_CPU;
- obj->write_domain = I915_GEM_DOMAIN_CPU;
+ obj->base.read_domains = I915_GEM_DOMAIN_CPU;
+ obj->base.write_domain = I915_GEM_DOMAIN_CPU;
}
trace_i915_gem_object_change_domain(obj,
@@ -2960,184 +3016,6 @@ i915_gem_object_set_to_cpu_domain(struct drm_gem_object *obj, int write)
return 0;
}
-/*
- * Set the next domain for the specified object. This
- * may not actually perform the necessary flushing/invaliding though,
- * as that may want to be batched with other set_domain operations
- *
- * This is (we hope) the only really tricky part of gem. The goal
- * is fairly simple -- track which caches hold bits of the object
- * and make sure they remain coherent. A few concrete examples may
- * help to explain how it works. For shorthand, we use the notation
- * (read_domains, write_domain), e.g. (CPU, CPU) to indicate
- * a pair of read and write domain masks.
- *
- * Case 1: the batch buffer
- *
- * 1. Allocated
- * 2. Written by CPU
- * 3. Mapped to GTT
- * 4. Read by GPU
- * 5. Unmapped from GTT
- * 6. Freed
- *
- * Let's take these a step at a time
- *
- * 1. Allocated
- * Pages allocated from the kernel may still have
- * cache contents, so we set them to (CPU, CPU) always.
- * 2. Written by CPU (using pwrite)
- * The pwrite function calls set_domain (CPU, CPU) and
- * this function does nothing (as nothing changes)
- * 3. Mapped by GTT
- * This function asserts that the object is not
- * currently in any GPU-based read or write domains
- * 4. Read by GPU
- * i915_gem_execbuffer calls set_domain (COMMAND, 0).
- * As write_domain is zero, this function adds in the
- * current read domains (CPU+COMMAND, 0).
- * flush_domains is set to CPU.
- * invalidate_domains is set to COMMAND
- * clflush is run to get data out of the CPU caches
- * then i915_dev_set_domain calls i915_gem_flush to
- * emit an MI_FLUSH and drm_agp_chipset_flush
- * 5. Unmapped from GTT
- * i915_gem_object_unbind calls set_domain (CPU, CPU)
- * flush_domains and invalidate_domains end up both zero
- * so no flushing/invalidating happens
- * 6. Freed
- * yay, done
- *
- * Case 2: The shared render buffer
- *
- * 1. Allocated
- * 2. Mapped to GTT
- * 3. Read/written by GPU
- * 4. set_domain to (CPU,CPU)
- * 5. Read/written by CPU
- * 6. Read/written by GPU
- *
- * 1. Allocated
- * Same as last example, (CPU, CPU)
- * 2. Mapped to GTT
- * Nothing changes (assertions find that it is not in the GPU)
- * 3. Read/written by GPU
- * execbuffer calls set_domain (RENDER, RENDER)
- * flush_domains gets CPU
- * invalidate_domains gets GPU
- * clflush (obj)
- * MI_FLUSH and drm_agp_chipset_flush
- * 4. set_domain (CPU, CPU)
- * flush_domains gets GPU
- * invalidate_domains gets CPU
- * wait_rendering (obj) to make sure all drawing is complete.
- * This will include an MI_FLUSH to get the data from GPU
- * to memory
- * clflush (obj) to invalidate the CPU cache
- * Another MI_FLUSH in i915_gem_flush (eliminate this somehow?)
- * 5. Read/written by CPU
- * cache lines are loaded and dirtied
- * 6. Read/written by GPU
- * Same as last GPU access
- *
- * Case 3: The constant buffer
- *
- * 1. Allocated
- * 2. Written by CPU
- * 3. Read by GPU
- * 4. Updated (written) by CPU again
- * 5. Read by GPU
- *
- * 1. Allocated
- * (CPU, CPU)
- * 2. Written by CPU
- * (CPU, CPU)
- * 3. Read by GPU
- * (CPU+RENDER, 0)
- * flush_domains = CPU
- * invalidate_domains = RENDER
- * clflush (obj)
- * MI_FLUSH
- * drm_agp_chipset_flush
- * 4. Updated (written) by CPU again
- * (CPU, CPU)
- * flush_domains = 0 (no previous write domain)
- * invalidate_domains = 0 (no new read domains)
- * 5. Read by GPU
- * (CPU+RENDER, 0)
- * flush_domains = CPU
- * invalidate_domains = RENDER
- * clflush (obj)
- * MI_FLUSH
- * drm_agp_chipset_flush
- */
-static void
-i915_gem_object_set_to_gpu_domain(struct drm_gem_object *obj,
- struct intel_ring_buffer *ring)
-{
- struct drm_device *dev = obj->dev;
- struct drm_i915_private *dev_priv = dev->dev_private;
- struct drm_i915_gem_object *obj_priv = to_intel_bo(obj);
- uint32_t invalidate_domains = 0;
- uint32_t flush_domains = 0;
- uint32_t old_read_domains;
-
- intel_mark_busy(dev, obj);
-
- /*
- * If the object isn't moving to a new write domain,
- * let the object stay in multiple read domains
- */
- if (obj->pending_write_domain == 0)
- obj->pending_read_domains |= obj->read_domains;
- else
- obj_priv->dirty = 1;
-
- /*
- * Flush the current write domain if
- * the new read domains don't match. Invalidate
- * any read domains which differ from the old
- * write domain
- */
- if (obj->write_domain &&
- (obj->write_domain != obj->pending_read_domains ||
- obj_priv->ring != ring)) {
- flush_domains |= obj->write_domain;
- invalidate_domains |=
- obj->pending_read_domains & ~obj->write_domain;
- }
- /*
- * Invalidate any read caches which may have
- * stale data. That is, any new read domains.
- */
- invalidate_domains |= obj->pending_read_domains & ~obj->read_domains;
- if ((flush_domains | invalidate_domains) & I915_GEM_DOMAIN_CPU)
- i915_gem_clflush_object(obj);
-
- old_read_domains = obj->read_domains;
-
- /* The actual obj->write_domain will be updated with
- * pending_write_domain after we emit the accumulated flush for all
- * of our domain changes in execbuffers (which clears objects'
- * write_domains). So if we have a current write domain that we
- * aren't changing, set pending_write_domain to that.
- */
- if (flush_domains == 0 && obj->pending_write_domain == 0)
- obj->pending_write_domain = obj->write_domain;
- obj->read_domains = obj->pending_read_domains;
-
- dev->invalidate_domains |= invalidate_domains;
- dev->flush_domains |= flush_domains;
- if (flush_domains & I915_GEM_GPU_DOMAINS)
- dev_priv->mm.flush_rings |= obj_priv->ring->id;
- if (invalidate_domains & I915_GEM_GPU_DOMAINS)
- dev_priv->mm.flush_rings |= ring->id;
-
- trace_i915_gem_object_change_domain(obj,
- old_read_domains,
- obj->write_domain);
-}
-
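Although the helper above is deleted (the execbuffer path takes over this bookkeeping elsewhere in the series), its core mask arithmetic is worth keeping in mind. A standalone sketch of the flush/invalidate derivation, using only the domain masks; the removed code's additional per-ring flush handling is omitted here:

/* Sketch: flush the old write domain when the reader set changes,
 * and invalidate any read domains the object did not already hold. */
static void compute_domain_transition(uint32_t read_domains,
				      uint32_t write_domain,
				      uint32_t pending_read_domains,
				      uint32_t *flush, uint32_t *invalidate)
{
	*flush = *invalidate = 0;
	if (write_domain && write_domain != pending_read_domains) {
		*flush |= write_domain;
		*invalidate |= pending_read_domains & ~write_domain;
	}
	*invalidate |= pending_read_domains & ~read_domains;
}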
/**
* Moves the object from a partially CPU read to a full one.
*
@@ -3145,30 +3023,28 @@ i915_gem_object_set_to_gpu_domain(struct drm_gem_object *obj,
* and doesn't handle transitioning from !(read_domains & I915_GEM_DOMAIN_CPU).
*/
static void
-i915_gem_object_set_to_full_cpu_read_domain(struct drm_gem_object *obj)
+i915_gem_object_set_to_full_cpu_read_domain(struct drm_i915_gem_object *obj)
{
- struct drm_i915_gem_object *obj_priv = to_intel_bo(obj);
-
- if (!obj_priv->page_cpu_valid)
+ if (!obj->page_cpu_valid)
return;
/* If we're partially in the CPU read domain, finish moving it in.
*/
- if (obj->read_domains & I915_GEM_DOMAIN_CPU) {
+ if (obj->base.read_domains & I915_GEM_DOMAIN_CPU) {
int i;
- for (i = 0; i <= (obj->size - 1) / PAGE_SIZE; i++) {
- if (obj_priv->page_cpu_valid[i])
+ for (i = 0; i <= (obj->base.size - 1) / PAGE_SIZE; i++) {
+ if (obj->page_cpu_valid[i])
continue;
- drm_clflush_pages(obj_priv->pages + i, 1);
+ drm_clflush_pages(obj->pages + i, 1);
}
}
/* Free the page_cpu_valid mappings which are now stale, whether
* or not we've got I915_GEM_DOMAIN_CPU.
*/
- kfree(obj_priv->page_cpu_valid);
- obj_priv->page_cpu_valid = NULL;
+ kfree(obj->page_cpu_valid);
+ obj->page_cpu_valid = NULL;
}
/**
@@ -3184,19 +3060,16 @@ i915_gem_object_set_to_full_cpu_read_domain(struct drm_gem_object *obj)
* flushes to occur.
*/
static int
-i915_gem_object_set_cpu_read_domain_range(struct drm_gem_object *obj,
+i915_gem_object_set_cpu_read_domain_range(struct drm_i915_gem_object *obj,
uint64_t offset, uint64_t size)
{
- struct drm_i915_gem_object *obj_priv = to_intel_bo(obj);
uint32_t old_read_domains;
int i, ret;
- if (offset == 0 && size == obj->size)
+ if (offset == 0 && size == obj->base.size)
return i915_gem_object_set_to_cpu_domain(obj, 0);
- ret = i915_gem_object_flush_gpu_write_domain(obj);
- if (ret != 0)
- return ret;
+ i915_gem_object_flush_gpu_write_domain(obj);
ret = i915_gem_object_wait_rendering(obj, true);
if (ret)
return ret;
@@ -3204,457 +3077,45 @@ i915_gem_object_set_cpu_read_domain_range(struct drm_gem_object *obj,
i915_gem_object_flush_gtt_write_domain(obj);
/* If we're already fully in the CPU read domain, we're done. */
- if (obj_priv->page_cpu_valid == NULL &&
- (obj->read_domains & I915_GEM_DOMAIN_CPU) != 0)
+ if (obj->page_cpu_valid == NULL &&
+ (obj->base.read_domains & I915_GEM_DOMAIN_CPU) != 0)
return 0;
/* Otherwise, create/clear the per-page CPU read domain flag if we're
* newly adding I915_GEM_DOMAIN_CPU
*/
- if (obj_priv->page_cpu_valid == NULL) {
- obj_priv->page_cpu_valid = kzalloc(obj->size / PAGE_SIZE,
- GFP_KERNEL);
- if (obj_priv->page_cpu_valid == NULL)
+ if (obj->page_cpu_valid == NULL) {
+ obj->page_cpu_valid = kzalloc(obj->base.size / PAGE_SIZE,
+ GFP_KERNEL);
+ if (obj->page_cpu_valid == NULL)
return -ENOMEM;
- } else if ((obj->read_domains & I915_GEM_DOMAIN_CPU) == 0)
- memset(obj_priv->page_cpu_valid, 0, obj->size / PAGE_SIZE);
+ } else if ((obj->base.read_domains & I915_GEM_DOMAIN_CPU) == 0)
+ memset(obj->page_cpu_valid, 0, obj->base.size / PAGE_SIZE);
/* Flush the cache on any pages that are still invalid from the CPU's
* perspective.
*/
for (i = offset / PAGE_SIZE; i <= (offset + size - 1) / PAGE_SIZE;
i++) {
- if (obj_priv->page_cpu_valid[i])
+ if (obj->page_cpu_valid[i])
continue;
- drm_clflush_pages(obj_priv->pages + i, 1);
+ drm_clflush_pages(obj->pages + i, 1);
- obj_priv->page_cpu_valid[i] = 1;
+ obj->page_cpu_valid[i] = 1;
}
/* It should now be out of any other write domains, and we can update
* the domain values for our changes.
*/
- BUG_ON((obj->write_domain & ~I915_GEM_DOMAIN_CPU) != 0);
+ BUG_ON((obj->base.write_domain & ~I915_GEM_DOMAIN_CPU) != 0);
- old_read_domains = obj->read_domains;
- obj->read_domains |= I915_GEM_DOMAIN_CPU;
+ old_read_domains = obj->base.read_domains;
+ obj->base.read_domains |= I915_GEM_DOMAIN_CPU;
trace_i915_gem_object_change_domain(obj,
old_read_domains,
- obj->write_domain);
-
- return 0;
-}
-
-static int
-i915_gem_execbuffer_relocate_entry(struct drm_i915_gem_object *obj,
- struct drm_file *file_priv,
- struct drm_i915_gem_exec_object2 *entry,
- struct drm_i915_gem_relocation_entry *reloc)
-{
- struct drm_device *dev = obj->base.dev;
- struct drm_gem_object *target_obj;
- uint32_t target_offset;
- int ret = -EINVAL;
-
- target_obj = drm_gem_object_lookup(dev, file_priv,
- reloc->target_handle);
- if (target_obj == NULL)
- return -ENOENT;
-
- target_offset = to_intel_bo(target_obj)->gtt_offset;
-
-#if WATCH_RELOC
- DRM_INFO("%s: obj %p offset %08x target %d "
- "read %08x write %08x gtt %08x "
- "presumed %08x delta %08x\n",
- __func__,
- obj,
- (int) reloc->offset,
- (int) reloc->target_handle,
- (int) reloc->read_domains,
- (int) reloc->write_domain,
- (int) target_offset,
- (int) reloc->presumed_offset,
- reloc->delta);
-#endif
-
- /* The target buffer should have appeared before us in the
- * exec_object list, so it should have a GTT space bound by now.
- */
- if (target_offset == 0) {
- DRM_ERROR("No GTT space found for object %d\n",
- reloc->target_handle);
- goto err;
- }
-
- /* Validate that the target is in a valid r/w GPU domain */
- if (reloc->write_domain & (reloc->write_domain - 1)) {
- DRM_ERROR("reloc with multiple write domains: "
- "obj %p target %d offset %d "
- "read %08x write %08x",
- obj, reloc->target_handle,
- (int) reloc->offset,
- reloc->read_domains,
- reloc->write_domain);
- goto err;
- }
- if (reloc->write_domain & I915_GEM_DOMAIN_CPU ||
- reloc->read_domains & I915_GEM_DOMAIN_CPU) {
- DRM_ERROR("reloc with read/write CPU domains: "
- "obj %p target %d offset %d "
- "read %08x write %08x",
- obj, reloc->target_handle,
- (int) reloc->offset,
- reloc->read_domains,
- reloc->write_domain);
- goto err;
- }
- if (reloc->write_domain && target_obj->pending_write_domain &&
- reloc->write_domain != target_obj->pending_write_domain) {
- DRM_ERROR("Write domain conflict: "
- "obj %p target %d offset %d "
- "new %08x old %08x\n",
- obj, reloc->target_handle,
- (int) reloc->offset,
- reloc->write_domain,
- target_obj->pending_write_domain);
- goto err;
- }
-
- target_obj->pending_read_domains |= reloc->read_domains;
- target_obj->pending_write_domain |= reloc->write_domain;
-
- /* If the relocation already has the right value in it, no
- * more work needs to be done.
- */
- if (target_offset == reloc->presumed_offset)
- goto out;
-
- /* Check that the relocation address is valid... */
- if (reloc->offset > obj->base.size - 4) {
- DRM_ERROR("Relocation beyond object bounds: "
- "obj %p target %d offset %d size %d.\n",
- obj, reloc->target_handle,
- (int) reloc->offset,
- (int) obj->base.size);
- goto err;
- }
- if (reloc->offset & 3) {
- DRM_ERROR("Relocation not 4-byte aligned: "
- "obj %p target %d offset %d.\n",
- obj, reloc->target_handle,
- (int) reloc->offset);
- goto err;
- }
-
- /* and points to somewhere within the target object. */
- if (reloc->delta >= target_obj->size) {
- DRM_ERROR("Relocation beyond target object bounds: "
- "obj %p target %d delta %d size %d.\n",
- obj, reloc->target_handle,
- (int) reloc->delta,
- (int) target_obj->size);
- goto err;
- }
-
- reloc->delta += target_offset;
- if (obj->base.write_domain == I915_GEM_DOMAIN_CPU) {
- uint32_t page_offset = reloc->offset & ~PAGE_MASK;
- char *vaddr;
-
- vaddr = kmap_atomic(obj->pages[reloc->offset >> PAGE_SHIFT]);
- *(uint32_t *)(vaddr + page_offset) = reloc->delta;
- kunmap_atomic(vaddr);
- } else {
- struct drm_i915_private *dev_priv = dev->dev_private;
- uint32_t __iomem *reloc_entry;
- void __iomem *reloc_page;
-
- ret = i915_gem_object_set_to_gtt_domain(&obj->base, 1);
- if (ret)
- goto err;
-
- /* Map the page containing the relocation we're going to perform. */
- reloc->offset += obj->gtt_offset;
- reloc_page = io_mapping_map_atomic_wc(dev_priv->mm.gtt_mapping,
- reloc->offset & PAGE_MASK);
- reloc_entry = (uint32_t __iomem *)
- (reloc_page + (reloc->offset & ~PAGE_MASK));
- iowrite32(reloc->delta, reloc_entry);
- io_mapping_unmap_atomic(reloc_page);
- }
-
- /* and update the user's relocation entry */
- reloc->presumed_offset = target_offset;
-
-out:
- ret = 0;
-err:
- drm_gem_object_unreference(target_obj);
- return ret;
-}
-
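The relocation writer removed above patches a 32-bit location through kmap_atomic() when the object is in the CPU write domain, or through the GTT io-mapping otherwise. The CPU-side path, condensed into a sketch (types and obj->pages as in this file):

/* Sketch: write a 4-byte relocation value through the CPU mapping
 * of the page that contains it, as the deleted code did. */
static void write_reloc_cpu(struct drm_i915_gem_object *obj,
			    uint64_t offset, uint32_t value)
{
	uint32_t page_offset = offset & ~PAGE_MASK;
	char *vaddr = kmap_atomic(obj->pages[offset >> PAGE_SHIFT]);

	*(uint32_t *)(vaddr + page_offset) = value;
	kunmap_atomic(vaddr);
}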
-static int
-i915_gem_execbuffer_relocate_object(struct drm_i915_gem_object *obj,
- struct drm_file *file_priv,
- struct drm_i915_gem_exec_object2 *entry)
-{
- struct drm_i915_gem_relocation_entry __user *user_relocs;
- int i, ret;
-
- user_relocs = (void __user *)(uintptr_t)entry->relocs_ptr;
- for (i = 0; i < entry->relocation_count; i++) {
- struct drm_i915_gem_relocation_entry reloc;
-
- if (__copy_from_user_inatomic(&reloc,
- user_relocs+i,
- sizeof(reloc)))
- return -EFAULT;
-
- ret = i915_gem_execbuffer_relocate_entry(obj, file_priv, entry, &reloc);
- if (ret)
- return ret;
-
- if (__copy_to_user_inatomic(&user_relocs[i].presumed_offset,
- &reloc.presumed_offset,
- sizeof(reloc.presumed_offset)))
- return -EFAULT;
- }
-
- return 0;
-}
-
-static int
-i915_gem_execbuffer_relocate_object_slow(struct drm_i915_gem_object *obj,
- struct drm_file *file_priv,
- struct drm_i915_gem_exec_object2 *entry,
- struct drm_i915_gem_relocation_entry *relocs)
-{
- int i, ret;
-
- for (i = 0; i < entry->relocation_count; i++) {
- ret = i915_gem_execbuffer_relocate_entry(obj, file_priv, entry, &relocs[i]);
- if (ret)
- return ret;
- }
-
- return 0;
-}
-
-static int
-i915_gem_execbuffer_relocate(struct drm_device *dev,
- struct drm_file *file,
- struct drm_gem_object **object_list,
- struct drm_i915_gem_exec_object2 *exec_list,
- int count)
-{
- int i, ret;
-
- for (i = 0; i < count; i++) {
- struct drm_i915_gem_object *obj = to_intel_bo(object_list[i]);
- obj->base.pending_read_domains = 0;
- obj->base.pending_write_domain = 0;
- ret = i915_gem_execbuffer_relocate_object(obj, file,
- &exec_list[i]);
- if (ret)
- return ret;
- }
-
- return 0;
-}
-
-static int
-i915_gem_execbuffer_reserve(struct drm_device *dev,
- struct drm_file *file,
- struct drm_gem_object **object_list,
- struct drm_i915_gem_exec_object2 *exec_list,
- int count)
-{
- struct drm_i915_private *dev_priv = dev->dev_private;
- int ret, i, retry;
-
- /* attempt to pin all of the buffers into the GTT */
- for (retry = 0; retry < 2; retry++) {
- ret = 0;
- for (i = 0; i < count; i++) {
- struct drm_i915_gem_exec_object2 *entry = &exec_list[i];
- struct drm_i915_gem_object *obj = to_intel_bo(object_list[i]);
- bool need_fence =
- entry->flags & EXEC_OBJECT_NEEDS_FENCE &&
- obj->tiling_mode != I915_TILING_NONE;
-
- /* Check fence reg constraints and rebind if necessary */
- if (need_fence &&
- !i915_gem_object_fence_offset_ok(&obj->base,
- obj->tiling_mode)) {
- ret = i915_gem_object_unbind(&obj->base);
- if (ret)
- break;
- }
-
- ret = i915_gem_object_pin(&obj->base, entry->alignment);
- if (ret)
- break;
-
- /*
- * Pre-965 chips need a fence register set up in order
- * to properly handle blits to/from tiled surfaces.
- */
- if (need_fence) {
- ret = i915_gem_object_get_fence_reg(&obj->base, true);
- if (ret) {
- i915_gem_object_unpin(&obj->base);
- break;
- }
-
- dev_priv->fence_regs[obj->fence_reg].gpu = true;
- }
-
- entry->offset = obj->gtt_offset;
- }
-
- while (i--)
- i915_gem_object_unpin(object_list[i]);
-
- if (ret == 0)
- break;
-
- if (ret != -ENOSPC || retry)
- return ret;
-
- ret = i915_gem_evict_everything(dev);
- if (ret)
- return ret;
- }
-
- return 0;
-}
-
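The reserve pass deleted here follows a two-attempt pattern: pin every buffer, and if the aperture is exhausted (-ENOSPC), evict everything once and retry. Its skeleton as a sketch, keeping the pre-refactor call signatures of the removed code:

/* Sketch of the deleted reserve loop: at most two passes, with a
 * full eviction between them on -ENOSPC. */
static int reserve_with_retry(struct drm_device *dev,
			      struct drm_gem_object **objects, int count)
{
	int ret, i, retry;

	for (retry = 0; retry < 2; retry++) {
		ret = 0;
		for (i = 0; i < count; i++) {
			ret = i915_gem_object_pin(objects[i], 0);
			if (ret)
				break;
		}
		/* The removed code unpins everything after each pass. */
		while (i--)
			i915_gem_object_unpin(objects[i]);

		if (ret == 0)
			break;
		if (ret != -ENOSPC || retry)
			return ret;

		ret = i915_gem_evict_everything(dev);
		if (ret)
			return ret;
	}
	return 0;
}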
-static int
-i915_gem_execbuffer_relocate_slow(struct drm_device *dev,
- struct drm_file *file,
- struct drm_gem_object **object_list,
- struct drm_i915_gem_exec_object2 *exec_list,
- int count)
-{
- struct drm_i915_gem_relocation_entry *reloc;
- int i, total, ret;
-
- for (i = 0; i < count; i++) {
- struct drm_i915_gem_object *obj = to_intel_bo(object_list[i]);
- obj->in_execbuffer = false;
- }
-
- mutex_unlock(&dev->struct_mutex);
-
- total = 0;
- for (i = 0; i < count; i++)
- total += exec_list[i].relocation_count;
-
- reloc = drm_malloc_ab(total, sizeof(*reloc));
- if (reloc == NULL) {
- mutex_lock(&dev->struct_mutex);
- return -ENOMEM;
- }
-
- total = 0;
- for (i = 0; i < count; i++) {
- struct drm_i915_gem_relocation_entry __user *user_relocs;
-
- user_relocs = (void __user *)(uintptr_t)exec_list[i].relocs_ptr;
-
- if (copy_from_user(reloc+total, user_relocs,
- exec_list[i].relocation_count *
- sizeof(*reloc))) {
- ret = -EFAULT;
- mutex_lock(&dev->struct_mutex);
- goto err;
- }
-
- total += exec_list[i].relocation_count;
- }
-
- ret = i915_mutex_lock_interruptible(dev);
- if (ret) {
- mutex_lock(&dev->struct_mutex);
- goto err;
- }
-
- ret = i915_gem_execbuffer_reserve(dev, file,
- object_list, exec_list,
- count);
- if (ret)
- goto err;
-
- total = 0;
- for (i = 0; i < count; i++) {
- struct drm_i915_gem_object *obj = to_intel_bo(object_list[i]);
- obj->base.pending_read_domains = 0;
- obj->base.pending_write_domain = 0;
- ret = i915_gem_execbuffer_relocate_object_slow(obj, file,
- &exec_list[i],
- reloc + total);
- if (ret)
- goto err;
-
- total += exec_list[i].relocation_count;
- }
-
- /* Leave the user relocations as they are, this is the painfully slow path,
- * and we want to avoid the complication of dropping the lock whilst
- * having buffers reserved in the aperture and so causing spurious
- * ENOSPC for random operations.
- */
-
-err:
- drm_free_large(reloc);
- return ret;
-}
-
-static int
-i915_gem_execbuffer_move_to_gpu(struct drm_device *dev,
- struct drm_file *file,
- struct intel_ring_buffer *ring,
- struct drm_gem_object **objects,
- int count)
-{
- struct drm_i915_private *dev_priv = dev->dev_private;
- int ret, i;
-
- /* Zero the global flush/invalidate flags. These
- * will be modified as new domains are computed
- * for each object
- */
- dev->invalidate_domains = 0;
- dev->flush_domains = 0;
- dev_priv->mm.flush_rings = 0;
- for (i = 0; i < count; i++)
- i915_gem_object_set_to_gpu_domain(objects[i], ring);
-
- if (dev->invalidate_domains | dev->flush_domains) {
-#if WATCH_EXEC
- DRM_INFO("%s: invalidate_domains %08x flush_domains %08x\n",
- __func__,
- dev->invalidate_domains,
- dev->flush_domains);
-#endif
- i915_gem_flush(dev, file,
- dev->invalidate_domains,
- dev->flush_domains,
- dev_priv->mm.flush_rings);
- }
-
- for (i = 0; i < count; i++) {
- struct drm_i915_gem_object *obj = to_intel_bo(objects[i]);
- /* XXX replace with semaphores */
- if (obj->ring && ring != obj->ring) {
- ret = i915_gem_object_wait_rendering(&obj->base, true);
- if (ret)
- return ret;
- }
- }
+ obj->base.write_domain);
return 0;
}
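page_cpu_valid is a byte-per-page validity map, so a partial move to the CPU read domain only clflushes pages that have not been marked yet. The marking loop, isolated as a sketch:

/* Sketch: flush and mark only the not-yet-valid pages covering
 * [offset, offset + size), mirroring the loop above. */
static void clflush_invalid_range(struct drm_i915_gem_object *obj,
				  uint64_t offset, uint64_t size)
{
	int i;

	for (i = offset / PAGE_SIZE;
	     i <= (offset + size - 1) / PAGE_SIZE; i++) {
		if (obj->page_cpu_valid[i])
			continue;
		drm_clflush_pages(obj->pages + i, 1);
		obj->page_cpu_valid[i] = 1;
	}
}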
@@ -3694,599 +3155,129 @@ i915_gem_ring_throttle(struct drm_device *dev, struct drm_file *file)
return 0;
ret = 0;
- if (!i915_seqno_passed(ring->get_seqno(dev, ring), seqno)) {
+ if (!i915_seqno_passed(ring->get_seqno(ring), seqno)) {
/* And wait for the seqno passing without holding any locks and
* causing extra latency for others. This is safe as the irq
* generation is designed to be run atomically and so is
* lockless.
*/
- ring->user_irq_get(dev, ring);
- ret = wait_event_interruptible(ring->irq_queue,
- i915_seqno_passed(ring->get_seqno(dev, ring), seqno)
- || atomic_read(&dev_priv->mm.wedged));
- ring->user_irq_put(dev, ring);
-
- if (ret == 0 && atomic_read(&dev_priv->mm.wedged))
- ret = -EIO;
- }
-
- if (ret == 0)
- queue_delayed_work(dev_priv->wq, &dev_priv->mm.retire_work, 0);
-
- return ret;
-}
-
-static int
-i915_gem_check_execbuffer(struct drm_i915_gem_execbuffer2 *exec,
- uint64_t exec_offset)
-{
- uint32_t exec_start, exec_len;
-
- exec_start = (uint32_t) exec_offset + exec->batch_start_offset;
- exec_len = (uint32_t) exec->batch_len;
-
- if ((exec_start | exec_len) & 0x7)
- return -EINVAL;
-
- if (!exec_start)
- return -EINVAL;
-
- return 0;
-}
-
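The batch-range check above relies on OR-folding: a single test of (exec_start | exec_len) & 0x7 catches misalignment in either value. The idiom in isolation:

/* Sketch: a single OR folds the two alignment tests; the result
 * is true only when both values are 8-byte aligned. */
static bool both_qword_aligned(uint32_t a, uint32_t b)
{
	return ((a | b) & 0x7) == 0;
}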
-static int
-validate_exec_list(struct drm_i915_gem_exec_object2 *exec,
- int count)
-{
- int i;
-
- for (i = 0; i < count; i++) {
- char __user *ptr = (char __user *)(uintptr_t)exec[i].relocs_ptr;
- int length; /* limited by fault_in_pages_readable() */
-
- /* First check for malicious input causing overflow */
- if (exec[i].relocation_count >
- INT_MAX / sizeof(struct drm_i915_gem_relocation_entry))
- return -EINVAL;
-
- length = exec[i].relocation_count *
- sizeof(struct drm_i915_gem_relocation_entry);
- if (!access_ok(VERIFY_READ, ptr, length))
- return -EFAULT;
-
- /* we may also need to update the presumed offsets */
- if (!access_ok(VERIFY_WRITE, ptr, length))
- return -EFAULT;
-
- if (fault_in_pages_readable(ptr, length))
- return -EFAULT;
- }
-
- return 0;
-}
-
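validate_exec_list guards the relocation-length multiplication against integer overflow by bounding the count first, before any arithmetic on it. The same pattern as a standalone sketch:

/* Sketch: refuse counts whose byte length cannot fit in an int;
 * only then is count * element size safe to compute. */
static int checked_reloc_length(unsigned int count, int *length)
{
	if (count > INT_MAX / sizeof(struct drm_i915_gem_relocation_entry))
		return -EINVAL;
	*length = count * sizeof(struct drm_i915_gem_relocation_entry);
	return 0;
}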
-static int
-i915_gem_do_execbuffer(struct drm_device *dev, void *data,
- struct drm_file *file,
- struct drm_i915_gem_execbuffer2 *args,
- struct drm_i915_gem_exec_object2 *exec_list)
-{
- drm_i915_private_t *dev_priv = dev->dev_private;
- struct drm_gem_object **object_list = NULL;
- struct drm_gem_object *batch_obj;
- struct drm_i915_gem_object *obj_priv;
- struct drm_clip_rect *cliprects = NULL;
- struct drm_i915_gem_request *request = NULL;
- int ret, i, flips;
- uint64_t exec_offset;
-
- struct intel_ring_buffer *ring = NULL;
-
- ret = i915_gem_check_is_wedged(dev);
- if (ret)
- return ret;
-
- ret = validate_exec_list(exec_list, args->buffer_count);
- if (ret)
- return ret;
-
-#if WATCH_EXEC
- DRM_INFO("buffers_ptr %d buffer_count %d len %08x\n",
- (int) args->buffers_ptr, args->buffer_count, args->batch_len);
-#endif
- switch (args->flags & I915_EXEC_RING_MASK) {
- case I915_EXEC_DEFAULT:
- case I915_EXEC_RENDER:
- ring = &dev_priv->render_ring;
- break;
- case I915_EXEC_BSD:
- if (!HAS_BSD(dev)) {
- DRM_ERROR("execbuf with invalid ring (BSD)\n");
- return -EINVAL;
- }
- ring = &dev_priv->bsd_ring;
- break;
- case I915_EXEC_BLT:
- if (!HAS_BLT(dev)) {
- DRM_ERROR("execbuf with invalid ring (BLT)\n");
- return -EINVAL;
- }
- ring = &dev_priv->blt_ring;
- break;
- default:
- DRM_ERROR("execbuf with unknown ring: %d\n",
- (int)(args->flags & I915_EXEC_RING_MASK));
- return -EINVAL;
- }
-
- if (args->buffer_count < 1) {
- DRM_ERROR("execbuf with %d buffers\n", args->buffer_count);
- return -EINVAL;
- }
- object_list = drm_malloc_ab(sizeof(*object_list), args->buffer_count);
- if (object_list == NULL) {
- DRM_ERROR("Failed to allocate object list for %d buffers\n",
- args->buffer_count);
- ret = -ENOMEM;
- goto pre_mutex_err;
- }
-
- if (args->num_cliprects != 0) {
- cliprects = kcalloc(args->num_cliprects, sizeof(*cliprects),
- GFP_KERNEL);
- if (cliprects == NULL) {
- ret = -ENOMEM;
- goto pre_mutex_err;
- }
-
- ret = copy_from_user(cliprects,
- (struct drm_clip_rect __user *)
- (uintptr_t) args->cliprects_ptr,
- sizeof(*cliprects) * args->num_cliprects);
- if (ret != 0) {
- DRM_ERROR("copy %d cliprects failed: %d\n",
- args->num_cliprects, ret);
- ret = -EFAULT;
- goto pre_mutex_err;
- }
- }
-
- request = kzalloc(sizeof(*request), GFP_KERNEL);
- if (request == NULL) {
- ret = -ENOMEM;
- goto pre_mutex_err;
- }
-
- ret = i915_mutex_lock_interruptible(dev);
- if (ret)
- goto pre_mutex_err;
-
- if (dev_priv->mm.suspended) {
- mutex_unlock(&dev->struct_mutex);
- ret = -EBUSY;
- goto pre_mutex_err;
- }
-
- /* Look up object handles */
- for (i = 0; i < args->buffer_count; i++) {
- object_list[i] = drm_gem_object_lookup(dev, file,
- exec_list[i].handle);
- if (object_list[i] == NULL) {
- DRM_ERROR("Invalid object handle %d at index %d\n",
- exec_list[i].handle, i);
- /* prevent error path from reading uninitialized data */
- args->buffer_count = i + 1;
- ret = -ENOENT;
- goto err;
- }
-
- obj_priv = to_intel_bo(object_list[i]);
- if (obj_priv->in_execbuffer) {
- DRM_ERROR("Object %p appears more than once in object list\n",
- object_list[i]);
- /* prevent error path from reading uninitialized data */
- args->buffer_count = i + 1;
- ret = -EINVAL;
- goto err;
- }
- obj_priv->in_execbuffer = true;
- }
-
- /* Move the objects en-masse into the GTT, evicting if necessary. */
- ret = i915_gem_execbuffer_reserve(dev, file,
- object_list, exec_list,
- args->buffer_count);
- if (ret)
- goto err;
-
- /* The objects are in their final locations, apply the relocations. */
- ret = i915_gem_execbuffer_relocate(dev, file,
- object_list, exec_list,
- args->buffer_count);
- if (ret) {
- if (ret == -EFAULT) {
- ret = i915_gem_execbuffer_relocate_slow(dev, file,
- object_list,
- exec_list,
- args->buffer_count);
- BUG_ON(!mutex_is_locked(&dev->struct_mutex));
- }
- if (ret)
- goto err;
- }
-
- /* Set the pending read domains for the batch buffer to COMMAND */
- batch_obj = object_list[args->buffer_count-1];
- if (batch_obj->pending_write_domain) {
- DRM_ERROR("Attempting to use self-modifying batch buffer\n");
- ret = -EINVAL;
- goto err;
- }
- batch_obj->pending_read_domains |= I915_GEM_DOMAIN_COMMAND;
-
- /* Sanity check the batch buffer */
- exec_offset = to_intel_bo(batch_obj)->gtt_offset;
- ret = i915_gem_check_execbuffer(args, exec_offset);
- if (ret != 0) {
- DRM_ERROR("execbuf with invalid offset/length\n");
- goto err;
- }
-
- ret = i915_gem_execbuffer_move_to_gpu(dev, file, ring,
- object_list, args->buffer_count);
- if (ret)
- goto err;
-
- for (i = 0; i < args->buffer_count; i++) {
- struct drm_gem_object *obj = object_list[i];
- uint32_t old_write_domain = obj->write_domain;
- obj->write_domain = obj->pending_write_domain;
- trace_i915_gem_object_change_domain(obj,
- obj->read_domains,
- old_write_domain);
- }
-
-#if WATCH_COHERENCY
- for (i = 0; i < args->buffer_count; i++) {
- i915_gem_object_check_coherency(object_list[i],
- exec_list[i].handle);
- }
-#endif
-
-#if WATCH_EXEC
- i915_gem_dump_object(batch_obj,
- args->batch_len,
- __func__,
- ~0);
-#endif
-
- /* Check for any pending flips. As we only maintain a flip queue depth
- * of 1, we can simply insert a WAIT for the next display flip prior
- * to executing the batch and avoid stalling the CPU.
- */
- flips = 0;
- for (i = 0; i < args->buffer_count; i++) {
- if (object_list[i]->write_domain)
- flips |= atomic_read(&to_intel_bo(object_list[i])->pending_flip);
- }
- if (flips) {
- int plane, flip_mask;
-
- for (plane = 0; flips >> plane; plane++) {
- if (((flips >> plane) & 1) == 0)
- continue;
-
- if (plane)
- flip_mask = MI_WAIT_FOR_PLANE_B_FLIP;
- else
- flip_mask = MI_WAIT_FOR_PLANE_A_FLIP;
-
- intel_ring_begin(dev, ring, 2);
- intel_ring_emit(dev, ring,
- MI_WAIT_FOR_EVENT | flip_mask);
- intel_ring_emit(dev, ring, MI_NOOP);
- intel_ring_advance(dev, ring);
- }
- }
-
- /* Exec the batchbuffer */
- ret = ring->dispatch_gem_execbuffer(dev, ring, args,
- cliprects, exec_offset);
- if (ret) {
- DRM_ERROR("dispatch failed %d\n", ret);
- goto err;
- }
-
- /*
- * Ensure that the commands in the batch buffer are
- * finished before the interrupt fires
- */
- i915_retire_commands(dev, ring);
-
- for (i = 0; i < args->buffer_count; i++) {
- struct drm_gem_object *obj = object_list[i];
-
- i915_gem_object_move_to_active(obj, ring);
- if (obj->write_domain)
- list_move_tail(&to_intel_bo(obj)->gpu_write_list,
- &ring->gpu_write_list);
- }
-
- i915_add_request(dev, file, request, ring);
- request = NULL;
-
-err:
- for (i = 0; i < args->buffer_count; i++) {
- if (object_list[i]) {
- obj_priv = to_intel_bo(object_list[i]);
- obj_priv->in_execbuffer = false;
- }
- drm_gem_object_unreference(object_list[i]);
- }
-
- mutex_unlock(&dev->struct_mutex);
-
-pre_mutex_err:
- drm_free_large(object_list);
- kfree(cliprects);
- kfree(request);
-
- return ret;
-}
-
-/*
- * Legacy execbuffer just creates an exec2 list from the original exec object
- * list array and passes it to the real function.
- */
-int
-i915_gem_execbuffer(struct drm_device *dev, void *data,
- struct drm_file *file_priv)
-{
- struct drm_i915_gem_execbuffer *args = data;
- struct drm_i915_gem_execbuffer2 exec2;
- struct drm_i915_gem_exec_object *exec_list = NULL;
- struct drm_i915_gem_exec_object2 *exec2_list = NULL;
- int ret, i;
-
-#if WATCH_EXEC
- DRM_INFO("buffers_ptr %d buffer_count %d len %08x\n",
- (int) args->buffers_ptr, args->buffer_count, args->batch_len);
-#endif
-
- if (args->buffer_count < 1) {
- DRM_ERROR("execbuf with %d buffers\n", args->buffer_count);
- return -EINVAL;
- }
-
- /* Copy in the exec list from userland */
- exec_list = drm_malloc_ab(sizeof(*exec_list), args->buffer_count);
- exec2_list = drm_malloc_ab(sizeof(*exec2_list), args->buffer_count);
- if (exec_list == NULL || exec2_list == NULL) {
- DRM_ERROR("Failed to allocate exec list for %d buffers\n",
- args->buffer_count);
- drm_free_large(exec_list);
- drm_free_large(exec2_list);
- return -ENOMEM;
- }
- ret = copy_from_user(exec_list,
- (struct drm_i915_relocation_entry __user *)
- (uintptr_t) args->buffers_ptr,
- sizeof(*exec_list) * args->buffer_count);
- if (ret != 0) {
- DRM_ERROR("copy %d exec entries failed %d\n",
- args->buffer_count, ret);
- drm_free_large(exec_list);
- drm_free_large(exec2_list);
- return -EFAULT;
- }
-
- for (i = 0; i < args->buffer_count; i++) {
- exec2_list[i].handle = exec_list[i].handle;
- exec2_list[i].relocation_count = exec_list[i].relocation_count;
- exec2_list[i].relocs_ptr = exec_list[i].relocs_ptr;
- exec2_list[i].alignment = exec_list[i].alignment;
- exec2_list[i].offset = exec_list[i].offset;
- if (INTEL_INFO(dev)->gen < 4)
- exec2_list[i].flags = EXEC_OBJECT_NEEDS_FENCE;
- else
- exec2_list[i].flags = 0;
- }
+ if (ring->irq_get(ring)) {
+ ret = wait_event_interruptible(ring->irq_queue,
+ i915_seqno_passed(ring->get_seqno(ring), seqno)
+ || atomic_read(&dev_priv->mm.wedged));
+ ring->irq_put(ring);
- exec2.buffers_ptr = args->buffers_ptr;
- exec2.buffer_count = args->buffer_count;
- exec2.batch_start_offset = args->batch_start_offset;
- exec2.batch_len = args->batch_len;
- exec2.DR1 = args->DR1;
- exec2.DR4 = args->DR4;
- exec2.num_cliprects = args->num_cliprects;
- exec2.cliprects_ptr = args->cliprects_ptr;
- exec2.flags = I915_EXEC_RENDER;
-
- ret = i915_gem_do_execbuffer(dev, data, file_priv, &exec2, exec2_list);
- if (!ret) {
- /* Copy the new buffer offsets back to the user's exec list. */
- for (i = 0; i < args->buffer_count; i++)
- exec_list[i].offset = exec2_list[i].offset;
- /* ... and back out to userspace */
- ret = copy_to_user((struct drm_i915_relocation_entry __user *)
- (uintptr_t) args->buffers_ptr,
- exec_list,
- sizeof(*exec_list) * args->buffer_count);
- if (ret) {
- ret = -EFAULT;
- DRM_ERROR("failed to copy %d exec entries "
- "back to user (%d)\n",
- args->buffer_count, ret);
+ if (ret == 0 && atomic_read(&dev_priv->mm.wedged))
+ ret = -EIO;
}
}
- drm_free_large(exec_list);
- drm_free_large(exec2_list);
- return ret;
-}
-
-int
-i915_gem_execbuffer2(struct drm_device *dev, void *data,
- struct drm_file *file_priv)
-{
- struct drm_i915_gem_execbuffer2 *args = data;
- struct drm_i915_gem_exec_object2 *exec2_list = NULL;
- int ret;
-
-#if WATCH_EXEC
- DRM_INFO("buffers_ptr %d buffer_count %d len %08x\n",
- (int) args->buffers_ptr, args->buffer_count, args->batch_len);
-#endif
-
- if (args->buffer_count < 1) {
- DRM_ERROR("execbuf2 with %d buffers\n", args->buffer_count);
- return -EINVAL;
- }
-
- exec2_list = drm_malloc_ab(sizeof(*exec2_list), args->buffer_count);
- if (exec2_list == NULL) {
- DRM_ERROR("Failed to allocate exec list for %d buffers\n",
- args->buffer_count);
- return -ENOMEM;
- }
- ret = copy_from_user(exec2_list,
- (struct drm_i915_relocation_entry __user *)
- (uintptr_t) args->buffers_ptr,
- sizeof(*exec2_list) * args->buffer_count);
- if (ret != 0) {
- DRM_ERROR("copy %d exec entries failed %d\n",
- args->buffer_count, ret);
- drm_free_large(exec2_list);
- return -EFAULT;
- }
-
- ret = i915_gem_do_execbuffer(dev, data, file_priv, args, exec2_list);
- if (!ret) {
- /* Copy the new buffer offsets back to the user's exec list. */
- ret = copy_to_user((struct drm_i915_relocation_entry __user *)
- (uintptr_t) args->buffers_ptr,
- exec2_list,
- sizeof(*exec2_list) * args->buffer_count);
- if (ret) {
- ret = -EFAULT;
- DRM_ERROR("failed to copy %d exec entries "
- "back to user (%d)\n",
- args->buffer_count, ret);
- }
- }
+ if (ret == 0)
+ queue_delayed_work(dev_priv->wq, &dev_priv->mm.retire_work, 0);
- drm_free_large(exec2_list);
return ret;
}
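The throttle path now takes and releases the ring's user interrupt around the sleep itself, keeping the seqno wait lockless. The pattern, reduced to a sketch with the same wedged handling as the code above:

/* Sketch: lockless wait for a seqno, holding the user irq only
 * for the duration of the sleep; -EIO if the GPU wedged. */
static int wait_seqno(struct intel_ring_buffer *ring, u32 seqno,
		      atomic_t *wedged)
{
	int ret = 0;

	if (ring->irq_get(ring)) {
		ret = wait_event_interruptible(ring->irq_queue,
			i915_seqno_passed(ring->get_seqno(ring), seqno) ||
			atomic_read(wedged));
		ring->irq_put(ring);
		if (ret == 0 && atomic_read(wedged))
			ret = -EIO;
	}
	return ret;
}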
int
-i915_gem_object_pin(struct drm_gem_object *obj, uint32_t alignment)
+i915_gem_object_pin(struct drm_i915_gem_object *obj,
+ uint32_t alignment,
+ bool map_and_fenceable)
{
- struct drm_device *dev = obj->dev;
+ struct drm_device *dev = obj->base.dev;
struct drm_i915_private *dev_priv = dev->dev_private;
- struct drm_i915_gem_object *obj_priv = to_intel_bo(obj);
int ret;
- BUG_ON(obj_priv->pin_count == DRM_I915_GEM_OBJECT_MAX_PIN_COUNT);
+ BUG_ON(obj->pin_count == DRM_I915_GEM_OBJECT_MAX_PIN_COUNT);
WARN_ON(i915_verify_lists(dev));
- if (obj_priv->gtt_space != NULL) {
- if (alignment == 0)
- alignment = i915_gem_get_gtt_alignment(obj);
- if (obj_priv->gtt_offset & (alignment - 1)) {
- WARN(obj_priv->pin_count,
- "bo is already pinned with incorrect alignment: offset=%x, req.alignment=%x\n",
- obj_priv->gtt_offset, alignment);
+ if (obj->gtt_space != NULL) {
+ if ((alignment && obj->gtt_offset & (alignment - 1)) ||
+ (map_and_fenceable && !obj->map_and_fenceable)) {
+ WARN(obj->pin_count,
+ "bo is already pinned with incorrect alignment:"
+ " offset=%x, req.alignment=%x, req.map_and_fenceable=%d,"
+ " obj->map_and_fenceable=%d\n",
+ obj->gtt_offset, alignment,
+ map_and_fenceable,
+ obj->map_and_fenceable);
ret = i915_gem_object_unbind(obj);
if (ret)
return ret;
}
}
- if (obj_priv->gtt_space == NULL) {
- ret = i915_gem_object_bind_to_gtt(obj, alignment);
+ if (obj->gtt_space == NULL) {
+ ret = i915_gem_object_bind_to_gtt(obj, alignment,
+ map_and_fenceable);
if (ret)
return ret;
}
- obj_priv->pin_count++;
-
- /* If the object is not active and not pending a flush,
- * remove it from the inactive list
- */
- if (obj_priv->pin_count == 1) {
- i915_gem_info_add_pin(dev_priv, obj->size);
- if (!obj_priv->active)
- list_move_tail(&obj_priv->mm_list,
+ if (obj->pin_count++ == 0) {
+ if (!obj->active)
+ list_move_tail(&obj->mm_list,
&dev_priv->mm.pinned_list);
}
+ obj->pin_mappable |= map_and_fenceable;
WARN_ON(i915_verify_lists(dev));
return 0;
}
void
-i915_gem_object_unpin(struct drm_gem_object *obj)
+i915_gem_object_unpin(struct drm_i915_gem_object *obj)
{
- struct drm_device *dev = obj->dev;
+ struct drm_device *dev = obj->base.dev;
drm_i915_private_t *dev_priv = dev->dev_private;
- struct drm_i915_gem_object *obj_priv = to_intel_bo(obj);
WARN_ON(i915_verify_lists(dev));
- obj_priv->pin_count--;
- BUG_ON(obj_priv->pin_count < 0);
- BUG_ON(obj_priv->gtt_space == NULL);
+ BUG_ON(obj->pin_count == 0);
+ BUG_ON(obj->gtt_space == NULL);
- /* If the object is no longer pinned, and is
- * neither active nor being flushed, then stick it on
- * the inactive list
- */
- if (obj_priv->pin_count == 0) {
- if (!obj_priv->active)
- list_move_tail(&obj_priv->mm_list,
+ if (--obj->pin_count == 0) {
+ if (!obj->active)
+ list_move_tail(&obj->mm_list,
&dev_priv->mm.inactive_list);
- i915_gem_info_remove_pin(dev_priv, obj->size);
+ obj->pin_mappable = false;
}
WARN_ON(i915_verify_lists(dev));
}
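Pin counting moves objects between the pinned and inactive lists only on the 0-to-1 and 1-to-0 transitions, so nested pins are cheap. A minimal usage sketch under struct_mutex (example_use_gtt_offset stands in for any consumer of the pinned offset and is hypothetical):

/* Sketch: typical pin/use/unpin pairing. */
static int example_pin_use(struct drm_i915_gem_object *obj)
{
	int ret;

	ret = i915_gem_object_pin(obj, 4096, true);
	if (ret)
		return ret;

	example_use_gtt_offset(obj->gtt_offset);

	i915_gem_object_unpin(obj);
	return 0;
}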
int
i915_gem_pin_ioctl(struct drm_device *dev, void *data,
- struct drm_file *file_priv)
+ struct drm_file *file)
{
struct drm_i915_gem_pin *args = data;
- struct drm_gem_object *obj;
- struct drm_i915_gem_object *obj_priv;
+ struct drm_i915_gem_object *obj;
int ret;
ret = i915_mutex_lock_interruptible(dev);
if (ret)
return ret;
- obj = drm_gem_object_lookup(dev, file_priv, args->handle);
+ obj = to_intel_bo(drm_gem_object_lookup(dev, file, args->handle));
if (obj == NULL) {
ret = -ENOENT;
goto unlock;
}
- obj_priv = to_intel_bo(obj);
- if (obj_priv->madv != I915_MADV_WILLNEED) {
+ if (obj->madv != I915_MADV_WILLNEED) {
DRM_ERROR("Attempting to pin a purgeable buffer\n");
ret = -EINVAL;
goto out;
}
- if (obj_priv->pin_filp != NULL && obj_priv->pin_filp != file_priv) {
+ if (obj->pin_filp != NULL && obj->pin_filp != file) {
DRM_ERROR("Already pinned in i915_gem_pin_ioctl(): %d\n",
args->handle);
ret = -EINVAL;
goto out;
}
- obj_priv->user_pin_count++;
- obj_priv->pin_filp = file_priv;
- if (obj_priv->user_pin_count == 1) {
- ret = i915_gem_object_pin(obj, args->alignment);
+ obj->user_pin_count++;
+ obj->pin_filp = file;
+ if (obj->user_pin_count == 1) {
+ ret = i915_gem_object_pin(obj, args->alignment, true);
if (ret)
goto out;
}
@@ -4295,9 +3286,9 @@ i915_gem_pin_ioctl(struct drm_device *dev, void *data,
* as the X server doesn't manage domains yet
*/
i915_gem_object_flush_cpu_write_domain(obj);
- args->offset = obj_priv->gtt_offset;
+ args->offset = obj->gtt_offset;
out:
- drm_gem_object_unreference(obj);
+ drm_gem_object_unreference(&obj->base);
unlock:
mutex_unlock(&dev->struct_mutex);
return ret;
@@ -4305,38 +3296,36 @@ unlock:
int
i915_gem_unpin_ioctl(struct drm_device *dev, void *data,
- struct drm_file *file_priv)
+ struct drm_file *file)
{
struct drm_i915_gem_pin *args = data;
- struct drm_gem_object *obj;
- struct drm_i915_gem_object *obj_priv;
+ struct drm_i915_gem_object *obj;
int ret;
ret = i915_mutex_lock_interruptible(dev);
if (ret)
return ret;
- obj = drm_gem_object_lookup(dev, file_priv, args->handle);
+ obj = to_intel_bo(drm_gem_object_lookup(dev, file, args->handle));
if (obj == NULL) {
ret = -ENOENT;
goto unlock;
}
- obj_priv = to_intel_bo(obj);
- if (obj_priv->pin_filp != file_priv) {
+ if (obj->pin_filp != file) {
DRM_ERROR("Not pinned by caller in i915_gem_pin_ioctl(): %d\n",
args->handle);
ret = -EINVAL;
goto out;
}
- obj_priv->user_pin_count--;
- if (obj_priv->user_pin_count == 0) {
- obj_priv->pin_filp = NULL;
+ obj->user_pin_count--;
+ if (obj->user_pin_count == 0) {
+ obj->pin_filp = NULL;
i915_gem_object_unpin(obj);
}
out:
- drm_gem_object_unreference(obj);
+ drm_gem_object_unreference(&obj->base);
unlock:
mutex_unlock(&dev->struct_mutex);
return ret;
@@ -4344,48 +3333,50 @@ unlock:
int
i915_gem_busy_ioctl(struct drm_device *dev, void *data,
- struct drm_file *file_priv)
+ struct drm_file *file)
{
struct drm_i915_gem_busy *args = data;
- struct drm_gem_object *obj;
- struct drm_i915_gem_object *obj_priv;
+ struct drm_i915_gem_object *obj;
int ret;
ret = i915_mutex_lock_interruptible(dev);
if (ret)
return ret;
- obj = drm_gem_object_lookup(dev, file_priv, args->handle);
+ obj = to_intel_bo(drm_gem_object_lookup(dev, file, args->handle));
if (obj == NULL) {
ret = -ENOENT;
goto unlock;
}
- obj_priv = to_intel_bo(obj);
/* Count all active objects as busy, even if they are currently not used
* by the gpu. Users of this interface expect objects to eventually
* become non-busy without any further actions, therefore emit any
* necessary flushes here.
*/
- args->busy = obj_priv->active;
+ args->busy = obj->active;
if (args->busy) {
/* Unconditionally flush objects, even when the gpu still uses this
* object. Userspace calling this function indicates that it wants to
* use this buffer sooner rather than later, so issuing the required
* flush earlier is beneficial.
*/
- if (obj->write_domain & I915_GEM_GPU_DOMAINS) {
- i915_gem_flush_ring(dev, file_priv,
- obj_priv->ring,
- 0, obj->write_domain);
- } else if (obj_priv->ring->outstanding_lazy_request) {
+ if (obj->base.write_domain & I915_GEM_GPU_DOMAINS) {
+ i915_gem_flush_ring(dev, obj->ring,
+ 0, obj->base.write_domain);
+ } else if (obj->ring->outstanding_lazy_request ==
+ obj->last_rendering_seqno) {
+ struct drm_i915_gem_request *request;
+
/* This ring is not being cleared by active usage,
* so emit a request to do so.
*/
- u32 seqno = i915_add_request(dev,
- NULL, NULL,
- obj_priv->ring);
- if (seqno == 0)
+ request = kzalloc(sizeof(*request), GFP_KERNEL);
+ if (request)
+ ret = i915_add_request(dev,
+ NULL, request,
+ obj->ring);
+ else
ret = -ENOMEM;
}
@@ -4394,12 +3385,12 @@ i915_gem_busy_ioctl(struct drm_device *dev, void *data,
* are actually unmasked, and our working set ends up being
* larger than required.
*/
- i915_gem_retire_requests_ring(dev, obj_priv->ring);
+ i915_gem_retire_requests_ring(dev, obj->ring);
- args->busy = obj_priv->active;
+ args->busy = obj->active;
}
- drm_gem_object_unreference(obj);
+ drm_gem_object_unreference(&obj->base);
unlock:
mutex_unlock(&dev->struct_mutex);
return ret;
@@ -4417,8 +3408,7 @@ i915_gem_madvise_ioctl(struct drm_device *dev, void *data,
struct drm_file *file_priv)
{
struct drm_i915_gem_madvise *args = data;
- struct drm_gem_object *obj;
- struct drm_i915_gem_object *obj_priv;
+ struct drm_i915_gem_object *obj;
int ret;
switch (args->madv) {
@@ -4433,37 +3423,36 @@ i915_gem_madvise_ioctl(struct drm_device *dev, void *data,
if (ret)
return ret;
- obj = drm_gem_object_lookup(dev, file_priv, args->handle);
+ obj = to_intel_bo(drm_gem_object_lookup(dev, file_priv, args->handle));
if (obj == NULL) {
ret = -ENOENT;
goto unlock;
}
- obj_priv = to_intel_bo(obj);
- if (obj_priv->pin_count) {
+ if (obj->pin_count) {
ret = -EINVAL;
goto out;
}
- if (obj_priv->madv != __I915_MADV_PURGED)
- obj_priv->madv = args->madv;
+ if (obj->madv != __I915_MADV_PURGED)
+ obj->madv = args->madv;
/* if the object is no longer bound, discard its backing storage */
- if (i915_gem_object_is_purgeable(obj_priv) &&
- obj_priv->gtt_space == NULL)
+ if (i915_gem_object_is_purgeable(obj) &&
+ obj->gtt_space == NULL)
i915_gem_object_truncate(obj);
- args->retained = obj_priv->madv != __I915_MADV_PURGED;
+ args->retained = obj->madv != __I915_MADV_PURGED;
out:
- drm_gem_object_unreference(obj);
+ drm_gem_object_unreference(&obj->base);
unlock:
mutex_unlock(&dev->struct_mutex);
return ret;
}
-struct drm_gem_object * i915_gem_alloc_object(struct drm_device *dev,
- size_t size)
+struct drm_i915_gem_object *i915_gem_alloc_object(struct drm_device *dev,
+ size_t size)
{
struct drm_i915_private *dev_priv = dev->dev_private;
struct drm_i915_gem_object *obj;
@@ -4486,11 +3475,15 @@ struct drm_gem_object * i915_gem_alloc_object(struct drm_device *dev,
obj->base.driver_private = NULL;
obj->fence_reg = I915_FENCE_REG_NONE;
INIT_LIST_HEAD(&obj->mm_list);
+ INIT_LIST_HEAD(&obj->gtt_list);
INIT_LIST_HEAD(&obj->ring_list);
+ INIT_LIST_HEAD(&obj->exec_list);
INIT_LIST_HEAD(&obj->gpu_write_list);
obj->madv = I915_MADV_WILLNEED;
+ /* Avoid an unnecessary call to unbind on the first bind. */
+ obj->map_and_fenceable = true;
- return &obj->base;
+ return obj;
}
int i915_gem_init_object(struct drm_gem_object *obj)
@@ -4500,42 +3493,41 @@ int i915_gem_init_object(struct drm_gem_object *obj)
return 0;
}
-static void i915_gem_free_object_tail(struct drm_gem_object *obj)
+static void i915_gem_free_object_tail(struct drm_i915_gem_object *obj)
{
- struct drm_device *dev = obj->dev;
+ struct drm_device *dev = obj->base.dev;
drm_i915_private_t *dev_priv = dev->dev_private;
- struct drm_i915_gem_object *obj_priv = to_intel_bo(obj);
int ret;
ret = i915_gem_object_unbind(obj);
if (ret == -ERESTARTSYS) {
- list_move(&obj_priv->mm_list,
+ list_move(&obj->mm_list,
&dev_priv->mm.deferred_free_list);
return;
}
- if (obj_priv->mmap_offset)
+ if (obj->base.map_list.map)
i915_gem_free_mmap_offset(obj);
- drm_gem_object_release(obj);
- i915_gem_info_remove_obj(dev_priv, obj->size);
+ drm_gem_object_release(&obj->base);
+ i915_gem_info_remove_obj(dev_priv, obj->base.size);
- kfree(obj_priv->page_cpu_valid);
- kfree(obj_priv->bit_17);
- kfree(obj_priv);
+ kfree(obj->page_cpu_valid);
+ kfree(obj->bit_17);
+ kfree(obj);
}
-void i915_gem_free_object(struct drm_gem_object *obj)
+void i915_gem_free_object(struct drm_gem_object *gem_obj)
{
- struct drm_device *dev = obj->dev;
- struct drm_i915_gem_object *obj_priv = to_intel_bo(obj);
+ struct drm_i915_gem_object *obj = to_intel_bo(gem_obj);
+ struct drm_device *dev = obj->base.dev;
trace_i915_gem_object_destroy(obj);
- while (obj_priv->pin_count > 0)
+ while (obj->pin_count > 0)
i915_gem_object_unpin(obj);
- if (obj_priv->phys_obj)
+ if (obj->phys_obj)
i915_gem_detach_phys_object(dev, obj);
i915_gem_free_object_tail(obj);
@@ -4562,13 +3554,15 @@ i915_gem_idle(struct drm_device *dev)
/* Under UMS, be paranoid and evict. */
if (!drm_core_check_feature(dev, DRIVER_MODESET)) {
- ret = i915_gem_evict_inactive(dev);
+ ret = i915_gem_evict_inactive(dev, false);
if (ret) {
mutex_unlock(&dev->struct_mutex);
return ret;
}
}
+ i915_gem_reset_fences(dev);
+
/* Hack! Don't let anybody do execbuf while we don't control the chip.
* We need to replace this with a semaphore, or something.
* And not confound mm.suspended!
@@ -4587,82 +3581,15 @@ i915_gem_idle(struct drm_device *dev)
return 0;
}
-/*
- * 965+ support PIPE_CONTROL commands, which provide finer grained control
- * over cache flushing.
- */
-static int
-i915_gem_init_pipe_control(struct drm_device *dev)
-{
- drm_i915_private_t *dev_priv = dev->dev_private;
- struct drm_gem_object *obj;
- struct drm_i915_gem_object *obj_priv;
- int ret;
-
- obj = i915_gem_alloc_object(dev, 4096);
- if (obj == NULL) {
- DRM_ERROR("Failed to allocate seqno page\n");
- ret = -ENOMEM;
- goto err;
- }
- obj_priv = to_intel_bo(obj);
- obj_priv->agp_type = AGP_USER_CACHED_MEMORY;
-
- ret = i915_gem_object_pin(obj, 4096);
- if (ret)
- goto err_unref;
-
- dev_priv->seqno_gfx_addr = obj_priv->gtt_offset;
- dev_priv->seqno_page = kmap(obj_priv->pages[0]);
- if (dev_priv->seqno_page == NULL)
- goto err_unpin;
-
- dev_priv->seqno_obj = obj;
- memset(dev_priv->seqno_page, 0, PAGE_SIZE);
-
- return 0;
-
-err_unpin:
- i915_gem_object_unpin(obj);
-err_unref:
- drm_gem_object_unreference(obj);
-err:
- return ret;
-}
-
-
-static void
-i915_gem_cleanup_pipe_control(struct drm_device *dev)
-{
- drm_i915_private_t *dev_priv = dev->dev_private;
- struct drm_gem_object *obj;
- struct drm_i915_gem_object *obj_priv;
-
- obj = dev_priv->seqno_obj;
- obj_priv = to_intel_bo(obj);
- kunmap(obj_priv->pages[0]);
- i915_gem_object_unpin(obj);
- drm_gem_object_unreference(obj);
- dev_priv->seqno_obj = NULL;
-
- dev_priv->seqno_page = NULL;
-}
-
int
i915_gem_init_ringbuffer(struct drm_device *dev)
{
drm_i915_private_t *dev_priv = dev->dev_private;
int ret;
- if (HAS_PIPE_CONTROL(dev)) {
- ret = i915_gem_init_pipe_control(dev);
- if (ret)
- return ret;
- }
-
ret = intel_init_render_ring_buffer(dev);
if (ret)
- goto cleanup_pipe_control;
+ return ret;
if (HAS_BSD(dev)) {
ret = intel_init_bsd_ring_buffer(dev);
@@ -4681,12 +3608,9 @@ i915_gem_init_ringbuffer(struct drm_device *dev)
return 0;
cleanup_bsd_ring:
- intel_cleanup_ring_buffer(dev, &dev_priv->bsd_ring);
+ intel_cleanup_ring_buffer(&dev_priv->ring[VCS]);
cleanup_render_ring:
- intel_cleanup_ring_buffer(dev, &dev_priv->render_ring);
-cleanup_pipe_control:
- if (HAS_PIPE_CONTROL(dev))
- i915_gem_cleanup_pipe_control(dev);
+ intel_cleanup_ring_buffer(&dev_priv->ring[RCS]);
return ret;
}
@@ -4694,12 +3618,10 @@ void
i915_gem_cleanup_ringbuffer(struct drm_device *dev)
{
drm_i915_private_t *dev_priv = dev->dev_private;
+ int i;
- intel_cleanup_ring_buffer(dev, &dev_priv->render_ring);
- intel_cleanup_ring_buffer(dev, &dev_priv->bsd_ring);
- intel_cleanup_ring_buffer(dev, &dev_priv->blt_ring);
- if (HAS_PIPE_CONTROL(dev))
- i915_gem_cleanup_pipe_control(dev);
+ for (i = 0; i < I915_NUM_RINGS; i++)
+ intel_cleanup_ring_buffer(&dev_priv->ring[i]);
}
int
@@ -4707,7 +3629,7 @@ i915_gem_entervt_ioctl(struct drm_device *dev, void *data,
struct drm_file *file_priv)
{
drm_i915_private_t *dev_priv = dev->dev_private;
- int ret;
+ int ret, i;
if (drm_core_check_feature(dev, DRIVER_MODESET))
return 0;
@@ -4727,14 +3649,12 @@ i915_gem_entervt_ioctl(struct drm_device *dev, void *data,
}
BUG_ON(!list_empty(&dev_priv->mm.active_list));
- BUG_ON(!list_empty(&dev_priv->render_ring.active_list));
- BUG_ON(!list_empty(&dev_priv->bsd_ring.active_list));
- BUG_ON(!list_empty(&dev_priv->blt_ring.active_list));
BUG_ON(!list_empty(&dev_priv->mm.flushing_list));
BUG_ON(!list_empty(&dev_priv->mm.inactive_list));
- BUG_ON(!list_empty(&dev_priv->render_ring.request_list));
- BUG_ON(!list_empty(&dev_priv->bsd_ring.request_list));
- BUG_ON(!list_empty(&dev_priv->blt_ring.request_list));
+ for (i = 0; i < I915_NUM_RINGS; i++) {
+ BUG_ON(!list_empty(&dev_priv->ring[i].active_list));
+ BUG_ON(!list_empty(&dev_priv->ring[i].request_list));
+ }
mutex_unlock(&dev->struct_mutex);
ret = drm_irq_install(dev);
@@ -4796,17 +3716,14 @@ i915_gem_load(struct drm_device *dev)
INIT_LIST_HEAD(&dev_priv->mm.pinned_list);
INIT_LIST_HEAD(&dev_priv->mm.fence_list);
INIT_LIST_HEAD(&dev_priv->mm.deferred_free_list);
- init_ring_lists(&dev_priv->render_ring);
- init_ring_lists(&dev_priv->bsd_ring);
- init_ring_lists(&dev_priv->blt_ring);
+ INIT_LIST_HEAD(&dev_priv->mm.gtt_list);
+ for (i = 0; i < I915_NUM_RINGS; i++)
+ init_ring_lists(&dev_priv->ring[i]);
for (i = 0; i < 16; i++)
INIT_LIST_HEAD(&dev_priv->fence_regs[i].lru_list);
INIT_DELAYED_WORK(&dev_priv->mm.retire_work,
i915_gem_retire_work_handler);
init_completion(&dev_priv->error_completion);
- spin_lock(&shrink_list_lock);
- list_add(&dev_priv->mm.shrink_list, &shrink_list);
- spin_unlock(&shrink_list_lock);
/* On GEN3 we really need to make sure the ARB C3 LP bit is set */
if (IS_GEN3(dev)) {
@@ -4818,6 +3735,8 @@ i915_gem_load(struct drm_device *dev)
}
}
+ dev_priv->relative_constants_mode = I915_EXEC_CONSTANTS_REL_GENERAL;
+
/* Old X drivers will take 0-2 for front, back, depth buffers */
if (!drm_core_check_feature(dev, DRIVER_MODESET))
dev_priv->fence_reg_start = 3;
@@ -4849,6 +3768,10 @@ i915_gem_load(struct drm_device *dev)
}
i915_gem_detect_bit_6_swizzle(dev);
init_waitqueue_head(&dev_priv->pending_flip_queue);
+
+ dev_priv->mm.inactive_shrinker.shrink = i915_gem_inactive_shrink;
+ dev_priv->mm.inactive_shrinker.seeks = DEFAULT_SEEKS;
+ register_shrinker(&dev_priv->mm.inactive_shrinker);
}
/*
@@ -4918,47 +3841,47 @@ void i915_gem_free_all_phys_object(struct drm_device *dev)
}
void i915_gem_detach_phys_object(struct drm_device *dev,
- struct drm_gem_object *obj)
+ struct drm_i915_gem_object *obj)
{
- struct drm_i915_gem_object *obj_priv;
+ struct address_space *mapping = obj->base.filp->f_path.dentry->d_inode->i_mapping;
+ char *vaddr;
int i;
- int ret;
int page_count;
- obj_priv = to_intel_bo(obj);
- if (!obj_priv->phys_obj)
+ if (!obj->phys_obj)
return;
+ vaddr = obj->phys_obj->handle->vaddr;
- ret = i915_gem_object_get_pages(obj, 0);
- if (ret)
- goto out;
-
- page_count = obj->size / PAGE_SIZE;
-
+ page_count = obj->base.size / PAGE_SIZE;
for (i = 0; i < page_count; i++) {
- char *dst = kmap_atomic(obj_priv->pages[i]);
- char *src = obj_priv->phys_obj->handle->vaddr + (i * PAGE_SIZE);
-
- memcpy(dst, src, PAGE_SIZE);
- kunmap_atomic(dst);
+ struct page *page = read_cache_page_gfp(mapping, i,
+ GFP_HIGHUSER | __GFP_RECLAIMABLE);
+ if (!IS_ERR(page)) {
+ char *dst = kmap_atomic(page);
+ memcpy(dst, vaddr + i*PAGE_SIZE, PAGE_SIZE);
+ kunmap_atomic(dst);
+
+ drm_clflush_pages(&page, 1);
+
+ set_page_dirty(page);
+ mark_page_accessed(page);
+ page_cache_release(page);
+ }
}
- drm_clflush_pages(obj_priv->pages, page_count);
- drm_agp_chipset_flush(dev);
+ intel_gtt_chipset_flush();
- i915_gem_object_put_pages(obj);
-out:
- obj_priv->phys_obj->cur_obj = NULL;
- obj_priv->phys_obj = NULL;
+ obj->phys_obj->cur_obj = NULL;
+ obj->phys_obj = NULL;
}
int
i915_gem_attach_phys_object(struct drm_device *dev,
- struct drm_gem_object *obj,
+ struct drm_i915_gem_object *obj,
int id,
int align)
{
+ struct address_space *mapping = obj->base.filp->f_path.dentry->d_inode->i_mapping;
drm_i915_private_t *dev_priv = dev->dev_private;
- struct drm_i915_gem_object *obj_priv;
int ret = 0;
int page_count;
int i;
@@ -4966,10 +3889,8 @@ i915_gem_attach_phys_object(struct drm_device *dev,
if (id > I915_MAX_PHYS_OBJECT)
return -EINVAL;
- obj_priv = to_intel_bo(obj);
-
- if (obj_priv->phys_obj) {
- if (obj_priv->phys_obj->id == id)
+ if (obj->phys_obj) {
+ if (obj->phys_obj->id == id)
return 0;
i915_gem_detach_phys_object(dev, obj);
}
@@ -4977,51 +3898,50 @@ i915_gem_attach_phys_object(struct drm_device *dev,
/* create a new object */
if (!dev_priv->mm.phys_objs[id - 1]) {
ret = i915_gem_init_phys_object(dev, id,
- obj->size, align);
+ obj->base.size, align);
if (ret) {
- DRM_ERROR("failed to init phys object %d size: %zu\n", id, obj->size);
- goto out;
+ DRM_ERROR("failed to init phys object %d size: %zu\n",
+ id, obj->base.size);
+ return ret;
}
}
/* bind to the object */
- obj_priv->phys_obj = dev_priv->mm.phys_objs[id - 1];
- obj_priv->phys_obj->cur_obj = obj;
+ obj->phys_obj = dev_priv->mm.phys_objs[id - 1];
+ obj->phys_obj->cur_obj = obj;
- ret = i915_gem_object_get_pages(obj, 0);
- if (ret) {
- DRM_ERROR("failed to get page list\n");
- goto out;
- }
-
- page_count = obj->size / PAGE_SIZE;
+ page_count = obj->base.size / PAGE_SIZE;
for (i = 0; i < page_count; i++) {
- char *src = kmap_atomic(obj_priv->pages[i]);
- char *dst = obj_priv->phys_obj->handle->vaddr + (i * PAGE_SIZE);
+ struct page *page;
+ char *dst, *src;
+ page = read_cache_page_gfp(mapping, i,
+ GFP_HIGHUSER | __GFP_RECLAIMABLE);
+ if (IS_ERR(page))
+ return PTR_ERR(page);
+
+ src = kmap_atomic(page);
+ dst = obj->phys_obj->handle->vaddr + (i * PAGE_SIZE);
memcpy(dst, src, PAGE_SIZE);
kunmap_atomic(src);
- }
- i915_gem_object_put_pages(obj);
+ mark_page_accessed(page);
+ page_cache_release(page);
+ }
return 0;
-out:
- return ret;
}
static int
-i915_gem_phys_pwrite(struct drm_device *dev, struct drm_gem_object *obj,
+i915_gem_phys_pwrite(struct drm_device *dev,
+ struct drm_i915_gem_object *obj,
struct drm_i915_gem_pwrite *args,
struct drm_file *file_priv)
{
- struct drm_i915_gem_object *obj_priv = to_intel_bo(obj);
- void *vaddr = obj_priv->phys_obj->handle->vaddr + args->offset;
+ void *vaddr = obj->phys_obj->handle->vaddr + args->offset;
char __user *user_data = (char __user *) (uintptr_t) args->data_ptr;
- DRM_DEBUG_DRIVER("vaddr %p, %lld\n", vaddr, args->size);
-
if (__copy_from_user_inatomic_nocache(vaddr, user_data, args->size)) {
unsigned long unwritten;
@@ -5036,7 +3956,7 @@ i915_gem_phys_pwrite(struct drm_device *dev, struct drm_gem_object *obj,
return -EFAULT;
}
- drm_agp_chipset_flush(dev);
+ intel_gtt_chipset_flush();
return 0;
}
@@ -5074,144 +3994,68 @@ i915_gpu_is_active(struct drm_device *dev)
}
static int
-i915_gem_shrink(struct shrinker *shrink, int nr_to_scan, gfp_t gfp_mask)
-{
- drm_i915_private_t *dev_priv, *next_dev;
- struct drm_i915_gem_object *obj_priv, *next_obj;
- int cnt = 0;
- int would_deadlock = 1;
+i915_gem_inactive_shrink(struct shrinker *shrinker,
+ int nr_to_scan,
+ gfp_t gfp_mask)
+{
+ struct drm_i915_private *dev_priv =
+ container_of(shrinker,
+ struct drm_i915_private,
+ mm.inactive_shrinker);
+ struct drm_device *dev = dev_priv->dev;
+ struct drm_i915_gem_object *obj, *next;
+ int cnt;
+
+ if (!mutex_trylock(&dev->struct_mutex))
+ return 0;
/* "fast-path" to count number of available objects */
if (nr_to_scan == 0) {
- spin_lock(&shrink_list_lock);
- list_for_each_entry(dev_priv, &shrink_list, mm.shrink_list) {
- struct drm_device *dev = dev_priv->dev;
-
- if (mutex_trylock(&dev->struct_mutex)) {
- list_for_each_entry(obj_priv,
- &dev_priv->mm.inactive_list,
- mm_list)
- cnt++;
- mutex_unlock(&dev->struct_mutex);
- }
- }
- spin_unlock(&shrink_list_lock);
-
- return (cnt / 100) * sysctl_vfs_cache_pressure;
+ cnt = 0;
+ list_for_each_entry(obj,
+ &dev_priv->mm.inactive_list,
+ mm_list)
+ cnt++;
+ mutex_unlock(&dev->struct_mutex);
+ return cnt / 100 * sysctl_vfs_cache_pressure;
}
- spin_lock(&shrink_list_lock);
-
rescan:
/* first scan for clean buffers */
- list_for_each_entry_safe(dev_priv, next_dev,
- &shrink_list, mm.shrink_list) {
- struct drm_device *dev = dev_priv->dev;
-
- if (! mutex_trylock(&dev->struct_mutex))
- continue;
-
- spin_unlock(&shrink_list_lock);
- i915_gem_retire_requests(dev);
+ i915_gem_retire_requests(dev);
- list_for_each_entry_safe(obj_priv, next_obj,
- &dev_priv->mm.inactive_list,
- mm_list) {
- if (i915_gem_object_is_purgeable(obj_priv)) {
- i915_gem_object_unbind(&obj_priv->base);
- if (--nr_to_scan <= 0)
- break;
- }
+ list_for_each_entry_safe(obj, next,
+ &dev_priv->mm.inactive_list,
+ mm_list) {
+ if (i915_gem_object_is_purgeable(obj)) {
+ if (i915_gem_object_unbind(obj) == 0 &&
+ --nr_to_scan == 0)
+ break;
}
-
- spin_lock(&shrink_list_lock);
- mutex_unlock(&dev->struct_mutex);
-
- would_deadlock = 0;
-
- if (nr_to_scan <= 0)
- break;
}
/* second pass, evict/count anything still on the inactive list */
- list_for_each_entry_safe(dev_priv, next_dev,
- &shrink_list, mm.shrink_list) {
- struct drm_device *dev = dev_priv->dev;
-
- if (! mutex_trylock(&dev->struct_mutex))
- continue;
-
- spin_unlock(&shrink_list_lock);
-
- list_for_each_entry_safe(obj_priv, next_obj,
- &dev_priv->mm.inactive_list,
- mm_list) {
- if (nr_to_scan > 0) {
- i915_gem_object_unbind(&obj_priv->base);
- nr_to_scan--;
- } else
- cnt++;
- }
-
- spin_lock(&shrink_list_lock);
- mutex_unlock(&dev->struct_mutex);
-
- would_deadlock = 0;
+ cnt = 0;
+ list_for_each_entry_safe(obj, next,
+ &dev_priv->mm.inactive_list,
+ mm_list) {
+ if (nr_to_scan &&
+ i915_gem_object_unbind(obj) == 0)
+ nr_to_scan--;
+ else
+ cnt++;
}
- if (nr_to_scan) {
- int active = 0;
-
+ if (nr_to_scan && i915_gpu_is_active(dev)) {
/*
* We are desperate for pages, so as a last resort, wait
* for the GPU to finish and discard whatever we can.
 * This dramatically reduces the number of OOM-killer
 * events whilst running the GPU aggressively.
*/
- list_for_each_entry(dev_priv, &shrink_list, mm.shrink_list) {
- struct drm_device *dev = dev_priv->dev;
-
- if (!mutex_trylock(&dev->struct_mutex))
- continue;
-
- spin_unlock(&shrink_list_lock);
-
- if (i915_gpu_is_active(dev)) {
- i915_gpu_idle(dev);
- active++;
- }
-
- spin_lock(&shrink_list_lock);
- mutex_unlock(&dev->struct_mutex);
- }
-
- if (active)
+ if (i915_gpu_idle(dev) == 0)
goto rescan;
}
-
- spin_unlock(&shrink_list_lock);
-
- if (would_deadlock)
- return -1;
- else if (cnt > 0)
- return (cnt / 100) * sysctl_vfs_cache_pressure;
- else
- return 0;
-}
-
-static struct shrinker shrinker = {
- .shrink = i915_gem_shrink,
- .seeks = DEFAULT_SEEKS,
-};
-
-__init void
-i915_gem_shrinker_init(void)
-{
- register_shrinker(&shrinker);
-}
-
-__exit void
-i915_gem_shrinker_exit(void)
-{
- unregister_shrinker(&shrinker);
+ mutex_unlock(&dev->struct_mutex);
+ return cnt / 100 * sysctl_vfs_cache_pressure;
}
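
Editor's sketch, not part of this patch: the rewritten shrinker is per-device and follows the standard shrinker contract — nr_to_scan == 0 is a pure count query, a non-zero value asks it to unbind that many inactive objects, and the return value scales the remaining count by vfs_cache_pressure. A minimal userspace model of that contract, with hypothetical names (fake_shrink, inactive):

#include <stdio.h>

static int inactive = 400;            /* stand-in for the inactive list */
static int vfs_cache_pressure = 100;  /* mirrors sysctl_vfs_cache_pressure */

static int fake_shrink(int nr_to_scan)
{
	if (nr_to_scan == 0)              /* count-only query */
		return inactive / 100 * vfs_cache_pressure;

	while (nr_to_scan-- && inactive)  /* unbind up to nr_to_scan objects */
		inactive--;

	return inactive / 100 * vfs_cache_pressure;
}

int main(void)
{
	printf("%d\n", fake_shrink(0));   /* 400: nothing freed yet */
	printf("%d\n", fake_shrink(200)); /* 200: half the list unbound */
	return 0;
}
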
diff --git a/drivers/gpu/drm/i915/i915_gem_debug.c b/drivers/gpu/drm/i915/i915_gem_debug.c
index 48644b840a8..29d014c48ca 100644
--- a/drivers/gpu/drm/i915/i915_gem_debug.c
+++ b/drivers/gpu/drm/i915/i915_gem_debug.c
@@ -152,13 +152,12 @@ i915_gem_dump_page(struct page *page, uint32_t start, uint32_t end,
}
void
-i915_gem_dump_object(struct drm_gem_object *obj, int len,
+i915_gem_dump_object(struct drm_i915_gem_object *obj, int len,
const char *where, uint32_t mark)
{
- struct drm_i915_gem_object *obj_priv = to_intel_bo(obj);
int page;
- DRM_INFO("%s: object at offset %08x\n", where, obj_priv->gtt_offset);
+ DRM_INFO("%s: object at offset %08x\n", where, obj->gtt_offset);
for (page = 0; page < (len + PAGE_SIZE-1) / PAGE_SIZE; page++) {
int page_len, chunk, chunk_len;
@@ -170,9 +169,9 @@ i915_gem_dump_object(struct drm_gem_object *obj, int len,
chunk_len = page_len - chunk;
if (chunk_len > 128)
chunk_len = 128;
- i915_gem_dump_page(obj_priv->pages[page],
+ i915_gem_dump_page(obj->pages[page],
chunk, chunk + chunk_len,
- obj_priv->gtt_offset +
+ obj->gtt_offset +
page * PAGE_SIZE,
mark);
}
@@ -182,21 +181,19 @@ i915_gem_dump_object(struct drm_gem_object *obj, int len,
#if WATCH_COHERENCY
void
-i915_gem_object_check_coherency(struct drm_gem_object *obj, int handle)
+i915_gem_object_check_coherency(struct drm_i915_gem_object *obj, int handle)
{
- struct drm_device *dev = obj->dev;
- struct drm_i915_gem_object *obj_priv = to_intel_bo(obj);
+ struct drm_device *dev = obj->base.dev;
int page;
uint32_t *gtt_mapping;
uint32_t *backing_map = NULL;
int bad_count = 0;
DRM_INFO("%s: checking coherency of object %p@0x%08x (%d, %zdkb):\n",
- __func__, obj, obj_priv->gtt_offset, handle,
+ __func__, obj, obj->gtt_offset, handle,
obj->size / 1024);
- gtt_mapping = ioremap(dev->agp->base + obj_priv->gtt_offset,
- obj->size);
+ gtt_mapping = ioremap(dev->agp->base + obj->gtt_offset, obj->base.size);
if (gtt_mapping == NULL) {
DRM_ERROR("failed to map GTT space\n");
return;
@@ -205,7 +202,7 @@ i915_gem_object_check_coherency(struct drm_gem_object *obj, int handle)
for (page = 0; page < obj->size / PAGE_SIZE; page++) {
int i;
- backing_map = kmap_atomic(obj_priv->pages[page], KM_USER0);
+ backing_map = kmap_atomic(obj->pages[page], KM_USER0);
if (backing_map == NULL) {
DRM_ERROR("failed to map backing page\n");
@@ -220,7 +217,7 @@ i915_gem_object_check_coherency(struct drm_gem_object *obj, int handle)
if (cpuval != gttval) {
DRM_INFO("incoherent CPU vs GPU at 0x%08x: "
"0x%08x vs 0x%08x\n",
- (int)(obj_priv->gtt_offset +
+ (int)(obj->gtt_offset +
page * PAGE_SIZE + i * 4),
cpuval, gttval);
if (bad_count++ >= 8) {
diff --git a/drivers/gpu/drm/i915/i915_gem_evict.c b/drivers/gpu/drm/i915/i915_gem_evict.c
index d8ae7d1d0cc..78b8cf90c92 100644
--- a/drivers/gpu/drm/i915/i915_gem_evict.c
+++ b/drivers/gpu/drm/i915/i915_gem_evict.c
@@ -32,28 +32,36 @@
#include "i915_drm.h"
static bool
-mark_free(struct drm_i915_gem_object *obj_priv,
- struct list_head *unwind)
+mark_free(struct drm_i915_gem_object *obj, struct list_head *unwind)
{
- list_add(&obj_priv->evict_list, unwind);
- drm_gem_object_reference(&obj_priv->base);
- return drm_mm_scan_add_block(obj_priv->gtt_space);
+ list_add(&obj->exec_list, unwind);
+ drm_gem_object_reference(&obj->base);
+ return drm_mm_scan_add_block(obj->gtt_space);
}
int
-i915_gem_evict_something(struct drm_device *dev, int min_size, unsigned alignment)
+i915_gem_evict_something(struct drm_device *dev, int min_size,
+ unsigned alignment, bool mappable)
{
drm_i915_private_t *dev_priv = dev->dev_private;
struct list_head eviction_list, unwind_list;
- struct drm_i915_gem_object *obj_priv;
+ struct drm_i915_gem_object *obj;
int ret = 0;
i915_gem_retire_requests(dev);
/* Re-check for free space after retiring requests */
- if (drm_mm_search_free(&dev_priv->mm.gtt_space,
- min_size, alignment, 0))
- return 0;
+ if (mappable) {
+ if (drm_mm_search_free_in_range(&dev_priv->mm.gtt_space,
+ min_size, alignment, 0,
+ dev_priv->mm.gtt_mappable_end,
+ 0))
+ return 0;
+ } else {
+ if (drm_mm_search_free(&dev_priv->mm.gtt_space,
+ min_size, alignment, 0))
+ return 0;
+ }
/*
* The goal is to evict objects and amalgamate space in LRU order.
@@ -79,45 +87,50 @@ i915_gem_evict_something(struct drm_device *dev, int min_size, unsigned alignment)
*/
INIT_LIST_HEAD(&unwind_list);
- drm_mm_init_scan(&dev_priv->mm.gtt_space, min_size, alignment);
+ if (mappable)
+ drm_mm_init_scan_with_range(&dev_priv->mm.gtt_space, min_size,
+ alignment, 0,
+ dev_priv->mm.gtt_mappable_end);
+ else
+ drm_mm_init_scan(&dev_priv->mm.gtt_space, min_size, alignment);
/* First see if there is a large enough contiguous idle region... */
- list_for_each_entry(obj_priv, &dev_priv->mm.inactive_list, mm_list) {
- if (mark_free(obj_priv, &unwind_list))
+ list_for_each_entry(obj, &dev_priv->mm.inactive_list, mm_list) {
+ if (mark_free(obj, &unwind_list))
goto found;
}
/* Now merge in the soon-to-be-expired objects... */
- list_for_each_entry(obj_priv, &dev_priv->mm.active_list, mm_list) {
+ list_for_each_entry(obj, &dev_priv->mm.active_list, mm_list) {
/* Does the object require an outstanding flush? */
- if (obj_priv->base.write_domain || obj_priv->pin_count)
+ if (obj->base.write_domain || obj->pin_count)
continue;
- if (mark_free(obj_priv, &unwind_list))
+ if (mark_free(obj, &unwind_list))
goto found;
}
/* Finally add anything with a pending flush (in order of retirement) */
- list_for_each_entry(obj_priv, &dev_priv->mm.flushing_list, mm_list) {
- if (obj_priv->pin_count)
+ list_for_each_entry(obj, &dev_priv->mm.flushing_list, mm_list) {
+ if (obj->pin_count)
continue;
- if (mark_free(obj_priv, &unwind_list))
+ if (mark_free(obj, &unwind_list))
goto found;
}
- list_for_each_entry(obj_priv, &dev_priv->mm.active_list, mm_list) {
- if (! obj_priv->base.write_domain || obj_priv->pin_count)
+ list_for_each_entry(obj, &dev_priv->mm.active_list, mm_list) {
+ if (!obj->base.write_domain || obj->pin_count)
continue;
- if (mark_free(obj_priv, &unwind_list))
+ if (mark_free(obj, &unwind_list))
goto found;
}
/* Nothing found, clean up and bail out! */
- list_for_each_entry(obj_priv, &unwind_list, evict_list) {
- ret = drm_mm_scan_remove_block(obj_priv->gtt_space);
+ list_for_each_entry(obj, &unwind_list, exec_list) {
+ ret = drm_mm_scan_remove_block(obj->gtt_space);
BUG_ON(ret);
- drm_gem_object_unreference(&obj_priv->base);
+ drm_gem_object_unreference(&obj->base);
}
/* We expect the caller to unpin, evict all and try again, or give up.
@@ -131,33 +144,33 @@ found:
* temporary list. */
INIT_LIST_HEAD(&eviction_list);
while (!list_empty(&unwind_list)) {
- obj_priv = list_first_entry(&unwind_list,
- struct drm_i915_gem_object,
- evict_list);
- if (drm_mm_scan_remove_block(obj_priv->gtt_space)) {
- list_move(&obj_priv->evict_list, &eviction_list);
+ obj = list_first_entry(&unwind_list,
+ struct drm_i915_gem_object,
+ exec_list);
+ if (drm_mm_scan_remove_block(obj->gtt_space)) {
+ list_move(&obj->exec_list, &eviction_list);
continue;
}
- list_del(&obj_priv->evict_list);
- drm_gem_object_unreference(&obj_priv->base);
+ list_del_init(&obj->exec_list);
+ drm_gem_object_unreference(&obj->base);
}
/* Unbinding will emit any required flushes */
while (!list_empty(&eviction_list)) {
- obj_priv = list_first_entry(&eviction_list,
- struct drm_i915_gem_object,
- evict_list);
+ obj = list_first_entry(&eviction_list,
+ struct drm_i915_gem_object,
+ exec_list);
if (ret == 0)
- ret = i915_gem_object_unbind(&obj_priv->base);
- list_del(&obj_priv->evict_list);
- drm_gem_object_unreference(&obj_priv->base);
+ ret = i915_gem_object_unbind(obj);
+ list_del_init(&obj->exec_list);
+ drm_gem_object_unreference(&obj->base);
}
return ret;
}
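
Editor's sketch, not part of this patch: the scan above follows the drm_mm protocol — every node fed to drm_mm_scan_add_block() must later be handed back to drm_mm_scan_remove_block(), whose return value marks the nodes that fall inside the hole the scan settled on. A condensed pseudo-usage sketch (reference counting and error handling elided; evict() is a hypothetical helper):

drm_mm_init_scan(&dev_priv->mm.gtt_space, min_size, alignment);

/* Feed nodes in LRU order until the scan reports a large-enough hole. */
list_for_each_entry(obj, &dev_priv->mm.inactive_list, mm_list)
	if (drm_mm_scan_add_block(obj->gtt_space))
		break;

/* Every added node must be removed; a true return marks it for eviction. */
list_for_each_entry_safe(obj, next, &unwind_list, exec_list)
	if (drm_mm_scan_remove_block(obj->gtt_space))
		evict(obj);
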
int
-i915_gem_evict_everything(struct drm_device *dev)
+i915_gem_evict_everything(struct drm_device *dev, bool purgeable_only)
{
drm_i915_private_t *dev_priv = dev->dev_private;
int ret;
@@ -176,36 +189,22 @@ i915_gem_evict_everything(struct drm_device *dev)
BUG_ON(!list_empty(&dev_priv->mm.flushing_list));
- ret = i915_gem_evict_inactive(dev);
- if (ret)
- return ret;
-
- lists_empty = (list_empty(&dev_priv->mm.inactive_list) &&
- list_empty(&dev_priv->mm.flushing_list) &&
- list_empty(&dev_priv->mm.active_list));
- BUG_ON(!lists_empty);
-
- return 0;
+ return i915_gem_evict_inactive(dev, purgeable_only);
}
/** Unbinds all inactive objects. */
int
-i915_gem_evict_inactive(struct drm_device *dev)
+i915_gem_evict_inactive(struct drm_device *dev, bool purgeable_only)
{
drm_i915_private_t *dev_priv = dev->dev_private;
-
- while (!list_empty(&dev_priv->mm.inactive_list)) {
- struct drm_gem_object *obj;
- int ret;
-
- obj = &list_first_entry(&dev_priv->mm.inactive_list,
- struct drm_i915_gem_object,
- mm_list)->base;
-
- ret = i915_gem_object_unbind(obj);
- if (ret != 0) {
- DRM_ERROR("Error unbinding object: %d\n", ret);
- return ret;
+ struct drm_i915_gem_object *obj, *next;
+
+ list_for_each_entry_safe(obj, next,
+ &dev_priv->mm.inactive_list, mm_list) {
+ if (!purgeable_only || obj->madv != I915_MADV_WILLNEED) {
+ int ret = i915_gem_object_unbind(obj);
+ if (ret)
+ return ret;
}
}
diff --git a/drivers/gpu/drm/i915/i915_gem_execbuffer.c b/drivers/gpu/drm/i915/i915_gem_execbuffer.c
new file mode 100644
index 00000000000..61129e6759e
--- /dev/null
+++ b/drivers/gpu/drm/i915/i915_gem_execbuffer.c
@@ -0,0 +1,1343 @@
+/*
+ * Copyright © 2008,2010 Intel Corporation
+ *
+ * Permission is hereby granted, free of charge, to any person obtaining a
+ * copy of this software and associated documentation files (the "Software"),
+ * to deal in the Software without restriction, including without limitation
+ * the rights to use, copy, modify, merge, publish, distribute, sublicense,
+ * and/or sell copies of the Software, and to permit persons to whom the
+ * Software is furnished to do so, subject to the following conditions:
+ *
+ * The above copyright notice and this permission notice (including the next
+ * paragraph) shall be included in all copies or substantial portions of the
+ * Software.
+ *
+ * THE SOFTWARE IS PROVIDED "AS IS", WITHOUT WARRANTY OF ANY KIND, EXPRESS OR
+ * IMPLIED, INCLUDING BUT NOT LIMITED TO THE WARRANTIES OF MERCHANTABILITY,
+ * FITNESS FOR A PARTICULAR PURPOSE AND NONINFRINGEMENT. IN NO EVENT SHALL
+ * THE AUTHORS OR COPYRIGHT HOLDERS BE LIABLE FOR ANY CLAIM, DAMAGES OR OTHER
+ * LIABILITY, WHETHER IN AN ACTION OF CONTRACT, TORT OR OTHERWISE, ARISING
+ * FROM, OUT OF OR IN CONNECTION WITH THE SOFTWARE OR THE USE OR OTHER DEALINGS
+ * IN THE SOFTWARE.
+ *
+ * Authors:
+ * Eric Anholt <eric@anholt.net>
+ * Chris Wilson <chris@chris-wilson.co.uk>
+ *
+ */
+
+#include "drmP.h"
+#include "drm.h"
+#include "i915_drm.h"
+#include "i915_drv.h"
+#include "i915_trace.h"
+#include "intel_drv.h"
+
+struct change_domains {
+ uint32_t invalidate_domains;
+ uint32_t flush_domains;
+ uint32_t flush_rings;
+};
+
+/*
+ * Set the next domain for the specified object. This
+ * may not actually perform the necessary flushing/invalidating though,
+ * as that may want to be batched with other set_domain operations
+ *
+ * This is (we hope) the only really tricky part of gem. The goal
+ * is fairly simple -- track which caches hold bits of the object
+ * and make sure they remain coherent. A few concrete examples may
+ * help to explain how it works. For shorthand, we use the notation
+ * (read_domains, write_domain), e.g. (CPU, CPU) to indicate
+ * a pair of read and write domain masks.
+ *
+ * Case 1: the batch buffer
+ *
+ * 1. Allocated
+ * 2. Written by CPU
+ * 3. Mapped to GTT
+ * 4. Read by GPU
+ * 5. Unmapped from GTT
+ * 6. Freed
+ *
+ * Let's take these one step at a time
+ *
+ * 1. Allocated
+ * Pages allocated from the kernel may still have
+ * cache contents, so we set them to (CPU, CPU) always.
+ * 2. Written by CPU (using pwrite)
+ * The pwrite function calls set_domain (CPU, CPU) and
+ * this function does nothing (as nothing changes)
+ * 3. Mapped by GTT
+ * This function asserts that the object is not
+ * currently in any GPU-based read or write domains
+ * 4. Read by GPU
+ * i915_gem_execbuffer calls set_domain (COMMAND, 0).
+ * As write_domain is zero, this function adds in the
+ * current read domains (CPU+COMMAND, 0).
+ * flush_domains is set to CPU.
+ * invalidate_domains is set to COMMAND
+ * clflush is run to get data out of the CPU caches
+ * then i915_dev_set_domain calls i915_gem_flush to
+ * emit an MI_FLUSH and drm_agp_chipset_flush
+ * 5. Unmapped from GTT
+ * i915_gem_object_unbind calls set_domain (CPU, CPU)
+ * flush_domains and invalidate_domains end up both zero
+ * so no flushing/invalidating happens
+ * 6. Freed
+ * yay, done
+ *
+ * Case 2: The shared render buffer
+ *
+ * 1. Allocated
+ * 2. Mapped to GTT
+ * 3. Read/written by GPU
+ * 4. set_domain to (CPU,CPU)
+ * 5. Read/written by CPU
+ * 6. Read/written by GPU
+ *
+ * 1. Allocated
+ * Same as last example, (CPU, CPU)
+ * 2. Mapped to GTT
+ * Nothing changes (assertions find that it is not in the GPU)
+ * 3. Read/written by GPU
+ * execbuffer calls set_domain (RENDER, RENDER)
+ * flush_domains gets CPU
+ * invalidate_domains gets GPU
+ * clflush (obj)
+ * MI_FLUSH and drm_agp_chipset_flush
+ * 4. set_domain (CPU, CPU)
+ * flush_domains gets GPU
+ * invalidate_domains gets CPU
+ * wait_rendering (obj) to make sure all drawing is complete.
+ * This will include an MI_FLUSH to get the data from GPU
+ * to memory
+ * clflush (obj) to invalidate the CPU cache
+ * Another MI_FLUSH in i915_gem_flush (eliminate this somehow?)
+ * 5. Read/written by CPU
+ * cache lines are loaded and dirtied
+ * 6. Read/written by GPU
+ * Same as last GPU access
+ *
+ * Case 3: The constant buffer
+ *
+ * 1. Allocated
+ * 2. Written by CPU
+ * 3. Read by GPU
+ * 4. Updated (written) by CPU again
+ * 5. Read by GPU
+ *
+ * 1. Allocated
+ * (CPU, CPU)
+ * 2. Written by CPU
+ * (CPU, CPU)
+ * 3. Read by GPU
+ * (CPU+RENDER, 0)
+ * flush_domains = CPU
+ * invalidate_domains = RENDER
+ * clflush (obj)
+ * MI_FLUSH
+ * drm_agp_chipset_flush
+ * 4. Updated (written) by CPU again
+ * (CPU, CPU)
+ * flush_domains = 0 (no previous write domain)
+ * invalidate_domains = 0 (no new read domains)
+ * 5. Read by GPU
+ * (CPU+RENDER, 0)
+ * flush_domains = CPU
+ * invalidate_domains = RENDER
+ * clflush (obj)
+ * MI_FLUSH
+ * drm_agp_chipset_flush
+ */
+static void
+i915_gem_object_set_to_gpu_domain(struct drm_i915_gem_object *obj,
+ struct intel_ring_buffer *ring,
+ struct change_domains *cd)
+{
+ uint32_t invalidate_domains = 0, flush_domains = 0;
+
+ /*
+ * If the object isn't moving to a new write domain,
+ * let the object stay in multiple read domains
+ */
+ if (obj->base.pending_write_domain == 0)
+ obj->base.pending_read_domains |= obj->base.read_domains;
+
+ /*
+ * Flush the current write domain if
+ * the new read domains don't match. Invalidate
+ * any read domains which differ from the old
+ * write domain
+ */
+ if (obj->base.write_domain &&
+ (((obj->base.write_domain != obj->base.pending_read_domains ||
+ obj->ring != ring)) ||
+ (obj->fenced_gpu_access && !obj->pending_fenced_gpu_access))) {
+ flush_domains |= obj->base.write_domain;
+ invalidate_domains |=
+ obj->base.pending_read_domains & ~obj->base.write_domain;
+ }
+ /*
+ * Invalidate any read caches which may have
+ * stale data. That is, any new read domains.
+ */
+ invalidate_domains |= obj->base.pending_read_domains & ~obj->base.read_domains;
+ if ((flush_domains | invalidate_domains) & I915_GEM_DOMAIN_CPU)
+ i915_gem_clflush_object(obj);
+
+ /* blow away mappings if mapped through GTT */
+ if ((flush_domains | invalidate_domains) & I915_GEM_DOMAIN_GTT)
+ i915_gem_release_mmap(obj);
+
+ /* The actual obj->write_domain will be updated with
+ * pending_write_domain after we emit the accumulated flush for all
+ * of our domain changes in execbuffers (which clears objects'
+ * write_domains). So if we have a current write domain that we
+ * aren't changing, set pending_write_domain to that.
+ */
+ if (flush_domains == 0 && obj->base.pending_write_domain == 0)
+ obj->base.pending_write_domain = obj->base.write_domain;
+
+ cd->invalidate_domains |= invalidate_domains;
+ cd->flush_domains |= flush_domains;
+ if (flush_domains & I915_GEM_GPU_DOMAINS)
+ cd->flush_rings |= obj->ring->id;
+ if (invalidate_domains & I915_GEM_GPU_DOMAINS)
+ cd->flush_rings |= ring->id;
+}
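
Editor's sketch, not part of this patch: for the batch-buffer case spelled out in the comment above — a (CPU, CPU) object about to be read by the render ring — the bookkeeping reduces to two bitmask operations. A runnable model with stand-in domain values:

#include <stdint.h>
#include <stdio.h>

#define DOM_CPU    0x1   /* stand-ins, not the real I915_GEM_DOMAIN_* bits */
#define DOM_RENDER 0x2

int main(void)
{
	uint32_t read_domains = DOM_CPU, write_domain = DOM_CPU;
	uint32_t pending_read = DOM_RENDER;  /* the GPU is about to read it */
	uint32_t flush = 0, invalidate = 0;

	if (write_domain && write_domain != pending_read)
		flush |= write_domain;               /* drain the CPU writes */
	invalidate |= pending_read & ~read_domains;  /* new reader caches */

	printf("flush %#x invalidate %#x\n", flush, invalidate);
	return 0;   /* prints: flush 0x1 invalidate 0x2 */
}
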
+
+struct eb_objects {
+ int and;
+ struct hlist_head buckets[0];
+};
+
+static struct eb_objects *
+eb_create(int size)
+{
+ struct eb_objects *eb;
+ int count = PAGE_SIZE / sizeof(struct hlist_head) / 2;
+ while (count > size)
+ count >>= 1;
+ eb = kzalloc(count*sizeof(struct hlist_head) +
+ sizeof(struct eb_objects),
+ GFP_KERNEL);
+ if (eb == NULL)
+ return eb;
+
+ eb->and = count - 1;
+ return eb;
+}
+
+static void
+eb_reset(struct eb_objects *eb)
+{
+ memset(eb->buckets, 0, (eb->and+1)*sizeof(struct hlist_head));
+}
+
+static void
+eb_add_object(struct eb_objects *eb, struct drm_i915_gem_object *obj)
+{
+ hlist_add_head(&obj->exec_node,
+ &eb->buckets[obj->exec_handle & eb->and]);
+}
+
+static struct drm_i915_gem_object *
+eb_get_object(struct eb_objects *eb, unsigned long handle)
+{
+ struct hlist_head *head;
+ struct hlist_node *node;
+ struct drm_i915_gem_object *obj;
+
+ head = &eb->buckets[handle & eb->and];
+ hlist_for_each(node, head) {
+ obj = hlist_entry(node, struct drm_i915_gem_object, exec_node);
+ if (obj->exec_handle == handle)
+ return obj;
+ }
+
+ return NULL;
+}
+
+static void
+eb_destroy(struct eb_objects *eb)
+{
+ kfree(eb);
+}
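
Editor's sketch, not part of this patch: eb_create() sizes the lookup table to a power of two, capped at half a page of hlist_heads, so eb_add_object()/eb_get_object() can pick a bucket with a single mask. A standalone illustration of the sizing (sizeof(void *) standing in for sizeof(struct hlist_head)):

#include <stdio.h>

int main(void)
{
	int count = 4096 / sizeof(void *) / 2; /* PAGE_SIZE worth, halved */
	int size = 30;                         /* buffers in this execbuf */

	while (count > size)                   /* power of two <= size */
		count >>= 1;

	printf("buckets=%d mask=%#x bucket(0x1234)=%#x\n",
	       count, count - 1, 0x1234 & (count - 1));
	return 0;   /* buckets=16 mask=0xf bucket(0x1234)=0x4 on LP64 */
}
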
+
+static int
+i915_gem_execbuffer_relocate_entry(struct drm_i915_gem_object *obj,
+ struct eb_objects *eb,
+ struct drm_i915_gem_exec_object2 *entry,
+ struct drm_i915_gem_relocation_entry *reloc)
+{
+ struct drm_device *dev = obj->base.dev;
+ struct drm_gem_object *target_obj;
+ uint32_t target_offset;
+ int ret = -EINVAL;
+
+ /* we already hold a reference to all valid objects */
+ target_obj = &eb_get_object(eb, reloc->target_handle)->base;
+ if (unlikely(target_obj == NULL))
+ return -ENOENT;
+
+ target_offset = to_intel_bo(target_obj)->gtt_offset;
+
+#if WATCH_RELOC
+ DRM_INFO("%s: obj %p offset %08x target %d "
+ "read %08x write %08x gtt %08x "
+ "presumed %08x delta %08x\n",
+ __func__,
+ obj,
+ (int) reloc->offset,
+ (int) reloc->target_handle,
+ (int) reloc->read_domains,
+ (int) reloc->write_domain,
+ (int) target_offset,
+ (int) reloc->presumed_offset,
+ reloc->delta);
+#endif
+
+ /* The target buffer should have appeared before us in the
+ * exec_object list, so it should have a GTT space bound by now.
+ */
+ if (unlikely(target_offset == 0)) {
+ DRM_ERROR("No GTT space found for object %d\n",
+ reloc->target_handle);
+ return ret;
+ }
+
+ /* Validate that the target is in a valid r/w GPU domain */
+ if (unlikely(reloc->write_domain & (reloc->write_domain - 1))) {
+ DRM_ERROR("reloc with multiple write domains: "
+ "obj %p target %d offset %d "
+ "read %08x write %08x",
+ obj, reloc->target_handle,
+ (int) reloc->offset,
+ reloc->read_domains,
+ reloc->write_domain);
+ return ret;
+ }
+ if (unlikely((reloc->write_domain | reloc->read_domains) & I915_GEM_DOMAIN_CPU)) {
+ DRM_ERROR("reloc with read/write CPU domains: "
+ "obj %p target %d offset %d "
+ "read %08x write %08x",
+ obj, reloc->target_handle,
+ (int) reloc->offset,
+ reloc->read_domains,
+ reloc->write_domain);
+ return ret;
+ }
+ if (unlikely(reloc->write_domain && target_obj->pending_write_domain &&
+ reloc->write_domain != target_obj->pending_write_domain)) {
+ DRM_ERROR("Write domain conflict: "
+ "obj %p target %d offset %d "
+ "new %08x old %08x\n",
+ obj, reloc->target_handle,
+ (int) reloc->offset,
+ reloc->write_domain,
+ target_obj->pending_write_domain);
+ return ret;
+ }
+
+ target_obj->pending_read_domains |= reloc->read_domains;
+ target_obj->pending_write_domain |= reloc->write_domain;
+
+ /* If the relocation already has the right value in it, no
+ * more work needs to be done.
+ */
+ if (target_offset == reloc->presumed_offset)
+ return 0;
+
+ /* Check that the relocation address is valid... */
+ if (unlikely(reloc->offset > obj->base.size - 4)) {
+ DRM_ERROR("Relocation beyond object bounds: "
+ "obj %p target %d offset %d size %d.\n",
+ obj, reloc->target_handle,
+ (int) reloc->offset,
+ (int) obj->base.size);
+ return ret;
+ }
+ if (unlikely(reloc->offset & 3)) {
+ DRM_ERROR("Relocation not 4-byte aligned: "
+ "obj %p target %d offset %d.\n",
+ obj, reloc->target_handle,
+ (int) reloc->offset);
+ return ret;
+ }
+
+ /* and points to somewhere within the target object. */
+ if (unlikely(reloc->delta >= target_obj->size)) {
+ DRM_ERROR("Relocation beyond target object bounds: "
+ "obj %p target %d delta %d size %d.\n",
+ obj, reloc->target_handle,
+ (int) reloc->delta,
+ (int) target_obj->size);
+ return ret;
+ }
+
+ reloc->delta += target_offset;
+ if (obj->base.write_domain == I915_GEM_DOMAIN_CPU) {
+ uint32_t page_offset = reloc->offset & ~PAGE_MASK;
+ char *vaddr;
+
+ vaddr = kmap_atomic(obj->pages[reloc->offset >> PAGE_SHIFT]);
+ *(uint32_t *)(vaddr + page_offset) = reloc->delta;
+ kunmap_atomic(vaddr);
+ } else {
+ struct drm_i915_private *dev_priv = dev->dev_private;
+ uint32_t __iomem *reloc_entry;
+ void __iomem *reloc_page;
+
+ ret = i915_gem_object_set_to_gtt_domain(obj, 1);
+ if (ret)
+ return ret;
+
+ /* Map the page containing the relocation we're going to perform. */
+ reloc->offset += obj->gtt_offset;
+ reloc_page = io_mapping_map_atomic_wc(dev_priv->mm.gtt_mapping,
+ reloc->offset & PAGE_MASK);
+ reloc_entry = (uint32_t __iomem *)
+ (reloc_page + (reloc->offset & ~PAGE_MASK));
+ iowrite32(reloc->delta, reloc_entry);
+ io_mapping_unmap_atomic(reloc_page);
+ }
+
+ /* and update the user's relocation entry */
+ reloc->presumed_offset = target_offset;
+
+ return 0;
+}
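
Editor's sketch, not part of this patch: a relocation is ultimately one aligned 32-bit slot in the batch that must end up holding target_offset + delta; the CPU-domain fast path above does exactly that through kmap_atomic(). A runnable model with hypothetical values:

#include <stdint.h>
#include <stdio.h>

int main(void)
{
	uint32_t batch[256] = { 0 };
	uint32_t offset = 64;          /* byte offset of the reloc, 4-aligned */
	uint32_t target_gtt = 0x10000; /* where the target object was bound */
	uint32_t delta = 0x80;         /* offset inside the target object */

	batch[offset / 4] = target_gtt + delta;  /* patch the dword */
	printf("batch[%u] = %#x\n", offset / 4, batch[offset / 4]);
	return 0;
}
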
+
+static int
+i915_gem_execbuffer_relocate_object(struct drm_i915_gem_object *obj,
+ struct eb_objects *eb,
+ struct drm_i915_gem_exec_object2 *entry)
+{
+ struct drm_i915_gem_relocation_entry __user *user_relocs;
+ int i, ret;
+
+ user_relocs = (void __user *)(uintptr_t)entry->relocs_ptr;
+ for (i = 0; i < entry->relocation_count; i++) {
+ struct drm_i915_gem_relocation_entry reloc;
+
+ if (__copy_from_user_inatomic(&reloc,
+ user_relocs+i,
+ sizeof(reloc)))
+ return -EFAULT;
+
+ ret = i915_gem_execbuffer_relocate_entry(obj, eb, entry, &reloc);
+ if (ret)
+ return ret;
+
+ if (__copy_to_user_inatomic(&user_relocs[i].presumed_offset,
+ &reloc.presumed_offset,
+ sizeof(reloc.presumed_offset)))
+ return -EFAULT;
+ }
+
+ return 0;
+}
+
+static int
+i915_gem_execbuffer_relocate_object_slow(struct drm_i915_gem_object *obj,
+ struct eb_objects *eb,
+ struct drm_i915_gem_exec_object2 *entry,
+ struct drm_i915_gem_relocation_entry *relocs)
+{
+ int i, ret;
+
+ for (i = 0; i < entry->relocation_count; i++) {
+ ret = i915_gem_execbuffer_relocate_entry(obj, eb, entry, &relocs[i]);
+ if (ret)
+ return ret;
+ }
+
+ return 0;
+}
+
+static int
+i915_gem_execbuffer_relocate(struct drm_device *dev,
+ struct eb_objects *eb,
+ struct list_head *objects,
+ struct drm_i915_gem_exec_object2 *exec)
+{
+ struct drm_i915_gem_object *obj;
+ int ret;
+
+ list_for_each_entry(obj, objects, exec_list) {
+ obj->base.pending_read_domains = 0;
+ obj->base.pending_write_domain = 0;
+ ret = i915_gem_execbuffer_relocate_object(obj, eb, exec++);
+ if (ret)
+ return ret;
+ }
+
+ return 0;
+}
+
+static int
+i915_gem_execbuffer_reserve(struct intel_ring_buffer *ring,
+ struct drm_file *file,
+ struct list_head *objects,
+ struct drm_i915_gem_exec_object2 *exec)
+{
+ struct drm_i915_gem_object *obj;
+ struct drm_i915_gem_exec_object2 *entry;
+ int ret, retry;
+ bool has_fenced_gpu_access = INTEL_INFO(ring->dev)->gen < 4;
+
+ /* Attempt to pin all of the buffers into the GTT.
+ * This is done in 3 phases:
+ *
+ * 1a. Unbind all objects that do not match the GTT constraints for
+ * the execbuffer (fenceable, mappable, alignment etc).
+ * 1b. Increment pin count for already bound objects.
+ * 2. Bind new objects.
+ * 3. Decrement pin count.
+ *
+ * This avoids unnecessary unbinding of later objects in order to make
+ * room for the earlier objects *unless* we need to defragment.
+ */
+ retry = 0;
+ do {
+ ret = 0;
+
+ /* Unbind any ill-fitting objects or pin. */
+ entry = exec;
+ list_for_each_entry(obj, objects, exec_list) {
+ bool need_fence, need_mappable;
+
+ if (!obj->gtt_space) {
+ entry++;
+ continue;
+ }
+
+ need_fence =
+ has_fenced_gpu_access &&
+ entry->flags & EXEC_OBJECT_NEEDS_FENCE &&
+ obj->tiling_mode != I915_TILING_NONE;
+ need_mappable =
+ entry->relocation_count ? true : need_fence;
+
+ if ((entry->alignment && obj->gtt_offset & (entry->alignment - 1)) ||
+ (need_mappable && !obj->map_and_fenceable))
+ ret = i915_gem_object_unbind(obj);
+ else
+ ret = i915_gem_object_pin(obj,
+ entry->alignment,
+ need_mappable);
+ if (ret)
+ goto err;
+
+ entry++;
+ }
+
+ /* Bind fresh objects */
+ entry = exec;
+ list_for_each_entry(obj, objects, exec_list) {
+ bool need_fence;
+
+ need_fence =
+ has_fenced_gpu_access &&
+ entry->flags & EXEC_OBJECT_NEEDS_FENCE &&
+ obj->tiling_mode != I915_TILING_NONE;
+
+ if (!obj->gtt_space) {
+ bool need_mappable =
+ entry->relocation_count ? true : need_fence;
+
+ ret = i915_gem_object_pin(obj,
+ entry->alignment,
+ need_mappable);
+ if (ret)
+ break;
+ }
+
+ if (has_fenced_gpu_access) {
+ if (need_fence) {
+ ret = i915_gem_object_get_fence(obj, ring, 1);
+ if (ret)
+ break;
+ } else if (entry->flags & EXEC_OBJECT_NEEDS_FENCE &&
+ obj->tiling_mode == I915_TILING_NONE) {
+ /* XXX pipelined! */
+ ret = i915_gem_object_put_fence(obj);
+ if (ret)
+ break;
+ }
+ obj->pending_fenced_gpu_access = need_fence;
+ }
+
+ entry->offset = obj->gtt_offset;
+ entry++;
+ }
+
+ /* Decrement pin count for bound objects */
+ list_for_each_entry(obj, objects, exec_list) {
+ if (obj->gtt_space)
+ i915_gem_object_unpin(obj);
+ }
+
+ if (ret != -ENOSPC || retry > 1)
+ return ret;
+
+ /* First attempt, just clear anything that is purgeable.
+ * Second attempt, clear the entire GTT.
+ */
+ ret = i915_gem_evict_everything(ring->dev, retry == 0);
+ if (ret)
+ return ret;
+
+ retry++;
+ } while (1);
+
+err:
+ obj = list_entry(obj->exec_list.prev,
+ struct drm_i915_gem_object,
+ exec_list);
+ while (objects != &obj->exec_list) {
+ if (obj->gtt_space)
+ i915_gem_object_unpin(obj);
+
+ obj = list_entry(obj->exec_list.prev,
+ struct drm_i915_gem_object,
+ exec_list);
+ }
+
+ return ret;
+}
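
Editor's sketch, not part of this patch: the escalation at the bottom of the loop is worth spelling out — attempt 0 evicts only purgeable objects, attempt 1 clears the whole GTT, and a third ENOSPC is returned to userspace. A runnable model of just that control flow:

#include <errno.h>
#include <stdio.h>

int main(void)
{
	int retry;

	for (retry = 0; ; retry++) {
		int ret = -ENOSPC;    /* pretend every pin attempt fails */

		if (ret != -ENOSPC || retry > 1) {
			printf("giving up: %d\n", ret);
			return 1;
		}
		printf("attempt %d: evict %s\n", retry,
		       retry == 0 ? "purgeable only" : "everything");
	}
}
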
+
+static int
+i915_gem_execbuffer_relocate_slow(struct drm_device *dev,
+ struct drm_file *file,
+ struct intel_ring_buffer *ring,
+ struct list_head *objects,
+ struct eb_objects *eb,
+ struct drm_i915_gem_exec_object2 *exec,
+ int count)
+{
+ struct drm_i915_gem_relocation_entry *reloc;
+ struct drm_i915_gem_object *obj;
+ int i, total, ret;
+
+ /* We may process another execbuffer during the unlock... */
+ while (!list_empty(objects)) {
+ obj = list_first_entry(objects,
+ struct drm_i915_gem_object,
+ exec_list);
+ list_del_init(&obj->exec_list);
+ drm_gem_object_unreference(&obj->base);
+ }
+
+ mutex_unlock(&dev->struct_mutex);
+
+ total = 0;
+ for (i = 0; i < count; i++)
+ total += exec[i].relocation_count;
+
+ reloc = drm_malloc_ab(total, sizeof(*reloc));
+ if (reloc == NULL) {
+ mutex_lock(&dev->struct_mutex);
+ return -ENOMEM;
+ }
+
+ total = 0;
+ for (i = 0; i < count; i++) {
+ struct drm_i915_gem_relocation_entry __user *user_relocs;
+
+ user_relocs = (void __user *)(uintptr_t)exec[i].relocs_ptr;
+
+ if (copy_from_user(reloc+total, user_relocs,
+ exec[i].relocation_count * sizeof(*reloc))) {
+ ret = -EFAULT;
+ mutex_lock(&dev->struct_mutex);
+ goto err;
+ }
+
+ total += exec[i].relocation_count;
+ }
+
+ ret = i915_mutex_lock_interruptible(dev);
+ if (ret) {
+ mutex_lock(&dev->struct_mutex);
+ goto err;
+ }
+
+ /* reacquire the objects */
+ INIT_LIST_HEAD(objects);
+ eb_reset(eb);
+ for (i = 0; i < count; i++) {
+ struct drm_i915_gem_object *obj;
+
+ obj = to_intel_bo(drm_gem_object_lookup(dev, file,
+ exec[i].handle));
+ if (obj == NULL) {
+ DRM_ERROR("Invalid object handle %d at index %d\n",
+ exec[i].handle, i);
+ ret = -ENOENT;
+ goto err;
+ }
+
+ list_add_tail(&obj->exec_list, objects);
+ obj->exec_handle = exec[i].handle;
+ eb_add_object(eb, obj);
+ }
+
+ ret = i915_gem_execbuffer_reserve(ring, file, objects, exec);
+ if (ret)
+ goto err;
+
+ total = 0;
+ list_for_each_entry(obj, objects, exec_list) {
+ obj->base.pending_read_domains = 0;
+ obj->base.pending_write_domain = 0;
+ ret = i915_gem_execbuffer_relocate_object_slow(obj, eb,
+ exec,
+ reloc + total);
+ if (ret)
+ goto err;
+
+ total += exec->relocation_count;
+ exec++;
+ }
+
+ /* Leave the user relocations as they are; this is the painfully slow path,
+ * and we want to avoid the complication of dropping the lock whilst
+ * having buffers reserved in the aperture and so causing spurious
+ * ENOSPC for random operations.
+ */
+
+err:
+ drm_free_large(reloc);
+ return ret;
+}
+
+static void
+i915_gem_execbuffer_flush(struct drm_device *dev,
+ uint32_t invalidate_domains,
+ uint32_t flush_domains,
+ uint32_t flush_rings)
+{
+ drm_i915_private_t *dev_priv = dev->dev_private;
+ int i;
+
+ if (flush_domains & I915_GEM_DOMAIN_CPU)
+ intel_gtt_chipset_flush();
+
+ if ((flush_domains | invalidate_domains) & I915_GEM_GPU_DOMAINS) {
+ for (i = 0; i < I915_NUM_RINGS; i++)
+ if (flush_rings & (1 << i))
+ i915_gem_flush_ring(dev, &dev_priv->ring[i],
+ invalidate_domains,
+ flush_domains);
+ }
+}
+
+static int
+i915_gem_execbuffer_sync_rings(struct drm_i915_gem_object *obj,
+ struct intel_ring_buffer *to)
+{
+ struct intel_ring_buffer *from = obj->ring;
+ u32 seqno;
+ int ret, idx;
+
+ if (from == NULL || to == from)
+ return 0;
+
+ if (INTEL_INFO(obj->base.dev)->gen < 6)
+ return i915_gem_object_wait_rendering(obj, true);
+
+ idx = intel_ring_sync_index(from, to);
+
+ seqno = obj->last_rendering_seqno;
+ if (seqno <= from->sync_seqno[idx])
+ return 0;
+
+ if (seqno == from->outstanding_lazy_request) {
+ struct drm_i915_gem_request *request;
+
+ request = kzalloc(sizeof(*request), GFP_KERNEL);
+ if (request == NULL)
+ return -ENOMEM;
+
+ ret = i915_add_request(obj->base.dev, NULL, request, from);
+ if (ret) {
+ kfree(request);
+ return ret;
+ }
+
+ seqno = request->seqno;
+ }
+
+ from->sync_seqno[idx] = seqno;
+ return intel_ring_sync(to, from, seqno - 1);
+}
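
Editor's sketch, not part of this patch: the wait is only needed when the producer ring's seqno is newer than the last value the consumer already synchronised against; sync_seqno[] caches that watermark so back-to-back batches reading the same object emit no extra semaphores. A minimal model of the decision (hypothetical names):

#include <stdio.h>

static int need_semaphore_wait(unsigned int seqno, unsigned int synced)
{
	return seqno > synced;   /* mirrors the seqno <= sync_seqno[idx] test */
}

int main(void)
{
	printf("%d\n", need_semaphore_wait(100, 99));  /* 1: wait required */
	printf("%d\n", need_semaphore_wait(100, 100)); /* 0: already ordered */
	return 0;
}
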
+
+static int
+i915_gem_execbuffer_move_to_gpu(struct intel_ring_buffer *ring,
+ struct list_head *objects)
+{
+ struct drm_i915_gem_object *obj;
+ struct change_domains cd;
+ int ret;
+
+ cd.invalidate_domains = 0;
+ cd.flush_domains = 0;
+ cd.flush_rings = 0;
+ list_for_each_entry(obj, objects, exec_list)
+ i915_gem_object_set_to_gpu_domain(obj, ring, &cd);
+
+ if (cd.invalidate_domains | cd.flush_domains) {
+#if WATCH_EXEC
+ DRM_INFO("%s: invalidate_domains %08x flush_domains %08x\n",
+ __func__,
+ cd.invalidate_domains,
+ cd.flush_domains);
+#endif
+ i915_gem_execbuffer_flush(ring->dev,
+ cd.invalidate_domains,
+ cd.flush_domains,
+ cd.flush_rings);
+ }
+
+ list_for_each_entry(obj, objects, exec_list) {
+ ret = i915_gem_execbuffer_sync_rings(obj, ring);
+ if (ret)
+ return ret;
+ }
+
+ return 0;
+}
+
+static bool
+i915_gem_check_execbuffer(struct drm_i915_gem_execbuffer2 *exec)
+{
+ return ((exec->batch_start_offset | exec->batch_len) & 0x7) == 0;
+}
+
+static int
+validate_exec_list(struct drm_i915_gem_exec_object2 *exec,
+ int count)
+{
+ int i;
+
+ for (i = 0; i < count; i++) {
+ char __user *ptr = (char __user *)(uintptr_t)exec[i].relocs_ptr;
+ int length; /* limited by fault_in_pages_readable() */
+
+ /* First check for malicious input causing overflow */
+ if (exec[i].relocation_count >
+ INT_MAX / sizeof(struct drm_i915_gem_relocation_entry))
+ return -EINVAL;
+
+ length = exec[i].relocation_count *
+ sizeof(struct drm_i915_gem_relocation_entry);
+ if (!access_ok(VERIFY_READ, ptr, length))
+ return -EFAULT;
+
+ /* we may also need to update the presumed offsets */
+ if (!access_ok(VERIFY_WRITE, ptr, length))
+ return -EFAULT;
+
+ if (fault_in_pages_readable(ptr, length))
+ return -EFAULT;
+ }
+
+ return 0;
+}
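
Editor's sketch, not part of this patch: the first test guards the multiplication that produces length — relocation_count must be bounded by INT_MAX / sizeof(entry) before relocation_count * sizeof(entry) can be trusted. A standalone version of the guard (struct reloc is a stand-in):

#include <limits.h>
#include <stdio.h>

struct reloc { unsigned long v[4]; };   /* 32-byte stand-in entry */

static int checked_length(unsigned int count, int *length)
{
	if (count > INT_MAX / sizeof(struct reloc))
		return -1;                  /* product would overflow int */
	*length = count * sizeof(struct reloc);
	return 0;
}

int main(void)
{
	int len;
	printf("%d\n", checked_length(8, &len));           /* 0, len = 256 */
	printf("%d\n", checked_length(0x10000000u, &len)); /* -1: rejected */
	return 0;
}
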
+
+static int
+i915_gem_execbuffer_wait_for_flips(struct intel_ring_buffer *ring,
+ struct list_head *objects)
+{
+ struct drm_i915_gem_object *obj;
+ int flips;
+
+ /* Check for any pending flips. As we only maintain a flip queue depth
+ * of 1, we can simply insert a WAIT for the next display flip prior
+ * to executing the batch and avoid stalling the CPU.
+ */
+ flips = 0;
+ list_for_each_entry(obj, objects, exec_list) {
+ if (obj->base.write_domain)
+ flips |= atomic_read(&obj->pending_flip);
+ }
+ if (flips) {
+ int plane, flip_mask, ret;
+
+ for (plane = 0; flips >> plane; plane++) {
+ if (((flips >> plane) & 1) == 0)
+ continue;
+
+ if (plane)
+ flip_mask = MI_WAIT_FOR_PLANE_B_FLIP;
+ else
+ flip_mask = MI_WAIT_FOR_PLANE_A_FLIP;
+
+ ret = intel_ring_begin(ring, 2);
+ if (ret)
+ return ret;
+
+ intel_ring_emit(ring, MI_WAIT_FOR_EVENT | flip_mask);
+ intel_ring_emit(ring, MI_NOOP);
+ intel_ring_advance(ring);
+ }
+ }
+
+ return 0;
+}
+
+static void
+i915_gem_execbuffer_move_to_active(struct list_head *objects,
+ struct intel_ring_buffer *ring,
+ u32 seqno)
+{
+ struct drm_i915_gem_object *obj;
+
+ list_for_each_entry(obj, objects, exec_list) {
+ obj->base.read_domains = obj->base.pending_read_domains;
+ obj->base.write_domain = obj->base.pending_write_domain;
+ obj->fenced_gpu_access = obj->pending_fenced_gpu_access;
+
+ i915_gem_object_move_to_active(obj, ring, seqno);
+ if (obj->base.write_domain) {
+ obj->dirty = 1;
+ obj->pending_gpu_write = true;
+ list_move_tail(&obj->gpu_write_list,
+ &ring->gpu_write_list);
+ intel_mark_busy(ring->dev, obj);
+ }
+
+ trace_i915_gem_object_change_domain(obj,
+ obj->base.read_domains,
+ obj->base.write_domain);
+ }
+}
+
+static void
+i915_gem_execbuffer_retire_commands(struct drm_device *dev,
+ struct drm_file *file,
+ struct intel_ring_buffer *ring)
+{
+ struct drm_i915_gem_request *request;
+ u32 flush_domains;
+
+ /*
+ * Ensure that the commands in the batch buffer are
+ * finished before the interrupt fires.
+ *
+ * The sampler always gets flushed on i965 (sigh).
+ */
+ flush_domains = 0;
+ if (INTEL_INFO(dev)->gen >= 4)
+ flush_domains |= I915_GEM_DOMAIN_SAMPLER;
+
+ ring->flush(ring, I915_GEM_DOMAIN_COMMAND, flush_domains);
+
+ /* Add a breadcrumb for the completion of the batch buffer */
+ request = kzalloc(sizeof(*request), GFP_KERNEL);
+ if (request == NULL || i915_add_request(dev, file, request, ring)) {
+ i915_gem_next_request_seqno(dev, ring);
+ kfree(request);
+ }
+}
+
+static int
+i915_gem_do_execbuffer(struct drm_device *dev, void *data,
+ struct drm_file *file,
+ struct drm_i915_gem_execbuffer2 *args,
+ struct drm_i915_gem_exec_object2 *exec)
+{
+ drm_i915_private_t *dev_priv = dev->dev_private;
+ struct list_head objects;
+ struct eb_objects *eb;
+ struct drm_i915_gem_object *batch_obj;
+ struct drm_clip_rect *cliprects = NULL;
+ struct intel_ring_buffer *ring;
+ u32 exec_start, exec_len;
+ u32 seqno;
+ int ret, mode, i;
+
+ if (!i915_gem_check_execbuffer(args)) {
+ DRM_ERROR("execbuf with invalid offset/length\n");
+ return -EINVAL;
+ }
+
+ ret = validate_exec_list(exec, args->buffer_count);
+ if (ret)
+ return ret;
+
+#if WATCH_EXEC
+ DRM_INFO("buffers_ptr %d buffer_count %d len %08x\n",
+ (int) args->buffers_ptr, args->buffer_count, args->batch_len);
+#endif
+ switch (args->flags & I915_EXEC_RING_MASK) {
+ case I915_EXEC_DEFAULT:
+ case I915_EXEC_RENDER:
+ ring = &dev_priv->ring[RCS];
+ break;
+ case I915_EXEC_BSD:
+ if (!HAS_BSD(dev)) {
+ DRM_ERROR("execbuf with invalid ring (BSD)\n");
+ return -EINVAL;
+ }
+ ring = &dev_priv->ring[VCS];
+ break;
+ case I915_EXEC_BLT:
+ if (!HAS_BLT(dev)) {
+ DRM_ERROR("execbuf with invalid ring (BLT)\n");
+ return -EINVAL;
+ }
+ ring = &dev_priv->ring[BCS];
+ break;
+ default:
+ DRM_ERROR("execbuf with unknown ring: %d\n",
+ (int)(args->flags & I915_EXEC_RING_MASK));
+ return -EINVAL;
+ }
+
+ mode = args->flags & I915_EXEC_CONSTANTS_MASK;
+ switch (mode) {
+ case I915_EXEC_CONSTANTS_REL_GENERAL:
+ case I915_EXEC_CONSTANTS_ABSOLUTE:
+ case I915_EXEC_CONSTANTS_REL_SURFACE:
+ if (ring == &dev_priv->ring[RCS] &&
+ mode != dev_priv->relative_constants_mode) {
+ if (INTEL_INFO(dev)->gen < 4)
+ return -EINVAL;
+
+ if (INTEL_INFO(dev)->gen > 5 &&
+ mode == I915_EXEC_CONSTANTS_REL_SURFACE)
+ return -EINVAL;
+
+ ret = intel_ring_begin(ring, 4);
+ if (ret)
+ return ret;
+
+ intel_ring_emit(ring, MI_NOOP);
+ intel_ring_emit(ring, MI_LOAD_REGISTER_IMM(1));
+ intel_ring_emit(ring, INSTPM);
+ intel_ring_emit(ring,
+ I915_EXEC_CONSTANTS_MASK << 16 | mode);
+ intel_ring_advance(ring);
+
+ dev_priv->relative_constants_mode = mode;
+ }
+ break;
+ default:
+ DRM_ERROR("execbuf with unknown constants: %d\n", mode);
+ return -EINVAL;
+ }
+
+ if (args->buffer_count < 1) {
+ DRM_ERROR("execbuf with %d buffers\n", args->buffer_count);
+ return -EINVAL;
+ }
+
+ if (args->num_cliprects != 0) {
+ if (ring != &dev_priv->ring[RCS]) {
+ DRM_ERROR("clip rectangles are only valid with the render ring\n");
+ return -EINVAL;
+ }
+
+ cliprects = kmalloc(args->num_cliprects * sizeof(*cliprects),
+ GFP_KERNEL);
+ if (cliprects == NULL) {
+ ret = -ENOMEM;
+ goto pre_mutex_err;
+ }
+
+ if (copy_from_user(cliprects,
+ (struct drm_clip_rect __user *)(uintptr_t)
+ args->cliprects_ptr,
+ sizeof(*cliprects)*args->num_cliprects)) {
+ ret = -EFAULT;
+ goto pre_mutex_err;
+ }
+ }
+
+ ret = i915_mutex_lock_interruptible(dev);
+ if (ret)
+ goto pre_mutex_err;
+
+ if (dev_priv->mm.suspended) {
+ mutex_unlock(&dev->struct_mutex);
+ ret = -EBUSY;
+ goto pre_mutex_err;
+ }
+
+ eb = eb_create(args->buffer_count);
+ if (eb == NULL) {
+ mutex_unlock(&dev->struct_mutex);
+ ret = -ENOMEM;
+ goto pre_mutex_err;
+ }
+
+ /* Look up object handles */
+ INIT_LIST_HEAD(&objects);
+ for (i = 0; i < args->buffer_count; i++) {
+ struct drm_i915_gem_object *obj;
+
+ obj = to_intel_bo(drm_gem_object_lookup(dev, file,
+ exec[i].handle));
+ if (obj == NULL) {
+ DRM_ERROR("Invalid object handle %d at index %d\n",
+ exec[i].handle, i);
+ /* prevent error path from reading uninitialized data */
+ ret = -ENOENT;
+ goto err;
+ }
+
+ if (!list_empty(&obj->exec_list)) {
+ DRM_ERROR("Object %p [handle %d, index %d] appears more than once in object list\n",
+ obj, exec[i].handle, i);
+ ret = -EINVAL;
+ goto err;
+ }
+
+ list_add_tail(&obj->exec_list, &objects);
+ obj->exec_handle = exec[i].handle;
+ eb_add_object(eb, obj);
+ }
+
+ /* Move the objects en-masse into the GTT, evicting if necessary. */
+ ret = i915_gem_execbuffer_reserve(ring, file, &objects, exec);
+ if (ret)
+ goto err;
+
+ /* The objects are in their final locations, apply the relocations. */
+ ret = i915_gem_execbuffer_relocate(dev, eb, &objects, exec);
+ if (ret) {
+ if (ret == -EFAULT) {
+ ret = i915_gem_execbuffer_relocate_slow(dev, file, ring,
+ &objects, eb,
+ exec,
+ args->buffer_count);
+ BUG_ON(!mutex_is_locked(&dev->struct_mutex));
+ }
+ if (ret)
+ goto err;
+ }
+
+ /* Set the pending read domains for the batch buffer to COMMAND */
+ batch_obj = list_entry(objects.prev,
+ struct drm_i915_gem_object,
+ exec_list);
+ if (batch_obj->base.pending_write_domain) {
+ DRM_ERROR("Attempting to use self-modifying batch buffer\n");
+ ret = -EINVAL;
+ goto err;
+ }
+ batch_obj->base.pending_read_domains |= I915_GEM_DOMAIN_COMMAND;
+
+ ret = i915_gem_execbuffer_move_to_gpu(ring, &objects);
+ if (ret)
+ goto err;
+
+ ret = i915_gem_execbuffer_wait_for_flips(ring, &objects);
+ if (ret)
+ goto err;
+
+ seqno = i915_gem_next_request_seqno(dev, ring);
+ for (i = 0; i < I915_NUM_RINGS-1; i++) {
+ if (seqno < ring->sync_seqno[i]) {
+ /* The GPU cannot handle its semaphore value wrapping,
+ * so every billion or so execbuffers, we need to stall
+ * the GPU in order to reset the counters.
+ */
+ ret = i915_gpu_idle(dev);
+ if (ret)
+ goto err;
+
+ BUG_ON(ring->sync_seqno[i]);
+ }
+ }
+
+ exec_start = batch_obj->gtt_offset + args->batch_start_offset;
+ exec_len = args->batch_len;
+ if (cliprects) {
+ for (i = 0; i < args->num_cliprects; i++) {
+ ret = i915_emit_box(dev, &cliprects[i],
+ args->DR1, args->DR4);
+ if (ret)
+ goto err;
+
+ ret = ring->dispatch_execbuffer(ring,
+ exec_start, exec_len);
+ if (ret)
+ goto err;
+ }
+ } else {
+ ret = ring->dispatch_execbuffer(ring, exec_start, exec_len);
+ if (ret)
+ goto err;
+ }
+
+ i915_gem_execbuffer_move_to_active(&objects, ring, seqno);
+ i915_gem_execbuffer_retire_commands(dev, file, ring);
+
+err:
+ eb_destroy(eb);
+ while (!list_empty(&objects)) {
+ struct drm_i915_gem_object *obj;
+
+ obj = list_first_entry(&objects,
+ struct drm_i915_gem_object,
+ exec_list);
+ list_del_init(&obj->exec_list);
+ drm_gem_object_unreference(&obj->base);
+ }
+
+ mutex_unlock(&dev->struct_mutex);
+
+pre_mutex_err:
+ kfree(cliprects);
+ return ret;
+}
+
+/*
+ * Legacy execbuffer just creates an exec2 list from the original exec object
+ * list array and passes it to the real function.
+ */
+int
+i915_gem_execbuffer(struct drm_device *dev, void *data,
+ struct drm_file *file)
+{
+ struct drm_i915_gem_execbuffer *args = data;
+ struct drm_i915_gem_execbuffer2 exec2;
+ struct drm_i915_gem_exec_object *exec_list = NULL;
+ struct drm_i915_gem_exec_object2 *exec2_list = NULL;
+ int ret, i;
+
+#if WATCH_EXEC
+ DRM_INFO("buffers_ptr %d buffer_count %d len %08x\n",
+ (int) args->buffers_ptr, args->buffer_count, args->batch_len);
+#endif
+
+ if (args->buffer_count < 1) {
+ DRM_ERROR("execbuf with %d buffers\n", args->buffer_count);
+ return -EINVAL;
+ }
+
+ /* Copy in the exec list from userland */
+ exec_list = drm_malloc_ab(sizeof(*exec_list), args->buffer_count);
+ exec2_list = drm_malloc_ab(sizeof(*exec2_list), args->buffer_count);
+ if (exec_list == NULL || exec2_list == NULL) {
+ DRM_ERROR("Failed to allocate exec list for %d buffers\n",
+ args->buffer_count);
+ drm_free_large(exec_list);
+ drm_free_large(exec2_list);
+ return -ENOMEM;
+ }
+ ret = copy_from_user(exec_list,
+ (struct drm_i915_relocation_entry __user *)
+ (uintptr_t) args->buffers_ptr,
+ sizeof(*exec_list) * args->buffer_count);
+ if (ret != 0) {
+ DRM_ERROR("copy %d exec entries failed %d\n",
+ args->buffer_count, ret);
+ drm_free_large(exec_list);
+ drm_free_large(exec2_list);
+ return -EFAULT;
+ }
+
+ for (i = 0; i < args->buffer_count; i++) {
+ exec2_list[i].handle = exec_list[i].handle;
+ exec2_list[i].relocation_count = exec_list[i].relocation_count;
+ exec2_list[i].relocs_ptr = exec_list[i].relocs_ptr;
+ exec2_list[i].alignment = exec_list[i].alignment;
+ exec2_list[i].offset = exec_list[i].offset;
+ if (INTEL_INFO(dev)->gen < 4)
+ exec2_list[i].flags = EXEC_OBJECT_NEEDS_FENCE;
+ else
+ exec2_list[i].flags = 0;
+ }
+
+ exec2.buffers_ptr = args->buffers_ptr;
+ exec2.buffer_count = args->buffer_count;
+ exec2.batch_start_offset = args->batch_start_offset;
+ exec2.batch_len = args->batch_len;
+ exec2.DR1 = args->DR1;
+ exec2.DR4 = args->DR4;
+ exec2.num_cliprects = args->num_cliprects;
+ exec2.cliprects_ptr = args->cliprects_ptr;
+ exec2.flags = I915_EXEC_RENDER;
+
+ ret = i915_gem_do_execbuffer(dev, data, file, &exec2, exec2_list);
+ if (!ret) {
+ /* Copy the new buffer offsets back to the user's exec list. */
+ for (i = 0; i < args->buffer_count; i++)
+ exec_list[i].offset = exec2_list[i].offset;
+ /* ... and back out to userspace */
+ ret = copy_to_user((struct drm_i915_relocation_entry __user *)
+ (uintptr_t) args->buffers_ptr,
+ exec_list,
+ sizeof(*exec_list) * args->buffer_count);
+ if (ret) {
+ ret = -EFAULT;
+ DRM_ERROR("failed to copy %d exec entries "
+ "back to user (%d)\n",
+ args->buffer_count, ret);
+ }
+ }
+
+ drm_free_large(exec_list);
+ drm_free_large(exec2_list);
+ return ret;
+}
+
+int
+i915_gem_execbuffer2(struct drm_device *dev, void *data,
+ struct drm_file *file)
+{
+ struct drm_i915_gem_execbuffer2 *args = data;
+ struct drm_i915_gem_exec_object2 *exec2_list = NULL;
+ int ret;
+
+#if WATCH_EXEC
+ DRM_INFO("buffers_ptr %d buffer_count %d len %08x\n",
+ (int) args->buffers_ptr, args->buffer_count, args->batch_len);
+#endif
+
+ if (args->buffer_count < 1) {
+ DRM_ERROR("execbuf2 with %d buffers\n", args->buffer_count);
+ return -EINVAL;
+ }
+
+ exec2_list = drm_malloc_ab(sizeof(*exec2_list), args->buffer_count);
+ if (exec2_list == NULL) {
+ DRM_ERROR("Failed to allocate exec list for %d buffers\n",
+ args->buffer_count);
+ return -ENOMEM;
+ }
+ ret = copy_from_user(exec2_list,
+ (struct drm_i915_relocation_entry __user *)
+ (uintptr_t) args->buffers_ptr,
+ sizeof(*exec2_list) * args->buffer_count);
+ if (ret != 0) {
+ DRM_ERROR("copy %d exec entries failed %d\n",
+ args->buffer_count, ret);
+ drm_free_large(exec2_list);
+ return -EFAULT;
+ }
+
+ ret = i915_gem_do_execbuffer(dev, data, file, args, exec2_list);
+ if (!ret) {
+ /* Copy the new buffer offsets back to the user's exec list. */
+ ret = copy_to_user((struct drm_i915_relocation_entry __user *)
+ (uintptr_t) args->buffers_ptr,
+ exec2_list,
+ sizeof(*exec2_list) * args->buffer_count);
+ if (ret) {
+ ret = -EFAULT;
+ DRM_ERROR("failed to copy %d exec entries "
+ "back to user (%d)\n",
+ args->buffer_count, ret);
+ }
+ }
+
+ drm_free_large(exec2_list);
+ return ret;
+}
+
diff --git a/drivers/gpu/drm/i915/i915_gem_gtt.c b/drivers/gpu/drm/i915/i915_gem_gtt.c
new file mode 100644
index 00000000000..86673e77d7c
--- /dev/null
+++ b/drivers/gpu/drm/i915/i915_gem_gtt.c
@@ -0,0 +1,99 @@
+/*
+ * Copyright © 2010 Daniel Vetter
+ *
+ * Permission is hereby granted, free of charge, to any person obtaining a
+ * copy of this software and associated documentation files (the "Software"),
+ * to deal in the Software without restriction, including without limitation
+ * the rights to use, copy, modify, merge, publish, distribute, sublicense,
+ * and/or sell copies of the Software, and to permit persons to whom the
+ * Software is furnished to do so, subject to the following conditions:
+ *
+ * The above copyright notice and this permission notice (including the next
+ * paragraph) shall be included in all copies or substantial portions of the
+ * Software.
+ *
+ * THE SOFTWARE IS PROVIDED "AS IS", WITHOUT WARRANTY OF ANY KIND, EXPRESS OR
+ * IMPLIED, INCLUDING BUT NOT LIMITED TO THE WARRANTIES OF MERCHANTABILITY,
+ * FITNESS FOR A PARTICULAR PURPOSE AND NONINFRINGEMENT. IN NO EVENT SHALL
+ * THE AUTHORS OR COPYRIGHT HOLDERS BE LIABLE FOR ANY CLAIM, DAMAGES OR OTHER
+ * LIABILITY, WHETHER IN AN ACTION OF CONTRACT, TORT OR OTHERWISE, ARISING
+ * FROM, OUT OF OR IN CONNECTION WITH THE SOFTWARE OR THE USE OR OTHER DEALINGS
+ * IN THE SOFTWARE.
+ *
+ */
+
+#include "drmP.h"
+#include "drm.h"
+#include "i915_drm.h"
+#include "i915_drv.h"
+#include "i915_trace.h"
+#include "intel_drv.h"
+
+void i915_gem_restore_gtt_mappings(struct drm_device *dev)
+{
+ struct drm_i915_private *dev_priv = dev->dev_private;
+ struct drm_i915_gem_object *obj;
+
+ list_for_each_entry(obj, &dev_priv->mm.gtt_list, gtt_list) {
+ i915_gem_clflush_object(obj);
+
+ if (dev_priv->mm.gtt->needs_dmar) {
+ BUG_ON(!obj->sg_list);
+
+ intel_gtt_insert_sg_entries(obj->sg_list,
+ obj->num_sg,
+ obj->gtt_space->start
+ >> PAGE_SHIFT,
+ obj->agp_type);
+ } else
+ intel_gtt_insert_pages(obj->gtt_space->start
+ >> PAGE_SHIFT,
+ obj->base.size >> PAGE_SHIFT,
+ obj->pages,
+ obj->agp_type);
+ }
+
+ intel_gtt_chipset_flush();
+}
+
+int i915_gem_gtt_bind_object(struct drm_i915_gem_object *obj)
+{
+ struct drm_device *dev = obj->base.dev;
+ struct drm_i915_private *dev_priv = dev->dev_private;
+ int ret;
+
+ if (dev_priv->mm.gtt->needs_dmar) {
+ ret = intel_gtt_map_memory(obj->pages,
+ obj->base.size >> PAGE_SHIFT,
+ &obj->sg_list,
+ &obj->num_sg);
+ if (ret != 0)
+ return ret;
+
+ intel_gtt_insert_sg_entries(obj->sg_list,
+ obj->num_sg,
+ obj->gtt_space->start >> PAGE_SHIFT,
+ obj->agp_type);
+ } else
+ intel_gtt_insert_pages(obj->gtt_space->start >> PAGE_SHIFT,
+ obj->base.size >> PAGE_SHIFT,
+ obj->pages,
+ obj->agp_type);
+
+ return 0;
+}
+
+void i915_gem_gtt_unbind_object(struct drm_i915_gem_object *obj)
+{
+ struct drm_device *dev = obj->base.dev;
+ struct drm_i915_private *dev_priv = dev->dev_private;
+
+ if (dev_priv->mm.gtt->needs_dmar) {
+ intel_gtt_unmap_memory(obj->sg_list, obj->num_sg);
+ obj->sg_list = NULL;
+ obj->num_sg = 0;
+ }
+
+ intel_gtt_clear_range(obj->gtt_space->start >> PAGE_SHIFT,
+ obj->base.size >> PAGE_SHIFT);
+}
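
A hypothetical caller sketch for the two entry points above, error handling trimmed: bind makes the object's pages reachable through its GTT offset, and unbind drops the mapping plus, on DMAR systems, the scatter/gather list as well.

    int ret = i915_gem_gtt_bind_object(obj);
    if (ret == 0) {
            /* the GPU can now address obj via obj->gtt_space->start */
            i915_gem_gtt_unbind_object(obj);
    }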
diff --git a/drivers/gpu/drm/i915/i915_gem_tiling.c b/drivers/gpu/drm/i915/i915_gem_tiling.c
index af352de70be..22a32b9932c 100644
--- a/drivers/gpu/drm/i915/i915_gem_tiling.c
+++ b/drivers/gpu/drm/i915/i915_gem_tiling.c
@@ -181,7 +181,7 @@ i915_gem_detect_bit_6_swizzle(struct drm_device *dev)
}
/* Check pitch constraints for all chips & tiling formats */
-bool
+static bool
i915_tiling_ok(struct drm_device *dev, int stride, int size, int tiling_mode)
{
int tile_width;
@@ -232,32 +232,44 @@ i915_tiling_ok(struct drm_device *dev, int stride, int size, int tiling_mode)
return true;
}
-bool
-i915_gem_object_fence_offset_ok(struct drm_gem_object *obj, int tiling_mode)
+/* Is the current GTT allocation valid for the change in tiling? */
+static bool
+i915_gem_object_fence_ok(struct drm_i915_gem_object *obj, int tiling_mode)
{
- struct drm_device *dev = obj->dev;
- struct drm_i915_gem_object *obj_priv = to_intel_bo(obj);
-
- if (obj_priv->gtt_space == NULL)
- return true;
+ u32 size;
if (tiling_mode == I915_TILING_NONE)
return true;
- if (INTEL_INFO(dev)->gen >= 4)
+ if (INTEL_INFO(obj->base.dev)->gen >= 4)
return true;
- if (obj_priv->gtt_offset & (obj->size - 1))
- return false;
-
- if (IS_GEN3(dev)) {
- if (obj_priv->gtt_offset & ~I915_FENCE_START_MASK)
+ if (INTEL_INFO(obj->base.dev)->gen == 3) {
+ if (obj->gtt_offset & ~I915_FENCE_START_MASK)
return false;
} else {
- if (obj_priv->gtt_offset & ~I830_FENCE_START_MASK)
+ if (obj->gtt_offset & ~I830_FENCE_START_MASK)
return false;
}
+	/*
+	 * On older chips the object must be aligned to the size of the
+	 * smallest fence register that can contain it.
+	 */
+ if (INTEL_INFO(obj->base.dev)->gen == 3)
+ size = 1024*1024;
+ else
+ size = 512*1024;
+
+ while (size < obj->base.size)
+ size <<= 1;
+
+ if (obj->gtt_space->size != size)
+ return false;
+
+ if (obj->gtt_offset & (size - 1))
+ return false;
+
return true;
}
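
A worked example of the sizing loop above, illustrative values only: on gen3 a 700 KiB object starts from the 1 MiB minimum fence size, which already covers it, so both the GTT node size and the offset must be exactly 1 MiB; a 1.5 MiB object would double once to 2 MiB.

    u32 size = 1024*1024;     /* gen3 minimum fence size */
    while (size < 700*1024)   /* example object size: 700 KiB */
            size <<= 1;       /* loop body never runs: size stays 1 MiB */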
@@ -267,30 +279,29 @@ i915_gem_object_fence_offset_ok(struct drm_gem_object *obj, int tiling_mode)
*/
int
i915_gem_set_tiling(struct drm_device *dev, void *data,
- struct drm_file *file_priv)
+ struct drm_file *file)
{
struct drm_i915_gem_set_tiling *args = data;
drm_i915_private_t *dev_priv = dev->dev_private;
- struct drm_gem_object *obj;
- struct drm_i915_gem_object *obj_priv;
+ struct drm_i915_gem_object *obj;
int ret;
ret = i915_gem_check_is_wedged(dev);
if (ret)
return ret;
- obj = drm_gem_object_lookup(dev, file_priv, args->handle);
+ obj = to_intel_bo(drm_gem_object_lookup(dev, file, args->handle));
if (obj == NULL)
return -ENOENT;
- obj_priv = to_intel_bo(obj);
- if (!i915_tiling_ok(dev, args->stride, obj->size, args->tiling_mode)) {
- drm_gem_object_unreference_unlocked(obj);
+ if (!i915_tiling_ok(dev,
+ args->stride, obj->base.size, args->tiling_mode)) {
+ drm_gem_object_unreference_unlocked(&obj->base);
return -EINVAL;
}
- if (obj_priv->pin_count) {
- drm_gem_object_unreference_unlocked(obj);
+ if (obj->pin_count) {
+ drm_gem_object_unreference_unlocked(&obj->base);
return -EBUSY;
}
@@ -324,34 +335,28 @@ i915_gem_set_tiling(struct drm_device *dev, void *data,
}
mutex_lock(&dev->struct_mutex);
- if (args->tiling_mode != obj_priv->tiling_mode ||
- args->stride != obj_priv->stride) {
+ if (args->tiling_mode != obj->tiling_mode ||
+ args->stride != obj->stride) {
/* We need to rebind the object if its current allocation
* no longer meets the alignment restrictions for its new
* tiling mode. Otherwise we can just leave it alone, but
* need to ensure that any fence register is cleared.
*/
- if (!i915_gem_object_fence_offset_ok(obj, args->tiling_mode))
- ret = i915_gem_object_unbind(obj);
- else if (obj_priv->fence_reg != I915_FENCE_REG_NONE)
- ret = i915_gem_object_put_fence_reg(obj, true);
- else
- i915_gem_release_mmap(obj);
+ i915_gem_release_mmap(obj);
- if (ret != 0) {
- args->tiling_mode = obj_priv->tiling_mode;
- args->stride = obj_priv->stride;
- goto err;
- }
+ obj->map_and_fenceable =
+ obj->gtt_space == NULL ||
+ (obj->gtt_offset + obj->base.size <= dev_priv->mm.gtt_mappable_end &&
+ i915_gem_object_fence_ok(obj, args->tiling_mode));
- obj_priv->tiling_mode = args->tiling_mode;
- obj_priv->stride = args->stride;
+ obj->tiling_changed = true;
+ obj->tiling_mode = args->tiling_mode;
+ obj->stride = args->stride;
}
-err:
- drm_gem_object_unreference(obj);
+ drm_gem_object_unreference(&obj->base);
mutex_unlock(&dev->struct_mutex);
- return ret;
+ return 0;
}
/**
@@ -359,22 +364,20 @@ err:
*/
int
i915_gem_get_tiling(struct drm_device *dev, void *data,
- struct drm_file *file_priv)
+ struct drm_file *file)
{
struct drm_i915_gem_get_tiling *args = data;
drm_i915_private_t *dev_priv = dev->dev_private;
- struct drm_gem_object *obj;
- struct drm_i915_gem_object *obj_priv;
+ struct drm_i915_gem_object *obj;
- obj = drm_gem_object_lookup(dev, file_priv, args->handle);
+ obj = to_intel_bo(drm_gem_object_lookup(dev, file, args->handle));
if (obj == NULL)
return -ENOENT;
- obj_priv = to_intel_bo(obj);
mutex_lock(&dev->struct_mutex);
- args->tiling_mode = obj_priv->tiling_mode;
- switch (obj_priv->tiling_mode) {
+ args->tiling_mode = obj->tiling_mode;
+ switch (obj->tiling_mode) {
case I915_TILING_X:
args->swizzle_mode = dev_priv->mm.bit_6_swizzle_x;
break;
@@ -394,7 +397,7 @@ i915_gem_get_tiling(struct drm_device *dev, void *data,
if (args->swizzle_mode == I915_BIT_6_SWIZZLE_9_10_17)
args->swizzle_mode = I915_BIT_6_SWIZZLE_9_10;
- drm_gem_object_unreference(obj);
+ drm_gem_object_unreference(&obj->base);
mutex_unlock(&dev->struct_mutex);
return 0;
@@ -424,46 +427,44 @@ i915_gem_swizzle_page(struct page *page)
}
void
-i915_gem_object_do_bit_17_swizzle(struct drm_gem_object *obj)
+i915_gem_object_do_bit_17_swizzle(struct drm_i915_gem_object *obj)
{
- struct drm_device *dev = obj->dev;
+ struct drm_device *dev = obj->base.dev;
drm_i915_private_t *dev_priv = dev->dev_private;
- struct drm_i915_gem_object *obj_priv = to_intel_bo(obj);
- int page_count = obj->size >> PAGE_SHIFT;
+ int page_count = obj->base.size >> PAGE_SHIFT;
int i;
if (dev_priv->mm.bit_6_swizzle_x != I915_BIT_6_SWIZZLE_9_10_17)
return;
- if (obj_priv->bit_17 == NULL)
+ if (obj->bit_17 == NULL)
return;
for (i = 0; i < page_count; i++) {
- char new_bit_17 = page_to_phys(obj_priv->pages[i]) >> 17;
+ char new_bit_17 = page_to_phys(obj->pages[i]) >> 17;
if ((new_bit_17 & 0x1) !=
- (test_bit(i, obj_priv->bit_17) != 0)) {
- i915_gem_swizzle_page(obj_priv->pages[i]);
- set_page_dirty(obj_priv->pages[i]);
+ (test_bit(i, obj->bit_17) != 0)) {
+ i915_gem_swizzle_page(obj->pages[i]);
+ set_page_dirty(obj->pages[i]);
}
}
}
void
-i915_gem_object_save_bit_17_swizzle(struct drm_gem_object *obj)
+i915_gem_object_save_bit_17_swizzle(struct drm_i915_gem_object *obj)
{
- struct drm_device *dev = obj->dev;
+ struct drm_device *dev = obj->base.dev;
drm_i915_private_t *dev_priv = dev->dev_private;
- struct drm_i915_gem_object *obj_priv = to_intel_bo(obj);
- int page_count = obj->size >> PAGE_SHIFT;
+ int page_count = obj->base.size >> PAGE_SHIFT;
int i;
if (dev_priv->mm.bit_6_swizzle_x != I915_BIT_6_SWIZZLE_9_10_17)
return;
- if (obj_priv->bit_17 == NULL) {
- obj_priv->bit_17 = kmalloc(BITS_TO_LONGS(page_count) *
+ if (obj->bit_17 == NULL) {
+ obj->bit_17 = kmalloc(BITS_TO_LONGS(page_count) *
sizeof(long), GFP_KERNEL);
- if (obj_priv->bit_17 == NULL) {
+ if (obj->bit_17 == NULL) {
DRM_ERROR("Failed to allocate memory for bit 17 "
"record\n");
return;
@@ -471,9 +472,9 @@ i915_gem_object_save_bit_17_swizzle(struct drm_gem_object *obj)
}
for (i = 0; i < page_count; i++) {
- if (page_to_phys(obj_priv->pages[i]) & (1 << 17))
- __set_bit(i, obj_priv->bit_17);
+ if (page_to_phys(obj->pages[i]) & (1 << 17))
+ __set_bit(i, obj->bit_17);
else
- __clear_bit(i, obj_priv->bit_17);
+ __clear_bit(i, obj->bit_17);
}
}
diff --git a/drivers/gpu/drm/i915/i915_irq.c b/drivers/gpu/drm/i915/i915_irq.c
index 729fd0c91d7..0dadc025b77 100644
--- a/drivers/gpu/drm/i915/i915_irq.c
+++ b/drivers/gpu/drm/i915/i915_irq.c
@@ -67,20 +67,20 @@
void
ironlake_enable_graphics_irq(drm_i915_private_t *dev_priv, u32 mask)
{
- if ((dev_priv->gt_irq_mask_reg & mask) != 0) {
- dev_priv->gt_irq_mask_reg &= ~mask;
- I915_WRITE(GTIMR, dev_priv->gt_irq_mask_reg);
- (void) I915_READ(GTIMR);
+ if ((dev_priv->gt_irq_mask & mask) != 0) {
+ dev_priv->gt_irq_mask &= ~mask;
+ I915_WRITE(GTIMR, dev_priv->gt_irq_mask);
+ POSTING_READ(GTIMR);
}
}
void
ironlake_disable_graphics_irq(drm_i915_private_t *dev_priv, u32 mask)
{
- if ((dev_priv->gt_irq_mask_reg & mask) != mask) {
- dev_priv->gt_irq_mask_reg |= mask;
- I915_WRITE(GTIMR, dev_priv->gt_irq_mask_reg);
- (void) I915_READ(GTIMR);
+ if ((dev_priv->gt_irq_mask & mask) != mask) {
+ dev_priv->gt_irq_mask |= mask;
+ I915_WRITE(GTIMR, dev_priv->gt_irq_mask);
+ POSTING_READ(GTIMR);
}
}
@@ -88,40 +88,40 @@ ironlake_disable_graphics_irq(drm_i915_private_t *dev_priv, u32 mask)
static void
ironlake_enable_display_irq(drm_i915_private_t *dev_priv, u32 mask)
{
- if ((dev_priv->irq_mask_reg & mask) != 0) {
- dev_priv->irq_mask_reg &= ~mask;
- I915_WRITE(DEIMR, dev_priv->irq_mask_reg);
- (void) I915_READ(DEIMR);
+ if ((dev_priv->irq_mask & mask) != 0) {
+ dev_priv->irq_mask &= ~mask;
+ I915_WRITE(DEIMR, dev_priv->irq_mask);
+ POSTING_READ(DEIMR);
}
}
static inline void
ironlake_disable_display_irq(drm_i915_private_t *dev_priv, u32 mask)
{
- if ((dev_priv->irq_mask_reg & mask) != mask) {
- dev_priv->irq_mask_reg |= mask;
- I915_WRITE(DEIMR, dev_priv->irq_mask_reg);
- (void) I915_READ(DEIMR);
+ if ((dev_priv->irq_mask & mask) != mask) {
+ dev_priv->irq_mask |= mask;
+ I915_WRITE(DEIMR, dev_priv->irq_mask);
+ POSTING_READ(DEIMR);
}
}
void
i915_enable_irq(drm_i915_private_t *dev_priv, u32 mask)
{
- if ((dev_priv->irq_mask_reg & mask) != 0) {
- dev_priv->irq_mask_reg &= ~mask;
- I915_WRITE(IMR, dev_priv->irq_mask_reg);
- (void) I915_READ(IMR);
+ if ((dev_priv->irq_mask & mask) != 0) {
+ dev_priv->irq_mask &= ~mask;
+ I915_WRITE(IMR, dev_priv->irq_mask);
+ POSTING_READ(IMR);
}
}
void
i915_disable_irq(drm_i915_private_t *dev_priv, u32 mask)
{
- if ((dev_priv->irq_mask_reg & mask) != mask) {
- dev_priv->irq_mask_reg |= mask;
- I915_WRITE(IMR, dev_priv->irq_mask_reg);
- (void) I915_READ(IMR);
+ if ((dev_priv->irq_mask & mask) != mask) {
+ dev_priv->irq_mask |= mask;
+ I915_WRITE(IMR, dev_priv->irq_mask);
+ POSTING_READ(IMR);
}
}
@@ -144,7 +144,7 @@ i915_enable_pipestat(drm_i915_private_t *dev_priv, int pipe, u32 mask)
dev_priv->pipestat[pipe] |= mask;
/* Enable the interrupt, clear any pending status */
I915_WRITE(reg, dev_priv->pipestat[pipe] | (mask >> 16));
- (void) I915_READ(reg);
+ POSTING_READ(reg);
}
}
@@ -156,16 +156,19 @@ i915_disable_pipestat(drm_i915_private_t *dev_priv, int pipe, u32 mask)
dev_priv->pipestat[pipe] &= ~mask;
I915_WRITE(reg, dev_priv->pipestat[pipe]);
- (void) I915_READ(reg);
+ POSTING_READ(reg);
}
}
/**
* intel_enable_asle - enable ASLE interrupt for OpRegion
*/
-void intel_enable_asle (struct drm_device *dev)
+void intel_enable_asle(struct drm_device *dev)
{
- drm_i915_private_t *dev_priv = (drm_i915_private_t *) dev->dev_private;
+ drm_i915_private_t *dev_priv = dev->dev_private;
+ unsigned long irqflags;
+
+ spin_lock_irqsave(&dev_priv->irq_lock, irqflags);
if (HAS_PCH_SPLIT(dev))
ironlake_enable_display_irq(dev_priv, DE_GSE);
@@ -176,6 +179,8 @@ void intel_enable_asle (struct drm_device *dev)
i915_enable_pipestat(dev_priv, 0,
PIPE_LEGACY_BLC_EVENT_ENABLE);
}
+
+ spin_unlock_irqrestore(&dev_priv->irq_lock, irqflags);
}
/**
@@ -243,6 +248,92 @@ u32 gm45_get_vblank_counter(struct drm_device *dev, int pipe)
return I915_READ(reg);
}
+int i915_get_crtc_scanoutpos(struct drm_device *dev, int pipe,
+ int *vpos, int *hpos)
+{
+ drm_i915_private_t *dev_priv = (drm_i915_private_t *) dev->dev_private;
+ u32 vbl = 0, position = 0;
+ int vbl_start, vbl_end, htotal, vtotal;
+ bool in_vbl = true;
+ int ret = 0;
+
+ if (!i915_pipe_enabled(dev, pipe)) {
+ DRM_DEBUG_DRIVER("trying to get scanoutpos for disabled "
+ "pipe %d\n", pipe);
+ return 0;
+ }
+
+ /* Get vtotal. */
+ vtotal = 1 + ((I915_READ(VTOTAL(pipe)) >> 16) & 0x1fff);
+
+ if (INTEL_INFO(dev)->gen >= 4) {
+	/* There is no obvious pixelcount register, so query only the
+	 * vertical scanout position from the display scan line register.
+	 */
+ position = I915_READ(PIPEDSL(pipe));
+
+	/* Decode it into a vertical scanout position; a horizontal
+	 * position is not available.
+	 */
+ *vpos = position & 0x1fff;
+ *hpos = 0;
+ } else {
+ /* Have access to pixelcount since start of frame.
+ * We can split this into vertical and horizontal
+ * scanout position.
+ */
+ position = (I915_READ(PIPEFRAMEPIXEL(pipe)) & PIPE_PIXEL_MASK) >> PIPE_PIXEL_SHIFT;
+
+ htotal = 1 + ((I915_READ(HTOTAL(pipe)) >> 16) & 0x1fff);
+ *vpos = position / htotal;
+ *hpos = position - (*vpos * htotal);
+ }
+
+ /* Query vblank area. */
+ vbl = I915_READ(VBLANK(pipe));
+
+ /* Test position against vblank region. */
+ vbl_start = vbl & 0x1fff;
+ vbl_end = (vbl >> 16) & 0x1fff;
+
+ if ((*vpos < vbl_start) || (*vpos > vbl_end))
+ in_vbl = false;
+
+ /* Inside "upper part" of vblank area? Apply corrective offset: */
+ if (in_vbl && (*vpos >= vbl_start))
+ *vpos = *vpos - vtotal;
+
+ /* Readouts valid? */
+ if (vbl > 0)
+ ret |= DRM_SCANOUTPOS_VALID | DRM_SCANOUTPOS_ACCURATE;
+
+ /* In vblank? */
+ if (in_vbl)
+ ret |= DRM_SCANOUTPOS_INVBL;
+
+ return ret;
+}
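
A worked example for the pre-gen4 branch, with hypothetical timings: htotal = 2200 pixels and a pixelcount of 1234567 since the start of the frame.

    int vpos = 1234567 / 2200;        /* = 561, the current scanline */
    int hpos = 1234567 - vpos * 2200; /* = 367, pixels into that line */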
+
+int i915_get_vblank_timestamp(struct drm_device *dev, int crtc,
+ int *max_error,
+ struct timeval *vblank_time,
+ unsigned flags)
+{
+ struct drm_crtc *drmcrtc;
+
+ if (crtc < 0 || crtc >= dev->num_crtcs) {
+ DRM_ERROR("Invalid crtc %d\n", crtc);
+ return -EINVAL;
+ }
+
+ /* Get drm_crtc to timestamp: */
+ drmcrtc = intel_get_crtc_for_pipe(dev, crtc);
+
+ /* Helper routine in DRM core does all the work: */
+ return drm_calc_vbltimestamp_from_scanoutpos(dev, crtc, max_error,
+ vblank_time, flags, drmcrtc);
+}
+
/*
* Handle hotplug events outside the interrupt handler proper.
*/
@@ -297,8 +388,8 @@ static void notify_ring(struct drm_device *dev,
struct intel_ring_buffer *ring)
{
struct drm_i915_private *dev_priv = dev->dev_private;
- u32 seqno = ring->get_seqno(dev, ring);
- ring->irq_gem_seqno = seqno;
+ u32 seqno = ring->get_seqno(ring);
+ ring->irq_seqno = seqno;
trace_i915_gem_request_complete(dev, seqno);
wake_up_all(&ring->irq_queue);
dev_priv->hangcheck_count = 0;
@@ -306,11 +397,49 @@ static void notify_ring(struct drm_device *dev,
jiffies + msecs_to_jiffies(DRM_I915_HANGCHECK_PERIOD));
}
+static void gen6_pm_irq_handler(struct drm_device *dev)
+{
+ drm_i915_private_t *dev_priv = (drm_i915_private_t *) dev->dev_private;
+ u8 new_delay = dev_priv->cur_delay;
+ u32 pm_iir;
+
+ pm_iir = I915_READ(GEN6_PMIIR);
+ if (!pm_iir)
+ return;
+
+ if (pm_iir & GEN6_PM_RP_UP_THRESHOLD) {
+ if (dev_priv->cur_delay != dev_priv->max_delay)
+ new_delay = dev_priv->cur_delay + 1;
+ if (new_delay > dev_priv->max_delay)
+ new_delay = dev_priv->max_delay;
+ } else if (pm_iir & (GEN6_PM_RP_DOWN_THRESHOLD | GEN6_PM_RP_DOWN_TIMEOUT)) {
+ if (dev_priv->cur_delay != dev_priv->min_delay)
+ new_delay = dev_priv->cur_delay - 1;
+ if (new_delay < dev_priv->min_delay) {
+ new_delay = dev_priv->min_delay;
+ I915_WRITE(GEN6_RP_INTERRUPT_LIMITS,
+ I915_READ(GEN6_RP_INTERRUPT_LIMITS) |
+ ((new_delay << 16) & 0x3f0000));
+ } else {
+ /* Make sure we continue to get down interrupts
+ * until we hit the minimum frequency */
+ I915_WRITE(GEN6_RP_INTERRUPT_LIMITS,
+ I915_READ(GEN6_RP_INTERRUPT_LIMITS) & ~0x3f0000);
+ }
+
+ }
+
+ gen6_set_rps(dev, new_delay);
+ dev_priv->cur_delay = new_delay;
+
+ I915_WRITE(GEN6_PMIIR, pm_iir);
+}
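
The handler above is a one-step controller: each up-threshold interrupt raises the RPS delay by one, each down-threshold or down-timeout interrupt lowers it by one, clamped to the [min_delay, max_delay] window (and the interrupt-limits register is rewritten at the bottom of the range so down interrupts keep arriving). A sketch of the stepping policy, where 'up' is a stand-in for the IIR bit test:

    u8 new_delay = cur_delay;
    if (up) {                          /* GEN6_PM_RP_UP_THRESHOLD set */
            if (cur_delay < max_delay)
                    new_delay = cur_delay + 1;
    } else {                           /* down threshold or timeout */
            if (cur_delay > min_delay)
                    new_delay = cur_delay - 1;
    }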
+
static irqreturn_t ironlake_irq_handler(struct drm_device *dev)
{
drm_i915_private_t *dev_priv = (drm_i915_private_t *) dev->dev_private;
int ret = IRQ_NONE;
- u32 de_iir, gt_iir, de_ier, pch_iir;
+ u32 de_iir, gt_iir, de_ier, pch_iir, pm_iir;
u32 hotplug_mask;
struct drm_i915_master_private *master_priv;
u32 bsd_usr_interrupt = GT_BSD_USER_INTERRUPT;
@@ -321,13 +450,15 @@ static irqreturn_t ironlake_irq_handler(struct drm_device *dev)
/* disable master interrupt before clearing iir */
de_ier = I915_READ(DEIER);
I915_WRITE(DEIER, de_ier & ~DE_MASTER_IRQ_CONTROL);
- (void)I915_READ(DEIER);
+ POSTING_READ(DEIER);
de_iir = I915_READ(DEIIR);
gt_iir = I915_READ(GTIIR);
pch_iir = I915_READ(SDEIIR);
+ pm_iir = I915_READ(GEN6_PMIIR);
- if (de_iir == 0 && gt_iir == 0 && pch_iir == 0)
+ if (de_iir == 0 && gt_iir == 0 && pch_iir == 0 &&
+ (!IS_GEN6(dev) || pm_iir == 0))
goto done;
if (HAS_PCH_CPT(dev))
@@ -344,12 +475,12 @@ static irqreturn_t ironlake_irq_handler(struct drm_device *dev)
READ_BREADCRUMB(dev_priv);
}
- if (gt_iir & GT_PIPE_NOTIFY)
- notify_ring(dev, &dev_priv->render_ring);
+ if (gt_iir & (GT_USER_INTERRUPT | GT_PIPE_NOTIFY))
+ notify_ring(dev, &dev_priv->ring[RCS]);
if (gt_iir & bsd_usr_interrupt)
- notify_ring(dev, &dev_priv->bsd_ring);
- if (HAS_BLT(dev) && gt_iir & GT_BLT_USER_INTERRUPT)
- notify_ring(dev, &dev_priv->blt_ring);
+ notify_ring(dev, &dev_priv->ring[VCS]);
+ if (gt_iir & GT_BLT_USER_INTERRUPT)
+ notify_ring(dev, &dev_priv->ring[BCS]);
if (de_iir & DE_GSE)
intel_opregion_gse_intr(dev);
@@ -379,6 +510,9 @@ static irqreturn_t ironlake_irq_handler(struct drm_device *dev)
i915_handle_rps_change(dev);
}
+ if (IS_GEN6(dev))
+ gen6_pm_irq_handler(dev);
+
/* should clear PCH hotplug event before clear CPU irq */
I915_WRITE(SDEIIR, pch_iir);
I915_WRITE(GTIIR, gt_iir);
@@ -386,7 +520,7 @@ static irqreturn_t ironlake_irq_handler(struct drm_device *dev)
done:
I915_WRITE(DEIER, de_ier);
- (void)I915_READ(DEIER);
+ POSTING_READ(DEIER);
return ret;
}
@@ -423,28 +557,23 @@ static void i915_error_work_func(struct work_struct *work)
#ifdef CONFIG_DEBUG_FS
static struct drm_i915_error_object *
i915_error_object_create(struct drm_device *dev,
- struct drm_gem_object *src)
+ struct drm_i915_gem_object *src)
{
drm_i915_private_t *dev_priv = dev->dev_private;
struct drm_i915_error_object *dst;
- struct drm_i915_gem_object *src_priv;
int page, page_count;
u32 reloc_offset;
- if (src == NULL)
- return NULL;
-
- src_priv = to_intel_bo(src);
- if (src_priv->pages == NULL)
+ if (src == NULL || src->pages == NULL)
return NULL;
- page_count = src->size / PAGE_SIZE;
+ page_count = src->base.size / PAGE_SIZE;
dst = kmalloc(sizeof(*dst) + page_count * sizeof (u32 *), GFP_ATOMIC);
if (dst == NULL)
return NULL;
- reloc_offset = src_priv->gtt_offset;
+ reloc_offset = src->gtt_offset;
for (page = 0; page < page_count; page++) {
unsigned long flags;
void __iomem *s;
@@ -466,7 +595,7 @@ i915_error_object_create(struct drm_device *dev,
reloc_offset += PAGE_SIZE;
}
dst->page_count = page_count;
- dst->gtt_offset = src_priv->gtt_offset;
+ dst->gtt_offset = src->gtt_offset;
return dst;
@@ -520,36 +649,96 @@ i915_get_bbaddr(struct drm_device *dev, u32 *ring)
}
static u32
-i915_ringbuffer_last_batch(struct drm_device *dev)
+i915_ringbuffer_last_batch(struct drm_device *dev,
+ struct intel_ring_buffer *ring)
{
struct drm_i915_private *dev_priv = dev->dev_private;
u32 head, bbaddr;
- u32 *ring;
+ u32 *val;
/* Locate the current position in the ringbuffer and walk back
* to find the most recently dispatched batch buffer.
*/
- bbaddr = 0;
- head = I915_READ(PRB0_HEAD) & HEAD_ADDR;
- ring = (u32 *)(dev_priv->render_ring.virtual_start + head);
+ head = I915_READ_HEAD(ring) & HEAD_ADDR;
- while (--ring >= (u32 *)dev_priv->render_ring.virtual_start) {
- bbaddr = i915_get_bbaddr(dev, ring);
+ val = (u32 *)(ring->virtual_start + head);
+ while (--val >= (u32 *)ring->virtual_start) {
+ bbaddr = i915_get_bbaddr(dev, val);
if (bbaddr)
- break;
+ return bbaddr;
}
- if (bbaddr == 0) {
- ring = (u32 *)(dev_priv->render_ring.virtual_start
- + dev_priv->render_ring.size);
- while (--ring >= (u32 *)dev_priv->render_ring.virtual_start) {
- bbaddr = i915_get_bbaddr(dev, ring);
- if (bbaddr)
- break;
- }
+ val = (u32 *)(ring->virtual_start + ring->size);
+ while (--val >= (u32 *)ring->virtual_start) {
+ bbaddr = i915_get_bbaddr(dev, val);
+ if (bbaddr)
+ return bbaddr;
}
- return bbaddr;
+ return 0;
+}
+
+static u32 capture_bo_list(struct drm_i915_error_buffer *err,
+ int count,
+ struct list_head *head)
+{
+ struct drm_i915_gem_object *obj;
+ int i = 0;
+
+ list_for_each_entry(obj, head, mm_list) {
+ err->size = obj->base.size;
+ err->name = obj->base.name;
+ err->seqno = obj->last_rendering_seqno;
+ err->gtt_offset = obj->gtt_offset;
+ err->read_domains = obj->base.read_domains;
+ err->write_domain = obj->base.write_domain;
+ err->fence_reg = obj->fence_reg;
+ err->pinned = 0;
+ if (obj->pin_count > 0)
+ err->pinned = 1;
+ if (obj->user_pin_count > 0)
+ err->pinned = -1;
+ err->tiling = obj->tiling_mode;
+ err->dirty = obj->dirty;
+ err->purgeable = obj->madv != I915_MADV_WILLNEED;
+ err->ring = obj->ring ? obj->ring->id : 0;
+
+ if (++i == count)
+ break;
+
+ err++;
+ }
+
+ return i;
+}
+
+static void i915_gem_record_fences(struct drm_device *dev,
+ struct drm_i915_error_state *error)
+{
+ struct drm_i915_private *dev_priv = dev->dev_private;
+ int i;
+
+ /* Fences */
+ switch (INTEL_INFO(dev)->gen) {
+ case 6:
+ for (i = 0; i < 16; i++)
+ error->fence[i] = I915_READ64(FENCE_REG_SANDYBRIDGE_0 + (i * 8));
+ break;
+ case 5:
+ case 4:
+ for (i = 0; i < 16; i++)
+ error->fence[i] = I915_READ64(FENCE_REG_965_0 + (i * 8));
+ break;
+ case 3:
+ if (IS_I945G(dev) || IS_I945GM(dev) || IS_G33(dev))
+ for (i = 0; i < 8; i++)
+ error->fence[i+8] = I915_READ(FENCE_REG_945_8 + (i * 4));
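+			/* fall through: gen2 and gen3 share the low 8 fence regs */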
+ case 2:
+ for (i = 0; i < 8; i++)
+ error->fence[i] = I915_READ(FENCE_REG_830_0 + (i * 4));
+ break;
+
+ }
}
/**
@@ -564,9 +753,9 @@ i915_ringbuffer_last_batch(struct drm_device *dev)
static void i915_capture_error_state(struct drm_device *dev)
{
struct drm_i915_private *dev_priv = dev->dev_private;
- struct drm_i915_gem_object *obj_priv;
+ struct drm_i915_gem_object *obj;
struct drm_i915_error_state *error;
- struct drm_gem_object *batchbuffer[2];
+ struct drm_i915_gem_object *batchbuffer[2];
unsigned long flags;
u32 bbaddr;
int count;
@@ -585,20 +774,33 @@ static void i915_capture_error_state(struct drm_device *dev)
DRM_DEBUG_DRIVER("generating error event\n");
- error->seqno =
- dev_priv->render_ring.get_seqno(dev, &dev_priv->render_ring);
+ error->seqno = dev_priv->ring[RCS].get_seqno(&dev_priv->ring[RCS]);
error->eir = I915_READ(EIR);
error->pgtbl_er = I915_READ(PGTBL_ER);
error->pipeastat = I915_READ(PIPEASTAT);
error->pipebstat = I915_READ(PIPEBSTAT);
error->instpm = I915_READ(INSTPM);
- if (INTEL_INFO(dev)->gen < 4) {
- error->ipeir = I915_READ(IPEIR);
- error->ipehr = I915_READ(IPEHR);
- error->instdone = I915_READ(INSTDONE);
- error->acthd = I915_READ(ACTHD);
- error->bbaddr = 0;
- } else {
+ error->error = 0;
+ if (INTEL_INFO(dev)->gen >= 6) {
+ error->error = I915_READ(ERROR_GEN6);
+
+ error->bcs_acthd = I915_READ(BCS_ACTHD);
+ error->bcs_ipehr = I915_READ(BCS_IPEHR);
+ error->bcs_ipeir = I915_READ(BCS_IPEIR);
+ error->bcs_instdone = I915_READ(BCS_INSTDONE);
+ error->bcs_seqno = 0;
+ if (dev_priv->ring[BCS].get_seqno)
+ error->bcs_seqno = dev_priv->ring[BCS].get_seqno(&dev_priv->ring[BCS]);
+
+ error->vcs_acthd = I915_READ(VCS_ACTHD);
+ error->vcs_ipehr = I915_READ(VCS_IPEHR);
+ error->vcs_ipeir = I915_READ(VCS_IPEIR);
+ error->vcs_instdone = I915_READ(VCS_INSTDONE);
+ error->vcs_seqno = 0;
+ if (dev_priv->ring[VCS].get_seqno)
+ error->vcs_seqno = dev_priv->ring[VCS].get_seqno(&dev_priv->ring[VCS]);
+ }
+ if (INTEL_INFO(dev)->gen >= 4) {
error->ipeir = I915_READ(IPEIR_I965);
error->ipehr = I915_READ(IPEHR_I965);
error->instdone = I915_READ(INSTDONE_I965);
@@ -606,42 +808,45 @@ static void i915_capture_error_state(struct drm_device *dev)
error->instdone1 = I915_READ(INSTDONE1);
error->acthd = I915_READ(ACTHD_I965);
error->bbaddr = I915_READ64(BB_ADDR);
+ } else {
+ error->ipeir = I915_READ(IPEIR);
+ error->ipehr = I915_READ(IPEHR);
+ error->instdone = I915_READ(INSTDONE);
+ error->acthd = I915_READ(ACTHD);
+ error->bbaddr = 0;
}
+ i915_gem_record_fences(dev, error);
- bbaddr = i915_ringbuffer_last_batch(dev);
+ bbaddr = i915_ringbuffer_last_batch(dev, &dev_priv->ring[RCS]);
/* Grab the current batchbuffer, most likely to have crashed. */
batchbuffer[0] = NULL;
batchbuffer[1] = NULL;
count = 0;
- list_for_each_entry(obj_priv, &dev_priv->mm.active_list, mm_list) {
- struct drm_gem_object *obj = &obj_priv->base;
-
+ list_for_each_entry(obj, &dev_priv->mm.active_list, mm_list) {
if (batchbuffer[0] == NULL &&
- bbaddr >= obj_priv->gtt_offset &&
- bbaddr < obj_priv->gtt_offset + obj->size)
+ bbaddr >= obj->gtt_offset &&
+ bbaddr < obj->gtt_offset + obj->base.size)
batchbuffer[0] = obj;
if (batchbuffer[1] == NULL &&
- error->acthd >= obj_priv->gtt_offset &&
- error->acthd < obj_priv->gtt_offset + obj->size)
+ error->acthd >= obj->gtt_offset &&
+ error->acthd < obj->gtt_offset + obj->base.size)
batchbuffer[1] = obj;
count++;
}
/* For completeness, scan the other lists to catch those bizarre errors. */
if (batchbuffer[0] == NULL || batchbuffer[1] == NULL) {
- list_for_each_entry(obj_priv, &dev_priv->mm.flushing_list, mm_list) {
- struct drm_gem_object *obj = &obj_priv->base;
-
+ list_for_each_entry(obj, &dev_priv->mm.flushing_list, mm_list) {
if (batchbuffer[0] == NULL &&
- bbaddr >= obj_priv->gtt_offset &&
- bbaddr < obj_priv->gtt_offset + obj->size)
+ bbaddr >= obj->gtt_offset &&
+ bbaddr < obj->gtt_offset + obj->base.size)
batchbuffer[0] = obj;
if (batchbuffer[1] == NULL &&
- error->acthd >= obj_priv->gtt_offset &&
- error->acthd < obj_priv->gtt_offset + obj->size)
+ error->acthd >= obj->gtt_offset &&
+ error->acthd < obj->gtt_offset + obj->base.size)
batchbuffer[1] = obj;
if (batchbuffer[0] && batchbuffer[1])
@@ -649,17 +854,15 @@ static void i915_capture_error_state(struct drm_device *dev)
}
}
if (batchbuffer[0] == NULL || batchbuffer[1] == NULL) {
- list_for_each_entry(obj_priv, &dev_priv->mm.inactive_list, mm_list) {
- struct drm_gem_object *obj = &obj_priv->base;
-
+ list_for_each_entry(obj, &dev_priv->mm.inactive_list, mm_list) {
if (batchbuffer[0] == NULL &&
- bbaddr >= obj_priv->gtt_offset &&
- bbaddr < obj_priv->gtt_offset + obj->size)
+ bbaddr >= obj->gtt_offset &&
+ bbaddr < obj->gtt_offset + obj->base.size)
batchbuffer[0] = obj;
if (batchbuffer[1] == NULL &&
- error->acthd >= obj_priv->gtt_offset &&
- error->acthd < obj_priv->gtt_offset + obj->size)
+ error->acthd >= obj->gtt_offset &&
+ error->acthd < obj->gtt_offset + obj->base.size)
batchbuffer[1] = obj;
if (batchbuffer[0] && batchbuffer[1])
@@ -678,46 +881,41 @@ static void i915_capture_error_state(struct drm_device *dev)
/* Record the ringbuffer */
error->ringbuffer = i915_error_object_create(dev,
- dev_priv->render_ring.gem_object);
+ dev_priv->ring[RCS].obj);
- /* Record buffers on the active list. */
+ /* Record buffers on the active and pinned lists. */
error->active_bo = NULL;
- error->active_bo_count = 0;
+ error->pinned_bo = NULL;
- if (count)
+ error->active_bo_count = count;
+ list_for_each_entry(obj, &dev_priv->mm.pinned_list, mm_list)
+ count++;
+ error->pinned_bo_count = count - error->active_bo_count;
+
+ if (count) {
error->active_bo = kmalloc(sizeof(*error->active_bo)*count,
GFP_ATOMIC);
-
- if (error->active_bo) {
- int i = 0;
- list_for_each_entry(obj_priv, &dev_priv->mm.active_list, mm_list) {
- struct drm_gem_object *obj = &obj_priv->base;
-
- error->active_bo[i].size = obj->size;
- error->active_bo[i].name = obj->name;
- error->active_bo[i].seqno = obj_priv->last_rendering_seqno;
- error->active_bo[i].gtt_offset = obj_priv->gtt_offset;
- error->active_bo[i].read_domains = obj->read_domains;
- error->active_bo[i].write_domain = obj->write_domain;
- error->active_bo[i].fence_reg = obj_priv->fence_reg;
- error->active_bo[i].pinned = 0;
- if (obj_priv->pin_count > 0)
- error->active_bo[i].pinned = 1;
- if (obj_priv->user_pin_count > 0)
- error->active_bo[i].pinned = -1;
- error->active_bo[i].tiling = obj_priv->tiling_mode;
- error->active_bo[i].dirty = obj_priv->dirty;
- error->active_bo[i].purgeable = obj_priv->madv != I915_MADV_WILLNEED;
-
- if (++i == count)
- break;
- }
- error->active_bo_count = i;
+ if (error->active_bo)
+ error->pinned_bo =
+ error->active_bo + error->active_bo_count;
}
+ if (error->active_bo)
+ error->active_bo_count =
+ capture_bo_list(error->active_bo,
+ error->active_bo_count,
+ &dev_priv->mm.active_list);
+
+ if (error->pinned_bo)
+ error->pinned_bo_count =
+ capture_bo_list(error->pinned_bo,
+ error->pinned_bo_count,
+ &dev_priv->mm.pinned_list);
+
do_gettimeofday(&error->time);
error->overlay = intel_overlay_capture_error_state(dev);
+ error->display = intel_display_capture_error_state(dev);
spin_lock_irqsave(&dev_priv->error_lock, flags);
if (dev_priv->first_error == NULL) {
@@ -775,7 +973,7 @@ static void i915_report_and_clear_eir(struct drm_device *dev)
printk(KERN_ERR " ACTHD: 0x%08x\n",
I915_READ(ACTHD_I965));
I915_WRITE(IPEIR_I965, ipeir);
- (void)I915_READ(IPEIR_I965);
+ POSTING_READ(IPEIR_I965);
}
if (eir & GM45_ERROR_PAGE_TABLE) {
u32 pgtbl_err = I915_READ(PGTBL_ER);
@@ -783,7 +981,7 @@ static void i915_report_and_clear_eir(struct drm_device *dev)
printk(KERN_ERR " PGTBL_ER: 0x%08x\n",
pgtbl_err);
I915_WRITE(PGTBL_ER, pgtbl_err);
- (void)I915_READ(PGTBL_ER);
+ POSTING_READ(PGTBL_ER);
}
}
@@ -794,7 +992,7 @@ static void i915_report_and_clear_eir(struct drm_device *dev)
printk(KERN_ERR " PGTBL_ER: 0x%08x\n",
pgtbl_err);
I915_WRITE(PGTBL_ER, pgtbl_err);
- (void)I915_READ(PGTBL_ER);
+ POSTING_READ(PGTBL_ER);
}
}
@@ -825,7 +1023,7 @@ static void i915_report_and_clear_eir(struct drm_device *dev)
printk(KERN_ERR " ACTHD: 0x%08x\n",
I915_READ(ACTHD));
I915_WRITE(IPEIR, ipeir);
- (void)I915_READ(IPEIR);
+ POSTING_READ(IPEIR);
} else {
u32 ipeir = I915_READ(IPEIR_I965);
@@ -842,12 +1040,12 @@ static void i915_report_and_clear_eir(struct drm_device *dev)
printk(KERN_ERR " ACTHD: 0x%08x\n",
I915_READ(ACTHD_I965));
I915_WRITE(IPEIR_I965, ipeir);
- (void)I915_READ(IPEIR_I965);
+ POSTING_READ(IPEIR_I965);
}
}
I915_WRITE(EIR, eir);
- (void)I915_READ(EIR);
+ POSTING_READ(EIR);
eir = I915_READ(EIR);
if (eir) {
/*
@@ -870,7 +1068,7 @@ static void i915_report_and_clear_eir(struct drm_device *dev)
* so userspace knows something bad happened (should trigger collection
* of a ring dump etc.).
*/
-static void i915_handle_error(struct drm_device *dev, bool wedged)
+void i915_handle_error(struct drm_device *dev, bool wedged)
{
struct drm_i915_private *dev_priv = dev->dev_private;
@@ -884,11 +1082,11 @@ static void i915_handle_error(struct drm_device *dev, bool wedged)
/*
* Wakeup waiting processes so they don't hang
*/
- wake_up_all(&dev_priv->render_ring.irq_queue);
+ wake_up_all(&dev_priv->ring[RCS].irq_queue);
if (HAS_BSD(dev))
- wake_up_all(&dev_priv->bsd_ring.irq_queue);
+ wake_up_all(&dev_priv->ring[VCS].irq_queue);
if (HAS_BLT(dev))
- wake_up_all(&dev_priv->blt_ring.irq_queue);
+ wake_up_all(&dev_priv->ring[BCS].irq_queue);
}
queue_work(dev_priv->wq, &dev_priv->error_work);
@@ -899,7 +1097,7 @@ static void i915_pageflip_stall_check(struct drm_device *dev, int pipe)
drm_i915_private_t *dev_priv = dev->dev_private;
struct drm_crtc *crtc = dev_priv->pipe_to_crtc_mapping[pipe];
struct intel_crtc *intel_crtc = to_intel_crtc(crtc);
- struct drm_i915_gem_object *obj_priv;
+ struct drm_i915_gem_object *obj;
struct intel_unpin_work *work;
unsigned long flags;
bool stall_detected;
@@ -918,13 +1116,13 @@ static void i915_pageflip_stall_check(struct drm_device *dev, int pipe)
}
/* Potential stall - if we see that the flip has happened, assume a missed interrupt */
- obj_priv = to_intel_bo(work->pending_flip_obj);
+ obj = work->pending_flip_obj;
if (INTEL_INFO(dev)->gen >= 4) {
int dspsurf = intel_crtc->plane == 0 ? DSPASURF : DSPBSURF;
- stall_detected = I915_READ(dspsurf) == obj_priv->gtt_offset;
+ stall_detected = I915_READ(dspsurf) == obj->gtt_offset;
} else {
int dspaddr = intel_crtc->plane == 0 ? DSPAADDR : DSPBADDR;
- stall_detected = I915_READ(dspaddr) == (obj_priv->gtt_offset +
+ stall_detected = I915_READ(dspaddr) == (obj->gtt_offset +
crtc->y * crtc->fb->pitch +
crtc->x * crtc->fb->bits_per_pixel/8);
}
@@ -970,7 +1168,7 @@ irqreturn_t i915_driver_irq_handler(DRM_IRQ_ARGS)
* It doesn't set the bit in iir again, but it still produces
* interrupts (for non-MSI).
*/
- spin_lock_irqsave(&dev_priv->user_irq_lock, irqflags);
+ spin_lock_irqsave(&dev_priv->irq_lock, irqflags);
pipea_stats = I915_READ(PIPEASTAT);
pipeb_stats = I915_READ(PIPEBSTAT);
@@ -993,7 +1191,7 @@ irqreturn_t i915_driver_irq_handler(DRM_IRQ_ARGS)
I915_WRITE(PIPEBSTAT, pipeb_stats);
irq_received = 1;
}
- spin_unlock_irqrestore(&dev_priv->user_irq_lock, irqflags);
+ spin_unlock_irqrestore(&dev_priv->irq_lock, irqflags);
if (!irq_received)
break;
@@ -1026,9 +1224,9 @@ irqreturn_t i915_driver_irq_handler(DRM_IRQ_ARGS)
}
if (iir & I915_USER_INTERRUPT)
- notify_ring(dev, &dev_priv->render_ring);
- if (HAS_BSD(dev) && (iir & I915_BSD_USER_INTERRUPT))
- notify_ring(dev, &dev_priv->bsd_ring);
+ notify_ring(dev, &dev_priv->ring[RCS]);
+ if (iir & I915_BSD_USER_INTERRUPT)
+ notify_ring(dev, &dev_priv->ring[VCS]);
if (iir & I915_DISPLAY_PLANE_A_FLIP_PENDING_INTERRUPT) {
intel_prepare_page_flip(dev, 0);
@@ -1101,12 +1299,13 @@ static int i915_emit_irq(struct drm_device * dev)
if (master_priv->sarea_priv)
master_priv->sarea_priv->last_enqueue = dev_priv->counter;
- BEGIN_LP_RING(4);
- OUT_RING(MI_STORE_DWORD_INDEX);
- OUT_RING(I915_BREADCRUMB_INDEX << MI_STORE_DWORD_INDEX_SHIFT);
- OUT_RING(dev_priv->counter);
- OUT_RING(MI_USER_INTERRUPT);
- ADVANCE_LP_RING();
+ if (BEGIN_LP_RING(4) == 0) {
+ OUT_RING(MI_STORE_DWORD_INDEX);
+ OUT_RING(I915_BREADCRUMB_INDEX << MI_STORE_DWORD_INDEX_SHIFT);
+ OUT_RING(dev_priv->counter);
+ OUT_RING(MI_USER_INTERRUPT);
+ ADVANCE_LP_RING();
+ }
return dev_priv->counter;
}
@@ -1114,12 +1313,11 @@ static int i915_emit_irq(struct drm_device * dev)
void i915_trace_irq_get(struct drm_device *dev, u32 seqno)
{
drm_i915_private_t *dev_priv = (drm_i915_private_t *) dev->dev_private;
- struct intel_ring_buffer *render_ring = &dev_priv->render_ring;
+ struct intel_ring_buffer *ring = LP_RING(dev_priv);
- if (dev_priv->trace_irq_seqno == 0)
- render_ring->user_irq_get(dev, render_ring);
-
- dev_priv->trace_irq_seqno = seqno;
+ if (dev_priv->trace_irq_seqno == 0 &&
+ ring->irq_get(ring))
+ dev_priv->trace_irq_seqno = seqno;
}
static int i915_wait_irq(struct drm_device * dev, int irq_nr)
@@ -1127,7 +1325,7 @@ static int i915_wait_irq(struct drm_device * dev, int irq_nr)
drm_i915_private_t *dev_priv = (drm_i915_private_t *) dev->dev_private;
struct drm_i915_master_private *master_priv = dev->primary->master->driver_priv;
int ret = 0;
- struct intel_ring_buffer *render_ring = &dev_priv->render_ring;
+ struct intel_ring_buffer *ring = LP_RING(dev_priv);
DRM_DEBUG_DRIVER("irq_nr=%d breadcrumb=%d\n", irq_nr,
READ_BREADCRUMB(dev_priv));
@@ -1141,10 +1339,12 @@ static int i915_wait_irq(struct drm_device * dev, int irq_nr)
if (master_priv->sarea_priv)
master_priv->sarea_priv->perf_boxes |= I915_BOX_WAIT;
- render_ring->user_irq_get(dev, render_ring);
- DRM_WAIT_ON(ret, dev_priv->render_ring.irq_queue, 3 * DRM_HZ,
- READ_BREADCRUMB(dev_priv) >= irq_nr);
- render_ring->user_irq_put(dev, render_ring);
+ ret = -ENODEV;
+ if (ring->irq_get(ring)) {
+ DRM_WAIT_ON(ret, ring->irq_queue, 3 * DRM_HZ,
+ READ_BREADCRUMB(dev_priv) >= irq_nr);
+ ring->irq_put(ring);
+ }
if (ret == -EBUSY) {
DRM_ERROR("EBUSY -- rec: %d emitted: %d\n",
@@ -1163,7 +1363,7 @@ int i915_irq_emit(struct drm_device *dev, void *data,
drm_i915_irq_emit_t *emit = data;
int result;
- if (!dev_priv || !dev_priv->render_ring.virtual_start) {
+ if (!dev_priv || !LP_RING(dev_priv)->virtual_start) {
DRM_ERROR("called with no initialization\n");
return -EINVAL;
}
@@ -1209,9 +1409,9 @@ int i915_enable_vblank(struct drm_device *dev, int pipe)
if (!i915_pipe_enabled(dev, pipe))
return -EINVAL;
- spin_lock_irqsave(&dev_priv->user_irq_lock, irqflags);
+ spin_lock_irqsave(&dev_priv->irq_lock, irqflags);
if (HAS_PCH_SPLIT(dev))
- ironlake_enable_display_irq(dev_priv, (pipe == 0) ?
+ ironlake_enable_display_irq(dev_priv, (pipe == 0) ?
DE_PIPEA_VBLANK: DE_PIPEB_VBLANK);
else if (INTEL_INFO(dev)->gen >= 4)
i915_enable_pipestat(dev_priv, pipe,
@@ -1219,7 +1419,7 @@ int i915_enable_vblank(struct drm_device *dev, int pipe)
else
i915_enable_pipestat(dev_priv, pipe,
PIPE_VBLANK_INTERRUPT_ENABLE);
- spin_unlock_irqrestore(&dev_priv->user_irq_lock, irqflags);
+ spin_unlock_irqrestore(&dev_priv->irq_lock, irqflags);
return 0;
}
@@ -1231,15 +1431,15 @@ void i915_disable_vblank(struct drm_device *dev, int pipe)
drm_i915_private_t *dev_priv = (drm_i915_private_t *) dev->dev_private;
unsigned long irqflags;
- spin_lock_irqsave(&dev_priv->user_irq_lock, irqflags);
+ spin_lock_irqsave(&dev_priv->irq_lock, irqflags);
if (HAS_PCH_SPLIT(dev))
- ironlake_disable_display_irq(dev_priv, (pipe == 0) ?
+ ironlake_disable_display_irq(dev_priv, (pipe == 0) ?
DE_PIPEA_VBLANK: DE_PIPEB_VBLANK);
else
i915_disable_pipestat(dev_priv, pipe,
PIPE_VBLANK_INTERRUPT_ENABLE |
PIPE_START_VBLANK_INTERRUPT_ENABLE);
- spin_unlock_irqrestore(&dev_priv->user_irq_lock, irqflags);
+ spin_unlock_irqrestore(&dev_priv->irq_lock, irqflags);
}
void i915_enable_interrupt (struct drm_device *dev)
@@ -1306,12 +1506,50 @@ int i915_vblank_swap(struct drm_device *dev, void *data,
return -EINVAL;
}
-static struct drm_i915_gem_request *
-i915_get_tail_request(struct drm_device *dev)
+static u32
+ring_last_seqno(struct intel_ring_buffer *ring)
{
- drm_i915_private_t *dev_priv = dev->dev_private;
- return list_entry(dev_priv->render_ring.request_list.prev,
- struct drm_i915_gem_request, list);
+ return list_entry(ring->request_list.prev,
+ struct drm_i915_gem_request, list)->seqno;
+}
+
+static bool i915_hangcheck_ring_idle(struct intel_ring_buffer *ring, bool *err)
+{
+ if (list_empty(&ring->request_list) ||
+ i915_seqno_passed(ring->get_seqno(ring), ring_last_seqno(ring))) {
+ /* Issue a wake-up to catch stuck h/w. */
+ if (ring->waiting_seqno && waitqueue_active(&ring->irq_queue)) {
+ DRM_ERROR("Hangcheck timer elapsed... %s idle [waiting on %d, at %d], missed IRQ?\n",
+ ring->name,
+ ring->waiting_seqno,
+ ring->get_seqno(ring));
+ wake_up_all(&ring->irq_queue);
+ *err = true;
+ }
+ return true;
+ }
+ return false;
+}
+
+static bool kick_ring(struct intel_ring_buffer *ring)
+{
+ struct drm_device *dev = ring->dev;
+ struct drm_i915_private *dev_priv = dev->dev_private;
+ u32 tmp = I915_READ_CTL(ring);
+ if (tmp & RING_WAIT) {
+ DRM_ERROR("Kicking stuck wait on %s\n",
+ ring->name);
+ I915_WRITE_CTL(ring, tmp);
+ return true;
+ }
+ if (IS_GEN6(dev) &&
+ (tmp & RING_WAIT_SEMAPHORE)) {
+ DRM_ERROR("Kicking stuck semaphore on %s\n",
+ ring->name);
+ I915_WRITE_CTL(ring, tmp);
+ return true;
+ }
+ return false;
}
/**
@@ -1325,6 +1563,17 @@ void i915_hangcheck_elapsed(unsigned long data)
struct drm_device *dev = (struct drm_device *)data;
drm_i915_private_t *dev_priv = dev->dev_private;
uint32_t acthd, instdone, instdone1;
+ bool err = false;
+
+ /* If all work is done then ACTHD clearly hasn't advanced. */
+ if (i915_hangcheck_ring_idle(&dev_priv->ring[RCS], &err) &&
+ i915_hangcheck_ring_idle(&dev_priv->ring[VCS], &err) &&
+ i915_hangcheck_ring_idle(&dev_priv->ring[BCS], &err)) {
+ dev_priv->hangcheck_count = 0;
+ if (err)
+ goto repeat;
+ return;
+ }
if (INTEL_INFO(dev)->gen < 4) {
acthd = I915_READ(ACTHD);
@@ -1336,38 +1585,6 @@ void i915_hangcheck_elapsed(unsigned long data)
instdone1 = I915_READ(INSTDONE1);
}
- /* If all work is done then ACTHD clearly hasn't advanced. */
- if (list_empty(&dev_priv->render_ring.request_list) ||
- i915_seqno_passed(dev_priv->render_ring.get_seqno(dev, &dev_priv->render_ring),
- i915_get_tail_request(dev)->seqno)) {
- bool missed_wakeup = false;
-
- dev_priv->hangcheck_count = 0;
-
- /* Issue a wake-up to catch stuck h/w. */
- if (dev_priv->render_ring.waiting_gem_seqno &&
- waitqueue_active(&dev_priv->render_ring.irq_queue)) {
- wake_up_all(&dev_priv->render_ring.irq_queue);
- missed_wakeup = true;
- }
-
- if (dev_priv->bsd_ring.waiting_gem_seqno &&
- waitqueue_active(&dev_priv->bsd_ring.irq_queue)) {
- wake_up_all(&dev_priv->bsd_ring.irq_queue);
- missed_wakeup = true;
- }
-
- if (dev_priv->blt_ring.waiting_gem_seqno &&
- waitqueue_active(&dev_priv->blt_ring.irq_queue)) {
- wake_up_all(&dev_priv->blt_ring.irq_queue);
- missed_wakeup = true;
- }
-
- if (missed_wakeup)
- DRM_ERROR("Hangcheck timer elapsed... GPU idle, missed IRQ.\n");
- return;
- }
-
if (dev_priv->last_acthd == acthd &&
dev_priv->last_instdone == instdone &&
dev_priv->last_instdone1 == instdone1) {
@@ -1380,12 +1597,17 @@ void i915_hangcheck_elapsed(unsigned long data)
* and break the hang. This should work on
* all but the second generation chipsets.
*/
- u32 tmp = I915_READ(PRB0_CTL);
- if (tmp & RING_WAIT) {
- I915_WRITE(PRB0_CTL, tmp);
- POSTING_READ(PRB0_CTL);
- goto out;
- }
+
+ if (kick_ring(&dev_priv->ring[RCS]))
+ goto repeat;
+
+ if (HAS_BSD(dev) &&
+ kick_ring(&dev_priv->ring[VCS]))
+ goto repeat;
+
+ if (HAS_BLT(dev) &&
+ kick_ring(&dev_priv->ring[BCS]))
+ goto repeat;
}
i915_handle_error(dev, true);
@@ -1399,7 +1621,7 @@ void i915_hangcheck_elapsed(unsigned long data)
dev_priv->last_instdone1 = instdone1;
}
-out:
+repeat:
/* Reset the timer in case the chip hangs without another request being added */
mod_timer(&dev_priv->hangcheck_timer,
jiffies + msecs_to_jiffies(DRM_I915_HANGCHECK_PERIOD));
@@ -1417,17 +1639,17 @@ static void ironlake_irq_preinstall(struct drm_device *dev)
I915_WRITE(DEIMR, 0xffffffff);
I915_WRITE(DEIER, 0x0);
- (void) I915_READ(DEIER);
+ POSTING_READ(DEIER);
/* and GT */
I915_WRITE(GTIMR, 0xffffffff);
I915_WRITE(GTIER, 0x0);
- (void) I915_READ(GTIER);
+ POSTING_READ(GTIER);
/* south display irq */
I915_WRITE(SDEIMR, 0xffffffff);
I915_WRITE(SDEIER, 0x0);
- (void) I915_READ(SDEIER);
+ POSTING_READ(SDEIER);
}
static int ironlake_irq_postinstall(struct drm_device *dev)
@@ -1436,38 +1658,39 @@ static int ironlake_irq_postinstall(struct drm_device *dev)
/* enable the kinds of interrupts that are always enabled */
u32 display_mask = DE_MASTER_IRQ_CONTROL | DE_GSE | DE_PCH_EVENT |
DE_PLANEA_FLIP_DONE | DE_PLANEB_FLIP_DONE;
- u32 render_mask = GT_PIPE_NOTIFY | GT_BSD_USER_INTERRUPT;
+ u32 render_irqs;
u32 hotplug_mask;
- dev_priv->irq_mask_reg = ~display_mask;
- dev_priv->de_irq_enable_reg = display_mask | DE_PIPEA_VBLANK | DE_PIPEB_VBLANK;
+ dev_priv->irq_mask = ~display_mask;
/* should always be able to generate an irq */
I915_WRITE(DEIIR, I915_READ(DEIIR));
- I915_WRITE(DEIMR, dev_priv->irq_mask_reg);
- I915_WRITE(DEIER, dev_priv->de_irq_enable_reg);
- (void) I915_READ(DEIER);
+ I915_WRITE(DEIMR, dev_priv->irq_mask);
+ I915_WRITE(DEIER, display_mask | DE_PIPEA_VBLANK | DE_PIPEB_VBLANK);
+ POSTING_READ(DEIER);
- if (IS_GEN6(dev)) {
- render_mask =
- GT_PIPE_NOTIFY |
- GT_GEN6_BSD_USER_INTERRUPT |
- GT_BLT_USER_INTERRUPT;
- }
-
- dev_priv->gt_irq_mask_reg = ~render_mask;
- dev_priv->gt_irq_enable_reg = render_mask;
+ dev_priv->gt_irq_mask = ~0;
I915_WRITE(GTIIR, I915_READ(GTIIR));
- I915_WRITE(GTIMR, dev_priv->gt_irq_mask_reg);
+ I915_WRITE(GTIMR, dev_priv->gt_irq_mask);
if (IS_GEN6(dev)) {
- I915_WRITE(GEN6_RENDER_IMR, ~GEN6_RENDER_PIPE_CONTROL_NOTIFY_INTERRUPT);
- I915_WRITE(GEN6_BSD_IMR, ~GEN6_BSD_IMR_USER_INTERRUPT);
+ I915_WRITE(GEN6_RENDER_IMR, ~GEN6_RENDER_USER_INTERRUPT);
+ I915_WRITE(GEN6_BSD_IMR, ~GEN6_BSD_USER_INTERRUPT);
I915_WRITE(GEN6_BLITTER_IMR, ~GEN6_BLITTER_USER_INTERRUPT);
}
- I915_WRITE(GTIER, dev_priv->gt_irq_enable_reg);
- (void) I915_READ(GTIER);
+ if (IS_GEN6(dev))
+ render_irqs =
+ GT_USER_INTERRUPT |
+ GT_GEN6_BSD_USER_INTERRUPT |
+ GT_BLT_USER_INTERRUPT;
+ else
+ render_irqs =
+ GT_USER_INTERRUPT |
+ GT_PIPE_NOTIFY |
+ GT_BSD_USER_INTERRUPT;
+ I915_WRITE(GTIER, render_irqs);
+ POSTING_READ(GTIER);
if (HAS_PCH_CPT(dev)) {
hotplug_mask = SDE_CRT_HOTPLUG_CPT | SDE_PORTB_HOTPLUG_CPT |
@@ -1477,13 +1700,12 @@ static int ironlake_irq_postinstall(struct drm_device *dev)
SDE_PORTC_HOTPLUG | SDE_PORTD_HOTPLUG;
}
- dev_priv->pch_irq_mask_reg = ~hotplug_mask;
- dev_priv->pch_irq_enable_reg = hotplug_mask;
+ dev_priv->pch_irq_mask = ~hotplug_mask;
I915_WRITE(SDEIIR, I915_READ(SDEIIR));
- I915_WRITE(SDEIMR, dev_priv->pch_irq_mask_reg);
- I915_WRITE(SDEIER, dev_priv->pch_irq_enable_reg);
- (void) I915_READ(SDEIER);
+ I915_WRITE(SDEIMR, dev_priv->pch_irq_mask);
+ I915_WRITE(SDEIER, hotplug_mask);
+ POSTING_READ(SDEIER);
if (IS_IRONLAKE_M(dev)) {
/* Clear & enable PCU event interrupts */
@@ -1519,7 +1741,7 @@ void i915_driver_irq_preinstall(struct drm_device * dev)
I915_WRITE(PIPEBSTAT, 0);
I915_WRITE(IMR, 0xffffffff);
I915_WRITE(IER, 0x0);
- (void) I915_READ(IER);
+ POSTING_READ(IER);
}
/*
@@ -1532,11 +1754,11 @@ int i915_driver_irq_postinstall(struct drm_device *dev)
u32 enable_mask = I915_INTERRUPT_ENABLE_FIX | I915_INTERRUPT_ENABLE_VAR;
u32 error_mask;
- DRM_INIT_WAITQUEUE(&dev_priv->render_ring.irq_queue);
+ DRM_INIT_WAITQUEUE(&dev_priv->ring[RCS].irq_queue);
if (HAS_BSD(dev))
- DRM_INIT_WAITQUEUE(&dev_priv->bsd_ring.irq_queue);
+ DRM_INIT_WAITQUEUE(&dev_priv->ring[VCS].irq_queue);
if (HAS_BLT(dev))
- DRM_INIT_WAITQUEUE(&dev_priv->blt_ring.irq_queue);
+ DRM_INIT_WAITQUEUE(&dev_priv->ring[BCS].irq_queue);
dev_priv->vblank_pipe = DRM_I915_VBLANK_PIPE_A | DRM_I915_VBLANK_PIPE_B;
@@ -1544,7 +1766,7 @@ int i915_driver_irq_postinstall(struct drm_device *dev)
return ironlake_irq_postinstall(dev);
/* Unmask the interrupts that we always want on. */
- dev_priv->irq_mask_reg = ~I915_INTERRUPT_ENABLE_FIX;
+ dev_priv->irq_mask = ~I915_INTERRUPT_ENABLE_FIX;
dev_priv->pipestat[0] = 0;
dev_priv->pipestat[1] = 0;
@@ -1553,7 +1775,7 @@ int i915_driver_irq_postinstall(struct drm_device *dev)
/* Enable in IER... */
enable_mask |= I915_DISPLAY_PORT_INTERRUPT;
/* and unmask in IMR */
- dev_priv->irq_mask_reg &= ~I915_DISPLAY_PORT_INTERRUPT;
+ dev_priv->irq_mask &= ~I915_DISPLAY_PORT_INTERRUPT;
}
/*
@@ -1571,9 +1793,9 @@ int i915_driver_irq_postinstall(struct drm_device *dev)
}
I915_WRITE(EMR, error_mask);
- I915_WRITE(IMR, dev_priv->irq_mask_reg);
+ I915_WRITE(IMR, dev_priv->irq_mask);
I915_WRITE(IER, enable_mask);
- (void) I915_READ(IER);
+ POSTING_READ(IER);
if (I915_HAS_HOTPLUG(dev)) {
u32 hotplug_en = I915_READ(PORT_HOTPLUG_EN);
diff --git a/drivers/gpu/drm/i915/i915_reg.h b/drivers/gpu/drm/i915/i915_reg.h
index cb8f4342927..8f948a6fbc1 100644
--- a/drivers/gpu/drm/i915/i915_reg.h
+++ b/drivers/gpu/drm/i915/i915_reg.h
@@ -78,6 +78,12 @@
#define GRDOM_RENDER (1<<2)
#define GRDOM_MEDIA (3<<2)
+#define GEN6_GDRST 0x941c
+#define GEN6_GRDOM_FULL (1 << 0)
+#define GEN6_GRDOM_RENDER (1 << 1)
+#define GEN6_GRDOM_MEDIA (1 << 2)
+#define GEN6_GRDOM_BLT (1 << 3)
+
/* VGA stuff */
#define VGA_ST01_MDA 0x3ba
@@ -158,12 +164,23 @@
#define MI_MEM_VIRTUAL (1 << 22) /* 965+ only */
#define MI_STORE_DWORD_INDEX MI_INSTR(0x21, 1)
#define MI_STORE_DWORD_INDEX_SHIFT 2
-#define MI_LOAD_REGISTER_IMM MI_INSTR(0x22, 1)
+/* Official intel docs are somewhat sloppy concerning MI_LOAD_REGISTER_IMM:
+ * - Always issue a MI_NOOP _before_ the MI_LOAD_REGISTER_IMM - otherwise hw
+ * simply ignores the register load under certain conditions.
+ * - One can actually load arbitrarily many registers: simply issue x
+ *   address/value pairs. Don't overdo it, though; x <= 2^4 must hold!
+ */
+#define MI_LOAD_REGISTER_IMM(x) MI_INSTR(0x22, 2*x-1)
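
An illustrative ring emission following the rules above; the register offset and value are placeholders, not taken from this patch. One address/value pair means MI_LOAD_REGISTER_IMM(1), and the mandatory MI_NOOP brings the total to four dwords.

    if (BEGIN_LP_RING(4) == 0) {
            OUT_RING(MI_NOOP);                 /* required before LRI */
            OUT_RING(MI_LOAD_REGISTER_IMM(1)); /* one reg/value pair */
            OUT_RING(0x2090);                  /* hypothetical register */
            OUT_RING(0xdeadbeef);              /* hypothetical value */
            ADVANCE_LP_RING();
    }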
#define MI_FLUSH_DW MI_INSTR(0x26, 2) /* for GEN6 */
#define MI_BATCH_BUFFER MI_INSTR(0x30, 1)
#define MI_BATCH_NON_SECURE (1)
#define MI_BATCH_NON_SECURE_I965 (1<<8)
#define MI_BATCH_BUFFER_START MI_INSTR(0x31, 0)
+#define MI_SEMAPHORE_MBOX MI_INSTR(0x16, 1) /* gen6+ */
+#define MI_SEMAPHORE_GLOBAL_GTT (1<<22)
+#define MI_SEMAPHORE_UPDATE (1<<21)
+#define MI_SEMAPHORE_COMPARE (1<<20)
+#define MI_SEMAPHORE_REGISTER (1<<18)
/*
* 3D instructions used by the kernel
*/
@@ -256,10 +273,6 @@
* Instruction and interrupt control regs
*/
#define PGTBL_ER 0x02024
-#define PRB0_TAIL 0x02030
-#define PRB0_HEAD 0x02034
-#define PRB0_START 0x02038
-#define PRB0_CTL 0x0203c
#define RENDER_RING_BASE 0x02000
#define BSD_RING_BASE 0x04000
#define GEN6_BSD_RING_BASE 0x12000
@@ -268,9 +281,13 @@
#define RING_HEAD(base) ((base)+0x34)
#define RING_START(base) ((base)+0x38)
#define RING_CTL(base) ((base)+0x3c)
+#define RING_SYNC_0(base) ((base)+0x40)
+#define RING_SYNC_1(base) ((base)+0x44)
+#define RING_MAX_IDLE(base) ((base)+0x54)
#define RING_HWS_PGA(base) ((base)+0x80)
#define RING_HWS_PGA_GEN6(base) ((base)+0x2080)
#define RING_ACTHD(base) ((base)+0x74)
+#define RING_NOPID(base) ((base)+0x94)
#define TAIL_ADDR 0x001FFFF8
#define HEAD_WRAP_COUNT 0xFFE00000
#define HEAD_WRAP_ONE 0x00200000
@@ -285,10 +302,17 @@
#define RING_INVALID 0x00000000
#define RING_WAIT_I8XX (1<<0) /* gen2, PRBx_HEAD */
#define RING_WAIT (1<<11) /* gen3+, PRBx_CTL */
+#define RING_WAIT_SEMAPHORE (1<<10) /* gen6+ */
+#if 0
+#define PRB0_TAIL 0x02030
+#define PRB0_HEAD 0x02034
+#define PRB0_START 0x02038
+#define PRB0_CTL 0x0203c
#define PRB1_TAIL 0x02040 /* 915+ only */
#define PRB1_HEAD 0x02044 /* 915+ only */
#define PRB1_START 0x02048 /* 915+ only */
#define PRB1_CTL 0x0204c /* 915+ only */
+#endif
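
The per-ring macros above subsume the old fixed PRB0 offsets; as a quick equivalence check against the defunct values parked in the #if 0 block:

    RING_HEAD(RENDER_RING_BASE)  /* 0x02000 + 0x34 = 0x02034, old PRB0_HEAD */
    RING_CTL(RENDER_RING_BASE)   /* 0x02000 + 0x3c = 0x0203c, old PRB0_CTL  */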
#define IPEIR_I965 0x02064
#define IPEHR_I965 0x02068
#define INSTDONE_I965 0x0206c
@@ -305,11 +329,42 @@
#define INSTDONE 0x02090
#define NOPID 0x02094
#define HWSTAM 0x02098
+#define VCS_INSTDONE 0x1206C
+#define VCS_IPEIR 0x12064
+#define VCS_IPEHR 0x12068
+#define VCS_ACTHD 0x12074
+#define BCS_INSTDONE 0x2206C
+#define BCS_IPEIR 0x22064
+#define BCS_IPEHR 0x22068
+#define BCS_ACTHD 0x22074
+
+#define ERROR_GEN6 0x040a0
+
+/* GM45+ chicken bits -- debug workaround bits that may be required
+ * for various sorts of correct behavior. The top 16 bits of each are
+ * the enables for writing to the corresponding low bit.
+ */
+#define _3D_CHICKEN 0x02084
+#define _3D_CHICKEN2 0x0208c
+/* Disables pipelining of read flushes past the SF-WIZ interface.
+ * Required on all Ironlake steppings according to the B-Spec, but the
+ * particular danger of not doing so is not specified.
+ */
+# define _3D_CHICKEN2_WM_READ_PIPELINED (1 << 14)
+#define _3D_CHICKEN3 0x02090
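
A sketch of the masked-write convention described above: setting a chicken bit only takes effect if its write-enable in the top half is set in the same write. The register chosen here is just an example of the pattern:

    I915_WRITE(_3D_CHICKEN2,
               (_3D_CHICKEN2_WM_READ_PIPELINED << 16) |
                _3D_CHICKEN2_WM_READ_PIPELINED);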
#define MI_MODE 0x0209c
# define VS_TIMER_DISPATCH (1 << 6)
# define MI_FLUSH_ENABLE (1 << 11)
+#define GFX_MODE 0x02520
+#define GFX_RUN_LIST_ENABLE (1<<15)
+#define GFX_TLB_INVALIDATE_ALWAYS (1<<13)
+#define GFX_SURFACE_FAULT_ENABLE (1<<12)
+#define GFX_REPLAY_MODE (1<<11)
+#define GFX_PSMI_GRANULARITY (1<<10)
+#define GFX_PPGTT_ENABLE (1<<9)
+
#define SCPD0 0x0209c /* 915+ only */
#define IER 0x020a0
#define IIR 0x020a4
@@ -461,7 +516,7 @@
#define GEN6_BSD_SLEEP_PSMI_CONTROL_IDLE_INDICATOR (1 << 3)
#define GEN6_BSD_IMR 0x120a8
-#define GEN6_BSD_IMR_USER_INTERRUPT (1 << 12)
+#define GEN6_BSD_USER_INTERRUPT (1 << 12)
#define GEN6_BSD_RNCID 0x12198
@@ -541,6 +596,18 @@
#define ILK_DISPLAY_CHICKEN1 0x42000
#define ILK_FBCQ_DIS (1<<22)
+#define ILK_PABSTRETCH_DIS (1<<21)
+
+
+/*
+ * Framebuffer compression for Sandybridge
+ *
+ * The following two registers live in the GTTMMADR (GTT MMIO) space
+ */
+#define SNB_DPFC_CTL_SA 0x100100
+#define SNB_CPU_FENCE_ENABLE (1<<29)
+#define DPFC_CPU_FENCE_OFFSET 0x100104
+
/*
* GPIO regs
@@ -900,6 +967,8 @@
*/
#define MCHBAR_MIRROR_BASE 0x10000
+#define MCHBAR_MIRROR_BASE_SNB 0x140000
+
/** 915-945 and GM965 MCH register controlling DRAM channel access */
#define DCC 0x10200
#define DCC_ADDRESSING_MODE_SINGLE_CHANNEL (0 << 0)
@@ -1119,6 +1188,10 @@
#define DDRMPLL1 0X12c20
#define PEG_BAND_GAP_DATA 0x14d68
+#define GEN6_GT_PERF_STATUS 0x145948
+#define GEN6_RP_STATE_LIMITS 0x145994
+#define GEN6_RP_STATE_CAP 0x145998
+
/*
* Logical Context regs
*/
@@ -1168,7 +1241,6 @@
#define VTOTAL(pipe) _PIPE(pipe, VTOTAL_A, VTOTAL_B)
#define VBLANK(pipe) _PIPE(pipe, VBLANK_A, VBLANK_B)
#define VSYNC(pipe) _PIPE(pipe, VSYNC_A, VSYNC_B)
-#define PIPESRC(pipe) _PIPE(pipe, PIPEASRC, PIPEBSRC)
#define BCLRPAT(pipe) _PIPE(pipe, BCLRPAT_A, BCLRPAT_B)
/* VGA port control */
@@ -2182,8 +2254,10 @@
#define PIPE_6BPC (2 << 5)
#define PIPE_12BPC (3 << 5)
+#define PIPESRC(pipe) _PIPE(pipe, PIPEASRC, PIPEBSRC)
#define PIPECONF(pipe) _PIPE(pipe, PIPEACONF, PIPEBCONF)
#define PIPEDSL(pipe) _PIPE(pipe, PIPEADSL, PIPEBDSL)
+#define PIPEFRAMEPIXEL(pipe) _PIPE(pipe, PIPEAFRAMEPIXEL, PIPEBFRAMEPIXEL)
#define DSPARB 0x70030
#define DSPARB_CSTART_MASK (0x7f << 7)
@@ -2291,6 +2365,40 @@
#define ILK_FIFO_LINE_SIZE 64
+/* watermark (WM) info for Sandybridge */
+#define SNB_DISPLAY_FIFO 128
+#define SNB_DISPLAY_MAXWM 0x7f /* bit 16:22 */
+#define SNB_DISPLAY_DFTWM 8
+#define SNB_CURSOR_FIFO 32
+#define SNB_CURSOR_MAXWM 0x1f /* bit 4:0 */
+#define SNB_CURSOR_DFTWM 8
+
+#define SNB_DISPLAY_SR_FIFO 512
+#define SNB_DISPLAY_MAX_SRWM 0x1ff /* bit 16:8 */
+#define SNB_DISPLAY_DFT_SRWM 0x3f
+#define SNB_CURSOR_SR_FIFO 64
+#define SNB_CURSOR_MAX_SRWM 0x3f /* bit 5:0 */
+#define SNB_CURSOR_DFT_SRWM 8
+
+#define SNB_FBC_MAX_SRWM 0xf /* bit 23:20 */
+
+#define SNB_FIFO_LINE_SIZE 64
+
+
+/* the register that holds the various memory latency values */
+#define SSKPD 0x5d10
+#define SSKPD_WM_MASK 0x3f
+#define SSKPD_WM0_SHIFT 0
+#define SSKPD_WM1_SHIFT 8
+#define SSKPD_WM2_SHIFT 16
+#define SSKPD_WM3_SHIFT 24
+
+#define SNB_LATENCY(shift) (I915_READ(MCHBAR_MIRROR_BASE_SNB + SSKPD) >> (shift) & SSKPD_WM_MASK)
+#define SNB_READ_WM0_LATENCY() SNB_LATENCY(SSKPD_WM0_SHIFT)
+#define SNB_READ_WM1_LATENCY() SNB_LATENCY(SSKPD_WM1_SHIFT)
+#define SNB_READ_WM2_LATENCY() SNB_LATENCY(SSKPD_WM2_SHIFT)
+#define SNB_READ_WM3_LATENCY() SNB_LATENCY(SSKPD_WM3_SHIFT)
+
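
A worked example of the latency unpacking, assuming the MCHBAR-mirrored SSKPD register reads back as 0x0c0a0604:

	SNB_READ_WM0_LATENCY() == 0x04	/* bits  5:0  */
	SNB_READ_WM1_LATENCY() == 0x06	/* bits 13:8  */
	SNB_READ_WM2_LATENCY() == 0x0a	/* bits 21:16 */
	SNB_READ_WM3_LATENCY() == 0x0c	/* bits 29:24 */

The watermark code below scales the WM0 value by 100 and the WM1-3 values by 500 when converting them to nanoseconds.
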
/*
* The two pipe frame counter registers are not synchronized, so
* reading a stable value is somewhat tricky. The following code
@@ -2351,6 +2459,10 @@
#define CURBBASE 0x700c4
#define CURBPOS 0x700c8
+#define CURCNTR(pipe) _PIPE(pipe, CURACNTR, CURBCNTR)
+#define CURBASE(pipe) _PIPE(pipe, CURABASE, CURBBASE)
+#define CURPOS(pipe) _PIPE(pipe, CURAPOS, CURBPOS)
+
/* Display A control */
#define DSPACNTR 0x70180
#define DISPLAY_PLANE_ENABLE (1<<31)
@@ -2589,6 +2701,8 @@
#define GTIER 0x4401c
#define ILK_DISPLAY_CHICKEN2 0x42004
+/* Required on all Ironlake and Sandybridge according to the B-Spec. */
+#define ILK_ELPIN_409_SELECT (1 << 25)
#define ILK_DPARB_GATE (1<<22)
#define ILK_VSDPFD_FULL (1<<21)
#define ILK_DISPLAY_CHICKEN_FUSES 0x42014
@@ -2600,6 +2714,8 @@
#define ILK_DESKTOP (1<<23)
#define ILK_DSPCLK_GATE 0x42020
#define ILK_DPARB_CLK_GATE (1<<5)
+#define ILK_DPFD_CLK_GATE (1<<7)
+
/* According to the spec, bits 7/8/9 of 0x42020 should be set to enable FBC */
#define ILK_CLK_FBC (1<<7)
#define ILK_DPFC_DIS1 (1<<8)
@@ -2679,6 +2795,7 @@
#define PCH_DPLL(pipe) _PIPE(pipe, PCH_DPLL_A, PCH_DPLL_B)
#define PCH_FPA0 0xc6040
+#define FP_CB_TUNE (0x3<<22)
#define PCH_FPA1 0xc6044
#define PCH_FPB0 0xc6048
#define PCH_FPB1 0xc604c
@@ -3063,4 +3180,66 @@
#define EDP_LINK_TRAIN_800MV_0DB_SNB_B (0x38<<22)
#define EDP_LINK_TRAIN_VOL_EMP_MASK_SNB (0x3f<<22)
+#define FORCEWAKE 0xA18C
+#define FORCEWAKE_ACK 0x130090
+
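
The force-wake pair gates access to registers in the GT power well. A minimal sketch of the get side, assuming the __gen6_force_wake_get() helper used by gen6_enable_rps() below follows the usual write-then-poll-ack pattern:

	static void gen6_force_wake_get_sketch(struct drm_i915_private *dev_priv)
	{
		I915_WRITE(FORCEWAKE, 1);
		POSTING_READ(FORCEWAKE);

		/* wait for the GT to acknowledge that it is awake */
		if (wait_for((I915_READ(FORCEWAKE_ACK) & 1) != 0, 500))
			DRM_ERROR("force wake ack timed out\n");
	}
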
+#define GEN6_RPNSWREQ 0xA008
+#define GEN6_TURBO_DISABLE (1<<31)
+#define GEN6_FREQUENCY(x) ((x)<<25)
+#define GEN6_OFFSET(x) ((x)<<19)
+#define GEN6_AGGRESSIVE_TURBO (0<<15)
+#define GEN6_RC_VIDEO_FREQ 0xA00C
+#define GEN6_RC_CONTROL 0xA090
+#define GEN6_RC_CTL_RC6pp_ENABLE (1<<16)
+#define GEN6_RC_CTL_RC6p_ENABLE (1<<17)
+#define GEN6_RC_CTL_RC6_ENABLE (1<<18)
+#define GEN6_RC_CTL_RC1e_ENABLE (1<<20)
+#define GEN6_RC_CTL_RC7_ENABLE (1<<22)
+#define GEN6_RC_CTL_EI_MODE(x) ((x)<<27)
+#define GEN6_RC_CTL_HW_ENABLE (1<<31)
+#define GEN6_RP_DOWN_TIMEOUT 0xA010
+#define GEN6_RP_INTERRUPT_LIMITS 0xA014
+#define GEN6_RPSTAT1 0xA01C
+#define GEN6_RP_CONTROL 0xA024
+#define GEN6_RP_MEDIA_TURBO (1<<11)
+#define GEN6_RP_USE_NORMAL_FREQ (1<<9)
+#define GEN6_RP_MEDIA_IS_GFX (1<<8)
+#define GEN6_RP_ENABLE (1<<7)
+#define GEN6_RP_UP_BUSY_MAX (0x2<<3)
+#define GEN6_RP_DOWN_BUSY_MIN (0x2<<0)
+#define GEN6_RP_UP_THRESHOLD 0xA02C
+#define GEN6_RP_DOWN_THRESHOLD 0xA030
+#define GEN6_RP_UP_EI 0xA068
+#define GEN6_RP_DOWN_EI 0xA06C
+#define GEN6_RP_IDLE_HYSTERSIS 0xA070
+#define GEN6_RC_STATE 0xA094
+#define GEN6_RC1_WAKE_RATE_LIMIT 0xA098
+#define GEN6_RC6_WAKE_RATE_LIMIT 0xA09C
+#define GEN6_RC6pp_WAKE_RATE_LIMIT 0xA0A0
+#define GEN6_RC_EVALUATION_INTERVAL 0xA0A8
+#define GEN6_RC_IDLE_HYSTERSIS 0xA0AC
+#define GEN6_RC_SLEEP 0xA0B0
+#define GEN6_RC1e_THRESHOLD 0xA0B4
+#define GEN6_RC6_THRESHOLD 0xA0B8
+#define GEN6_RC6p_THRESHOLD 0xA0BC
+#define GEN6_RC6pp_THRESHOLD 0xA0C0
+#define GEN6_PMINTRMSK 0xA168
+
+#define GEN6_PMISR 0x44020
+#define GEN6_PMIMR 0x44024
+#define GEN6_PMIIR 0x44028
+#define GEN6_PMIER 0x4402C
+#define GEN6_PM_MBOX_EVENT (1<<25)
+#define GEN6_PM_THERMAL_EVENT (1<<24)
+#define GEN6_PM_RP_DOWN_TIMEOUT (1<<6)
+#define GEN6_PM_RP_UP_THRESHOLD (1<<5)
+#define GEN6_PM_RP_DOWN_THRESHOLD (1<<4)
+#define GEN6_PM_RP_UP_EI_EXPIRED (1<<2)
+#define GEN6_PM_RP_DOWN_EI_EXPIRED (1<<1)
+
+#define GEN6_PCODE_MAILBOX 0x138124
+#define GEN6_PCODE_READY (1<<31)
+#define GEN6_PCODE_WRITE_MIN_FREQ_TABLE 0x9
+#define GEN6_PCODE_DATA 0x138128
+
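
The mailbox handshake is performed inline by gen6_enable_rps() later in this patch; a hypothetical helper condensing that sequence:

	static int gen6_pcode_write_sketch(struct drm_i915_private *dev_priv,
					   u8 cmd, u32 data)
	{
		/* the mailbox must be idle before a new request is queued */
		if (wait_for((I915_READ(GEN6_PCODE_MAILBOX) & GEN6_PCODE_READY) == 0, 500))
			return -ETIMEDOUT;

		I915_WRITE(GEN6_PCODE_DATA, data);
		I915_WRITE(GEN6_PCODE_MAILBOX, GEN6_PCODE_READY | cmd);

		/* pcode clears the ready bit once it has consumed the request */
		if (wait_for((I915_READ(GEN6_PCODE_MAILBOX) & GEN6_PCODE_READY) == 0, 500))
			return -ETIMEDOUT;

		return 0;
	}

Called as gen6_pcode_write_sketch(dev_priv, GEN6_PCODE_WRITE_MIN_FREQ_TABLE, 0), this matches the writes in gen6_enable_rps().
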
#endif /* _I915_REG_H_ */
diff --git a/drivers/gpu/drm/i915/i915_suspend.c b/drivers/gpu/drm/i915/i915_suspend.c
index 42729d25da5..410772466fa 100644
--- a/drivers/gpu/drm/i915/i915_suspend.c
+++ b/drivers/gpu/drm/i915/i915_suspend.c
@@ -235,6 +235,7 @@ static void i915_restore_vga(struct drm_device *dev)
static void i915_save_modeset_reg(struct drm_device *dev)
{
struct drm_i915_private *dev_priv = dev->dev_private;
+ int i;
if (drm_core_check_feature(dev, DRIVER_MODESET))
return;
@@ -367,6 +368,28 @@ static void i915_save_modeset_reg(struct drm_device *dev)
}
i915_save_palette(dev, PIPE_B);
dev_priv->savePIPEBSTAT = I915_READ(PIPEBSTAT);
+
+ /* Fences */
+ switch (INTEL_INFO(dev)->gen) {
+ case 6:
+ for (i = 0; i < 16; i++)
+ dev_priv->saveFENCE[i] = I915_READ64(FENCE_REG_SANDYBRIDGE_0 + (i * 8));
+ break;
+ case 5:
+ case 4:
+ for (i = 0; i < 16; i++)
+ dev_priv->saveFENCE[i] = I915_READ64(FENCE_REG_965_0 + (i * 8));
+ break;
+ case 3:
+ if (IS_I945G(dev) || IS_I945GM(dev) || IS_G33(dev))
+ for (i = 0; i < 8; i++)
+ dev_priv->saveFENCE[i+8] = I915_READ(FENCE_REG_945_8 + (i * 4));
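+ /* fall through - gen3 also saves the eight gen2-style fence registers below */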
+ case 2:
+ for (i = 0; i < 8; i++)
+ dev_priv->saveFENCE[i] = I915_READ(FENCE_REG_830_0 + (i * 4));
+ break;
+ }
+
return;
}
@@ -375,10 +398,33 @@ static void i915_restore_modeset_reg(struct drm_device *dev)
struct drm_i915_private *dev_priv = dev->dev_private;
int dpll_a_reg, fpa0_reg, fpa1_reg;
int dpll_b_reg, fpb0_reg, fpb1_reg;
+ int i;
if (drm_core_check_feature(dev, DRIVER_MODESET))
return;
+ /* Fences */
+ switch (INTEL_INFO(dev)->gen) {
+ case 6:
+ for (i = 0; i < 16; i++)
+ I915_WRITE64(FENCE_REG_SANDYBRIDGE_0 + (i * 8), dev_priv->saveFENCE[i]);
+ break;
+ case 5:
+ case 4:
+ for (i = 0; i < 16; i++)
+ I915_WRITE64(FENCE_REG_965_0 + (i * 8), dev_priv->saveFENCE[i]);
+ break;
+ case 3:
+ case 2:
+ if (IS_I945G(dev) || IS_I945GM(dev) || IS_G33(dev))
+ for (i = 0; i < 8; i++)
+ I915_WRITE(FENCE_REG_945_8 + (i * 4), dev_priv->saveFENCE[i+8]);
+ for (i = 0; i < 8; i++)
+ I915_WRITE(FENCE_REG_830_0 + (i * 4), dev_priv->saveFENCE[i]);
+ break;
+ }
+
if (HAS_PCH_SPLIT(dev)) {
dpll_a_reg = PCH_DPLL_A;
dpll_b_reg = PCH_DPLL_B;
@@ -771,8 +817,14 @@ int i915_save_state(struct drm_device *dev)
dev_priv->saveIMR = I915_READ(IMR);
}
- if (HAS_PCH_SPLIT(dev))
+ if (IS_IRONLAKE_M(dev))
ironlake_disable_drps(dev);
+ if (IS_GEN6(dev))
+ gen6_disable_rps(dev);
+
+ /* XXX disabling the clock gating breaks suspend on gm45
+ intel_disable_clock_gating(dev);
+ */
/* Cache mode state */
dev_priv->saveCACHE_MODE_0 = I915_READ(CACHE_MODE_0);
@@ -788,28 +840,6 @@ int i915_save_state(struct drm_device *dev)
for (i = 0; i < 3; i++)
dev_priv->saveSWF2[i] = I915_READ(SWF30 + (i << 2));
- /* Fences */
- switch (INTEL_INFO(dev)->gen) {
- case 6:
- for (i = 0; i < 16; i++)
- dev_priv->saveFENCE[i] = I915_READ64(FENCE_REG_SANDYBRIDGE_0 + (i * 8));
- break;
- case 5:
- case 4:
- for (i = 0; i < 16; i++)
- dev_priv->saveFENCE[i] = I915_READ64(FENCE_REG_965_0 + (i * 8));
- break;
- case 3:
- if (IS_I945G(dev) || IS_I945GM(dev) || IS_G33(dev))
- for (i = 0; i < 8; i++)
- dev_priv->saveFENCE[i+8] = I915_READ(FENCE_REG_945_8 + (i * 4));
- case 2:
- for (i = 0; i < 8; i++)
- dev_priv->saveFENCE[i] = I915_READ(FENCE_REG_830_0 + (i * 4));
- break;
-
- }
-
return 0;
}
@@ -823,27 +853,6 @@ int i915_restore_state(struct drm_device *dev)
/* Hardware status page */
I915_WRITE(HWS_PGA, dev_priv->saveHWS);
- /* Fences */
- switch (INTEL_INFO(dev)->gen) {
- case 6:
- for (i = 0; i < 16; i++)
- I915_WRITE64(FENCE_REG_SANDYBRIDGE_0 + (i * 8), dev_priv->saveFENCE[i]);
- break;
- case 5:
- case 4:
- for (i = 0; i < 16; i++)
- I915_WRITE64(FENCE_REG_965_0 + (i * 8), dev_priv->saveFENCE[i]);
- break;
- case 3:
- case 2:
- if (IS_I945G(dev) || IS_I945GM(dev) || IS_G33(dev))
- for (i = 0; i < 8; i++)
- I915_WRITE(FENCE_REG_945_8 + (i * 4), dev_priv->saveFENCE[i+8]);
- for (i = 0; i < 8; i++)
- I915_WRITE(FENCE_REG_830_0 + (i * 4), dev_priv->saveFENCE[i]);
- break;
- }
-
i915_restore_display(dev);
/* Interrupt state */
@@ -860,13 +869,16 @@ int i915_restore_state(struct drm_device *dev)
}
/* Clock gating state */
- intel_init_clock_gating(dev);
+ intel_enable_clock_gating(dev);
- if (HAS_PCH_SPLIT(dev)) {
+ if (IS_IRONLAKE_M(dev)) {
ironlake_enable_drps(dev);
intel_init_emon(dev);
}
+ if (IS_GEN6(dev))
+ gen6_enable_rps(dev_priv);
+
/* Cache mode state */
I915_WRITE (CACHE_MODE_0, dev_priv->saveCACHE_MODE_0 | 0xffff0000);
diff --git a/drivers/gpu/drm/i915/i915_trace.h b/drivers/gpu/drm/i915/i915_trace.h
index fea97a21cc1..7f0fc3ed61a 100644
--- a/drivers/gpu/drm/i915/i915_trace.h
+++ b/drivers/gpu/drm/i915/i915_trace.h
@@ -6,6 +6,7 @@
#include <linux/tracepoint.h>
#include <drm/drmP.h>
+#include "i915_drv.h"
#undef TRACE_SYSTEM
#define TRACE_SYSTEM i915
@@ -16,18 +17,18 @@
TRACE_EVENT(i915_gem_object_create,
- TP_PROTO(struct drm_gem_object *obj),
+ TP_PROTO(struct drm_i915_gem_object *obj),
TP_ARGS(obj),
TP_STRUCT__entry(
- __field(struct drm_gem_object *, obj)
+ __field(struct drm_i915_gem_object *, obj)
__field(u32, size)
),
TP_fast_assign(
__entry->obj = obj;
- __entry->size = obj->size;
+ __entry->size = obj->base.size;
),
TP_printk("obj=%p, size=%u", __entry->obj, __entry->size)
@@ -35,40 +36,43 @@ TRACE_EVENT(i915_gem_object_create,
TRACE_EVENT(i915_gem_object_bind,
- TP_PROTO(struct drm_gem_object *obj, u32 gtt_offset),
+ TP_PROTO(struct drm_i915_gem_object *obj, u32 gtt_offset, bool mappable),
- TP_ARGS(obj, gtt_offset),
+ TP_ARGS(obj, gtt_offset, mappable),
TP_STRUCT__entry(
- __field(struct drm_gem_object *, obj)
+ __field(struct drm_i915_gem_object *, obj)
__field(u32, gtt_offset)
+ __field(bool, mappable)
),
TP_fast_assign(
__entry->obj = obj;
__entry->gtt_offset = gtt_offset;
+ __entry->mappable = mappable;
),
- TP_printk("obj=%p, gtt_offset=%08x",
- __entry->obj, __entry->gtt_offset)
+ TP_printk("obj=%p, gtt_offset=%08x%s",
+ __entry->obj, __entry->gtt_offset,
+ __entry->mappable ? ", mappable" : "")
);
TRACE_EVENT(i915_gem_object_change_domain,
- TP_PROTO(struct drm_gem_object *obj, uint32_t old_read_domains, uint32_t old_write_domain),
+ TP_PROTO(struct drm_i915_gem_object *obj, uint32_t old_read_domains, uint32_t old_write_domain),
TP_ARGS(obj, old_read_domains, old_write_domain),
TP_STRUCT__entry(
- __field(struct drm_gem_object *, obj)
+ __field(struct drm_i915_gem_object *, obj)
__field(u32, read_domains)
__field(u32, write_domain)
),
TP_fast_assign(
__entry->obj = obj;
- __entry->read_domains = obj->read_domains | (old_read_domains << 16);
- __entry->write_domain = obj->write_domain | (old_write_domain << 16);
+ __entry->read_domains = obj->base.read_domains | (old_read_domains << 16);
+ __entry->write_domain = obj->base.write_domain | (old_write_domain << 16);
),
TP_printk("obj=%p, read=%04x, write=%04x",
@@ -76,36 +80,14 @@ TRACE_EVENT(i915_gem_object_change_domain,
__entry->read_domains, __entry->write_domain)
);
-TRACE_EVENT(i915_gem_object_get_fence,
-
- TP_PROTO(struct drm_gem_object *obj, int fence, int tiling_mode),
-
- TP_ARGS(obj, fence, tiling_mode),
-
- TP_STRUCT__entry(
- __field(struct drm_gem_object *, obj)
- __field(int, fence)
- __field(int, tiling_mode)
- ),
-
- TP_fast_assign(
- __entry->obj = obj;
- __entry->fence = fence;
- __entry->tiling_mode = tiling_mode;
- ),
-
- TP_printk("obj=%p, fence=%d, tiling=%d",
- __entry->obj, __entry->fence, __entry->tiling_mode)
-);
-
DECLARE_EVENT_CLASS(i915_gem_object,
- TP_PROTO(struct drm_gem_object *obj),
+ TP_PROTO(struct drm_i915_gem_object *obj),
TP_ARGS(obj),
TP_STRUCT__entry(
- __field(struct drm_gem_object *, obj)
+ __field(struct drm_i915_gem_object *, obj)
),
TP_fast_assign(
@@ -117,21 +99,21 @@ DECLARE_EVENT_CLASS(i915_gem_object,
DEFINE_EVENT(i915_gem_object, i915_gem_object_clflush,
- TP_PROTO(struct drm_gem_object *obj),
+ TP_PROTO(struct drm_i915_gem_object *obj),
TP_ARGS(obj)
);
DEFINE_EVENT(i915_gem_object, i915_gem_object_unbind,
- TP_PROTO(struct drm_gem_object *obj),
+ TP_PROTO(struct drm_i915_gem_object *obj),
TP_ARGS(obj)
);
DEFINE_EVENT(i915_gem_object, i915_gem_object_destroy,
- TP_PROTO(struct drm_gem_object *obj),
+ TP_PROTO(struct drm_i915_gem_object *obj),
TP_ARGS(obj)
);
@@ -263,13 +245,13 @@ DEFINE_EVENT(i915_ring, i915_ring_wait_end,
);
TRACE_EVENT(i915_flip_request,
- TP_PROTO(int plane, struct drm_gem_object *obj),
+ TP_PROTO(int plane, struct drm_i915_gem_object *obj),
TP_ARGS(plane, obj),
TP_STRUCT__entry(
__field(int, plane)
- __field(struct drm_gem_object *, obj)
+ __field(struct drm_i915_gem_object *, obj)
),
TP_fast_assign(
@@ -281,13 +263,13 @@ TRACE_EVENT(i915_flip_request,
);
TRACE_EVENT(i915_flip_complete,
- TP_PROTO(int plane, struct drm_gem_object *obj),
+ TP_PROTO(int plane, struct drm_i915_gem_object *obj),
TP_ARGS(plane, obj),
TP_STRUCT__entry(
__field(int, plane)
- __field(struct drm_gem_object *, obj)
+ __field(struct drm_i915_gem_object *, obj)
),
TP_fast_assign(
@@ -298,6 +280,29 @@ TRACE_EVENT(i915_flip_complete,
TP_printk("plane=%d, obj=%p", __entry->plane, __entry->obj)
);
+TRACE_EVENT(i915_reg_rw,
+ TP_PROTO(int cmd, uint32_t reg, uint64_t val, int len),
+
+ TP_ARGS(cmd, reg, val, len),
+
+ TP_STRUCT__entry(
+ __field(int, cmd)
+ __field(uint32_t, reg)
+ __field(uint64_t, val)
+ __field(int, len)
+ ),
+
+ TP_fast_assign(
+ __entry->cmd = cmd;
+ __entry->reg = reg;
+ __entry->val = (uint64_t)val;
+ __entry->len = len;
+ ),
+
+ TP_printk("cmd=%c, reg=0x%x, val=0x%llx, len=%d",
+ __entry->cmd, __entry->reg, __entry->val, __entry->len)
+);
+
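
A sketch of how this tracepoint is presumably emitted from the register accessors; the 'R' command character and the sizeof-based length are assumptions, not part of this patch:

	/* e.g. inside an I915_READ()-style wrapper */
	u32 val = readl(dev_priv->regs + reg);
	trace_i915_reg_rw('R', reg, val, sizeof(val));
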
#endif /* _I915_TRACE_H_ */
/* This part must be outside protection */
diff --git a/drivers/gpu/drm/i915/intel_display.c b/drivers/gpu/drm/i915/intel_display.c
index fca523288ac..0abe79fb638 100644
--- a/drivers/gpu/drm/i915/intel_display.c
+++ b/drivers/gpu/drm/i915/intel_display.c
@@ -642,26 +642,23 @@ static const intel_limit_t intel_limits_ironlake_display_port = {
.find_pll = intel_find_pll_ironlake_dp,
};
-static const intel_limit_t *intel_ironlake_limit(struct drm_crtc *crtc)
+static const intel_limit_t *intel_ironlake_limit(struct drm_crtc *crtc,
+ int refclk)
{
struct drm_device *dev = crtc->dev;
struct drm_i915_private *dev_priv = dev->dev_private;
const intel_limit_t *limit;
- int refclk = 120;
if (intel_pipe_has_type(crtc, INTEL_OUTPUT_LVDS)) {
- if (dev_priv->lvds_use_ssc && dev_priv->lvds_ssc_freq == 100)
- refclk = 100;
-
if ((I915_READ(PCH_LVDS) & LVDS_CLKB_POWER_MASK) ==
LVDS_CLKB_POWER_UP) {
/* LVDS dual channel */
- if (refclk == 100)
+ if (refclk == 100000)
limit = &intel_limits_ironlake_dual_lvds_100m;
else
limit = &intel_limits_ironlake_dual_lvds;
} else {
- if (refclk == 100)
+ if (refclk == 100000)
limit = &intel_limits_ironlake_single_lvds_100m;
else
limit = &intel_limits_ironlake_single_lvds;
@@ -702,13 +699,13 @@ static const intel_limit_t *intel_g4x_limit(struct drm_crtc *crtc)
return limit;
}
-static const intel_limit_t *intel_limit(struct drm_crtc *crtc)
+static const intel_limit_t *intel_limit(struct drm_crtc *crtc, int refclk)
{
struct drm_device *dev = crtc->dev;
const intel_limit_t *limit;
if (HAS_PCH_SPLIT(dev))
- limit = intel_ironlake_limit(crtc);
+ limit = intel_ironlake_limit(crtc, refclk);
else if (IS_G4X(dev)) {
limit = intel_g4x_limit(crtc);
} else if (IS_PINEVIEW(dev)) {
@@ -773,11 +770,10 @@ bool intel_pipe_has_type(struct drm_crtc *crtc, int type)
* the given connectors.
*/
-static bool intel_PLL_is_valid(struct drm_crtc *crtc, intel_clock_t *clock)
+static bool intel_PLL_is_valid(struct drm_device *dev,
+ const intel_limit_t *limit,
+ const intel_clock_t *clock)
{
- const intel_limit_t *limit = intel_limit (crtc);
- struct drm_device *dev = crtc->dev;
-
if (clock->p1 < limit->p1.min || limit->p1.max < clock->p1)
INTELPllInvalid ("p1 out of range\n");
if (clock->p < limit->p.min || limit->p.max < clock->p)
@@ -849,8 +845,8 @@ intel_find_best_PLL(const intel_limit_t *limit, struct drm_crtc *crtc,
int this_err;
intel_clock(dev, refclk, &clock);
-
- if (!intel_PLL_is_valid(crtc, &clock))
+ if (!intel_PLL_is_valid(dev, limit,
+ &clock))
continue;
this_err = abs(clock.dot - target);
@@ -912,9 +908,11 @@ intel_g4x_find_best_PLL(const intel_limit_t *limit, struct drm_crtc *crtc,
int this_err;
intel_clock(dev, refclk, &clock);
- if (!intel_PLL_is_valid(crtc, &clock))
+ if (!intel_PLL_is_valid(dev, limit,
+ &clock))
continue;
- this_err = abs(clock.dot - target) ;
+
+ this_err = abs(clock.dot - target);
if (this_err < err_most) {
*best_clock = clock;
err_most = this_err;
@@ -1066,13 +1064,13 @@ static void i8xx_enable_fbc(struct drm_crtc *crtc, unsigned long interval)
struct drm_i915_private *dev_priv = dev->dev_private;
struct drm_framebuffer *fb = crtc->fb;
struct intel_framebuffer *intel_fb = to_intel_framebuffer(fb);
- struct drm_i915_gem_object *obj_priv = to_intel_bo(intel_fb->obj);
+ struct drm_i915_gem_object *obj = intel_fb->obj;
struct intel_crtc *intel_crtc = to_intel_crtc(crtc);
int plane, i;
u32 fbc_ctl, fbc_ctl2;
if (fb->pitch == dev_priv->cfb_pitch &&
- obj_priv->fence_reg == dev_priv->cfb_fence &&
+ obj->fence_reg == dev_priv->cfb_fence &&
intel_crtc->plane == dev_priv->cfb_plane &&
I915_READ(FBC_CONTROL) & FBC_CTL_EN)
return;
@@ -1086,7 +1084,7 @@ static void i8xx_enable_fbc(struct drm_crtc *crtc, unsigned long interval)
/* FBC_CTL wants 64B units */
dev_priv->cfb_pitch = (dev_priv->cfb_pitch / 64) - 1;
- dev_priv->cfb_fence = obj_priv->fence_reg;
+ dev_priv->cfb_fence = obj->fence_reg;
dev_priv->cfb_plane = intel_crtc->plane;
plane = dev_priv->cfb_plane == 0 ? FBC_CTL_PLANEA : FBC_CTL_PLANEB;
@@ -1096,7 +1094,7 @@ static void i8xx_enable_fbc(struct drm_crtc *crtc, unsigned long interval)
/* Set it up... */
fbc_ctl2 = FBC_CTL_FENCE_DBL | FBC_CTL_IDLE_IMM | plane;
- if (obj_priv->tiling_mode != I915_TILING_NONE)
+ if (obj->tiling_mode != I915_TILING_NONE)
fbc_ctl2 |= FBC_CTL_CPU_FENCE;
I915_WRITE(FBC_CONTROL2, fbc_ctl2);
I915_WRITE(FBC_FENCE_OFF, crtc->y);
@@ -1107,7 +1105,7 @@ static void i8xx_enable_fbc(struct drm_crtc *crtc, unsigned long interval)
fbc_ctl |= FBC_CTL_C3_IDLE; /* 945 needs special SR handling */
fbc_ctl |= (dev_priv->cfb_pitch & 0xff) << FBC_CTL_STRIDE_SHIFT;
fbc_ctl |= (interval & 0x2fff) << FBC_CTL_INTERVAL_SHIFT;
- if (obj_priv->tiling_mode != I915_TILING_NONE)
+ if (obj->tiling_mode != I915_TILING_NONE)
fbc_ctl |= dev_priv->cfb_fence;
I915_WRITE(FBC_CONTROL, fbc_ctl);
@@ -1150,7 +1148,7 @@ static void g4x_enable_fbc(struct drm_crtc *crtc, unsigned long interval)
struct drm_i915_private *dev_priv = dev->dev_private;
struct drm_framebuffer *fb = crtc->fb;
struct intel_framebuffer *intel_fb = to_intel_framebuffer(fb);
- struct drm_i915_gem_object *obj_priv = to_intel_bo(intel_fb->obj);
+ struct drm_i915_gem_object *obj = intel_fb->obj;
struct intel_crtc *intel_crtc = to_intel_crtc(crtc);
int plane = intel_crtc->plane == 0 ? DPFC_CTL_PLANEA : DPFC_CTL_PLANEB;
unsigned long stall_watermark = 200;
@@ -1159,7 +1157,7 @@ static void g4x_enable_fbc(struct drm_crtc *crtc, unsigned long interval)
dpfc_ctl = I915_READ(DPFC_CONTROL);
if (dpfc_ctl & DPFC_CTL_EN) {
if (dev_priv->cfb_pitch == dev_priv->cfb_pitch / 64 - 1 &&
- dev_priv->cfb_fence == obj_priv->fence_reg &&
+ dev_priv->cfb_fence == obj->fence_reg &&
dev_priv->cfb_plane == intel_crtc->plane &&
dev_priv->cfb_y == crtc->y)
return;
@@ -1170,12 +1168,12 @@ static void g4x_enable_fbc(struct drm_crtc *crtc, unsigned long interval)
}
dev_priv->cfb_pitch = (dev_priv->cfb_pitch / 64) - 1;
- dev_priv->cfb_fence = obj_priv->fence_reg;
+ dev_priv->cfb_fence = obj->fence_reg;
dev_priv->cfb_plane = intel_crtc->plane;
dev_priv->cfb_y = crtc->y;
dpfc_ctl = plane | DPFC_SR_EN | DPFC_CTL_LIMIT_1X;
- if (obj_priv->tiling_mode != I915_TILING_NONE) {
+ if (obj->tiling_mode != I915_TILING_NONE) {
dpfc_ctl |= DPFC_CTL_FENCE_EN | dev_priv->cfb_fence;
I915_WRITE(DPFC_CHICKEN, DPFC_HT_MODIFY);
} else {
@@ -1221,7 +1219,7 @@ static void ironlake_enable_fbc(struct drm_crtc *crtc, unsigned long interval)
struct drm_i915_private *dev_priv = dev->dev_private;
struct drm_framebuffer *fb = crtc->fb;
struct intel_framebuffer *intel_fb = to_intel_framebuffer(fb);
- struct drm_i915_gem_object *obj_priv = to_intel_bo(intel_fb->obj);
+ struct drm_i915_gem_object *obj = intel_fb->obj;
struct intel_crtc *intel_crtc = to_intel_crtc(crtc);
int plane = intel_crtc->plane == 0 ? DPFC_CTL_PLANEA : DPFC_CTL_PLANEB;
unsigned long stall_watermark = 200;
@@ -1230,9 +1228,9 @@ static void ironlake_enable_fbc(struct drm_crtc *crtc, unsigned long interval)
dpfc_ctl = I915_READ(ILK_DPFC_CONTROL);
if (dpfc_ctl & DPFC_CTL_EN) {
if (dev_priv->cfb_pitch == dev_priv->cfb_pitch / 64 - 1 &&
- dev_priv->cfb_fence == obj_priv->fence_reg &&
+ dev_priv->cfb_fence == obj->fence_reg &&
dev_priv->cfb_plane == intel_crtc->plane &&
- dev_priv->cfb_offset == obj_priv->gtt_offset &&
+ dev_priv->cfb_offset == obj->gtt_offset &&
dev_priv->cfb_y == crtc->y)
return;
@@ -1242,14 +1240,14 @@ static void ironlake_enable_fbc(struct drm_crtc *crtc, unsigned long interval)
}
dev_priv->cfb_pitch = (dev_priv->cfb_pitch / 64) - 1;
- dev_priv->cfb_fence = obj_priv->fence_reg;
+ dev_priv->cfb_fence = obj->fence_reg;
dev_priv->cfb_plane = intel_crtc->plane;
- dev_priv->cfb_offset = obj_priv->gtt_offset;
+ dev_priv->cfb_offset = obj->gtt_offset;
dev_priv->cfb_y = crtc->y;
dpfc_ctl &= DPFC_RESERVED;
dpfc_ctl |= (plane | DPFC_CTL_LIMIT_1X);
- if (obj_priv->tiling_mode != I915_TILING_NONE) {
+ if (obj->tiling_mode != I915_TILING_NONE) {
dpfc_ctl |= (DPFC_CTL_FENCE_EN | dev_priv->cfb_fence);
I915_WRITE(ILK_DPFC_CHICKEN, DPFC_HT_MODIFY);
} else {
@@ -1260,10 +1258,16 @@ static void ironlake_enable_fbc(struct drm_crtc *crtc, unsigned long interval)
(stall_watermark << DPFC_RECOMP_STALL_WM_SHIFT) |
(interval << DPFC_RECOMP_TIMER_COUNT_SHIFT));
I915_WRITE(ILK_DPFC_FENCE_YOFF, crtc->y);
- I915_WRITE(ILK_FBC_RT_BASE, obj_priv->gtt_offset | ILK_FBC_RT_VALID);
+ I915_WRITE(ILK_FBC_RT_BASE, obj->gtt_offset | ILK_FBC_RT_VALID);
/* enable it... */
I915_WRITE(ILK_DPFC_CONTROL, dpfc_ctl | DPFC_CTL_EN);
+ if (IS_GEN6(dev)) {
+ I915_WRITE(SNB_DPFC_CTL_SA,
+ SNB_CPU_FENCE_ENABLE | dev_priv->cfb_fence);
+ I915_WRITE(DPFC_CPU_FENCE_OFFSET, crtc->y);
+ }
+
DRM_DEBUG_KMS("enabled fbc on plane %d\n", intel_crtc->plane);
}
@@ -1345,7 +1349,7 @@ static void intel_update_fbc(struct drm_device *dev)
struct intel_crtc *intel_crtc;
struct drm_framebuffer *fb;
struct intel_framebuffer *intel_fb;
- struct drm_i915_gem_object *obj_priv;
+ struct drm_i915_gem_object *obj;
DRM_DEBUG_KMS("\n");
@@ -1384,9 +1388,9 @@ static void intel_update_fbc(struct drm_device *dev)
intel_crtc = to_intel_crtc(crtc);
fb = crtc->fb;
intel_fb = to_intel_framebuffer(fb);
- obj_priv = to_intel_bo(intel_fb->obj);
+ obj = intel_fb->obj;
- if (intel_fb->obj->size > dev_priv->cfb_size) {
+ if (intel_fb->obj->base.size > dev_priv->cfb_size) {
DRM_DEBUG_KMS("framebuffer too large, disabling "
"compression\n");
dev_priv->no_fbc_reason = FBC_STOLEN_TOO_SMALL;
@@ -1410,7 +1414,7 @@ static void intel_update_fbc(struct drm_device *dev)
dev_priv->no_fbc_reason = FBC_BAD_PLANE;
goto out_disable;
}
- if (obj_priv->tiling_mode != I915_TILING_X) {
+ if (obj->tiling_mode != I915_TILING_X) {
DRM_DEBUG_KMS("framebuffer not tiled, disabling compression\n");
dev_priv->no_fbc_reason = FBC_NOT_TILED;
goto out_disable;
@@ -1433,14 +1437,13 @@ out_disable:
int
intel_pin_and_fence_fb_obj(struct drm_device *dev,
- struct drm_gem_object *obj,
- bool pipelined)
+ struct drm_i915_gem_object *obj,
+ struct intel_ring_buffer *pipelined)
{
- struct drm_i915_gem_object *obj_priv = to_intel_bo(obj);
u32 alignment;
int ret;
- switch (obj_priv->tiling_mode) {
+ switch (obj->tiling_mode) {
case I915_TILING_NONE:
if (IS_BROADWATER(dev) || IS_CRESTLINE(dev))
alignment = 128 * 1024;
@@ -1461,7 +1464,7 @@ intel_pin_and_fence_fb_obj(struct drm_device *dev,
BUG();
}
- ret = i915_gem_object_pin(obj, alignment);
+ ret = i915_gem_object_pin(obj, alignment, true);
if (ret)
return ret;
@@ -1474,9 +1477,8 @@ intel_pin_and_fence_fb_obj(struct drm_device *dev,
* framebuffer compression. For simplicity, we always install
* a fence as the cost is not that onerous.
*/
- if (obj_priv->fence_reg == I915_FENCE_REG_NONE &&
- obj_priv->tiling_mode != I915_TILING_NONE) {
- ret = i915_gem_object_get_fence_reg(obj, false);
+ if (obj->tiling_mode != I915_TILING_NONE) {
+ ret = i915_gem_object_get_fence(obj, pipelined, false);
if (ret)
goto err_unpin;
}
@@ -1497,8 +1499,7 @@ intel_pipe_set_base_atomic(struct drm_crtc *crtc, struct drm_framebuffer *fb,
struct drm_i915_private *dev_priv = dev->dev_private;
struct intel_crtc *intel_crtc = to_intel_crtc(crtc);
struct intel_framebuffer *intel_fb;
- struct drm_i915_gem_object *obj_priv;
- struct drm_gem_object *obj;
+ struct drm_i915_gem_object *obj;
int plane = intel_crtc->plane;
unsigned long Start, Offset;
u32 dspcntr;
@@ -1515,7 +1516,6 @@ intel_pipe_set_base_atomic(struct drm_crtc *crtc, struct drm_framebuffer *fb,
intel_fb = to_intel_framebuffer(fb);
obj = intel_fb->obj;
- obj_priv = to_intel_bo(obj);
reg = DSPCNTR(plane);
dspcntr = I915_READ(reg);
@@ -1540,7 +1540,7 @@ intel_pipe_set_base_atomic(struct drm_crtc *crtc, struct drm_framebuffer *fb,
return -EINVAL;
}
if (INTEL_INFO(dev)->gen >= 4) {
- if (obj_priv->tiling_mode != I915_TILING_NONE)
+ if (obj->tiling_mode != I915_TILING_NONE)
dspcntr |= DISPPLANE_TILED;
else
dspcntr &= ~DISPPLANE_TILED;
@@ -1552,7 +1552,7 @@ intel_pipe_set_base_atomic(struct drm_crtc *crtc, struct drm_framebuffer *fb,
I915_WRITE(reg, dspcntr);
- Start = obj_priv->gtt_offset;
+ Start = obj->gtt_offset;
Offset = y * fb->pitch + x * (fb->bits_per_pixel / 8);
DRM_DEBUG_KMS("Writing base %08lX %08lX %d %d %d\n",
@@ -1598,7 +1598,7 @@ intel_pipe_set_base(struct drm_crtc *crtc, int x, int y,
mutex_lock(&dev->struct_mutex);
ret = intel_pin_and_fence_fb_obj(dev,
to_intel_framebuffer(crtc->fb)->obj,
- false);
+ NULL);
if (ret != 0) {
mutex_unlock(&dev->struct_mutex);
return ret;
@@ -1606,18 +1606,17 @@ intel_pipe_set_base(struct drm_crtc *crtc, int x, int y,
if (old_fb) {
struct drm_i915_private *dev_priv = dev->dev_private;
- struct drm_gem_object *obj = to_intel_framebuffer(old_fb)->obj;
- struct drm_i915_gem_object *obj_priv = to_intel_bo(obj);
+ struct drm_i915_gem_object *obj = to_intel_framebuffer(old_fb)->obj;
wait_event(dev_priv->pending_flip_queue,
- atomic_read(&obj_priv->pending_flip) == 0);
+ atomic_read(&obj->pending_flip) == 0);
/* Big Hammer, we also need to ensure that any pending
* MI_WAIT_FOR_EVENT inside a user batch buffer on the
* current scanout is retired before unpinning the old
* framebuffer.
*/
- ret = i915_gem_object_flush_gpu(obj_priv, false);
+ ret = i915_gem_object_flush_gpu(obj, false);
if (ret) {
i915_gem_object_unpin(to_intel_framebuffer(crtc->fb)->obj);
mutex_unlock(&dev->struct_mutex);
@@ -1633,8 +1632,10 @@ intel_pipe_set_base(struct drm_crtc *crtc, int x, int y,
return ret;
}
- if (old_fb)
+ if (old_fb) {
+ intel_wait_for_vblank(dev, intel_crtc->pipe);
i915_gem_object_unpin(to_intel_framebuffer(old_fb)->obj);
+ }
mutex_unlock(&dev->struct_mutex);
@@ -1996,31 +1997,31 @@ static void intel_flush_display_plane(struct drm_device *dev,
static void intel_clear_scanline_wait(struct drm_device *dev)
{
struct drm_i915_private *dev_priv = dev->dev_private;
+ struct intel_ring_buffer *ring;
u32 tmp;
if (IS_GEN2(dev))
/* Can't break the hang on i8xx */
return;
- tmp = I915_READ(PRB0_CTL);
- if (tmp & RING_WAIT) {
- I915_WRITE(PRB0_CTL, tmp);
- POSTING_READ(PRB0_CTL);
- }
+ ring = LP_RING(dev_priv);
+ tmp = I915_READ_CTL(ring);
+ if (tmp & RING_WAIT)
+ I915_WRITE_CTL(ring, tmp);
}
static void intel_crtc_wait_for_pending_flips(struct drm_crtc *crtc)
{
- struct drm_i915_gem_object *obj_priv;
+ struct drm_i915_gem_object *obj;
struct drm_i915_private *dev_priv;
if (crtc->fb == NULL)
return;
- obj_priv = to_intel_bo(to_intel_framebuffer(crtc->fb)->obj);
+ obj = to_intel_framebuffer(crtc->fb)->obj;
dev_priv = crtc->dev->dev_private;
wait_event(dev_priv->pending_flip_queue,
- atomic_read(&obj_priv->pending_flip) == 0);
+ atomic_read(&obj->pending_flip) == 0);
}
static void ironlake_crtc_enable(struct drm_crtc *crtc)
@@ -2850,6 +2851,39 @@ static struct intel_watermark_params ironlake_cursor_srwm_info = {
ILK_FIFO_LINE_SIZE
};
+static struct intel_watermark_params sandybridge_display_wm_info = {
+ SNB_DISPLAY_FIFO,
+ SNB_DISPLAY_MAXWM,
+ SNB_DISPLAY_DFTWM,
+ 2,
+ SNB_FIFO_LINE_SIZE
+};
+
+static struct intel_watermark_params sandybridge_cursor_wm_info = {
+ SNB_CURSOR_FIFO,
+ SNB_CURSOR_MAXWM,
+ SNB_CURSOR_DFTWM,
+ 2,
+ SNB_FIFO_LINE_SIZE
+};
+
+static struct intel_watermark_params sandybridge_display_srwm_info = {
+ SNB_DISPLAY_SR_FIFO,
+ SNB_DISPLAY_MAX_SRWM,
+ SNB_DISPLAY_DFT_SRWM,
+ 2,
+ SNB_FIFO_LINE_SIZE
+};
+
+static struct intel_watermark_params sandybridge_cursor_srwm_info = {
+ SNB_CURSOR_SR_FIFO,
+ SNB_CURSOR_MAX_SRWM,
+ SNB_CURSOR_DFT_SRWM,
+ 2,
+ SNB_FIFO_LINE_SIZE
+};
+
/**
* intel_calculate_wm - calculate watermark level
* @clock_in_khz: pixel clock
@@ -3383,6 +3417,10 @@ static void i830_update_wm(struct drm_device *dev, int planea_clock, int unused,
static bool ironlake_compute_wm0(struct drm_device *dev,
int pipe,
+ const struct intel_watermark_params *display,
+ int display_latency,
+ const struct intel_watermark_params *cursor,
+ int cursor_latency,
int *plane_wm,
int *cursor_wm)
{
@@ -3400,22 +3438,20 @@ static bool ironlake_compute_wm0(struct drm_device *dev,
pixel_size = crtc->fb->bits_per_pixel / 8;
/* Use the small buffer method to calculate plane watermark */
- entries = ((clock * pixel_size / 1000) * ILK_LP0_PLANE_LATENCY) / 1000;
- entries = DIV_ROUND_UP(entries,
- ironlake_display_wm_info.cacheline_size);
- *plane_wm = entries + ironlake_display_wm_info.guard_size;
- if (*plane_wm > (int)ironlake_display_wm_info.max_wm)
- *plane_wm = ironlake_display_wm_info.max_wm;
+ entries = ((clock * pixel_size / 1000) * display_latency * 100) / 1000;
+ entries = DIV_ROUND_UP(entries, display->cacheline_size);
+ *plane_wm = entries + display->guard_size;
+ if (*plane_wm > (int)display->max_wm)
+ *plane_wm = display->max_wm;
/* Use the large buffer method to calculate cursor watermark */
line_time_us = ((htotal * 1000) / clock);
- line_count = (ILK_LP0_CURSOR_LATENCY / line_time_us + 1000) / 1000;
+ line_count = (cursor_latency * 100 / line_time_us + 1000) / 1000;
entries = line_count * 64 * pixel_size;
- entries = DIV_ROUND_UP(entries,
- ironlake_cursor_wm_info.cacheline_size);
- *cursor_wm = entries + ironlake_cursor_wm_info.guard_size;
- if (*cursor_wm > ironlake_cursor_wm_info.max_wm)
- *cursor_wm = ironlake_cursor_wm_info.max_wm;
+ entries = DIV_ROUND_UP(entries, cursor->cacheline_size);
+ *cursor_wm = entries + cursor->guard_size;
+ if (*cursor_wm > (int)cursor->max_wm)
+ *cursor_wm = (int)cursor->max_wm;
return true;
}
@@ -3430,7 +3466,12 @@ static void ironlake_update_wm(struct drm_device *dev,
int tmp;
enabled = 0;
- if (ironlake_compute_wm0(dev, 0, &plane_wm, &cursor_wm)) {
+ if (ironlake_compute_wm0(dev, 0,
+ &ironlake_display_wm_info,
+ ILK_LP0_PLANE_LATENCY,
+ &ironlake_cursor_wm_info,
+ ILK_LP0_CURSOR_LATENCY,
+ &plane_wm, &cursor_wm)) {
I915_WRITE(WM0_PIPEA_ILK,
(plane_wm << WM0_PIPE_PLANE_SHIFT) | cursor_wm);
DRM_DEBUG_KMS("FIFO watermarks For pipe A -"
@@ -3439,7 +3480,12 @@ static void ironlake_update_wm(struct drm_device *dev,
enabled++;
}
- if (ironlake_compute_wm0(dev, 1, &plane_wm, &cursor_wm)) {
+ if (ironlake_compute_wm0(dev, 1,
+ &ironlake_display_wm_info,
+ ILK_LP0_PLANE_LATENCY,
+ &ironlake_cursor_wm_info,
+ ILK_LP0_CURSOR_LATENCY,
+ &plane_wm, &cursor_wm)) {
I915_WRITE(WM0_PIPEB_ILK,
(plane_wm << WM0_PIPE_PLANE_SHIFT) | cursor_wm);
DRM_DEBUG_KMS("FIFO watermarks For pipe B -"
@@ -3453,7 +3499,7 @@ static void ironlake_update_wm(struct drm_device *dev,
* display plane is used.
*/
tmp = 0;
- if (enabled == 1 && /* XXX disabled due to buggy implmentation? */ 0) {
+ if (enabled == 1) {
unsigned long line_time_us;
int small, large, plane_fbc;
int sr_clock, entries;
@@ -3505,6 +3551,197 @@ static void ironlake_update_wm(struct drm_device *dev,
/* XXX setup WM2 and WM3 */
}
+/*
+ * Check the wm result.
+ *
+ * If any calculated watermark value is larger than the maximum value that
+ * can be programmed into the associated watermark register, that watermark
+ * must be disabled.
+ *
+ * Also returns false if all of the watermark values are 0, which
+ * sandybridge_compute_srwm sets to indicate that the latency is zero.
+ */
+static bool sandybridge_check_srwm(struct drm_device *dev, int level,
+ int fbc_wm, int display_wm, int cursor_wm)
+{
+ struct drm_i915_private *dev_priv = dev->dev_private;
+
+ DRM_DEBUG_KMS("watermark %d: display plane %d, fbc lines %d,"
+ " cursor %d\n", level, display_wm, fbc_wm, cursor_wm);
+
+ if (fbc_wm > SNB_FBC_MAX_SRWM) {
+ DRM_DEBUG_KMS("fbc watermark(%d) is too large(%d), disabling wm%d+\n",
+ fbc_wm, SNB_FBC_MAX_SRWM, level);
+
+ /* FBC has its own way to disable the FBC WM */
+ I915_WRITE(DISP_ARB_CTL,
+ I915_READ(DISP_ARB_CTL) | DISP_FBC_WM_DIS);
+ return false;
+ }
+
+ if (display_wm > SNB_DISPLAY_MAX_SRWM) {
+ DRM_DEBUG_KMS("display watermark(%d) is too large(%d), disabling wm%d+\n",
+ display_wm, SNB_DISPLAY_MAX_SRWM, level);
+ return false;
+ }
+
+ if (cursor_wm > SNB_CURSOR_MAX_SRWM) {
+ DRM_DEBUG_KMS("cursor watermark(%d) is too large(%d), disabling wm%d+\n",
+ cursor_wm, SNB_CURSOR_MAX_SRWM, level);
+ return false;
+ }
+
+ if (!(fbc_wm || display_wm || cursor_wm)) {
+ DRM_DEBUG_KMS("latency %d is 0, disabling wm%d+\n", level, level);
+ return false;
+ }
+
+ return true;
+}
+
+/*
+ * Compute the watermark values for WM[1-3].
+ */
+static bool sandybridge_compute_srwm(struct drm_device *dev, int level,
+ int hdisplay, int htotal, int pixel_size,
+ int clock, int latency_ns, int *fbc_wm,
+ int *display_wm, int *cursor_wm)
+{
+ unsigned long line_time_us;
+ int small, large;
+ int entries;
+ int line_count, line_size;
+
+ if (!latency_ns) {
+ *fbc_wm = *display_wm = *cursor_wm = 0;
+ return false;
+ }
+
+ line_time_us = (htotal * 1000) / clock;
+ line_count = (latency_ns / line_time_us + 1000) / 1000;
+ line_size = hdisplay * pixel_size;
+
+ /* Use the minimum of the small and large buffer method for primary */
+ small = ((clock * pixel_size / 1000) * latency_ns) / 1000;
+ large = line_count * line_size;
+
+ entries = DIV_ROUND_UP(min(small, large),
+ sandybridge_display_srwm_info.cacheline_size);
+ *display_wm = entries + sandybridge_display_srwm_info.guard_size;
+
+ /*
+ * The spec says:
+ * FBC WM = ((Final Primary WM * 64) / number of bytes per line) + 2
+ */
+ *fbc_wm = DIV_ROUND_UP(*display_wm * 64, line_size) + 2;
+
+ /* calculate the self-refresh watermark for display cursor */
+ entries = line_count * pixel_size * 64;
+ entries = DIV_ROUND_UP(entries,
+ sandybridge_cursor_srwm_info.cacheline_size);
+ *cursor_wm = entries + sandybridge_cursor_srwm_info.guard_size;
+
+ return sandybridge_check_srwm(dev, level,
+ *fbc_wm, *display_wm, *cursor_wm);
+}
+
+static void sandybridge_update_wm(struct drm_device *dev,
+ int planea_clock, int planeb_clock,
+ int hdisplay, int htotal,
+ int pixel_size)
+{
+ struct drm_i915_private *dev_priv = dev->dev_private;
+ int latency = SNB_READ_WM0_LATENCY();
+ int fbc_wm, plane_wm, cursor_wm, enabled;
+ int clock;
+
+ enabled = 0;
+ if (ironlake_compute_wm0(dev, 0,
+ &sandybridge_display_wm_info, latency,
+ &sandybridge_cursor_wm_info, latency,
+ &plane_wm, &cursor_wm)) {
+ I915_WRITE(WM0_PIPEA_ILK,
+ (plane_wm << WM0_PIPE_PLANE_SHIFT) | cursor_wm);
+ DRM_DEBUG_KMS("FIFO watermarks For pipe A -"
+ " plane %d, " "cursor: %d\n",
+ plane_wm, cursor_wm);
+ enabled++;
+ }
+
+ if (ironlake_compute_wm0(dev, 1,
+ &sandybridge_display_wm_info, latency,
+ &sandybridge_cursor_wm_info, latency,
+ &plane_wm, &cursor_wm)) {
+ I915_WRITE(WM0_PIPEB_ILK,
+ (plane_wm << WM0_PIPE_PLANE_SHIFT) | cursor_wm);
+ DRM_DEBUG_KMS("FIFO watermarks For pipe B -"
+ " plane %d, cursor: %d\n",
+ plane_wm, cursor_wm);
+ enabled++;
+ }
+
+ /*
+ * Calculate and update the self-refresh watermark only when one
+ * display plane is used.
+ *
+ * SNB supports 3 levels of watermarks.
+ *
+ * The WM1/WM2/WM3 watermarks have to be enabled in ascending order
+ * and disabled in descending order.
+ */
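+ /* disable all levels first, highest (WM3) to lowest, per the rule above */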
+ I915_WRITE(WM3_LP_ILK, 0);
+ I915_WRITE(WM2_LP_ILK, 0);
+ I915_WRITE(WM1_LP_ILK, 0);
+
+ if (enabled != 1)
+ return;
+
+ clock = planea_clock ? planea_clock : planeb_clock;
+
+ /* WM1 */
+ if (!sandybridge_compute_srwm(dev, 1, hdisplay, htotal, pixel_size,
+ clock, SNB_READ_WM1_LATENCY() * 500,
+ &fbc_wm, &plane_wm, &cursor_wm))
+ return;
+
+ I915_WRITE(WM1_LP_ILK,
+ WM1_LP_SR_EN |
+ (SNB_READ_WM1_LATENCY() << WM1_LP_LATENCY_SHIFT) |
+ (fbc_wm << WM1_LP_FBC_SHIFT) |
+ (plane_wm << WM1_LP_SR_SHIFT) |
+ cursor_wm);
+
+ /* WM2 */
+ if (!sandybridge_compute_srwm(dev, 2,
+ hdisplay, htotal, pixel_size,
+ clock, SNB_READ_WM2_LATENCY() * 500,
+ &fbc_wm, &plane_wm, &cursor_wm))
+ return;
+
+ I915_WRITE(WM2_LP_ILK,
+ WM2_LP_EN |
+ (SNB_READ_WM2_LATENCY() << WM1_LP_LATENCY_SHIFT) |
+ (fbc_wm << WM1_LP_FBC_SHIFT) |
+ (plane_wm << WM1_LP_SR_SHIFT) |
+ cursor_wm);
+
+ /* WM3 */
+ if (!sandybridge_compute_srwm(dev, 3,
+ hdisplay, htotal, pixel_size,
+ clock, SNB_READ_WM3_LATENCY() * 500,
+ &fbc_wm, &plane_wm, &cursor_wm))
+ return;
+
+ I915_WRITE(WM3_LP_ILK,
+ WM3_LP_EN |
+ (SNB_READ_WM3_LATENCY() << WM1_LP_LATENCY_SHIFT) |
+ (fbc_wm << WM1_LP_FBC_SHIFT) |
+ (plane_wm << WM1_LP_SR_SHIFT) |
+ cursor_wm);
+}
+
/**
* intel_update_watermarks - update FIFO watermark values based on current modes
*
@@ -3660,7 +3897,7 @@ static int intel_crtc_mode_set(struct drm_crtc *crtc,
* refclk, or FALSE. The returned values represent the clock equation:
* reflck * (5 * (m1 + 2) + (m2 + 2)) / (n + 2) / p1 / p2.
*/
- limit = intel_limit(crtc);
+ limit = intel_limit(crtc, refclk);
ok = limit->find_pll(limit, crtc, adjusted_mode->clock, refclk, &clock);
if (!ok) {
DRM_ERROR("Couldn't find PLL settings for mode!\n");
@@ -3857,6 +4094,22 @@ static int intel_crtc_mode_set(struct drm_crtc *crtc,
reduced_clock.m2;
}
+ /* Enable autotuning of the PLL clock (if permissible) */
+ if (HAS_PCH_SPLIT(dev)) {
+ int factor = 21;
+
+ if (is_lvds) {
+ if ((dev_priv->lvds_use_ssc &&
+ dev_priv->lvds_ssc_freq == 100) ||
+ (I915_READ(PCH_LVDS) & LVDS_CLKB_POWER_MASK) == LVDS_CLKB_POWER_UP)
+ factor = 25;
+ } else if (is_sdvo && is_tv)
+ factor = 20;
+
+ if (clock.m1 < factor * clock.n)
+ fp |= FP_CB_TUNE;
+ }
+
dpll = 0;
if (!HAS_PCH_SPLIT(dev))
dpll = DPLL_VGA_MODE_DIS;
@@ -4071,7 +4324,6 @@ static int intel_crtc_mode_set(struct drm_crtc *crtc,
}
if (!has_edp_encoder || intel_encoder_is_pch_edp(&has_edp_encoder->base)) {
- I915_WRITE(fp_reg, fp);
I915_WRITE(dpll_reg, dpll);
/* Wait for the clocks to stabilize. */
@@ -4089,13 +4341,13 @@ static int intel_crtc_mode_set(struct drm_crtc *crtc,
}
I915_WRITE(DPLL_MD(pipe), temp);
} else {
- /* write it again -- the BIOS does, after all */
+ /* The pixel multiplier can only be updated once the
+ * DPLL is enabled and the clocks are stable.
+ *
+ * So write it again.
+ */
I915_WRITE(dpll_reg, dpll);
}
-
- /* Wait for the clocks to stabilize. */
- POSTING_READ(dpll_reg);
- udelay(150);
}
intel_crtc->lowfreq_avail = false;
@@ -4331,15 +4583,14 @@ static void intel_crtc_update_cursor(struct drm_crtc *crtc,
}
static int intel_crtc_cursor_set(struct drm_crtc *crtc,
- struct drm_file *file_priv,
+ struct drm_file *file,
uint32_t handle,
uint32_t width, uint32_t height)
{
struct drm_device *dev = crtc->dev;
struct drm_i915_private *dev_priv = dev->dev_private;
struct intel_crtc *intel_crtc = to_intel_crtc(crtc);
- struct drm_gem_object *bo;
- struct drm_i915_gem_object *obj_priv;
+ struct drm_i915_gem_object *obj;
uint32_t addr;
int ret;
@@ -4349,7 +4600,7 @@ static int intel_crtc_cursor_set(struct drm_crtc *crtc,
if (!handle) {
DRM_DEBUG_KMS("cursor off\n");
addr = 0;
- bo = NULL;
+ obj = NULL;
mutex_lock(&dev->struct_mutex);
goto finish;
}
@@ -4360,13 +4611,11 @@ static int intel_crtc_cursor_set(struct drm_crtc *crtc,
return -EINVAL;
}
- bo = drm_gem_object_lookup(dev, file_priv, handle);
- if (!bo)
+ obj = to_intel_bo(drm_gem_object_lookup(dev, file, handle));
+ if (!obj)
return -ENOENT;
- obj_priv = to_intel_bo(bo);
-
- if (bo->size < width * height * 4) {
+ if (obj->base.size < width * height * 4) {
DRM_ERROR("buffer is to small\n");
ret = -ENOMEM;
goto fail;
@@ -4375,29 +4624,41 @@ static int intel_crtc_cursor_set(struct drm_crtc *crtc,
/* we only need to pin inside GTT if cursor is non-phy */
mutex_lock(&dev->struct_mutex);
if (!dev_priv->info->cursor_needs_physical) {
- ret = i915_gem_object_pin(bo, PAGE_SIZE);
+ if (obj->tiling_mode) {
+ DRM_ERROR("cursor cannot be tiled\n");
+ ret = -EINVAL;
+ goto fail_locked;
+ }
+
+ ret = i915_gem_object_pin(obj, PAGE_SIZE, true);
if (ret) {
DRM_ERROR("failed to pin cursor bo\n");
goto fail_locked;
}
- ret = i915_gem_object_set_to_gtt_domain(bo, 0);
+ ret = i915_gem_object_set_to_gtt_domain(obj, 0);
if (ret) {
DRM_ERROR("failed to move cursor bo into the GTT\n");
goto fail_unpin;
}
- addr = obj_priv->gtt_offset;
+ ret = i915_gem_object_put_fence(obj);
+ if (ret) {
+ DRM_ERROR("failed to move cursor bo into the GTT\n");
+ goto fail_unpin;
+ }
+
+ addr = obj->gtt_offset;
} else {
int align = IS_I830(dev) ? 16 * 1024 : 256;
- ret = i915_gem_attach_phys_object(dev, bo,
+ ret = i915_gem_attach_phys_object(dev, obj,
(intel_crtc->pipe == 0) ? I915_GEM_PHYS_CURSOR_0 : I915_GEM_PHYS_CURSOR_1,
align);
if (ret) {
DRM_ERROR("failed to attach phys object\n");
goto fail_locked;
}
- addr = obj_priv->phys_obj->handle->busaddr;
+ addr = obj->phys_obj->handle->busaddr;
}
if (IS_GEN2(dev))
@@ -4406,17 +4667,17 @@ static int intel_crtc_cursor_set(struct drm_crtc *crtc,
finish:
if (intel_crtc->cursor_bo) {
if (dev_priv->info->cursor_needs_physical) {
- if (intel_crtc->cursor_bo != bo)
+ if (intel_crtc->cursor_bo != obj)
i915_gem_detach_phys_object(dev, intel_crtc->cursor_bo);
} else
i915_gem_object_unpin(intel_crtc->cursor_bo);
- drm_gem_object_unreference(intel_crtc->cursor_bo);
+ drm_gem_object_unreference(&intel_crtc->cursor_bo->base);
}
mutex_unlock(&dev->struct_mutex);
intel_crtc->cursor_addr = addr;
- intel_crtc->cursor_bo = bo;
+ intel_crtc->cursor_bo = obj;
intel_crtc->cursor_width = width;
intel_crtc->cursor_height = height;
@@ -4424,11 +4685,11 @@ static int intel_crtc_cursor_set(struct drm_crtc *crtc,
return 0;
fail_unpin:
- i915_gem_object_unpin(bo);
+ i915_gem_object_unpin(obj);
fail_locked:
mutex_unlock(&dev->struct_mutex);
fail:
- drm_gem_object_unreference_unlocked(bo);
+ drm_gem_object_unreference_unlocked(&obj->base);
return ret;
}
@@ -4739,8 +5000,14 @@ static void intel_gpu_idle_timer(unsigned long arg)
struct drm_device *dev = (struct drm_device *)arg;
drm_i915_private_t *dev_priv = dev->dev_private;
- dev_priv->busy = false;
+ if (!list_empty(&dev_priv->mm.active_list)) {
+ /* Still processing requests, so just re-arm the timer. */
+ mod_timer(&dev_priv->idle_timer, jiffies +
+ msecs_to_jiffies(GPU_IDLE_TIMEOUT));
+ return;
+ }
+ dev_priv->busy = false;
queue_work(dev_priv->wq, &dev_priv->idle_work);
}
@@ -4751,9 +5018,17 @@ static void intel_crtc_idle_timer(unsigned long arg)
struct intel_crtc *intel_crtc = (struct intel_crtc *)arg;
struct drm_crtc *crtc = &intel_crtc->base;
drm_i915_private_t *dev_priv = crtc->dev->dev_private;
+ struct intel_framebuffer *intel_fb;
- intel_crtc->busy = false;
+ intel_fb = to_intel_framebuffer(crtc->fb);
+ if (intel_fb && intel_fb->obj->active) {
+ /* The framebuffer is still being accessed by the GPU. */
+ mod_timer(&intel_crtc->idle_timer, jiffies +
+ msecs_to_jiffies(CRTC_IDLE_TIMEOUT));
+ return;
+ }
+ intel_crtc->busy = false;
queue_work(dev_priv->wq, &dev_priv->idle_work);
}
@@ -4888,7 +5163,7 @@ static void intel_idle_update(struct work_struct *work)
* buffer), we'll also mark the display as busy, so we know to increase its
* clock frequency.
*/
-void intel_mark_busy(struct drm_device *dev, struct drm_gem_object *obj)
+void intel_mark_busy(struct drm_device *dev, struct drm_i915_gem_object *obj)
{
drm_i915_private_t *dev_priv = dev->dev_private;
struct drm_crtc *crtc = NULL;
@@ -4969,8 +5244,9 @@ static void intel_unpin_work_fn(struct work_struct *__work)
mutex_lock(&work->dev->struct_mutex);
i915_gem_object_unpin(work->old_fb_obj);
- drm_gem_object_unreference(work->pending_flip_obj);
- drm_gem_object_unreference(work->old_fb_obj);
+ drm_gem_object_unreference(&work->pending_flip_obj->base);
+ drm_gem_object_unreference(&work->old_fb_obj->base);
+
mutex_unlock(&work->dev->struct_mutex);
kfree(work);
}
@@ -4981,15 +5257,17 @@ static void do_intel_finish_page_flip(struct drm_device *dev,
drm_i915_private_t *dev_priv = dev->dev_private;
struct intel_crtc *intel_crtc = to_intel_crtc(crtc);
struct intel_unpin_work *work;
- struct drm_i915_gem_object *obj_priv;
+ struct drm_i915_gem_object *obj;
struct drm_pending_vblank_event *e;
- struct timeval now;
+ struct timeval tnow, tvbl;
unsigned long flags;
/* Ignore early vblank irqs */
if (intel_crtc == NULL)
return;
+ do_gettimeofday(&tnow);
+
spin_lock_irqsave(&dev->event_lock, flags);
work = intel_crtc->unpin_work;
if (work == NULL || !work->pending) {
@@ -4998,26 +5276,49 @@ static void do_intel_finish_page_flip(struct drm_device *dev,
}
intel_crtc->unpin_work = NULL;
- drm_vblank_put(dev, intel_crtc->pipe);
if (work->event) {
e = work->event;
- do_gettimeofday(&now);
- e->event.sequence = drm_vblank_count(dev, intel_crtc->pipe);
- e->event.tv_sec = now.tv_sec;
- e->event.tv_usec = now.tv_usec;
+ e->event.sequence = drm_vblank_count_and_time(dev, intel_crtc->pipe, &tvbl);
+
+ /* Called before the vblank count and timestamps have
+ * been updated for the vblank interval of flip
+ * completion? Need to increment the vblank count and
+ * add one videorefresh duration to the returned timestamp
+ * to account for this. We assume this happened if we
+ * get called over 0.9 frame durations after the last
+ * timestamped vblank (e.g. at 60Hz, framedur_ns is
+ * ~16.7ms, so a completion arriving more than ~15ms
+ * after the last vblank is counted against the next one).
+ *
+ * This calculation cannot be used with vrefresh rates
+ * below 5Hz (10Hz to be on the safe side) without
+ * promoting to 64-bit integers.
+ */
+ if (10 * (timeval_to_ns(&tnow) - timeval_to_ns(&tvbl)) >
+ 9 * crtc->framedur_ns) {
+ e->event.sequence++;
+ tvbl = ns_to_timeval(timeval_to_ns(&tvbl) +
+ crtc->framedur_ns);
+ }
+
+ e->event.tv_sec = tvbl.tv_sec;
+ e->event.tv_usec = tvbl.tv_usec;
+
list_add_tail(&e->base.link,
&e->base.file_priv->event_list);
wake_up_interruptible(&e->base.file_priv->event_wait);
}
+ drm_vblank_put(dev, intel_crtc->pipe);
+
spin_unlock_irqrestore(&dev->event_lock, flags);
- obj_priv = to_intel_bo(work->old_fb_obj);
+ obj = work->old_fb_obj;
+
atomic_clear_mask(1 << intel_crtc->plane,
- &obj_priv->pending_flip.counter);
- if (atomic_read(&obj_priv->pending_flip) == 0)
+ &obj->pending_flip.counter);
+ if (atomic_read(&obj->pending_flip) == 0)
wake_up(&dev_priv->pending_flip_queue);
+
schedule_work(&work->work);
trace_i915_flip_complete(intel_crtc->plane, work->pending_flip_obj);
@@ -5063,8 +5364,7 @@ static int intel_crtc_page_flip(struct drm_crtc *crtc,
struct drm_device *dev = crtc->dev;
struct drm_i915_private *dev_priv = dev->dev_private;
struct intel_framebuffer *intel_fb;
- struct drm_i915_gem_object *obj_priv;
- struct drm_gem_object *obj;
+ struct drm_i915_gem_object *obj;
struct intel_crtc *intel_crtc = to_intel_crtc(crtc);
struct intel_unpin_work *work;
unsigned long flags, offset;
@@ -5098,13 +5398,13 @@ static int intel_crtc_page_flip(struct drm_crtc *crtc,
obj = intel_fb->obj;
mutex_lock(&dev->struct_mutex);
- ret = intel_pin_and_fence_fb_obj(dev, obj, true);
+ ret = intel_pin_and_fence_fb_obj(dev, obj, LP_RING(dev_priv));
if (ret)
goto cleanup_work;
/* Reference the objects for the scheduled work. */
- drm_gem_object_reference(work->old_fb_obj);
- drm_gem_object_reference(obj);
+ drm_gem_object_reference(&work->old_fb_obj->base);
+ drm_gem_object_reference(&obj->base);
crtc->fb = fb;
@@ -5112,22 +5412,16 @@ static int intel_crtc_page_flip(struct drm_crtc *crtc,
if (ret)
goto cleanup_objs;
- /* Block clients from rendering to the new back buffer until
- * the flip occurs and the object is no longer visible.
- */
- atomic_add(1 << intel_crtc->plane,
- &to_intel_bo(work->old_fb_obj)->pending_flip);
-
- work->pending_flip_obj = obj;
- obj_priv = to_intel_bo(obj);
-
if (IS_GEN3(dev) || IS_GEN2(dev)) {
u32 flip_mask;
/* Can't queue multiple flips, so wait for the previous
* one to finish before executing the next.
*/
- BEGIN_LP_RING(2);
+ ret = BEGIN_LP_RING(2);
+ if (ret)
+ goto cleanup_objs;
+
if (intel_crtc->plane)
flip_mask = MI_WAIT_FOR_PLANE_B_FLIP;
else
@@ -5137,18 +5431,28 @@ static int intel_crtc_page_flip(struct drm_crtc *crtc,
ADVANCE_LP_RING();
}
+ work->pending_flip_obj = obj;
+
work->enable_stall_check = true;
/* Offset into the new buffer for cases of shared fbs between CRTCs */
offset = crtc->y * fb->pitch + crtc->x * fb->bits_per_pixel/8;
- BEGIN_LP_RING(4);
- switch(INTEL_INFO(dev)->gen) {
+ ret = BEGIN_LP_RING(4);
+ if (ret)
+ goto cleanup_objs;
+
+ /* Block clients from rendering to the new back buffer until
+ * the flip occurs and the object is no longer visible.
+ */
+ atomic_add(1 << intel_crtc->plane, &work->old_fb_obj->pending_flip);
+
+ switch (INTEL_INFO(dev)->gen) {
case 2:
OUT_RING(MI_DISPLAY_FLIP |
MI_DISPLAY_FLIP_PLANE(intel_crtc->plane));
OUT_RING(fb->pitch);
- OUT_RING(obj_priv->gtt_offset + offset);
+ OUT_RING(obj->gtt_offset + offset);
OUT_RING(MI_NOOP);
break;
@@ -5156,7 +5460,7 @@ static int intel_crtc_page_flip(struct drm_crtc *crtc,
OUT_RING(MI_DISPLAY_FLIP_I915 |
MI_DISPLAY_FLIP_PLANE(intel_crtc->plane));
OUT_RING(fb->pitch);
- OUT_RING(obj_priv->gtt_offset + offset);
+ OUT_RING(obj->gtt_offset + offset);
OUT_RING(MI_NOOP);
break;
@@ -5169,7 +5473,7 @@ static int intel_crtc_page_flip(struct drm_crtc *crtc,
OUT_RING(MI_DISPLAY_FLIP |
MI_DISPLAY_FLIP_PLANE(intel_crtc->plane));
OUT_RING(fb->pitch);
- OUT_RING(obj_priv->gtt_offset | obj_priv->tiling_mode);
+ OUT_RING(obj->gtt_offset | obj->tiling_mode);
/* XXX Enabling the panel-fitter across page-flip is so far
* untested on non-native modes, so ignore it for now.
@@ -5183,8 +5487,8 @@ static int intel_crtc_page_flip(struct drm_crtc *crtc,
case 6:
OUT_RING(MI_DISPLAY_FLIP |
MI_DISPLAY_FLIP_PLANE(intel_crtc->plane));
- OUT_RING(fb->pitch | obj_priv->tiling_mode);
- OUT_RING(obj_priv->gtt_offset);
+ OUT_RING(fb->pitch | obj->tiling_mode);
+ OUT_RING(obj->gtt_offset);
pf = I915_READ(pipe == 0 ? PFA_CTL_1 : PFB_CTL_1) & PF_ENABLE;
pipesrc = I915_READ(pipe == 0 ? PIPEASRC : PIPEBSRC) & 0x0fff0fff;
@@ -5200,8 +5504,8 @@ static int intel_crtc_page_flip(struct drm_crtc *crtc,
return 0;
cleanup_objs:
- drm_gem_object_unreference(work->old_fb_obj);
- drm_gem_object_unreference(obj);
+ drm_gem_object_unreference(&work->old_fb_obj->base);
+ drm_gem_object_unreference(&obj->base);
cleanup_work:
mutex_unlock(&dev->struct_mutex);
@@ -5338,7 +5642,7 @@ static void intel_crtc_init(struct drm_device *dev, int pipe)
}
int intel_get_pipe_from_crtc_id(struct drm_device *dev, void *data,
- struct drm_file *file_priv)
+ struct drm_file *file)
{
drm_i915_private_t *dev_priv = dev->dev_private;
struct drm_i915_get_pipe_from_crtc_id *pipe_from_crtc_id = data;
@@ -5505,19 +5809,19 @@ static void intel_user_framebuffer_destroy(struct drm_framebuffer *fb)
struct intel_framebuffer *intel_fb = to_intel_framebuffer(fb);
drm_framebuffer_cleanup(fb);
- drm_gem_object_unreference_unlocked(intel_fb->obj);
+ drm_gem_object_unreference_unlocked(&intel_fb->obj->base);
kfree(intel_fb);
}
static int intel_user_framebuffer_create_handle(struct drm_framebuffer *fb,
- struct drm_file *file_priv,
+ struct drm_file *file,
unsigned int *handle)
{
struct intel_framebuffer *intel_fb = to_intel_framebuffer(fb);
- struct drm_gem_object *object = intel_fb->obj;
+ struct drm_i915_gem_object *obj = intel_fb->obj;
- return drm_gem_handle_create(file_priv, object, handle);
+ return drm_gem_handle_create(file, &obj->base, handle);
}
static const struct drm_framebuffer_funcs intel_fb_funcs = {
@@ -5528,12 +5832,11 @@ static const struct drm_framebuffer_funcs intel_fb_funcs = {
int intel_framebuffer_init(struct drm_device *dev,
struct intel_framebuffer *intel_fb,
struct drm_mode_fb_cmd *mode_cmd,
- struct drm_gem_object *obj)
+ struct drm_i915_gem_object *obj)
{
- struct drm_i915_gem_object *obj_priv = to_intel_bo(obj);
int ret;
- if (obj_priv->tiling_mode == I915_TILING_Y)
+ if (obj->tiling_mode == I915_TILING_Y)
return -EINVAL;
if (mode_cmd->pitch & 63)
@@ -5565,11 +5868,11 @@ intel_user_framebuffer_create(struct drm_device *dev,
struct drm_file *filp,
struct drm_mode_fb_cmd *mode_cmd)
{
- struct drm_gem_object *obj;
+ struct drm_i915_gem_object *obj;
struct intel_framebuffer *intel_fb;
int ret;
- obj = drm_gem_object_lookup(dev, filp, mode_cmd->handle);
+ obj = to_intel_bo(drm_gem_object_lookup(dev, filp, mode_cmd->handle));
if (!obj)
return ERR_PTR(-ENOENT);
@@ -5577,10 +5880,9 @@ intel_user_framebuffer_create(struct drm_device *dev,
if (!intel_fb)
return ERR_PTR(-ENOMEM);
- ret = intel_framebuffer_init(dev, intel_fb,
- mode_cmd, obj);
+ ret = intel_framebuffer_init(dev, intel_fb, mode_cmd, obj);
if (ret) {
- drm_gem_object_unreference_unlocked(obj);
+ drm_gem_object_unreference_unlocked(&obj->base);
kfree(intel_fb);
return ERR_PTR(ret);
}
@@ -5593,10 +5895,10 @@ static const struct drm_mode_config_funcs intel_mode_funcs = {
.output_poll_changed = intel_fb_output_poll_changed,
};
-static struct drm_gem_object *
+static struct drm_i915_gem_object *
intel_alloc_context_page(struct drm_device *dev)
{
- struct drm_gem_object *ctx;
+ struct drm_i915_gem_object *ctx;
int ret;
ctx = i915_gem_alloc_object(dev, 4096);
@@ -5606,7 +5908,7 @@ intel_alloc_context_page(struct drm_device *dev)
}
mutex_lock(&dev->struct_mutex);
- ret = i915_gem_object_pin(ctx, 4096);
+ ret = i915_gem_object_pin(ctx, 4096, true);
if (ret) {
DRM_ERROR("failed to pin power context: %d\n", ret);
goto err_unref;
@@ -5624,7 +5926,7 @@ intel_alloc_context_page(struct drm_device *dev)
err_unpin:
i915_gem_object_unpin(ctx);
err_unref:
- drm_gem_object_unreference(ctx);
+ drm_gem_object_unreference(&ctx->base);
mutex_unlock(&dev->struct_mutex);
return NULL;
}
@@ -5736,6 +6038,25 @@ void ironlake_disable_drps(struct drm_device *dev)
}
+void gen6_set_rps(struct drm_device *dev, u8 val)
+{
+ struct drm_i915_private *dev_priv = dev->dev_private;
+ u32 swreq;
+
+ swreq = (val & 0x3ff) << 25;
+ I915_WRITE(GEN6_RPNSWREQ, swreq);
+}
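
Since GEN6_OFFSET(0) and GEN6_AGGRESSIVE_TURBO both expand to zero, the bare shift above produces the same request word as the GEN6_FREQUENCY(x) form used in gen6_enable_rps(); for illustration:

	/* request frequency code 10, equivalent to writing GEN6_FREQUENCY(10) */
	gen6_set_rps(dev, 10);
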
+
+void gen6_disable_rps(struct drm_device *dev)
+{
+ struct drm_i915_private *dev_priv = dev->dev_private;
+
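+ /* 1 << 31 is GEN6_TURBO_DISABLE */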
+ I915_WRITE(GEN6_RPNSWREQ, 1 << 31);
+ I915_WRITE(GEN6_PMINTRMSK, 0xffffffff);
+ I915_WRITE(GEN6_PMIER, 0);
+ I915_WRITE(GEN6_PMIIR, I915_READ(GEN6_PMIIR));
+}
+
static unsigned long intel_pxfreq(u32 vidfreq)
{
unsigned long freq;
@@ -5822,7 +6143,96 @@ void intel_init_emon(struct drm_device *dev)
dev_priv->corr = (lcfuse & LCFUSE_HIV_MASK);
}
-void intel_init_clock_gating(struct drm_device *dev)
+void gen6_enable_rps(struct drm_i915_private *dev_priv)
+{
+ int i;
+
+ /* Here begins a magic sequence of register writes to enable
+ * auto-downclocking.
+ *
+ * Perhaps there might be some value in exposing these to
+ * userspace...
+ */
+ I915_WRITE(GEN6_RC_STATE, 0);
+ __gen6_force_wake_get(dev_priv);
+
+ /* disable the counters and set deterministic thresholds */
+ I915_WRITE(GEN6_RC_CONTROL, 0);
+
+ I915_WRITE(GEN6_RC1_WAKE_RATE_LIMIT, 1000 << 16);
+ I915_WRITE(GEN6_RC6_WAKE_RATE_LIMIT, 40 << 16 | 30);
+ I915_WRITE(GEN6_RC6pp_WAKE_RATE_LIMIT, 30);
+ I915_WRITE(GEN6_RC_EVALUATION_INTERVAL, 125000);
+ I915_WRITE(GEN6_RC_IDLE_HYSTERSIS, 25);
+
+ for (i = 0; i < I915_NUM_RINGS; i++)
+ I915_WRITE(RING_MAX_IDLE(dev_priv->ring[i].mmio_base), 10);
+
+ I915_WRITE(GEN6_RC_SLEEP, 0);
+ I915_WRITE(GEN6_RC1e_THRESHOLD, 1000);
+ I915_WRITE(GEN6_RC6_THRESHOLD, 50000);
+ I915_WRITE(GEN6_RC6p_THRESHOLD, 100000);
+ I915_WRITE(GEN6_RC6pp_THRESHOLD, 64000); /* unused */
+
+ I915_WRITE(GEN6_RC_CONTROL,
+ GEN6_RC_CTL_RC6p_ENABLE |
+ GEN6_RC_CTL_RC6_ENABLE |
+ GEN6_RC_CTL_EI_MODE(1) |
+ GEN6_RC_CTL_HW_ENABLE);
+
+ I915_WRITE(GEN6_RPNSWREQ,
+ GEN6_FREQUENCY(10) |
+ GEN6_OFFSET(0) |
+ GEN6_AGGRESSIVE_TURBO);
+ I915_WRITE(GEN6_RC_VIDEO_FREQ,
+ GEN6_FREQUENCY(12));
+
+ I915_WRITE(GEN6_RP_DOWN_TIMEOUT, 1000000);
+ I915_WRITE(GEN6_RP_INTERRUPT_LIMITS,
+ 18 << 24 |
+ 6 << 16);
+ I915_WRITE(GEN6_RP_UP_THRESHOLD, 90000);
+ I915_WRITE(GEN6_RP_DOWN_THRESHOLD, 100000);
+ I915_WRITE(GEN6_RP_UP_EI, 100000);
+ I915_WRITE(GEN6_RP_DOWN_EI, 300000);
+ I915_WRITE(GEN6_RP_IDLE_HYSTERSIS, 10);
+ I915_WRITE(GEN6_RP_CONTROL,
+ GEN6_RP_MEDIA_TURBO |
+ GEN6_RP_USE_NORMAL_FREQ |
+ GEN6_RP_MEDIA_IS_GFX |
+ GEN6_RP_ENABLE |
+ GEN6_RP_UP_BUSY_MAX |
+ GEN6_RP_DOWN_BUSY_MIN);
+
+ if (wait_for((I915_READ(GEN6_PCODE_MAILBOX) & GEN6_PCODE_READY) == 0,
+ 500))
+ DRM_ERROR("timeout waiting for pcode mailbox to become idle\n");
+
+ I915_WRITE(GEN6_PCODE_DATA, 0);
+ I915_WRITE(GEN6_PCODE_MAILBOX,
+ GEN6_PCODE_READY |
+ GEN6_PCODE_WRITE_MIN_FREQ_TABLE);
+ if (wait_for((I915_READ(GEN6_PCODE_MAILBOX) & GEN6_PCODE_READY) == 0,
+ 500))
+ DRM_ERROR("timeout waiting for pcode mailbox to finish\n");
+
+ /* requires MSI enabled */
+ I915_WRITE(GEN6_PMIER,
+ GEN6_PM_MBOX_EVENT |
+ GEN6_PM_THERMAL_EVENT |
+ GEN6_PM_RP_DOWN_TIMEOUT |
+ GEN6_PM_RP_UP_THRESHOLD |
+ GEN6_PM_RP_DOWN_THRESHOLD |
+ GEN6_PM_RP_UP_EI_EXPIRED |
+ GEN6_PM_RP_DOWN_EI_EXPIRED);
+ I915_WRITE(GEN6_PMIMR, 0);
+ /* enable all PM interrupts */
+ I915_WRITE(GEN6_PMINTRMSK, 0);
+
+ __gen6_force_wake_put(dev_priv);
+}
+
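The two wait_for() blocks above follow a generic ready/write/ready handshake with the PCU mailbox. A condensed sketch of that pattern as a standalone helper (the function name and -ETIMEDOUT returns are illustrative, not from the patch):

static int gen6_pcode_write_sketch(struct drm_i915_private *dev_priv,
				   u32 mbox, u32 val)
{
	/* wait for any previous transaction to complete */
	if (wait_for((I915_READ(GEN6_PCODE_MAILBOX) & GEN6_PCODE_READY) == 0,
		     500))
		return -ETIMEDOUT;

	/* post the data, then the command with the ready bit set */
	I915_WRITE(GEN6_PCODE_DATA, val);
	I915_WRITE(GEN6_PCODE_MAILBOX, GEN6_PCODE_READY | mbox);

	/* the PCU clears GEN6_PCODE_READY once it has consumed the request */
	if (wait_for((I915_READ(GEN6_PCODE_MAILBOX) & GEN6_PCODE_READY) == 0,
		     500))
		return -ETIMEDOUT;

	return 0;
}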
+void intel_enable_clock_gating(struct drm_device *dev)
{
struct drm_i915_private *dev_priv = dev->dev_private;
@@ -5872,9 +6282,9 @@ void intel_init_clock_gating(struct drm_device *dev)
I915_WRITE(DISP_ARB_CTL,
(I915_READ(DISP_ARB_CTL) |
DISP_FBC_WM_DIS));
- I915_WRITE(WM3_LP_ILK, 0);
- I915_WRITE(WM2_LP_ILK, 0);
- I915_WRITE(WM1_LP_ILK, 0);
+ I915_WRITE(WM3_LP_ILK, 0);
+ I915_WRITE(WM2_LP_ILK, 0);
+ I915_WRITE(WM1_LP_ILK, 0);
}
/*
* Based on the document from hardware guys the following bits
@@ -5896,7 +6306,49 @@ void intel_init_clock_gating(struct drm_device *dev)
ILK_DPFC_DIS2 |
ILK_CLK_FBC);
}
- return;
+
+ I915_WRITE(ILK_DISPLAY_CHICKEN2,
+ I915_READ(ILK_DISPLAY_CHICKEN2) |
+ ILK_ELPIN_409_SELECT);
+
+ if (IS_GEN5(dev)) {
+ I915_WRITE(_3D_CHICKEN2,
+ _3D_CHICKEN2_WM_READ_PIPELINED << 16 |
+ _3D_CHICKEN2_WM_READ_PIPELINED);
+ }
+
+ if (IS_GEN6(dev)) {
+ I915_WRITE(WM3_LP_ILK, 0);
+ I915_WRITE(WM2_LP_ILK, 0);
+ I915_WRITE(WM1_LP_ILK, 0);
+
+ /*
+ * According to the spec, the following bits should be
+ * set to enable memory self-refresh and FBC:
+ *   bits 21 and 22 of 0x42000
+ *   bits 21 and 22 of 0x42004
+ *   bits 5 and 7 of 0x42020
+ *   bit 14 of 0x70180
+ *   bit 14 of 0x71180
+ */
+ I915_WRITE(ILK_DISPLAY_CHICKEN1,
+ I915_READ(ILK_DISPLAY_CHICKEN1) |
+ ILK_FBCQ_DIS | ILK_PABSTRETCH_DIS);
+ I915_WRITE(ILK_DISPLAY_CHICKEN2,
+ I915_READ(ILK_DISPLAY_CHICKEN2) |
+ ILK_DPARB_GATE | ILK_VSDPFD_FULL);
+ I915_WRITE(ILK_DSPCLK_GATE,
+ I915_READ(ILK_DSPCLK_GATE) |
+ ILK_DPARB_CLK_GATE |
+ ILK_DPFD_CLK_GATE);
+
+ I915_WRITE(DSPACNTR,
+ I915_READ(DSPACNTR) |
+ DISPPLANE_TRICKLE_FEED_DISABLE);
+ I915_WRITE(DSPBCNTR,
+ I915_READ(DSPBCNTR) |
+ DISPPLANE_TRICKLE_FEED_DISABLE);
+ }
} else if (IS_G4X(dev)) {
uint32_t dspclk_gate;
I915_WRITE(RENCLK_GATE_D1, 0);
@@ -5939,20 +6391,18 @@ void intel_init_clock_gating(struct drm_device *dev)
* GPU can automatically power down the render unit if given a page
* to save state.
*/
- if (IS_IRONLAKE_M(dev)) {
+ if (IS_IRONLAKE_M(dev) && 0) { /* XXX causes a failure during suspend */
if (dev_priv->renderctx == NULL)
dev_priv->renderctx = intel_alloc_context_page(dev);
if (dev_priv->renderctx) {
- struct drm_i915_gem_object *obj_priv;
- obj_priv = to_intel_bo(dev_priv->renderctx);
- if (obj_priv) {
- BEGIN_LP_RING(4);
+ struct drm_i915_gem_object *obj = dev_priv->renderctx;
+ if (BEGIN_LP_RING(4) == 0) {
OUT_RING(MI_SET_CONTEXT);
- OUT_RING(obj_priv->gtt_offset |
- MI_MM_SPACE_GTT |
- MI_SAVE_EXT_STATE_EN |
- MI_RESTORE_EXT_STATE_EN |
- MI_RESTORE_INHIBIT);
+ OUT_RING(obj->gtt_offset |
+ MI_MM_SPACE_GTT |
+ MI_SAVE_EXT_STATE_EN |
+ MI_RESTORE_EXT_STATE_EN |
+ MI_RESTORE_INHIBIT);
OUT_RING(MI_NOOP);
OUT_RING(MI_FLUSH);
ADVANCE_LP_RING();
@@ -5962,29 +6412,45 @@ void intel_init_clock_gating(struct drm_device *dev)
"Disable RC6\n");
}
- if (I915_HAS_RC6(dev) && drm_core_check_feature(dev, DRIVER_MODESET)) {
- struct drm_i915_gem_object *obj_priv = NULL;
-
+ if (IS_GEN4(dev) && IS_MOBILE(dev)) {
+ if (dev_priv->pwrctx == NULL)
+ dev_priv->pwrctx = intel_alloc_context_page(dev);
if (dev_priv->pwrctx) {
- obj_priv = to_intel_bo(dev_priv->pwrctx);
- } else {
- struct drm_gem_object *pwrctx;
-
- pwrctx = intel_alloc_context_page(dev);
- if (pwrctx) {
- dev_priv->pwrctx = pwrctx;
- obj_priv = to_intel_bo(pwrctx);
- }
- }
-
- if (obj_priv) {
- I915_WRITE(PWRCTXA, obj_priv->gtt_offset | PWRCTX_EN);
+ struct drm_i915_gem_object *obj = dev_priv->pwrctx;
+ I915_WRITE(PWRCTXA, obj->gtt_offset | PWRCTX_EN);
I915_WRITE(MCHBAR_RENDER_STANDBY,
I915_READ(MCHBAR_RENDER_STANDBY) & ~RCX_SW_EXIT);
}
}
}
+void intel_disable_clock_gating(struct drm_device *dev)
+{
+ struct drm_i915_private *dev_priv = dev->dev_private;
+
+ if (dev_priv->renderctx) {
+ struct drm_i915_gem_object *obj = dev_priv->renderctx;
+
+ I915_WRITE(CCID, 0);
+ POSTING_READ(CCID);
+
+ i915_gem_object_unpin(obj);
+ drm_gem_object_unreference(&obj->base);
+ dev_priv->renderctx = NULL;
+ }
+
+ if (dev_priv->pwrctx) {
+ struct drm_i915_gem_object *obj = dev_priv->pwrctx;
+
+ I915_WRITE(PWRCTXA, 0);
+ POSTING_READ(PWRCTXA);
+
+ i915_gem_object_unpin(obj);
+ drm_gem_object_unreference(&obj->base);
+ dev_priv->pwrctx = NULL;
+ }
+}
+
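One point worth noting in the new teardown above: the CCID/PWRCTXA writes are flushed with POSTING_READ() before the objects are unpinned, so the hardware has stopped referencing the context pages by the time they become movable. The intended pairing with the enable path (names as in this patch):

	/* enable during modeset init ... */
	intel_enable_clock_gating(dev);
	/* ... and tear down before the GEM objects go away */
	intel_disable_clock_gating(dev);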
/* Set up chip specific display functions */
static void intel_init_display(struct drm_device *dev)
{
@@ -5997,7 +6463,7 @@ static void intel_init_display(struct drm_device *dev)
dev_priv->display.dpms = i9xx_crtc_dpms;
if (I915_HAS_FBC(dev)) {
- if (IS_IRONLAKE_M(dev)) {
+ if (HAS_PCH_SPLIT(dev)) {
dev_priv->display.fbc_enabled = ironlake_fbc_enabled;
dev_priv->display.enable_fbc = ironlake_enable_fbc;
dev_priv->display.disable_fbc = ironlake_disable_fbc;
@@ -6046,6 +6512,14 @@ static void intel_init_display(struct drm_device *dev)
"Disable CxSR\n");
dev_priv->display.update_wm = NULL;
}
+ } else if (IS_GEN6(dev)) {
+ if (SNB_READ_WM0_LATENCY()) {
+ dev_priv->display.update_wm = sandybridge_update_wm;
+ } else {
+ DRM_DEBUG_KMS("Failed to read display plane latency. "
+ "Disable CxSR\n");
+ dev_priv->display.update_wm = NULL;
+ }
} else
dev_priv->display.update_wm = NULL;
} else if (IS_PINEVIEW(dev)) {
@@ -6211,7 +6685,7 @@ void intel_modeset_init(struct drm_device *dev)
intel_setup_outputs(dev);
- intel_init_clock_gating(dev);
+ intel_enable_clock_gating(dev);
/* Just disable it once at startup */
i915_disable_vga(dev);
@@ -6221,6 +6695,9 @@ void intel_modeset_init(struct drm_device *dev)
intel_init_emon(dev);
}
+ if (IS_GEN6(dev))
+ gen6_enable_rps(dev_priv);
+
INIT_WORK(&dev_priv->idle_work, intel_idle_update);
setup_timer(&dev_priv->idle_timer, intel_gpu_idle_timer,
(unsigned long)dev);
@@ -6252,28 +6729,12 @@ void intel_modeset_cleanup(struct drm_device *dev)
if (dev_priv->display.disable_fbc)
dev_priv->display.disable_fbc(dev);
- if (dev_priv->renderctx) {
- struct drm_i915_gem_object *obj_priv;
-
- obj_priv = to_intel_bo(dev_priv->renderctx);
- I915_WRITE(CCID, obj_priv->gtt_offset &~ CCID_EN);
- I915_READ(CCID);
- i915_gem_object_unpin(dev_priv->renderctx);
- drm_gem_object_unreference(dev_priv->renderctx);
- }
-
- if (dev_priv->pwrctx) {
- struct drm_i915_gem_object *obj_priv;
-
- obj_priv = to_intel_bo(dev_priv->pwrctx);
- I915_WRITE(PWRCTXA, obj_priv->gtt_offset &~ PWRCTX_EN);
- I915_READ(PWRCTXA);
- i915_gem_object_unpin(dev_priv->pwrctx);
- drm_gem_object_unreference(dev_priv->pwrctx);
- }
-
if (IS_IRONLAKE_M(dev))
ironlake_disable_drps(dev);
+ if (IS_GEN6(dev))
+ gen6_disable_rps(dev);
+
+ intel_disable_clock_gating(dev);
mutex_unlock(&dev->struct_mutex);
@@ -6325,3 +6786,113 @@ int intel_modeset_vga_set_state(struct drm_device *dev, bool state)
pci_write_config_word(dev_priv->bridge_dev, INTEL_GMCH_CTRL, gmch_ctrl);
return 0;
}
+
+#ifdef CONFIG_DEBUG_FS
+#include <linux/seq_file.h>
+
+struct intel_display_error_state {
+ struct intel_cursor_error_state {
+ u32 control;
+ u32 position;
+ u32 base;
+ u32 size;
+ } cursor[2];
+
+ struct intel_pipe_error_state {
+ u32 conf;
+ u32 source;
+
+ u32 htotal;
+ u32 hblank;
+ u32 hsync;
+ u32 vtotal;
+ u32 vblank;
+ u32 vsync;
+ } pipe[2];
+
+ struct intel_plane_error_state {
+ u32 control;
+ u32 stride;
+ u32 size;
+ u32 pos;
+ u32 addr;
+ u32 surface;
+ u32 tile_offset;
+ } plane[2];
+};
+
+struct intel_display_error_state *
+intel_display_capture_error_state(struct drm_device *dev)
+{
+ drm_i915_private_t *dev_priv = dev->dev_private;
+ struct intel_display_error_state *error;
+ int i;
+
+ error = kmalloc(sizeof(*error), GFP_ATOMIC);
+ if (error == NULL)
+ return NULL;
+
+ for (i = 0; i < 2; i++) {
+ error->cursor[i].control = I915_READ(CURCNTR(i));
+ error->cursor[i].position = I915_READ(CURPOS(i));
+ error->cursor[i].base = I915_READ(CURBASE(i));
+
+ error->plane[i].control = I915_READ(DSPCNTR(i));
+ error->plane[i].stride = I915_READ(DSPSTRIDE(i));
+ error->plane[i].size = I915_READ(DSPSIZE(i));
+ error->plane[i].pos = I915_READ(DSPPOS(i));
+ error->plane[i].addr = I915_READ(DSPADDR(i));
+ if (INTEL_INFO(dev)->gen >= 4) {
+ error->plane[i].surface = I915_READ(DSPSURF(i));
+ error->plane[i].tile_offset = I915_READ(DSPTILEOFF(i));
+ }
+
+ error->pipe[i].conf = I915_READ(PIPECONF(i));
+ error->pipe[i].source = I915_READ(PIPESRC(i));
+ error->pipe[i].htotal = I915_READ(HTOTAL(i));
+ error->pipe[i].hblank = I915_READ(HBLANK(i));
+ error->pipe[i].hsync = I915_READ(HSYNC(i));
+ error->pipe[i].vtotal = I915_READ(VTOTAL(i));
+ error->pipe[i].vblank = I915_READ(VBLANK(i));
+ error->pipe[i].vsync = I915_READ(VSYNC(i));
+ }
+
+ return error;
+}
+
+void
+intel_display_print_error_state(struct seq_file *m,
+ struct drm_device *dev,
+ struct intel_display_error_state *error)
+{
+ int i;
+
+ for (i = 0; i < 2; i++) {
+ seq_printf(m, "Pipe [%d]:\n", i);
+ seq_printf(m, " CONF: %08x\n", error->pipe[i].conf);
+ seq_printf(m, " SRC: %08x\n", error->pipe[i].source);
+ seq_printf(m, " HTOTAL: %08x\n", error->pipe[i].htotal);
+ seq_printf(m, " HBLANK: %08x\n", error->pipe[i].hblank);
+ seq_printf(m, " HSYNC: %08x\n", error->pipe[i].hsync);
+ seq_printf(m, " VTOTAL: %08x\n", error->pipe[i].vtotal);
+ seq_printf(m, " VBLANK: %08x\n", error->pipe[i].vblank);
+ seq_printf(m, " VSYNC: %08x\n", error->pipe[i].vsync);
+
+ seq_printf(m, "Plane [%d]:\n", i);
+ seq_printf(m, " CNTR: %08x\n", error->plane[i].control);
+ seq_printf(m, " STRIDE: %08x\n", error->plane[i].stride);
+ seq_printf(m, " SIZE: %08x\n", error->plane[i].size);
+ seq_printf(m, " POS: %08x\n", error->plane[i].pos);
+ seq_printf(m, " ADDR: %08x\n", error->plane[i].addr);
+ if (INTEL_INFO(dev)->gen >= 4) {
+ seq_printf(m, " SURF: %08x\n", error->plane[i].surface);
+ seq_printf(m, " TILEOFF: %08x\n", error->plane[i].tile_offset);
+ }
+
+ seq_printf(m, "Cursor [%d]:\n", i);
+ seq_printf(m, " CNTR: %08x\n", error->cursor[i].control);
+ seq_printf(m, " POS: %08x\n", error->cursor[i].position);
+ seq_printf(m, " BASE: %08x\n", error->cursor[i].base);
+ }
+}
+#endif
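For context, a sketch of how this capture/print pair is typically consumed from a debugfs show callback; the plumbing here is hypothetical (the real hook-up lives in i915_debugfs.c as part of the error-state dump):

static int display_error_show_sketch(struct seq_file *m, void *unused)
{
	struct drm_device *dev = m->private;	/* assumption: dev in ->private */
	struct intel_display_error_state *error;

	/* GFP_ATOMIC capture, so this is safe from the error IRQ path too */
	error = intel_display_capture_error_state(dev);
	if (error == NULL)
		return 0;

	intel_display_print_error_state(m, dev, error);
	kfree(error);
	return 0;
}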
diff --git a/drivers/gpu/drm/i915/intel_dp.c b/drivers/gpu/drm/i915/intel_dp.c
index 864417cffe9..1dc60408d5b 100644
--- a/drivers/gpu/drm/i915/intel_dp.c
+++ b/drivers/gpu/drm/i915/intel_dp.c
@@ -1442,8 +1442,7 @@ intel_dp_link_down(struct intel_dp *intel_dp)
/* Changes to enable or select take place the vblank
* after being written.
*/
- intel_wait_for_vblank(intel_dp->base.base.dev,
- intel_crtc->pipe);
+ intel_wait_for_vblank(dev, intel_crtc->pipe);
}
I915_WRITE(intel_dp->output_reg, DP & ~DP_PORT_EN);
diff --git a/drivers/gpu/drm/i915/intel_drv.h b/drivers/gpu/drm/i915/intel_drv.h
index e52c6125bb1..d782ad9fd6d 100644
--- a/drivers/gpu/drm/i915/intel_drv.h
+++ b/drivers/gpu/drm/i915/intel_drv.h
@@ -127,7 +127,7 @@ intel_mode_get_pixel_multiplier(const struct drm_display_mode *mode)
struct intel_framebuffer {
struct drm_framebuffer base;
- struct drm_gem_object *obj;
+ struct drm_i915_gem_object *obj;
};
struct intel_fbdev {
@@ -166,7 +166,7 @@ struct intel_crtc {
struct intel_unpin_work *unpin_work;
int fdi_lanes;
- struct drm_gem_object *cursor_bo;
+ struct drm_i915_gem_object *cursor_bo;
uint32_t cursor_addr;
int16_t cursor_x, cursor_y;
int16_t cursor_width, cursor_height;
@@ -220,8 +220,8 @@ intel_get_crtc_for_pipe(struct drm_device *dev, int pipe)
struct intel_unpin_work {
struct work_struct work;
struct drm_device *dev;
- struct drm_gem_object *old_fb_obj;
- struct drm_gem_object *pending_flip_obj;
+ struct drm_i915_gem_object *old_fb_obj;
+ struct drm_i915_gem_object *pending_flip_obj;
struct drm_pending_vblank_event *event;
int pending;
bool enable_stall_check;
@@ -236,7 +236,8 @@ void intel_dip_infoframe_csum(struct dip_infoframe *avi_if);
extern bool intel_sdvo_init(struct drm_device *dev, int output_device);
extern void intel_dvo_init(struct drm_device *dev);
extern void intel_tv_init(struct drm_device *dev);
-extern void intel_mark_busy(struct drm_device *dev, struct drm_gem_object *obj);
+extern void intel_mark_busy(struct drm_device *dev,
+ struct drm_i915_gem_object *obj);
extern bool intel_lvds_init(struct drm_device *dev);
extern void intel_dp_init(struct drm_device *dev, int dp_reg);
void
@@ -293,19 +294,22 @@ extern void intel_crtc_fb_gamma_set(struct drm_crtc *crtc, u16 red, u16 green,
u16 blue, int regno);
extern void intel_crtc_fb_gamma_get(struct drm_crtc *crtc, u16 *red, u16 *green,
u16 *blue, int regno);
-extern void intel_init_clock_gating(struct drm_device *dev);
+extern void intel_enable_clock_gating(struct drm_device *dev);
+extern void intel_disable_clock_gating(struct drm_device *dev);
extern void ironlake_enable_drps(struct drm_device *dev);
extern void ironlake_disable_drps(struct drm_device *dev);
+extern void gen6_enable_rps(struct drm_i915_private *dev_priv);
+extern void gen6_disable_rps(struct drm_device *dev);
extern void intel_init_emon(struct drm_device *dev);
extern int intel_pin_and_fence_fb_obj(struct drm_device *dev,
- struct drm_gem_object *obj,
- bool pipelined);
+ struct drm_i915_gem_object *obj,
+ struct intel_ring_buffer *pipelined);
extern int intel_framebuffer_init(struct drm_device *dev,
struct intel_framebuffer *ifb,
struct drm_mode_fb_cmd *mode_cmd,
- struct drm_gem_object *obj);
+ struct drm_i915_gem_object *obj);
extern int intel_fbdev_init(struct drm_device *dev);
extern void intel_fbdev_fini(struct drm_device *dev);
diff --git a/drivers/gpu/drm/i915/intel_fb.c b/drivers/gpu/drm/i915/intel_fb.c
index af2a1dddc28..701e830d001 100644
--- a/drivers/gpu/drm/i915/intel_fb.c
+++ b/drivers/gpu/drm/i915/intel_fb.c
@@ -65,10 +65,9 @@ static int intelfb_create(struct intel_fbdev *ifbdev,
struct fb_info *info;
struct drm_framebuffer *fb;
struct drm_mode_fb_cmd mode_cmd;
- struct drm_gem_object *fbo = NULL;
- struct drm_i915_gem_object *obj_priv;
+ struct drm_i915_gem_object *obj;
struct device *device = &dev->pdev->dev;
- int size, ret, mmio_bar = IS_GEN2(dev) ? 1 : 0;
+ int size, ret;
/* we don't do packed 24bpp */
if (sizes->surface_bpp == 24)
@@ -83,18 +82,17 @@ static int intelfb_create(struct intel_fbdev *ifbdev,
size = mode_cmd.pitch * mode_cmd.height;
size = ALIGN(size, PAGE_SIZE);
- fbo = i915_gem_alloc_object(dev, size);
- if (!fbo) {
+ obj = i915_gem_alloc_object(dev, size);
+ if (!obj) {
DRM_ERROR("failed to allocate framebuffer\n");
ret = -ENOMEM;
goto out;
}
- obj_priv = to_intel_bo(fbo);
mutex_lock(&dev->struct_mutex);
/* Flush everything out, we'll be doing GTT only from now on */
- ret = intel_pin_and_fence_fb_obj(dev, fbo, false);
+ ret = intel_pin_and_fence_fb_obj(dev, obj, NULL);
if (ret) {
DRM_ERROR("failed to pin fb: %d\n", ret);
goto out_unref;
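To make the size computation above concrete (numbers purely illustrative):

	/* e.g. 1920x1080 at 32bpp: pitch = 1920 * 4 = 7680 bytes,
	 * size = 7680 * 1080 = 8294400 bytes = exactly 2025 pages, so
	 * ALIGN(size, PAGE_SIZE) is a no-op here; a mode whose
	 * pitch * height is not page-sized rounds up to the next page. */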
@@ -108,7 +106,7 @@ static int intelfb_create(struct intel_fbdev *ifbdev,
info->par = ifbdev;
- ret = intel_framebuffer_init(dev, &ifbdev->ifb, &mode_cmd, fbo);
+ ret = intel_framebuffer_init(dev, &ifbdev->ifb, &mode_cmd, obj);
if (ret)
goto out_unpin;
@@ -134,11 +132,10 @@ static int intelfb_create(struct intel_fbdev *ifbdev,
else
info->apertures->ranges[0].size = pci_resource_len(dev->pdev, 0);
- info->fix.smem_start = dev->mode_config.fb_base + obj_priv->gtt_offset;
+ info->fix.smem_start = dev->mode_config.fb_base + obj->gtt_offset;
info->fix.smem_len = size;
- info->screen_base = ioremap_wc(dev->agp->base + obj_priv->gtt_offset,
- size);
+ info->screen_base = ioremap_wc(dev->agp->base + obj->gtt_offset, size);
if (!info->screen_base) {
ret = -ENOSPC;
goto out_unpin;
@@ -153,13 +150,8 @@ static int intelfb_create(struct intel_fbdev *ifbdev,
// memset(info->screen_base, 0, size);
- drm_fb_helper_fill_fix(info, fb->pitch, fb->depth);
drm_fb_helper_fill_var(info, &ifbdev->helper, sizes->fb_width, sizes->fb_height);
- /* FIXME: we really shouldn't expose mmio space at all */
- info->fix.mmio_start = pci_resource_start(dev->pdev, mmio_bar);
- info->fix.mmio_len = pci_resource_len(dev->pdev, mmio_bar);
-
info->pixmap.size = 64*1024;
info->pixmap.buf_align = 8;
info->pixmap.access_align = 32;
@@ -168,7 +160,7 @@ static int intelfb_create(struct intel_fbdev *ifbdev,
DRM_DEBUG_KMS("allocated %dx%d fb: 0x%08x, bo %p\n",
fb->width, fb->height,
- obj_priv->gtt_offset, fbo);
+ obj->gtt_offset, obj);
mutex_unlock(&dev->struct_mutex);
@@ -176,9 +168,9 @@ static int intelfb_create(struct intel_fbdev *ifbdev,
return 0;
out_unpin:
- i915_gem_object_unpin(fbo);
+ i915_gem_object_unpin(obj);
out_unref:
- drm_gem_object_unreference(fbo);
+ drm_gem_object_unreference(&obj->base);
mutex_unlock(&dev->struct_mutex);
out:
return ret;
@@ -225,7 +217,7 @@ static void intel_fbdev_destroy(struct drm_device *dev,
drm_framebuffer_cleanup(&ifb->base);
if (ifb->obj) {
- drm_gem_object_unreference_unlocked(ifb->obj);
+ drm_gem_object_unreference_unlocked(&ifb->obj->base);
ifb->obj = NULL;
}
}
diff --git a/drivers/gpu/drm/i915/intel_i2c.c b/drivers/gpu/drm/i915/intel_i2c.c
index 3dba086e7ee..58040f68ed7 100644
--- a/drivers/gpu/drm/i915/intel_i2c.c
+++ b/drivers/gpu/drm/i915/intel_i2c.c
@@ -85,8 +85,9 @@ static u32 get_reserved(struct intel_gpio *gpio)
/* On most chips, these bits must be preserved in software. */
if (!IS_I830(dev) && !IS_845G(dev))
- reserved = I915_READ(gpio->reg) & (GPIO_DATA_PULLUP_DISABLE |
- GPIO_CLOCK_PULLUP_DISABLE);
+ reserved = I915_READ_NOTRACE(gpio->reg) &
+ (GPIO_DATA_PULLUP_DISABLE |
+ GPIO_CLOCK_PULLUP_DISABLE);
return reserved;
}
@@ -96,9 +97,9 @@ static int get_clock(void *data)
struct intel_gpio *gpio = data;
struct drm_i915_private *dev_priv = gpio->dev_priv;
u32 reserved = get_reserved(gpio);
- I915_WRITE(gpio->reg, reserved | GPIO_CLOCK_DIR_MASK);
- I915_WRITE(gpio->reg, reserved);
- return (I915_READ(gpio->reg) & GPIO_CLOCK_VAL_IN) != 0;
+ I915_WRITE_NOTRACE(gpio->reg, reserved | GPIO_CLOCK_DIR_MASK);
+ I915_WRITE_NOTRACE(gpio->reg, reserved);
+ return (I915_READ_NOTRACE(gpio->reg) & GPIO_CLOCK_VAL_IN) != 0;
}
static int get_data(void *data)
@@ -106,9 +107,9 @@ static int get_data(void *data)
struct intel_gpio *gpio = data;
struct drm_i915_private *dev_priv = gpio->dev_priv;
u32 reserved = get_reserved(gpio);
- I915_WRITE(gpio->reg, reserved | GPIO_DATA_DIR_MASK);
- I915_WRITE(gpio->reg, reserved);
- return (I915_READ(gpio->reg) & GPIO_DATA_VAL_IN) != 0;
+ I915_WRITE_NOTRACE(gpio->reg, reserved | GPIO_DATA_DIR_MASK);
+ I915_WRITE_NOTRACE(gpio->reg, reserved);
+ return (I915_READ_NOTRACE(gpio->reg) & GPIO_DATA_VAL_IN) != 0;
}
static void set_clock(void *data, int state_high)
@@ -124,7 +125,7 @@ static void set_clock(void *data, int state_high)
clock_bits = GPIO_CLOCK_DIR_OUT | GPIO_CLOCK_DIR_MASK |
GPIO_CLOCK_VAL_MASK;
- I915_WRITE(gpio->reg, reserved | clock_bits);
+ I915_WRITE_NOTRACE(gpio->reg, reserved | clock_bits);
POSTING_READ(gpio->reg);
}
@@ -141,7 +142,7 @@ static void set_data(void *data, int state_high)
data_bits = GPIO_DATA_DIR_OUT | GPIO_DATA_DIR_MASK |
GPIO_DATA_VAL_MASK;
- I915_WRITE(gpio->reg, reserved | data_bits);
+ I915_WRITE_NOTRACE(gpio->reg, reserved | data_bits);
POSTING_READ(gpio->reg);
}
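These four callbacks exist to feed the kernel's generic bit-banging engine. A minimal sketch of the wiring (struct i2c_algo_bit_data comes from <linux/i2c-algo-bit.h>; the algo member and the timing constant are assumptions for illustration, not taken from this hunk):

#include <linux/i2c-algo-bit.h>

static void intel_gpio_bitbang_setup_sketch(struct intel_gpio *gpio)
{
	struct i2c_algo_bit_data *algo = &gpio->algo;	/* assumed member */

	algo->setsda = set_data;	/* drive SDA through the GPIO reg */
	algo->setscl = set_clock;	/* drive SCL through the GPIO reg */
	algo->getsda = get_data;	/* sample SDA */
	algo->getscl = get_clock;	/* sample SCL */
	algo->udelay = 10;		/* illustrative bit time */
	algo->data = gpio;		/* handed back as 'data' above */
}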
diff --git a/drivers/gpu/drm/i915/intel_lvds.c b/drivers/gpu/drm/i915/intel_lvds.c
index 25bcedf386f..aa2307080be 100644
--- a/drivers/gpu/drm/i915/intel_lvds.c
+++ b/drivers/gpu/drm/i915/intel_lvds.c
@@ -304,14 +304,13 @@ static bool intel_lvds_mode_fixup(struct drm_encoder *encoder,
u32 scaled_width = adjusted_mode->hdisplay * mode->vdisplay;
u32 scaled_height = mode->hdisplay * adjusted_mode->vdisplay;
- pfit_control |= PFIT_ENABLE;
/* 965+ is easy, it does everything in hw */
if (scaled_width > scaled_height)
- pfit_control |= PFIT_SCALING_PILLAR;
+ pfit_control |= PFIT_ENABLE | PFIT_SCALING_PILLAR;
else if (scaled_width < scaled_height)
- pfit_control |= PFIT_SCALING_LETTER;
- else
- pfit_control |= PFIT_SCALING_AUTO;
+ pfit_control |= PFIT_ENABLE | PFIT_SCALING_LETTER;
+ else if (adjusted_mode->hdisplay != mode->hdisplay)
+ pfit_control |= PFIT_ENABLE | PFIT_SCALING_AUTO;
} else {
u32 scaled_width = adjusted_mode->hdisplay * mode->vdisplay;
u32 scaled_height = mode->hdisplay * adjusted_mode->vdisplay;
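An aside on the pillar/letter selection in the hunk above: comparing adjusted_mode->hdisplay * mode->vdisplay against mode->hdisplay * adjusted_mode->vdisplay is a cross-multiplication, i.e. an integer-safe way to compare the two aspect ratios without dividing:

	/* adj.h * mode.v > mode.h * adj.v  <=>  adj.h/adj.v > mode.h/mode.v
	 * e.g. a 1024x768 (4:3) mode on a 1366x768 (16:9) panel:
	 * 1366*768 = 1049088 > 1024*768 = 786432, so the wider panel
	 * pillarboxes the narrower mode. */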
@@ -358,13 +357,17 @@ static bool intel_lvds_mode_fixup(struct drm_encoder *encoder,
* Full scaling, even if it changes the aspect ratio.
* Fortunately this is all done for us in hw.
*/
- pfit_control |= PFIT_ENABLE;
- if (INTEL_INFO(dev)->gen >= 4)
- pfit_control |= PFIT_SCALING_AUTO;
- else
- pfit_control |= (VERT_AUTO_SCALE | HORIZ_AUTO_SCALE |
- VERT_INTERP_BILINEAR |
- HORIZ_INTERP_BILINEAR);
+ if (mode->vdisplay != adjusted_mode->vdisplay ||
+ mode->hdisplay != adjusted_mode->hdisplay) {
+ pfit_control |= PFIT_ENABLE;
+ if (INTEL_INFO(dev)->gen >= 4)
+ pfit_control |= PFIT_SCALING_AUTO;
+ else
+ pfit_control |= (VERT_AUTO_SCALE |
+ VERT_INTERP_BILINEAR |
+ HORIZ_AUTO_SCALE |
+ HORIZ_INTERP_BILINEAR);
+ }
break;
default:
@@ -914,6 +917,8 @@ bool intel_lvds_init(struct drm_device *dev)
intel_encoder->clone_mask = (1 << INTEL_LVDS_CLONE_BIT);
intel_encoder->crtc_mask = (1 << 1);
+ if (INTEL_INFO(dev)->gen >= 5)
+ intel_encoder->crtc_mask |= (1 << 0);
drm_encoder_helper_add(encoder, &intel_lvds_helper_funcs);
drm_connector_helper_add(connector, &intel_lvds_connector_helper_funcs);
connector->display_info.subpixel_order = SubPixelHorizontalRGB;
@@ -1019,10 +1024,18 @@ bool intel_lvds_init(struct drm_device *dev)
out:
if (HAS_PCH_SPLIT(dev)) {
u32 pwm;
- /* make sure PWM is enabled */
+
+ pipe = (I915_READ(PCH_LVDS) & LVDS_PIPEB_SELECT) ? 1 : 0;
+
+ /* make sure PWM is enabled and locked to the LVDS pipe */
pwm = I915_READ(BLC_PWM_CPU_CTL2);
- pwm |= (PWM_ENABLE | PWM_PIPE_B);
- I915_WRITE(BLC_PWM_CPU_CTL2, pwm);
+ if (pipe == 0 && (pwm & PWM_PIPE_B))
+ I915_WRITE(BLC_PWM_CPU_CTL2, pwm & ~PWM_ENABLE);
+ if (pipe)
+ pwm |= PWM_PIPE_B;
+ else
+ pwm &= ~PWM_PIPE_B;
+ I915_WRITE(BLC_PWM_CPU_CTL2, pwm | PWM_ENABLE);
pwm = I915_READ(BLC_PWM_PCH_CTL1);
pwm |= PWM_PCH_ENABLE;
diff --git a/drivers/gpu/drm/i915/intel_opregion.c b/drivers/gpu/drm/i915/intel_opregion.c
index 9b0d9a867ae..f295a7aaadf 100644
--- a/drivers/gpu/drm/i915/intel_opregion.c
+++ b/drivers/gpu/drm/i915/intel_opregion.c
@@ -273,14 +273,8 @@ void intel_opregion_enable_asle(struct drm_device *dev)
struct opregion_asle *asle = dev_priv->opregion.asle;
if (asle) {
- if (IS_MOBILE(dev)) {
- unsigned long irqflags;
-
- spin_lock_irqsave(&dev_priv->user_irq_lock, irqflags);
+ if (IS_MOBILE(dev))
intel_enable_asle(dev);
- spin_unlock_irqrestore(&dev_priv->user_irq_lock,
- irqflags);
- }
asle->tche = ASLE_ALS_EN | ASLE_BLC_EN | ASLE_PFIT_EN |
ASLE_PFMB_EN;
diff --git a/drivers/gpu/drm/i915/intel_overlay.c b/drivers/gpu/drm/i915/intel_overlay.c
index 02ff0a481f4..3fbb98b948d 100644
--- a/drivers/gpu/drm/i915/intel_overlay.c
+++ b/drivers/gpu/drm/i915/intel_overlay.c
@@ -221,15 +221,16 @@ static int intel_overlay_do_wait_request(struct intel_overlay *overlay,
int ret;
BUG_ON(overlay->last_flip_req);
- overlay->last_flip_req =
- i915_add_request(dev, NULL, request, &dev_priv->render_ring);
- if (overlay->last_flip_req == 0)
- return -ENOMEM;
-
+ ret = i915_add_request(dev, NULL, request, LP_RING(dev_priv));
+ if (ret) {
+ kfree(request);
+ return ret;
+ }
+ overlay->last_flip_req = request->seqno;
overlay->flip_tail = tail;
ret = i915_do_wait_request(dev,
overlay->last_flip_req, true,
- &dev_priv->render_ring);
+ LP_RING(dev_priv));
if (ret)
return ret;
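The shape introduced here recurs throughout this file: the caller owns the request until i915_add_request() succeeds, and BEGIN_LP_RING() can now fail (e.g. while waiting for ring space), so both must be checked and the request freed on any error. A condensed sketch of the full pattern, using only calls visible in the surrounding hunks:

	request = kzalloc(sizeof(*request), GFP_KERNEL);
	if (request == NULL)
		return -ENOMEM;

	ret = BEGIN_LP_RING(4);		/* may fail once it can wait for space */
	if (ret) {
		kfree(request);
		return ret;
	}
	/* ... OUT_RING() payload ... */
	ADVANCE_LP_RING();

	ret = i915_add_request(dev, NULL, request, LP_RING(dev_priv));
	if (ret) {
		kfree(request);		/* ownership stays with us on failure */
		return ret;
	}
	/* request now owned by the ring; remember its seqno */
	seqno = request->seqno;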
@@ -289,6 +290,7 @@ i830_deactivate_pipe_a(struct drm_device *dev)
static int intel_overlay_on(struct intel_overlay *overlay)
{
struct drm_device *dev = overlay->dev;
+ struct drm_i915_private *dev_priv = dev->dev_private;
struct drm_i915_gem_request *request;
int pipe_a_quirk = 0;
int ret;
@@ -308,7 +310,12 @@ static int intel_overlay_on(struct intel_overlay *overlay)
goto out;
}
- BEGIN_LP_RING(4);
+ ret = BEGIN_LP_RING(4);
+ if (ret) {
+ kfree(request);
+ goto out;
+ }
+
OUT_RING(MI_OVERLAY_FLIP | MI_OVERLAY_ON);
OUT_RING(overlay->flip_addr | OFC_UPDATE);
OUT_RING(MI_WAIT_FOR_EVENT | MI_WAIT_FOR_OVERLAY_FLIP);
@@ -332,6 +339,7 @@ static int intel_overlay_continue(struct intel_overlay *overlay,
struct drm_i915_gem_request *request;
u32 flip_addr = overlay->flip_addr;
u32 tmp;
+ int ret;
BUG_ON(!overlay->active);
@@ -347,36 +355,44 @@ static int intel_overlay_continue(struct intel_overlay *overlay,
if (tmp & (1 << 17))
DRM_DEBUG("overlay underrun, DOVSTA: %x\n", tmp);
- BEGIN_LP_RING(2);
+ ret = BEGIN_LP_RING(2);
+ if (ret) {
+ kfree(request);
+ return ret;
+ }
OUT_RING(MI_OVERLAY_FLIP | MI_OVERLAY_CONTINUE);
OUT_RING(flip_addr);
ADVANCE_LP_RING();
- overlay->last_flip_req =
- i915_add_request(dev, NULL, request, &dev_priv->render_ring);
+ ret = i915_add_request(dev, NULL, request, LP_RING(dev_priv));
+ if (ret) {
+ kfree(request);
+ return ret;
+ }
+
+ overlay->last_flip_req = request->seqno;
return 0;
}
static void intel_overlay_release_old_vid_tail(struct intel_overlay *overlay)
{
- struct drm_gem_object *obj = &overlay->old_vid_bo->base;
+ struct drm_i915_gem_object *obj = overlay->old_vid_bo;
i915_gem_object_unpin(obj);
- drm_gem_object_unreference(obj);
+ drm_gem_object_unreference(&obj->base);
overlay->old_vid_bo = NULL;
}
static void intel_overlay_off_tail(struct intel_overlay *overlay)
{
- struct drm_gem_object *obj;
+ struct drm_i915_gem_object *obj = overlay->vid_bo;
/* never have the overlay hw on without showing a frame */
BUG_ON(!overlay->vid_bo);
- obj = &overlay->vid_bo->base;
i915_gem_object_unpin(obj);
- drm_gem_object_unreference(obj);
+ drm_gem_object_unreference(&obj->base);
overlay->vid_bo = NULL;
overlay->crtc->overlay = NULL;
@@ -389,8 +405,10 @@ static int intel_overlay_off(struct intel_overlay *overlay,
bool interruptible)
{
struct drm_device *dev = overlay->dev;
+ struct drm_i915_private *dev_priv = dev->dev_private;
u32 flip_addr = overlay->flip_addr;
struct drm_i915_gem_request *request;
+ int ret;
BUG_ON(!overlay->active);
@@ -404,7 +422,11 @@ static int intel_overlay_off(struct intel_overlay *overlay,
* of the hw. Do it in both cases */
flip_addr |= OFC_UPDATE;
- BEGIN_LP_RING(6);
+ ret = BEGIN_LP_RING(6);
+ if (ret) {
+ kfree(request);
+ return ret;
+ }
/* wait for overlay to go idle */
OUT_RING(MI_OVERLAY_FLIP | MI_OVERLAY_CONTINUE);
OUT_RING(flip_addr);
@@ -432,7 +454,7 @@ static int intel_overlay_recover_from_interrupt(struct intel_overlay *overlay,
return 0;
ret = i915_do_wait_request(dev, overlay->last_flip_req,
- interruptible, &dev_priv->render_ring);
+ interruptible, LP_RING(dev_priv));
if (ret)
return ret;
@@ -467,7 +489,12 @@ static int intel_overlay_release_old_vid(struct intel_overlay *overlay)
if (request == NULL)
return -ENOMEM;
- BEGIN_LP_RING(2);
+ ret = BEGIN_LP_RING(2);
+ if (ret) {
+ kfree(request);
+ return ret;
+ }
+
OUT_RING(MI_WAIT_FOR_EVENT | MI_WAIT_FOR_OVERLAY_FLIP);
OUT_RING(MI_NOOP);
ADVANCE_LP_RING();
@@ -736,13 +763,12 @@ static u32 overlay_cmd_reg(struct put_image_params *params)
}
static int intel_overlay_do_put_image(struct intel_overlay *overlay,
- struct drm_gem_object *new_bo,
+ struct drm_i915_gem_object *new_bo,
struct put_image_params *params)
{
int ret, tmp_width;
struct overlay_registers *regs;
bool scale_changed = false;
- struct drm_i915_gem_object *bo_priv = to_intel_bo(new_bo);
struct drm_device *dev = overlay->dev;
BUG_ON(!mutex_is_locked(&dev->struct_mutex));
@@ -753,7 +779,7 @@ static int intel_overlay_do_put_image(struct intel_overlay *overlay,
if (ret != 0)
return ret;
- ret = i915_gem_object_pin(new_bo, PAGE_SIZE);
+ ret = i915_gem_object_pin(new_bo, PAGE_SIZE, true);
if (ret != 0)
return ret;
@@ -761,6 +787,10 @@ static int intel_overlay_do_put_image(struct intel_overlay *overlay,
if (ret != 0)
goto out_unpin;
+ ret = i915_gem_object_put_fence(new_bo);
+ if (ret)
+ goto out_unpin;
+
if (!overlay->active) {
regs = intel_overlay_map_regs(overlay);
if (!regs) {
@@ -797,7 +827,7 @@ static int intel_overlay_do_put_image(struct intel_overlay *overlay,
regs->SWIDTHSW = calc_swidthsw(overlay->dev,
params->offset_Y, tmp_width);
regs->SHEIGHT = params->src_h;
- regs->OBUF_0Y = bo_priv->gtt_offset + params->offset_Y;
+ regs->OBUF_0Y = new_bo->gtt_offset + params->offset_Y;
regs->OSTRIDE = params->stride_Y;
if (params->format & I915_OVERLAY_YUV_PLANAR) {
@@ -811,8 +841,8 @@ static int intel_overlay_do_put_image(struct intel_overlay *overlay,
params->src_w/uv_hscale);
regs->SWIDTHSW |= max_t(u32, tmp_U, tmp_V) << 16;
regs->SHEIGHT |= (params->src_h/uv_vscale) << 16;
- regs->OBUF_0U = bo_priv->gtt_offset + params->offset_U;
- regs->OBUF_0V = bo_priv->gtt_offset + params->offset_V;
+ regs->OBUF_0U = new_bo->gtt_offset + params->offset_U;
+ regs->OBUF_0V = new_bo->gtt_offset + params->offset_V;
regs->OSTRIDE |= params->stride_UV << 16;
}
@@ -829,7 +859,7 @@ static int intel_overlay_do_put_image(struct intel_overlay *overlay,
goto out_unpin;
overlay->old_vid_bo = overlay->vid_bo;
- overlay->vid_bo = to_intel_bo(new_bo);
+ overlay->vid_bo = new_bo;
return 0;
@@ -942,7 +972,7 @@ static int check_overlay_scaling(struct put_image_params *rec)
static int check_overlay_src(struct drm_device *dev,
struct drm_intel_overlay_put_image *rec,
- struct drm_gem_object *new_bo)
+ struct drm_i915_gem_object *new_bo)
{
int uv_hscale = uv_hsubsampling(rec->flags);
int uv_vscale = uv_vsubsampling(rec->flags);
@@ -1027,7 +1057,7 @@ static int check_overlay_src(struct drm_device *dev,
return -EINVAL;
tmp = rec->stride_Y*rec->src_height;
- if (rec->offset_Y + tmp > new_bo->size)
+ if (rec->offset_Y + tmp > new_bo->base.size)
return -EINVAL;
break;
@@ -1038,12 +1068,12 @@ static int check_overlay_src(struct drm_device *dev,
return -EINVAL;
tmp = rec->stride_Y * rec->src_height;
- if (rec->offset_Y + tmp > new_bo->size)
+ if (rec->offset_Y + tmp > new_bo->base.size)
return -EINVAL;
tmp = rec->stride_UV * (rec->src_height / uv_vscale);
- if (rec->offset_U + tmp > new_bo->size ||
- rec->offset_V + tmp > new_bo->size)
+ if (rec->offset_U + tmp > new_bo->base.size ||
+ rec->offset_V + tmp > new_bo->base.size)
return -EINVAL;
break;
}
@@ -1086,7 +1116,7 @@ int intel_overlay_put_image(struct drm_device *dev, void *data,
struct intel_overlay *overlay;
struct drm_mode_object *drmmode_obj;
struct intel_crtc *crtc;
- struct drm_gem_object *new_bo;
+ struct drm_i915_gem_object *new_bo;
struct put_image_params *params;
int ret;
@@ -1125,8 +1155,8 @@ int intel_overlay_put_image(struct drm_device *dev, void *data,
}
crtc = to_intel_crtc(obj_to_crtc(drmmode_obj));
- new_bo = drm_gem_object_lookup(dev, file_priv,
- put_image_rec->bo_handle);
+ new_bo = to_intel_bo(drm_gem_object_lookup(dev, file_priv,
+ put_image_rec->bo_handle));
if (!new_bo) {
ret = -ENOENT;
goto out_free;
@@ -1135,6 +1165,12 @@ int intel_overlay_put_image(struct drm_device *dev, void *data,
mutex_lock(&dev->mode_config.mutex);
mutex_lock(&dev->struct_mutex);
+ if (new_bo->tiling_mode) {
+ DRM_ERROR("buffer used for overlay image can not be tiled\n");
+ ret = -EINVAL;
+ goto out_unlock;
+ }
+
ret = intel_overlay_recover_from_interrupt(overlay, true);
if (ret != 0)
goto out_unlock;
@@ -1217,7 +1253,7 @@ int intel_overlay_put_image(struct drm_device *dev, void *data,
out_unlock:
mutex_unlock(&dev->struct_mutex);
mutex_unlock(&dev->mode_config.mutex);
- drm_gem_object_unreference_unlocked(new_bo);
+ drm_gem_object_unreference_unlocked(&new_bo->base);
out_free:
kfree(params);
@@ -1370,7 +1406,7 @@ void intel_setup_overlay(struct drm_device *dev)
{
drm_i915_private_t *dev_priv = dev->dev_private;
struct intel_overlay *overlay;
- struct drm_gem_object *reg_bo;
+ struct drm_i915_gem_object *reg_bo;
struct overlay_registers *regs;
int ret;
@@ -1385,7 +1421,7 @@ void intel_setup_overlay(struct drm_device *dev)
reg_bo = i915_gem_alloc_object(dev, PAGE_SIZE);
if (!reg_bo)
goto out_free;
- overlay->reg_bo = to_intel_bo(reg_bo);
+ overlay->reg_bo = reg_bo;
if (OVERLAY_NEEDS_PHYSICAL(dev)) {
ret = i915_gem_attach_phys_object(dev, reg_bo,
@@ -1395,14 +1431,14 @@ void intel_setup_overlay(struct drm_device *dev)
DRM_ERROR("failed to attach phys overlay regs\n");
goto out_free_bo;
}
- overlay->flip_addr = overlay->reg_bo->phys_obj->handle->busaddr;
+ overlay->flip_addr = reg_bo->phys_obj->handle->busaddr;
} else {
- ret = i915_gem_object_pin(reg_bo, PAGE_SIZE);
+ ret = i915_gem_object_pin(reg_bo, PAGE_SIZE, true);
if (ret) {
DRM_ERROR("failed to pin overlay register bo\n");
goto out_free_bo;
}
- overlay->flip_addr = overlay->reg_bo->gtt_offset;
+ overlay->flip_addr = reg_bo->gtt_offset;
ret = i915_gem_object_set_to_gtt_domain(reg_bo, true);
if (ret) {
@@ -1434,7 +1470,7 @@ void intel_setup_overlay(struct drm_device *dev)
out_unpin_bo:
i915_gem_object_unpin(reg_bo);
out_free_bo:
- drm_gem_object_unreference(reg_bo);
+ drm_gem_object_unreference(&reg_bo->base);
out_free:
kfree(overlay);
return;
diff --git a/drivers/gpu/drm/i915/intel_panel.c b/drivers/gpu/drm/i915/intel_panel.c
index 92ff8f38527..7350ec2515c 100644
--- a/drivers/gpu/drm/i915/intel_panel.c
+++ b/drivers/gpu/drm/i915/intel_panel.c
@@ -125,15 +125,55 @@ static int is_backlight_combination_mode(struct drm_device *dev)
return 0;
}
+static u32 i915_read_blc_pwm_ctl(struct drm_i915_private *dev_priv)
+{
+ u32 val;
+
+ /* Restore the CTL value if it was lost, e.g. after a GPU reset */
+
+ if (HAS_PCH_SPLIT(dev_priv->dev)) {
+ val = I915_READ(BLC_PWM_PCH_CTL2);
+ if (dev_priv->saveBLC_PWM_CTL2 == 0) {
+ dev_priv->saveBLC_PWM_CTL2 = val;
+ } else if (val == 0) {
+ I915_WRITE(BLC_PWM_PCH_CTL2,
+ dev_priv->saveBLC_PWM_CTL2);
+ val = dev_priv->saveBLC_PWM_CTL2;
+ }
+ } else {
+ val = I915_READ(BLC_PWM_CTL);
+ if (dev_priv->saveBLC_PWM_CTL == 0) {
+ dev_priv->saveBLC_PWM_CTL = val;
+ dev_priv->saveBLC_PWM_CTL2 = I915_READ(BLC_PWM_CTL2);
+ } else if (val == 0) {
+ I915_WRITE(BLC_PWM_CTL,
+ dev_priv->saveBLC_PWM_CTL);
+ I915_WRITE(BLC_PWM_CTL2,
+ dev_priv->saveBLC_PWM_CTL2);
+ val = dev_priv->saveBLC_PWM_CTL;
+ }
+ }
+
+ return val;
+}
+
u32 intel_panel_get_max_backlight(struct drm_device *dev)
{
struct drm_i915_private *dev_priv = dev->dev_private;
u32 max;
+ max = i915_read_blc_pwm_ctl(dev_priv);
+ if (max == 0) {
+ /* XXX add code here to query mode clock or hardware clock
+ * and program max PWM appropriately.
+ */
+ printk_once(KERN_WARNING "fixme: max PWM is zero.\n");
+ return 1;
+ }
+
if (HAS_PCH_SPLIT(dev)) {
- max = I915_READ(BLC_PWM_PCH_CTL2) >> 16;
+ max >>= 16;
} else {
- max = I915_READ(BLC_PWM_CTL);
if (IS_PINEVIEW(dev)) {
max >>= 17;
} else {
@@ -146,14 +186,6 @@ u32 intel_panel_get_max_backlight(struct drm_device *dev)
max *= 0xff;
}
- if (max == 0) {
- /* XXX add code here to query mode clock or hardware clock
- * and program max PWM appropriately.
- */
- DRM_ERROR("fixme: max PWM is zero.\n");
- max = 1;
- }
-
DRM_DEBUG_DRIVER("max backlight PWM = %d\n", max);
return max;
}
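A sketch of how the returned max is meant to be used when programming a brightness level (the helper name and the 0..255 input scale are illustrative):

static u32 backlight_level_to_pwm_sketch(struct drm_device *dev, u32 level)
{
	u32 max = intel_panel_get_max_backlight(dev);

	/* scale a 0..255 user level into 0..max PWM units; max is
	 * never 0 thanks to the fallback above */
	return level * max / 255;
}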
diff --git a/drivers/gpu/drm/i915/intel_ringbuffer.c b/drivers/gpu/drm/i915/intel_ringbuffer.c
index 31cd7e33e82..56bc95c056d 100644
--- a/drivers/gpu/drm/i915/intel_ringbuffer.c
+++ b/drivers/gpu/drm/i915/intel_ringbuffer.c
@@ -49,11 +49,11 @@ static u32 i915_gem_get_seqno(struct drm_device *dev)
}
static void
-render_ring_flush(struct drm_device *dev,
- struct intel_ring_buffer *ring,
+render_ring_flush(struct intel_ring_buffer *ring,
u32 invalidate_domains,
u32 flush_domains)
{
+ struct drm_device *dev = ring->dev;
drm_i915_private_t *dev_priv = dev->dev_private;
u32 cmd;
@@ -109,49 +109,50 @@ render_ring_flush(struct drm_device *dev,
if (invalidate_domains & I915_GEM_DOMAIN_INSTRUCTION)
cmd |= MI_EXE_FLUSH;
+ if (invalidate_domains & I915_GEM_DOMAIN_COMMAND &&
+ (IS_G4X(dev) || IS_GEN5(dev)))
+ cmd |= MI_INVALIDATE_ISP;
+
#if WATCH_EXEC
DRM_INFO("%s: queue flush %08x to ring\n", __func__, cmd);
#endif
- intel_ring_begin(dev, ring, 2);
- intel_ring_emit(dev, ring, cmd);
- intel_ring_emit(dev, ring, MI_NOOP);
- intel_ring_advance(dev, ring);
+ if (intel_ring_begin(ring, 2) == 0) {
+ intel_ring_emit(ring, cmd);
+ intel_ring_emit(ring, MI_NOOP);
+ intel_ring_advance(ring);
+ }
}
}
-static void ring_write_tail(struct drm_device *dev,
- struct intel_ring_buffer *ring,
+static void ring_write_tail(struct intel_ring_buffer *ring,
u32 value)
{
- drm_i915_private_t *dev_priv = dev->dev_private;
+ drm_i915_private_t *dev_priv = ring->dev->dev_private;
I915_WRITE_TAIL(ring, value);
}
-u32 intel_ring_get_active_head(struct drm_device *dev,
- struct intel_ring_buffer *ring)
+u32 intel_ring_get_active_head(struct intel_ring_buffer *ring)
{
- drm_i915_private_t *dev_priv = dev->dev_private;
- u32 acthd_reg = INTEL_INFO(dev)->gen >= 4 ?
+ drm_i915_private_t *dev_priv = ring->dev->dev_private;
+ u32 acthd_reg = INTEL_INFO(ring->dev)->gen >= 4 ?
RING_ACTHD(ring->mmio_base) : ACTHD;
return I915_READ(acthd_reg);
}
-static int init_ring_common(struct drm_device *dev,
- struct intel_ring_buffer *ring)
+static int init_ring_common(struct intel_ring_buffer *ring)
{
+ drm_i915_private_t *dev_priv = ring->dev->dev_private;
+ struct drm_i915_gem_object *obj = ring->obj;
u32 head;
- drm_i915_private_t *dev_priv = dev->dev_private;
- struct drm_i915_gem_object *obj_priv;
- obj_priv = to_intel_bo(ring->gem_object);
/* Stop the ring if it's running. */
I915_WRITE_CTL(ring, 0);
I915_WRITE_HEAD(ring, 0);
- ring->write_tail(dev, ring, 0);
+ ring->write_tail(ring, 0);
/* Initialize the ring. */
- I915_WRITE_START(ring, obj_priv->gtt_offset);
+ I915_WRITE_START(ring, obj->gtt_offset);
head = I915_READ_HEAD(ring) & HEAD_ADDR;
/* G45 ring initialization fails to reset head to zero */
@@ -178,12 +179,13 @@ static int init_ring_common(struct drm_device *dev,
}
I915_WRITE_CTL(ring,
- ((ring->gem_object->size - PAGE_SIZE) & RING_NR_PAGES)
+ ((ring->size - PAGE_SIZE) & RING_NR_PAGES)
| RING_REPORT_64K | RING_VALID);
- head = I915_READ_HEAD(ring) & HEAD_ADDR;
/* If the head is still not zero, the ring is dead */
- if (head != 0) {
+ if ((I915_READ_CTL(ring) & RING_VALID) == 0 ||
+ I915_READ_START(ring) != obj->gtt_offset ||
+ (I915_READ_HEAD(ring) & HEAD_ADDR) != 0) {
DRM_ERROR("%s initialization failed "
"ctl %08x head %08x tail %08x start %08x\n",
ring->name,
@@ -194,8 +196,8 @@ static int init_ring_common(struct drm_device *dev,
return -EIO;
}
- if (!drm_core_check_feature(dev, DRIVER_MODESET))
- i915_kernel_lost_context(dev);
+ if (!drm_core_check_feature(ring->dev, DRIVER_MODESET))
+ i915_kernel_lost_context(ring->dev);
else {
ring->head = I915_READ_HEAD(ring) & HEAD_ADDR;
ring->tail = I915_READ_TAIL(ring) & TAIL_ADDR;
@@ -203,335 +205,500 @@ static int init_ring_common(struct drm_device *dev,
if (ring->space < 0)
ring->space += ring->size;
}
+
return 0;
}
-static int init_render_ring(struct drm_device *dev,
- struct intel_ring_buffer *ring)
+/*
+ * 965+ support PIPE_CONTROL commands, which provide finer grained control
+ * over cache flushing.
+ */
+struct pipe_control {
+ struct drm_i915_gem_object *obj;
+ volatile u32 *cpu_page;
+ u32 gtt_offset;
+};
+
+static int
+init_pipe_control(struct intel_ring_buffer *ring)
{
- drm_i915_private_t *dev_priv = dev->dev_private;
- int ret = init_ring_common(dev, ring);
- int mode;
+ struct pipe_control *pc;
+ struct drm_i915_gem_object *obj;
+ int ret;
+
+ if (ring->private)
+ return 0;
+
+ pc = kmalloc(sizeof(*pc), GFP_KERNEL);
+ if (!pc)
+ return -ENOMEM;
+
+ obj = i915_gem_alloc_object(ring->dev, 4096);
+ if (obj == NULL) {
+ DRM_ERROR("Failed to allocate seqno page\n");
+ ret = -ENOMEM;
+ goto err;
+ }
+ obj->agp_type = AGP_USER_CACHED_MEMORY;
+
+ ret = i915_gem_object_pin(obj, 4096, true);
+ if (ret)
+ goto err_unref;
+
+ pc->gtt_offset = obj->gtt_offset;
+ pc->cpu_page = kmap(obj->pages[0]);
+ if (pc->cpu_page == NULL) {
+ ret = -ENOMEM; /* pin succeeded, so ret is 0 here; don't return it */
+ goto err_unpin;
+ }
+
+ pc->obj = obj;
+ ring->private = pc;
+ return 0;
+
+err_unpin:
+ i915_gem_object_unpin(obj);
+err_unref:
+ drm_gem_object_unreference(&obj->base);
+err:
+ kfree(pc);
+ return ret;
+}
+
+static void
+cleanup_pipe_control(struct intel_ring_buffer *ring)
+{
+ struct pipe_control *pc = ring->private;
+ struct drm_i915_gem_object *obj;
+
+ if (!ring->private)
+ return;
+
+ obj = pc->obj;
+ kunmap(obj->pages[0]);
+ i915_gem_object_unpin(obj);
+ drm_gem_object_unreference(&obj->base);
+
+ kfree(pc);
+ ring->private = NULL;
+}
+
+static int init_render_ring(struct intel_ring_buffer *ring)
+{
+ struct drm_device *dev = ring->dev;
+ struct drm_i915_private *dev_priv = dev->dev_private;
+ int ret = init_ring_common(ring);
if (INTEL_INFO(dev)->gen > 3) {
- mode = VS_TIMER_DISPATCH << 16 | VS_TIMER_DISPATCH;
+ int mode = VS_TIMER_DISPATCH << 16 | VS_TIMER_DISPATCH;
if (IS_GEN6(dev))
mode |= MI_FLUSH_ENABLE << 16 | MI_FLUSH_ENABLE;
I915_WRITE(MI_MODE, mode);
}
+
+ if (INTEL_INFO(dev)->gen >= 6) {
+ } else if (IS_GEN5(dev)) {
+ ret = init_pipe_control(ring);
+ if (ret)
+ return ret;
+ }
+
return ret;
}
-#define PIPE_CONTROL_FLUSH(addr) \
+static void render_ring_cleanup(struct intel_ring_buffer *ring)
+{
+ if (!ring->private)
+ return;
+
+ cleanup_pipe_control(ring);
+}
+
+static void
+update_semaphore(struct intel_ring_buffer *ring, int i, u32 seqno)
+{
+ struct drm_device *dev = ring->dev;
+ struct drm_i915_private *dev_priv = dev->dev_private;
+ int id;
+
+ /*
+ * cs -> 1 = vcs, 0 = bcs
+ * vcs -> 1 = bcs, 0 = cs,
+ * bcs -> 1 = cs, 0 = vcs.
+ */
+ id = ring - dev_priv->ring;
+ id += 2 - i;
+ id %= 3;
+
+ intel_ring_emit(ring,
+ MI_SEMAPHORE_MBOX |
+ MI_SEMAPHORE_REGISTER |
+ MI_SEMAPHORE_UPDATE);
+ intel_ring_emit(ring, seqno);
+ intel_ring_emit(ring,
+ RING_SYNC_0(dev_priv->ring[id].mmio_base) + 4*i);
+}
+
+static int
+gen6_add_request(struct intel_ring_buffer *ring,
+ u32 *result)
+{
+ u32 seqno;
+ int ret;
+
+ ret = intel_ring_begin(ring, 10);
+ if (ret)
+ return ret;
+
+ seqno = i915_gem_get_seqno(ring->dev);
+ update_semaphore(ring, 0, seqno);
+ update_semaphore(ring, 1, seqno);
+
+ intel_ring_emit(ring, MI_STORE_DWORD_INDEX);
+ intel_ring_emit(ring, I915_GEM_HWS_INDEX << MI_STORE_DWORD_INDEX_SHIFT);
+ intel_ring_emit(ring, seqno);
+ intel_ring_emit(ring, MI_USER_INTERRUPT);
+ intel_ring_advance(ring);
+
+ *result = seqno;
+ return 0;
+}
+
+int
+intel_ring_sync(struct intel_ring_buffer *ring,
+ struct intel_ring_buffer *to,
+ u32 seqno)
+{
+ int ret;
+
+ ret = intel_ring_begin(ring, 4);
+ if (ret)
+ return ret;
+
+ intel_ring_emit(ring,
+ MI_SEMAPHORE_MBOX |
+ MI_SEMAPHORE_REGISTER |
+ intel_ring_sync_index(ring, to) << 17 |
+ MI_SEMAPHORE_COMPARE);
+ intel_ring_emit(ring, seqno);
+ intel_ring_emit(ring, 0);
+ intel_ring_emit(ring, MI_NOOP);
+ intel_ring_advance(ring);
+
+ return 0;
+}
+
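A worked instance of the mailbox index arithmetic in update_semaphore() above: for the video ring (dev_priv->ring index 1) and mailbox i = 0, id = (1 + 2 - 0) % 3 = 0, so the first mailbox updates the render ring's sync register; for i = 1, id = 2, the blitter ring. This matches the cs/vcs/bcs table in the comment.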
+#define PIPE_CONTROL_FLUSH(ring__, addr__) \
do { \
- OUT_RING(GFX_OP_PIPE_CONTROL | PIPE_CONTROL_QW_WRITE | \
+ intel_ring_emit(ring__, GFX_OP_PIPE_CONTROL | PIPE_CONTROL_QW_WRITE | \
PIPE_CONTROL_DEPTH_STALL | 2); \
- OUT_RING(addr | PIPE_CONTROL_GLOBAL_GTT); \
- OUT_RING(0); \
- OUT_RING(0); \
+ intel_ring_emit(ring__, (addr__) | PIPE_CONTROL_GLOBAL_GTT); \
+ intel_ring_emit(ring__, 0); \
+ intel_ring_emit(ring__, 0); \
} while (0)
-/**
- * Creates a new sequence number, emitting a write of it to the status page
- * plus an interrupt, which will trigger i915_user_interrupt_handler.
- *
- * Must be called with struct_lock held.
- *
- * Returned sequence numbers are nonzero on success.
- */
-static u32
-render_ring_add_request(struct drm_device *dev,
- struct intel_ring_buffer *ring,
- u32 flush_domains)
+static int
+pc_render_add_request(struct intel_ring_buffer *ring,
+ u32 *result)
{
- drm_i915_private_t *dev_priv = dev->dev_private;
- u32 seqno;
+ struct drm_device *dev = ring->dev;
+ u32 seqno = i915_gem_get_seqno(dev);
+ struct pipe_control *pc = ring->private;
+ u32 scratch_addr = pc->gtt_offset + 128;
+ int ret;
- seqno = i915_gem_get_seqno(dev);
-
- if (IS_GEN6(dev)) {
- BEGIN_LP_RING(6);
- OUT_RING(GFX_OP_PIPE_CONTROL | 3);
- OUT_RING(PIPE_CONTROL_QW_WRITE |
- PIPE_CONTROL_WC_FLUSH | PIPE_CONTROL_IS_FLUSH |
- PIPE_CONTROL_NOTIFY);
- OUT_RING(dev_priv->seqno_gfx_addr | PIPE_CONTROL_GLOBAL_GTT);
- OUT_RING(seqno);
- OUT_RING(0);
- OUT_RING(0);
- ADVANCE_LP_RING();
- } else if (HAS_PIPE_CONTROL(dev)) {
- u32 scratch_addr = dev_priv->seqno_gfx_addr + 128;
+ /* For Ironlake, MI_USER_INTERRUPT was deprecated and apparently
+ * incoherent with writes to memory, i.e. completely fubar,
+ * so we need to use PIPE_NOTIFY instead.
+ *
+ * However, we also need to workaround the qword write
+ * incoherence by flushing the 6 PIPE_NOTIFY buffers out to
+ * memory before requesting an interrupt.
+ */
+ ret = intel_ring_begin(ring, 32);
+ if (ret)
+ return ret;
+
+ intel_ring_emit(ring, GFX_OP_PIPE_CONTROL | PIPE_CONTROL_QW_WRITE |
+ PIPE_CONTROL_WC_FLUSH | PIPE_CONTROL_TC_FLUSH);
+ intel_ring_emit(ring, pc->gtt_offset | PIPE_CONTROL_GLOBAL_GTT);
+ intel_ring_emit(ring, seqno);
+ intel_ring_emit(ring, 0);
+ PIPE_CONTROL_FLUSH(ring, scratch_addr);
+ scratch_addr += 128; /* write to separate cachelines */
+ PIPE_CONTROL_FLUSH(ring, scratch_addr);
+ scratch_addr += 128;
+ PIPE_CONTROL_FLUSH(ring, scratch_addr);
+ scratch_addr += 128;
+ PIPE_CONTROL_FLUSH(ring, scratch_addr);
+ scratch_addr += 128;
+ PIPE_CONTROL_FLUSH(ring, scratch_addr);
+ scratch_addr += 128;
+ PIPE_CONTROL_FLUSH(ring, scratch_addr);
+ intel_ring_emit(ring, GFX_OP_PIPE_CONTROL | PIPE_CONTROL_QW_WRITE |
+ PIPE_CONTROL_WC_FLUSH | PIPE_CONTROL_TC_FLUSH |
+ PIPE_CONTROL_NOTIFY);
+ intel_ring_emit(ring, pc->gtt_offset | PIPE_CONTROL_GLOBAL_GTT);
+ intel_ring_emit(ring, seqno);
+ intel_ring_emit(ring, 0);
+ intel_ring_advance(ring);
+
+ *result = seqno;
+ return 0;
+}
- /*
- * Workaround qword write incoherence by flushing the
- * PIPE_NOTIFY buffers out to memory before requesting
- * an interrupt.
- */
- BEGIN_LP_RING(32);
- OUT_RING(GFX_OP_PIPE_CONTROL | PIPE_CONTROL_QW_WRITE |
- PIPE_CONTROL_WC_FLUSH | PIPE_CONTROL_TC_FLUSH);
- OUT_RING(dev_priv->seqno_gfx_addr | PIPE_CONTROL_GLOBAL_GTT);
- OUT_RING(seqno);
- OUT_RING(0);
- PIPE_CONTROL_FLUSH(scratch_addr);
- scratch_addr += 128; /* write to separate cachelines */
- PIPE_CONTROL_FLUSH(scratch_addr);
- scratch_addr += 128;
- PIPE_CONTROL_FLUSH(scratch_addr);
- scratch_addr += 128;
- PIPE_CONTROL_FLUSH(scratch_addr);
- scratch_addr += 128;
- PIPE_CONTROL_FLUSH(scratch_addr);
- scratch_addr += 128;
- PIPE_CONTROL_FLUSH(scratch_addr);
- OUT_RING(GFX_OP_PIPE_CONTROL | PIPE_CONTROL_QW_WRITE |
- PIPE_CONTROL_WC_FLUSH | PIPE_CONTROL_TC_FLUSH |
- PIPE_CONTROL_NOTIFY);
- OUT_RING(dev_priv->seqno_gfx_addr | PIPE_CONTROL_GLOBAL_GTT);
- OUT_RING(seqno);
- OUT_RING(0);
- ADVANCE_LP_RING();
- } else {
- BEGIN_LP_RING(4);
- OUT_RING(MI_STORE_DWORD_INDEX);
- OUT_RING(I915_GEM_HWS_INDEX << MI_STORE_DWORD_INDEX_SHIFT);
- OUT_RING(seqno);
+static int
+render_ring_add_request(struct intel_ring_buffer *ring,
+ u32 *result)
+{
+ struct drm_device *dev = ring->dev;
+ u32 seqno = i915_gem_get_seqno(dev);
+ int ret;
- OUT_RING(MI_USER_INTERRUPT);
- ADVANCE_LP_RING();
- }
- return seqno;
+ ret = intel_ring_begin(ring, 4);
+ if (ret)
+ return ret;
+
+ intel_ring_emit(ring, MI_STORE_DWORD_INDEX);
+ intel_ring_emit(ring, I915_GEM_HWS_INDEX << MI_STORE_DWORD_INDEX_SHIFT);
+ intel_ring_emit(ring, seqno);
+ intel_ring_emit(ring, MI_USER_INTERRUPT);
+ intel_ring_advance(ring);
+
+ *result = seqno;
+ return 0;
}
static u32
-render_ring_get_seqno(struct drm_device *dev,
- struct intel_ring_buffer *ring)
+ring_get_seqno(struct intel_ring_buffer *ring)
{
- drm_i915_private_t *dev_priv = (drm_i915_private_t *) dev->dev_private;
- if (HAS_PIPE_CONTROL(dev))
- return ((volatile u32 *)(dev_priv->seqno_page))[0];
- else
- return intel_read_status_page(ring, I915_GEM_HWS_INDEX);
+ return intel_read_status_page(ring, I915_GEM_HWS_INDEX);
}
-static void
-render_ring_get_user_irq(struct drm_device *dev,
- struct intel_ring_buffer *ring)
+static u32
+pc_render_get_seqno(struct intel_ring_buffer *ring)
{
- drm_i915_private_t *dev_priv = (drm_i915_private_t *) dev->dev_private;
- unsigned long irqflags;
+ struct pipe_control *pc = ring->private;
+ return pc->cpu_page[0];
+}
+
+static bool
+render_ring_get_irq(struct intel_ring_buffer *ring)
+{
+ struct drm_device *dev = ring->dev;
+
+ if (!dev->irq_enabled)
+ return false;
+
+ if (atomic_inc_return(&ring->irq_refcount) == 1) {
+ drm_i915_private_t *dev_priv = dev->dev_private;
+ unsigned long irqflags;
- spin_lock_irqsave(&dev_priv->user_irq_lock, irqflags);
- if (dev->irq_enabled && (++ring->user_irq_refcount == 1)) {
+ spin_lock_irqsave(&dev_priv->irq_lock, irqflags);
if (HAS_PCH_SPLIT(dev))
- ironlake_enable_graphics_irq(dev_priv, GT_PIPE_NOTIFY);
+ ironlake_enable_graphics_irq(dev_priv,
+ GT_PIPE_NOTIFY | GT_USER_INTERRUPT);
else
i915_enable_irq(dev_priv, I915_USER_INTERRUPT);
+ spin_unlock_irqrestore(&dev_priv->irq_lock, irqflags);
}
- spin_unlock_irqrestore(&dev_priv->user_irq_lock, irqflags);
+
+ return true;
}
static void
-render_ring_put_user_irq(struct drm_device *dev,
- struct intel_ring_buffer *ring)
+render_ring_put_irq(struct intel_ring_buffer *ring)
{
- drm_i915_private_t *dev_priv = (drm_i915_private_t *) dev->dev_private;
- unsigned long irqflags;
+ struct drm_device *dev = ring->dev;
+
+ if (atomic_dec_and_test(&ring->irq_refcount)) {
+ drm_i915_private_t *dev_priv = dev->dev_private;
+ unsigned long irqflags;
- spin_lock_irqsave(&dev_priv->user_irq_lock, irqflags);
- BUG_ON(dev->irq_enabled && ring->user_irq_refcount <= 0);
- if (dev->irq_enabled && (--ring->user_irq_refcount == 0)) {
+ spin_lock_irqsave(&dev_priv->irq_lock, irqflags);
if (HAS_PCH_SPLIT(dev))
- ironlake_disable_graphics_irq(dev_priv, GT_PIPE_NOTIFY);
+ ironlake_disable_graphics_irq(dev_priv,
+ GT_USER_INTERRUPT |
+ GT_PIPE_NOTIFY);
else
i915_disable_irq(dev_priv, I915_USER_INTERRUPT);
+ spin_unlock_irqrestore(&dev_priv->irq_lock, irqflags);
}
- spin_unlock_irqrestore(&dev_priv->user_irq_lock, irqflags);
}
-void intel_ring_setup_status_page(struct drm_device *dev,
- struct intel_ring_buffer *ring)
+void intel_ring_setup_status_page(struct intel_ring_buffer *ring)
{
- drm_i915_private_t *dev_priv = dev->dev_private;
- if (IS_GEN6(dev)) {
- I915_WRITE(RING_HWS_PGA_GEN6(ring->mmio_base),
- ring->status_page.gfx_addr);
- I915_READ(RING_HWS_PGA_GEN6(ring->mmio_base)); /* posting read */
- } else {
- I915_WRITE(RING_HWS_PGA(ring->mmio_base),
- ring->status_page.gfx_addr);
- I915_READ(RING_HWS_PGA(ring->mmio_base)); /* posting read */
- }
-
+ drm_i915_private_t *dev_priv = ring->dev->dev_private;
+ u32 mmio = IS_GEN6(ring->dev) ?
+ RING_HWS_PGA_GEN6(ring->mmio_base) :
+ RING_HWS_PGA(ring->mmio_base);
+ I915_WRITE(mmio, (u32)ring->status_page.gfx_addr);
+ POSTING_READ(mmio);
}
static void
-bsd_ring_flush(struct drm_device *dev,
- struct intel_ring_buffer *ring,
- u32 invalidate_domains,
- u32 flush_domains)
+bsd_ring_flush(struct intel_ring_buffer *ring,
+ u32 invalidate_domains,
+ u32 flush_domains)
{
- intel_ring_begin(dev, ring, 2);
- intel_ring_emit(dev, ring, MI_FLUSH);
- intel_ring_emit(dev, ring, MI_NOOP);
- intel_ring_advance(dev, ring);
-}
+ if ((flush_domains & I915_GEM_DOMAIN_RENDER) == 0)
+ return;
-static int init_bsd_ring(struct drm_device *dev,
- struct intel_ring_buffer *ring)
-{
- return init_ring_common(dev, ring);
+ if (intel_ring_begin(ring, 2) == 0) {
+ intel_ring_emit(ring, MI_FLUSH);
+ intel_ring_emit(ring, MI_NOOP);
+ intel_ring_advance(ring);
+ }
}
-static u32
-ring_add_request(struct drm_device *dev,
- struct intel_ring_buffer *ring,
- u32 flush_domains)
+static int
+ring_add_request(struct intel_ring_buffer *ring,
+ u32 *result)
{
u32 seqno;
+ int ret;
+
+ ret = intel_ring_begin(ring, 4);
+ if (ret)
+ return ret;
- seqno = i915_gem_get_seqno(dev);
+ seqno = i915_gem_get_seqno(ring->dev);
- intel_ring_begin(dev, ring, 4);
- intel_ring_emit(dev, ring, MI_STORE_DWORD_INDEX);
- intel_ring_emit(dev, ring,
- I915_GEM_HWS_INDEX << MI_STORE_DWORD_INDEX_SHIFT);
- intel_ring_emit(dev, ring, seqno);
- intel_ring_emit(dev, ring, MI_USER_INTERRUPT);
- intel_ring_advance(dev, ring);
+ intel_ring_emit(ring, MI_STORE_DWORD_INDEX);
+ intel_ring_emit(ring, I915_GEM_HWS_INDEX << MI_STORE_DWORD_INDEX_SHIFT);
+ intel_ring_emit(ring, seqno);
+ intel_ring_emit(ring, MI_USER_INTERRUPT);
+ intel_ring_advance(ring);
DRM_DEBUG_DRIVER("%s %d\n", ring->name, seqno);
-
- return seqno;
+ *result = seqno;
+ return 0;
}
-static void
-bsd_ring_get_user_irq(struct drm_device *dev,
- struct intel_ring_buffer *ring)
+static bool
+ring_get_irq(struct intel_ring_buffer *ring, u32 flag)
{
- /* do nothing */
+ struct drm_device *dev = ring->dev;
+
+ if (!dev->irq_enabled)
+ return false;
+
+ if (atomic_inc_return(&ring->irq_refcount) == 1) {
+ drm_i915_private_t *dev_priv = dev->dev_private;
+ unsigned long irqflags;
+
+ spin_lock_irqsave(&dev_priv->irq_lock, irqflags);
+ ironlake_enable_graphics_irq(dev_priv, flag);
+ spin_unlock_irqrestore(&dev_priv->irq_lock, irqflags);
+ }
+
+ return true;
}
+
static void
-bsd_ring_put_user_irq(struct drm_device *dev,
- struct intel_ring_buffer *ring)
+ring_put_irq(struct intel_ring_buffer *ring, u32 flag)
{
- /* do nothing */
+ struct drm_device *dev = ring->dev;
+
+ if (atomic_dec_and_test(&ring->irq_refcount)) {
+ drm_i915_private_t *dev_priv = dev->dev_private;
+ unsigned long irqflags;
+
+ spin_lock_irqsave(&dev_priv->irq_lock, irqflags);
+ ironlake_disable_graphics_irq(dev_priv, flag);
+ spin_unlock_irqrestore(&dev_priv->irq_lock, irqflags);
+ }
}
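How the refcounted pair above is consumed by waiters (a sketch; the real loop in i915_do_wait_request() also copes with a wedged GPU and signal delivery):

	if (ring->irq_get(ring)) {
		wait_event(ring->irq_queue,
			   i915_seqno_passed(ring->get_seqno(ring), seqno));
		ring->irq_put(ring);
	}

The atomic_inc_return()/atomic_dec_and_test() pair means concurrent waiters share one hardware IRQ enable, and only the last one out disables it.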
-static u32
-ring_status_page_get_seqno(struct drm_device *dev,
- struct intel_ring_buffer *ring)
+static bool
+bsd_ring_get_irq(struct intel_ring_buffer *ring)
{
- return intel_read_status_page(ring, I915_GEM_HWS_INDEX);
+ return ring_get_irq(ring, GT_BSD_USER_INTERRUPT);
+}
+static void
+bsd_ring_put_irq(struct intel_ring_buffer *ring)
+{
+ ring_put_irq(ring, GT_BSD_USER_INTERRUPT);
}
static int
-ring_dispatch_gem_execbuffer(struct drm_device *dev,
- struct intel_ring_buffer *ring,
- struct drm_i915_gem_execbuffer2 *exec,
- struct drm_clip_rect *cliprects,
- uint64_t exec_offset)
+ring_dispatch_execbuffer(struct intel_ring_buffer *ring, u32 offset, u32 length)
{
- uint32_t exec_start;
- exec_start = (uint32_t) exec_offset + exec->batch_start_offset;
- intel_ring_begin(dev, ring, 2);
- intel_ring_emit(dev, ring, MI_BATCH_BUFFER_START |
- (2 << 6) | MI_BATCH_NON_SECURE_I965);
- intel_ring_emit(dev, ring, exec_start);
- intel_ring_advance(dev, ring);
+ int ret;
+
+ ret = intel_ring_begin(ring, 2);
+ if (ret)
+ return ret;
+
+ intel_ring_emit(ring,
+ MI_BATCH_BUFFER_START | (2 << 6) |
+ MI_BATCH_NON_SECURE_I965);
+ intel_ring_emit(ring, offset);
+ intel_ring_advance(ring);
+
return 0;
}
static int
-render_ring_dispatch_gem_execbuffer(struct drm_device *dev,
- struct intel_ring_buffer *ring,
- struct drm_i915_gem_execbuffer2 *exec,
- struct drm_clip_rect *cliprects,
- uint64_t exec_offset)
+render_ring_dispatch_execbuffer(struct intel_ring_buffer *ring,
+ u32 offset, u32 len)
{
+ struct drm_device *dev = ring->dev;
drm_i915_private_t *dev_priv = dev->dev_private;
- int nbox = exec->num_cliprects;
- int i = 0, count;
- uint32_t exec_start, exec_len;
- exec_start = (uint32_t) exec_offset + exec->batch_start_offset;
- exec_len = (uint32_t) exec->batch_len;
+ int ret;
trace_i915_gem_request_submit(dev, dev_priv->next_seqno + 1);
- count = nbox ? nbox : 1;
+ if (IS_I830(dev) || IS_845G(dev)) {
+ ret = intel_ring_begin(ring, 4);
+ if (ret)
+ return ret;
- for (i = 0; i < count; i++) {
- if (i < nbox) {
- int ret = i915_emit_box(dev, cliprects, i,
- exec->DR1, exec->DR4);
- if (ret)
- return ret;
- }
+ intel_ring_emit(ring, MI_BATCH_BUFFER);
+ intel_ring_emit(ring, offset | MI_BATCH_NON_SECURE);
+ intel_ring_emit(ring, offset + len - 8);
+ intel_ring_emit(ring, 0);
+ } else {
+ ret = intel_ring_begin(ring, 2);
+ if (ret)
+ return ret;
- if (IS_I830(dev) || IS_845G(dev)) {
- intel_ring_begin(dev, ring, 4);
- intel_ring_emit(dev, ring, MI_BATCH_BUFFER);
- intel_ring_emit(dev, ring,
- exec_start | MI_BATCH_NON_SECURE);
- intel_ring_emit(dev, ring, exec_start + exec_len - 4);
- intel_ring_emit(dev, ring, 0);
+ if (INTEL_INFO(dev)->gen >= 4) {
+ intel_ring_emit(ring,
+ MI_BATCH_BUFFER_START | (2 << 6) |
+ MI_BATCH_NON_SECURE_I965);
+ intel_ring_emit(ring, offset);
} else {
- intel_ring_begin(dev, ring, 2);
- if (INTEL_INFO(dev)->gen >= 4) {
- intel_ring_emit(dev, ring,
- MI_BATCH_BUFFER_START | (2 << 6)
- | MI_BATCH_NON_SECURE_I965);
- intel_ring_emit(dev, ring, exec_start);
- } else {
- intel_ring_emit(dev, ring, MI_BATCH_BUFFER_START
- | (2 << 6));
- intel_ring_emit(dev, ring, exec_start |
- MI_BATCH_NON_SECURE);
- }
+ intel_ring_emit(ring,
+ MI_BATCH_BUFFER_START | (2 << 6));
+ intel_ring_emit(ring, offset | MI_BATCH_NON_SECURE);
}
- intel_ring_advance(dev, ring);
}
-
- if (IS_G4X(dev) || IS_GEN5(dev)) {
- intel_ring_begin(dev, ring, 2);
- intel_ring_emit(dev, ring, MI_FLUSH |
- MI_NO_WRITE_FLUSH |
- MI_INVALIDATE_ISP );
- intel_ring_emit(dev, ring, MI_NOOP);
- intel_ring_advance(dev, ring);
- }
- /* XXX breadcrumb */
+ intel_ring_advance(ring);
return 0;
}
-static void cleanup_status_page(struct drm_device *dev,
- struct intel_ring_buffer *ring)
+static void cleanup_status_page(struct intel_ring_buffer *ring)
{
- drm_i915_private_t *dev_priv = dev->dev_private;
- struct drm_gem_object *obj;
- struct drm_i915_gem_object *obj_priv;
+ drm_i915_private_t *dev_priv = ring->dev->dev_private;
+ struct drm_i915_gem_object *obj;
obj = ring->status_page.obj;
if (obj == NULL)
return;
- obj_priv = to_intel_bo(obj);
- kunmap(obj_priv->pages[0]);
+ kunmap(obj->pages[0]);
i915_gem_object_unpin(obj);
- drm_gem_object_unreference(obj);
+ drm_gem_object_unreference(&obj->base);
ring->status_page.obj = NULL;
memset(&dev_priv->hws_map, 0, sizeof(dev_priv->hws_map));
}
-static int init_status_page(struct drm_device *dev,
- struct intel_ring_buffer *ring)
+static int init_status_page(struct intel_ring_buffer *ring)
{
+ struct drm_device *dev = ring->dev;
drm_i915_private_t *dev_priv = dev->dev_private;
- struct drm_gem_object *obj;
- struct drm_i915_gem_object *obj_priv;
+ struct drm_i915_gem_object *obj;
int ret;
obj = i915_gem_alloc_object(dev, 4096);
@@ -540,16 +707,15 @@ static int init_status_page(struct drm_device *dev,
ret = -ENOMEM;
goto err;
}
- obj_priv = to_intel_bo(obj);
- obj_priv->agp_type = AGP_USER_CACHED_MEMORY;
+ obj->agp_type = AGP_USER_CACHED_MEMORY;
- ret = i915_gem_object_pin(obj, 4096);
+ ret = i915_gem_object_pin(obj, 4096, true);
if (ret != 0) {
goto err_unref;
}
- ring->status_page.gfx_addr = obj_priv->gtt_offset;
- ring->status_page.page_addr = kmap(obj_priv->pages[0]);
+ ring->status_page.gfx_addr = obj->gtt_offset;
+ ring->status_page.page_addr = kmap(obj->pages[0]);
if (ring->status_page.page_addr == NULL) {
memset(&dev_priv->hws_map, 0, sizeof(dev_priv->hws_map));
goto err_unpin;
@@ -557,7 +723,7 @@ static int init_status_page(struct drm_device *dev,
ring->status_page.obj = obj;
memset(ring->status_page.page_addr, 0, PAGE_SIZE);
- intel_ring_setup_status_page(dev, ring);
+ intel_ring_setup_status_page(ring);
DRM_DEBUG_DRIVER("%s hws offset: 0x%08x\n",
ring->name, ring->status_page.gfx_addr);
@@ -566,7 +732,7 @@ static int init_status_page(struct drm_device *dev,
err_unpin:
i915_gem_object_unpin(obj);
err_unref:
- drm_gem_object_unreference(obj);
+ drm_gem_object_unreference(&obj->base);
err:
return ret;
}
@@ -574,9 +740,7 @@ err:
int intel_init_ring_buffer(struct drm_device *dev,
struct intel_ring_buffer *ring)
{
- struct drm_i915_private *dev_priv = dev->dev_private;
- struct drm_i915_gem_object *obj_priv;
- struct drm_gem_object *obj;
+ struct drm_i915_gem_object *obj;
int ret;
ring->dev = dev;
@@ -585,7 +749,7 @@ int intel_init_ring_buffer(struct drm_device *dev,
INIT_LIST_HEAD(&ring->gpu_write_list);
if (I915_NEED_GFX_HWS(dev)) {
- ret = init_status_page(dev, ring);
+ ret = init_status_page(ring);
if (ret)
return ret;
}
@@ -597,15 +761,14 @@ int intel_init_ring_buffer(struct drm_device *dev,
goto err_hws;
}
- ring->gem_object = obj;
+ ring->obj = obj;
- ret = i915_gem_object_pin(obj, PAGE_SIZE);
+ ret = i915_gem_object_pin(obj, PAGE_SIZE, true);
if (ret)
goto err_unref;
- obj_priv = to_intel_bo(obj);
ring->map.size = ring->size;
- ring->map.offset = dev->agp->base + obj_priv->gtt_offset;
+ ring->map.offset = dev->agp->base + obj->gtt_offset;
ring->map.type = 0;
ring->map.flags = 0;
ring->map.mtrr = 0;
@@ -618,60 +781,57 @@ int intel_init_ring_buffer(struct drm_device *dev,
}
ring->virtual_start = ring->map.handle;
- ret = ring->init(dev, ring);
+ ret = ring->init(ring);
if (ret)
goto err_unmap;
- if (!drm_core_check_feature(dev, DRIVER_MODESET))
- i915_kernel_lost_context(dev);
- else {
- ring->head = I915_READ_HEAD(ring) & HEAD_ADDR;
- ring->tail = I915_READ_TAIL(ring) & TAIL_ADDR;
- ring->space = ring->head - (ring->tail + 8);
- if (ring->space < 0)
- ring->space += ring->size;
- }
- return ret;
+ return 0;
err_unmap:
drm_core_ioremapfree(&ring->map, dev);
err_unpin:
i915_gem_object_unpin(obj);
err_unref:
- drm_gem_object_unreference(obj);
- ring->gem_object = NULL;
+ drm_gem_object_unreference(&obj->base);
+ ring->obj = NULL;
err_hws:
- cleanup_status_page(dev, ring);
+ cleanup_status_page(ring);
return ret;
}
-void intel_cleanup_ring_buffer(struct drm_device *dev,
- struct intel_ring_buffer *ring)
+void intel_cleanup_ring_buffer(struct intel_ring_buffer *ring)
{
- if (ring->gem_object == NULL)
+ struct drm_i915_private *dev_priv;
+ int ret;
+
+ if (ring->obj == NULL)
return;
- drm_core_ioremapfree(&ring->map, dev);
+ /* Disable the ring buffer. The ring must be idle at this point */
+ dev_priv = ring->dev->dev_private;
+ ret = intel_wait_ring_buffer(ring, ring->size - 8);
+ if (ret)
+ DRM_ERROR("failed to quiesce %s whilst cleaning up: %d\n",
+ ring->name, ret);
+
+ I915_WRITE_CTL(ring, 0);
- i915_gem_object_unpin(ring->gem_object);
- drm_gem_object_unreference(ring->gem_object);
- ring->gem_object = NULL;
+ drm_core_ioremapfree(&ring->map, ring->dev);
+
+ i915_gem_object_unpin(ring->obj);
+ drm_gem_object_unreference(&ring->obj->base);
+ ring->obj = NULL;
if (ring->cleanup)
ring->cleanup(ring);
- cleanup_status_page(dev, ring);
+ cleanup_status_page(ring);
}
-static int intel_wrap_ring_buffer(struct drm_device *dev,
- struct intel_ring_buffer *ring)
+static int intel_wrap_ring_buffer(struct intel_ring_buffer *ring)
{
unsigned int *virt;
int rem;
rem = ring->size - ring->tail;
if (ring->space < rem) {
- int ret = intel_wait_ring_buffer(dev, ring, rem);
+ int ret = intel_wait_ring_buffer(ring, rem);
if (ret)
return ret;
}
@@ -689,11 +849,11 @@ static int intel_wrap_ring_buffer(struct drm_device *dev,
return 0;
}
-int intel_wait_ring_buffer(struct drm_device *dev,
- struct intel_ring_buffer *ring, int n)
+int intel_wait_ring_buffer(struct intel_ring_buffer *ring, int n)
{
+ struct drm_device *dev = ring->dev;
+ struct drm_i915_private *dev_priv = dev->dev_private;
unsigned long end;
- drm_i915_private_t *dev_priv = dev->dev_private;
u32 head;
trace_i915_ring_wait_begin(dev);
@@ -711,7 +871,7 @@ int intel_wait_ring_buffer(struct drm_device *dev,
if (ring->space < 0)
ring->space += ring->size;
if (ring->space >= n) {
- trace_i915_ring_wait_end (dev);
+ trace_i915_ring_wait_end(dev);
return 0;
}
@@ -722,29 +882,39 @@ int intel_wait_ring_buffer(struct drm_device *dev,
}
msleep(1);
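+ /* abort the wait early if the GPU has been declared wedged */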
+ if (atomic_read(&dev_priv->mm.wedged))
+ return -EAGAIN;
} while (!time_after(jiffies, end));
trace_i915_ring_wait_end(dev);
return -EBUSY;
}
-void intel_ring_begin(struct drm_device *dev,
- struct intel_ring_buffer *ring,
- int num_dwords)
+int intel_ring_begin(struct intel_ring_buffer *ring,
+ int num_dwords)
{
int n = 4*num_dwords;
- if (unlikely(ring->tail + n > ring->size))
- intel_wrap_ring_buffer(dev, ring);
- if (unlikely(ring->space < n))
- intel_wait_ring_buffer(dev, ring, n);
+ int ret;
+
+ if (unlikely(ring->tail + n > ring->size)) {
+ ret = intel_wrap_ring_buffer(ring);
+ if (unlikely(ret))
+ return ret;
+ }
+
+ if (unlikely(ring->space < n)) {
+ ret = intel_wait_ring_buffer(ring, n);
+ if (unlikely(ret))
+ return ret;
+ }
ring->space -= n;
+ return 0;
}
-void intel_ring_advance(struct drm_device *dev,
- struct intel_ring_buffer *ring)
+void intel_ring_advance(struct intel_ring_buffer *ring)
{
ring->tail &= ring->size - 1;
- ring->write_tail(dev, ring, ring->tail);
+ ring->write_tail(ring, ring->tail);
}
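+
+/*
+ * A minimal, hypothetical caller sketch for the reworked API: reserve
+ * space (which may now fail), emit the dwords, then advance to hand
+ * them to the hardware.
+ *
+ * ret = intel_ring_begin(ring, 2);
+ * if (ret)
+ * return ret;
+ * intel_ring_emit(ring, MI_NOOP);
+ * intel_ring_emit(ring, MI_NOOP);
+ * intel_ring_advance(ring);
+ */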
static const struct intel_ring_buffer render_ring = {
@@ -756,10 +926,11 @@ static const struct intel_ring_buffer render_ring = {
.write_tail = ring_write_tail,
.flush = render_ring_flush,
.add_request = render_ring_add_request,
- .get_seqno = render_ring_get_seqno,
- .user_irq_get = render_ring_get_user_irq,
- .user_irq_put = render_ring_put_user_irq,
- .dispatch_gem_execbuffer = render_ring_dispatch_gem_execbuffer,
+ .get_seqno = ring_get_seqno,
+ .irq_get = render_ring_get_irq,
+ .irq_put = render_ring_put_irq,
+ .dispatch_execbuffer = render_ring_dispatch_execbuffer,
+ .cleanup = render_ring_cleanup,
};
/* ring buffer for bit-stream decoder */
@@ -769,22 +940,21 @@ static const struct intel_ring_buffer bsd_ring = {
.id = RING_BSD,
.mmio_base = BSD_RING_BASE,
.size = 32 * PAGE_SIZE,
- .init = init_bsd_ring,
+ .init = init_ring_common,
.write_tail = ring_write_tail,
.flush = bsd_ring_flush,
.add_request = ring_add_request,
- .get_seqno = ring_status_page_get_seqno,
- .user_irq_get = bsd_ring_get_user_irq,
- .user_irq_put = bsd_ring_put_user_irq,
- .dispatch_gem_execbuffer = ring_dispatch_gem_execbuffer,
+ .get_seqno = ring_get_seqno,
+ .irq_get = bsd_ring_get_irq,
+ .irq_put = bsd_ring_put_irq,
+ .dispatch_execbuffer = ring_dispatch_execbuffer,
};
-static void gen6_bsd_ring_write_tail(struct drm_device *dev,
- struct intel_ring_buffer *ring,
+static void gen6_bsd_ring_write_tail(struct intel_ring_buffer *ring,
u32 value)
{
- drm_i915_private_t *dev_priv = dev->dev_private;
+ drm_i915_private_t *dev_priv = ring->dev->dev_private;
/* Every tail move must follow the sequence below */
I915_WRITE(GEN6_BSD_SLEEP_PSMI_CONTROL,
@@ -803,69 +973,80 @@ static void gen6_bsd_ring_write_tail(struct drm_device *dev,
GEN6_BSD_SLEEP_PSMI_CONTROL_RC_ILDL_MESSAGE_ENABLE);
}
-static void gen6_ring_flush(struct drm_device *dev,
- struct intel_ring_buffer *ring,
+static void gen6_ring_flush(struct intel_ring_buffer *ring,
u32 invalidate_domains,
u32 flush_domains)
{
- intel_ring_begin(dev, ring, 4);
- intel_ring_emit(dev, ring, MI_FLUSH_DW);
- intel_ring_emit(dev, ring, 0);
- intel_ring_emit(dev, ring, 0);
- intel_ring_emit(dev, ring, 0);
- intel_ring_advance(dev, ring);
+ if ((flush_domains & I915_GEM_DOMAIN_RENDER) == 0)
+ return;
+
+ if (intel_ring_begin(ring, 4) == 0) {
+ intel_ring_emit(ring, MI_FLUSH_DW);
+ intel_ring_emit(ring, 0);
+ intel_ring_emit(ring, 0);
+ intel_ring_emit(ring, 0);
+ intel_ring_advance(ring);
+ }
}
static int
-gen6_ring_dispatch_gem_execbuffer(struct drm_device *dev,
- struct intel_ring_buffer *ring,
- struct drm_i915_gem_execbuffer2 *exec,
- struct drm_clip_rect *cliprects,
- uint64_t exec_offset)
+gen6_ring_dispatch_execbuffer(struct intel_ring_buffer *ring,
+ u32 offset, u32 len)
{
- uint32_t exec_start;
+ int ret;
- exec_start = (uint32_t) exec_offset + exec->batch_start_offset;
+ ret = intel_ring_begin(ring, 2);
+ if (ret)
+ return ret;
- intel_ring_begin(dev, ring, 2);
- intel_ring_emit(dev, ring,
- MI_BATCH_BUFFER_START | MI_BATCH_NON_SECURE_I965);
+ intel_ring_emit(ring, MI_BATCH_BUFFER_START | MI_BATCH_NON_SECURE_I965);
/* bit0-7 is the length on GEN6+ */
- intel_ring_emit(dev, ring, exec_start);
- intel_ring_advance(dev, ring);
+ intel_ring_emit(ring, offset);
+ intel_ring_advance(ring);
return 0;
}
+static bool
+gen6_bsd_ring_get_irq(struct intel_ring_buffer *ring)
+{
+ return ring_get_irq(ring, GT_GEN6_BSD_USER_INTERRUPT);
+}
+
+static void
+gen6_bsd_ring_put_irq(struct intel_ring_buffer *ring)
+{
+ ring_put_irq(ring, GT_GEN6_BSD_USER_INTERRUPT);
+}
+
/* ring buffer for Video Codec for Gen6+ */
static const struct intel_ring_buffer gen6_bsd_ring = {
- .name = "gen6 bsd ring",
- .id = RING_BSD,
- .mmio_base = GEN6_BSD_RING_BASE,
- .size = 32 * PAGE_SIZE,
- .init = init_bsd_ring,
- .write_tail = gen6_bsd_ring_write_tail,
- .flush = gen6_ring_flush,
- .add_request = ring_add_request,
- .get_seqno = ring_status_page_get_seqno,
- .user_irq_get = bsd_ring_get_user_irq,
- .user_irq_put = bsd_ring_put_user_irq,
- .dispatch_gem_execbuffer = gen6_ring_dispatch_gem_execbuffer,
+ .name = "gen6 bsd ring",
+ .id = RING_BSD,
+ .mmio_base = GEN6_BSD_RING_BASE,
+ .size = 32 * PAGE_SIZE,
+ .init = init_ring_common,
+ .write_tail = gen6_bsd_ring_write_tail,
+ .flush = gen6_ring_flush,
+ .add_request = gen6_add_request,
+ .get_seqno = ring_get_seqno,
+ .irq_get = gen6_bsd_ring_get_irq,
+ .irq_put = gen6_bsd_ring_put_irq,
+ .dispatch_execbuffer = gen6_ring_dispatch_execbuffer,
};
/* Blitter support (SandyBridge+) */
-static void
-blt_ring_get_user_irq(struct drm_device *dev,
- struct intel_ring_buffer *ring)
+static bool
+blt_ring_get_irq(struct intel_ring_buffer *ring)
{
- /* do nothing */
+ return ring_get_irq(ring, GT_BLT_USER_INTERRUPT);
}
+
static void
-blt_ring_put_user_irq(struct drm_device *dev,
- struct intel_ring_buffer *ring)
+blt_ring_put_irq(struct intel_ring_buffer *ring)
{
- /* do nothing */
+ ring_put_irq(ring, GT_BLT_USER_INTERRUPT);
}
@@ -883,32 +1064,31 @@ to_blt_workaround(struct intel_ring_buffer *ring)
return ring->private;
}
-static int blt_ring_init(struct drm_device *dev,
- struct intel_ring_buffer *ring)
+static int blt_ring_init(struct intel_ring_buffer *ring)
{
- if (NEED_BLT_WORKAROUND(dev)) {
+ if (NEED_BLT_WORKAROUND(ring->dev)) {
struct drm_i915_gem_object *obj;
- u32 __iomem *ptr;
+ u32 *ptr;
int ret;
- obj = to_intel_bo(i915_gem_alloc_object(dev, 4096));
+ obj = i915_gem_alloc_object(ring->dev, 4096);
if (obj == NULL)
return -ENOMEM;
- ret = i915_gem_object_pin(&obj->base, 4096);
+ ret = i915_gem_object_pin(obj, 4096, true);
if (ret) {
drm_gem_object_unreference(&obj->base);
return ret;
}
ptr = kmap(obj->pages[0]);
- iowrite32(MI_BATCH_BUFFER_END, ptr);
- iowrite32(MI_NOOP, ptr+1);
+ *ptr++ = MI_BATCH_BUFFER_END;
+ *ptr++ = MI_NOOP;
kunmap(obj->pages[0]);
- ret = i915_gem_object_set_to_gtt_domain(&obj->base, false);
+ ret = i915_gem_object_set_to_gtt_domain(obj, false);
if (ret) {
- i915_gem_object_unpin(&obj->base);
+ i915_gem_object_unpin(obj);
drm_gem_object_unreference(&obj->base);
return ret;
}
@@ -916,51 +1096,39 @@ static int blt_ring_init(struct drm_device *dev,
ring->private = obj;
}
- return init_ring_common(dev, ring);
+ return init_ring_common(ring);
}
-static void blt_ring_begin(struct drm_device *dev,
- struct intel_ring_buffer *ring,
+static int blt_ring_begin(struct intel_ring_buffer *ring,
int num_dwords)
{
if (ring->private) {
- intel_ring_begin(dev, ring, num_dwords+2);
- intel_ring_emit(dev, ring, MI_BATCH_BUFFER_START);
- intel_ring_emit(dev, ring, to_blt_workaround(ring)->gtt_offset);
+ int ret = intel_ring_begin(ring, num_dwords+2);
+ if (ret)
+ return ret;
+
+ intel_ring_emit(ring, MI_BATCH_BUFFER_START);
+ intel_ring_emit(ring, to_blt_workaround(ring)->gtt_offset);
+
+ return 0;
} else
- intel_ring_begin(dev, ring, 4);
+ return intel_ring_begin(ring, 4);
}
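+
+/*
+ * With the workaround object present, each blitter sequence is thus
+ * preceded by a MI_BATCH_BUFFER_START into the dummy batch written in
+ * blt_ring_init() (MI_BATCH_BUFFER_END, MI_NOOP), hence the two extra
+ * dwords reserved above.
+ */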
-static void blt_ring_flush(struct drm_device *dev,
- struct intel_ring_buffer *ring,
+static void blt_ring_flush(struct intel_ring_buffer *ring,
u32 invalidate_domains,
u32 flush_domains)
{
- blt_ring_begin(dev, ring, 4);
- intel_ring_emit(dev, ring, MI_FLUSH_DW);
- intel_ring_emit(dev, ring, 0);
- intel_ring_emit(dev, ring, 0);
- intel_ring_emit(dev, ring, 0);
- intel_ring_advance(dev, ring);
-}
-
-static u32
-blt_ring_add_request(struct drm_device *dev,
- struct intel_ring_buffer *ring,
- u32 flush_domains)
-{
- u32 seqno = i915_gem_get_seqno(dev);
-
- blt_ring_begin(dev, ring, 4);
- intel_ring_emit(dev, ring, MI_STORE_DWORD_INDEX);
- intel_ring_emit(dev, ring,
- I915_GEM_HWS_INDEX << MI_STORE_DWORD_INDEX_SHIFT);
- intel_ring_emit(dev, ring, seqno);
- intel_ring_emit(dev, ring, MI_USER_INTERRUPT);
- intel_ring_advance(dev, ring);
+ if ((flush_domains & I915_GEM_DOMAIN_RENDER) == 0)
+ return;
- DRM_DEBUG_DRIVER("%s %d\n", ring->name, seqno);
- return seqno;
+ if (blt_ring_begin(ring, 4) == 0) {
+ intel_ring_emit(ring, MI_FLUSH_DW);
+ intel_ring_emit(ring, 0);
+ intel_ring_emit(ring, 0);
+ intel_ring_emit(ring, 0);
+ intel_ring_advance(ring);
+ }
}
static void blt_ring_cleanup(struct intel_ring_buffer *ring)
@@ -981,47 +1149,54 @@ static const struct intel_ring_buffer gen6_blt_ring = {
.init = blt_ring_init,
.write_tail = ring_write_tail,
.flush = blt_ring_flush,
- .add_request = blt_ring_add_request,
- .get_seqno = ring_status_page_get_seqno,
- .user_irq_get = blt_ring_get_user_irq,
- .user_irq_put = blt_ring_put_user_irq,
- .dispatch_gem_execbuffer = gen6_ring_dispatch_gem_execbuffer,
+ .add_request = gen6_add_request,
+ .get_seqno = ring_get_seqno,
+ .irq_get = blt_ring_get_irq,
+ .irq_put = blt_ring_put_irq,
+ .dispatch_execbuffer = gen6_ring_dispatch_execbuffer,
.cleanup = blt_ring_cleanup,
};
int intel_init_render_ring_buffer(struct drm_device *dev)
{
drm_i915_private_t *dev_priv = dev->dev_private;
-
- dev_priv->render_ring = render_ring;
+ struct intel_ring_buffer *ring = &dev_priv->ring[RCS];
+
+ *ring = render_ring;
+ if (INTEL_INFO(dev)->gen >= 6) {
+ ring->add_request = gen6_add_request;
+ } else if (IS_GEN5(dev)) {
+ ring->add_request = pc_render_add_request;
+ ring->get_seqno = pc_render_get_seqno;
+ }
if (!I915_NEED_GFX_HWS(dev)) {
- dev_priv->render_ring.status_page.page_addr
- = dev_priv->status_page_dmah->vaddr;
- memset(dev_priv->render_ring.status_page.page_addr,
- 0, PAGE_SIZE);
+ ring->status_page.page_addr = dev_priv->status_page_dmah->vaddr;
+ memset(ring->status_page.page_addr, 0, PAGE_SIZE);
}
- return intel_init_ring_buffer(dev, &dev_priv->render_ring);
+ return intel_init_ring_buffer(dev, ring);
}
int intel_init_bsd_ring_buffer(struct drm_device *dev)
{
drm_i915_private_t *dev_priv = dev->dev_private;
+ struct intel_ring_buffer *ring = &dev_priv->ring[VCS];
if (IS_GEN6(dev))
- dev_priv->bsd_ring = gen6_bsd_ring;
+ *ring = gen6_bsd_ring;
else
- dev_priv->bsd_ring = bsd_ring;
+ *ring = bsd_ring;
- return intel_init_ring_buffer(dev, &dev_priv->bsd_ring);
+ return intel_init_ring_buffer(dev, ring);
}
int intel_init_blt_ring_buffer(struct drm_device *dev)
{
drm_i915_private_t *dev_priv = dev->dev_private;
+ struct intel_ring_buffer *ring = &dev_priv->ring[BCS];
- dev_priv->blt_ring = gen6_blt_ring;
+ *ring = gen6_blt_ring;
- return intel_init_ring_buffer(dev, &dev_priv->blt_ring);
+ return intel_init_ring_buffer(dev, ring);
}
diff --git a/drivers/gpu/drm/i915/intel_ringbuffer.h b/drivers/gpu/drm/i915/intel_ringbuffer.h
index d2cd0f1efee..8e2e357ad6e 100644
--- a/drivers/gpu/drm/i915/intel_ringbuffer.h
+++ b/drivers/gpu/drm/i915/intel_ringbuffer.h
@@ -1,22 +1,37 @@
#ifndef _INTEL_RINGBUFFER_H_
#define _INTEL_RINGBUFFER_H_
+enum {
+ RCS = 0x0,
+ VCS,
+ BCS,
+ I915_NUM_RINGS,
+};
+
struct intel_hw_status_page {
- void *page_addr;
+ u32 __iomem *page_addr;
unsigned int gfx_addr;
- struct drm_gem_object *obj;
+ struct drm_i915_gem_object *obj;
};
-#define I915_READ_TAIL(ring) I915_READ(RING_TAIL(ring->mmio_base))
+#define I915_RING_READ(reg) i915_safe_read(dev_priv, reg)
+
+#define I915_READ_TAIL(ring) I915_RING_READ(RING_TAIL(ring->mmio_base))
#define I915_WRITE_TAIL(ring, val) I915_WRITE(RING_TAIL(ring->mmio_base), val)
-#define I915_READ_START(ring) I915_READ(RING_START(ring->mmio_base))
+
+#define I915_READ_START(ring) I915_RING_READ(RING_START(ring->mmio_base))
#define I915_WRITE_START(ring, val) I915_WRITE(RING_START(ring->mmio_base), val)
-#define I915_READ_HEAD(ring) I915_READ(RING_HEAD(ring->mmio_base))
+
+#define I915_READ_HEAD(ring) I915_RING_READ(RING_HEAD(ring->mmio_base))
#define I915_WRITE_HEAD(ring, val) I915_WRITE(RING_HEAD(ring->mmio_base), val)
-#define I915_READ_CTL(ring) I915_READ(RING_CTL(ring->mmio_base))
+
+#define I915_READ_CTL(ring) I915_RING_READ(RING_CTL(ring->mmio_base))
#define I915_WRITE_CTL(ring, val) I915_WRITE(RING_CTL(ring->mmio_base), val)
-struct drm_i915_gem_execbuffer2;
+#define I915_READ_NOPID(ring) I915_RING_READ(RING_NOPID(ring->mmio_base))
+#define I915_READ_SYNC_0(ring) I915_RING_READ(RING_SYNC_0(ring->mmio_base))
+#define I915_READ_SYNC_1(ring) I915_RING_READ(RING_SYNC_1(ring->mmio_base))
+
struct intel_ring_buffer {
const char *name;
enum intel_ring_id {
@@ -25,45 +40,36 @@ struct intel_ring_buffer {
RING_BLT = 0x4,
} id;
u32 mmio_base;
- unsigned long size;
void *virtual_start;
struct drm_device *dev;
- struct drm_gem_object *gem_object;
+ struct drm_i915_gem_object *obj;
u32 actual_head;
u32 head;
u32 tail;
int space;
+ int size;
struct intel_hw_status_page status_page;
- u32 irq_gem_seqno; /* last seq seem at irq time */
- u32 waiting_gem_seqno;
- int user_irq_refcount;
- void (*user_irq_get)(struct drm_device *dev,
- struct intel_ring_buffer *ring);
- void (*user_irq_put)(struct drm_device *dev,
- struct intel_ring_buffer *ring);
+ u32 irq_seqno; /* last seq seen at irq time */
+ u32 waiting_seqno;
+ u32 sync_seqno[I915_NUM_RINGS-1];
+ atomic_t irq_refcount;
+ bool __must_check (*irq_get)(struct intel_ring_buffer *ring);
+ void (*irq_put)(struct intel_ring_buffer *ring);
- int (*init)(struct drm_device *dev,
- struct intel_ring_buffer *ring);
+ int (*init)(struct intel_ring_buffer *ring);
- void (*write_tail)(struct drm_device *dev,
- struct intel_ring_buffer *ring,
+ void (*write_tail)(struct intel_ring_buffer *ring,
u32 value);
- void (*flush)(struct drm_device *dev,
- struct intel_ring_buffer *ring,
- u32 invalidate_domains,
- u32 flush_domains);
- u32 (*add_request)(struct drm_device *dev,
- struct intel_ring_buffer *ring,
- u32 flush_domains);
- u32 (*get_seqno)(struct drm_device *dev,
- struct intel_ring_buffer *ring);
- int (*dispatch_gem_execbuffer)(struct drm_device *dev,
- struct intel_ring_buffer *ring,
- struct drm_i915_gem_execbuffer2 *exec,
- struct drm_clip_rect *cliprects,
- uint64_t exec_offset);
+ void (*flush)(struct intel_ring_buffer *ring,
+ u32 invalidate_domains,
+ u32 flush_domains);
+ int (*add_request)(struct intel_ring_buffer *ring,
+ u32 *seqno);
+ u32 (*get_seqno)(struct intel_ring_buffer *ring);
+ int (*dispatch_execbuffer)(struct intel_ring_buffer *ring,
+ u32 offset, u32 length);
void (*cleanup)(struct intel_ring_buffer *ring);
/**
@@ -96,7 +102,7 @@ struct intel_ring_buffer {
/**
* Do we have some not yet emitted requests outstanding?
*/
- bool outstanding_lazy_request;
+ u32 outstanding_lazy_request;
wait_queue_head_t irq_queue;
drm_local_map_t map;
@@ -105,44 +111,54 @@ struct intel_ring_buffer {
};
static inline u32
+intel_ring_sync_index(struct intel_ring_buffer *ring,
+ struct intel_ring_buffer *other)
+{
+ int idx;
+
+ /*
+ * cs -> 0 = vcs, 1 = bcs,
+ * vcs -> 0 = bcs, 1 = cs,
+ * bcs -> 0 = cs, 1 = vcs.
+ */
+
+ idx = (other - ring) - 1;
+ if (idx < 0)
+ idx += I915_NUM_RINGS;
+
+ return idx;
+}
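+
+/*
+ * Worked example: with dev_priv->ring[] ordered RCS, VCS, BCS, taking
+ * ring = &ring[VCS] and other = &ring[RCS] gives (0 - 1) - 1 = -2,
+ * which wraps to 1 -- matching the "vcs -> 1 = cs" mapping above.
+ */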
+
+static inline u32
intel_read_status_page(struct intel_ring_buffer *ring,
- int reg)
+ int reg)
{
- u32 *regs = ring->status_page.page_addr;
- return regs[reg];
+ return ioread32(ring->status_page.page_addr + reg);
}
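+
+/*
+ * The status page is now accessed as I/O memory; e.g. the ring_get_seqno()
+ * implementations fetch the last completed seqno with
+ * intel_read_status_page(ring, I915_GEM_HWS_INDEX).
+ */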
-int intel_init_ring_buffer(struct drm_device *dev,
- struct intel_ring_buffer *ring);
-void intel_cleanup_ring_buffer(struct drm_device *dev,
- struct intel_ring_buffer *ring);
-int intel_wait_ring_buffer(struct drm_device *dev,
- struct intel_ring_buffer *ring, int n);
-void intel_ring_begin(struct drm_device *dev,
- struct intel_ring_buffer *ring, int n);
-
-static inline void intel_ring_emit(struct drm_device *dev,
- struct intel_ring_buffer *ring,
- unsigned int data)
+void intel_cleanup_ring_buffer(struct intel_ring_buffer *ring);
+int __must_check intel_wait_ring_buffer(struct intel_ring_buffer *ring, int n);
+int __must_check intel_ring_begin(struct intel_ring_buffer *ring, int n);
+
+static inline void intel_ring_emit(struct intel_ring_buffer *ring,
+ u32 data)
{
- unsigned int *virt = ring->virtual_start + ring->tail;
- *virt = data;
+ iowrite32(data, ring->virtual_start + ring->tail);
ring->tail += 4;
}
-void intel_ring_advance(struct drm_device *dev,
- struct intel_ring_buffer *ring);
+void intel_ring_advance(struct intel_ring_buffer *ring);
-u32 intel_ring_get_seqno(struct drm_device *dev,
- struct intel_ring_buffer *ring);
+u32 intel_ring_get_seqno(struct intel_ring_buffer *ring);
+int intel_ring_sync(struct intel_ring_buffer *ring,
+ struct intel_ring_buffer *to,
+ u32 seqno);
int intel_init_render_ring_buffer(struct drm_device *dev);
int intel_init_bsd_ring_buffer(struct drm_device *dev);
int intel_init_blt_ring_buffer(struct drm_device *dev);
-u32 intel_ring_get_active_head(struct drm_device *dev,
- struct intel_ring_buffer *ring);
-void intel_ring_setup_status_page(struct drm_device *dev,
- struct intel_ring_buffer *ring);
+u32 intel_ring_get_active_head(struct intel_ring_buffer *ring);
+void intel_ring_setup_status_page(struct intel_ring_buffer *ring);
#endif /* _INTEL_RINGBUFFER_H_ */
diff --git a/drivers/gpu/drm/i915/intel_sdvo.c b/drivers/gpu/drm/i915/intel_sdvo.c
index 6bc42fa2a6e..9d0af36a13e 100644
--- a/drivers/gpu/drm/i915/intel_sdvo.c
+++ b/drivers/gpu/drm/i915/intel_sdvo.c
@@ -1045,7 +1045,9 @@ static void intel_sdvo_mode_set(struct drm_encoder *encoder,
/* Set the SDVO control regs. */
if (INTEL_INFO(dev)->gen >= 4) {
- sdvox = SDVO_BORDER_ENABLE;
+ sdvox = 0;
+ if (INTEL_INFO(dev)->gen < 5)
+ sdvox |= SDVO_BORDER_ENABLE;
if (adjusted_mode->flags & DRM_MODE_FLAG_PVSYNC)
sdvox |= SDVO_VSYNC_ACTIVE_HIGH;
if (adjusted_mode->flags & DRM_MODE_FLAG_PHSYNC)
@@ -1075,7 +1077,8 @@ static void intel_sdvo_mode_set(struct drm_encoder *encoder,
sdvox |= (pixel_multiplier - 1) << SDVO_PORT_MULTIPLY_SHIFT;
}
- if (input_dtd.part2.sdvo_flags & SDVO_NEED_TO_STALL)
+ if (input_dtd.part2.sdvo_flags & SDVO_NEED_TO_STALL &&
+ INTEL_INFO(dev)->gen < 5)
sdvox |= SDVO_STALL_SELECT;
intel_sdvo_write_sdvox(intel_sdvo, sdvox);
}
diff --git a/drivers/gpu/drm/i915/intel_tv.c b/drivers/gpu/drm/i915/intel_tv.c
index 2f768198931..93206e4eaa6 100644
--- a/drivers/gpu/drm/i915/intel_tv.c
+++ b/drivers/gpu/drm/i915/intel_tv.c
@@ -1245,10 +1245,11 @@ intel_tv_detect_type (struct intel_tv *intel_tv)
int type;
/* Disable TV interrupts around load detect or we'll recurse */
- spin_lock_irqsave(&dev_priv->user_irq_lock, irqflags);
- i915_disable_pipestat(dev_priv, 0, PIPE_HOTPLUG_INTERRUPT_ENABLE |
+ spin_lock_irqsave(&dev_priv->irq_lock, irqflags);
+ i915_disable_pipestat(dev_priv, 0,
+ PIPE_HOTPLUG_INTERRUPT_ENABLE |
PIPE_HOTPLUG_TV_INTERRUPT_ENABLE);
- spin_unlock_irqrestore(&dev_priv->user_irq_lock, irqflags);
+ spin_unlock_irqrestore(&dev_priv->irq_lock, irqflags);
save_tv_dac = tv_dac = I915_READ(TV_DAC);
save_tv_ctl = tv_ctl = I915_READ(TV_CTL);
@@ -1301,10 +1302,11 @@ intel_tv_detect_type (struct intel_tv *intel_tv)
I915_WRITE(TV_CTL, save_tv_ctl);
/* Restore interrupt config */
- spin_lock_irqsave(&dev_priv->user_irq_lock, irqflags);
- i915_enable_pipestat(dev_priv, 0, PIPE_HOTPLUG_INTERRUPT_ENABLE |
+ spin_lock_irqsave(&dev_priv->irq_lock, irqflags);
+ i915_enable_pipestat(dev_priv, 0,
+ PIPE_HOTPLUG_INTERRUPT_ENABLE |
PIPE_HOTPLUG_TV_INTERRUPT_ENABLE);
- spin_unlock_irqrestore(&dev_priv->user_irq_lock, irqflags);
+ spin_unlock_irqrestore(&dev_priv->irq_lock, irqflags);
return type;
}
diff --git a/drivers/gpu/drm/nouveau/Kconfig b/drivers/gpu/drm/nouveau/Kconfig
index 72730e9ca06..21d6c29c2d2 100644
--- a/drivers/gpu/drm/nouveau/Kconfig
+++ b/drivers/gpu/drm/nouveau/Kconfig
@@ -10,7 +10,7 @@ config DRM_NOUVEAU
select FB
select FRAMEBUFFER_CONSOLE if !EMBEDDED
select FB_BACKLIGHT if DRM_NOUVEAU_BACKLIGHT
- select ACPI_VIDEO if ACPI
+ select ACPI_VIDEO if ACPI && X86 && BACKLIGHT_CLASS_DEVICE && VIDEO_OUTPUT_CONTROL && INPUT
help
Choose this option for open-source nVidia support.
diff --git a/drivers/gpu/drm/nouveau/Makefile b/drivers/gpu/drm/nouveau/Makefile
index 23fa82d667d..e12c97fd8db 100644
--- a/drivers/gpu/drm/nouveau/Makefile
+++ b/drivers/gpu/drm/nouveau/Makefile
@@ -5,27 +5,32 @@
ccflags-y := -Iinclude/drm
nouveau-y := nouveau_drv.o nouveau_state.o nouveau_channel.o nouveau_mem.o \
nouveau_object.o nouveau_irq.o nouveau_notifier.o \
- nouveau_sgdma.o nouveau_dma.o \
+ nouveau_sgdma.o nouveau_dma.o nouveau_util.o \
nouveau_bo.o nouveau_fence.o nouveau_gem.o nouveau_ttm.o \
nouveau_hw.o nouveau_calc.o nouveau_bios.o nouveau_i2c.o \
nouveau_display.o nouveau_connector.o nouveau_fbcon.o \
nouveau_dp.o nouveau_ramht.o \
nouveau_pm.o nouveau_volt.o nouveau_perf.o nouveau_temp.o \
+ nouveau_mm.o nouveau_vm.o \
nv04_timer.o \
nv04_mc.o nv40_mc.o nv50_mc.o \
nv04_fb.o nv10_fb.o nv30_fb.o nv40_fb.o nv50_fb.o nvc0_fb.o \
nv04_fifo.o nv10_fifo.o nv40_fifo.o nv50_fifo.o nvc0_fifo.o \
nv04_graph.o nv10_graph.o nv20_graph.o \
nv40_graph.o nv50_graph.o nvc0_graph.o \
- nv40_grctx.o nv50_grctx.o \
+ nv40_grctx.o nv50_grctx.o nvc0_grctx.o \
+ nv84_crypt.o \
nv04_instmem.o nv50_instmem.o nvc0_instmem.o \
- nv50_crtc.o nv50_dac.o nv50_sor.o \
- nv50_cursor.o nv50_display.o nv50_fbcon.o \
+ nv50_evo.o nv50_crtc.o nv50_dac.o nv50_sor.o \
+ nv50_cursor.o nv50_display.o \
nv04_dac.o nv04_dfp.o nv04_tv.o nv17_tv.o nv17_tv_modes.o \
- nv04_crtc.o nv04_display.o nv04_cursor.o nv04_fbcon.o \
+ nv04_crtc.o nv04_display.o nv04_cursor.o \
+ nv04_fbcon.o nv50_fbcon.o nvc0_fbcon.o \
nv10_gpio.o nv50_gpio.o \
nv50_calc.o \
- nv04_pm.o nv50_pm.o nva3_pm.o
+ nv04_pm.o nv50_pm.o nva3_pm.o \
+ nv50_vram.o nvc0_vram.o \
+ nv50_vm.o nvc0_vm.o
nouveau-$(CONFIG_DRM_NOUVEAU_DEBUG) += nouveau_debugfs.o
nouveau-$(CONFIG_COMPAT) += nouveau_ioc32.o
diff --git a/drivers/gpu/drm/nouveau/nouveau_acpi.c b/drivers/gpu/drm/nouveau/nouveau_acpi.c
index 119152606e4..a54238058dc 100644
--- a/drivers/gpu/drm/nouveau/nouveau_acpi.c
+++ b/drivers/gpu/drm/nouveau/nouveau_acpi.c
@@ -130,10 +130,15 @@ static int nouveau_dsm_init(void)
static int nouveau_dsm_get_client_id(struct pci_dev *pdev)
{
- if (nouveau_dsm_priv.dhandle == DEVICE_ACPI_HANDLE(&pdev->dev))
+ /* easy option one - Intel vendor ID means integrated */
+ if (pdev->vendor == PCI_VENDOR_ID_INTEL)
return VGA_SWITCHEROO_IGD;
- else
- return VGA_SWITCHEROO_DIS;
+
+ /* is this device on Bus 0? - this may need improving */
+ if (pdev->bus->number == 0)
+ return VGA_SWITCHEROO_IGD;
+
+ return VGA_SWITCHEROO_DIS;
}
static struct vga_switcheroo_handler nouveau_dsm_handler = {
diff --git a/drivers/gpu/drm/nouveau/nouveau_bios.c b/drivers/gpu/drm/nouveau/nouveau_bios.c
index b2293576f27..d3046559bf0 100644
--- a/drivers/gpu/drm/nouveau/nouveau_bios.c
+++ b/drivers/gpu/drm/nouveau/nouveau_bios.c
@@ -6053,52 +6053,17 @@ static struct dcb_entry *new_dcb_entry(struct dcb_table *dcb)
return entry;
}
-static void fabricate_vga_output(struct dcb_table *dcb, int i2c, int heads)
+static void fabricate_dcb_output(struct dcb_table *dcb, int type, int i2c,
+ int heads, int or)
{
struct dcb_entry *entry = new_dcb_entry(dcb);
- entry->type = 0;
+ entry->type = type;
entry->i2c_index = i2c;
entry->heads = heads;
- entry->location = DCB_LOC_ON_CHIP;
- entry->or = 1;
-}
-
-static void fabricate_dvi_i_output(struct dcb_table *dcb, bool twoHeads)
-{
- struct dcb_entry *entry = new_dcb_entry(dcb);
-
- entry->type = 2;
- entry->i2c_index = LEGACY_I2C_PANEL;
- entry->heads = twoHeads ? 3 : 1;
- entry->location = !DCB_LOC_ON_CHIP; /* ie OFF CHIP */
- entry->or = 1; /* means |0x10 gets set on CRE_LCD__INDEX */
- entry->duallink_possible = false; /* SiI164 and co. are single link */
-
-#if 0
- /*
- * For dvi-a either crtc probably works, but my card appears to only
- * support dvi-d. "nvidia" still attempts to program it for dvi-a,
- * doing the full fp output setup (program 0x6808.. fp dimension regs,
- * setting 0x680848 to 0x10000111 to enable, maybe setting 0x680880);
- * the monitor picks up the mode res ok and lights up, but no pixel
- * data appears, so the board manufacturer probably connected up the
- * sync lines, but missed the video traces / components
- *
- * with this introduction, dvi-a left as an exercise for the reader.
- */
- fabricate_vga_output(dcb, LEGACY_I2C_PANEL, entry->heads);
-#endif
-}
-
-static void fabricate_tv_output(struct dcb_table *dcb, bool twoHeads)
-{
- struct dcb_entry *entry = new_dcb_entry(dcb);
-
- entry->type = 1;
- entry->i2c_index = LEGACY_I2C_TV;
- entry->heads = twoHeads ? 3 : 1;
- entry->location = !DCB_LOC_ON_CHIP; /* ie OFF CHIP */
+ if (type != OUTPUT_ANALOG)
+ entry->location = !DCB_LOC_ON_CHIP; /* ie OFF CHIP */
+ entry->or = or;
}
static bool
@@ -6365,8 +6330,36 @@ apply_dcb_encoder_quirks(struct drm_device *dev, int idx, u32 *conn, u32 *conf)
return true;
}
+static void
+fabricate_dcb_encoder_table(struct drm_device *dev, struct nvbios *bios)
+{
+ struct dcb_table *dcb = &bios->dcb;
+ int all_heads = (nv_two_heads(dev) ? 3 : 1);
+
+#ifdef __powerpc__
+ /* Apple iMac G4 NV17 */
+ if (of_machine_is_compatible("PowerMac4,5")) {
+ fabricate_dcb_output(dcb, OUTPUT_TMDS, 0, all_heads, 1);
+ fabricate_dcb_output(dcb, OUTPUT_ANALOG, 1, all_heads, 2);
+ return;
+ }
+#endif
+
+ /* Make up some sane defaults */
+ fabricate_dcb_output(dcb, OUTPUT_ANALOG, LEGACY_I2C_CRT, 1, 1);
+
+ if (nv04_tv_identify(dev, bios->legacy.i2c_indices.tv) >= 0)
+ fabricate_dcb_output(dcb, OUTPUT_TV, LEGACY_I2C_TV,
+ all_heads, 0);
+
+ else if (bios->tmds.output0_script_ptr ||
+ bios->tmds.output1_script_ptr)
+ fabricate_dcb_output(dcb, OUTPUT_TMDS, LEGACY_I2C_PANEL,
+ all_heads, 1);
+}
+
static int
-parse_dcb_table(struct drm_device *dev, struct nvbios *bios, bool twoHeads)
+parse_dcb_table(struct drm_device *dev, struct nvbios *bios)
{
struct drm_nouveau_private *dev_priv = dev->dev_private;
struct dcb_table *dcb = &bios->dcb;
@@ -6386,12 +6379,7 @@ parse_dcb_table(struct drm_device *dev, struct nvbios *bios, bool twoHeads)
/* this situation likely means a really old card, pre DCB */
if (dcbptr == 0x0) {
- NV_INFO(dev, "Assuming a CRT output exists\n");
- fabricate_vga_output(dcb, LEGACY_I2C_CRT, 1);
-
- if (nv04_tv_identify(dev, bios->legacy.i2c_indices.tv) >= 0)
- fabricate_tv_output(dcb, twoHeads);
-
+ fabricate_dcb_encoder_table(dev, bios);
return 0;
}
@@ -6451,21 +6439,7 @@ parse_dcb_table(struct drm_device *dev, struct nvbios *bios, bool twoHeads)
*/
NV_TRACEWARN(dev, "No useful information in BIOS output table; "
"adding all possible outputs\n");
- fabricate_vga_output(dcb, LEGACY_I2C_CRT, 1);
-
- /*
- * Attempt to detect TV before DVI because the test
- * for the former is more accurate and it rules the
- * latter out.
- */
- if (nv04_tv_identify(dev,
- bios->legacy.i2c_indices.tv) >= 0)
- fabricate_tv_output(dcb, twoHeads);
-
- else if (bios->tmds.output0_script_ptr ||
- bios->tmds.output1_script_ptr)
- fabricate_dvi_i_output(dcb, twoHeads);
-
+ fabricate_dcb_encoder_table(dev, bios);
return 0;
}
@@ -6859,7 +6833,7 @@ nouveau_bios_init(struct drm_device *dev)
if (ret)
return ret;
- ret = parse_dcb_table(dev, bios, nv_two_heads(dev));
+ ret = parse_dcb_table(dev, bios);
if (ret)
return ret;
diff --git a/drivers/gpu/drm/nouveau/nouveau_bo.c b/drivers/gpu/drm/nouveau/nouveau_bo.c
index c41e1c200ef..a7fae26f465 100644
--- a/drivers/gpu/drm/nouveau/nouveau_bo.c
+++ b/drivers/gpu/drm/nouveau/nouveau_bo.c
@@ -32,6 +32,8 @@
#include "nouveau_drm.h"
#include "nouveau_drv.h"
#include "nouveau_dma.h"
+#include "nouveau_mm.h"
+#include "nouveau_vm.h"
#include <linux/log2.h>
#include <linux/slab.h>
@@ -46,82 +48,51 @@ nouveau_bo_del_ttm(struct ttm_buffer_object *bo)
if (unlikely(nvbo->gem))
DRM_ERROR("bo %p still attached to GEM object\n", bo);
- if (nvbo->tile)
- nv10_mem_expire_tiling(dev, nvbo->tile, NULL);
-
+ nv10_mem_put_tile_region(dev, nvbo->tile, NULL);
+ nouveau_vm_put(&nvbo->vma);
kfree(nvbo);
}
static void
-nouveau_bo_fixup_align(struct drm_device *dev,
- uint32_t tile_mode, uint32_t tile_flags,
- int *align, int *size)
+nouveau_bo_fixup_align(struct nouveau_bo *nvbo, int *align, int *size,
+ int *page_shift)
{
- struct drm_nouveau_private *dev_priv = dev->dev_private;
-
- /*
- * Some of the tile_flags have a periodic structure of N*4096 bytes,
- * align to to that as well as the page size. Align the size to the
- * appropriate boundaries. This does imply that sizes are rounded up
- * 3-7 pages, so be aware of this and do not waste memory by allocating
- * many small buffers.
- */
- if (dev_priv->card_type == NV_50) {
- uint32_t block_size = dev_priv->vram_size >> 15;
- int i;
-
- switch (tile_flags) {
- case 0x1800:
- case 0x2800:
- case 0x4800:
- case 0x7a00:
- if (is_power_of_2(block_size)) {
- for (i = 1; i < 10; i++) {
- *align = 12 * i * block_size;
- if (!(*align % 65536))
- break;
- }
- } else {
- for (i = 1; i < 10; i++) {
- *align = 8 * i * block_size;
- if (!(*align % 65536))
- break;
- }
- }
- *size = roundup(*size, *align);
- break;
- default:
- break;
- }
+ struct drm_nouveau_private *dev_priv = nouveau_bdev(nvbo->bo.bdev);
- } else {
- if (tile_mode) {
+ if (dev_priv->card_type < NV_50) {
+ if (nvbo->tile_mode) {
if (dev_priv->chipset >= 0x40) {
*align = 65536;
- *size = roundup(*size, 64 * tile_mode);
+ *size = roundup(*size, 64 * nvbo->tile_mode);
} else if (dev_priv->chipset >= 0x30) {
*align = 32768;
- *size = roundup(*size, 64 * tile_mode);
+ *size = roundup(*size, 64 * nvbo->tile_mode);
} else if (dev_priv->chipset >= 0x20) {
*align = 16384;
- *size = roundup(*size, 64 * tile_mode);
+ *size = roundup(*size, 64 * nvbo->tile_mode);
} else if (dev_priv->chipset >= 0x10) {
*align = 16384;
- *size = roundup(*size, 32 * tile_mode);
+ *size = roundup(*size, 32 * nvbo->tile_mode);
}
}
+ } else {
+ if (likely(dev_priv->chan_vm)) {
+ if (*size > 256 * 1024)
+ *page_shift = dev_priv->chan_vm->lpg_shift;
+ else
+ *page_shift = dev_priv->chan_vm->spg_shift;
+ } else {
+ *page_shift = 12;
+ }
+
+ *size = roundup(*size, (1 << *page_shift));
+ *align = max((1 << *page_shift), *align);
}
- /* ALIGN works only on powers of two. */
*size = roundup(*size, PAGE_SIZE);
-
- if (dev_priv->card_type == NV_50) {
- *size = roundup(*size, 65536);
- *align = max(65536, *align);
- }
}
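+
+/*
+ * Example, assuming lpg_shift == 16 (64KiB large pages): a 300KiB
+ * request is above the 256KiB threshold, so *size rounds up to 320KiB
+ * and *align to at least 64KiB; smaller buffers take the small-page
+ * shift instead.
+ */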
int
@@ -132,7 +103,7 @@ nouveau_bo_new(struct drm_device *dev, struct nouveau_channel *chan,
{
struct drm_nouveau_private *dev_priv = dev->dev_private;
struct nouveau_bo *nvbo;
- int ret = 0;
+ int ret = 0, page_shift = 0;
nvbo = kzalloc(sizeof(struct nouveau_bo), GFP_KERNEL);
if (!nvbo)
@@ -145,10 +116,18 @@ nouveau_bo_new(struct drm_device *dev, struct nouveau_channel *chan,
nvbo->tile_flags = tile_flags;
nvbo->bo.bdev = &dev_priv->ttm.bdev;
- nouveau_bo_fixup_align(dev, tile_mode, nouveau_bo_tile_layout(nvbo),
- &align, &size);
+ nouveau_bo_fixup_align(nvbo, &align, &size, &page_shift);
align >>= PAGE_SHIFT;
+ if (!nvbo->no_vm && dev_priv->chan_vm) {
+ ret = nouveau_vm_get(dev_priv->chan_vm, size, page_shift,
+ NV_MEM_ACCESS_RW, &nvbo->vma);
+ if (ret) {
+ kfree(nvbo);
+ return ret;
+ }
+ }
+
nouveau_bo_placement_set(nvbo, flags, 0);
nvbo->channel = chan;
@@ -161,6 +140,11 @@ nouveau_bo_new(struct drm_device *dev, struct nouveau_channel *chan,
}
nvbo->channel = NULL;
+ if (nvbo->vma.node) {
+ if (nvbo->bo.mem.mem_type == TTM_PL_VRAM)
+ nvbo->bo.offset = nvbo->vma.offset;
+ }
+
*pnvbo = nvbo;
return 0;
}
@@ -244,7 +228,7 @@ nouveau_bo_pin(struct nouveau_bo *nvbo, uint32_t memtype)
nouveau_bo_placement_set(nvbo, memtype, 0);
- ret = ttm_bo_validate(bo, &nvbo->placement, false, false, false);
+ ret = nouveau_bo_validate(nvbo, false, false, false);
if (ret == 0) {
switch (bo->mem.mem_type) {
case TTM_PL_VRAM:
@@ -280,7 +264,7 @@ nouveau_bo_unpin(struct nouveau_bo *nvbo)
nouveau_bo_placement_set(nvbo, bo->mem.placement, 0);
- ret = ttm_bo_validate(bo, &nvbo->placement, false, false, false);
+ ret = nouveau_bo_validate(nvbo, false, false, false);
if (ret == 0) {
switch (bo->mem.mem_type) {
case TTM_PL_VRAM:
@@ -319,6 +303,25 @@ nouveau_bo_unmap(struct nouveau_bo *nvbo)
ttm_bo_kunmap(&nvbo->kmap);
}
+int
+nouveau_bo_validate(struct nouveau_bo *nvbo, bool interruptible,
+ bool no_wait_reserve, bool no_wait_gpu)
+{
+ int ret;
+
+ ret = ttm_bo_validate(&nvbo->bo, &nvbo->placement, interruptible,
+ no_wait_reserve, no_wait_gpu);
+ if (ret)
+ return ret;
+
+ if (nvbo->vma.node) {
+ if (nvbo->bo.mem.mem_type == TTM_PL_VRAM)
+ nvbo->bo.offset = nvbo->vma.offset;
+ }
+
+ return 0;
+}
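+
+/*
+ * Used in place of a bare ttm_bo_validate() so that bo.offset keeps
+ * tracking the per-VM virtual address whenever the buffer lands in
+ * VRAM (see the pin/unpin and fault paths that now call it).
+ */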
+
u16
nouveau_bo_rd16(struct nouveau_bo *nvbo, unsigned index)
{
@@ -410,37 +413,40 @@ nouveau_bo_init_mem_type(struct ttm_bo_device *bdev, uint32_t type,
man->default_caching = TTM_PL_FLAG_CACHED;
break;
case TTM_PL_VRAM:
- man->func = &ttm_bo_manager_func;
+ if (dev_priv->card_type >= NV_50) {
+ man->func = &nouveau_vram_manager;
+ man->io_reserve_fastpath = false;
+ man->use_io_reserve_lru = true;
+ } else {
+ man->func = &ttm_bo_manager_func;
+ }
man->flags = TTM_MEMTYPE_FLAG_FIXED |
TTM_MEMTYPE_FLAG_MAPPABLE;
man->available_caching = TTM_PL_FLAG_UNCACHED |
TTM_PL_FLAG_WC;
man->default_caching = TTM_PL_FLAG_WC;
- if (dev_priv->card_type == NV_50)
- man->gpu_offset = 0x40000000;
- else
- man->gpu_offset = 0;
break;
case TTM_PL_TT:
man->func = &ttm_bo_manager_func;
switch (dev_priv->gart_info.type) {
case NOUVEAU_GART_AGP:
man->flags = TTM_MEMTYPE_FLAG_MAPPABLE;
- man->available_caching = TTM_PL_FLAG_UNCACHED;
- man->default_caching = TTM_PL_FLAG_UNCACHED;
+ man->available_caching = TTM_PL_FLAG_UNCACHED |
+ TTM_PL_FLAG_WC;
+ man->default_caching = TTM_PL_FLAG_WC;
break;
case NOUVEAU_GART_SGDMA:
man->flags = TTM_MEMTYPE_FLAG_MAPPABLE |
TTM_MEMTYPE_FLAG_CMA;
man->available_caching = TTM_PL_MASK_CACHING;
man->default_caching = TTM_PL_FLAG_CACHED;
+ man->gpu_offset = dev_priv->gart_info.aper_base;
break;
default:
NV_ERROR(dev, "Unknown GART type: %d\n",
dev_priv->gart_info.type);
return -EINVAL;
}
- man->gpu_offset = dev_priv->vm_gart_base;
break;
default:
NV_ERROR(dev, "Unsupported memory type %u\n", (unsigned)type);
@@ -485,16 +491,9 @@ nouveau_bo_move_accel_cleanup(struct nouveau_channel *chan,
if (ret)
return ret;
- if (nvbo->channel) {
- ret = nouveau_fence_sync(fence, nvbo->channel);
- if (ret)
- goto out;
- }
-
ret = ttm_bo_move_accel_cleanup(&nvbo->bo, fence, NULL, evict,
no_wait_reserve, no_wait_gpu, new_mem);
-out:
- nouveau_fence_unref((void *)&fence);
+ nouveau_fence_unref(&fence);
return ret;
}
@@ -516,6 +515,58 @@ nouveau_bo_mem_ctxdma(struct ttm_buffer_object *bo,
}
static int
+nvc0_bo_move_m2mf(struct nouveau_channel *chan, struct ttm_buffer_object *bo,
+ struct ttm_mem_reg *old_mem, struct ttm_mem_reg *new_mem)
+{
+ struct drm_nouveau_private *dev_priv = nouveau_bdev(bo->bdev);
+ struct nouveau_bo *nvbo = nouveau_bo(bo);
+ u64 src_offset = old_mem->start << PAGE_SHIFT;
+ u64 dst_offset = new_mem->start << PAGE_SHIFT;
+ u32 page_count = new_mem->num_pages;
+ int ret;
+
+ if (!nvbo->no_vm) {
+ if (old_mem->mem_type == TTM_PL_VRAM)
+ src_offset = nvbo->vma.offset;
+ else
+ src_offset += dev_priv->gart_info.aper_base;
+
+ if (new_mem->mem_type == TTM_PL_VRAM)
+ dst_offset = nvbo->vma.offset;
+ else
+ dst_offset += dev_priv->gart_info.aper_base;
+ }
+
+ page_count = new_mem->num_pages;
+ while (page_count) {
+ int line_count = (page_count > 2047) ? 2047 : page_count;
+
+ ret = RING_SPACE(chan, 12);
+ if (ret)
+ return ret;
+
+ BEGIN_NVC0(chan, 2, NvSubM2MF, 0x0238, 2);
+ OUT_RING (chan, upper_32_bits(dst_offset));
+ OUT_RING (chan, lower_32_bits(dst_offset));
+ BEGIN_NVC0(chan, 2, NvSubM2MF, 0x030c, 6);
+ OUT_RING (chan, upper_32_bits(src_offset));
+ OUT_RING (chan, lower_32_bits(src_offset));
+ OUT_RING (chan, PAGE_SIZE); /* src_pitch */
+ OUT_RING (chan, PAGE_SIZE); /* dst_pitch */
+ OUT_RING (chan, PAGE_SIZE); /* line_length */
+ OUT_RING (chan, line_count);
+ BEGIN_NVC0(chan, 2, NvSubM2MF, 0x0300, 1);
+ OUT_RING (chan, 0x00100110);
+
+ page_count -= line_count;
+ src_offset += (PAGE_SIZE * line_count);
+ dst_offset += (PAGE_SIZE * line_count);
+ }
+
+ return 0;
+}
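+
+/*
+ * The copy is issued one PAGE_SIZE-wide line per page, at most 2047
+ * lines per submission; e.g. a 4096-page buffer takes three passes
+ * (2047 + 2047 + 2 lines).
+ */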
+
+static int
nv50_bo_move_m2mf(struct nouveau_channel *chan, struct ttm_buffer_object *bo,
struct ttm_mem_reg *old_mem, struct ttm_mem_reg *new_mem)
{
@@ -529,14 +580,14 @@ nv50_bo_move_m2mf(struct nouveau_channel *chan, struct ttm_buffer_object *bo,
dst_offset = new_mem->start << PAGE_SHIFT;
if (!nvbo->no_vm) {
if (old_mem->mem_type == TTM_PL_VRAM)
- src_offset += dev_priv->vm_vram_base;
+ src_offset = nvbo->vma.offset;
else
- src_offset += dev_priv->vm_gart_base;
+ src_offset += dev_priv->gart_info.aper_base;
if (new_mem->mem_type == TTM_PL_VRAM)
- dst_offset += dev_priv->vm_vram_base;
+ dst_offset = nvbo->vma.offset;
else
- dst_offset += dev_priv->vm_gart_base;
+ dst_offset += dev_priv->gart_info.aper_base;
}
ret = RING_SPACE(chan, 3);
@@ -683,17 +734,27 @@ nouveau_bo_move_m2mf(struct ttm_buffer_object *bo, int evict, bool intr,
int ret;
chan = nvbo->channel;
- if (!chan || nvbo->no_vm)
+ if (!chan || nvbo->no_vm) {
chan = dev_priv->channel;
+ mutex_lock_nested(&chan->mutex, NOUVEAU_KCHANNEL_MUTEX);
+ }
if (dev_priv->card_type < NV_50)
ret = nv04_bo_move_m2mf(chan, bo, &bo->mem, new_mem);
else
+ if (dev_priv->card_type < NV_C0)
ret = nv50_bo_move_m2mf(chan, bo, &bo->mem, new_mem);
- if (ret)
- return ret;
+ else
+ ret = nvc0_bo_move_m2mf(chan, bo, &bo->mem, new_mem);
+ if (ret == 0) {
+ ret = nouveau_bo_move_accel_cleanup(chan, nvbo, evict,
+ no_wait_reserve,
+ no_wait_gpu, new_mem);
+ }
- return nouveau_bo_move_accel_cleanup(chan, nvbo, evict, no_wait_reserve, no_wait_gpu, new_mem);
+ if (chan == dev_priv->channel)
+ mutex_unlock(&chan->mutex);
+ return ret;
}
static int
@@ -771,7 +832,6 @@ nouveau_bo_vm_bind(struct ttm_buffer_object *bo, struct ttm_mem_reg *new_mem,
struct drm_device *dev = dev_priv->dev;
struct nouveau_bo *nvbo = nouveau_bo(bo);
uint64_t offset;
- int ret;
if (nvbo->no_vm || new_mem->mem_type != TTM_PL_VRAM) {
/* Nothing to do. */
@@ -781,18 +841,12 @@ nouveau_bo_vm_bind(struct ttm_buffer_object *bo, struct ttm_mem_reg *new_mem,
offset = new_mem->start << PAGE_SHIFT;
- if (dev_priv->card_type == NV_50) {
- ret = nv50_mem_vm_bind_linear(dev,
- offset + dev_priv->vm_vram_base,
- new_mem->size,
- nouveau_bo_tile_layout(nvbo),
- offset);
- if (ret)
- return ret;
-
+ if (dev_priv->chan_vm) {
+ nouveau_vm_map(&nvbo->vma, new_mem->mm_node);
} else if (dev_priv->card_type >= NV_10) {
*new_tile = nv10_mem_set_tiling(dev, offset, new_mem->size,
- nvbo->tile_mode);
+ nvbo->tile_mode,
+ nvbo->tile_flags);
}
return 0;
@@ -808,9 +862,7 @@ nouveau_bo_vm_cleanup(struct ttm_buffer_object *bo,
if (dev_priv->card_type >= NV_10 &&
dev_priv->card_type < NV_50) {
- if (*old_tile)
- nv10_mem_expire_tiling(dev, *old_tile, bo->sync_obj);
-
+ nv10_mem_put_tile_region(dev, *old_tile, bo->sync_obj);
*old_tile = new_tile;
}
}
@@ -879,6 +931,7 @@ nouveau_ttm_io_mem_reserve(struct ttm_bo_device *bdev, struct ttm_mem_reg *mem)
struct ttm_mem_type_manager *man = &bdev->man[mem->mem_type];
struct drm_nouveau_private *dev_priv = nouveau_bdev(bdev);
struct drm_device *dev = dev_priv->dev;
+ int ret;
mem->bus.addr = NULL;
mem->bus.offset = 0;
@@ -901,9 +954,40 @@ nouveau_ttm_io_mem_reserve(struct ttm_bo_device *bdev, struct ttm_mem_reg *mem)
#endif
break;
case TTM_PL_VRAM:
- mem->bus.offset = mem->start << PAGE_SHIFT;
+ {
+ struct nouveau_vram *vram = mem->mm_node;
+ u8 page_shift;
+
+ if (!dev_priv->bar1_vm) {
+ mem->bus.offset = mem->start << PAGE_SHIFT;
+ mem->bus.base = pci_resource_start(dev->pdev, 1);
+ mem->bus.is_iomem = true;
+ break;
+ }
+
+ if (dev_priv->card_type == NV_C0)
+ page_shift = vram->page_shift;
+ else
+ page_shift = 12;
+
+ ret = nouveau_vm_get(dev_priv->bar1_vm, mem->bus.size,
+ page_shift, NV_MEM_ACCESS_RW,
+ &vram->bar_vma);
+ if (ret)
+ return ret;
+
+ nouveau_vm_map(&vram->bar_vma, vram);
+
+ mem->bus.offset = vram->bar_vma.offset;
+ if (dev_priv->card_type == NV_50) /*XXX*/
+ mem->bus.offset -= 0x0020000000ULL;
mem->bus.base = pci_resource_start(dev->pdev, 1);
mem->bus.is_iomem = true;
+ }
break;
default:
return -EINVAL;
@@ -914,6 +998,17 @@ nouveau_ttm_io_mem_reserve(struct ttm_bo_device *bdev, struct ttm_mem_reg *mem)
static void
nouveau_ttm_io_mem_free(struct ttm_bo_device *bdev, struct ttm_mem_reg *mem)
{
+ struct drm_nouveau_private *dev_priv = nouveau_bdev(bdev);
+ struct nouveau_vram *vram = mem->mm_node;
+
+ if (!dev_priv->bar1_vm || mem->mem_type != TTM_PL_VRAM)
+ return;
+
+ if (!vram->bar_vma.node)
+ return;
+
+ nouveau_vm_unmap(&vram->bar_vma);
+ nouveau_vm_put(&vram->bar_vma);
}
static int
@@ -939,7 +1034,23 @@ nouveau_ttm_fault_reserve_notify(struct ttm_buffer_object *bo)
nvbo->placement.fpfn = 0;
nvbo->placement.lpfn = dev_priv->fb_mappable_pages;
nouveau_bo_placement_set(nvbo, TTM_PL_VRAM, 0);
- return ttm_bo_validate(bo, &nvbo->placement, false, true, false);
+ return nouveau_bo_validate(nvbo, false, true, false);
+}
+
+void
+nouveau_bo_fence(struct nouveau_bo *nvbo, struct nouveau_fence *fence)
+{
+ struct nouveau_fence *old_fence;
+
+ if (likely(fence))
+ nouveau_fence_ref(fence);
+
+ spin_lock(&nvbo->bo.bdev->fence_lock);
+ old_fence = nvbo->bo.sync_obj;
+ nvbo->bo.sync_obj = fence;
+ spin_unlock(&nvbo->bo.bdev->fence_lock);
+
+ nouveau_fence_unref(&old_fence);
}
struct ttm_bo_driver nouveau_bo_driver = {
@@ -949,11 +1060,11 @@ struct ttm_bo_driver nouveau_bo_driver = {
.evict_flags = nouveau_bo_evict_flags,
.move = nouveau_bo_move,
.verify_access = nouveau_bo_verify_access,
- .sync_obj_signaled = nouveau_fence_signalled,
- .sync_obj_wait = nouveau_fence_wait,
- .sync_obj_flush = nouveau_fence_flush,
- .sync_obj_unref = nouveau_fence_unref,
- .sync_obj_ref = nouveau_fence_ref,
+ .sync_obj_signaled = __nouveau_fence_signalled,
+ .sync_obj_wait = __nouveau_fence_wait,
+ .sync_obj_flush = __nouveau_fence_flush,
+ .sync_obj_unref = __nouveau_fence_unref,
+ .sync_obj_ref = __nouveau_fence_ref,
.fault_reserve_notify = &nouveau_ttm_fault_reserve_notify,
.io_mem_reserve = &nouveau_ttm_io_mem_reserve,
.io_mem_free = &nouveau_ttm_io_mem_free,
diff --git a/drivers/gpu/drm/nouveau/nouveau_channel.c b/drivers/gpu/drm/nouveau/nouveau_channel.c
index 373950e3481..3960d66d7ab 100644
--- a/drivers/gpu/drm/nouveau/nouveau_channel.c
+++ b/drivers/gpu/drm/nouveau/nouveau_channel.c
@@ -38,23 +38,28 @@ nouveau_channel_pushbuf_ctxdma_init(struct nouveau_channel *chan)
int ret = 0;
if (dev_priv->card_type >= NV_50) {
- ret = nouveau_gpuobj_dma_new(chan, NV_CLASS_DMA_IN_MEMORY, 0,
- dev_priv->vm_end, NV_DMA_ACCESS_RO,
- NV_DMA_TARGET_AGP, &pushbuf);
+ if (dev_priv->card_type < NV_C0) {
+ ret = nouveau_gpuobj_dma_new(chan,
+ NV_CLASS_DMA_IN_MEMORY, 0,
+ (1ULL << 40),
+ NV_MEM_ACCESS_RO,
+ NV_MEM_TARGET_VM,
+ &pushbuf);
+ }
chan->pushbuf_base = pb->bo.offset;
} else
if (pb->bo.mem.mem_type == TTM_PL_TT) {
- ret = nouveau_gpuobj_gart_dma_new(chan, 0,
- dev_priv->gart_info.aper_size,
- NV_DMA_ACCESS_RO, &pushbuf,
- NULL);
+ ret = nouveau_gpuobj_dma_new(chan, NV_CLASS_DMA_IN_MEMORY, 0,
+ dev_priv->gart_info.aper_size,
+ NV_MEM_ACCESS_RO,
+ NV_MEM_TARGET_GART, &pushbuf);
chan->pushbuf_base = pb->bo.mem.start << PAGE_SHIFT;
} else
if (dev_priv->card_type != NV_04) {
ret = nouveau_gpuobj_dma_new(chan, NV_CLASS_DMA_IN_MEMORY, 0,
dev_priv->fb_available_size,
- NV_DMA_ACCESS_RO,
- NV_DMA_TARGET_VIDMEM, &pushbuf);
+ NV_MEM_ACCESS_RO,
+ NV_MEM_TARGET_VRAM, &pushbuf);
chan->pushbuf_base = pb->bo.mem.start << PAGE_SHIFT;
} else {
/* NV04 cmdbuf hack, from original ddx.. not sure of it's
@@ -62,17 +67,16 @@ nouveau_channel_pushbuf_ctxdma_init(struct nouveau_channel *chan)
* VRAM.
*/
ret = nouveau_gpuobj_dma_new(chan, NV_CLASS_DMA_IN_MEMORY,
- pci_resource_start(dev->pdev,
- 1),
+ pci_resource_start(dev->pdev, 1),
dev_priv->fb_available_size,
- NV_DMA_ACCESS_RO,
- NV_DMA_TARGET_PCI, &pushbuf);
+ NV_MEM_ACCESS_RO,
+ NV_MEM_TARGET_PCI, &pushbuf);
chan->pushbuf_base = pb->bo.mem.start << PAGE_SHIFT;
}
nouveau_gpuobj_ref(pushbuf, &chan->pushbuf);
nouveau_gpuobj_ref(NULL, &pushbuf);
- return 0;
+ return ret;
}
static struct nouveau_bo *
@@ -100,6 +104,13 @@ nouveau_channel_user_pushbuf_alloc(struct drm_device *dev)
return NULL;
}
+ ret = nouveau_bo_map(pushbuf);
+ if (ret) {
+ nouveau_bo_unpin(pushbuf);
+ nouveau_bo_ref(NULL, &pushbuf);
+ return NULL;
+ }
+
return pushbuf;
}
@@ -107,74 +118,59 @@ nouveau_channel_user_pushbuf_alloc(struct drm_device *dev)
int
nouveau_channel_alloc(struct drm_device *dev, struct nouveau_channel **chan_ret,
struct drm_file *file_priv,
- uint32_t vram_handle, uint32_t tt_handle)
+ uint32_t vram_handle, uint32_t gart_handle)
{
struct drm_nouveau_private *dev_priv = dev->dev_private;
- struct nouveau_pgraph_engine *pgraph = &dev_priv->engine.graph;
struct nouveau_fifo_engine *pfifo = &dev_priv->engine.fifo;
struct nouveau_channel *chan;
- int channel, user;
+ unsigned long flags;
int ret;
- /*
- * Alright, here is the full story
- * Nvidia cards have multiple hw fifo contexts (praise them for that,
- * no complicated crash-prone context switches)
- * We allocate a new context for each app and let it write to it
- * directly (woo, full userspace command submission !)
- * When there are no more contexts, you lost
- */
- for (channel = 0; channel < pfifo->channels; channel++) {
- if (dev_priv->fifos[channel] == NULL)
+ /* allocate and lock channel structure */
+ chan = kzalloc(sizeof(*chan), GFP_KERNEL);
+ if (!chan)
+ return -ENOMEM;
+ chan->dev = dev;
+ chan->file_priv = file_priv;
+ chan->vram_handle = vram_handle;
+ chan->gart_handle = gart_handle;
+
+ kref_init(&chan->ref);
+ atomic_set(&chan->users, 1);
+ mutex_init(&chan->mutex);
+ mutex_lock(&chan->mutex);
+
+ /* allocate hw channel id */
+ spin_lock_irqsave(&dev_priv->channels.lock, flags);
+ for (chan->id = 0; chan->id < pfifo->channels; chan->id++) {
+ if (!dev_priv->channels.ptr[chan->id]) {
+ nouveau_channel_ref(chan, &dev_priv->channels.ptr[chan->id]);
break;
+ }
}
+ spin_unlock_irqrestore(&dev_priv->channels.lock, flags);
- /* no more fifos. you lost. */
- if (channel == pfifo->channels)
- return -EINVAL;
+ if (chan->id == pfifo->channels) {
+ mutex_unlock(&chan->mutex);
+ kfree(chan);
+ return -ENODEV;
+ }
- dev_priv->fifos[channel] = kzalloc(sizeof(struct nouveau_channel),
- GFP_KERNEL);
- if (!dev_priv->fifos[channel])
- return -ENOMEM;
- chan = dev_priv->fifos[channel];
+ NV_DEBUG(dev, "initialising channel %d\n", chan->id);
INIT_LIST_HEAD(&chan->nvsw.vbl_wait);
+ INIT_LIST_HEAD(&chan->nvsw.flip);
INIT_LIST_HEAD(&chan->fence.pending);
- chan->dev = dev;
- chan->id = channel;
- chan->file_priv = file_priv;
- chan->vram_handle = vram_handle;
- chan->gart_handle = tt_handle;
-
- NV_INFO(dev, "Allocating FIFO number %d\n", channel);
/* Allocate DMA push buffer */
chan->pushbuf_bo = nouveau_channel_user_pushbuf_alloc(dev);
if (!chan->pushbuf_bo) {
ret = -ENOMEM;
NV_ERROR(dev, "pushbuf %d\n", ret);
- nouveau_channel_free(chan);
+ nouveau_channel_put(&chan);
return ret;
}
nouveau_dma_pre_init(chan);
-
- /* Locate channel's user control regs */
- if (dev_priv->card_type < NV_40)
- user = NV03_USER(channel);
- else
- if (dev_priv->card_type < NV_50)
- user = NV40_USER(channel);
- else
- user = NV50_USER(channel);
-
- chan->user = ioremap(pci_resource_start(dev->pdev, 0) + user,
- PAGE_SIZE);
- if (!chan->user) {
- NV_ERROR(dev, "ioremap of regs failed.\n");
- nouveau_channel_free(chan);
- return -ENOMEM;
- }
chan->user_put = 0x40;
chan->user_get = 0x44;
@@ -182,15 +178,15 @@ nouveau_channel_alloc(struct drm_device *dev, struct nouveau_channel **chan_ret,
ret = nouveau_notifier_init_channel(chan);
if (ret) {
NV_ERROR(dev, "ntfy %d\n", ret);
- nouveau_channel_free(chan);
+ nouveau_channel_put(&chan);
return ret;
}
/* Setup channel's default objects */
- ret = nouveau_gpuobj_channel_init(chan, vram_handle, tt_handle);
+ ret = nouveau_gpuobj_channel_init(chan, vram_handle, gart_handle);
if (ret) {
NV_ERROR(dev, "gpuobj %d\n", ret);
- nouveau_channel_free(chan);
+ nouveau_channel_put(&chan);
return ret;
}
@@ -198,24 +194,17 @@ nouveau_channel_alloc(struct drm_device *dev, struct nouveau_channel **chan_ret,
ret = nouveau_channel_pushbuf_ctxdma_init(chan);
if (ret) {
NV_ERROR(dev, "pbctxdma %d\n", ret);
- nouveau_channel_free(chan);
+ nouveau_channel_put(&chan);
return ret;
}
/* disable the fifo caches */
pfifo->reassign(dev, false);
- /* Create a graphics context for new channel */
- ret = pgraph->create_context(chan);
- if (ret) {
- nouveau_channel_free(chan);
- return ret;
- }
-
/* Construct initial RAMFC for new channel */
ret = pfifo->create_context(chan);
if (ret) {
- nouveau_channel_free(chan);
+ nouveau_channel_put(&chan);
return ret;
}
@@ -225,83 +214,111 @@ nouveau_channel_alloc(struct drm_device *dev, struct nouveau_channel **chan_ret,
if (!ret)
ret = nouveau_fence_channel_init(chan);
if (ret) {
- nouveau_channel_free(chan);
+ nouveau_channel_put(&chan);
return ret;
}
nouveau_debugfs_channel_init(chan);
- NV_INFO(dev, "%s: initialised FIFO %d\n", __func__, channel);
+ NV_DEBUG(dev, "channel %d initialised\n", chan->id);
*chan_ret = chan;
return 0;
}
-/* stops a fifo */
+struct nouveau_channel *
+nouveau_channel_get_unlocked(struct nouveau_channel *ref)
+{
+ struct nouveau_channel *chan = NULL;
+
+ if (likely(ref && atomic_inc_not_zero(&ref->users)))
+ nouveau_channel_ref(ref, &chan);
+
+ return chan;
+}
+
+struct nouveau_channel *
+nouveau_channel_get(struct drm_device *dev, struct drm_file *file_priv, int id)
+{
+ struct drm_nouveau_private *dev_priv = dev->dev_private;
+ struct nouveau_channel *chan;
+ unsigned long flags;
+
+ if (unlikely(id < 0 || id >= NOUVEAU_MAX_CHANNEL_NR))
+ return ERR_PTR(-EINVAL);
+
+ spin_lock_irqsave(&dev_priv->channels.lock, flags);
+ chan = nouveau_channel_get_unlocked(dev_priv->channels.ptr[id]);
+ spin_unlock_irqrestore(&dev_priv->channels.lock, flags);
+
+ if (unlikely(!chan))
+ return ERR_PTR(-EINVAL);
+
+ if (unlikely(file_priv && chan->file_priv != file_priv)) {
+ nouveau_channel_put_unlocked(&chan);
+ return ERR_PTR(-EINVAL);
+ }
+
+ mutex_lock(&chan->mutex);
+ return chan;
+}
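+
+/*
+ * A typical, hypothetical consumer: the lookup takes a reference and
+ * the channel mutex, both dropped again by nouveau_channel_put().
+ *
+ * chan = nouveau_channel_get(dev, file_priv, id);
+ * if (IS_ERR(chan))
+ * return PTR_ERR(chan);
+ * ... touch the channel ...
+ * nouveau_channel_put(&chan);
+ */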
+
void
-nouveau_channel_free(struct nouveau_channel *chan)
+nouveau_channel_put_unlocked(struct nouveau_channel **pchan)
{
+ struct nouveau_channel *chan = *pchan;
struct drm_device *dev = chan->dev;
struct drm_nouveau_private *dev_priv = dev->dev_private;
- struct nouveau_pgraph_engine *pgraph = &dev_priv->engine.graph;
struct nouveau_fifo_engine *pfifo = &dev_priv->engine.fifo;
+ struct nouveau_pgraph_engine *pgraph = &dev_priv->engine.graph;
+ struct nouveau_crypt_engine *pcrypt = &dev_priv->engine.crypt;
unsigned long flags;
- int ret;
- NV_INFO(dev, "%s: freeing fifo %d\n", __func__, chan->id);
+ /* decrement the refcount, and we're done if there's still refs */
+ if (likely(!atomic_dec_and_test(&chan->users))) {
+ nouveau_channel_ref(NULL, pchan);
+ return;
+ }
+ /* no one wants the channel anymore */
+ NV_DEBUG(dev, "freeing channel %d\n", chan->id);
nouveau_debugfs_channel_fini(chan);
- /* Give outstanding push buffers a chance to complete */
- nouveau_fence_update(chan);
- if (chan->fence.sequence != chan->fence.sequence_ack) {
- struct nouveau_fence *fence = NULL;
+ /* give it chance to idle */
+ nouveau_channel_idle(chan);
- ret = nouveau_fence_new(chan, &fence, true);
- if (ret == 0) {
- ret = nouveau_fence_wait(fence, NULL, false, false);
- nouveau_fence_unref((void *)&fence);
- }
-
- if (ret)
- NV_ERROR(dev, "Failed to idle channel %d.\n", chan->id);
- }
-
- /* Ensure all outstanding fences are signaled. They should be if the
+ /* ensure all outstanding fences are signaled. they should be if the
* above attempts at idling were OK, but if we failed this'll tell TTM
* we're done with the buffers.
*/
nouveau_fence_channel_fini(chan);
- /* This will prevent pfifo from switching channels. */
+ /* boot it off the hardware */
pfifo->reassign(dev, false);
- /* We want to give pgraph a chance to idle and get rid of all potential
- * errors. We need to do this before the lock, otherwise the irq handler
- * is unable to process them.
+ /* We want to give pgraph a chance to idle and get rid of all
+ * potential errors. We need to do this without the context
+ * switch lock held, otherwise the irq handler is unable to
+ * process them.
*/
if (pgraph->channel(dev) == chan)
nouveau_wait_for_idle(dev);
- spin_lock_irqsave(&dev_priv->context_switch_lock, flags);
-
- pgraph->fifo_access(dev, false);
- if (pgraph->channel(dev) == chan)
- pgraph->unload_context(dev);
- pgraph->destroy_context(chan);
- pgraph->fifo_access(dev, true);
-
- if (pfifo->channel_id(dev) == chan->id) {
- pfifo->disable(dev);
- pfifo->unload_context(dev);
- pfifo->enable(dev);
- }
+ /* destroy the engine specific contexts */
pfifo->destroy_context(chan);
+ pgraph->destroy_context(chan);
+ if (pcrypt->destroy_context)
+ pcrypt->destroy_context(chan);
pfifo->reassign(dev, true);
- spin_unlock_irqrestore(&dev_priv->context_switch_lock, flags);
+ /* aside from its resources, the channel should now be dead,
+ * remove it from the channel list
+ */
+ spin_lock_irqsave(&dev_priv->channels.lock, flags);
+ nouveau_channel_ref(NULL, &dev_priv->channels.ptr[chan->id]);
+ spin_unlock_irqrestore(&dev_priv->channels.lock, flags);
- /* Release the channel's resources */
+ /* destroy any resources the channel owned */
nouveau_gpuobj_ref(NULL, &chan->pushbuf);
if (chan->pushbuf_bo) {
nouveau_bo_unmap(chan->pushbuf_bo);
@@ -310,44 +327,80 @@ nouveau_channel_free(struct nouveau_channel *chan)
}
nouveau_gpuobj_channel_takedown(chan);
nouveau_notifier_takedown_channel(chan);
- if (chan->user)
- iounmap(chan->user);
- dev_priv->fifos[chan->id] = NULL;
+ nouveau_channel_ref(NULL, pchan);
+}
+
+void
+nouveau_channel_put(struct nouveau_channel **pchan)
+{
+ mutex_unlock(&(*pchan)->mutex);
+ nouveau_channel_put_unlocked(pchan);
+}
+
+static void
+nouveau_channel_del(struct kref *ref)
+{
+ struct nouveau_channel *chan =
+ container_of(ref, struct nouveau_channel, ref);
+
kfree(chan);
}
+void
+nouveau_channel_ref(struct nouveau_channel *chan,
+ struct nouveau_channel **pchan)
+{
+ if (chan)
+ kref_get(&chan->ref);
+
+ if (*pchan)
+ kref_put(&(*pchan)->ref, nouveau_channel_del);
+
+ *pchan = chan;
+}
+
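/* Usage sketch for the swap helper above: the new reference is always
 * taken before the old one is dropped, so the sequence is safe even
 * when both pointers refer to the same channel.
 *
 *	struct nouveau_channel *cur = NULL;
 *
 *	nouveau_channel_ref(chan, &cur);	cur = chan, ref taken
 *	nouveau_channel_ref(NULL, &cur);	ref dropped, cur = NULL
 */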
+void
+nouveau_channel_idle(struct nouveau_channel *chan)
+{
+ struct drm_device *dev = chan->dev;
+ struct nouveau_fence *fence = NULL;
+ int ret;
+
+ nouveau_fence_update(chan);
+
+ if (chan->fence.sequence != chan->fence.sequence_ack) {
+ ret = nouveau_fence_new(chan, &fence, true);
+ if (!ret) {
+ ret = nouveau_fence_wait(fence, false, false);
+ nouveau_fence_unref(&fence);
+ }
+
+ if (ret)
+ NV_ERROR(dev, "Failed to idle channel %d.\n", chan->id);
+ }
+}
+
/* cleans up all the fifos from file_priv */
void
nouveau_channel_cleanup(struct drm_device *dev, struct drm_file *file_priv)
{
struct drm_nouveau_private *dev_priv = dev->dev_private;
struct nouveau_engine *engine = &dev_priv->engine;
+ struct nouveau_channel *chan;
int i;
NV_DEBUG(dev, "clearing FIFO enables from file_priv\n");
for (i = 0; i < engine->fifo.channels; i++) {
- struct nouveau_channel *chan = dev_priv->fifos[i];
+ chan = nouveau_channel_get(dev, file_priv, i);
+ if (IS_ERR(chan))
+ continue;
- if (chan && chan->file_priv == file_priv)
- nouveau_channel_free(chan);
+ atomic_dec(&chan->users);
+ nouveau_channel_put(&chan);
}
}
-int
-nouveau_channel_owner(struct drm_device *dev, struct drm_file *file_priv,
- int channel)
-{
- struct drm_nouveau_private *dev_priv = dev->dev_private;
- struct nouveau_engine *engine = &dev_priv->engine;
-
- if (channel >= engine->fifo.channels)
- return 0;
- if (dev_priv->fifos[channel] == NULL)
- return 0;
-
- return (dev_priv->fifos[channel]->file_priv == file_priv);
-}
/***********************************
* ioctls wrapping the functions
@@ -383,36 +436,44 @@ nouveau_ioctl_fifo_alloc(struct drm_device *dev, void *data,
else
init->pushbuf_domains = NOUVEAU_GEM_DOMAIN_GART;
- init->subchan[0].handle = NvM2MF;
- if (dev_priv->card_type < NV_50)
- init->subchan[0].grclass = 0x0039;
- else
- init->subchan[0].grclass = 0x5039;
- init->subchan[1].handle = NvSw;
- init->subchan[1].grclass = NV_SW;
- init->nr_subchan = 2;
+ if (dev_priv->card_type < NV_C0) {
+ init->subchan[0].handle = NvM2MF;
+ if (dev_priv->card_type < NV_50)
+ init->subchan[0].grclass = 0x0039;
+ else
+ init->subchan[0].grclass = 0x5039;
+ init->subchan[1].handle = NvSw;
+ init->subchan[1].grclass = NV_SW;
+ init->nr_subchan = 2;
+ } else {
+ init->subchan[0].handle = 0x9039;
+ init->subchan[0].grclass = 0x9039;
+ init->nr_subchan = 1;
+ }
/* Named memory object area */
ret = drm_gem_handle_create(file_priv, chan->notifier_bo->gem,
&init->notifier_handle);
- if (ret) {
- nouveau_channel_free(chan);
- return ret;
- }
- return 0;
+ if (ret == 0)
+ atomic_inc(&chan->users); /* userspace reference */
+ nouveau_channel_put(&chan);
+ return ret;
}
static int
nouveau_ioctl_fifo_free(struct drm_device *dev, void *data,
struct drm_file *file_priv)
{
- struct drm_nouveau_channel_free *cfree = data;
+ struct drm_nouveau_channel_free *req = data;
struct nouveau_channel *chan;
- NOUVEAU_GET_USER_CHANNEL_WITH_RETURN(cfree->channel, file_priv, chan);
+ chan = nouveau_channel_get(dev, file_priv, req->channel);
+ if (IS_ERR(chan))
+ return PTR_ERR(chan);
- nouveau_channel_free(chan);
+ atomic_dec(&chan->users);
+ nouveau_channel_put(&chan);
return 0;
}
@@ -421,18 +482,18 @@ nouveau_ioctl_fifo_free(struct drm_device *dev, void *data,
***********************************/
struct drm_ioctl_desc nouveau_ioctls[] = {
- DRM_IOCTL_DEF_DRV(NOUVEAU_GETPARAM, nouveau_ioctl_getparam, DRM_AUTH),
- DRM_IOCTL_DEF_DRV(NOUVEAU_SETPARAM, nouveau_ioctl_setparam, DRM_AUTH|DRM_MASTER|DRM_ROOT_ONLY),
- DRM_IOCTL_DEF_DRV(NOUVEAU_CHANNEL_ALLOC, nouveau_ioctl_fifo_alloc, DRM_AUTH),
- DRM_IOCTL_DEF_DRV(NOUVEAU_CHANNEL_FREE, nouveau_ioctl_fifo_free, DRM_AUTH),
- DRM_IOCTL_DEF_DRV(NOUVEAU_GROBJ_ALLOC, nouveau_ioctl_grobj_alloc, DRM_AUTH),
- DRM_IOCTL_DEF_DRV(NOUVEAU_NOTIFIEROBJ_ALLOC, nouveau_ioctl_notifier_alloc, DRM_AUTH),
- DRM_IOCTL_DEF_DRV(NOUVEAU_GPUOBJ_FREE, nouveau_ioctl_gpuobj_free, DRM_AUTH),
- DRM_IOCTL_DEF_DRV(NOUVEAU_GEM_NEW, nouveau_gem_ioctl_new, DRM_AUTH),
- DRM_IOCTL_DEF_DRV(NOUVEAU_GEM_PUSHBUF, nouveau_gem_ioctl_pushbuf, DRM_AUTH),
- DRM_IOCTL_DEF_DRV(NOUVEAU_GEM_CPU_PREP, nouveau_gem_ioctl_cpu_prep, DRM_AUTH),
- DRM_IOCTL_DEF_DRV(NOUVEAU_GEM_CPU_FINI, nouveau_gem_ioctl_cpu_fini, DRM_AUTH),
- DRM_IOCTL_DEF_DRV(NOUVEAU_GEM_INFO, nouveau_gem_ioctl_info, DRM_AUTH),
+ DRM_IOCTL_DEF_DRV(NOUVEAU_GETPARAM, nouveau_ioctl_getparam, DRM_UNLOCKED|DRM_AUTH),
+ DRM_IOCTL_DEF_DRV(NOUVEAU_SETPARAM, nouveau_ioctl_setparam, DRM_UNLOCKED|DRM_AUTH|DRM_MASTER|DRM_ROOT_ONLY),
+ DRM_IOCTL_DEF_DRV(NOUVEAU_CHANNEL_ALLOC, nouveau_ioctl_fifo_alloc, DRM_UNLOCKED|DRM_AUTH),
+ DRM_IOCTL_DEF_DRV(NOUVEAU_CHANNEL_FREE, nouveau_ioctl_fifo_free, DRM_UNLOCKED|DRM_AUTH),
+ DRM_IOCTL_DEF_DRV(NOUVEAU_GROBJ_ALLOC, nouveau_ioctl_grobj_alloc, DRM_UNLOCKED|DRM_AUTH),
+ DRM_IOCTL_DEF_DRV(NOUVEAU_NOTIFIEROBJ_ALLOC, nouveau_ioctl_notifier_alloc, DRM_UNLOCKED|DRM_AUTH),
+ DRM_IOCTL_DEF_DRV(NOUVEAU_GPUOBJ_FREE, nouveau_ioctl_gpuobj_free, DRM_UNLOCKED|DRM_AUTH),
+ DRM_IOCTL_DEF_DRV(NOUVEAU_GEM_NEW, nouveau_gem_ioctl_new, DRM_UNLOCKED|DRM_AUTH),
+ DRM_IOCTL_DEF_DRV(NOUVEAU_GEM_PUSHBUF, nouveau_gem_ioctl_pushbuf, DRM_UNLOCKED|DRM_AUTH),
+ DRM_IOCTL_DEF_DRV(NOUVEAU_GEM_CPU_PREP, nouveau_gem_ioctl_cpu_prep, DRM_UNLOCKED|DRM_AUTH),
+ DRM_IOCTL_DEF_DRV(NOUVEAU_GEM_CPU_FINI, nouveau_gem_ioctl_cpu_fini, DRM_UNLOCKED|DRM_AUTH),
+ DRM_IOCTL_DEF_DRV(NOUVEAU_GEM_INFO, nouveau_gem_ioctl_info, DRM_UNLOCKED|DRM_AUTH),
};
int nouveau_max_ioctl = DRM_ARRAY_SIZE(nouveau_ioctls);
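/* Sketch of the locking model the DRM_UNLOCKED conversion above relies
 * on: instead of the global DRM lock, each ioctl serializes on the
 * channel it touches via nouveau_channel_get()/put(). Hypothetical
 * handler, for illustration only.
 */
static int
example_channel_ioctl(struct drm_device *dev, void *data,
		      struct drm_file *file_priv)
{
	struct drm_nouveau_channel_free *req = data;
	struct nouveau_channel *chan;

	chan = nouveau_channel_get(dev, file_priv, req->channel);
	if (IS_ERR(chan))
		return PTR_ERR(chan);

	/* chan->mutex is held here; operate on the channel safely */

	nouveau_channel_put(&chan);
	return 0;
}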
diff --git a/drivers/gpu/drm/nouveau/nouveau_connector.c b/drivers/gpu/drm/nouveau/nouveau_connector.c
index 52c356e9a3d..a21e0007683 100644
--- a/drivers/gpu/drm/nouveau/nouveau_connector.c
+++ b/drivers/gpu/drm/nouveau/nouveau_connector.c
@@ -37,6 +37,8 @@
#include "nouveau_connector.h"
#include "nouveau_hw.h"
+static void nouveau_connector_hotplug(void *, int);
+
static struct nouveau_encoder *
find_encoder_by_type(struct drm_connector *connector, int type)
{
@@ -94,22 +96,30 @@ nouveau_connector_bpp(struct drm_connector *connector)
}
static void
-nouveau_connector_destroy(struct drm_connector *drm_connector)
+nouveau_connector_destroy(struct drm_connector *connector)
{
- struct nouveau_connector *nv_connector =
- nouveau_connector(drm_connector);
+ struct nouveau_connector *nv_connector = nouveau_connector(connector);
+ struct drm_nouveau_private *dev_priv;
+ struct nouveau_gpio_engine *pgpio;
struct drm_device *dev;
if (!nv_connector)
return;
dev = nv_connector->base.dev;
+ dev_priv = dev->dev_private;
NV_DEBUG_KMS(dev, "\n");
+ pgpio = &dev_priv->engine.gpio;
+ if (pgpio->irq_unregister) {
+ pgpio->irq_unregister(dev, nv_connector->dcb->gpio_tag,
+ nouveau_connector_hotplug, connector);
+ }
+
kfree(nv_connector->edid);
- drm_sysfs_connector_remove(drm_connector);
- drm_connector_cleanup(drm_connector);
- kfree(drm_connector);
+ drm_sysfs_connector_remove(connector);
+ drm_connector_cleanup(connector);
+ kfree(connector);
}
static struct nouveau_i2c_chan *
@@ -760,6 +770,7 @@ nouveau_connector_create(struct drm_device *dev, int index)
{
const struct drm_connector_funcs *funcs = &nouveau_connector_funcs;
struct drm_nouveau_private *dev_priv = dev->dev_private;
+ struct nouveau_gpio_engine *pgpio = &dev_priv->engine.gpio;
struct nouveau_connector *nv_connector = NULL;
struct dcb_connector_table_entry *dcb = NULL;
struct drm_connector *connector;
@@ -876,6 +887,11 @@ nouveau_connector_create(struct drm_device *dev, int index)
break;
}
+ if (pgpio->irq_register) {
+ pgpio->irq_register(dev, nv_connector->dcb->gpio_tag,
+ nouveau_connector_hotplug, connector);
+ }
+
drm_sysfs_connector_add(connector);
dcb->drm = connector;
return dcb->drm;
@@ -886,3 +902,29 @@ fail:
return ERR_PTR(ret);
}
+
+static void
+nouveau_connector_hotplug(void *data, int plugged)
+{
+ struct drm_connector *connector = data;
+ struct drm_device *dev = connector->dev;
+
+ NV_INFO(dev, "%splugged %s\n", plugged ? "" : "un",
+ drm_get_connector_name(connector));
+
+ if (connector->encoder && connector->encoder->crtc &&
+ connector->encoder->crtc->enabled) {
+ struct nouveau_encoder *nv_encoder = nouveau_encoder(connector->encoder);
+ struct drm_encoder_helper_funcs *helper =
+ connector->encoder->helper_private;
+
+ if (nv_encoder->dcb->type == OUTPUT_DP) {
+ if (plugged)
+ helper->dpms(connector->encoder, DRM_MODE_DPMS_ON);
+ else
+ helper->dpms(connector->encoder, DRM_MODE_DPMS_OFF);
+ }
+ }
+
+ drm_helper_hpd_irq_event(dev);
+}
diff --git a/drivers/gpu/drm/nouveau/nouveau_display.c b/drivers/gpu/drm/nouveau/nouveau_display.c
index 2e11fd65b4d..505c6bfb4d7 100644
--- a/drivers/gpu/drm/nouveau/nouveau_display.c
+++ b/drivers/gpu/drm/nouveau/nouveau_display.c
@@ -29,6 +29,9 @@
#include "nouveau_drv.h"
#include "nouveau_fb.h"
#include "nouveau_fbcon.h"
+#include "nouveau_hw.h"
+#include "nouveau_crtc.h"
+#include "nouveau_dma.h"
static void
nouveau_user_framebuffer_destroy(struct drm_framebuffer *drm_fb)
@@ -104,3 +107,207 @@ const struct drm_mode_config_funcs nouveau_mode_config_funcs = {
.output_poll_changed = nouveau_fbcon_output_poll_changed,
};
+int
+nouveau_vblank_enable(struct drm_device *dev, int crtc)
+{
+ struct drm_nouveau_private *dev_priv = dev->dev_private;
+
+ if (dev_priv->card_type >= NV_50)
+ nv_mask(dev, NV50_PDISPLAY_INTR_EN_1, 0,
+ NV50_PDISPLAY_INTR_EN_1_VBLANK_CRTC_(crtc));
+ else
+ NVWriteCRTC(dev, crtc, NV_PCRTC_INTR_EN_0,
+ NV_PCRTC_INTR_0_VBLANK);
+
+ return 0;
+}
+
+void
+nouveau_vblank_disable(struct drm_device *dev, int crtc)
+{
+ struct drm_nouveau_private *dev_priv = dev->dev_private;
+
+ if (dev_priv->card_type >= NV_50)
+ nv_mask(dev, NV50_PDISPLAY_INTR_EN_1,
+ NV50_PDISPLAY_INTR_EN_1_VBLANK_CRTC_(crtc), 0);
+ else
+ NVWriteCRTC(dev, crtc, NV_PCRTC_INTR_EN_0, 0);
+}
+
+static int
+nouveau_page_flip_reserve(struct nouveau_bo *old_bo,
+ struct nouveau_bo *new_bo)
+{
+ int ret;
+
+ ret = nouveau_bo_pin(new_bo, TTM_PL_FLAG_VRAM);
+ if (ret)
+ return ret;
+
+ ret = ttm_bo_reserve(&new_bo->bo, false, false, false, 0);
+ if (ret)
+ goto fail;
+
+ ret = ttm_bo_reserve(&old_bo->bo, false, false, false, 0);
+ if (ret)
+ goto fail_unreserve;
+
+ return 0;
+
+fail_unreserve:
+ ttm_bo_unreserve(&new_bo->bo);
+fail:
+ nouveau_bo_unpin(new_bo);
+ return ret;
+}
+
+static void
+nouveau_page_flip_unreserve(struct nouveau_bo *old_bo,
+ struct nouveau_bo *new_bo,
+ struct nouveau_fence *fence)
+{
+ nouveau_bo_fence(new_bo, fence);
+ ttm_bo_unreserve(&new_bo->bo);
+
+ nouveau_bo_fence(old_bo, fence);
+ ttm_bo_unreserve(&old_bo->bo);
+
+ nouveau_bo_unpin(old_bo);
+}
+
+static int
+nouveau_page_flip_emit(struct nouveau_channel *chan,
+ struct nouveau_bo *old_bo,
+ struct nouveau_bo *new_bo,
+ struct nouveau_page_flip_state *s,
+ struct nouveau_fence **pfence)
+{
+ struct drm_device *dev = chan->dev;
+ unsigned long flags;
+ int ret;
+
+ /* Queue it to the pending list */
+ spin_lock_irqsave(&dev->event_lock, flags);
+ list_add_tail(&s->head, &chan->nvsw.flip);
+ spin_unlock_irqrestore(&dev->event_lock, flags);
+
+ /* Synchronize with the old framebuffer */
+ ret = nouveau_fence_sync(old_bo->bo.sync_obj, chan);
+ if (ret)
+ goto fail;
+
+ /* Emit the pageflip */
+ ret = RING_SPACE(chan, 2);
+ if (ret)
+ goto fail;
+
+ BEGIN_RING(chan, NvSubSw, NV_SW_PAGE_FLIP, 1);
+ OUT_RING(chan, 0);
+ FIRE_RING(chan);
+
+ ret = nouveau_fence_new(chan, pfence, true);
+ if (ret)
+ goto fail;
+
+ return 0;
+fail:
+ spin_lock_irqsave(&dev->event_lock, flags);
+ list_del(&s->head);
+ spin_unlock_irqrestore(&dev->event_lock, flags);
+ return ret;
+}
+
+int
+nouveau_crtc_page_flip(struct drm_crtc *crtc, struct drm_framebuffer *fb,
+ struct drm_pending_vblank_event *event)
+{
+ struct drm_device *dev = crtc->dev;
+ struct drm_nouveau_private *dev_priv = dev->dev_private;
+ struct nouveau_bo *old_bo = nouveau_framebuffer(crtc->fb)->nvbo;
+ struct nouveau_bo *new_bo = nouveau_framebuffer(fb)->nvbo;
+ struct nouveau_page_flip_state *s;
+ struct nouveau_channel *chan;
+ struct nouveau_fence *fence;
+ int ret;
+
+ if (dev_priv->engine.graph.accel_blocked)
+ return -ENODEV;
+
+ s = kzalloc(sizeof(*s), GFP_KERNEL);
+ if (!s)
+ return -ENOMEM;
+
+ /* Don't let the buffers go away while we flip */
+ ret = nouveau_page_flip_reserve(old_bo, new_bo);
+ if (ret)
+ goto fail_free;
+
+ /* Initialize a page flip struct */
+ *s = (struct nouveau_page_flip_state)
+ { { }, event, nouveau_crtc(crtc)->index,
+ fb->bits_per_pixel, fb->pitch, crtc->x, crtc->y,
+ new_bo->bo.offset };
+
+ /* Choose the channel the flip will be handled in */
+ chan = nouveau_fence_channel(new_bo->bo.sync_obj);
+ if (!chan)
+ chan = nouveau_channel_get_unlocked(dev_priv->channel);
+ mutex_lock(&chan->mutex);
+
+ /* Emit a page flip */
+ ret = nouveau_page_flip_emit(chan, old_bo, new_bo, s, &fence);
+ nouveau_channel_put(&chan);
+ if (ret)
+ goto fail_unreserve;
+
+ /* Update the crtc struct and cleanup */
+ crtc->fb = fb;
+
+ nouveau_page_flip_unreserve(old_bo, new_bo, fence);
+ nouveau_fence_unref(&fence);
+ return 0;
+
+fail_unreserve:
+ nouveau_page_flip_unreserve(old_bo, new_bo, NULL);
+fail_free:
+ kfree(s);
+ return ret;
+}
+
+int
+nouveau_finish_page_flip(struct nouveau_channel *chan,
+ struct nouveau_page_flip_state *ps)
+{
+ struct drm_device *dev = chan->dev;
+ struct nouveau_page_flip_state *s;
+ unsigned long flags;
+
+ spin_lock_irqsave(&dev->event_lock, flags);
+
+ if (list_empty(&chan->nvsw.flip)) {
+ NV_ERROR(dev, "Unexpected pageflip in channel %d.\n", chan->id);
+ spin_unlock_irqrestore(&dev->event_lock, flags);
+ return -EINVAL;
+ }
+
+ s = list_first_entry(&chan->nvsw.flip,
+ struct nouveau_page_flip_state, head);
+ if (s->event) {
+ struct drm_pending_vblank_event *e = s->event;
+ struct timeval now;
+
+ do_gettimeofday(&now);
+ e->event.sequence = 0;
+ e->event.tv_sec = now.tv_sec;
+ e->event.tv_usec = now.tv_usec;
+ list_add_tail(&e->base.link, &e->base.file_priv->event_list);
+ wake_up_interruptible(&e->base.file_priv->event_wait);
+ }
+
+ list_del(&s->head);
+ *ps = *s;
+ kfree(s);
+
+ spin_unlock_irqrestore(&dev->event_lock, flags);
+ return 0;
+}
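/* Illustrative consumer of nouveau_finish_page_flip(): a software-method
 * handler along these lines (hypothetical, simplified) runs once the GPU
 * reaches the NV_SW_PAGE_FLIP method queued by nouveau_page_flip_emit(),
 * and is the point where the new scanout address would be programmed.
 */
static int
example_mthd_flip(struct nouveau_channel *chan, u32 class, u32 mthd, u32 data)
{
	struct nouveau_page_flip_state state;

	if (nouveau_finish_page_flip(chan, &state))
		return 0;

	/* program the CRTC in state.crtc to scan out from state.offset,
	 * using state.pitch/state.bpp and the state.x/state.y origin */
	return 0;
}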
diff --git a/drivers/gpu/drm/nouveau/nouveau_dma.c b/drivers/gpu/drm/nouveau/nouveau_dma.c
index 82581e600dc..65699bfaaae 100644
--- a/drivers/gpu/drm/nouveau/nouveau_dma.c
+++ b/drivers/gpu/drm/nouveau/nouveau_dma.c
@@ -36,7 +36,7 @@ nouveau_dma_pre_init(struct nouveau_channel *chan)
struct drm_nouveau_private *dev_priv = chan->dev->dev_private;
struct nouveau_bo *pushbuf = chan->pushbuf_bo;
- if (dev_priv->card_type == NV_50) {
+ if (dev_priv->card_type >= NV_50) {
const int ib_size = pushbuf->bo.mem.size / 2;
chan->dma.ib_base = (pushbuf->bo.mem.size - ib_size) >> 2;
@@ -59,17 +59,26 @@ nouveau_dma_init(struct nouveau_channel *chan)
{
struct drm_device *dev = chan->dev;
struct drm_nouveau_private *dev_priv = dev->dev_private;
- struct nouveau_gpuobj *obj = NULL;
int ret, i;
- /* Create NV_MEMORY_TO_MEMORY_FORMAT for buffer moves */
- ret = nouveau_gpuobj_gr_new(chan, dev_priv->card_type < NV_50 ?
- 0x0039 : 0x5039, &obj);
- if (ret)
- return ret;
+ if (dev_priv->card_type >= NV_C0) {
+ ret = nouveau_gpuobj_gr_new(chan, 0x9039, 0x9039);
+ if (ret)
+ return ret;
+
+ ret = RING_SPACE(chan, 2);
+ if (ret)
+ return ret;
+
+ BEGIN_NVC0(chan, 2, NvSubM2MF, 0x0000, 1);
+ OUT_RING (chan, 0x00009039);
+ FIRE_RING (chan);
+ return 0;
+ }
- ret = nouveau_ramht_insert(chan, NvM2MF, obj);
- nouveau_gpuobj_ref(NULL, &obj);
+ /* Create NV_MEMORY_TO_MEMORY_FORMAT for buffer moves */
+ ret = nouveau_gpuobj_gr_new(chan, NvM2MF, dev_priv->card_type < NV_50 ?
+ 0x0039 : 0x5039);
if (ret)
return ret;
@@ -78,11 +87,6 @@ nouveau_dma_init(struct nouveau_channel *chan)
if (ret)
return ret;
- /* Map push buffer */
- ret = nouveau_bo_map(chan->pushbuf_bo);
- if (ret)
- return ret;
-
/* Insert NOPS for NOUVEAU_DMA_SKIPS */
ret = RING_SPACE(chan, NOUVEAU_DMA_SKIPS);
if (ret)
diff --git a/drivers/gpu/drm/nouveau/nouveau_dma.h b/drivers/gpu/drm/nouveau/nouveau_dma.h
index d578c21d3c8..c36f1763fea 100644
--- a/drivers/gpu/drm/nouveau/nouveau_dma.h
+++ b/drivers/gpu/drm/nouveau/nouveau_dma.h
@@ -77,7 +77,8 @@ enum {
/* G80+ display objects */
NvEvoVRAM = 0x01000000,
NvEvoFB16 = 0x01000001,
- NvEvoFB32 = 0x01000002
+ NvEvoFB32 = 0x01000002,
+ NvEvoVRAM_LP = 0x01000003
};
#define NV_MEMORY_TO_MEMORY_FORMAT 0x00000039
@@ -125,6 +126,12 @@ extern void
OUT_RINGp(struct nouveau_channel *chan, const void *data, unsigned nr_dwords);
static inline void
+BEGIN_NVC0(struct nouveau_channel *chan, int op, int subc, int mthd, int size)
+{
+ OUT_RING(chan, (op << 28) | (size << 16) | (subc << 13) | (mthd >> 2));
+}
+
+static inline void
BEGIN_RING(struct nouveau_channel *chan, int subc, int mthd, int size)
{
OUT_RING(chan, (subc << 13) | (size << 18) | mthd);
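/* Worked encodings for the two helpers above (illustrative). The Fermi
 * call made in nouveau_dma_init() earlier in this diff,
 * BEGIN_NVC0(chan, 2, NvSubM2MF, 0x0000, 1), assembles to
 *
 *	(2 << 28) | (1 << 16) | (0 << 13) | (0x0000 >> 2) = 0x20010000
 *
 * assuming NvSubM2MF is subchannel 0; the method is shifted right two
 * bits because NVC0 headers count methods in dwords rather than bytes.
 * Pre-Fermi BEGIN_RING() keeps the method as a byte offset and packs
 * (subc << 13) | (size << 18) | mthd instead.
 */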
diff --git a/drivers/gpu/drm/nouveau/nouveau_dp.c b/drivers/gpu/drm/nouveau/nouveau_dp.c
index 4562f309ae3..38d599554bc 100644
--- a/drivers/gpu/drm/nouveau/nouveau_dp.c
+++ b/drivers/gpu/drm/nouveau/nouveau_dp.c
@@ -279,7 +279,7 @@ nouveau_dp_link_train(struct drm_encoder *encoder)
struct bit_displayport_encoder_table *dpe;
int dpe_headerlen;
uint8_t config[4], status[3];
- bool cr_done, cr_max_vs, eq_done;
+ bool cr_done, cr_max_vs, eq_done, hpd_state;
int ret = 0, i, tries, voltage;
NV_DEBUG_KMS(dev, "link training!!\n");
@@ -297,7 +297,7 @@ nouveau_dp_link_train(struct drm_encoder *encoder)
/* disable hotplug detect, this flips around on some panels during
* link training.
*/
- pgpio->irq_enable(dev, nv_connector->dcb->gpio_tag, false);
+ hpd_state = pgpio->irq_enable(dev, nv_connector->dcb->gpio_tag, false);
if (dpe->script0) {
NV_DEBUG_KMS(dev, "SOR-%d: running DP script 0\n", nv_encoder->or);
@@ -439,7 +439,7 @@ stop:
}
/* re-enable hotplug detect */
- pgpio->irq_enable(dev, nv_connector->dcb->gpio_tag, true);
+ pgpio->irq_enable(dev, nv_connector->dcb->gpio_tag, hpd_state);
return eq_done;
}
diff --git a/drivers/gpu/drm/nouveau/nouveau_drv.c b/drivers/gpu/drm/nouveau/nouveau_drv.c
index 90875494a65..13bb672a16f 100644
--- a/drivers/gpu/drm/nouveau/nouveau_drv.c
+++ b/drivers/gpu/drm/nouveau/nouveau_drv.c
@@ -115,6 +115,10 @@ MODULE_PARM_DESC(perflvl_wr, "Allow perflvl changes (warning: dangerous!)\n");
int nouveau_perflvl_wr;
module_param_named(perflvl_wr, nouveau_perflvl_wr, int, 0400);
+MODULE_PARM_DESC(msi, "Enable MSI (default: off)\n");
+int nouveau_msi;
+module_param_named(msi, nouveau_msi, int, 0400);
+
int nouveau_fbpercrtc;
#if 0
module_param_named(fbpercrtc, nouveau_fbpercrtc, int, 0400);
@@ -167,6 +171,9 @@ nouveau_pci_suspend(struct pci_dev *pdev, pm_message_t pm_state)
if (pm_state.event == PM_EVENT_PRETHAW)
return 0;
+ if (dev->switch_power_state == DRM_SWITCH_POWER_OFF)
+ return 0;
+
NV_INFO(dev, "Disabling fbcon acceleration...\n");
nouveau_fbcon_save_disable_accel(dev);
@@ -193,23 +200,10 @@ nouveau_pci_suspend(struct pci_dev *pdev, pm_message_t pm_state)
NV_INFO(dev, "Idling channels...\n");
for (i = 0; i < pfifo->channels; i++) {
- struct nouveau_fence *fence = NULL;
-
- chan = dev_priv->fifos[i];
- if (!chan || (dev_priv->card_type >= NV_50 &&
- chan == dev_priv->fifos[0]))
- continue;
-
- ret = nouveau_fence_new(chan, &fence, true);
- if (ret == 0) {
- ret = nouveau_fence_wait(fence, NULL, false, false);
- nouveau_fence_unref((void *)&fence);
- }
+ chan = dev_priv->channels.ptr[i];
- if (ret) {
- NV_ERROR(dev, "Failed to idle channel %d for suspend\n",
- chan->id);
- }
+ if (chan && chan->pushbuf_bo)
+ nouveau_channel_idle(chan);
}
pgraph->fifo_access(dev, false);
@@ -219,17 +213,17 @@ nouveau_pci_suspend(struct pci_dev *pdev, pm_message_t pm_state)
pfifo->unload_context(dev);
pgraph->unload_context(dev);
- NV_INFO(dev, "Suspending GPU objects...\n");
- ret = nouveau_gpuobj_suspend(dev);
+ ret = pinstmem->suspend(dev);
if (ret) {
NV_ERROR(dev, "... failed: %d\n", ret);
goto out_abort;
}
- ret = pinstmem->suspend(dev);
+ NV_INFO(dev, "Suspending GPU objects...\n");
+ ret = nouveau_gpuobj_suspend(dev);
if (ret) {
NV_ERROR(dev, "... failed: %d\n", ret);
- nouveau_gpuobj_suspend_cleanup(dev);
+ pinstmem->resume(dev);
goto out_abort;
}
@@ -263,6 +257,9 @@ nouveau_pci_resume(struct pci_dev *pdev)
struct drm_crtc *crtc;
int ret, i;
+ if (dev->switch_power_state == DRM_SWITCH_POWER_OFF)
+ return 0;
+
nouveau_fbcon_save_disable_accel(dev);
NV_INFO(dev, "We're back, enabling device...\n");
@@ -294,17 +291,18 @@ nouveau_pci_resume(struct pci_dev *pdev)
}
}
+ NV_INFO(dev, "Restoring GPU objects...\n");
+ nouveau_gpuobj_resume(dev);
+
NV_INFO(dev, "Reinitialising engines...\n");
engine->instmem.resume(dev);
engine->mc.init(dev);
engine->timer.init(dev);
engine->fb.init(dev);
engine->graph.init(dev);
+ engine->crypt.init(dev);
engine->fifo.init(dev);
- NV_INFO(dev, "Restoring GPU objects...\n");
- nouveau_gpuobj_resume(dev);
-
nouveau_irq_postinstall(dev);
/* Re-write SKIPS, they'll have been lost over the suspend */
@@ -313,7 +311,7 @@ nouveau_pci_resume(struct pci_dev *pdev)
int j;
for (i = 0; i < dev_priv->engine.fifo.channels; i++) {
- chan = dev_priv->fifos[i];
+ chan = dev_priv->channels.ptr[i];
if (!chan || !chan->pushbuf_bo)
continue;
@@ -347,13 +345,11 @@ nouveau_pci_resume(struct pci_dev *pdev)
list_for_each_entry(crtc, &dev->mode_config.crtc_list, head) {
struct nouveau_crtc *nv_crtc = nouveau_crtc(crtc);
+ u32 offset = nv_crtc->cursor.nvbo->bo.mem.start << PAGE_SHIFT;
- nv_crtc->cursor.set_offset(nv_crtc,
- nv_crtc->cursor.nvbo->bo.offset -
- dev_priv->vm_vram_base);
-
+ nv_crtc->cursor.set_offset(nv_crtc, offset);
nv_crtc->cursor.set_pos(nv_crtc, nv_crtc->cursor_saved_x,
- nv_crtc->cursor_saved_y);
+ nv_crtc->cursor_saved_y);
}
/* Force CLUT to get re-loaded during modeset */
@@ -393,6 +389,9 @@ static struct drm_driver driver = {
.irq_postinstall = nouveau_irq_postinstall,
.irq_uninstall = nouveau_irq_uninstall,
.irq_handler = nouveau_irq_handler,
+ .get_vblank_counter = drm_vblank_count,
+ .enable_vblank = nouveau_vblank_enable,
+ .disable_vblank = nouveau_vblank_disable,
.reclaim_buffers = drm_core_reclaim_buffers,
.ioctls = nouveau_ioctls,
.fops = {
@@ -403,6 +402,7 @@ static struct drm_driver driver = {
.mmap = nouveau_ttm_mmap,
.poll = drm_poll,
.fasync = drm_fasync,
+ .read = drm_read,
#if defined(CONFIG_COMPAT)
.compat_ioctl = nouveau_compat_ioctl,
#endif
diff --git a/drivers/gpu/drm/nouveau/nouveau_drv.h b/drivers/gpu/drm/nouveau/nouveau_drv.h
index 1c7db64c03b..46e32573b3a 100644
--- a/drivers/gpu/drm/nouveau/nouveau_drv.h
+++ b/drivers/gpu/drm/nouveau/nouveau_drv.h
@@ -54,22 +54,37 @@ struct nouveau_fpriv {
#include "nouveau_drm.h"
#include "nouveau_reg.h"
#include "nouveau_bios.h"
+#include "nouveau_util.h"
+
struct nouveau_grctx;
+struct nouveau_vram;
+#include "nouveau_vm.h"
#define MAX_NUM_DCB_ENTRIES 16
#define NOUVEAU_MAX_CHANNEL_NR 128
#define NOUVEAU_MAX_TILE_NR 15
-#define NV50_VM_MAX_VRAM (2*1024*1024*1024ULL)
-#define NV50_VM_BLOCK (512*1024*1024ULL)
-#define NV50_VM_VRAM_NR (NV50_VM_MAX_VRAM / NV50_VM_BLOCK)
+struct nouveau_vram {
+ struct drm_device *dev;
+
+ struct nouveau_vma bar_vma;
+ u8 page_shift;
+
+ struct list_head regions;
+ u32 memtype;
+ u64 offset;
+ u64 size;
+};
struct nouveau_tile_reg {
- struct nouveau_fence *fence;
- uint32_t addr;
- uint32_t size;
bool used;
+ uint32_t addr;
+ uint32_t limit;
+ uint32_t pitch;
+ uint32_t zcomp;
+ struct drm_mm_node *tag_mem;
+ struct nouveau_fence *fence;
};
struct nouveau_bo {
@@ -88,6 +103,7 @@ struct nouveau_bo {
struct nouveau_channel *channel;
+ struct nouveau_vma vma;
bool mappable;
bool no_vm;
@@ -96,7 +112,6 @@ struct nouveau_bo {
struct nouveau_tile_reg *tile;
struct drm_gem_object *gem;
- struct drm_file *cpu_filp;
int pin_refcnt;
};
@@ -133,20 +148,28 @@ enum nouveau_flags {
#define NVOBJ_ENGINE_SW 0
#define NVOBJ_ENGINE_GR 1
-#define NVOBJ_ENGINE_DISPLAY 2
+#define NVOBJ_ENGINE_PPP 2
+#define NVOBJ_ENGINE_COPY 3
+#define NVOBJ_ENGINE_VP 4
+#define NVOBJ_ENGINE_CRYPT 5
+#define NVOBJ_ENGINE_BSP 6
+#define NVOBJ_ENGINE_DISPLAY 0xcafe0001
#define NVOBJ_ENGINE_INT 0xdeadbeef
+#define NVOBJ_FLAG_DONT_MAP (1 << 0)
#define NVOBJ_FLAG_ZERO_ALLOC (1 << 1)
#define NVOBJ_FLAG_ZERO_FREE (1 << 2)
+#define NVOBJ_FLAG_VM (1 << 3)
+
+#define NVOBJ_CINST_GLOBAL 0xdeadbeef
+
struct nouveau_gpuobj {
struct drm_device *dev;
struct kref refcount;
struct list_head list;
- struct drm_mm_node *im_pramin;
- struct nouveau_bo *im_backing;
- uint32_t *im_backing_suspend;
- int im_bound;
+ void *node;
+ u32 *suspend;
uint32_t flags;
@@ -162,10 +185,29 @@ struct nouveau_gpuobj {
void *priv;
};
+struct nouveau_page_flip_state {
+ struct list_head head;
+ struct drm_pending_vblank_event *event;
+ int crtc, bpp, pitch, x, y;
+ uint64_t offset;
+};
+
+enum nouveau_channel_mutex_class {
+ NOUVEAU_UCHANNEL_MUTEX,
+ NOUVEAU_KCHANNEL_MUTEX
+};
+
struct nouveau_channel {
struct drm_device *dev;
int id;
+ /* references to the channel data structure */
+ struct kref ref;
+ /* users of the hardware channel resources, the hardware
+ * context will be kicked off when it reaches zero. */
+ atomic_t users;
+ struct mutex mutex;
+
/* owner of this fifo */
struct drm_file *file_priv;
/* mapping of the fifo itself */
@@ -198,16 +240,17 @@ struct nouveau_channel {
/* PFIFO context */
struct nouveau_gpuobj *ramfc;
struct nouveau_gpuobj *cache;
+ void *fifo_priv;
/* PGRAPH context */
/* XXX may be merge 2 pointers as private data ??? */
struct nouveau_gpuobj *ramin_grctx;
+ struct nouveau_gpuobj *crypt_ctx;
void *pgraph_ctx;
/* NV50 VM */
+ struct nouveau_vm *vm;
struct nouveau_gpuobj *vm_pd;
- struct nouveau_gpuobj *vm_gart_pt;
- struct nouveau_gpuobj *vm_vram_pt[NV50_VM_VRAM_NR];
/* Objects */
struct nouveau_gpuobj *ramin; /* Private instmem */
@@ -238,9 +281,11 @@ struct nouveau_channel {
struct {
struct nouveau_gpuobj *vblsem;
+ uint32_t vblsem_head;
uint32_t vblsem_offset;
uint32_t vblsem_rval;
struct list_head vbl_wait;
+ struct list_head flip;
} nvsw;
struct {
@@ -258,11 +303,11 @@ struct nouveau_instmem_engine {
int (*suspend)(struct drm_device *dev);
void (*resume)(struct drm_device *dev);
- int (*populate)(struct drm_device *, struct nouveau_gpuobj *,
- uint32_t *size);
- void (*clear)(struct drm_device *, struct nouveau_gpuobj *);
- int (*bind)(struct drm_device *, struct nouveau_gpuobj *);
- int (*unbind)(struct drm_device *, struct nouveau_gpuobj *);
+ int (*get)(struct nouveau_gpuobj *, u32 size, u32 align);
+ void (*put)(struct nouveau_gpuobj *);
+ int (*map)(struct nouveau_gpuobj *);
+ void (*unmap)(struct nouveau_gpuobj *);
+
void (*flush)(struct drm_device *);
};
@@ -279,15 +324,21 @@ struct nouveau_timer_engine {
struct nouveau_fb_engine {
int num_tiles;
+ struct drm_mm tag_heap;
+ void *priv;
int (*init)(struct drm_device *dev);
void (*takedown)(struct drm_device *dev);
- void (*set_region_tiling)(struct drm_device *dev, int i, uint32_t addr,
- uint32_t size, uint32_t pitch);
+ void (*init_tile_region)(struct drm_device *dev, int i,
+ uint32_t addr, uint32_t size,
+ uint32_t pitch, uint32_t flags);
+ void (*set_tile_region)(struct drm_device *dev, int i);
+ void (*free_tile_region)(struct drm_device *dev, int i);
};
struct nouveau_fifo_engine {
+ void *priv;
int channels;
struct nouveau_gpuobj *playlist[2];
@@ -310,22 +361,11 @@ struct nouveau_fifo_engine {
void (*tlb_flush)(struct drm_device *dev);
};
-struct nouveau_pgraph_object_method {
- int id;
- int (*exec)(struct nouveau_channel *chan, int grclass, int mthd,
- uint32_t data);
-};
-
-struct nouveau_pgraph_object_class {
- int id;
- bool software;
- struct nouveau_pgraph_object_method *methods;
-};
-
struct nouveau_pgraph_engine {
- struct nouveau_pgraph_object_class *grclass;
bool accel_blocked;
+ bool registered;
int grctx_size;
+ void *priv;
/* NV2x/NV3x context table (0x400780) */
struct nouveau_gpuobj *ctx_table;
@@ -342,8 +382,7 @@ struct nouveau_pgraph_engine {
int (*unload_context)(struct drm_device *);
void (*tlb_flush)(struct drm_device *dev);
- void (*set_region_tiling)(struct drm_device *dev, int i, uint32_t addr,
- uint32_t size, uint32_t pitch);
+ void (*set_tile_region)(struct drm_device *dev, int i);
};
struct nouveau_display_engine {
@@ -355,13 +394,19 @@ struct nouveau_display_engine {
};
struct nouveau_gpio_engine {
+ void *priv;
+
int (*init)(struct drm_device *);
void (*takedown)(struct drm_device *);
int (*get)(struct drm_device *, enum dcb_gpio_tag);
int (*set)(struct drm_device *, enum dcb_gpio_tag, int state);
- void (*irq_enable)(struct drm_device *, enum dcb_gpio_tag, bool on);
+ int (*irq_register)(struct drm_device *, enum dcb_gpio_tag,
+ void (*)(void *, int), void *);
+ void (*irq_unregister)(struct drm_device *, enum dcb_gpio_tag,
+ void (*)(void *, int), void *);
+ bool (*irq_enable)(struct drm_device *, enum dcb_gpio_tag, bool on);
};
struct nouveau_pm_voltage_level {
@@ -437,6 +482,7 @@ struct nouveau_pm_engine {
struct nouveau_pm_level *cur;
struct device *hwmon;
+ struct notifier_block acpi_nb;
int (*clock_get)(struct drm_device *, u32 id);
void *(*clock_pre)(struct drm_device *, struct nouveau_pm_level *,
@@ -449,6 +495,25 @@ struct nouveau_pm_engine {
int (*temp_get)(struct drm_device *);
};
+struct nouveau_crypt_engine {
+ bool registered;
+
+ int (*init)(struct drm_device *);
+ void (*takedown)(struct drm_device *);
+ int (*create_context)(struct nouveau_channel *);
+ void (*destroy_context)(struct nouveau_channel *);
+ void (*tlb_flush)(struct drm_device *dev);
+};
+
+struct nouveau_vram_engine {
+ int (*init)(struct drm_device *);
+ int (*get)(struct drm_device *, u64, u32 align, u32 size_nc,
+ u32 type, struct nouveau_vram **);
+ void (*put)(struct drm_device *, struct nouveau_vram **);
+
+ bool (*flags_valid)(struct drm_device *, u32 tile_flags);
+};
+
struct nouveau_engine {
struct nouveau_instmem_engine instmem;
struct nouveau_mc_engine mc;
@@ -459,6 +524,8 @@ struct nouveau_engine {
struct nouveau_display_engine display;
struct nouveau_gpio_engine gpio;
struct nouveau_pm_engine pm;
+ struct nouveau_crypt_engine crypt;
+ struct nouveau_vram_engine vram;
};
struct nouveau_pll_vals {
@@ -577,18 +644,15 @@ struct drm_nouveau_private {
bool ramin_available;
struct drm_mm ramin_heap;
struct list_head gpuobj_list;
+ struct list_head classes;
struct nouveau_bo *vga_ram;
+ /* interrupt handling */
+ void (*irq_handler[32])(struct drm_device *);
+ bool msi_enabled;
struct workqueue_struct *wq;
struct work_struct irq_work;
- struct work_struct hpd_work;
-
- struct {
- spinlock_t lock;
- uint32_t hpd0_bits;
- uint32_t hpd1_bits;
- } hpd_state;
struct list_head vbl_waiting;
@@ -605,8 +669,10 @@ struct drm_nouveau_private {
struct nouveau_bo *bo;
} fence;
- int fifo_alloc_count;
- struct nouveau_channel *fifos[NOUVEAU_MAX_CHANNEL_NR];
+ struct {
+ spinlock_t lock;
+ struct nouveau_channel *ptr[NOUVEAU_MAX_CHANNEL_NR];
+ } channels;
struct nouveau_engine engine;
struct nouveau_channel *channel;
@@ -632,12 +698,14 @@ struct drm_nouveau_private {
uint64_t aper_free;
struct nouveau_gpuobj *sg_ctxdma;
- struct page *sg_dummy_page;
- dma_addr_t sg_dummy_bus;
+ struct nouveau_vma vma;
} gart_info;
/* nv10-nv40 tiling regions */
- struct nouveau_tile_reg tile[NOUVEAU_MAX_TILE_NR];
+ struct {
+ struct nouveau_tile_reg reg[NOUVEAU_MAX_TILE_NR];
+ spinlock_t lock;
+ } tile;
/* VRAM/fb configuration */
uint64_t vram_size;
@@ -650,14 +718,12 @@ struct drm_nouveau_private {
uint64_t fb_aper_free;
int fb_mtrr;
+ /* BAR control (NV50-) */
+ struct nouveau_vm *bar1_vm;
+ struct nouveau_vm *bar3_vm;
+
/* G8x/G9x virtual address space */
- uint64_t vm_gart_base;
- uint64_t vm_gart_size;
- uint64_t vm_vram_base;
- uint64_t vm_vram_size;
- uint64_t vm_end;
- struct nouveau_gpuobj *vm_vram_pt[NV50_VM_VRAM_NR];
- int vm_vram_pt_nr;
+ struct nouveau_vm *chan_vm;
struct nvbios vbios;
@@ -674,6 +740,7 @@ struct drm_nouveau_private {
struct backlight_device *backlight;
struct nouveau_channel *evo;
+ u32 evo_alloc;
struct {
struct dcb_entry *dcb;
u16 script;
@@ -686,6 +753,8 @@ struct drm_nouveau_private {
struct nouveau_fbdev *nfbdev;
struct apertures_struct *apertures;
+
+ bool powered_down;
};
static inline struct drm_nouveau_private *
@@ -719,16 +788,6 @@ nouveau_bo_ref(struct nouveau_bo *ref, struct nouveau_bo **pnvbo)
return 0;
}
-#define NOUVEAU_GET_USER_CHANNEL_WITH_RETURN(id, cl, ch) do { \
- struct drm_nouveau_private *nv = dev->dev_private; \
- if (!nouveau_channel_owner(dev, (cl), (id))) { \
- NV_ERROR(dev, "pid %d doesn't own channel %d\n", \
- DRM_CURRENTPID, (id)); \
- return -EPERM; \
- } \
- (ch) = nv->fifos[(id)]; \
-} while (0)
-
/* nouveau_drv.c */
extern int nouveau_agpmode;
extern int nouveau_duallink;
@@ -748,6 +807,7 @@ extern int nouveau_force_post;
extern int nouveau_override_conntype;
extern char *nouveau_perflvl;
extern int nouveau_perflvl_wr;
+extern int nouveau_msi;
extern int nouveau_pci_suspend(struct pci_dev *pdev, pm_message_t pm_state);
extern int nouveau_pci_resume(struct pci_dev *pdev);
@@ -762,8 +822,10 @@ extern int nouveau_ioctl_getparam(struct drm_device *, void *data,
struct drm_file *);
extern int nouveau_ioctl_setparam(struct drm_device *, void *data,
struct drm_file *);
-extern bool nouveau_wait_until(struct drm_device *, uint64_t timeout,
- uint32_t reg, uint32_t mask, uint32_t val);
+extern bool nouveau_wait_eq(struct drm_device *, uint64_t timeout,
+ uint32_t reg, uint32_t mask, uint32_t val);
+extern bool nouveau_wait_ne(struct drm_device *, uint64_t timeout,
+ uint32_t reg, uint32_t mask, uint32_t val);
extern bool nouveau_wait_for_idle(struct drm_device *);
extern int nouveau_card_init(struct drm_device *);
@@ -775,18 +837,18 @@ extern void nouveau_mem_gart_fini(struct drm_device *);
extern int nouveau_mem_init_agp(struct drm_device *);
extern int nouveau_mem_reset_agp(struct drm_device *);
extern void nouveau_mem_close(struct drm_device *);
-extern struct nouveau_tile_reg *nv10_mem_set_tiling(struct drm_device *dev,
- uint32_t addr,
- uint32_t size,
- uint32_t pitch);
-extern void nv10_mem_expire_tiling(struct drm_device *dev,
- struct nouveau_tile_reg *tile,
- struct nouveau_fence *fence);
-extern int nv50_mem_vm_bind_linear(struct drm_device *, uint64_t virt,
- uint32_t size, uint32_t flags,
- uint64_t phys);
-extern void nv50_mem_vm_unbind(struct drm_device *, uint64_t virt,
- uint32_t size);
+extern int nouveau_mem_detect(struct drm_device *);
+extern bool nouveau_mem_flags_valid(struct drm_device *, u32 tile_flags);
+extern struct nouveau_tile_reg *nv10_mem_set_tiling(
+ struct drm_device *dev, uint32_t addr, uint32_t size,
+ uint32_t pitch, uint32_t flags);
+extern void nv10_mem_put_tile_region(struct drm_device *dev,
+ struct nouveau_tile_reg *tile,
+ struct nouveau_fence *fence);
+extern const struct ttm_mem_type_manager_func nouveau_vram_manager;
+
+/* nvc0_vram.c */
+extern const struct ttm_mem_type_manager_func nvc0_vram_manager;
/* nouveau_notifier.c */
extern int nouveau_notifier_init_channel(struct nouveau_channel *);
@@ -803,21 +865,44 @@ extern int nouveau_ioctl_notifier_free(struct drm_device *, void *data,
extern struct drm_ioctl_desc nouveau_ioctls[];
extern int nouveau_max_ioctl;
extern void nouveau_channel_cleanup(struct drm_device *, struct drm_file *);
-extern int nouveau_channel_owner(struct drm_device *, struct drm_file *,
- int channel);
extern int nouveau_channel_alloc(struct drm_device *dev,
struct nouveau_channel **chan,
struct drm_file *file_priv,
uint32_t fb_ctxdma, uint32_t tt_ctxdma);
-extern void nouveau_channel_free(struct nouveau_channel *);
+extern struct nouveau_channel *
+nouveau_channel_get_unlocked(struct nouveau_channel *);
+extern struct nouveau_channel *
+nouveau_channel_get(struct drm_device *, struct drm_file *, int id);
+extern void nouveau_channel_put_unlocked(struct nouveau_channel **);
+extern void nouveau_channel_put(struct nouveau_channel **);
+extern void nouveau_channel_ref(struct nouveau_channel *chan,
+ struct nouveau_channel **pchan);
+extern void nouveau_channel_idle(struct nouveau_channel *chan);
/* nouveau_object.c */
+#define NVOBJ_CLASS(d,c,e) do { \
+ int ret = nouveau_gpuobj_class_new((d), (c), NVOBJ_ENGINE_##e); \
+ if (ret) \
+ return ret; \
+} while(0)
+
+#define NVOBJ_MTHD(d,c,m,e) do { \
+ int ret = nouveau_gpuobj_mthd_new((d), (c), (m), (e)); \
+ if (ret) \
+ return ret; \
+} while(0)
+
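/* Usage sketch for the registration macros above: a hypothetical
 * engine-init fragment. Note the macros return from the *caller* on
 * error, so they may only appear in int-returning functions.
 */
static int
example_register_sw_class(struct drm_device *dev)
{
	NVOBJ_CLASS(dev, 0x506e, SW);	/* the NV_SW object class */
	NVOBJ_MTHD (dev, 0x506e, NV_SW_PAGE_FLIP,
		    nv04_graph_mthd_page_flip);
	return 0;
}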
extern int nouveau_gpuobj_early_init(struct drm_device *);
extern int nouveau_gpuobj_init(struct drm_device *);
extern void nouveau_gpuobj_takedown(struct drm_device *);
extern int nouveau_gpuobj_suspend(struct drm_device *dev);
-extern void nouveau_gpuobj_suspend_cleanup(struct drm_device *dev);
extern void nouveau_gpuobj_resume(struct drm_device *dev);
+extern int nouveau_gpuobj_class_new(struct drm_device *, u32 class, u32 eng);
+extern int nouveau_gpuobj_mthd_new(struct drm_device *, u32 class, u32 mthd,
+ int (*exec)(struct nouveau_channel *,
+ u32 class, u32 mthd, u32 data));
+extern int nouveau_gpuobj_mthd_call(struct nouveau_channel *, u32, u32, u32);
+extern int nouveau_gpuobj_mthd_call2(struct drm_device *, int, u32, u32, u32);
extern int nouveau_gpuobj_channel_init(struct nouveau_channel *,
uint32_t vram_h, uint32_t tt_h);
extern void nouveau_gpuobj_channel_takedown(struct nouveau_channel *);
@@ -832,21 +917,25 @@ extern int nouveau_gpuobj_new_fake(struct drm_device *, u32 pinst, u64 vinst,
extern int nouveau_gpuobj_dma_new(struct nouveau_channel *, int class,
uint64_t offset, uint64_t size, int access,
int target, struct nouveau_gpuobj **);
-extern int nouveau_gpuobj_gart_dma_new(struct nouveau_channel *,
- uint64_t offset, uint64_t size,
- int access, struct nouveau_gpuobj **,
- uint32_t *o_ret);
-extern int nouveau_gpuobj_gr_new(struct nouveau_channel *, int class,
- struct nouveau_gpuobj **);
-extern int nouveau_gpuobj_sw_new(struct nouveau_channel *, int class,
- struct nouveau_gpuobj **);
+extern int nouveau_gpuobj_gr_new(struct nouveau_channel *, u32 handle, int class);
+extern int nv50_gpuobj_dma_new(struct nouveau_channel *, int class, u64 base,
+ u64 size, int target, int access, u32 type,
+ u32 comp, struct nouveau_gpuobj **pobj);
+extern void nv50_gpuobj_dma_init(struct nouveau_gpuobj *, u32 offset,
+ int class, u64 base, u64 size, int target,
+ int access, u32 type, u32 comp);
extern int nouveau_ioctl_grobj_alloc(struct drm_device *, void *data,
struct drm_file *);
extern int nouveau_ioctl_gpuobj_free(struct drm_device *, void *data,
struct drm_file *);
/* nouveau_irq.c */
+extern int nouveau_irq_init(struct drm_device *);
+extern void nouveau_irq_fini(struct drm_device *);
extern irqreturn_t nouveau_irq_handler(DRM_IRQ_ARGS);
+extern void nouveau_irq_register(struct drm_device *, int status_bit,
+ void (*)(struct drm_device *));
+extern void nouveau_irq_unregister(struct drm_device *, int status_bit);
extern void nouveau_irq_preinstall(struct drm_device *);
extern int nouveau_irq_postinstall(struct drm_device *);
extern void nouveau_irq_uninstall(struct drm_device *);
@@ -854,8 +943,8 @@ extern void nouveau_irq_uninstall(struct drm_device *);
/* nouveau_sgdma.c */
extern int nouveau_sgdma_init(struct drm_device *);
extern void nouveau_sgdma_takedown(struct drm_device *);
-extern int nouveau_sgdma_get_page(struct drm_device *, uint32_t offset,
- uint32_t *page);
+extern uint32_t nouveau_sgdma_get_physical(struct drm_device *,
+ uint32_t offset);
extern struct ttm_backend *nouveau_sgdma_init_ttm(struct drm_device *);
/* nouveau_debugfs.c */
@@ -966,18 +1055,25 @@ extern void nv04_fb_takedown(struct drm_device *);
/* nv10_fb.c */
extern int nv10_fb_init(struct drm_device *);
extern void nv10_fb_takedown(struct drm_device *);
-extern void nv10_fb_set_region_tiling(struct drm_device *, int, uint32_t,
- uint32_t, uint32_t);
+extern void nv10_fb_init_tile_region(struct drm_device *dev, int i,
+ uint32_t addr, uint32_t size,
+ uint32_t pitch, uint32_t flags);
+extern void nv10_fb_set_tile_region(struct drm_device *dev, int i);
+extern void nv10_fb_free_tile_region(struct drm_device *dev, int i);
/* nv30_fb.c */
extern int nv30_fb_init(struct drm_device *);
extern void nv30_fb_takedown(struct drm_device *);
+extern void nv30_fb_init_tile_region(struct drm_device *dev, int i,
+ uint32_t addr, uint32_t size,
+ uint32_t pitch, uint32_t flags);
+extern void nv30_fb_free_tile_region(struct drm_device *dev, int i);
/* nv40_fb.c */
extern int nv40_fb_init(struct drm_device *);
extern void nv40_fb_takedown(struct drm_device *);
-extern void nv40_fb_set_region_tiling(struct drm_device *, int, uint32_t,
- uint32_t, uint32_t);
+extern void nv40_fb_set_tile_region(struct drm_device *dev, int i);
+
/* nv50_fb.c */
extern int nv50_fb_init(struct drm_device *);
extern void nv50_fb_takedown(struct drm_device *);
@@ -989,6 +1085,7 @@ extern void nvc0_fb_takedown(struct drm_device *);
/* nv04_fifo.c */
extern int nv04_fifo_init(struct drm_device *);
+extern void nv04_fifo_fini(struct drm_device *);
extern void nv04_fifo_disable(struct drm_device *);
extern void nv04_fifo_enable(struct drm_device *);
extern bool nv04_fifo_reassign(struct drm_device *, bool);
@@ -998,19 +1095,18 @@ extern int nv04_fifo_create_context(struct nouveau_channel *);
extern void nv04_fifo_destroy_context(struct nouveau_channel *);
extern int nv04_fifo_load_context(struct nouveau_channel *);
extern int nv04_fifo_unload_context(struct drm_device *);
+extern void nv04_fifo_isr(struct drm_device *);
/* nv10_fifo.c */
extern int nv10_fifo_init(struct drm_device *);
extern int nv10_fifo_channel_id(struct drm_device *);
extern int nv10_fifo_create_context(struct nouveau_channel *);
-extern void nv10_fifo_destroy_context(struct nouveau_channel *);
extern int nv10_fifo_load_context(struct nouveau_channel *);
extern int nv10_fifo_unload_context(struct drm_device *);
/* nv40_fifo.c */
extern int nv40_fifo_init(struct drm_device *);
extern int nv40_fifo_create_context(struct nouveau_channel *);
-extern void nv40_fifo_destroy_context(struct nouveau_channel *);
extern int nv40_fifo_load_context(struct nouveau_channel *);
extern int nv40_fifo_unload_context(struct drm_device *);
@@ -1038,7 +1134,6 @@ extern int nvc0_fifo_load_context(struct nouveau_channel *);
extern int nvc0_fifo_unload_context(struct drm_device *);
/* nv04_graph.c */
-extern struct nouveau_pgraph_object_class nv04_graph_grclass[];
extern int nv04_graph_init(struct drm_device *);
extern void nv04_graph_takedown(struct drm_device *);
extern void nv04_graph_fifo_access(struct drm_device *, bool);
@@ -1047,10 +1142,11 @@ extern int nv04_graph_create_context(struct nouveau_channel *);
extern void nv04_graph_destroy_context(struct nouveau_channel *);
extern int nv04_graph_load_context(struct nouveau_channel *);
extern int nv04_graph_unload_context(struct drm_device *);
-extern void nv04_graph_context_switch(struct drm_device *);
+extern int nv04_graph_mthd_page_flip(struct nouveau_channel *chan,
+ u32 class, u32 mthd, u32 data);
+extern struct nouveau_bitfield nv04_graph_nsource[];
/* nv10_graph.c */
-extern struct nouveau_pgraph_object_class nv10_graph_grclass[];
extern int nv10_graph_init(struct drm_device *);
extern void nv10_graph_takedown(struct drm_device *);
extern struct nouveau_channel *nv10_graph_channel(struct drm_device *);
@@ -1058,13 +1154,11 @@ extern int nv10_graph_create_context(struct nouveau_channel *);
extern void nv10_graph_destroy_context(struct nouveau_channel *);
extern int nv10_graph_load_context(struct nouveau_channel *);
extern int nv10_graph_unload_context(struct drm_device *);
-extern void nv10_graph_context_switch(struct drm_device *);
-extern void nv10_graph_set_region_tiling(struct drm_device *, int, uint32_t,
- uint32_t, uint32_t);
+extern void nv10_graph_set_tile_region(struct drm_device *dev, int i);
+extern struct nouveau_bitfield nv10_graph_intr[];
+extern struct nouveau_bitfield nv10_graph_nstatus[];
/* nv20_graph.c */
-extern struct nouveau_pgraph_object_class nv20_graph_grclass[];
-extern struct nouveau_pgraph_object_class nv30_graph_grclass[];
extern int nv20_graph_create_context(struct nouveau_channel *);
extern void nv20_graph_destroy_context(struct nouveau_channel *);
extern int nv20_graph_load_context(struct nouveau_channel *);
@@ -1072,11 +1166,9 @@ extern int nv20_graph_unload_context(struct drm_device *);
extern int nv20_graph_init(struct drm_device *);
extern void nv20_graph_takedown(struct drm_device *);
extern int nv30_graph_init(struct drm_device *);
-extern void nv20_graph_set_region_tiling(struct drm_device *, int, uint32_t,
- uint32_t, uint32_t);
+extern void nv20_graph_set_tile_region(struct drm_device *dev, int i);
/* nv40_graph.c */
-extern struct nouveau_pgraph_object_class nv40_graph_grclass[];
extern int nv40_graph_init(struct drm_device *);
extern void nv40_graph_takedown(struct drm_device *);
extern struct nouveau_channel *nv40_graph_channel(struct drm_device *);
@@ -1085,11 +1177,9 @@ extern void nv40_graph_destroy_context(struct nouveau_channel *);
extern int nv40_graph_load_context(struct nouveau_channel *);
extern int nv40_graph_unload_context(struct drm_device *);
extern void nv40_grctx_init(struct nouveau_grctx *);
-extern void nv40_graph_set_region_tiling(struct drm_device *, int, uint32_t,
- uint32_t, uint32_t);
+extern void nv40_graph_set_tile_region(struct drm_device *dev, int i);
/* nv50_graph.c */
-extern struct nouveau_pgraph_object_class nv50_graph_grclass[];
extern int nv50_graph_init(struct drm_device *);
extern void nv50_graph_takedown(struct drm_device *);
extern void nv50_graph_fifo_access(struct drm_device *, bool);
@@ -1098,10 +1188,10 @@ extern int nv50_graph_create_context(struct nouveau_channel *);
extern void nv50_graph_destroy_context(struct nouveau_channel *);
extern int nv50_graph_load_context(struct nouveau_channel *);
extern int nv50_graph_unload_context(struct drm_device *);
-extern void nv50_graph_context_switch(struct drm_device *);
extern int nv50_grctx_init(struct nouveau_grctx *);
extern void nv50_graph_tlb_flush(struct drm_device *dev);
extern void nv86_graph_tlb_flush(struct drm_device *dev);
+extern struct nouveau_enum nv50_data_error_names[];
/* nvc0_graph.c */
extern int nvc0_graph_init(struct drm_device *);
@@ -1113,16 +1203,22 @@ extern void nvc0_graph_destroy_context(struct nouveau_channel *);
extern int nvc0_graph_load_context(struct nouveau_channel *);
extern int nvc0_graph_unload_context(struct drm_device *);
+/* nv84_crypt.c */
+extern int nv84_crypt_init(struct drm_device *dev);
+extern void nv84_crypt_fini(struct drm_device *dev);
+extern int nv84_crypt_create_context(struct nouveau_channel *);
+extern void nv84_crypt_destroy_context(struct nouveau_channel *);
+extern void nv84_crypt_tlb_flush(struct drm_device *dev);
+
/* nv04_instmem.c */
extern int nv04_instmem_init(struct drm_device *);
extern void nv04_instmem_takedown(struct drm_device *);
extern int nv04_instmem_suspend(struct drm_device *);
extern void nv04_instmem_resume(struct drm_device *);
-extern int nv04_instmem_populate(struct drm_device *, struct nouveau_gpuobj *,
- uint32_t *size);
-extern void nv04_instmem_clear(struct drm_device *, struct nouveau_gpuobj *);
-extern int nv04_instmem_bind(struct drm_device *, struct nouveau_gpuobj *);
-extern int nv04_instmem_unbind(struct drm_device *, struct nouveau_gpuobj *);
+extern int nv04_instmem_get(struct nouveau_gpuobj *, u32 size, u32 align);
+extern void nv04_instmem_put(struct nouveau_gpuobj *);
+extern int nv04_instmem_map(struct nouveau_gpuobj *);
+extern void nv04_instmem_unmap(struct nouveau_gpuobj *);
extern void nv04_instmem_flush(struct drm_device *);
/* nv50_instmem.c */
@@ -1130,26 +1226,18 @@ extern int nv50_instmem_init(struct drm_device *);
extern void nv50_instmem_takedown(struct drm_device *);
extern int nv50_instmem_suspend(struct drm_device *);
extern void nv50_instmem_resume(struct drm_device *);
-extern int nv50_instmem_populate(struct drm_device *, struct nouveau_gpuobj *,
- uint32_t *size);
-extern void nv50_instmem_clear(struct drm_device *, struct nouveau_gpuobj *);
-extern int nv50_instmem_bind(struct drm_device *, struct nouveau_gpuobj *);
-extern int nv50_instmem_unbind(struct drm_device *, struct nouveau_gpuobj *);
+extern int nv50_instmem_get(struct nouveau_gpuobj *, u32 size, u32 align);
+extern void nv50_instmem_put(struct nouveau_gpuobj *);
+extern int nv50_instmem_map(struct nouveau_gpuobj *);
+extern void nv50_instmem_unmap(struct nouveau_gpuobj *);
extern void nv50_instmem_flush(struct drm_device *);
extern void nv84_instmem_flush(struct drm_device *);
-extern void nv50_vm_flush(struct drm_device *, int engine);
/* nvc0_instmem.c */
extern int nvc0_instmem_init(struct drm_device *);
extern void nvc0_instmem_takedown(struct drm_device *);
extern int nvc0_instmem_suspend(struct drm_device *);
extern void nvc0_instmem_resume(struct drm_device *);
-extern int nvc0_instmem_populate(struct drm_device *, struct nouveau_gpuobj *,
- uint32_t *size);
-extern void nvc0_instmem_clear(struct drm_device *, struct nouveau_gpuobj *);
-extern int nvc0_instmem_bind(struct drm_device *, struct nouveau_gpuobj *);
-extern int nvc0_instmem_unbind(struct drm_device *, struct nouveau_gpuobj *);
-extern void nvc0_instmem_flush(struct drm_device *);
/* nv04_mc.c */
extern int nv04_mc_init(struct drm_device *);
@@ -1219,6 +1307,9 @@ extern u16 nouveau_bo_rd16(struct nouveau_bo *nvbo, unsigned index);
extern void nouveau_bo_wr16(struct nouveau_bo *nvbo, unsigned index, u16 val);
extern u32 nouveau_bo_rd32(struct nouveau_bo *nvbo, unsigned index);
extern void nouveau_bo_wr32(struct nouveau_bo *nvbo, unsigned index, u32 val);
+extern void nouveau_bo_fence(struct nouveau_bo *, struct nouveau_fence *);
+extern int nouveau_bo_validate(struct nouveau_bo *, bool interruptible,
+ bool no_wait_reserve, bool no_wait_gpu);
/* nouveau_fence.c */
struct nouveau_fence;
@@ -1234,12 +1325,35 @@ extern void nouveau_fence_work(struct nouveau_fence *fence,
void (*work)(void *priv, bool signalled),
void *priv);
struct nouveau_channel *nouveau_fence_channel(struct nouveau_fence *);
-extern bool nouveau_fence_signalled(void *obj, void *arg);
-extern int nouveau_fence_wait(void *obj, void *arg, bool lazy, bool intr);
+
+extern bool __nouveau_fence_signalled(void *obj, void *arg);
+extern int __nouveau_fence_wait(void *obj, void *arg, bool lazy, bool intr);
+extern int __nouveau_fence_flush(void *obj, void *arg);
+extern void __nouveau_fence_unref(void **obj);
+extern void *__nouveau_fence_ref(void *obj);
+
+static inline bool nouveau_fence_signalled(struct nouveau_fence *obj)
+{
+ return __nouveau_fence_signalled(obj, NULL);
+}
+static inline int
+nouveau_fence_wait(struct nouveau_fence *obj, bool lazy, bool intr)
+{
+ return __nouveau_fence_wait(obj, NULL, lazy, intr);
+}
extern int nouveau_fence_sync(struct nouveau_fence *, struct nouveau_channel *);
-extern int nouveau_fence_flush(void *obj, void *arg);
-extern void nouveau_fence_unref(void **obj);
-extern void *nouveau_fence_ref(void *obj);
+static inline int nouveau_fence_flush(struct nouveau_fence *obj)
+{
+ return __nouveau_fence_flush(obj, NULL);
+}
+static inline void nouveau_fence_unref(struct nouveau_fence **obj)
+{
+ __nouveau_fence_unref((void **)obj);
+}
+static inline struct nouveau_fence *nouveau_fence_ref(struct nouveau_fence *obj)
+{
+ return __nouveau_fence_ref(obj);
+}
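/* The typed wrappers above keep driver code free of casts while TTM
 * continues to call the void* __nouveau_fence_* variants through its
 * sync_obj hooks. Typical driver-side usage (sketch, mirroring
 * nouveau_channel_idle() earlier in this diff):
 *
 *	struct nouveau_fence *fence = NULL;
 *
 *	ret = nouveau_fence_new(chan, &fence, true);
 *	if (ret == 0) {
 *		ret = nouveau_fence_wait(fence, false, false);
 *		nouveau_fence_unref(&fence);
 *	}
 */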
/* nouveau_gem.c */
extern int nouveau_gem_new(struct drm_device *, struct nouveau_channel *,
@@ -1259,15 +1373,28 @@ extern int nouveau_gem_ioctl_cpu_fini(struct drm_device *, void *,
extern int nouveau_gem_ioctl_info(struct drm_device *, void *,
struct drm_file *);
+/* nouveau_display.c */
+int nouveau_vblank_enable(struct drm_device *dev, int crtc);
+void nouveau_vblank_disable(struct drm_device *dev, int crtc);
+int nouveau_crtc_page_flip(struct drm_crtc *crtc, struct drm_framebuffer *fb,
+ struct drm_pending_vblank_event *event);
+int nouveau_finish_page_flip(struct nouveau_channel *,
+ struct nouveau_page_flip_state *);
+
/* nv10_gpio.c */
int nv10_gpio_get(struct drm_device *dev, enum dcb_gpio_tag tag);
int nv10_gpio_set(struct drm_device *dev, enum dcb_gpio_tag tag, int state);
/* nv50_gpio.c */
int nv50_gpio_init(struct drm_device *dev);
+void nv50_gpio_fini(struct drm_device *dev);
int nv50_gpio_get(struct drm_device *dev, enum dcb_gpio_tag tag);
int nv50_gpio_set(struct drm_device *dev, enum dcb_gpio_tag tag, int state);
-void nv50_gpio_irq_enable(struct drm_device *, enum dcb_gpio_tag, bool on);
+int nv50_gpio_irq_register(struct drm_device *, enum dcb_gpio_tag,
+ void (*)(void *, int), void *);
+void nv50_gpio_irq_unregister(struct drm_device *, enum dcb_gpio_tag,
+ void (*)(void *, int), void *);
+bool nv50_gpio_irq_enable(struct drm_device *, enum dcb_gpio_tag, bool on);
/* nv50_calc.c */
int nv50_calc_pll(struct drm_device *, struct pll_lims *, int clk,
@@ -1334,7 +1461,9 @@ static inline void nv_wr08(struct drm_device *dev, unsigned reg, u8 val)
}
#define nv_wait(dev, reg, mask, val) \
- nouveau_wait_until(dev, 2000000000ULL, (reg), (mask), (val))
+ nouveau_wait_eq(dev, 2000000000ULL, (reg), (mask), (val))
+#define nv_wait_ne(dev, reg, mask, val) \
+ nouveau_wait_ne(dev, 2000000000ULL, (reg), (mask), (val))
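/* Illustrative use of the pair above (register chosen only as an
 * example): nv_wait(dev, 0x400700, 0xffffffff, 0x00000000) polls the
 * masked register for up to the 2s timeout until it reads back the
 * value, while nv_wait_ne() succeeds as soon as the masked value
 * differs from it.
 */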
/* PRAMIN access */
static inline u32 nv_ri32(struct drm_device *dev, unsigned offset)
@@ -1447,6 +1576,23 @@ nv_match_device(struct drm_device *dev, unsigned device,
dev->pdev->subsystem_device == sub_device;
}
+/* memory type/access flags, do not match hardware values */
+#define NV_MEM_ACCESS_RO 1
+#define NV_MEM_ACCESS_WO 2
+#define NV_MEM_ACCESS_RW (NV_MEM_ACCESS_RO | NV_MEM_ACCESS_WO)
+#define NV_MEM_ACCESS_SYS 4
+#define NV_MEM_ACCESS_VM 8
+
+#define NV_MEM_TARGET_VRAM 0
+#define NV_MEM_TARGET_PCI 1
+#define NV_MEM_TARGET_PCI_NOSNOOP 2
+#define NV_MEM_TARGET_VM 3
+#define NV_MEM_TARGET_GART 4
+
+#define NV_MEM_TYPE_VM 0x7f
+#define NV_MEM_COMP_VM 0x03
+
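
The access bits are meant to be ORed together, while the target values form a plain enumeration, so a caller passes one of each. The fence code later in this patch shows the intended use when it recreates the cross-channel sync DMA object:

	ret = nouveau_gpuobj_dma_new(chan, NV_CLASS_DMA_IN_MEMORY,
				     mem->start << PAGE_SHIFT, mem->size,
				     NV_MEM_ACCESS_RW, NV_MEM_TARGET_VRAM,
				     &obj);
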
+/* NV_SW object class */
#define NV_SW 0x0000506e
#define NV_SW_DMA_SEMAPHORE 0x00000060
#define NV_SW_SEMAPHORE_OFFSET 0x00000064
@@ -1457,5 +1603,6 @@ nv_match_device(struct drm_device *dev, unsigned device,
#define NV_SW_VBLSEM_OFFSET 0x00000400
#define NV_SW_VBLSEM_RELEASE_VALUE 0x00000404
#define NV_SW_VBLSEM_RELEASE 0x00000408
+#define NV_SW_PAGE_FLIP 0x00000500
#endif /* __NOUVEAU_DRV_H__ */
diff --git a/drivers/gpu/drm/nouveau/nouveau_fbcon.c b/drivers/gpu/drm/nouveau/nouveau_fbcon.c
index 02a4d1fd484..a26d04740c8 100644
--- a/drivers/gpu/drm/nouveau/nouveau_fbcon.c
+++ b/drivers/gpu/drm/nouveau/nouveau_fbcon.c
@@ -49,6 +49,102 @@
#include "nouveau_fbcon.h"
#include "nouveau_dma.h"
+static void
+nouveau_fbcon_fillrect(struct fb_info *info, const struct fb_fillrect *rect)
+{
+ struct nouveau_fbdev *nfbdev = info->par;
+ struct drm_device *dev = nfbdev->dev;
+ struct drm_nouveau_private *dev_priv = dev->dev_private;
+ int ret;
+
+ if (info->state != FBINFO_STATE_RUNNING)
+ return;
+
+ ret = -ENODEV;
+ if (!in_interrupt() && !(info->flags & FBINFO_HWACCEL_DISABLED) &&
+ mutex_trylock(&dev_priv->channel->mutex)) {
+ if (dev_priv->card_type < NV_50)
+ ret = nv04_fbcon_fillrect(info, rect);
+ else
+ if (dev_priv->card_type < NV_C0)
+ ret = nv50_fbcon_fillrect(info, rect);
+ else
+ ret = nvc0_fbcon_fillrect(info, rect);
+ mutex_unlock(&dev_priv->channel->mutex);
+ }
+
+ if (ret == 0)
+ return;
+
+ if (ret != -ENODEV)
+ nouveau_fbcon_gpu_lockup(info);
+ cfb_fillrect(info, rect);
+}
+
+static void
+nouveau_fbcon_copyarea(struct fb_info *info, const struct fb_copyarea *image)
+{
+ struct nouveau_fbdev *nfbdev = info->par;
+ struct drm_device *dev = nfbdev->dev;
+ struct drm_nouveau_private *dev_priv = dev->dev_private;
+ int ret;
+
+ if (info->state != FBINFO_STATE_RUNNING)
+ return;
+
+ ret = -ENODEV;
+ if (!in_interrupt() && !(info->flags & FBINFO_HWACCEL_DISABLED) &&
+ mutex_trylock(&dev_priv->channel->mutex)) {
+ if (dev_priv->card_type < NV_50)
+ ret = nv04_fbcon_copyarea(info, image);
+ else
+ if (dev_priv->card_type < NV_C0)
+ ret = nv50_fbcon_copyarea(info, image);
+ else
+ ret = nvc0_fbcon_copyarea(info, image);
+ mutex_unlock(&dev_priv->channel->mutex);
+ }
+
+ if (ret == 0)
+ return;
+
+ if (ret != -ENODEV)
+ nouveau_fbcon_gpu_lockup(info);
+ cfb_copyarea(info, image);
+}
+
+static void
+nouveau_fbcon_imageblit(struct fb_info *info, const struct fb_image *image)
+{
+ struct nouveau_fbdev *nfbdev = info->par;
+ struct drm_device *dev = nfbdev->dev;
+ struct drm_nouveau_private *dev_priv = dev->dev_private;
+ int ret;
+
+ if (info->state != FBINFO_STATE_RUNNING)
+ return;
+
+ ret = -ENODEV;
+ if (!in_interrupt() && !(info->flags & FBINFO_HWACCEL_DISABLED) &&
+ mutex_trylock(&dev_priv->channel->mutex)) {
+ if (dev_priv->card_type < NV_50)
+ ret = nv04_fbcon_imageblit(info, image);
+ else
+ if (dev_priv->card_type < NV_C0)
+ ret = nv50_fbcon_imageblit(info, image);
+ else
+ ret = nvc0_fbcon_imageblit(info, image);
+ mutex_unlock(&dev_priv->channel->mutex);
+ }
+
+ if (ret == 0)
+ return;
+
+ if (ret != -ENODEV)
+ nouveau_fbcon_gpu_lockup(info);
+ cfb_imageblit(info, image);
+}
+
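
All three wrappers share one shape: take the channel mutex only opportunistically so fbcon can never sleep in atomic context, dispatch to the per-chipset accelerated path, and degrade to the software cfb_*() routine when acceleration is unavailable or wedged. Stripped to a skeleton (accel_op and cfb_op are stand-ins for the nv04/nv50/nvc0 and cfb variants, not real symbols):

	ret = -ENODEV;
	if (!in_interrupt() && !(info->flags & FBINFO_HWACCEL_DISABLED) &&
	    mutex_trylock(&dev_priv->channel->mutex)) {
		ret = accel_op(info, arg);		/* chipset-specific path */
		mutex_unlock(&dev_priv->channel->mutex);
	}
	if (ret == 0)
		return;
	if (ret != -ENODEV)			/* the GPU path actually failed */
		nouveau_fbcon_gpu_lockup(info);
	cfb_op(info, arg);			/* software fallback */
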
static int
nouveau_fbcon_sync(struct fb_info *info)
{
@@ -58,22 +154,36 @@ nouveau_fbcon_sync(struct fb_info *info)
struct nouveau_channel *chan = dev_priv->channel;
int ret, i;
- if (!chan || !chan->accel_done ||
+ if (!chan || !chan->accel_done || in_interrupt() ||
info->state != FBINFO_STATE_RUNNING ||
info->flags & FBINFO_HWACCEL_DISABLED)
return 0;
- if (RING_SPACE(chan, 4)) {
+ if (!mutex_trylock(&chan->mutex))
+ return 0;
+
+ ret = RING_SPACE(chan, 4);
+ if (ret) {
+ mutex_unlock(&chan->mutex);
nouveau_fbcon_gpu_lockup(info);
return 0;
}
- BEGIN_RING(chan, 0, 0x0104, 1);
- OUT_RING(chan, 0);
- BEGIN_RING(chan, 0, 0x0100, 1);
- OUT_RING(chan, 0);
+ if (dev_priv->card_type >= NV_C0) {
+ BEGIN_NVC0(chan, 2, NvSub2D, 0x010c, 1);
+ OUT_RING (chan, 0);
+ BEGIN_NVC0(chan, 2, NvSub2D, 0x0100, 1);
+ OUT_RING (chan, 0);
+ } else {
+ BEGIN_RING(chan, 0, 0x0104, 1);
+ OUT_RING (chan, 0);
+ BEGIN_RING(chan, 0, 0x0100, 1);
+ OUT_RING (chan, 0);
+ }
+
nouveau_bo_wr32(chan->notifier_bo, chan->m2mf_ntfy + 3, 0xffffffff);
FIRE_RING(chan);
+ mutex_unlock(&chan->mutex);
ret = -EBUSY;
for (i = 0; i < 100000; i++) {
@@ -97,9 +207,9 @@ static struct fb_ops nouveau_fbcon_ops = {
.owner = THIS_MODULE,
.fb_check_var = drm_fb_helper_check_var,
.fb_set_par = drm_fb_helper_set_par,
- .fb_fillrect = cfb_fillrect,
- .fb_copyarea = cfb_copyarea,
- .fb_imageblit = cfb_imageblit,
+ .fb_fillrect = nouveau_fbcon_fillrect,
+ .fb_copyarea = nouveau_fbcon_copyarea,
+ .fb_imageblit = nouveau_fbcon_imageblit,
.fb_sync = nouveau_fbcon_sync,
.fb_pan_display = drm_fb_helper_pan_display,
.fb_blank = drm_fb_helper_blank,
@@ -108,29 +218,13 @@ static struct fb_ops nouveau_fbcon_ops = {
.fb_debug_leave = drm_fb_helper_debug_leave,
};
-static struct fb_ops nv04_fbcon_ops = {
+static struct fb_ops nouveau_fbcon_sw_ops = {
.owner = THIS_MODULE,
.fb_check_var = drm_fb_helper_check_var,
.fb_set_par = drm_fb_helper_set_par,
- .fb_fillrect = nv04_fbcon_fillrect,
- .fb_copyarea = nv04_fbcon_copyarea,
- .fb_imageblit = nv04_fbcon_imageblit,
- .fb_sync = nouveau_fbcon_sync,
- .fb_pan_display = drm_fb_helper_pan_display,
- .fb_blank = drm_fb_helper_blank,
- .fb_setcmap = drm_fb_helper_setcmap,
- .fb_debug_enter = drm_fb_helper_debug_enter,
- .fb_debug_leave = drm_fb_helper_debug_leave,
-};
-
-static struct fb_ops nv50_fbcon_ops = {
- .owner = THIS_MODULE,
- .fb_check_var = drm_fb_helper_check_var,
- .fb_set_par = drm_fb_helper_set_par,
- .fb_fillrect = nv50_fbcon_fillrect,
- .fb_copyarea = nv50_fbcon_copyarea,
- .fb_imageblit = nv50_fbcon_imageblit,
- .fb_sync = nouveau_fbcon_sync,
+ .fb_fillrect = cfb_fillrect,
+ .fb_copyarea = cfb_copyarea,
+ .fb_imageblit = cfb_imageblit,
.fb_pan_display = drm_fb_helper_pan_display,
.fb_blank = drm_fb_helper_blank,
.fb_setcmap = drm_fb_helper_setcmap,
@@ -257,21 +351,16 @@ nouveau_fbcon_create(struct nouveau_fbdev *nfbdev,
FBINFO_HWACCEL_FILLRECT |
FBINFO_HWACCEL_IMAGEBLIT;
info->flags |= FBINFO_CAN_FORCE_OUTPUT;
- info->fbops = &nouveau_fbcon_ops;
- info->fix.smem_start = dev->mode_config.fb_base + nvbo->bo.offset -
- dev_priv->vm_vram_base;
+ info->fbops = &nouveau_fbcon_sw_ops;
+ info->fix.smem_start = dev->mode_config.fb_base +
+ (nvbo->bo.mem.start << PAGE_SHIFT);
info->fix.smem_len = size;
info->screen_base = nvbo_kmap_obj_iovirtual(nouveau_fb->nvbo);
info->screen_size = size;
- drm_fb_helper_fill_fix(info, fb->pitch, fb->depth);
drm_fb_helper_fill_var(info, &nfbdev->helper, sizes->fb_width, sizes->fb_height);
- /* FIXME: we really shouldn't expose mmio space at all */
- info->fix.mmio_start = pci_resource_start(pdev, 1);
- info->fix.mmio_len = pci_resource_len(pdev, 1);
-
/* Set aperture base/size for vesafb takeover */
info->apertures = dev_priv->apertures;
if (!info->apertures) {
@@ -285,19 +374,20 @@ nouveau_fbcon_create(struct nouveau_fbdev *nfbdev,
info->pixmap.flags = FB_PIXMAP_SYSTEM;
info->pixmap.scan_align = 1;
+ mutex_unlock(&dev->struct_mutex);
+
if (dev_priv->channel && !nouveau_nofbaccel) {
- switch (dev_priv->card_type) {
- case NV_C0:
- break;
- case NV_50:
- nv50_fbcon_accel_init(info);
- info->fbops = &nv50_fbcon_ops;
- break;
- default:
- nv04_fbcon_accel_init(info);
- info->fbops = &nv04_fbcon_ops;
- break;
- };
+ ret = -ENODEV;
+ if (dev_priv->card_type < NV_50)
+ ret = nv04_fbcon_accel_init(info);
+ else
+ if (dev_priv->card_type < NV_C0)
+ ret = nv50_fbcon_accel_init(info);
+ else
+ ret = nvc0_fbcon_accel_init(info);
+
+ if (ret == 0)
+ info->fbops = &nouveau_fbcon_ops;
}
nouveau_fbcon_zfill(dev, nfbdev);
@@ -308,7 +398,6 @@ nouveau_fbcon_create(struct nouveau_fbdev *nfbdev,
nouveau_fb->base.height,
nvbo->bo.offset, nvbo);
- mutex_unlock(&dev->struct_mutex);
vga_switcheroo_client_fb_set(dev->pdev, info);
return 0;
diff --git a/drivers/gpu/drm/nouveau/nouveau_fbcon.h b/drivers/gpu/drm/nouveau/nouveau_fbcon.h
index e7e12684c37..b73c29f87fc 100644
--- a/drivers/gpu/drm/nouveau/nouveau_fbcon.h
+++ b/drivers/gpu/drm/nouveau/nouveau_fbcon.h
@@ -40,15 +40,21 @@ struct nouveau_fbdev {
void nouveau_fbcon_restore(void);
-void nv04_fbcon_copyarea(struct fb_info *info, const struct fb_copyarea *region);
-void nv04_fbcon_fillrect(struct fb_info *info, const struct fb_fillrect *rect);
-void nv04_fbcon_imageblit(struct fb_info *info, const struct fb_image *image);
+int nv04_fbcon_copyarea(struct fb_info *info, const struct fb_copyarea *region);
+int nv04_fbcon_fillrect(struct fb_info *info, const struct fb_fillrect *rect);
+int nv04_fbcon_imageblit(struct fb_info *info, const struct fb_image *image);
int nv04_fbcon_accel_init(struct fb_info *info);
-void nv50_fbcon_fillrect(struct fb_info *info, const struct fb_fillrect *rect);
-void nv50_fbcon_copyarea(struct fb_info *info, const struct fb_copyarea *region);
-void nv50_fbcon_imageblit(struct fb_info *info, const struct fb_image *image);
+
+int nv50_fbcon_fillrect(struct fb_info *info, const struct fb_fillrect *rect);
+int nv50_fbcon_copyarea(struct fb_info *info, const struct fb_copyarea *region);
+int nv50_fbcon_imageblit(struct fb_info *info, const struct fb_image *image);
int nv50_fbcon_accel_init(struct fb_info *info);
+int nvc0_fbcon_fillrect(struct fb_info *info, const struct fb_fillrect *rect);
+int nvc0_fbcon_copyarea(struct fb_info *info, const struct fb_copyarea *region);
+int nvc0_fbcon_imageblit(struct fb_info *info, const struct fb_image *image);
+int nvc0_fbcon_accel_init(struct fb_info *info);
+
void nouveau_fbcon_gpu_lockup(struct fb_info *info);
int nouveau_fbcon_init(struct drm_device *dev);
diff --git a/drivers/gpu/drm/nouveau/nouveau_fence.c b/drivers/gpu/drm/nouveau/nouveau_fence.c
index ab1bbfbf266..221b8462ea3 100644
--- a/drivers/gpu/drm/nouveau/nouveau_fence.c
+++ b/drivers/gpu/drm/nouveau/nouveau_fence.c
@@ -32,7 +32,8 @@
#include "nouveau_dma.h"
#define USE_REFCNT(dev) (nouveau_private(dev)->chipset >= 0x10)
-#define USE_SEMA(dev) (nouveau_private(dev)->chipset >= 0x17)
+#define USE_SEMA(dev) (nouveau_private(dev)->chipset >= 0x17 && \
+ nouveau_private(dev)->card_type < NV_C0)
struct nouveau_fence {
struct nouveau_channel *channel;
@@ -64,6 +65,7 @@ nouveau_fence_del(struct kref *ref)
struct nouveau_fence *fence =
container_of(ref, struct nouveau_fence, refcount);
+ nouveau_channel_ref(NULL, &fence->channel);
kfree(fence);
}
@@ -76,14 +78,17 @@ nouveau_fence_update(struct nouveau_channel *chan)
spin_lock(&chan->fence.lock);
- if (USE_REFCNT(dev))
- sequence = nvchan_rd32(chan, 0x48);
- else
- sequence = atomic_read(&chan->fence.last_sequence_irq);
+ /* Fetch the last sequence if the channel is still up and running */
+ if (likely(!list_empty(&chan->fence.pending))) {
+ if (USE_REFCNT(dev))
+ sequence = nvchan_rd32(chan, 0x48);
+ else
+ sequence = atomic_read(&chan->fence.last_sequence_irq);
- if (chan->fence.sequence_ack == sequence)
- goto out;
- chan->fence.sequence_ack = sequence;
+ if (chan->fence.sequence_ack == sequence)
+ goto out;
+ chan->fence.sequence_ack = sequence;
+ }
list_for_each_entry_safe(fence, tmp, &chan->fence.pending, entry) {
sequence = fence->sequence;
@@ -113,13 +118,13 @@ nouveau_fence_new(struct nouveau_channel *chan, struct nouveau_fence **pfence,
if (!fence)
return -ENOMEM;
kref_init(&fence->refcount);
- fence->channel = chan;
+ nouveau_channel_ref(chan, &fence->channel);
if (emit)
ret = nouveau_fence_emit(fence);
if (ret)
- nouveau_fence_unref((void *)&fence);
+ nouveau_fence_unref(&fence);
*pfence = fence;
return ret;
}
@@ -127,7 +132,7 @@ nouveau_fence_new(struct nouveau_channel *chan, struct nouveau_fence **pfence,
struct nouveau_channel *
nouveau_fence_channel(struct nouveau_fence *fence)
{
- return fence ? fence->channel : NULL;
+ return fence ? nouveau_channel_get_unlocked(fence->channel) : NULL;
}
int
@@ -135,6 +140,7 @@ nouveau_fence_emit(struct nouveau_fence *fence)
{
struct nouveau_channel *chan = fence->channel;
struct drm_device *dev = chan->dev;
+ struct drm_nouveau_private *dev_priv = dev->dev_private;
int ret;
ret = RING_SPACE(chan, 2);
@@ -155,8 +161,15 @@ nouveau_fence_emit(struct nouveau_fence *fence)
list_add_tail(&fence->entry, &chan->fence.pending);
spin_unlock(&chan->fence.lock);
- BEGIN_RING(chan, NvSubSw, USE_REFCNT(dev) ? 0x0050 : 0x0150, 1);
- OUT_RING(chan, fence->sequence);
+ if (USE_REFCNT(dev)) {
+ if (dev_priv->card_type < NV_C0)
+ BEGIN_RING(chan, NvSubSw, 0x0050, 1);
+ else
+ BEGIN_NVC0(chan, 2, NvSubM2MF, 0x0050, 1);
+ } else {
+ BEGIN_RING(chan, NvSubSw, 0x0150, 1);
+ }
+ OUT_RING (chan, fence->sequence);
FIRE_RING(chan);
return 0;
@@ -182,7 +195,7 @@ nouveau_fence_work(struct nouveau_fence *fence,
}
void
-nouveau_fence_unref(void **sync_obj)
+__nouveau_fence_unref(void **sync_obj)
{
struct nouveau_fence *fence = nouveau_fence(*sync_obj);
@@ -192,7 +205,7 @@ nouveau_fence_unref(void **sync_obj)
}
void *
-nouveau_fence_ref(void *sync_obj)
+__nouveau_fence_ref(void *sync_obj)
{
struct nouveau_fence *fence = nouveau_fence(sync_obj);
@@ -201,7 +214,7 @@ nouveau_fence_ref(void *sync_obj)
}
bool
-nouveau_fence_signalled(void *sync_obj, void *sync_arg)
+__nouveau_fence_signalled(void *sync_obj, void *sync_arg)
{
struct nouveau_fence *fence = nouveau_fence(sync_obj);
struct nouveau_channel *chan = fence->channel;
@@ -214,13 +227,14 @@ nouveau_fence_signalled(void *sync_obj, void *sync_arg)
}
int
-nouveau_fence_wait(void *sync_obj, void *sync_arg, bool lazy, bool intr)
+__nouveau_fence_wait(void *sync_obj, void *sync_arg, bool lazy, bool intr)
{
unsigned long timeout = jiffies + (3 * DRM_HZ);
+ unsigned long sleep_time = jiffies + 1;
int ret = 0;
while (1) {
- if (nouveau_fence_signalled(sync_obj, sync_arg))
+ if (__nouveau_fence_signalled(sync_obj, sync_arg))
break;
if (time_after_eq(jiffies, timeout)) {
@@ -230,7 +244,7 @@ nouveau_fence_wait(void *sync_obj, void *sync_arg, bool lazy, bool intr)
__set_current_state(intr ? TASK_INTERRUPTIBLE
: TASK_UNINTERRUPTIBLE);
- if (lazy)
+ if (lazy && time_after_eq(jiffies, sleep_time))
schedule_timeout(1);
if (intr && signal_pending(current)) {
@@ -368,7 +382,7 @@ emit_semaphore(struct nouveau_channel *chan, int method,
kref_get(&sema->ref);
nouveau_fence_work(fence, semaphore_work, sema);
- nouveau_fence_unref((void *)&fence);
+ nouveau_fence_unref(&fence);
return 0;
}
@@ -380,33 +394,49 @@ nouveau_fence_sync(struct nouveau_fence *fence,
struct nouveau_channel *chan = nouveau_fence_channel(fence);
struct drm_device *dev = wchan->dev;
struct nouveau_semaphore *sema;
- int ret;
+ int ret = 0;
- if (likely(!fence || chan == wchan ||
- nouveau_fence_signalled(fence, NULL)))
- return 0;
+ if (likely(!chan || chan == wchan ||
+ nouveau_fence_signalled(fence)))
+ goto out;
sema = alloc_semaphore(dev);
if (!sema) {
/* Early card or broken userspace, fall back to
* software sync. */
- return nouveau_fence_wait(fence, NULL, false, false);
+ ret = nouveau_fence_wait(fence, true, false);
+ goto out;
+ }
+
+ /* try to take chan's mutex; if we can't take it right away
+ * we have to fall back to software sync to prevent locking
+ * order issues
+ */
+ if (!mutex_trylock(&chan->mutex)) {
+ ret = nouveau_fence_wait(fence, true, false);
+ goto out_unref;
}
/* Make wchan wait until it gets signalled */
ret = emit_semaphore(wchan, NV_SW_SEMAPHORE_ACQUIRE, sema);
if (ret)
- goto out;
+ goto out_unlock;
/* Signal the semaphore from chan */
ret = emit_semaphore(chan, NV_SW_SEMAPHORE_RELEASE, sema);
-out:
+
+out_unlock:
+ mutex_unlock(&chan->mutex);
+out_unref:
kref_put(&sema->ref, free_semaphore);
+out:
+ if (chan)
+ nouveau_channel_put_unlocked(&chan);
return ret;
}
int
-nouveau_fence_flush(void *sync_obj, void *sync_arg)
+__nouveau_fence_flush(void *sync_obj, void *sync_arg)
{
return 0;
}
@@ -420,30 +450,27 @@ nouveau_fence_channel_init(struct nouveau_channel *chan)
int ret;
/* Create an NV_SW object for various sync purposes */
- ret = nouveau_gpuobj_sw_new(chan, NV_SW, &obj);
+ ret = nouveau_gpuobj_gr_new(chan, NvSw, NV_SW);
if (ret)
return ret;
- ret = nouveau_ramht_insert(chan, NvSw, obj);
- nouveau_gpuobj_ref(NULL, &obj);
- if (ret)
- return ret;
-
- ret = RING_SPACE(chan, 2);
- if (ret)
- return ret;
- BEGIN_RING(chan, NvSubSw, 0, 1);
- OUT_RING(chan, NvSw);
+ /* we leave subchannel empty for nvc0 */
+ if (dev_priv->card_type < NV_C0) {
+ ret = RING_SPACE(chan, 2);
+ if (ret)
+ return ret;
+ BEGIN_RING(chan, NvSubSw, 0, 1);
+ OUT_RING(chan, NvSw);
+ }
/* Create a DMA object for the shared cross-channel sync area. */
if (USE_SEMA(dev)) {
- struct drm_mm_node *mem = dev_priv->fence.bo->bo.mem.mm_node;
+ struct ttm_mem_reg *mem = &dev_priv->fence.bo->bo.mem;
ret = nouveau_gpuobj_dma_new(chan, NV_CLASS_DMA_IN_MEMORY,
mem->start << PAGE_SHIFT,
- mem->size << PAGE_SHIFT,
- NV_DMA_ACCESS_RW,
- NV_DMA_TARGET_VIDMEM, &obj);
+ mem->size, NV_MEM_ACCESS_RW,
+ NV_MEM_TARGET_VRAM, &obj);
if (ret)
return ret;
@@ -473,6 +500,8 @@ nouveau_fence_channel_fini(struct nouveau_channel *chan)
{
struct nouveau_fence *tmp, *fence;
+ spin_lock(&chan->fence.lock);
+
list_for_each_entry_safe(fence, tmp, &chan->fence.pending, entry) {
fence->signalled = true;
list_del(&fence->entry);
@@ -482,6 +511,8 @@ nouveau_fence_channel_fini(struct nouveau_channel *chan)
kref_put(&fence->refcount, nouveau_fence_del);
}
+
+ spin_unlock(&chan->fence.lock);
}
int
diff --git a/drivers/gpu/drm/nouveau/nouveau_gem.c b/drivers/gpu/drm/nouveau/nouveau_gem.c
index 9a1fdcf400c..506c508b7ed 100644
--- a/drivers/gpu/drm/nouveau/nouveau_gem.c
+++ b/drivers/gpu/drm/nouveau/nouveau_gem.c
@@ -48,9 +48,6 @@ nouveau_gem_object_del(struct drm_gem_object *gem)
return;
nvbo->gem = NULL;
- if (unlikely(nvbo->cpu_filp))
- ttm_bo_synccpu_write_release(bo);
-
if (unlikely(nvbo->pin_refcnt)) {
nvbo->pin_refcnt = 1;
nouveau_bo_unpin(nvbo);
@@ -106,32 +103,6 @@ nouveau_gem_info(struct drm_gem_object *gem, struct drm_nouveau_gem_info *rep)
return 0;
}
-static bool
-nouveau_gem_tile_flags_valid(struct drm_device *dev, uint32_t tile_flags)
-{
- struct drm_nouveau_private *dev_priv = dev->dev_private;
-
- if (dev_priv->card_type >= NV_50) {
- switch (tile_flags & NOUVEAU_GEM_TILE_LAYOUT_MASK) {
- case 0x0000:
- case 0x1800:
- case 0x2800:
- case 0x4800:
- case 0x7000:
- case 0x7400:
- case 0x7a00:
- case 0xe000:
- return true;
- }
- } else {
- if (!(tile_flags & NOUVEAU_GEM_TILE_LAYOUT_MASK))
- return true;
- }
-
- NV_ERROR(dev, "bad page flags: 0x%08x\n", tile_flags);
- return false;
-}
-
int
nouveau_gem_ioctl_new(struct drm_device *dev, void *data,
struct drm_file *file_priv)
@@ -146,11 +117,6 @@ nouveau_gem_ioctl_new(struct drm_device *dev, void *data,
if (unlikely(dev_priv->ttm.bdev.dev_mapping == NULL))
dev_priv->ttm.bdev.dev_mapping = dev_priv->dev->dev_mapping;
- if (req->channel_hint) {
- NOUVEAU_GET_USER_CHANNEL_WITH_RETURN(req->channel_hint,
- file_priv, chan);
- }
-
if (req->info.domain & NOUVEAU_GEM_DOMAIN_VRAM)
flags |= TTM_PL_FLAG_VRAM;
if (req->info.domain & NOUVEAU_GEM_DOMAIN_GART)
@@ -158,13 +124,23 @@ nouveau_gem_ioctl_new(struct drm_device *dev, void *data,
if (!flags || req->info.domain & NOUVEAU_GEM_DOMAIN_CPU)
flags |= TTM_PL_FLAG_SYSTEM;
- if (!nouveau_gem_tile_flags_valid(dev, req->info.tile_flags))
+ if (!dev_priv->engine.vram.flags_valid(dev, req->info.tile_flags)) {
+ NV_ERROR(dev, "bad page flags: 0x%08x\n", req->info.tile_flags);
return -EINVAL;
+ }
+
+ if (req->channel_hint) {
+ chan = nouveau_channel_get(dev, file_priv, req->channel_hint);
+ if (IS_ERR(chan))
+ return PTR_ERR(chan);
+ }
ret = nouveau_gem_new(dev, chan, req->info.size, req->align, flags,
req->info.tile_mode, req->info.tile_flags, false,
(req->info.domain & NOUVEAU_GEM_DOMAIN_MAPPABLE),
&nvbo);
+ if (chan)
+ nouveau_channel_put(&chan);
if (ret)
return ret;
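
Every exit path of the ioctl is now bracketed by nouveau_channel_get()/nouveau_channel_put(), and the get reports failure through ERR_PTR() rather than NULL. A condensed sketch of the discipline as this patch uses it (do_work is a hypothetical stand-in for the ioctl body):

	chan = nouveau_channel_get(dev, file_priv, req->channel);
	if (IS_ERR(chan))
		return PTR_ERR(chan);

	ret = do_work(dev, chan);	/* hypothetical body */

	nouveau_channel_put(&chan);	/* pass-by-reference; presumably clears the pointer */
	return ret;
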
@@ -231,15 +207,8 @@ validate_fini_list(struct list_head *list, struct nouveau_fence *fence)
list_for_each_safe(entry, tmp, list) {
nvbo = list_entry(entry, struct nouveau_bo, entry);
- if (likely(fence)) {
- struct nouveau_fence *prev_fence;
-
- spin_lock(&nvbo->bo.lock);
- prev_fence = nvbo->bo.sync_obj;
- nvbo->bo.sync_obj = nouveau_fence_ref(fence);
- spin_unlock(&nvbo->bo.lock);
- nouveau_fence_unref((void *)&prev_fence);
- }
+
+ nouveau_bo_fence(nvbo, fence);
if (unlikely(nvbo->validate_mapped)) {
ttm_bo_kunmap(&nvbo->kmap);
@@ -299,14 +268,15 @@ retry:
return -EINVAL;
}
- ret = ttm_bo_reserve(&nvbo->bo, false, false, true, sequence);
+ ret = ttm_bo_reserve(&nvbo->bo, true, false, true, sequence);
if (ret) {
validate_fini(op, NULL);
- if (ret == -EAGAIN)
- ret = ttm_bo_wait_unreserved(&nvbo->bo, false);
+ if (unlikely(ret == -EAGAIN))
+ ret = ttm_bo_wait_unreserved(&nvbo->bo, true);
drm_gem_object_unreference_unlocked(gem);
- if (ret) {
- NV_ERROR(dev, "fail reserve\n");
+ if (unlikely(ret)) {
+ if (ret != -ERESTARTSYS)
+ NV_ERROR(dev, "fail reserve\n");
return ret;
}
goto retry;
@@ -331,25 +301,6 @@ retry:
validate_fini(op, NULL);
return -EINVAL;
}
-
- if (unlikely(atomic_read(&nvbo->bo.cpu_writers) > 0)) {
- validate_fini(op, NULL);
-
- if (nvbo->cpu_filp == file_priv) {
- NV_ERROR(dev, "bo %p mapped by process trying "
- "to validate it!\n", nvbo);
- return -EINVAL;
- }
-
- mutex_unlock(&drm_global_mutex);
- ret = ttm_bo_wait_cpu(&nvbo->bo, false);
- mutex_lock(&drm_global_mutex);
- if (ret) {
- NV_ERROR(dev, "fail wait_cpu\n");
- return ret;
- }
- goto retry;
- }
}
return 0;
@@ -383,11 +334,11 @@ validate_list(struct nouveau_channel *chan, struct list_head *list,
}
nvbo->channel = (b->read_domains & (1 << 31)) ? NULL : chan;
- ret = ttm_bo_validate(&nvbo->bo, &nvbo->placement,
- false, false, false);
+ ret = nouveau_bo_validate(nvbo, true, false, false);
nvbo->channel = NULL;
if (unlikely(ret)) {
- NV_ERROR(dev, "fail ttm_validate\n");
+ if (ret != -ERESTARTSYS)
+ NV_ERROR(dev, "fail ttm_validate\n");
return ret;
}
@@ -439,13 +390,15 @@ nouveau_gem_pushbuf_validate(struct nouveau_channel *chan,
ret = validate_init(chan, file_priv, pbbo, nr_buffers, op);
if (unlikely(ret)) {
- NV_ERROR(dev, "validate_init\n");
+ if (ret != -ERESTARTSYS)
+ NV_ERROR(dev, "validate_init\n");
return ret;
}
ret = validate_list(chan, &op->vram_list, pbbo, user_buffers);
if (unlikely(ret < 0)) {
- NV_ERROR(dev, "validate vram_list\n");
+ if (ret != -ERESTARTSYS)
+ NV_ERROR(dev, "validate vram_list\n");
validate_fini(op, NULL);
return ret;
}
@@ -453,7 +406,8 @@ nouveau_gem_pushbuf_validate(struct nouveau_channel *chan,
ret = validate_list(chan, &op->gart_list, pbbo, user_buffers);
if (unlikely(ret < 0)) {
- NV_ERROR(dev, "validate gart_list\n");
+ if (ret != -ERESTARTSYS)
+ NV_ERROR(dev, "validate gart_list\n");
validate_fini(op, NULL);
return ret;
}
@@ -461,7 +415,8 @@ nouveau_gem_pushbuf_validate(struct nouveau_channel *chan,
ret = validate_list(chan, &op->both_list, pbbo, user_buffers);
if (unlikely(ret < 0)) {
- NV_ERROR(dev, "validate both_list\n");
+ if (ret != -ERESTARTSYS)
+ NV_ERROR(dev, "validate both_list\n");
validate_fini(op, NULL);
return ret;
}
@@ -557,9 +512,9 @@ nouveau_gem_pushbuf_reloc_apply(struct drm_device *dev,
data |= r->vor;
}
- spin_lock(&nvbo->bo.lock);
+ spin_lock(&nvbo->bo.bdev->fence_lock);
ret = ttm_bo_wait(&nvbo->bo, false, false, false);
- spin_unlock(&nvbo->bo.lock);
+ spin_unlock(&nvbo->bo.bdev->fence_lock);
if (ret) {
NV_ERROR(dev, "reloc wait_idle failed: %d\n", ret);
break;
@@ -585,7 +540,9 @@ nouveau_gem_ioctl_pushbuf(struct drm_device *dev, void *data,
struct nouveau_fence *fence = NULL;
int i, j, ret = 0, do_reloc = 0;
- NOUVEAU_GET_USER_CHANNEL_WITH_RETURN(req->channel, file_priv, chan);
+ chan = nouveau_channel_get(dev, file_priv, req->channel);
+ if (IS_ERR(chan))
+ return PTR_ERR(chan);
req->vram_available = dev_priv->fb_aper_free;
req->gart_available = dev_priv->gart_info.aper_free;
@@ -595,28 +552,34 @@ nouveau_gem_ioctl_pushbuf(struct drm_device *dev, void *data,
if (unlikely(req->nr_push > NOUVEAU_GEM_MAX_PUSH)) {
NV_ERROR(dev, "pushbuf push count exceeds limit: %d max %d\n",
req->nr_push, NOUVEAU_GEM_MAX_PUSH);
+ nouveau_channel_put(&chan);
return -EINVAL;
}
if (unlikely(req->nr_buffers > NOUVEAU_GEM_MAX_BUFFERS)) {
NV_ERROR(dev, "pushbuf bo count exceeds limit: %d max %d\n",
req->nr_buffers, NOUVEAU_GEM_MAX_BUFFERS);
+ nouveau_channel_put(&chan);
return -EINVAL;
}
if (unlikely(req->nr_relocs > NOUVEAU_GEM_MAX_RELOCS)) {
NV_ERROR(dev, "pushbuf reloc count exceeds limit: %d max %d\n",
req->nr_relocs, NOUVEAU_GEM_MAX_RELOCS);
+ nouveau_channel_put(&chan);
return -EINVAL;
}
push = u_memcpya(req->push, req->nr_push, sizeof(*push));
- if (IS_ERR(push))
+ if (IS_ERR(push)) {
+ nouveau_channel_put(&chan);
return PTR_ERR(push);
+ }
bo = u_memcpya(req->buffers, req->nr_buffers, sizeof(*bo));
if (IS_ERR(bo)) {
kfree(push);
+ nouveau_channel_put(&chan);
return PTR_ERR(bo);
}
@@ -639,7 +602,8 @@ nouveau_gem_ioctl_pushbuf(struct drm_device *dev, void *data,
ret = nouveau_gem_pushbuf_validate(chan, file_priv, bo, req->buffers,
req->nr_buffers, &op, &do_reloc);
if (ret) {
- NV_ERROR(dev, "validate: %d\n", ret);
+ if (ret != -ERESTARTSYS)
+ NV_ERROR(dev, "validate: %d\n", ret);
goto out;
}
@@ -732,7 +696,7 @@ nouveau_gem_ioctl_pushbuf(struct drm_device *dev, void *data,
out:
validate_fini(&op, fence);
- nouveau_fence_unref((void**)&fence);
+ nouveau_fence_unref(&fence);
kfree(bo);
kfree(push);
@@ -750,6 +714,7 @@ out_next:
req->suffix1 = 0x00000000;
}
+ nouveau_channel_put(&chan);
return ret;
}
@@ -781,26 +746,9 @@ nouveau_gem_ioctl_cpu_prep(struct drm_device *dev, void *data,
return -ENOENT;
nvbo = nouveau_gem_object(gem);
- if (nvbo->cpu_filp) {
- if (nvbo->cpu_filp == file_priv)
- goto out;
-
- ret = ttm_bo_wait_cpu(&nvbo->bo, no_wait);
- if (ret)
- goto out;
- }
-
- if (req->flags & NOUVEAU_GEM_CPU_PREP_NOBLOCK) {
- spin_lock(&nvbo->bo.lock);
- ret = ttm_bo_wait(&nvbo->bo, false, false, no_wait);
- spin_unlock(&nvbo->bo.lock);
- } else {
- ret = ttm_bo_synccpu_write_grab(&nvbo->bo, no_wait);
- if (ret == 0)
- nvbo->cpu_filp = file_priv;
- }
-
-out:
+ spin_lock(&nvbo->bo.bdev->fence_lock);
+ ret = ttm_bo_wait(&nvbo->bo, true, true, no_wait);
+ spin_unlock(&nvbo->bo.bdev->fence_lock);
drm_gem_object_unreference_unlocked(gem);
return ret;
}
@@ -809,26 +757,7 @@ int
nouveau_gem_ioctl_cpu_fini(struct drm_device *dev, void *data,
struct drm_file *file_priv)
{
- struct drm_nouveau_gem_cpu_prep *req = data;
- struct drm_gem_object *gem;
- struct nouveau_bo *nvbo;
- int ret = -EINVAL;
-
- gem = drm_gem_object_lookup(dev, file_priv, req->handle);
- if (!gem)
- return -ENOENT;
- nvbo = nouveau_gem_object(gem);
-
- if (nvbo->cpu_filp != file_priv)
- goto out;
- nvbo->cpu_filp = NULL;
-
- ttm_bo_synccpu_write_release(&nvbo->bo);
- ret = 0;
-
-out:
- drm_gem_object_unreference_unlocked(gem);
- return ret;
+ return 0;
}
int
diff --git a/drivers/gpu/drm/nouveau/nouveau_hw.c b/drivers/gpu/drm/nouveau/nouveau_hw.c
index b9672a05c41..053edf9d2f6 100644
--- a/drivers/gpu/drm/nouveau/nouveau_hw.c
+++ b/drivers/gpu/drm/nouveau/nouveau_hw.c
@@ -953,7 +953,7 @@ nv_load_state_ext(struct drm_device *dev, int head,
NVWriteCRTC(dev, head, NV_PCRTC_850, regp->crtc_850);
reg900 = NVReadRAMDAC(dev, head, NV_PRAMDAC_900);
- if (regp->crtc_cfg == NV_PCRTC_CONFIG_START_ADDRESS_HSYNC)
+ if (regp->crtc_cfg == NV10_PCRTC_CONFIG_START_ADDRESS_HSYNC)
NVWriteRAMDAC(dev, head, NV_PRAMDAC_900, reg900 | 0x10000);
else
NVWriteRAMDAC(dev, head, NV_PRAMDAC_900, reg900 & ~0x10000);
@@ -999,8 +999,8 @@ nv_load_state_ext(struct drm_device *dev, int head,
if (dev_priv->card_type == NV_10) {
/* Not waiting for vertical retrace before modifying
CRE_53/CRE_54 causes lockups. */
- nouveau_wait_until(dev, 650000000, NV_PRMCIO_INP0__COLOR, 0x8, 0x8);
- nouveau_wait_until(dev, 650000000, NV_PRMCIO_INP0__COLOR, 0x8, 0x0);
+ nouveau_wait_eq(dev, 650000000, NV_PRMCIO_INP0__COLOR, 0x8, 0x8);
+ nouveau_wait_eq(dev, 650000000, NV_PRMCIO_INP0__COLOR, 0x8, 0x0);
}
wr_cio_state(dev, head, regp, NV_CIO_CRE_53);
@@ -1017,8 +1017,9 @@ nv_load_state_ext(struct drm_device *dev, int head,
NVWriteCRTC(dev, head, NV_PCRTC_START, regp->fb_start);
- /* Setting 1 on this value gives you interrupts for every vblank period. */
- NVWriteCRTC(dev, head, NV_PCRTC_INTR_EN_0, 0);
+ /* Enable vblank interrupts. */
+ NVWriteCRTC(dev, head, NV_PCRTC_INTR_EN_0,
+ (dev->vblank_enabled[head] ? 1 : 0));
NVWriteCRTC(dev, head, NV_PCRTC_INTR_0, NV_PCRTC_INTR_0_VBLANK);
}
diff --git a/drivers/gpu/drm/nouveau/nouveau_irq.c b/drivers/gpu/drm/nouveau/nouveau_irq.c
index 7bfd9e6c9d6..2ba7265bc96 100644
--- a/drivers/gpu/drm/nouveau/nouveau_irq.c
+++ b/drivers/gpu/drm/nouveau/nouveau_irq.c
@@ -36,18 +36,7 @@
#include "nouveau_drv.h"
#include "nouveau_reg.h"
#include "nouveau_ramht.h"
-#include <linux/ratelimit.h>
-
-/* needed for hotplug irq */
-#include "nouveau_connector.h"
-#include "nv50_display.h"
-
-static DEFINE_RATELIMIT_STATE(nouveau_ratelimit_state, 3 * HZ, 20);
-
-static int nouveau_ratelimit(void)
-{
- return __ratelimit(&nouveau_ratelimit_state);
-}
+#include "nouveau_util.h"
void
nouveau_irq_preinstall(struct drm_device *dev)
@@ -57,19 +46,19 @@ nouveau_irq_preinstall(struct drm_device *dev)
/* Master disable */
nv_wr32(dev, NV03_PMC_INTR_EN_0, 0);
- if (dev_priv->card_type >= NV_50) {
- INIT_WORK(&dev_priv->irq_work, nv50_display_irq_handler_bh);
- INIT_WORK(&dev_priv->hpd_work, nv50_display_irq_hotplug_bh);
- spin_lock_init(&dev_priv->hpd_state.lock);
- INIT_LIST_HEAD(&dev_priv->vbl_waiting);
- }
+ INIT_LIST_HEAD(&dev_priv->vbl_waiting);
}
int
nouveau_irq_postinstall(struct drm_device *dev)
{
+ struct drm_nouveau_private *dev_priv = dev->dev_private;
+
/* Master enable */
nv_wr32(dev, NV03_PMC_INTR_EN_0, NV_PMC_INTR_EN_0_MASTER_ENABLE);
+ if (dev_priv->msi_enabled)
+ nv_wr08(dev, 0x00088068, 0xff);
+
return 0;
}
@@ -80,1178 +69,83 @@ nouveau_irq_uninstall(struct drm_device *dev)
nv_wr32(dev, NV03_PMC_INTR_EN_0, 0);
}
-static int
-nouveau_call_method(struct nouveau_channel *chan, int class, int mthd, int data)
-{
- struct drm_nouveau_private *dev_priv = chan->dev->dev_private;
- struct nouveau_pgraph_object_method *grm;
- struct nouveau_pgraph_object_class *grc;
-
- grc = dev_priv->engine.graph.grclass;
- while (grc->id) {
- if (grc->id == class)
- break;
- grc++;
- }
-
- if (grc->id != class || !grc->methods)
- return -ENOENT;
-
- grm = grc->methods;
- while (grm->id) {
- if (grm->id == mthd)
- return grm->exec(chan, class, mthd, data);
- grm++;
- }
-
- return -ENOENT;
-}
-
-static bool
-nouveau_fifo_swmthd(struct nouveau_channel *chan, uint32_t addr, uint32_t data)
-{
- struct drm_device *dev = chan->dev;
- const int subc = (addr >> 13) & 0x7;
- const int mthd = addr & 0x1ffc;
-
- if (mthd == 0x0000) {
- struct nouveau_gpuobj *gpuobj;
-
- gpuobj = nouveau_ramht_find(chan, data);
- if (!gpuobj)
- return false;
-
- if (gpuobj->engine != NVOBJ_ENGINE_SW)
- return false;
-
- chan->sw_subchannel[subc] = gpuobj->class;
- nv_wr32(dev, NV04_PFIFO_CACHE1_ENGINE, nv_rd32(dev,
- NV04_PFIFO_CACHE1_ENGINE) & ~(0xf << subc * 4));
- return true;
- }
-
- /* hw object */
- if (nv_rd32(dev, NV04_PFIFO_CACHE1_ENGINE) & (1 << (subc*4)))
- return false;
-
- if (nouveau_call_method(chan, chan->sw_subchannel[subc], mthd, data))
- return false;
-
- return true;
-}
-
-static void
-nouveau_fifo_irq_handler(struct drm_device *dev)
-{
- struct drm_nouveau_private *dev_priv = dev->dev_private;
- struct nouveau_engine *engine = &dev_priv->engine;
- uint32_t status, reassign;
- int cnt = 0;
-
- reassign = nv_rd32(dev, NV03_PFIFO_CACHES) & 1;
- while ((status = nv_rd32(dev, NV03_PFIFO_INTR_0)) && (cnt++ < 100)) {
- struct nouveau_channel *chan = NULL;
- uint32_t chid, get;
-
- nv_wr32(dev, NV03_PFIFO_CACHES, 0);
-
- chid = engine->fifo.channel_id(dev);
- if (chid >= 0 && chid < engine->fifo.channels)
- chan = dev_priv->fifos[chid];
- get = nv_rd32(dev, NV03_PFIFO_CACHE1_GET);
-
- if (status & NV_PFIFO_INTR_CACHE_ERROR) {
- uint32_t mthd, data;
- int ptr;
-
- /* NV_PFIFO_CACHE1_GET actually goes to 0xffc before
- * wrapping on my G80 chips, but CACHE1 isn't big
- * enough for this much data.. Tests show that it
- * wraps around to the start at GET=0x800.. No clue
- * as to why..
- */
- ptr = (get & 0x7ff) >> 2;
-
- if (dev_priv->card_type < NV_40) {
- mthd = nv_rd32(dev,
- NV04_PFIFO_CACHE1_METHOD(ptr));
- data = nv_rd32(dev,
- NV04_PFIFO_CACHE1_DATA(ptr));
- } else {
- mthd = nv_rd32(dev,
- NV40_PFIFO_CACHE1_METHOD(ptr));
- data = nv_rd32(dev,
- NV40_PFIFO_CACHE1_DATA(ptr));
- }
-
- if (!chan || !nouveau_fifo_swmthd(chan, mthd, data)) {
- NV_INFO(dev, "PFIFO_CACHE_ERROR - Ch %d/%d "
- "Mthd 0x%04x Data 0x%08x\n",
- chid, (mthd >> 13) & 7, mthd & 0x1ffc,
- data);
- }
-
- nv_wr32(dev, NV04_PFIFO_CACHE1_DMA_PUSH, 0);
- nv_wr32(dev, NV03_PFIFO_INTR_0,
- NV_PFIFO_INTR_CACHE_ERROR);
-
- nv_wr32(dev, NV03_PFIFO_CACHE1_PUSH0,
- nv_rd32(dev, NV03_PFIFO_CACHE1_PUSH0) & ~1);
- nv_wr32(dev, NV03_PFIFO_CACHE1_GET, get + 4);
- nv_wr32(dev, NV03_PFIFO_CACHE1_PUSH0,
- nv_rd32(dev, NV03_PFIFO_CACHE1_PUSH0) | 1);
- nv_wr32(dev, NV04_PFIFO_CACHE1_HASH, 0);
-
- nv_wr32(dev, NV04_PFIFO_CACHE1_DMA_PUSH,
- nv_rd32(dev, NV04_PFIFO_CACHE1_DMA_PUSH) | 1);
- nv_wr32(dev, NV04_PFIFO_CACHE1_PULL0, 1);
-
- status &= ~NV_PFIFO_INTR_CACHE_ERROR;
- }
-
- if (status & NV_PFIFO_INTR_DMA_PUSHER) {
- u32 dma_get = nv_rd32(dev, 0x003244);
- u32 dma_put = nv_rd32(dev, 0x003240);
- u32 push = nv_rd32(dev, 0x003220);
- u32 state = nv_rd32(dev, 0x003228);
-
- if (dev_priv->card_type == NV_50) {
- u32 ho_get = nv_rd32(dev, 0x003328);
- u32 ho_put = nv_rd32(dev, 0x003320);
- u32 ib_get = nv_rd32(dev, 0x003334);
- u32 ib_put = nv_rd32(dev, 0x003330);
-
- if (nouveau_ratelimit())
- NV_INFO(dev, "PFIFO_DMA_PUSHER - Ch %d Get 0x%02x%08x "
- "Put 0x%02x%08x IbGet 0x%08x IbPut 0x%08x "
- "State 0x%08x Push 0x%08x\n",
- chid, ho_get, dma_get, ho_put,
- dma_put, ib_get, ib_put, state,
- push);
-
- /* METHOD_COUNT, in DMA_STATE on earlier chipsets */
- nv_wr32(dev, 0x003364, 0x00000000);
- if (dma_get != dma_put || ho_get != ho_put) {
- nv_wr32(dev, 0x003244, dma_put);
- nv_wr32(dev, 0x003328, ho_put);
- } else
- if (ib_get != ib_put) {
- nv_wr32(dev, 0x003334, ib_put);
- }
- } else {
- NV_INFO(dev, "PFIFO_DMA_PUSHER - Ch %d Get 0x%08x "
- "Put 0x%08x State 0x%08x Push 0x%08x\n",
- chid, dma_get, dma_put, state, push);
-
- if (dma_get != dma_put)
- nv_wr32(dev, 0x003244, dma_put);
- }
-
- nv_wr32(dev, 0x003228, 0x00000000);
- nv_wr32(dev, 0x003220, 0x00000001);
- nv_wr32(dev, 0x002100, NV_PFIFO_INTR_DMA_PUSHER);
- status &= ~NV_PFIFO_INTR_DMA_PUSHER;
- }
-
- if (status & NV_PFIFO_INTR_SEMAPHORE) {
- uint32_t sem;
-
- status &= ~NV_PFIFO_INTR_SEMAPHORE;
- nv_wr32(dev, NV03_PFIFO_INTR_0,
- NV_PFIFO_INTR_SEMAPHORE);
-
- sem = nv_rd32(dev, NV10_PFIFO_CACHE1_SEMAPHORE);
- nv_wr32(dev, NV10_PFIFO_CACHE1_SEMAPHORE, sem | 0x1);
-
- nv_wr32(dev, NV03_PFIFO_CACHE1_GET, get + 4);
- nv_wr32(dev, NV04_PFIFO_CACHE1_PULL0, 1);
- }
-
- if (dev_priv->card_type == NV_50) {
- if (status & 0x00000010) {
- nv50_fb_vm_trap(dev, 1, "PFIFO_BAR_FAULT");
- status &= ~0x00000010;
- nv_wr32(dev, 0x002100, 0x00000010);
- }
- }
-
- if (status) {
- if (nouveau_ratelimit())
- NV_INFO(dev, "PFIFO_INTR 0x%08x - Ch %d\n",
- status, chid);
- nv_wr32(dev, NV03_PFIFO_INTR_0, status);
- status = 0;
- }
-
- nv_wr32(dev, NV03_PFIFO_CACHES, reassign);
- }
-
- if (status) {
- NV_INFO(dev, "PFIFO still angry after %d spins, halt\n", cnt);
- nv_wr32(dev, 0x2140, 0);
- nv_wr32(dev, 0x140, 0);
- }
-
- nv_wr32(dev, NV03_PMC_INTR_0, NV_PMC_INTR_0_PFIFO_PENDING);
-}
-
-struct nouveau_bitfield_names {
- uint32_t mask;
- const char *name;
-};
-
-static struct nouveau_bitfield_names nstatus_names[] =
-{
- { NV04_PGRAPH_NSTATUS_STATE_IN_USE, "STATE_IN_USE" },
- { NV04_PGRAPH_NSTATUS_INVALID_STATE, "INVALID_STATE" },
- { NV04_PGRAPH_NSTATUS_BAD_ARGUMENT, "BAD_ARGUMENT" },
- { NV04_PGRAPH_NSTATUS_PROTECTION_FAULT, "PROTECTION_FAULT" }
-};
-
-static struct nouveau_bitfield_names nstatus_names_nv10[] =
-{
- { NV10_PGRAPH_NSTATUS_STATE_IN_USE, "STATE_IN_USE" },
- { NV10_PGRAPH_NSTATUS_INVALID_STATE, "INVALID_STATE" },
- { NV10_PGRAPH_NSTATUS_BAD_ARGUMENT, "BAD_ARGUMENT" },
- { NV10_PGRAPH_NSTATUS_PROTECTION_FAULT, "PROTECTION_FAULT" }
-};
-
-static struct nouveau_bitfield_names nsource_names[] =
-{
- { NV03_PGRAPH_NSOURCE_NOTIFICATION, "NOTIFICATION" },
- { NV03_PGRAPH_NSOURCE_DATA_ERROR, "DATA_ERROR" },
- { NV03_PGRAPH_NSOURCE_PROTECTION_ERROR, "PROTECTION_ERROR" },
- { NV03_PGRAPH_NSOURCE_RANGE_EXCEPTION, "RANGE_EXCEPTION" },
- { NV03_PGRAPH_NSOURCE_LIMIT_COLOR, "LIMIT_COLOR" },
- { NV03_PGRAPH_NSOURCE_LIMIT_ZETA, "LIMIT_ZETA" },
- { NV03_PGRAPH_NSOURCE_ILLEGAL_MTHD, "ILLEGAL_MTHD" },
- { NV03_PGRAPH_NSOURCE_DMA_R_PROTECTION, "DMA_R_PROTECTION" },
- { NV03_PGRAPH_NSOURCE_DMA_W_PROTECTION, "DMA_W_PROTECTION" },
- { NV03_PGRAPH_NSOURCE_FORMAT_EXCEPTION, "FORMAT_EXCEPTION" },
- { NV03_PGRAPH_NSOURCE_PATCH_EXCEPTION, "PATCH_EXCEPTION" },
- { NV03_PGRAPH_NSOURCE_STATE_INVALID, "STATE_INVALID" },
- { NV03_PGRAPH_NSOURCE_DOUBLE_NOTIFY, "DOUBLE_NOTIFY" },
- { NV03_PGRAPH_NSOURCE_NOTIFY_IN_USE, "NOTIFY_IN_USE" },
- { NV03_PGRAPH_NSOURCE_METHOD_CNT, "METHOD_CNT" },
- { NV03_PGRAPH_NSOURCE_BFR_NOTIFICATION, "BFR_NOTIFICATION" },
- { NV03_PGRAPH_NSOURCE_DMA_VTX_PROTECTION, "DMA_VTX_PROTECTION" },
- { NV03_PGRAPH_NSOURCE_DMA_WIDTH_A, "DMA_WIDTH_A" },
- { NV03_PGRAPH_NSOURCE_DMA_WIDTH_B, "DMA_WIDTH_B" },
-};
-
-static void
-nouveau_print_bitfield_names_(uint32_t value,
- const struct nouveau_bitfield_names *namelist,
- const int namelist_len)
-{
- /*
- * Caller must have already printed the KERN_* log level for us.
- * Also the caller is responsible for adding the newline.
- */
- int i;
- for (i = 0; i < namelist_len; ++i) {
- uint32_t mask = namelist[i].mask;
- if (value & mask) {
- printk(" %s", namelist[i].name);
- value &= ~mask;
- }
- }
- if (value)
- printk(" (unknown bits 0x%08x)", value);
-}
-#define nouveau_print_bitfield_names(val, namelist) \
- nouveau_print_bitfield_names_((val), (namelist), ARRAY_SIZE(namelist))
-
-struct nouveau_enum_names {
- uint32_t value;
- const char *name;
-};
-
-static void
-nouveau_print_enum_names_(uint32_t value,
- const struct nouveau_enum_names *namelist,
- const int namelist_len)
-{
- /*
- * Caller must have already printed the KERN_* log level for us.
- * Also the caller is responsible for adding the newline.
- */
- int i;
- for (i = 0; i < namelist_len; ++i) {
- if (value == namelist[i].value) {
- printk("%s", namelist[i].name);
- return;
- }
- }
- printk("unknown value 0x%08x", value);
-}
-#define nouveau_print_enum_names(val, namelist) \
- nouveau_print_enum_names_((val), (namelist), ARRAY_SIZE(namelist))
-
-static int
-nouveau_graph_chid_from_grctx(struct drm_device *dev)
+irqreturn_t
+nouveau_irq_handler(DRM_IRQ_ARGS)
{
+ struct drm_device *dev = (struct drm_device *)arg;
struct drm_nouveau_private *dev_priv = dev->dev_private;
- uint32_t inst;
+ unsigned long flags;
+ u32 stat;
int i;
- if (dev_priv->card_type < NV_40)
- return dev_priv->engine.fifo.channels;
- else
- if (dev_priv->card_type < NV_50) {
- inst = (nv_rd32(dev, 0x40032c) & 0xfffff) << 4;
-
- for (i = 0; i < dev_priv->engine.fifo.channels; i++) {
- struct nouveau_channel *chan = dev_priv->fifos[i];
-
- if (!chan || !chan->ramin_grctx)
- continue;
-
- if (inst == chan->ramin_grctx->pinst)
- break;
- }
- } else {
- inst = (nv_rd32(dev, 0x40032c) & 0xfffff) << 12;
-
- for (i = 0; i < dev_priv->engine.fifo.channels; i++) {
- struct nouveau_channel *chan = dev_priv->fifos[i];
-
- if (!chan || !chan->ramin)
- continue;
-
- if (inst == chan->ramin->vinst)
- break;
- }
- }
-
-
- return i;
-}
-
-static int
-nouveau_graph_trapped_channel(struct drm_device *dev, int *channel_ret)
-{
- struct drm_nouveau_private *dev_priv = dev->dev_private;
- struct nouveau_engine *engine = &dev_priv->engine;
- int channel;
-
- if (dev_priv->card_type < NV_10)
- channel = (nv_rd32(dev, NV04_PGRAPH_TRAPPED_ADDR) >> 24) & 0xf;
- else
- if (dev_priv->card_type < NV_40)
- channel = (nv_rd32(dev, NV04_PGRAPH_TRAPPED_ADDR) >> 20) & 0x1f;
- else
- channel = nouveau_graph_chid_from_grctx(dev);
-
- if (channel >= engine->fifo.channels || !dev_priv->fifos[channel]) {
- NV_ERROR(dev, "AIII, invalid/inactive channel id %d\n", channel);
- return -EINVAL;
- }
-
- *channel_ret = channel;
- return 0;
-}
-
-struct nouveau_pgraph_trap {
- int channel;
- int class;
- int subc, mthd, size;
- uint32_t data, data2;
- uint32_t nsource, nstatus;
-};
-
-static void
-nouveau_graph_trap_info(struct drm_device *dev,
- struct nouveau_pgraph_trap *trap)
-{
- struct drm_nouveau_private *dev_priv = dev->dev_private;
- uint32_t address;
-
- trap->nsource = trap->nstatus = 0;
- if (dev_priv->card_type < NV_50) {
- trap->nsource = nv_rd32(dev, NV03_PGRAPH_NSOURCE);
- trap->nstatus = nv_rd32(dev, NV03_PGRAPH_NSTATUS);
- }
-
- if (nouveau_graph_trapped_channel(dev, &trap->channel))
- trap->channel = -1;
- address = nv_rd32(dev, NV04_PGRAPH_TRAPPED_ADDR);
-
- trap->mthd = address & 0x1FFC;
- trap->data = nv_rd32(dev, NV04_PGRAPH_TRAPPED_DATA);
- if (dev_priv->card_type < NV_10) {
- trap->subc = (address >> 13) & 0x7;
- } else {
- trap->subc = (address >> 16) & 0x7;
- trap->data2 = nv_rd32(dev, NV10_PGRAPH_TRAPPED_DATA_HIGH);
- }
-
- if (dev_priv->card_type < NV_10)
- trap->class = nv_rd32(dev, 0x400180 + trap->subc*4) & 0xFF;
- else if (dev_priv->card_type < NV_40)
- trap->class = nv_rd32(dev, 0x400160 + trap->subc*4) & 0xFFF;
- else if (dev_priv->card_type < NV_50)
- trap->class = nv_rd32(dev, 0x400160 + trap->subc*4) & 0xFFFF;
- else
- trap->class = nv_rd32(dev, 0x400814);
-}
-
-static void
-nouveau_graph_dump_trap_info(struct drm_device *dev, const char *id,
- struct nouveau_pgraph_trap *trap)
-{
- struct drm_nouveau_private *dev_priv = dev->dev_private;
- uint32_t nsource = trap->nsource, nstatus = trap->nstatus;
-
- if (dev_priv->card_type < NV_50) {
- NV_INFO(dev, "%s - nSource:", id);
- nouveau_print_bitfield_names(nsource, nsource_names);
- printk(", nStatus:");
- if (dev_priv->card_type < NV_10)
- nouveau_print_bitfield_names(nstatus, nstatus_names);
- else
- nouveau_print_bitfield_names(nstatus, nstatus_names_nv10);
- printk("\n");
- }
-
- NV_INFO(dev, "%s - Ch %d/%d Class 0x%04x Mthd 0x%04x "
- "Data 0x%08x:0x%08x\n",
- id, trap->channel, trap->subc,
- trap->class, trap->mthd,
- trap->data2, trap->data);
-}
-
-static int
-nouveau_pgraph_intr_swmthd(struct drm_device *dev,
- struct nouveau_pgraph_trap *trap)
-{
- struct drm_nouveau_private *dev_priv = dev->dev_private;
-
- if (trap->channel < 0 ||
- trap->channel >= dev_priv->engine.fifo.channels ||
- !dev_priv->fifos[trap->channel])
- return -ENODEV;
-
- return nouveau_call_method(dev_priv->fifos[trap->channel],
- trap->class, trap->mthd, trap->data);
-}
-
-static inline void
-nouveau_pgraph_intr_notify(struct drm_device *dev, uint32_t nsource)
-{
- struct nouveau_pgraph_trap trap;
- int unhandled = 0;
+ stat = nv_rd32(dev, NV03_PMC_INTR_0);
+ if (!stat)
+ return IRQ_NONE;
- nouveau_graph_trap_info(dev, &trap);
+ spin_lock_irqsave(&dev_priv->context_switch_lock, flags);
+ for (i = 0; i < 32 && stat; i++) {
+ if (!(stat & (1 << i)) || !dev_priv->irq_handler[i])
+ continue;
- if (nsource & NV03_PGRAPH_NSOURCE_ILLEGAL_MTHD) {
- if (nouveau_pgraph_intr_swmthd(dev, &trap))
- unhandled = 1;
- } else {
- unhandled = 1;
+ dev_priv->irq_handler[i](dev);
+ stat &= ~(1 << i);
}
- if (unhandled)
- nouveau_graph_dump_trap_info(dev, "PGRAPH_NOTIFY", &trap);
-}
-
-
-static inline void
-nouveau_pgraph_intr_error(struct drm_device *dev, uint32_t nsource)
-{
- struct nouveau_pgraph_trap trap;
- int unhandled = 0;
-
- nouveau_graph_trap_info(dev, &trap);
- trap.nsource = nsource;
-
- if (nsource & NV03_PGRAPH_NSOURCE_ILLEGAL_MTHD) {
- if (nouveau_pgraph_intr_swmthd(dev, &trap))
- unhandled = 1;
- } else if (nsource & NV03_PGRAPH_NSOURCE_DMA_VTX_PROTECTION) {
- uint32_t v = nv_rd32(dev, 0x402000);
- nv_wr32(dev, 0x402000, v);
-
- /* dump the error anyway for now: it's useful for
- Gallium development */
- unhandled = 1;
- } else {
- unhandled = 1;
- }
+ if (dev_priv->msi_enabled)
+ nv_wr08(dev, 0x00088068, 0xff);
+ spin_unlock_irqrestore(&dev_priv->context_switch_lock, flags);
- if (unhandled && nouveau_ratelimit())
- nouveau_graph_dump_trap_info(dev, "PGRAPH_ERROR", &trap);
+ if (stat && nouveau_ratelimit())
+ NV_ERROR(dev, "PMC - unhandled INTR 0x%08x\n", stat);
+ return IRQ_HANDLED;
}
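
The monolithic handler is gone; PMC_INTR_0 bits are now dispatched through a 32-entry dev_priv->irq_handler[] table walked under context_switch_lock. The registration side is not part of this hunk, but a sketch consistent with the dispatch loop above might look like this (nouveau_irq_register and the shape of irq_handler[] are assumptions drawn from the handler, not shown in this diff):

	void
	nouveau_irq_register(struct drm_device *dev, int bit,
			     void (*handler)(struct drm_device *))
	{
		struct drm_nouveau_private *dev_priv = dev->dev_private;
		unsigned long flags;

		/* same lock the dispatch loop holds above */
		spin_lock_irqsave(&dev_priv->context_switch_lock, flags);
		dev_priv->irq_handler[bit] = handler;
		spin_unlock_irqrestore(&dev_priv->context_switch_lock, flags);
	}
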
-static inline void
-nouveau_pgraph_intr_context_switch(struct drm_device *dev)
+int
+nouveau_irq_init(struct drm_device *dev)
{
struct drm_nouveau_private *dev_priv = dev->dev_private;
- struct nouveau_engine *engine = &dev_priv->engine;
- uint32_t chid;
-
- chid = engine->fifo.channel_id(dev);
- NV_DEBUG(dev, "PGRAPH context switch interrupt channel %x\n", chid);
-
- switch (dev_priv->card_type) {
- case NV_04:
- nv04_graph_context_switch(dev);
- break;
- case NV_10:
- nv10_graph_context_switch(dev);
- break;
- default:
- NV_ERROR(dev, "Context switch not implemented\n");
- break;
- }
-}
-
-static void
-nouveau_pgraph_irq_handler(struct drm_device *dev)
-{
- uint32_t status;
-
- while ((status = nv_rd32(dev, NV03_PGRAPH_INTR))) {
- uint32_t nsource = nv_rd32(dev, NV03_PGRAPH_NSOURCE);
-
- if (status & NV_PGRAPH_INTR_NOTIFY) {
- nouveau_pgraph_intr_notify(dev, nsource);
-
- status &= ~NV_PGRAPH_INTR_NOTIFY;
- nv_wr32(dev, NV03_PGRAPH_INTR, NV_PGRAPH_INTR_NOTIFY);
- }
-
- if (status & NV_PGRAPH_INTR_ERROR) {
- nouveau_pgraph_intr_error(dev, nsource);
+ int ret;
- status &= ~NV_PGRAPH_INTR_ERROR;
- nv_wr32(dev, NV03_PGRAPH_INTR, NV_PGRAPH_INTR_ERROR);
+ if (nouveau_msi != 0 && dev_priv->card_type >= NV_50) {
+ ret = pci_enable_msi(dev->pdev);
+ if (ret == 0) {
+ NV_INFO(dev, "enabled MSI\n");
+ dev_priv->msi_enabled = true;
}
-
- if (status & NV_PGRAPH_INTR_CONTEXT_SWITCH) {
- status &= ~NV_PGRAPH_INTR_CONTEXT_SWITCH;
- nv_wr32(dev, NV03_PGRAPH_INTR,
- NV_PGRAPH_INTR_CONTEXT_SWITCH);
-
- nouveau_pgraph_intr_context_switch(dev);
- }
-
- if (status) {
- NV_INFO(dev, "Unhandled PGRAPH_INTR - 0x%08x\n", status);
- nv_wr32(dev, NV03_PGRAPH_INTR, status);
- }
-
- if ((nv_rd32(dev, NV04_PGRAPH_FIFO) & (1 << 0)) == 0)
- nv_wr32(dev, NV04_PGRAPH_FIFO, 1);
}
- nv_wr32(dev, NV03_PMC_INTR_0, NV_PMC_INTR_0_PGRAPH_PENDING);
-}
-
-static struct nouveau_enum_names nv50_mp_exec_error_names[] =
-{
- { 3, "STACK_UNDERFLOW" },
- { 4, "QUADON_ACTIVE" },
- { 8, "TIMEOUT" },
- { 0x10, "INVALID_OPCODE" },
- { 0x40, "BREAKPOINT" },
-};
-
-static void
-nv50_pgraph_mp_trap(struct drm_device *dev, int tpid, int display)
-{
- struct drm_nouveau_private *dev_priv = dev->dev_private;
- uint32_t units = nv_rd32(dev, 0x1540);
- uint32_t addr, mp10, status, pc, oplow, ophigh;
- int i;
- int mps = 0;
- for (i = 0; i < 4; i++) {
- if (!(units & 1 << (i+24)))
- continue;
- if (dev_priv->chipset < 0xa0)
- addr = 0x408200 + (tpid << 12) + (i << 7);
- else
- addr = 0x408100 + (tpid << 11) + (i << 7);
- mp10 = nv_rd32(dev, addr + 0x10);
- status = nv_rd32(dev, addr + 0x14);
- if (!status)
- continue;
- if (display) {
- nv_rd32(dev, addr + 0x20);
- pc = nv_rd32(dev, addr + 0x24);
- oplow = nv_rd32(dev, addr + 0x70);
- ophigh= nv_rd32(dev, addr + 0x74);
- NV_INFO(dev, "PGRAPH_TRAP_MP_EXEC - "
- "TP %d MP %d: ", tpid, i);
- nouveau_print_enum_names(status,
- nv50_mp_exec_error_names);
- printk(" at %06x warp %d, opcode %08x %08x\n",
- pc&0xffffff, pc >> 24,
- oplow, ophigh);
- }
- nv_wr32(dev, addr + 0x10, mp10);
- nv_wr32(dev, addr + 0x14, 0);
- mps++;
- }
- if (!mps && display)
- NV_INFO(dev, "PGRAPH_TRAP_MP_EXEC - TP %d: "
- "No MPs claiming errors?\n", tpid);
+ return drm_irq_install(dev);
}
-static void
-nv50_pgraph_tp_trap(struct drm_device *dev, int type, uint32_t ustatus_old,
- uint32_t ustatus_new, int display, const char *name)
+void
+nouveau_irq_fini(struct drm_device *dev)
{
struct drm_nouveau_private *dev_priv = dev->dev_private;
- int tps = 0;
- uint32_t units = nv_rd32(dev, 0x1540);
- int i, r;
- uint32_t ustatus_addr, ustatus;
- for (i = 0; i < 16; i++) {
- if (!(units & (1 << i)))
- continue;
- if (dev_priv->chipset < 0xa0)
- ustatus_addr = ustatus_old + (i << 12);
- else
- ustatus_addr = ustatus_new + (i << 11);
- ustatus = nv_rd32(dev, ustatus_addr) & 0x7fffffff;
- if (!ustatus)
- continue;
- tps++;
- switch (type) {
- case 6: /* texture error... unknown for now */
- nv50_fb_vm_trap(dev, display, name);
- if (display) {
- NV_ERROR(dev, "magic set %d:\n", i);
- for (r = ustatus_addr + 4; r <= ustatus_addr + 0x10; r += 4)
- NV_ERROR(dev, "\t0x%08x: 0x%08x\n", r,
- nv_rd32(dev, r));
- }
- break;
- case 7: /* MP error */
- if (ustatus & 0x00010000) {
- nv50_pgraph_mp_trap(dev, i, display);
- ustatus &= ~0x00010000;
- }
- break;
- case 8: /* TPDMA error */
- {
- uint32_t e0c = nv_rd32(dev, ustatus_addr + 4);
- uint32_t e10 = nv_rd32(dev, ustatus_addr + 8);
- uint32_t e14 = nv_rd32(dev, ustatus_addr + 0xc);
- uint32_t e18 = nv_rd32(dev, ustatus_addr + 0x10);
- uint32_t e1c = nv_rd32(dev, ustatus_addr + 0x14);
- uint32_t e20 = nv_rd32(dev, ustatus_addr + 0x18);
- uint32_t e24 = nv_rd32(dev, ustatus_addr + 0x1c);
- nv50_fb_vm_trap(dev, display, name);
- /* 2d engine destination */
- if (ustatus & 0x00000010) {
- if (display) {
- NV_INFO(dev, "PGRAPH_TRAP_TPDMA_2D - TP %d - Unknown fault at address %02x%08x\n",
- i, e14, e10);
- NV_INFO(dev, "PGRAPH_TRAP_TPDMA_2D - TP %d - e0c: %08x, e18: %08x, e1c: %08x, e20: %08x, e24: %08x\n",
- i, e0c, e18, e1c, e20, e24);
- }
- ustatus &= ~0x00000010;
- }
- /* Render target */
- if (ustatus & 0x00000040) {
- if (display) {
- NV_INFO(dev, "PGRAPH_TRAP_TPDMA_RT - TP %d - Unknown fault at address %02x%08x\n",
- i, e14, e10);
- NV_INFO(dev, "PGRAPH_TRAP_TPDMA_RT - TP %d - e0c: %08x, e18: %08x, e1c: %08x, e20: %08x, e24: %08x\n",
- i, e0c, e18, e1c, e20, e24);
- }
- ustatus &= ~0x00000040;
- }
- /* CUDA memory: l[], g[] or stack. */
- if (ustatus & 0x00000080) {
- if (display) {
- if (e18 & 0x80000000) {
- /* g[] read fault? */
- NV_INFO(dev, "PGRAPH_TRAP_TPDMA - TP %d - Global read fault at address %02x%08x\n",
- i, e14, e10 | ((e18 >> 24) & 0x1f));
- e18 &= ~0x1f000000;
- } else if (e18 & 0xc) {
- /* g[] write fault? */
- NV_INFO(dev, "PGRAPH_TRAP_TPDMA - TP %d - Global write fault at address %02x%08x\n",
- i, e14, e10 | ((e18 >> 7) & 0x1f));
- e18 &= ~0x00000f80;
- } else {
- NV_INFO(dev, "PGRAPH_TRAP_TPDMA - TP %d - Unknown CUDA fault at address %02x%08x\n",
- i, e14, e10);
- }
- NV_INFO(dev, "PGRAPH_TRAP_TPDMA - TP %d - e0c: %08x, e18: %08x, e1c: %08x, e20: %08x, e24: %08x\n",
- i, e0c, e18, e1c, e20, e24);
- }
- ustatus &= ~0x00000080;
- }
- }
- break;
- }
- if (ustatus) {
- if (display)
- NV_INFO(dev, "%s - TP%d: Unhandled ustatus 0x%08x\n", name, i, ustatus);
- }
- nv_wr32(dev, ustatus_addr, 0xc0000000);
- }
-
- if (!tps && display)
- NV_INFO(dev, "%s - No TPs claiming errors?\n", name);
-}
-
-static void
-nv50_pgraph_trap_handler(struct drm_device *dev)
-{
- struct nouveau_pgraph_trap trap;
- uint32_t status = nv_rd32(dev, 0x400108);
- uint32_t ustatus;
- int display = nouveau_ratelimit();
-
-
- if (!status && display) {
- nouveau_graph_trap_info(dev, &trap);
- nouveau_graph_dump_trap_info(dev, "PGRAPH_TRAP", &trap);
- NV_INFO(dev, "PGRAPH_TRAP - no units reporting traps?\n");
- }
-
- /* DISPATCH: Relays commands to other units and handles NOTIFY,
- * COND, QUERY. If you get a trap from it, the command is still stuck
- * in DISPATCH and you need to do something about it. */
- if (status & 0x001) {
- ustatus = nv_rd32(dev, 0x400804) & 0x7fffffff;
- if (!ustatus && display) {
- NV_INFO(dev, "PGRAPH_TRAP_DISPATCH - no ustatus?\n");
- }
-
- /* Known to be triggered by screwed up NOTIFY and COND... */
- if (ustatus & 0x00000001) {
- nv50_fb_vm_trap(dev, display, "PGRAPH_TRAP_DISPATCH_FAULT");
- nv_wr32(dev, 0x400500, 0);
- if (nv_rd32(dev, 0x400808) & 0x80000000) {
- if (display) {
- if (nouveau_graph_trapped_channel(dev, &trap.channel))
- trap.channel = -1;
- trap.class = nv_rd32(dev, 0x400814);
- trap.mthd = nv_rd32(dev, 0x400808) & 0x1ffc;
- trap.subc = (nv_rd32(dev, 0x400808) >> 16) & 0x7;
- trap.data = nv_rd32(dev, 0x40080c);
- trap.data2 = nv_rd32(dev, 0x400810);
- nouveau_graph_dump_trap_info(dev,
- "PGRAPH_TRAP_DISPATCH_FAULT", &trap);
- NV_INFO(dev, "PGRAPH_TRAP_DISPATCH_FAULT - 400808: %08x\n", nv_rd32(dev, 0x400808));
- NV_INFO(dev, "PGRAPH_TRAP_DISPATCH_FAULT - 400848: %08x\n", nv_rd32(dev, 0x400848));
- }
- nv_wr32(dev, 0x400808, 0);
- } else if (display) {
- NV_INFO(dev, "PGRAPH_TRAP_DISPATCH_FAULT - No stuck command?\n");
- }
- nv_wr32(dev, 0x4008e8, nv_rd32(dev, 0x4008e8) & 3);
- nv_wr32(dev, 0x400848, 0);
- ustatus &= ~0x00000001;
- }
- if (ustatus & 0x00000002) {
- nv50_fb_vm_trap(dev, display, "PGRAPH_TRAP_DISPATCH_QUERY");
- nv_wr32(dev, 0x400500, 0);
- if (nv_rd32(dev, 0x40084c) & 0x80000000) {
- if (display) {
- if (nouveau_graph_trapped_channel(dev, &trap.channel))
- trap.channel = -1;
- trap.class = nv_rd32(dev, 0x400814);
- trap.mthd = nv_rd32(dev, 0x40084c) & 0x1ffc;
- trap.subc = (nv_rd32(dev, 0x40084c) >> 16) & 0x7;
- trap.data = nv_rd32(dev, 0x40085c);
- trap.data2 = 0;
- nouveau_graph_dump_trap_info(dev,
- "PGRAPH_TRAP_DISPATCH_QUERY", &trap);
- NV_INFO(dev, "PGRAPH_TRAP_DISPATCH_QUERY - 40084c: %08x\n", nv_rd32(dev, 0x40084c));
- }
- nv_wr32(dev, 0x40084c, 0);
- } else if (display) {
- NV_INFO(dev, "PGRAPH_TRAP_DISPATCH_QUERY - No stuck command?\n");
- }
- ustatus &= ~0x00000002;
- }
- if (ustatus && display)
- NV_INFO(dev, "PGRAPH_TRAP_DISPATCH - Unhandled ustatus 0x%08x\n", ustatus);
- nv_wr32(dev, 0x400804, 0xc0000000);
- nv_wr32(dev, 0x400108, 0x001);
- status &= ~0x001;
- }
-
- /* TRAPs other than dispatch use the "normal" trap regs. */
- if (status && display) {
- nouveau_graph_trap_info(dev, &trap);
- nouveau_graph_dump_trap_info(dev,
- "PGRAPH_TRAP", &trap);
- }
-
- /* M2MF: Memory to memory copy engine. */
- if (status & 0x002) {
- ustatus = nv_rd32(dev, 0x406800) & 0x7fffffff;
- if (!ustatus && display) {
- NV_INFO(dev, "PGRAPH_TRAP_M2MF - no ustatus?\n");
- }
- if (ustatus & 0x00000001) {
- nv50_fb_vm_trap(dev, display, "PGRAPH_TRAP_M2MF_NOTIFY");
- ustatus &= ~0x00000001;
- }
- if (ustatus & 0x00000002) {
- nv50_fb_vm_trap(dev, display, "PGRAPH_TRAP_M2MF_IN");
- ustatus &= ~0x00000002;
- }
- if (ustatus & 0x00000004) {
- nv50_fb_vm_trap(dev, display, "PGRAPH_TRAP_M2MF_OUT");
- ustatus &= ~0x00000004;
- }
- NV_INFO (dev, "PGRAPH_TRAP_M2MF - %08x %08x %08x %08x\n",
- nv_rd32(dev, 0x406804),
- nv_rd32(dev, 0x406808),
- nv_rd32(dev, 0x40680c),
- nv_rd32(dev, 0x406810));
- if (ustatus && display)
- NV_INFO(dev, "PGRAPH_TRAP_M2MF - Unhandled ustatus 0x%08x\n", ustatus);
- /* No sane way found yet -- just reset the bugger. */
- nv_wr32(dev, 0x400040, 2);
- nv_wr32(dev, 0x400040, 0);
- nv_wr32(dev, 0x406800, 0xc0000000);
- nv_wr32(dev, 0x400108, 0x002);
- status &= ~0x002;
- }
-
- /* VFETCH: Fetches data from vertex buffers. */
- if (status & 0x004) {
- ustatus = nv_rd32(dev, 0x400c04) & 0x7fffffff;
- if (!ustatus && display) {
- NV_INFO(dev, "PGRAPH_TRAP_VFETCH - no ustatus?\n");
- }
- if (ustatus & 0x00000001) {
- nv50_fb_vm_trap(dev, display, "PGRAPH_TRAP_VFETCH_FAULT");
- NV_INFO (dev, "PGRAPH_TRAP_VFETCH_FAULT - %08x %08x %08x %08x\n",
- nv_rd32(dev, 0x400c00),
- nv_rd32(dev, 0x400c08),
- nv_rd32(dev, 0x400c0c),
- nv_rd32(dev, 0x400c10));
- ustatus &= ~0x00000001;
- }
- if (ustatus && display)
- NV_INFO(dev, "PGRAPH_TRAP_VFETCH - Unhandled ustatus 0x%08x\n", ustatus);
- nv_wr32(dev, 0x400c04, 0xc0000000);
- nv_wr32(dev, 0x400108, 0x004);
- status &= ~0x004;
- }
-
- /* STRMOUT: DirectX streamout / OpenGL transform feedback. */
- if (status & 0x008) {
- ustatus = nv_rd32(dev, 0x401800) & 0x7fffffff;
- if (!ustatus && display) {
- NV_INFO(dev, "PGRAPH_TRAP_STRMOUT - no ustatus?\n");
- }
- if (ustatus & 0x00000001) {
- nv50_fb_vm_trap(dev, display, "PGRAPH_TRAP_STRMOUT_FAULT");
- NV_INFO (dev, "PGRAPH_TRAP_STRMOUT_FAULT - %08x %08x %08x %08x\n",
- nv_rd32(dev, 0x401804),
- nv_rd32(dev, 0x401808),
- nv_rd32(dev, 0x40180c),
- nv_rd32(dev, 0x401810));
- ustatus &= ~0x00000001;
- }
- if (ustatus && display)
- NV_INFO(dev, "PGRAPH_TRAP_STRMOUT - Unhandled ustatus 0x%08x\n", ustatus);
- /* No sane way found yet -- just reset the bugger. */
- nv_wr32(dev, 0x400040, 0x80);
- nv_wr32(dev, 0x400040, 0);
- nv_wr32(dev, 0x401800, 0xc0000000);
- nv_wr32(dev, 0x400108, 0x008);
- status &= ~0x008;
- }
-
- /* CCACHE: Handles code and c[] caches and fills them. */
- if (status & 0x010) {
- ustatus = nv_rd32(dev, 0x405018) & 0x7fffffff;
- if (!ustatus && display) {
- NV_INFO(dev, "PGRAPH_TRAP_CCACHE - no ustatus?\n");
- }
- if (ustatus & 0x00000001) {
- nv50_fb_vm_trap(dev, display, "PGRAPH_TRAP_CCACHE_FAULT");
- NV_INFO (dev, "PGRAPH_TRAP_CCACHE_FAULT - %08x %08x %08x %08x %08x %08x %08x\n",
- nv_rd32(dev, 0x405800),
- nv_rd32(dev, 0x405804),
- nv_rd32(dev, 0x405808),
- nv_rd32(dev, 0x40580c),
- nv_rd32(dev, 0x405810),
- nv_rd32(dev, 0x405814),
- nv_rd32(dev, 0x40581c));
- ustatus &= ~0x00000001;
- }
- if (ustatus && display)
- NV_INFO(dev, "PGRAPH_TRAP_CCACHE - Unhandled ustatus 0x%08x\n", ustatus);
- nv_wr32(dev, 0x405018, 0xc0000000);
- nv_wr32(dev, 0x400108, 0x010);
- status &= ~0x010;
- }
-
- /* Unknown, not seen yet... 0x402000 is the only trap status reg
- * remaining, so try to handle it anyway. Perhaps related to that
- * unknown DMA slot on tesla? */
- if (status & 0x20) {
- nv50_fb_vm_trap(dev, display, "PGRAPH_TRAP_UNKC04");
- ustatus = nv_rd32(dev, 0x402000) & 0x7fffffff;
- if (display)
- NV_INFO(dev, "PGRAPH_TRAP_UNKC04 - Unhandled ustatus 0x%08x\n", ustatus);
- nv_wr32(dev, 0x402000, 0xc0000000);
- /* no status modification on purpose */
- }
-
- /* TEXTURE: CUDA texturing units */
- if (status & 0x040) {
- nv50_pgraph_tp_trap (dev, 6, 0x408900, 0x408600, display,
- "PGRAPH_TRAP_TEXTURE");
- nv_wr32(dev, 0x400108, 0x040);
- status &= ~0x040;
- }
-
- /* MP: CUDA execution engines. */
- if (status & 0x080) {
- nv50_pgraph_tp_trap (dev, 7, 0x408314, 0x40831c, display,
- "PGRAPH_TRAP_MP");
- nv_wr32(dev, 0x400108, 0x080);
- status &= ~0x080;
- }
-
- /* TPDMA: Handles TP-initiated uncached memory accesses:
- * l[], g[], stack, 2d surfaces, render targets. */
- if (status & 0x100) {
- nv50_pgraph_tp_trap (dev, 8, 0x408e08, 0x408708, display,
- "PGRAPH_TRAP_TPDMA");
- nv_wr32(dev, 0x400108, 0x100);
- status &= ~0x100;
- }
-
- if (status) {
- if (display)
- NV_INFO(dev, "PGRAPH_TRAP - Unknown trap 0x%08x\n",
- status);
- nv_wr32(dev, 0x400108, status);
- }
-}
-
-/* There must be a *lot* of these. Will take some time to gather them up. */
-static struct nouveau_enum_names nv50_data_error_names[] =
-{
- { 4, "INVALID_VALUE" },
- { 5, "INVALID_ENUM" },
- { 8, "INVALID_OBJECT" },
- { 0xc, "INVALID_BITFIELD" },
- { 0x28, "MP_NO_REG_SPACE" },
- { 0x2b, "MP_BLOCK_SIZE_MISMATCH" },
-};
-
-static void
-nv50_pgraph_irq_handler(struct drm_device *dev)
-{
- struct nouveau_pgraph_trap trap;
- int unhandled = 0;
- uint32_t status;
-
- while ((status = nv_rd32(dev, NV03_PGRAPH_INTR))) {
- /* NOTIFY: You've set a NOTIFY on a command and it's done. */
- if (status & 0x00000001) {
- nouveau_graph_trap_info(dev, &trap);
- if (nouveau_ratelimit())
- nouveau_graph_dump_trap_info(dev,
- "PGRAPH_NOTIFY", &trap);
- status &= ~0x00000001;
- nv_wr32(dev, NV03_PGRAPH_INTR, 0x00000001);
- }
-
- /* COMPUTE_QUERY: Purpose and exact cause unknown, happens
- * when you write 0x200 to 0x50c0 method 0x31c. */
- if (status & 0x00000002) {
- nouveau_graph_trap_info(dev, &trap);
- if (nouveau_ratelimit())
- nouveau_graph_dump_trap_info(dev,
- "PGRAPH_COMPUTE_QUERY", &trap);
- status &= ~0x00000002;
- nv_wr32(dev, NV03_PGRAPH_INTR, 0x00000002);
- }
-
- /* Unknown, never seen: 0x4 */
-
- /* ILLEGAL_MTHD: You used a wrong method for this class. */
- if (status & 0x00000010) {
- nouveau_graph_trap_info(dev, &trap);
- if (nouveau_pgraph_intr_swmthd(dev, &trap))
- unhandled = 1;
- if (unhandled && nouveau_ratelimit())
- nouveau_graph_dump_trap_info(dev,
- "PGRAPH_ILLEGAL_MTHD", &trap);
- status &= ~0x00000010;
- nv_wr32(dev, NV03_PGRAPH_INTR, 0x00000010);
- }
-
- /* ILLEGAL_CLASS: You used a wrong class. */
- if (status & 0x00000020) {
- nouveau_graph_trap_info(dev, &trap);
- if (nouveau_ratelimit())
- nouveau_graph_dump_trap_info(dev,
- "PGRAPH_ILLEGAL_CLASS", &trap);
- status &= ~0x00000020;
- nv_wr32(dev, NV03_PGRAPH_INTR, 0x00000020);
- }
-
- /* DOUBLE_NOTIFY: You tried to set a NOTIFY on another NOTIFY. */
- if (status & 0x00000040) {
- nouveau_graph_trap_info(dev, &trap);
- if (nouveau_ratelimit())
- nouveau_graph_dump_trap_info(dev,
- "PGRAPH_DOUBLE_NOTIFY", &trap);
- status &= ~0x00000040;
- nv_wr32(dev, NV03_PGRAPH_INTR, 0x00000040);
- }
-
- /* CONTEXT_SWITCH: PGRAPH needs us to load a new context */
- if (status & 0x00001000) {
- nv_wr32(dev, 0x400500, 0x00000000);
- nv_wr32(dev, NV03_PGRAPH_INTR,
- NV_PGRAPH_INTR_CONTEXT_SWITCH);
- nv_wr32(dev, NV40_PGRAPH_INTR_EN, nv_rd32(dev,
- NV40_PGRAPH_INTR_EN) &
- ~NV_PGRAPH_INTR_CONTEXT_SWITCH);
- nv_wr32(dev, 0x400500, 0x00010001);
-
- nv50_graph_context_switch(dev);
-
- status &= ~NV_PGRAPH_INTR_CONTEXT_SWITCH;
- }
-
- /* BUFFER_NOTIFY: Your m2mf transfer finished */
- if (status & 0x00010000) {
- nouveau_graph_trap_info(dev, &trap);
- if (nouveau_ratelimit())
- nouveau_graph_dump_trap_info(dev,
- "PGRAPH_BUFFER_NOTIFY", &trap);
- status &= ~0x00010000;
- nv_wr32(dev, NV03_PGRAPH_INTR, 0x00010000);
- }
-
- /* DATA_ERROR: Invalid value for this method, or invalid
- * state in current PGRAPH context for this operation */
- if (status & 0x00100000) {
- nouveau_graph_trap_info(dev, &trap);
- if (nouveau_ratelimit()) {
- nouveau_graph_dump_trap_info(dev,
- "PGRAPH_DATA_ERROR", &trap);
- NV_INFO (dev, "PGRAPH_DATA_ERROR - ");
- nouveau_print_enum_names(nv_rd32(dev, 0x400110),
- nv50_data_error_names);
- printk("\n");
- }
- status &= ~0x00100000;
- nv_wr32(dev, NV03_PGRAPH_INTR, 0x00100000);
- }
- /* TRAP: Something bad happened in the middle of command
- * execution. Has a billion types, subtypes, and even
- * subsubtypes. */
- if (status & 0x00200000) {
- nv50_pgraph_trap_handler(dev);
- status &= ~0x00200000;
- nv_wr32(dev, NV03_PGRAPH_INTR, 0x00200000);
- }
-
- /* Unknown, never seen: 0x00400000 */
-
- /* SINGLE_STEP: Happens on every method if you turned on
- * single stepping in 40008c */
- if (status & 0x01000000) {
- nouveau_graph_trap_info(dev, &trap);
- if (nouveau_ratelimit())
- nouveau_graph_dump_trap_info(dev,
- "PGRAPH_SINGLE_STEP", &trap);
- status &= ~0x01000000;
- nv_wr32(dev, NV03_PGRAPH_INTR, 0x01000000);
- }
-
- /* 0x02000000 happens when you pause a ctxprog...
- * but the only way this can happen that I know of is by
- * poking the relevant MMIO register, and we don't
- * do that. */
-
- if (status) {
- NV_INFO(dev, "Unhandled PGRAPH_INTR - 0x%08x\n",
- status);
- nv_wr32(dev, NV03_PGRAPH_INTR, status);
- }
-
- {
- const int isb = (1 << 16) | (1 << 0);
-
- if ((nv_rd32(dev, 0x400500) & isb) != isb)
- nv_wr32(dev, 0x400500,
- nv_rd32(dev, 0x400500) | isb);
- }
- }
-
- nv_wr32(dev, NV03_PMC_INTR_0, NV_PMC_INTR_0_PGRAPH_PENDING);
- if (nv_rd32(dev, 0x400824) & (1 << 31))
- nv_wr32(dev, 0x400824, nv_rd32(dev, 0x400824) & ~(1 << 31));
+ drm_irq_uninstall(dev);
+ if (dev_priv->msi_enabled)
+ pci_disable_msi(dev->pdev);
}
-static void
-nouveau_crtc_irq_handler(struct drm_device *dev, int crtc)
+void
+nouveau_irq_register(struct drm_device *dev, int status_bit,
+ void (*handler)(struct drm_device *))
{
- if (crtc & 1)
- nv_wr32(dev, NV_CRTC0_INTSTAT, NV_CRTC_INTR_VBLANK);
+ struct drm_nouveau_private *dev_priv = dev->dev_private;
+ unsigned long flags;
- if (crtc & 2)
- nv_wr32(dev, NV_CRTC1_INTSTAT, NV_CRTC_INTR_VBLANK);
+ spin_lock_irqsave(&dev_priv->context_switch_lock, flags);
+ dev_priv->irq_handler[status_bit] = handler;
+ spin_unlock_irqrestore(&dev_priv->context_switch_lock, flags);
}
-irqreturn_t
-nouveau_irq_handler(DRM_IRQ_ARGS)
+void
+nouveau_irq_unregister(struct drm_device *dev, int status_bit)
{
- struct drm_device *dev = (struct drm_device *)arg;
struct drm_nouveau_private *dev_priv = dev->dev_private;
- uint32_t status;
unsigned long flags;
- status = nv_rd32(dev, NV03_PMC_INTR_0);
- if (!status)
- return IRQ_NONE;
-
spin_lock_irqsave(&dev_priv->context_switch_lock, flags);
-
- if (status & NV_PMC_INTR_0_PFIFO_PENDING) {
- nouveau_fifo_irq_handler(dev);
- status &= ~NV_PMC_INTR_0_PFIFO_PENDING;
- }
-
- if (status & NV_PMC_INTR_0_PGRAPH_PENDING) {
- if (dev_priv->card_type >= NV_50)
- nv50_pgraph_irq_handler(dev);
- else
- nouveau_pgraph_irq_handler(dev);
-
- status &= ~NV_PMC_INTR_0_PGRAPH_PENDING;
- }
-
- if (status & NV_PMC_INTR_0_CRTCn_PENDING) {
- nouveau_crtc_irq_handler(dev, (status>>24)&3);
- status &= ~NV_PMC_INTR_0_CRTCn_PENDING;
- }
-
- if (status & (NV_PMC_INTR_0_NV50_DISPLAY_PENDING |
- NV_PMC_INTR_0_NV50_I2C_PENDING)) {
- nv50_display_irq_handler(dev);
- status &= ~(NV_PMC_INTR_0_NV50_DISPLAY_PENDING |
- NV_PMC_INTR_0_NV50_I2C_PENDING);
- }
-
- if (status)
- NV_ERROR(dev, "Unhandled PMC INTR status bits 0x%08x\n", status);
-
+ dev_priv->irq_handler[status_bit] = NULL;
spin_unlock_irqrestore(&dev_priv->context_switch_lock, flags);
-
- return IRQ_HANDLED;
}
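
[Editor's note] The register/unregister pair above replaces the monolithic IRQ handler removed earlier in this hunk: each engine now installs its own handler, indexed by its PMC interrupt status bit, with the table protected by context_switch_lock. A minimal sketch of how an engine would hook in — the status bit choice and all names here are illustrative, not taken from this patch:

static void
example_engine_isr(struct drm_device *dev)
{
	/* read and clear this engine's own interrupt status here */
}

static int
example_engine_init(struct drm_device *dev)
{
	/* bit 12 is assumed to be this engine's PMC status bit */
	nouveau_irq_register(dev, 12, example_engine_isr);
	return 0;
}

static void
example_engine_fini(struct drm_device *dev)
{
	nouveau_irq_unregister(dev, 12);
}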
diff --git a/drivers/gpu/drm/nouveau/nouveau_mem.c b/drivers/gpu/drm/nouveau/nouveau_mem.c
index fe4a30dc4b4..69044eb104b 100644
--- a/drivers/gpu/drm/nouveau/nouveau_mem.c
+++ b/drivers/gpu/drm/nouveau/nouveau_mem.c
@@ -36,183 +36,112 @@
#include "nouveau_drv.h"
#include "nouveau_pm.h"
+#include "nouveau_mm.h"
+#include "nouveau_vm.h"
/*
* NV10-NV40 tiling helpers
*/
static void
-nv10_mem_set_region_tiling(struct drm_device *dev, int i, uint32_t addr,
- uint32_t size, uint32_t pitch)
+nv10_mem_update_tile_region(struct drm_device *dev,
+ struct nouveau_tile_reg *tile, uint32_t addr,
+ uint32_t size, uint32_t pitch, uint32_t flags)
{
struct drm_nouveau_private *dev_priv = dev->dev_private;
struct nouveau_fifo_engine *pfifo = &dev_priv->engine.fifo;
struct nouveau_fb_engine *pfb = &dev_priv->engine.fb;
struct nouveau_pgraph_engine *pgraph = &dev_priv->engine.graph;
- struct nouveau_tile_reg *tile = &dev_priv->tile[i];
+ int i = tile - dev_priv->tile.reg;
+ unsigned long save;
- tile->addr = addr;
- tile->size = size;
- tile->used = !!pitch;
- nouveau_fence_unref((void **)&tile->fence);
+ nouveau_fence_unref(&tile->fence);
+ if (tile->pitch)
+ pfb->free_tile_region(dev, i);
+
+ if (pitch)
+ pfb->init_tile_region(dev, i, addr, size, pitch, flags);
+
+ spin_lock_irqsave(&dev_priv->context_switch_lock, save);
pfifo->reassign(dev, false);
pfifo->cache_pull(dev, false);
nouveau_wait_for_idle(dev);
- pgraph->set_region_tiling(dev, i, addr, size, pitch);
- pfb->set_region_tiling(dev, i, addr, size, pitch);
+ pfb->set_tile_region(dev, i);
+ pgraph->set_tile_region(dev, i);
pfifo->cache_pull(dev, true);
pfifo->reassign(dev, true);
+ spin_unlock_irqrestore(&dev_priv->context_switch_lock, save);
}
-struct nouveau_tile_reg *
-nv10_mem_set_tiling(struct drm_device *dev, uint32_t addr, uint32_t size,
- uint32_t pitch)
+static struct nouveau_tile_reg *
+nv10_mem_get_tile_region(struct drm_device *dev, int i)
{
struct drm_nouveau_private *dev_priv = dev->dev_private;
- struct nouveau_fb_engine *pfb = &dev_priv->engine.fb;
- struct nouveau_tile_reg *found = NULL;
- unsigned long i, flags;
-
- spin_lock_irqsave(&dev_priv->context_switch_lock, flags);
-
- for (i = 0; i < pfb->num_tiles; i++) {
- struct nouveau_tile_reg *tile = &dev_priv->tile[i];
-
- if (tile->used)
- /* Tile region in use. */
- continue;
+ struct nouveau_tile_reg *tile = &dev_priv->tile.reg[i];
- if (tile->fence &&
- !nouveau_fence_signalled(tile->fence, NULL))
- /* Pending tile region. */
- continue;
-
- if (max(tile->addr, addr) <
- min(tile->addr + tile->size, addr + size))
- /* Kill an intersecting tile region. */
- nv10_mem_set_region_tiling(dev, i, 0, 0, 0);
-
- if (pitch && !found) {
- /* Free tile region. */
- nv10_mem_set_region_tiling(dev, i, addr, size, pitch);
- found = tile;
- }
- }
+ spin_lock(&dev_priv->tile.lock);
- spin_unlock_irqrestore(&dev_priv->context_switch_lock, flags);
+ if (!tile->used &&
+ (!tile->fence || nouveau_fence_signalled(tile->fence)))
+ tile->used = true;
+ else
+ tile = NULL;
- return found;
+ spin_unlock(&dev_priv->tile.lock);
+ return tile;
}
void
-nv10_mem_expire_tiling(struct drm_device *dev, struct nouveau_tile_reg *tile,
- struct nouveau_fence *fence)
-{
- if (fence) {
- /* Mark it as pending. */
- tile->fence = fence;
- nouveau_fence_ref(fence);
- }
-
- tile->used = false;
-}
-
-/*
- * NV50 VM helpers
- */
-int
-nv50_mem_vm_bind_linear(struct drm_device *dev, uint64_t virt, uint32_t size,
- uint32_t flags, uint64_t phys)
+nv10_mem_put_tile_region(struct drm_device *dev, struct nouveau_tile_reg *tile,
+ struct nouveau_fence *fence)
{
struct drm_nouveau_private *dev_priv = dev->dev_private;
- struct nouveau_gpuobj *pgt;
- unsigned block;
- int i;
- virt = ((virt - dev_priv->vm_vram_base) >> 16) << 1;
- size = (size >> 16) << 1;
-
- phys |= ((uint64_t)flags << 32);
- phys |= 1;
- if (dev_priv->vram_sys_base) {
- phys += dev_priv->vram_sys_base;
- phys |= 0x30;
- }
-
- while (size) {
- unsigned offset_h = upper_32_bits(phys);
- unsigned offset_l = lower_32_bits(phys);
- unsigned pte, end;
-
- for (i = 7; i >= 0; i--) {
- block = 1 << (i + 1);
- if (size >= block && !(virt & (block - 1)))
- break;
+ if (tile) {
+ spin_lock(&dev_priv->tile.lock);
+ if (fence) {
+ /* Mark it as pending. */
+ tile->fence = fence;
+ nouveau_fence_ref(fence);
}
- offset_l |= (i << 7);
-
- phys += block << 15;
- size -= block;
-
- while (block) {
- pgt = dev_priv->vm_vram_pt[virt >> 14];
- pte = virt & 0x3ffe;
-
- end = pte + block;
- if (end > 16384)
- end = 16384;
- block -= (end - pte);
- virt += (end - pte);
-
- while (pte < end) {
- nv_wo32(pgt, (pte * 4) + 0, offset_l);
- nv_wo32(pgt, (pte * 4) + 4, offset_h);
- pte += 2;
- }
- }
- }
- dev_priv->engine.instmem.flush(dev);
- dev_priv->engine.fifo.tlb_flush(dev);
- dev_priv->engine.graph.tlb_flush(dev);
- nv50_vm_flush(dev, 6);
- return 0;
+ tile->used = false;
+ spin_unlock(&dev_priv->tile.lock);
+ }
}
-void
-nv50_mem_vm_unbind(struct drm_device *dev, uint64_t virt, uint32_t size)
+struct nouveau_tile_reg *
+nv10_mem_set_tiling(struct drm_device *dev, uint32_t addr, uint32_t size,
+ uint32_t pitch, uint32_t flags)
{
struct drm_nouveau_private *dev_priv = dev->dev_private;
- struct nouveau_gpuobj *pgt;
- unsigned pages, pte, end;
-
- virt -= dev_priv->vm_vram_base;
- pages = (size >> 16) << 1;
+ struct nouveau_fb_engine *pfb = &dev_priv->engine.fb;
+ struct nouveau_tile_reg *tile, *found = NULL;
+ int i;
- while (pages) {
- pgt = dev_priv->vm_vram_pt[virt >> 29];
- pte = (virt & 0x1ffe0000ULL) >> 15;
+ for (i = 0; i < pfb->num_tiles; i++) {
+ tile = nv10_mem_get_tile_region(dev, i);
- end = pte + pages;
- if (end > 16384)
- end = 16384;
- pages -= (end - pte);
- virt += (end - pte) << 15;
+ if (pitch && !found) {
+ found = tile;
+ continue;
- while (pte < end) {
- nv_wo32(pgt, (pte * 4), 0);
- pte++;
+ } else if (tile && tile->pitch) {
+ /* Kill an unused tile region. */
+ nv10_mem_update_tile_region(dev, tile, 0, 0, 0, 0);
}
+
+ nv10_mem_put_tile_region(dev, tile, NULL);
}
- dev_priv->engine.instmem.flush(dev);
- dev_priv->engine.fifo.tlb_flush(dev);
- dev_priv->engine.graph.tlb_flush(dev);
- nv50_vm_flush(dev, 6);
+ if (found)
+ nv10_mem_update_tile_region(dev, found, addr, size,
+ pitch, flags);
+ return found;
}
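
[Editor's note] After this rework, nv10_mem_set_tiling() returns the tile region it claimed so the caller can release it explicitly through nv10_mem_put_tile_region(), optionally attaching a fence to defer reuse until rendering completes. A hedged sketch of the expected calling pattern; the addr/size/pitch values are made up:

	struct nouveau_tile_reg *tile;

	tile = nv10_mem_set_tiling(dev, 0x00100000 /* addr */,
				   0x00080000 /* size */,
				   4096 /* pitch */, 0 /* flags */);
	if (tile) {
		/* ... render to the tiled range ... */
		/* pass the buffer's last fence instead of NULL to keep
		 * the region reserved until that fence signals */
		nv10_mem_put_tile_region(dev, tile, NULL);
	}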
/*
@@ -312,62 +241,7 @@ nouveau_mem_detect_nforce(struct drm_device *dev)
return 0;
}
-static void
-nv50_vram_preinit(struct drm_device *dev)
-{
- struct drm_nouveau_private *dev_priv = dev->dev_private;
- int i, parts, colbits, rowbitsa, rowbitsb, banks;
- u64 rowsize, predicted;
- u32 r0, r4, rt, ru;
-
- r0 = nv_rd32(dev, 0x100200);
- r4 = nv_rd32(dev, 0x100204);
- rt = nv_rd32(dev, 0x100250);
- ru = nv_rd32(dev, 0x001540);
- NV_DEBUG(dev, "memcfg 0x%08x 0x%08x 0x%08x 0x%08x\n", r0, r4, rt, ru);
-
- for (i = 0, parts = 0; i < 8; i++) {
- if (ru & (0x00010000 << i))
- parts++;
- }
-
- colbits = (r4 & 0x0000f000) >> 12;
- rowbitsa = ((r4 & 0x000f0000) >> 16) + 8;
- rowbitsb = ((r4 & 0x00f00000) >> 20) + 8;
- banks = ((r4 & 0x01000000) ? 8 : 4);
-
- rowsize = parts * banks * (1 << colbits) * 8;
- predicted = rowsize << rowbitsa;
- if (r0 & 0x00000004)
- predicted += rowsize << rowbitsb;
-
- if (predicted != dev_priv->vram_size) {
- NV_WARN(dev, "memory controller reports %dMiB VRAM\n",
- (u32)(dev_priv->vram_size >> 20));
- NV_WARN(dev, "we calculated %dMiB VRAM\n",
- (u32)(predicted >> 20));
- }
-
- dev_priv->vram_rblock_size = rowsize >> 12;
- if (rt & 1)
- dev_priv->vram_rblock_size *= 3;
-
- NV_DEBUG(dev, "rblock %lld bytes\n",
- (u64)dev_priv->vram_rblock_size << 12);
-}
-
-static void
-nvaa_vram_preinit(struct drm_device *dev)
-{
- struct drm_nouveau_private *dev_priv = dev->dev_private;
-
- /* To our knowledge, there's no large scale reordering of pages
- * that occurs on IGP chipsets.
- */
- dev_priv->vram_rblock_size = 1;
-}
-
-static int
+int
nouveau_mem_detect(struct drm_device *dev)
{
struct drm_nouveau_private *dev_priv = dev->dev_private;
@@ -381,33 +255,6 @@ nouveau_mem_detect(struct drm_device *dev)
if (dev_priv->card_type < NV_50) {
dev_priv->vram_size = nv_rd32(dev, NV04_PFB_FIFO_DATA);
dev_priv->vram_size &= NV10_PFB_FIFO_DATA_RAM_AMOUNT_MB_MASK;
- } else
- if (dev_priv->card_type < NV_C0) {
- dev_priv->vram_size = nv_rd32(dev, NV04_PFB_FIFO_DATA);
- dev_priv->vram_size |= (dev_priv->vram_size & 0xff) << 32;
- dev_priv->vram_size &= 0xffffffff00ll;
-
- switch (dev_priv->chipset) {
- case 0xaa:
- case 0xac:
- case 0xaf:
- dev_priv->vram_sys_base = nv_rd32(dev, 0x100e10);
- dev_priv->vram_sys_base <<= 12;
- nvaa_vram_preinit(dev);
- break;
- default:
- nv50_vram_preinit(dev);
- break;
- }
- } else {
- dev_priv->vram_size = nv_rd32(dev, 0x10f20c) << 20;
- dev_priv->vram_size *= nv_rd32(dev, 0x121c74);
- }
-
- NV_INFO(dev, "Detected %dMiB VRAM\n", (int)(dev_priv->vram_size >> 20));
- if (dev_priv->vram_sys_base) {
- NV_INFO(dev, "Stolen system memory at: 0x%010llx\n",
- dev_priv->vram_sys_base);
}
if (dev_priv->vram_size)
@@ -415,6 +262,15 @@ nouveau_mem_detect(struct drm_device *dev)
return -ENOMEM;
}
+bool
+nouveau_mem_flags_valid(struct drm_device *dev, u32 tile_flags)
+{
+ if (!(tile_flags & NOUVEAU_GEM_TILE_LAYOUT_MASK))
+ return true;
+
+ return false;
+}
+
#if __OS_HAS_AGP
static unsigned long
get_agp_mode(struct drm_device *dev, unsigned long mode)
@@ -547,10 +403,6 @@ nouveau_mem_vram_init(struct drm_device *dev)
if (ret)
return ret;
- ret = nouveau_mem_detect(dev);
- if (ret)
- return ret;
-
dev_priv->fb_phys = pci_resource_start(dev->pdev, 1);
ret = nouveau_ttm_global_init(dev_priv);
@@ -566,13 +418,6 @@ nouveau_mem_vram_init(struct drm_device *dev)
return ret;
}
- dev_priv->fb_available_size = dev_priv->vram_size;
- dev_priv->fb_mappable_pages = dev_priv->fb_available_size;
- if (dev_priv->fb_mappable_pages > pci_resource_len(dev->pdev, 1))
- dev_priv->fb_mappable_pages =
- pci_resource_len(dev->pdev, 1);
- dev_priv->fb_mappable_pages >>= PAGE_SHIFT;
-
/* reserve space at end of VRAM for PRAMIN */
if (dev_priv->chipset == 0x40 || dev_priv->chipset == 0x47 ||
dev_priv->chipset == 0x49 || dev_priv->chipset == 0x4b)
@@ -583,6 +428,22 @@ nouveau_mem_vram_init(struct drm_device *dev)
else
dev_priv->ramin_rsvd_vram = (512 * 1024);
+ ret = dev_priv->engine.vram.init(dev);
+ if (ret)
+ return ret;
+
+ NV_INFO(dev, "Detected %dMiB VRAM\n", (int)(dev_priv->vram_size >> 20));
+ if (dev_priv->vram_sys_base) {
+ NV_INFO(dev, "Stolen system memory at: 0x%010llx\n",
+ dev_priv->vram_sys_base);
+ }
+
+ dev_priv->fb_available_size = dev_priv->vram_size;
+ dev_priv->fb_mappable_pages = dev_priv->fb_available_size;
+ if (dev_priv->fb_mappable_pages > pci_resource_len(dev->pdev, 1))
+ dev_priv->fb_mappable_pages = pci_resource_len(dev->pdev, 1);
+ dev_priv->fb_mappable_pages >>= PAGE_SHIFT;
+
dev_priv->fb_available_size -= dev_priv->ramin_rsvd_vram;
dev_priv->fb_aper_free = dev_priv->fb_available_size;
@@ -799,3 +660,118 @@ nouveau_mem_timing_fini(struct drm_device *dev)
kfree(mem->timing);
}
+
+static int
+nouveau_vram_manager_init(struct ttm_mem_type_manager *man, unsigned long p_size)
+{
+ struct drm_nouveau_private *dev_priv = nouveau_bdev(man->bdev);
+ struct nouveau_mm *mm;
+ u32 b_size;
+ int ret;
+
+ p_size = (p_size << PAGE_SHIFT) >> 12;
+ b_size = dev_priv->vram_rblock_size >> 12;
+
+ ret = nouveau_mm_init(&mm, 0, p_size, b_size);
+ if (ret)
+ return ret;
+
+ man->priv = mm;
+ return 0;
+}
+
+static int
+nouveau_vram_manager_fini(struct ttm_mem_type_manager *man)
+{
+ struct nouveau_mm *mm = man->priv;
+ int ret;
+
+ ret = nouveau_mm_fini(&mm);
+ if (ret)
+ return ret;
+
+ man->priv = NULL;
+ return 0;
+}
+
+static void
+nouveau_vram_manager_del(struct ttm_mem_type_manager *man,
+ struct ttm_mem_reg *mem)
+{
+ struct drm_nouveau_private *dev_priv = nouveau_bdev(man->bdev);
+ struct nouveau_vram_engine *vram = &dev_priv->engine.vram;
+ struct drm_device *dev = dev_priv->dev;
+
+ vram->put(dev, (struct nouveau_vram **)&mem->mm_node);
+}
+
+static int
+nouveau_vram_manager_new(struct ttm_mem_type_manager *man,
+ struct ttm_buffer_object *bo,
+ struct ttm_placement *placement,
+ struct ttm_mem_reg *mem)
+{
+ struct drm_nouveau_private *dev_priv = nouveau_bdev(man->bdev);
+ struct nouveau_vram_engine *vram = &dev_priv->engine.vram;
+ struct drm_device *dev = dev_priv->dev;
+ struct nouveau_bo *nvbo = nouveau_bo(bo);
+ struct nouveau_vram *node;
+ u32 size_nc = 0;
+ int ret;
+
+ if (nvbo->tile_flags & NOUVEAU_GEM_TILE_NONCONTIG)
+ size_nc = 1 << nvbo->vma.node->type;
+
+ ret = vram->get(dev, mem->num_pages << PAGE_SHIFT,
+ mem->page_alignment << PAGE_SHIFT, size_nc,
+ (nvbo->tile_flags >> 8) & 0xff, &node);
+ if (ret)
+ return ret;
+
+ node->page_shift = 12;
+ if (nvbo->vma.node)
+ node->page_shift = nvbo->vma.node->type;
+
+ mem->mm_node = node;
+ mem->start = node->offset >> PAGE_SHIFT;
+ return 0;
+}
+
+void
+nouveau_vram_manager_debug(struct ttm_mem_type_manager *man, const char *prefix)
+{
+ struct nouveau_mm *mm = man->priv;
+ struct nouveau_mm_node *r;
+ u64 total = 0, ttotal[3] = {}, tused[3] = {}, tfree[3] = {};
+ int i;
+
+ mutex_lock(&mm->mutex);
+ list_for_each_entry(r, &mm->nodes, nl_entry) {
+ printk(KERN_DEBUG "%s %s-%d: 0x%010llx 0x%010llx\n",
+ prefix, r->free ? "free" : "used", r->type,
+ ((u64)r->offset << 12),
+ (((u64)r->offset + r->length) << 12));
+ total += r->length;
+ ttotal[r->type] += r->length;
+ if (r->free)
+ tfree[r->type] += r->length;
+ else
+ tused[r->type] += r->length;
+ }
+ mutex_unlock(&mm->mutex);
+
+ printk(KERN_DEBUG "%s total: 0x%010llx\n", prefix, total << 12);
+ for (i = 0; i < 3; i++) {
+ printk(KERN_DEBUG "%s type %d: 0x%010llx, "
+ "used 0x%010llx, free 0x%010llx\n", prefix,
+ i, ttotal[i] << 12, tused[i] << 12, tfree[i] << 12);
+ }
+}
+
+const struct ttm_mem_type_manager_func nouveau_vram_manager = {
+ nouveau_vram_manager_init,
+ nouveau_vram_manager_fini,
+ nouveau_vram_manager_new,
+ nouveau_vram_manager_del,
+ nouveau_vram_manager_debug
+};
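
[Editor's note] This table implements TTM's ttm_mem_type_manager_func, routing VRAM placement through the new nouveau_mm allocator instead of drm_mm. A sketch of where it plugs in — the exact init path is an assumption of this note, not shown in this hunk:

static int
example_init_mem_type(struct ttm_bo_device *bdev, uint32_t type,
		      struct ttm_mem_type_manager *man)
{
	switch (type) {
	case TTM_PL_VRAM:
		/* TTM will now call nouveau_vram_manager_init/new/del */
		man->func = &nouveau_vram_manager;
		return 0;
	default:
		return -EINVAL;
	}
}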
diff --git a/drivers/gpu/drm/nouveau/nouveau_mm.c b/drivers/gpu/drm/nouveau/nouveau_mm.c
new file mode 100644
index 00000000000..cdbb11eb701
--- /dev/null
+++ b/drivers/gpu/drm/nouveau/nouveau_mm.c
@@ -0,0 +1,271 @@
+/*
+ * Copyright 2010 Red Hat Inc.
+ *
+ * Permission is hereby granted, free of charge, to any person obtaining a
+ * copy of this software and associated documentation files (the "Software"),
+ * to deal in the Software without restriction, including without limitation
+ * the rights to use, copy, modify, merge, publish, distribute, sublicense,
+ * and/or sell copies of the Software, and to permit persons to whom the
+ * Software is furnished to do so, subject to the following conditions:
+ *
+ * The above copyright notice and this permission notice shall be included in
+ * all copies or substantial portions of the Software.
+ *
+ * THE SOFTWARE IS PROVIDED "AS IS", WITHOUT WARRANTY OF ANY KIND, EXPRESS OR
+ * IMPLIED, INCLUDING BUT NOT LIMITED TO THE WARRANTIES OF MERCHANTABILITY,
+ * FITNESS FOR A PARTICULAR PURPOSE AND NONINFRINGEMENT. IN NO EVENT SHALL
+ * THE COPYRIGHT HOLDER(S) OR AUTHOR(S) BE LIABLE FOR ANY CLAIM, DAMAGES OR
+ * OTHER LIABILITY, WHETHER IN AN ACTION OF CONTRACT, TORT OR OTHERWISE,
+ * ARISING FROM, OUT OF OR IN CONNECTION WITH THE SOFTWARE OR THE USE OR
+ * OTHER DEALINGS IN THE SOFTWARE.
+ *
+ * Authors: Ben Skeggs
+ */
+
+#include "drmP.h"
+#include "nouveau_drv.h"
+#include "nouveau_mm.h"
+
+static inline void
+region_put(struct nouveau_mm *rmm, struct nouveau_mm_node *a)
+{
+ list_del(&a->nl_entry);
+ list_del(&a->fl_entry);
+ kfree(a);
+}
+
+static struct nouveau_mm_node *
+region_split(struct nouveau_mm *rmm, struct nouveau_mm_node *a, u32 size)
+{
+ struct nouveau_mm_node *b;
+
+ if (a->length == size)
+ return a;
+
+ b = kmalloc(sizeof(*b), GFP_KERNEL);
+ if (unlikely(b == NULL))
+ return NULL;
+
+ b->offset = a->offset;
+ b->length = size;
+ b->free = a->free;
+ b->type = a->type;
+ a->offset += size;
+ a->length -= size;
+ list_add_tail(&b->nl_entry, &a->nl_entry);
+ if (b->free)
+ list_add_tail(&b->fl_entry, &a->fl_entry);
+ return b;
+}
+
+static struct nouveau_mm_node *
+nouveau_mm_merge(struct nouveau_mm *rmm, struct nouveau_mm_node *this)
+{
+ struct nouveau_mm_node *prev, *next;
+
+ /* try to merge with free adjacent entries of same type */
+ prev = list_entry(this->nl_entry.prev, struct nouveau_mm_node, nl_entry);
+ if (this->nl_entry.prev != &rmm->nodes) {
+ if (prev->free && prev->type == this->type) {
+ prev->length += this->length;
+ region_put(rmm, this);
+ this = prev;
+ }
+ }
+
+ next = list_entry(this->nl_entry.next, struct nouveau_mm_node, nl_entry);
+ if (this->nl_entry.next != &rmm->nodes) {
+ if (next->free && next->type == this->type) {
+ next->offset = this->offset;
+ next->length += this->length;
+ region_put(rmm, this);
+ this = next;
+ }
+ }
+
+ return this;
+}
+
+void
+nouveau_mm_put(struct nouveau_mm *rmm, struct nouveau_mm_node *this)
+{
+ u32 block_s, block_l;
+
+ this->free = true;
+ list_add(&this->fl_entry, &rmm->free);
+ this = nouveau_mm_merge(rmm, this);
+
+ /* any entirely free blocks now? we'll want to remove typing
+ * on them now so they can be used for any memory allocation
+ */
+ block_s = roundup(this->offset, rmm->block_size);
+ if (block_s + rmm->block_size > this->offset + this->length)
+ return;
+
+ /* split off any still-typed region at the start */
+ if (block_s != this->offset) {
+ if (!region_split(rmm, this, block_s - this->offset))
+ return;
+ }
+
+ /* split off the soon-to-be-untyped block(s) */
+ block_l = rounddown(this->length, rmm->block_size);
+ if (block_l != this->length) {
+ this = region_split(rmm, this, block_l);
+ if (!this)
+ return;
+ }
+
+ /* mark as having no type, and retry merge with any adjacent
+ * untyped blocks
+ */
+ this->type = 0;
+ nouveau_mm_merge(rmm, this);
+}
+
+int
+nouveau_mm_get(struct nouveau_mm *rmm, int type, u32 size, u32 size_nc,
+ u32 align, struct nouveau_mm_node **pnode)
+{
+ struct nouveau_mm_node *this, *tmp, *next;
+ u32 splitoff, avail, alloc;
+
+ list_for_each_entry_safe(this, tmp, &rmm->free, fl_entry) {
+ next = list_entry(this->nl_entry.next, struct nouveau_mm_node, nl_entry);
+ if (this->nl_entry.next == &rmm->nodes)
+ next = NULL;
+
+ /* skip wrongly typed blocks */
+ if (this->type && this->type != type)
+ continue;
+
+ /* account for alignment */
+ splitoff = this->offset & (align - 1);
+ if (splitoff)
+ splitoff = align - splitoff;
+
+ if (this->length <= splitoff)
+ continue;
+
+ /* determine total memory available from this, and
+ * the next block (if appropriate)
+ */
+ avail = this->length;
+ if (next && next->free && (!next->type || next->type == type))
+ avail += next->length;
+
+ avail -= splitoff;
+
+ /* determine allocation size */
+ if (size_nc) {
+ alloc = min(avail, size);
+ alloc = rounddown(alloc, size_nc);
+ if (alloc == 0)
+ continue;
+ } else {
+ alloc = size;
+ if (avail < alloc)
+ continue;
+ }
+
+ /* untyped block, split off a chunk that's a multiple
+ * of block_size and type it
+ */
+ if (!this->type) {
+ u32 block = roundup(alloc + splitoff, rmm->block_size);
+ if (this->length < block)
+ continue;
+
+ this = region_split(rmm, this, block);
+ if (!this)
+ return -ENOMEM;
+
+ this->type = type;
+ }
+
+ /* stealing memory from adjacent block */
+ if (alloc > this->length) {
+ u32 amount = alloc - (this->length - splitoff);
+
+ if (!next->type) {
+ amount = roundup(amount, rmm->block_size);
+
+ next = region_split(rmm, next, amount);
+ if (!next)
+ return -ENOMEM;
+
+ next->type = type;
+ }
+
+ this->length += amount;
+ next->offset += amount;
+ next->length -= amount;
+ if (!next->length) {
+ list_del(&next->nl_entry);
+ list_del(&next->fl_entry);
+ kfree(next);
+ }
+ }
+
+ if (splitoff) {
+ if (!region_split(rmm, this, splitoff))
+ return -ENOMEM;
+ }
+
+ this = region_split(rmm, this, alloc);
+ if (this == NULL)
+ return -ENOMEM;
+
+ this->free = false;
+ list_del(&this->fl_entry);
+ *pnode = this;
+ return 0;
+ }
+
+ return -ENOMEM;
+}
+
+int
+nouveau_mm_init(struct nouveau_mm **prmm, u32 offset, u32 length, u32 block)
+{
+ struct nouveau_mm *rmm;
+ struct nouveau_mm_node *heap;
+
+ heap = kzalloc(sizeof(*heap), GFP_KERNEL);
+ if (!heap)
+ return -ENOMEM;
+ heap->free = true;
+ heap->offset = roundup(offset, block);
+ heap->length = rounddown(offset + length, block) - heap->offset;
+
+ rmm = kzalloc(sizeof(*rmm), GFP_KERNEL);
+ if (!rmm) {
+ kfree(heap);
+ return -ENOMEM;
+ }
+ rmm->block_size = block;
+ mutex_init(&rmm->mutex);
+ INIT_LIST_HEAD(&rmm->nodes);
+ INIT_LIST_HEAD(&rmm->free);
+ list_add(&heap->nl_entry, &rmm->nodes);
+ list_add(&heap->fl_entry, &rmm->free);
+
+ *prmm = rmm;
+ return 0;
+}
+
+int
+nouveau_mm_fini(struct nouveau_mm **prmm)
+{
+ struct nouveau_mm *rmm = *prmm;
+ struct nouveau_mm_node *heap =
+ list_first_entry(&rmm->nodes, struct nouveau_mm_node, nl_entry);
+
+ if (!list_is_singular(&rmm->nodes))
+ return -EBUSY;
+
+ kfree(heap);
+ kfree(rmm);
+ *prmm = NULL;
+ return 0;
+}
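
[Editor's note] Taken together, this file implements a first-fit range allocator over a single span, where each node carries a "type" tag and only whole block_size-aligned blocks may change type; the callers in this patch (e.g. the VRAM manager above) work in 4KiB units. A minimal lifecycle sketch under assumed sizes; note that callers in this patch serialize access with mm->mutex, which is omitted here:

	struct nouveau_mm *mm;
	struct nouveau_mm_node *node;
	int ret;

	/* 256MiB span in 4KiB units, 64KiB block size */
	ret = nouveau_mm_init(&mm, 0, 0x10000, 0x10);
	if (ret)
		return ret;

	/* 1MiB contiguous allocation of (arbitrary) type 1 */
	ret = nouveau_mm_get(mm, 1, 0x100, 0, 0x10, &node);
	if (ret == 0)
		nouveau_mm_put(mm, node);

	nouveau_mm_fini(&mm);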
diff --git a/drivers/gpu/drm/nouveau/nouveau_mm.h b/drivers/gpu/drm/nouveau/nouveau_mm.h
new file mode 100644
index 00000000000..af384493303
--- /dev/null
+++ b/drivers/gpu/drm/nouveau/nouveau_mm.h
@@ -0,0 +1,67 @@
+/*
+ * Copyright 2010 Red Hat Inc.
+ *
+ * Permission is hereby granted, free of charge, to any person obtaining a
+ * copy of this software and associated documentation files (the "Software"),
+ * to deal in the Software without restriction, including without limitation
+ * the rights to use, copy, modify, merge, publish, distribute, sublicense,
+ * and/or sell copies of the Software, and to permit persons to whom the
+ * Software is furnished to do so, subject to the following conditions:
+ *
+ * The above copyright notice and this permission notice shall be included in
+ * all copies or substantial portions of the Software.
+ *
+ * THE SOFTWARE IS PROVIDED "AS IS", WITHOUT WARRANTY OF ANY KIND, EXPRESS OR
+ * IMPLIED, INCLUDING BUT NOT LIMITED TO THE WARRANTIES OF MERCHANTABILITY,
+ * FITNESS FOR A PARTICULAR PURPOSE AND NONINFRINGEMENT. IN NO EVENT SHALL
+ * THE COPYRIGHT HOLDER(S) OR AUTHOR(S) BE LIABLE FOR ANY CLAIM, DAMAGES OR
+ * OTHER LIABILITY, WHETHER IN AN ACTION OF CONTRACT, TORT OR OTHERWISE,
+ * ARISING FROM, OUT OF OR IN CONNECTION WITH THE SOFTWARE OR THE USE OR
+ * OTHER DEALINGS IN THE SOFTWARE.
+ *
+ * Authors: Ben Skeggs
+ */
+
+#ifndef __NOUVEAU_REGION_H__
+#define __NOUVEAU_REGION_H__
+
+struct nouveau_mm_node {
+ struct list_head nl_entry;
+ struct list_head fl_entry;
+ struct list_head rl_entry;
+
+ bool free;
+ int type;
+
+ u32 offset;
+ u32 length;
+};
+
+struct nouveau_mm {
+ struct list_head nodes;
+ struct list_head free;
+
+ struct mutex mutex;
+
+ u32 block_size;
+};
+
+int nouveau_mm_init(struct nouveau_mm **, u32 offset, u32 length, u32 block);
+int nouveau_mm_fini(struct nouveau_mm **);
+int nouveau_mm_pre(struct nouveau_mm *);
+int nouveau_mm_get(struct nouveau_mm *, int type, u32 size, u32 size_nc,
+ u32 align, struct nouveau_mm_node **);
+void nouveau_mm_put(struct nouveau_mm *, struct nouveau_mm_node *);
+
+int nv50_vram_init(struct drm_device *);
+int nv50_vram_new(struct drm_device *, u64 size, u32 align, u32 size_nc,
+ u32 memtype, struct nouveau_vram **);
+void nv50_vram_del(struct drm_device *, struct nouveau_vram **);
+bool nv50_vram_flags_valid(struct drm_device *, u32 tile_flags);
+
+int nvc0_vram_init(struct drm_device *);
+int nvc0_vram_new(struct drm_device *, u64 size, u32 align, u32 ncmin,
+ u32 memtype, struct nouveau_vram **);
+bool nvc0_vram_flags_valid(struct drm_device *, u32 tile_flags);
+
+#endif
diff --git a/drivers/gpu/drm/nouveau/nouveau_notifier.c b/drivers/gpu/drm/nouveau/nouveau_notifier.c
index 2cc59f8c658..fe29d604b82 100644
--- a/drivers/gpu/drm/nouveau/nouveau_notifier.c
+++ b/drivers/gpu/drm/nouveau/nouveau_notifier.c
@@ -99,7 +99,6 @@ nouveau_notifier_alloc(struct nouveau_channel *chan, uint32_t handle,
int size, uint32_t *b_offset)
{
struct drm_device *dev = chan->dev;
- struct drm_nouveau_private *dev_priv = dev->dev_private;
struct nouveau_gpuobj *nobj = NULL;
struct drm_mm_node *mem;
uint32_t offset;
@@ -113,31 +112,15 @@ nouveau_notifier_alloc(struct nouveau_channel *chan, uint32_t handle,
return -ENOMEM;
}
- offset = chan->notifier_bo->bo.mem.start << PAGE_SHIFT;
- if (chan->notifier_bo->bo.mem.mem_type == TTM_PL_VRAM) {
- target = NV_DMA_TARGET_VIDMEM;
- } else
- if (chan->notifier_bo->bo.mem.mem_type == TTM_PL_TT) {
- if (dev_priv->gart_info.type == NOUVEAU_GART_SGDMA &&
- dev_priv->card_type < NV_50) {
- ret = nouveau_sgdma_get_page(dev, offset, &offset);
- if (ret)
- return ret;
- target = NV_DMA_TARGET_PCI;
- } else {
- target = NV_DMA_TARGET_AGP;
- if (dev_priv->card_type >= NV_50)
- offset += dev_priv->vm_gart_base;
- }
- } else {
- NV_ERROR(dev, "Bad DMA target, mem_type %d!\n",
- chan->notifier_bo->bo.mem.mem_type);
- return -EINVAL;
- }
+ if (chan->notifier_bo->bo.mem.mem_type == TTM_PL_VRAM)
+ target = NV_MEM_TARGET_VRAM;
+ else
+ target = NV_MEM_TARGET_GART;
+ offset = chan->notifier_bo->bo.mem.start << PAGE_SHIFT;
offset += mem->start;
ret = nouveau_gpuobj_dma_new(chan, NV_CLASS_DMA_IN_MEMORY, offset,
- mem->size, NV_DMA_ACCESS_RW, target,
+ mem->size, NV_MEM_ACCESS_RW, target,
&nobj);
if (ret) {
drm_mm_put_block(mem);
@@ -181,15 +164,20 @@ int
nouveau_ioctl_notifier_alloc(struct drm_device *dev, void *data,
struct drm_file *file_priv)
{
+ struct drm_nouveau_private *dev_priv = dev->dev_private;
struct drm_nouveau_notifierobj_alloc *na = data;
struct nouveau_channel *chan;
int ret;
- NOUVEAU_GET_USER_CHANNEL_WITH_RETURN(na->channel, file_priv, chan);
+ /* completely unnecessary for these chipsets... */
+ if (unlikely(dev_priv->card_type >= NV_C0))
+ return -EINVAL;
+
+ chan = nouveau_channel_get(dev, file_priv, na->channel);
+ if (IS_ERR(chan))
+ return PTR_ERR(chan);
ret = nouveau_notifier_alloc(chan, na->handle, na->size, &na->offset);
- if (ret)
- return ret;
-
- return 0;
+ nouveau_channel_put(&chan);
+ return ret;
}
diff --git a/drivers/gpu/drm/nouveau/nouveau_object.c b/drivers/gpu/drm/nouveau/nouveau_object.c
index dd572adca02..30b6544467c 100644
--- a/drivers/gpu/drm/nouveau/nouveau_object.c
+++ b/drivers/gpu/drm/nouveau/nouveau_object.c
@@ -35,6 +35,102 @@
#include "nouveau_drv.h"
#include "nouveau_drm.h"
#include "nouveau_ramht.h"
+#include "nouveau_vm.h"
+
+struct nouveau_gpuobj_method {
+ struct list_head head;
+ u32 mthd;
+ int (*exec)(struct nouveau_channel *, u32 class, u32 mthd, u32 data);
+};
+
+struct nouveau_gpuobj_class {
+ struct list_head head;
+ struct list_head methods;
+ u32 id;
+ u32 engine;
+};
+
+int
+nouveau_gpuobj_class_new(struct drm_device *dev, u32 class, u32 engine)
+{
+ struct drm_nouveau_private *dev_priv = dev->dev_private;
+ struct nouveau_gpuobj_class *oc;
+
+ oc = kzalloc(sizeof(*oc), GFP_KERNEL);
+ if (!oc)
+ return -ENOMEM;
+
+ INIT_LIST_HEAD(&oc->methods);
+ oc->id = class;
+ oc->engine = engine;
+ list_add(&oc->head, &dev_priv->classes);
+ return 0;
+}
+
+int
+nouveau_gpuobj_mthd_new(struct drm_device *dev, u32 class, u32 mthd,
+ int (*exec)(struct nouveau_channel *, u32, u32, u32))
+{
+ struct drm_nouveau_private *dev_priv = dev->dev_private;
+ struct nouveau_gpuobj_method *om;
+ struct nouveau_gpuobj_class *oc;
+
+ list_for_each_entry(oc, &dev_priv->classes, head) {
+ if (oc->id == class)
+ goto found;
+ }
+
+ return -EINVAL;
+
+found:
+ om = kzalloc(sizeof(*om), GFP_KERNEL);
+ if (!om)
+ return -ENOMEM;
+
+ om->mthd = mthd;
+ om->exec = exec;
+ list_add(&om->head, &oc->methods);
+ return 0;
+}
+
+int
+nouveau_gpuobj_mthd_call(struct nouveau_channel *chan,
+ u32 class, u32 mthd, u32 data)
+{
+ struct drm_nouveau_private *dev_priv = chan->dev->dev_private;
+ struct nouveau_gpuobj_method *om;
+ struct nouveau_gpuobj_class *oc;
+
+ list_for_each_entry(oc, &dev_priv->classes, head) {
+ if (oc->id != class)
+ continue;
+
+ list_for_each_entry(om, &oc->methods, head) {
+ if (om->mthd == mthd)
+ return om->exec(chan, class, mthd, data);
+ }
+ }
+
+ return -ENOENT;
+}
+
+int
+nouveau_gpuobj_mthd_call2(struct drm_device *dev, int chid,
+ u32 class, u32 mthd, u32 data)
+{
+ struct drm_nouveau_private *dev_priv = dev->dev_private;
+ struct nouveau_channel *chan = NULL;
+ unsigned long flags;
+ int ret = -EINVAL;
+
+ spin_lock_irqsave(&dev_priv->channels.lock, flags);
+ if (chid > 0 && chid < dev_priv->engine.fifo.channels)
+ chan = dev_priv->channels.ptr[chid];
+ if (chan)
+ ret = nouveau_gpuobj_mthd_call(chan, class, mthd, data);
+ spin_unlock_irqrestore(&dev_priv->channels.lock, flags);
+ return ret;
+}
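
[Editor's note] These routines form a small class/method registry: engines declare the object classes they implement with nouveau_gpuobj_class_new(), software classes attach per-method handlers with nouveau_gpuobj_mthd_new(), and interrupt paths dispatch through nouveau_gpuobj_mthd_call()/_call2(). A hedged registration sketch; the class and method numbers are placeholders, not taken from this patch:

static int
example_sw_mthd(struct nouveau_channel *chan, u32 class, u32 mthd, u32 data)
{
	/* act on the method's data word */
	return 0;
}

static int
example_register_classes(struct drm_device *dev)
{
	int ret;

	ret = nouveau_gpuobj_class_new(dev, 0x506e, NVOBJ_ENGINE_SW);
	if (ret == 0)
		ret = nouveau_gpuobj_mthd_new(dev, 0x506e, 0x0150,
					      example_sw_mthd);
	return ret;
}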
/* NVidia uses context objects to drive drawing operations.
@@ -73,17 +169,14 @@ nouveau_gpuobj_new(struct drm_device *dev, struct nouveau_channel *chan,
struct nouveau_gpuobj **gpuobj_ret)
{
struct drm_nouveau_private *dev_priv = dev->dev_private;
- struct nouveau_engine *engine = &dev_priv->engine;
+ struct nouveau_instmem_engine *instmem = &dev_priv->engine.instmem;
struct nouveau_gpuobj *gpuobj;
struct drm_mm_node *ramin = NULL;
- int ret;
+ int ret, i;
NV_DEBUG(dev, "ch%d size=%u align=%d flags=0x%08x\n",
chan ? chan->id : -1, size, align, flags);
- if (!dev_priv || !gpuobj_ret || *gpuobj_ret != NULL)
- return -EINVAL;
-
gpuobj = kzalloc(sizeof(*gpuobj), GFP_KERNEL);
if (!gpuobj)
return -ENOMEM;
@@ -98,88 +191,41 @@ nouveau_gpuobj_new(struct drm_device *dev, struct nouveau_channel *chan,
spin_unlock(&dev_priv->ramin_lock);
if (chan) {
- NV_DEBUG(dev, "channel heap\n");
-
ramin = drm_mm_search_free(&chan->ramin_heap, size, align, 0);
if (ramin)
ramin = drm_mm_get_block(ramin, size, align);
-
if (!ramin) {
nouveau_gpuobj_ref(NULL, &gpuobj);
return -ENOMEM;
}
- } else {
- NV_DEBUG(dev, "global heap\n");
-
- /* allocate backing pages, sets vinst */
- ret = engine->instmem.populate(dev, gpuobj, &size);
- if (ret) {
- nouveau_gpuobj_ref(NULL, &gpuobj);
- return ret;
- }
-
- /* try and get aperture space */
- do {
- if (drm_mm_pre_get(&dev_priv->ramin_heap))
- return -ENOMEM;
- spin_lock(&dev_priv->ramin_lock);
- ramin = drm_mm_search_free(&dev_priv->ramin_heap, size,
- align, 0);
- if (ramin == NULL) {
- spin_unlock(&dev_priv->ramin_lock);
- nouveau_gpuobj_ref(NULL, &gpuobj);
- return -ENOMEM;
- }
-
- ramin = drm_mm_get_block_atomic(ramin, size, align);
- spin_unlock(&dev_priv->ramin_lock);
- } while (ramin == NULL);
-
- /* on nv50 it's ok to fail, we have a fallback path */
- if (!ramin && dev_priv->card_type < NV_50) {
- nouveau_gpuobj_ref(NULL, &gpuobj);
- return -ENOMEM;
- }
- }
+ gpuobj->pinst = chan->ramin->pinst;
+ if (gpuobj->pinst != ~0)
+ gpuobj->pinst += ramin->start;
- /* if we got a chunk of the aperture, map pages into it */
- gpuobj->im_pramin = ramin;
- if (!chan && gpuobj->im_pramin && dev_priv->ramin_available) {
- ret = engine->instmem.bind(dev, gpuobj);
+ gpuobj->cinst = ramin->start;
+ gpuobj->vinst = ramin->start + chan->ramin->vinst;
+ gpuobj->node = ramin;
+ } else {
+ ret = instmem->get(gpuobj, size, align);
if (ret) {
nouveau_gpuobj_ref(NULL, &gpuobj);
return ret;
}
- }
-
- /* calculate the various different addresses for the object */
- if (chan) {
- gpuobj->pinst = chan->ramin->pinst;
- if (gpuobj->pinst != ~0)
- gpuobj->pinst += gpuobj->im_pramin->start;
- if (dev_priv->card_type < NV_50) {
- gpuobj->cinst = gpuobj->pinst;
- } else {
- gpuobj->cinst = gpuobj->im_pramin->start;
- gpuobj->vinst = gpuobj->im_pramin->start +
- chan->ramin->vinst;
- }
- } else {
- if (gpuobj->im_pramin)
- gpuobj->pinst = gpuobj->im_pramin->start;
- else
+ ret = -ENOSYS;
+ if (!(flags & NVOBJ_FLAG_DONT_MAP))
+ ret = instmem->map(gpuobj);
+ if (ret)
gpuobj->pinst = ~0;
- gpuobj->cinst = 0xdeadbeef;
+
+ gpuobj->cinst = NVOBJ_CINST_GLOBAL;
}
if (gpuobj->flags & NVOBJ_FLAG_ZERO_ALLOC) {
- int i;
-
for (i = 0; i < gpuobj->size; i += 4)
nv_wo32(gpuobj, i, 0);
- engine->instmem.flush(dev);
+ instmem->flush(dev);
}
@@ -195,6 +241,7 @@ nouveau_gpuobj_init(struct drm_device *dev)
NV_DEBUG(dev, "\n");
INIT_LIST_HEAD(&dev_priv->gpuobj_list);
+ INIT_LIST_HEAD(&dev_priv->classes);
spin_lock_init(&dev_priv->ramin_lock);
dev_priv->ramin_base = ~0;
@@ -205,9 +252,20 @@ void
nouveau_gpuobj_takedown(struct drm_device *dev)
{
struct drm_nouveau_private *dev_priv = dev->dev_private;
+ struct nouveau_gpuobj_method *om, *tm;
+ struct nouveau_gpuobj_class *oc, *tc;
NV_DEBUG(dev, "\n");
+ list_for_each_entry_safe(oc, tc, &dev_priv->classes, head) {
+ list_for_each_entry_safe(om, tm, &oc->methods, head) {
+ list_del(&om->head);
+ kfree(om);
+ }
+ list_del(&oc->head);
+ kfree(oc);
+ }
+
BUG_ON(!list_empty(&dev_priv->gpuobj_list));
}
@@ -219,26 +277,34 @@ nouveau_gpuobj_del(struct kref *ref)
container_of(ref, struct nouveau_gpuobj, refcount);
struct drm_device *dev = gpuobj->dev;
struct drm_nouveau_private *dev_priv = dev->dev_private;
- struct nouveau_engine *engine = &dev_priv->engine;
+ struct nouveau_instmem_engine *instmem = &dev_priv->engine.instmem;
int i;
NV_DEBUG(dev, "gpuobj %p\n", gpuobj);
- if (gpuobj->im_pramin && (gpuobj->flags & NVOBJ_FLAG_ZERO_FREE)) {
+ if (gpuobj->node && (gpuobj->flags & NVOBJ_FLAG_ZERO_FREE)) {
for (i = 0; i < gpuobj->size; i += 4)
nv_wo32(gpuobj, i, 0);
- engine->instmem.flush(dev);
+ instmem->flush(dev);
}
if (gpuobj->dtor)
gpuobj->dtor(dev, gpuobj);
- if (gpuobj->im_backing)
- engine->instmem.clear(dev, gpuobj);
+ if (gpuobj->cinst == NVOBJ_CINST_GLOBAL) {
+ if (gpuobj->node) {
+ instmem->unmap(gpuobj);
+ instmem->put(gpuobj);
+ }
+ } else {
+ if (gpuobj->node) {
+ spin_lock(&dev_priv->ramin_lock);
+ drm_mm_put_block(gpuobj->node);
+ spin_unlock(&dev_priv->ramin_lock);
+ }
+ }
spin_lock(&dev_priv->ramin_lock);
- if (gpuobj->im_pramin)
- drm_mm_put_block(gpuobj->im_pramin);
list_del(&gpuobj->list);
spin_unlock(&dev_priv->ramin_lock);
@@ -278,7 +344,7 @@ nouveau_gpuobj_new_fake(struct drm_device *dev, u32 pinst, u64 vinst,
kref_init(&gpuobj->refcount);
gpuobj->size = size;
gpuobj->pinst = pinst;
- gpuobj->cinst = 0xdeadbeef;
+ gpuobj->cinst = NVOBJ_CINST_GLOBAL;
gpuobj->vinst = vinst;
if (gpuobj->flags & NVOBJ_FLAG_ZERO_ALLOC) {
@@ -335,113 +401,150 @@ nouveau_gpuobj_class_instmem_size(struct drm_device *dev, int class)
The method below creates a DMA object in instance RAM and returns a handle
to it that can be used to set up context objects.
*/
-int
-nouveau_gpuobj_dma_new(struct nouveau_channel *chan, int class,
- uint64_t offset, uint64_t size, int access,
- int target, struct nouveau_gpuobj **gpuobj)
+
+void
+nv50_gpuobj_dma_init(struct nouveau_gpuobj *obj, u32 offset, int class,
+ u64 base, u64 size, int target, int access,
+ u32 type, u32 comp)
{
- struct drm_device *dev = chan->dev;
- struct drm_nouveau_private *dev_priv = dev->dev_private;
- struct nouveau_instmem_engine *instmem = &dev_priv->engine.instmem;
- int ret;
+ struct drm_nouveau_private *dev_priv = obj->dev->dev_private;
+ struct nouveau_instmem_engine *pinstmem = &dev_priv->engine.instmem;
+ u32 flags0;
- NV_DEBUG(dev, "ch%d class=0x%04x offset=0x%llx size=0x%llx\n",
- chan->id, class, offset, size);
- NV_DEBUG(dev, "access=%d target=%d\n", access, target);
+ flags0 = (comp << 29) | (type << 22) | class;
+ flags0 |= 0x00100000;
+
+ switch (access) {
+ case NV_MEM_ACCESS_RO: flags0 |= 0x00040000; break;
+ case NV_MEM_ACCESS_RW:
+ case NV_MEM_ACCESS_WO: flags0 |= 0x00080000; break;
+ default:
+ break;
+ }
switch (target) {
- case NV_DMA_TARGET_AGP:
- offset += dev_priv->gart_info.aper_base;
+ case NV_MEM_TARGET_VRAM:
+ flags0 |= 0x00010000;
+ break;
+ case NV_MEM_TARGET_PCI:
+ flags0 |= 0x00020000;
+ break;
+ case NV_MEM_TARGET_PCI_NOSNOOP:
+ flags0 |= 0x00030000;
break;
+ case NV_MEM_TARGET_GART:
+ base += dev_priv->gart_info.aper_base;
default:
+ flags0 &= ~0x00100000;
break;
}
- ret = nouveau_gpuobj_new(dev, chan,
- nouveau_gpuobj_class_instmem_size(dev, class),
- 16, NVOBJ_FLAG_ZERO_ALLOC |
- NVOBJ_FLAG_ZERO_FREE, gpuobj);
- if (ret) {
- NV_ERROR(dev, "Error creating gpuobj: %d\n", ret);
- return ret;
- }
+ /* convert to base + limit */
+ size = (base + size) - 1;
- if (dev_priv->card_type < NV_50) {
- uint32_t frame, adjust, pte_flags = 0;
-
- if (access != NV_DMA_ACCESS_RO)
- pte_flags |= (1<<1);
- adjust = offset & 0x00000fff;
- frame = offset & ~0x00000fff;
-
- nv_wo32(*gpuobj, 0, ((1<<12) | (1<<13) | (adjust << 20) |
- (access << 14) | (target << 16) |
- class));
- nv_wo32(*gpuobj, 4, size - 1);
- nv_wo32(*gpuobj, 8, frame | pte_flags);
- nv_wo32(*gpuobj, 12, frame | pte_flags);
- } else {
- uint64_t limit = offset + size - 1;
- uint32_t flags0, flags5;
+ nv_wo32(obj, offset + 0x00, flags0);
+ nv_wo32(obj, offset + 0x04, lower_32_bits(size));
+ nv_wo32(obj, offset + 0x08, lower_32_bits(base));
+ nv_wo32(obj, offset + 0x0c, upper_32_bits(size) << 24 |
+ upper_32_bits(base));
+ nv_wo32(obj, offset + 0x10, 0x00000000);
+ nv_wo32(obj, offset + 0x14, 0x00000000);
- if (target == NV_DMA_TARGET_VIDMEM) {
- flags0 = 0x00190000;
- flags5 = 0x00010000;
- } else {
- flags0 = 0x7fc00000;
- flags5 = 0x00080000;
- }
+ pinstmem->flush(obj->dev);
+}
- nv_wo32(*gpuobj, 0, flags0 | class);
- nv_wo32(*gpuobj, 4, lower_32_bits(limit));
- nv_wo32(*gpuobj, 8, lower_32_bits(offset));
- nv_wo32(*gpuobj, 12, ((upper_32_bits(limit) & 0xff) << 24) |
- (upper_32_bits(offset) & 0xff));
- nv_wo32(*gpuobj, 20, flags5);
- }
+int
+nv50_gpuobj_dma_new(struct nouveau_channel *chan, int class, u64 base, u64 size,
+ int target, int access, u32 type, u32 comp,
+ struct nouveau_gpuobj **pobj)
+{
+ struct drm_device *dev = chan->dev;
+ int ret;
- instmem->flush(dev);
+ ret = nouveau_gpuobj_new(dev, chan, 24, 16, NVOBJ_FLAG_ZERO_FREE, pobj);
+ if (ret)
+ return ret;
- (*gpuobj)->engine = NVOBJ_ENGINE_SW;
- (*gpuobj)->class = class;
+ nv50_gpuobj_dma_init(*pobj, 0, class, base, size, target,
+ access, type, comp);
return 0;
}
int
-nouveau_gpuobj_gart_dma_new(struct nouveau_channel *chan,
- uint64_t offset, uint64_t size, int access,
- struct nouveau_gpuobj **gpuobj,
- uint32_t *o_ret)
+nouveau_gpuobj_dma_new(struct nouveau_channel *chan, int class, u64 base,
+ u64 size, int access, int target,
+ struct nouveau_gpuobj **pobj)
{
+ struct drm_nouveau_private *dev_priv = chan->dev->dev_private;
struct drm_device *dev = chan->dev;
- struct drm_nouveau_private *dev_priv = dev->dev_private;
+ struct nouveau_gpuobj *obj;
+ u32 flags0, flags2;
int ret;
- if (dev_priv->gart_info.type == NOUVEAU_GART_AGP ||
- (dev_priv->card_type >= NV_50 &&
- dev_priv->gart_info.type == NOUVEAU_GART_SGDMA)) {
- ret = nouveau_gpuobj_dma_new(chan, NV_CLASS_DMA_IN_MEMORY,
- offset + dev_priv->vm_gart_base,
- size, access, NV_DMA_TARGET_AGP,
- gpuobj);
- if (o_ret)
- *o_ret = 0;
- } else
- if (dev_priv->gart_info.type == NOUVEAU_GART_SGDMA) {
- nouveau_gpuobj_ref(dev_priv->gart_info.sg_ctxdma, gpuobj);
- if (offset & ~0xffffffffULL) {
- NV_ERROR(dev, "obj offset exceeds 32-bits\n");
- return -EINVAL;
+ if (dev_priv->card_type >= NV_50) {
+ u32 comp = (target == NV_MEM_TARGET_VM) ? NV_MEM_COMP_VM : 0;
+ u32 type = (target == NV_MEM_TARGET_VM) ? NV_MEM_TYPE_VM : 0;
+
+ return nv50_gpuobj_dma_new(chan, class, base, size,
+ target, access, type, comp, pobj);
+ }
+
+ if (target == NV_MEM_TARGET_GART) {
+ if (dev_priv->gart_info.type == NOUVEAU_GART_AGP) {
+ target = NV_MEM_TARGET_PCI_NOSNOOP;
+ base += dev_priv->gart_info.aper_base;
+ } else
+ if (base != 0) {
+ base = nouveau_sgdma_get_physical(dev, base);
+ target = NV_MEM_TARGET_PCI;
+ } else {
+ nouveau_gpuobj_ref(dev_priv->gart_info.sg_ctxdma, pobj);
+ return 0;
}
- if (o_ret)
- *o_ret = (uint32_t)offset;
- ret = (*gpuobj != NULL) ? 0 : -EINVAL;
- } else {
- NV_ERROR(dev, "Invalid GART type %d\n", dev_priv->gart_info.type);
- return -EINVAL;
}
- return ret;
+ flags0 = class;
+ flags0 |= 0x00003000; /* PT present, PT linear */
+ flags2 = 0;
+
+ switch (target) {
+ case NV_MEM_TARGET_PCI:
+ flags0 |= 0x00020000;
+ break;
+ case NV_MEM_TARGET_PCI_NOSNOOP:
+ flags0 |= 0x00030000;
+ break;
+ default:
+ break;
+ }
+
+ switch (access) {
+ case NV_MEM_ACCESS_RO:
+ flags0 |= 0x00004000;
+ break;
+ case NV_MEM_ACCESS_WO:
+ flags0 |= 0x00008000;
+ default:
+ flags2 |= 0x00000002;
+ break;
+ }
+
+ flags0 |= (base & 0x00000fff) << 20;
+ flags2 |= (base & 0xfffff000);
+
+ ret = nouveau_gpuobj_new(dev, chan, 16, 16, NVOBJ_FLAG_ZERO_FREE, &obj);
+ if (ret)
+ return ret;
+
+ nv_wo32(obj, 0x00, flags0);
+ nv_wo32(obj, 0x04, size - 1);
+ nv_wo32(obj, 0x08, flags2);
+ nv_wo32(obj, 0x0c, flags2);
+
+ obj->engine = NVOBJ_ENGINE_SW;
+ obj->class = class;
+ *pobj = obj;
+ return 0;
}
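
[Editor's note] For the pre-NV50 path above, a worked encoding example may help (all input values assumed): with class = NV_CLASS_DMA_IN_MEMORY (0x003d), target = NV_MEM_TARGET_PCI, access = NV_MEM_ACCESS_RO and base = 0x12345678:

	/*
	 * flags0 = 0x0000003d   class
	 *        | 0x00003000   PT present, PT linear
	 *        | 0x00020000   target PCI
	 *        | 0x00004000   read-only
	 *        | 0x67800000   (base & 0x00000fff) << 20
	 *        = 0x6782703d
	 * flags2 = base & 0xfffff000 = 0x12345000
	 *          (the write-enable bit 0x2 is only set for RW/WO)
	 */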
/* Context objects in the instance RAM have the following structure.
@@ -495,82 +598,130 @@ nouveau_gpuobj_gart_dma_new(struct nouveau_channel *chan,
entry[5]:
set to 0?
*/
+static int
+nouveau_gpuobj_sw_new(struct nouveau_channel *chan, int class,
+ struct nouveau_gpuobj **gpuobj_ret)
+{
+ struct drm_nouveau_private *dev_priv = chan->dev->dev_private;
+ struct nouveau_gpuobj *gpuobj;
+
+ gpuobj = kzalloc(sizeof(*gpuobj), GFP_KERNEL);
+ if (!gpuobj)
+ return -ENOMEM;
+ gpuobj->dev = chan->dev;
+ gpuobj->engine = NVOBJ_ENGINE_SW;
+ gpuobj->class = class;
+ kref_init(&gpuobj->refcount);
+ gpuobj->cinst = 0x40;
+
+ spin_lock(&dev_priv->ramin_lock);
+ list_add_tail(&gpuobj->list, &dev_priv->gpuobj_list);
+ spin_unlock(&dev_priv->ramin_lock);
+ *gpuobj_ret = gpuobj;
+ return 0;
+}
+
int
-nouveau_gpuobj_gr_new(struct nouveau_channel *chan, int class,
- struct nouveau_gpuobj **gpuobj)
+nouveau_gpuobj_gr_new(struct nouveau_channel *chan, u32 handle, int class)
{
+ struct drm_nouveau_private *dev_priv = chan->dev->dev_private;
struct drm_device *dev = chan->dev;
- struct drm_nouveau_private *dev_priv = dev->dev_private;
+ struct nouveau_gpuobj_class *oc;
+ struct nouveau_gpuobj *gpuobj;
int ret;
NV_DEBUG(dev, "ch%d class=0x%04x\n", chan->id, class);
+ list_for_each_entry(oc, &dev_priv->classes, head) {
+ if (oc->id == class)
+ goto found;
+ }
+
+ NV_ERROR(dev, "illegal object class: 0x%x\n", class);
+ return -EINVAL;
+
+found:
+ switch (oc->engine) {
+ case NVOBJ_ENGINE_SW:
+ if (dev_priv->card_type < NV_C0) {
+ ret = nouveau_gpuobj_sw_new(chan, class, &gpuobj);
+ if (ret)
+ return ret;
+ goto insert;
+ }
+ break;
+ case NVOBJ_ENGINE_GR:
+ if ((dev_priv->card_type >= NV_20 && !chan->ramin_grctx) ||
+ (dev_priv->card_type < NV_20 && !chan->pgraph_ctx)) {
+ struct nouveau_pgraph_engine *pgraph =
+ &dev_priv->engine.graph;
+
+ ret = pgraph->create_context(chan);
+ if (ret)
+ return ret;
+ }
+ break;
+ case NVOBJ_ENGINE_CRYPT:
+ if (!chan->crypt_ctx) {
+ struct nouveau_crypt_engine *pcrypt =
+ &dev_priv->engine.crypt;
+
+ ret = pcrypt->create_context(chan);
+ if (ret)
+ return ret;
+ }
+ break;
+ }
+
+ /* we're done if this is fermi */
+ if (dev_priv->card_type >= NV_C0)
+ return 0;
+
ret = nouveau_gpuobj_new(dev, chan,
nouveau_gpuobj_class_instmem_size(dev, class),
16,
NVOBJ_FLAG_ZERO_ALLOC | NVOBJ_FLAG_ZERO_FREE,
- gpuobj);
+ &gpuobj);
if (ret) {
- NV_ERROR(dev, "Error creating gpuobj: %d\n", ret);
+ NV_ERROR(dev, "error creating gpuobj: %d\n", ret);
return ret;
}
if (dev_priv->card_type >= NV_50) {
- nv_wo32(*gpuobj, 0, class);
- nv_wo32(*gpuobj, 20, 0x00010000);
+ nv_wo32(gpuobj, 0, class);
+ nv_wo32(gpuobj, 20, 0x00010000);
} else {
switch (class) {
case NV_CLASS_NULL:
- nv_wo32(*gpuobj, 0, 0x00001030);
- nv_wo32(*gpuobj, 4, 0xFFFFFFFF);
+ nv_wo32(gpuobj, 0, 0x00001030);
+ nv_wo32(gpuobj, 4, 0xFFFFFFFF);
break;
default:
if (dev_priv->card_type >= NV_40) {
- nv_wo32(*gpuobj, 0, class);
+ nv_wo32(gpuobj, 0, class);
#ifdef __BIG_ENDIAN
- nv_wo32(*gpuobj, 8, 0x01000000);
+ nv_wo32(gpuobj, 8, 0x01000000);
#endif
} else {
#ifdef __BIG_ENDIAN
- nv_wo32(*gpuobj, 0, class | 0x00080000);
+ nv_wo32(gpuobj, 0, class | 0x00080000);
#else
- nv_wo32(*gpuobj, 0, class);
+ nv_wo32(gpuobj, 0, class);
#endif
}
}
}
dev_priv->engine.instmem.flush(dev);
- (*gpuobj)->engine = NVOBJ_ENGINE_GR;
- (*gpuobj)->class = class;
- return 0;
-}
+ gpuobj->engine = oc->engine;
+ gpuobj->class = oc->id;
-int
-nouveau_gpuobj_sw_new(struct nouveau_channel *chan, int class,
- struct nouveau_gpuobj **gpuobj_ret)
-{
- struct drm_nouveau_private *dev_priv;
- struct nouveau_gpuobj *gpuobj;
-
- if (!chan || !gpuobj_ret || *gpuobj_ret != NULL)
- return -EINVAL;
- dev_priv = chan->dev->dev_private;
-
- gpuobj = kzalloc(sizeof(*gpuobj), GFP_KERNEL);
- if (!gpuobj)
- return -ENOMEM;
- gpuobj->dev = chan->dev;
- gpuobj->engine = NVOBJ_ENGINE_SW;
- gpuobj->class = class;
- kref_init(&gpuobj->refcount);
- gpuobj->cinst = 0x40;
-
- spin_lock(&dev_priv->ramin_lock);
- list_add_tail(&gpuobj->list, &dev_priv->gpuobj_list);
- spin_unlock(&dev_priv->ramin_lock);
- *gpuobj_ret = gpuobj;
- return 0;
+insert:
+ ret = nouveau_ramht_insert(chan, handle, gpuobj);
+ if (ret)
+ NV_ERROR(dev, "error adding gpuobj to RAMHT: %d\n", ret);
+ nouveau_gpuobj_ref(NULL, &gpuobj);
+ return ret;
}
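
[Editor's note] nouveau_gpuobj_gr_new() now takes the handle itself and finishes with the RAMHT insertion, so call sites no longer create the object and register it in two steps. A sketch of the new calling convention; the handle and class values are placeholders:

	/* bind an engine object to a channel handle in one call */
	ret = nouveau_gpuobj_gr_new(chan, 0xbeef0039 /* handle */,
				    0x5039 /* class */);
	if (ret)
		NV_ERROR(dev, "failed to create object: %d\n", ret);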
static int
@@ -585,7 +736,7 @@ nouveau_gpuobj_channel_init_pramin(struct nouveau_channel *chan)
NV_DEBUG(dev, "ch%d\n", chan->id);
/* Base amount for object storage (4KiB enough?) */
- size = 0x1000;
+ size = 0x2000;
base = 0;
/* PGRAPH context */
@@ -624,12 +775,30 @@ nouveau_gpuobj_channel_init(struct nouveau_channel *chan,
{
struct drm_device *dev = chan->dev;
struct drm_nouveau_private *dev_priv = dev->dev_private;
- struct nouveau_instmem_engine *instmem = &dev_priv->engine.instmem;
struct nouveau_gpuobj *vram = NULL, *tt = NULL;
- int ret, i;
+ int ret;
NV_DEBUG(dev, "ch%d vram=0x%08x tt=0x%08x\n", chan->id, vram_h, tt_h);
+ if (dev_priv->card_type == NV_C0) {
+ struct nouveau_vm *vm = dev_priv->chan_vm;
+ struct nouveau_vm_pgd *vpgd;
+
+ ret = nouveau_gpuobj_new(dev, NULL, 4096, 0x1000, 0,
+ &chan->ramin);
+ if (ret)
+ return ret;
+
+ nouveau_vm_ref(vm, &chan->vm, NULL);
+
+ vpgd = list_first_entry(&vm->pgd_list, struct nouveau_vm_pgd, head);
+ nv_wo32(chan->ramin, 0x0200, lower_32_bits(vpgd->obj->vinst));
+ nv_wo32(chan->ramin, 0x0204, upper_32_bits(vpgd->obj->vinst));
+ nv_wo32(chan->ramin, 0x0208, 0xffffffff);
+ nv_wo32(chan->ramin, 0x020c, 0x000000ff);
+ return 0;
+ }
+
/* Allocate a chunk of memory for per-channel object storage */
ret = nouveau_gpuobj_channel_init_pramin(chan);
if (ret) {
@@ -639,14 +808,12 @@ nouveau_gpuobj_channel_init(struct nouveau_channel *chan,
/* NV50 VM
* - Allocate per-channel page-directory
- * - Map GART and VRAM into the channel's address space at the
- * locations determined during init.
+ * - Link with shared channel VM
*/
- if (dev_priv->card_type >= NV_50) {
+ if (dev_priv->chan_vm) {
u32 pgd_offs = (dev_priv->chipset == 0x50) ? 0x1400 : 0x0200;
u64 vm_vinst = chan->ramin->vinst + pgd_offs;
u32 vm_pinst = chan->ramin->pinst;
- u32 pde;
if (vm_pinst != ~0)
vm_pinst += pgd_offs;
@@ -655,29 +822,8 @@ nouveau_gpuobj_channel_init(struct nouveau_channel *chan,
0, &chan->vm_pd);
if (ret)
return ret;
- for (i = 0; i < 0x4000; i += 8) {
- nv_wo32(chan->vm_pd, i + 0, 0x00000000);
- nv_wo32(chan->vm_pd, i + 4, 0xdeadcafe);
- }
-
- nouveau_gpuobj_ref(dev_priv->gart_info.sg_ctxdma,
- &chan->vm_gart_pt);
- pde = (dev_priv->vm_gart_base / (512*1024*1024)) * 8;
- nv_wo32(chan->vm_pd, pde + 0, chan->vm_gart_pt->vinst | 3);
- nv_wo32(chan->vm_pd, pde + 4, 0x00000000);
-
- pde = (dev_priv->vm_vram_base / (512*1024*1024)) * 8;
- for (i = 0; i < dev_priv->vm_vram_pt_nr; i++) {
- nouveau_gpuobj_ref(dev_priv->vm_vram_pt[i],
- &chan->vm_vram_pt[i]);
-
- nv_wo32(chan->vm_pd, pde + 0,
- chan->vm_vram_pt[i]->vinst | 0x61);
- nv_wo32(chan->vm_pd, pde + 4, 0x00000000);
- pde += 8;
- }
- instmem->flush(dev);
+ nouveau_vm_ref(dev_priv->chan_vm, &chan->vm, chan->vm_pd);
}
/* RAMHT */
@@ -700,9 +846,8 @@ nouveau_gpuobj_channel_init(struct nouveau_channel *chan,
/* VRAM ctxdma */
if (dev_priv->card_type >= NV_50) {
ret = nouveau_gpuobj_dma_new(chan, NV_CLASS_DMA_IN_MEMORY,
- 0, dev_priv->vm_end,
- NV_DMA_ACCESS_RW,
- NV_DMA_TARGET_AGP, &vram);
+ 0, (1ULL << 40), NV_MEM_ACCESS_RW,
+ NV_MEM_TARGET_VM, &vram);
if (ret) {
NV_ERROR(dev, "Error creating VRAM ctxdma: %d\n", ret);
return ret;
@@ -710,8 +855,8 @@ nouveau_gpuobj_channel_init(struct nouveau_channel *chan,
} else {
ret = nouveau_gpuobj_dma_new(chan, NV_CLASS_DMA_IN_MEMORY,
0, dev_priv->fb_available_size,
- NV_DMA_ACCESS_RW,
- NV_DMA_TARGET_VIDMEM, &vram);
+ NV_MEM_ACCESS_RW,
+ NV_MEM_TARGET_VRAM, &vram);
if (ret) {
NV_ERROR(dev, "Error creating VRAM ctxdma: %d\n", ret);
return ret;
@@ -728,21 +873,13 @@ nouveau_gpuobj_channel_init(struct nouveau_channel *chan,
/* TT memory ctxdma */
if (dev_priv->card_type >= NV_50) {
ret = nouveau_gpuobj_dma_new(chan, NV_CLASS_DMA_IN_MEMORY,
- 0, dev_priv->vm_end,
- NV_DMA_ACCESS_RW,
- NV_DMA_TARGET_AGP, &tt);
- if (ret) {
- NV_ERROR(dev, "Error creating VRAM ctxdma: %d\n", ret);
- return ret;
- }
- } else
- if (dev_priv->gart_info.type != NOUVEAU_GART_NONE) {
- ret = nouveau_gpuobj_gart_dma_new(chan, 0,
- dev_priv->gart_info.aper_size,
- NV_DMA_ACCESS_RW, &tt, NULL);
+ 0, (1ULL << 40), NV_MEM_ACCESS_RW,
+ NV_MEM_TARGET_VM, &tt);
} else {
- NV_ERROR(dev, "Invalid GART type %d\n", dev_priv->gart_info.type);
- ret = -EINVAL;
+ ret = nouveau_gpuobj_dma_new(chan, NV_CLASS_DMA_IN_MEMORY,
+ 0, dev_priv->gart_info.aper_size,
+ NV_MEM_ACCESS_RW,
+ NV_MEM_TARGET_GART, &tt);
}
if (ret) {
@@ -763,21 +900,14 @@ nouveau_gpuobj_channel_init(struct nouveau_channel *chan,
void
nouveau_gpuobj_channel_takedown(struct nouveau_channel *chan)
{
- struct drm_nouveau_private *dev_priv = chan->dev->dev_private;
struct drm_device *dev = chan->dev;
- int i;
NV_DEBUG(dev, "ch%d\n", chan->id);
- if (!chan->ramht)
- return;
-
nouveau_ramht_ref(NULL, &chan->ramht, chan);
+ nouveau_vm_ref(NULL, &chan->vm, chan->vm_pd);
nouveau_gpuobj_ref(NULL, &chan->vm_pd);
- nouveau_gpuobj_ref(NULL, &chan->vm_gart_pt);
- for (i = 0; i < dev_priv->vm_vram_pt_nr; i++)
- nouveau_gpuobj_ref(NULL, &chan->vm_vram_pt[i]);
if (chan->ramin_heap.free_stack.next)
drm_mm_takedown(&chan->ramin_heap);
@@ -791,147 +921,91 @@ nouveau_gpuobj_suspend(struct drm_device *dev)
struct drm_nouveau_private *dev_priv = dev->dev_private;
struct nouveau_gpuobj *gpuobj;
int i;
- if (dev_priv->card_type < NV_50) {
- dev_priv->susres.ramin_copy = vmalloc(dev_priv->ramin_rsvd_vram);
- if (!dev_priv->susres.ramin_copy)
- return -ENOMEM;
-
- for (i = 0; i < dev_priv->ramin_rsvd_vram; i += 4)
- dev_priv->susres.ramin_copy[i/4] = nv_ri32(dev, i);
- return 0;
- }
-
list_for_each_entry(gpuobj, &dev_priv->gpuobj_list, list) {
- if (!gpuobj->im_backing)
+ if (gpuobj->cinst != NVOBJ_CINST_GLOBAL)
continue;
- gpuobj->im_backing_suspend = vmalloc(gpuobj->size);
- if (!gpuobj->im_backing_suspend) {
+ gpuobj->suspend = vmalloc(gpuobj->size);
+ if (!gpuobj->suspend) {
nouveau_gpuobj_resume(dev);
return -ENOMEM;
}
for (i = 0; i < gpuobj->size; i += 4)
- gpuobj->im_backing_suspend[i/4] = nv_ro32(gpuobj, i);
+ gpuobj->suspend[i/4] = nv_ro32(gpuobj, i);
}
return 0;
}
void
-nouveau_gpuobj_suspend_cleanup(struct drm_device *dev)
-{
- struct drm_nouveau_private *dev_priv = dev->dev_private;
- struct nouveau_gpuobj *gpuobj;
-
- if (dev_priv->card_type < NV_50) {
- vfree(dev_priv->susres.ramin_copy);
- dev_priv->susres.ramin_copy = NULL;
- return;
- }
-
- list_for_each_entry(gpuobj, &dev_priv->gpuobj_list, list) {
- if (!gpuobj->im_backing_suspend)
- continue;
-
- vfree(gpuobj->im_backing_suspend);
- gpuobj->im_backing_suspend = NULL;
- }
-}
-
-void
nouveau_gpuobj_resume(struct drm_device *dev)
{
struct drm_nouveau_private *dev_priv = dev->dev_private;
struct nouveau_gpuobj *gpuobj;
int i;
- if (dev_priv->card_type < NV_50) {
- for (i = 0; i < dev_priv->ramin_rsvd_vram; i += 4)
- nv_wi32(dev, i, dev_priv->susres.ramin_copy[i/4]);
- nouveau_gpuobj_suspend_cleanup(dev);
- return;
- }
-
list_for_each_entry(gpuobj, &dev_priv->gpuobj_list, list) {
- if (!gpuobj->im_backing_suspend)
+ if (!gpuobj->suspend)
continue;
for (i = 0; i < gpuobj->size; i += 4)
- nv_wo32(gpuobj, i, gpuobj->im_backing_suspend[i/4]);
- dev_priv->engine.instmem.flush(dev);
+ nv_wo32(gpuobj, i, gpuobj->suspend[i/4]);
+
+ vfree(gpuobj->suspend);
+ gpuobj->suspend = NULL;
}
- nouveau_gpuobj_suspend_cleanup(dev);
+ dev_priv->engine.instmem.flush(dev);
}
int nouveau_ioctl_grobj_alloc(struct drm_device *dev, void *data,
struct drm_file *file_priv)
{
- struct drm_nouveau_private *dev_priv = dev->dev_private;
struct drm_nouveau_grobj_alloc *init = data;
- struct nouveau_pgraph_engine *pgraph = &dev_priv->engine.graph;
- struct nouveau_pgraph_object_class *grc;
- struct nouveau_gpuobj *gr = NULL;
struct nouveau_channel *chan;
int ret;
- NOUVEAU_GET_USER_CHANNEL_WITH_RETURN(init->channel, file_priv, chan);
-
if (init->handle == ~0)
return -EINVAL;
- grc = pgraph->grclass;
- while (grc->id) {
- if (grc->id == init->class)
- break;
- grc++;
- }
+ chan = nouveau_channel_get(dev, file_priv, init->channel);
+ if (IS_ERR(chan))
+ return PTR_ERR(chan);
- if (!grc->id) {
- NV_ERROR(dev, "Illegal object class: 0x%x\n", init->class);
- return -EPERM;
+ if (nouveau_ramht_find(chan, init->handle)) {
+ ret = -EEXIST;
+ goto out;
}
- if (nouveau_ramht_find(chan, init->handle))
- return -EEXIST;
-
- if (!grc->software)
- ret = nouveau_gpuobj_gr_new(chan, grc->id, &gr);
- else
- ret = nouveau_gpuobj_sw_new(chan, grc->id, &gr);
+ ret = nouveau_gpuobj_gr_new(chan, init->handle, init->class);
if (ret) {
NV_ERROR(dev, "Error creating object: %d (%d/0x%08x)\n",
ret, init->channel, init->handle);
- return ret;
}
- ret = nouveau_ramht_insert(chan, init->handle, gr);
- nouveau_gpuobj_ref(NULL, &gr);
- if (ret) {
- NV_ERROR(dev, "Error referencing object: %d (%d/0x%08x)\n",
- ret, init->channel, init->handle);
- return ret;
- }
-
- return 0;
+out:
+ nouveau_channel_put(&chan);
+ return ret;
}
int nouveau_ioctl_gpuobj_free(struct drm_device *dev, void *data,
struct drm_file *file_priv)
{
struct drm_nouveau_gpuobj_free *objfree = data;
- struct nouveau_gpuobj *gpuobj;
struct nouveau_channel *chan;
+ int ret;
- NOUVEAU_GET_USER_CHANNEL_WITH_RETURN(objfree->channel, file_priv, chan);
+ chan = nouveau_channel_get(dev, file_priv, objfree->channel);
+ if (IS_ERR(chan))
+ return PTR_ERR(chan);
- gpuobj = nouveau_ramht_find(chan, objfree->handle);
- if (!gpuobj)
- return -ENOENT;
+ /* Synchronize with the user channel */
+ nouveau_channel_idle(chan);
- nouveau_ramht_remove(chan, objfree->handle);
- return 0;
+ ret = nouveau_ramht_remove(chan, objfree->handle);
+ nouveau_channel_put(&chan);
+ return ret;
}
u32
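
The two ioctls above now share one acquire/operate/release shape. A
minimal sketch of the pattern (the middle operation is a placeholder;
that nouveau_channel_get() returns the channel locked is inferred from
the mutex_unlock() in the nouveau_state.c hunk further down):

    chan = nouveau_channel_get(dev, file_priv, id);
    if (IS_ERR(chan))
            return PTR_ERR(chan);   /* bad id, or not our channel */

    ret = operate_on(chan);         /* placeholder: gr_new, ramht_remove, ... */

    nouveau_channel_put(&chan);     /* drops the reference, NULLs the pointer */
    return ret;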
diff --git a/drivers/gpu/drm/nouveau/nouveau_pm.c b/drivers/gpu/drm/nouveau/nouveau_pm.c
index 9f7b158f582..fb846a3fef1 100644
--- a/drivers/gpu/drm/nouveau/nouveau_pm.c
+++ b/drivers/gpu/drm/nouveau/nouveau_pm.c
@@ -27,6 +27,10 @@
#include "nouveau_drv.h"
#include "nouveau_pm.h"
+#ifdef CONFIG_ACPI
+#include <linux/acpi.h>
+#endif
+#include <linux/power_supply.h>
#include <linux/hwmon.h>
#include <linux/hwmon-sysfs.h>
@@ -418,8 +422,7 @@ nouveau_hwmon_init(struct drm_device *dev)
return ret;
}
dev_set_drvdata(hwmon_dev, dev);
- ret = sysfs_create_group(&hwmon_dev->kobj,
- &hwmon_attrgroup);
+ ret = sysfs_create_group(&dev->pdev->dev.kobj, &hwmon_attrgroup);
if (ret) {
NV_ERROR(dev,
"Unable to create hwmon sysfs file: %d\n", ret);
@@ -446,6 +449,25 @@ nouveau_hwmon_fini(struct drm_device *dev)
#endif
}
+#ifdef CONFIG_ACPI
+static int
+nouveau_pm_acpi_event(struct notifier_block *nb, unsigned long val, void *data)
+{
+ struct drm_nouveau_private *dev_priv =
+ container_of(nb, struct drm_nouveau_private, engine.pm.acpi_nb);
+ struct drm_device *dev = dev_priv->dev;
+ struct acpi_bus_event *entry = (struct acpi_bus_event *)data;
+
+ if (strcmp(entry->device_class, "ac_adapter") == 0) {
+ bool ac = power_supply_is_system_supplied();
+
+ NV_DEBUG(dev, "power supply changed: %s\n", ac ? "AC" : "DC");
+ }
+
+ return NOTIFY_OK;
+}
+#endif
+
int
nouveau_pm_init(struct drm_device *dev)
{
@@ -485,6 +507,10 @@ nouveau_pm_init(struct drm_device *dev)
nouveau_sysfs_init(dev);
nouveau_hwmon_init(dev);
+#ifdef CONFIG_ACPI
+ pm->acpi_nb.notifier_call = nouveau_pm_acpi_event;
+ register_acpi_notifier(&pm->acpi_nb);
+#endif
return 0;
}
@@ -503,6 +529,9 @@ nouveau_pm_fini(struct drm_device *dev)
nouveau_perf_fini(dev);
nouveau_volt_fini(dev);
+#ifdef CONFIG_ACPI
+ unregister_acpi_notifier(&pm->acpi_nb);
+#endif
nouveau_hwmon_fini(dev);
nouveau_sysfs_fini(dev);
}
diff --git a/drivers/gpu/drm/nouveau/nouveau_ramht.c b/drivers/gpu/drm/nouveau/nouveau_ramht.c
index 2d8580927ca..bef3e691041 100644
--- a/drivers/gpu/drm/nouveau/nouveau_ramht.c
+++ b/drivers/gpu/drm/nouveau/nouveau_ramht.c
@@ -104,17 +104,17 @@ nouveau_ramht_insert(struct nouveau_channel *chan, u32 handle,
nouveau_gpuobj_ref(gpuobj, &entry->gpuobj);
if (dev_priv->card_type < NV_40) {
- ctx = NV_RAMHT_CONTEXT_VALID | (gpuobj->cinst >> 4) |
+ ctx = NV_RAMHT_CONTEXT_VALID | (gpuobj->pinst >> 4) |
(chan->id << NV_RAMHT_CONTEXT_CHANNEL_SHIFT) |
(gpuobj->engine << NV_RAMHT_CONTEXT_ENGINE_SHIFT);
} else
if (dev_priv->card_type < NV_50) {
- ctx = (gpuobj->cinst >> 4) |
+ ctx = (gpuobj->pinst >> 4) |
(chan->id << NV40_RAMHT_CONTEXT_CHANNEL_SHIFT) |
(gpuobj->engine << NV40_RAMHT_CONTEXT_ENGINE_SHIFT);
} else {
if (gpuobj->engine == NVOBJ_ENGINE_DISPLAY) {
- ctx = (gpuobj->cinst << 10) | 2;
+ ctx = (gpuobj->cinst << 10) | chan->id;
} else {
ctx = (gpuobj->cinst >> 4) |
((gpuobj->engine <<
@@ -214,18 +214,19 @@ out:
spin_unlock_irqrestore(&chan->ramht->lock, flags);
}
-void
+int
nouveau_ramht_remove(struct nouveau_channel *chan, u32 handle)
{
struct nouveau_ramht_entry *entry;
entry = nouveau_ramht_remove_entry(chan, handle);
if (!entry)
- return;
+ return -ENOENT;
nouveau_ramht_remove_hash(chan, entry->handle);
nouveau_gpuobj_ref(NULL, &entry->gpuobj);
kfree(entry);
+ return 0;
}
struct nouveau_gpuobj *
diff --git a/drivers/gpu/drm/nouveau/nouveau_ramht.h b/drivers/gpu/drm/nouveau/nouveau_ramht.h
index b79cb5e1a8f..c82de98fee0 100644
--- a/drivers/gpu/drm/nouveau/nouveau_ramht.h
+++ b/drivers/gpu/drm/nouveau/nouveau_ramht.h
@@ -48,7 +48,7 @@ extern void nouveau_ramht_ref(struct nouveau_ramht *, struct nouveau_ramht **,
extern int nouveau_ramht_insert(struct nouveau_channel *, u32 handle,
struct nouveau_gpuobj *);
-extern void nouveau_ramht_remove(struct nouveau_channel *, u32 handle);
+extern int nouveau_ramht_remove(struct nouveau_channel *, u32 handle);
extern struct nouveau_gpuobj *
nouveau_ramht_find(struct nouveau_channel *chan, u32 handle);
diff --git a/drivers/gpu/drm/nouveau/nouveau_reg.h b/drivers/gpu/drm/nouveau/nouveau_reg.h
index 1b42541ca9e..04e8fb79526 100644
--- a/drivers/gpu/drm/nouveau/nouveau_reg.h
+++ b/drivers/gpu/drm/nouveau/nouveau_reg.h
@@ -45,6 +45,11 @@
# define NV04_PFB_REF_CMD_REFRESH (1 << 0)
#define NV04_PFB_PRE 0x001002d4
# define NV04_PFB_PRE_CMD_PRECHARGE (1 << 0)
+#define NV20_PFB_ZCOMP(i) (0x00100300 + 4*(i))
+# define NV20_PFB_ZCOMP_MODE_32 (4 << 24)
+# define NV20_PFB_ZCOMP_EN (1 << 31)
+# define NV25_PFB_ZCOMP_MODE_16 (1 << 20)
+# define NV25_PFB_ZCOMP_MODE_32 (2 << 20)
#define NV10_PFB_CLOSE_PAGE2 0x0010033c
#define NV04_PFB_SCRAMBLE(i) (0x00100400 + 4 * (i))
#define NV40_PFB_TILE(i) (0x00100600 + (i*16))
@@ -74,17 +79,6 @@
# define NV40_RAMHT_CONTEXT_ENGINE_SHIFT 20
# define NV40_RAMHT_CONTEXT_INSTANCE_SHIFT 0
-/* DMA object defines */
-#define NV_DMA_ACCESS_RW 0
-#define NV_DMA_ACCESS_RO 1
-#define NV_DMA_ACCESS_WO 2
-#define NV_DMA_TARGET_VIDMEM 0
-#define NV_DMA_TARGET_PCI 2
-#define NV_DMA_TARGET_AGP 3
-/* The following is not a real value used by the card, it's changed by
- * nouveau_object_dma_create */
-#define NV_DMA_TARGET_PCI_NONLINEAR 8
-
/* Some object classes we care about in the drm */
#define NV_CLASS_DMA_FROM_MEMORY 0x00000002
#define NV_CLASS_DMA_TO_MEMORY 0x00000003
@@ -332,6 +326,7 @@
#define NV04_PGRAPH_BSWIZZLE5 0x004006A0
#define NV03_PGRAPH_STATUS 0x004006B0
#define NV04_PGRAPH_STATUS 0x00400700
+# define NV40_PGRAPH_STATUS_SYNC_STALL 0x00004000
#define NV04_PGRAPH_TRAPPED_ADDR 0x00400704
#define NV04_PGRAPH_TRAPPED_DATA 0x00400708
#define NV04_PGRAPH_SURFACE 0x0040070C
@@ -378,6 +373,7 @@
#define NV20_PGRAPH_TLIMIT(i) (0x00400904 + (i*16))
#define NV20_PGRAPH_TSIZE(i) (0x00400908 + (i*16))
#define NV20_PGRAPH_TSTATUS(i) (0x0040090C + (i*16))
+#define NV20_PGRAPH_ZCOMP(i) (0x00400980 + 4*(i))
#define NV10_PGRAPH_TILE(i) (0x00400B00 + (i*16))
#define NV10_PGRAPH_TLIMIT(i) (0x00400B04 + (i*16))
#define NV10_PGRAPH_TSIZE(i) (0x00400B08 + (i*16))
@@ -714,31 +710,32 @@
#define NV50_PDISPLAY_INTR_1_CLK_UNK10 0x00000010
#define NV50_PDISPLAY_INTR_1_CLK_UNK20 0x00000020
#define NV50_PDISPLAY_INTR_1_CLK_UNK40 0x00000040
-#define NV50_PDISPLAY_INTR_EN 0x0061002c
-#define NV50_PDISPLAY_INTR_EN_VBLANK_CRTC 0x0000000c
-#define NV50_PDISPLAY_INTR_EN_VBLANK_CRTC_(n) (1 << ((n) + 2))
-#define NV50_PDISPLAY_INTR_EN_VBLANK_CRTC_0 0x00000004
-#define NV50_PDISPLAY_INTR_EN_VBLANK_CRTC_1 0x00000008
-#define NV50_PDISPLAY_INTR_EN_CLK_UNK10 0x00000010
-#define NV50_PDISPLAY_INTR_EN_CLK_UNK20 0x00000020
-#define NV50_PDISPLAY_INTR_EN_CLK_UNK40 0x00000040
+#define NV50_PDISPLAY_INTR_EN_0 0x00610028
+#define NV50_PDISPLAY_INTR_EN_1 0x0061002c
+#define NV50_PDISPLAY_INTR_EN_1_VBLANK_CRTC 0x0000000c
+#define NV50_PDISPLAY_INTR_EN_1_VBLANK_CRTC_(n) (1 << ((n) + 2))
+#define NV50_PDISPLAY_INTR_EN_1_VBLANK_CRTC_0 0x00000004
+#define NV50_PDISPLAY_INTR_EN_1_VBLANK_CRTC_1 0x00000008
+#define NV50_PDISPLAY_INTR_EN_1_CLK_UNK10 0x00000010
+#define NV50_PDISPLAY_INTR_EN_1_CLK_UNK20 0x00000020
+#define NV50_PDISPLAY_INTR_EN_1_CLK_UNK40 0x00000040
#define NV50_PDISPLAY_UNK30_CTRL 0x00610030
#define NV50_PDISPLAY_UNK30_CTRL_UPDATE_VCLK0 0x00000200
#define NV50_PDISPLAY_UNK30_CTRL_UPDATE_VCLK1 0x00000400
#define NV50_PDISPLAY_UNK30_CTRL_PENDING 0x80000000
-#define NV50_PDISPLAY_TRAPPED_ADDR 0x00610080
-#define NV50_PDISPLAY_TRAPPED_DATA 0x00610084
-#define NV50_PDISPLAY_CHANNEL_STAT(i) ((i) * 0x10 + 0x00610200)
-#define NV50_PDISPLAY_CHANNEL_STAT_DMA 0x00000010
-#define NV50_PDISPLAY_CHANNEL_STAT_DMA_DISABLED 0x00000000
-#define NV50_PDISPLAY_CHANNEL_STAT_DMA_ENABLED 0x00000010
-#define NV50_PDISPLAY_CHANNEL_DMA_CB(i) ((i) * 0x10 + 0x00610204)
-#define NV50_PDISPLAY_CHANNEL_DMA_CB_LOCATION 0x00000002
-#define NV50_PDISPLAY_CHANNEL_DMA_CB_LOCATION_VRAM 0x00000000
-#define NV50_PDISPLAY_CHANNEL_DMA_CB_LOCATION_SYSTEM 0x00000002
-#define NV50_PDISPLAY_CHANNEL_DMA_CB_VALID 0x00000001
-#define NV50_PDISPLAY_CHANNEL_UNK2(i) ((i) * 0x10 + 0x00610208)
-#define NV50_PDISPLAY_CHANNEL_UNK3(i) ((i) * 0x10 + 0x0061020c)
+#define NV50_PDISPLAY_TRAPPED_ADDR(i) ((i) * 0x08 + 0x00610080)
+#define NV50_PDISPLAY_TRAPPED_DATA(i) ((i) * 0x08 + 0x00610084)
+#define NV50_PDISPLAY_EVO_CTRL(i) ((i) * 0x10 + 0x00610200)
+#define NV50_PDISPLAY_EVO_CTRL_DMA 0x00000010
+#define NV50_PDISPLAY_EVO_CTRL_DMA_DISABLED 0x00000000
+#define NV50_PDISPLAY_EVO_CTRL_DMA_ENABLED 0x00000010
+#define NV50_PDISPLAY_EVO_DMA_CB(i) ((i) * 0x10 + 0x00610204)
+#define NV50_PDISPLAY_EVO_DMA_CB_LOCATION 0x00000002
+#define NV50_PDISPLAY_EVO_DMA_CB_LOCATION_VRAM 0x00000000
+#define NV50_PDISPLAY_EVO_DMA_CB_LOCATION_SYSTEM 0x00000002
+#define NV50_PDISPLAY_EVO_DMA_CB_VALID 0x00000001
+#define NV50_PDISPLAY_EVO_UNK2(i) ((i) * 0x10 + 0x00610208)
+#define NV50_PDISPLAY_EVO_HASH_TAG(i) ((i) * 0x10 + 0x0061020c)
#define NV50_PDISPLAY_CURSOR 0x00610270
#define NV50_PDISPLAY_CURSOR_CURSOR_CTRL2(i) ((i) * 0x10 + 0x00610270)
@@ -746,15 +743,11 @@
#define NV50_PDISPLAY_CURSOR_CURSOR_CTRL2_STATUS 0x00030000
#define NV50_PDISPLAY_CURSOR_CURSOR_CTRL2_STATUS_ACTIVE 0x00010000
-#define NV50_PDISPLAY_CTRL_STATE 0x00610300
-#define NV50_PDISPLAY_CTRL_STATE_PENDING 0x80000000
-#define NV50_PDISPLAY_CTRL_STATE_METHOD 0x00001ffc
-#define NV50_PDISPLAY_CTRL_STATE_ENABLE 0x00000001
-#define NV50_PDISPLAY_CTRL_VAL 0x00610304
-#define NV50_PDISPLAY_UNK_380 0x00610380
-#define NV50_PDISPLAY_RAM_AMOUNT 0x00610384
-#define NV50_PDISPLAY_UNK_388 0x00610388
-#define NV50_PDISPLAY_UNK_38C 0x0061038c
+#define NV50_PDISPLAY_PIO_CTRL 0x00610300
+#define NV50_PDISPLAY_PIO_CTRL_PENDING 0x80000000
+#define NV50_PDISPLAY_PIO_CTRL_MTHD 0x00001ffc
+#define NV50_PDISPLAY_PIO_CTRL_ENABLED 0x00000001
+#define NV50_PDISPLAY_PIO_DATA 0x00610304
#define NV50_PDISPLAY_CRTC_P(i, r) ((i) * 0x540 + NV50_PDISPLAY_CRTC_##r)
#define NV50_PDISPLAY_CRTC_C(i, r) (4 + (i) * 0x540 + NV50_PDISPLAY_CRTC_##r)
diff --git a/drivers/gpu/drm/nouveau/nouveau_sgdma.c b/drivers/gpu/drm/nouveau/nouveau_sgdma.c
index d4ac9700703..9a250eb5309 100644
--- a/drivers/gpu/drm/nouveau/nouveau_sgdma.c
+++ b/drivers/gpu/drm/nouveau/nouveau_sgdma.c
@@ -14,7 +14,7 @@ struct nouveau_sgdma_be {
dma_addr_t *pages;
unsigned nr_pages;
- unsigned pte_start;
+ u64 offset;
bool bound;
};
@@ -74,18 +74,6 @@ nouveau_sgdma_clear(struct ttm_backend *be)
}
}
-static inline unsigned
-nouveau_sgdma_pte(struct drm_device *dev, uint64_t offset)
-{
- struct drm_nouveau_private *dev_priv = dev->dev_private;
- unsigned pte = (offset >> NV_CTXDMA_PAGE_SHIFT);
-
- if (dev_priv->card_type < NV_50)
- return pte + 2;
-
- return pte << 1;
-}
-
static int
nouveau_sgdma_bind(struct ttm_backend *be, struct ttm_mem_reg *mem)
{
@@ -97,32 +85,17 @@ nouveau_sgdma_bind(struct ttm_backend *be, struct ttm_mem_reg *mem)
NV_DEBUG(dev, "pg=0x%lx\n", mem->start);
- pte = nouveau_sgdma_pte(nvbe->dev, mem->start << PAGE_SHIFT);
- nvbe->pte_start = pte;
+ nvbe->offset = mem->start << PAGE_SHIFT;
+ pte = (nvbe->offset >> NV_CTXDMA_PAGE_SHIFT) + 2;
for (i = 0; i < nvbe->nr_pages; i++) {
dma_addr_t dma_offset = nvbe->pages[i];
uint32_t offset_l = lower_32_bits(dma_offset);
- uint32_t offset_h = upper_32_bits(dma_offset);
-
- for (j = 0; j < PAGE_SIZE / NV_CTXDMA_PAGE_SIZE; j++) {
- if (dev_priv->card_type < NV_50) {
- nv_wo32(gpuobj, (pte * 4) + 0, offset_l | 3);
- pte += 1;
- } else {
- nv_wo32(gpuobj, (pte * 4) + 0, offset_l | 0x21);
- nv_wo32(gpuobj, (pte * 4) + 4, offset_h & 0xff);
- pte += 2;
- }
+ for (j = 0; j < PAGE_SIZE / NV_CTXDMA_PAGE_SIZE; j++, pte++) {
+ nv_wo32(gpuobj, (pte * 4) + 0, offset_l | 3);
dma_offset += NV_CTXDMA_PAGE_SIZE;
}
}
- dev_priv->engine.instmem.flush(nvbe->dev);
-
- if (dev_priv->card_type == NV_50) {
- dev_priv->engine.fifo.tlb_flush(dev);
- dev_priv->engine.graph.tlb_flush(dev);
- }
nvbe->bound = true;
return 0;
@@ -142,28 +115,10 @@ nouveau_sgdma_unbind(struct ttm_backend *be)
if (!nvbe->bound)
return 0;
- pte = nvbe->pte_start;
+ pte = (nvbe->offset >> NV_CTXDMA_PAGE_SHIFT) + 2;
for (i = 0; i < nvbe->nr_pages; i++) {
- dma_addr_t dma_offset = dev_priv->gart_info.sg_dummy_bus;
-
- for (j = 0; j < PAGE_SIZE / NV_CTXDMA_PAGE_SIZE; j++) {
- if (dev_priv->card_type < NV_50) {
- nv_wo32(gpuobj, (pte * 4) + 0, dma_offset | 3);
- pte += 1;
- } else {
- nv_wo32(gpuobj, (pte * 4) + 0, 0x00000000);
- nv_wo32(gpuobj, (pte * 4) + 4, 0x00000000);
- pte += 2;
- }
-
- dma_offset += NV_CTXDMA_PAGE_SIZE;
- }
- }
- dev_priv->engine.instmem.flush(nvbe->dev);
-
- if (dev_priv->card_type == NV_50) {
- dev_priv->engine.fifo.tlb_flush(dev);
- dev_priv->engine.graph.tlb_flush(dev);
+ for (j = 0; j < PAGE_SIZE / NV_CTXDMA_PAGE_SIZE; j++, pte++)
+ nv_wo32(gpuobj, (pte * 4) + 0, 0x00000000);
}
nvbe->bound = false;
@@ -186,6 +141,35 @@ nouveau_sgdma_destroy(struct ttm_backend *be)
}
}
+static int
+nv50_sgdma_bind(struct ttm_backend *be, struct ttm_mem_reg *mem)
+{
+ struct nouveau_sgdma_be *nvbe = (struct nouveau_sgdma_be *)be;
+ struct drm_nouveau_private *dev_priv = nvbe->dev->dev_private;
+
+ nvbe->offset = mem->start << PAGE_SHIFT;
+
+ nouveau_vm_map_sg(&dev_priv->gart_info.vma, nvbe->offset,
+ nvbe->nr_pages << PAGE_SHIFT, nvbe->pages);
+ nvbe->bound = true;
+ return 0;
+}
+
+static int
+nv50_sgdma_unbind(struct ttm_backend *be)
+{
+ struct nouveau_sgdma_be *nvbe = (struct nouveau_sgdma_be *)be;
+ struct drm_nouveau_private *dev_priv = nvbe->dev->dev_private;
+
+ if (!nvbe->bound)
+ return 0;
+
+ nouveau_vm_unmap_at(&dev_priv->gart_info.vma, nvbe->offset,
+ nvbe->nr_pages << PAGE_SHIFT);
+ nvbe->bound = false;
+ return 0;
+}
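+
+/* The NV50 path above writes no PTEs itself: the pages are handed to
+ * nouveau_vm_map_sg()/nouveau_vm_unmap_at() against the GART VMA that
+ * nouveau_sgdma_init() reserves from the shared channel VM below, so
+ * the per-generation PTE layout lives entirely in the VM backends. */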
+
static struct ttm_backend_func nouveau_sgdma_backend = {
.populate = nouveau_sgdma_populate,
.clear = nouveau_sgdma_clear,
@@ -194,23 +178,30 @@ static struct ttm_backend_func nouveau_sgdma_backend = {
.destroy = nouveau_sgdma_destroy
};
+static struct ttm_backend_func nv50_sgdma_backend = {
+ .populate = nouveau_sgdma_populate,
+ .clear = nouveau_sgdma_clear,
+ .bind = nv50_sgdma_bind,
+ .unbind = nv50_sgdma_unbind,
+ .destroy = nouveau_sgdma_destroy
+};
+
struct ttm_backend *
nouveau_sgdma_init_ttm(struct drm_device *dev)
{
struct drm_nouveau_private *dev_priv = dev->dev_private;
struct nouveau_sgdma_be *nvbe;
- if (!dev_priv->gart_info.sg_ctxdma)
- return NULL;
-
nvbe = kzalloc(sizeof(*nvbe), GFP_KERNEL);
if (!nvbe)
return NULL;
nvbe->dev = dev;
- nvbe->backend.func = &nouveau_sgdma_backend;
-
+ if (dev_priv->card_type < NV_50)
+ nvbe->backend.func = &nouveau_sgdma_backend;
+ else
+ nvbe->backend.func = &nv50_sgdma_backend;
return &nvbe->backend;
}
@@ -218,7 +209,6 @@ int
nouveau_sgdma_init(struct drm_device *dev)
{
struct drm_nouveau_private *dev_priv = dev->dev_private;
- struct pci_dev *pdev = dev->pdev;
struct nouveau_gpuobj *gpuobj = NULL;
uint32_t aper_size, obj_size;
int i, ret;
@@ -231,68 +221,40 @@ nouveau_sgdma_init(struct drm_device *dev)
obj_size = (aper_size >> NV_CTXDMA_PAGE_SHIFT) * 4;
obj_size += 8; /* ctxdma header */
- } else {
- /* 1 entire VM page table */
- aper_size = (512 * 1024 * 1024);
- obj_size = (aper_size >> NV_CTXDMA_PAGE_SHIFT) * 8;
- }
-
- ret = nouveau_gpuobj_new(dev, NULL, obj_size, 16,
- NVOBJ_FLAG_ZERO_ALLOC |
- NVOBJ_FLAG_ZERO_FREE, &gpuobj);
- if (ret) {
- NV_ERROR(dev, "Error creating sgdma object: %d\n", ret);
- return ret;
- }
-
- dev_priv->gart_info.sg_dummy_page =
- alloc_page(GFP_KERNEL|__GFP_DMA32|__GFP_ZERO);
- if (!dev_priv->gart_info.sg_dummy_page) {
- nouveau_gpuobj_ref(NULL, &gpuobj);
- return -ENOMEM;
- }
- set_bit(PG_locked, &dev_priv->gart_info.sg_dummy_page->flags);
- dev_priv->gart_info.sg_dummy_bus =
- pci_map_page(pdev, dev_priv->gart_info.sg_dummy_page, 0,
- PAGE_SIZE, PCI_DMA_BIDIRECTIONAL);
- if (pci_dma_mapping_error(pdev, dev_priv->gart_info.sg_dummy_bus)) {
- nouveau_gpuobj_ref(NULL, &gpuobj);
- return -EFAULT;
- }
+ ret = nouveau_gpuobj_new(dev, NULL, obj_size, 16,
+ NVOBJ_FLAG_ZERO_ALLOC |
+ NVOBJ_FLAG_ZERO_FREE, &gpuobj);
+ if (ret) {
+ NV_ERROR(dev, "Error creating sgdma object: %d\n", ret);
+ return ret;
+ }
- if (dev_priv->card_type < NV_50) {
- /* special case, allocated from global instmem heap so
- * cinst is invalid, we use it on all channels though so
- * cinst needs to be valid, set it the same as pinst
- */
- gpuobj->cinst = gpuobj->pinst;
-
- /* Maybe use NV_DMA_TARGET_AGP for PCIE? NVIDIA do this, and
- * confirmed to work on c51. Perhaps means NV_DMA_TARGET_PCIE
- * on those cards? */
nv_wo32(gpuobj, 0, NV_CLASS_DMA_IN_MEMORY |
(1 << 12) /* PT present */ |
(0 << 13) /* PT *not* linear */ |
- (NV_DMA_ACCESS_RW << 14) |
- (NV_DMA_TARGET_PCI << 16));
+ (0 << 14) /* RW */ |
+ (2 << 16) /* PCI */);
nv_wo32(gpuobj, 4, aper_size - 1);
- for (i = 2; i < 2 + (aper_size >> 12); i++) {
- nv_wo32(gpuobj, i * 4,
- dev_priv->gart_info.sg_dummy_bus | 3);
- }
- } else {
- for (i = 0; i < obj_size; i += 8) {
- nv_wo32(gpuobj, i + 0, 0x00000000);
- nv_wo32(gpuobj, i + 4, 0x00000000);
- }
+ for (i = 2; i < 2 + (aper_size >> 12); i++)
+ nv_wo32(gpuobj, i * 4, 0x00000000);
+
+ dev_priv->gart_info.sg_ctxdma = gpuobj;
+ dev_priv->gart_info.aper_base = 0;
+ dev_priv->gart_info.aper_size = aper_size;
+ } else
+ if (dev_priv->chan_vm) {
+ ret = nouveau_vm_get(dev_priv->chan_vm, 512 * 1024 * 1024,
+ 12, NV_MEM_ACCESS_RW,
+ &dev_priv->gart_info.vma);
+ if (ret)
+ return ret;
+
+ dev_priv->gart_info.aper_base = dev_priv->gart_info.vma.offset;
+ dev_priv->gart_info.aper_size = 512 * 1024 * 1024;
}
- dev_priv->engine.instmem.flush(dev);
dev_priv->gart_info.type = NOUVEAU_GART_SGDMA;
- dev_priv->gart_info.aper_base = 0;
- dev_priv->gart_info.aper_size = aper_size;
- dev_priv->gart_info.sg_ctxdma = gpuobj;
return 0;
}
@@ -301,31 +263,19 @@ nouveau_sgdma_takedown(struct drm_device *dev)
{
struct drm_nouveau_private *dev_priv = dev->dev_private;
- if (dev_priv->gart_info.sg_dummy_page) {
- pci_unmap_page(dev->pdev, dev_priv->gart_info.sg_dummy_bus,
- NV_CTXDMA_PAGE_SIZE, PCI_DMA_BIDIRECTIONAL);
- unlock_page(dev_priv->gart_info.sg_dummy_page);
- __free_page(dev_priv->gart_info.sg_dummy_page);
- dev_priv->gart_info.sg_dummy_page = NULL;
- dev_priv->gart_info.sg_dummy_bus = 0;
- }
-
nouveau_gpuobj_ref(NULL, &dev_priv->gart_info.sg_ctxdma);
+ nouveau_vm_put(&dev_priv->gart_info.vma);
}
-int
-nouveau_sgdma_get_page(struct drm_device *dev, uint32_t offset, uint32_t *page)
+uint32_t
+nouveau_sgdma_get_physical(struct drm_device *dev, uint32_t offset)
{
struct drm_nouveau_private *dev_priv = dev->dev_private;
struct nouveau_gpuobj *gpuobj = dev_priv->gart_info.sg_ctxdma;
- int pte;
+ int pte = (offset >> NV_CTXDMA_PAGE_SHIFT) + 2;
- pte = (offset >> NV_CTXDMA_PAGE_SHIFT) << 2;
- if (dev_priv->card_type < NV_50) {
- *page = nv_ro32(gpuobj, (pte + 8)) & ~NV_CTXDMA_PAGE_MASK;
- return 0;
- }
+ BUG_ON(dev_priv->card_type >= NV_50);
- NV_ERROR(dev, "Unimplemented on NV50\n");
- return -EINVAL;
+ return (nv_ro32(gpuobj, 4 * pte) & ~NV_CTXDMA_PAGE_MASK) |
+ (offset & NV_CTXDMA_PAGE_MASK);
}
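
A worked example of the lookup above, taking NV_CTXDMA_PAGE_SHIFT and
NV_CTXDMA_PAGE_MASK at their usual 4 KiB values (12 and 0xfff) and
remembering that the first two words of the ctxdma are its header:

    offset = 0x5123
    pte    = (0x5123 >> 12) + 2 = 7
    phys   = (nv_ro32(gpuobj, 4 * 7) & ~0xfff) | 0x123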
diff --git a/drivers/gpu/drm/nouveau/nouveau_state.c b/drivers/gpu/drm/nouveau/nouveau_state.c
index 049f755567e..a54fc431fe9 100644
--- a/drivers/gpu/drm/nouveau/nouveau_state.c
+++ b/drivers/gpu/drm/nouveau/nouveau_state.c
@@ -53,10 +53,10 @@ static int nouveau_init_engine_ptrs(struct drm_device *dev)
engine->instmem.takedown = nv04_instmem_takedown;
engine->instmem.suspend = nv04_instmem_suspend;
engine->instmem.resume = nv04_instmem_resume;
- engine->instmem.populate = nv04_instmem_populate;
- engine->instmem.clear = nv04_instmem_clear;
- engine->instmem.bind = nv04_instmem_bind;
- engine->instmem.unbind = nv04_instmem_unbind;
+ engine->instmem.get = nv04_instmem_get;
+ engine->instmem.put = nv04_instmem_put;
+ engine->instmem.map = nv04_instmem_map;
+ engine->instmem.unmap = nv04_instmem_unmap;
engine->instmem.flush = nv04_instmem_flush;
engine->mc.init = nv04_mc_init;
engine->mc.takedown = nv04_mc_takedown;
@@ -65,7 +65,6 @@ static int nouveau_init_engine_ptrs(struct drm_device *dev)
engine->timer.takedown = nv04_timer_takedown;
engine->fb.init = nv04_fb_init;
engine->fb.takedown = nv04_fb_takedown;
- engine->graph.grclass = nv04_graph_grclass;
engine->graph.init = nv04_graph_init;
engine->graph.takedown = nv04_graph_takedown;
engine->graph.fifo_access = nv04_graph_fifo_access;
@@ -76,7 +75,7 @@ static int nouveau_init_engine_ptrs(struct drm_device *dev)
engine->graph.unload_context = nv04_graph_unload_context;
engine->fifo.channels = 16;
engine->fifo.init = nv04_fifo_init;
- engine->fifo.takedown = nouveau_stub_takedown;
+ engine->fifo.takedown = nv04_fifo_fini;
engine->fifo.disable = nv04_fifo_disable;
engine->fifo.enable = nv04_fifo_enable;
engine->fifo.reassign = nv04_fifo_reassign;
@@ -99,16 +98,20 @@ static int nouveau_init_engine_ptrs(struct drm_device *dev)
engine->pm.clock_get = nv04_pm_clock_get;
engine->pm.clock_pre = nv04_pm_clock_pre;
engine->pm.clock_set = nv04_pm_clock_set;
+ engine->crypt.init = nouveau_stub_init;
+ engine->crypt.takedown = nouveau_stub_takedown;
+ engine->vram.init = nouveau_mem_detect;
+ engine->vram.flags_valid = nouveau_mem_flags_valid;
break;
case 0x10:
engine->instmem.init = nv04_instmem_init;
engine->instmem.takedown = nv04_instmem_takedown;
engine->instmem.suspend = nv04_instmem_suspend;
engine->instmem.resume = nv04_instmem_resume;
- engine->instmem.populate = nv04_instmem_populate;
- engine->instmem.clear = nv04_instmem_clear;
- engine->instmem.bind = nv04_instmem_bind;
- engine->instmem.unbind = nv04_instmem_unbind;
+ engine->instmem.get = nv04_instmem_get;
+ engine->instmem.put = nv04_instmem_put;
+ engine->instmem.map = nv04_instmem_map;
+ engine->instmem.unmap = nv04_instmem_unmap;
engine->instmem.flush = nv04_instmem_flush;
engine->mc.init = nv04_mc_init;
engine->mc.takedown = nv04_mc_takedown;
@@ -117,8 +120,9 @@ static int nouveau_init_engine_ptrs(struct drm_device *dev)
engine->timer.takedown = nv04_timer_takedown;
engine->fb.init = nv10_fb_init;
engine->fb.takedown = nv10_fb_takedown;
- engine->fb.set_region_tiling = nv10_fb_set_region_tiling;
- engine->graph.grclass = nv10_graph_grclass;
+ engine->fb.init_tile_region = nv10_fb_init_tile_region;
+ engine->fb.set_tile_region = nv10_fb_set_tile_region;
+ engine->fb.free_tile_region = nv10_fb_free_tile_region;
engine->graph.init = nv10_graph_init;
engine->graph.takedown = nv10_graph_takedown;
engine->graph.channel = nv10_graph_channel;
@@ -127,17 +131,17 @@ static int nouveau_init_engine_ptrs(struct drm_device *dev)
engine->graph.fifo_access = nv04_graph_fifo_access;
engine->graph.load_context = nv10_graph_load_context;
engine->graph.unload_context = nv10_graph_unload_context;
- engine->graph.set_region_tiling = nv10_graph_set_region_tiling;
+ engine->graph.set_tile_region = nv10_graph_set_tile_region;
engine->fifo.channels = 32;
engine->fifo.init = nv10_fifo_init;
- engine->fifo.takedown = nouveau_stub_takedown;
+ engine->fifo.takedown = nv04_fifo_fini;
engine->fifo.disable = nv04_fifo_disable;
engine->fifo.enable = nv04_fifo_enable;
engine->fifo.reassign = nv04_fifo_reassign;
engine->fifo.cache_pull = nv04_fifo_cache_pull;
engine->fifo.channel_id = nv10_fifo_channel_id;
engine->fifo.create_context = nv10_fifo_create_context;
- engine->fifo.destroy_context = nv10_fifo_destroy_context;
+ engine->fifo.destroy_context = nv04_fifo_destroy_context;
engine->fifo.load_context = nv10_fifo_load_context;
engine->fifo.unload_context = nv10_fifo_unload_context;
engine->display.early_init = nv04_display_early_init;
@@ -153,16 +157,20 @@ static int nouveau_init_engine_ptrs(struct drm_device *dev)
engine->pm.clock_get = nv04_pm_clock_get;
engine->pm.clock_pre = nv04_pm_clock_pre;
engine->pm.clock_set = nv04_pm_clock_set;
+ engine->crypt.init = nouveau_stub_init;
+ engine->crypt.takedown = nouveau_stub_takedown;
+ engine->vram.init = nouveau_mem_detect;
+ engine->vram.flags_valid = nouveau_mem_flags_valid;
break;
case 0x20:
engine->instmem.init = nv04_instmem_init;
engine->instmem.takedown = nv04_instmem_takedown;
engine->instmem.suspend = nv04_instmem_suspend;
engine->instmem.resume = nv04_instmem_resume;
- engine->instmem.populate = nv04_instmem_populate;
- engine->instmem.clear = nv04_instmem_clear;
- engine->instmem.bind = nv04_instmem_bind;
- engine->instmem.unbind = nv04_instmem_unbind;
+ engine->instmem.get = nv04_instmem_get;
+ engine->instmem.put = nv04_instmem_put;
+ engine->instmem.map = nv04_instmem_map;
+ engine->instmem.unmap = nv04_instmem_unmap;
engine->instmem.flush = nv04_instmem_flush;
engine->mc.init = nv04_mc_init;
engine->mc.takedown = nv04_mc_takedown;
@@ -171,8 +179,9 @@ static int nouveau_init_engine_ptrs(struct drm_device *dev)
engine->timer.takedown = nv04_timer_takedown;
engine->fb.init = nv10_fb_init;
engine->fb.takedown = nv10_fb_takedown;
- engine->fb.set_region_tiling = nv10_fb_set_region_tiling;
- engine->graph.grclass = nv20_graph_grclass;
+ engine->fb.init_tile_region = nv10_fb_init_tile_region;
+ engine->fb.set_tile_region = nv10_fb_set_tile_region;
+ engine->fb.free_tile_region = nv10_fb_free_tile_region;
engine->graph.init = nv20_graph_init;
engine->graph.takedown = nv20_graph_takedown;
engine->graph.channel = nv10_graph_channel;
@@ -181,17 +190,17 @@ static int nouveau_init_engine_ptrs(struct drm_device *dev)
engine->graph.fifo_access = nv04_graph_fifo_access;
engine->graph.load_context = nv20_graph_load_context;
engine->graph.unload_context = nv20_graph_unload_context;
- engine->graph.set_region_tiling = nv20_graph_set_region_tiling;
+ engine->graph.set_tile_region = nv20_graph_set_tile_region;
engine->fifo.channels = 32;
engine->fifo.init = nv10_fifo_init;
- engine->fifo.takedown = nouveau_stub_takedown;
+ engine->fifo.takedown = nv04_fifo_fini;
engine->fifo.disable = nv04_fifo_disable;
engine->fifo.enable = nv04_fifo_enable;
engine->fifo.reassign = nv04_fifo_reassign;
engine->fifo.cache_pull = nv04_fifo_cache_pull;
engine->fifo.channel_id = nv10_fifo_channel_id;
engine->fifo.create_context = nv10_fifo_create_context;
- engine->fifo.destroy_context = nv10_fifo_destroy_context;
+ engine->fifo.destroy_context = nv04_fifo_destroy_context;
engine->fifo.load_context = nv10_fifo_load_context;
engine->fifo.unload_context = nv10_fifo_unload_context;
engine->display.early_init = nv04_display_early_init;
@@ -207,16 +216,20 @@ static int nouveau_init_engine_ptrs(struct drm_device *dev)
engine->pm.clock_get = nv04_pm_clock_get;
engine->pm.clock_pre = nv04_pm_clock_pre;
engine->pm.clock_set = nv04_pm_clock_set;
+ engine->crypt.init = nouveau_stub_init;
+ engine->crypt.takedown = nouveau_stub_takedown;
+ engine->vram.init = nouveau_mem_detect;
+ engine->vram.flags_valid = nouveau_mem_flags_valid;
break;
case 0x30:
engine->instmem.init = nv04_instmem_init;
engine->instmem.takedown = nv04_instmem_takedown;
engine->instmem.suspend = nv04_instmem_suspend;
engine->instmem.resume = nv04_instmem_resume;
- engine->instmem.populate = nv04_instmem_populate;
- engine->instmem.clear = nv04_instmem_clear;
- engine->instmem.bind = nv04_instmem_bind;
- engine->instmem.unbind = nv04_instmem_unbind;
+ engine->instmem.get = nv04_instmem_get;
+ engine->instmem.put = nv04_instmem_put;
+ engine->instmem.map = nv04_instmem_map;
+ engine->instmem.unmap = nv04_instmem_unmap;
engine->instmem.flush = nv04_instmem_flush;
engine->mc.init = nv04_mc_init;
engine->mc.takedown = nv04_mc_takedown;
@@ -225,8 +238,9 @@ static int nouveau_init_engine_ptrs(struct drm_device *dev)
engine->timer.takedown = nv04_timer_takedown;
engine->fb.init = nv30_fb_init;
engine->fb.takedown = nv30_fb_takedown;
- engine->fb.set_region_tiling = nv10_fb_set_region_tiling;
- engine->graph.grclass = nv30_graph_grclass;
+ engine->fb.init_tile_region = nv30_fb_init_tile_region;
+ engine->fb.set_tile_region = nv10_fb_set_tile_region;
+ engine->fb.free_tile_region = nv30_fb_free_tile_region;
engine->graph.init = nv30_graph_init;
engine->graph.takedown = nv20_graph_takedown;
engine->graph.fifo_access = nv04_graph_fifo_access;
@@ -235,17 +249,17 @@ static int nouveau_init_engine_ptrs(struct drm_device *dev)
engine->graph.destroy_context = nv20_graph_destroy_context;
engine->graph.load_context = nv20_graph_load_context;
engine->graph.unload_context = nv20_graph_unload_context;
- engine->graph.set_region_tiling = nv20_graph_set_region_tiling;
+ engine->graph.set_tile_region = nv20_graph_set_tile_region;
engine->fifo.channels = 32;
engine->fifo.init = nv10_fifo_init;
- engine->fifo.takedown = nouveau_stub_takedown;
+ engine->fifo.takedown = nv04_fifo_fini;
engine->fifo.disable = nv04_fifo_disable;
engine->fifo.enable = nv04_fifo_enable;
engine->fifo.reassign = nv04_fifo_reassign;
engine->fifo.cache_pull = nv04_fifo_cache_pull;
engine->fifo.channel_id = nv10_fifo_channel_id;
engine->fifo.create_context = nv10_fifo_create_context;
- engine->fifo.destroy_context = nv10_fifo_destroy_context;
+ engine->fifo.destroy_context = nv04_fifo_destroy_context;
engine->fifo.load_context = nv10_fifo_load_context;
engine->fifo.unload_context = nv10_fifo_unload_context;
engine->display.early_init = nv04_display_early_init;
@@ -263,6 +277,10 @@ static int nouveau_init_engine_ptrs(struct drm_device *dev)
engine->pm.clock_set = nv04_pm_clock_set;
engine->pm.voltage_get = nouveau_voltage_gpio_get;
engine->pm.voltage_set = nouveau_voltage_gpio_set;
+ engine->crypt.init = nouveau_stub_init;
+ engine->crypt.takedown = nouveau_stub_takedown;
+ engine->vram.init = nouveau_mem_detect;
+ engine->vram.flags_valid = nouveau_mem_flags_valid;
break;
case 0x40:
case 0x60:
@@ -270,10 +288,10 @@ static int nouveau_init_engine_ptrs(struct drm_device *dev)
engine->instmem.takedown = nv04_instmem_takedown;
engine->instmem.suspend = nv04_instmem_suspend;
engine->instmem.resume = nv04_instmem_resume;
- engine->instmem.populate = nv04_instmem_populate;
- engine->instmem.clear = nv04_instmem_clear;
- engine->instmem.bind = nv04_instmem_bind;
- engine->instmem.unbind = nv04_instmem_unbind;
+ engine->instmem.get = nv04_instmem_get;
+ engine->instmem.put = nv04_instmem_put;
+ engine->instmem.map = nv04_instmem_map;
+ engine->instmem.unmap = nv04_instmem_unmap;
engine->instmem.flush = nv04_instmem_flush;
engine->mc.init = nv40_mc_init;
engine->mc.takedown = nv40_mc_takedown;
@@ -282,8 +300,9 @@ static int nouveau_init_engine_ptrs(struct drm_device *dev)
engine->timer.takedown = nv04_timer_takedown;
engine->fb.init = nv40_fb_init;
engine->fb.takedown = nv40_fb_takedown;
- engine->fb.set_region_tiling = nv40_fb_set_region_tiling;
- engine->graph.grclass = nv40_graph_grclass;
+ engine->fb.init_tile_region = nv30_fb_init_tile_region;
+ engine->fb.set_tile_region = nv40_fb_set_tile_region;
+ engine->fb.free_tile_region = nv30_fb_free_tile_region;
engine->graph.init = nv40_graph_init;
engine->graph.takedown = nv40_graph_takedown;
engine->graph.fifo_access = nv04_graph_fifo_access;
@@ -292,17 +311,17 @@ static int nouveau_init_engine_ptrs(struct drm_device *dev)
engine->graph.destroy_context = nv40_graph_destroy_context;
engine->graph.load_context = nv40_graph_load_context;
engine->graph.unload_context = nv40_graph_unload_context;
- engine->graph.set_region_tiling = nv40_graph_set_region_tiling;
+ engine->graph.set_tile_region = nv40_graph_set_tile_region;
engine->fifo.channels = 32;
engine->fifo.init = nv40_fifo_init;
- engine->fifo.takedown = nouveau_stub_takedown;
+ engine->fifo.takedown = nv04_fifo_fini;
engine->fifo.disable = nv04_fifo_disable;
engine->fifo.enable = nv04_fifo_enable;
engine->fifo.reassign = nv04_fifo_reassign;
engine->fifo.cache_pull = nv04_fifo_cache_pull;
engine->fifo.channel_id = nv10_fifo_channel_id;
engine->fifo.create_context = nv40_fifo_create_context;
- engine->fifo.destroy_context = nv40_fifo_destroy_context;
+ engine->fifo.destroy_context = nv04_fifo_destroy_context;
engine->fifo.load_context = nv40_fifo_load_context;
engine->fifo.unload_context = nv40_fifo_unload_context;
engine->display.early_init = nv04_display_early_init;
@@ -321,6 +340,10 @@ static int nouveau_init_engine_ptrs(struct drm_device *dev)
engine->pm.voltage_get = nouveau_voltage_gpio_get;
engine->pm.voltage_set = nouveau_voltage_gpio_set;
engine->pm.temp_get = nv40_temp_get;
+ engine->crypt.init = nouveau_stub_init;
+ engine->crypt.takedown = nouveau_stub_takedown;
+ engine->vram.init = nouveau_mem_detect;
+ engine->vram.flags_valid = nouveau_mem_flags_valid;
break;
case 0x50:
case 0x80: /* gotta love NVIDIA's consistency.. */
@@ -330,10 +353,10 @@ static int nouveau_init_engine_ptrs(struct drm_device *dev)
engine->instmem.takedown = nv50_instmem_takedown;
engine->instmem.suspend = nv50_instmem_suspend;
engine->instmem.resume = nv50_instmem_resume;
- engine->instmem.populate = nv50_instmem_populate;
- engine->instmem.clear = nv50_instmem_clear;
- engine->instmem.bind = nv50_instmem_bind;
- engine->instmem.unbind = nv50_instmem_unbind;
+ engine->instmem.get = nv50_instmem_get;
+ engine->instmem.put = nv50_instmem_put;
+ engine->instmem.map = nv50_instmem_map;
+ engine->instmem.unmap = nv50_instmem_unmap;
if (dev_priv->chipset == 0x50)
engine->instmem.flush = nv50_instmem_flush;
else
@@ -345,7 +368,6 @@ static int nouveau_init_engine_ptrs(struct drm_device *dev)
engine->timer.takedown = nv04_timer_takedown;
engine->fb.init = nv50_fb_init;
engine->fb.takedown = nv50_fb_takedown;
- engine->graph.grclass = nv50_graph_grclass;
engine->graph.init = nv50_graph_init;
engine->graph.takedown = nv50_graph_takedown;
engine->graph.fifo_access = nv50_graph_fifo_access;
@@ -381,24 +403,32 @@ static int nouveau_init_engine_ptrs(struct drm_device *dev)
engine->display.init = nv50_display_init;
engine->display.destroy = nv50_display_destroy;
engine->gpio.init = nv50_gpio_init;
- engine->gpio.takedown = nouveau_stub_takedown;
+ engine->gpio.takedown = nv50_gpio_fini;
engine->gpio.get = nv50_gpio_get;
engine->gpio.set = nv50_gpio_set;
+ engine->gpio.irq_register = nv50_gpio_irq_register;
+ engine->gpio.irq_unregister = nv50_gpio_irq_unregister;
engine->gpio.irq_enable = nv50_gpio_irq_enable;
switch (dev_priv->chipset) {
- case 0xa3:
- case 0xa5:
- case 0xa8:
- case 0xaf:
- engine->pm.clock_get = nva3_pm_clock_get;
- engine->pm.clock_pre = nva3_pm_clock_pre;
- engine->pm.clock_set = nva3_pm_clock_set;
- break;
- default:
+ case 0x84:
+ case 0x86:
+ case 0x92:
+ case 0x94:
+ case 0x96:
+ case 0x98:
+ case 0xa0:
+ case 0xaa:
+ case 0xac:
+ case 0x50:
engine->pm.clock_get = nv50_pm_clock_get;
engine->pm.clock_pre = nv50_pm_clock_pre;
engine->pm.clock_set = nv50_pm_clock_set;
break;
+ default:
+ engine->pm.clock_get = nva3_pm_clock_get;
+ engine->pm.clock_pre = nva3_pm_clock_pre;
+ engine->pm.clock_set = nva3_pm_clock_set;
+ break;
}
engine->pm.voltage_get = nouveau_voltage_gpio_get;
engine->pm.voltage_set = nouveau_voltage_gpio_set;
@@ -406,17 +436,39 @@ static int nouveau_init_engine_ptrs(struct drm_device *dev)
engine->pm.temp_get = nv84_temp_get;
else
engine->pm.temp_get = nv40_temp_get;
+ switch (dev_priv->chipset) {
+ case 0x84:
+ case 0x86:
+ case 0x92:
+ case 0x94:
+ case 0x96:
+ case 0xa0:
+ engine->crypt.init = nv84_crypt_init;
+ engine->crypt.takedown = nv84_crypt_fini;
+ engine->crypt.create_context = nv84_crypt_create_context;
+ engine->crypt.destroy_context = nv84_crypt_destroy_context;
+ engine->crypt.tlb_flush = nv84_crypt_tlb_flush;
+ break;
+ default:
+ engine->crypt.init = nouveau_stub_init;
+ engine->crypt.takedown = nouveau_stub_takedown;
+ break;
+ }
+ engine->vram.init = nv50_vram_init;
+ engine->vram.get = nv50_vram_new;
+ engine->vram.put = nv50_vram_del;
+ engine->vram.flags_valid = nv50_vram_flags_valid;
break;
case 0xC0:
engine->instmem.init = nvc0_instmem_init;
engine->instmem.takedown = nvc0_instmem_takedown;
engine->instmem.suspend = nvc0_instmem_suspend;
engine->instmem.resume = nvc0_instmem_resume;
- engine->instmem.populate = nvc0_instmem_populate;
- engine->instmem.clear = nvc0_instmem_clear;
- engine->instmem.bind = nvc0_instmem_bind;
- engine->instmem.unbind = nvc0_instmem_unbind;
- engine->instmem.flush = nvc0_instmem_flush;
+ engine->instmem.get = nv50_instmem_get;
+ engine->instmem.put = nv50_instmem_put;
+ engine->instmem.map = nv50_instmem_map;
+ engine->instmem.unmap = nv50_instmem_unmap;
+ engine->instmem.flush = nv84_instmem_flush;
engine->mc.init = nv50_mc_init;
engine->mc.takedown = nv50_mc_takedown;
engine->timer.init = nv04_timer_init;
@@ -424,7 +476,6 @@ static int nouveau_init_engine_ptrs(struct drm_device *dev)
engine->timer.takedown = nv04_timer_takedown;
engine->fb.init = nvc0_fb_init;
engine->fb.takedown = nvc0_fb_takedown;
- engine->graph.grclass = NULL; //nvc0_graph_grclass;
engine->graph.init = nvc0_graph_init;
engine->graph.takedown = nvc0_graph_takedown;
engine->graph.fifo_access = nvc0_graph_fifo_access;
@@ -453,7 +504,15 @@ static int nouveau_init_engine_ptrs(struct drm_device *dev)
engine->gpio.takedown = nouveau_stub_takedown;
engine->gpio.get = nv50_gpio_get;
engine->gpio.set = nv50_gpio_set;
+ engine->gpio.irq_register = nv50_gpio_irq_register;
+ engine->gpio.irq_unregister = nv50_gpio_irq_unregister;
engine->gpio.irq_enable = nv50_gpio_irq_enable;
+ engine->crypt.init = nouveau_stub_init;
+ engine->crypt.takedown = nouveau_stub_takedown;
+ engine->vram.init = nvc0_vram_init;
+ engine->vram.get = nvc0_vram_new;
+ engine->vram.put = nv50_vram_del;
+ engine->vram.flags_valid = nvc0_vram_flags_valid;
break;
default:
NV_ERROR(dev, "NV%02x unsupported\n", dev_priv->chipset);
@@ -493,9 +552,13 @@ nouveau_card_init_channel(struct drm_device *dev)
if (ret)
return ret;
+ /* no dma objects on fermi... */
+ if (dev_priv->card_type >= NV_C0)
+ goto out_done;
+
ret = nouveau_gpuobj_dma_new(dev_priv->channel, NV_CLASS_DMA_IN_MEMORY,
0, dev_priv->vram_size,
- NV_DMA_ACCESS_RW, NV_DMA_TARGET_VIDMEM,
+ NV_MEM_ACCESS_RW, NV_MEM_TARGET_VRAM,
&gpuobj);
if (ret)
goto out_err;
@@ -505,9 +568,10 @@ nouveau_card_init_channel(struct drm_device *dev)
if (ret)
goto out_err;
- ret = nouveau_gpuobj_gart_dma_new(dev_priv->channel, 0,
- dev_priv->gart_info.aper_size,
- NV_DMA_ACCESS_RW, &gpuobj, NULL);
+ ret = nouveau_gpuobj_dma_new(dev_priv->channel, NV_CLASS_DMA_IN_MEMORY,
+ 0, dev_priv->gart_info.aper_size,
+ NV_MEM_ACCESS_RW, NV_MEM_TARGET_GART,
+ &gpuobj);
if (ret)
goto out_err;
@@ -516,11 +580,12 @@ nouveau_card_init_channel(struct drm_device *dev)
if (ret)
goto out_err;
+out_done:
+ mutex_unlock(&dev_priv->channel->mutex);
return 0;
out_err:
- nouveau_channel_free(dev_priv->channel);
- dev_priv->channel = NULL;
+ nouveau_channel_put(&dev_priv->channel);
return ret;
}
@@ -531,15 +596,25 @@ static void nouveau_switcheroo_set_state(struct pci_dev *pdev,
pm_message_t pmm = { .event = PM_EVENT_SUSPEND };
if (state == VGA_SWITCHEROO_ON) {
printk(KERN_ERR "VGA switcheroo: switched nouveau on\n");
+ dev->switch_power_state = DRM_SWITCH_POWER_CHANGING;
nouveau_pci_resume(pdev);
drm_kms_helper_poll_enable(dev);
+ dev->switch_power_state = DRM_SWITCH_POWER_ON;
} else {
printk(KERN_ERR "VGA switcheroo: switched nouveau off\n");
+ dev->switch_power_state = DRM_SWITCH_POWER_CHANGING;
drm_kms_helper_poll_disable(dev);
nouveau_pci_suspend(pdev, pmm);
+ dev->switch_power_state = DRM_SWITCH_POWER_OFF;
}
}
+static void nouveau_switcheroo_reprobe(struct pci_dev *pdev)
+{
+ struct drm_device *dev = pci_get_drvdata(pdev);
+ nouveau_fbcon_output_poll_changed(dev);
+}
+
static bool nouveau_switcheroo_can_switch(struct pci_dev *pdev)
{
struct drm_device *dev = pci_get_drvdata(pdev);
@@ -560,6 +635,7 @@ nouveau_card_init(struct drm_device *dev)
vga_client_register(dev->pdev, dev, NULL, nouveau_vga_set_decode);
vga_switcheroo_register_client(dev->pdev, nouveau_switcheroo_set_state,
+ nouveau_switcheroo_reprobe,
nouveau_switcheroo_can_switch);
/* Initialise internal driver API hooks */
@@ -567,6 +643,8 @@ nouveau_card_init(struct drm_device *dev)
if (ret)
goto out;
engine = &dev_priv->engine;
+ spin_lock_init(&dev_priv->channels.lock);
+ spin_lock_init(&dev_priv->tile.lock);
spin_lock_init(&dev_priv->context_switch_lock);
/* Make the CRTCs and I2C buses accessible */
@@ -625,26 +703,28 @@ nouveau_card_init(struct drm_device *dev)
if (ret)
goto out_fb;
+ /* PCRYPT */
+ ret = engine->crypt.init(dev);
+ if (ret)
+ goto out_graph;
+
/* PFIFO */
ret = engine->fifo.init(dev);
if (ret)
- goto out_graph;
+ goto out_crypt;
}
ret = engine->display.create(dev);
if (ret)
goto out_fifo;
- /* this call irq_preinstall, register irq handler and
- * call irq_postinstall
- */
- ret = drm_irq_install(dev);
+ ret = drm_vblank_init(dev, nv_two_heads(dev) ? 2 : 1);
if (ret)
- goto out_display;
+ goto out_vblank;
- ret = drm_vblank_init(dev, 0);
+ ret = nouveau_irq_init(dev);
if (ret)
- goto out_irq;
+ goto out_vblank;
/* what about PVIDEO/PCRTC/PRAMDAC etc? */
@@ -669,12 +749,16 @@ nouveau_card_init(struct drm_device *dev)
out_fence:
nouveau_fence_fini(dev);
out_irq:
- drm_irq_uninstall(dev);
-out_display:
+ nouveau_irq_fini(dev);
+out_vblank:
+ drm_vblank_cleanup(dev);
engine->display.destroy(dev);
out_fifo:
if (!nouveau_noaccel)
engine->fifo.takedown(dev);
+out_crypt:
+ if (!nouveau_noaccel)
+ engine->crypt.takedown(dev);
out_graph:
if (!nouveau_noaccel)
engine->graph.takedown(dev);
@@ -713,12 +797,12 @@ static void nouveau_card_takedown(struct drm_device *dev)
if (!engine->graph.accel_blocked) {
nouveau_fence_fini(dev);
- nouveau_channel_free(dev_priv->channel);
- dev_priv->channel = NULL;
+ nouveau_channel_put_unlocked(&dev_priv->channel);
}
if (!nouveau_noaccel) {
engine->fifo.takedown(dev);
+ engine->crypt.takedown(dev);
engine->graph.takedown(dev);
}
engine->fb.takedown(dev);
@@ -737,7 +821,8 @@ static void nouveau_card_takedown(struct drm_device *dev)
nouveau_gpuobj_takedown(dev);
nouveau_mem_vram_fini(dev);
- drm_irq_uninstall(dev);
+ nouveau_irq_fini(dev);
+ drm_vblank_cleanup(dev);
nouveau_pm_fini(dev);
nouveau_bios_takedown(dev);
@@ -980,6 +1065,7 @@ err_out:
void nouveau_lastclose(struct drm_device *dev)
{
+ vga_switcheroo_process_delayed_switch();
}
int nouveau_unload(struct drm_device *dev)
@@ -1024,21 +1110,6 @@ int nouveau_ioctl_getparam(struct drm_device *dev, void *data,
else
getparam->value = NV_PCI;
break;
- case NOUVEAU_GETPARAM_FB_PHYSICAL:
- getparam->value = dev_priv->fb_phys;
- break;
- case NOUVEAU_GETPARAM_AGP_PHYSICAL:
- getparam->value = dev_priv->gart_info.aper_base;
- break;
- case NOUVEAU_GETPARAM_PCI_PHYSICAL:
- if (dev->sg) {
- getparam->value = (unsigned long)dev->sg->virtual;
- } else {
- NV_ERROR(dev, "Requested PCIGART address, "
- "while no PCIGART was created\n");
- return -EINVAL;
- }
- break;
case NOUVEAU_GETPARAM_FB_SIZE:
getparam->value = dev_priv->fb_available_size;
break;
@@ -1046,7 +1117,7 @@ int nouveau_ioctl_getparam(struct drm_device *dev, void *data,
getparam->value = dev_priv->gart_info.aper_size;
break;
case NOUVEAU_GETPARAM_VM_VRAM_BASE:
- getparam->value = dev_priv->vm_vram_base;
+ getparam->value = 0; /* deprecated */
break;
case NOUVEAU_GETPARAM_PTIMER_TIME:
getparam->value = dev_priv->engine.timer.read(dev);
@@ -1054,6 +1125,9 @@ int nouveau_ioctl_getparam(struct drm_device *dev, void *data,
case NOUVEAU_GETPARAM_HAS_BO_USAGE:
getparam->value = 1;
break;
+ case NOUVEAU_GETPARAM_HAS_PAGEFLIP:
+ getparam->value = (dev_priv->card_type < NV_50);
+ break;
case NOUVEAU_GETPARAM_GRAPH_UNITS:
/* NV40 and NV50 versions are quite different, but register
* address is the same. User is supposed to know the card
@@ -1087,8 +1161,9 @@ nouveau_ioctl_setparam(struct drm_device *dev, void *data,
}
/* Wait until (value(reg) & mask) == val, up until timeout has hit */
-bool nouveau_wait_until(struct drm_device *dev, uint64_t timeout,
- uint32_t reg, uint32_t mask, uint32_t val)
+bool
+nouveau_wait_eq(struct drm_device *dev, uint64_t timeout,
+ uint32_t reg, uint32_t mask, uint32_t val)
{
struct drm_nouveau_private *dev_priv = dev->dev_private;
struct nouveau_timer_engine *ptimer = &dev_priv->engine.timer;
@@ -1102,10 +1177,33 @@ bool nouveau_wait_until(struct drm_device *dev, uint64_t timeout,
return false;
}
+/* Wait until (value(reg) & mask) != val, up until timeout has hit */
+bool
+nouveau_wait_ne(struct drm_device *dev, uint64_t timeout,
+ uint32_t reg, uint32_t mask, uint32_t val)
+{
+ struct drm_nouveau_private *dev_priv = dev->dev_private;
+ struct nouveau_timer_engine *ptimer = &dev_priv->engine.timer;
+ uint64_t start = ptimer->read(dev);
+
+ do {
+ if ((nv_rd32(dev, reg) & mask) != val)
+ return true;
+ } while (ptimer->read(dev) - start < timeout);
+
+ return false;
+}
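+
+/* Usage sketch for the eq/ne pair above (register and mask purely
+ * illustrative); timeouts are in PTIMER units, nominally nanoseconds:
+ *
+ *   if (!nouveau_wait_eq(dev, 2000000000ULL, reg, 0x00000001, 0x00000000))
+ *           NV_ERROR(dev, "timed out waiting for idle\n");
+ */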
+
/* Waits for PGRAPH to go completely idle */
bool nouveau_wait_for_idle(struct drm_device *dev)
{
- if (!nv_wait(dev, NV04_PGRAPH_STATUS, 0xffffffff, 0x00000000)) {
+ struct drm_nouveau_private *dev_priv = dev->dev_private;
+ uint32_t mask = ~0;
+
+ if (dev_priv->card_type == NV_40)
+ mask &= ~NV40_PGRAPH_STATUS_SYNC_STALL;
+
+ if (!nv_wait(dev, NV04_PGRAPH_STATUS, mask, 0)) {
NV_ERROR(dev, "PGRAPH idle timed out with status 0x%08x\n",
nv_rd32(dev, NV04_PGRAPH_STATUS));
return false;
diff --git a/drivers/gpu/drm/nouveau/nouveau_util.c b/drivers/gpu/drm/nouveau/nouveau_util.c
new file mode 100644
index 00000000000..fbe0fb13bc1
--- /dev/null
+++ b/drivers/gpu/drm/nouveau/nouveau_util.c
@@ -0,0 +1,69 @@
+/*
+ * Copyright (C) 2010 Nouveau Project
+ *
+ * All Rights Reserved.
+ *
+ * Permission is hereby granted, free of charge, to any person obtaining
+ * a copy of this software and associated documentation files (the
+ * "Software"), to deal in the Software without restriction, including
+ * without limitation the rights to use, copy, modify, merge, publish,
+ * distribute, sublicense, and/or sell copies of the Software, and to
+ * permit persons to whom the Software is furnished to do so, subject to
+ * the following conditions:
+ *
+ * The above copyright notice and this permission notice (including the
+ * next paragraph) shall be included in all copies or substantial
+ * portions of the Software.
+ *
+ * THE SOFTWARE IS PROVIDED "AS IS", WITHOUT WARRANTY OF ANY KIND,
+ * EXPRESS OR IMPLIED, INCLUDING BUT NOT LIMITED TO THE WARRANTIES OF
+ * MERCHANTABILITY, FITNESS FOR A PARTICULAR PURPOSE AND NONINFRINGEMENT.
+ * IN NO EVENT SHALL THE COPYRIGHT OWNER(S) AND/OR ITS SUPPLIERS BE
+ * LIABLE FOR ANY CLAIM, DAMAGES OR OTHER LIABILITY, WHETHER IN AN ACTION
+ * OF CONTRACT, TORT OR OTHERWISE, ARISING FROM, OUT OF OR IN CONNECTION
+ * WITH THE SOFTWARE OR THE USE OR OTHER DEALINGS IN THE SOFTWARE.
+ *
+ */
+
+#include <linux/ratelimit.h>
+
+#include "nouveau_util.h"
+
+static DEFINE_RATELIMIT_STATE(nouveau_ratelimit_state, 3 * HZ, 20);
+
+void
+nouveau_bitfield_print(const struct nouveau_bitfield *bf, u32 value)
+{
+ while (bf->name) {
+ if (value & bf->mask) {
+ printk(" %s", bf->name);
+ value &= ~bf->mask;
+ }
+
+ bf++;
+ }
+
+ if (value)
+ printk(" (unknown bits 0x%08x)", value);
+}
+
+void
+nouveau_enum_print(const struct nouveau_enum *en, u32 value)
+{
+ while (en->name) {
+ if (value == en->value) {
+ printk("%s", en->name);
+ return;
+ }
+
+ en++;
+ }
+
+ printk("(unknown enum 0x%08x)", value);
+}
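+
+/* Usage sketch (table contents hypothetical): the zero-terminated
+ * tables are walked once, and anything not named is dumped raw.
+ *
+ *   static const struct nouveau_bitfield nv_intr_bits[] = {
+ *           { 0x00000001, "GLINT" },
+ *           { 0x00000100, "CONTEXT_SWITCH" },
+ *           {}
+ *   };
+ *
+ *   nouveau_bitfield_print(nv_intr_bits, nv_rd32(dev, NV03_PMC_INTR_0));
+ */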
+
+int
+nouveau_ratelimit(void)
+{
+ return __ratelimit(&nouveau_ratelimit_state);
+}
diff --git a/drivers/gpu/drm/nouveau/nouveau_util.h b/drivers/gpu/drm/nouveau/nouveau_util.h
new file mode 100644
index 00000000000..d9ceaea26f4
--- /dev/null
+++ b/drivers/gpu/drm/nouveau/nouveau_util.h
@@ -0,0 +1,45 @@
+/*
+ * Copyright (C) 2010 Nouveau Project
+ *
+ * All Rights Reserved.
+ *
+ * Permission is hereby granted, free of charge, to any person obtaining
+ * a copy of this software and associated documentation files (the
+ * "Software"), to deal in the Software without restriction, including
+ * without limitation the rights to use, copy, modify, merge, publish,
+ * distribute, sublicense, and/or sell copies of the Software, and to
+ * permit persons to whom the Software is furnished to do so, subject to
+ * the following conditions:
+ *
+ * The above copyright notice and this permission notice (including the
+ * next paragraph) shall be included in all copies or substantial
+ * portions of the Software.
+ *
+ * THE SOFTWARE IS PROVIDED "AS IS", WITHOUT WARRANTY OF ANY KIND,
+ * EXPRESS OR IMPLIED, INCLUDING BUT NOT LIMITED TO THE WARRANTIES OF
+ * MERCHANTABILITY, FITNESS FOR A PARTICULAR PURPOSE AND NONINFRINGEMENT.
+ * IN NO EVENT SHALL THE COPYRIGHT OWNER(S) AND/OR ITS SUPPLIERS BE
+ * LIABLE FOR ANY CLAIM, DAMAGES OR OTHER LIABILITY, WHETHER IN AN ACTION
+ * OF CONTRACT, TORT OR OTHERWISE, ARISING FROM, OUT OF OR IN CONNECTION
+ * WITH THE SOFTWARE OR THE USE OR OTHER DEALINGS IN THE SOFTWARE.
+ *
+ */
+
+#ifndef __NOUVEAU_UTIL_H__
+#define __NOUVEAU_UTIL_H__
+
+struct nouveau_bitfield {
+ u32 mask;
+ const char *name;
+};
+
+struct nouveau_enum {
+ u32 value;
+ const char *name;
+};
+
+void nouveau_bitfield_print(const struct nouveau_bitfield *, u32 value);
+void nouveau_enum_print(const struct nouveau_enum *, u32 value);
+int nouveau_ratelimit(void);
+
+#endif
diff --git a/drivers/gpu/drm/nouveau/nouveau_vm.c b/drivers/gpu/drm/nouveau/nouveau_vm.c
new file mode 100644
index 00000000000..97d82aedf86
--- /dev/null
+++ b/drivers/gpu/drm/nouveau/nouveau_vm.c
@@ -0,0 +1,439 @@
+/*
+ * Copyright 2010 Red Hat Inc.
+ *
+ * Permission is hereby granted, free of charge, to any person obtaining a
+ * copy of this software and associated documentation files (the "Software"),
+ * to deal in the Software without restriction, including without limitation
+ * the rights to use, copy, modify, merge, publish, distribute, sublicense,
+ * and/or sell copies of the Software, and to permit persons to whom the
+ * Software is furnished to do so, subject to the following conditions:
+ *
+ * The above copyright notice and this permission notice shall be included in
+ * all copies or substantial portions of the Software.
+ *
+ * THE SOFTWARE IS PROVIDED "AS IS", WITHOUT WARRANTY OF ANY KIND, EXPRESS OR
+ * IMPLIED, INCLUDING BUT NOT LIMITED TO THE WARRANTIES OF MERCHANTABILITY,
+ * FITNESS FOR A PARTICULAR PURPOSE AND NONINFRINGEMENT. IN NO EVENT SHALL
+ * THE COPYRIGHT HOLDER(S) OR AUTHOR(S) BE LIABLE FOR ANY CLAIM, DAMAGES OR
+ * OTHER LIABILITY, WHETHER IN AN ACTION OF CONTRACT, TORT OR OTHERWISE,
+ * ARISING FROM, OUT OF OR IN CONNECTION WITH THE SOFTWARE OR THE USE OR
+ * OTHER DEALINGS IN THE SOFTWARE.
+ *
+ * Authors: Ben Skeggs
+ */
+
+#include "drmP.h"
+#include "nouveau_drv.h"
+#include "nouveau_mm.h"
+#include "nouveau_vm.h"
+
+void
+nouveau_vm_map_at(struct nouveau_vma *vma, u64 delta, struct nouveau_vram *vram)
+{
+ struct nouveau_vm *vm = vma->vm;
+ struct nouveau_mm_node *r;
+ int big = vma->node->type != vm->spg_shift;
+ u32 offset = vma->node->offset + (delta >> 12);
+ u32 bits = vma->node->type - 12;
+ u32 pde = (offset >> vm->pgt_bits) - vm->fpde;
+ u32 pte = (offset & ((1 << vm->pgt_bits) - 1)) >> bits;
+ u32 max = 1 << (vm->pgt_bits - bits);
+ u32 end, len;
+
+ list_for_each_entry(r, &vram->regions, rl_entry) {
+ u64 phys = (u64)r->offset << 12;
+ u32 num = r->length >> bits;
+
+ while (num) {
+ struct nouveau_gpuobj *pgt = vm->pgt[pde].obj[big];
+
+ end = (pte + num);
+ if (unlikely(end >= max))
+ end = max;
+ len = end - pte;
+
+ vm->map(vma, pgt, vram, pte, len, phys);
+
+ num -= len;
+ pte += len;
+ if (unlikely(end >= max)) {
+ pde++;
+ pte = 0;
+ }
+ }
+ }
+
+ vm->flush(vm);
+}
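+
+/* A worked example of the index arithmetic above, with illustrative
+ * values: take vm->pgt_bits = 17, so each PDE spans 1 << (17 + 12)
+ * bytes = 512 MiB (matching the 512*1024*1024 divisor in the NV50
+ * page-directory code this patch removes), and a 64 KiB large-page
+ * mapping (node->type = 16, hence bits = 4) at byte offset 0x30000000:
+ *
+ *   offset = 0x30000000 >> 12         -> 0x30000 (4 KiB units)
+ *   pde    = 0x30000 >> 17            -> 1 (before the fpde bias)
+ *   pte    = (0x30000 & 0x1ffff) >> 4 -> 0x1000
+ *   max    = 1 << (17 - 4)            -> 8192 PTEs per table
+ */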
+
+void
+nouveau_vm_map(struct nouveau_vma *vma, struct nouveau_vram *vram)
+{
+ nouveau_vm_map_at(vma, 0, vram);
+}
+
+void
+nouveau_vm_map_sg(struct nouveau_vma *vma, u64 delta, u64 length,
+ dma_addr_t *list)
+{
+ struct nouveau_vm *vm = vma->vm;
+ int big = vma->node->type != vm->spg_shift;
+ u32 offset = vma->node->offset + (delta >> 12);
+ u32 bits = vma->node->type - 12;
+ u32 num = length >> vma->node->type;
+ u32 pde = (offset >> vm->pgt_bits) - vm->fpde;
+ u32 pte = (offset & ((1 << vm->pgt_bits) - 1)) >> bits;
+ u32 max = 1 << (vm->pgt_bits - bits);
+ u32 end, len;
+
+ while (num) {
+ struct nouveau_gpuobj *pgt = vm->pgt[pde].obj[big];
+
+ end = (pte + num);
+ if (unlikely(end >= max))
+ end = max;
+ len = end - pte;
+
+ vm->map_sg(vma, pgt, pte, list, len);
+
+ num -= len;
+ pte += len;
+ list += len;
+ if (unlikely(end >= max)) {
+ pde++;
+ pte = 0;
+ }
+ }
+
+ vm->flush(vm);
+}
+
+void
+nouveau_vm_unmap_at(struct nouveau_vma *vma, u64 delta, u64 length)
+{
+ struct nouveau_vm *vm = vma->vm;
+ int big = vma->node->type != vm->spg_shift;
+ u32 offset = vma->node->offset + (delta >> 12);
+ u32 bits = vma->node->type - 12;
+ u32 num = length >> vma->node->type;
+ u32 pde = (offset >> vm->pgt_bits) - vm->fpde;
+ u32 pte = (offset & ((1 << vm->pgt_bits) - 1)) >> bits;
+ u32 max = 1 << (vm->pgt_bits - bits);
+ u32 end, len;
+
+ while (num) {
+ struct nouveau_gpuobj *pgt = vm->pgt[pde].obj[big];
+
+ end = (pte + num);
+ if (unlikely(end >= max))
+ end = max;
+ len = end - pte;
+
+ vm->unmap(pgt, pte, len);
+
+ num -= len;
+ pte += len;
+ if (unlikely(end >= max)) {
+ pde++;
+ pte = 0;
+ }
+ }
+
+ vm->flush(vm);
+}
+
+void
+nouveau_vm_unmap(struct nouveau_vma *vma)
+{
+ nouveau_vm_unmap_at(vma, 0, (u64)vma->node->length << 12);
+}
+
+static void
+nouveau_vm_unmap_pgt(struct nouveau_vm *vm, int big, u32 fpde, u32 lpde)
+{
+ struct nouveau_vm_pgd *vpgd;
+ struct nouveau_vm_pgt *vpgt;
+ struct nouveau_gpuobj *pgt;
+ u32 pde;
+
+ for (pde = fpde; pde <= lpde; pde++) {
+ vpgt = &vm->pgt[pde - vm->fpde];
+ if (--vpgt->refcount[big])
+ continue;
+
+ pgt = vpgt->obj[big];
+ vpgt->obj[big] = NULL;
+
+ list_for_each_entry(vpgd, &vm->pgd_list, head) {
+ vm->map_pgt(vpgd->obj, pde, vpgt->obj);
+ }
+
+ mutex_unlock(&vm->mm->mutex);
+ nouveau_gpuobj_ref(NULL, &pgt);
+ mutex_lock(&vm->mm->mutex);
+ }
+}
+
+static int
+nouveau_vm_map_pgt(struct nouveau_vm *vm, u32 pde, u32 type)
+{
+ struct nouveau_vm_pgt *vpgt = &vm->pgt[pde - vm->fpde];
+ struct nouveau_vm_pgd *vpgd;
+ struct nouveau_gpuobj *pgt;
+ int big = (type != vm->spg_shift);
+ u32 pgt_size;
+ int ret;
+
+ pgt_size = (1 << (vm->pgt_bits + 12)) >> type;
+ pgt_size *= 8;
+
+ mutex_unlock(&vm->mm->mutex);
+ ret = nouveau_gpuobj_new(vm->dev, NULL, pgt_size, 0x1000,
+ NVOBJ_FLAG_ZERO_ALLOC, &pgt);
+ mutex_lock(&vm->mm->mutex);
+ if (unlikely(ret))
+ return ret;
+
+ /* someone beat us to filling the PDE while we didn't have the lock */
+ if (unlikely(vpgt->refcount[big]++)) {
+ mutex_unlock(&vm->mm->mutex);
+ nouveau_gpuobj_ref(NULL, &pgt);
+ mutex_lock(&vm->mm->mutex);
+ return 0;
+ }
+
+ vpgt->obj[big] = pgt;
+ list_for_each_entry(vpgd, &vm->pgd_list, head) {
+ vm->map_pgt(vpgd->obj, pde, vpgt->obj);
+ }
+
+ return 0;
+}
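
The pgt_size computation above works out the byte size of one page table: a PDE spans 1 << (vm->pgt_bits + 12) bytes of virtual address space, dividing by the page size (1 << type) gives the PTE count, and each PTE occupies 8 bytes. With the hypothetical NV50 numbers from the earlier example:

	/* vm->pgt_bits = 17, so each PDE covers 1 << 29 bytes (512 MiB):
	 *	 4 KiB pages (type = 12): (1 << 29) >> 12 = 131072 PTEs * 8 = 1 MiB
	 *	64 KiB pages (type = 16): (1 << 29) >> 16 =   8192 PTEs * 8 = 64 KiB
	 */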
+
+int
+nouveau_vm_get(struct nouveau_vm *vm, u64 size, u32 page_shift,
+ u32 access, struct nouveau_vma *vma)
+{
+ u32 align = (1 << page_shift) >> 12;
+ u32 msize = size >> 12;
+ u32 fpde, lpde, pde;
+ int ret;
+
+ mutex_lock(&vm->mm->mutex);
+ ret = nouveau_mm_get(vm->mm, page_shift, msize, 0, align, &vma->node);
+ if (unlikely(ret != 0)) {
+ mutex_unlock(&vm->mm->mutex);
+ return ret;
+ }
+
+ fpde = (vma->node->offset >> vm->pgt_bits);
+ lpde = (vma->node->offset + vma->node->length - 1) >> vm->pgt_bits;
+ for (pde = fpde; pde <= lpde; pde++) {
+ struct nouveau_vm_pgt *vpgt = &vm->pgt[pde - vm->fpde];
+ int big = (vma->node->type != vm->spg_shift);
+
+ if (likely(vpgt->refcount[big])) {
+ vpgt->refcount[big]++;
+ continue;
+ }
+
+ ret = nouveau_vm_map_pgt(vm, pde, vma->node->type);
+ if (ret) {
+ if (pde != fpde)
+ nouveau_vm_unmap_pgt(vm, big, fpde, pde - 1);
+ nouveau_mm_put(vm->mm, vma->node);
+ mutex_unlock(&vm->mm->mutex);
+ vma->node = NULL;
+ return ret;
+ }
+ }
+ mutex_unlock(&vm->mm->mutex);
+
+ vma->vm = vm;
+ vma->offset = (u64)vma->node->offset << 12;
+ vma->access = access;
+ return 0;
+}
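
Together with the map helpers earlier in the file, nouveau_vm_get() and nouveau_vm_put() form the usual allocate/map/unmap/free lifecycle. A minimal caller sketch, assuming an initialised vm and a vram description to back the mapping (the access value is device-specific and shown as a placeholder):

	struct nouveau_vma vma = {};
	int ret;

	ret = nouveau_vm_get(vm, size, 12, access, &vma);	/* 4 KiB pages */
	if (ret)
		return ret;

	nouveau_vm_map(&vma, vram);	/* write PTEs and flush the TLB */
	/* ... use vma.offset as the GPU virtual address ... */
	nouveau_vm_unmap(&vma);		/* clear PTEs and flush the TLB */
	nouveau_vm_put(&vma);		/* release the address space */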
+
+void
+nouveau_vm_put(struct nouveau_vma *vma)
+{
+ struct nouveau_vm *vm = vma->vm;
+ u32 fpde, lpde;
+
+ if (unlikely(vma->node == NULL))
+ return;
+ fpde = (vma->node->offset >> vm->pgt_bits);
+ lpde = (vma->node->offset + vma->node->length - 1) >> vm->pgt_bits;
+
+ mutex_lock(&vm->mm->mutex);
+ nouveau_vm_unmap_pgt(vm, vma->node->type != vm->spg_shift, fpde, lpde);
+ nouveau_mm_put(vm->mm, vma->node);
+ vma->node = NULL;
+ mutex_unlock(&vm->mm->mutex);
+}
+
+int
+nouveau_vm_new(struct drm_device *dev, u64 offset, u64 length, u64 mm_offset,
+ struct nouveau_vm **pvm)
+{
+ struct drm_nouveau_private *dev_priv = dev->dev_private;
+ struct nouveau_vm *vm;
+ u64 mm_length = (offset + length) - mm_offset;
+ u32 block, pgt_bits;
+ int ret;
+
+ vm = kzalloc(sizeof(*vm), GFP_KERNEL);
+ if (!vm)
+ return -ENOMEM;
+
+ if (dev_priv->card_type == NV_50) {
+ vm->map_pgt = nv50_vm_map_pgt;
+ vm->map = nv50_vm_map;
+ vm->map_sg = nv50_vm_map_sg;
+ vm->unmap = nv50_vm_unmap;
+ vm->flush = nv50_vm_flush;
+ vm->spg_shift = 12;
+ vm->lpg_shift = 16;
+
+ pgt_bits = 29;
+ block = (1 << pgt_bits);
+ if (length < block)
+ block = length;
+
+ } else
+ if (dev_priv->card_type == NV_C0) {
+ vm->map_pgt = nvc0_vm_map_pgt;
+ vm->map = nvc0_vm_map;
+ vm->map_sg = nvc0_vm_map_sg;
+ vm->unmap = nvc0_vm_unmap;
+ vm->flush = nvc0_vm_flush;
+ vm->spg_shift = 12;
+ vm->lpg_shift = 17;
+ pgt_bits = 27;
+
+		/* Should be 4096 everywhere; this is a hack that's
+		 * currently necessary to avoid an elusive bug that
+		 * causes corruption when mixing small/large pages.
+		 */
+ if (length < (1ULL << 40))
+ block = 4096;
+ else {
+ block = (1 << pgt_bits);
+ if (length < block)
+ block = length;
+ }
+ } else {
+ kfree(vm);
+ return -ENOSYS;
+ }
+
+ vm->fpde = offset >> pgt_bits;
+ vm->lpde = (offset + length - 1) >> pgt_bits;
+ vm->pgt = kcalloc(vm->lpde - vm->fpde + 1, sizeof(*vm->pgt), GFP_KERNEL);
+ if (!vm->pgt) {
+ kfree(vm);
+ return -ENOMEM;
+ }
+
+ INIT_LIST_HEAD(&vm->pgd_list);
+ vm->dev = dev;
+ vm->refcount = 1;
+ vm->pgt_bits = pgt_bits - 12;
+
+ ret = nouveau_mm_init(&vm->mm, mm_offset >> 12, mm_length >> 12,
+ block >> 12);
+ if (ret) {
+ kfree(vm);
+ return ret;
+ }
+
+ *pvm = vm;
+ return 0;
+}
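
To get a feel for the bookkeeping this sets up, consider a hypothetical NV50 VM spanning 1 TiB at offset 0 (pgt_bits = 29):

	/* vm->fpde = 0
	 * vm->lpde = ((1ULL << 40) - 1) >> 29 = 2047
	 * so kcalloc() reserves 2048 nouveau_vm_pgt slots, and the mm
	 * allocator is set up in 4 KiB units with block >> 12 as its
	 * granularity (1 << 17 pages, i.e. one full page table; the
	 * meaning of nouveau_mm_init's last parameter is assumed here).
	 */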
+
+static int
+nouveau_vm_link(struct nouveau_vm *vm, struct nouveau_gpuobj *pgd)
+{
+ struct nouveau_vm_pgd *vpgd;
+ int i;
+
+ if (!pgd)
+ return 0;
+
+ vpgd = kzalloc(sizeof(*vpgd), GFP_KERNEL);
+ if (!vpgd)
+ return -ENOMEM;
+
+ nouveau_gpuobj_ref(pgd, &vpgd->obj);
+
+ mutex_lock(&vm->mm->mutex);
+ for (i = vm->fpde; i <= vm->lpde; i++)
+ vm->map_pgt(pgd, i, vm->pgt[i - vm->fpde].obj);
+ list_add(&vpgd->head, &vm->pgd_list);
+ mutex_unlock(&vm->mm->mutex);
+ return 0;
+}
+
+static void
+nouveau_vm_unlink(struct nouveau_vm *vm, struct nouveau_gpuobj *pgd)
+{
+ struct nouveau_vm_pgd *vpgd, *tmp;
+
+ if (!pgd)
+ return;
+
+ mutex_lock(&vm->mm->mutex);
+ list_for_each_entry_safe(vpgd, tmp, &vm->pgd_list, head) {
+ if (vpgd->obj != pgd)
+ continue;
+
+ list_del(&vpgd->head);
+ nouveau_gpuobj_ref(NULL, &vpgd->obj);
+ kfree(vpgd);
+ }
+ mutex_unlock(&vm->mm->mutex);
+}
+
+static void
+nouveau_vm_del(struct nouveau_vm *vm)
+{
+ struct nouveau_vm_pgd *vpgd, *tmp;
+
+ list_for_each_entry_safe(vpgd, tmp, &vm->pgd_list, head) {
+ nouveau_vm_unlink(vm, vpgd->obj);
+ }
+ WARN_ON(nouveau_mm_fini(&vm->mm) != 0);
+
+ kfree(vm->pgt);
+ kfree(vm);
+}
+
+int
+nouveau_vm_ref(struct nouveau_vm *ref, struct nouveau_vm **ptr,
+ struct nouveau_gpuobj *pgd)
+{
+ struct nouveau_vm *vm;
+ int ret;
+
+ vm = ref;
+ if (vm) {
+ ret = nouveau_vm_link(vm, pgd);
+ if (ret)
+ return ret;
+
+ vm->refcount++;
+ }
+
+ vm = *ptr;
+ *ptr = ref;
+
+ if (vm) {
+ nouveau_vm_unlink(vm, pgd);
+
+ if (--vm->refcount == 0)
+ nouveau_vm_del(vm);
+ }
+
+ return 0;
+}
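
nouveau_vm_ref() follows the same ref-or-release convention as nouveau_gpuobj_ref() used elsewhere in this series: pass a VM to take a reference (optionally linking a page directory), pass NULL to drop the one currently held. A sketch, with chan_vm and chan_pgd as hypothetical caller state:

	/* take a reference on vm and link the channel's page directory */
	ret = nouveau_vm_ref(vm, &chan_vm, chan_pgd);

	/* later: unlink the pgd and drop the reference; the VM is
	 * freed once its refcount reaches zero */
	nouveau_vm_ref(NULL, &chan_vm, chan_pgd);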
diff --git a/drivers/gpu/drm/nouveau/nouveau_vm.h b/drivers/gpu/drm/nouveau/nouveau_vm.h
new file mode 100644
index 00000000000..e1193515771
--- /dev/null
+++ b/drivers/gpu/drm/nouveau/nouveau_vm.h
@@ -0,0 +1,113 @@
+/*
+ * Copyright 2010 Red Hat Inc.
+ *
+ * Permission is hereby granted, free of charge, to any person obtaining a
+ * copy of this software and associated documentation files (the "Software"),
+ * to deal in the Software without restriction, including without limitation
+ * the rights to use, copy, modify, merge, publish, distribute, sublicense,
+ * and/or sell copies of the Software, and to permit persons to whom the
+ * Software is furnished to do so, subject to the following conditions:
+ *
+ * The above copyright notice and this permission notice shall be included in
+ * all copies or substantial portions of the Software.
+ *
+ * THE SOFTWARE IS PROVIDED "AS IS", WITHOUT WARRANTY OF ANY KIND, EXPRESS OR
+ * IMPLIED, INCLUDING BUT NOT LIMITED TO THE WARRANTIES OF MERCHANTABILITY,
+ * FITNESS FOR A PARTICULAR PURPOSE AND NONINFRINGEMENT. IN NO EVENT SHALL
+ * THE COPYRIGHT HOLDER(S) OR AUTHOR(S) BE LIABLE FOR ANY CLAIM, DAMAGES OR
+ * OTHER LIABILITY, WHETHER IN AN ACTION OF CONTRACT, TORT OR OTHERWISE,
+ * ARISING FROM, OUT OF OR IN CONNECTION WITH THE SOFTWARE OR THE USE OR
+ * OTHER DEALINGS IN THE SOFTWARE.
+ *
+ * Authors: Ben Skeggs
+ */
+
+#ifndef __NOUVEAU_VM_H__
+#define __NOUVEAU_VM_H__
+
+#include "drmP.h"
+
+#include "nouveau_drv.h"
+#include "nouveau_mm.h"
+
+struct nouveau_vm_pgt {
+ struct nouveau_gpuobj *obj[2];
+ u32 refcount[2];
+};
+
+struct nouveau_vm_pgd {
+ struct list_head head;
+ struct nouveau_gpuobj *obj;
+};
+
+struct nouveau_vma {
+ struct nouveau_vm *vm;
+ struct nouveau_mm_node *node;
+ u64 offset;
+ u32 access;
+};
+
+struct nouveau_vm {
+ struct drm_device *dev;
+ struct nouveau_mm *mm;
+ int refcount;
+
+ struct list_head pgd_list;
+ atomic_t pgraph_refs;
+ atomic_t pcrypt_refs;
+
+ struct nouveau_vm_pgt *pgt;
+ u32 fpde;
+ u32 lpde;
+
+ u32 pgt_bits;
+ u8 spg_shift;
+ u8 lpg_shift;
+
+ void (*map_pgt)(struct nouveau_gpuobj *pgd, u32 pde,
+ struct nouveau_gpuobj *pgt[2]);
+ void (*map)(struct nouveau_vma *, struct nouveau_gpuobj *,
+ struct nouveau_vram *, u32 pte, u32 cnt, u64 phys);
+ void (*map_sg)(struct nouveau_vma *, struct nouveau_gpuobj *,
+ u32 pte, dma_addr_t *, u32 cnt);
+ void (*unmap)(struct nouveau_gpuobj *pgt, u32 pte, u32 cnt);
+ void (*flush)(struct nouveau_vm *);
+};
+
+/* nouveau_vm.c */
+int nouveau_vm_new(struct drm_device *, u64 offset, u64 length, u64 mm_offset,
+ struct nouveau_vm **);
+int nouveau_vm_ref(struct nouveau_vm *, struct nouveau_vm **,
+ struct nouveau_gpuobj *pgd);
+int nouveau_vm_get(struct nouveau_vm *, u64 size, u32 page_shift,
+ u32 access, struct nouveau_vma *);
+void nouveau_vm_put(struct nouveau_vma *);
+void nouveau_vm_map(struct nouveau_vma *, struct nouveau_vram *);
+void nouveau_vm_map_at(struct nouveau_vma *, u64 offset, struct nouveau_vram *);
+void nouveau_vm_unmap(struct nouveau_vma *);
+void nouveau_vm_unmap_at(struct nouveau_vma *, u64 offset, u64 length);
+void nouveau_vm_map_sg(struct nouveau_vma *, u64 offset, u64 length,
+ dma_addr_t *);
+
+/* nv50_vm.c */
+void nv50_vm_map_pgt(struct nouveau_gpuobj *pgd, u32 pde,
+ struct nouveau_gpuobj *pgt[2]);
+void nv50_vm_map(struct nouveau_vma *, struct nouveau_gpuobj *,
+ struct nouveau_vram *, u32 pte, u32 cnt, u64 phys);
+void nv50_vm_map_sg(struct nouveau_vma *, struct nouveau_gpuobj *,
+ u32 pte, dma_addr_t *, u32 cnt);
+void nv50_vm_unmap(struct nouveau_gpuobj *, u32 pte, u32 cnt);
+void nv50_vm_flush(struct nouveau_vm *);
+void nv50_vm_flush_engine(struct drm_device *, int engine);
+
+/* nvc0_vm.c */
+void nvc0_vm_map_pgt(struct nouveau_gpuobj *pgd, u32 pde,
+ struct nouveau_gpuobj *pgt[2]);
+void nvc0_vm_map(struct nouveau_vma *, struct nouveau_gpuobj *,
+ struct nouveau_vram *, u32 pte, u32 cnt, u64 phys);
+void nvc0_vm_map_sg(struct nouveau_vma *, struct nouveau_gpuobj *,
+ u32 pte, dma_addr_t *, u32 cnt);
+void nvc0_vm_unmap(struct nouveau_gpuobj *, u32 pte, u32 cnt);
+void nvc0_vm_flush(struct nouveau_vm *);
+
+#endif
diff --git a/drivers/gpu/drm/nouveau/nv04_crtc.c b/drivers/gpu/drm/nouveau/nv04_crtc.c
index 40e18074162..297505eb98d 100644
--- a/drivers/gpu/drm/nouveau/nv04_crtc.c
+++ b/drivers/gpu/drm/nouveau/nv04_crtc.c
@@ -551,7 +551,10 @@ nv_crtc_mode_set_regs(struct drm_crtc *crtc, struct drm_display_mode * mode)
if (dev_priv->card_type >= NV_30)
regp->gpio_ext = NVReadCRTC(dev, 0, NV_PCRTC_GPIO_EXT);
- regp->crtc_cfg = NV_PCRTC_CONFIG_START_ADDRESS_HSYNC;
+ if (dev_priv->card_type >= NV_10)
+ regp->crtc_cfg = NV10_PCRTC_CONFIG_START_ADDRESS_HSYNC;
+ else
+ regp->crtc_cfg = NV04_PCRTC_CONFIG_START_ADDRESS_HSYNC;
/* Some misc regs */
if (dev_priv->card_type == NV_40) {
@@ -669,6 +672,7 @@ static void nv_crtc_prepare(struct drm_crtc *crtc)
if (nv_two_heads(dev))
NVSetOwner(dev, nv_crtc->index);
+ drm_vblank_pre_modeset(dev, nv_crtc->index);
funcs->dpms(crtc, DRM_MODE_DPMS_OFF);
NVBlankScreen(dev, nv_crtc->index, true);
@@ -701,6 +705,7 @@ static void nv_crtc_commit(struct drm_crtc *crtc)
#endif
funcs->dpms(crtc, DRM_MODE_DPMS_ON);
+ drm_vblank_post_modeset(dev, nv_crtc->index);
}
static void nv_crtc_destroy(struct drm_crtc *crtc)
@@ -986,6 +991,7 @@ static const struct drm_crtc_funcs nv04_crtc_funcs = {
.cursor_move = nv04_crtc_cursor_move,
.gamma_set = nv_crtc_gamma_set,
.set_config = drm_crtc_helper_set_config,
+ .page_flip = nouveau_crtc_page_flip,
.destroy = nv_crtc_destroy,
};
diff --git a/drivers/gpu/drm/nouveau/nv04_dac.c b/drivers/gpu/drm/nouveau/nv04_dac.c
index ba6423f2ffc..e000455e06d 100644
--- a/drivers/gpu/drm/nouveau/nv04_dac.c
+++ b/drivers/gpu/drm/nouveau/nv04_dac.c
@@ -74,14 +74,14 @@ static int sample_load_twice(struct drm_device *dev, bool sense[2])
* use a 10ms timeout (guards against crtc being inactive, in
* which case blank state would never change)
*/
- if (!nouveau_wait_until(dev, 10000000, NV_PRMCIO_INP0__COLOR,
- 0x00000001, 0x00000000))
+ if (!nouveau_wait_eq(dev, 10000000, NV_PRMCIO_INP0__COLOR,
+ 0x00000001, 0x00000000))
return -EBUSY;
- if (!nouveau_wait_until(dev, 10000000, NV_PRMCIO_INP0__COLOR,
- 0x00000001, 0x00000001))
+ if (!nouveau_wait_eq(dev, 10000000, NV_PRMCIO_INP0__COLOR,
+ 0x00000001, 0x00000001))
return -EBUSY;
- if (!nouveau_wait_until(dev, 10000000, NV_PRMCIO_INP0__COLOR,
- 0x00000001, 0x00000000))
+ if (!nouveau_wait_eq(dev, 10000000, NV_PRMCIO_INP0__COLOR,
+ 0x00000001, 0x00000000))
return -EBUSY;
udelay(100);
diff --git a/drivers/gpu/drm/nouveau/nv04_display.c b/drivers/gpu/drm/nouveau/nv04_display.c
index 9e28cf772e3..1715e1464b7 100644
--- a/drivers/gpu/drm/nouveau/nv04_display.c
+++ b/drivers/gpu/drm/nouveau/nv04_display.c
@@ -32,6 +32,9 @@
#include "nouveau_encoder.h"
#include "nouveau_connector.h"
+static void nv04_vblank_crtc0_isr(struct drm_device *);
+static void nv04_vblank_crtc1_isr(struct drm_device *);
+
static void
nv04_display_store_initial_head_owner(struct drm_device *dev)
{
@@ -197,6 +200,8 @@ nv04_display_create(struct drm_device *dev)
func->save(encoder);
}
+ nouveau_irq_register(dev, 24, nv04_vblank_crtc0_isr);
+ nouveau_irq_register(dev, 25, nv04_vblank_crtc1_isr);
return 0;
}
@@ -208,6 +213,9 @@ nv04_display_destroy(struct drm_device *dev)
NV_DEBUG_KMS(dev, "\n");
+ nouveau_irq_unregister(dev, 24);
+ nouveau_irq_unregister(dev, 25);
+
/* Turn every CRTC off. */
list_for_each_entry(crtc, &dev->mode_config.crtc_list, head) {
struct drm_mode_set modeset = {
@@ -258,3 +266,16 @@ nv04_display_init(struct drm_device *dev)
return 0;
}
+static void
+nv04_vblank_crtc0_isr(struct drm_device *dev)
+{
+ nv_wr32(dev, NV_CRTC0_INTSTAT, NV_CRTC_INTR_VBLANK);
+ drm_handle_vblank(dev, 0);
+}
+
+static void
+nv04_vblank_crtc1_isr(struct drm_device *dev)
+{
+ nv_wr32(dev, NV_CRTC1_INTSTAT, NV_CRTC_INTR_VBLANK);
+ drm_handle_vblank(dev, 1);
+}
diff --git a/drivers/gpu/drm/nouveau/nv04_fbcon.c b/drivers/gpu/drm/nouveau/nv04_fbcon.c
index 33e4c9388bc..7a118937109 100644
--- a/drivers/gpu/drm/nouveau/nv04_fbcon.c
+++ b/drivers/gpu/drm/nouveau/nv04_fbcon.c
@@ -28,52 +28,39 @@
#include "nouveau_ramht.h"
#include "nouveau_fbcon.h"
-void
+int
nv04_fbcon_copyarea(struct fb_info *info, const struct fb_copyarea *region)
{
struct nouveau_fbdev *nfbdev = info->par;
struct drm_device *dev = nfbdev->dev;
struct drm_nouveau_private *dev_priv = dev->dev_private;
struct nouveau_channel *chan = dev_priv->channel;
+ int ret;
- if (info->state != FBINFO_STATE_RUNNING)
- return;
-
- if (!(info->flags & FBINFO_HWACCEL_DISABLED) && RING_SPACE(chan, 4)) {
- nouveau_fbcon_gpu_lockup(info);
- }
-
- if (info->flags & FBINFO_HWACCEL_DISABLED) {
- cfb_copyarea(info, region);
- return;
- }
+ ret = RING_SPACE(chan, 4);
+ if (ret)
+ return ret;
BEGIN_RING(chan, NvSubImageBlit, 0x0300, 3);
OUT_RING(chan, (region->sy << 16) | region->sx);
OUT_RING(chan, (region->dy << 16) | region->dx);
OUT_RING(chan, (region->height << 16) | region->width);
FIRE_RING(chan);
+ return 0;
}
-void
+int
nv04_fbcon_fillrect(struct fb_info *info, const struct fb_fillrect *rect)
{
struct nouveau_fbdev *nfbdev = info->par;
struct drm_device *dev = nfbdev->dev;
struct drm_nouveau_private *dev_priv = dev->dev_private;
struct nouveau_channel *chan = dev_priv->channel;
+ int ret;
- if (info->state != FBINFO_STATE_RUNNING)
- return;
-
- if (!(info->flags & FBINFO_HWACCEL_DISABLED) && RING_SPACE(chan, 7)) {
- nouveau_fbcon_gpu_lockup(info);
- }
-
- if (info->flags & FBINFO_HWACCEL_DISABLED) {
- cfb_fillrect(info, rect);
- return;
- }
+ ret = RING_SPACE(chan, 7);
+ if (ret)
+ return ret;
BEGIN_RING(chan, NvSubGdiRect, 0x02fc, 1);
OUT_RING(chan, (rect->rop != ROP_COPY) ? 1 : 3);
@@ -87,9 +74,10 @@ nv04_fbcon_fillrect(struct fb_info *info, const struct fb_fillrect *rect)
OUT_RING(chan, (rect->dx << 16) | rect->dy);
OUT_RING(chan, (rect->width << 16) | rect->height);
FIRE_RING(chan);
+ return 0;
}
-void
+int
nv04_fbcon_imageblit(struct fb_info *info, const struct fb_image *image)
{
struct nouveau_fbdev *nfbdev = info->par;
@@ -101,23 +89,14 @@ nv04_fbcon_imageblit(struct fb_info *info, const struct fb_image *image)
uint32_t dsize;
uint32_t width;
uint32_t *data = (uint32_t *)image->data;
+ int ret;
- if (info->state != FBINFO_STATE_RUNNING)
- return;
-
- if (image->depth != 1) {
- cfb_imageblit(info, image);
- return;
- }
-
- if (!(info->flags & FBINFO_HWACCEL_DISABLED) && RING_SPACE(chan, 8)) {
- nouveau_fbcon_gpu_lockup(info);
- }
+ if (image->depth != 1)
+ return -ENODEV;
- if (info->flags & FBINFO_HWACCEL_DISABLED) {
- cfb_imageblit(info, image);
- return;
- }
+ ret = RING_SPACE(chan, 8);
+ if (ret)
+ return ret;
width = ALIGN(image->width, 8);
dsize = ALIGN(width * image->height, 32) >> 5;
@@ -144,11 +123,9 @@ nv04_fbcon_imageblit(struct fb_info *info, const struct fb_image *image)
while (dsize) {
int iter_len = dsize > 128 ? 128 : dsize;
- if (RING_SPACE(chan, iter_len + 1)) {
- nouveau_fbcon_gpu_lockup(info);
- cfb_imageblit(info, image);
- return;
- }
+ ret = RING_SPACE(chan, iter_len + 1);
+ if (ret)
+ return ret;
BEGIN_RING(chan, NvSubGdiRect, 0x0c00, iter_len);
OUT_RINGp(chan, data, iter_len);
@@ -157,22 +134,7 @@ nv04_fbcon_imageblit(struct fb_info *info, const struct fb_image *image)
}
FIRE_RING(chan);
-}
-
-static int
-nv04_fbcon_grobj_new(struct drm_device *dev, int class, uint32_t handle)
-{
- struct drm_nouveau_private *dev_priv = dev->dev_private;
- struct nouveau_gpuobj *obj = NULL;
- int ret;
-
- ret = nouveau_gpuobj_gr_new(dev_priv->channel, class, &obj);
- if (ret)
- return ret;
-
- ret = nouveau_ramht_insert(dev_priv->channel, handle, obj);
- nouveau_gpuobj_ref(NULL, &obj);
- return ret;
+ return 0;
}
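
With the FBINFO state checks and cfb_*() fallbacks stripped out, the three accel entry points now report failure instead of degrading silently, leaving the software fallback to the caller. A hedged sketch of that contract (the call site shown is illustrative, not the actual fbcon glue):

	int ret = nv04_fbcon_fillrect(info, rect);
	if (ret)
		cfb_fillrect(info, rect);	/* software fallback */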
int
@@ -214,29 +176,31 @@ nv04_fbcon_accel_init(struct fb_info *info)
return -EINVAL;
}
- ret = nv04_fbcon_grobj_new(dev, dev_priv->card_type >= NV_10 ?
- 0x0062 : 0x0042, NvCtxSurf2D);
+ ret = nouveau_gpuobj_gr_new(chan, NvCtxSurf2D,
+ dev_priv->card_type >= NV_10 ?
+ 0x0062 : 0x0042);
if (ret)
return ret;
- ret = nv04_fbcon_grobj_new(dev, 0x0019, NvClipRect);
+ ret = nouveau_gpuobj_gr_new(chan, NvClipRect, 0x0019);
if (ret)
return ret;
- ret = nv04_fbcon_grobj_new(dev, 0x0043, NvRop);
+ ret = nouveau_gpuobj_gr_new(chan, NvRop, 0x0043);
if (ret)
return ret;
- ret = nv04_fbcon_grobj_new(dev, 0x0044, NvImagePatt);
+ ret = nouveau_gpuobj_gr_new(chan, NvImagePatt, 0x0044);
if (ret)
return ret;
- ret = nv04_fbcon_grobj_new(dev, 0x004a, NvGdiRect);
+ ret = nouveau_gpuobj_gr_new(chan, NvGdiRect, 0x004a);
if (ret)
return ret;
- ret = nv04_fbcon_grobj_new(dev, dev_priv->chipset >= 0x11 ?
- 0x009f : 0x005f, NvImageBlit);
+ ret = nouveau_gpuobj_gr_new(chan, NvImageBlit,
+ dev_priv->chipset >= 0x11 ?
+ 0x009f : 0x005f);
if (ret)
return ret;
diff --git a/drivers/gpu/drm/nouveau/nv04_fifo.c b/drivers/gpu/drm/nouveau/nv04_fifo.c
index 708293b7ddc..f89d104698d 100644
--- a/drivers/gpu/drm/nouveau/nv04_fifo.c
+++ b/drivers/gpu/drm/nouveau/nv04_fifo.c
@@ -28,6 +28,7 @@
#include "drm.h"
#include "nouveau_drv.h"
#include "nouveau_ramht.h"
+#include "nouveau_util.h"
#define NV04_RAMFC(c) (dev_priv->ramfc->pinst + ((c) * NV04_RAMFC__SIZE))
#define NV04_RAMFC__SIZE 32
@@ -128,6 +129,11 @@ nv04_fifo_create_context(struct nouveau_channel *chan)
if (ret)
return ret;
+ chan->user = ioremap(pci_resource_start(dev->pdev, 0) +
+ NV03_USER(chan->id), PAGE_SIZE);
+ if (!chan->user)
+ return -ENOMEM;
+
spin_lock_irqsave(&dev_priv->context_switch_lock, flags);
/* Setup initial state */
@@ -151,10 +157,31 @@ void
nv04_fifo_destroy_context(struct nouveau_channel *chan)
{
struct drm_device *dev = chan->dev;
+ struct drm_nouveau_private *dev_priv = dev->dev_private;
+ struct nouveau_fifo_engine *pfifo = &dev_priv->engine.fifo;
+ unsigned long flags;
- nv_wr32(dev, NV04_PFIFO_MODE,
- nv_rd32(dev, NV04_PFIFO_MODE) & ~(1 << chan->id));
+ spin_lock_irqsave(&dev_priv->context_switch_lock, flags);
+ pfifo->reassign(dev, false);
+ /* Unload the context if it's the currently active one */
+ if (pfifo->channel_id(dev) == chan->id) {
+ pfifo->disable(dev);
+ pfifo->unload_context(dev);
+ pfifo->enable(dev);
+ }
+
+ /* Keep it from being rescheduled */
+ nv_mask(dev, NV04_PFIFO_MODE, 1 << chan->id, 0);
+
+ pfifo->reassign(dev, true);
+ spin_unlock_irqrestore(&dev_priv->context_switch_lock, flags);
+
+ /* Free the channel resources */
+ if (chan->user) {
+ iounmap(chan->user);
+ chan->user = NULL;
+ }
nouveau_gpuobj_ref(NULL, &chan->ramfc);
}
@@ -208,7 +235,7 @@ nv04_fifo_unload_context(struct drm_device *dev)
if (chid < 0 || chid >= dev_priv->engine.fifo.channels)
return 0;
- chan = dev_priv->fifos[chid];
+ chan = dev_priv->channels.ptr[chid];
if (!chan) {
NV_ERROR(dev, "Inactive channel on PFIFO: %d\n", chid);
return -EINVAL;
@@ -267,6 +294,7 @@ nv04_fifo_init_ramxx(struct drm_device *dev)
static void
nv04_fifo_init_intr(struct drm_device *dev)
{
+ nouveau_irq_register(dev, 8, nv04_fifo_isr);
nv_wr32(dev, 0x002100, 0xffffffff);
nv_wr32(dev, 0x002140, 0xffffffff);
}
@@ -289,7 +317,7 @@ nv04_fifo_init(struct drm_device *dev)
pfifo->reassign(dev, true);
for (i = 0; i < dev_priv->engine.fifo.channels; i++) {
- if (dev_priv->fifos[i]) {
+ if (dev_priv->channels.ptr[i]) {
uint32_t mode = nv_rd32(dev, NV04_PFIFO_MODE);
nv_wr32(dev, NV04_PFIFO_MODE, mode | (1 << i));
}
@@ -298,3 +326,207 @@ nv04_fifo_init(struct drm_device *dev)
return 0;
}
+void
+nv04_fifo_fini(struct drm_device *dev)
+{
+ nv_wr32(dev, 0x2140, 0x00000000);
+ nouveau_irq_unregister(dev, 8);
+}
+
+static bool
+nouveau_fifo_swmthd(struct drm_device *dev, u32 chid, u32 addr, u32 data)
+{
+ struct drm_nouveau_private *dev_priv = dev->dev_private;
+ struct nouveau_channel *chan = NULL;
+ struct nouveau_gpuobj *obj;
+ unsigned long flags;
+ const int subc = (addr >> 13) & 0x7;
+ const int mthd = addr & 0x1ffc;
+ bool handled = false;
+ u32 engine;
+
+ spin_lock_irqsave(&dev_priv->channels.lock, flags);
+ if (likely(chid >= 0 && chid < dev_priv->engine.fifo.channels))
+ chan = dev_priv->channels.ptr[chid];
+ if (unlikely(!chan))
+ goto out;
+
+ switch (mthd) {
+ case 0x0000: /* bind object to subchannel */
+ obj = nouveau_ramht_find(chan, data);
+ if (unlikely(!obj || obj->engine != NVOBJ_ENGINE_SW))
+ break;
+
+ chan->sw_subchannel[subc] = obj->class;
+ engine = 0x0000000f << (subc * 4);
+
+ nv_mask(dev, NV04_PFIFO_CACHE1_ENGINE, engine, 0x00000000);
+ handled = true;
+ break;
+ default:
+ engine = nv_rd32(dev, NV04_PFIFO_CACHE1_ENGINE);
+ if (unlikely(((engine >> (subc * 4)) & 0xf) != 0))
+ break;
+
+ if (!nouveau_gpuobj_mthd_call(chan, chan->sw_subchannel[subc],
+ mthd, data))
+ handled = true;
+ break;
+ }
+
+out:
+ spin_unlock_irqrestore(&dev_priv->channels.lock, flags);
+ return handled;
+}
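
The decode at the top of nouveau_fifo_swmthd() assumes the usual PFIFO trapped-address layout, which a worked example makes concrete:

	/* subc = (addr >> 13) & 0x7	subchannel
	 * mthd =  addr & 0x1ffc	method offset, word aligned
	 *
	 * e.g. addr = 0x00006040  ->  subc = 3, mthd = 0x0040;
	 * mthd 0x0000 on any subchannel is the "bind object to
	 * subchannel" case handled first in the switch.
	 */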
+
+void
+nv04_fifo_isr(struct drm_device *dev)
+{
+ struct drm_nouveau_private *dev_priv = dev->dev_private;
+ struct nouveau_engine *engine = &dev_priv->engine;
+ uint32_t status, reassign;
+ int cnt = 0;
+
+ reassign = nv_rd32(dev, NV03_PFIFO_CACHES) & 1;
+ while ((status = nv_rd32(dev, NV03_PFIFO_INTR_0)) && (cnt++ < 100)) {
+ uint32_t chid, get;
+
+ nv_wr32(dev, NV03_PFIFO_CACHES, 0);
+
+ chid = engine->fifo.channel_id(dev);
+ get = nv_rd32(dev, NV03_PFIFO_CACHE1_GET);
+
+ if (status & NV_PFIFO_INTR_CACHE_ERROR) {
+ uint32_t mthd, data;
+ int ptr;
+
+			/* NV_PFIFO_CACHE1_GET actually goes to 0xffc before
+			 * wrapping on my G80 chips, but CACHE1 isn't big
+			 * enough for this much data. Tests show that it
+			 * wraps around to the start at GET=0x800; no clue
+			 * as to why.
+			 */
+ ptr = (get & 0x7ff) >> 2;
+
+ if (dev_priv->card_type < NV_40) {
+ mthd = nv_rd32(dev,
+ NV04_PFIFO_CACHE1_METHOD(ptr));
+ data = nv_rd32(dev,
+ NV04_PFIFO_CACHE1_DATA(ptr));
+ } else {
+ mthd = nv_rd32(dev,
+ NV40_PFIFO_CACHE1_METHOD(ptr));
+ data = nv_rd32(dev,
+ NV40_PFIFO_CACHE1_DATA(ptr));
+ }
+
+ if (!nouveau_fifo_swmthd(dev, chid, mthd, data)) {
+ NV_INFO(dev, "PFIFO_CACHE_ERROR - Ch %d/%d "
+ "Mthd 0x%04x Data 0x%08x\n",
+ chid, (mthd >> 13) & 7, mthd & 0x1ffc,
+ data);
+ }
+
+ nv_wr32(dev, NV04_PFIFO_CACHE1_DMA_PUSH, 0);
+ nv_wr32(dev, NV03_PFIFO_INTR_0,
+ NV_PFIFO_INTR_CACHE_ERROR);
+
+ nv_wr32(dev, NV03_PFIFO_CACHE1_PUSH0,
+ nv_rd32(dev, NV03_PFIFO_CACHE1_PUSH0) & ~1);
+ nv_wr32(dev, NV03_PFIFO_CACHE1_GET, get + 4);
+ nv_wr32(dev, NV03_PFIFO_CACHE1_PUSH0,
+ nv_rd32(dev, NV03_PFIFO_CACHE1_PUSH0) | 1);
+ nv_wr32(dev, NV04_PFIFO_CACHE1_HASH, 0);
+
+ nv_wr32(dev, NV04_PFIFO_CACHE1_DMA_PUSH,
+ nv_rd32(dev, NV04_PFIFO_CACHE1_DMA_PUSH) | 1);
+ nv_wr32(dev, NV04_PFIFO_CACHE1_PULL0, 1);
+
+ status &= ~NV_PFIFO_INTR_CACHE_ERROR;
+ }
+
+ if (status & NV_PFIFO_INTR_DMA_PUSHER) {
+ u32 dma_get = nv_rd32(dev, 0x003244);
+ u32 dma_put = nv_rd32(dev, 0x003240);
+ u32 push = nv_rd32(dev, 0x003220);
+ u32 state = nv_rd32(dev, 0x003228);
+
+ if (dev_priv->card_type == NV_50) {
+ u32 ho_get = nv_rd32(dev, 0x003328);
+ u32 ho_put = nv_rd32(dev, 0x003320);
+ u32 ib_get = nv_rd32(dev, 0x003334);
+ u32 ib_put = nv_rd32(dev, 0x003330);
+
+ if (nouveau_ratelimit())
+ NV_INFO(dev, "PFIFO_DMA_PUSHER - Ch %d Get 0x%02x%08x "
+ "Put 0x%02x%08x IbGet 0x%08x IbPut 0x%08x "
+ "State 0x%08x Push 0x%08x\n",
+ chid, ho_get, dma_get, ho_put,
+ dma_put, ib_get, ib_put, state,
+ push);
+
+ /* METHOD_COUNT, in DMA_STATE on earlier chipsets */
+ nv_wr32(dev, 0x003364, 0x00000000);
+ if (dma_get != dma_put || ho_get != ho_put) {
+ nv_wr32(dev, 0x003244, dma_put);
+ nv_wr32(dev, 0x003328, ho_put);
+ } else
+ if (ib_get != ib_put) {
+ nv_wr32(dev, 0x003334, ib_put);
+ }
+ } else {
+ NV_INFO(dev, "PFIFO_DMA_PUSHER - Ch %d Get 0x%08x "
+ "Put 0x%08x State 0x%08x Push 0x%08x\n",
+ chid, dma_get, dma_put, state, push);
+
+ if (dma_get != dma_put)
+ nv_wr32(dev, 0x003244, dma_put);
+ }
+
+ nv_wr32(dev, 0x003228, 0x00000000);
+ nv_wr32(dev, 0x003220, 0x00000001);
+ nv_wr32(dev, 0x002100, NV_PFIFO_INTR_DMA_PUSHER);
+ status &= ~NV_PFIFO_INTR_DMA_PUSHER;
+ }
+
+ if (status & NV_PFIFO_INTR_SEMAPHORE) {
+ uint32_t sem;
+
+ status &= ~NV_PFIFO_INTR_SEMAPHORE;
+ nv_wr32(dev, NV03_PFIFO_INTR_0,
+ NV_PFIFO_INTR_SEMAPHORE);
+
+ sem = nv_rd32(dev, NV10_PFIFO_CACHE1_SEMAPHORE);
+ nv_wr32(dev, NV10_PFIFO_CACHE1_SEMAPHORE, sem | 0x1);
+
+ nv_wr32(dev, NV03_PFIFO_CACHE1_GET, get + 4);
+ nv_wr32(dev, NV04_PFIFO_CACHE1_PULL0, 1);
+ }
+
+ if (dev_priv->card_type == NV_50) {
+ if (status & 0x00000010) {
+ nv50_fb_vm_trap(dev, 1, "PFIFO_BAR_FAULT");
+ status &= ~0x00000010;
+ nv_wr32(dev, 0x002100, 0x00000010);
+ }
+ }
+
+ if (status) {
+ if (nouveau_ratelimit())
+ NV_INFO(dev, "PFIFO_INTR 0x%08x - Ch %d\n",
+ status, chid);
+ nv_wr32(dev, NV03_PFIFO_INTR_0, status);
+ status = 0;
+ }
+
+ nv_wr32(dev, NV03_PFIFO_CACHES, reassign);
+ }
+
+ if (status) {
+ NV_INFO(dev, "PFIFO still angry after %d spins, halt\n", cnt);
+ nv_wr32(dev, 0x2140, 0);
+ nv_wr32(dev, 0x140, 0);
+ }
+
+ nv_wr32(dev, NV03_PMC_INTR_0, NV_PMC_INTR_0_PFIFO_PENDING);
+}
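
One detail worth noting in the NV50 branch above: the pushbuffer GET/PUT pointers are wider than 32 bits, split across a high-order and a low-order register, which is why the message prints each as a pair:

	/* e.g. ho_get = 0x01, dma_get = 0x23456789 is read as the
	 * single byte offset 0x0123456789 (an inference from the
	 * "0x%02x%08x" format used above) */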
diff --git a/drivers/gpu/drm/nouveau/nv04_graph.c b/drivers/gpu/drm/nouveau/nv04_graph.c
index c8973421b63..af75015068d 100644
--- a/drivers/gpu/drm/nouveau/nv04_graph.c
+++ b/drivers/gpu/drm/nouveau/nv04_graph.c
@@ -26,6 +26,11 @@
#include "drm.h"
#include "nouveau_drm.h"
#include "nouveau_drv.h"
+#include "nouveau_hw.h"
+#include "nouveau_util.h"
+
+static int nv04_graph_register(struct drm_device *dev);
+static void nv04_graph_isr(struct drm_device *dev);
static uint32_t nv04_graph_ctx_regs[] = {
0x0040053c,
@@ -357,10 +362,10 @@ nv04_graph_channel(struct drm_device *dev)
if (chid >= dev_priv->engine.fifo.channels)
return NULL;
- return dev_priv->fifos[chid];
+ return dev_priv->channels.ptr[chid];
}
-void
+static void
nv04_graph_context_switch(struct drm_device *dev)
{
struct drm_nouveau_private *dev_priv = dev->dev_private;
@@ -368,7 +373,6 @@ nv04_graph_context_switch(struct drm_device *dev)
struct nouveau_channel *chan = NULL;
int chid;
- pgraph->fifo_access(dev, false);
nouveau_wait_for_idle(dev);
/* If previous context is valid, we need to save it */
@@ -376,11 +380,9 @@ nv04_graph_context_switch(struct drm_device *dev)
/* Load context for next channel */
chid = dev_priv->engine.fifo.channel_id(dev);
- chan = dev_priv->fifos[chid];
+ chan = dev_priv->channels.ptr[chid];
if (chan)
nv04_graph_load_context(chan);
-
- pgraph->fifo_access(dev, true);
}
static uint32_t *ctx_reg(struct graph_state *ctx, uint32_t reg)
@@ -412,10 +414,25 @@ int nv04_graph_create_context(struct nouveau_channel *chan)
void nv04_graph_destroy_context(struct nouveau_channel *chan)
{
+ struct drm_device *dev = chan->dev;
+ struct drm_nouveau_private *dev_priv = dev->dev_private;
+ struct nouveau_pgraph_engine *pgraph = &dev_priv->engine.graph;
struct graph_state *pgraph_ctx = chan->pgraph_ctx;
+ unsigned long flags;
+
+ spin_lock_irqsave(&dev_priv->context_switch_lock, flags);
+ pgraph->fifo_access(dev, false);
+
+ /* Unload the context if it's the currently active one */
+ if (pgraph->channel(dev) == chan)
+ pgraph->unload_context(dev);
+ /* Free the context resources */
kfree(pgraph_ctx);
chan->pgraph_ctx = NULL;
+
+ pgraph->fifo_access(dev, true);
+ spin_unlock_irqrestore(&dev_priv->context_switch_lock, flags);
}
int nv04_graph_load_context(struct nouveau_channel *chan)
@@ -468,13 +485,19 @@ int nv04_graph_init(struct drm_device *dev)
{
struct drm_nouveau_private *dev_priv = dev->dev_private;
uint32_t tmp;
+ int ret;
nv_wr32(dev, NV03_PMC_ENABLE, nv_rd32(dev, NV03_PMC_ENABLE) &
~NV_PMC_ENABLE_PGRAPH);
nv_wr32(dev, NV03_PMC_ENABLE, nv_rd32(dev, NV03_PMC_ENABLE) |
NV_PMC_ENABLE_PGRAPH);
+ ret = nv04_graph_register(dev);
+ if (ret)
+ return ret;
+
/* Enable PGRAPH interrupts */
+ nouveau_irq_register(dev, 12, nv04_graph_isr);
nv_wr32(dev, NV03_PGRAPH_INTR, 0xFFFFFFFF);
nv_wr32(dev, NV03_PGRAPH_INTR_EN, 0xFFFFFFFF);
@@ -510,6 +533,8 @@ int nv04_graph_init(struct drm_device *dev)
void nv04_graph_takedown(struct drm_device *dev)
{
+ nv_wr32(dev, NV03_PGRAPH_INTR_EN, 0x00000000);
+ nouveau_irq_unregister(dev, 12);
}
void
@@ -524,13 +549,27 @@ nv04_graph_fifo_access(struct drm_device *dev, bool enabled)
}
static int
-nv04_graph_mthd_set_ref(struct nouveau_channel *chan, int grclass,
- int mthd, uint32_t data)
+nv04_graph_mthd_set_ref(struct nouveau_channel *chan,
+ u32 class, u32 mthd, u32 data)
{
atomic_set(&chan->fence.last_sequence_irq, data);
return 0;
}
+int
+nv04_graph_mthd_page_flip(struct nouveau_channel *chan,
+ u32 class, u32 mthd, u32 data)
+{
+ struct drm_device *dev = chan->dev;
+ struct nouveau_page_flip_state s;
+
+ if (!nouveau_finish_page_flip(chan, &s))
+ nv_set_crtc_base(dev, s.crtc,
+ s.offset + s.y * s.pitch + s.x * s.bpp / 8);
+
+ return 0;
+}
+
/*
* Software methods, why they are needed, and how they all work:
*
@@ -606,12 +645,12 @@ nv04_graph_mthd_set_ref(struct nouveau_channel *chan, int grclass,
*/
static void
-nv04_graph_set_ctx1(struct nouveau_channel *chan, uint32_t mask, uint32_t value)
+nv04_graph_set_ctx1(struct nouveau_channel *chan, u32 mask, u32 value)
{
struct drm_device *dev = chan->dev;
- uint32_t instance = (nv_rd32(dev, NV04_PGRAPH_CTX_SWITCH4) & 0xffff) << 4;
+ u32 instance = (nv_rd32(dev, NV04_PGRAPH_CTX_SWITCH4) & 0xffff) << 4;
int subc = (nv_rd32(dev, NV04_PGRAPH_TRAPPED_ADDR) >> 13) & 0x7;
- uint32_t tmp;
+ u32 tmp;
tmp = nv_ri32(dev, instance);
tmp &= ~mask;
@@ -623,11 +662,11 @@ nv04_graph_set_ctx1(struct nouveau_channel *chan, uint32_t mask, uint32_t value)
}
static void
-nv04_graph_set_ctx_val(struct nouveau_channel *chan, uint32_t mask, uint32_t value)
+nv04_graph_set_ctx_val(struct nouveau_channel *chan, u32 mask, u32 value)
{
struct drm_device *dev = chan->dev;
- uint32_t instance = (nv_rd32(dev, NV04_PGRAPH_CTX_SWITCH4) & 0xffff) << 4;
- uint32_t tmp, ctx1;
+ u32 instance = (nv_rd32(dev, NV04_PGRAPH_CTX_SWITCH4) & 0xffff) << 4;
+ u32 tmp, ctx1;
int class, op, valid = 1;
ctx1 = nv_ri32(dev, instance);
@@ -672,13 +711,13 @@ nv04_graph_set_ctx_val(struct nouveau_channel *chan, uint32_t mask, uint32_t val
}
static int
-nv04_graph_mthd_set_operation(struct nouveau_channel *chan, int grclass,
- int mthd, uint32_t data)
+nv04_graph_mthd_set_operation(struct nouveau_channel *chan,
+ u32 class, u32 mthd, u32 data)
{
if (data > 5)
return 1;
/* Old versions of the objects only accept first three operations. */
- if (data > 2 && grclass < 0x40)
+ if (data > 2 && class < 0x40)
return 1;
nv04_graph_set_ctx1(chan, 0x00038000, data << 15);
/* changing operation changes set of objects needed for validation */
@@ -687,8 +726,8 @@ nv04_graph_mthd_set_operation(struct nouveau_channel *chan, int grclass,
}
static int
-nv04_graph_mthd_surf3d_clip_h(struct nouveau_channel *chan, int grclass,
- int mthd, uint32_t data)
+nv04_graph_mthd_surf3d_clip_h(struct nouveau_channel *chan,
+ u32 class, u32 mthd, u32 data)
{
uint32_t min = data & 0xffff, max;
uint32_t w = data >> 16;
@@ -706,8 +745,8 @@ nv04_graph_mthd_surf3d_clip_h(struct nouveau_channel *chan, int grclass,
}
static int
-nv04_graph_mthd_surf3d_clip_v(struct nouveau_channel *chan, int grclass,
- int mthd, uint32_t data)
+nv04_graph_mthd_surf3d_clip_v(struct nouveau_channel *chan,
+ u32 class, u32 mthd, u32 data)
{
uint32_t min = data & 0xffff, max;
uint32_t w = data >> 16;
@@ -725,8 +764,8 @@ nv04_graph_mthd_surf3d_clip_v(struct nouveau_channel *chan, int grclass,
}
static int
-nv04_graph_mthd_bind_surf2d(struct nouveau_channel *chan, int grclass,
- int mthd, uint32_t data)
+nv04_graph_mthd_bind_surf2d(struct nouveau_channel *chan,
+ u32 class, u32 mthd, u32 data)
{
switch (nv_ri32(chan->dev, data << 4) & 0xff) {
case 0x30:
@@ -742,8 +781,8 @@ nv04_graph_mthd_bind_surf2d(struct nouveau_channel *chan, int grclass,
}
static int
-nv04_graph_mthd_bind_surf2d_swzsurf(struct nouveau_channel *chan, int grclass,
- int mthd, uint32_t data)
+nv04_graph_mthd_bind_surf2d_swzsurf(struct nouveau_channel *chan,
+ u32 class, u32 mthd, u32 data)
{
switch (nv_ri32(chan->dev, data << 4) & 0xff) {
case 0x30:
@@ -763,8 +802,8 @@ nv04_graph_mthd_bind_surf2d_swzsurf(struct nouveau_channel *chan, int grclass,
}
static int
-nv04_graph_mthd_bind_nv01_patt(struct nouveau_channel *chan, int grclass,
- int mthd, uint32_t data)
+nv04_graph_mthd_bind_nv01_patt(struct nouveau_channel *chan,
+ u32 class, u32 mthd, u32 data)
{
switch (nv_ri32(chan->dev, data << 4) & 0xff) {
case 0x30:
@@ -778,8 +817,8 @@ nv04_graph_mthd_bind_nv01_patt(struct nouveau_channel *chan, int grclass,
}
static int
-nv04_graph_mthd_bind_nv04_patt(struct nouveau_channel *chan, int grclass,
- int mthd, uint32_t data)
+nv04_graph_mthd_bind_nv04_patt(struct nouveau_channel *chan,
+ u32 class, u32 mthd, u32 data)
{
switch (nv_ri32(chan->dev, data << 4) & 0xff) {
case 0x30:
@@ -793,8 +832,8 @@ nv04_graph_mthd_bind_nv04_patt(struct nouveau_channel *chan, int grclass,
}
static int
-nv04_graph_mthd_bind_rop(struct nouveau_channel *chan, int grclass,
- int mthd, uint32_t data)
+nv04_graph_mthd_bind_rop(struct nouveau_channel *chan,
+ u32 class, u32 mthd, u32 data)
{
switch (nv_ri32(chan->dev, data << 4) & 0xff) {
case 0x30:
@@ -808,8 +847,8 @@ nv04_graph_mthd_bind_rop(struct nouveau_channel *chan, int grclass,
}
static int
-nv04_graph_mthd_bind_beta1(struct nouveau_channel *chan, int grclass,
- int mthd, uint32_t data)
+nv04_graph_mthd_bind_beta1(struct nouveau_channel *chan,
+ u32 class, u32 mthd, u32 data)
{
switch (nv_ri32(chan->dev, data << 4) & 0xff) {
case 0x30:
@@ -823,8 +862,8 @@ nv04_graph_mthd_bind_beta1(struct nouveau_channel *chan, int grclass,
}
static int
-nv04_graph_mthd_bind_beta4(struct nouveau_channel *chan, int grclass,
- int mthd, uint32_t data)
+nv04_graph_mthd_bind_beta4(struct nouveau_channel *chan,
+ u32 class, u32 mthd, u32 data)
{
switch (nv_ri32(chan->dev, data << 4) & 0xff) {
case 0x30:
@@ -838,8 +877,8 @@ nv04_graph_mthd_bind_beta4(struct nouveau_channel *chan, int grclass,
}
static int
-nv04_graph_mthd_bind_surf_dst(struct nouveau_channel *chan, int grclass,
- int mthd, uint32_t data)
+nv04_graph_mthd_bind_surf_dst(struct nouveau_channel *chan,
+ u32 class, u32 mthd, u32 data)
{
switch (nv_ri32(chan->dev, data << 4) & 0xff) {
case 0x30:
@@ -853,8 +892,8 @@ nv04_graph_mthd_bind_surf_dst(struct nouveau_channel *chan, int grclass,
}
static int
-nv04_graph_mthd_bind_surf_src(struct nouveau_channel *chan, int grclass,
- int mthd, uint32_t data)
+nv04_graph_mthd_bind_surf_src(struct nouveau_channel *chan,
+ u32 class, u32 mthd, u32 data)
{
switch (nv_ri32(chan->dev, data << 4) & 0xff) {
case 0x30:
@@ -868,8 +907,8 @@ nv04_graph_mthd_bind_surf_src(struct nouveau_channel *chan, int grclass,
}
static int
-nv04_graph_mthd_bind_surf_color(struct nouveau_channel *chan, int grclass,
- int mthd, uint32_t data)
+nv04_graph_mthd_bind_surf_color(struct nouveau_channel *chan,
+ u32 class, u32 mthd, u32 data)
{
switch (nv_ri32(chan->dev, data << 4) & 0xff) {
case 0x30:
@@ -883,8 +922,8 @@ nv04_graph_mthd_bind_surf_color(struct nouveau_channel *chan, int grclass,
}
static int
-nv04_graph_mthd_bind_surf_zeta(struct nouveau_channel *chan, int grclass,
- int mthd, uint32_t data)
+nv04_graph_mthd_bind_surf_zeta(struct nouveau_channel *chan,
+ u32 class, u32 mthd, u32 data)
{
switch (nv_ri32(chan->dev, data << 4) & 0xff) {
case 0x30:
@@ -898,8 +937,8 @@ nv04_graph_mthd_bind_surf_zeta(struct nouveau_channel *chan, int grclass,
}
static int
-nv04_graph_mthd_bind_clip(struct nouveau_channel *chan, int grclass,
- int mthd, uint32_t data)
+nv04_graph_mthd_bind_clip(struct nouveau_channel *chan,
+ u32 class, u32 mthd, u32 data)
{
switch (nv_ri32(chan->dev, data << 4) & 0xff) {
case 0x30:
@@ -913,8 +952,8 @@ nv04_graph_mthd_bind_clip(struct nouveau_channel *chan, int grclass,
}
static int
-nv04_graph_mthd_bind_chroma(struct nouveau_channel *chan, int grclass,
- int mthd, uint32_t data)
+nv04_graph_mthd_bind_chroma(struct nouveau_channel *chan,
+ u32 class, u32 mthd, u32 data)
{
switch (nv_ri32(chan->dev, data << 4) & 0xff) {
case 0x30:
@@ -930,194 +969,346 @@ nv04_graph_mthd_bind_chroma(struct nouveau_channel *chan, int grclass,
return 1;
}
-static struct nouveau_pgraph_object_method nv04_graph_mthds_sw[] = {
- { 0x0150, nv04_graph_mthd_set_ref },
- {}
-};
-
-static struct nouveau_pgraph_object_method nv04_graph_mthds_nv03_gdirect[] = {
- { 0x0184, nv04_graph_mthd_bind_nv01_patt },
- { 0x0188, nv04_graph_mthd_bind_rop },
- { 0x018c, nv04_graph_mthd_bind_beta1 },
- { 0x0190, nv04_graph_mthd_bind_surf_dst },
- { 0x02fc, nv04_graph_mthd_set_operation },
- {},
-};
-
-static struct nouveau_pgraph_object_method nv04_graph_mthds_nv04_gdirect[] = {
- { 0x0188, nv04_graph_mthd_bind_nv04_patt },
- { 0x018c, nv04_graph_mthd_bind_rop },
- { 0x0190, nv04_graph_mthd_bind_beta1 },
- { 0x0194, nv04_graph_mthd_bind_beta4 },
- { 0x0198, nv04_graph_mthd_bind_surf2d },
- { 0x02fc, nv04_graph_mthd_set_operation },
- {},
-};
-
-static struct nouveau_pgraph_object_method nv04_graph_mthds_nv01_imageblit[] = {
- { 0x0184, nv04_graph_mthd_bind_chroma },
- { 0x0188, nv04_graph_mthd_bind_clip },
- { 0x018c, nv04_graph_mthd_bind_nv01_patt },
- { 0x0190, nv04_graph_mthd_bind_rop },
- { 0x0194, nv04_graph_mthd_bind_beta1 },
- { 0x0198, nv04_graph_mthd_bind_surf_dst },
- { 0x019c, nv04_graph_mthd_bind_surf_src },
- { 0x02fc, nv04_graph_mthd_set_operation },
- {},
-};
-
-static struct nouveau_pgraph_object_method nv04_graph_mthds_nv04_imageblit_ifc[] = {
- { 0x0184, nv04_graph_mthd_bind_chroma },
- { 0x0188, nv04_graph_mthd_bind_clip },
- { 0x018c, nv04_graph_mthd_bind_nv04_patt },
- { 0x0190, nv04_graph_mthd_bind_rop },
- { 0x0194, nv04_graph_mthd_bind_beta1 },
- { 0x0198, nv04_graph_mthd_bind_beta4 },
- { 0x019c, nv04_graph_mthd_bind_surf2d },
- { 0x02fc, nv04_graph_mthd_set_operation },
- {},
-};
-
-static struct nouveau_pgraph_object_method nv04_graph_mthds_nv04_iifc[] = {
- { 0x0188, nv04_graph_mthd_bind_chroma },
- { 0x018c, nv04_graph_mthd_bind_clip },
- { 0x0190, nv04_graph_mthd_bind_nv04_patt },
- { 0x0194, nv04_graph_mthd_bind_rop },
- { 0x0198, nv04_graph_mthd_bind_beta1 },
- { 0x019c, nv04_graph_mthd_bind_beta4 },
- { 0x01a0, nv04_graph_mthd_bind_surf2d_swzsurf },
- { 0x03e4, nv04_graph_mthd_set_operation },
- {},
-};
-
-static struct nouveau_pgraph_object_method nv04_graph_mthds_nv01_ifc[] = {
- { 0x0184, nv04_graph_mthd_bind_chroma },
- { 0x0188, nv04_graph_mthd_bind_clip },
- { 0x018c, nv04_graph_mthd_bind_nv01_patt },
- { 0x0190, nv04_graph_mthd_bind_rop },
- { 0x0194, nv04_graph_mthd_bind_beta1 },
- { 0x0198, nv04_graph_mthd_bind_surf_dst },
- { 0x02fc, nv04_graph_mthd_set_operation },
- {},
-};
-
-static struct nouveau_pgraph_object_method nv04_graph_mthds_nv03_sifc[] = {
- { 0x0184, nv04_graph_mthd_bind_chroma },
- { 0x0188, nv04_graph_mthd_bind_nv01_patt },
- { 0x018c, nv04_graph_mthd_bind_rop },
- { 0x0190, nv04_graph_mthd_bind_beta1 },
- { 0x0194, nv04_graph_mthd_bind_surf_dst },
- { 0x02fc, nv04_graph_mthd_set_operation },
- {},
-};
-
-static struct nouveau_pgraph_object_method nv04_graph_mthds_nv04_sifc[] = {
- { 0x0184, nv04_graph_mthd_bind_chroma },
- { 0x0188, nv04_graph_mthd_bind_nv04_patt },
- { 0x018c, nv04_graph_mthd_bind_rop },
- { 0x0190, nv04_graph_mthd_bind_beta1 },
- { 0x0194, nv04_graph_mthd_bind_beta4 },
- { 0x0198, nv04_graph_mthd_bind_surf2d },
- { 0x02fc, nv04_graph_mthd_set_operation },
- {},
-};
-
-static struct nouveau_pgraph_object_method nv04_graph_mthds_nv03_sifm[] = {
- { 0x0188, nv04_graph_mthd_bind_nv01_patt },
- { 0x018c, nv04_graph_mthd_bind_rop },
- { 0x0190, nv04_graph_mthd_bind_beta1 },
- { 0x0194, nv04_graph_mthd_bind_surf_dst },
- { 0x0304, nv04_graph_mthd_set_operation },
- {},
-};
-
-static struct nouveau_pgraph_object_method nv04_graph_mthds_nv04_sifm[] = {
- { 0x0188, nv04_graph_mthd_bind_nv04_patt },
- { 0x018c, nv04_graph_mthd_bind_rop },
- { 0x0190, nv04_graph_mthd_bind_beta1 },
- { 0x0194, nv04_graph_mthd_bind_beta4 },
- { 0x0198, nv04_graph_mthd_bind_surf2d_swzsurf },
- { 0x0304, nv04_graph_mthd_set_operation },
- {},
-};
+static int
+nv04_graph_register(struct drm_device *dev)
+{
+ struct drm_nouveau_private *dev_priv = dev->dev_private;
-static struct nouveau_pgraph_object_method nv04_graph_mthds_nv01_shape[] = {
- { 0x0184, nv04_graph_mthd_bind_clip },
- { 0x0188, nv04_graph_mthd_bind_nv01_patt },
- { 0x018c, nv04_graph_mthd_bind_rop },
- { 0x0190, nv04_graph_mthd_bind_beta1 },
- { 0x0194, nv04_graph_mthd_bind_surf_dst },
- { 0x02fc, nv04_graph_mthd_set_operation },
- {},
-};
+ if (dev_priv->engine.graph.registered)
+ return 0;
-static struct nouveau_pgraph_object_method nv04_graph_mthds_nv04_shape[] = {
- { 0x0184, nv04_graph_mthd_bind_clip },
- { 0x0188, nv04_graph_mthd_bind_nv04_patt },
- { 0x018c, nv04_graph_mthd_bind_rop },
- { 0x0190, nv04_graph_mthd_bind_beta1 },
- { 0x0194, nv04_graph_mthd_bind_beta4 },
- { 0x0198, nv04_graph_mthd_bind_surf2d },
- { 0x02fc, nv04_graph_mthd_set_operation },
- {},
+ /* dvd subpicture */
+ NVOBJ_CLASS(dev, 0x0038, GR);
+
+ /* m2mf */
+ NVOBJ_CLASS(dev, 0x0039, GR);
+
+ /* nv03 gdirect */
+ NVOBJ_CLASS(dev, 0x004b, GR);
+ NVOBJ_MTHD (dev, 0x004b, 0x0184, nv04_graph_mthd_bind_nv01_patt);
+ NVOBJ_MTHD (dev, 0x004b, 0x0188, nv04_graph_mthd_bind_rop);
+ NVOBJ_MTHD (dev, 0x004b, 0x018c, nv04_graph_mthd_bind_beta1);
+ NVOBJ_MTHD (dev, 0x004b, 0x0190, nv04_graph_mthd_bind_surf_dst);
+ NVOBJ_MTHD (dev, 0x004b, 0x02fc, nv04_graph_mthd_set_operation);
+
+ /* nv04 gdirect */
+ NVOBJ_CLASS(dev, 0x004a, GR);
+ NVOBJ_MTHD (dev, 0x004a, 0x0188, nv04_graph_mthd_bind_nv04_patt);
+ NVOBJ_MTHD (dev, 0x004a, 0x018c, nv04_graph_mthd_bind_rop);
+ NVOBJ_MTHD (dev, 0x004a, 0x0190, nv04_graph_mthd_bind_beta1);
+ NVOBJ_MTHD (dev, 0x004a, 0x0194, nv04_graph_mthd_bind_beta4);
+ NVOBJ_MTHD (dev, 0x004a, 0x0198, nv04_graph_mthd_bind_surf2d);
+ NVOBJ_MTHD (dev, 0x004a, 0x02fc, nv04_graph_mthd_set_operation);
+
+ /* nv01 imageblit */
+ NVOBJ_CLASS(dev, 0x001f, GR);
+ NVOBJ_MTHD (dev, 0x001f, 0x0184, nv04_graph_mthd_bind_chroma);
+ NVOBJ_MTHD (dev, 0x001f, 0x0188, nv04_graph_mthd_bind_clip);
+ NVOBJ_MTHD (dev, 0x001f, 0x018c, nv04_graph_mthd_bind_nv01_patt);
+ NVOBJ_MTHD (dev, 0x001f, 0x0190, nv04_graph_mthd_bind_rop);
+ NVOBJ_MTHD (dev, 0x001f, 0x0194, nv04_graph_mthd_bind_beta1);
+ NVOBJ_MTHD (dev, 0x001f, 0x0198, nv04_graph_mthd_bind_surf_dst);
+ NVOBJ_MTHD (dev, 0x001f, 0x019c, nv04_graph_mthd_bind_surf_src);
+ NVOBJ_MTHD (dev, 0x001f, 0x02fc, nv04_graph_mthd_set_operation);
+
+ /* nv04 imageblit */
+ NVOBJ_CLASS(dev, 0x005f, GR);
+ NVOBJ_MTHD (dev, 0x005f, 0x0184, nv04_graph_mthd_bind_chroma);
+ NVOBJ_MTHD (dev, 0x005f, 0x0188, nv04_graph_mthd_bind_clip);
+ NVOBJ_MTHD (dev, 0x005f, 0x018c, nv04_graph_mthd_bind_nv04_patt);
+ NVOBJ_MTHD (dev, 0x005f, 0x0190, nv04_graph_mthd_bind_rop);
+ NVOBJ_MTHD (dev, 0x005f, 0x0194, nv04_graph_mthd_bind_beta1);
+ NVOBJ_MTHD (dev, 0x005f, 0x0198, nv04_graph_mthd_bind_beta4);
+ NVOBJ_MTHD (dev, 0x005f, 0x019c, nv04_graph_mthd_bind_surf2d);
+ NVOBJ_MTHD (dev, 0x005f, 0x02fc, nv04_graph_mthd_set_operation);
+
+ /* nv04 iifc */
+ NVOBJ_CLASS(dev, 0x0060, GR);
+ NVOBJ_MTHD (dev, 0x0060, 0x0188, nv04_graph_mthd_bind_chroma);
+ NVOBJ_MTHD (dev, 0x0060, 0x018c, nv04_graph_mthd_bind_clip);
+ NVOBJ_MTHD (dev, 0x0060, 0x0190, nv04_graph_mthd_bind_nv04_patt);
+ NVOBJ_MTHD (dev, 0x0060, 0x0194, nv04_graph_mthd_bind_rop);
+ NVOBJ_MTHD (dev, 0x0060, 0x0198, nv04_graph_mthd_bind_beta1);
+ NVOBJ_MTHD (dev, 0x0060, 0x019c, nv04_graph_mthd_bind_beta4);
+ NVOBJ_MTHD (dev, 0x0060, 0x01a0, nv04_graph_mthd_bind_surf2d_swzsurf);
+ NVOBJ_MTHD (dev, 0x0060, 0x03e4, nv04_graph_mthd_set_operation);
+
+ /* nv05 iifc */
+ NVOBJ_CLASS(dev, 0x0064, GR);
+
+ /* nv01 ifc */
+ NVOBJ_CLASS(dev, 0x0021, GR);
+ NVOBJ_MTHD (dev, 0x0021, 0x0184, nv04_graph_mthd_bind_chroma);
+ NVOBJ_MTHD (dev, 0x0021, 0x0188, nv04_graph_mthd_bind_clip);
+ NVOBJ_MTHD (dev, 0x0021, 0x018c, nv04_graph_mthd_bind_nv01_patt);
+ NVOBJ_MTHD (dev, 0x0021, 0x0190, nv04_graph_mthd_bind_rop);
+ NVOBJ_MTHD (dev, 0x0021, 0x0194, nv04_graph_mthd_bind_beta1);
+ NVOBJ_MTHD (dev, 0x0021, 0x0198, nv04_graph_mthd_bind_surf_dst);
+ NVOBJ_MTHD (dev, 0x0021, 0x02fc, nv04_graph_mthd_set_operation);
+
+ /* nv04 ifc */
+ NVOBJ_CLASS(dev, 0x0061, GR);
+ NVOBJ_MTHD (dev, 0x0061, 0x0184, nv04_graph_mthd_bind_chroma);
+ NVOBJ_MTHD (dev, 0x0061, 0x0188, nv04_graph_mthd_bind_clip);
+ NVOBJ_MTHD (dev, 0x0061, 0x018c, nv04_graph_mthd_bind_nv04_patt);
+ NVOBJ_MTHD (dev, 0x0061, 0x0190, nv04_graph_mthd_bind_rop);
+ NVOBJ_MTHD (dev, 0x0061, 0x0194, nv04_graph_mthd_bind_beta1);
+ NVOBJ_MTHD (dev, 0x0061, 0x0198, nv04_graph_mthd_bind_beta4);
+ NVOBJ_MTHD (dev, 0x0061, 0x019c, nv04_graph_mthd_bind_surf2d);
+ NVOBJ_MTHD (dev, 0x0061, 0x02fc, nv04_graph_mthd_set_operation);
+
+ /* nv05 ifc */
+ NVOBJ_CLASS(dev, 0x0065, GR);
+
+ /* nv03 sifc */
+ NVOBJ_CLASS(dev, 0x0036, GR);
+ NVOBJ_MTHD (dev, 0x0036, 0x0184, nv04_graph_mthd_bind_chroma);
+ NVOBJ_MTHD (dev, 0x0036, 0x0188, nv04_graph_mthd_bind_nv01_patt);
+ NVOBJ_MTHD (dev, 0x0036, 0x018c, nv04_graph_mthd_bind_rop);
+ NVOBJ_MTHD (dev, 0x0036, 0x0190, nv04_graph_mthd_bind_beta1);
+ NVOBJ_MTHD (dev, 0x0036, 0x0194, nv04_graph_mthd_bind_surf_dst);
+ NVOBJ_MTHD (dev, 0x0036, 0x02fc, nv04_graph_mthd_set_operation);
+
+ /* nv04 sifc */
+ NVOBJ_CLASS(dev, 0x0076, GR);
+ NVOBJ_MTHD (dev, 0x0076, 0x0184, nv04_graph_mthd_bind_chroma);
+ NVOBJ_MTHD (dev, 0x0076, 0x0188, nv04_graph_mthd_bind_nv04_patt);
+ NVOBJ_MTHD (dev, 0x0076, 0x018c, nv04_graph_mthd_bind_rop);
+ NVOBJ_MTHD (dev, 0x0076, 0x0190, nv04_graph_mthd_bind_beta1);
+ NVOBJ_MTHD (dev, 0x0076, 0x0194, nv04_graph_mthd_bind_beta4);
+ NVOBJ_MTHD (dev, 0x0076, 0x0198, nv04_graph_mthd_bind_surf2d);
+ NVOBJ_MTHD (dev, 0x0076, 0x02fc, nv04_graph_mthd_set_operation);
+
+ /* nv05 sifc */
+ NVOBJ_CLASS(dev, 0x0066, GR);
+
+ /* nv03 sifm */
+ NVOBJ_CLASS(dev, 0x0037, GR);
+ NVOBJ_MTHD (dev, 0x0037, 0x0188, nv04_graph_mthd_bind_nv01_patt);
+ NVOBJ_MTHD (dev, 0x0037, 0x018c, nv04_graph_mthd_bind_rop);
+ NVOBJ_MTHD (dev, 0x0037, 0x0190, nv04_graph_mthd_bind_beta1);
+ NVOBJ_MTHD (dev, 0x0037, 0x0194, nv04_graph_mthd_bind_surf_dst);
+ NVOBJ_MTHD (dev, 0x0037, 0x0304, nv04_graph_mthd_set_operation);
+
+ /* nv04 sifm */
+ NVOBJ_CLASS(dev, 0x0077, GR);
+ NVOBJ_MTHD (dev, 0x0077, 0x0188, nv04_graph_mthd_bind_nv04_patt);
+ NVOBJ_MTHD (dev, 0x0077, 0x018c, nv04_graph_mthd_bind_rop);
+ NVOBJ_MTHD (dev, 0x0077, 0x0190, nv04_graph_mthd_bind_beta1);
+ NVOBJ_MTHD (dev, 0x0077, 0x0194, nv04_graph_mthd_bind_beta4);
+ NVOBJ_MTHD (dev, 0x0077, 0x0198, nv04_graph_mthd_bind_surf2d_swzsurf);
+ NVOBJ_MTHD (dev, 0x0077, 0x0304, nv04_graph_mthd_set_operation);
+
+ /* null */
+ NVOBJ_CLASS(dev, 0x0030, GR);
+
+ /* surf2d */
+ NVOBJ_CLASS(dev, 0x0042, GR);
+
+ /* rop */
+ NVOBJ_CLASS(dev, 0x0043, GR);
+
+ /* beta1 */
+ NVOBJ_CLASS(dev, 0x0012, GR);
+
+ /* beta4 */
+ NVOBJ_CLASS(dev, 0x0072, GR);
+
+ /* cliprect */
+ NVOBJ_CLASS(dev, 0x0019, GR);
+
+ /* nv01 pattern */
+ NVOBJ_CLASS(dev, 0x0018, GR);
+
+ /* nv04 pattern */
+ NVOBJ_CLASS(dev, 0x0044, GR);
+
+ /* swzsurf */
+ NVOBJ_CLASS(dev, 0x0052, GR);
+
+ /* surf3d */
+ NVOBJ_CLASS(dev, 0x0053, GR);
+ NVOBJ_MTHD (dev, 0x0053, 0x02f8, nv04_graph_mthd_surf3d_clip_h);
+ NVOBJ_MTHD (dev, 0x0053, 0x02fc, nv04_graph_mthd_surf3d_clip_v);
+
+ /* nv03 tex_tri */
+ NVOBJ_CLASS(dev, 0x0048, GR);
+ NVOBJ_MTHD (dev, 0x0048, 0x0188, nv04_graph_mthd_bind_clip);
+ NVOBJ_MTHD (dev, 0x0048, 0x018c, nv04_graph_mthd_bind_surf_color);
+ NVOBJ_MTHD (dev, 0x0048, 0x0190, nv04_graph_mthd_bind_surf_zeta);
+
+ /* tex_tri */
+ NVOBJ_CLASS(dev, 0x0054, GR);
+
+ /* multitex_tri */
+ NVOBJ_CLASS(dev, 0x0055, GR);
+
+ /* nv01 chroma */
+ NVOBJ_CLASS(dev, 0x0017, GR);
+
+ /* nv04 chroma */
+ NVOBJ_CLASS(dev, 0x0057, GR);
+
+ /* surf_dst */
+ NVOBJ_CLASS(dev, 0x0058, GR);
+
+ /* surf_src */
+ NVOBJ_CLASS(dev, 0x0059, GR);
+
+ /* surf_color */
+ NVOBJ_CLASS(dev, 0x005a, GR);
+
+ /* surf_zeta */
+ NVOBJ_CLASS(dev, 0x005b, GR);
+
+ /* nv01 line */
+ NVOBJ_CLASS(dev, 0x001c, GR);
+ NVOBJ_MTHD (dev, 0x001c, 0x0184, nv04_graph_mthd_bind_clip);
+ NVOBJ_MTHD (dev, 0x001c, 0x0188, nv04_graph_mthd_bind_nv01_patt);
+ NVOBJ_MTHD (dev, 0x001c, 0x018c, nv04_graph_mthd_bind_rop);
+ NVOBJ_MTHD (dev, 0x001c, 0x0190, nv04_graph_mthd_bind_beta1);
+ NVOBJ_MTHD (dev, 0x001c, 0x0194, nv04_graph_mthd_bind_surf_dst);
+ NVOBJ_MTHD (dev, 0x001c, 0x02fc, nv04_graph_mthd_set_operation);
+
+ /* nv04 line */
+ NVOBJ_CLASS(dev, 0x005c, GR);
+ NVOBJ_MTHD (dev, 0x005c, 0x0184, nv04_graph_mthd_bind_clip);
+ NVOBJ_MTHD (dev, 0x005c, 0x0188, nv04_graph_mthd_bind_nv04_patt);
+ NVOBJ_MTHD (dev, 0x005c, 0x018c, nv04_graph_mthd_bind_rop);
+ NVOBJ_MTHD (dev, 0x005c, 0x0190, nv04_graph_mthd_bind_beta1);
+ NVOBJ_MTHD (dev, 0x005c, 0x0194, nv04_graph_mthd_bind_beta4);
+ NVOBJ_MTHD (dev, 0x005c, 0x0198, nv04_graph_mthd_bind_surf2d);
+ NVOBJ_MTHD (dev, 0x005c, 0x02fc, nv04_graph_mthd_set_operation);
+
+ /* nv01 tri */
+ NVOBJ_CLASS(dev, 0x001d, GR);
+ NVOBJ_MTHD (dev, 0x001d, 0x0184, nv04_graph_mthd_bind_clip);
+ NVOBJ_MTHD (dev, 0x001d, 0x0188, nv04_graph_mthd_bind_nv01_patt);
+ NVOBJ_MTHD (dev, 0x001d, 0x018c, nv04_graph_mthd_bind_rop);
+ NVOBJ_MTHD (dev, 0x001d, 0x0190, nv04_graph_mthd_bind_beta1);
+ NVOBJ_MTHD (dev, 0x001d, 0x0194, nv04_graph_mthd_bind_surf_dst);
+ NVOBJ_MTHD (dev, 0x001d, 0x02fc, nv04_graph_mthd_set_operation);
+
+ /* nv04 tri */
+ NVOBJ_CLASS(dev, 0x005d, GR);
+ NVOBJ_MTHD (dev, 0x005d, 0x0184, nv04_graph_mthd_bind_clip);
+ NVOBJ_MTHD (dev, 0x005d, 0x0188, nv04_graph_mthd_bind_nv04_patt);
+ NVOBJ_MTHD (dev, 0x005d, 0x018c, nv04_graph_mthd_bind_rop);
+ NVOBJ_MTHD (dev, 0x005d, 0x0190, nv04_graph_mthd_bind_beta1);
+ NVOBJ_MTHD (dev, 0x005d, 0x0194, nv04_graph_mthd_bind_beta4);
+ NVOBJ_MTHD (dev, 0x005d, 0x0198, nv04_graph_mthd_bind_surf2d);
+ NVOBJ_MTHD (dev, 0x005d, 0x02fc, nv04_graph_mthd_set_operation);
+
+ /* nv01 rect */
+ NVOBJ_CLASS(dev, 0x001e, GR);
+ NVOBJ_MTHD (dev, 0x001e, 0x0184, nv04_graph_mthd_bind_clip);
+ NVOBJ_MTHD (dev, 0x001e, 0x0188, nv04_graph_mthd_bind_nv01_patt);
+ NVOBJ_MTHD (dev, 0x001e, 0x018c, nv04_graph_mthd_bind_rop);
+ NVOBJ_MTHD (dev, 0x001e, 0x0190, nv04_graph_mthd_bind_beta1);
+ NVOBJ_MTHD (dev, 0x001e, 0x0194, nv04_graph_mthd_bind_surf_dst);
+ NVOBJ_MTHD (dev, 0x001e, 0x02fc, nv04_graph_mthd_set_operation);
+
+ /* nv04 rect */
+ NVOBJ_CLASS(dev, 0x005e, GR);
+ NVOBJ_MTHD (dev, 0x005e, 0x0184, nv04_graph_mthd_bind_clip);
+ NVOBJ_MTHD (dev, 0x005e, 0x0188, nv04_graph_mthd_bind_nv04_patt);
+ NVOBJ_MTHD (dev, 0x005e, 0x018c, nv04_graph_mthd_bind_rop);
+ NVOBJ_MTHD (dev, 0x005e, 0x0190, nv04_graph_mthd_bind_beta1);
+ NVOBJ_MTHD (dev, 0x005e, 0x0194, nv04_graph_mthd_bind_beta4);
+ NVOBJ_MTHD (dev, 0x005e, 0x0198, nv04_graph_mthd_bind_surf2d);
+ NVOBJ_MTHD (dev, 0x005e, 0x02fc, nv04_graph_mthd_set_operation);
+
+ /* nvsw */
+ NVOBJ_CLASS(dev, 0x506e, SW);
+ NVOBJ_MTHD (dev, 0x506e, 0x0150, nv04_graph_mthd_set_ref);
+ NVOBJ_MTHD (dev, 0x506e, 0x0500, nv04_graph_mthd_page_flip);
+
+ dev_priv->engine.graph.registered = true;
+ return 0;
};
-static struct nouveau_pgraph_object_method nv04_graph_mthds_nv03_tex_tri[] = {
- { 0x0188, nv04_graph_mthd_bind_clip },
- { 0x018c, nv04_graph_mthd_bind_surf_color },
- { 0x0190, nv04_graph_mthd_bind_surf_zeta },
- {},
+static struct nouveau_bitfield nv04_graph_intr[] = {
+ { NV_PGRAPH_INTR_NOTIFY, "NOTIFY" },
+ {}
};
-static struct nouveau_pgraph_object_method nv04_graph_mthds_surf3d[] = {
- { 0x02f8, nv04_graph_mthd_surf3d_clip_h },
- { 0x02fc, nv04_graph_mthd_surf3d_clip_v },
- {},
+static struct nouveau_bitfield nv04_graph_nstatus[] = {
+ { NV04_PGRAPH_NSTATUS_STATE_IN_USE, "STATE_IN_USE" },
+ { NV04_PGRAPH_NSTATUS_INVALID_STATE, "INVALID_STATE" },
+ { NV04_PGRAPH_NSTATUS_BAD_ARGUMENT, "BAD_ARGUMENT" },
+ { NV04_PGRAPH_NSTATUS_PROTECTION_FAULT, "PROTECTION_FAULT" },
+ {}
};
-struct nouveau_pgraph_object_class nv04_graph_grclass[] = {
- { 0x0038, false, NULL }, /* dvd subpicture */
- { 0x0039, false, NULL }, /* m2mf */
- { 0x004b, false, nv04_graph_mthds_nv03_gdirect }, /* nv03 gdirect */
- { 0x004a, false, nv04_graph_mthds_nv04_gdirect }, /* nv04 gdirect */
- { 0x001f, false, nv04_graph_mthds_nv01_imageblit }, /* nv01 imageblit */
- { 0x005f, false, nv04_graph_mthds_nv04_imageblit_ifc }, /* nv04 imageblit */
- { 0x0060, false, nv04_graph_mthds_nv04_iifc }, /* nv04 iifc */
- { 0x0064, false, NULL }, /* nv05 iifc */
- { 0x0021, false, nv04_graph_mthds_nv01_ifc }, /* nv01 ifc */
- { 0x0061, false, nv04_graph_mthds_nv04_imageblit_ifc }, /* nv04 ifc */
- { 0x0065, false, NULL }, /* nv05 ifc */
- { 0x0036, false, nv04_graph_mthds_nv03_sifc }, /* nv03 sifc */
- { 0x0076, false, nv04_graph_mthds_nv04_sifc }, /* nv04 sifc */
- { 0x0066, false, NULL }, /* nv05 sifc */
- { 0x0037, false, nv04_graph_mthds_nv03_sifm }, /* nv03 sifm */
- { 0x0077, false, nv04_graph_mthds_nv04_sifm }, /* nv04 sifm */
- { 0x0030, false, NULL }, /* null */
- { 0x0042, false, NULL }, /* surf2d */
- { 0x0043, false, NULL }, /* rop */
- { 0x0012, false, NULL }, /* beta1 */
- { 0x0072, false, NULL }, /* beta4 */
- { 0x0019, false, NULL }, /* cliprect */
- { 0x0018, false, NULL }, /* nv01 pattern */
- { 0x0044, false, NULL }, /* nv04 pattern */
- { 0x0052, false, NULL }, /* swzsurf */
- { 0x0053, false, nv04_graph_mthds_surf3d }, /* surf3d */
- { 0x0048, false, nv04_graph_mthds_nv03_tex_tri }, /* nv03 tex_tri */
- { 0x0054, false, NULL }, /* tex_tri */
- { 0x0055, false, NULL }, /* multitex_tri */
- { 0x0017, false, NULL }, /* nv01 chroma */
- { 0x0057, false, NULL }, /* nv04 chroma */
- { 0x0058, false, NULL }, /* surf_dst */
- { 0x0059, false, NULL }, /* surf_src */
- { 0x005a, false, NULL }, /* surf_color */
- { 0x005b, false, NULL }, /* surf_zeta */
- { 0x001c, false, nv04_graph_mthds_nv01_shape }, /* nv01 line */
- { 0x005c, false, nv04_graph_mthds_nv04_shape }, /* nv04 line */
- { 0x001d, false, nv04_graph_mthds_nv01_shape }, /* nv01 tri */
- { 0x005d, false, nv04_graph_mthds_nv04_shape }, /* nv04 tri */
- { 0x001e, false, nv04_graph_mthds_nv01_shape }, /* nv01 rect */
- { 0x005e, false, nv04_graph_mthds_nv04_shape }, /* nv04 rect */
- { 0x506e, true, nv04_graph_mthds_sw },
+struct nouveau_bitfield nv04_graph_nsource[] =
+{
+ { NV03_PGRAPH_NSOURCE_NOTIFICATION, "NOTIFICATION" },
+ { NV03_PGRAPH_NSOURCE_DATA_ERROR, "DATA_ERROR" },
+ { NV03_PGRAPH_NSOURCE_PROTECTION_ERROR, "PROTECTION_ERROR" },
+ { NV03_PGRAPH_NSOURCE_RANGE_EXCEPTION, "RANGE_EXCEPTION" },
+ { NV03_PGRAPH_NSOURCE_LIMIT_COLOR, "LIMIT_COLOR" },
+ { NV03_PGRAPH_NSOURCE_LIMIT_ZETA, "LIMIT_ZETA" },
+ { NV03_PGRAPH_NSOURCE_ILLEGAL_MTHD, "ILLEGAL_MTHD" },
+ { NV03_PGRAPH_NSOURCE_DMA_R_PROTECTION, "DMA_R_PROTECTION" },
+ { NV03_PGRAPH_NSOURCE_DMA_W_PROTECTION, "DMA_W_PROTECTION" },
+ { NV03_PGRAPH_NSOURCE_FORMAT_EXCEPTION, "FORMAT_EXCEPTION" },
+ { NV03_PGRAPH_NSOURCE_PATCH_EXCEPTION, "PATCH_EXCEPTION" },
+ { NV03_PGRAPH_NSOURCE_STATE_INVALID, "STATE_INVALID" },
+ { NV03_PGRAPH_NSOURCE_DOUBLE_NOTIFY, "DOUBLE_NOTIFY" },
+ { NV03_PGRAPH_NSOURCE_NOTIFY_IN_USE, "NOTIFY_IN_USE" },
+ { NV03_PGRAPH_NSOURCE_METHOD_CNT, "METHOD_CNT" },
+ { NV03_PGRAPH_NSOURCE_BFR_NOTIFICATION, "BFR_NOTIFICATION" },
+ { NV03_PGRAPH_NSOURCE_DMA_VTX_PROTECTION, "DMA_VTX_PROTECTION" },
+ { NV03_PGRAPH_NSOURCE_DMA_WIDTH_A, "DMA_WIDTH_A" },
+ { NV03_PGRAPH_NSOURCE_DMA_WIDTH_B, "DMA_WIDTH_B" },
{}
};
+static void
+nv04_graph_isr(struct drm_device *dev)
+{
+ u32 stat;
+
+ while ((stat = nv_rd32(dev, NV03_PGRAPH_INTR))) {
+ u32 nsource = nv_rd32(dev, NV03_PGRAPH_NSOURCE);
+ u32 nstatus = nv_rd32(dev, NV03_PGRAPH_NSTATUS);
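+ /* TRAPPED_ADDR packs the faulting channel (bits 27:24), subchannel (15:13) and method (12:2) */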
+ u32 addr = nv_rd32(dev, NV04_PGRAPH_TRAPPED_ADDR);
+ u32 chid = (addr & 0x0f000000) >> 24;
+ u32 subc = (addr & 0x0000e000) >> 13;
+ u32 mthd = (addr & 0x00001ffc);
+ u32 data = nv_rd32(dev, NV04_PGRAPH_TRAPPED_DATA);
+ u32 class = nv_rd32(dev, 0x400180 + subc * 4) & 0xff;
+ u32 show = stat;
+
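+ /* an ILLEGAL_MTHD notify may be a software method; only log it if no registered handler consumes it */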
+ if (stat & NV_PGRAPH_INTR_NOTIFY) {
+ if (nsource & NV03_PGRAPH_NSOURCE_ILLEGAL_MTHD) {
+ if (!nouveau_gpuobj_mthd_call2(dev, chid, class, mthd, data))
+ show &= ~NV_PGRAPH_INTR_NOTIFY;
+ }
+ }
+
+ if (stat & NV_PGRAPH_INTR_CONTEXT_SWITCH) {
+ nv_wr32(dev, NV03_PGRAPH_INTR, NV_PGRAPH_INTR_CONTEXT_SWITCH);
+ stat &= ~NV_PGRAPH_INTR_CONTEXT_SWITCH;
+ show &= ~NV_PGRAPH_INTR_CONTEXT_SWITCH;
+ nv04_graph_context_switch(dev);
+ }
+
+ nv_wr32(dev, NV03_PGRAPH_INTR, stat);
+ nv_wr32(dev, NV04_PGRAPH_FIFO, 0x00000001);
+
+ if (show && nouveau_ratelimit()) {
+ NV_INFO(dev, "PGRAPH -");
+ nouveau_bitfield_print(nv04_graph_intr, show);
+ printk(" nsource:");
+ nouveau_bitfield_print(nv04_graph_nsource, nsource);
+ printk(" nstatus:");
+ nouveau_bitfield_print(nv04_graph_nstatus, nstatus);
+ printk("\n");
+ NV_INFO(dev, "PGRAPH - ch %d/%d class 0x%04x "
+ "mthd 0x%04x data 0x%08x\n",
+ chid, subc, class, mthd, data);
+ }
+ }
+}
diff --git a/drivers/gpu/drm/nouveau/nv04_instmem.c b/drivers/gpu/drm/nouveau/nv04_instmem.c
index 0b5ae297abd..b8e3edb5c06 100644
--- a/drivers/gpu/drm/nouveau/nv04_instmem.c
+++ b/drivers/gpu/drm/nouveau/nv04_instmem.c
@@ -98,42 +98,66 @@ nv04_instmem_takedown(struct drm_device *dev)
}
int
-nv04_instmem_populate(struct drm_device *dev, struct nouveau_gpuobj *gpuobj,
- uint32_t *sz)
+nv04_instmem_suspend(struct drm_device *dev)
{
return 0;
}
void
-nv04_instmem_clear(struct drm_device *dev, struct nouveau_gpuobj *gpuobj)
-{
-}
-
-int
-nv04_instmem_bind(struct drm_device *dev, struct nouveau_gpuobj *gpuobj)
+nv04_instmem_resume(struct drm_device *dev)
{
- return 0;
}
int
-nv04_instmem_unbind(struct drm_device *dev, struct nouveau_gpuobj *gpuobj)
+nv04_instmem_get(struct nouveau_gpuobj *gpuobj, u32 size, u32 align)
{
+ struct drm_nouveau_private *dev_priv = gpuobj->dev->dev_private;
+ struct drm_mm_node *ramin = NULL;
+
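+ /* reserve node memory outside the lock so the search/claim pair can
+ * run atomically; retry if another thread claims the block first
+ */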
+ do {
+ if (drm_mm_pre_get(&dev_priv->ramin_heap))
+ return -ENOMEM;
+
+ spin_lock(&dev_priv->ramin_lock);
+ ramin = drm_mm_search_free(&dev_priv->ramin_heap, size, align, 0);
+ if (ramin == NULL) {
+ spin_unlock(&dev_priv->ramin_lock);
+ return -ENOMEM;
+ }
+
+ ramin = drm_mm_get_block_atomic(ramin, size, align);
+ spin_unlock(&dev_priv->ramin_lock);
+ } while (ramin == NULL);
+
+ gpuobj->node = ramin;
+ gpuobj->vinst = ramin->start;
return 0;
}
void
-nv04_instmem_flush(struct drm_device *dev)
+nv04_instmem_put(struct nouveau_gpuobj *gpuobj)
{
+ struct drm_nouveau_private *dev_priv = gpuobj->dev->dev_private;
+
+ spin_lock(&dev_priv->ramin_lock);
+ drm_mm_put_block(gpuobj->node);
+ gpuobj->node = NULL;
+ spin_unlock(&dev_priv->ramin_lock);
}
int
-nv04_instmem_suspend(struct drm_device *dev)
+nv04_instmem_map(struct nouveau_gpuobj *gpuobj)
{
+ gpuobj->pinst = gpuobj->vinst;
return 0;
}
void
-nv04_instmem_resume(struct drm_device *dev)
+nv04_instmem_unmap(struct nouveau_gpuobj *gpuobj)
{
}
+void
+nv04_instmem_flush(struct drm_device *dev)
+{
+}
diff --git a/drivers/gpu/drm/nouveau/nv10_fb.c b/drivers/gpu/drm/nouveau/nv10_fb.c
index cc5cda44e50..f78181a59b4 100644
--- a/drivers/gpu/drm/nouveau/nv10_fb.c
+++ b/drivers/gpu/drm/nouveau/nv10_fb.c
@@ -3,23 +3,109 @@
#include "nouveau_drv.h"
#include "nouveau_drm.h"
+static struct drm_mm_node *
+nv20_fb_alloc_tag(struct drm_device *dev, uint32_t size)
+{
+ struct drm_nouveau_private *dev_priv = dev->dev_private;
+ struct nouveau_fb_engine *pfb = &dev_priv->engine.fb;
+ struct drm_mm_node *mem;
+ int ret;
+
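+ /* reserve nodes up front so the search/claim pair can run atomically under tile.lock */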
+ ret = drm_mm_pre_get(&pfb->tag_heap);
+ if (ret)
+ return NULL;
+
+ spin_lock(&dev_priv->tile.lock);
+ mem = drm_mm_search_free(&pfb->tag_heap, size, 0, 0);
+ if (mem)
+ mem = drm_mm_get_block_atomic(mem, size, 0);
+ spin_unlock(&dev_priv->tile.lock);
+
+ return mem;
+}
+
+static void
+nv20_fb_free_tag(struct drm_device *dev, struct drm_mm_node *mem)
+{
+ struct drm_nouveau_private *dev_priv = dev->dev_private;
+
+ spin_lock(&dev_priv->tile.lock);
+ drm_mm_put_block(mem);
+ spin_unlock(&dev_priv->tile.lock);
+}
+
+void
+nv10_fb_init_tile_region(struct drm_device *dev, int i, uint32_t addr,
+ uint32_t size, uint32_t pitch, uint32_t flags)
+{
+ struct drm_nouveau_private *dev_priv = dev->dev_private;
+ struct nouveau_tile_reg *tile = &dev_priv->tile.reg[i];
+ int bpp = (flags & NOUVEAU_GEM_TILE_32BPP ? 32 : 16);
+
+ tile->addr = addr;
+ tile->limit = max(1u, addr + size) - 1;
+ tile->pitch = pitch;
+
+ if (dev_priv->card_type == NV_20) {
+ if (flags & NOUVEAU_GEM_TILE_ZETA) {
+ /*
+ * Allocate some of the on-die tag memory,
+ * used to store Z compression meta-data (most
+ * likely just a bitmap determining if a given
+ * tile is compressed or not).
+ */
+ tile->tag_mem = nv20_fb_alloc_tag(dev, size / 256);
+
+ if (tile->tag_mem) {
+ /* Enable Z compression */
+ if (dev_priv->chipset >= 0x25)
+ tile->zcomp = tile->tag_mem->start |
+ (bpp == 16 ?
+ NV25_PFB_ZCOMP_MODE_16 :
+ NV25_PFB_ZCOMP_MODE_32);
+ else
+ tile->zcomp = tile->tag_mem->start |
+ NV20_PFB_ZCOMP_EN |
+ (bpp == 16 ? 0 :
+ NV20_PFB_ZCOMP_MODE_32);
+ }
+
+ tile->addr |= 3;
+ } else {
+ tile->addr |= 1;
+ }
+
+ } else {
+ tile->addr |= 1 << 31;
+ }
+}
+
void
-nv10_fb_set_region_tiling(struct drm_device *dev, int i, uint32_t addr,
- uint32_t size, uint32_t pitch)
+nv10_fb_free_tile_region(struct drm_device *dev, int i)
{
struct drm_nouveau_private *dev_priv = dev->dev_private;
- uint32_t limit = max(1u, addr + size) - 1;
+ struct nouveau_tile_reg *tile = &dev_priv->tile.reg[i];
- if (pitch) {
- if (dev_priv->card_type >= NV_20)
- addr |= 1;
- else
- addr |= 1 << 31;
+ if (tile->tag_mem) {
+ nv20_fb_free_tag(dev, tile->tag_mem);
+ tile->tag_mem = NULL;
}
- nv_wr32(dev, NV10_PFB_TLIMIT(i), limit);
- nv_wr32(dev, NV10_PFB_TSIZE(i), pitch);
- nv_wr32(dev, NV10_PFB_TILE(i), addr);
+ tile->addr = tile->limit = tile->pitch = tile->zcomp = 0;
+}
+
+void
+nv10_fb_set_tile_region(struct drm_device *dev, int i)
+{
+ struct drm_nouveau_private *dev_priv = dev->dev_private;
+ struct nouveau_tile_reg *tile = &dev_priv->tile.reg[i];
+
+ nv_wr32(dev, NV10_PFB_TLIMIT(i), tile->limit);
+ nv_wr32(dev, NV10_PFB_TSIZE(i), tile->pitch);
+ nv_wr32(dev, NV10_PFB_TILE(i), tile->addr);
+
+ if (dev_priv->card_type == NV_20)
+ nv_wr32(dev, NV20_PFB_ZCOMP(i), tile->zcomp);
}
int
@@ -31,9 +117,14 @@ nv10_fb_init(struct drm_device *dev)
pfb->num_tiles = NV10_PFB_TILE__SIZE;
+ if (dev_priv->card_type == NV_20)
+ drm_mm_init(&pfb->tag_heap, 0,
+ (dev_priv->chipset >= 0x25 ?
+ 64 * 1024 : 32 * 1024));
+
/* Turn all the tiling regions off. */
for (i = 0; i < pfb->num_tiles; i++)
- pfb->set_region_tiling(dev, i, 0, 0, 0);
+ pfb->set_tile_region(dev, i);
return 0;
}
@@ -41,4 +132,13 @@ nv10_fb_init(struct drm_device *dev)
void
nv10_fb_takedown(struct drm_device *dev)
{
+ struct drm_nouveau_private *dev_priv = dev->dev_private;
+ struct nouveau_fb_engine *pfb = &dev_priv->engine.fb;
+ int i;
+
+ for (i = 0; i < pfb->num_tiles; i++)
+ pfb->free_tile_region(dev, i);
+
+ if (dev_priv->card_type == NV_20)
+ drm_mm_takedown(&pfb->tag_heap);
}
diff --git a/drivers/gpu/drm/nouveau/nv10_fifo.c b/drivers/gpu/drm/nouveau/nv10_fifo.c
index f1b03ad58fd..d2ecbff4bee 100644
--- a/drivers/gpu/drm/nouveau/nv10_fifo.c
+++ b/drivers/gpu/drm/nouveau/nv10_fifo.c
@@ -53,6 +53,11 @@ nv10_fifo_create_context(struct nouveau_channel *chan)
if (ret)
return ret;
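+ /* map this channel's USER control window (PUT/GET registers) */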
+ chan->user = ioremap(pci_resource_start(dev->pdev, 0) +
+ NV03_USER(chan->id), PAGE_SIZE);
+ if (!chan->user)
+ return -ENOMEM;
+
/* Fill entries that are seen filled in dumps of the nvidia driver just
* after the channel is put into DMA mode
*/
@@ -73,17 +78,6 @@ nv10_fifo_create_context(struct nouveau_channel *chan)
return 0;
}
-void
-nv10_fifo_destroy_context(struct nouveau_channel *chan)
-{
- struct drm_device *dev = chan->dev;
-
- nv_wr32(dev, NV04_PFIFO_MODE,
- nv_rd32(dev, NV04_PFIFO_MODE) & ~(1 << chan->id));
-
- nouveau_gpuobj_ref(NULL, &chan->ramfc);
-}
-
static void
nv10_fifo_do_load_context(struct drm_device *dev, int chid)
{
@@ -219,6 +213,7 @@ nv10_fifo_init_ramxx(struct drm_device *dev)
static void
nv10_fifo_init_intr(struct drm_device *dev)
{
+ nouveau_irq_register(dev, 8, nv04_fifo_isr);
nv_wr32(dev, 0x002100, 0xffffffff);
nv_wr32(dev, 0x002140, 0xffffffff);
}
@@ -241,7 +236,7 @@ nv10_fifo_init(struct drm_device *dev)
pfifo->reassign(dev, true);
for (i = 0; i < dev_priv->engine.fifo.channels; i++) {
- if (dev_priv->fifos[i]) {
+ if (dev_priv->channels.ptr[i]) {
uint32_t mode = nv_rd32(dev, NV04_PFIFO_MODE);
nv_wr32(dev, NV04_PFIFO_MODE, mode | (1 << i));
}
diff --git a/drivers/gpu/drm/nouveau/nv10_graph.c b/drivers/gpu/drm/nouveau/nv10_graph.c
index 8e68c973115..8c92edb7bbc 100644
--- a/drivers/gpu/drm/nouveau/nv10_graph.c
+++ b/drivers/gpu/drm/nouveau/nv10_graph.c
@@ -26,6 +26,10 @@
#include "drm.h"
#include "nouveau_drm.h"
#include "nouveau_drv.h"
+#include "nouveau_util.h"
+
+static int nv10_graph_register(struct drm_device *);
+static void nv10_graph_isr(struct drm_device *);
#define NV10_FIFO_NUMBER 32
@@ -786,15 +790,13 @@ nv10_graph_unload_context(struct drm_device *dev)
return 0;
}
-void
+static void
nv10_graph_context_switch(struct drm_device *dev)
{
struct drm_nouveau_private *dev_priv = dev->dev_private;
- struct nouveau_pgraph_engine *pgraph = &dev_priv->engine.graph;
struct nouveau_channel *chan = NULL;
int chid;
- pgraph->fifo_access(dev, false);
nouveau_wait_for_idle(dev);
/* If previous context is valid, we need to save it */
@@ -802,11 +804,9 @@ nv10_graph_context_switch(struct drm_device *dev)
/* Load context for next channel */
chid = (nv_rd32(dev, NV04_PGRAPH_TRAPPED_ADDR) >> 20) & 0x1f;
- chan = dev_priv->fifos[chid];
+ chan = dev_priv->channels.ptr[chid];
if (chan && chan->pgraph_ctx)
nv10_graph_load_context(chan);
-
- pgraph->fifo_access(dev, true);
}
#define NV_WRITE_CTX(reg, val) do { \
@@ -833,7 +833,7 @@ nv10_graph_channel(struct drm_device *dev)
if (chid >= dev_priv->engine.fifo.channels)
return NULL;
- return dev_priv->fifos[chid];
+ return dev_priv->channels.ptr[chid];
}
int nv10_graph_create_context(struct nouveau_channel *chan)
@@ -875,37 +875,54 @@ int nv10_graph_create_context(struct nouveau_channel *chan)
void nv10_graph_destroy_context(struct nouveau_channel *chan)
{
+ struct drm_device *dev = chan->dev;
+ struct drm_nouveau_private *dev_priv = dev->dev_private;
+ struct nouveau_pgraph_engine *pgraph = &dev_priv->engine.graph;
struct graph_state *pgraph_ctx = chan->pgraph_ctx;
+ unsigned long flags;
+
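+ /* block fifo access to PGRAPH and hold off context switches while the context is torn down */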
+ spin_lock_irqsave(&dev_priv->context_switch_lock, flags);
+ pgraph->fifo_access(dev, false);
+
+ /* Unload the context if it's the currently active one */
+ if (pgraph->channel(dev) == chan)
+ pgraph->unload_context(dev);
+ /* Free the context resources */
kfree(pgraph_ctx);
chan->pgraph_ctx = NULL;
+
+ pgraph->fifo_access(dev, true);
+ spin_unlock_irqrestore(&dev_priv->context_switch_lock, flags);
}
void
-nv10_graph_set_region_tiling(struct drm_device *dev, int i, uint32_t addr,
- uint32_t size, uint32_t pitch)
+nv10_graph_set_tile_region(struct drm_device *dev, int i)
{
- uint32_t limit = max(1u, addr + size) - 1;
-
- if (pitch)
- addr |= 1 << 31;
+ struct drm_nouveau_private *dev_priv = dev->dev_private;
+ struct nouveau_tile_reg *tile = &dev_priv->tile.reg[i];
- nv_wr32(dev, NV10_PGRAPH_TLIMIT(i), limit);
- nv_wr32(dev, NV10_PGRAPH_TSIZE(i), pitch);
- nv_wr32(dev, NV10_PGRAPH_TILE(i), addr);
+ nv_wr32(dev, NV10_PGRAPH_TLIMIT(i), tile->limit);
+ nv_wr32(dev, NV10_PGRAPH_TSIZE(i), tile->pitch);
+ nv_wr32(dev, NV10_PGRAPH_TILE(i), tile->addr);
}
int nv10_graph_init(struct drm_device *dev)
{
struct drm_nouveau_private *dev_priv = dev->dev_private;
uint32_t tmp;
- int i;
+ int ret, i;
nv_wr32(dev, NV03_PMC_ENABLE, nv_rd32(dev, NV03_PMC_ENABLE) &
~NV_PMC_ENABLE_PGRAPH);
nv_wr32(dev, NV03_PMC_ENABLE, nv_rd32(dev, NV03_PMC_ENABLE) |
NV_PMC_ENABLE_PGRAPH);
+ ret = nv10_graph_register(dev);
+ if (ret)
+ return ret;
+
+ nouveau_irq_register(dev, 12, nv10_graph_isr);
nv_wr32(dev, NV03_PGRAPH_INTR , 0xFFFFFFFF);
nv_wr32(dev, NV03_PGRAPH_INTR_EN, 0xFFFFFFFF);
@@ -928,7 +945,7 @@ int nv10_graph_init(struct drm_device *dev)
/* Turn all the tiling regions off. */
for (i = 0; i < NV10_PFB_TILE__SIZE; i++)
- nv10_graph_set_region_tiling(dev, i, 0, 0, 0);
+ nv10_graph_set_tile_region(dev, i);
nv_wr32(dev, NV10_PGRAPH_CTX_SWITCH(0), 0x00000000);
nv_wr32(dev, NV10_PGRAPH_CTX_SWITCH(1), 0x00000000);
@@ -948,17 +965,17 @@ int nv10_graph_init(struct drm_device *dev)
void nv10_graph_takedown(struct drm_device *dev)
{
+ nv_wr32(dev, NV03_PGRAPH_INTR_EN, 0x00000000);
+ nouveau_irq_unregister(dev, 12);
}
static int
-nv17_graph_mthd_lma_window(struct nouveau_channel *chan, int grclass,
- int mthd, uint32_t data)
+nv17_graph_mthd_lma_window(struct nouveau_channel *chan,
+ u32 class, u32 mthd, u32 data)
{
struct drm_device *dev = chan->dev;
struct graph_state *ctx = chan->pgraph_ctx;
struct pipe_state *pipe = &ctx->pipe_state;
- struct drm_nouveau_private *dev_priv = dev->dev_private;
- struct nouveau_pgraph_engine *pgraph = &dev_priv->engine.graph;
uint32_t pipe_0x0040[1], pipe_0x64c0[8], pipe_0x6a80[3], pipe_0x6ab0[3];
uint32_t xfmode0, xfmode1;
int i;
@@ -1025,18 +1042,14 @@ nv17_graph_mthd_lma_window(struct nouveau_channel *chan, int grclass,
nouveau_wait_for_idle(dev);
- pgraph->fifo_access(dev, true);
-
return 0;
}
static int
-nv17_graph_mthd_lma_enable(struct nouveau_channel *chan, int grclass,
- int mthd, uint32_t data)
+nv17_graph_mthd_lma_enable(struct nouveau_channel *chan,
+ u32 class, u32 mthd, u32 data)
{
struct drm_device *dev = chan->dev;
- struct drm_nouveau_private *dev_priv = dev->dev_private;
- struct nouveau_pgraph_engine *pgraph = &dev_priv->engine.graph;
nouveau_wait_for_idle(dev);
@@ -1045,40 +1058,118 @@ nv17_graph_mthd_lma_enable(struct nouveau_channel *chan, int grclass,
nv_wr32(dev, 0x004006b0,
nv_rd32(dev, 0x004006b0) | 0x8 << 24);
- pgraph->fifo_access(dev, true);
+ return 0;
+}
+
+static int
+nv10_graph_register(struct drm_device *dev)
+{
+ struct drm_nouveau_private *dev_priv = dev->dev_private;
+
+ if (dev_priv->engine.graph.registered)
+ return 0;
+
+ NVOBJ_CLASS(dev, 0x506e, SW); /* nvsw */
+ NVOBJ_CLASS(dev, 0x0030, GR); /* null */
+ NVOBJ_CLASS(dev, 0x0039, GR); /* m2mf */
+ NVOBJ_CLASS(dev, 0x004a, GR); /* gdirect */
+ NVOBJ_CLASS(dev, 0x005f, GR); /* imageblit */
+ NVOBJ_CLASS(dev, 0x009f, GR); /* imageblit (nv12) */
+ NVOBJ_CLASS(dev, 0x008a, GR); /* ifc */
+ NVOBJ_CLASS(dev, 0x0089, GR); /* sifm */
+ NVOBJ_CLASS(dev, 0x0062, GR); /* surf2d */
+ NVOBJ_CLASS(dev, 0x0043, GR); /* rop */
+ NVOBJ_CLASS(dev, 0x0012, GR); /* beta1 */
+ NVOBJ_CLASS(dev, 0x0072, GR); /* beta4 */
+ NVOBJ_CLASS(dev, 0x0019, GR); /* cliprect */
+ NVOBJ_CLASS(dev, 0x0044, GR); /* pattern */
+ NVOBJ_CLASS(dev, 0x0052, GR); /* swzsurf */
+ NVOBJ_CLASS(dev, 0x0093, GR); /* surf3d */
+ NVOBJ_CLASS(dev, 0x0094, GR); /* tex_tri */
+ NVOBJ_CLASS(dev, 0x0095, GR); /* multitex_tri */
+
+ /* celsius */
+ if (dev_priv->chipset <= 0x10) {
+ NVOBJ_CLASS(dev, 0x0056, GR);
+ } else
+ if (dev_priv->chipset < 0x17 || dev_priv->chipset == 0x1a) {
+ NVOBJ_CLASS(dev, 0x0096, GR);
+ } else {
+ NVOBJ_CLASS(dev, 0x0099, GR);
+ NVOBJ_MTHD (dev, 0x0099, 0x1638, nv17_graph_mthd_lma_window);
+ NVOBJ_MTHD (dev, 0x0099, 0x163c, nv17_graph_mthd_lma_window);
+ NVOBJ_MTHD (dev, 0x0099, 0x1640, nv17_graph_mthd_lma_window);
+ NVOBJ_MTHD (dev, 0x0099, 0x1644, nv17_graph_mthd_lma_window);
+ NVOBJ_MTHD (dev, 0x0099, 0x1658, nv17_graph_mthd_lma_enable);
+ }
+ /* nvsw */
+ NVOBJ_CLASS(dev, 0x506e, SW);
+ NVOBJ_MTHD (dev, 0x506e, 0x0500, nv04_graph_mthd_page_flip);
+
+ dev_priv->engine.graph.registered = true;
return 0;
}
-static struct nouveau_pgraph_object_method nv17_graph_celsius_mthds[] = {
- { 0x1638, nv17_graph_mthd_lma_window },
- { 0x163c, nv17_graph_mthd_lma_window },
- { 0x1640, nv17_graph_mthd_lma_window },
- { 0x1644, nv17_graph_mthd_lma_window },
- { 0x1658, nv17_graph_mthd_lma_enable },
+struct nouveau_bitfield nv10_graph_intr[] = {
+ { NV_PGRAPH_INTR_NOTIFY, "NOTIFY" },
+ { NV_PGRAPH_INTR_ERROR, "ERROR" },
{}
};
-struct nouveau_pgraph_object_class nv10_graph_grclass[] = {
- { 0x0030, false, NULL }, /* null */
- { 0x0039, false, NULL }, /* m2mf */
- { 0x004a, false, NULL }, /* gdirect */
- { 0x005f, false, NULL }, /* imageblit */
- { 0x009f, false, NULL }, /* imageblit (nv12) */
- { 0x008a, false, NULL }, /* ifc */
- { 0x0089, false, NULL }, /* sifm */
- { 0x0062, false, NULL }, /* surf2d */
- { 0x0043, false, NULL }, /* rop */
- { 0x0012, false, NULL }, /* beta1 */
- { 0x0072, false, NULL }, /* beta4 */
- { 0x0019, false, NULL }, /* cliprect */
- { 0x0044, false, NULL }, /* pattern */
- { 0x0052, false, NULL }, /* swzsurf */
- { 0x0093, false, NULL }, /* surf3d */
- { 0x0094, false, NULL }, /* tex_tri */
- { 0x0095, false, NULL }, /* multitex_tri */
- { 0x0056, false, NULL }, /* celcius (nv10) */
- { 0x0096, false, NULL }, /* celcius (nv11) */
- { 0x0099, false, nv17_graph_celsius_mthds }, /* celcius (nv17) */
+struct nouveau_bitfield nv10_graph_nstatus[] =
+{
+ { NV10_PGRAPH_NSTATUS_STATE_IN_USE, "STATE_IN_USE" },
+ { NV10_PGRAPH_NSTATUS_INVALID_STATE, "INVALID_STATE" },
+ { NV10_PGRAPH_NSTATUS_BAD_ARGUMENT, "BAD_ARGUMENT" },
+ { NV10_PGRAPH_NSTATUS_PROTECTION_FAULT, "PROTECTION_FAULT" },
{}
};
+
+static void
+nv10_graph_isr(struct drm_device *dev)
+{
+ u32 stat;
+
+ while ((stat = nv_rd32(dev, NV03_PGRAPH_INTR))) {
+ u32 nsource = nv_rd32(dev, NV03_PGRAPH_NSOURCE);
+ u32 nstatus = nv_rd32(dev, NV03_PGRAPH_NSTATUS);
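+ /* nv10 TRAPPED_ADDR layout differs from nv04: chid in bits 24:20, subc in 18:16 */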
+ u32 addr = nv_rd32(dev, NV04_PGRAPH_TRAPPED_ADDR);
+ u32 chid = (addr & 0x01f00000) >> 20;
+ u32 subc = (addr & 0x00070000) >> 16;
+ u32 mthd = (addr & 0x00001ffc);
+ u32 data = nv_rd32(dev, NV04_PGRAPH_TRAPPED_DATA);
+ u32 class = nv_rd32(dev, 0x400160 + subc * 4) & 0xfff;
+ u32 show = stat;
+
+ if (stat & NV_PGRAPH_INTR_ERROR) {
+ if (nsource & NV03_PGRAPH_NSOURCE_ILLEGAL_MTHD) {
+ if (!nouveau_gpuobj_mthd_call2(dev, chid, class, mthd, data))
+ show &= ~NV_PGRAPH_INTR_ERROR;
+ }
+ }
+
+ if (stat & NV_PGRAPH_INTR_CONTEXT_SWITCH) {
+ nv_wr32(dev, NV03_PGRAPH_INTR, NV_PGRAPH_INTR_CONTEXT_SWITCH);
+ stat &= ~NV_PGRAPH_INTR_CONTEXT_SWITCH;
+ show &= ~NV_PGRAPH_INTR_CONTEXT_SWITCH;
+ nv10_graph_context_switch(dev);
+ }
+
+ nv_wr32(dev, NV03_PGRAPH_INTR, stat);
+ nv_wr32(dev, NV04_PGRAPH_FIFO, 0x00000001);
+
+ if (show && nouveau_ratelimit()) {
+ NV_INFO(dev, "PGRAPH -");
+ nouveau_bitfield_print(nv10_graph_intr, show);
+ printk(" nsource:");
+ nouveau_bitfield_print(nv04_graph_nsource, nsource);
+ printk(" nstatus:");
+ nouveau_bitfield_print(nv10_graph_nstatus, nstatus);
+ printk("\n");
+ NV_INFO(dev, "PGRAPH - ch %d/%d class 0x%04x "
+ "mthd 0x%04x data 0x%08x\n",
+ chid, subc, class, mthd, data);
+ }
+ }
+}
diff --git a/drivers/gpu/drm/nouveau/nv20_graph.c b/drivers/gpu/drm/nouveau/nv20_graph.c
index 12ab9cd56ec..8464b76798d 100644
--- a/drivers/gpu/drm/nouveau/nv20_graph.c
+++ b/drivers/gpu/drm/nouveau/nv20_graph.c
@@ -32,6 +32,10 @@
#define NV34_GRCTX_SIZE (18140)
#define NV35_36_GRCTX_SIZE (22396)
+static int nv20_graph_register(struct drm_device *);
+static int nv30_graph_register(struct drm_device *);
+static void nv20_graph_isr(struct drm_device *);
+
static void
nv20_graph_context_init(struct drm_device *dev, struct nouveau_gpuobj *ctx)
{
@@ -425,9 +429,21 @@ nv20_graph_destroy_context(struct nouveau_channel *chan)
struct drm_device *dev = chan->dev;
struct drm_nouveau_private *dev_priv = dev->dev_private;
struct nouveau_pgraph_engine *pgraph = &dev_priv->engine.graph;
+ unsigned long flags;
- nouveau_gpuobj_ref(NULL, &chan->ramin_grctx);
+ spin_lock_irqsave(&dev_priv->context_switch_lock, flags);
+ pgraph->fifo_access(dev, false);
+
+ /* Unload the context if it's the currently active one */
+ if (pgraph->channel(dev) == chan)
+ pgraph->unload_context(dev);
+
+ pgraph->fifo_access(dev, true);
+ spin_unlock_irqrestore(&dev_priv->context_switch_lock, flags);
+
+ /* Free the context resources */
nv_wo32(pgraph->ctx_table, chan->id * 4, 0);
+ nouveau_gpuobj_ref(NULL, &chan->ramin_grctx);
}
int
@@ -496,24 +512,27 @@ nv20_graph_rdi(struct drm_device *dev)
}
void
-nv20_graph_set_region_tiling(struct drm_device *dev, int i, uint32_t addr,
- uint32_t size, uint32_t pitch)
+nv20_graph_set_tile_region(struct drm_device *dev, int i)
{
- uint32_t limit = max(1u, addr + size) - 1;
-
- if (pitch)
- addr |= 1;
+ struct drm_nouveau_private *dev_priv = dev->dev_private;
+ struct nouveau_tile_reg *tile = &dev_priv->tile.reg[i];
- nv_wr32(dev, NV20_PGRAPH_TLIMIT(i), limit);
- nv_wr32(dev, NV20_PGRAPH_TSIZE(i), pitch);
- nv_wr32(dev, NV20_PGRAPH_TILE(i), addr);
+ nv_wr32(dev, NV20_PGRAPH_TLIMIT(i), tile->limit);
+ nv_wr32(dev, NV20_PGRAPH_TSIZE(i), tile->pitch);
+ nv_wr32(dev, NV20_PGRAPH_TILE(i), tile->addr);
nv_wr32(dev, NV10_PGRAPH_RDI_INDEX, 0x00EA0030 + 4 * i);
- nv_wr32(dev, NV10_PGRAPH_RDI_DATA, limit);
+ nv_wr32(dev, NV10_PGRAPH_RDI_DATA, tile->limit);
nv_wr32(dev, NV10_PGRAPH_RDI_INDEX, 0x00EA0050 + 4 * i);
- nv_wr32(dev, NV10_PGRAPH_RDI_DATA, pitch);
+ nv_wr32(dev, NV10_PGRAPH_RDI_DATA, tile->pitch);
nv_wr32(dev, NV10_PGRAPH_RDI_INDEX, 0x00EA0010 + 4 * i);
- nv_wr32(dev, NV10_PGRAPH_RDI_DATA, addr);
+ nv_wr32(dev, NV10_PGRAPH_RDI_DATA, tile->addr);
+
+ if (dev_priv->card_type == NV_20) {
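+ /* nv20 also mirrors zcomp into PGRAPH's RDI-accessible copy of the tile setup */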
+ nv_wr32(dev, NV20_PGRAPH_ZCOMP(i), tile->zcomp);
+ nv_wr32(dev, NV10_PGRAPH_RDI_INDEX, 0x00ea0090 + 4 * i);
+ nv_wr32(dev, NV10_PGRAPH_RDI_DATA, tile->zcomp);
+ }
}
int
@@ -560,6 +579,13 @@ nv20_graph_init(struct drm_device *dev)
nv20_graph_rdi(dev);
+ ret = nv20_graph_register(dev);
+ if (ret) {
+ nouveau_gpuobj_ref(NULL, &pgraph->ctx_table);
+ return ret;
+ }
+
+ nouveau_irq_register(dev, 12, nv20_graph_isr);
nv_wr32(dev, NV03_PGRAPH_INTR , 0xFFFFFFFF);
nv_wr32(dev, NV03_PGRAPH_INTR_EN, 0xFFFFFFFF);
@@ -571,16 +597,17 @@ nv20_graph_init(struct drm_device *dev)
nv_wr32(dev, 0x40009C , 0x00000040);
if (dev_priv->chipset >= 0x25) {
- nv_wr32(dev, 0x400890, 0x00080000);
+ nv_wr32(dev, 0x400890, 0x00a8cfff);
nv_wr32(dev, 0x400610, 0x304B1FB6);
- nv_wr32(dev, 0x400B80, 0x18B82880);
+ nv_wr32(dev, 0x400B80, 0x1cbd3883);
nv_wr32(dev, 0x400B84, 0x44000000);
nv_wr32(dev, 0x400098, 0x40000080);
nv_wr32(dev, 0x400B88, 0x000000ff);
+
} else {
- nv_wr32(dev, 0x400880, 0x00080000); /* 0x0008c7df */
+ nv_wr32(dev, 0x400880, 0x0008c7df);
nv_wr32(dev, 0x400094, 0x00000005);
- nv_wr32(dev, 0x400B80, 0x45CAA208); /* 0x45eae20e */
+ nv_wr32(dev, 0x400B80, 0x45eae20e);
nv_wr32(dev, 0x400B84, 0x24000000);
nv_wr32(dev, 0x400098, 0x00000040);
nv_wr32(dev, NV10_PGRAPH_RDI_INDEX, 0x00E00038);
@@ -591,14 +618,8 @@ nv20_graph_init(struct drm_device *dev)
/* Turn all the tiling regions off. */
for (i = 0; i < NV10_PFB_TILE__SIZE; i++)
- nv20_graph_set_region_tiling(dev, i, 0, 0, 0);
+ nv20_graph_set_tile_region(dev, i);
- for (i = 0; i < 8; i++) {
- nv_wr32(dev, 0x400980 + i * 4, nv_rd32(dev, 0x100300 + i * 4));
- nv_wr32(dev, NV10_PGRAPH_RDI_INDEX, 0x00EA0090 + i * 4);
- nv_wr32(dev, NV10_PGRAPH_RDI_DATA,
- nv_rd32(dev, 0x100300 + i * 4));
- }
nv_wr32(dev, 0x4009a0, nv_rd32(dev, 0x100324));
nv_wr32(dev, NV10_PGRAPH_RDI_INDEX, 0x00EA000C);
nv_wr32(dev, NV10_PGRAPH_RDI_DATA, nv_rd32(dev, 0x100324));
@@ -642,6 +663,9 @@ nv20_graph_takedown(struct drm_device *dev)
struct drm_nouveau_private *dev_priv = dev->dev_private;
struct nouveau_pgraph_engine *pgraph = &dev_priv->engine.graph;
+ nv_wr32(dev, NV03_PGRAPH_INTR_EN, 0x00000000);
+ nouveau_irq_unregister(dev, 12);
+
nouveau_gpuobj_ref(NULL, &pgraph->ctx_table);
}
@@ -684,9 +708,16 @@ nv30_graph_init(struct drm_device *dev)
return ret;
}
+ ret = nv30_graph_register(dev);
+ if (ret) {
+ nouveau_gpuobj_ref(NULL, &pgraph->ctx_table);
+ return ret;
+ }
+
nv_wr32(dev, NV20_PGRAPH_CHANNEL_CTX_TABLE,
pgraph->ctx_table->pinst >> 4);
+ nouveau_irq_register(dev, 12, nv20_graph_isr);
nv_wr32(dev, NV03_PGRAPH_INTR , 0xFFFFFFFF);
nv_wr32(dev, NV03_PGRAPH_INTR_EN, 0xFFFFFFFF);
@@ -724,7 +755,7 @@ nv30_graph_init(struct drm_device *dev)
/* Turn all the tiling regions off. */
for (i = 0; i < NV10_PFB_TILE__SIZE; i++)
- nv20_graph_set_region_tiling(dev, i, 0, 0, 0);
+ nv20_graph_set_tile_region(dev, i);
nv_wr32(dev, NV10_PGRAPH_CTX_CONTROL, 0x10000100);
nv_wr32(dev, NV10_PGRAPH_STATE , 0xFFFFFFFF);
@@ -744,46 +775,125 @@ nv30_graph_init(struct drm_device *dev)
return 0;
}
-struct nouveau_pgraph_object_class nv20_graph_grclass[] = {
- { 0x0030, false, NULL }, /* null */
- { 0x0039, false, NULL }, /* m2mf */
- { 0x004a, false, NULL }, /* gdirect */
- { 0x009f, false, NULL }, /* imageblit (nv12) */
- { 0x008a, false, NULL }, /* ifc */
- { 0x0089, false, NULL }, /* sifm */
- { 0x0062, false, NULL }, /* surf2d */
- { 0x0043, false, NULL }, /* rop */
- { 0x0012, false, NULL }, /* beta1 */
- { 0x0072, false, NULL }, /* beta4 */
- { 0x0019, false, NULL }, /* cliprect */
- { 0x0044, false, NULL }, /* pattern */
- { 0x009e, false, NULL }, /* swzsurf */
- { 0x0096, false, NULL }, /* celcius */
- { 0x0097, false, NULL }, /* kelvin (nv20) */
- { 0x0597, false, NULL }, /* kelvin (nv25) */
- {}
-};
-
-struct nouveau_pgraph_object_class nv30_graph_grclass[] = {
- { 0x0030, false, NULL }, /* null */
- { 0x0039, false, NULL }, /* m2mf */
- { 0x004a, false, NULL }, /* gdirect */
- { 0x009f, false, NULL }, /* imageblit (nv12) */
- { 0x008a, false, NULL }, /* ifc */
- { 0x038a, false, NULL }, /* ifc (nv30) */
- { 0x0089, false, NULL }, /* sifm */
- { 0x0389, false, NULL }, /* sifm (nv30) */
- { 0x0062, false, NULL }, /* surf2d */
- { 0x0362, false, NULL }, /* surf2d (nv30) */
- { 0x0043, false, NULL }, /* rop */
- { 0x0012, false, NULL }, /* beta1 */
- { 0x0072, false, NULL }, /* beta4 */
- { 0x0019, false, NULL }, /* cliprect */
- { 0x0044, false, NULL }, /* pattern */
- { 0x039e, false, NULL }, /* swzsurf */
- { 0x0397, false, NULL }, /* rankine (nv30) */
- { 0x0497, false, NULL }, /* rankine (nv35) */
- { 0x0697, false, NULL }, /* rankine (nv34) */
- {}
-};
+static int
+nv20_graph_register(struct drm_device *dev)
+{
+ struct drm_nouveau_private *dev_priv = dev->dev_private;
+
+ if (dev_priv->engine.graph.registered)
+ return 0;
+
+ NVOBJ_CLASS(dev, 0x506e, SW); /* nvsw */
+ NVOBJ_CLASS(dev, 0x0030, GR); /* null */
+ NVOBJ_CLASS(dev, 0x0039, GR); /* m2mf */
+ NVOBJ_CLASS(dev, 0x004a, GR); /* gdirect */
+ NVOBJ_CLASS(dev, 0x009f, GR); /* imageblit (nv12) */
+ NVOBJ_CLASS(dev, 0x008a, GR); /* ifc */
+ NVOBJ_CLASS(dev, 0x0089, GR); /* sifm */
+ NVOBJ_CLASS(dev, 0x0062, GR); /* surf2d */
+ NVOBJ_CLASS(dev, 0x0043, GR); /* rop */
+ NVOBJ_CLASS(dev, 0x0012, GR); /* beta1 */
+ NVOBJ_CLASS(dev, 0x0072, GR); /* beta4 */
+ NVOBJ_CLASS(dev, 0x0019, GR); /* cliprect */
+ NVOBJ_CLASS(dev, 0x0044, GR); /* pattern */
+ NVOBJ_CLASS(dev, 0x009e, GR); /* swzsurf */
+ NVOBJ_CLASS(dev, 0x0096, GR); /* celsius */
+
+ /* kelvin */
+ if (dev_priv->chipset < 0x25)
+ NVOBJ_CLASS(dev, 0x0097, GR);
+ else
+ NVOBJ_CLASS(dev, 0x0597, GR);
+
+ /* nvsw */
+ NVOBJ_CLASS(dev, 0x506e, SW);
+ NVOBJ_MTHD (dev, 0x506e, 0x0500, nv04_graph_mthd_page_flip);
+
+ dev_priv->engine.graph.registered = true;
+ return 0;
+}
+
+static int
+nv30_graph_register(struct drm_device *dev)
+{
+ struct drm_nouveau_private *dev_priv = dev->dev_private;
+
+ if (dev_priv->engine.graph.registered)
+ return 0;
+
+ NVOBJ_CLASS(dev, 0x506e, SW); /* nvsw */
+ NVOBJ_CLASS(dev, 0x0030, GR); /* null */
+ NVOBJ_CLASS(dev, 0x0039, GR); /* m2mf */
+ NVOBJ_CLASS(dev, 0x004a, GR); /* gdirect */
+ NVOBJ_CLASS(dev, 0x009f, GR); /* imageblit (nv12) */
+ NVOBJ_CLASS(dev, 0x008a, GR); /* ifc */
+ NVOBJ_CLASS(dev, 0x038a, GR); /* ifc (nv30) */
+ NVOBJ_CLASS(dev, 0x0089, GR); /* sifm */
+ NVOBJ_CLASS(dev, 0x0389, GR); /* sifm (nv30) */
+ NVOBJ_CLASS(dev, 0x0062, GR); /* surf2d */
+ NVOBJ_CLASS(dev, 0x0362, GR); /* surf2d (nv30) */
+ NVOBJ_CLASS(dev, 0x0043, GR); /* rop */
+ NVOBJ_CLASS(dev, 0x0012, GR); /* beta1 */
+ NVOBJ_CLASS(dev, 0x0072, GR); /* beta4 */
+ NVOBJ_CLASS(dev, 0x0019, GR); /* cliprect */
+ NVOBJ_CLASS(dev, 0x0044, GR); /* pattern */
+ NVOBJ_CLASS(dev, 0x039e, GR); /* swzsurf */
+
+ /* rankine */
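+ /* mask bit n selects chipset 0x3n: nv30/31 -> 0x0397, nv34 -> 0x0697, nv35-38 -> 0x0497 */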
+ if (0x00000003 & (1 << (dev_priv->chipset & 0x0f)))
+ NVOBJ_CLASS(dev, 0x0397, GR);
+ else
+ if (0x00000010 & (1 << (dev_priv->chipset & 0x0f)))
+ NVOBJ_CLASS(dev, 0x0697, GR);
+ else
+ if (0x000001e0 & (1 << (dev_priv->chipset & 0x0f)))
+ NVOBJ_CLASS(dev, 0x0497, GR);
+
+ /* nvsw */
+ NVOBJ_CLASS(dev, 0x506e, SW);
+ NVOBJ_MTHD (dev, 0x506e, 0x0500, nv04_graph_mthd_page_flip);
+
+ dev_priv->engine.graph.registered = true;
+ return 0;
+}
+
+static void
+nv20_graph_isr(struct drm_device *dev)
+{
+ u32 stat;
+
+ while ((stat = nv_rd32(dev, NV03_PGRAPH_INTR))) {
+ u32 nsource = nv_rd32(dev, NV03_PGRAPH_NSOURCE);
+ u32 nstatus = nv_rd32(dev, NV03_PGRAPH_NSTATUS);
+ u32 addr = nv_rd32(dev, NV04_PGRAPH_TRAPPED_ADDR);
+ u32 chid = (addr & 0x01f00000) >> 20;
+ u32 subc = (addr & 0x00070000) >> 16;
+ u32 mthd = (addr & 0x00001ffc);
+ u32 data = nv_rd32(dev, NV04_PGRAPH_TRAPPED_DATA);
+ u32 class = nv_rd32(dev, 0x400160 + subc * 4) & 0xfff;
+ u32 show = stat;
+
+ if (stat & NV_PGRAPH_INTR_ERROR) {
+ if (nsource & NV03_PGRAPH_NSOURCE_ILLEGAL_MTHD) {
+ if (!nouveau_gpuobj_mthd_call2(dev, chid, class, mthd, data))
+ show &= ~NV_PGRAPH_INTR_ERROR;
+ }
+ }
+
+ nv_wr32(dev, NV03_PGRAPH_INTR, stat);
+ nv_wr32(dev, NV04_PGRAPH_FIFO, 0x00000001);
+
+ if (show && nouveau_ratelimit()) {
+ NV_INFO(dev, "PGRAPH -");
+ nouveau_bitfield_print(nv10_graph_intr, show);
+ printk(" nsource:");
+ nouveau_bitfield_print(nv04_graph_nsource, nsource);
+ printk(" nstatus:");
+ nouveau_bitfield_print(nv10_graph_nstatus, nstatus);
+ printk("\n");
+ NV_INFO(dev, "PGRAPH - ch %d/%d class 0x%04x "
+ "mthd 0x%04x data 0x%08x\n",
+ chid, subc, class, mthd, data);
+ }
+ }
+}
diff --git a/drivers/gpu/drm/nouveau/nv30_fb.c b/drivers/gpu/drm/nouveau/nv30_fb.c
index 4a3f2f09512..e0135f0e214 100644
--- a/drivers/gpu/drm/nouveau/nv30_fb.c
+++ b/drivers/gpu/drm/nouveau/nv30_fb.c
@@ -29,6 +29,27 @@
#include "nouveau_drv.h"
#include "nouveau_drm.h"
+void
+nv30_fb_init_tile_region(struct drm_device *dev, int i, uint32_t addr,
+ uint32_t size, uint32_t pitch, uint32_t flags)
+{
+ struct drm_nouveau_private *dev_priv = dev->dev_private;
+ struct nouveau_tile_reg *tile = &dev_priv->tile.reg[i];
+
+ tile->addr = addr | 1;
+ tile->limit = max(1u, addr + size) - 1;
+ tile->pitch = pitch;
+}
+
+void
+nv30_fb_free_tile_region(struct drm_device *dev, int i)
+{
+ struct drm_nouveau_private *dev_priv = dev->dev_private;
+ struct nouveau_tile_reg *tile = &dev_priv->tile.reg[i];
+
+ tile->addr = tile->limit = tile->pitch = 0;
+}
+
static int
calc_bias(struct drm_device *dev, int k, int i, int j)
{
@@ -65,7 +86,7 @@ nv30_fb_init(struct drm_device *dev)
/* Turn all the tiling regions off. */
for (i = 0; i < pfb->num_tiles; i++)
- pfb->set_region_tiling(dev, i, 0, 0, 0);
+ pfb->set_tile_region(dev, i);
/* Init the memory timing regs at 0x10037c/0x1003ac */
if (dev_priv->chipset == 0x30 ||
diff --git a/drivers/gpu/drm/nouveau/nv40_fb.c b/drivers/gpu/drm/nouveau/nv40_fb.c
index 3cd07d8d5bd..f3d9c0505f7 100644
--- a/drivers/gpu/drm/nouveau/nv40_fb.c
+++ b/drivers/gpu/drm/nouveau/nv40_fb.c
@@ -4,26 +4,22 @@
#include "nouveau_drm.h"
void
-nv40_fb_set_region_tiling(struct drm_device *dev, int i, uint32_t addr,
- uint32_t size, uint32_t pitch)
+nv40_fb_set_tile_region(struct drm_device *dev, int i)
{
struct drm_nouveau_private *dev_priv = dev->dev_private;
- uint32_t limit = max(1u, addr + size) - 1;
-
- if (pitch)
- addr |= 1;
+ struct nouveau_tile_reg *tile = &dev_priv->tile.reg[i];
switch (dev_priv->chipset) {
case 0x40:
- nv_wr32(dev, NV10_PFB_TLIMIT(i), limit);
- nv_wr32(dev, NV10_PFB_TSIZE(i), pitch);
- nv_wr32(dev, NV10_PFB_TILE(i), addr);
+ nv_wr32(dev, NV10_PFB_TLIMIT(i), tile->limit);
+ nv_wr32(dev, NV10_PFB_TSIZE(i), tile->pitch);
+ nv_wr32(dev, NV10_PFB_TILE(i), tile->addr);
break;
default:
- nv_wr32(dev, NV40_PFB_TLIMIT(i), limit);
- nv_wr32(dev, NV40_PFB_TSIZE(i), pitch);
- nv_wr32(dev, NV40_PFB_TILE(i), addr);
+ nv_wr32(dev, NV40_PFB_TLIMIT(i), tile->limit);
+ nv_wr32(dev, NV40_PFB_TSIZE(i), tile->pitch);
+ nv_wr32(dev, NV40_PFB_TILE(i), tile->addr);
break;
}
}
@@ -64,7 +60,7 @@ nv40_fb_init(struct drm_device *dev)
/* Turn all the tiling regions off. */
for (i = 0; i < pfb->num_tiles; i++)
- pfb->set_region_tiling(dev, i, 0, 0, 0);
+ pfb->set_tile_region(dev, i);
return 0;
}
diff --git a/drivers/gpu/drm/nouveau/nv40_fifo.c b/drivers/gpu/drm/nouveau/nv40_fifo.c
index d337b8b28cd..49b9a35a9cd 100644
--- a/drivers/gpu/drm/nouveau/nv40_fifo.c
+++ b/drivers/gpu/drm/nouveau/nv40_fifo.c
@@ -47,6 +47,11 @@ nv40_fifo_create_context(struct nouveau_channel *chan)
if (ret)
return ret;
+ chan->user = ioremap(pci_resource_start(dev->pdev, 0) +
+ NV40_USER(chan->id), PAGE_SIZE);
+ if (!chan->user)
+ return -ENOMEM;
+
spin_lock_irqsave(&dev_priv->context_switch_lock, flags);
nv_wi32(dev, fc + 0, chan->pushbuf_base);
@@ -59,7 +64,6 @@ nv40_fifo_create_context(struct nouveau_channel *chan)
NV_PFIFO_CACHE1_BIG_ENDIAN |
#endif
0x30000000 /* no idea.. */);
- nv_wi32(dev, fc + 56, chan->ramin_grctx->pinst >> 4);
nv_wi32(dev, fc + 60, 0x0001FFFF);
/* enable the fifo dma operation */
@@ -70,17 +74,6 @@ nv40_fifo_create_context(struct nouveau_channel *chan)
return 0;
}
-void
-nv40_fifo_destroy_context(struct nouveau_channel *chan)
-{
- struct drm_device *dev = chan->dev;
-
- nv_wr32(dev, NV04_PFIFO_MODE,
- nv_rd32(dev, NV04_PFIFO_MODE) & ~(1 << chan->id));
-
- nouveau_gpuobj_ref(NULL, &chan->ramfc);
-}
-
static void
nv40_fifo_do_load_context(struct drm_device *dev, int chid)
{
@@ -279,6 +272,7 @@ nv40_fifo_init_ramxx(struct drm_device *dev)
static void
nv40_fifo_init_intr(struct drm_device *dev)
{
+ nouveau_irq_register(dev, 8, nv04_fifo_isr);
nv_wr32(dev, 0x002100, 0xffffffff);
nv_wr32(dev, 0x002140, 0xffffffff);
}
@@ -301,7 +295,7 @@ nv40_fifo_init(struct drm_device *dev)
pfifo->reassign(dev, true);
for (i = 0; i < dev_priv->engine.fifo.channels; i++) {
- if (dev_priv->fifos[i]) {
+ if (dev_priv->channels.ptr[i]) {
uint32_t mode = nv_rd32(dev, NV04_PFIFO_MODE);
nv_wr32(dev, NV04_PFIFO_MODE, mode | (1 << i));
}
diff --git a/drivers/gpu/drm/nouveau/nv40_graph.c b/drivers/gpu/drm/nouveau/nv40_graph.c
index 7ee1b91569b..19ef92a0375 100644
--- a/drivers/gpu/drm/nouveau/nv40_graph.c
+++ b/drivers/gpu/drm/nouveau/nv40_graph.c
@@ -29,6 +29,9 @@
#include "nouveau_drv.h"
#include "nouveau_grctx.h"
+static int nv40_graph_register(struct drm_device *);
+static void nv40_graph_isr(struct drm_device *);
+
struct nouveau_channel *
nv40_graph_channel(struct drm_device *dev)
{
@@ -42,7 +45,7 @@ nv40_graph_channel(struct drm_device *dev)
inst = (inst & NV40_PGRAPH_CTXCTL_CUR_INSTANCE) << 4;
for (i = 0; i < dev_priv->engine.fifo.channels; i++) {
- struct nouveau_channel *chan = dev_priv->fifos[i];
+ struct nouveau_channel *chan = dev_priv->channels.ptr[i];
if (chan && chan->ramin_grctx &&
chan->ramin_grctx->pinst == inst)
@@ -59,6 +62,7 @@ nv40_graph_create_context(struct nouveau_channel *chan)
struct drm_nouveau_private *dev_priv = dev->dev_private;
struct nouveau_pgraph_engine *pgraph = &dev_priv->engine.graph;
struct nouveau_grctx ctx = {};
+ unsigned long flags;
int ret;
ret = nouveau_gpuobj_new(dev, chan, pgraph->grctx_size, 16,
@@ -73,12 +77,39 @@ nv40_graph_create_context(struct nouveau_channel *chan)
nv40_grctx_init(&ctx);
nv_wo32(chan->ramin_grctx, 0, chan->ramin_grctx->pinst);
+
+ /* init grctx pointer in ramfc, and on PFIFO if channel is
+ * already active there
+ */
+ spin_lock_irqsave(&dev_priv->context_switch_lock, flags);
+ nv_wo32(chan->ramfc, 0x38, chan->ramin_grctx->pinst >> 4);
+ nv_mask(dev, 0x002500, 0x00000001, 0x00000000);
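+ /* 0x003204 holds the channel currently resident in PFIFO CACHE1 */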
+ if ((nv_rd32(dev, 0x003204) & 0x0000001f) == chan->id)
+ nv_wr32(dev, 0x0032e0, chan->ramin_grctx->pinst >> 4);
+ nv_mask(dev, 0x002500, 0x00000001, 0x00000001);
+ spin_unlock_irqrestore(&dev_priv->context_switch_lock, flags);
return 0;
}
void
nv40_graph_destroy_context(struct nouveau_channel *chan)
{
+ struct drm_device *dev = chan->dev;
+ struct drm_nouveau_private *dev_priv = dev->dev_private;
+ struct nouveau_pgraph_engine *pgraph = &dev_priv->engine.graph;
+ unsigned long flags;
+
+ spin_lock_irqsave(&dev_priv->context_switch_lock, flags);
+ pgraph->fifo_access(dev, false);
+
+ /* Unload the context if it's the currently active one */
+ if (pgraph->channel(dev) == chan)
+ pgraph->unload_context(dev);
+
+ pgraph->fifo_access(dev, true);
+ spin_unlock_irqrestore(&dev_priv->context_switch_lock, flags);
+
+ /* Free the context resources */
nouveau_gpuobj_ref(NULL, &chan->ramin_grctx);
}
@@ -174,43 +205,39 @@ nv40_graph_unload_context(struct drm_device *dev)
}
void
-nv40_graph_set_region_tiling(struct drm_device *dev, int i, uint32_t addr,
- uint32_t size, uint32_t pitch)
+nv40_graph_set_tile_region(struct drm_device *dev, int i)
{
struct drm_nouveau_private *dev_priv = dev->dev_private;
- uint32_t limit = max(1u, addr + size) - 1;
-
- if (pitch)
- addr |= 1;
+ struct nouveau_tile_reg *tile = &dev_priv->tile.reg[i];
switch (dev_priv->chipset) {
case 0x44:
case 0x4a:
case 0x4e:
- nv_wr32(dev, NV20_PGRAPH_TSIZE(i), pitch);
- nv_wr32(dev, NV20_PGRAPH_TLIMIT(i), limit);
- nv_wr32(dev, NV20_PGRAPH_TILE(i), addr);
+ nv_wr32(dev, NV20_PGRAPH_TSIZE(i), tile->pitch);
+ nv_wr32(dev, NV20_PGRAPH_TLIMIT(i), tile->limit);
+ nv_wr32(dev, NV20_PGRAPH_TILE(i), tile->addr);
break;
case 0x46:
case 0x47:
case 0x49:
case 0x4b:
- nv_wr32(dev, NV47_PGRAPH_TSIZE(i), pitch);
- nv_wr32(dev, NV47_PGRAPH_TLIMIT(i), limit);
- nv_wr32(dev, NV47_PGRAPH_TILE(i), addr);
- nv_wr32(dev, NV40_PGRAPH_TSIZE1(i), pitch);
- nv_wr32(dev, NV40_PGRAPH_TLIMIT1(i), limit);
- nv_wr32(dev, NV40_PGRAPH_TILE1(i), addr);
+ nv_wr32(dev, NV47_PGRAPH_TSIZE(i), tile->pitch);
+ nv_wr32(dev, NV47_PGRAPH_TLIMIT(i), tile->limit);
+ nv_wr32(dev, NV47_PGRAPH_TILE(i), tile->addr);
+ nv_wr32(dev, NV40_PGRAPH_TSIZE1(i), tile->pitch);
+ nv_wr32(dev, NV40_PGRAPH_TLIMIT1(i), tile->limit);
+ nv_wr32(dev, NV40_PGRAPH_TILE1(i), tile->addr);
break;
default:
- nv_wr32(dev, NV20_PGRAPH_TSIZE(i), pitch);
- nv_wr32(dev, NV20_PGRAPH_TLIMIT(i), limit);
- nv_wr32(dev, NV20_PGRAPH_TILE(i), addr);
- nv_wr32(dev, NV40_PGRAPH_TSIZE1(i), pitch);
- nv_wr32(dev, NV40_PGRAPH_TLIMIT1(i), limit);
- nv_wr32(dev, NV40_PGRAPH_TILE1(i), addr);
+ nv_wr32(dev, NV20_PGRAPH_TSIZE(i), tile->pitch);
+ nv_wr32(dev, NV20_PGRAPH_TLIMIT(i), tile->limit);
+ nv_wr32(dev, NV20_PGRAPH_TILE(i), tile->addr);
+ nv_wr32(dev, NV40_PGRAPH_TSIZE1(i), tile->pitch);
+ nv_wr32(dev, NV40_PGRAPH_TLIMIT1(i), tile->limit);
+ nv_wr32(dev, NV40_PGRAPH_TILE1(i), tile->addr);
break;
}
}
@@ -232,7 +259,7 @@ nv40_graph_init(struct drm_device *dev)
struct nouveau_fb_engine *pfb = &dev_priv->engine.fb;
struct nouveau_grctx ctx = {};
uint32_t vramsz, *cp;
- int i, j;
+ int ret, i, j;
nv_wr32(dev, NV03_PMC_ENABLE, nv_rd32(dev, NV03_PMC_ENABLE) &
~NV_PMC_ENABLE_PGRAPH);
@@ -256,9 +283,14 @@ nv40_graph_init(struct drm_device *dev)
kfree(cp);
+ ret = nv40_graph_register(dev);
+ if (ret)
+ return ret;
+
/* No context present currently */
nv_wr32(dev, NV40_PGRAPH_CTXCTL_CUR, 0x00000000);
+ nouveau_irq_register(dev, 12, nv40_graph_isr);
nv_wr32(dev, NV03_PGRAPH_INTR , 0xFFFFFFFF);
nv_wr32(dev, NV40_PGRAPH_INTR_EN, 0xFFFFFFFF);
@@ -347,7 +379,7 @@ nv40_graph_init(struct drm_device *dev)
/* Turn all the tiling regions off. */
for (i = 0; i < pfb->num_tiles; i++)
- nv40_graph_set_region_tiling(dev, i, 0, 0, 0);
+ nv40_graph_set_tile_region(dev, i);
/* begin RAM config */
vramsz = pci_resource_len(dev->pdev, 0) - 1;
@@ -390,26 +422,111 @@ nv40_graph_init(struct drm_device *dev)
void nv40_graph_takedown(struct drm_device *dev)
{
+ nouveau_irq_unregister(dev, 12);
}
-struct nouveau_pgraph_object_class nv40_graph_grclass[] = {
- { 0x0030, false, NULL }, /* null */
- { 0x0039, false, NULL }, /* m2mf */
- { 0x004a, false, NULL }, /* gdirect */
- { 0x009f, false, NULL }, /* imageblit (nv12) */
- { 0x008a, false, NULL }, /* ifc */
- { 0x0089, false, NULL }, /* sifm */
- { 0x3089, false, NULL }, /* sifm (nv40) */
- { 0x0062, false, NULL }, /* surf2d */
- { 0x3062, false, NULL }, /* surf2d (nv40) */
- { 0x0043, false, NULL }, /* rop */
- { 0x0012, false, NULL }, /* beta1 */
- { 0x0072, false, NULL }, /* beta4 */
- { 0x0019, false, NULL }, /* cliprect */
- { 0x0044, false, NULL }, /* pattern */
- { 0x309e, false, NULL }, /* swzsurf */
- { 0x4097, false, NULL }, /* curie (nv40) */
- { 0x4497, false, NULL }, /* curie (nv44) */
- {}
-};
+static int
+nv40_graph_register(struct drm_device *dev)
+{
+ struct drm_nouveau_private *dev_priv = dev->dev_private;
+
+ if (dev_priv->engine.graph.registered)
+ return 0;
+
+ NVOBJ_CLASS(dev, 0x506e, SW); /* nvsw */
+ NVOBJ_CLASS(dev, 0x0030, GR); /* null */
+ NVOBJ_CLASS(dev, 0x0039, GR); /* m2mf */
+ NVOBJ_CLASS(dev, 0x004a, GR); /* gdirect */
+ NVOBJ_CLASS(dev, 0x009f, GR); /* imageblit (nv12) */
+ NVOBJ_CLASS(dev, 0x008a, GR); /* ifc */
+ NVOBJ_CLASS(dev, 0x0089, GR); /* sifm */
+ NVOBJ_CLASS(dev, 0x3089, GR); /* sifm (nv40) */
+ NVOBJ_CLASS(dev, 0x0062, GR); /* surf2d */
+ NVOBJ_CLASS(dev, 0x3062, GR); /* surf2d (nv40) */
+ NVOBJ_CLASS(dev, 0x0043, GR); /* rop */
+ NVOBJ_CLASS(dev, 0x0012, GR); /* beta1 */
+ NVOBJ_CLASS(dev, 0x0072, GR); /* beta4 */
+ NVOBJ_CLASS(dev, 0x0019, GR); /* cliprect */
+ NVOBJ_CLASS(dev, 0x0044, GR); /* pattern */
+ NVOBJ_CLASS(dev, 0x309e, GR); /* swzsurf */
+
+ /* curie */
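+ /* mask bit n selects chipset 0x4n: nv44/46/4a/4c/4e and all nv60+ take 0x4497 */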
+ if (dev_priv->chipset >= 0x60 ||
+ 0x00005450 & (1 << (dev_priv->chipset & 0x0f)))
+ NVOBJ_CLASS(dev, 0x4497, GR);
+ else
+ NVOBJ_CLASS(dev, 0x4097, GR);
+
+ /* nvsw */
+ NVOBJ_CLASS(dev, 0x506e, SW);
+ NVOBJ_MTHD (dev, 0x506e, 0x0500, nv04_graph_mthd_page_flip);
+
+ dev_priv->engine.graph.registered = true;
+ return 0;
+}
+
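+/* NV40 PGRAPH reports the active context's instance address rather than
+ * a channel id, so recover the channel by matching grctx instances.
+ */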
+static int
+nv40_graph_isr_chid(struct drm_device *dev, u32 inst)
+{
+ struct drm_nouveau_private *dev_priv = dev->dev_private;
+ struct nouveau_channel *chan;
+ unsigned long flags;
+ int i;
+
+ spin_lock_irqsave(&dev_priv->channels.lock, flags);
+ for (i = 0; i < dev_priv->engine.fifo.channels; i++) {
+ chan = dev_priv->channels.ptr[i];
+ if (!chan || !chan->ramin_grctx)
+ continue;
+
+ if (inst == chan->ramin_grctx->pinst)
+ break;
+ }
+ spin_unlock_irqrestore(&dev_priv->channels.lock, flags);
+ return i;
+}
+
+static void
+nv40_graph_isr(struct drm_device *dev)
+{
+ u32 stat;
+
+ while ((stat = nv_rd32(dev, NV03_PGRAPH_INTR))) {
+ u32 nsource = nv_rd32(dev, NV03_PGRAPH_NSOURCE);
+ u32 nstatus = nv_rd32(dev, NV03_PGRAPH_NSTATUS);
+ u32 inst = (nv_rd32(dev, 0x40032c) & 0x000fffff) << 4;
+ u32 chid = nv40_graph_isr_chid(dev, inst);
+ u32 addr = nv_rd32(dev, NV04_PGRAPH_TRAPPED_ADDR);
+ u32 subc = (addr & 0x00070000) >> 16;
+ u32 mthd = (addr & 0x00001ffc);
+ u32 data = nv_rd32(dev, NV04_PGRAPH_TRAPPED_DATA);
+ u32 class = nv_rd32(dev, 0x400160 + subc * 4) & 0xffff;
+ u32 show = stat;
+
+ if (stat & NV_PGRAPH_INTR_ERROR) {
+ if (nsource & NV03_PGRAPH_NSOURCE_ILLEGAL_MTHD) {
+ if (!nouveau_gpuobj_mthd_call2(dev, chid, class, mthd, data))
+ show &= ~NV_PGRAPH_INTR_ERROR;
+ } else
+ if (nsource & NV03_PGRAPH_NSOURCE_DMA_VTX_PROTECTION) {
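+ /* zero-mask read/modify/write of 0x402000; presumably nudges PGRAPH past the vtx DMA fault */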
+ nv_mask(dev, 0x402000, 0, 0);
+ }
+ }
+
+ nv_wr32(dev, NV03_PGRAPH_INTR, stat);
+ nv_wr32(dev, NV04_PGRAPH_FIFO, 0x00000001);
+
+ if (show && nouveau_ratelimit()) {
+ NV_INFO(dev, "PGRAPH -");
+ nouveau_bitfield_print(nv10_graph_intr, show);
+ printk(" nsource:");
+ nouveau_bitfield_print(nv04_graph_nsource, nsource);
+ printk(" nstatus:");
+ nouveau_bitfield_print(nv10_graph_nstatus, nstatus);
+ printk("\n");
+ NV_INFO(dev, "PGRAPH - ch %d (0x%08x) subc %d "
+ "class 0x%04x mthd 0x%04x data 0x%08x\n",
+ chid, inst, subc, class, mthd, data);
+ }
+ }
+}
diff --git a/drivers/gpu/drm/nouveau/nv50_crtc.c b/drivers/gpu/drm/nouveau/nv50_crtc.c
index 56476d0c6de..9023c4dbb44 100644
--- a/drivers/gpu/drm/nouveau/nv50_crtc.c
+++ b/drivers/gpu/drm/nouveau/nv50_crtc.c
@@ -115,15 +115,16 @@ nv50_crtc_blank(struct nouveau_crtc *nv_crtc, bool blanked)
OUT_RING(evo, 0);
BEGIN_RING(evo, 0, NV50_EVO_CRTC(index, FB_DMA), 1);
if (dev_priv->chipset != 0x50)
- if (nv_crtc->fb.tile_flags == 0x7a00)
+ if (nv_crtc->fb.tile_flags == 0x7a00 ||
+ nv_crtc->fb.tile_flags == 0xfe00)
OUT_RING(evo, NvEvoFB32);
else
if (nv_crtc->fb.tile_flags == 0x7000)
OUT_RING(evo, NvEvoFB16);
else
- OUT_RING(evo, NvEvoVRAM);
+ OUT_RING(evo, NvEvoVRAM_LP);
else
- OUT_RING(evo, NvEvoVRAM);
+ OUT_RING(evo, NvEvoVRAM_LP);
}
nv_crtc->fb.blanked = blanked;
@@ -345,7 +346,6 @@ nv50_crtc_cursor_set(struct drm_crtc *crtc, struct drm_file *file_priv,
uint32_t buffer_handle, uint32_t width, uint32_t height)
{
struct drm_device *dev = crtc->dev;
- struct drm_nouveau_private *dev_priv = dev->dev_private;
struct nouveau_crtc *nv_crtc = nouveau_crtc(crtc);
struct nouveau_bo *cursor = NULL;
struct drm_gem_object *gem;
@@ -374,8 +374,7 @@ nv50_crtc_cursor_set(struct drm_crtc *crtc, struct drm_file *file_priv,
nouveau_bo_unmap(cursor);
- nv_crtc->cursor.set_offset(nv_crtc, nv_crtc->cursor.nvbo->bo.offset -
- dev_priv->vm_vram_base);
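+ /* likely because bo.offset is now a VM address; the cursor wants the raw VRAM offset, i.e. mem.start in pages */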
+ nv_crtc->cursor.set_offset(nv_crtc, nv_crtc->cursor.nvbo->bo.mem.start << PAGE_SHIFT);
nv_crtc->cursor.show(nv_crtc, true);
out:
@@ -437,6 +436,7 @@ static const struct drm_crtc_funcs nv50_crtc_funcs = {
.cursor_move = nv50_crtc_cursor_move,
.gamma_set = nv50_crtc_gamma_set,
.set_config = drm_crtc_helper_set_config,
+ .page_flip = nouveau_crtc_page_flip,
.destroy = nv50_crtc_destroy,
};
@@ -453,6 +453,7 @@ nv50_crtc_prepare(struct drm_crtc *crtc)
NV_DEBUG_KMS(dev, "index %d\n", nv_crtc->index);
+ drm_vblank_pre_modeset(dev, nv_crtc->index);
nv50_crtc_blank(nv_crtc, true);
}
@@ -468,6 +469,7 @@ nv50_crtc_commit(struct drm_crtc *crtc)
NV_DEBUG_KMS(dev, "index %d\n", nv_crtc->index);
nv50_crtc_blank(nv_crtc, false);
+ drm_vblank_post_modeset(dev, nv_crtc->index);
ret = RING_SPACE(evo, 2);
if (ret) {
@@ -545,7 +547,7 @@ nv50_crtc_do_mode_set_base(struct drm_crtc *crtc,
return -EINVAL;
}
- nv_crtc->fb.offset = fb->nvbo->bo.offset - dev_priv->vm_vram_base;
+ nv_crtc->fb.offset = fb->nvbo->bo.mem.start << PAGE_SHIFT;
nv_crtc->fb.tile_flags = nouveau_bo_tile_layout(fb->nvbo);
nv_crtc->fb.cpp = drm_fb->bits_per_pixel / 8;
if (!nv_crtc->fb.blanked && dev_priv->chipset != 0x50) {
@@ -554,13 +556,14 @@ nv50_crtc_do_mode_set_base(struct drm_crtc *crtc,
return ret;
BEGIN_RING(evo, 0, NV50_EVO_CRTC(nv_crtc->index, FB_DMA), 1);
- if (nv_crtc->fb.tile_flags == 0x7a00)
+ if (nv_crtc->fb.tile_flags == 0x7a00 ||
+ nv_crtc->fb.tile_flags == 0xfe00)
OUT_RING(evo, NvEvoFB32);
else
if (nv_crtc->fb.tile_flags == 0x7000)
OUT_RING(evo, NvEvoFB16);
else
- OUT_RING(evo, NvEvoVRAM);
+ OUT_RING(evo, NvEvoVRAM_LP);
}
ret = RING_SPACE(evo, 12);
@@ -574,8 +577,10 @@ nv50_crtc_do_mode_set_base(struct drm_crtc *crtc,
if (!nv_crtc->fb.tile_flags) {
OUT_RING(evo, drm_fb->pitch | (1 << 20));
} else {
- OUT_RING(evo, ((drm_fb->pitch / 4) << 4) |
- fb->nvbo->tile_mode);
+ u32 tile_mode = fb->nvbo->tile_mode;
+ if (dev_priv->card_type >= NV_C0)
+ tile_mode >>= 4;
+ OUT_RING(evo, ((drm_fb->pitch / 4) << 4) | tile_mode);
}
if (dev_priv->chipset == 0x50)
OUT_RING(evo, (nv_crtc->fb.tile_flags << 8) | format);
diff --git a/drivers/gpu/drm/nouveau/nv50_display.c b/drivers/gpu/drm/nouveau/nv50_display.c
index f624c611dde..7cc94ed9ed9 100644
--- a/drivers/gpu/drm/nouveau/nv50_display.c
+++ b/drivers/gpu/drm/nouveau/nv50_display.c
@@ -33,6 +33,8 @@
#include "nouveau_ramht.h"
#include "drm_crtc_helper.h"
+static void nv50_display_isr(struct drm_device *);
+
static inline int
nv50_sor_nr(struct drm_device *dev)
{
@@ -46,159 +48,6 @@ nv50_sor_nr(struct drm_device *dev)
return 4;
}
-static void
-nv50_evo_channel_del(struct nouveau_channel **pchan)
-{
- struct nouveau_channel *chan = *pchan;
-
- if (!chan)
- return;
- *pchan = NULL;
-
- nouveau_gpuobj_channel_takedown(chan);
- nouveau_bo_unmap(chan->pushbuf_bo);
- nouveau_bo_ref(NULL, &chan->pushbuf_bo);
-
- if (chan->user)
- iounmap(chan->user);
-
- kfree(chan);
-}
-
-static int
-nv50_evo_dmaobj_new(struct nouveau_channel *evo, uint32_t class, uint32_t name,
- uint32_t tile_flags, uint32_t magic_flags,
- uint32_t offset, uint32_t limit)
-{
- struct drm_nouveau_private *dev_priv = evo->dev->dev_private;
- struct drm_device *dev = evo->dev;
- struct nouveau_gpuobj *obj = NULL;
- int ret;
-
- ret = nouveau_gpuobj_new(dev, evo, 6*4, 32, 0, &obj);
- if (ret)
- return ret;
- obj->engine = NVOBJ_ENGINE_DISPLAY;
-
- nv_wo32(obj, 0, (tile_flags << 22) | (magic_flags << 16) | class);
- nv_wo32(obj, 4, limit);
- nv_wo32(obj, 8, offset);
- nv_wo32(obj, 12, 0x00000000);
- nv_wo32(obj, 16, 0x00000000);
- if (dev_priv->card_type < NV_C0)
- nv_wo32(obj, 20, 0x00010000);
- else
- nv_wo32(obj, 20, 0x00020000);
- dev_priv->engine.instmem.flush(dev);
-
- ret = nouveau_ramht_insert(evo, name, obj);
- nouveau_gpuobj_ref(NULL, &obj);
- if (ret) {
- return ret;
- }
-
- return 0;
-}
-
-static int
-nv50_evo_channel_new(struct drm_device *dev, struct nouveau_channel **pchan)
-{
- struct drm_nouveau_private *dev_priv = dev->dev_private;
- struct nouveau_gpuobj *ramht = NULL;
- struct nouveau_channel *chan;
- int ret;
-
- chan = kzalloc(sizeof(struct nouveau_channel), GFP_KERNEL);
- if (!chan)
- return -ENOMEM;
- *pchan = chan;
-
- chan->id = -1;
- chan->dev = dev;
- chan->user_get = 4;
- chan->user_put = 0;
-
- ret = nouveau_gpuobj_new(dev, NULL, 32768, 0x1000,
- NVOBJ_FLAG_ZERO_ALLOC, &chan->ramin);
- if (ret) {
- NV_ERROR(dev, "Error allocating EVO channel memory: %d\n", ret);
- nv50_evo_channel_del(pchan);
- return ret;
- }
-
- ret = drm_mm_init(&chan->ramin_heap, 0, 32768);
- if (ret) {
- NV_ERROR(dev, "Error initialising EVO PRAMIN heap: %d\n", ret);
- nv50_evo_channel_del(pchan);
- return ret;
- }
-
- ret = nouveau_gpuobj_new(dev, chan, 4096, 16, 0, &ramht);
- if (ret) {
- NV_ERROR(dev, "Unable to allocate EVO RAMHT: %d\n", ret);
- nv50_evo_channel_del(pchan);
- return ret;
- }
-
- ret = nouveau_ramht_new(dev, ramht, &chan->ramht);
- nouveau_gpuobj_ref(NULL, &ramht);
- if (ret) {
- nv50_evo_channel_del(pchan);
- return ret;
- }
-
- if (dev_priv->chipset != 0x50) {
- ret = nv50_evo_dmaobj_new(chan, 0x3d, NvEvoFB16, 0x70, 0x19,
- 0, 0xffffffff);
- if (ret) {
- nv50_evo_channel_del(pchan);
- return ret;
- }
-
-
- ret = nv50_evo_dmaobj_new(chan, 0x3d, NvEvoFB32, 0x7a, 0x19,
- 0, 0xffffffff);
- if (ret) {
- nv50_evo_channel_del(pchan);
- return ret;
- }
- }
-
- ret = nv50_evo_dmaobj_new(chan, 0x3d, NvEvoVRAM, 0, 0x19,
- 0, dev_priv->vram_size);
- if (ret) {
- nv50_evo_channel_del(pchan);
- return ret;
- }
-
- ret = nouveau_bo_new(dev, NULL, 4096, 0, TTM_PL_FLAG_VRAM, 0, 0,
- false, true, &chan->pushbuf_bo);
- if (ret == 0)
- ret = nouveau_bo_pin(chan->pushbuf_bo, TTM_PL_FLAG_VRAM);
- if (ret) {
- NV_ERROR(dev, "Error creating EVO DMA push buffer: %d\n", ret);
- nv50_evo_channel_del(pchan);
- return ret;
- }
-
- ret = nouveau_bo_map(chan->pushbuf_bo);
- if (ret) {
- NV_ERROR(dev, "Error mapping EVO DMA push buffer: %d\n", ret);
- nv50_evo_channel_del(pchan);
- return ret;
- }
-
- chan->user = ioremap(pci_resource_start(dev->pdev, 0) +
- NV50_PDISPLAY_USER(0), PAGE_SIZE);
- if (!chan->user) {
- NV_ERROR(dev, "Error mapping EVO control regs.\n");
- nv50_evo_channel_del(pchan);
- return -ENOMEM;
- }
-
- return 0;
-}
-
int
nv50_display_early_init(struct drm_device *dev)
{
@@ -214,17 +63,16 @@ int
nv50_display_init(struct drm_device *dev)
{
struct drm_nouveau_private *dev_priv = dev->dev_private;
- struct nouveau_timer_engine *ptimer = &dev_priv->engine.timer;
struct nouveau_gpio_engine *pgpio = &dev_priv->engine.gpio;
- struct nouveau_channel *evo = dev_priv->evo;
struct drm_connector *connector;
- uint32_t val, ram_amount;
- uint64_t start;
+ struct nouveau_channel *evo;
int ret, i;
+ u32 val;
NV_DEBUG_KMS(dev, "\n");
nv_wr32(dev, 0x00610184, nv_rd32(dev, 0x00614004));
+
/*
* I think the 0x006101XX range is some kind of main control area
* that enables things.
@@ -240,16 +88,19 @@ nv50_display_init(struct drm_device *dev)
val = nv_rd32(dev, 0x0061610c + (i * 0x800));
nv_wr32(dev, 0x0061019c + (i * 0x10), val);
}
+
/* DAC */
for (i = 0; i < 3; i++) {
val = nv_rd32(dev, 0x0061a000 + (i * 0x800));
nv_wr32(dev, 0x006101d0 + (i * 0x04), val);
}
+
/* SOR */
for (i = 0; i < nv50_sor_nr(dev); i++) {
val = nv_rd32(dev, 0x0061c000 + (i * 0x800));
nv_wr32(dev, 0x006101e0 + (i * 0x04), val);
}
+
/* EXT */
for (i = 0; i < 3; i++) {
val = nv_rd32(dev, 0x0061e000 + (i * 0x800));
@@ -262,17 +113,6 @@ nv50_display_init(struct drm_device *dev)
nv_wr32(dev, NV50_PDISPLAY_DAC_CLK_CTRL1(i), 0x00000001);
}
- /* This used to be in crtc unblank, but seems out of place there. */
- nv_wr32(dev, NV50_PDISPLAY_UNK_380, 0);
- /* RAM is clamped to 256 MiB. */
- ram_amount = dev_priv->vram_size;
- NV_DEBUG_KMS(dev, "ram_amount %d\n", ram_amount);
- if (ram_amount > 256*1024*1024)
- ram_amount = 256*1024*1024;
- nv_wr32(dev, NV50_PDISPLAY_RAM_AMOUNT, ram_amount - 1);
- nv_wr32(dev, NV50_PDISPLAY_UNK_388, 0x150000);
- nv_wr32(dev, NV50_PDISPLAY_UNK_38C, 0);
-
/* The precise purpose is unknown; I suspect it has something to do
* with text mode.
*/
@@ -287,37 +127,6 @@ nv50_display_init(struct drm_device *dev)
}
}
- /* taken from nv bug #12637, attempts to un-wedge the hw if it's
- * stuck in some unspecified state
- */
- start = ptimer->read(dev);
- nv_wr32(dev, NV50_PDISPLAY_CHANNEL_STAT(0), 0x2b00);
- while ((val = nv_rd32(dev, NV50_PDISPLAY_CHANNEL_STAT(0))) & 0x1e0000) {
- if ((val & 0x9f0000) == 0x20000)
- nv_wr32(dev, NV50_PDISPLAY_CHANNEL_STAT(0),
- val | 0x800000);
-
- if ((val & 0x3f0000) == 0x30000)
- nv_wr32(dev, NV50_PDISPLAY_CHANNEL_STAT(0),
- val | 0x200000);
-
- if (ptimer->read(dev) - start > 1000000000ULL) {
- NV_ERROR(dev, "timeout: (0x610200 & 0x1e0000) != 0\n");
- NV_ERROR(dev, "0x610200 = 0x%08x\n", val);
- return -EBUSY;
- }
- }
-
- nv_wr32(dev, NV50_PDISPLAY_CTRL_STATE, NV50_PDISPLAY_CTRL_STATE_ENABLE);
- nv_wr32(dev, NV50_PDISPLAY_CHANNEL_STAT(0), 0x1000b03);
- if (!nv_wait(dev, NV50_PDISPLAY_CHANNEL_STAT(0),
- 0x40000000, 0x40000000)) {
- NV_ERROR(dev, "timeout: (0x610200 & 0x40000000) == 0x40000000\n");
- NV_ERROR(dev, "0x610200 = 0x%08x\n",
- nv_rd32(dev, NV50_PDISPLAY_CHANNEL_STAT(0)));
- return -EBUSY;
- }
-
for (i = 0; i < 2; i++) {
nv_wr32(dev, NV50_PDISPLAY_CURSOR_CURSOR_CTRL2(i), 0x2000);
if (!nv_wait(dev, NV50_PDISPLAY_CURSOR_CURSOR_CTRL2(i),
@@ -341,39 +150,31 @@ nv50_display_init(struct drm_device *dev)
}
}
- nv_wr32(dev, NV50_PDISPLAY_OBJECTS, (evo->ramin->vinst >> 8) | 9);
+ nv_wr32(dev, NV50_PDISPLAY_PIO_CTRL, 0x00000000);
+ nv_mask(dev, NV50_PDISPLAY_INTR_0, 0x00000000, 0x00000000);
+ nv_wr32(dev, NV50_PDISPLAY_INTR_EN_0, 0x00000000);
+ nv_mask(dev, NV50_PDISPLAY_INTR_1, 0x00000000, 0x00000000);
+ nv_wr32(dev, NV50_PDISPLAY_INTR_EN_1,
+ NV50_PDISPLAY_INTR_EN_1_CLK_UNK10 |
+ NV50_PDISPLAY_INTR_EN_1_CLK_UNK20 |
+ NV50_PDISPLAY_INTR_EN_1_CLK_UNK40);
+
+ /* enable hotplug interrupts */
+ list_for_each_entry(connector, &dev->mode_config.connector_list, head) {
+ struct nouveau_connector *conn = nouveau_connector(connector);
- /* initialise fifo */
- nv_wr32(dev, NV50_PDISPLAY_CHANNEL_DMA_CB(0),
- ((evo->pushbuf_bo->bo.mem.start << PAGE_SHIFT) >> 8) |
- NV50_PDISPLAY_CHANNEL_DMA_CB_LOCATION_VRAM |
- NV50_PDISPLAY_CHANNEL_DMA_CB_VALID);
- nv_wr32(dev, NV50_PDISPLAY_CHANNEL_UNK2(0), 0x00010000);
- nv_wr32(dev, NV50_PDISPLAY_CHANNEL_UNK3(0), 0x00000002);
- if (!nv_wait(dev, 0x610200, 0x80000000, 0x00000000)) {
- NV_ERROR(dev, "timeout: (0x610200 & 0x80000000) == 0\n");
- NV_ERROR(dev, "0x610200 = 0x%08x\n", nv_rd32(dev, 0x610200));
- return -EBUSY;
+ if (conn->dcb->gpio_tag == 0xff)
+ continue;
+
+ pgpio->irq_enable(dev, conn->dcb->gpio_tag, true);
}
- nv_wr32(dev, NV50_PDISPLAY_CHANNEL_STAT(0),
- (nv_rd32(dev, NV50_PDISPLAY_CHANNEL_STAT(0)) & ~0x00000003) |
- NV50_PDISPLAY_CHANNEL_STAT_DMA_ENABLED);
- nv_wr32(dev, NV50_PDISPLAY_USER_PUT(0), 0);
- nv_wr32(dev, NV50_PDISPLAY_CHANNEL_STAT(0), 0x01000003 |
- NV50_PDISPLAY_CHANNEL_STAT_DMA_ENABLED);
- nv_wr32(dev, 0x610300, nv_rd32(dev, 0x610300) & ~1);
-
- evo->dma.max = (4096/4) - 2;
- evo->dma.put = 0;
- evo->dma.cur = evo->dma.put;
- evo->dma.free = evo->dma.max - evo->dma.cur;
-
- ret = RING_SPACE(evo, NOUVEAU_DMA_SKIPS);
+
+ ret = nv50_evo_init(dev);
if (ret)
return ret;
+ evo = dev_priv->evo;
- for (i = 0; i < NOUVEAU_DMA_SKIPS; i++)
- OUT_RING(evo, 0);
+ nv_wr32(dev, NV50_PDISPLAY_OBJECTS, (evo->ramin->vinst >> 8) | 9);
ret = RING_SPACE(evo, 11);
if (ret)
@@ -393,21 +194,6 @@ nv50_display_init(struct drm_device *dev)
if (!nv_wait(dev, 0x640004, 0xffffffff, evo->dma.put << 2))
NV_ERROR(dev, "evo pushbuf stalled\n");
- /* enable clock change interrupts. */
- nv_wr32(dev, 0x610028, 0x00010001);
- nv_wr32(dev, NV50_PDISPLAY_INTR_EN, (NV50_PDISPLAY_INTR_EN_CLK_UNK10 |
- NV50_PDISPLAY_INTR_EN_CLK_UNK20 |
- NV50_PDISPLAY_INTR_EN_CLK_UNK40));
-
- /* enable hotplug interrupts */
- list_for_each_entry(connector, &dev->mode_config.connector_list, head) {
- struct nouveau_connector *conn = nouveau_connector(connector);
-
- if (conn->dcb->gpio_tag == 0xff)
- continue;
-
- pgpio->irq_enable(dev, conn->dcb->gpio_tag, true);
- }
return 0;
}
@@ -452,13 +238,7 @@ static int nv50_display_disable(struct drm_device *dev)
}
}
- nv_wr32(dev, NV50_PDISPLAY_CHANNEL_STAT(0), 0);
- nv_wr32(dev, NV50_PDISPLAY_CTRL_STATE, 0);
- if (!nv_wait(dev, NV50_PDISPLAY_CHANNEL_STAT(0), 0x1e0000, 0)) {
- NV_ERROR(dev, "timeout: (0x610200 & 0x1e0000) == 0\n");
- NV_ERROR(dev, "0x610200 = 0x%08x\n",
- nv_rd32(dev, NV50_PDISPLAY_CHANNEL_STAT(0)));
- }
+ nv50_evo_fini(dev);
for (i = 0; i < 3; i++) {
if (!nv_wait(dev, NV50_PDISPLAY_SOR_DPMS_STATE(i),
@@ -470,7 +250,7 @@ static int nv50_display_disable(struct drm_device *dev)
}
/* disable interrupts. */
- nv_wr32(dev, NV50_PDISPLAY_INTR_EN, 0x00000000);
+ nv_wr32(dev, NV50_PDISPLAY_INTR_EN_1, 0x00000000);
/* disable hotplug interrupts */
nv_wr32(dev, 0xe054, 0xffffffff);
@@ -508,13 +288,6 @@ int nv50_display_create(struct drm_device *dev)
dev->mode_config.fb_base = dev_priv->fb_phys;
- /* Create EVO channel */
- ret = nv50_evo_channel_new(dev, &dev_priv->evo);
- if (ret) {
- NV_ERROR(dev, "Error creating EVO channel: %d\n", ret);
- return ret;
- }
-
/* Create CRTC objects */
for (i = 0; i < 2; i++)
nv50_crtc_create(dev, i);
@@ -557,6 +330,9 @@ int nv50_display_create(struct drm_device *dev)
}
}
+ INIT_WORK(&dev_priv->irq_work, nv50_display_irq_handler_bh);
+ nouveau_irq_register(dev, 26, nv50_display_isr);
+
ret = nv50_display_init(dev);
if (ret) {
nv50_display_destroy(dev);
@@ -569,14 +345,12 @@ int nv50_display_create(struct drm_device *dev)
void
nv50_display_destroy(struct drm_device *dev)
{
- struct drm_nouveau_private *dev_priv = dev->dev_private;
-
NV_DEBUG_KMS(dev, "\n");
drm_mode_config_cleanup(dev);
nv50_display_disable(dev);
- nv50_evo_channel_del(&dev_priv->evo);
+ nouveau_irq_unregister(dev, 26);
}
static u16
@@ -660,32 +434,32 @@ static void
nv50_display_vblank_crtc_handler(struct drm_device *dev, int crtc)
{
struct drm_nouveau_private *dev_priv = dev->dev_private;
- struct nouveau_channel *chan;
- struct list_head *entry, *tmp;
+ struct nouveau_channel *chan, *tmp;
- list_for_each_safe(entry, tmp, &dev_priv->vbl_waiting) {
- chan = list_entry(entry, struct nouveau_channel, nvsw.vbl_wait);
+ list_for_each_entry_safe(chan, tmp, &dev_priv->vbl_waiting,
+ nvsw.vbl_wait) {
+ if (chan->nvsw.vblsem_head != crtc)
+ continue;
nouveau_bo_wr32(chan->notifier_bo, chan->nvsw.vblsem_offset,
chan->nvsw.vblsem_rval);
list_del(&chan->nvsw.vbl_wait);
+ drm_vblank_put(dev, crtc);
}
+
+ drm_handle_vblank(dev, crtc);
}
static void
nv50_display_vblank_handler(struct drm_device *dev, uint32_t intr)
{
- intr &= NV50_PDISPLAY_INTR_1_VBLANK_CRTC;
-
if (intr & NV50_PDISPLAY_INTR_1_VBLANK_CRTC_0)
nv50_display_vblank_crtc_handler(dev, 0);
if (intr & NV50_PDISPLAY_INTR_1_VBLANK_CRTC_1)
nv50_display_vblank_crtc_handler(dev, 1);
- nv_wr32(dev, NV50_PDISPLAY_INTR_EN, nv_rd32(dev,
- NV50_PDISPLAY_INTR_EN) & ~intr);
- nv_wr32(dev, NV50_PDISPLAY_INTR_1, intr);
+ nv_wr32(dev, NV50_PDISPLAY_INTR_1, NV50_PDISPLAY_INTR_1_VBLANK_CRTC);
}
static void
@@ -1011,108 +785,31 @@ nv50_display_irq_handler_bh(struct work_struct *work)
static void
nv50_display_error_handler(struct drm_device *dev)
{
- uint32_t addr, data;
-
- nv_wr32(dev, NV50_PDISPLAY_INTR_0, 0x00010000);
- addr = nv_rd32(dev, NV50_PDISPLAY_TRAPPED_ADDR);
- data = nv_rd32(dev, NV50_PDISPLAY_TRAPPED_DATA);
-
- NV_ERROR(dev, "EvoCh %d Mthd 0x%04x Data 0x%08x (0x%04x 0x%02x)\n",
- 0, addr & 0xffc, data, addr >> 16, (addr >> 12) & 0xf);
-
- nv_wr32(dev, NV50_PDISPLAY_TRAPPED_ADDR, 0x90000000);
-}
-
-void
-nv50_display_irq_hotplug_bh(struct work_struct *work)
-{
- struct drm_nouveau_private *dev_priv =
- container_of(work, struct drm_nouveau_private, hpd_work);
- struct drm_device *dev = dev_priv->dev;
- struct drm_connector *connector;
- const uint32_t gpio_reg[4] = { 0xe104, 0xe108, 0xe280, 0xe284 };
- uint32_t unplug_mask, plug_mask, change_mask;
- uint32_t hpd0, hpd1;
-
- spin_lock_irq(&dev_priv->hpd_state.lock);
- hpd0 = dev_priv->hpd_state.hpd0_bits;
- dev_priv->hpd_state.hpd0_bits = 0;
- hpd1 = dev_priv->hpd_state.hpd1_bits;
- dev_priv->hpd_state.hpd1_bits = 0;
- spin_unlock_irq(&dev_priv->hpd_state.lock);
-
- hpd0 &= nv_rd32(dev, 0xe050);
- if (dev_priv->chipset >= 0x90)
- hpd1 &= nv_rd32(dev, 0xe070);
-
- plug_mask = (hpd0 & 0x0000ffff) | (hpd1 << 16);
- unplug_mask = (hpd0 >> 16) | (hpd1 & 0xffff0000);
- change_mask = plug_mask | unplug_mask;
-
- list_for_each_entry(connector, &dev->mode_config.connector_list, head) {
- struct drm_encoder_helper_funcs *helper;
- struct nouveau_connector *nv_connector =
- nouveau_connector(connector);
- struct nouveau_encoder *nv_encoder;
- struct dcb_gpio_entry *gpio;
- uint32_t reg;
- bool plugged;
-
- if (!nv_connector->dcb)
- continue;
+ u32 channels = (nv_rd32(dev, NV50_PDISPLAY_INTR_0) & 0x001f0000) >> 16;
+ u32 addr, data;
+ int chid;
- gpio = nouveau_bios_gpio_entry(dev, nv_connector->dcb->gpio_tag);
- if (!gpio || !(change_mask & (1 << gpio->line)))
+ for (chid = 0; chid < 5; chid++) {
+ if (!(channels & (1 << chid)))
continue;
- reg = nv_rd32(dev, gpio_reg[gpio->line >> 3]);
- plugged = !!(reg & (4 << ((gpio->line & 7) << 2)));
- NV_INFO(dev, "%splugged %s\n", plugged ? "" : "un",
- drm_get_connector_name(connector)) ;
-
- if (!connector->encoder || !connector->encoder->crtc ||
- !connector->encoder->crtc->enabled)
- continue;
- nv_encoder = nouveau_encoder(connector->encoder);
- helper = connector->encoder->helper_private;
-
- if (nv_encoder->dcb->type != OUTPUT_DP)
- continue;
+ nv_wr32(dev, NV50_PDISPLAY_INTR_0, 0x00010000 << chid);
+ addr = nv_rd32(dev, NV50_PDISPLAY_TRAPPED_ADDR(chid));
+ data = nv_rd32(dev, NV50_PDISPLAY_TRAPPED_DATA(chid));
+ NV_ERROR(dev, "EvoCh %d Mthd 0x%04x Data 0x%08x "
+ "(0x%04x 0x%02x)\n", chid,
+ addr & 0xffc, data, addr >> 16, (addr >> 12) & 0xf);
- if (plugged)
- helper->dpms(connector->encoder, DRM_MODE_DPMS_ON);
- else
- helper->dpms(connector->encoder, DRM_MODE_DPMS_OFF);
+ nv_wr32(dev, NV50_PDISPLAY_TRAPPED_ADDR(chid), 0x90000000);
}
-
- drm_helper_hpd_irq_event(dev);
}
-void
-nv50_display_irq_handler(struct drm_device *dev)
+static void
+nv50_display_isr(struct drm_device *dev)
{
struct drm_nouveau_private *dev_priv = dev->dev_private;
uint32_t delayed = 0;
- if (nv_rd32(dev, NV50_PMC_INTR_0) & NV50_PMC_INTR_0_HOTPLUG) {
- uint32_t hpd0_bits, hpd1_bits = 0;
-
- hpd0_bits = nv_rd32(dev, 0xe054);
- nv_wr32(dev, 0xe054, hpd0_bits);
-
- if (dev_priv->chipset >= 0x90) {
- hpd1_bits = nv_rd32(dev, 0xe074);
- nv_wr32(dev, 0xe074, hpd1_bits);
- }
-
- spin_lock(&dev_priv->hpd_state.lock);
- dev_priv->hpd_state.hpd0_bits |= hpd0_bits;
- dev_priv->hpd_state.hpd1_bits |= hpd1_bits;
- spin_unlock(&dev_priv->hpd_state.lock);
-
- queue_work(dev_priv->wq, &dev_priv->hpd_work);
- }
-
while (nv_rd32(dev, NV50_PMC_INTR_0) & NV50_PMC_INTR_0_DISPLAY) {
uint32_t intr0 = nv_rd32(dev, NV50_PDISPLAY_INTR_0);
uint32_t intr1 = nv_rd32(dev, NV50_PDISPLAY_INTR_1);
@@ -1123,9 +820,9 @@ nv50_display_irq_handler(struct drm_device *dev)
if (!intr0 && !(intr1 & ~delayed))
break;
- if (intr0 & 0x00010000) {
+ if (intr0 & 0x001f0000) {
nv50_display_error_handler(dev);
- intr0 &= ~0x00010000;
+ intr0 &= ~0x001f0000;
}
if (intr1 & NV50_PDISPLAY_INTR_1_VBLANK_CRTC) {
@@ -1156,4 +853,3 @@ nv50_display_irq_handler(struct drm_device *dev)
}
}
}
-
diff --git a/drivers/gpu/drm/nouveau/nv50_display.h b/drivers/gpu/drm/nouveau/nv50_display.h
index c551f0b85ee..f0e30b78ef6 100644
--- a/drivers/gpu/drm/nouveau/nv50_display.h
+++ b/drivers/gpu/drm/nouveau/nv50_display.h
@@ -35,9 +35,7 @@
#include "nouveau_crtc.h"
#include "nv50_evo.h"
-void nv50_display_irq_handler(struct drm_device *dev);
void nv50_display_irq_handler_bh(struct work_struct *work);
-void nv50_display_irq_hotplug_bh(struct work_struct *work);
int nv50_display_early_init(struct drm_device *dev);
void nv50_display_late_takedown(struct drm_device *dev);
int nv50_display_create(struct drm_device *dev);
diff --git a/drivers/gpu/drm/nouveau/nv50_evo.c b/drivers/gpu/drm/nouveau/nv50_evo.c
new file mode 100644
index 00000000000..14e24e906ee
--- /dev/null
+++ b/drivers/gpu/drm/nouveau/nv50_evo.c
@@ -0,0 +1,345 @@
+/*
+ * Copyright 2010 Red Hat Inc.
+ *
+ * Permission is hereby granted, free of charge, to any person obtaining a
+ * copy of this software and associated documentation files (the "Software"),
+ * to deal in the Software without restriction, including without limitation
+ * the rights to use, copy, modify, merge, publish, distribute, sublicense,
+ * and/or sell copies of the Software, and to permit persons to whom the
+ * Software is furnished to do so, subject to the following conditions:
+ *
+ * The above copyright notice and this permission notice shall be included in
+ * all copies or substantial portions of the Software.
+ *
+ * THE SOFTWARE IS PROVIDED "AS IS", WITHOUT WARRANTY OF ANY KIND, EXPRESS OR
+ * IMPLIED, INCLUDING BUT NOT LIMITED TO THE WARRANTIES OF MERCHANTABILITY,
+ * FITNESS FOR A PARTICULAR PURPOSE AND NONINFRINGEMENT. IN NO EVENT SHALL
+ * THE COPYRIGHT HOLDER(S) OR AUTHOR(S) BE LIABLE FOR ANY CLAIM, DAMAGES OR
+ * OTHER LIABILITY, WHETHER IN AN ACTION OF CONTRACT, TORT OR OTHERWISE,
+ * ARISING FROM, OUT OF OR IN CONNECTION WITH THE SOFTWARE OR THE USE OR
+ * OTHER DEALINGS IN THE SOFTWARE.
+ *
+ * Authors: Ben Skeggs
+ */
+
+#include "drmP.h"
+
+#include "nouveau_drv.h"
+#include "nouveau_dma.h"
+#include "nouveau_ramht.h"
+
+static void
+nv50_evo_channel_del(struct nouveau_channel **pevo)
+{
+ struct drm_nouveau_private *dev_priv;
+ struct nouveau_channel *evo = *pevo;
+
+ if (!evo)
+ return;
+ *pevo = NULL;
+
+ dev_priv = evo->dev->dev_private;
+ dev_priv->evo_alloc &= ~(1 << evo->id);
+
+ nouveau_gpuobj_channel_takedown(evo);
+ nouveau_bo_unmap(evo->pushbuf_bo);
+ nouveau_bo_ref(NULL, &evo->pushbuf_bo);
+
+ if (evo->user)
+ iounmap(evo->user);
+
+ kfree(evo);
+}
+
+int
+nv50_evo_dmaobj_new(struct nouveau_channel *evo, u32 class, u32 name,
+ u32 tile_flags, u32 magic_flags, u32 offset, u32 limit,
+ u32 flags5)
+{
+ struct drm_nouveau_private *dev_priv = evo->dev->dev_private;
+ struct drm_device *dev = evo->dev;
+ struct nouveau_gpuobj *obj = NULL;
+ int ret;
+
+ ret = nouveau_gpuobj_new(dev, dev_priv->evo, 6*4, 32, 0, &obj);
+ if (ret)
+ return ret;
+ obj->engine = NVOBJ_ENGINE_DISPLAY;
+
+ nv_wo32(obj, 0, (tile_flags << 22) | (magic_flags << 16) | class);
+ nv_wo32(obj, 4, limit);
+ nv_wo32(obj, 8, offset);
+ nv_wo32(obj, 12, 0x00000000);
+ nv_wo32(obj, 16, 0x00000000);
+ nv_wo32(obj, 20, flags5);
+ dev_priv->engine.instmem.flush(dev);
+
+ ret = nouveau_ramht_insert(evo, name, obj);
+ nouveau_gpuobj_ref(NULL, &obj);
+	if (ret)
+		return ret;
+
+ return 0;
+}
+
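/* Illustrative sketch, not part of the patch: the six nv_wo32() writes
 * above lay out one EVO DMA object descriptor. Assuming the field
 * packing implied by the code, a hypothetical helper for word 0 would
 * combine the tile flags (bits 31:22), the "magic" flags (21:16) and
 * the object class (15:0):
 *
 *	static inline u32
 *	evo_dmaobj_word0(u32 tile_flags, u32 magic_flags, u32 class)
 *	{
 *		return (tile_flags << 22) | (magic_flags << 16) | class;
 *	}
 */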
+static int
+nv50_evo_channel_new(struct drm_device *dev, struct nouveau_channel **pevo)
+{
+ struct drm_nouveau_private *dev_priv = dev->dev_private;
+ struct nouveau_channel *evo;
+ int ret;
+
+ evo = kzalloc(sizeof(struct nouveau_channel), GFP_KERNEL);
+ if (!evo)
+ return -ENOMEM;
+ *pevo = evo;
+
+ for (evo->id = 0; evo->id < 5; evo->id++) {
+ if (dev_priv->evo_alloc & (1 << evo->id))
+ continue;
+
+ dev_priv->evo_alloc |= (1 << evo->id);
+ break;
+ }
+
+ if (evo->id == 5) {
+ kfree(evo);
+ return -ENODEV;
+ }
+
+ evo->dev = dev;
+ evo->user_get = 4;
+ evo->user_put = 0;
+
+ ret = nouveau_bo_new(dev, NULL, 4096, 0, TTM_PL_FLAG_VRAM, 0, 0,
+ false, true, &evo->pushbuf_bo);
+ if (ret == 0)
+ ret = nouveau_bo_pin(evo->pushbuf_bo, TTM_PL_FLAG_VRAM);
+ if (ret) {
+ NV_ERROR(dev, "Error creating EVO DMA push buffer: %d\n", ret);
+ nv50_evo_channel_del(pevo);
+ return ret;
+ }
+
+ ret = nouveau_bo_map(evo->pushbuf_bo);
+ if (ret) {
+ NV_ERROR(dev, "Error mapping EVO DMA push buffer: %d\n", ret);
+ nv50_evo_channel_del(pevo);
+ return ret;
+ }
+
+ evo->user = ioremap(pci_resource_start(dev->pdev, 0) +
+ NV50_PDISPLAY_USER(evo->id), PAGE_SIZE);
+ if (!evo->user) {
+ NV_ERROR(dev, "Error mapping EVO control regs.\n");
+ nv50_evo_channel_del(pevo);
+ return -ENOMEM;
+ }
+
+ /* bind primary evo channel's ramht to the channel */
+ if (dev_priv->evo && evo != dev_priv->evo)
+ nouveau_ramht_ref(dev_priv->evo->ramht, &evo->ramht, NULL);
+
+ return 0;
+}
+
+static int
+nv50_evo_channel_init(struct nouveau_channel *evo)
+{
+ struct drm_device *dev = evo->dev;
+ int id = evo->id, ret, i;
+ u64 pushbuf = evo->pushbuf_bo->bo.mem.start << PAGE_SHIFT;
+ u32 tmp;
+
+ tmp = nv_rd32(dev, NV50_PDISPLAY_EVO_CTRL(id));
+ if ((tmp & 0x009f0000) == 0x00020000)
+ nv_wr32(dev, NV50_PDISPLAY_EVO_CTRL(id), tmp | 0x00800000);
+
+ tmp = nv_rd32(dev, NV50_PDISPLAY_EVO_CTRL(id));
+ if ((tmp & 0x003f0000) == 0x00030000)
+ nv_wr32(dev, NV50_PDISPLAY_EVO_CTRL(id), tmp | 0x00600000);
+
+ /* initialise fifo */
+ nv_wr32(dev, NV50_PDISPLAY_EVO_DMA_CB(id), pushbuf >> 8 |
+ NV50_PDISPLAY_EVO_DMA_CB_LOCATION_VRAM |
+ NV50_PDISPLAY_EVO_DMA_CB_VALID);
+ nv_wr32(dev, NV50_PDISPLAY_EVO_UNK2(id), 0x00010000);
+ nv_wr32(dev, NV50_PDISPLAY_EVO_HASH_TAG(id), id);
+ nv_mask(dev, NV50_PDISPLAY_EVO_CTRL(id), NV50_PDISPLAY_EVO_CTRL_DMA,
+ NV50_PDISPLAY_EVO_CTRL_DMA_ENABLED);
+
+ nv_wr32(dev, NV50_PDISPLAY_USER_PUT(id), 0x00000000);
+ nv_wr32(dev, NV50_PDISPLAY_EVO_CTRL(id), 0x01000003 |
+ NV50_PDISPLAY_EVO_CTRL_DMA_ENABLED);
+ if (!nv_wait(dev, NV50_PDISPLAY_EVO_CTRL(id), 0x80000000, 0x00000000)) {
+ NV_ERROR(dev, "EvoCh %d init timeout: 0x%08x\n", id,
+ nv_rd32(dev, NV50_PDISPLAY_EVO_CTRL(id)));
+ return -EBUSY;
+ }
+
+ /* enable error reporting on the channel */
+ nv_mask(dev, 0x610028, 0x00000000, 0x00010001 << id);
+
+ evo->dma.max = (4096/4) - 2;
+ evo->dma.put = 0;
+ evo->dma.cur = evo->dma.put;
+ evo->dma.free = evo->dma.max - evo->dma.cur;
+
+ ret = RING_SPACE(evo, NOUVEAU_DMA_SKIPS);
+ if (ret)
+ return ret;
+
+ for (i = 0; i < NOUVEAU_DMA_SKIPS; i++)
+ OUT_RING(evo, 0);
+
+ return 0;
+}
+
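/* Usage sketch, mirroring how nv50_display_init() drives the channel
 * once it is initialised: methods are submitted with the standard
 * nouveau DMA macros. A hypothetical minimal update push might be:
 *
 *	ret = RING_SPACE(evo, 2);
 *	if (ret == 0) {
 *		BEGIN_RING(evo, 0, NV50_EVO_UPDATE, 1);
 *		OUT_RING  (evo, 0);
 *		FIRE_RING (evo);
 *	}
 */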
+static void
+nv50_evo_channel_fini(struct nouveau_channel *evo)
+{
+ struct drm_device *dev = evo->dev;
+ int id = evo->id;
+
+ nv_mask(dev, 0x610028, 0x00010001 << id, 0x00000000);
+ nv_mask(dev, NV50_PDISPLAY_EVO_CTRL(id), 0x00001010, 0x00001000);
+ nv_wr32(dev, NV50_PDISPLAY_INTR_0, (1 << id));
+ nv_mask(dev, NV50_PDISPLAY_EVO_CTRL(id), 0x00000003, 0x00000000);
+ if (!nv_wait(dev, NV50_PDISPLAY_EVO_CTRL(id), 0x001e0000, 0x00000000)) {
+ NV_ERROR(dev, "EvoCh %d takedown timeout: 0x%08x\n", id,
+ nv_rd32(dev, NV50_PDISPLAY_EVO_CTRL(id)));
+ }
+}
+
+static int
+nv50_evo_create(struct drm_device *dev)
+{
+ struct drm_nouveau_private *dev_priv = dev->dev_private;
+ struct nouveau_gpuobj *ramht = NULL;
+ struct nouveau_channel *evo;
+ int ret;
+
+ /* create primary evo channel, the one we use for modesetting
+	 * purposes
+ */
+ ret = nv50_evo_channel_new(dev, &dev_priv->evo);
+ if (ret)
+ return ret;
+ evo = dev_priv->evo;
+
+	/* set up object management on it; any other EVO channel will
+	 * also use this, as there's no per-channel support on the
+	 * hardware
+ */
+ ret = nouveau_gpuobj_new(dev, NULL, 32768, 65536,
+ NVOBJ_FLAG_ZERO_ALLOC, &evo->ramin);
+ if (ret) {
+ NV_ERROR(dev, "Error allocating EVO channel memory: %d\n", ret);
+ nv50_evo_channel_del(&dev_priv->evo);
+ return ret;
+ }
+
+ ret = drm_mm_init(&evo->ramin_heap, 0, 32768);
+ if (ret) {
+ NV_ERROR(dev, "Error initialising EVO PRAMIN heap: %d\n", ret);
+ nv50_evo_channel_del(&dev_priv->evo);
+ return ret;
+ }
+
+ ret = nouveau_gpuobj_new(dev, evo, 4096, 16, 0, &ramht);
+ if (ret) {
+ NV_ERROR(dev, "Unable to allocate EVO RAMHT: %d\n", ret);
+ nv50_evo_channel_del(&dev_priv->evo);
+ return ret;
+ }
+
+ ret = nouveau_ramht_new(dev, ramht, &evo->ramht);
+ nouveau_gpuobj_ref(NULL, &ramht);
+ if (ret) {
+ nv50_evo_channel_del(&dev_priv->evo);
+ return ret;
+ }
+
+ /* create some default objects for the scanout memtypes we support */
+ if (dev_priv->card_type >= NV_C0) {
+ ret = nv50_evo_dmaobj_new(evo, 0x3d, NvEvoFB32, 0xfe, 0x19,
+ 0, 0xffffffff, 0x00000000);
+ if (ret) {
+ nv50_evo_channel_del(&dev_priv->evo);
+ return ret;
+ }
+
+ ret = nv50_evo_dmaobj_new(evo, 0x3d, NvEvoVRAM, 0, 0x19,
+ 0, dev_priv->vram_size, 0x00020000);
+ if (ret) {
+ nv50_evo_channel_del(&dev_priv->evo);
+ return ret;
+ }
+
+ ret = nv50_evo_dmaobj_new(evo, 0x3d, NvEvoVRAM_LP, 0, 0x19,
+ 0, dev_priv->vram_size, 0x00000000);
+ if (ret) {
+ nv50_evo_channel_del(&dev_priv->evo);
+ return ret;
+ }
+ } else
+ if (dev_priv->chipset != 0x50) {
+ ret = nv50_evo_dmaobj_new(evo, 0x3d, NvEvoFB16, 0x70, 0x19,
+ 0, 0xffffffff, 0x00010000);
+ if (ret) {
+ nv50_evo_channel_del(&dev_priv->evo);
+ return ret;
+ }
+
+ ret = nv50_evo_dmaobj_new(evo, 0x3d, NvEvoFB32, 0x7a, 0x19,
+ 0, 0xffffffff, 0x00010000);
+ if (ret) {
+ nv50_evo_channel_del(&dev_priv->evo);
+ return ret;
+ }
+
+ ret = nv50_evo_dmaobj_new(evo, 0x3d, NvEvoVRAM, 0, 0x19,
+ 0, dev_priv->vram_size, 0x00010000);
+ if (ret) {
+ nv50_evo_channel_del(&dev_priv->evo);
+ return ret;
+ }
+
+ ret = nv50_evo_dmaobj_new(evo, 0x3d, NvEvoVRAM_LP, 0, 0x19,
+ 0, dev_priv->vram_size, 0x00010000);
+ if (ret) {
+ nv50_evo_channel_del(&dev_priv->evo);
+ return ret;
+ }
+ }
+
+ return 0;
+}
+
+int
+nv50_evo_init(struct drm_device *dev)
+{
+ struct drm_nouveau_private *dev_priv = dev->dev_private;
+ int ret;
+
+ if (!dev_priv->evo) {
+ ret = nv50_evo_create(dev);
+ if (ret)
+ return ret;
+ }
+
+ return nv50_evo_channel_init(dev_priv->evo);
+}
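/* Design note: splitting creation from initialisation lets
 * nv50_display_init() call nv50_evo_init() on every init/resume path
 * while the channel and its gpuobjs are allocated only on first use;
 * nv50_fb_init() and nv50_gpio_init() below follow the same
 * lazy-create pattern for their private state.
 */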
+
+void
+nv50_evo_fini(struct drm_device *dev)
+{
+ struct drm_nouveau_private *dev_priv = dev->dev_private;
+
+ if (dev_priv->evo) {
+ nv50_evo_channel_fini(dev_priv->evo);
+ nv50_evo_channel_del(&dev_priv->evo);
+ }
+}
diff --git a/drivers/gpu/drm/nouveau/nv50_evo.h b/drivers/gpu/drm/nouveau/nv50_evo.h
index aae13343bce..aa4f0d3cea8 100644
--- a/drivers/gpu/drm/nouveau/nv50_evo.h
+++ b/drivers/gpu/drm/nouveau/nv50_evo.h
@@ -24,6 +24,15 @@
*
*/
+#ifndef __NV50_EVO_H__
+#define __NV50_EVO_H__
+
+int nv50_evo_init(struct drm_device *dev);
+void nv50_evo_fini(struct drm_device *dev);
+int nv50_evo_dmaobj_new(struct nouveau_channel *, u32 class, u32 name,
+			u32 tile_flags, u32 magic_flags,
+			u32 offset, u32 limit, u32 flags5);
+
#define NV50_EVO_UPDATE 0x00000080
#define NV50_EVO_UNK84 0x00000084
#define NV50_EVO_UNK84_NOTIFY 0x40000000
@@ -111,3 +120,4 @@
#define NV50_EVO_CRTC_SCALE_RES1 0x000008d8
#define NV50_EVO_CRTC_SCALE_RES2 0x000008dc
+#endif
diff --git a/drivers/gpu/drm/nouveau/nv50_fb.c b/drivers/gpu/drm/nouveau/nv50_fb.c
index cd1988b15d2..50290dea0ac 100644
--- a/drivers/gpu/drm/nouveau/nv50_fb.c
+++ b/drivers/gpu/drm/nouveau/nv50_fb.c
@@ -3,30 +3,75 @@
#include "nouveau_drv.h"
#include "nouveau_drm.h"
+struct nv50_fb_priv {
+ struct page *r100c08_page;
+ dma_addr_t r100c08;
+};
+
+static int
+nv50_fb_create(struct drm_device *dev)
+{
+ struct drm_nouveau_private *dev_priv = dev->dev_private;
+ struct nv50_fb_priv *priv;
+
+ priv = kzalloc(sizeof(*priv), GFP_KERNEL);
+ if (!priv)
+ return -ENOMEM;
+
+ priv->r100c08_page = alloc_page(GFP_KERNEL | __GFP_ZERO);
+ if (!priv->r100c08_page) {
+ kfree(priv);
+ return -ENOMEM;
+ }
+
+ priv->r100c08 = pci_map_page(dev->pdev, priv->r100c08_page, 0,
+ PAGE_SIZE, PCI_DMA_BIDIRECTIONAL);
+ if (pci_dma_mapping_error(dev->pdev, priv->r100c08)) {
+ __free_page(priv->r100c08_page);
+ kfree(priv);
+ return -EFAULT;
+ }
+
+ dev_priv->engine.fb.priv = priv;
+ return 0;
+}
+
int
nv50_fb_init(struct drm_device *dev)
{
struct drm_nouveau_private *dev_priv = dev->dev_private;
+ struct nv50_fb_priv *priv;
+ int ret;
+
+ if (!dev_priv->engine.fb.priv) {
+ ret = nv50_fb_create(dev);
+ if (ret)
+ return ret;
+ }
+ priv = dev_priv->engine.fb.priv;
/* Not a clue what this is exactly. Without pointing it at a
* scratch page, VRAM->GART blits with M2MF (as in DDX DFS)
* cause IOMMU "read from address 0" errors (rh#561267)
*/
- nv_wr32(dev, 0x100c08, dev_priv->gart_info.sg_dummy_bus >> 8);
+ nv_wr32(dev, 0x100c08, priv->r100c08 >> 8);
/* This is needed to get meaningful information from 100c90
* on traps. No idea what these values mean exactly. */
switch (dev_priv->chipset) {
case 0x50:
- nv_wr32(dev, 0x100c90, 0x0707ff);
+ nv_wr32(dev, 0x100c90, 0x000707ff);
break;
case 0xa3:
case 0xa5:
case 0xa8:
- nv_wr32(dev, 0x100c90, 0x0d0fff);
+ nv_wr32(dev, 0x100c90, 0x000d0fff);
+ break;
+ case 0xaf:
+ nv_wr32(dev, 0x100c90, 0x089d1fff);
break;
default:
- nv_wr32(dev, 0x100c90, 0x1d07ff);
+ nv_wr32(dev, 0x100c90, 0x001d07ff);
break;
}
@@ -36,12 +81,25 @@ nv50_fb_init(struct drm_device *dev)
void
nv50_fb_takedown(struct drm_device *dev)
{
+ struct drm_nouveau_private *dev_priv = dev->dev_private;
+ struct nv50_fb_priv *priv;
+
+ priv = dev_priv->engine.fb.priv;
+ if (!priv)
+ return;
+ dev_priv->engine.fb.priv = NULL;
+
+ pci_unmap_page(dev->pdev, priv->r100c08, PAGE_SIZE,
+ PCI_DMA_BIDIRECTIONAL);
+ __free_page(priv->r100c08_page);
+ kfree(priv);
}
void
nv50_fb_vm_trap(struct drm_device *dev, int display, const char *name)
{
struct drm_nouveau_private *dev_priv = dev->dev_private;
+ unsigned long flags;
u32 trap[6], idx, chinst;
int i, ch;
@@ -60,8 +118,10 @@ nv50_fb_vm_trap(struct drm_device *dev, int display, const char *name)
return;
chinst = (trap[2] << 16) | trap[1];
+
+ spin_lock_irqsave(&dev_priv->channels.lock, flags);
for (ch = 0; ch < dev_priv->engine.fifo.channels; ch++) {
- struct nouveau_channel *chan = dev_priv->fifos[ch];
+ struct nouveau_channel *chan = dev_priv->channels.ptr[ch];
if (!chan || !chan->ramin)
continue;
@@ -69,6 +129,7 @@ nv50_fb_vm_trap(struct drm_device *dev, int display, const char *name)
if (chinst == chan->ramin->vinst >> 12)
break;
}
+ spin_unlock_irqrestore(&dev_priv->channels.lock, flags);
NV_INFO(dev, "%s - VM: Trapped %s at %02x%04x%04x status %08x "
"channel %d (0x%08x)\n",
diff --git a/drivers/gpu/drm/nouveau/nv50_fbcon.c b/drivers/gpu/drm/nouveau/nv50_fbcon.c
index 6dcf048eddb..791ded1c5c6 100644
--- a/drivers/gpu/drm/nouveau/nv50_fbcon.c
+++ b/drivers/gpu/drm/nouveau/nv50_fbcon.c
@@ -1,29 +1,46 @@
+/*
+ * Copyright 2010 Red Hat Inc.
+ *
+ * Permission is hereby granted, free of charge, to any person obtaining a
+ * copy of this software and associated documentation files (the "Software"),
+ * to deal in the Software without restriction, including without limitation
+ * the rights to use, copy, modify, merge, publish, distribute, sublicense,
+ * and/or sell copies of the Software, and to permit persons to whom the
+ * Software is furnished to do so, subject to the following conditions:
+ *
+ * The above copyright notice and this permission notice shall be included in
+ * all copies or substantial portions of the Software.
+ *
+ * THE SOFTWARE IS PROVIDED "AS IS", WITHOUT WARRANTY OF ANY KIND, EXPRESS OR
+ * IMPLIED, INCLUDING BUT NOT LIMITED TO THE WARRANTIES OF MERCHANTABILITY,
+ * FITNESS FOR A PARTICULAR PURPOSE AND NONINFRINGEMENT. IN NO EVENT SHALL
+ * THE COPYRIGHT HOLDER(S) OR AUTHOR(S) BE LIABLE FOR ANY CLAIM, DAMAGES OR
+ * OTHER LIABILITY, WHETHER IN AN ACTION OF CONTRACT, TORT OR OTHERWISE,
+ * ARISING FROM, OUT OF OR IN CONNECTION WITH THE SOFTWARE OR THE USE OR
+ * OTHER DEALINGS IN THE SOFTWARE.
+ *
+ * Authors: Ben Skeggs
+ */
+
#include "drmP.h"
#include "nouveau_drv.h"
#include "nouveau_dma.h"
#include "nouveau_ramht.h"
#include "nouveau_fbcon.h"
+#include "nouveau_mm.h"
-void
+int
nv50_fbcon_fillrect(struct fb_info *info, const struct fb_fillrect *rect)
{
struct nouveau_fbdev *nfbdev = info->par;
struct drm_device *dev = nfbdev->dev;
struct drm_nouveau_private *dev_priv = dev->dev_private;
struct nouveau_channel *chan = dev_priv->channel;
+ int ret;
- if (info->state != FBINFO_STATE_RUNNING)
- return;
-
- if (!(info->flags & FBINFO_HWACCEL_DISABLED) &&
- RING_SPACE(chan, rect->rop == ROP_COPY ? 7 : 11)) {
- nouveau_fbcon_gpu_lockup(info);
- }
-
- if (info->flags & FBINFO_HWACCEL_DISABLED) {
- cfb_fillrect(info, rect);
- return;
- }
+ ret = RING_SPACE(chan, rect->rop == ROP_COPY ? 7 : 11);
+ if (ret)
+ return ret;
if (rect->rop != ROP_COPY) {
BEGIN_RING(chan, NvSub2D, 0x02ac, 1);
@@ -45,27 +62,21 @@ nv50_fbcon_fillrect(struct fb_info *info, const struct fb_fillrect *rect)
OUT_RING(chan, 3);
}
FIRE_RING(chan);
+ return 0;
}
-void
+int
nv50_fbcon_copyarea(struct fb_info *info, const struct fb_copyarea *region)
{
struct nouveau_fbdev *nfbdev = info->par;
struct drm_device *dev = nfbdev->dev;
struct drm_nouveau_private *dev_priv = dev->dev_private;
struct nouveau_channel *chan = dev_priv->channel;
+ int ret;
- if (info->state != FBINFO_STATE_RUNNING)
- return;
-
- if (!(info->flags & FBINFO_HWACCEL_DISABLED) && RING_SPACE(chan, 12)) {
- nouveau_fbcon_gpu_lockup(info);
- }
-
- if (info->flags & FBINFO_HWACCEL_DISABLED) {
- cfb_copyarea(info, region);
- return;
- }
+ ret = RING_SPACE(chan, 12);
+ if (ret)
+ return ret;
BEGIN_RING(chan, NvSub2D, 0x0110, 1);
OUT_RING(chan, 0);
@@ -80,9 +91,10 @@ nv50_fbcon_copyarea(struct fb_info *info, const struct fb_copyarea *region)
OUT_RING(chan, 0);
OUT_RING(chan, region->sy);
FIRE_RING(chan);
+ return 0;
}
-void
+int
nv50_fbcon_imageblit(struct fb_info *info, const struct fb_image *image)
{
struct nouveau_fbdev *nfbdev = info->par;
@@ -92,23 +104,14 @@ nv50_fbcon_imageblit(struct fb_info *info, const struct fb_image *image)
uint32_t width, dwords, *data = (uint32_t *)image->data;
uint32_t mask = ~(~0 >> (32 - info->var.bits_per_pixel));
uint32_t *palette = info->pseudo_palette;
+ int ret;
- if (info->state != FBINFO_STATE_RUNNING)
- return;
-
- if (image->depth != 1) {
- cfb_imageblit(info, image);
- return;
- }
+ if (image->depth != 1)
+ return -ENODEV;
- if (!(info->flags & FBINFO_HWACCEL_DISABLED) && RING_SPACE(chan, 11)) {
- nouveau_fbcon_gpu_lockup(info);
- }
-
- if (info->flags & FBINFO_HWACCEL_DISABLED) {
- cfb_imageblit(info, image);
- return;
- }
+ ret = RING_SPACE(chan, 11);
+ if (ret)
+ return ret;
width = ALIGN(image->width, 32);
dwords = (width * image->height) >> 5;
@@ -134,11 +137,9 @@ nv50_fbcon_imageblit(struct fb_info *info, const struct fb_image *image)
while (dwords) {
int push = dwords > 2047 ? 2047 : dwords;
- if (RING_SPACE(chan, push + 1)) {
- nouveau_fbcon_gpu_lockup(info);
- cfb_imageblit(info, image);
- return;
- }
+ ret = RING_SPACE(chan, push + 1);
+ if (ret)
+ return ret;
dwords -= push;
@@ -148,6 +149,7 @@ nv50_fbcon_imageblit(struct fb_info *info, const struct fb_image *image)
}
FIRE_RING(chan);
+ return 0;
}
int
@@ -157,12 +159,9 @@ nv50_fbcon_accel_init(struct fb_info *info)
struct drm_device *dev = nfbdev->dev;
struct drm_nouveau_private *dev_priv = dev->dev_private;
struct nouveau_channel *chan = dev_priv->channel;
- struct nouveau_gpuobj *eng2d = NULL;
- uint64_t fb;
+ struct nouveau_bo *nvbo = nfbdev->nouveau_fb.nvbo;
int ret, format;
- fb = info->fix.smem_start - dev_priv->fb_phys + dev_priv->vm_vram_base;
-
switch (info->var.bits_per_pixel) {
case 8:
format = 0xf3;
@@ -190,12 +189,7 @@ nv50_fbcon_accel_init(struct fb_info *info)
return -EINVAL;
}
- ret = nouveau_gpuobj_gr_new(dev_priv->channel, 0x502d, &eng2d);
- if (ret)
- return ret;
-
- ret = nouveau_ramht_insert(dev_priv->channel, Nv2D, eng2d);
- nouveau_gpuobj_ref(NULL, &eng2d);
+ ret = nouveau_gpuobj_gr_new(dev_priv->channel, Nv2D, 0x502d);
if (ret)
return ret;
@@ -253,8 +247,8 @@ nv50_fbcon_accel_init(struct fb_info *info)
OUT_RING(chan, info->fix.line_length);
OUT_RING(chan, info->var.xres_virtual);
OUT_RING(chan, info->var.yres_virtual);
- OUT_RING(chan, upper_32_bits(fb));
- OUT_RING(chan, lower_32_bits(fb));
+ OUT_RING(chan, upper_32_bits(nvbo->vma.offset));
+ OUT_RING(chan, lower_32_bits(nvbo->vma.offset));
BEGIN_RING(chan, NvSub2D, 0x0230, 2);
OUT_RING(chan, format);
OUT_RING(chan, 1);
@@ -262,8 +256,8 @@ nv50_fbcon_accel_init(struct fb_info *info)
OUT_RING(chan, info->fix.line_length);
OUT_RING(chan, info->var.xres_virtual);
OUT_RING(chan, info->var.yres_virtual);
- OUT_RING(chan, upper_32_bits(fb));
- OUT_RING(chan, lower_32_bits(fb));
+ OUT_RING(chan, upper_32_bits(nvbo->vma.offset));
+ OUT_RING(chan, lower_32_bits(nvbo->vma.offset));
return 0;
}
diff --git a/drivers/gpu/drm/nouveau/nv50_fifo.c b/drivers/gpu/drm/nouveau/nv50_fifo.c
index 1da65bd60c1..8dd04c5dac6 100644
--- a/drivers/gpu/drm/nouveau/nv50_fifo.c
+++ b/drivers/gpu/drm/nouveau/nv50_fifo.c
@@ -28,6 +28,7 @@
#include "drm.h"
#include "nouveau_drv.h"
#include "nouveau_ramht.h"
+#include "nouveau_vm.h"
static void
nv50_fifo_playlist_update(struct drm_device *dev)
@@ -44,7 +45,8 @@ nv50_fifo_playlist_update(struct drm_device *dev)
/* We never schedule channel 0 or 127 */
for (i = 1, nr = 0; i < 127; i++) {
- if (dev_priv->fifos[i] && dev_priv->fifos[i]->ramfc) {
+ if (dev_priv->channels.ptr[i] &&
+ dev_priv->channels.ptr[i]->ramfc) {
nv_wo32(cur, (nr * 4), i);
nr++;
}
@@ -60,7 +62,7 @@ static void
nv50_fifo_channel_enable(struct drm_device *dev, int channel)
{
struct drm_nouveau_private *dev_priv = dev->dev_private;
- struct nouveau_channel *chan = dev_priv->fifos[channel];
+ struct nouveau_channel *chan = dev_priv->channels.ptr[channel];
uint32_t inst;
NV_DEBUG(dev, "ch%d\n", channel);
@@ -105,6 +107,7 @@ nv50_fifo_init_intr(struct drm_device *dev)
{
NV_DEBUG(dev, "\n");
+ nouveau_irq_register(dev, 8, nv04_fifo_isr);
nv_wr32(dev, NV03_PFIFO_INTR_0, 0xFFFFFFFF);
nv_wr32(dev, NV03_PFIFO_INTR_EN_0, 0xFFFFFFFF);
}
@@ -118,7 +121,7 @@ nv50_fifo_init_context_table(struct drm_device *dev)
NV_DEBUG(dev, "\n");
for (i = 0; i < NV50_PFIFO_CTX_TABLE__SIZE; i++) {
- if (dev_priv->fifos[i])
+ if (dev_priv->channels.ptr[i])
nv50_fifo_channel_enable(dev, i);
else
nv50_fifo_channel_disable(dev, i);
@@ -206,6 +209,9 @@ nv50_fifo_takedown(struct drm_device *dev)
if (!pfifo->playlist[0])
return;
+ nv_wr32(dev, 0x2140, 0x00000000);
+ nouveau_irq_unregister(dev, 8);
+
nouveau_gpuobj_ref(NULL, &pfifo->playlist[0]);
nouveau_gpuobj_ref(NULL, &pfifo->playlist[1]);
}
@@ -256,6 +262,11 @@ nv50_fifo_create_context(struct nouveau_channel *chan)
}
ramfc = chan->ramfc;
+ chan->user = ioremap(pci_resource_start(dev->pdev, 0) +
+ NV50_USER(chan->id), PAGE_SIZE);
+ if (!chan->user)
+ return -ENOMEM;
+
spin_lock_irqsave(&dev_priv->context_switch_lock, flags);
nv_wo32(ramfc, 0x48, chan->pushbuf->cinst >> 4);
@@ -291,10 +302,23 @@ void
nv50_fifo_destroy_context(struct nouveau_channel *chan)
{
struct drm_device *dev = chan->dev;
+ struct drm_nouveau_private *dev_priv = dev->dev_private;
+ struct nouveau_fifo_engine *pfifo = &dev_priv->engine.fifo;
struct nouveau_gpuobj *ramfc = NULL;
+ unsigned long flags;
NV_DEBUG(dev, "ch%d\n", chan->id);
+ spin_lock_irqsave(&dev_priv->context_switch_lock, flags);
+ pfifo->reassign(dev, false);
+
+ /* Unload the context if it's the currently active one */
+ if (pfifo->channel_id(dev) == chan->id) {
+ pfifo->disable(dev);
+ pfifo->unload_context(dev);
+ pfifo->enable(dev);
+ }
+
/* This will ensure the channel is seen as disabled. */
nouveau_gpuobj_ref(chan->ramfc, &ramfc);
nouveau_gpuobj_ref(NULL, &chan->ramfc);
@@ -305,6 +329,14 @@ nv50_fifo_destroy_context(struct nouveau_channel *chan)
nv50_fifo_channel_disable(dev, 127);
nv50_fifo_playlist_update(dev);
+ pfifo->reassign(dev, true);
+ spin_unlock_irqrestore(&dev_priv->context_switch_lock, flags);
+
+ /* Free the channel resources */
+ if (chan->user) {
+ iounmap(chan->user);
+ chan->user = NULL;
+ }
nouveau_gpuobj_ref(NULL, &ramfc);
nouveau_gpuobj_ref(NULL, &chan->cache);
}
@@ -392,7 +424,7 @@ nv50_fifo_unload_context(struct drm_device *dev)
if (chid < 1 || chid >= dev_priv->engine.fifo.channels - 1)
return 0;
- chan = dev_priv->fifos[chid];
+ chan = dev_priv->channels.ptr[chid];
if (!chan) {
NV_ERROR(dev, "Inactive channel on PFIFO: %d\n", chid);
return -EINVAL;
@@ -467,5 +499,5 @@ nv50_fifo_unload_context(struct drm_device *dev)
void
nv50_fifo_tlb_flush(struct drm_device *dev)
{
- nv50_vm_flush(dev, 5);
+ nv50_vm_flush_engine(dev, 5);
}
diff --git a/drivers/gpu/drm/nouveau/nv50_gpio.c b/drivers/gpu/drm/nouveau/nv50_gpio.c
index b2fab2bf3d6..6b149c0cc06 100644
--- a/drivers/gpu/drm/nouveau/nv50_gpio.c
+++ b/drivers/gpu/drm/nouveau/nv50_gpio.c
@@ -26,6 +26,28 @@
#include "nouveau_drv.h"
#include "nouveau_hw.h"
+#include "nv50_display.h"
+
+static void nv50_gpio_isr(struct drm_device *dev);
+static void nv50_gpio_isr_bh(struct work_struct *work);
+
+struct nv50_gpio_priv {
+ struct list_head handlers;
+ spinlock_t lock;
+};
+
+struct nv50_gpio_handler {
+ struct drm_device *dev;
+ struct list_head head;
+ struct work_struct work;
+ bool inhibit;
+
+ struct dcb_gpio_entry *gpio;
+
+ void (*handler)(void *data, int state);
+ void *data;
+};
+
static int
nv50_gpio_location(struct dcb_gpio_entry *gpio, uint32_t *reg, uint32_t *shift)
{
@@ -75,29 +97,123 @@ nv50_gpio_set(struct drm_device *dev, enum dcb_gpio_tag tag, int state)
return 0;
}
+int
+nv50_gpio_irq_register(struct drm_device *dev, enum dcb_gpio_tag tag,
+ void (*handler)(void *, int), void *data)
+{
+ struct drm_nouveau_private *dev_priv = dev->dev_private;
+ struct nouveau_gpio_engine *pgpio = &dev_priv->engine.gpio;
+ struct nv50_gpio_priv *priv = pgpio->priv;
+ struct nv50_gpio_handler *gpioh;
+ struct dcb_gpio_entry *gpio;
+ unsigned long flags;
+
+ gpio = nouveau_bios_gpio_entry(dev, tag);
+ if (!gpio)
+ return -ENOENT;
+
+ gpioh = kzalloc(sizeof(*gpioh), GFP_KERNEL);
+ if (!gpioh)
+ return -ENOMEM;
+
+ INIT_WORK(&gpioh->work, nv50_gpio_isr_bh);
+ gpioh->dev = dev;
+ gpioh->gpio = gpio;
+ gpioh->handler = handler;
+ gpioh->data = data;
+
+ spin_lock_irqsave(&priv->lock, flags);
+ list_add(&gpioh->head, &priv->handlers);
+ spin_unlock_irqrestore(&priv->lock, flags);
+ return 0;
+}
+
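/* Usage sketch (hypothetical consumer, not from this patch): a caller
 * wanting notification on a connector's hotplug GPIO line could do:
 *
 *	static void my_hpd_handler(void *data, int state)
 *	{
 *		struct drm_device *dev = data;
 *		NV_DEBUG(dev, "hotplug line now %d\n", state);
 *	}
 *
 *	ret = nv50_gpio_irq_register(dev, conn->dcb->gpio_tag,
 *				     my_hpd_handler, dev);
 *
 * The matching nv50_gpio_irq_unregister() call must pass the same
 * tag, handler and data triple for the entry to be found.
 */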
void
-nv50_gpio_irq_enable(struct drm_device *dev, enum dcb_gpio_tag tag, bool on)
+nv50_gpio_irq_unregister(struct drm_device *dev, enum dcb_gpio_tag tag,
+ void (*handler)(void *, int), void *data)
{
+ struct drm_nouveau_private *dev_priv = dev->dev_private;
+ struct nouveau_gpio_engine *pgpio = &dev_priv->engine.gpio;
+ struct nv50_gpio_priv *priv = pgpio->priv;
+ struct nv50_gpio_handler *gpioh, *tmp;
struct dcb_gpio_entry *gpio;
- u32 reg, mask;
+ unsigned long flags;
gpio = nouveau_bios_gpio_entry(dev, tag);
- if (!gpio) {
- NV_ERROR(dev, "gpio tag 0x%02x not found\n", tag);
+ if (!gpio)
return;
+
+ spin_lock_irqsave(&priv->lock, flags);
+ list_for_each_entry_safe(gpioh, tmp, &priv->handlers, head) {
+ if (gpioh->gpio != gpio ||
+ gpioh->handler != handler ||
+ gpioh->data != data)
+ continue;
+ list_del(&gpioh->head);
+ kfree(gpioh);
}
+ spin_unlock_irqrestore(&priv->lock, flags);
+}
+
+bool
+nv50_gpio_irq_enable(struct drm_device *dev, enum dcb_gpio_tag tag, bool on)
+{
+ struct dcb_gpio_entry *gpio;
+ u32 reg, mask;
+
+ gpio = nouveau_bios_gpio_entry(dev, tag);
+ if (!gpio)
+ return false;
reg = gpio->line < 16 ? 0xe050 : 0xe070;
mask = 0x00010001 << (gpio->line & 0xf);
nv_wr32(dev, reg + 4, mask);
- nv_mask(dev, reg + 0, mask, on ? mask : 0);
+ reg = nv_mask(dev, reg + 0, mask, on ? mask : 0);
+ return (reg & mask) == mask;
+}
+
+static int
+nv50_gpio_create(struct drm_device *dev)
+{
+ struct drm_nouveau_private *dev_priv = dev->dev_private;
+ struct nouveau_gpio_engine *pgpio = &dev_priv->engine.gpio;
+ struct nv50_gpio_priv *priv;
+
+ priv = kzalloc(sizeof(*priv), GFP_KERNEL);
+ if (!priv)
+ return -ENOMEM;
+
+ INIT_LIST_HEAD(&priv->handlers);
+ spin_lock_init(&priv->lock);
+ pgpio->priv = priv;
+ return 0;
+}
+
+static void
+nv50_gpio_destroy(struct drm_device *dev)
+{
+ struct drm_nouveau_private *dev_priv = dev->dev_private;
+ struct nouveau_gpio_engine *pgpio = &dev_priv->engine.gpio;
+
+ kfree(pgpio->priv);
+ pgpio->priv = NULL;
}
int
nv50_gpio_init(struct drm_device *dev)
{
struct drm_nouveau_private *dev_priv = dev->dev_private;
+ struct nouveau_gpio_engine *pgpio = &dev_priv->engine.gpio;
+ struct nv50_gpio_priv *priv;
+ int ret;
+
+ if (!pgpio->priv) {
+ ret = nv50_gpio_create(dev);
+ if (ret)
+ return ret;
+ }
+ priv = pgpio->priv;
/* disable, and ack any pending gpio interrupts */
nv_wr32(dev, 0xe050, 0x00000000);
@@ -107,5 +223,77 @@ nv50_gpio_init(struct drm_device *dev)
nv_wr32(dev, 0xe074, 0xffffffff);
}
+ nouveau_irq_register(dev, 21, nv50_gpio_isr);
return 0;
}
+
+void
+nv50_gpio_fini(struct drm_device *dev)
+{
+ struct drm_nouveau_private *dev_priv = dev->dev_private;
+
+ nv_wr32(dev, 0xe050, 0x00000000);
+ if (dev_priv->chipset >= 0x90)
+ nv_wr32(dev, 0xe070, 0x00000000);
+ nouveau_irq_unregister(dev, 21);
+
+ nv50_gpio_destroy(dev);
+}
+
+static void
+nv50_gpio_isr_bh(struct work_struct *work)
+{
+ struct nv50_gpio_handler *gpioh =
+ container_of(work, struct nv50_gpio_handler, work);
+ struct drm_nouveau_private *dev_priv = gpioh->dev->dev_private;
+ struct nouveau_gpio_engine *pgpio = &dev_priv->engine.gpio;
+ struct nv50_gpio_priv *priv = pgpio->priv;
+ unsigned long flags;
+ int state;
+
+ state = pgpio->get(gpioh->dev, gpioh->gpio->tag);
+ if (state < 0)
+ return;
+
+ gpioh->handler(gpioh->data, state);
+
+ spin_lock_irqsave(&priv->lock, flags);
+ gpioh->inhibit = false;
+ spin_unlock_irqrestore(&priv->lock, flags);
+}
+
+static void
+nv50_gpio_isr(struct drm_device *dev)
+{
+ struct drm_nouveau_private *dev_priv = dev->dev_private;
+ struct nouveau_gpio_engine *pgpio = &dev_priv->engine.gpio;
+ struct nv50_gpio_priv *priv = pgpio->priv;
+ struct nv50_gpio_handler *gpioh;
+ u32 intr0, intr1 = 0;
+ u32 hi, lo, ch;
+
+ intr0 = nv_rd32(dev, 0xe054) & nv_rd32(dev, 0xe050);
+ if (dev_priv->chipset >= 0x90)
+ intr1 = nv_rd32(dev, 0xe074) & nv_rd32(dev, 0xe070);
+
+ hi = (intr0 & 0x0000ffff) | (intr1 << 16);
+ lo = (intr0 >> 16) | (intr1 & 0xffff0000);
+ ch = hi | lo;
+
+ nv_wr32(dev, 0xe054, intr0);
+ if (dev_priv->chipset >= 0x90)
+ nv_wr32(dev, 0xe074, intr1);
+
+ spin_lock(&priv->lock);
+ list_for_each_entry(gpioh, &priv->handlers, head) {
+ if (!(ch & (1 << gpioh->gpio->line)))
+ continue;
+
+ if (gpioh->inhibit)
+ continue;
+ gpioh->inhibit = true;
+
+ queue_work(dev_priv->wq, &gpioh->work);
+ }
+ spin_unlock(&priv->lock);
+}
diff --git a/drivers/gpu/drm/nouveau/nv50_graph.c b/drivers/gpu/drm/nouveau/nv50_graph.c
index 8b669d0af61..2d7ea75a09d 100644
--- a/drivers/gpu/drm/nouveau/nv50_graph.c
+++ b/drivers/gpu/drm/nouveau/nv50_graph.c
@@ -29,6 +29,12 @@
#include "nouveau_drv.h"
#include "nouveau_ramht.h"
#include "nouveau_grctx.h"
+#include "nouveau_dma.h"
+#include "nouveau_vm.h"
+#include "nv50_evo.h"
+
+static int nv50_graph_register(struct drm_device *);
+static void nv50_graph_isr(struct drm_device *);
static void
nv50_graph_init_reset(struct drm_device *dev)
@@ -46,6 +52,7 @@ nv50_graph_init_intr(struct drm_device *dev)
{
NV_DEBUG(dev, "\n");
+ nouveau_irq_register(dev, 12, nv50_graph_isr);
nv_wr32(dev, NV03_PGRAPH_INTR, 0xffffffff);
nv_wr32(dev, 0x400138, 0xffffffff);
nv_wr32(dev, NV40_PGRAPH_INTR_EN, 0xffffffff);
@@ -145,12 +152,15 @@ nv50_graph_init(struct drm_device *dev)
nv50_graph_init_reset(dev);
nv50_graph_init_regs__nv(dev);
nv50_graph_init_regs(dev);
- nv50_graph_init_intr(dev);
ret = nv50_graph_init_ctxctl(dev);
if (ret)
return ret;
+ ret = nv50_graph_register(dev);
+ if (ret)
+ return ret;
+ nv50_graph_init_intr(dev);
return 0;
}
@@ -158,6 +168,8 @@ void
nv50_graph_takedown(struct drm_device *dev)
{
NV_DEBUG(dev, "\n");
+ nv_wr32(dev, 0x40013c, 0x00000000);
+ nouveau_irq_unregister(dev, 12);
}
void
@@ -190,7 +202,7 @@ nv50_graph_channel(struct drm_device *dev)
inst = (inst & NV50_PGRAPH_CTXCTL_CUR_INSTANCE) << 12;
for (i = 0; i < dev_priv->engine.fifo.channels; i++) {
- struct nouveau_channel *chan = dev_priv->fifos[i];
+ struct nouveau_channel *chan = dev_priv->channels.ptr[i];
if (chan && chan->ramin && chan->ramin->vinst == inst)
return chan;
@@ -211,7 +223,7 @@ nv50_graph_create_context(struct nouveau_channel *chan)
NV_DEBUG(dev, "ch%d\n", chan->id);
- ret = nouveau_gpuobj_new(dev, chan, pgraph->grctx_size, 0x1000,
+ ret = nouveau_gpuobj_new(dev, chan, pgraph->grctx_size, 0,
NVOBJ_FLAG_ZERO_ALLOC |
NVOBJ_FLAG_ZERO_FREE, &chan->ramin_grctx);
if (ret)
@@ -234,6 +246,7 @@ nv50_graph_create_context(struct nouveau_channel *chan)
nv_wo32(chan->ramin_grctx, 0x00000, chan->ramin->vinst >> 12);
dev_priv->engine.instmem.flush(dev);
+ atomic_inc(&chan->vm->pgraph_refs);
return 0;
}
@@ -242,18 +255,31 @@ nv50_graph_destroy_context(struct nouveau_channel *chan)
{
struct drm_device *dev = chan->dev;
struct drm_nouveau_private *dev_priv = dev->dev_private;
+ struct nouveau_pgraph_engine *pgraph = &dev_priv->engine.graph;
int i, hdr = (dev_priv->chipset == 0x50) ? 0x200 : 0x20;
+ unsigned long flags;
NV_DEBUG(dev, "ch%d\n", chan->id);
if (!chan->ramin)
return;
+ spin_lock_irqsave(&dev_priv->context_switch_lock, flags);
+ pgraph->fifo_access(dev, false);
+
+ if (pgraph->channel(dev) == chan)
+ pgraph->unload_context(dev);
+
for (i = hdr; i < hdr + 24; i += 4)
nv_wo32(chan->ramin, i, 0);
dev_priv->engine.instmem.flush(dev);
+ pgraph->fifo_access(dev, true);
+ spin_unlock_irqrestore(&dev_priv->context_switch_lock, flags);
+
nouveau_gpuobj_ref(NULL, &chan->ramin_grctx);
+
+ atomic_dec(&chan->vm->pgraph_refs);
}
static int
@@ -306,7 +332,7 @@ nv50_graph_unload_context(struct drm_device *dev)
return 0;
}
-void
+static void
nv50_graph_context_switch(struct drm_device *dev)
{
uint32_t inst;
@@ -322,8 +348,8 @@ nv50_graph_context_switch(struct drm_device *dev)
}
static int
-nv50_graph_nvsw_dma_vblsem(struct nouveau_channel *chan, int grclass,
- int mthd, uint32_t data)
+nv50_graph_nvsw_dma_vblsem(struct nouveau_channel *chan,
+ u32 class, u32 mthd, u32 data)
{
struct nouveau_gpuobj *gpuobj;
@@ -340,8 +366,8 @@ nv50_graph_nvsw_dma_vblsem(struct nouveau_channel *chan, int grclass,
}
static int
-nv50_graph_nvsw_vblsem_offset(struct nouveau_channel *chan, int grclass,
- int mthd, uint32_t data)
+nv50_graph_nvsw_vblsem_offset(struct nouveau_channel *chan,
+ u32 class, u32 mthd, u32 data)
{
if (nouveau_notifier_offset(chan->nvsw.vblsem, &data))
return -ERANGE;
@@ -351,16 +377,16 @@ nv50_graph_nvsw_vblsem_offset(struct nouveau_channel *chan, int grclass,
}
static int
-nv50_graph_nvsw_vblsem_release_val(struct nouveau_channel *chan, int grclass,
- int mthd, uint32_t data)
+nv50_graph_nvsw_vblsem_release_val(struct nouveau_channel *chan,
+ u32 class, u32 mthd, u32 data)
{
chan->nvsw.vblsem_rval = data;
return 0;
}
static int
-nv50_graph_nvsw_vblsem_release(struct nouveau_channel *chan, int grclass,
- int mthd, uint32_t data)
+nv50_graph_nvsw_vblsem_release(struct nouveau_channel *chan,
+ u32 class, u32 mthd, u32 data)
{
struct drm_device *dev = chan->dev;
struct drm_nouveau_private *dev_priv = dev->dev_private;
@@ -368,45 +394,85 @@ nv50_graph_nvsw_vblsem_release(struct nouveau_channel *chan, int grclass,
if (!chan->nvsw.vblsem || chan->nvsw.vblsem_offset == ~0 || data > 1)
return -EINVAL;
- if (!(nv_rd32(dev, NV50_PDISPLAY_INTR_EN) &
- NV50_PDISPLAY_INTR_EN_VBLANK_CRTC_(data))) {
- nv_wr32(dev, NV50_PDISPLAY_INTR_1,
- NV50_PDISPLAY_INTR_1_VBLANK_CRTC_(data));
- nv_wr32(dev, NV50_PDISPLAY_INTR_EN, nv_rd32(dev,
- NV50_PDISPLAY_INTR_EN) |
- NV50_PDISPLAY_INTR_EN_VBLANK_CRTC_(data));
- }
+ drm_vblank_get(dev, data);
+ chan->nvsw.vblsem_head = data;
list_add(&chan->nvsw.vbl_wait, &dev_priv->vbl_waiting);
+
return 0;
}
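/* Note on the reference pairing above: each waiter added to
 * dev_priv->vbl_waiting now holds a drm_vblank_get() reference for
 * its CRTC; nv50_display_vblank_crtc_handler() drops it with
 * drm_vblank_put() after writing the semaphore release value, so
 * vblank interrupts stay enabled only while waiters are pending.
 */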
-static struct nouveau_pgraph_object_method nv50_graph_nvsw_methods[] = {
- { 0x018c, nv50_graph_nvsw_dma_vblsem },
- { 0x0400, nv50_graph_nvsw_vblsem_offset },
- { 0x0404, nv50_graph_nvsw_vblsem_release_val },
- { 0x0408, nv50_graph_nvsw_vblsem_release },
- {}
-};
+static int
+nv50_graph_nvsw_mthd_page_flip(struct nouveau_channel *chan,
+ u32 class, u32 mthd, u32 data)
+{
+ struct nouveau_page_flip_state s;
-struct nouveau_pgraph_object_class nv50_graph_grclass[] = {
- { 0x506e, true, nv50_graph_nvsw_methods }, /* nvsw */
- { 0x0030, false, NULL }, /* null */
- { 0x5039, false, NULL }, /* m2mf */
- { 0x502d, false, NULL }, /* 2d */
- { 0x50c0, false, NULL }, /* compute */
- { 0x85c0, false, NULL }, /* compute (nva3, nva5, nva8) */
- { 0x5097, false, NULL }, /* tesla (nv50) */
- { 0x8297, false, NULL }, /* tesla (nv8x/nv9x) */
- { 0x8397, false, NULL }, /* tesla (nva0, nvaa, nvac) */
- { 0x8597, false, NULL }, /* tesla (nva3, nva5, nva8) */
- {}
-};
+ if (!nouveau_finish_page_flip(chan, &s)) {
+ /* XXX - Do something here */
+ }
+
+ return 0;
+}
+
+static int
+nv50_graph_register(struct drm_device *dev)
+{
+ struct drm_nouveau_private *dev_priv = dev->dev_private;
+
+ if (dev_priv->engine.graph.registered)
+ return 0;
+
+ NVOBJ_CLASS(dev, 0x506e, SW); /* nvsw */
+ NVOBJ_MTHD (dev, 0x506e, 0x018c, nv50_graph_nvsw_dma_vblsem);
+ NVOBJ_MTHD (dev, 0x506e, 0x0400, nv50_graph_nvsw_vblsem_offset);
+ NVOBJ_MTHD (dev, 0x506e, 0x0404, nv50_graph_nvsw_vblsem_release_val);
+ NVOBJ_MTHD (dev, 0x506e, 0x0408, nv50_graph_nvsw_vblsem_release);
+ NVOBJ_MTHD (dev, 0x506e, 0x0500, nv50_graph_nvsw_mthd_page_flip);
+
+ NVOBJ_CLASS(dev, 0x0030, GR); /* null */
+ NVOBJ_CLASS(dev, 0x5039, GR); /* m2mf */
+ NVOBJ_CLASS(dev, 0x502d, GR); /* 2d */
+
+ /* tesla */
+ if (dev_priv->chipset == 0x50)
+ NVOBJ_CLASS(dev, 0x5097, GR); /* tesla (nv50) */
+ else
+ if (dev_priv->chipset < 0xa0)
+ NVOBJ_CLASS(dev, 0x8297, GR); /* tesla (nv8x/nv9x) */
+ else {
+ switch (dev_priv->chipset) {
+ case 0xa0:
+ case 0xaa:
+ case 0xac:
+ NVOBJ_CLASS(dev, 0x8397, GR);
+ break;
+ case 0xa3:
+ case 0xa5:
+ case 0xa8:
+ NVOBJ_CLASS(dev, 0x8597, GR);
+ break;
+ case 0xaf:
+ NVOBJ_CLASS(dev, 0x8697, GR);
+ break;
+ }
+ }
+
+ /* compute */
+ NVOBJ_CLASS(dev, 0x50c0, GR);
+ if (dev_priv->chipset > 0xa0 &&
+ dev_priv->chipset != 0xaa &&
+ dev_priv->chipset != 0xac)
+ NVOBJ_CLASS(dev, 0x85c0, GR);
+
+ dev_priv->engine.graph.registered = true;
+ return 0;
+}
void
nv50_graph_tlb_flush(struct drm_device *dev)
{
- nv50_vm_flush(dev, 0);
+ nv50_vm_flush_engine(dev, 0);
}
void
@@ -449,8 +515,535 @@ nv86_graph_tlb_flush(struct drm_device *dev)
nv_rd32(dev, 0x400384), nv_rd32(dev, 0x400388));
}
- nv50_vm_flush(dev, 0);
+ nv50_vm_flush_engine(dev, 0);
nv_mask(dev, 0x400500, 0x00000001, 0x00000001);
spin_unlock_irqrestore(&dev_priv->context_switch_lock, flags);
}
+
+static struct nouveau_enum nv50_mp_exec_error_names[] = {
+ { 3, "STACK_UNDERFLOW" },
+ { 4, "QUADON_ACTIVE" },
+ { 8, "TIMEOUT" },
+ { 0x10, "INVALID_OPCODE" },
+ { 0x40, "BREAKPOINT" },
+ {}
+};
+
+static struct nouveau_bitfield nv50_graph_trap_m2mf[] = {
+ { 0x00000001, "NOTIFY" },
+ { 0x00000002, "IN" },
+ { 0x00000004, "OUT" },
+ {}
+};
+
+static struct nouveau_bitfield nv50_graph_trap_vfetch[] = {
+ { 0x00000001, "FAULT" },
+ {}
+};
+
+static struct nouveau_bitfield nv50_graph_trap_strmout[] = {
+ { 0x00000001, "FAULT" },
+ {}
+};
+
+static struct nouveau_bitfield nv50_graph_trap_ccache[] = {
+ { 0x00000001, "FAULT" },
+ {}
+};
+
+/* There must be a *lot* of these. Will take some time to gather them up. */
+struct nouveau_enum nv50_data_error_names[] = {
+ { 0x00000003, "INVALID_QUERY_OR_TEXTURE" },
+ { 0x00000004, "INVALID_VALUE" },
+ { 0x00000005, "INVALID_ENUM" },
+ { 0x00000008, "INVALID_OBJECT" },
+ { 0x00000009, "READ_ONLY_OBJECT" },
+ { 0x0000000a, "SUPERVISOR_OBJECT" },
+ { 0x0000000b, "INVALID_ADDRESS_ALIGNMENT" },
+ { 0x0000000c, "INVALID_BITFIELD" },
+ { 0x0000000d, "BEGIN_END_ACTIVE" },
+ { 0x0000000e, "SEMANTIC_COLOR_BACK_OVER_LIMIT" },
+ { 0x0000000f, "VIEWPORT_ID_NEEDS_GP" },
+ { 0x00000010, "RT_DOUBLE_BIND" },
+ { 0x00000011, "RT_TYPES_MISMATCH" },
+ { 0x00000012, "RT_LINEAR_WITH_ZETA" },
+ { 0x00000015, "FP_TOO_FEW_REGS" },
+ { 0x00000016, "ZETA_FORMAT_CSAA_MISMATCH" },
+ { 0x00000017, "RT_LINEAR_WITH_MSAA" },
+ { 0x00000018, "FP_INTERPOLANT_START_OVER_LIMIT" },
+ { 0x00000019, "SEMANTIC_LAYER_OVER_LIMIT" },
+ { 0x0000001a, "RT_INVALID_ALIGNMENT" },
+ { 0x0000001b, "SAMPLER_OVER_LIMIT" },
+ { 0x0000001c, "TEXTURE_OVER_LIMIT" },
+ { 0x0000001e, "GP_TOO_MANY_OUTPUTS" },
+ { 0x0000001f, "RT_BPP128_WITH_MS8" },
+ { 0x00000021, "Z_OUT_OF_BOUNDS" },
+ { 0x00000023, "XY_OUT_OF_BOUNDS" },
+ { 0x00000027, "CP_MORE_PARAMS_THAN_SHARED" },
+ { 0x00000028, "CP_NO_REG_SPACE_STRIPED" },
+ { 0x00000029, "CP_NO_REG_SPACE_PACKED" },
+ { 0x0000002a, "CP_NOT_ENOUGH_WARPS" },
+ { 0x0000002b, "CP_BLOCK_SIZE_MISMATCH" },
+ { 0x0000002c, "CP_NOT_ENOUGH_LOCAL_WARPS" },
+ { 0x0000002d, "CP_NOT_ENOUGH_STACK_WARPS" },
+ { 0x0000002e, "CP_NO_BLOCKDIM_LATCH" },
+ { 0x00000031, "ENG2D_FORMAT_MISMATCH" },
+ { 0x0000003f, "PRIMITIVE_ID_NEEDS_GP" },
+ { 0x00000044, "SEMANTIC_VIEWPORT_OVER_LIMIT" },
+ { 0x00000045, "SEMANTIC_COLOR_FRONT_OVER_LIMIT" },
+ { 0x00000046, "LAYER_ID_NEEDS_GP" },
+ { 0x00000047, "SEMANTIC_CLIP_OVER_LIMIT" },
+ { 0x00000048, "SEMANTIC_PTSZ_OVER_LIMIT" },
+ {}
+};
+
+static struct nouveau_bitfield nv50_graph_intr[] = {
+ { 0x00000001, "NOTIFY" },
+ { 0x00000002, "COMPUTE_QUERY" },
+ { 0x00000010, "ILLEGAL_MTHD" },
+ { 0x00000020, "ILLEGAL_CLASS" },
+ { 0x00000040, "DOUBLE_NOTIFY" },
+ { 0x00001000, "CONTEXT_SWITCH" },
+ { 0x00010000, "BUFFER_NOTIFY" },
+ { 0x00100000, "DATA_ERROR" },
+ { 0x00200000, "TRAP" },
+ { 0x01000000, "SINGLE_STEP" },
+ {}
+};
+
+static void
+nv50_pgraph_mp_trap(struct drm_device *dev, int tpid, int display)
+{
+ struct drm_nouveau_private *dev_priv = dev->dev_private;
+ uint32_t units = nv_rd32(dev, 0x1540);
+ uint32_t addr, mp10, status, pc, oplow, ophigh;
+ int i;
+ int mps = 0;
+ for (i = 0; i < 4; i++) {
+ if (!(units & 1 << (i+24)))
+ continue;
+ if (dev_priv->chipset < 0xa0)
+ addr = 0x408200 + (tpid << 12) + (i << 7);
+ else
+ addr = 0x408100 + (tpid << 11) + (i << 7);
+ mp10 = nv_rd32(dev, addr + 0x10);
+ status = nv_rd32(dev, addr + 0x14);
+ if (!status)
+ continue;
+ if (display) {
+ nv_rd32(dev, addr + 0x20);
+ pc = nv_rd32(dev, addr + 0x24);
+ oplow = nv_rd32(dev, addr + 0x70);
+			ophigh = nv_rd32(dev, addr + 0x74);
+ NV_INFO(dev, "PGRAPH_TRAP_MP_EXEC - "
+ "TP %d MP %d: ", tpid, i);
+ nouveau_enum_print(nv50_mp_exec_error_names, status);
+ printk(" at %06x warp %d, opcode %08x %08x\n",
+			       pc & 0xffffff, pc >> 24,
+ oplow, ophigh);
+ }
+ nv_wr32(dev, addr + 0x10, mp10);
+ nv_wr32(dev, addr + 0x14, 0);
+ mps++;
+ }
+ if (!mps && display)
+ NV_INFO(dev, "PGRAPH_TRAP_MP_EXEC - TP %d: "
+ "No MPs claiming errors?\n", tpid);
+}
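/* Worked example for the register addressing above: on a pre-NVA0
 * chip (chipset < 0xa0), TP 1 / MP 2 resolves to
 * 0x408200 + (1 << 12) + (2 << 7) = 0x409300, while on NVA0 and
 * later the same unit sits at 0x408100 + (1 << 11) + (2 << 7)
 * = 0x408a00.
 */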
+
+static void
+nv50_pgraph_tp_trap(struct drm_device *dev, int type, uint32_t ustatus_old,
+ uint32_t ustatus_new, int display, const char *name)
+{
+ struct drm_nouveau_private *dev_priv = dev->dev_private;
+ int tps = 0;
+ uint32_t units = nv_rd32(dev, 0x1540);
+ int i, r;
+ uint32_t ustatus_addr, ustatus;
+ for (i = 0; i < 16; i++) {
+ if (!(units & (1 << i)))
+ continue;
+ if (dev_priv->chipset < 0xa0)
+ ustatus_addr = ustatus_old + (i << 12);
+ else
+ ustatus_addr = ustatus_new + (i << 11);
+ ustatus = nv_rd32(dev, ustatus_addr) & 0x7fffffff;
+ if (!ustatus)
+ continue;
+ tps++;
+ switch (type) {
+ case 6: /* texture error... unknown for now */
+ nv50_fb_vm_trap(dev, display, name);
+ if (display) {
+ NV_ERROR(dev, "magic set %d:\n", i);
+ for (r = ustatus_addr + 4; r <= ustatus_addr + 0x10; r += 4)
+ NV_ERROR(dev, "\t0x%08x: 0x%08x\n", r,
+ nv_rd32(dev, r));
+ }
+ break;
+ case 7: /* MP error */
+ if (ustatus & 0x00010000) {
+ nv50_pgraph_mp_trap(dev, i, display);
+ ustatus &= ~0x00010000;
+ }
+ break;
+ case 8: /* TPDMA error */
+ {
+ uint32_t e0c = nv_rd32(dev, ustatus_addr + 4);
+ uint32_t e10 = nv_rd32(dev, ustatus_addr + 8);
+ uint32_t e14 = nv_rd32(dev, ustatus_addr + 0xc);
+ uint32_t e18 = nv_rd32(dev, ustatus_addr + 0x10);
+ uint32_t e1c = nv_rd32(dev, ustatus_addr + 0x14);
+ uint32_t e20 = nv_rd32(dev, ustatus_addr + 0x18);
+ uint32_t e24 = nv_rd32(dev, ustatus_addr + 0x1c);
+ nv50_fb_vm_trap(dev, display, name);
+ /* 2d engine destination */
+ if (ustatus & 0x00000010) {
+ if (display) {
+ NV_INFO(dev, "PGRAPH_TRAP_TPDMA_2D - TP %d - Unknown fault at address %02x%08x\n",
+ i, e14, e10);
+ NV_INFO(dev, "PGRAPH_TRAP_TPDMA_2D - TP %d - e0c: %08x, e18: %08x, e1c: %08x, e20: %08x, e24: %08x\n",
+ i, e0c, e18, e1c, e20, e24);
+ }
+ ustatus &= ~0x00000010;
+ }
+ /* Render target */
+ if (ustatus & 0x00000040) {
+ if (display) {
+ NV_INFO(dev, "PGRAPH_TRAP_TPDMA_RT - TP %d - Unknown fault at address %02x%08x\n",
+ i, e14, e10);
+ NV_INFO(dev, "PGRAPH_TRAP_TPDMA_RT - TP %d - e0c: %08x, e18: %08x, e1c: %08x, e20: %08x, e24: %08x\n",
+ i, e0c, e18, e1c, e20, e24);
+ }
+ ustatus &= ~0x00000040;
+ }
+ /* CUDA memory: l[], g[] or stack. */
+ if (ustatus & 0x00000080) {
+ if (display) {
+ if (e18 & 0x80000000) {
+ /* g[] read fault? */
+ NV_INFO(dev, "PGRAPH_TRAP_TPDMA - TP %d - Global read fault at address %02x%08x\n",
+ i, e14, e10 | ((e18 >> 24) & 0x1f));
+ e18 &= ~0x1f000000;
+ } else if (e18 & 0xc) {
+ /* g[] write fault? */
+ NV_INFO(dev, "PGRAPH_TRAP_TPDMA - TP %d - Global write fault at address %02x%08x\n",
+ i, e14, e10 | ((e18 >> 7) & 0x1f));
+ e18 &= ~0x00000f80;
+ } else {
+ NV_INFO(dev, "PGRAPH_TRAP_TPDMA - TP %d - Unknown CUDA fault at address %02x%08x\n",
+ i, e14, e10);
+ }
+ NV_INFO(dev, "PGRAPH_TRAP_TPDMA - TP %d - e0c: %08x, e18: %08x, e1c: %08x, e20: %08x, e24: %08x\n",
+ i, e0c, e18, e1c, e20, e24);
+ }
+ ustatus &= ~0x00000080;
+ }
+ }
+ break;
+ }
+ if (ustatus) {
+ if (display)
+ NV_INFO(dev, "%s - TP%d: Unhandled ustatus 0x%08x\n", name, i, ustatus);
+ }
+ nv_wr32(dev, ustatus_addr, 0xc0000000);
+ }
+
+ if (!tps && display)
+ NV_INFO(dev, "%s - No TPs claiming errors?\n", name);
+}
+
+static int
+nv50_pgraph_trap_handler(struct drm_device *dev, u32 display, u64 inst, u32 chid)
+{
+ u32 status = nv_rd32(dev, 0x400108);
+ u32 ustatus;
+
+ if (!status && display) {
+ NV_INFO(dev, "PGRAPH - TRAP: no units reporting traps?\n");
+ return 1;
+ }
+
+ /* DISPATCH: Relays commands to other units and handles NOTIFY,
+ * COND, QUERY. If you get a trap from it, the command is still stuck
+ * in DISPATCH and you need to do something about it. */
+ if (status & 0x001) {
+ ustatus = nv_rd32(dev, 0x400804) & 0x7fffffff;
+ if (!ustatus && display) {
+ NV_INFO(dev, "PGRAPH_TRAP_DISPATCH - no ustatus?\n");
+ }
+
+ nv_wr32(dev, 0x400500, 0x00000000);
+
+ /* Known to be triggered by screwed up NOTIFY and COND... */
+ if (ustatus & 0x00000001) {
+ u32 addr = nv_rd32(dev, 0x400808);
+ u32 subc = (addr & 0x00070000) >> 16;
+ u32 mthd = (addr & 0x00001ffc);
+ u32 datal = nv_rd32(dev, 0x40080c);
+ u32 datah = nv_rd32(dev, 0x400810);
+ u32 class = nv_rd32(dev, 0x400814);
+ u32 r848 = nv_rd32(dev, 0x400848);
+
+ NV_INFO(dev, "PGRAPH - TRAP DISPATCH_FAULT\n");
+ if (display && (addr & 0x80000000)) {
+ NV_INFO(dev, "PGRAPH - ch %d (0x%010llx) "
+ "subc %d class 0x%04x mthd 0x%04x "
+ "data 0x%08x%08x "
+ "400808 0x%08x 400848 0x%08x\n",
+ chid, inst, subc, class, mthd, datah,
+ datal, addr, r848);
+ } else
+ if (display) {
+ NV_INFO(dev, "PGRAPH - no stuck command?\n");
+ }
+
+ nv_wr32(dev, 0x400808, 0);
+ nv_wr32(dev, 0x4008e8, nv_rd32(dev, 0x4008e8) & 3);
+ nv_wr32(dev, 0x400848, 0);
+ ustatus &= ~0x00000001;
+ }
+
+ if (ustatus & 0x00000002) {
+ u32 addr = nv_rd32(dev, 0x40084c);
+ u32 subc = (addr & 0x00070000) >> 16;
+ u32 mthd = (addr & 0x00001ffc);
+ u32 data = nv_rd32(dev, 0x40085c);
+ u32 class = nv_rd32(dev, 0x400814);
+
+ NV_INFO(dev, "PGRAPH - TRAP DISPATCH_QUERY\n");
+ if (display && (addr & 0x80000000)) {
+ NV_INFO(dev, "PGRAPH - ch %d (0x%010llx) "
+ "subc %d class 0x%04x mthd 0x%04x "
+ "data 0x%08x 40084c 0x%08x\n",
+ chid, inst, subc, class, mthd,
+ data, addr);
+ } else
+ if (display) {
+ NV_INFO(dev, "PGRAPH - no stuck command?\n");
+ }
+
+ nv_wr32(dev, 0x40084c, 0);
+ ustatus &= ~0x00000002;
+ }
+
+ if (ustatus && display) {
+ NV_INFO(dev, "PGRAPH - TRAP_DISPATCH (unknown "
+ "0x%08x)\n", ustatus);
+ }
+
+ nv_wr32(dev, 0x400804, 0xc0000000);
+ nv_wr32(dev, 0x400108, 0x001);
+ status &= ~0x001;
+ if (!status)
+ return 0;
+ }
+
+ /* M2MF: Memory to memory copy engine. */
+ if (status & 0x002) {
+ u32 ustatus = nv_rd32(dev, 0x406800) & 0x7fffffff;
+ if (display) {
+ NV_INFO(dev, "PGRAPH - TRAP_M2MF");
+ nouveau_bitfield_print(nv50_graph_trap_m2mf, ustatus);
+ printk("\n");
+ NV_INFO(dev, "PGRAPH - TRAP_M2MF %08x %08x %08x %08x\n",
+ nv_rd32(dev, 0x406804), nv_rd32(dev, 0x406808),
+ nv_rd32(dev, 0x40680c), nv_rd32(dev, 0x406810));
+ }
+
+ /* No sane way found yet -- just reset the bugger. */
+ nv_wr32(dev, 0x400040, 2);
+ nv_wr32(dev, 0x400040, 0);
+ nv_wr32(dev, 0x406800, 0xc0000000);
+ nv_wr32(dev, 0x400108, 0x002);
+ status &= ~0x002;
+ }
+
+ /* VFETCH: Fetches data from vertex buffers. */
+ if (status & 0x004) {
+ u32 ustatus = nv_rd32(dev, 0x400c04) & 0x7fffffff;
+ if (display) {
+ NV_INFO(dev, "PGRAPH - TRAP_VFETCH");
+ nouveau_bitfield_print(nv50_graph_trap_vfetch, ustatus);
+ printk("\n");
+ NV_INFO(dev, "PGRAPH - TRAP_VFETCH %08x %08x %08x %08x\n",
+ nv_rd32(dev, 0x400c00), nv_rd32(dev, 0x400c08),
+ nv_rd32(dev, 0x400c0c), nv_rd32(dev, 0x400c10));
+ }
+
+ nv_wr32(dev, 0x400c04, 0xc0000000);
+ nv_wr32(dev, 0x400108, 0x004);
+ status &= ~0x004;
+ }
+
+ /* STRMOUT: DirectX streamout / OpenGL transform feedback. */
+ if (status & 0x008) {
+ ustatus = nv_rd32(dev, 0x401800) & 0x7fffffff;
+ if (display) {
+ NV_INFO(dev, "PGRAPH - TRAP_STRMOUT");
+ nouveau_bitfield_print(nv50_graph_trap_strmout, ustatus);
+ printk("\n");
+ NV_INFO(dev, "PGRAPH - TRAP_STRMOUT %08x %08x %08x %08x\n",
+ nv_rd32(dev, 0x401804), nv_rd32(dev, 0x401808),
+ nv_rd32(dev, 0x40180c), nv_rd32(dev, 0x401810));
+ }
+
+ /* No sane way found yet -- just reset the bugger. */
+ nv_wr32(dev, 0x400040, 0x80);
+ nv_wr32(dev, 0x400040, 0);
+ nv_wr32(dev, 0x401800, 0xc0000000);
+ nv_wr32(dev, 0x400108, 0x008);
+ status &= ~0x008;
+ }
+
+ /* CCACHE: Handles code and c[] caches and fills them. */
+ if (status & 0x010) {
+ ustatus = nv_rd32(dev, 0x405018) & 0x7fffffff;
+ if (display) {
+ NV_INFO(dev, "PGRAPH - TRAP_CCACHE");
+ nouveau_bitfield_print(nv50_graph_trap_ccache, ustatus);
+ printk("\n");
+ NV_INFO(dev, "PGRAPH - TRAP_CCACHE %08x %08x %08x %08x"
+ " %08x %08x %08x\n",
+ nv_rd32(dev, 0x405800), nv_rd32(dev, 0x405804),
+ nv_rd32(dev, 0x405808), nv_rd32(dev, 0x40580c),
+ nv_rd32(dev, 0x405810), nv_rd32(dev, 0x405814),
+ nv_rd32(dev, 0x40581c));
+ }
+
+ nv_wr32(dev, 0x405018, 0xc0000000);
+ nv_wr32(dev, 0x400108, 0x010);
+ status &= ~0x010;
+ }
+
+ /* Unknown, not seen yet... 0x402000 is the only trap status reg
+ * remaining, so try to handle it anyway. Perhaps related to that
+ * unknown DMA slot on tesla? */
+ if (status & 0x20) {
+ ustatus = nv_rd32(dev, 0x402000) & 0x7fffffff;
+ if (display)
+ NV_INFO(dev, "PGRAPH - TRAP_UNKC04 0x%08x\n", ustatus);
+ nv_wr32(dev, 0x402000, 0xc0000000);
+		/* no status modification on purpose */
+ }
+
+ /* TEXTURE: CUDA texturing units */
+ if (status & 0x040) {
+ nv50_pgraph_tp_trap(dev, 6, 0x408900, 0x408600, display,
+ "PGRAPH - TRAP_TEXTURE");
+ nv_wr32(dev, 0x400108, 0x040);
+ status &= ~0x040;
+ }
+
+ /* MP: CUDA execution engines. */
+ if (status & 0x080) {
+ nv50_pgraph_tp_trap(dev, 7, 0x408314, 0x40831c, display,
+ "PGRAPH - TRAP_MP");
+ nv_wr32(dev, 0x400108, 0x080);
+ status &= ~0x080;
+ }
+
+ /* TPDMA: Handles TP-initiated uncached memory accesses:
+ * l[], g[], stack, 2d surfaces, render targets. */
+ if (status & 0x100) {
+ nv50_pgraph_tp_trap(dev, 8, 0x408e08, 0x408708, display,
+ "PGRAPH - TRAP_TPDMA");
+ nv_wr32(dev, 0x400108, 0x100);
+ status &= ~0x100;
+ }
+
+ if (status) {
+ if (display)
+ NV_INFO(dev, "PGRAPH - TRAP: unknown 0x%08x\n", status);
+ nv_wr32(dev, 0x400108, status);
+ }
+
+ return 1;
+}
+
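+/* Look up which channel owns the instance address PGRAPH reported.
+ * Walks the channel table comparing ramin base addresses; returns the
+ * channel number, or engine.fifo.channels if nothing matched.
+ */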
+static int
+nv50_graph_isr_chid(struct drm_device *dev, u64 inst)
+{
+ struct drm_nouveau_private *dev_priv = dev->dev_private;
+ struct nouveau_channel *chan;
+ unsigned long flags;
+ int i;
+
+ spin_lock_irqsave(&dev_priv->channels.lock, flags);
+ for (i = 0; i < dev_priv->engine.fifo.channels; i++) {
+ chan = dev_priv->channels.ptr[i];
+ if (!chan || !chan->ramin)
+ continue;
+
+ if (inst == chan->ramin->vinst)
+ break;
+ }
+ spin_unlock_irqrestore(&dev_priv->channels.lock, flags);
+ return i;
+}
+
+static void
+nv50_graph_isr(struct drm_device *dev)
+{
+ u32 stat;
+
+ while ((stat = nv_rd32(dev, 0x400100))) {
+ u64 inst = (u64)(nv_rd32(dev, 0x40032c) & 0x0fffffff) << 12;
+ u32 chid = nv50_graph_isr_chid(dev, inst);
+ u32 addr = nv_rd32(dev, NV04_PGRAPH_TRAPPED_ADDR);
+ u32 subc = (addr & 0x00070000) >> 16;
+ u32 mthd = (addr & 0x00001ffc);
+ u32 data = nv_rd32(dev, NV04_PGRAPH_TRAPPED_DATA);
+ u32 class = nv_rd32(dev, 0x400814);
+ u32 show = stat;
+
+ if (stat & 0x00000010) {
+ if (!nouveau_gpuobj_mthd_call2(dev, chid, class,
+ mthd, data))
+ show &= ~0x00000010;
+ }
+
+ if (stat & 0x00001000) {
+ nv_wr32(dev, 0x400500, 0x00000000);
+ nv_wr32(dev, 0x400100, 0x00001000);
+ nv_mask(dev, 0x40013c, 0x00001000, 0x00000000);
+ nv50_graph_context_switch(dev);
+ stat &= ~0x00001000;
+ show &= ~0x00001000;
+ }
+
+ show = (show && nouveau_ratelimit()) ? show : 0;
+
+ if (show & 0x00100000) {
+ u32 ecode = nv_rd32(dev, 0x400110);
+ NV_INFO(dev, "PGRAPH - DATA_ERROR ");
+ nouveau_enum_print(nv50_data_error_names, ecode);
+ printk("\n");
+ }
+
+ if (stat & 0x00200000) {
+ if (!nv50_pgraph_trap_handler(dev, show, inst, chid))
+ show &= ~0x00200000;
+ }
+
+ nv_wr32(dev, 0x400100, stat);
+ nv_wr32(dev, 0x400500, 0x00010001);
+
+ if (show) {
+ NV_INFO(dev, "PGRAPH -");
+ nouveau_bitfield_print(nv50_graph_intr, show);
+ printk("\n");
+ NV_INFO(dev, "PGRAPH - ch %d (0x%010llx) subc %d "
+ "class 0x%04x mthd 0x%04x data 0x%08x\n",
+ chid, inst, subc, class, mthd, data);
+ }
+ }
+
+ if (nv_rd32(dev, 0x400824) & (1 << 31))
+ nv_wr32(dev, 0x400824, nv_rd32(dev, 0x400824) & ~(1 << 31));
+}
diff --git a/drivers/gpu/drm/nouveau/nv50_instmem.c b/drivers/gpu/drm/nouveau/nv50_instmem.c
index b773229b764..2e1b1cd19a4 100644
--- a/drivers/gpu/drm/nouveau/nv50_instmem.c
+++ b/drivers/gpu/drm/nouveau/nv50_instmem.c
@@ -27,14 +27,20 @@
#include "drmP.h"
#include "drm.h"
+
#include "nouveau_drv.h"
+#include "nouveau_vm.h"
+
+#define BAR1_VM_BASE 0x0020000000ULL
+#define BAR1_VM_SIZE pci_resource_len(dev->pdev, 1)
+#define BAR3_VM_BASE 0x0000000000ULL
+#define BAR3_VM_SIZE pci_resource_len(dev->pdev, 3)
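+/* Rough layout, as assumed here: BAR1 is the aperture userspace maps
+ * VRAM objects through, BAR3 covers PRAMIN (instance memory).  Each
+ * gets its own page-table-backed VM sized to the PCI resource.
+ */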
struct nv50_instmem_priv {
uint32_t save1700[5]; /* 0x1700->0x1710 */
- struct nouveau_gpuobj *pramin_pt;
- struct nouveau_gpuobj *pramin_bar;
- struct nouveau_gpuobj *fb_bar;
+ struct nouveau_gpuobj *bar1_dmaobj;
+ struct nouveau_gpuobj *bar3_dmaobj;
};
static void
@@ -48,6 +54,7 @@ nv50_channel_del(struct nouveau_channel **pchan)
return;
nouveau_gpuobj_ref(NULL, &chan->ramfc);
+ nouveau_vm_ref(NULL, &chan->vm, chan->vm_pd);
nouveau_gpuobj_ref(NULL, &chan->vm_pd);
if (chan->ramin_heap.free_stack.next)
drm_mm_takedown(&chan->ramin_heap);
@@ -56,14 +63,14 @@ nv50_channel_del(struct nouveau_channel **pchan)
}
static int
-nv50_channel_new(struct drm_device *dev, u32 size,
+nv50_channel_new(struct drm_device *dev, u32 size, struct nouveau_vm *vm,
struct nouveau_channel **pchan)
{
struct drm_nouveau_private *dev_priv = dev->dev_private;
u32 pgd = (dev_priv->chipset == 0x50) ? 0x1400 : 0x0200;
u32 fc = (dev_priv->chipset == 0x50) ? 0x0000 : 0x4200;
struct nouveau_channel *chan;
- int ret;
+ int ret, i;
chan = kzalloc(sizeof(*chan), GFP_KERNEL);
if (!chan)
@@ -92,6 +99,17 @@ nv50_channel_new(struct drm_device *dev, u32 size,
return ret;
}
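+	/* Poison the fresh page directory: the low word of every PDE is
+	 * "not present", the high word a recognisable 0xdeadcafe marker so
+	 * stray translations stand out in dumps.
+	 */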
+ for (i = 0; i < 0x4000; i += 8) {
+ nv_wo32(chan->vm_pd, i + 0, 0x00000000);
+ nv_wo32(chan->vm_pd, i + 4, 0xdeadcafe);
+ }
+
+ ret = nouveau_vm_ref(vm, &chan->vm, chan->vm_pd);
+ if (ret) {
+ nv50_channel_del(&chan);
+ return ret;
+ }
+
ret = nouveau_gpuobj_new_fake(dev, chan->ramin->pinst == ~0 ? ~0 :
chan->ramin->pinst + fc,
chan->ramin->vinst + fc, 0x100,
@@ -111,6 +129,7 @@ nv50_instmem_init(struct drm_device *dev)
struct drm_nouveau_private *dev_priv = dev->dev_private;
struct nv50_instmem_priv *priv;
struct nouveau_channel *chan;
+ struct nouveau_vm *vm;
int ret, i;
u32 tmp;
@@ -127,112 +146,87 @@ nv50_instmem_init(struct drm_device *dev)
ret = drm_mm_init(&dev_priv->ramin_heap, 0, dev_priv->ramin_size);
if (ret) {
NV_ERROR(dev, "Failed to init RAMIN heap\n");
- return -ENOMEM;
+ goto error;
}
- /* we need a channel to plug into the hw to control the BARs */
- ret = nv50_channel_new(dev, 128*1024, &dev_priv->fifos[0]);
+ /* BAR3 */
+ ret = nouveau_vm_new(dev, BAR3_VM_BASE, BAR3_VM_SIZE, BAR3_VM_BASE,
+ &dev_priv->bar3_vm);
if (ret)
- return ret;
- chan = dev_priv->fifos[127] = dev_priv->fifos[0];
+ goto error;
- /* allocate page table for PRAMIN BAR */
- ret = nouveau_gpuobj_new(dev, chan, (dev_priv->ramin_size >> 12) * 8,
- 0x1000, NVOBJ_FLAG_ZERO_ALLOC,
- &priv->pramin_pt);
+ ret = nouveau_gpuobj_new(dev, NULL, (BAR3_VM_SIZE >> 12) * 8,
+ 0x1000, NVOBJ_FLAG_DONT_MAP |
+ NVOBJ_FLAG_ZERO_ALLOC,
+ &dev_priv->bar3_vm->pgt[0].obj[0]);
if (ret)
- return ret;
+ goto error;
+ dev_priv->bar3_vm->pgt[0].refcount[0] = 1;
- nv_wo32(chan->vm_pd, 0x0000, priv->pramin_pt->vinst | 0x63);
- nv_wo32(chan->vm_pd, 0x0004, 0);
+ nv50_instmem_map(dev_priv->bar3_vm->pgt[0].obj[0]);
- /* DMA object for PRAMIN BAR */
- ret = nouveau_gpuobj_new(dev, chan, 6*4, 16, 0, &priv->pramin_bar);
+ ret = nv50_channel_new(dev, 128 * 1024, dev_priv->bar3_vm, &chan);
if (ret)
- return ret;
- nv_wo32(priv->pramin_bar, 0x00, 0x7fc00000);
- nv_wo32(priv->pramin_bar, 0x04, dev_priv->ramin_size - 1);
- nv_wo32(priv->pramin_bar, 0x08, 0x00000000);
- nv_wo32(priv->pramin_bar, 0x0c, 0x00000000);
- nv_wo32(priv->pramin_bar, 0x10, 0x00000000);
- nv_wo32(priv->pramin_bar, 0x14, 0x00000000);
-
- /* map channel into PRAMIN, gpuobj didn't do it for us */
- ret = nv50_instmem_bind(dev, chan->ramin);
+ goto error;
+ dev_priv->channels.ptr[0] = dev_priv->channels.ptr[127] = chan;
+
+ ret = nv50_gpuobj_dma_new(chan, 0x0000, BAR3_VM_BASE, BAR3_VM_SIZE,
+ NV_MEM_TARGET_VM, NV_MEM_ACCESS_VM,
+ NV_MEM_TYPE_VM, NV_MEM_COMP_VM,
+ &priv->bar3_dmaobj);
if (ret)
- return ret;
+ goto error;
- /* poke regs... */
nv_wr32(dev, 0x001704, 0x00000000 | (chan->ramin->vinst >> 12));
nv_wr32(dev, 0x001704, 0x40000000 | (chan->ramin->vinst >> 12));
- nv_wr32(dev, 0x00170c, 0x80000000 | (priv->pramin_bar->cinst >> 4));
-
- tmp = nv_ri32(dev, 0);
- nv_wi32(dev, 0, ~tmp);
- if (nv_ri32(dev, 0) != ~tmp) {
- NV_ERROR(dev, "PRAMIN readback failed\n");
- return -EIO;
- }
- nv_wi32(dev, 0, tmp);
+ nv_wr32(dev, 0x00170c, 0x80000000 | (priv->bar3_dmaobj->cinst >> 4));
+ dev_priv->engine.instmem.flush(dev);
dev_priv->ramin_available = true;
- /* Determine VM layout */
- dev_priv->vm_gart_base = roundup(NV50_VM_BLOCK, NV50_VM_BLOCK);
- dev_priv->vm_gart_size = NV50_VM_BLOCK;
-
- dev_priv->vm_vram_base = dev_priv->vm_gart_base + dev_priv->vm_gart_size;
- dev_priv->vm_vram_size = dev_priv->vram_size;
- if (dev_priv->vm_vram_size > NV50_VM_MAX_VRAM)
- dev_priv->vm_vram_size = NV50_VM_MAX_VRAM;
- dev_priv->vm_vram_size = roundup(dev_priv->vm_vram_size, NV50_VM_BLOCK);
- dev_priv->vm_vram_pt_nr = dev_priv->vm_vram_size / NV50_VM_BLOCK;
-
- dev_priv->vm_end = dev_priv->vm_vram_base + dev_priv->vm_vram_size;
-
- NV_DEBUG(dev, "NV50VM: GART 0x%016llx-0x%016llx\n",
- dev_priv->vm_gart_base,
- dev_priv->vm_gart_base + dev_priv->vm_gart_size - 1);
- NV_DEBUG(dev, "NV50VM: VRAM 0x%016llx-0x%016llx\n",
- dev_priv->vm_vram_base,
- dev_priv->vm_vram_base + dev_priv->vm_vram_size - 1);
-
- /* VRAM page table(s), mapped into VM at +1GiB */
- for (i = 0; i < dev_priv->vm_vram_pt_nr; i++) {
- ret = nouveau_gpuobj_new(dev, NULL, NV50_VM_BLOCK / 0x10000 * 8,
- 0, NVOBJ_FLAG_ZERO_ALLOC,
- &chan->vm_vram_pt[i]);
- if (ret) {
- NV_ERROR(dev, "Error creating VRAM PGT: %d\n", ret);
- dev_priv->vm_vram_pt_nr = i;
- return ret;
- }
- dev_priv->vm_vram_pt[i] = chan->vm_vram_pt[i];
-
- nv_wo32(chan->vm_pd, 0x10 + (i*8),
- chan->vm_vram_pt[i]->vinst | 0x61);
- nv_wo32(chan->vm_pd, 0x14 + (i*8), 0);
+ tmp = nv_ro32(chan->ramin, 0);
+ nv_wo32(chan->ramin, 0, ~tmp);
+ if (nv_ro32(chan->ramin, 0) != ~tmp) {
+ NV_ERROR(dev, "PRAMIN readback failed\n");
+ ret = -EIO;
+ goto error;
}
+ nv_wo32(chan->ramin, 0, tmp);
- /* DMA object for FB BAR */
- ret = nouveau_gpuobj_new(dev, chan, 6*4, 16, 0, &priv->fb_bar);
+ /* BAR1 */
+ ret = nouveau_vm_new(dev, BAR1_VM_BASE, BAR1_VM_SIZE, BAR1_VM_BASE, &vm);
if (ret)
- return ret;
- nv_wo32(priv->fb_bar, 0x00, 0x7fc00000);
- nv_wo32(priv->fb_bar, 0x04, 0x40000000 +
- pci_resource_len(dev->pdev, 1) - 1);
- nv_wo32(priv->fb_bar, 0x08, 0x40000000);
- nv_wo32(priv->fb_bar, 0x0c, 0x00000000);
- nv_wo32(priv->fb_bar, 0x10, 0x00000000);
- nv_wo32(priv->fb_bar, 0x14, 0x00000000);
+ goto error;
- dev_priv->engine.instmem.flush(dev);
+ ret = nouveau_vm_ref(vm, &dev_priv->bar1_vm, chan->vm_pd);
+ if (ret)
+ goto error;
+ nouveau_vm_ref(NULL, &vm, NULL);
+
+ ret = nv50_gpuobj_dma_new(chan, 0x0000, BAR1_VM_BASE, BAR1_VM_SIZE,
+ NV_MEM_TARGET_VM, NV_MEM_ACCESS_VM,
+ NV_MEM_TYPE_VM, NV_MEM_COMP_VM,
+ &priv->bar1_dmaobj);
+ if (ret)
+ goto error;
- nv_wr32(dev, 0x001708, 0x80000000 | (priv->fb_bar->cinst >> 4));
+ nv_wr32(dev, 0x001708, 0x80000000 | (priv->bar1_dmaobj->cinst >> 4));
for (i = 0; i < 8; i++)
nv_wr32(dev, 0x1900 + (i*4), 0);
+ /* Create shared channel VM, space is reserved at the beginning
+ * to catch "NULL pointer" references
+ */
+ ret = nouveau_vm_new(dev, 0, (1ULL << 40), 0x0020000000ULL,
+ &dev_priv->chan_vm);
+ if (ret)
+ return ret;
+
return 0;
+
+error:
+ nv50_instmem_takedown(dev);
+ return ret;
}
void
@@ -240,7 +234,7 @@ nv50_instmem_takedown(struct drm_device *dev)
{
struct drm_nouveau_private *dev_priv = dev->dev_private;
struct nv50_instmem_priv *priv = dev_priv->engine.instmem.priv;
- struct nouveau_channel *chan = dev_priv->fifos[0];
+ struct nouveau_channel *chan = dev_priv->channels.ptr[0];
int i;
NV_DEBUG(dev, "\n");
@@ -250,23 +244,23 @@ nv50_instmem_takedown(struct drm_device *dev)
dev_priv->ramin_available = false;
- /* Restore state from before init */
+ nouveau_vm_ref(NULL, &dev_priv->chan_vm, NULL);
+
for (i = 0x1700; i <= 0x1710; i += 4)
nv_wr32(dev, i, priv->save1700[(i - 0x1700) / 4]);
- nouveau_gpuobj_ref(NULL, &priv->fb_bar);
- nouveau_gpuobj_ref(NULL, &priv->pramin_bar);
- nouveau_gpuobj_ref(NULL, &priv->pramin_pt);
+ nouveau_gpuobj_ref(NULL, &priv->bar3_dmaobj);
+ nouveau_gpuobj_ref(NULL, &priv->bar1_dmaobj);
- /* Destroy dummy channel */
- if (chan) {
- for (i = 0; i < dev_priv->vm_vram_pt_nr; i++)
- nouveau_gpuobj_ref(NULL, &chan->vm_vram_pt[i]);
- dev_priv->vm_vram_pt_nr = 0;
+ nouveau_vm_ref(NULL, &dev_priv->bar1_vm, chan->vm_pd);
+	dev_priv->channels.ptr[127] = NULL;
+ nv50_channel_del(&dev_priv->channels.ptr[0]);
- nv50_channel_del(&dev_priv->fifos[0]);
- dev_priv->fifos[127] = NULL;
- }
+ nouveau_gpuobj_ref(NULL, &dev_priv->bar3_vm->pgt[0].obj[0]);
+ nouveau_vm_ref(NULL, &dev_priv->bar3_vm, NULL);
+
+ if (dev_priv->ramin_heap.free_stack.next)
+ drm_mm_takedown(&dev_priv->ramin_heap);
dev_priv->engine.instmem.priv = NULL;
kfree(priv);
@@ -276,16 +270,8 @@ int
nv50_instmem_suspend(struct drm_device *dev)
{
struct drm_nouveau_private *dev_priv = dev->dev_private;
- struct nouveau_channel *chan = dev_priv->fifos[0];
- struct nouveau_gpuobj *ramin = chan->ramin;
- int i;
- ramin->im_backing_suspend = vmalloc(ramin->size);
- if (!ramin->im_backing_suspend)
- return -ENOMEM;
-
- for (i = 0; i < ramin->size; i += 4)
- ramin->im_backing_suspend[i/4] = nv_ri32(dev, i);
+ dev_priv->ramin_available = false;
return 0;
}
@@ -294,146 +280,121 @@ nv50_instmem_resume(struct drm_device *dev)
{
struct drm_nouveau_private *dev_priv = dev->dev_private;
struct nv50_instmem_priv *priv = dev_priv->engine.instmem.priv;
- struct nouveau_channel *chan = dev_priv->fifos[0];
- struct nouveau_gpuobj *ramin = chan->ramin;
+ struct nouveau_channel *chan = dev_priv->channels.ptr[0];
int i;
- dev_priv->ramin_available = false;
- dev_priv->ramin_base = ~0;
- for (i = 0; i < ramin->size; i += 4)
- nv_wo32(ramin, i, ramin->im_backing_suspend[i/4]);
- dev_priv->ramin_available = true;
- vfree(ramin->im_backing_suspend);
- ramin->im_backing_suspend = NULL;
-
/* Poke the relevant regs, and pray it works :) */
nv_wr32(dev, NV50_PUNK_BAR_CFG_BASE, (chan->ramin->vinst >> 12));
nv_wr32(dev, NV50_PUNK_UNK1710, 0);
nv_wr32(dev, NV50_PUNK_BAR_CFG_BASE, (chan->ramin->vinst >> 12) |
NV50_PUNK_BAR_CFG_BASE_VALID);
- nv_wr32(dev, NV50_PUNK_BAR1_CTXDMA, (priv->fb_bar->cinst >> 4) |
+ nv_wr32(dev, NV50_PUNK_BAR1_CTXDMA, (priv->bar1_dmaobj->cinst >> 4) |
NV50_PUNK_BAR1_CTXDMA_VALID);
- nv_wr32(dev, NV50_PUNK_BAR3_CTXDMA, (priv->pramin_bar->cinst >> 4) |
+ nv_wr32(dev, NV50_PUNK_BAR3_CTXDMA, (priv->bar3_dmaobj->cinst >> 4) |
NV50_PUNK_BAR3_CTXDMA_VALID);
for (i = 0; i < 8; i++)
nv_wr32(dev, 0x1900 + (i*4), 0);
+
+ dev_priv->ramin_available = true;
}
+struct nv50_gpuobj_node {
+ struct nouveau_vram *vram;
+ struct nouveau_vma chan_vma;
+ u32 align;
+};
+
int
-nv50_instmem_populate(struct drm_device *dev, struct nouveau_gpuobj *gpuobj,
- uint32_t *sz)
+nv50_instmem_get(struct nouveau_gpuobj *gpuobj, u32 size, u32 align)
{
+ struct drm_device *dev = gpuobj->dev;
+ struct drm_nouveau_private *dev_priv = dev->dev_private;
+ struct nouveau_vram_engine *vram = &dev_priv->engine.vram;
+ struct nv50_gpuobj_node *node = NULL;
int ret;
- if (gpuobj->im_backing)
- return -EINVAL;
+ node = kzalloc(sizeof(*node), GFP_KERNEL);
+ if (!node)
+ return -ENOMEM;
+ node->align = align;
- *sz = ALIGN(*sz, 4096);
- if (*sz == 0)
- return -EINVAL;
+ size = (size + 4095) & ~4095;
+ align = max(align, (u32)4096);
- ret = nouveau_bo_new(dev, NULL, *sz, 0, TTM_PL_FLAG_VRAM, 0, 0x0000,
- true, false, &gpuobj->im_backing);
+ ret = vram->get(dev, size, align, 0, 0, &node->vram);
if (ret) {
- NV_ERROR(dev, "error getting PRAMIN backing pages: %d\n", ret);
+ kfree(node);
return ret;
}
- ret = nouveau_bo_pin(gpuobj->im_backing, TTM_PL_FLAG_VRAM);
- if (ret) {
- NV_ERROR(dev, "error pinning PRAMIN backing VRAM: %d\n", ret);
- nouveau_bo_ref(NULL, &gpuobj->im_backing);
- return ret;
+ gpuobj->vinst = node->vram->offset;
+
+ if (gpuobj->flags & NVOBJ_FLAG_VM) {
+ ret = nouveau_vm_get(dev_priv->chan_vm, size, 12,
+ NV_MEM_ACCESS_RW | NV_MEM_ACCESS_SYS,
+ &node->chan_vma);
+ if (ret) {
+ vram->put(dev, &node->vram);
+ kfree(node);
+ return ret;
+ }
+
+ nouveau_vm_map(&node->chan_vma, node->vram);
+ gpuobj->vinst = node->chan_vma.offset;
}
- gpuobj->vinst = gpuobj->im_backing->bo.mem.start << PAGE_SHIFT;
+ gpuobj->size = size;
+ gpuobj->node = node;
return 0;
}
void
-nv50_instmem_clear(struct drm_device *dev, struct nouveau_gpuobj *gpuobj)
+nv50_instmem_put(struct nouveau_gpuobj *gpuobj)
{
+ struct drm_device *dev = gpuobj->dev;
struct drm_nouveau_private *dev_priv = dev->dev_private;
+ struct nouveau_vram_engine *vram = &dev_priv->engine.vram;
+ struct nv50_gpuobj_node *node;
+
+ node = gpuobj->node;
+ gpuobj->node = NULL;
- if (gpuobj && gpuobj->im_backing) {
- if (gpuobj->im_bound)
- dev_priv->engine.instmem.unbind(dev, gpuobj);
- nouveau_bo_unpin(gpuobj->im_backing);
- nouveau_bo_ref(NULL, &gpuobj->im_backing);
- gpuobj->im_backing = NULL;
+ if (node->chan_vma.node) {
+ nouveau_vm_unmap(&node->chan_vma);
+ nouveau_vm_put(&node->chan_vma);
}
+ vram->put(dev, &node->vram);
+ kfree(node);
}
int
-nv50_instmem_bind(struct drm_device *dev, struct nouveau_gpuobj *gpuobj)
+nv50_instmem_map(struct nouveau_gpuobj *gpuobj)
{
- struct drm_nouveau_private *dev_priv = dev->dev_private;
- struct nv50_instmem_priv *priv = dev_priv->engine.instmem.priv;
- struct nouveau_gpuobj *pramin_pt = priv->pramin_pt;
- uint32_t pte, pte_end;
- uint64_t vram;
-
- if (!gpuobj->im_backing || !gpuobj->im_pramin || gpuobj->im_bound)
- return -EINVAL;
-
- NV_DEBUG(dev, "st=0x%lx sz=0x%lx\n",
- gpuobj->im_pramin->start, gpuobj->im_pramin->size);
-
- pte = (gpuobj->im_pramin->start >> 12) << 1;
- pte_end = ((gpuobj->im_pramin->size >> 12) << 1) + pte;
- vram = gpuobj->vinst;
-
- NV_DEBUG(dev, "pramin=0x%lx, pte=%d, pte_end=%d\n",
- gpuobj->im_pramin->start, pte, pte_end);
- NV_DEBUG(dev, "first vram page: 0x%010llx\n", gpuobj->vinst);
-
- vram |= 1;
- if (dev_priv->vram_sys_base) {
- vram += dev_priv->vram_sys_base;
- vram |= 0x30;
- }
-
- while (pte < pte_end) {
- nv_wo32(pramin_pt, (pte * 4) + 0, lower_32_bits(vram));
- nv_wo32(pramin_pt, (pte * 4) + 4, upper_32_bits(vram));
- vram += 0x1000;
- pte += 2;
- }
- dev_priv->engine.instmem.flush(dev);
+ struct drm_nouveau_private *dev_priv = gpuobj->dev->dev_private;
+ struct nv50_gpuobj_node *node = gpuobj->node;
+ int ret;
- nv50_vm_flush(dev, 6);
+ ret = nouveau_vm_get(dev_priv->bar3_vm, gpuobj->size, 12,
+ NV_MEM_ACCESS_RW, &node->vram->bar_vma);
+ if (ret)
+ return ret;
- gpuobj->im_bound = 1;
+ nouveau_vm_map(&node->vram->bar_vma, node->vram);
+ gpuobj->pinst = node->vram->bar_vma.offset;
return 0;
}
-int
-nv50_instmem_unbind(struct drm_device *dev, struct nouveau_gpuobj *gpuobj)
+void
+nv50_instmem_unmap(struct nouveau_gpuobj *gpuobj)
{
- struct drm_nouveau_private *dev_priv = dev->dev_private;
- struct nv50_instmem_priv *priv = dev_priv->engine.instmem.priv;
- uint32_t pte, pte_end;
-
- if (gpuobj->im_bound == 0)
- return -EINVAL;
-
- /* can happen during late takedown */
- if (unlikely(!dev_priv->ramin_available))
- return 0;
+ struct nv50_gpuobj_node *node = gpuobj->node;
- pte = (gpuobj->im_pramin->start >> 12) << 1;
- pte_end = ((gpuobj->im_pramin->size >> 12) << 1) + pte;
-
- while (pte < pte_end) {
- nv_wo32(priv->pramin_pt, (pte * 4) + 0, 0x00000000);
- nv_wo32(priv->pramin_pt, (pte * 4) + 4, 0x00000000);
- pte += 2;
+ if (node->vram->bar_vma.node) {
+ nouveau_vm_unmap(&node->vram->bar_vma);
+ nouveau_vm_put(&node->vram->bar_vma);
}
- dev_priv->engine.instmem.flush(dev);
-
- gpuobj->im_bound = 0;
- return 0;
}
void
@@ -452,11 +413,3 @@ nv84_instmem_flush(struct drm_device *dev)
NV_ERROR(dev, "PRAMIN flush timeout\n");
}
-void
-nv50_vm_flush(struct drm_device *dev, int engine)
-{
- nv_wr32(dev, 0x100c80, (engine << 16) | 1);
- if (!nv_wait(dev, 0x100c80, 0x00000001, 0x00000000))
- NV_ERROR(dev, "vm flush timeout: engine %d\n", engine);
-}
-
diff --git a/drivers/gpu/drm/nouveau/nv50_vm.c b/drivers/gpu/drm/nouveau/nv50_vm.c
new file mode 100644
index 00000000000..38e523e1099
--- /dev/null
+++ b/drivers/gpu/drm/nouveau/nv50_vm.c
@@ -0,0 +1,180 @@
+/*
+ * Copyright 2010 Red Hat Inc.
+ *
+ * Permission is hereby granted, free of charge, to any person obtaining a
+ * copy of this software and associated documentation files (the "Software"),
+ * to deal in the Software without restriction, including without limitation
+ * the rights to use, copy, modify, merge, publish, distribute, sublicense,
+ * and/or sell copies of the Software, and to permit persons to whom the
+ * Software is furnished to do so, subject to the following conditions:
+ *
+ * The above copyright notice and this permission notice shall be included in
+ * all copies or substantial portions of the Software.
+ *
+ * THE SOFTWARE IS PROVIDED "AS IS", WITHOUT WARRANTY OF ANY KIND, EXPRESS OR
+ * IMPLIED, INCLUDING BUT NOT LIMITED TO THE WARRANTIES OF MERCHANTABILITY,
+ * FITNESS FOR A PARTICULAR PURPOSE AND NONINFRINGEMENT. IN NO EVENT SHALL
+ * THE COPYRIGHT HOLDER(S) OR AUTHOR(S) BE LIABLE FOR ANY CLAIM, DAMAGES OR
+ * OTHER LIABILITY, WHETHER IN AN ACTION OF CONTRACT, TORT OR OTHERWISE,
+ * ARISING FROM, OUT OF OR IN CONNECTION WITH THE SOFTWARE OR THE USE OR
+ * OTHER DEALINGS IN THE SOFTWARE.
+ *
+ * Authors: Ben Skeggs
+ */
+
+#include "drmP.h"
+
+#include "nouveau_drv.h"
+#include "nouveau_vm.h"
+
+void
+nv50_vm_map_pgt(struct nouveau_gpuobj *pgd, u32 pde,
+ struct nouveau_gpuobj *pgt[2])
+{
+ struct drm_nouveau_private *dev_priv = pgd->dev->dev_private;
+ u64 phys = 0xdeadcafe00000000ULL;
+ u32 coverage = 0;
+
+ if (pgt[0]) {
+ phys = 0x00000003 | pgt[0]->vinst; /* present, 4KiB pages */
+ coverage = (pgt[0]->size >> 3) << 12;
+ } else
+ if (pgt[1]) {
+ phys = 0x00000001 | pgt[1]->vinst; /* present */
+ coverage = (pgt[1]->size >> 3) << 16;
+ }
+
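+	/* The low PDE bits below are assumptions from reverse engineering:
+	 * 0x60/0x40/0x20 look like hints telling the hardware how much of
+	 * the page table is valid, based on its coverage.
+	 */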
+ if (phys & 1) {
+ if (dev_priv->vram_sys_base) {
+ phys += dev_priv->vram_sys_base;
+ phys |= 0x30;
+ }
+
+ if (coverage <= 32 * 1024 * 1024)
+ phys |= 0x60;
+ else if (coverage <= 64 * 1024 * 1024)
+ phys |= 0x40;
+ else if (coverage < 128 * 1024 * 1024)
+ phys |= 0x20;
+ }
+
+ nv_wo32(pgd, (pde * 8) + 0, lower_32_bits(phys));
+ nv_wo32(pgd, (pde * 8) + 4, upper_32_bits(phys));
+}
+
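+/* Build a PTE from a physical address.  The bit layout used here is the
+ * reverse-engineered one: bit 0 present, bit 3 read-only, bits 4-5 the
+ * target (VRAM/system memory), bit 6 system-coherent, and the memtype
+ * (storage type) from bit 40 up.
+ */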
+static inline u64
+nv50_vm_addr(struct nouveau_vma *vma, struct nouveau_gpuobj *pgt,
+ u64 phys, u32 memtype, u32 target)
+{
+ struct drm_nouveau_private *dev_priv = pgt->dev->dev_private;
+
+ phys |= 1; /* present */
+ phys |= (u64)memtype << 40;
+
+ /* IGPs don't have real VRAM, re-target to stolen system memory */
+ if (target == 0 && dev_priv->vram_sys_base) {
+ phys += dev_priv->vram_sys_base;
+ target = 3;
+ }
+
+ phys |= target << 4;
+
+ if (vma->access & NV_MEM_ACCESS_SYS)
+ phys |= (1 << 6);
+
+ if (!(vma->access & NV_MEM_ACCESS_WO))
+ phys |= (1 << 3);
+
+ return phys;
+}
+
+void
+nv50_vm_map(struct nouveau_vma *vma, struct nouveau_gpuobj *pgt,
+ struct nouveau_vram *mem, u32 pte, u32 cnt, u64 phys)
+{
+ u32 block;
+ int i;
+
+ phys = nv50_vm_addr(vma, pgt, phys, mem->memtype, 0);
+ pte <<= 3;
+ cnt <<= 3;
+
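+	/* PTEs are 8 bytes each, so work in byte offsets from here on.  The
+	 * loop below writes the mapping in the largest naturally-aligned
+	 * power-of-two runs it can; the run size is encoded into bits 7-9
+	 * of the low PTE word (i << 7), which the hardware seems to use as
+	 * a contiguous-block hint.
+	 */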
+ while (cnt) {
+ u32 offset_h = upper_32_bits(phys);
+ u32 offset_l = lower_32_bits(phys);
+
+ for (i = 7; i >= 0; i--) {
+ block = 1 << (i + 3);
+ if (cnt >= block && !(pte & (block - 1)))
+ break;
+ }
+ offset_l |= (i << 7);
+
+ phys += block << (vma->node->type - 3);
+ cnt -= block;
+
+ while (block) {
+ nv_wo32(pgt, pte + 0, offset_l);
+ nv_wo32(pgt, pte + 4, offset_h);
+ pte += 8;
+ block -= 8;
+ }
+ }
+}
+
+void
+nv50_vm_map_sg(struct nouveau_vma *vma, struct nouveau_gpuobj *pgt,
+ u32 pte, dma_addr_t *list, u32 cnt)
+{
+ pte <<= 3;
+ while (cnt--) {
+ u64 phys = nv50_vm_addr(vma, pgt, (u64)*list++, 0, 2);
+ nv_wo32(pgt, pte + 0, lower_32_bits(phys));
+ nv_wo32(pgt, pte + 4, upper_32_bits(phys));
+ pte += 8;
+ }
+}
+
+void
+nv50_vm_unmap(struct nouveau_gpuobj *pgt, u32 pte, u32 cnt)
+{
+ pte <<= 3;
+ while (cnt--) {
+ nv_wo32(pgt, pte + 0, 0x00000000);
+ nv_wo32(pgt, pte + 4, 0x00000000);
+ pte += 8;
+ }
+}
+
+void
+nv50_vm_flush(struct nouveau_vm *vm)
+{
+ struct drm_nouveau_private *dev_priv = vm->dev->dev_private;
+ struct nouveau_instmem_engine *pinstmem = &dev_priv->engine.instmem;
+ struct nouveau_fifo_engine *pfifo = &dev_priv->engine.fifo;
+ struct nouveau_pgraph_engine *pgraph = &dev_priv->engine.graph;
+ struct nouveau_crypt_engine *pcrypt = &dev_priv->engine.crypt;
+
+ pinstmem->flush(vm->dev);
+
+ /* BAR */
+ if (vm != dev_priv->chan_vm) {
+ nv50_vm_flush_engine(vm->dev, 6);
+ return;
+ }
+
+ pfifo->tlb_flush(vm->dev);
+
+ if (atomic_read(&vm->pgraph_refs))
+ pgraph->tlb_flush(vm->dev);
+ if (atomic_read(&vm->pcrypt_refs))
+ pcrypt->tlb_flush(vm->dev);
+}
+
+void
+nv50_vm_flush_engine(struct drm_device *dev, int engine)
+{
+ nv_wr32(dev, 0x100c80, (engine << 16) | 1);
+ if (!nv_wait(dev, 0x100c80, 0x00000001, 0x00000000))
+ NV_ERROR(dev, "vm flush timeout: engine %d\n", engine);
+}
diff --git a/drivers/gpu/drm/nouveau/nv50_vram.c b/drivers/gpu/drm/nouveau/nv50_vram.c
new file mode 100644
index 00000000000..58e98ad3634
--- /dev/null
+++ b/drivers/gpu/drm/nouveau/nv50_vram.c
@@ -0,0 +1,190 @@
+/*
+ * Copyright 2010 Red Hat Inc.
+ *
+ * Permission is hereby granted, free of charge, to any person obtaining a
+ * copy of this software and associated documentation files (the "Software"),
+ * to deal in the Software without restriction, including without limitation
+ * the rights to use, copy, modify, merge, publish, distribute, sublicense,
+ * and/or sell copies of the Software, and to permit persons to whom the
+ * Software is furnished to do so, subject to the following conditions:
+ *
+ * The above copyright notice and this permission notice shall be included in
+ * all copies or substantial portions of the Software.
+ *
+ * THE SOFTWARE IS PROVIDED "AS IS", WITHOUT WARRANTY OF ANY KIND, EXPRESS OR
+ * IMPLIED, INCLUDING BUT NOT LIMITED TO THE WARRANTIES OF MERCHANTABILITY,
+ * FITNESS FOR A PARTICULAR PURPOSE AND NONINFRINGEMENT. IN NO EVENT SHALL
+ * THE COPYRIGHT HOLDER(S) OR AUTHOR(S) BE LIABLE FOR ANY CLAIM, DAMAGES OR
+ * OTHER LIABILITY, WHETHER IN AN ACTION OF CONTRACT, TORT OR OTHERWISE,
+ * ARISING FROM, OUT OF OR IN CONNECTION WITH THE SOFTWARE OR THE USE OR
+ * OTHER DEALINGS IN THE SOFTWARE.
+ *
+ * Authors: Ben Skeggs
+ */
+
+#include "drmP.h"
+#include "nouveau_drv.h"
+#include "nouveau_mm.h"
+
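+/* Which tiling memtypes (NOUVEAU_GEM_TILE_LAYOUT bits 15:8) are usable
+ * in VRAM.  The values are assumptions: 0 means invalid, and a non-zero
+ * entry selects the allocation type handed to the memory manager.
+ */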
+static int types[0x80] = {
+ 1, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0,
+ 1, 1, 1, 1, 0, 0, 0, 0, 2, 2, 2, 2, 0, 0, 0, 0,
+ 1, 1, 1, 1, 1, 1, 1, 0, 2, 2, 2, 2, 2, 2, 2, 0,
+ 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0,
+ 1, 1, 1, 1, 1, 1, 1, 2, 2, 2, 2, 2, 2, 2, 0, 0,
+ 0, 0, 0, 0, 1, 1, 1, 0, 0, 0, 0, 0, 0, 0, 0, 0,
+ 1, 1, 1, 1, 1, 1, 1, 0, 1, 1, 1, 1, 2, 2, 2, 2,
+ 1, 0, 2, 0, 1, 0, 2, 0, 1, 1, 2, 2, 1, 1, 0, 0
+};
+
+bool
+nv50_vram_flags_valid(struct drm_device *dev, u32 tile_flags)
+{
+ int type = (tile_flags & NOUVEAU_GEM_TILE_LAYOUT_MASK) >> 8;
+
+ if (likely(type < ARRAY_SIZE(types) && types[type]))
+ return true;
+ return false;
+}
+
+void
+nv50_vram_del(struct drm_device *dev, struct nouveau_vram **pvram)
+{
+ struct drm_nouveau_private *dev_priv = dev->dev_private;
+ struct ttm_bo_device *bdev = &dev_priv->ttm.bdev;
+ struct ttm_mem_type_manager *man = &bdev->man[TTM_PL_VRAM];
+ struct nouveau_mm *mm = man->priv;
+ struct nouveau_mm_node *this;
+ struct nouveau_vram *vram;
+
+ vram = *pvram;
+ *pvram = NULL;
+ if (unlikely(vram == NULL))
+ return;
+
+ mutex_lock(&mm->mutex);
+ while (!list_empty(&vram->regions)) {
+ this = list_first_entry(&vram->regions, struct nouveau_mm_node, rl_entry);
+
+ list_del(&this->rl_entry);
+ nouveau_mm_put(mm, this);
+ }
+ mutex_unlock(&mm->mutex);
+
+ kfree(vram);
+}
+
+int
+nv50_vram_new(struct drm_device *dev, u64 size, u32 align, u32 size_nc,
+ u32 type, struct nouveau_vram **pvram)
+{
+ struct drm_nouveau_private *dev_priv = dev->dev_private;
+ struct ttm_bo_device *bdev = &dev_priv->ttm.bdev;
+ struct ttm_mem_type_manager *man = &bdev->man[TTM_PL_VRAM];
+ struct nouveau_mm *mm = man->priv;
+ struct nouveau_mm_node *r;
+ struct nouveau_vram *vram;
+ int ret;
+
+ if (!types[type])
+ return -EINVAL;
+ size >>= 12;
+ align >>= 12;
+ size_nc >>= 12;
+
+ vram = kzalloc(sizeof(*vram), GFP_KERNEL);
+ if (!vram)
+ return -ENOMEM;
+
+ INIT_LIST_HEAD(&vram->regions);
+ vram->dev = dev_priv->dev;
+ vram->memtype = type;
+ vram->size = size;
+
+ mutex_lock(&mm->mutex);
+ do {
+ ret = nouveau_mm_get(mm, types[type], size, size_nc, align, &r);
+ if (ret) {
+ mutex_unlock(&mm->mutex);
+ nv50_vram_del(dev, &vram);
+ return ret;
+ }
+
+ list_add_tail(&r->rl_entry, &vram->regions);
+ size -= r->length;
+ } while (size);
+ mutex_unlock(&mm->mutex);
+
+ r = list_first_entry(&vram->regions, struct nouveau_mm_node, rl_entry);
+ vram->offset = (u64)r->offset << 12;
+ *pvram = vram;
+ return 0;
+}
+
+static u32
+nv50_vram_rblock(struct drm_device *dev)
+{
+ struct drm_nouveau_private *dev_priv = dev->dev_private;
+ int i, parts, colbits, rowbitsa, rowbitsb, banks;
+ u64 rowsize, predicted;
+ u32 r0, r4, rt, ru, rblock_size;
+
+ r0 = nv_rd32(dev, 0x100200);
+ r4 = nv_rd32(dev, 0x100204);
+ rt = nv_rd32(dev, 0x100250);
+ ru = nv_rd32(dev, 0x001540);
+ NV_DEBUG(dev, "memcfg 0x%08x 0x%08x 0x%08x 0x%08x\n", r0, r4, rt, ru);
+
+ for (i = 0, parts = 0; i < 8; i++) {
+ if (ru & (0x00010000 << i))
+ parts++;
+ }
+
+ colbits = (r4 & 0x0000f000) >> 12;
+ rowbitsa = ((r4 & 0x000f0000) >> 16) + 8;
+ rowbitsb = ((r4 & 0x00f00000) >> 20) + 8;
+ banks = ((r4 & 0x01000000) ? 8 : 4);
+
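+	/* One "row" spans every partition and bank at 8 bytes per column;
+	 * the predicted VRAM size is that row size shifted by the row-bit
+	 * counts (both ranks, if a second rank is present per 0x100200
+	 * bit 2).
+	 */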
+ rowsize = parts * banks * (1 << colbits) * 8;
+ predicted = rowsize << rowbitsa;
+ if (r0 & 0x00000004)
+ predicted += rowsize << rowbitsb;
+
+ if (predicted != dev_priv->vram_size) {
+ NV_WARN(dev, "memory controller reports %dMiB VRAM\n",
+ (u32)(dev_priv->vram_size >> 20));
+ NV_WARN(dev, "we calculated %dMiB VRAM\n",
+ (u32)(predicted >> 20));
+ }
+
+ rblock_size = rowsize;
+ if (rt & 1)
+ rblock_size *= 3;
+
+ NV_DEBUG(dev, "rblock %d bytes\n", rblock_size);
+ return rblock_size;
+}
+
+int
+nv50_vram_init(struct drm_device *dev)
+{
+ struct drm_nouveau_private *dev_priv = dev->dev_private;
+
+ dev_priv->vram_size = nv_rd32(dev, 0x10020c);
+ dev_priv->vram_size |= (dev_priv->vram_size & 0xff) << 32;
+ dev_priv->vram_size &= 0xffffffff00ULL;
+
+ switch (dev_priv->chipset) {
+ case 0xaa:
+ case 0xac:
+ case 0xaf:
+ dev_priv->vram_sys_base = (u64)nv_rd32(dev, 0x100e10) << 12;
+ dev_priv->vram_rblock_size = 4096;
+ break;
+ default:
+ dev_priv->vram_rblock_size = nv50_vram_rblock(dev);
+ break;
+ }
+
+ return 0;
+}
diff --git a/drivers/gpu/drm/nouveau/nv84_crypt.c b/drivers/gpu/drm/nouveau/nv84_crypt.c
new file mode 100644
index 00000000000..ec18ae1c388
--- /dev/null
+++ b/drivers/gpu/drm/nouveau/nv84_crypt.c
@@ -0,0 +1,140 @@
+/*
+ * Copyright 2010 Red Hat Inc.
+ *
+ * Permission is hereby granted, free of charge, to any person obtaining a
+ * copy of this software and associated documentation files (the "Software"),
+ * to deal in the Software without restriction, including without limitation
+ * the rights to use, copy, modify, merge, publish, distribute, sublicense,
+ * and/or sell copies of the Software, and to permit persons to whom the
+ * Software is furnished to do so, subject to the following conditions:
+ *
+ * The above copyright notice and this permission notice shall be included in
+ * all copies or substantial portions of the Software.
+ *
+ * THE SOFTWARE IS PROVIDED "AS IS", WITHOUT WARRANTY OF ANY KIND, EXPRESS OR
+ * IMPLIED, INCLUDING BUT NOT LIMITED TO THE WARRANTIES OF MERCHANTABILITY,
+ * FITNESS FOR A PARTICULAR PURPOSE AND NONINFRINGEMENT. IN NO EVENT SHALL
+ * THE COPYRIGHT HOLDER(S) OR AUTHOR(S) BE LIABLE FOR ANY CLAIM, DAMAGES OR
+ * OTHER LIABILITY, WHETHER IN AN ACTION OF CONTRACT, TORT OR OTHERWISE,
+ * ARISING FROM, OUT OF OR IN CONNECTION WITH THE SOFTWARE OR THE USE OR
+ * OTHER DEALINGS IN THE SOFTWARE.
+ *
+ * Authors: Ben Skeggs
+ */
+
+#include "drmP.h"
+#include "nouveau_drv.h"
+#include "nouveau_util.h"
+#include "nouveau_vm.h"
+
+static void nv84_crypt_isr(struct drm_device *);
+
+int
+nv84_crypt_create_context(struct nouveau_channel *chan)
+{
+ struct drm_device *dev = chan->dev;
+ struct drm_nouveau_private *dev_priv = dev->dev_private;
+ struct nouveau_gpuobj *ramin = chan->ramin;
+ int ret;
+
+ NV_DEBUG(dev, "ch%d\n", chan->id);
+
+ ret = nouveau_gpuobj_new(dev, chan, 256, 0,
+ NVOBJ_FLAG_ZERO_ALLOC | NVOBJ_FLAG_ZERO_FREE,
+ &chan->crypt_ctx);
+ if (ret)
+ return ret;
+
+ nv_wo32(ramin, 0xa0, 0x00190000);
+ nv_wo32(ramin, 0xa4, chan->crypt_ctx->vinst + 0xff);
+ nv_wo32(ramin, 0xa8, chan->crypt_ctx->vinst);
+ nv_wo32(ramin, 0xac, 0);
+ nv_wo32(ramin, 0xb0, 0);
+ nv_wo32(ramin, 0xb4, 0);
+
+ dev_priv->engine.instmem.flush(dev);
+ atomic_inc(&chan->vm->pcrypt_refs);
+ return 0;
+}
+
+void
+nv84_crypt_destroy_context(struct nouveau_channel *chan)
+{
+ struct drm_device *dev = chan->dev;
+ u32 inst;
+
+ if (!chan->crypt_ctx)
+ return;
+
+ inst = (chan->ramin->vinst >> 12);
+ inst |= 0x80000000;
+
+	/* Mark the context as invalid if it's still on the hardware;
+	 * not doing so causes issues the next time PCRYPT is used,
+	 * unsurprisingly :)
+	 */
+ nv_wr32(dev, 0x10200c, 0x00000000);
+ if (nv_rd32(dev, 0x102188) == inst)
+ nv_mask(dev, 0x102188, 0x80000000, 0x00000000);
+ if (nv_rd32(dev, 0x10218c) == inst)
+ nv_mask(dev, 0x10218c, 0x80000000, 0x00000000);
+ nv_wr32(dev, 0x10200c, 0x00000010);
+
+ nouveau_gpuobj_ref(NULL, &chan->crypt_ctx);
+ atomic_dec(&chan->vm->pcrypt_refs);
+}
+
+void
+nv84_crypt_tlb_flush(struct drm_device *dev)
+{
+ nv50_vm_flush_engine(dev, 0x0a);
+}
+
+int
+nv84_crypt_init(struct drm_device *dev)
+{
+ struct drm_nouveau_private *dev_priv = dev->dev_private;
+ struct nouveau_crypt_engine *pcrypt = &dev_priv->engine.crypt;
+
+ if (!pcrypt->registered) {
+ NVOBJ_CLASS(dev, 0x74c1, CRYPT);
+ pcrypt->registered = true;
+ }
+
+ nv_mask(dev, 0x000200, 0x00004000, 0x00000000);
+ nv_mask(dev, 0x000200, 0x00004000, 0x00004000);
+
+ nouveau_irq_register(dev, 14, nv84_crypt_isr);
+ nv_wr32(dev, 0x102130, 0xffffffff);
+ nv_wr32(dev, 0x102140, 0xffffffbf);
+
+ nv_wr32(dev, 0x10200c, 0x00000010);
+ return 0;
+}
+
+void
+nv84_crypt_fini(struct drm_device *dev)
+{
+ nv_wr32(dev, 0x102140, 0x00000000);
+ nouveau_irq_unregister(dev, 14);
+}
+
+static void
+nv84_crypt_isr(struct drm_device *dev)
+{
+ u32 stat = nv_rd32(dev, 0x102130);
+ u32 mthd = nv_rd32(dev, 0x102190);
+ u32 data = nv_rd32(dev, 0x102194);
+ u32 inst = nv_rd32(dev, 0x102188) & 0x7fffffff;
+ int show = nouveau_ratelimit();
+
+ if (show) {
+ NV_INFO(dev, "PCRYPT_INTR: 0x%08x 0x%08x 0x%08x 0x%08x\n",
+ stat, mthd, data, inst);
+ }
+
+ nv_wr32(dev, 0x102130, stat);
+ nv_wr32(dev, 0x10200c, 0x10);
+
+ nv50_fb_vm_trap(dev, show, "PCRYPT");
+}
diff --git a/drivers/gpu/drm/nouveau/nvc0_fbcon.c b/drivers/gpu/drm/nouveau/nvc0_fbcon.c
new file mode 100644
index 00000000000..fa5d4c23438
--- /dev/null
+++ b/drivers/gpu/drm/nouveau/nvc0_fbcon.c
@@ -0,0 +1,269 @@
+/*
+ * Copyright 2010 Red Hat Inc.
+ *
+ * Permission is hereby granted, free of charge, to any person obtaining a
+ * copy of this software and associated documentation files (the "Software"),
+ * to deal in the Software without restriction, including without limitation
+ * the rights to use, copy, modify, merge, publish, distribute, sublicense,
+ * and/or sell copies of the Software, and to permit persons to whom the
+ * Software is furnished to do so, subject to the following conditions:
+ *
+ * The above copyright notice and this permission notice shall be included in
+ * all copies or substantial portions of the Software.
+ *
+ * THE SOFTWARE IS PROVIDED "AS IS", WITHOUT WARRANTY OF ANY KIND, EXPRESS OR
+ * IMPLIED, INCLUDING BUT NOT LIMITED TO THE WARRANTIES OF MERCHANTABILITY,
+ * FITNESS FOR A PARTICULAR PURPOSE AND NONINFRINGEMENT. IN NO EVENT SHALL
+ * THE COPYRIGHT HOLDER(S) OR AUTHOR(S) BE LIABLE FOR ANY CLAIM, DAMAGES OR
+ * OTHER LIABILITY, WHETHER IN AN ACTION OF CONTRACT, TORT OR OTHERWISE,
+ * ARISING FROM, OUT OF OR IN CONNECTION WITH THE SOFTWARE OR THE USE OR
+ * OTHER DEALINGS IN THE SOFTWARE.
+ *
+ * Authors: Ben Skeggs
+ */
+
+#include "drmP.h"
+#include "nouveau_drv.h"
+#include "nouveau_dma.h"
+#include "nouveau_ramht.h"
+#include "nouveau_fbcon.h"
+#include "nouveau_mm.h"
+
+int
+nvc0_fbcon_fillrect(struct fb_info *info, const struct fb_fillrect *rect)
+{
+ struct nouveau_fbdev *nfbdev = info->par;
+ struct drm_device *dev = nfbdev->dev;
+ struct drm_nouveau_private *dev_priv = dev->dev_private;
+ struct nouveau_channel *chan = dev_priv->channel;
+ int ret;
+
+ ret = RING_SPACE(chan, rect->rop == ROP_COPY ? 7 : 11);
+ if (ret)
+ return ret;
+
+ if (rect->rop != ROP_COPY) {
+ BEGIN_NVC0(chan, 2, NvSub2D, 0x02ac, 1);
+ OUT_RING (chan, 1);
+ }
+ BEGIN_NVC0(chan, 2, NvSub2D, 0x0588, 1);
+ if (info->fix.visual == FB_VISUAL_TRUECOLOR ||
+ info->fix.visual == FB_VISUAL_DIRECTCOLOR)
+ OUT_RING (chan, ((uint32_t *)info->pseudo_palette)[rect->color]);
+ else
+ OUT_RING (chan, rect->color);
+ BEGIN_NVC0(chan, 2, NvSub2D, 0x0600, 4);
+ OUT_RING (chan, rect->dx);
+ OUT_RING (chan, rect->dy);
+ OUT_RING (chan, rect->dx + rect->width);
+ OUT_RING (chan, rect->dy + rect->height);
+ if (rect->rop != ROP_COPY) {
+ BEGIN_NVC0(chan, 2, NvSub2D, 0x02ac, 1);
+ OUT_RING (chan, 3);
+ }
+ FIRE_RING(chan);
+ return 0;
+}
+
+int
+nvc0_fbcon_copyarea(struct fb_info *info, const struct fb_copyarea *region)
+{
+ struct nouveau_fbdev *nfbdev = info->par;
+ struct drm_device *dev = nfbdev->dev;
+ struct drm_nouveau_private *dev_priv = dev->dev_private;
+ struct nouveau_channel *chan = dev_priv->channel;
+ int ret;
+
+ ret = RING_SPACE(chan, 12);
+ if (ret)
+ return ret;
+
+ BEGIN_NVC0(chan, 2, NvSub2D, 0x0110, 1);
+ OUT_RING (chan, 0);
+ BEGIN_NVC0(chan, 2, NvSub2D, 0x08b0, 4);
+ OUT_RING (chan, region->dx);
+ OUT_RING (chan, region->dy);
+ OUT_RING (chan, region->width);
+ OUT_RING (chan, region->height);
+ BEGIN_NVC0(chan, 2, NvSub2D, 0x08d0, 4);
+ OUT_RING (chan, 0);
+ OUT_RING (chan, region->sx);
+ OUT_RING (chan, 0);
+ OUT_RING (chan, region->sy);
+ FIRE_RING(chan);
+ return 0;
+}
+
+int
+nvc0_fbcon_imageblit(struct fb_info *info, const struct fb_image *image)
+{
+ struct nouveau_fbdev *nfbdev = info->par;
+ struct drm_device *dev = nfbdev->dev;
+ struct drm_nouveau_private *dev_priv = dev->dev_private;
+ struct nouveau_channel *chan = dev_priv->channel;
+ uint32_t width, dwords, *data = (uint32_t *)image->data;
+ uint32_t mask = ~(~0 >> (32 - info->var.bits_per_pixel));
+ uint32_t *palette = info->pseudo_palette;
+ int ret;
+
+ if (image->depth != 1)
+ return -ENODEV;
+
+ ret = RING_SPACE(chan, 11);
+ if (ret)
+ return ret;
+
+ width = ALIGN(image->width, 32);
+ dwords = (width * image->height) >> 5;
+
+ BEGIN_NVC0(chan, 2, NvSub2D, 0x0814, 2);
+ if (info->fix.visual == FB_VISUAL_TRUECOLOR ||
+ info->fix.visual == FB_VISUAL_DIRECTCOLOR) {
+ OUT_RING (chan, palette[image->bg_color] | mask);
+ OUT_RING (chan, palette[image->fg_color] | mask);
+ } else {
+ OUT_RING (chan, image->bg_color);
+ OUT_RING (chan, image->fg_color);
+ }
+ BEGIN_NVC0(chan, 2, NvSub2D, 0x0838, 2);
+ OUT_RING (chan, image->width);
+ OUT_RING (chan, image->height);
+ BEGIN_NVC0(chan, 2, NvSub2D, 0x0850, 4);
+ OUT_RING (chan, 0);
+ OUT_RING (chan, image->dx);
+ OUT_RING (chan, 0);
+ OUT_RING (chan, image->dy);
+
+ while (dwords) {
+ int push = dwords > 2047 ? 2047 : dwords;
+
+ ret = RING_SPACE(chan, push + 1);
+ if (ret)
+ return ret;
+
+ dwords -= push;
+
+ BEGIN_NVC0(chan, 6, NvSub2D, 0x0860, push);
+ OUT_RINGp(chan, data, push);
+ data += push;
+ }
+
+ FIRE_RING(chan);
+ return 0;
+}
+
+int
+nvc0_fbcon_accel_init(struct fb_info *info)
+{
+ struct nouveau_fbdev *nfbdev = info->par;
+ struct drm_device *dev = nfbdev->dev;
+ struct drm_nouveau_private *dev_priv = dev->dev_private;
+ struct nouveau_channel *chan = dev_priv->channel;
+ struct nouveau_bo *nvbo = nfbdev->nouveau_fb.nvbo;
+ int ret, format;
+
+ ret = nouveau_gpuobj_gr_new(chan, 0x902d, 0x902d);
+ if (ret)
+ return ret;
+
+ switch (info->var.bits_per_pixel) {
+ case 8:
+ format = 0xf3;
+ break;
+ case 15:
+ format = 0xf8;
+ break;
+ case 16:
+ format = 0xe8;
+ break;
+ case 32:
+ switch (info->var.transp.length) {
+ case 0: /* depth 24 */
+ case 8: /* depth 32, just use 24.. */
+ format = 0xe6;
+ break;
+ case 2: /* depth 30 */
+ format = 0xd1;
+ break;
+ default:
+ return -EINVAL;
+ }
+ break;
+ default:
+ return -EINVAL;
+ }
+
+ ret = RING_SPACE(chan, 60);
+ if (ret) {
+ WARN_ON(1);
+ nouveau_fbcon_gpu_lockup(info);
+ return ret;
+ }
+
+ BEGIN_NVC0(chan, 2, NvSub2D, 0x0000, 1);
+ OUT_RING (chan, 0x0000902d);
+ BEGIN_NVC0(chan, 2, NvSub2D, 0x0104, 2);
+ OUT_RING (chan, upper_32_bits(chan->notifier_bo->bo.offset));
+ OUT_RING (chan, lower_32_bits(chan->notifier_bo->bo.offset));
+ BEGIN_NVC0(chan, 2, NvSub2D, 0x0290, 1);
+ OUT_RING (chan, 0);
+ BEGIN_NVC0(chan, 2, NvSub2D, 0x0888, 1);
+ OUT_RING (chan, 1);
+ BEGIN_NVC0(chan, 2, NvSub2D, 0x02ac, 1);
+ OUT_RING (chan, 3);
+ BEGIN_NVC0(chan, 2, NvSub2D, 0x02a0, 1);
+ OUT_RING (chan, 0x55);
+ BEGIN_NVC0(chan, 2, NvSub2D, 0x08c0, 4);
+ OUT_RING (chan, 0);
+ OUT_RING (chan, 1);
+ OUT_RING (chan, 0);
+ OUT_RING (chan, 1);
+ BEGIN_NVC0(chan, 2, NvSub2D, 0x0580, 2);
+ OUT_RING (chan, 4);
+ OUT_RING (chan, format);
+ BEGIN_NVC0(chan, 2, NvSub2D, 0x02e8, 2);
+ OUT_RING (chan, 2);
+ OUT_RING (chan, 1);
+
+ BEGIN_NVC0(chan, 2, NvSub2D, 0x0804, 1);
+ OUT_RING (chan, format);
+ BEGIN_NVC0(chan, 2, NvSub2D, 0x0800, 1);
+ OUT_RING (chan, 1);
+ BEGIN_NVC0(chan, 2, NvSub2D, 0x0808, 3);
+ OUT_RING (chan, 0);
+ OUT_RING (chan, 0);
+ OUT_RING (chan, 1);
+ BEGIN_NVC0(chan, 2, NvSub2D, 0x081c, 1);
+ OUT_RING (chan, 1);
+ BEGIN_NVC0(chan, 2, NvSub2D, 0x0840, 4);
+ OUT_RING (chan, 0);
+ OUT_RING (chan, 1);
+ OUT_RING (chan, 0);
+ OUT_RING (chan, 1);
+ BEGIN_NVC0(chan, 2, NvSub2D, 0x0200, 10);
+ OUT_RING (chan, format);
+ OUT_RING (chan, 1);
+ OUT_RING (chan, 0);
+ OUT_RING (chan, 1);
+ OUT_RING (chan, 0);
+ OUT_RING (chan, info->fix.line_length);
+ OUT_RING (chan, info->var.xres_virtual);
+ OUT_RING (chan, info->var.yres_virtual);
+ OUT_RING (chan, upper_32_bits(nvbo->vma.offset));
+ OUT_RING (chan, lower_32_bits(nvbo->vma.offset));
+ BEGIN_NVC0(chan, 2, NvSub2D, 0x0230, 10);
+ OUT_RING (chan, format);
+ OUT_RING (chan, 1);
+ OUT_RING (chan, 0);
+ OUT_RING (chan, 1);
+ OUT_RING (chan, 0);
+ OUT_RING (chan, info->fix.line_length);
+ OUT_RING (chan, info->var.xres_virtual);
+ OUT_RING (chan, info->var.yres_virtual);
+ OUT_RING (chan, upper_32_bits(nvbo->vma.offset));
+ OUT_RING (chan, lower_32_bits(nvbo->vma.offset));
+ FIRE_RING (chan);
+
+ return 0;
+}
+
diff --git a/drivers/gpu/drm/nouveau/nvc0_fifo.c b/drivers/gpu/drm/nouveau/nvc0_fifo.c
index 890c2b95fbc..e6f92c541db 100644
--- a/drivers/gpu/drm/nouveau/nvc0_fifo.c
+++ b/drivers/gpu/drm/nouveau/nvc0_fifo.c
@@ -25,6 +25,49 @@
#include "drmP.h"
#include "nouveau_drv.h"
+#include "nouveau_mm.h"
+
+static void nvc0_fifo_isr(struct drm_device *);
+
+struct nvc0_fifo_priv {
+ struct nouveau_gpuobj *playlist[2];
+ int cur_playlist;
+ struct nouveau_vma user_vma;
+ int spoon_nr;
+};
+
+struct nvc0_fifo_chan {
+ struct nouveau_bo *user;
+ struct nouveau_gpuobj *ramfc;
+};
+
+static void
+nvc0_fifo_playlist_update(struct drm_device *dev)
+{
+ struct drm_nouveau_private *dev_priv = dev->dev_private;
+ struct nouveau_instmem_engine *pinstmem = &dev_priv->engine.instmem;
+ struct nouveau_fifo_engine *pfifo = &dev_priv->engine.fifo;
+ struct nvc0_fifo_priv *priv = pfifo->priv;
+ struct nouveau_gpuobj *cur;
+ int i, p;
+
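+	/* The playlist is double-buffered: build the new list in the idle
+	 * buffer (8 bytes per entry: channel id + 0x00000004) while PFIFO
+	 * may still be reading the old one, then point 0x2270/0x2274 at it.
+	 */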
+ cur = priv->playlist[priv->cur_playlist];
+ priv->cur_playlist = !priv->cur_playlist;
+
+ for (i = 0, p = 0; i < 128; i++) {
+ if (!(nv_rd32(dev, 0x3004 + (i * 8)) & 1))
+ continue;
+ nv_wo32(cur, p + 0, i);
+ nv_wo32(cur, p + 4, 0x00000004);
+ p += 8;
+ }
+ pinstmem->flush(dev);
+
+ nv_wr32(dev, 0x002270, cur->vinst >> 12);
+ nv_wr32(dev, 0x002274, 0x01f00000 | (p >> 3));
+ if (!nv_wait(dev, 0x00227c, 0x00100000, 0x00000000))
+ NV_ERROR(dev, "PFIFO - playlist update failed\n");
+}
void
nvc0_fifo_disable(struct drm_device *dev)
@@ -57,12 +100,135 @@ nvc0_fifo_channel_id(struct drm_device *dev)
int
nvc0_fifo_create_context(struct nouveau_channel *chan)
{
+ struct drm_device *dev = chan->dev;
+ struct drm_nouveau_private *dev_priv = dev->dev_private;
+ struct nouveau_instmem_engine *pinstmem = &dev_priv->engine.instmem;
+ struct nouveau_fifo_engine *pfifo = &dev_priv->engine.fifo;
+ struct nvc0_fifo_priv *priv = pfifo->priv;
+ struct nvc0_fifo_chan *fifoch;
+ u64 ib_virt, user_vinst;
+ int ret;
+
+ chan->fifo_priv = kzalloc(sizeof(*fifoch), GFP_KERNEL);
+ if (!chan->fifo_priv)
+ return -ENOMEM;
+ fifoch = chan->fifo_priv;
+
+ /* allocate vram for control regs, map into polling area */
+ ret = nouveau_bo_new(dev, NULL, 0x1000, 0, TTM_PL_FLAG_VRAM,
+ 0, 0, true, true, &fifoch->user);
+ if (ret)
+ goto error;
+
+ ret = nouveau_bo_pin(fifoch->user, TTM_PL_FLAG_VRAM);
+ if (ret) {
+ nouveau_bo_ref(NULL, &fifoch->user);
+ goto error;
+ }
+
+ user_vinst = fifoch->user->bo.mem.start << PAGE_SHIFT;
+
+ ret = nouveau_bo_map(fifoch->user);
+ if (ret) {
+ nouveau_bo_unpin(fifoch->user);
+ nouveau_bo_ref(NULL, &fifoch->user);
+ goto error;
+ }
+
+ nouveau_vm_map_at(&priv->user_vma, chan->id * 0x1000,
+ fifoch->user->bo.mem.mm_node);
+
+ chan->user = ioremap_wc(pci_resource_start(dev->pdev, 1) +
+ priv->user_vma.offset + (chan->id * 0x1000),
+ PAGE_SIZE);
+ if (!chan->user) {
+ ret = -ENOMEM;
+ goto error;
+ }
+
+ ib_virt = chan->pushbuf_base + chan->dma.ib_base * 4;
+
+ /* zero channel regs */
+ nouveau_bo_wr32(fifoch->user, 0x0040/4, 0);
+ nouveau_bo_wr32(fifoch->user, 0x0044/4, 0);
+ nouveau_bo_wr32(fifoch->user, 0x0048/4, 0);
+ nouveau_bo_wr32(fifoch->user, 0x004c/4, 0);
+ nouveau_bo_wr32(fifoch->user, 0x0050/4, 0);
+ nouveau_bo_wr32(fifoch->user, 0x0058/4, 0);
+ nouveau_bo_wr32(fifoch->user, 0x005c/4, 0);
+ nouveau_bo_wr32(fifoch->user, 0x0060/4, 0);
+ nouveau_bo_wr32(fifoch->user, 0x0088/4, 0);
+ nouveau_bo_wr32(fifoch->user, 0x008c/4, 0);
+
+ /* ramfc */
+ ret = nouveau_gpuobj_new_fake(dev, chan->ramin->pinst,
+ chan->ramin->vinst, 0x100,
+ NVOBJ_FLAG_ZERO_ALLOC, &fifoch->ramfc);
+ if (ret)
+ goto error;
+
+ nv_wo32(fifoch->ramfc, 0x08, lower_32_bits(user_vinst));
+ nv_wo32(fifoch->ramfc, 0x0c, upper_32_bits(user_vinst));
+ nv_wo32(fifoch->ramfc, 0x10, 0x0000face);
+ nv_wo32(fifoch->ramfc, 0x30, 0xfffff902);
+ nv_wo32(fifoch->ramfc, 0x48, lower_32_bits(ib_virt));
+ nv_wo32(fifoch->ramfc, 0x4c, drm_order(chan->dma.ib_max + 1) << 16 |
+ upper_32_bits(ib_virt));
+ nv_wo32(fifoch->ramfc, 0x54, 0x00000002);
+ nv_wo32(fifoch->ramfc, 0x84, 0x20400000);
+ nv_wo32(fifoch->ramfc, 0x94, 0x30000001);
+ nv_wo32(fifoch->ramfc, 0x9c, 0x00000100);
+ nv_wo32(fifoch->ramfc, 0xa4, 0x1f1f1f1f);
+ nv_wo32(fifoch->ramfc, 0xa8, 0x1f1f1f1f);
+ nv_wo32(fifoch->ramfc, 0xac, 0x0000001f);
+ nv_wo32(fifoch->ramfc, 0xb8, 0xf8000000);
+ nv_wo32(fifoch->ramfc, 0xf8, 0x10003080); /* 0x002310 */
+ nv_wo32(fifoch->ramfc, 0xfc, 0x10000010); /* 0x002350 */
+ pinstmem->flush(dev);
+
+ nv_wr32(dev, 0x003000 + (chan->id * 8), 0xc0000000 |
+ (chan->ramin->vinst >> 12));
+ nv_wr32(dev, 0x003004 + (chan->id * 8), 0x001f0001);
+ nvc0_fifo_playlist_update(dev);
return 0;
+
+error:
+ pfifo->destroy_context(chan);
+ return ret;
}
void
nvc0_fifo_destroy_context(struct nouveau_channel *chan)
{
+ struct drm_device *dev = chan->dev;
+ struct nvc0_fifo_chan *fifoch;
+
+ nv_mask(dev, 0x003004 + (chan->id * 8), 0x00000001, 0x00000000);
+ nv_wr32(dev, 0x002634, chan->id);
+	if (!nv_wait(dev, 0x002634, 0xffffffff, chan->id))
+ NV_WARN(dev, "0x2634 != chid: 0x%08x\n", nv_rd32(dev, 0x2634));
+
+ nvc0_fifo_playlist_update(dev);
+
+ nv_wr32(dev, 0x003000 + (chan->id * 8), 0x00000000);
+
+ if (chan->user) {
+ iounmap(chan->user);
+ chan->user = NULL;
+ }
+
+ fifoch = chan->fifo_priv;
+ chan->fifo_priv = NULL;
+ if (!fifoch)
+ return;
+
+ nouveau_gpuobj_ref(NULL, &fifoch->ramfc);
+ if (fifoch->user) {
+ nouveau_bo_unmap(fifoch->user);
+ nouveau_bo_unpin(fifoch->user);
+ nouveau_bo_ref(NULL, &fifoch->user);
+ }
+ kfree(fifoch);
}
int
@@ -77,14 +243,213 @@ nvc0_fifo_unload_context(struct drm_device *dev)
return 0;
}
+static void
+nvc0_fifo_destroy(struct drm_device *dev)
+{
+ struct drm_nouveau_private *dev_priv = dev->dev_private;
+ struct nouveau_fifo_engine *pfifo = &dev_priv->engine.fifo;
+ struct nvc0_fifo_priv *priv;
+
+ priv = pfifo->priv;
+ if (!priv)
+ return;
+
+ nouveau_vm_put(&priv->user_vma);
+ nouveau_gpuobj_ref(NULL, &priv->playlist[1]);
+ nouveau_gpuobj_ref(NULL, &priv->playlist[0]);
+ kfree(priv);
+}
+
void
nvc0_fifo_takedown(struct drm_device *dev)
{
+ nv_wr32(dev, 0x002140, 0x00000000);
+ nvc0_fifo_destroy(dev);
+}
+
+static int
+nvc0_fifo_create(struct drm_device *dev)
+{
+ struct drm_nouveau_private *dev_priv = dev->dev_private;
+ struct nouveau_fifo_engine *pfifo = &dev_priv->engine.fifo;
+ struct nvc0_fifo_priv *priv;
+ int ret;
+
+ priv = kzalloc(sizeof(*priv), GFP_KERNEL);
+ if (!priv)
+ return -ENOMEM;
+ pfifo->priv = priv;
+
+ ret = nouveau_gpuobj_new(dev, NULL, 0x1000, 0x1000, 0,
+ &priv->playlist[0]);
+ if (ret)
+ goto error;
+
+ ret = nouveau_gpuobj_new(dev, NULL, 0x1000, 0x1000, 0,
+ &priv->playlist[1]);
+ if (ret)
+ goto error;
+
+ ret = nouveau_vm_get(dev_priv->bar1_vm, pfifo->channels * 0x1000,
+ 12, NV_MEM_ACCESS_RW, &priv->user_vma);
+ if (ret)
+ goto error;
+
+ nouveau_irq_register(dev, 8, nvc0_fifo_isr);
+ NVOBJ_CLASS(dev, 0x506e, SW); /* nvsw */
+ return 0;
+
+error:
+ nvc0_fifo_destroy(dev);
+ return ret;
}
int
nvc0_fifo_init(struct drm_device *dev)
{
+ struct drm_nouveau_private *dev_priv = dev->dev_private;
+ struct nouveau_fifo_engine *pfifo = &dev_priv->engine.fifo;
+ struct nvc0_fifo_priv *priv;
+ int ret, i;
+
+ if (!pfifo->priv) {
+ ret = nvc0_fifo_create(dev);
+ if (ret)
+ return ret;
+ }
+ priv = pfifo->priv;
+
+ /* reset PFIFO, enable all available PSUBFIFO areas */
+ nv_mask(dev, 0x000200, 0x00000100, 0x00000000);
+ nv_mask(dev, 0x000200, 0x00000100, 0x00000100);
+ nv_wr32(dev, 0x000204, 0xffffffff);
+ nv_wr32(dev, 0x002204, 0xffffffff);
+
+ priv->spoon_nr = hweight32(nv_rd32(dev, 0x002204));
+ NV_DEBUG(dev, "PFIFO: %d subfifo(s)\n", priv->spoon_nr);
+
+ /* assign engines to subfifos */
+ if (priv->spoon_nr >= 3) {
+ nv_wr32(dev, 0x002208, ~(1 << 0)); /* PGRAPH */
+ nv_wr32(dev, 0x00220c, ~(1 << 1)); /* PVP */
+ nv_wr32(dev, 0x002210, ~(1 << 1)); /* PPP */
+ nv_wr32(dev, 0x002214, ~(1 << 1)); /* PBSP */
+ nv_wr32(dev, 0x002218, ~(1 << 2)); /* PCE0 */
+ nv_wr32(dev, 0x00221c, ~(1 << 1)); /* PCE1 */
+ }
+
+ /* PSUBFIFO[n] */
+ for (i = 0; i < 3; i++) {
+ nv_mask(dev, 0x04013c + (i * 0x2000), 0x10000100, 0x00000000);
+ nv_wr32(dev, 0x040108 + (i * 0x2000), 0xffffffff); /* INTR */
+ nv_wr32(dev, 0x04010c + (i * 0x2000), 0xfffffeff); /* INTR_EN */
+ }
+
+ nv_mask(dev, 0x002200, 0x00000001, 0x00000001);
+ nv_wr32(dev, 0x002254, 0x10000000 | priv->user_vma.offset >> 12);
+
+ nv_wr32(dev, 0x002a00, 0xffffffff); /* clears PFIFO.INTR bit 30 */
+ nv_wr32(dev, 0x002100, 0xffffffff);
+ nv_wr32(dev, 0x002140, 0xbfffffff);
return 0;
}
+struct nouveau_enum nvc0_fifo_fault_unit[] = {
+ { 0, "PGRAPH" },
+ { 3, "PEEPHOLE" },
+ { 4, "BAR1" },
+ { 5, "BAR3" },
+ { 7, "PFIFO" },
+ {}
+};
+
+struct nouveau_enum nvc0_fifo_fault_reason[] = {
+ { 0, "PT_NOT_PRESENT" },
+ { 1, "PT_TOO_SHORT" },
+ { 2, "PAGE_NOT_PRESENT" },
+ { 3, "VM_LIMIT_EXCEEDED" },
+ {}
+};
+
+struct nouveau_bitfield nvc0_fifo_subfifo_intr[] = {
+/* { 0x00008000, "" } seen with null ib push */
+ { 0x00200000, "ILLEGAL_MTHD" },
+ { 0x00800000, "EMPTY_SUBC" },
+ {}
+};
+
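+/* Decode an MMU fault: 0x2800 + unit*0x10 holds the channel instance,
+ * the fault address (lo/hi) and a status word whose low nibble indexes
+ * the reason table above (register layout as understood so far).
+ */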
+static void
+nvc0_fifo_isr_vm_fault(struct drm_device *dev, int unit)
+{
+ u32 inst = nv_rd32(dev, 0x2800 + (unit * 0x10));
+ u32 valo = nv_rd32(dev, 0x2804 + (unit * 0x10));
+ u32 vahi = nv_rd32(dev, 0x2808 + (unit * 0x10));
+ u32 stat = nv_rd32(dev, 0x280c + (unit * 0x10));
+
+ NV_INFO(dev, "PFIFO: %s fault at 0x%010llx [",
+ (stat & 0x00000080) ? "write" : "read", (u64)vahi << 32 | valo);
+ nouveau_enum_print(nvc0_fifo_fault_reason, stat & 0x0000000f);
+ printk("] from ");
+ nouveau_enum_print(nvc0_fifo_fault_unit, unit);
+ printk(" on channel 0x%010llx\n", (u64)inst << 12);
+}
+
+static void
+nvc0_fifo_isr_subfifo_intr(struct drm_device *dev, int unit)
+{
+ u32 stat = nv_rd32(dev, 0x040108 + (unit * 0x2000));
+ u32 addr = nv_rd32(dev, 0x0400c0 + (unit * 0x2000));
+ u32 data = nv_rd32(dev, 0x0400c4 + (unit * 0x2000));
+ u32 chid = nv_rd32(dev, 0x040120 + (unit * 0x2000)) & 0x7f;
+	u32 subc = (addr & 0x00070000) >> 16;
+ u32 mthd = (addr & 0x00003ffc);
+
+ NV_INFO(dev, "PSUBFIFO %d:", unit);
+ nouveau_bitfield_print(nvc0_fifo_subfifo_intr, stat);
+ NV_INFO(dev, "PSUBFIFO %d: ch %d subc %d mthd 0x%04x data 0x%08x\n",
+ unit, chid, subc, mthd, data);
+
+ nv_wr32(dev, 0x0400c0 + (unit * 0x2000), 0x80600008);
+ nv_wr32(dev, 0x040108 + (unit * 0x2000), stat);
+}
+
+static void
+nvc0_fifo_isr(struct drm_device *dev)
+{
+ u32 stat = nv_rd32(dev, 0x002100);
+
+ if (stat & 0x10000000) {
+ u32 units = nv_rd32(dev, 0x00259c);
+ u32 u = units;
+
+ while (u) {
+ int i = ffs(u) - 1;
+ nvc0_fifo_isr_vm_fault(dev, i);
+ u &= ~(1 << i);
+ }
+
+ nv_wr32(dev, 0x00259c, units);
+ stat &= ~0x10000000;
+ }
+
+ if (stat & 0x20000000) {
+ u32 units = nv_rd32(dev, 0x0025a0);
+ u32 u = units;
+
+ while (u) {
+ int i = ffs(u) - 1;
+ nvc0_fifo_isr_subfifo_intr(dev, i);
+ u &= ~(1 << i);
+ }
+
+ nv_wr32(dev, 0x0025a0, units);
+ stat &= ~0x20000000;
+ }
+
+	if (stat) {
+		NV_INFO(dev, "PFIFO: unhandled status 0x%08x\n", stat);
+		nv_wr32(dev, 0x002100, stat);
+		/* mask interrupts off until the unhandled source is dealt
+		 * with, rather than silencing PFIFO on every ISR pass
+		 */
+		nv_wr32(dev, 0x002140, 0);
+	}
+}
diff --git a/drivers/gpu/drm/nouveau/nvc0_graph.c b/drivers/gpu/drm/nouveau/nvc0_graph.c
index 717a5177a8d..5feacd5d5fa 100644
--- a/drivers/gpu/drm/nouveau/nvc0_graph.c
+++ b/drivers/gpu/drm/nouveau/nvc0_graph.c
@@ -22,9 +22,16 @@
* Authors: Ben Skeggs
*/
+#include <linux/firmware.h>
+
#include "drmP.h"
#include "nouveau_drv.h"
+#include "nouveau_mm.h"
+#include "nvc0_graph.h"
+
+static void nvc0_graph_isr(struct drm_device *);
+static int nvc0_graph_unload_context_to(struct drm_device *dev, u64 chan);
void
nvc0_graph_fifo_access(struct drm_device *dev, bool enabled)
@@ -37,39 +44,735 @@ nvc0_graph_channel(struct drm_device *dev)
return NULL;
}
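+
+/*
+ * Generate the initial "golden" context image: bind the channel, have the
+ * context-control microcode fill in a fresh context, unload it, then cache
+ * a copy so every later channel can be seeded from the same image.
+ */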
+static int
+nvc0_graph_construct_context(struct nouveau_channel *chan)
+{
+ struct drm_nouveau_private *dev_priv = chan->dev->dev_private;
+ struct nvc0_graph_priv *priv = dev_priv->engine.graph.priv;
+ struct nvc0_graph_chan *grch = chan->pgraph_ctx;
+ struct drm_device *dev = chan->dev;
+ int ret, i;
+ u32 *ctx;
+
+ ctx = kmalloc(priv->grctx_size, GFP_KERNEL);
+ if (!ctx)
+ return -ENOMEM;
+
+ nvc0_graph_load_context(chan);
+
+ nv_wo32(grch->grctx, 0x1c, 1);
+ nv_wo32(grch->grctx, 0x20, 0);
+ nv_wo32(grch->grctx, 0x28, 0);
+ nv_wo32(grch->grctx, 0x2c, 0);
+ dev_priv->engine.instmem.flush(dev);
+
+ ret = nvc0_grctx_generate(chan);
+ if (ret) {
+ kfree(ctx);
+ return ret;
+ }
+
+ ret = nvc0_graph_unload_context_to(dev, chan->ramin->vinst);
+ if (ret) {
+ kfree(ctx);
+ return ret;
+ }
+
+ for (i = 0; i < priv->grctx_size; i += 4)
+ ctx[i / 4] = nv_ro32(grch->grctx, i);
+
+ priv->grctx_vals = ctx;
+ return 0;
+}
+
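+/*
+ * Build the per-channel mmio list: register/value pairs (mmio_nr of them)
+ * pointing various PGRAPH units at the buffers allocated above, apparently
+ * replayed by the ctxctl microcode whenever this channel's context loads.
+ */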
+static int
+nvc0_graph_create_context_mmio_list(struct nouveau_channel *chan)
+{
+ struct drm_nouveau_private *dev_priv = chan->dev->dev_private;
+ struct nvc0_graph_priv *priv = dev_priv->engine.graph.priv;
+ struct nvc0_graph_chan *grch = chan->pgraph_ctx;
+ struct drm_device *dev = chan->dev;
+ int i = 0, gpc, tp, ret;
+ u32 magic;
+
+ ret = nouveau_gpuobj_new(dev, NULL, 0x2000, 256, NVOBJ_FLAG_VM,
+ &grch->unk408004);
+ if (ret)
+ return ret;
+
+ ret = nouveau_gpuobj_new(dev, NULL, 0x8000, 256, NVOBJ_FLAG_VM,
+ &grch->unk40800c);
+ if (ret)
+ return ret;
+
+ ret = nouveau_gpuobj_new(dev, NULL, 384 * 1024, 4096, NVOBJ_FLAG_VM,
+ &grch->unk418810);
+ if (ret)
+ return ret;
+
+ ret = nouveau_gpuobj_new(dev, NULL, 0x1000, 0, NVOBJ_FLAG_VM,
+ &grch->mmio);
+ if (ret)
+ return ret;
+
+ nv_wo32(grch->mmio, i++ * 4, 0x00408004);
+ nv_wo32(grch->mmio, i++ * 4, grch->unk408004->vinst >> 8);
+ nv_wo32(grch->mmio, i++ * 4, 0x00408008);
+ nv_wo32(grch->mmio, i++ * 4, 0x80000018);
+
+ nv_wo32(grch->mmio, i++ * 4, 0x0040800c);
+ nv_wo32(grch->mmio, i++ * 4, grch->unk40800c->vinst >> 8);
+ nv_wo32(grch->mmio, i++ * 4, 0x00408010);
+ nv_wo32(grch->mmio, i++ * 4, 0x80000000);
+
+ nv_wo32(grch->mmio, i++ * 4, 0x00418810);
+ nv_wo32(grch->mmio, i++ * 4, 0x80000000 | grch->unk418810->vinst >> 12);
+ nv_wo32(grch->mmio, i++ * 4, 0x00419848);
+ nv_wo32(grch->mmio, i++ * 4, 0x10000000 | grch->unk418810->vinst >> 12);
+
+ nv_wo32(grch->mmio, i++ * 4, 0x00419004);
+ nv_wo32(grch->mmio, i++ * 4, grch->unk40800c->vinst >> 8);
+ nv_wo32(grch->mmio, i++ * 4, 0x00419008);
+ nv_wo32(grch->mmio, i++ * 4, 0x00000000);
+
+ nv_wo32(grch->mmio, i++ * 4, 0x00418808);
+ nv_wo32(grch->mmio, i++ * 4, grch->unk408004->vinst >> 8);
+ nv_wo32(grch->mmio, i++ * 4, 0x0041880c);
+ nv_wo32(grch->mmio, i++ * 4, 0x80000018);
+
+ magic = 0x02180000;
+ nv_wo32(grch->mmio, i++ * 4, 0x00405830);
+ nv_wo32(grch->mmio, i++ * 4, magic);
+ for (gpc = 0; gpc < priv->gpc_nr; gpc++) {
+ for (tp = 0; tp < priv->tp_nr[gpc]; tp++, magic += 0x02fc) {
+ u32 reg = 0x504520 + (gpc * 0x8000) + (tp * 0x0800);
+ nv_wo32(grch->mmio, i++ * 4, reg);
+ nv_wo32(grch->mmio, i++ * 4, magic);
+ }
+ }
+
+ grch->mmio_nr = i / 2;
+ return 0;
+}
+
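+/*
+ * Per-channel PGRAPH context setup: allocate the context object, point the
+ * channel's instance block at it (0x210/0x214), seed it from the golden
+ * image, then patch in this channel's mmio list and header flags.
+ */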
int
nvc0_graph_create_context(struct nouveau_channel *chan)
{
+ struct drm_nouveau_private *dev_priv = chan->dev->dev_private;
+ struct nouveau_instmem_engine *pinstmem = &dev_priv->engine.instmem;
+ struct nouveau_pgraph_engine *pgraph = &dev_priv->engine.graph;
+ struct nvc0_graph_priv *priv = pgraph->priv;
+ struct nvc0_graph_chan *grch;
+ struct drm_device *dev = chan->dev;
+ struct nouveau_gpuobj *grctx;
+ int ret, i;
+
+ chan->pgraph_ctx = kzalloc(sizeof(*grch), GFP_KERNEL);
+ if (!chan->pgraph_ctx)
+ return -ENOMEM;
+ grch = chan->pgraph_ctx;
+
+ ret = nouveau_gpuobj_new(dev, NULL, priv->grctx_size, 256,
+ NVOBJ_FLAG_VM | NVOBJ_FLAG_ZERO_ALLOC,
+ &grch->grctx);
+ if (ret)
+ goto error;
+ chan->ramin_grctx = grch->grctx;
+ grctx = grch->grctx;
+
+ ret = nvc0_graph_create_context_mmio_list(chan);
+ if (ret)
+ goto error;
+
+ nv_wo32(chan->ramin, 0x0210, lower_32_bits(grctx->vinst) | 4);
+ nv_wo32(chan->ramin, 0x0214, upper_32_bits(grctx->vinst));
+ pinstmem->flush(dev);
+
+ if (!priv->grctx_vals) {
+ ret = nvc0_graph_construct_context(chan);
+ if (ret)
+ goto error;
+ }
+
+ for (i = 0; i < priv->grctx_size; i += 4)
+ nv_wo32(grctx, i, priv->grctx_vals[i / 4]);
+
+ nv_wo32(grctx, 0xf4, 0);
+ nv_wo32(grctx, 0xf8, 0);
+ nv_wo32(grctx, 0x10, grch->mmio_nr);
+ nv_wo32(grctx, 0x14, lower_32_bits(grch->mmio->vinst));
+ nv_wo32(grctx, 0x18, upper_32_bits(grch->mmio->vinst));
+ nv_wo32(grctx, 0x1c, 1);
+ nv_wo32(grctx, 0x20, 0);
+ nv_wo32(grctx, 0x28, 0);
+ nv_wo32(grctx, 0x2c, 0);
+ pinstmem->flush(dev);
return 0;
+
+error:
+ pgraph->destroy_context(chan);
+ return ret;
}
void
nvc0_graph_destroy_context(struct nouveau_channel *chan)
{
+ struct nvc0_graph_chan *grch;
+
+ grch = chan->pgraph_ctx;
+ chan->pgraph_ctx = NULL;
+ if (!grch)
+ return;
+
+ nouveau_gpuobj_ref(NULL, &grch->mmio);
+ nouveau_gpuobj_ref(NULL, &grch->unk418810);
+ nouveau_gpuobj_ref(NULL, &grch->unk40800c);
+ nouveau_gpuobj_ref(NULL, &grch->unk408004);
+ nouveau_gpuobj_ref(NULL, &grch->grctx);
+ chan->ramin_grctx = NULL;
}
int
nvc0_graph_load_context(struct nouveau_channel *chan)
{
+ struct drm_device *dev = chan->dev;
+
+ nv_wr32(dev, 0x409840, 0x00000030);
+ nv_wr32(dev, 0x409500, 0x80000000 | chan->ramin->vinst >> 12);
+ nv_wr32(dev, 0x409504, 0x00000003);
+ if (!nv_wait(dev, 0x409800, 0x00000010, 0x00000010))
+ NV_ERROR(dev, "PGRAPH: load_ctx timeout\n");
+
+ return 0;
+}
+
+static int
+nvc0_graph_unload_context_to(struct drm_device *dev, u64 chan)
+{
+ nv_wr32(dev, 0x409840, 0x00000003);
+ nv_wr32(dev, 0x409500, 0x80000000 | chan >> 12);
+ nv_wr32(dev, 0x409504, 0x00000009);
+ if (!nv_wait(dev, 0x409800, 0x00000001, 0x00000000)) {
+ NV_ERROR(dev, "PGRAPH: unload_ctx timeout\n");
+ return -EBUSY;
+ }
+
return 0;
}
int
nvc0_graph_unload_context(struct drm_device *dev)
{
- return 0;
+ u64 inst = (u64)(nv_rd32(dev, 0x409b00) & 0x0fffffff) << 12;
+ return nvc0_graph_unload_context_to(dev, inst);
+}
+
+static void
+nvc0_graph_destroy(struct drm_device *dev)
+{
+ struct drm_nouveau_private *dev_priv = dev->dev_private;
+ struct nouveau_pgraph_engine *pgraph = &dev_priv->engine.graph;
+ struct nvc0_graph_priv *priv;
+
+ priv = pgraph->priv;
+ if (!priv)
+ return;
+
+ nouveau_irq_unregister(dev, 12);
+
+ nouveau_gpuobj_ref(NULL, &priv->unk4188b8);
+ nouveau_gpuobj_ref(NULL, &priv->unk4188b4);
+
+ kfree(priv->grctx_vals);
+ kfree(priv);
}
void
nvc0_graph_takedown(struct drm_device *dev)
{
+ nvc0_graph_destroy(dev);
+}
+
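+/*
+ * One-time PGRAPH setup: probe the GPC/ROP configuration from 0x409604 and
+ * the per-GPC TP counts, then pick the matching set of (still unexplained)
+ * magic values for the known chipsets.
+ */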
+static int
+nvc0_graph_create(struct drm_device *dev)
+{
+ struct drm_nouveau_private *dev_priv = dev->dev_private;
+ struct nouveau_pgraph_engine *pgraph = &dev_priv->engine.graph;
+ struct nvc0_graph_priv *priv;
+ int ret, gpc, i;
+
+ priv = kzalloc(sizeof(*priv), GFP_KERNEL);
+ if (!priv)
+ return -ENOMEM;
+ pgraph->priv = priv;
+
+ ret = nouveau_gpuobj_new(dev, NULL, 0x1000, 256, 0, &priv->unk4188b4);
+ if (ret)
+ goto error;
+
+ ret = nouveau_gpuobj_new(dev, NULL, 0x1000, 256, 0, &priv->unk4188b8);
+ if (ret)
+ goto error;
+
+ for (i = 0; i < 0x1000; i += 4) {
+ nv_wo32(priv->unk4188b4, i, 0x00000010);
+ nv_wo32(priv->unk4188b8, i, 0x00000010);
+ }
+
+ priv->gpc_nr = nv_rd32(dev, 0x409604) & 0x0000001f;
+ priv->rop_nr = (nv_rd32(dev, 0x409604) & 0x001f0000) >> 16;
+ for (gpc = 0; gpc < priv->gpc_nr; gpc++) {
+ priv->tp_nr[gpc] = nv_rd32(dev, GPC_UNIT(gpc, 0x2608));
+ priv->tp_total += priv->tp_nr[gpc];
+ }
+
+ /* XXX: these need figuring out... */
+ switch (dev_priv->chipset) {
+ case 0xc0:
+ if (priv->tp_total == 11) { /* 465, 3/4/4/0, 4 */
+ priv->magic_not_rop_nr = 0x07;
+ /* filled values up to tp_total, the rest 0 */
+ priv->magicgpc980[0] = 0x22111000;
+ priv->magicgpc980[1] = 0x00000233;
+ priv->magicgpc980[2] = 0x00000000;
+ priv->magicgpc980[3] = 0x00000000;
+ priv->magicgpc918 = 0x000ba2e9;
+ } else
+ if (priv->tp_total == 14) { /* 470, 3/3/4/4, 5 */
+ priv->magic_not_rop_nr = 0x05;
+ priv->magicgpc980[0] = 0x11110000;
+ priv->magicgpc980[1] = 0x00233222;
+ priv->magicgpc980[2] = 0x00000000;
+ priv->magicgpc980[3] = 0x00000000;
+ priv->magicgpc918 = 0x00092493;
+ } else
+ if (priv->tp_total == 15) { /* 480, 3/4/4/4, 6 */
+ priv->magic_not_rop_nr = 0x06;
+ priv->magicgpc980[0] = 0x11110000;
+ priv->magicgpc980[1] = 0x03332222;
+ priv->magicgpc980[2] = 0x00000000;
+ priv->magicgpc980[3] = 0x00000000;
+ priv->magicgpc918 = 0x00088889;
+ }
+ break;
+ case 0xc3: /* 450, 4/0/0/0, 2 */
+ priv->magic_not_rop_nr = 0x03;
+ priv->magicgpc980[0] = 0x00003210;
+ priv->magicgpc980[1] = 0x00000000;
+ priv->magicgpc980[2] = 0x00000000;
+ priv->magicgpc980[3] = 0x00000000;
+ priv->magicgpc918 = 0x00200000;
+ break;
+ case 0xc4: /* 460, 3/4/0/0, 4 */
+ priv->magic_not_rop_nr = 0x01;
+ priv->magicgpc980[0] = 0x02321100;
+ priv->magicgpc980[1] = 0x00000000;
+ priv->magicgpc980[2] = 0x00000000;
+ priv->magicgpc980[3] = 0x00000000;
+ priv->magicgpc918 = 0x00124925;
+ break;
+ }
+
+ if (!priv->magic_not_rop_nr) {
+ NV_ERROR(dev, "PGRAPH: unknown config: %d/%d/%d/%d, %d\n",
+ priv->tp_nr[0], priv->tp_nr[1], priv->tp_nr[2],
+ priv->tp_nr[3], priv->rop_nr);
+ /* use 0xc3's values... */
+ priv->magic_not_rop_nr = 0x03;
+ priv->magicgpc980[0] = 0x00003210;
+ priv->magicgpc980[1] = 0x00000000;
+ priv->magicgpc980[2] = 0x00000000;
+ priv->magicgpc980[3] = 0x00000000;
+ priv->magicgpc918 = 0x00200000;
+ }
+
+ nouveau_irq_register(dev, 12, nvc0_graph_isr);
+ NVOBJ_CLASS(dev, 0x902d, GR); /* 2D */
+ NVOBJ_CLASS(dev, 0x9039, GR); /* M2MF */
+ NVOBJ_CLASS(dev, 0x9097, GR); /* 3D */
+ NVOBJ_CLASS(dev, 0x90c0, GR); /* COMPUTE */
+ return 0;
+
+error:
+ nvc0_graph_destroy(dev);
+ return ret;
+}
+
+static void
+nvc0_graph_init_obj418880(struct drm_device *dev)
+{
+ struct drm_nouveau_private *dev_priv = dev->dev_private;
+ struct nouveau_pgraph_engine *pgraph = &dev_priv->engine.graph;
+ struct nvc0_graph_priv *priv = pgraph->priv;
+ int i;
+
+ nv_wr32(dev, GPC_BCAST(0x0880), 0x00000000);
+ nv_wr32(dev, GPC_BCAST(0x08a4), 0x00000000);
+ for (i = 0; i < 4; i++)
+ nv_wr32(dev, GPC_BCAST(0x0888) + (i * 4), 0x00000000);
+ nv_wr32(dev, GPC_BCAST(0x08b4), priv->unk4188b4->vinst >> 8);
+ nv_wr32(dev, GPC_BCAST(0x08b8), priv->unk4188b8->vinst >> 8);
+}
+
+static void
+nvc0_graph_init_regs(struct drm_device *dev)
+{
+ nv_wr32(dev, 0x400080, 0x003083c2);
+ nv_wr32(dev, 0x400088, 0x00006fe7);
+ nv_wr32(dev, 0x40008c, 0x00000000);
+ nv_wr32(dev, 0x400090, 0x00000030);
+ nv_wr32(dev, 0x40013c, 0x013901f7);
+ nv_wr32(dev, 0x400140, 0x00000100);
+ nv_wr32(dev, 0x400144, 0x00000000);
+ nv_wr32(dev, 0x400148, 0x00000110);
+ nv_wr32(dev, 0x400138, 0x00000000);
+ nv_wr32(dev, 0x400130, 0x00000000);
+ nv_wr32(dev, 0x400134, 0x00000000);
+ nv_wr32(dev, 0x400124, 0x00000002);
+}
+
+static void
+nvc0_graph_init_gpc_0(struct drm_device *dev)
+{
+ struct drm_nouveau_private *dev_priv = dev->dev_private;
+ struct nvc0_graph_priv *priv = dev_priv->engine.graph.priv;
+ int gpc;
+
+ /*
+ *      TP      ROP  UNKVAL(magic_not_rop_nr)
+ * 450: 4/0/0/0  2   3
+ * 460: 3/4/0/0  4   1
+ * 465: 3/4/4/0  4   7
+ * 470: 3/3/4/4  5   5
+ * 480: 3/4/4/4  6   6
+ *
+ * magicgpc918:
+ * 450: 00200000 00000000001000000000000000000000
+ * 460: 00124925 00000000000100100100100100100101
+ * 465: 000ba2e9 00000000000010111010001011101001
+ * 470: 00092493 00000000000010010010010010010011
+ * 480: 00088889 00000000000010001000100010001001
+ *
+ * magicgpc980: filled values up to tp_total, remainder 0
+ * 450: 00003210 00000000 00000000 00000000
+ * 460: 02321100 00000000 00000000 00000000
+ * 465: 22111000 00000233 00000000 00000000
+ * 470: 11110000 00233222 00000000 00000000
+ * 480: 11110000 03332222 00000000 00000000
+ */
+
+ nv_wr32(dev, GPC_BCAST(0x0980), priv->magicgpc980[0]);
+ nv_wr32(dev, GPC_BCAST(0x0984), priv->magicgpc980[1]);
+ nv_wr32(dev, GPC_BCAST(0x0988), priv->magicgpc980[2]);
+ nv_wr32(dev, GPC_BCAST(0x098c), priv->magicgpc980[3]);
+
+ for (gpc = 0; gpc < priv->gpc_nr; gpc++) {
+ nv_wr32(dev, GPC_UNIT(gpc, 0x0914), priv->magic_not_rop_nr << 8 |
+ priv->tp_nr[gpc]);
+ nv_wr32(dev, GPC_UNIT(gpc, 0x0910), 0x00040000 | priv->tp_total);
+ nv_wr32(dev, GPC_UNIT(gpc, 0x0918), priv->magicgpc918);
+ }
+
+ nv_wr32(dev, GPC_BCAST(0x1bd4), priv->magicgpc918);
+ nv_wr32(dev, GPC_BCAST(0x08ac), priv->rop_nr);
+}
+
+static void
+nvc0_graph_init_units(struct drm_device *dev)
+{
+ nv_wr32(dev, 0x409c24, 0x000f0000);
+ nv_wr32(dev, 0x404000, 0xc0000000); /* DISPATCH */
+ nv_wr32(dev, 0x404600, 0xc0000000); /* M2MF */
+ nv_wr32(dev, 0x408030, 0xc0000000);
+ nv_wr32(dev, 0x40601c, 0xc0000000);
+ nv_wr32(dev, 0x404490, 0xc0000000); /* MACRO */
+ nv_wr32(dev, 0x406018, 0xc0000000);
+ nv_wr32(dev, 0x405840, 0xc0000000);
+ nv_wr32(dev, 0x405844, 0x00ffffff);
+ nv_mask(dev, 0x419cc0, 0x00000008, 0x00000008);
+ nv_mask(dev, 0x419eb4, 0x00001000, 0x00001000);
+}
+
+static void
+nvc0_graph_init_gpc_1(struct drm_device *dev)
+{
+ struct drm_nouveau_private *dev_priv = dev->dev_private;
+ struct nvc0_graph_priv *priv = dev_priv->engine.graph.priv;
+ int gpc, tp;
+
+ for (gpc = 0; gpc < priv->gpc_nr; gpc++) {
+ nv_wr32(dev, GPC_UNIT(gpc, 0x0420), 0xc0000000);
+ nv_wr32(dev, GPC_UNIT(gpc, 0x0900), 0xc0000000);
+ nv_wr32(dev, GPC_UNIT(gpc, 0x1028), 0xc0000000);
+ nv_wr32(dev, GPC_UNIT(gpc, 0x0824), 0xc0000000);
+ for (tp = 0; tp < priv->tp_nr[gpc]; tp++) {
+ nv_wr32(dev, TP_UNIT(gpc, tp, 0x508), 0xffffffff);
+ nv_wr32(dev, TP_UNIT(gpc, tp, 0x50c), 0xffffffff);
+ nv_wr32(dev, TP_UNIT(gpc, tp, 0x224), 0xc0000000);
+ nv_wr32(dev, TP_UNIT(gpc, tp, 0x48c), 0xc0000000);
+ nv_wr32(dev, TP_UNIT(gpc, tp, 0x084), 0xc0000000);
+ nv_wr32(dev, TP_UNIT(gpc, tp, 0xe44), 0x001ffffe);
+ nv_wr32(dev, TP_UNIT(gpc, tp, 0xe4c), 0x0000000f);
+ }
+ nv_wr32(dev, GPC_UNIT(gpc, 0x2c90), 0xffffffff);
+ nv_wr32(dev, GPC_UNIT(gpc, 0x2c94), 0xffffffff);
+ }
+}
+
+static void
+nvc0_graph_init_rop(struct drm_device *dev)
+{
+ struct drm_nouveau_private *dev_priv = dev->dev_private;
+ struct nvc0_graph_priv *priv = dev_priv->engine.graph.priv;
+ int rop;
+
+ for (rop = 0; rop < priv->rop_nr; rop++) {
+ nv_wr32(dev, ROP_UNIT(rop, 0x144), 0xc0000000);
+ nv_wr32(dev, ROP_UNIT(rop, 0x070), 0xc0000000);
+ nv_wr32(dev, ROP_UNIT(rop, 0x204), 0xffffffff);
+ nv_wr32(dev, ROP_UNIT(rop, 0x208), 0xffffffff);
+ }
+}
+
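+/*
+ * Upload microcode into one of the PGRAPH falcons: the data segment is
+ * streamed through fuc_base + 0x1c0/0x1c4, the code segment through
+ * 0x180/0x184, with a 256-byte page index written to 0x188 every 64 words.
+ */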
+static int
+nvc0_fuc_load_fw(struct drm_device *dev, u32 fuc_base,
+ const char *code_fw, const char *data_fw)
+{
+ const struct firmware *fw;
+ char name[32];
+ int ret, i;
+
+ snprintf(name, sizeof(name), "nouveau/%s", data_fw);
+ ret = request_firmware(&fw, name, &dev->pdev->dev);
+ if (ret) {
+ NV_ERROR(dev, "failed to load %s\n", data_fw);
+ return ret;
+ }
+
+ nv_wr32(dev, fuc_base + 0x01c0, 0x01000000);
+ for (i = 0; i < fw->size / 4; i++)
+ nv_wr32(dev, fuc_base + 0x01c4, ((u32 *)fw->data)[i]);
+ release_firmware(fw);
+
+ snprintf(name, sizeof(name), "nouveau/%s", code_fw);
+ ret = request_firmware(&fw, name, &dev->pdev->dev);
+ if (ret) {
+ NV_ERROR(dev, "failed to load %s\n", code_fw);
+ return ret;
+ }
+
+ nv_wr32(dev, fuc_base + 0x0180, 0x01000000);
+ for (i = 0; i < fw->size / 4; i++) {
+ if ((i & 0x3f) == 0)
+ nv_wr32(dev, fuc_base + 0x0188, i >> 6);
+ nv_wr32(dev, fuc_base + 0x0184, ((u32 *)fw->data)[i]);
+ }
+ release_firmware(fw);
+
+ return 0;
+}
+
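+/*
+ * Load and start the two context-control falcons (hub at 0x409000, GPC at
+ * 0x41a000), then issue requests 0x10/0x16/0x25 through 0x409500/0x409504;
+ * request 0x10 returns the size of a graphics context image in 0x409800.
+ */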
+static int
+nvc0_graph_init_ctxctl(struct drm_device *dev)
+{
+ struct drm_nouveau_private *dev_priv = dev->dev_private;
+ struct nvc0_graph_priv *priv = dev_priv->engine.graph.priv;
+ u32 r000260;
+ int ret;
+
+ /* load fuc microcode */
+ r000260 = nv_mask(dev, 0x000260, 0x00000001, 0x00000000);
+ ret = nvc0_fuc_load_fw(dev, 0x409000, "fuc409c", "fuc409d");
+ if (ret == 0)
+ ret = nvc0_fuc_load_fw(dev, 0x41a000, "fuc41ac", "fuc41ad");
+ nv_wr32(dev, 0x000260, r000260);
+
+ if (ret)
+ return ret;
+
+ /* start both of them running */
+ nv_wr32(dev, 0x409840, 0xffffffff);
+ nv_wr32(dev, 0x41a10c, 0x00000000);
+ nv_wr32(dev, 0x40910c, 0x00000000);
+ nv_wr32(dev, 0x41a100, 0x00000002);
+ nv_wr32(dev, 0x409100, 0x00000002);
+ if (!nv_wait(dev, 0x409800, 0x00000001, 0x00000001))
+ NV_INFO(dev, "0x409800 wait failed\n");
+
+ nv_wr32(dev, 0x409840, 0xffffffff);
+ nv_wr32(dev, 0x409500, 0x7fffffff);
+ nv_wr32(dev, 0x409504, 0x00000021);
+
+ nv_wr32(dev, 0x409840, 0xffffffff);
+ nv_wr32(dev, 0x409500, 0x00000000);
+ nv_wr32(dev, 0x409504, 0x00000010);
+ if (!nv_wait_ne(dev, 0x409800, 0xffffffff, 0x00000000)) {
+ NV_ERROR(dev, "fuc09 req 0x10 timeout\n");
+ return -EBUSY;
+ }
+ priv->grctx_size = nv_rd32(dev, 0x409800);
+
+ nv_wr32(dev, 0x409840, 0xffffffff);
+ nv_wr32(dev, 0x409500, 0x00000000);
+ nv_wr32(dev, 0x409504, 0x00000016);
+ if (!nv_wait_ne(dev, 0x409800, 0xffffffff, 0x00000000)) {
+ NV_ERROR(dev, "fuc09 req 0x16 timeout\n");
+ return -EBUSY;
+ }
+
+ nv_wr32(dev, 0x409840, 0xffffffff);
+ nv_wr32(dev, 0x409500, 0x00000000);
+ nv_wr32(dev, 0x409504, 0x00000025);
+ if (!nv_wait_ne(dev, 0x409800, 0xffffffff, 0x00000000)) {
+ NV_ERROR(dev, "fuc09 req 0x25 timeout\n");
+ return -EBUSY;
+ }
+
+ return 0;
}
int
nvc0_graph_init(struct drm_device *dev)
{
struct drm_nouveau_private *dev_priv = dev->dev_private;
+ struct nouveau_pgraph_engine *pgraph = &dev_priv->engine.graph;
+ struct nvc0_graph_priv *priv;
+ int ret;
+
dev_priv->engine.graph.accel_blocked = true;
+
+ switch (dev_priv->chipset) {
+ case 0xc0:
+ case 0xc3:
+ case 0xc4:
+ break;
+ default:
+ NV_ERROR(dev, "PGRAPH: unsupported chipset, please report!\n");
+ if (nouveau_noaccel != 0)
+ return 0;
+ break;
+ }
+
+ nv_mask(dev, 0x000200, 0x18001000, 0x00000000);
+ nv_mask(dev, 0x000200, 0x18001000, 0x18001000);
+
+ if (!pgraph->priv) {
+ ret = nvc0_graph_create(dev);
+ if (ret)
+ return ret;
+ }
+ priv = pgraph->priv;
+
+ nvc0_graph_init_obj418880(dev);
+ nvc0_graph_init_regs(dev);
+ /* nvc0_graph_init_unimplemented_magics(dev); */
+ nvc0_graph_init_gpc_0(dev);
+ /* nvc0_graph_init_unimplemented_c242(dev); */
+
+ nv_wr32(dev, 0x400500, 0x00010001);
+ nv_wr32(dev, 0x400100, 0xffffffff);
+ nv_wr32(dev, 0x40013c, 0xffffffff);
+
+ nvc0_graph_init_units(dev);
+ nvc0_graph_init_gpc_1(dev);
+ nvc0_graph_init_rop(dev);
+
+ nv_wr32(dev, 0x400108, 0xffffffff);
+ nv_wr32(dev, 0x400138, 0xffffffff);
+ nv_wr32(dev, 0x400118, 0xffffffff);
+ nv_wr32(dev, 0x400130, 0xffffffff);
+ nv_wr32(dev, 0x40011c, 0xffffffff);
+ nv_wr32(dev, 0x400134, 0xffffffff);
+ nv_wr32(dev, 0x400054, 0x34ce3464);
+
+ ret = nvc0_graph_init_ctxctl(dev);
+ if (ret == 0)
+ dev_priv->engine.graph.accel_blocked = false;
return 0;
}
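+
+/* Translate a PGRAPH instance address back to a channel id for logging. */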
+static int
+nvc0_graph_isr_chid(struct drm_device *dev, u64 inst)
+{
+ struct drm_nouveau_private *dev_priv = dev->dev_private;
+ struct nouveau_channel *chan;
+ unsigned long flags;
+ int i;
+
+ spin_lock_irqsave(&dev_priv->channels.lock, flags);
+ for (i = 0; i < dev_priv->engine.fifo.channels; i++) {
+ chan = dev_priv->channels.ptr[i];
+ if (!chan || !chan->ramin)
+ continue;
+
+ if (inst == chan->ramin->vinst)
+ break;
+ }
+ spin_unlock_irqrestore(&dev_priv->channels.lock, flags);
+ return i;
+}
+
+static void
+nvc0_graph_isr(struct drm_device *dev)
+{
+ u64 inst = (u64)(nv_rd32(dev, 0x409b00) & 0x0fffffff) << 12;
+ u32 chid = nvc0_graph_isr_chid(dev, inst);
+ u32 stat = nv_rd32(dev, 0x400100);
+ u32 addr = nv_rd32(dev, 0x400704);
+ u32 mthd = (addr & 0x00003ffc);
+ u32 subc = (addr & 0x00070000) >> 16;
+ u32 data = nv_rd32(dev, 0x400708);
+ u32 code = nv_rd32(dev, 0x400110);
+ u32 class = nv_rd32(dev, 0x404200 + (subc * 4));
+
+ if (stat & 0x00000010) {
+ NV_INFO(dev, "PGRAPH: ILLEGAL_MTHD ch %d [0x%010llx] subc %d "
+ "class 0x%04x mthd 0x%04x data 0x%08x\n",
+ chid, inst, subc, class, mthd, data);
+ nv_wr32(dev, 0x400100, 0x00000010);
+ stat &= ~0x00000010;
+ }
+
+ if (stat & 0x00000020) {
+ NV_INFO(dev, "PGRAPH: ILLEGAL_CLASS ch %d [0x%010llx] subc %d "
+ "class 0x%04x mthd 0x%04x data 0x%08x\n",
+ chid, inst, subc, class, mthd, data);
+ nv_wr32(dev, 0x400100, 0x00000020);
+ stat &= ~0x00000020;
+ }
+
+ if (stat & 0x00100000) {
+ NV_INFO(dev, "PGRAPH: DATA_ERROR [");
+ nouveau_enum_print(nv50_data_error_names, code);
+ printk("] ch %d [0x%010llx] subc %d class 0x%04x "
+ "mthd 0x%04x data 0x%08x\n",
+ chid, inst, subc, class, mthd, data);
+ nv_wr32(dev, 0x400100, 0x00100000);
+ stat &= ~0x00100000;
+ }
+
+ if (stat & 0x00200000) {
+ u32 trap = nv_rd32(dev, 0x400108);
+ NV_INFO(dev, "PGRAPH: TRAP ch %d status 0x%08x\n", chid, trap);
+ nv_wr32(dev, 0x400108, trap);
+ nv_wr32(dev, 0x400100, 0x00200000);
+ stat &= ~0x00200000;
+ }
+
+ if (stat & 0x00080000) {
+ u32 ustat = nv_rd32(dev, 0x409c18);
+
+ NV_INFO(dev, "PGRAPH: CTXCTRL ustat 0x%08x\n", ustat);
+
+ nv_wr32(dev, 0x409c20, ustat);
+ nv_wr32(dev, 0x400100, 0x00080000);
+ stat &= ~0x00080000;
+ }
+
+ if (stat) {
+ NV_INFO(dev, "PGRAPH: unknown stat 0x%08x\n", stat);
+ nv_wr32(dev, 0x400100, stat);
+ }
+
+ nv_wr32(dev, 0x400500, 0x00010001);
+}
diff --git a/drivers/gpu/drm/nouveau/nvc0_graph.h b/drivers/gpu/drm/nouveau/nvc0_graph.h
new file mode 100644
index 00000000000..40e26f9c56c
--- /dev/null
+++ b/drivers/gpu/drm/nouveau/nvc0_graph.h
@@ -0,0 +1,64 @@
+/*
+ * Copyright 2010 Red Hat Inc.
+ *
+ * Permission is hereby granted, free of charge, to any person obtaining a
+ * copy of this software and associated documentation files (the "Software"),
+ * to deal in the Software without restriction, including without limitation
+ * the rights to use, copy, modify, merge, publish, distribute, sublicense,
+ * and/or sell copies of the Software, and to permit persons to whom the
+ * Software is furnished to do so, subject to the following conditions:
+ *
+ * The above copyright notice and this permission notice shall be included in
+ * all copies or substantial portions of the Software.
+ *
+ * THE SOFTWARE IS PROVIDED "AS IS", WITHOUT WARRANTY OF ANY KIND, EXPRESS OR
+ * IMPLIED, INCLUDING BUT NOT LIMITED TO THE WARRANTIES OF MERCHANTABILITY,
+ * FITNESS FOR A PARTICULAR PURPOSE AND NONINFRINGEMENT. IN NO EVENT SHALL
+ * THE COPYRIGHT HOLDER(S) OR AUTHOR(S) BE LIABLE FOR ANY CLAIM, DAMAGES OR
+ * OTHER LIABILITY, WHETHER IN AN ACTION OF CONTRACT, TORT OR OTHERWISE,
+ * ARISING FROM, OUT OF OR IN CONNECTION WITH THE SOFTWARE OR THE USE OR
+ * OTHER DEALINGS IN THE SOFTWARE.
+ *
+ * Authors: Ben Skeggs
+ */
+
+#ifndef __NVC0_GRAPH_H__
+#define __NVC0_GRAPH_H__
+
+#define GPC_MAX 4
+#define TP_MAX 32
+
+#define ROP_BCAST(r) (0x408800 + (r))
+#define ROP_UNIT(u,r) (0x410000 + (u) * 0x400 + (r))
+#define GPC_BCAST(r) (0x418000 + (r))
+#define GPC_UNIT(t,r) (0x500000 + (t) * 0x8000 + (r))
+#define TP_UNIT(t,m,r) (0x504000 + (t) * 0x8000 + (m) * 0x800 + (r))
+
+struct nvc0_graph_priv {
+ u8 gpc_nr;
+ u8 rop_nr;
+ u8 tp_nr[GPC_MAX];
+ u8 tp_total;
+
+ u32 grctx_size;
+ u32 *grctx_vals;
+ struct nouveau_gpuobj *unk4188b4;
+ struct nouveau_gpuobj *unk4188b8;
+
+ u8 magic_not_rop_nr;
+ u32 magicgpc980[4];
+ u32 magicgpc918;
+};
+
+struct nvc0_graph_chan {
+ struct nouveau_gpuobj *grctx;
+ struct nouveau_gpuobj *unk408004; /* 0x418810 too */
+ struct nouveau_gpuobj *unk40800c; /* 0x419004 too */
+ struct nouveau_gpuobj *unk418810; /* 0x419848 too */
+ struct nouveau_gpuobj *mmio;
+ int mmio_nr;
+};
+
+int nvc0_grctx_generate(struct nouveau_channel *);
+
+#endif
diff --git a/drivers/gpu/drm/nouveau/nvc0_grctx.c b/drivers/gpu/drm/nouveau/nvc0_grctx.c
new file mode 100644
index 00000000000..b9e68b2d30a
--- /dev/null
+++ b/drivers/gpu/drm/nouveau/nvc0_grctx.c
@@ -0,0 +1,2874 @@
+/*
+ * Copyright 2010 Red Hat Inc.
+ *
+ * Permission is hereby granted, free of charge, to any person obtaining a
+ * copy of this software and associated documentation files (the "Software"),
+ * to deal in the Software without restriction, including without limitation
+ * the rights to use, copy, modify, merge, publish, distribute, sublicense,
+ * and/or sell copies of the Software, and to permit persons to whom the
+ * Software is furnished to do so, subject to the following conditions:
+ *
+ * The above copyright notice and this permission notice shall be included in
+ * all copies or substantial portions of the Software.
+ *
+ * THE SOFTWARE IS PROVIDED "AS IS", WITHOUT WARRANTY OF ANY KIND, EXPRESS OR
+ * IMPLIED, INCLUDING BUT NOT LIMITED TO THE WARRANTIES OF MERCHANTABILITY,
+ * FITNESS FOR A PARTICULAR PURPOSE AND NONINFRINGEMENT. IN NO EVENT SHALL
+ * THE COPYRIGHT HOLDER(S) OR AUTHOR(S) BE LIABLE FOR ANY CLAIM, DAMAGES OR
+ * OTHER LIABILITY, WHETHER IN AN ACTION OF CONTRACT, TORT OR OTHERWISE,
+ * ARISING FROM, OUT OF OR IN CONNECTION WITH THE SOFTWARE OR THE USE OR
+ * OTHER DEALINGS IN THE SOFTWARE.
+ *
+ * Authors: Ben Skeggs
+ */
+
+#include "drmP.h"
+#include "nouveau_drv.h"
+#include "nouveau_mm.h"
+#include "nvc0_graph.h"
+
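+/*
+ * nv_icmd issues an "init command" through 0x400200/0x400204, spinning on
+ * 0x400700 until it completes; nv_mthd injects a method directly into the
+ * PGRAPH front-end via 0x404488/0x40448c.
+ */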
+static void
+nv_icmd(struct drm_device *dev, u32 icmd, u32 data)
+{
+ nv_wr32(dev, 0x400204, data);
+ nv_wr32(dev, 0x400200, icmd);
+ while (nv_rd32(dev, 0x400700) & 2) {}
+}
+
+static void
+nv_mthd(struct drm_device *dev, u32 class, u32 mthd, u32 data)
+{
+ nv_wr32(dev, 0x40448c, data);
+ nv_wr32(dev, 0x404488, 0x80000000 | (mthd << 14) | class);
+}
+
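+/*
+ * Program the default state of every 0x9097 (3D) class method; the values
+ * were apparently captured from traces of the binary driver and give the
+ * golden context sane initial state.
+ */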
+static void
+nvc0_grctx_generate_9097(struct drm_device *dev)
+{
+ nv_mthd(dev, 0x9097, 0x0800, 0x00000000);
+ nv_mthd(dev, 0x9097, 0x0840, 0x00000000);
+ nv_mthd(dev, 0x9097, 0x0880, 0x00000000);
+ nv_mthd(dev, 0x9097, 0x08c0, 0x00000000);
+ nv_mthd(dev, 0x9097, 0x0900, 0x00000000);
+ nv_mthd(dev, 0x9097, 0x0940, 0x00000000);
+ nv_mthd(dev, 0x9097, 0x0980, 0x00000000);
+ nv_mthd(dev, 0x9097, 0x09c0, 0x00000000);
+ nv_mthd(dev, 0x9097, 0x0804, 0x00000000);
+ nv_mthd(dev, 0x9097, 0x0844, 0x00000000);
+ nv_mthd(dev, 0x9097, 0x0884, 0x00000000);
+ nv_mthd(dev, 0x9097, 0x08c4, 0x00000000);
+ nv_mthd(dev, 0x9097, 0x0904, 0x00000000);
+ nv_mthd(dev, 0x9097, 0x0944, 0x00000000);
+ nv_mthd(dev, 0x9097, 0x0984, 0x00000000);
+ nv_mthd(dev, 0x9097, 0x09c4, 0x00000000);
+ nv_mthd(dev, 0x9097, 0x0808, 0x00000400);
+ nv_mthd(dev, 0x9097, 0x0848, 0x00000400);
+ nv_mthd(dev, 0x9097, 0x0888, 0x00000400);
+ nv_mthd(dev, 0x9097, 0x08c8, 0x00000400);
+ nv_mthd(dev, 0x9097, 0x0908, 0x00000400);
+ nv_mthd(dev, 0x9097, 0x0948, 0x00000400);
+ nv_mthd(dev, 0x9097, 0x0988, 0x00000400);
+ nv_mthd(dev, 0x9097, 0x09c8, 0x00000400);
+ nv_mthd(dev, 0x9097, 0x080c, 0x00000300);
+ nv_mthd(dev, 0x9097, 0x084c, 0x00000300);
+ nv_mthd(dev, 0x9097, 0x088c, 0x00000300);
+ nv_mthd(dev, 0x9097, 0x08cc, 0x00000300);
+ nv_mthd(dev, 0x9097, 0x090c, 0x00000300);
+ nv_mthd(dev, 0x9097, 0x094c, 0x00000300);
+ nv_mthd(dev, 0x9097, 0x098c, 0x00000300);
+ nv_mthd(dev, 0x9097, 0x09cc, 0x00000300);
+ nv_mthd(dev, 0x9097, 0x0810, 0x000000cf);
+ nv_mthd(dev, 0x9097, 0x0850, 0x00000000);
+ nv_mthd(dev, 0x9097, 0x0890, 0x00000000);
+ nv_mthd(dev, 0x9097, 0x08d0, 0x00000000);
+ nv_mthd(dev, 0x9097, 0x0910, 0x00000000);
+ nv_mthd(dev, 0x9097, 0x0950, 0x00000000);
+ nv_mthd(dev, 0x9097, 0x0990, 0x00000000);
+ nv_mthd(dev, 0x9097, 0x09d0, 0x00000000);
+ nv_mthd(dev, 0x9097, 0x0814, 0x00000040);
+ nv_mthd(dev, 0x9097, 0x0854, 0x00000040);
+ nv_mthd(dev, 0x9097, 0x0894, 0x00000040);
+ nv_mthd(dev, 0x9097, 0x08d4, 0x00000040);
+ nv_mthd(dev, 0x9097, 0x0914, 0x00000040);
+ nv_mthd(dev, 0x9097, 0x0954, 0x00000040);
+ nv_mthd(dev, 0x9097, 0x0994, 0x00000040);
+ nv_mthd(dev, 0x9097, 0x09d4, 0x00000040);
+ nv_mthd(dev, 0x9097, 0x0818, 0x00000001);
+ nv_mthd(dev, 0x9097, 0x0858, 0x00000001);
+ nv_mthd(dev, 0x9097, 0x0898, 0x00000001);
+ nv_mthd(dev, 0x9097, 0x08d8, 0x00000001);
+ nv_mthd(dev, 0x9097, 0x0918, 0x00000001);
+ nv_mthd(dev, 0x9097, 0x0958, 0x00000001);
+ nv_mthd(dev, 0x9097, 0x0998, 0x00000001);
+ nv_mthd(dev, 0x9097, 0x09d8, 0x00000001);
+ nv_mthd(dev, 0x9097, 0x081c, 0x00000000);
+ nv_mthd(dev, 0x9097, 0x085c, 0x00000000);
+ nv_mthd(dev, 0x9097, 0x089c, 0x00000000);
+ nv_mthd(dev, 0x9097, 0x08dc, 0x00000000);
+ nv_mthd(dev, 0x9097, 0x091c, 0x00000000);
+ nv_mthd(dev, 0x9097, 0x095c, 0x00000000);
+ nv_mthd(dev, 0x9097, 0x099c, 0x00000000);
+ nv_mthd(dev, 0x9097, 0x09dc, 0x00000000);
+ nv_mthd(dev, 0x9097, 0x0820, 0x00000000);
+ nv_mthd(dev, 0x9097, 0x0860, 0x00000000);
+ nv_mthd(dev, 0x9097, 0x08a0, 0x00000000);
+ nv_mthd(dev, 0x9097, 0x08e0, 0x00000000);
+ nv_mthd(dev, 0x9097, 0x0920, 0x00000000);
+ nv_mthd(dev, 0x9097, 0x0960, 0x00000000);
+ nv_mthd(dev, 0x9097, 0x09a0, 0x00000000);
+ nv_mthd(dev, 0x9097, 0x09e0, 0x00000000);
+ nv_mthd(dev, 0x9097, 0x2700, 0x00000000);
+ nv_mthd(dev, 0x9097, 0x2720, 0x00000000);
+ nv_mthd(dev, 0x9097, 0x2740, 0x00000000);
+ nv_mthd(dev, 0x9097, 0x2760, 0x00000000);
+ nv_mthd(dev, 0x9097, 0x2780, 0x00000000);
+ nv_mthd(dev, 0x9097, 0x27a0, 0x00000000);
+ nv_mthd(dev, 0x9097, 0x27c0, 0x00000000);
+ nv_mthd(dev, 0x9097, 0x27e0, 0x00000000);
+ nv_mthd(dev, 0x9097, 0x2704, 0x00000000);
+ nv_mthd(dev, 0x9097, 0x2724, 0x00000000);
+ nv_mthd(dev, 0x9097, 0x2744, 0x00000000);
+ nv_mthd(dev, 0x9097, 0x2764, 0x00000000);
+ nv_mthd(dev, 0x9097, 0x2784, 0x00000000);
+ nv_mthd(dev, 0x9097, 0x27a4, 0x00000000);
+ nv_mthd(dev, 0x9097, 0x27c4, 0x00000000);
+ nv_mthd(dev, 0x9097, 0x27e4, 0x00000000);
+ nv_mthd(dev, 0x9097, 0x2708, 0x00000000);
+ nv_mthd(dev, 0x9097, 0x2728, 0x00000000);
+ nv_mthd(dev, 0x9097, 0x2748, 0x00000000);
+ nv_mthd(dev, 0x9097, 0x2768, 0x00000000);
+ nv_mthd(dev, 0x9097, 0x2788, 0x00000000);
+ nv_mthd(dev, 0x9097, 0x27a8, 0x00000000);
+ nv_mthd(dev, 0x9097, 0x27c8, 0x00000000);
+ nv_mthd(dev, 0x9097, 0x27e8, 0x00000000);
+ nv_mthd(dev, 0x9097, 0x270c, 0x00000000);
+ nv_mthd(dev, 0x9097, 0x272c, 0x00000000);
+ nv_mthd(dev, 0x9097, 0x274c, 0x00000000);
+ nv_mthd(dev, 0x9097, 0x276c, 0x00000000);
+ nv_mthd(dev, 0x9097, 0x278c, 0x00000000);
+ nv_mthd(dev, 0x9097, 0x27ac, 0x00000000);
+ nv_mthd(dev, 0x9097, 0x27cc, 0x00000000);
+ nv_mthd(dev, 0x9097, 0x27ec, 0x00000000);
+ nv_mthd(dev, 0x9097, 0x2710, 0x00014000);
+ nv_mthd(dev, 0x9097, 0x2730, 0x00014000);
+ nv_mthd(dev, 0x9097, 0x2750, 0x00014000);
+ nv_mthd(dev, 0x9097, 0x2770, 0x00014000);
+ nv_mthd(dev, 0x9097, 0x2790, 0x00014000);
+ nv_mthd(dev, 0x9097, 0x27b0, 0x00014000);
+ nv_mthd(dev, 0x9097, 0x27d0, 0x00014000);
+ nv_mthd(dev, 0x9097, 0x27f0, 0x00014000);
+ nv_mthd(dev, 0x9097, 0x2714, 0x00000040);
+ nv_mthd(dev, 0x9097, 0x2734, 0x00000040);
+ nv_mthd(dev, 0x9097, 0x2754, 0x00000040);
+ nv_mthd(dev, 0x9097, 0x2774, 0x00000040);
+ nv_mthd(dev, 0x9097, 0x2794, 0x00000040);
+ nv_mthd(dev, 0x9097, 0x27b4, 0x00000040);
+ nv_mthd(dev, 0x9097, 0x27d4, 0x00000040);
+ nv_mthd(dev, 0x9097, 0x27f4, 0x00000040);
+ nv_mthd(dev, 0x9097, 0x1c00, 0x00000000);
+ nv_mthd(dev, 0x9097, 0x1c10, 0x00000000);
+ nv_mthd(dev, 0x9097, 0x1c20, 0x00000000);
+ nv_mthd(dev, 0x9097, 0x1c30, 0x00000000);
+ nv_mthd(dev, 0x9097, 0x1c40, 0x00000000);
+ nv_mthd(dev, 0x9097, 0x1c50, 0x00000000);
+ nv_mthd(dev, 0x9097, 0x1c60, 0x00000000);
+ nv_mthd(dev, 0x9097, 0x1c70, 0x00000000);
+ nv_mthd(dev, 0x9097, 0x1c80, 0x00000000);
+ nv_mthd(dev, 0x9097, 0x1c90, 0x00000000);
+ nv_mthd(dev, 0x9097, 0x1ca0, 0x00000000);
+ nv_mthd(dev, 0x9097, 0x1cb0, 0x00000000);
+ nv_mthd(dev, 0x9097, 0x1cc0, 0x00000000);
+ nv_mthd(dev, 0x9097, 0x1cd0, 0x00000000);
+ nv_mthd(dev, 0x9097, 0x1ce0, 0x00000000);
+ nv_mthd(dev, 0x9097, 0x1cf0, 0x00000000);
+ nv_mthd(dev, 0x9097, 0x1c04, 0x00000000);
+ nv_mthd(dev, 0x9097, 0x1c14, 0x00000000);
+ nv_mthd(dev, 0x9097, 0x1c24, 0x00000000);
+ nv_mthd(dev, 0x9097, 0x1c34, 0x00000000);
+ nv_mthd(dev, 0x9097, 0x1c44, 0x00000000);
+ nv_mthd(dev, 0x9097, 0x1c54, 0x00000000);
+ nv_mthd(dev, 0x9097, 0x1c64, 0x00000000);
+ nv_mthd(dev, 0x9097, 0x1c74, 0x00000000);
+ nv_mthd(dev, 0x9097, 0x1c84, 0x00000000);
+ nv_mthd(dev, 0x9097, 0x1c94, 0x00000000);
+ nv_mthd(dev, 0x9097, 0x1ca4, 0x00000000);
+ nv_mthd(dev, 0x9097, 0x1cb4, 0x00000000);
+ nv_mthd(dev, 0x9097, 0x1cc4, 0x00000000);
+ nv_mthd(dev, 0x9097, 0x1cd4, 0x00000000);
+ nv_mthd(dev, 0x9097, 0x1ce4, 0x00000000);
+ nv_mthd(dev, 0x9097, 0x1cf4, 0x00000000);
+ nv_mthd(dev, 0x9097, 0x1c08, 0x00000000);
+ nv_mthd(dev, 0x9097, 0x1c18, 0x00000000);
+ nv_mthd(dev, 0x9097, 0x1c28, 0x00000000);
+ nv_mthd(dev, 0x9097, 0x1c38, 0x00000000);
+ nv_mthd(dev, 0x9097, 0x1c48, 0x00000000);
+ nv_mthd(dev, 0x9097, 0x1c58, 0x00000000);
+ nv_mthd(dev, 0x9097, 0x1c68, 0x00000000);
+ nv_mthd(dev, 0x9097, 0x1c78, 0x00000000);
+ nv_mthd(dev, 0x9097, 0x1c88, 0x00000000);
+ nv_mthd(dev, 0x9097, 0x1c98, 0x00000000);
+ nv_mthd(dev, 0x9097, 0x1ca8, 0x00000000);
+ nv_mthd(dev, 0x9097, 0x1cb8, 0x00000000);
+ nv_mthd(dev, 0x9097, 0x1cc8, 0x00000000);
+ nv_mthd(dev, 0x9097, 0x1cd8, 0x00000000);
+ nv_mthd(dev, 0x9097, 0x1ce8, 0x00000000);
+ nv_mthd(dev, 0x9097, 0x1cf8, 0x00000000);
+ nv_mthd(dev, 0x9097, 0x1c0c, 0x00000000);
+ nv_mthd(dev, 0x9097, 0x1c1c, 0x00000000);
+ nv_mthd(dev, 0x9097, 0x1c2c, 0x00000000);
+ nv_mthd(dev, 0x9097, 0x1c3c, 0x00000000);
+ nv_mthd(dev, 0x9097, 0x1c4c, 0x00000000);
+ nv_mthd(dev, 0x9097, 0x1c5c, 0x00000000);
+ nv_mthd(dev, 0x9097, 0x1c6c, 0x00000000);
+ nv_mthd(dev, 0x9097, 0x1c7c, 0x00000000);
+ nv_mthd(dev, 0x9097, 0x1c8c, 0x00000000);
+ nv_mthd(dev, 0x9097, 0x1c9c, 0x00000000);
+ nv_mthd(dev, 0x9097, 0x1cac, 0x00000000);
+ nv_mthd(dev, 0x9097, 0x1cbc, 0x00000000);
+ nv_mthd(dev, 0x9097, 0x1ccc, 0x00000000);
+ nv_mthd(dev, 0x9097, 0x1cdc, 0x00000000);
+ nv_mthd(dev, 0x9097, 0x1cec, 0x00000000);
+ nv_mthd(dev, 0x9097, 0x1cfc, 0x00000000);
+ nv_mthd(dev, 0x9097, 0x1d00, 0x00000000);
+ nv_mthd(dev, 0x9097, 0x1d10, 0x00000000);
+ nv_mthd(dev, 0x9097, 0x1d20, 0x00000000);
+ nv_mthd(dev, 0x9097, 0x1d30, 0x00000000);
+ nv_mthd(dev, 0x9097, 0x1d40, 0x00000000);
+ nv_mthd(dev, 0x9097, 0x1d50, 0x00000000);
+ nv_mthd(dev, 0x9097, 0x1d60, 0x00000000);
+ nv_mthd(dev, 0x9097, 0x1d70, 0x00000000);
+ nv_mthd(dev, 0x9097, 0x1d80, 0x00000000);
+ nv_mthd(dev, 0x9097, 0x1d90, 0x00000000);
+ nv_mthd(dev, 0x9097, 0x1da0, 0x00000000);
+ nv_mthd(dev, 0x9097, 0x1db0, 0x00000000);
+ nv_mthd(dev, 0x9097, 0x1dc0, 0x00000000);
+ nv_mthd(dev, 0x9097, 0x1dd0, 0x00000000);
+ nv_mthd(dev, 0x9097, 0x1de0, 0x00000000);
+ nv_mthd(dev, 0x9097, 0x1df0, 0x00000000);
+ nv_mthd(dev, 0x9097, 0x1d04, 0x00000000);
+ nv_mthd(dev, 0x9097, 0x1d14, 0x00000000);
+ nv_mthd(dev, 0x9097, 0x1d24, 0x00000000);
+ nv_mthd(dev, 0x9097, 0x1d34, 0x00000000);
+ nv_mthd(dev, 0x9097, 0x1d44, 0x00000000);
+ nv_mthd(dev, 0x9097, 0x1d54, 0x00000000);
+ nv_mthd(dev, 0x9097, 0x1d64, 0x00000000);
+ nv_mthd(dev, 0x9097, 0x1d74, 0x00000000);
+ nv_mthd(dev, 0x9097, 0x1d84, 0x00000000);
+ nv_mthd(dev, 0x9097, 0x1d94, 0x00000000);
+ nv_mthd(dev, 0x9097, 0x1da4, 0x00000000);
+ nv_mthd(dev, 0x9097, 0x1db4, 0x00000000);
+ nv_mthd(dev, 0x9097, 0x1dc4, 0x00000000);
+ nv_mthd(dev, 0x9097, 0x1dd4, 0x00000000);
+ nv_mthd(dev, 0x9097, 0x1de4, 0x00000000);
+ nv_mthd(dev, 0x9097, 0x1df4, 0x00000000);
+ nv_mthd(dev, 0x9097, 0x1d08, 0x00000000);
+ nv_mthd(dev, 0x9097, 0x1d18, 0x00000000);
+ nv_mthd(dev, 0x9097, 0x1d28, 0x00000000);
+ nv_mthd(dev, 0x9097, 0x1d38, 0x00000000);
+ nv_mthd(dev, 0x9097, 0x1d48, 0x00000000);
+ nv_mthd(dev, 0x9097, 0x1d58, 0x00000000);
+ nv_mthd(dev, 0x9097, 0x1d68, 0x00000000);
+ nv_mthd(dev, 0x9097, 0x1d78, 0x00000000);
+ nv_mthd(dev, 0x9097, 0x1d88, 0x00000000);
+ nv_mthd(dev, 0x9097, 0x1d98, 0x00000000);
+ nv_mthd(dev, 0x9097, 0x1da8, 0x00000000);
+ nv_mthd(dev, 0x9097, 0x1db8, 0x00000000);
+ nv_mthd(dev, 0x9097, 0x1dc8, 0x00000000);
+ nv_mthd(dev, 0x9097, 0x1dd8, 0x00000000);
+ nv_mthd(dev, 0x9097, 0x1de8, 0x00000000);
+ nv_mthd(dev, 0x9097, 0x1df8, 0x00000000);
+ nv_mthd(dev, 0x9097, 0x1d0c, 0x00000000);
+ nv_mthd(dev, 0x9097, 0x1d1c, 0x00000000);
+ nv_mthd(dev, 0x9097, 0x1d2c, 0x00000000);
+ nv_mthd(dev, 0x9097, 0x1d3c, 0x00000000);
+ nv_mthd(dev, 0x9097, 0x1d4c, 0x00000000);
+ nv_mthd(dev, 0x9097, 0x1d5c, 0x00000000);
+ nv_mthd(dev, 0x9097, 0x1d6c, 0x00000000);
+ nv_mthd(dev, 0x9097, 0x1d7c, 0x00000000);
+ nv_mthd(dev, 0x9097, 0x1d8c, 0x00000000);
+ nv_mthd(dev, 0x9097, 0x1d9c, 0x00000000);
+ nv_mthd(dev, 0x9097, 0x1dac, 0x00000000);
+ nv_mthd(dev, 0x9097, 0x1dbc, 0x00000000);
+ nv_mthd(dev, 0x9097, 0x1dcc, 0x00000000);
+ nv_mthd(dev, 0x9097, 0x1ddc, 0x00000000);
+ nv_mthd(dev, 0x9097, 0x1dec, 0x00000000);
+ nv_mthd(dev, 0x9097, 0x1dfc, 0x00000000);
+ nv_mthd(dev, 0x9097, 0x1f00, 0x00000000);
+ nv_mthd(dev, 0x9097, 0x1f08, 0x00000000);
+ nv_mthd(dev, 0x9097, 0x1f10, 0x00000000);
+ nv_mthd(dev, 0x9097, 0x1f18, 0x00000000);
+ nv_mthd(dev, 0x9097, 0x1f20, 0x00000000);
+ nv_mthd(dev, 0x9097, 0x1f28, 0x00000000);
+ nv_mthd(dev, 0x9097, 0x1f30, 0x00000000);
+ nv_mthd(dev, 0x9097, 0x1f38, 0x00000000);
+ nv_mthd(dev, 0x9097, 0x1f40, 0x00000000);
+ nv_mthd(dev, 0x9097, 0x1f48, 0x00000000);
+ nv_mthd(dev, 0x9097, 0x1f50, 0x00000000);
+ nv_mthd(dev, 0x9097, 0x1f58, 0x00000000);
+ nv_mthd(dev, 0x9097, 0x1f60, 0x00000000);
+ nv_mthd(dev, 0x9097, 0x1f68, 0x00000000);
+ nv_mthd(dev, 0x9097, 0x1f70, 0x00000000);
+ nv_mthd(dev, 0x9097, 0x1f78, 0x00000000);
+ nv_mthd(dev, 0x9097, 0x1f04, 0x00000000);
+ nv_mthd(dev, 0x9097, 0x1f0c, 0x00000000);
+ nv_mthd(dev, 0x9097, 0x1f14, 0x00000000);
+ nv_mthd(dev, 0x9097, 0x1f1c, 0x00000000);
+ nv_mthd(dev, 0x9097, 0x1f24, 0x00000000);
+ nv_mthd(dev, 0x9097, 0x1f2c, 0x00000000);
+ nv_mthd(dev, 0x9097, 0x1f34, 0x00000000);
+ nv_mthd(dev, 0x9097, 0x1f3c, 0x00000000);
+ nv_mthd(dev, 0x9097, 0x1f44, 0x00000000);
+ nv_mthd(dev, 0x9097, 0x1f4c, 0x00000000);
+ nv_mthd(dev, 0x9097, 0x1f54, 0x00000000);
+ nv_mthd(dev, 0x9097, 0x1f5c, 0x00000000);
+ nv_mthd(dev, 0x9097, 0x1f64, 0x00000000);
+ nv_mthd(dev, 0x9097, 0x1f6c, 0x00000000);
+ nv_mthd(dev, 0x9097, 0x1f74, 0x00000000);
+ nv_mthd(dev, 0x9097, 0x1f7c, 0x00000000);
+ nv_mthd(dev, 0x9097, 0x1f80, 0x00000000);
+ nv_mthd(dev, 0x9097, 0x1f88, 0x00000000);
+ nv_mthd(dev, 0x9097, 0x1f90, 0x00000000);
+ nv_mthd(dev, 0x9097, 0x1f98, 0x00000000);
+ nv_mthd(dev, 0x9097, 0x1fa0, 0x00000000);
+ nv_mthd(dev, 0x9097, 0x1fa8, 0x00000000);
+ nv_mthd(dev, 0x9097, 0x1fb0, 0x00000000);
+ nv_mthd(dev, 0x9097, 0x1fb8, 0x00000000);
+ nv_mthd(dev, 0x9097, 0x1fc0, 0x00000000);
+ nv_mthd(dev, 0x9097, 0x1fc8, 0x00000000);
+ nv_mthd(dev, 0x9097, 0x1fd0, 0x00000000);
+ nv_mthd(dev, 0x9097, 0x1fd8, 0x00000000);
+ nv_mthd(dev, 0x9097, 0x1fe0, 0x00000000);
+ nv_mthd(dev, 0x9097, 0x1fe8, 0x00000000);
+ nv_mthd(dev, 0x9097, 0x1ff0, 0x00000000);
+ nv_mthd(dev, 0x9097, 0x1ff8, 0x00000000);
+ nv_mthd(dev, 0x9097, 0x1f84, 0x00000000);
+ nv_mthd(dev, 0x9097, 0x1f8c, 0x00000000);
+ nv_mthd(dev, 0x9097, 0x1f94, 0x00000000);
+ nv_mthd(dev, 0x9097, 0x1f9c, 0x00000000);
+ nv_mthd(dev, 0x9097, 0x1fa4, 0x00000000);
+ nv_mthd(dev, 0x9097, 0x1fac, 0x00000000);
+ nv_mthd(dev, 0x9097, 0x1fb4, 0x00000000);
+ nv_mthd(dev, 0x9097, 0x1fbc, 0x00000000);
+ nv_mthd(dev, 0x9097, 0x1fc4, 0x00000000);
+ nv_mthd(dev, 0x9097, 0x1fcc, 0x00000000);
+ nv_mthd(dev, 0x9097, 0x1fd4, 0x00000000);
+ nv_mthd(dev, 0x9097, 0x1fdc, 0x00000000);
+ nv_mthd(dev, 0x9097, 0x1fe4, 0x00000000);
+ nv_mthd(dev, 0x9097, 0x1fec, 0x00000000);
+ nv_mthd(dev, 0x9097, 0x1ff4, 0x00000000);
+ nv_mthd(dev, 0x9097, 0x1ffc, 0x00000000);
+ nv_mthd(dev, 0x9097, 0x2200, 0x00000022);
+ nv_mthd(dev, 0x9097, 0x2210, 0x00000022);
+ nv_mthd(dev, 0x9097, 0x2220, 0x00000022);
+ nv_mthd(dev, 0x9097, 0x2230, 0x00000022);
+ nv_mthd(dev, 0x9097, 0x2240, 0x00000022);
+ nv_mthd(dev, 0x9097, 0x2000, 0x00000000);
+ nv_mthd(dev, 0x9097, 0x2040, 0x00000011);
+ nv_mthd(dev, 0x9097, 0x2080, 0x00000020);
+ nv_mthd(dev, 0x9097, 0x20c0, 0x00000030);
+ nv_mthd(dev, 0x9097, 0x2100, 0x00000040);
+ nv_mthd(dev, 0x9097, 0x2140, 0x00000051);
+ nv_mthd(dev, 0x9097, 0x200c, 0x00000001);
+ nv_mthd(dev, 0x9097, 0x204c, 0x00000001);
+ nv_mthd(dev, 0x9097, 0x208c, 0x00000001);
+ nv_mthd(dev, 0x9097, 0x20cc, 0x00000001);
+ nv_mthd(dev, 0x9097, 0x210c, 0x00000001);
+ nv_mthd(dev, 0x9097, 0x214c, 0x00000001);
+ nv_mthd(dev, 0x9097, 0x2010, 0x00000000);
+ nv_mthd(dev, 0x9097, 0x2050, 0x00000000);
+ nv_mthd(dev, 0x9097, 0x2090, 0x00000001);
+ nv_mthd(dev, 0x9097, 0x20d0, 0x00000002);
+ nv_mthd(dev, 0x9097, 0x2110, 0x00000003);
+ nv_mthd(dev, 0x9097, 0x2150, 0x00000004);
+ nv_mthd(dev, 0x9097, 0x0380, 0x00000000);
+ nv_mthd(dev, 0x9097, 0x03a0, 0x00000000);
+ nv_mthd(dev, 0x9097, 0x03c0, 0x00000000);
+ nv_mthd(dev, 0x9097, 0x03e0, 0x00000000);
+ nv_mthd(dev, 0x9097, 0x0384, 0x00000000);
+ nv_mthd(dev, 0x9097, 0x03a4, 0x00000000);
+ nv_mthd(dev, 0x9097, 0x03c4, 0x00000000);
+ nv_mthd(dev, 0x9097, 0x03e4, 0x00000000);
+ nv_mthd(dev, 0x9097, 0x0388, 0x00000000);
+ nv_mthd(dev, 0x9097, 0x03a8, 0x00000000);
+ nv_mthd(dev, 0x9097, 0x03c8, 0x00000000);
+ nv_mthd(dev, 0x9097, 0x03e8, 0x00000000);
+ nv_mthd(dev, 0x9097, 0x038c, 0x00000000);
+ nv_mthd(dev, 0x9097, 0x03ac, 0x00000000);
+ nv_mthd(dev, 0x9097, 0x03cc, 0x00000000);
+ nv_mthd(dev, 0x9097, 0x03ec, 0x00000000);
+ nv_mthd(dev, 0x9097, 0x0700, 0x00000000);
+ nv_mthd(dev, 0x9097, 0x0710, 0x00000000);
+ nv_mthd(dev, 0x9097, 0x0720, 0x00000000);
+ nv_mthd(dev, 0x9097, 0x0730, 0x00000000);
+ nv_mthd(dev, 0x9097, 0x0704, 0x00000000);
+ nv_mthd(dev, 0x9097, 0x0714, 0x00000000);
+ nv_mthd(dev, 0x9097, 0x0724, 0x00000000);
+ nv_mthd(dev, 0x9097, 0x0734, 0x00000000);
+ nv_mthd(dev, 0x9097, 0x0708, 0x00000000);
+ nv_mthd(dev, 0x9097, 0x0718, 0x00000000);
+ nv_mthd(dev, 0x9097, 0x0728, 0x00000000);
+ nv_mthd(dev, 0x9097, 0x0738, 0x00000000);
+ nv_mthd(dev, 0x9097, 0x2800, 0x00000000);
+ nv_mthd(dev, 0x9097, 0x2804, 0x00000000);
+ nv_mthd(dev, 0x9097, 0x2808, 0x00000000);
+ nv_mthd(dev, 0x9097, 0x280c, 0x00000000);
+ nv_mthd(dev, 0x9097, 0x2810, 0x00000000);
+ nv_mthd(dev, 0x9097, 0x2814, 0x00000000);
+ nv_mthd(dev, 0x9097, 0x2818, 0x00000000);
+ nv_mthd(dev, 0x9097, 0x281c, 0x00000000);
+ nv_mthd(dev, 0x9097, 0x2820, 0x00000000);
+ nv_mthd(dev, 0x9097, 0x2824, 0x00000000);
+ nv_mthd(dev, 0x9097, 0x2828, 0x00000000);
+ nv_mthd(dev, 0x9097, 0x282c, 0x00000000);
+ nv_mthd(dev, 0x9097, 0x2830, 0x00000000);
+ nv_mthd(dev, 0x9097, 0x2834, 0x00000000);
+ nv_mthd(dev, 0x9097, 0x2838, 0x00000000);
+ nv_mthd(dev, 0x9097, 0x283c, 0x00000000);
+ nv_mthd(dev, 0x9097, 0x2840, 0x00000000);
+ nv_mthd(dev, 0x9097, 0x2844, 0x00000000);
+ nv_mthd(dev, 0x9097, 0x2848, 0x00000000);
+ nv_mthd(dev, 0x9097, 0x284c, 0x00000000);
+ nv_mthd(dev, 0x9097, 0x2850, 0x00000000);
+ nv_mthd(dev, 0x9097, 0x2854, 0x00000000);
+ nv_mthd(dev, 0x9097, 0x2858, 0x00000000);
+ nv_mthd(dev, 0x9097, 0x285c, 0x00000000);
+ nv_mthd(dev, 0x9097, 0x2860, 0x00000000);
+ nv_mthd(dev, 0x9097, 0x2864, 0x00000000);
+ nv_mthd(dev, 0x9097, 0x2868, 0x00000000);
+ nv_mthd(dev, 0x9097, 0x286c, 0x00000000);
+ nv_mthd(dev, 0x9097, 0x2870, 0x00000000);
+ nv_mthd(dev, 0x9097, 0x2874, 0x00000000);
+ nv_mthd(dev, 0x9097, 0x2878, 0x00000000);
+ nv_mthd(dev, 0x9097, 0x287c, 0x00000000);
+ nv_mthd(dev, 0x9097, 0x2880, 0x00000000);
+ nv_mthd(dev, 0x9097, 0x2884, 0x00000000);
+ nv_mthd(dev, 0x9097, 0x2888, 0x00000000);
+ nv_mthd(dev, 0x9097, 0x288c, 0x00000000);
+ nv_mthd(dev, 0x9097, 0x2890, 0x00000000);
+ nv_mthd(dev, 0x9097, 0x2894, 0x00000000);
+ nv_mthd(dev, 0x9097, 0x2898, 0x00000000);
+ nv_mthd(dev, 0x9097, 0x289c, 0x00000000);
+ nv_mthd(dev, 0x9097, 0x28a0, 0x00000000);
+ nv_mthd(dev, 0x9097, 0x28a4, 0x00000000);
+ nv_mthd(dev, 0x9097, 0x28a8, 0x00000000);
+ nv_mthd(dev, 0x9097, 0x28ac, 0x00000000);
+ nv_mthd(dev, 0x9097, 0x28b0, 0x00000000);
+ nv_mthd(dev, 0x9097, 0x28b4, 0x00000000);
+ nv_mthd(dev, 0x9097, 0x28b8, 0x00000000);
+ nv_mthd(dev, 0x9097, 0x28bc, 0x00000000);
+ nv_mthd(dev, 0x9097, 0x28c0, 0x00000000);
+ nv_mthd(dev, 0x9097, 0x28c4, 0x00000000);
+ nv_mthd(dev, 0x9097, 0x28c8, 0x00000000);
+ nv_mthd(dev, 0x9097, 0x28cc, 0x00000000);
+ nv_mthd(dev, 0x9097, 0x28d0, 0x00000000);
+ nv_mthd(dev, 0x9097, 0x28d4, 0x00000000);
+ nv_mthd(dev, 0x9097, 0x28d8, 0x00000000);
+ nv_mthd(dev, 0x9097, 0x28dc, 0x00000000);
+ nv_mthd(dev, 0x9097, 0x28e0, 0x00000000);
+ nv_mthd(dev, 0x9097, 0x28e4, 0x00000000);
+ nv_mthd(dev, 0x9097, 0x28e8, 0x00000000);
+ nv_mthd(dev, 0x9097, 0x28ec, 0x00000000);
+ nv_mthd(dev, 0x9097, 0x28f0, 0x00000000);
+ nv_mthd(dev, 0x9097, 0x28f4, 0x00000000);
+ nv_mthd(dev, 0x9097, 0x28f8, 0x00000000);
+ nv_mthd(dev, 0x9097, 0x28fc, 0x00000000);
+ nv_mthd(dev, 0x9097, 0x2900, 0x00000000);
+ nv_mthd(dev, 0x9097, 0x2904, 0x00000000);
+ nv_mthd(dev, 0x9097, 0x2908, 0x00000000);
+ nv_mthd(dev, 0x9097, 0x290c, 0x00000000);
+ nv_mthd(dev, 0x9097, 0x2910, 0x00000000);
+ nv_mthd(dev, 0x9097, 0x2914, 0x00000000);
+ nv_mthd(dev, 0x9097, 0x2918, 0x00000000);
+ nv_mthd(dev, 0x9097, 0x291c, 0x00000000);
+ nv_mthd(dev, 0x9097, 0x2920, 0x00000000);
+ nv_mthd(dev, 0x9097, 0x2924, 0x00000000);
+ nv_mthd(dev, 0x9097, 0x2928, 0x00000000);
+ nv_mthd(dev, 0x9097, 0x292c, 0x00000000);
+ nv_mthd(dev, 0x9097, 0x2930, 0x00000000);
+ nv_mthd(dev, 0x9097, 0x2934, 0x00000000);
+ nv_mthd(dev, 0x9097, 0x2938, 0x00000000);
+ nv_mthd(dev, 0x9097, 0x293c, 0x00000000);
+ nv_mthd(dev, 0x9097, 0x2940, 0x00000000);
+ nv_mthd(dev, 0x9097, 0x2944, 0x00000000);
+ nv_mthd(dev, 0x9097, 0x2948, 0x00000000);
+ nv_mthd(dev, 0x9097, 0x294c, 0x00000000);
+ nv_mthd(dev, 0x9097, 0x2950, 0x00000000);
+ nv_mthd(dev, 0x9097, 0x2954, 0x00000000);
+ nv_mthd(dev, 0x9097, 0x2958, 0x00000000);
+ nv_mthd(dev, 0x9097, 0x295c, 0x00000000);
+ nv_mthd(dev, 0x9097, 0x2960, 0x00000000);
+ nv_mthd(dev, 0x9097, 0x2964, 0x00000000);
+ nv_mthd(dev, 0x9097, 0x2968, 0x00000000);
+ nv_mthd(dev, 0x9097, 0x296c, 0x00000000);
+ nv_mthd(dev, 0x9097, 0x2970, 0x00000000);
+ nv_mthd(dev, 0x9097, 0x2974, 0x00000000);
+ nv_mthd(dev, 0x9097, 0x2978, 0x00000000);
+ nv_mthd(dev, 0x9097, 0x297c, 0x00000000);
+ nv_mthd(dev, 0x9097, 0x2980, 0x00000000);
+ nv_mthd(dev, 0x9097, 0x2984, 0x00000000);
+ nv_mthd(dev, 0x9097, 0x2988, 0x00000000);
+ nv_mthd(dev, 0x9097, 0x298c, 0x00000000);
+ nv_mthd(dev, 0x9097, 0x2990, 0x00000000);
+ nv_mthd(dev, 0x9097, 0x2994, 0x00000000);
+ nv_mthd(dev, 0x9097, 0x2998, 0x00000000);
+ nv_mthd(dev, 0x9097, 0x299c, 0x00000000);
+ nv_mthd(dev, 0x9097, 0x29a0, 0x00000000);
+ nv_mthd(dev, 0x9097, 0x29a4, 0x00000000);
+ nv_mthd(dev, 0x9097, 0x29a8, 0x00000000);
+ nv_mthd(dev, 0x9097, 0x29ac, 0x00000000);
+ nv_mthd(dev, 0x9097, 0x29b0, 0x00000000);
+ nv_mthd(dev, 0x9097, 0x29b4, 0x00000000);
+ nv_mthd(dev, 0x9097, 0x29b8, 0x00000000);
+ nv_mthd(dev, 0x9097, 0x29bc, 0x00000000);
+ nv_mthd(dev, 0x9097, 0x29c0, 0x00000000);
+ nv_mthd(dev, 0x9097, 0x29c4, 0x00000000);
+ nv_mthd(dev, 0x9097, 0x29c8, 0x00000000);
+ nv_mthd(dev, 0x9097, 0x29cc, 0x00000000);
+ nv_mthd(dev, 0x9097, 0x29d0, 0x00000000);
+ nv_mthd(dev, 0x9097, 0x29d4, 0x00000000);
+ nv_mthd(dev, 0x9097, 0x29d8, 0x00000000);
+ nv_mthd(dev, 0x9097, 0x29dc, 0x00000000);
+ nv_mthd(dev, 0x9097, 0x29e0, 0x00000000);
+ nv_mthd(dev, 0x9097, 0x29e4, 0x00000000);
+ nv_mthd(dev, 0x9097, 0x29e8, 0x00000000);
+ nv_mthd(dev, 0x9097, 0x29ec, 0x00000000);
+ nv_mthd(dev, 0x9097, 0x29f0, 0x00000000);
+ nv_mthd(dev, 0x9097, 0x29f4, 0x00000000);
+ nv_mthd(dev, 0x9097, 0x29f8, 0x00000000);
+ nv_mthd(dev, 0x9097, 0x29fc, 0x00000000);
+ nv_mthd(dev, 0x9097, 0x0a00, 0x00000000);
+ nv_mthd(dev, 0x9097, 0x0a20, 0x00000000);
+ nv_mthd(dev, 0x9097, 0x0a40, 0x00000000);
+ nv_mthd(dev, 0x9097, 0x0a60, 0x00000000);
+ nv_mthd(dev, 0x9097, 0x0a80, 0x00000000);
+ nv_mthd(dev, 0x9097, 0x0aa0, 0x00000000);
+ nv_mthd(dev, 0x9097, 0x0ac0, 0x00000000);
+ nv_mthd(dev, 0x9097, 0x0ae0, 0x00000000);
+ nv_mthd(dev, 0x9097, 0x0b00, 0x00000000);
+ nv_mthd(dev, 0x9097, 0x0b20, 0x00000000);
+ nv_mthd(dev, 0x9097, 0x0b40, 0x00000000);
+ nv_mthd(dev, 0x9097, 0x0b60, 0x00000000);
+ nv_mthd(dev, 0x9097, 0x0b80, 0x00000000);
+ nv_mthd(dev, 0x9097, 0x0ba0, 0x00000000);
+ nv_mthd(dev, 0x9097, 0x0bc0, 0x00000000);
+ nv_mthd(dev, 0x9097, 0x0be0, 0x00000000);
+ nv_mthd(dev, 0x9097, 0x0a04, 0x00000000);
+ nv_mthd(dev, 0x9097, 0x0a24, 0x00000000);
+ nv_mthd(dev, 0x9097, 0x0a44, 0x00000000);
+ nv_mthd(dev, 0x9097, 0x0a64, 0x00000000);
+ nv_mthd(dev, 0x9097, 0x0a84, 0x00000000);
+ nv_mthd(dev, 0x9097, 0x0aa4, 0x00000000);
+ nv_mthd(dev, 0x9097, 0x0ac4, 0x00000000);
+ nv_mthd(dev, 0x9097, 0x0ae4, 0x00000000);
+ nv_mthd(dev, 0x9097, 0x0b04, 0x00000000);
+ nv_mthd(dev, 0x9097, 0x0b24, 0x00000000);
+ nv_mthd(dev, 0x9097, 0x0b44, 0x00000000);
+ nv_mthd(dev, 0x9097, 0x0b64, 0x00000000);
+ nv_mthd(dev, 0x9097, 0x0b84, 0x00000000);
+ nv_mthd(dev, 0x9097, 0x0ba4, 0x00000000);
+ nv_mthd(dev, 0x9097, 0x0bc4, 0x00000000);
+ nv_mthd(dev, 0x9097, 0x0be4, 0x00000000);
+ nv_mthd(dev, 0x9097, 0x0a08, 0x00000000);
+ nv_mthd(dev, 0x9097, 0x0a28, 0x00000000);
+ nv_mthd(dev, 0x9097, 0x0a48, 0x00000000);
+ nv_mthd(dev, 0x9097, 0x0a68, 0x00000000);
+ nv_mthd(dev, 0x9097, 0x0a88, 0x00000000);
+ nv_mthd(dev, 0x9097, 0x0aa8, 0x00000000);
+ nv_mthd(dev, 0x9097, 0x0ac8, 0x00000000);
+ nv_mthd(dev, 0x9097, 0x0ae8, 0x00000000);
+ nv_mthd(dev, 0x9097, 0x0b08, 0x00000000);
+ nv_mthd(dev, 0x9097, 0x0b28, 0x00000000);
+ nv_mthd(dev, 0x9097, 0x0b48, 0x00000000);
+ nv_mthd(dev, 0x9097, 0x0b68, 0x00000000);
+ nv_mthd(dev, 0x9097, 0x0b88, 0x00000000);
+ nv_mthd(dev, 0x9097, 0x0ba8, 0x00000000);
+ nv_mthd(dev, 0x9097, 0x0bc8, 0x00000000);
+ nv_mthd(dev, 0x9097, 0x0be8, 0x00000000);
+ nv_mthd(dev, 0x9097, 0x0a0c, 0x00000000);
+ nv_mthd(dev, 0x9097, 0x0a2c, 0x00000000);
+ nv_mthd(dev, 0x9097, 0x0a4c, 0x00000000);
+ nv_mthd(dev, 0x9097, 0x0a6c, 0x00000000);
+ nv_mthd(dev, 0x9097, 0x0a8c, 0x00000000);
+ nv_mthd(dev, 0x9097, 0x0aac, 0x00000000);
+ nv_mthd(dev, 0x9097, 0x0acc, 0x00000000);
+ nv_mthd(dev, 0x9097, 0x0aec, 0x00000000);
+ nv_mthd(dev, 0x9097, 0x0b0c, 0x00000000);
+ nv_mthd(dev, 0x9097, 0x0b2c, 0x00000000);
+ nv_mthd(dev, 0x9097, 0x0b4c, 0x00000000);
+ nv_mthd(dev, 0x9097, 0x0b6c, 0x00000000);
+ nv_mthd(dev, 0x9097, 0x0b8c, 0x00000000);
+ nv_mthd(dev, 0x9097, 0x0bac, 0x00000000);
+ nv_mthd(dev, 0x9097, 0x0bcc, 0x00000000);
+ nv_mthd(dev, 0x9097, 0x0bec, 0x00000000);
+ nv_mthd(dev, 0x9097, 0x0a10, 0x00000000);
+ nv_mthd(dev, 0x9097, 0x0a30, 0x00000000);
+ nv_mthd(dev, 0x9097, 0x0a50, 0x00000000);
+ nv_mthd(dev, 0x9097, 0x0a70, 0x00000000);
+ nv_mthd(dev, 0x9097, 0x0a90, 0x00000000);
+ nv_mthd(dev, 0x9097, 0x0ab0, 0x00000000);
+ nv_mthd(dev, 0x9097, 0x0ad0, 0x00000000);
+ nv_mthd(dev, 0x9097, 0x0af0, 0x00000000);
+ nv_mthd(dev, 0x9097, 0x0b10, 0x00000000);
+ nv_mthd(dev, 0x9097, 0x0b30, 0x00000000);
+ nv_mthd(dev, 0x9097, 0x0b50, 0x00000000);
+ nv_mthd(dev, 0x9097, 0x0b70, 0x00000000);
+ nv_mthd(dev, 0x9097, 0x0b90, 0x00000000);
+ nv_mthd(dev, 0x9097, 0x0bb0, 0x00000000);
+ nv_mthd(dev, 0x9097, 0x0bd0, 0x00000000);
+ nv_mthd(dev, 0x9097, 0x0bf0, 0x00000000);
+ nv_mthd(dev, 0x9097, 0x0a14, 0x00000000);
+ nv_mthd(dev, 0x9097, 0x0a34, 0x00000000);
+ nv_mthd(dev, 0x9097, 0x0a54, 0x00000000);
+ nv_mthd(dev, 0x9097, 0x0a74, 0x00000000);
+ nv_mthd(dev, 0x9097, 0x0a94, 0x00000000);
+ nv_mthd(dev, 0x9097, 0x0ab4, 0x00000000);
+ nv_mthd(dev, 0x9097, 0x0ad4, 0x00000000);
+ nv_mthd(dev, 0x9097, 0x0af4, 0x00000000);
+ nv_mthd(dev, 0x9097, 0x0b14, 0x00000000);
+ nv_mthd(dev, 0x9097, 0x0b34, 0x00000000);
+ nv_mthd(dev, 0x9097, 0x0b54, 0x00000000);
+ nv_mthd(dev, 0x9097, 0x0b74, 0x00000000);
+ nv_mthd(dev, 0x9097, 0x0b94, 0x00000000);
+ nv_mthd(dev, 0x9097, 0x0bb4, 0x00000000);
+ nv_mthd(dev, 0x9097, 0x0bd4, 0x00000000);
+ nv_mthd(dev, 0x9097, 0x0bf4, 0x00000000);
+ nv_mthd(dev, 0x9097, 0x0c00, 0x00000000);
+ nv_mthd(dev, 0x9097, 0x0c10, 0x00000000);
+ nv_mthd(dev, 0x9097, 0x0c20, 0x00000000);
+ nv_mthd(dev, 0x9097, 0x0c30, 0x00000000);
+ nv_mthd(dev, 0x9097, 0x0c40, 0x00000000);
+ nv_mthd(dev, 0x9097, 0x0c50, 0x00000000);
+ nv_mthd(dev, 0x9097, 0x0c60, 0x00000000);
+ nv_mthd(dev, 0x9097, 0x0c70, 0x00000000);
+ nv_mthd(dev, 0x9097, 0x0c80, 0x00000000);
+ nv_mthd(dev, 0x9097, 0x0c90, 0x00000000);
+ nv_mthd(dev, 0x9097, 0x0ca0, 0x00000000);
+ nv_mthd(dev, 0x9097, 0x0cb0, 0x00000000);
+ nv_mthd(dev, 0x9097, 0x0cc0, 0x00000000);
+ nv_mthd(dev, 0x9097, 0x0cd0, 0x00000000);
+ nv_mthd(dev, 0x9097, 0x0ce0, 0x00000000);
+ nv_mthd(dev, 0x9097, 0x0cf0, 0x00000000);
+ nv_mthd(dev, 0x9097, 0x0c04, 0x00000000);
+ nv_mthd(dev, 0x9097, 0x0c14, 0x00000000);
+ nv_mthd(dev, 0x9097, 0x0c24, 0x00000000);
+ nv_mthd(dev, 0x9097, 0x0c34, 0x00000000);
+ nv_mthd(dev, 0x9097, 0x0c44, 0x00000000);
+ nv_mthd(dev, 0x9097, 0x0c54, 0x00000000);
+ nv_mthd(dev, 0x9097, 0x0c64, 0x00000000);
+ nv_mthd(dev, 0x9097, 0x0c74, 0x00000000);
+ nv_mthd(dev, 0x9097, 0x0c84, 0x00000000);
+ nv_mthd(dev, 0x9097, 0x0c94, 0x00000000);
+ nv_mthd(dev, 0x9097, 0x0ca4, 0x00000000);
+ nv_mthd(dev, 0x9097, 0x0cb4, 0x00000000);
+ nv_mthd(dev, 0x9097, 0x0cc4, 0x00000000);
+ nv_mthd(dev, 0x9097, 0x0cd4, 0x00000000);
+ nv_mthd(dev, 0x9097, 0x0ce4, 0x00000000);
+ nv_mthd(dev, 0x9097, 0x0cf4, 0x00000000);
+ nv_mthd(dev, 0x9097, 0x0c08, 0x00000000);
+ nv_mthd(dev, 0x9097, 0x0c18, 0x00000000);
+ nv_mthd(dev, 0x9097, 0x0c28, 0x00000000);
+ nv_mthd(dev, 0x9097, 0x0c38, 0x00000000);
+ nv_mthd(dev, 0x9097, 0x0c48, 0x00000000);
+ nv_mthd(dev, 0x9097, 0x0c58, 0x00000000);
+ nv_mthd(dev, 0x9097, 0x0c68, 0x00000000);
+ nv_mthd(dev, 0x9097, 0x0c78, 0x00000000);
+ nv_mthd(dev, 0x9097, 0x0c88, 0x00000000);
+ nv_mthd(dev, 0x9097, 0x0c98, 0x00000000);
+ nv_mthd(dev, 0x9097, 0x0ca8, 0x00000000);
+ nv_mthd(dev, 0x9097, 0x0cb8, 0x00000000);
+ nv_mthd(dev, 0x9097, 0x0cc8, 0x00000000);
+ nv_mthd(dev, 0x9097, 0x0cd8, 0x00000000);
+ nv_mthd(dev, 0x9097, 0x0ce8, 0x00000000);
+ nv_mthd(dev, 0x9097, 0x0cf8, 0x00000000);
+ nv_mthd(dev, 0x9097, 0x0c0c, 0x3f800000);
+ nv_mthd(dev, 0x9097, 0x0c1c, 0x3f800000);
+ nv_mthd(dev, 0x9097, 0x0c2c, 0x3f800000);
+ nv_mthd(dev, 0x9097, 0x0c3c, 0x3f800000);
+ nv_mthd(dev, 0x9097, 0x0c4c, 0x3f800000);
+ nv_mthd(dev, 0x9097, 0x0c5c, 0x3f800000);
+ nv_mthd(dev, 0x9097, 0x0c6c, 0x3f800000);
+ nv_mthd(dev, 0x9097, 0x0c7c, 0x3f800000);
+ nv_mthd(dev, 0x9097, 0x0c8c, 0x3f800000);
+ nv_mthd(dev, 0x9097, 0x0c9c, 0x3f800000);
+ nv_mthd(dev, 0x9097, 0x0cac, 0x3f800000);
+ nv_mthd(dev, 0x9097, 0x0cbc, 0x3f800000);
+ nv_mthd(dev, 0x9097, 0x0ccc, 0x3f800000);
+ nv_mthd(dev, 0x9097, 0x0cdc, 0x3f800000);
+ nv_mthd(dev, 0x9097, 0x0cec, 0x3f800000);
+ nv_mthd(dev, 0x9097, 0x0cfc, 0x3f800000);
+ nv_mthd(dev, 0x9097, 0x0d00, 0xffff0000);
+ nv_mthd(dev, 0x9097, 0x0d08, 0xffff0000);
+ nv_mthd(dev, 0x9097, 0x0d10, 0xffff0000);
+ nv_mthd(dev, 0x9097, 0x0d18, 0xffff0000);
+ nv_mthd(dev, 0x9097, 0x0d20, 0xffff0000);
+ nv_mthd(dev, 0x9097, 0x0d28, 0xffff0000);
+ nv_mthd(dev, 0x9097, 0x0d30, 0xffff0000);
+ nv_mthd(dev, 0x9097, 0x0d38, 0xffff0000);
+ nv_mthd(dev, 0x9097, 0x0d04, 0xffff0000);
+ nv_mthd(dev, 0x9097, 0x0d0c, 0xffff0000);
+ nv_mthd(dev, 0x9097, 0x0d14, 0xffff0000);
+ nv_mthd(dev, 0x9097, 0x0d1c, 0xffff0000);
+ nv_mthd(dev, 0x9097, 0x0d24, 0xffff0000);
+ nv_mthd(dev, 0x9097, 0x0d2c, 0xffff0000);
+ nv_mthd(dev, 0x9097, 0x0d34, 0xffff0000);
+ nv_mthd(dev, 0x9097, 0x0d3c, 0xffff0000);
+ nv_mthd(dev, 0x9097, 0x0e00, 0x00000000);
+ nv_mthd(dev, 0x9097, 0x0e10, 0x00000000);
+ nv_mthd(dev, 0x9097, 0x0e20, 0x00000000);
+ nv_mthd(dev, 0x9097, 0x0e30, 0x00000000);
+ nv_mthd(dev, 0x9097, 0x0e40, 0x00000000);
+ nv_mthd(dev, 0x9097, 0x0e50, 0x00000000);
+ nv_mthd(dev, 0x9097, 0x0e60, 0x00000000);
+ nv_mthd(dev, 0x9097, 0x0e70, 0x00000000);
+ nv_mthd(dev, 0x9097, 0x0e80, 0x00000000);
+ nv_mthd(dev, 0x9097, 0x0e90, 0x00000000);
+ nv_mthd(dev, 0x9097, 0x0ea0, 0x00000000);
+ nv_mthd(dev, 0x9097, 0x0eb0, 0x00000000);
+ nv_mthd(dev, 0x9097, 0x0ec0, 0x00000000);
+ nv_mthd(dev, 0x9097, 0x0ed0, 0x00000000);
+ nv_mthd(dev, 0x9097, 0x0ee0, 0x00000000);
+ nv_mthd(dev, 0x9097, 0x0ef0, 0x00000000);
+ nv_mthd(dev, 0x9097, 0x0e04, 0xffff0000);
+ nv_mthd(dev, 0x9097, 0x0e14, 0xffff0000);
+ nv_mthd(dev, 0x9097, 0x0e24, 0xffff0000);
+ nv_mthd(dev, 0x9097, 0x0e34, 0xffff0000);
+ nv_mthd(dev, 0x9097, 0x0e44, 0xffff0000);
+ nv_mthd(dev, 0x9097, 0x0e54, 0xffff0000);
+ nv_mthd(dev, 0x9097, 0x0e64, 0xffff0000);
+ nv_mthd(dev, 0x9097, 0x0e74, 0xffff0000);
+ nv_mthd(dev, 0x9097, 0x0e84, 0xffff0000);
+ nv_mthd(dev, 0x9097, 0x0e94, 0xffff0000);
+ nv_mthd(dev, 0x9097, 0x0ea4, 0xffff0000);
+ nv_mthd(dev, 0x9097, 0x0eb4, 0xffff0000);
+ nv_mthd(dev, 0x9097, 0x0ec4, 0xffff0000);
+ nv_mthd(dev, 0x9097, 0x0ed4, 0xffff0000);
+ nv_mthd(dev, 0x9097, 0x0ee4, 0xffff0000);
+ nv_mthd(dev, 0x9097, 0x0ef4, 0xffff0000);
+ nv_mthd(dev, 0x9097, 0x0e08, 0xffff0000);
+ nv_mthd(dev, 0x9097, 0x0e18, 0xffff0000);
+ nv_mthd(dev, 0x9097, 0x0e28, 0xffff0000);
+ nv_mthd(dev, 0x9097, 0x0e38, 0xffff0000);
+ nv_mthd(dev, 0x9097, 0x0e48, 0xffff0000);
+ nv_mthd(dev, 0x9097, 0x0e58, 0xffff0000);
+ nv_mthd(dev, 0x9097, 0x0e68, 0xffff0000);
+ nv_mthd(dev, 0x9097, 0x0e78, 0xffff0000);
+ nv_mthd(dev, 0x9097, 0x0e88, 0xffff0000);
+ nv_mthd(dev, 0x9097, 0x0e98, 0xffff0000);
+ nv_mthd(dev, 0x9097, 0x0ea8, 0xffff0000);
+ nv_mthd(dev, 0x9097, 0x0eb8, 0xffff0000);
+ nv_mthd(dev, 0x9097, 0x0ec8, 0xffff0000);
+ nv_mthd(dev, 0x9097, 0x0ed8, 0xffff0000);
+ nv_mthd(dev, 0x9097, 0x0ee8, 0xffff0000);
+ nv_mthd(dev, 0x9097, 0x0ef8, 0xffff0000);
+ nv_mthd(dev, 0x9097, 0x0d40, 0x00000000);
+ nv_mthd(dev, 0x9097, 0x0d48, 0x00000000);
+ nv_mthd(dev, 0x9097, 0x0d50, 0x00000000);
+ nv_mthd(dev, 0x9097, 0x0d58, 0x00000000);
+ nv_mthd(dev, 0x9097, 0x0d44, 0x00000000);
+ nv_mthd(dev, 0x9097, 0x0d4c, 0x00000000);
+ nv_mthd(dev, 0x9097, 0x0d54, 0x00000000);
+ nv_mthd(dev, 0x9097, 0x0d5c, 0x00000000);
+ nv_mthd(dev, 0x9097, 0x1e00, 0x00000001);
+ nv_mthd(dev, 0x9097, 0x1e20, 0x00000001);
+ nv_mthd(dev, 0x9097, 0x1e40, 0x00000001);
+ nv_mthd(dev, 0x9097, 0x1e60, 0x00000001);
+ nv_mthd(dev, 0x9097, 0x1e80, 0x00000001);
+ nv_mthd(dev, 0x9097, 0x1ea0, 0x00000001);
+ nv_mthd(dev, 0x9097, 0x1ec0, 0x00000001);
+ nv_mthd(dev, 0x9097, 0x1ee0, 0x00000001);
+ nv_mthd(dev, 0x9097, 0x1e04, 0x00000001);
+ nv_mthd(dev, 0x9097, 0x1e24, 0x00000001);
+ nv_mthd(dev, 0x9097, 0x1e44, 0x00000001);
+ nv_mthd(dev, 0x9097, 0x1e64, 0x00000001);
+ nv_mthd(dev, 0x9097, 0x1e84, 0x00000001);
+ nv_mthd(dev, 0x9097, 0x1ea4, 0x00000001);
+ nv_mthd(dev, 0x9097, 0x1ec4, 0x00000001);
+ nv_mthd(dev, 0x9097, 0x1ee4, 0x00000001);
+ nv_mthd(dev, 0x9097, 0x1e08, 0x00000002);
+ nv_mthd(dev, 0x9097, 0x1e28, 0x00000002);
+ nv_mthd(dev, 0x9097, 0x1e48, 0x00000002);
+ nv_mthd(dev, 0x9097, 0x1e68, 0x00000002);
+ nv_mthd(dev, 0x9097, 0x1e88, 0x00000002);
+ nv_mthd(dev, 0x9097, 0x1ea8, 0x00000002);
+ nv_mthd(dev, 0x9097, 0x1ec8, 0x00000002);
+ nv_mthd(dev, 0x9097, 0x1ee8, 0x00000002);
+ nv_mthd(dev, 0x9097, 0x1e0c, 0x00000001);
+ nv_mthd(dev, 0x9097, 0x1e2c, 0x00000001);
+ nv_mthd(dev, 0x9097, 0x1e4c, 0x00000001);
+ nv_mthd(dev, 0x9097, 0x1e6c, 0x00000001);
+ nv_mthd(dev, 0x9097, 0x1e8c, 0x00000001);
+ nv_mthd(dev, 0x9097, 0x1eac, 0x00000001);
+ nv_mthd(dev, 0x9097, 0x1ecc, 0x00000001);
+ nv_mthd(dev, 0x9097, 0x1eec, 0x00000001);
+ nv_mthd(dev, 0x9097, 0x1e10, 0x00000001);
+ nv_mthd(dev, 0x9097, 0x1e30, 0x00000001);
+ nv_mthd(dev, 0x9097, 0x1e50, 0x00000001);
+ nv_mthd(dev, 0x9097, 0x1e70, 0x00000001);
+ nv_mthd(dev, 0x9097, 0x1e90, 0x00000001);
+ nv_mthd(dev, 0x9097, 0x1eb0, 0x00000001);
+ nv_mthd(dev, 0x9097, 0x1ed0, 0x00000001);
+ nv_mthd(dev, 0x9097, 0x1ef0, 0x00000001);
+ nv_mthd(dev, 0x9097, 0x1e14, 0x00000002);
+ nv_mthd(dev, 0x9097, 0x1e34, 0x00000002);
+ nv_mthd(dev, 0x9097, 0x1e54, 0x00000002);
+ nv_mthd(dev, 0x9097, 0x1e74, 0x00000002);
+ nv_mthd(dev, 0x9097, 0x1e94, 0x00000002);
+ nv_mthd(dev, 0x9097, 0x1eb4, 0x00000002);
+ nv_mthd(dev, 0x9097, 0x1ed4, 0x00000002);
+ nv_mthd(dev, 0x9097, 0x1ef4, 0x00000002);
+ nv_mthd(dev, 0x9097, 0x1e18, 0x00000001);
+ nv_mthd(dev, 0x9097, 0x1e38, 0x00000001);
+ nv_mthd(dev, 0x9097, 0x1e58, 0x00000001);
+ nv_mthd(dev, 0x9097, 0x1e78, 0x00000001);
+ nv_mthd(dev, 0x9097, 0x1e98, 0x00000001);
+ nv_mthd(dev, 0x9097, 0x1eb8, 0x00000001);
+ nv_mthd(dev, 0x9097, 0x1ed8, 0x00000001);
+ nv_mthd(dev, 0x9097, 0x1ef8, 0x00000001);
+ nv_mthd(dev, 0x9097, 0x3400, 0x00000000);
+ nv_mthd(dev, 0x9097, 0x3404, 0x00000000);
+ nv_mthd(dev, 0x9097, 0x3408, 0x00000000);
+ nv_mthd(dev, 0x9097, 0x340c, 0x00000000);
+ nv_mthd(dev, 0x9097, 0x3410, 0x00000000);
+ nv_mthd(dev, 0x9097, 0x3414, 0x00000000);
+ nv_mthd(dev, 0x9097, 0x3418, 0x00000000);
+ nv_mthd(dev, 0x9097, 0x341c, 0x00000000);
+ nv_mthd(dev, 0x9097, 0x3420, 0x00000000);
+ nv_mthd(dev, 0x9097, 0x3424, 0x00000000);
+ nv_mthd(dev, 0x9097, 0x3428, 0x00000000);
+ nv_mthd(dev, 0x9097, 0x342c, 0x00000000);
+ nv_mthd(dev, 0x9097, 0x3430, 0x00000000);
+ nv_mthd(dev, 0x9097, 0x3434, 0x00000000);
+ nv_mthd(dev, 0x9097, 0x3438, 0x00000000);
+ nv_mthd(dev, 0x9097, 0x343c, 0x00000000);
+ nv_mthd(dev, 0x9097, 0x3440, 0x00000000);
+ nv_mthd(dev, 0x9097, 0x3444, 0x00000000);
+ nv_mthd(dev, 0x9097, 0x3448, 0x00000000);
+ nv_mthd(dev, 0x9097, 0x344c, 0x00000000);
+ nv_mthd(dev, 0x9097, 0x3450, 0x00000000);
+ nv_mthd(dev, 0x9097, 0x3454, 0x00000000);
+ nv_mthd(dev, 0x9097, 0x3458, 0x00000000);
+ nv_mthd(dev, 0x9097, 0x345c, 0x00000000);
+ nv_mthd(dev, 0x9097, 0x3460, 0x00000000);
+ nv_mthd(dev, 0x9097, 0x3464, 0x00000000);
+ nv_mthd(dev, 0x9097, 0x3468, 0x00000000);
+ nv_mthd(dev, 0x9097, 0x346c, 0x00000000);
+ nv_mthd(dev, 0x9097, 0x3470, 0x00000000);
+ nv_mthd(dev, 0x9097, 0x3474, 0x00000000);
+ nv_mthd(dev, 0x9097, 0x3478, 0x00000000);
+ nv_mthd(dev, 0x9097, 0x347c, 0x00000000);
+ nv_mthd(dev, 0x9097, 0x3480, 0x00000000);
+ nv_mthd(dev, 0x9097, 0x3484, 0x00000000);
+ nv_mthd(dev, 0x9097, 0x3488, 0x00000000);
+ nv_mthd(dev, 0x9097, 0x348c, 0x00000000);
+ nv_mthd(dev, 0x9097, 0x3490, 0x00000000);
+ nv_mthd(dev, 0x9097, 0x3494, 0x00000000);
+ nv_mthd(dev, 0x9097, 0x3498, 0x00000000);
+ nv_mthd(dev, 0x9097, 0x349c, 0x00000000);
+ nv_mthd(dev, 0x9097, 0x34a0, 0x00000000);
+ nv_mthd(dev, 0x9097, 0x34a4, 0x00000000);
+ nv_mthd(dev, 0x9097, 0x34a8, 0x00000000);
+ nv_mthd(dev, 0x9097, 0x34ac, 0x00000000);
+ nv_mthd(dev, 0x9097, 0x34b0, 0x00000000);
+ nv_mthd(dev, 0x9097, 0x34b4, 0x00000000);
+ nv_mthd(dev, 0x9097, 0x34b8, 0x00000000);
+ nv_mthd(dev, 0x9097, 0x34bc, 0x00000000);
+ nv_mthd(dev, 0x9097, 0x34c0, 0x00000000);
+ nv_mthd(dev, 0x9097, 0x34c4, 0x00000000);
+ nv_mthd(dev, 0x9097, 0x34c8, 0x00000000);
+ nv_mthd(dev, 0x9097, 0x34cc, 0x00000000);
+ nv_mthd(dev, 0x9097, 0x34d0, 0x00000000);
+ nv_mthd(dev, 0x9097, 0x34d4, 0x00000000);
+ nv_mthd(dev, 0x9097, 0x34d8, 0x00000000);
+ nv_mthd(dev, 0x9097, 0x34dc, 0x00000000);
+ nv_mthd(dev, 0x9097, 0x34e0, 0x00000000);
+ nv_mthd(dev, 0x9097, 0x34e4, 0x00000000);
+ nv_mthd(dev, 0x9097, 0x34e8, 0x00000000);
+ nv_mthd(dev, 0x9097, 0x34ec, 0x00000000);
+ nv_mthd(dev, 0x9097, 0x34f0, 0x00000000);
+ nv_mthd(dev, 0x9097, 0x34f4, 0x00000000);
+ nv_mthd(dev, 0x9097, 0x34f8, 0x00000000);
+ nv_mthd(dev, 0x9097, 0x34fc, 0x00000000);
+ nv_mthd(dev, 0x9097, 0x3500, 0x00000000);
+ nv_mthd(dev, 0x9097, 0x3504, 0x00000000);
+ nv_mthd(dev, 0x9097, 0x3508, 0x00000000);
+ nv_mthd(dev, 0x9097, 0x350c, 0x00000000);
+ nv_mthd(dev, 0x9097, 0x3510, 0x00000000);
+ nv_mthd(dev, 0x9097, 0x3514, 0x00000000);
+ nv_mthd(dev, 0x9097, 0x3518, 0x00000000);
+ nv_mthd(dev, 0x9097, 0x351c, 0x00000000);
+ nv_mthd(dev, 0x9097, 0x3520, 0x00000000);
+ nv_mthd(dev, 0x9097, 0x3524, 0x00000000);
+ nv_mthd(dev, 0x9097, 0x3528, 0x00000000);
+ nv_mthd(dev, 0x9097, 0x352c, 0x00000000);
+ nv_mthd(dev, 0x9097, 0x3530, 0x00000000);
+ nv_mthd(dev, 0x9097, 0x3534, 0x00000000);
+ nv_mthd(dev, 0x9097, 0x3538, 0x00000000);
+ nv_mthd(dev, 0x9097, 0x353c, 0x00000000);
+ nv_mthd(dev, 0x9097, 0x3540, 0x00000000);
+ nv_mthd(dev, 0x9097, 0x3544, 0x00000000);
+ nv_mthd(dev, 0x9097, 0x3548, 0x00000000);
+ nv_mthd(dev, 0x9097, 0x354c, 0x00000000);
+ nv_mthd(dev, 0x9097, 0x3550, 0x00000000);
+ nv_mthd(dev, 0x9097, 0x3554, 0x00000000);
+ nv_mthd(dev, 0x9097, 0x3558, 0x00000000);
+ nv_mthd(dev, 0x9097, 0x355c, 0x00000000);
+ nv_mthd(dev, 0x9097, 0x3560, 0x00000000);
+ nv_mthd(dev, 0x9097, 0x3564, 0x00000000);
+ nv_mthd(dev, 0x9097, 0x3568, 0x00000000);
+ nv_mthd(dev, 0x9097, 0x356c, 0x00000000);
+ nv_mthd(dev, 0x9097, 0x3570, 0x00000000);
+ nv_mthd(dev, 0x9097, 0x3574, 0x00000000);
+ nv_mthd(dev, 0x9097, 0x3578, 0x00000000);
+ nv_mthd(dev, 0x9097, 0x357c, 0x00000000);
+ nv_mthd(dev, 0x9097, 0x3580, 0x00000000);
+ nv_mthd(dev, 0x9097, 0x3584, 0x00000000);
+ nv_mthd(dev, 0x9097, 0x3588, 0x00000000);
+ nv_mthd(dev, 0x9097, 0x358c, 0x00000000);
+ nv_mthd(dev, 0x9097, 0x3590, 0x00000000);
+ nv_mthd(dev, 0x9097, 0x3594, 0x00000000);
+ nv_mthd(dev, 0x9097, 0x3598, 0x00000000);
+ nv_mthd(dev, 0x9097, 0x359c, 0x00000000);
+ nv_mthd(dev, 0x9097, 0x35a0, 0x00000000);
+ nv_mthd(dev, 0x9097, 0x35a4, 0x00000000);
+ nv_mthd(dev, 0x9097, 0x35a8, 0x00000000);
+ nv_mthd(dev, 0x9097, 0x35ac, 0x00000000);
+ nv_mthd(dev, 0x9097, 0x35b0, 0x00000000);
+ nv_mthd(dev, 0x9097, 0x35b4, 0x00000000);
+ nv_mthd(dev, 0x9097, 0x35b8, 0x00000000);
+ nv_mthd(dev, 0x9097, 0x35bc, 0x00000000);
+ nv_mthd(dev, 0x9097, 0x35c0, 0x00000000);
+ nv_mthd(dev, 0x9097, 0x35c4, 0x00000000);
+ nv_mthd(dev, 0x9097, 0x35c8, 0x00000000);
+ nv_mthd(dev, 0x9097, 0x35cc, 0x00000000);
+ nv_mthd(dev, 0x9097, 0x35d0, 0x00000000);
+ nv_mthd(dev, 0x9097, 0x35d4, 0x00000000);
+ nv_mthd(dev, 0x9097, 0x35d8, 0x00000000);
+ nv_mthd(dev, 0x9097, 0x35dc, 0x00000000);
+ nv_mthd(dev, 0x9097, 0x35e0, 0x00000000);
+ nv_mthd(dev, 0x9097, 0x35e4, 0x00000000);
+ nv_mthd(dev, 0x9097, 0x35e8, 0x00000000);
+ nv_mthd(dev, 0x9097, 0x35ec, 0x00000000);
+ nv_mthd(dev, 0x9097, 0x35f0, 0x00000000);
+ nv_mthd(dev, 0x9097, 0x35f4, 0x00000000);
+ nv_mthd(dev, 0x9097, 0x35f8, 0x00000000);
+ nv_mthd(dev, 0x9097, 0x35fc, 0x00000000);
+ nv_mthd(dev, 0x9097, 0x030c, 0x00000001);
+ nv_mthd(dev, 0x9097, 0x1944, 0x00000000);
+ nv_mthd(dev, 0x9097, 0x1514, 0x00000000);
+ nv_mthd(dev, 0x9097, 0x0d68, 0x0000ffff);
+ nv_mthd(dev, 0x9097, 0x121c, 0x0fac6881);
+ nv_mthd(dev, 0x9097, 0x0fac, 0x00000001);
+ nv_mthd(dev, 0x9097, 0x1538, 0x00000001);
+ nv_mthd(dev, 0x9097, 0x0fe0, 0x00000000);
+ nv_mthd(dev, 0x9097, 0x0fe4, 0x00000000);
+ nv_mthd(dev, 0x9097, 0x0fe8, 0x00000014);
+ nv_mthd(dev, 0x9097, 0x0fec, 0x00000040);
+ nv_mthd(dev, 0x9097, 0x0ff0, 0x00000000);
+ nv_mthd(dev, 0x9097, 0x179c, 0x00000000);
+ nv_mthd(dev, 0x9097, 0x1228, 0x00000400);
+ nv_mthd(dev, 0x9097, 0x122c, 0x00000300);
+ nv_mthd(dev, 0x9097, 0x1230, 0x00010001);
+ nv_mthd(dev, 0x9097, 0x07f8, 0x00000000);
+ nv_mthd(dev, 0x9097, 0x15b4, 0x00000001);
+ nv_mthd(dev, 0x9097, 0x15cc, 0x00000000);
+ nv_mthd(dev, 0x9097, 0x1534, 0x00000000);
+ nv_mthd(dev, 0x9097, 0x0fb0, 0x00000000);
+ nv_mthd(dev, 0x9097, 0x15d0, 0x00000000);
+ nv_mthd(dev, 0x9097, 0x153c, 0x00000000);
+ nv_mthd(dev, 0x9097, 0x16b4, 0x00000003);
+ nv_mthd(dev, 0x9097, 0x0fbc, 0x0000ffff);
+ nv_mthd(dev, 0x9097, 0x0fc0, 0x0000ffff);
+ nv_mthd(dev, 0x9097, 0x0fc4, 0x0000ffff);
+ nv_mthd(dev, 0x9097, 0x0fc8, 0x0000ffff);
+ nv_mthd(dev, 0x9097, 0x0df8, 0x00000000);
+ nv_mthd(dev, 0x9097, 0x0dfc, 0x00000000);
+ nv_mthd(dev, 0x9097, 0x1948, 0x00000000);
+ nv_mthd(dev, 0x9097, 0x1970, 0x00000001);
+ nv_mthd(dev, 0x9097, 0x161c, 0x000009f0);
+ nv_mthd(dev, 0x9097, 0x0dcc, 0x00000010);
+ nv_mthd(dev, 0x9097, 0x163c, 0x00000000);
+ nv_mthd(dev, 0x9097, 0x15e4, 0x00000000);
+ nv_mthd(dev, 0x9097, 0x1160, 0x25e00040);
+ nv_mthd(dev, 0x9097, 0x1164, 0x25e00040);
+ nv_mthd(dev, 0x9097, 0x1168, 0x25e00040);
+ nv_mthd(dev, 0x9097, 0x116c, 0x25e00040);
+ nv_mthd(dev, 0x9097, 0x1170, 0x25e00040);
+ nv_mthd(dev, 0x9097, 0x1174, 0x25e00040);
+ nv_mthd(dev, 0x9097, 0x1178, 0x25e00040);
+ nv_mthd(dev, 0x9097, 0x117c, 0x25e00040);
+ nv_mthd(dev, 0x9097, 0x1180, 0x25e00040);
+ nv_mthd(dev, 0x9097, 0x1184, 0x25e00040);
+ nv_mthd(dev, 0x9097, 0x1188, 0x25e00040);
+ nv_mthd(dev, 0x9097, 0x118c, 0x25e00040);
+ nv_mthd(dev, 0x9097, 0x1190, 0x25e00040);
+ nv_mthd(dev, 0x9097, 0x1194, 0x25e00040);
+ nv_mthd(dev, 0x9097, 0x1198, 0x25e00040);
+ nv_mthd(dev, 0x9097, 0x119c, 0x25e00040);
+ nv_mthd(dev, 0x9097, 0x11a0, 0x25e00040);
+ nv_mthd(dev, 0x9097, 0x11a4, 0x25e00040);
+ nv_mthd(dev, 0x9097, 0x11a8, 0x25e00040);
+ nv_mthd(dev, 0x9097, 0x11ac, 0x25e00040);
+ nv_mthd(dev, 0x9097, 0x11b0, 0x25e00040);
+ nv_mthd(dev, 0x9097, 0x11b4, 0x25e00040);
+ nv_mthd(dev, 0x9097, 0x11b8, 0x25e00040);
+ nv_mthd(dev, 0x9097, 0x11bc, 0x25e00040);
+ nv_mthd(dev, 0x9097, 0x11c0, 0x25e00040);
+ nv_mthd(dev, 0x9097, 0x11c4, 0x25e00040);
+ nv_mthd(dev, 0x9097, 0x11c8, 0x25e00040);
+ nv_mthd(dev, 0x9097, 0x11cc, 0x25e00040);
+ nv_mthd(dev, 0x9097, 0x11d0, 0x25e00040);
+ nv_mthd(dev, 0x9097, 0x11d4, 0x25e00040);
+ nv_mthd(dev, 0x9097, 0x11d8, 0x25e00040);
+ nv_mthd(dev, 0x9097, 0x11dc, 0x25e00040);
+ nv_mthd(dev, 0x9097, 0x1880, 0x00000000);
+ nv_mthd(dev, 0x9097, 0x1884, 0x00000000);
+ nv_mthd(dev, 0x9097, 0x1888, 0x00000000);
+ nv_mthd(dev, 0x9097, 0x188c, 0x00000000);
+ nv_mthd(dev, 0x9097, 0x1890, 0x00000000);
+ nv_mthd(dev, 0x9097, 0x1894, 0x00000000);
+ nv_mthd(dev, 0x9097, 0x1898, 0x00000000);
+ nv_mthd(dev, 0x9097, 0x189c, 0x00000000);
+ nv_mthd(dev, 0x9097, 0x18a0, 0x00000000);
+ nv_mthd(dev, 0x9097, 0x18a4, 0x00000000);
+ nv_mthd(dev, 0x9097, 0x18a8, 0x00000000);
+ nv_mthd(dev, 0x9097, 0x18ac, 0x00000000);
+ nv_mthd(dev, 0x9097, 0x18b0, 0x00000000);
+ nv_mthd(dev, 0x9097, 0x18b4, 0x00000000);
+ nv_mthd(dev, 0x9097, 0x18b8, 0x00000000);
+ nv_mthd(dev, 0x9097, 0x18bc, 0x00000000);
+ nv_mthd(dev, 0x9097, 0x18c0, 0x00000000);
+ nv_mthd(dev, 0x9097, 0x18c4, 0x00000000);
+ nv_mthd(dev, 0x9097, 0x18c8, 0x00000000);
+ nv_mthd(dev, 0x9097, 0x18cc, 0x00000000);
+ nv_mthd(dev, 0x9097, 0x18d0, 0x00000000);
+ nv_mthd(dev, 0x9097, 0x18d4, 0x00000000);
+ nv_mthd(dev, 0x9097, 0x18d8, 0x00000000);
+ nv_mthd(dev, 0x9097, 0x18dc, 0x00000000);
+ nv_mthd(dev, 0x9097, 0x18e0, 0x00000000);
+ nv_mthd(dev, 0x9097, 0x18e4, 0x00000000);
+ nv_mthd(dev, 0x9097, 0x18e8, 0x00000000);
+ nv_mthd(dev, 0x9097, 0x18ec, 0x00000000);
+ nv_mthd(dev, 0x9097, 0x18f0, 0x00000000);
+ nv_mthd(dev, 0x9097, 0x18f4, 0x00000000);
+ nv_mthd(dev, 0x9097, 0x18f8, 0x00000000);
+ nv_mthd(dev, 0x9097, 0x18fc, 0x00000000);
+ nv_mthd(dev, 0x9097, 0x0f84, 0x00000000);
+ nv_mthd(dev, 0x9097, 0x0f88, 0x00000000);
+ nv_mthd(dev, 0x9097, 0x17c8, 0x00000000);
+ nv_mthd(dev, 0x9097, 0x17cc, 0x00000000);
+ nv_mthd(dev, 0x9097, 0x17d0, 0x000000ff);
+ nv_mthd(dev, 0x9097, 0x17d4, 0xffffffff);
+ nv_mthd(dev, 0x9097, 0x17d8, 0x00000002);
+ nv_mthd(dev, 0x9097, 0x17dc, 0x00000000);
+ nv_mthd(dev, 0x9097, 0x15f4, 0x00000000);
+ nv_mthd(dev, 0x9097, 0x15f8, 0x00000000);
+ nv_mthd(dev, 0x9097, 0x1434, 0x00000000);
+ nv_mthd(dev, 0x9097, 0x1438, 0x00000000);
+ nv_mthd(dev, 0x9097, 0x0d74, 0x00000000);
+ nv_mthd(dev, 0x9097, 0x0dec, 0x00000001);
+ nv_mthd(dev, 0x9097, 0x13a4, 0x00000000);
+ nv_mthd(dev, 0x9097, 0x1318, 0x00000001);
+ nv_mthd(dev, 0x9097, 0x1644, 0x00000000);
+ nv_mthd(dev, 0x9097, 0x0748, 0x00000000);
+ nv_mthd(dev, 0x9097, 0x0de8, 0x00000000);
+ nv_mthd(dev, 0x9097, 0x1648, 0x00000000);
+ nv_mthd(dev, 0x9097, 0x12a4, 0x00000000);
+ nv_mthd(dev, 0x9097, 0x1120, 0x00000000);
+ nv_mthd(dev, 0x9097, 0x1124, 0x00000000);
+ nv_mthd(dev, 0x9097, 0x1128, 0x00000000);
+ nv_mthd(dev, 0x9097, 0x112c, 0x00000000);
+ nv_mthd(dev, 0x9097, 0x1118, 0x00000000);
+ nv_mthd(dev, 0x9097, 0x164c, 0x00000000);
+ nv_mthd(dev, 0x9097, 0x1658, 0x00000000);
+ nv_mthd(dev, 0x9097, 0x1910, 0x00000290);
+ nv_mthd(dev, 0x9097, 0x1518, 0x00000000);
+ nv_mthd(dev, 0x9097, 0x165c, 0x00000001);
+ nv_mthd(dev, 0x9097, 0x1520, 0x00000000);
+ nv_mthd(dev, 0x9097, 0x1604, 0x00000000);
+ nv_mthd(dev, 0x9097, 0x1570, 0x00000000);
+ nv_mthd(dev, 0x9097, 0x13b0, 0x3f800000);
+ nv_mthd(dev, 0x9097, 0x13b4, 0x3f800000);
+ nv_mthd(dev, 0x9097, 0x020c, 0x00000000);
+ nv_mthd(dev, 0x9097, 0x1670, 0x30201000);
+ nv_mthd(dev, 0x9097, 0x1674, 0x70605040);
+ nv_mthd(dev, 0x9097, 0x1678, 0xb8a89888);
+ nv_mthd(dev, 0x9097, 0x167c, 0xf8e8d8c8);
+ nv_mthd(dev, 0x9097, 0x166c, 0x00000000);
+ nv_mthd(dev, 0x9097, 0x1680, 0x00ffff00);
+ nv_mthd(dev, 0x9097, 0x12d0, 0x00000003);
+ nv_mthd(dev, 0x9097, 0x12d4, 0x00000002);
+ nv_mthd(dev, 0x9097, 0x1684, 0x00000000);
+ nv_mthd(dev, 0x9097, 0x1688, 0x00000000);
+ nv_mthd(dev, 0x9097, 0x0dac, 0x00001b02);
+ nv_mthd(dev, 0x9097, 0x0db0, 0x00001b02);
+ nv_mthd(dev, 0x9097, 0x0db4, 0x00000000);
+ nv_mthd(dev, 0x9097, 0x168c, 0x00000000);
+ nv_mthd(dev, 0x9097, 0x15bc, 0x00000000);
+ nv_mthd(dev, 0x9097, 0x156c, 0x00000000);
+ nv_mthd(dev, 0x9097, 0x187c, 0x00000000);
+ nv_mthd(dev, 0x9097, 0x1110, 0x00000001);
+ nv_mthd(dev, 0x9097, 0x0dc0, 0x00000000);
+ nv_mthd(dev, 0x9097, 0x0dc4, 0x00000000);
+ nv_mthd(dev, 0x9097, 0x0dc8, 0x00000000);
+ nv_mthd(dev, 0x9097, 0x1234, 0x00000000);
+ nv_mthd(dev, 0x9097, 0x1690, 0x00000000);
+ nv_mthd(dev, 0x9097, 0x12ac, 0x00000001);
+ nv_mthd(dev, 0x9097, 0x02c4, 0x00000000);
+ nv_mthd(dev, 0x9097, 0x0790, 0x00000000);
+ nv_mthd(dev, 0x9097, 0x0794, 0x00000000);
+ nv_mthd(dev, 0x9097, 0x0798, 0x00000000);
+ nv_mthd(dev, 0x9097, 0x079c, 0x00000000);
+ nv_mthd(dev, 0x9097, 0x07a0, 0x00000000);
+ nv_mthd(dev, 0x9097, 0x077c, 0x00000000);
+ nv_mthd(dev, 0x9097, 0x1000, 0x00000010);
+ nv_mthd(dev, 0x9097, 0x10fc, 0x00000000);
+ nv_mthd(dev, 0x9097, 0x1290, 0x00000000);
+ nv_mthd(dev, 0x9097, 0x0218, 0x00000010);
+ nv_mthd(dev, 0x9097, 0x12d8, 0x00000000);
+ nv_mthd(dev, 0x9097, 0x12dc, 0x00000010);
+ nv_mthd(dev, 0x9097, 0x0d94, 0x00000001);
+ nv_mthd(dev, 0x9097, 0x155c, 0x00000000);
+ nv_mthd(dev, 0x9097, 0x1560, 0x00000000);
+ nv_mthd(dev, 0x9097, 0x1564, 0x00001fff);
+ nv_mthd(dev, 0x9097, 0x1574, 0x00000000);
+ nv_mthd(dev, 0x9097, 0x1578, 0x00000000);
+ nv_mthd(dev, 0x9097, 0x157c, 0x003fffff);
+ nv_mthd(dev, 0x9097, 0x1354, 0x00000000);
+ nv_mthd(dev, 0x9097, 0x1664, 0x00000000);
+ nv_mthd(dev, 0x9097, 0x1610, 0x00000012);
+ nv_mthd(dev, 0x9097, 0x1608, 0x00000000);
+ nv_mthd(dev, 0x9097, 0x160c, 0x00000000);
+ nv_mthd(dev, 0x9097, 0x162c, 0x00000003);
+ nv_mthd(dev, 0x9097, 0x0210, 0x00000000);
+ nv_mthd(dev, 0x9097, 0x0320, 0x00000000);
+ nv_mthd(dev, 0x9097, 0x0324, 0x3f800000);
+ nv_mthd(dev, 0x9097, 0x0328, 0x3f800000);
+ nv_mthd(dev, 0x9097, 0x032c, 0x3f800000);
+ nv_mthd(dev, 0x9097, 0x0330, 0x3f800000);
+ nv_mthd(dev, 0x9097, 0x0334, 0x3f800000);
+ nv_mthd(dev, 0x9097, 0x0338, 0x3f800000);
+ nv_mthd(dev, 0x9097, 0x0750, 0x00000000);
+ nv_mthd(dev, 0x9097, 0x0760, 0x39291909);
+ nv_mthd(dev, 0x9097, 0x0764, 0x79695949);
+ nv_mthd(dev, 0x9097, 0x0768, 0xb9a99989);
+ nv_mthd(dev, 0x9097, 0x076c, 0xf9e9d9c9);
+ nv_mthd(dev, 0x9097, 0x0770, 0x30201000);
+ nv_mthd(dev, 0x9097, 0x0774, 0x70605040);
+ nv_mthd(dev, 0x9097, 0x0778, 0x00009080);
+ nv_mthd(dev, 0x9097, 0x0780, 0x39291909);
+ nv_mthd(dev, 0x9097, 0x0784, 0x79695949);
+ nv_mthd(dev, 0x9097, 0x0788, 0xb9a99989);
+ nv_mthd(dev, 0x9097, 0x078c, 0xf9e9d9c9);
+ nv_mthd(dev, 0x9097, 0x07d0, 0x30201000);
+ nv_mthd(dev, 0x9097, 0x07d4, 0x70605040);
+ nv_mthd(dev, 0x9097, 0x07d8, 0x00009080);
+ nv_mthd(dev, 0x9097, 0x037c, 0x00000001);
+ nv_mthd(dev, 0x9097, 0x0740, 0x00000000);
+ nv_mthd(dev, 0x9097, 0x0744, 0x00000000);
+ nv_mthd(dev, 0x9097, 0x2600, 0x00000000);
+ nv_mthd(dev, 0x9097, 0x1918, 0x00000000);
+ nv_mthd(dev, 0x9097, 0x191c, 0x00000900);
+ nv_mthd(dev, 0x9097, 0x1920, 0x00000405);
+ nv_mthd(dev, 0x9097, 0x1308, 0x00000001);
+ nv_mthd(dev, 0x9097, 0x1924, 0x00000000);
+ nv_mthd(dev, 0x9097, 0x13ac, 0x00000000);
+ nv_mthd(dev, 0x9097, 0x192c, 0x00000001);
+ nv_mthd(dev, 0x9097, 0x193c, 0x00002c1c);
+ nv_mthd(dev, 0x9097, 0x0d7c, 0x00000000);
+ nv_mthd(dev, 0x9097, 0x0f8c, 0x00000000);
+ nv_mthd(dev, 0x9097, 0x02c0, 0x00000001);
+ nv_mthd(dev, 0x9097, 0x1510, 0x00000000);
+ nv_mthd(dev, 0x9097, 0x1940, 0x00000000);
+ nv_mthd(dev, 0x9097, 0x0ff4, 0x00000000);
+ nv_mthd(dev, 0x9097, 0x0ff8, 0x00000000);
+ nv_mthd(dev, 0x9097, 0x194c, 0x00000000);
+ nv_mthd(dev, 0x9097, 0x1950, 0x00000000);
+ nv_mthd(dev, 0x9097, 0x1968, 0x00000000);
+ nv_mthd(dev, 0x9097, 0x1590, 0x0000003f);
+ nv_mthd(dev, 0x9097, 0x07e8, 0x00000000);
+ nv_mthd(dev, 0x9097, 0x07ec, 0x00000000);
+ nv_mthd(dev, 0x9097, 0x07f0, 0x00000000);
+ nv_mthd(dev, 0x9097, 0x07f4, 0x00000000);
+ nv_mthd(dev, 0x9097, 0x196c, 0x00000011);
+ nv_mthd(dev, 0x9097, 0x197c, 0x00000000);
+ nv_mthd(dev, 0x9097, 0x0fcc, 0x00000000);
+ nv_mthd(dev, 0x9097, 0x0fd0, 0x00000000);
+ nv_mthd(dev, 0x9097, 0x02d8, 0x00000040);
+ nv_mthd(dev, 0x9097, 0x1980, 0x00000080);
+ nv_mthd(dev, 0x9097, 0x1504, 0x00000080);
+ nv_mthd(dev, 0x9097, 0x1984, 0x00000000);
+ nv_mthd(dev, 0x9097, 0x0300, 0x00000001);
+ nv_mthd(dev, 0x9097, 0x13a8, 0x00000000);
+ nv_mthd(dev, 0x9097, 0x12ec, 0x00000000);
+ nv_mthd(dev, 0x9097, 0x1310, 0x00000000);
+ nv_mthd(dev, 0x9097, 0x1314, 0x00000001);
+ nv_mthd(dev, 0x9097, 0x1380, 0x00000000);
+ nv_mthd(dev, 0x9097, 0x1384, 0x00000001);
+ nv_mthd(dev, 0x9097, 0x1388, 0x00000001);
+ nv_mthd(dev, 0x9097, 0x138c, 0x00000001);
+ nv_mthd(dev, 0x9097, 0x1390, 0x00000001);
+ nv_mthd(dev, 0x9097, 0x1394, 0x00000000);
+ nv_mthd(dev, 0x9097, 0x139c, 0x00000000);
+ nv_mthd(dev, 0x9097, 0x1398, 0x00000000);
+ nv_mthd(dev, 0x9097, 0x1594, 0x00000000);
+ nv_mthd(dev, 0x9097, 0x1598, 0x00000001);
+ nv_mthd(dev, 0x9097, 0x159c, 0x00000001);
+ nv_mthd(dev, 0x9097, 0x15a0, 0x00000001);
+ nv_mthd(dev, 0x9097, 0x15a4, 0x00000001);
+ nv_mthd(dev, 0x9097, 0x0f54, 0x00000000);
+ nv_mthd(dev, 0x9097, 0x0f58, 0x00000000);
+ nv_mthd(dev, 0x9097, 0x0f5c, 0x00000000);
+ nv_mthd(dev, 0x9097, 0x19bc, 0x00000000);
+ nv_mthd(dev, 0x9097, 0x0f9c, 0x00000000);
+ nv_mthd(dev, 0x9097, 0x0fa0, 0x00000000);
+ nv_mthd(dev, 0x9097, 0x12cc, 0x00000000);
+ nv_mthd(dev, 0x9097, 0x12e8, 0x00000000);
+ nv_mthd(dev, 0x9097, 0x130c, 0x00000001);
+ nv_mthd(dev, 0x9097, 0x1360, 0x00000000);
+ nv_mthd(dev, 0x9097, 0x1364, 0x00000000);
+ nv_mthd(dev, 0x9097, 0x1368, 0x00000000);
+ nv_mthd(dev, 0x9097, 0x136c, 0x00000000);
+ nv_mthd(dev, 0x9097, 0x1370, 0x00000000);
+ nv_mthd(dev, 0x9097, 0x1374, 0x00000000);
+ nv_mthd(dev, 0x9097, 0x1378, 0x00000000);
+ nv_mthd(dev, 0x9097, 0x137c, 0x00000000);
+ nv_mthd(dev, 0x9097, 0x133c, 0x00000001);
+ nv_mthd(dev, 0x9097, 0x1340, 0x00000001);
+ nv_mthd(dev, 0x9097, 0x1344, 0x00000002);
+ nv_mthd(dev, 0x9097, 0x1348, 0x00000001);
+ nv_mthd(dev, 0x9097, 0x134c, 0x00000001);
+ nv_mthd(dev, 0x9097, 0x1350, 0x00000002);
+ nv_mthd(dev, 0x9097, 0x1358, 0x00000001);
+ nv_mthd(dev, 0x9097, 0x12e4, 0x00000000);
+ nv_mthd(dev, 0x9097, 0x131c, 0x00000000);
+ nv_mthd(dev, 0x9097, 0x1320, 0x00000000);
+ nv_mthd(dev, 0x9097, 0x1324, 0x00000000);
+ nv_mthd(dev, 0x9097, 0x1328, 0x00000000);
+ nv_mthd(dev, 0x9097, 0x19c0, 0x00000000);
+ nv_mthd(dev, 0x9097, 0x1140, 0x00000000);
+ nv_mthd(dev, 0x9097, 0x19c4, 0x00000000);
+ nv_mthd(dev, 0x9097, 0x19c8, 0x00001500);
+ nv_mthd(dev, 0x9097, 0x135c, 0x00000000);
+ nv_mthd(dev, 0x9097, 0x0f90, 0x00000000);
+ nv_mthd(dev, 0x9097, 0x19e0, 0x00000001);
+ nv_mthd(dev, 0x9097, 0x19e4, 0x00000001);
+ nv_mthd(dev, 0x9097, 0x19e8, 0x00000001);
+ nv_mthd(dev, 0x9097, 0x19ec, 0x00000001);
+ nv_mthd(dev, 0x9097, 0x19f0, 0x00000001);
+ nv_mthd(dev, 0x9097, 0x19f4, 0x00000001);
+ nv_mthd(dev, 0x9097, 0x19f8, 0x00000001);
+ nv_mthd(dev, 0x9097, 0x19fc, 0x00000001);
+ nv_mthd(dev, 0x9097, 0x19cc, 0x00000001);
+ nv_mthd(dev, 0x9097, 0x15b8, 0x00000000);
+ nv_mthd(dev, 0x9097, 0x1a00, 0x00001111);
+ nv_mthd(dev, 0x9097, 0x1a04, 0x00000000);
+ nv_mthd(dev, 0x9097, 0x1a08, 0x00000000);
+ nv_mthd(dev, 0x9097, 0x1a0c, 0x00000000);
+ nv_mthd(dev, 0x9097, 0x1a10, 0x00000000);
+ nv_mthd(dev, 0x9097, 0x1a14, 0x00000000);
+ nv_mthd(dev, 0x9097, 0x1a18, 0x00000000);
+ nv_mthd(dev, 0x9097, 0x1a1c, 0x00000000);
+ nv_mthd(dev, 0x9097, 0x0d6c, 0xffff0000);
+ nv_mthd(dev, 0x9097, 0x0d70, 0xffff0000);
+ nv_mthd(dev, 0x9097, 0x10f8, 0x00001010);
+ nv_mthd(dev, 0x9097, 0x0d80, 0x00000000);
+ nv_mthd(dev, 0x9097, 0x0d84, 0x00000000);
+ nv_mthd(dev, 0x9097, 0x0d88, 0x00000000);
+ nv_mthd(dev, 0x9097, 0x0d8c, 0x00000000);
+ nv_mthd(dev, 0x9097, 0x0d90, 0x00000000);
+ nv_mthd(dev, 0x9097, 0x0da0, 0x00000000);
+ nv_mthd(dev, 0x9097, 0x1508, 0x80000000);
+ nv_mthd(dev, 0x9097, 0x150c, 0x40000000);
+ nv_mthd(dev, 0x9097, 0x1668, 0x00000000);
+ nv_mthd(dev, 0x9097, 0x0318, 0x00000008);
+ nv_mthd(dev, 0x9097, 0x031c, 0x00000008);
+ nv_mthd(dev, 0x9097, 0x0d9c, 0x00000001);
+ nv_mthd(dev, 0x9097, 0x07dc, 0x00000000);
+ nv_mthd(dev, 0x9097, 0x074c, 0x00000055);
+ nv_mthd(dev, 0x9097, 0x1420, 0x00000003);
+ nv_mthd(dev, 0x9097, 0x17bc, 0x00000000);
+ nv_mthd(dev, 0x9097, 0x17c0, 0x00000000);
+ nv_mthd(dev, 0x9097, 0x17c4, 0x00000001);
+ nv_mthd(dev, 0x9097, 0x1008, 0x00000008);
+ nv_mthd(dev, 0x9097, 0x100c, 0x00000040);
+ nv_mthd(dev, 0x9097, 0x1010, 0x0000012c);
+ nv_mthd(dev, 0x9097, 0x0d60, 0x00000040);
+ nv_mthd(dev, 0x9097, 0x075c, 0x00000003);
+ nv_mthd(dev, 0x9097, 0x1018, 0x00000020);
+ nv_mthd(dev, 0x9097, 0x101c, 0x00000001);
+ nv_mthd(dev, 0x9097, 0x1020, 0x00000020);
+ nv_mthd(dev, 0x9097, 0x1024, 0x00000001);
+ nv_mthd(dev, 0x9097, 0x1444, 0x00000000);
+ nv_mthd(dev, 0x9097, 0x1448, 0x00000000);
+ nv_mthd(dev, 0x9097, 0x144c, 0x00000000);
+ nv_mthd(dev, 0x9097, 0x0360, 0x20164010);
+ nv_mthd(dev, 0x9097, 0x0364, 0x00000020);
+ nv_mthd(dev, 0x9097, 0x0368, 0x00000000);
+ nv_mthd(dev, 0x9097, 0x0de4, 0x00000000);
+ nv_mthd(dev, 0x9097, 0x0204, 0x00000006);
+ nv_mthd(dev, 0x9097, 0x0208, 0x00000000);
+ nv_mthd(dev, 0x9097, 0x02cc, 0x003fffff);
+ nv_mthd(dev, 0x9097, 0x02d0, 0x00000c48);
+ nv_mthd(dev, 0x9097, 0x1220, 0x00000005);
+ nv_mthd(dev, 0x9097, 0x0fdc, 0x00000000);
+ nv_mthd(dev, 0x9097, 0x0f98, 0x00300008);
+ nv_mthd(dev, 0x9097, 0x1284, 0x04000080);
+ nv_mthd(dev, 0x9097, 0x1450, 0x00300008);
+ nv_mthd(dev, 0x9097, 0x1454, 0x04000080);
+ nv_mthd(dev, 0x9097, 0x0214, 0x00000000);
+ /* in trace, right after 0x90c0, not here */
+ nv_mthd(dev, 0x9097, 0x3410, 0x80002006);
+}
+
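+/*
+ * 0x902d is the Fermi 2D class; the values below are its default method
+ * state, presumably captured from an mmiotrace of the binary driver like
+ * the 0x9097 (3D) state above.
+ */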
+static void
+nvc0_grctx_generate_902d(struct drm_device *dev)
+{
+ nv_mthd(dev, 0x902d, 0x0200, 0x000000cf);
+ nv_mthd(dev, 0x902d, 0x0204, 0x00000001);
+ nv_mthd(dev, 0x902d, 0x0208, 0x00000020);
+ nv_mthd(dev, 0x902d, 0x020c, 0x00000001);
+ nv_mthd(dev, 0x902d, 0x0210, 0x00000000);
+ nv_mthd(dev, 0x902d, 0x0214, 0x00000080);
+ nv_mthd(dev, 0x902d, 0x0218, 0x00000100);
+ nv_mthd(dev, 0x902d, 0x021c, 0x00000100);
+ nv_mthd(dev, 0x902d, 0x0220, 0x00000000);
+ nv_mthd(dev, 0x902d, 0x0224, 0x00000000);
+ nv_mthd(dev, 0x902d, 0x0230, 0x000000cf);
+ nv_mthd(dev, 0x902d, 0x0234, 0x00000001);
+ nv_mthd(dev, 0x902d, 0x0238, 0x00000020);
+ nv_mthd(dev, 0x902d, 0x023c, 0x00000001);
+ nv_mthd(dev, 0x902d, 0x0244, 0x00000080);
+ nv_mthd(dev, 0x902d, 0x0248, 0x00000100);
+ nv_mthd(dev, 0x902d, 0x024c, 0x00000100);
+}
+
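+/* 0x9039 is the Fermi M2MF (memory-to-memory format) class. */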
+static void
+nvc0_grctx_generate_9039(struct drm_device *dev)
+{
+ nv_mthd(dev, 0x9039, 0x030c, 0x00000000);
+ nv_mthd(dev, 0x9039, 0x0310, 0x00000000);
+ nv_mthd(dev, 0x9039, 0x0314, 0x00000000);
+ nv_mthd(dev, 0x9039, 0x0320, 0x00000000);
+ nv_mthd(dev, 0x9039, 0x0238, 0x00000000);
+ nv_mthd(dev, 0x9039, 0x023c, 0x00000000);
+ nv_mthd(dev, 0x9039, 0x0318, 0x00000000);
+ nv_mthd(dev, 0x9039, 0x031c, 0x00000000);
+}
+
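+/* 0x90c0 is the Fermi compute class. */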
+static void
+nvc0_grctx_generate_90c0(struct drm_device *dev)
+{
+ nv_mthd(dev, 0x90c0, 0x270c, 0x00000000);
+ nv_mthd(dev, 0x90c0, 0x272c, 0x00000000);
+ nv_mthd(dev, 0x90c0, 0x274c, 0x00000000);
+ nv_mthd(dev, 0x90c0, 0x276c, 0x00000000);
+ nv_mthd(dev, 0x90c0, 0x278c, 0x00000000);
+ nv_mthd(dev, 0x90c0, 0x27ac, 0x00000000);
+ nv_mthd(dev, 0x90c0, 0x27cc, 0x00000000);
+ nv_mthd(dev, 0x90c0, 0x27ec, 0x00000000);
+ nv_mthd(dev, 0x90c0, 0x030c, 0x00000001);
+ nv_mthd(dev, 0x90c0, 0x1944, 0x00000000);
+ nv_mthd(dev, 0x90c0, 0x0758, 0x00000100);
+ nv_mthd(dev, 0x90c0, 0x02c4, 0x00000000);
+ nv_mthd(dev, 0x90c0, 0x0790, 0x00000000);
+ nv_mthd(dev, 0x90c0, 0x0794, 0x00000000);
+ nv_mthd(dev, 0x90c0, 0x0798, 0x00000000);
+ nv_mthd(dev, 0x90c0, 0x079c, 0x00000000);
+ nv_mthd(dev, 0x90c0, 0x07a0, 0x00000000);
+ nv_mthd(dev, 0x90c0, 0x077c, 0x00000000);
+ nv_mthd(dev, 0x90c0, 0x0204, 0x00000000);
+ nv_mthd(dev, 0x90c0, 0x0208, 0x00000000);
+ nv_mthd(dev, 0x90c0, 0x020c, 0x00000000);
+ nv_mthd(dev, 0x90c0, 0x0214, 0x00000000);
+ nv_mthd(dev, 0x90c0, 0x024c, 0x00000000);
+ nv_mthd(dev, 0x90c0, 0x0d94, 0x00000001);
+ nv_mthd(dev, 0x90c0, 0x1608, 0x00000000);
+ nv_mthd(dev, 0x90c0, 0x160c, 0x00000000);
+ nv_mthd(dev, 0x90c0, 0x1664, 0x00000000);
+}
+
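+/*
+ * The helpers below poke per-unit PGRAPH registers directly rather than
+ * issuing object methods; this one covers the DISPATCH unit (0x4040xx).
+ */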
+static void
+nvc0_grctx_generate_dispatch(struct drm_device *dev)
+{
+ int i;
+
+ nv_wr32(dev, 0x404004, 0x00000000);
+ nv_wr32(dev, 0x404008, 0x00000000);
+ nv_wr32(dev, 0x40400c, 0x00000000);
+ nv_wr32(dev, 0x404010, 0x00000000);
+ nv_wr32(dev, 0x404014, 0x00000000);
+ nv_wr32(dev, 0x404018, 0x00000000);
+ nv_wr32(dev, 0x40401c, 0x00000000);
+ nv_wr32(dev, 0x404020, 0x00000000);
+ nv_wr32(dev, 0x404024, 0x00000000);
+ nv_wr32(dev, 0x404028, 0x00000000);
+ nv_wr32(dev, 0x40402c, 0x00000000);
+ nv_wr32(dev, 0x404044, 0x00000000);
+ nv_wr32(dev, 0x404094, 0x00000000);
+ nv_wr32(dev, 0x404098, 0x00000000);
+ nv_wr32(dev, 0x40409c, 0x00000000);
+ nv_wr32(dev, 0x4040a0, 0x00000000);
+ nv_wr32(dev, 0x4040a4, 0x00000000);
+ nv_wr32(dev, 0x4040a8, 0x00000000);
+ nv_wr32(dev, 0x4040ac, 0x00000000);
+ nv_wr32(dev, 0x4040b0, 0x00000000);
+ nv_wr32(dev, 0x4040b4, 0x00000000);
+ nv_wr32(dev, 0x4040b8, 0x00000000);
+ nv_wr32(dev, 0x4040bc, 0x00000000);
+ nv_wr32(dev, 0x4040c0, 0x00000000);
+ nv_wr32(dev, 0x4040c4, 0x00000000);
+ nv_wr32(dev, 0x4040c8, 0xf0000087);
+ nv_wr32(dev, 0x4040d4, 0x00000000);
+ nv_wr32(dev, 0x4040d8, 0x00000000);
+ nv_wr32(dev, 0x4040dc, 0x00000000);
+ nv_wr32(dev, 0x4040e0, 0x00000000);
+ nv_wr32(dev, 0x4040e4, 0x00000000);
+ nv_wr32(dev, 0x4040e8, 0x00001000);
+ nv_wr32(dev, 0x4040f8, 0x00000000);
+ nv_wr32(dev, 0x404130, 0x00000000);
+ nv_wr32(dev, 0x404134, 0x00000000);
+ nv_wr32(dev, 0x404138, 0x20000040);
+ nv_wr32(dev, 0x404150, 0x0000002e);
+ nv_wr32(dev, 0x404154, 0x00000400);
+ nv_wr32(dev, 0x404158, 0x00000200);
+ nv_wr32(dev, 0x404164, 0x00000055);
+ nv_wr32(dev, 0x404168, 0x00000000);
+ nv_wr32(dev, 0x404174, 0x00000000);
+ nv_wr32(dev, 0x404178, 0x00000000);
+ nv_wr32(dev, 0x40417c, 0x00000000);
+ for (i = 0; i < 8; i++)
+ nv_wr32(dev, 0x404200 + (i * 4), 0x00000000); /* subc */
+}
+
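+/* Macro unit defaults, 0x4044xx. */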
+static void
+nvc0_grctx_generate_macro(struct drm_device *dev)
+{
+ nv_wr32(dev, 0x404404, 0x00000000);
+ nv_wr32(dev, 0x404408, 0x00000000);
+ nv_wr32(dev, 0x40440c, 0x00000000);
+ nv_wr32(dev, 0x404410, 0x00000000);
+ nv_wr32(dev, 0x404414, 0x00000000);
+ nv_wr32(dev, 0x404418, 0x00000000);
+ nv_wr32(dev, 0x40441c, 0x00000000);
+ nv_wr32(dev, 0x404420, 0x00000000);
+ nv_wr32(dev, 0x404424, 0x00000000);
+ nv_wr32(dev, 0x404428, 0x00000000);
+ nv_wr32(dev, 0x40442c, 0x00000000);
+ nv_wr32(dev, 0x404430, 0x00000000);
+ nv_wr32(dev, 0x404434, 0x00000000);
+ nv_wr32(dev, 0x404438, 0x00000000);
+ nv_wr32(dev, 0x404460, 0x00000000);
+ nv_wr32(dev, 0x404464, 0x00000000);
+ nv_wr32(dev, 0x404468, 0x00ffffff);
+ nv_wr32(dev, 0x40446c, 0x00000000);
+ nv_wr32(dev, 0x404480, 0x00000001);
+ nv_wr32(dev, 0x404498, 0x00000001);
+}
+
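+/* M2MF unit defaults, 0x4046xx. */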
+static void
+nvc0_grctx_generate_m2mf(struct drm_device *dev)
+{
+ nv_wr32(dev, 0x404604, 0x00000015);
+ nv_wr32(dev, 0x404608, 0x00000000);
+ nv_wr32(dev, 0x40460c, 0x00002e00);
+ nv_wr32(dev, 0x404610, 0x00000100);
+ nv_wr32(dev, 0x404618, 0x00000000);
+ nv_wr32(dev, 0x40461c, 0x00000000);
+ nv_wr32(dev, 0x404620, 0x00000000);
+ nv_wr32(dev, 0x404624, 0x00000000);
+ nv_wr32(dev, 0x404628, 0x00000000);
+ nv_wr32(dev, 0x40462c, 0x00000000);
+ nv_wr32(dev, 0x404630, 0x00000000);
+ nv_wr32(dev, 0x404634, 0x00000000);
+ nv_wr32(dev, 0x404638, 0x00000004);
+ nv_wr32(dev, 0x40463c, 0x00000000);
+ nv_wr32(dev, 0x404640, 0x00000000);
+ nv_wr32(dev, 0x404644, 0x00000000);
+ nv_wr32(dev, 0x404648, 0x00000000);
+ nv_wr32(dev, 0x40464c, 0x00000000);
+ nv_wr32(dev, 0x404650, 0x00000000);
+ nv_wr32(dev, 0x404654, 0x00000000);
+ nv_wr32(dev, 0x404658, 0x00000000);
+ nv_wr32(dev, 0x40465c, 0x007f0100);
+ nv_wr32(dev, 0x404660, 0x00000000);
+ nv_wr32(dev, 0x404664, 0x00000000);
+ nv_wr32(dev, 0x404668, 0x00000000);
+ nv_wr32(dev, 0x40466c, 0x00000000);
+ nv_wr32(dev, 0x404670, 0x00000000);
+ nv_wr32(dev, 0x404674, 0x00000000);
+ nv_wr32(dev, 0x404678, 0x00000000);
+ nv_wr32(dev, 0x40467c, 0x00000002);
+ nv_wr32(dev, 0x404680, 0x00000000);
+ nv_wr32(dev, 0x404684, 0x00000000);
+ nv_wr32(dev, 0x404688, 0x00000000);
+ nv_wr32(dev, 0x40468c, 0x00000000);
+ nv_wr32(dev, 0x404690, 0x00000000);
+ nv_wr32(dev, 0x404694, 0x00000000);
+ nv_wr32(dev, 0x404698, 0x00000000);
+ nv_wr32(dev, 0x40469c, 0x00000000);
+ nv_wr32(dev, 0x4046a0, 0x007f0080);
+ nv_wr32(dev, 0x4046a4, 0x00000000);
+ nv_wr32(dev, 0x4046a8, 0x00000000);
+ nv_wr32(dev, 0x4046ac, 0x00000000);
+ nv_wr32(dev, 0x4046b0, 0x00000000);
+ nv_wr32(dev, 0x4046b4, 0x00000000);
+ nv_wr32(dev, 0x4046b8, 0x00000000);
+ nv_wr32(dev, 0x4046bc, 0x00000000);
+ nv_wr32(dev, 0x4046c0, 0x00000000);
+ nv_wr32(dev, 0x4046c4, 0x00000000);
+ nv_wr32(dev, 0x4046c8, 0x00000000);
+ nv_wr32(dev, 0x4046cc, 0x00000000);
+ nv_wr32(dev, 0x4046d0, 0x00000000);
+ nv_wr32(dev, 0x4046d4, 0x00000000);
+ nv_wr32(dev, 0x4046d8, 0x00000000);
+ nv_wr32(dev, 0x4046dc, 0x00000000);
+ nv_wr32(dev, 0x4046e0, 0x00000000);
+ nv_wr32(dev, 0x4046e4, 0x00000000);
+ nv_wr32(dev, 0x4046e8, 0x00000000);
+ nv_wr32(dev, 0x4046f0, 0x00000000);
+ nv_wr32(dev, 0x4046f4, 0x00000000);
+}
+
+static void
+nvc0_grctx_generate_unk47xx(struct drm_device *dev)
+{
+ nv_wr32(dev, 0x404700, 0x00000000);
+ nv_wr32(dev, 0x404704, 0x00000000);
+ nv_wr32(dev, 0x404708, 0x00000000);
+ nv_wr32(dev, 0x40470c, 0x00000000);
+ nv_wr32(dev, 0x404710, 0x00000000);
+ nv_wr32(dev, 0x404714, 0x00000000);
+ nv_wr32(dev, 0x404718, 0x00000000);
+ nv_wr32(dev, 0x40471c, 0x00000000);
+ nv_wr32(dev, 0x404720, 0x00000000);
+ nv_wr32(dev, 0x404724, 0x00000000);
+ nv_wr32(dev, 0x404728, 0x00000000);
+ nv_wr32(dev, 0x40472c, 0x00000000);
+ nv_wr32(dev, 0x404730, 0x00000000);
+ nv_wr32(dev, 0x404734, 0x00000100);
+ nv_wr32(dev, 0x404738, 0x00000000);
+ nv_wr32(dev, 0x40473c, 0x00000000);
+ nv_wr32(dev, 0x404740, 0x00000000);
+ nv_wr32(dev, 0x404744, 0x00000000);
+ nv_wr32(dev, 0x404748, 0x00000000);
+ nv_wr32(dev, 0x40474c, 0x00000000);
+ nv_wr32(dev, 0x404750, 0x00000000);
+ nv_wr32(dev, 0x404754, 0x00000000);
+}
+
+static void
+nvc0_grctx_generate_shaders(struct drm_device *dev)
+{
+ nv_wr32(dev, 0x405800, 0x078000bf);
+ nv_wr32(dev, 0x405830, 0x02180000);
+ nv_wr32(dev, 0x405834, 0x00000000);
+ nv_wr32(dev, 0x405838, 0x00000000);
+ nv_wr32(dev, 0x405854, 0x00000000);
+ nv_wr32(dev, 0x405870, 0x00000001);
+ nv_wr32(dev, 0x405874, 0x00000001);
+ nv_wr32(dev, 0x405878, 0x00000001);
+ nv_wr32(dev, 0x40587c, 0x00000001);
+ nv_wr32(dev, 0x405a00, 0x00000000);
+ nv_wr32(dev, 0x405a04, 0x00000000);
+ nv_wr32(dev, 0x405a18, 0x00000000);
+}
+
+static void
+nvc0_grctx_generate_unk60xx(struct drm_device *dev)
+{
+ nv_wr32(dev, 0x406020, 0x000103c1);
+ nv_wr32(dev, 0x406028, 0x00000001);
+ nv_wr32(dev, 0x40602c, 0x00000001);
+ nv_wr32(dev, 0x406030, 0x00000001);
+ nv_wr32(dev, 0x406034, 0x00000001);
+}
+
+static void
+nvc0_grctx_generate_unk64xx(struct drm_device *dev)
+{
+ nv_wr32(dev, 0x4064a8, 0x00000000);
+ nv_wr32(dev, 0x4064ac, 0x00003fff);
+ nv_wr32(dev, 0x4064b4, 0x00000000);
+ nv_wr32(dev, 0x4064b8, 0x00000000);
+}
+
+static void
+nvc0_grctx_generate_tpbus(struct drm_device *dev)
+{
+ nv_wr32(dev, 0x407804, 0x00000023);
+ nv_wr32(dev, 0x40780c, 0x0a418820);
+ nv_wr32(dev, 0x407810, 0x062080e6);
+ nv_wr32(dev, 0x407814, 0x020398a4);
+ nv_wr32(dev, 0x407818, 0x0e629062);
+ nv_wr32(dev, 0x40781c, 0x0a418820);
+ nv_wr32(dev, 0x407820, 0x000000e6);
+ nv_wr32(dev, 0x4078bc, 0x00000103);
+}
+
+static void
+nvc0_grctx_generate_ccache(struct drm_device *dev)
+{
+ nv_wr32(dev, 0x408000, 0x00000000);
+ nv_wr32(dev, 0x408004, 0x00000000);
+ nv_wr32(dev, 0x408008, 0x00000018);
+ nv_wr32(dev, 0x40800c, 0x00000000);
+ nv_wr32(dev, 0x408010, 0x00000000);
+ nv_wr32(dev, 0x408014, 0x00000069);
+ nv_wr32(dev, 0x408018, 0xe100e100);
+ nv_wr32(dev, 0x408064, 0x00000000);
+}
+
+static void
+nvc0_grctx_generate_rop(struct drm_device *dev)
+{
+ struct drm_nouveau_private *dev_priv = dev->dev_private;
+
+ /* ROPC_BROADCAST */
+ nv_wr32(dev, 0x408800, 0x02802a3c);
+ nv_wr32(dev, 0x408804, 0x00000040);
+ nv_wr32(dev, 0x408808, 0x0003e00d);
+ switch (dev_priv->chipset) {
+ case 0xc0:
+ nv_wr32(dev, 0x408900, 0x0080b801);
+ break;
+ case 0xc3:
+ case 0xc4:
+ nv_wr32(dev, 0x408900, 0x3080b801);
+ break;
+ }
+ nv_wr32(dev, 0x408904, 0x02000001);
+ nv_wr32(dev, 0x408908, 0x00c80929);
+ nv_wr32(dev, 0x40890c, 0x00000000);
+ nv_wr32(dev, 0x408980, 0x0000011d);
+}
+
+static void
+nvc0_grctx_generate_gpc(struct drm_device *dev)
+{
+ int i;
+
+ /* GPC_BROADCAST */
+ nv_wr32(dev, 0x418380, 0x00000016);
+ nv_wr32(dev, 0x418400, 0x38004e00);
+ nv_wr32(dev, 0x418404, 0x71e0ffff);
+ nv_wr32(dev, 0x418408, 0x00000000);
+ nv_wr32(dev, 0x41840c, 0x00001008);
+ nv_wr32(dev, 0x418410, 0x0fff0fff);
+ nv_wr32(dev, 0x418414, 0x00200fff);
+ nv_wr32(dev, 0x418450, 0x00000000);
+ nv_wr32(dev, 0x418454, 0x00000000);
+ nv_wr32(dev, 0x418458, 0x00000000);
+ nv_wr32(dev, 0x41845c, 0x00000000);
+ nv_wr32(dev, 0x418460, 0x00000000);
+ nv_wr32(dev, 0x418464, 0x00000000);
+ nv_wr32(dev, 0x418468, 0x00000001);
+ nv_wr32(dev, 0x41846c, 0x00000000);
+ nv_wr32(dev, 0x418470, 0x00000000);
+ nv_wr32(dev, 0x418600, 0x0000001f);
+ nv_wr32(dev, 0x418684, 0x0000000f);
+ nv_wr32(dev, 0x418700, 0x00000002);
+ nv_wr32(dev, 0x418704, 0x00000080);
+ nv_wr32(dev, 0x418708, 0x00000000);
+ nv_wr32(dev, 0x41870c, 0x07c80000);
+ nv_wr32(dev, 0x418710, 0x00000000);
+ nv_wr32(dev, 0x418800, 0x0006860a);
+ nv_wr32(dev, 0x418808, 0x00000000);
+ nv_wr32(dev, 0x41880c, 0x00000000);
+ nv_wr32(dev, 0x418810, 0x00000000);
+ nv_wr32(dev, 0x418828, 0x00008442);
+ nv_wr32(dev, 0x418830, 0x00000001);
+ nv_wr32(dev, 0x4188d8, 0x00000008);
+ nv_wr32(dev, 0x4188e0, 0x01000000);
+ nv_wr32(dev, 0x4188e8, 0x00000000);
+ nv_wr32(dev, 0x4188ec, 0x00000000);
+ nv_wr32(dev, 0x4188f0, 0x00000000);
+ nv_wr32(dev, 0x4188f4, 0x00000000);
+ nv_wr32(dev, 0x4188f8, 0x00000000);
+ nv_wr32(dev, 0x4188fc, 0x00100000);
+ nv_wr32(dev, 0x41891c, 0x00ff00ff);
+ nv_wr32(dev, 0x418924, 0x00000000);
+ nv_wr32(dev, 0x418928, 0x00ffff00);
+ nv_wr32(dev, 0x41892c, 0x0000ff00);
+ for (i = 0; i < 8; i++) {
+ nv_wr32(dev, 0x418a00 + (i * 0x20), 0x00000000);
+ nv_wr32(dev, 0x418a04 + (i * 0x20), 0x00000000);
+ nv_wr32(dev, 0x418a08 + (i * 0x20), 0x00000000);
+ nv_wr32(dev, 0x418a0c + (i * 0x20), 0x00010000);
+ nv_wr32(dev, 0x418a10 + (i * 0x20), 0x00000000);
+ nv_wr32(dev, 0x418a14 + (i * 0x20), 0x00000000);
+ nv_wr32(dev, 0x418a18 + (i * 0x20), 0x00000000);
+ }
+ nv_wr32(dev, 0x418b00, 0x00000000);
+ nv_wr32(dev, 0x418b08, 0x0a418820);
+ nv_wr32(dev, 0x418b0c, 0x062080e6);
+ nv_wr32(dev, 0x418b10, 0x020398a4);
+ nv_wr32(dev, 0x418b14, 0x0e629062);
+ nv_wr32(dev, 0x418b18, 0x0a418820);
+ nv_wr32(dev, 0x418b1c, 0x000000e6);
+ nv_wr32(dev, 0x418bb8, 0x00000103);
+ nv_wr32(dev, 0x418c08, 0x00000001);
+ nv_wr32(dev, 0x418c10, 0x00000000);
+ nv_wr32(dev, 0x418c14, 0x00000000);
+ nv_wr32(dev, 0x418c18, 0x00000000);
+ nv_wr32(dev, 0x418c1c, 0x00000000);
+ nv_wr32(dev, 0x418c20, 0x00000000);
+ nv_wr32(dev, 0x418c24, 0x00000000);
+ nv_wr32(dev, 0x418c28, 0x00000000);
+ nv_wr32(dev, 0x418c2c, 0x00000000);
+ nv_wr32(dev, 0x418c80, 0x20200004);
+ nv_wr32(dev, 0x418c8c, 0x00000001);
+ nv_wr32(dev, 0x419000, 0x00000780);
+ nv_wr32(dev, 0x419004, 0x00000000);
+ nv_wr32(dev, 0x419008, 0x00000000);
+ nv_wr32(dev, 0x419014, 0x00000004);
+}
+
+static void
+nvc0_grctx_generate_tp(struct drm_device *dev)
+{
+ struct drm_nouveau_private *dev_priv = dev->dev_private;
+
+ /* GPC_BROADCAST.TP_BROADCAST */
+ nv_wr32(dev, 0x419848, 0x00000000);
+ nv_wr32(dev, 0x419864, 0x0000012a);
+ nv_wr32(dev, 0x419888, 0x00000000);
+ nv_wr32(dev, 0x419a00, 0x000001f0);
+ nv_wr32(dev, 0x419a04, 0x00000001);
+ nv_wr32(dev, 0x419a08, 0x00000023);
+ nv_wr32(dev, 0x419a0c, 0x00020000);
+ nv_wr32(dev, 0x419a10, 0x00000000);
+ nv_wr32(dev, 0x419a14, 0x00000200);
+ nv_wr32(dev, 0x419a1c, 0x00000000);
+ nv_wr32(dev, 0x419a20, 0x00000800);
+ if (dev_priv->chipset != 0xc0)
+ nv_wr32(dev, 0x00419ac4, 0x0007f440); /* 0xc3 */
+ nv_wr32(dev, 0x419b00, 0x0a418820);
+ nv_wr32(dev, 0x419b04, 0x062080e6);
+ nv_wr32(dev, 0x419b08, 0x020398a4);
+ nv_wr32(dev, 0x419b0c, 0x0e629062);
+ nv_wr32(dev, 0x419b10, 0x0a418820);
+ nv_wr32(dev, 0x419b14, 0x000000e6);
+ nv_wr32(dev, 0x419bd0, 0x00900103);
+ nv_wr32(dev, 0x419be0, 0x00000001);
+ nv_wr32(dev, 0x419be4, 0x00000000);
+ nv_wr32(dev, 0x419c00, 0x00000002);
+ nv_wr32(dev, 0x419c04, 0x00000006);
+ nv_wr32(dev, 0x419c08, 0x00000002);
+ nv_wr32(dev, 0x419c20, 0x00000000);
+ nv_wr32(dev, 0x419cbc, 0x28137606);
+ nv_wr32(dev, 0x419ce8, 0x00000000);
+ nv_wr32(dev, 0x419cf4, 0x00000183);
+ nv_wr32(dev, 0x419d20, 0x02180000);
+ nv_wr32(dev, 0x419d24, 0x00001fff);
+ nv_wr32(dev, 0x419e04, 0x00000000);
+ nv_wr32(dev, 0x419e08, 0x00000000);
+ nv_wr32(dev, 0x419e0c, 0x00000000);
+ nv_wr32(dev, 0x419e10, 0x00000002);
+ nv_wr32(dev, 0x419e44, 0x001beff2);
+ nv_wr32(dev, 0x419e48, 0x00000000);
+ nv_wr32(dev, 0x419e4c, 0x0000000f);
+ nv_wr32(dev, 0x419e50, 0x00000000);
+ nv_wr32(dev, 0x419e54, 0x00000000);
+ nv_wr32(dev, 0x419e58, 0x00000000);
+ nv_wr32(dev, 0x419e5c, 0x00000000);
+ nv_wr32(dev, 0x419e60, 0x00000000);
+ nv_wr32(dev, 0x419e64, 0x00000000);
+ nv_wr32(dev, 0x419e68, 0x00000000);
+ nv_wr32(dev, 0x419e6c, 0x00000000);
+ nv_wr32(dev, 0x419e70, 0x00000000);
+ nv_wr32(dev, 0x419e74, 0x00000000);
+ nv_wr32(dev, 0x419e78, 0x00000000);
+ nv_wr32(dev, 0x419e7c, 0x00000000);
+ nv_wr32(dev, 0x419e80, 0x00000000);
+ nv_wr32(dev, 0x419e84, 0x00000000);
+ nv_wr32(dev, 0x419e88, 0x00000000);
+ nv_wr32(dev, 0x419e8c, 0x00000000);
+ nv_wr32(dev, 0x419e90, 0x00000000);
+ nv_wr32(dev, 0x419e98, 0x00000000);
+ if (dev_priv->chipset != 0xc0)
+ nv_wr32(dev, 0x419ee0, 0x00011110);
+ nv_wr32(dev, 0x419f50, 0x00000000);
+ nv_wr32(dev, 0x419f54, 0x00000000);
+ if (dev_priv->chipset != 0xc0)
+ nv_wr32(dev, 0x419f58, 0x00000000);
+}
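+/*
+ * Build the initial PGRAPH context image for a channel: program the unit
+ * defaults above, replay the per-channel fuc "mmio list", derive the
+ * TP/GPC layout registers from the probed unit counts, then issue the
+ * default object state via icmd/mthd.
+ */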
+
+int
+nvc0_grctx_generate(struct nouveau_channel *chan)
+{
+ struct drm_nouveau_private *dev_priv = chan->dev->dev_private;
+ struct nvc0_graph_priv *priv = dev_priv->engine.graph.priv;
+ struct nvc0_graph_chan *grch = chan->pgraph_ctx;
+ struct drm_device *dev = chan->dev;
+ int i, gpc, tp, id;
+ u32 r000260, tmp;
+
+ r000260 = nv_rd32(dev, 0x000260);
+ nv_wr32(dev, 0x000260, r000260 & ~1);
+ nv_wr32(dev, 0x400208, 0x00000000);
+
+ nvc0_grctx_generate_dispatch(dev);
+ nvc0_grctx_generate_macro(dev);
+ nvc0_grctx_generate_m2mf(dev);
+ nvc0_grctx_generate_unk47xx(dev);
+ nvc0_grctx_generate_shaders(dev);
+ nvc0_grctx_generate_unk60xx(dev);
+ nvc0_grctx_generate_unk64xx(dev);
+ nvc0_grctx_generate_tpbus(dev);
+ nvc0_grctx_generate_ccache(dev);
+ nvc0_grctx_generate_rop(dev);
+ nvc0_grctx_generate_gpc(dev);
+ nvc0_grctx_generate_tp(dev);
+
+ nv_wr32(dev, 0x404154, 0x00000000);
+
+ /* fuc "mmio list" writes */
+ for (i = 0; i < grch->mmio_nr * 8; i += 8) {
+ u32 reg = nv_ro32(grch->mmio, i + 0);
+ nv_wr32(dev, reg, nv_ro32(grch->mmio, i + 4));
+ }
+
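+ /* number the TPs sequentially across GPCs; each GPC exposes
+ * TPs 0..tp_nr[gpc]-1 */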
+ for (tp = 0, id = 0; tp < 4; tp++) {
+ for (gpc = 0; gpc < priv->gpc_nr; gpc++) {
+ if (tp < priv->tp_nr[gpc]) {
+ nv_wr32(dev, TP_UNIT(gpc, tp, 0x698), id);
+ nv_wr32(dev, TP_UNIT(gpc, tp, 0x4e8), id);
+ nv_wr32(dev, GPC_UNIT(gpc, 0x0c10 + tp * 4), id);
+ nv_wr32(dev, TP_UNIT(gpc, tp, 0x088), id);
+ id++;
+ }
+
+ nv_wr32(dev, GPC_UNIT(gpc, 0x0c08), priv->tp_nr[gpc]);
+ nv_wr32(dev, GPC_UNIT(gpc, 0x0c8c), priv->tp_nr[gpc]);
+ }
+ }
+
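+ /* pack each GPC's TP count into its own 4-bit field */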
+ tmp = 0;
+ for (i = 0; i < priv->gpc_nr; i++)
+ tmp |= priv->tp_nr[i] << (i * 4);
+ nv_wr32(dev, 0x406028, tmp);
+ nv_wr32(dev, 0x405870, tmp);
+
+ nv_wr32(dev, 0x40602c, 0x00000000);
+ nv_wr32(dev, 0x405874, 0x00000000);
+ nv_wr32(dev, 0x406030, 0x00000000);
+ nv_wr32(dev, 0x405878, 0x00000000);
+ nv_wr32(dev, 0x406034, 0x00000000);
+ nv_wr32(dev, 0x40587c, 0x00000000);
+
+ if (1) {
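+ /* maximum TP count per chipset, indexed by the low nibble of
+ * the chipset id; only 0xc0/0xc3/0xc4 are known here */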
+ const u8 chipset_tp_max[] = { 16, 0, 0, 4, 8 };
+ u8 max = chipset_tp_max[dev_priv->chipset & 0x0f];
+ u8 tpnr[GPC_MAX];
+ u8 data[32];
+
+ memcpy(tpnr, priv->tp_nr, sizeof(priv->tp_nr));
+ memset(data, 0x1f, sizeof(data));
+
+ gpc = -1;
+ for (tp = 0; tp < priv->tp_total; tp++) {
+ do {
+ gpc = (gpc + 1) % priv->gpc_nr;
+ } while (!tpnr[gpc]);
+ tpnr[gpc]--;
+ data[tp] = gpc;
+ }
+
+ for (i = 0; i < max / 4; i++)
+ nv_wr32(dev, 0x4060a8 + (i * 4), ((u32 *)data)[i]);
+ }
+
+ if (1) {
+ u32 data[6] = {}, data2[2] = {};
+ u8 tpnr[GPC_MAX];
+ u8 shift, ntpcv;
+
+ /* calculate first set of magics */
+ memcpy(tpnr, priv->tp_nr, sizeof(priv->tp_nr));
+
+ for (tp = 0; tp < priv->tp_total; tp++) {
+ do {
+ gpc = (gpc + 1) % priv->gpc_nr;
+ } while (!tpnr[gpc]);
+ tpnr[gpc]--;
+
+ data[tp / 6] |= gpc << ((tp % 6) * 5);
+ }
+
+ for (; tp < 32; tp++)
+ data[tp / 6] |= 7 << ((tp % 6) * 5);
+
+ /* and the second... */
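+ /* normalise tp_total to a 5-bit value with bit 4 set, recording
+ * the number of shifts needed */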
+ shift = 0;
+ ntpcv = priv->tp_total;
+ while (!(ntpcv & (1 << 4))) {
+ ntpcv <<= 1;
+ shift++;
+ }
+
+ data2[0] = (ntpcv << 16);
+ data2[0] |= (shift << 21);
+ data2[0] |= (((1 << (0 + 5)) % ntpcv) << 24);
+ for (i = 1; i < 7; i++)
+ data2[1] |= ((1 << (i + 5)) % ntpcv) << ((i - 1) * 5);
+
+ /* GPC_BROADCAST */
+ nv_wr32(dev, 0x418bb8, (priv->tp_total << 8) |
+ priv->magic_not_rop_nr);
+ for (i = 0; i < 6; i++)
+ nv_wr32(dev, 0x418b08 + (i * 4), data[i]);
+
+ /* GPC_BROADCAST.TP_BROADCAST */
+ nv_wr32(dev, 0x419bd0, (priv->tp_total << 8) |
+ priv->magic_not_rop_nr |
+ data2[0]);
+ nv_wr32(dev, 0x419be4, data2[1]);
+ for (i = 0; i < 6; i++)
+ nv_wr32(dev, 0x419b00 + (i * 4), data[i]);
+
+ /* UNK78xx */
+ nv_wr32(dev, 0x4078bc, (priv->tp_total << 8) |
+ priv->magic_not_rop_nr);
+ for (i = 0; i < 6; i++)
+ nv_wr32(dev, 0x40780c + (i * 4), data[i]);
+ }
+
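+ /* spread the TPs over 32 screen-tile entries: 0x406800 takes the
+ * selected-TP mask, 0x406c00 its complement within tp_mask */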
+ if (1) {
+ u32 tp_mask = 0, tp_set = 0;
+ u8 tpnr[GPC_MAX];
+
+ memcpy(tpnr, priv->tp_nr, sizeof(priv->tp_nr));
+ for (gpc = 0; gpc < priv->gpc_nr; gpc++)
+ tp_mask |= ((1 << priv->tp_nr[gpc]) - 1) << (gpc * 8);
+
+ gpc = -1;
+ for (i = 0; i < 32; i++) {
+ int ltp = i * (priv->tp_total - 1) / 32;
+
+ do {
+ gpc = (gpc + 1) % priv->gpc_nr;
+ } while (!tpnr[gpc]);
+ tp = priv->tp_nr[gpc] - tpnr[gpc]--;
+
+ tp_set |= 1 << ((gpc * 8) + tp);
+
+ do {
+ nv_wr32(dev, 0x406800 + (i * 0x20), tp_set);
+ tp_set ^= tp_mask;
+ nv_wr32(dev, 0x406c00 + (i * 0x20), tp_set);
+ tp_set ^= tp_mask;
+ } while (ltp == (++i * (priv->tp_total - 1) / 32));
+ i--;
+ }
+ }
+
+ nv_wr32(dev, 0x400208, 0x80000000);
+
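+ /* default state written through PGRAPH's icmd interface, again
+ * presumably taken from a trace */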
+ nv_icmd(dev, 0x00001000, 0x00000004);
+ nv_icmd(dev, 0x000000a9, 0x0000ffff);
+ nv_icmd(dev, 0x00000038, 0x0fac6881);
+ nv_icmd(dev, 0x0000003d, 0x00000001);
+ nv_icmd(dev, 0x000000e8, 0x00000400);
+ nv_icmd(dev, 0x000000e9, 0x00000400);
+ nv_icmd(dev, 0x000000ea, 0x00000400);
+ nv_icmd(dev, 0x000000eb, 0x00000400);
+ nv_icmd(dev, 0x000000ec, 0x00000400);
+ nv_icmd(dev, 0x000000ed, 0x00000400);
+ nv_icmd(dev, 0x000000ee, 0x00000400);
+ nv_icmd(dev, 0x000000ef, 0x00000400);
+ nv_icmd(dev, 0x00000078, 0x00000300);
+ nv_icmd(dev, 0x00000079, 0x00000300);
+ nv_icmd(dev, 0x0000007a, 0x00000300);
+ nv_icmd(dev, 0x0000007b, 0x00000300);
+ nv_icmd(dev, 0x0000007c, 0x00000300);
+ nv_icmd(dev, 0x0000007d, 0x00000300);
+ nv_icmd(dev, 0x0000007e, 0x00000300);
+ nv_icmd(dev, 0x0000007f, 0x00000300);
+ nv_icmd(dev, 0x00000050, 0x00000011);
+ nv_icmd(dev, 0x00000058, 0x00000008);
+ nv_icmd(dev, 0x00000059, 0x00000008);
+ nv_icmd(dev, 0x0000005a, 0x00000008);
+ nv_icmd(dev, 0x0000005b, 0x00000008);
+ nv_icmd(dev, 0x0000005c, 0x00000008);
+ nv_icmd(dev, 0x0000005d, 0x00000008);
+ nv_icmd(dev, 0x0000005e, 0x00000008);
+ nv_icmd(dev, 0x0000005f, 0x00000008);
+ nv_icmd(dev, 0x00000208, 0x00000001);
+ nv_icmd(dev, 0x00000209, 0x00000001);
+ nv_icmd(dev, 0x0000020a, 0x00000001);
+ nv_icmd(dev, 0x0000020b, 0x00000001);
+ nv_icmd(dev, 0x0000020c, 0x00000001);
+ nv_icmd(dev, 0x0000020d, 0x00000001);
+ nv_icmd(dev, 0x0000020e, 0x00000001);
+ nv_icmd(dev, 0x0000020f, 0x00000001);
+ nv_icmd(dev, 0x00000081, 0x00000001);
+ nv_icmd(dev, 0x00000085, 0x00000004);
+ nv_icmd(dev, 0x00000088, 0x00000400);
+ nv_icmd(dev, 0x00000090, 0x00000300);
+ nv_icmd(dev, 0x00000098, 0x00001001);
+ nv_icmd(dev, 0x000000e3, 0x00000001);
+ nv_icmd(dev, 0x000000da, 0x00000001);
+ nv_icmd(dev, 0x000000f8, 0x00000003);
+ nv_icmd(dev, 0x000000fa, 0x00000001);
+ nv_icmd(dev, 0x0000009f, 0x0000ffff);
+ nv_icmd(dev, 0x000000a0, 0x0000ffff);
+ nv_icmd(dev, 0x000000a1, 0x0000ffff);
+ nv_icmd(dev, 0x000000a2, 0x0000ffff);
+ nv_icmd(dev, 0x000000b1, 0x00000001);
+ nv_icmd(dev, 0x000000b2, 0x00000000);
+ nv_icmd(dev, 0x000000b3, 0x00000000);
+ nv_icmd(dev, 0x000000b4, 0x00000000);
+ nv_icmd(dev, 0x000000b5, 0x00000000);
+ nv_icmd(dev, 0x000000b6, 0x00000000);
+ nv_icmd(dev, 0x000000b7, 0x00000000);
+ nv_icmd(dev, 0x000000b8, 0x00000000);
+ nv_icmd(dev, 0x000000b9, 0x00000000);
+ nv_icmd(dev, 0x000000ba, 0x00000000);
+ nv_icmd(dev, 0x000000bb, 0x00000000);
+ nv_icmd(dev, 0x000000bc, 0x00000000);
+ nv_icmd(dev, 0x000000bd, 0x00000000);
+ nv_icmd(dev, 0x000000be, 0x00000000);
+ nv_icmd(dev, 0x000000bf, 0x00000000);
+ nv_icmd(dev, 0x000000c0, 0x00000000);
+ nv_icmd(dev, 0x000000c1, 0x00000000);
+ nv_icmd(dev, 0x000000c2, 0x00000000);
+ nv_icmd(dev, 0x000000c3, 0x00000000);
+ nv_icmd(dev, 0x000000c4, 0x00000000);
+ nv_icmd(dev, 0x000000c5, 0x00000000);
+ nv_icmd(dev, 0x000000c6, 0x00000000);
+ nv_icmd(dev, 0x000000c7, 0x00000000);
+ nv_icmd(dev, 0x000000c8, 0x00000000);
+ nv_icmd(dev, 0x000000c9, 0x00000000);
+ nv_icmd(dev, 0x000000ca, 0x00000000);
+ nv_icmd(dev, 0x000000cb, 0x00000000);
+ nv_icmd(dev, 0x000000cc, 0x00000000);
+ nv_icmd(dev, 0x000000cd, 0x00000000);
+ nv_icmd(dev, 0x000000ce, 0x00000000);
+ nv_icmd(dev, 0x000000cf, 0x00000000);
+ nv_icmd(dev, 0x000000d0, 0x00000000);
+ nv_icmd(dev, 0x000000d1, 0x00000000);
+ nv_icmd(dev, 0x000000d2, 0x00000000);
+ nv_icmd(dev, 0x000000d3, 0x00000000);
+ nv_icmd(dev, 0x000000d4, 0x00000000);
+ nv_icmd(dev, 0x000000d5, 0x00000000);
+ nv_icmd(dev, 0x000000d6, 0x00000000);
+ nv_icmd(dev, 0x000000d7, 0x00000000);
+ nv_icmd(dev, 0x000000d8, 0x00000000);
+ nv_icmd(dev, 0x000000d9, 0x00000000);
+ nv_icmd(dev, 0x00000210, 0x00000040);
+ nv_icmd(dev, 0x00000211, 0x00000040);
+ nv_icmd(dev, 0x00000212, 0x00000040);
+ nv_icmd(dev, 0x00000213, 0x00000040);
+ nv_icmd(dev, 0x00000214, 0x00000040);
+ nv_icmd(dev, 0x00000215, 0x00000040);
+ nv_icmd(dev, 0x00000216, 0x00000040);
+ nv_icmd(dev, 0x00000217, 0x00000040);
+ nv_icmd(dev, 0x00000218, 0x0000c080);
+ nv_icmd(dev, 0x00000219, 0x0000c080);
+ nv_icmd(dev, 0x0000021a, 0x0000c080);
+ nv_icmd(dev, 0x0000021b, 0x0000c080);
+ nv_icmd(dev, 0x0000021c, 0x0000c080);
+ nv_icmd(dev, 0x0000021d, 0x0000c080);
+ nv_icmd(dev, 0x0000021e, 0x0000c080);
+ nv_icmd(dev, 0x0000021f, 0x0000c080);
+ nv_icmd(dev, 0x000000ad, 0x0000013e);
+ nv_icmd(dev, 0x000000e1, 0x00000010);
+ nv_icmd(dev, 0x00000290, 0x00000000);
+ nv_icmd(dev, 0x00000291, 0x00000000);
+ nv_icmd(dev, 0x00000292, 0x00000000);
+ nv_icmd(dev, 0x00000293, 0x00000000);
+ nv_icmd(dev, 0x00000294, 0x00000000);
+ nv_icmd(dev, 0x00000295, 0x00000000);
+ nv_icmd(dev, 0x00000296, 0x00000000);
+ nv_icmd(dev, 0x00000297, 0x00000000);
+ nv_icmd(dev, 0x00000298, 0x00000000);
+ nv_icmd(dev, 0x00000299, 0x00000000);
+ nv_icmd(dev, 0x0000029a, 0x00000000);
+ nv_icmd(dev, 0x0000029b, 0x00000000);
+ nv_icmd(dev, 0x0000029c, 0x00000000);
+ nv_icmd(dev, 0x0000029d, 0x00000000);
+ nv_icmd(dev, 0x0000029e, 0x00000000);
+ nv_icmd(dev, 0x0000029f, 0x00000000);
+ nv_icmd(dev, 0x000003b0, 0x00000000);
+ nv_icmd(dev, 0x000003b1, 0x00000000);
+ nv_icmd(dev, 0x000003b2, 0x00000000);
+ nv_icmd(dev, 0x000003b3, 0x00000000);
+ nv_icmd(dev, 0x000003b4, 0x00000000);
+ nv_icmd(dev, 0x000003b5, 0x00000000);
+ nv_icmd(dev, 0x000003b6, 0x00000000);
+ nv_icmd(dev, 0x000003b7, 0x00000000);
+ nv_icmd(dev, 0x000003b8, 0x00000000);
+ nv_icmd(dev, 0x000003b9, 0x00000000);
+ nv_icmd(dev, 0x000003ba, 0x00000000);
+ nv_icmd(dev, 0x000003bb, 0x00000000);
+ nv_icmd(dev, 0x000003bc, 0x00000000);
+ nv_icmd(dev, 0x000003bd, 0x00000000);
+ nv_icmd(dev, 0x000003be, 0x00000000);
+ nv_icmd(dev, 0x000003bf, 0x00000000);
+ nv_icmd(dev, 0x000002a0, 0x00000000);
+ nv_icmd(dev, 0x000002a1, 0x00000000);
+ nv_icmd(dev, 0x000002a2, 0x00000000);
+ nv_icmd(dev, 0x000002a3, 0x00000000);
+ nv_icmd(dev, 0x000002a4, 0x00000000);
+ nv_icmd(dev, 0x000002a5, 0x00000000);
+ nv_icmd(dev, 0x000002a6, 0x00000000);
+ nv_icmd(dev, 0x000002a7, 0x00000000);
+ nv_icmd(dev, 0x000002a8, 0x00000000);
+ nv_icmd(dev, 0x000002a9, 0x00000000);
+ nv_icmd(dev, 0x000002aa, 0x00000000);
+ nv_icmd(dev, 0x000002ab, 0x00000000);
+ nv_icmd(dev, 0x000002ac, 0x00000000);
+ nv_icmd(dev, 0x000002ad, 0x00000000);
+ nv_icmd(dev, 0x000002ae, 0x00000000);
+ nv_icmd(dev, 0x000002af, 0x00000000);
+ nv_icmd(dev, 0x00000420, 0x00000000);
+ nv_icmd(dev, 0x00000421, 0x00000000);
+ nv_icmd(dev, 0x00000422, 0x00000000);
+ nv_icmd(dev, 0x00000423, 0x00000000);
+ nv_icmd(dev, 0x00000424, 0x00000000);
+ nv_icmd(dev, 0x00000425, 0x00000000);
+ nv_icmd(dev, 0x00000426, 0x00000000);
+ nv_icmd(dev, 0x00000427, 0x00000000);
+ nv_icmd(dev, 0x00000428, 0x00000000);
+ nv_icmd(dev, 0x00000429, 0x00000000);
+ nv_icmd(dev, 0x0000042a, 0x00000000);
+ nv_icmd(dev, 0x0000042b, 0x00000000);
+ nv_icmd(dev, 0x0000042c, 0x00000000);
+ nv_icmd(dev, 0x0000042d, 0x00000000);
+ nv_icmd(dev, 0x0000042e, 0x00000000);
+ nv_icmd(dev, 0x0000042f, 0x00000000);
+ nv_icmd(dev, 0x000002b0, 0x00000000);
+ nv_icmd(dev, 0x000002b1, 0x00000000);
+ nv_icmd(dev, 0x000002b2, 0x00000000);
+ nv_icmd(dev, 0x000002b3, 0x00000000);
+ nv_icmd(dev, 0x000002b4, 0x00000000);
+ nv_icmd(dev, 0x000002b5, 0x00000000);
+ nv_icmd(dev, 0x000002b6, 0x00000000);
+ nv_icmd(dev, 0x000002b7, 0x00000000);
+ nv_icmd(dev, 0x000002b8, 0x00000000);
+ nv_icmd(dev, 0x000002b9, 0x00000000);
+ nv_icmd(dev, 0x000002ba, 0x00000000);
+ nv_icmd(dev, 0x000002bb, 0x00000000);
+ nv_icmd(dev, 0x000002bc, 0x00000000);
+ nv_icmd(dev, 0x000002bd, 0x00000000);
+ nv_icmd(dev, 0x000002be, 0x00000000);
+ nv_icmd(dev, 0x000002bf, 0x00000000);
+ nv_icmd(dev, 0x00000430, 0x00000000);
+ nv_icmd(dev, 0x00000431, 0x00000000);
+ nv_icmd(dev, 0x00000432, 0x00000000);
+ nv_icmd(dev, 0x00000433, 0x00000000);
+ nv_icmd(dev, 0x00000434, 0x00000000);
+ nv_icmd(dev, 0x00000435, 0x00000000);
+ nv_icmd(dev, 0x00000436, 0x00000000);
+ nv_icmd(dev, 0x00000437, 0x00000000);
+ nv_icmd(dev, 0x00000438, 0x00000000);
+ nv_icmd(dev, 0x00000439, 0x00000000);
+ nv_icmd(dev, 0x0000043a, 0x00000000);
+ nv_icmd(dev, 0x0000043b, 0x00000000);
+ nv_icmd(dev, 0x0000043c, 0x00000000);
+ nv_icmd(dev, 0x0000043d, 0x00000000);
+ nv_icmd(dev, 0x0000043e, 0x00000000);
+ nv_icmd(dev, 0x0000043f, 0x00000000);
+ nv_icmd(dev, 0x000002c0, 0x00000000);
+ nv_icmd(dev, 0x000002c1, 0x00000000);
+ nv_icmd(dev, 0x000002c2, 0x00000000);
+ nv_icmd(dev, 0x000002c3, 0x00000000);
+ nv_icmd(dev, 0x000002c4, 0x00000000);
+ nv_icmd(dev, 0x000002c5, 0x00000000);
+ nv_icmd(dev, 0x000002c6, 0x00000000);
+ nv_icmd(dev, 0x000002c7, 0x00000000);
+ nv_icmd(dev, 0x000002c8, 0x00000000);
+ nv_icmd(dev, 0x000002c9, 0x00000000);
+ nv_icmd(dev, 0x000002ca, 0x00000000);
+ nv_icmd(dev, 0x000002cb, 0x00000000);
+ nv_icmd(dev, 0x000002cc, 0x00000000);
+ nv_icmd(dev, 0x000002cd, 0x00000000);
+ nv_icmd(dev, 0x000002ce, 0x00000000);
+ nv_icmd(dev, 0x000002cf, 0x00000000);
+ nv_icmd(dev, 0x000004d0, 0x00000000);
+ nv_icmd(dev, 0x000004d1, 0x00000000);
+ nv_icmd(dev, 0x000004d2, 0x00000000);
+ nv_icmd(dev, 0x000004d3, 0x00000000);
+ nv_icmd(dev, 0x000004d4, 0x00000000);
+ nv_icmd(dev, 0x000004d5, 0x00000000);
+ nv_icmd(dev, 0x000004d6, 0x00000000);
+ nv_icmd(dev, 0x000004d7, 0x00000000);
+ nv_icmd(dev, 0x000004d8, 0x00000000);
+ nv_icmd(dev, 0x000004d9, 0x00000000);
+ nv_icmd(dev, 0x000004da, 0x00000000);
+ nv_icmd(dev, 0x000004db, 0x00000000);
+ nv_icmd(dev, 0x000004dc, 0x00000000);
+ nv_icmd(dev, 0x000004dd, 0x00000000);
+ nv_icmd(dev, 0x000004de, 0x00000000);
+ nv_icmd(dev, 0x000004df, 0x00000000);
+ nv_icmd(dev, 0x00000720, 0x00000000);
+ nv_icmd(dev, 0x00000721, 0x00000000);
+ nv_icmd(dev, 0x00000722, 0x00000000);
+ nv_icmd(dev, 0x00000723, 0x00000000);
+ nv_icmd(dev, 0x00000724, 0x00000000);
+ nv_icmd(dev, 0x00000725, 0x00000000);
+ nv_icmd(dev, 0x00000726, 0x00000000);
+ nv_icmd(dev, 0x00000727, 0x00000000);
+ nv_icmd(dev, 0x00000728, 0x00000000);
+ nv_icmd(dev, 0x00000729, 0x00000000);
+ nv_icmd(dev, 0x0000072a, 0x00000000);
+ nv_icmd(dev, 0x0000072b, 0x00000000);
+ nv_icmd(dev, 0x0000072c, 0x00000000);
+ nv_icmd(dev, 0x0000072d, 0x00000000);
+ nv_icmd(dev, 0x0000072e, 0x00000000);
+ nv_icmd(dev, 0x0000072f, 0x00000000);
+ nv_icmd(dev, 0x000008c0, 0x00000000);
+ nv_icmd(dev, 0x000008c1, 0x00000000);
+ nv_icmd(dev, 0x000008c2, 0x00000000);
+ nv_icmd(dev, 0x000008c3, 0x00000000);
+ nv_icmd(dev, 0x000008c4, 0x00000000);
+ nv_icmd(dev, 0x000008c5, 0x00000000);
+ nv_icmd(dev, 0x000008c6, 0x00000000);
+ nv_icmd(dev, 0x000008c7, 0x00000000);
+ nv_icmd(dev, 0x000008c8, 0x00000000);
+ nv_icmd(dev, 0x000008c9, 0x00000000);
+ nv_icmd(dev, 0x000008ca, 0x00000000);
+ nv_icmd(dev, 0x000008cb, 0x00000000);
+ nv_icmd(dev, 0x000008cc, 0x00000000);
+ nv_icmd(dev, 0x000008cd, 0x00000000);
+ nv_icmd(dev, 0x000008ce, 0x00000000);
+ nv_icmd(dev, 0x000008cf, 0x00000000);
+ nv_icmd(dev, 0x00000890, 0x00000000);
+ nv_icmd(dev, 0x00000891, 0x00000000);
+ nv_icmd(dev, 0x00000892, 0x00000000);
+ nv_icmd(dev, 0x00000893, 0x00000000);
+ nv_icmd(dev, 0x00000894, 0x00000000);
+ nv_icmd(dev, 0x00000895, 0x00000000);
+ nv_icmd(dev, 0x00000896, 0x00000000);
+ nv_icmd(dev, 0x00000897, 0x00000000);
+ nv_icmd(dev, 0x00000898, 0x00000000);
+ nv_icmd(dev, 0x00000899, 0x00000000);
+ nv_icmd(dev, 0x0000089a, 0x00000000);
+ nv_icmd(dev, 0x0000089b, 0x00000000);
+ nv_icmd(dev, 0x0000089c, 0x00000000);
+ nv_icmd(dev, 0x0000089d, 0x00000000);
+ nv_icmd(dev, 0x0000089e, 0x00000000);
+ nv_icmd(dev, 0x0000089f, 0x00000000);
+ nv_icmd(dev, 0x000008e0, 0x00000000);
+ nv_icmd(dev, 0x000008e1, 0x00000000);
+ nv_icmd(dev, 0x000008e2, 0x00000000);
+ nv_icmd(dev, 0x000008e3, 0x00000000);
+ nv_icmd(dev, 0x000008e4, 0x00000000);
+ nv_icmd(dev, 0x000008e5, 0x00000000);
+ nv_icmd(dev, 0x000008e6, 0x00000000);
+ nv_icmd(dev, 0x000008e7, 0x00000000);
+ nv_icmd(dev, 0x000008e8, 0x00000000);
+ nv_icmd(dev, 0x000008e9, 0x00000000);
+ nv_icmd(dev, 0x000008ea, 0x00000000);
+ nv_icmd(dev, 0x000008eb, 0x00000000);
+ nv_icmd(dev, 0x000008ec, 0x00000000);
+ nv_icmd(dev, 0x000008ed, 0x00000000);
+ nv_icmd(dev, 0x000008ee, 0x00000000);
+ nv_icmd(dev, 0x000008ef, 0x00000000);
+ nv_icmd(dev, 0x000008a0, 0x00000000);
+ nv_icmd(dev, 0x000008a1, 0x00000000);
+ nv_icmd(dev, 0x000008a2, 0x00000000);
+ nv_icmd(dev, 0x000008a3, 0x00000000);
+ nv_icmd(dev, 0x000008a4, 0x00000000);
+ nv_icmd(dev, 0x000008a5, 0x00000000);
+ nv_icmd(dev, 0x000008a6, 0x00000000);
+ nv_icmd(dev, 0x000008a7, 0x00000000);
+ nv_icmd(dev, 0x000008a8, 0x00000000);
+ nv_icmd(dev, 0x000008a9, 0x00000000);
+ nv_icmd(dev, 0x000008aa, 0x00000000);
+ nv_icmd(dev, 0x000008ab, 0x00000000);
+ nv_icmd(dev, 0x000008ac, 0x00000000);
+ nv_icmd(dev, 0x000008ad, 0x00000000);
+ nv_icmd(dev, 0x000008ae, 0x00000000);
+ nv_icmd(dev, 0x000008af, 0x00000000);
+ nv_icmd(dev, 0x000008f0, 0x00000000);
+ nv_icmd(dev, 0x000008f1, 0x00000000);
+ nv_icmd(dev, 0x000008f2, 0x00000000);
+ nv_icmd(dev, 0x000008f3, 0x00000000);
+ nv_icmd(dev, 0x000008f4, 0x00000000);
+ nv_icmd(dev, 0x000008f5, 0x00000000);
+ nv_icmd(dev, 0x000008f6, 0x00000000);
+ nv_icmd(dev, 0x000008f7, 0x00000000);
+ nv_icmd(dev, 0x000008f8, 0x00000000);
+ nv_icmd(dev, 0x000008f9, 0x00000000);
+ nv_icmd(dev, 0x000008fa, 0x00000000);
+ nv_icmd(dev, 0x000008fb, 0x00000000);
+ nv_icmd(dev, 0x000008fc, 0x00000000);
+ nv_icmd(dev, 0x000008fd, 0x00000000);
+ nv_icmd(dev, 0x000008fe, 0x00000000);
+ nv_icmd(dev, 0x000008ff, 0x00000000);
+ nv_icmd(dev, 0x0000094c, 0x000000ff);
+ nv_icmd(dev, 0x0000094d, 0xffffffff);
+ nv_icmd(dev, 0x0000094e, 0x00000002);
+ nv_icmd(dev, 0x000002ec, 0x00000001);
+ nv_icmd(dev, 0x00000303, 0x00000001);
+ nv_icmd(dev, 0x000002e6, 0x00000001);
+ nv_icmd(dev, 0x00000466, 0x00000052);
+ nv_icmd(dev, 0x00000301, 0x3f800000);
+ nv_icmd(dev, 0x00000304, 0x30201000);
+ nv_icmd(dev, 0x00000305, 0x70605040);
+ nv_icmd(dev, 0x00000306, 0xb8a89888);
+ nv_icmd(dev, 0x00000307, 0xf8e8d8c8);
+ nv_icmd(dev, 0x0000030a, 0x00ffff00);
+ nv_icmd(dev, 0x0000030b, 0x0000001a);
+ nv_icmd(dev, 0x0000030c, 0x00000001);
+ nv_icmd(dev, 0x00000318, 0x00000001);
+ nv_icmd(dev, 0x00000340, 0x00000000);
+ nv_icmd(dev, 0x00000375, 0x00000001);
+ nv_icmd(dev, 0x00000351, 0x00000100);
+ nv_icmd(dev, 0x0000037d, 0x00000006);
+ nv_icmd(dev, 0x000003a0, 0x00000002);
+ nv_icmd(dev, 0x000003aa, 0x00000001);
+ nv_icmd(dev, 0x000003a9, 0x00000001);
+ nv_icmd(dev, 0x00000380, 0x00000001);
+ nv_icmd(dev, 0x00000360, 0x00000040);
+ nv_icmd(dev, 0x00000366, 0x00000000);
+ nv_icmd(dev, 0x00000367, 0x00000000);
+ nv_icmd(dev, 0x00000368, 0x00001fff);
+ nv_icmd(dev, 0x00000370, 0x00000000);
+ nv_icmd(dev, 0x00000371, 0x00000000);
+ nv_icmd(dev, 0x00000372, 0x003fffff);
+ nv_icmd(dev, 0x0000037a, 0x00000012);
+ nv_icmd(dev, 0x000005e0, 0x00000022);
+ nv_icmd(dev, 0x000005e1, 0x00000022);
+ nv_icmd(dev, 0x000005e2, 0x00000022);
+ nv_icmd(dev, 0x000005e3, 0x00000022);
+ nv_icmd(dev, 0x000005e4, 0x00000022);
+ nv_icmd(dev, 0x00000619, 0x00000003);
+ nv_icmd(dev, 0x00000811, 0x00000003);
+ nv_icmd(dev, 0x00000812, 0x00000004);
+ nv_icmd(dev, 0x00000813, 0x00000006);
+ nv_icmd(dev, 0x00000814, 0x00000008);
+ nv_icmd(dev, 0x00000815, 0x0000000b);
+ nv_icmd(dev, 0x00000800, 0x00000001);
+ nv_icmd(dev, 0x00000801, 0x00000001);
+ nv_icmd(dev, 0x00000802, 0x00000001);
+ nv_icmd(dev, 0x00000803, 0x00000001);
+ nv_icmd(dev, 0x00000804, 0x00000001);
+ nv_icmd(dev, 0x00000805, 0x00000001);
+ nv_icmd(dev, 0x00000632, 0x00000001);
+ nv_icmd(dev, 0x00000633, 0x00000002);
+ nv_icmd(dev, 0x00000634, 0x00000003);
+ nv_icmd(dev, 0x00000635, 0x00000004);
+ nv_icmd(dev, 0x00000654, 0x3f800000);
+ nv_icmd(dev, 0x00000657, 0x3f800000);
+ nv_icmd(dev, 0x00000655, 0x3f800000);
+ nv_icmd(dev, 0x00000656, 0x3f800000);
+ nv_icmd(dev, 0x000006cd, 0x3f800000);
+ nv_icmd(dev, 0x000007f5, 0x3f800000);
+ nv_icmd(dev, 0x000007dc, 0x39291909);
+ nv_icmd(dev, 0x000007dd, 0x79695949);
+ nv_icmd(dev, 0x000007de, 0xb9a99989);
+ nv_icmd(dev, 0x000007df, 0xf9e9d9c9);
+ nv_icmd(dev, 0x000007e8, 0x00003210);
+ nv_icmd(dev, 0x000007e9, 0x00007654);
+ nv_icmd(dev, 0x000007ea, 0x00000098);
+ nv_icmd(dev, 0x000007ec, 0x39291909);
+ nv_icmd(dev, 0x000007ed, 0x79695949);
+ nv_icmd(dev, 0x000007ee, 0xb9a99989);
+ nv_icmd(dev, 0x000007ef, 0xf9e9d9c9);
+ nv_icmd(dev, 0x000007f0, 0x00003210);
+ nv_icmd(dev, 0x000007f1, 0x00007654);
+ nv_icmd(dev, 0x000007f2, 0x00000098);
+ nv_icmd(dev, 0x000005a5, 0x00000001);
+ nv_icmd(dev, 0x00000980, 0x00000000);
+ nv_icmd(dev, 0x00000981, 0x00000000);
+ nv_icmd(dev, 0x00000982, 0x00000000);
+ nv_icmd(dev, 0x00000983, 0x00000000);
+ nv_icmd(dev, 0x00000984, 0x00000000);
+ nv_icmd(dev, 0x00000985, 0x00000000);
+ nv_icmd(dev, 0x00000986, 0x00000000);
+ nv_icmd(dev, 0x00000987, 0x00000000);
+ nv_icmd(dev, 0x00000988, 0x00000000);
+ nv_icmd(dev, 0x00000989, 0x00000000);
+ nv_icmd(dev, 0x0000098a, 0x00000000);
+ nv_icmd(dev, 0x0000098b, 0x00000000);
+ nv_icmd(dev, 0x0000098c, 0x00000000);
+ nv_icmd(dev, 0x0000098d, 0x00000000);
+ nv_icmd(dev, 0x0000098e, 0x00000000);
+ nv_icmd(dev, 0x0000098f, 0x00000000);
+ nv_icmd(dev, 0x00000990, 0x00000000);
+ nv_icmd(dev, 0x00000991, 0x00000000);
+ nv_icmd(dev, 0x00000992, 0x00000000);
+ nv_icmd(dev, 0x00000993, 0x00000000);
+ nv_icmd(dev, 0x00000994, 0x00000000);
+ nv_icmd(dev, 0x00000995, 0x00000000);
+ nv_icmd(dev, 0x00000996, 0x00000000);
+ nv_icmd(dev, 0x00000997, 0x00000000);
+ nv_icmd(dev, 0x00000998, 0x00000000);
+ nv_icmd(dev, 0x00000999, 0x00000000);
+ nv_icmd(dev, 0x0000099a, 0x00000000);
+ nv_icmd(dev, 0x0000099b, 0x00000000);
+ nv_icmd(dev, 0x0000099c, 0x00000000);
+ nv_icmd(dev, 0x0000099d, 0x00000000);
+ nv_icmd(dev, 0x0000099e, 0x00000000);
+ nv_icmd(dev, 0x0000099f, 0x00000000);
+ nv_icmd(dev, 0x000009a0, 0x00000000);
+ nv_icmd(dev, 0x000009a1, 0x00000000);
+ nv_icmd(dev, 0x000009a2, 0x00000000);
+ nv_icmd(dev, 0x000009a3, 0x00000000);
+ nv_icmd(dev, 0x000009a4, 0x00000000);
+ nv_icmd(dev, 0x000009a5, 0x00000000);
+ nv_icmd(dev, 0x000009a6, 0x00000000);
+ nv_icmd(dev, 0x000009a7, 0x00000000);
+ nv_icmd(dev, 0x000009a8, 0x00000000);
+ nv_icmd(dev, 0x000009a9, 0x00000000);
+ nv_icmd(dev, 0x000009aa, 0x00000000);
+ nv_icmd(dev, 0x000009ab, 0x00000000);
+ nv_icmd(dev, 0x000009ac, 0x00000000);
+ nv_icmd(dev, 0x000009ad, 0x00000000);
+ nv_icmd(dev, 0x000009ae, 0x00000000);
+ nv_icmd(dev, 0x000009af, 0x00000000);
+ nv_icmd(dev, 0x000009b0, 0x00000000);
+ nv_icmd(dev, 0x000009b1, 0x00000000);
+ nv_icmd(dev, 0x000009b2, 0x00000000);
+ nv_icmd(dev, 0x000009b3, 0x00000000);
+ nv_icmd(dev, 0x000009b4, 0x00000000);
+ nv_icmd(dev, 0x000009b5, 0x00000000);
+ nv_icmd(dev, 0x000009b6, 0x00000000);
+ nv_icmd(dev, 0x000009b7, 0x00000000);
+ nv_icmd(dev, 0x000009b8, 0x00000000);
+ nv_icmd(dev, 0x000009b9, 0x00000000);
+ nv_icmd(dev, 0x000009ba, 0x00000000);
+ nv_icmd(dev, 0x000009bb, 0x00000000);
+ nv_icmd(dev, 0x000009bc, 0x00000000);
+ nv_icmd(dev, 0x000009bd, 0x00000000);
+ nv_icmd(dev, 0x000009be, 0x00000000);
+ nv_icmd(dev, 0x000009bf, 0x00000000);
+ nv_icmd(dev, 0x000009c0, 0x00000000);
+ nv_icmd(dev, 0x000009c1, 0x00000000);
+ nv_icmd(dev, 0x000009c2, 0x00000000);
+ nv_icmd(dev, 0x000009c3, 0x00000000);
+ nv_icmd(dev, 0x000009c4, 0x00000000);
+ nv_icmd(dev, 0x000009c5, 0x00000000);
+ nv_icmd(dev, 0x000009c6, 0x00000000);
+ nv_icmd(dev, 0x000009c7, 0x00000000);
+ nv_icmd(dev, 0x000009c8, 0x00000000);
+ nv_icmd(dev, 0x000009c9, 0x00000000);
+ nv_icmd(dev, 0x000009ca, 0x00000000);
+ nv_icmd(dev, 0x000009cb, 0x00000000);
+ nv_icmd(dev, 0x000009cc, 0x00000000);
+ nv_icmd(dev, 0x000009cd, 0x00000000);
+ nv_icmd(dev, 0x000009ce, 0x00000000);
+ nv_icmd(dev, 0x000009cf, 0x00000000);
+ nv_icmd(dev, 0x000009d0, 0x00000000);
+ nv_icmd(dev, 0x000009d1, 0x00000000);
+ nv_icmd(dev, 0x000009d2, 0x00000000);
+ nv_icmd(dev, 0x000009d3, 0x00000000);
+ nv_icmd(dev, 0x000009d4, 0x00000000);
+ nv_icmd(dev, 0x000009d5, 0x00000000);
+ nv_icmd(dev, 0x000009d6, 0x00000000);
+ nv_icmd(dev, 0x000009d7, 0x00000000);
+ nv_icmd(dev, 0x000009d8, 0x00000000);
+ nv_icmd(dev, 0x000009d9, 0x00000000);
+ nv_icmd(dev, 0x000009da, 0x00000000);
+ nv_icmd(dev, 0x000009db, 0x00000000);
+ nv_icmd(dev, 0x000009dc, 0x00000000);
+ nv_icmd(dev, 0x000009dd, 0x00000000);
+ nv_icmd(dev, 0x000009de, 0x00000000);
+ nv_icmd(dev, 0x000009df, 0x00000000);
+ nv_icmd(dev, 0x000009e0, 0x00000000);
+ nv_icmd(dev, 0x000009e1, 0x00000000);
+ nv_icmd(dev, 0x000009e2, 0x00000000);
+ nv_icmd(dev, 0x000009e3, 0x00000000);
+ nv_icmd(dev, 0x000009e4, 0x00000000);
+ nv_icmd(dev, 0x000009e5, 0x00000000);
+ nv_icmd(dev, 0x000009e6, 0x00000000);
+ nv_icmd(dev, 0x000009e7, 0x00000000);
+ nv_icmd(dev, 0x000009e8, 0x00000000);
+ nv_icmd(dev, 0x000009e9, 0x00000000);
+ nv_icmd(dev, 0x000009ea, 0x00000000);
+ nv_icmd(dev, 0x000009eb, 0x00000000);
+ nv_icmd(dev, 0x000009ec, 0x00000000);
+ nv_icmd(dev, 0x000009ed, 0x00000000);
+ nv_icmd(dev, 0x000009ee, 0x00000000);
+ nv_icmd(dev, 0x000009ef, 0x00000000);
+ nv_icmd(dev, 0x000009f0, 0x00000000);
+ nv_icmd(dev, 0x000009f1, 0x00000000);
+ nv_icmd(dev, 0x000009f2, 0x00000000);
+ nv_icmd(dev, 0x000009f3, 0x00000000);
+ nv_icmd(dev, 0x000009f4, 0x00000000);
+ nv_icmd(dev, 0x000009f5, 0x00000000);
+ nv_icmd(dev, 0x000009f6, 0x00000000);
+ nv_icmd(dev, 0x000009f7, 0x00000000);
+ nv_icmd(dev, 0x000009f8, 0x00000000);
+ nv_icmd(dev, 0x000009f9, 0x00000000);
+ nv_icmd(dev, 0x000009fa, 0x00000000);
+ nv_icmd(dev, 0x000009fb, 0x00000000);
+ nv_icmd(dev, 0x000009fc, 0x00000000);
+ nv_icmd(dev, 0x000009fd, 0x00000000);
+ nv_icmd(dev, 0x000009fe, 0x00000000);
+ nv_icmd(dev, 0x000009ff, 0x00000000);
+ nv_icmd(dev, 0x00000468, 0x00000004);
+ nv_icmd(dev, 0x0000046c, 0x00000001);
+ nv_icmd(dev, 0x00000470, 0x00000000);
+ nv_icmd(dev, 0x00000471, 0x00000000);
+ nv_icmd(dev, 0x00000472, 0x00000000);
+ nv_icmd(dev, 0x00000473, 0x00000000);
+ nv_icmd(dev, 0x00000474, 0x00000000);
+ nv_icmd(dev, 0x00000475, 0x00000000);
+ nv_icmd(dev, 0x00000476, 0x00000000);
+ nv_icmd(dev, 0x00000477, 0x00000000);
+ nv_icmd(dev, 0x00000478, 0x00000000);
+ nv_icmd(dev, 0x00000479, 0x00000000);
+ nv_icmd(dev, 0x0000047a, 0x00000000);
+ nv_icmd(dev, 0x0000047b, 0x00000000);
+ nv_icmd(dev, 0x0000047c, 0x00000000);
+ nv_icmd(dev, 0x0000047d, 0x00000000);
+ nv_icmd(dev, 0x0000047e, 0x00000000);
+ nv_icmd(dev, 0x0000047f, 0x00000000);
+ nv_icmd(dev, 0x00000480, 0x00000000);
+ nv_icmd(dev, 0x00000481, 0x00000000);
+ nv_icmd(dev, 0x00000482, 0x00000000);
+ nv_icmd(dev, 0x00000483, 0x00000000);
+ nv_icmd(dev, 0x00000484, 0x00000000);
+ nv_icmd(dev, 0x00000485, 0x00000000);
+ nv_icmd(dev, 0x00000486, 0x00000000);
+ nv_icmd(dev, 0x00000487, 0x00000000);
+ nv_icmd(dev, 0x00000488, 0x00000000);
+ nv_icmd(dev, 0x00000489, 0x00000000);
+ nv_icmd(dev, 0x0000048a, 0x00000000);
+ nv_icmd(dev, 0x0000048b, 0x00000000);
+ nv_icmd(dev, 0x0000048c, 0x00000000);
+ nv_icmd(dev, 0x0000048d, 0x00000000);
+ nv_icmd(dev, 0x0000048e, 0x00000000);
+ nv_icmd(dev, 0x0000048f, 0x00000000);
+ nv_icmd(dev, 0x00000490, 0x00000000);
+ nv_icmd(dev, 0x00000491, 0x00000000);
+ nv_icmd(dev, 0x00000492, 0x00000000);
+ nv_icmd(dev, 0x00000493, 0x00000000);
+ nv_icmd(dev, 0x00000494, 0x00000000);
+ nv_icmd(dev, 0x00000495, 0x00000000);
+ nv_icmd(dev, 0x00000496, 0x00000000);
+ nv_icmd(dev, 0x00000497, 0x00000000);
+ nv_icmd(dev, 0x00000498, 0x00000000);
+ nv_icmd(dev, 0x00000499, 0x00000000);
+ nv_icmd(dev, 0x0000049a, 0x00000000);
+ nv_icmd(dev, 0x0000049b, 0x00000000);
+ nv_icmd(dev, 0x0000049c, 0x00000000);
+ nv_icmd(dev, 0x0000049d, 0x00000000);
+ nv_icmd(dev, 0x0000049e, 0x00000000);
+ nv_icmd(dev, 0x0000049f, 0x00000000);
+ nv_icmd(dev, 0x000004a0, 0x00000000);
+ nv_icmd(dev, 0x000004a1, 0x00000000);
+ nv_icmd(dev, 0x000004a2, 0x00000000);
+ nv_icmd(dev, 0x000004a3, 0x00000000);
+ nv_icmd(dev, 0x000004a4, 0x00000000);
+ nv_icmd(dev, 0x000004a5, 0x00000000);
+ nv_icmd(dev, 0x000004a6, 0x00000000);
+ nv_icmd(dev, 0x000004a7, 0x00000000);
+ nv_icmd(dev, 0x000004a8, 0x00000000);
+ nv_icmd(dev, 0x000004a9, 0x00000000);
+ nv_icmd(dev, 0x000004aa, 0x00000000);
+ nv_icmd(dev, 0x000004ab, 0x00000000);
+ nv_icmd(dev, 0x000004ac, 0x00000000);
+ nv_icmd(dev, 0x000004ad, 0x00000000);
+ nv_icmd(dev, 0x000004ae, 0x00000000);
+ nv_icmd(dev, 0x000004af, 0x00000000);
+ nv_icmd(dev, 0x000004b0, 0x00000000);
+ nv_icmd(dev, 0x000004b1, 0x00000000);
+ nv_icmd(dev, 0x000004b2, 0x00000000);
+ nv_icmd(dev, 0x000004b3, 0x00000000);
+ nv_icmd(dev, 0x000004b4, 0x00000000);
+ nv_icmd(dev, 0x000004b5, 0x00000000);
+ nv_icmd(dev, 0x000004b6, 0x00000000);
+ nv_icmd(dev, 0x000004b7, 0x00000000);
+ nv_icmd(dev, 0x000004b8, 0x00000000);
+ nv_icmd(dev, 0x000004b9, 0x00000000);
+ nv_icmd(dev, 0x000004ba, 0x00000000);
+ nv_icmd(dev, 0x000004bb, 0x00000000);
+ nv_icmd(dev, 0x000004bc, 0x00000000);
+ nv_icmd(dev, 0x000004bd, 0x00000000);
+ nv_icmd(dev, 0x000004be, 0x00000000);
+ nv_icmd(dev, 0x000004bf, 0x00000000);
+ nv_icmd(dev, 0x000004c0, 0x00000000);
+ nv_icmd(dev, 0x000004c1, 0x00000000);
+ nv_icmd(dev, 0x000004c2, 0x00000000);
+ nv_icmd(dev, 0x000004c3, 0x00000000);
+ nv_icmd(dev, 0x000004c4, 0x00000000);
+ nv_icmd(dev, 0x000004c5, 0x00000000);
+ nv_icmd(dev, 0x000004c6, 0x00000000);
+ nv_icmd(dev, 0x000004c7, 0x00000000);
+ nv_icmd(dev, 0x000004c8, 0x00000000);
+ nv_icmd(dev, 0x000004c9, 0x00000000);
+ nv_icmd(dev, 0x000004ca, 0x00000000);
+ nv_icmd(dev, 0x000004cb, 0x00000000);
+ nv_icmd(dev, 0x000004cc, 0x00000000);
+ nv_icmd(dev, 0x000004cd, 0x00000000);
+ nv_icmd(dev, 0x000004ce, 0x00000000);
+ nv_icmd(dev, 0x000004cf, 0x00000000);
+ nv_icmd(dev, 0x00000510, 0x3f800000);
+ nv_icmd(dev, 0x00000511, 0x3f800000);
+ nv_icmd(dev, 0x00000512, 0x3f800000);
+ nv_icmd(dev, 0x00000513, 0x3f800000);
+ nv_icmd(dev, 0x00000514, 0x3f800000);
+ nv_icmd(dev, 0x00000515, 0x3f800000);
+ nv_icmd(dev, 0x00000516, 0x3f800000);
+ nv_icmd(dev, 0x00000517, 0x3f800000);
+ nv_icmd(dev, 0x00000518, 0x3f800000);
+ nv_icmd(dev, 0x00000519, 0x3f800000);
+ nv_icmd(dev, 0x0000051a, 0x3f800000);
+ nv_icmd(dev, 0x0000051b, 0x3f800000);
+ nv_icmd(dev, 0x0000051c, 0x3f800000);
+ nv_icmd(dev, 0x0000051d, 0x3f800000);
+ nv_icmd(dev, 0x0000051e, 0x3f800000);
+ nv_icmd(dev, 0x0000051f, 0x3f800000);
+ nv_icmd(dev, 0x00000520, 0x000002b6);
+ nv_icmd(dev, 0x00000529, 0x00000001);
+ nv_icmd(dev, 0x00000530, 0xffff0000);
+ nv_icmd(dev, 0x00000531, 0xffff0000);
+ nv_icmd(dev, 0x00000532, 0xffff0000);
+ nv_icmd(dev, 0x00000533, 0xffff0000);
+ nv_icmd(dev, 0x00000534, 0xffff0000);
+ nv_icmd(dev, 0x00000535, 0xffff0000);
+ nv_icmd(dev, 0x00000536, 0xffff0000);
+ nv_icmd(dev, 0x00000537, 0xffff0000);
+ nv_icmd(dev, 0x00000538, 0xffff0000);
+ nv_icmd(dev, 0x00000539, 0xffff0000);
+ nv_icmd(dev, 0x0000053a, 0xffff0000);
+ nv_icmd(dev, 0x0000053b, 0xffff0000);
+ nv_icmd(dev, 0x0000053c, 0xffff0000);
+ nv_icmd(dev, 0x0000053d, 0xffff0000);
+ nv_icmd(dev, 0x0000053e, 0xffff0000);
+ nv_icmd(dev, 0x0000053f, 0xffff0000);
+ nv_icmd(dev, 0x00000585, 0x0000003f);
+ nv_icmd(dev, 0x00000576, 0x00000003);
+ nv_icmd(dev, 0x00000586, 0x00000040);
+ nv_icmd(dev, 0x00000582, 0x00000080);
+ nv_icmd(dev, 0x00000583, 0x00000080);
+ nv_icmd(dev, 0x000005c2, 0x00000001);
+ nv_icmd(dev, 0x00000638, 0x00000001);
+ nv_icmd(dev, 0x00000639, 0x00000001);
+ nv_icmd(dev, 0x0000063a, 0x00000002);
+ nv_icmd(dev, 0x0000063b, 0x00000001);
+ nv_icmd(dev, 0x0000063c, 0x00000001);
+ nv_icmd(dev, 0x0000063d, 0x00000002);
+ nv_icmd(dev, 0x0000063e, 0x00000001);
+ nv_icmd(dev, 0x000008b8, 0x00000001);
+ nv_icmd(dev, 0x000008b9, 0x00000001);
+ nv_icmd(dev, 0x000008ba, 0x00000001);
+ nv_icmd(dev, 0x000008bb, 0x00000001);
+ nv_icmd(dev, 0x000008bc, 0x00000001);
+ nv_icmd(dev, 0x000008bd, 0x00000001);
+ nv_icmd(dev, 0x000008be, 0x00000001);
+ nv_icmd(dev, 0x000008bf, 0x00000001);
+ nv_icmd(dev, 0x00000900, 0x00000001);
+ nv_icmd(dev, 0x00000901, 0x00000001);
+ nv_icmd(dev, 0x00000902, 0x00000001);
+ nv_icmd(dev, 0x00000903, 0x00000001);
+ nv_icmd(dev, 0x00000904, 0x00000001);
+ nv_icmd(dev, 0x00000905, 0x00000001);
+ nv_icmd(dev, 0x00000906, 0x00000001);
+ nv_icmd(dev, 0x00000907, 0x00000001);
+ nv_icmd(dev, 0x00000908, 0x00000002);
+ nv_icmd(dev, 0x00000909, 0x00000002);
+ nv_icmd(dev, 0x0000090a, 0x00000002);
+ nv_icmd(dev, 0x0000090b, 0x00000002);
+ nv_icmd(dev, 0x0000090c, 0x00000002);
+ nv_icmd(dev, 0x0000090d, 0x00000002);
+ nv_icmd(dev, 0x0000090e, 0x00000002);
+ nv_icmd(dev, 0x0000090f, 0x00000002);
+ nv_icmd(dev, 0x00000910, 0x00000001);
+ nv_icmd(dev, 0x00000911, 0x00000001);
+ nv_icmd(dev, 0x00000912, 0x00000001);
+ nv_icmd(dev, 0x00000913, 0x00000001);
+ nv_icmd(dev, 0x00000914, 0x00000001);
+ nv_icmd(dev, 0x00000915, 0x00000001);
+ nv_icmd(dev, 0x00000916, 0x00000001);
+ nv_icmd(dev, 0x00000917, 0x00000001);
+ nv_icmd(dev, 0x00000918, 0x00000001);
+ nv_icmd(dev, 0x00000919, 0x00000001);
+ nv_icmd(dev, 0x0000091a, 0x00000001);
+ nv_icmd(dev, 0x0000091b, 0x00000001);
+ nv_icmd(dev, 0x0000091c, 0x00000001);
+ nv_icmd(dev, 0x0000091d, 0x00000001);
+ nv_icmd(dev, 0x0000091e, 0x00000001);
+ nv_icmd(dev, 0x0000091f, 0x00000001);
+ nv_icmd(dev, 0x00000920, 0x00000002);
+ nv_icmd(dev, 0x00000921, 0x00000002);
+ nv_icmd(dev, 0x00000922, 0x00000002);
+ nv_icmd(dev, 0x00000923, 0x00000002);
+ nv_icmd(dev, 0x00000924, 0x00000002);
+ nv_icmd(dev, 0x00000925, 0x00000002);
+ nv_icmd(dev, 0x00000926, 0x00000002);
+ nv_icmd(dev, 0x00000927, 0x00000002);
+ nv_icmd(dev, 0x00000928, 0x00000001);
+ nv_icmd(dev, 0x00000929, 0x00000001);
+ nv_icmd(dev, 0x0000092a, 0x00000001);
+ nv_icmd(dev, 0x0000092b, 0x00000001);
+ nv_icmd(dev, 0x0000092c, 0x00000001);
+ nv_icmd(dev, 0x0000092d, 0x00000001);
+ nv_icmd(dev, 0x0000092e, 0x00000001);
+ nv_icmd(dev, 0x0000092f, 0x00000001);
+ nv_icmd(dev, 0x00000648, 0x00000001);
+ nv_icmd(dev, 0x00000649, 0x00000001);
+ nv_icmd(dev, 0x0000064a, 0x00000001);
+ nv_icmd(dev, 0x0000064b, 0x00000001);
+ nv_icmd(dev, 0x0000064c, 0x00000001);
+ nv_icmd(dev, 0x0000064d, 0x00000001);
+ nv_icmd(dev, 0x0000064e, 0x00000001);
+ nv_icmd(dev, 0x0000064f, 0x00000001);
+ nv_icmd(dev, 0x00000650, 0x00000001);
+ nv_icmd(dev, 0x00000658, 0x0000000f);
+ nv_icmd(dev, 0x000007ff, 0x0000000a);
+ nv_icmd(dev, 0x0000066a, 0x40000000);
+ nv_icmd(dev, 0x0000066b, 0x10000000);
+ nv_icmd(dev, 0x0000066c, 0xffff0000);
+ nv_icmd(dev, 0x0000066d, 0xffff0000);
+ nv_icmd(dev, 0x000007af, 0x00000008);
+ nv_icmd(dev, 0x000007b0, 0x00000008);
+ nv_icmd(dev, 0x000007f6, 0x00000001);
+ nv_icmd(dev, 0x000006b2, 0x00000055);
+ nv_icmd(dev, 0x000007ad, 0x00000003);
+ nv_icmd(dev, 0x00000937, 0x00000001);
+ nv_icmd(dev, 0x00000971, 0x00000008);
+ nv_icmd(dev, 0x00000972, 0x00000040);
+ nv_icmd(dev, 0x00000973, 0x0000012c);
+ nv_icmd(dev, 0x0000097c, 0x00000040);
+ nv_icmd(dev, 0x00000979, 0x00000003);
+ nv_icmd(dev, 0x00000975, 0x00000020);
+ nv_icmd(dev, 0x00000976, 0x00000001);
+ nv_icmd(dev, 0x00000977, 0x00000020);
+ nv_icmd(dev, 0x00000978, 0x00000001);
+ nv_icmd(dev, 0x00000957, 0x00000003);
+ nv_icmd(dev, 0x0000095e, 0x20164010);
+ nv_icmd(dev, 0x0000095f, 0x00000020);
+ nv_icmd(dev, 0x00000683, 0x00000006);
+ nv_icmd(dev, 0x00000685, 0x003fffff);
+ nv_icmd(dev, 0x00000687, 0x00000c48);
+ nv_icmd(dev, 0x000006a0, 0x00000005);
+ nv_icmd(dev, 0x00000840, 0x00300008);
+ nv_icmd(dev, 0x00000841, 0x04000080);
+ nv_icmd(dev, 0x00000842, 0x00300008);
+ nv_icmd(dev, 0x00000843, 0x04000080);
+ nv_icmd(dev, 0x00000818, 0x00000000);
+ nv_icmd(dev, 0x00000819, 0x00000000);
+ nv_icmd(dev, 0x0000081a, 0x00000000);
+ nv_icmd(dev, 0x0000081b, 0x00000000);
+ nv_icmd(dev, 0x0000081c, 0x00000000);
+ nv_icmd(dev, 0x0000081d, 0x00000000);
+ nv_icmd(dev, 0x0000081e, 0x00000000);
+ nv_icmd(dev, 0x0000081f, 0x00000000);
+ nv_icmd(dev, 0x00000848, 0x00000000);
+ nv_icmd(dev, 0x00000849, 0x00000000);
+ nv_icmd(dev, 0x0000084a, 0x00000000);
+ nv_icmd(dev, 0x0000084b, 0x00000000);
+ nv_icmd(dev, 0x0000084c, 0x00000000);
+ nv_icmd(dev, 0x0000084d, 0x00000000);
+ nv_icmd(dev, 0x0000084e, 0x00000000);
+ nv_icmd(dev, 0x0000084f, 0x00000000);
+ nv_icmd(dev, 0x00000850, 0x00000000);
+ nv_icmd(dev, 0x00000851, 0x00000000);
+ nv_icmd(dev, 0x00000852, 0x00000000);
+ nv_icmd(dev, 0x00000853, 0x00000000);
+ nv_icmd(dev, 0x00000854, 0x00000000);
+ nv_icmd(dev, 0x00000855, 0x00000000);
+ nv_icmd(dev, 0x00000856, 0x00000000);
+ nv_icmd(dev, 0x00000857, 0x00000000);
+ nv_icmd(dev, 0x00000738, 0x00000000);
+ nv_icmd(dev, 0x000006aa, 0x00000001);
+ nv_icmd(dev, 0x000006ab, 0x00000002);
+ nv_icmd(dev, 0x000006ac, 0x00000080);
+ nv_icmd(dev, 0x000006ad, 0x00000100);
+ nv_icmd(dev, 0x000006ae, 0x00000100);
+ nv_icmd(dev, 0x000006b1, 0x00000011);
+ nv_icmd(dev, 0x000006bb, 0x000000cf);
+ nv_icmd(dev, 0x000006ce, 0x2a712488);
+ nv_icmd(dev, 0x00000739, 0x4085c000);
+ nv_icmd(dev, 0x0000073a, 0x00000080);
+ nv_icmd(dev, 0x00000786, 0x80000100);
+ nv_icmd(dev, 0x0000073c, 0x00010100);
+ nv_icmd(dev, 0x0000073d, 0x02800000);
+ nv_icmd(dev, 0x00000787, 0x000000cf);
+ nv_icmd(dev, 0x0000078c, 0x00000008);
+ nv_icmd(dev, 0x00000792, 0x00000001);
+ nv_icmd(dev, 0x00000794, 0x00000001);
+ nv_icmd(dev, 0x00000795, 0x00000001);
+ nv_icmd(dev, 0x00000796, 0x00000001);
+ nv_icmd(dev, 0x00000797, 0x000000cf);
+ nv_icmd(dev, 0x00000836, 0x00000001);
+ nv_icmd(dev, 0x0000079a, 0x00000002);
+ nv_icmd(dev, 0x00000833, 0x04444480);
+ nv_icmd(dev, 0x000007a1, 0x00000001);
+ nv_icmd(dev, 0x000007a3, 0x00000001);
+ nv_icmd(dev, 0x000007a4, 0x00000001);
+ nv_icmd(dev, 0x000007a5, 0x00000001);
+ nv_icmd(dev, 0x00000831, 0x00000004);
+ nv_icmd(dev, 0x0000080c, 0x00000002);
+ nv_icmd(dev, 0x0000080d, 0x00000100);
+ nv_icmd(dev, 0x0000080e, 0x00000100);
+ nv_icmd(dev, 0x0000080f, 0x00000001);
+ nv_icmd(dev, 0x00000823, 0x00000002);
+ nv_icmd(dev, 0x00000824, 0x00000100);
+ nv_icmd(dev, 0x00000825, 0x00000100);
+ nv_icmd(dev, 0x00000826, 0x00000001);
+ nv_icmd(dev, 0x0000095d, 0x00000001);
+ nv_icmd(dev, 0x0000082b, 0x00000004);
+ nv_icmd(dev, 0x00000942, 0x00010001);
+ nv_icmd(dev, 0x00000943, 0x00000001);
+ nv_icmd(dev, 0x00000944, 0x00000022);
+ nv_icmd(dev, 0x000007c5, 0x00010001);
+ nv_icmd(dev, 0x00000834, 0x00000001);
+ nv_icmd(dev, 0x000007c7, 0x00000001);
+ nv_icmd(dev, 0x0000c1b0, 0x0000000f);
+ nv_icmd(dev, 0x0000c1b1, 0x0000000f);
+ nv_icmd(dev, 0x0000c1b2, 0x0000000f);
+ nv_icmd(dev, 0x0000c1b3, 0x0000000f);
+ nv_icmd(dev, 0x0000c1b4, 0x0000000f);
+ nv_icmd(dev, 0x0000c1b5, 0x0000000f);
+ nv_icmd(dev, 0x0000c1b6, 0x0000000f);
+ nv_icmd(dev, 0x0000c1b7, 0x0000000f);
+ nv_icmd(dev, 0x0000c1b8, 0x0fac6881);
+ nv_icmd(dev, 0x0000c1b9, 0x00fac688);
+ nv_icmd(dev, 0x0001e100, 0x00000001);
+ nv_icmd(dev, 0x00001000, 0x00000002);
+ nv_icmd(dev, 0x000006aa, 0x00000001);
+ nv_icmd(dev, 0x000006ad, 0x00000100);
+ nv_icmd(dev, 0x000006ae, 0x00000100);
+ nv_icmd(dev, 0x000006b1, 0x00000011);
+ nv_icmd(dev, 0x0000078c, 0x00000008);
+ nv_icmd(dev, 0x00000792, 0x00000001);
+ nv_icmd(dev, 0x00000794, 0x00000001);
+ nv_icmd(dev, 0x00000795, 0x00000001);
+ nv_icmd(dev, 0x00000796, 0x00000001);
+ nv_icmd(dev, 0x00000797, 0x000000cf);
+ nv_icmd(dev, 0x0000079a, 0x00000002);
+ nv_icmd(dev, 0x00000833, 0x04444480);
+ nv_icmd(dev, 0x000007a1, 0x00000001);
+ nv_icmd(dev, 0x000007a3, 0x00000001);
+ nv_icmd(dev, 0x000007a4, 0x00000001);
+ nv_icmd(dev, 0x000007a5, 0x00000001);
+ nv_icmd(dev, 0x00000831, 0x00000004);
+ nv_icmd(dev, 0x0001e100, 0x00000001);
+ nv_icmd(dev, 0x00001000, 0x00000014);
+ nv_icmd(dev, 0x00000351, 0x00000100);
+ nv_icmd(dev, 0x00000957, 0x00000003);
+ nv_icmd(dev, 0x0000095d, 0x00000001);
+ nv_icmd(dev, 0x0000082b, 0x00000004);
+ nv_icmd(dev, 0x00000942, 0x00010001);
+ nv_icmd(dev, 0x00000943, 0x00000001);
+ nv_icmd(dev, 0x000007c5, 0x00010001);
+ nv_icmd(dev, 0x00000834, 0x00000001);
+ nv_icmd(dev, 0x000007c7, 0x00000001);
+ nv_icmd(dev, 0x0001e100, 0x00000001);
+ nv_icmd(dev, 0x00001000, 0x00000001);
+ nv_icmd(dev, 0x0000080c, 0x00000002);
+ nv_icmd(dev, 0x0000080d, 0x00000100);
+ nv_icmd(dev, 0x0000080e, 0x00000100);
+ nv_icmd(dev, 0x0000080f, 0x00000001);
+ nv_icmd(dev, 0x00000823, 0x00000002);
+ nv_icmd(dev, 0x00000824, 0x00000100);
+ nv_icmd(dev, 0x00000825, 0x00000100);
+ nv_icmd(dev, 0x00000826, 0x00000001);
+ nv_icmd(dev, 0x0001e100, 0x00000001);
+ nv_wr32(dev, 0x400208, 0x00000000);
+ nv_wr32(dev, 0x404154, 0x00000400);
+
+ nvc0_grctx_generate_9097(dev);
+ nvc0_grctx_generate_902d(dev);
+ nvc0_grctx_generate_9039(dev);
+ nvc0_grctx_generate_90c0(dev);
+
+ nv_wr32(dev, 0x000260, r000260);
+ return 0;
+}
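
Note on the long nv_icmd() runs above: they replay the PGRAPH "golden" context image one method at a time through the ICMD interface. The helper itself is not part of this hunk; the following is a minimal sketch of how such a helper typically looks in the nvc0 bring-up code, assuming the 0x400200/0x400204 ICMD registers and the 0x400700 busy flag (the offsets are assumptions, not taken from this diff):

	/* Sketch of the icmd helper these tables drive: stage the data
	 * word, then write the method address, and busy-wait until
	 * PGRAPH reports the command consumed. Illustrative only. */
	static void
	nv_icmd(struct drm_device *dev, u32 icmd, u32 data)
	{
		nv_wr32(dev, 0x400204, data);	/* ICMD data word */
		nv_wr32(dev, 0x400200, icmd);	/* method address, kicks the write */
		while (nv_rd32(dev, 0x400700) & 0x00000002)
			;			/* wait for PGRAPH to accept it */
	}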
diff --git a/drivers/gpu/drm/nouveau/nvc0_instmem.c b/drivers/gpu/drm/nouveau/nvc0_instmem.c
index 13a0f78a908..c0909174905 100644
--- a/drivers/gpu/drm/nouveau/nvc0_instmem.c
+++ b/drivers/gpu/drm/nouveau/nvc0_instmem.c
@@ -25,206 +25,207 @@
#include "drmP.h"
#include "nouveau_drv.h"
+#include "nouveau_vm.h"
+
+struct nvc0_instmem_priv {
+ struct nouveau_gpuobj *bar1_pgd;
+ struct nouveau_channel *bar1;
+ struct nouveau_gpuobj *bar3_pgd;
+ struct nouveau_channel *bar3;
+ struct nouveau_gpuobj *chan_pgd;
+};
int
-nvc0_instmem_populate(struct drm_device *dev, struct nouveau_gpuobj *gpuobj,
- uint32_t *size)
+nvc0_instmem_suspend(struct drm_device *dev)
{
- int ret;
-
- *size = ALIGN(*size, 4096);
- if (*size == 0)
- return -EINVAL;
-
- ret = nouveau_bo_new(dev, NULL, *size, 0, TTM_PL_FLAG_VRAM, 0, 0x0000,
- true, false, &gpuobj->im_backing);
- if (ret) {
- NV_ERROR(dev, "error getting PRAMIN backing pages: %d\n", ret);
- return ret;
- }
-
- ret = nouveau_bo_pin(gpuobj->im_backing, TTM_PL_FLAG_VRAM);
- if (ret) {
- NV_ERROR(dev, "error pinning PRAMIN backing VRAM: %d\n", ret);
- nouveau_bo_ref(NULL, &gpuobj->im_backing);
- return ret;
- }
+ struct drm_nouveau_private *dev_priv = dev->dev_private;
- gpuobj->vinst = gpuobj->im_backing->bo.mem.start << PAGE_SHIFT;
+ dev_priv->ramin_available = false;
return 0;
}
void
-nvc0_instmem_clear(struct drm_device *dev, struct nouveau_gpuobj *gpuobj)
+nvc0_instmem_resume(struct drm_device *dev)
{
struct drm_nouveau_private *dev_priv = dev->dev_private;
+ struct nvc0_instmem_priv *priv = dev_priv->engine.instmem.priv;
- if (gpuobj && gpuobj->im_backing) {
- if (gpuobj->im_bound)
- dev_priv->engine.instmem.unbind(dev, gpuobj);
- nouveau_bo_unpin(gpuobj->im_backing);
- nouveau_bo_ref(NULL, &gpuobj->im_backing);
- gpuobj->im_backing = NULL;
- }
+ nv_mask(dev, 0x100c80, 0x00000001, 0x00000000);
+ nv_wr32(dev, 0x001704, 0x80000000 | priv->bar1->ramin->vinst >> 12);
+ nv_wr32(dev, 0x001714, 0xc0000000 | priv->bar3->ramin->vinst >> 12);
+ dev_priv->ramin_available = true;
}
-int
-nvc0_instmem_bind(struct drm_device *dev, struct nouveau_gpuobj *gpuobj)
+static void
+nvc0_channel_del(struct nouveau_channel **pchan)
{
- struct drm_nouveau_private *dev_priv = dev->dev_private;
- uint32_t pte, pte_end;
- uint64_t vram;
-
- if (!gpuobj->im_backing || !gpuobj->im_pramin || gpuobj->im_bound)
- return -EINVAL;
-
- NV_DEBUG(dev, "st=0x%lx sz=0x%lx\n",
- gpuobj->im_pramin->start, gpuobj->im_pramin->size);
+ struct nouveau_channel *chan;
+
+ chan = *pchan;
+ *pchan = NULL;
+ if (!chan)
+ return;
+
+ nouveau_vm_ref(NULL, &chan->vm, NULL);
+ if (chan->ramin_heap.free_stack.next)
+ drm_mm_takedown(&chan->ramin_heap);
+ nouveau_gpuobj_ref(NULL, &chan->ramin);
+ kfree(chan);
+}
- pte = gpuobj->im_pramin->start >> 12;
- pte_end = (gpuobj->im_pramin->size >> 12) + pte;
- vram = gpuobj->vinst;
+static int
+nvc0_channel_new(struct drm_device *dev, u32 size, struct nouveau_vm *vm,
+ struct nouveau_channel **pchan,
+ struct nouveau_gpuobj *pgd, u64 vm_size)
+{
+ struct nouveau_channel *chan;
+ int ret;
- NV_DEBUG(dev, "pramin=0x%lx, pte=%d, pte_end=%d\n",
- gpuobj->im_pramin->start, pte, pte_end);
- NV_DEBUG(dev, "first vram page: 0x%010llx\n", gpuobj->vinst);
+ chan = kzalloc(sizeof(*chan), GFP_KERNEL);
+ if (!chan)
+ return -ENOMEM;
+ chan->dev = dev;
- while (pte < pte_end) {
- nv_wr32(dev, 0x702000 + (pte * 8), (vram >> 8) | 1);
- nv_wr32(dev, 0x702004 + (pte * 8), 0);
- vram += 4096;
- pte++;
+ ret = nouveau_gpuobj_new(dev, NULL, size, 0x1000, 0, &chan->ramin);
+ if (ret) {
+ nvc0_channel_del(&chan);
+ return ret;
}
- dev_priv->engine.instmem.flush(dev);
- if (1) {
- u32 chan = nv_rd32(dev, 0x1700) << 16;
- nv_wr32(dev, 0x100cb8, (chan + 0x1000) >> 8);
- nv_wr32(dev, 0x100cbc, 0x80000005);
+ ret = drm_mm_init(&chan->ramin_heap, 0x1000, size - 0x1000);
+ if (ret) {
+ nvc0_channel_del(&chan);
+ return ret;
}
- gpuobj->im_bound = 1;
- return 0;
-}
-
-int
-nvc0_instmem_unbind(struct drm_device *dev, struct nouveau_gpuobj *gpuobj)
-{
- struct drm_nouveau_private *dev_priv = dev->dev_private;
- uint32_t pte, pte_end;
-
- if (gpuobj->im_bound == 0)
- return -EINVAL;
-
- pte = gpuobj->im_pramin->start >> 12;
- pte_end = (gpuobj->im_pramin->size >> 12) + pte;
- while (pte < pte_end) {
- nv_wr32(dev, 0x702000 + (pte * 8), 0);
- nv_wr32(dev, 0x702004 + (pte * 8), 0);
- pte++;
+ ret = nouveau_vm_ref(vm, &chan->vm, NULL);
+ if (ret) {
+ nvc0_channel_del(&chan);
+ return ret;
}
- dev_priv->engine.instmem.flush(dev);
- gpuobj->im_bound = 0;
- return 0;
-}
+ nv_wo32(chan->ramin, 0x0200, lower_32_bits(pgd->vinst));
+ nv_wo32(chan->ramin, 0x0204, upper_32_bits(pgd->vinst));
+ nv_wo32(chan->ramin, 0x0208, lower_32_bits(vm_size - 1));
+ nv_wo32(chan->ramin, 0x020c, upper_32_bits(vm_size - 1));
-void
-nvc0_instmem_flush(struct drm_device *dev)
-{
- nv_wr32(dev, 0x070000, 1);
- if (!nv_wait(dev, 0x070000, 0x00000002, 0x00000000))
- NV_ERROR(dev, "PRAMIN flush timeout\n");
+ *pchan = chan;
+ return 0;
}
int
-nvc0_instmem_suspend(struct drm_device *dev)
+nvc0_instmem_init(struct drm_device *dev)
{
struct drm_nouveau_private *dev_priv = dev->dev_private;
- u32 *buf;
- int i;
+ struct nouveau_instmem_engine *pinstmem = &dev_priv->engine.instmem;
+ struct pci_dev *pdev = dev->pdev;
+ struct nvc0_instmem_priv *priv;
+ struct nouveau_vm *vm = NULL;
+ int ret;
- dev_priv->susres.ramin_copy = vmalloc(65536);
- if (!dev_priv->susres.ramin_copy)
+ priv = kzalloc(sizeof(*priv), GFP_KERNEL);
+ if (!priv)
return -ENOMEM;
- buf = dev_priv->susres.ramin_copy;
-
- for (i = 0; i < 65536; i += 4)
- buf[i/4] = nv_rd32(dev, NV04_PRAMIN + i);
+ pinstmem->priv = priv;
+
+ /* BAR3 VM */
+ ret = nouveau_vm_new(dev, 0, pci_resource_len(pdev, 3), 0,
+ &dev_priv->bar3_vm);
+ if (ret)
+ goto error;
+
+ ret = nouveau_gpuobj_new(dev, NULL,
+ (pci_resource_len(pdev, 3) >> 12) * 8, 0,
+ NVOBJ_FLAG_DONT_MAP |
+ NVOBJ_FLAG_ZERO_ALLOC,
+ &dev_priv->bar3_vm->pgt[0].obj[0]);
+ if (ret)
+ goto error;
+ dev_priv->bar3_vm->pgt[0].refcount[0] = 1;
+
+ nv50_instmem_map(dev_priv->bar3_vm->pgt[0].obj[0]);
+
+ ret = nouveau_gpuobj_new(dev, NULL, 0x8000, 4096,
+ NVOBJ_FLAG_ZERO_ALLOC, &priv->bar3_pgd);
+ if (ret)
+ goto error;
+
+ ret = nouveau_vm_ref(dev_priv->bar3_vm, &vm, priv->bar3_pgd);
+ if (ret)
+ goto error;
+ nouveau_vm_ref(NULL, &vm, NULL);
+
+ ret = nvc0_channel_new(dev, 8192, dev_priv->bar3_vm, &priv->bar3,
+ priv->bar3_pgd, pci_resource_len(dev->pdev, 3));
+ if (ret)
+ goto error;
+
+ /* BAR1 VM */
+ ret = nouveau_vm_new(dev, 0, pci_resource_len(pdev, 1), 0, &vm);
+ if (ret)
+ goto error;
+
+ ret = nouveau_gpuobj_new(dev, NULL, 0x8000, 4096,
+ NVOBJ_FLAG_ZERO_ALLOC, &priv->bar1_pgd);
+ if (ret)
+ goto error;
+
+ ret = nouveau_vm_ref(vm, &dev_priv->bar1_vm, priv->bar1_pgd);
+ if (ret)
+ goto error;
+ nouveau_vm_ref(NULL, &vm, NULL);
+
+ ret = nvc0_channel_new(dev, 8192, dev_priv->bar1_vm, &priv->bar1,
+ priv->bar1_pgd, pci_resource_len(dev->pdev, 1));
+ if (ret)
+ goto error;
+
+ /* channel vm */
+ ret = nouveau_vm_new(dev, 0, (1ULL << 40), 0x0008000000ULL, &vm);
+ if (ret)
+ goto error;
+
+ ret = nouveau_gpuobj_new(dev, NULL, 0x8000, 4096, 0, &priv->chan_pgd);
+ if (ret)
+ goto error;
+
+ nouveau_vm_ref(vm, &dev_priv->chan_vm, priv->chan_pgd);
+ nouveau_vm_ref(NULL, &vm, NULL);
+
+ nvc0_instmem_resume(dev);
return 0;
+error:
+ nvc0_instmem_takedown(dev);
+ return ret;
}
void
-nvc0_instmem_resume(struct drm_device *dev)
+nvc0_instmem_takedown(struct drm_device *dev)
{
struct drm_nouveau_private *dev_priv = dev->dev_private;
- u32 *buf = dev_priv->susres.ramin_copy;
- u64 chan;
- int i;
+ struct nvc0_instmem_priv *priv = dev_priv->engine.instmem.priv;
+ struct nouveau_vm *vm = NULL;
- chan = dev_priv->vram_size - dev_priv->ramin_rsvd_vram;
- nv_wr32(dev, 0x001700, chan >> 16);
+ nvc0_instmem_suspend(dev);
- for (i = 0; i < 65536; i += 4)
- nv_wr32(dev, NV04_PRAMIN + i, buf[i/4]);
- vfree(dev_priv->susres.ramin_copy);
- dev_priv->susres.ramin_copy = NULL;
+ nv_wr32(dev, 0x1704, 0x00000000);
+ nv_wr32(dev, 0x1714, 0x00000000);
- nv_wr32(dev, 0x001714, 0xc0000000 | (chan >> 12));
-}
+ nouveau_vm_ref(NULL, &dev_priv->chan_vm, priv->chan_pgd);
+ nouveau_gpuobj_ref(NULL, &priv->chan_pgd);
-int
-nvc0_instmem_init(struct drm_device *dev)
-{
- struct drm_nouveau_private *dev_priv = dev->dev_private;
- u64 chan, pgt3, imem, lim3 = dev_priv->ramin_size - 1;
- int ret, i;
-
- dev_priv->ramin_rsvd_vram = 1 * 1024 * 1024;
- chan = dev_priv->vram_size - dev_priv->ramin_rsvd_vram;
- imem = 4096 + 4096 + 32768;
-
- nv_wr32(dev, 0x001700, chan >> 16);
-
- /* channel setup */
- nv_wr32(dev, 0x700200, lower_32_bits(chan + 0x1000));
- nv_wr32(dev, 0x700204, upper_32_bits(chan + 0x1000));
- nv_wr32(dev, 0x700208, lower_32_bits(lim3));
- nv_wr32(dev, 0x70020c, upper_32_bits(lim3));
-
- /* point pgd -> pgt */
- nv_wr32(dev, 0x701000, 0);
- nv_wr32(dev, 0x701004, ((chan + 0x2000) >> 8) | 1);
-
- /* point pgt -> physical vram for channel */
- pgt3 = 0x2000;
- for (i = 0; i < dev_priv->ramin_rsvd_vram; i += 4096, pgt3 += 8) {
- nv_wr32(dev, 0x700000 + pgt3, ((chan + i) >> 8) | 1);
- nv_wr32(dev, 0x700004 + pgt3, 0);
- }
-
- /* clear rest of pgt */
- for (; i < dev_priv->ramin_size; i += 4096, pgt3 += 8) {
- nv_wr32(dev, 0x700000 + pgt3, 0);
- nv_wr32(dev, 0x700004 + pgt3, 0);
- }
-
- /* point bar3 at the channel */
- nv_wr32(dev, 0x001714, 0xc0000000 | (chan >> 12));
-
- /* Global PRAMIN heap */
- ret = drm_mm_init(&dev_priv->ramin_heap, imem,
- dev_priv->ramin_size - imem);
- if (ret) {
- NV_ERROR(dev, "Failed to init RAMIN heap\n");
- return -ENOMEM;
- }
+ nvc0_channel_del(&priv->bar1);
+ nouveau_vm_ref(NULL, &dev_priv->bar1_vm, priv->bar1_pgd);
+ nouveau_gpuobj_ref(NULL, &priv->bar1_pgd);
- return 0;
-}
+ nvc0_channel_del(&priv->bar3);
+ nouveau_vm_ref(dev_priv->bar3_vm, &vm, NULL);
+ nouveau_vm_ref(NULL, &vm, priv->bar3_pgd);
+ nouveau_gpuobj_ref(NULL, &priv->bar3_pgd);
+ nouveau_gpuobj_ref(NULL, &dev_priv->bar3_vm->pgt[0].obj[0]);
+ nouveau_vm_ref(NULL, &dev_priv->bar3_vm, NULL);
-void
-nvc0_instmem_takedown(struct drm_device *dev)
-{
+ dev_priv->engine.instmem.priv = NULL;
+ kfree(priv);
}
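
Note on nvc0_channel_new() above: the four nv_wo32() writes lay out the head of the channel's instance block, pointing it at a page directory and recording the VM's addressable range. An illustrative view of that layout (offsets come from the diff; the struct and field names are descriptive only, not a kernel type):

	/* Channel instance block as written by nvc0_channel_new(); sketch. */
	struct nvc0_chan_inst {
		u32 reserved[0x80];	/* 0x000-0x1ff: other channel state */
		u32 pgd_lo;		/* 0x200: page directory address, low 32 bits  */
		u32 pgd_hi;		/* 0x204: page directory address, high 32 bits */
		u32 vm_limit_lo;	/* 0x208: vm_size - 1, low 32 bits  */
		u32 vm_limit_hi;	/* 0x20c: vm_size - 1, high 32 bits */
	};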
diff --git a/drivers/gpu/drm/nouveau/nvc0_vm.c b/drivers/gpu/drm/nouveau/nvc0_vm.c
new file mode 100644
index 00000000000..4b9251bb0ff
--- /dev/null
+++ b/drivers/gpu/drm/nouveau/nvc0_vm.c
@@ -0,0 +1,123 @@
+/*
+ * Copyright 2010 Red Hat Inc.
+ *
+ * Permission is hereby granted, free of charge, to any person obtaining a
+ * copy of this software and associated documentation files (the "Software"),
+ * to deal in the Software without restriction, including without limitation
+ * the rights to use, copy, modify, merge, publish, distribute, sublicense,
+ * and/or sell copies of the Software, and to permit persons to whom the
+ * Software is furnished to do so, subject to the following conditions:
+ *
+ * The above copyright notice and this permission notice shall be included in
+ * all copies or substantial portions of the Software.
+ *
+ * THE SOFTWARE IS PROVIDED "AS IS", WITHOUT WARRANTY OF ANY KIND, EXPRESS OR
+ * IMPLIED, INCLUDING BUT NOT LIMITED TO THE WARRANTIES OF MERCHANTABILITY,
+ * FITNESS FOR A PARTICULAR PURPOSE AND NONINFRINGEMENT. IN NO EVENT SHALL
+ * THE COPYRIGHT HOLDER(S) OR AUTHOR(S) BE LIABLE FOR ANY CLAIM, DAMAGES OR
+ * OTHER LIABILITY, WHETHER IN AN ACTION OF CONTRACT, TORT OR OTHERWISE,
+ * ARISING FROM, OUT OF OR IN CONNECTION WITH THE SOFTWARE OR THE USE OR
+ * OTHER DEALINGS IN THE SOFTWARE.
+ *
+ * Authors: Ben Skeggs
+ */
+
+#include "drmP.h"
+
+#include "nouveau_drv.h"
+#include "nouveau_vm.h"
+
+void
+nvc0_vm_map_pgt(struct nouveau_gpuobj *pgd, u32 index,
+ struct nouveau_gpuobj *pgt[2])
+{
+ u32 pde[2] = { 0, 0 };
+
+ if (pgt[0])
+ pde[1] = 0x00000001 | (pgt[0]->vinst >> 8);
+ if (pgt[1])
+ pde[0] = 0x00000001 | (pgt[1]->vinst >> 8);
+
+ nv_wo32(pgd, (index * 8) + 0, pde[0]);
+ nv_wo32(pgd, (index * 8) + 4, pde[1]);
+}
+
+static inline u64
+nvc0_vm_addr(struct nouveau_vma *vma, u64 phys, u32 memtype, u32 target)
+{
+ phys >>= 8;
+
+ phys |= 0x00000001; /* present */
+// if (vma->access & NV_MEM_ACCESS_SYS)
+// phys |= 0x00000002;
+
+ phys |= ((u64)target << 32);
+ phys |= ((u64)memtype << 36);
+
+ return phys;
+}
+
+void
+nvc0_vm_map(struct nouveau_vma *vma, struct nouveau_gpuobj *pgt,
+ struct nouveau_vram *mem, u32 pte, u32 cnt, u64 phys)
+{
+ u32 next = 1 << (vma->node->type - 8);
+
+ phys = nvc0_vm_addr(vma, phys, mem->memtype, 0);
+ pte <<= 3;
+ while (cnt--) {
+ nv_wo32(pgt, pte + 0, lower_32_bits(phys));
+ nv_wo32(pgt, pte + 4, upper_32_bits(phys));
+ phys += next;
+ pte += 8;
+ }
+}
+
+void
+nvc0_vm_map_sg(struct nouveau_vma *vma, struct nouveau_gpuobj *pgt,
+ u32 pte, dma_addr_t *list, u32 cnt)
+{
+ pte <<= 3;
+ while (cnt--) {
+ u64 phys = nvc0_vm_addr(vma, *list++, 0, 5);
+ nv_wo32(pgt, pte + 0, lower_32_bits(phys));
+ nv_wo32(pgt, pte + 4, upper_32_bits(phys));
+ pte += 8;
+ }
+}
+
+void
+nvc0_vm_unmap(struct nouveau_gpuobj *pgt, u32 pte, u32 cnt)
+{
+ pte <<= 3;
+ while (cnt--) {
+ nv_wo32(pgt, pte + 0, 0x00000000);
+ nv_wo32(pgt, pte + 4, 0x00000000);
+ pte += 8;
+ }
+}
+
+void
+nvc0_vm_flush(struct nouveau_vm *vm)
+{
+ struct drm_nouveau_private *dev_priv = vm->dev->dev_private;
+ struct nouveau_instmem_engine *pinstmem = &dev_priv->engine.instmem;
+ struct drm_device *dev = vm->dev;
+ struct nouveau_vm_pgd *vpgd;
+ u32 r100c80, engine;
+
+ pinstmem->flush(vm->dev);
+
+ if (vm == dev_priv->chan_vm)
+ engine = 1;
+ else
+ engine = 5;
+
+ list_for_each_entry(vpgd, &vm->pgd_list, head) {
+ r100c80 = nv_rd32(dev, 0x100c80);
+ nv_wr32(dev, 0x100cb8, vpgd->obj->vinst >> 8);
+ nv_wr32(dev, 0x100cbc, 0x80000000 | engine);
+ if (!nv_wait(dev, 0x100c80, 0xffffffff, r100c80))
+ NV_ERROR(dev, "vm flush timeout eng %d\n", engine);
+ }
+}
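
Worked example of the PTE encoding in nvc0_vm_addr()/nvc0_vm_map() above (values are illustrative):

	/* VRAM page at phys = 0x40000000, memtype = 0, target = 0:
	 *   phys >> 8      -> 0x00400000
	 *   | 1 (present)  -> 0x00400001
	 * giving the two 32-bit halves written into the page table:
	 *   nv_wo32(pgt, pte + 0, 0x00400001);   low word
	 *   nv_wo32(pgt, pte + 4, 0x00000000);   high word
	 * For nvc0_vm_map_sg() the target is 5, so the high word becomes
	 * 0x00000005; a non-zero memtype would be OR'd in as memtype << 4,
	 * since bit 36 of the 64-bit PTE lands on bit 4 of the high word. */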
diff --git a/drivers/gpu/drm/nouveau/nvc0_vram.c b/drivers/gpu/drm/nouveau/nvc0_vram.c
new file mode 100644
index 00000000000..858eda5dedd
--- /dev/null
+++ b/drivers/gpu/drm/nouveau/nvc0_vram.c
@@ -0,0 +1,99 @@
+/*
+ * Copyright 2010 Red Hat Inc.
+ *
+ * Permission is hereby granted, free of charge, to any person obtaining a
+ * copy of this software and associated documentation files (the "Software"),
+ * to deal in the Software without restriction, including without limitation
+ * the rights to use, copy, modify, merge, publish, distribute, sublicense,
+ * and/or sell copies of the Software, and to permit persons to whom the
+ * Software is furnished to do so, subject to the following conditions:
+ *
+ * The above copyright notice and this permission notice shall be included in
+ * all copies or substantial portions of the Software.
+ *
+ * THE SOFTWARE IS PROVIDED "AS IS", WITHOUT WARRANTY OF ANY KIND, EXPRESS OR
+ * IMPLIED, INCLUDING BUT NOT LIMITED TO THE WARRANTIES OF MERCHANTABILITY,
+ * FITNESS FOR A PARTICULAR PURPOSE AND NONINFRINGEMENT. IN NO EVENT SHALL
+ * THE COPYRIGHT HOLDER(S) OR AUTHOR(S) BE LIABLE FOR ANY CLAIM, DAMAGES OR
+ * OTHER LIABILITY, WHETHER IN AN ACTION OF CONTRACT, TORT OR OTHERWISE,
+ * ARISING FROM, OUT OF OR IN CONNECTION WITH THE SOFTWARE OR THE USE OR
+ * OTHER DEALINGS IN THE SOFTWARE.
+ *
+ * Authors: Ben Skeggs
+ */
+
+#include "drmP.h"
+#include "nouveau_drv.h"
+#include "nouveau_mm.h"
+
+bool
+nvc0_vram_flags_valid(struct drm_device *dev, u32 tile_flags)
+{
+ switch (tile_flags & NOUVEAU_GEM_TILE_LAYOUT_MASK) {
+ case 0x0000:
+ case 0xfe00:
+ case 0xdb00:
+ case 0x1100:
+ return true;
+ default:
+ break;
+ }
+
+ return false;
+}
+
+int
+nvc0_vram_new(struct drm_device *dev, u64 size, u32 align, u32 ncmin,
+ u32 type, struct nouveau_vram **pvram)
+{
+ struct drm_nouveau_private *dev_priv = dev->dev_private;
+ struct ttm_bo_device *bdev = &dev_priv->ttm.bdev;
+ struct ttm_mem_type_manager *man = &bdev->man[TTM_PL_VRAM];
+ struct nouveau_mm *mm = man->priv;
+ struct nouveau_mm_node *r;
+ struct nouveau_vram *vram;
+ int ret;
+
+ size >>= 12;
+ align >>= 12;
+ ncmin >>= 12;
+
+ vram = kzalloc(sizeof(*vram), GFP_KERNEL);
+ if (!vram)
+ return -ENOMEM;
+
+ INIT_LIST_HEAD(&vram->regions);
+ vram->dev = dev_priv->dev;
+ vram->memtype = type;
+ vram->size = size;
+
+ mutex_lock(&mm->mutex);
+ do {
+ ret = nouveau_mm_get(mm, 1, size, ncmin, align, &r);
+ if (ret) {
+ mutex_unlock(&mm->mutex);
+ nv50_vram_del(dev, &vram);
+ return ret;
+ }
+
+ list_add_tail(&r->rl_entry, &vram->regions);
+ size -= r->length;
+ } while (size);
+ mutex_unlock(&mm->mutex);
+
+ r = list_first_entry(&vram->regions, struct nouveau_mm_node, rl_entry);
+ vram->offset = (u64)r->offset << 12;
+ *pvram = vram;
+ return 0;
+}
+
+int
+nvc0_vram_init(struct drm_device *dev)
+{
+ struct drm_nouveau_private *dev_priv = dev->dev_private;
+
+ dev_priv->vram_size = nv_rd32(dev, 0x10f20c) << 20;
+ dev_priv->vram_size *= nv_rd32(dev, 0x121c74);
+ dev_priv->vram_rblock_size = 4096;
+ return 0;
+}
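
Note on nvc0_vram_init() above: the two register reads are undocumented in this diff, so the following is an inferred reading, taking 0x10f20c as a per-unit memory amount in MiB and 0x121c74 as a unit count:

	/* Illustrative arithmetic, assuming the register semantics above:
	 *   nv_rd32(dev, 0x10f20c) == 0x100 -> 0x100 << 20 = 256 MiB per unit
	 *   nv_rd32(dev, 0x121c74) == 6     -> 256 MiB * 6  = 1536 MiB total
	 */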
diff --git a/drivers/gpu/drm/nouveau/nvreg.h b/drivers/gpu/drm/nouveau/nvreg.h
index 881f8a58561..fe0f253089a 100644
--- a/drivers/gpu/drm/nouveau/nvreg.h
+++ b/drivers/gpu/drm/nouveau/nvreg.h
@@ -153,7 +153,8 @@
#define NV_PCRTC_START 0x00600800
#define NV_PCRTC_CONFIG 0x00600804
# define NV_PCRTC_CONFIG_START_ADDRESS_NON_VGA (1 << 0)
-# define NV_PCRTC_CONFIG_START_ADDRESS_HSYNC (2 << 0)
+# define NV04_PCRTC_CONFIG_START_ADDRESS_HSYNC (4 << 0)
+# define NV10_PCRTC_CONFIG_START_ADDRESS_HSYNC (2 << 0)
#define NV_PCRTC_CURSOR_CONFIG 0x00600810
# define NV_PCRTC_CURSOR_CONFIG_ENABLE_ENABLE (1 << 0)
# define NV_PCRTC_CURSOR_CONFIG_DOUBLE_SCAN_ENABLE (1 << 4)
diff --git a/drivers/gpu/drm/radeon/Makefile b/drivers/gpu/drm/radeon/Makefile
index 6cae4f2028d..e47eecfc2df 100644
--- a/drivers/gpu/drm/radeon/Makefile
+++ b/drivers/gpu/drm/radeon/Makefile
@@ -65,10 +65,13 @@ radeon-y += radeon_device.o radeon_asic.o radeon_kms.o \
rs400.o rs600.o rs690.o rv515.o r520.o r600.o rv770.o radeon_test.o \
r200.o radeon_legacy_tv.o r600_cs.o r600_blit.o r600_blit_shaders.o \
r600_blit_kms.o radeon_pm.o atombios_dp.o r600_audio.o r600_hdmi.o \
- evergreen.o evergreen_cs.o evergreen_blit_shaders.o evergreen_blit_kms.o
+ evergreen.o evergreen_cs.o evergreen_blit_shaders.o evergreen_blit_kms.o \
+ radeon_trace_points.o ni.o
radeon-$(CONFIG_COMPAT) += radeon_ioc32.o
radeon-$(CONFIG_VGA_SWITCHEROO) += radeon_atpx_handler.o
radeon-$(CONFIG_ACPI) += radeon_acpi.o
obj-$(CONFIG_DRM_RADEON)+= radeon.o
+
+CFLAGS_radeon_trace_points.o := -I$(src)
\ No newline at end of file
diff --git a/drivers/gpu/drm/radeon/ObjectID.h b/drivers/gpu/drm/radeon/ObjectID.h
index c714179d1bf..c61c3fe9fb9 100644
--- a/drivers/gpu/drm/radeon/ObjectID.h
+++ b/drivers/gpu/drm/radeon/ObjectID.h
@@ -37,6 +37,8 @@
#define GRAPH_OBJECT_TYPE_CONNECTOR 0x3
#define GRAPH_OBJECT_TYPE_ROUTER 0x4
/* deleted */
+#define GRAPH_OBJECT_TYPE_DISPLAY_PATH 0x6
+#define GRAPH_OBJECT_TYPE_GENERIC 0x7
/****************************************************/
/* Encoder Object ID Definition */
@@ -64,6 +66,9 @@
#define ENCODER_OBJECT_ID_VT1623 0x10
#define ENCODER_OBJECT_ID_HDMI_SI1930 0x11
#define ENCODER_OBJECT_ID_HDMI_INTERNAL 0x12
+#define ENCODER_OBJECT_ID_ALMOND 0x22
+#define ENCODER_OBJECT_ID_TRAVIS 0x23
+#define ENCODER_OBJECT_ID_NUTMEG 0x22
/* Kaleidoscope (KLDSCP) Class Display Hardware (internal) */
#define ENCODER_OBJECT_ID_INTERNAL_KLDSCP_TMDS1 0x13
#define ENCODER_OBJECT_ID_INTERNAL_KLDSCP_DVO1 0x14
@@ -108,6 +113,7 @@
#define CONNECTOR_OBJECT_ID_DISPLAYPORT 0x13
#define CONNECTOR_OBJECT_ID_eDP 0x14
#define CONNECTOR_OBJECT_ID_MXM 0x15
+#define CONNECTOR_OBJECT_ID_LVDS_eDP 0x16
/* deleted */
@@ -124,6 +130,7 @@
#define GENERIC_OBJECT_ID_GLSYNC 0x01
#define GENERIC_OBJECT_ID_PX2_NON_DRIVABLE 0x02
#define GENERIC_OBJECT_ID_MXM_OPM 0x03
+#define GENERIC_OBJECT_ID_STEREO_PIN 0x04 //This object may show up in the Misc Object table; it follows the ATOM_OBJECT format and contains one ATOM_OBJECT_GPIO_CNTL_RECORD for the stereo pin
/****************************************************/
/* Graphics Object ENUM ID Definition */
@@ -360,6 +367,26 @@
GRAPH_OBJECT_ENUM_ID1 << ENUM_ID_SHIFT |\
ENCODER_OBJECT_ID_GENERAL_EXTERNAL_DVO << OBJECT_ID_SHIFT)
+#define ENCODER_ALMOND_ENUM_ID1 ( GRAPH_OBJECT_TYPE_ENCODER << OBJECT_TYPE_SHIFT |\
+ GRAPH_OBJECT_ENUM_ID1 << ENUM_ID_SHIFT |\
+ ENCODER_OBJECT_ID_ALMOND << OBJECT_ID_SHIFT)
+
+#define ENCODER_ALMOND_ENUM_ID2 ( GRAPH_OBJECT_TYPE_ENCODER << OBJECT_TYPE_SHIFT |\
+ GRAPH_OBJECT_ENUM_ID2 << ENUM_ID_SHIFT |\
+ ENCODER_OBJECT_ID_ALMOND << OBJECT_ID_SHIFT)
+
+#define ENCODER_TRAVIS_ENUM_ID1 ( GRAPH_OBJECT_TYPE_ENCODER << OBJECT_TYPE_SHIFT |\
+ GRAPH_OBJECT_ENUM_ID1 << ENUM_ID_SHIFT |\
+ ENCODER_OBJECT_ID_TRAVIS << OBJECT_ID_SHIFT)
+
+#define ENCODER_TRAVIS_ENUM_ID2 ( GRAPH_OBJECT_TYPE_ENCODER << OBJECT_TYPE_SHIFT |\
+ GRAPH_OBJECT_ENUM_ID2 << ENUM_ID_SHIFT |\
+ ENCODER_OBJECT_ID_TRAVIS << OBJECT_ID_SHIFT)
+
+#define ENCODER_NUTMEG_ENUM_ID1 ( GRAPH_OBJECT_TYPE_ENCODER << OBJECT_TYPE_SHIFT |\
+ GRAPH_OBJECT_ENUM_ID1 << ENUM_ID_SHIFT |\
+ ENCODER_OBJECT_ID_NUTMEG << OBJECT_ID_SHIFT)
+
/****************************************************/
/* Connector Object ID definition - Shared with BIOS */
/****************************************************/
@@ -421,6 +448,14 @@
GRAPH_OBJECT_ENUM_ID2 << ENUM_ID_SHIFT |\
CONNECTOR_OBJECT_ID_SINGLE_LINK_DVI_D << OBJECT_ID_SHIFT)
+#define CONNECTOR_SINGLE_LINK_DVI_D_ENUM_ID3 ( GRAPH_OBJECT_TYPE_CONNECTOR << OBJECT_TYPE_SHIFT |\
+ GRAPH_OBJECT_ENUM_ID3 << ENUM_ID_SHIFT |\
+ CONNECTOR_OBJECT_ID_SINGLE_LINK_DVI_D << OBJECT_ID_SHIFT)
+
+#define CONNECTOR_SINGLE_LINK_DVI_D_ENUM_ID4 ( GRAPH_OBJECT_TYPE_CONNECTOR << OBJECT_TYPE_SHIFT |\
+ GRAPH_OBJECT_ENUM_ID4 << ENUM_ID_SHIFT |\
+ CONNECTOR_OBJECT_ID_SINGLE_LINK_DVI_D << OBJECT_ID_SHIFT)
+
#define CONNECTOR_DUAL_LINK_DVI_D_ENUM_ID1 ( GRAPH_OBJECT_TYPE_CONNECTOR << OBJECT_TYPE_SHIFT |\
GRAPH_OBJECT_ENUM_ID1 << ENUM_ID_SHIFT |\
CONNECTOR_OBJECT_ID_DUAL_LINK_DVI_D << OBJECT_ID_SHIFT)
@@ -512,6 +547,7 @@
#define CONNECTOR_7PIN_DIN_ENUM_ID1 ( GRAPH_OBJECT_TYPE_CONNECTOR << OBJECT_TYPE_SHIFT |\
GRAPH_OBJECT_ENUM_ID1 << ENUM_ID_SHIFT |\
CONNECTOR_OBJECT_ID_7PIN_DIN << OBJECT_ID_SHIFT)
+
#define CONNECTOR_7PIN_DIN_ENUM_ID2 ( GRAPH_OBJECT_TYPE_CONNECTOR << OBJECT_TYPE_SHIFT |\
GRAPH_OBJECT_ENUM_ID2 << ENUM_ID_SHIFT |\
CONNECTOR_OBJECT_ID_7PIN_DIN << OBJECT_ID_SHIFT)
@@ -593,6 +629,14 @@
GRAPH_OBJECT_ENUM_ID7 << ENUM_ID_SHIFT |\
CONNECTOR_OBJECT_ID_MXM << OBJECT_ID_SHIFT) //Mapping to MXM_DAC
+#define CONNECTOR_LVDS_eDP_ENUM_ID1 ( GRAPH_OBJECT_TYPE_CONNECTOR << OBJECT_TYPE_SHIFT |\
+ GRAPH_OBJECT_ENUM_ID1 << ENUM_ID_SHIFT |\
+ CONNECTOR_OBJECT_ID_LVDS_eDP << OBJECT_ID_SHIFT)
+
+#define CONNECTOR_LVDS_eDP_ENUM_ID2 ( GRAPH_OBJECT_TYPE_CONNECTOR << OBJECT_TYPE_SHIFT |\
+ GRAPH_OBJECT_ENUM_ID2 << ENUM_ID_SHIFT |\
+ CONNECTOR_OBJECT_ID_LVDS_eDP << OBJECT_ID_SHIFT)
+
/****************************************************/
/* Router Object ID definition - Shared with BIOS */
/****************************************************/
@@ -621,6 +665,10 @@
GRAPH_OBJECT_ENUM_ID1 << ENUM_ID_SHIFT |\
GENERIC_OBJECT_ID_MXM_OPM << OBJECT_ID_SHIFT)
+#define GENERICOBJECT_STEREO_PIN_ENUM_ID1 (GRAPH_OBJECT_TYPE_GENERIC << OBJECT_TYPE_SHIFT |\
+ GRAPH_OBJECT_ENUM_ID1 << ENUM_ID_SHIFT |\
+ GENERIC_OBJECT_ID_STEREO_PIN << OBJECT_ID_SHIFT)
+
/****************************************************/
/* Object Cap definition - Shared with BIOS */
/****************************************************/
diff --git a/drivers/gpu/drm/radeon/atom.c b/drivers/gpu/drm/radeon/atom.c
index 05efb5b9f13..258fa5e7a2d 100644
--- a/drivers/gpu/drm/radeon/atom.c
+++ b/drivers/gpu/drm/radeon/atom.c
@@ -734,16 +734,16 @@ static void atom_op_jump(atom_exec_context *ctx, int *ptr, int arg)
static void atom_op_mask(atom_exec_context *ctx, int *ptr, int arg)
{
uint8_t attr = U8((*ptr)++);
- uint32_t dst, src1, src2, saved;
+ uint32_t dst, mask, src, saved;
int dptr = *ptr;
SDEBUG(" dst: ");
dst = atom_get_dst(ctx, arg, attr, ptr, &saved, 1);
- SDEBUG(" src1: ");
- src1 = atom_get_src_direct(ctx, ((attr >> 3) & 7), ptr);
- SDEBUG(" src2: ");
- src2 = atom_get_src(ctx, attr, ptr);
- dst &= src1;
- dst |= src2;
+ mask = atom_get_src_direct(ctx, ((attr >> 3) & 7), ptr);
+ SDEBUG(" mask: 0x%08x", mask);
+ SDEBUG(" src: ");
+ src = atom_get_src(ctx, attr, ptr);
+ dst &= mask;
+ dst |= src;
SDEBUG(" dst: ");
atom_put_dst(ctx, arg, attr, &dptr, dst, saved);
}
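
The rename above (src1/src2 to mask/src) makes the read-modify-write nature of the op explicit; in effect it computes:

	/* atom_op_mask semantics: 'mask' selects the bits to keep,
	 * 'src' supplies the bits to set */
	dst = (dst & mask) | src;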
diff --git a/drivers/gpu/drm/radeon/atombios.h b/drivers/gpu/drm/radeon/atombios.h
index fe359a239df..58a0cd02c0a 100644
--- a/drivers/gpu/drm/radeon/atombios.h
+++ b/drivers/gpu/drm/radeon/atombios.h
@@ -73,8 +73,18 @@
#define ATOM_PPLL1 0
#define ATOM_PPLL2 1
#define ATOM_DCPLL 2
+#define ATOM_PPLL0 2
+#define ATOM_EXT_PLL1 8
+#define ATOM_EXT_PLL2 9
+#define ATOM_EXT_CLOCK 10
#define ATOM_PPLL_INVALID 0xFF
+#define ENCODER_REFCLK_SRC_P1PLL 0
+#define ENCODER_REFCLK_SRC_P2PLL 1
+#define ENCODER_REFCLK_SRC_DCPLL 2
+#define ENCODER_REFCLK_SRC_EXTCLK 3
+#define ENCODER_REFCLK_SRC_INVALID 0xFF
+
#define ATOM_SCALER1 0
#define ATOM_SCALER2 1
@@ -192,6 +202,9 @@ typedef struct _ATOM_COMMON_TABLE_HEADER
/*Image can't be updated, while Driver needs to carry the new table! */
}ATOM_COMMON_TABLE_HEADER;
+/****************************************************************************/
+// Structure stores the ROM header.
+/****************************************************************************/
typedef struct _ATOM_ROM_HEADER
{
ATOM_COMMON_TABLE_HEADER sHeader;
@@ -221,6 +234,9 @@ typedef struct _ATOM_ROM_HEADER
#define USHORT void*
#endif
+/****************************************************************************/
+// Structures used in Command.mtb
+/****************************************************************************/
typedef struct _ATOM_MASTER_LIST_OF_COMMAND_TABLES{
USHORT ASIC_Init; //Function Table, used by various SW components,latest version 1.1
USHORT GetDisplaySurfaceSize; //Atomic Table, Used by Bios when enabling HW ICON
@@ -312,6 +328,7 @@ typedef struct _ATOM_MASTER_LIST_OF_COMMAND_TABLES{
#define SetUniphyInstance ASIC_StaticPwrMgtStatusChange
#define HPDInterruptService ReadHWAssistedI2CStatus
#define EnableVGA_Access GetSCLKOverMCLKRatio
+#define GetDispObjectInfo EnableYUV
typedef struct _ATOM_MASTER_COMMAND_TABLE
{
@@ -357,6 +374,24 @@ typedef struct _ATOM_COMMON_ROM_COMMAND_TABLE_HEADER
/****************************************************************************/
#define COMPUTE_MEMORY_PLL_PARAM 1
#define COMPUTE_ENGINE_PLL_PARAM 2
+#define ADJUST_MC_SETTING_PARAM 3
+
+/****************************************************************************/
+// Structures used by AdjustMemoryControllerTable
+/****************************************************************************/
+typedef struct _ATOM_ADJUST_MEMORY_CLOCK_FREQ
+{
+#if ATOM_BIG_ENDIAN
+ ULONG ulPointerReturnFlag:1; // BYTE_3[7]=1 - Return the pointer to the right Data Block; BYTE_3[7]=0 - Program the right Data Block
+ ULONG ulMemoryModuleNumber:7; // BYTE_3[6:0]
+ ULONG ulClockFreq:24;
+#else
+ ULONG ulClockFreq:24;
+ ULONG ulMemoryModuleNumber:7; // BYTE_3[6:0]
+ ULONG ulPointerReturnFlag:1; // BYTE_3[7]=1 - Return the pointer to the right Data Block; BYTE_3[7]=0 - Program the right Data Block
+#endif
+}ATOM_ADJUST_MEMORY_CLOCK_FREQ;
+#define POINTER_RETURN_FLAG 0x80
typedef struct _COMPUTE_MEMORY_ENGINE_PLL_PARAMETERS
{
@@ -440,6 +475,26 @@ typedef struct _COMPUTE_MEMORY_ENGINE_PLL_PARAMETERS_V4
#endif
}COMPUTE_MEMORY_ENGINE_PLL_PARAMETERS_V4;
+typedef struct _COMPUTE_MEMORY_ENGINE_PLL_PARAMETERS_V5
+{
+ union
+ {
+ ATOM_COMPUTE_CLOCK_FREQ ulClock; //Input Parameter
+ ATOM_S_MPLL_FB_DIVIDER ulFbDiv; //Output Parameter
+ };
+ UCHAR ucRefDiv; //Output Parameter
+ UCHAR ucPostDiv; //Output Parameter
+ union
+ {
+ UCHAR ucCntlFlag; //Output Flags
+ UCHAR ucInputFlag; //Input Flags. ucInputFlag[0] - Strobe(1)/Performance(0) mode
+ };
+ UCHAR ucReserved;
+}COMPUTE_MEMORY_ENGINE_PLL_PARAMETERS_V5;
+
+// ucInputFlag
+#define ATOM_PLL_INPUT_FLAG_PLL_STROBE_MODE_EN 1 // 1-StrobeMode, 0-PerformanceMode
+
typedef struct _DYNAMICE_MEMORY_SETTINGS_PARAMETER
{
ATOM_COMPUTE_CLOCK_FREQ ulClock;
@@ -583,6 +638,7 @@ typedef struct _DIG_ENCODER_CONTROL_PARAMETERS
#define ATOM_ENCODER_CONFIG_DPLINKRATE_MASK 0x01
#define ATOM_ENCODER_CONFIG_DPLINKRATE_1_62GHZ 0x00
#define ATOM_ENCODER_CONFIG_DPLINKRATE_2_70GHZ 0x01
+#define ATOM_ENCODER_CONFIG_DPLINKRATE_5_40GHZ 0x02
#define ATOM_ENCODER_CONFIG_LINK_SEL_MASK 0x04
#define ATOM_ENCODER_CONFIG_LINKA 0x00
#define ATOM_ENCODER_CONFIG_LINKB 0x04
@@ -608,6 +664,9 @@ typedef struct _DIG_ENCODER_CONTROL_PARAMETERS
#define ATOM_ENCODER_MODE_TV 13
#define ATOM_ENCODER_MODE_CV 14
#define ATOM_ENCODER_MODE_CRT 15
+#define ATOM_ENCODER_MODE_DVO 16
+#define ATOM_ENCODER_MODE_DP_SST ATOM_ENCODER_MODE_DP // For DP1.2
+#define ATOM_ENCODER_MODE_DP_MST 5 // For DP1.2
typedef struct _ATOM_DIG_ENCODER_CONFIG_V2
{
@@ -661,6 +720,7 @@ typedef struct _DIG_ENCODER_CONTROL_PARAMETERS_V2
#define ATOM_ENCODER_CMD_DP_LINK_TRAINING_START 0x08
#define ATOM_ENCODER_CMD_DP_LINK_TRAINING_PATTERN1 0x09
#define ATOM_ENCODER_CMD_DP_LINK_TRAINING_PATTERN2 0x0a
+#define ATOM_ENCODER_CMD_DP_LINK_TRAINING_PATTERN3 0x13
#define ATOM_ENCODER_CMD_DP_LINK_TRAINING_COMPLETE 0x0b
#define ATOM_ENCODER_CMD_DP_VIDEO_OFF 0x0c
#define ATOM_ENCODER_CMD_DP_VIDEO_ON 0x0d
@@ -671,24 +731,34 @@ typedef struct _DIG_ENCODER_CONTROL_PARAMETERS_V2
#define ATOM_ENCODER_STATUS_LINK_TRAINING_COMPLETE 0x10
#define ATOM_ENCODER_STATUS_LINK_TRAINING_INCOMPLETE 0x00
+//ucTableFormatRevision=1
+//ucTableContentRevision=3
// Following function ENABLE sub-function will be used by driver when TMDS/HDMI/LVDS is used, disable function will be used by driver
typedef struct _ATOM_DIG_ENCODER_CONFIG_V3
{
#if ATOM_BIG_ENDIAN
UCHAR ucReserved1:1;
- UCHAR ucDigSel:3; // =0: DIGA/B/C/D/E/F
+ UCHAR ucDigSel:3; // =0/1/2/3/4/5: DIG0/1/2/3/4/5 (In register spec also referred to as DIGA/B/C/D/E/F)
UCHAR ucReserved:3;
UCHAR ucDPLinkRate:1; // =0: 1.62Ghz, =1: 2.7Ghz
#else
UCHAR ucDPLinkRate:1; // =0: 1.62Ghz, =1: 2.7Ghz
UCHAR ucReserved:3;
- UCHAR ucDigSel:3; // =0: DIGA/B/C/D/E/F
+ UCHAR ucDigSel:3; // =0/1/2/3/4/5: DIG0/1/2/3/4/5 (In register spec also referred to as DIGA/B/C/D/E/F)
UCHAR ucReserved1:1;
#endif
}ATOM_DIG_ENCODER_CONFIG_V3;
+#define ATOM_ENCODER_CONFIG_V3_DPLINKRATE_MASK 0x03
+#define ATOM_ENCODER_CONFIG_V3_DPLINKRATE_1_62GHZ 0x00
+#define ATOM_ENCODER_CONFIG_V3_DPLINKRATE_2_70GHZ 0x01
#define ATOM_ENCODER_CONFIG_V3_ENCODER_SEL 0x70
-
+#define ATOM_ENCODER_CONFIG_V3_DIG0_ENCODER 0x00
+#define ATOM_ENCODER_CONFIG_V3_DIG1_ENCODER 0x10
+#define ATOM_ENCODER_CONFIG_V3_DIG2_ENCODER 0x20
+#define ATOM_ENCODER_CONFIG_V3_DIG3_ENCODER 0x30
+#define ATOM_ENCODER_CONFIG_V3_DIG4_ENCODER 0x40
+#define ATOM_ENCODER_CONFIG_V3_DIG5_ENCODER 0x50
typedef struct _DIG_ENCODER_CONTROL_PARAMETERS_V3
{
@@ -707,6 +777,56 @@ typedef struct _DIG_ENCODER_CONTROL_PARAMETERS_V3
UCHAR ucReserved;
}DIG_ENCODER_CONTROL_PARAMETERS_V3;
+//ucTableFormatRevision=1
+//ucTableContentRevision=4
+// start from NI
+// Following function ENABLE sub-function will be used by driver when TMDS/HDMI/LVDS is used, disable function will be used by driver
+typedef struct _ATOM_DIG_ENCODER_CONFIG_V4
+{
+#if ATOM_BIG_ENDIAN
+ UCHAR ucReserved1:1;
+ UCHAR ucDigSel:3; // =0/1/2/3/4/5: DIG0/1/2/3/4/5 (In register spec also referred to as DIGA/B/C/D/E/F)
+ UCHAR ucReserved:2;
+ UCHAR ucDPLinkRate:2; // =0: 1.62GHz, =1: 2.7GHz, =2: 5.4GHz <= Changed compared to previous version
+#else
+ UCHAR ucDPLinkRate:2; // =0: 1.62GHz, =1: 2.7GHz, =2: 5.4GHz <= Changed compared to previous version
+ UCHAR ucReserved:2;
+ UCHAR ucDigSel:3; // =0/1/2/3/4/5: DIG0/1/2/3/4/5 (In register spec also referred to as DIGA/B/C/D/E/F)
+ UCHAR ucReserved1:1;
+#endif
+}ATOM_DIG_ENCODER_CONFIG_V4;
+
+#define ATOM_ENCODER_CONFIG_V4_DPLINKRATE_MASK 0x03
+#define ATOM_ENCODER_CONFIG_V4_DPLINKRATE_1_62GHZ 0x00
+#define ATOM_ENCODER_CONFIG_V4_DPLINKRATE_2_70GHZ 0x01
+#define ATOM_ENCODER_CONFIG_V4_DPLINKRATE_5_40GHZ 0x02
+#define ATOM_ENCODER_CONFIG_V4_ENCODER_SEL 0x70
+#define ATOM_ENCODER_CONFIG_V4_DIG0_ENCODER 0x00
+#define ATOM_ENCODER_CONFIG_V4_DIG1_ENCODER 0x10
+#define ATOM_ENCODER_CONFIG_V4_DIG2_ENCODER 0x20
+#define ATOM_ENCODER_CONFIG_V4_DIG3_ENCODER 0x30
+#define ATOM_ENCODER_CONFIG_V4_DIG4_ENCODER 0x40
+#define ATOM_ENCODER_CONFIG_V4_DIG5_ENCODER 0x50
+
+typedef struct _DIG_ENCODER_CONTROL_PARAMETERS_V4
+{
+ USHORT usPixelClock; // in 10KHz; for BIOS convenience
+ union{
+ ATOM_DIG_ENCODER_CONFIG_V4 acConfig;
+ UCHAR ucConfig;
+ };
+ UCHAR ucAction;
+ UCHAR ucEncoderMode;
+ // =0: DP encoder
+ // =1: LVDS encoder
+ // =2: DVI encoder
+ // =3: HDMI encoder
+ // =4: SDVO encoder
+ // =5: DP audio
+ UCHAR ucLaneNum; // how many lanes to enable
+ UCHAR ucBitPerColor; // only valid for DP mode when ucAction = ATOM_ENCODER_CMD_SETUP
+ UCHAR ucHPD_ID; // HPD ID (1-6); =0 means skip HPD programming. New compared to the previous version
+}DIG_ENCODER_CONTROL_PARAMETERS_V4;
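
An illustrative note, not part of the patch: a minimal sketch of filling the V4 parameter block above for a DP setup. The helper name is hypothetical; ATOM_ENCODER_CMD_SETUP is assumed to be defined earlier in this header, and memset() comes from <string.h>.

/* Hypothetical helper: set up DIG1 as a 4-lane DP encoder at 2.7Ghz. */
static void example_fill_dig_encoder_v4(DIG_ENCODER_CONTROL_PARAMETERS_V4 *args,
                                        USHORT pixel_clock_10khz)
{
    memset(args, 0, sizeof(*args));
    args->usPixelClock  = pixel_clock_10khz;  /* in 10KHz */
    args->ucConfig      = ATOM_ENCODER_CONFIG_V4_DIG1_ENCODER |      /* bits[6:4] */
                          ATOM_ENCODER_CONFIG_V4_DPLINKRATE_2_70GHZ; /* bits[1:0] */
    args->ucAction      = ATOM_ENCODER_CMD_SETUP; /* assumed from earlier in this header */
    args->ucEncoderMode = 0;   /* =0: DP encoder, per the list above */
    args->ucLaneNum     = 4;   /* enable 4 lanes */
    args->ucBitPerColor = 0;   /* PANEL_BPC_UNDEFINE, defined just below */
    args->ucHPD_ID      = 1;   /* HPD pin 1; =0 would skip HPD programming */
}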
// define ucBitPerColor:
#define PANEL_BPC_UNDEFINE 0x00
@@ -893,6 +1013,7 @@ typedef struct _ATOM_DIG_TRANSMITTER_CONFIG_V3
#endif
}ATOM_DIG_TRANSMITTER_CONFIG_V3;
+
typedef struct _DIG_TRANSMITTER_CONTROL_PARAMETERS_V3
{
union
@@ -936,6 +1057,149 @@ typedef struct _DIG_TRANSMITTER_CONTROL_PARAMETERS_V3
#define ATOM_TRANSMITTER_CONFIG_V3_TRANSMITTER2 0x40 //CD
#define ATOM_TRANSMITTER_CONFIG_V3_TRANSMITTER3 0x80 //EF
+
+/****************************************************************************/
+// Structures used by UNIPHYTransmitterControlTable V1.4
+// ASIC Families: NI
+// ucTableFormatRevision=1
+// ucTableContentRevision=4
+/****************************************************************************/
+typedef struct _ATOM_DP_VS_MODE_V4
+{
+ UCHAR ucLaneSel;
+ union
+ {
+ UCHAR ucLaneSet;
+ struct {
+#if ATOM_BIG_ENDIAN
+ UCHAR ucPOST_CURSOR2:2; //Bit[7:6] Post Cursor2 Level <= New in V4
+ UCHAR ucPRE_EMPHASIS:3; //Bit[5:3] Pre-emphasis Level
+ UCHAR ucVOLTAGE_SWING:3; //Bit[2:0] Voltage Swing Level
+#else
+ UCHAR ucVOLTAGE_SWING:3; //Bit[2:0] Voltage Swing Level
+ UCHAR ucPRE_EMPHASIS:3; //Bit[5:3] Pre-emphasis Level
+ UCHAR ucPOST_CURSOR2:2; //Bit[7:6] Post Cursor2 Level <= New in V4
+#endif
+ };
+ };
+}ATOM_DP_VS_MODE_V4;
+
+typedef struct _ATOM_DIG_TRANSMITTER_CONFIG_V4
+{
+#if ATOM_BIG_ENDIAN
+ UCHAR ucTransmitterSel:2; //bit7:6: =0 Dig Transmitter 1 ( Uniphy AB )
+ // =1 Dig Transmitter 2 ( Uniphy CD )
+ // =2 Dig Transmitter 3 ( Uniphy EF )
+ UCHAR ucRefClkSource:2; //bit5:4: PPLL1 =0, PPLL2=1, DCPLL=2, EXT_CLK=3 <= New
+ UCHAR ucEncoderSel:1; //bit3=0: Data/Clk path source from DIGA/C/E. =1: Data/clk path source from DIGB/D/F
+ UCHAR ucLinkSel:1; //bit2=0: Uniphy LINKA or C or E when fDualLinkConnector=0. when fDualLinkConnector=1, it means master link of dual link is A or C or E
+ // =1: Uniphy LINKB or D or F when fDualLinkConnector=0. when fDualLinkConnector=1, it means master link of dual link is B or D or F
+ UCHAR fCoherentMode:1; //bit1=1: Coherent Mode ( for DVI/HDMI mode )
+ UCHAR fDualLinkConnector:1; //bit0=1: Dual Link DVI connector
+#else
+ UCHAR fDualLinkConnector:1; //bit0=1: Dual Link DVI connector
+ UCHAR fCoherentMode:1; //bit1=1: Coherent Mode ( for DVI/HDMI mode )
+ UCHAR ucLinkSel:1; //bit2=0: Uniphy LINKA or C or E when fDualLinkConnector=0. when fDualLinkConnector=1, it means master link of dual link is A or C or E
+ // =1: Uniphy LINKB or D or F when fDualLinkConnector=0. when fDualLinkConnector=1, it means master link of dual link is B or D or F
+ UCHAR ucEncoderSel:1; //bit3=0: Data/Clk path source from DIGA/C/E. =1: Data/clk path source from DIGB/D/F
+ UCHAR ucRefClkSource:2; //bit5:4: PPLL1 =0, PPLL2=1, DCPLL=2, EXT_CLK=3 <= New
+ UCHAR ucTransmitterSel:2; //bit7:6: =0 Dig Transmitter 1 ( Uniphy AB )
+ // =1 Dig Transmitter 2 ( Uniphy CD )
+ // =2 Dig Transmitter 3 ( Uniphy EF )
+#endif
+}ATOM_DIG_TRANSMITTER_CONFIG_V4;
+
+typedef struct _DIG_TRANSMITTER_CONTROL_PARAMETERS_V4
+{
+ union
+ {
+ USHORT usPixelClock; // in 10KHz; for BIOS convenience
+ USHORT usInitInfo; // when initializing Uniphy, the lower 8 bits hold the connector type defined in objectid.h
+ ATOM_DP_VS_MODE_V4 asMode; // DP voltage swing mode; redefined compared to the previous version
+ };
+ union
+ {
+ ATOM_DIG_TRANSMITTER_CONFIG_V4 acConfig;
+ UCHAR ucConfig;
+ };
+ UCHAR ucAction; // defined as ATOM_TRANSMITTER_ACTION_XXX
+ UCHAR ucLaneNum;
+ UCHAR ucReserved[3];
+}DIG_TRANSMITTER_CONTROL_PARAMETERS_V4;
+
+//ucConfig
+//Bit0
+#define ATOM_TRANSMITTER_CONFIG_V4_DUAL_LINK_CONNECTOR 0x01
+//Bit1
+#define ATOM_TRANSMITTER_CONFIG_V4_COHERENT 0x02
+//Bit2
+#define ATOM_TRANSMITTER_CONFIG_V4_LINK_SEL_MASK 0x04
+#define ATOM_TRANSMITTER_CONFIG_V4_LINKA 0x00
+#define ATOM_TRANSMITTER_CONFIG_V4_LINKB 0x04
+// Bit3
+#define ATOM_TRANSMITTER_CONFIG_V4_ENCODER_SEL_MASK 0x08
+#define ATOM_TRANSMITTER_CONFIG_V4_DIG1_ENCODER 0x00
+#define ATOM_TRANSMITTER_CONFIG_V4_DIG2_ENCODER 0x08
+// Bit5:4
+#define ATOM_TRANSMITTER_CONFIG_V4_REFCLK_SEL_MASK 0x30
+#define ATOM_TRANSMITTER_CONFIG_V4_P1PLL 0x00
+#define ATOM_TRANSMITTER_CONFIG_V4_P2PLL 0x10
+#define ATOM_TRANSMITTER_CONFIG_V4_DCPLL 0x20 // New in _V4
+#define ATOM_TRANSMITTER_CONFIG_V4_REFCLK_SRC_EXT 0x30 // Changed compared to V3
+// Bit7:6
+#define ATOM_TRANSMITTER_CONFIG_V4_TRANSMITTER_SEL_MASK 0xC0
+#define ATOM_TRANSMITTER_CONFIG_V4_TRANSMITTER1 0x00 //AB
+#define ATOM_TRANSMITTER_CONFIG_V4_TRANSMITTER2 0x40 //CD
+#define ATOM_TRANSMITTER_CONFIG_V4_TRANSMITTER3 0x80 //EF
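
Equally illustrative: composing the V1.4 transmitter ucConfig byte from the masks above; the function name is hypothetical.

/* Uniphy CD (transmitter 2), link B, DCPLL reference, coherent mode. */
static UCHAR example_transmitter_v4_config(void)
{
    UCHAR cfg = 0;
    cfg |= ATOM_TRANSMITTER_CONFIG_V4_COHERENT;     /* bit 1 */
    cfg |= ATOM_TRANSMITTER_CONFIG_V4_LINKB;        /* bit 2 */
    cfg |= ATOM_TRANSMITTER_CONFIG_V4_DCPLL;        /* bits[5:4], new in V4 */
    cfg |= ATOM_TRANSMITTER_CONFIG_V4_TRANSMITTER2; /* bits[7:6] */
    return cfg;
}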
+
+
+/****************************************************************************/
+// Structures used by ExternalEncoderControlTable V1.3
+// ASIC Families: Evergreen, Llano, NI
+// ucTableFormatRevision=1
+// ucTableContentRevision=3
+/****************************************************************************/
+
+typedef struct _EXTERNAL_ENCODER_CONTROL_PARAMETERS_V3
+{
+ union{
+ USHORT usPixelClock; // pixel clock in 10Khz, valid when ucAction=SETUP/ENABLE_OUTPUT
+ USHORT usConnectorId; // connector id, valid when ucAction = INIT
+ };
+ UCHAR ucConfig; // indicate which encoder, and DP link rate when ucAction = SETUP/ENABLE_OUTPUT
+ UCHAR ucAction; //
+ UCHAR ucEncoderMode; // encoder mode, only used when ucAction = SETUP/ENABLE_OUTPUT
+ UCHAR ucLaneNum; // lane number, only used when ucAction = SETUP/ENABLE_OUTPUT
+ UCHAR ucBitPerColor; // output bit per color, only valid when ucAction = SETUP/ENABLE_OUTPUT and ucEncodeMode= DP
+ UCHAR ucReserved;
+}EXTERNAL_ENCODER_CONTROL_PARAMETERS_V3;
+
+// ucAction
+#define EXTERNAL_ENCODER_ACTION_V3_DISABLE_OUTPUT 0x00
+#define EXTERNAL_ENCODER_ACTION_V3_ENABLE_OUTPUT 0x01
+#define EXTERNAL_ENCODER_ACTION_V3_ENCODER_INIT 0x07
+#define EXTERNAL_ENCODER_ACTION_V3_ENCODER_SETUP 0x0f
+#define EXTERNAL_ENCODER_ACTION_V3_ENCODER_BLANKING_OFF 0x10
+#define EXTERNAL_ENCODER_ACTION_V3_ENCODER_BLANKING 0x11
+#define EXTERNAL_ENCODER_ACTION_V3_DACLOAD_DETECTION 0x12
+
+// ucConfig
+#define EXTERNAL_ENCODER_CONFIG_V3_DPLINKRATE_MASK 0x03
+#define EXTERNAL_ENCODER_CONFIG_V3_DPLINKRATE_1_62GHZ 0x00
+#define EXTERNAL_ENCODER_CONFIG_V3_DPLINKRATE_2_70GHZ 0x01
+#define EXTERNAL_ENCODER_CONFIG_V3_DPLINKRATE_5_40GHZ 0x02
+#define EXTERNAL_ENCODER_CONFIG_V3_ENCODER_SEL_MASK 0x70
+#define EXTERNAL_ENCODER_CONFIG_V3_ENCODER1 0x00
+#define EXTERNAL_ENCODER_CONFIG_V3_ENCODER2 0x10
+#define EXTERNAL_ENCODER_CONFIG_V3_ENCODER3 0x20
+
+typedef struct _EXTERNAL_ENCODER_CONTROL_PS_ALLOCATION_V3
+{
+ EXTERNAL_ENCODER_CONTROL_PARAMETERS_V3 sExtEncoder;
+ ULONG ulReserved[2];
+}EXTERNAL_ENCODER_CONTROL_PS_ALLOCATION_V3;
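
Another hedged sketch: a SETUP call into the V1.3 external encoder table. The helper is hypothetical; memset() comes from <string.h>, and encoder_mode is one of the ucEncoderMode values used throughout this header.

static void example_ext_encoder_setup(EXTERNAL_ENCODER_CONTROL_PS_ALLOCATION_V3 *ps,
                                      USHORT pixel_clock_10khz, UCHAR encoder_mode)
{
    memset(ps, 0, sizeof(*ps));
    ps->sExtEncoder.usPixelClock  = pixel_clock_10khz; /* valid for SETUP/ENABLE_OUTPUT */
    ps->sExtEncoder.ucConfig      = EXTERNAL_ENCODER_CONFIG_V3_ENCODER1 |
                                    EXTERNAL_ENCODER_CONFIG_V3_DPLINKRATE_2_70GHZ;
    ps->sExtEncoder.ucAction      = EXTERNAL_ENCODER_ACTION_V3_ENCODER_SETUP;
    ps->sExtEncoder.ucEncoderMode = encoder_mode;
    ps->sExtEncoder.ucLaneNum     = 4; /* 4-lane DP, for illustration */
}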
+
+
/****************************************************************************/
// Structures used by DAC1OuputControlTable
// DAC2OuputControlTable
@@ -1142,6 +1406,7 @@ typedef struct _PIXEL_CLOCK_PARAMETERS_V2
#define PIXEL_CLOCK_V4_MISC_SS_ENABLE 0x10
#define PIXEL_CLOCK_V4_MISC_COHERENT_MODE 0x20
+
typedef struct _PIXEL_CLOCK_PARAMETERS_V3
{
USHORT usPixelClock; // in 10kHz unit; for bios convenient = (RefClk*FB_Div)/(Ref_Div*Post_Div)
@@ -1202,6 +1467,55 @@ typedef struct _PIXEL_CLOCK_PARAMETERS_V5
#define PIXEL_CLOCK_V5_MISC_HDMI_32BPP 0x08
#define PIXEL_CLOCK_V5_MISC_REF_DIV_SRC 0x10
+typedef struct _CRTC_PIXEL_CLOCK_FREQ
+{
+#if ATOM_BIG_ENDIAN
+ ULONG ucCRTC:8; // ATOM_CRTC1~6; indicates which CRTC controller the
+ // pixel clock drives. Not used in the DCPLL case.
+ ULONG ulPixelClock:24; // target pixel clock that drives the CRTC timing;
+ // 0 disables the PPLL/DCPLL. Expanded to 24 bits compared to the previous version.
+#else
+ ULONG ulPixelClock:24; // target pixel clock that drives the CRTC timing;
+ // 0 disables the PPLL/DCPLL. Expanded to 24 bits compared to the previous version.
+ ULONG ucCRTC:8; // ATOM_CRTC1~6; indicates which CRTC controller the
+ // pixel clock drives. Not used in the DCPLL case.
+#endif
+}CRTC_PIXEL_CLOCK_FREQ;
+
+typedef struct _PIXEL_CLOCK_PARAMETERS_V6
+{
+ union{
+ CRTC_PIXEL_CLOCK_FREQ ulCrtcPclkFreq; // pixel clock and CRTC id frequency
+ ULONG ulDispEngClkFreq; // dispclk frequency
+ };
+ USHORT usFbDiv; // feedback divider integer part.
+ UCHAR ucPostDiv; // post divider.
+ UCHAR ucRefDiv; // Reference divider
+ UCHAR ucPpll; // ATOM_PPLL1/ATOM_PPLL2/ATOM_DCPLL
+ UCHAR ucTransmitterID; // ASIC encoder id defined in objectId.h,
+ // indicates which graphics encoder will be used.
+ UCHAR ucEncoderMode; // Encoder mode:
+ UCHAR ucMiscInfo; // bit[0]= Force program PPLL
+ // bit[1]= set when VGA timing is used.
+ // bit[3:2]= HDMI panel bit depth: =0: 24bpp =1:30bpp, =2:32bpp
+ // bit[4]= RefClock source for PPLL.
+ // =0: XTALIN (default mode)
+ // =1: other external clock source, which is pre-defined
+ // by VBIOS depending on the feature required.
+ // bit[7:5]: reserved.
+ ULONG ulFbDivDecFrac; // 20 bit feedback divider decimal fraction part, range from 1~999999 ( 0.000001 to 0.999999 )
+
+}PIXEL_CLOCK_PARAMETERS_V6;
+
+#define PIXEL_CLOCK_V6_MISC_FORCE_PROG_PPLL 0x01
+#define PIXEL_CLOCK_V6_MISC_VGA_MODE 0x02
+#define PIXEL_CLOCK_V6_MISC_HDMI_BPP_MASK 0x0c
+#define PIXEL_CLOCK_V6_MISC_HDMI_24BPP 0x00
+#define PIXEL_CLOCK_V6_MISC_HDMI_36BPP 0x04
+#define PIXEL_CLOCK_V6_MISC_HDMI_30BPP 0x08
+#define PIXEL_CLOCK_V6_MISC_HDMI_48BPP 0x0c
+#define PIXEL_CLOCK_V6_MISC_REF_DIV_SRC 0x10
+
typedef struct _GET_DISP_PLL_STATUS_INPUT_PARAMETERS_V2
{
PIXEL_CLOCK_PARAMETERS_V3 sDispClkInput;
@@ -1241,10 +1555,11 @@ typedef struct _ADJUST_DISPLAY_PLL_PARAMETERS
typedef struct _ADJUST_DISPLAY_PLL_INPUT_PARAMETERS_V3
{
USHORT usPixelClock; // target pixel clock
- UCHAR ucTransmitterID; // transmitter id defined in objectid.h
+ UCHAR ucTransmitterID; // GPU transmitter id defined in objectid.h
UCHAR ucEncodeMode; // encoder mode: CRT, LVDS, DP, TMDS or HDMI
UCHAR ucDispPllConfig; // display pll configure parameter defined as following DISPPLL_CONFIG_XXXX
- UCHAR ucReserved[3];
+ UCHAR ucExtTransmitterID; // external encoder id.
+ UCHAR ucReserved[2];
}ADJUST_DISPLAY_PLL_INPUT_PARAMETERS_V3;
// usDispPllConfig v1.2 for RoadRunner
@@ -1358,6 +1673,7 @@ typedef struct _SET_UP_HW_I2C_DATA_PARAMETERS
/**************************************************************************/
#define SPEED_FAN_CONTROL_PS_ALLOCATION WRITE_ONE_BYTE_HW_I2C_DATA_PARAMETERS
+
/****************************************************************************/
// Structures used by PowerConnectorDetectionTable
/****************************************************************************/
@@ -1438,6 +1754,31 @@ typedef struct _ENABLE_SPREAD_SPECTRUM_ON_PPLL_V2
#define ATOM_PPLL_SS_AMOUNT_V2_NFRAC_MASK 0x0F00
#define ATOM_PPLL_SS_AMOUNT_V2_NFRAC_SHIFT 8
+// Used by DCE5.0
+ typedef struct _ENABLE_SPREAD_SPECTRUM_ON_PPLL_V3
+{
+ USHORT usSpreadSpectrumAmountFrac; // SS_AMOUNT_DSFRAC New in DCE5.0
+ UCHAR ucSpreadSpectrumType; // Bit[0]: 0-Down Spread,1-Center Spread.
+ // Bit[1]: 1-Ext. 0-Int.
+ // Bit[3:2]: =0 P1PLL =1 P2PLL =2 DCPLL
+ // Bits[7:4] reserved
+ UCHAR ucEnable; // ATOM_ENABLE or ATOM_DISABLE
+ USHORT usSpreadSpectrumAmount; // Includes SS_AMOUNT_FBDIV[7:0] and SS_AMOUNT_NFRAC_SLIP[11:8]
+ USHORT usSpreadSpectrumStep; // SS_STEP_SIZE_DSFRAC
+}ENABLE_SPREAD_SPECTRUM_ON_PPLL_V3;
+
+#define ATOM_PPLL_SS_TYPE_V3_DOWN_SPREAD 0x00
+#define ATOM_PPLL_SS_TYPE_V3_CENTRE_SPREAD 0x01
+#define ATOM_PPLL_SS_TYPE_V3_EXT_SPREAD 0x02
+#define ATOM_PPLL_SS_TYPE_V3_PPLL_SEL_MASK 0x0c
+#define ATOM_PPLL_SS_TYPE_V3_P1PLL 0x00
+#define ATOM_PPLL_SS_TYPE_V3_P2PLL 0x04
+#define ATOM_PPLL_SS_TYPE_V3_DCPLL 0x08
+#define ATOM_PPLL_SS_AMOUNT_V3_FBDIV_MASK 0x00FF
+#define ATOM_PPLL_SS_AMOUNT_V3_FBDIV_SHIFT 0
+#define ATOM_PPLL_SS_AMOUNT_V3_NFRAC_MASK 0x0F00
+#define ATOM_PPLL_SS_AMOUNT_V3_NFRAC_SHIFT 8
+
#define ENABLE_SPREAD_SPECTRUM_ON_PPLL_PS_ALLOCATION ENABLE_SPREAD_SPECTRUM_ON_PPLL
/**************************************************************************/
@@ -1706,7 +2047,7 @@ typedef struct _ATOM_MASTER_LIST_OF_DATA_TABLES
USHORT StandardVESA_Timing; // Only used by Bios
USHORT FirmwareInfo; // Shared by various SW components,latest version 1.4
USHORT DAC_Info; // Will be obsolete from R600
- USHORT LVDS_Info; // Shared by various SW components,latest version 1.1
+ USHORT LCD_Info; // Shared by various SW components, latest version 1.3; was called LVDS_Info
USHORT TMDS_Info; // Will be obsolete from R600
USHORT AnalogTV_Info; // Shared by various SW components,latest version 1.1
USHORT SupportedDevicesInfo; // Will be obsolete from R600
@@ -1736,12 +2077,16 @@ typedef struct _ATOM_MASTER_LIST_OF_DATA_TABLES
USHORT PowerSourceInfo; // Shared by various SW components, latest version 1.1
}ATOM_MASTER_LIST_OF_DATA_TABLES;
+// For backward compatibility
+#define LVDS_Info LCD_Info
+
typedef struct _ATOM_MASTER_DATA_TABLE
{
ATOM_COMMON_TABLE_HEADER sHeader;
ATOM_MASTER_LIST_OF_DATA_TABLES ListOfDataTables;
}ATOM_MASTER_DATA_TABLE;
+
/****************************************************************************/
// Structure used in MultimediaCapabilityInfoTable
/****************************************************************************/
@@ -1776,6 +2121,7 @@ typedef struct _ATOM_MULTIMEDIA_CONFIG_INFO
UCHAR ucVideoInput4Info;// Video Input 4 Type (1:0) F/B setting (2) physical connector ID (5:3) reserved (7:6)
}ATOM_MULTIMEDIA_CONFIG_INFO;
+
/****************************************************************************/
// Structures used in FirmwareInfoTable
/****************************************************************************/
@@ -2031,8 +2377,47 @@ typedef struct _ATOM_FIRMWARE_INFO_V2_1
UCHAR ucReserved4[3];
}ATOM_FIRMWARE_INFO_V2_1;
+//the structure below is used from NI onward
+//ucTableFormatRevision=2
+//ucTableContentRevision=2
+typedef struct _ATOM_FIRMWARE_INFO_V2_2
+{
+ ATOM_COMMON_TABLE_HEADER sHeader;
+ ULONG ulFirmwareRevision;
+ ULONG ulDefaultEngineClock; //In 10Khz unit
+ ULONG ulDefaultMemoryClock; //In 10Khz unit
+ ULONG ulReserved[2];
+ ULONG ulReserved1; //Was ulMaxEngineClockPLL_Output; //In 10Khz unit*
+ ULONG ulReserved2; //Was ulMaxMemoryClockPLL_Output; //In 10Khz unit*
+ ULONG ulMaxPixelClockPLL_Output; //In 10Khz unit
+ ULONG ulBinaryAlteredInfo; //Was ulASICMaxEngineClock ?
+ ULONG ulDefaultDispEngineClkFreq; //In 10Khz unit. This is the frequency before DCDTO, corresponding to usBootUpVDDCVoltage.
+ UCHAR ucReserved3; //Was ucASICMaxTemperature;
+ UCHAR ucMinAllowedBL_Level;
+ USHORT usBootUpVDDCVoltage; //In MV unit
+ USHORT usLcdMinPixelClockPLL_Output; // In MHz unit
+ USHORT usLcdMaxPixelClockPLL_Output; // In MHz unit
+ ULONG ulReserved4; //Was ulAsicMaximumVoltage
+ ULONG ulMinPixelClockPLL_Output; //In 10Khz unit
+ ULONG ulReserved5; //Was usMinEngineClockPLL_Input and usMaxEngineClockPLL_Input
+ ULONG ulReserved6; //Was usMinEngineClockPLL_Output and usMinMemoryClockPLL_Input
+ ULONG ulReserved7; //Was usMaxMemoryClockPLL_Input and usMinMemoryClockPLL_Output
+ USHORT usReserved11; //Was usMaxPixelClock; //In 10Khz unit, Max. Pclk used only for DAC
+ USHORT usMinPixelClockPLL_Input; //In 10Khz unit
+ USHORT usMaxPixelClockPLL_Input; //In 10Khz unit
+ USHORT usBootUpVDDCIVoltage; //In unit of mv; Was usMinPixelClockPLL_Output;
+ ATOM_FIRMWARE_CAPABILITY_ACCESS usFirmwareCapability;
+ USHORT usCoreReferenceClock; //In 10Khz unit
+ USHORT usMemoryReferenceClock; //In 10Khz unit
+ USHORT usUniphyDPModeExtClkFreq; //In 10Khz unit; if 0, the Uniphy input clock in DP mode comes from the internal PPLL, otherwise from the external spread-spectrum clock
+ UCHAR ucMemoryModule_ID; //Indicate what is the board design
+ UCHAR ucReserved9[3];
+ USHORT usBootUpMVDDCVoltage; //In unit of mv; Was usMinPixelClockPLL_Output;
+ USHORT usReserved12;
+ ULONG ulReserved10[3]; // Newly added compared to the previous version
+}ATOM_FIRMWARE_INFO_V2_2;
-#define ATOM_FIRMWARE_INFO_LAST ATOM_FIRMWARE_INFO_V2_1
+#define ATOM_FIRMWARE_INFO_LAST ATOM_FIRMWARE_INFO_V2_2
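
Since FirmwareInfo now has several layouts, a consumer should dispatch on the common table header before reading any field. A hedged sketch, assuming ATOM_COMMON_TABLE_HEADER (defined earlier in this header) carries ucTableFormatRevision and ucTableContentRevision, as this header's comments indicate:

static ULONG example_default_engine_clock(const ATOM_COMMON_TABLE_HEADER *hdr)
{
    /* V2.2 (NI) and V2.1 happen to share this field's offset, but check anyway. */
    if (hdr->ucTableFormatRevision == 2 && hdr->ucTableContentRevision >= 2)
        return ((const ATOM_FIRMWARE_INFO_V2_2 *)hdr)->ulDefaultEngineClock;
    if (hdr->ucTableFormatRevision == 2)
        return ((const ATOM_FIRMWARE_INFO_V2_1 *)hdr)->ulDefaultEngineClock;
    return 0; /* the older 1.x layouts are omitted from this sketch */
}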
/****************************************************************************/
// Structures used in IntegratedSystemInfoTable
@@ -2212,7 +2597,7 @@ ulDockingPinCFGInfo: [15:0]-Bus/Device/Function # to CFG to read this Docking Pi
ucDockingPinBit: which bit in this register to read the pin status;
ucDockingPinPolarity:Polarity of the pin when docked;
-ulCPUCapInfo: [7:0]=1:Griffin;[7:0]=2:Greyhound;[7:0]=3:K8, other bits reserved for now and must be 0x0
+ulCPUCapInfo: [7:0]=1:Griffin;[7:0]=2:Greyhound;[7:0]=3:K8, [7:0]=4:Pharaoh, other bits reserved for now and must be 0x0
usNumberOfCyclesInPeriod:Indicate how many cycles when PWM duty is 100%.
@@ -2250,6 +2635,14 @@ usMinUpStreamHTLinkWidth: Asymmetric link width support in the future, to rep
usMinDownStreamHTLinkWidth: same as above.
*/
+// ATOM_INTEGRATED_SYSTEM_INFO::ulCPUCapInfo - CPU type definition
+#define INTEGRATED_SYSTEM_INFO__UNKNOWN_CPU 0
+#define INTEGRATED_SYSTEM_INFO__AMD_CPU__GRIFFIN 1
+#define INTEGRATED_SYSTEM_INFO__AMD_CPU__GREYHOUND 2
+#define INTEGRATED_SYSTEM_INFO__AMD_CPU__K8 3
+#define INTEGRATED_SYSTEM_INFO__AMD_CPU__PHARAOH 4
+
+#define INTEGRATED_SYSTEM_INFO__AMD_CPU__MAX_CODE INTEGRATED_SYSTEM_INFO__AMD_CPU__PHARAOH // this define reflects the max defined CPU code
#define SYSTEM_CONFIG_POWEREXPRESS_ENABLE 0x00000001
#define SYSTEM_CONFIG_RUN_AT_OVERDRIVE_ENGINE 0x00000002
@@ -2778,8 +3171,88 @@ typedef struct _ATOM_LVDS_INFO_V12
#define PANEL_RANDOM_DITHER 0x80
#define PANEL_RANDOM_DITHER_MASK 0x80
+#define ATOM_LVDS_INFO_LAST ATOM_LVDS_INFO_V12 // no need to change this
+
+/****************************************************************************/
+// Structures used by LCD_InfoTable V1.3 Note: previous version was called ATOM_LVDS_INFO_V12
+// ASIC Families: NI
+// ucTableFormatRevision=1
+// ucTableContentRevision=3
+/****************************************************************************/
+typedef struct _ATOM_LCD_INFO_V13
+{
+ ATOM_COMMON_TABLE_HEADER sHeader;
+ ATOM_DTD_FORMAT sLCDTiming;
+ USHORT usExtInfoTableOffset;
+ USHORT usSupportedRefreshRate; //Refer to panel info table in ATOMBIOS extension Spec.
+ ULONG ulReserved0;
+ UCHAR ucLCD_Misc; // Reorganized in V13
+ // Bit0: {=0:single, =1:dual},
+ // Bit1: {=0:LDI format for RGB888, =1 FPDI format for RGB888} // was {=0:666RGB, =1:888RGB},
+ // Bit3:2: {Grey level}
+ // Bit6:4 Color Bit Depth definition (see below definition in EDID V1.4 @BYTE 14h)
+ // Bit7 Reserved. was for ATOM_PANEL_MISC_API_ENABLED, still need it?
+ UCHAR ucPanelDefaultRefreshRate;
+ UCHAR ucPanelIdentification;
+ UCHAR ucSS_Id;
+ USHORT usLCDVenderID;
+ USHORT usLCDProductID;
+ UCHAR ucLCDPanel_SpecialHandlingCap; // Reorganized in V13
+ // Bit0: Once DAL sees this CAP is set, it will read EDID from LCD on its own
+ // Bit1: See LCDPANEL_CAP_DRR_SUPPORTED
+ // Bit2: a quick reference whether an embedded panel (LCD1) is LVDS (0) or eDP (1)
+ // Bit7-3: Reserved
+ UCHAR ucPanelInfoSize; // starts from ATOM_DTD_FORMAT to the end of panel info, including the ExtInfoTable
+ USHORT usBacklightPWM; // Backlight PWM in Hz. New in _V13
+
+ UCHAR ucPowerSequenceDIGONtoDE_in4Ms;
+ UCHAR ucPowerSequenceDEtoVARY_BL_in4Ms;
+ UCHAR ucPowerSequenceDEtoDIGON_in4Ms;
+ UCHAR ucPowerSequenceVARY_BLtoDE_in4Ms;
+
+ UCHAR ucOffDelay_in4Ms;
+ UCHAR ucPowerSequenceVARY_BLtoBLON_in4Ms;
+ UCHAR ucPowerSequenceBLONtoVARY_BL_in4Ms;
+ UCHAR ucReserved1;
+
+ ULONG ulReserved[4];
+}ATOM_LCD_INFO_V13;
+
+#define ATOM_LCD_INFO_LAST ATOM_LCD_INFO_V13
+
+//Definitions for ucLCD_Misc
+#define ATOM_PANEL_MISC_V13_DUAL 0x00000001
+#define ATOM_PANEL_MISC_V13_FPDI 0x00000002
+#define ATOM_PANEL_MISC_V13_GREY_LEVEL 0x0000000C
+#define ATOM_PANEL_MISC_V13_GREY_LEVEL_SHIFT 2
+#define ATOM_PANEL_MISC_V13_COLOR_BIT_DEPTH_MASK 0x70
+#define ATOM_PANEL_MISC_V13_6BIT_PER_COLOR 0x10
+#define ATOM_PANEL_MISC_V13_8BIT_PER_COLOR 0x20
+
+//Color Bit Depth definition in EDID V1.4 @BYTE 14h
+//Bit 6 5 4
+ // 0 0 0 - Color bit depth is undefined
+ // 0 0 1 - 6 Bits per Primary Color
+ // 0 1 0 - 8 Bits per Primary Color
+ // 0 1 1 - 10 Bits per Primary Color
+ // 1 0 0 - 12 Bits per Primary Color
+ // 1 0 1 - 14 Bits per Primary Color
+ // 1 1 0 - 16 Bits per Primary Color
+ // 1 1 1 - Reserved
+
+//Definitions for ucLCDPanel_SpecialHandlingCap:
+
+//Once DAL sees this CAP is set, it will read EDID from LCD on its own instead of using sLCDTiming in ATOM_LVDS_INFO_V12.
+//Other entries in ATOM_LVDS_INFO_V12 are still valid/useful to DAL
+#define LCDPANEL_CAP_V13_READ_EDID 0x1 // = LCDPANEL_CAP_READ_EDID, no change compared to the previous version
+
+//If a design supports DRR (dynamic refresh rate) on internal panels (LVDS or EDP), this cap is set in ucLCDPanel_SpecialHandlingCap together
+//with multiple supported refresh rates @ usSupportedRefreshRate. This cap should not be set when only a slow refresh rate is supported (static
+//refresh rate switch by SW). This is only valid from ATOM_LVDS_INFO_V12.
+#define LCDPANEL_CAP_V13_DRR_SUPPORTED 0x2 // = LCDPANEL_CAP_DRR_SUPPORTED, no change compared to the previous version
-#define ATOM_LVDS_INFO_LAST ATOM_LVDS_INFO_V12
+//Use this cap bit for a quick reference whether an embedded panel (LCD1) is LVDS or eDP.
+#define LCDPANEL_CAP_V13_eDP 0x4 // = LCDPANEL_CAP_eDP, no change compared to the previous version
typedef struct _ATOM_PATCH_RECORD_MODE
{
@@ -2944,9 +3417,9 @@ typedef struct _ATOM_DPCD_INFO
#define MAX_DTD_MODE_IN_VRAM 6
#define ATOM_DTD_MODE_SUPPORT_TBL_SIZE (MAX_DTD_MODE_IN_VRAM*28) //28= (SIZEOF ATOM_DTD_FORMAT)
#define ATOM_STD_MODE_SUPPORT_TBL_SIZE 32*8 //32 is a predefined number,8= (SIZEOF ATOM_STD_FORMAT)
-#define DFP_ENCODER_TYPE_OFFSET 0x80
-#define DP_ENCODER_LANE_NUM_OFFSET 0x84
-#define DP_ENCODER_LINK_RATE_OFFSET 0x88
+//20 bytes for Encoder Type and DPCD in STD EDID area
+#define DFP_ENCODER_TYPE_OFFSET (ATOM_EDID_RAW_DATASIZE + ATOM_DTD_MODE_SUPPORT_TBL_SIZE + ATOM_STD_MODE_SUPPORT_TBL_SIZE - 20)
+#define ATOM_DP_DPCD_OFFSET (DFP_ENCODER_TYPE_OFFSET + 4 )
#define ATOM_HWICON1_SURFACE_ADDR 0
#define ATOM_HWICON2_SURFACE_ADDR (ATOM_HWICON1_SURFACE_ADDR + ATOM_HWICON_SURFACE_SIZE)
@@ -2997,14 +3470,16 @@ typedef struct _ATOM_DPCD_INFO
#define ATOM_DFP5_DTD_MODE_TBL_ADDR (ATOM_DFP5_EDID_ADDR + ATOM_EDID_RAW_DATASIZE)
#define ATOM_DFP5_STD_MODE_TBL_ADDR (ATOM_DFP5_DTD_MODE_TBL_ADDR + ATOM_DTD_MODE_SUPPORT_TBL_SIZE)
-#define ATOM_DP_TRAINING_TBL_ADDR (ATOM_DFP5_STD_MODE_TBL_ADDR+ATOM_STD_MODE_SUPPORT_TBL_SIZE)
+#define ATOM_DP_TRAINING_TBL_ADDR (ATOM_DFP5_STD_MODE_TBL_ADDR + ATOM_STD_MODE_SUPPORT_TBL_SIZE)
-#define ATOM_STACK_STORAGE_START (ATOM_DP_TRAINING_TBL_ADDR+256)
-#define ATOM_STACK_STORAGE_END ATOM_STACK_STORAGE_START+512
+#define ATOM_STACK_STORAGE_START (ATOM_DP_TRAINING_TBL_ADDR + 1024)
+#define ATOM_STACK_STORAGE_END ATOM_STACK_STORAGE_START + 512
//The size below is in Kb!
#define ATOM_VRAM_RESERVE_SIZE ((((ATOM_STACK_STORAGE_END - ATOM_HWICON1_SURFACE_ADDR)>>10)+4)&0xFFFC)
+#define ATOM_VRAM_RESERVE_V2_SIZE 32
+
#define ATOM_VRAM_OPERATION_FLAGS_MASK 0xC0000000L
#define ATOM_VRAM_OPERATION_FLAGS_SHIFT 30
#define ATOM_VRAM_BLOCK_NEEDS_NO_RESERVATION 0x1
@@ -3206,6 +3681,15 @@ typedef struct _ATOM_DISPLAY_OBJECT_PATH
USHORT usGraphicObjIds[1]; //1st Encoder Obj source from GPU to last Graphic Obj destinate to connector.
}ATOM_DISPLAY_OBJECT_PATH;
+typedef struct _ATOM_DISPLAY_EXTERNAL_OBJECT_PATH
+{
+ USHORT usDeviceTag; //supported device
+ USHORT usSize; //the size of ATOM_DISPLAY_OBJECT_PATH
+ USHORT usConnObjectId; //Connector Object ID
+ USHORT usGPUObjectId; //GPU ID
+ USHORT usGraphicObjIds[2]; //usGraphicObjIds[0]= GPU internal encoder, usGraphicObjIds[1]= external encoder
+}ATOM_DISPLAY_EXTERNAL_OBJECT_PATH;
+
typedef struct _ATOM_DISPLAY_OBJECT_PATH_TABLE
{
UCHAR ucNumOfDispPath;
@@ -3261,6 +3745,47 @@ typedef struct _ATOM_SRC_DST_TABLE_FOR_ONE_OBJECT //usSrcDstTableOffset
#define EXT_AUXDDC_LUTINDEX_7 7
#define MAX_NUMBER_OF_EXT_AUXDDC_LUT_ENTRIES (EXT_AUXDDC_LUTINDEX_7+1)
+//ucChannelMapping are defined as following
+//for DP connector, eDP, DP to VGA/LVDS
+//Bit[1:0]: Defines which pin connects to DP connector DP_Lane0, =0: source from GPU pin TX0, =1: from GPU pin TX1, =2: from GPU pin TX2, =3: from GPU pin TX3
+//Bit[3:2]: Defines which pin connects to DP connector DP_Lane1, =0: source from GPU pin TX0, =1: from GPU pin TX1, =2: from GPU pin TX2, =3: from GPU pin TX3
+//Bit[5:4]: Defines which pin connects to DP connector DP_Lane2, =0: source from GPU pin TX0, =1: from GPU pin TX1, =2: from GPU pin TX2, =3: from GPU pin TX3
+//Bit[7:6]: Defines which pin connects to DP connector DP_Lane3, =0: source from GPU pin TX0, =1: from GPU pin TX1, =2: from GPU pin TX2, =3: from GPU pin TX3
+typedef struct _ATOM_DP_CONN_CHANNEL_MAPPING
+{
+#if ATOM_BIG_ENDIAN
+ UCHAR ucDP_Lane3_Source:2;
+ UCHAR ucDP_Lane2_Source:2;
+ UCHAR ucDP_Lane1_Source:2;
+ UCHAR ucDP_Lane0_Source:2;
+#else
+ UCHAR ucDP_Lane0_Source:2;
+ UCHAR ucDP_Lane1_Source:2;
+ UCHAR ucDP_Lane2_Source:2;
+ UCHAR ucDP_Lane3_Source:2;
+#endif
+}ATOM_DP_CONN_CHANNEL_MAPPING;
+
+//for DVI/HDMI, in dual link case, both links have to have same mapping.
+//Bit[1:0]: Defines which pin connects to DVI connector data Lane2, =0: source from GPU pin TX0, =1: from GPU pin TX1, =2: from GPU pin TX2, =3: from GPU pin TX3
+//Bit[3:2]: Defines which pin connects to DVI connector data Lane1, =0: source from GPU pin TX0, =1: from GPU pin TX1, =2: from GPU pin TX2, =3: from GPU pin TX3
+//Bit[5:4]: Defines which pin connects to DVI connector data Lane0, =0: source from GPU pin TX0, =1: from GPU pin TX1, =2: from GPU pin TX2, =3: from GPU pin TX3
+//Bit[7:6]: Defines which pin connects to DVI connector clock lane, =0: source from GPU pin TX0, =1: from GPU pin TX1, =2: from GPU pin TX2, =3: from GPU pin TX3
+typedef struct _ATOM_DVI_CONN_CHANNEL_MAPPING
+{
+#if ATOM_BIG_ENDIAN
+ UCHAR ucDVI_CLK_Source:2;
+ UCHAR ucDVI_DATA0_Source:2;
+ UCHAR ucDVI_DATA1_Source:2;
+ UCHAR ucDVI_DATA2_Source:2;
+#else
+ UCHAR ucDVI_DATA2_Source:2;
+ UCHAR ucDVI_DATA1_Source:2;
+ UCHAR ucDVI_DATA0_Source:2;
+ UCHAR ucDVI_CLK_Source:2;
+#endif
+}ATOM_DVI_CONN_CHANNEL_MAPPING;
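
Illustrative only: decoding the lane-to-TX-pin assignment from a DP mapping byte (assumes <stdio.h>; a real consumer would first check that the EXT_DISPLAY_PATH below carries a non-zero ucChannelMapping).

static void example_print_dp_mapping(const ATOM_DP_CONN_CHANNEL_MAPPING *map)
{
    printf("DP lane0 <- GPU TX%u\n", (unsigned)map->ucDP_Lane0_Source);
    printf("DP lane1 <- GPU TX%u\n", (unsigned)map->ucDP_Lane1_Source);
    printf("DP lane2 <- GPU TX%u\n", (unsigned)map->ucDP_Lane2_Source);
    printf("DP lane3 <- GPU TX%u\n", (unsigned)map->ucDP_Lane3_Source);
}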
+
typedef struct _EXT_DISPLAY_PATH
{
USHORT usDeviceTag; //A bit vector to show what devices are supported
@@ -3269,7 +3794,13 @@ typedef struct _EXT_DISPLAY_PATH
UCHAR ucExtAUXDDCLutIndex; //An index into external AUX/DDC channel LUT
UCHAR ucExtHPDPINLutIndex; //An index into external HPD pin LUT
USHORT usExtEncoderObjId; //external encoder object id
- USHORT usReserved[3];
+ union{
+ UCHAR ucChannelMapping; // if ucChannelMapping=0, use the default one-to-one mapping
+ ATOM_DP_CONN_CHANNEL_MAPPING asDPMapping;
+ ATOM_DVI_CONN_CHANNEL_MAPPING asDVIMapping;
+ };
+ UCHAR ucReserved;
+ USHORT usReserved[2];
}EXT_DISPLAY_PATH;
#define NUMBER_OF_UCHAR_FOR_GUID 16
@@ -3281,7 +3812,8 @@ typedef struct _ATOM_EXTERNAL_DISPLAY_CONNECTION_INFO
UCHAR ucGuid [NUMBER_OF_UCHAR_FOR_GUID]; // a GUID is a 16 byte long string
EXT_DISPLAY_PATH sPath[MAX_NUMBER_OF_EXT_DISPLAY_PATH]; // total of fixed 7 entries.
UCHAR ucChecksum; // a simple Checksum of the sum of whole structure equal to 0x0.
- UCHAR Reserved [7]; // for potential expansion
+ UCHAR uc3DStereoPinId; // used for eDP panel
+ UCHAR Reserved [6]; // for potential expansion
}ATOM_EXTERNAL_DISPLAY_CONNECTION_INFO;
//Related definitions, all records are different but they have a common header
@@ -3311,10 +3843,11 @@ typedef struct _ATOM_COMMON_RECORD_HEADER
#define ATOM_CONNECTOR_AUXDDC_LUT_RECORD_TYPE 17 //This is for the case when connectors are not known to object table
#define ATOM_OBJECT_LINK_RECORD_TYPE 18 //Once this record is present under one object, it indicates the object is linked to another object described by the record
#define ATOM_CONNECTOR_REMOTE_CAP_RECORD_TYPE 19
+#define ATOM_ENCODER_CAP_RECORD_TYPE 20
//Must be updated when new record type is added,equal to that record definition!
-#define ATOM_MAX_OBJECT_RECORD_NUMBER ATOM_CONNECTOR_REMOTE_CAP_RECORD_TYPE
+#define ATOM_MAX_OBJECT_RECORD_NUMBER ATOM_ENCODER_CAP_RECORD_TYPE
typedef struct _ATOM_I2C_RECORD
{
@@ -3441,6 +3974,26 @@ typedef struct _ATOM_ENCODER_DVO_CF_RECORD
UCHAR ucPadding[2];
}ATOM_ENCODER_DVO_CF_RECORD;
+// Bit maps for ATOM_ENCODER_CAP_RECORD.ucEncoderCap
+#define ATOM_ENCODER_CAP_RECORD_HBR2 0x01 // DP1.2 HBR2 is supported by this path
+
+typedef struct _ATOM_ENCODER_CAP_RECORD
+{
+ ATOM_COMMON_RECORD_HEADER sheader;
+ union {
+ USHORT usEncoderCap;
+ struct {
+#if ATOM_BIG_ENDIAN
+ USHORT usReserved:15; // Bits 1-15 may be defined for other capabilities in the future
+ USHORT usHBR2Cap:1; // Bit0 is for DP1.2 HBR2 capability.
+#else
+ USHORT usHBR2Cap:1; // Bit0 is for DP1.2 HBR2 capability.
+ USHORT usReserved:15; // Bits 1-15 may be defined for other capabilities in the future
+#endif
+ };
+ };
+}ATOM_ENCODER_CAP_RECORD;
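
A short consumption sketch: locating this record means walking the object's ATOM_COMMON_RECORD_HEADER list and matching ucRecordType against ATOM_ENCODER_CAP_RECORD_TYPE (added above); the test itself is one mask.

static int example_path_supports_hbr2(const ATOM_ENCODER_CAP_RECORD *rec)
{
    /* DP1.2 HBR2 capability lives in bit 0 of usEncoderCap. */
    return (rec->usEncoderCap & ATOM_ENCODER_CAP_RECORD_HBR2) != 0;
}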
+
// value for ATOM_CONNECTOR_CF_RECORD.ucConnectedDvoBundle
#define ATOM_CONNECTOR_CF_RECORD_CONNECTED_UPPER12BITBUNDLEA 1
#define ATOM_CONNECTOR_CF_RECORD_CONNECTED_LOWER12BITBUNDLEB 2
@@ -3580,6 +4133,11 @@ typedef struct _ATOM_VOLTAGE_CONTROL
#define VOLTAGE_CONTROL_ID_DAC 0x02 //I2C control, used for R5xx/R6xx MVDDC,MVDDQ or VDDCI
#define VOLTAGE_CONTROL_ID_VT116xM 0x03 //I2C control, used for R6xx Core Voltage
#define VOLTAGE_CONTROL_ID_DS4402 0x04
+#define VOLTAGE_CONTROL_ID_UP6266 0x05
+#define VOLTAGE_CONTROL_ID_SCORPIO 0x06
+#define VOLTAGE_CONTROL_ID_VT1556M 0x07
+#define VOLTAGE_CONTROL_ID_CHL822x 0x08
+#define VOLTAGE_CONTROL_ID_VT1586M 0x09
typedef struct _ATOM_VOLTAGE_OBJECT
{
@@ -3670,66 +4228,157 @@ typedef struct _ATOM_POWER_SOURCE_INFO
#define POWER_SENSOR_GPIO 0x01
#define POWER_SENSOR_I2C 0x02
+typedef struct _ATOM_CLK_VOLT_CAPABILITY
+{
+ ULONG ulVoltageIndex; // The Voltage Index indicated by FUSE, same voltage index shared with SCLK DPM fuse table
+ ULONG ulMaximumSupportedCLK; // Maximum clock supported with specified voltage index, unit in 10kHz
+}ATOM_CLK_VOLT_CAPABILITY;
+
+typedef struct _ATOM_AVAILABLE_SCLK_LIST
+{
+ ULONG ulSupportedSCLK; // Maximum clock supported with specified voltage index, unit in 10kHz
+ USHORT usVoltageIndex; // The Voltage Index indicated by FUSE for specified SCLK
+ USHORT usVoltageID; // The Voltage ID indicated by FUSE for specified SCLK
+}ATOM_AVAILABLE_SCLK_LIST;
+
+// ATOM_INTEGRATED_SYSTEM_INFO_V6 ulSystemConfig cap definition
+#define ATOM_IGP_INFO_V6_SYSTEM_CONFIG__PCIE_POWER_GATING_ENABLE 1 // refer to ulSystemConfig bit[0]
+
+// this IntegratedSystemInfoTable is used for Llano/Ontario APUs
typedef struct _ATOM_INTEGRATED_SYSTEM_INFO_V6
{
ATOM_COMMON_TABLE_HEADER sHeader;
ULONG ulBootUpEngineClock;
ULONG ulDentistVCOFreq;
ULONG ulBootUpUMAClock;
- ULONG ulReserved1[8];
+ ATOM_CLK_VOLT_CAPABILITY sDISPCLK_Voltage[4];
ULONG ulBootUpReqDisplayVector;
ULONG ulOtherDisplayMisc;
ULONG ulGPUCapInfo;
- ULONG ulReserved2[3];
+ ULONG ulSB_MMIO_Base_Addr;
+ USHORT usRequestedPWMFreqInHz;
+ UCHAR ucHtcTmpLmt;
+ UCHAR ucHtcHystLmt;
+ ULONG ulMinEngineClock;
ULONG ulSystemConfig;
ULONG ulCPUCapInfo;
- USHORT usMaxNBVoltage;
- USHORT usMinNBVoltage;
- USHORT usBootUpNBVoltage;
- USHORT usExtDispConnInfoOffset;
- UCHAR ucHtcTmpLmt;
- UCHAR ucTjOffset;
+ USHORT usNBP0Voltage;
+ USHORT usNBP1Voltage;
+ USHORT usBootUpNBVoltage;
+ USHORT usExtDispConnInfoOffset;
+ USHORT usPanelRefreshRateRange;
UCHAR ucMemoryType;
UCHAR ucUMAChannelNumber;
ULONG ulCSR_M3_ARB_CNTL_DEFAULT[10];
ULONG ulCSR_M3_ARB_CNTL_UVD[10];
ULONG ulCSR_M3_ARB_CNTL_FS3D[10];
- ULONG ulReserved3[42];
+ ATOM_AVAILABLE_SCLK_LIST sAvail_SCLK[5];
+ ULONG ulGMCRestoreResetTime;
+ ULONG ulMinimumNClk;
+ ULONG ulIdleNClk;
+ ULONG ulDDR_DLL_PowerUpTime;
+ ULONG ulDDR_PLL_PowerUpTime;
+ USHORT usPCIEClkSSPercentage;
+ USHORT usPCIEClkSSType;
+ USHORT usLvdsSSPercentage;
+ USHORT usLvdsSSpreadRateIn10Hz;
+ USHORT usHDMISSPercentage;
+ USHORT usHDMISSpreadRateIn10Hz;
+ USHORT usDVISSPercentage;
+ USHORT usDVISSpreadRateIn10Hz;
+ ULONG ulReserved3[21];
ATOM_EXTERNAL_DISPLAY_CONNECTION_INFO sExtDispConnInfo;
}ATOM_INTEGRATED_SYSTEM_INFO_V6;
+// ulGPUCapInfo
+#define INTEGRATED_SYSTEM_INFO_V6_GPUCAPINFO__TMDSHDMI_COHERENT_SINGLEPLL_MODE 0x01
+#define INTEGRATED_SYSTEM_INFO_V6_GPUCAPINFO__DISABLE_AUX_HW_MODE_DETECTION 0x08
+
+// ulOtherDisplayMisc
+#define INTEGRATED_SYSTEM_INFO__GET_EDID_CALLBACK_FUNC_SUPPORT 0x01
+
+
/**********************************************************************************************************************
-// ATOM_INTEGRATED_SYSTEM_INFO_V6 Description
-//ulBootUpEngineClock: VBIOS bootup Engine clock frequency, in 10kHz unit.
-//ulDentistVCOFreq: Dentist VCO clock in 10kHz unit.
-//ulBootUpUMAClock: System memory boot up clock frequency in 10Khz unit.
-//ulReserved1[8] Reserved by now, must be 0x0.
-//ulBootUpReqDisplayVector VBIOS boot up display IDs
-// ATOM_DEVICE_CRT1_SUPPORT 0x0001
-// ATOM_DEVICE_CRT2_SUPPORT 0x0010
-// ATOM_DEVICE_DFP1_SUPPORT 0x0008
-// ATOM_DEVICE_DFP6_SUPPORT 0x0040
-// ATOM_DEVICE_DFP2_SUPPORT 0x0080
-// ATOM_DEVICE_DFP3_SUPPORT 0x0200
-// ATOM_DEVICE_DFP4_SUPPORT 0x0400
-// ATOM_DEVICE_DFP5_SUPPORT 0x0800
-// ATOM_DEVICE_LCD1_SUPPORT 0x0002
-//ulOtherDisplayMisc Other display related flags, not defined yet.
-//ulGPUCapInfo TBD
-//ulReserved2[3] must be 0x0 for the reserved.
-//ulSystemConfig TBD
-//ulCPUCapInfo TBD
-//usMaxNBVoltage High NB voltage in unit of mv, calculated using current VDDNB (D24F2xDC) and VDDNB offset fuse.
-//usMinNBVoltage Low NB voltage in unit of mv, calculated using current VDDNB (D24F2xDC) and VDDNB offset fuse.
-//usBootUpNBVoltage Boot up NB voltage in unit of mv.
-//ucHtcTmpLmt Bit [22:16] of D24F3x64 Thermal Control (HTC) Register.
-//ucTjOffset Bit [28:22] of D24F3xE4 Thermtrip Status Register,may not be needed.
-//ucMemoryType [3:0]=1:DDR1;=2:DDR2;=3:DDR3.[7:4] is reserved.
-//ucUMAChannelNumber System memory channel numbers.
-//usExtDispConnectionInfoOffset ATOM_EXTERNAL_DISPLAY_CONNECTION_INFO offset relative to beginning of this table.
-//ulCSR_M3_ARB_CNTL_DEFAULT[10] Arrays with values for CSR M3 arbiter for default
-//ulCSR_M3_ARB_CNTL_UVD[10] Arrays with values for CSR M3 arbiter for UVD playback.
-//ulCSR_M3_ARB_CNTL_FS3D[10] Arrays with values for CSR M3 arbiter for Full Screen 3D applications.
+ ATOM_INTEGRATED_SYSTEM_INFO_V6 Description
+ulBootUpEngineClock: VBIOS bootup Engine clock frequency, in 10kHz unit; if it equals 0, VBIOS uses a pre-defined bootup engine clock
+ulDentistVCOFreq: Dentist VCO clock in 10kHz unit.
+ulBootUpUMAClock: System memory boot up clock frequency in 10Khz unit.
+sDISPCLK_Voltage: Report Display clock voltage requirement.
+
+ulBootUpReqDisplayVector: VBIOS boot up display IDs; the following devices are supported in Llano/Ontario projects:
+ ATOM_DEVICE_CRT1_SUPPORT 0x0001
+ ATOM_DEVICE_CRT2_SUPPORT 0x0010
+ ATOM_DEVICE_DFP1_SUPPORT 0x0008
+ ATOM_DEVICE_DFP6_SUPPORT 0x0040
+ ATOM_DEVICE_DFP2_SUPPORT 0x0080
+ ATOM_DEVICE_DFP3_SUPPORT 0x0200
+ ATOM_DEVICE_DFP4_SUPPORT 0x0400
+ ATOM_DEVICE_DFP5_SUPPORT 0x0800
+ ATOM_DEVICE_LCD1_SUPPORT 0x0002
+ulOtherDisplayMisc: Other display related flags, not defined yet.
+ulGPUCapInfo: bit[0]=0: TMDS/HDMI Coherent Mode uses cascade PLL mode.
+ =1: TMDS/HDMI Coherent Mode uses single PLL mode.
+ bit[3]=0: Enable HW AUX mode detection logic
+ =1: Disable HW AUX mode detection logic
+ulSB_MMIO_Base_Addr: Physical Base address to SB MMIO space. Driver needs to initialize it for SMU usage.
+
+usRequestedPWMFreqInHz: When it's set to 0x0 by SBIOS, the LCD BackLight is not controlled by the GPU (SW).
+ Any attempt to change BL using VBIOS function or enable VariBri from PP table is not effective since ATOM_BIOS_INFO_BL_CONTROLLED_BY_GPU==0;
+
+ When it's set to a non-zero frequency, the BackLight is controlled by GPU (SW) in one of two ways below:
+ 1. SW uses the GPU BL PWM output to control the BL; in this case, this non-zero frequency determines what freq the GPU should use;
+ VBIOS will set up the proper PWM frequency and ATOM_BIOS_INFO_BL_CONTROLLED_BY_GPU==1; as a result,
+ Changing BL using VBIOS function is functional in both driver and non-driver present environment;
+ and enabling VariBri under the driver environment from PP table is optional.
+
+ 2. SW uses other means to control BL (like DPCD); this non-zero frequency serves as a flag only, indicating
+ that BL control from GPU is expected.
+ VBIOS will NOT set up PWM frequency but make ATOM_BIOS_INFO_BL_CONTROLLED_BY_GPU==1
+ Changing BL using the VBIOS function could be functional in both driver and non-driver present environments, but
+ it's per-platform,
+ and enabling VariBri under the driver environment from PP table is optional.
+
+ucHtcTmpLmt: Refer to D18F3x64 bit[22:16], HtcTmpLmt.
+ Threshold on value to enter HTC_active state.
+ucHtcHystLmt: Refer to D18F3x64 bit[27:24], HtcHystLmt.
+ The threshold-off value to exit the HTC_active state is the threshold-on value minus ucHtcHystLmt.
+ulMinEngineClock: Minimum SCLK allowed in 10kHz unit. This is calculated based on WRCK Fuse settings.
+ulSystemConfig: Bit[0]=0: PCIE Power Gating Disabled
+ =1: PCIE Power Gating Enabled
+ Bit[1]=0: DDR-DLL shut-down feature disabled.
+ 1: DDR-DLL shut-down feature enabled.
+ Bit[2]=0: DDR-PLL Power down feature disabled.
+ 1: DDR-PLL Power down feature enabled.
+ulCPUCapInfo: TBD
+usNBP0Voltage: VID for voltage on NB P0 State
+usNBP1Voltage: VID for voltage on NB P1 State
+usBootUpNBVoltage: Voltage Index of GNB voltage configured by SBIOS, which is sufficient to support the VBIOS DISPCLK requirement.
+usExtDispConnInfoOffset: Offset to sExtDispConnInfo inside the structure
+usPanelRefreshRateRange: Bit vector for LCD supported refresh rate range. If DRR is requested by the platform, at least two bits need to be set
+ to indicate a range.
+ SUPPORTED_LCD_REFRESHRATE_30Hz 0x0004
+ SUPPORTED_LCD_REFRESHRATE_40Hz 0x0008
+ SUPPORTED_LCD_REFRESHRATE_50Hz 0x0010
+ SUPPORTED_LCD_REFRESHRATE_60Hz 0x0020
+ucMemoryType: [3:0]=1:DDR1;=2:DDR2;=3:DDR3.[7:4] is reserved.
+ucUMAChannelNumber: System memory channel numbers.
+ulCSR_M3_ARB_CNTL_DEFAULT[10]: Arrays with values for CSR M3 arbiter for default
+ulCSR_M3_ARB_CNTL_UVD[10]: Arrays with values for CSR M3 arbiter for UVD playback.
+ulCSR_M3_ARB_CNTL_FS3D[10]: Arrays with values for CSR M3 arbiter for Full Screen 3D applications.
+sAvail_SCLK[5]: Arrays providing the available list of SCLKs and corresponding voltages, ordered from low to high
+ulGMCRestoreResetTime: GMC power restore and GMC reset time to calculate data reconnection latency. Unit in ns.
+ulMinimumNClk: Minimum NCLK speed among all NB-Pstates to calculate data reconnection latency. Unit in 10kHz.
+ulIdleNClk: NCLK speed while memory runs in self-refresh state. Unit in 10kHz.
+ulDDR_DLL_PowerUpTime: DDR PHY DLL power up time. Unit in ns.
+ulDDR_PLL_PowerUpTime: DDR PHY PLL power up time. Unit in ns.
+usPCIEClkSSPercentage: PCIE Clock Spread Spectrum Percentage in unit of 0.01%; 100 means 1%.
+usPCIEClkSSType: PCIE Clock Spread Spectrum Type. 0 for Down spread (default); 1 for Center spread.
+usLvdsSSPercentage: LVDS panel (not including eDP) Spread Spectrum Percentage in unit of 0.01%; =0, use VBIOS default setting.
+usLvdsSSpreadRateIn10Hz: LVDS panel (not including eDP) Spread Spectrum frequency in unit of 10Hz; =0, use VBIOS default setting.
+usHDMISSPercentage: HDMI Spread Spectrum Percentage in unit of 0.01%; 100 means 1%; =0, use VBIOS default setting.
+usHDMISSpreadRateIn10Hz: HDMI Spread Spectrum frequency in unit of 10Hz; =0, use VBIOS default setting.
+usDVISSPercentage: DVI Spread Spectrum Percentage in unit of 0.01%; 100 means 1%; =0, use VBIOS default setting.
+usDVISSpreadRateIn10Hz: DVI Spread Spectrum frequency in unit of 10Hz, =0, use VBIOS default setting.
**********************************************************************************************************************/
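
To make the sDISPCLK_Voltage description above concrete, a hedged sketch of picking a voltage index for a requested DISPCLK. The selection policy shown (smallest capability that still fits) is an assumption for illustration, not something the table mandates.

static ULONG example_dispclk_voltage_index(const ATOM_INTEGRATED_SYSTEM_INFO_V6 *info,
                                           ULONG dispclk_10khz)
{
    ULONG best = (ULONG)~0UL, best_clk = (ULONG)~0UL;
    int i;

    for (i = 0; i < 4; i++) {
        const ATOM_CLK_VOLT_CAPABILITY *c = &info->sDISPCLK_Voltage[i];
        if (c->ulMaximumSupportedCLK >= dispclk_10khz &&
            c->ulMaximumSupportedCLK < best_clk) {
            best = c->ulVoltageIndex;
            best_clk = c->ulMaximumSupportedCLK;
        }
    }
    return best; /* ~0 when no capability entry covers the request */
}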
/**************************************************************************/
@@ -3790,6 +4439,7 @@ typedef struct _ATOM_ASIC_SS_ASSIGNMENT
#define ASIC_INTERNAL_SS_ON_LVDS 6
#define ASIC_INTERNAL_SS_ON_DP 7
#define ASIC_INTERNAL_SS_ON_DCPLL 8
+#define ASIC_EXTERNAL_SS_ON_DP_CLOCK 9
typedef struct _ATOM_ASIC_SS_ASSIGNMENT_V2
{
@@ -3903,6 +4553,7 @@ typedef struct _ATOM_ASIC_INTERNAL_SS_INFO_V3
#define ATOM_S0_SYSTEM_POWER_STATE_VALUE_AC 1
#define ATOM_S0_SYSTEM_POWER_STATE_VALUE_DC 2
#define ATOM_S0_SYSTEM_POWER_STATE_VALUE_LITEAC 3
+#define ATOM_S0_SYSTEM_POWER_STATE_VALUE_LIT2AC 4
//Byte aligned definition for BIOS usage
#define ATOM_S0_CRT1_MONOb0 0x01
@@ -4529,7 +5180,8 @@ typedef struct _ATOM_INIT_REG_BLOCK{
#define INDEX_ACCESS_RANGE_BEGIN (VALUE_DWORD + 1)
#define INDEX_ACCESS_RANGE_END (INDEX_ACCESS_RANGE_BEGIN + 1)
#define VALUE_INDEX_ACCESS_SINGLE (INDEX_ACCESS_RANGE_END + 1)
-
+//#define ACCESS_MCIODEBUGIND 0x40 //defined in BIOS code
+#define ACCESS_PLACEHOLDER 0x80
typedef struct _ATOM_MC_INIT_PARAM_TABLE
{
@@ -4554,6 +5206,10 @@ typedef struct _ATOM_MC_INIT_PARAM_TABLE
#define _32Mx32 0x33
#define _64Mx8 0x41
#define _64Mx16 0x42
+#define _64Mx32 0x43
+#define _128Mx8 0x51
+#define _128Mx16 0x52
+#define _256Mx8 0x61
#define SAMSUNG 0x1
#define INFINEON 0x2
@@ -4569,10 +5225,11 @@ typedef struct _ATOM_MC_INIT_PARAM_TABLE
#define QIMONDA INFINEON
#define PROMOS MOSEL
#define KRETON INFINEON
+#define ELIXIR NANYA
/////////////Support for GDDR5 MC uCode to reside in upper 64K of ROM/////////////
-#define UCODE_ROM_START_ADDRESS 0x1c000
+#define UCODE_ROM_START_ADDRESS 0x1b800
#define UCODE_SIGNATURE 0x4375434d // 'MCuC' - MC uCode
//uCode block header for reference
@@ -4903,7 +5560,34 @@ typedef struct _ATOM_VRAM_MODULE_V6
ATOM_MEMORY_TIMING_FORMAT_V2 asMemTiming[5];//Memory Timing block sort from lower clock to higher clock
}ATOM_VRAM_MODULE_V6;
-
+typedef struct _ATOM_VRAM_MODULE_V7
+{
+// Design Specific Values
+ ULONG ulChannelMapCfg; // mmMC_SHARED_CHREMAP
+ USHORT usModuleSize; // Size of ATOM_VRAM_MODULE_V7
+ USHORT usPrivateReserved; // MC_ARB_RAMCFG (includes NOOFBANK,NOOFRANKS,NOOFROWS,NOOFCOLS)
+ USHORT usReserved;
+ UCHAR ucExtMemoryID; // Current memory module ID
+ UCHAR ucMemoryType; // MEM_TYPE_DDR2/DDR3/GDDR3/GDDR5
+ UCHAR ucChannelNum; // Number of mem. channels supported in this module
+ UCHAR ucChannelWidth; // CHANNEL_16BIT/CHANNEL_32BIT/CHANNEL_64BIT
+ UCHAR ucDensity; // _8Mx32, _16Mx32, _16Mx16, _32Mx16
+ UCHAR ucReserve; // Former container for Mx_FLAGS like DBI_AC_MODE_ENABLE_ASIC for GDDR4. Not used now.
+ UCHAR ucMisc; // RANK_OF_THISMEMORY etc.
+ UCHAR ucVREFI; // Not used.
+ UCHAR ucNPL_RT; // Round trip delay (MC_SEQ_CAS_TIMING [28:24]:TCL=CL+NPL_RT-2). Always 2.
+ UCHAR ucPreamble; // [7:4] Write Preamble, [3:0] Read Preamble
+ UCHAR ucMemorySize; // Total memory size in unit of 16MB for CONFIG_MEMSIZE - bit[23:0] zeros
+ UCHAR ucReserved[3];
+// Memory Module specific values
+ USHORT usEMRS2Value; // EMRS2/MR2 Value.
+ USHORT usEMRS3Value; // EMRS3/MR3 Value.
+ UCHAR ucMemoryVenderID; // [7:4] Revision, [3:0] Vendor code
+ UCHAR ucRefreshRateFactor; // [1:0]=RefreshFactor (00=8ms, 01=16ms, 10=32ms,11=64ms)
+ UCHAR ucFIFODepth; // FIFO depth can be detected during vendor detection, here is hardcoded per memory
+ UCHAR ucCDR_Bandwidth; // [0:3]=Read CDR bandwidth, [4:7] - Write CDR Bandwidth
+ char strMemPNString[20]; // part number end with '0'.
+}ATOM_VRAM_MODULE_V7;
typedef struct _ATOM_VRAM_INFO_V2
{
@@ -4942,6 +5626,20 @@ typedef struct _ATOM_VRAM_INFO_V4
// ATOM_INIT_REG_BLOCK aMemAdjust;
}ATOM_VRAM_INFO_V4;
+typedef struct _ATOM_VRAM_INFO_HEADER_V2_1
+{
+ ATOM_COMMON_TABLE_HEADER sHeader;
+ USHORT usMemAdjustTblOffset; // offset of ATOM_INIT_REG_BLOCK structure for memory vendor specific MC adjust setting
+ USHORT usMemClkPatchTblOffset; // offset of ATOM_INIT_REG_BLOCK structure for memory clock specific MC setting
+ USHORT usReserved[4];
+ UCHAR ucNumOfVRAMModule; // indicate number of VRAM module
+ UCHAR ucMemoryClkPatchTblVer; // version of memory AC timing register list
+ UCHAR ucVramModuleVer; // indicates the ATOM_VRAM_MODULE version
+ UCHAR ucReserved;
+ ATOM_VRAM_MODULE_V7 aVramInfo[ATOM_MAX_NUMBER_OF_VRAM_MODULE]; // just for allocation; the real number of blocks is in ucNumOfVRAMModule
+}ATOM_VRAM_INFO_HEADER_V2_1;
+
+
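A sketch, not part of the patch: walking the variable-sized module array above. Only ucNumOfVRAMModule entries are valid, and each entry advertises its own length in usModuleSize. Assumes <stdio.h>.

static void example_walk_vram_modules(const ATOM_VRAM_INFO_HEADER_V2_1 *info)
{
    const UCHAR *p = (const UCHAR *)&info->aVramInfo[0];
    UCHAR i;

    for (i = 0; i < info->ucNumOfVRAMModule; i++) {
        const ATOM_VRAM_MODULE_V7 *mod = (const ATOM_VRAM_MODULE_V7 *)p;
        printf("module %u: mem type %u, %u MB\n",
               (unsigned)mod->ucExtMemoryID, (unsigned)mod->ucMemoryType,
               (unsigned)mod->ucMemorySize * 16u); /* size is in 16MB units */
        p += mod->usModuleSize; /* entries may differ in size */
    }
}
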
typedef struct _ATOM_VRAM_GPIO_DETECTION_INFO
{
ATOM_COMMON_TABLE_HEADER sHeader;
@@ -5182,6 +5880,16 @@ typedef struct _ASIC_TRANSMITTER_INFO
UCHAR ucReserved;
}ASIC_TRANSMITTER_INFO;
+#define ASIC_TRANSMITTER_INFO_CONFIG__DVO_SDR_MODE 0x01
+#define ASIC_TRANSMITTER_INFO_CONFIG__COHERENT_MODE 0x02
+#define ASIC_TRANSMITTER_INFO_CONFIG__ENCODEROBJ_ID_MASK 0xc4
+#define ASIC_TRANSMITTER_INFO_CONFIG__ENCODER_A 0x00
+#define ASIC_TRANSMITTER_INFO_CONFIG__ENCODER_B 0x04
+#define ASIC_TRANSMITTER_INFO_CONFIG__ENCODER_C 0x40
+#define ASIC_TRANSMITTER_INFO_CONFIG__ENCODER_D 0x44
+#define ASIC_TRANSMITTER_INFO_CONFIG__ENCODER_E 0x80
+#define ASIC_TRANSMITTER_INFO_CONFIG__ENCODER_F 0x84
+
typedef struct _ASIC_ENCODER_INFO
{
UCHAR ucEncoderID;
@@ -5284,6 +5992,28 @@ typedef struct _DP_ENCODER_SERVICE_PARAMETERS
/* /obselete */
#define DP_ENCODER_SERVICE_PS_ALLOCATION WRITE_ONE_BYTE_HW_I2C_DATA_PARAMETERS
+
+typedef struct _DP_ENCODER_SERVICE_PARAMETERS_V2
+{
+ USHORT usExtEncoderObjId; // External Encoder Object Id, output parameter only, use when ucAction = DP_SERVICE_V2_ACTION_DET_EXT_CONNECTION
+ UCHAR ucAuxId;
+ UCHAR ucAction;
+ UCHAR ucSinkType; // Input and output parameter.
+ UCHAR ucHPDId; // Input parameter, used when ucAction = DP_SERVICE_V2_ACTION_DET_EXT_CONNECTION
+ UCHAR ucReserved[2];
+}DP_ENCODER_SERVICE_PARAMETERS_V2;
+
+typedef struct _DP_ENCODER_SERVICE_PS_ALLOCATION_V2
+{
+ DP_ENCODER_SERVICE_PARAMETERS_V2 asDPServiceParam;
+ PROCESS_AUX_CHANNEL_TRANSACTION_PARAMETERS_V2 asAuxParam;
+}DP_ENCODER_SERVICE_PS_ALLOCATION_V2;
+
+// ucAction
+#define DP_SERVICE_V2_ACTION_GET_SINK_TYPE 0x01
+#define DP_SERVICE_V2_ACTION_DET_LCD_CONNECTION 0x02
+
+
// DP_TRAINING_TABLE
#define DPCD_SET_LINKRATE_LANENUM_PATTERN1_TBL_ADDR ATOM_DP_TRAINING_TBL_ADDR
#define DPCD_SET_SS_CNTL_TBL_ADDR (ATOM_DP_TRAINING_TBL_ADDR + 8 )
@@ -5339,6 +6069,7 @@ typedef struct _SET_HWBLOCK_INSTANCE_PARAMETER_V2
#define SELECT_DCIO_IMPCAL 4
#define SELECT_DCIO_DIG 6
#define SELECT_CRTC_PIXEL_RATE 7
+#define SELECT_VGA_BLK 8
/****************************************************************************/
//Portion VI: Definitinos for vbios MC scratch registers that driver used
@@ -5744,7 +6475,17 @@ typedef struct _ATOM_PPLIB_THERMALCONTROLLER
#define ATOM_PP_THERMALCONTROLLER_ADT7473 9
#define ATOM_PP_THERMALCONTROLLER_EXTERNAL_GPIO 11
#define ATOM_PP_THERMALCONTROLLER_EVERGREEN 12
+#define ATOM_PP_THERMALCONTROLLER_EMC2103 13 /* 0x0D */ // Only fan control will be implemented, do NOT show this in PPGen.
+#define ATOM_PP_THERMALCONTROLLER_SUMO 14 /* 0x0E */ // Sumo type, used internally
+#define ATOM_PP_THERMALCONTROLLER_NISLANDS 15
+
+// Thermal controller 'combo type' to use an external controller for Fan control and an internal controller for thermal.
+// We probably should reserve the bit 0x80 for this use.
+// To keep the number of these types low we should also use the same code for all ASICs (i.e. do not distinguish RV6xx and RV7xx Internal here).
+// The driver can pick the correct internal controller based on the ASIC.
+
#define ATOM_PP_THERMALCONTROLLER_ADT7473_WITH_INTERNAL 0x89 // ADT7473 Fan Control + Internal Thermal Controller
+#define ATOM_PP_THERMALCONTROLLER_EMC2103_WITH_INTERNAL 0x8D // EMC2103 Fan Control + Internal Thermal Controller
typedef struct _ATOM_PPLIB_STATE
{
@@ -5841,6 +6582,29 @@ typedef struct _ATOM_PPLIB_POWERPLAYTABLE3
USHORT usExtendendedHeaderOffset;
} ATOM_PPLIB_POWERPLAYTABLE3, *LPATOM_PPLIB_POWERPLAYTABLE3;
+typedef struct _ATOM_PPLIB_POWERPLAYTABLE4
+{
+ ATOM_PPLIB_POWERPLAYTABLE3 basicTable3;
+ ULONG ulGoldenPPID; // PPGen use only
+ ULONG ulGoldenRevision; // PPGen use only
+ USHORT usVddcDependencyOnSCLKOffset;
+ USHORT usVddciDependencyOnMCLKOffset;
+ USHORT usVddcDependencyOnMCLKOffset;
+ USHORT usMaxClockVoltageOnDCOffset;
+ USHORT usReserved[2];
+} ATOM_PPLIB_POWERPLAYTABLE4, *LPATOM_PPLIB_POWERPLAYTABLE4;
+
+typedef struct _ATOM_PPLIB_POWERPLAYTABLE5
+{
+ ATOM_PPLIB_POWERPLAYTABLE4 basicTable4;
+ ULONG ulTDPLimit;
+ ULONG ulNearTDPLimit;
+ ULONG ulSQRampingThreshold;
+ USHORT usCACLeakageTableOffset; // Points to ATOM_PPLIB_CAC_Leakage_Table
+ ULONG ulCACLeakage; // TBD, this parameter is still under discussion. Change to ulReserved if not needed.
+ ULONG ulReserved;
+} ATOM_PPLIB_POWERPLAYTABLE5, *LPATOM_PPLIB_POWERPLAYTABLE5;
+
//// ATOM_PPLIB_NONCLOCK_INFO::usClassification
#define ATOM_PPLIB_CLASSIFICATION_UI_MASK 0x0007
#define ATOM_PPLIB_CLASSIFICATION_UI_SHIFT 0
@@ -5864,6 +6628,10 @@ typedef struct _ATOM_PPLIB_POWERPLAYTABLE3
#define ATOM_PPLIB_CLASSIFICATION_HDSTATE 0x4000
#define ATOM_PPLIB_CLASSIFICATION_SDSTATE 0x8000
+//// ATOM_PPLIB_NONCLOCK_INFO::usClassification2
+#define ATOM_PPLIB_CLASSIFICATION2_LIMITEDPOWERSOURCE_2 0x0001
+#define ATOM_PPLIB_CLASSIFICATION2_ULV 0x0002
+
//// ATOM_PPLIB_NONCLOCK_INFO::ulCapsAndSettings
#define ATOM_PPLIB_SINGLE_DISPLAY_ONLY 0x00000001
#define ATOM_PPLIB_SUPPORTS_VIDEO_PLAYBACK 0x00000002
@@ -5896,9 +6664,21 @@ typedef struct _ATOM_PPLIB_POWERPLAYTABLE3
#define ATOM_PPLIB_M3ARB_MASK 0x00060000
#define ATOM_PPLIB_M3ARB_SHIFT 17
+#define ATOM_PPLIB_ENABLE_DRR 0x00080000
+
+// remaining 16 bits are reserved
+typedef struct _ATOM_PPLIB_THERMAL_STATE
+{
+ UCHAR ucMinTemperature;
+ UCHAR ucMaxTemperature;
+ UCHAR ucThermalAction;
+}ATOM_PPLIB_THERMAL_STATE, *LPATOM_PPLIB_THERMAL_STATE;
+
// Contained in an array starting at the offset
// in ATOM_PPLIB_POWERPLAYTABLE::usNonClockInfoArrayOffset.
// referenced from ATOM_PPLIB_STATE_INFO::ucNonClockStateIndex
+#define ATOM_PPLIB_NONCLOCKINFO_VER1 12
+#define ATOM_PPLIB_NONCLOCKINFO_VER2 24
typedef struct _ATOM_PPLIB_NONCLOCK_INFO
{
USHORT usClassification;
@@ -5906,15 +6686,15 @@ typedef struct _ATOM_PPLIB_NONCLOCK_INFO
UCHAR ucMaxTemperature;
ULONG ulCapsAndSettings;
UCHAR ucRequiredPower;
- UCHAR ucUnused1[3];
+ USHORT usClassification2;
+ ULONG ulVCLK;
+ ULONG ulDCLK;
+ UCHAR ucUnused[5];
} ATOM_PPLIB_NONCLOCK_INFO;
// Contained in an array starting at the offset
// in ATOM_PPLIB_POWERPLAYTABLE::usClockInfoArrayOffset.
// referenced from ATOM_PPLIB_STATE::ucClockStateIndices
-#define ATOM_PPLIB_NONCLOCKINFO_VER1 12
-#define ATOM_PPLIB_NONCLOCKINFO_VER2 24
-
typedef struct _ATOM_PPLIB_R600_CLOCK_INFO
{
USHORT usEngineClockLow;
@@ -5985,6 +6765,93 @@ typedef struct _ATOM_PPLIB_RS780_CLOCK_INFO
#define ATOM_PPLIB_RS780_HTLINKFREQ_LOW 1
#define ATOM_PPLIB_RS780_HTLINKFREQ_HIGH 2
+typedef struct _ATOM_PPLIB_SUMO_CLOCK_INFO{
+ USHORT usEngineClockLow; //clock frequency & 0xFFFF. The unit is in 10kHz
+ UCHAR ucEngineClockHigh; //clock frequency >> 16.
+ UCHAR vddcIndex; //2-bit vddc index;
+ UCHAR leakage; //please use 8-bit absolute value, not the 6-bit % value
+ //please initialize to 0
+ UCHAR rsv;
+ //please initialize to 0
+ USHORT rsv1;
+ //please initialize to 0s
+ ULONG rsv2[2];
+}ATOM_PPLIB_SUMO_CLOCK_INFO;
+
+
+
+typedef struct _ATOM_PPLIB_STATE_V2
+{
+ //number of valid dpm levels in this state; Driver uses it to calculate the whole
+ //size of the state: sizeof(ATOM_PPLIB_STATE_V2) + (ucNumDPMLevels - 1) * sizeof(UCHAR)
+ UCHAR ucNumDPMLevels;
+
+ //an index into the array of nonClockInfos
+ UCHAR nonClockInfoIndex;
+ /**
+ * Driver will read the first ucNumDPMLevels in this array
+ */
+ UCHAR clockInfoIndex[1];
+} ATOM_PPLIB_STATE_V2;
+
+typedef struct StateArray{
+ //how many states we have
+ UCHAR ucNumEntries;
+
+ ATOM_PPLIB_STATE_V2 states[1];
+}StateArray;
+
+
+typedef struct ClockInfoArray{
+ //how many clock levels we have
+ UCHAR ucNumEntries;
+
+ //sizeof(ATOM_PPLIB_SUMO_CLOCK_INFO)
+ UCHAR ucEntrySize;
+
+ //this is for Sumo
+ ATOM_PPLIB_SUMO_CLOCK_INFO clockInfo[1];
+}ClockInfoArray;
+
+typedef struct NonClockInfoArray{
+
+ //how many non-clock levels we have; normally the same as the number of states
+ UCHAR ucNumEntries;
+ //sizeof(ATOM_PPLIB_NONCLOCK_INFO)
+ UCHAR ucEntrySize;
+
+ ATOM_PPLIB_NONCLOCK_INFO nonClockInfo[1];
+}NonClockInfoArray;
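
Sketch of walking StateArray: each ATOM_PPLIB_STATE_V2 is variable-length, sized exactly as the sizeof() comment inside the struct prescribes. Assumes <stdio.h>.

static void example_walk_states(const StateArray *sa)
{
    const UCHAR *p = (const UCHAR *)&sa->states[0];
    UCHAR i;

    for (i = 0; i < sa->ucNumEntries; i++) {
        const ATOM_PPLIB_STATE_V2 *st = (const ATOM_PPLIB_STATE_V2 *)p;
        printf("state %u: %u DPM levels, non-clock index %u\n",
               (unsigned)i, (unsigned)st->ucNumDPMLevels,
               (unsigned)st->nonClockInfoIndex);
        p += sizeof(ATOM_PPLIB_STATE_V2) +
             (st->ucNumDPMLevels - 1) * sizeof(UCHAR);
    }
}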
+
+typedef struct _ATOM_PPLIB_Clock_Voltage_Dependency_Record
+{
+ USHORT usClockLow;
+ UCHAR ucClockHigh;
+ USHORT usVoltage;
+}ATOM_PPLIB_Clock_Voltage_Dependency_Record;
+
+typedef struct _ATOM_PPLIB_Clock_Voltage_Dependency_Table
+{
+ UCHAR ucNumEntries; // Number of entries.
+ ATOM_PPLIB_Clock_Voltage_Dependency_Record entries[1]; // Dynamically allocate entries.
+}ATOM_PPLIB_Clock_Voltage_Dependency_Table;
+
+typedef struct _ATOM_PPLIB_Clock_Voltage_Limit_Record
+{
+ USHORT usSclkLow;
+ UCHAR ucSclkHigh;
+ USHORT usMclkLow;
+ UCHAR ucMclkHigh;
+ USHORT usVddc;
+ USHORT usVddci;
+}ATOM_PPLIB_Clock_Voltage_Limit_Record;
+
+typedef struct _ATOM_PPLIB_Clock_Voltage_Limit_Table
+{
+ UCHAR ucNumEntries; // Number of entries.
+ ATOM_PPLIB_Clock_Voltage_Limit_Record entries[1]; // Dynamically allocate entries.
+}ATOM_PPLIB_Clock_Voltage_Limit_Table;
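
For clarity: these records split 24-bit clocks into a 16-bit low part and an 8-bit high part, recombined as below (illustrative helper name).

static ULONG example_record_clock_10khz(const ATOM_PPLIB_Clock_Voltage_Dependency_Record *r)
{
    return ((ULONG)r->ucClockHigh << 16) | r->usClockLow; /* in 10kHz */
}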
+
/**************************************************************************/
diff --git a/drivers/gpu/drm/radeon/atombios_crtc.c b/drivers/gpu/drm/radeon/atombios_crtc.c
index 9fbabaa6ee4..b0ab185b86f 100644
--- a/drivers/gpu/drm/radeon/atombios_crtc.c
+++ b/drivers/gpu/drm/radeon/atombios_crtc.c
@@ -403,6 +403,7 @@ union atom_enable_ss {
ENABLE_LVDS_SS_PARAMETERS_V2 lvds_ss_2;
ENABLE_SPREAD_SPECTRUM_ON_PPLL_PS_ALLOCATION v1;
ENABLE_SPREAD_SPECTRUM_ON_PPLL_V2 v2;
+ ENABLE_SPREAD_SPECTRUM_ON_PPLL_V3 v3;
};
static void atombios_crtc_program_ss(struct drm_crtc *crtc,
@@ -417,7 +418,30 @@ static void atombios_crtc_program_ss(struct drm_crtc *crtc,
memset(&args, 0, sizeof(args));
- if (ASIC_IS_DCE4(rdev)) {
+ if (ASIC_IS_DCE5(rdev)) {
+ args.v3.usSpreadSpectrumAmountFrac = 0;
+ args.v3.ucSpreadSpectrumType = ss->type;
+ switch (pll_id) {
+ case ATOM_PPLL1:
+ args.v3.ucSpreadSpectrumType |= ATOM_PPLL_SS_TYPE_V3_P1PLL;
+ args.v3.usSpreadSpectrumAmount = ss->amount;
+ args.v3.usSpreadSpectrumStep = ss->step;
+ break;
+ case ATOM_PPLL2:
+ args.v3.ucSpreadSpectrumType |= ATOM_PPLL_SS_TYPE_V3_P2PLL;
+ args.v3.usSpreadSpectrumAmount = ss->amount;
+ args.v3.usSpreadSpectrumStep = ss->step;
+ break;
+ case ATOM_DCPLL:
+ args.v3.ucSpreadSpectrumType |= ATOM_PPLL_SS_TYPE_V3_DCPLL;
+ args.v3.usSpreadSpectrumAmount = 0;
+ args.v3.usSpreadSpectrumStep = 0;
+ break;
+ case ATOM_PPLL_INVALID:
+ return;
+ }
+ args.v3.ucEnable = enable;
+ } else if (ASIC_IS_DCE4(rdev)) {
args.v2.usSpreadSpectrumPercentage = cpu_to_le16(ss->percentage);
args.v2.ucSpreadSpectrumType = ss->type;
switch (pll_id) {
@@ -673,9 +697,14 @@ union set_pixel_clock {
PIXEL_CLOCK_PARAMETERS_V2 v2;
PIXEL_CLOCK_PARAMETERS_V3 v3;
PIXEL_CLOCK_PARAMETERS_V5 v5;
+ PIXEL_CLOCK_PARAMETERS_V6 v6;
};
-static void atombios_crtc_set_dcpll(struct drm_crtc *crtc)
+/* on DCE5, make sure the voltage is high enough to support the
+ * required disp clk.
+ */
+static void atombios_crtc_set_dcpll(struct drm_crtc *crtc,
+ u32 dispclk)
{
struct drm_device *dev = crtc->dev;
struct radeon_device *rdev = dev->dev_private;
@@ -698,9 +727,16 @@ static void atombios_crtc_set_dcpll(struct drm_crtc *crtc)
* SetPixelClock provides the dividers
*/
args.v5.ucCRTC = ATOM_CRTC_INVALID;
- args.v5.usPixelClock = rdev->clock.default_dispclk;
+ args.v5.usPixelClock = dispclk;
args.v5.ucPpll = ATOM_DCPLL;
break;
+ case 6:
+ /* if the default dcpll clock is specified,
+ * SetPixelClock provides the dividers
+ */
+ args.v6.ulDispEngClkFreq = dispclk;
+ args.v6.ucPpll = ATOM_DCPLL;
+ break;
default:
DRM_ERROR("Unknown table version %d %d\n", frev, crev);
return;
@@ -784,6 +820,18 @@ static void atombios_crtc_program_pll(struct drm_crtc *crtc,
args.v5.ucEncoderMode = encoder_mode;
args.v5.ucPpll = pll_id;
break;
+ case 6:
+ args.v6.ulCrtcPclkFreq.ucCRTC = crtc_id;
+ args.v6.ulCrtcPclkFreq.ulPixelClock = cpu_to_le32(clock / 10);
+ args.v6.ucRefDiv = ref_div;
+ args.v6.usFbDiv = cpu_to_le16(fb_div);
+ args.v6.ulFbDivDecFrac = cpu_to_le32(frac_fb_div * 100000);
+ args.v6.ucPostDiv = post_div;
+ args.v6.ucMiscInfo = 0; /* HDMI depth, etc. */
+ args.v6.ucTransmitterID = encoder_id;
+ args.v6.ucEncoderMode = encoder_mode;
+ args.v6.ucPpll = pll_id;
+ break;
default:
DRM_ERROR("Unknown table version %d %d\n", frev, crev);
return;
@@ -1377,7 +1425,8 @@ int atombios_crtc_mode_set(struct drm_crtc *crtc,
rdev->clock.default_dispclk);
if (ss_enabled)
atombios_crtc_program_ss(crtc, ATOM_DISABLE, ATOM_DCPLL, &ss);
- atombios_crtc_set_dcpll(crtc);
+ /* XXX: DCE5, make sure voltage, dispclk is high enough */
+ atombios_crtc_set_dcpll(crtc, rdev->clock.default_dispclk);
if (ss_enabled)
atombios_crtc_program_ss(crtc, ATOM_ENABLE, ATOM_DCPLL, &ss);
}
diff --git a/drivers/gpu/drm/radeon/evergreen.c b/drivers/gpu/drm/radeon/evergreen.c
index 7b337c361a1..7fe8ebdcdc0 100644
--- a/drivers/gpu/drm/radeon/evergreen.c
+++ b/drivers/gpu/drm/radeon/evergreen.c
@@ -39,6 +39,62 @@
static void evergreen_gpu_init(struct radeon_device *rdev);
void evergreen_fini(struct radeon_device *rdev);
+static void evergreen_pcie_gen2_enable(struct radeon_device *rdev);
+
+void evergreen_pre_page_flip(struct radeon_device *rdev, int crtc)
+{
+ struct radeon_crtc *radeon_crtc = rdev->mode_info.crtcs[crtc];
+ u32 tmp;
+
+ /* make sure the flip latches during vblank rather than hblank */
+ tmp = RREG32(EVERGREEN_GRPH_FLIP_CONTROL + radeon_crtc->crtc_offset);
+ tmp &= ~EVERGREEN_GRPH_SURFACE_UPDATE_H_RETRACE_EN;
+ WREG32(EVERGREEN_GRPH_FLIP_CONTROL + radeon_crtc->crtc_offset, tmp);
+
+ /* set pageflip to happen anywhere in vblank interval */
+ WREG32(EVERGREEN_MASTER_UPDATE_MODE + radeon_crtc->crtc_offset, 0);
+
+ /* enable the pflip int */
+ radeon_irq_kms_pflip_irq_get(rdev, crtc);
+}
+
+void evergreen_post_page_flip(struct radeon_device *rdev, int crtc)
+{
+ /* disable the pflip int */
+ radeon_irq_kms_pflip_irq_put(rdev, crtc);
+}
+
+u32 evergreen_page_flip(struct radeon_device *rdev, int crtc_id, u64 crtc_base)
+{
+ struct radeon_crtc *radeon_crtc = rdev->mode_info.crtcs[crtc_id];
+ u32 tmp = RREG32(EVERGREEN_GRPH_UPDATE + radeon_crtc->crtc_offset);
+
+ /* Lock the graphics update lock */
+ tmp |= EVERGREEN_GRPH_UPDATE_LOCK;
+ WREG32(EVERGREEN_GRPH_UPDATE + radeon_crtc->crtc_offset, tmp);
+
+ /* update the scanout addresses */
+ WREG32(EVERGREEN_GRPH_SECONDARY_SURFACE_ADDRESS_HIGH + radeon_crtc->crtc_offset,
+ upper_32_bits(crtc_base));
+ WREG32(EVERGREEN_GRPH_SECONDARY_SURFACE_ADDRESS + radeon_crtc->crtc_offset,
+ (u32)crtc_base);
+
+ WREG32(EVERGREEN_GRPH_PRIMARY_SURFACE_ADDRESS_HIGH + radeon_crtc->crtc_offset,
+ upper_32_bits(crtc_base));
+ WREG32(EVERGREEN_GRPH_PRIMARY_SURFACE_ADDRESS + radeon_crtc->crtc_offset,
+ (u32)crtc_base);
+
+ /* Wait for update_pending to go high. */
+ while (!(RREG32(EVERGREEN_GRPH_UPDATE + radeon_crtc->crtc_offset) &
+ EVERGREEN_GRPH_SURFACE_UPDATE_PENDING))
+ cpu_relax();
+ DRM_DEBUG("Update pending now high. Unlocking vupdate_lock.\n");
+
+ /* Release the update lock so double buffering can take place inside vblank */
+ tmp &= ~EVERGREEN_GRPH_UPDATE_LOCK;
+ WREG32(EVERGREEN_GRPH_UPDATE + radeon_crtc->crtc_offset, tmp);
+
+ /* Return current update_pending status: */
+ return RREG32(EVERGREEN_GRPH_UPDATE + radeon_crtc->crtc_offset) & EVERGREEN_GRPH_SURFACE_UPDATE_PENDING;
+}
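
Note: the lock and pending flags used above are plain bit tests on EVERGREEN_GRPH_UPDATE (bit 16 is the update lock, bit 2 the surface-update-pending flag, per the evergreen_reg.h hunk later in this patch). A minimal user-space sketch of the same bit math, with the register value supplied by hand rather than read from hardware:

#include <stdint.h>
#include <stdio.h>

#define GRPH_SURFACE_UPDATE_PENDING (1u << 2)  /* mirrors EVERGREEN_GRPH_SURFACE_UPDATE_PENDING */
#define GRPH_UPDATE_LOCK            (1u << 16) /* mirrors EVERGREEN_GRPH_UPDATE_LOCK */

int main(void)
{
	uint32_t reg = 0;

	reg |= GRPH_UPDATE_LOCK;            /* lock: surface address writes are held back */
	reg |= GRPH_SURFACE_UPDATE_PENDING; /* hardware would raise this after the writes */
	reg &= ~GRPH_UPDATE_LOCK;           /* unlock: the flip completes at the next vblank */

	printf("pending=%d locked=%d\n",
	       !!(reg & GRPH_SURFACE_UPDATE_PENDING),
	       !!(reg & GRPH_UPDATE_LOCK));
	return 0;
}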
/* get temperature in millidegrees */
u32 evergreen_get_temp(struct radeon_device *rdev)
@@ -57,6 +113,14 @@ u32 evergreen_get_temp(struct radeon_device *rdev)
return actual_temp * 1000;
}
+u32 sumo_get_temp(struct radeon_device *rdev)
+{
+ u32 temp = RREG32(CG_THERMAL_STATUS) & 0xff;
+ u32 actual_temp = (temp >> 1) & 0xff;
+
+ return actual_temp * 1000;
+}
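
Note: the Sumo/Palm decode above is pure bit arithmetic on CG_THERMAL_STATUS (0x678 per the evergreend.h hunk later in this patch): keep the low byte, drop bit 0, and scale to millidegrees. A stand-alone sketch with an assumed raw register sample:

#include <stdint.h>
#include <stdio.h>

/* Same decode as sumo_get_temp(), with a made-up register sample. */
static uint32_t sumo_decode_millideg(uint32_t thermal_status)
{
	uint32_t temp = thermal_status & 0xff;     /* low byte holds the reading */
	uint32_t actual_temp = (temp >> 1) & 0xff; /* bit 0 is not part of the value */

	return actual_temp * 1000;                 /* degrees C -> millidegrees */
}

int main(void)
{
	printf("%u mC\n", sumo_decode_millideg(0x5a)); /* 0x5a >> 1 = 45 -> 45000 */
	return 0;
}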
+
void evergreen_pm_misc(struct radeon_device *rdev)
{
int req_ps_idx = rdev->pm.requested_power_state_index;
@@ -337,16 +401,28 @@ static u32 evergreen_line_buffer_adjust(struct radeon_device *rdev,
case 0:
case 4:
default:
- return 3840 * 2;
+ if (ASIC_IS_DCE5(rdev))
+ return 4096 * 2;
+ else
+ return 3840 * 2;
case 1:
case 5:
- return 5760 * 2;
+ if (ASIC_IS_DCE5(rdev))
+ return 6144 * 2;
+ else
+ return 5760 * 2;
case 2:
case 6:
- return 7680 * 2;
+ if (ASIC_IS_DCE5(rdev))
+ return 8192 * 2;
+ else
+ return 7680 * 2;
case 3:
case 7:
- return 1920 * 2;
+ if (ASIC_IS_DCE5(rdev))
+ return 2048 * 2;
+ else
+ return 1920 * 2;
}
}
@@ -890,31 +966,39 @@ static void evergreen_mc_stop(struct radeon_device *rdev, struct evergreen_mc_sa
save->vga_hdp_control = RREG32(VGA_HDP_CONTROL);
save->crtc_control[0] = RREG32(EVERGREEN_CRTC_CONTROL + EVERGREEN_CRTC0_REGISTER_OFFSET);
save->crtc_control[1] = RREG32(EVERGREEN_CRTC_CONTROL + EVERGREEN_CRTC1_REGISTER_OFFSET);
- save->crtc_control[2] = RREG32(EVERGREEN_CRTC_CONTROL + EVERGREEN_CRTC2_REGISTER_OFFSET);
- save->crtc_control[3] = RREG32(EVERGREEN_CRTC_CONTROL + EVERGREEN_CRTC3_REGISTER_OFFSET);
- save->crtc_control[4] = RREG32(EVERGREEN_CRTC_CONTROL + EVERGREEN_CRTC4_REGISTER_OFFSET);
- save->crtc_control[5] = RREG32(EVERGREEN_CRTC_CONTROL + EVERGREEN_CRTC5_REGISTER_OFFSET);
+ if (!(rdev->flags & RADEON_IS_IGP)) {
+ save->crtc_control[2] = RREG32(EVERGREEN_CRTC_CONTROL + EVERGREEN_CRTC2_REGISTER_OFFSET);
+ save->crtc_control[3] = RREG32(EVERGREEN_CRTC_CONTROL + EVERGREEN_CRTC3_REGISTER_OFFSET);
+ save->crtc_control[4] = RREG32(EVERGREEN_CRTC_CONTROL + EVERGREEN_CRTC4_REGISTER_OFFSET);
+ save->crtc_control[5] = RREG32(EVERGREEN_CRTC_CONTROL + EVERGREEN_CRTC5_REGISTER_OFFSET);
+ }
/* Stop all video */
WREG32(VGA_RENDER_CONTROL, 0);
WREG32(EVERGREEN_CRTC_UPDATE_LOCK + EVERGREEN_CRTC0_REGISTER_OFFSET, 1);
WREG32(EVERGREEN_CRTC_UPDATE_LOCK + EVERGREEN_CRTC1_REGISTER_OFFSET, 1);
- WREG32(EVERGREEN_CRTC_UPDATE_LOCK + EVERGREEN_CRTC2_REGISTER_OFFSET, 1);
- WREG32(EVERGREEN_CRTC_UPDATE_LOCK + EVERGREEN_CRTC3_REGISTER_OFFSET, 1);
- WREG32(EVERGREEN_CRTC_UPDATE_LOCK + EVERGREEN_CRTC4_REGISTER_OFFSET, 1);
- WREG32(EVERGREEN_CRTC_UPDATE_LOCK + EVERGREEN_CRTC5_REGISTER_OFFSET, 1);
+ if (!(rdev->flags & RADEON_IS_IGP)) {
+ WREG32(EVERGREEN_CRTC_UPDATE_LOCK + EVERGREEN_CRTC2_REGISTER_OFFSET, 1);
+ WREG32(EVERGREEN_CRTC_UPDATE_LOCK + EVERGREEN_CRTC3_REGISTER_OFFSET, 1);
+ WREG32(EVERGREEN_CRTC_UPDATE_LOCK + EVERGREEN_CRTC4_REGISTER_OFFSET, 1);
+ WREG32(EVERGREEN_CRTC_UPDATE_LOCK + EVERGREEN_CRTC5_REGISTER_OFFSET, 1);
+ }
WREG32(EVERGREEN_CRTC_CONTROL + EVERGREEN_CRTC0_REGISTER_OFFSET, 0);
WREG32(EVERGREEN_CRTC_CONTROL + EVERGREEN_CRTC1_REGISTER_OFFSET, 0);
- WREG32(EVERGREEN_CRTC_CONTROL + EVERGREEN_CRTC2_REGISTER_OFFSET, 0);
- WREG32(EVERGREEN_CRTC_CONTROL + EVERGREEN_CRTC3_REGISTER_OFFSET, 0);
- WREG32(EVERGREEN_CRTC_CONTROL + EVERGREEN_CRTC4_REGISTER_OFFSET, 0);
- WREG32(EVERGREEN_CRTC_CONTROL + EVERGREEN_CRTC5_REGISTER_OFFSET, 0);
+ if (!(rdev->flags & RADEON_IS_IGP)) {
+ WREG32(EVERGREEN_CRTC_CONTROL + EVERGREEN_CRTC2_REGISTER_OFFSET, 0);
+ WREG32(EVERGREEN_CRTC_CONTROL + EVERGREEN_CRTC3_REGISTER_OFFSET, 0);
+ WREG32(EVERGREEN_CRTC_CONTROL + EVERGREEN_CRTC4_REGISTER_OFFSET, 0);
+ WREG32(EVERGREEN_CRTC_CONTROL + EVERGREEN_CRTC5_REGISTER_OFFSET, 0);
+ }
WREG32(EVERGREEN_CRTC_UPDATE_LOCK + EVERGREEN_CRTC0_REGISTER_OFFSET, 0);
WREG32(EVERGREEN_CRTC_UPDATE_LOCK + EVERGREEN_CRTC1_REGISTER_OFFSET, 0);
- WREG32(EVERGREEN_CRTC_UPDATE_LOCK + EVERGREEN_CRTC2_REGISTER_OFFSET, 0);
- WREG32(EVERGREEN_CRTC_UPDATE_LOCK + EVERGREEN_CRTC3_REGISTER_OFFSET, 0);
- WREG32(EVERGREEN_CRTC_UPDATE_LOCK + EVERGREEN_CRTC4_REGISTER_OFFSET, 0);
- WREG32(EVERGREEN_CRTC_UPDATE_LOCK + EVERGREEN_CRTC5_REGISTER_OFFSET, 0);
+ if (!(rdev->flags & RADEON_IS_IGP)) {
+ WREG32(EVERGREEN_CRTC_UPDATE_LOCK + EVERGREEN_CRTC2_REGISTER_OFFSET, 0);
+ WREG32(EVERGREEN_CRTC_UPDATE_LOCK + EVERGREEN_CRTC3_REGISTER_OFFSET, 0);
+ WREG32(EVERGREEN_CRTC_UPDATE_LOCK + EVERGREEN_CRTC4_REGISTER_OFFSET, 0);
+ WREG32(EVERGREEN_CRTC_UPDATE_LOCK + EVERGREEN_CRTC5_REGISTER_OFFSET, 0);
+ }
WREG32(D1VGA_CONTROL, 0);
WREG32(D2VGA_CONTROL, 0);
@@ -944,41 +1028,43 @@ static void evergreen_mc_resume(struct radeon_device *rdev, struct evergreen_mc_
WREG32(EVERGREEN_GRPH_SECONDARY_SURFACE_ADDRESS + EVERGREEN_CRTC1_REGISTER_OFFSET,
(u32)rdev->mc.vram_start);
- WREG32(EVERGREEN_GRPH_PRIMARY_SURFACE_ADDRESS_HIGH + EVERGREEN_CRTC2_REGISTER_OFFSET,
- upper_32_bits(rdev->mc.vram_start));
- WREG32(EVERGREEN_GRPH_SECONDARY_SURFACE_ADDRESS_HIGH + EVERGREEN_CRTC2_REGISTER_OFFSET,
- upper_32_bits(rdev->mc.vram_start));
- WREG32(EVERGREEN_GRPH_PRIMARY_SURFACE_ADDRESS + EVERGREEN_CRTC2_REGISTER_OFFSET,
- (u32)rdev->mc.vram_start);
- WREG32(EVERGREEN_GRPH_SECONDARY_SURFACE_ADDRESS + EVERGREEN_CRTC2_REGISTER_OFFSET,
- (u32)rdev->mc.vram_start);
-
- WREG32(EVERGREEN_GRPH_PRIMARY_SURFACE_ADDRESS_HIGH + EVERGREEN_CRTC3_REGISTER_OFFSET,
- upper_32_bits(rdev->mc.vram_start));
- WREG32(EVERGREEN_GRPH_SECONDARY_SURFACE_ADDRESS_HIGH + EVERGREEN_CRTC3_REGISTER_OFFSET,
- upper_32_bits(rdev->mc.vram_start));
- WREG32(EVERGREEN_GRPH_PRIMARY_SURFACE_ADDRESS + EVERGREEN_CRTC3_REGISTER_OFFSET,
- (u32)rdev->mc.vram_start);
- WREG32(EVERGREEN_GRPH_SECONDARY_SURFACE_ADDRESS + EVERGREEN_CRTC3_REGISTER_OFFSET,
- (u32)rdev->mc.vram_start);
-
- WREG32(EVERGREEN_GRPH_PRIMARY_SURFACE_ADDRESS_HIGH + EVERGREEN_CRTC4_REGISTER_OFFSET,
- upper_32_bits(rdev->mc.vram_start));
- WREG32(EVERGREEN_GRPH_SECONDARY_SURFACE_ADDRESS_HIGH + EVERGREEN_CRTC4_REGISTER_OFFSET,
- upper_32_bits(rdev->mc.vram_start));
- WREG32(EVERGREEN_GRPH_PRIMARY_SURFACE_ADDRESS + EVERGREEN_CRTC4_REGISTER_OFFSET,
- (u32)rdev->mc.vram_start);
- WREG32(EVERGREEN_GRPH_SECONDARY_SURFACE_ADDRESS + EVERGREEN_CRTC4_REGISTER_OFFSET,
- (u32)rdev->mc.vram_start);
-
- WREG32(EVERGREEN_GRPH_PRIMARY_SURFACE_ADDRESS_HIGH + EVERGREEN_CRTC5_REGISTER_OFFSET,
- upper_32_bits(rdev->mc.vram_start));
- WREG32(EVERGREEN_GRPH_SECONDARY_SURFACE_ADDRESS_HIGH + EVERGREEN_CRTC5_REGISTER_OFFSET,
- upper_32_bits(rdev->mc.vram_start));
- WREG32(EVERGREEN_GRPH_PRIMARY_SURFACE_ADDRESS + EVERGREEN_CRTC5_REGISTER_OFFSET,
- (u32)rdev->mc.vram_start);
- WREG32(EVERGREEN_GRPH_SECONDARY_SURFACE_ADDRESS + EVERGREEN_CRTC5_REGISTER_OFFSET,
- (u32)rdev->mc.vram_start);
+ if (!(rdev->flags & RADEON_IS_IGP)) {
+ WREG32(EVERGREEN_GRPH_PRIMARY_SURFACE_ADDRESS_HIGH + EVERGREEN_CRTC2_REGISTER_OFFSET,
+ upper_32_bits(rdev->mc.vram_start));
+ WREG32(EVERGREEN_GRPH_SECONDARY_SURFACE_ADDRESS_HIGH + EVERGREEN_CRTC2_REGISTER_OFFSET,
+ upper_32_bits(rdev->mc.vram_start));
+ WREG32(EVERGREEN_GRPH_PRIMARY_SURFACE_ADDRESS + EVERGREEN_CRTC2_REGISTER_OFFSET,
+ (u32)rdev->mc.vram_start);
+ WREG32(EVERGREEN_GRPH_SECONDARY_SURFACE_ADDRESS + EVERGREEN_CRTC2_REGISTER_OFFSET,
+ (u32)rdev->mc.vram_start);
+
+ WREG32(EVERGREEN_GRPH_PRIMARY_SURFACE_ADDRESS_HIGH + EVERGREEN_CRTC3_REGISTER_OFFSET,
+ upper_32_bits(rdev->mc.vram_start));
+ WREG32(EVERGREEN_GRPH_SECONDARY_SURFACE_ADDRESS_HIGH + EVERGREEN_CRTC3_REGISTER_OFFSET,
+ upper_32_bits(rdev->mc.vram_start));
+ WREG32(EVERGREEN_GRPH_PRIMARY_SURFACE_ADDRESS + EVERGREEN_CRTC3_REGISTER_OFFSET,
+ (u32)rdev->mc.vram_start);
+ WREG32(EVERGREEN_GRPH_SECONDARY_SURFACE_ADDRESS + EVERGREEN_CRTC3_REGISTER_OFFSET,
+ (u32)rdev->mc.vram_start);
+
+ WREG32(EVERGREEN_GRPH_PRIMARY_SURFACE_ADDRESS_HIGH + EVERGREEN_CRTC4_REGISTER_OFFSET,
+ upper_32_bits(rdev->mc.vram_start));
+ WREG32(EVERGREEN_GRPH_SECONDARY_SURFACE_ADDRESS_HIGH + EVERGREEN_CRTC4_REGISTER_OFFSET,
+ upper_32_bits(rdev->mc.vram_start));
+ WREG32(EVERGREEN_GRPH_PRIMARY_SURFACE_ADDRESS + EVERGREEN_CRTC4_REGISTER_OFFSET,
+ (u32)rdev->mc.vram_start);
+ WREG32(EVERGREEN_GRPH_SECONDARY_SURFACE_ADDRESS + EVERGREEN_CRTC4_REGISTER_OFFSET,
+ (u32)rdev->mc.vram_start);
+
+ WREG32(EVERGREEN_GRPH_PRIMARY_SURFACE_ADDRESS_HIGH + EVERGREEN_CRTC5_REGISTER_OFFSET,
+ upper_32_bits(rdev->mc.vram_start));
+ WREG32(EVERGREEN_GRPH_SECONDARY_SURFACE_ADDRESS_HIGH + EVERGREEN_CRTC5_REGISTER_OFFSET,
+ upper_32_bits(rdev->mc.vram_start));
+ WREG32(EVERGREEN_GRPH_PRIMARY_SURFACE_ADDRESS + EVERGREEN_CRTC5_REGISTER_OFFSET,
+ (u32)rdev->mc.vram_start);
+ WREG32(EVERGREEN_GRPH_SECONDARY_SURFACE_ADDRESS + EVERGREEN_CRTC5_REGISTER_OFFSET,
+ (u32)rdev->mc.vram_start);
+ }
WREG32(EVERGREEN_VGA_MEMORY_BASE_ADDRESS_HIGH, upper_32_bits(rdev->mc.vram_start));
WREG32(EVERGREEN_VGA_MEMORY_BASE_ADDRESS, (u32)rdev->mc.vram_start);
@@ -994,22 +1080,28 @@ static void evergreen_mc_resume(struct radeon_device *rdev, struct evergreen_mc_
WREG32(EVERGREEN_D6VGA_CONTROL, save->vga_control[5]);
WREG32(EVERGREEN_CRTC_UPDATE_LOCK + EVERGREEN_CRTC0_REGISTER_OFFSET, 1);
WREG32(EVERGREEN_CRTC_UPDATE_LOCK + EVERGREEN_CRTC1_REGISTER_OFFSET, 1);
- WREG32(EVERGREEN_CRTC_UPDATE_LOCK + EVERGREEN_CRTC2_REGISTER_OFFSET, 1);
- WREG32(EVERGREEN_CRTC_UPDATE_LOCK + EVERGREEN_CRTC3_REGISTER_OFFSET, 1);
- WREG32(EVERGREEN_CRTC_UPDATE_LOCK + EVERGREEN_CRTC4_REGISTER_OFFSET, 1);
- WREG32(EVERGREEN_CRTC_UPDATE_LOCK + EVERGREEN_CRTC5_REGISTER_OFFSET, 1);
+ if (!(rdev->flags & RADEON_IS_IGP)) {
+ WREG32(EVERGREEN_CRTC_UPDATE_LOCK + EVERGREEN_CRTC2_REGISTER_OFFSET, 1);
+ WREG32(EVERGREEN_CRTC_UPDATE_LOCK + EVERGREEN_CRTC3_REGISTER_OFFSET, 1);
+ WREG32(EVERGREEN_CRTC_UPDATE_LOCK + EVERGREEN_CRTC4_REGISTER_OFFSET, 1);
+ WREG32(EVERGREEN_CRTC_UPDATE_LOCK + EVERGREEN_CRTC5_REGISTER_OFFSET, 1);
+ }
WREG32(EVERGREEN_CRTC_CONTROL + EVERGREEN_CRTC0_REGISTER_OFFSET, save->crtc_control[0]);
WREG32(EVERGREEN_CRTC_CONTROL + EVERGREEN_CRTC1_REGISTER_OFFSET, save->crtc_control[1]);
- WREG32(EVERGREEN_CRTC_CONTROL + EVERGREEN_CRTC2_REGISTER_OFFSET, save->crtc_control[2]);
- WREG32(EVERGREEN_CRTC_CONTROL + EVERGREEN_CRTC3_REGISTER_OFFSET, save->crtc_control[3]);
- WREG32(EVERGREEN_CRTC_CONTROL + EVERGREEN_CRTC4_REGISTER_OFFSET, save->crtc_control[4]);
- WREG32(EVERGREEN_CRTC_CONTROL + EVERGREEN_CRTC5_REGISTER_OFFSET, save->crtc_control[5]);
+ if (!(rdev->flags & RADEON_IS_IGP)) {
+ WREG32(EVERGREEN_CRTC_CONTROL + EVERGREEN_CRTC2_REGISTER_OFFSET, save->crtc_control[2]);
+ WREG32(EVERGREEN_CRTC_CONTROL + EVERGREEN_CRTC3_REGISTER_OFFSET, save->crtc_control[3]);
+ WREG32(EVERGREEN_CRTC_CONTROL + EVERGREEN_CRTC4_REGISTER_OFFSET, save->crtc_control[4]);
+ WREG32(EVERGREEN_CRTC_CONTROL + EVERGREEN_CRTC5_REGISTER_OFFSET, save->crtc_control[5]);
+ }
WREG32(EVERGREEN_CRTC_UPDATE_LOCK + EVERGREEN_CRTC0_REGISTER_OFFSET, 0);
WREG32(EVERGREEN_CRTC_UPDATE_LOCK + EVERGREEN_CRTC1_REGISTER_OFFSET, 0);
- WREG32(EVERGREEN_CRTC_UPDATE_LOCK + EVERGREEN_CRTC2_REGISTER_OFFSET, 0);
- WREG32(EVERGREEN_CRTC_UPDATE_LOCK + EVERGREEN_CRTC3_REGISTER_OFFSET, 0);
- WREG32(EVERGREEN_CRTC_UPDATE_LOCK + EVERGREEN_CRTC4_REGISTER_OFFSET, 0);
- WREG32(EVERGREEN_CRTC_UPDATE_LOCK + EVERGREEN_CRTC5_REGISTER_OFFSET, 0);
+ if (!(rdev->flags & RADEON_IS_IGP)) {
+ WREG32(EVERGREEN_CRTC_UPDATE_LOCK + EVERGREEN_CRTC2_REGISTER_OFFSET, 0);
+ WREG32(EVERGREEN_CRTC_UPDATE_LOCK + EVERGREEN_CRTC3_REGISTER_OFFSET, 0);
+ WREG32(EVERGREEN_CRTC_UPDATE_LOCK + EVERGREEN_CRTC4_REGISTER_OFFSET, 0);
+ WREG32(EVERGREEN_CRTC_UPDATE_LOCK + EVERGREEN_CRTC5_REGISTER_OFFSET, 0);
+ }
WREG32(VGA_RENDER_CONTROL, save->vga_render_control);
}
@@ -1057,11 +1149,17 @@ static void evergreen_mc_program(struct radeon_device *rdev)
rdev->mc.vram_end >> 12);
}
WREG32(MC_VM_SYSTEM_APERTURE_DEFAULT_ADDR, 0);
+ if (rdev->flags & RADEON_IS_IGP) {
+ tmp = RREG32(MC_FUS_VM_FB_OFFSET) & 0x000FFFFF;
+ tmp |= ((rdev->mc.vram_end >> 20) & 0xF) << 24;
+ tmp |= ((rdev->mc.vram_start >> 20) & 0xF) << 20;
+ WREG32(MC_FUS_VM_FB_OFFSET, tmp);
+ }
tmp = ((rdev->mc.vram_end >> 24) & 0xFFFF) << 16;
tmp |= ((rdev->mc.vram_start >> 24) & 0xFFFF);
WREG32(MC_VM_FB_LOCATION, tmp);
WREG32(HDP_NONSURFACE_BASE, (rdev->mc.vram_start >> 8));
- WREG32(HDP_NONSURFACE_INFO, (2 << 7));
+ WREG32(HDP_NONSURFACE_INFO, (2 << 7) | (1 << 30));
WREG32(HDP_NONSURFACE_SIZE, 0x3FFFFFFF);
if (rdev->flags & RADEON_IS_AGP) {
WREG32(MC_VM_AGP_TOP, rdev->mc.gtt_end >> 16);
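
Note: the two register writes in the hunk above pack the same VRAM window at different granularities: MC_VM_FB_LOCATION takes 16-bit fields in 16 MB units (address >> 24), while the Fusion-only MC_FUS_VM_FB_OFFSET takes 4-bit fields in 1 MB units (address >> 20), merged into the preserved register value. A stand-alone sketch of that packing, using example addresses:

#include <stdint.h>
#include <stdio.h>

int main(void)
{
	uint64_t vram_start = 0x00000000ULL;
	uint64_t vram_end   = 0x3fffffffULL;   /* example: 1 GB - 1 */
	uint32_t fb_loc, fus_off = 0x00012345; /* pretend prior register value */

	/* MC_VM_FB_LOCATION: end in bits 31:16, start in bits 15:0, 16 MB units */
	fb_loc  = (uint32_t)(((vram_end >> 24) & 0xFFFF) << 16);
	fb_loc |= (uint32_t)((vram_start >> 24) & 0xFFFF);

	/* MC_FUS_VM_FB_OFFSET: keep bits 19:0, end in 27:24, start in 23:20, 1 MB units */
	fus_off &= 0x000FFFFF;
	fus_off |= (uint32_t)(((vram_end >> 20) & 0xF) << 24);
	fus_off |= (uint32_t)(((vram_start >> 20) & 0xF) << 20);

	printf("FB_LOCATION=0x%08x FUS_FB_OFFSET=0x%08x\n", fb_loc, fus_off);
	return 0;
}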
@@ -1285,11 +1383,15 @@ static u32 evergreen_get_tile_pipe_to_backend_map(struct radeon_device *rdev,
switch (rdev->family) {
case CHIP_CEDAR:
case CHIP_REDWOOD:
+ case CHIP_PALM:
+ case CHIP_TURKS:
+ case CHIP_CAICOS:
force_no_swizzle = false;
break;
case CHIP_CYPRESS:
case CHIP_HEMLOCK:
case CHIP_JUNIPER:
+ case CHIP_BARTS:
default:
force_no_swizzle = true;
break;
@@ -1384,6 +1486,46 @@ static u32 evergreen_get_tile_pipe_to_backend_map(struct radeon_device *rdev,
return backend_map;
}
+static void evergreen_program_channel_remap(struct radeon_device *rdev)
+{
+ u32 tcp_chan_steer_lo, tcp_chan_steer_hi, mc_shared_chremap, tmp;
+
+ tmp = RREG32(MC_SHARED_CHMAP);
+ switch ((tmp & NOOFCHAN_MASK) >> NOOFCHAN_SHIFT) {
+ case 0:
+ case 1:
+ case 2:
+ case 3:
+ default:
+ /* default mapping */
+ mc_shared_chremap = 0x00fac688;
+ break;
+ }
+
+ switch (rdev->family) {
+ case CHIP_HEMLOCK:
+ case CHIP_CYPRESS:
+ case CHIP_BARTS:
+ tcp_chan_steer_lo = 0x54763210;
+ tcp_chan_steer_hi = 0x0000ba98;
+ break;
+ case CHIP_JUNIPER:
+ case CHIP_REDWOOD:
+ case CHIP_CEDAR:
+ case CHIP_PALM:
+ case CHIP_TURKS:
+ case CHIP_CAICOS:
+ default:
+ tcp_chan_steer_lo = 0x76543210;
+ tcp_chan_steer_hi = 0x0000ba98;
+ break;
+ }
+
+ WREG32(TCP_CHAN_STEER_LO, tcp_chan_steer_lo);
+ WREG32(TCP_CHAN_STEER_HI, tcp_chan_steer_hi);
+ WREG32(MC_SHARED_CHREMAP, mc_shared_chremap);
+}
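
Note: the remap helper above keys everything off the memory-channel count in MC_SHARED_CHMAP; with NOOFCHAN_MASK 0x3000 and NOOFCHAN_SHIFT 12 (see the evergreend.h hunk later in this patch), the extraction is a two-bit field read. A minimal sketch with an assumed register sample:

#include <stdint.h>
#include <stdio.h>

#define NOOFCHAN_MASK  0x00003000u
#define NOOFCHAN_SHIFT 12

int main(void)
{
	uint32_t chmap = 0x00002000; /* assumed sample: field value 2 */
	uint32_t field = (chmap & NOOFCHAN_MASK) >> NOOFCHAN_SHIFT;

	/* evergreen_program_channel_remap() maps every field value (0-3)
	 * to the same default chremap word, 0x00fac688 */
	printf("NOOFCHAN field=%u -> chremap=0x%08x\n", field, 0x00fac688);
	return 0;
}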
+
static void evergreen_gpu_init(struct radeon_device *rdev)
{
u32 cc_rb_backend_disable = 0;
@@ -1495,6 +1637,90 @@ static void evergreen_gpu_init(struct radeon_device *rdev)
rdev->config.evergreen.sc_hiz_tile_fifo_size = 0x30;
rdev->config.evergreen.sc_earlyz_tile_fifo_size = 0x130;
break;
+ case CHIP_PALM:
+ rdev->config.evergreen.num_ses = 1;
+ rdev->config.evergreen.max_pipes = 2;
+ rdev->config.evergreen.max_tile_pipes = 2;
+ rdev->config.evergreen.max_simds = 2;
+ rdev->config.evergreen.max_backends = 1 * rdev->config.evergreen.num_ses;
+ rdev->config.evergreen.max_gprs = 256;
+ rdev->config.evergreen.max_threads = 192;
+ rdev->config.evergreen.max_gs_threads = 16;
+ rdev->config.evergreen.max_stack_entries = 256;
+ rdev->config.evergreen.sx_num_of_sets = 4;
+ rdev->config.evergreen.sx_max_export_size = 128;
+ rdev->config.evergreen.sx_max_export_pos_size = 32;
+ rdev->config.evergreen.sx_max_export_smx_size = 96;
+ rdev->config.evergreen.max_hw_contexts = 4;
+ rdev->config.evergreen.sq_num_cf_insts = 1;
+
+ rdev->config.evergreen.sc_prim_fifo_size = 0x40;
+ rdev->config.evergreen.sc_hiz_tile_fifo_size = 0x30;
+ rdev->config.evergreen.sc_earlyz_tile_fifo_size = 0x130;
+ break;
+ case CHIP_BARTS:
+ rdev->config.evergreen.num_ses = 2;
+ rdev->config.evergreen.max_pipes = 4;
+ rdev->config.evergreen.max_tile_pipes = 8;
+ rdev->config.evergreen.max_simds = 7;
+ rdev->config.evergreen.max_backends = 4 * rdev->config.evergreen.num_ses;
+ rdev->config.evergreen.max_gprs = 256;
+ rdev->config.evergreen.max_threads = 248;
+ rdev->config.evergreen.max_gs_threads = 32;
+ rdev->config.evergreen.max_stack_entries = 512;
+ rdev->config.evergreen.sx_num_of_sets = 4;
+ rdev->config.evergreen.sx_max_export_size = 256;
+ rdev->config.evergreen.sx_max_export_pos_size = 64;
+ rdev->config.evergreen.sx_max_export_smx_size = 192;
+ rdev->config.evergreen.max_hw_contexts = 8;
+ rdev->config.evergreen.sq_num_cf_insts = 2;
+
+ rdev->config.evergreen.sc_prim_fifo_size = 0x100;
+ rdev->config.evergreen.sc_hiz_tile_fifo_size = 0x30;
+ rdev->config.evergreen.sc_earlyz_tile_fifo_size = 0x130;
+ break;
+ case CHIP_TURKS:
+ rdev->config.evergreen.num_ses = 1;
+ rdev->config.evergreen.max_pipes = 4;
+ rdev->config.evergreen.max_tile_pipes = 4;
+ rdev->config.evergreen.max_simds = 6;
+ rdev->config.evergreen.max_backends = 2 * rdev->config.evergreen.num_ses;
+ rdev->config.evergreen.max_gprs = 256;
+ rdev->config.evergreen.max_threads = 248;
+ rdev->config.evergreen.max_gs_threads = 32;
+ rdev->config.evergreen.max_stack_entries = 256;
+ rdev->config.evergreen.sx_num_of_sets = 4;
+ rdev->config.evergreen.sx_max_export_size = 256;
+ rdev->config.evergreen.sx_max_export_pos_size = 64;
+ rdev->config.evergreen.sx_max_export_smx_size = 192;
+ rdev->config.evergreen.max_hw_contexts = 8;
+ rdev->config.evergreen.sq_num_cf_insts = 2;
+
+ rdev->config.evergreen.sc_prim_fifo_size = 0x100;
+ rdev->config.evergreen.sc_hiz_tile_fifo_size = 0x30;
+ rdev->config.evergreen.sc_earlyz_tile_fifo_size = 0x130;
+ break;
+ case CHIP_CAICOS:
+ rdev->config.evergreen.num_ses = 1;
+ rdev->config.evergreen.max_pipes = 4;
+ rdev->config.evergreen.max_tile_pipes = 2;
+ rdev->config.evergreen.max_simds = 2;
+ rdev->config.evergreen.max_backends = 1 * rdev->config.evergreen.num_ses;
+ rdev->config.evergreen.max_gprs = 256;
+ rdev->config.evergreen.max_threads = 192;
+ rdev->config.evergreen.max_gs_threads = 16;
+ rdev->config.evergreen.max_stack_entries = 256;
+ rdev->config.evergreen.sx_num_of_sets = 4;
+ rdev->config.evergreen.sx_max_export_size = 128;
+ rdev->config.evergreen.sx_max_export_pos_size = 32;
+ rdev->config.evergreen.sx_max_export_smx_size = 96;
+ rdev->config.evergreen.max_hw_contexts = 4;
+ rdev->config.evergreen.sq_num_cf_insts = 1;
+
+ rdev->config.evergreen.sc_prim_fifo_size = 0x40;
+ rdev->config.evergreen.sc_hiz_tile_fifo_size = 0x30;
+ rdev->config.evergreen.sc_earlyz_tile_fifo_size = 0x130;
+ break;
}
/* Initialize HDP */
@@ -1636,6 +1862,7 @@ static void evergreen_gpu_init(struct radeon_device *rdev)
switch (rdev->family) {
case CHIP_CYPRESS:
case CHIP_HEMLOCK:
+ case CHIP_BARTS:
gb_backend_map = 0x66442200;
break;
case CHIP_JUNIPER:
@@ -1687,6 +1914,8 @@ static void evergreen_gpu_init(struct radeon_device *rdev)
WREG32(DMIF_ADDR_CONFIG, gb_addr_config);
WREG32(HDP_ADDR_CONFIG, gb_addr_config);
+ evergreen_program_channel_remap(rdev);
+
num_shader_engines = ((RREG32(GB_ADDR_CONFIG) & NUM_SHADER_ENGINES(3)) >> 12) + 1;
grbm_gfx_index = INSTANCE_BROADCAST_WRITES;
@@ -1769,9 +1998,16 @@ static void evergreen_gpu_init(struct radeon_device *rdev)
GS_PRIO(2) |
ES_PRIO(3));
- if (rdev->family == CHIP_CEDAR)
+ switch (rdev->family) {
+ case CHIP_CEDAR:
+ case CHIP_PALM:
+ case CHIP_CAICOS:
/* no vertex cache */
sq_config &= ~VC_ENABLE;
+ break;
+ default:
+ break;
+ }
sq_lds_resource_mgmt = RREG32(SQ_LDS_RESOURCE_MGMT);
@@ -1783,10 +2019,15 @@ static void evergreen_gpu_init(struct radeon_device *rdev)
sq_gpr_resource_mgmt_3 = NUM_HS_GPRS((rdev->config.evergreen.max_gprs - (4 * 2)) * 3 / 32);
sq_gpr_resource_mgmt_3 |= NUM_LS_GPRS((rdev->config.evergreen.max_gprs - (4 * 2)) * 3 / 32);
- if (rdev->family == CHIP_CEDAR)
+ switch (rdev->family) {
+ case CHIP_CEDAR:
+ case CHIP_PALM:
ps_thread_count = 96;
- else
+ break;
+ default:
ps_thread_count = 128;
+ break;
+ }
sq_thread_resource_mgmt = NUM_PS_THREADS(ps_thread_count);
sq_thread_resource_mgmt |= NUM_VS_THREADS((((rdev->config.evergreen.max_threads - ps_thread_count) / 6) / 8) * 8);
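
Note: the NUM_VS_THREADS expression above reserves ps_thread_count threads for PS and gives the VS stage a sixth of the remainder, rounded down to a multiple of 8; only the VS line is visible in this hunk, and the other stages presumably follow the same pattern. Worked through for the Cedar/Palm numbers (max_threads = 192, ps = 96) as a stand-alone sketch:

#include <stdio.h>

int main(void)
{
	unsigned max_threads = 192, ps_thread_count = 96; /* Cedar/Palm case */
	unsigned per_stage = (((max_threads - ps_thread_count) / 6) / 8) * 8;

	/* 192 - 96 = 96; 96 / 6 = 16; already a multiple of 8 -> 16 */
	printf("PS=%u, each other stage=%u\n", ps_thread_count, per_stage);
	return 0;
}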
@@ -1817,10 +2058,16 @@ static void evergreen_gpu_init(struct radeon_device *rdev)
WREG32(PA_SC_FORCE_EOV_MAX_CNTS, (FORCE_EOV_MAX_CLK_CNT(4095) |
FORCE_EOV_MAX_REZ_CNT(255)));
- if (rdev->family == CHIP_CEDAR)
+ switch (rdev->family) {
+ case CHIP_CEDAR:
+ case CHIP_PALM:
+ case CHIP_CAICOS:
vgt_cache_invalidation = CACHE_INVALIDATION(TC_ONLY);
- else
+ break;
+ default:
vgt_cache_invalidation = CACHE_INVALIDATION(VC_AND_TC);
+ break;
+ }
vgt_cache_invalidation |= AUTO_INVLD_EN(ES_AND_GS_AUTO);
WREG32(VGT_CACHE_INVALIDATION, vgt_cache_invalidation);
@@ -1904,12 +2151,18 @@ int evergreen_mc_init(struct radeon_device *rdev)
rdev->mc.aper_base = pci_resource_start(rdev->pdev, 0);
rdev->mc.aper_size = pci_resource_len(rdev->pdev, 0);
/* Setup GPU memory space */
- /* size in MB on evergreen */
- rdev->mc.mc_vram_size = RREG32(CONFIG_MEMSIZE) * 1024 * 1024;
- rdev->mc.real_vram_size = RREG32(CONFIG_MEMSIZE) * 1024 * 1024;
+ if (rdev->flags & RADEON_IS_IGP) {
+ /* size in bytes on fusion */
+ rdev->mc.mc_vram_size = RREG32(CONFIG_MEMSIZE);
+ rdev->mc.real_vram_size = RREG32(CONFIG_MEMSIZE);
+ } else {
+ /* size in MB on evergreen */
+ rdev->mc.mc_vram_size = RREG32(CONFIG_MEMSIZE) * 1024 * 1024;
+ rdev->mc.real_vram_size = RREG32(CONFIG_MEMSIZE) * 1024 * 1024;
+ }
rdev->mc.visible_vram_size = rdev->mc.aper_size;
rdev->mc.active_vram_size = rdev->mc.visible_vram_size;
- r600_vram_gtt_location(rdev, &rdev->mc);
+ r700_vram_gtt_location(rdev, &rdev->mc);
radeon_update_bandwidth_info(rdev);
return 0;
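
Note: the CONFIG_MEMSIZE change above is purely a units fix: Fusion IGPs report the carve-out size in bytes, while discrete Evergreen parts report it in megabytes. A stand-alone sketch of the conversion, with assumed register readings:

#include <stdint.h>
#include <stdio.h>

static uint64_t vram_bytes(uint32_t config_memsize, int is_igp)
{
	/* IGP (Fusion): register already holds bytes.
	 * Discrete Evergreen: register holds megabytes. */
	return is_igp ? (uint64_t)config_memsize
	              : (uint64_t)config_memsize * 1024 * 1024;
}

int main(void)
{
	printf("igp: %llu bytes\n", (unsigned long long)vram_bytes(512u << 20, 1));
	printf("dgpu: %llu bytes\n", (unsigned long long)vram_bytes(1024, 0));
	return 0;
}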
@@ -1917,8 +2170,30 @@ int evergreen_mc_init(struct radeon_device *rdev)
bool evergreen_gpu_is_lockup(struct radeon_device *rdev)
{
- /* FIXME: implement for evergreen */
- return false;
+ u32 srbm_status;
+ u32 grbm_status;
+ u32 grbm_status_se0, grbm_status_se1;
+ struct r100_gpu_lockup *lockup = &rdev->config.evergreen.lockup;
+ int r;
+
+ srbm_status = RREG32(SRBM_STATUS);
+ grbm_status = RREG32(GRBM_STATUS);
+ grbm_status_se0 = RREG32(GRBM_STATUS_SE0);
+ grbm_status_se1 = RREG32(GRBM_STATUS_SE1);
+ if (!(grbm_status & GUI_ACTIVE)) {
+ r100_gpu_lockup_update(lockup, &rdev->cp);
+ return false;
+ }
+ /* force CP activities */
+ r = radeon_ring_lock(rdev, 2);
+ if (!r) {
+ /* PACKET2 NOP */
+ radeon_ring_write(rdev, 0x80000000);
+ radeon_ring_write(rdev, 0x80000000);
+ radeon_ring_unlock_commit(rdev);
+ }
+ rdev->cp.rptr = RREG32(CP_RB_RPTR);
+ return r100_gpu_cp_is_lockup(rdev, lockup, &rdev->cp);
}
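
Note: the 0x80000000 words written above are type-2 (filler/NOP) PM4 packets; the packet type lives in bits 31:30, so 0x80000000 decodes to type 2. A one-liner sketch of the decode:

#include <stdint.h>
#include <stdio.h>

int main(void)
{
	uint32_t header = 0x80000000;         /* the NOP pushed by the lockup check */
	unsigned type = (header >> 30) & 0x3; /* PM4 packet type field, bits 31:30 */

	printf("packet type = %u (2 = filler/NOP)\n", type);
	return 0;
}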
static int evergreen_gpu_soft_reset(struct radeon_device *rdev)
@@ -2011,17 +2286,21 @@ void evergreen_disable_interrupt_state(struct radeon_device *rdev)
WREG32(GRBM_INT_CNTL, 0);
WREG32(INT_MASK + EVERGREEN_CRTC0_REGISTER_OFFSET, 0);
WREG32(INT_MASK + EVERGREEN_CRTC1_REGISTER_OFFSET, 0);
- WREG32(INT_MASK + EVERGREEN_CRTC2_REGISTER_OFFSET, 0);
- WREG32(INT_MASK + EVERGREEN_CRTC3_REGISTER_OFFSET, 0);
- WREG32(INT_MASK + EVERGREEN_CRTC4_REGISTER_OFFSET, 0);
- WREG32(INT_MASK + EVERGREEN_CRTC5_REGISTER_OFFSET, 0);
+ if (!(rdev->flags & RADEON_IS_IGP)) {
+ WREG32(INT_MASK + EVERGREEN_CRTC2_REGISTER_OFFSET, 0);
+ WREG32(INT_MASK + EVERGREEN_CRTC3_REGISTER_OFFSET, 0);
+ WREG32(INT_MASK + EVERGREEN_CRTC4_REGISTER_OFFSET, 0);
+ WREG32(INT_MASK + EVERGREEN_CRTC5_REGISTER_OFFSET, 0);
+ }
WREG32(GRPH_INT_CONTROL + EVERGREEN_CRTC0_REGISTER_OFFSET, 0);
WREG32(GRPH_INT_CONTROL + EVERGREEN_CRTC1_REGISTER_OFFSET, 0);
- WREG32(GRPH_INT_CONTROL + EVERGREEN_CRTC2_REGISTER_OFFSET, 0);
- WREG32(GRPH_INT_CONTROL + EVERGREEN_CRTC3_REGISTER_OFFSET, 0);
- WREG32(GRPH_INT_CONTROL + EVERGREEN_CRTC4_REGISTER_OFFSET, 0);
- WREG32(GRPH_INT_CONTROL + EVERGREEN_CRTC5_REGISTER_OFFSET, 0);
+ if (!(rdev->flags & RADEON_IS_IGP)) {
+ WREG32(GRPH_INT_CONTROL + EVERGREEN_CRTC2_REGISTER_OFFSET, 0);
+ WREG32(GRPH_INT_CONTROL + EVERGREEN_CRTC3_REGISTER_OFFSET, 0);
+ WREG32(GRPH_INT_CONTROL + EVERGREEN_CRTC4_REGISTER_OFFSET, 0);
+ WREG32(GRPH_INT_CONTROL + EVERGREEN_CRTC5_REGISTER_OFFSET, 0);
+ }
WREG32(DACA_AUTODETECT_INT_CONTROL, 0);
WREG32(DACB_AUTODETECT_INT_CONTROL, 0);
@@ -2047,6 +2326,7 @@ int evergreen_irq_set(struct radeon_device *rdev)
u32 crtc1 = 0, crtc2 = 0, crtc3 = 0, crtc4 = 0, crtc5 = 0, crtc6 = 0;
u32 hpd1, hpd2, hpd3, hpd4, hpd5, hpd6;
u32 grbm_int_cntl = 0;
+ u32 grph1 = 0, grph2 = 0, grph3 = 0, grph4 = 0, grph5 = 0, grph6 = 0;
if (!rdev->irq.installed) {
WARN(1, "Can't enable IRQ/MSI because no handler is installed\n");
@@ -2072,27 +2352,33 @@ int evergreen_irq_set(struct radeon_device *rdev)
cp_int_cntl |= RB_INT_ENABLE;
cp_int_cntl |= TIME_STAMP_INT_ENABLE;
}
- if (rdev->irq.crtc_vblank_int[0]) {
+ if (rdev->irq.crtc_vblank_int[0] ||
+ rdev->irq.pflip[0]) {
DRM_DEBUG("evergreen_irq_set: vblank 0\n");
crtc1 |= VBLANK_INT_MASK;
}
- if (rdev->irq.crtc_vblank_int[1]) {
+ if (rdev->irq.crtc_vblank_int[1] ||
+ rdev->irq.pflip[1]) {
DRM_DEBUG("evergreen_irq_set: vblank 1\n");
crtc2 |= VBLANK_INT_MASK;
}
- if (rdev->irq.crtc_vblank_int[2]) {
+ if (rdev->irq.crtc_vblank_int[2] ||
+ rdev->irq.pflip[2]) {
DRM_DEBUG("evergreen_irq_set: vblank 2\n");
crtc3 |= VBLANK_INT_MASK;
}
- if (rdev->irq.crtc_vblank_int[3]) {
+ if (rdev->irq.crtc_vblank_int[3] ||
+ rdev->irq.pflip[3]) {
DRM_DEBUG("evergreen_irq_set: vblank 3\n");
crtc4 |= VBLANK_INT_MASK;
}
- if (rdev->irq.crtc_vblank_int[4]) {
+ if (rdev->irq.crtc_vblank_int[4] ||
+ rdev->irq.pflip[4]) {
DRM_DEBUG("evergreen_irq_set: vblank 4\n");
crtc5 |= VBLANK_INT_MASK;
}
- if (rdev->irq.crtc_vblank_int[5]) {
+ if (rdev->irq.crtc_vblank_int[5] ||
+ rdev->irq.pflip[5]) {
DRM_DEBUG("evergreen_irq_set: vblank 5\n");
crtc6 |= VBLANK_INT_MASK;
}
@@ -2130,10 +2416,19 @@ int evergreen_irq_set(struct radeon_device *rdev)
WREG32(INT_MASK + EVERGREEN_CRTC0_REGISTER_OFFSET, crtc1);
WREG32(INT_MASK + EVERGREEN_CRTC1_REGISTER_OFFSET, crtc2);
- WREG32(INT_MASK + EVERGREEN_CRTC2_REGISTER_OFFSET, crtc3);
- WREG32(INT_MASK + EVERGREEN_CRTC3_REGISTER_OFFSET, crtc4);
- WREG32(INT_MASK + EVERGREEN_CRTC4_REGISTER_OFFSET, crtc5);
- WREG32(INT_MASK + EVERGREEN_CRTC5_REGISTER_OFFSET, crtc6);
+ if (!(rdev->flags & RADEON_IS_IGP)) {
+ WREG32(INT_MASK + EVERGREEN_CRTC2_REGISTER_OFFSET, crtc3);
+ WREG32(INT_MASK + EVERGREEN_CRTC3_REGISTER_OFFSET, crtc4);
+ WREG32(INT_MASK + EVERGREEN_CRTC4_REGISTER_OFFSET, crtc5);
+ WREG32(INT_MASK + EVERGREEN_CRTC5_REGISTER_OFFSET, crtc6);
+ }
+
+ WREG32(GRPH_INT_CONTROL + EVERGREEN_CRTC0_REGISTER_OFFSET, grph1);
+ WREG32(GRPH_INT_CONTROL + EVERGREEN_CRTC1_REGISTER_OFFSET, grph2);
+ WREG32(GRPH_INT_CONTROL + EVERGREEN_CRTC2_REGISTER_OFFSET, grph3);
+ WREG32(GRPH_INT_CONTROL + EVERGREEN_CRTC3_REGISTER_OFFSET, grph4);
+ WREG32(GRPH_INT_CONTROL + EVERGREEN_CRTC4_REGISTER_OFFSET, grph5);
+ WREG32(GRPH_INT_CONTROL + EVERGREEN_CRTC5_REGISTER_OFFSET, grph6);
WREG32(DC_HPD1_INT_CONTROL, hpd1);
WREG32(DC_HPD2_INT_CONTROL, hpd2);
@@ -2145,79 +2440,92 @@ int evergreen_irq_set(struct radeon_device *rdev)
return 0;
}
-static inline void evergreen_irq_ack(struct radeon_device *rdev,
- u32 *disp_int,
- u32 *disp_int_cont,
- u32 *disp_int_cont2,
- u32 *disp_int_cont3,
- u32 *disp_int_cont4,
- u32 *disp_int_cont5)
+static inline void evergreen_irq_ack(struct radeon_device *rdev)
{
u32 tmp;
- *disp_int = RREG32(DISP_INTERRUPT_STATUS);
- *disp_int_cont = RREG32(DISP_INTERRUPT_STATUS_CONTINUE);
- *disp_int_cont2 = RREG32(DISP_INTERRUPT_STATUS_CONTINUE2);
- *disp_int_cont3 = RREG32(DISP_INTERRUPT_STATUS_CONTINUE3);
- *disp_int_cont4 = RREG32(DISP_INTERRUPT_STATUS_CONTINUE4);
- *disp_int_cont5 = RREG32(DISP_INTERRUPT_STATUS_CONTINUE5);
-
- if (*disp_int & LB_D1_VBLANK_INTERRUPT)
+ rdev->irq.stat_regs.evergreen.disp_int = RREG32(DISP_INTERRUPT_STATUS);
+ rdev->irq.stat_regs.evergreen.disp_int_cont = RREG32(DISP_INTERRUPT_STATUS_CONTINUE);
+ rdev->irq.stat_regs.evergreen.disp_int_cont2 = RREG32(DISP_INTERRUPT_STATUS_CONTINUE2);
+ rdev->irq.stat_regs.evergreen.disp_int_cont3 = RREG32(DISP_INTERRUPT_STATUS_CONTINUE3);
+ rdev->irq.stat_regs.evergreen.disp_int_cont4 = RREG32(DISP_INTERRUPT_STATUS_CONTINUE4);
+ rdev->irq.stat_regs.evergreen.disp_int_cont5 = RREG32(DISP_INTERRUPT_STATUS_CONTINUE5);
+ rdev->irq.stat_regs.evergreen.d1grph_int = RREG32(GRPH_INT_STATUS + EVERGREEN_CRTC0_REGISTER_OFFSET);
+ rdev->irq.stat_regs.evergreen.d2grph_int = RREG32(GRPH_INT_STATUS + EVERGREEN_CRTC1_REGISTER_OFFSET);
+ rdev->irq.stat_regs.evergreen.d3grph_int = RREG32(GRPH_INT_STATUS + EVERGREEN_CRTC2_REGISTER_OFFSET);
+ rdev->irq.stat_regs.evergreen.d4grph_int = RREG32(GRPH_INT_STATUS + EVERGREEN_CRTC3_REGISTER_OFFSET);
+ rdev->irq.stat_regs.evergreen.d5grph_int = RREG32(GRPH_INT_STATUS + EVERGREEN_CRTC4_REGISTER_OFFSET);
+ rdev->irq.stat_regs.evergreen.d6grph_int = RREG32(GRPH_INT_STATUS + EVERGREEN_CRTC5_REGISTER_OFFSET);
+
+ if (rdev->irq.stat_regs.evergreen.d1grph_int & GRPH_PFLIP_INT_OCCURRED)
+ WREG32(GRPH_INT_STATUS + EVERGREEN_CRTC0_REGISTER_OFFSET, GRPH_PFLIP_INT_CLEAR);
+ if (rdev->irq.stat_regs.evergreen.d2grph_int & GRPH_PFLIP_INT_OCCURRED)
+ WREG32(GRPH_INT_STATUS + EVERGREEN_CRTC1_REGISTER_OFFSET, GRPH_PFLIP_INT_CLEAR);
+ if (rdev->irq.stat_regs.evergreen.d3grph_int & GRPH_PFLIP_INT_OCCURRED)
+ WREG32(GRPH_INT_STATUS + EVERGREEN_CRTC2_REGISTER_OFFSET, GRPH_PFLIP_INT_CLEAR);
+ if (rdev->irq.stat_regs.evergreen.d4grph_int & GRPH_PFLIP_INT_OCCURRED)
+ WREG32(GRPH_INT_STATUS + EVERGREEN_CRTC3_REGISTER_OFFSET, GRPH_PFLIP_INT_CLEAR);
+ if (rdev->irq.stat_regs.evergreen.d5grph_int & GRPH_PFLIP_INT_OCCURRED)
+ WREG32(GRPH_INT_STATUS + EVERGREEN_CRTC4_REGISTER_OFFSET, GRPH_PFLIP_INT_CLEAR);
+ if (rdev->irq.stat_regs.evergreen.d6grph_int & GRPH_PFLIP_INT_OCCURRED)
+ WREG32(GRPH_INT_STATUS + EVERGREEN_CRTC5_REGISTER_OFFSET, GRPH_PFLIP_INT_CLEAR);
+
+ if (rdev->irq.stat_regs.evergreen.disp_int & LB_D1_VBLANK_INTERRUPT)
WREG32(VBLANK_STATUS + EVERGREEN_CRTC0_REGISTER_OFFSET, VBLANK_ACK);
- if (*disp_int & LB_D1_VLINE_INTERRUPT)
+ if (rdev->irq.stat_regs.evergreen.disp_int & LB_D1_VLINE_INTERRUPT)
WREG32(VLINE_STATUS + EVERGREEN_CRTC0_REGISTER_OFFSET, VLINE_ACK);
- if (*disp_int_cont & LB_D2_VBLANK_INTERRUPT)
+ if (rdev->irq.stat_regs.evergreen.disp_int_cont & LB_D2_VBLANK_INTERRUPT)
WREG32(VBLANK_STATUS + EVERGREEN_CRTC1_REGISTER_OFFSET, VBLANK_ACK);
- if (*disp_int_cont & LB_D2_VLINE_INTERRUPT)
+ if (rdev->irq.stat_regs.evergreen.disp_int_cont & LB_D2_VLINE_INTERRUPT)
WREG32(VLINE_STATUS + EVERGREEN_CRTC1_REGISTER_OFFSET, VLINE_ACK);
- if (*disp_int_cont2 & LB_D3_VBLANK_INTERRUPT)
+ if (rdev->irq.stat_regs.evergreen.disp_int_cont2 & LB_D3_VBLANK_INTERRUPT)
WREG32(VBLANK_STATUS + EVERGREEN_CRTC2_REGISTER_OFFSET, VBLANK_ACK);
- if (*disp_int_cont2 & LB_D3_VLINE_INTERRUPT)
+ if (rdev->irq.stat_regs.evergreen.disp_int_cont2 & LB_D3_VLINE_INTERRUPT)
WREG32(VLINE_STATUS + EVERGREEN_CRTC2_REGISTER_OFFSET, VLINE_ACK);
- if (*disp_int_cont3 & LB_D4_VBLANK_INTERRUPT)
+ if (rdev->irq.stat_regs.evergreen.disp_int_cont3 & LB_D4_VBLANK_INTERRUPT)
WREG32(VBLANK_STATUS + EVERGREEN_CRTC3_REGISTER_OFFSET, VBLANK_ACK);
- if (*disp_int_cont3 & LB_D4_VLINE_INTERRUPT)
+ if (rdev->irq.stat_regs.evergreen.disp_int_cont3 & LB_D4_VLINE_INTERRUPT)
WREG32(VLINE_STATUS + EVERGREEN_CRTC3_REGISTER_OFFSET, VLINE_ACK);
- if (*disp_int_cont4 & LB_D5_VBLANK_INTERRUPT)
+ if (rdev->irq.stat_regs.evergreen.disp_int_cont4 & LB_D5_VBLANK_INTERRUPT)
WREG32(VBLANK_STATUS + EVERGREEN_CRTC4_REGISTER_OFFSET, VBLANK_ACK);
- if (*disp_int_cont4 & LB_D5_VLINE_INTERRUPT)
+ if (rdev->irq.stat_regs.evergreen.disp_int_cont4 & LB_D5_VLINE_INTERRUPT)
WREG32(VLINE_STATUS + EVERGREEN_CRTC4_REGISTER_OFFSET, VLINE_ACK);
- if (*disp_int_cont5 & LB_D6_VBLANK_INTERRUPT)
+ if (rdev->irq.stat_regs.evergreen.disp_int_cont5 & LB_D6_VBLANK_INTERRUPT)
WREG32(VBLANK_STATUS + EVERGREEN_CRTC5_REGISTER_OFFSET, VBLANK_ACK);
- if (*disp_int_cont5 & LB_D6_VLINE_INTERRUPT)
+ if (rdev->irq.stat_regs.evergreen.disp_int_cont5 & LB_D6_VLINE_INTERRUPT)
WREG32(VLINE_STATUS + EVERGREEN_CRTC5_REGISTER_OFFSET, VLINE_ACK);
- if (*disp_int & DC_HPD1_INTERRUPT) {
+ if (rdev->irq.stat_regs.evergreen.disp_int & DC_HPD1_INTERRUPT) {
tmp = RREG32(DC_HPD1_INT_CONTROL);
tmp |= DC_HPDx_INT_ACK;
WREG32(DC_HPD1_INT_CONTROL, tmp);
}
- if (*disp_int_cont & DC_HPD2_INTERRUPT) {
+ if (rdev->irq.stat_regs.evergreen.disp_int_cont & DC_HPD2_INTERRUPT) {
tmp = RREG32(DC_HPD2_INT_CONTROL);
tmp |= DC_HPDx_INT_ACK;
WREG32(DC_HPD2_INT_CONTROL, tmp);
}
- if (*disp_int_cont2 & DC_HPD3_INTERRUPT) {
+ if (rdev->irq.stat_regs.evergreen.disp_int_cont2 & DC_HPD3_INTERRUPT) {
tmp = RREG32(DC_HPD3_INT_CONTROL);
tmp |= DC_HPDx_INT_ACK;
WREG32(DC_HPD3_INT_CONTROL, tmp);
}
- if (*disp_int_cont3 & DC_HPD4_INTERRUPT) {
+ if (rdev->irq.stat_regs.evergreen.disp_int_cont3 & DC_HPD4_INTERRUPT) {
tmp = RREG32(DC_HPD4_INT_CONTROL);
tmp |= DC_HPDx_INT_ACK;
WREG32(DC_HPD4_INT_CONTROL, tmp);
}
- if (*disp_int_cont4 & DC_HPD5_INTERRUPT) {
+ if (rdev->irq.stat_regs.evergreen.disp_int_cont4 & DC_HPD5_INTERRUPT) {
tmp = RREG32(DC_HPD5_INT_CONTROL);
tmp |= DC_HPDx_INT_ACK;
WREG32(DC_HPD5_INT_CONTROL, tmp);
}
- if (*disp_int_cont5 & DC_HPD6_INTERRUPT) {
+ if (rdev->irq.stat_regs.evergreen.disp_int_cont5 & DC_HPD6_INTERRUPT) {
tmp = RREG32(DC_HPD6_INT_CONTROL);
tmp |= DC_HPDx_INT_ACK;
WREG32(DC_HPD6_INT_CONTROL, tmp);
@@ -2226,14 +2534,10 @@ static inline void evergreen_irq_ack(struct radeon_device *rdev,
void evergreen_irq_disable(struct radeon_device *rdev)
{
- u32 disp_int, disp_int_cont, disp_int_cont2;
- u32 disp_int_cont3, disp_int_cont4, disp_int_cont5;
-
r600_disable_interrupts(rdev);
/* Wait and acknowledge irq */
mdelay(1);
- evergreen_irq_ack(rdev, &disp_int, &disp_int_cont, &disp_int_cont2,
- &disp_int_cont3, &disp_int_cont4, &disp_int_cont5);
+ evergreen_irq_ack(rdev);
evergreen_disable_interrupt_state(rdev);
}
@@ -2273,8 +2577,6 @@ int evergreen_irq_process(struct radeon_device *rdev)
u32 rptr = rdev->ih.rptr;
u32 src_id, src_data;
u32 ring_index;
- u32 disp_int, disp_int_cont, disp_int_cont2;
- u32 disp_int_cont3, disp_int_cont4, disp_int_cont5;
unsigned long flags;
bool queue_hotplug = false;
@@ -2295,8 +2597,7 @@ int evergreen_irq_process(struct radeon_device *rdev)
restart_ih:
/* display interrupts */
- evergreen_irq_ack(rdev, &disp_int, &disp_int_cont, &disp_int_cont2,
- &disp_int_cont3, &disp_int_cont4, &disp_int_cont5);
+ evergreen_irq_ack(rdev);
rdev->ih.wptr = wptr;
while (rptr != wptr) {
@@ -2309,17 +2610,21 @@ restart_ih:
case 1: /* D1 vblank/vline */
switch (src_data) {
case 0: /* D1 vblank */
- if (disp_int & LB_D1_VBLANK_INTERRUPT) {
- drm_handle_vblank(rdev->ddev, 0);
- rdev->pm.vblank_sync = true;
- wake_up(&rdev->irq.vblank_queue);
- disp_int &= ~LB_D1_VBLANK_INTERRUPT;
+ if (rdev->irq.stat_regs.evergreen.disp_int & LB_D1_VBLANK_INTERRUPT) {
+ if (rdev->irq.crtc_vblank_int[0]) {
+ drm_handle_vblank(rdev->ddev, 0);
+ rdev->pm.vblank_sync = true;
+ wake_up(&rdev->irq.vblank_queue);
+ }
+ if (rdev->irq.pflip[0])
+ radeon_crtc_handle_flip(rdev, 0);
+ rdev->irq.stat_regs.evergreen.disp_int &= ~LB_D1_VBLANK_INTERRUPT;
DRM_DEBUG("IH: D1 vblank\n");
}
break;
case 1: /* D1 vline */
- if (disp_int & LB_D1_VLINE_INTERRUPT) {
- disp_int &= ~LB_D1_VLINE_INTERRUPT;
+ if (rdev->irq.stat_regs.evergreen.disp_int & LB_D1_VLINE_INTERRUPT) {
+ rdev->irq.stat_regs.evergreen.disp_int &= ~LB_D1_VLINE_INTERRUPT;
DRM_DEBUG("IH: D1 vline\n");
}
break;
@@ -2331,17 +2636,21 @@ restart_ih:
case 2: /* D2 vblank/vline */
switch (src_data) {
case 0: /* D2 vblank */
- if (disp_int_cont & LB_D2_VBLANK_INTERRUPT) {
- drm_handle_vblank(rdev->ddev, 1);
- rdev->pm.vblank_sync = true;
- wake_up(&rdev->irq.vblank_queue);
- disp_int_cont &= ~LB_D2_VBLANK_INTERRUPT;
+ if (rdev->irq.stat_regs.evergreen.disp_int_cont & LB_D2_VBLANK_INTERRUPT) {
+ if (rdev->irq.crtc_vblank_int[1]) {
+ drm_handle_vblank(rdev->ddev, 1);
+ rdev->pm.vblank_sync = true;
+ wake_up(&rdev->irq.vblank_queue);
+ }
+ if (rdev->irq.pflip[1])
+ radeon_crtc_handle_flip(rdev, 1);
+ rdev->irq.stat_regs.evergreen.disp_int_cont &= ~LB_D2_VBLANK_INTERRUPT;
DRM_DEBUG("IH: D2 vblank\n");
}
break;
case 1: /* D2 vline */
- if (disp_int_cont & LB_D2_VLINE_INTERRUPT) {
- disp_int_cont &= ~LB_D2_VLINE_INTERRUPT;
+ if (rdev->irq.stat_regs.evergreen.disp_int_cont & LB_D2_VLINE_INTERRUPT) {
+ rdev->irq.stat_regs.evergreen.disp_int_cont &= ~LB_D2_VLINE_INTERRUPT;
DRM_DEBUG("IH: D2 vline\n");
}
break;
@@ -2353,17 +2662,21 @@ restart_ih:
case 3: /* D3 vblank/vline */
switch (src_data) {
case 0: /* D3 vblank */
- if (disp_int_cont2 & LB_D3_VBLANK_INTERRUPT) {
- drm_handle_vblank(rdev->ddev, 2);
- rdev->pm.vblank_sync = true;
- wake_up(&rdev->irq.vblank_queue);
- disp_int_cont2 &= ~LB_D3_VBLANK_INTERRUPT;
+ if (rdev->irq.stat_regs.evergreen.disp_int_cont2 & LB_D3_VBLANK_INTERRUPT) {
+ if (rdev->irq.crtc_vblank_int[2]) {
+ drm_handle_vblank(rdev->ddev, 2);
+ rdev->pm.vblank_sync = true;
+ wake_up(&rdev->irq.vblank_queue);
+ }
+ if (rdev->irq.pflip[2])
+ radeon_crtc_handle_flip(rdev, 2);
+ rdev->irq.stat_regs.evergreen.disp_int_cont2 &= ~LB_D3_VBLANK_INTERRUPT;
DRM_DEBUG("IH: D3 vblank\n");
}
break;
case 1: /* D3 vline */
- if (disp_int_cont2 & LB_D3_VLINE_INTERRUPT) {
- disp_int_cont2 &= ~LB_D3_VLINE_INTERRUPT;
+ if (rdev->irq.stat_regs.evergreen.disp_int_cont2 & LB_D3_VLINE_INTERRUPT) {
+ rdev->irq.stat_regs.evergreen.disp_int_cont2 &= ~LB_D3_VLINE_INTERRUPT;
DRM_DEBUG("IH: D3 vline\n");
}
break;
@@ -2375,17 +2688,21 @@ restart_ih:
case 4: /* D4 vblank/vline */
switch (src_data) {
case 0: /* D4 vblank */
- if (disp_int_cont3 & LB_D4_VBLANK_INTERRUPT) {
- drm_handle_vblank(rdev->ddev, 3);
- rdev->pm.vblank_sync = true;
- wake_up(&rdev->irq.vblank_queue);
- disp_int_cont3 &= ~LB_D4_VBLANK_INTERRUPT;
+ if (rdev->irq.stat_regs.evergreen.disp_int_cont3 & LB_D4_VBLANK_INTERRUPT) {
+ if (rdev->irq.crtc_vblank_int[3]) {
+ drm_handle_vblank(rdev->ddev, 3);
+ rdev->pm.vblank_sync = true;
+ wake_up(&rdev->irq.vblank_queue);
+ }
+ if (rdev->irq.pflip[3])
+ radeon_crtc_handle_flip(rdev, 3);
+ rdev->irq.stat_regs.evergreen.disp_int_cont3 &= ~LB_D4_VBLANK_INTERRUPT;
DRM_DEBUG("IH: D4 vblank\n");
}
break;
case 1: /* D4 vline */
- if (disp_int_cont3 & LB_D4_VLINE_INTERRUPT) {
- disp_int_cont3 &= ~LB_D4_VLINE_INTERRUPT;
+ if (rdev->irq.stat_regs.evergreen.disp_int_cont3 & LB_D4_VLINE_INTERRUPT) {
+ rdev->irq.stat_regs.evergreen.disp_int_cont3 &= ~LB_D4_VLINE_INTERRUPT;
DRM_DEBUG("IH: D4 vline\n");
}
break;
@@ -2397,17 +2714,21 @@ restart_ih:
case 5: /* D5 vblank/vline */
switch (src_data) {
case 0: /* D5 vblank */
- if (disp_int_cont4 & LB_D5_VBLANK_INTERRUPT) {
- drm_handle_vblank(rdev->ddev, 4);
- rdev->pm.vblank_sync = true;
- wake_up(&rdev->irq.vblank_queue);
- disp_int_cont4 &= ~LB_D5_VBLANK_INTERRUPT;
+ if (rdev->irq.stat_regs.evergreen.disp_int_cont4 & LB_D5_VBLANK_INTERRUPT) {
+ if (rdev->irq.crtc_vblank_int[4]) {
+ drm_handle_vblank(rdev->ddev, 4);
+ rdev->pm.vblank_sync = true;
+ wake_up(&rdev->irq.vblank_queue);
+ }
+ if (rdev->irq.pflip[4])
+ radeon_crtc_handle_flip(rdev, 4);
+ rdev->irq.stat_regs.evergreen.disp_int_cont4 &= ~LB_D5_VBLANK_INTERRUPT;
DRM_DEBUG("IH: D5 vblank\n");
}
break;
case 1: /* D5 vline */
- if (disp_int_cont4 & LB_D5_VLINE_INTERRUPT) {
- disp_int_cont4 &= ~LB_D5_VLINE_INTERRUPT;
+ if (rdev->irq.stat_regs.evergreen.disp_int_cont4 & LB_D5_VLINE_INTERRUPT) {
+ rdev->irq.stat_regs.evergreen.disp_int_cont4 &= ~LB_D5_VLINE_INTERRUPT;
DRM_DEBUG("IH: D5 vline\n");
}
break;
@@ -2419,17 +2740,21 @@ restart_ih:
case 6: /* D6 vblank/vline */
switch (src_data) {
case 0: /* D6 vblank */
- if (disp_int_cont5 & LB_D6_VBLANK_INTERRUPT) {
- drm_handle_vblank(rdev->ddev, 5);
- rdev->pm.vblank_sync = true;
- wake_up(&rdev->irq.vblank_queue);
- disp_int_cont5 &= ~LB_D6_VBLANK_INTERRUPT;
+ if (rdev->irq.stat_regs.evergreen.disp_int_cont5 & LB_D6_VBLANK_INTERRUPT) {
+ if (rdev->irq.crtc_vblank_int[5]) {
+ drm_handle_vblank(rdev->ddev, 5);
+ rdev->pm.vblank_sync = true;
+ wake_up(&rdev->irq.vblank_queue);
+ }
+ if (rdev->irq.pflip[5])
+ radeon_crtc_handle_flip(rdev, 5);
+ rdev->irq.stat_regs.evergreen.disp_int_cont5 &= ~LB_D6_VBLANK_INTERRUPT;
DRM_DEBUG("IH: D6 vblank\n");
}
break;
case 1: /* D6 vline */
- if (disp_int_cont5 & LB_D6_VLINE_INTERRUPT) {
- disp_int_cont5 &= ~LB_D6_VLINE_INTERRUPT;
+ if (rdev->irq.stat_regs.evergreen.disp_int_cont5 & LB_D6_VLINE_INTERRUPT) {
+ rdev->irq.stat_regs.evergreen.disp_int_cont5 &= ~LB_D6_VLINE_INTERRUPT;
DRM_DEBUG("IH: D6 vline\n");
}
break;
@@ -2441,43 +2766,43 @@ restart_ih:
case 42: /* HPD hotplug */
switch (src_data) {
case 0:
- if (disp_int & DC_HPD1_INTERRUPT) {
- disp_int &= ~DC_HPD1_INTERRUPT;
+ if (rdev->irq.stat_regs.evergreen.disp_int & DC_HPD1_INTERRUPT) {
+ rdev->irq.stat_regs.evergreen.disp_int &= ~DC_HPD1_INTERRUPT;
queue_hotplug = true;
DRM_DEBUG("IH: HPD1\n");
}
break;
case 1:
- if (disp_int_cont & DC_HPD2_INTERRUPT) {
- disp_int_cont &= ~DC_HPD2_INTERRUPT;
+ if (rdev->irq.stat_regs.evergreen.disp_int_cont & DC_HPD2_INTERRUPT) {
+ rdev->irq.stat_regs.evergreen.disp_int_cont &= ~DC_HPD2_INTERRUPT;
queue_hotplug = true;
DRM_DEBUG("IH: HPD2\n");
}
break;
case 2:
- if (disp_int_cont2 & DC_HPD3_INTERRUPT) {
- disp_int_cont2 &= ~DC_HPD3_INTERRUPT;
+ if (rdev->irq.stat_regs.evergreen.disp_int_cont2 & DC_HPD3_INTERRUPT) {
+ rdev->irq.stat_regs.evergreen.disp_int_cont2 &= ~DC_HPD3_INTERRUPT;
queue_hotplug = true;
DRM_DEBUG("IH: HPD3\n");
}
break;
case 3:
- if (disp_int_cont3 & DC_HPD4_INTERRUPT) {
- disp_int_cont3 &= ~DC_HPD4_INTERRUPT;
+ if (rdev->irq.stat_regs.evergreen.disp_int_cont3 & DC_HPD4_INTERRUPT) {
+ rdev->irq.stat_regs.evergreen.disp_int_cont3 &= ~DC_HPD4_INTERRUPT;
queue_hotplug = true;
DRM_DEBUG("IH: HPD4\n");
}
break;
case 4:
- if (disp_int_cont4 & DC_HPD5_INTERRUPT) {
- disp_int_cont4 &= ~DC_HPD5_INTERRUPT;
+ if (rdev->irq.stat_regs.evergreen.disp_int_cont4 & DC_HPD5_INTERRUPT) {
+ rdev->irq.stat_regs.evergreen.disp_int_cont4 &= ~DC_HPD5_INTERRUPT;
queue_hotplug = true;
DRM_DEBUG("IH: HPD5\n");
}
break;
case 5:
- if (disp_int_cont5 & DC_HPD6_INTERRUPT) {
- disp_int_cont5 &= ~DC_HPD6_INTERRUPT;
+ if (rdev->irq.stat_regs.evergreen.disp_int_cont5 & DC_HPD6_INTERRUPT) {
+ rdev->irq.stat_regs.evergreen.disp_int_cont5 &= ~DC_HPD6_INTERRUPT;
queue_hotplug = true;
DRM_DEBUG("IH: HPD6\n");
}
@@ -2516,7 +2841,7 @@ restart_ih:
if (wptr != rdev->ih.wptr)
goto restart_ih;
if (queue_hotplug)
- queue_work(rdev->wq, &rdev->hotplug_work);
+ schedule_work(&rdev->hotplug_work);
rdev->ih.rptr = rptr;
WREG32(IH_RB_RPTR, rdev->ih.rptr);
spin_unlock_irqrestore(&rdev->ih.lock, flags);
@@ -2527,12 +2852,31 @@ static int evergreen_startup(struct radeon_device *rdev)
{
int r;
- if (!rdev->me_fw || !rdev->pfp_fw || !rdev->rlc_fw) {
- r = r600_init_microcode(rdev);
+ /* enable pcie gen2 link */
+ if (!ASIC_IS_DCE5(rdev))
+ evergreen_pcie_gen2_enable(rdev);
+
+ if (ASIC_IS_DCE5(rdev)) {
+ if (!rdev->me_fw || !rdev->pfp_fw || !rdev->rlc_fw || !rdev->mc_fw) {
+ r = ni_init_microcode(rdev);
+ if (r) {
+ DRM_ERROR("Failed to load firmware!\n");
+ return r;
+ }
+ }
+ r = btc_mc_load_microcode(rdev);
if (r) {
- DRM_ERROR("Failed to load firmware!\n");
+ DRM_ERROR("Failed to load MC firmware!\n");
return r;
}
+ } else {
+ if (!rdev->me_fw || !rdev->pfp_fw || !rdev->rlc_fw) {
+ r = r600_init_microcode(rdev);
+ if (r) {
+ DRM_ERROR("Failed to load firmware!\n");
+ return r;
+ }
+ }
}
evergreen_mc_program(rdev);
@@ -2551,6 +2895,11 @@ static int evergreen_startup(struct radeon_device *rdev)
rdev->asic->copy = NULL;
dev_warn(rdev->dev, "failed blitter (%d) falling back to memcpy\n", r);
}
+ /* XXX: ontario has problems blitting to gart at the moment */
+ if (rdev->family == CHIP_PALM) {
+ rdev->asic->copy = NULL;
+ rdev->mc.active_vram_size = rdev->mc.visible_vram_size;
+ }
/* allocate wb buffer */
r = radeon_wb_init(rdev);
@@ -2658,12 +3007,16 @@ static bool evergreen_card_posted(struct radeon_device *rdev)
u32 reg;
/* first check CRTCs */
- reg = RREG32(EVERGREEN_CRTC_CONTROL + EVERGREEN_CRTC0_REGISTER_OFFSET) |
- RREG32(EVERGREEN_CRTC_CONTROL + EVERGREEN_CRTC1_REGISTER_OFFSET) |
- RREG32(EVERGREEN_CRTC_CONTROL + EVERGREEN_CRTC2_REGISTER_OFFSET) |
- RREG32(EVERGREEN_CRTC_CONTROL + EVERGREEN_CRTC3_REGISTER_OFFSET) |
- RREG32(EVERGREEN_CRTC_CONTROL + EVERGREEN_CRTC4_REGISTER_OFFSET) |
- RREG32(EVERGREEN_CRTC_CONTROL + EVERGREEN_CRTC5_REGISTER_OFFSET);
+ if (rdev->flags & RADEON_IS_IGP)
+ reg = RREG32(EVERGREEN_CRTC_CONTROL + EVERGREEN_CRTC0_REGISTER_OFFSET) |
+ RREG32(EVERGREEN_CRTC_CONTROL + EVERGREEN_CRTC1_REGISTER_OFFSET);
+ else
+ reg = RREG32(EVERGREEN_CRTC_CONTROL + EVERGREEN_CRTC0_REGISTER_OFFSET) |
+ RREG32(EVERGREEN_CRTC_CONTROL + EVERGREEN_CRTC1_REGISTER_OFFSET) |
+ RREG32(EVERGREEN_CRTC_CONTROL + EVERGREEN_CRTC2_REGISTER_OFFSET) |
+ RREG32(EVERGREEN_CRTC_CONTROL + EVERGREEN_CRTC3_REGISTER_OFFSET) |
+ RREG32(EVERGREEN_CRTC_CONTROL + EVERGREEN_CRTC4_REGISTER_OFFSET) |
+ RREG32(EVERGREEN_CRTC_CONTROL + EVERGREEN_CRTC5_REGISTER_OFFSET);
if (reg & EVERGREEN_CRTC_MASTER_EN)
return true;
@@ -2800,3 +3153,52 @@ void evergreen_fini(struct radeon_device *rdev)
rdev->bios = NULL;
radeon_dummy_page_fini(rdev);
}
+
+static void evergreen_pcie_gen2_enable(struct radeon_device *rdev)
+{
+ u32 link_width_cntl, speed_cntl;
+
+ if (rdev->flags & RADEON_IS_IGP)
+ return;
+
+ if (!(rdev->flags & RADEON_IS_PCIE))
+ return;
+
+ /* x2 cards have a special sequence */
+ if (ASIC_IS_X2(rdev))
+ return;
+
+ speed_cntl = RREG32_PCIE_P(PCIE_LC_SPEED_CNTL);
+ if ((speed_cntl & LC_OTHER_SIDE_EVER_SENT_GEN2) ||
+ (speed_cntl & LC_OTHER_SIDE_SUPPORTS_GEN2)) {
+
+ link_width_cntl = RREG32_PCIE_P(PCIE_LC_LINK_WIDTH_CNTL);
+ link_width_cntl &= ~LC_UPCONFIGURE_DIS;
+ WREG32_PCIE_P(PCIE_LC_LINK_WIDTH_CNTL, link_width_cntl);
+
+ speed_cntl = RREG32_PCIE_P(PCIE_LC_SPEED_CNTL);
+ speed_cntl &= ~LC_TARGET_LINK_SPEED_OVERRIDE_EN;
+ WREG32_PCIE_P(PCIE_LC_SPEED_CNTL, speed_cntl);
+
+ speed_cntl = RREG32_PCIE_P(PCIE_LC_SPEED_CNTL);
+ speed_cntl |= LC_CLR_FAILED_SPD_CHANGE_CNT;
+ WREG32_PCIE_P(PCIE_LC_SPEED_CNTL, speed_cntl);
+
+ speed_cntl = RREG32_PCIE_P(PCIE_LC_SPEED_CNTL);
+ speed_cntl &= ~LC_CLR_FAILED_SPD_CHANGE_CNT;
+ WREG32_PCIE_P(PCIE_LC_SPEED_CNTL, speed_cntl);
+
+ speed_cntl = RREG32_PCIE_P(PCIE_LC_SPEED_CNTL);
+ speed_cntl |= LC_GEN2_EN_STRAP;
+ WREG32_PCIE_P(PCIE_LC_SPEED_CNTL, speed_cntl);
+
+ } else {
+ link_width_cntl = RREG32_PCIE_P(PCIE_LC_LINK_WIDTH_CNTL);
+ /* XXX: only disable it if gen1 bridge vendor == 0x111d or 0x1106 */
+ if (1)
+ link_width_cntl |= LC_UPCONFIGURE_DIS;
+ else
+ link_width_cntl &= ~LC_UPCONFIGURE_DIS;
+ WREG32_PCIE_P(PCIE_LC_LINK_WIDTH_CNTL, link_width_cntl);
+ }
+}
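
Note: the branch condition in the helper above just tests two status bits in PCIE_LC_SPEED_CNTL (bits 23 and 24 per the evergreend.h hunk later in this patch): either the link partner has ever sent gen2 or it advertises gen2 support. A minimal sketch of that test with an assumed register sample:

#include <stdint.h>
#include <stdio.h>

#define LC_OTHER_SIDE_EVER_SENT_GEN2 (1u << 23)
#define LC_OTHER_SIDE_SUPPORTS_GEN2  (1u << 24)

int main(void)
{
	uint32_t speed_cntl = LC_OTHER_SIDE_SUPPORTS_GEN2; /* assumed sample */

	if ((speed_cntl & LC_OTHER_SIDE_EVER_SENT_GEN2) ||
	    (speed_cntl & LC_OTHER_SIDE_SUPPORTS_GEN2))
		printf("link partner is gen2 capable, run the enable sequence\n");
	else
		printf("gen1 only, keep upconfigure disabled\n");
	return 0;
}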
diff --git a/drivers/gpu/drm/radeon/evergreen_blit_kms.c b/drivers/gpu/drm/radeon/evergreen_blit_kms.c
index e0e590110dd..b758dc7f2f2 100644
--- a/drivers/gpu/drm/radeon/evergreen_blit_kms.c
+++ b/drivers/gpu/drm/radeon/evergreen_blit_kms.c
@@ -147,7 +147,9 @@ set_vtx_resource(struct radeon_device *rdev, u64 gpu_addr)
radeon_ring_write(rdev, 0);
radeon_ring_write(rdev, SQ_TEX_VTX_VALID_BUFFER << 30);
- if (rdev->family == CHIP_CEDAR)
+ if ((rdev->family == CHIP_CEDAR) ||
+ (rdev->family == CHIP_PALM) ||
+ (rdev->family == CHIP_CAICOS))
cp_set_surface_sync(rdev,
PACKET3_TC_ACTION_ENA, 48, gpu_addr);
else
@@ -331,9 +333,95 @@ set_default_state(struct radeon_device *rdev)
num_hs_stack_entries = 85;
num_ls_stack_entries = 85;
break;
+ case CHIP_PALM:
+ num_ps_gprs = 93;
+ num_vs_gprs = 46;
+ num_temp_gprs = 4;
+ num_gs_gprs = 31;
+ num_es_gprs = 31;
+ num_hs_gprs = 23;
+ num_ls_gprs = 23;
+ num_ps_threads = 96;
+ num_vs_threads = 16;
+ num_gs_threads = 16;
+ num_es_threads = 16;
+ num_hs_threads = 16;
+ num_ls_threads = 16;
+ num_ps_stack_entries = 42;
+ num_vs_stack_entries = 42;
+ num_gs_stack_entries = 42;
+ num_es_stack_entries = 42;
+ num_hs_stack_entries = 42;
+ num_ls_stack_entries = 42;
+ break;
+ case CHIP_BARTS:
+ num_ps_gprs = 93;
+ num_vs_gprs = 46;
+ num_temp_gprs = 4;
+ num_gs_gprs = 31;
+ num_es_gprs = 31;
+ num_hs_gprs = 23;
+ num_ls_gprs = 23;
+ num_ps_threads = 128;
+ num_vs_threads = 20;
+ num_gs_threads = 20;
+ num_es_threads = 20;
+ num_hs_threads = 20;
+ num_ls_threads = 20;
+ num_ps_stack_entries = 85;
+ num_vs_stack_entries = 85;
+ num_gs_stack_entries = 85;
+ num_es_stack_entries = 85;
+ num_hs_stack_entries = 85;
+ num_ls_stack_entries = 85;
+ break;
+ case CHIP_TURKS:
+ num_ps_gprs = 93;
+ num_vs_gprs = 46;
+ num_temp_gprs = 4;
+ num_gs_gprs = 31;
+ num_es_gprs = 31;
+ num_hs_gprs = 23;
+ num_ls_gprs = 23;
+ num_ps_threads = 128;
+ num_vs_threads = 20;
+ num_gs_threads = 20;
+ num_es_threads = 20;
+ num_hs_threads = 20;
+ num_ls_threads = 20;
+ num_ps_stack_entries = 42;
+ num_vs_stack_entries = 42;
+ num_gs_stack_entries = 42;
+ num_es_stack_entries = 42;
+ num_hs_stack_entries = 42;
+ num_ls_stack_entries = 42;
+ break;
+ case CHIP_CAICOS:
+ num_ps_gprs = 93;
+ num_vs_gprs = 46;
+ num_temp_gprs = 4;
+ num_gs_gprs = 31;
+ num_es_gprs = 31;
+ num_hs_gprs = 23;
+ num_ls_gprs = 23;
+ num_ps_threads = 128;
+ num_vs_threads = 10;
+ num_gs_threads = 10;
+ num_es_threads = 10;
+ num_hs_threads = 10;
+ num_ls_threads = 10;
+ num_ps_stack_entries = 42;
+ num_vs_stack_entries = 42;
+ num_gs_stack_entries = 42;
+ num_es_stack_entries = 42;
+ num_hs_stack_entries = 42;
+ num_ls_stack_entries = 42;
+ break;
}
- if (rdev->family == CHIP_CEDAR)
+ if ((rdev->family == CHIP_CEDAR) ||
+ (rdev->family == CHIP_PALM) ||
+ (rdev->family == CHIP_CAICOS))
sq_config = 0;
else
sq_config = VC_ENABLE;
diff --git a/drivers/gpu/drm/radeon/evergreen_reg.h b/drivers/gpu/drm/radeon/evergreen_reg.h
index 2330f3a36fd..c781c92c345 100644
--- a/drivers/gpu/drm/radeon/evergreen_reg.h
+++ b/drivers/gpu/drm/radeon/evergreen_reg.h
@@ -105,6 +105,11 @@
#define EVERGREEN_GRPH_Y_START 0x6830
#define EVERGREEN_GRPH_X_END 0x6834
#define EVERGREEN_GRPH_Y_END 0x6838
+#define EVERGREEN_GRPH_UPDATE 0x6844
+# define EVERGREEN_GRPH_SURFACE_UPDATE_PENDING (1 << 2)
+# define EVERGREEN_GRPH_UPDATE_LOCK (1 << 16)
+#define EVERGREEN_GRPH_FLIP_CONTROL 0x6848
+# define EVERGREEN_GRPH_SURFACE_UPDATE_H_RETRACE_EN (1 << 0)
/* CUR blocks at 0x6998, 0x7598, 0x10198, 0x10d98, 0x11998, 0x12598 */
#define EVERGREEN_CUR_CONTROL 0x6998
@@ -178,6 +183,7 @@
# define EVERGREEN_CRTC_DISP_READ_REQUEST_DISABLE (1 << 24)
#define EVERGREEN_CRTC_STATUS 0x6e8c
#define EVERGREEN_CRTC_STATUS_POSITION 0x6e90
+#define EVERGREEN_MASTER_UPDATE_MODE 0x6ef8
#define EVERGREEN_CRTC_UPDATE_LOCK 0x6ed4
#define EVERGREEN_DC_GPIO_HPD_MASK 0x64b0
diff --git a/drivers/gpu/drm/radeon/evergreend.h b/drivers/gpu/drm/radeon/evergreend.h
index a73b53c4435..36d32d83d86 100644
--- a/drivers/gpu/drm/radeon/evergreend.h
+++ b/drivers/gpu/drm/radeon/evergreend.h
@@ -164,11 +164,13 @@
#define SE_SC_BUSY (1 << 29)
#define SE_DB_BUSY (1 << 30)
#define SE_CB_BUSY (1 << 31)
-
+/* evergreen */
#define CG_MULT_THERMAL_STATUS 0x740
#define ASIC_T(x) ((x) << 16)
#define ASIC_T_MASK 0x7FF0000
#define ASIC_T_SHIFT 16
+/* APU */
+#define CG_THERMAL_STATUS 0x678
#define HDP_HOST_PATH_CNTL 0x2C00
#define HDP_NONSURFACE_BASE 0x2C04
@@ -181,6 +183,7 @@
#define MC_SHARED_CHMAP 0x2004
#define NOOFCHAN_SHIFT 12
#define NOOFCHAN_MASK 0x00003000
+#define MC_SHARED_CHREMAP 0x2008
#define MC_ARB_RAMCFG 0x2760
#define NOOFBANK_SHIFT 0
@@ -200,6 +203,7 @@
#define MC_VM_AGP_BOT 0x202C
#define MC_VM_AGP_BASE 0x2030
#define MC_VM_FB_LOCATION 0x2024
+#define MC_FUS_VM_FB_OFFSET 0x2898
#define MC_VM_MB_L1_TLB0_CNTL 0x2234
#define MC_VM_MB_L1_TLB1_CNTL 0x2238
#define MC_VM_MB_L1_TLB2_CNTL 0x223C
@@ -349,6 +353,9 @@
#define SYNC_WALKER (1 << 25)
#define SYNC_ALIGNER (1 << 26)
+#define TCP_CHAN_STEER_LO 0x960c
+#define TCP_CHAN_STEER_HI 0x9610
+
#define VGT_CACHE_INVALIDATION 0x88C4
#define CACHE_INVALIDATION(x) ((x) << 0)
#define VC_ONLY 0
@@ -574,6 +581,44 @@
# define DC_HPDx_RX_INT_TIMER(x) ((x) << 16)
# define DC_HPDx_EN (1 << 28)
+/* PCIE link stuff */
+#define PCIE_LC_TRAINING_CNTL 0xa1 /* PCIE_P */
+#define PCIE_LC_LINK_WIDTH_CNTL 0xa2 /* PCIE_P */
+# define LC_LINK_WIDTH_SHIFT 0
+# define LC_LINK_WIDTH_MASK 0x7
+# define LC_LINK_WIDTH_X0 0
+# define LC_LINK_WIDTH_X1 1
+# define LC_LINK_WIDTH_X2 2
+# define LC_LINK_WIDTH_X4 3
+# define LC_LINK_WIDTH_X8 4
+# define LC_LINK_WIDTH_X16 6
+# define LC_LINK_WIDTH_RD_SHIFT 4
+# define LC_LINK_WIDTH_RD_MASK 0x70
+# define LC_RECONFIG_ARC_MISSING_ESCAPE (1 << 7)
+# define LC_RECONFIG_NOW (1 << 8)
+# define LC_RENEGOTIATION_SUPPORT (1 << 9)
+# define LC_RENEGOTIATE_EN (1 << 10)
+# define LC_SHORT_RECONFIG_EN (1 << 11)
+# define LC_UPCONFIGURE_SUPPORT (1 << 12)
+# define LC_UPCONFIGURE_DIS (1 << 13)
+#define PCIE_LC_SPEED_CNTL 0xa4 /* PCIE_P */
+# define LC_GEN2_EN_STRAP (1 << 0)
+# define LC_TARGET_LINK_SPEED_OVERRIDE_EN (1 << 1)
+# define LC_FORCE_EN_HW_SPEED_CHANGE (1 << 5)
+# define LC_FORCE_DIS_HW_SPEED_CHANGE (1 << 6)
+# define LC_SPEED_CHANGE_ATTEMPTS_ALLOWED_MASK (0x3 << 8)
+# define LC_SPEED_CHANGE_ATTEMPTS_ALLOWED_SHIFT 3
+# define LC_CURRENT_DATA_RATE (1 << 11)
+# define LC_VOLTAGE_TIMER_SEL_MASK (0xf << 14)
+# define LC_CLR_FAILED_SPD_CHANGE_CNT (1 << 21)
+# define LC_OTHER_SIDE_EVER_SENT_GEN2 (1 << 23)
+# define LC_OTHER_SIDE_SUPPORTS_GEN2 (1 << 24)
+#define MM_CFGREGS_CNTL 0x544c
+# define MM_WR_TO_CFG_EN (1 << 3)
+#define LINK_CNTL2 0x88 /* F0 */
+# define TARGET_LINK_SPEED_MASK (0xf << 0)
+# define SELECTABLE_DEEMPHASIS (1 << 6)
+
/*
* PM4
*/
@@ -603,7 +648,7 @@
#define PACKET3_NOP 0x10
#define PACKET3_SET_BASE 0x11
#define PACKET3_CLEAR_STATE 0x12
-#define PACKET3_INDIRECT_BUFFER_SIZE 0x13
+#define PACKET3_INDEX_BUFFER_SIZE 0x13
#define PACKET3_DISPATCH_DIRECT 0x15
#define PACKET3_DISPATCH_INDIRECT 0x16
#define PACKET3_INDIRECT_BUFFER_END 0x17
@@ -644,14 +689,14 @@
# define PACKET3_CB8_DEST_BASE_ENA (1 << 15)
# define PACKET3_CB9_DEST_BASE_ENA (1 << 16)
# define PACKET3_CB10_DEST_BASE_ENA (1 << 17)
-# define PACKET3_CB11_DEST_BASE_ENA (1 << 17)
+# define PACKET3_CB11_DEST_BASE_ENA (1 << 18)
# define PACKET3_FULL_CACHE_ENA (1 << 20)
# define PACKET3_TC_ACTION_ENA (1 << 23)
# define PACKET3_VC_ACTION_ENA (1 << 24)
# define PACKET3_CB_ACTION_ENA (1 << 25)
# define PACKET3_DB_ACTION_ENA (1 << 26)
# define PACKET3_SH_ACTION_ENA (1 << 27)
-# define PACKET3_SMX_ACTION_ENA (1 << 28)
+# define PACKET3_SX_ACTION_ENA (1 << 28)
#define PACKET3_ME_INITIALIZE 0x44
#define PACKET3_ME_INITIALIZE_DEVICE_ID(x) ((x) << 16)
#define PACKET3_COND_WRITE 0x45
diff --git a/drivers/gpu/drm/radeon/ni.c b/drivers/gpu/drm/radeon/ni.c
new file mode 100644
index 00000000000..5e0bef80ad7
--- /dev/null
+++ b/drivers/gpu/drm/radeon/ni.c
@@ -0,0 +1,316 @@
+/*
+ * Copyright 2010 Advanced Micro Devices, Inc.
+ *
+ * Permission is hereby granted, free of charge, to any person obtaining a
+ * copy of this software and associated documentation files (the "Software"),
+ * to deal in the Software without restriction, including without limitation
+ * the rights to use, copy, modify, merge, publish, distribute, sublicense,
+ * and/or sell copies of the Software, and to permit persons to whom the
+ * Software is furnished to do so, subject to the following conditions:
+ *
+ * The above copyright notice and this permission notice shall be included in
+ * all copies or substantial portions of the Software.
+ *
+ * THE SOFTWARE IS PROVIDED "AS IS", WITHOUT WARRANTY OF ANY KIND, EXPRESS OR
+ * IMPLIED, INCLUDING BUT NOT LIMITED TO THE WARRANTIES OF MERCHANTABILITY,
+ * FITNESS FOR A PARTICULAR PURPOSE AND NONINFRINGEMENT. IN NO EVENT SHALL
+ * THE COPYRIGHT HOLDER(S) OR AUTHOR(S) BE LIABLE FOR ANY CLAIM, DAMAGES OR
+ * OTHER LIABILITY, WHETHER IN AN ACTION OF CONTRACT, TORT OR OTHERWISE,
+ * ARISING FROM, OUT OF OR IN CONNECTION WITH THE SOFTWARE OR THE USE OR
+ * OTHER DEALINGS IN THE SOFTWARE.
+ *
+ * Authors: Alex Deucher
+ */
+#include <linux/firmware.h>
+#include <linux/platform_device.h>
+#include <linux/slab.h>
+#include "drmP.h"
+#include "radeon.h"
+#include "radeon_asic.h"
+#include "radeon_drm.h"
+#include "nid.h"
+#include "atom.h"
+#include "ni_reg.h"
+
+#define EVERGREEN_PFP_UCODE_SIZE 1120
+#define EVERGREEN_PM4_UCODE_SIZE 1376
+#define EVERGREEN_RLC_UCODE_SIZE 768
+#define BTC_MC_UCODE_SIZE 6024
+
+/* Firmware Names */
+MODULE_FIRMWARE("radeon/BARTS_pfp.bin");
+MODULE_FIRMWARE("radeon/BARTS_me.bin");
+MODULE_FIRMWARE("radeon/BARTS_mc.bin");
+MODULE_FIRMWARE("radeon/BTC_rlc.bin");
+MODULE_FIRMWARE("radeon/TURKS_pfp.bin");
+MODULE_FIRMWARE("radeon/TURKS_me.bin");
+MODULE_FIRMWARE("radeon/TURKS_mc.bin");
+MODULE_FIRMWARE("radeon/CAICOS_pfp.bin");
+MODULE_FIRMWARE("radeon/CAICOS_me.bin");
+MODULE_FIRMWARE("radeon/CAICOS_mc.bin");
+
+#define BTC_IO_MC_REGS_SIZE 29
+
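+/* each table entry is an {MC_SEQ_IO_DEBUG_INDEX, MC_SEQ_IO_DEBUG_DATA} pair; the three per-chip tables differ only in the final 0x9f value */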
+static const u32 barts_io_mc_regs[BTC_IO_MC_REGS_SIZE][2] = {
+ {0x00000077, 0xff010100},
+ {0x00000078, 0x00000000},
+ {0x00000079, 0x00001434},
+ {0x0000007a, 0xcc08ec08},
+ {0x0000007b, 0x00040000},
+ {0x0000007c, 0x000080c0},
+ {0x0000007d, 0x09000000},
+ {0x0000007e, 0x00210404},
+ {0x00000081, 0x08a8e800},
+ {0x00000082, 0x00030444},
+ {0x00000083, 0x00000000},
+ {0x00000085, 0x00000001},
+ {0x00000086, 0x00000002},
+ {0x00000087, 0x48490000},
+ {0x00000088, 0x20244647},
+ {0x00000089, 0x00000005},
+ {0x0000008b, 0x66030000},
+ {0x0000008c, 0x00006603},
+ {0x0000008d, 0x00000100},
+ {0x0000008f, 0x00001c0a},
+ {0x00000090, 0xff000001},
+ {0x00000094, 0x00101101},
+ {0x00000095, 0x00000fff},
+ {0x00000096, 0x00116fff},
+ {0x00000097, 0x60010000},
+ {0x00000098, 0x10010000},
+ {0x00000099, 0x00006000},
+ {0x0000009a, 0x00001000},
+ {0x0000009f, 0x00946a00}
+};
+
+static const u32 turks_io_mc_regs[BTC_IO_MC_REGS_SIZE][2] = {
+ {0x00000077, 0xff010100},
+ {0x00000078, 0x00000000},
+ {0x00000079, 0x00001434},
+ {0x0000007a, 0xcc08ec08},
+ {0x0000007b, 0x00040000},
+ {0x0000007c, 0x000080c0},
+ {0x0000007d, 0x09000000},
+ {0x0000007e, 0x00210404},
+ {0x00000081, 0x08a8e800},
+ {0x00000082, 0x00030444},
+ {0x00000083, 0x00000000},
+ {0x00000085, 0x00000001},
+ {0x00000086, 0x00000002},
+ {0x00000087, 0x48490000},
+ {0x00000088, 0x20244647},
+ {0x00000089, 0x00000005},
+ {0x0000008b, 0x66030000},
+ {0x0000008c, 0x00006603},
+ {0x0000008d, 0x00000100},
+ {0x0000008f, 0x00001c0a},
+ {0x00000090, 0xff000001},
+ {0x00000094, 0x00101101},
+ {0x00000095, 0x00000fff},
+ {0x00000096, 0x00116fff},
+ {0x00000097, 0x60010000},
+ {0x00000098, 0x10010000},
+ {0x00000099, 0x00006000},
+ {0x0000009a, 0x00001000},
+ {0x0000009f, 0x00936a00}
+};
+
+static const u32 caicos_io_mc_regs[BTC_IO_MC_REGS_SIZE][2] = {
+ {0x00000077, 0xff010100},
+ {0x00000078, 0x00000000},
+ {0x00000079, 0x00001434},
+ {0x0000007a, 0xcc08ec08},
+ {0x0000007b, 0x00040000},
+ {0x0000007c, 0x000080c0},
+ {0x0000007d, 0x09000000},
+ {0x0000007e, 0x00210404},
+ {0x00000081, 0x08a8e800},
+ {0x00000082, 0x00030444},
+ {0x00000083, 0x00000000},
+ {0x00000085, 0x00000001},
+ {0x00000086, 0x00000002},
+ {0x00000087, 0x48490000},
+ {0x00000088, 0x20244647},
+ {0x00000089, 0x00000005},
+ {0x0000008b, 0x66030000},
+ {0x0000008c, 0x00006603},
+ {0x0000008d, 0x00000100},
+ {0x0000008f, 0x00001c0a},
+ {0x00000090, 0xff000001},
+ {0x00000094, 0x00101101},
+ {0x00000095, 0x00000fff},
+ {0x00000096, 0x00116fff},
+ {0x00000097, 0x60010000},
+ {0x00000098, 0x10010000},
+ {0x00000099, 0x00006000},
+ {0x0000009a, 0x00001000},
+ {0x0000009f, 0x00916a00}
+};
+
+int btc_mc_load_microcode(struct radeon_device *rdev)
+{
+ const __be32 *fw_data;
+ u32 mem_type, running, blackout = 0;
+ u32 *io_mc_regs;
+ int i;
+
+ if (!rdev->mc_fw)
+ return -EINVAL;
+
+ switch (rdev->family) {
+ case CHIP_BARTS:
+ io_mc_regs = (u32 *)&barts_io_mc_regs;
+ break;
+ case CHIP_TURKS:
+ io_mc_regs = (u32 *)&turks_io_mc_regs;
+ break;
+ case CHIP_CAICOS:
+ default:
+ io_mc_regs = (u32 *)&caicos_io_mc_regs;
+ break;
+ }
+
+ mem_type = (RREG32(MC_SEQ_MISC0) & MC_SEQ_MISC0_GDDR5_MASK) >> MC_SEQ_MISC0_GDDR5_SHIFT;
+ running = RREG32(MC_SEQ_SUP_CNTL) & RUN_MASK;
+
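+ /* the MC ucode is only (re)loaded on GDDR5 boards, and only while the MC sequencer is halted */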
+ if ((mem_type == MC_SEQ_MISC0_GDDR5_VALUE) && (running == 0)) {
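+ /* note: running is 0 whenever this branch is taken, so the blackout save below never fires */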
+ if (running) {
+ blackout = RREG32(MC_SHARED_BLACKOUT_CNTL);
+ WREG32(MC_SHARED_BLACKOUT_CNTL, 1);
+ }
+
+ /* reset the engine and set to writable */
+ WREG32(MC_SEQ_SUP_CNTL, 0x00000008);
+ WREG32(MC_SEQ_SUP_CNTL, 0x00000010);
+
+ /* load mc io regs */
+ for (i = 0; i < BTC_IO_MC_REGS_SIZE; i++) {
+ WREG32(MC_SEQ_IO_DEBUG_INDEX, io_mc_regs[(i << 1)]);
+ WREG32(MC_SEQ_IO_DEBUG_DATA, io_mc_regs[(i << 1) + 1]);
+ }
+ /* load the MC ucode */
+ fw_data = (const __be32 *)rdev->mc_fw->data;
+ for (i = 0; i < BTC_MC_UCODE_SIZE; i++)
+ WREG32(MC_SEQ_SUP_PGM, be32_to_cpup(fw_data++));
+
+ /* put the engine back into the active state */
+ WREG32(MC_SEQ_SUP_CNTL, 0x00000008);
+ WREG32(MC_SEQ_SUP_CNTL, 0x00000004);
+ WREG32(MC_SEQ_SUP_CNTL, 0x00000001);
+
+ /* wait for training to complete */
+ while (!(RREG32(MC_IO_PAD_CNTL_D0) & MEM_FALL_OUT_CMD))
+ udelay(10);
+
+ if (running)
+ WREG32(MC_SHARED_BLACKOUT_CNTL, blackout);
+ }
+
+ return 0;
+}
+
+int ni_init_microcode(struct radeon_device *rdev)
+{
+ struct platform_device *pdev;
+ const char *chip_name;
+ const char *rlc_chip_name;
+ size_t pfp_req_size, me_req_size, rlc_req_size, mc_req_size;
+ char fw_name[30];
+ int err;
+
+ DRM_DEBUG("\n");
+
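+ /* request_firmware() needs a struct device, so register a transient platform device for the duration of the loads */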
+ pdev = platform_device_register_simple("radeon_cp", 0, NULL, 0);
+ err = IS_ERR(pdev);
+ if (err) {
+ printk(KERN_ERR "radeon_cp: Failed to register firmware\n");
+ return -EINVAL;
+ }
+
+ switch (rdev->family) {
+ case CHIP_BARTS:
+ chip_name = "BARTS";
+ rlc_chip_name = "BTC";
+ break;
+ case CHIP_TURKS:
+ chip_name = "TURKS";
+ rlc_chip_name = "BTC";
+ break;
+ case CHIP_CAICOS:
+ chip_name = "CAICOS";
+ rlc_chip_name = "BTC";
+ break;
+ default: BUG();
+ }
+
+ pfp_req_size = EVERGREEN_PFP_UCODE_SIZE * 4;
+ me_req_size = EVERGREEN_PM4_UCODE_SIZE * 4;
+ rlc_req_size = EVERGREEN_RLC_UCODE_SIZE * 4;
+ mc_req_size = BTC_MC_UCODE_SIZE * 4;
+
+ DRM_INFO("Loading %s Microcode\n", chip_name);
+
+ snprintf(fw_name, sizeof(fw_name), "radeon/%s_pfp.bin", chip_name);
+ err = request_firmware(&rdev->pfp_fw, fw_name, &pdev->dev);
+ if (err)
+ goto out;
+ if (rdev->pfp_fw->size != pfp_req_size) {
+ printk(KERN_ERR
+ "ni_cp: Bogus length %zu in firmware \"%s\"\n",
+ rdev->pfp_fw->size, fw_name);
+ err = -EINVAL;
+ goto out;
+ }
+
+ snprintf(fw_name, sizeof(fw_name), "radeon/%s_me.bin", chip_name);
+ err = request_firmware(&rdev->me_fw, fw_name, &pdev->dev);
+ if (err)
+ goto out;
+ if (rdev->me_fw->size != me_req_size) {
+ printk(KERN_ERR
+ "ni_cp: Bogus length %zu in firmware \"%s\"\n",
+ rdev->me_fw->size, fw_name);
+ err = -EINVAL;
+ }
+
+ snprintf(fw_name, sizeof(fw_name), "radeon/%s_rlc.bin", rlc_chip_name);
+ err = request_firmware(&rdev->rlc_fw, fw_name, &pdev->dev);
+ if (err)
+ goto out;
+ if (rdev->rlc_fw->size != rlc_req_size) {
+ printk(KERN_ERR
+ "ni_rlc: Bogus length %zu in firmware \"%s\"\n",
+ rdev->rlc_fw->size, fw_name);
+ err = -EINVAL;
+ }
+
+ snprintf(fw_name, sizeof(fw_name), "radeon/%s_mc.bin", chip_name);
+ err = request_firmware(&rdev->mc_fw, fw_name, &pdev->dev);
+ if (err)
+ goto out;
+ if (rdev->mc_fw->size != mc_req_size) {
+ printk(KERN_ERR
+ "ni_mc: Bogus length %zu in firmware \"%s\"\n",
+ rdev->mc_fw->size, fw_name);
+ err = -EINVAL;
+ }
+out:
+ platform_device_unregister(pdev);
+
+ if (err) {
+ if (err != -EINVAL)
+ printk(KERN_ERR
+ "ni_cp: Failed to load firmware \"%s\"\n",
+ fw_name);
+ release_firmware(rdev->pfp_fw);
+ rdev->pfp_fw = NULL;
+ release_firmware(rdev->me_fw);
+ rdev->me_fw = NULL;
+ release_firmware(rdev->rlc_fw);
+ rdev->rlc_fw = NULL;
+ release_firmware(rdev->mc_fw);
+ rdev->mc_fw = NULL;
+ }
+ return err;
+}
+
diff --git a/drivers/gpu/drm/radeon/ni_reg.h b/drivers/gpu/drm/radeon/ni_reg.h
new file mode 100644
index 00000000000..5db7b7d6feb
--- /dev/null
+++ b/drivers/gpu/drm/radeon/ni_reg.h
@@ -0,0 +1,86 @@
+/*
+ * Copyright 2010 Advanced Micro Devices, Inc.
+ *
+ * Permission is hereby granted, free of charge, to any person obtaining a
+ * copy of this software and associated documentation files (the "Software"),
+ * to deal in the Software without restriction, including without limitation
+ * the rights to use, copy, modify, merge, publish, distribute, sublicense,
+ * and/or sell copies of the Software, and to permit persons to whom the
+ * Software is furnished to do so, subject to the following conditions:
+ *
+ * The above copyright notice and this permission notice shall be included in
+ * all copies or substantial portions of the Software.
+ *
+ * THE SOFTWARE IS PROVIDED "AS IS", WITHOUT WARRANTY OF ANY KIND, EXPRESS OR
+ * IMPLIED, INCLUDING BUT NOT LIMITED TO THE WARRANTIES OF MERCHANTABILITY,
+ * FITNESS FOR A PARTICULAR PURPOSE AND NONINFRINGEMENT. IN NO EVENT SHALL
+ * THE COPYRIGHT HOLDER(S) OR AUTHOR(S) BE LIABLE FOR ANY CLAIM, DAMAGES OR
+ * OTHER LIABILITY, WHETHER IN AN ACTION OF CONTRACT, TORT OR OTHERWISE,
+ * ARISING FROM, OUT OF OR IN CONNECTION WITH THE SOFTWARE OR THE USE OR
+ * OTHER DEALINGS IN THE SOFTWARE.
+ *
+ * Authors: Alex Deucher
+ */
+#ifndef __NI_REG_H__
+#define __NI_REG_H__
+
+/* northern islands - DCE5 */
+
+#define NI_INPUT_GAMMA_CONTROL 0x6840
+# define NI_GRPH_INPUT_GAMMA_MODE(x) (((x) & 0x3) << 0)
+# define NI_INPUT_GAMMA_USE_LUT 0
+# define NI_INPUT_GAMMA_BYPASS 1
+# define NI_INPUT_GAMMA_SRGB_24 2
+# define NI_INPUT_GAMMA_XVYCC_222 3
+# define NI_OVL_INPUT_GAMMA_MODE(x) (((x) & 0x3) << 4)
+
+#define NI_PRESCALE_GRPH_CONTROL 0x68b4
+# define NI_GRPH_PRESCALE_BYPASS (1 << 4)
+
+#define NI_PRESCALE_OVL_CONTROL 0x68c4
+# define NI_OVL_PRESCALE_BYPASS (1 << 4)
+
+#define NI_INPUT_CSC_CONTROL 0x68d4
+# define NI_INPUT_CSC_GRPH_MODE(x) (((x) & 0x3) << 0)
+# define NI_INPUT_CSC_BYPASS 0
+# define NI_INPUT_CSC_PROG_COEFF 1
+# define NI_INPUT_CSC_PROG_SHARED_MATRIXA 2
+# define NI_INPUT_CSC_OVL_MODE(x) (((x) & 0x3) << 4)
+
+#define NI_OUTPUT_CSC_CONTROL 0x68f0
+# define NI_OUTPUT_CSC_GRPH_MODE(x) (((x) & 0x7) << 0)
+# define NI_OUTPUT_CSC_BYPASS 0
+# define NI_OUTPUT_CSC_TV_RGB 1
+# define NI_OUTPUT_CSC_YCBCR_601 2
+# define NI_OUTPUT_CSC_YCBCR_709 3
+# define NI_OUTPUT_CSC_PROG_COEFF 4
+# define NI_OUTPUT_CSC_PROG_SHARED_MATRIXB 5
+# define NI_OUTPUT_CSC_OVL_MODE(x) (((x) & 0x7) << 4)
+
+#define NI_DEGAMMA_CONTROL 0x6960
+# define NI_GRPH_DEGAMMA_MODE(x) (((x) & 0x3) << 0)
+# define NI_DEGAMMA_BYPASS 0
+# define NI_DEGAMMA_SRGB_24 1
+# define NI_DEGAMMA_XVYCC_222 2
+# define NI_OVL_DEGAMMA_MODE(x) (((x) & 0x3) << 4)
+# define NI_ICON_DEGAMMA_MODE(x) (((x) & 0x3) << 8)
+# define NI_CURSOR_DEGAMMA_MODE(x) (((x) & 0x3) << 12)
+
+#define NI_GAMUT_REMAP_CONTROL 0x6964
+# define NI_GRPH_GAMUT_REMAP_MODE(x) (((x) & 0x3) << 0)
+# define NI_GAMUT_REMAP_BYPASS 0
+# define NI_GAMUT_REMAP_PROG_COEFF 1
+# define NI_GAMUT_REMAP_PROG_SHARED_MATRIXA 2
+# define NI_GAMUT_REMAP_PROG_SHARED_MATRIXB 3
+# define NI_OVL_GAMUT_REMAP_MODE(x) (((x) & 0x3) << 4)
+
+#define NI_REGAMMA_CONTROL 0x6a80
+# define NI_GRPH_REGAMMA_MODE(x) (((x) & 0x7) << 0)
+# define NI_REGAMMA_BYPASS 0
+# define NI_REGAMMA_SRGB_24 1
+# define NI_REGAMMA_XVYCC_222 2
+# define NI_REGAMMA_PROG_A 3
+# define NI_REGAMMA_PROG_B 4
+# define NI_OVL_REGAMMA_MODE(x) (((x) & 0x7) << 4)
+
+#endif
diff --git a/drivers/gpu/drm/radeon/nid.h b/drivers/gpu/drm/radeon/nid.h
new file mode 100644
index 00000000000..f7b445390e0
--- /dev/null
+++ b/drivers/gpu/drm/radeon/nid.h
@@ -0,0 +1,41 @@
+/*
+ * Copyright 2010 Advanced Micro Devices, Inc.
+ *
+ * Permission is hereby granted, free of charge, to any person obtaining a
+ * copy of this software and associated documentation files (the "Software"),
+ * to deal in the Software without restriction, including without limitation
+ * the rights to use, copy, modify, merge, publish, distribute, sublicense,
+ * and/or sell copies of the Software, and to permit persons to whom the
+ * Software is furnished to do so, subject to the following conditions:
+ *
+ * The above copyright notice and this permission notice shall be included in
+ * all copies or substantial portions of the Software.
+ *
+ * THE SOFTWARE IS PROVIDED "AS IS", WITHOUT WARRANTY OF ANY KIND, EXPRESS OR
+ * IMPLIED, INCLUDING BUT NOT LIMITED TO THE WARRANTIES OF MERCHANTABILITY,
+ * FITNESS FOR A PARTICULAR PURPOSE AND NONINFRINGEMENT. IN NO EVENT SHALL
+ * THE COPYRIGHT HOLDER(S) OR AUTHOR(S) BE LIABLE FOR ANY CLAIM, DAMAGES OR
+ * OTHER LIABILITY, WHETHER IN AN ACTION OF CONTRACT, TORT OR OTHERWISE,
+ * ARISING FROM, OUT OF OR IN CONNECTION WITH THE SOFTWARE OR THE USE OR
+ * OTHER DEALINGS IN THE SOFTWARE.
+ *
+ * Authors: Alex Deucher
+ */
+#ifndef NI_H
+#define NI_H
+
+#define MC_SHARED_BLACKOUT_CNTL 0x20ac
+#define MC_SEQ_SUP_CNTL 0x28c8
+#define RUN_MASK (1 << 0)
+#define MC_SEQ_SUP_PGM 0x28cc
+#define MC_IO_PAD_CNTL_D0 0x29d0
+#define MEM_FALL_OUT_CMD (1 << 8)
+#define MC_SEQ_MISC0 0x2a00
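+/* MC_SEQ_MISC0[31:28] encodes the memory type; a value of 5 means GDDR5 */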
+#define MC_SEQ_MISC0_GDDR5_SHIFT 28
+#define MC_SEQ_MISC0_GDDR5_MASK 0xf0000000
+#define MC_SEQ_MISC0_GDDR5_VALUE 5
+#define MC_SEQ_IO_DEBUG_INDEX 0x2a44
+#define MC_SEQ_IO_DEBUG_DATA 0x2a48
+
+#endif
+
diff --git a/drivers/gpu/drm/radeon/r100.c b/drivers/gpu/drm/radeon/r100.c
index 8e10aa9f74b..f637595b14e 100644
--- a/drivers/gpu/drm/radeon/r100.c
+++ b/drivers/gpu/drm/radeon/r100.c
@@ -68,6 +68,56 @@ MODULE_FIRMWARE(FIRMWARE_R520);
* r100,rv100,rs100,rv200,rs200,r200,rv250,rs300,rv280
*/
+void r100_pre_page_flip(struct radeon_device *rdev, int crtc)
+{
+ struct radeon_crtc *radeon_crtc = rdev->mode_info.crtcs[crtc];
+ u32 tmp;
+
+ /* make sure flip is at vb rather than hb */
+ tmp = RREG32(RADEON_CRTC_OFFSET_CNTL + radeon_crtc->crtc_offset);
+ tmp &= ~RADEON_CRTC_OFFSET_FLIP_CNTL;
+ /* make sure pending bit is asserted */
+ tmp |= RADEON_CRTC_GUI_TRIG_OFFSET_LEFT_EN;
+ WREG32(RADEON_CRTC_OFFSET_CNTL + radeon_crtc->crtc_offset, tmp);
+
+ /* set pageflip to happen as late as possible in the vblank interval.
+ * same field for crtc1/2
+ */
+ tmp = RREG32(RADEON_CRTC_GEN_CNTL);
+ tmp &= ~RADEON_CRTC_VSTAT_MODE_MASK;
+ WREG32(RADEON_CRTC_GEN_CNTL, tmp);
+
+ /* enable the pflip int */
+ radeon_irq_kms_pflip_irq_get(rdev, crtc);
+}
+
+void r100_post_page_flip(struct radeon_device *rdev, int crtc)
+{
+ /* disable the pflip int */
+ radeon_irq_kms_pflip_irq_put(rdev, crtc);
+}
+
+u32 r100_page_flip(struct radeon_device *rdev, int crtc_id, u64 crtc_base)
+{
+ struct radeon_crtc *radeon_crtc = rdev->mode_info.crtcs[crtc_id];
+ u32 tmp = ((u32)crtc_base) | RADEON_CRTC_OFFSET__OFFSET_LOCK;
+
+ /* Lock the graphics update lock */
+ /* update the scanout addresses */
+ WREG32(RADEON_CRTC_OFFSET + radeon_crtc->crtc_offset, tmp);
+
+ /* Wait for update_pending to go high. */
+ while (!(RREG32(RADEON_CRTC_OFFSET + radeon_crtc->crtc_offset) & RADEON_CRTC_OFFSET__GUI_TRIG_OFFSET));
+ DRM_DEBUG("Update pending now high. Unlocking vupdate_lock.\n");
+
+ /* Unlock the lock, so double-buffering can take place inside vblank */
+ tmp &= ~RADEON_CRTC_OFFSET__OFFSET_LOCK;
+ WREG32(RADEON_CRTC_OFFSET + radeon_crtc->crtc_offset, tmp);
+
+ /* Return current update_pending status: */
+ return RREG32(RADEON_CRTC_OFFSET + radeon_crtc->crtc_offset) & RADEON_CRTC_OFFSET__GUI_TRIG_OFFSET;
+}
+
void r100_pm_get_dynpm_state(struct radeon_device *rdev)
{
int i;
@@ -526,10 +576,12 @@ int r100_irq_set(struct radeon_device *rdev)
if (rdev->irq.gui_idle) {
tmp |= RADEON_GUI_IDLE_MASK;
}
- if (rdev->irq.crtc_vblank_int[0]) {
+ if (rdev->irq.crtc_vblank_int[0] ||
+ rdev->irq.pflip[0]) {
tmp |= RADEON_CRTC_VBLANK_MASK;
}
- if (rdev->irq.crtc_vblank_int[1]) {
+ if (rdev->irq.crtc_vblank_int[1] ||
+ rdev->irq.pflip[1]) {
tmp |= RADEON_CRTC2_VBLANK_MASK;
}
if (rdev->irq.hpd[0]) {
@@ -600,14 +652,22 @@ int r100_irq_process(struct radeon_device *rdev)
}
/* Vertical blank interrupts */
if (status & RADEON_CRTC_VBLANK_STAT) {
- drm_handle_vblank(rdev->ddev, 0);
- rdev->pm.vblank_sync = true;
- wake_up(&rdev->irq.vblank_queue);
+ if (rdev->irq.crtc_vblank_int[0]) {
+ drm_handle_vblank(rdev->ddev, 0);
+ rdev->pm.vblank_sync = true;
+ wake_up(&rdev->irq.vblank_queue);
+ }
+ if (rdev->irq.pflip[0])
+ radeon_crtc_handle_flip(rdev, 0);
}
if (status & RADEON_CRTC2_VBLANK_STAT) {
- drm_handle_vblank(rdev->ddev, 1);
- rdev->pm.vblank_sync = true;
- wake_up(&rdev->irq.vblank_queue);
+ if (rdev->irq.crtc_vblank_int[1]) {
+ drm_handle_vblank(rdev->ddev, 1);
+ rdev->pm.vblank_sync = true;
+ wake_up(&rdev->irq.vblank_queue);
+ }
+ if (rdev->irq.pflip[1])
+ radeon_crtc_handle_flip(rdev, 1);
}
if (status & RADEON_FP_DETECT_STAT) {
queue_hotplug = true;
@@ -622,7 +682,7 @@ int r100_irq_process(struct radeon_device *rdev)
/* reset gui idle ack. the status bit is broken */
rdev->irq.gui_idle_acked = false;
if (queue_hotplug)
- queue_work(rdev->wq, &rdev->hotplug_work);
+ schedule_work(&rdev->hotplug_work);
if (rdev->msi_enabled) {
switch (rdev->family) {
case CHIP_RS400:
diff --git a/drivers/gpu/drm/radeon/r100d.h b/drivers/gpu/drm/radeon/r100d.h
index b121b6c678d..eab91760fae 100644
--- a/drivers/gpu/drm/radeon/r100d.h
+++ b/drivers/gpu/drm/radeon/r100d.h
@@ -551,7 +551,7 @@
#define S_000360_CUR2_LOCK(x) (((x) & 0x1) << 31)
#define G_000360_CUR2_LOCK(x) (((x) >> 31) & 0x1)
#define C_000360_CUR2_LOCK 0x7FFFFFFF
-#define R_0003C2_GENMO_WT 0x0003C0
+#define R_0003C2_GENMO_WT 0x0003C2
#define S_0003C2_GENMO_MONO_ADDRESS_B(x) (((x) & 0x1) << 0)
#define G_0003C2_GENMO_MONO_ADDRESS_B(x) (((x) >> 0) & 0x1)
#define C_0003C2_GENMO_MONO_ADDRESS_B 0xFE
diff --git a/drivers/gpu/drm/radeon/r300.c b/drivers/gpu/drm/radeon/r300.c
index cde1d3480d9..fae5e709f27 100644
--- a/drivers/gpu/drm/radeon/r300.c
+++ b/drivers/gpu/drm/radeon/r300.c
@@ -558,10 +558,7 @@ int rv370_get_pcie_lanes(struct radeon_device *rdev)
/* FIXME wait for idle */
- if (rdev->family < CHIP_R600)
- link_width_cntl = RREG32_PCIE(RADEON_PCIE_LC_LINK_WIDTH_CNTL);
- else
- link_width_cntl = RREG32_PCIE_P(RADEON_PCIE_LC_LINK_WIDTH_CNTL);
+ link_width_cntl = RREG32_PCIE(RADEON_PCIE_LC_LINK_WIDTH_CNTL);
switch ((link_width_cntl & RADEON_PCIE_LC_LINK_WIDTH_RD_MASK) >> RADEON_PCIE_LC_LINK_WIDTH_RD_SHIFT) {
case RADEON_PCIE_LC_LINK_WIDTH_X0:
@@ -745,6 +742,11 @@ static int r300_packet0_check(struct radeon_cs_parser *p,
break;
case 0x4E00:
/* RB3D_CCTL */
+ if ((idx_value & (1 << 10)) && /* CMASK_ENABLE */
+ p->rdev->cmask_filp != p->filp) {
+ DRM_ERROR("Invalid RB3D_CCTL: Cannot enable CMASK.\n");
+ return -EINVAL;
+ }
track->num_cb = ((idx_value >> 5) & 0x3) + 1;
break;
case 0x4E38:
@@ -787,6 +789,13 @@ static int r300_packet0_check(struct radeon_cs_parser *p,
case 15:
track->cb[i].cpp = 2;
break;
+ case 5:
+ if (p->rdev->family < CHIP_RV515) {
+ DRM_ERROR("Invalid color buffer format (%d)!\n",
+ ((idx_value >> 21) & 0xF));
+ return -EINVAL;
+ }
+ /* Pass through. */
case 6:
track->cb[i].cpp = 4;
break;
@@ -1199,6 +1208,10 @@ static int r300_packet3_check(struct radeon_cs_parser *p,
if (p->rdev->hyperz_filp != p->filp)
return -EINVAL;
break;
+ case PACKET3_3D_CLEAR_CMASK:
+ if (p->rdev->cmask_filp != p->filp)
+ return -EINVAL;
+ break;
case PACKET3_NOP:
break;
default:
diff --git a/drivers/gpu/drm/radeon/r300d.h b/drivers/gpu/drm/radeon/r300d.h
index 0c036c60d9d..1f519a5ffb8 100644
--- a/drivers/gpu/drm/radeon/r300d.h
+++ b/drivers/gpu/drm/radeon/r300d.h
@@ -54,6 +54,7 @@
#define PACKET3_3D_DRAW_IMMD_2 0x35
#define PACKET3_3D_DRAW_INDX_2 0x36
#define PACKET3_3D_CLEAR_HIZ 0x37
+#define PACKET3_3D_CLEAR_CMASK 0x38
#define PACKET3_BITBLT_MULTI 0x9B
#define PACKET0(reg, n) (CP_PACKET0 | \
diff --git a/drivers/gpu/drm/radeon/r500_reg.h b/drivers/gpu/drm/radeon/r500_reg.h
index 6ac1f604e29..fc437059918 100644
--- a/drivers/gpu/drm/radeon/r500_reg.h
+++ b/drivers/gpu/drm/radeon/r500_reg.h
@@ -355,6 +355,8 @@
#define AVIVO_D1CRTC_FRAME_COUNT 0x60a4
#define AVIVO_D1CRTC_STEREO_CONTROL 0x60c4
+#define AVIVO_D1MODE_MASTER_UPDATE_MODE 0x60e4
+
/* master controls */
#define AVIVO_DC_CRTC_MASTER_EN 0x60f8
#define AVIVO_DC_CRTC_TV_CONTROL 0x60fc
@@ -409,8 +411,10 @@
#define AVIVO_D1GRPH_X_END 0x6134
#define AVIVO_D1GRPH_Y_END 0x6138
#define AVIVO_D1GRPH_UPDATE 0x6144
+# define AVIVO_D1GRPH_SURFACE_UPDATE_PENDING (1 << 2)
# define AVIVO_D1GRPH_UPDATE_LOCK (1 << 16)
#define AVIVO_D1GRPH_FLIP_CONTROL 0x6148
+# define AVIVO_D1GRPH_SURFACE_UPDATE_H_RETRACE_EN (1 << 0)
#define AVIVO_D1CUR_CONTROL 0x6400
# define AVIVO_D1CURSOR_EN (1 << 0)
diff --git a/drivers/gpu/drm/radeon/r600.c b/drivers/gpu/drm/radeon/r600.c
index 9c92db7c896..6b50716267c 100644
--- a/drivers/gpu/drm/radeon/r600.c
+++ b/drivers/gpu/drm/radeon/r600.c
@@ -83,6 +83,9 @@ MODULE_FIRMWARE("radeon/JUNIPER_rlc.bin");
MODULE_FIRMWARE("radeon/CYPRESS_pfp.bin");
MODULE_FIRMWARE("radeon/CYPRESS_me.bin");
MODULE_FIRMWARE("radeon/CYPRESS_rlc.bin");
+MODULE_FIRMWARE("radeon/PALM_pfp.bin");
+MODULE_FIRMWARE("radeon/PALM_me.bin");
+MODULE_FIRMWARE("radeon/SUMO_rlc.bin");
int r600_debugfs_mc_info_init(struct radeon_device *rdev);
@@ -91,6 +94,7 @@ int r600_mc_wait_for_idle(struct radeon_device *rdev);
void r600_gpu_init(struct radeon_device *rdev);
void r600_fini(struct radeon_device *rdev);
void r600_irq_disable(struct radeon_device *rdev);
+static void r600_pcie_gen2_enable(struct radeon_device *rdev);
/* get temperature in millidegrees */
u32 rv6xx_get_temp(struct radeon_device *rdev)
@@ -1164,7 +1168,7 @@ static void r600_mc_program(struct radeon_device *rdev)
* Note: GTT start, end, size should be initialized before calling this
* function on AGP platform.
*/
-void r600_vram_gtt_location(struct radeon_device *rdev, struct radeon_mc *mc)
+static void r600_vram_gtt_location(struct radeon_device *rdev, struct radeon_mc *mc)
{
u64 size_bf, size_af;
@@ -2009,6 +2013,10 @@ int r600_init_microcode(struct radeon_device *rdev)
chip_name = "CYPRESS";
rlc_chip_name = "CYPRESS";
break;
+ case CHIP_PALM:
+ chip_name = "PALM";
+ rlc_chip_name = "SUMO";
+ break;
default: BUG();
}
@@ -2372,6 +2380,9 @@ int r600_startup(struct radeon_device *rdev)
{
int r;
+ /* enable pcie gen2 link */
+ r600_pcie_gen2_enable(rdev);
+
if (!rdev->me_fw || !rdev->pfp_fw || !rdev->rlc_fw) {
r = r600_init_microcode(rdev);
if (r) {
@@ -2874,6 +2885,8 @@ static void r600_disable_interrupt_state(struct radeon_device *rdev)
WREG32(CP_INT_CNTL, CNTX_BUSY_INT_ENABLE | CNTX_EMPTY_INT_ENABLE);
WREG32(GRBM_INT_CNTL, 0);
WREG32(DxMODE_INT_MASK, 0);
+ WREG32(D1GRPH_INTERRUPT_CONTROL, 0);
+ WREG32(D2GRPH_INTERRUPT_CONTROL, 0);
if (ASIC_IS_DCE3(rdev)) {
WREG32(DCE3_DACA_AUTODETECT_INT_CONTROL, 0);
WREG32(DCE3_DACB_AUTODETECT_INT_CONTROL, 0);
@@ -2998,6 +3011,7 @@ int r600_irq_set(struct radeon_device *rdev)
u32 hpd1, hpd2, hpd3, hpd4 = 0, hpd5 = 0, hpd6 = 0;
u32 grbm_int_cntl = 0;
u32 hdmi1, hdmi2;
+ u32 d1grph = 0, d2grph = 0;
if (!rdev->irq.installed) {
WARN(1, "Can't enable IRQ/MSI because no handler is installed\n");
@@ -3034,11 +3048,13 @@ int r600_irq_set(struct radeon_device *rdev)
cp_int_cntl |= RB_INT_ENABLE;
cp_int_cntl |= TIME_STAMP_INT_ENABLE;
}
- if (rdev->irq.crtc_vblank_int[0]) {
+ if (rdev->irq.crtc_vblank_int[0] ||
+ rdev->irq.pflip[0]) {
DRM_DEBUG("r600_irq_set: vblank 0\n");
mode_int |= D1MODE_VBLANK_INT_MASK;
}
- if (rdev->irq.crtc_vblank_int[1]) {
+ if (rdev->irq.crtc_vblank_int[1] ||
+ rdev->irq.pflip[1]) {
DRM_DEBUG("r600_irq_set: vblank 1\n");
mode_int |= D2MODE_VBLANK_INT_MASK;
}
@@ -3081,6 +3097,8 @@ int r600_irq_set(struct radeon_device *rdev)
WREG32(CP_INT_CNTL, cp_int_cntl);
WREG32(DxMODE_INT_MASK, mode_int);
+ WREG32(D1GRPH_INTERRUPT_CONTROL, d1grph);
+ WREG32(D2GRPH_INTERRUPT_CONTROL, d2grph);
WREG32(GRBM_INT_CNTL, grbm_int_cntl);
WREG32(R600_HDMI_BLOCK1 + R600_HDMI_CNTL, hdmi1);
if (ASIC_IS_DCE3(rdev)) {
@@ -3103,32 +3121,35 @@ int r600_irq_set(struct radeon_device *rdev)
return 0;
}
-static inline void r600_irq_ack(struct radeon_device *rdev,
- u32 *disp_int,
- u32 *disp_int_cont,
- u32 *disp_int_cont2)
+static inline void r600_irq_ack(struct radeon_device *rdev)
{
u32 tmp;
if (ASIC_IS_DCE3(rdev)) {
- *disp_int = RREG32(DCE3_DISP_INTERRUPT_STATUS);
- *disp_int_cont = RREG32(DCE3_DISP_INTERRUPT_STATUS_CONTINUE);
- *disp_int_cont2 = RREG32(DCE3_DISP_INTERRUPT_STATUS_CONTINUE2);
+ rdev->irq.stat_regs.r600.disp_int = RREG32(DCE3_DISP_INTERRUPT_STATUS);
+ rdev->irq.stat_regs.r600.disp_int_cont = RREG32(DCE3_DISP_INTERRUPT_STATUS_CONTINUE);
+ rdev->irq.stat_regs.r600.disp_int_cont2 = RREG32(DCE3_DISP_INTERRUPT_STATUS_CONTINUE2);
} else {
- *disp_int = RREG32(DISP_INTERRUPT_STATUS);
- *disp_int_cont = RREG32(DISP_INTERRUPT_STATUS_CONTINUE);
- *disp_int_cont2 = 0;
- }
-
- if (*disp_int & LB_D1_VBLANK_INTERRUPT)
+ rdev->irq.stat_regs.r600.disp_int = RREG32(DISP_INTERRUPT_STATUS);
+ rdev->irq.stat_regs.r600.disp_int_cont = RREG32(DISP_INTERRUPT_STATUS_CONTINUE);
+ rdev->irq.stat_regs.r600.disp_int_cont2 = 0;
+ }
+ rdev->irq.stat_regs.r600.d1grph_int = RREG32(D1GRPH_INTERRUPT_STATUS);
+ rdev->irq.stat_regs.r600.d2grph_int = RREG32(D2GRPH_INTERRUPT_STATUS);
+
+ if (rdev->irq.stat_regs.r600.d1grph_int & DxGRPH_PFLIP_INT_OCCURRED)
+ WREG32(D1GRPH_INTERRUPT_STATUS, DxGRPH_PFLIP_INT_CLEAR);
+ if (rdev->irq.stat_regs.r600.d2grph_int & DxGRPH_PFLIP_INT_OCCURRED)
+ WREG32(D2GRPH_INTERRUPT_STATUS, DxGRPH_PFLIP_INT_CLEAR);
+ if (rdev->irq.stat_regs.r600.disp_int & LB_D1_VBLANK_INTERRUPT)
WREG32(D1MODE_VBLANK_STATUS, DxMODE_VBLANK_ACK);
- if (*disp_int & LB_D1_VLINE_INTERRUPT)
+ if (rdev->irq.stat_regs.r600.disp_int & LB_D1_VLINE_INTERRUPT)
WREG32(D1MODE_VLINE_STATUS, DxMODE_VLINE_ACK);
- if (*disp_int & LB_D2_VBLANK_INTERRUPT)
+ if (rdev->irq.stat_regs.r600.disp_int & LB_D2_VBLANK_INTERRUPT)
WREG32(D2MODE_VBLANK_STATUS, DxMODE_VBLANK_ACK);
- if (*disp_int & LB_D2_VLINE_INTERRUPT)
+ if (rdev->irq.stat_regs.r600.disp_int & LB_D2_VLINE_INTERRUPT)
WREG32(D2MODE_VLINE_STATUS, DxMODE_VLINE_ACK);
- if (*disp_int & DC_HPD1_INTERRUPT) {
+ if (rdev->irq.stat_regs.r600.disp_int & DC_HPD1_INTERRUPT) {
if (ASIC_IS_DCE3(rdev)) {
tmp = RREG32(DC_HPD1_INT_CONTROL);
tmp |= DC_HPDx_INT_ACK;
@@ -3139,7 +3160,7 @@ static inline void r600_irq_ack(struct radeon_device *rdev,
WREG32(DC_HOT_PLUG_DETECT1_INT_CONTROL, tmp);
}
}
- if (*disp_int & DC_HPD2_INTERRUPT) {
+ if (rdev->irq.stat_regs.r600.disp_int & DC_HPD2_INTERRUPT) {
if (ASIC_IS_DCE3(rdev)) {
tmp = RREG32(DC_HPD2_INT_CONTROL);
tmp |= DC_HPDx_INT_ACK;
@@ -3150,7 +3171,7 @@ static inline void r600_irq_ack(struct radeon_device *rdev,
WREG32(DC_HOT_PLUG_DETECT2_INT_CONTROL, tmp);
}
}
- if (*disp_int_cont & DC_HPD3_INTERRUPT) {
+ if (rdev->irq.stat_regs.r600.disp_int_cont & DC_HPD3_INTERRUPT) {
if (ASIC_IS_DCE3(rdev)) {
tmp = RREG32(DC_HPD3_INT_CONTROL);
tmp |= DC_HPDx_INT_ACK;
@@ -3161,18 +3182,18 @@ static inline void r600_irq_ack(struct radeon_device *rdev,
WREG32(DC_HOT_PLUG_DETECT3_INT_CONTROL, tmp);
}
}
- if (*disp_int_cont & DC_HPD4_INTERRUPT) {
+ if (rdev->irq.stat_regs.r600.disp_int_cont & DC_HPD4_INTERRUPT) {
tmp = RREG32(DC_HPD4_INT_CONTROL);
tmp |= DC_HPDx_INT_ACK;
WREG32(DC_HPD4_INT_CONTROL, tmp);
}
if (ASIC_IS_DCE32(rdev)) {
- if (*disp_int_cont2 & DC_HPD5_INTERRUPT) {
+ if (rdev->irq.stat_regs.r600.disp_int_cont2 & DC_HPD5_INTERRUPT) {
tmp = RREG32(DC_HPD5_INT_CONTROL);
tmp |= DC_HPDx_INT_ACK;
WREG32(DC_HPD5_INT_CONTROL, tmp);
}
- if (*disp_int_cont2 & DC_HPD6_INTERRUPT) {
+ if (rdev->irq.stat_regs.r600.disp_int_cont2 & DC_HPD6_INTERRUPT) {
- tmp = RREG32(DC_HPD5_INT_CONTROL);
+ tmp = RREG32(DC_HPD6_INT_CONTROL);
tmp |= DC_HPDx_INT_ACK;
WREG32(DC_HPD6_INT_CONTROL, tmp);
@@ -3194,12 +3215,10 @@ static inline void r600_irq_ack(struct radeon_device *rdev,
void r600_irq_disable(struct radeon_device *rdev)
{
- u32 disp_int, disp_int_cont, disp_int_cont2;
-
r600_disable_interrupts(rdev);
/* Wait and acknowledge irq */
mdelay(1);
- r600_irq_ack(rdev, &disp_int, &disp_int_cont, &disp_int_cont2);
+ r600_irq_ack(rdev);
r600_disable_interrupt_state(rdev);
}
@@ -3262,7 +3281,7 @@ int r600_irq_process(struct radeon_device *rdev)
u32 wptr = r600_get_ih_wptr(rdev);
u32 rptr = rdev->ih.rptr;
u32 src_id, src_data;
- u32 ring_index, disp_int, disp_int_cont, disp_int_cont2;
+ u32 ring_index;
unsigned long flags;
bool queue_hotplug = false;
@@ -3283,7 +3302,7 @@ int r600_irq_process(struct radeon_device *rdev)
restart_ih:
/* display interrupts */
- r600_irq_ack(rdev, &disp_int, &disp_int_cont, &disp_int_cont2);
+ r600_irq_ack(rdev);
rdev->ih.wptr = wptr;
while (rptr != wptr) {
@@ -3296,17 +3315,21 @@ restart_ih:
case 1: /* D1 vblank/vline */
switch (src_data) {
case 0: /* D1 vblank */
- if (disp_int & LB_D1_VBLANK_INTERRUPT) {
- drm_handle_vblank(rdev->ddev, 0);
- rdev->pm.vblank_sync = true;
- wake_up(&rdev->irq.vblank_queue);
- disp_int &= ~LB_D1_VBLANK_INTERRUPT;
+ if (rdev->irq.stat_regs.r600.disp_int & LB_D1_VBLANK_INTERRUPT) {
+ if (rdev->irq.crtc_vblank_int[0]) {
+ drm_handle_vblank(rdev->ddev, 0);
+ rdev->pm.vblank_sync = true;
+ wake_up(&rdev->irq.vblank_queue);
+ }
+ if (rdev->irq.pflip[0])
+ radeon_crtc_handle_flip(rdev, 0);
+ rdev->irq.stat_regs.r600.disp_int &= ~LB_D1_VBLANK_INTERRUPT;
DRM_DEBUG("IH: D1 vblank\n");
}
break;
case 1: /* D1 vline */
- if (disp_int & LB_D1_VLINE_INTERRUPT) {
- disp_int &= ~LB_D1_VLINE_INTERRUPT;
+ if (rdev->irq.stat_regs.r600.disp_int & LB_D1_VLINE_INTERRUPT) {
+ rdev->irq.stat_regs.r600.disp_int &= ~LB_D1_VLINE_INTERRUPT;
DRM_DEBUG("IH: D1 vline\n");
}
break;
@@ -3318,17 +3341,21 @@ restart_ih:
case 5: /* D2 vblank/vline */
switch (src_data) {
case 0: /* D2 vblank */
- if (disp_int & LB_D2_VBLANK_INTERRUPT) {
- drm_handle_vblank(rdev->ddev, 1);
- rdev->pm.vblank_sync = true;
- wake_up(&rdev->irq.vblank_queue);
- disp_int &= ~LB_D2_VBLANK_INTERRUPT;
+ if (rdev->irq.stat_regs.r600.disp_int & LB_D2_VBLANK_INTERRUPT) {
+ if (rdev->irq.crtc_vblank_int[1]) {
+ drm_handle_vblank(rdev->ddev, 1);
+ rdev->pm.vblank_sync = true;
+ wake_up(&rdev->irq.vblank_queue);
+ }
+ if (rdev->irq.pflip[1])
+ radeon_crtc_handle_flip(rdev, 1);
+ rdev->irq.stat_regs.r600.disp_int &= ~LB_D2_VBLANK_INTERRUPT;
DRM_DEBUG("IH: D2 vblank\n");
}
break;
case 1: /* D1 vline */
- if (disp_int & LB_D2_VLINE_INTERRUPT) {
- disp_int &= ~LB_D2_VLINE_INTERRUPT;
+ if (rdev->irq.stat_regs.r600.disp_int & LB_D2_VLINE_INTERRUPT) {
+ rdev->irq.stat_regs.r600.disp_int &= ~LB_D2_VLINE_INTERRUPT;
DRM_DEBUG("IH: D2 vline\n");
}
break;
@@ -3340,43 +3367,43 @@ restart_ih:
case 19: /* HPD/DAC hotplug */
switch (src_data) {
case 0:
- if (disp_int & DC_HPD1_INTERRUPT) {
- disp_int &= ~DC_HPD1_INTERRUPT;
+ if (rdev->irq.stat_regs.r600.disp_int & DC_HPD1_INTERRUPT) {
+ rdev->irq.stat_regs.r600.disp_int &= ~DC_HPD1_INTERRUPT;
queue_hotplug = true;
DRM_DEBUG("IH: HPD1\n");
}
break;
case 1:
- if (disp_int & DC_HPD2_INTERRUPT) {
- disp_int &= ~DC_HPD2_INTERRUPT;
+ if (rdev->irq.stat_regs.r600.disp_int & DC_HPD2_INTERRUPT) {
+ rdev->irq.stat_regs.r600.disp_int &= ~DC_HPD2_INTERRUPT;
queue_hotplug = true;
DRM_DEBUG("IH: HPD2\n");
}
break;
case 4:
- if (disp_int_cont & DC_HPD3_INTERRUPT) {
- disp_int_cont &= ~DC_HPD3_INTERRUPT;
+ if (rdev->irq.stat_regs.r600.disp_int_cont & DC_HPD3_INTERRUPT) {
+ rdev->irq.stat_regs.r600.disp_int_cont &= ~DC_HPD3_INTERRUPT;
queue_hotplug = true;
DRM_DEBUG("IH: HPD3\n");
}
break;
case 5:
- if (disp_int_cont & DC_HPD4_INTERRUPT) {
- disp_int_cont &= ~DC_HPD4_INTERRUPT;
+ if (rdev->irq.stat_regs.r600.disp_int_cont & DC_HPD4_INTERRUPT) {
+ rdev->irq.stat_regs.r600.disp_int_cont &= ~DC_HPD4_INTERRUPT;
queue_hotplug = true;
DRM_DEBUG("IH: HPD4\n");
}
break;
case 10:
- if (disp_int_cont2 & DC_HPD5_INTERRUPT) {
- disp_int_cont2 &= ~DC_HPD5_INTERRUPT;
+ if (rdev->irq.stat_regs.r600.disp_int_cont2 & DC_HPD5_INTERRUPT) {
+ rdev->irq.stat_regs.r600.disp_int_cont2 &= ~DC_HPD5_INTERRUPT;
queue_hotplug = true;
DRM_DEBUG("IH: HPD5\n");
}
break;
case 12:
- if (disp_int_cont2 & DC_HPD6_INTERRUPT) {
- disp_int_cont2 &= ~DC_HPD6_INTERRUPT;
+ if (rdev->irq.stat_regs.r600.disp_int_cont2 & DC_HPD6_INTERRUPT) {
+ rdev->irq.stat_regs.r600.disp_int_cont2 &= ~DC_HPD6_INTERRUPT;
queue_hotplug = true;
DRM_DEBUG("IH: HPD6\n");
}
@@ -3419,7 +3446,7 @@ restart_ih:
if (wptr != rdev->ih.wptr)
goto restart_ih;
if (queue_hotplug)
- queue_work(rdev->wq, &rdev->hotplug_work);
+ schedule_work(&rdev->hotplug_work);
rdev->ih.rptr = rptr;
WREG32(IH_RB_RPTR, rdev->ih.rptr);
spin_unlock_irqrestore(&rdev->ih.lock, flags);
@@ -3508,3 +3535,219 @@ void r600_ioctl_wait_idle(struct radeon_device *rdev, struct radeon_bo *bo)
} else
WREG32(R_005480_HDP_MEM_COHERENCY_FLUSH_CNTL, 0x1);
}
+
+void r600_set_pcie_lanes(struct radeon_device *rdev, int lanes)
+{
+ u32 link_width_cntl, mask, target_reg;
+
+ if (rdev->flags & RADEON_IS_IGP)
+ return;
+
+ if (!(rdev->flags & RADEON_IS_PCIE))
+ return;
+
+ /* x2 cards have a special sequence */
+ if (ASIC_IS_X2(rdev))
+ return;
+
+ /* FIXME wait for idle */
+
+ switch (lanes) {
+ case 0:
+ mask = RADEON_PCIE_LC_LINK_WIDTH_X0;
+ break;
+ case 1:
+ mask = RADEON_PCIE_LC_LINK_WIDTH_X1;
+ break;
+ case 2:
+ mask = RADEON_PCIE_LC_LINK_WIDTH_X2;
+ break;
+ case 4:
+ mask = RADEON_PCIE_LC_LINK_WIDTH_X4;
+ break;
+ case 8:
+ mask = RADEON_PCIE_LC_LINK_WIDTH_X8;
+ break;
+ case 12:
+ mask = RADEON_PCIE_LC_LINK_WIDTH_X12;
+ break;
+ case 16:
+ default:
+ mask = RADEON_PCIE_LC_LINK_WIDTH_X16;
+ break;
+ }
+
+ link_width_cntl = RREG32_PCIE_P(RADEON_PCIE_LC_LINK_WIDTH_CNTL);
+
+ if ((link_width_cntl & RADEON_PCIE_LC_LINK_WIDTH_RD_MASK) ==
+ (mask << RADEON_PCIE_LC_LINK_WIDTH_RD_SHIFT))
+ return;
+
+ if (link_width_cntl & R600_PCIE_LC_UPCONFIGURE_DIS)
+ return;
+
+ link_width_cntl &= ~(RADEON_PCIE_LC_LINK_WIDTH_MASK |
+ RADEON_PCIE_LC_RECONFIG_NOW |
+ R600_PCIE_LC_RENEGOTIATE_EN |
+ R600_PCIE_LC_RECONFIG_ARC_MISSING_ESCAPE);
+ link_width_cntl |= mask;
+
+ WREG32_PCIE_P(RADEON_PCIE_LC_LINK_WIDTH_CNTL, link_width_cntl);
+
+ /* some northbridges can renegotiate the link rather than requiring
+ * a complete re-config.
+ * e.g., AMD 780/790 northbridges (pci ids: 0x5956, 0x5957, 0x5958, etc.)
+ */
+ if (link_width_cntl & R600_PCIE_LC_RENEGOTIATION_SUPPORT)
+ link_width_cntl |= R600_PCIE_LC_RENEGOTIATE_EN | R600_PCIE_LC_UPCONFIGURE_SUPPORT;
+ else
+ link_width_cntl |= R600_PCIE_LC_RECONFIG_ARC_MISSING_ESCAPE;
+
+ WREG32_PCIE_P(RADEON_PCIE_LC_LINK_WIDTH_CNTL, (link_width_cntl |
+ RADEON_PCIE_LC_RECONFIG_NOW));
+
+ if (rdev->family >= CHIP_RV770)
+ target_reg = R700_TARGET_AND_CURRENT_PROFILE_INDEX;
+ else
+ target_reg = R600_TARGET_AND_CURRENT_PROFILE_INDEX;
+
+ /* wait for lane set to complete */
+ link_width_cntl = RREG32(target_reg);
+ while (link_width_cntl == 0xffffffff)
+ link_width_cntl = RREG32(target_reg);
+
+}
+
+int r600_get_pcie_lanes(struct radeon_device *rdev)
+{
+ u32 link_width_cntl;
+
+ if (rdev->flags & RADEON_IS_IGP)
+ return 0;
+
+ if (!(rdev->flags & RADEON_IS_PCIE))
+ return 0;
+
+ /* x2 cards have a special sequence */
+ if (ASIC_IS_X2(rdev))
+ return 0;
+
+ /* FIXME wait for idle */
+
+ link_width_cntl = RREG32_PCIE_P(RADEON_PCIE_LC_LINK_WIDTH_CNTL);
+
+ switch ((link_width_cntl & RADEON_PCIE_LC_LINK_WIDTH_RD_MASK) >> RADEON_PCIE_LC_LINK_WIDTH_RD_SHIFT) {
+ case RADEON_PCIE_LC_LINK_WIDTH_X0:
+ return 0;
+ case RADEON_PCIE_LC_LINK_WIDTH_X1:
+ return 1;
+ case RADEON_PCIE_LC_LINK_WIDTH_X2:
+ return 2;
+ case RADEON_PCIE_LC_LINK_WIDTH_X4:
+ return 4;
+ case RADEON_PCIE_LC_LINK_WIDTH_X8:
+ return 8;
+ case RADEON_PCIE_LC_LINK_WIDTH_X16:
+ default:
+ return 16;
+ }
+}
+
+static void r600_pcie_gen2_enable(struct radeon_device *rdev)
+{
+ u32 link_width_cntl, lanes, speed_cntl, training_cntl, tmp;
+ u16 link_cntl2;
+
+ if (rdev->flags & RADEON_IS_IGP)
+ return;
+
+ if (!(rdev->flags & RADEON_IS_PCIE))
+ return;
+
+ /* x2 cards have a special sequence */
+ if (ASIC_IS_X2(rdev))
+ return;
+
+ /* only RV6xx+ chips are supported */
+ if (rdev->family <= CHIP_R600)
+ return;
+
+ /* 55 nm r6xx asics */
+ if ((rdev->family == CHIP_RV670) ||
+ (rdev->family == CHIP_RV620) ||
+ (rdev->family == CHIP_RV635)) {
+ /* advertise upconfig capability */
+ link_width_cntl = RREG32_PCIE_P(PCIE_LC_LINK_WIDTH_CNTL);
+ link_width_cntl &= ~LC_UPCONFIGURE_DIS;
+ WREG32_PCIE_P(PCIE_LC_LINK_WIDTH_CNTL, link_width_cntl);
+ link_width_cntl = RREG32_PCIE_P(PCIE_LC_LINK_WIDTH_CNTL);
+ if (link_width_cntl & LC_RENEGOTIATION_SUPPORT) {
+ lanes = (link_width_cntl & LC_LINK_WIDTH_RD_MASK) >> LC_LINK_WIDTH_RD_SHIFT;
+ link_width_cntl &= ~(LC_LINK_WIDTH_MASK |
+ LC_RECONFIG_ARC_MISSING_ESCAPE);
+ link_width_cntl |= lanes | LC_RECONFIG_NOW | LC_RENEGOTIATE_EN;
+ WREG32_PCIE_P(PCIE_LC_LINK_WIDTH_CNTL, link_width_cntl);
+ } else {
+ link_width_cntl |= LC_UPCONFIGURE_DIS;
+ WREG32_PCIE_P(PCIE_LC_LINK_WIDTH_CNTL, link_width_cntl);
+ }
+ }
+
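+ /* only attempt the gen2 switch if the other side of the link has advertised gen2 support */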
+ speed_cntl = RREG32_PCIE_P(PCIE_LC_SPEED_CNTL);
+ if ((speed_cntl & LC_OTHER_SIDE_EVER_SENT_GEN2) &&
+ (speed_cntl & LC_OTHER_SIDE_SUPPORTS_GEN2)) {
+
+ /* 55 nm r6xx asics */
+ if ((rdev->family == CHIP_RV670) ||
+ (rdev->family == CHIP_RV620) ||
+ (rdev->family == CHIP_RV635)) {
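+ /* LINK_CNTL2 (0x88 in config space) is read through the MM_CFGREGS aperture at 0x4000 + offset */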
+ WREG32(MM_CFGREGS_CNTL, 0x8);
+ link_cntl2 = RREG32(0x4088);
+ WREG32(MM_CFGREGS_CNTL, 0);
+ /* not supported yet */
+ if (link_cntl2 & SELECTABLE_DEEMPHASIS)
+ return;
+ }
+
+ speed_cntl &= ~LC_SPEED_CHANGE_ATTEMPTS_ALLOWED_MASK;
+ speed_cntl |= (0x3 << LC_SPEED_CHANGE_ATTEMPTS_ALLOWED_SHIFT);
+ speed_cntl &= ~LC_VOLTAGE_TIMER_SEL_MASK;
+ speed_cntl &= ~LC_FORCE_DIS_HW_SPEED_CHANGE;
+ speed_cntl |= LC_FORCE_EN_HW_SPEED_CHANGE;
+ WREG32_PCIE_P(PCIE_LC_SPEED_CNTL, speed_cntl);
+
+ tmp = RREG32(0x541c);
+ WREG32(0x541c, tmp | 0x8);
+ WREG32(MM_CFGREGS_CNTL, MM_WR_TO_CFG_EN);
+ link_cntl2 = RREG16(0x4088);
+ link_cntl2 &= ~TARGET_LINK_SPEED_MASK;
+ link_cntl2 |= 0x2;
+ WREG16(0x4088, link_cntl2);
+ WREG32(MM_CFGREGS_CNTL, 0);
+
+ if ((rdev->family == CHIP_RV670) ||
+ (rdev->family == CHIP_RV620) ||
+ (rdev->family == CHIP_RV635)) {
+ training_cntl = RREG32_PCIE_P(PCIE_LC_TRAINING_CNTL);
+ training_cntl &= ~LC_POINT_7_PLUS_EN;
+ WREG32_PCIE_P(PCIE_LC_TRAINING_CNTL, training_cntl);
+ } else {
+ speed_cntl = RREG32_PCIE_P(PCIE_LC_SPEED_CNTL);
+ speed_cntl &= ~LC_TARGET_LINK_SPEED_OVERRIDE_EN;
+ WREG32_PCIE_P(PCIE_LC_SPEED_CNTL, speed_cntl);
+ }
+
+ speed_cntl = RREG32_PCIE_P(PCIE_LC_SPEED_CNTL);
+ speed_cntl |= LC_GEN2_EN_STRAP;
+ WREG32_PCIE_P(PCIE_LC_SPEED_CNTL, speed_cntl);
+
+ } else {
+ link_width_cntl = RREG32_PCIE_P(PCIE_LC_LINK_WIDTH_CNTL);
+ /* XXX: only disable it if gen1 bridge vendor == 0x111d or 0x1106 */
+ if (1)
+ link_width_cntl |= LC_UPCONFIGURE_DIS;
+ else
+ link_width_cntl &= ~LC_UPCONFIGURE_DIS;
+ WREG32_PCIE_P(PCIE_LC_LINK_WIDTH_CNTL, link_width_cntl);
+ }
+}
diff --git a/drivers/gpu/drm/radeon/r600d.h b/drivers/gpu/drm/radeon/r600d.h
index bff4dc4f410..a5d898b4bad 100644
--- a/drivers/gpu/drm/radeon/r600d.h
+++ b/drivers/gpu/drm/radeon/r600d.h
@@ -728,6 +728,54 @@
/* DCE 3.2 */
# define DC_HPDx_EN (1 << 28)
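+/* per-crtc surface update (pageflip) interrupt status/control */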
+#define D1GRPH_INTERRUPT_STATUS 0x6158
+#define D2GRPH_INTERRUPT_STATUS 0x6958
+# define DxGRPH_PFLIP_INT_OCCURRED (1 << 0)
+# define DxGRPH_PFLIP_INT_CLEAR (1 << 8)
+#define D1GRPH_INTERRUPT_CONTROL 0x615c
+#define D2GRPH_INTERRUPT_CONTROL 0x695c
+# define DxGRPH_PFLIP_INT_MASK (1 << 0)
+# define DxGRPH_PFLIP_INT_TYPE (1 << 8)
+
+/* PCIE link stuff */
+#define PCIE_LC_TRAINING_CNTL 0xa1 /* PCIE_P */
+# define LC_POINT_7_PLUS_EN (1 << 6)
+#define PCIE_LC_LINK_WIDTH_CNTL 0xa2 /* PCIE_P */
+# define LC_LINK_WIDTH_SHIFT 0
+# define LC_LINK_WIDTH_MASK 0x7
+# define LC_LINK_WIDTH_X0 0
+# define LC_LINK_WIDTH_X1 1
+# define LC_LINK_WIDTH_X2 2
+# define LC_LINK_WIDTH_X4 3
+# define LC_LINK_WIDTH_X8 4
+# define LC_LINK_WIDTH_X16 6
+# define LC_LINK_WIDTH_RD_SHIFT 4
+# define LC_LINK_WIDTH_RD_MASK 0x70
+# define LC_RECONFIG_ARC_MISSING_ESCAPE (1 << 7)
+# define LC_RECONFIG_NOW (1 << 8)
+# define LC_RENEGOTIATION_SUPPORT (1 << 9)
+# define LC_RENEGOTIATE_EN (1 << 10)
+# define LC_SHORT_RECONFIG_EN (1 << 11)
+# define LC_UPCONFIGURE_SUPPORT (1 << 12)
+# define LC_UPCONFIGURE_DIS (1 << 13)
+#define PCIE_LC_SPEED_CNTL 0xa4 /* PCIE_P */
+# define LC_GEN2_EN_STRAP (1 << 0)
+# define LC_TARGET_LINK_SPEED_OVERRIDE_EN (1 << 1)
+# define LC_FORCE_EN_HW_SPEED_CHANGE (1 << 5)
+# define LC_FORCE_DIS_HW_SPEED_CHANGE (1 << 6)
+# define LC_SPEED_CHANGE_ATTEMPTS_ALLOWED_MASK (0x3 << 8)
+# define LC_SPEED_CHANGE_ATTEMPTS_ALLOWED_SHIFT 3
+# define LC_CURRENT_DATA_RATE (1 << 11)
+# define LC_VOLTAGE_TIMER_SEL_MASK (0xf << 14)
+# define LC_CLR_FAILED_SPD_CHANGE_CNT (1 << 21)
+# define LC_OTHER_SIDE_EVER_SENT_GEN2 (1 << 23)
+# define LC_OTHER_SIDE_SUPPORTS_GEN2 (1 << 24)
+#define MM_CFGREGS_CNTL 0x544c
+# define MM_WR_TO_CFG_EN (1 << 3)
+#define LINK_CNTL2 0x88 /* F0 */
+# define TARGET_LINK_SPEED_MASK (0xf << 0)
+# define SELECTABLE_DEEMPHASIS (1 << 6)
+
/*
* PM4
*/
diff --git a/drivers/gpu/drm/radeon/radeon.h b/drivers/gpu/drm/radeon/radeon.h
index 3a7095743d4..e9486630a46 100644
--- a/drivers/gpu/drm/radeon/radeon.h
+++ b/drivers/gpu/drm/radeon/radeon.h
@@ -69,6 +69,7 @@
#include <ttm/ttm_bo_driver.h>
#include <ttm/ttm_placement.h>
#include <ttm/ttm_module.h>
+#include <ttm/ttm_execbuf_util.h>
#include "radeon_family.h"
#include "radeon_mode.h"
@@ -180,6 +181,7 @@ void rs690_pm_info(struct radeon_device *rdev);
extern u32 rv6xx_get_temp(struct radeon_device *rdev);
extern u32 rv770_get_temp(struct radeon_device *rdev);
extern u32 evergreen_get_temp(struct radeon_device *rdev);
+extern u32 sumo_get_temp(struct radeon_device *rdev);
/*
* Fences.
@@ -259,13 +261,12 @@ struct radeon_bo {
};
struct radeon_bo_list {
- struct list_head list;
+ struct ttm_validate_buffer tv;
struct radeon_bo *bo;
uint64_t gpu_offset;
unsigned rdomain;
unsigned wdomain;
u32 tiling_flags;
- bool reserved;
};
/*
@@ -377,11 +378,56 @@ void radeon_scratch_free(struct radeon_device *rdev, uint32_t reg);
/*
* IRQS.
*/
+
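+/* per-flip state: once the fence signals, unpin old_rbo, flip to new_crtc_base and deliver the pending vblank event */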
+struct radeon_unpin_work {
+ struct work_struct work;
+ struct radeon_device *rdev;
+ int crtc_id;
+ struct radeon_fence *fence;
+ struct drm_pending_vblank_event *event;
+ struct radeon_bo *old_rbo;
+ u64 new_crtc_base;
+};
+
+struct r500_irq_stat_regs {
+ u32 disp_int;
+};
+
+struct r600_irq_stat_regs {
+ u32 disp_int;
+ u32 disp_int_cont;
+ u32 disp_int_cont2;
+ u32 d1grph_int;
+ u32 d2grph_int;
+};
+
+struct evergreen_irq_stat_regs {
+ u32 disp_int;
+ u32 disp_int_cont;
+ u32 disp_int_cont2;
+ u32 disp_int_cont3;
+ u32 disp_int_cont4;
+ u32 disp_int_cont5;
+ u32 d1grph_int;
+ u32 d2grph_int;
+ u32 d3grph_int;
+ u32 d4grph_int;
+ u32 d5grph_int;
+ u32 d6grph_int;
+};
+
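+/* interrupt status registers are latched into this union once per IRQ so handlers ack and dispatch from a single snapshot */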
+union radeon_irq_stat_regs {
+ struct r500_irq_stat_regs r500;
+ struct r600_irq_stat_regs r600;
+ struct evergreen_irq_stat_regs evergreen;
+};
+
struct radeon_irq {
bool installed;
bool sw_int;
/* FIXME: use a define max crtc rather than hardcode it */
bool crtc_vblank_int[6];
+ bool pflip[6];
wait_queue_head_t vblank_queue;
/* FIXME: use defines for max hpd/dacs */
bool hpd[6];
@@ -392,12 +438,17 @@ struct radeon_irq {
bool hdmi[2];
spinlock_t sw_lock;
int sw_refcount;
+ union radeon_irq_stat_regs stat_regs;
+ spinlock_t pflip_lock[6];
+ int pflip_refcount[6];
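+ /* pageflip interrupts are refcounted per crtc so overlapping flips keep the interrupt source enabled */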
};
int radeon_irq_kms_init(struct radeon_device *rdev);
void radeon_irq_kms_fini(struct radeon_device *rdev);
void radeon_irq_kms_sw_irq_get(struct radeon_device *rdev);
void radeon_irq_kms_sw_irq_put(struct radeon_device *rdev);
+void radeon_irq_kms_pflip_irq_get(struct radeon_device *rdev, int crtc);
+void radeon_irq_kms_pflip_irq_put(struct radeon_device *rdev, int crtc);
/*
* CP & ring.
@@ -687,6 +738,8 @@ enum radeon_int_thermal_type {
THERMAL_TYPE_RV6XX,
THERMAL_TYPE_RV770,
THERMAL_TYPE_EVERGREEN,
+ THERMAL_TYPE_SUMO,
+ THERMAL_TYPE_NI,
};
struct radeon_voltage {
@@ -770,6 +823,9 @@ struct radeon_pm {
u32 current_sclk;
u32 current_mclk;
u32 current_vddc;
+ u32 default_sclk;
+ u32 default_mclk;
+ u32 default_vddc;
struct radeon_i2c_chan *i2c_bus;
/* selected pm method */
enum radeon_pm_method pm_method;
@@ -881,6 +937,10 @@ struct radeon_asic {
void (*pm_finish)(struct radeon_device *rdev);
void (*pm_init_profile)(struct radeon_device *rdev);
void (*pm_get_dynpm_state)(struct radeon_device *rdev);
+ /* pageflipping */
+ void (*pre_page_flip)(struct radeon_device *rdev, int crtc);
+ u32 (*page_flip)(struct radeon_device *rdev, int crtc, u64 crtc_base);
+ void (*post_page_flip)(struct radeon_device *rdev, int crtc);
};
/*
@@ -975,6 +1035,7 @@ struct evergreen_asic {
unsigned tiling_npipes;
unsigned tiling_group_size;
unsigned tile_config;
+ struct r100_gpu_lockup lockup;
};
union radeon_asic_config {
@@ -1091,11 +1152,11 @@ struct radeon_device {
const struct firmware *me_fw; /* all family ME firmware */
const struct firmware *pfp_fw; /* r6/700 PFP firmware */
const struct firmware *rlc_fw; /* r6/700 RLC firmware */
+ const struct firmware *mc_fw; /* NI MC firmware */
struct r600_blit r600_blit;
struct r700_vram_scratch vram_scratch;
int msi_enabled; /* msi enabled */
struct r600_ih ih; /* r6/700 interrupt ring */
- struct workqueue_struct *wq;
struct work_struct hotplug_work;
int num_crtc; /* number of crtcs */
struct mutex dc_hw_i2c_mutex; /* display controller hw i2c mutex */
@@ -1110,10 +1171,10 @@ struct radeon_device {
uint8_t audio_status_bits;
uint8_t audio_category_code;
- bool powered_down;
struct notifier_block acpi_nb;
- /* only one userspace can use Hyperz features at a time */
+ /* only one userspace can use Hyperz features or CMASK at a time */
struct drm_file *hyperz_filp;
+ struct drm_file *cmask_filp;
/* i2c buses */
struct radeon_i2c_chan *i2c_bus[RADEON_MAX_I2C_BUS];
};
@@ -1188,6 +1249,8 @@ static inline void r100_io_wreg(struct radeon_device *rdev, u32 reg, u32 v)
*/
#define RREG8(reg) readb(((void __iomem *)rdev->rmmio) + (reg))
#define WREG8(reg, v) writeb(v, ((void __iomem *)rdev->rmmio) + (reg))
+#define RREG16(reg) readw(((void __iomem *)rdev->rmmio) + (reg))
+#define WREG16(reg, v) writew(v, ((void __iomem *)rdev->rmmio) + (reg))
#define RREG32(reg) r100_mm_rreg(rdev, (reg))
#define DREG32(reg) printk(KERN_INFO "REGISTER: " #reg " : 0x%08X\n", r100_mm_rreg(rdev, (reg)))
#define WREG32(reg, v) r100_mm_wreg(rdev, (reg), (v))
@@ -1261,6 +1324,14 @@ void r100_pll_errata_after_index(struct radeon_device *rdev);
(rdev->family == CHIP_RV410) || \
(rdev->family == CHIP_RS400) || \
(rdev->family == CHIP_RS480))
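+/* PCI device IDs of dual-GPU (X2) boards, which skip the standard PCIE lane/gen2 sequences */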
+#define ASIC_IS_X2(rdev) ((rdev->ddev->pdev->device == 0x9441) || \
+ (rdev->ddev->pdev->device == 0x9443) || \
+ (rdev->ddev->pdev->device == 0x944B) || \
+ (rdev->ddev->pdev->device == 0x9506) || \
+ (rdev->ddev->pdev->device == 0x9509) || \
+ (rdev->ddev->pdev->device == 0x950F) || \
+ (rdev->ddev->pdev->device == 0x689C) || \
+ (rdev->ddev->pdev->device == 0x689D))
#define ASIC_IS_AVIVO(rdev) ((rdev->family >= CHIP_RS600))
#define ASIC_IS_DCE2(rdev) ((rdev->family == CHIP_RS600) || \
(rdev->family == CHIP_RS690) || \
@@ -1269,6 +1340,9 @@ void r100_pll_errata_after_index(struct radeon_device *rdev);
#define ASIC_IS_DCE3(rdev) ((rdev->family >= CHIP_RV620))
#define ASIC_IS_DCE32(rdev) ((rdev->family >= CHIP_RV730))
#define ASIC_IS_DCE4(rdev) ((rdev->family >= CHIP_CEDAR))
+#define ASIC_IS_DCE41(rdev) ((rdev->family >= CHIP_PALM) && \
+ (rdev->flags & RADEON_IS_IGP))
+#define ASIC_IS_DCE5(rdev) ((rdev->family >= CHIP_BARTS))
/*
* BIOS helpers.
@@ -1344,6 +1418,9 @@ static inline void radeon_ring_write(struct radeon_device *rdev, uint32_t v)
#define radeon_pm_finish(rdev) (rdev)->asic->pm_finish((rdev))
#define radeon_pm_init_profile(rdev) (rdev)->asic->pm_init_profile((rdev))
#define radeon_pm_get_dynpm_state(rdev) (rdev)->asic->pm_get_dynpm_state((rdev))
+#define radeon_pre_page_flip(rdev, crtc) rdev->asic->pre_page_flip((rdev), (crtc))
+#define radeon_page_flip(rdev, crtc, base) rdev->asic->page_flip((rdev), (crtc), (base))
+#define radeon_post_page_flip(rdev, crtc) rdev->asic->post_page_flip((rdev), (crtc))
/* Common functions */
/* AGP */
@@ -1372,67 +1449,7 @@ extern void radeon_gtt_location(struct radeon_device *rdev, struct radeon_mc *mc
extern int radeon_resume_kms(struct drm_device *dev);
extern int radeon_suspend_kms(struct drm_device *dev, pm_message_t state);
-/* r100,rv100,rs100,rv200,rs200,r200,rv250,rs300,rv280 */
-extern void r100_gpu_lockup_update(struct r100_gpu_lockup *lockup, struct radeon_cp *cp);
-extern bool r100_gpu_cp_is_lockup(struct radeon_device *rdev, struct r100_gpu_lockup *lockup, struct radeon_cp *cp);
-
-/* rv200,rv250,rv280 */
-extern void r200_set_safe_registers(struct radeon_device *rdev);
-
-/* r300,r350,rv350,rv370,rv380 */
-extern void r300_set_reg_safe(struct radeon_device *rdev);
-extern void r300_mc_program(struct radeon_device *rdev);
-extern void r300_mc_init(struct radeon_device *rdev);
-extern void r300_clock_startup(struct radeon_device *rdev);
-extern int r300_mc_wait_for_idle(struct radeon_device *rdev);
-extern int rv370_pcie_gart_init(struct radeon_device *rdev);
-extern void rv370_pcie_gart_fini(struct radeon_device *rdev);
-extern int rv370_pcie_gart_enable(struct radeon_device *rdev);
-extern void rv370_pcie_gart_disable(struct radeon_device *rdev);
-
-/* r420,r423,rv410 */
-extern u32 r420_mc_rreg(struct radeon_device *rdev, u32 reg);
-extern void r420_mc_wreg(struct radeon_device *rdev, u32 reg, u32 v);
-extern int r420_debugfs_pipes_info_init(struct radeon_device *rdev);
-extern void r420_pipes_init(struct radeon_device *rdev);
-
-/* rv515 */
-struct rv515_mc_save {
- u32 d1vga_control;
- u32 d2vga_control;
- u32 vga_render_control;
- u32 vga_hdp_control;
- u32 d1crtc_control;
- u32 d2crtc_control;
-};
-extern void rv515_bandwidth_avivo_update(struct radeon_device *rdev);
-extern void rv515_vga_render_disable(struct radeon_device *rdev);
-extern void rv515_set_safe_registers(struct radeon_device *rdev);
-extern void rv515_mc_stop(struct radeon_device *rdev, struct rv515_mc_save *save);
-extern void rv515_mc_resume(struct radeon_device *rdev, struct rv515_mc_save *save);
-extern void rv515_clock_startup(struct radeon_device *rdev);
-extern void rv515_debugfs(struct radeon_device *rdev);
-extern int rv515_suspend(struct radeon_device *rdev);
-
-/* rs400 */
-extern int rs400_gart_init(struct radeon_device *rdev);
-extern int rs400_gart_enable(struct radeon_device *rdev);
-extern void rs400_gart_adjust_size(struct radeon_device *rdev);
-extern void rs400_gart_disable(struct radeon_device *rdev);
-extern void rs400_gart_fini(struct radeon_device *rdev);
-
-/* rs600 */
-extern void rs600_set_safe_registers(struct radeon_device *rdev);
-extern int rs600_irq_set(struct radeon_device *rdev);
-extern void rs600_irq_disable(struct radeon_device *rdev);
-
-/* rs690, rs740 */
-extern void rs690_line_buffer_adjust(struct radeon_device *rdev,
- struct drm_display_mode *mode1,
- struct drm_display_mode *mode2);
-
/* r600, rv610, rv630, rv620, rv635, rv670, rs780, rs880 */
-extern void r600_vram_gtt_location(struct radeon_device *rdev, struct radeon_mc *mc);
extern bool r600_card_posted(struct radeon_device *rdev);
extern void r600_cp_stop(struct radeon_device *rdev);
extern int r600_cp_start(struct radeon_device *rdev);
@@ -1478,6 +1495,7 @@ extern void r600_hdmi_setmode(struct drm_encoder *encoder, struct drm_display_mo
extern int r600_hdmi_buffer_status_changed(struct drm_encoder *encoder);
extern void r600_hdmi_update_audio_settings(struct drm_encoder *encoder);
+extern void r700_vram_gtt_location(struct radeon_device *rdev, struct radeon_mc *mc);
extern void r700_cp_stop(struct radeon_device *rdev);
extern void r700_cp_fini(struct radeon_device *rdev);
extern void evergreen_disable_interrupt_state(struct radeon_device *rdev);
@@ -1485,6 +1503,9 @@ extern int evergreen_irq_set(struct radeon_device *rdev);
extern int evergreen_blit_init(struct radeon_device *rdev);
extern void evergreen_blit_fini(struct radeon_device *rdev);
+extern int ni_init_microcode(struct radeon_device *rdev);
+extern int btc_mc_load_microcode(struct radeon_device *rdev);
+
/* radeon_acpi.c */
#if defined(CONFIG_ACPI)
extern int radeon_acpi_init(struct radeon_device *rdev);
diff --git a/drivers/gpu/drm/radeon/radeon_asic.c b/drivers/gpu/drm/radeon/radeon_asic.c
index 64fb89ecbf7..3a1b1618622 100644
--- a/drivers/gpu/drm/radeon/radeon_asic.c
+++ b/drivers/gpu/drm/radeon/radeon_asic.c
@@ -94,7 +94,7 @@ static void radeon_register_accessor_init(struct radeon_device *rdev)
rdev->mc_rreg = &rs600_mc_rreg;
rdev->mc_wreg = &rs600_mc_wreg;
}
- if ((rdev->family >= CHIP_R600) && (rdev->family <= CHIP_RV740)) {
+ if ((rdev->family >= CHIP_R600) && (rdev->family <= CHIP_HEMLOCK)) {
rdev->pciep_rreg = &r600_pciep_rreg;
rdev->pciep_wreg = &r600_pciep_wreg;
}
@@ -171,6 +171,9 @@ static struct radeon_asic r100_asic = {
.pm_finish = &r100_pm_finish,
.pm_init_profile = &r100_pm_init_profile,
.pm_get_dynpm_state = &r100_pm_get_dynpm_state,
+ .pre_page_flip = &r100_pre_page_flip,
+ .page_flip = &r100_page_flip,
+ .post_page_flip = &r100_post_page_flip,
};
static struct radeon_asic r200_asic = {
@@ -215,6 +218,9 @@ static struct radeon_asic r200_asic = {
.pm_finish = &r100_pm_finish,
.pm_init_profile = &r100_pm_init_profile,
.pm_get_dynpm_state = &r100_pm_get_dynpm_state,
+ .pre_page_flip = &r100_pre_page_flip,
+ .page_flip = &r100_page_flip,
+ .post_page_flip = &r100_post_page_flip,
};
static struct radeon_asic r300_asic = {
@@ -260,6 +266,9 @@ static struct radeon_asic r300_asic = {
.pm_finish = &r100_pm_finish,
.pm_init_profile = &r100_pm_init_profile,
.pm_get_dynpm_state = &r100_pm_get_dynpm_state,
+ .pre_page_flip = &r100_pre_page_flip,
+ .page_flip = &r100_page_flip,
+ .post_page_flip = &r100_post_page_flip,
};
static struct radeon_asic r300_asic_pcie = {
@@ -304,6 +313,9 @@ static struct radeon_asic r300_asic_pcie = {
.pm_finish = &r100_pm_finish,
.pm_init_profile = &r100_pm_init_profile,
.pm_get_dynpm_state = &r100_pm_get_dynpm_state,
+ .pre_page_flip = &r100_pre_page_flip,
+ .page_flip = &r100_page_flip,
+ .post_page_flip = &r100_post_page_flip,
};
static struct radeon_asic r420_asic = {
@@ -349,6 +361,9 @@ static struct radeon_asic r420_asic = {
.pm_finish = &r100_pm_finish,
.pm_init_profile = &r420_pm_init_profile,
.pm_get_dynpm_state = &r100_pm_get_dynpm_state,
+ .pre_page_flip = &r100_pre_page_flip,
+ .page_flip = &r100_page_flip,
+ .post_page_flip = &r100_post_page_flip,
};
static struct radeon_asic rs400_asic = {
@@ -394,6 +409,9 @@ static struct radeon_asic rs400_asic = {
.pm_finish = &r100_pm_finish,
.pm_init_profile = &r100_pm_init_profile,
.pm_get_dynpm_state = &r100_pm_get_dynpm_state,
+ .pre_page_flip = &r100_pre_page_flip,
+ .page_flip = &r100_page_flip,
+ .post_page_flip = &r100_post_page_flip,
};
static struct radeon_asic rs600_asic = {
@@ -439,6 +457,9 @@ static struct radeon_asic rs600_asic = {
.pm_finish = &rs600_pm_finish,
.pm_init_profile = &r420_pm_init_profile,
.pm_get_dynpm_state = &r100_pm_get_dynpm_state,
+ .pre_page_flip = &rs600_pre_page_flip,
+ .page_flip = &rs600_page_flip,
+ .post_page_flip = &rs600_post_page_flip,
};
static struct radeon_asic rs690_asic = {
@@ -484,6 +505,9 @@ static struct radeon_asic rs690_asic = {
.pm_finish = &rs600_pm_finish,
.pm_init_profile = &r420_pm_init_profile,
.pm_get_dynpm_state = &r100_pm_get_dynpm_state,
+ .pre_page_flip = &rs600_pre_page_flip,
+ .page_flip = &rs600_page_flip,
+ .post_page_flip = &rs600_post_page_flip,
};
static struct radeon_asic rv515_asic = {
@@ -529,6 +553,9 @@ static struct radeon_asic rv515_asic = {
.pm_finish = &rs600_pm_finish,
.pm_init_profile = &r420_pm_init_profile,
.pm_get_dynpm_state = &r100_pm_get_dynpm_state,
+ .pre_page_flip = &rs600_pre_page_flip,
+ .page_flip = &rs600_page_flip,
+ .post_page_flip = &rs600_post_page_flip,
};
static struct radeon_asic r520_asic = {
@@ -574,6 +601,9 @@ static struct radeon_asic r520_asic = {
.pm_finish = &rs600_pm_finish,
.pm_init_profile = &r420_pm_init_profile,
.pm_get_dynpm_state = &r100_pm_get_dynpm_state,
+ .pre_page_flip = &rs600_pre_page_flip,
+ .page_flip = &rs600_page_flip,
+ .post_page_flip = &rs600_post_page_flip,
};
static struct radeon_asic r600_asic = {
@@ -601,8 +631,8 @@ static struct radeon_asic r600_asic = {
.set_engine_clock = &radeon_atom_set_engine_clock,
.get_memory_clock = &radeon_atom_get_memory_clock,
.set_memory_clock = &radeon_atom_set_memory_clock,
- .get_pcie_lanes = &rv370_get_pcie_lanes,
- .set_pcie_lanes = NULL,
+ .get_pcie_lanes = &r600_get_pcie_lanes,
+ .set_pcie_lanes = &r600_set_pcie_lanes,
.set_clock_gating = NULL,
.set_surface_reg = r600_set_surface_reg,
.clear_surface_reg = r600_clear_surface_reg,
@@ -618,6 +648,9 @@ static struct radeon_asic r600_asic = {
.pm_finish = &rs600_pm_finish,
.pm_init_profile = &r600_pm_init_profile,
.pm_get_dynpm_state = &r600_pm_get_dynpm_state,
+ .pre_page_flip = &rs600_pre_page_flip,
+ .page_flip = &rs600_page_flip,
+ .post_page_flip = &rs600_post_page_flip,
};
static struct radeon_asic rs780_asic = {
@@ -662,6 +695,9 @@ static struct radeon_asic rs780_asic = {
.pm_finish = &rs600_pm_finish,
.pm_init_profile = &rs780_pm_init_profile,
.pm_get_dynpm_state = &r600_pm_get_dynpm_state,
+ .pre_page_flip = &rs600_pre_page_flip,
+ .page_flip = &rs600_page_flip,
+ .post_page_flip = &rs600_post_page_flip,
};
static struct radeon_asic rv770_asic = {
@@ -689,8 +725,8 @@ static struct radeon_asic rv770_asic = {
.set_engine_clock = &radeon_atom_set_engine_clock,
.get_memory_clock = &radeon_atom_get_memory_clock,
.set_memory_clock = &radeon_atom_set_memory_clock,
- .get_pcie_lanes = &rv370_get_pcie_lanes,
- .set_pcie_lanes = NULL,
+ .get_pcie_lanes = &r600_get_pcie_lanes,
+ .set_pcie_lanes = &r600_set_pcie_lanes,
.set_clock_gating = &radeon_atom_set_clock_gating,
.set_surface_reg = r600_set_surface_reg,
.clear_surface_reg = r600_clear_surface_reg,
@@ -706,6 +742,9 @@ static struct radeon_asic rv770_asic = {
.pm_finish = &rs600_pm_finish,
.pm_init_profile = &r600_pm_init_profile,
.pm_get_dynpm_state = &r600_pm_get_dynpm_state,
+ .pre_page_flip = &rs600_pre_page_flip,
+ .page_flip = &rv770_page_flip,
+ .post_page_flip = &rs600_post_page_flip,
};
static struct radeon_asic evergreen_asic = {
@@ -733,6 +772,95 @@ static struct radeon_asic evergreen_asic = {
.set_engine_clock = &radeon_atom_set_engine_clock,
.get_memory_clock = &radeon_atom_get_memory_clock,
.set_memory_clock = &radeon_atom_set_memory_clock,
+ .get_pcie_lanes = &r600_get_pcie_lanes,
+ .set_pcie_lanes = &r600_set_pcie_lanes,
+ .set_clock_gating = NULL,
+ .set_surface_reg = r600_set_surface_reg,
+ .clear_surface_reg = r600_clear_surface_reg,
+ .bandwidth_update = &evergreen_bandwidth_update,
+ .hpd_init = &evergreen_hpd_init,
+ .hpd_fini = &evergreen_hpd_fini,
+ .hpd_sense = &evergreen_hpd_sense,
+ .hpd_set_polarity = &evergreen_hpd_set_polarity,
+ .gui_idle = &r600_gui_idle,
+ .pm_misc = &evergreen_pm_misc,
+ .pm_prepare = &evergreen_pm_prepare,
+ .pm_finish = &evergreen_pm_finish,
+ .pm_init_profile = &r600_pm_init_profile,
+ .pm_get_dynpm_state = &r600_pm_get_dynpm_state,
+ .pre_page_flip = &evergreen_pre_page_flip,
+ .page_flip = &evergreen_page_flip,
+ .post_page_flip = &evergreen_post_page_flip,
+};
+
+static struct radeon_asic sumo_asic = {
+ .init = &evergreen_init,
+ .fini = &evergreen_fini,
+ .suspend = &evergreen_suspend,
+ .resume = &evergreen_resume,
+ .cp_commit = &r600_cp_commit,
+ .gpu_is_lockup = &evergreen_gpu_is_lockup,
+ .asic_reset = &evergreen_asic_reset,
+ .vga_set_state = &r600_vga_set_state,
+ .gart_tlb_flush = &evergreen_pcie_gart_tlb_flush,
+ .gart_set_page = &rs600_gart_set_page,
+ .ring_test = &r600_ring_test,
+ .ring_ib_execute = &r600_ring_ib_execute,
+ .irq_set = &evergreen_irq_set,
+ .irq_process = &evergreen_irq_process,
+ .get_vblank_counter = &evergreen_get_vblank_counter,
+ .fence_ring_emit = &r600_fence_ring_emit,
+ .cs_parse = &evergreen_cs_parse,
+ .copy_blit = &evergreen_copy_blit,
+ .copy_dma = &evergreen_copy_blit,
+ .copy = &evergreen_copy_blit,
+ .get_engine_clock = &radeon_atom_get_engine_clock,
+ .set_engine_clock = &radeon_atom_set_engine_clock,
+ .get_memory_clock = NULL,
+ .set_memory_clock = NULL,
+ .get_pcie_lanes = NULL,
+ .set_pcie_lanes = NULL,
+ .set_clock_gating = NULL,
+ .set_surface_reg = r600_set_surface_reg,
+ .clear_surface_reg = r600_clear_surface_reg,
+ .bandwidth_update = &evergreen_bandwidth_update,
+ .hpd_init = &evergreen_hpd_init,
+ .hpd_fini = &evergreen_hpd_fini,
+ .hpd_sense = &evergreen_hpd_sense,
+ .hpd_set_polarity = &evergreen_hpd_set_polarity,
+ .gui_idle = &r600_gui_idle,
+ .pm_misc = &evergreen_pm_misc,
+ .pm_prepare = &evergreen_pm_prepare,
+ .pm_finish = &evergreen_pm_finish,
+ .pm_init_profile = &rs780_pm_init_profile,
+ .pm_get_dynpm_state = &r600_pm_get_dynpm_state,
+};
+
+static struct radeon_asic btc_asic = {
+ .init = &evergreen_init,
+ .fini = &evergreen_fini,
+ .suspend = &evergreen_suspend,
+ .resume = &evergreen_resume,
+ .cp_commit = &r600_cp_commit,
+ .gpu_is_lockup = &evergreen_gpu_is_lockup,
+ .asic_reset = &evergreen_asic_reset,
+ .vga_set_state = &r600_vga_set_state,
+ .gart_tlb_flush = &evergreen_pcie_gart_tlb_flush,
+ .gart_set_page = &rs600_gart_set_page,
+ .ring_test = &r600_ring_test,
+ .ring_ib_execute = &r600_ring_ib_execute,
+ .irq_set = &evergreen_irq_set,
+ .irq_process = &evergreen_irq_process,
+ .get_vblank_counter = &evergreen_get_vblank_counter,
+ .fence_ring_emit = &r600_fence_ring_emit,
+ .cs_parse = &evergreen_cs_parse,
+ .copy_blit = &evergreen_copy_blit,
+ .copy_dma = &evergreen_copy_blit,
+ .copy = &evergreen_copy_blit,
+ .get_engine_clock = &radeon_atom_get_engine_clock,
+ .set_engine_clock = &radeon_atom_set_engine_clock,
+ .get_memory_clock = &radeon_atom_get_memory_clock,
+ .set_memory_clock = &radeon_atom_set_memory_clock,
.get_pcie_lanes = NULL,
.set_pcie_lanes = NULL,
.set_clock_gating = NULL,
@@ -749,6 +877,9 @@ static struct radeon_asic evergreen_asic = {
.pm_finish = &evergreen_pm_finish,
.pm_init_profile = &r600_pm_init_profile,
.pm_get_dynpm_state = &r600_pm_get_dynpm_state,
+ .pre_page_flip = &evergreen_pre_page_flip,
+ .page_flip = &evergreen_page_flip,
+ .post_page_flip = &evergreen_post_page_flip,
};
int radeon_asic_init(struct radeon_device *rdev)
@@ -835,6 +966,14 @@ int radeon_asic_init(struct radeon_device *rdev)
case CHIP_HEMLOCK:
rdev->asic = &evergreen_asic;
break;
+ case CHIP_PALM:
+ rdev->asic = &sumo_asic;
+ break;
+ case CHIP_BARTS:
+ case CHIP_TURKS:
+ case CHIP_CAICOS:
+ rdev->asic = &btc_asic;
+ break;
default:
/* FIXME: not supported yet */
return -EINVAL;
@@ -849,7 +988,9 @@ int radeon_asic_init(struct radeon_device *rdev)
if (rdev->flags & RADEON_SINGLE_CRTC)
rdev->num_crtc = 1;
else {
- if (ASIC_IS_DCE4(rdev))
+ if (ASIC_IS_DCE41(rdev))
+ rdev->num_crtc = 2;
+ else if (ASIC_IS_DCE4(rdev))
rdev->num_crtc = 6;
else
rdev->num_crtc = 2;
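
As with the rest of the driver's hardware abstraction, nothing calls r100_page_flip() and friends directly; everything dispatches through the rdev->asic table populated above. A minimal sketch of that indirection, assuming inline wrappers in the style of the driver's other asic accessors (the wrapper names are assumptions; this diff only shows the table entries, not the call sites):

	/* Sketch only: wrappers assumed, not part of this diff. */
	static inline void radeon_pre_page_flip(struct radeon_device *rdev, int crtc)
	{
		/* arm the per-crtc pageflip interrupt before queuing the flip */
		rdev->asic->pre_page_flip(rdev, crtc);
	}

	static inline u32 radeon_page_flip(struct radeon_device *rdev, int crtc,
					   u64 crtc_base)
	{
		/* program the new scanout base; it latches at the next vblank */
		return rdev->asic->page_flip(rdev, crtc, crtc_base);
	}

	static inline void radeon_post_page_flip(struct radeon_device *rdev, int crtc)
	{
		/* disarm the interrupt once the flip has completed */
		rdev->asic->post_page_flip(rdev, crtc);
	}

The expected ordering is pre_page_flip before the flip is queued, page_flip to write the new base, and post_page_flip from the completion path.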
diff --git a/drivers/gpu/drm/radeon/radeon_asic.h b/drivers/gpu/drm/radeon/radeon_asic.h
index 74098824414..e01f0771853 100644
--- a/drivers/gpu/drm/radeon/radeon_asic.h
+++ b/drivers/gpu/drm/radeon/radeon_asic.h
@@ -102,6 +102,11 @@ int r100_pci_gart_enable(struct radeon_device *rdev);
void r100_pci_gart_disable(struct radeon_device *rdev);
int r100_debugfs_mc_info_init(struct radeon_device *rdev);
int r100_gui_wait_for_idle(struct radeon_device *rdev);
+void r100_gpu_lockup_update(struct r100_gpu_lockup *lockup,
+ struct radeon_cp *cp);
+bool r100_gpu_cp_is_lockup(struct radeon_device *rdev,
+ struct r100_gpu_lockup *lockup,
+ struct radeon_cp *cp);
void r100_ib_fini(struct radeon_device *rdev);
int r100_ib_init(struct radeon_device *rdev);
void r100_irq_disable(struct radeon_device *rdev);
@@ -130,15 +135,19 @@ extern void r100_pm_prepare(struct radeon_device *rdev);
extern void r100_pm_finish(struct radeon_device *rdev);
extern void r100_pm_init_profile(struct radeon_device *rdev);
extern void r100_pm_get_dynpm_state(struct radeon_device *rdev);
+extern void r100_pre_page_flip(struct radeon_device *rdev, int crtc);
+extern u32 r100_page_flip(struct radeon_device *rdev, int crtc, u64 crtc_base);
+extern void r100_post_page_flip(struct radeon_device *rdev, int crtc);
/*
* r200,rv250,rs300,rv280
*/
extern int r200_copy_dma(struct radeon_device *rdev,
- uint64_t src_offset,
- uint64_t dst_offset,
- unsigned num_pages,
+ uint64_t src_offset,
+ uint64_t dst_offset,
+ unsigned num_pages,
struct radeon_fence *fence);
+void r200_set_safe_registers(struct radeon_device *rdev);
/*
* r300,r350,rv350,rv380
@@ -159,6 +168,15 @@ extern uint32_t rv370_pcie_rreg(struct radeon_device *rdev, uint32_t reg);
extern void rv370_pcie_wreg(struct radeon_device *rdev, uint32_t reg, uint32_t v);
extern void rv370_set_pcie_lanes(struct radeon_device *rdev, int lanes);
extern int rv370_get_pcie_lanes(struct radeon_device *rdev);
+extern void r300_set_reg_safe(struct radeon_device *rdev);
+extern void r300_mc_program(struct radeon_device *rdev);
+extern void r300_mc_init(struct radeon_device *rdev);
+extern void r300_clock_startup(struct radeon_device *rdev);
+extern int r300_mc_wait_for_idle(struct radeon_device *rdev);
+extern int rv370_pcie_gart_init(struct radeon_device *rdev);
+extern void rv370_pcie_gart_fini(struct radeon_device *rdev);
+extern int rv370_pcie_gart_enable(struct radeon_device *rdev);
+extern void rv370_pcie_gart_disable(struct radeon_device *rdev);
/*
* r420,r423,rv410
@@ -168,6 +186,10 @@ extern void r420_fini(struct radeon_device *rdev);
extern int r420_suspend(struct radeon_device *rdev);
extern int r420_resume(struct radeon_device *rdev);
extern void r420_pm_init_profile(struct radeon_device *rdev);
+extern u32 r420_mc_rreg(struct radeon_device *rdev, u32 reg);
+extern void r420_mc_wreg(struct radeon_device *rdev, u32 reg, u32 v);
+extern int r420_debugfs_pipes_info_init(struct radeon_device *rdev);
+extern void r420_pipes_init(struct radeon_device *rdev);
/*
* rs400,rs480
@@ -180,6 +202,12 @@ void rs400_gart_tlb_flush(struct radeon_device *rdev);
int rs400_gart_set_page(struct radeon_device *rdev, int i, uint64_t addr);
uint32_t rs400_mc_rreg(struct radeon_device *rdev, uint32_t reg);
void rs400_mc_wreg(struct radeon_device *rdev, uint32_t reg, uint32_t v);
+int rs400_gart_init(struct radeon_device *rdev);
+int rs400_gart_enable(struct radeon_device *rdev);
+void rs400_gart_adjust_size(struct radeon_device *rdev);
+void rs400_gart_disable(struct radeon_device *rdev);
+void rs400_gart_fini(struct radeon_device *rdev);
+
/*
* rs600.
@@ -191,6 +219,7 @@ extern int rs600_suspend(struct radeon_device *rdev);
extern int rs600_resume(struct radeon_device *rdev);
int rs600_irq_set(struct radeon_device *rdev);
int rs600_irq_process(struct radeon_device *rdev);
+void rs600_irq_disable(struct radeon_device *rdev);
u32 rs600_get_vblank_counter(struct radeon_device *rdev, int crtc);
void rs600_gart_tlb_flush(struct radeon_device *rdev);
int rs600_gart_set_page(struct radeon_device *rdev, int i, uint64_t addr);
@@ -205,6 +234,11 @@ void rs600_hpd_set_polarity(struct radeon_device *rdev,
extern void rs600_pm_misc(struct radeon_device *rdev);
extern void rs600_pm_prepare(struct radeon_device *rdev);
extern void rs600_pm_finish(struct radeon_device *rdev);
+extern void rs600_pre_page_flip(struct radeon_device *rdev, int crtc);
+extern u32 rs600_page_flip(struct radeon_device *rdev, int crtc, u64 crtc_base);
+extern void rs600_post_page_flip(struct radeon_device *rdev, int crtc);
+void rs600_set_safe_registers(struct radeon_device *rdev);
+
/*
* rs690,rs740
@@ -216,10 +250,21 @@ int rs690_suspend(struct radeon_device *rdev);
uint32_t rs690_mc_rreg(struct radeon_device *rdev, uint32_t reg);
void rs690_mc_wreg(struct radeon_device *rdev, uint32_t reg, uint32_t v);
void rs690_bandwidth_update(struct radeon_device *rdev);
+void rs690_line_buffer_adjust(struct radeon_device *rdev,
+ struct drm_display_mode *mode1,
+ struct drm_display_mode *mode2);
/*
* rv515
*/
+struct rv515_mc_save {
+ u32 d1vga_control;
+ u32 d2vga_control;
+ u32 vga_render_control;
+ u32 vga_hdp_control;
+ u32 d1crtc_control;
+ u32 d2crtc_control;
+};
int rv515_init(struct radeon_device *rdev);
void rv515_fini(struct radeon_device *rdev);
uint32_t rv515_mc_rreg(struct radeon_device *rdev, uint32_t reg);
@@ -230,6 +275,14 @@ void rv515_pcie_wreg(struct radeon_device *rdev, uint32_t reg, uint32_t v);
void rv515_bandwidth_update(struct radeon_device *rdev);
int rv515_resume(struct radeon_device *rdev);
int rv515_suspend(struct radeon_device *rdev);
+void rv515_bandwidth_avivo_update(struct radeon_device *rdev);
+void rv515_vga_render_disable(struct radeon_device *rdev);
+void rv515_set_safe_registers(struct radeon_device *rdev);
+void rv515_mc_stop(struct radeon_device *rdev, struct rv515_mc_save *save);
+void rv515_mc_resume(struct radeon_device *rdev, struct rv515_mc_save *save);
+void rv515_clock_startup(struct radeon_device *rdev);
+void rv515_debugfs(struct radeon_device *rdev);
+
/*
* r520,rv530,rv560,rv570,r580
@@ -278,6 +331,8 @@ extern void r600_pm_misc(struct radeon_device *rdev);
extern void r600_pm_init_profile(struct radeon_device *rdev);
extern void rs780_pm_init_profile(struct radeon_device *rdev);
extern void r600_pm_get_dynpm_state(struct radeon_device *rdev);
+extern void r600_set_pcie_lanes(struct radeon_device *rdev, int lanes);
+extern int r600_get_pcie_lanes(struct radeon_device *rdev);
/*
* rv770,rv730,rv710,rv740
@@ -287,6 +342,7 @@ void rv770_fini(struct radeon_device *rdev);
int rv770_suspend(struct radeon_device *rdev);
int rv770_resume(struct radeon_device *rdev);
extern void rv770_pm_misc(struct radeon_device *rdev);
+extern u32 rv770_page_flip(struct radeon_device *rdev, int crtc, u64 crtc_base);
/*
* evergreen
@@ -314,5 +370,8 @@ extern int evergreen_cs_parse(struct radeon_cs_parser *p);
extern void evergreen_pm_misc(struct radeon_device *rdev);
extern void evergreen_pm_prepare(struct radeon_device *rdev);
extern void evergreen_pm_finish(struct radeon_device *rdev);
+extern void evergreen_pre_page_flip(struct radeon_device *rdev, int crtc);
+extern u32 evergreen_page_flip(struct radeon_device *rdev, int crtc, u64 crtc_base);
+extern void evergreen_post_page_flip(struct radeon_device *rdev, int crtc);
#endif
diff --git a/drivers/gpu/drm/radeon/radeon_atombios.c b/drivers/gpu/drm/radeon/radeon_atombios.c
index bc5a2c3382d..1573202a641 100644
--- a/drivers/gpu/drm/radeon/radeon_atombios.c
+++ b/drivers/gpu/drm/radeon/radeon_atombios.c
@@ -37,7 +37,7 @@ radeon_get_encoder_enum(struct drm_device *dev, uint32_t supported_device,
extern void radeon_link_encoder_connector(struct drm_device *dev);
extern void
radeon_add_atom_encoder(struct drm_device *dev, uint32_t encoder_enum,
- uint32_t supported_device);
+ uint32_t supported_device, u16 caps);
/* from radeon_connector.c */
extern void
@@ -313,7 +313,6 @@ static bool radeon_atom_apply_quirks(struct drm_device *dev,
uint16_t *line_mux,
struct radeon_hpd *hpd)
{
- struct radeon_device *rdev = dev->dev_private;
/* Asus M2A-VM HDMI board lists the DVI port as HDMI */
if ((dev->pdev->device == 0x791e) &&
@@ -388,6 +387,17 @@ static bool radeon_atom_apply_quirks(struct drm_device *dev,
*line_mux = 0x90;
}
+ /* mac rv630 */
+ if ((dev->pdev->device == 0x9588) &&
+ (dev->pdev->subsystem_vendor == 0x106b) &&
+ (dev->pdev->subsystem_device == 0x00a6)) {
+ if ((supported_device == ATOM_DEVICE_TV1_SUPPORT) &&
+ (*connector_type == DRM_MODE_CONNECTOR_DVII)) {
+ *connector_type = DRM_MODE_CONNECTOR_9PinDIN;
+ *line_mux = CONNECTOR_7PIN_DIN_ENUM_ID1;
+ }
+ }
+
/* ASUS HD 3600 XT board lists the DVI port as HDMI */
if ((dev->pdev->device == 0x9598) &&
(dev->pdev->subsystem_vendor == 0x1043) &&
@@ -425,21 +435,23 @@ static bool radeon_atom_apply_quirks(struct drm_device *dev,
}
}
- /* Acer laptop reports DVI-D as DVI-I and hpd pins reversed */
+ /* Acer laptop (Acer TravelMate 5730G) has an HDMI port
+ * on the laptop and a DVI port on the docking station and
+ * both share the same encoder, hpd pin, and ddc line.
+ * So while the bios table is technically correct,
+ * we drop the DVI port here since xrandr has no concept of
+ * encoders and will try to drive both connectors
+ * with different crtcs, which isn't possible on the hardware
+ * side and leaves no crtcs for LVDS or VGA.
+ */
if ((dev->pdev->device == 0x95c4) &&
(dev->pdev->subsystem_vendor == 0x1025) &&
(dev->pdev->subsystem_device == 0x013c)) {
- struct radeon_gpio_rec gpio;
-
if ((*connector_type == DRM_MODE_CONNECTOR_DVII) &&
(supported_device == ATOM_DEVICE_DFP1_SUPPORT)) {
- gpio = radeon_lookup_gpio(rdev, 6);
- *hpd = radeon_atom_get_hpd_info_from_gpio(rdev, &gpio);
+ /* actually it's a DVI-D port not DVI-I */
*connector_type = DRM_MODE_CONNECTOR_DVID;
- } else if ((*connector_type == DRM_MODE_CONNECTOR_HDMIA) &&
- (supported_device == ATOM_DEVICE_DFP1_SUPPORT)) {
- gpio = radeon_lookup_gpio(rdev, 7);
- *hpd = radeon_atom_get_hpd_info_from_gpio(rdev, &gpio);
+ return false;
}
}
@@ -525,6 +537,7 @@ bool radeon_get_atom_connector_info_from_object_table(struct drm_device *dev)
u16 size, data_offset;
u8 frev, crev;
ATOM_CONNECTOR_OBJECT_TABLE *con_obj;
+ ATOM_ENCODER_OBJECT_TABLE *enc_obj;
ATOM_OBJECT_TABLE *router_obj;
ATOM_DISPLAY_OBJECT_PATH_TABLE *path_obj;
ATOM_OBJECT_HEADER *obj_header;
@@ -549,6 +562,9 @@ bool radeon_get_atom_connector_info_from_object_table(struct drm_device *dev)
con_obj = (ATOM_CONNECTOR_OBJECT_TABLE *)
(ctx->bios + data_offset +
le16_to_cpu(obj_header->usConnectorObjectTableOffset));
+ enc_obj = (ATOM_ENCODER_OBJECT_TABLE *)
+ (ctx->bios + data_offset +
+ le16_to_cpu(obj_header->usEncoderObjectTableOffset));
router_obj = (ATOM_OBJECT_TABLE *)
(ctx->bios + data_offset +
le16_to_cpu(obj_header->usRouterObjectTableOffset));
@@ -654,14 +670,35 @@ bool radeon_get_atom_connector_info_from_object_table(struct drm_device *dev)
OBJECT_TYPE_MASK) >> OBJECT_TYPE_SHIFT;
if (grph_obj_type == GRAPH_OBJECT_TYPE_ENCODER) {
- u16 encoder_obj = le16_to_cpu(path->usGraphicObjIds[j]);
-
- radeon_add_atom_encoder(dev,
- encoder_obj,
- le16_to_cpu
- (path->
- usDeviceTag));
+ for (k = 0; k < enc_obj->ucNumberOfObjects; k++) {
+ u16 encoder_obj = le16_to_cpu(enc_obj->asObjects[k].usObjectID);
+ if (le16_to_cpu(path->usGraphicObjIds[j]) == encoder_obj) {
+ ATOM_COMMON_RECORD_HEADER *record = (ATOM_COMMON_RECORD_HEADER *)
+ (ctx->bios + data_offset +
+ le16_to_cpu(enc_obj->asObjects[k].usRecordOffset));
+ ATOM_ENCODER_CAP_RECORD *cap_record;
+ u16 caps = 0;
+ while (record->ucRecordType > 0 &&
+ record->ucRecordType <= ATOM_MAX_OBJECT_RECORD_NUMBER) {
+ switch (record->ucRecordType) {
+ case ATOM_ENCODER_CAP_RECORD_TYPE:
+ cap_record = (ATOM_ENCODER_CAP_RECORD *)record;
+ caps = le16_to_cpu(cap_record->usEncoderCap);
+ break;
+ }
+ record = (ATOM_COMMON_RECORD_HEADER *)
+ ((char *)record + record->ucRecordSize);
+ }
+ radeon_add_atom_encoder(dev,
+ encoder_obj,
+ le16_to_cpu
+ (path->
+ usDeviceTag),
+ caps);
+ }
+ }
} else if (grph_obj_type == GRAPH_OBJECT_TYPE_ROUTER) {
for (k = 0; k < router_obj->ucNumberOfObjects; k++) {
u16 router_obj_id = le16_to_cpu(router_obj->asObjects[k].usObjectID);
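
The caps lookup added above follows the standard ATOM record-walk: records are variable length, each begins with an ATOM_COMMON_RECORD_HEADER whose ucRecordSize is the stride to the next record, and a type of 0 (or one past the known maximum) terminates the chain. Distilled into a standalone helper (a sketch only; the function name is invented and the diff's inline loop is the authoritative version):

	/* Sketch: walk a chain of variable-length ATOM records starting at
	 * 'start'. As in the loop above, a later cap record overwrites an
	 * earlier one. */
	static u16 atom_get_encoder_caps(u8 *start)
	{
		ATOM_COMMON_RECORD_HEADER *record =
			(ATOM_COMMON_RECORD_HEADER *)start;
		u16 caps = 0;

		while (record->ucRecordType > 0 &&
		       record->ucRecordType <= ATOM_MAX_OBJECT_RECORD_NUMBER) {
			if (record->ucRecordType == ATOM_ENCODER_CAP_RECORD_TYPE)
				caps = le16_to_cpu(((ATOM_ENCODER_CAP_RECORD *)
						    record)->usEncoderCap);
			/* ucRecordSize includes the header, so it is the stride */
			record = (ATOM_COMMON_RECORD_HEADER *)
				((char *)record + record->ucRecordSize);
		}
		return caps;
	}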
@@ -995,7 +1032,8 @@ bool radeon_get_atom_connector_info_from_supported_devices_table(struct
radeon_get_encoder_enum(dev,
(1 << i),
dac),
- (1 << i));
+ (1 << i),
+ 0);
else
radeon_add_legacy_encoder(dev,
radeon_get_encoder_enum(dev,
@@ -1074,6 +1112,7 @@ union firmware_info {
ATOM_FIRMWARE_INFO_V1_3 info_13;
ATOM_FIRMWARE_INFO_V1_4 info_14;
ATOM_FIRMWARE_INFO_V2_1 info_21;
+ ATOM_FIRMWARE_INFO_V2_2 info_22;
};
bool radeon_atom_get_clock_info(struct drm_device *dev)
@@ -1148,8 +1187,12 @@ bool radeon_atom_get_clock_info(struct drm_device *dev)
*p2pll = *p1pll;
/* system clock */
- spll->reference_freq =
- le16_to_cpu(firmware_info->info.usReferenceClock);
+ if (ASIC_IS_DCE4(rdev))
+ spll->reference_freq =
+ le16_to_cpu(firmware_info->info_21.usCoreReferenceClock);
+ else
+ spll->reference_freq =
+ le16_to_cpu(firmware_info->info.usReferenceClock);
spll->reference_div = 0;
spll->pll_out_min =
@@ -1171,8 +1214,12 @@ bool radeon_atom_get_clock_info(struct drm_device *dev)
le16_to_cpu(firmware_info->info.usMaxEngineClockPLL_Input);
/* memory clock */
- mpll->reference_freq =
- le16_to_cpu(firmware_info->info.usReferenceClock);
+ if (ASIC_IS_DCE4(rdev))
+ mpll->reference_freq =
+ le16_to_cpu(firmware_info->info_21.usMemoryReferenceClock);
+ else
+ mpll->reference_freq =
+ le16_to_cpu(firmware_info->info.usReferenceClock);
mpll->reference_div = 0;
mpll->pll_out_min =
@@ -1201,8 +1248,12 @@ bool radeon_atom_get_clock_info(struct drm_device *dev)
if (ASIC_IS_DCE4(rdev)) {
rdev->clock.default_dispclk =
le32_to_cpu(firmware_info->info_21.ulDefaultDispEngineClkFreq);
- if (rdev->clock.default_dispclk == 0)
- rdev->clock.default_dispclk = 60000; /* 600 Mhz */
+ if (rdev->clock.default_dispclk == 0) {
+ if (ASIC_IS_DCE5(rdev))
+ rdev->clock.default_dispclk = 54000; /* 540 MHz */
+ else
+ rdev->clock.default_dispclk = 60000; /* 600 MHz */
+ }
rdev->clock.dp_extclk =
le16_to_cpu(firmware_info->info_21.usUniphyDPModeExtClkFreq);
}
@@ -1337,6 +1388,43 @@ bool radeon_atombios_get_ppll_ss_info(struct radeon_device *rdev,
return false;
}
+static void radeon_atombios_get_igp_ss_overrides(struct radeon_device *rdev,
+ struct radeon_atom_ss *ss,
+ int id)
+{
+ struct radeon_mode_info *mode_info = &rdev->mode_info;
+ int index = GetIndexIntoMasterTable(DATA, IntegratedSystemInfo);
+ u16 data_offset, size;
+ struct _ATOM_INTEGRATED_SYSTEM_INFO_V6 *igp_info;
+ u8 frev, crev;
+ u16 percentage = 0, rate = 0;
+
+ /* get any igp specific overrides */
+ if (atom_parse_data_header(mode_info->atom_context, index, &size,
+ &frev, &crev, &data_offset)) {
+ igp_info = (struct _ATOM_INTEGRATED_SYSTEM_INFO_V6 *)
+ (mode_info->atom_context->bios + data_offset);
+ switch (id) {
+ case ASIC_INTERNAL_SS_ON_TMDS:
+ percentage = le16_to_cpu(igp_info->usDVISSPercentage);
+ rate = le16_to_cpu(igp_info->usDVISSpreadRateIn10Hz);
+ break;
+ case ASIC_INTERNAL_SS_ON_HDMI:
+ percentage = le16_to_cpu(igp_info->usHDMISSPercentage);
+ rate = le16_to_cpu(igp_info->usHDMISSpreadRateIn10Hz);
+ break;
+ case ASIC_INTERNAL_SS_ON_LVDS:
+ percentage = le16_to_cpu(igp_info->usLvdsSSPercentage);
+ rate = le16_to_cpu(igp_info->usLvdsSSpreadRateIn10Hz);
+ break;
+ }
+ if (percentage)
+ ss->percentage = percentage;
+ if (rate)
+ ss->rate = rate;
+ }
+}
+
union asic_ss_info {
struct _ATOM_ASIC_INTERNAL_SS_INFO info;
struct _ATOM_ASIC_INTERNAL_SS_INFO_V2 info_2;
@@ -1401,6 +1489,8 @@ bool radeon_atombios_get_asic_ss_info(struct radeon_device *rdev,
le16_to_cpu(ss_info->info_3.asSpreadSpectrum[i].usSpreadSpectrumPercentage);
ss->type = ss_info->info_3.asSpreadSpectrum[i].ucSpreadSpectrumMode;
ss->rate = le16_to_cpu(ss_info->info_3.asSpreadSpectrum[i].usSpreadRateIn10Hz);
+ if (rdev->flags & RADEON_IS_IGP)
+ radeon_atombios_get_igp_ss_overrides(rdev, ss, id);
return true;
}
}
@@ -1477,6 +1567,9 @@ struct radeon_encoder_atom_dig *radeon_atombios_get_lvds_info(struct
if (misc & ATOM_DOUBLE_CLOCK_MODE)
lvds->native_mode.flags |= DRM_MODE_FLAG_DBLSCAN;
+ lvds->native_mode.width_mm = lvds_info->info.sLCDTiming.usImageHSize;
+ lvds->native_mode.height_mm = lvds_info->info.sLCDTiming.usImageVSize;
+
/* set crtc values */
drm_mode_set_crtcinfo(&lvds->native_mode, CRTC_INTERLACE_HALVE_V);
@@ -1489,6 +1582,59 @@ struct radeon_encoder_atom_dig *radeon_atombios_get_lvds_info(struct
else
lvds->linkb = false;
+ /* parse the lcd record table */
+ if (lvds_info->info.usModePatchTableOffset) {
+ ATOM_FAKE_EDID_PATCH_RECORD *fake_edid_record;
+ ATOM_PANEL_RESOLUTION_PATCH_RECORD *panel_res_record;
+ bool bad_record = false;
+ u8 *record = (u8 *)(mode_info->atom_context->bios +
+ data_offset +
+ lvds_info->info.usModePatchTableOffset);
+ while (*record != ATOM_RECORD_END_TYPE) {
+ switch (*record) {
+ case LCD_MODE_PATCH_RECORD_MODE_TYPE:
+ record += sizeof(ATOM_PATCH_RECORD_MODE);
+ break;
+ case LCD_RTS_RECORD_TYPE:
+ record += sizeof(ATOM_LCD_RTS_RECORD);
+ break;
+ case LCD_CAP_RECORD_TYPE:
+ record += sizeof(ATOM_LCD_MODE_CONTROL_CAP);
+ break;
+ case LCD_FAKE_EDID_PATCH_RECORD_TYPE:
+ fake_edid_record = (ATOM_FAKE_EDID_PATCH_RECORD *)record;
+ if (fake_edid_record->ucFakeEDIDLength) {
+ struct edid *edid;
+ int edid_size =
+ max((int)EDID_LENGTH, (int)fake_edid_record->ucFakeEDIDLength);
+ edid = kmalloc(edid_size, GFP_KERNEL);
+ if (edid) {
+ memcpy((u8 *)edid, (u8 *)&fake_edid_record->ucFakeEDIDString[0],
+ fake_edid_record->ucFakeEDIDLength);
+
+ if (drm_edid_is_valid(edid))
+ rdev->mode_info.bios_hardcoded_edid = edid;
+ else
+ kfree(edid);
+ }
+ }
+ record += sizeof(ATOM_FAKE_EDID_PATCH_RECORD);
+ break;
+ case LCD_PANEL_RESOLUTION_RECORD_TYPE:
+ panel_res_record = (ATOM_PANEL_RESOLUTION_PATCH_RECORD *)record;
+ lvds->native_mode.width_mm = panel_res_record->usHSize;
+ lvds->native_mode.height_mm = panel_res_record->usVSize;
+ record += sizeof(ATOM_PANEL_RESOLUTION_PATCH_RECORD);
+ break;
+ default:
+ DRM_ERROR("Bad LCD record %d\n", *record);
+ bad_record = true;
+ break;
+ }
+ if (bad_record)
+ break;
+ }
+ }
}
return lvds;
}
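
The fake-EDID branch above only keeps a BIOS-embedded EDID that passes the DRM core's validation; an invalid blob is freed and the connector falls back to normal DDC probing. The same guard as a standalone sketch (function name invented; behavior as in the hunk above):

	/* Sketch: keep a BIOS-provided EDID only if it validates; the buffer
	 * is padded to at least EDID_LENGTH, matching the hunk above. */
	static struct edid *radeon_copy_bios_edid(const u8 *blob, int len)
	{
		int size = max((int)EDID_LENGTH, len);
		struct edid *edid = kmalloc(size, GFP_KERNEL);

		if (!edid)
			return NULL;
		memcpy(edid, blob, len);
		if (!drm_edid_is_valid(edid)) {
			kfree(edid);
			return NULL;	/* fall back to DDC probing */
		}
		return edid;
	}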
@@ -1740,496 +1886,614 @@ static const char *pp_lib_thermal_controller_names[] = {
"RV6xx",
"RV770",
"adt7473",
+ "NONE",
"External GPIO",
"Evergreen",
- "adt7473 with internal",
-
+ "emc2103",
+ "Sumo",
+ "Northern Islands",
};
union power_info {
struct _ATOM_POWERPLAY_INFO info;
struct _ATOM_POWERPLAY_INFO_V2 info_2;
struct _ATOM_POWERPLAY_INFO_V3 info_3;
- struct _ATOM_PPLIB_POWERPLAYTABLE info_4;
+ struct _ATOM_PPLIB_POWERPLAYTABLE pplib;
+ struct _ATOM_PPLIB_POWERPLAYTABLE2 pplib2;
+ struct _ATOM_PPLIB_POWERPLAYTABLE3 pplib3;
};
-void radeon_atombios_get_power_modes(struct radeon_device *rdev)
+union pplib_clock_info {
+ struct _ATOM_PPLIB_R600_CLOCK_INFO r600;
+ struct _ATOM_PPLIB_RS780_CLOCK_INFO rs780;
+ struct _ATOM_PPLIB_EVERGREEN_CLOCK_INFO evergreen;
+ struct _ATOM_PPLIB_SUMO_CLOCK_INFO sumo;
+};
+
+union pplib_power_state {
+ struct _ATOM_PPLIB_STATE v1;
+ struct _ATOM_PPLIB_STATE_V2 v2;
+};
+
+static void radeon_atombios_parse_misc_flags_1_3(struct radeon_device *rdev,
+ int state_index,
+ u32 misc, u32 misc2)
+{
+ rdev->pm.power_state[state_index].misc = misc;
+ rdev->pm.power_state[state_index].misc2 = misc2;
+ /* order matters! */
+ if (misc & ATOM_PM_MISCINFO_POWER_SAVING_MODE)
+ rdev->pm.power_state[state_index].type =
+ POWER_STATE_TYPE_POWERSAVE;
+ if (misc & ATOM_PM_MISCINFO_DEFAULT_DC_STATE_ENTRY_TRUE)
+ rdev->pm.power_state[state_index].type =
+ POWER_STATE_TYPE_BATTERY;
+ if (misc & ATOM_PM_MISCINFO_DEFAULT_LOW_DC_STATE_ENTRY_TRUE)
+ rdev->pm.power_state[state_index].type =
+ POWER_STATE_TYPE_BATTERY;
+ if (misc & ATOM_PM_MISCINFO_LOAD_BALANCE_EN)
+ rdev->pm.power_state[state_index].type =
+ POWER_STATE_TYPE_BALANCED;
+ if (misc & ATOM_PM_MISCINFO_3D_ACCELERATION_EN) {
+ rdev->pm.power_state[state_index].type =
+ POWER_STATE_TYPE_PERFORMANCE;
+ rdev->pm.power_state[state_index].flags &=
+ ~RADEON_PM_STATE_SINGLE_DISPLAY_ONLY;
+ }
+ if (misc2 & ATOM_PM_MISCINFO2_SYSTEM_AC_LITE_MODE)
+ rdev->pm.power_state[state_index].type =
+ POWER_STATE_TYPE_BALANCED;
+ if (misc & ATOM_PM_MISCINFO_DRIVER_DEFAULT_MODE) {
+ rdev->pm.power_state[state_index].type =
+ POWER_STATE_TYPE_DEFAULT;
+ rdev->pm.default_power_state_index = state_index;
+ rdev->pm.power_state[state_index].default_clock_mode =
+ &rdev->pm.power_state[state_index].clock_info[0];
+ } else if (state_index == 0) {
+ rdev->pm.power_state[state_index].clock_info[0].flags |=
+ RADEON_PM_MODE_NO_DISPLAY;
+ }
+}
+
+static int radeon_atombios_parse_power_table_1_3(struct radeon_device *rdev)
{
struct radeon_mode_info *mode_info = &rdev->mode_info;
+ u32 misc, misc2 = 0;
+ int num_modes = 0, i;
+ int state_index = 0;
+ struct radeon_i2c_bus_rec i2c_bus;
+ union power_info *power_info;
int index = GetIndexIntoMasterTable(DATA, PowerPlayInfo);
- u16 data_offset;
+ u16 data_offset;
u8 frev, crev;
- u32 misc, misc2 = 0, sclk, mclk;
- union power_info *power_info;
- struct _ATOM_PPLIB_NONCLOCK_INFO *non_clock_info;
- struct _ATOM_PPLIB_STATE *power_state;
- int num_modes = 0, i, j;
- int state_index = 0, mode_index = 0;
- struct radeon_i2c_bus_rec i2c_bus;
-
- rdev->pm.default_power_state_index = -1;
- if (atom_parse_data_header(mode_info->atom_context, index, NULL,
- &frev, &crev, &data_offset)) {
- power_info = (union power_info *)(mode_info->atom_context->bios + data_offset);
- if (frev < 4) {
- /* add the i2c bus for thermal/fan chip */
- if (power_info->info.ucOverdriveThermalController > 0) {
- DRM_INFO("Possible %s thermal controller at 0x%02x\n",
- thermal_controller_names[power_info->info.ucOverdriveThermalController],
- power_info->info.ucOverdriveControllerAddress >> 1);
- i2c_bus = radeon_lookup_i2c_gpio(rdev, power_info->info.ucOverdriveI2cLine);
- rdev->pm.i2c_bus = radeon_i2c_lookup(rdev, &i2c_bus);
- if (rdev->pm.i2c_bus) {
- struct i2c_board_info info = { };
- const char *name = thermal_controller_names[power_info->info.
- ucOverdriveThermalController];
- info.addr = power_info->info.ucOverdriveControllerAddress >> 1;
- strlcpy(info.type, name, sizeof(info.type));
- i2c_new_device(&rdev->pm.i2c_bus->adapter, &info);
- }
+ if (!atom_parse_data_header(mode_info->atom_context, index, NULL,
+ &frev, &crev, &data_offset))
+ return state_index;
+ power_info = (union power_info *)(mode_info->atom_context->bios + data_offset);
+
+ /* add the i2c bus for thermal/fan chip */
+ if (power_info->info.ucOverdriveThermalController > 0) {
+ DRM_INFO("Possible %s thermal controller at 0x%02x\n",
+ thermal_controller_names[power_info->info.ucOverdriveThermalController],
+ power_info->info.ucOverdriveControllerAddress >> 1);
+ i2c_bus = radeon_lookup_i2c_gpio(rdev, power_info->info.ucOverdriveI2cLine);
+ rdev->pm.i2c_bus = radeon_i2c_lookup(rdev, &i2c_bus);
+ if (rdev->pm.i2c_bus) {
+ struct i2c_board_info info = { };
+ const char *name = thermal_controller_names[power_info->info.
+ ucOverdriveThermalController];
+ info.addr = power_info->info.ucOverdriveControllerAddress >> 1;
+ strlcpy(info.type, name, sizeof(info.type));
+ i2c_new_device(&rdev->pm.i2c_bus->adapter, &info);
+ }
+ }
+ num_modes = power_info->info.ucNumOfPowerModeEntries;
+ if (num_modes > ATOM_MAX_NUMBEROF_POWER_BLOCK)
+ num_modes = ATOM_MAX_NUMBEROF_POWER_BLOCK;
+ /* last mode is usually default, array is low to high */
+ for (i = 0; i < num_modes; i++) {
+ rdev->pm.power_state[state_index].clock_info[0].voltage.type = VOLTAGE_NONE;
+ switch (frev) {
+ case 1:
+ rdev->pm.power_state[state_index].num_clock_modes = 1;
+ rdev->pm.power_state[state_index].clock_info[0].mclk =
+ le16_to_cpu(power_info->info.asPowerPlayInfo[i].usMemoryClock);
+ rdev->pm.power_state[state_index].clock_info[0].sclk =
+ le16_to_cpu(power_info->info.asPowerPlayInfo[i].usEngineClock);
+ /* skip invalid modes */
+ if ((rdev->pm.power_state[state_index].clock_info[0].mclk == 0) ||
+ (rdev->pm.power_state[state_index].clock_info[0].sclk == 0))
+ continue;
+ rdev->pm.power_state[state_index].pcie_lanes =
+ power_info->info.asPowerPlayInfo[i].ucNumPciELanes;
+ misc = le32_to_cpu(power_info->info.asPowerPlayInfo[i].ulMiscInfo);
+ if ((misc & ATOM_PM_MISCINFO_VOLTAGE_DROP_SUPPORT) ||
+ (misc & ATOM_PM_MISCINFO_VOLTAGE_DROP_ACTIVE_HIGH)) {
+ rdev->pm.power_state[state_index].clock_info[0].voltage.type =
+ VOLTAGE_GPIO;
+ rdev->pm.power_state[state_index].clock_info[0].voltage.gpio =
+ radeon_lookup_gpio(rdev,
+ power_info->info.asPowerPlayInfo[i].ucVoltageDropIndex);
+ if (misc & ATOM_PM_MISCINFO_VOLTAGE_DROP_ACTIVE_HIGH)
+ rdev->pm.power_state[state_index].clock_info[0].voltage.active_high =
+ true;
+ else
+ rdev->pm.power_state[state_index].clock_info[0].voltage.active_high =
+ false;
+ } else if (misc & ATOM_PM_MISCINFO_PROGRAM_VOLTAGE) {
+ rdev->pm.power_state[state_index].clock_info[0].voltage.type =
+ VOLTAGE_VDDC;
+ rdev->pm.power_state[state_index].clock_info[0].voltage.vddc_id =
+ power_info->info.asPowerPlayInfo[i].ucVoltageDropIndex;
}
- num_modes = power_info->info.ucNumOfPowerModeEntries;
- if (num_modes > ATOM_MAX_NUMBEROF_POWER_BLOCK)
- num_modes = ATOM_MAX_NUMBEROF_POWER_BLOCK;
- /* last mode is usually default, array is low to high */
- for (i = 0; i < num_modes; i++) {
- rdev->pm.power_state[state_index].clock_info[0].voltage.type = VOLTAGE_NONE;
- switch (frev) {
- case 1:
- rdev->pm.power_state[state_index].num_clock_modes = 1;
- rdev->pm.power_state[state_index].clock_info[0].mclk =
- le16_to_cpu(power_info->info.asPowerPlayInfo[i].usMemoryClock);
- rdev->pm.power_state[state_index].clock_info[0].sclk =
- le16_to_cpu(power_info->info.asPowerPlayInfo[i].usEngineClock);
- /* skip invalid modes */
- if ((rdev->pm.power_state[state_index].clock_info[0].mclk == 0) ||
- (rdev->pm.power_state[state_index].clock_info[0].sclk == 0))
- continue;
- rdev->pm.power_state[state_index].pcie_lanes =
- power_info->info.asPowerPlayInfo[i].ucNumPciELanes;
- misc = le32_to_cpu(power_info->info.asPowerPlayInfo[i].ulMiscInfo);
- if ((misc & ATOM_PM_MISCINFO_VOLTAGE_DROP_SUPPORT) ||
- (misc & ATOM_PM_MISCINFO_VOLTAGE_DROP_ACTIVE_HIGH)) {
- rdev->pm.power_state[state_index].clock_info[0].voltage.type =
- VOLTAGE_GPIO;
- rdev->pm.power_state[state_index].clock_info[0].voltage.gpio =
- radeon_lookup_gpio(rdev,
- power_info->info.asPowerPlayInfo[i].ucVoltageDropIndex);
- if (misc & ATOM_PM_MISCINFO_VOLTAGE_DROP_ACTIVE_HIGH)
- rdev->pm.power_state[state_index].clock_info[0].voltage.active_high =
- true;
- else
- rdev->pm.power_state[state_index].clock_info[0].voltage.active_high =
- false;
- } else if (misc & ATOM_PM_MISCINFO_PROGRAM_VOLTAGE) {
- rdev->pm.power_state[state_index].clock_info[0].voltage.type =
- VOLTAGE_VDDC;
- rdev->pm.power_state[state_index].clock_info[0].voltage.vddc_id =
- power_info->info.asPowerPlayInfo[i].ucVoltageDropIndex;
- }
- rdev->pm.power_state[state_index].flags = RADEON_PM_STATE_SINGLE_DISPLAY_ONLY;
- rdev->pm.power_state[state_index].misc = misc;
- /* order matters! */
- if (misc & ATOM_PM_MISCINFO_POWER_SAVING_MODE)
- rdev->pm.power_state[state_index].type =
- POWER_STATE_TYPE_POWERSAVE;
- if (misc & ATOM_PM_MISCINFO_DEFAULT_DC_STATE_ENTRY_TRUE)
- rdev->pm.power_state[state_index].type =
- POWER_STATE_TYPE_BATTERY;
- if (misc & ATOM_PM_MISCINFO_DEFAULT_LOW_DC_STATE_ENTRY_TRUE)
- rdev->pm.power_state[state_index].type =
- POWER_STATE_TYPE_BATTERY;
- if (misc & ATOM_PM_MISCINFO_LOAD_BALANCE_EN)
- rdev->pm.power_state[state_index].type =
- POWER_STATE_TYPE_BALANCED;
- if (misc & ATOM_PM_MISCINFO_3D_ACCELERATION_EN) {
- rdev->pm.power_state[state_index].type =
- POWER_STATE_TYPE_PERFORMANCE;
- rdev->pm.power_state[state_index].flags &=
- ~RADEON_PM_STATE_SINGLE_DISPLAY_ONLY;
- }
- if (misc & ATOM_PM_MISCINFO_DRIVER_DEFAULT_MODE) {
- rdev->pm.power_state[state_index].type =
- POWER_STATE_TYPE_DEFAULT;
- rdev->pm.default_power_state_index = state_index;
- rdev->pm.power_state[state_index].default_clock_mode =
- &rdev->pm.power_state[state_index].clock_info[0];
- rdev->pm.power_state[state_index].flags &=
- ~RADEON_PM_STATE_SINGLE_DISPLAY_ONLY;
- } else if (state_index == 0) {
- rdev->pm.power_state[state_index].clock_info[0].flags |=
- RADEON_PM_MODE_NO_DISPLAY;
- }
- state_index++;
- break;
- case 2:
- rdev->pm.power_state[state_index].num_clock_modes = 1;
- rdev->pm.power_state[state_index].clock_info[0].mclk =
- le32_to_cpu(power_info->info_2.asPowerPlayInfo[i].ulMemoryClock);
- rdev->pm.power_state[state_index].clock_info[0].sclk =
- le32_to_cpu(power_info->info_2.asPowerPlayInfo[i].ulEngineClock);
- /* skip invalid modes */
- if ((rdev->pm.power_state[state_index].clock_info[0].mclk == 0) ||
- (rdev->pm.power_state[state_index].clock_info[0].sclk == 0))
- continue;
- rdev->pm.power_state[state_index].pcie_lanes =
- power_info->info_2.asPowerPlayInfo[i].ucNumPciELanes;
- misc = le32_to_cpu(power_info->info_2.asPowerPlayInfo[i].ulMiscInfo);
- misc2 = le32_to_cpu(power_info->info_2.asPowerPlayInfo[i].ulMiscInfo2);
- if ((misc & ATOM_PM_MISCINFO_VOLTAGE_DROP_SUPPORT) ||
- (misc & ATOM_PM_MISCINFO_VOLTAGE_DROP_ACTIVE_HIGH)) {
- rdev->pm.power_state[state_index].clock_info[0].voltage.type =
- VOLTAGE_GPIO;
- rdev->pm.power_state[state_index].clock_info[0].voltage.gpio =
- radeon_lookup_gpio(rdev,
- power_info->info_2.asPowerPlayInfo[i].ucVoltageDropIndex);
- if (misc & ATOM_PM_MISCINFO_VOLTAGE_DROP_ACTIVE_HIGH)
- rdev->pm.power_state[state_index].clock_info[0].voltage.active_high =
- true;
- else
- rdev->pm.power_state[state_index].clock_info[0].voltage.active_high =
- false;
- } else if (misc & ATOM_PM_MISCINFO_PROGRAM_VOLTAGE) {
- rdev->pm.power_state[state_index].clock_info[0].voltage.type =
- VOLTAGE_VDDC;
- rdev->pm.power_state[state_index].clock_info[0].voltage.vddc_id =
- power_info->info_2.asPowerPlayInfo[i].ucVoltageDropIndex;
- }
- rdev->pm.power_state[state_index].flags = RADEON_PM_STATE_SINGLE_DISPLAY_ONLY;
- rdev->pm.power_state[state_index].misc = misc;
- rdev->pm.power_state[state_index].misc2 = misc2;
- /* order matters! */
- if (misc & ATOM_PM_MISCINFO_POWER_SAVING_MODE)
- rdev->pm.power_state[state_index].type =
- POWER_STATE_TYPE_POWERSAVE;
- if (misc & ATOM_PM_MISCINFO_DEFAULT_DC_STATE_ENTRY_TRUE)
- rdev->pm.power_state[state_index].type =
- POWER_STATE_TYPE_BATTERY;
- if (misc & ATOM_PM_MISCINFO_DEFAULT_LOW_DC_STATE_ENTRY_TRUE)
- rdev->pm.power_state[state_index].type =
- POWER_STATE_TYPE_BATTERY;
- if (misc & ATOM_PM_MISCINFO_LOAD_BALANCE_EN)
- rdev->pm.power_state[state_index].type =
- POWER_STATE_TYPE_BALANCED;
- if (misc & ATOM_PM_MISCINFO_3D_ACCELERATION_EN) {
- rdev->pm.power_state[state_index].type =
- POWER_STATE_TYPE_PERFORMANCE;
- rdev->pm.power_state[state_index].flags &=
- ~RADEON_PM_STATE_SINGLE_DISPLAY_ONLY;
- }
- if (misc2 & ATOM_PM_MISCINFO2_SYSTEM_AC_LITE_MODE)
- rdev->pm.power_state[state_index].type =
- POWER_STATE_TYPE_BALANCED;
- if (misc2 & ATOM_PM_MISCINFO2_MULTI_DISPLAY_SUPPORT)
- rdev->pm.power_state[state_index].flags &=
- ~RADEON_PM_STATE_SINGLE_DISPLAY_ONLY;
- if (misc & ATOM_PM_MISCINFO_DRIVER_DEFAULT_MODE) {
- rdev->pm.power_state[state_index].type =
- POWER_STATE_TYPE_DEFAULT;
- rdev->pm.default_power_state_index = state_index;
- rdev->pm.power_state[state_index].default_clock_mode =
- &rdev->pm.power_state[state_index].clock_info[0];
- rdev->pm.power_state[state_index].flags &=
- ~RADEON_PM_STATE_SINGLE_DISPLAY_ONLY;
- } else if (state_index == 0) {
- rdev->pm.power_state[state_index].clock_info[0].flags |=
- RADEON_PM_MODE_NO_DISPLAY;
- }
- state_index++;
- break;
- case 3:
- rdev->pm.power_state[state_index].num_clock_modes = 1;
- rdev->pm.power_state[state_index].clock_info[0].mclk =
- le32_to_cpu(power_info->info_3.asPowerPlayInfo[i].ulMemoryClock);
- rdev->pm.power_state[state_index].clock_info[0].sclk =
- le32_to_cpu(power_info->info_3.asPowerPlayInfo[i].ulEngineClock);
- /* skip invalid modes */
- if ((rdev->pm.power_state[state_index].clock_info[0].mclk == 0) ||
- (rdev->pm.power_state[state_index].clock_info[0].sclk == 0))
- continue;
- rdev->pm.power_state[state_index].pcie_lanes =
- power_info->info_3.asPowerPlayInfo[i].ucNumPciELanes;
- misc = le32_to_cpu(power_info->info_3.asPowerPlayInfo[i].ulMiscInfo);
- misc2 = le32_to_cpu(power_info->info_3.asPowerPlayInfo[i].ulMiscInfo2);
- if ((misc & ATOM_PM_MISCINFO_VOLTAGE_DROP_SUPPORT) ||
- (misc & ATOM_PM_MISCINFO_VOLTAGE_DROP_ACTIVE_HIGH)) {
- rdev->pm.power_state[state_index].clock_info[0].voltage.type =
- VOLTAGE_GPIO;
- rdev->pm.power_state[state_index].clock_info[0].voltage.gpio =
- radeon_lookup_gpio(rdev,
- power_info->info_3.asPowerPlayInfo[i].ucVoltageDropIndex);
- if (misc & ATOM_PM_MISCINFO_VOLTAGE_DROP_ACTIVE_HIGH)
- rdev->pm.power_state[state_index].clock_info[0].voltage.active_high =
- true;
- else
- rdev->pm.power_state[state_index].clock_info[0].voltage.active_high =
- false;
- } else if (misc & ATOM_PM_MISCINFO_PROGRAM_VOLTAGE) {
- rdev->pm.power_state[state_index].clock_info[0].voltage.type =
- VOLTAGE_VDDC;
- rdev->pm.power_state[state_index].clock_info[0].voltage.vddc_id =
- power_info->info_3.asPowerPlayInfo[i].ucVoltageDropIndex;
- if (misc2 & ATOM_PM_MISCINFO2_VDDCI_DYNAMIC_VOLTAGE_EN) {
- rdev->pm.power_state[state_index].clock_info[0].voltage.vddci_enabled =
- true;
- rdev->pm.power_state[state_index].clock_info[0].voltage.vddci_id =
- power_info->info_3.asPowerPlayInfo[i].ucVDDCI_VoltageDropIndex;
- }
- }
- rdev->pm.power_state[state_index].flags = RADEON_PM_STATE_SINGLE_DISPLAY_ONLY;
- rdev->pm.power_state[state_index].misc = misc;
- rdev->pm.power_state[state_index].misc2 = misc2;
- /* order matters! */
- if (misc & ATOM_PM_MISCINFO_POWER_SAVING_MODE)
- rdev->pm.power_state[state_index].type =
- POWER_STATE_TYPE_POWERSAVE;
- if (misc & ATOM_PM_MISCINFO_DEFAULT_DC_STATE_ENTRY_TRUE)
- rdev->pm.power_state[state_index].type =
- POWER_STATE_TYPE_BATTERY;
- if (misc & ATOM_PM_MISCINFO_DEFAULT_LOW_DC_STATE_ENTRY_TRUE)
- rdev->pm.power_state[state_index].type =
- POWER_STATE_TYPE_BATTERY;
- if (misc & ATOM_PM_MISCINFO_LOAD_BALANCE_EN)
- rdev->pm.power_state[state_index].type =
- POWER_STATE_TYPE_BALANCED;
- if (misc & ATOM_PM_MISCINFO_3D_ACCELERATION_EN) {
- rdev->pm.power_state[state_index].type =
- POWER_STATE_TYPE_PERFORMANCE;
- rdev->pm.power_state[state_index].flags &=
- ~RADEON_PM_STATE_SINGLE_DISPLAY_ONLY;
- }
- if (misc2 & ATOM_PM_MISCINFO2_SYSTEM_AC_LITE_MODE)
- rdev->pm.power_state[state_index].type =
- POWER_STATE_TYPE_BALANCED;
- if (misc & ATOM_PM_MISCINFO_DRIVER_DEFAULT_MODE) {
- rdev->pm.power_state[state_index].type =
- POWER_STATE_TYPE_DEFAULT;
- rdev->pm.default_power_state_index = state_index;
- rdev->pm.power_state[state_index].default_clock_mode =
- &rdev->pm.power_state[state_index].clock_info[0];
- } else if (state_index == 0) {
- rdev->pm.power_state[state_index].clock_info[0].flags |=
- RADEON_PM_MODE_NO_DISPLAY;
- }
- state_index++;
- break;
- }
+ rdev->pm.power_state[state_index].flags = RADEON_PM_STATE_SINGLE_DISPLAY_ONLY;
+ radeon_atombios_parse_misc_flags_1_3(rdev, state_index, misc, 0);
+ state_index++;
+ break;
+ case 2:
+ rdev->pm.power_state[state_index].num_clock_modes = 1;
+ rdev->pm.power_state[state_index].clock_info[0].mclk =
+ le32_to_cpu(power_info->info_2.asPowerPlayInfo[i].ulMemoryClock);
+ rdev->pm.power_state[state_index].clock_info[0].sclk =
+ le32_to_cpu(power_info->info_2.asPowerPlayInfo[i].ulEngineClock);
+ /* skip invalid modes */
+ if ((rdev->pm.power_state[state_index].clock_info[0].mclk == 0) ||
+ (rdev->pm.power_state[state_index].clock_info[0].sclk == 0))
+ continue;
+ rdev->pm.power_state[state_index].pcie_lanes =
+ power_info->info_2.asPowerPlayInfo[i].ucNumPciELanes;
+ misc = le32_to_cpu(power_info->info_2.asPowerPlayInfo[i].ulMiscInfo);
+ misc2 = le32_to_cpu(power_info->info_2.asPowerPlayInfo[i].ulMiscInfo2);
+ if ((misc & ATOM_PM_MISCINFO_VOLTAGE_DROP_SUPPORT) ||
+ (misc & ATOM_PM_MISCINFO_VOLTAGE_DROP_ACTIVE_HIGH)) {
+ rdev->pm.power_state[state_index].clock_info[0].voltage.type =
+ VOLTAGE_GPIO;
+ rdev->pm.power_state[state_index].clock_info[0].voltage.gpio =
+ radeon_lookup_gpio(rdev,
+ power_info->info_2.asPowerPlayInfo[i].ucVoltageDropIndex);
+ if (misc & ATOM_PM_MISCINFO_VOLTAGE_DROP_ACTIVE_HIGH)
+ rdev->pm.power_state[state_index].clock_info[0].voltage.active_high =
+ true;
+ else
+ rdev->pm.power_state[state_index].clock_info[0].voltage.active_high =
+ false;
+ } else if (misc & ATOM_PM_MISCINFO_PROGRAM_VOLTAGE) {
+ rdev->pm.power_state[state_index].clock_info[0].voltage.type =
+ VOLTAGE_VDDC;
+ rdev->pm.power_state[state_index].clock_info[0].voltage.vddc_id =
+ power_info->info_2.asPowerPlayInfo[i].ucVoltageDropIndex;
}
- /* last mode is usually default */
- if (rdev->pm.default_power_state_index == -1) {
- rdev->pm.power_state[state_index - 1].type =
- POWER_STATE_TYPE_DEFAULT;
- rdev->pm.default_power_state_index = state_index - 1;
- rdev->pm.power_state[state_index - 1].default_clock_mode =
- &rdev->pm.power_state[state_index - 1].clock_info[0];
- rdev->pm.power_state[state_index].flags &=
- ~RADEON_PM_STATE_SINGLE_DISPLAY_ONLY;
- rdev->pm.power_state[state_index].misc = 0;
- rdev->pm.power_state[state_index].misc2 = 0;
+ rdev->pm.power_state[state_index].flags = RADEON_PM_STATE_SINGLE_DISPLAY_ONLY;
+ radeon_atombios_parse_misc_flags_1_3(rdev, state_index, misc, misc2);
+ state_index++;
+ break;
+ case 3:
+ rdev->pm.power_state[state_index].num_clock_modes = 1;
+ rdev->pm.power_state[state_index].clock_info[0].mclk =
+ le32_to_cpu(power_info->info_3.asPowerPlayInfo[i].ulMemoryClock);
+ rdev->pm.power_state[state_index].clock_info[0].sclk =
+ le32_to_cpu(power_info->info_3.asPowerPlayInfo[i].ulEngineClock);
+ /* skip invalid modes */
+ if ((rdev->pm.power_state[state_index].clock_info[0].mclk == 0) ||
+ (rdev->pm.power_state[state_index].clock_info[0].sclk == 0))
+ continue;
+ rdev->pm.power_state[state_index].pcie_lanes =
+ power_info->info_3.asPowerPlayInfo[i].ucNumPciELanes;
+ misc = le32_to_cpu(power_info->info_3.asPowerPlayInfo[i].ulMiscInfo);
+ misc2 = le32_to_cpu(power_info->info_3.asPowerPlayInfo[i].ulMiscInfo2);
+ if ((misc & ATOM_PM_MISCINFO_VOLTAGE_DROP_SUPPORT) ||
+ (misc & ATOM_PM_MISCINFO_VOLTAGE_DROP_ACTIVE_HIGH)) {
+ rdev->pm.power_state[state_index].clock_info[0].voltage.type =
+ VOLTAGE_GPIO;
+ rdev->pm.power_state[state_index].clock_info[0].voltage.gpio =
+ radeon_lookup_gpio(rdev,
+ power_info->info_3.asPowerPlayInfo[i].ucVoltageDropIndex);
+ if (misc & ATOM_PM_MISCINFO_VOLTAGE_DROP_ACTIVE_HIGH)
+ rdev->pm.power_state[state_index].clock_info[0].voltage.active_high =
+ true;
+ else
+ rdev->pm.power_state[state_index].clock_info[0].voltage.active_high =
+ false;
+ } else if (misc & ATOM_PM_MISCINFO_PROGRAM_VOLTAGE) {
+ rdev->pm.power_state[state_index].clock_info[0].voltage.type =
+ VOLTAGE_VDDC;
+ rdev->pm.power_state[state_index].clock_info[0].voltage.vddc_id =
+ power_info->info_3.asPowerPlayInfo[i].ucVoltageDropIndex;
+ if (misc2 & ATOM_PM_MISCINFO2_VDDCI_DYNAMIC_VOLTAGE_EN) {
+ rdev->pm.power_state[state_index].clock_info[0].voltage.vddci_enabled =
+ true;
+ rdev->pm.power_state[state_index].clock_info[0].voltage.vddci_id =
+ power_info->info_3.asPowerPlayInfo[i].ucVDDCI_VoltageDropIndex;
+ }
}
+ rdev->pm.power_state[state_index].flags = RADEON_PM_STATE_SINGLE_DISPLAY_ONLY;
+ radeon_atombios_parse_misc_flags_1_3(rdev, state_index, misc, misc2);
+ state_index++;
+ break;
+ }
+ }
+ /* last mode is usually default */
+ if (rdev->pm.default_power_state_index == -1) {
+ rdev->pm.power_state[state_index - 1].type =
+ POWER_STATE_TYPE_DEFAULT;
+ rdev->pm.default_power_state_index = state_index - 1;
+ rdev->pm.power_state[state_index - 1].default_clock_mode =
+ &rdev->pm.power_state[state_index - 1].clock_info[0];
+ rdev->pm.power_state[state_index].flags &=
+ ~RADEON_PM_STATE_SINGLE_DISPLAY_ONLY;
+ rdev->pm.power_state[state_index].misc = 0;
+ rdev->pm.power_state[state_index].misc2 = 0;
+ }
+ return state_index;
+}
+
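
With the monolithic parser split into helpers, the top-level radeon_atombios_get_power_modes() presumably reduces to a dispatch on the table revision. The dispatcher itself lies outside the hunks shown here, so the following is only a sketch of its assumed shape (the pplib parser name is a guess):

	/* Assumed shape of the refactored entry point: tables with frev < 4
	 * take the legacy parser above, frev 4+ the pplib parsers below. */
	void radeon_atombios_get_power_modes(struct radeon_device *rdev)
	{
		int index = GetIndexIntoMasterTable(DATA, PowerPlayInfo);
		int state_index = 0;
		u16 data_offset;
		u8 frev, crev;

		rdev->pm.default_power_state_index = -1;
		if (atom_parse_data_header(rdev->mode_info.atom_context, index,
					   NULL, &frev, &crev, &data_offset)) {
			if (frev < 4)
				state_index = radeon_atombios_parse_power_table_1_3(rdev);
			else
				state_index = radeon_atombios_parse_pplib_tables(rdev);
		}
		rdev->pm.num_power_states = state_index;
	}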
+static void radeon_atombios_add_pplib_thermal_controller(struct radeon_device *rdev,
+ ATOM_PPLIB_THERMALCONTROLLER *controller)
+{
+ struct radeon_i2c_bus_rec i2c_bus;
+
+ /* add the i2c bus for thermal/fan chip */
+ if (controller->ucType > 0) {
+ if (controller->ucType == ATOM_PP_THERMALCONTROLLER_RV6xx) {
+ DRM_INFO("Internal thermal controller %s fan control\n",
+ (controller->ucFanParameters &
+ ATOM_PP_FANPARAMETERS_NOFAN) ? "without" : "with");
+ rdev->pm.int_thermal_type = THERMAL_TYPE_RV6XX;
+ } else if (controller->ucType == ATOM_PP_THERMALCONTROLLER_RV770) {
+ DRM_INFO("Internal thermal controller %s fan control\n",
+ (controller->ucFanParameters &
+ ATOM_PP_FANPARAMETERS_NOFAN) ? "without" : "with");
+ rdev->pm.int_thermal_type = THERMAL_TYPE_RV770;
+ } else if (controller->ucType == ATOM_PP_THERMALCONTROLLER_EVERGREEN) {
+ DRM_INFO("Internal thermal controller %s fan control\n",
+ (controller->ucFanParameters &
+ ATOM_PP_FANPARAMETERS_NOFAN) ? "without" : "with");
+ rdev->pm.int_thermal_type = THERMAL_TYPE_EVERGREEN;
+ } else if (controller->ucType == ATOM_PP_THERMALCONTROLLER_SUMO) {
+ DRM_INFO("Internal thermal controller %s fan control\n",
+ (controller->ucFanParameters &
+ ATOM_PP_FANPARAMETERS_NOFAN) ? "without" : "with");
+ rdev->pm.int_thermal_type = THERMAL_TYPE_SUMO;
+ } else if (controller->ucType == ATOM_PP_THERMALCONTROLLER_NISLANDS) {
+ DRM_INFO("Internal thermal controller %s fan control\n",
+ (controller->ucFanParameters &
+ ATOM_PP_FANPARAMETERS_NOFAN) ? "without" : "with");
+ rdev->pm.int_thermal_type = THERMAL_TYPE_NI;
+ } else if ((controller->ucType ==
+ ATOM_PP_THERMALCONTROLLER_EXTERNAL_GPIO) ||
+ (controller->ucType ==
+ ATOM_PP_THERMALCONTROLLER_ADT7473_WITH_INTERNAL) ||
+ (controller->ucType ==
+ ATOM_PP_THERMALCONTROLLER_EMC2103_WITH_INTERNAL)) {
+ DRM_INFO("Special thermal controller config\n");
} else {
- int fw_index = GetIndexIntoMasterTable(DATA, FirmwareInfo);
- uint8_t fw_frev, fw_crev;
- uint16_t fw_data_offset, vddc = 0;
- union firmware_info *firmware_info;
- ATOM_PPLIB_THERMALCONTROLLER *controller = &power_info->info_4.sThermalController;
-
- if (atom_parse_data_header(mode_info->atom_context, fw_index, NULL,
- &fw_frev, &fw_crev, &fw_data_offset)) {
- firmware_info =
- (union firmware_info *)(mode_info->atom_context->bios +
- fw_data_offset);
- vddc = firmware_info->info_14.usBootUpVDDCVoltage;
+ DRM_INFO("Possible %s thermal controller at 0x%02x %s fan control\n",
+ pp_lib_thermal_controller_names[controller->ucType],
+ controller->ucI2cAddress >> 1,
+ (controller->ucFanParameters &
+ ATOM_PP_FANPARAMETERS_NOFAN) ? "without" : "with");
+ i2c_bus = radeon_lookup_i2c_gpio(rdev, controller->ucI2cLine);
+ rdev->pm.i2c_bus = radeon_i2c_lookup(rdev, &i2c_bus);
+ if (rdev->pm.i2c_bus) {
+ struct i2c_board_info info = { };
+ const char *name = pp_lib_thermal_controller_names[controller->ucType];
+ info.addr = controller->ucI2cAddress >> 1;
+ strlcpy(info.type, name, sizeof(info.type));
+ i2c_new_device(&rdev->pm.i2c_bus->adapter, &info);
}
+ }
+ }
+}
- /* add the i2c bus for thermal/fan chip */
- if (controller->ucType > 0) {
- if (controller->ucType == ATOM_PP_THERMALCONTROLLER_RV6xx) {
- DRM_INFO("Internal thermal controller %s fan control\n",
- (controller->ucFanParameters &
- ATOM_PP_FANPARAMETERS_NOFAN) ? "without" : "with");
- rdev->pm.int_thermal_type = THERMAL_TYPE_RV6XX;
- } else if (controller->ucType == ATOM_PP_THERMALCONTROLLER_RV770) {
- DRM_INFO("Internal thermal controller %s fan control\n",
- (controller->ucFanParameters &
- ATOM_PP_FANPARAMETERS_NOFAN) ? "without" : "with");
- rdev->pm.int_thermal_type = THERMAL_TYPE_RV770;
- } else if (controller->ucType == ATOM_PP_THERMALCONTROLLER_EVERGREEN) {
- DRM_INFO("Internal thermal controller %s fan control\n",
- (controller->ucFanParameters &
- ATOM_PP_FANPARAMETERS_NOFAN) ? "without" : "with");
- rdev->pm.int_thermal_type = THERMAL_TYPE_EVERGREEN;
- } else if ((controller->ucType ==
- ATOM_PP_THERMALCONTROLLER_EXTERNAL_GPIO) ||
- (controller->ucType ==
- ATOM_PP_THERMALCONTROLLER_ADT7473_WITH_INTERNAL)) {
- DRM_INFO("Special thermal controller config\n");
- } else {
- DRM_INFO("Possible %s thermal controller at 0x%02x %s fan control\n",
- pp_lib_thermal_controller_names[controller->ucType],
- controller->ucI2cAddress >> 1,
- (controller->ucFanParameters &
- ATOM_PP_FANPARAMETERS_NOFAN) ? "without" : "with");
- i2c_bus = radeon_lookup_i2c_gpio(rdev, controller->ucI2cLine);
- rdev->pm.i2c_bus = radeon_i2c_lookup(rdev, &i2c_bus);
- if (rdev->pm.i2c_bus) {
- struct i2c_board_info info = { };
- const char *name = pp_lib_thermal_controller_names[controller->ucType];
- info.addr = controller->ucI2cAddress >> 1;
- strlcpy(info.type, name, sizeof(info.type));
- i2c_new_device(&rdev->pm.i2c_bus->adapter, &info);
- }
+static u16 radeon_atombios_get_default_vddc(struct radeon_device *rdev)
+{
+ struct radeon_mode_info *mode_info = &rdev->mode_info;
+ int index = GetIndexIntoMasterTable(DATA, FirmwareInfo);
+ u8 frev, crev;
+ u16 data_offset;
+ union firmware_info *firmware_info;
+ u16 vddc = 0;
- }
- }
- /* first mode is usually default, followed by low to high */
- for (i = 0; i < power_info->info_4.ucNumStates; i++) {
- mode_index = 0;
- power_state = (struct _ATOM_PPLIB_STATE *)
- (mode_info->atom_context->bios +
- data_offset +
- le16_to_cpu(power_info->info_4.usStateArrayOffset) +
- i * power_info->info_4.ucStateEntrySize);
- non_clock_info = (struct _ATOM_PPLIB_NONCLOCK_INFO *)
- (mode_info->atom_context->bios +
- data_offset +
- le16_to_cpu(power_info->info_4.usNonClockInfoArrayOffset) +
- (power_state->ucNonClockStateIndex *
- power_info->info_4.ucNonClockSize));
- for (j = 0; j < (power_info->info_4.ucStateEntrySize - 1); j++) {
- if (rdev->flags & RADEON_IS_IGP) {
- struct _ATOM_PPLIB_RS780_CLOCK_INFO *clock_info =
- (struct _ATOM_PPLIB_RS780_CLOCK_INFO *)
- (mode_info->atom_context->bios +
- data_offset +
- le16_to_cpu(power_info->info_4.usClockInfoArrayOffset) +
- (power_state->ucClockStateIndices[j] *
- power_info->info_4.ucClockInfoSize));
- sclk = le16_to_cpu(clock_info->usLowEngineClockLow);
- sclk |= clock_info->ucLowEngineClockHigh << 16;
- rdev->pm.power_state[state_index].clock_info[mode_index].sclk = sclk;
- /* skip invalid modes */
- if (rdev->pm.power_state[state_index].clock_info[mode_index].sclk == 0)
- continue;
- /* voltage works differently on IGPs */
- mode_index++;
- } else if (ASIC_IS_DCE4(rdev)) {
- struct _ATOM_PPLIB_EVERGREEN_CLOCK_INFO *clock_info =
- (struct _ATOM_PPLIB_EVERGREEN_CLOCK_INFO *)
- (mode_info->atom_context->bios +
- data_offset +
- le16_to_cpu(power_info->info_4.usClockInfoArrayOffset) +
- (power_state->ucClockStateIndices[j] *
- power_info->info_4.ucClockInfoSize));
- sclk = le16_to_cpu(clock_info->usEngineClockLow);
- sclk |= clock_info->ucEngineClockHigh << 16;
- mclk = le16_to_cpu(clock_info->usMemoryClockLow);
- mclk |= clock_info->ucMemoryClockHigh << 16;
- rdev->pm.power_state[state_index].clock_info[mode_index].mclk = mclk;
- rdev->pm.power_state[state_index].clock_info[mode_index].sclk = sclk;
- /* skip invalid modes */
- if ((rdev->pm.power_state[state_index].clock_info[mode_index].mclk == 0) ||
- (rdev->pm.power_state[state_index].clock_info[mode_index].sclk == 0))
- continue;
- rdev->pm.power_state[state_index].clock_info[mode_index].voltage.type =
- VOLTAGE_SW;
- rdev->pm.power_state[state_index].clock_info[mode_index].voltage.voltage =
- clock_info->usVDDC;
- /* XXX usVDDCI */
- mode_index++;
- } else {
- struct _ATOM_PPLIB_R600_CLOCK_INFO *clock_info =
- (struct _ATOM_PPLIB_R600_CLOCK_INFO *)
- (mode_info->atom_context->bios +
- data_offset +
- le16_to_cpu(power_info->info_4.usClockInfoArrayOffset) +
- (power_state->ucClockStateIndices[j] *
- power_info->info_4.ucClockInfoSize));
- sclk = le16_to_cpu(clock_info->usEngineClockLow);
- sclk |= clock_info->ucEngineClockHigh << 16;
- mclk = le16_to_cpu(clock_info->usMemoryClockLow);
- mclk |= clock_info->ucMemoryClockHigh << 16;
- rdev->pm.power_state[state_index].clock_info[mode_index].mclk = mclk;
- rdev->pm.power_state[state_index].clock_info[mode_index].sclk = sclk;
- /* skip invalid modes */
- if ((rdev->pm.power_state[state_index].clock_info[mode_index].mclk == 0) ||
- (rdev->pm.power_state[state_index].clock_info[mode_index].sclk == 0))
- continue;
- rdev->pm.power_state[state_index].clock_info[mode_index].voltage.type =
- VOLTAGE_SW;
- rdev->pm.power_state[state_index].clock_info[mode_index].voltage.voltage =
- clock_info->usVDDC;
- mode_index++;
- }
- }
- rdev->pm.power_state[state_index].num_clock_modes = mode_index;
- if (mode_index) {
- misc = le32_to_cpu(non_clock_info->ulCapsAndSettings);
- misc2 = le16_to_cpu(non_clock_info->usClassification);
- rdev->pm.power_state[state_index].misc = misc;
- rdev->pm.power_state[state_index].misc2 = misc2;
- rdev->pm.power_state[state_index].pcie_lanes =
- ((misc & ATOM_PPLIB_PCIE_LINK_WIDTH_MASK) >>
- ATOM_PPLIB_PCIE_LINK_WIDTH_SHIFT) + 1;
- switch (misc2 & ATOM_PPLIB_CLASSIFICATION_UI_MASK) {
- case ATOM_PPLIB_CLASSIFICATION_UI_BATTERY:
- rdev->pm.power_state[state_index].type =
- POWER_STATE_TYPE_BATTERY;
- break;
- case ATOM_PPLIB_CLASSIFICATION_UI_BALANCED:
- rdev->pm.power_state[state_index].type =
- POWER_STATE_TYPE_BALANCED;
- break;
- case ATOM_PPLIB_CLASSIFICATION_UI_PERFORMANCE:
- rdev->pm.power_state[state_index].type =
- POWER_STATE_TYPE_PERFORMANCE;
- break;
- case ATOM_PPLIB_CLASSIFICATION_UI_NONE:
- if (misc2 & ATOM_PPLIB_CLASSIFICATION_3DPERFORMANCE)
- rdev->pm.power_state[state_index].type =
- POWER_STATE_TYPE_PERFORMANCE;
- break;
- }
- rdev->pm.power_state[state_index].flags = 0;
- if (misc & ATOM_PPLIB_SINGLE_DISPLAY_ONLY)
- rdev->pm.power_state[state_index].flags |=
- RADEON_PM_STATE_SINGLE_DISPLAY_ONLY;
- if (misc2 & ATOM_PPLIB_CLASSIFICATION_BOOT) {
- rdev->pm.power_state[state_index].type =
- POWER_STATE_TYPE_DEFAULT;
- rdev->pm.default_power_state_index = state_index;
- rdev->pm.power_state[state_index].default_clock_mode =
- &rdev->pm.power_state[state_index].clock_info[mode_index - 1];
- /* patch the table values with the default slck/mclk from firmware info */
- for (j = 0; j < mode_index; j++) {
- rdev->pm.power_state[state_index].clock_info[j].mclk =
- rdev->clock.default_mclk;
- rdev->pm.power_state[state_index].clock_info[j].sclk =
- rdev->clock.default_sclk;
- if (vddc)
- rdev->pm.power_state[state_index].clock_info[j].voltage.voltage =
- vddc;
- }
- }
- state_index++;
- }
- }
- /* if multiple clock modes, mark the lowest as no display */
- for (i = 0; i < state_index; i++) {
- if (rdev->pm.power_state[i].num_clock_modes > 1)
- rdev->pm.power_state[i].clock_info[0].flags |=
- RADEON_PM_MODE_NO_DISPLAY;
- }
- /* first mode is usually default */
- if (rdev->pm.default_power_state_index == -1) {
- rdev->pm.power_state[0].type =
- POWER_STATE_TYPE_DEFAULT;
- rdev->pm.default_power_state_index = 0;
- rdev->pm.power_state[0].default_clock_mode =
- &rdev->pm.power_state[0].clock_info[0];
+ if (atom_parse_data_header(mode_info->atom_context, index, NULL,
+ &frev, &crev, &data_offset)) {
+ firmware_info =
+ (union firmware_info *)(mode_info->atom_context->bios +
+ data_offset);
+ vddc = firmware_info->info_14.usBootUpVDDCVoltage;
+ }
+
+ return vddc;
+}
+
+static void radeon_atombios_parse_pplib_non_clock_info(struct radeon_device *rdev,
+ int state_index, int mode_index,
+ struct _ATOM_PPLIB_NONCLOCK_INFO *non_clock_info)
+{
+ int j;
+ u32 misc = le32_to_cpu(non_clock_info->ulCapsAndSettings);
+ u32 misc2 = le16_to_cpu(non_clock_info->usClassification);
+ u16 vddc = radeon_atombios_get_default_vddc(rdev);
+
+ rdev->pm.power_state[state_index].misc = misc;
+ rdev->pm.power_state[state_index].misc2 = misc2;
+ rdev->pm.power_state[state_index].pcie_lanes =
+ ((misc & ATOM_PPLIB_PCIE_LINK_WIDTH_MASK) >>
+ ATOM_PPLIB_PCIE_LINK_WIDTH_SHIFT) + 1;
+ switch (misc2 & ATOM_PPLIB_CLASSIFICATION_UI_MASK) {
+ case ATOM_PPLIB_CLASSIFICATION_UI_BATTERY:
+ rdev->pm.power_state[state_index].type =
+ POWER_STATE_TYPE_BATTERY;
+ break;
+ case ATOM_PPLIB_CLASSIFICATION_UI_BALANCED:
+ rdev->pm.power_state[state_index].type =
+ POWER_STATE_TYPE_BALANCED;
+ break;
+ case ATOM_PPLIB_CLASSIFICATION_UI_PERFORMANCE:
+ rdev->pm.power_state[state_index].type =
+ POWER_STATE_TYPE_PERFORMANCE;
+ break;
+ case ATOM_PPLIB_CLASSIFICATION_UI_NONE:
+ if (misc2 & ATOM_PPLIB_CLASSIFICATION_3DPERFORMANCE)
+ rdev->pm.power_state[state_index].type =
+ POWER_STATE_TYPE_PERFORMANCE;
+ break;
+ }
+ rdev->pm.power_state[state_index].flags = 0;
+ if (misc & ATOM_PPLIB_SINGLE_DISPLAY_ONLY)
+ rdev->pm.power_state[state_index].flags |=
+ RADEON_PM_STATE_SINGLE_DISPLAY_ONLY;
+ if (misc2 & ATOM_PPLIB_CLASSIFICATION_BOOT) {
+ rdev->pm.power_state[state_index].type =
+ POWER_STATE_TYPE_DEFAULT;
+ rdev->pm.default_power_state_index = state_index;
+ rdev->pm.power_state[state_index].default_clock_mode =
+ &rdev->pm.power_state[state_index].clock_info[mode_index - 1];
+ if (ASIC_IS_DCE5(rdev)) {
+ /* NI chips post without MC ucode, so default clocks are strobe mode only */
+ rdev->pm.default_sclk = rdev->pm.power_state[state_index].clock_info[0].sclk;
+ rdev->pm.default_mclk = rdev->pm.power_state[state_index].clock_info[0].mclk;
+ rdev->pm.default_vddc = rdev->pm.power_state[state_index].clock_info[0].voltage.voltage;
+ } else {
+ /* patch the table values with the default sclk/mclk from firmware info */
+ for (j = 0; j < mode_index; j++) {
+ rdev->pm.power_state[state_index].clock_info[j].mclk =
+ rdev->clock.default_mclk;
+ rdev->pm.power_state[state_index].clock_info[j].sclk =
+ rdev->clock.default_sclk;
+ if (vddc)
+ rdev->pm.power_state[state_index].clock_info[j].voltage.voltage =
+ vddc;
}
}
+ }
+}
+
+static bool radeon_atombios_parse_pplib_clock_info(struct radeon_device *rdev,
+ int state_index, int mode_index,
+ union pplib_clock_info *clock_info)
+{
+ u32 sclk, mclk;
+
+ if (rdev->flags & RADEON_IS_IGP) {
+ if (rdev->family >= CHIP_PALM) {
+ sclk = le16_to_cpu(clock_info->sumo.usEngineClockLow);
+ sclk |= clock_info->sumo.ucEngineClockHigh << 16;
+ rdev->pm.power_state[state_index].clock_info[mode_index].sclk = sclk;
+ } else {
+ sclk = le16_to_cpu(clock_info->rs780.usLowEngineClockLow);
+ sclk |= clock_info->rs780.ucLowEngineClockHigh << 16;
+ rdev->pm.power_state[state_index].clock_info[mode_index].sclk = sclk;
+ }
+ } else if (ASIC_IS_DCE4(rdev)) {
+ sclk = le16_to_cpu(clock_info->evergreen.usEngineClockLow);
+ sclk |= clock_info->evergreen.ucEngineClockHigh << 16;
+ mclk = le16_to_cpu(clock_info->evergreen.usMemoryClockLow);
+ mclk |= clock_info->evergreen.ucMemoryClockHigh << 16;
+ rdev->pm.power_state[state_index].clock_info[mode_index].mclk = mclk;
+ rdev->pm.power_state[state_index].clock_info[mode_index].sclk = sclk;
+ rdev->pm.power_state[state_index].clock_info[mode_index].voltage.type =
+ VOLTAGE_SW;
+ rdev->pm.power_state[state_index].clock_info[mode_index].voltage.voltage =
+ clock_info->evergreen.usVDDC;
+ } else {
+ sclk = le16_to_cpu(clock_info->r600.usEngineClockLow);
+ sclk |= clock_info->r600.ucEngineClockHigh << 16;
+ mclk = le16_to_cpu(clock_info->r600.usMemoryClockLow);
+ mclk |= clock_info->r600.ucMemoryClockHigh << 16;
+ rdev->pm.power_state[state_index].clock_info[mode_index].mclk = mclk;
+ rdev->pm.power_state[state_index].clock_info[mode_index].sclk = sclk;
+ rdev->pm.power_state[state_index].clock_info[mode_index].voltage.type =
+ VOLTAGE_SW;
+ rdev->pm.power_state[state_index].clock_info[mode_index].voltage.voltage =
+ clock_info->r600.usVDDC;
+ }
+
+ if (rdev->flags & RADEON_IS_IGP) {
+ /* skip invalid modes */
+ if (rdev->pm.power_state[state_index].clock_info[mode_index].sclk == 0)
+ return false;
+ } else {
+ /* skip invalid modes */
+ if ((rdev->pm.power_state[state_index].clock_info[mode_index].mclk == 0) ||
+ (rdev->pm.power_state[state_index].clock_info[mode_index].sclk == 0))
+ return false;
+ }
+ return true;
+}
+
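Aside: every branch of radeon_atombios_parse_pplib_clock_info above reassembles a clock from the same split low/high encoding used by the pplib tables. In isolation the pattern is (illustrative sketch, not part of the patch; field names follow the evergreen branch above):

	/* the tables store each clock as a little-endian 16-bit low word
	 * plus an 8-bit high byte, yielding a 24-bit value */
	u32 sclk = le16_to_cpu(clock_info->evergreen.usEngineClockLow);
	sclk |= clock_info->evergreen.ucEngineClockHigh << 16;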
+static int radeon_atombios_parse_power_table_4_5(struct radeon_device *rdev)
+{
+ struct radeon_mode_info *mode_info = &rdev->mode_info;
+ struct _ATOM_PPLIB_NONCLOCK_INFO *non_clock_info;
+ union pplib_power_state *power_state;
+ int i, j;
+ int state_index = 0, mode_index = 0;
+ union pplib_clock_info *clock_info;
+ bool valid;
+ union power_info *power_info;
+ int index = GetIndexIntoMasterTable(DATA, PowerPlayInfo);
+ u16 data_offset;
+ u8 frev, crev;
+
+ if (!atom_parse_data_header(mode_info->atom_context, index, NULL,
+ &frev, &crev, &data_offset))
+ return state_index;
+ power_info = (union power_info *)(mode_info->atom_context->bios + data_offset);
+
+ radeon_atombios_add_pplib_thermal_controller(rdev, &power_info->pplib.sThermalController);
+ /* first mode is usually default, followed by low to high */
+ for (i = 0; i < power_info->pplib.ucNumStates; i++) {
+ mode_index = 0;
+ power_state = (union pplib_power_state *)
+ (mode_info->atom_context->bios + data_offset +
+ le16_to_cpu(power_info->pplib.usStateArrayOffset) +
+ i * power_info->pplib.ucStateEntrySize);
+ non_clock_info = (struct _ATOM_PPLIB_NONCLOCK_INFO *)
+ (mode_info->atom_context->bios + data_offset +
+ le16_to_cpu(power_info->pplib.usNonClockInfoArrayOffset) +
+ (power_state->v1.ucNonClockStateIndex *
+ power_info->pplib.ucNonClockSize));
+ for (j = 0; j < (power_info->pplib.ucStateEntrySize - 1); j++) {
+ clock_info = (union pplib_clock_info *)
+ (mode_info->atom_context->bios + data_offset +
+ le16_to_cpu(power_info->pplib.usClockInfoArrayOffset) +
+ (power_state->v1.ucClockStateIndices[j] *
+ power_info->pplib.ucClockInfoSize));
+ valid = radeon_atombios_parse_pplib_clock_info(rdev,
+ state_index, mode_index,
+ clock_info);
+ if (valid)
+ mode_index++;
+ }
+ rdev->pm.power_state[state_index].num_clock_modes = mode_index;
+ if (mode_index) {
+ radeon_atombios_parse_pplib_non_clock_info(rdev, state_index, mode_index,
+ non_clock_info);
+ state_index++;
+ }
+ }
+ /* if multiple clock modes, mark the lowest as no display */
+ for (i = 0; i < state_index; i++) {
+ if (rdev->pm.power_state[i].num_clock_modes > 1)
+ rdev->pm.power_state[i].clock_info[0].flags |=
+ RADEON_PM_MODE_NO_DISPLAY;
+ }
+ /* first mode is usually default */
+ if (rdev->pm.default_power_state_index == -1) {
+ rdev->pm.power_state[0].type =
+ POWER_STATE_TYPE_DEFAULT;
+ rdev->pm.default_power_state_index = 0;
+ rdev->pm.power_state[0].default_clock_mode =
+ &rdev->pm.power_state[0].clock_info[0];
+ }
+ return state_index;
+}
+
+static int radeon_atombios_parse_power_table_6(struct radeon_device *rdev)
+{
+ struct radeon_mode_info *mode_info = &rdev->mode_info;
+ struct _ATOM_PPLIB_NONCLOCK_INFO *non_clock_info;
+ union pplib_power_state *power_state;
+ int i, j, non_clock_array_index, clock_array_index;
+ int state_index = 0, mode_index = 0;
+ union pplib_clock_info *clock_info;
+ struct StateArray *state_array;
+ struct ClockInfoArray *clock_info_array;
+ struct NonClockInfoArray *non_clock_info_array;
+ bool valid;
+ union power_info *power_info;
+ int index = GetIndexIntoMasterTable(DATA, PowerPlayInfo);
+ u16 data_offset;
+ u8 frev, crev;
+
+ if (!atom_parse_data_header(mode_info->atom_context, index, NULL,
+ &frev, &crev, &data_offset))
+ return state_index;
+ power_info = (union power_info *)(mode_info->atom_context->bios + data_offset);
+
+ radeon_atombios_add_pplib_thermal_controller(rdev, &power_info->pplib.sThermalController);
+ state_array = (struct StateArray *)
+ (mode_info->atom_context->bios + data_offset +
+ power_info->pplib.usStateArrayOffset);
+ clock_info_array = (struct ClockInfoArray *)
+ (mode_info->atom_context->bios + data_offset +
+ power_info->pplib.usClockInfoArrayOffset);
+ non_clock_info_array = (struct NonClockInfoArray *)
+ (mode_info->atom_context->bios + data_offset +
+ power_info->pplib.usNonClockInfoArrayOffset);
+ for (i = 0; i < state_array->ucNumEntries; i++) {
+ mode_index = 0;
+ power_state = (union pplib_power_state *)&state_array->states[i];
+ /* XXX this might be an inagua bug... */
+ non_clock_array_index = i; /* power_state->v2.nonClockInfoIndex */
+ non_clock_info = (struct _ATOM_PPLIB_NONCLOCK_INFO *)
+ &non_clock_info_array->nonClockInfo[non_clock_array_index];
+ for (j = 0; j < power_state->v2.ucNumDPMLevels; j++) {
+ clock_array_index = power_state->v2.clockInfoIndex[j];
+ /* XXX this might be an inagua bug... */
+ if (clock_array_index >= clock_info_array->ucNumEntries)
+ continue;
+ clock_info = (union pplib_clock_info *)
+ &clock_info_array->clockInfo[clock_array_index];
+ valid = radeon_atombios_parse_pplib_clock_info(rdev,
+ state_index, mode_index,
+ clock_info);
+ if (valid)
+ mode_index++;
+ }
+ rdev->pm.power_state[state_index].num_clock_modes = mode_index;
+ if (mode_index) {
+ radeon_atombios_parse_pplib_non_clock_info(rdev, state_index, mode_index,
+ non_clock_info);
+ state_index++;
+ }
+ }
+ /* if multiple clock modes, mark the lowest as no display */
+ for (i = 0; i < state_index; i++) {
+ if (rdev->pm.power_state[i].num_clock_modes > 1)
+ rdev->pm.power_state[i].clock_info[0].flags |=
+ RADEON_PM_MODE_NO_DISPLAY;
+ }
+ /* first mode is usually default */
+ if (rdev->pm.default_power_state_index == -1) {
+ rdev->pm.power_state[0].type =
+ POWER_STATE_TYPE_DEFAULT;
+ rdev->pm.default_power_state_index = 0;
+ rdev->pm.power_state[0].default_clock_mode =
+ &rdev->pm.power_state[0].clock_info[0];
+ }
+ return state_index;
+}
+
+void radeon_atombios_get_power_modes(struct radeon_device *rdev)
+{
+ struct radeon_mode_info *mode_info = &rdev->mode_info;
+ int index = GetIndexIntoMasterTable(DATA, PowerPlayInfo);
+ u16 data_offset;
+ u8 frev, crev;
+ int state_index = 0;
+
+ rdev->pm.default_power_state_index = -1;
+
+ if (atom_parse_data_header(mode_info->atom_context, index, NULL,
+ &frev, &crev, &data_offset)) {
+ switch (frev) {
+ case 1:
+ case 2:
+ case 3:
+ state_index = radeon_atombios_parse_power_table_1_3(rdev);
+ break;
+ case 4:
+ case 5:
+ state_index = radeon_atombios_parse_power_table_4_5(rdev);
+ break;
+ case 6:
+ state_index = radeon_atombios_parse_power_table_6(rdev);
+ break;
+ default:
+ break;
+ }
} else {
/* add the default mode */
rdev->pm.power_state[state_index].type =
diff --git a/drivers/gpu/drm/radeon/radeon_bios.c b/drivers/gpu/drm/radeon/radeon_bios.c
index 8f2c7b50dcf..1aba85cad1a 100644
--- a/drivers/gpu/drm/radeon/radeon_bios.c
+++ b/drivers/gpu/drm/radeon/radeon_bios.c
@@ -131,6 +131,45 @@ static bool radeon_atrm_get_bios(struct radeon_device *rdev)
return true;
}
+static bool ni_read_disabled_bios(struct radeon_device *rdev)
+{
+ u32 bus_cntl;
+ u32 d1vga_control;
+ u32 d2vga_control;
+ u32 vga_render_control;
+ u32 rom_cntl;
+ bool r;
+
+ bus_cntl = RREG32(R600_BUS_CNTL);
+ d1vga_control = RREG32(AVIVO_D1VGA_CONTROL);
+ d2vga_control = RREG32(AVIVO_D2VGA_CONTROL);
+ vga_render_control = RREG32(AVIVO_VGA_RENDER_CONTROL);
+ rom_cntl = RREG32(R600_ROM_CNTL);
+
+ /* enable the rom */
+ WREG32(R600_BUS_CNTL, (bus_cntl & ~R600_BIOS_ROM_DIS));
+ /* Disable VGA mode */
+ WREG32(AVIVO_D1VGA_CONTROL,
+ (d1vga_control & ~(AVIVO_DVGA_CONTROL_MODE_ENABLE |
+ AVIVO_DVGA_CONTROL_TIMING_SELECT)));
+ WREG32(AVIVO_D2VGA_CONTROL,
+ (d2vga_control & ~(AVIVO_DVGA_CONTROL_MODE_ENABLE |
+ AVIVO_DVGA_CONTROL_TIMING_SELECT)));
+ WREG32(AVIVO_VGA_RENDER_CONTROL,
+ (vga_render_control & ~AVIVO_VGA_VSTATUS_CNTL_MASK));
+ WREG32(R600_ROM_CNTL, rom_cntl | R600_SCK_OVERWRITE);
+
+ r = radeon_read_bios(rdev);
+
+ /* restore regs */
+ WREG32(R600_BUS_CNTL, bus_cntl);
+ WREG32(AVIVO_D1VGA_CONTROL, d1vga_control);
+ WREG32(AVIVO_D2VGA_CONTROL, d2vga_control);
+ WREG32(AVIVO_VGA_RENDER_CONTROL, vga_render_control);
+ WREG32(R600_ROM_CNTL, rom_cntl);
+ return r;
+}
+
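The new helper follows the same save/override/read/restore shape as the r700/r600 variants below it; reduced to a skeleton (illustrative only; GATE_REG and GATE_DISABLE_BIT are hypothetical stand-ins for the register set saved above):

	static bool read_disabled_bios_skeleton(struct radeon_device *rdev)
	{
		u32 saved = RREG32(GATE_REG);	/* hypothetical register */
		bool r;

		/* expose the ROM and quiesce anything that blocks the read */
		WREG32(GATE_REG, saved & ~GATE_DISABLE_BIT);
		r = radeon_read_bios(rdev);
		/* restore the saved hardware state */
		WREG32(GATE_REG, saved);
		return r;
	}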
static bool r700_read_disabled_bios(struct radeon_device *rdev)
{
uint32_t viph_control;
@@ -416,6 +455,8 @@ static bool radeon_read_disabled_bios(struct radeon_device *rdev)
{
if (rdev->flags & RADEON_IS_IGP)
return igp_read_bios_from_vram(rdev);
+ else if (rdev->family >= CHIP_BARTS)
+ return ni_read_disabled_bios(rdev);
else if (rdev->family >= CHIP_RV770)
return r700_read_disabled_bios(rdev);
else if (rdev->family >= CHIP_R600)
diff --git a/drivers/gpu/drm/radeon/radeon_combios.c b/drivers/gpu/drm/radeon/radeon_combios.c
index 137b8075f6e..591fcae8f22 100644
--- a/drivers/gpu/drm/radeon/radeon_combios.c
+++ b/drivers/gpu/drm/radeon/radeon_combios.c
@@ -471,8 +471,9 @@ bool radeon_combios_check_hardcoded_edid(struct radeon_device *rdev)
return true;
}
+/* this is used for atom LCDs as well */
struct edid *
-radeon_combios_get_hardcoded_edid(struct radeon_device *rdev)
+radeon_bios_get_hardcoded_edid(struct radeon_device *rdev)
{
if (rdev->mode_info.bios_hardcoded_edid)
return rdev->mode_info.bios_hardcoded_edid;
diff --git a/drivers/gpu/drm/radeon/radeon_connectors.c b/drivers/gpu/drm/radeon/radeon_connectors.c
index 8afaf7a7459..22b7e3dc0ec 100644
--- a/drivers/gpu/drm/radeon/radeon_connectors.c
+++ b/drivers/gpu/drm/radeon/radeon_connectors.c
@@ -472,6 +472,9 @@ static int radeon_lvds_get_modes(struct drm_connector *connector)
if (mode) {
ret = 1;
drm_mode_probed_add(connector, mode);
+ /* add the width/height from vbios tables if available */
+ connector->display_info.width_mm = mode->width_mm;
+ connector->display_info.height_mm = mode->height_mm;
/* add scaled modes */
radeon_add_common_modes(encoder, connector);
}
@@ -1216,7 +1219,7 @@ radeon_add_atom_connector(struct drm_device *dev,
if (ASIC_IS_AVIVO(rdev)) {
drm_connector_attach_property(&radeon_connector->base,
rdev->mode_info.underscan_property,
- UNDERSCAN_AUTO);
+ UNDERSCAN_OFF);
drm_connector_attach_property(&radeon_connector->base,
rdev->mode_info.underscan_hborder_property,
0);
@@ -1256,7 +1259,7 @@ radeon_add_atom_connector(struct drm_device *dev,
if (ASIC_IS_AVIVO(rdev)) {
drm_connector_attach_property(&radeon_connector->base,
rdev->mode_info.underscan_property,
- UNDERSCAN_AUTO);
+ UNDERSCAN_OFF);
drm_connector_attach_property(&radeon_connector->base,
rdev->mode_info.underscan_hborder_property,
0);
@@ -1299,7 +1302,7 @@ radeon_add_atom_connector(struct drm_device *dev,
if (ASIC_IS_AVIVO(rdev)) {
drm_connector_attach_property(&radeon_connector->base,
rdev->mode_info.underscan_property,
- UNDERSCAN_AUTO);
+ UNDERSCAN_OFF);
drm_connector_attach_property(&radeon_connector->base,
rdev->mode_info.underscan_hborder_property,
0);
diff --git a/drivers/gpu/drm/radeon/radeon_cs.c b/drivers/gpu/drm/radeon/radeon_cs.c
index 6d64a2705f1..35b5eb8fbe2 100644
--- a/drivers/gpu/drm/radeon/radeon_cs.c
+++ b/drivers/gpu/drm/radeon/radeon_cs.c
@@ -77,13 +77,13 @@ int radeon_cs_parser_relocs(struct radeon_cs_parser *p)
p->relocs_ptr[i] = &p->relocs[i];
p->relocs[i].robj = p->relocs[i].gobj->driver_private;
p->relocs[i].lobj.bo = p->relocs[i].robj;
- p->relocs[i].lobj.rdomain = r->read_domains;
p->relocs[i].lobj.wdomain = r->write_domain;
+ p->relocs[i].lobj.rdomain = r->read_domains;
+ p->relocs[i].lobj.tv.bo = &p->relocs[i].robj->tbo;
p->relocs[i].handle = r->handle;
p->relocs[i].flags = r->flags;
- INIT_LIST_HEAD(&p->relocs[i].lobj.list);
radeon_bo_list_add_object(&p->relocs[i].lobj,
- &p->validated);
+ &p->validated);
}
}
return radeon_bo_list_validate(&p->validated);
@@ -189,10 +189,13 @@ static void radeon_cs_parser_fini(struct radeon_cs_parser *parser, int error)
{
unsigned i;
- if (!error && parser->ib) {
- radeon_bo_list_fence(&parser->validated, parser->ib->fence);
- }
- radeon_bo_list_unreserve(&parser->validated);
+
+ if (!error && parser->ib)
+ ttm_eu_fence_buffer_objects(&parser->validated,
+ parser->ib->fence);
+ else
+ ttm_eu_backoff_reservation(&parser->validated);
+
if (parser->relocs != NULL) {
for (i = 0; i < parser->nrelocs; i++) {
if (parser->relocs[i].gobj)
diff --git a/drivers/gpu/drm/radeon/radeon_device.c b/drivers/gpu/drm/radeon/radeon_device.c
index 501966a13f4..26091d602b8 100644
--- a/drivers/gpu/drm/radeon/radeon_device.c
+++ b/drivers/gpu/drm/radeon/radeon_device.c
@@ -81,6 +81,10 @@ static const char radeon_family_name[][16] = {
"JUNIPER",
"CYPRESS",
"HEMLOCK",
+ "PALM",
+ "BARTS",
+ "TURKS",
+ "CAICOS",
"LAST",
};
@@ -224,6 +228,11 @@ int radeon_wb_init(struct radeon_device *rdev)
rdev->wb.use_event = true;
}
}
+ /* always use writeback/events on NI */
+ if (ASIC_IS_DCE5(rdev)) {
+ rdev->wb.enabled = true;
+ rdev->wb.use_event = true;
+ }
dev_info(rdev->dev, "WB %sabled\n", rdev->wb.enabled ? "en" : "dis");
@@ -335,7 +344,12 @@ bool radeon_card_posted(struct radeon_device *rdev)
uint32_t reg;
/* first check CRTCs */
- if (ASIC_IS_DCE4(rdev)) {
+ if (ASIC_IS_DCE41(rdev)) {
+ reg = RREG32(EVERGREEN_CRTC_CONTROL + EVERGREEN_CRTC0_REGISTER_OFFSET) |
+ RREG32(EVERGREEN_CRTC_CONTROL + EVERGREEN_CRTC1_REGISTER_OFFSET);
+ if (reg & EVERGREEN_CRTC_MASTER_EN)
+ return true;
+ } else if (ASIC_IS_DCE4(rdev)) {
reg = RREG32(EVERGREEN_CRTC_CONTROL + EVERGREEN_CRTC0_REGISTER_OFFSET) |
RREG32(EVERGREEN_CRTC_CONTROL + EVERGREEN_CRTC1_REGISTER_OFFSET) |
RREG32(EVERGREEN_CRTC_CONTROL + EVERGREEN_CRTC2_REGISTER_OFFSET) |
@@ -636,20 +650,20 @@ void radeon_check_arguments(struct radeon_device *rdev)
static void radeon_switcheroo_set_state(struct pci_dev *pdev, enum vga_switcheroo_state state)
{
struct drm_device *dev = pci_get_drvdata(pdev);
- struct radeon_device *rdev = dev->dev_private;
pm_message_t pmm = { .event = PM_EVENT_SUSPEND };
if (state == VGA_SWITCHEROO_ON) {
printk(KERN_INFO "radeon: switched on\n");
/* don't suspend or resume card normally */
- rdev->powered_down = false;
+ dev->switch_power_state = DRM_SWITCH_POWER_CHANGING;
radeon_resume_kms(dev);
+ dev->switch_power_state = DRM_SWITCH_POWER_ON;
drm_kms_helper_poll_enable(dev);
} else {
printk(KERN_INFO "radeon: switched off\n");
drm_kms_helper_poll_disable(dev);
+ dev->switch_power_state = DRM_SWITCH_POWER_CHANGING;
radeon_suspend_kms(dev, pmm);
- /* don't suspend or resume card normally */
- rdev->powered_down = true;
+ dev->switch_power_state = DRM_SWITCH_POWER_OFF;
}
}
@@ -704,11 +718,6 @@ int radeon_device_init(struct radeon_device *rdev,
init_waitqueue_head(&rdev->irq.vblank_queue);
init_waitqueue_head(&rdev->irq.idle_queue);
- /* setup workqueue */
- rdev->wq = create_workqueue("radeon");
- if (rdev->wq == NULL)
- return -ENOMEM;
-
/* Set asic functions */
r = radeon_asic_init(rdev);
if (r)
@@ -773,6 +782,7 @@ int radeon_device_init(struct radeon_device *rdev,
vga_client_register(rdev->pdev, rdev, NULL, radeon_vga_set_decode);
vga_switcheroo_register_client(rdev->pdev,
radeon_switcheroo_set_state,
+ NULL,
radeon_switcheroo_can_switch);
r = radeon_init(rdev);
@@ -806,7 +816,6 @@ void radeon_device_fini(struct radeon_device *rdev)
/* evict vram memory */
radeon_bo_evict_vram(rdev);
radeon_fini(rdev);
- destroy_workqueue(rdev->wq);
vga_switcheroo_unregister_client(rdev->pdev);
vga_client_register(rdev->pdev, NULL, NULL, NULL);
if (rdev->rio_mem)
@@ -835,7 +844,7 @@ int radeon_suspend_kms(struct drm_device *dev, pm_message_t state)
}
rdev = dev->dev_private;
- if (rdev->powered_down)
+ if (dev->switch_power_state == DRM_SWITCH_POWER_OFF)
return 0;
/* turn off display hw */
@@ -893,7 +902,7 @@ int radeon_resume_kms(struct drm_device *dev)
struct drm_connector *connector;
struct radeon_device *rdev = dev->dev_private;
- if (rdev->powered_down)
+ if (dev->switch_power_state == DRM_SWITCH_POWER_OFF)
return 0;
acquire_console_sem();
diff --git a/drivers/gpu/drm/radeon/radeon_display.c b/drivers/gpu/drm/radeon/radeon_display.c
index 1df4dc6c063..d26dabf878d 100644
--- a/drivers/gpu/drm/radeon/radeon_display.c
+++ b/drivers/gpu/drm/radeon/radeon_display.c
@@ -68,7 +68,7 @@ static void avivo_crtc_load_lut(struct drm_crtc *crtc)
WREG32(AVIVO_D1GRPH_LUT_SEL + radeon_crtc->crtc_offset, radeon_crtc->crtc_id);
}
-static void evergreen_crtc_load_lut(struct drm_crtc *crtc)
+static void dce4_crtc_load_lut(struct drm_crtc *crtc)
{
struct radeon_crtc *radeon_crtc = to_radeon_crtc(crtc);
struct drm_device *dev = crtc->dev;
@@ -98,6 +98,66 @@ static void evergreen_crtc_load_lut(struct drm_crtc *crtc)
}
}
+static void dce5_crtc_load_lut(struct drm_crtc *crtc)
+{
+ struct radeon_crtc *radeon_crtc = to_radeon_crtc(crtc);
+ struct drm_device *dev = crtc->dev;
+ struct radeon_device *rdev = dev->dev_private;
+ int i;
+
+ DRM_DEBUG_KMS("%d\n", radeon_crtc->crtc_id);
+
+ WREG32(NI_INPUT_CSC_CONTROL + radeon_crtc->crtc_offset,
+ (NI_INPUT_CSC_GRPH_MODE(NI_INPUT_CSC_BYPASS) |
+ NI_INPUT_CSC_OVL_MODE(NI_INPUT_CSC_BYPASS)));
+ WREG32(NI_PRESCALE_GRPH_CONTROL + radeon_crtc->crtc_offset,
+ NI_GRPH_PRESCALE_BYPASS);
+ WREG32(NI_PRESCALE_OVL_CONTROL + radeon_crtc->crtc_offset,
+ NI_OVL_PRESCALE_BYPASS);
+ WREG32(NI_INPUT_GAMMA_CONTROL + radeon_crtc->crtc_offset,
+ (NI_GRPH_INPUT_GAMMA_MODE(NI_INPUT_GAMMA_USE_LUT) |
+ NI_OVL_INPUT_GAMMA_MODE(NI_INPUT_GAMMA_USE_LUT)));
+
+ WREG32(EVERGREEN_DC_LUT_CONTROL + radeon_crtc->crtc_offset, 0);
+
+ WREG32(EVERGREEN_DC_LUT_BLACK_OFFSET_BLUE + radeon_crtc->crtc_offset, 0);
+ WREG32(EVERGREEN_DC_LUT_BLACK_OFFSET_GREEN + radeon_crtc->crtc_offset, 0);
+ WREG32(EVERGREEN_DC_LUT_BLACK_OFFSET_RED + radeon_crtc->crtc_offset, 0);
+
+ WREG32(EVERGREEN_DC_LUT_WHITE_OFFSET_BLUE + radeon_crtc->crtc_offset, 0xffff);
+ WREG32(EVERGREEN_DC_LUT_WHITE_OFFSET_GREEN + radeon_crtc->crtc_offset, 0xffff);
+ WREG32(EVERGREEN_DC_LUT_WHITE_OFFSET_RED + radeon_crtc->crtc_offset, 0xffff);
+
+ WREG32(EVERGREEN_DC_LUT_RW_MODE + radeon_crtc->crtc_offset, 0);
+ WREG32(EVERGREEN_DC_LUT_WRITE_EN_MASK + radeon_crtc->crtc_offset, 0x00000007);
+
+ WREG32(EVERGREEN_DC_LUT_RW_INDEX + radeon_crtc->crtc_offset, 0);
+ for (i = 0; i < 256; i++) {
+ WREG32(EVERGREEN_DC_LUT_30_COLOR + radeon_crtc->crtc_offset,
+ (radeon_crtc->lut_r[i] << 20) |
+ (radeon_crtc->lut_g[i] << 10) |
+ (radeon_crtc->lut_b[i] << 0));
+ }
+
+ WREG32(NI_DEGAMMA_CONTROL + radeon_crtc->crtc_offset,
+ (NI_GRPH_DEGAMMA_MODE(NI_DEGAMMA_BYPASS) |
+ NI_OVL_DEGAMMA_MODE(NI_DEGAMMA_BYPASS) |
+ NI_ICON_DEGAMMA_MODE(NI_DEGAMMA_BYPASS) |
+ NI_CURSOR_DEGAMMA_MODE(NI_DEGAMMA_BYPASS)));
+ WREG32(NI_GAMUT_REMAP_CONTROL + radeon_crtc->crtc_offset,
+ (NI_GRPH_GAMUT_REMAP_MODE(NI_GAMUT_REMAP_BYPASS) |
+ NI_OVL_GAMUT_REMAP_MODE(NI_GAMUT_REMAP_BYPASS)));
+ WREG32(NI_REGAMMA_CONTROL + radeon_crtc->crtc_offset,
+ (NI_GRPH_REGAMMA_MODE(NI_REGAMMA_BYPASS) |
+ NI_OVL_REGAMMA_MODE(NI_REGAMMA_BYPASS)));
+ WREG32(NI_OUTPUT_CSC_CONTROL + radeon_crtc->crtc_offset,
+ (NI_OUTPUT_CSC_GRPH_MODE(NI_OUTPUT_CSC_BYPASS) |
+ NI_OUTPUT_CSC_OVL_MODE(NI_OUTPUT_CSC_BYPASS)));
+ /* XXX match this to the depth of the crtc fmt block, move to modeset? */
+ WREG32(0x6940 + radeon_crtc->crtc_offset, 0);
+}
+
static void legacy_crtc_load_lut(struct drm_crtc *crtc)
{
struct radeon_crtc *radeon_crtc = to_radeon_crtc(crtc);
@@ -130,8 +190,10 @@ void radeon_crtc_load_lut(struct drm_crtc *crtc)
if (!crtc->enabled)
return;
- if (ASIC_IS_DCE4(rdev))
- evergreen_crtc_load_lut(crtc);
+ if (ASIC_IS_DCE5(rdev))
+ dce5_crtc_load_lut(crtc);
+ else if (ASIC_IS_DCE4(rdev))
+ dce4_crtc_load_lut(crtc);
else if (ASIC_IS_AVIVO(rdev))
avivo_crtc_load_lut(crtc);
else
@@ -183,12 +245,272 @@ static void radeon_crtc_destroy(struct drm_crtc *crtc)
kfree(radeon_crtc);
}
+/*
+ * Handle unpin events outside the interrupt handler proper.
+ */
+static void radeon_unpin_work_func(struct work_struct *__work)
+{
+ struct radeon_unpin_work *work =
+ container_of(__work, struct radeon_unpin_work, work);
+ int r;
+
+ /* unpin of the old buffer */
+ r = radeon_bo_reserve(work->old_rbo, false);
+ if (likely(r == 0)) {
+ r = radeon_bo_unpin(work->old_rbo);
+ if (unlikely(r != 0)) {
+ DRM_ERROR("failed to unpin buffer after flip\n");
+ }
+ radeon_bo_unreserve(work->old_rbo);
+ } else
+ DRM_ERROR("failed to reserve buffer after flip\n");
+ kfree(work);
+}
+
+void radeon_crtc_handle_flip(struct radeon_device *rdev, int crtc_id)
+{
+ struct radeon_crtc *radeon_crtc = rdev->mode_info.crtcs[crtc_id];
+ struct radeon_unpin_work *work;
+ struct drm_pending_vblank_event *e;
+ struct timeval now;
+ unsigned long flags;
+ u32 update_pending;
+ int vpos, hpos;
+
+ spin_lock_irqsave(&rdev->ddev->event_lock, flags);
+ work = radeon_crtc->unpin_work;
+ if (work == NULL ||
+ !radeon_fence_signaled(work->fence)) {
+ spin_unlock_irqrestore(&rdev->ddev->event_lock, flags);
+ return;
+ }
+ /* New pageflip, or just completion of a previous one? */
+ if (!radeon_crtc->deferred_flip_completion) {
+ /* do the flip (mmio) */
+ update_pending = radeon_page_flip(rdev, crtc_id, work->new_crtc_base);
+ } else {
+ /* This is just the completion of a flip queued in the crtc
+ * on the last invocation. Make sure we go directly to the
+ * completion routine.
+ */
+ update_pending = 0;
+ radeon_crtc->deferred_flip_completion = 0;
+ }
+
+ /* Has the pageflip already completed in the crtc, or is it
+ * certain to complete in this vblank?
+ */
+ if (update_pending &&
+ (DRM_SCANOUTPOS_VALID & radeon_get_crtc_scanoutpos(rdev->ddev, crtc_id,
+ &vpos, &hpos)) &&
+ (vpos >= 0) &&
+ (vpos < (99 * rdev->mode_info.crtcs[crtc_id]->base.hwmode.crtc_vdisplay)/100)) {
+ /* The crtc didn't flip in this target vblank interval,
+ * but the flip is still pending in the crtc. It will
+ * complete in the next vblank interval, so finish the
+ * flip at the next vblank irq.
+ */
+ radeon_crtc->deferred_flip_completion = 1;
+ spin_unlock_irqrestore(&rdev->ddev->event_lock, flags);
+ return;
+ }
+
+ /* Pageflip (will be) certainly completed in this vblank. Clean up. */
+ radeon_crtc->unpin_work = NULL;
+
+ /* wakeup userspace */
+ if (work->event) {
+ e = work->event;
+ e->event.sequence = drm_vblank_count_and_time(rdev->ddev, crtc_id, &now);
+ e->event.tv_sec = now.tv_sec;
+ e->event.tv_usec = now.tv_usec;
+ list_add_tail(&e->base.link, &e->base.file_priv->event_list);
+ wake_up_interruptible(&e->base.file_priv->event_wait);
+ }
+ spin_unlock_irqrestore(&rdev->ddev->event_lock, flags);
+
+ drm_vblank_put(rdev->ddev, radeon_crtc->crtc_id);
+ radeon_fence_unref(&work->fence);
+ radeon_post_page_flip(work->rdev, work->crtc_id);
+ schedule_work(&work->work);
+}
+
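The two-vblank handshake in radeon_crtc_handle_flip above can be summarized as follows (illustrative pseudo-C, not part of the patch; program_flip_mmio, scanout_in_active_area and complete_flip are stand-ins for the calls used above):

	/* first vblank irq: program the flip and see whether it latched */
	if (!radeon_crtc->deferred_flip_completion)
		update_pending = program_flip_mmio();	/* radeon_page_flip() */
	else
		update_pending = 0;			/* second pass: just finish */

	if (update_pending && scanout_in_active_area())
		radeon_crtc->deferred_flip_completion = 1;	/* retry next vblank */
	else
		complete_flip();	/* send event, drop vblank ref, unpin old bo */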
+static int radeon_crtc_page_flip(struct drm_crtc *crtc,
+ struct drm_framebuffer *fb,
+ struct drm_pending_vblank_event *event)
+{
+ struct drm_device *dev = crtc->dev;
+ struct radeon_device *rdev = dev->dev_private;
+ struct radeon_crtc *radeon_crtc = to_radeon_crtc(crtc);
+ struct radeon_framebuffer *old_radeon_fb;
+ struct radeon_framebuffer *new_radeon_fb;
+ struct drm_gem_object *obj;
+ struct radeon_bo *rbo;
+ struct radeon_fence *fence;
+ struct radeon_unpin_work *work;
+ unsigned long flags;
+ u32 tiling_flags, pitch_pixels;
+ u64 base;
+ int r;
+
+ work = kzalloc(sizeof *work, GFP_KERNEL);
+ if (work == NULL)
+ return -ENOMEM;
+
+ r = radeon_fence_create(rdev, &fence);
+ if (unlikely(r != 0)) {
+ kfree(work);
+ DRM_ERROR("flip queue: failed to create fence.\n");
+ return -ENOMEM;
+ }
+ work->event = event;
+ work->rdev = rdev;
+ work->crtc_id = radeon_crtc->crtc_id;
+ work->fence = radeon_fence_ref(fence);
+ old_radeon_fb = to_radeon_framebuffer(crtc->fb);
+ new_radeon_fb = to_radeon_framebuffer(fb);
+ /* schedule unpin of the old buffer */
+ obj = old_radeon_fb->obj;
+ rbo = obj->driver_private;
+ work->old_rbo = rbo;
+ INIT_WORK(&work->work, radeon_unpin_work_func);
+
+ /* We borrow the event spin lock for protecting unpin_work */
+ spin_lock_irqsave(&dev->event_lock, flags);
+ if (radeon_crtc->unpin_work) {
+ spin_unlock_irqrestore(&dev->event_lock, flags);
+ kfree(work);
+ radeon_fence_unref(&fence);
+
+ DRM_DEBUG_DRIVER("flip queue: crtc already busy\n");
+ return -EBUSY;
+ }
+ radeon_crtc->unpin_work = work;
+ radeon_crtc->deferred_flip_completion = 0;
+ spin_unlock_irqrestore(&dev->event_lock, flags);
+
+ /* pin the new buffer */
+ obj = new_radeon_fb->obj;
+ rbo = obj->driver_private;
+
+ DRM_DEBUG_DRIVER("flip-ioctl() cur_fbo = %p, cur_bbo = %p\n",
+ work->old_rbo, rbo);
+
+ r = radeon_bo_reserve(rbo, false);
+ if (unlikely(r != 0)) {
+ DRM_ERROR("failed to reserve new rbo buffer before flip\n");
+ goto pflip_cleanup;
+ }
+ r = radeon_bo_pin(rbo, RADEON_GEM_DOMAIN_VRAM, &base);
+ if (unlikely(r != 0)) {
+ radeon_bo_unreserve(rbo);
+ r = -EINVAL;
+ DRM_ERROR("failed to pin new rbo buffer before flip\n");
+ goto pflip_cleanup;
+ }
+ radeon_bo_get_tiling_flags(rbo, &tiling_flags, NULL);
+ radeon_bo_unreserve(rbo);
+
+ if (!ASIC_IS_AVIVO(rdev)) {
+ /* crtc offset is from display base addr not FB location */
+ base -= radeon_crtc->legacy_display_base_addr;
+ pitch_pixels = fb->pitch / (fb->bits_per_pixel / 8);
+
+ if (tiling_flags & RADEON_TILING_MACRO) {
+ if (ASIC_IS_R300(rdev)) {
+ base &= ~0x7ff;
+ } else {
+ int byteshift = fb->bits_per_pixel >> 4;
+ int tile_addr = (((crtc->y >> 3) * pitch_pixels + crtc->x) >> (8 - byteshift)) << 11;
+ base += tile_addr + ((crtc->x << byteshift) % 256) + ((crtc->y % 8) << 8);
+ }
+ } else {
+ int offset = crtc->y * pitch_pixels + crtc->x;
+ switch (fb->bits_per_pixel) {
+ case 8:
+ default:
+ offset *= 1;
+ break;
+ case 15:
+ case 16:
+ offset *= 2;
+ break;
+ case 24:
+ offset *= 3;
+ break;
+ case 32:
+ offset *= 4;
+ break;
+ }
+ base += offset;
+ }
+ base &= ~7;
+ }
+
+ spin_lock_irqsave(&dev->event_lock, flags);
+ work->new_crtc_base = base;
+ spin_unlock_irqrestore(&dev->event_lock, flags);
+
+ /* update crtc fb */
+ crtc->fb = fb;
+
+ r = drm_vblank_get(dev, radeon_crtc->crtc_id);
+ if (r) {
+ DRM_ERROR("failed to get vblank before flip\n");
+ goto pflip_cleanup1;
+ }
+
+ /* 32 ought to cover us */
+ r = radeon_ring_lock(rdev, 32);
+ if (r) {
+ DRM_ERROR("failed to lock the ring before flip\n");
+ goto pflip_cleanup2;
+ }
+
+ /* emit the fence */
+ radeon_fence_emit(rdev, fence);
+ /* set the proper interrupt */
+ radeon_pre_page_flip(rdev, radeon_crtc->crtc_id);
+ /* fire the ring */
+ radeon_ring_unlock_commit(rdev);
+
+ return 0;
+
+pflip_cleanup2:
+ drm_vblank_put(dev, radeon_crtc->crtc_id);
+
+pflip_cleanup1:
+ r = radeon_bo_reserve(rbo, false);
+ if (unlikely(r != 0)) {
+ DRM_ERROR("failed to reserve new rbo in error path\n");
+ goto pflip_cleanup;
+ }
+ r = radeon_bo_unpin(rbo);
+ if (unlikely(r != 0)) {
+ radeon_bo_unreserve(rbo);
+ r = -EINVAL;
+ DRM_ERROR("failed to unpin new rbo in error path\n");
+ goto pflip_cleanup;
+ }
+ radeon_bo_unreserve(rbo);
+
+pflip_cleanup:
+ spin_lock_irqsave(&dev->event_lock, flags);
+ radeon_crtc->unpin_work = NULL;
+ spin_unlock_irqrestore(&dev->event_lock, flags);
+ radeon_fence_unref(&fence);
+ kfree(work);
+
+ return r;
+}
+
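For reference, the error labels in radeon_crtc_page_flip unwind in reverse order of setup (a summary of the code above, not new behavior):

	/* pflip_cleanup2: drop the vblank reference taken for the flip
	 * pflip_cleanup1: reserve and unpin the freshly pinned buffer
	 * pflip_cleanup:  detach the work from the crtc, free fence and work
	 */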
static const struct drm_crtc_funcs radeon_crtc_funcs = {
.cursor_set = radeon_crtc_cursor_set,
.cursor_move = radeon_crtc_cursor_move,
.gamma_set = radeon_crtc_gamma_set,
.set_config = drm_crtc_helper_set_config,
.destroy = radeon_crtc_destroy,
+ .page_flip = radeon_crtc_page_flip,
};
static void radeon_crtc_init(struct drm_device *dev, int index)
@@ -225,7 +547,7 @@ static void radeon_crtc_init(struct drm_device *dev, int index)
radeon_legacy_init_crtc(dev, radeon_crtc);
}
-static const char *encoder_names[34] = {
+static const char *encoder_names[36] = {
"NONE",
"INTERNAL_LVDS",
"INTERNAL_TMDS1",
@@ -260,6 +582,8 @@ static const char *encoder_names[34] = {
"INTERNAL_KLDSCP_LVTMA",
"INTERNAL_UNIPHY1",
"INTERNAL_UNIPHY2",
+ "NUTMEG",
+ "TRAVIS",
};
static const char *connector_names[15] = {
@@ -417,9 +741,17 @@ int radeon_ddc_get_modes(struct radeon_connector *radeon_connector)
if (!radeon_connector->edid) {
radeon_connector->edid = drm_get_edid(&radeon_connector->base, &radeon_connector->ddc_bus->adapter);
}
- /* some servers provide a hardcoded edid in rom for KVMs */
- if (!radeon_connector->edid)
- radeon_connector->edid = radeon_combios_get_hardcoded_edid(rdev);
+
+ if (!radeon_connector->edid) {
+ if (rdev->is_atom_bios) {
+ /* some laptops provide a hardcoded edid in rom for LCDs */
+ if (((radeon_connector->base.connector_type == DRM_MODE_CONNECTOR_LVDS) ||
+ (radeon_connector->base.connector_type == DRM_MODE_CONNECTOR_eDP)))
+ radeon_connector->edid = radeon_bios_get_hardcoded_edid(rdev);
+ } else
+ /* some servers provide a hardcoded edid in rom for KVMs */
+ radeon_connector->edid = radeon_bios_get_hardcoded_edid(rdev);
+ }
if (radeon_connector->edid) {
drm_mode_connector_update_edid_property(&radeon_connector->base, radeon_connector->edid);
ret = drm_add_edid_modes(&radeon_connector->base, radeon_connector->edid);
@@ -849,7 +1181,10 @@ int radeon_modeset_init(struct radeon_device *rdev)
rdev->ddev->mode_config.funcs = (void *)&radeon_mode_funcs;
- if (ASIC_IS_AVIVO(rdev)) {
+ if (ASIC_IS_DCE5(rdev)) {
+ rdev->ddev->mode_config.max_width = 16384;
+ rdev->ddev->mode_config.max_height = 16384;
+ } else if (ASIC_IS_AVIVO(rdev)) {
rdev->ddev->mode_config.max_width = 8192;
rdev->ddev->mode_config.max_height = 8192;
} else {
@@ -1019,7 +1354,7 @@ bool radeon_crtc_scaling_mode_fixup(struct drm_crtc *crtc,
/*
* Retrieve current video scanout position of crtc on a given gpu.
*
- * \param rdev Device to query.
+ * \param dev Device to query.
* \param crtc Crtc to query.
* \param *vpos Location where vertical scanout position should be stored.
* \param *hpos Location where horizontal scanout position should go.
@@ -1031,72 +1366,74 @@ bool radeon_crtc_scaling_mode_fixup(struct drm_crtc *crtc,
*
* \return Flags, or'ed together as follows:
*
- * RADEON_SCANOUTPOS_VALID = Query successful.
- * RADEON_SCANOUTPOS_INVBL = Inside vblank.
- * RADEON_SCANOUTPOS_ACCURATE = Returned position is accurate. A lack of
+ * DRM_SCANOUTPOS_VALID = Query successful.
+ * DRM_SCANOUTPOS_INVBL = Inside vblank.
+ * DRM_SCANOUTPOS_ACCURATE = Returned position is accurate. A lack of
* this flag means that returned position may be offset by a constant but
* unknown small number of scanlines wrt. real scanout position.
*
*/
-int radeon_get_crtc_scanoutpos(struct radeon_device *rdev, int crtc, int *vpos, int *hpos)
+int radeon_get_crtc_scanoutpos(struct drm_device *dev, int crtc, int *vpos, int *hpos)
{
u32 stat_crtc = 0, vbl = 0, position = 0;
int vbl_start, vbl_end, vtotal, ret = 0;
bool in_vbl = true;
+ struct radeon_device *rdev = dev->dev_private;
+
if (ASIC_IS_DCE4(rdev)) {
if (crtc == 0) {
vbl = RREG32(EVERGREEN_CRTC_V_BLANK_START_END +
EVERGREEN_CRTC0_REGISTER_OFFSET);
position = RREG32(EVERGREEN_CRTC_STATUS_POSITION +
EVERGREEN_CRTC0_REGISTER_OFFSET);
- ret |= RADEON_SCANOUTPOS_VALID;
+ ret |= DRM_SCANOUTPOS_VALID;
}
if (crtc == 1) {
vbl = RREG32(EVERGREEN_CRTC_V_BLANK_START_END +
EVERGREEN_CRTC1_REGISTER_OFFSET);
position = RREG32(EVERGREEN_CRTC_STATUS_POSITION +
EVERGREEN_CRTC1_REGISTER_OFFSET);
- ret |= RADEON_SCANOUTPOS_VALID;
+ ret |= DRM_SCANOUTPOS_VALID;
}
if (crtc == 2) {
vbl = RREG32(EVERGREEN_CRTC_V_BLANK_START_END +
EVERGREEN_CRTC2_REGISTER_OFFSET);
position = RREG32(EVERGREEN_CRTC_STATUS_POSITION +
EVERGREEN_CRTC2_REGISTER_OFFSET);
- ret |= RADEON_SCANOUTPOS_VALID;
+ ret |= DRM_SCANOUTPOS_VALID;
}
if (crtc == 3) {
vbl = RREG32(EVERGREEN_CRTC_V_BLANK_START_END +
EVERGREEN_CRTC3_REGISTER_OFFSET);
position = RREG32(EVERGREEN_CRTC_STATUS_POSITION +
EVERGREEN_CRTC3_REGISTER_OFFSET);
- ret |= RADEON_SCANOUTPOS_VALID;
+ ret |= DRM_SCANOUTPOS_VALID;
}
if (crtc == 4) {
vbl = RREG32(EVERGREEN_CRTC_V_BLANK_START_END +
EVERGREEN_CRTC4_REGISTER_OFFSET);
position = RREG32(EVERGREEN_CRTC_STATUS_POSITION +
EVERGREEN_CRTC4_REGISTER_OFFSET);
- ret |= RADEON_SCANOUTPOS_VALID;
+ ret |= DRM_SCANOUTPOS_VALID;
}
if (crtc == 5) {
vbl = RREG32(EVERGREEN_CRTC_V_BLANK_START_END +
EVERGREEN_CRTC5_REGISTER_OFFSET);
position = RREG32(EVERGREEN_CRTC_STATUS_POSITION +
EVERGREEN_CRTC5_REGISTER_OFFSET);
- ret |= RADEON_SCANOUTPOS_VALID;
+ ret |= DRM_SCANOUTPOS_VALID;
}
} else if (ASIC_IS_AVIVO(rdev)) {
if (crtc == 0) {
vbl = RREG32(AVIVO_D1CRTC_V_BLANK_START_END);
position = RREG32(AVIVO_D1CRTC_STATUS_POSITION);
- ret |= RADEON_SCANOUTPOS_VALID;
+ ret |= DRM_SCANOUTPOS_VALID;
}
if (crtc == 1) {
vbl = RREG32(AVIVO_D2CRTC_V_BLANK_START_END);
position = RREG32(AVIVO_D2CRTC_STATUS_POSITION);
- ret |= RADEON_SCANOUTPOS_VALID;
+ ret |= DRM_SCANOUTPOS_VALID;
}
} else {
/* Pre-AVIVO: Different encoding of scanout pos and vblank interval. */
@@ -1112,7 +1449,7 @@ int radeon_get_crtc_scanoutpos(struct radeon_device *rdev, int crtc, int *vpos,
if (!(stat_crtc & 1))
in_vbl = false;
- ret |= RADEON_SCANOUTPOS_VALID;
+ ret |= DRM_SCANOUTPOS_VALID;
}
if (crtc == 1) {
vbl = (RREG32(RADEON_CRTC2_V_TOTAL_DISP) &
@@ -1122,7 +1459,7 @@ int radeon_get_crtc_scanoutpos(struct radeon_device *rdev, int crtc, int *vpos,
if (!(stat_crtc & 1))
in_vbl = false;
- ret |= RADEON_SCANOUTPOS_VALID;
+ ret |= DRM_SCANOUTPOS_VALID;
}
}
@@ -1133,13 +1470,13 @@ int radeon_get_crtc_scanoutpos(struct radeon_device *rdev, int crtc, int *vpos,
/* Valid vblank area boundaries from gpu retrieved? */
if (vbl > 0) {
/* Yes: Decode. */
- ret |= RADEON_SCANOUTPOS_ACCURATE;
+ ret |= DRM_SCANOUTPOS_ACCURATE;
vbl_start = vbl & 0x1fff;
vbl_end = (vbl >> 16) & 0x1fff;
}
else {
/* No: Fake something reasonable which gives at least ok results. */
- vbl_start = rdev->mode_info.crtcs[crtc]->base.mode.crtc_vdisplay;
+ vbl_start = rdev->mode_info.crtcs[crtc]->base.hwmode.crtc_vdisplay;
vbl_end = 0;
}
@@ -1155,7 +1492,7 @@ int radeon_get_crtc_scanoutpos(struct radeon_device *rdev, int crtc, int *vpos,
/* Inside "upper part" of vblank area? Apply corrective offset if so: */
if (in_vbl && (*vpos >= vbl_start)) {
- vtotal = rdev->mode_info.crtcs[crtc]->base.mode.crtc_vtotal;
+ vtotal = rdev->mode_info.crtcs[crtc]->base.hwmode.crtc_vtotal;
*vpos = *vpos - vtotal;
}
@@ -1164,7 +1501,7 @@ int radeon_get_crtc_scanoutpos(struct radeon_device *rdev, int crtc, int *vpos,
/* In vblank? */
if (in_vbl)
- ret |= RADEON_SCANOUTPOS_INVBL;
+ ret |= DRM_SCANOUTPOS_INVBL;
return ret;
}
diff --git a/drivers/gpu/drm/radeon/radeon_drv.c b/drivers/gpu/drm/radeon/radeon_drv.c
index 60e689f2d04..be5cb4f28c2 100644
--- a/drivers/gpu/drm/radeon/radeon_drv.c
+++ b/drivers/gpu/drm/radeon/radeon_drv.c
@@ -48,9 +48,10 @@
* - 2.5.0 - add get accel 2 to work around ddx breakage for evergreen
* - 2.6.0 - add tiling config query (r6xx+), add initial HiZ support (r300->r500)
* 2.7.0 - fixups for r600 2D tiling support. (no external ABI change), add eg dyn gpr regs
+ * 2.8.0 - pageflip support, r500 US_FORMAT regs. r500 ARGB2101010 colorbuf, r300->r500 CMASK
*/
#define KMS_DRIVER_MAJOR 2
-#define KMS_DRIVER_MINOR 7
+#define KMS_DRIVER_MINOR 8
#define KMS_DRIVER_PATCHLEVEL 0
int radeon_driver_load_kms(struct drm_device *dev, unsigned long flags);
int radeon_driver_unload_kms(struct drm_device *dev);
@@ -66,6 +67,10 @@ int radeon_resume_kms(struct drm_device *dev);
u32 radeon_get_vblank_counter_kms(struct drm_device *dev, int crtc);
int radeon_enable_vblank_kms(struct drm_device *dev, int crtc);
void radeon_disable_vblank_kms(struct drm_device *dev, int crtc);
+int radeon_get_vblank_timestamp_kms(struct drm_device *dev, int crtc,
+ int *max_error,
+ struct timeval *vblank_time,
+ unsigned flags);
void radeon_driver_irq_preinstall_kms(struct drm_device *dev);
int radeon_driver_irq_postinstall_kms(struct drm_device *dev);
void radeon_driver_irq_uninstall_kms(struct drm_device *dev);
@@ -74,6 +79,8 @@ int radeon_dma_ioctl_kms(struct drm_device *dev, void *data,
struct drm_file *file_priv);
int radeon_gem_object_init(struct drm_gem_object *obj);
void radeon_gem_object_free(struct drm_gem_object *obj);
+extern int radeon_get_crtc_scanoutpos(struct drm_device *dev, int crtc,
+ int *vpos, int *hpos);
extern struct drm_ioctl_desc radeon_ioctls_kms[];
extern int radeon_max_kms_ioctl;
int radeon_mmap(struct file *filp, struct vm_area_struct *vma);
@@ -296,6 +303,8 @@ static struct drm_driver kms_driver = {
.get_vblank_counter = radeon_get_vblank_counter_kms,
.enable_vblank = radeon_enable_vblank_kms,
.disable_vblank = radeon_disable_vblank_kms,
+ .get_vblank_timestamp = radeon_get_vblank_timestamp_kms,
+ .get_scanout_position = radeon_get_crtc_scanoutpos,
#if defined(CONFIG_DEBUG_FS)
.debugfs_init = radeon_debugfs_init,
.debugfs_cleanup = radeon_debugfs_cleanup,
diff --git a/drivers/gpu/drm/radeon/radeon_encoders.c b/drivers/gpu/drm/radeon/radeon_encoders.c
index 041943df966..8fd184286c0 100644
--- a/drivers/gpu/drm/radeon/radeon_encoders.c
+++ b/drivers/gpu/drm/radeon/radeon_encoders.c
@@ -641,7 +641,7 @@ atombios_get_encoder_mode(struct drm_encoder *encoder)
switch (connector->connector_type) {
case DRM_MODE_CONNECTOR_DVII:
case DRM_MODE_CONNECTOR_HDMIB: /* HDMI-B is basically DL-DVI; analog works fine */
- if (drm_detect_hdmi_monitor(radeon_connector->edid)) {
+ if (drm_detect_monitor_audio(radeon_connector->edid)) {
/* fix me */
if (ASIC_IS_DCE4(rdev))
return ATOM_ENCODER_MODE_DVI;
@@ -655,7 +655,7 @@ atombios_get_encoder_mode(struct drm_encoder *encoder)
case DRM_MODE_CONNECTOR_DVID:
case DRM_MODE_CONNECTOR_HDMIA:
default:
- if (drm_detect_hdmi_monitor(radeon_connector->edid)) {
+ if (drm_detect_monitor_audio(radeon_connector->edid)) {
/* fix me */
if (ASIC_IS_DCE4(rdev))
return ATOM_ENCODER_MODE_DVI;
@@ -673,7 +673,7 @@ atombios_get_encoder_mode(struct drm_encoder *encoder)
if ((dig_connector->dp_sink_type == CONNECTOR_OBJECT_ID_DISPLAYPORT) ||
(dig_connector->dp_sink_type == CONNECTOR_OBJECT_ID_eDP))
return ATOM_ENCODER_MODE_DP;
- else if (drm_detect_hdmi_monitor(radeon_connector->edid)) {
+ else if (drm_detect_monitor_audio(radeon_connector->edid)) {
/* fix me */
if (ASIC_IS_DCE4(rdev))
return ATOM_ENCODER_MODE_DVI;
@@ -712,8 +712,8 @@ atombios_get_encoder_mode(struct drm_encoder *encoder)
* - 2 DIG encoder blocks.
* DIG1/2 can drive UNIPHY0/1/2 link A or link B
*
- * DCE 4.0
- * - 3 DIG transmitter blocks UNPHY0/1/2 (links A and B).
+ * DCE 4.0/5.0
+ * - 3 DIG transmitter blocks UNIPHY0/1/2 (links A and B).
* Supports up to 6 digital outputs
* - 6 DIG encoder blocks.
* - DIG to PHY mapping is hardcoded
@@ -724,6 +724,12 @@ atombios_get_encoder_mode(struct drm_encoder *encoder)
* DIG5 drives UNIPHY2 link A, A+B
* DIG6 drives UNIPHY2 link B
*
+ * DCE 4.1
+ * - 3 DIG transmitter blocks UNIPHY0/1/2 (links A and B).
+ * Supports up to 6 digital outputs
+ * - 2 DIG encoder blocks.
+ * DIG1/2 can drive UNIPHY0/1/2 link A or link B
+ *
* Routing
* crtc -> dig encoder -> UNIPHY/LVTMA (1 or 2 links)
* Examples:
@@ -737,6 +743,7 @@ union dig_encoder_control {
DIG_ENCODER_CONTROL_PS_ALLOCATION v1;
DIG_ENCODER_CONTROL_PARAMETERS_V2 v2;
DIG_ENCODER_CONTROL_PARAMETERS_V3 v3;
+ DIG_ENCODER_CONTROL_PARAMETERS_V4 v4;
};
void
@@ -752,6 +759,7 @@ atombios_dig_encoder_setup(struct drm_encoder *encoder, int action)
uint8_t frev, crev;
int dp_clock = 0;
int dp_lane_count = 0;
+ int hpd_id = RADEON_HPD_NONE;
if (connector) {
struct radeon_connector *radeon_connector = to_radeon_connector(connector);
@@ -760,6 +768,7 @@ atombios_dig_encoder_setup(struct drm_encoder *encoder, int action)
dp_clock = dig_connector->dp_clock;
dp_lane_count = dig_connector->dp_lane_count;
+ hpd_id = radeon_connector->hpd.hpd;
}
/* no dig encoder assigned */
@@ -784,19 +793,36 @@ atombios_dig_encoder_setup(struct drm_encoder *encoder, int action)
args.v1.usPixelClock = cpu_to_le16(radeon_encoder->pixel_clock / 10);
args.v1.ucEncoderMode = atombios_get_encoder_mode(encoder);
- if (args.v1.ucEncoderMode == ATOM_ENCODER_MODE_DP) {
- if (dp_clock == 270000)
- args.v1.ucConfig |= ATOM_ENCODER_CONFIG_DPLINKRATE_2_70GHZ;
+ if ((args.v1.ucEncoderMode == ATOM_ENCODER_MODE_DP) ||
+ (args.v1.ucEncoderMode == ATOM_ENCODER_MODE_DP_MST))
args.v1.ucLaneNum = dp_lane_count;
- } else if (radeon_encoder->pixel_clock > 165000)
+ else if (radeon_encoder->pixel_clock > 165000)
args.v1.ucLaneNum = 8;
else
args.v1.ucLaneNum = 4;
- if (ASIC_IS_DCE4(rdev)) {
+ if (ASIC_IS_DCE5(rdev)) {
+ if ((args.v1.ucEncoderMode == ATOM_ENCODER_MODE_DP) ||
+ (args.v1.ucEncoderMode == ATOM_ENCODER_MODE_DP_MST)) {
+ if (dp_clock == 270000)
+ args.v1.ucConfig |= ATOM_ENCODER_CONFIG_V4_DPLINKRATE_2_70GHZ;
+ else if (dp_clock == 540000)
+ args.v1.ucConfig |= ATOM_ENCODER_CONFIG_V4_DPLINKRATE_5_40GHZ;
+ }
+ args.v4.acConfig.ucDigSel = dig->dig_encoder;
+ args.v4.ucBitPerColor = PANEL_8BIT_PER_COLOR;
+ if (hpd_id == RADEON_HPD_NONE)
+ args.v4.ucHPD_ID = 0;
+ else
+ args.v4.ucHPD_ID = hpd_id + 1;
+ } else if (ASIC_IS_DCE4(rdev)) {
+ if ((args.v1.ucEncoderMode == ATOM_ENCODER_MODE_DP) && (dp_clock == 270000))
+ args.v1.ucConfig |= ATOM_ENCODER_CONFIG_V3_DPLINKRATE_2_70GHZ;
args.v3.acConfig.ucDigSel = dig->dig_encoder;
args.v3.ucBitPerColor = PANEL_8BIT_PER_COLOR;
} else {
+ if ((args.v1.ucEncoderMode == ATOM_ENCODER_MODE_DP) && (dp_clock == 270000))
+ args.v1.ucConfig |= ATOM_ENCODER_CONFIG_DPLINKRATE_2_70GHZ;
switch (radeon_encoder->encoder_id) {
case ENCODER_OBJECT_ID_INTERNAL_UNIPHY:
args.v1.ucConfig = ATOM_ENCODER_CONFIG_V2_TRANSMITTER1;
@@ -823,6 +849,7 @@ union dig_transmitter_control {
DIG_TRANSMITTER_CONTROL_PS_ALLOCATION v1;
DIG_TRANSMITTER_CONTROL_PARAMETERS_V2 v2;
DIG_TRANSMITTER_CONTROL_PARAMETERS_V3 v3;
+ DIG_TRANSMITTER_CONTROL_PARAMETERS_V4 v4;
};
void
@@ -917,10 +944,18 @@ atombios_dig_transmitter_setup(struct drm_encoder *encoder, int action, uint8_t
struct radeon_crtc *radeon_crtc = to_radeon_crtc(encoder->crtc);
pll_id = radeon_crtc->pll_id;
}
- if (is_dp && rdev->clock.dp_extclk)
- args.v3.acConfig.ucRefClkSource = 2; /* external src */
- else
- args.v3.acConfig.ucRefClkSource = pll_id;
+
+ if (ASIC_IS_DCE5(rdev)) {
+ if (is_dp && rdev->clock.dp_extclk)
+ args.v4.acConfig.ucRefClkSource = 3; /* external src */
+ else
+ args.v4.acConfig.ucRefClkSource = pll_id;
+ } else {
+ if (is_dp && rdev->clock.dp_extclk)
+ args.v3.acConfig.ucRefClkSource = 2; /* external src */
+ else
+ args.v3.acConfig.ucRefClkSource = pll_id;
+ }
switch (radeon_encoder->encoder_id) {
case ENCODER_OBJECT_ID_INTERNAL_UNIPHY:
@@ -1044,6 +1079,7 @@ atombios_set_edp_panel_power(struct drm_connector *connector, int action)
union external_encoder_control {
EXTERNAL_ENCODER_CONTROL_PS_ALLOCATION v1;
+ EXTERNAL_ENCODER_CONTROL_PS_ALLOCATION_V3 v3;
};
static void
@@ -1054,6 +1090,7 @@ atombios_external_encoder_setup(struct drm_encoder *encoder,
struct drm_device *dev = encoder->dev;
struct radeon_device *rdev = dev->dev_private;
struct radeon_encoder *radeon_encoder = to_radeon_encoder(encoder);
+ struct radeon_encoder *ext_radeon_encoder = to_radeon_encoder(ext_encoder);
union external_encoder_control args;
struct drm_connector *connector = radeon_get_connector_for_encoder(encoder);
int index = GetIndexIntoMasterTable(COMMAND, ExternalEncoderControl);
@@ -1061,6 +1098,7 @@ atombios_external_encoder_setup(struct drm_encoder *encoder,
int dp_clock = 0;
int dp_lane_count = 0;
int connector_object_id = 0;
+ u32 ext_enum = (ext_radeon_encoder->encoder_enum & ENUM_ID_MASK) >> ENUM_ID_SHIFT;
if (connector) {
struct radeon_connector *radeon_connector = to_radeon_connector(connector);
@@ -1099,6 +1137,37 @@ atombios_external_encoder_setup(struct drm_encoder *encoder,
else
args.v1.sDigEncoder.ucLaneNum = 4;
break;
+ case 3:
+ args.v3.sExtEncoder.ucAction = action;
+ if (action == EXTERNAL_ENCODER_ACTION_V3_ENCODER_INIT)
+ args.v3.sExtEncoder.usConnectorId = connector_object_id;
+ else
+ args.v3.sExtEncoder.usPixelClock = cpu_to_le16(radeon_encoder->pixel_clock / 10);
+ args.v3.sExtEncoder.ucEncoderMode = atombios_get_encoder_mode(encoder);
+
+ if (args.v3.sExtEncoder.ucEncoderMode == ATOM_ENCODER_MODE_DP) {
+ if (dp_clock == 270000)
+ args.v3.sExtEncoder.ucConfig |= EXTERNAL_ENCODER_CONFIG_V3_DPLINKRATE_2_70GHZ;
+ else if (dp_clock == 540000)
+ args.v3.sExtEncoder.ucConfig |= EXTERNAL_ENCODER_CONFIG_V3_DPLINKRATE_5_40GHZ;
+ args.v3.sExtEncoder.ucLaneNum = dp_lane_count;
+ } else if (radeon_encoder->pixel_clock > 165000)
+ args.v3.sExtEncoder.ucLaneNum = 8;
+ else
+ args.v3.sExtEncoder.ucLaneNum = 4;
+ switch (ext_enum) {
+ case GRAPH_OBJECT_ENUM_ID1:
+ args.v3.sExtEncoder.ucConfig |= EXTERNAL_ENCODER_CONFIG_V3_ENCODER1;
+ break;
+ case GRAPH_OBJECT_ENUM_ID2:
+ args.v3.sExtEncoder.ucConfig |= EXTERNAL_ENCODER_CONFIG_V3_ENCODER2;
+ break;
+ case GRAPH_OBJECT_ENUM_ID3:
+ args.v3.sExtEncoder.ucConfig |= EXTERNAL_ENCODER_CONFIG_V3_ENCODER3;
+ break;
+ }
+ args.v3.sExtEncoder.ucBitPerColor = PANEL_8BIT_PER_COLOR;
+ break;
default:
DRM_ERROR("Unknown table version: %d, %d\n", frev, crev);
return;
@@ -1158,6 +1227,8 @@ radeon_atom_encoder_dpms(struct drm_encoder *encoder, int mode)
DISPLAY_DEVICE_OUTPUT_CONTROL_PS_ALLOCATION args;
int index = 0;
bool is_dig = false;
+ bool is_dce5_dac = false;
+ bool is_dce5_dvo = false;
memset(&args, 0, sizeof(args));
@@ -1180,7 +1251,9 @@ radeon_atom_encoder_dpms(struct drm_encoder *encoder, int mode)
index = GetIndexIntoMasterTable(COMMAND, DVOOutputControl);
break;
case ENCODER_OBJECT_ID_INTERNAL_KLDSCP_DVO1:
- if (ASIC_IS_DCE3(rdev))
+ if (ASIC_IS_DCE5(rdev))
+ is_dce5_dvo = true;
+ else if (ASIC_IS_DCE3(rdev))
is_dig = true;
else
index = GetIndexIntoMasterTable(COMMAND, DVOOutputControl);
@@ -1196,12 +1269,16 @@ radeon_atom_encoder_dpms(struct drm_encoder *encoder, int mode)
break;
case ENCODER_OBJECT_ID_INTERNAL_DAC1:
case ENCODER_OBJECT_ID_INTERNAL_KLDSCP_DAC1:
- if (radeon_encoder->active_device & (ATOM_DEVICE_TV_SUPPORT))
- index = GetIndexIntoMasterTable(COMMAND, TV1OutputControl);
- else if (radeon_encoder->active_device & (ATOM_DEVICE_CV_SUPPORT))
- index = GetIndexIntoMasterTable(COMMAND, CV1OutputControl);
- else
- index = GetIndexIntoMasterTable(COMMAND, DAC1OutputControl);
+ if (ASIC_IS_DCE5(rdev))
+ is_dce5_dac = true;
+ else {
+ if (radeon_encoder->active_device & (ATOM_DEVICE_TV_SUPPORT))
+ index = GetIndexIntoMasterTable(COMMAND, TV1OutputControl);
+ else if (radeon_encoder->active_device & (ATOM_DEVICE_CV_SUPPORT))
+ index = GetIndexIntoMasterTable(COMMAND, CV1OutputControl);
+ else
+ index = GetIndexIntoMasterTable(COMMAND, DAC1OutputControl);
+ }
break;
case ENCODER_OBJECT_ID_INTERNAL_DAC2:
case ENCODER_OBJECT_ID_INTERNAL_KLDSCP_DAC2:
@@ -1260,6 +1337,28 @@ radeon_atom_encoder_dpms(struct drm_encoder *encoder, int mode)
atombios_dig_transmitter_setup(encoder, ATOM_TRANSMITTER_ACTION_LCD_BLOFF, 0, 0);
break;
}
+ } else if (is_dce5_dac) {
+ switch (mode) {
+ case DRM_MODE_DPMS_ON:
+ atombios_dac_setup(encoder, ATOM_ENABLE);
+ break;
+ case DRM_MODE_DPMS_STANDBY:
+ case DRM_MODE_DPMS_SUSPEND:
+ case DRM_MODE_DPMS_OFF:
+ atombios_dac_setup(encoder, ATOM_DISABLE);
+ break;
+ }
+ } else if (is_dce5_dvo) {
+ switch (mode) {
+ case DRM_MODE_DPMS_ON:
+ atombios_dvo_setup(encoder, ATOM_ENABLE);
+ break;
+ case DRM_MODE_DPMS_STANDBY:
+ case DRM_MODE_DPMS_SUSPEND:
+ case DRM_MODE_DPMS_OFF:
+ atombios_dvo_setup(encoder, ATOM_DISABLE);
+ break;
+ }
} else {
switch (mode) {
case DRM_MODE_DPMS_ON:
@@ -1289,12 +1388,18 @@ radeon_atom_encoder_dpms(struct drm_encoder *encoder, int mode)
switch (mode) {
case DRM_MODE_DPMS_ON:
default:
- action = ATOM_ENABLE;
+ if (ASIC_IS_DCE41(rdev))
+ action = EXTERNAL_ENCODER_ACTION_V3_ENABLE_OUTPUT;
+ else
+ action = ATOM_ENABLE;
break;
case DRM_MODE_DPMS_STANDBY:
case DRM_MODE_DPMS_SUSPEND:
case DRM_MODE_DPMS_OFF:
- action = ATOM_DISABLE;
+ if (ASIC_IS_DCE41(rdev))
+ action = EXTERNAL_ENCODER_ACTION_V3_DISABLE_OUTPUT;
+ else
+ action = ATOM_DISABLE;
break;
}
atombios_external_encoder_setup(encoder, ext_encoder, action);
@@ -1483,27 +1588,35 @@ static int radeon_atom_pick_dig_encoder(struct drm_encoder *encoder)
struct radeon_encoder_atom_dig *dig;
uint32_t dig_enc_in_use = 0;
+ /* DCE4/5 */
if (ASIC_IS_DCE4(rdev)) {
dig = radeon_encoder->enc_priv;
- switch (radeon_encoder->encoder_id) {
- case ENCODER_OBJECT_ID_INTERNAL_UNIPHY:
+ if (ASIC_IS_DCE41(rdev)) {
if (dig->linkb)
return 1;
else
return 0;
- break;
- case ENCODER_OBJECT_ID_INTERNAL_UNIPHY1:
- if (dig->linkb)
- return 3;
- else
- return 2;
- break;
- case ENCODER_OBJECT_ID_INTERNAL_UNIPHY2:
- if (dig->linkb)
- return 5;
- else
- return 4;
- break;
+ } else {
+ switch (radeon_encoder->encoder_id) {
+ case ENCODER_OBJECT_ID_INTERNAL_UNIPHY:
+ if (dig->linkb)
+ return 1;
+ else
+ return 0;
+ break;
+ case ENCODER_OBJECT_ID_INTERNAL_UNIPHY1:
+ if (dig->linkb)
+ return 3;
+ else
+ return 2;
+ break;
+ case ENCODER_OBJECT_ID_INTERNAL_UNIPHY2:
+ if (dig->linkb)
+ return 5;
+ else
+ return 4;
+ break;
+ }
}
}
@@ -1610,7 +1723,13 @@ radeon_atom_encoder_mode_set(struct drm_encoder *encoder,
}
if (ext_encoder) {
- atombios_external_encoder_setup(encoder, ext_encoder, ATOM_ENABLE);
+ if (ASIC_IS_DCE41(rdev)) {
+ atombios_external_encoder_setup(encoder, ext_encoder,
+ EXTERNAL_ENCODER_ACTION_V3_ENCODER_INIT);
+ atombios_external_encoder_setup(encoder, ext_encoder,
+ EXTERNAL_ENCODER_ACTION_V3_ENCODER_SETUP);
+ } else
+ atombios_external_encoder_setup(encoder, ext_encoder, ATOM_ENABLE);
}
atombios_apply_encoder_quirks(encoder, adjusted_mode);
@@ -1927,7 +2046,10 @@ radeon_atombios_set_dig_info(struct radeon_encoder *radeon_encoder)
}
void
-radeon_add_atom_encoder(struct drm_device *dev, uint32_t encoder_enum, uint32_t supported_device)
+radeon_add_atom_encoder(struct drm_device *dev,
+ uint32_t encoder_enum,
+ uint32_t supported_device,
+ u16 caps)
{
struct radeon_device *rdev = dev->dev_private;
struct drm_encoder *encoder;
@@ -1970,6 +2092,7 @@ radeon_add_atom_encoder(struct drm_device *dev, uint32_t encoder_enum, uint32_t
radeon_encoder->rmx_type = RMX_OFF;
radeon_encoder->underscan_type = UNDERSCAN_OFF;
radeon_encoder->is_ext_encoder = false;
+ radeon_encoder->caps = caps;
switch (radeon_encoder->encoder_id) {
case ENCODER_OBJECT_ID_INTERNAL_LVDS:
@@ -2029,6 +2152,8 @@ radeon_add_atom_encoder(struct drm_device *dev, uint32_t encoder_enum, uint32_t
case ENCODER_OBJECT_ID_TITFP513:
case ENCODER_OBJECT_ID_VT1623:
case ENCODER_OBJECT_ID_HDMI_SI1930:
+ case ENCODER_OBJECT_ID_TRAVIS:
+ case ENCODER_OBJECT_ID_NUTMEG:
/* these are handled by the primary encoders */
radeon_encoder->is_ext_encoder = true;
if (radeon_encoder->devices & (ATOM_DEVICE_LCD_SUPPORT))
diff --git a/drivers/gpu/drm/radeon/radeon_family.h b/drivers/gpu/drm/radeon/radeon_family.h
index e329066dcab..1ca55eb09ad 100644
--- a/drivers/gpu/drm/radeon/radeon_family.h
+++ b/drivers/gpu/drm/radeon/radeon_family.h
@@ -80,6 +80,10 @@ enum radeon_family {
CHIP_JUNIPER,
CHIP_CYPRESS,
CHIP_HEMLOCK,
+ CHIP_PALM,
+ CHIP_BARTS,
+ CHIP_TURKS,
+ CHIP_CAICOS,
CHIP_LAST,
};
diff --git a/drivers/gpu/drm/radeon/radeon_fb.c b/drivers/gpu/drm/radeon/radeon_fb.c
index 6abea32be5e..ca32e9c1e91 100644
--- a/drivers/gpu/drm/radeon/radeon_fb.c
+++ b/drivers/gpu/drm/radeon/radeon_fb.c
@@ -225,8 +225,6 @@ static int radeonfb_create(struct radeon_fbdev *rfbdev,
strcpy(info->fix.id, "radeondrmfb");
- drm_fb_helper_fill_fix(info, fb->pitch, fb->depth);
-
info->flags = FBINFO_DEFAULT | FBINFO_CAN_FORCE_OUTPUT;
info->fbops = &radeonfb_ops;
@@ -247,8 +245,6 @@ static int radeonfb_create(struct radeon_fbdev *rfbdev,
info->apertures->ranges[0].base = rdev->ddev->mode_config.fb_base;
info->apertures->ranges[0].size = rdev->mc.aper_size;
- info->fix.mmio_start = 0;
- info->fix.mmio_len = 0;
info->pixmap.size = 64*1024;
info->pixmap.buf_align = 8;
info->pixmap.access_align = 32;
diff --git a/drivers/gpu/drm/radeon/radeon_fence.c b/drivers/gpu/drm/radeon/radeon_fence.c
index daacb281dfa..171b0b2e3a6 100644
--- a/drivers/gpu/drm/radeon/radeon_fence.c
+++ b/drivers/gpu/drm/radeon/radeon_fence.c
@@ -38,6 +38,7 @@
#include "drm.h"
#include "radeon_reg.h"
#include "radeon.h"
+#include "radeon_trace.h"
int radeon_fence_emit(struct radeon_device *rdev, struct radeon_fence *fence)
{
@@ -57,6 +58,7 @@ int radeon_fence_emit(struct radeon_device *rdev, struct radeon_fence *fence)
} else
radeon_fence_ring_emit(rdev, fence);
+ trace_radeon_fence_emit(rdev->ddev, fence->seq);
fence->emited = true;
list_del(&fence->list);
list_add_tail(&fence->list, &rdev->fence_drv.emited);
@@ -213,6 +215,7 @@ int radeon_fence_wait(struct radeon_fence *fence, bool intr)
retry:
/* save current sequence used to check for GPU lockup */
seq = rdev->fence_drv.last_seq;
+ trace_radeon_fence_wait_begin(rdev->ddev, seq);
if (intr) {
radeon_irq_kms_sw_irq_get(rdev);
r = wait_event_interruptible_timeout(rdev->fence_drv.queue,
@@ -227,6 +230,7 @@ retry:
radeon_fence_signaled(fence), timeout);
radeon_irq_kms_sw_irq_put(rdev);
}
+ trace_radeon_fence_wait_end(rdev->ddev, seq);
if (unlikely(!radeon_fence_signaled(fence))) {
 /* we were interrupted for some reason and fence
 * isn't signaled yet, resume wait
diff --git a/drivers/gpu/drm/radeon/radeon_irq_kms.c b/drivers/gpu/drm/radeon/radeon_irq_kms.c
index a108c7ed14f..a289646e8aa 100644
--- a/drivers/gpu/drm/radeon/radeon_irq_kms.c
+++ b/drivers/gpu/drm/radeon/radeon_irq_kms.c
@@ -64,15 +64,15 @@ void radeon_driver_irq_preinstall_kms(struct drm_device *dev)
struct radeon_device *rdev = dev->dev_private;
unsigned i;
- INIT_WORK(&rdev->hotplug_work, radeon_hotplug_work_func);
-
/* Disable *all* interrupts */
rdev->irq.sw_int = false;
rdev->irq.gui_idle = false;
for (i = 0; i < rdev->num_crtc; i++)
rdev->irq.crtc_vblank_int[i] = false;
- for (i = 0; i < 6; i++)
+ for (i = 0; i < 6; i++) {
rdev->irq.hpd[i] = false;
+ rdev->irq.pflip[i] = false;
+ }
radeon_irq_set(rdev);
/* Clear bits */
radeon_irq_process(rdev);
@@ -101,8 +101,10 @@ void radeon_driver_irq_uninstall_kms(struct drm_device *dev)
rdev->irq.gui_idle = false;
for (i = 0; i < rdev->num_crtc; i++)
rdev->irq.crtc_vblank_int[i] = false;
- for (i = 0; i < 6; i++)
+ for (i = 0; i < 6; i++) {
rdev->irq.hpd[i] = false;
+ rdev->irq.pflip[i] = false;
+ }
radeon_irq_set(rdev);
}
@@ -110,6 +112,8 @@ int radeon_irq_kms_init(struct radeon_device *rdev)
{
int r = 0;
+ INIT_WORK(&rdev->hotplug_work, radeon_hotplug_work_func);
+
spin_lock_init(&rdev->irq.sw_lock);
r = drm_vblank_init(rdev->ddev, rdev->num_crtc);
if (r) {
@@ -121,7 +125,7 @@ int radeon_irq_kms_init(struct radeon_device *rdev)
* chips. Disable MSI on them for now.
*/
if ((rdev->family >= CHIP_RV380) &&
- (!(rdev->flags & RADEON_IS_IGP)) &&
+ ((!(rdev->flags & RADEON_IS_IGP)) || (rdev->family >= CHIP_PALM)) &&
(!(rdev->flags & RADEON_IS_AGP))) {
int ret = pci_enable_msi(rdev->pdev);
if (!ret) {
@@ -148,6 +152,7 @@ void radeon_irq_kms_fini(struct radeon_device *rdev)
if (rdev->msi_enabled)
pci_disable_msi(rdev->pdev);
}
+ flush_work_sync(&rdev->hotplug_work);
}
void radeon_irq_kms_sw_irq_get(struct radeon_device *rdev)
@@ -175,3 +180,34 @@ void radeon_irq_kms_sw_irq_put(struct radeon_device *rdev)
spin_unlock_irqrestore(&rdev->irq.sw_lock, irqflags);
}
+void radeon_irq_kms_pflip_irq_get(struct radeon_device *rdev, int crtc)
+{
+ unsigned long irqflags;
+
+ if (crtc < 0 || crtc >= rdev->num_crtc)
+ return;
+
+ spin_lock_irqsave(&rdev->irq.pflip_lock[crtc], irqflags);
+ if (rdev->ddev->irq_enabled && (++rdev->irq.pflip_refcount[crtc] == 1)) {
+ rdev->irq.pflip[crtc] = true;
+ radeon_irq_set(rdev);
+ }
+ spin_unlock_irqrestore(&rdev->irq.pflip_lock[crtc], irqflags);
+}
+
+void radeon_irq_kms_pflip_irq_put(struct radeon_device *rdev, int crtc)
+{
+ unsigned long irqflags;
+
+ if (crtc < 0 || crtc >= rdev->num_crtc)
+ return;
+
+ spin_lock_irqsave(&rdev->irq.pflip_lock[crtc], irqflags);
+ BUG_ON(rdev->ddev->irq_enabled && rdev->irq.pflip_refcount[crtc] <= 0);
+ if (rdev->ddev->irq_enabled && (--rdev->irq.pflip_refcount[crtc] == 0)) {
+ rdev->irq.pflip[crtc] = false;
+ radeon_irq_set(rdev);
+ }
+ spin_unlock_irqrestore(&rdev->irq.pflip_lock[crtc], irqflags);
+}
+
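
The two helpers above refcount the per-CRTC pageflip interrupt so that overlapping flips keep it armed exactly as long as any flip is pending. A minimal sketch of the intended pairing, assuming a hypothetical flip path (the example_* names are illustrative, not the upstream entry points):

	/* Sketch: arm the flip-completion interrupt for the lifetime of a
	 * pending flip; the put in the completion handler balances the get. */
	static int example_queue_flip(struct radeon_device *rdev, int crtc_id)
	{
		radeon_irq_kms_pflip_irq_get(rdev, crtc_id);
		/* ...program the new scanout base, e.g. via an asic
		 * page-flip hook such as rs600_page_flip() below... */
		return 0;
	}

	static void example_flip_completed(struct radeon_device *rdev, int crtc_id)
	{
		/* called from the vblank interrupt once the flip has latched */
		radeon_irq_kms_pflip_irq_put(rdev, crtc_id);
	}
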
diff --git a/drivers/gpu/drm/radeon/radeon_kms.c b/drivers/gpu/drm/radeon/radeon_kms.c
index 8fbbe1c6ebb..28a53e4a925 100644
--- a/drivers/gpu/drm/radeon/radeon_kms.c
+++ b/drivers/gpu/drm/radeon/radeon_kms.c
@@ -96,9 +96,27 @@ out:
return r;
}
+static void radeon_set_filp_rights(struct drm_device *dev,
+ struct drm_file **owner,
+ struct drm_file *applier,
+ uint32_t *value)
+{
+ mutex_lock(&dev->struct_mutex);
+ if (*value == 1) {
+ /* wants rights */
+ if (!*owner)
+ *owner = applier;
+ } else if (*value == 0) {
+ /* revokes rights */
+ if (*owner == applier)
+ *owner = NULL;
+ }
+ *value = *owner == applier ? 1 : 0;
+ mutex_unlock(&dev->struct_mutex);
+}
/*
- * Userspace get informations ioctl
+ * Userspace get information ioctl
*/
int radeon_info_ioctl(struct drm_device *dev, void *data, struct drm_file *filp)
{
@@ -173,18 +191,15 @@ int radeon_info_ioctl(struct drm_device *dev, void *data, struct drm_file *filp)
DRM_DEBUG_KMS("WANT_HYPERZ: invalid value %d\n", value);
return -EINVAL;
}
- mutex_lock(&dev->struct_mutex);
- if (value == 1) {
- /* wants hyper-z */
- if (!rdev->hyperz_filp)
- rdev->hyperz_filp = filp;
- } else if (value == 0) {
- /* revokes hyper-z */
- if (rdev->hyperz_filp == filp)
- rdev->hyperz_filp = NULL;
+ radeon_set_filp_rights(dev, &rdev->hyperz_filp, filp, &value);
+ break;
+ case RADEON_INFO_WANT_CMASK:
+ /* The same logic as Hyper-Z. */
+ if (value >= 2) {
+ DRM_DEBUG_KMS("WANT_CMASK: invalid value %d\n", value);
+ return -EINVAL;
}
- value = rdev->hyperz_filp == filp ? 1 : 0;
- mutex_unlock(&dev->struct_mutex);
+ radeon_set_filp_rights(dev, &rdev->cmask_filp, filp, &value);
break;
default:
DRM_DEBUG_KMS("Invalid request %d\n", info->request);
@@ -203,10 +218,6 @@ int radeon_info_ioctl(struct drm_device *dev, void *data, struct drm_file *filp)
*/
int radeon_driver_firstopen_kms(struct drm_device *dev)
{
- struct radeon_device *rdev = dev->dev_private;
-
- if (rdev->powered_down)
- return -EINVAL;
return 0;
}
@@ -277,6 +288,27 @@ void radeon_disable_vblank_kms(struct drm_device *dev, int crtc)
radeon_irq_set(rdev);
}
+int radeon_get_vblank_timestamp_kms(struct drm_device *dev, int crtc,
+ int *max_error,
+ struct timeval *vblank_time,
+ unsigned flags)
+{
+ struct drm_crtc *drmcrtc;
+ struct radeon_device *rdev = dev->dev_private;
+
+ if (crtc < 0 || crtc >= dev->num_crtcs) {
+ DRM_ERROR("Invalid crtc %d\n", crtc);
+ return -EINVAL;
+ }
+
+ /* Get associated drm_crtc: */
+ drmcrtc = &rdev->mode_info.crtcs[crtc]->base;
+
+ /* Helper routine in DRM core does all the work: */
+ return drm_calc_vbltimestamp_from_scanoutpos(dev, crtc, max_error,
+ vblank_time, flags,
+ drmcrtc);
+}
/*
* IOCTL.
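
radeon_set_filp_rights() above serializes exclusive feature ownership (Hyper-Z, and now CMASK) on dev->struct_mutex. For the scheme to be safe, the grant must also be dropped when the owning file closes, or a crashed client would pin the feature forever. A sketch of the matching release, assuming it lives in the driver's preclose hook (illustrative rather than the exact upstream body):

	void radeon_driver_preclose_kms(struct drm_device *dev,
					struct drm_file *file_priv)
	{
		struct radeon_device *rdev = dev->dev_private;

		mutex_lock(&dev->struct_mutex);
		if (rdev->hyperz_filp == file_priv)
			rdev->hyperz_filp = NULL;
		if (rdev->cmask_filp == file_priv)
			rdev->cmask_filp = NULL;
		mutex_unlock(&dev->struct_mutex);
	}
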
diff --git a/drivers/gpu/drm/radeon/radeon_mode.h b/drivers/gpu/drm/radeon/radeon_mode.h
index e301c6f9e05..12bdeab91c8 100644
--- a/drivers/gpu/drm/radeon/radeon_mode.h
+++ b/drivers/gpu/drm/radeon/radeon_mode.h
@@ -277,6 +277,9 @@ struct radeon_crtc {
fixed20_12 hsc;
struct drm_display_mode native_mode;
int pll_id;
+ /* page flipping */
+ struct radeon_unpin_work *unpin_work;
+ int deferred_flip_completion;
};
struct radeon_encoder_primary_dac {
@@ -376,6 +379,7 @@ struct radeon_encoder {
int hdmi_audio_workaround;
int hdmi_buffer_status;
bool is_ext_encoder;
+ u16 caps;
};
struct radeon_connector_atom_dig {
@@ -442,10 +446,6 @@ struct radeon_framebuffer {
struct drm_gem_object *obj;
};
-/* radeon_get_crtc_scanoutpos() return flags */
-#define RADEON_SCANOUTPOS_VALID (1 << 0)
-#define RADEON_SCANOUTPOS_INVBL (1 << 1)
-#define RADEON_SCANOUTPOS_ACCURATE (1 << 2)
extern enum radeon_tv_std
radeon_combios_get_tv_info(struct radeon_device *rdev);
@@ -562,11 +562,12 @@ extern int radeon_crtc_cursor_set(struct drm_crtc *crtc,
extern int radeon_crtc_cursor_move(struct drm_crtc *crtc,
int x, int y);
-extern int radeon_get_crtc_scanoutpos(struct radeon_device *rdev, int crtc, int *vpos, int *hpos);
+extern int radeon_get_crtc_scanoutpos(struct drm_device *dev, int crtc,
+ int *vpos, int *hpos);
extern bool radeon_combios_check_hardcoded_edid(struct radeon_device *rdev);
extern struct edid *
-radeon_combios_get_hardcoded_edid(struct radeon_device *rdev);
+radeon_bios_get_hardcoded_edid(struct radeon_device *rdev);
extern bool radeon_atom_get_clock_info(struct drm_device *dev);
extern bool radeon_combios_get_clock_info(struct drm_device *dev);
extern struct radeon_encoder_atom_dig *
@@ -662,4 +663,7 @@ int radeon_fbdev_total_size(struct radeon_device *rdev);
bool radeon_fbdev_robj_is_fb(struct radeon_device *rdev, struct radeon_bo *robj);
void radeon_fb_output_poll_changed(struct radeon_device *rdev);
+
+void radeon_crtc_handle_flip(struct radeon_device *rdev, int crtc_id);
+
#endif
diff --git a/drivers/gpu/drm/radeon/radeon_object.c b/drivers/gpu/drm/radeon/radeon_object.c
index a598d0049aa..7d6b8e88f74 100644
--- a/drivers/gpu/drm/radeon/radeon_object.c
+++ b/drivers/gpu/drm/radeon/radeon_object.c
@@ -34,6 +34,7 @@
#include <drm/drmP.h>
#include "radeon_drm.h"
#include "radeon.h"
+#include "radeon_trace.h"
int radeon_ttm_init(struct radeon_device *rdev);
@@ -146,6 +147,7 @@ retry:
list_add_tail(&bo->list, &rdev->gem.objects);
mutex_unlock(&bo->rdev->gem.mutex);
}
+ trace_radeon_bo_create(bo);
return 0;
}
@@ -302,34 +304,9 @@ void radeon_bo_list_add_object(struct radeon_bo_list *lobj,
struct list_head *head)
{
if (lobj->wdomain) {
- list_add(&lobj->list, head);
+ list_add(&lobj->tv.head, head);
} else {
- list_add_tail(&lobj->list, head);
- }
-}
-
-int radeon_bo_list_reserve(struct list_head *head)
-{
- struct radeon_bo_list *lobj;
- int r;
-
- list_for_each_entry(lobj, head, list){
- r = radeon_bo_reserve(lobj->bo, false);
- if (unlikely(r != 0))
- return r;
- lobj->reserved = true;
- }
- return 0;
-}
-
-void radeon_bo_list_unreserve(struct list_head *head)
-{
- struct radeon_bo_list *lobj;
-
- list_for_each_entry(lobj, head, list) {
- /* only unreserve object we successfully reserved */
- if (lobj->reserved && radeon_bo_is_reserved(lobj->bo))
- radeon_bo_unreserve(lobj->bo);
+ list_add_tail(&lobj->tv.head, head);
}
}
@@ -340,14 +317,11 @@ int radeon_bo_list_validate(struct list_head *head)
u32 domain;
int r;
- list_for_each_entry(lobj, head, list) {
- lobj->reserved = false;
- }
- r = radeon_bo_list_reserve(head);
+ r = ttm_eu_reserve_buffers(head);
if (unlikely(r != 0)) {
return r;
}
- list_for_each_entry(lobj, head, list) {
+ list_for_each_entry(lobj, head, tv.head) {
bo = lobj->bo;
if (!bo->pin_count) {
domain = lobj->wdomain ? lobj->wdomain : lobj->rdomain;
@@ -370,25 +344,6 @@ int radeon_bo_list_validate(struct list_head *head)
return 0;
}
-void radeon_bo_list_fence(struct list_head *head, void *fence)
-{
- struct radeon_bo_list *lobj;
- struct radeon_bo *bo;
- struct radeon_fence *old_fence = NULL;
-
- list_for_each_entry(lobj, head, list) {
- bo = lobj->bo;
- spin_lock(&bo->tbo.lock);
- old_fence = (struct radeon_fence *)bo->tbo.sync_obj;
- bo->tbo.sync_obj = radeon_fence_ref(fence);
- bo->tbo.sync_obj_arg = NULL;
- spin_unlock(&bo->tbo.lock);
- if (old_fence) {
- radeon_fence_unref(&old_fence);
- }
- }
-}
-
int radeon_bo_fbdev_mmap(struct radeon_bo *bo,
struct vm_area_struct *vma)
{
diff --git a/drivers/gpu/drm/radeon/radeon_object.h b/drivers/gpu/drm/radeon/radeon_object.h
index d143702b244..22d4c237dea 100644
--- a/drivers/gpu/drm/radeon/radeon_object.h
+++ b/drivers/gpu/drm/radeon/radeon_object.h
@@ -126,12 +126,12 @@ static inline int radeon_bo_wait(struct radeon_bo *bo, u32 *mem_type,
r = ttm_bo_reserve(&bo->tbo, true, no_wait, false, 0);
if (unlikely(r != 0))
return r;
- spin_lock(&bo->tbo.lock);
+ spin_lock(&bo->tbo.bdev->fence_lock);
if (mem_type)
*mem_type = bo->tbo.mem.mem_type;
if (bo->tbo.sync_obj)
r = ttm_bo_wait(&bo->tbo, true, true, no_wait);
- spin_unlock(&bo->tbo.lock);
+ spin_unlock(&bo->tbo.bdev->fence_lock);
ttm_bo_unreserve(&bo->tbo);
return r;
}
@@ -152,10 +152,7 @@ extern int radeon_bo_init(struct radeon_device *rdev);
extern void radeon_bo_fini(struct radeon_device *rdev);
extern void radeon_bo_list_add_object(struct radeon_bo_list *lobj,
struct list_head *head);
-extern int radeon_bo_list_reserve(struct list_head *head);
-extern void radeon_bo_list_unreserve(struct list_head *head);
extern int radeon_bo_list_validate(struct list_head *head);
-extern void radeon_bo_list_fence(struct list_head *head, void *fence);
extern int radeon_bo_fbdev_mmap(struct radeon_bo *bo,
struct vm_area_struct *vma);
extern int radeon_bo_set_tiling_flags(struct radeon_bo *bo,
diff --git a/drivers/gpu/drm/radeon/radeon_pm.c b/drivers/gpu/drm/radeon/radeon_pm.c
index 8c9b2ef32c6..3b1b2bf9cdd 100644
--- a/drivers/gpu/drm/radeon/radeon_pm.c
+++ b/drivers/gpu/drm/radeon/radeon_pm.c
@@ -167,13 +167,13 @@ static void radeon_set_power_state(struct radeon_device *rdev)
if (radeon_gui_idle(rdev)) {
sclk = rdev->pm.power_state[rdev->pm.requested_power_state_index].
clock_info[rdev->pm.requested_clock_mode_index].sclk;
- if (sclk > rdev->clock.default_sclk)
- sclk = rdev->clock.default_sclk;
+ if (sclk > rdev->pm.default_sclk)
+ sclk = rdev->pm.default_sclk;
mclk = rdev->pm.power_state[rdev->pm.requested_power_state_index].
clock_info[rdev->pm.requested_clock_mode_index].mclk;
- if (mclk > rdev->clock.default_mclk)
- mclk = rdev->clock.default_mclk;
+ if (mclk > rdev->pm.default_mclk)
+ mclk = rdev->pm.default_mclk;
/* upvolt before raising clocks, downvolt after lowering clocks */
if (sclk < rdev->pm.current_sclk)
@@ -405,20 +405,13 @@ static ssize_t radeon_set_pm_method(struct device *dev,
rdev->pm.dynpm_planned_action = DYNPM_ACTION_DEFAULT;
mutex_unlock(&rdev->pm.mutex);
} else if (strncmp("profile", buf, strlen("profile")) == 0) {
- bool flush_wq = false;
-
mutex_lock(&rdev->pm.mutex);
- if (rdev->pm.pm_method == PM_METHOD_DYNPM) {
- cancel_delayed_work(&rdev->pm.dynpm_idle_work);
- flush_wq = true;
- }
/* disable dynpm */
rdev->pm.dynpm_state = DYNPM_STATE_DISABLED;
rdev->pm.dynpm_planned_action = DYNPM_ACTION_NONE;
rdev->pm.pm_method = PM_METHOD_PROFILE;
mutex_unlock(&rdev->pm.mutex);
- if (flush_wq)
- flush_workqueue(rdev->wq);
+ cancel_delayed_work_sync(&rdev->pm.dynpm_idle_work);
} else {
DRM_ERROR("invalid power method!\n");
goto fail;
@@ -447,8 +440,12 @@ static ssize_t radeon_hwmon_show_temp(struct device *dev,
temp = rv770_get_temp(rdev);
break;
case THERMAL_TYPE_EVERGREEN:
+ case THERMAL_TYPE_NI:
temp = evergreen_get_temp(rdev);
break;
+ case THERMAL_TYPE_SUMO:
+ temp = sumo_get_temp(rdev);
+ break;
default:
temp = 0;
break;
@@ -487,6 +484,7 @@ static int radeon_hwmon_init(struct radeon_device *rdev)
case THERMAL_TYPE_RV6XX:
case THERMAL_TYPE_RV770:
case THERMAL_TYPE_EVERGREEN:
+ case THERMAL_TYPE_SUMO:
rdev->pm.int_hwmon_dev = hwmon_device_register(rdev->dev);
if (IS_ERR(rdev->pm.int_hwmon_dev)) {
err = PTR_ERR(rdev->pm.int_hwmon_dev);
@@ -520,34 +518,39 @@ static void radeon_hwmon_fini(struct radeon_device *rdev)
void radeon_pm_suspend(struct radeon_device *rdev)
{
- bool flush_wq = false;
-
mutex_lock(&rdev->pm.mutex);
if (rdev->pm.pm_method == PM_METHOD_DYNPM) {
- cancel_delayed_work(&rdev->pm.dynpm_idle_work);
if (rdev->pm.dynpm_state == DYNPM_STATE_ACTIVE)
rdev->pm.dynpm_state = DYNPM_STATE_SUSPENDED;
- flush_wq = true;
}
mutex_unlock(&rdev->pm.mutex);
- if (flush_wq)
- flush_workqueue(rdev->wq);
+
+ cancel_delayed_work_sync(&rdev->pm.dynpm_idle_work);
}
void radeon_pm_resume(struct radeon_device *rdev)
{
+ /* set up the default clocks if the MC ucode is loaded */
+ if (ASIC_IS_DCE5(rdev) && rdev->mc_fw) {
+ if (rdev->pm.default_vddc)
+ radeon_atom_set_voltage(rdev, rdev->pm.default_vddc);
+ if (rdev->pm.default_sclk)
+ radeon_set_engine_clock(rdev, rdev->pm.default_sclk);
+ if (rdev->pm.default_mclk)
+ radeon_set_memory_clock(rdev, rdev->pm.default_mclk);
+ }
/* asic init will reset the default power state */
mutex_lock(&rdev->pm.mutex);
rdev->pm.current_power_state_index = rdev->pm.default_power_state_index;
rdev->pm.current_clock_mode_index = 0;
- rdev->pm.current_sclk = rdev->clock.default_sclk;
- rdev->pm.current_mclk = rdev->clock.default_mclk;
+ rdev->pm.current_sclk = rdev->pm.default_sclk;
+ rdev->pm.current_mclk = rdev->pm.default_mclk;
rdev->pm.current_vddc = rdev->pm.power_state[rdev->pm.default_power_state_index].clock_info[0].voltage.voltage;
if (rdev->pm.pm_method == PM_METHOD_DYNPM
&& rdev->pm.dynpm_state == DYNPM_STATE_SUSPENDED) {
rdev->pm.dynpm_state = DYNPM_STATE_ACTIVE;
- queue_delayed_work(rdev->wq, &rdev->pm.dynpm_idle_work,
- msecs_to_jiffies(RADEON_IDLE_LOOP_MS));
+ schedule_delayed_work(&rdev->pm.dynpm_idle_work,
+ msecs_to_jiffies(RADEON_IDLE_LOOP_MS));
}
mutex_unlock(&rdev->pm.mutex);
radeon_pm_compute_clocks(rdev);
@@ -564,6 +567,8 @@ int radeon_pm_init(struct radeon_device *rdev)
rdev->pm.dynpm_planned_action = DYNPM_ACTION_NONE;
rdev->pm.dynpm_can_upclock = true;
rdev->pm.dynpm_can_downclock = true;
+ rdev->pm.default_sclk = rdev->clock.default_sclk;
+ rdev->pm.default_mclk = rdev->clock.default_mclk;
rdev->pm.current_sclk = rdev->clock.default_sclk;
rdev->pm.current_mclk = rdev->clock.default_mclk;
rdev->pm.int_thermal_type = THERMAL_TYPE_NONE;
@@ -575,12 +580,24 @@ int radeon_pm_init(struct radeon_device *rdev)
radeon_combios_get_power_modes(rdev);
radeon_pm_print_states(rdev);
radeon_pm_init_profile(rdev);
+ /* set up the default clocks if the MC ucode is loaded */
+ if (ASIC_IS_DCE5(rdev) && rdev->mc_fw) {
+ if (rdev->pm.default_vddc)
+ radeon_atom_set_voltage(rdev, rdev->pm.default_vddc);
+ if (rdev->pm.default_sclk)
+ radeon_set_engine_clock(rdev, rdev->pm.default_sclk);
+ if (rdev->pm.default_mclk)
+ radeon_set_memory_clock(rdev, rdev->pm.default_mclk);
+ }
}
/* set up the internal thermal sensor if applicable */
ret = radeon_hwmon_init(rdev);
if (ret)
return ret;
+
+ INIT_DELAYED_WORK(&rdev->pm.dynpm_idle_work, radeon_dynpm_idle_work_handler);
+
if (rdev->pm.num_power_states > 1) {
/* where's the best place to put these? */
ret = device_create_file(rdev->dev, &dev_attr_power_profile);
@@ -594,8 +611,6 @@ int radeon_pm_init(struct radeon_device *rdev)
rdev->acpi_nb.notifier_call = radeon_acpi_event;
register_acpi_notifier(&rdev->acpi_nb);
#endif
- INIT_DELAYED_WORK(&rdev->pm.dynpm_idle_work, radeon_dynpm_idle_work_handler);
-
if (radeon_debugfs_pm_init(rdev)) {
DRM_ERROR("Failed to register debugfs file for PM!\n");
}
@@ -609,25 +624,20 @@ int radeon_pm_init(struct radeon_device *rdev)
void radeon_pm_fini(struct radeon_device *rdev)
{
if (rdev->pm.num_power_states > 1) {
- bool flush_wq = false;
-
mutex_lock(&rdev->pm.mutex);
if (rdev->pm.pm_method == PM_METHOD_PROFILE) {
rdev->pm.profile = PM_PROFILE_DEFAULT;
radeon_pm_update_profile(rdev);
radeon_pm_set_clocks(rdev);
} else if (rdev->pm.pm_method == PM_METHOD_DYNPM) {
- /* cancel work */
- cancel_delayed_work(&rdev->pm.dynpm_idle_work);
- flush_wq = true;
/* reset default clocks */
rdev->pm.dynpm_state = DYNPM_STATE_DISABLED;
rdev->pm.dynpm_planned_action = DYNPM_ACTION_DEFAULT;
radeon_pm_set_clocks(rdev);
}
mutex_unlock(&rdev->pm.mutex);
- if (flush_wq)
- flush_workqueue(rdev->wq);
+
+ cancel_delayed_work_sync(&rdev->pm.dynpm_idle_work);
device_remove_file(rdev->dev, &dev_attr_power_profile);
device_remove_file(rdev->dev, &dev_attr_power_method);
@@ -686,12 +696,12 @@ void radeon_pm_compute_clocks(struct radeon_device *rdev)
radeon_pm_get_dynpm_state(rdev);
radeon_pm_set_clocks(rdev);
- queue_delayed_work(rdev->wq, &rdev->pm.dynpm_idle_work,
- msecs_to_jiffies(RADEON_IDLE_LOOP_MS));
+ schedule_delayed_work(&rdev->pm.dynpm_idle_work,
+ msecs_to_jiffies(RADEON_IDLE_LOOP_MS));
} else if (rdev->pm.dynpm_state == DYNPM_STATE_PAUSED) {
rdev->pm.dynpm_state = DYNPM_STATE_ACTIVE;
- queue_delayed_work(rdev->wq, &rdev->pm.dynpm_idle_work,
- msecs_to_jiffies(RADEON_IDLE_LOOP_MS));
+ schedule_delayed_work(&rdev->pm.dynpm_idle_work,
+ msecs_to_jiffies(RADEON_IDLE_LOOP_MS));
DRM_DEBUG_DRIVER("radeon: dynamic power management activated\n");
}
} else { /* count == 0 */
@@ -720,9 +730,9 @@ static bool radeon_pm_in_vbl(struct radeon_device *rdev)
*/
for (crtc = 0; (crtc < rdev->num_crtc) && in_vbl; crtc++) {
if (rdev->pm.active_crtcs & (1 << crtc)) {
- vbl_status = radeon_get_crtc_scanoutpos(rdev, crtc, &vpos, &hpos);
- if ((vbl_status & RADEON_SCANOUTPOS_VALID) &&
- !(vbl_status & RADEON_SCANOUTPOS_INVBL))
+ vbl_status = radeon_get_crtc_scanoutpos(rdev->ddev, crtc, &vpos, &hpos);
+ if ((vbl_status & DRM_SCANOUTPOS_VALID) &&
+ !(vbl_status & DRM_SCANOUTPOS_INVBL))
in_vbl = false;
}
}
@@ -796,8 +806,8 @@ static void radeon_dynpm_idle_work_handler(struct work_struct *work)
radeon_pm_set_clocks(rdev);
}
- queue_delayed_work(rdev->wq, &rdev->pm.dynpm_idle_work,
- msecs_to_jiffies(RADEON_IDLE_LOOP_MS));
+ schedule_delayed_work(&rdev->pm.dynpm_idle_work,
+ msecs_to_jiffies(RADEON_IDLE_LOOP_MS));
}
mutex_unlock(&rdev->pm.mutex);
ttm_bo_unlock_delayed_workqueue(&rdev->mman.bdev, resched);
@@ -814,9 +824,9 @@ static int radeon_debugfs_pm_info(struct seq_file *m, void *data)
struct drm_device *dev = node->minor->dev;
struct radeon_device *rdev = dev->dev_private;
- seq_printf(m, "default engine clock: %u0 kHz\n", rdev->clock.default_sclk);
+ seq_printf(m, "default engine clock: %u0 kHz\n", rdev->pm.default_sclk);
seq_printf(m, "current engine clock: %u0 kHz\n", radeon_get_engine_clock(rdev));
- seq_printf(m, "default memory clock: %u0 kHz\n", rdev->clock.default_mclk);
+ seq_printf(m, "default memory clock: %u0 kHz\n", rdev->pm.default_mclk);
if (rdev->asic->get_memory_clock)
seq_printf(m, "current memory clock: %u0 kHz\n", radeon_get_memory_clock(rdev));
if (rdev->pm.current_vddc)
diff --git a/drivers/gpu/drm/radeon/radeon_reg.h b/drivers/gpu/drm/radeon/radeon_reg.h
index 64928814de5..3cd4dace57c 100644
--- a/drivers/gpu/drm/radeon/radeon_reg.h
+++ b/drivers/gpu/drm/radeon/radeon_reg.h
@@ -55,6 +55,7 @@
#include "r500_reg.h"
#include "r600_reg.h"
#include "evergreen_reg.h"
+#include "ni_reg.h"
#define RADEON_MC_AGP_LOCATION 0x014c
#define RADEON_MC_AGP_START_MASK 0x0000FFFF
@@ -320,6 +321,15 @@
# define RADEON_PCIE_LC_RECONFIG_NOW (1 << 8)
# define RADEON_PCIE_LC_RECONFIG_LATER (1 << 9)
# define RADEON_PCIE_LC_SHORT_RECONFIG_EN (1 << 10)
+# define R600_PCIE_LC_RECONFIG_ARC_MISSING_ESCAPE (1 << 7)
+# define R600_PCIE_LC_RENEGOTIATION_SUPPORT (1 << 9)
+# define R600_PCIE_LC_RENEGOTIATE_EN (1 << 10)
+# define R600_PCIE_LC_SHORT_RECONFIG_EN (1 << 11)
+# define R600_PCIE_LC_UPCONFIGURE_SUPPORT (1 << 12)
+# define R600_PCIE_LC_UPCONFIGURE_DIS (1 << 13)
+
+#define R600_TARGET_AND_CURRENT_PROFILE_INDEX 0x70c
+#define R700_TARGET_AND_CURRENT_PROFILE_INDEX 0x66c
#define RADEON_CACHE_CNTL 0x1724
#define RADEON_CACHE_LINE 0x0f0c /* PCI */
@@ -422,6 +432,7 @@
# define RADEON_CRTC_CSYNC_EN (1 << 4)
# define RADEON_CRTC_ICON_EN (1 << 15)
# define RADEON_CRTC_CUR_EN (1 << 16)
+# define RADEON_CRTC_VSTAT_MODE_MASK (3 << 17)
# define RADEON_CRTC_CUR_MODE_MASK (7 << 20)
# define RADEON_CRTC_CUR_MODE_SHIFT 20
# define RADEON_CRTC_CUR_MODE_MONO 0
@@ -509,6 +520,8 @@
# define RADEON_CRTC_TILE_EN (1 << 15)
# define RADEON_CRTC_OFFSET_FLIP_CNTL (1 << 16)
# define RADEON_CRTC_STEREO_OFFSET_EN (1 << 17)
+# define RADEON_CRTC_GUI_TRIG_OFFSET_LEFT_EN (1 << 28)
+# define RADEON_CRTC_GUI_TRIG_OFFSET_RIGHT_EN (1 << 29)
#define R300_CRTC_TILE_X0_Y0 0x0350
#define R300_CRTC2_TILE_X0_Y0 0x0358
diff --git a/drivers/gpu/drm/radeon/radeon_trace.h b/drivers/gpu/drm/radeon/radeon_trace.h
new file mode 100644
index 00000000000..eafd8160a15
--- /dev/null
+++ b/drivers/gpu/drm/radeon/radeon_trace.h
@@ -0,0 +1,82 @@
+#if !defined(_RADEON_TRACE_H) || defined(TRACE_HEADER_MULTI_READ)
+#define _RADEON_TRACE_H
+
+#include <linux/stringify.h>
+#include <linux/types.h>
+#include <linux/tracepoint.h>
+
+#include <drm/drmP.h>
+
+#undef TRACE_SYSTEM
+#define TRACE_SYSTEM radeon
+#define TRACE_SYSTEM_STRING __stringify(TRACE_SYSTEM)
+#define TRACE_INCLUDE_FILE radeon_trace
+
+TRACE_EVENT(radeon_bo_create,
+ TP_PROTO(struct radeon_bo *bo),
+ TP_ARGS(bo),
+ TP_STRUCT__entry(
+ __field(struct radeon_bo *, bo)
+ __field(u32, pages)
+ ),
+
+ TP_fast_assign(
+ __entry->bo = bo;
+ __entry->pages = bo->tbo.num_pages;
+ ),
+ TP_printk("bo=%p, pages=%u", __entry->bo, __entry->pages)
+);
+
+DECLARE_EVENT_CLASS(radeon_fence_request,
+
+ TP_PROTO(struct drm_device *dev, u32 seqno),
+
+ TP_ARGS(dev, seqno),
+
+ TP_STRUCT__entry(
+ __field(u32, dev)
+ __field(u32, seqno)
+ ),
+
+ TP_fast_assign(
+ __entry->dev = dev->primary->index;
+ __entry->seqno = seqno;
+ ),
+
+ TP_printk("dev=%u, seqno=%u", __entry->dev, __entry->seqno)
+);
+
+DEFINE_EVENT(radeon_fence_request, radeon_fence_emit,
+
+ TP_PROTO(struct drm_device *dev, u32 seqno),
+
+ TP_ARGS(dev, seqno)
+);
+
+DEFINE_EVENT(radeon_fence_request, radeon_fence_retire,
+
+ TP_PROTO(struct drm_device *dev, u32 seqno),
+
+ TP_ARGS(dev, seqno)
+);
+
+DEFINE_EVENT(radeon_fence_request, radeon_fence_wait_begin,
+
+ TP_PROTO(struct drm_device *dev, u32 seqno),
+
+ TP_ARGS(dev, seqno)
+);
+
+DEFINE_EVENT(radeon_fence_request, radeon_fence_wait_end,
+
+ TP_PROTO(struct drm_device *dev, u32 seqno),
+
+ TP_ARGS(dev, seqno)
+);
+
+#endif
+
+/* This part must be outside protection */
+#undef TRACE_INCLUDE_PATH
+#define TRACE_INCLUDE_PATH .
+#include <trace/define_trace.h>
diff --git a/drivers/gpu/drm/radeon/radeon_trace_points.c b/drivers/gpu/drm/radeon/radeon_trace_points.c
new file mode 100644
index 00000000000..8175993df84
--- /dev/null
+++ b/drivers/gpu/drm/radeon/radeon_trace_points.c
@@ -0,0 +1,9 @@
+/* Copyright Red Hat Inc 2010.
+ * Author : Dave Airlie <airlied@redhat.com>
+ */
+#include <drm/drmP.h>
+#include "radeon_drm.h"
+#include "radeon.h"
+
+#define CREATE_TRACE_POINTS
+#include "radeon_trace.h"
diff --git a/drivers/gpu/drm/radeon/reg_srcs/rv515 b/drivers/gpu/drm/radeon/reg_srcs/rv515
index b3f9f1d9200..ef422bbacfc 100644
--- a/drivers/gpu/drm/radeon/reg_srcs/rv515
+++ b/drivers/gpu/drm/radeon/reg_srcs/rv515
@@ -304,6 +304,22 @@ rv515 0x6d40
0x4630 US_CODE_ADDR
0x4634 US_CODE_RANGE
0x4638 US_CODE_OFFSET
+0x4640 US_FORMAT0_0
+0x4644 US_FORMAT0_1
+0x4648 US_FORMAT0_2
+0x464C US_FORMAT0_3
+0x4650 US_FORMAT0_4
+0x4654 US_FORMAT0_5
+0x4658 US_FORMAT0_6
+0x465C US_FORMAT0_7
+0x4660 US_FORMAT0_8
+0x4664 US_FORMAT0_9
+0x4668 US_FORMAT0_10
+0x466C US_FORMAT0_11
+0x4670 US_FORMAT0_12
+0x4674 US_FORMAT0_13
+0x4678 US_FORMAT0_14
+0x467C US_FORMAT0_15
0x46A4 US_OUT_FMT_0
0x46A8 US_OUT_FMT_1
0x46AC US_OUT_FMT_2
diff --git a/drivers/gpu/drm/radeon/rs600.c b/drivers/gpu/drm/radeon/rs600.c
index f1c6e02c2e6..b4192acaab5 100644
--- a/drivers/gpu/drm/radeon/rs600.c
+++ b/drivers/gpu/drm/radeon/rs600.c
@@ -46,6 +46,56 @@
void rs600_gpu_init(struct radeon_device *rdev);
int rs600_mc_wait_for_idle(struct radeon_device *rdev);
+void rs600_pre_page_flip(struct radeon_device *rdev, int crtc)
+{
+ struct radeon_crtc *radeon_crtc = rdev->mode_info.crtcs[crtc];
+ u32 tmp;
+
+ /* make sure the flip latches during vertical, not horizontal, blank */
+ tmp = RREG32(AVIVO_D1GRPH_FLIP_CONTROL + radeon_crtc->crtc_offset);
+ tmp &= ~AVIVO_D1GRPH_SURFACE_UPDATE_H_RETRACE_EN;
+ WREG32(AVIVO_D1GRPH_FLIP_CONTROL + radeon_crtc->crtc_offset, tmp);
+
+ /* set pageflip to happen anywhere in vblank interval */
+ WREG32(AVIVO_D1MODE_MASTER_UPDATE_MODE + radeon_crtc->crtc_offset, 0);
+
+ /* enable the pflip int */
+ radeon_irq_kms_pflip_irq_get(rdev, crtc);
+}
+
+void rs600_post_page_flip(struct radeon_device *rdev, int crtc)
+{
+ /* disable the pflip int */
+ radeon_irq_kms_pflip_irq_put(rdev, crtc);
+}
+
+u32 rs600_page_flip(struct radeon_device *rdev, int crtc_id, u64 crtc_base)
+{
+ struct radeon_crtc *radeon_crtc = rdev->mode_info.crtcs[crtc_id];
+ u32 tmp = RREG32(AVIVO_D1GRPH_UPDATE + radeon_crtc->crtc_offset);
+
+ /* Lock the graphics update lock */
+ tmp |= AVIVO_D1GRPH_UPDATE_LOCK;
+ WREG32(AVIVO_D1GRPH_UPDATE + radeon_crtc->crtc_offset, tmp);
+
+ /* update the scanout addresses */
+ WREG32(AVIVO_D1GRPH_SECONDARY_SURFACE_ADDRESS + radeon_crtc->crtc_offset,
+ (u32)crtc_base);
+ WREG32(AVIVO_D1GRPH_PRIMARY_SURFACE_ADDRESS + radeon_crtc->crtc_offset,
+ (u32)crtc_base);
+
+ /* Wait for update_pending to go high. */
+ while (!(RREG32(AVIVO_D1GRPH_UPDATE + radeon_crtc->crtc_offset) & AVIVO_D1GRPH_SURFACE_UPDATE_PENDING));
+ DRM_DEBUG("Update pending now high. Unlocking vupdate_lock.\n");
+
+ /* Unlock the lock, so double-buffering can take place inside vblank */
+ tmp &= ~AVIVO_D1GRPH_UPDATE_LOCK;
+ WREG32(AVIVO_D1GRPH_UPDATE + radeon_crtc->crtc_offset, tmp);
+
+ /* Return current update_pending status: */
+ return RREG32(AVIVO_D1GRPH_UPDATE + radeon_crtc->crtc_offset) & AVIVO_D1GRPH_SURFACE_UPDATE_PENDING;
+}
+
void rs600_pm_misc(struct radeon_device *rdev)
{
int requested_index = rdev->pm.requested_power_state_index;
@@ -515,10 +565,12 @@ int rs600_irq_set(struct radeon_device *rdev)
if (rdev->irq.gui_idle) {
tmp |= S_000040_GUI_IDLE(1);
}
- if (rdev->irq.crtc_vblank_int[0]) {
+ if (rdev->irq.crtc_vblank_int[0] ||
+ rdev->irq.pflip[0]) {
mode_int |= S_006540_D1MODE_VBLANK_INT_MASK(1);
}
- if (rdev->irq.crtc_vblank_int[1]) {
+ if (rdev->irq.crtc_vblank_int[1] ||
+ rdev->irq.pflip[1]) {
mode_int |= S_006540_D2MODE_VBLANK_INT_MASK(1);
}
if (rdev->irq.hpd[0]) {
@@ -534,7 +586,7 @@ int rs600_irq_set(struct radeon_device *rdev)
return 0;
}
-static inline uint32_t rs600_irq_ack(struct radeon_device *rdev, u32 *r500_disp_int)
+static inline u32 rs600_irq_ack(struct radeon_device *rdev)
{
uint32_t irqs = RREG32(R_000044_GEN_INT_STATUS);
uint32_t irq_mask = S_000044_SW_INT(1);
@@ -547,27 +599,27 @@ static inline uint32_t rs600_irq_ack(struct radeon_device *rdev, u32 *r500_disp_
}
if (G_000044_DISPLAY_INT_STAT(irqs)) {
- *r500_disp_int = RREG32(R_007EDC_DISP_INTERRUPT_STATUS);
- if (G_007EDC_LB_D1_VBLANK_INTERRUPT(*r500_disp_int)) {
+ rdev->irq.stat_regs.r500.disp_int = RREG32(R_007EDC_DISP_INTERRUPT_STATUS);
+ if (G_007EDC_LB_D1_VBLANK_INTERRUPT(rdev->irq.stat_regs.r500.disp_int)) {
WREG32(R_006534_D1MODE_VBLANK_STATUS,
S_006534_D1MODE_VBLANK_ACK(1));
}
- if (G_007EDC_LB_D2_VBLANK_INTERRUPT(*r500_disp_int)) {
+ if (G_007EDC_LB_D2_VBLANK_INTERRUPT(rdev->irq.stat_regs.r500.disp_int)) {
WREG32(R_006D34_D2MODE_VBLANK_STATUS,
S_006D34_D2MODE_VBLANK_ACK(1));
}
- if (G_007EDC_DC_HOT_PLUG_DETECT1_INTERRUPT(*r500_disp_int)) {
+ if (G_007EDC_DC_HOT_PLUG_DETECT1_INTERRUPT(rdev->irq.stat_regs.r500.disp_int)) {
tmp = RREG32(R_007D08_DC_HOT_PLUG_DETECT1_INT_CONTROL);
tmp |= S_007D08_DC_HOT_PLUG_DETECT1_INT_ACK(1);
WREG32(R_007D08_DC_HOT_PLUG_DETECT1_INT_CONTROL, tmp);
}
- if (G_007EDC_DC_HOT_PLUG_DETECT2_INTERRUPT(*r500_disp_int)) {
+ if (G_007EDC_DC_HOT_PLUG_DETECT2_INTERRUPT(rdev->irq.stat_regs.r500.disp_int)) {
tmp = RREG32(R_007D18_DC_HOT_PLUG_DETECT2_INT_CONTROL);
tmp |= S_007D18_DC_HOT_PLUG_DETECT2_INT_ACK(1);
WREG32(R_007D18_DC_HOT_PLUG_DETECT2_INT_CONTROL, tmp);
}
} else {
- *r500_disp_int = 0;
+ rdev->irq.stat_regs.r500.disp_int = 0;
}
if (irqs) {
@@ -578,32 +630,30 @@ static inline uint32_t rs600_irq_ack(struct radeon_device *rdev, u32 *r500_disp_
void rs600_irq_disable(struct radeon_device *rdev)
{
- u32 tmp;
-
WREG32(R_000040_GEN_INT_CNTL, 0);
WREG32(R_006540_DxMODE_INT_MASK, 0);
/* Wait and acknowledge irq */
mdelay(1);
- rs600_irq_ack(rdev, &tmp);
+ rs600_irq_ack(rdev);
}
int rs600_irq_process(struct radeon_device *rdev)
{
- uint32_t status, msi_rearm;
- uint32_t r500_disp_int;
+ u32 status, msi_rearm;
bool queue_hotplug = false;
/* reset gui idle ack. the status bit is broken */
rdev->irq.gui_idle_acked = false;
- status = rs600_irq_ack(rdev, &r500_disp_int);
- if (!status && !r500_disp_int) {
+ status = rs600_irq_ack(rdev);
+ if (!status && !rdev->irq.stat_regs.r500.disp_int) {
return IRQ_NONE;
}
- while (status || r500_disp_int) {
+ while (status || rdev->irq.stat_regs.r500.disp_int) {
/* SW interrupt */
- if (G_000044_SW_INT(status))
+ if (G_000044_SW_INT(status)) {
radeon_fence_process(rdev);
+ }
/* GUI idle */
if (G_000040_GUI_IDLE(status)) {
rdev->irq.gui_idle_acked = true;
@@ -611,30 +661,38 @@ int rs600_irq_process(struct radeon_device *rdev)
wake_up(&rdev->irq.idle_queue);
}
/* Vertical blank interrupts */
- if (G_007EDC_LB_D1_VBLANK_INTERRUPT(r500_disp_int)) {
- drm_handle_vblank(rdev->ddev, 0);
- rdev->pm.vblank_sync = true;
- wake_up(&rdev->irq.vblank_queue);
+ if (G_007EDC_LB_D1_VBLANK_INTERRUPT(rdev->irq.stat_regs.r500.disp_int)) {
+ if (rdev->irq.crtc_vblank_int[0]) {
+ drm_handle_vblank(rdev->ddev, 0);
+ rdev->pm.vblank_sync = true;
+ wake_up(&rdev->irq.vblank_queue);
+ }
+ if (rdev->irq.pflip[0])
+ radeon_crtc_handle_flip(rdev, 0);
}
- if (G_007EDC_LB_D2_VBLANK_INTERRUPT(r500_disp_int)) {
- drm_handle_vblank(rdev->ddev, 1);
- rdev->pm.vblank_sync = true;
- wake_up(&rdev->irq.vblank_queue);
+ if (G_007EDC_LB_D2_VBLANK_INTERRUPT(rdev->irq.stat_regs.r500.disp_int)) {
+ if (rdev->irq.crtc_vblank_int[1]) {
+ drm_handle_vblank(rdev->ddev, 1);
+ rdev->pm.vblank_sync = true;
+ wake_up(&rdev->irq.vblank_queue);
+ }
+ if (rdev->irq.pflip[1])
+ radeon_crtc_handle_flip(rdev, 1);
}
- if (G_007EDC_DC_HOT_PLUG_DETECT1_INTERRUPT(r500_disp_int)) {
+ if (G_007EDC_DC_HOT_PLUG_DETECT1_INTERRUPT(rdev->irq.stat_regs.r500.disp_int)) {
queue_hotplug = true;
DRM_DEBUG("HPD1\n");
}
- if (G_007EDC_DC_HOT_PLUG_DETECT2_INTERRUPT(r500_disp_int)) {
+ if (G_007EDC_DC_HOT_PLUG_DETECT2_INTERRUPT(rdev->irq.stat_regs.r500.disp_int)) {
queue_hotplug = true;
DRM_DEBUG("HPD2\n");
}
- status = rs600_irq_ack(rdev, &r500_disp_int);
+ status = rs600_irq_ack(rdev);
}
/* reset gui idle ack. the status bit is broken */
rdev->irq.gui_idle_acked = false;
if (queue_hotplug)
- queue_work(rdev->wq, &rdev->hotplug_work);
+ schedule_work(&rdev->hotplug_work);
if (rdev->msi_enabled) {
switch (rdev->family) {
case CHIP_RS600:
diff --git a/drivers/gpu/drm/radeon/rv770.c b/drivers/gpu/drm/radeon/rv770.c
index 4dfead8cee3..3a264aa3a79 100644
--- a/drivers/gpu/drm/radeon/rv770.c
+++ b/drivers/gpu/drm/radeon/rv770.c
@@ -41,6 +41,41 @@
static void rv770_gpu_init(struct radeon_device *rdev);
void rv770_fini(struct radeon_device *rdev);
+static void rv770_pcie_gen2_enable(struct radeon_device *rdev);
+
+u32 rv770_page_flip(struct radeon_device *rdev, int crtc_id, u64 crtc_base)
+{
+ struct radeon_crtc *radeon_crtc = rdev->mode_info.crtcs[crtc_id];
+ u32 tmp = RREG32(AVIVO_D1GRPH_UPDATE + radeon_crtc->crtc_offset);
+
+ /* Lock the graphics update lock */
+ tmp |= AVIVO_D1GRPH_UPDATE_LOCK;
+ WREG32(AVIVO_D1GRPH_UPDATE + radeon_crtc->crtc_offset, tmp);
+
+ /* update the scanout addresses */
+ if (radeon_crtc->crtc_id) {
+ WREG32(D2GRPH_SECONDARY_SURFACE_ADDRESS_HIGH, upper_32_bits(crtc_base));
+ WREG32(D2GRPH_PRIMARY_SURFACE_ADDRESS_HIGH, upper_32_bits(crtc_base));
+ } else {
+ WREG32(D1GRPH_SECONDARY_SURFACE_ADDRESS_HIGH, upper_32_bits(crtc_base));
+ WREG32(D1GRPH_PRIMARY_SURFACE_ADDRESS_HIGH, upper_32_bits(crtc_base));
+ }
+ WREG32(D1GRPH_SECONDARY_SURFACE_ADDRESS + radeon_crtc->crtc_offset,
+ (u32)crtc_base);
+ WREG32(D1GRPH_PRIMARY_SURFACE_ADDRESS + radeon_crtc->crtc_offset,
+ (u32)crtc_base);
+
+ /* Wait for update_pending to go high. */
+ while (!(RREG32(AVIVO_D1GRPH_UPDATE + radeon_crtc->crtc_offset) & AVIVO_D1GRPH_SURFACE_UPDATE_PENDING));
+ DRM_DEBUG("Update pending now high. Unlocking vupdate_lock.\n");
+
+ /* Unlock the lock, so double-buffering can take place inside vblank */
+ tmp &= ~AVIVO_D1GRPH_UPDATE_LOCK;
+ WREG32(AVIVO_D1GRPH_UPDATE + radeon_crtc->crtc_offset, tmp);
+
+ /* Return current update_pending status: */
+ return RREG32(AVIVO_D1GRPH_UPDATE + radeon_crtc->crtc_offset) & AVIVO_D1GRPH_SURFACE_UPDATE_PENDING;
+}
/* get temperature in millidegrees */
u32 rv770_get_temp(struct radeon_device *rdev)
@@ -489,6 +524,49 @@ static u32 r700_get_tile_pipe_to_backend_map(struct radeon_device *rdev,
return backend_map;
}
+static void rv770_program_channel_remap(struct radeon_device *rdev)
+{
+ u32 tcp_chan_steer, mc_shared_chremap, tmp;
+ bool force_no_swizzle;
+
+ switch (rdev->family) {
+ case CHIP_RV770:
+ case CHIP_RV730:
+ force_no_swizzle = false;
+ break;
+ case CHIP_RV710:
+ case CHIP_RV740:
+ default:
+ force_no_swizzle = true;
+ break;
+ }
+
+ tmp = RREG32(MC_SHARED_CHMAP);
+ switch ((tmp & NOOFCHAN_MASK) >> NOOFCHAN_SHIFT) {
+ case 0:
+ case 1:
+ default:
+ /* default mapping */
+ mc_shared_chremap = 0x00fac688;
+ break;
+ case 2:
+ case 3:
+ if (force_no_swizzle)
+ mc_shared_chremap = 0x00fac688;
+ else
+ mc_shared_chremap = 0x00bbc298;
+ break;
+ }
+
+ if (rdev->family == CHIP_RV740)
+ tcp_chan_steer = 0x00ef2a60;
+ else
+ tcp_chan_steer = 0x00fac688;
+
+ WREG32(TCP_CHAN_STEER, tcp_chan_steer);
+ WREG32(MC_SHARED_CHREMAP, mc_shared_chremap);
+}
+
static void rv770_gpu_init(struct radeon_device *rdev)
{
int i, j, num_qd_pipes;
@@ -688,6 +766,8 @@ static void rv770_gpu_init(struct radeon_device *rdev)
WREG32(DCP_TILING_CONFIG, (gb_tiling_config & 0xffff));
WREG32(HDP_TILING_CONFIG, (gb_tiling_config & 0xffff));
+ rv770_program_channel_remap(rdev);
+
WREG32(CC_RB_BACKEND_DISABLE, cc_rb_backend_disable);
WREG32(CC_GC_SHADER_PIPE_CONFIG, cc_gc_shader_pipe_config);
WREG32(GC_USER_SHADER_PIPE_CONFIG, cc_gc_shader_pipe_config);
@@ -956,6 +1036,45 @@ static void rv770_vram_scratch_fini(struct radeon_device *rdev)
radeon_bo_unref(&rdev->vram_scratch.robj);
}
+void r700_vram_gtt_location(struct radeon_device *rdev, struct radeon_mc *mc)
+{
+ u64 size_bf, size_af;
+
+ if (mc->mc_vram_size > 0xE0000000) {
+ /* leave room for at least 512M GTT */
+ dev_warn(rdev->dev, "limiting VRAM\n");
+ mc->real_vram_size = 0xE0000000;
+ mc->mc_vram_size = 0xE0000000;
+ }
+ if (rdev->flags & RADEON_IS_AGP) {
+ size_bf = mc->gtt_start;
+ size_af = 0xFFFFFFFF - mc->gtt_end + 1;
+ if (size_bf > size_af) {
+ if (mc->mc_vram_size > size_bf) {
+ dev_warn(rdev->dev, "limiting VRAM\n");
+ mc->real_vram_size = size_bf;
+ mc->mc_vram_size = size_bf;
+ }
+ mc->vram_start = mc->gtt_start - mc->mc_vram_size;
+ } else {
+ if (mc->mc_vram_size > size_af) {
+ dev_warn(rdev->dev, "limiting VRAM\n");
+ mc->real_vram_size = size_af;
+ mc->mc_vram_size = size_af;
+ }
+ mc->vram_start = mc->gtt_end;
+ }
+ mc->vram_end = mc->vram_start + mc->mc_vram_size - 1;
+ dev_info(rdev->dev, "VRAM: %lluM 0x%08llX - 0x%08llX (%lluM used)\n",
+ mc->mc_vram_size >> 20, mc->vram_start,
+ mc->vram_end, mc->real_vram_size >> 20);
+ } else {
+ radeon_vram_location(rdev, &rdev->mc, 0);
+ rdev->mc.gtt_base_align = 0;
+ radeon_gtt_location(rdev, mc);
+ }
+}
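
On AGP boards r700_vram_gtt_location() keeps the GTT where it already is and places VRAM in whichever free gap of the 32-bit address space is larger, clamping VRAM when it does not fit and butting it against the GTT to avoid a hole. A toy user-space rerun of the arithmetic with invented values:

	/* Toy example (invented numbers): GTT spans
	 * [0x80000000, 0xBFFFFFFF], VRAM is 1.5 GiB. */
	#include <stdint.h>
	#include <stdio.h>

	int main(void)
	{
		uint64_t gtt_start = 0x80000000, gtt_end = 0xBFFFFFFFULL;
		uint64_t vram = 0x60000000;		/* 1.5 GiB */
		uint64_t size_bf = gtt_start;		/* gap below: 2 GiB */
		uint64_t size_af = 0xFFFFFFFFULL - gtt_end + 1;	/* gap above */
		uint64_t start;

		if (size_bf > size_af) {
			if (vram > size_bf)
				vram = size_bf;		/* clamp to the gap */
			start = gtt_start - vram;	/* end flush with GTT */
		} else {
			if (vram > size_af)
				vram = size_af;
			start = gtt_end;
		}
		/* prints: VRAM at 0x20000000, size 0x60000000 */
		printf("VRAM at 0x%llx, size 0x%llx\n",
		       (unsigned long long)start, (unsigned long long)vram);
		return 0;
	}
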
+
int rv770_mc_init(struct radeon_device *rdev)
{
u32 tmp;
@@ -996,7 +1115,7 @@ int rv770_mc_init(struct radeon_device *rdev)
rdev->mc.real_vram_size = RREG32(CONFIG_MEMSIZE);
rdev->mc.visible_vram_size = rdev->mc.aper_size;
rdev->mc.active_vram_size = rdev->mc.visible_vram_size;
- r600_vram_gtt_location(rdev, &rdev->mc);
+ r700_vram_gtt_location(rdev, &rdev->mc);
radeon_update_bandwidth_info(rdev);
return 0;
@@ -1006,6 +1125,9 @@ static int rv770_startup(struct radeon_device *rdev)
{
int r;
+ /* enable pcie gen2 link */
+ rv770_pcie_gen2_enable(rdev);
+
if (!rdev->me_fw || !rdev->pfp_fw || !rdev->rlc_fw) {
r = r600_init_microcode(rdev);
if (r) {
@@ -1244,3 +1366,75 @@ void rv770_fini(struct radeon_device *rdev)
rdev->bios = NULL;
radeon_dummy_page_fini(rdev);
}
+
+static void rv770_pcie_gen2_enable(struct radeon_device *rdev)
+{
+ u32 link_width_cntl, lanes, speed_cntl, tmp;
+ u16 link_cntl2;
+
+ if (rdev->flags & RADEON_IS_IGP)
+ return;
+
+ if (!(rdev->flags & RADEON_IS_PCIE))
+ return;
+
+ /* x2 cards have a special sequence */
+ if (ASIC_IS_X2(rdev))
+ return;
+
+ /* advertise upconfig capability */
+ link_width_cntl = RREG32_PCIE_P(PCIE_LC_LINK_WIDTH_CNTL);
+ link_width_cntl &= ~LC_UPCONFIGURE_DIS;
+ WREG32_PCIE_P(PCIE_LC_LINK_WIDTH_CNTL, link_width_cntl);
+ link_width_cntl = RREG32_PCIE_P(PCIE_LC_LINK_WIDTH_CNTL);
+ if (link_width_cntl & LC_RENEGOTIATION_SUPPORT) {
+ lanes = (link_width_cntl & LC_LINK_WIDTH_RD_MASK) >> LC_LINK_WIDTH_RD_SHIFT;
+ link_width_cntl &= ~(LC_LINK_WIDTH_MASK |
+ LC_RECONFIG_ARC_MISSING_ESCAPE);
+ link_width_cntl |= lanes | LC_RECONFIG_NOW |
+ LC_RENEGOTIATE_EN | LC_UPCONFIGURE_SUPPORT;
+ WREG32_PCIE_P(PCIE_LC_LINK_WIDTH_CNTL, link_width_cntl);
+ } else {
+ link_width_cntl |= LC_UPCONFIGURE_DIS;
+ WREG32_PCIE_P(PCIE_LC_LINK_WIDTH_CNTL, link_width_cntl);
+ }
+
+ speed_cntl = RREG32_PCIE_P(PCIE_LC_SPEED_CNTL);
+ if ((speed_cntl & LC_OTHER_SIDE_EVER_SENT_GEN2) &&
+ (speed_cntl & LC_OTHER_SIDE_SUPPORTS_GEN2)) {
+
+ tmp = RREG32(0x541c);
+ WREG32(0x541c, tmp | 0x8);
+ WREG32(MM_CFGREGS_CNTL, MM_WR_TO_CFG_EN);
+ link_cntl2 = RREG16(0x4088);
+ link_cntl2 &= ~TARGET_LINK_SPEED_MASK;
+ link_cntl2 |= 0x2;
+ WREG16(0x4088, link_cntl2);
+ WREG32(MM_CFGREGS_CNTL, 0);
+
+ speed_cntl = RREG32_PCIE_P(PCIE_LC_SPEED_CNTL);
+ speed_cntl &= ~LC_TARGET_LINK_SPEED_OVERRIDE_EN;
+ WREG32_PCIE_P(PCIE_LC_SPEED_CNTL, speed_cntl);
+
+ speed_cntl = RREG32_PCIE_P(PCIE_LC_SPEED_CNTL);
+ speed_cntl |= LC_CLR_FAILED_SPD_CHANGE_CNT;
+ WREG32_PCIE_P(PCIE_LC_SPEED_CNTL, speed_cntl);
+
+ speed_cntl = RREG32_PCIE_P(PCIE_LC_SPEED_CNTL);
+ speed_cntl &= ~LC_CLR_FAILED_SPD_CHANGE_CNT;
+ WREG32_PCIE_P(PCIE_LC_SPEED_CNTL, speed_cntl);
+
+ speed_cntl = RREG32_PCIE_P(PCIE_LC_SPEED_CNTL);
+ speed_cntl |= LC_GEN2_EN_STRAP;
+ WREG32_PCIE_P(PCIE_LC_SPEED_CNTL, speed_cntl);
+
+ } else {
+ link_width_cntl = RREG32_PCIE_P(PCIE_LC_LINK_WIDTH_CNTL);
+ /* XXX: only disable it if gen1 bridge vendor == 0x111d or 0x1106 */
+ if (1)
+ link_width_cntl |= LC_UPCONFIGURE_DIS;
+ else
+ link_width_cntl &= ~LC_UPCONFIGURE_DIS;
+ WREG32_PCIE_P(PCIE_LC_LINK_WIDTH_CNTL, link_width_cntl);
+ }
+}
diff --git a/drivers/gpu/drm/radeon/rv770d.h b/drivers/gpu/drm/radeon/rv770d.h
index b7a5a20e81d..abc8cf5a367 100644
--- a/drivers/gpu/drm/radeon/rv770d.h
+++ b/drivers/gpu/drm/radeon/rv770d.h
@@ -138,6 +138,7 @@
#define MC_SHARED_CHMAP 0x2004
#define NOOFCHAN_SHIFT 12
#define NOOFCHAN_MASK 0x00003000
+#define MC_SHARED_CHREMAP 0x2008
#define MC_ARB_RAMCFG 0x2760
#define NOOFBANK_SHIFT 0
@@ -303,6 +304,7 @@
#define BILINEAR_PRECISION_8_BIT (1 << 31)
#define TCP_CNTL 0x9610
+#define TCP_CHAN_STEER 0x9614
#define VGT_CACHE_INVALIDATION 0x88C4
#define CACHE_INVALIDATION(x) ((x)<<0)
@@ -351,4 +353,49 @@
#define SRBM_STATUS 0x0E50
+#define D1GRPH_PRIMARY_SURFACE_ADDRESS 0x6110
+#define D1GRPH_PRIMARY_SURFACE_ADDRESS_HIGH 0x6914
+#define D2GRPH_PRIMARY_SURFACE_ADDRESS_HIGH 0x6114
+#define D1GRPH_SECONDARY_SURFACE_ADDRESS 0x6118
+#define D1GRPH_SECONDARY_SURFACE_ADDRESS_HIGH 0x691c
+#define D2GRPH_SECONDARY_SURFACE_ADDRESS_HIGH 0x611c
+
+/* PCIE link stuff */
+#define PCIE_LC_TRAINING_CNTL 0xa1 /* PCIE_P */
+#define PCIE_LC_LINK_WIDTH_CNTL 0xa2 /* PCIE_P */
+# define LC_LINK_WIDTH_SHIFT 0
+# define LC_LINK_WIDTH_MASK 0x7
+# define LC_LINK_WIDTH_X0 0
+# define LC_LINK_WIDTH_X1 1
+# define LC_LINK_WIDTH_X2 2
+# define LC_LINK_WIDTH_X4 3
+# define LC_LINK_WIDTH_X8 4
+# define LC_LINK_WIDTH_X16 6
+# define LC_LINK_WIDTH_RD_SHIFT 4
+# define LC_LINK_WIDTH_RD_MASK 0x70
+# define LC_RECONFIG_ARC_MISSING_ESCAPE (1 << 7)
+# define LC_RECONFIG_NOW (1 << 8)
+# define LC_RENEGOTIATION_SUPPORT (1 << 9)
+# define LC_RENEGOTIATE_EN (1 << 10)
+# define LC_SHORT_RECONFIG_EN (1 << 11)
+# define LC_UPCONFIGURE_SUPPORT (1 << 12)
+# define LC_UPCONFIGURE_DIS (1 << 13)
+#define PCIE_LC_SPEED_CNTL 0xa4 /* PCIE_P */
+# define LC_GEN2_EN_STRAP (1 << 0)
+# define LC_TARGET_LINK_SPEED_OVERRIDE_EN (1 << 1)
+# define LC_FORCE_EN_HW_SPEED_CHANGE (1 << 5)
+# define LC_FORCE_DIS_HW_SPEED_CHANGE (1 << 6)
+# define LC_SPEED_CHANGE_ATTEMPTS_ALLOWED_MASK (0x3 << 8)
+# define LC_SPEED_CHANGE_ATTEMPTS_ALLOWED_SHIFT 3
+# define LC_CURRENT_DATA_RATE (1 << 11)
+# define LC_VOLTAGE_TIMER_SEL_MASK (0xf << 14)
+# define LC_CLR_FAILED_SPD_CHANGE_CNT (1 << 21)
+# define LC_OTHER_SIDE_EVER_SENT_GEN2 (1 << 23)
+# define LC_OTHER_SIDE_SUPPORTS_GEN2 (1 << 24)
+#define MM_CFGREGS_CNTL 0x544c
+# define MM_WR_TO_CFG_EN (1 << 3)
+#define LINK_CNTL2 0x88 /* F0 */
+# define TARGET_LINK_SPEED_MASK (0xf << 0)
+# define SELECTABLE_DEEMPHASIS (1 << 6)
+
#endif
diff --git a/drivers/gpu/drm/ttm/ttm_bo.c b/drivers/gpu/drm/ttm/ttm_bo.c
index 934a96a7854..af61fc29e84 100644
--- a/drivers/gpu/drm/ttm/ttm_bo.c
+++ b/drivers/gpu/drm/ttm/ttm_bo.c
@@ -169,7 +169,7 @@ int ttm_bo_wait_unreserved(struct ttm_buffer_object *bo, bool interruptible)
}
EXPORT_SYMBOL(ttm_bo_wait_unreserved);
-static void ttm_bo_add_to_lru(struct ttm_buffer_object *bo)
+void ttm_bo_add_to_lru(struct ttm_buffer_object *bo)
{
struct ttm_bo_device *bdev = bo->bdev;
struct ttm_mem_type_manager *man;
@@ -191,11 +191,7 @@ static void ttm_bo_add_to_lru(struct ttm_buffer_object *bo)
}
}
-/**
- * Call with the lru_lock held.
- */
-
-static int ttm_bo_del_from_lru(struct ttm_buffer_object *bo)
+int ttm_bo_del_from_lru(struct ttm_buffer_object *bo)
{
int put_count = 0;
@@ -227,9 +223,18 @@ int ttm_bo_reserve_locked(struct ttm_buffer_object *bo,
/**
* Deadlock avoidance for multi-bo reserving.
*/
- if (use_sequence && bo->seq_valid &&
- (sequence - bo->val_seq < (1 << 31))) {
- return -EAGAIN;
+ if (use_sequence && bo->seq_valid) {
+ /**
+ * We've already reserved this one.
+ */
+ if (unlikely(sequence == bo->val_seq))
+ return -EDEADLK;
+ /**
+ * Already reserved by a thread that will not back
+ * off for us. We need to back off.
+ */
+ if (unlikely(sequence - bo->val_seq < (1 << 31)))
+ return -EAGAIN;
}
if (no_wait)
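
The reworked check gives ttm_bo_reserve_locked() three outcomes when sequences are in use: -EDEADLK if the caller re-reserves a buffer it already holds under the same sequence (a duplicate in its list), -EAGAIN if a thread with an older sequence (smaller, modulo u32 wraparound) holds it and the caller must back off, and success otherwise. A simplified sketch of the caller's loop in the style of ttm_eu_reserve_buffers(), which the radeon_bo_list changes above switch to (example_* types are illustrative, and the real helper also takes the global lru_lock around the ticket bump):

	struct example_entry {
		struct list_head head;
		struct ttm_buffer_object *bo;
		bool reserved;
	};

	static void example_unreserve_all(struct list_head *list)
	{
		struct example_entry *e;

		list_for_each_entry(e, list, head) {
			if (e->reserved) {
				ttm_bo_unreserve(e->bo);
				e->reserved = false;
			}
		}
	}

	static int example_reserve_all(struct ttm_bo_device *bdev,
				       struct list_head *list)
	{
		struct example_entry *e;
		uint32_t seq;
		int ret;
	retry:
		seq = ++bdev->val_seq;	/* one ticket per validation pass */
		list_for_each_entry(e, list, head) {
			ret = ttm_bo_reserve(e->bo, true, false, true, seq);
			if (ret == -EAGAIN) {
				/* an older ticket holds e->bo: release what
				 * we have, wait for it, then start over */
				example_unreserve_all(list);
				ret = ttm_bo_wait_unreserved(e->bo, true);
				if (ret)
					return ret;
				goto retry;
			} else if (ret) {
				/* -EDEADLK: the same bo appears twice */
				example_unreserve_all(list);
				return ret;
			}
			e->reserved = true;
		}
		return 0;
	}
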
@@ -267,6 +272,13 @@ static void ttm_bo_ref_bug(struct kref *list_kref)
BUG();
}
+void ttm_bo_list_ref_sub(struct ttm_buffer_object *bo, int count,
+ bool never_free)
+{
+ kref_sub(&bo->list_kref, count,
+ (never_free) ? ttm_bo_ref_bug : ttm_bo_release_list);
+}
+
int ttm_bo_reserve(struct ttm_buffer_object *bo,
bool interruptible,
bool no_wait, bool use_sequence, uint32_t sequence)
@@ -282,20 +294,24 @@ int ttm_bo_reserve(struct ttm_buffer_object *bo,
put_count = ttm_bo_del_from_lru(bo);
spin_unlock(&glob->lru_lock);
- while (put_count--)
- kref_put(&bo->list_kref, ttm_bo_ref_bug);
+ ttm_bo_list_ref_sub(bo, put_count, true);
return ret;
}
+void ttm_bo_unreserve_locked(struct ttm_buffer_object *bo)
+{
+ ttm_bo_add_to_lru(bo);
+ atomic_set(&bo->reserved, 0);
+ wake_up_all(&bo->event_queue);
+}
+
void ttm_bo_unreserve(struct ttm_buffer_object *bo)
{
struct ttm_bo_global *glob = bo->glob;
spin_lock(&glob->lru_lock);
- ttm_bo_add_to_lru(bo);
- atomic_set(&bo->reserved, 0);
- wake_up_all(&bo->event_queue);
+ ttm_bo_unreserve_locked(bo);
spin_unlock(&glob->lru_lock);
}
EXPORT_SYMBOL(ttm_bo_unreserve);
@@ -362,8 +378,13 @@ static int ttm_bo_handle_move_mem(struct ttm_buffer_object *bo,
int ret = 0;
if (old_is_pci || new_is_pci ||
- ((mem->placement & bo->mem.placement & TTM_PL_MASK_CACHING) == 0))
- ttm_bo_unmap_virtual(bo);
+ ((mem->placement & bo->mem.placement & TTM_PL_MASK_CACHING) == 0)) {
+ ret = ttm_mem_io_lock(old_man, true);
+ if (unlikely(ret != 0))
+ goto out_err;
+ ttm_bo_unmap_virtual_locked(bo);
+ ttm_mem_io_unlock(old_man);
+ }
/*
* Create and bind a ttm if required.
@@ -416,11 +437,9 @@ moved:
}
if (bo->mem.mm_node) {
- spin_lock(&bo->lock);
bo->offset = (bo->mem.start << PAGE_SHIFT) +
bdev->man[bo->mem.mem_type].gpu_offset;
bo->cur_placement = bo->mem.placement;
- spin_unlock(&bo->lock);
} else
bo->offset = 0;
@@ -452,7 +471,6 @@ static void ttm_bo_cleanup_memtype_use(struct ttm_buffer_object *bo)
ttm_tt_destroy(bo->ttm);
bo->ttm = NULL;
}
-
ttm_bo_mem_put(bo, &bo->mem);
atomic_set(&bo->reserved, 0);
@@ -474,14 +492,14 @@ static void ttm_bo_cleanup_refs_or_queue(struct ttm_buffer_object *bo)
int put_count;
int ret;
- spin_lock(&bo->lock);
+ spin_lock(&bdev->fence_lock);
(void) ttm_bo_wait(bo, false, false, true);
if (!bo->sync_obj) {
spin_lock(&glob->lru_lock);
/**
- * Lock inversion between bo::reserve and bo::lock here,
+ * Lock inversion between bo::reserve and bdev::fence_lock here,
* but that's OK, since we're only trylocking.
*/
@@ -490,14 +508,13 @@ static void ttm_bo_cleanup_refs_or_queue(struct ttm_buffer_object *bo)
if (unlikely(ret == -EBUSY))
goto queue;
- spin_unlock(&bo->lock);
+ spin_unlock(&bdev->fence_lock);
put_count = ttm_bo_del_from_lru(bo);
spin_unlock(&glob->lru_lock);
ttm_bo_cleanup_memtype_use(bo);
- while (put_count--)
- kref_put(&bo->list_kref, ttm_bo_ref_bug);
+ ttm_bo_list_ref_sub(bo, put_count, true);
return;
} else {
@@ -512,7 +529,7 @@ queue:
kref_get(&bo->list_kref);
list_add_tail(&bo->ddestroy, &bdev->ddestroy);
spin_unlock(&glob->lru_lock);
- spin_unlock(&bo->lock);
+ spin_unlock(&bdev->fence_lock);
if (sync_obj) {
driver->sync_obj_flush(sync_obj, sync_obj_arg);
@@ -537,14 +554,15 @@ static int ttm_bo_cleanup_refs(struct ttm_buffer_object *bo,
bool no_wait_reserve,
bool no_wait_gpu)
{
+ struct ttm_bo_device *bdev = bo->bdev;
struct ttm_bo_global *glob = bo->glob;
int put_count;
int ret = 0;
retry:
- spin_lock(&bo->lock);
+ spin_lock(&bdev->fence_lock);
ret = ttm_bo_wait(bo, false, interruptible, no_wait_gpu);
- spin_unlock(&bo->lock);
+ spin_unlock(&bdev->fence_lock);
if (unlikely(ret != 0))
return ret;
@@ -580,8 +598,7 @@ retry:
spin_unlock(&glob->lru_lock);
ttm_bo_cleanup_memtype_use(bo);
- while (put_count--)
- kref_put(&bo->list_kref, ttm_bo_ref_bug);
+ ttm_bo_list_ref_sub(bo, put_count, true);
return 0;
}
@@ -652,6 +669,7 @@ static void ttm_bo_release(struct kref *kref)
struct ttm_buffer_object *bo =
container_of(kref, struct ttm_buffer_object, kref);
struct ttm_bo_device *bdev = bo->bdev;
+ struct ttm_mem_type_manager *man = &bdev->man[bo->mem.mem_type];
if (likely(bo->vm_node != NULL)) {
rb_erase(&bo->vm_rb, &bdev->addr_space_rb);
@@ -659,6 +677,9 @@ static void ttm_bo_release(struct kref *kref)
bo->vm_node = NULL;
}
write_unlock(&bdev->vm_lock);
+ ttm_mem_io_lock(man, false);
+ ttm_mem_io_free_vm(bo);
+ ttm_mem_io_unlock(man);
ttm_bo_cleanup_refs_or_queue(bo);
kref_put(&bo->list_kref, ttm_bo_release_list);
write_lock(&bdev->vm_lock);
@@ -698,9 +719,9 @@ static int ttm_bo_evict(struct ttm_buffer_object *bo, bool interruptible,
struct ttm_placement placement;
int ret = 0;
- spin_lock(&bo->lock);
+ spin_lock(&bdev->fence_lock);
ret = ttm_bo_wait(bo, false, interruptible, no_wait_gpu);
- spin_unlock(&bo->lock);
+ spin_unlock(&bdev->fence_lock);
if (unlikely(ret != 0)) {
if (ret != -ERESTARTSYS) {
@@ -715,7 +736,8 @@ static int ttm_bo_evict(struct ttm_buffer_object *bo, bool interruptible,
evict_mem = bo->mem;
evict_mem.mm_node = NULL;
- evict_mem.bus.io_reserved = false;
+ evict_mem.bus.io_reserved_vm = false;
+ evict_mem.bus.io_reserved_count = 0;
placement.fpfn = 0;
placement.lpfn = 0;
@@ -802,8 +824,7 @@ retry:
BUG_ON(ret != 0);
- while (put_count--)
- kref_put(&bo->list_kref, ttm_bo_ref_bug);
+ ttm_bo_list_ref_sub(bo, put_count, true);
ret = ttm_bo_evict(bo, interruptible, no_wait_reserve, no_wait_gpu);
ttm_bo_unreserve(bo);
@@ -1036,6 +1057,7 @@ int ttm_bo_move_buffer(struct ttm_buffer_object *bo,
{
int ret = 0;
struct ttm_mem_reg mem;
+ struct ttm_bo_device *bdev = bo->bdev;
BUG_ON(!atomic_read(&bo->reserved));
@@ -1044,15 +1066,16 @@ int ttm_bo_move_buffer(struct ttm_buffer_object *bo,
* Have the driver move function wait for idle when necessary,
* instead of doing it here.
*/
- spin_lock(&bo->lock);
+ spin_lock(&bdev->fence_lock);
ret = ttm_bo_wait(bo, false, interruptible, no_wait_gpu);
- spin_unlock(&bo->lock);
+ spin_unlock(&bdev->fence_lock);
if (ret)
return ret;
mem.num_pages = bo->num_pages;
mem.size = mem.num_pages << PAGE_SHIFT;
mem.page_alignment = bo->mem.page_alignment;
- mem.bus.io_reserved = false;
+ mem.bus.io_reserved_vm = false;
+ mem.bus.io_reserved_count = 0;
/*
* Determine where to move the buffer.
*/
@@ -1163,7 +1186,6 @@ int ttm_bo_init(struct ttm_bo_device *bdev,
}
bo->destroy = destroy;
- spin_lock_init(&bo->lock);
kref_init(&bo->kref);
kref_init(&bo->list_kref);
atomic_set(&bo->cpu_writers, 0);
@@ -1172,6 +1194,7 @@ int ttm_bo_init(struct ttm_bo_device *bdev,
INIT_LIST_HEAD(&bo->lru);
INIT_LIST_HEAD(&bo->ddestroy);
INIT_LIST_HEAD(&bo->swap);
+ INIT_LIST_HEAD(&bo->io_reserve_lru);
bo->bdev = bdev;
bo->glob = bdev->glob;
bo->type = type;
@@ -1181,7 +1204,8 @@ int ttm_bo_init(struct ttm_bo_device *bdev,
bo->mem.num_pages = bo->num_pages;
bo->mem.mm_node = NULL;
bo->mem.page_alignment = page_alignment;
- bo->mem.bus.io_reserved = false;
+ bo->mem.bus.io_reserved_vm = false;
+ bo->mem.bus.io_reserved_count = 0;
bo->buffer_start = buffer_start & PAGE_MASK;
bo->priv_flags = 0;
bo->mem.placement = (TTM_PL_FLAG_SYSTEM | TTM_PL_FLAG_CACHED);
@@ -1355,6 +1379,10 @@ int ttm_bo_init_mm(struct ttm_bo_device *bdev, unsigned type,
BUG_ON(type >= TTM_NUM_MEM_TYPES);
man = &bdev->man[type];
BUG_ON(man->has_type);
+ man->io_reserve_fastpath = true;
+ man->use_io_reserve_lru = false;
+ mutex_init(&man->io_reserve_mutex);
+ INIT_LIST_HEAD(&man->io_reserve_lru);
ret = bdev->driver->init_mem_type(bdev, type, man);
if (ret)
@@ -1526,7 +1554,8 @@ int ttm_bo_device_init(struct ttm_bo_device *bdev,
bdev->dev_mapping = NULL;
bdev->glob = glob;
bdev->need_dma32 = need_dma32;
-
+ bdev->val_seq = 0;
+ spin_lock_init(&bdev->fence_lock);
mutex_lock(&glob->device_list_mutex);
list_add_tail(&bdev->device_list, &glob->device_list);
mutex_unlock(&glob->device_list_mutex);
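
The val_seq counter seeds the reservation tickets used above, and fence_lock replaces the old per-buffer bo->lock for everything fence-related: bo->sync_obj, bo->sync_obj_arg and the TTM_BO_PRIV_FLAG_MOVING bit are now all guarded by the one device-wide spinlock, which is what lets radeon_bo_wait() take bo->tbo.bdev->fence_lock. The access rule, as a minimal sketch:

	/* Sketch: any inspection of a buffer's fence state must hold the
	 * device-wide lock; take a driver reference before dropping it. */
	static void *example_grab_fence(struct ttm_buffer_object *bo)
	{
		struct ttm_bo_device *bdev = bo->bdev;
		void *fence = NULL;

		spin_lock(&bdev->fence_lock);
		if (bo->sync_obj)
			fence = bdev->driver->sync_obj_ref(bo->sync_obj);
		spin_unlock(&bdev->fence_lock);
		return fence;		/* caller must sync_obj_unref() it */
	}
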
@@ -1560,7 +1589,7 @@ bool ttm_mem_reg_is_pci(struct ttm_bo_device *bdev, struct ttm_mem_reg *mem)
return true;
}
-void ttm_bo_unmap_virtual(struct ttm_buffer_object *bo)
+void ttm_bo_unmap_virtual_locked(struct ttm_buffer_object *bo)
{
struct ttm_bo_device *bdev = bo->bdev;
loff_t offset = (loff_t) bo->addr_space_offset;
@@ -1569,8 +1598,20 @@ void ttm_bo_unmap_virtual(struct ttm_buffer_object *bo)
if (!bdev->dev_mapping)
return;
unmap_mapping_range(bdev->dev_mapping, offset, holelen, 1);
- ttm_mem_io_free(bdev, &bo->mem);
+ ttm_mem_io_free_vm(bo);
+}
+
+void ttm_bo_unmap_virtual(struct ttm_buffer_object *bo)
+{
+ struct ttm_bo_device *bdev = bo->bdev;
+ struct ttm_mem_type_manager *man = &bdev->man[bo->mem.mem_type];
+
+ ttm_mem_io_lock(man, false);
+ ttm_bo_unmap_virtual_locked(bo);
+ ttm_mem_io_unlock(man);
}
+
+
EXPORT_SYMBOL(ttm_bo_unmap_virtual);
static void ttm_bo_vm_insert_rb(struct ttm_buffer_object *bo)
@@ -1650,6 +1691,7 @@ int ttm_bo_wait(struct ttm_buffer_object *bo,
bool lazy, bool interruptible, bool no_wait)
{
struct ttm_bo_driver *driver = bo->bdev->driver;
+ struct ttm_bo_device *bdev = bo->bdev;
void *sync_obj;
void *sync_obj_arg;
int ret = 0;
@@ -1663,9 +1705,9 @@ int ttm_bo_wait(struct ttm_buffer_object *bo,
void *tmp_obj = bo->sync_obj;
bo->sync_obj = NULL;
clear_bit(TTM_BO_PRIV_FLAG_MOVING, &bo->priv_flags);
- spin_unlock(&bo->lock);
+ spin_unlock(&bdev->fence_lock);
driver->sync_obj_unref(&tmp_obj);
- spin_lock(&bo->lock);
+ spin_lock(&bdev->fence_lock);
continue;
}
@@ -1674,29 +1716,29 @@ int ttm_bo_wait(struct ttm_buffer_object *bo,
sync_obj = driver->sync_obj_ref(bo->sync_obj);
sync_obj_arg = bo->sync_obj_arg;
- spin_unlock(&bo->lock);
+ spin_unlock(&bdev->fence_lock);
ret = driver->sync_obj_wait(sync_obj, sync_obj_arg,
lazy, interruptible);
if (unlikely(ret != 0)) {
driver->sync_obj_unref(&sync_obj);
- spin_lock(&bo->lock);
+ spin_lock(&bdev->fence_lock);
return ret;
}
- spin_lock(&bo->lock);
+ spin_lock(&bdev->fence_lock);
if (likely(bo->sync_obj == sync_obj &&
bo->sync_obj_arg == sync_obj_arg)) {
void *tmp_obj = bo->sync_obj;
bo->sync_obj = NULL;
clear_bit(TTM_BO_PRIV_FLAG_MOVING,
&bo->priv_flags);
- spin_unlock(&bo->lock);
+ spin_unlock(&bdev->fence_lock);
driver->sync_obj_unref(&sync_obj);
driver->sync_obj_unref(&tmp_obj);
- spin_lock(&bo->lock);
+ spin_lock(&bdev->fence_lock);
} else {
- spin_unlock(&bo->lock);
+ spin_unlock(&bdev->fence_lock);
driver->sync_obj_unref(&sync_obj);
- spin_lock(&bo->lock);
+ spin_lock(&bdev->fence_lock);
}
}
return 0;
@@ -1705,6 +1747,7 @@ EXPORT_SYMBOL(ttm_bo_wait);
int ttm_bo_synccpu_write_grab(struct ttm_buffer_object *bo, bool no_wait)
{
+ struct ttm_bo_device *bdev = bo->bdev;
int ret = 0;
/*
@@ -1714,9 +1757,9 @@ int ttm_bo_synccpu_write_grab(struct ttm_buffer_object *bo, bool no_wait)
ret = ttm_bo_reserve(bo, true, no_wait, false, 0);
if (unlikely(ret != 0))
return ret;
- spin_lock(&bo->lock);
+ spin_lock(&bdev->fence_lock);
ret = ttm_bo_wait(bo, false, true, no_wait);
- spin_unlock(&bo->lock);
+ spin_unlock(&bdev->fence_lock);
if (likely(ret == 0))
atomic_inc(&bo->cpu_writers);
ttm_bo_unreserve(bo);
@@ -1782,16 +1825,15 @@ static int ttm_bo_swapout(struct ttm_mem_shrink *shrink)
put_count = ttm_bo_del_from_lru(bo);
spin_unlock(&glob->lru_lock);
- while (put_count--)
- kref_put(&bo->list_kref, ttm_bo_ref_bug);
+ ttm_bo_list_ref_sub(bo, put_count, true);
/**
* Wait for GPU, then move to system cached.
*/
- spin_lock(&bo->lock);
+ spin_lock(&bo->bdev->fence_lock);
ret = ttm_bo_wait(bo, false, false, false);
- spin_unlock(&bo->lock);
+ spin_unlock(&bo->bdev->fence_lock);
if (unlikely(ret != 0))
goto out;
diff --git a/drivers/gpu/drm/ttm/ttm_bo_util.c b/drivers/gpu/drm/ttm/ttm_bo_util.c
index 3106d5bcce3..77dbf408c0d 100644
--- a/drivers/gpu/drm/ttm/ttm_bo_util.c
+++ b/drivers/gpu/drm/ttm/ttm_bo_util.c
@@ -75,37 +75,123 @@ int ttm_bo_move_ttm(struct ttm_buffer_object *bo,
}
EXPORT_SYMBOL(ttm_bo_move_ttm);
-int ttm_mem_io_reserve(struct ttm_bo_device *bdev, struct ttm_mem_reg *mem)
+int ttm_mem_io_lock(struct ttm_mem_type_manager *man, bool interruptible)
{
- int ret;
+ if (likely(man->io_reserve_fastpath))
+ return 0;
+
+ if (interruptible)
+ return mutex_lock_interruptible(&man->io_reserve_mutex);
+
+ mutex_lock(&man->io_reserve_mutex);
+ return 0;
+}
- if (!mem->bus.io_reserved) {
- mem->bus.io_reserved = true;
+void ttm_mem_io_unlock(struct ttm_mem_type_manager *man)
+{
+ if (likely(man->io_reserve_fastpath))
+ return;
+
+ mutex_unlock(&man->io_reserve_mutex);
+}
+
+static int ttm_mem_io_evict(struct ttm_mem_type_manager *man)
+{
+ struct ttm_buffer_object *bo;
+
+ if (!man->use_io_reserve_lru || list_empty(&man->io_reserve_lru))
+ return -EAGAIN;
+
+ bo = list_first_entry(&man->io_reserve_lru,
+ struct ttm_buffer_object,
+ io_reserve_lru);
+ list_del_init(&bo->io_reserve_lru);
+ ttm_bo_unmap_virtual_locked(bo);
+
+ return 0;
+}
+
+static int ttm_mem_io_reserve(struct ttm_bo_device *bdev,
+ struct ttm_mem_reg *mem)
+{
+ struct ttm_mem_type_manager *man = &bdev->man[mem->mem_type];
+ int ret = 0;
+
+ if (!bdev->driver->io_mem_reserve)
+ return 0;
+ if (likely(man->io_reserve_fastpath))
+ return bdev->driver->io_mem_reserve(bdev, mem);
+
+ if (bdev->driver->io_mem_reserve &&
+ mem->bus.io_reserved_count++ == 0) {
+retry:
ret = bdev->driver->io_mem_reserve(bdev, mem);
+ if (ret == -EAGAIN) {
+ ret = ttm_mem_io_evict(man);
+ if (ret == 0)
+ goto retry;
+ }
+ }
+ return ret;
+}
+
+static void ttm_mem_io_free(struct ttm_bo_device *bdev,
+ struct ttm_mem_reg *mem)
+{
+ struct ttm_mem_type_manager *man = &bdev->man[mem->mem_type];
+
+ if (likely(man->io_reserve_fastpath))
+ return;
+
+ if (bdev->driver->io_mem_reserve &&
+ --mem->bus.io_reserved_count == 0 &&
+ bdev->driver->io_mem_free)
+ bdev->driver->io_mem_free(bdev, mem);
+}
+
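+/*
+ * Reserve io space for a BO about to be mapped into a process address
+ * space, and track it on the io LRU so ttm_mem_io_evict() can reclaim
+ * the reservation when the driver runs out of io space.
+ */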
+int ttm_mem_io_reserve_vm(struct ttm_buffer_object *bo)
+{
+ struct ttm_mem_reg *mem = &bo->mem;
+ int ret;
+
+ if (!mem->bus.io_reserved_vm) {
+ struct ttm_mem_type_manager *man =
+ &bo->bdev->man[mem->mem_type];
+
+ ret = ttm_mem_io_reserve(bo->bdev, mem);
if (unlikely(ret != 0))
return ret;
+ mem->bus.io_reserved_vm = true;
+ if (man->use_io_reserve_lru)
+ list_add_tail(&bo->io_reserve_lru,
+ &man->io_reserve_lru);
}
return 0;
}
-void ttm_mem_io_free(struct ttm_bo_device *bdev, struct ttm_mem_reg *mem)
+void ttm_mem_io_free_vm(struct ttm_buffer_object *bo)
{
- if (bdev->driver->io_mem_reserve) {
- if (mem->bus.io_reserved) {
- mem->bus.io_reserved = false;
- bdev->driver->io_mem_free(bdev, mem);
- }
+ struct ttm_mem_reg *mem = &bo->mem;
+
+ if (mem->bus.io_reserved_vm) {
+ mem->bus.io_reserved_vm = false;
+ list_del_init(&bo->io_reserve_lru);
+ ttm_mem_io_free(bo->bdev, mem);
}
}
int ttm_mem_reg_ioremap(struct ttm_bo_device *bdev, struct ttm_mem_reg *mem,
void **virtual)
{
+ struct ttm_mem_type_manager *man = &bdev->man[mem->mem_type];
int ret;
void *addr;
*virtual = NULL;
+ (void) ttm_mem_io_lock(man, false);
ret = ttm_mem_io_reserve(bdev, mem);
+ ttm_mem_io_unlock(man);
if (ret || !mem->bus.is_iomem)
return ret;
@@ -117,7 +203,9 @@ int ttm_mem_reg_ioremap(struct ttm_bo_device *bdev, struct ttm_mem_reg *mem,
else
addr = ioremap_nocache(mem->bus.base + mem->bus.offset, mem->bus.size);
if (!addr) {
+ (void) ttm_mem_io_lock(man, false);
ttm_mem_io_free(bdev, mem);
+ ttm_mem_io_unlock(man);
return -ENOMEM;
}
}
@@ -134,7 +222,9 @@ void ttm_mem_reg_iounmap(struct ttm_bo_device *bdev, struct ttm_mem_reg *mem,
if (virtual && mem->bus.addr == NULL)
iounmap(virtual);
+ (void) ttm_mem_io_lock(man, false);
ttm_mem_io_free(bdev, mem);
+ ttm_mem_io_unlock(man);
}
static int ttm_copy_io_page(void *dst, void *src, unsigned long page)
@@ -231,7 +321,7 @@ int ttm_bo_move_memcpy(struct ttm_buffer_object *bo,
struct ttm_mem_type_manager *man = &bdev->man[new_mem->mem_type];
struct ttm_tt *ttm = bo->ttm;
struct ttm_mem_reg *old_mem = &bo->mem;
+ /* Keep old_copy initialized: error paths iounmap it before out2 runs. */
struct ttm_mem_reg old_copy = *old_mem;
void *old_iomap;
void *new_iomap;
int ret;
@@ -280,8 +370,7 @@ int ttm_bo_move_memcpy(struct ttm_buffer_object *bo,
}
mb();
out2:
- ttm_bo_free_old_node(bo);
-
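+ /* Defer freeing the old node: snapshot the reg and put it after unmap. */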
+ old_copy = *old_mem;
*old_mem = *new_mem;
new_mem->mm_node = NULL;
@@ -292,9 +381,10 @@ out2:
}
out1:
- ttm_mem_reg_iounmap(bdev, new_mem, new_iomap);
+ ttm_mem_reg_iounmap(bdev, old_mem, new_iomap);
out:
ttm_mem_reg_iounmap(bdev, &old_copy, old_iomap);
+ /* On error the move failed, so *old_mem still owns the node; keep it. */
+ if (!ret)
+ ttm_bo_mem_put(bo, &old_copy);
return ret;
}
EXPORT_SYMBOL(ttm_bo_move_memcpy);
@@ -337,11 +427,11 @@ static int ttm_buffer_object_transfer(struct ttm_buffer_object *bo,
* TODO: Explicit member copy would probably be better here.
*/
- spin_lock_init(&fbo->lock);
init_waitqueue_head(&fbo->event_queue);
INIT_LIST_HEAD(&fbo->ddestroy);
INIT_LIST_HEAD(&fbo->lru);
INIT_LIST_HEAD(&fbo->swap);
+ INIT_LIST_HEAD(&fbo->io_reserve_lru);
fbo->vm_node = NULL;
atomic_set(&fbo->cpu_writers, 0);
@@ -453,6 +543,8 @@ int ttm_bo_kmap(struct ttm_buffer_object *bo,
unsigned long start_page, unsigned long num_pages,
struct ttm_bo_kmap_obj *map)
{
+ struct ttm_mem_type_manager *man =
+ &bo->bdev->man[bo->mem.mem_type];
unsigned long offset, size;
int ret;
@@ -467,7 +559,9 @@ int ttm_bo_kmap(struct ttm_buffer_object *bo,
if (num_pages > 1 && !DRM_SUSER(DRM_CURPROC))
return -EPERM;
#endif
+ (void) ttm_mem_io_lock(man, false);
ret = ttm_mem_io_reserve(bo->bdev, &bo->mem);
+ ttm_mem_io_unlock(man);
if (ret)
return ret;
if (!bo->mem.bus.is_iomem) {
@@ -482,12 +576,15 @@ EXPORT_SYMBOL(ttm_bo_kmap);
void ttm_bo_kunmap(struct ttm_bo_kmap_obj *map)
{
+ struct ttm_buffer_object *bo = map->bo;
+ struct ttm_mem_type_manager *man =
+ &bo->bdev->man[bo->mem.mem_type];
+
if (!map->virtual)
return;
switch (map->bo_kmap_type) {
case ttm_bo_map_iomap:
iounmap(map->virtual);
- ttm_mem_io_free(map->bo->bdev, &map->bo->mem);
break;
case ttm_bo_map_vmap:
vunmap(map->virtual);
@@ -500,6 +597,9 @@ void ttm_bo_kunmap(struct ttm_bo_kmap_obj *map)
default:
BUG();
}
+ (void) ttm_mem_io_lock(man, false);
+ ttm_mem_io_free(bo->bdev, &bo->mem);
+ ttm_mem_io_unlock(man);
map->virtual = NULL;
map->page = NULL;
}
@@ -520,7 +620,7 @@ int ttm_bo_move_accel_cleanup(struct ttm_buffer_object *bo,
struct ttm_buffer_object *ghost_obj;
void *tmp_obj = NULL;
- spin_lock(&bo->lock);
+ spin_lock(&bdev->fence_lock);
if (bo->sync_obj) {
tmp_obj = bo->sync_obj;
bo->sync_obj = NULL;
@@ -529,7 +629,7 @@ int ttm_bo_move_accel_cleanup(struct ttm_buffer_object *bo,
bo->sync_obj_arg = sync_obj_arg;
if (evict) {
ret = ttm_bo_wait(bo, false, false, false);
- spin_unlock(&bo->lock);
+ spin_unlock(&bdev->fence_lock);
if (tmp_obj)
driver->sync_obj_unref(&tmp_obj);
if (ret)
@@ -552,7 +652,7 @@ int ttm_bo_move_accel_cleanup(struct ttm_buffer_object *bo,
*/
set_bit(TTM_BO_PRIV_FLAG_MOVING, &bo->priv_flags);
- spin_unlock(&bo->lock);
+ spin_unlock(&bdev->fence_lock);
if (tmp_obj)
driver->sync_obj_unref(&tmp_obj);
diff --git a/drivers/gpu/drm/ttm/ttm_bo_vm.c b/drivers/gpu/drm/ttm/ttm_bo_vm.c
index fe6cb77899f..221b924aceb 100644
--- a/drivers/gpu/drm/ttm/ttm_bo_vm.c
+++ b/drivers/gpu/drm/ttm/ttm_bo_vm.c
@@ -83,6 +83,8 @@ static int ttm_bo_vm_fault(struct vm_area_struct *vma, struct vm_fault *vmf)
int i;
unsigned long address = (unsigned long)vmf->virtual_address;
int retval = VM_FAULT_NOPAGE;
+ struct ttm_mem_type_manager *man =
+ &bdev->man[bo->mem.mem_type];
/*
* Work around locking order reversal in fault / nopfn
@@ -118,24 +120,28 @@ static int ttm_bo_vm_fault(struct vm_area_struct *vma, struct vm_fault *vmf)
* move.
*/
- spin_lock(&bo->lock);
+ spin_lock(&bdev->fence_lock);
if (test_bit(TTM_BO_PRIV_FLAG_MOVING, &bo->priv_flags)) {
ret = ttm_bo_wait(bo, false, true, false);
- spin_unlock(&bo->lock);
+ spin_unlock(&bdev->fence_lock);
if (unlikely(ret != 0)) {
retval = (ret != -ERESTARTSYS) ?
VM_FAULT_SIGBUS : VM_FAULT_NOPAGE;
goto out_unlock;
}
} else
- spin_unlock(&bo->lock);
+ spin_unlock(&bdev->fence_lock);
-
- ret = ttm_mem_io_reserve(bdev, &bo->mem);
- if (ret) {
- retval = VM_FAULT_SIGBUS;
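+ /* Interruptible lock: a pending signal yields VM_FAULT_NOPAGE so the fault is retried. */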
+ ret = ttm_mem_io_lock(man, true);
+ if (unlikely(ret != 0)) {
+ retval = VM_FAULT_NOPAGE;
goto out_unlock;
}
+ ret = ttm_mem_io_reserve_vm(bo);
+ if (unlikely(ret != 0)) {
+ retval = VM_FAULT_SIGBUS;
+ goto out_io_unlock;
+ }
page_offset = ((address - vma->vm_start) >> PAGE_SHIFT) +
bo->vm_node->start - vma->vm_pgoff;
@@ -144,7 +150,7 @@ static int ttm_bo_vm_fault(struct vm_area_struct *vma, struct vm_fault *vmf)
if (unlikely(page_offset >= bo->num_pages)) {
retval = VM_FAULT_SIGBUS;
- goto out_unlock;
+ goto out_io_unlock;
}
/*
@@ -182,7 +188,7 @@ static int ttm_bo_vm_fault(struct vm_area_struct *vma, struct vm_fault *vmf)
page = ttm_tt_get_page(ttm, page_offset);
if (unlikely(!page && i == 0)) {
retval = VM_FAULT_OOM;
- goto out_unlock;
+ goto out_io_unlock;
} else if (unlikely(!page)) {
break;
}
@@ -200,14 +206,15 @@ static int ttm_bo_vm_fault(struct vm_area_struct *vma, struct vm_fault *vmf)
else if (unlikely(ret != 0)) {
retval =
(ret == -ENOMEM) ? VM_FAULT_OOM : VM_FAULT_SIGBUS;
- goto out_unlock;
+ goto out_io_unlock;
}
address += PAGE_SIZE;
if (unlikely(++page_offset >= page_last))
break;
}
-
+out_io_unlock:
+ ttm_mem_io_unlock(man);
out_unlock:
ttm_bo_unreserve(bo);
return retval;
diff --git a/drivers/gpu/drm/ttm/ttm_execbuf_util.c b/drivers/gpu/drm/ttm/ttm_execbuf_util.c
index c285c2902d1..3832fe10b4d 100644
--- a/drivers/gpu/drm/ttm/ttm_execbuf_util.c
+++ b/drivers/gpu/drm/ttm/ttm_execbuf_util.c
@@ -32,7 +32,7 @@
#include <linux/sched.h>
#include <linux/module.h>
-void ttm_eu_backoff_reservation(struct list_head *list)
+static void ttm_eu_backoff_reservation_locked(struct list_head *list)
{
struct ttm_validate_buffer *entry;
@@ -41,10 +41,77 @@ void ttm_eu_backoff_reservation(struct list_head *list)
if (!entry->reserved)
continue;
+ if (entry->removed) {
+ ttm_bo_add_to_lru(bo);
+ entry->removed = false;
+ }
entry->reserved = false;
- ttm_bo_unreserve(bo);
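+ /* Open-coded ttm_bo_unreserve(): the caller already holds glob->lru_lock. */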
+ atomic_set(&bo->reserved, 0);
+ wake_up_all(&bo->event_queue);
+ }
+}
+
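+/*
+ * Pull every reserved BO off the LRU lists; the dropped list refs are
+ * recorded in put_count and released later by ttm_eu_list_ref_sub().
+ */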
+static void ttm_eu_del_from_lru_locked(struct list_head *list)
+{
+ struct ttm_validate_buffer *entry;
+
+ list_for_each_entry(entry, list, head) {
+ struct ttm_buffer_object *bo = entry->bo;
+ if (!entry->reserved)
+ continue;
+
+ if (!entry->removed) {
+ entry->put_count = ttm_bo_del_from_lru(bo);
+ entry->removed = true;
+ }
}
}
+
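+/* Release the LRU list references recorded in each entry's put_count. */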
+static void ttm_eu_list_ref_sub(struct list_head *list)
+{
+ struct ttm_validate_buffer *entry;
+
+ list_for_each_entry(entry, list, head) {
+ struct ttm_buffer_object *bo = entry->bo;
+
+ if (entry->put_count) {
+ ttm_bo_list_ref_sub(bo, entry->put_count, true);
+ entry->put_count = 0;
+ }
+ }
+}
+
+static int ttm_eu_wait_unreserved_locked(struct list_head *list,
+ struct ttm_buffer_object *bo)
+{
+ struct ttm_bo_global *glob = bo->glob;
+ int ret;
+
+ ttm_eu_del_from_lru_locked(list);
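+ /* Must not sleep on the reservation while holding lru_lock. */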
+ spin_unlock(&glob->lru_lock);
+ ret = ttm_bo_wait_unreserved(bo, true);
+ spin_lock(&glob->lru_lock);
+ if (unlikely(ret != 0))
+ ttm_eu_backoff_reservation_locked(list);
+ return ret;
+}
+
+void ttm_eu_backoff_reservation(struct list_head *list)
+{
+ struct ttm_validate_buffer *entry;
+ struct ttm_bo_global *glob;
+
+ if (list_empty(list))
+ return;
+
+ entry = list_first_entry(list, struct ttm_validate_buffer, head);
+ glob = entry->bo->glob;
+ spin_lock(&glob->lru_lock);
+ ttm_eu_backoff_reservation_locked(list);
+ spin_unlock(&glob->lru_lock);
+}
EXPORT_SYMBOL(ttm_eu_backoff_reservation);
/*
@@ -59,37 +126,76 @@ EXPORT_SYMBOL(ttm_eu_backoff_reservation);
* buffers in different orders.
*/
-int ttm_eu_reserve_buffers(struct list_head *list, uint32_t val_seq)
+int ttm_eu_reserve_buffers(struct list_head *list)
{
+ struct ttm_bo_global *glob;
struct ttm_validate_buffer *entry;
int ret;
+ uint32_t val_seq;
+
+ if (list_empty(list))
+ return 0;
+
+ list_for_each_entry(entry, list, head) {
+ entry->reserved = false;
+ entry->put_count = 0;
+ entry->removed = false;
+ }
+
+ entry = list_first_entry(list, struct ttm_validate_buffer, head);
+ glob = entry->bo->glob;
retry:
+ spin_lock(&glob->lru_lock);
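+ /* Take a fresh sequence each pass; ttm_bo_reserve_locked() uses it to resolve reservation deadlocks via -EAGAIN. */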
+ val_seq = entry->bo->bdev->val_seq++;
+
list_for_each_entry(entry, list, head) {
struct ttm_buffer_object *bo = entry->bo;
- entry->reserved = false;
- ret = ttm_bo_reserve(bo, true, false, true, val_seq);
- if (ret != 0) {
- ttm_eu_backoff_reservation(list);
- if (ret == -EAGAIN) {
- ret = ttm_bo_wait_unreserved(bo, true);
- if (unlikely(ret != 0))
- return ret;
- goto retry;
- } else
+retry_this_bo:
+ ret = ttm_bo_reserve_locked(bo, true, true, true, val_seq);
+ switch (ret) {
+ case 0:
+ break;
+ case -EBUSY:
+ ret = ttm_eu_wait_unreserved_locked(list, bo);
+ if (unlikely(ret != 0)) {
+ spin_unlock(&glob->lru_lock);
+ ttm_eu_list_ref_sub(list);
return ret;
+ }
+ goto retry_this_bo;
+ case -EAGAIN:
+ ttm_eu_backoff_reservation_locked(list);
+ spin_unlock(&glob->lru_lock);
+ ttm_eu_list_ref_sub(list);
+ ret = ttm_bo_wait_unreserved(bo, true);
+ if (unlikely(ret != 0))
+ return ret;
+ goto retry;
+ default:
+ ttm_eu_backoff_reservation_locked(list);
+ spin_unlock(&glob->lru_lock);
+ ttm_eu_list_ref_sub(list);
+ return ret;
}
entry->reserved = true;
if (unlikely(atomic_read(&bo->cpu_writers) > 0)) {
- ttm_eu_backoff_reservation(list);
+ ttm_eu_backoff_reservation_locked(list);
+ spin_unlock(&glob->lru_lock);
+ ttm_eu_list_ref_sub(list);
ret = ttm_bo_wait_cpu(bo, false);
if (ret)
return ret;
goto retry;
}
}
+
+ ttm_eu_del_from_lru_locked(list);
+ spin_unlock(&glob->lru_lock);
+ ttm_eu_list_ref_sub(list);
+
return 0;
}
EXPORT_SYMBOL(ttm_eu_reserve_buffers);
@@ -97,21 +203,36 @@ EXPORT_SYMBOL(ttm_eu_reserve_buffers);
void ttm_eu_fence_buffer_objects(struct list_head *list, void *sync_obj)
{
struct ttm_validate_buffer *entry;
+ struct ttm_buffer_object *bo;
+ struct ttm_bo_global *glob;
+ struct ttm_bo_device *bdev;
+ struct ttm_bo_driver *driver;
- list_for_each_entry(entry, list, head) {
- struct ttm_buffer_object *bo = entry->bo;
- struct ttm_bo_driver *driver = bo->bdev->driver;
- void *old_sync_obj;
+ if (list_empty(list))
+ return;
+
+ bo = list_first_entry(list, struct ttm_validate_buffer, head)->bo;
+ bdev = bo->bdev;
+ driver = bdev->driver;
+ glob = bo->glob;
- spin_lock(&bo->lock);
- old_sync_obj = bo->sync_obj;
+ spin_lock(&bdev->fence_lock);
+ spin_lock(&glob->lru_lock);
+
+ list_for_each_entry(entry, list, head) {
+ bo = entry->bo;
+ entry->old_sync_obj = bo->sync_obj;
bo->sync_obj = driver->sync_obj_ref(sync_obj);
bo->sync_obj_arg = entry->new_sync_obj_arg;
- spin_unlock(&bo->lock);
- ttm_bo_unreserve(bo);
+ ttm_bo_unreserve_locked(bo);
entry->reserved = false;
- if (old_sync_obj)
- driver->sync_obj_unref(&old_sync_obj);
+ }
+ spin_unlock(&glob->lru_lock);
+ spin_unlock(&bdev->fence_lock);
+
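+ /* Unref the old fences outside the spinlocks; sync_obj_unref() may block. */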
+ list_for_each_entry(entry, list, head) {
+ if (entry->old_sync_obj)
+ driver->sync_obj_unref(&entry->old_sync_obj);
}
}
EXPORT_SYMBOL(ttm_eu_fence_buffer_objects);
diff --git a/drivers/gpu/drm/vmwgfx/vmwgfx_drv.h b/drivers/gpu/drm/vmwgfx/vmwgfx_drv.h
index e7a58d05504..10fc01f69c4 100644
--- a/drivers/gpu/drm/vmwgfx/vmwgfx_drv.h
+++ b/drivers/gpu/drm/vmwgfx/vmwgfx_drv.h
@@ -264,7 +264,6 @@ struct vmw_private {
*/
struct vmw_sw_context ctx;
- uint32_t val_seq;
struct mutex cmdbuf_mutex;
/**
diff --git a/drivers/gpu/drm/vmwgfx/vmwgfx_execbuf.c b/drivers/gpu/drm/vmwgfx/vmwgfx_execbuf.c
index 76954e3528c..41b95ed6dbc 100644
--- a/drivers/gpu/drm/vmwgfx/vmwgfx_execbuf.c
+++ b/drivers/gpu/drm/vmwgfx/vmwgfx_execbuf.c
@@ -653,8 +653,7 @@ int vmw_execbuf_ioctl(struct drm_device *dev, void *data,
ret = vmw_cmd_check_all(dev_priv, sw_context, cmd, arg->command_size);
if (unlikely(ret != 0))
goto out_err;
- ret = ttm_eu_reserve_buffers(&sw_context->validate_nodes,
- dev_priv->val_seq++);
+ ret = ttm_eu_reserve_buffers(&sw_context->validate_nodes);
if (unlikely(ret != 0))
goto out_err;
diff --git a/drivers/gpu/drm/vmwgfx/vmwgfx_fb.c b/drivers/gpu/drm/vmwgfx/vmwgfx_fb.c
index fe096a7cc0d..bfab60c938a 100644
--- a/drivers/gpu/drm/vmwgfx/vmwgfx_fb.c
+++ b/drivers/gpu/drm/vmwgfx/vmwgfx_fb.c
@@ -480,9 +480,6 @@ int vmw_fb_init(struct vmw_private *vmw_priv)
info->fix.smem_start = 0;
info->fix.smem_len = fb_size;
- info->fix.mmio_start = 0;
- info->fix.mmio_len = 0;
-
info->pseudo_palette = par->pseudo_palette;
info->screen_base = par->vmalloc;
info->screen_size = fb_size;