author	David S. Miller <davem@davemloft.net>	2009-11-18 22:19:03 -0800
committer	David S. Miller <davem@davemloft.net>	2009-11-18 22:19:03 -0800
commit	3505d1a9fd65e2d3e00827857b6795d9d8983658 (patch)
tree	941cfafdb57c427bb6b7ebf6354ee93b2a3693b5 /drivers/gpu
parent	dfef948ed2ba69cf041840b5e860d6b4e16fa0b1 (diff)
parent	66b00a7c93ec782d118d2c03bd599cfd041e80a1 (diff)
Merge branch 'master' of master.kernel.org:/pub/scm/linux/kernel/git/davem/net-2.6
Conflicts:
	drivers/net/sfc/sfe4001.c
	drivers/net/wireless/libertas/cmd.c
	drivers/staging/Kconfig
	drivers/staging/Makefile
	drivers/staging/rtl8187se/Kconfig
	drivers/staging/rtl8192e/Kconfig
Diffstat (limited to 'drivers/gpu')
-rw-r--r--  drivers/gpu/drm/drm_crtc.c | 2
-rw-r--r--  drivers/gpu/drm/drm_crtc_helper.c | 1
-rw-r--r--  drivers/gpu/drm/drm_edid.c | 15
-rw-r--r--  drivers/gpu/drm/drm_fb_helper.c | 156
-rw-r--r--  drivers/gpu/drm/i915/i915_dma.c | 4
-rw-r--r--  drivers/gpu/drm/i915/i915_drv.c | 5
-rw-r--r--  drivers/gpu/drm/i915/i915_drv.h | 43
-rw-r--r--  drivers/gpu/drm/i915/i915_gem.c | 10
-rw-r--r--  drivers/gpu/drm/i915/i915_irq.c | 10
-rw-r--r--  drivers/gpu/drm/i915/i915_reg.h | 30
-rw-r--r--  drivers/gpu/drm/i915/i915_suspend.c | 305
-rw-r--r--  drivers/gpu/drm/i915/i915_trace.h | 49
-rw-r--r--  drivers/gpu/drm/i915/intel_bios.c | 14
-rw-r--r--  drivers/gpu/drm/i915/intel_display.c | 193
-rw-r--r--  drivers/gpu/drm/i915/intel_dp.c | 2
-rw-r--r--  drivers/gpu/drm/i915/intel_drv.h | 2
-rw-r--r--  drivers/gpu/drm/i915/intel_fb.c | 10
-rw-r--r--  drivers/gpu/drm/i915/intel_hdmi.c | 2
-rw-r--r--  drivers/gpu/drm/i915/intel_lvds.c | 35
-rw-r--r--  drivers/gpu/drm/i915/intel_tv.c | 3
-rw-r--r--  drivers/gpu/drm/radeon/Makefile | 2
-rw-r--r--  drivers/gpu/drm/radeon/atombios.h | 2
-rw-r--r--  drivers/gpu/drm/radeon/atombios_crtc.c | 358
-rw-r--r--  drivers/gpu/drm/radeon/mkregtable.c | 12
-rw-r--r--  drivers/gpu/drm/radeon/r100.c | 474
-rw-r--r--  drivers/gpu/drm/radeon/r100d.h | 145
-rw-r--r--  drivers/gpu/drm/radeon/r200.c | 3
-rw-r--r--  drivers/gpu/drm/radeon/r300.c | 310
-rw-r--r--  drivers/gpu/drm/radeon/r300d.h | 205
-rw-r--r--  drivers/gpu/drm/radeon/r420.c | 8
-rw-r--r--  drivers/gpu/drm/radeon/r420d.h | 24
-rw-r--r--  drivers/gpu/drm/radeon/r500_reg.h | 9
-rw-r--r--  drivers/gpu/drm/radeon/r520.c | 5
-rw-r--r--  drivers/gpu/drm/radeon/r600.c | 420
-rw-r--r--  drivers/gpu/drm/radeon/r600_blit.c | 56
-rw-r--r--  drivers/gpu/drm/radeon/r600_blit_kms.c | 2
-rw-r--r--  drivers/gpu/drm/radeon/r600_cs.c | 20
-rw-r--r--  drivers/gpu/drm/radeon/r600d.h | 15
-rw-r--r--  drivers/gpu/drm/radeon/radeon.h | 69
-rw-r--r--  drivers/gpu/drm/radeon/radeon_asic.h | 276
-rw-r--r--  drivers/gpu/drm/radeon/radeon_atombios.c | 282
-rw-r--r--  drivers/gpu/drm/radeon/radeon_benchmark.c | 4
-rw-r--r--  drivers/gpu/drm/radeon/radeon_bios.c | 46
-rw-r--r--  drivers/gpu/drm/radeon/radeon_clocks.c | 10
-rw-r--r--  drivers/gpu/drm/radeon/radeon_combios.c | 287
-rw-r--r--  drivers/gpu/drm/radeon/radeon_connectors.c | 199
-rw-r--r--  drivers/gpu/drm/radeon/radeon_cursor.c | 10
-rw-r--r--  drivers/gpu/drm/radeon/radeon_device.c | 273
-rw-r--r--  drivers/gpu/drm/radeon/radeon_display.c | 73
-rw-r--r--  drivers/gpu/drm/radeon/radeon_encoders.c | 131
-rw-r--r--  drivers/gpu/drm/radeon/radeon_fb.c | 18
-rw-r--r--  drivers/gpu/drm/radeon/radeon_gart.c | 20
-rw-r--r--  drivers/gpu/drm/radeon/radeon_irq_kms.c | 15
-rw-r--r--  drivers/gpu/drm/radeon/radeon_legacy_crtc.c | 41
-rw-r--r--  drivers/gpu/drm/radeon/radeon_legacy_encoders.c | 36
-rw-r--r--  drivers/gpu/drm/radeon/radeon_mode.h | 36
-rw-r--r--  drivers/gpu/drm/radeon/radeon_object.c | 17
-rw-r--r--  drivers/gpu/drm/radeon/radeon_pm.c | 65
-rw-r--r--  drivers/gpu/drm/radeon/radeon_reg.h | 6
-rw-r--r--  drivers/gpu/drm/radeon/radeon_test.c | 6
-rw-r--r--  drivers/gpu/drm/radeon/radeon_ttm.c | 6
-rw-r--r--  drivers/gpu/drm/radeon/rs100d.h | 40
-rw-r--r--  drivers/gpu/drm/radeon/rs400.c | 277
-rw-r--r--  drivers/gpu/drm/radeon/rs400d.h | 160
-rw-r--r--  drivers/gpu/drm/radeon/rs600.c | 501
-rw-r--r--  drivers/gpu/drm/radeon/rs600d.h | 470
-rw-r--r--  drivers/gpu/drm/radeon/rs690.c | 357
-rw-r--r--  drivers/gpu/drm/radeon/rs690d.h | 307
-rw-r--r--  drivers/gpu/drm/radeon/rs690r.h | 99
-rw-r--r--  drivers/gpu/drm/radeon/rv200d.h | 36
-rw-r--r--  drivers/gpu/drm/radeon/rv250d.h | 123
-rw-r--r--  drivers/gpu/drm/radeon/rv350d.h | 52
-rw-r--r--  drivers/gpu/drm/radeon/rv515.c | 9
-rw-r--r--  drivers/gpu/drm/radeon/rv770.c | 258
-rw-r--r--  drivers/gpu/drm/radeon/rv770d.h | 5
-rw-r--r--  drivers/gpu/drm/ttm/ttm_global.c | 2
-rw-r--r--  drivers/gpu/drm/ttm/ttm_tt.c | 1
77 files changed, 5206 insertions, 2378 deletions
diff --git a/drivers/gpu/drm/drm_crtc.c b/drivers/gpu/drm/drm_crtc.c
index 8e7b0ebece0..5cae0b3eee9 100644
--- a/drivers/gpu/drm/drm_crtc.c
+++ b/drivers/gpu/drm/drm_crtc.c
@@ -1556,8 +1556,6 @@ int drm_mode_cursor_ioctl(struct drm_device *dev,
struct drm_crtc *crtc;
int ret = 0;
- DRM_DEBUG_KMS("\n");
-
if (!req->flags) {
DRM_ERROR("no operation set\n");
return -EINVAL;
diff --git a/drivers/gpu/drm/drm_crtc_helper.c b/drivers/gpu/drm/drm_crtc_helper.c
index 1fe4e1d344f..bbfd110a716 100644
--- a/drivers/gpu/drm/drm_crtc_helper.c
+++ b/drivers/gpu/drm/drm_crtc_helper.c
@@ -331,6 +331,7 @@ create_mode:
cmdline_mode->refresh_specified ? cmdline_mode->refresh : 60,
cmdline_mode->rb, cmdline_mode->interlace,
cmdline_mode->margins);
+ drm_mode_set_crtcinfo(mode, CRTC_INTERLACE_HALVE_V);
list_add(&mode->head, &connector->modes);
return mode;
}
diff --git a/drivers/gpu/drm/drm_edid.c b/drivers/gpu/drm/drm_edid.c
index 3c0d2b3aed7..cea665d86dd 100644
--- a/drivers/gpu/drm/drm_edid.c
+++ b/drivers/gpu/drm/drm_edid.c
@@ -626,6 +626,12 @@ static struct drm_display_mode *drm_mode_detailed(struct drm_device *dev,
return NULL;
}
+ /* it is incorrect if hsync/vsync width is zero */
+ if (!hsync_pulse_width || !vsync_pulse_width) {
+ DRM_DEBUG_KMS("Incorrect Detailed timing. "
+ "Wrong Hsync/Vsync pulse width\n");
+ return NULL;
+ }
mode = drm_mode_create(dev);
if (!mode)
return NULL;
@@ -647,6 +653,15 @@ static struct drm_display_mode *drm_mode_detailed(struct drm_device *dev,
mode->vsync_end = mode->vsync_start + vsync_pulse_width;
mode->vtotal = mode->vdisplay + vblank;
+ /* perform the basic check for the detailed timing */
+ if (mode->hsync_end > mode->htotal ||
+ mode->vsync_end > mode->vtotal) {
+ drm_mode_destroy(dev, mode);
+ DRM_DEBUG_KMS("Incorrect detailed timing. "
+ "Sync is beyond the blank.\n");
+ return NULL;
+ }
+
drm_mode_set_name(mode);
if (pt->misc & DRM_EDID_PT_INTERLACED)
diff --git a/drivers/gpu/drm/drm_fb_helper.c b/drivers/gpu/drm/drm_fb_helper.c
index 819ddcbfcce..dc8e374a0b5 100644
--- a/drivers/gpu/drm/drm_fb_helper.c
+++ b/drivers/gpu/drm/drm_fb_helper.c
@@ -454,6 +454,109 @@ out_free:
}
EXPORT_SYMBOL(drm_fb_helper_init_crtc_count);
+static int setcolreg(struct drm_crtc *crtc, u16 red, u16 green,
+ u16 blue, u16 regno, struct fb_info *info)
+{
+ struct drm_fb_helper *fb_helper = info->par;
+ struct drm_framebuffer *fb = fb_helper->fb;
+ int pindex;
+
+ if (info->fix.visual == FB_VISUAL_TRUECOLOR) {
+ u32 *palette;
+ u32 value;
+ /* place color in psuedopalette */
+ if (regno > 16)
+ return -EINVAL;
+ palette = (u32 *)info->pseudo_palette;
+ red >>= (16 - info->var.red.length);
+ green >>= (16 - info->var.green.length);
+ blue >>= (16 - info->var.blue.length);
+ value = (red << info->var.red.offset) |
+ (green << info->var.green.offset) |
+ (blue << info->var.blue.offset);
+ palette[regno] = value;
+ return 0;
+ }
+
+ pindex = regno;
+
+ if (fb->bits_per_pixel == 16) {
+ pindex = regno << 3;
+
+ if (fb->depth == 16 && regno > 63)
+ return -EINVAL;
+ if (fb->depth == 15 && regno > 31)
+ return -EINVAL;
+
+ if (fb->depth == 16) {
+ u16 r, g, b;
+ int i;
+ if (regno < 32) {
+ for (i = 0; i < 8; i++)
+ fb_helper->funcs->gamma_set(crtc, red,
+ green, blue, pindex + i);
+ }
+
+ fb_helper->funcs->gamma_get(crtc, &r,
+ &g, &b,
+ pindex >> 1);
+
+ for (i = 0; i < 4; i++)
+ fb_helper->funcs->gamma_set(crtc, r,
+ green, b,
+ (pindex >> 1) + i);
+ }
+ }
+
+ if (fb->depth != 16)
+ fb_helper->funcs->gamma_set(crtc, red, green, blue, pindex);
+ return 0;
+}
+
+int drm_fb_helper_setcmap(struct fb_cmap *cmap, struct fb_info *info)
+{
+ struct drm_fb_helper *fb_helper = info->par;
+ struct drm_device *dev = fb_helper->dev;
+ u16 *red, *green, *blue, *transp;
+ struct drm_crtc *crtc;
+ int i, rc = 0;
+ int start;
+
+ list_for_each_entry(crtc, &dev->mode_config.crtc_list, head) {
+ struct drm_crtc_helper_funcs *crtc_funcs = crtc->helper_private;
+ for (i = 0; i < fb_helper->crtc_count; i++) {
+ if (crtc->base.id == fb_helper->crtc_info[i].crtc_id)
+ break;
+ }
+ if (i == fb_helper->crtc_count)
+ continue;
+
+ red = cmap->red;
+ green = cmap->green;
+ blue = cmap->blue;
+ transp = cmap->transp;
+ start = cmap->start;
+
+ for (i = 0; i < cmap->len; i++) {
+ u16 hred, hgreen, hblue, htransp = 0xffff;
+
+ hred = *red++;
+ hgreen = *green++;
+ hblue = *blue++;
+
+ if (transp)
+ htransp = *transp++;
+
+ rc = setcolreg(crtc, hred, hgreen, hblue, start++, info);
+ if (rc)
+ return rc;
+ }
+ crtc_funcs->load_lut(crtc);
+ }
+ return rc;
+}
+EXPORT_SYMBOL(drm_fb_helper_setcmap);
+
int drm_fb_helper_setcolreg(unsigned regno,
unsigned red,
unsigned green,
@@ -465,10 +568,13 @@ int drm_fb_helper_setcolreg(unsigned regno,
struct drm_device *dev = fb_helper->dev;
struct drm_crtc *crtc;
int i;
+ int ret;
- list_for_each_entry(crtc, &dev->mode_config.crtc_list, head) {
- struct drm_framebuffer *fb = fb_helper->fb;
+ if (regno > 255)
+ return 1;
+ list_for_each_entry(crtc, &dev->mode_config.crtc_list, head) {
+ struct drm_crtc_helper_funcs *crtc_funcs = crtc->helper_private;
for (i = 0; i < fb_helper->crtc_count; i++) {
if (crtc->base.id == fb_helper->crtc_info[i].crtc_id)
break;
@@ -476,35 +582,11 @@ int drm_fb_helper_setcolreg(unsigned regno,
if (i == fb_helper->crtc_count)
continue;
- if (regno > 255)
- return 1;
-
- if (fb->depth == 8) {
- fb_helper->funcs->gamma_set(crtc, red, green, blue, regno);
- return 0;
- }
+ ret = setcolreg(crtc, red, green, blue, regno, info);
+ if (ret)
+ return ret;
- if (regno < 16) {
- switch (fb->depth) {
- case 15:
- fb->pseudo_palette[regno] = ((red & 0xf800) >> 1) |
- ((green & 0xf800) >> 6) |
- ((blue & 0xf800) >> 11);
- break;
- case 16:
- fb->pseudo_palette[regno] = (red & 0xf800) |
- ((green & 0xfc00) >> 5) |
- ((blue & 0xf800) >> 11);
- break;
- case 24:
- case 32:
- fb->pseudo_palette[regno] =
- (((red >> 8) & 0xff) << info->var.red.offset) |
- (((green >> 8) & 0xff) << info->var.green.offset) |
- (((blue >> 8) & 0xff) << info->var.blue.offset);
- break;
- }
- }
+ crtc_funcs->load_lut(crtc);
}
return 0;
}
@@ -625,7 +707,7 @@ int drm_fb_helper_set_par(struct fb_info *info)
if (crtc->fb == fb_helper->crtc_info[i].mode_set.fb) {
mutex_lock(&dev->mode_config.mutex);
- ret = crtc->funcs->set_config(&fb_helper->crtc_info->mode_set);
+ ret = crtc->funcs->set_config(&fb_helper->crtc_info[i].mode_set);
mutex_unlock(&dev->mode_config.mutex);
if (ret)
return ret;
@@ -674,6 +756,7 @@ int drm_fb_helper_pan_display(struct fb_var_screeninfo *var,
EXPORT_SYMBOL(drm_fb_helper_pan_display);
int drm_fb_helper_single_fb_probe(struct drm_device *dev,
+ int preferred_bpp,
int (*fb_create)(struct drm_device *dev,
uint32_t fb_width,
uint32_t fb_height,
@@ -696,6 +779,11 @@ int drm_fb_helper_single_fb_probe(struct drm_device *dev,
struct drm_fb_helper *fb_helper;
uint32_t surface_depth = 24, surface_bpp = 32;
+ /* if driver picks 8 or 16 by default use that
+ for both depth/bpp */
+ if (preferred_bpp != surface_bpp) {
+ surface_depth = surface_bpp = preferred_bpp;
+ }
/* first up get a count of crtcs now in use and new min/maxes width/heights */
list_for_each_entry(connector, &dev->mode_config.connector_list, head) {
struct drm_fb_helper_connector *fb_help_conn = connector->fb_helper_private;
@@ -851,10 +939,12 @@ void drm_fb_helper_free(struct drm_fb_helper *helper)
}
EXPORT_SYMBOL(drm_fb_helper_free);
-void drm_fb_helper_fill_fix(struct fb_info *info, uint32_t pitch)
+void drm_fb_helper_fill_fix(struct fb_info *info, uint32_t pitch,
+ uint32_t depth)
{
info->fix.type = FB_TYPE_PACKED_PIXELS;
- info->fix.visual = FB_VISUAL_TRUECOLOR;
+ info->fix.visual = depth == 8 ? FB_VISUAL_PSEUDOCOLOR :
+ FB_VISUAL_TRUECOLOR;
info->fix.type_aux = 0;
info->fix.xpanstep = 1; /* doing it in hw */
info->fix.ypanstep = 1; /* doing it in hw */
diff --git a/drivers/gpu/drm/i915/i915_dma.c b/drivers/gpu/drm/i915/i915_dma.c
index 45d507ebd3f..e5b138be45f 100644
--- a/drivers/gpu/drm/i915/i915_dma.c
+++ b/drivers/gpu/drm/i915/i915_dma.c
@@ -1227,8 +1227,7 @@ static int i915_load_modeset_init(struct drm_device *dev,
goto out;
/* Try to set up FBC with a reasonable compressed buffer size */
- if (IS_MOBILE(dev) && (IS_I9XX(dev) || IS_I965G(dev) || IS_GM45(dev)) &&
- i915_powersave) {
+ if (I915_HAS_FBC(dev) && i915_powersave) {
int cfb_size;
/* Try to get an 8M buffer... */
@@ -1468,6 +1467,7 @@ int i915_driver_load(struct drm_device *dev, unsigned long flags)
spin_lock_init(&dev_priv->user_irq_lock);
spin_lock_init(&dev_priv->error_lock);
dev_priv->user_irq_refcount = 0;
+ dev_priv->trace_irq_seqno = 0;
ret = drm_vblank_init(dev, I915_NUM_PIPE);
diff --git a/drivers/gpu/drm/i915/i915_drv.c b/drivers/gpu/drm/i915/i915_drv.c
index b93814c0d3e..7f436ec075f 100644
--- a/drivers/gpu/drm/i915/i915_drv.c
+++ b/drivers/gpu/drm/i915/i915_drv.c
@@ -89,7 +89,8 @@ static int i915_suspend(struct drm_device *dev, pm_message_t state)
pci_set_power_state(dev->pdev, PCI_D3hot);
}
- dev_priv->suspended = 1;
+ /* Modeset on resume, not lid events */
+ dev_priv->modeset_on_lid = 0;
return 0;
}
@@ -124,7 +125,7 @@ static int i915_resume(struct drm_device *dev)
drm_helper_resume_force_mode(dev);
}
- dev_priv->suspended = 0;
+ dev_priv->modeset_on_lid = 0;
return ret;
}
diff --git a/drivers/gpu/drm/i915/i915_drv.h b/drivers/gpu/drm/i915/i915_drv.h
index b24b2d145b7..57204e29897 100644
--- a/drivers/gpu/drm/i915/i915_drv.h
+++ b/drivers/gpu/drm/i915/i915_drv.h
@@ -202,6 +202,7 @@ typedef struct drm_i915_private {
spinlock_t user_irq_lock;
/** Refcount for i915_user_irq_get() versus i915_user_irq_put(). */
int user_irq_refcount;
+ u32 trace_irq_seqno;
/** Cached value of IMR to avoid reads in updating the bitfield */
u32 irq_mask_reg;
u32 pipestat[2];
@@ -273,7 +274,7 @@ typedef struct drm_i915_private {
struct drm_i915_display_funcs display;
/* Register state */
- bool suspended;
+ bool modeset_on_lid;
u8 saveLBB;
u32 saveDSPACNTR;
u32 saveDSPBCNTR;
@@ -295,6 +296,12 @@ typedef struct drm_i915_private {
u32 saveVBLANK_A;
u32 saveVSYNC_A;
u32 saveBCLRPAT_A;
+ u32 saveTRANS_HTOTAL_A;
+ u32 saveTRANS_HBLANK_A;
+ u32 saveTRANS_HSYNC_A;
+ u32 saveTRANS_VTOTAL_A;
+ u32 saveTRANS_VBLANK_A;
+ u32 saveTRANS_VSYNC_A;
u32 savePIPEASTAT;
u32 saveDSPASTRIDE;
u32 saveDSPASIZE;
@@ -303,8 +310,11 @@ typedef struct drm_i915_private {
u32 saveDSPASURF;
u32 saveDSPATILEOFF;
u32 savePFIT_PGM_RATIOS;
+ u32 saveBLC_HIST_CTL;
u32 saveBLC_PWM_CTL;
u32 saveBLC_PWM_CTL2;
+ u32 saveBLC_CPU_PWM_CTL;
+ u32 saveBLC_CPU_PWM_CTL2;
u32 saveFPB0;
u32 saveFPB1;
u32 saveDPLL_B;
@@ -316,6 +326,12 @@ typedef struct drm_i915_private {
u32 saveVBLANK_B;
u32 saveVSYNC_B;
u32 saveBCLRPAT_B;
+ u32 saveTRANS_HTOTAL_B;
+ u32 saveTRANS_HBLANK_B;
+ u32 saveTRANS_HSYNC_B;
+ u32 saveTRANS_VTOTAL_B;
+ u32 saveTRANS_VBLANK_B;
+ u32 saveTRANS_VSYNC_B;
u32 savePIPEBSTAT;
u32 saveDSPBSTRIDE;
u32 saveDSPBSIZE;
@@ -341,6 +357,7 @@ typedef struct drm_i915_private {
u32 savePFIT_CONTROL;
u32 save_palette_a[256];
u32 save_palette_b[256];
+ u32 saveDPFC_CB_BASE;
u32 saveFBC_CFB_BASE;
u32 saveFBC_LL_BASE;
u32 saveFBC_CONTROL;
@@ -348,6 +365,12 @@ typedef struct drm_i915_private {
u32 saveIER;
u32 saveIIR;
u32 saveIMR;
+ u32 saveDEIER;
+ u32 saveDEIMR;
+ u32 saveGTIER;
+ u32 saveGTIMR;
+ u32 saveFDI_RXA_IMR;
+ u32 saveFDI_RXB_IMR;
u32 saveCACHE_MODE_0;
u32 saveD_STATE;
u32 saveDSPCLK_GATE_D;
@@ -381,6 +404,16 @@ typedef struct drm_i915_private {
u32 savePIPEB_DP_LINK_M;
u32 savePIPEA_DP_LINK_N;
u32 savePIPEB_DP_LINK_N;
+ u32 saveFDI_RXA_CTL;
+ u32 saveFDI_TXA_CTL;
+ u32 saveFDI_RXB_CTL;
+ u32 saveFDI_TXB_CTL;
+ u32 savePFA_CTL_1;
+ u32 savePFB_CTL_1;
+ u32 savePFA_WIN_SZ;
+ u32 savePFB_WIN_SZ;
+ u32 savePFA_WIN_POS;
+ u32 savePFB_WIN_POS;
struct {
struct drm_mm gtt_space;
@@ -491,6 +524,8 @@ typedef struct drm_i915_private {
struct drm_i915_gem_phys_object *phys_objs[I915_MAX_PHYS_OBJECT];
} mm;
struct sdvo_device_mapping sdvo_mappings[2];
+ /* indicate whether the LVDS_BORDER should be enabled or not */
+ unsigned int lvds_border_bits;
/* Reclocking support */
bool render_reclock_avail;
@@ -665,6 +700,7 @@ extern int i915_irq_emit(struct drm_device *dev, void *data,
extern int i915_irq_wait(struct drm_device *dev, void *data,
struct drm_file *file_priv);
void i915_user_irq_get(struct drm_device *dev);
+void i915_trace_irq_get(struct drm_device *dev, u32 seqno);
void i915_user_irq_put(struct drm_device *dev);
extern void i915_enable_interrupt (struct drm_device *dev);
@@ -979,7 +1015,10 @@ extern int i915_wait_ring(struct drm_device * dev, int n, const char *caller);
#define HAS_FW_BLC(dev) (IS_I9XX(dev) || IS_G4X(dev) || IS_IGDNG(dev))
#define HAS_PIPE_CXSR(dev) (IS_G4X(dev) || IS_IGDNG(dev))
-#define I915_HAS_FBC(dev) (IS_MOBILE(dev) && (IS_I9XX(dev) || IS_I965G(dev)))
+#define I915_HAS_FBC(dev) (IS_MOBILE(dev) && \
+ (IS_I9XX(dev) || IS_GM45(dev)) && \
+ !IS_IGD(dev) && \
+ !IS_IGDNG(dev))
#define PRIMARY_RINGBUFFER_SIZE (128*1024)
diff --git a/drivers/gpu/drm/i915/i915_gem.c b/drivers/gpu/drm/i915/i915_gem.c
index 40727d4c291..abfc27b0c2e 100644
--- a/drivers/gpu/drm/i915/i915_gem.c
+++ b/drivers/gpu/drm/i915/i915_gem.c
@@ -1770,7 +1770,7 @@ i915_gem_retire_requests(struct drm_device *dev)
drm_i915_private_t *dev_priv = dev->dev_private;
uint32_t seqno;
- if (!dev_priv->hw_status_page)
+ if (!dev_priv->hw_status_page || list_empty(&dev_priv->mm.request_list))
return;
seqno = i915_get_gem_seqno(dev);
@@ -1794,6 +1794,12 @@ i915_gem_retire_requests(struct drm_device *dev)
} else
break;
}
+
+ if (unlikely (dev_priv->trace_irq_seqno &&
+ i915_seqno_passed(dev_priv->trace_irq_seqno, seqno))) {
+ i915_user_irq_put(dev);
+ dev_priv->trace_irq_seqno = 0;
+ }
}
void
@@ -3352,7 +3358,7 @@ i915_dispatch_gem_execbuffer(struct drm_device *dev,
exec_start = (uint32_t) exec_offset + exec->batch_start_offset;
exec_len = (uint32_t) exec->batch_len;
- trace_i915_gem_request_submit(dev, dev_priv->mm.next_gem_seqno);
+ trace_i915_gem_request_submit(dev, dev_priv->mm.next_gem_seqno + 1);
count = nbox ? nbox : 1;
diff --git a/drivers/gpu/drm/i915/i915_irq.c b/drivers/gpu/drm/i915/i915_irq.c
index 4dfeec7cdd4..c3ceffa46ea 100644
--- a/drivers/gpu/drm/i915/i915_irq.c
+++ b/drivers/gpu/drm/i915/i915_irq.c
@@ -725,6 +725,16 @@ void i915_user_irq_put(struct drm_device *dev)
spin_unlock_irqrestore(&dev_priv->user_irq_lock, irqflags);
}
+void i915_trace_irq_get(struct drm_device *dev, u32 seqno)
+{
+ drm_i915_private_t *dev_priv = (drm_i915_private_t *) dev->dev_private;
+
+ if (dev_priv->trace_irq_seqno == 0)
+ i915_user_irq_get(dev);
+
+ dev_priv->trace_irq_seqno = seqno;
+}
+
static int i915_wait_irq(struct drm_device * dev, int irq_nr)
{
drm_i915_private_t *dev_priv = (drm_i915_private_t *) dev->dev_private;
diff --git a/drivers/gpu/drm/i915/i915_reg.h b/drivers/gpu/drm/i915/i915_reg.h
index 0466ddbeba3..1687edf6879 100644
--- a/drivers/gpu/drm/i915/i915_reg.h
+++ b/drivers/gpu/drm/i915/i915_reg.h
@@ -968,6 +968,8 @@
#define LVDS_PORT_EN (1 << 31)
/* Selects pipe B for LVDS data. Must be set on pre-965. */
#define LVDS_PIPEB_SELECT (1 << 30)
+/* Enable border for unscaled (or aspect-scaled) display */
+#define LVDS_BORDER_ENABLE (1 << 15)
/*
* Enables the A0-A2 data pairs and CLKA, containing 18 bits of color data per
* pixel.
@@ -1078,6 +1080,8 @@
#define BACKLIGHT_DUTY_CYCLE_SHIFT (0)
#define BACKLIGHT_DUTY_CYCLE_MASK (0xffff)
+#define BLC_HIST_CTL 0x61260
+
/* TV port control */
#define TV_CTL 0x68000
/** Enables the TV encoder */
@@ -1780,6 +1784,11 @@
#define PIPE_START_VBLANK_INTERRUPT_STATUS (1UL<<2) /* 965 or later */
#define PIPE_VBLANK_INTERRUPT_STATUS (1UL<<1)
#define PIPE_OVERLAY_UPDATED_STATUS (1UL<<0)
+#define PIPE_BPC_MASK (7 << 5) /* Ironlake */
+#define PIPE_8BPC (0 << 5)
+#define PIPE_10BPC (1 << 5)
+#define PIPE_6BPC (2 << 5)
+#define PIPE_12BPC (3 << 5)
#define DSPARB 0x70030
#define DSPARB_CSTART_MASK (0x7f << 7)
@@ -1790,17 +1799,29 @@
#define DSPARB_AEND_SHIFT 0
#define DSPFW1 0x70034
+#define DSPFW_SR_SHIFT 23
+#define DSPFW_CURSORB_SHIFT 16
+#define DSPFW_PLANEB_SHIFT 8
#define DSPFW2 0x70038
+#define DSPFW_CURSORA_MASK 0x00003f00
+#define DSPFW_CURSORA_SHIFT 16
#define DSPFW3 0x7003c
+#define DSPFW_HPLL_SR_EN (1<<31)
+#define DSPFW_CURSOR_SR_SHIFT 24
#define IGD_SELF_REFRESH_EN (1<<30)
/* FIFO watermark sizes etc */
+#define G4X_FIFO_LINE_SIZE 64
#define I915_FIFO_LINE_SIZE 64
#define I830_FIFO_LINE_SIZE 32
+
+#define G4X_FIFO_SIZE 127
#define I945_FIFO_SIZE 127 /* 945 & 965 */
#define I915_FIFO_SIZE 95
#define I855GM_FIFO_SIZE 127 /* In cachelines */
#define I830_FIFO_SIZE 95
+
+#define G4X_MAX_WM 0x3f
#define I915_MAX_WM 0x3f
#define IGD_DISPLAY_FIFO 512 /* in 64byte unit */
@@ -2030,6 +2051,11 @@
#define PFA_CTL_1 0x68080
#define PFB_CTL_1 0x68880
#define PF_ENABLE (1<<31)
+#define PF_FILTER_MASK (3<<23)
+#define PF_FILTER_PROGRAMMED (0<<23)
+#define PF_FILTER_MED_3x3 (1<<23)
+#define PF_FILTER_EDGE_ENHANCE (2<<23)
+#define PF_FILTER_EDGE_SOFTEN (3<<23)
#define PFA_WIN_SZ 0x68074
#define PFB_WIN_SZ 0x68874
#define PFA_WIN_POS 0x68070
@@ -2149,11 +2175,11 @@
#define DREF_CPU_SOURCE_OUTPUT_MASK (3<<13)
#define DREF_SSC_SOURCE_DISABLE (0<<11)
#define DREF_SSC_SOURCE_ENABLE (2<<11)
-#define DREF_SSC_SOURCE_MASK (2<<11)
+#define DREF_SSC_SOURCE_MASK (3<<11)
#define DREF_NONSPREAD_SOURCE_DISABLE (0<<9)
#define DREF_NONSPREAD_CK505_ENABLE (1<<9)
#define DREF_NONSPREAD_SOURCE_ENABLE (2<<9)
-#define DREF_NONSPREAD_SOURCE_MASK (2<<9)
+#define DREF_NONSPREAD_SOURCE_MASK (3<<9)
#define DREF_SUPERSPREAD_SOURCE_DISABLE (0<<7)
#define DREF_SUPERSPREAD_SOURCE_ENABLE (2<<7)
#define DREF_SSC4_DOWNSPREAD (0<<6)
diff --git a/drivers/gpu/drm/i915/i915_suspend.c b/drivers/gpu/drm/i915/i915_suspend.c
index bd6d8d91ca9..992d5617e79 100644
--- a/drivers/gpu/drm/i915/i915_suspend.c
+++ b/drivers/gpu/drm/i915/i915_suspend.c
@@ -32,11 +32,15 @@
static bool i915_pipe_enabled(struct drm_device *dev, enum pipe pipe)
{
struct drm_i915_private *dev_priv = dev->dev_private;
+ u32 dpll_reg;
- if (pipe == PIPE_A)
- return (I915_READ(DPLL_A) & DPLL_VCO_ENABLE);
- else
- return (I915_READ(DPLL_B) & DPLL_VCO_ENABLE);
+ if (IS_IGDNG(dev)) {
+ dpll_reg = (pipe == PIPE_A) ? PCH_DPLL_A: PCH_DPLL_B;
+ } else {
+ dpll_reg = (pipe == PIPE_A) ? DPLL_A: DPLL_B;
+ }
+
+ return (I915_READ(dpll_reg) & DPLL_VCO_ENABLE);
}
static void i915_save_palette(struct drm_device *dev, enum pipe pipe)
@@ -49,6 +53,9 @@ static void i915_save_palette(struct drm_device *dev, enum pipe pipe)
if (!i915_pipe_enabled(dev, pipe))
return;
+ if (IS_IGDNG(dev))
+ reg = (pipe == PIPE_A) ? LGC_PALETTE_A : LGC_PALETTE_B;
+
if (pipe == PIPE_A)
array = dev_priv->save_palette_a;
else
@@ -68,6 +75,9 @@ static void i915_restore_palette(struct drm_device *dev, enum pipe pipe)
if (!i915_pipe_enabled(dev, pipe))
return;
+ if (IS_IGDNG(dev))
+ reg = (pipe == PIPE_A) ? LGC_PALETTE_A : LGC_PALETTE_B;
+
if (pipe == PIPE_A)
array = dev_priv->save_palette_a;
else
@@ -232,10 +242,16 @@ static void i915_save_modeset_reg(struct drm_device *dev)
/* Pipe & plane A info */
dev_priv->savePIPEACONF = I915_READ(PIPEACONF);
dev_priv->savePIPEASRC = I915_READ(PIPEASRC);
- dev_priv->saveFPA0 = I915_READ(FPA0);
- dev_priv->saveFPA1 = I915_READ(FPA1);
- dev_priv->saveDPLL_A = I915_READ(DPLL_A);
- if (IS_I965G(dev))
+ if (IS_IGDNG(dev)) {
+ dev_priv->saveFPA0 = I915_READ(PCH_FPA0);
+ dev_priv->saveFPA1 = I915_READ(PCH_FPA1);
+ dev_priv->saveDPLL_A = I915_READ(PCH_DPLL_A);
+ } else {
+ dev_priv->saveFPA0 = I915_READ(FPA0);
+ dev_priv->saveFPA1 = I915_READ(FPA1);
+ dev_priv->saveDPLL_A = I915_READ(DPLL_A);
+ }
+ if (IS_I965G(dev) && !IS_IGDNG(dev))
dev_priv->saveDPLL_A_MD = I915_READ(DPLL_A_MD);
dev_priv->saveHTOTAL_A = I915_READ(HTOTAL_A);
dev_priv->saveHBLANK_A = I915_READ(HBLANK_A);
@@ -243,7 +259,24 @@ static void i915_save_modeset_reg(struct drm_device *dev)
dev_priv->saveVTOTAL_A = I915_READ(VTOTAL_A);
dev_priv->saveVBLANK_A = I915_READ(VBLANK_A);
dev_priv->saveVSYNC_A = I915_READ(VSYNC_A);
- dev_priv->saveBCLRPAT_A = I915_READ(BCLRPAT_A);
+ if (!IS_IGDNG(dev))
+ dev_priv->saveBCLRPAT_A = I915_READ(BCLRPAT_A);
+
+ if (IS_IGDNG(dev)) {
+ dev_priv->saveFDI_TXA_CTL = I915_READ(FDI_TXA_CTL);
+ dev_priv->saveFDI_RXA_CTL = I915_READ(FDI_RXA_CTL);
+
+ dev_priv->savePFA_CTL_1 = I915_READ(PFA_CTL_1);
+ dev_priv->savePFA_WIN_SZ = I915_READ(PFA_WIN_SZ);
+ dev_priv->savePFA_WIN_POS = I915_READ(PFA_WIN_POS);
+
+ dev_priv->saveTRANS_HTOTAL_A = I915_READ(TRANS_HTOTAL_A);
+ dev_priv->saveTRANS_HBLANK_A = I915_READ(TRANS_HBLANK_A);
+ dev_priv->saveTRANS_HSYNC_A = I915_READ(TRANS_HSYNC_A);
+ dev_priv->saveTRANS_VTOTAL_A = I915_READ(TRANS_VTOTAL_A);
+ dev_priv->saveTRANS_VBLANK_A = I915_READ(TRANS_VBLANK_A);
+ dev_priv->saveTRANS_VSYNC_A = I915_READ(TRANS_VSYNC_A);
+ }
dev_priv->saveDSPACNTR = I915_READ(DSPACNTR);
dev_priv->saveDSPASTRIDE = I915_READ(DSPASTRIDE);
@@ -260,10 +293,16 @@ static void i915_save_modeset_reg(struct drm_device *dev)
/* Pipe & plane B info */
dev_priv->savePIPEBCONF = I915_READ(PIPEBCONF);
dev_priv->savePIPEBSRC = I915_READ(PIPEBSRC);
- dev_priv->saveFPB0 = I915_READ(FPB0);
- dev_priv->saveFPB1 = I915_READ(FPB1);
- dev_priv->saveDPLL_B = I915_READ(DPLL_B);
- if (IS_I965G(dev))
+ if (IS_IGDNG(dev)) {
+ dev_priv->saveFPB0 = I915_READ(PCH_FPB0);
+ dev_priv->saveFPB1 = I915_READ(PCH_FPB1);
+ dev_priv->saveDPLL_B = I915_READ(PCH_DPLL_B);
+ } else {
+ dev_priv->saveFPB0 = I915_READ(FPB0);
+ dev_priv->saveFPB1 = I915_READ(FPB1);
+ dev_priv->saveDPLL_B = I915_READ(DPLL_B);
+ }
+ if (IS_I965G(dev) && !IS_IGDNG(dev))
dev_priv->saveDPLL_B_MD = I915_READ(DPLL_B_MD);
dev_priv->saveHTOTAL_B = I915_READ(HTOTAL_B);
dev_priv->saveHBLANK_B = I915_READ(HBLANK_B);
@@ -271,7 +310,24 @@ static void i915_save_modeset_reg(struct drm_device *dev)
dev_priv->saveVTOTAL_B = I915_READ(VTOTAL_B);
dev_priv->saveVBLANK_B = I915_READ(VBLANK_B);
dev_priv->saveVSYNC_B = I915_READ(VSYNC_B);
- dev_priv->saveBCLRPAT_A = I915_READ(BCLRPAT_A);
+ if (!IS_IGDNG(dev))
+ dev_priv->saveBCLRPAT_B = I915_READ(BCLRPAT_B);
+
+ if (IS_IGDNG(dev)) {
+ dev_priv->saveFDI_TXB_CTL = I915_READ(FDI_TXB_CTL);
+ dev_priv->saveFDI_RXB_CTL = I915_READ(FDI_RXB_CTL);
+
+ dev_priv->savePFB_CTL_1 = I915_READ(PFB_CTL_1);
+ dev_priv->savePFB_WIN_SZ = I915_READ(PFB_WIN_SZ);
+ dev_priv->savePFB_WIN_POS = I915_READ(PFB_WIN_POS);
+
+ dev_priv->saveTRANS_HTOTAL_B = I915_READ(TRANS_HTOTAL_B);
+ dev_priv->saveTRANS_HBLANK_B = I915_READ(TRANS_HBLANK_B);
+ dev_priv->saveTRANS_HSYNC_B = I915_READ(TRANS_HSYNC_B);
+ dev_priv->saveTRANS_VTOTAL_B = I915_READ(TRANS_VTOTAL_B);
+ dev_priv->saveTRANS_VBLANK_B = I915_READ(TRANS_VBLANK_B);
+ dev_priv->saveTRANS_VSYNC_B = I915_READ(TRANS_VSYNC_B);
+ }
dev_priv->saveDSPBCNTR = I915_READ(DSPBCNTR);
dev_priv->saveDSPBSTRIDE = I915_READ(DSPBSTRIDE);
@@ -290,23 +346,41 @@ static void i915_save_modeset_reg(struct drm_device *dev)
static void i915_restore_modeset_reg(struct drm_device *dev)
{
struct drm_i915_private *dev_priv = dev->dev_private;
+ int dpll_a_reg, fpa0_reg, fpa1_reg;
+ int dpll_b_reg, fpb0_reg, fpb1_reg;
if (drm_core_check_feature(dev, DRIVER_MODESET))
return;
+ if (IS_IGDNG(dev)) {
+ dpll_a_reg = PCH_DPLL_A;
+ dpll_b_reg = PCH_DPLL_B;
+ fpa0_reg = PCH_FPA0;
+ fpb0_reg = PCH_FPB0;
+ fpa1_reg = PCH_FPA1;
+ fpb1_reg = PCH_FPB1;
+ } else {
+ dpll_a_reg = DPLL_A;
+ dpll_b_reg = DPLL_B;
+ fpa0_reg = FPA0;
+ fpb0_reg = FPB0;
+ fpa1_reg = FPA1;
+ fpb1_reg = FPB1;
+ }
+
/* Pipe & plane A info */
/* Prime the clock */
if (dev_priv->saveDPLL_A & DPLL_VCO_ENABLE) {
- I915_WRITE(DPLL_A, dev_priv->saveDPLL_A &
+ I915_WRITE(dpll_a_reg, dev_priv->saveDPLL_A &
~DPLL_VCO_ENABLE);
DRM_UDELAY(150);
}
- I915_WRITE(FPA0, dev_priv->saveFPA0);
- I915_WRITE(FPA1, dev_priv->saveFPA1);
+ I915_WRITE(fpa0_reg, dev_priv->saveFPA0);
+ I915_WRITE(fpa1_reg, dev_priv->saveFPA1);
/* Actually enable it */
- I915_WRITE(DPLL_A, dev_priv->saveDPLL_A);
+ I915_WRITE(dpll_a_reg, dev_priv->saveDPLL_A);
DRM_UDELAY(150);
- if (IS_I965G(dev))
+ if (IS_I965G(dev) && !IS_IGDNG(dev))
I915_WRITE(DPLL_A_MD, dev_priv->saveDPLL_A_MD);
DRM_UDELAY(150);
@@ -317,7 +391,24 @@ static void i915_restore_modeset_reg(struct drm_device *dev)
I915_WRITE(VTOTAL_A, dev_priv->saveVTOTAL_A);
I915_WRITE(VBLANK_A, dev_priv->saveVBLANK_A);
I915_WRITE(VSYNC_A, dev_priv->saveVSYNC_A);
- I915_WRITE(BCLRPAT_A, dev_priv->saveBCLRPAT_A);
+ if (!IS_IGDNG(dev))
+ I915_WRITE(BCLRPAT_A, dev_priv->saveBCLRPAT_A);
+
+ if (IS_IGDNG(dev)) {
+ I915_WRITE(FDI_RXA_CTL, dev_priv->saveFDI_RXA_CTL);
+ I915_WRITE(FDI_TXA_CTL, dev_priv->saveFDI_TXA_CTL);
+
+ I915_WRITE(PFA_CTL_1, dev_priv->savePFA_CTL_1);
+ I915_WRITE(PFA_WIN_SZ, dev_priv->savePFA_WIN_SZ);
+ I915_WRITE(PFA_WIN_POS, dev_priv->savePFA_WIN_POS);
+
+ I915_WRITE(TRANS_HTOTAL_A, dev_priv->saveTRANS_HTOTAL_A);
+ I915_WRITE(TRANS_HBLANK_A, dev_priv->saveTRANS_HBLANK_A);
+ I915_WRITE(TRANS_HSYNC_A, dev_priv->saveTRANS_HSYNC_A);
+ I915_WRITE(TRANS_VTOTAL_A, dev_priv->saveTRANS_VTOTAL_A);
+ I915_WRITE(TRANS_VBLANK_A, dev_priv->saveTRANS_VBLANK_A);
+ I915_WRITE(TRANS_VSYNC_A, dev_priv->saveTRANS_VSYNC_A);
+ }
/* Restore plane info */
I915_WRITE(DSPASIZE, dev_priv->saveDSPASIZE);
@@ -339,14 +430,14 @@ static void i915_restore_modeset_reg(struct drm_device *dev)
/* Pipe & plane B info */
if (dev_priv->saveDPLL_B & DPLL_VCO_ENABLE) {
- I915_WRITE(DPLL_B, dev_priv->saveDPLL_B &
+ I915_WRITE(dpll_b_reg, dev_priv->saveDPLL_B &
~DPLL_VCO_ENABLE);
DRM_UDELAY(150);
}
- I915_WRITE(FPB0, dev_priv->saveFPB0);
- I915_WRITE(FPB1, dev_priv->saveFPB1);
+ I915_WRITE(fpb0_reg, dev_priv->saveFPB0);
+ I915_WRITE(fpb1_reg, dev_priv->saveFPB1);
/* Actually enable it */
- I915_WRITE(DPLL_B, dev_priv->saveDPLL_B);
+ I915_WRITE(dpll_b_reg, dev_priv->saveDPLL_B);
DRM_UDELAY(150);
if (IS_I965G(dev))
I915_WRITE(DPLL_B_MD, dev_priv->saveDPLL_B_MD);
@@ -359,7 +450,24 @@ static void i915_restore_modeset_reg(struct drm_device *dev)
I915_WRITE(VTOTAL_B, dev_priv->saveVTOTAL_B);
I915_WRITE(VBLANK_B, dev_priv->saveVBLANK_B);
I915_WRITE(VSYNC_B, dev_priv->saveVSYNC_B);
- I915_WRITE(BCLRPAT_B, dev_priv->saveBCLRPAT_B);
+ if (!IS_IGDNG(dev))
+ I915_WRITE(BCLRPAT_B, dev_priv->saveBCLRPAT_B);
+
+ if (IS_IGDNG(dev)) {
+ I915_WRITE(FDI_RXB_CTL, dev_priv->saveFDI_RXB_CTL);
+ I915_WRITE(FDI_TXB_CTL, dev_priv->saveFDI_TXB_CTL);
+
+ I915_WRITE(PFB_CTL_1, dev_priv->savePFB_CTL_1);
+ I915_WRITE(PFB_WIN_SZ, dev_priv->savePFB_WIN_SZ);
+ I915_WRITE(PFB_WIN_POS, dev_priv->savePFB_WIN_POS);
+
+ I915_WRITE(TRANS_HTOTAL_B, dev_priv->saveTRANS_HTOTAL_B);
+ I915_WRITE(TRANS_HBLANK_B, dev_priv->saveTRANS_HBLANK_B);
+ I915_WRITE(TRANS_HSYNC_B, dev_priv->saveTRANS_HSYNC_B);
+ I915_WRITE(TRANS_VTOTAL_B, dev_priv->saveTRANS_VTOTAL_B);
+ I915_WRITE(TRANS_VBLANK_B, dev_priv->saveTRANS_VBLANK_B);
+ I915_WRITE(TRANS_VSYNC_B, dev_priv->saveTRANS_VSYNC_B);
+ }
/* Restore plane info */
I915_WRITE(DSPBSIZE, dev_priv->saveDSPBSIZE);
@@ -404,21 +512,43 @@ void i915_save_display(struct drm_device *dev)
dev_priv->saveCURSIZE = I915_READ(CURSIZE);
/* CRT state */
- dev_priv->saveADPA = I915_READ(ADPA);
+ if (IS_IGDNG(dev)) {
+ dev_priv->saveADPA = I915_READ(PCH_ADPA);
+ } else {
+ dev_priv->saveADPA = I915_READ(ADPA);
+ }
/* LVDS state */
- dev_priv->savePP_CONTROL = I915_READ(PP_CONTROL);
- dev_priv->savePFIT_PGM_RATIOS = I915_READ(PFIT_PGM_RATIOS);
- dev_priv->saveBLC_PWM_CTL = I915_READ(BLC_PWM_CTL);
- if (IS_I965G(dev))
- dev_priv->saveBLC_PWM_CTL2 = I915_READ(BLC_PWM_CTL2);
- if (IS_MOBILE(dev) && !IS_I830(dev))
- dev_priv->saveLVDS = I915_READ(LVDS);
- if (!IS_I830(dev) && !IS_845G(dev))
+ if (IS_IGDNG(dev)) {
+ dev_priv->savePP_CONTROL = I915_READ(PCH_PP_CONTROL);
+ dev_priv->saveBLC_PWM_CTL = I915_READ(BLC_PWM_PCH_CTL1);
+ dev_priv->saveBLC_PWM_CTL2 = I915_READ(BLC_PWM_PCH_CTL2);
+ dev_priv->saveBLC_CPU_PWM_CTL = I915_READ(BLC_PWM_CPU_CTL);
+ dev_priv->saveBLC_CPU_PWM_CTL2 = I915_READ(BLC_PWM_CPU_CTL2);
+ dev_priv->saveLVDS = I915_READ(PCH_LVDS);
+ } else {
+ dev_priv->savePP_CONTROL = I915_READ(PP_CONTROL);
+ dev_priv->savePFIT_PGM_RATIOS = I915_READ(PFIT_PGM_RATIOS);
+ dev_priv->saveBLC_PWM_CTL = I915_READ(BLC_PWM_CTL);
+ dev_priv->saveBLC_HIST_CTL = I915_READ(BLC_HIST_CTL);
+ if (IS_I965G(dev))
+ dev_priv->saveBLC_PWM_CTL2 = I915_READ(BLC_PWM_CTL2);
+ if (IS_MOBILE(dev) && !IS_I830(dev))
+ dev_priv->saveLVDS = I915_READ(LVDS);
+ }
+
+ if (!IS_I830(dev) && !IS_845G(dev) && !IS_IGDNG(dev))
dev_priv->savePFIT_CONTROL = I915_READ(PFIT_CONTROL);
- dev_priv->savePP_ON_DELAYS = I915_READ(PP_ON_DELAYS);
- dev_priv->savePP_OFF_DELAYS = I915_READ(PP_OFF_DELAYS);
- dev_priv->savePP_DIVISOR = I915_READ(PP_DIVISOR);
+
+ if (IS_IGDNG(dev)) {
+ dev_priv->savePP_ON_DELAYS = I915_READ(PCH_PP_ON_DELAYS);
+ dev_priv->savePP_OFF_DELAYS = I915_READ(PCH_PP_OFF_DELAYS);
+ dev_priv->savePP_DIVISOR = I915_READ(PCH_PP_DIVISOR);
+ } else {
+ dev_priv->savePP_ON_DELAYS = I915_READ(PP_ON_DELAYS);
+ dev_priv->savePP_OFF_DELAYS = I915_READ(PP_OFF_DELAYS);
+ dev_priv->savePP_DIVISOR = I915_READ(PP_DIVISOR);
+ }
/* Display Port state */
if (SUPPORTS_INTEGRATED_DP(dev)) {
@@ -437,16 +567,23 @@ void i915_save_display(struct drm_device *dev)
/* FIXME: save TV & SDVO state */
/* FBC state */
- dev_priv->saveFBC_CFB_BASE = I915_READ(FBC_CFB_BASE);
- dev_priv->saveFBC_LL_BASE = I915_READ(FBC_LL_BASE);
- dev_priv->saveFBC_CONTROL2 = I915_READ(FBC_CONTROL2);
- dev_priv->saveFBC_CONTROL = I915_READ(FBC_CONTROL);
+ if (IS_GM45(dev)) {
+ dev_priv->saveDPFC_CB_BASE = I915_READ(DPFC_CB_BASE);
+ } else {
+ dev_priv->saveFBC_CFB_BASE = I915_READ(FBC_CFB_BASE);
+ dev_priv->saveFBC_LL_BASE = I915_READ(FBC_LL_BASE);
+ dev_priv->saveFBC_CONTROL2 = I915_READ(FBC_CONTROL2);
+ dev_priv->saveFBC_CONTROL = I915_READ(FBC_CONTROL);
+ }
/* VGA state */
dev_priv->saveVGA0 = I915_READ(VGA0);
dev_priv->saveVGA1 = I915_READ(VGA1);
dev_priv->saveVGA_PD = I915_READ(VGA_PD);
- dev_priv->saveVGACNTRL = I915_READ(VGACNTRL);
+ if (IS_IGDNG(dev))
+ dev_priv->saveVGACNTRL = I915_READ(CPU_VGACNTRL);
+ else
+ dev_priv->saveVGACNTRL = I915_READ(VGACNTRL);
i915_save_vga(dev);
}
@@ -485,22 +622,41 @@ void i915_restore_display(struct drm_device *dev)
I915_WRITE(CURSIZE, dev_priv->saveCURSIZE);
/* CRT state */
- I915_WRITE(ADPA, dev_priv->saveADPA);
+ if (IS_IGDNG(dev))
+ I915_WRITE(PCH_ADPA, dev_priv->saveADPA);
+ else
+ I915_WRITE(ADPA, dev_priv->saveADPA);
/* LVDS state */
- if (IS_I965G(dev))
+ if (IS_I965G(dev) && !IS_IGDNG(dev))
I915_WRITE(BLC_PWM_CTL2, dev_priv->saveBLC_PWM_CTL2);
- if (IS_MOBILE(dev) && !IS_I830(dev))
+
+ if (IS_IGDNG(dev)) {
+ I915_WRITE(PCH_LVDS, dev_priv->saveLVDS);
+ } else if (IS_MOBILE(dev) && !IS_I830(dev))
I915_WRITE(LVDS, dev_priv->saveLVDS);
- if (!IS_I830(dev) && !IS_845G(dev))
+
+ if (!IS_I830(dev) && !IS_845G(dev) && !IS_IGDNG(dev))
I915_WRITE(PFIT_CONTROL, dev_priv->savePFIT_CONTROL);
- I915_WRITE(PFIT_PGM_RATIOS, dev_priv->savePFIT_PGM_RATIOS);
- I915_WRITE(BLC_PWM_CTL, dev_priv->saveBLC_PWM_CTL);
- I915_WRITE(PP_ON_DELAYS, dev_priv->savePP_ON_DELAYS);
- I915_WRITE(PP_OFF_DELAYS, dev_priv->savePP_OFF_DELAYS);
- I915_WRITE(PP_DIVISOR, dev_priv->savePP_DIVISOR);
- I915_WRITE(PP_CONTROL, dev_priv->savePP_CONTROL);
+ if (IS_IGDNG(dev)) {
+ I915_WRITE(BLC_PWM_PCH_CTL1, dev_priv->saveBLC_PWM_CTL);
+ I915_WRITE(BLC_PWM_PCH_CTL2, dev_priv->saveBLC_PWM_CTL2);
+ I915_WRITE(BLC_PWM_CPU_CTL, dev_priv->saveBLC_CPU_PWM_CTL);
+ I915_WRITE(BLC_PWM_CPU_CTL2, dev_priv->saveBLC_CPU_PWM_CTL2);
+ I915_WRITE(PCH_PP_ON_DELAYS, dev_priv->savePP_ON_DELAYS);
+ I915_WRITE(PCH_PP_OFF_DELAYS, dev_priv->savePP_OFF_DELAYS);
+ I915_WRITE(PCH_PP_DIVISOR, dev_priv->savePP_DIVISOR);
+ I915_WRITE(PCH_PP_CONTROL, dev_priv->savePP_CONTROL);
+ } else {
+ I915_WRITE(PFIT_PGM_RATIOS, dev_priv->savePFIT_PGM_RATIOS);
+ I915_WRITE(BLC_PWM_CTL, dev_priv->saveBLC_PWM_CTL);
+ I915_WRITE(BLC_HIST_CTL, dev_priv->saveBLC_HIST_CTL);
+ I915_WRITE(PP_ON_DELAYS, dev_priv->savePP_ON_DELAYS);
+ I915_WRITE(PP_OFF_DELAYS, dev_priv->savePP_OFF_DELAYS);
+ I915_WRITE(PP_DIVISOR, dev_priv->savePP_DIVISOR);
+ I915_WRITE(PP_CONTROL, dev_priv->savePP_CONTROL);
+ }
/* Display Port state */
if (SUPPORTS_INTEGRATED_DP(dev)) {
@@ -511,13 +667,22 @@ void i915_restore_display(struct drm_device *dev)
/* FIXME: restore TV & SDVO state */
/* FBC info */
- I915_WRITE(FBC_CFB_BASE, dev_priv->saveFBC_CFB_BASE);
- I915_WRITE(FBC_LL_BASE, dev_priv->saveFBC_LL_BASE);
- I915_WRITE(FBC_CONTROL2, dev_priv->saveFBC_CONTROL2);
- I915_WRITE(FBC_CONTROL, dev_priv->saveFBC_CONTROL);
+ if (IS_GM45(dev)) {
+ g4x_disable_fbc(dev);
+ I915_WRITE(DPFC_CB_BASE, dev_priv->saveDPFC_CB_BASE);
+ } else {
+ i8xx_disable_fbc(dev);
+ I915_WRITE(FBC_CFB_BASE, dev_priv->saveFBC_CFB_BASE);
+ I915_WRITE(FBC_LL_BASE, dev_priv->saveFBC_LL_BASE);
+ I915_WRITE(FBC_CONTROL2, dev_priv->saveFBC_CONTROL2);
+ I915_WRITE(FBC_CONTROL, dev_priv->saveFBC_CONTROL);
+ }
/* VGA state */
- I915_WRITE(VGACNTRL, dev_priv->saveVGACNTRL);
+ if (IS_IGDNG(dev))
+ I915_WRITE(CPU_VGACNTRL, dev_priv->saveVGACNTRL);
+ else
+ I915_WRITE(VGACNTRL, dev_priv->saveVGACNTRL);
I915_WRITE(VGA0, dev_priv->saveVGA0);
I915_WRITE(VGA1, dev_priv->saveVGA1);
I915_WRITE(VGA_PD, dev_priv->saveVGA_PD);
@@ -543,8 +708,17 @@ int i915_save_state(struct drm_device *dev)
i915_save_display(dev);
/* Interrupt state */
- dev_priv->saveIER = I915_READ(IER);
- dev_priv->saveIMR = I915_READ(IMR);
+ if (IS_IGDNG(dev)) {
+ dev_priv->saveDEIER = I915_READ(DEIER);
+ dev_priv->saveDEIMR = I915_READ(DEIMR);
+ dev_priv->saveGTIER = I915_READ(GTIER);
+ dev_priv->saveGTIMR = I915_READ(GTIMR);
+ dev_priv->saveFDI_RXA_IMR = I915_READ(FDI_RXA_IMR);
+ dev_priv->saveFDI_RXB_IMR = I915_READ(FDI_RXB_IMR);
+ } else {
+ dev_priv->saveIER = I915_READ(IER);
+ dev_priv->saveIMR = I915_READ(IMR);
+ }
/* Clock gating state */
dev_priv->saveD_STATE = I915_READ(D_STATE);
@@ -609,8 +783,17 @@ int i915_restore_state(struct drm_device *dev)
i915_restore_display(dev);
/* Interrupt state */
- I915_WRITE (IER, dev_priv->saveIER);
- I915_WRITE (IMR, dev_priv->saveIMR);
+ if (IS_IGDNG(dev)) {
+ I915_WRITE(DEIER, dev_priv->saveDEIER);
+ I915_WRITE(DEIMR, dev_priv->saveDEIMR);
+ I915_WRITE(GTIER, dev_priv->saveGTIER);
+ I915_WRITE(GTIMR, dev_priv->saveGTIMR);
+ I915_WRITE(FDI_RXA_IMR, dev_priv->saveFDI_RXA_IMR);
+ I915_WRITE(FDI_RXB_IMR, dev_priv->saveFDI_RXB_IMR);
+ } else {
+ I915_WRITE (IER, dev_priv->saveIER);
+ I915_WRITE (IMR, dev_priv->saveIMR);
+ }
/* Clock gating state */
I915_WRITE (D_STATE, dev_priv->saveD_STATE);
diff --git a/drivers/gpu/drm/i915/i915_trace.h b/drivers/gpu/drm/i915/i915_trace.h
index 5567a40816f..01840d9bc38 100644
--- a/drivers/gpu/drm/i915/i915_trace.h
+++ b/drivers/gpu/drm/i915/i915_trace.h
@@ -158,16 +158,17 @@ TRACE_EVENT(i915_gem_request_submit,
TP_ARGS(dev, seqno),
TP_STRUCT__entry(
- __field(struct drm_device *, dev)
+ __field(u32, dev)
__field(u32, seqno)
),
TP_fast_assign(
- __entry->dev = dev;
+ __entry->dev = dev->primary->index;
__entry->seqno = seqno;
+ i915_trace_irq_get(dev, seqno);
),
- TP_printk("dev=%p, seqno=%u", __entry->dev, __entry->seqno)
+ TP_printk("dev=%u, seqno=%u", __entry->dev, __entry->seqno)
);
TRACE_EVENT(i915_gem_request_flush,
@@ -178,20 +179,20 @@ TRACE_EVENT(i915_gem_request_flush,
TP_ARGS(dev, seqno, flush_domains, invalidate_domains),
TP_STRUCT__entry(
- __field(struct drm_device *, dev)
+ __field(u32, dev)
__field(u32, seqno)
__field(u32, flush_domains)
__field(u32, invalidate_domains)
),
TP_fast_assign(
- __entry->dev = dev;
+ __entry->dev = dev->primary->index;
__entry->seqno = seqno;
__entry->flush_domains = flush_domains;
__entry->invalidate_domains = invalidate_domains;
),
- TP_printk("dev=%p, seqno=%u, flush=%04x, invalidate=%04x",
+ TP_printk("dev=%u, seqno=%u, flush=%04x, invalidate=%04x",
__entry->dev, __entry->seqno,
__entry->flush_domains, __entry->invalidate_domains)
);
@@ -204,16 +205,16 @@ TRACE_EVENT(i915_gem_request_complete,
TP_ARGS(dev, seqno),
TP_STRUCT__entry(
- __field(struct drm_device *, dev)
+ __field(u32, dev)
__field(u32, seqno)
),
TP_fast_assign(
- __entry->dev = dev;
+ __entry->dev = dev->primary->index;
__entry->seqno = seqno;
),
- TP_printk("dev=%p, seqno=%u", __entry->dev, __entry->seqno)
+ TP_printk("dev=%u, seqno=%u", __entry->dev, __entry->seqno)
);
TRACE_EVENT(i915_gem_request_retire,
@@ -223,16 +224,16 @@ TRACE_EVENT(i915_gem_request_retire,
TP_ARGS(dev, seqno),
TP_STRUCT__entry(
- __field(struct drm_device *, dev)
+ __field(u32, dev)
__field(u32, seqno)
),
TP_fast_assign(
- __entry->dev = dev;
+ __entry->dev = dev->primary->index;
__entry->seqno = seqno;
),
- TP_printk("dev=%p, seqno=%u", __entry->dev, __entry->seqno)
+ TP_printk("dev=%u, seqno=%u", __entry->dev, __entry->seqno)
);
TRACE_EVENT(i915_gem_request_wait_begin,
@@ -242,16 +243,16 @@ TRACE_EVENT(i915_gem_request_wait_begin,
TP_ARGS(dev, seqno),
TP_STRUCT__entry(
- __field(struct drm_device *, dev)
+ __field(u32, dev)
__field(u32, seqno)
),
TP_fast_assign(
- __entry->dev = dev;
+ __entry->dev = dev->primary->index;
__entry->seqno = seqno;
),
- TP_printk("dev=%p, seqno=%u", __entry->dev, __entry->seqno)
+ TP_printk("dev=%u, seqno=%u", __entry->dev, __entry->seqno)
);
TRACE_EVENT(i915_gem_request_wait_end,
@@ -261,16 +262,16 @@ TRACE_EVENT(i915_gem_request_wait_end,
TP_ARGS(dev, seqno),
TP_STRUCT__entry(
- __field(struct drm_device *, dev)
+ __field(u32, dev)
__field(u32, seqno)
),
TP_fast_assign(
- __entry->dev = dev;
+ __entry->dev = dev->primary->index;
__entry->seqno = seqno;
),
- TP_printk("dev=%p, seqno=%u", __entry->dev, __entry->seqno)
+ TP_printk("dev=%u, seqno=%u", __entry->dev, __entry->seqno)
);
TRACE_EVENT(i915_ring_wait_begin,
@@ -280,14 +281,14 @@ TRACE_EVENT(i915_ring_wait_begin,
TP_ARGS(dev),
TP_STRUCT__entry(
- __field(struct drm_device *, dev)
+ __field(u32, dev)
),
TP_fast_assign(
- __entry->dev = dev;
+ __entry->dev = dev->primary->index;
),
- TP_printk("dev=%p", __entry->dev)
+ TP_printk("dev=%u", __entry->dev)
);
TRACE_EVENT(i915_ring_wait_end,
@@ -297,14 +298,14 @@ TRACE_EVENT(i915_ring_wait_end,
TP_ARGS(dev),
TP_STRUCT__entry(
- __field(struct drm_device *, dev)
+ __field(u32, dev)
),
TP_fast_assign(
- __entry->dev = dev;
+ __entry->dev = dev->primary->index;
),
- TP_printk("dev=%p", __entry->dev)
+ TP_printk("dev=%u", __entry->dev)
);
#endif /* _I915_TRACE_H_ */
diff --git a/drivers/gpu/drm/i915/intel_bios.c b/drivers/gpu/drm/i915/intel_bios.c
index 4337414846b..96cd256e60e 100644
--- a/drivers/gpu/drm/i915/intel_bios.c
+++ b/drivers/gpu/drm/i915/intel_bios.c
@@ -351,20 +351,18 @@ parse_driver_features(struct drm_i915_private *dev_priv,
struct drm_device *dev = dev_priv->dev;
struct bdb_driver_features *driver;
- /* set default for chips without eDP */
- if (!SUPPORTS_EDP(dev)) {
- dev_priv->edp_support = 0;
- return;
- }
-
driver = find_section(bdb, BDB_DRIVER_FEATURES);
if (!driver)
return;
- if (driver->lvds_config == BDB_DRIVER_FEATURE_EDP)
+ if (driver && SUPPORTS_EDP(dev) &&
+ driver->lvds_config == BDB_DRIVER_FEATURE_EDP) {
dev_priv->edp_support = 1;
+ } else {
+ dev_priv->edp_support = 0;
+ }
- if (driver->dual_frequency)
+ if (driver && driver->dual_frequency)
dev_priv->render_reclock_avail = true;
}
diff --git a/drivers/gpu/drm/i915/intel_display.c b/drivers/gpu/drm/i915/intel_display.c
index 93ff6c03733..3ba6546b7c7 100644
--- a/drivers/gpu/drm/i915/intel_display.c
+++ b/drivers/gpu/drm/i915/intel_display.c
@@ -943,6 +943,7 @@ intel_find_pll_g4x_dp(const intel_limit_t *limit, struct drm_crtc *crtc,
clock.m = 5 * (clock.m1 + 2) + (clock.m2 + 2);
clock.p = (clock.p1 * clock.p2);
clock.dot = 96000 * clock.m / (clock.n + 2) / clock.p;
+ clock.vco = 0;
memcpy(best_clock, &clock, sizeof(intel_clock_t));
return true;
}
@@ -1260,9 +1261,11 @@ intel_pipe_set_base(struct drm_crtc *crtc, int x, int y,
return ret;
}
- /* Pre-i965 needs to install a fence for tiled scan-out */
- if (!IS_I965G(dev) &&
- obj_priv->fence_reg == I915_FENCE_REG_NONE &&
+ /* Install a fence for tiled scan-out. Pre-i965 always needs a fence,
+ * whereas 965+ only requires a fence if using framebuffer compression.
+ * For simplicity, we always install a fence as the cost is not that onerous.
+ */
+ if (obj_priv->fence_reg == I915_FENCE_REG_NONE &&
obj_priv->tiling_mode != I915_TILING_NONE) {
ret = i915_gem_object_get_fence_reg(obj);
if (ret != 0) {
@@ -1513,7 +1516,7 @@ static void igdng_crtc_dpms(struct drm_crtc *crtc, int mode)
/* Enable panel fitting for LVDS */
if (intel_pipe_has_type(crtc, INTEL_OUTPUT_LVDS)) {
temp = I915_READ(pf_ctl_reg);
- I915_WRITE(pf_ctl_reg, temp | PF_ENABLE);
+ I915_WRITE(pf_ctl_reg, temp | PF_ENABLE | PF_FILTER_MED_3x3);
/* currently full aspect */
I915_WRITE(pf_win_pos, 0);
@@ -1801,6 +1804,8 @@ static void i9xx_crtc_dpms(struct drm_crtc *crtc, int mode)
case DRM_MODE_DPMS_ON:
case DRM_MODE_DPMS_STANDBY:
case DRM_MODE_DPMS_SUSPEND:
+ intel_update_watermarks(dev);
+
/* Enable the DPLL */
temp = I915_READ(dpll_reg);
if ((temp & DPLL_VCO_ENABLE) == 0) {
@@ -1838,7 +1843,6 @@ static void i9xx_crtc_dpms(struct drm_crtc *crtc, int mode)
/* Give the overlay scaler a chance to enable if it's on this pipe */
//intel_crtc_dpms_video(crtc, true); TODO
- intel_update_watermarks(dev);
break;
case DRM_MODE_DPMS_OFF:
intel_update_watermarks(dev);
@@ -2082,7 +2086,7 @@ fdi_reduce_ratio(u32 *num, u32 *den)
#define LINK_N 0x80000
static void
-igdng_compute_m_n(int bytes_per_pixel, int nlanes,
+igdng_compute_m_n(int bits_per_pixel, int nlanes,
int pixel_clock, int link_clock,
struct fdi_m_n *m_n)
{
@@ -2092,7 +2096,8 @@ igdng_compute_m_n(int bytes_per_pixel, int nlanes,
temp = (u64) DATA_N * pixel_clock;
temp = div_u64(temp, link_clock);
- m_n->gmch_m = div_u64(temp * bytes_per_pixel, nlanes);
+ m_n->gmch_m = div_u64(temp * bits_per_pixel, nlanes);
+ m_n->gmch_m >>= 3; /* convert to bytes_per_pixel */
m_n->gmch_n = DATA_N;
fdi_reduce_ratio(&m_n->gmch_m, &m_n->gmch_n);
@@ -2140,6 +2145,13 @@ static struct intel_watermark_params igd_cursor_hplloff_wm = {
IGD_CURSOR_GUARD_WM,
IGD_FIFO_LINE_SIZE
};
+static struct intel_watermark_params g4x_wm_info = {
+ G4X_FIFO_SIZE,
+ G4X_MAX_WM,
+ G4X_MAX_WM,
+ 2,
+ G4X_FIFO_LINE_SIZE,
+};
static struct intel_watermark_params i945_wm_info = {
I945_FIFO_SIZE,
I915_MAX_WM,
@@ -2430,17 +2442,74 @@ static int i830_get_fifo_size(struct drm_device *dev, int plane)
return size;
}
-static void g4x_update_wm(struct drm_device *dev, int unused, int unused2,
- int unused3, int unused4)
+static void g4x_update_wm(struct drm_device *dev, int planea_clock,
+ int planeb_clock, int sr_hdisplay, int pixel_size)
{
struct drm_i915_private *dev_priv = dev->dev_private;
- u32 fw_blc_self = I915_READ(FW_BLC_SELF);
+ int total_size, cacheline_size;
+ int planea_wm, planeb_wm, cursora_wm, cursorb_wm, cursor_sr;
+ struct intel_watermark_params planea_params, planeb_params;
+ unsigned long line_time_us;
+ int sr_clock, sr_entries = 0, entries_required;
- if (i915_powersave)
- fw_blc_self |= FW_BLC_SELF_EN;
- else
- fw_blc_self &= ~FW_BLC_SELF_EN;
- I915_WRITE(FW_BLC_SELF, fw_blc_self);
+ /* Create copies of the base settings for each pipe */
+ planea_params = planeb_params = g4x_wm_info;
+
+ /* Grab a couple of global values before we overwrite them */
+ total_size = planea_params.fifo_size;
+ cacheline_size = planea_params.cacheline_size;
+
+ /*
+ * Note: we need to make sure we don't overflow for various clock &
+ * latency values.
+ * clocks go from a few thousand to several hundred thousand.
+ * latency is usually a few thousand
+ */
+ entries_required = ((planea_clock / 1000) * pixel_size * latency_ns) /
+ 1000;
+ entries_required /= G4X_FIFO_LINE_SIZE;
+ planea_wm = entries_required + planea_params.guard_size;
+
+ entries_required = ((planeb_clock / 1000) * pixel_size * latency_ns) /
+ 1000;
+ entries_required /= G4X_FIFO_LINE_SIZE;
+ planeb_wm = entries_required + planeb_params.guard_size;
+
+ cursora_wm = cursorb_wm = 16;
+ cursor_sr = 32;
+
+ DRM_DEBUG("FIFO watermarks - A: %d, B: %d\n", planea_wm, planeb_wm);
+
+ /* Calc sr entries for one plane configs */
+ if (sr_hdisplay && (!planea_clock || !planeb_clock)) {
+ /* self-refresh has much higher latency */
+ const static int sr_latency_ns = 12000;
+
+ sr_clock = planea_clock ? planea_clock : planeb_clock;
+ line_time_us = ((sr_hdisplay * 1000) / sr_clock);
+
+ /* Use ns/us then divide to preserve precision */
+ sr_entries = (((sr_latency_ns / line_time_us) + 1) *
+ pixel_size * sr_hdisplay) / 1000;
+ sr_entries = roundup(sr_entries / cacheline_size, 1);
+ DRM_DEBUG("self-refresh entries: %d\n", sr_entries);
+ I915_WRITE(FW_BLC_SELF, FW_BLC_SELF_EN);
+ }
+
+ DRM_DEBUG("Setting FIFO watermarks - A: %d, B: %d, SR %d\n",
+ planea_wm, planeb_wm, sr_entries);
+
+ planea_wm &= 0x3f;
+ planeb_wm &= 0x3f;
+
+ I915_WRITE(DSPFW1, (sr_entries << DSPFW_SR_SHIFT) |
+ (cursorb_wm << DSPFW_CURSORB_SHIFT) |
+ (planeb_wm << DSPFW_PLANEB_SHIFT) | planea_wm);
+ I915_WRITE(DSPFW2, (I915_READ(DSPFW2) & DSPFW_CURSORA_MASK) |
+ (cursora_wm << DSPFW_CURSORA_SHIFT));
+ /* HPLL off in SR has some issues on G4x... disable it */
+ I915_WRITE(DSPFW3, (I915_READ(DSPFW3) & ~DSPFW_HPLL_SR_EN) |
+ (cursor_sr << DSPFW_CURSOR_SR_SHIFT));
}
static void i965_update_wm(struct drm_device *dev, int unused, int unused2,
@@ -2586,6 +2655,9 @@ static void intel_update_watermarks(struct drm_device *dev)
unsigned long planea_clock = 0, planeb_clock = 0, sr_clock = 0;
int enabled = 0, pixel_size = 0;
+ if (!dev_priv->display.update_wm)
+ return;
+
/* Get the clock config from both planes */
list_for_each_entry(crtc, &dev->mode_config.crtc_list, head) {
intel_crtc = to_intel_crtc(crtc);
@@ -2763,7 +2835,7 @@ static int intel_crtc_mode_set(struct drm_crtc *crtc,
/* FDI link */
if (IS_IGDNG(dev)) {
- int lane, link_bw;
+ int lane, link_bw, bpp;
/* eDP doesn't require FDI link, so just set DP M/N
according to current link config */
if (is_edp) {
@@ -2782,10 +2854,72 @@ static int intel_crtc_mode_set(struct drm_crtc *crtc,
lane = 4;
link_bw = 270000;
}
- igdng_compute_m_n(3, lane, target_clock,
+
+ /* determine panel color depth */
+ temp = I915_READ(pipeconf_reg);
+
+ switch (temp & PIPE_BPC_MASK) {
+ case PIPE_8BPC:
+ bpp = 24;
+ break;
+ case PIPE_10BPC:
+ bpp = 30;
+ break;
+ case PIPE_6BPC:
+ bpp = 18;
+ break;
+ case PIPE_12BPC:
+ bpp = 36;
+ break;
+ default:
+ DRM_ERROR("unknown pipe bpc value\n");
+ bpp = 24;
+ }
+
+ igdng_compute_m_n(bpp, lane, target_clock,
link_bw, &m_n);
}
+ /* Ironlake: try to setup display ref clock before DPLL
+ * enabling. This is only under driver's control after
+ * PCH B stepping, previous chipset stepping should be
+ * ignoring this setting.
+ */
+ if (IS_IGDNG(dev)) {
+ temp = I915_READ(PCH_DREF_CONTROL);
+ /* Always enable nonspread source */
+ temp &= ~DREF_NONSPREAD_SOURCE_MASK;
+ temp |= DREF_NONSPREAD_SOURCE_ENABLE;
+ I915_WRITE(PCH_DREF_CONTROL, temp);
+ POSTING_READ(PCH_DREF_CONTROL);
+
+ temp &= ~DREF_SSC_SOURCE_MASK;
+ temp |= DREF_SSC_SOURCE_ENABLE;
+ I915_WRITE(PCH_DREF_CONTROL, temp);
+ POSTING_READ(PCH_DREF_CONTROL);
+
+ udelay(200);
+
+ if (is_edp) {
+ if (dev_priv->lvds_use_ssc) {
+ temp |= DREF_SSC1_ENABLE;
+ I915_WRITE(PCH_DREF_CONTROL, temp);
+ POSTING_READ(PCH_DREF_CONTROL);
+
+ udelay(200);
+
+ temp &= ~DREF_CPU_SOURCE_OUTPUT_MASK;
+ temp |= DREF_CPU_SOURCE_OUTPUT_DOWNSPREAD;
+ I915_WRITE(PCH_DREF_CONTROL, temp);
+ POSTING_READ(PCH_DREF_CONTROL);
+ } else {
+ temp |= DREF_CPU_SOURCE_OUTPUT_NONSPREAD;
+ I915_WRITE(PCH_DREF_CONTROL, temp);
+ POSTING_READ(PCH_DREF_CONTROL);
+ }
+ }
+ }
+
if (IS_IGD(dev)) {
fp = (1 << clock.n) << 16 | clock.m1 << 8 | clock.m2;
if (has_reduced_clock)
@@ -2936,6 +3070,8 @@ static int intel_crtc_mode_set(struct drm_crtc *crtc,
lvds = I915_READ(lvds_reg);
lvds |= LVDS_PORT_EN | LVDS_A0A2_CLKA_POWER_UP | LVDS_PIPEB_SELECT;
+ /* set the corresponsding LVDS_BORDER bit */
+ lvds |= dev_priv->lvds_border_bits;
/* Set the B0-B3 data pairs corresponding to whether we're going to
* set the DPLLs for dual-channel mode or not.
*/
@@ -3095,7 +3231,6 @@ static int intel_crtc_cursor_set(struct drm_crtc *crtc,
struct drm_gem_object *bo;
struct drm_i915_gem_object *obj_priv;
int pipe = intel_crtc->pipe;
- int plane = intel_crtc->plane;
uint32_t control = (pipe == 0) ? CURACNTR : CURBCNTR;
uint32_t base = (pipe == 0) ? CURABASE : CURBBASE;
uint32_t temp = I915_READ(control);
@@ -3182,9 +3317,6 @@ static int intel_crtc_cursor_set(struct drm_crtc *crtc,
drm_gem_object_unreference(intel_crtc->cursor_bo);
}
- if ((IS_I965G(dev) || plane == 0))
- intel_update_fbc(crtc, &crtc->mode);
-
mutex_unlock(&dev->struct_mutex);
intel_crtc->cursor_addr = addr;
@@ -3244,6 +3376,16 @@ void intel_crtc_fb_gamma_set(struct drm_crtc *crtc, u16 red, u16 green,
intel_crtc->lut_b[regno] = blue >> 8;
}
+void intel_crtc_fb_gamma_get(struct drm_crtc *crtc, u16 *red, u16 *green,
+ u16 *blue, int regno)
+{
+ struct intel_crtc *intel_crtc = to_intel_crtc(crtc);
+
+ *red = intel_crtc->lut_r[regno] << 8;
+ *green = intel_crtc->lut_g[regno] << 8;
+ *blue = intel_crtc->lut_b[regno] << 8;
+}
+
static void intel_crtc_gamma_set(struct drm_crtc *crtc, u16 *red, u16 *green,
u16 *blue, uint32_t size)
{
@@ -3835,6 +3977,7 @@ static const struct drm_crtc_helper_funcs intel_helper_funcs = {
.mode_set_base = intel_pipe_set_base,
.prepare = intel_crtc_prepare,
.commit = intel_crtc_commit,
+ .load_lut = intel_crtc_load_lut,
};
static const struct drm_crtc_funcs intel_crtc_funcs = {
@@ -4117,7 +4260,9 @@ void intel_init_clock_gating(struct drm_device *dev)
* Disable clock gating reported to work incorrectly according to the
* specs, but enable as much else as we can.
*/
- if (IS_G4X(dev)) {
+ if (IS_IGDNG(dev)) {
+ return;
+ } else if (IS_G4X(dev)) {
uint32_t dspclk_gate;
I915_WRITE(RENCLK_GATE_D1, 0);
I915_WRITE(RENCLK_GATE_D2, VF_UNIT_CLOCK_GATE_DISABLE |
@@ -4205,7 +4350,9 @@ static void intel_init_display(struct drm_device *dev)
i830_get_display_clock_speed;
/* For FIFO watermark updates */
- if (IS_G4X(dev))
+ if (IS_IGDNG(dev))
+ dev_priv->display.update_wm = NULL;
+ else if (IS_G4X(dev))
dev_priv->display.update_wm = g4x_update_wm;
else if (IS_I965G(dev))
dev_priv->display.update_wm = i965_update_wm;
diff --git a/drivers/gpu/drm/i915/intel_dp.c b/drivers/gpu/drm/i915/intel_dp.c
index f4856a51047..d83447557f9 100644
--- a/drivers/gpu/drm/i915/intel_dp.c
+++ b/drivers/gpu/drm/i915/intel_dp.c
@@ -400,7 +400,7 @@ intel_dp_i2c_init(struct intel_output *intel_output, const char *name)
{
struct intel_dp_priv *dp_priv = intel_output->dev_priv;
- DRM_ERROR("i2c_init %s\n", name);
+ DRM_DEBUG_KMS("i2c_init %s\n", name);
dp_priv->algo.running = false;
dp_priv->algo.address = 0;
dp_priv->algo.aux_ch = intel_dp_i2c_aux_ch;
diff --git a/drivers/gpu/drm/i915/intel_drv.h b/drivers/gpu/drm/i915/intel_drv.h
index 8aa4b7f30da..ef61fe9507e 100644
--- a/drivers/gpu/drm/i915/intel_drv.h
+++ b/drivers/gpu/drm/i915/intel_drv.h
@@ -175,6 +175,8 @@ extern int intelfb_resize(struct drm_device *dev, struct drm_crtc *crtc);
extern void intelfb_restore(void);
extern void intel_crtc_fb_gamma_set(struct drm_crtc *crtc, u16 red, u16 green,
u16 blue, int regno);
+extern void intel_crtc_fb_gamma_get(struct drm_crtc *crtc, u16 *red, u16 *green,
+ u16 *blue, int regno);
extern int intel_framebuffer_create(struct drm_device *dev,
struct drm_mode_fb_cmd *mode_cmd,
diff --git a/drivers/gpu/drm/i915/intel_fb.c b/drivers/gpu/drm/i915/intel_fb.c
index e85d7e9eed7..2b0fe54cd92 100644
--- a/drivers/gpu/drm/i915/intel_fb.c
+++ b/drivers/gpu/drm/i915/intel_fb.c
@@ -60,10 +60,12 @@ static struct fb_ops intelfb_ops = {
.fb_imageblit = cfb_imageblit,
.fb_pan_display = drm_fb_helper_pan_display,
.fb_blank = drm_fb_helper_blank,
+ .fb_setcmap = drm_fb_helper_setcmap,
};
static struct drm_fb_helper_funcs intel_fb_helper_funcs = {
.gamma_set = intel_crtc_fb_gamma_set,
+ .gamma_get = intel_crtc_fb_gamma_get,
};
@@ -123,6 +125,10 @@ static int intelfb_create(struct drm_device *dev, uint32_t fb_width,
struct device *device = &dev->pdev->dev;
int size, ret, mmio_bar = IS_I9XX(dev) ? 0 : 1;
+ /* we don't do packed 24bpp */
+ if (surface_bpp == 24)
+ surface_bpp = 32;
+
mode_cmd.width = surface_width;
mode_cmd.height = surface_height;
@@ -206,7 +212,7 @@ static int intelfb_create(struct drm_device *dev, uint32_t fb_width,
// memset(info->screen_base, 0, size);
- drm_fb_helper_fill_fix(info, fb->pitch);
+ drm_fb_helper_fill_fix(info, fb->pitch, fb->depth);
drm_fb_helper_fill_var(info, fb, fb_width, fb_height);
/* FIXME: we really shouldn't expose mmio space at all */
@@ -244,7 +250,7 @@ int intelfb_probe(struct drm_device *dev)
int ret;
DRM_DEBUG("\n");
- ret = drm_fb_helper_single_fb_probe(dev, intelfb_create);
+ ret = drm_fb_helper_single_fb_probe(dev, 32, intelfb_create);
return ret;
}
EXPORT_SYMBOL(intelfb_probe);
diff --git a/drivers/gpu/drm/i915/intel_hdmi.c b/drivers/gpu/drm/i915/intel_hdmi.c
index fa304e13601..663ab6de0b5 100644
--- a/drivers/gpu/drm/i915/intel_hdmi.c
+++ b/drivers/gpu/drm/i915/intel_hdmi.c
@@ -223,7 +223,7 @@ void intel_hdmi_init(struct drm_device *dev, int sdvox_reg)
connector = &intel_output->base;
drm_connector_init(dev, connector, &intel_hdmi_connector_funcs,
- DRM_MODE_CONNECTOR_DVID);
+ DRM_MODE_CONNECTOR_HDMIA);
drm_connector_helper_add(connector, &intel_hdmi_connector_helper_funcs);
intel_output->type = INTEL_OUTPUT_HDMI;
diff --git a/drivers/gpu/drm/i915/intel_lvds.c b/drivers/gpu/drm/i915/intel_lvds.c
index 98ae3d73577..05598ae10c4 100644
--- a/drivers/gpu/drm/i915/intel_lvds.c
+++ b/drivers/gpu/drm/i915/intel_lvds.c
@@ -380,7 +380,7 @@ static bool intel_lvds_mode_fixup(struct drm_encoder *encoder,
adjusted_mode->crtc_vblank_start + vsync_pos;
/* keep the vsync width constant */
adjusted_mode->crtc_vsync_end =
- adjusted_mode->crtc_vblank_start + vsync_width;
+ adjusted_mode->crtc_vsync_start + vsync_width;
border = 1;
break;
case DRM_MODE_SCALE_ASPECT:
@@ -526,6 +526,14 @@ out:
lvds_priv->pfit_control = pfit_control;
lvds_priv->pfit_pgm_ratios = pfit_pgm_ratios;
/*
+ * When a border is present, the LVDS_BORDER_ENABLE
+ * bit should be set.
+ */
+ if (border)
+ dev_priv->lvds_border_bits |= LVDS_BORDER_ENABLE;
+ else
+ dev_priv->lvds_border_bits &= ~(LVDS_BORDER_ENABLE);
+ /*
* XXX: It would be nice to support lower refresh rates on the
* panels to reduce power consumption, and perhaps match the
* user's requested refresh rate.
@@ -656,6 +664,15 @@ static int intel_lvds_get_modes(struct drm_connector *connector)
return 0;
}
+/*
+ * Lid events. Note the use of 'modeset_on_lid':
+ * - we set it on lid close, and reset it on open
+ * - we use it as an "only once" bit (i.e. we ignore
+ * duplicate events where it was already properly
+ * set/reset)
+ * - the suspend/resume paths will also set it to
+ * zero, since they restore the mode ("lid open").
+ */
static int intel_lid_notify(struct notifier_block *nb, unsigned long val,
void *unused)
{
@@ -663,13 +680,19 @@ static int intel_lid_notify(struct notifier_block *nb, unsigned long val,
container_of(nb, struct drm_i915_private, lid_notifier);
struct drm_device *dev = dev_priv->dev;
- if (acpi_lid_open() && !dev_priv->suspended) {
- mutex_lock(&dev->mode_config.mutex);
- drm_helper_resume_force_mode(dev);
- mutex_unlock(&dev->mode_config.mutex);
+ if (!acpi_lid_open()) {
+ dev_priv->modeset_on_lid = 1;
+ return NOTIFY_OK;
}
- drm_sysfs_hotplug_event(dev_priv->dev);
+ if (!dev_priv->modeset_on_lid)
+ return NOTIFY_OK;
+
+ dev_priv->modeset_on_lid = 0;
+
+ mutex_lock(&dev->mode_config.mutex);
+ drm_helper_resume_force_mode(dev);
+ mutex_unlock(&dev->mode_config.mutex);
return NOTIFY_OK;
}
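A stand-alone sketch (not part of the patch) of the "only once" latch described in the comment above; handle_lid_event() and the printf stand in for the real notifier and mode restore.

#include <stdbool.h>
#include <stdio.h>

static bool modeset_on_lid;

static void handle_lid_event(bool lid_open)
{
	if (!lid_open) {
		modeset_on_lid = true;		/* lid closed: arm the latch */
		return;
	}
	if (!modeset_on_lid)			/* duplicate "open" events are ignored */
		return;
	modeset_on_lid = false;			/* consume the latch exactly once */
	printf("restoring display mode\n");
}

int main(void)
{
	handle_lid_event(false);	/* close */
	handle_lid_event(true);		/* open: restore runs once */
	handle_lid_event(true);		/* duplicate open: no-op */
	return 0;
}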
diff --git a/drivers/gpu/drm/i915/intel_tv.c b/drivers/gpu/drm/i915/intel_tv.c
index c64eab493fb..9ca917931af 100644
--- a/drivers/gpu/drm/i915/intel_tv.c
+++ b/drivers/gpu/drm/i915/intel_tv.c
@@ -1082,7 +1082,8 @@ intel_tv_mode_valid(struct drm_connector *connector, struct drm_display_mode *mo
const struct tv_mode *tv_mode = intel_tv_mode_find(intel_output);
/* Ensure TV refresh is close to desired refresh */
- if (tv_mode && abs(tv_mode->refresh - drm_mode_vrefresh(mode)) < 10)
+ if (tv_mode && abs(tv_mode->refresh - drm_mode_vrefresh(mode) * 1000)
+ < 1000)
return MODE_OK;
return MODE_CLOCK_RANGE;
}
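A stand-alone sketch (not part of the patch) of the comparison above, assuming the TV mode's refresh value is stored in millihertz while drm_mode_vrefresh() reports whole hertz; the 59940 figure is a hypothetical NTSC-style rate.

#include <stdio.h>
#include <stdlib.h>

int main(void)
{
	int tv_refresh_mhz = 59940;	/* hypothetical NTSC-style refresh, millihertz */
	int mode_refresh_hz = 60;	/* as drm_mode_vrefresh() would report */

	if (abs(tv_refresh_mhz - mode_refresh_hz * 1000) < 1000)
		printf("MODE_OK\n");		/* within 1 Hz of the TV standard */
	else
		printf("MODE_CLOCK_RANGE\n");
	return 0;
}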
diff --git a/drivers/gpu/drm/radeon/Makefile b/drivers/gpu/drm/radeon/Makefile
index 09a28923f46..b5713eedd6e 100644
--- a/drivers/gpu/drm/radeon/Makefile
+++ b/drivers/gpu/drm/radeon/Makefile
@@ -49,7 +49,7 @@ radeon-y += radeon_device.o radeon_kms.o \
radeon_cs.o radeon_bios.o radeon_benchmark.o r100.o r300.o r420.o \
rs400.o rs600.o rs690.o rv515.o r520.o r600.o rv770.o radeon_test.o \
r200.o radeon_legacy_tv.o r600_cs.o r600_blit.o r600_blit_shaders.o \
- r600_blit_kms.o
+ r600_blit_kms.o radeon_pm.o
radeon-$(CONFIG_COMPAT) += radeon_ioc32.o
diff --git a/drivers/gpu/drm/radeon/atombios.h b/drivers/gpu/drm/radeon/atombios.h
index 5d402086bc4..c11ddddfb3b 100644
--- a/drivers/gpu/drm/radeon/atombios.h
+++ b/drivers/gpu/drm/radeon/atombios.h
@@ -2314,7 +2314,7 @@ typedef struct _ATOM_SPREAD_SPECTRUM_ASSIGNMENT {
UCHAR ucSS_Step;
UCHAR ucSS_Delay;
UCHAR ucSS_Id;
- UCHAR ucRecommandedRef_Div;
+ UCHAR ucRecommendedRef_Div;
UCHAR ucSS_Range; /* it was reserved for V11 */
} ATOM_SPREAD_SPECTRUM_ASSIGNMENT;
diff --git a/drivers/gpu/drm/radeon/atombios_crtc.c b/drivers/gpu/drm/radeon/atombios_crtc.c
index 6a015929dee..c15287a590f 100644
--- a/drivers/gpu/drm/radeon/atombios_crtc.c
+++ b/drivers/gpu/drm/radeon/atombios_crtc.c
@@ -31,10 +31,6 @@
#include "atom.h"
#include "atom-bits.h"
-/* evil but including atombios.h is much worse */
-bool radeon_atom_get_tv_timings(struct radeon_device *rdev, int index,
- SET_CRTC_TIMING_PARAMETERS_PS_ALLOCATION *crtc_timing,
- int32_t *pixel_clock);
static void atombios_overscan_setup(struct drm_crtc *crtc,
struct drm_display_mode *mode,
struct drm_display_mode *adjusted_mode)
@@ -248,18 +244,18 @@ void atombios_crtc_dpms(struct drm_crtc *crtc, int mode)
switch (mode) {
case DRM_MODE_DPMS_ON:
+ atombios_enable_crtc(crtc, 1);
if (ASIC_IS_DCE3(rdev))
atombios_enable_crtc_memreq(crtc, 1);
- atombios_enable_crtc(crtc, 1);
atombios_blank_crtc(crtc, 0);
break;
case DRM_MODE_DPMS_STANDBY:
case DRM_MODE_DPMS_SUSPEND:
case DRM_MODE_DPMS_OFF:
atombios_blank_crtc(crtc, 1);
- atombios_enable_crtc(crtc, 0);
if (ASIC_IS_DCE3(rdev))
atombios_enable_crtc_memreq(crtc, 0);
+ atombios_enable_crtc(crtc, 0);
break;
}
@@ -270,59 +266,147 @@ void atombios_crtc_dpms(struct drm_crtc *crtc, int mode)
static void
atombios_set_crtc_dtd_timing(struct drm_crtc *crtc,
- SET_CRTC_USING_DTD_TIMING_PARAMETERS * crtc_param)
+ struct drm_display_mode *mode)
{
+ struct radeon_crtc *radeon_crtc = to_radeon_crtc(crtc);
struct drm_device *dev = crtc->dev;
struct radeon_device *rdev = dev->dev_private;
- SET_CRTC_USING_DTD_TIMING_PARAMETERS conv_param;
+ SET_CRTC_USING_DTD_TIMING_PARAMETERS args;
int index = GetIndexIntoMasterTable(COMMAND, SetCRTC_UsingDTDTiming);
+ u16 misc = 0;
- conv_param.usH_Size = cpu_to_le16(crtc_param->usH_Size);
- conv_param.usH_Blanking_Time =
- cpu_to_le16(crtc_param->usH_Blanking_Time);
- conv_param.usV_Size = cpu_to_le16(crtc_param->usV_Size);
- conv_param.usV_Blanking_Time =
- cpu_to_le16(crtc_param->usV_Blanking_Time);
- conv_param.usH_SyncOffset = cpu_to_le16(crtc_param->usH_SyncOffset);
- conv_param.usH_SyncWidth = cpu_to_le16(crtc_param->usH_SyncWidth);
- conv_param.usV_SyncOffset = cpu_to_le16(crtc_param->usV_SyncOffset);
- conv_param.usV_SyncWidth = cpu_to_le16(crtc_param->usV_SyncWidth);
- conv_param.susModeMiscInfo.usAccess =
- cpu_to_le16(crtc_param->susModeMiscInfo.usAccess);
- conv_param.ucCRTC = crtc_param->ucCRTC;
+ memset(&args, 0, sizeof(args));
+ args.usH_Size = cpu_to_le16(mode->crtc_hdisplay);
+ args.usH_Blanking_Time =
+ cpu_to_le16(mode->crtc_hblank_end - mode->crtc_hdisplay);
+ args.usV_Size = cpu_to_le16(mode->crtc_vdisplay);
+ args.usV_Blanking_Time =
+ cpu_to_le16(mode->crtc_vblank_end - mode->crtc_vdisplay);
+ args.usH_SyncOffset =
+ cpu_to_le16(mode->crtc_hsync_start - mode->crtc_hdisplay);
+ args.usH_SyncWidth =
+ cpu_to_le16(mode->crtc_hsync_end - mode->crtc_hsync_start);
+ args.usV_SyncOffset =
+ cpu_to_le16(mode->crtc_vsync_start - mode->crtc_vdisplay);
+ args.usV_SyncWidth =
+ cpu_to_le16(mode->crtc_vsync_end - mode->crtc_vsync_start);
+ /*args.ucH_Border = mode->hborder;*/
+ /*args.ucV_Border = mode->vborder;*/
+
+ if (mode->flags & DRM_MODE_FLAG_NVSYNC)
+ misc |= ATOM_VSYNC_POLARITY;
+ if (mode->flags & DRM_MODE_FLAG_NHSYNC)
+ misc |= ATOM_HSYNC_POLARITY;
+ if (mode->flags & DRM_MODE_FLAG_CSYNC)
+ misc |= ATOM_COMPOSITESYNC;
+ if (mode->flags & DRM_MODE_FLAG_INTERLACE)
+ misc |= ATOM_INTERLACE;
+ if (mode->flags & DRM_MODE_FLAG_DBLSCAN)
+ misc |= ATOM_DOUBLE_CLOCK_MODE;
+
+ args.susModeMiscInfo.usAccess = cpu_to_le16(misc);
+ args.ucCRTC = radeon_crtc->crtc_id;
printk("executing set crtc dtd timing\n");
- atom_execute_table(rdev->mode_info.atom_context, index, (uint32_t *)&conv_param);
+ atom_execute_table(rdev->mode_info.atom_context, index, (uint32_t *)&args);
}
-void atombios_crtc_set_timing(struct drm_crtc *crtc,
- SET_CRTC_TIMING_PARAMETERS_PS_ALLOCATION *
- crtc_param)
+static void atombios_crtc_set_timing(struct drm_crtc *crtc,
+ struct drm_display_mode *mode)
{
+ struct radeon_crtc *radeon_crtc = to_radeon_crtc(crtc);
struct drm_device *dev = crtc->dev;
struct radeon_device *rdev = dev->dev_private;
- SET_CRTC_TIMING_PARAMETERS_PS_ALLOCATION conv_param;
+ SET_CRTC_TIMING_PARAMETERS_PS_ALLOCATION args;
int index = GetIndexIntoMasterTable(COMMAND, SetCRTC_Timing);
+ u16 misc = 0;
- conv_param.usH_Total = cpu_to_le16(crtc_param->usH_Total);
- conv_param.usH_Disp = cpu_to_le16(crtc_param->usH_Disp);
- conv_param.usH_SyncStart = cpu_to_le16(crtc_param->usH_SyncStart);
- conv_param.usH_SyncWidth = cpu_to_le16(crtc_param->usH_SyncWidth);
- conv_param.usV_Total = cpu_to_le16(crtc_param->usV_Total);
- conv_param.usV_Disp = cpu_to_le16(crtc_param->usV_Disp);
- conv_param.usV_SyncStart = cpu_to_le16(crtc_param->usV_SyncStart);
- conv_param.usV_SyncWidth = cpu_to_le16(crtc_param->usV_SyncWidth);
- conv_param.susModeMiscInfo.usAccess =
- cpu_to_le16(crtc_param->susModeMiscInfo.usAccess);
- conv_param.ucCRTC = crtc_param->ucCRTC;
- conv_param.ucOverscanRight = crtc_param->ucOverscanRight;
- conv_param.ucOverscanLeft = crtc_param->ucOverscanLeft;
- conv_param.ucOverscanBottom = crtc_param->ucOverscanBottom;
- conv_param.ucOverscanTop = crtc_param->ucOverscanTop;
- conv_param.ucReserved = crtc_param->ucReserved;
+ memset(&args, 0, sizeof(args));
+ args.usH_Total = cpu_to_le16(mode->crtc_htotal);
+ args.usH_Disp = cpu_to_le16(mode->crtc_hdisplay);
+ args.usH_SyncStart = cpu_to_le16(mode->crtc_hsync_start);
+ args.usH_SyncWidth =
+ cpu_to_le16(mode->crtc_hsync_end - mode->crtc_hsync_start);
+ args.usV_Total = cpu_to_le16(mode->crtc_vtotal);
+ args.usV_Disp = cpu_to_le16(mode->crtc_vdisplay);
+ args.usV_SyncStart = cpu_to_le16(mode->crtc_vsync_start);
+ args.usV_SyncWidth =
+ cpu_to_le16(mode->crtc_vsync_end - mode->crtc_vsync_start);
+
+ if (mode->flags & DRM_MODE_FLAG_NVSYNC)
+ misc |= ATOM_VSYNC_POLARITY;
+ if (mode->flags & DRM_MODE_FLAG_NHSYNC)
+ misc |= ATOM_HSYNC_POLARITY;
+ if (mode->flags & DRM_MODE_FLAG_CSYNC)
+ misc |= ATOM_COMPOSITESYNC;
+ if (mode->flags & DRM_MODE_FLAG_INTERLACE)
+ misc |= ATOM_INTERLACE;
+ if (mode->flags & DRM_MODE_FLAG_DBLSCAN)
+ misc |= ATOM_DOUBLE_CLOCK_MODE;
+
+ args.susModeMiscInfo.usAccess = cpu_to_le16(misc);
+ args.ucCRTC = radeon_crtc->crtc_id;
printk("executing set crtc timing\n");
- atom_execute_table(rdev->mode_info.atom_context, index, (uint32_t *)&conv_param);
+ atom_execute_table(rdev->mode_info.atom_context, index, (uint32_t *)&args);
+}
+
+static void atombios_set_ss(struct drm_crtc *crtc, int enable)
+{
+ struct radeon_crtc *radeon_crtc = to_radeon_crtc(crtc);
+ struct drm_device *dev = crtc->dev;
+ struct radeon_device *rdev = dev->dev_private;
+ struct drm_encoder *encoder = NULL;
+ struct radeon_encoder *radeon_encoder = NULL;
+ struct radeon_encoder_atom_dig *dig = NULL;
+ int index = GetIndexIntoMasterTable(COMMAND, EnableSpreadSpectrumOnPPLL);
+ ENABLE_SPREAD_SPECTRUM_ON_PPLL_PS_ALLOCATION args;
+ ENABLE_LVDS_SS_PARAMETERS legacy_args;
+ uint16_t percentage = 0;
+ uint8_t type = 0, step = 0, delay = 0, range = 0;
+
+ list_for_each_entry(encoder, &dev->mode_config.encoder_list, head) {
+ if (encoder->crtc == crtc) {
+ radeon_encoder = to_radeon_encoder(encoder);
+ /* only enable spread spectrum on LVDS */
+ if (radeon_encoder->devices & (ATOM_DEVICE_LCD_SUPPORT)) {
+ dig = radeon_encoder->enc_priv;
+ if (dig && dig->ss) {
+ percentage = dig->ss->percentage;
+ type = dig->ss->type;
+ step = dig->ss->step;
+ delay = dig->ss->delay;
+ range = dig->ss->range;
+ } else if (enable)
+ return;
+ } else if (enable)
+ return;
+ break;
+ }
+ }
+
+ if (!radeon_encoder)
+ return;
+
+ if (ASIC_IS_AVIVO(rdev)) {
+ memset(&args, 0, sizeof(args));
+ args.usSpreadSpectrumPercentage = cpu_to_le16(percentage);
+ args.ucSpreadSpectrumType = type;
+ args.ucSpreadSpectrumStep = step;
+ args.ucSpreadSpectrumDelay = delay;
+ args.ucSpreadSpectrumRange = range;
+ args.ucPpll = radeon_crtc->crtc_id ? ATOM_PPLL2 : ATOM_PPLL1;
+ args.ucEnable = enable;
+ atom_execute_table(rdev->mode_info.atom_context, index, (uint32_t *)&args);
+ } else {
+ memset(&legacy_args, 0, sizeof(legacy_args));
+ legacy_args.usSpreadSpectrumPercentage = cpu_to_le16(percentage);
+ legacy_args.ucSpreadSpectrumType = type;
+ legacy_args.ucSpreadSpectrumStepSize_Delay = (step & 3) << 2;
+ legacy_args.ucSpreadSpectrumStepSize_Delay |= (delay & 7) << 4;
+ legacy_args.ucEnable = enable;
+ atom_execute_table(rdev->mode_info.atom_context, index, (uint32_t *)&legacy_args);
+ }
}
void atombios_crtc_set_pll(struct drm_crtc *crtc, struct drm_display_mode *mode)
@@ -333,12 +417,13 @@ void atombios_crtc_set_pll(struct drm_crtc *crtc, struct drm_display_mode *mode)
struct drm_encoder *encoder = NULL;
struct radeon_encoder *radeon_encoder = NULL;
uint8_t frev, crev;
- int index = GetIndexIntoMasterTable(COMMAND, SetPixelClock);
+ int index;
SET_PIXEL_CLOCK_PS_ALLOCATION args;
PIXEL_CLOCK_PARAMETERS *spc1_ptr;
PIXEL_CLOCK_PARAMETERS_V2 *spc2_ptr;
PIXEL_CLOCK_PARAMETERS_V3 *spc3_ptr;
- uint32_t sclock = mode->clock;
+ uint32_t pll_clock = mode->clock;
+ uint32_t adjusted_clock;
uint32_t ref_div = 0, fb_div = 0, frac_fb_div = 0, post_div = 0;
struct radeon_pll *pll;
int pll_flags = 0;
@@ -346,8 +431,6 @@ void atombios_crtc_set_pll(struct drm_crtc *crtc, struct drm_display_mode *mode)
memset(&args, 0, sizeof(args));
if (ASIC_IS_AVIVO(rdev)) {
- uint32_t ss_cntl;
-
if ((rdev->family == CHIP_RS600) ||
(rdev->family == CHIP_RS690) ||
(rdev->family == CHIP_RS740))
@@ -358,15 +441,6 @@ void atombios_crtc_set_pll(struct drm_crtc *crtc, struct drm_display_mode *mode)
pll_flags |= RADEON_PLL_PREFER_HIGH_FB_DIV;
else
pll_flags |= RADEON_PLL_PREFER_LOW_REF_DIV;
-
- /* disable spread spectrum clocking for now -- thanks Hedy Lamarr */
- if (radeon_crtc->crtc_id == 0) {
- ss_cntl = RREG32(AVIVO_P1PLL_INT_SS_CNTL);
- WREG32(AVIVO_P1PLL_INT_SS_CNTL, ss_cntl & ~1);
- } else {
- ss_cntl = RREG32(AVIVO_P2PLL_INT_SS_CNTL);
- WREG32(AVIVO_P2PLL_INT_SS_CNTL, ss_cntl & ~1);
- }
} else {
pll_flags |= RADEON_PLL_LEGACY;
@@ -393,14 +467,43 @@ void atombios_crtc_set_pll(struct drm_crtc *crtc, struct drm_display_mode *mode)
}
}
+ /* DCE3+ has an AdjustDisplayPll that will adjust the pixel clock
+ * accordingly based on the encoder/transmitter to work around
+ * special hw requirements.
+ */
+ if (ASIC_IS_DCE3(rdev)) {
+ ADJUST_DISPLAY_PLL_PS_ALLOCATION adjust_pll_args;
+
+ if (!encoder)
+ return;
+
+ memset(&adjust_pll_args, 0, sizeof(adjust_pll_args));
+ adjust_pll_args.usPixelClock = cpu_to_le16(mode->clock / 10);
+ adjust_pll_args.ucTransmitterID = radeon_encoder->encoder_id;
+ adjust_pll_args.ucEncodeMode = atombios_get_encoder_mode(encoder);
+
+ index = GetIndexIntoMasterTable(COMMAND, AdjustDisplayPll);
+ atom_execute_table(rdev->mode_info.atom_context,
+ index, (uint32_t *)&adjust_pll_args);
+ adjusted_clock = le16_to_cpu(adjust_pll_args.usPixelClock) * 10;
+ } else {
+ /* DVO wants 2x pixel clock if the DVO chip is in 12 bit mode */
+ if (ASIC_IS_AVIVO(rdev) &&
+ (radeon_encoder->encoder_id == ENCODER_OBJECT_ID_INTERNAL_KLDSCP_DVO1))
+ adjusted_clock = mode->clock * 2;
+ else
+ adjusted_clock = mode->clock;
+ }
+
if (radeon_crtc->crtc_id == 0)
pll = &rdev->clock.p1pll;
else
pll = &rdev->clock.p2pll;
- radeon_compute_pll(pll, mode->clock, &sclock, &fb_div, &frac_fb_div,
+ radeon_compute_pll(pll, adjusted_clock, &pll_clock, &fb_div, &frac_fb_div,
&ref_div, &post_div, pll_flags);
+ index = GetIndexIntoMasterTable(COMMAND, SetPixelClock);
atom_parse_cmd_header(rdev->mode_info.atom_context, index, &frev,
&crev);
@@ -409,7 +512,7 @@ void atombios_crtc_set_pll(struct drm_crtc *crtc, struct drm_display_mode *mode)
switch (crev) {
case 1:
spc1_ptr = (PIXEL_CLOCK_PARAMETERS *) & args.sPCLKInput;
- spc1_ptr->usPixelClock = cpu_to_le16(sclock);
+ spc1_ptr->usPixelClock = cpu_to_le16(mode->clock / 10);
spc1_ptr->usRefDiv = cpu_to_le16(ref_div);
spc1_ptr->usFbDiv = cpu_to_le16(fb_div);
spc1_ptr->ucFracFbDiv = frac_fb_div;
@@ -422,7 +525,7 @@ void atombios_crtc_set_pll(struct drm_crtc *crtc, struct drm_display_mode *mode)
case 2:
spc2_ptr =
(PIXEL_CLOCK_PARAMETERS_V2 *) & args.sPCLKInput;
- spc2_ptr->usPixelClock = cpu_to_le16(sclock);
+ spc2_ptr->usPixelClock = cpu_to_le16(mode->clock / 10);
spc2_ptr->usRefDiv = cpu_to_le16(ref_div);
spc2_ptr->usFbDiv = cpu_to_le16(fb_div);
spc2_ptr->ucFracFbDiv = frac_fb_div;
@@ -437,7 +540,7 @@ void atombios_crtc_set_pll(struct drm_crtc *crtc, struct drm_display_mode *mode)
return;
spc3_ptr =
(PIXEL_CLOCK_PARAMETERS_V3 *) & args.sPCLKInput;
- spc3_ptr->usPixelClock = cpu_to_le16(sclock);
+ spc3_ptr->usPixelClock = cpu_to_le16(mode->clock / 10);
spc3_ptr->usRefDiv = cpu_to_le16(ref_div);
spc3_ptr->usFbDiv = cpu_to_le16(fb_div);
spc3_ptr->ucFracFbDiv = frac_fb_div;
@@ -527,6 +630,16 @@ int atombios_crtc_set_base(struct drm_crtc *crtc, int x, int y,
WREG32(AVIVO_D1VGA_CONTROL, 0);
else
WREG32(AVIVO_D2VGA_CONTROL, 0);
+
+ if (rdev->family >= CHIP_RV770) {
+ if (radeon_crtc->crtc_id) {
+ WREG32(R700_D2GRPH_PRIMARY_SURFACE_ADDRESS_HIGH, 0);
+ WREG32(R700_D2GRPH_SECONDARY_SURFACE_ADDRESS_HIGH, 0);
+ } else {
+ WREG32(R700_D1GRPH_PRIMARY_SURFACE_ADDRESS_HIGH, 0);
+ WREG32(R700_D1GRPH_SECONDARY_SURFACE_ADDRESS_HIGH, 0);
+ }
+ }
WREG32(AVIVO_D1GRPH_PRIMARY_SURFACE_ADDRESS + radeon_crtc->crtc_offset,
(u32) fb_location);
WREG32(AVIVO_D1GRPH_SECONDARY_SURFACE_ADDRESS +
@@ -563,6 +676,10 @@ int atombios_crtc_set_base(struct drm_crtc *crtc, int x, int y,
radeon_fb = to_radeon_framebuffer(old_fb);
radeon_gem_object_unpin(radeon_fb->obj);
}
+
+ /* Bytes per pixel may have changed */
+ radeon_bandwidth_update(rdev);
+
return 0;
}
@@ -574,134 +691,24 @@ int atombios_crtc_mode_set(struct drm_crtc *crtc,
struct radeon_crtc *radeon_crtc = to_radeon_crtc(crtc);
struct drm_device *dev = crtc->dev;
struct radeon_device *rdev = dev->dev_private;
- struct drm_encoder *encoder;
- SET_CRTC_TIMING_PARAMETERS_PS_ALLOCATION crtc_timing;
- int need_tv_timings = 0;
- bool ret;
/* TODO color tiling */
- memset(&crtc_timing, 0, sizeof(crtc_timing));
-
- list_for_each_entry(encoder, &dev->mode_config.encoder_list, head) {
- /* find tv std */
- if (encoder->crtc == crtc) {
- struct radeon_encoder *radeon_encoder = to_radeon_encoder(encoder);
-
- if (radeon_encoder->active_device & ATOM_DEVICE_TV_SUPPORT) {
- struct radeon_encoder_atom_dac *tv_dac = radeon_encoder->enc_priv;
- if (tv_dac) {
- if (tv_dac->tv_std == TV_STD_NTSC ||
- tv_dac->tv_std == TV_STD_NTSC_J ||
- tv_dac->tv_std == TV_STD_PAL_M)
- need_tv_timings = 1;
- else
- need_tv_timings = 2;
- break;
- }
- }
- }
- }
-
- crtc_timing.ucCRTC = radeon_crtc->crtc_id;
- if (need_tv_timings) {
- ret = radeon_atom_get_tv_timings(rdev, need_tv_timings - 1,
- &crtc_timing, &adjusted_mode->clock);
- if (ret == false)
- need_tv_timings = 0;
- }
-
- if (!need_tv_timings) {
- crtc_timing.usH_Total = adjusted_mode->crtc_htotal;
- crtc_timing.usH_Disp = adjusted_mode->crtc_hdisplay;
- crtc_timing.usH_SyncStart = adjusted_mode->crtc_hsync_start;
- crtc_timing.usH_SyncWidth =
- adjusted_mode->crtc_hsync_end - adjusted_mode->crtc_hsync_start;
-
- crtc_timing.usV_Total = adjusted_mode->crtc_vtotal;
- crtc_timing.usV_Disp = adjusted_mode->crtc_vdisplay;
- crtc_timing.usV_SyncStart = adjusted_mode->crtc_vsync_start;
- crtc_timing.usV_SyncWidth =
- adjusted_mode->crtc_vsync_end - adjusted_mode->crtc_vsync_start;
-
- if (adjusted_mode->flags & DRM_MODE_FLAG_NVSYNC)
- crtc_timing.susModeMiscInfo.usAccess |= ATOM_VSYNC_POLARITY;
-
- if (adjusted_mode->flags & DRM_MODE_FLAG_NHSYNC)
- crtc_timing.susModeMiscInfo.usAccess |= ATOM_HSYNC_POLARITY;
-
- if (adjusted_mode->flags & DRM_MODE_FLAG_CSYNC)
- crtc_timing.susModeMiscInfo.usAccess |= ATOM_COMPOSITESYNC;
-
- if (adjusted_mode->flags & DRM_MODE_FLAG_INTERLACE)
- crtc_timing.susModeMiscInfo.usAccess |= ATOM_INTERLACE;
-
- if (adjusted_mode->flags & DRM_MODE_FLAG_DBLSCAN)
- crtc_timing.susModeMiscInfo.usAccess |= ATOM_DOUBLE_CLOCK_MODE;
- }
+ atombios_set_ss(crtc, 0);
atombios_crtc_set_pll(crtc, adjusted_mode);
- atombios_crtc_set_timing(crtc, &crtc_timing);
+ atombios_set_ss(crtc, 1);
+ atombios_crtc_set_timing(crtc, adjusted_mode);
if (ASIC_IS_AVIVO(rdev))
atombios_crtc_set_base(crtc, x, y, old_fb);
else {
- if (radeon_crtc->crtc_id == 0) {
- SET_CRTC_USING_DTD_TIMING_PARAMETERS crtc_dtd_timing;
- memset(&crtc_dtd_timing, 0, sizeof(crtc_dtd_timing));
-
- /* setup FP shadow regs on R4xx */
- crtc_dtd_timing.ucCRTC = radeon_crtc->crtc_id;
- crtc_dtd_timing.usH_Size = adjusted_mode->crtc_hdisplay;
- crtc_dtd_timing.usV_Size = adjusted_mode->crtc_vdisplay;
- crtc_dtd_timing.usH_Blanking_Time =
- adjusted_mode->crtc_hblank_end -
- adjusted_mode->crtc_hdisplay;
- crtc_dtd_timing.usV_Blanking_Time =
- adjusted_mode->crtc_vblank_end -
- adjusted_mode->crtc_vdisplay;
- crtc_dtd_timing.usH_SyncOffset =
- adjusted_mode->crtc_hsync_start -
- adjusted_mode->crtc_hdisplay;
- crtc_dtd_timing.usV_SyncOffset =
- adjusted_mode->crtc_vsync_start -
- adjusted_mode->crtc_vdisplay;
- crtc_dtd_timing.usH_SyncWidth =
- adjusted_mode->crtc_hsync_end -
- adjusted_mode->crtc_hsync_start;
- crtc_dtd_timing.usV_SyncWidth =
- adjusted_mode->crtc_vsync_end -
- adjusted_mode->crtc_vsync_start;
- /* crtc_dtd_timing.ucH_Border = adjusted_mode->crtc_hborder; */
- /* crtc_dtd_timing.ucV_Border = adjusted_mode->crtc_vborder; */
-
- if (adjusted_mode->flags & DRM_MODE_FLAG_NVSYNC)
- crtc_dtd_timing.susModeMiscInfo.usAccess |=
- ATOM_VSYNC_POLARITY;
-
- if (adjusted_mode->flags & DRM_MODE_FLAG_NHSYNC)
- crtc_dtd_timing.susModeMiscInfo.usAccess |=
- ATOM_HSYNC_POLARITY;
-
- if (adjusted_mode->flags & DRM_MODE_FLAG_CSYNC)
- crtc_dtd_timing.susModeMiscInfo.usAccess |=
- ATOM_COMPOSITESYNC;
-
- if (adjusted_mode->flags & DRM_MODE_FLAG_INTERLACE)
- crtc_dtd_timing.susModeMiscInfo.usAccess |=
- ATOM_INTERLACE;
-
- if (adjusted_mode->flags & DRM_MODE_FLAG_DBLSCAN)
- crtc_dtd_timing.susModeMiscInfo.usAccess |=
- ATOM_DOUBLE_CLOCK_MODE;
-
- atombios_set_crtc_dtd_timing(crtc, &crtc_dtd_timing);
- }
+ if (radeon_crtc->crtc_id == 0)
+ atombios_set_crtc_dtd_timing(crtc, adjusted_mode);
radeon_crtc_set_base(crtc, x, y, old_fb);
radeon_legacy_atom_set_surface(crtc);
}
atombios_overscan_setup(crtc, mode, adjusted_mode);
atombios_scaler_setup(crtc);
- radeon_bandwidth_update(rdev);
return 0;
}
@@ -733,6 +740,7 @@ static const struct drm_crtc_helper_funcs atombios_helper_funcs = {
.mode_set_base = atombios_crtc_set_base,
.prepare = atombios_crtc_prepare,
.commit = atombios_crtc_commit,
+ .load_lut = radeon_crtc_load_lut,
};
void radeon_atombios_init_crtc(struct drm_device *dev,
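A stand-alone sketch (not part of the patch) of the DTD timing arithmetic used in atombios_set_crtc_dtd_timing() above; the 1024x768 timings below are hypothetical values, not taken from the patch.

#include <stdio.h>

int main(void)
{
	/* hypothetical 1024x768@60 horizontal timings */
	int hdisplay = 1024, hsync_start = 1048, hsync_end = 1184, hblank_end = 1344;

	int h_blanking   = hblank_end - hdisplay;	/* 320 -> usH_Blanking_Time */
	int h_syncoffset = hsync_start - hdisplay;	/*  24 -> usH_SyncOffset    */
	int h_syncwidth  = hsync_end - hsync_start;	/* 136 -> usH_SyncWidth     */

	printf("blanking=%d sync_offset=%d sync_width=%d\n",
	       h_blanking, h_syncoffset, h_syncwidth);
	return 0;
}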
diff --git a/drivers/gpu/drm/radeon/mkregtable.c b/drivers/gpu/drm/radeon/mkregtable.c
index fb211e585de..0d79577c157 100644
--- a/drivers/gpu/drm/radeon/mkregtable.c
+++ b/drivers/gpu/drm/radeon/mkregtable.c
@@ -561,7 +561,7 @@ struct table {
char *gpu_prefix;
};
-struct offset *offset_new(unsigned o)
+static struct offset *offset_new(unsigned o)
{
struct offset *offset;
@@ -573,12 +573,12 @@ struct offset *offset_new(unsigned o)
return offset;
}
-void table_offset_add(struct table *t, struct offset *offset)
+static void table_offset_add(struct table *t, struct offset *offset)
{
list_add_tail(&offset->list, &t->offsets);
}
-void table_init(struct table *t)
+static void table_init(struct table *t)
{
INIT_LIST_HEAD(&t->offsets);
t->offset_max = 0;
@@ -586,7 +586,7 @@ void table_init(struct table *t)
t->table = NULL;
}
-void table_print(struct table *t)
+static void table_print(struct table *t)
{
unsigned nlloop, i, j, n, c, id;
@@ -611,7 +611,7 @@ void table_print(struct table *t)
printf("};\n");
}
-int table_build(struct table *t)
+static int table_build(struct table *t)
{
struct offset *offset;
unsigned i, m;
@@ -631,7 +631,7 @@ int table_build(struct table *t)
}
static char gpu_name[10];
-int parser_auth(struct table *t, const char *filename)
+static int parser_auth(struct table *t, const char *filename)
{
FILE *file;
regex_t mask_rex;
diff --git a/drivers/gpu/drm/radeon/r100.c b/drivers/gpu/drm/radeon/r100.c
index e6cce24de80..c9e93eabcf1 100644
--- a/drivers/gpu/drm/radeon/r100.c
+++ b/drivers/gpu/drm/radeon/r100.c
@@ -32,6 +32,9 @@
#include "radeon_reg.h"
#include "radeon.h"
#include "r100d.h"
+#include "rs100d.h"
+#include "rv200d.h"
+#include "rv250d.h"
#include <linux/firmware.h>
#include <linux/platform_device.h>
@@ -60,18 +63,7 @@ MODULE_FIRMWARE(FIRMWARE_R520);
/* This files gather functions specifics to:
* r100,rv100,rs100,rv200,rs200,r200,rv250,rs300,rv280
- *
- * Some of these functions might be used by newer ASICs.
*/
-int r200_init(struct radeon_device *rdev);
-void r100_hdp_reset(struct radeon_device *rdev);
-void r100_gpu_init(struct radeon_device *rdev);
-int r100_gui_wait_for_idle(struct radeon_device *rdev);
-int r100_mc_wait_for_idle(struct radeon_device *rdev);
-void r100_gpu_wait_for_vsync(struct radeon_device *rdev);
-void r100_gpu_wait_for_vsync2(struct radeon_device *rdev);
-int r100_debugfs_mc_info_init(struct radeon_device *rdev);
-
/*
* PCI GART
@@ -152,136 +144,6 @@ void r100_pci_gart_fini(struct radeon_device *rdev)
radeon_gart_fini(rdev);
}
-
-/*
- * MC
- */
-void r100_mc_disable_clients(struct radeon_device *rdev)
-{
- uint32_t ov0_scale_cntl, crtc_ext_cntl, crtc_gen_cntl, crtc2_gen_cntl;
-
- /* FIXME: is this function correct for rs100,rs200,rs300 ? */
- if (r100_gui_wait_for_idle(rdev)) {
- printk(KERN_WARNING "Failed to wait GUI idle while "
- "programming pipes. Bad things might happen.\n");
- }
-
- /* stop display and memory access */
- ov0_scale_cntl = RREG32(RADEON_OV0_SCALE_CNTL);
- WREG32(RADEON_OV0_SCALE_CNTL, ov0_scale_cntl & ~RADEON_SCALER_ENABLE);
- crtc_ext_cntl = RREG32(RADEON_CRTC_EXT_CNTL);
- WREG32(RADEON_CRTC_EXT_CNTL, crtc_ext_cntl | RADEON_CRTC_DISPLAY_DIS);
- crtc_gen_cntl = RREG32(RADEON_CRTC_GEN_CNTL);
-
- r100_gpu_wait_for_vsync(rdev);
-
- WREG32(RADEON_CRTC_GEN_CNTL,
- (crtc_gen_cntl & ~(RADEON_CRTC_CUR_EN | RADEON_CRTC_ICON_EN)) |
- RADEON_CRTC_DISP_REQ_EN_B | RADEON_CRTC_EXT_DISP_EN);
-
- if (!(rdev->flags & RADEON_SINGLE_CRTC)) {
- crtc2_gen_cntl = RREG32(RADEON_CRTC2_GEN_CNTL);
-
- r100_gpu_wait_for_vsync2(rdev);
- WREG32(RADEON_CRTC2_GEN_CNTL,
- (crtc2_gen_cntl &
- ~(RADEON_CRTC2_CUR_EN | RADEON_CRTC2_ICON_EN)) |
- RADEON_CRTC2_DISP_REQ_EN_B);
- }
-
- udelay(500);
-}
-
-void r100_mc_setup(struct radeon_device *rdev)
-{
- uint32_t tmp;
- int r;
-
- r = r100_debugfs_mc_info_init(rdev);
- if (r) {
- DRM_ERROR("Failed to register debugfs file for R100 MC !\n");
- }
- /* Write VRAM size in case we are limiting it */
- WREG32(RADEON_CONFIG_MEMSIZE, rdev->mc.real_vram_size);
- /* Novell bug 204882 for RN50/M6/M7 with 8/16/32MB VRAM,
- * if the aperture is 64MB but we have 32MB VRAM
- * we report only 32MB VRAM but we have to set MC_FB_LOCATION
- * to 64MB, otherwise the gpu accidentially dies */
- tmp = rdev->mc.vram_location + rdev->mc.mc_vram_size - 1;
- tmp = REG_SET(RADEON_MC_FB_TOP, tmp >> 16);
- tmp |= REG_SET(RADEON_MC_FB_START, rdev->mc.vram_location >> 16);
- WREG32(RADEON_MC_FB_LOCATION, tmp);
-
- /* Enable bus mastering */
- tmp = RREG32(RADEON_BUS_CNTL) & ~RADEON_BUS_MASTER_DIS;
- WREG32(RADEON_BUS_CNTL, tmp);
-
- if (rdev->flags & RADEON_IS_AGP) {
- tmp = rdev->mc.gtt_location + rdev->mc.gtt_size - 1;
- tmp = REG_SET(RADEON_MC_AGP_TOP, tmp >> 16);
- tmp |= REG_SET(RADEON_MC_AGP_START, rdev->mc.gtt_location >> 16);
- WREG32(RADEON_MC_AGP_LOCATION, tmp);
- WREG32(RADEON_AGP_BASE, rdev->mc.agp_base);
- } else {
- WREG32(RADEON_MC_AGP_LOCATION, 0x0FFFFFFF);
- WREG32(RADEON_AGP_BASE, 0);
- }
-
- tmp = RREG32(RADEON_HOST_PATH_CNTL) & RADEON_HDP_APER_CNTL;
- tmp |= (7 << 28);
- WREG32(RADEON_HOST_PATH_CNTL, tmp | RADEON_HDP_SOFT_RESET | RADEON_HDP_READ_BUFFER_INVALIDATE);
- (void)RREG32(RADEON_HOST_PATH_CNTL);
- WREG32(RADEON_HOST_PATH_CNTL, tmp);
- (void)RREG32(RADEON_HOST_PATH_CNTL);
-}
-
-int r100_mc_init(struct radeon_device *rdev)
-{
- int r;
-
- if (r100_debugfs_rbbm_init(rdev)) {
- DRM_ERROR("Failed to register debugfs file for RBBM !\n");
- }
-
- r100_gpu_init(rdev);
- /* Disable gart which also disable out of gart access */
- r100_pci_gart_disable(rdev);
-
- /* Setup GPU memory space */
- rdev->mc.gtt_location = 0xFFFFFFFFUL;
- if (rdev->flags & RADEON_IS_AGP) {
- r = radeon_agp_init(rdev);
- if (r) {
- printk(KERN_WARNING "[drm] Disabling AGP\n");
- rdev->flags &= ~RADEON_IS_AGP;
- rdev->mc.gtt_size = radeon_gart_size * 1024 * 1024;
- } else {
- rdev->mc.gtt_location = rdev->mc.agp_base;
- }
- }
- r = radeon_mc_setup(rdev);
- if (r) {
- return r;
- }
-
- r100_mc_disable_clients(rdev);
- if (r100_mc_wait_for_idle(rdev)) {
- printk(KERN_WARNING "Failed to wait MC idle while "
- "programming pipes. Bad things might happen.\n");
- }
-
- r100_mc_setup(rdev);
- return 0;
-}
-
-void r100_mc_fini(struct radeon_device *rdev)
-{
-}
-
-
-/*
- * Interrupts
- */
int r100_irq_set(struct radeon_device *rdev)
{
uint32_t tmp = 0;
@@ -324,7 +186,7 @@ static inline uint32_t r100_irq_ack(struct radeon_device *rdev)
int r100_irq_process(struct radeon_device *rdev)
{
- uint32_t status;
+ uint32_t status, msi_rearm;
status = r100_irq_ack(rdev);
if (!status) {
@@ -347,6 +209,21 @@ int r100_irq_process(struct radeon_device *rdev)
}
status = r100_irq_ack(rdev);
}
+ if (rdev->msi_enabled) {
+ switch (rdev->family) {
+ case CHIP_RS400:
+ case CHIP_RS480:
+ msi_rearm = RREG32(RADEON_AIC_CNTL) & ~RS400_MSI_REARM;
+ WREG32(RADEON_AIC_CNTL, msi_rearm);
+ WREG32(RADEON_AIC_CNTL, msi_rearm | RS400_MSI_REARM);
+ break;
+ default:
+ msi_rearm = RREG32(RADEON_MSI_REARM_EN) & ~RV370_MSI_REARM_EN;
+ WREG32(RADEON_MSI_REARM_EN, msi_rearm);
+ WREG32(RADEON_MSI_REARM_EN, msi_rearm | RV370_MSI_REARM_EN);
+ break;
+ }
+ }
return IRQ_HANDLED;
}
@@ -358,10 +235,6 @@ u32 r100_get_vblank_counter(struct radeon_device *rdev, int crtc)
return RREG32(RADEON_CRTC2_CRNT_FRAME);
}
-
-/*
- * Fence emission
- */
void r100_fence_ring_emit(struct radeon_device *rdev,
struct radeon_fence *fence)
{
@@ -377,16 +250,12 @@ void r100_fence_ring_emit(struct radeon_device *rdev,
radeon_ring_write(rdev, RADEON_SW_INT_FIRE);
}
-
-/*
- * Writeback
- */
int r100_wb_init(struct radeon_device *rdev)
{
int r;
if (rdev->wb.wb_obj == NULL) {
- r = radeon_object_create(rdev, NULL, 4096,
+ r = radeon_object_create(rdev, NULL, RADEON_GPU_PAGE_SIZE,
true,
RADEON_GEM_DOMAIN_GTT,
false, &rdev->wb.wb_obj);
@@ -504,10 +373,6 @@ int r100_copy_blit(struct radeon_device *rdev,
return r;
}
-
-/*
- * CP
- */
static int r100_cp_wait_for_idle(struct radeon_device *rdev)
{
unsigned i;
@@ -612,6 +477,7 @@ static int r100_cp_init_microcode(struct radeon_device *rdev)
}
return err;
}
+
static void r100_cp_load_microcode(struct radeon_device *rdev)
{
const __be32 *fw_data;
@@ -712,19 +578,19 @@ int r100_cp_init(struct radeon_device *rdev, unsigned ring_size)
indirect1_start = 16;
/* cp setup */
WREG32(0x718, pre_write_timer | (pre_write_limit << 28));
- WREG32(RADEON_CP_RB_CNTL,
-#ifdef __BIG_ENDIAN
- RADEON_BUF_SWAP_32BIT |
-#endif
- REG_SET(RADEON_RB_BUFSZ, rb_bufsz) |
+ tmp = (REG_SET(RADEON_RB_BUFSZ, rb_bufsz) |
REG_SET(RADEON_RB_BLKSZ, rb_blksz) |
REG_SET(RADEON_MAX_FETCH, max_fetch) |
RADEON_RB_NO_UPDATE);
+#ifdef __BIG_ENDIAN
+ tmp |= RADEON_BUF_SWAP_32BIT;
+#endif
+ WREG32(RADEON_CP_RB_CNTL, tmp);
+
/* Set ring address */
DRM_INFO("radeon: ring at 0x%016lX\n", (unsigned long)rdev->cp.gpu_addr);
WREG32(RADEON_CP_RB_BASE, rdev->cp.gpu_addr);
/* Force read & write ptr to 0 */
- tmp = RREG32(RADEON_CP_RB_CNTL);
WREG32(RADEON_CP_RB_CNTL, tmp | RADEON_RB_RPTR_WR_ENA);
WREG32(RADEON_CP_RB_RPTR_WR, 0);
WREG32(RADEON_CP_RB_WPTR, 0);
@@ -978,7 +844,7 @@ int r100_cs_packet_parse_vline(struct radeon_cs_parser *p)
header = radeon_get_ib_value(p, h_idx);
crtc_id = radeon_get_ib_value(p, h_idx + 5);
- reg = header >> 2;
+ reg = CP_PACKET0_GET_REG(header);
mutex_lock(&p->rdev->ddev->mode_config.mutex);
obj = drm_mode_object_find(p->rdev->ddev, crtc_id, DRM_MODE_OBJECT_CRTC);
if (!obj) {
@@ -1990,7 +1856,7 @@ void r100_pll_wreg(struct radeon_device *rdev, uint32_t reg, uint32_t v)
r100_pll_errata_after_data(rdev);
}
-int r100_init(struct radeon_device *rdev)
+void r100_set_safe_registers(struct radeon_device *rdev)
{
if (ASIC_IS_RN50(rdev)) {
rdev->config.r100.reg_safe_bm = rn50_reg_safe_bm;
@@ -1999,9 +1865,8 @@ int r100_init(struct radeon_device *rdev)
rdev->config.r100.reg_safe_bm = r100_reg_safe_bm;
rdev->config.r100.reg_safe_bm_size = ARRAY_SIZE(r100_reg_safe_bm);
} else {
- return r200_init(rdev);
+ r200_set_safe_registers(rdev);
}
- return 0;
}
/*
@@ -2299,9 +2164,11 @@ void r100_bandwidth_update(struct radeon_device *rdev)
mode1 = &rdev->mode_info.crtcs[0]->base.mode;
pixel_bytes1 = rdev->mode_info.crtcs[0]->base.fb->bits_per_pixel / 8;
}
- if (rdev->mode_info.crtcs[1]->base.enabled) {
- mode2 = &rdev->mode_info.crtcs[1]->base.mode;
- pixel_bytes2 = rdev->mode_info.crtcs[1]->base.fb->bits_per_pixel / 8;
+ if (!(rdev->flags & RADEON_SINGLE_CRTC)) {
+ if (rdev->mode_info.crtcs[1]->base.enabled) {
+ mode2 = &rdev->mode_info.crtcs[1]->base.mode;
+ pixel_bytes2 = rdev->mode_info.crtcs[1]->base.fb->bits_per_pixel / 8;
+ }
}
min_mem_eff.full = rfixed_const_8(0);
@@ -2512,7 +2379,7 @@ void r100_bandwidth_update(struct radeon_device *rdev)
/*
Find the total latency for the display data.
*/
- disp_latency_overhead.full = rfixed_const(80);
+ disp_latency_overhead.full = rfixed_const(8);
disp_latency_overhead.full = rfixed_div(disp_latency_overhead, sclk_ff);
mc_latency_mclk.full += disp_latency_overhead.full + cur_latency_mclk.full;
mc_latency_sclk.full += disp_latency_overhead.full + cur_latency_sclk.full;
@@ -2710,8 +2577,11 @@ void r100_bandwidth_update(struct radeon_device *rdev)
static inline void r100_cs_track_texture_print(struct r100_cs_track_texture *t)
{
DRM_ERROR("pitch %d\n", t->pitch);
+ DRM_ERROR("use_pitch %d\n", t->use_pitch);
DRM_ERROR("width %d\n", t->width);
+ DRM_ERROR("width_11 %d\n", t->width_11);
DRM_ERROR("height %d\n", t->height);
+ DRM_ERROR("height_11 %d\n", t->height_11);
DRM_ERROR("num levels %d\n", t->num_levels);
DRM_ERROR("depth %d\n", t->txdepth);
DRM_ERROR("bpp %d\n", t->cpp);
@@ -2771,15 +2641,17 @@ static int r100_cs_track_texture_check(struct radeon_device *rdev,
else
w = track->textures[u].pitch / (1 << i);
} else {
- w = track->textures[u].width / (1 << i);
+ w = track->textures[u].width;
if (rdev->family >= CHIP_RV515)
w |= track->textures[u].width_11;
+ w = w / (1 << i);
if (track->textures[u].roundup_w)
w = roundup_pow_of_two(w);
}
- h = track->textures[u].height / (1 << i);
+ h = track->textures[u].height;
if (rdev->family >= CHIP_RV515)
h |= track->textures[u].height_11;
+ h = h / (1 << i);
if (track->textures[u].roundup_h)
h = roundup_pow_of_two(h);
size += w * h;
@@ -3114,7 +2986,7 @@ void r100_mc_stop(struct radeon_device *rdev, struct r100_mc_save *save)
WREG32(R_000740_CP_CSQ_CNTL, 0);
/* Save few CRTC registers */
- save->GENMO_WT = RREG32(R_0003C0_GENMO_WT);
+ save->GENMO_WT = RREG8(R_0003C2_GENMO_WT);
save->CRTC_EXT_CNTL = RREG32(R_000054_CRTC_EXT_CNTL);
save->CRTC_GEN_CNTL = RREG32(R_000050_CRTC_GEN_CNTL);
save->CUR_OFFSET = RREG32(R_000260_CUR_OFFSET);
@@ -3124,7 +2996,7 @@ void r100_mc_stop(struct radeon_device *rdev, struct r100_mc_save *save)
}
/* Disable VGA aperture access */
- WREG32(R_0003C0_GENMO_WT, C_0003C0_VGA_RAM_EN & save->GENMO_WT);
+ WREG8(R_0003C2_GENMO_WT, C_0003C2_VGA_RAM_EN & save->GENMO_WT);
/* Disable cursor, overlay, crtc */
WREG32(R_000260_CUR_OFFSET, save->CUR_OFFSET | S_000260_CUR_LOCK(1));
WREG32(R_000054_CRTC_EXT_CNTL, save->CRTC_EXT_CNTL |
@@ -3156,10 +3028,264 @@ void r100_mc_resume(struct radeon_device *rdev, struct r100_mc_save *save)
rdev->mc.vram_location);
}
/* Restore CRTC registers */
- WREG32(R_0003C0_GENMO_WT, save->GENMO_WT);
+ WREG8(R_0003C2_GENMO_WT, save->GENMO_WT);
WREG32(R_000054_CRTC_EXT_CNTL, save->CRTC_EXT_CNTL);
WREG32(R_000050_CRTC_GEN_CNTL, save->CRTC_GEN_CNTL);
if (!(rdev->flags & RADEON_SINGLE_CRTC)) {
WREG32(R_0003F8_CRTC2_GEN_CNTL, save->CRTC2_GEN_CNTL);
}
}
+
+void r100_vga_render_disable(struct radeon_device *rdev)
+{
+ u32 tmp;
+
+ tmp = RREG8(R_0003C2_GENMO_WT);
+ WREG8(R_0003C2_GENMO_WT, C_0003C2_VGA_RAM_EN & tmp);
+}
+
+static void r100_debugfs(struct radeon_device *rdev)
+{
+ int r;
+
+ r = r100_debugfs_mc_info_init(rdev);
+ if (r)
+ dev_warn(rdev->dev, "Failed to create r100_mc debugfs file.\n");
+}
+
+static void r100_mc_program(struct radeon_device *rdev)
+{
+ struct r100_mc_save save;
+
+ /* Stops all mc clients */
+ r100_mc_stop(rdev, &save);
+ if (rdev->flags & RADEON_IS_AGP) {
+ WREG32(R_00014C_MC_AGP_LOCATION,
+ S_00014C_MC_AGP_START(rdev->mc.gtt_start >> 16) |
+ S_00014C_MC_AGP_TOP(rdev->mc.gtt_end >> 16));
+ WREG32(R_000170_AGP_BASE, lower_32_bits(rdev->mc.agp_base));
+ if (rdev->family > CHIP_RV200)
+ WREG32(R_00015C_AGP_BASE_2,
+ upper_32_bits(rdev->mc.agp_base) & 0xff);
+ } else {
+ WREG32(R_00014C_MC_AGP_LOCATION, 0x0FFFFFFF);
+ WREG32(R_000170_AGP_BASE, 0);
+ if (rdev->family > CHIP_RV200)
+ WREG32(R_00015C_AGP_BASE_2, 0);
+ }
+ /* Wait for mc idle */
+ if (r100_mc_wait_for_idle(rdev))
+ dev_warn(rdev->dev, "Wait for MC idle timeout.\n");
+ /* Program MC, should be a 32bits limited address space */
+ WREG32(R_000148_MC_FB_LOCATION,
+ S_000148_MC_FB_START(rdev->mc.vram_start >> 16) |
+ S_000148_MC_FB_TOP(rdev->mc.vram_end >> 16));
+ r100_mc_resume(rdev, &save);
+}
+
+void r100_clock_startup(struct radeon_device *rdev)
+{
+ u32 tmp;
+
+ if (radeon_dynclks != -1 && radeon_dynclks)
+ radeon_legacy_set_clock_gating(rdev, 1);
+ /* We need to force on some of the blocks */
+ tmp = RREG32_PLL(R_00000D_SCLK_CNTL);
+ tmp |= S_00000D_FORCE_CP(1) | S_00000D_FORCE_VIP(1);
+ if ((rdev->family == CHIP_RV250) || (rdev->family == CHIP_RV280))
+ tmp |= S_00000D_FORCE_DISP1(1) | S_00000D_FORCE_DISP2(1);
+ WREG32_PLL(R_00000D_SCLK_CNTL, tmp);
+}
+
+static int r100_startup(struct radeon_device *rdev)
+{
+ int r;
+
+ r100_mc_program(rdev);
+ /* Resume clock */
+ r100_clock_startup(rdev);
+ /* Initialize GPU configuration (# pipes, ...) */
+ r100_gpu_init(rdev);
+ /* Initialize GART (initialize after TTM so we can allocate
+ * memory through TTM but finalize after TTM) */
+ if (rdev->flags & RADEON_IS_PCI) {
+ r = r100_pci_gart_enable(rdev);
+ if (r)
+ return r;
+ }
+ /* Enable IRQ */
+ rdev->irq.sw_int = true;
+ r100_irq_set(rdev);
+ /* 1M ring buffer */
+ r = r100_cp_init(rdev, 1024 * 1024);
+ if (r) {
+ dev_err(rdev->dev, "failed initializing CP (%d).\n", r);
+ return r;
+ }
+ r = r100_wb_init(rdev);
+ if (r)
+ dev_err(rdev->dev, "failed initializing WB (%d).\n", r);
+ r = r100_ib_init(rdev);
+ if (r) {
+ dev_err(rdev->dev, "failed initializing IB (%d).\n", r);
+ return r;
+ }
+ return 0;
+}
+
+int r100_resume(struct radeon_device *rdev)
+{
+ /* Make sure the GART is disabled */
+ if (rdev->flags & RADEON_IS_PCI)
+ r100_pci_gart_disable(rdev);
+ /* Resume clock before doing reset */
+ r100_clock_startup(rdev);
+ /* Reset gpu before posting otherwise ATOM will enter infinite loop */
+ if (radeon_gpu_reset(rdev)) {
+ dev_warn(rdev->dev, "GPU reset failed ! (0xE40=0x%08X, 0x7C0=0x%08X)\n",
+ RREG32(R_000E40_RBBM_STATUS),
+ RREG32(R_0007C0_CP_STAT));
+ }
+ /* post */
+ radeon_combios_asic_init(rdev->ddev);
+ /* Resume clock after posting */
+ r100_clock_startup(rdev);
+ return r100_startup(rdev);
+}
+
+int r100_suspend(struct radeon_device *rdev)
+{
+ r100_cp_disable(rdev);
+ r100_wb_disable(rdev);
+ r100_irq_disable(rdev);
+ if (rdev->flags & RADEON_IS_PCI)
+ r100_pci_gart_disable(rdev);
+ return 0;
+}
+
+void r100_fini(struct radeon_device *rdev)
+{
+ r100_suspend(rdev);
+ r100_cp_fini(rdev);
+ r100_wb_fini(rdev);
+ r100_ib_fini(rdev);
+ radeon_gem_fini(rdev);
+ if (rdev->flags & RADEON_IS_PCI)
+ r100_pci_gart_fini(rdev);
+ radeon_irq_kms_fini(rdev);
+ radeon_fence_driver_fini(rdev);
+ radeon_object_fini(rdev);
+ radeon_atombios_fini(rdev);
+ kfree(rdev->bios);
+ rdev->bios = NULL;
+}
+
+int r100_mc_init(struct radeon_device *rdev)
+{
+ int r;
+ u32 tmp;
+
+ /* Setup GPU memory space */
+ rdev->mc.vram_location = 0xFFFFFFFFUL;
+ rdev->mc.gtt_location = 0xFFFFFFFFUL;
+ if (rdev->flags & RADEON_IS_IGP) {
+ tmp = G_00015C_MC_FB_START(RREG32(R_00015C_NB_TOM));
+ rdev->mc.vram_location = tmp << 16;
+ }
+ if (rdev->flags & RADEON_IS_AGP) {
+ r = radeon_agp_init(rdev);
+ if (r) {
+ printk(KERN_WARNING "[drm] Disabling AGP\n");
+ rdev->flags &= ~RADEON_IS_AGP;
+ rdev->mc.gtt_size = radeon_gart_size * 1024 * 1024;
+ } else {
+ rdev->mc.gtt_location = rdev->mc.agp_base;
+ }
+ }
+ r = radeon_mc_setup(rdev);
+ if (r)
+ return r;
+ return 0;
+}
+
+int r100_init(struct radeon_device *rdev)
+{
+ int r;
+
+ /* Register debugfs file specific to this group of asics */
+ r100_debugfs(rdev);
+ /* Disable VGA */
+ r100_vga_render_disable(rdev);
+ /* Initialize scratch registers */
+ radeon_scratch_init(rdev);
+ /* Initialize surface registers */
+ radeon_surface_init(rdev);
+ /* TODO: to disable VGA we need to use the VGA request */
+ /* BIOS*/
+ if (!radeon_get_bios(rdev)) {
+ if (ASIC_IS_AVIVO(rdev))
+ return -EINVAL;
+ }
+ if (rdev->is_atom_bios) {
+ dev_err(rdev->dev, "Expecting combios for RS400/RS480 GPU\n");
+ return -EINVAL;
+ } else {
+ r = radeon_combios_init(rdev);
+ if (r)
+ return r;
+ }
+ /* Reset gpu before posting otherwise ATOM will enter infinite loop */
+ if (radeon_gpu_reset(rdev)) {
+ dev_warn(rdev->dev,
+ "GPU reset failed ! (0xE40=0x%08X, 0x7C0=0x%08X)\n",
+ RREG32(R_000E40_RBBM_STATUS),
+ RREG32(R_0007C0_CP_STAT));
+ }
+ /* check if cards are posted or not */
+ if (!radeon_card_posted(rdev) && rdev->bios) {
+ DRM_INFO("GPU not posted. posting now...\n");
+ radeon_combios_asic_init(rdev->ddev);
+ }
+ /* Set asic errata */
+ r100_errata(rdev);
+ /* Initialize clocks */
+ radeon_get_clock_info(rdev->ddev);
+ /* Get vram informations */
+ r100_vram_info(rdev);
+ /* Initialize memory controller (also test AGP) */
+ r = r100_mc_init(rdev);
+ if (r)
+ return r;
+ /* Fence driver */
+ r = radeon_fence_driver_init(rdev);
+ if (r)
+ return r;
+ r = radeon_irq_kms_init(rdev);
+ if (r)
+ return r;
+ /* Memory manager */
+ r = radeon_object_init(rdev);
+ if (r)
+ return r;
+ if (rdev->flags & RADEON_IS_PCI) {
+ r = r100_pci_gart_init(rdev);
+ if (r)
+ return r;
+ }
+ r100_set_safe_registers(rdev);
+ rdev->accel_working = true;
+ r = r100_startup(rdev);
+ if (r) {
+ /* Something went wrong with the accel init, stop accel */
+ dev_err(rdev->dev, "Disabling GPU acceleration\n");
+ r100_suspend(rdev);
+ r100_cp_fini(rdev);
+ r100_wb_fini(rdev);
+ r100_ib_fini(rdev);
+ if (rdev->flags & RADEON_IS_PCI)
+ r100_pci_gart_fini(rdev);
+ radeon_irq_kms_fini(rdev);
+ rdev->accel_working = false;
+ }
+ return 0;
+}
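A stand-alone sketch (not part of the patch) of the MSI re-arm sequence added to r100_irq_process() above: the re-arm bit is written low and then high again to generate an edge. Register access is mocked with a plain variable and the bit position is a placeholder, not the real register layout.

#include <stdint.h>
#include <stdio.h>

#define MSI_REARM_EN	(1u << 0)	/* placeholder bit, not the hardware layout */

static uint32_t msi_reg;		/* stands in for RREG32()/WREG32() */

static void msi_rearm(void)
{
	uint32_t v = msi_reg & ~MSI_REARM_EN;	/* read and clear the re-arm bit */
	msi_reg = v;				/* write it low ...                */
	msi_reg = v | MSI_REARM_EN;		/* ... then high: edge re-arms MSI */
}

int main(void)
{
	msi_reg = MSI_REARM_EN;
	msi_rearm();
	printf("reg=0x%08x\n", (unsigned)msi_reg);
	return 0;
}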
diff --git a/drivers/gpu/drm/radeon/r100d.h b/drivers/gpu/drm/radeon/r100d.h
index c4b257ec920..df29a630c46 100644
--- a/drivers/gpu/drm/radeon/r100d.h
+++ b/drivers/gpu/drm/radeon/r100d.h
@@ -381,6 +381,24 @@
#define S_000054_VCRTC_IDX_MASTER(x) (((x) & 0x7F) << 24)
#define G_000054_VCRTC_IDX_MASTER(x) (((x) >> 24) & 0x7F)
#define C_000054_VCRTC_IDX_MASTER 0x80FFFFFF
+#define R_000148_MC_FB_LOCATION 0x000148
+#define S_000148_MC_FB_START(x) (((x) & 0xFFFF) << 0)
+#define G_000148_MC_FB_START(x) (((x) >> 0) & 0xFFFF)
+#define C_000148_MC_FB_START 0xFFFF0000
+#define S_000148_MC_FB_TOP(x) (((x) & 0xFFFF) << 16)
+#define G_000148_MC_FB_TOP(x) (((x) >> 16) & 0xFFFF)
+#define C_000148_MC_FB_TOP 0x0000FFFF
+#define R_00014C_MC_AGP_LOCATION 0x00014C
+#define S_00014C_MC_AGP_START(x) (((x) & 0xFFFF) << 0)
+#define G_00014C_MC_AGP_START(x) (((x) >> 0) & 0xFFFF)
+#define C_00014C_MC_AGP_START 0xFFFF0000
+#define S_00014C_MC_AGP_TOP(x) (((x) & 0xFFFF) << 16)
+#define G_00014C_MC_AGP_TOP(x) (((x) >> 16) & 0xFFFF)
+#define C_00014C_MC_AGP_TOP 0x0000FFFF
+#define R_000170_AGP_BASE 0x000170
+#define S_000170_AGP_BASE_ADDR(x) (((x) & 0xFFFFFFFF) << 0)
+#define G_000170_AGP_BASE_ADDR(x) (((x) >> 0) & 0xFFFFFFFF)
+#define C_000170_AGP_BASE_ADDR 0x00000000
#define R_00023C_DISPLAY_BASE_ADDR 0x00023C
#define S_00023C_DISPLAY_BASE_ADDR(x) (((x) & 0xFFFFFFFF) << 0)
#define G_00023C_DISPLAY_BASE_ADDR(x) (((x) >> 0) & 0xFFFFFFFF)
@@ -403,25 +421,25 @@
#define S_000360_CUR2_LOCK(x) (((x) & 0x1) << 31)
#define G_000360_CUR2_LOCK(x) (((x) >> 31) & 0x1)
#define C_000360_CUR2_LOCK 0x7FFFFFFF
-#define R_0003C0_GENMO_WT 0x0003C0
-#define S_0003C0_GENMO_MONO_ADDRESS_B(x) (((x) & 0x1) << 0)
-#define G_0003C0_GENMO_MONO_ADDRESS_B(x) (((x) >> 0) & 0x1)
-#define C_0003C0_GENMO_MONO_ADDRESS_B 0xFFFFFFFE
-#define S_0003C0_VGA_RAM_EN(x) (((x) & 0x1) << 1)
-#define G_0003C0_VGA_RAM_EN(x) (((x) >> 1) & 0x1)
-#define C_0003C0_VGA_RAM_EN 0xFFFFFFFD
-#define S_0003C0_VGA_CKSEL(x) (((x) & 0x3) << 2)
-#define G_0003C0_VGA_CKSEL(x) (((x) >> 2) & 0x3)
-#define C_0003C0_VGA_CKSEL 0xFFFFFFF3
-#define S_0003C0_ODD_EVEN_MD_PGSEL(x) (((x) & 0x1) << 5)
-#define G_0003C0_ODD_EVEN_MD_PGSEL(x) (((x) >> 5) & 0x1)
-#define C_0003C0_ODD_EVEN_MD_PGSEL 0xFFFFFFDF
-#define S_0003C0_VGA_HSYNC_POL(x) (((x) & 0x1) << 6)
-#define G_0003C0_VGA_HSYNC_POL(x) (((x) >> 6) & 0x1)
-#define C_0003C0_VGA_HSYNC_POL 0xFFFFFFBF
-#define S_0003C0_VGA_VSYNC_POL(x) (((x) & 0x1) << 7)
-#define G_0003C0_VGA_VSYNC_POL(x) (((x) >> 7) & 0x1)
-#define C_0003C0_VGA_VSYNC_POL 0xFFFFFF7F
+#define R_0003C2_GENMO_WT 0x0003C0
+#define S_0003C2_GENMO_MONO_ADDRESS_B(x) (((x) & 0x1) << 0)
+#define G_0003C2_GENMO_MONO_ADDRESS_B(x) (((x) >> 0) & 0x1)
+#define C_0003C2_GENMO_MONO_ADDRESS_B 0xFE
+#define S_0003C2_VGA_RAM_EN(x) (((x) & 0x1) << 1)
+#define G_0003C2_VGA_RAM_EN(x) (((x) >> 1) & 0x1)
+#define C_0003C2_VGA_RAM_EN 0xFD
+#define S_0003C2_VGA_CKSEL(x) (((x) & 0x3) << 2)
+#define G_0003C2_VGA_CKSEL(x) (((x) >> 2) & 0x3)
+#define C_0003C2_VGA_CKSEL 0xF3
+#define S_0003C2_ODD_EVEN_MD_PGSEL(x) (((x) & 0x1) << 5)
+#define G_0003C2_ODD_EVEN_MD_PGSEL(x) (((x) >> 5) & 0x1)
+#define C_0003C2_ODD_EVEN_MD_PGSEL 0xDF
+#define S_0003C2_VGA_HSYNC_POL(x) (((x) & 0x1) << 6)
+#define G_0003C2_VGA_HSYNC_POL(x) (((x) >> 6) & 0x1)
+#define C_0003C2_VGA_HSYNC_POL 0xBF
+#define S_0003C2_VGA_VSYNC_POL(x) (((x) & 0x1) << 7)
+#define G_0003C2_VGA_VSYNC_POL(x) (((x) >> 7) & 0x1)
+#define C_0003C2_VGA_VSYNC_POL 0x7F
#define R_0003F8_CRTC2_GEN_CNTL 0x0003F8
#define S_0003F8_CRTC2_DBL_SCAN_EN(x) (((x) & 0x1) << 0)
#define G_0003F8_CRTC2_DBL_SCAN_EN(x) (((x) >> 0) & 0x1)
@@ -545,6 +563,46 @@
#define S_000774_SCRATCH_ADDR(x) (((x) & 0x7FFFFFF) << 5)
#define G_000774_SCRATCH_ADDR(x) (((x) >> 5) & 0x7FFFFFF)
#define C_000774_SCRATCH_ADDR 0x0000001F
+#define R_0007C0_CP_STAT 0x0007C0
+#define S_0007C0_MRU_BUSY(x) (((x) & 0x1) << 0)
+#define G_0007C0_MRU_BUSY(x) (((x) >> 0) & 0x1)
+#define C_0007C0_MRU_BUSY 0xFFFFFFFE
+#define S_0007C0_MWU_BUSY(x) (((x) & 0x1) << 1)
+#define G_0007C0_MWU_BUSY(x) (((x) >> 1) & 0x1)
+#define C_0007C0_MWU_BUSY 0xFFFFFFFD
+#define S_0007C0_RSIU_BUSY(x) (((x) & 0x1) << 2)
+#define G_0007C0_RSIU_BUSY(x) (((x) >> 2) & 0x1)
+#define C_0007C0_RSIU_BUSY 0xFFFFFFFB
+#define S_0007C0_RCIU_BUSY(x) (((x) & 0x1) << 3)
+#define G_0007C0_RCIU_BUSY(x) (((x) >> 3) & 0x1)
+#define C_0007C0_RCIU_BUSY 0xFFFFFFF7
+#define S_0007C0_CSF_PRIMARY_BUSY(x) (((x) & 0x1) << 9)
+#define G_0007C0_CSF_PRIMARY_BUSY(x) (((x) >> 9) & 0x1)
+#define C_0007C0_CSF_PRIMARY_BUSY 0xFFFFFDFF
+#define S_0007C0_CSF_INDIRECT_BUSY(x) (((x) & 0x1) << 10)
+#define G_0007C0_CSF_INDIRECT_BUSY(x) (((x) >> 10) & 0x1)
+#define C_0007C0_CSF_INDIRECT_BUSY 0xFFFFFBFF
+#define S_0007C0_CSQ_PRIMARY_BUSY(x) (((x) & 0x1) << 11)
+#define G_0007C0_CSQ_PRIMARY_BUSY(x) (((x) >> 11) & 0x1)
+#define C_0007C0_CSQ_PRIMARY_BUSY 0xFFFFF7FF
+#define S_0007C0_CSQ_INDIRECT_BUSY(x) (((x) & 0x1) << 12)
+#define G_0007C0_CSQ_INDIRECT_BUSY(x) (((x) >> 12) & 0x1)
+#define C_0007C0_CSQ_INDIRECT_BUSY 0xFFFFEFFF
+#define S_0007C0_CSI_BUSY(x) (((x) & 0x1) << 13)
+#define G_0007C0_CSI_BUSY(x) (((x) >> 13) & 0x1)
+#define C_0007C0_CSI_BUSY 0xFFFFDFFF
+#define S_0007C0_GUIDMA_BUSY(x) (((x) & 0x1) << 28)
+#define G_0007C0_GUIDMA_BUSY(x) (((x) >> 28) & 0x1)
+#define C_0007C0_GUIDMA_BUSY 0xEFFFFFFF
+#define S_0007C0_VIDDMA_BUSY(x) (((x) & 0x1) << 29)
+#define G_0007C0_VIDDMA_BUSY(x) (((x) >> 29) & 0x1)
+#define C_0007C0_VIDDMA_BUSY 0xDFFFFFFF
+#define S_0007C0_CMDSTRM_BUSY(x) (((x) & 0x1) << 30)
+#define G_0007C0_CMDSTRM_BUSY(x) (((x) >> 30) & 0x1)
+#define C_0007C0_CMDSTRM_BUSY 0xBFFFFFFF
+#define S_0007C0_CP_BUSY(x) (((x) & 0x1) << 31)
+#define G_0007C0_CP_BUSY(x) (((x) >> 31) & 0x1)
+#define C_0007C0_CP_BUSY 0x7FFFFFFF
#define R_000E40_RBBM_STATUS 0x000E40
#define S_000E40_CMDFIFO_AVAIL(x) (((x) & 0x7F) << 0)
#define G_000E40_CMDFIFO_AVAIL(x) (((x) >> 0) & 0x7F)
@@ -604,4 +662,53 @@
#define G_000E40_GUI_ACTIVE(x) (((x) >> 31) & 0x1)
#define C_000E40_GUI_ACTIVE 0x7FFFFFFF
+
+#define R_00000D_SCLK_CNTL 0x00000D
+#define S_00000D_SCLK_SRC_SEL(x) (((x) & 0x7) << 0)
+#define G_00000D_SCLK_SRC_SEL(x) (((x) >> 0) & 0x7)
+#define C_00000D_SCLK_SRC_SEL 0xFFFFFFF8
+#define S_00000D_TCLK_SRC_SEL(x) (((x) & 0x7) << 8)
+#define G_00000D_TCLK_SRC_SEL(x) (((x) >> 8) & 0x7)
+#define C_00000D_TCLK_SRC_SEL 0xFFFFF8FF
+#define S_00000D_FORCE_CP(x) (((x) & 0x1) << 16)
+#define G_00000D_FORCE_CP(x) (((x) >> 16) & 0x1)
+#define C_00000D_FORCE_CP 0xFFFEFFFF
+#define S_00000D_FORCE_HDP(x) (((x) & 0x1) << 17)
+#define G_00000D_FORCE_HDP(x) (((x) >> 17) & 0x1)
+#define C_00000D_FORCE_HDP 0xFFFDFFFF
+#define S_00000D_FORCE_DISP(x) (((x) & 0x1) << 18)
+#define G_00000D_FORCE_DISP(x) (((x) >> 18) & 0x1)
+#define C_00000D_FORCE_DISP 0xFFFBFFFF
+#define S_00000D_FORCE_TOP(x) (((x) & 0x1) << 19)
+#define G_00000D_FORCE_TOP(x) (((x) >> 19) & 0x1)
+#define C_00000D_FORCE_TOP 0xFFF7FFFF
+#define S_00000D_FORCE_E2(x) (((x) & 0x1) << 20)
+#define G_00000D_FORCE_E2(x) (((x) >> 20) & 0x1)
+#define C_00000D_FORCE_E2 0xFFEFFFFF
+#define S_00000D_FORCE_SE(x) (((x) & 0x1) << 21)
+#define G_00000D_FORCE_SE(x) (((x) >> 21) & 0x1)
+#define C_00000D_FORCE_SE 0xFFDFFFFF
+#define S_00000D_FORCE_IDCT(x) (((x) & 0x1) << 22)
+#define G_00000D_FORCE_IDCT(x) (((x) >> 22) & 0x1)
+#define C_00000D_FORCE_IDCT 0xFFBFFFFF
+#define S_00000D_FORCE_VIP(x) (((x) & 0x1) << 23)
+#define G_00000D_FORCE_VIP(x) (((x) >> 23) & 0x1)
+#define C_00000D_FORCE_VIP 0xFF7FFFFF
+#define S_00000D_FORCE_RE(x) (((x) & 0x1) << 24)
+#define G_00000D_FORCE_RE(x) (((x) >> 24) & 0x1)
+#define C_00000D_FORCE_RE 0xFEFFFFFF
+#define S_00000D_FORCE_PB(x) (((x) & 0x1) << 25)
+#define G_00000D_FORCE_PB(x) (((x) >> 25) & 0x1)
+#define C_00000D_FORCE_PB 0xFDFFFFFF
+#define S_00000D_FORCE_TAM(x) (((x) & 0x1) << 26)
+#define G_00000D_FORCE_TAM(x) (((x) >> 26) & 0x1)
+#define C_00000D_FORCE_TAM 0xFBFFFFFF
+#define S_00000D_FORCE_TDM(x) (((x) & 0x1) << 27)
+#define G_00000D_FORCE_TDM(x) (((x) >> 27) & 0x1)
+#define C_00000D_FORCE_TDM 0xF7FFFFFF
+#define S_00000D_FORCE_RB(x) (((x) & 0x1) << 28)
+#define G_00000D_FORCE_RB(x) (((x) >> 28) & 0x1)
+#define C_00000D_FORCE_RB 0xEFFFFFFF
+
+
#endif
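A stand-alone sketch (not part of the patch) of the S_/G_/C_ macro convention used throughout r100d.h above: S_ builds a field at its bit offset, G_ extracts it, and C_ is the mask that clears it. The field layout below (4 bits at offset 8) is invented for illustration.

#include <stdint.h>
#include <stdio.h>

#define S_FIELD(x)	(((x) & 0xF) << 8)
#define G_FIELD(x)	(((x) >> 8) & 0xF)
#define C_FIELD		0xFFFFF0FF

int main(void)
{
	uint32_t reg = 0x12345678;

	reg = (reg & C_FIELD) | S_FIELD(0xA);	/* clear the field, then set it to 0xA */
	printf("field=0x%X reg=0x%08X\n", (unsigned)G_FIELD(reg), (unsigned)reg);
	return 0;
}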
diff --git a/drivers/gpu/drm/radeon/r200.c b/drivers/gpu/drm/radeon/r200.c
index cf7fea5ff2e..eb740fc3549 100644
--- a/drivers/gpu/drm/radeon/r200.c
+++ b/drivers/gpu/drm/radeon/r200.c
@@ -447,9 +447,8 @@ int r200_packet0_check(struct radeon_cs_parser *p,
return 0;
}
-int r200_init(struct radeon_device *rdev)
+void r200_set_safe_registers(struct radeon_device *rdev)
{
rdev->config.r100.reg_safe_bm = r200_reg_safe_bm;
rdev->config.r100.reg_safe_bm_size = ARRAY_SIZE(r200_reg_safe_bm);
- return 0;
}
diff --git a/drivers/gpu/drm/radeon/r300.c b/drivers/gpu/drm/radeon/r300.c
index 1ebea8cc8c9..2f43ee8e404 100644
--- a/drivers/gpu/drm/radeon/r300.c
+++ b/drivers/gpu/drm/radeon/r300.c
@@ -33,43 +33,16 @@
#include "radeon_drm.h"
#include "r100_track.h"
#include "r300d.h"
-
+#include "rv350d.h"
#include "r300_reg_safe.h"
-/* r300,r350,rv350,rv370,rv380 depends on : */
-void r100_hdp_reset(struct radeon_device *rdev);
-int r100_cp_reset(struct radeon_device *rdev);
-int r100_rb2d_reset(struct radeon_device *rdev);
-int r100_cp_init(struct radeon_device *rdev, unsigned ring_size);
-int r100_pci_gart_enable(struct radeon_device *rdev);
-void r100_mc_setup(struct radeon_device *rdev);
-void r100_mc_disable_clients(struct radeon_device *rdev);
-int r100_gui_wait_for_idle(struct radeon_device *rdev);
-int r100_cs_packet_parse(struct radeon_cs_parser *p,
- struct radeon_cs_packet *pkt,
- unsigned idx);
-int r100_cs_packet_parse_vline(struct radeon_cs_parser *p);
-int r100_cs_parse_packet0(struct radeon_cs_parser *p,
- struct radeon_cs_packet *pkt,
- const unsigned *auth, unsigned n,
- radeon_packet0_check_t check);
-int r100_cs_track_check_pkt3_indx_buffer(struct radeon_cs_parser *p,
- struct radeon_cs_packet *pkt,
- struct radeon_object *robj);
-
-/* This files gather functions specifics to:
- * r300,r350,rv350,rv370,rv380
- *
- * Some of these functions might be used by newer ASICs.
- */
-void r300_gpu_init(struct radeon_device *rdev);
-int r300_mc_wait_for_idle(struct radeon_device *rdev);
-int rv370_debugfs_pcie_gart_info_init(struct radeon_device *rdev);
-
+/* This file gathers functions specific to: r300,r350,rv350,rv370,rv380 */
/*
* rv370,rv380 PCIE GART
*/
+static int rv370_debugfs_pcie_gart_info_init(struct radeon_device *rdev);
+
void rv370_pcie_gart_tlb_flush(struct radeon_device *rdev)
{
uint32_t tmp;
@@ -140,7 +113,7 @@ int rv370_pcie_gart_enable(struct radeon_device *rdev)
tmp = RADEON_PCIE_TX_GART_UNMAPPED_ACCESS_DISCARD;
WREG32_PCIE(RADEON_PCIE_TX_GART_CNTL, tmp);
WREG32_PCIE(RADEON_PCIE_TX_GART_START_LO, rdev->mc.gtt_location);
- tmp = rdev->mc.gtt_location + rdev->mc.gtt_size - 4096;
+ tmp = rdev->mc.gtt_location + rdev->mc.gtt_size - RADEON_GPU_PAGE_SIZE;
WREG32_PCIE(RADEON_PCIE_TX_GART_END_LO, tmp);
WREG32_PCIE(RADEON_PCIE_TX_GART_START_HI, 0);
WREG32_PCIE(RADEON_PCIE_TX_GART_END_HI, 0);
@@ -182,59 +155,6 @@ void rv370_pcie_gart_fini(struct radeon_device *rdev)
radeon_gart_fini(rdev);
}
-/*
- * MC
- */
-int r300_mc_init(struct radeon_device *rdev)
-{
- int r;
-
- if (r100_debugfs_rbbm_init(rdev)) {
- DRM_ERROR("Failed to register debugfs file for RBBM !\n");
- }
-
- r300_gpu_init(rdev);
- r100_pci_gart_disable(rdev);
- if (rdev->flags & RADEON_IS_PCIE) {
- rv370_pcie_gart_disable(rdev);
- }
-
- /* Setup GPU memory space */
- rdev->mc.vram_location = 0xFFFFFFFFUL;
- rdev->mc.gtt_location = 0xFFFFFFFFUL;
- if (rdev->flags & RADEON_IS_AGP) {
- r = radeon_agp_init(rdev);
- if (r) {
- printk(KERN_WARNING "[drm] Disabling AGP\n");
- rdev->flags &= ~RADEON_IS_AGP;
- rdev->mc.gtt_size = radeon_gart_size * 1024 * 1024;
- } else {
- rdev->mc.gtt_location = rdev->mc.agp_base;
- }
- }
- r = radeon_mc_setup(rdev);
- if (r) {
- return r;
- }
-
- /* Program GPU memory space */
- r100_mc_disable_clients(rdev);
- if (r300_mc_wait_for_idle(rdev)) {
- printk(KERN_WARNING "Failed to wait MC idle while "
- "programming pipes. Bad things might happen.\n");
- }
- r100_mc_setup(rdev);
- return 0;
-}
-
-void r300_mc_fini(struct radeon_device *rdev)
-{
-}
-
-
-/*
- * Fence emission
- */
void r300_fence_ring_emit(struct radeon_device *rdev,
struct radeon_fence *fence)
{
@@ -260,10 +180,6 @@ void r300_fence_ring_emit(struct radeon_device *rdev,
radeon_ring_write(rdev, RADEON_SW_INT_FIRE);
}
-
-/*
- * Global GPU functions
- */
int r300_copy_dma(struct radeon_device *rdev,
uint64_t src_offset,
uint64_t dst_offset,
@@ -582,11 +498,6 @@ void r300_vram_info(struct radeon_device *rdev)
r100_vram_init_sizes(rdev);
}
-
-/*
- * PCIE Lanes
- */
-
void rv370_set_pcie_lanes(struct radeon_device *rdev, int lanes)
{
uint32_t link_width_cntl, mask;
@@ -646,10 +557,6 @@ void rv370_set_pcie_lanes(struct radeon_device *rdev, int lanes)
}
-
-/*
- * Debugfs info
- */
#if defined(CONFIG_DEBUG_FS)
static int rv370_debugfs_pcie_gart_info(struct seq_file *m, void *data)
{
@@ -680,7 +587,7 @@ static struct drm_info_list rv370_pcie_gart_info_list[] = {
};
#endif
-int rv370_debugfs_pcie_gart_info_init(struct radeon_device *rdev)
+static int rv370_debugfs_pcie_gart_info_init(struct radeon_device *rdev)
{
#if defined(CONFIG_DEBUG_FS)
return radeon_debugfs_add_files(rdev, rv370_pcie_gart_info_list, 1);
@@ -689,10 +596,6 @@ int rv370_debugfs_pcie_gart_info_init(struct radeon_device *rdev)
#endif
}
-
-/*
- * CS functions
- */
static int r300_packet0_check(struct radeon_cs_parser *p,
struct radeon_cs_packet *pkt,
unsigned idx, unsigned reg)
@@ -1226,12 +1129,6 @@ void r300_set_reg_safe(struct radeon_device *rdev)
rdev->config.r300.reg_safe_bm_size = ARRAY_SIZE(r300_reg_safe_bm);
}
-int r300_init(struct radeon_device *rdev)
-{
- r300_set_reg_safe(rdev);
- return 0;
-}
-
void r300_mc_program(struct radeon_device *rdev)
{
struct r100_mc_save save;
@@ -1265,3 +1162,198 @@ void r300_mc_program(struct radeon_device *rdev)
S_000148_MC_FB_TOP(rdev->mc.vram_end >> 16));
r100_mc_resume(rdev, &save);
}
+
+void r300_clock_startup(struct radeon_device *rdev)
+{
+ u32 tmp;
+
+ if (radeon_dynclks != -1 && radeon_dynclks)
+ radeon_legacy_set_clock_gating(rdev, 1);
+ /* We need to force on some of the blocks */
+ tmp = RREG32_PLL(R_00000D_SCLK_CNTL);
+ tmp |= S_00000D_FORCE_CP(1) | S_00000D_FORCE_VIP(1);
+ if ((rdev->family == CHIP_RV350) || (rdev->family == CHIP_RV380))
+ tmp |= S_00000D_FORCE_VAP(1);
+ WREG32_PLL(R_00000D_SCLK_CNTL, tmp);
+}
+
+static int r300_startup(struct radeon_device *rdev)
+{
+ int r;
+
+ r300_mc_program(rdev);
+ /* Resume clock */
+ r300_clock_startup(rdev);
+ /* Initialize GPU configuration (# pipes, ...) */
+ r300_gpu_init(rdev);
+ /* Initialize GART (initialize after TTM so we can allocate
+ * memory through TTM but finalize after TTM) */
+ if (rdev->flags & RADEON_IS_PCIE) {
+ r = rv370_pcie_gart_enable(rdev);
+ if (r)
+ return r;
+ }
+ if (rdev->flags & RADEON_IS_PCI) {
+ r = r100_pci_gart_enable(rdev);
+ if (r)
+ return r;
+ }
+ /* Enable IRQ */
+ rdev->irq.sw_int = true;
+ r100_irq_set(rdev);
+ /* 1M ring buffer */
+ r = r100_cp_init(rdev, 1024 * 1024);
+ if (r) {
+ dev_err(rdev->dev, "failled initializing CP (%d).\n", r);
+ return r;
+ }
+ r = r100_wb_init(rdev);
+ if (r)
+ dev_err(rdev->dev, "failled initializing WB (%d).\n", r);
+ r = r100_ib_init(rdev);
+ if (r) {
+ dev_err(rdev->dev, "failled initializing IB (%d).\n", r);
+ return r;
+ }
+ return 0;
+}
+
+int r300_resume(struct radeon_device *rdev)
+{
+ /* Make sure GART is not working */
+ if (rdev->flags & RADEON_IS_PCIE)
+ rv370_pcie_gart_disable(rdev);
+ if (rdev->flags & RADEON_IS_PCI)
+ r100_pci_gart_disable(rdev);
+ /* Resume clock before doing reset */
+ r300_clock_startup(rdev);
+ /* Reset gpu before posting otherwise ATOM will enter infinite loop */
+ if (radeon_gpu_reset(rdev)) {
+ dev_warn(rdev->dev, "GPU reset failed ! (0xE40=0x%08X, 0x7C0=0x%08X)\n",
+ RREG32(R_000E40_RBBM_STATUS),
+ RREG32(R_0007C0_CP_STAT));
+ }
+ /* post */
+ radeon_combios_asic_init(rdev->ddev);
+ /* Resume clock after posting */
+ r300_clock_startup(rdev);
+ return r300_startup(rdev);
+}
+
+int r300_suspend(struct radeon_device *rdev)
+{
+ r100_cp_disable(rdev);
+ r100_wb_disable(rdev);
+ r100_irq_disable(rdev);
+ if (rdev->flags & RADEON_IS_PCIE)
+ rv370_pcie_gart_disable(rdev);
+ if (rdev->flags & RADEON_IS_PCI)
+ r100_pci_gart_disable(rdev);
+ return 0;
+}
+
+void r300_fini(struct radeon_device *rdev)
+{
+ r300_suspend(rdev);
+ r100_cp_fini(rdev);
+ r100_wb_fini(rdev);
+ r100_ib_fini(rdev);
+ radeon_gem_fini(rdev);
+ if (rdev->flags & RADEON_IS_PCIE)
+ rv370_pcie_gart_fini(rdev);
+ if (rdev->flags & RADEON_IS_PCI)
+ r100_pci_gart_fini(rdev);
+ radeon_irq_kms_fini(rdev);
+ radeon_fence_driver_fini(rdev);
+ radeon_object_fini(rdev);
+ radeon_atombios_fini(rdev);
+ kfree(rdev->bios);
+ rdev->bios = NULL;
+}
+
+int r300_init(struct radeon_device *rdev)
+{
+ int r;
+
+ /* Disable VGA */
+ r100_vga_render_disable(rdev);
+ /* Initialize scratch registers */
+ radeon_scratch_init(rdev);
+ /* Initialize surface registers */
+ radeon_surface_init(rdev);
+ /* TODO: disabling VGA needs to use a VGA request */
+ /* BIOS */
+ if (!radeon_get_bios(rdev)) {
+ if (ASIC_IS_AVIVO(rdev))
+ return -EINVAL;
+ }
+ if (rdev->is_atom_bios) {
+ dev_err(rdev->dev, "Expecting combios for RS400/RS480 GPU\n");
+ return -EINVAL;
+ } else {
+ r = radeon_combios_init(rdev);
+ if (r)
+ return r;
+ }
+ /* Reset gpu before posting otherwise ATOM will enter infinite loop */
+ if (radeon_gpu_reset(rdev)) {
+ dev_warn(rdev->dev,
+ "GPU reset failed ! (0xE40=0x%08X, 0x7C0=0x%08X)\n",
+ RREG32(R_000E40_RBBM_STATUS),
+ RREG32(R_0007C0_CP_STAT));
+ }
+ /* check if cards are posted or not */
+ if (!radeon_card_posted(rdev) && rdev->bios) {
+ DRM_INFO("GPU not posted. posting now...\n");
+ radeon_combios_asic_init(rdev->ddev);
+ }
+ /* Set asic errata */
+ r300_errata(rdev);
+ /* Initialize clocks */
+ radeon_get_clock_info(rdev->ddev);
+ /* Get VRAM information */
+ r300_vram_info(rdev);
+ /* Initialize memory controller (also test AGP) */
+ r = r420_mc_init(rdev);
+ if (r)
+ return r;
+ /* Fence driver */
+ r = radeon_fence_driver_init(rdev);
+ if (r)
+ return r;
+ r = radeon_irq_kms_init(rdev);
+ if (r)
+ return r;
+ /* Memory manager */
+ r = radeon_object_init(rdev);
+ if (r)
+ return r;
+ if (rdev->flags & RADEON_IS_PCIE) {
+ r = rv370_pcie_gart_init(rdev);
+ if (r)
+ return r;
+ }
+ if (rdev->flags & RADEON_IS_PCI) {
+ r = r100_pci_gart_init(rdev);
+ if (r)
+ return r;
+ }
+ r300_set_reg_safe(rdev);
+ rdev->accel_working = true;
+ r = r300_startup(rdev);
+ if (r) {
+ /* Something went wrong with the accel init, so stop accel */
+ dev_err(rdev->dev, "Disabling GPU acceleration\n");
+ r300_suspend(rdev);
+ r100_cp_fini(rdev);
+ r100_wb_fini(rdev);
+ r100_ib_fini(rdev);
+ if (rdev->flags & RADEON_IS_PCIE)
+ rv370_pcie_gart_fini(rdev);
+ if (rdev->flags & RADEON_IS_PCI)
+ r100_pci_gart_fini(rdev);
+ radeon_irq_kms_fini(rdev);
+ rdev->accel_working = false;
+ }
+ return 0;
+}
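The new init path above deliberately lets r300_init() return 0 even when r300_startup() fails: the acceleration blocks are torn back down and accel_working is cleared, but the device still initializes so modesetting keeps working. A minimal standalone sketch of that error-handling shape, with invented demo_* names rather than kernel code:

#include <stdbool.h>
#include <stdio.h>

struct demo_dev { bool accel_working; };

/* stand-ins for r300_startup() and the cp/wb/ib/gart teardown calls */
static int demo_startup(struct demo_dev *dev) { (void)dev; return -1; }
static void demo_teardown_accel(struct demo_dev *dev) { (void)dev; }

static int demo_init(struct demo_dev *dev)
{
	dev->accel_working = true;
	if (demo_startup(dev)) {
		fprintf(stderr, "Disabling GPU acceleration\n");
		demo_teardown_accel(dev);
		dev->accel_working = false;
	}
	return 0;	/* init still succeeds; only acceleration is disabled */
}

int main(void)
{
	struct demo_dev dev = { false };
	return demo_init(&dev);
}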
diff --git a/drivers/gpu/drm/radeon/r300d.h b/drivers/gpu/drm/radeon/r300d.h
index d4fa3eb1074..4c73114f0de 100644
--- a/drivers/gpu/drm/radeon/r300d.h
+++ b/drivers/gpu/drm/radeon/r300d.h
@@ -96,6 +96,211 @@
#define S_000170_AGP_BASE_ADDR(x) (((x) & 0xFFFFFFFF) << 0)
#define G_000170_AGP_BASE_ADDR(x) (((x) >> 0) & 0xFFFFFFFF)
#define C_000170_AGP_BASE_ADDR 0x00000000
+#define R_0007C0_CP_STAT 0x0007C0
+#define S_0007C0_MRU_BUSY(x) (((x) & 0x1) << 0)
+#define G_0007C0_MRU_BUSY(x) (((x) >> 0) & 0x1)
+#define C_0007C0_MRU_BUSY 0xFFFFFFFE
+#define S_0007C0_MWU_BUSY(x) (((x) & 0x1) << 1)
+#define G_0007C0_MWU_BUSY(x) (((x) >> 1) & 0x1)
+#define C_0007C0_MWU_BUSY 0xFFFFFFFD
+#define S_0007C0_RSIU_BUSY(x) (((x) & 0x1) << 2)
+#define G_0007C0_RSIU_BUSY(x) (((x) >> 2) & 0x1)
+#define C_0007C0_RSIU_BUSY 0xFFFFFFFB
+#define S_0007C0_RCIU_BUSY(x) (((x) & 0x1) << 3)
+#define G_0007C0_RCIU_BUSY(x) (((x) >> 3) & 0x1)
+#define C_0007C0_RCIU_BUSY 0xFFFFFFF7
+#define S_0007C0_CSF_PRIMARY_BUSY(x) (((x) & 0x1) << 9)
+#define G_0007C0_CSF_PRIMARY_BUSY(x) (((x) >> 9) & 0x1)
+#define C_0007C0_CSF_PRIMARY_BUSY 0xFFFFFDFF
+#define S_0007C0_CSF_INDIRECT_BUSY(x) (((x) & 0x1) << 10)
+#define G_0007C0_CSF_INDIRECT_BUSY(x) (((x) >> 10) & 0x1)
+#define C_0007C0_CSF_INDIRECT_BUSY 0xFFFFFBFF
+#define S_0007C0_CSQ_PRIMARY_BUSY(x) (((x) & 0x1) << 11)
+#define G_0007C0_CSQ_PRIMARY_BUSY(x) (((x) >> 11) & 0x1)
+#define C_0007C0_CSQ_PRIMARY_BUSY 0xFFFFF7FF
+#define S_0007C0_CSQ_INDIRECT_BUSY(x) (((x) & 0x1) << 12)
+#define G_0007C0_CSQ_INDIRECT_BUSY(x) (((x) >> 12) & 0x1)
+#define C_0007C0_CSQ_INDIRECT_BUSY 0xFFFFEFFF
+#define S_0007C0_CSI_BUSY(x) (((x) & 0x1) << 13)
+#define G_0007C0_CSI_BUSY(x) (((x) >> 13) & 0x1)
+#define C_0007C0_CSI_BUSY 0xFFFFDFFF
+#define S_0007C0_CSF_INDIRECT2_BUSY(x) (((x) & 0x1) << 14)
+#define G_0007C0_CSF_INDIRECT2_BUSY(x) (((x) >> 14) & 0x1)
+#define C_0007C0_CSF_INDIRECT2_BUSY 0xFFFFBFFF
+#define S_0007C0_CSQ_INDIRECT2_BUSY(x) (((x) & 0x1) << 15)
+#define G_0007C0_CSQ_INDIRECT2_BUSY(x) (((x) >> 15) & 0x1)
+#define C_0007C0_CSQ_INDIRECT2_BUSY 0xFFFF7FFF
+#define S_0007C0_GUIDMA_BUSY(x) (((x) & 0x1) << 28)
+#define G_0007C0_GUIDMA_BUSY(x) (((x) >> 28) & 0x1)
+#define C_0007C0_GUIDMA_BUSY 0xEFFFFFFF
+#define S_0007C0_VIDDMA_BUSY(x) (((x) & 0x1) << 29)
+#define G_0007C0_VIDDMA_BUSY(x) (((x) >> 29) & 0x1)
+#define C_0007C0_VIDDMA_BUSY 0xDFFFFFFF
+#define S_0007C0_CMDSTRM_BUSY(x) (((x) & 0x1) << 30)
+#define G_0007C0_CMDSTRM_BUSY(x) (((x) >> 30) & 0x1)
+#define C_0007C0_CMDSTRM_BUSY 0xBFFFFFFF
+#define S_0007C0_CP_BUSY(x) (((x) & 0x1) << 31)
+#define G_0007C0_CP_BUSY(x) (((x) >> 31) & 0x1)
+#define C_0007C0_CP_BUSY 0x7FFFFFFF
+#define R_000E40_RBBM_STATUS 0x000E40
+#define S_000E40_CMDFIFO_AVAIL(x) (((x) & 0x7F) << 0)
+#define G_000E40_CMDFIFO_AVAIL(x) (((x) >> 0) & 0x7F)
+#define C_000E40_CMDFIFO_AVAIL 0xFFFFFF80
+#define S_000E40_HIRQ_ON_RBB(x) (((x) & 0x1) << 8)
+#define G_000E40_HIRQ_ON_RBB(x) (((x) >> 8) & 0x1)
+#define C_000E40_HIRQ_ON_RBB 0xFFFFFEFF
+#define S_000E40_CPRQ_ON_RBB(x) (((x) & 0x1) << 9)
+#define G_000E40_CPRQ_ON_RBB(x) (((x) >> 9) & 0x1)
+#define C_000E40_CPRQ_ON_RBB 0xFFFFFDFF
+#define S_000E40_CFRQ_ON_RBB(x) (((x) & 0x1) << 10)
+#define G_000E40_CFRQ_ON_RBB(x) (((x) >> 10) & 0x1)
+#define C_000E40_CFRQ_ON_RBB 0xFFFFFBFF
+#define S_000E40_HIRQ_IN_RTBUF(x) (((x) & 0x1) << 11)
+#define G_000E40_HIRQ_IN_RTBUF(x) (((x) >> 11) & 0x1)
+#define C_000E40_HIRQ_IN_RTBUF 0xFFFFF7FF
+#define S_000E40_CPRQ_IN_RTBUF(x) (((x) & 0x1) << 12)
+#define G_000E40_CPRQ_IN_RTBUF(x) (((x) >> 12) & 0x1)
+#define C_000E40_CPRQ_IN_RTBUF 0xFFFFEFFF
+#define S_000E40_CFRQ_IN_RTBUF(x) (((x) & 0x1) << 13)
+#define G_000E40_CFRQ_IN_RTBUF(x) (((x) >> 13) & 0x1)
+#define C_000E40_CFRQ_IN_RTBUF 0xFFFFDFFF
+#define S_000E40_CF_PIPE_BUSY(x) (((x) & 0x1) << 14)
+#define G_000E40_CF_PIPE_BUSY(x) (((x) >> 14) & 0x1)
+#define C_000E40_CF_PIPE_BUSY 0xFFFFBFFF
+#define S_000E40_ENG_EV_BUSY(x) (((x) & 0x1) << 15)
+#define G_000E40_ENG_EV_BUSY(x) (((x) >> 15) & 0x1)
+#define C_000E40_ENG_EV_BUSY 0xFFFF7FFF
+#define S_000E40_CP_CMDSTRM_BUSY(x) (((x) & 0x1) << 16)
+#define G_000E40_CP_CMDSTRM_BUSY(x) (((x) >> 16) & 0x1)
+#define C_000E40_CP_CMDSTRM_BUSY 0xFFFEFFFF
+#define S_000E40_E2_BUSY(x) (((x) & 0x1) << 17)
+#define G_000E40_E2_BUSY(x) (((x) >> 17) & 0x1)
+#define C_000E40_E2_BUSY 0xFFFDFFFF
+#define S_000E40_RB2D_BUSY(x) (((x) & 0x1) << 18)
+#define G_000E40_RB2D_BUSY(x) (((x) >> 18) & 0x1)
+#define C_000E40_RB2D_BUSY 0xFFFBFFFF
+#define S_000E40_RB3D_BUSY(x) (((x) & 0x1) << 19)
+#define G_000E40_RB3D_BUSY(x) (((x) >> 19) & 0x1)
+#define C_000E40_RB3D_BUSY 0xFFF7FFFF
+#define S_000E40_VAP_BUSY(x) (((x) & 0x1) << 20)
+#define G_000E40_VAP_BUSY(x) (((x) >> 20) & 0x1)
+#define C_000E40_VAP_BUSY 0xFFEFFFFF
+#define S_000E40_RE_BUSY(x) (((x) & 0x1) << 21)
+#define G_000E40_RE_BUSY(x) (((x) >> 21) & 0x1)
+#define C_000E40_RE_BUSY 0xFFDFFFFF
+#define S_000E40_TAM_BUSY(x) (((x) & 0x1) << 22)
+#define G_000E40_TAM_BUSY(x) (((x) >> 22) & 0x1)
+#define C_000E40_TAM_BUSY 0xFFBFFFFF
+#define S_000E40_TDM_BUSY(x) (((x) & 0x1) << 23)
+#define G_000E40_TDM_BUSY(x) (((x) >> 23) & 0x1)
+#define C_000E40_TDM_BUSY 0xFF7FFFFF
+#define S_000E40_PB_BUSY(x) (((x) & 0x1) << 24)
+#define G_000E40_PB_BUSY(x) (((x) >> 24) & 0x1)
+#define C_000E40_PB_BUSY 0xFEFFFFFF
+#define S_000E40_TIM_BUSY(x) (((x) & 0x1) << 25)
+#define G_000E40_TIM_BUSY(x) (((x) >> 25) & 0x1)
+#define C_000E40_TIM_BUSY 0xFDFFFFFF
+#define S_000E40_GA_BUSY(x) (((x) & 0x1) << 26)
+#define G_000E40_GA_BUSY(x) (((x) >> 26) & 0x1)
+#define C_000E40_GA_BUSY 0xFBFFFFFF
+#define S_000E40_CBA2D_BUSY(x) (((x) & 0x1) << 27)
+#define G_000E40_CBA2D_BUSY(x) (((x) >> 27) & 0x1)
+#define C_000E40_CBA2D_BUSY 0xF7FFFFFF
+#define S_000E40_GUI_ACTIVE(x) (((x) & 0x1) << 31)
+#define G_000E40_GUI_ACTIVE(x) (((x) >> 31) & 0x1)
+#define C_000E40_GUI_ACTIVE 0x7FFFFFFF
+#define R_00000D_SCLK_CNTL 0x00000D
+#define S_00000D_SCLK_SRC_SEL(x) (((x) & 0x7) << 0)
+#define G_00000D_SCLK_SRC_SEL(x) (((x) >> 0) & 0x7)
+#define C_00000D_SCLK_SRC_SEL 0xFFFFFFF8
+#define S_00000D_CP_MAX_DYN_STOP_LAT(x) (((x) & 0x1) << 3)
+#define G_00000D_CP_MAX_DYN_STOP_LAT(x) (((x) >> 3) & 0x1)
+#define C_00000D_CP_MAX_DYN_STOP_LAT 0xFFFFFFF7
+#define S_00000D_HDP_MAX_DYN_STOP_LAT(x) (((x) & 0x1) << 4)
+#define G_00000D_HDP_MAX_DYN_STOP_LAT(x) (((x) >> 4) & 0x1)
+#define C_00000D_HDP_MAX_DYN_STOP_LAT 0xFFFFFFEF
+#define S_00000D_TV_MAX_DYN_STOP_LAT(x) (((x) & 0x1) << 5)
+#define G_00000D_TV_MAX_DYN_STOP_LAT(x) (((x) >> 5) & 0x1)
+#define C_00000D_TV_MAX_DYN_STOP_LAT 0xFFFFFFDF
+#define S_00000D_E2_MAX_DYN_STOP_LAT(x) (((x) & 0x1) << 6)
+#define G_00000D_E2_MAX_DYN_STOP_LAT(x) (((x) >> 6) & 0x1)
+#define C_00000D_E2_MAX_DYN_STOP_LAT 0xFFFFFFBF
+#define S_00000D_SE_MAX_DYN_STOP_LAT(x) (((x) & 0x1) << 7)
+#define G_00000D_SE_MAX_DYN_STOP_LAT(x) (((x) >> 7) & 0x1)
+#define C_00000D_SE_MAX_DYN_STOP_LAT 0xFFFFFF7F
+#define S_00000D_IDCT_MAX_DYN_STOP_LAT(x) (((x) & 0x1) << 8)
+#define G_00000D_IDCT_MAX_DYN_STOP_LAT(x) (((x) >> 8) & 0x1)
+#define C_00000D_IDCT_MAX_DYN_STOP_LAT 0xFFFFFEFF
+#define S_00000D_VIP_MAX_DYN_STOP_LAT(x) (((x) & 0x1) << 9)
+#define G_00000D_VIP_MAX_DYN_STOP_LAT(x) (((x) >> 9) & 0x1)
+#define C_00000D_VIP_MAX_DYN_STOP_LAT 0xFFFFFDFF
+#define S_00000D_RE_MAX_DYN_STOP_LAT(x) (((x) & 0x1) << 10)
+#define G_00000D_RE_MAX_DYN_STOP_LAT(x) (((x) >> 10) & 0x1)
+#define C_00000D_RE_MAX_DYN_STOP_LAT 0xFFFFFBFF
+#define S_00000D_PB_MAX_DYN_STOP_LAT(x) (((x) & 0x1) << 11)
+#define G_00000D_PB_MAX_DYN_STOP_LAT(x) (((x) >> 11) & 0x1)
+#define C_00000D_PB_MAX_DYN_STOP_LAT 0xFFFFF7FF
+#define S_00000D_TAM_MAX_DYN_STOP_LAT(x) (((x) & 0x1) << 12)
+#define G_00000D_TAM_MAX_DYN_STOP_LAT(x) (((x) >> 12) & 0x1)
+#define C_00000D_TAM_MAX_DYN_STOP_LAT 0xFFFFEFFF
+#define S_00000D_TDM_MAX_DYN_STOP_LAT(x) (((x) & 0x1) << 13)
+#define G_00000D_TDM_MAX_DYN_STOP_LAT(x) (((x) >> 13) & 0x1)
+#define C_00000D_TDM_MAX_DYN_STOP_LAT 0xFFFFDFFF
+#define S_00000D_RB_MAX_DYN_STOP_LAT(x) (((x) & 0x1) << 14)
+#define G_00000D_RB_MAX_DYN_STOP_LAT(x) (((x) >> 14) & 0x1)
+#define C_00000D_RB_MAX_DYN_STOP_LAT 0xFFFFBFFF
+#define S_00000D_FORCE_DISP2(x) (((x) & 0x1) << 15)
+#define G_00000D_FORCE_DISP2(x) (((x) >> 15) & 0x1)
+#define C_00000D_FORCE_DISP2 0xFFFF7FFF
+#define S_00000D_FORCE_CP(x) (((x) & 0x1) << 16)
+#define G_00000D_FORCE_CP(x) (((x) >> 16) & 0x1)
+#define C_00000D_FORCE_CP 0xFFFEFFFF
+#define S_00000D_FORCE_HDP(x) (((x) & 0x1) << 17)
+#define G_00000D_FORCE_HDP(x) (((x) >> 17) & 0x1)
+#define C_00000D_FORCE_HDP 0xFFFDFFFF
+#define S_00000D_FORCE_DISP1(x) (((x) & 0x1) << 18)
+#define G_00000D_FORCE_DISP1(x) (((x) >> 18) & 0x1)
+#define C_00000D_FORCE_DISP1 0xFFFBFFFF
+#define S_00000D_FORCE_TOP(x) (((x) & 0x1) << 19)
+#define G_00000D_FORCE_TOP(x) (((x) >> 19) & 0x1)
+#define C_00000D_FORCE_TOP 0xFFF7FFFF
+#define S_00000D_FORCE_E2(x) (((x) & 0x1) << 20)
+#define G_00000D_FORCE_E2(x) (((x) >> 20) & 0x1)
+#define C_00000D_FORCE_E2 0xFFEFFFFF
+#define S_00000D_FORCE_SE(x) (((x) & 0x1) << 21)
+#define G_00000D_FORCE_SE(x) (((x) >> 21) & 0x1)
+#define C_00000D_FORCE_SE 0xFFDFFFFF
+#define S_00000D_FORCE_IDCT(x) (((x) & 0x1) << 22)
+#define G_00000D_FORCE_IDCT(x) (((x) >> 22) & 0x1)
+#define C_00000D_FORCE_IDCT 0xFFBFFFFF
+#define S_00000D_FORCE_VIP(x) (((x) & 0x1) << 23)
+#define G_00000D_FORCE_VIP(x) (((x) >> 23) & 0x1)
+#define C_00000D_FORCE_VIP 0xFF7FFFFF
+#define S_00000D_FORCE_RE(x) (((x) & 0x1) << 24)
+#define G_00000D_FORCE_RE(x) (((x) >> 24) & 0x1)
+#define C_00000D_FORCE_RE 0xFEFFFFFF
+#define S_00000D_FORCE_PB(x) (((x) & 0x1) << 25)
+#define G_00000D_FORCE_PB(x) (((x) >> 25) & 0x1)
+#define C_00000D_FORCE_PB 0xFDFFFFFF
+#define S_00000D_FORCE_TAM(x) (((x) & 0x1) << 26)
+#define G_00000D_FORCE_TAM(x) (((x) >> 26) & 0x1)
+#define C_00000D_FORCE_TAM 0xFBFFFFFF
+#define S_00000D_FORCE_TDM(x) (((x) & 0x1) << 27)
+#define G_00000D_FORCE_TDM(x) (((x) >> 27) & 0x1)
+#define C_00000D_FORCE_TDM 0xF7FFFFFF
+#define S_00000D_FORCE_RB(x) (((x) & 0x1) << 28)
+#define G_00000D_FORCE_RB(x) (((x) >> 28) & 0x1)
+#define C_00000D_FORCE_RB 0xEFFFFFFF
+#define S_00000D_FORCE_TV_SCLK(x) (((x) & 0x1) << 29)
+#define G_00000D_FORCE_TV_SCLK(x) (((x) >> 29) & 0x1)
+#define C_00000D_FORCE_TV_SCLK 0xDFFFFFFF
+#define S_00000D_FORCE_SUBPIC(x) (((x) & 0x1) << 30)
+#define G_00000D_FORCE_SUBPIC(x) (((x) >> 30) & 0x1)
+#define C_00000D_FORCE_SUBPIC 0xBFFFFFFF
+#define S_00000D_FORCE_OV0(x) (((x) & 0x1) << 31)
+#define G_00000D_FORCE_OV0(x) (((x) >> 31) & 0x1)
+#define C_00000D_FORCE_OV0 0x7FFFFFFF
+
#endif
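The S_/G_/C_ triplets above all follow one convention: S_ shifts a value into the field, G_ extracts it, and C_ is the complement mask used to clear the field before writing. A small standalone sketch of a read-modify-write with that pattern, using a made-up one-bit field and a plain variable in place of the real register access:

#include <stdint.h>
#include <stdio.h>

/* same shape as the r300d.h macros, for an invented 1-bit field at bit 16 */
#define S_DEMO_FORCE(x)		(((x) & 0x1) << 16)
#define G_DEMO_FORCE(x)		(((x) >> 16) & 0x1)
#define C_DEMO_FORCE		0xFFFEFFFF

int main(void)
{
	uint32_t reg = 0x12345678;	/* pretend this came from RREG32_PLL() */

	reg = (reg & C_DEMO_FORCE) | S_DEMO_FORCE(1);	/* clear, then set */
	printf("FORCE=%u reg=0x%08X\n", G_DEMO_FORCE(reg), reg);
	return 0;
}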
diff --git a/drivers/gpu/drm/radeon/r420.c b/drivers/gpu/drm/radeon/r420.c
index 49a2fdc57d2..1cefdbcc085 100644
--- a/drivers/gpu/drm/radeon/r420.c
+++ b/drivers/gpu/drm/radeon/r420.c
@@ -155,6 +155,9 @@ static void r420_debugfs(struct radeon_device *rdev)
static void r420_clock_resume(struct radeon_device *rdev)
{
u32 sclk_cntl;
+
+ if (radeon_dynclks != -1 && radeon_dynclks)
+ radeon_atom_set_clock_gating(rdev, 1);
sclk_cntl = RREG32_PLL(R_00000D_SCLK_CNTL);
sclk_cntl |= S_00000D_FORCE_CP(1) | S_00000D_FORCE_VIP(1);
if (rdev->family == CHIP_R420)
@@ -167,6 +170,8 @@ static int r420_startup(struct radeon_device *rdev)
int r;
r300_mc_program(rdev);
+ /* Resume clock */
+ r420_clock_resume(rdev);
/* Initialize GART (initialize after TTM so we can allocate
* memory through TTM but finalize after TTM) */
if (rdev->flags & RADEON_IS_PCIE) {
@@ -267,7 +272,6 @@ int r420_init(struct radeon_device *rdev)
{
int r;
- rdev->new_init_path = true;
/* Initialize scratch registers */
radeon_scratch_init(rdev);
/* Initialize surface registers */
@@ -307,6 +311,8 @@ int r420_init(struct radeon_device *rdev)
}
/* Initialize clocks */
radeon_get_clock_info(rdev->ddev);
+ /* Initialize power management */
+ radeon_pm_init(rdev);
/* Get vram informations */
r300_vram_info(rdev);
/* Initialize memory controller (also test AGP) */
diff --git a/drivers/gpu/drm/radeon/r420d.h b/drivers/gpu/drm/radeon/r420d.h
index a48a7db1e2a..fc78d31a0b4 100644
--- a/drivers/gpu/drm/radeon/r420d.h
+++ b/drivers/gpu/drm/radeon/r420d.h
@@ -212,9 +212,9 @@
#define S_00000D_FORCE_E2(x) (((x) & 0x1) << 20)
#define G_00000D_FORCE_E2(x) (((x) >> 20) & 0x1)
#define C_00000D_FORCE_E2 0xFFEFFFFF
-#define S_00000D_FORCE_SE(x) (((x) & 0x1) << 21)
-#define G_00000D_FORCE_SE(x) (((x) >> 21) & 0x1)
-#define C_00000D_FORCE_SE 0xFFDFFFFF
+#define S_00000D_FORCE_VAP(x) (((x) & 0x1) << 21)
+#define G_00000D_FORCE_VAP(x) (((x) >> 21) & 0x1)
+#define C_00000D_FORCE_VAP 0xFFDFFFFF
#define S_00000D_FORCE_IDCT(x) (((x) & 0x1) << 22)
#define G_00000D_FORCE_IDCT(x) (((x) >> 22) & 0x1)
#define C_00000D_FORCE_IDCT 0xFFBFFFFF
@@ -224,24 +224,24 @@
#define S_00000D_FORCE_RE(x) (((x) & 0x1) << 24)
#define G_00000D_FORCE_RE(x) (((x) >> 24) & 0x1)
#define C_00000D_FORCE_RE 0xFEFFFFFF
-#define S_00000D_FORCE_PB(x) (((x) & 0x1) << 25)
-#define G_00000D_FORCE_PB(x) (((x) >> 25) & 0x1)
-#define C_00000D_FORCE_PB 0xFDFFFFFF
+#define S_00000D_FORCE_SR(x) (((x) & 0x1) << 25)
+#define G_00000D_FORCE_SR(x) (((x) >> 25) & 0x1)
+#define C_00000D_FORCE_SR 0xFDFFFFFF
#define S_00000D_FORCE_PX(x) (((x) & 0x1) << 26)
#define G_00000D_FORCE_PX(x) (((x) >> 26) & 0x1)
#define C_00000D_FORCE_PX 0xFBFFFFFF
#define S_00000D_FORCE_TX(x) (((x) & 0x1) << 27)
#define G_00000D_FORCE_TX(x) (((x) >> 27) & 0x1)
#define C_00000D_FORCE_TX 0xF7FFFFFF
-#define S_00000D_FORCE_RB(x) (((x) & 0x1) << 28)
-#define G_00000D_FORCE_RB(x) (((x) >> 28) & 0x1)
-#define C_00000D_FORCE_RB 0xEFFFFFFF
+#define S_00000D_FORCE_US(x) (((x) & 0x1) << 28)
+#define G_00000D_FORCE_US(x) (((x) >> 28) & 0x1)
+#define C_00000D_FORCE_US 0xEFFFFFFF
#define S_00000D_FORCE_TV_SCLK(x) (((x) & 0x1) << 29)
#define G_00000D_FORCE_TV_SCLK(x) (((x) >> 29) & 0x1)
#define C_00000D_FORCE_TV_SCLK 0xDFFFFFFF
-#define S_00000D_FORCE_SUBPIC(x) (((x) & 0x1) << 30)
-#define G_00000D_FORCE_SUBPIC(x) (((x) >> 30) & 0x1)
-#define C_00000D_FORCE_SUBPIC 0xBFFFFFFF
+#define S_00000D_FORCE_SU(x) (((x) & 0x1) << 30)
+#define G_00000D_FORCE_SU(x) (((x) >> 30) & 0x1)
+#define C_00000D_FORCE_SU 0xBFFFFFFF
#define S_00000D_FORCE_OV0(x) (((x) & 0x1) << 31)
#define G_00000D_FORCE_OV0(x) (((x) >> 31) & 0x1)
#define C_00000D_FORCE_OV0 0x7FFFFFFF
diff --git a/drivers/gpu/drm/radeon/r500_reg.h b/drivers/gpu/drm/radeon/r500_reg.h
index 868add6e166..7baa7395556 100644
--- a/drivers/gpu/drm/radeon/r500_reg.h
+++ b/drivers/gpu/drm/radeon/r500_reg.h
@@ -384,9 +384,16 @@
# define AVIVO_D1GRPH_TILED (1 << 20)
# define AVIVO_D1GRPH_MACRO_ADDRESS_MODE (1 << 21)
+/* The R7xx *_HIGH surface regs are backwards; the D1 regs are in the D2
+ * block and vice versa. This applies to GRPH, CUR, etc.
+ */
#define AVIVO_D1GRPH_LUT_SEL 0x6108
#define AVIVO_D1GRPH_PRIMARY_SURFACE_ADDRESS 0x6110
+#define R700_D1GRPH_PRIMARY_SURFACE_ADDRESS_HIGH 0x6914
+#define R700_D2GRPH_PRIMARY_SURFACE_ADDRESS_HIGH 0x6114
#define AVIVO_D1GRPH_SECONDARY_SURFACE_ADDRESS 0x6118
+#define R700_D1GRPH_SECONDARY_SURFACE_ADDRESS_HIGH 0x691c
+#define R700_D2GRPH_SECONDARY_SURFACE_ADDRESS_HIGH 0x611c
#define AVIVO_D1GRPH_PITCH 0x6120
#define AVIVO_D1GRPH_SURFACE_OFFSET_X 0x6124
#define AVIVO_D1GRPH_SURFACE_OFFSET_Y 0x6128
@@ -404,6 +411,8 @@
# define AVIVO_D1CURSOR_MODE_MASK (3 << 8)
# define AVIVO_D1CURSOR_MODE_24BPP 2
#define AVIVO_D1CUR_SURFACE_ADDRESS 0x6408
+#define R700_D1CUR_SURFACE_ADDRESS_HIGH 0x6c0c
+#define R700_D2CUR_SURFACE_ADDRESS_HIGH 0x640c
#define AVIVO_D1CUR_SIZE 0x6410
#define AVIVO_D1CUR_POSITION 0x6414
#define AVIVO_D1CUR_HOT_SPOT 0x6418
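A short illustration of the swap documented above: when programming the 64-bit surface address for CRTC 0 on R7xx, the register carrying the D1 name is the one to use even though its offset lands in the D2 range. The helper below is hypothetical and only demonstrates the selection:

#include <stdint.h>
#include <stdio.h>

#define R700_D1GRPH_PRIMARY_SURFACE_ADDRESS_HIGH 0x6914
#define R700_D2GRPH_PRIMARY_SURFACE_ADDRESS_HIGH 0x6114

/* hypothetical helper: pick the *_HIGH register for a given CRTC */
static uint32_t r700_grph_primary_high(int crtc_id)
{
	/* D1/D2 HIGH offsets look swapped relative to the base AVIVO regs */
	return crtc_id ? R700_D2GRPH_PRIMARY_SURFACE_ADDRESS_HIGH
		       : R700_D1GRPH_PRIMARY_SURFACE_ADDRESS_HIGH;
}

int main(void)
{
	printf("crtc0 HIGH reg 0x%04X, crtc1 HIGH reg 0x%04X\n",
	       (unsigned)r700_grph_primary_high(0),
	       (unsigned)r700_grph_primary_high(1));
	return 0;
}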
diff --git a/drivers/gpu/drm/radeon/r520.c b/drivers/gpu/drm/radeon/r520.c
index 0bf13fccdaf..f7435185c0a 100644
--- a/drivers/gpu/drm/radeon/r520.c
+++ b/drivers/gpu/drm/radeon/r520.c
@@ -186,7 +186,7 @@ static int r520_startup(struct radeon_device *rdev)
}
/* Enable IRQ */
rdev->irq.sw_int = true;
- r100_irq_set(rdev);
+ rs600_irq_set(rdev);
/* 1M ring buffer */
r = r100_cp_init(rdev, 1024 * 1024);
if (r) {
@@ -228,7 +228,6 @@ int r520_init(struct radeon_device *rdev)
{
int r;
- rdev->new_init_path = true;
/* Initialize scratch registers */
radeon_scratch_init(rdev);
/* Initialize surface registers */
@@ -261,6 +260,8 @@ int r520_init(struct radeon_device *rdev)
}
/* Initialize clocks */
radeon_get_clock_info(rdev->ddev);
+ /* Initialize power management */
+ radeon_pm_init(rdev);
/* Get vram informations */
r520_vram_info(rdev);
/* Initialize memory controller (also test AGP) */
diff --git a/drivers/gpu/drm/radeon/r600.c b/drivers/gpu/drm/radeon/r600.c
index 2e4e60edbff..278f646bc18 100644
--- a/drivers/gpu/drm/radeon/r600.c
+++ b/drivers/gpu/drm/radeon/r600.c
@@ -65,16 +65,11 @@ MODULE_FIRMWARE("radeon/RV710_me.bin");
int r600_debugfs_mc_info_init(struct radeon_device *rdev);
-/* This files gather functions specifics to:
- * r600,rv610,rv630,rv620,rv635,rv670
- *
- * Some of these functions might be used by newer ASICs.
- */
+/* r600,rv610,rv630,rv620,rv635,rv670 */
int r600_mc_wait_for_idle(struct radeon_device *rdev);
void r600_gpu_init(struct radeon_device *rdev);
void r600_fini(struct radeon_device *rdev);
-
/*
* R600 PCIE GART
*/
@@ -168,7 +163,7 @@ int r600_pcie_gart_enable(struct radeon_device *rdev)
WREG32(MC_VM_L1_TLB_MCB_RD_SEM_CNTL, tmp | ENABLE_SEMAPHORE_MODE);
WREG32(MC_VM_L1_TLB_MCB_WR_SEM_CNTL, tmp | ENABLE_SEMAPHORE_MODE);
WREG32(VM_CONTEXT0_PAGE_TABLE_START_ADDR, rdev->mc.gtt_start >> 12);
- WREG32(VM_CONTEXT0_PAGE_TABLE_END_ADDR, (rdev->mc.gtt_end - 1) >> 12);
+ WREG32(VM_CONTEXT0_PAGE_TABLE_END_ADDR, rdev->mc.gtt_end >> 12);
WREG32(VM_CONTEXT0_PAGE_TABLE_BASE_ADDR, rdev->gart.table_addr >> 12);
WREG32(VM_CONTEXT0_CNTL, ENABLE_CONTEXT | PAGE_TABLE_DEPTH(0) |
RANGE_PROTECTION_FAULT_ENABLE_DEFAULT);
@@ -225,6 +220,40 @@ void r600_pcie_gart_fini(struct radeon_device *rdev)
radeon_gart_fini(rdev);
}
+void r600_agp_enable(struct radeon_device *rdev)
+{
+ u32 tmp;
+ int i;
+
+ /* Setup L2 cache */
+ WREG32(VM_L2_CNTL, ENABLE_L2_CACHE | ENABLE_L2_FRAGMENT_PROCESSING |
+ ENABLE_L2_PTE_CACHE_LRU_UPDATE_BY_WRITE |
+ EFFECTIVE_L2_QUEUE_SIZE(7));
+ WREG32(VM_L2_CNTL2, 0);
+ WREG32(VM_L2_CNTL3, BANK_SELECT_0(0) | BANK_SELECT_1(1));
+ /* Setup TLB control */
+ tmp = ENABLE_L1_TLB | ENABLE_L1_FRAGMENT_PROCESSING |
+ SYSTEM_ACCESS_MODE_NOT_IN_SYS |
+ EFFECTIVE_L1_TLB_SIZE(5) | EFFECTIVE_L1_QUEUE_SIZE(5) |
+ ENABLE_WAIT_L2_QUERY;
+ WREG32(MC_VM_L1_TLB_MCB_RD_SYS_CNTL, tmp);
+ WREG32(MC_VM_L1_TLB_MCB_WR_SYS_CNTL, tmp);
+ WREG32(MC_VM_L1_TLB_MCB_RD_HDP_CNTL, tmp | ENABLE_L1_STRICT_ORDERING);
+ WREG32(MC_VM_L1_TLB_MCB_WR_HDP_CNTL, tmp);
+ WREG32(MC_VM_L1_TLB_MCD_RD_A_CNTL, tmp);
+ WREG32(MC_VM_L1_TLB_MCD_WR_A_CNTL, tmp);
+ WREG32(MC_VM_L1_TLB_MCD_RD_B_CNTL, tmp);
+ WREG32(MC_VM_L1_TLB_MCD_WR_B_CNTL, tmp);
+ WREG32(MC_VM_L1_TLB_MCB_RD_GFX_CNTL, tmp);
+ WREG32(MC_VM_L1_TLB_MCB_WR_GFX_CNTL, tmp);
+ WREG32(MC_VM_L1_TLB_MCB_RD_PDMA_CNTL, tmp);
+ WREG32(MC_VM_L1_TLB_MCB_WR_PDMA_CNTL, tmp);
+ WREG32(MC_VM_L1_TLB_MCB_RD_SEM_CNTL, tmp | ENABLE_SEMAPHORE_MODE);
+ WREG32(MC_VM_L1_TLB_MCB_WR_SEM_CNTL, tmp | ENABLE_SEMAPHORE_MODE);
+ for (i = 0; i < 7; i++)
+ WREG32(VM_CONTEXT0_CNTL + (i * 4), 0);
+}
+
int r600_mc_wait_for_idle(struct radeon_device *rdev)
{
unsigned i;
@@ -240,14 +269,9 @@ int r600_mc_wait_for_idle(struct radeon_device *rdev)
return -1;
}
-static void r600_mc_resume(struct radeon_device *rdev)
+static void r600_mc_program(struct radeon_device *rdev)
{
- u32 d1vga_control, d2vga_control;
- u32 vga_render_control, vga_hdp_control;
- u32 d1crtc_control, d2crtc_control;
- u32 new_d1grph_primary, new_d1grph_secondary;
- u32 new_d2grph_primary, new_d2grph_secondary;
- u64 old_vram_start;
+ struct rv515_mc_save save;
u32 tmp;
int i, j;
@@ -261,85 +285,51 @@ static void r600_mc_resume(struct radeon_device *rdev)
}
WREG32(HDP_REG_COHERENCY_FLUSH_CNTL, 0);
- d1vga_control = RREG32(D1VGA_CONTROL);
- d2vga_control = RREG32(D2VGA_CONTROL);
- vga_render_control = RREG32(VGA_RENDER_CONTROL);
- vga_hdp_control = RREG32(VGA_HDP_CONTROL);
- d1crtc_control = RREG32(D1CRTC_CONTROL);
- d2crtc_control = RREG32(D2CRTC_CONTROL);
- old_vram_start = (u64)(RREG32(MC_VM_FB_LOCATION) & 0xFFFF) << 24;
- new_d1grph_primary = RREG32(D1GRPH_PRIMARY_SURFACE_ADDRESS);
- new_d1grph_secondary = RREG32(D1GRPH_SECONDARY_SURFACE_ADDRESS);
- new_d1grph_primary += rdev->mc.vram_start - old_vram_start;
- new_d1grph_secondary += rdev->mc.vram_start - old_vram_start;
- new_d2grph_primary = RREG32(D2GRPH_PRIMARY_SURFACE_ADDRESS);
- new_d2grph_secondary = RREG32(D2GRPH_SECONDARY_SURFACE_ADDRESS);
- new_d2grph_primary += rdev->mc.vram_start - old_vram_start;
- new_d2grph_secondary += rdev->mc.vram_start - old_vram_start;
-
- /* Stop all video */
- WREG32(D1VGA_CONTROL, 0);
- WREG32(D2VGA_CONTROL, 0);
- WREG32(VGA_RENDER_CONTROL, 0);
- WREG32(D1CRTC_UPDATE_LOCK, 1);
- WREG32(D2CRTC_UPDATE_LOCK, 1);
- WREG32(D1CRTC_CONTROL, 0);
- WREG32(D2CRTC_CONTROL, 0);
- WREG32(D1CRTC_UPDATE_LOCK, 0);
- WREG32(D2CRTC_UPDATE_LOCK, 0);
-
- mdelay(1);
+ rv515_mc_stop(rdev, &save);
if (r600_mc_wait_for_idle(rdev)) {
- printk(KERN_WARNING "[drm] MC not idle !\n");
+ dev_warn(rdev->dev, "Wait for MC idle timedout !\n");
}
-
- /* Lockout access through VGA aperture*/
+ /* Lockout access through VGA aperture (doesn't exist before R600) */
WREG32(VGA_HDP_CONTROL, VGA_MEMORY_DISABLE);
-
/* Update configuration */
- WREG32(MC_VM_SYSTEM_APERTURE_LOW_ADDR, rdev->mc.vram_start >> 12);
- WREG32(MC_VM_SYSTEM_APERTURE_HIGH_ADDR, (rdev->mc.vram_end - 1) >> 12);
+ if (rdev->flags & RADEON_IS_AGP) {
+ if (rdev->mc.vram_start < rdev->mc.gtt_start) {
+ /* VRAM before AGP */
+ WREG32(MC_VM_SYSTEM_APERTURE_LOW_ADDR,
+ rdev->mc.vram_start >> 12);
+ WREG32(MC_VM_SYSTEM_APERTURE_HIGH_ADDR,
+ rdev->mc.gtt_end >> 12);
+ } else {
+ /* VRAM after AGP */
+ WREG32(MC_VM_SYSTEM_APERTURE_LOW_ADDR,
+ rdev->mc.gtt_start >> 12);
+ WREG32(MC_VM_SYSTEM_APERTURE_HIGH_ADDR,
+ rdev->mc.vram_end >> 12);
+ }
+ } else {
+ WREG32(MC_VM_SYSTEM_APERTURE_LOW_ADDR, rdev->mc.vram_start >> 12);
+ WREG32(MC_VM_SYSTEM_APERTURE_HIGH_ADDR, rdev->mc.vram_end >> 12);
+ }
WREG32(MC_VM_SYSTEM_APERTURE_DEFAULT_ADDR, 0);
- tmp = (((rdev->mc.vram_end - 1) >> 24) & 0xFFFF) << 16;
+ tmp = ((rdev->mc.vram_end >> 24) & 0xFFFF) << 16;
tmp |= ((rdev->mc.vram_start >> 24) & 0xFFFF);
WREG32(MC_VM_FB_LOCATION, tmp);
WREG32(HDP_NONSURFACE_BASE, (rdev->mc.vram_start >> 8));
WREG32(HDP_NONSURFACE_INFO, (2 << 7));
- WREG32(HDP_NONSURFACE_SIZE, (rdev->mc.mc_vram_size - 1) | 0x3FF);
+ WREG32(HDP_NONSURFACE_SIZE, rdev->mc.mc_vram_size | 0x3FF);
if (rdev->flags & RADEON_IS_AGP) {
- WREG32(MC_VM_AGP_TOP, (rdev->mc.gtt_end - 1) >> 16);
- WREG32(MC_VM_AGP_BOT, rdev->mc.gtt_start >> 16);
+ WREG32(MC_VM_AGP_TOP, rdev->mc.gtt_end >> 22);
+ WREG32(MC_VM_AGP_BOT, rdev->mc.gtt_start >> 22);
WREG32(MC_VM_AGP_BASE, rdev->mc.agp_base >> 22);
} else {
WREG32(MC_VM_AGP_BASE, 0);
WREG32(MC_VM_AGP_TOP, 0x0FFFFFFF);
WREG32(MC_VM_AGP_BOT, 0x0FFFFFFF);
}
- WREG32(D1GRPH_PRIMARY_SURFACE_ADDRESS, new_d1grph_primary);
- WREG32(D1GRPH_SECONDARY_SURFACE_ADDRESS, new_d1grph_secondary);
- WREG32(D2GRPH_PRIMARY_SURFACE_ADDRESS, new_d2grph_primary);
- WREG32(D2GRPH_SECONDARY_SURFACE_ADDRESS, new_d2grph_secondary);
- WREG32(VGA_MEMORY_BASE_ADDRESS, rdev->mc.vram_start);
-
- /* Unlock host access */
- WREG32(VGA_HDP_CONTROL, vga_hdp_control);
-
- mdelay(1);
if (r600_mc_wait_for_idle(rdev)) {
- printk(KERN_WARNING "[drm] MC not idle !\n");
+ dev_warn(rdev->dev, "Wait for MC idle timedout !\n");
}
-
- /* Restore video state */
- WREG32(D1CRTC_UPDATE_LOCK, 1);
- WREG32(D2CRTC_UPDATE_LOCK, 1);
- WREG32(D1CRTC_CONTROL, d1crtc_control);
- WREG32(D2CRTC_CONTROL, d2crtc_control);
- WREG32(D1CRTC_UPDATE_LOCK, 0);
- WREG32(D2CRTC_UPDATE_LOCK, 0);
- WREG32(D1VGA_CONTROL, d1vga_control);
- WREG32(D2VGA_CONTROL, d2vga_control);
- WREG32(VGA_RENDER_CONTROL, vga_render_control);
-
+ rv515_mc_resume(rdev, &save);
/* we need to own VRAM, so turn off the VGA renderer here
* to stop it overwriting our objects */
rv515_vga_render_disable(rdev);
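The aperture update above must cover both VRAM and the AGP window, so the low/high bounds depend on which range comes first in the GPU address space. A standalone sketch of that selection with made-up addresses (register writes are replaced by printing the values):

#include <stdint.h>
#include <stdio.h>

int main(void)
{
	uint64_t vram_start = 0x00000000, vram_end = 0x1FFFFFFF;
	uint64_t gtt_start  = 0x20000000, gtt_end  = 0x2FFFFFFF;
	uint64_t low, high;

	if (vram_start < gtt_start) {		/* VRAM before AGP */
		low  = vram_start >> 12;
		high = gtt_end >> 12;
	} else {				/* VRAM after AGP */
		low  = gtt_start >> 12;
		high = vram_end >> 12;
	}
	printf("SYSTEM_APERTURE_LOW=0x%llX SYSTEM_APERTURE_HIGH=0x%llX\n",
	       (unsigned long long)low, (unsigned long long)high);
	return 0;
}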
@@ -349,11 +339,10 @@ int r600_mc_init(struct radeon_device *rdev)
{
fixed20_12 a;
u32 tmp;
- int chansize;
+ int chansize, numchan;
int r;
/* Get VRAM informations */
- rdev->mc.vram_width = 128;
rdev->mc.vram_is_ddr = true;
tmp = RREG32(RAMCFG);
if (tmp & CHANSIZE_OVERRIDE) {
@@ -363,17 +352,23 @@ int r600_mc_init(struct radeon_device *rdev)
} else {
chansize = 32;
}
- if (rdev->family == CHIP_R600) {
- rdev->mc.vram_width = 8 * chansize;
- } else if (rdev->family == CHIP_RV670) {
- rdev->mc.vram_width = 4 * chansize;
- } else if ((rdev->family == CHIP_RV610) ||
- (rdev->family == CHIP_RV620)) {
- rdev->mc.vram_width = chansize;
- } else if ((rdev->family == CHIP_RV630) ||
- (rdev->family == CHIP_RV635)) {
- rdev->mc.vram_width = 2 * chansize;
+ tmp = RREG32(CHMAP);
+ switch ((tmp & NOOFCHAN_MASK) >> NOOFCHAN_SHIFT) {
+ case 0:
+ default:
+ numchan = 1;
+ break;
+ case 1:
+ numchan = 2;
+ break;
+ case 2:
+ numchan = 4;
+ break;
+ case 3:
+ numchan = 8;
+ break;
}
+ rdev->mc.vram_width = numchan * chansize;
/* Could aper size report 0 ? */
rdev->mc.aper_base = drm_get_resource_start(rdev->ddev, 0);
rdev->mc.aper_size = drm_get_resource_len(rdev->ddev, 0);
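The width computation above no longer hard-codes the bus width per family; it multiplies the per-channel width from RAMCFG by the channel count decoded from CHMAP. A standalone sketch of the same arithmetic with fabricated register values:

#include <stdint.h>
#include <stdio.h>

#define NOOFCHAN_SHIFT 12
#define NOOFCHAN_MASK  0x00003000

int main(void)
{
	uint32_t chmap = 0x00002000;	/* pretend RREG32(CHMAP) returned this */
	int chansize = 64;		/* bits per channel, from RAMCFG */
	int numchan;

	switch ((chmap & NOOFCHAN_MASK) >> NOOFCHAN_SHIFT) {
	case 1:  numchan = 2; break;
	case 2:  numchan = 4; break;
	case 3:  numchan = 8; break;
	default: numchan = 1; break;
	}
	printf("vram_width = %d bits\n", numchan * chansize);
	return 0;
}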
@@ -414,40 +409,34 @@ int r600_mc_init(struct radeon_device *rdev)
rdev->mc.gtt_location = rdev->mc.mc_vram_size;
}
} else {
- if (rdev->family == CHIP_RS780 || rdev->family == CHIP_RS880) {
- rdev->mc.vram_location = (RREG32(MC_VM_FB_LOCATION) &
- 0xFFFF) << 24;
- rdev->mc.gtt_size = radeon_gart_size * 1024 * 1024;
- tmp = rdev->mc.vram_location + rdev->mc.mc_vram_size;
- if ((0xFFFFFFFFUL - tmp) >= rdev->mc.gtt_size) {
- /* Enough place after vram */
- rdev->mc.gtt_location = tmp;
- } else if (rdev->mc.vram_location >= rdev->mc.gtt_size) {
- /* Enough place before vram */
+ rdev->mc.gtt_size = radeon_gart_size * 1024 * 1024;
+ rdev->mc.vram_location = (RREG32(MC_VM_FB_LOCATION) &
+ 0xFFFF) << 24;
+ tmp = rdev->mc.vram_location + rdev->mc.mc_vram_size;
+ if ((0xFFFFFFFFUL - tmp) >= rdev->mc.gtt_size) {
+ /* Enough place after vram */
+ rdev->mc.gtt_location = tmp;
+ } else if (rdev->mc.vram_location >= rdev->mc.gtt_size) {
+ /* Enough place before vram */
+ rdev->mc.gtt_location = 0;
+ } else {
+ /* Not enough place after or before shrink
+ * gart size
+ */
+ if (rdev->mc.vram_location > (0xFFFFFFFFUL - tmp)) {
rdev->mc.gtt_location = 0;
+ rdev->mc.gtt_size = rdev->mc.vram_location;
} else {
- /* Not enough place after or before shrink
- * gart size
- */
- if (rdev->mc.vram_location > (0xFFFFFFFFUL - tmp)) {
- rdev->mc.gtt_location = 0;
- rdev->mc.gtt_size = rdev->mc.vram_location;
- } else {
- rdev->mc.gtt_location = tmp;
- rdev->mc.gtt_size = 0xFFFFFFFFUL - tmp;
- }
+ rdev->mc.gtt_location = tmp;
+ rdev->mc.gtt_size = 0xFFFFFFFFUL - tmp;
}
- rdev->mc.gtt_location = rdev->mc.mc_vram_size;
- } else {
- rdev->mc.vram_location = 0x00000000UL;
- rdev->mc.gtt_location = rdev->mc.mc_vram_size;
- rdev->mc.gtt_size = radeon_gart_size * 1024 * 1024;
}
+ rdev->mc.gtt_location = rdev->mc.mc_vram_size;
}
rdev->mc.vram_start = rdev->mc.vram_location;
- rdev->mc.vram_end = rdev->mc.vram_location + rdev->mc.mc_vram_size;
+ rdev->mc.vram_end = rdev->mc.vram_location + rdev->mc.mc_vram_size - 1;
rdev->mc.gtt_start = rdev->mc.gtt_location;
- rdev->mc.gtt_end = rdev->mc.gtt_location + rdev->mc.gtt_size;
+ rdev->mc.gtt_end = rdev->mc.gtt_location + rdev->mc.gtt_size - 1;
/* FIXME: we should enforce default clock in case GPU is not in
* default setup
*/
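The placement logic above keeps the GART aperture inside the 32-bit GPU address space: after VRAM if it fits, before VRAM otherwise, and shrunk as a last resort. A simplified standalone rendering of that decision with invented sizes and without the kernel structures:

#include <stdint.h>
#include <stdio.h>

int main(void)
{
	uint64_t vram_location = 0xC0000000ULL;
	uint64_t vram_size     = 512ULL << 20;
	uint64_t gtt_size      = 512ULL << 20;
	uint64_t gtt_location;
	uint64_t after_vram    = vram_location + vram_size;

	if ((0xFFFFFFFFULL - after_vram) >= gtt_size) {
		gtt_location = after_vram;	/* enough room after VRAM */
	} else if (vram_location >= gtt_size) {
		gtt_location = 0;		/* enough room before VRAM */
	} else if (vram_location > (0xFFFFFFFFULL - after_vram)) {
		gtt_location = 0;		/* shrink GART, keep it before VRAM */
		gtt_size = vram_location;
	} else {
		gtt_location = after_vram;	/* shrink GART, keep it after VRAM */
		gtt_size = 0xFFFFFFFFULL - after_vram;
	}
	printf("GTT at 0x%llX, size %llu MiB\n",
	       (unsigned long long)gtt_location,
	       (unsigned long long)(gtt_size >> 20));
	return 0;
}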
@@ -463,6 +452,7 @@ int r600_mc_init(struct radeon_device *rdev)
*/
int r600_gpu_soft_reset(struct radeon_device *rdev)
{
+ struct rv515_mc_save save;
u32 grbm_busy_mask = S_008010_VC_BUSY(1) | S_008010_VGT_BUSY_NO_DMA(1) |
S_008010_VGT_BUSY(1) | S_008010_TA03_BUSY(1) |
S_008010_TC_BUSY(1) | S_008010_SX_BUSY(1) |
@@ -480,13 +470,25 @@ int r600_gpu_soft_reset(struct radeon_device *rdev)
S_008014_CB0_BUSY(1) | S_008014_CB1_BUSY(1) |
S_008014_CB2_BUSY(1) | S_008014_CB3_BUSY(1);
u32 srbm_reset = 0;
+ u32 tmp;
+ dev_info(rdev->dev, "GPU softreset \n");
+ dev_info(rdev->dev, " R_008010_GRBM_STATUS=0x%08X\n",
+ RREG32(R_008010_GRBM_STATUS));
+ dev_info(rdev->dev, " R_008014_GRBM_STATUS2=0x%08X\n",
+ RREG32(R_008014_GRBM_STATUS2));
+ dev_info(rdev->dev, " R_000E50_SRBM_STATUS=0x%08X\n",
+ RREG32(R_000E50_SRBM_STATUS));
+ rv515_mc_stop(rdev, &save);
+ if (r600_mc_wait_for_idle(rdev)) {
+ dev_warn(rdev->dev, "Wait for MC idle timedout !\n");
+ }
/* Disable CP parsing/prefetching */
WREG32(R_0086D8_CP_ME_CNTL, S_0086D8_CP_ME_HALT(0xff));
/* Check if any of the rendering block is busy and reset it */
if ((RREG32(R_008010_GRBM_STATUS) & grbm_busy_mask) ||
(RREG32(R_008014_GRBM_STATUS2) & grbm2_busy_mask)) {
- WREG32(R_008020_GRBM_SOFT_RESET, S_008020_SOFT_RESET_CR(1) |
+ tmp = S_008020_SOFT_RESET_CR(1) |
S_008020_SOFT_RESET_DB(1) |
S_008020_SOFT_RESET_CB(1) |
S_008020_SOFT_RESET_PA(1) |
@@ -498,14 +500,18 @@ int r600_gpu_soft_reset(struct radeon_device *rdev)
S_008020_SOFT_RESET_TC(1) |
S_008020_SOFT_RESET_TA(1) |
S_008020_SOFT_RESET_VC(1) |
- S_008020_SOFT_RESET_VGT(1));
+ S_008020_SOFT_RESET_VGT(1);
+ dev_info(rdev->dev, " R_008020_GRBM_SOFT_RESET=0x%08X\n", tmp);
+ WREG32(R_008020_GRBM_SOFT_RESET, tmp);
(void)RREG32(R_008020_GRBM_SOFT_RESET);
udelay(50);
WREG32(R_008020_GRBM_SOFT_RESET, 0);
(void)RREG32(R_008020_GRBM_SOFT_RESET);
}
/* Reset CP (we always reset CP) */
- WREG32(R_008020_GRBM_SOFT_RESET, S_008020_SOFT_RESET_CP(1));
+ tmp = S_008020_SOFT_RESET_CP(1);
+ dev_info(rdev->dev, "R_008020_GRBM_SOFT_RESET=0x%08X\n", tmp);
+ WREG32(R_008020_GRBM_SOFT_RESET, tmp);
(void)RREG32(R_008020_GRBM_SOFT_RESET);
udelay(50);
WREG32(R_008020_GRBM_SOFT_RESET, 0);
@@ -533,6 +539,14 @@ int r600_gpu_soft_reset(struct radeon_device *rdev)
srbm_reset |= S_000E60_SOFT_RESET_RLC(1);
if (G_000E50_SEM_BUSY(RREG32(R_000E50_SRBM_STATUS)))
srbm_reset |= S_000E60_SOFT_RESET_SEM(1);
+ if (G_000E50_BIF_BUSY(RREG32(R_000E50_SRBM_STATUS)))
+ srbm_reset |= S_000E60_SOFT_RESET_BIF(1);
+ dev_info(rdev->dev, " R_000E60_SRBM_SOFT_RESET=0x%08X\n", srbm_reset);
+ WREG32(R_000E60_SRBM_SOFT_RESET, srbm_reset);
+ (void)RREG32(R_000E60_SRBM_SOFT_RESET);
+ udelay(50);
+ WREG32(R_000E60_SRBM_SOFT_RESET, 0);
+ (void)RREG32(R_000E60_SRBM_SOFT_RESET);
WREG32(R_000E60_SRBM_SOFT_RESET, srbm_reset);
(void)RREG32(R_000E60_SRBM_SOFT_RESET);
udelay(50);
@@ -540,6 +554,17 @@ int r600_gpu_soft_reset(struct radeon_device *rdev)
(void)RREG32(R_000E60_SRBM_SOFT_RESET);
/* Wait a little for things to settle down */
udelay(50);
+ dev_info(rdev->dev, " R_008010_GRBM_STATUS=0x%08X\n",
+ RREG32(R_008010_GRBM_STATUS));
+ dev_info(rdev->dev, " R_008014_GRBM_STATUS2=0x%08X\n",
+ RREG32(R_008014_GRBM_STATUS2));
+ dev_info(rdev->dev, " R_000E50_SRBM_STATUS=0x%08X\n",
+ RREG32(R_000E50_SRBM_STATUS));
+ /* After reset we need to reinit the asic as the GPU often ends up in an
+ * incoherent state.
+ */
+ atom_asic_init(rdev->mode_info.atom_context);
+ rv515_mc_resume(rdev, &save);
return 0;
}
@@ -833,7 +858,8 @@ void r600_gpu_init(struct radeon_device *rdev)
((rdev->family) == CHIP_RV630) ||
((rdev->family) == CHIP_RV610) ||
((rdev->family) == CHIP_RV620) ||
- ((rdev->family) == CHIP_RS780)) {
+ ((rdev->family) == CHIP_RS780) ||
+ ((rdev->family) == CHIP_RS880)) {
WREG32(DB_DEBUG, PREZ_MUST_WAIT_FOR_POSTZ_DONE);
} else {
WREG32(DB_DEBUG, 0);
@@ -850,7 +876,8 @@ void r600_gpu_init(struct radeon_device *rdev)
tmp = RREG32(SQ_MS_FIFO_SIZES);
if (((rdev->family) == CHIP_RV610) ||
((rdev->family) == CHIP_RV620) ||
- ((rdev->family) == CHIP_RS780)) {
+ ((rdev->family) == CHIP_RS780) ||
+ ((rdev->family) == CHIP_RS880)) {
tmp = (CACHE_FIFO_SIZE(0xa) |
FETCH_FIFO_HIWATER(0xa) |
DONE_FIFO_HIWATER(0xe0) |
@@ -893,7 +920,8 @@ void r600_gpu_init(struct radeon_device *rdev)
NUM_ES_STACK_ENTRIES(0));
} else if (((rdev->family) == CHIP_RV610) ||
((rdev->family) == CHIP_RV620) ||
- ((rdev->family) == CHIP_RS780)) {
+ ((rdev->family) == CHIP_RS780) ||
+ ((rdev->family) == CHIP_RS880)) {
/* no vertex cache */
sq_config &= ~VC_ENABLE;
@@ -950,7 +978,8 @@ void r600_gpu_init(struct radeon_device *rdev)
if (((rdev->family) == CHIP_RV610) ||
((rdev->family) == CHIP_RV620) ||
- ((rdev->family) == CHIP_RS780)) {
+ ((rdev->family) == CHIP_RS780) ||
+ ((rdev->family) == CHIP_RS880)) {
WREG32(VGT_CACHE_INVALIDATION, CACHE_INVALIDATION(TC_ONLY));
} else {
WREG32(VGT_CACHE_INVALIDATION, CACHE_INVALIDATION(VC_AND_TC));
@@ -976,8 +1005,9 @@ void r600_gpu_init(struct radeon_device *rdev)
tmp = rdev->config.r600.max_pipes * 16;
switch (rdev->family) {
case CHIP_RV610:
- case CHIP_RS780:
case CHIP_RV620:
+ case CHIP_RS780:
+ case CHIP_RS880:
tmp += 32;
break;
case CHIP_RV670:
@@ -1018,8 +1048,9 @@ void r600_gpu_init(struct radeon_device *rdev)
switch (rdev->family) {
case CHIP_RV610:
- case CHIP_RS780:
case CHIP_RV620:
+ case CHIP_RS780:
+ case CHIP_RS880:
tmp = TC_L2_SIZE(8);
break;
case CHIP_RV630:
@@ -1241,19 +1272,17 @@ int r600_cp_resume(struct radeon_device *rdev)
/* Set ring buffer size */
rb_bufsz = drm_order(rdev->cp.ring_size / 8);
+ tmp = RB_NO_UPDATE | (drm_order(RADEON_GPU_PAGE_SIZE/8) << 8) | rb_bufsz;
#ifdef __BIG_ENDIAN
- WREG32(CP_RB_CNTL, BUF_SWAP_32BIT | RB_NO_UPDATE |
- (drm_order(4096/8) << 8) | rb_bufsz);
-#else
- WREG32(CP_RB_CNTL, RB_NO_UPDATE | (drm_order(4096/8) << 8) | rb_bufsz);
+ tmp |= BUF_SWAP_32BIT;
#endif
+ WREG32(CP_RB_CNTL, tmp);
WREG32(CP_SEM_WAIT_TIMER, 0x4);
/* Set the write pointer delay */
WREG32(CP_RB_WPTR_DELAY, 0);
/* Initialize the ring buffer's read and write pointers */
- tmp = RREG32(CP_RB_CNTL);
WREG32(CP_RB_CNTL, tmp | RB_RPTR_WR_ENA);
WREG32(CP_RB_RPTR_WR, 0);
WREG32(CP_RB_WPTR, 0);
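CP_RB_CNTL above packs two order-encoded sizes: the ring size order in the low bits and a second field derived from RADEON_GPU_PAGE_SIZE/8 at bit 8. A rough standalone approximation, with a ceiling-log2 stand-in for drm_order() and an invented value for the RB_NO_UPDATE flag:

#include <stdio.h>

/* ceiling log2, standing in for drm_order() */
static unsigned order(unsigned long n)
{
	unsigned o = 0;

	while ((1UL << o) < n)
		o++;
	return o;
}

int main(void)
{
	unsigned ring_size = 1024 * 1024;	/* 1M ring, as used above */
	unsigned page_size = 4096;		/* RADEON_GPU_PAGE_SIZE */
	unsigned rb_no_update = 1u << 27;	/* flag bit, value invented here */
	unsigned rb_bufsz = order(ring_size / 8);
	unsigned tmp = rb_no_update | (order(page_size / 8) << 8) | rb_bufsz;

	printf("rb_bufsz=%u CP_RB_CNTL=0x%08X\n", rb_bufsz, tmp);
	return 0;
}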
@@ -1350,32 +1379,47 @@ int r600_ring_test(struct radeon_device *rdev)
return r;
}
-/*
- * Writeback
- */
-int r600_wb_init(struct radeon_device *rdev)
+void r600_wb_disable(struct radeon_device *rdev)
+{
+ WREG32(SCRATCH_UMSK, 0);
+ if (rdev->wb.wb_obj) {
+ radeon_object_kunmap(rdev->wb.wb_obj);
+ radeon_object_unpin(rdev->wb.wb_obj);
+ }
+}
+
+void r600_wb_fini(struct radeon_device *rdev)
+{
+ r600_wb_disable(rdev);
+ if (rdev->wb.wb_obj) {
+ radeon_object_unref(&rdev->wb.wb_obj);
+ rdev->wb.wb = NULL;
+ rdev->wb.wb_obj = NULL;
+ }
+}
+
+int r600_wb_enable(struct radeon_device *rdev)
{
int r;
if (rdev->wb.wb_obj == NULL) {
- r = radeon_object_create(rdev, NULL, 4096,
- true,
- RADEON_GEM_DOMAIN_GTT,
- false, &rdev->wb.wb_obj);
+ r = radeon_object_create(rdev, NULL, RADEON_GPU_PAGE_SIZE, true,
+ RADEON_GEM_DOMAIN_GTT, false, &rdev->wb.wb_obj);
if (r) {
- DRM_ERROR("radeon: failed to create WB buffer (%d).\n", r);
+ dev_warn(rdev->dev, "failed to create WB buffer (%d).\n", r);
return r;
}
- r = radeon_object_pin(rdev->wb.wb_obj,
- RADEON_GEM_DOMAIN_GTT,
- &rdev->wb.gpu_addr);
+ r = radeon_object_pin(rdev->wb.wb_obj, RADEON_GEM_DOMAIN_GTT,
+ &rdev->wb.gpu_addr);
if (r) {
- DRM_ERROR("radeon: failed to pin WB buffer (%d).\n", r);
+ dev_warn(rdev->dev, "failed to pin WB buffer (%d).\n", r);
+ r600_wb_fini(rdev);
return r;
}
r = radeon_object_kmap(rdev->wb.wb_obj, (void **)&rdev->wb.wb);
if (r) {
- DRM_ERROR("radeon: failed to map WB buffer (%d).\n", r);
+ dev_warn(rdev->dev, "failed to map WB buffer (%d).\n", r);
+ r600_wb_fini(rdev);
return r;
}
}
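Splitting the old r600_wb_init() into enable/disable/fini gives the writeback buffer three distinct lifetimes: enable() lazily creates, pins and maps it, disable() only unmaps and unpins it (the suspend path), and fini() additionally frees the object (the teardown path). A rough standalone analogue of that split, with malloc/free standing in for the radeon_object_* calls:

#include <stdio.h>
#include <stdlib.h>

struct wb { void *obj; void *cpu_ptr; int pinned; };

static int wb_enable(struct wb *wb)
{
	if (!wb->obj) {			/* lazy allocation, as in r600_wb_enable() */
		wb->obj = malloc(4096);
		if (!wb->obj)
			return -1;
	}
	wb->pinned = 1;
	wb->cpu_ptr = wb->obj;		/* stands in for radeon_object_kmap() */
	return 0;
}

static void wb_disable(struct wb *wb)
{
	wb->cpu_ptr = NULL;		/* kunmap + unpin, keep the object */
	wb->pinned = 0;
}

static void wb_fini(struct wb *wb)
{
	wb_disable(wb);
	free(wb->obj);			/* only fini() releases the buffer */
	wb->obj = NULL;
}

int main(void)
{
	struct wb wb = { 0 };

	if (wb_enable(&wb) == 0)
		printf("writeback enabled\n");
	wb_disable(&wb);		/* suspend path */
	wb_fini(&wb);			/* teardown path */
	return 0;
}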
@@ -1386,21 +1430,6 @@ int r600_wb_init(struct radeon_device *rdev)
return 0;
}
-void r600_wb_fini(struct radeon_device *rdev)
-{
- if (rdev->wb.wb_obj) {
- radeon_object_kunmap(rdev->wb.wb_obj);
- radeon_object_unpin(rdev->wb.wb_obj);
- radeon_object_unref(&rdev->wb.wb_obj);
- rdev->wb.wb = NULL;
- rdev->wb.wb_obj = NULL;
- }
-}
-
-
-/*
- * CS
- */
void r600_fence_ring_emit(struct radeon_device *rdev,
struct radeon_fence *fence)
{
@@ -1424,8 +1453,8 @@ int r600_copy_blit(struct radeon_device *rdev,
uint64_t src_offset, uint64_t dst_offset,
unsigned num_pages, struct radeon_fence *fence)
{
- r600_blit_prepare_copy(rdev, num_pages * 4096);
- r600_kms_blit_copy(rdev, src_offset, dst_offset, num_pages * 4096);
+ r600_blit_prepare_copy(rdev, num_pages * RADEON_GPU_PAGE_SIZE);
+ r600_kms_blit_copy(rdev, src_offset, dst_offset, num_pages * RADEON_GPU_PAGE_SIZE);
r600_blit_done_copy(rdev, fence);
return 0;
}
@@ -1477,11 +1506,14 @@ int r600_startup(struct radeon_device *rdev)
{
int r;
- r600_gpu_reset(rdev);
- r600_mc_resume(rdev);
- r = r600_pcie_gart_enable(rdev);
- if (r)
- return r;
+ r600_mc_program(rdev);
+ if (rdev->flags & RADEON_IS_AGP) {
+ r600_agp_enable(rdev);
+ } else {
+ r = r600_pcie_gart_enable(rdev);
+ if (r)
+ return r;
+ }
r600_gpu_init(rdev);
r = radeon_object_pin(rdev->r600_blit.shader_obj, RADEON_GEM_DOMAIN_VRAM,
@@ -1500,9 +1532,8 @@ int r600_startup(struct radeon_device *rdev)
r = r600_cp_resume(rdev);
if (r)
return r;
- r = r600_wb_init(rdev);
- if (r)
- return r;
+ /* write back buffer is not vital so don't worry about failure */
+ r600_wb_enable(rdev);
return 0;
}
@@ -1524,15 +1555,12 @@ int r600_resume(struct radeon_device *rdev)
{
int r;
- if (radeon_gpu_reset(rdev)) {
- /* FIXME: what do we want to do here ? */
- }
+ /* Do not reset GPU before posting; on r600 hw, unlike on r500 hw,
+ * posting will perform the necessary tasks to bring the GPU back
+ * into good shape.
+ */
/* post card */
- if (rdev->is_atom_bios) {
- atom_asic_init(rdev->mode_info.atom_context);
- } else {
- radeon_combios_asic_init(rdev->ddev);
- }
+ atom_asic_init(rdev->mode_info.atom_context);
/* Initialize clocks */
r = radeon_clocks_init(rdev);
if (r) {
@@ -1545,7 +1573,7 @@ int r600_resume(struct radeon_device *rdev)
return r;
}
- r = radeon_ib_test(rdev);
+ r = r600_ib_test(rdev);
if (r) {
DRM_ERROR("radeon: failled testing IB (%d).\n", r);
return r;
@@ -1553,13 +1581,12 @@ int r600_resume(struct radeon_device *rdev)
return r;
}
-
int r600_suspend(struct radeon_device *rdev)
{
/* FIXME: we should wait for ring to be empty */
r600_cp_stop(rdev);
rdev->cp.ready = false;
-
+ r600_wb_disable(rdev);
r600_pcie_gart_disable(rdev);
/* unpin shaders bo */
radeon_object_unpin(rdev->r600_blit.shader_obj);
@@ -1576,7 +1603,6 @@ int r600_init(struct radeon_device *rdev)
{
int r;
- rdev->new_init_path = true;
r = radeon_dummy_page_init(rdev);
if (r)
return r;
@@ -1593,8 +1619,10 @@ int r600_init(struct radeon_device *rdev)
return -EINVAL;
}
/* Must be an ATOMBIOS */
- if (!rdev->is_atom_bios)
+ if (!rdev->is_atom_bios) {
+ dev_err(rdev->dev, "Expecting atombios for R600 GPU\n");
return -EINVAL;
+ }
r = radeon_atombios_init(rdev);
if (r)
return r;
@@ -1607,24 +1635,20 @@ int r600_init(struct radeon_device *rdev)
r600_scratch_init(rdev);
/* Initialize surface registers */
radeon_surface_init(rdev);
+ /* Initialize clocks */
radeon_get_clock_info(rdev->ddev);
r = radeon_clocks_init(rdev);
if (r)
return r;
+ /* Initialize power management */
+ radeon_pm_init(rdev);
/* Fence driver */
r = radeon_fence_driver_init(rdev);
if (r)
return r;
r = r600_mc_init(rdev);
- if (r) {
- if (rdev->flags & RADEON_IS_AGP) {
- /* Retry with disabling AGP */
- r600_fini(rdev);
- rdev->flags &= ~RADEON_IS_AGP;
- return r600_init(rdev);
- }
+ if (r)
return r;
- }
/* Memory manager */
r = radeon_object_init(rdev);
if (r)
@@ -1653,12 +1677,10 @@ int r600_init(struct radeon_device *rdev)
r = r600_startup(rdev);
if (r) {
- if (rdev->flags & RADEON_IS_AGP) {
- /* Retry with disabling AGP */
- r600_fini(rdev);
- rdev->flags &= ~RADEON_IS_AGP;
- return r600_init(rdev);
- }
+ r600_suspend(rdev);
+ r600_wb_fini(rdev);
+ radeon_ring_fini(rdev);
+ r600_pcie_gart_fini(rdev);
rdev->accel_working = false;
}
if (rdev->accel_working) {
@@ -1667,7 +1689,7 @@ int r600_init(struct radeon_device *rdev)
DRM_ERROR("radeon: failled initializing IB pool (%d).\n", r);
rdev->accel_working = false;
}
- r = radeon_ib_test(rdev);
+ r = r600_ib_test(rdev);
if (r) {
DRM_ERROR("radeon: failled testing IB (%d).\n", r);
rdev->accel_working = false;
@@ -1683,19 +1705,15 @@ void r600_fini(struct radeon_device *rdev)
r600_blit_fini(rdev);
radeon_ring_fini(rdev);
+ r600_wb_fini(rdev);
r600_pcie_gart_fini(rdev);
radeon_gem_fini(rdev);
radeon_fence_driver_fini(rdev);
radeon_clocks_fini(rdev);
-#if __OS_HAS_AGP
if (rdev->flags & RADEON_IS_AGP)
radeon_agp_fini(rdev);
-#endif
radeon_object_fini(rdev);
- if (rdev->is_atom_bios)
- radeon_atombios_fini(rdev);
- else
- radeon_combios_fini(rdev);
+ radeon_atombios_fini(rdev);
kfree(rdev->bios);
rdev->bios = NULL;
radeon_dummy_page_fini(rdev);
diff --git a/drivers/gpu/drm/radeon/r600_blit.c b/drivers/gpu/drm/radeon/r600_blit.c
index d988eece018..5ea43234758 100644
--- a/drivers/gpu/drm/radeon/r600_blit.c
+++ b/drivers/gpu/drm/radeon/r600_blit.c
@@ -774,11 +774,10 @@ r600_blit_swap(struct drm_device *dev,
{
drm_radeon_private_t *dev_priv = dev->dev_private;
int cb_format, tex_format;
+ int sx2, sy2, dx2, dy2;
u64 vb_addr;
u32 *vb;
- vb = r600_nomm_get_vb_ptr(dev);
-
if ((dev_priv->blit_vb->used + 48) > dev_priv->blit_vb->total) {
r600_nomm_put_vb(dev);
@@ -787,19 +786,13 @@ r600_blit_swap(struct drm_device *dev,
return;
set_shaders(dev);
- vb = r600_nomm_get_vb_ptr(dev);
}
+ vb = r600_nomm_get_vb_ptr(dev);
- if (cpp == 4) {
- cb_format = COLOR_8_8_8_8;
- tex_format = FMT_8_8_8_8;
- } else if (cpp == 2) {
- cb_format = COLOR_5_6_5;
- tex_format = FMT_5_6_5;
- } else {
- cb_format = COLOR_8;
- tex_format = FMT_8;
- }
+ sx2 = sx + w;
+ sy2 = sy + h;
+ dx2 = dx + w;
+ dy2 = dy + h;
vb[0] = i2f(dx);
vb[1] = i2f(dy);
@@ -807,31 +800,46 @@ r600_blit_swap(struct drm_device *dev,
vb[3] = i2f(sy);
vb[4] = i2f(dx);
- vb[5] = i2f(dy + h);
+ vb[5] = i2f(dy2);
vb[6] = i2f(sx);
- vb[7] = i2f(sy + h);
+ vb[7] = i2f(sy2);
+
+ vb[8] = i2f(dx2);
+ vb[9] = i2f(dy2);
+ vb[10] = i2f(sx2);
+ vb[11] = i2f(sy2);
- vb[8] = i2f(dx + w);
- vb[9] = i2f(dy + h);
- vb[10] = i2f(sx + w);
- vb[11] = i2f(sy + h);
+ switch(cpp) {
+ case 4:
+ cb_format = COLOR_8_8_8_8;
+ tex_format = FMT_8_8_8_8;
+ break;
+ case 2:
+ cb_format = COLOR_5_6_5;
+ tex_format = FMT_5_6_5;
+ break;
+ default:
+ cb_format = COLOR_8;
+ tex_format = FMT_8;
+ break;
+ }
/* src */
set_tex_resource(dev_priv, tex_format,
src_pitch / cpp,
- sy + h, src_pitch / cpp,
+ sy2, src_pitch / cpp,
src_gpu_addr);
cp_set_surface_sync(dev_priv,
- R600_TC_ACTION_ENA, (src_pitch * (sy + h)), src_gpu_addr);
+ R600_TC_ACTION_ENA, src_pitch * sy2, src_gpu_addr);
/* dst */
set_render_target(dev_priv, cb_format,
- dst_pitch / cpp, dy + h,
+ dst_pitch / cpp, dy2,
dst_gpu_addr);
/* scissors */
- set_scissors(dev_priv, dx, dy, dx + w, dy + h);
+ set_scissors(dev_priv, dx, dy, dx2, dy2);
/* Vertex buffer setup */
vb_addr = dev_priv->gart_buffers_offset +
@@ -844,7 +852,7 @@ r600_blit_swap(struct drm_device *dev,
cp_set_surface_sync(dev_priv,
R600_CB_ACTION_ENA | R600_CB0_DEST_BASE_ENA,
- dst_pitch * (dy + h), dst_gpu_addr);
+ dst_pitch * dy2, dst_gpu_addr);
dev_priv->blit_vb->used += 12 * 4;
}
diff --git a/drivers/gpu/drm/radeon/r600_blit_kms.c b/drivers/gpu/drm/radeon/r600_blit_kms.c
index acae33e2ad5..dbf716e1fbf 100644
--- a/drivers/gpu/drm/radeon/r600_blit_kms.c
+++ b/drivers/gpu/drm/radeon/r600_blit_kms.c
@@ -368,7 +368,7 @@ set_default_state(struct radeon_device *rdev)
if ((rdev->family == CHIP_RV610) ||
(rdev->family == CHIP_RV620) ||
(rdev->family == CHIP_RS780) ||
- (rdev->family == CHIP_RS780) ||
+ (rdev->family == CHIP_RS880) ||
(rdev->family == CHIP_RV710))
sq_config = 0;
else
diff --git a/drivers/gpu/drm/radeon/r600_cs.c b/drivers/gpu/drm/radeon/r600_cs.c
index d28970db6a2..0d820764f34 100644
--- a/drivers/gpu/drm/radeon/r600_cs.c
+++ b/drivers/gpu/drm/radeon/r600_cs.c
@@ -252,7 +252,7 @@ static int r600_cs_packet_parse_vline(struct radeon_cs_parser *p)
header = radeon_get_ib_value(p, h_idx);
crtc_id = radeon_get_ib_value(p, h_idx + 2 + 7 + 1);
- reg = header >> 2;
+ reg = CP_PACKET0_GET_REG(header);
mutex_lock(&p->rdev->ddev->mode_config.mutex);
obj = drm_mode_object_find(p->rdev->ddev, crtc_id, DRM_MODE_OBJECT_CRTC);
if (!obj) {
@@ -466,6 +466,23 @@ static int r600_packet3_check(struct radeon_cs_parser *p,
for (i = 0; i < pkt->count; i++) {
reg = start_reg + (4 * i);
switch (reg) {
+ case SQ_ESGS_RING_BASE:
+ case SQ_GSVS_RING_BASE:
+ case SQ_ESTMP_RING_BASE:
+ case SQ_GSTMP_RING_BASE:
+ case SQ_VSTMP_RING_BASE:
+ case SQ_PSTMP_RING_BASE:
+ case SQ_FBUF_RING_BASE:
+ case SQ_REDUC_RING_BASE:
+ case SX_MEMORY_EXPORT_BASE:
+ r = r600_cs_packet_next_reloc(p, &reloc);
+ if (r) {
+ DRM_ERROR("bad SET_CONFIG_REG "
+ "0x%04X\n", reg);
+ return -EINVAL;
+ }
+ ib[idx+1+i] += (u32)((reloc->lobj.gpu_offset >> 8) & 0xffffffff);
+ break;
case CP_COHER_BASE:
/* use PACKET3_SURFACE_SYNC */
return -EINVAL;
@@ -487,6 +504,7 @@ static int r600_packet3_check(struct radeon_cs_parser *p,
reg = start_reg + (4 * i);
switch (reg) {
case DB_DEPTH_BASE:
+ case DB_HTILE_DATA_BASE:
case CB_COLOR0_BASE:
case CB_COLOR1_BASE:
case CB_COLOR2_BASE:
diff --git a/drivers/gpu/drm/radeon/r600d.h b/drivers/gpu/drm/radeon/r600d.h
index 4a9028a85c9..27ab428b149 100644
--- a/drivers/gpu/drm/radeon/r600d.h
+++ b/drivers/gpu/drm/radeon/r600d.h
@@ -119,6 +119,7 @@
#define DB_DEBUG 0x9830
#define PREZ_MUST_WAIT_FOR_POSTZ_DONE (1 << 31)
#define DB_DEPTH_BASE 0x2800C
+#define DB_HTILE_DATA_BASE 0x28014
#define DB_WATERMARKS 0x9838
#define DEPTH_FREE(x) ((x) << 0)
#define DEPTH_FLUSH(x) ((x) << 5)
@@ -171,6 +172,14 @@
#define SQ_STACK_RESOURCE_MGMT_2 0x8c14
# define NUM_GS_STACK_ENTRIES(x) ((x) << 0)
# define NUM_ES_STACK_ENTRIES(x) ((x) << 16)
+#define SQ_ESGS_RING_BASE 0x8c40
+#define SQ_GSVS_RING_BASE 0x8c48
+#define SQ_ESTMP_RING_BASE 0x8c50
+#define SQ_GSTMP_RING_BASE 0x8c58
+#define SQ_VSTMP_RING_BASE 0x8c60
+#define SQ_PSTMP_RING_BASE 0x8c68
+#define SQ_FBUF_RING_BASE 0x8c70
+#define SQ_REDUC_RING_BASE 0x8c78
#define GRBM_CNTL 0x8000
# define GRBM_READ_TIMEOUT(x) ((x) << 0)
@@ -271,6 +280,10 @@
#define PCIE_PORT_INDEX 0x0038
#define PCIE_PORT_DATA 0x003C
+#define CHMAP 0x2004
+#define NOOFCHAN_SHIFT 12
+#define NOOFCHAN_MASK 0x00003000
+
#define RAMCFG 0x2408
#define NOOFBANK_SHIFT 0
#define NOOFBANK_MASK 0x00000001
@@ -352,6 +365,7 @@
#define SX_MISC 0x28350
+#define SX_MEMORY_EXPORT_BASE 0x9010
#define SX_DEBUG_1 0x9054
#define SMX_EVENT_RELEASE (1 << 0)
#define ENABLE_NEW_SMX_ADDRESS (1 << 16)
@@ -643,6 +657,7 @@
#define G_000E50_MCDW_BUSY(x) (((x) >> 13) & 1)
#define G_000E50_SEM_BUSY(x) (((x) >> 14) & 1)
#define G_000E50_RLC_BUSY(x) (((x) >> 15) & 1)
+#define G_000E50_BIF_BUSY(x) (((x) >> 29) & 1)
#define R_000E60_SRBM_SOFT_RESET 0x0E60
#define S_000E60_SOFT_RESET_BIF(x) (((x) & 1) << 1)
#define S_000E60_SOFT_RESET_CG(x) (((x) & 1) << 2)
diff --git a/drivers/gpu/drm/radeon/radeon.h b/drivers/gpu/drm/radeon/radeon.h
index 950b346e343..757f5cd3774 100644
--- a/drivers/gpu/drm/radeon/radeon.h
+++ b/drivers/gpu/drm/radeon/radeon.h
@@ -139,6 +139,10 @@ struct radeon_clock {
uint32_t default_sclk;
};
+/*
+ * Power management
+ */
+int radeon_pm_init(struct radeon_device *rdev);
/*
* Fences.
@@ -276,6 +280,8 @@ union radeon_gart_table {
struct radeon_gart_table_vram vram;
};
+#define RADEON_GPU_PAGE_SIZE 4096
+
struct radeon_gart {
dma_addr_t table_addr;
unsigned num_gpu_pages;
@@ -590,18 +596,8 @@ struct radeon_asic {
void (*fini)(struct radeon_device *rdev);
int (*resume)(struct radeon_device *rdev);
int (*suspend)(struct radeon_device *rdev);
- void (*errata)(struct radeon_device *rdev);
- void (*vram_info)(struct radeon_device *rdev);
void (*vga_set_state)(struct radeon_device *rdev, bool state);
int (*gpu_reset)(struct radeon_device *rdev);
- int (*mc_init)(struct radeon_device *rdev);
- void (*mc_fini)(struct radeon_device *rdev);
- int (*wb_init)(struct radeon_device *rdev);
- void (*wb_fini)(struct radeon_device *rdev);
- int (*gart_init)(struct radeon_device *rdev);
- void (*gart_fini)(struct radeon_device *rdev);
- int (*gart_enable)(struct radeon_device *rdev);
- void (*gart_disable)(struct radeon_device *rdev);
void (*gart_tlb_flush)(struct radeon_device *rdev);
int (*gart_set_page)(struct radeon_device *rdev, int i, uint64_t addr);
int (*cp_init)(struct radeon_device *rdev, unsigned ring_size);
@@ -611,7 +607,6 @@ struct radeon_asic {
void (*ring_start)(struct radeon_device *rdev);
int (*ring_test)(struct radeon_device *rdev);
void (*ring_ib_execute)(struct radeon_device *rdev, struct radeon_ib *ib);
- int (*ib_test)(struct radeon_device *rdev);
int (*irq_set)(struct radeon_device *rdev);
int (*irq_process)(struct radeon_device *rdev);
u32 (*get_vblank_counter)(struct radeon_device *rdev, int crtc);
@@ -632,7 +627,9 @@ struct radeon_asic {
uint64_t dst_offset,
unsigned num_pages,
struct radeon_fence *fence);
+ uint32_t (*get_engine_clock)(struct radeon_device *rdev);
void (*set_engine_clock)(struct radeon_device *rdev, uint32_t eng_clock);
+ uint32_t (*get_memory_clock)(struct radeon_device *rdev);
void (*set_memory_clock)(struct radeon_device *rdev, uint32_t mem_clock);
void (*set_pcie_lanes)(struct radeon_device *rdev, int lanes);
void (*set_clock_gating)(struct radeon_device *rdev, int enable);
@@ -789,12 +786,12 @@ struct radeon_device {
bool shutdown;
bool suspend;
bool need_dma32;
- bool new_init_path;
bool accel_working;
struct radeon_surface_reg surface_regs[RADEON_GEM_MAX_SURFACES];
const struct firmware *me_fw; /* all family ME firmware */
const struct firmware *pfp_fw; /* r6/700 PFP firmware */
struct r600_blit r600_blit;
+ int msi_enabled; /* msi enabled */
};
int radeon_device_init(struct radeon_device *rdev,
@@ -949,28 +946,14 @@ static inline void radeon_ring_write(struct radeon_device *rdev, uint32_t v)
#define radeon_resume(rdev) (rdev)->asic->resume((rdev))
#define radeon_suspend(rdev) (rdev)->asic->suspend((rdev))
#define radeon_cs_parse(p) rdev->asic->cs_parse((p))
-#define radeon_errata(rdev) (rdev)->asic->errata((rdev))
-#define radeon_vram_info(rdev) (rdev)->asic->vram_info((rdev))
#define radeon_vga_set_state(rdev, state) (rdev)->asic->vga_set_state((rdev), (state))
#define radeon_gpu_reset(rdev) (rdev)->asic->gpu_reset((rdev))
-#define radeon_mc_init(rdev) (rdev)->asic->mc_init((rdev))
-#define radeon_mc_fini(rdev) (rdev)->asic->mc_fini((rdev))
-#define radeon_wb_init(rdev) (rdev)->asic->wb_init((rdev))
-#define radeon_wb_fini(rdev) (rdev)->asic->wb_fini((rdev))
-#define radeon_gpu_gart_init(rdev) (rdev)->asic->gart_init((rdev))
-#define radeon_gpu_gart_fini(rdev) (rdev)->asic->gart_fini((rdev))
-#define radeon_gart_enable(rdev) (rdev)->asic->gart_enable((rdev))
-#define radeon_gart_disable(rdev) (rdev)->asic->gart_disable((rdev))
#define radeon_gart_tlb_flush(rdev) (rdev)->asic->gart_tlb_flush((rdev))
#define radeon_gart_set_page(rdev, i, p) (rdev)->asic->gart_set_page((rdev), (i), (p))
-#define radeon_cp_init(rdev,rsize) (rdev)->asic->cp_init((rdev), (rsize))
-#define radeon_cp_fini(rdev) (rdev)->asic->cp_fini((rdev))
-#define radeon_cp_disable(rdev) (rdev)->asic->cp_disable((rdev))
#define radeon_cp_commit(rdev) (rdev)->asic->cp_commit((rdev))
#define radeon_ring_start(rdev) (rdev)->asic->ring_start((rdev))
#define radeon_ring_test(rdev) (rdev)->asic->ring_test((rdev))
#define radeon_ring_ib_execute(rdev, ib) (rdev)->asic->ring_ib_execute((rdev), (ib))
-#define radeon_ib_test(rdev) (rdev)->asic->ib_test((rdev))
#define radeon_irq_set(rdev) (rdev)->asic->irq_set((rdev))
#define radeon_irq_process(rdev) (rdev)->asic->irq_process((rdev))
#define radeon_get_vblank_counter(rdev, crtc) (rdev)->asic->get_vblank_counter((rdev), (crtc))
@@ -978,7 +961,9 @@ static inline void radeon_ring_write(struct radeon_device *rdev, uint32_t v)
#define radeon_copy_blit(rdev, s, d, np, f) (rdev)->asic->copy_blit((rdev), (s), (d), (np), (f))
#define radeon_copy_dma(rdev, s, d, np, f) (rdev)->asic->copy_dma((rdev), (s), (d), (np), (f))
#define radeon_copy(rdev, s, d, np, f) (rdev)->asic->copy((rdev), (s), (d), (np), (f))
+#define radeon_get_engine_clock(rdev) (rdev)->asic->get_engine_clock((rdev))
#define radeon_set_engine_clock(rdev, e) (rdev)->asic->set_engine_clock((rdev), (e))
+#define radeon_get_memory_clock(rdev) (rdev)->asic->get_memory_clock((rdev))
#define radeon_set_memory_clock(rdev, e) (rdev)->asic->set_engine_clock((rdev), (e))
#define radeon_set_pcie_lanes(rdev, l) (rdev)->asic->set_pcie_lanes((rdev), (l))
#define radeon_set_clock_gating(rdev, e) (rdev)->asic->set_clock_gating((rdev), (e))
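For orientation, the new readback macros dispatch through the same per-ASIC table as the setters. A hypothetical debug helper (not part of this patch) could use them as below; the NULL check matters because get_memory_clock is left unset on the pre-AtomBIOS ASICs:

	/* Hypothetical helper; only the macros above come from the patch itself. */
	static void radeon_dump_clocks(struct radeon_device *rdev)
	{
		DRM_INFO("engine clock: %u\n", radeon_get_engine_clock(rdev));
		if (rdev->asic->get_memory_clock)
			DRM_INFO("memory clock: %u\n", radeon_get_memory_clock(rdev));
	}
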
@@ -996,6 +981,7 @@ extern void radeon_clocks_fini(struct radeon_device *rdev);
extern void radeon_scratch_init(struct radeon_device *rdev);
extern void radeon_surface_init(struct radeon_device *rdev);
extern int radeon_cs_parser_init(struct radeon_cs_parser *p, void *data);
+extern void radeon_legacy_set_clock_gating(struct radeon_device *rdev, int enable);
extern void radeon_atom_set_clock_gating(struct radeon_device *rdev, int enable);
/* r100,rv100,rs100,rv200,rs200,r200,rv250,rs300,rv280 */
@@ -1031,11 +1017,27 @@ extern int r100_wb_init(struct radeon_device *rdev);
extern void r100_hdp_reset(struct radeon_device *rdev);
extern int r100_rb2d_reset(struct radeon_device *rdev);
extern int r100_cp_reset(struct radeon_device *rdev);
+extern void r100_vga_render_disable(struct radeon_device *rdev);
+extern int r100_cs_track_check_pkt3_indx_buffer(struct radeon_cs_parser *p,
+ struct radeon_cs_packet *pkt,
+ struct radeon_object *robj);
+extern int r100_cs_parse_packet0(struct radeon_cs_parser *p,
+ struct radeon_cs_packet *pkt,
+ const unsigned *auth, unsigned n,
+ radeon_packet0_check_t check);
+extern int r100_cs_packet_parse(struct radeon_cs_parser *p,
+ struct radeon_cs_packet *pkt,
+ unsigned idx);
+
+/* rv200,rv250,rv280 */
+extern void r200_set_safe_registers(struct radeon_device *rdev);
/* r300,r350,rv350,rv370,rv380 */
extern void r300_set_reg_safe(struct radeon_device *rdev);
extern void r300_mc_program(struct radeon_device *rdev);
extern void r300_vram_info(struct radeon_device *rdev);
+extern void r300_clock_startup(struct radeon_device *rdev);
+extern int r300_mc_wait_for_idle(struct radeon_device *rdev);
extern int rv370_pcie_gart_init(struct radeon_device *rdev);
extern void rv370_pcie_gart_fini(struct radeon_device *rdev);
extern int rv370_pcie_gart_enable(struct radeon_device *rdev);
@@ -1066,6 +1068,18 @@ extern void rv515_clock_startup(struct radeon_device *rdev);
extern void rv515_debugfs(struct radeon_device *rdev);
extern int rv515_suspend(struct radeon_device *rdev);
+/* rs400 */
+extern int rs400_gart_init(struct radeon_device *rdev);
+extern int rs400_gart_enable(struct radeon_device *rdev);
+extern void rs400_gart_adjust_size(struct radeon_device *rdev);
+extern void rs400_gart_disable(struct radeon_device *rdev);
+extern void rs400_gart_fini(struct radeon_device *rdev);
+
+/* rs600 */
+extern void rs600_set_safe_registers(struct radeon_device *rdev);
+extern int rs600_irq_set(struct radeon_device *rdev);
+extern void rs600_irq_disable(struct radeon_device *rdev);
+
/* rs690, rs740 */
extern void rs690_line_buffer_adjust(struct radeon_device *rdev,
struct drm_display_mode *mode1,
@@ -1083,8 +1097,9 @@ extern int r600_pcie_gart_init(struct radeon_device *rdev);
extern void r600_pcie_gart_tlb_flush(struct radeon_device *rdev);
extern int r600_ib_test(struct radeon_device *rdev);
extern int r600_ring_test(struct radeon_device *rdev);
-extern int r600_wb_init(struct radeon_device *rdev);
extern void r600_wb_fini(struct radeon_device *rdev);
+extern int r600_wb_enable(struct radeon_device *rdev);
+extern void r600_wb_disable(struct radeon_device *rdev);
extern void r600_scratch_init(struct radeon_device *rdev);
extern int r600_blit_init(struct radeon_device *rdev);
extern void r600_blit_fini(struct radeon_device *rdev);
diff --git a/drivers/gpu/drm/radeon/radeon_asic.h b/drivers/gpu/drm/radeon/radeon_asic.h
index c8a4e7b5663..c18fbee387d 100644
--- a/drivers/gpu/drm/radeon/radeon_asic.h
+++ b/drivers/gpu/drm/radeon/radeon_asic.h
@@ -31,38 +31,30 @@
/*
* common functions
*/
+uint32_t radeon_legacy_get_engine_clock(struct radeon_device *rdev);
void radeon_legacy_set_engine_clock(struct radeon_device *rdev, uint32_t eng_clock);
void radeon_legacy_set_clock_gating(struct radeon_device *rdev, int enable);
+uint32_t radeon_atom_get_engine_clock(struct radeon_device *rdev);
void radeon_atom_set_engine_clock(struct radeon_device *rdev, uint32_t eng_clock);
+uint32_t radeon_atom_get_memory_clock(struct radeon_device *rdev);
void radeon_atom_set_memory_clock(struct radeon_device *rdev, uint32_t mem_clock);
void radeon_atom_set_clock_gating(struct radeon_device *rdev, int enable);
/*
* r100,rv100,rs100,rv200,rs200,r200,rv250,rs300,rv280
*/
-int r100_init(struct radeon_device *rdev);
-int r200_init(struct radeon_device *rdev);
+extern int r100_init(struct radeon_device *rdev);
+extern void r100_fini(struct radeon_device *rdev);
+extern int r100_suspend(struct radeon_device *rdev);
+extern int r100_resume(struct radeon_device *rdev);
uint32_t r100_mm_rreg(struct radeon_device *rdev, uint32_t reg);
void r100_mm_wreg(struct radeon_device *rdev, uint32_t reg, uint32_t v);
-void r100_errata(struct radeon_device *rdev);
-void r100_vram_info(struct radeon_device *rdev);
void r100_vga_set_state(struct radeon_device *rdev, bool state);
int r100_gpu_reset(struct radeon_device *rdev);
-int r100_mc_init(struct radeon_device *rdev);
-void r100_mc_fini(struct radeon_device *rdev);
u32 r100_get_vblank_counter(struct radeon_device *rdev, int crtc);
-int r100_wb_init(struct radeon_device *rdev);
-void r100_wb_fini(struct radeon_device *rdev);
-int r100_pci_gart_init(struct radeon_device *rdev);
-void r100_pci_gart_fini(struct radeon_device *rdev);
-int r100_pci_gart_enable(struct radeon_device *rdev);
-void r100_pci_gart_disable(struct radeon_device *rdev);
void r100_pci_gart_tlb_flush(struct radeon_device *rdev);
int r100_pci_gart_set_page(struct radeon_device *rdev, int i, uint64_t addr);
-int r100_cp_init(struct radeon_device *rdev, unsigned ring_size);
-void r100_cp_fini(struct radeon_device *rdev);
-void r100_cp_disable(struct radeon_device *rdev);
void r100_cp_commit(struct radeon_device *rdev);
void r100_ring_start(struct radeon_device *rdev);
int r100_irq_set(struct radeon_device *rdev);
@@ -83,33 +75,21 @@ int r100_set_surface_reg(struct radeon_device *rdev, int reg,
int r100_clear_surface_reg(struct radeon_device *rdev, int reg);
void r100_bandwidth_update(struct radeon_device *rdev);
void r100_ring_ib_execute(struct radeon_device *rdev, struct radeon_ib *ib);
-int r100_ib_test(struct radeon_device *rdev);
int r100_ring_test(struct radeon_device *rdev);
static struct radeon_asic r100_asic = {
.init = &r100_init,
- .errata = &r100_errata,
- .vram_info = &r100_vram_info,
+ .fini = &r100_fini,
+ .suspend = &r100_suspend,
+ .resume = &r100_resume,
.vga_set_state = &r100_vga_set_state,
.gpu_reset = &r100_gpu_reset,
- .mc_init = &r100_mc_init,
- .mc_fini = &r100_mc_fini,
- .wb_init = &r100_wb_init,
- .wb_fini = &r100_wb_fini,
- .gart_init = &r100_pci_gart_init,
- .gart_fini = &r100_pci_gart_fini,
- .gart_enable = &r100_pci_gart_enable,
- .gart_disable = &r100_pci_gart_disable,
.gart_tlb_flush = &r100_pci_gart_tlb_flush,
.gart_set_page = &r100_pci_gart_set_page,
- .cp_init = &r100_cp_init,
- .cp_fini = &r100_cp_fini,
- .cp_disable = &r100_cp_disable,
.cp_commit = &r100_cp_commit,
.ring_start = &r100_ring_start,
.ring_test = &r100_ring_test,
.ring_ib_execute = &r100_ring_ib_execute,
- .ib_test = &r100_ib_test,
.irq_set = &r100_irq_set,
.irq_process = &r100_irq_process,
.get_vblank_counter = &r100_get_vblank_counter,
@@ -118,7 +98,9 @@ static struct radeon_asic r100_asic = {
.copy_blit = &r100_copy_blit,
.copy_dma = NULL,
.copy = &r100_copy_blit,
+ .get_engine_clock = &radeon_legacy_get_engine_clock,
.set_engine_clock = &radeon_legacy_set_engine_clock,
+ .get_memory_clock = NULL,
.set_memory_clock = NULL,
.set_pcie_lanes = NULL,
.set_clock_gating = &radeon_legacy_set_clock_gating,
@@ -131,55 +113,38 @@ static struct radeon_asic r100_asic = {
/*
* r300,r350,rv350,rv380
*/
-int r300_init(struct radeon_device *rdev);
-void r300_errata(struct radeon_device *rdev);
-void r300_vram_info(struct radeon_device *rdev);
-int r300_gpu_reset(struct radeon_device *rdev);
-int r300_mc_init(struct radeon_device *rdev);
-void r300_mc_fini(struct radeon_device *rdev);
-void r300_ring_start(struct radeon_device *rdev);
-void r300_fence_ring_emit(struct radeon_device *rdev,
- struct radeon_fence *fence);
-int r300_cs_parse(struct radeon_cs_parser *p);
-int rv370_pcie_gart_init(struct radeon_device *rdev);
-void rv370_pcie_gart_fini(struct radeon_device *rdev);
-int rv370_pcie_gart_enable(struct radeon_device *rdev);
-void rv370_pcie_gart_disable(struct radeon_device *rdev);
-void rv370_pcie_gart_tlb_flush(struct radeon_device *rdev);
-int rv370_pcie_gart_set_page(struct radeon_device *rdev, int i, uint64_t addr);
-uint32_t rv370_pcie_rreg(struct radeon_device *rdev, uint32_t reg);
-void rv370_pcie_wreg(struct radeon_device *rdev, uint32_t reg, uint32_t v);
-void rv370_set_pcie_lanes(struct radeon_device *rdev, int lanes);
-int r300_copy_dma(struct radeon_device *rdev,
- uint64_t src_offset,
- uint64_t dst_offset,
- unsigned num_pages,
- struct radeon_fence *fence);
-
+extern int r300_init(struct radeon_device *rdev);
+extern void r300_fini(struct radeon_device *rdev);
+extern int r300_suspend(struct radeon_device *rdev);
+extern int r300_resume(struct radeon_device *rdev);
+extern int r300_gpu_reset(struct radeon_device *rdev);
+extern void r300_ring_start(struct radeon_device *rdev);
+extern void r300_fence_ring_emit(struct radeon_device *rdev,
+ struct radeon_fence *fence);
+extern int r300_cs_parse(struct radeon_cs_parser *p);
+extern void rv370_pcie_gart_tlb_flush(struct radeon_device *rdev);
+extern int rv370_pcie_gart_set_page(struct radeon_device *rdev, int i, uint64_t addr);
+extern uint32_t rv370_pcie_rreg(struct radeon_device *rdev, uint32_t reg);
+extern void rv370_pcie_wreg(struct radeon_device *rdev, uint32_t reg, uint32_t v);
+extern void rv370_set_pcie_lanes(struct radeon_device *rdev, int lanes);
+extern int r300_copy_dma(struct radeon_device *rdev,
+ uint64_t src_offset,
+ uint64_t dst_offset,
+ unsigned num_pages,
+ struct radeon_fence *fence);
static struct radeon_asic r300_asic = {
.init = &r300_init,
- .errata = &r300_errata,
- .vram_info = &r300_vram_info,
+ .fini = &r300_fini,
+ .suspend = &r300_suspend,
+ .resume = &r300_resume,
.vga_set_state = &r100_vga_set_state,
.gpu_reset = &r300_gpu_reset,
- .mc_init = &r300_mc_init,
- .mc_fini = &r300_mc_fini,
- .wb_init = &r100_wb_init,
- .wb_fini = &r100_wb_fini,
- .gart_init = &r100_pci_gart_init,
- .gart_fini = &r100_pci_gart_fini,
- .gart_enable = &r100_pci_gart_enable,
- .gart_disable = &r100_pci_gart_disable,
.gart_tlb_flush = &r100_pci_gart_tlb_flush,
.gart_set_page = &r100_pci_gart_set_page,
- .cp_init = &r100_cp_init,
- .cp_fini = &r100_cp_fini,
- .cp_disable = &r100_cp_disable,
.cp_commit = &r100_cp_commit,
.ring_start = &r300_ring_start,
.ring_test = &r100_ring_test,
.ring_ib_execute = &r100_ring_ib_execute,
- .ib_test = &r100_ib_test,
.irq_set = &r100_irq_set,
.irq_process = &r100_irq_process,
.get_vblank_counter = &r100_get_vblank_counter,
@@ -188,7 +153,9 @@ static struct radeon_asic r300_asic = {
.copy_blit = &r100_copy_blit,
.copy_dma = &r300_copy_dma,
.copy = &r100_copy_blit,
+ .get_engine_clock = &radeon_legacy_get_engine_clock,
.set_engine_clock = &radeon_legacy_set_engine_clock,
+ .get_memory_clock = NULL,
.set_memory_clock = NULL,
.set_pcie_lanes = &rv370_set_pcie_lanes,
.set_clock_gating = &radeon_legacy_set_clock_gating,
@@ -209,26 +176,14 @@ static struct radeon_asic r420_asic = {
.fini = &r420_fini,
.suspend = &r420_suspend,
.resume = &r420_resume,
- .errata = NULL,
- .vram_info = NULL,
.vga_set_state = &r100_vga_set_state,
.gpu_reset = &r300_gpu_reset,
- .mc_init = NULL,
- .mc_fini = NULL,
- .wb_init = NULL,
- .wb_fini = NULL,
- .gart_enable = NULL,
- .gart_disable = NULL,
.gart_tlb_flush = &rv370_pcie_gart_tlb_flush,
.gart_set_page = &rv370_pcie_gart_set_page,
- .cp_init = NULL,
- .cp_fini = NULL,
- .cp_disable = NULL,
.cp_commit = &r100_cp_commit,
.ring_start = &r300_ring_start,
.ring_test = &r100_ring_test,
.ring_ib_execute = &r100_ring_ib_execute,
- .ib_test = NULL,
.irq_set = &r100_irq_set,
.irq_process = &r100_irq_process,
.get_vblank_counter = &r100_get_vblank_counter,
@@ -237,7 +192,9 @@ static struct radeon_asic r420_asic = {
.copy_blit = &r100_copy_blit,
.copy_dma = &r300_copy_dma,
.copy = &r100_copy_blit,
+ .get_engine_clock = &radeon_atom_get_engine_clock,
.set_engine_clock = &radeon_atom_set_engine_clock,
+ .get_memory_clock = &radeon_atom_get_memory_clock,
.set_memory_clock = &radeon_atom_set_memory_clock,
.set_pcie_lanes = &rv370_set_pcie_lanes,
.set_clock_gating = &radeon_atom_set_clock_gating,
@@ -250,42 +207,27 @@ static struct radeon_asic r420_asic = {
/*
* rs400,rs480
*/
-void rs400_errata(struct radeon_device *rdev);
-void rs400_vram_info(struct radeon_device *rdev);
-int rs400_mc_init(struct radeon_device *rdev);
-void rs400_mc_fini(struct radeon_device *rdev);
-int rs400_gart_init(struct radeon_device *rdev);
-void rs400_gart_fini(struct radeon_device *rdev);
-int rs400_gart_enable(struct radeon_device *rdev);
-void rs400_gart_disable(struct radeon_device *rdev);
+extern int rs400_init(struct radeon_device *rdev);
+extern void rs400_fini(struct radeon_device *rdev);
+extern int rs400_suspend(struct radeon_device *rdev);
+extern int rs400_resume(struct radeon_device *rdev);
void rs400_gart_tlb_flush(struct radeon_device *rdev);
int rs400_gart_set_page(struct radeon_device *rdev, int i, uint64_t addr);
uint32_t rs400_mc_rreg(struct radeon_device *rdev, uint32_t reg);
void rs400_mc_wreg(struct radeon_device *rdev, uint32_t reg, uint32_t v);
static struct radeon_asic rs400_asic = {
- .init = &r300_init,
- .errata = &rs400_errata,
- .vram_info = &rs400_vram_info,
+ .init = &rs400_init,
+ .fini = &rs400_fini,
+ .suspend = &rs400_suspend,
+ .resume = &rs400_resume,
.vga_set_state = &r100_vga_set_state,
.gpu_reset = &r300_gpu_reset,
- .mc_init = &rs400_mc_init,
- .mc_fini = &rs400_mc_fini,
- .wb_init = &r100_wb_init,
- .wb_fini = &r100_wb_fini,
- .gart_init = &rs400_gart_init,
- .gart_fini = &rs400_gart_fini,
- .gart_enable = &rs400_gart_enable,
- .gart_disable = &rs400_gart_disable,
.gart_tlb_flush = &rs400_gart_tlb_flush,
.gart_set_page = &rs400_gart_set_page,
- .cp_init = &r100_cp_init,
- .cp_fini = &r100_cp_fini,
- .cp_disable = &r100_cp_disable,
.cp_commit = &r100_cp_commit,
.ring_start = &r300_ring_start,
.ring_test = &r100_ring_test,
.ring_ib_execute = &r100_ring_ib_execute,
- .ib_test = &r100_ib_test,
.irq_set = &r100_irq_set,
.irq_process = &r100_irq_process,
.get_vblank_counter = &r100_get_vblank_counter,
@@ -294,7 +236,9 @@ static struct radeon_asic rs400_asic = {
.copy_blit = &r100_copy_blit,
.copy_dma = &r300_copy_dma,
.copy = &r100_copy_blit,
+ .get_engine_clock = &radeon_legacy_get_engine_clock,
.set_engine_clock = &radeon_legacy_set_engine_clock,
+ .get_memory_clock = NULL,
.set_memory_clock = NULL,
.set_pcie_lanes = NULL,
.set_clock_gating = &radeon_legacy_set_clock_gating,
@@ -307,18 +251,13 @@ static struct radeon_asic rs400_asic = {
/*
* rs600.
*/
-int rs600_init(struct radeon_device *rdev);
-void rs600_errata(struct radeon_device *rdev);
-void rs600_vram_info(struct radeon_device *rdev);
-int rs600_mc_init(struct radeon_device *rdev);
-void rs600_mc_fini(struct radeon_device *rdev);
+extern int rs600_init(struct radeon_device *rdev);
+extern void rs600_fini(struct radeon_device *rdev);
+extern int rs600_suspend(struct radeon_device *rdev);
+extern int rs600_resume(struct radeon_device *rdev);
int rs600_irq_set(struct radeon_device *rdev);
int rs600_irq_process(struct radeon_device *rdev);
u32 rs600_get_vblank_counter(struct radeon_device *rdev, int crtc);
-int rs600_gart_init(struct radeon_device *rdev);
-void rs600_gart_fini(struct radeon_device *rdev);
-int rs600_gart_enable(struct radeon_device *rdev);
-void rs600_gart_disable(struct radeon_device *rdev);
void rs600_gart_tlb_flush(struct radeon_device *rdev);
int rs600_gart_set_page(struct radeon_device *rdev, int i, uint64_t addr);
uint32_t rs600_mc_rreg(struct radeon_device *rdev, uint32_t reg);
@@ -326,28 +265,17 @@ void rs600_mc_wreg(struct radeon_device *rdev, uint32_t reg, uint32_t v);
void rs600_bandwidth_update(struct radeon_device *rdev);
static struct radeon_asic rs600_asic = {
.init = &rs600_init,
- .errata = &rs600_errata,
- .vram_info = &rs600_vram_info,
+ .fini = &rs600_fini,
+ .suspend = &rs600_suspend,
+ .resume = &rs600_resume,
.vga_set_state = &r100_vga_set_state,
.gpu_reset = &r300_gpu_reset,
- .mc_init = &rs600_mc_init,
- .mc_fini = &rs600_mc_fini,
- .wb_init = &r100_wb_init,
- .wb_fini = &r100_wb_fini,
- .gart_init = &rs600_gart_init,
- .gart_fini = &rs600_gart_fini,
- .gart_enable = &rs600_gart_enable,
- .gart_disable = &rs600_gart_disable,
.gart_tlb_flush = &rs600_gart_tlb_flush,
.gart_set_page = &rs600_gart_set_page,
- .cp_init = &r100_cp_init,
- .cp_fini = &r100_cp_fini,
- .cp_disable = &r100_cp_disable,
.cp_commit = &r100_cp_commit,
.ring_start = &r300_ring_start,
.ring_test = &r100_ring_test,
.ring_ib_execute = &r100_ring_ib_execute,
- .ib_test = &r100_ib_test,
.irq_set = &rs600_irq_set,
.irq_process = &rs600_irq_process,
.get_vblank_counter = &rs600_get_vblank_counter,
@@ -356,7 +284,9 @@ static struct radeon_asic rs600_asic = {
.copy_blit = &r100_copy_blit,
.copy_dma = &r300_copy_dma,
.copy = &r100_copy_blit,
+ .get_engine_clock = &radeon_atom_get_engine_clock,
.set_engine_clock = &radeon_atom_set_engine_clock,
+ .get_memory_clock = &radeon_atom_get_memory_clock,
.set_memory_clock = &radeon_atom_set_memory_clock,
.set_pcie_lanes = NULL,
.set_clock_gating = &radeon_atom_set_clock_gating,
@@ -367,37 +297,26 @@ static struct radeon_asic rs600_asic = {
/*
* rs690,rs740
*/
-void rs690_errata(struct radeon_device *rdev);
-void rs690_vram_info(struct radeon_device *rdev);
-int rs690_mc_init(struct radeon_device *rdev);
-void rs690_mc_fini(struct radeon_device *rdev);
+int rs690_init(struct radeon_device *rdev);
+void rs690_fini(struct radeon_device *rdev);
+int rs690_resume(struct radeon_device *rdev);
+int rs690_suspend(struct radeon_device *rdev);
uint32_t rs690_mc_rreg(struct radeon_device *rdev, uint32_t reg);
void rs690_mc_wreg(struct radeon_device *rdev, uint32_t reg, uint32_t v);
void rs690_bandwidth_update(struct radeon_device *rdev);
static struct radeon_asic rs690_asic = {
- .init = &rs600_init,
- .errata = &rs690_errata,
- .vram_info = &rs690_vram_info,
+ .init = &rs690_init,
+ .fini = &rs690_fini,
+ .suspend = &rs690_suspend,
+ .resume = &rs690_resume,
.vga_set_state = &r100_vga_set_state,
.gpu_reset = &r300_gpu_reset,
- .mc_init = &rs690_mc_init,
- .mc_fini = &rs690_mc_fini,
- .wb_init = &r100_wb_init,
- .wb_fini = &r100_wb_fini,
- .gart_init = &rs400_gart_init,
- .gart_fini = &rs400_gart_fini,
- .gart_enable = &rs400_gart_enable,
- .gart_disable = &rs400_gart_disable,
.gart_tlb_flush = &rs400_gart_tlb_flush,
.gart_set_page = &rs400_gart_set_page,
- .cp_init = &r100_cp_init,
- .cp_fini = &r100_cp_fini,
- .cp_disable = &r100_cp_disable,
.cp_commit = &r100_cp_commit,
.ring_start = &r300_ring_start,
.ring_test = &r100_ring_test,
.ring_ib_execute = &r100_ring_ib_execute,
- .ib_test = &r100_ib_test,
.irq_set = &rs600_irq_set,
.irq_process = &rs600_irq_process,
.get_vblank_counter = &rs600_get_vblank_counter,
@@ -406,7 +325,9 @@ static struct radeon_asic rs690_asic = {
.copy_blit = &r100_copy_blit,
.copy_dma = &r300_copy_dma,
.copy = &r300_copy_dma,
+ .get_engine_clock = &radeon_atom_get_engine_clock,
.set_engine_clock = &radeon_atom_set_engine_clock,
+ .get_memory_clock = &radeon_atom_get_memory_clock,
.set_memory_clock = &radeon_atom_set_memory_clock,
.set_pcie_lanes = NULL,
.set_clock_gating = &radeon_atom_set_clock_gating,
@@ -435,28 +356,14 @@ static struct radeon_asic rv515_asic = {
.fini = &rv515_fini,
.suspend = &rv515_suspend,
.resume = &rv515_resume,
- .errata = NULL,
- .vram_info = NULL,
.vga_set_state = &r100_vga_set_state,
.gpu_reset = &rv515_gpu_reset,
- .mc_init = NULL,
- .mc_fini = NULL,
- .wb_init = NULL,
- .wb_fini = NULL,
- .gart_init = &rv370_pcie_gart_init,
- .gart_fini = &rv370_pcie_gart_fini,
- .gart_enable = NULL,
- .gart_disable = NULL,
.gart_tlb_flush = &rv370_pcie_gart_tlb_flush,
.gart_set_page = &rv370_pcie_gart_set_page,
- .cp_init = NULL,
- .cp_fini = NULL,
- .cp_disable = NULL,
.cp_commit = &r100_cp_commit,
.ring_start = &rv515_ring_start,
.ring_test = &r100_ring_test,
.ring_ib_execute = &r100_ring_ib_execute,
- .ib_test = NULL,
.irq_set = &rs600_irq_set,
.irq_process = &rs600_irq_process,
.get_vblank_counter = &rs600_get_vblank_counter,
@@ -465,7 +372,9 @@ static struct radeon_asic rv515_asic = {
.copy_blit = &r100_copy_blit,
.copy_dma = &r300_copy_dma,
.copy = &r100_copy_blit,
+ .get_engine_clock = &radeon_atom_get_engine_clock,
.set_engine_clock = &radeon_atom_set_engine_clock,
+ .get_memory_clock = &radeon_atom_get_memory_clock,
.set_memory_clock = &radeon_atom_set_memory_clock,
.set_pcie_lanes = &rv370_set_pcie_lanes,
.set_clock_gating = &radeon_atom_set_clock_gating,
@@ -485,28 +394,14 @@ static struct radeon_asic r520_asic = {
.fini = &rv515_fini,
.suspend = &rv515_suspend,
.resume = &r520_resume,
- .errata = NULL,
- .vram_info = NULL,
.vga_set_state = &r100_vga_set_state,
.gpu_reset = &rv515_gpu_reset,
- .mc_init = NULL,
- .mc_fini = NULL,
- .wb_init = NULL,
- .wb_fini = NULL,
- .gart_init = NULL,
- .gart_fini = NULL,
- .gart_enable = NULL,
- .gart_disable = NULL,
.gart_tlb_flush = &rv370_pcie_gart_tlb_flush,
.gart_set_page = &rv370_pcie_gart_set_page,
- .cp_init = NULL,
- .cp_fini = NULL,
- .cp_disable = NULL,
.cp_commit = &r100_cp_commit,
.ring_start = &rv515_ring_start,
.ring_test = &r100_ring_test,
.ring_ib_execute = &r100_ring_ib_execute,
- .ib_test = NULL,
.irq_set = &rs600_irq_set,
.irq_process = &rs600_irq_process,
.get_vblank_counter = &rs600_get_vblank_counter,
@@ -515,7 +410,9 @@ static struct radeon_asic r520_asic = {
.copy_blit = &r100_copy_blit,
.copy_dma = &r300_copy_dma,
.copy = &r100_copy_blit,
+ .get_engine_clock = &radeon_atom_get_engine_clock,
.set_engine_clock = &radeon_atom_set_engine_clock,
+ .get_memory_clock = &radeon_atom_get_memory_clock,
.set_memory_clock = &radeon_atom_set_memory_clock,
.set_pcie_lanes = &rv370_set_pcie_lanes,
.set_clock_gating = &radeon_atom_set_clock_gating,
@@ -554,37 +451,23 @@ int r600_set_surface_reg(struct radeon_device *rdev, int reg,
uint32_t offset, uint32_t obj_size);
int r600_clear_surface_reg(struct radeon_device *rdev, int reg);
void r600_ring_ib_execute(struct radeon_device *rdev, struct radeon_ib *ib);
-int r600_ib_test(struct radeon_device *rdev);
int r600_ring_test(struct radeon_device *rdev);
int r600_copy_blit(struct radeon_device *rdev,
uint64_t src_offset, uint64_t dst_offset,
unsigned num_pages, struct radeon_fence *fence);
static struct radeon_asic r600_asic = {
- .errata = NULL,
.init = &r600_init,
.fini = &r600_fini,
.suspend = &r600_suspend,
.resume = &r600_resume,
.cp_commit = &r600_cp_commit,
- .vram_info = NULL,
.vga_set_state = &r600_vga_set_state,
.gpu_reset = &r600_gpu_reset,
- .mc_init = NULL,
- .mc_fini = NULL,
- .wb_init = &r600_wb_init,
- .wb_fini = &r600_wb_fini,
- .gart_enable = NULL,
- .gart_disable = NULL,
.gart_tlb_flush = &r600_pcie_gart_tlb_flush,
.gart_set_page = &rs600_gart_set_page,
- .cp_init = NULL,
- .cp_fini = NULL,
- .cp_disable = NULL,
- .ring_start = NULL,
.ring_test = &r600_ring_test,
.ring_ib_execute = &r600_ring_ib_execute,
- .ib_test = &r600_ib_test,
.irq_set = &r600_irq_set,
.irq_process = &r600_irq_process,
.fence_ring_emit = &r600_fence_ring_emit,
@@ -592,7 +475,9 @@ static struct radeon_asic r600_asic = {
.copy_blit = &r600_copy_blit,
.copy_dma = &r600_copy_blit,
.copy = &r600_copy_blit,
+ .get_engine_clock = &radeon_atom_get_engine_clock,
.set_engine_clock = &radeon_atom_set_engine_clock,
+ .get_memory_clock = &radeon_atom_get_memory_clock,
.set_memory_clock = &radeon_atom_set_memory_clock,
.set_pcie_lanes = NULL,
.set_clock_gating = &radeon_atom_set_clock_gating,
@@ -611,30 +496,17 @@ int rv770_resume(struct radeon_device *rdev);
int rv770_gpu_reset(struct radeon_device *rdev);
static struct radeon_asic rv770_asic = {
- .errata = NULL,
.init = &rv770_init,
.fini = &rv770_fini,
.suspend = &rv770_suspend,
.resume = &rv770_resume,
.cp_commit = &r600_cp_commit,
- .vram_info = NULL,
.gpu_reset = &rv770_gpu_reset,
.vga_set_state = &r600_vga_set_state,
- .mc_init = NULL,
- .mc_fini = NULL,
- .wb_init = &r600_wb_init,
- .wb_fini = &r600_wb_fini,
- .gart_enable = NULL,
- .gart_disable = NULL,
.gart_tlb_flush = &r600_pcie_gart_tlb_flush,
.gart_set_page = &rs600_gart_set_page,
- .cp_init = NULL,
- .cp_fini = NULL,
- .cp_disable = NULL,
- .ring_start = NULL,
.ring_test = &r600_ring_test,
.ring_ib_execute = &r600_ring_ib_execute,
- .ib_test = &r600_ib_test,
.irq_set = &r600_irq_set,
.irq_process = &r600_irq_process,
.fence_ring_emit = &r600_fence_ring_emit,
@@ -642,7 +514,9 @@ static struct radeon_asic rv770_asic = {
.copy_blit = &r600_copy_blit,
.copy_dma = &r600_copy_blit,
.copy = &r600_copy_blit,
+ .get_engine_clock = &radeon_atom_get_engine_clock,
.set_engine_clock = &radeon_atom_set_engine_clock,
+ .get_memory_clock = &radeon_atom_get_memory_clock,
.set_memory_clock = &radeon_atom_set_memory_clock,
.set_pcie_lanes = NULL,
.set_clock_gating = &radeon_atom_set_clock_gating,
diff --git a/drivers/gpu/drm/radeon/radeon_atombios.c b/drivers/gpu/drm/radeon/radeon_atombios.c
index 5b6c08cee40..2ed88a82093 100644
--- a/drivers/gpu/drm/radeon/radeon_atombios.c
+++ b/drivers/gpu/drm/radeon/radeon_atombios.c
@@ -46,7 +46,8 @@ radeon_add_atom_connector(struct drm_device *dev,
uint32_t supported_device,
int connector_type,
struct radeon_i2c_bus_rec *i2c_bus,
- bool linkb, uint32_t igp_lane_info);
+ bool linkb, uint32_t igp_lane_info,
+ uint16_t connector_object_id);
/* from radeon_legacy_encoder.c */
extern void
@@ -193,6 +194,23 @@ const int supported_devices_connector_convert[] = {
DRM_MODE_CONNECTOR_DisplayPort
};
+const uint16_t supported_devices_connector_object_id_convert[] = {
+ CONNECTOR_OBJECT_ID_NONE,
+ CONNECTOR_OBJECT_ID_VGA,
+ CONNECTOR_OBJECT_ID_DUAL_LINK_DVI_I, /* not all boards support DL */
+ CONNECTOR_OBJECT_ID_DUAL_LINK_DVI_D, /* not all boards support DL */
+ CONNECTOR_OBJECT_ID_VGA, /* technically DVI-A */
+ CONNECTOR_OBJECT_ID_COMPOSITE,
+ CONNECTOR_OBJECT_ID_SVIDEO,
+ CONNECTOR_OBJECT_ID_LVDS,
+ CONNECTOR_OBJECT_ID_9PIN_DIN,
+ CONNECTOR_OBJECT_ID_9PIN_DIN,
+ CONNECTOR_OBJECT_ID_DISPLAYPORT,
+ CONNECTOR_OBJECT_ID_HDMI_TYPE_A,
+ CONNECTOR_OBJECT_ID_HDMI_TYPE_B,
+ CONNECTOR_OBJECT_ID_SVIDEO
+};
+
const int object_connector_convert[] = {
DRM_MODE_CONNECTOR_Unknown,
DRM_MODE_CONNECTOR_DVII,
@@ -229,7 +247,7 @@ bool radeon_get_atom_connector_info_from_object_table(struct drm_device *dev)
ATOM_OBJECT_HEADER *obj_header;
int i, j, path_size, device_support;
int connector_type;
- uint16_t igp_lane_info, conn_id;
+ uint16_t igp_lane_info, conn_id, connector_object_id;
bool linkb;
struct radeon_i2c_bus_rec ddc_bus;
@@ -277,7 +295,8 @@ bool radeon_get_atom_connector_info_from_object_table(struct drm_device *dev)
ATOM_DEVICE_CV_SUPPORT)
continue;
- if ((rdev->family == CHIP_RS780) &&
+ /* IGP chips */
+ if ((rdev->flags & RADEON_IS_IGP) &&
(con_obj_id ==
CONNECTOR_OBJECT_ID_PCIE_CONNECTOR)) {
uint16_t igp_offset = 0;
@@ -311,6 +330,7 @@ bool radeon_get_atom_connector_info_from_object_table(struct drm_device *dev)
connector_type =
object_connector_convert
[ct];
+ connector_object_id = ct;
igp_lane_info =
slot_config & 0xffff;
} else
@@ -321,6 +341,7 @@ bool radeon_get_atom_connector_info_from_object_table(struct drm_device *dev)
igp_lane_info = 0;
connector_type =
object_connector_convert[con_obj_id];
+ connector_object_id = con_obj_id;
}
if (connector_type == DRM_MODE_CONNECTOR_Unknown)
@@ -425,7 +446,8 @@ bool radeon_get_atom_connector_info_from_object_table(struct drm_device *dev)
le16_to_cpu(path->
usDeviceTag),
connector_type, &ddc_bus,
- linkb, igp_lane_info);
+ linkb, igp_lane_info,
+ connector_object_id);
}
}
@@ -435,6 +457,45 @@ bool radeon_get_atom_connector_info_from_object_table(struct drm_device *dev)
return true;
}
+static uint16_t atombios_get_connector_object_id(struct drm_device *dev,
+ int connector_type,
+ uint16_t devices)
+{
+ struct radeon_device *rdev = dev->dev_private;
+
+ if (rdev->flags & RADEON_IS_IGP) {
+ return supported_devices_connector_object_id_convert
+ [connector_type];
+ } else if (((connector_type == DRM_MODE_CONNECTOR_DVII) ||
+ (connector_type == DRM_MODE_CONNECTOR_DVID)) &&
+ (devices & ATOM_DEVICE_DFP2_SUPPORT)) {
+ struct radeon_mode_info *mode_info = &rdev->mode_info;
+ struct atom_context *ctx = mode_info->atom_context;
+ int index = GetIndexIntoMasterTable(DATA, XTMDS_Info);
+ uint16_t size, data_offset;
+ uint8_t frev, crev;
+ ATOM_XTMDS_INFO *xtmds;
+
+ atom_parse_data_header(ctx, index, &size, &frev, &crev, &data_offset);
+ xtmds = (ATOM_XTMDS_INFO *)(ctx->bios + data_offset);
+
+ if (xtmds->ucSupportedLink & ATOM_XTMDS_SUPPORTED_DUALLINK) {
+ if (connector_type == DRM_MODE_CONNECTOR_DVII)
+ return CONNECTOR_OBJECT_ID_DUAL_LINK_DVI_I;
+ else
+ return CONNECTOR_OBJECT_ID_DUAL_LINK_DVI_D;
+ } else {
+ if (connector_type == DRM_MODE_CONNECTOR_DVII)
+ return CONNECTOR_OBJECT_ID_SINGLE_LINK_DVI_I;
+ else
+ return CONNECTOR_OBJECT_ID_SINGLE_LINK_DVI_D;
+ }
+ } else {
+ return supported_devices_connector_object_id_convert
+ [connector_type];
+ }
+}
+
struct bios_connector {
bool valid;
uint16_t line_mux;
@@ -593,14 +654,20 @@ bool radeon_get_atom_connector_info_from_supported_devices_table(struct
/* add the connectors */
for (i = 0; i < ATOM_MAX_SUPPORTED_DEVICE; i++) {
- if (bios_connectors[i].valid)
+ if (bios_connectors[i].valid) {
+ uint16_t connector_object_id =
+ atombios_get_connector_object_id(dev,
+ bios_connectors[i].connector_type,
+ bios_connectors[i].devices);
radeon_add_atom_connector(dev,
bios_connectors[i].line_mux,
bios_connectors[i].devices,
bios_connectors[i].
connector_type,
&bios_connectors[i].ddc_bus,
- false, 0);
+ false, 0,
+ connector_object_id);
+ }
}
radeon_link_encoder_connector(dev);
@@ -641,8 +708,12 @@ bool radeon_atom_get_clock_info(struct drm_device *dev)
le16_to_cpu(firmware_info->info.usReferenceClock);
p1pll->reference_div = 0;
- p1pll->pll_out_min =
- le16_to_cpu(firmware_info->info.usMinPixelClockPLL_Output);
+ if (crev < 2)
+ p1pll->pll_out_min =
+ le16_to_cpu(firmware_info->info.usMinPixelClockPLL_Output);
+ else
+ p1pll->pll_out_min =
+ le32_to_cpu(firmware_info->info_12.ulMinPixelClockPLL_Output);
p1pll->pll_out_max =
le32_to_cpu(firmware_info->info.ulMaxPixelClockPLL_Output);
@@ -651,6 +722,16 @@ bool radeon_atom_get_clock_info(struct drm_device *dev)
p1pll->pll_out_min = 64800;
else
p1pll->pll_out_min = 20000;
+ } else if (p1pll->pll_out_min > 64800) {
+		/* Limiting the pll output range is generally a good thing, since
+		 * it restricts the possible pll combinations for a given frequency,
+		 * presumably to the ones that work best on each card. However,
+		 * certain dual-link DVI monitors need pll combinations that this
+		 * limit would exclude, at least on pre-DCE 3.0 r6xx hardware. The
+		 * limit may need to be adjusted per family.
+		 */
+ p1pll->pll_out_min = 64800;
}
p1pll->pll_in_min =
@@ -767,6 +848,46 @@ bool radeon_atombios_get_tmds_info(struct radeon_encoder *encoder,
return false;
}
+static struct radeon_atom_ss *
+radeon_atombios_get_ss_info(struct radeon_encoder *encoder, int id)
+{
+ struct drm_device *dev = encoder->base.dev;
+ struct radeon_device *rdev = dev->dev_private;
+ struct radeon_mode_info *mode_info = &rdev->mode_info;
+ int index = GetIndexIntoMasterTable(DATA, PPLL_SS_Info);
+ uint16_t data_offset;
+ struct _ATOM_SPREAD_SPECTRUM_INFO *ss_info;
+ uint8_t frev, crev;
+ struct radeon_atom_ss *ss = NULL;
+
+ if (id > ATOM_MAX_SS_ENTRY)
+ return NULL;
+
+ atom_parse_data_header(mode_info->atom_context, index, NULL, &frev,
+ &crev, &data_offset);
+
+ ss_info =
+ (struct _ATOM_SPREAD_SPECTRUM_INFO *)(mode_info->atom_context->bios + data_offset);
+
+ if (ss_info) {
+		ss = kzalloc(sizeof(struct radeon_atom_ss), GFP_KERNEL);
+
+ if (!ss)
+ return NULL;
+
+ ss->percentage = le16_to_cpu(ss_info->asSS_Info[id].usSpreadSpectrumPercentage);
+ ss->type = ss_info->asSS_Info[id].ucSpreadSpectrumType;
+ ss->step = ss_info->asSS_Info[id].ucSS_Step;
+ ss->delay = ss_info->asSS_Info[id].ucSS_Delay;
+ ss->range = ss_info->asSS_Info[id].ucSS_Range;
+ ss->refdiv = ss_info->asSS_Info[id].ucRecommendedRef_Div;
+ }
+ return ss;
+}
+
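radeon_atombios_get_ss_info() hands back a kzalloc()ed radeon_atom_ss, so the caller owns the allocation. A rough usage sketch follows; only the assignment mirrors the real caller further down, the logging and the kfree() reminder are illustrative:

	lvds->ss = radeon_atombios_get_ss_info(encoder, lvds_info->info.ucSS_Id);
	if (lvds->ss)
		DRM_INFO("LVDS spread spectrum: type %d, %d%%\n",
			 lvds->ss->type, lvds->ss->percentage);
	/* whoever frees the lvds struct must also kfree(lvds->ss) */
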
union lvds_info {
struct _ATOM_LVDS_INFO info;
struct _ATOM_LVDS_INFO_V12 info_12;
@@ -798,27 +919,31 @@ struct radeon_encoder_atom_dig *radeon_atombios_get_lvds_info(struct
if (!lvds)
return NULL;
- lvds->native_mode.dotclock =
+ lvds->native_mode.clock =
le16_to_cpu(lvds_info->info.sLCDTiming.usPixClk) * 10;
- lvds->native_mode.panel_xres =
+ lvds->native_mode.hdisplay =
le16_to_cpu(lvds_info->info.sLCDTiming.usHActive);
- lvds->native_mode.panel_yres =
+ lvds->native_mode.vdisplay =
le16_to_cpu(lvds_info->info.sLCDTiming.usVActive);
- lvds->native_mode.hblank =
- le16_to_cpu(lvds_info->info.sLCDTiming.usHBlanking_Time);
- lvds->native_mode.hoverplus =
- le16_to_cpu(lvds_info->info.sLCDTiming.usHSyncOffset);
- lvds->native_mode.hsync_width =
- le16_to_cpu(lvds_info->info.sLCDTiming.usHSyncWidth);
- lvds->native_mode.vblank =
- le16_to_cpu(lvds_info->info.sLCDTiming.usVBlanking_Time);
- lvds->native_mode.voverplus =
- le16_to_cpu(lvds_info->info.sLCDTiming.usVSyncOffset);
- lvds->native_mode.vsync_width =
- le16_to_cpu(lvds_info->info.sLCDTiming.usVSyncWidth);
+ lvds->native_mode.htotal = lvds->native_mode.hdisplay +
+ le16_to_cpu(lvds_info->info.sLCDTiming.usHBlanking_Time);
+ lvds->native_mode.hsync_start = lvds->native_mode.hdisplay +
+ le16_to_cpu(lvds_info->info.sLCDTiming.usHSyncOffset);
+ lvds->native_mode.hsync_end = lvds->native_mode.hsync_start +
+ le16_to_cpu(lvds_info->info.sLCDTiming.usHSyncWidth);
+ lvds->native_mode.vtotal = lvds->native_mode.vdisplay +
+ le16_to_cpu(lvds_info->info.sLCDTiming.usVBlanking_Time);
+ lvds->native_mode.vsync_start = lvds->native_mode.vdisplay +
+		le16_to_cpu(lvds_info->info.sLCDTiming.usVSyncOffset);
+ lvds->native_mode.vsync_end = lvds->native_mode.vsync_start +
+ le16_to_cpu(lvds_info->info.sLCDTiming.usVSyncWidth);
lvds->panel_pwr_delay =
le16_to_cpu(lvds_info->info.usOffDelayInMs);
lvds->lvds_misc = lvds_info->info.ucLVDS_Misc;
+ /* set crtc values */
+ drm_mode_set_crtcinfo(&lvds->native_mode, CRTC_INTERLACE_HALVE_V);
+
+ lvds->ss = radeon_atombios_get_ss_info(encoder, lvds_info->info.ucSS_Id);
encoder->native_mode = lvds->native_mode;
}
@@ -857,8 +982,7 @@ radeon_atombios_get_primary_dac_info(struct radeon_encoder *encoder)
}
bool radeon_atom_get_tv_timings(struct radeon_device *rdev, int index,
- SET_CRTC_TIMING_PARAMETERS_PS_ALLOCATION *crtc_timing,
- int32_t *pixel_clock)
+ struct drm_display_mode *mode)
{
struct radeon_mode_info *mode_info = &rdev->mode_info;
ATOM_ANALOG_TV_INFO *tv_info;
@@ -866,7 +990,7 @@ bool radeon_atom_get_tv_timings(struct radeon_device *rdev, int index,
ATOM_DTD_FORMAT *dtd_timings;
int data_index = GetIndexIntoMasterTable(DATA, AnalogTV_Info);
u8 frev, crev;
- uint16_t data_offset;
+ u16 data_offset, misc;
atom_parse_data_header(mode_info->atom_context, data_index, NULL, &frev, &crev, &data_offset);
@@ -876,28 +1000,37 @@ bool radeon_atom_get_tv_timings(struct radeon_device *rdev, int index,
if (index > MAX_SUPPORTED_TV_TIMING)
return false;
- crtc_timing->usH_Total = le16_to_cpu(tv_info->aModeTimings[index].usCRTC_H_Total);
- crtc_timing->usH_Disp = le16_to_cpu(tv_info->aModeTimings[index].usCRTC_H_Disp);
- crtc_timing->usH_SyncStart = le16_to_cpu(tv_info->aModeTimings[index].usCRTC_H_SyncStart);
- crtc_timing->usH_SyncWidth = le16_to_cpu(tv_info->aModeTimings[index].usCRTC_H_SyncWidth);
-
- crtc_timing->usV_Total = le16_to_cpu(tv_info->aModeTimings[index].usCRTC_V_Total);
- crtc_timing->usV_Disp = le16_to_cpu(tv_info->aModeTimings[index].usCRTC_V_Disp);
- crtc_timing->usV_SyncStart = le16_to_cpu(tv_info->aModeTimings[index].usCRTC_V_SyncStart);
- crtc_timing->usV_SyncWidth = le16_to_cpu(tv_info->aModeTimings[index].usCRTC_V_SyncWidth);
-
- crtc_timing->susModeMiscInfo = tv_info->aModeTimings[index].susModeMiscInfo;
-
- crtc_timing->ucOverscanRight = le16_to_cpu(tv_info->aModeTimings[index].usCRTC_OverscanRight);
- crtc_timing->ucOverscanLeft = le16_to_cpu(tv_info->aModeTimings[index].usCRTC_OverscanLeft);
- crtc_timing->ucOverscanBottom = le16_to_cpu(tv_info->aModeTimings[index].usCRTC_OverscanBottom);
- crtc_timing->ucOverscanTop = le16_to_cpu(tv_info->aModeTimings[index].usCRTC_OverscanTop);
- *pixel_clock = le16_to_cpu(tv_info->aModeTimings[index].usPixelClock) * 10;
+ mode->crtc_htotal = le16_to_cpu(tv_info->aModeTimings[index].usCRTC_H_Total);
+ mode->crtc_hdisplay = le16_to_cpu(tv_info->aModeTimings[index].usCRTC_H_Disp);
+ mode->crtc_hsync_start = le16_to_cpu(tv_info->aModeTimings[index].usCRTC_H_SyncStart);
+ mode->crtc_hsync_end = le16_to_cpu(tv_info->aModeTimings[index].usCRTC_H_SyncStart) +
+ le16_to_cpu(tv_info->aModeTimings[index].usCRTC_H_SyncWidth);
+
+ mode->crtc_vtotal = le16_to_cpu(tv_info->aModeTimings[index].usCRTC_V_Total);
+ mode->crtc_vdisplay = le16_to_cpu(tv_info->aModeTimings[index].usCRTC_V_Disp);
+ mode->crtc_vsync_start = le16_to_cpu(tv_info->aModeTimings[index].usCRTC_V_SyncStart);
+ mode->crtc_vsync_end = le16_to_cpu(tv_info->aModeTimings[index].usCRTC_V_SyncStart) +
+ le16_to_cpu(tv_info->aModeTimings[index].usCRTC_V_SyncWidth);
+
+ mode->flags = 0;
+ misc = le16_to_cpu(tv_info->aModeTimings[index].susModeMiscInfo.usAccess);
+ if (misc & ATOM_VSYNC_POLARITY)
+ mode->flags |= DRM_MODE_FLAG_NVSYNC;
+ if (misc & ATOM_HSYNC_POLARITY)
+ mode->flags |= DRM_MODE_FLAG_NHSYNC;
+ if (misc & ATOM_COMPOSITESYNC)
+ mode->flags |= DRM_MODE_FLAG_CSYNC;
+ if (misc & ATOM_INTERLACE)
+ mode->flags |= DRM_MODE_FLAG_INTERLACE;
+ if (misc & ATOM_DOUBLE_CLOCK_MODE)
+ mode->flags |= DRM_MODE_FLAG_DBLSCAN;
+
+ mode->clock = le16_to_cpu(tv_info->aModeTimings[index].usPixelClock) * 10;
if (index == 1) {
/* PAL timings appear to have wrong values for totals */
- crtc_timing->usH_Total -= 1;
- crtc_timing->usV_Total -= 1;
+ mode->crtc_htotal -= 1;
+ mode->crtc_vtotal -= 1;
}
break;
case 2:
@@ -906,17 +1039,36 @@ bool radeon_atom_get_tv_timings(struct radeon_device *rdev, int index,
return false;
dtd_timings = &tv_info_v1_2->aModeTimings[index];
- crtc_timing->usH_Total = le16_to_cpu(dtd_timings->usHActive) + le16_to_cpu(dtd_timings->usHBlanking_Time);
- crtc_timing->usH_Disp = le16_to_cpu(dtd_timings->usHActive);
- crtc_timing->usH_SyncStart = le16_to_cpu(dtd_timings->usHActive) + le16_to_cpu(dtd_timings->usHSyncOffset);
- crtc_timing->usH_SyncWidth = le16_to_cpu(dtd_timings->usHSyncWidth);
- crtc_timing->usV_Total = le16_to_cpu(dtd_timings->usVActive) + le16_to_cpu(dtd_timings->usVBlanking_Time);
- crtc_timing->usV_Disp = le16_to_cpu(dtd_timings->usVActive);
- crtc_timing->usV_SyncStart = le16_to_cpu(dtd_timings->usVActive) + le16_to_cpu(dtd_timings->usVSyncOffset);
- crtc_timing->usV_SyncWidth = le16_to_cpu(dtd_timings->usVSyncWidth);
-
- crtc_timing->susModeMiscInfo.usAccess = le16_to_cpu(dtd_timings->susModeMiscInfo.usAccess);
- *pixel_clock = le16_to_cpu(dtd_timings->usPixClk) * 10;
+ mode->crtc_htotal = le16_to_cpu(dtd_timings->usHActive) +
+ le16_to_cpu(dtd_timings->usHBlanking_Time);
+ mode->crtc_hdisplay = le16_to_cpu(dtd_timings->usHActive);
+ mode->crtc_hsync_start = le16_to_cpu(dtd_timings->usHActive) +
+ le16_to_cpu(dtd_timings->usHSyncOffset);
+ mode->crtc_hsync_end = mode->crtc_hsync_start +
+ le16_to_cpu(dtd_timings->usHSyncWidth);
+
+ mode->crtc_vtotal = le16_to_cpu(dtd_timings->usVActive) +
+ le16_to_cpu(dtd_timings->usVBlanking_Time);
+ mode->crtc_vdisplay = le16_to_cpu(dtd_timings->usVActive);
+ mode->crtc_vsync_start = le16_to_cpu(dtd_timings->usVActive) +
+ le16_to_cpu(dtd_timings->usVSyncOffset);
+ mode->crtc_vsync_end = mode->crtc_vsync_start +
+ le16_to_cpu(dtd_timings->usVSyncWidth);
+
+ mode->flags = 0;
+ misc = le16_to_cpu(dtd_timings->susModeMiscInfo.usAccess);
+ if (misc & ATOM_VSYNC_POLARITY)
+ mode->flags |= DRM_MODE_FLAG_NVSYNC;
+ if (misc & ATOM_HSYNC_POLARITY)
+ mode->flags |= DRM_MODE_FLAG_NHSYNC;
+ if (misc & ATOM_COMPOSITESYNC)
+ mode->flags |= DRM_MODE_FLAG_CSYNC;
+ if (misc & ATOM_INTERLACE)
+ mode->flags |= DRM_MODE_FLAG_INTERLACE;
+ if (misc & ATOM_DOUBLE_CLOCK_MODE)
+ mode->flags |= DRM_MODE_FLAG_DBLSCAN;
+
+ mode->clock = le16_to_cpu(dtd_timings->usPixClk) * 10;
break;
}
return true;
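
The susModeMiscInfo-to-DRM-flags translation now appears once per table revision in this function. Should a third copy ever be needed, a small helper along these lines could factor it out (a sketch, not part of the patch; the mapping matches the hunks above):

	static void radeon_atom_misc_to_mode_flags(u16 misc, struct drm_display_mode *mode)
	{
		mode->flags = 0;
		if (misc & ATOM_VSYNC_POLARITY)
			mode->flags |= DRM_MODE_FLAG_NVSYNC;
		if (misc & ATOM_HSYNC_POLARITY)
			mode->flags |= DRM_MODE_FLAG_NHSYNC;
		if (misc & ATOM_COMPOSITESYNC)
			mode->flags |= DRM_MODE_FLAG_CSYNC;
		if (misc & ATOM_INTERLACE)
			mode->flags |= DRM_MODE_FLAG_INTERLACE;
		if (misc & ATOM_DOUBLE_CLOCK_MODE)
			mode->flags |= DRM_MODE_FLAG_DBLSCAN;
	}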
@@ -981,6 +1133,24 @@ void radeon_atom_static_pwrmgt_setup(struct radeon_device *rdev, int enable)
atom_execute_table(rdev->mode_info.atom_context, index, (uint32_t *)&args);
}
+uint32_t radeon_atom_get_engine_clock(struct radeon_device *rdev)
+{
+ GET_ENGINE_CLOCK_PS_ALLOCATION args;
+ int index = GetIndexIntoMasterTable(COMMAND, GetEngineClock);
+
+ atom_execute_table(rdev->mode_info.atom_context, index, (uint32_t *)&args);
+ return args.ulReturnEngineClock;
+}
+
+uint32_t radeon_atom_get_memory_clock(struct radeon_device *rdev)
+{
+ GET_MEMORY_CLOCK_PS_ALLOCATION args;
+ int index = GetIndexIntoMasterTable(COMMAND, GetMemoryClock);
+
+ atom_execute_table(rdev->mode_info.atom_context, index, (uint32_t *)&args);
+ return args.ulReturnMemoryClock;
+}
+
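Both getters execute an AtomBIOS command table and read the result back out of the argument block. Like the legacy path (see the "10 khz" note in radeon_clocks.c further down), the returned values are assumed here to be in 10 kHz units, so converting for display is simple arithmetic (illustrative only):

	u32 sclk = radeon_atom_get_engine_clock(rdev);	/* assumed 10 kHz units */
	DRM_INFO("engine clock: %u.%02u MHz\n", sclk / 100, sclk % 100);
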
void radeon_atom_set_engine_clock(struct radeon_device *rdev,
uint32_t eng_clock)
{
diff --git a/drivers/gpu/drm/radeon/radeon_benchmark.c b/drivers/gpu/drm/radeon/radeon_benchmark.c
index 2e938f7496f..10bd50a7db8 100644
--- a/drivers/gpu/drm/radeon/radeon_benchmark.c
+++ b/drivers/gpu/drm/radeon/radeon_benchmark.c
@@ -63,7 +63,7 @@ void radeon_benchmark_move(struct radeon_device *rdev, unsigned bsize,
if (r) {
goto out_cleanup;
}
- r = radeon_copy_dma(rdev, saddr, daddr, size / 4096, fence);
+ r = radeon_copy_dma(rdev, saddr, daddr, size / RADEON_GPU_PAGE_SIZE, fence);
if (r) {
goto out_cleanup;
}
@@ -88,7 +88,7 @@ void radeon_benchmark_move(struct radeon_device *rdev, unsigned bsize,
if (r) {
goto out_cleanup;
}
- r = radeon_copy_blit(rdev, saddr, daddr, size / 4096, fence);
+ r = radeon_copy_blit(rdev, saddr, daddr, size / RADEON_GPU_PAGE_SIZE, fence);
if (r) {
goto out_cleanup;
}
diff --git a/drivers/gpu/drm/radeon/radeon_bios.c b/drivers/gpu/drm/radeon/radeon_bios.c
index 96e37a6e7ce..906921740c6 100644
--- a/drivers/gpu/drm/radeon/radeon_bios.c
+++ b/drivers/gpu/drm/radeon/radeon_bios.c
@@ -33,12 +33,47 @@
/*
* BIOS.
*/
+
+/* If you boot an IGP board with a discrete card as the primary,
+ * the IGP rom is not accessible via the rom bar as the IGP rom is
+ * part of the system bios. On boot, the system bios puts a
+ * copy of the igp rom at the start of vram if a discrete card is
+ * present.
+ */
+static bool igp_read_bios_from_vram(struct radeon_device *rdev)
+{
+ uint8_t __iomem *bios;
+ resource_size_t vram_base;
+ resource_size_t size = 256 * 1024; /* ??? */
+
+ rdev->bios = NULL;
+ vram_base = drm_get_resource_start(rdev->ddev, 0);
+ bios = ioremap(vram_base, size);
+ if (!bios) {
+ return false;
+ }
+
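+	/* 0x55, 0xaa is the standard PCI option-ROM signature */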
+ if (size == 0 || bios[0] != 0x55 || bios[1] != 0xaa) {
+ iounmap(bios);
+ return false;
+ }
+ rdev->bios = kmalloc(size, GFP_KERNEL);
+ if (rdev->bios == NULL) {
+ iounmap(bios);
+ return false;
+ }
+ memcpy(rdev->bios, bios, size);
+ iounmap(bios);
+ return true;
+}
+
static bool radeon_read_bios(struct radeon_device *rdev)
{
uint8_t __iomem *bios;
size_t size;
rdev->bios = NULL;
+ /* XXX: some cards may return 0 for rom size? ddx has a workaround */
bios = pci_map_rom(rdev->pdev, &size);
if (!bios) {
return false;
@@ -341,7 +376,9 @@ static bool legacy_read_disabled_bios(struct radeon_device *rdev)
static bool radeon_read_disabled_bios(struct radeon_device *rdev)
{
- if (rdev->family >= CHIP_RV770)
+ if (rdev->flags & RADEON_IS_IGP)
+ return igp_read_bios_from_vram(rdev);
+ else if (rdev->family >= CHIP_RV770)
return r700_read_disabled_bios(rdev);
else if (rdev->family >= CHIP_R600)
return r600_read_disabled_bios(rdev);
@@ -356,7 +393,12 @@ bool radeon_get_bios(struct radeon_device *rdev)
bool r;
uint16_t tmp;
- r = radeon_read_bios(rdev);
+ if (rdev->flags & RADEON_IS_IGP) {
+ r = igp_read_bios_from_vram(rdev);
+ if (r == false)
+ r = radeon_read_bios(rdev);
+ } else
+ r = radeon_read_bios(rdev);
if (r == false) {
r = radeon_read_disabled_bios(rdev);
}
diff --git a/drivers/gpu/drm/radeon/radeon_clocks.c b/drivers/gpu/drm/radeon/radeon_clocks.c
index 152eef13197..a8135416762 100644
--- a/drivers/gpu/drm/radeon/radeon_clocks.c
+++ b/drivers/gpu/drm/radeon/radeon_clocks.c
@@ -32,7 +32,7 @@
#include "atom.h"
/* 10 khz */
-static uint32_t radeon_legacy_get_engine_clock(struct radeon_device *rdev)
+uint32_t radeon_legacy_get_engine_clock(struct radeon_device *rdev)
{
struct radeon_pll *spll = &rdev->clock.spll;
uint32_t fb_div, ref_div, post_div, sclk;
@@ -411,7 +411,7 @@ void radeon_legacy_set_clock_gating(struct radeon_device *rdev, int enable)
R300_PIXCLK_TRANS_ALWAYS_ONb |
R300_PIXCLK_TVO_ALWAYS_ONb |
R300_P2G2CLK_ALWAYS_ONb |
- R300_P2G2CLK_ALWAYS_ONb);
+ R300_P2G2CLK_DAC_ALWAYS_ONb);
WREG32_PLL(RADEON_PIXCLKS_CNTL, tmp);
} else if (rdev->family >= CHIP_RV350) {
tmp = RREG32_PLL(R300_SCLK_CNTL2);
@@ -464,7 +464,7 @@ void radeon_legacy_set_clock_gating(struct radeon_device *rdev, int enable)
R300_PIXCLK_TRANS_ALWAYS_ONb |
R300_PIXCLK_TVO_ALWAYS_ONb |
R300_P2G2CLK_ALWAYS_ONb |
- R300_P2G2CLK_ALWAYS_ONb);
+ R300_P2G2CLK_DAC_ALWAYS_ONb);
WREG32_PLL(RADEON_PIXCLKS_CNTL, tmp);
tmp = RREG32_PLL(RADEON_MCLK_MISC);
@@ -654,7 +654,7 @@ void radeon_legacy_set_clock_gating(struct radeon_device *rdev, int enable)
R300_PIXCLK_TRANS_ALWAYS_ONb |
R300_PIXCLK_TVO_ALWAYS_ONb |
R300_P2G2CLK_ALWAYS_ONb |
- R300_P2G2CLK_ALWAYS_ONb |
+ R300_P2G2CLK_DAC_ALWAYS_ONb |
R300_DISP_DAC_PIXCLK_DAC2_BLANK_OFF);
WREG32_PLL(RADEON_PIXCLKS_CNTL, tmp);
} else if (rdev->family >= CHIP_RV350) {
@@ -705,7 +705,7 @@ void radeon_legacy_set_clock_gating(struct radeon_device *rdev, int enable)
R300_PIXCLK_TRANS_ALWAYS_ONb |
R300_PIXCLK_TVO_ALWAYS_ONb |
R300_P2G2CLK_ALWAYS_ONb |
- R300_P2G2CLK_ALWAYS_ONb |
+ R300_P2G2CLK_DAC_ALWAYS_ONb |
R300_DISP_DAC_PIXCLK_DAC2_BLANK_OFF);
WREG32_PLL(RADEON_PIXCLKS_CNTL, tmp);
} else {
diff --git a/drivers/gpu/drm/radeon/radeon_combios.c b/drivers/gpu/drm/radeon/radeon_combios.c
index 748265a105b..5253cbf6db1 100644
--- a/drivers/gpu/drm/radeon/radeon_combios.c
+++ b/drivers/gpu/drm/radeon/radeon_combios.c
@@ -49,7 +49,8 @@ radeon_add_legacy_connector(struct drm_device *dev,
uint32_t connector_id,
uint32_t supported_device,
int connector_type,
- struct radeon_i2c_bus_rec *i2c_bus);
+ struct radeon_i2c_bus_rec *i2c_bus,
+ uint16_t connector_object_id);
/* from radeon_legacy_encoder.c */
extern void
@@ -808,25 +809,25 @@ static struct radeon_encoder_lvds *radeon_legacy_get_lvds_info_from_regs(struct
lvds->panel_blon_delay = (lvds_ss_gen_cntl >> RADEON_LVDS_PWRSEQ_DELAY2_SHIFT) & 0xf;
if (fp_vert_stretch & RADEON_VERT_STRETCH_ENABLE)
- lvds->native_mode.panel_yres =
+ lvds->native_mode.vdisplay =
((fp_vert_stretch & RADEON_VERT_PANEL_SIZE) >>
RADEON_VERT_PANEL_SHIFT) + 1;
else
- lvds->native_mode.panel_yres =
+ lvds->native_mode.vdisplay =
(RREG32(RADEON_CRTC_V_TOTAL_DISP) >> 16) + 1;
if (fp_horz_stretch & RADEON_HORZ_STRETCH_ENABLE)
- lvds->native_mode.panel_xres =
+ lvds->native_mode.hdisplay =
(((fp_horz_stretch & RADEON_HORZ_PANEL_SIZE) >>
RADEON_HORZ_PANEL_SHIFT) + 1) * 8;
else
- lvds->native_mode.panel_xres =
+ lvds->native_mode.hdisplay =
((RREG32(RADEON_CRTC_H_TOTAL_DISP) >> 16) + 1) * 8;
- if ((lvds->native_mode.panel_xres < 640) ||
- (lvds->native_mode.panel_yres < 480)) {
- lvds->native_mode.panel_xres = 640;
- lvds->native_mode.panel_yres = 480;
+ if ((lvds->native_mode.hdisplay < 640) ||
+ (lvds->native_mode.vdisplay < 480)) {
+ lvds->native_mode.hdisplay = 640;
+ lvds->native_mode.vdisplay = 480;
}
ppll_div_sel = RREG8(RADEON_CLOCK_CNTL_INDEX + 1) & 0x3;
@@ -846,8 +847,8 @@ static struct radeon_encoder_lvds *radeon_legacy_get_lvds_info_from_regs(struct
lvds->panel_vcc_delay = 200;
DRM_INFO("Panel info derived from registers\n");
- DRM_INFO("Panel Size %dx%d\n", lvds->native_mode.panel_xres,
- lvds->native_mode.panel_yres);
+ DRM_INFO("Panel Size %dx%d\n", lvds->native_mode.hdisplay,
+ lvds->native_mode.vdisplay);
return lvds;
}
@@ -882,11 +883,11 @@ struct radeon_encoder_lvds *radeon_combios_get_lvds_info(struct radeon_encoder
DRM_INFO("Panel ID String: %s\n", stmp);
- lvds->native_mode.panel_xres = RBIOS16(lcd_info + 0x19);
- lvds->native_mode.panel_yres = RBIOS16(lcd_info + 0x1b);
+ lvds->native_mode.hdisplay = RBIOS16(lcd_info + 0x19);
+ lvds->native_mode.vdisplay = RBIOS16(lcd_info + 0x1b);
- DRM_INFO("Panel Size %dx%d\n", lvds->native_mode.panel_xres,
- lvds->native_mode.panel_yres);
+ DRM_INFO("Panel Size %dx%d\n", lvds->native_mode.hdisplay,
+ lvds->native_mode.vdisplay);
lvds->panel_vcc_delay = RBIOS16(lcd_info + 0x2c);
if (lvds->panel_vcc_delay > 2000 || lvds->panel_vcc_delay < 0)
@@ -944,27 +945,25 @@ struct radeon_encoder_lvds *radeon_combios_get_lvds_info(struct radeon_encoder
if (tmp == 0)
break;
- if ((RBIOS16(tmp) == lvds->native_mode.panel_xres) &&
+ if ((RBIOS16(tmp) == lvds->native_mode.hdisplay) &&
(RBIOS16(tmp + 2) ==
- lvds->native_mode.panel_yres)) {
- lvds->native_mode.hblank =
- (RBIOS16(tmp + 17) - RBIOS16(tmp + 19)) * 8;
- lvds->native_mode.hoverplus =
- (RBIOS16(tmp + 21) - RBIOS16(tmp + 19) -
- 1) * 8;
- lvds->native_mode.hsync_width =
- RBIOS8(tmp + 23) * 8;
-
- lvds->native_mode.vblank = (RBIOS16(tmp + 24) -
- RBIOS16(tmp + 26));
- lvds->native_mode.voverplus =
- ((RBIOS16(tmp + 28) & 0x7ff) -
- RBIOS16(tmp + 26));
- lvds->native_mode.vsync_width =
- ((RBIOS16(tmp + 28) & 0xf800) >> 11);
- lvds->native_mode.dotclock =
- RBIOS16(tmp + 9) * 10;
+ lvds->native_mode.vdisplay)) {
+ lvds->native_mode.htotal = RBIOS16(tmp + 17) * 8;
+ lvds->native_mode.hsync_start = RBIOS16(tmp + 21) * 8;
+ lvds->native_mode.hsync_end = (RBIOS8(tmp + 23) +
+ RBIOS16(tmp + 21)) * 8;
+
+ lvds->native_mode.vtotal = RBIOS16(tmp + 24);
+ lvds->native_mode.vsync_start = RBIOS16(tmp + 28) & 0x7ff;
+ lvds->native_mode.vsync_end =
+ ((RBIOS16(tmp + 28) & 0xf800) >> 11) +
+ (RBIOS16(tmp + 28) & 0x7ff);
+
+ lvds->native_mode.clock = RBIOS16(tmp + 9) * 10;
lvds->native_mode.flags = 0;
+ /* set crtc values */
+ drm_mode_set_crtcinfo(&lvds->native_mode, CRTC_INTERLACE_HALVE_V);
+
}
}
} else {
@@ -1178,7 +1177,8 @@ bool radeon_get_legacy_connector_info_from_table(struct drm_device *dev)
radeon_add_legacy_connector(dev, 0,
ATOM_DEVICE_CRT1_SUPPORT,
DRM_MODE_CONNECTOR_VGA,
- &ddc_i2c);
+ &ddc_i2c,
+ CONNECTOR_OBJECT_ID_VGA);
} else if (rdev->flags & RADEON_IS_MOBILITY) {
/* LVDS */
ddc_i2c = combios_setup_i2c_bus(RADEON_LCD_GPIO_MASK);
@@ -1190,7 +1190,8 @@ bool radeon_get_legacy_connector_info_from_table(struct drm_device *dev)
radeon_add_legacy_connector(dev, 0,
ATOM_DEVICE_LCD1_SUPPORT,
DRM_MODE_CONNECTOR_LVDS,
- &ddc_i2c);
+ &ddc_i2c,
+ CONNECTOR_OBJECT_ID_LVDS);
/* VGA - primary dac */
ddc_i2c = combios_setup_i2c_bus(RADEON_GPIO_VGA_DDC);
@@ -1202,7 +1203,8 @@ bool radeon_get_legacy_connector_info_from_table(struct drm_device *dev)
radeon_add_legacy_connector(dev, 1,
ATOM_DEVICE_CRT1_SUPPORT,
DRM_MODE_CONNECTOR_VGA,
- &ddc_i2c);
+ &ddc_i2c,
+ CONNECTOR_OBJECT_ID_VGA);
} else {
/* DVI-I - tv dac, int tmds */
ddc_i2c = combios_setup_i2c_bus(RADEON_GPIO_DVI_DDC);
@@ -1220,7 +1222,8 @@ bool radeon_get_legacy_connector_info_from_table(struct drm_device *dev)
ATOM_DEVICE_DFP1_SUPPORT |
ATOM_DEVICE_CRT2_SUPPORT,
DRM_MODE_CONNECTOR_DVII,
- &ddc_i2c);
+ &ddc_i2c,
+ CONNECTOR_OBJECT_ID_SINGLE_LINK_DVI_I);
/* VGA - primary dac */
ddc_i2c = combios_setup_i2c_bus(RADEON_GPIO_VGA_DDC);
@@ -1232,7 +1235,8 @@ bool radeon_get_legacy_connector_info_from_table(struct drm_device *dev)
radeon_add_legacy_connector(dev, 1,
ATOM_DEVICE_CRT1_SUPPORT,
DRM_MODE_CONNECTOR_VGA,
- &ddc_i2c);
+ &ddc_i2c,
+ CONNECTOR_OBJECT_ID_VGA);
}
if (rdev->family != CHIP_R100 && rdev->family != CHIP_R200) {
@@ -1245,7 +1249,8 @@ bool radeon_get_legacy_connector_info_from_table(struct drm_device *dev)
radeon_add_legacy_connector(dev, 2,
ATOM_DEVICE_TV1_SUPPORT,
DRM_MODE_CONNECTOR_SVIDEO,
- &ddc_i2c);
+ &ddc_i2c,
+ CONNECTOR_OBJECT_ID_SVIDEO);
}
break;
case CT_IBOOK:
@@ -1259,7 +1264,8 @@ bool radeon_get_legacy_connector_info_from_table(struct drm_device *dev)
0),
ATOM_DEVICE_LCD1_SUPPORT);
radeon_add_legacy_connector(dev, 0, ATOM_DEVICE_LCD1_SUPPORT,
- DRM_MODE_CONNECTOR_LVDS, &ddc_i2c);
+ DRM_MODE_CONNECTOR_LVDS, &ddc_i2c,
+ CONNECTOR_OBJECT_ID_LVDS);
/* VGA - TV DAC */
ddc_i2c = combios_setup_i2c_bus(RADEON_GPIO_VGA_DDC);
radeon_add_legacy_encoder(dev,
@@ -1268,7 +1274,8 @@ bool radeon_get_legacy_connector_info_from_table(struct drm_device *dev)
2),
ATOM_DEVICE_CRT2_SUPPORT);
radeon_add_legacy_connector(dev, 1, ATOM_DEVICE_CRT2_SUPPORT,
- DRM_MODE_CONNECTOR_VGA, &ddc_i2c);
+ DRM_MODE_CONNECTOR_VGA, &ddc_i2c,
+ CONNECTOR_OBJECT_ID_VGA);
/* TV - TV DAC */
radeon_add_legacy_encoder(dev,
radeon_get_encoder_id(dev,
@@ -1277,7 +1284,8 @@ bool radeon_get_legacy_connector_info_from_table(struct drm_device *dev)
ATOM_DEVICE_TV1_SUPPORT);
radeon_add_legacy_connector(dev, 2, ATOM_DEVICE_TV1_SUPPORT,
DRM_MODE_CONNECTOR_SVIDEO,
- &ddc_i2c);
+ &ddc_i2c,
+ CONNECTOR_OBJECT_ID_SVIDEO);
break;
case CT_POWERBOOK_EXTERNAL:
DRM_INFO("Connector Table: %d (powerbook external tmds)\n",
@@ -1290,7 +1298,8 @@ bool radeon_get_legacy_connector_info_from_table(struct drm_device *dev)
0),
ATOM_DEVICE_LCD1_SUPPORT);
radeon_add_legacy_connector(dev, 0, ATOM_DEVICE_LCD1_SUPPORT,
- DRM_MODE_CONNECTOR_LVDS, &ddc_i2c);
+ DRM_MODE_CONNECTOR_LVDS, &ddc_i2c,
+ CONNECTOR_OBJECT_ID_LVDS);
/* DVI-I - primary dac, ext tmds */
ddc_i2c = combios_setup_i2c_bus(RADEON_GPIO_VGA_DDC);
radeon_add_legacy_encoder(dev,
@@ -1303,10 +1312,12 @@ bool radeon_get_legacy_connector_info_from_table(struct drm_device *dev)
ATOM_DEVICE_CRT1_SUPPORT,
1),
ATOM_DEVICE_CRT1_SUPPORT);
+ /* XXX some are SL */
radeon_add_legacy_connector(dev, 1,
ATOM_DEVICE_DFP2_SUPPORT |
ATOM_DEVICE_CRT1_SUPPORT,
- DRM_MODE_CONNECTOR_DVII, &ddc_i2c);
+ DRM_MODE_CONNECTOR_DVII, &ddc_i2c,
+ CONNECTOR_OBJECT_ID_DUAL_LINK_DVI_I);
/* TV - TV DAC */
radeon_add_legacy_encoder(dev,
radeon_get_encoder_id(dev,
@@ -1315,7 +1326,8 @@ bool radeon_get_legacy_connector_info_from_table(struct drm_device *dev)
ATOM_DEVICE_TV1_SUPPORT);
radeon_add_legacy_connector(dev, 2, ATOM_DEVICE_TV1_SUPPORT,
DRM_MODE_CONNECTOR_SVIDEO,
- &ddc_i2c);
+ &ddc_i2c,
+ CONNECTOR_OBJECT_ID_SVIDEO);
break;
case CT_POWERBOOK_INTERNAL:
DRM_INFO("Connector Table: %d (powerbook internal tmds)\n",
@@ -1328,7 +1340,8 @@ bool radeon_get_legacy_connector_info_from_table(struct drm_device *dev)
0),
ATOM_DEVICE_LCD1_SUPPORT);
radeon_add_legacy_connector(dev, 0, ATOM_DEVICE_LCD1_SUPPORT,
- DRM_MODE_CONNECTOR_LVDS, &ddc_i2c);
+ DRM_MODE_CONNECTOR_LVDS, &ddc_i2c,
+ CONNECTOR_OBJECT_ID_LVDS);
/* DVI-I - primary dac, int tmds */
ddc_i2c = combios_setup_i2c_bus(RADEON_GPIO_VGA_DDC);
radeon_add_legacy_encoder(dev,
@@ -1344,7 +1357,8 @@ bool radeon_get_legacy_connector_info_from_table(struct drm_device *dev)
radeon_add_legacy_connector(dev, 1,
ATOM_DEVICE_DFP1_SUPPORT |
ATOM_DEVICE_CRT1_SUPPORT,
- DRM_MODE_CONNECTOR_DVII, &ddc_i2c);
+ DRM_MODE_CONNECTOR_DVII, &ddc_i2c,
+ CONNECTOR_OBJECT_ID_SINGLE_LINK_DVI_I);
/* TV - TV DAC */
radeon_add_legacy_encoder(dev,
radeon_get_encoder_id(dev,
@@ -1353,7 +1367,8 @@ bool radeon_get_legacy_connector_info_from_table(struct drm_device *dev)
ATOM_DEVICE_TV1_SUPPORT);
radeon_add_legacy_connector(dev, 2, ATOM_DEVICE_TV1_SUPPORT,
DRM_MODE_CONNECTOR_SVIDEO,
- &ddc_i2c);
+ &ddc_i2c,
+ CONNECTOR_OBJECT_ID_SVIDEO);
break;
case CT_POWERBOOK_VGA:
DRM_INFO("Connector Table: %d (powerbook vga)\n",
@@ -1366,7 +1381,8 @@ bool radeon_get_legacy_connector_info_from_table(struct drm_device *dev)
0),
ATOM_DEVICE_LCD1_SUPPORT);
radeon_add_legacy_connector(dev, 0, ATOM_DEVICE_LCD1_SUPPORT,
- DRM_MODE_CONNECTOR_LVDS, &ddc_i2c);
+ DRM_MODE_CONNECTOR_LVDS, &ddc_i2c,
+ CONNECTOR_OBJECT_ID_LVDS);
/* VGA - primary dac */
ddc_i2c = combios_setup_i2c_bus(RADEON_GPIO_VGA_DDC);
radeon_add_legacy_encoder(dev,
@@ -1375,7 +1391,8 @@ bool radeon_get_legacy_connector_info_from_table(struct drm_device *dev)
1),
ATOM_DEVICE_CRT1_SUPPORT);
radeon_add_legacy_connector(dev, 1, ATOM_DEVICE_CRT1_SUPPORT,
- DRM_MODE_CONNECTOR_VGA, &ddc_i2c);
+ DRM_MODE_CONNECTOR_VGA, &ddc_i2c,
+ CONNECTOR_OBJECT_ID_VGA);
/* TV - TV DAC */
radeon_add_legacy_encoder(dev,
radeon_get_encoder_id(dev,
@@ -1384,7 +1401,8 @@ bool radeon_get_legacy_connector_info_from_table(struct drm_device *dev)
ATOM_DEVICE_TV1_SUPPORT);
radeon_add_legacy_connector(dev, 2, ATOM_DEVICE_TV1_SUPPORT,
DRM_MODE_CONNECTOR_SVIDEO,
- &ddc_i2c);
+ &ddc_i2c,
+ CONNECTOR_OBJECT_ID_SVIDEO);
break;
case CT_MINI_EXTERNAL:
DRM_INFO("Connector Table: %d (mini external tmds)\n",
@@ -1401,10 +1419,12 @@ bool radeon_get_legacy_connector_info_from_table(struct drm_device *dev)
ATOM_DEVICE_CRT2_SUPPORT,
2),
ATOM_DEVICE_CRT2_SUPPORT);
+ /* XXX are any DL? */
radeon_add_legacy_connector(dev, 0,
ATOM_DEVICE_DFP2_SUPPORT |
ATOM_DEVICE_CRT2_SUPPORT,
- DRM_MODE_CONNECTOR_DVII, &ddc_i2c);
+ DRM_MODE_CONNECTOR_DVII, &ddc_i2c,
+ CONNECTOR_OBJECT_ID_SINGLE_LINK_DVI_I);
/* TV - TV DAC */
radeon_add_legacy_encoder(dev,
radeon_get_encoder_id(dev,
@@ -1413,7 +1433,8 @@ bool radeon_get_legacy_connector_info_from_table(struct drm_device *dev)
ATOM_DEVICE_TV1_SUPPORT);
radeon_add_legacy_connector(dev, 1, ATOM_DEVICE_TV1_SUPPORT,
DRM_MODE_CONNECTOR_SVIDEO,
- &ddc_i2c);
+ &ddc_i2c,
+ CONNECTOR_OBJECT_ID_SVIDEO);
break;
case CT_MINI_INTERNAL:
DRM_INFO("Connector Table: %d (mini internal tmds)\n",
@@ -1433,7 +1454,8 @@ bool radeon_get_legacy_connector_info_from_table(struct drm_device *dev)
radeon_add_legacy_connector(dev, 0,
ATOM_DEVICE_DFP1_SUPPORT |
ATOM_DEVICE_CRT2_SUPPORT,
- DRM_MODE_CONNECTOR_DVII, &ddc_i2c);
+ DRM_MODE_CONNECTOR_DVII, &ddc_i2c,
+ CONNECTOR_OBJECT_ID_SINGLE_LINK_DVI_I);
/* TV - TV DAC */
radeon_add_legacy_encoder(dev,
radeon_get_encoder_id(dev,
@@ -1442,7 +1464,8 @@ bool radeon_get_legacy_connector_info_from_table(struct drm_device *dev)
ATOM_DEVICE_TV1_SUPPORT);
radeon_add_legacy_connector(dev, 1, ATOM_DEVICE_TV1_SUPPORT,
DRM_MODE_CONNECTOR_SVIDEO,
- &ddc_i2c);
+ &ddc_i2c,
+ CONNECTOR_OBJECT_ID_SVIDEO);
break;
case CT_IMAC_G5_ISIGHT:
DRM_INFO("Connector Table: %d (imac g5 isight)\n",
@@ -1455,7 +1478,8 @@ bool radeon_get_legacy_connector_info_from_table(struct drm_device *dev)
0),
ATOM_DEVICE_DFP1_SUPPORT);
radeon_add_legacy_connector(dev, 0, ATOM_DEVICE_DFP1_SUPPORT,
- DRM_MODE_CONNECTOR_DVID, &ddc_i2c);
+ DRM_MODE_CONNECTOR_DVID, &ddc_i2c,
+ CONNECTOR_OBJECT_ID_SINGLE_LINK_DVI_D);
/* VGA - tv dac */
ddc_i2c = combios_setup_i2c_bus(RADEON_GPIO_DVI_DDC);
radeon_add_legacy_encoder(dev,
@@ -1464,7 +1488,8 @@ bool radeon_get_legacy_connector_info_from_table(struct drm_device *dev)
2),
ATOM_DEVICE_CRT2_SUPPORT);
radeon_add_legacy_connector(dev, 1, ATOM_DEVICE_CRT2_SUPPORT,
- DRM_MODE_CONNECTOR_VGA, &ddc_i2c);
+ DRM_MODE_CONNECTOR_VGA, &ddc_i2c,
+ CONNECTOR_OBJECT_ID_VGA);
/* TV - TV DAC */
radeon_add_legacy_encoder(dev,
radeon_get_encoder_id(dev,
@@ -1473,7 +1498,8 @@ bool radeon_get_legacy_connector_info_from_table(struct drm_device *dev)
ATOM_DEVICE_TV1_SUPPORT);
radeon_add_legacy_connector(dev, 2, ATOM_DEVICE_TV1_SUPPORT,
DRM_MODE_CONNECTOR_SVIDEO,
- &ddc_i2c);
+ &ddc_i2c,
+ CONNECTOR_OBJECT_ID_SVIDEO);
break;
case CT_EMAC:
DRM_INFO("Connector Table: %d (emac)\n",
@@ -1486,7 +1512,8 @@ bool radeon_get_legacy_connector_info_from_table(struct drm_device *dev)
1),
ATOM_DEVICE_CRT1_SUPPORT);
radeon_add_legacy_connector(dev, 0, ATOM_DEVICE_CRT1_SUPPORT,
- DRM_MODE_CONNECTOR_VGA, &ddc_i2c);
+ DRM_MODE_CONNECTOR_VGA, &ddc_i2c,
+ CONNECTOR_OBJECT_ID_VGA);
/* VGA - tv dac */
ddc_i2c = combios_setup_i2c_bus(RADEON_GPIO_CRT2_DDC);
radeon_add_legacy_encoder(dev,
@@ -1495,7 +1522,8 @@ bool radeon_get_legacy_connector_info_from_table(struct drm_device *dev)
2),
ATOM_DEVICE_CRT2_SUPPORT);
radeon_add_legacy_connector(dev, 1, ATOM_DEVICE_CRT2_SUPPORT,
- DRM_MODE_CONNECTOR_VGA, &ddc_i2c);
+ DRM_MODE_CONNECTOR_VGA, &ddc_i2c,
+ CONNECTOR_OBJECT_ID_VGA);
/* TV - TV DAC */
radeon_add_legacy_encoder(dev,
radeon_get_encoder_id(dev,
@@ -1504,7 +1532,8 @@ bool radeon_get_legacy_connector_info_from_table(struct drm_device *dev)
ATOM_DEVICE_TV1_SUPPORT);
radeon_add_legacy_connector(dev, 2, ATOM_DEVICE_TV1_SUPPORT,
DRM_MODE_CONNECTOR_SVIDEO,
- &ddc_i2c);
+ &ddc_i2c,
+ CONNECTOR_OBJECT_ID_SVIDEO);
break;
default:
DRM_INFO("Connector table: %d (invalid)\n",
@@ -1581,11 +1610,63 @@ static bool radeon_apply_legacy_quirks(struct drm_device *dev,
return true;
}
+static bool radeon_apply_legacy_tv_quirks(struct drm_device *dev)
+{
+ /* Acer 5102 has non-existent TV port */
+ if (dev->pdev->device == 0x5975 &&
+ dev->pdev->subsystem_vendor == 0x1025 &&
+ dev->pdev->subsystem_device == 0x009f)
+ return false;
+
+ /* HP dc5750 has non-existent TV port */
+ if (dev->pdev->device == 0x5974 &&
+ dev->pdev->subsystem_vendor == 0x103c &&
+ dev->pdev->subsystem_device == 0x280a)
+ return false;
+
+ return true;
+}
+
+static uint16_t combios_check_dl_dvi(struct drm_device *dev, int is_dvi_d)
+{
+ struct radeon_device *rdev = dev->dev_private;
+ uint32_t ext_tmds_info;
+
+ if (rdev->flags & RADEON_IS_IGP) {
+ if (is_dvi_d)
+ return CONNECTOR_OBJECT_ID_SINGLE_LINK_DVI_D;
+ else
+ return CONNECTOR_OBJECT_ID_SINGLE_LINK_DVI_I;
+ }
+ ext_tmds_info = combios_get_table_offset(dev, COMBIOS_EXT_TMDS_INFO_TABLE);
+ if (ext_tmds_info) {
+ uint8_t rev = RBIOS8(ext_tmds_info);
+ uint8_t flags = RBIOS8(ext_tmds_info + 4 + 5);
+ if (rev >= 3) {
+ if (is_dvi_d)
+ return CONNECTOR_OBJECT_ID_DUAL_LINK_DVI_D;
+ else
+ return CONNECTOR_OBJECT_ID_DUAL_LINK_DVI_I;
+ } else {
+ if (flags & 1) {
+ if (is_dvi_d)
+ return CONNECTOR_OBJECT_ID_DUAL_LINK_DVI_D;
+ else
+ return CONNECTOR_OBJECT_ID_DUAL_LINK_DVI_I;
+ }
+ }
+ }
+ if (is_dvi_d)
+ return CONNECTOR_OBJECT_ID_SINGLE_LINK_DVI_D;
+ else
+ return CONNECTOR_OBJECT_ID_SINGLE_LINK_DVI_I;
+}
+
bool radeon_get_legacy_connector_info_from_bios(struct drm_device *dev)
{
struct radeon_device *rdev = dev->dev_private;
uint32_t conn_info, entry, devices;
- uint16_t tmp;
+ uint16_t tmp, connector_object_id;
enum radeon_combios_ddc ddc_type;
enum radeon_combios_connector connector;
int i = 0;
@@ -1628,8 +1709,9 @@ bool radeon_get_legacy_connector_info_from_bios(struct drm_device *dev)
break;
}
- radeon_apply_legacy_quirks(dev, i, &connector,
- &ddc_i2c);
+ if (!radeon_apply_legacy_quirks(dev, i, &connector,
+ &ddc_i2c))
+ continue;
switch (connector) {
case CONNECTOR_PROPRIETARY_LEGACY:
@@ -1644,7 +1726,8 @@ bool radeon_get_legacy_connector_info_from_bios(struct drm_device *dev)
radeon_add_legacy_connector(dev, i, devices,
legacy_connector_convert
[connector],
- &ddc_i2c);
+ &ddc_i2c,
+ CONNECTOR_OBJECT_ID_SINGLE_LINK_DVI_D);
break;
case CONNECTOR_CRT_LEGACY:
if (tmp & 0x1) {
@@ -1669,7 +1752,8 @@ bool radeon_get_legacy_connector_info_from_bios(struct drm_device *dev)
devices,
legacy_connector_convert
[connector],
- &ddc_i2c);
+ &ddc_i2c,
+ CONNECTOR_OBJECT_ID_VGA);
break;
case CONNECTOR_DVI_I_LEGACY:
devices = 0;
@@ -1698,6 +1782,7 @@ bool radeon_get_legacy_connector_info_from_bios(struct drm_device *dev)
ATOM_DEVICE_DFP2_SUPPORT,
0),
ATOM_DEVICE_DFP2_SUPPORT);
+ connector_object_id = combios_check_dl_dvi(dev, 0);
} else {
devices |= ATOM_DEVICE_DFP1_SUPPORT;
radeon_add_legacy_encoder(dev,
@@ -1706,19 +1791,24 @@ bool radeon_get_legacy_connector_info_from_bios(struct drm_device *dev)
ATOM_DEVICE_DFP1_SUPPORT,
0),
ATOM_DEVICE_DFP1_SUPPORT);
+ connector_object_id = CONNECTOR_OBJECT_ID_SINGLE_LINK_DVI_I;
}
radeon_add_legacy_connector(dev,
i,
devices,
legacy_connector_convert
[connector],
- &ddc_i2c);
+ &ddc_i2c,
+ connector_object_id);
break;
case CONNECTOR_DVI_D_LEGACY:
- if ((tmp >> 4) & 0x1)
+ if ((tmp >> 4) & 0x1) {
devices = ATOM_DEVICE_DFP2_SUPPORT;
- else
+ connector_object_id = combios_check_dl_dvi(dev, 1);
+ } else {
devices = ATOM_DEVICE_DFP1_SUPPORT;
+ connector_object_id = CONNECTOR_OBJECT_ID_SINGLE_LINK_DVI_I;
+ }
radeon_add_legacy_encoder(dev,
radeon_get_encoder_id
(dev, devices, 0),
@@ -1726,7 +1816,8 @@ bool radeon_get_legacy_connector_info_from_bios(struct drm_device *dev)
radeon_add_legacy_connector(dev, i, devices,
legacy_connector_convert
[connector],
- &ddc_i2c);
+ &ddc_i2c,
+ connector_object_id);
break;
case CONNECTOR_CTV_LEGACY:
case CONNECTOR_STV_LEGACY:
@@ -1740,7 +1831,8 @@ bool radeon_get_legacy_connector_info_from_bios(struct drm_device *dev)
ATOM_DEVICE_TV1_SUPPORT,
legacy_connector_convert
[connector],
- &ddc_i2c);
+ &ddc_i2c,
+ CONNECTOR_OBJECT_ID_SVIDEO);
break;
default:
DRM_ERROR("Unknown connector type: %d\n",
@@ -1772,10 +1864,29 @@ bool radeon_get_legacy_connector_info_from_bios(struct drm_device *dev)
ATOM_DEVICE_CRT1_SUPPORT |
ATOM_DEVICE_DFP1_SUPPORT,
DRM_MODE_CONNECTOR_DVII,
- &ddc_i2c);
+ &ddc_i2c,
+ CONNECTOR_OBJECT_ID_SINGLE_LINK_DVI_I);
} else {
- DRM_DEBUG("No connector info found\n");
- return false;
+ uint16_t crt_info =
+ combios_get_table_offset(dev, COMBIOS_CRT_INFO_TABLE);
+ DRM_DEBUG("Found CRT table, assuming VGA connector\n");
+ if (crt_info) {
+ radeon_add_legacy_encoder(dev,
+ radeon_get_encoder_id(dev,
+ ATOM_DEVICE_CRT1_SUPPORT,
+ 1),
+ ATOM_DEVICE_CRT1_SUPPORT);
+ ddc_i2c = combios_setup_i2c_bus(RADEON_GPIO_VGA_DDC);
+ radeon_add_legacy_connector(dev,
+ 0,
+ ATOM_DEVICE_CRT1_SUPPORT,
+ DRM_MODE_CONNECTOR_VGA,
+ &ddc_i2c,
+ CONNECTOR_OBJECT_ID_VGA);
+ } else {
+ DRM_DEBUG("No connector info found\n");
+ return false;
+ }
}
}
@@ -1870,7 +1981,8 @@ bool radeon_get_legacy_connector_info_from_bios(struct drm_device *dev)
5,
ATOM_DEVICE_LCD1_SUPPORT,
DRM_MODE_CONNECTOR_LVDS,
- &ddc_i2c);
+ &ddc_i2c,
+ CONNECTOR_OBJECT_ID_LVDS);
}
}
@@ -1880,16 +1992,19 @@ bool radeon_get_legacy_connector_info_from_bios(struct drm_device *dev)
combios_get_table_offset(dev, COMBIOS_TV_INFO_TABLE);
if (tv_info) {
if (RBIOS8(tv_info + 6) == 'T') {
- radeon_add_legacy_encoder(dev,
- radeon_get_encoder_id
- (dev,
- ATOM_DEVICE_TV1_SUPPORT,
- 2),
- ATOM_DEVICE_TV1_SUPPORT);
- radeon_add_legacy_connector(dev, 6,
- ATOM_DEVICE_TV1_SUPPORT,
- DRM_MODE_CONNECTOR_SVIDEO,
- &ddc_i2c);
+ if (radeon_apply_legacy_tv_quirks(dev)) {
+ radeon_add_legacy_encoder(dev,
+ radeon_get_encoder_id
+ (dev,
+ ATOM_DEVICE_TV1_SUPPORT,
+ 2),
+ ATOM_DEVICE_TV1_SUPPORT);
+ radeon_add_legacy_connector(dev, 6,
+ ATOM_DEVICE_TV1_SUPPORT,
+ DRM_MODE_CONNECTOR_SVIDEO,
+ &ddc_i2c,
+ CONNECTOR_OBJECT_ID_SVIDEO);
+ }
}
}
}
diff --git a/drivers/gpu/drm/radeon/radeon_connectors.c b/drivers/gpu/drm/radeon/radeon_connectors.c
index e376be47a4a..fce4c4087fd 100644
--- a/drivers/gpu/drm/radeon/radeon_connectors.c
+++ b/drivers/gpu/drm/radeon/radeon_connectors.c
@@ -178,25 +178,12 @@ static struct drm_display_mode *radeon_fp_native_mode(struct drm_encoder *encode
struct drm_device *dev = encoder->dev;
struct radeon_encoder *radeon_encoder = to_radeon_encoder(encoder);
struct drm_display_mode *mode = NULL;
- struct radeon_native_mode *native_mode = &radeon_encoder->native_mode;
-
- if (native_mode->panel_xres != 0 &&
- native_mode->panel_yres != 0 &&
- native_mode->dotclock != 0) {
- mode = drm_mode_create(dev);
-
- mode->hdisplay = native_mode->panel_xres;
- mode->vdisplay = native_mode->panel_yres;
-
- mode->htotal = mode->hdisplay + native_mode->hblank;
- mode->hsync_start = mode->hdisplay + native_mode->hoverplus;
- mode->hsync_end = mode->hsync_start + native_mode->hsync_width;
- mode->vtotal = mode->vdisplay + native_mode->vblank;
- mode->vsync_start = mode->vdisplay + native_mode->voverplus;
- mode->vsync_end = mode->vsync_start + native_mode->vsync_width;
- mode->clock = native_mode->dotclock;
- mode->flags = 0;
+ struct drm_display_mode *native_mode = &radeon_encoder->native_mode;
+ if (native_mode->hdisplay != 0 &&
+ native_mode->vdisplay != 0 &&
+ native_mode->clock != 0) {
+ mode = drm_mode_duplicate(dev, native_mode);
mode->type = DRM_MODE_TYPE_PREFERRED | DRM_MODE_TYPE_DRIVER;
drm_mode_set_name(mode);
@@ -210,7 +197,7 @@ static void radeon_add_common_modes(struct drm_encoder *encoder, struct drm_conn
struct drm_device *dev = encoder->dev;
struct radeon_encoder *radeon_encoder = to_radeon_encoder(encoder);
struct drm_display_mode *mode = NULL;
- struct radeon_native_mode *native_mode = &radeon_encoder->native_mode;
+ struct drm_display_mode *native_mode = &radeon_encoder->native_mode;
int i;
struct mode_size {
int w;
@@ -236,11 +223,16 @@ static void radeon_add_common_modes(struct drm_encoder *encoder, struct drm_conn
};
for (i = 0; i < 17; i++) {
+ if (radeon_encoder->devices & (ATOM_DEVICE_TV_SUPPORT)) {
+ if (common_modes[i].w > 1024 ||
+ common_modes[i].h > 768)
+ continue;
+ }
if (radeon_encoder->devices & (ATOM_DEVICE_LCD_SUPPORT)) {
- if (common_modes[i].w > native_mode->panel_xres ||
- common_modes[i].h > native_mode->panel_yres ||
- (common_modes[i].w == native_mode->panel_xres &&
- common_modes[i].h == native_mode->panel_yres))
+ if (common_modes[i].w > native_mode->hdisplay ||
+ common_modes[i].h > native_mode->vdisplay ||
+ (common_modes[i].w == native_mode->hdisplay &&
+ common_modes[i].h == native_mode->vdisplay))
continue;
}
if (common_modes[i].w < 320 || common_modes[i].h < 200)
@@ -344,28 +336,23 @@ static void radeon_fixup_lvds_native_mode(struct drm_encoder *encoder,
struct drm_connector *connector)
{
struct radeon_encoder *radeon_encoder = to_radeon_encoder(encoder);
- struct radeon_native_mode *native_mode = &radeon_encoder->native_mode;
+ struct drm_display_mode *native_mode = &radeon_encoder->native_mode;
/* Try to get native mode details from EDID if necessary */
- if (!native_mode->dotclock) {
+ if (!native_mode->clock) {
struct drm_display_mode *t, *mode;
list_for_each_entry_safe(mode, t, &connector->probed_modes, head) {
- if (mode->hdisplay == native_mode->panel_xres &&
- mode->vdisplay == native_mode->panel_yres) {
- native_mode->hblank = mode->htotal - mode->hdisplay;
- native_mode->hoverplus = mode->hsync_start - mode->hdisplay;
- native_mode->hsync_width = mode->hsync_end - mode->hsync_start;
- native_mode->vblank = mode->vtotal - mode->vdisplay;
- native_mode->voverplus = mode->vsync_start - mode->vdisplay;
- native_mode->vsync_width = mode->vsync_end - mode->vsync_start;
- native_mode->dotclock = mode->clock;
+ if (mode->hdisplay == native_mode->hdisplay &&
+ mode->vdisplay == native_mode->vdisplay) {
+ *native_mode = *mode;
+ drm_mode_set_crtcinfo(native_mode, CRTC_INTERLACE_HALVE_V);
DRM_INFO("Determined LVDS native mode details from EDID\n");
break;
}
}
}
- if (!native_mode->dotclock) {
+ if (!native_mode->clock) {
DRM_INFO("No LVDS native mode details, disabling RMX\n");
radeon_encoder->rmx_type = RMX_OFF;
}
@@ -410,13 +397,64 @@ static int radeon_lvds_get_modes(struct drm_connector *connector)
static int radeon_lvds_mode_valid(struct drm_connector *connector,
struct drm_display_mode *mode)
{
+ struct drm_encoder *encoder = radeon_best_single_encoder(connector);
+
+ if ((mode->hdisplay < 320) || (mode->vdisplay < 240))
+ return MODE_PANEL;
+
+ if (encoder) {
+ struct radeon_encoder *radeon_encoder = to_radeon_encoder(encoder);
+ struct drm_display_mode *native_mode = &radeon_encoder->native_mode;
+
+ /* AVIVO hardware supports downscaling modes larger than the panel
+ * to the panel size, but I'm not sure this is desirable.
+ */
+ if ((mode->hdisplay > native_mode->hdisplay) ||
+ (mode->vdisplay > native_mode->vdisplay))
+ return MODE_PANEL;
+
+ /* if scaling is disabled, block non-native modes */
+ if (radeon_encoder->rmx_type == RMX_OFF) {
+ if ((mode->hdisplay != native_mode->hdisplay) ||
+ (mode->vdisplay != native_mode->vdisplay))
+ return MODE_PANEL;
+ }
+ }
+
return MODE_OK;
}
static enum drm_connector_status radeon_lvds_detect(struct drm_connector *connector)
{
- enum drm_connector_status ret = connector_status_connected;
+ struct radeon_connector *radeon_connector = to_radeon_connector(connector);
+ struct drm_encoder *encoder = radeon_best_single_encoder(connector);
+ enum drm_connector_status ret = connector_status_disconnected;
+
+ if (encoder) {
+ struct radeon_encoder *radeon_encoder = to_radeon_encoder(encoder);
+ struct drm_display_mode *native_mode = &radeon_encoder->native_mode;
+
+ /* check if panel is valid */
+ if (native_mode->hdisplay >= 320 && native_mode->vdisplay >= 240)
+ ret = connector_status_connected;
+
+ }
+
+ /* check for edid as well */
+ if (radeon_connector->edid)
+ ret = connector_status_connected;
+ else {
+ if (radeon_connector->ddc_bus) {
+ radeon_i2c_do_lock(radeon_connector, 1);
+ radeon_connector->edid = drm_get_edid(&radeon_connector->base,
+ &radeon_connector->ddc_bus->adapter);
+ radeon_i2c_do_lock(radeon_connector, 0);
+ if (radeon_connector->edid)
+ ret = connector_status_connected;
+ }
+ }
/* check acpi lid status ??? */
+
radeon_connector_update_scratch_regs(connector, ret);
return ret;
}
@@ -427,6 +465,8 @@ static void radeon_connector_destroy(struct drm_connector *connector)
if (radeon_connector->ddc_bus)
radeon_i2c_destroy(radeon_connector->ddc_bus);
+ if (radeon_connector->edid)
+ kfree(radeon_connector->edid);
kfree(radeon_connector->con_priv);
drm_sysfs_connector_remove(connector);
drm_connector_cleanup(connector);
@@ -496,6 +536,8 @@ static int radeon_vga_get_modes(struct drm_connector *connector)
static int radeon_vga_mode_valid(struct drm_connector *connector,
struct drm_display_mode *mode)
{
+ /* XXX check mode bandwidth */
+ /* XXX verify against max DAC output frequency */
return MODE_OK;
}
@@ -514,9 +556,32 @@ static enum drm_connector_status radeon_vga_detect(struct drm_connector *connect
radeon_i2c_do_lock(radeon_connector, 1);
dret = radeon_ddc_probe(radeon_connector);
radeon_i2c_do_lock(radeon_connector, 0);
- if (dret)
- ret = connector_status_connected;
- else {
+ if (dret) {
+ if (radeon_connector->edid) {
+ kfree(radeon_connector->edid);
+ radeon_connector->edid = NULL;
+ }
+ radeon_i2c_do_lock(radeon_connector, 1);
+ radeon_connector->edid = drm_get_edid(&radeon_connector->base, &radeon_connector->ddc_bus->adapter);
+ radeon_i2c_do_lock(radeon_connector, 0);
+
+ if (!radeon_connector->edid) {
+ DRM_ERROR("DDC responded but not EDID found for %s\n",
+ drm_get_connector_name(connector));
+ } else {
+ radeon_connector->use_digital = !!(radeon_connector->edid->input & DRM_EDID_INPUT_DIGITAL);
+
+ /* some oems have boards with separate digital and analog connectors
+ * with a shared ddc line (often vga + hdmi)
+ */
+ if (radeon_connector->use_digital && radeon_connector->shared_ddc) {
+ kfree(radeon_connector->edid);
+ radeon_connector->edid = NULL;
+ ret = connector_status_disconnected;
+ } else
+ ret = connector_status_connected;
+ }
+ } else {
if (radeon_connector->dac_load_detect) {
encoder_funcs = encoder->helper_private;
ret = encoder_funcs->detect(encoder, connector);
@@ -570,6 +635,8 @@ static int radeon_tv_get_modes(struct drm_connector *connector)
static int radeon_tv_mode_valid(struct drm_connector *connector,
struct drm_display_mode *mode)
{
+ if ((mode->hdisplay > 1024) || (mode->vdisplay > 768))
+ return MODE_CLOCK_RANGE;
return MODE_OK;
}
@@ -644,6 +711,10 @@ static enum drm_connector_status radeon_dvi_detect(struct drm_connector *connect
dret = radeon_ddc_probe(radeon_connector);
radeon_i2c_do_lock(radeon_connector, 0);
if (dret) {
+ if (radeon_connector->edid) {
+ kfree(radeon_connector->edid);
+ radeon_connector->edid = NULL;
+ }
radeon_i2c_do_lock(radeon_connector, 1);
radeon_connector->edid = drm_get_edid(&radeon_connector->base, &radeon_connector->ddc_bus->adapter);
radeon_i2c_do_lock(radeon_connector, 0);
@@ -654,10 +725,15 @@ static enum drm_connector_status radeon_dvi_detect(struct drm_connector *connect
} else {
radeon_connector->use_digital = !!(radeon_connector->edid->input & DRM_EDID_INPUT_DIGITAL);
- /* if this isn't a digital monitor
- then we need to make sure we don't have any
- TV conflicts */
- ret = connector_status_connected;
+ /* some oems have boards with separate digital and analog connectors
+ * with a shared ddc line (often vga + hdmi)
+ */
+ if ((!radeon_connector->use_digital) && radeon_connector->shared_ddc) {
+ kfree(radeon_connector->edid);
+ radeon_connector->edid = NULL;
+ ret = connector_status_disconnected;
+ } else
+ ret = connector_status_connected;
}
}
@@ -753,9 +829,27 @@ static void radeon_dvi_force(struct drm_connector *connector)
radeon_connector->use_digital = true;
}
+static int radeon_dvi_mode_valid(struct drm_connector *connector,
+ struct drm_display_mode *mode)
+{
+ struct radeon_connector *radeon_connector = to_radeon_connector(connector);
+
+ /* XXX check mode bandwidth */
+
+ if (radeon_connector->use_digital && (mode->clock > 165000)) {
+ if ((radeon_connector->connector_object_id == CONNECTOR_OBJECT_ID_DUAL_LINK_DVI_I) ||
+ (radeon_connector->connector_object_id == CONNECTOR_OBJECT_ID_DUAL_LINK_DVI_D) ||
+ (radeon_connector->connector_object_id == CONNECTOR_OBJECT_ID_HDMI_TYPE_B))
+ return MODE_OK;
+ else
+ return MODE_CLOCK_HIGH;
+ }
+ return MODE_OK;
+}
+
struct drm_connector_helper_funcs radeon_dvi_connector_helper_funcs = {
.get_modes = radeon_dvi_get_modes,
- .mode_valid = radeon_vga_mode_valid,
+ .mode_valid = radeon_dvi_mode_valid,
.best_encoder = radeon_dvi_encoder,
};
@@ -775,13 +869,15 @@ radeon_add_atom_connector(struct drm_device *dev,
int connector_type,
struct radeon_i2c_bus_rec *i2c_bus,
bool linkb,
- uint32_t igp_lane_info)
+ uint32_t igp_lane_info,
+ uint16_t connector_object_id)
{
struct radeon_device *rdev = dev->dev_private;
struct drm_connector *connector;
struct radeon_connector *radeon_connector;
struct radeon_connector_atom_dig *radeon_dig_connector;
uint32_t subpixel_order = SubPixelNone;
+ bool shared_ddc = false;
int ret;
/* fixme - tv/cv/din */
@@ -795,6 +891,13 @@ radeon_add_atom_connector(struct drm_device *dev,
radeon_connector->devices |= supported_device;
return;
}
+ if (radeon_connector->ddc_bus && i2c_bus->valid) {
+ if (memcmp(&radeon_connector->ddc_bus->rec, i2c_bus,
+ sizeof(struct radeon_i2c_bus_rec)) == 0) {
+ radeon_connector->shared_ddc = true;
+ shared_ddc = true;
+ }
+ }
}
radeon_connector = kzalloc(sizeof(struct radeon_connector), GFP_KERNEL);
@@ -805,6 +908,8 @@ radeon_add_atom_connector(struct drm_device *dev,
radeon_connector->connector_id = connector_id;
radeon_connector->devices = supported_device;
+ radeon_connector->shared_ddc = shared_ddc;
+ radeon_connector->connector_object_id = connector_object_id;
switch (connector_type) {
case DRM_MODE_CONNECTOR_VGA:
drm_connector_init(dev, &radeon_connector->base, &radeon_vga_connector_funcs, connector_type);
@@ -956,7 +1061,8 @@ radeon_add_legacy_connector(struct drm_device *dev,
uint32_t connector_id,
uint32_t supported_device,
int connector_type,
- struct radeon_i2c_bus_rec *i2c_bus)
+ struct radeon_i2c_bus_rec *i2c_bus,
+ uint16_t connector_object_id)
{
struct radeon_device *rdev = dev->dev_private;
struct drm_connector *connector;
@@ -985,6 +1091,7 @@ radeon_add_legacy_connector(struct drm_device *dev,
radeon_connector->connector_id = connector_id;
radeon_connector->devices = supported_device;
+ radeon_connector->connector_object_id = connector_object_id;
switch (connector_type) {
case DRM_MODE_CONNECTOR_VGA:
drm_connector_init(dev, &radeon_connector->base, &radeon_vga_connector_funcs, connector_type);
diff --git a/drivers/gpu/drm/radeon/radeon_cursor.c b/drivers/gpu/drm/radeon/radeon_cursor.c
index b13c79e38bc..28772a37009 100644
--- a/drivers/gpu/drm/radeon/radeon_cursor.c
+++ b/drivers/gpu/drm/radeon/radeon_cursor.c
@@ -109,9 +109,15 @@ static void radeon_set_cursor(struct drm_crtc *crtc, struct drm_gem_object *obj,
struct radeon_crtc *radeon_crtc = to_radeon_crtc(crtc);
struct radeon_device *rdev = crtc->dev->dev_private;
- if (ASIC_IS_AVIVO(rdev))
+ if (ASIC_IS_AVIVO(rdev)) {
+ if (rdev->family >= CHIP_RV770) {
+ if (radeon_crtc->crtc_id)
+ WREG32(R700_D2CUR_SURFACE_ADDRESS_HIGH, 0);
+ else
+ WREG32(R700_D1CUR_SURFACE_ADDRESS_HIGH, 0);
+ }
WREG32(AVIVO_D1CUR_SURFACE_ADDRESS + radeon_crtc->crtc_offset, gpu_addr);
- else {
+ } else {
radeon_crtc->legacy_cursor_offset = gpu_addr - radeon_crtc->legacy_display_base_addr;
/* offset is from DISP(2)_BASE_ADDRESS */
WREG32(RADEON_CUR_OFFSET + radeon_crtc->crtc_offset, radeon_crtc->legacy_cursor_offset);
diff --git a/drivers/gpu/drm/radeon/radeon_device.c b/drivers/gpu/drm/radeon/radeon_device.c
index ec835d56d30..e3f9edfa40f 100644
--- a/drivers/gpu/drm/radeon/radeon_device.c
+++ b/drivers/gpu/drm/radeon/radeon_device.c
@@ -322,10 +322,6 @@ int radeon_asic_init(struct radeon_device *rdev)
case CHIP_RV380:
rdev->asic = &r300_asic;
if (rdev->flags & RADEON_IS_PCIE) {
- rdev->asic->gart_init = &rv370_pcie_gart_init;
- rdev->asic->gart_fini = &rv370_pcie_gart_fini;
- rdev->asic->gart_enable = &rv370_pcie_gart_enable;
- rdev->asic->gart_disable = &rv370_pcie_gart_disable;
rdev->asic->gart_tlb_flush = &rv370_pcie_gart_tlb_flush;
rdev->asic->gart_set_page = &rv370_pcie_gart_set_page;
}
@@ -448,20 +444,24 @@ static uint32_t cail_reg_read(struct card_info *info, uint32_t reg)
return r;
}
-static struct card_info atom_card_info = {
- .dev = NULL,
- .reg_read = cail_reg_read,
- .reg_write = cail_reg_write,
- .mc_read = cail_mc_read,
- .mc_write = cail_mc_write,
- .pll_read = cail_pll_read,
- .pll_write = cail_pll_write,
-};
-
int radeon_atombios_init(struct radeon_device *rdev)
{
- atom_card_info.dev = rdev->ddev;
- rdev->mode_info.atom_context = atom_parse(&atom_card_info, rdev->bios);
+ struct card_info *atom_card_info =
+ kzalloc(sizeof(struct card_info), GFP_KERNEL);
+
+ if (!atom_card_info)
+ return -ENOMEM;
+
+ rdev->mode_info.atom_card_info = atom_card_info;
+ atom_card_info->dev = rdev->ddev;
+ atom_card_info->reg_read = cail_reg_read;
+ atom_card_info->reg_write = cail_reg_write;
+ atom_card_info->mc_read = cail_mc_read;
+ atom_card_info->mc_write = cail_mc_write;
+ atom_card_info->pll_read = cail_pll_read;
+ atom_card_info->pll_write = cail_pll_write;
+
+ rdev->mode_info.atom_context = atom_parse(atom_card_info, rdev->bios);
radeon_atom_initialize_bios_scratch_regs(rdev->ddev);
return 0;
}
@@ -469,6 +469,7 @@ int radeon_atombios_init(struct radeon_device *rdev)
void radeon_atombios_fini(struct radeon_device *rdev)
{
kfree(rdev->mode_info.atom_context);
+ kfree(rdev->mode_info.atom_card_info);
}
int radeon_combios_init(struct radeon_device *rdev)
@@ -485,7 +486,6 @@ void radeon_combios_fini(struct radeon_device *rdev)
static unsigned int radeon_vga_set_decode(void *cookie, bool state)
{
struct radeon_device *rdev = cookie;
-
radeon_vga_set_state(rdev, state);
if (state)
return VGA_RSRC_LEGACY_IO | VGA_RSRC_LEGACY_MEM |
@@ -493,6 +493,29 @@ static unsigned int radeon_vga_set_decode(void *cookie, bool state)
else
return VGA_RSRC_NORMAL_IO | VGA_RSRC_NORMAL_MEM;
}
+
+void radeon_agp_disable(struct radeon_device *rdev)
+{
+ rdev->flags &= ~RADEON_IS_AGP;
+ if (rdev->family >= CHIP_R600) {
+ DRM_INFO("Forcing AGP to PCIE mode\n");
+ rdev->flags |= RADEON_IS_PCIE;
+ } else if (rdev->family >= CHIP_RV515 ||
+ rdev->family == CHIP_RV380 ||
+ rdev->family == CHIP_RV410 ||
+ rdev->family == CHIP_R423) {
+ DRM_INFO("Forcing AGP to PCIE mode\n");
+ rdev->flags |= RADEON_IS_PCIE;
+ rdev->asic->gart_tlb_flush = &rv370_pcie_gart_tlb_flush;
+ rdev->asic->gart_set_page = &rv370_pcie_gart_set_page;
+ } else {
+ DRM_INFO("Forcing AGP to PCI mode\n");
+ rdev->flags |= RADEON_IS_PCI;
+ rdev->asic->gart_tlb_flush = &r100_pci_gart_tlb_flush;
+ rdev->asic->gart_set_page = &r100_pci_gart_set_page;
+ }
+}
+
/*
* Radeon device.
*/
@@ -531,32 +554,7 @@ int radeon_device_init(struct radeon_device *rdev,
}
if (radeon_agpmode == -1) {
- rdev->flags &= ~RADEON_IS_AGP;
- if (rdev->family >= CHIP_R600) {
- DRM_INFO("Forcing AGP to PCIE mode\n");
- rdev->flags |= RADEON_IS_PCIE;
- } else if (rdev->family >= CHIP_RV515 ||
- rdev->family == CHIP_RV380 ||
- rdev->family == CHIP_RV410 ||
- rdev->family == CHIP_R423) {
- DRM_INFO("Forcing AGP to PCIE mode\n");
- rdev->flags |= RADEON_IS_PCIE;
- rdev->asic->gart_init = &rv370_pcie_gart_init;
- rdev->asic->gart_fini = &rv370_pcie_gart_fini;
- rdev->asic->gart_enable = &rv370_pcie_gart_enable;
- rdev->asic->gart_disable = &rv370_pcie_gart_disable;
- rdev->asic->gart_tlb_flush = &rv370_pcie_gart_tlb_flush;
- rdev->asic->gart_set_page = &rv370_pcie_gart_set_page;
- } else {
- DRM_INFO("Forcing AGP to PCI mode\n");
- rdev->flags |= RADEON_IS_PCI;
- rdev->asic->gart_init = &r100_pci_gart_init;
- rdev->asic->gart_fini = &r100_pci_gart_fini;
- rdev->asic->gart_enable = &r100_pci_gart_enable;
- rdev->asic->gart_disable = &r100_pci_gart_disable;
- rdev->asic->gart_tlb_flush = &r100_pci_gart_tlb_flush;
- rdev->asic->gart_set_page = &r100_pci_gart_set_page;
- }
+ radeon_agp_disable(rdev);
}
/* set DMA mask + need_dma32 flags.
@@ -588,111 +586,26 @@ int radeon_device_init(struct radeon_device *rdev,
DRM_INFO("register mmio base: 0x%08X\n", (uint32_t)rdev->rmmio_base);
DRM_INFO("register mmio size: %u\n", (unsigned)rdev->rmmio_size);
- rdev->new_init_path = false;
- r = radeon_init(rdev);
- if (r) {
- return r;
- }
-
/* if we have > 1 VGA cards, then disable the radeon VGA resources */
- r = vga_client_register(rdev->pdev, rdev, NULL, radeon_vga_set_decode);
- if (r) {
- return -EINVAL;
- }
+ /* this will fail for cards that aren't VGA class devices, just
+ * ignore it */
+ vga_client_register(rdev->pdev, rdev, NULL, radeon_vga_set_decode);
- if (!rdev->new_init_path) {
- /* Setup errata flags */
- radeon_errata(rdev);
- /* Initialize scratch registers */
- radeon_scratch_init(rdev);
- /* Initialize surface registers */
- radeon_surface_init(rdev);
-
- /* BIOS*/
- if (!radeon_get_bios(rdev)) {
- if (ASIC_IS_AVIVO(rdev))
- return -EINVAL;
- }
- if (rdev->is_atom_bios) {
- r = radeon_atombios_init(rdev);
- if (r) {
- return r;
- }
- } else {
- r = radeon_combios_init(rdev);
- if (r) {
- return r;
- }
- }
- /* Reset gpu before posting otherwise ATOM will enter infinite loop */
- if (radeon_gpu_reset(rdev)) {
- /* FIXME: what do we want to do here ? */
- }
- /* check if cards are posted or not */
- if (!radeon_card_posted(rdev) && rdev->bios) {
- DRM_INFO("GPU not posted. posting now...\n");
- if (rdev->is_atom_bios) {
- atom_asic_init(rdev->mode_info.atom_context);
- } else {
- radeon_combios_asic_init(rdev->ddev);
- }
- }
- /* Get clock & vram information */
- radeon_get_clock_info(rdev->ddev);
- radeon_vram_info(rdev);
- /* Initialize clocks */
- r = radeon_clocks_init(rdev);
- if (r) {
- return r;
- }
+ r = radeon_init(rdev);
+ if (r)
+ return r;
- /* Initialize memory controller (also test AGP) */
- r = radeon_mc_init(rdev);
- if (r) {
- return r;
- }
- /* Fence driver */
- r = radeon_fence_driver_init(rdev);
- if (r) {
- return r;
- }
- r = radeon_irq_kms_init(rdev);
- if (r) {
- return r;
- }
- /* Memory manager */
- r = radeon_object_init(rdev);
- if (r) {
- return r;
- }
- r = radeon_gpu_gart_init(rdev);
+ if (rdev->flags & RADEON_IS_AGP && !rdev->accel_working) {
+ /* Acceleration not working on AGP card, try again
+ * with fallback to PCI or PCIE GART
+ */
+ radeon_gpu_reset(rdev);
+ radeon_fini(rdev);
+ radeon_agp_disable(rdev);
+ r = radeon_init(rdev);
if (r)
return r;
- /* Initialize GART (initialize after TTM so we can allocate
- * memory through TTM but finalize after TTM) */
- r = radeon_gart_enable(rdev);
- if (r)
- return 0;
- r = radeon_gem_init(rdev);
- if (r)
- return 0;
-
- /* 1M ring buffer */
- r = radeon_cp_init(rdev, 1024 * 1024);
- if (r)
- return 0;
- r = radeon_wb_init(rdev);
- if (r)
- DRM_ERROR("radeon: failled initializing WB (%d).\n", r);
- r = radeon_ib_pool_init(rdev);
- if (r)
- return 0;
- r = radeon_ib_test(rdev);
- if (r)
- return 0;
- rdev->accel_working = true;
}
- DRM_INFO("radeon: kernel modesetting successfully initialized.\n");
if (radeon_testing) {
radeon_test_moves(rdev);
}
@@ -706,32 +619,8 @@ void radeon_device_fini(struct radeon_device *rdev)
{
DRM_INFO("radeon: finishing device.\n");
rdev->shutdown = true;
- /* Order matter so becarefull if you rearrange anythings */
- if (!rdev->new_init_path) {
- radeon_ib_pool_fini(rdev);
- radeon_cp_fini(rdev);
- radeon_wb_fini(rdev);
- radeon_gpu_gart_fini(rdev);
- radeon_gem_fini(rdev);
- radeon_mc_fini(rdev);
-#if __OS_HAS_AGP
- radeon_agp_fini(rdev);
-#endif
- radeon_irq_kms_fini(rdev);
- vga_client_register(rdev->pdev, NULL, NULL, NULL);
- radeon_fence_driver_fini(rdev);
- radeon_clocks_fini(rdev);
- radeon_object_fini(rdev);
- if (rdev->is_atom_bios) {
- radeon_atombios_fini(rdev);
- } else {
- radeon_combios_fini(rdev);
- }
- kfree(rdev->bios);
- rdev->bios = NULL;
- } else {
- radeon_fini(rdev);
- }
+ radeon_fini(rdev);
+ vga_client_register(rdev->pdev, NULL, NULL, NULL);
iounmap(rdev->rmmio);
rdev->rmmio = NULL;
}
@@ -771,14 +660,7 @@ int radeon_suspend_kms(struct drm_device *dev, pm_message_t state)
radeon_save_bios_scratch_regs(rdev);
- if (!rdev->new_init_path) {
- radeon_cp_disable(rdev);
- radeon_gart_disable(rdev);
- rdev->irq.sw_int = false;
- radeon_irq_set(rdev);
- } else {
- radeon_suspend(rdev);
- }
+ radeon_suspend(rdev);
/* evict remaining vram memory */
radeon_object_evict_vram(rdev);
@@ -797,7 +679,6 @@ int radeon_suspend_kms(struct drm_device *dev, pm_message_t state)
int radeon_resume_kms(struct drm_device *dev)
{
struct radeon_device *rdev = dev->dev_private;
- int r;
acquire_console_sem();
pci_set_power_state(dev->pdev, PCI_D0);
@@ -807,43 +688,7 @@ int radeon_resume_kms(struct drm_device *dev)
return -1;
}
pci_set_master(dev->pdev);
- /* Reset gpu before posting otherwise ATOM will enter infinite loop */
- if (!rdev->new_init_path) {
- if (radeon_gpu_reset(rdev)) {
- /* FIXME: what do we want to do here ? */
- }
- /* post card */
- if (rdev->is_atom_bios) {
- atom_asic_init(rdev->mode_info.atom_context);
- } else {
- radeon_combios_asic_init(rdev->ddev);
- }
- /* Initialize clocks */
- r = radeon_clocks_init(rdev);
- if (r) {
- release_console_sem();
- return r;
- }
- /* Enable IRQ */
- rdev->irq.sw_int = true;
- radeon_irq_set(rdev);
- /* Initialize GPU Memory Controller */
- r = radeon_mc_init(rdev);
- if (r) {
- goto out;
- }
- r = radeon_gart_enable(rdev);
- if (r) {
- goto out;
- }
- r = radeon_cp_init(rdev, rdev->cp.ring_size);
- if (r) {
- goto out;
- }
- } else {
- radeon_resume(rdev);
- }
-out:
+ radeon_resume(rdev);
radeon_restore_bios_scratch_regs(rdev);
fb_set_suspend(rdev->fbdev_info, 0);
release_console_sem();
diff --git a/drivers/gpu/drm/radeon/radeon_display.c b/drivers/gpu/drm/radeon/radeon_display.c
index 5d8141b1376..c85df4afcb7 100644
--- a/drivers/gpu/drm/radeon/radeon_display.c
+++ b/drivers/gpu/drm/radeon/radeon_display.c
@@ -106,51 +106,44 @@ void radeon_crtc_load_lut(struct drm_crtc *crtc)
legacy_crtc_load_lut(crtc);
}
-/** Sets the color ramps on behalf of RandR */
+/** Sets the color ramps on behalf of fbcon */
void radeon_crtc_fb_gamma_set(struct drm_crtc *crtc, u16 red, u16 green,
u16 blue, int regno)
{
struct radeon_crtc *radeon_crtc = to_radeon_crtc(crtc);
- if (regno == 0)
- DRM_DEBUG("gamma set %d\n", radeon_crtc->crtc_id);
radeon_crtc->lut_r[regno] = red >> 6;
radeon_crtc->lut_g[regno] = green >> 6;
radeon_crtc->lut_b[regno] = blue >> 6;
}
+/** Gets the color ramps on behalf of fbcon */
+void radeon_crtc_fb_gamma_get(struct drm_crtc *crtc, u16 *red, u16 *green,
+ u16 *blue, int regno)
+{
+ struct radeon_crtc *radeon_crtc = to_radeon_crtc(crtc);
+
+ *red = radeon_crtc->lut_r[regno] << 6;
+ *green = radeon_crtc->lut_g[regno] << 6;
+ *blue = radeon_crtc->lut_b[regno] << 6;
+}
+
static void radeon_crtc_gamma_set(struct drm_crtc *crtc, u16 *red, u16 *green,
u16 *blue, uint32_t size)
{
struct radeon_crtc *radeon_crtc = to_radeon_crtc(crtc);
- int i, j;
+ int i;
if (size != 256) {
return;
}
- if (crtc->fb == NULL) {
- return;
- }
- if (crtc->fb->depth == 16) {
- for (i = 0; i < 64; i++) {
- if (i <= 31) {
- for (j = 0; j < 8; j++) {
- radeon_crtc->lut_r[i * 8 + j] = red[i] >> 6;
- radeon_crtc->lut_b[i * 8 + j] = blue[i] >> 6;
- }
- }
- for (j = 0; j < 4; j++)
- radeon_crtc->lut_g[i * 4 + j] = green[i] >> 6;
- }
- } else {
- for (i = 0; i < 256; i++) {
- radeon_crtc->lut_r[i] = red[i] >> 6;
- radeon_crtc->lut_g[i] = green[i] >> 6;
- radeon_crtc->lut_b[i] = blue[i] >> 6;
- }
+ /* userspace palettes are always correct as is */
+ for (i = 0; i < 256; i++) {
+ radeon_crtc->lut_r[i] = red[i] >> 6;
+ radeon_crtc->lut_g[i] = green[i] >> 6;
+ radeon_crtc->lut_b[i] = blue[i] >> 6;
}
-
radeon_crtc_load_lut(crtc);
}
@@ -341,27 +334,19 @@ static bool radeon_setup_enc_conn(struct drm_device *dev)
int radeon_ddc_get_modes(struct radeon_connector *radeon_connector)
{
- struct edid *edid;
int ret = 0;
if (!radeon_connector->ddc_bus)
return -1;
if (!radeon_connector->edid) {
radeon_i2c_do_lock(radeon_connector, 1);
- edid = drm_get_edid(&radeon_connector->base, &radeon_connector->ddc_bus->adapter);
+ radeon_connector->edid = drm_get_edid(&radeon_connector->base, &radeon_connector->ddc_bus->adapter);
radeon_i2c_do_lock(radeon_connector, 0);
- } else
- edid = radeon_connector->edid;
+ }
- if (edid) {
- /* update digital bits here */
- if (edid->input & DRM_EDID_INPUT_DIGITAL)
- radeon_connector->use_digital = 1;
- else
- radeon_connector->use_digital = 0;
- drm_mode_connector_update_edid_property(&radeon_connector->base, edid);
- ret = drm_add_edid_modes(&radeon_connector->base, edid);
- kfree(edid);
+ if (radeon_connector->edid) {
+ drm_mode_connector_update_edid_property(&radeon_connector->base, radeon_connector->edid);
+ ret = drm_add_edid_modes(&radeon_connector->base, radeon_connector->edid);
return ret;
}
drm_mode_connector_update_edid_property(&radeon_connector->base, NULL);
@@ -724,7 +709,11 @@ int radeon_modeset_init(struct radeon_device *rdev)
if (ret) {
return ret;
}
- /* allocate crtcs - TODO single crtc */
+
+ if (rdev->flags & RADEON_SINGLE_CRTC)
+ num_crtc = 1;
+
+ /* allocate crtcs */
for (i = 0; i < num_crtc; i++) {
radeon_crtc_init(rdev->ddev, i);
}
@@ -764,7 +753,7 @@ bool radeon_crtc_scaling_mode_fixup(struct drm_crtc *crtc,
radeon_crtc->rmx_type = radeon_encoder->rmx_type;
memcpy(&radeon_crtc->native_mode,
&radeon_encoder->native_mode,
- sizeof(struct radeon_native_mode));
+ sizeof(struct drm_display_mode));
first = false;
} else {
if (radeon_crtc->rmx_type != radeon_encoder->rmx_type) {
@@ -782,10 +771,10 @@ bool radeon_crtc_scaling_mode_fixup(struct drm_crtc *crtc,
if (radeon_crtc->rmx_type != RMX_OFF) {
fixed20_12 a, b;
a.full = rfixed_const(crtc->mode.vdisplay);
- b.full = rfixed_const(radeon_crtc->native_mode.panel_xres);
+ b.full = rfixed_const(radeon_crtc->native_mode.hdisplay);
radeon_crtc->vsc.full = rfixed_div(a, b);
a.full = rfixed_const(crtc->mode.hdisplay);
- b.full = rfixed_const(radeon_crtc->native_mode.panel_yres);
+ b.full = rfixed_const(radeon_crtc->native_mode.vdisplay);
radeon_crtc->hsc.full = rfixed_div(a, b);
} else {
radeon_crtc->vsc.full = rfixed_const(1);
diff --git a/drivers/gpu/drm/radeon/radeon_encoders.c b/drivers/gpu/drm/radeon/radeon_encoders.c
index 621646752cd..d42bc512d75 100644
--- a/drivers/gpu/drm/radeon/radeon_encoders.c
+++ b/drivers/gpu/drm/radeon/radeon_encoders.c
@@ -31,6 +31,10 @@
extern int atom_debug;
+/* evil but including atombios.h is much worse */
+bool radeon_atom_get_tv_timings(struct radeon_device *rdev, int index,
+ struct drm_display_mode *mode);
+
uint32_t
radeon_get_encoder_id(struct drm_device *dev, uint32_t supported_device, uint8_t dac)
{
@@ -167,49 +171,17 @@ void radeon_rmx_mode_fixup(struct drm_encoder *encoder,
struct radeon_encoder *radeon_encoder = to_radeon_encoder(encoder);
struct drm_device *dev = encoder->dev;
struct radeon_device *rdev = dev->dev_private;
- struct radeon_native_mode *native_mode = &radeon_encoder->native_mode;
-
- if (mode->hdisplay < native_mode->panel_xres ||
- mode->vdisplay < native_mode->panel_yres) {
- if (ASIC_IS_AVIVO(rdev)) {
- adjusted_mode->hdisplay = native_mode->panel_xres;
- adjusted_mode->vdisplay = native_mode->panel_yres;
- adjusted_mode->htotal = native_mode->panel_xres + native_mode->hblank;
- adjusted_mode->hsync_start = native_mode->panel_xres + native_mode->hoverplus;
- adjusted_mode->hsync_end = adjusted_mode->hsync_start + native_mode->hsync_width;
- adjusted_mode->vtotal = native_mode->panel_yres + native_mode->vblank;
- adjusted_mode->vsync_start = native_mode->panel_yres + native_mode->voverplus;
- adjusted_mode->vsync_end = adjusted_mode->vsync_start + native_mode->vsync_width;
- /* update crtc values */
- drm_mode_set_crtcinfo(adjusted_mode, CRTC_INTERLACE_HALVE_V);
- /* adjust crtc values */
- adjusted_mode->crtc_hdisplay = native_mode->panel_xres;
- adjusted_mode->crtc_vdisplay = native_mode->panel_yres;
- adjusted_mode->crtc_htotal = adjusted_mode->crtc_hdisplay + native_mode->hblank;
- adjusted_mode->crtc_hsync_start = adjusted_mode->crtc_hdisplay + native_mode->hoverplus;
- adjusted_mode->crtc_hsync_end = adjusted_mode->crtc_hsync_start + native_mode->hsync_width;
- adjusted_mode->crtc_vtotal = adjusted_mode->crtc_vdisplay + native_mode->vblank;
- adjusted_mode->crtc_vsync_start = adjusted_mode->crtc_vdisplay + native_mode->voverplus;
- adjusted_mode->crtc_vsync_end = adjusted_mode->crtc_vsync_start + native_mode->vsync_width;
- } else {
- adjusted_mode->htotal = native_mode->panel_xres + native_mode->hblank;
- adjusted_mode->hsync_start = native_mode->panel_xres + native_mode->hoverplus;
- adjusted_mode->hsync_end = adjusted_mode->hsync_start + native_mode->hsync_width;
- adjusted_mode->vtotal = native_mode->panel_yres + native_mode->vblank;
- adjusted_mode->vsync_start = native_mode->panel_yres + native_mode->voverplus;
- adjusted_mode->vsync_end = adjusted_mode->vsync_start + native_mode->vsync_width;
- /* update crtc values */
- drm_mode_set_crtcinfo(adjusted_mode, CRTC_INTERLACE_HALVE_V);
- /* adjust crtc values */
- adjusted_mode->crtc_htotal = adjusted_mode->crtc_hdisplay + native_mode->hblank;
- adjusted_mode->crtc_hsync_start = adjusted_mode->crtc_hdisplay + native_mode->hoverplus;
- adjusted_mode->crtc_hsync_end = adjusted_mode->crtc_hsync_start + native_mode->hsync_width;
- adjusted_mode->crtc_vtotal = adjusted_mode->crtc_vdisplay + native_mode->vblank;
- adjusted_mode->crtc_vsync_start = adjusted_mode->crtc_vdisplay + native_mode->voverplus;
- adjusted_mode->crtc_vsync_end = adjusted_mode->crtc_vsync_start + native_mode->vsync_width;
+ struct drm_display_mode *native_mode = &radeon_encoder->native_mode;
+
+ if (mode->hdisplay < native_mode->hdisplay ||
+ mode->vdisplay < native_mode->vdisplay) {
+ int mode_id = adjusted_mode->base.id;
+ *adjusted_mode = *native_mode;
+ if (!ASIC_IS_AVIVO(rdev)) {
+ adjusted_mode->hdisplay = mode->hdisplay;
+ adjusted_mode->vdisplay = mode->vdisplay;
}
- adjusted_mode->flags = native_mode->flags;
- adjusted_mode->clock = native_mode->dotclock;
+ adjusted_mode->base.id = mode_id;
}
}
@@ -219,7 +191,11 @@ static bool radeon_atom_mode_fixup(struct drm_encoder *encoder,
struct drm_display_mode *adjusted_mode)
{
struct radeon_encoder *radeon_encoder = to_radeon_encoder(encoder);
+ struct drm_device *dev = encoder->dev;
+ struct radeon_device *rdev = dev->dev_private;
+ /* set the active encoder to connector routing */
+ radeon_encoder_set_active_device(encoder);
drm_mode_set_crtcinfo(adjusted_mode, 0);
if (radeon_encoder->rmx_type != RMX_OFF)
@@ -230,6 +206,18 @@ static bool radeon_atom_mode_fixup(struct drm_encoder *encoder,
&& (mode->crtc_vsync_start < (mode->crtc_vdisplay + 2)))
adjusted_mode->crtc_vsync_start = adjusted_mode->crtc_vdisplay + 2;
+ if (radeon_encoder->active_device & (ATOM_DEVICE_TV_SUPPORT)) {
+ struct radeon_encoder_atom_dac *tv_dac = radeon_encoder->enc_priv;
+ if (tv_dac) {
+ if (tv_dac->tv_std == TV_STD_NTSC ||
+ tv_dac->tv_std == TV_STD_NTSC_J ||
+ tv_dac->tv_std == TV_STD_PAL_M)
+ radeon_atom_get_tv_timings(rdev, 0, adjusted_mode);
+ else
+ radeon_atom_get_tv_timings(rdev, 1, adjusted_mode);
+ }
+ }
+
return true;
}
@@ -461,7 +449,7 @@ atombios_digital_setup(struct drm_encoder *encoder, int action)
case 1:
args.v1.ucMisc = 0;
args.v1.ucAction = action;
- if (drm_detect_hdmi_monitor((struct edid *)connector->edid_blob_ptr))
+ if (drm_detect_hdmi_monitor(radeon_connector->edid))
args.v1.ucMisc |= PANEL_ENCODER_MISC_HDMI_TYPE;
args.v1.usPixelClock = cpu_to_le16(radeon_encoder->pixel_clock / 10);
if (radeon_encoder->devices & (ATOM_DEVICE_LCD_SUPPORT)) {
@@ -486,7 +474,7 @@ atombios_digital_setup(struct drm_encoder *encoder, int action)
if (dig->coherent_mode)
args.v2.ucMisc |= PANEL_ENCODER_MISC_COHERENT;
}
- if (drm_detect_hdmi_monitor((struct edid *)connector->edid_blob_ptr))
+ if (drm_detect_hdmi_monitor(radeon_connector->edid))
args.v2.ucMisc |= PANEL_ENCODER_MISC_HDMI_TYPE;
args.v2.usPixelClock = cpu_to_le16(radeon_encoder->pixel_clock / 10);
args.v2.ucTruncate = 0;
@@ -544,7 +532,7 @@ atombios_get_encoder_mode(struct drm_encoder *encoder)
switch (connector->connector_type) {
case DRM_MODE_CONNECTOR_DVII:
case DRM_MODE_CONNECTOR_HDMIB: /* HDMI-B is basically DL-DVI; analog works fine */
- if (drm_detect_hdmi_monitor((struct edid *)connector->edid_blob_ptr))
+ if (drm_detect_hdmi_monitor(radeon_connector->edid))
return ATOM_ENCODER_MODE_HDMI;
else if (radeon_connector->use_digital)
return ATOM_ENCODER_MODE_DVI;
@@ -554,7 +542,7 @@ atombios_get_encoder_mode(struct drm_encoder *encoder)
case DRM_MODE_CONNECTOR_DVID:
case DRM_MODE_CONNECTOR_HDMIA:
default:
- if (drm_detect_hdmi_monitor((struct edid *)connector->edid_blob_ptr))
+ if (drm_detect_hdmi_monitor(radeon_connector->edid))
return ATOM_ENCODER_MODE_HDMI;
else
return ATOM_ENCODER_MODE_DVI;
@@ -566,7 +554,7 @@ atombios_get_encoder_mode(struct drm_encoder *encoder)
/*if (radeon_output->MonType == MT_DP)
return ATOM_ENCODER_MODE_DP;
else*/
- if (drm_detect_hdmi_monitor((struct edid *)connector->edid_blob_ptr))
+ if (drm_detect_hdmi_monitor(radeon_connector->edid))
return ATOM_ENCODER_MODE_HDMI;
else
return ATOM_ENCODER_MODE_DVI;
@@ -734,14 +722,17 @@ atombios_dig_transmitter_setup(struct drm_encoder *encoder, int action)
atom_parse_cmd_header(rdev->mode_info.atom_context, index, &frev, &crev);
args.v1.ucAction = action;
-
+ if (action == ATOM_TRANSMITTER_ACTION_INIT) {
+ args.v1.usInitInfo = radeon_connector->connector_object_id;
+ } else {
+ if (radeon_encoder->pixel_clock > 165000)
+ args.v1.usPixelClock = cpu_to_le16((radeon_encoder->pixel_clock / 2) / 10);
+ else
+ args.v1.usPixelClock = cpu_to_le16(radeon_encoder->pixel_clock / 10);
+ }
if (ASIC_IS_DCE32(rdev)) {
- if (radeon_encoder->pixel_clock > 165000) {
- args.v2.usPixelClock = cpu_to_le16((radeon_encoder->pixel_clock * 10 * 2) / 100);
- args.v2.acConfig.fDualLinkConnector = 1;
- } else {
- args.v2.usPixelClock = cpu_to_le16((radeon_encoder->pixel_clock * 10 * 4) / 100);
- }
+ if (radeon_encoder->pixel_clock > 165000)
+ args.v2.usPixelClock = cpu_to_le16((radeon_encoder->pixel_clock / 2) / 10);
if (dig->dig_block)
args.v2.acConfig.ucEncoderSel = 1;
@@ -766,7 +757,6 @@ atombios_dig_transmitter_setup(struct drm_encoder *encoder, int action)
}
} else {
args.v1.ucConfig = ATOM_TRANSMITTER_CONFIG_CLKSRC_PPLL;
- args.v1.usPixelClock = cpu_to_le16((radeon_encoder->pixel_clock) / 10);
switch (radeon_encoder->encoder_id) {
case ENCODER_OBJECT_ID_INTERNAL_UNIPHY:
@@ -874,16 +864,9 @@ radeon_atom_encoder_dpms(struct drm_encoder *encoder, int mode)
DISPLAY_DEVICE_OUTPUT_CONTROL_PS_ALLOCATION args;
int index = 0;
bool is_dig = false;
- int devices;
memset(&args, 0, sizeof(args));
- /* on DPMS off we have no idea if active device is meaningful */
- if (mode != DRM_MODE_DPMS_ON && !radeon_encoder->active_device)
- devices = radeon_encoder->devices;
- else
- devices = radeon_encoder->active_device;
-
DRM_DEBUG("encoder dpms %d to mode %d, devices %08x, active_devices %08x\n",
radeon_encoder->encoder_id, mode, radeon_encoder->devices,
radeon_encoder->active_device);
@@ -914,18 +897,18 @@ radeon_atom_encoder_dpms(struct drm_encoder *encoder, int mode)
break;
case ENCODER_OBJECT_ID_INTERNAL_DAC1:
case ENCODER_OBJECT_ID_INTERNAL_KLDSCP_DAC1:
- if (devices & (ATOM_DEVICE_TV_SUPPORT))
+ if (radeon_encoder->active_device & (ATOM_DEVICE_TV_SUPPORT))
index = GetIndexIntoMasterTable(COMMAND, TV1OutputControl);
- else if (devices & (ATOM_DEVICE_CV_SUPPORT))
+ else if (radeon_encoder->active_device & (ATOM_DEVICE_CV_SUPPORT))
index = GetIndexIntoMasterTable(COMMAND, CV1OutputControl);
else
index = GetIndexIntoMasterTable(COMMAND, DAC1OutputControl);
break;
case ENCODER_OBJECT_ID_INTERNAL_DAC2:
case ENCODER_OBJECT_ID_INTERNAL_KLDSCP_DAC2:
- if (devices & (ATOM_DEVICE_TV_SUPPORT))
+ if (radeon_encoder->active_device & (ATOM_DEVICE_TV_SUPPORT))
index = GetIndexIntoMasterTable(COMMAND, TV1OutputControl);
- else if (devices & (ATOM_DEVICE_CV_SUPPORT))
+ else if (radeon_encoder->active_device & (ATOM_DEVICE_CV_SUPPORT))
index = GetIndexIntoMasterTable(COMMAND, CV1OutputControl);
else
index = GetIndexIntoMasterTable(COMMAND, DAC2OutputControl);
@@ -1104,8 +1087,11 @@ atombios_apply_encoder_quirks(struct drm_encoder *encoder,
}
/* set scaler clears this on some chips */
- if (ASIC_IS_AVIVO(rdev) && (mode->flags & DRM_MODE_FLAG_INTERLACE))
- WREG32(AVIVO_D1MODE_DATA_FORMAT + radeon_crtc->crtc_offset, AVIVO_D1MODE_INTERLEAVE_EN);
+ if (!(radeon_encoder->active_device & (ATOM_DEVICE_TV_SUPPORT))) {
+ if (ASIC_IS_AVIVO(rdev) && (mode->flags & DRM_MODE_FLAG_INTERLACE))
+ WREG32(AVIVO_D1MODE_DATA_FORMAT + radeon_crtc->crtc_offset,
+ AVIVO_D1MODE_INTERLEAVE_EN);
+ }
}
static void
@@ -1153,6 +1139,7 @@ radeon_atom_encoder_mode_set(struct drm_encoder *encoder,
/* setup and enable the encoder and transmitter */
atombios_dig_encoder_setup(encoder, ATOM_ENABLE);
+ atombios_dig_transmitter_setup(encoder, ATOM_TRANSMITTER_ACTION_INIT);
atombios_dig_transmitter_setup(encoder, ATOM_TRANSMITTER_ACTION_SETUP);
atombios_dig_transmitter_setup(encoder, ATOM_TRANSMITTER_ACTION_ENABLE);
break;
@@ -1268,8 +1255,6 @@ static void radeon_atom_encoder_prepare(struct drm_encoder *encoder)
{
radeon_atom_output_lock(encoder, true);
radeon_atom_encoder_dpms(encoder, DRM_MODE_DPMS_OFF);
-
- radeon_encoder_set_active_device(encoder);
}
static void radeon_atom_encoder_commit(struct drm_encoder *encoder)
@@ -1345,6 +1330,7 @@ radeon_atombios_set_dig_info(struct radeon_encoder *radeon_encoder)
void
radeon_add_atom_encoder(struct drm_device *dev, uint32_t encoder_id, uint32_t supported_device)
{
+ struct radeon_device *rdev = dev->dev_private;
struct drm_encoder *encoder;
struct radeon_encoder *radeon_encoder;
@@ -1364,7 +1350,10 @@ radeon_add_atom_encoder(struct drm_device *dev, uint32_t encoder_id, uint32_t su
return;
encoder = &radeon_encoder->base;
- encoder->possible_crtcs = 0x3;
+ if (rdev->flags & RADEON_SINGLE_CRTC)
+ encoder->possible_crtcs = 0x1;
+ else
+ encoder->possible_crtcs = 0x3;
encoder->possible_clones = 0;
radeon_encoder->enc_priv = NULL;
diff --git a/drivers/gpu/drm/radeon/radeon_fb.c b/drivers/gpu/drm/radeon/radeon_fb.c
index 1ba704eedef..b38c4c8e2c6 100644
--- a/drivers/gpu/drm/radeon/radeon_fb.c
+++ b/drivers/gpu/drm/radeon/radeon_fb.c
@@ -55,6 +55,7 @@ static struct fb_ops radeonfb_ops = {
.fb_imageblit = cfb_imageblit,
.fb_pan_display = drm_fb_helper_pan_display,
.fb_blank = drm_fb_helper_blank,
+ .fb_setcmap = drm_fb_helper_setcmap,
};
/**
@@ -123,6 +124,7 @@ static int radeon_align_pitch(struct radeon_device *rdev, int width, int bpp, bo
static struct drm_fb_helper_funcs radeon_fb_helper_funcs = {
.gamma_set = radeon_crtc_fb_gamma_set,
+ .gamma_get = radeon_crtc_fb_gamma_get,
};
int radeonfb_create(struct drm_device *dev,
@@ -146,9 +148,15 @@ int radeonfb_create(struct drm_device *dev,
unsigned long tmp;
bool fb_tiled = false; /* useful for testing */
u32 tiling_flags = 0;
+ int crtc_count;
mode_cmd.width = surface_width;
mode_cmd.height = surface_height;
+
+ /* avivo can't scanout real 24bpp */
+ if ((surface_bpp == 24) && ASIC_IS_AVIVO(rdev))
+ surface_bpp = 32;
+
mode_cmd.bpp = surface_bpp;
/* need to align pitch with crtc limits */
mode_cmd.pitch = radeon_align_pitch(rdev, mode_cmd.width, mode_cmd.bpp, fb_tiled) * ((mode_cmd.bpp + 1) / 8);
@@ -217,7 +225,11 @@ int radeonfb_create(struct drm_device *dev,
rfbdev = info->par;
rfbdev->helper.funcs = &radeon_fb_helper_funcs;
rfbdev->helper.dev = dev;
- ret = drm_fb_helper_init_crtc_count(&rfbdev->helper, 2,
+ if (rdev->flags & RADEON_SINGLE_CRTC)
+ crtc_count = 1;
+ else
+ crtc_count = 2;
+ ret = drm_fb_helper_init_crtc_count(&rfbdev->helper, crtc_count,
RADEONFB_CONN_LIMIT);
if (ret)
goto out_unref;
@@ -234,7 +246,7 @@ int radeonfb_create(struct drm_device *dev,
strcpy(info->fix.id, "radeondrmfb");
- drm_fb_helper_fill_fix(info, fb->pitch);
+ drm_fb_helper_fill_fix(info, fb->pitch, fb->depth);
info->flags = FBINFO_DEFAULT;
info->fbops = &radeonfb_ops;
@@ -309,7 +321,7 @@ int radeon_parse_options(char *options)
int radeonfb_probe(struct drm_device *dev)
{
- return drm_fb_helper_single_fb_probe(dev, &radeonfb_create);
+ return drm_fb_helper_single_fb_probe(dev, 32, &radeonfb_create);
}
int radeonfb_remove(struct drm_device *dev, struct drm_framebuffer *fb)
diff --git a/drivers/gpu/drm/radeon/radeon_gart.c b/drivers/gpu/drm/radeon/radeon_gart.c
index a931af065dd..a68d7566178 100644
--- a/drivers/gpu/drm/radeon/radeon_gart.c
+++ b/drivers/gpu/drm/radeon/radeon_gart.c
@@ -140,15 +140,15 @@ void radeon_gart_unbind(struct radeon_device *rdev, unsigned offset,
WARN(1, "trying to unbind memory to unitialized GART !\n");
return;
}
- t = offset / 4096;
- p = t / (PAGE_SIZE / 4096);
+ t = offset / RADEON_GPU_PAGE_SIZE;
+ p = t / (PAGE_SIZE / RADEON_GPU_PAGE_SIZE);
for (i = 0; i < pages; i++, p++) {
if (rdev->gart.pages[p]) {
pci_unmap_page(rdev->pdev, rdev->gart.pages_addr[p],
PAGE_SIZE, PCI_DMA_BIDIRECTIONAL);
rdev->gart.pages[p] = NULL;
rdev->gart.pages_addr[p] = 0;
- for (j = 0; j < (PAGE_SIZE / 4096); j++, t++) {
+ for (j = 0; j < (PAGE_SIZE / RADEON_GPU_PAGE_SIZE); j++, t++) {
radeon_gart_set_page(rdev, t, 0);
}
}
@@ -169,8 +169,8 @@ int radeon_gart_bind(struct radeon_device *rdev, unsigned offset,
DRM_ERROR("trying to bind memory to unitialized GART !\n");
return -EINVAL;
}
- t = offset / 4096;
- p = t / (PAGE_SIZE / 4096);
+ t = offset / RADEON_GPU_PAGE_SIZE;
+ p = t / (PAGE_SIZE / RADEON_GPU_PAGE_SIZE);
for (i = 0; i < pages; i++, p++) {
/* we need to support large memory configurations */
@@ -185,9 +185,9 @@ int radeon_gart_bind(struct radeon_device *rdev, unsigned offset,
}
rdev->gart.pages[p] = pagelist[i];
page_base = rdev->gart.pages_addr[p];
- for (j = 0; j < (PAGE_SIZE / 4096); j++, t++) {
+ for (j = 0; j < (PAGE_SIZE / RADEON_GPU_PAGE_SIZE); j++, t++) {
radeon_gart_set_page(rdev, t, page_base);
- page_base += 4096;
+ page_base += RADEON_GPU_PAGE_SIZE;
}
}
mb();
@@ -200,14 +200,14 @@ int radeon_gart_init(struct radeon_device *rdev)
if (rdev->gart.pages) {
return 0;
}
- /* We need PAGE_SIZE >= 4096 */
- if (PAGE_SIZE < 4096) {
+ /* We need PAGE_SIZE >= RADEON_GPU_PAGE_SIZE */
+ if (PAGE_SIZE < RADEON_GPU_PAGE_SIZE) {
DRM_ERROR("Page size is smaller than GPU page size!\n");
return -EINVAL;
}
/* Compute table size */
rdev->gart.num_cpu_pages = rdev->mc.gtt_size / PAGE_SIZE;
- rdev->gart.num_gpu_pages = rdev->mc.gtt_size / 4096;
+ rdev->gart.num_gpu_pages = rdev->mc.gtt_size / RADEON_GPU_PAGE_SIZE;
DRM_INFO("GART: num cpu pages %u, num gpu pages %u\n",
rdev->gart.num_cpu_pages, rdev->gart.num_gpu_pages);
/* Allocate pages table */
diff --git a/drivers/gpu/drm/radeon/radeon_irq_kms.c b/drivers/gpu/drm/radeon/radeon_irq_kms.c
index 1841145a7c4..a0fe6232dcb 100644
--- a/drivers/gpu/drm/radeon/radeon_irq_kms.c
+++ b/drivers/gpu/drm/radeon/radeon_irq_kms.c
@@ -83,11 +83,22 @@ void radeon_driver_irq_uninstall_kms(struct drm_device *dev)
int radeon_irq_kms_init(struct radeon_device *rdev)
{
int r = 0;
+ int num_crtc = 2;
- r = drm_vblank_init(rdev->ddev, 2);
+ if (rdev->flags & RADEON_SINGLE_CRTC)
+ num_crtc = 1;
+
+ r = drm_vblank_init(rdev->ddev, num_crtc);
if (r) {
return r;
}
+ /* enable msi */
+ rdev->msi_enabled = 0;
+ if (rdev->family >= CHIP_RV380) {
+ int ret = pci_enable_msi(rdev->pdev);
+ if (!ret)
+ rdev->msi_enabled = 1;
+ }
drm_irq_install(rdev->ddev);
rdev->irq.installed = true;
DRM_INFO("radeon: irq initialized.\n");
@@ -99,5 +110,7 @@ void radeon_irq_kms_fini(struct radeon_device *rdev)
if (rdev->irq.installed) {
rdev->irq.installed = false;
drm_irq_uninstall(rdev->ddev);
+ if (rdev->msi_enabled)
+ pci_disable_msi(rdev->pdev);
}
}
diff --git a/drivers/gpu/drm/radeon/radeon_legacy_crtc.c b/drivers/gpu/drm/radeon/radeon_legacy_crtc.c
index 2b997a15fb1..8d0b7aa87fa 100644
--- a/drivers/gpu/drm/radeon/radeon_legacy_crtc.c
+++ b/drivers/gpu/drm/radeon/radeon_legacy_crtc.c
@@ -48,7 +48,7 @@ static void radeon_legacy_rmx_mode_set(struct drm_crtc *crtc,
u32 fp_horz_stretch, fp_vert_stretch, fp_horz_vert_active;
u32 fp_h_sync_strt_wid, fp_crtc_h_total_disp;
u32 fp_v_sync_strt_wid, fp_crtc_v_total_disp;
- struct radeon_native_mode *native_mode = &radeon_crtc->native_mode;
+ struct drm_display_mode *native_mode = &radeon_crtc->native_mode;
fp_vert_stretch = RREG32(RADEON_FP_VERT_STRETCH) &
(RADEON_VERT_STRETCH_RESERVED |
@@ -95,19 +95,19 @@ static void radeon_legacy_rmx_mode_set(struct drm_crtc *crtc,
fp_horz_vert_active = 0;
- if (native_mode->panel_xres == 0 ||
- native_mode->panel_yres == 0) {
+ if (native_mode->hdisplay == 0 ||
+ native_mode->vdisplay == 0) {
hscale = false;
vscale = false;
} else {
- if (xres > native_mode->panel_xres)
- xres = native_mode->panel_xres;
- if (yres > native_mode->panel_yres)
- yres = native_mode->panel_yres;
+ if (xres > native_mode->hdisplay)
+ xres = native_mode->hdisplay;
+ if (yres > native_mode->vdisplay)
+ yres = native_mode->vdisplay;
- if (xres == native_mode->panel_xres)
+ if (xres == native_mode->hdisplay)
hscale = false;
- if (yres == native_mode->panel_yres)
+ if (yres == native_mode->vdisplay)
vscale = false;
}
@@ -119,11 +119,11 @@ static void radeon_legacy_rmx_mode_set(struct drm_crtc *crtc,
else {
inc = (fp_horz_stretch & RADEON_HORZ_AUTO_RATIO_INC) ? 1 : 0;
scale = ((xres + inc) * RADEON_HORZ_STRETCH_RATIO_MAX)
- / native_mode->panel_xres + 1;
+ / native_mode->hdisplay + 1;
fp_horz_stretch |= (((scale) & RADEON_HORZ_STRETCH_RATIO_MASK) |
RADEON_HORZ_STRETCH_BLEND |
RADEON_HORZ_STRETCH_ENABLE |
- ((native_mode->panel_xres/8-1) << 16));
+ ((native_mode->hdisplay/8-1) << 16));
}
if (!vscale)
@@ -131,11 +131,11 @@ static void radeon_legacy_rmx_mode_set(struct drm_crtc *crtc,
else {
inc = (fp_vert_stretch & RADEON_VERT_AUTO_RATIO_INC) ? 1 : 0;
scale = ((yres + inc) * RADEON_VERT_STRETCH_RATIO_MAX)
- / native_mode->panel_yres + 1;
+ / native_mode->vdisplay + 1;
fp_vert_stretch |= (((scale) & RADEON_VERT_STRETCH_RATIO_MASK) |
RADEON_VERT_STRETCH_ENABLE |
RADEON_VERT_STRETCH_BLEND |
- ((native_mode->panel_yres-1) << 12));
+ ((native_mode->vdisplay-1) << 12));
}
break;
case RMX_CENTER:
@@ -175,8 +175,8 @@ static void radeon_legacy_rmx_mode_set(struct drm_crtc *crtc,
? RADEON_CRTC_V_SYNC_POL
: 0)));
- fp_horz_vert_active = (((native_mode->panel_yres) & 0xfff) |
- (((native_mode->panel_xres / 8) & 0x1ff) << 16));
+ fp_horz_vert_active = (((native_mode->vdisplay) & 0xfff) |
+ (((native_mode->hdisplay / 8) & 0x1ff) << 16));
break;
case RMX_OFF:
default:
@@ -532,6 +532,10 @@ int radeon_crtc_set_base(struct drm_crtc *crtc, int x, int y,
radeon_fb = to_radeon_framebuffer(old_fb);
radeon_gem_object_unpin(radeon_fb->obj);
}
+
+ /* Bytes per pixel may have changed */
+ radeon_bandwidth_update(rdev);
+
return 0;
}
@@ -664,6 +668,9 @@ static bool radeon_set_crtc_timing(struct drm_crtc *crtc, struct drm_display_mod
WREG32(RADEON_DISP2_MERGE_CNTL, disp2_merge_cntl);
WREG32(RADEON_CRTC2_GEN_CNTL, crtc2_gen_cntl);
+
+ WREG32(RADEON_FP_H2_SYNC_STRT_WID, crtc_h_sync_strt_wid);
+ WREG32(RADEON_FP_V2_SYNC_STRT_WID, crtc_v_sync_strt_wid);
} else {
uint32_t crtc_gen_cntl;
uint32_t crtc_ext_cntl;
@@ -1015,14 +1022,11 @@ static int radeon_crtc_mode_set(struct drm_crtc *crtc,
int x, int y, struct drm_framebuffer *old_fb)
{
struct radeon_crtc *radeon_crtc = to_radeon_crtc(crtc);
- struct drm_device *dev = crtc->dev;
- struct radeon_device *rdev = dev->dev_private;
/* TODO TV */
radeon_crtc_set_base(crtc, x, y, old_fb);
radeon_set_crtc_timing(crtc, adjusted_mode);
radeon_set_pll(crtc, adjusted_mode);
- radeon_bandwidth_update(rdev);
if (radeon_crtc->crtc_id == 0) {
radeon_legacy_rmx_mode_set(crtc, mode, adjusted_mode);
} else {
@@ -1053,6 +1057,7 @@ static const struct drm_crtc_helper_funcs legacy_helper_funcs = {
.mode_set_base = radeon_crtc_set_base,
.prepare = radeon_crtc_prepare,
.commit = radeon_crtc_commit,
+ .load_lut = radeon_crtc_load_lut,
};
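In the RMX_FULL path above, the stretch registers are programmed with a fixed-point ratio computed as ((xres + inc) * RATIO_MAX) / native + 1, where native is now the panel's hdisplay/vdisplay taken from drm_display_mode. A standalone sketch of that arithmetic; the ratio scale and mode sizes are illustrative assumptions:

#include <stdio.h>

#define STRETCH_RATIO_MAX 4096u   /* assumed fixed-point scale of the stretch ratio */

/* Fixed-point ratio used to scale a smaller mode up to the panel's native size. */
static unsigned stretch_ratio(unsigned requested, unsigned native, unsigned inc)
{
	return ((requested + inc) * STRETCH_RATIO_MAX) / native + 1;
}

int main(void)
{
	unsigned native_h = 1400, requested_h = 1024;   /* assumed panel / assumed mode */

	printf("horizontal stretch ratio: %u/%u\n",
	       stretch_ratio(requested_h, native_h, 0), STRETCH_RATIO_MAX);
	return 0;
}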
diff --git a/drivers/gpu/drm/radeon/radeon_legacy_encoders.c b/drivers/gpu/drm/radeon/radeon_legacy_encoders.c
index b1547f700d7..00382122869 100644
--- a/drivers/gpu/drm/radeon/radeon_legacy_encoders.c
+++ b/drivers/gpu/drm/radeon/radeon_legacy_encoders.c
@@ -107,8 +107,6 @@ static void radeon_legacy_lvds_prepare(struct drm_encoder *encoder)
else
radeon_combios_output_lock(encoder, true);
radeon_legacy_lvds_dpms(encoder, DRM_MODE_DPMS_OFF);
-
- radeon_encoder_set_active_device(encoder);
}
static void radeon_legacy_lvds_commit(struct drm_encoder *encoder)
@@ -192,6 +190,8 @@ static bool radeon_legacy_lvds_mode_fixup(struct drm_encoder *encoder,
{
struct radeon_encoder *radeon_encoder = to_radeon_encoder(encoder);
+	/* set the active encoder-to-connector routing */

+ radeon_encoder_set_active_device(encoder);
drm_mode_set_crtcinfo(adjusted_mode, 0);
if (radeon_encoder->rmx_type != RMX_OFF)
@@ -218,7 +218,8 @@ static bool radeon_legacy_primary_dac_mode_fixup(struct drm_encoder *encoder,
struct drm_display_mode *mode,
struct drm_display_mode *adjusted_mode)
{
-
+	/* set the active encoder-to-connector routing */
+ radeon_encoder_set_active_device(encoder);
drm_mode_set_crtcinfo(adjusted_mode, 0);
return true;
@@ -272,7 +273,6 @@ static void radeon_legacy_primary_dac_prepare(struct drm_encoder *encoder)
else
radeon_combios_output_lock(encoder, true);
radeon_legacy_primary_dac_dpms(encoder, DRM_MODE_DPMS_OFF);
- radeon_encoder_set_active_device(encoder);
}
static void radeon_legacy_primary_dac_commit(struct drm_encoder *encoder)
@@ -468,7 +468,6 @@ static void radeon_legacy_tmds_int_prepare(struct drm_encoder *encoder)
else
radeon_combios_output_lock(encoder, true);
radeon_legacy_tmds_int_dpms(encoder, DRM_MODE_DPMS_OFF);
- radeon_encoder_set_active_device(encoder);
}
static void radeon_legacy_tmds_int_commit(struct drm_encoder *encoder)
@@ -543,6 +542,14 @@ static void radeon_legacy_tmds_int_mode_set(struct drm_encoder *encoder,
fp_gen_cntl &= ~(RADEON_FP_FPON | RADEON_FP_TMDS_EN);
+ fp_gen_cntl &= ~(RADEON_FP_RMX_HVSYNC_CONTROL_EN |
+ RADEON_FP_DFP_SYNC_SEL |
+ RADEON_FP_CRT_SYNC_SEL |
+ RADEON_FP_CRTC_LOCK_8DOT |
+ RADEON_FP_USE_SHADOW_EN |
+ RADEON_FP_CRTC_USE_SHADOW_VEND |
+ RADEON_FP_CRT_SYNC_ALT);
+
if (1) /* FIXME rgbBits == 8 */
fp_gen_cntl |= RADEON_FP_PANEL_FORMAT; /* 24 bit format */
else
@@ -556,7 +563,7 @@ static void radeon_legacy_tmds_int_mode_set(struct drm_encoder *encoder,
else
fp_gen_cntl |= R200_FP_SOURCE_SEL_CRTC1;
} else
- fp_gen_cntl |= RADEON_FP_SEL_CRTC1;
+ fp_gen_cntl &= ~RADEON_FP_SEL_CRTC2;
} else {
if (ASIC_IS_R300(rdev) || rdev->family == CHIP_R200) {
fp_gen_cntl &= ~R200_FP_SOURCE_SEL_MASK;
@@ -593,7 +600,8 @@ static bool radeon_legacy_tmds_ext_mode_fixup(struct drm_encoder *encoder,
struct drm_display_mode *mode,
struct drm_display_mode *adjusted_mode)
{
-
+	/* set the active encoder-to-connector routing */
+ radeon_encoder_set_active_device(encoder);
drm_mode_set_crtcinfo(adjusted_mode, 0);
return true;
@@ -636,7 +644,6 @@ static void radeon_legacy_tmds_ext_prepare(struct drm_encoder *encoder)
else
radeon_combios_output_lock(encoder, true);
radeon_legacy_tmds_ext_dpms(encoder, DRM_MODE_DPMS_OFF);
- radeon_encoder_set_active_device(encoder);
}
static void radeon_legacy_tmds_ext_commit(struct drm_encoder *encoder)
@@ -735,7 +742,8 @@ static bool radeon_legacy_tv_dac_mode_fixup(struct drm_encoder *encoder,
struct drm_display_mode *mode,
struct drm_display_mode *adjusted_mode)
{
-
+	/* set the active encoder-to-connector routing */
+ radeon_encoder_set_active_device(encoder);
drm_mode_set_crtcinfo(adjusted_mode, 0);
return true;
@@ -839,7 +847,6 @@ static void radeon_legacy_tv_dac_prepare(struct drm_encoder *encoder)
else
radeon_combios_output_lock(encoder, true);
radeon_legacy_tv_dac_dpms(encoder, DRM_MODE_DPMS_OFF);
- radeon_encoder_set_active_device(encoder);
}
static void radeon_legacy_tv_dac_commit(struct drm_encoder *encoder)
@@ -881,7 +888,7 @@ static void radeon_legacy_tv_dac_mode_set(struct drm_encoder *encoder,
R420_TV_DAC_DACADJ_MASK |
R420_TV_DAC_RDACPD |
R420_TV_DAC_GDACPD |
- R420_TV_DAC_GDACPD |
+ R420_TV_DAC_BDACPD |
R420_TV_DAC_TVENABLE);
} else {
tv_dac_cntl &= ~(RADEON_TV_DAC_STD_MASK |
@@ -889,7 +896,7 @@ static void radeon_legacy_tv_dac_mode_set(struct drm_encoder *encoder,
RADEON_TV_DAC_DACADJ_MASK |
RADEON_TV_DAC_RDACPD |
RADEON_TV_DAC_GDACPD |
- RADEON_TV_DAC_GDACPD);
+ RADEON_TV_DAC_BDACPD);
}
/* FIXME TV */
@@ -1318,7 +1325,10 @@ radeon_add_legacy_encoder(struct drm_device *dev, uint32_t encoder_id, uint32_t
return;
encoder = &radeon_encoder->base;
- encoder->possible_crtcs = 0x3;
+ if (rdev->flags & RADEON_SINGLE_CRTC)
+ encoder->possible_crtcs = 0x1;
+ else
+ encoder->possible_crtcs = 0x3;
encoder->possible_clones = 0;
radeon_encoder->enc_priv = NULL;
diff --git a/drivers/gpu/drm/radeon/radeon_mode.h b/drivers/gpu/drm/radeon/radeon_mode.h
index 570a58729da..ace726aa0d7 100644
--- a/drivers/gpu/drm/radeon/radeon_mode.h
+++ b/drivers/gpu/drm/radeon/radeon_mode.h
@@ -172,6 +172,7 @@ enum radeon_connector_table {
struct radeon_mode_info {
struct atom_context *atom_context;
+ struct card_info *atom_card_info;
enum radeon_connector_table connector_table;
bool mode_config_initialized;
struct radeon_crtc *crtcs[2];
@@ -186,17 +187,6 @@ struct radeon_mode_info {
};
-struct radeon_native_mode {
- /* preferred mode */
- uint32_t panel_xres, panel_yres;
- uint32_t hoverplus, hsync_width;
- uint32_t hblank;
- uint32_t voverplus, vsync_width;
- uint32_t vblank;
- uint32_t dotclock;
- uint32_t flags;
-};
-
#define MAX_H_CODE_TIMING_LEN 32
#define MAX_V_CODE_TIMING_LEN 32
@@ -228,7 +218,7 @@ struct radeon_crtc {
enum radeon_rmx_type rmx_type;
fixed20_12 vsc;
fixed20_12 hsc;
- struct radeon_native_mode native_mode;
+ struct drm_display_mode native_mode;
};
struct radeon_encoder_primary_dac {
@@ -248,7 +238,7 @@ struct radeon_encoder_lvds {
bool use_bios_dividers;
uint32_t lvds_gen_cntl;
/* panel mode */
- struct radeon_native_mode native_mode;
+ struct drm_display_mode native_mode;
};
struct radeon_encoder_tv_dac {
@@ -271,6 +261,16 @@ struct radeon_encoder_int_tmds {
struct radeon_tmds_pll tmds_pll[4];
};
+/* spread spectrum */
+struct radeon_atom_ss {
+ uint16_t percentage;
+ uint8_t type;
+ uint8_t step;
+ uint8_t delay;
+ uint8_t range;
+ uint8_t refdiv;
+};
+
struct radeon_encoder_atom_dig {
/* atom dig */
bool coherent_mode;
@@ -278,8 +278,9 @@ struct radeon_encoder_atom_dig {
/* atom lvds */
uint32_t lvds_misc;
uint16_t panel_pwr_delay;
+ struct radeon_atom_ss *ss;
/* panel mode */
- struct radeon_native_mode native_mode;
+ struct drm_display_mode native_mode;
};
struct radeon_encoder_atom_dac {
@@ -294,7 +295,7 @@ struct radeon_encoder {
uint32_t flags;
uint32_t pixel_clock;
enum radeon_rmx_type rmx_type;
- struct radeon_native_mode native_mode;
+ struct drm_display_mode native_mode;
void *enc_priv;
};
@@ -308,12 +309,15 @@ struct radeon_connector {
uint32_t connector_id;
uint32_t devices;
struct radeon_i2c_chan *ddc_bus;
+	/* some systems have an hdmi and a vga port with a shared ddc line */
+ bool shared_ddc;
bool use_digital;
/* we need to mind the EDID between detect
and get modes due to analog/digital/tvencoder */
struct edid *edid;
void *con_priv;
bool dac_load_detect;
+ uint16_t connector_object_id;
};
struct radeon_framebuffer {
@@ -407,6 +411,8 @@ extern void
radeon_combios_encoder_dpms_scratch_regs(struct drm_encoder *encoder, bool on);
extern void radeon_crtc_fb_gamma_set(struct drm_crtc *crtc, u16 red, u16 green,
u16 blue, int regno);
+extern void radeon_crtc_fb_gamma_get(struct drm_crtc *crtc, u16 *red, u16 *green,
+ u16 *blue, int regno);
struct drm_framebuffer *radeon_framebuffer_create(struct drm_device *dev,
struct drm_mode_fb_cmd *mode_cmd,
struct drm_gem_object *obj);
diff --git a/drivers/gpu/drm/radeon/radeon_object.c b/drivers/gpu/drm/radeon/radeon_object.c
index 73af463b7a5..1f056dadc5c 100644
--- a/drivers/gpu/drm/radeon/radeon_object.c
+++ b/drivers/gpu/drm/radeon/radeon_object.c
@@ -400,11 +400,9 @@ void radeon_object_list_add_object(struct radeon_object_list *lobj,
int radeon_object_list_reserve(struct list_head *head)
{
struct radeon_object_list *lobj;
- struct list_head *i;
int r;
- list_for_each(i, head) {
- lobj = list_entry(i, struct radeon_object_list, list);
+ list_for_each_entry(lobj, head, list){
if (!lobj->robj->pin_count) {
r = radeon_object_reserve(lobj->robj, true);
if (unlikely(r != 0)) {
@@ -420,13 +418,10 @@ int radeon_object_list_reserve(struct list_head *head)
void radeon_object_list_unreserve(struct list_head *head)
{
struct radeon_object_list *lobj;
- struct list_head *i;
- list_for_each(i, head) {
- lobj = list_entry(i, struct radeon_object_list, list);
+ list_for_each_entry(lobj, head, list) {
if (!lobj->robj->pin_count) {
radeon_object_unreserve(lobj->robj);
- } else {
}
}
}
@@ -436,7 +431,6 @@ int radeon_object_list_validate(struct list_head *head, void *fence)
struct radeon_object_list *lobj;
struct radeon_object *robj;
struct radeon_fence *old_fence = NULL;
- struct list_head *i;
int r;
r = radeon_object_list_reserve(head);
@@ -444,8 +438,7 @@ int radeon_object_list_validate(struct list_head *head, void *fence)
radeon_object_list_unreserve(head);
return r;
}
- list_for_each(i, head) {
- lobj = list_entry(i, struct radeon_object_list, list);
+ list_for_each_entry(lobj, head, list) {
robj = lobj->robj;
if (!robj->pin_count) {
if (lobj->wdomain) {
@@ -482,10 +475,8 @@ void radeon_object_list_unvalidate(struct list_head *head)
{
struct radeon_object_list *lobj;
struct radeon_fence *old_fence = NULL;
- struct list_head *i;
- list_for_each(i, head) {
- lobj = list_entry(i, struct radeon_object_list, list);
+ list_for_each_entry(lobj, head, list) {
old_fence = (struct radeon_fence *)lobj->robj->tobj.sync_obj;
lobj->robj->tobj.sync_obj = NULL;
if (old_fence) {
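The radeon_object.c hunks are a mechanical conversion from list_for_each() plus list_entry() to list_for_each_entry(), which folds the container lookup into the iterator and drops the temporary struct list_head pointer. A self-contained illustration of what the two forms expand to; the list and container_of definitions are re-implemented minimally here rather than taken from <linux/list.h>:

#include <stddef.h>
#include <stdio.h>

#define container_of(ptr, type, member) \
	((type *)((char *)(ptr) - offsetof(type, member)))
#define list_entry(ptr, type, member) container_of(ptr, type, member)

struct list_head { struct list_head *next; };   /* singly linked, for brevity */

struct lobj {
	int pin_count;
	struct list_head list;
};

/* Old form: iterate raw list nodes, convert each node to its container. */
#define list_for_each(pos, head) \
	for ((pos) = (head)->next; (pos) != (head); (pos) = (pos)->next)

/* New form: the loop variable is already the containing object. */
#define list_for_each_entry(pos, head, member)                                  \
	for ((pos) = container_of((head)->next, __typeof__(*(pos)), member);    \
	     &(pos)->member != (head);                                           \
	     (pos) = container_of((pos)->member.next, __typeof__(*(pos)), member))

int main(void)
{
	struct lobj a = { .pin_count = 0 }, b = { .pin_count = 2 };
	struct list_head head = { &a.list };
	struct list_head *i;
	struct lobj *lobj;

	a.list.next = &b.list;
	b.list.next = &head;

	/* Old style: two variables, explicit list_entry() on every iteration. */
	list_for_each(i, &head) {
		lobj = list_entry(i, struct lobj, list);
		printf("old style: pin_count = %d\n", lobj->pin_count);
	}

	/* New style: same traversal, one variable, no explicit conversion. */
	list_for_each_entry(lobj, &head, list)
		printf("new style: pin_count = %d\n", lobj->pin_count);

	return 0;
}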
diff --git a/drivers/gpu/drm/radeon/radeon_pm.c b/drivers/gpu/drm/radeon/radeon_pm.c
new file mode 100644
index 00000000000..46146c6a2a0
--- /dev/null
+++ b/drivers/gpu/drm/radeon/radeon_pm.c
@@ -0,0 +1,65 @@
+/*
+ * Permission is hereby granted, free of charge, to any person obtaining a
+ * copy of this software and associated documentation files (the "Software"),
+ * to deal in the Software without restriction, including without limitation
+ * the rights to use, copy, modify, merge, publish, distribute, sublicense,
+ * and/or sell copies of the Software, and to permit persons to whom the
+ * Software is furnished to do so, subject to the following conditions:
+ *
+ * The above copyright notice and this permission notice shall be included in
+ * all copies or substantial portions of the Software.
+ *
+ * THE SOFTWARE IS PROVIDED "AS IS", WITHOUT WARRANTY OF ANY KIND, EXPRESS OR
+ * IMPLIED, INCLUDING BUT NOT LIMITED TO THE WARRANTIES OF MERCHANTABILITY,
+ * FITNESS FOR A PARTICULAR PURPOSE AND NONINFRINGEMENT. IN NO EVENT SHALL
+ * THE COPYRIGHT HOLDER(S) OR AUTHOR(S) BE LIABLE FOR ANY CLAIM, DAMAGES OR
+ * OTHER LIABILITY, WHETHER IN AN ACTION OF CONTRACT, TORT OR OTHERWISE,
+ * ARISING FROM, OUT OF OR IN CONNECTION WITH THE SOFTWARE OR THE USE OR
+ * OTHER DEALINGS IN THE SOFTWARE.
+ *
+ * Authors: Rafał Miłecki <zajec5@gmail.com>
+ */
+#include "drmP.h"
+#include "radeon.h"
+
+int radeon_debugfs_pm_init(struct radeon_device *rdev);
+
+int radeon_pm_init(struct radeon_device *rdev)
+{
+ if (radeon_debugfs_pm_init(rdev)) {
+ DRM_ERROR("Failed to register debugfs file for CP !\n");
+ }
+
+ return 0;
+}
+
+/*
+ * Debugfs info
+ */
+#if defined(CONFIG_DEBUG_FS)
+
+static int radeon_debugfs_pm_info(struct seq_file *m, void *data)
+{
+ struct drm_info_node *node = (struct drm_info_node *) m->private;
+ struct drm_device *dev = node->minor->dev;
+ struct radeon_device *rdev = dev->dev_private;
+
+ seq_printf(m, "engine clock: %u0 Hz\n", radeon_get_engine_clock(rdev));
+ seq_printf(m, "memory clock: %u0 Hz\n", radeon_get_memory_clock(rdev));
+
+ return 0;
+}
+
+static struct drm_info_list radeon_pm_info_list[] = {
+ {"radeon_pm_info", radeon_debugfs_pm_info, 0, NULL},
+};
+#endif
+
+int radeon_debugfs_pm_init(struct radeon_device *rdev)
+{
+#if defined(CONFIG_DEBUG_FS)
+ return radeon_debugfs_add_files(rdev, radeon_pm_info_list, ARRAY_SIZE(radeon_pm_info_list));
+#else
+ return 0;
+#endif
+}
diff --git a/drivers/gpu/drm/radeon/radeon_reg.h b/drivers/gpu/drm/radeon/radeon_reg.h
index bfa1ab9c93e..29ab75903ec 100644
--- a/drivers/gpu/drm/radeon/radeon_reg.h
+++ b/drivers/gpu/drm/radeon/radeon_reg.h
@@ -290,6 +290,8 @@
#define RADEON_BUS_CNTL 0x0030
# define RADEON_BUS_MASTER_DIS (1 << 6)
# define RADEON_BUS_BIOS_DIS_ROM (1 << 12)
+# define RS600_BUS_MASTER_DIS (1 << 14)
+# define RS600_MSI_REARM (1 << 20) /* rs600/rs690/rs740 */
# define RADEON_BUS_RD_DISCARD_EN (1 << 24)
# define RADEON_BUS_RD_ABORT_EN (1 << 25)
# define RADEON_BUS_MSTR_DISCONNECT_EN (1 << 28)
@@ -297,6 +299,9 @@
# define RADEON_BUS_READ_BURST (1 << 30)
#define RADEON_BUS_CNTL1 0x0034
# define RADEON_BUS_WAIT_ON_LOCK_EN (1 << 4)
+/* rv370/rv380, rv410, r423/r430/r480, r5xx */
+#define RADEON_MSI_REARM_EN 0x0160
+# define RV370_MSI_REARM_EN (1 << 0)
/* #define RADEON_PCIE_INDEX 0x0030 */
/* #define RADEON_PCIE_DATA 0x0034 */
@@ -3311,6 +3316,7 @@
#define RADEON_AIC_CNTL 0x01d0
# define RADEON_PCIGART_TRANSLATE_EN (1 << 0)
# define RADEON_DIS_OUT_OF_PCI_GART_ACCESS (1 << 1)
+# define RS400_MSI_REARM (1 << 3) /* rs400/rs480 */
#define RADEON_AIC_LO_ADDR 0x01dc
#define RADEON_AIC_PT_BASE 0x01d8
#define RADEON_AIC_HI_ADDR 0x01e0
diff --git a/drivers/gpu/drm/radeon/radeon_test.c b/drivers/gpu/drm/radeon/radeon_test.c
index 03c33cf4e14..f8a465d9a1c 100644
--- a/drivers/gpu/drm/radeon/radeon_test.c
+++ b/drivers/gpu/drm/radeon/radeon_test.c
@@ -42,7 +42,7 @@ void radeon_test_moves(struct radeon_device *rdev)
/* Number of tests =
* (Total GTT - IB pool - writeback page - ring buffer) / test size
*/
- n = (rdev->mc.gtt_size - RADEON_IB_POOL_SIZE*64*1024 - 4096 -
+ n = (rdev->mc.gtt_size - RADEON_IB_POOL_SIZE*64*1024 - RADEON_GPU_PAGE_SIZE -
rdev->cp.ring_size) / size;
gtt_obj = kzalloc(n * sizeof(*gtt_obj), GFP_KERNEL);
@@ -102,7 +102,7 @@ void radeon_test_moves(struct radeon_device *rdev)
goto out_cleanup;
}
- r = radeon_copy(rdev, gtt_addr, vram_addr, size / 4096, fence);
+ r = radeon_copy(rdev, gtt_addr, vram_addr, size / RADEON_GPU_PAGE_SIZE, fence);
if (r) {
DRM_ERROR("Failed GTT->VRAM copy %d\n", i);
goto out_cleanup;
@@ -145,7 +145,7 @@ void radeon_test_moves(struct radeon_device *rdev)
goto out_cleanup;
}
- r = radeon_copy(rdev, vram_addr, gtt_addr, size / 4096, fence);
+ r = radeon_copy(rdev, vram_addr, gtt_addr, size / RADEON_GPU_PAGE_SIZE, fence);
if (r) {
DRM_ERROR("Failed VRAM->GTT copy %d\n", i);
goto out_cleanup;
diff --git a/drivers/gpu/drm/radeon/radeon_ttm.c b/drivers/gpu/drm/radeon/radeon_ttm.c
index 765bd184b6f..1381e06d6af 100644
--- a/drivers/gpu/drm/radeon/radeon_ttm.c
+++ b/drivers/gpu/drm/radeon/radeon_ttm.c
@@ -295,6 +295,12 @@ static int radeon_move_vram_ram(struct ttm_buffer_object *bo,
if (unlikely(r)) {
return r;
}
+
+ r = ttm_tt_set_placement_caching(bo->ttm, tmp_mem.placement);
+ if (unlikely(r)) {
+ goto out_cleanup;
+ }
+
r = ttm_tt_bind(bo->ttm, &tmp_mem);
if (unlikely(r)) {
goto out_cleanup;
diff --git a/drivers/gpu/drm/radeon/rs100d.h b/drivers/gpu/drm/radeon/rs100d.h
new file mode 100644
index 00000000000..48a913a06cf
--- /dev/null
+++ b/drivers/gpu/drm/radeon/rs100d.h
@@ -0,0 +1,40 @@
+/*
+ * Copyright 2008 Advanced Micro Devices, Inc.
+ * Copyright 2008 Red Hat Inc.
+ * Copyright 2009 Jerome Glisse.
+ *
+ * Permission is hereby granted, free of charge, to any person obtaining a
+ * copy of this software and associated documentation files (the "Software"),
+ * to deal in the Software without restriction, including without limitation
+ * the rights to use, copy, modify, merge, publish, distribute, sublicense,
+ * and/or sell copies of the Software, and to permit persons to whom the
+ * Software is furnished to do so, subject to the following conditions:
+ *
+ * The above copyright notice and this permission notice shall be included in
+ * all copies or substantial portions of the Software.
+ *
+ * THE SOFTWARE IS PROVIDED "AS IS", WITHOUT WARRANTY OF ANY KIND, EXPRESS OR
+ * IMPLIED, INCLUDING BUT NOT LIMITED TO THE WARRANTIES OF MERCHANTABILITY,
+ * FITNESS FOR A PARTICULAR PURPOSE AND NONINFRINGEMENT. IN NO EVENT SHALL
+ * THE COPYRIGHT HOLDER(S) OR AUTHOR(S) BE LIABLE FOR ANY CLAIM, DAMAGES OR
+ * OTHER LIABILITY, WHETHER IN AN ACTION OF CONTRACT, TORT OR OTHERWISE,
+ * ARISING FROM, OUT OF OR IN CONNECTION WITH THE SOFTWARE OR THE USE OR
+ * OTHER DEALINGS IN THE SOFTWARE.
+ *
+ * Authors: Dave Airlie
+ * Alex Deucher
+ * Jerome Glisse
+ */
+#ifndef __RS100D_H__
+#define __RS100D_H__
+
+/* Registers */
+#define R_00015C_NB_TOM 0x00015C
+#define S_00015C_MC_FB_START(x) (((x) & 0xFFFF) << 0)
+#define G_00015C_MC_FB_START(x) (((x) >> 0) & 0xFFFF)
+#define C_00015C_MC_FB_START 0xFFFF0000
+#define S_00015C_MC_FB_TOP(x) (((x) & 0xFFFF) << 16)
+#define G_00015C_MC_FB_TOP(x) (((x) >> 16) & 0xFFFF)
+#define C_00015C_MC_FB_TOP 0x0000FFFF
+
+#endif
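The new rs100d.h (like rs400d.h and rs600d.h further down) follows a single convention: for every field, S_<reg>_<field>(x) shifts a value into place, G_<reg>_<field>(x) extracts it, and C_<reg>_<field> is the AND mask that clears it. A standalone sketch of a read-modify-write written with that convention; the register value is made up:

#include <stdio.h>
#include <stdint.h>

/* Same shape as the generated accessors in rs100d.h (NB_TOM.MC_FB_START/TOP). */
#define S_00015C_MC_FB_START(x)   (((x) & 0xFFFF) << 0)
#define G_00015C_MC_FB_START(x)   (((x) >> 0) & 0xFFFF)
#define C_00015C_MC_FB_START      0xFFFF0000
#define S_00015C_MC_FB_TOP(x)     (((x) & 0xFFFF) << 16)
#define G_00015C_MC_FB_TOP(x)     (((x) >> 16) & 0xFFFF)

int main(void)
{
	uint32_t nb_tom = 0x03ff0000;   /* made-up register value */

	/* G_ extracts a field from the raw register value. */
	printf("fb_start=0x%04x fb_top=0x%04x\n",
	       (unsigned)G_00015C_MC_FB_START(nb_tom),
	       (unsigned)G_00015C_MC_FB_TOP(nb_tom));

	/* Read-modify-write: C_ clears the field, S_ shifts the new value in. */
	nb_tom = (nb_tom & C_00015C_MC_FB_START) | S_00015C_MC_FB_START(0x0010);
	printf("updated NB_TOM = 0x%08x\n", (unsigned)nb_tom);
	return 0;
}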
diff --git a/drivers/gpu/drm/radeon/rs400.c b/drivers/gpu/drm/radeon/rs400.c
index a3fbdad938c..ca037160a58 100644
--- a/drivers/gpu/drm/radeon/rs400.c
+++ b/drivers/gpu/drm/radeon/rs400.c
@@ -27,27 +27,12 @@
*/
#include <linux/seq_file.h>
#include <drm/drmP.h>
-#include "radeon_reg.h"
#include "radeon.h"
+#include "rs400d.h"
-/* rs400,rs480 depends on : */
-void r100_hdp_reset(struct radeon_device *rdev);
-void r100_mc_disable_clients(struct radeon_device *rdev);
-int r300_mc_wait_for_idle(struct radeon_device *rdev);
-void r420_pipes_init(struct radeon_device *rdev);
+/* This file gathers functions specific to: rs400, rs480 */
+static int rs400_debugfs_pcie_gart_info_init(struct radeon_device *rdev);
-/* This files gather functions specifics to :
- * rs400,rs480
- *
- * Some of these functions might be used by newer ASICs.
- */
-void rs400_gpu_init(struct radeon_device *rdev);
-int rs400_debugfs_pcie_gart_info_init(struct radeon_device *rdev);
-
-
-/*
- * GART functions.
- */
void rs400_gart_adjust_size(struct radeon_device *rdev)
{
/* Check gart size */
@@ -238,61 +223,6 @@ int rs400_gart_set_page(struct radeon_device *rdev, int i, uint64_t addr)
return 0;
}
-
-/*
- * MC functions.
- */
-int rs400_mc_init(struct radeon_device *rdev)
-{
- uint32_t tmp;
- int r;
-
- if (r100_debugfs_rbbm_init(rdev)) {
- DRM_ERROR("Failed to register debugfs file for RBBM !\n");
- }
-
- rs400_gpu_init(rdev);
- rs400_gart_disable(rdev);
- rdev->mc.gtt_location = rdev->mc.mc_vram_size;
- rdev->mc.gtt_location += (rdev->mc.gtt_size - 1);
- rdev->mc.gtt_location &= ~(rdev->mc.gtt_size - 1);
- r = radeon_mc_setup(rdev);
- if (r) {
- return r;
- }
-
- r100_mc_disable_clients(rdev);
- if (r300_mc_wait_for_idle(rdev)) {
- printk(KERN_WARNING "Failed to wait MC idle while "
- "programming pipes. Bad things might happen.\n");
- }
-
- tmp = rdev->mc.vram_location + rdev->mc.mc_vram_size - 1;
- tmp = REG_SET(RADEON_MC_FB_TOP, tmp >> 16);
- tmp |= REG_SET(RADEON_MC_FB_START, rdev->mc.vram_location >> 16);
- WREG32(RADEON_MC_FB_LOCATION, tmp);
- tmp = RREG32(RADEON_HOST_PATH_CNTL) | RADEON_HP_LIN_RD_CACHE_DIS;
- WREG32(RADEON_HOST_PATH_CNTL, tmp | RADEON_HDP_SOFT_RESET | RADEON_HDP_READ_BUFFER_INVALIDATE);
- (void)RREG32(RADEON_HOST_PATH_CNTL);
- WREG32(RADEON_HOST_PATH_CNTL, tmp);
- (void)RREG32(RADEON_HOST_PATH_CNTL);
-
- return 0;
-}
-
-void rs400_mc_fini(struct radeon_device *rdev)
-{
-}
-
-
-/*
- * Global GPU functions
- */
-void rs400_errata(struct radeon_device *rdev)
-{
- rdev->pll_errata = 0;
-}
-
void rs400_gpu_init(struct radeon_device *rdev)
{
/* FIXME: HDP same place on rs400 ? */
@@ -305,10 +235,6 @@ void rs400_gpu_init(struct radeon_device *rdev)
}
}
-
-/*
- * VRAM info.
- */
void rs400_vram_info(struct radeon_device *rdev)
{
rs400_gart_adjust_size(rdev);
@@ -319,10 +245,6 @@ void rs400_vram_info(struct radeon_device *rdev)
r100_vram_init_sizes(rdev);
}
-
-/*
- * Indirect registers accessor
- */
uint32_t rs400_mc_rreg(struct radeon_device *rdev, uint32_t reg)
{
uint32_t r;
@@ -340,10 +262,6 @@ void rs400_mc_wreg(struct radeon_device *rdev, uint32_t reg, uint32_t v)
WREG32(RS480_NB_MC_INDEX, 0xff);
}
-
-/*
- * Debugfs info
- */
#if defined(CONFIG_DEBUG_FS)
static int rs400_debugfs_gart_info(struct seq_file *m, void *data)
{
@@ -419,7 +337,7 @@ static struct drm_info_list rs400_gart_info_list[] = {
};
#endif
-int rs400_debugfs_pcie_gart_info_init(struct radeon_device *rdev)
+static int rs400_debugfs_pcie_gart_info_init(struct radeon_device *rdev)
{
#if defined(CONFIG_DEBUG_FS)
return radeon_debugfs_add_files(rdev, rs400_gart_info_list, 1);
@@ -427,3 +345,190 @@ int rs400_debugfs_pcie_gart_info_init(struct radeon_device *rdev)
return 0;
#endif
}
+
+static int rs400_mc_init(struct radeon_device *rdev)
+{
+ int r;
+ u32 tmp;
+
+ /* Setup GPU memory space */
+ tmp = G_00015C_MC_FB_START(RREG32(R_00015C_NB_TOM));
+ rdev->mc.vram_location = G_00015C_MC_FB_START(tmp) << 16;
+ rdev->mc.gtt_location = 0xFFFFFFFFUL;
+ r = radeon_mc_setup(rdev);
+ if (r)
+ return r;
+ return 0;
+}
+
+void rs400_mc_program(struct radeon_device *rdev)
+{
+ struct r100_mc_save save;
+
+ /* Stops all mc clients */
+ r100_mc_stop(rdev, &save);
+
+ /* Wait for mc idle */
+ if (r300_mc_wait_for_idle(rdev))
+ dev_warn(rdev->dev, "Wait MC idle timeout before updating MC.\n");
+ WREG32(R_000148_MC_FB_LOCATION,
+ S_000148_MC_FB_START(rdev->mc.vram_start >> 16) |
+ S_000148_MC_FB_TOP(rdev->mc.vram_end >> 16));
+
+ r100_mc_resume(rdev, &save);
+}
+
+static int rs400_startup(struct radeon_device *rdev)
+{
+ int r;
+
+ rs400_mc_program(rdev);
+ /* Resume clock */
+ r300_clock_startup(rdev);
+ /* Initialize GPU configuration (# pipes, ...) */
+ rs400_gpu_init(rdev);
+ /* Initialize GART (initialize after TTM so we can allocate
+ * memory through TTM but finalize after TTM) */
+ r = rs400_gart_enable(rdev);
+ if (r)
+ return r;
+ /* Enable IRQ */
+ rdev->irq.sw_int = true;
+ r100_irq_set(rdev);
+ /* 1M ring buffer */
+ r = r100_cp_init(rdev, 1024 * 1024);
+ if (r) {
+ dev_err(rdev->dev, "failled initializing CP (%d).\n", r);
+ return r;
+ }
+ r = r100_wb_init(rdev);
+ if (r)
+ dev_err(rdev->dev, "failled initializing WB (%d).\n", r);
+ r = r100_ib_init(rdev);
+ if (r) {
+ dev_err(rdev->dev, "failled initializing IB (%d).\n", r);
+ return r;
+ }
+ return 0;
+}
+
+int rs400_resume(struct radeon_device *rdev)
+{
+	/* Make sure GART is not working */
+ rs400_gart_disable(rdev);
+ /* Resume clock before doing reset */
+ r300_clock_startup(rdev);
+ /* setup MC before calling post tables */
+ rs400_mc_program(rdev);
+ /* Reset gpu before posting otherwise ATOM will enter infinite loop */
+ if (radeon_gpu_reset(rdev)) {
+ dev_warn(rdev->dev, "GPU reset failed ! (0xE40=0x%08X, 0x7C0=0x%08X)\n",
+ RREG32(R_000E40_RBBM_STATUS),
+ RREG32(R_0007C0_CP_STAT));
+ }
+ /* post */
+ radeon_combios_asic_init(rdev->ddev);
+ /* Resume clock after posting */
+ r300_clock_startup(rdev);
+ return rs400_startup(rdev);
+}
+
+int rs400_suspend(struct radeon_device *rdev)
+{
+ r100_cp_disable(rdev);
+ r100_wb_disable(rdev);
+ r100_irq_disable(rdev);
+ rs400_gart_disable(rdev);
+ return 0;
+}
+
+void rs400_fini(struct radeon_device *rdev)
+{
+ rs400_suspend(rdev);
+ r100_cp_fini(rdev);
+ r100_wb_fini(rdev);
+ r100_ib_fini(rdev);
+ radeon_gem_fini(rdev);
+ rs400_gart_fini(rdev);
+ radeon_irq_kms_fini(rdev);
+ radeon_fence_driver_fini(rdev);
+ radeon_object_fini(rdev);
+ radeon_atombios_fini(rdev);
+ kfree(rdev->bios);
+ rdev->bios = NULL;
+}
+
+int rs400_init(struct radeon_device *rdev)
+{
+ int r;
+
+ /* Disable VGA */
+ r100_vga_render_disable(rdev);
+ /* Initialize scratch registers */
+ radeon_scratch_init(rdev);
+ /* Initialize surface registers */
+ radeon_surface_init(rdev);
+ /* TODO: disable VGA need to use VGA request */
+	/* BIOS */
+ if (!radeon_get_bios(rdev)) {
+ if (ASIC_IS_AVIVO(rdev))
+ return -EINVAL;
+ }
+ if (rdev->is_atom_bios) {
+ dev_err(rdev->dev, "Expecting combios for RS400/RS480 GPU\n");
+ return -EINVAL;
+ } else {
+ r = radeon_combios_init(rdev);
+ if (r)
+ return r;
+ }
+ /* Reset gpu before posting otherwise ATOM will enter infinite loop */
+ if (radeon_gpu_reset(rdev)) {
+ dev_warn(rdev->dev,
+ "GPU reset failed ! (0xE40=0x%08X, 0x7C0=0x%08X)\n",
+ RREG32(R_000E40_RBBM_STATUS),
+ RREG32(R_0007C0_CP_STAT));
+ }
+ /* check if cards are posted or not */
+ if (!radeon_card_posted(rdev) && rdev->bios) {
+ DRM_INFO("GPU not posted. posting now...\n");
+ radeon_combios_asic_init(rdev->ddev);
+ }
+ /* Initialize clocks */
+ radeon_get_clock_info(rdev->ddev);
+	/* Get VRAM information */
+ rs400_vram_info(rdev);
+ /* Initialize memory controller (also test AGP) */
+ r = rs400_mc_init(rdev);
+ if (r)
+ return r;
+ /* Fence driver */
+ r = radeon_fence_driver_init(rdev);
+ if (r)
+ return r;
+ r = radeon_irq_kms_init(rdev);
+ if (r)
+ return r;
+ /* Memory manager */
+ r = radeon_object_init(rdev);
+ if (r)
+ return r;
+ r = rs400_gart_init(rdev);
+ if (r)
+ return r;
+ r300_set_reg_safe(rdev);
+ rdev->accel_working = true;
+ r = rs400_startup(rdev);
+ if (r) {
+	/* Something went wrong with the accel init; stop accel */
+ dev_err(rdev->dev, "Disabling GPU acceleration\n");
+ rs400_suspend(rdev);
+ r100_cp_fini(rdev);
+ r100_wb_fini(rdev);
+ r100_ib_fini(rdev);
+ rs400_gart_fini(rdev);
+ radeon_irq_kms_fini(rdev);
+ rdev->accel_working = false;
+ }
+ return 0;
+}
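Most of the rs400.c change is structural: MC programming and GART/CP/IRQ bring-up move into rs400_startup(), and rs400_init() keeps rdev->accel_working set only if startup succeeds, otherwise it tears the accel blocks back down and the driver carries on modesetting-only. A stub sketch of that control flow; every name below is a placeholder, not the driver's API:

#include <stdbool.h>
#include <stdio.h>

struct dev { bool accel_working; };

/* Placeholder bring-up steps; 0 means success. */
static int  bring_up_gart(struct dev *d)   { (void)d; return 0; }
static int  bring_up_cp(struct dev *d)     { (void)d; return -1; } /* pretend CP init fails */
static void tear_down_accel(struct dev *d) { (void)d; }

static int startup(struct dev *d)
{
	int r;

	r = bring_up_gart(d);
	if (r)
		return r;
	r = bring_up_cp(d);
	if (r)
		return r;
	return 0;
}

static int init(struct dev *d)
{
	d->accel_working = true;
	if (startup(d)) {
		/* Something went wrong with the accel init; keep modesetting alive. */
		fprintf(stderr, "Disabling GPU acceleration\n");
		tear_down_accel(d);
		d->accel_working = false;
	}
	return 0;   /* init itself still succeeds */
}

int main(void)
{
	struct dev d;

	init(&d);
	printf("accel_working = %d\n", d.accel_working);
	return 0;
}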
diff --git a/drivers/gpu/drm/radeon/rs400d.h b/drivers/gpu/drm/radeon/rs400d.h
new file mode 100644
index 00000000000..6d8bac58ced
--- /dev/null
+++ b/drivers/gpu/drm/radeon/rs400d.h
@@ -0,0 +1,160 @@
+/*
+ * Copyright 2008 Advanced Micro Devices, Inc.
+ * Copyright 2008 Red Hat Inc.
+ * Copyright 2009 Jerome Glisse.
+ *
+ * Permission is hereby granted, free of charge, to any person obtaining a
+ * copy of this software and associated documentation files (the "Software"),
+ * to deal in the Software without restriction, including without limitation
+ * the rights to use, copy, modify, merge, publish, distribute, sublicense,
+ * and/or sell copies of the Software, and to permit persons to whom the
+ * Software is furnished to do so, subject to the following conditions:
+ *
+ * The above copyright notice and this permission notice shall be included in
+ * all copies or substantial portions of the Software.
+ *
+ * THE SOFTWARE IS PROVIDED "AS IS", WITHOUT WARRANTY OF ANY KIND, EXPRESS OR
+ * IMPLIED, INCLUDING BUT NOT LIMITED TO THE WARRANTIES OF MERCHANTABILITY,
+ * FITNESS FOR A PARTICULAR PURPOSE AND NONINFRINGEMENT. IN NO EVENT SHALL
+ * THE COPYRIGHT HOLDER(S) OR AUTHOR(S) BE LIABLE FOR ANY CLAIM, DAMAGES OR
+ * OTHER LIABILITY, WHETHER IN AN ACTION OF CONTRACT, TORT OR OTHERWISE,
+ * ARISING FROM, OUT OF OR IN CONNECTION WITH THE SOFTWARE OR THE USE OR
+ * OTHER DEALINGS IN THE SOFTWARE.
+ *
+ * Authors: Dave Airlie
+ * Alex Deucher
+ * Jerome Glisse
+ */
+#ifndef __RS400D_H__
+#define __RS400D_H__
+
+/* Registers */
+#define R_000148_MC_FB_LOCATION 0x000148
+#define S_000148_MC_FB_START(x) (((x) & 0xFFFF) << 0)
+#define G_000148_MC_FB_START(x) (((x) >> 0) & 0xFFFF)
+#define C_000148_MC_FB_START 0xFFFF0000
+#define S_000148_MC_FB_TOP(x) (((x) & 0xFFFF) << 16)
+#define G_000148_MC_FB_TOP(x) (((x) >> 16) & 0xFFFF)
+#define C_000148_MC_FB_TOP 0x0000FFFF
+#define R_00015C_NB_TOM 0x00015C
+#define S_00015C_MC_FB_START(x) (((x) & 0xFFFF) << 0)
+#define G_00015C_MC_FB_START(x) (((x) >> 0) & 0xFFFF)
+#define C_00015C_MC_FB_START 0xFFFF0000
+#define S_00015C_MC_FB_TOP(x) (((x) & 0xFFFF) << 16)
+#define G_00015C_MC_FB_TOP(x) (((x) >> 16) & 0xFFFF)
+#define C_00015C_MC_FB_TOP 0x0000FFFF
+#define R_0007C0_CP_STAT 0x0007C0
+#define S_0007C0_MRU_BUSY(x) (((x) & 0x1) << 0)
+#define G_0007C0_MRU_BUSY(x) (((x) >> 0) & 0x1)
+#define C_0007C0_MRU_BUSY 0xFFFFFFFE
+#define S_0007C0_MWU_BUSY(x) (((x) & 0x1) << 1)
+#define G_0007C0_MWU_BUSY(x) (((x) >> 1) & 0x1)
+#define C_0007C0_MWU_BUSY 0xFFFFFFFD
+#define S_0007C0_RSIU_BUSY(x) (((x) & 0x1) << 2)
+#define G_0007C0_RSIU_BUSY(x) (((x) >> 2) & 0x1)
+#define C_0007C0_RSIU_BUSY 0xFFFFFFFB
+#define S_0007C0_RCIU_BUSY(x) (((x) & 0x1) << 3)
+#define G_0007C0_RCIU_BUSY(x) (((x) >> 3) & 0x1)
+#define C_0007C0_RCIU_BUSY 0xFFFFFFF7
+#define S_0007C0_CSF_PRIMARY_BUSY(x) (((x) & 0x1) << 9)
+#define G_0007C0_CSF_PRIMARY_BUSY(x) (((x) >> 9) & 0x1)
+#define C_0007C0_CSF_PRIMARY_BUSY 0xFFFFFDFF
+#define S_0007C0_CSF_INDIRECT_BUSY(x) (((x) & 0x1) << 10)
+#define G_0007C0_CSF_INDIRECT_BUSY(x) (((x) >> 10) & 0x1)
+#define C_0007C0_CSF_INDIRECT_BUSY 0xFFFFFBFF
+#define S_0007C0_CSQ_PRIMARY_BUSY(x) (((x) & 0x1) << 11)
+#define G_0007C0_CSQ_PRIMARY_BUSY(x) (((x) >> 11) & 0x1)
+#define C_0007C0_CSQ_PRIMARY_BUSY 0xFFFFF7FF
+#define S_0007C0_CSQ_INDIRECT_BUSY(x) (((x) & 0x1) << 12)
+#define G_0007C0_CSQ_INDIRECT_BUSY(x) (((x) >> 12) & 0x1)
+#define C_0007C0_CSQ_INDIRECT_BUSY 0xFFFFEFFF
+#define S_0007C0_CSI_BUSY(x) (((x) & 0x1) << 13)
+#define G_0007C0_CSI_BUSY(x) (((x) >> 13) & 0x1)
+#define C_0007C0_CSI_BUSY 0xFFFFDFFF
+#define S_0007C0_CSF_INDIRECT2_BUSY(x) (((x) & 0x1) << 14)
+#define G_0007C0_CSF_INDIRECT2_BUSY(x) (((x) >> 14) & 0x1)
+#define C_0007C0_CSF_INDIRECT2_BUSY 0xFFFFBFFF
+#define S_0007C0_CSQ_INDIRECT2_BUSY(x) (((x) & 0x1) << 15)
+#define G_0007C0_CSQ_INDIRECT2_BUSY(x) (((x) >> 15) & 0x1)
+#define C_0007C0_CSQ_INDIRECT2_BUSY 0xFFFF7FFF
+#define S_0007C0_GUIDMA_BUSY(x) (((x) & 0x1) << 28)
+#define G_0007C0_GUIDMA_BUSY(x) (((x) >> 28) & 0x1)
+#define C_0007C0_GUIDMA_BUSY 0xEFFFFFFF
+#define S_0007C0_VIDDMA_BUSY(x) (((x) & 0x1) << 29)
+#define G_0007C0_VIDDMA_BUSY(x) (((x) >> 29) & 0x1)
+#define C_0007C0_VIDDMA_BUSY 0xDFFFFFFF
+#define S_0007C0_CMDSTRM_BUSY(x) (((x) & 0x1) << 30)
+#define G_0007C0_CMDSTRM_BUSY(x) (((x) >> 30) & 0x1)
+#define C_0007C0_CMDSTRM_BUSY 0xBFFFFFFF
+#define S_0007C0_CP_BUSY(x) (((x) & 0x1) << 31)
+#define G_0007C0_CP_BUSY(x) (((x) >> 31) & 0x1)
+#define C_0007C0_CP_BUSY 0x7FFFFFFF
+#define R_000E40_RBBM_STATUS 0x000E40
+#define S_000E40_CMDFIFO_AVAIL(x) (((x) & 0x7F) << 0)
+#define G_000E40_CMDFIFO_AVAIL(x) (((x) >> 0) & 0x7F)
+#define C_000E40_CMDFIFO_AVAIL 0xFFFFFF80
+#define S_000E40_HIRQ_ON_RBB(x) (((x) & 0x1) << 8)
+#define G_000E40_HIRQ_ON_RBB(x) (((x) >> 8) & 0x1)
+#define C_000E40_HIRQ_ON_RBB 0xFFFFFEFF
+#define S_000E40_CPRQ_ON_RBB(x) (((x) & 0x1) << 9)
+#define G_000E40_CPRQ_ON_RBB(x) (((x) >> 9) & 0x1)
+#define C_000E40_CPRQ_ON_RBB 0xFFFFFDFF
+#define S_000E40_CFRQ_ON_RBB(x) (((x) & 0x1) << 10)
+#define G_000E40_CFRQ_ON_RBB(x) (((x) >> 10) & 0x1)
+#define C_000E40_CFRQ_ON_RBB 0xFFFFFBFF
+#define S_000E40_HIRQ_IN_RTBUF(x) (((x) & 0x1) << 11)
+#define G_000E40_HIRQ_IN_RTBUF(x) (((x) >> 11) & 0x1)
+#define C_000E40_HIRQ_IN_RTBUF 0xFFFFF7FF
+#define S_000E40_CPRQ_IN_RTBUF(x) (((x) & 0x1) << 12)
+#define G_000E40_CPRQ_IN_RTBUF(x) (((x) >> 12) & 0x1)
+#define C_000E40_CPRQ_IN_RTBUF 0xFFFFEFFF
+#define S_000E40_CFRQ_IN_RTBUF(x) (((x) & 0x1) << 13)
+#define G_000E40_CFRQ_IN_RTBUF(x) (((x) >> 13) & 0x1)
+#define C_000E40_CFRQ_IN_RTBUF 0xFFFFDFFF
+#define S_000E40_CF_PIPE_BUSY(x) (((x) & 0x1) << 14)
+#define G_000E40_CF_PIPE_BUSY(x) (((x) >> 14) & 0x1)
+#define C_000E40_CF_PIPE_BUSY 0xFFFFBFFF
+#define S_000E40_ENG_EV_BUSY(x) (((x) & 0x1) << 15)
+#define G_000E40_ENG_EV_BUSY(x) (((x) >> 15) & 0x1)
+#define C_000E40_ENG_EV_BUSY 0xFFFF7FFF
+#define S_000E40_CP_CMDSTRM_BUSY(x) (((x) & 0x1) << 16)
+#define G_000E40_CP_CMDSTRM_BUSY(x) (((x) >> 16) & 0x1)
+#define C_000E40_CP_CMDSTRM_BUSY 0xFFFEFFFF
+#define S_000E40_E2_BUSY(x) (((x) & 0x1) << 17)
+#define G_000E40_E2_BUSY(x) (((x) >> 17) & 0x1)
+#define C_000E40_E2_BUSY 0xFFFDFFFF
+#define S_000E40_RB2D_BUSY(x) (((x) & 0x1) << 18)
+#define G_000E40_RB2D_BUSY(x) (((x) >> 18) & 0x1)
+#define C_000E40_RB2D_BUSY 0xFFFBFFFF
+#define S_000E40_RB3D_BUSY(x) (((x) & 0x1) << 19)
+#define G_000E40_RB3D_BUSY(x) (((x) >> 19) & 0x1)
+#define C_000E40_RB3D_BUSY 0xFFF7FFFF
+#define S_000E40_VAP_BUSY(x) (((x) & 0x1) << 20)
+#define G_000E40_VAP_BUSY(x) (((x) >> 20) & 0x1)
+#define C_000E40_VAP_BUSY 0xFFEFFFFF
+#define S_000E40_RE_BUSY(x) (((x) & 0x1) << 21)
+#define G_000E40_RE_BUSY(x) (((x) >> 21) & 0x1)
+#define C_000E40_RE_BUSY 0xFFDFFFFF
+#define S_000E40_TAM_BUSY(x) (((x) & 0x1) << 22)
+#define G_000E40_TAM_BUSY(x) (((x) >> 22) & 0x1)
+#define C_000E40_TAM_BUSY 0xFFBFFFFF
+#define S_000E40_TDM_BUSY(x) (((x) & 0x1) << 23)
+#define G_000E40_TDM_BUSY(x) (((x) >> 23) & 0x1)
+#define C_000E40_TDM_BUSY 0xFF7FFFFF
+#define S_000E40_PB_BUSY(x) (((x) & 0x1) << 24)
+#define G_000E40_PB_BUSY(x) (((x) >> 24) & 0x1)
+#define C_000E40_PB_BUSY 0xFEFFFFFF
+#define S_000E40_TIM_BUSY(x) (((x) & 0x1) << 25)
+#define G_000E40_TIM_BUSY(x) (((x) >> 25) & 0x1)
+#define C_000E40_TIM_BUSY 0xFDFFFFFF
+#define S_000E40_GA_BUSY(x) (((x) & 0x1) << 26)
+#define G_000E40_GA_BUSY(x) (((x) >> 26) & 0x1)
+#define C_000E40_GA_BUSY 0xFBFFFFFF
+#define S_000E40_CBA2D_BUSY(x) (((x) & 0x1) << 27)
+#define G_000E40_CBA2D_BUSY(x) (((x) >> 27) & 0x1)
+#define C_000E40_CBA2D_BUSY 0xF7FFFFFF
+#define S_000E40_GUI_ACTIVE(x) (((x) & 0x1) << 31)
+#define G_000E40_GUI_ACTIVE(x) (((x) >> 31) & 0x1)
+#define C_000E40_GUI_ACTIVE 0x7FFFFFFF
+
+#endif
diff --git a/drivers/gpu/drm/radeon/rs600.c b/drivers/gpu/drm/radeon/rs600.c
index 4a4fe1cb131..5f117cd8736 100644
--- a/drivers/gpu/drm/radeon/rs600.c
+++ b/drivers/gpu/drm/radeon/rs600.c
@@ -25,27 +25,26 @@
* Alex Deucher
* Jerome Glisse
*/
+/* RS600 / Radeon X1250/X1270 integrated GPU
+ *
+ * This file gathers functions specific to the RS600, which is the IGP of
+ * the X1250/X1270 family supporting Intel CPUs (while RS690/RS740 is the
+ * X1250/X1270 part supporting AMD CPUs). The display engine is the avivo
+ * one, the BIOS is an atombios, and the 3D block is the one from the
+ * R4XX family. The GART is different from the RS400 one and is very
+ * close to the one of the R600 family (R600 likely being an evolution
+ * of the RS600 GART block).
+ */
#include "drmP.h"
-#include "radeon_reg.h"
#include "radeon.h"
+#include "atom.h"
+#include "rs600d.h"
#include "rs600_reg_safe.h"
-/* rs600 depends on : */
-void r100_hdp_reset(struct radeon_device *rdev);
-int r100_gui_wait_for_idle(struct radeon_device *rdev);
-int r300_mc_wait_for_idle(struct radeon_device *rdev);
-void r420_pipes_init(struct radeon_device *rdev);
-
-/* This files gather functions specifics to :
- * rs600
- *
- * Some of these functions might be used by newer ASICs.
- */
void rs600_gpu_init(struct radeon_device *rdev);
int rs600_mc_wait_for_idle(struct radeon_device *rdev);
-
/*
* GART.
*/
@@ -53,18 +52,18 @@ void rs600_gart_tlb_flush(struct radeon_device *rdev)
{
uint32_t tmp;
- tmp = RREG32_MC(RS600_MC_PT0_CNTL);
- tmp &= ~(RS600_INVALIDATE_ALL_L1_TLBS | RS600_INVALIDATE_L2_CACHE);
- WREG32_MC(RS600_MC_PT0_CNTL, tmp);
+ tmp = RREG32_MC(R_000100_MC_PT0_CNTL);
+ tmp &= C_000100_INVALIDATE_ALL_L1_TLBS & C_000100_INVALIDATE_L2_CACHE;
+ WREG32_MC(R_000100_MC_PT0_CNTL, tmp);
- tmp = RREG32_MC(RS600_MC_PT0_CNTL);
- tmp |= RS600_INVALIDATE_ALL_L1_TLBS | RS600_INVALIDATE_L2_CACHE;
- WREG32_MC(RS600_MC_PT0_CNTL, tmp);
+ tmp = RREG32_MC(R_000100_MC_PT0_CNTL);
+ tmp |= S_000100_INVALIDATE_ALL_L1_TLBS(1) & S_000100_INVALIDATE_L2_CACHE(1);
+ WREG32_MC(R_000100_MC_PT0_CNTL, tmp);
- tmp = RREG32_MC(RS600_MC_PT0_CNTL);
- tmp &= ~(RS600_INVALIDATE_ALL_L1_TLBS | RS600_INVALIDATE_L2_CACHE);
- WREG32_MC(RS600_MC_PT0_CNTL, tmp);
- tmp = RREG32_MC(RS600_MC_PT0_CNTL);
+ tmp = RREG32_MC(R_000100_MC_PT0_CNTL);
+ tmp &= C_000100_INVALIDATE_ALL_L1_TLBS & C_000100_INVALIDATE_L2_CACHE;
+ WREG32_MC(R_000100_MC_PT0_CNTL, tmp);
+ tmp = RREG32_MC(R_000100_MC_PT0_CNTL);
}
int rs600_gart_init(struct radeon_device *rdev)
@@ -86,7 +85,7 @@ int rs600_gart_init(struct radeon_device *rdev)
int rs600_gart_enable(struct radeon_device *rdev)
{
- uint32_t tmp;
+ u32 tmp;
int r, i;
if (rdev->gart.table.vram.robj == NULL) {
@@ -96,46 +95,50 @@ int rs600_gart_enable(struct radeon_device *rdev)
r = radeon_gart_table_vram_pin(rdev);
if (r)
return r;
+ /* Enable bus master */
+ tmp = RREG32(R_00004C_BUS_CNTL) & C_00004C_BUS_MASTER_DIS;
+ WREG32(R_00004C_BUS_CNTL, tmp);
/* FIXME: setup default page */
- WREG32_MC(RS600_MC_PT0_CNTL,
- (RS600_EFFECTIVE_L2_CACHE_SIZE(6) |
- RS600_EFFECTIVE_L2_QUEUE_SIZE(6)));
+ WREG32_MC(R_000100_MC_PT0_CNTL,
+ (S_000100_EFFECTIVE_L2_CACHE_SIZE(6) |
+ S_000100_EFFECTIVE_L2_QUEUE_SIZE(6)));
for (i = 0; i < 19; i++) {
- WREG32_MC(RS600_MC_PT0_CLIENT0_CNTL + i,
- (RS600_ENABLE_TRANSLATION_MODE_OVERRIDE |
- RS600_SYSTEM_ACCESS_MODE_IN_SYS |
- RS600_SYSTEM_APERTURE_UNMAPPED_ACCESS_DEFAULT_PAGE |
- RS600_EFFECTIVE_L1_CACHE_SIZE(3) |
- RS600_ENABLE_FRAGMENT_PROCESSING |
- RS600_EFFECTIVE_L1_QUEUE_SIZE(3)));
+ WREG32_MC(R_00016C_MC_PT0_CLIENT0_CNTL + i,
+ S_00016C_ENABLE_TRANSLATION_MODE_OVERRIDE(1) |
+ S_00016C_SYSTEM_ACCESS_MODE_MASK(
+ V_00016C_SYSTEM_ACCESS_MODE_IN_SYS) |
+ S_00016C_SYSTEM_APERTURE_UNMAPPED_ACCESS(
+ V_00016C_SYSTEM_APERTURE_UNMAPPED_DEFAULT_PAGE) |
+ S_00016C_EFFECTIVE_L1_CACHE_SIZE(1) |
+ S_00016C_ENABLE_FRAGMENT_PROCESSING(1) |
+ S_00016C_EFFECTIVE_L1_QUEUE_SIZE(1));
}
/* System context map to GART space */
- WREG32_MC(RS600_MC_PT0_SYSTEM_APERTURE_LOW_ADDR, rdev->mc.gtt_location);
- tmp = rdev->mc.gtt_location + rdev->mc.gtt_size - 1;
- WREG32_MC(RS600_MC_PT0_SYSTEM_APERTURE_HIGH_ADDR, tmp);
+ WREG32_MC(R_000112_MC_PT0_SYSTEM_APERTURE_LOW_ADDR, rdev->mc.gtt_start);
+ WREG32_MC(R_000114_MC_PT0_SYSTEM_APERTURE_HIGH_ADDR, rdev->mc.gtt_end);
/* enable first context */
- WREG32_MC(RS600_MC_PT0_CONTEXT0_FLAT_START_ADDR, rdev->mc.gtt_location);
- tmp = rdev->mc.gtt_location + rdev->mc.gtt_size - 1;
- WREG32_MC(RS600_MC_PT0_CONTEXT0_FLAT_END_ADDR, tmp);
- WREG32_MC(RS600_MC_PT0_CONTEXT0_CNTL,
- (RS600_ENABLE_PAGE_TABLE | RS600_PAGE_TABLE_TYPE_FLAT));
+ WREG32_MC(R_00013C_MC_PT0_CONTEXT0_FLAT_START_ADDR, rdev->mc.gtt_start);
+ WREG32_MC(R_00014C_MC_PT0_CONTEXT0_FLAT_END_ADDR, rdev->mc.gtt_end);
+ WREG32_MC(R_000102_MC_PT0_CONTEXT0_CNTL,
+ S_000102_ENABLE_PAGE_TABLE(1) |
+ S_000102_PAGE_TABLE_DEPTH(V_000102_PAGE_TABLE_FLAT));
/* disable all other contexts */
for (i = 1; i < 8; i++) {
- WREG32_MC(RS600_MC_PT0_CONTEXT0_CNTL + i, 0);
+ WREG32_MC(R_000102_MC_PT0_CONTEXT0_CNTL + i, 0);
}
/* setup the page table */
- WREG32_MC(RS600_MC_PT0_CONTEXT0_FLAT_BASE_ADDR,
- rdev->gart.table_addr);
- WREG32_MC(RS600_MC_PT0_CONTEXT0_DEFAULT_READ_ADDR, 0);
+ WREG32_MC(R_00012C_MC_PT0_CONTEXT0_FLAT_BASE_ADDR,
+ rdev->gart.table_addr);
+ WREG32_MC(R_00011C_MC_PT0_CONTEXT0_DEFAULT_READ_ADDR, 0);
/* enable page tables */
- tmp = RREG32_MC(RS600_MC_PT0_CNTL);
- WREG32_MC(RS600_MC_PT0_CNTL, (tmp | RS600_ENABLE_PT));
- tmp = RREG32_MC(RS600_MC_CNTL1);
- WREG32_MC(RS600_MC_CNTL1, (tmp | RS600_ENABLE_PAGE_TABLES));
+ tmp = RREG32_MC(R_000100_MC_PT0_CNTL);
+ WREG32_MC(R_000100_MC_PT0_CNTL, (tmp | S_000100_ENABLE_PT(1)));
+ tmp = RREG32_MC(R_000009_MC_CNTL1);
+ WREG32_MC(R_000009_MC_CNTL1, (tmp | S_000009_ENABLE_PAGE_TABLES(1)));
rs600_gart_tlb_flush(rdev);
rdev->gart.ready = true;
return 0;
@@ -146,10 +149,9 @@ void rs600_gart_disable(struct radeon_device *rdev)
uint32_t tmp;
/* FIXME: disable out of gart access */
- WREG32_MC(RS600_MC_PT0_CNTL, 0);
- tmp = RREG32_MC(RS600_MC_CNTL1);
- tmp &= ~RS600_ENABLE_PAGE_TABLES;
- WREG32_MC(RS600_MC_CNTL1, tmp);
+ WREG32_MC(R_000100_MC_PT0_CNTL, 0);
+ tmp = RREG32_MC(R_000009_MC_CNTL1);
+ WREG32_MC(R_000009_MC_CNTL1, tmp & C_000009_ENABLE_PAGE_TABLES);
if (rdev->gart.table.vram.robj) {
radeon_object_kunmap(rdev->gart.table.vram.robj);
radeon_object_unpin(rdev->gart.table.vram.robj);
@@ -183,132 +185,64 @@ int rs600_gart_set_page(struct radeon_device *rdev, int i, uint64_t addr)
return 0;
}
-
-/*
- * MC.
- */
-void rs600_mc_disable_clients(struct radeon_device *rdev)
-{
- unsigned tmp;
-
- if (r100_gui_wait_for_idle(rdev)) {
- printk(KERN_WARNING "Failed to wait GUI idle while "
- "programming pipes. Bad things might happen.\n");
- }
-
- rv515_vga_render_disable(rdev);
-
- tmp = RREG32(AVIVO_D1VGA_CONTROL);
- WREG32(AVIVO_D1VGA_CONTROL, tmp & ~AVIVO_DVGA_CONTROL_MODE_ENABLE);
- tmp = RREG32(AVIVO_D2VGA_CONTROL);
- WREG32(AVIVO_D2VGA_CONTROL, tmp & ~AVIVO_DVGA_CONTROL_MODE_ENABLE);
-
- tmp = RREG32(AVIVO_D1CRTC_CONTROL);
- WREG32(AVIVO_D1CRTC_CONTROL, tmp & ~AVIVO_CRTC_EN);
- tmp = RREG32(AVIVO_D2CRTC_CONTROL);
- WREG32(AVIVO_D2CRTC_CONTROL, tmp & ~AVIVO_CRTC_EN);
-
- /* make sure all previous write got through */
- tmp = RREG32(AVIVO_D2CRTC_CONTROL);
-
- mdelay(1);
-}
-
-int rs600_mc_init(struct radeon_device *rdev)
-{
- uint32_t tmp;
- int r;
-
- if (r100_debugfs_rbbm_init(rdev)) {
- DRM_ERROR("Failed to register debugfs file for RBBM !\n");
- }
-
- rs600_gpu_init(rdev);
- rs600_gart_disable(rdev);
-
- /* Setup GPU memory space */
- rdev->mc.vram_location = 0xFFFFFFFFUL;
- rdev->mc.gtt_location = 0xFFFFFFFFUL;
- r = radeon_mc_setup(rdev);
- if (r) {
- return r;
- }
-
- /* Program GPU memory space */
- /* Enable bus master */
- tmp = RREG32(RADEON_BUS_CNTL) & ~RS600_BUS_MASTER_DIS;
- WREG32(RADEON_BUS_CNTL, tmp);
- /* FIXME: What does AGP means for such chipset ? */
- WREG32_MC(RS600_MC_AGP_LOCATION, 0x0FFFFFFF);
- /* FIXME: are this AGP reg in indirect MC range ? */
- WREG32_MC(RS600_MC_AGP_BASE, 0);
- WREG32_MC(RS600_MC_AGP_BASE_2, 0);
- rs600_mc_disable_clients(rdev);
- if (rs600_mc_wait_for_idle(rdev)) {
- printk(KERN_WARNING "Failed to wait MC idle while "
- "programming pipes. Bad things might happen.\n");
- }
- tmp = rdev->mc.vram_location + rdev->mc.mc_vram_size - 1;
- tmp = REG_SET(RS600_MC_FB_TOP, tmp >> 16);
- tmp |= REG_SET(RS600_MC_FB_START, rdev->mc.vram_location >> 16);
- WREG32_MC(RS600_MC_FB_LOCATION, tmp);
- WREG32(RS690_HDP_FB_LOCATION, rdev->mc.vram_location >> 16);
- return 0;
-}
-
-void rs600_mc_fini(struct radeon_device *rdev)
-{
-}
-
-
-/*
- * Interrupts
- */
int rs600_irq_set(struct radeon_device *rdev)
{
uint32_t tmp = 0;
uint32_t mode_int = 0;
if (rdev->irq.sw_int) {
- tmp |= RADEON_SW_INT_ENABLE;
+ tmp |= S_000040_SW_INT_EN(1);
}
if (rdev->irq.crtc_vblank_int[0]) {
- mode_int |= AVIVO_D1MODE_INT_MASK;
+ mode_int |= S_006540_D1MODE_VBLANK_INT_MASK(1);
}
if (rdev->irq.crtc_vblank_int[1]) {
- mode_int |= AVIVO_D2MODE_INT_MASK;
+ mode_int |= S_006540_D2MODE_VBLANK_INT_MASK(1);
}
- WREG32(RADEON_GEN_INT_CNTL, tmp);
- WREG32(AVIVO_DxMODE_INT_MASK, mode_int);
+ WREG32(R_000040_GEN_INT_CNTL, tmp);
+ WREG32(R_006540_DxMODE_INT_MASK, mode_int);
return 0;
}
static inline uint32_t rs600_irq_ack(struct radeon_device *rdev, u32 *r500_disp_int)
{
- uint32_t irqs = RREG32(RADEON_GEN_INT_STATUS);
- uint32_t irq_mask = RADEON_SW_INT_TEST;
-
- if (irqs & AVIVO_DISPLAY_INT_STATUS) {
- *r500_disp_int = RREG32(AVIVO_DISP_INTERRUPT_STATUS);
- if (*r500_disp_int & AVIVO_D1_VBLANK_INTERRUPT) {
- WREG32(AVIVO_D1MODE_VBLANK_STATUS, AVIVO_VBLANK_ACK);
+ uint32_t irqs = RREG32(R_000044_GEN_INT_STATUS);
+ uint32_t irq_mask = ~C_000044_SW_INT;
+
+ if (G_000044_DISPLAY_INT_STAT(irqs)) {
+ *r500_disp_int = RREG32(R_007EDC_DISP_INTERRUPT_STATUS);
+ if (G_007EDC_LB_D1_VBLANK_INTERRUPT(*r500_disp_int)) {
+ WREG32(R_006534_D1MODE_VBLANK_STATUS,
+ S_006534_D1MODE_VBLANK_ACK(1));
}
- if (*r500_disp_int & AVIVO_D2_VBLANK_INTERRUPT) {
- WREG32(AVIVO_D2MODE_VBLANK_STATUS, AVIVO_VBLANK_ACK);
+ if (G_007EDC_LB_D2_VBLANK_INTERRUPT(*r500_disp_int)) {
+ WREG32(R_006D34_D2MODE_VBLANK_STATUS,
+ S_006D34_D2MODE_VBLANK_ACK(1));
}
} else {
*r500_disp_int = 0;
}
if (irqs) {
- WREG32(RADEON_GEN_INT_STATUS, irqs);
+ WREG32(R_000044_GEN_INT_STATUS, irqs);
}
return irqs & irq_mask;
}
+void rs600_irq_disable(struct radeon_device *rdev)
+{
+ u32 tmp;
+
+ WREG32(R_000040_GEN_INT_CNTL, 0);
+ WREG32(R_006540_DxMODE_INT_MASK, 0);
+ /* Wait and acknowledge irq */
+ mdelay(1);
+ rs600_irq_ack(rdev, &tmp);
+}
+
int rs600_irq_process(struct radeon_device *rdev)
{
- uint32_t status;
+ uint32_t status, msi_rearm;
uint32_t r500_disp_int;
status = rs600_irq_ack(rdev, &r500_disp_int);
@@ -317,71 +251,65 @@ int rs600_irq_process(struct radeon_device *rdev)
}
while (status || r500_disp_int) {
/* SW interrupt */
- if (status & RADEON_SW_INT_TEST) {
+ if (G_000040_SW_INT_EN(status))
radeon_fence_process(rdev);
- }
/* Vertical blank interrupts */
- if (r500_disp_int & AVIVO_D1_VBLANK_INTERRUPT) {
+ if (G_007EDC_LB_D1_VBLANK_INTERRUPT(r500_disp_int))
drm_handle_vblank(rdev->ddev, 0);
- }
- if (r500_disp_int & AVIVO_D2_VBLANK_INTERRUPT) {
+ if (G_007EDC_LB_D2_VBLANK_INTERRUPT(r500_disp_int))
drm_handle_vblank(rdev->ddev, 1);
- }
status = rs600_irq_ack(rdev, &r500_disp_int);
}
+ if (rdev->msi_enabled) {
+ switch (rdev->family) {
+ case CHIP_RS600:
+ case CHIP_RS690:
+ case CHIP_RS740:
+ msi_rearm = RREG32(RADEON_BUS_CNTL) & ~RS600_MSI_REARM;
+ WREG32(RADEON_BUS_CNTL, msi_rearm);
+ WREG32(RADEON_BUS_CNTL, msi_rearm | RS600_MSI_REARM);
+ break;
+ default:
+ msi_rearm = RREG32(RADEON_MSI_REARM_EN) & ~RV370_MSI_REARM_EN;
+ WREG32(RADEON_MSI_REARM_EN, msi_rearm);
+ WREG32(RADEON_MSI_REARM_EN, msi_rearm | RV370_MSI_REARM_EN);
+ break;
+ }
+ }
return IRQ_HANDLED;
}
u32 rs600_get_vblank_counter(struct radeon_device *rdev, int crtc)
{
if (crtc == 0)
- return RREG32(AVIVO_D1CRTC_FRAME_COUNT);
+ return RREG32(R_0060A4_D1CRTC_STATUS_FRAME_COUNT);
else
- return RREG32(AVIVO_D2CRTC_FRAME_COUNT);
+ return RREG32(R_0068A4_D2CRTC_STATUS_FRAME_COUNT);
}
-
-/*
- * Global GPU functions
- */
int rs600_mc_wait_for_idle(struct radeon_device *rdev)
{
unsigned i;
- uint32_t tmp;
for (i = 0; i < rdev->usec_timeout; i++) {
- /* read MC_STATUS */
- tmp = RREG32_MC(RS600_MC_STATUS);
- if (tmp & RS600_MC_STATUS_IDLE) {
+ if (G_000000_MC_IDLE(RREG32_MC(R_000000_MC_STATUS)))
return 0;
- }
- DRM_UDELAY(1);
+ udelay(1);
}
return -1;
}
-void rs600_errata(struct radeon_device *rdev)
-{
- rdev->pll_errata = 0;
-}
-
void rs600_gpu_init(struct radeon_device *rdev)
{
/* FIXME: HDP same place on rs600 ? */
r100_hdp_reset(rdev);
- rv515_vga_render_disable(rdev);
/* FIXME: is this correct ? */
r420_pipes_init(rdev);
- if (rs600_mc_wait_for_idle(rdev)) {
- printk(KERN_WARNING "Failed to wait MC idle while "
- "programming pipes. Bad things might happen.\n");
- }
+ /* Wait for mc idle */
+ if (rs600_mc_wait_for_idle(rdev))
+ dev_warn(rdev->dev, "Wait MC idle timeout before updating MC.\n");
}
-
-/*
- * VRAM info.
- */
void rs600_vram_info(struct radeon_device *rdev)
{
/* FIXME: to do or is these values sane ? */
@@ -394,31 +322,208 @@ void rs600_bandwidth_update(struct radeon_device *rdev)
/* FIXME: implement, should this be like rs690 ? */
}
-
-/*
- * Indirect registers accessor
- */
uint32_t rs600_mc_rreg(struct radeon_device *rdev, uint32_t reg)
{
- uint32_t r;
-
- WREG32(RS600_MC_INDEX,
- ((reg & RS600_MC_ADDR_MASK) | RS600_MC_IND_CITF_ARB0));
- r = RREG32(RS600_MC_DATA);
- return r;
+ WREG32(R_000070_MC_IND_INDEX, S_000070_MC_IND_ADDR(reg) |
+ S_000070_MC_IND_CITF_ARB0(1));
+ return RREG32(R_000074_MC_IND_DATA);
}
void rs600_mc_wreg(struct radeon_device *rdev, uint32_t reg, uint32_t v)
{
- WREG32(RS600_MC_INDEX,
- RS600_MC_IND_WR_EN | RS600_MC_IND_CITF_ARB0 |
- ((reg) & RS600_MC_ADDR_MASK));
- WREG32(RS600_MC_DATA, v);
+ WREG32(R_000070_MC_IND_INDEX, S_000070_MC_IND_ADDR(reg) |
+ S_000070_MC_IND_CITF_ARB0(1) | S_000070_MC_IND_WR_EN(1));
+ WREG32(R_000074_MC_IND_DATA, v);
}
-int rs600_init(struct radeon_device *rdev)
+void rs600_debugfs(struct radeon_device *rdev)
+{
+ if (r100_debugfs_rbbm_init(rdev))
+ DRM_ERROR("Failed to register debugfs file for RBBM !\n");
+}
+
+void rs600_set_safe_registers(struct radeon_device *rdev)
{
rdev->config.r300.reg_safe_bm = rs600_reg_safe_bm;
rdev->config.r300.reg_safe_bm_size = ARRAY_SIZE(rs600_reg_safe_bm);
+}
+
+static void rs600_mc_program(struct radeon_device *rdev)
+{
+ struct rv515_mc_save save;
+
+ /* Stops all mc clients */
+ rv515_mc_stop(rdev, &save);
+
+ /* Wait for mc idle */
+ if (rs600_mc_wait_for_idle(rdev))
+ dev_warn(rdev->dev, "Wait MC idle timeout before updating MC.\n");
+
+	/* FIXME: What does AGP mean for such a chipset? */
+ WREG32_MC(R_000005_MC_AGP_LOCATION, 0x0FFFFFFF);
+ WREG32_MC(R_000006_AGP_BASE, 0);
+ WREG32_MC(R_000007_AGP_BASE_2, 0);
+ /* Program MC */
+ WREG32_MC(R_000004_MC_FB_LOCATION,
+ S_000004_MC_FB_START(rdev->mc.vram_start >> 16) |
+ S_000004_MC_FB_TOP(rdev->mc.vram_end >> 16));
+ WREG32(R_000134_HDP_FB_LOCATION,
+ S_000134_HDP_FB_START(rdev->mc.vram_start >> 16));
+
+ rv515_mc_resume(rdev, &save);
+}
+
+static int rs600_startup(struct radeon_device *rdev)
+{
+ int r;
+
+ rs600_mc_program(rdev);
+ /* Resume clock */
+ rv515_clock_startup(rdev);
+ /* Initialize GPU configuration (# pipes, ...) */
+ rs600_gpu_init(rdev);
+ /* Initialize GART (initialize after TTM so we can allocate
+ * memory through TTM but finalize after TTM) */
+ r = rs600_gart_enable(rdev);
+ if (r)
+ return r;
+ /* Enable IRQ */
+ rdev->irq.sw_int = true;
+ rs600_irq_set(rdev);
+ /* 1M ring buffer */
+ r = r100_cp_init(rdev, 1024 * 1024);
+ if (r) {
+ dev_err(rdev->dev, "failled initializing CP (%d).\n", r);
+ return r;
+ }
+ r = r100_wb_init(rdev);
+ if (r)
+ dev_err(rdev->dev, "failled initializing WB (%d).\n", r);
+ r = r100_ib_init(rdev);
+ if (r) {
+ dev_err(rdev->dev, "failled initializing IB (%d).\n", r);
+ return r;
+ }
+ return 0;
+}
+
+int rs600_resume(struct radeon_device *rdev)
+{
+	/* Make sure GART is not working */
+ rs600_gart_disable(rdev);
+ /* Resume clock before doing reset */
+ rv515_clock_startup(rdev);
+ /* Reset gpu before posting otherwise ATOM will enter infinite loop */
+ if (radeon_gpu_reset(rdev)) {
+ dev_warn(rdev->dev, "GPU reset failed ! (0xE40=0x%08X, 0x7C0=0x%08X)\n",
+ RREG32(R_000E40_RBBM_STATUS),
+ RREG32(R_0007C0_CP_STAT));
+ }
+ /* post */
+ atom_asic_init(rdev->mode_info.atom_context);
+ /* Resume clock after posting */
+ rv515_clock_startup(rdev);
+ return rs600_startup(rdev);
+}
+
+int rs600_suspend(struct radeon_device *rdev)
+{
+ r100_cp_disable(rdev);
+ r100_wb_disable(rdev);
+ rs600_irq_disable(rdev);
+ rs600_gart_disable(rdev);
+ return 0;
+}
+
+void rs600_fini(struct radeon_device *rdev)
+{
+ rs600_suspend(rdev);
+ r100_cp_fini(rdev);
+ r100_wb_fini(rdev);
+ r100_ib_fini(rdev);
+ radeon_gem_fini(rdev);
+ rs600_gart_fini(rdev);
+ radeon_irq_kms_fini(rdev);
+ radeon_fence_driver_fini(rdev);
+ radeon_object_fini(rdev);
+ radeon_atombios_fini(rdev);
+ kfree(rdev->bios);
+ rdev->bios = NULL;
+}
+
+int rs600_init(struct radeon_device *rdev)
+{
+ int r;
+
+ /* Disable VGA */
+ rv515_vga_render_disable(rdev);
+ /* Initialize scratch registers */
+ radeon_scratch_init(rdev);
+ /* Initialize surface registers */
+ radeon_surface_init(rdev);
+ /* BIOS */
+ if (!radeon_get_bios(rdev)) {
+ if (ASIC_IS_AVIVO(rdev))
+ return -EINVAL;
+ }
+ if (rdev->is_atom_bios) {
+ r = radeon_atombios_init(rdev);
+ if (r)
+ return r;
+ } else {
+ dev_err(rdev->dev, "Expecting atombios for RS600 GPU\n");
+ return -EINVAL;
+ }
+ /* Reset gpu before posting otherwise ATOM will enter infinite loop */
+ if (radeon_gpu_reset(rdev)) {
+ dev_warn(rdev->dev,
+ "GPU reset failed ! (0xE40=0x%08X, 0x7C0=0x%08X)\n",
+ RREG32(R_000E40_RBBM_STATUS),
+ RREG32(R_0007C0_CP_STAT));
+ }
+ /* check if cards are posted or not */
+ if (!radeon_card_posted(rdev) && rdev->bios) {
+ DRM_INFO("GPU not posted. posting now...\n");
+ atom_asic_init(rdev->mode_info.atom_context);
+ }
+ /* Initialize clocks */
+ radeon_get_clock_info(rdev->ddev);
+ /* Initialize power management */
+ radeon_pm_init(rdev);
+	/* Get VRAM information */
+ rs600_vram_info(rdev);
+ /* Initialize memory controller (also test AGP) */
+ r = r420_mc_init(rdev);
+ if (r)
+ return r;
+ rs600_debugfs(rdev);
+ /* Fence driver */
+ r = radeon_fence_driver_init(rdev);
+ if (r)
+ return r;
+ r = radeon_irq_kms_init(rdev);
+ if (r)
+ return r;
+ /* Memory manager */
+ r = radeon_object_init(rdev);
+ if (r)
+ return r;
+ r = rs600_gart_init(rdev);
+ if (r)
+ return r;
+ rs600_set_safe_registers(rdev);
+ rdev->accel_working = true;
+ r = rs600_startup(rdev);
+ if (r) {
+	/* Something went wrong with the accel init; stop accel */
+ dev_err(rdev->dev, "Disabling GPU acceleration\n");
+ rs600_suspend(rdev);
+ r100_cp_fini(rdev);
+ r100_wb_fini(rdev);
+ r100_ib_fini(rdev);
+ rs600_gart_fini(rdev);
+ radeon_irq_kms_fini(rdev);
+ rdev->accel_working = false;
+ }
return 0;
}
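After draining the interrupt sources, rs600_irq_process() above rearms MSI by clearing the rearm bit and then setting it again, so the hardware sees an edge even when the bit was already high; RS600/RS690/RS740 use a bit in BUS_CNTL while the other families use RADEON_MSI_REARM_EN. A standalone sketch of that two-write toggle on a fake register; the register value and bit position are illustrative assumptions:

#include <stdio.h>
#include <stdint.h>

#define MSI_REARM (1u << 20)   /* bit position chosen for illustration only */

static uint32_t fake_reg = MSI_REARM;   /* pretend the bit is already set */

static void     wreg(uint32_t v) { fake_reg = v; printf("write 0x%08x\n", (unsigned)v); }
static uint32_t rreg(void)       { return fake_reg; }

int main(void)
{
	/* Clear the rearm bit, then set it again so the hardware sees an edge. */
	uint32_t msi_rearm = rreg() & ~MSI_REARM;

	wreg(msi_rearm);
	wreg(msi_rearm | MSI_REARM);
	return 0;
}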
diff --git a/drivers/gpu/drm/radeon/rs600d.h b/drivers/gpu/drm/radeon/rs600d.h
new file mode 100644
index 00000000000..81308924859
--- /dev/null
+++ b/drivers/gpu/drm/radeon/rs600d.h
@@ -0,0 +1,470 @@
+/*
+ * Copyright 2008 Advanced Micro Devices, Inc.
+ * Copyright 2008 Red Hat Inc.
+ * Copyright 2009 Jerome Glisse.
+ *
+ * Permission is hereby granted, free of charge, to any person obtaining a
+ * copy of this software and associated documentation files (the "Software"),
+ * to deal in the Software without restriction, including without limitation
+ * the rights to use, copy, modify, merge, publish, distribute, sublicense,
+ * and/or sell copies of the Software, and to permit persons to whom the
+ * Software is furnished to do so, subject to the following conditions:
+ *
+ * The above copyright notice and this permission notice shall be included in
+ * all copies or substantial portions of the Software.
+ *
+ * THE SOFTWARE IS PROVIDED "AS IS", WITHOUT WARRANTY OF ANY KIND, EXPRESS OR
+ * IMPLIED, INCLUDING BUT NOT LIMITED TO THE WARRANTIES OF MERCHANTABILITY,
+ * FITNESS FOR A PARTICULAR PURPOSE AND NONINFRINGEMENT. IN NO EVENT SHALL
+ * THE COPYRIGHT HOLDER(S) OR AUTHOR(S) BE LIABLE FOR ANY CLAIM, DAMAGES OR
+ * OTHER LIABILITY, WHETHER IN AN ACTION OF CONTRACT, TORT OR OTHERWISE,
+ * ARISING FROM, OUT OF OR IN CONNECTION WITH THE SOFTWARE OR THE USE OR
+ * OTHER DEALINGS IN THE SOFTWARE.
+ *
+ * Authors: Dave Airlie
+ * Alex Deucher
+ * Jerome Glisse
+ */
+#ifndef __RS600D_H__
+#define __RS600D_H__
+
+/* Registers */
+#define R_000040_GEN_INT_CNTL 0x000040
+#define S_000040_DISPLAY_INT_STATUS(x) (((x) & 0x1) << 0)
+#define G_000040_DISPLAY_INT_STATUS(x) (((x) >> 0) & 0x1)
+#define C_000040_DISPLAY_INT_STATUS 0xFFFFFFFE
+#define S_000040_DMA_VIPH0_INT_EN(x) (((x) & 0x1) << 12)
+#define G_000040_DMA_VIPH0_INT_EN(x) (((x) >> 12) & 0x1)
+#define C_000040_DMA_VIPH0_INT_EN 0xFFFFEFFF
+#define S_000040_CRTC2_VSYNC(x) (((x) & 0x1) << 6)
+#define G_000040_CRTC2_VSYNC(x) (((x) >> 6) & 0x1)
+#define C_000040_CRTC2_VSYNC 0xFFFFFFBF
+#define S_000040_SNAPSHOT2(x) (((x) & 0x1) << 7)
+#define G_000040_SNAPSHOT2(x) (((x) >> 7) & 0x1)
+#define C_000040_SNAPSHOT2 0xFFFFFF7F
+#define S_000040_CRTC2_VBLANK(x) (((x) & 0x1) << 9)
+#define G_000040_CRTC2_VBLANK(x) (((x) >> 9) & 0x1)
+#define C_000040_CRTC2_VBLANK 0xFFFFFDFF
+#define S_000040_FP2_DETECT(x) (((x) & 0x1) << 10)
+#define G_000040_FP2_DETECT(x) (((x) >> 10) & 0x1)
+#define C_000040_FP2_DETECT 0xFFFFFBFF
+#define S_000040_VSYNC_DIFF_OVER_LIMIT(x) (((x) & 0x1) << 11)
+#define G_000040_VSYNC_DIFF_OVER_LIMIT(x) (((x) >> 11) & 0x1)
+#define C_000040_VSYNC_DIFF_OVER_LIMIT 0xFFFFF7FF
+#define S_000040_DMA_VIPH1_INT_EN(x) (((x) & 0x1) << 13)
+#define G_000040_DMA_VIPH1_INT_EN(x) (((x) >> 13) & 0x1)
+#define C_000040_DMA_VIPH1_INT_EN 0xFFFFDFFF
+#define S_000040_DMA_VIPH2_INT_EN(x) (((x) & 0x1) << 14)
+#define G_000040_DMA_VIPH2_INT_EN(x) (((x) >> 14) & 0x1)
+#define C_000040_DMA_VIPH2_INT_EN 0xFFFFBFFF
+#define S_000040_DMA_VIPH3_INT_EN(x) (((x) & 0x1) << 15)
+#define G_000040_DMA_VIPH3_INT_EN(x) (((x) >> 15) & 0x1)
+#define C_000040_DMA_VIPH3_INT_EN 0xFFFF7FFF
+#define S_000040_I2C_INT_EN(x) (((x) & 0x1) << 17)
+#define G_000040_I2C_INT_EN(x) (((x) >> 17) & 0x1)
+#define C_000040_I2C_INT_EN 0xFFFDFFFF
+#define S_000040_GUI_IDLE(x) (((x) & 0x1) << 19)
+#define G_000040_GUI_IDLE(x) (((x) >> 19) & 0x1)
+#define C_000040_GUI_IDLE 0xFFF7FFFF
+#define S_000040_VIPH_INT_EN(x) (((x) & 0x1) << 24)
+#define G_000040_VIPH_INT_EN(x) (((x) >> 24) & 0x1)
+#define C_000040_VIPH_INT_EN 0xFEFFFFFF
+#define S_000040_SW_INT_EN(x) (((x) & 0x1) << 25)
+#define G_000040_SW_INT_EN(x) (((x) >> 25) & 0x1)
+#define C_000040_SW_INT_EN 0xFDFFFFFF
+#define S_000040_GEYSERVILLE(x) (((x) & 0x1) << 27)
+#define G_000040_GEYSERVILLE(x) (((x) >> 27) & 0x1)
+#define C_000040_GEYSERVILLE 0xF7FFFFFF
+#define S_000040_HDCP_AUTHORIZED_INT(x) (((x) & 0x1) << 28)
+#define G_000040_HDCP_AUTHORIZED_INT(x) (((x) >> 28) & 0x1)
+#define C_000040_HDCP_AUTHORIZED_INT 0xEFFFFFFF
+#define S_000040_DVI_I2C_INT(x) (((x) & 0x1) << 29)
+#define G_000040_DVI_I2C_INT(x) (((x) >> 29) & 0x1)
+#define C_000040_DVI_I2C_INT 0xDFFFFFFF
+#define S_000040_GUIDMA(x) (((x) & 0x1) << 30)
+#define G_000040_GUIDMA(x) (((x) >> 30) & 0x1)
+#define C_000040_GUIDMA 0xBFFFFFFF
+#define S_000040_VIDDMA(x) (((x) & 0x1) << 31)
+#define G_000040_VIDDMA(x) (((x) >> 31) & 0x1)
+#define C_000040_VIDDMA 0x7FFFFFFF
+#define R_000044_GEN_INT_STATUS 0x000044
+#define S_000044_DISPLAY_INT_STAT(x) (((x) & 0x1) << 0)
+#define G_000044_DISPLAY_INT_STAT(x) (((x) >> 0) & 0x1)
+#define C_000044_DISPLAY_INT_STAT 0xFFFFFFFE
+#define S_000044_VGA_INT_STAT(x) (((x) & 0x1) << 1)
+#define G_000044_VGA_INT_STAT(x) (((x) >> 1) & 0x1)
+#define C_000044_VGA_INT_STAT 0xFFFFFFFD
+#define S_000044_CAP0_INT_ACTIVE(x) (((x) & 0x1) << 8)
+#define G_000044_CAP0_INT_ACTIVE(x) (((x) >> 8) & 0x1)
+#define C_000044_CAP0_INT_ACTIVE 0xFFFFFEFF
+#define S_000044_DMA_VIPH0_INT(x) (((x) & 0x1) << 12)
+#define G_000044_DMA_VIPH0_INT(x) (((x) >> 12) & 0x1)
+#define C_000044_DMA_VIPH0_INT 0xFFFFEFFF
+#define S_000044_DMA_VIPH1_INT(x) (((x) & 0x1) << 13)
+#define G_000044_DMA_VIPH1_INT(x) (((x) >> 13) & 0x1)
+#define C_000044_DMA_VIPH1_INT 0xFFFFDFFF
+#define S_000044_DMA_VIPH2_INT(x) (((x) & 0x1) << 14)
+#define G_000044_DMA_VIPH2_INT(x) (((x) >> 14) & 0x1)
+#define C_000044_DMA_VIPH2_INT 0xFFFFBFFF
+#define S_000044_DMA_VIPH3_INT(x) (((x) & 0x1) << 15)
+#define G_000044_DMA_VIPH3_INT(x) (((x) >> 15) & 0x1)
+#define C_000044_DMA_VIPH3_INT 0xFFFF7FFF
+#define S_000044_MC_PROBE_FAULT_STAT(x) (((x) & 0x1) << 16)
+#define G_000044_MC_PROBE_FAULT_STAT(x) (((x) >> 16) & 0x1)
+#define C_000044_MC_PROBE_FAULT_STAT 0xFFFEFFFF
+#define S_000044_I2C_INT(x) (((x) & 0x1) << 17)
+#define G_000044_I2C_INT(x) (((x) >> 17) & 0x1)
+#define C_000044_I2C_INT 0xFFFDFFFF
+#define S_000044_SCRATCH_INT_STAT(x) (((x) & 0x1) << 18)
+#define G_000044_SCRATCH_INT_STAT(x) (((x) >> 18) & 0x1)
+#define C_000044_SCRATCH_INT_STAT 0xFFFBFFFF
+#define S_000044_GUI_IDLE_STAT(x) (((x) & 0x1) << 19)
+#define G_000044_GUI_IDLE_STAT(x) (((x) >> 19) & 0x1)
+#define C_000044_GUI_IDLE_STAT 0xFFF7FFFF
+#define S_000044_ATI_OVERDRIVE_INT_STAT(x) (((x) & 0x1) << 20)
+#define G_000044_ATI_OVERDRIVE_INT_STAT(x) (((x) >> 20) & 0x1)
+#define C_000044_ATI_OVERDRIVE_INT_STAT 0xFFEFFFFF
+#define S_000044_MC_PROTECTION_FAULT_STAT(x) (((x) & 0x1) << 21)
+#define G_000044_MC_PROTECTION_FAULT_STAT(x) (((x) >> 21) & 0x1)
+#define C_000044_MC_PROTECTION_FAULT_STAT 0xFFDFFFFF
+#define S_000044_RBBM_READ_INT_STAT(x) (((x) & 0x1) << 22)
+#define G_000044_RBBM_READ_INT_STAT(x) (((x) >> 22) & 0x1)
+#define C_000044_RBBM_READ_INT_STAT 0xFFBFFFFF
+#define S_000044_CB_CONTEXT_SWITCH_STAT(x) (((x) & 0x1) << 23)
+#define G_000044_CB_CONTEXT_SWITCH_STAT(x) (((x) >> 23) & 0x1)
+#define C_000044_CB_CONTEXT_SWITCH_STAT 0xFF7FFFFF
+#define S_000044_VIPH_INT(x) (((x) & 0x1) << 24)
+#define G_000044_VIPH_INT(x) (((x) >> 24) & 0x1)
+#define C_000044_VIPH_INT 0xFEFFFFFF
+#define S_000044_SW_INT(x) (((x) & 0x1) << 25)
+#define G_000044_SW_INT(x) (((x) >> 25) & 0x1)
+#define C_000044_SW_INT 0xFDFFFFFF
+#define S_000044_SW_INT_SET(x) (((x) & 0x1) << 26)
+#define G_000044_SW_INT_SET(x) (((x) >> 26) & 0x1)
+#define C_000044_SW_INT_SET 0xFBFFFFFF
+#define S_000044_IDCT_INT_STAT(x) (((x) & 0x1) << 27)
+#define G_000044_IDCT_INT_STAT(x) (((x) >> 27) & 0x1)
+#define C_000044_IDCT_INT_STAT 0xF7FFFFFF
+#define S_000044_GUIDMA_STAT(x) (((x) & 0x1) << 30)
+#define G_000044_GUIDMA_STAT(x) (((x) >> 30) & 0x1)
+#define C_000044_GUIDMA_STAT 0xBFFFFFFF
+#define S_000044_VIDDMA_STAT(x) (((x) & 0x1) << 31)
+#define G_000044_VIDDMA_STAT(x) (((x) >> 31) & 0x1)
+#define C_000044_VIDDMA_STAT 0x7FFFFFFF
+#define R_00004C_BUS_CNTL 0x00004C
+#define S_00004C_BUS_MASTER_DIS(x) (((x) & 0x1) << 14)
+#define G_00004C_BUS_MASTER_DIS(x) (((x) >> 14) & 0x1)
+#define C_00004C_BUS_MASTER_DIS 0xFFFFBFFF
+#define S_00004C_BUS_MSI_REARM(x) (((x) & 0x1) << 20)
+#define G_00004C_BUS_MSI_REARM(x) (((x) >> 20) & 0x1)
+#define C_00004C_BUS_MSI_REARM 0xFFEFFFFF
+#define R_000070_MC_IND_INDEX 0x000070
+#define S_000070_MC_IND_ADDR(x) (((x) & 0xFFFF) << 0)
+#define G_000070_MC_IND_ADDR(x) (((x) >> 0) & 0xFFFF)
+#define C_000070_MC_IND_ADDR 0xFFFF0000
+#define S_000070_MC_IND_SEQ_RBS_0(x) (((x) & 0x1) << 16)
+#define G_000070_MC_IND_SEQ_RBS_0(x) (((x) >> 16) & 0x1)
+#define C_000070_MC_IND_SEQ_RBS_0 0xFFFEFFFF
+#define S_000070_MC_IND_SEQ_RBS_1(x) (((x) & 0x1) << 17)
+#define G_000070_MC_IND_SEQ_RBS_1(x) (((x) >> 17) & 0x1)
+#define C_000070_MC_IND_SEQ_RBS_1 0xFFFDFFFF
+#define S_000070_MC_IND_SEQ_RBS_2(x) (((x) & 0x1) << 18)
+#define G_000070_MC_IND_SEQ_RBS_2(x) (((x) >> 18) & 0x1)
+#define C_000070_MC_IND_SEQ_RBS_2 0xFFFBFFFF
+#define S_000070_MC_IND_SEQ_RBS_3(x) (((x) & 0x1) << 19)
+#define G_000070_MC_IND_SEQ_RBS_3(x) (((x) >> 19) & 0x1)
+#define C_000070_MC_IND_SEQ_RBS_3 0xFFF7FFFF
+#define S_000070_MC_IND_AIC_RBS(x) (((x) & 0x1) << 20)
+#define G_000070_MC_IND_AIC_RBS(x) (((x) >> 20) & 0x1)
+#define C_000070_MC_IND_AIC_RBS 0xFFEFFFFF
+#define S_000070_MC_IND_CITF_ARB0(x) (((x) & 0x1) << 21)
+#define G_000070_MC_IND_CITF_ARB0(x) (((x) >> 21) & 0x1)
+#define C_000070_MC_IND_CITF_ARB0 0xFFDFFFFF
+#define S_000070_MC_IND_CITF_ARB1(x) (((x) & 0x1) << 22)
+#define G_000070_MC_IND_CITF_ARB1(x) (((x) >> 22) & 0x1)
+#define C_000070_MC_IND_CITF_ARB1 0xFFBFFFFF
+#define S_000070_MC_IND_WR_EN(x) (((x) & 0x1) << 23)
+#define G_000070_MC_IND_WR_EN(x) (((x) >> 23) & 0x1)
+#define C_000070_MC_IND_WR_EN 0xFF7FFFFF
+#define S_000070_MC_IND_RD_INV(x) (((x) & 0x1) << 24)
+#define G_000070_MC_IND_RD_INV(x) (((x) >> 24) & 0x1)
+#define C_000070_MC_IND_RD_INV 0xFEFFFFFF
+#define R_000074_MC_IND_DATA 0x000074
+#define S_000074_MC_IND_DATA(x) (((x) & 0xFFFFFFFF) << 0)
+#define G_000074_MC_IND_DATA(x) (((x) >> 0) & 0xFFFFFFFF)
+#define C_000074_MC_IND_DATA 0x00000000
+#define R_000134_HDP_FB_LOCATION 0x000134
+#define S_000134_HDP_FB_START(x) (((x) & 0xFFFF) << 0)
+#define G_000134_HDP_FB_START(x) (((x) >> 0) & 0xFFFF)
+#define C_000134_HDP_FB_START 0xFFFF0000
+#define R_0007C0_CP_STAT 0x0007C0
+#define S_0007C0_MRU_BUSY(x) (((x) & 0x1) << 0)
+#define G_0007C0_MRU_BUSY(x) (((x) >> 0) & 0x1)
+#define C_0007C0_MRU_BUSY 0xFFFFFFFE
+#define S_0007C0_MWU_BUSY(x) (((x) & 0x1) << 1)
+#define G_0007C0_MWU_BUSY(x) (((x) >> 1) & 0x1)
+#define C_0007C0_MWU_BUSY 0xFFFFFFFD
+#define S_0007C0_RSIU_BUSY(x) (((x) & 0x1) << 2)
+#define G_0007C0_RSIU_BUSY(x) (((x) >> 2) & 0x1)
+#define C_0007C0_RSIU_BUSY 0xFFFFFFFB
+#define S_0007C0_RCIU_BUSY(x) (((x) & 0x1) << 3)
+#define G_0007C0_RCIU_BUSY(x) (((x) >> 3) & 0x1)
+#define C_0007C0_RCIU_BUSY 0xFFFFFFF7
+#define S_0007C0_CSF_PRIMARY_BUSY(x) (((x) & 0x1) << 9)
+#define G_0007C0_CSF_PRIMARY_BUSY(x) (((x) >> 9) & 0x1)
+#define C_0007C0_CSF_PRIMARY_BUSY 0xFFFFFDFF
+#define S_0007C0_CSF_INDIRECT_BUSY(x) (((x) & 0x1) << 10)
+#define G_0007C0_CSF_INDIRECT_BUSY(x) (((x) >> 10) & 0x1)
+#define C_0007C0_CSF_INDIRECT_BUSY 0xFFFFFBFF
+#define S_0007C0_CSQ_PRIMARY_BUSY(x) (((x) & 0x1) << 11)
+#define G_0007C0_CSQ_PRIMARY_BUSY(x) (((x) >> 11) & 0x1)
+#define C_0007C0_CSQ_PRIMARY_BUSY 0xFFFFF7FF
+#define S_0007C0_CSQ_INDIRECT_BUSY(x) (((x) & 0x1) << 12)
+#define G_0007C0_CSQ_INDIRECT_BUSY(x) (((x) >> 12) & 0x1)
+#define C_0007C0_CSQ_INDIRECT_BUSY 0xFFFFEFFF
+#define S_0007C0_CSI_BUSY(x) (((x) & 0x1) << 13)
+#define G_0007C0_CSI_BUSY(x) (((x) >> 13) & 0x1)
+#define C_0007C0_CSI_BUSY 0xFFFFDFFF
+#define S_0007C0_CSF_INDIRECT2_BUSY(x) (((x) & 0x1) << 14)
+#define G_0007C0_CSF_INDIRECT2_BUSY(x) (((x) >> 14) & 0x1)
+#define C_0007C0_CSF_INDIRECT2_BUSY 0xFFFFBFFF
+#define S_0007C0_CSQ_INDIRECT2_BUSY(x) (((x) & 0x1) << 15)
+#define G_0007C0_CSQ_INDIRECT2_BUSY(x) (((x) >> 15) & 0x1)
+#define C_0007C0_CSQ_INDIRECT2_BUSY 0xFFFF7FFF
+#define S_0007C0_GUIDMA_BUSY(x) (((x) & 0x1) << 28)
+#define G_0007C0_GUIDMA_BUSY(x) (((x) >> 28) & 0x1)
+#define C_0007C0_GUIDMA_BUSY 0xEFFFFFFF
+#define S_0007C0_VIDDMA_BUSY(x) (((x) & 0x1) << 29)
+#define G_0007C0_VIDDMA_BUSY(x) (((x) >> 29) & 0x1)
+#define C_0007C0_VIDDMA_BUSY 0xDFFFFFFF
+#define S_0007C0_CMDSTRM_BUSY(x) (((x) & 0x1) << 30)
+#define G_0007C0_CMDSTRM_BUSY(x) (((x) >> 30) & 0x1)
+#define C_0007C0_CMDSTRM_BUSY 0xBFFFFFFF
+#define S_0007C0_CP_BUSY(x) (((x) & 0x1) << 31)
+#define G_0007C0_CP_BUSY(x) (((x) >> 31) & 0x1)
+#define C_0007C0_CP_BUSY 0x7FFFFFFF
+#define R_000E40_RBBM_STATUS 0x000E40
+#define S_000E40_CMDFIFO_AVAIL(x) (((x) & 0x7F) << 0)
+#define G_000E40_CMDFIFO_AVAIL(x) (((x) >> 0) & 0x7F)
+#define C_000E40_CMDFIFO_AVAIL 0xFFFFFF80
+#define S_000E40_HIRQ_ON_RBB(x) (((x) & 0x1) << 8)
+#define G_000E40_HIRQ_ON_RBB(x) (((x) >> 8) & 0x1)
+#define C_000E40_HIRQ_ON_RBB 0xFFFFFEFF
+#define S_000E40_CPRQ_ON_RBB(x) (((x) & 0x1) << 9)
+#define G_000E40_CPRQ_ON_RBB(x) (((x) >> 9) & 0x1)
+#define C_000E40_CPRQ_ON_RBB 0xFFFFFDFF
+#define S_000E40_CFRQ_ON_RBB(x) (((x) & 0x1) << 10)
+#define G_000E40_CFRQ_ON_RBB(x) (((x) >> 10) & 0x1)
+#define C_000E40_CFRQ_ON_RBB 0xFFFFFBFF
+#define S_000E40_HIRQ_IN_RTBUF(x) (((x) & 0x1) << 11)
+#define G_000E40_HIRQ_IN_RTBUF(x) (((x) >> 11) & 0x1)
+#define C_000E40_HIRQ_IN_RTBUF 0xFFFFF7FF
+#define S_000E40_CPRQ_IN_RTBUF(x) (((x) & 0x1) << 12)
+#define G_000E40_CPRQ_IN_RTBUF(x) (((x) >> 12) & 0x1)
+#define C_000E40_CPRQ_IN_RTBUF 0xFFFFEFFF
+#define S_000E40_CFRQ_IN_RTBUF(x) (((x) & 0x1) << 13)
+#define G_000E40_CFRQ_IN_RTBUF(x) (((x) >> 13) & 0x1)
+#define C_000E40_CFRQ_IN_RTBUF 0xFFFFDFFF
+#define S_000E40_CF_PIPE_BUSY(x) (((x) & 0x1) << 14)
+#define G_000E40_CF_PIPE_BUSY(x) (((x) >> 14) & 0x1)
+#define C_000E40_CF_PIPE_BUSY 0xFFFFBFFF
+#define S_000E40_ENG_EV_BUSY(x) (((x) & 0x1) << 15)
+#define G_000E40_ENG_EV_BUSY(x) (((x) >> 15) & 0x1)
+#define C_000E40_ENG_EV_BUSY 0xFFFF7FFF
+#define S_000E40_CP_CMDSTRM_BUSY(x) (((x) & 0x1) << 16)
+#define G_000E40_CP_CMDSTRM_BUSY(x) (((x) >> 16) & 0x1)
+#define C_000E40_CP_CMDSTRM_BUSY 0xFFFEFFFF
+#define S_000E40_E2_BUSY(x) (((x) & 0x1) << 17)
+#define G_000E40_E2_BUSY(x) (((x) >> 17) & 0x1)
+#define C_000E40_E2_BUSY 0xFFFDFFFF
+#define S_000E40_RB2D_BUSY(x) (((x) & 0x1) << 18)
+#define G_000E40_RB2D_BUSY(x) (((x) >> 18) & 0x1)
+#define C_000E40_RB2D_BUSY 0xFFFBFFFF
+#define S_000E40_RB3D_BUSY(x) (((x) & 0x1) << 19)
+#define G_000E40_RB3D_BUSY(x) (((x) >> 19) & 0x1)
+#define C_000E40_RB3D_BUSY 0xFFF7FFFF
+#define S_000E40_VAP_BUSY(x) (((x) & 0x1) << 20)
+#define G_000E40_VAP_BUSY(x) (((x) >> 20) & 0x1)
+#define C_000E40_VAP_BUSY 0xFFEFFFFF
+#define S_000E40_RE_BUSY(x) (((x) & 0x1) << 21)
+#define G_000E40_RE_BUSY(x) (((x) >> 21) & 0x1)
+#define C_000E40_RE_BUSY 0xFFDFFFFF
+#define S_000E40_TAM_BUSY(x) (((x) & 0x1) << 22)
+#define G_000E40_TAM_BUSY(x) (((x) >> 22) & 0x1)
+#define C_000E40_TAM_BUSY 0xFFBFFFFF
+#define S_000E40_TDM_BUSY(x) (((x) & 0x1) << 23)
+#define G_000E40_TDM_BUSY(x) (((x) >> 23) & 0x1)
+#define C_000E40_TDM_BUSY 0xFF7FFFFF
+#define S_000E40_PB_BUSY(x) (((x) & 0x1) << 24)
+#define G_000E40_PB_BUSY(x) (((x) >> 24) & 0x1)
+#define C_000E40_PB_BUSY 0xFEFFFFFF
+#define S_000E40_TIM_BUSY(x) (((x) & 0x1) << 25)
+#define G_000E40_TIM_BUSY(x) (((x) >> 25) & 0x1)
+#define C_000E40_TIM_BUSY 0xFDFFFFFF
+#define S_000E40_GA_BUSY(x) (((x) & 0x1) << 26)
+#define G_000E40_GA_BUSY(x) (((x) >> 26) & 0x1)
+#define C_000E40_GA_BUSY 0xFBFFFFFF
+#define S_000E40_CBA2D_BUSY(x) (((x) & 0x1) << 27)
+#define G_000E40_CBA2D_BUSY(x) (((x) >> 27) & 0x1)
+#define C_000E40_CBA2D_BUSY 0xF7FFFFFF
+#define S_000E40_GUI_ACTIVE(x) (((x) & 0x1) << 31)
+#define G_000E40_GUI_ACTIVE(x) (((x) >> 31) & 0x1)
+#define C_000E40_GUI_ACTIVE 0x7FFFFFFF
+#define R_0060A4_D1CRTC_STATUS_FRAME_COUNT 0x0060A4
+#define S_0060A4_D1CRTC_FRAME_COUNT(x) (((x) & 0xFFFFFF) << 0)
+#define G_0060A4_D1CRTC_FRAME_COUNT(x) (((x) >> 0) & 0xFFFFFF)
+#define C_0060A4_D1CRTC_FRAME_COUNT 0xFF000000
+#define R_006534_D1MODE_VBLANK_STATUS 0x006534
+#define S_006534_D1MODE_VBLANK_OCCURRED(x) (((x) & 0x1) << 0)
+#define G_006534_D1MODE_VBLANK_OCCURRED(x) (((x) >> 0) & 0x1)
+#define C_006534_D1MODE_VBLANK_OCCURRED 0xFFFFFFFE
+#define S_006534_D1MODE_VBLANK_ACK(x) (((x) & 0x1) << 4)
+#define G_006534_D1MODE_VBLANK_ACK(x) (((x) >> 4) & 0x1)
+#define C_006534_D1MODE_VBLANK_ACK 0xFFFFFFEF
+#define S_006534_D1MODE_VBLANK_STAT(x) (((x) & 0x1) << 12)
+#define G_006534_D1MODE_VBLANK_STAT(x) (((x) >> 12) & 0x1)
+#define C_006534_D1MODE_VBLANK_STAT 0xFFFFEFFF
+#define S_006534_D1MODE_VBLANK_INTERRUPT(x) (((x) & 0x1) << 16)
+#define G_006534_D1MODE_VBLANK_INTERRUPT(x) (((x) >> 16) & 0x1)
+#define C_006534_D1MODE_VBLANK_INTERRUPT 0xFFFEFFFF
+#define R_006540_DxMODE_INT_MASK 0x006540
+#define S_006540_D1MODE_VBLANK_INT_MASK(x) (((x) & 0x1) << 0)
+#define G_006540_D1MODE_VBLANK_INT_MASK(x) (((x) >> 0) & 0x1)
+#define C_006540_D1MODE_VBLANK_INT_MASK 0xFFFFFFFE
+#define S_006540_D1MODE_VLINE_INT_MASK(x) (((x) & 0x1) << 4)
+#define G_006540_D1MODE_VLINE_INT_MASK(x) (((x) >> 4) & 0x1)
+#define C_006540_D1MODE_VLINE_INT_MASK 0xFFFFFFEF
+#define S_006540_D2MODE_VBLANK_INT_MASK(x) (((x) & 0x1) << 8)
+#define G_006540_D2MODE_VBLANK_INT_MASK(x) (((x) >> 8) & 0x1)
+#define C_006540_D2MODE_VBLANK_INT_MASK 0xFFFFFEFF
+#define S_006540_D2MODE_VLINE_INT_MASK(x) (((x) & 0x1) << 12)
+#define G_006540_D2MODE_VLINE_INT_MASK(x) (((x) >> 12) & 0x1)
+#define C_006540_D2MODE_VLINE_INT_MASK 0xFFFFEFFF
+#define S_006540_D1MODE_VBLANK_CP_SEL(x) (((x) & 0x1) << 30)
+#define G_006540_D1MODE_VBLANK_CP_SEL(x) (((x) >> 30) & 0x1)
+#define C_006540_D1MODE_VBLANK_CP_SEL 0xBFFFFFFF
+#define S_006540_D2MODE_VBLANK_CP_SEL(x) (((x) & 0x1) << 31)
+#define G_006540_D2MODE_VBLANK_CP_SEL(x) (((x) >> 31) & 0x1)
+#define C_006540_D2MODE_VBLANK_CP_SEL 0x7FFFFFFF
+#define R_0068A4_D2CRTC_STATUS_FRAME_COUNT 0x0068A4
+#define S_0068A4_D2CRTC_FRAME_COUNT(x) (((x) & 0xFFFFFF) << 0)
+#define G_0068A4_D2CRTC_FRAME_COUNT(x) (((x) >> 0) & 0xFFFFFF)
+#define C_0068A4_D2CRTC_FRAME_COUNT 0xFF000000
+#define R_006D34_D2MODE_VBLANK_STATUS 0x006D34
+#define S_006D34_D2MODE_VBLANK_OCCURRED(x) (((x) & 0x1) << 0)
+#define G_006D34_D2MODE_VBLANK_OCCURRED(x) (((x) >> 0) & 0x1)
+#define C_006D34_D2MODE_VBLANK_OCCURRED 0xFFFFFFFE
+#define S_006D34_D2MODE_VBLANK_ACK(x) (((x) & 0x1) << 4)
+#define G_006D34_D2MODE_VBLANK_ACK(x) (((x) >> 4) & 0x1)
+#define C_006D34_D2MODE_VBLANK_ACK 0xFFFFFFEF
+#define S_006D34_D2MODE_VBLANK_STAT(x) (((x) & 0x1) << 12)
+#define G_006D34_D2MODE_VBLANK_STAT(x) (((x) >> 12) & 0x1)
+#define C_006D34_D2MODE_VBLANK_STAT 0xFFFFEFFF
+#define S_006D34_D2MODE_VBLANK_INTERRUPT(x) (((x) & 0x1) << 16)
+#define G_006D34_D2MODE_VBLANK_INTERRUPT(x) (((x) >> 16) & 0x1)
+#define C_006D34_D2MODE_VBLANK_INTERRUPT 0xFFFEFFFF
+#define R_007EDC_DISP_INTERRUPT_STATUS 0x007EDC
+#define S_007EDC_LB_D1_VBLANK_INTERRUPT(x) (((x) & 0x1) << 4)
+#define G_007EDC_LB_D1_VBLANK_INTERRUPT(x) (((x) >> 4) & 0x1)
+#define C_007EDC_LB_D1_VBLANK_INTERRUPT 0xFFFFFFEF
+#define S_007EDC_LB_D2_VBLANK_INTERRUPT(x) (((x) & 0x1) << 5)
+#define G_007EDC_LB_D2_VBLANK_INTERRUPT(x) (((x) >> 5) & 0x1)
+#define C_007EDC_LB_D2_VBLANK_INTERRUPT 0xFFFFFFDF
+
+
+/* MC registers */
+#define R_000000_MC_STATUS 0x000000
+#define S_000000_MC_IDLE(x) (((x) & 0x1) << 0)
+#define G_000000_MC_IDLE(x) (((x) >> 0) & 0x1)
+#define C_000000_MC_IDLE 0xFFFFFFFE
+#define R_000004_MC_FB_LOCATION 0x000004
+#define S_000004_MC_FB_START(x) (((x) & 0xFFFF) << 0)
+#define G_000004_MC_FB_START(x) (((x) >> 0) & 0xFFFF)
+#define C_000004_MC_FB_START 0xFFFF0000
+#define S_000004_MC_FB_TOP(x) (((x) & 0xFFFF) << 16)
+#define G_000004_MC_FB_TOP(x) (((x) >> 16) & 0xFFFF)
+#define C_000004_MC_FB_TOP 0x0000FFFF
+#define R_000005_MC_AGP_LOCATION 0x000005
+#define S_000005_MC_AGP_START(x) (((x) & 0xFFFF) << 0)
+#define G_000005_MC_AGP_START(x) (((x) >> 0) & 0xFFFF)
+#define C_000005_MC_AGP_START 0xFFFF0000
+#define S_000005_MC_AGP_TOP(x) (((x) & 0xFFFF) << 16)
+#define G_000005_MC_AGP_TOP(x) (((x) >> 16) & 0xFFFF)
+#define C_000005_MC_AGP_TOP 0x0000FFFF
+#define R_000006_AGP_BASE 0x000006
+#define S_000006_AGP_BASE_ADDR(x) (((x) & 0xFFFFFFFF) << 0)
+#define G_000006_AGP_BASE_ADDR(x) (((x) >> 0) & 0xFFFFFFFF)
+#define C_000006_AGP_BASE_ADDR 0x00000000
+#define R_000007_AGP_BASE_2 0x000007
+#define S_000007_AGP_BASE_ADDR_2(x) (((x) & 0xF) << 0)
+#define G_000007_AGP_BASE_ADDR_2(x) (((x) >> 0) & 0xF)
+#define C_000007_AGP_BASE_ADDR_2 0xFFFFFFF0
+#define R_000009_MC_CNTL1 0x000009
+#define S_000009_ENABLE_PAGE_TABLES(x) (((x) & 0x1) << 26)
+#define G_000009_ENABLE_PAGE_TABLES(x) (((x) >> 26) & 0x1)
+#define C_000009_ENABLE_PAGE_TABLES 0xFBFFFFFF
+/* FIXME don't know the various field size need feedback from AMD */
+#define R_000100_MC_PT0_CNTL 0x000100
+#define S_000100_ENABLE_PT(x) (((x) & 0x1) << 0)
+#define G_000100_ENABLE_PT(x) (((x) >> 0) & 0x1)
+#define C_000100_ENABLE_PT 0xFFFFFFFE
+#define S_000100_EFFECTIVE_L2_CACHE_SIZE(x) (((x) & 0x7) << 15)
+#define G_000100_EFFECTIVE_L2_CACHE_SIZE(x) (((x) >> 15) & 0x7)
+#define C_000100_EFFECTIVE_L2_CACHE_SIZE 0xFFFC7FFF
+#define S_000100_EFFECTIVE_L2_QUEUE_SIZE(x) (((x) & 0x7) << 21)
+#define G_000100_EFFECTIVE_L2_QUEUE_SIZE(x) (((x) >> 21) & 0x7)
+#define C_000100_EFFECTIVE_L2_QUEUE_SIZE 0xFF1FFFFF
+#define S_000100_INVALIDATE_ALL_L1_TLBS(x) (((x) & 0x1) << 28)
+#define G_000100_INVALIDATE_ALL_L1_TLBS(x) (((x) >> 28) & 0x1)
+#define C_000100_INVALIDATE_ALL_L1_TLBS 0xEFFFFFFF
+#define S_000100_INVALIDATE_L2_CACHE(x) (((x) & 0x1) << 29)
+#define G_000100_INVALIDATE_L2_CACHE(x) (((x) >> 29) & 0x1)
+#define C_000100_INVALIDATE_L2_CACHE 0xDFFFFFFF
+#define R_000102_MC_PT0_CONTEXT0_CNTL 0x000102
+#define S_000102_ENABLE_PAGE_TABLE(x) (((x) & 0x1) << 0)
+#define G_000102_ENABLE_PAGE_TABLE(x) (((x) >> 0) & 0x1)
+#define C_000102_ENABLE_PAGE_TABLE 0xFFFFFFFE
+#define S_000102_PAGE_TABLE_DEPTH(x) (((x) & 0x3) << 1)
+#define G_000102_PAGE_TABLE_DEPTH(x) (((x) >> 1) & 0x3)
+#define C_000102_PAGE_TABLE_DEPTH 0xFFFFFFF9
+#define V_000102_PAGE_TABLE_FLAT 0
+/* R600 documentation suggest that this should be a number of pages */
+#define R_000112_MC_PT0_SYSTEM_APERTURE_LOW_ADDR 0x000112
+#define R_000114_MC_PT0_SYSTEM_APERTURE_HIGH_ADDR 0x000114
+#define R_00011C_MC_PT0_CONTEXT0_DEFAULT_READ_ADDR 0x00011C
+#define R_00012C_MC_PT0_CONTEXT0_FLAT_BASE_ADDR 0x00012C
+#define R_00013C_MC_PT0_CONTEXT0_FLAT_START_ADDR 0x00013C
+#define R_00014C_MC_PT0_CONTEXT0_FLAT_END_ADDR 0x00014C
+#define R_00016C_MC_PT0_CLIENT0_CNTL 0x00016C
+#define S_00016C_ENABLE_TRANSLATION_MODE_OVERRIDE(x) (((x) & 0x1) << 0)
+#define G_00016C_ENABLE_TRANSLATION_MODE_OVERRIDE(x) (((x) >> 0) & 0x1)
+#define C_00016C_ENABLE_TRANSLATION_MODE_OVERRIDE 0xFFFFFFFE
+#define S_00016C_TRANSLATION_MODE_OVERRIDE(x) (((x) & 0x1) << 1)
+#define G_00016C_TRANSLATION_MODE_OVERRIDE(x) (((x) >> 1) & 0x1)
+#define C_00016C_TRANSLATION_MODE_OVERRIDE 0xFFFFFFFD
+#define S_00016C_SYSTEM_ACCESS_MODE_MASK(x) (((x) & 0x3) << 8)
+#define G_00016C_SYSTEM_ACCESS_MODE_MASK(x) (((x) >> 8) & 0x3)
+#define C_00016C_SYSTEM_ACCESS_MODE_MASK 0xFFFFFCFF
+#define V_00016C_SYSTEM_ACCESS_MODE_PA_ONLY 0
+#define V_00016C_SYSTEM_ACCESS_MODE_USE_SYS_MAP 1
+#define V_00016C_SYSTEM_ACCESS_MODE_IN_SYS 2
+#define V_00016C_SYSTEM_ACCESS_MODE_NOT_IN_SYS 3
+#define S_00016C_SYSTEM_APERTURE_UNMAPPED_ACCESS(x) (((x) & 0x1) << 10)
+#define G_00016C_SYSTEM_APERTURE_UNMAPPED_ACCESS(x) (((x) >> 10) & 0x1)
+#define C_00016C_SYSTEM_APERTURE_UNMAPPED_ACCESS 0xFFFFFBFF
+#define V_00016C_SYSTEM_APERTURE_UNMAPPED_PASSTHROUGH 0
+#define V_00016C_SYSTEM_APERTURE_UNMAPPED_DEFAULT_PAGE 1
+#define S_00016C_EFFECTIVE_L1_CACHE_SIZE(x) (((x) & 0x7) << 11)
+#define G_00016C_EFFECTIVE_L1_CACHE_SIZE(x) (((x) >> 11) & 0x7)
+#define C_00016C_EFFECTIVE_L1_CACHE_SIZE 0xFFFFC7FF
+#define S_00016C_ENABLE_FRAGMENT_PROCESSING(x) (((x) & 0x1) << 14)
+#define G_00016C_ENABLE_FRAGMENT_PROCESSING(x) (((x) >> 14) & 0x1)
+#define C_00016C_ENABLE_FRAGMENT_PROCESSING 0xFFFFBFFF
+#define S_00016C_EFFECTIVE_L1_QUEUE_SIZE(x) (((x) & 0x7) << 15)
+#define G_00016C_EFFECTIVE_L1_QUEUE_SIZE(x) (((x) >> 15) & 0x7)
+#define C_00016C_EFFECTIVE_L1_QUEUE_SIZE 0xFFFC7FFF
+#define S_00016C_INVALIDATE_L1_TLB(x) (((x) & 0x1) << 20)
+#define G_00016C_INVALIDATE_L1_TLB(x) (((x) >> 20) & 0x1)
+#define C_00016C_INVALIDATE_L1_TLB 0xFFEFFFFF
+
+#endif
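
Editor's note: rs600d.h follows the register-header convention used throughout these new *d.h files: R_xxx is the register offset, S_xxx_FIELD(x) shifts a value into the field, G_xxx_FIELD(x) extracts it, C_xxx_FIELD is the complement mask used to clear the field, and V_xxx_* are named field values. A short read-modify-write sketch using the macros defined above (illustrative only, not code from this patch; RREG32()/WREG32() are the driver's usual MMIO accessors):

/* Sketch: enable the software interrupt bit using the rs600d.h helpers. */
static void rs600_enable_sw_irq_example(struct radeon_device *rdev)
{
	u32 tmp = RREG32(R_000040_GEN_INT_CNTL);

	if (!G_000040_SW_INT_EN(tmp)) {		/* G_: extract the field */
		tmp &= C_000040_SW_INT_EN;	/* C_: clear the field */
		tmp |= S_000040_SW_INT_EN(1);	/* S_: shift the new value in */
		WREG32(R_000040_GEN_INT_CNTL, tmp);
	}
}
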
diff --git a/drivers/gpu/drm/radeon/rs690.c b/drivers/gpu/drm/radeon/rs690.c
index 7a0098ddf97..27547175cf9 100644
--- a/drivers/gpu/drm/radeon/rs690.c
+++ b/drivers/gpu/drm/radeon/rs690.c
@@ -26,105 +26,29 @@
* Jerome Glisse
*/
#include "drmP.h"
-#include "radeon_reg.h"
#include "radeon.h"
-#include "rs690r.h"
#include "atom.h"
-#include "atom-bits.h"
-
-/* rs690,rs740 depends on : */
-void r100_hdp_reset(struct radeon_device *rdev);
-int r300_mc_wait_for_idle(struct radeon_device *rdev);
-void r420_pipes_init(struct radeon_device *rdev);
-void rs400_gart_disable(struct radeon_device *rdev);
-int rs400_gart_enable(struct radeon_device *rdev);
-void rs400_gart_adjust_size(struct radeon_device *rdev);
-void rs600_mc_disable_clients(struct radeon_device *rdev);
-
-/* This files gather functions specifics to :
- * rs690,rs740
- *
- * Some of these functions might be used by newer ASICs.
- */
-void rs690_gpu_init(struct radeon_device *rdev);
-int rs690_mc_wait_for_idle(struct radeon_device *rdev);
-
-
-/*
- * MC functions.
- */
-int rs690_mc_init(struct radeon_device *rdev)
-{
- uint32_t tmp;
- int r;
-
- if (r100_debugfs_rbbm_init(rdev)) {
- DRM_ERROR("Failed to register debugfs file for RBBM !\n");
- }
-
- rs690_gpu_init(rdev);
- rs400_gart_disable(rdev);
-
- /* Setup GPU memory space */
- rdev->mc.gtt_location = rdev->mc.mc_vram_size;
- rdev->mc.gtt_location += (rdev->mc.gtt_size - 1);
- rdev->mc.gtt_location &= ~(rdev->mc.gtt_size - 1);
- rdev->mc.vram_location = 0xFFFFFFFFUL;
- r = radeon_mc_setup(rdev);
- if (r) {
- return r;
- }
-
- /* Program GPU memory space */
- rs600_mc_disable_clients(rdev);
- if (rs690_mc_wait_for_idle(rdev)) {
- printk(KERN_WARNING "Failed to wait MC idle while "
- "programming pipes. Bad things might happen.\n");
- }
- tmp = rdev->mc.vram_location + rdev->mc.mc_vram_size - 1;
- tmp = REG_SET(RS690_MC_FB_TOP, tmp >> 16);
- tmp |= REG_SET(RS690_MC_FB_START, rdev->mc.vram_location >> 16);
- WREG32_MC(RS690_MCCFG_FB_LOCATION, tmp);
- /* FIXME: Does this reg exist on RS480,RS740 ? */
- WREG32(0x310, rdev->mc.vram_location);
- WREG32(RS690_HDP_FB_LOCATION, rdev->mc.vram_location >> 16);
- return 0;
-}
-
-void rs690_mc_fini(struct radeon_device *rdev)
-{
-}
-
+#include "rs690d.h"
-/*
- * Global GPU functions
- */
-int rs690_mc_wait_for_idle(struct radeon_device *rdev)
+static int rs690_mc_wait_for_idle(struct radeon_device *rdev)
{
unsigned i;
uint32_t tmp;
for (i = 0; i < rdev->usec_timeout; i++) {
/* read MC_STATUS */
- tmp = RREG32_MC(RS690_MC_STATUS);
- if (tmp & RS690_MC_STATUS_IDLE) {
+ tmp = RREG32_MC(R_000090_MC_SYSTEM_STATUS);
+ if (G_000090_MC_SYSTEM_IDLE(tmp))
return 0;
- }
- DRM_UDELAY(1);
+ udelay(1);
}
return -1;
}
-void rs690_errata(struct radeon_device *rdev)
-{
- rdev->pll_errata = 0;
-}
-
-void rs690_gpu_init(struct radeon_device *rdev)
+static void rs690_gpu_init(struct radeon_device *rdev)
{
/* FIXME: HDP same place on rs690 ? */
r100_hdp_reset(rdev);
- rv515_vga_render_disable(rdev);
/* FIXME: is this correct ? */
r420_pipes_init(rdev);
if (rs690_mc_wait_for_idle(rdev)) {
@@ -133,10 +57,6 @@ void rs690_gpu_init(struct radeon_device *rdev)
}
}
-
-/*
- * VRAM info.
- */
void rs690_pm_info(struct radeon_device *rdev)
{
int index = GetIndexIntoMasterTable(DATA, IntegratedSystemInfo);
@@ -250,39 +170,39 @@ void rs690_line_buffer_adjust(struct radeon_device *rdev,
/*
* Line Buffer Setup
* There is a single line buffer shared by both display controllers.
- * DC_LB_MEMORY_SPLIT controls how that line buffer is shared between
+ * R_006520_DC_LB_MEMORY_SPLIT controls how that line buffer is shared between
 * the display controllers. The partitioning can either be done
* manually or via one of four preset allocations specified in bits 1:0:
* 0 - line buffer is divided in half and shared between crtc
* 1 - D1 gets 3/4 of the line buffer, D2 gets 1/4
* 2 - D1 gets the whole buffer
* 3 - D1 gets 1/4 of the line buffer, D2 gets 3/4
- * Setting bit 2 of DC_LB_MEMORY_SPLIT controls switches to manual
+ * Setting bit 2 of R_006520_DC_LB_MEMORY_SPLIT switches to manual
* allocation mode. In manual allocation mode, D1 always starts at 0,
* D1 end/2 is specified in bits 14:4; D2 allocation follows D1.
*/
- tmp = RREG32(DC_LB_MEMORY_SPLIT) & ~DC_LB_MEMORY_SPLIT_MASK;
- tmp &= ~DC_LB_MEMORY_SPLIT_SHIFT_MODE;
+ tmp = RREG32(R_006520_DC_LB_MEMORY_SPLIT) & C_006520_DC_LB_MEMORY_SPLIT;
+ tmp &= ~C_006520_DC_LB_MEMORY_SPLIT_MODE;
/* auto */
if (mode1 && mode2) {
if (mode1->hdisplay > mode2->hdisplay) {
if (mode1->hdisplay > 2560)
- tmp |= DC_LB_MEMORY_SPLIT_D1_3Q_D2_1Q;
+ tmp |= V_006520_DC_LB_MEMORY_SPLIT_D1_3Q_D2_1Q;
else
- tmp |= DC_LB_MEMORY_SPLIT_D1HALF_D2HALF;
+ tmp |= V_006520_DC_LB_MEMORY_SPLIT_D1HALF_D2HALF;
} else if (mode2->hdisplay > mode1->hdisplay) {
if (mode2->hdisplay > 2560)
- tmp |= DC_LB_MEMORY_SPLIT_D1_1Q_D2_3Q;
+ tmp |= V_006520_DC_LB_MEMORY_SPLIT_D1_1Q_D2_3Q;
else
- tmp |= DC_LB_MEMORY_SPLIT_D1HALF_D2HALF;
+ tmp |= V_006520_DC_LB_MEMORY_SPLIT_D1HALF_D2HALF;
} else
- tmp |= AVIVO_DC_LB_MEMORY_SPLIT_D1HALF_D2HALF;
+ tmp |= V_006520_DC_LB_MEMORY_SPLIT_D1HALF_D2HALF;
} else if (mode1) {
- tmp |= DC_LB_MEMORY_SPLIT_D1_ONLY;
+ tmp |= V_006520_DC_LB_MEMORY_SPLIT_D1_ONLY;
} else if (mode2) {
- tmp |= DC_LB_MEMORY_SPLIT_D1_1Q_D2_3Q;
+ tmp |= V_006520_DC_LB_MEMORY_SPLIT_D1_1Q_D2_3Q;
}
- WREG32(DC_LB_MEMORY_SPLIT, tmp);
+ WREG32(R_006520_DC_LB_MEMORY_SPLIT, tmp);
}
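
Editor's note: the comment block above only exercises the four preset allocations; for reference, the manual partitioning path it describes (bit 2 plus the D1 end address in bits 14:4) would look roughly like the following sketch, built from the rs690d.h field helpers introduced later in this patch. The function name and the idea of programming it directly are illustrative assumptions, not code from the driver:

/* Sketch: manual line-buffer split; D1 gets the first d1_end * 2 entries,
 * D2 gets the rest. The driver above uses the preset allocations instead. */
static void rs690_lb_manual_split_example(struct radeon_device *rdev, u32 d1_end)
{
	u32 tmp = RREG32(R_006520_DC_LB_MEMORY_SPLIT);

	tmp &= C_006520_DC_LB_DISP1_END_ADR;		/* clear old D1 end */
	tmp |= S_006520_DC_LB_MEMORY_SPLIT_MODE(1);	/* bit 2: manual mode */
	tmp |= S_006520_DC_LB_DISP1_END_ADR(d1_end);	/* D1 end / 2, bits 14:4 */
	WREG32(R_006520_DC_LB_MEMORY_SPLIT, tmp);
}
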
struct rs690_watermark {
@@ -487,28 +407,28 @@ void rs690_bandwidth_update(struct radeon_device *rdev)
* option.
*/
if (rdev->disp_priority == 2) {
- tmp = RREG32_MC(MC_INIT_MISC_LAT_TIMER);
- tmp &= ~MC_DISP1R_INIT_LAT_MASK;
- tmp &= ~MC_DISP0R_INIT_LAT_MASK;
- if (mode1)
- tmp |= (1 << MC_DISP1R_INIT_LAT_SHIFT);
+ tmp = RREG32_MC(R_000104_MC_INIT_MISC_LAT_TIMER);
+ tmp &= C_000104_MC_DISP0R_INIT_LAT;
+ tmp &= C_000104_MC_DISP1R_INIT_LAT;
if (mode0)
- tmp |= (1 << MC_DISP0R_INIT_LAT_SHIFT);
- WREG32_MC(MC_INIT_MISC_LAT_TIMER, tmp);
+ tmp |= S_000104_MC_DISP0R_INIT_LAT(1);
+ if (mode1)
+ tmp |= S_000104_MC_DISP1R_INIT_LAT(1);
+ WREG32_MC(R_000104_MC_INIT_MISC_LAT_TIMER, tmp);
}
rs690_line_buffer_adjust(rdev, mode0, mode1);
if ((rdev->family == CHIP_RS690) || (rdev->family == CHIP_RS740))
- WREG32(DCP_CONTROL, 0);
+ WREG32(R_006C9C_DCP_CONTROL, 0);
if ((rdev->family == CHIP_RS780) || (rdev->family == CHIP_RS880))
- WREG32(DCP_CONTROL, 2);
+ WREG32(R_006C9C_DCP_CONTROL, 2);
rs690_crtc_bandwidth_compute(rdev, rdev->mode_info.crtcs[0], &wm0);
rs690_crtc_bandwidth_compute(rdev, rdev->mode_info.crtcs[1], &wm1);
tmp = (wm0.lb_request_fifo_depth - 1);
tmp |= (wm1.lb_request_fifo_depth - 1) << 16;
- WREG32(LB_MAX_REQ_OUTSTANDING, tmp);
+ WREG32(R_006D58_LB_MAX_REQ_OUTSTANDING, tmp);
if (mode0 && mode1) {
if (rfixed_trunc(wm0.dbpp) > 64)
@@ -561,10 +481,10 @@ void rs690_bandwidth_update(struct radeon_device *rdev)
priority_mark12.full = 0;
if (wm1.priority_mark_max.full > priority_mark12.full)
priority_mark12.full = wm1.priority_mark_max.full;
- WREG32(D1MODE_PRIORITY_A_CNT, rfixed_trunc(priority_mark02));
- WREG32(D1MODE_PRIORITY_B_CNT, rfixed_trunc(priority_mark02));
- WREG32(D2MODE_PRIORITY_A_CNT, rfixed_trunc(priority_mark12));
- WREG32(D2MODE_PRIORITY_B_CNT, rfixed_trunc(priority_mark12));
+ WREG32(R_006548_D1MODE_PRIORITY_A_CNT, rfixed_trunc(priority_mark02));
+ WREG32(R_00654C_D1MODE_PRIORITY_B_CNT, rfixed_trunc(priority_mark02));
+ WREG32(R_006D48_D2MODE_PRIORITY_A_CNT, rfixed_trunc(priority_mark12));
+ WREG32(R_006D4C_D2MODE_PRIORITY_B_CNT, rfixed_trunc(priority_mark12));
} else if (mode0) {
if (rfixed_trunc(wm0.dbpp) > 64)
a.full = rfixed_mul(wm0.dbpp, wm0.num_line_pair);
@@ -591,10 +511,12 @@ void rs690_bandwidth_update(struct radeon_device *rdev)
priority_mark02.full = 0;
if (wm0.priority_mark_max.full > priority_mark02.full)
priority_mark02.full = wm0.priority_mark_max.full;
- WREG32(D1MODE_PRIORITY_A_CNT, rfixed_trunc(priority_mark02));
- WREG32(D1MODE_PRIORITY_B_CNT, rfixed_trunc(priority_mark02));
- WREG32(D2MODE_PRIORITY_A_CNT, MODE_PRIORITY_OFF);
- WREG32(D2MODE_PRIORITY_B_CNT, MODE_PRIORITY_OFF);
+ WREG32(R_006548_D1MODE_PRIORITY_A_CNT, rfixed_trunc(priority_mark02));
+ WREG32(R_00654C_D1MODE_PRIORITY_B_CNT, rfixed_trunc(priority_mark02));
+ WREG32(R_006D48_D2MODE_PRIORITY_A_CNT,
+ S_006D48_D2MODE_PRIORITY_A_OFF(1));
+ WREG32(R_006D4C_D2MODE_PRIORITY_B_CNT,
+ S_006D4C_D2MODE_PRIORITY_B_OFF(1));
} else {
if (rfixed_trunc(wm1.dbpp) > 64)
a.full = rfixed_mul(wm1.dbpp, wm1.num_line_pair);
@@ -621,30 +543,205 @@ void rs690_bandwidth_update(struct radeon_device *rdev)
priority_mark12.full = 0;
if (wm1.priority_mark_max.full > priority_mark12.full)
priority_mark12.full = wm1.priority_mark_max.full;
- WREG32(D1MODE_PRIORITY_A_CNT, MODE_PRIORITY_OFF);
- WREG32(D1MODE_PRIORITY_B_CNT, MODE_PRIORITY_OFF);
- WREG32(D2MODE_PRIORITY_A_CNT, rfixed_trunc(priority_mark12));
- WREG32(D2MODE_PRIORITY_B_CNT, rfixed_trunc(priority_mark12));
+ WREG32(R_006548_D1MODE_PRIORITY_A_CNT,
+ S_006548_D1MODE_PRIORITY_A_OFF(1));
+ WREG32(R_00654C_D1MODE_PRIORITY_B_CNT,
+ S_00654C_D1MODE_PRIORITY_B_OFF(1));
+ WREG32(R_006D48_D2MODE_PRIORITY_A_CNT, rfixed_trunc(priority_mark12));
+ WREG32(R_006D4C_D2MODE_PRIORITY_B_CNT, rfixed_trunc(priority_mark12));
}
}
-/*
- * Indirect registers accessor
- */
uint32_t rs690_mc_rreg(struct radeon_device *rdev, uint32_t reg)
{
uint32_t r;
- WREG32(RS690_MC_INDEX, (reg & RS690_MC_INDEX_MASK));
- r = RREG32(RS690_MC_DATA);
- WREG32(RS690_MC_INDEX, RS690_MC_INDEX_MASK);
+ WREG32(R_000078_MC_INDEX, S_000078_MC_IND_ADDR(reg));
+ r = RREG32(R_00007C_MC_DATA);
+ WREG32(R_000078_MC_INDEX, ~C_000078_MC_IND_ADDR);
return r;
}
void rs690_mc_wreg(struct radeon_device *rdev, uint32_t reg, uint32_t v)
{
- WREG32(RS690_MC_INDEX,
- RS690_MC_INDEX_WR_EN | ((reg) & RS690_MC_INDEX_MASK));
- WREG32(RS690_MC_DATA, v);
- WREG32(RS690_MC_INDEX, RS690_MC_INDEX_WR_ACK);
+ WREG32(R_000078_MC_INDEX, S_000078_MC_IND_ADDR(reg) |
+ S_000078_MC_IND_WR_EN(1));
+ WREG32(R_00007C_MC_DATA, v);
+ WREG32(R_000078_MC_INDEX, 0x7F);
+}
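+
+Editor's note: rs690_mc_rreg()/rs690_mc_wreg() above implement the usual
+index/data pair for the indirect MC register space; callers never touch
+MC_INDEX/MC_DATA directly but go through the RREG32_MC()/WREG32_MC()
+wrappers, as the bandwidth code earlier in this file does. A hedged sketch
+of such a caller, using field macros from rs690d.h (the function name and
+the choice of latency value are illustrative, not from this patch):
+
+/* Sketch: field update on an indirect MC register via the accessors above. */
+static void rs690_set_disp0_latency_example(struct radeon_device *rdev, u32 lat)
+{
+	u32 tmp = RREG32_MC(R_000104_MC_INIT_MISC_LAT_TIMER);
+
+	tmp &= C_000104_MC_DISP0R_INIT_LAT;		/* clear old latency */
+	tmp |= S_000104_MC_DISP0R_INIT_LAT(lat);	/* program the new one */
+	WREG32_MC(R_000104_MC_INIT_MISC_LAT_TIMER, tmp);
+}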
+
+void rs690_mc_program(struct radeon_device *rdev)
+{
+ struct rv515_mc_save save;
+
+ /* Stop all MC clients */
+ rv515_mc_stop(rdev, &save);
+
+ /* Wait for mc idle */
+ if (rs690_mc_wait_for_idle(rdev))
+ dev_warn(rdev->dev, "Wait MC idle timeout before updating MC.\n");
+ /* Program MC; the address space should be limited to 32 bits */
+ WREG32_MC(R_000100_MCCFG_FB_LOCATION,
+ S_000100_MC_FB_START(rdev->mc.vram_start >> 16) |
+ S_000100_MC_FB_TOP(rdev->mc.vram_end >> 16));
+ WREG32(R_000134_HDP_FB_LOCATION,
+ S_000134_HDP_FB_START(rdev->mc.vram_start >> 16));
+
+ rv515_mc_resume(rdev, &save);
+}
+
+static int rs690_startup(struct radeon_device *rdev)
+{
+ int r;
+
+ rs690_mc_program(rdev);
+ /* Resume clock */
+ rv515_clock_startup(rdev);
+ /* Initialize GPU configuration (# pipes, ...) */
+ rs690_gpu_init(rdev);
+ /* Initialize GART (initialize after TTM so we can allocate
+ * memory through TTM but finalize after TTM) */
+ r = rs400_gart_enable(rdev);
+ if (r)
+ return r;
+ /* Enable IRQ */
+ rdev->irq.sw_int = true;
+ rs600_irq_set(rdev);
+ /* 1M ring buffer */
+ r = r100_cp_init(rdev, 1024 * 1024);
+ if (r) {
+ dev_err(rdev->dev, "failled initializing CP (%d).\n", r);
+ return r;
+ }
+ r = r100_wb_init(rdev);
+ if (r)
+ dev_err(rdev->dev, "failled initializing WB (%d).\n", r);
+ r = r100_ib_init(rdev);
+ if (r) {
+ dev_err(rdev->dev, "failled initializing IB (%d).\n", r);
+ return r;
+ }
+ return 0;
+}
+
+int rs690_resume(struct radeon_device *rdev)
+{
+ /* Make sure GART is not working */
+ rs400_gart_disable(rdev);
+ /* Resume clock before doing reset */
+ rv515_clock_startup(rdev);
+ /* Reset gpu before posting otherwise ATOM will enter infinite loop */
+ if (radeon_gpu_reset(rdev)) {
+ dev_warn(rdev->dev, "GPU reset failed ! (0xE40=0x%08X, 0x7C0=0x%08X)\n",
+ RREG32(R_000E40_RBBM_STATUS),
+ RREG32(R_0007C0_CP_STAT));
+ }
+ /* post */
+ atom_asic_init(rdev->mode_info.atom_context);
+ /* Resume clock after posting */
+ rv515_clock_startup(rdev);
+ return rs690_startup(rdev);
+}
+
+int rs690_suspend(struct radeon_device *rdev)
+{
+ r100_cp_disable(rdev);
+ r100_wb_disable(rdev);
+ rs600_irq_disable(rdev);
+ rs400_gart_disable(rdev);
+ return 0;
+}
+
+void rs690_fini(struct radeon_device *rdev)
+{
+ rs690_suspend(rdev);
+ r100_cp_fini(rdev);
+ r100_wb_fini(rdev);
+ r100_ib_fini(rdev);
+ radeon_gem_fini(rdev);
+ rs400_gart_fini(rdev);
+ radeon_irq_kms_fini(rdev);
+ radeon_fence_driver_fini(rdev);
+ radeon_object_fini(rdev);
+ radeon_atombios_fini(rdev);
+ kfree(rdev->bios);
+ rdev->bios = NULL;
+}
+
+int rs690_init(struct radeon_device *rdev)
+{
+ int r;
+
+ /* Disable VGA */
+ rv515_vga_render_disable(rdev);
+ /* Initialize scratch registers */
+ radeon_scratch_init(rdev);
+ /* Initialize surface registers */
+ radeon_surface_init(rdev);
+ /* TODO: disable VGA; need to use VGA request */
+ /* BIOS */
+ if (!radeon_get_bios(rdev)) {
+ if (ASIC_IS_AVIVO(rdev))
+ return -EINVAL;
+ }
+ if (rdev->is_atom_bios) {
+ r = radeon_atombios_init(rdev);
+ if (r)
+ return r;
+ } else {
+ dev_err(rdev->dev, "Expecting atombios for RV515 GPU\n");
+ return -EINVAL;
+ }
+ /* Reset gpu before posting otherwise ATOM will enter infinite loop */
+ if (radeon_gpu_reset(rdev)) {
+ dev_warn(rdev->dev,
+ "GPU reset failed ! (0xE40=0x%08X, 0x7C0=0x%08X)\n",
+ RREG32(R_000E40_RBBM_STATUS),
+ RREG32(R_0007C0_CP_STAT));
+ }
+ /* check if cards are posted or not */
+ if (!radeon_card_posted(rdev) && rdev->bios) {
+ DRM_INFO("GPU not posted. posting now...\n");
+ atom_asic_init(rdev->mode_info.atom_context);
+ }
+ /* Initialize clocks */
+ radeon_get_clock_info(rdev->ddev);
+ /* Initialize power management */
+ radeon_pm_init(rdev);
+ /* Get VRAM information */
+ rs690_vram_info(rdev);
+ /* Initialize memory controller (also test AGP) */
+ r = r420_mc_init(rdev);
+ if (r)
+ return r;
+ rv515_debugfs(rdev);
+ /* Fence driver */
+ r = radeon_fence_driver_init(rdev);
+ if (r)
+ return r;
+ r = radeon_irq_kms_init(rdev);
+ if (r)
+ return r;
+ /* Memory manager */
+ r = radeon_object_init(rdev);
+ if (r)
+ return r;
+ r = rs400_gart_init(rdev);
+ if (r)
+ return r;
+ rs600_set_safe_registers(rdev);
+ rdev->accel_working = true;
+ r = rs690_startup(rdev);
+ if (r) {
+ /* Something went wrong with the accel init, stop accel */
+ dev_err(rdev->dev, "Disabling GPU acceleration\n");
+ rs690_suspend(rdev);
+ r100_cp_fini(rdev);
+ r100_wb_fini(rdev);
+ r100_ib_fini(rdev);
+ rs400_gart_fini(rdev);
+ radeon_irq_kms_fini(rdev);
+ rdev->accel_working = false;
+ }
+ return 0;
}
diff --git a/drivers/gpu/drm/radeon/rs690d.h b/drivers/gpu/drm/radeon/rs690d.h
new file mode 100644
index 00000000000..62d31e7a897
--- /dev/null
+++ b/drivers/gpu/drm/radeon/rs690d.h
@@ -0,0 +1,307 @@
+/*
+ * Copyright 2008 Advanced Micro Devices, Inc.
+ * Copyright 2008 Red Hat Inc.
+ * Copyright 2009 Jerome Glisse.
+ *
+ * Permission is hereby granted, free of charge, to any person obtaining a
+ * copy of this software and associated documentation files (the "Software"),
+ * to deal in the Software without restriction, including without limitation
+ * the rights to use, copy, modify, merge, publish, distribute, sublicense,
+ * and/or sell copies of the Software, and to permit persons to whom the
+ * Software is furnished to do so, subject to the following conditions:
+ *
+ * The above copyright notice and this permission notice shall be included in
+ * all copies or substantial portions of the Software.
+ *
+ * THE SOFTWARE IS PROVIDED "AS IS", WITHOUT WARRANTY OF ANY KIND, EXPRESS OR
+ * IMPLIED, INCLUDING BUT NOT LIMITED TO THE WARRANTIES OF MERCHANTABILITY,
+ * FITNESS FOR A PARTICULAR PURPOSE AND NONINFRINGEMENT. IN NO EVENT SHALL
+ * THE COPYRIGHT HOLDER(S) OR AUTHOR(S) BE LIABLE FOR ANY CLAIM, DAMAGES OR
+ * OTHER LIABILITY, WHETHER IN AN ACTION OF CONTRACT, TORT OR OTHERWISE,
+ * ARISING FROM, OUT OF OR IN CONNECTION WITH THE SOFTWARE OR THE USE OR
+ * OTHER DEALINGS IN THE SOFTWARE.
+ *
+ * Authors: Dave Airlie
+ * Alex Deucher
+ * Jerome Glisse
+ */
+#ifndef __RS690D_H__
+#define __RS690D_H__
+
+/* Registers */
+#define R_000078_MC_INDEX 0x000078
+#define S_000078_MC_IND_ADDR(x) (((x) & 0x1FF) << 0)
+#define G_000078_MC_IND_ADDR(x) (((x) >> 0) & 0x1FF)
+#define C_000078_MC_IND_ADDR 0xFFFFFE00
+#define S_000078_MC_IND_WR_EN(x) (((x) & 0x1) << 9)
+#define G_000078_MC_IND_WR_EN(x) (((x) >> 9) & 0x1)
+#define C_000078_MC_IND_WR_EN 0xFFFFFDFF
+#define R_00007C_MC_DATA 0x00007C
+#define S_00007C_MC_DATA(x) (((x) & 0xFFFFFFFF) << 0)
+#define G_00007C_MC_DATA(x) (((x) >> 0) & 0xFFFFFFFF)
+#define C_00007C_MC_DATA 0x00000000
+#define R_0000F8_CONFIG_MEMSIZE 0x0000F8
+#define S_0000F8_CONFIG_MEMSIZE(x) (((x) & 0xFFFFFFFF) << 0)
+#define G_0000F8_CONFIG_MEMSIZE(x) (((x) >> 0) & 0xFFFFFFFF)
+#define C_0000F8_CONFIG_MEMSIZE 0x00000000
+#define R_000134_HDP_FB_LOCATION 0x000134
+#define S_000134_HDP_FB_START(x) (((x) & 0xFFFF) << 0)
+#define G_000134_HDP_FB_START(x) (((x) >> 0) & 0xFFFF)
+#define C_000134_HDP_FB_START 0xFFFF0000
+#define R_0007C0_CP_STAT 0x0007C0
+#define S_0007C0_MRU_BUSY(x) (((x) & 0x1) << 0)
+#define G_0007C0_MRU_BUSY(x) (((x) >> 0) & 0x1)
+#define C_0007C0_MRU_BUSY 0xFFFFFFFE
+#define S_0007C0_MWU_BUSY(x) (((x) & 0x1) << 1)
+#define G_0007C0_MWU_BUSY(x) (((x) >> 1) & 0x1)
+#define C_0007C0_MWU_BUSY 0xFFFFFFFD
+#define S_0007C0_RSIU_BUSY(x) (((x) & 0x1) << 2)
+#define G_0007C0_RSIU_BUSY(x) (((x) >> 2) & 0x1)
+#define C_0007C0_RSIU_BUSY 0xFFFFFFFB
+#define S_0007C0_RCIU_BUSY(x) (((x) & 0x1) << 3)
+#define G_0007C0_RCIU_BUSY(x) (((x) >> 3) & 0x1)
+#define C_0007C0_RCIU_BUSY 0xFFFFFFF7
+#define S_0007C0_CSF_PRIMARY_BUSY(x) (((x) & 0x1) << 9)
+#define G_0007C0_CSF_PRIMARY_BUSY(x) (((x) >> 9) & 0x1)
+#define C_0007C0_CSF_PRIMARY_BUSY 0xFFFFFDFF
+#define S_0007C0_CSF_INDIRECT_BUSY(x) (((x) & 0x1) << 10)
+#define G_0007C0_CSF_INDIRECT_BUSY(x) (((x) >> 10) & 0x1)
+#define C_0007C0_CSF_INDIRECT_BUSY 0xFFFFFBFF
+#define S_0007C0_CSQ_PRIMARY_BUSY(x) (((x) & 0x1) << 11)
+#define G_0007C0_CSQ_PRIMARY_BUSY(x) (((x) >> 11) & 0x1)
+#define C_0007C0_CSQ_PRIMARY_BUSY 0xFFFFF7FF
+#define S_0007C0_CSQ_INDIRECT_BUSY(x) (((x) & 0x1) << 12)
+#define G_0007C0_CSQ_INDIRECT_BUSY(x) (((x) >> 12) & 0x1)
+#define C_0007C0_CSQ_INDIRECT_BUSY 0xFFFFEFFF
+#define S_0007C0_CSI_BUSY(x) (((x) & 0x1) << 13)
+#define G_0007C0_CSI_BUSY(x) (((x) >> 13) & 0x1)
+#define C_0007C0_CSI_BUSY 0xFFFFDFFF
+#define S_0007C0_CSF_INDIRECT2_BUSY(x) (((x) & 0x1) << 14)
+#define G_0007C0_CSF_INDIRECT2_BUSY(x) (((x) >> 14) & 0x1)
+#define C_0007C0_CSF_INDIRECT2_BUSY 0xFFFFBFFF
+#define S_0007C0_CSQ_INDIRECT2_BUSY(x) (((x) & 0x1) << 15)
+#define G_0007C0_CSQ_INDIRECT2_BUSY(x) (((x) >> 15) & 0x1)
+#define C_0007C0_CSQ_INDIRECT2_BUSY 0xFFFF7FFF
+#define S_0007C0_GUIDMA_BUSY(x) (((x) & 0x1) << 28)
+#define G_0007C0_GUIDMA_BUSY(x) (((x) >> 28) & 0x1)
+#define C_0007C0_GUIDMA_BUSY 0xEFFFFFFF
+#define S_0007C0_VIDDMA_BUSY(x) (((x) & 0x1) << 29)
+#define G_0007C0_VIDDMA_BUSY(x) (((x) >> 29) & 0x1)
+#define C_0007C0_VIDDMA_BUSY 0xDFFFFFFF
+#define S_0007C0_CMDSTRM_BUSY(x) (((x) & 0x1) << 30)
+#define G_0007C0_CMDSTRM_BUSY(x) (((x) >> 30) & 0x1)
+#define C_0007C0_CMDSTRM_BUSY 0xBFFFFFFF
+#define S_0007C0_CP_BUSY(x) (((x) & 0x1) << 31)
+#define G_0007C0_CP_BUSY(x) (((x) >> 31) & 0x1)
+#define C_0007C0_CP_BUSY 0x7FFFFFFF
+#define R_000E40_RBBM_STATUS 0x000E40
+#define S_000E40_CMDFIFO_AVAIL(x) (((x) & 0x7F) << 0)
+#define G_000E40_CMDFIFO_AVAIL(x) (((x) >> 0) & 0x7F)
+#define C_000E40_CMDFIFO_AVAIL 0xFFFFFF80
+#define S_000E40_HIRQ_ON_RBB(x) (((x) & 0x1) << 8)
+#define G_000E40_HIRQ_ON_RBB(x) (((x) >> 8) & 0x1)
+#define C_000E40_HIRQ_ON_RBB 0xFFFFFEFF
+#define S_000E40_CPRQ_ON_RBB(x) (((x) & 0x1) << 9)
+#define G_000E40_CPRQ_ON_RBB(x) (((x) >> 9) & 0x1)
+#define C_000E40_CPRQ_ON_RBB 0xFFFFFDFF
+#define S_000E40_CFRQ_ON_RBB(x) (((x) & 0x1) << 10)
+#define G_000E40_CFRQ_ON_RBB(x) (((x) >> 10) & 0x1)
+#define C_000E40_CFRQ_ON_RBB 0xFFFFFBFF
+#define S_000E40_HIRQ_IN_RTBUF(x) (((x) & 0x1) << 11)
+#define G_000E40_HIRQ_IN_RTBUF(x) (((x) >> 11) & 0x1)
+#define C_000E40_HIRQ_IN_RTBUF 0xFFFFF7FF
+#define S_000E40_CPRQ_IN_RTBUF(x) (((x) & 0x1) << 12)
+#define G_000E40_CPRQ_IN_RTBUF(x) (((x) >> 12) & 0x1)
+#define C_000E40_CPRQ_IN_RTBUF 0xFFFFEFFF
+#define S_000E40_CFRQ_IN_RTBUF(x) (((x) & 0x1) << 13)
+#define G_000E40_CFRQ_IN_RTBUF(x) (((x) >> 13) & 0x1)
+#define C_000E40_CFRQ_IN_RTBUF 0xFFFFDFFF
+#define S_000E40_CF_PIPE_BUSY(x) (((x) & 0x1) << 14)
+#define G_000E40_CF_PIPE_BUSY(x) (((x) >> 14) & 0x1)
+#define C_000E40_CF_PIPE_BUSY 0xFFFFBFFF
+#define S_000E40_ENG_EV_BUSY(x) (((x) & 0x1) << 15)
+#define G_000E40_ENG_EV_BUSY(x) (((x) >> 15) & 0x1)
+#define C_000E40_ENG_EV_BUSY 0xFFFF7FFF
+#define S_000E40_CP_CMDSTRM_BUSY(x) (((x) & 0x1) << 16)
+#define G_000E40_CP_CMDSTRM_BUSY(x) (((x) >> 16) & 0x1)
+#define C_000E40_CP_CMDSTRM_BUSY 0xFFFEFFFF
+#define S_000E40_E2_BUSY(x) (((x) & 0x1) << 17)
+#define G_000E40_E2_BUSY(x) (((x) >> 17) & 0x1)
+#define C_000E40_E2_BUSY 0xFFFDFFFF
+#define S_000E40_RB2D_BUSY(x) (((x) & 0x1) << 18)
+#define G_000E40_RB2D_BUSY(x) (((x) >> 18) & 0x1)
+#define C_000E40_RB2D_BUSY 0xFFFBFFFF
+#define S_000E40_RB3D_BUSY(x) (((x) & 0x1) << 19)
+#define G_000E40_RB3D_BUSY(x) (((x) >> 19) & 0x1)
+#define C_000E40_RB3D_BUSY 0xFFF7FFFF
+#define S_000E40_VAP_BUSY(x) (((x) & 0x1) << 20)
+#define G_000E40_VAP_BUSY(x) (((x) >> 20) & 0x1)
+#define C_000E40_VAP_BUSY 0xFFEFFFFF
+#define S_000E40_RE_BUSY(x) (((x) & 0x1) << 21)
+#define G_000E40_RE_BUSY(x) (((x) >> 21) & 0x1)
+#define C_000E40_RE_BUSY 0xFFDFFFFF
+#define S_000E40_TAM_BUSY(x) (((x) & 0x1) << 22)
+#define G_000E40_TAM_BUSY(x) (((x) >> 22) & 0x1)
+#define C_000E40_TAM_BUSY 0xFFBFFFFF
+#define S_000E40_TDM_BUSY(x) (((x) & 0x1) << 23)
+#define G_000E40_TDM_BUSY(x) (((x) >> 23) & 0x1)
+#define C_000E40_TDM_BUSY 0xFF7FFFFF
+#define S_000E40_PB_BUSY(x) (((x) & 0x1) << 24)
+#define G_000E40_PB_BUSY(x) (((x) >> 24) & 0x1)
+#define C_000E40_PB_BUSY 0xFEFFFFFF
+#define S_000E40_TIM_BUSY(x) (((x) & 0x1) << 25)
+#define G_000E40_TIM_BUSY(x) (((x) >> 25) & 0x1)
+#define C_000E40_TIM_BUSY 0xFDFFFFFF
+#define S_000E40_GA_BUSY(x) (((x) & 0x1) << 26)
+#define G_000E40_GA_BUSY(x) (((x) >> 26) & 0x1)
+#define C_000E40_GA_BUSY 0xFBFFFFFF
+#define S_000E40_CBA2D_BUSY(x) (((x) & 0x1) << 27)
+#define G_000E40_CBA2D_BUSY(x) (((x) >> 27) & 0x1)
+#define C_000E40_CBA2D_BUSY 0xF7FFFFFF
+#define S_000E40_GUI_ACTIVE(x) (((x) & 0x1) << 31)
+#define G_000E40_GUI_ACTIVE(x) (((x) >> 31) & 0x1)
+#define C_000E40_GUI_ACTIVE 0x7FFFFFFF
+#define R_006520_DC_LB_MEMORY_SPLIT 0x006520
+#define S_006520_DC_LB_MEMORY_SPLIT(x) (((x) & 0x3) << 0)
+#define G_006520_DC_LB_MEMORY_SPLIT(x) (((x) >> 0) & 0x3)
+#define C_006520_DC_LB_MEMORY_SPLIT 0xFFFFFFFC
+#define S_006520_DC_LB_MEMORY_SPLIT_MODE(x) (((x) & 0x1) << 2)
+#define G_006520_DC_LB_MEMORY_SPLIT_MODE(x) (((x) >> 2) & 0x1)
+#define C_006520_DC_LB_MEMORY_SPLIT_MODE 0xFFFFFFFB
+#define V_006520_DC_LB_MEMORY_SPLIT_D1HALF_D2HALF 0
+#define V_006520_DC_LB_MEMORY_SPLIT_D1_3Q_D2_1Q 1
+#define V_006520_DC_LB_MEMORY_SPLIT_D1_ONLY 2
+#define V_006520_DC_LB_MEMORY_SPLIT_D1_1Q_D2_3Q 3
+#define S_006520_DC_LB_DISP1_END_ADR(x) (((x) & 0x7FF) << 4)
+#define G_006520_DC_LB_DISP1_END_ADR(x) (((x) >> 4) & 0x7FF)
+#define C_006520_DC_LB_DISP1_END_ADR 0xFFFF800F
+#define R_006548_D1MODE_PRIORITY_A_CNT 0x006548
+#define S_006548_D1MODE_PRIORITY_MARK_A(x) (((x) & 0x7FFF) << 0)
+#define G_006548_D1MODE_PRIORITY_MARK_A(x) (((x) >> 0) & 0x7FFF)
+#define C_006548_D1MODE_PRIORITY_MARK_A 0xFFFF8000
+#define S_006548_D1MODE_PRIORITY_A_OFF(x) (((x) & 0x1) << 16)
+#define G_006548_D1MODE_PRIORITY_A_OFF(x) (((x) >> 16) & 0x1)
+#define C_006548_D1MODE_PRIORITY_A_OFF 0xFFFEFFFF
+#define S_006548_D1MODE_PRIORITY_A_FORCE_MASK(x) (((x) & 0x1) << 24)
+#define G_006548_D1MODE_PRIORITY_A_FORCE_MASK(x) (((x) >> 24) & 0x1)
+#define C_006548_D1MODE_PRIORITY_A_FORCE_MASK 0xFEFFFFFF
+#define R_00654C_D1MODE_PRIORITY_B_CNT 0x00654C
+#define S_00654C_D1MODE_PRIORITY_MARK_B(x) (((x) & 0x7FFF) << 0)
+#define G_00654C_D1MODE_PRIORITY_MARK_B(x) (((x) >> 0) & 0x7FFF)
+#define C_00654C_D1MODE_PRIORITY_MARK_B 0xFFFF8000
+#define S_00654C_D1MODE_PRIORITY_B_OFF(x) (((x) & 0x1) << 16)
+#define G_00654C_D1MODE_PRIORITY_B_OFF(x) (((x) >> 16) & 0x1)
+#define C_00654C_D1MODE_PRIORITY_B_OFF 0xFFFEFFFF
+#define S_00654C_D1MODE_PRIORITY_B_ALWAYS_ON(x) (((x) & 0x1) << 20)
+#define G_00654C_D1MODE_PRIORITY_B_ALWAYS_ON(x) (((x) >> 20) & 0x1)
+#define C_00654C_D1MODE_PRIORITY_B_ALWAYS_ON 0xFFEFFFFF
+#define S_00654C_D1MODE_PRIORITY_B_FORCE_MASK(x) (((x) & 0x1) << 24)
+#define G_00654C_D1MODE_PRIORITY_B_FORCE_MASK(x) (((x) >> 24) & 0x1)
+#define C_00654C_D1MODE_PRIORITY_B_FORCE_MASK 0xFEFFFFFF
+#define R_006C9C_DCP_CONTROL 0x006C9C
+#define R_006D48_D2MODE_PRIORITY_A_CNT 0x006D48
+#define S_006D48_D2MODE_PRIORITY_MARK_A(x) (((x) & 0x7FFF) << 0)
+#define G_006D48_D2MODE_PRIORITY_MARK_A(x) (((x) >> 0) & 0x7FFF)
+#define C_006D48_D2MODE_PRIORITY_MARK_A 0xFFFF8000
+#define S_006D48_D2MODE_PRIORITY_A_OFF(x) (((x) & 0x1) << 16)
+#define G_006D48_D2MODE_PRIORITY_A_OFF(x) (((x) >> 16) & 0x1)
+#define C_006D48_D2MODE_PRIORITY_A_OFF 0xFFFEFFFF
+#define S_006D48_D2MODE_PRIORITY_A_ALWAYS_ON(x) (((x) & 0x1) << 20)
+#define G_006D48_D2MODE_PRIORITY_A_ALWAYS_ON(x) (((x) >> 20) & 0x1)
+#define C_006D48_D2MODE_PRIORITY_A_ALWAYS_ON 0xFFEFFFFF
+#define S_006D48_D2MODE_PRIORITY_A_FORCE_MASK(x) (((x) & 0x1) << 24)
+#define G_006D48_D2MODE_PRIORITY_A_FORCE_MASK(x) (((x) >> 24) & 0x1)
+#define C_006D48_D2MODE_PRIORITY_A_FORCE_MASK 0xFEFFFFFF
+#define R_006D4C_D2MODE_PRIORITY_B_CNT 0x006D4C
+#define S_006D4C_D2MODE_PRIORITY_MARK_B(x) (((x) & 0x7FFF) << 0)
+#define G_006D4C_D2MODE_PRIORITY_MARK_B(x) (((x) >> 0) & 0x7FFF)
+#define C_006D4C_D2MODE_PRIORITY_MARK_B 0xFFFF8000
+#define S_006D4C_D2MODE_PRIORITY_B_OFF(x) (((x) & 0x1) << 16)
+#define G_006D4C_D2MODE_PRIORITY_B_OFF(x) (((x) >> 16) & 0x1)
+#define C_006D4C_D2MODE_PRIORITY_B_OFF 0xFFFEFFFF
+#define S_006D4C_D2MODE_PRIORITY_B_ALWAYS_ON(x) (((x) & 0x1) << 20)
+#define G_006D4C_D2MODE_PRIORITY_B_ALWAYS_ON(x) (((x) >> 20) & 0x1)
+#define C_006D4C_D2MODE_PRIORITY_B_ALWAYS_ON 0xFFEFFFFF
+#define S_006D4C_D2MODE_PRIORITY_B_FORCE_MASK(x) (((x) & 0x1) << 24)
+#define G_006D4C_D2MODE_PRIORITY_B_FORCE_MASK(x) (((x) >> 24) & 0x1)
+#define C_006D4C_D2MODE_PRIORITY_B_FORCE_MASK 0xFEFFFFFF
+#define R_006D58_LB_MAX_REQ_OUTSTANDING 0x006D58
+#define S_006D58_LB_D1_MAX_REQ_OUTSTANDING(x) (((x) & 0xF) << 0)
+#define G_006D58_LB_D1_MAX_REQ_OUTSTANDING(x) (((x) >> 0) & 0xF)
+#define C_006D58_LB_D1_MAX_REQ_OUTSTANDING 0xFFFFFFF0
+#define S_006D58_LB_D2_MAX_REQ_OUTSTANDING(x) (((x) & 0xF) << 16)
+#define G_006D58_LB_D2_MAX_REQ_OUTSTANDING(x) (((x) >> 16) & 0xF)
+#define C_006D58_LB_D2_MAX_REQ_OUTSTANDING 0xFFF0FFFF
+
+
+#define R_000090_MC_SYSTEM_STATUS 0x000090
+#define S_000090_MC_SYSTEM_IDLE(x) (((x) & 0x1) << 0)
+#define G_000090_MC_SYSTEM_IDLE(x) (((x) >> 0) & 0x1)
+#define C_000090_MC_SYSTEM_IDLE 0xFFFFFFFE
+#define S_000090_MC_SEQUENCER_IDLE(x) (((x) & 0x1) << 1)
+#define G_000090_MC_SEQUENCER_IDLE(x) (((x) >> 1) & 0x1)
+#define C_000090_MC_SEQUENCER_IDLE 0xFFFFFFFD
+#define S_000090_MC_ARBITER_IDLE(x) (((x) & 0x1) << 2)
+#define G_000090_MC_ARBITER_IDLE(x) (((x) >> 2) & 0x1)
+#define C_000090_MC_ARBITER_IDLE 0xFFFFFFFB
+#define S_000090_MC_SELECT_PM(x) (((x) & 0x1) << 3)
+#define G_000090_MC_SELECT_PM(x) (((x) >> 3) & 0x1)
+#define C_000090_MC_SELECT_PM 0xFFFFFFF7
+#define S_000090_RESERVED4(x) (((x) & 0xF) << 4)
+#define G_000090_RESERVED4(x) (((x) >> 4) & 0xF)
+#define C_000090_RESERVED4 0xFFFFFF0F
+#define S_000090_RESERVED8(x) (((x) & 0xF) << 8)
+#define G_000090_RESERVED8(x) (((x) >> 8) & 0xF)
+#define C_000090_RESERVED8 0xFFFFF0FF
+#define S_000090_RESERVED12(x) (((x) & 0xF) << 12)
+#define G_000090_RESERVED12(x) (((x) >> 12) & 0xF)
+#define C_000090_RESERVED12 0xFFFF0FFF
+#define S_000090_MCA_INIT_EXECUTED(x) (((x) & 0x1) << 16)
+#define G_000090_MCA_INIT_EXECUTED(x) (((x) >> 16) & 0x1)
+#define C_000090_MCA_INIT_EXECUTED 0xFFFEFFFF
+#define S_000090_MCA_IDLE(x) (((x) & 0x1) << 17)
+#define G_000090_MCA_IDLE(x) (((x) >> 17) & 0x1)
+#define C_000090_MCA_IDLE 0xFFFDFFFF
+#define S_000090_MCA_SEQ_IDLE(x) (((x) & 0x1) << 18)
+#define G_000090_MCA_SEQ_IDLE(x) (((x) >> 18) & 0x1)
+#define C_000090_MCA_SEQ_IDLE 0xFFFBFFFF
+#define S_000090_MCA_ARB_IDLE(x) (((x) & 0x1) << 19)
+#define G_000090_MCA_ARB_IDLE(x) (((x) >> 19) & 0x1)
+#define C_000090_MCA_ARB_IDLE 0xFFF7FFFF
+#define S_000090_RESERVED20(x) (((x) & 0xFFF) << 20)
+#define G_000090_RESERVED20(x) (((x) >> 20) & 0xFFF)
+#define C_000090_RESERVED20 0x000FFFFF
+#define R_000100_MCCFG_FB_LOCATION 0x000100
+#define S_000100_MC_FB_START(x) (((x) & 0xFFFF) << 0)
+#define G_000100_MC_FB_START(x) (((x) >> 0) & 0xFFFF)
+#define C_000100_MC_FB_START 0xFFFF0000
+#define S_000100_MC_FB_TOP(x) (((x) & 0xFFFF) << 16)
+#define G_000100_MC_FB_TOP(x) (((x) >> 16) & 0xFFFF)
+#define C_000100_MC_FB_TOP 0x0000FFFF
+#define R_000104_MC_INIT_MISC_LAT_TIMER 0x000104
+#define S_000104_MC_CPR_INIT_LAT(x) (((x) & 0xF) << 0)
+#define G_000104_MC_CPR_INIT_LAT(x) (((x) >> 0) & 0xF)
+#define C_000104_MC_CPR_INIT_LAT 0xFFFFFFF0
+#define S_000104_MC_VF_INIT_LAT(x) (((x) & 0xF) << 4)
+#define G_000104_MC_VF_INIT_LAT(x) (((x) >> 4) & 0xF)
+#define C_000104_MC_VF_INIT_LAT 0xFFFFFF0F
+#define S_000104_MC_DISP0R_INIT_LAT(x) (((x) & 0xF) << 8)
+#define G_000104_MC_DISP0R_INIT_LAT(x) (((x) >> 8) & 0xF)
+#define C_000104_MC_DISP0R_INIT_LAT 0xFFFFF0FF
+#define S_000104_MC_DISP1R_INIT_LAT(x) (((x) & 0xF) << 12)
+#define G_000104_MC_DISP1R_INIT_LAT(x) (((x) >> 12) & 0xF)
+#define C_000104_MC_DISP1R_INIT_LAT 0xFFFF0FFF
+#define S_000104_MC_FIXED_INIT_LAT(x) (((x) & 0xF) << 16)
+#define G_000104_MC_FIXED_INIT_LAT(x) (((x) >> 16) & 0xF)
+#define C_000104_MC_FIXED_INIT_LAT 0xFFF0FFFF
+#define S_000104_MC_E2R_INIT_LAT(x) (((x) & 0xF) << 20)
+#define G_000104_MC_E2R_INIT_LAT(x) (((x) >> 20) & 0xF)
+#define C_000104_MC_E2R_INIT_LAT 0xFF0FFFFF
+#define S_000104_SAME_PAGE_PRIO(x) (((x) & 0xF) << 24)
+#define G_000104_SAME_PAGE_PRIO(x) (((x) >> 24) & 0xF)
+#define C_000104_SAME_PAGE_PRIO 0xF0FFFFFF
+#define S_000104_MC_GLOBW_INIT_LAT(x) (((x) & 0xF) << 28)
+#define G_000104_MC_GLOBW_INIT_LAT(x) (((x) >> 28) & 0xF)
+#define C_000104_MC_GLOBW_INIT_LAT 0x0FFFFFFF
+
+#endif
diff --git a/drivers/gpu/drm/radeon/rs690r.h b/drivers/gpu/drm/radeon/rs690r.h
deleted file mode 100644
index c0d9faa2175..00000000000
--- a/drivers/gpu/drm/radeon/rs690r.h
+++ /dev/null
@@ -1,99 +0,0 @@
-/*
- * Copyright 2008 Advanced Micro Devices, Inc.
- * Copyright 2008 Red Hat Inc.
- * Copyright 2009 Jerome Glisse.
- *
- * Permission is hereby granted, free of charge, to any person obtaining a
- * copy of this software and associated documentation files (the "Software"),
- * to deal in the Software without restriction, including without limitation
- * the rights to use, copy, modify, merge, publish, distribute, sublicense,
- * and/or sell copies of the Software, and to permit persons to whom the
- * Software is furnished to do so, subject to the following conditions:
- *
- * The above copyright notice and this permission notice shall be included in
- * all copies or substantial portions of the Software.
- *
- * THE SOFTWARE IS PROVIDED "AS IS", WITHOUT WARRANTY OF ANY KIND, EXPRESS OR
- * IMPLIED, INCLUDING BUT NOT LIMITED TO THE WARRANTIES OF MERCHANTABILITY,
- * FITNESS FOR A PARTICULAR PURPOSE AND NONINFRINGEMENT. IN NO EVENT SHALL
- * THE COPYRIGHT HOLDER(S) OR AUTHOR(S) BE LIABLE FOR ANY CLAIM, DAMAGES OR
- * OTHER LIABILITY, WHETHER IN AN ACTION OF CONTRACT, TORT OR OTHERWISE,
- * ARISING FROM, OUT OF OR IN CONNECTION WITH THE SOFTWARE OR THE USE OR
- * OTHER DEALINGS IN THE SOFTWARE.
- *
- * Authors: Dave Airlie
- * Alex Deucher
- * Jerome Glisse
- */
-#ifndef RS690R_H
-#define RS690R_H
-
-/* RS690/RS740 registers */
-#define MC_INDEX 0x0078
-# define MC_INDEX_MASK 0x1FF
-# define MC_INDEX_WR_EN (1 << 9)
-# define MC_INDEX_WR_ACK 0x7F
-#define MC_DATA 0x007C
-#define HDP_FB_LOCATION 0x0134
-#define DC_LB_MEMORY_SPLIT 0x6520
-#define DC_LB_MEMORY_SPLIT_MASK 0x00000003
-#define DC_LB_MEMORY_SPLIT_SHIFT 0
-#define DC_LB_MEMORY_SPLIT_D1HALF_D2HALF 0
-#define DC_LB_MEMORY_SPLIT_D1_3Q_D2_1Q 1
-#define DC_LB_MEMORY_SPLIT_D1_ONLY 2
-#define DC_LB_MEMORY_SPLIT_D1_1Q_D2_3Q 3
-#define DC_LB_MEMORY_SPLIT_SHIFT_MODE (1 << 2)
-#define DC_LB_DISP1_END_ADR_SHIFT 4
-#define DC_LB_DISP1_END_ADR_MASK 0x00007FF0
-#define D1MODE_PRIORITY_A_CNT 0x6548
-#define MODE_PRIORITY_MARK_MASK 0x00007FFF
-#define MODE_PRIORITY_OFF (1 << 16)
-#define MODE_PRIORITY_ALWAYS_ON (1 << 20)
-#define MODE_PRIORITY_FORCE_MASK (1 << 24)
-#define D1MODE_PRIORITY_B_CNT 0x654C
-#define LB_MAX_REQ_OUTSTANDING 0x6D58
-#define LB_D1_MAX_REQ_OUTSTANDING_MASK 0x0000000F
-#define LB_D1_MAX_REQ_OUTSTANDING_SHIFT 0
-#define LB_D2_MAX_REQ_OUTSTANDING_MASK 0x000F0000
-#define LB_D2_MAX_REQ_OUTSTANDING_SHIFT 16
-#define DCP_CONTROL 0x6C9C
-#define D2MODE_PRIORITY_A_CNT 0x6D48
-#define D2MODE_PRIORITY_B_CNT 0x6D4C
-
-/* MC indirect registers */
-#define MC_STATUS_IDLE (1 << 0)
-#define MC_MISC_CNTL 0x18
-#define DISABLE_GTW (1 << 1)
-#define GART_INDEX_REG_EN (1 << 12)
-#define BLOCK_GFX_D3_EN (1 << 14)
-#define GART_FEATURE_ID 0x2B
-#define HANG_EN (1 << 11)
-#define TLB_ENABLE (1 << 18)
-#define P2P_ENABLE (1 << 19)
-#define GTW_LAC_EN (1 << 25)
-#define LEVEL2_GART (0 << 30)
-#define LEVEL1_GART (1 << 30)
-#define PDC_EN (1 << 31)
-#define GART_BASE 0x2C
-#define GART_CACHE_CNTRL 0x2E
-# define GART_CACHE_INVALIDATE (1 << 0)
-#define MC_STATUS 0x90
-#define MCCFG_FB_LOCATION 0x100
-#define MC_FB_START_MASK 0x0000FFFF
-#define MC_FB_START_SHIFT 0
-#define MC_FB_TOP_MASK 0xFFFF0000
-#define MC_FB_TOP_SHIFT 16
-#define MCCFG_AGP_LOCATION 0x101
-#define MC_AGP_START_MASK 0x0000FFFF
-#define MC_AGP_START_SHIFT 0
-#define MC_AGP_TOP_MASK 0xFFFF0000
-#define MC_AGP_TOP_SHIFT 16
-#define MCCFG_AGP_BASE 0x102
-#define MCCFG_AGP_BASE_2 0x103
-#define MC_INIT_MISC_LAT_TIMER 0x104
-#define MC_DISP0R_INIT_LAT_SHIFT 8
-#define MC_DISP0R_INIT_LAT_MASK 0x00000F00
-#define MC_DISP1R_INIT_LAT_SHIFT 12
-#define MC_DISP1R_INIT_LAT_MASK 0x0000F000
-
-#endif
diff --git a/drivers/gpu/drm/radeon/rv200d.h b/drivers/gpu/drm/radeon/rv200d.h
new file mode 100644
index 00000000000..c5b398330c2
--- /dev/null
+++ b/drivers/gpu/drm/radeon/rv200d.h
@@ -0,0 +1,36 @@
+/*
+ * Copyright 2008 Advanced Micro Devices, Inc.
+ * Copyright 2008 Red Hat Inc.
+ * Copyright 2009 Jerome Glisse.
+ *
+ * Permission is hereby granted, free of charge, to any person obtaining a
+ * copy of this software and associated documentation files (the "Software"),
+ * to deal in the Software without restriction, including without limitation
+ * the rights to use, copy, modify, merge, publish, distribute, sublicense,
+ * and/or sell copies of the Software, and to permit persons to whom the
+ * Software is furnished to do so, subject to the following conditions:
+ *
+ * The above copyright notice and this permission notice shall be included in
+ * all copies or substantial portions of the Software.
+ *
+ * THE SOFTWARE IS PROVIDED "AS IS", WITHOUT WARRANTY OF ANY KIND, EXPRESS OR
+ * IMPLIED, INCLUDING BUT NOT LIMITED TO THE WARRANTIES OF MERCHANTABILITY,
+ * FITNESS FOR A PARTICULAR PURPOSE AND NONINFRINGEMENT. IN NO EVENT SHALL
+ * THE COPYRIGHT HOLDER(S) OR AUTHOR(S) BE LIABLE FOR ANY CLAIM, DAMAGES OR
+ * OTHER LIABILITY, WHETHER IN AN ACTION OF CONTRACT, TORT OR OTHERWISE,
+ * ARISING FROM, OUT OF OR IN CONNECTION WITH THE SOFTWARE OR THE USE OR
+ * OTHER DEALINGS IN THE SOFTWARE.
+ *
+ * Authors: Dave Airlie
+ * Alex Deucher
+ * Jerome Glisse
+ */
+#ifndef __RV200D_H__
+#define __RV200D_H__
+
+#define R_00015C_AGP_BASE_2 0x00015C
+#define S_00015C_AGP_BASE_ADDR_2(x) (((x) & 0xF) << 0)
+#define G_00015C_AGP_BASE_ADDR_2(x) (((x) >> 0) & 0xF)
+#define C_00015C_AGP_BASE_ADDR_2 0xFFFFFFF0
+
+#endif
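
As in the other generated radeon *d.h headers, each field gets a set macro
(S_), a get macro (G_) and a clear mask (C_). A minimal read-modify-write
sketch using the AGP_BASE_2 field above, with rreg32()/wreg32() again assumed
as stand-ins for the driver's register accessors:

	/* Program the upper AGP base bits without disturbing the rest of the
	 * register; uint32_t from <stdint.h> is assumed. */
	static void set_agp_base_2(uint32_t upper_bits)
	{
		uint32_t tmp = rreg32(R_00015C_AGP_BASE_2);

		tmp &= C_00015C_AGP_BASE_ADDR_2;		/* clear the field */
		tmp |= S_00015C_AGP_BASE_ADDR_2(upper_bits);	/* insert new value */
		wreg32(R_00015C_AGP_BASE_2, tmp);
	}
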
diff --git a/drivers/gpu/drm/radeon/rv250d.h b/drivers/gpu/drm/radeon/rv250d.h
new file mode 100644
index 00000000000..e5a70b06fe1
--- /dev/null
+++ b/drivers/gpu/drm/radeon/rv250d.h
@@ -0,0 +1,123 @@
+/*
+ * Copyright 2008 Advanced Micro Devices, Inc.
+ * Copyright 2008 Red Hat Inc.
+ * Copyright 2009 Jerome Glisse.
+ *
+ * Permission is hereby granted, free of charge, to any person obtaining a
+ * copy of this software and associated documentation files (the "Software"),
+ * to deal in the Software without restriction, including without limitation
+ * the rights to use, copy, modify, merge, publish, distribute, sublicense,
+ * and/or sell copies of the Software, and to permit persons to whom the
+ * Software is furnished to do so, subject to the following conditions:
+ *
+ * The above copyright notice and this permission notice shall be included in
+ * all copies or substantial portions of the Software.
+ *
+ * THE SOFTWARE IS PROVIDED "AS IS", WITHOUT WARRANTY OF ANY KIND, EXPRESS OR
+ * IMPLIED, INCLUDING BUT NOT LIMITED TO THE WARRANTIES OF MERCHANTABILITY,
+ * FITNESS FOR A PARTICULAR PURPOSE AND NONINFRINGEMENT. IN NO EVENT SHALL
+ * THE COPYRIGHT HOLDER(S) OR AUTHOR(S) BE LIABLE FOR ANY CLAIM, DAMAGES OR
+ * OTHER LIABILITY, WHETHER IN AN ACTION OF CONTRACT, TORT OR OTHERWISE,
+ * ARISING FROM, OUT OF OR IN CONNECTION WITH THE SOFTWARE OR THE USE OR
+ * OTHER DEALINGS IN THE SOFTWARE.
+ *
+ * Authors: Dave Airlie
+ * Alex Deucher
+ * Jerome Glisse
+ */
+#ifndef __RV250D_H__
+#define __RV250D_H__
+
+#define R_00000D_SCLK_CNTL_M6 0x00000D
+#define S_00000D_SCLK_SRC_SEL(x) (((x) & 0x7) << 0)
+#define G_00000D_SCLK_SRC_SEL(x) (((x) >> 0) & 0x7)
+#define C_00000D_SCLK_SRC_SEL 0xFFFFFFF8
+#define S_00000D_CP_MAX_DYN_STOP_LAT(x) (((x) & 0x1) << 3)
+#define G_00000D_CP_MAX_DYN_STOP_LAT(x) (((x) >> 3) & 0x1)
+#define C_00000D_CP_MAX_DYN_STOP_LAT 0xFFFFFFF7
+#define S_00000D_HDP_MAX_DYN_STOP_LAT(x) (((x) & 0x1) << 4)
+#define G_00000D_HDP_MAX_DYN_STOP_LAT(x) (((x) >> 4) & 0x1)
+#define C_00000D_HDP_MAX_DYN_STOP_LAT 0xFFFFFFEF
+#define S_00000D_TV_MAX_DYN_STOP_LAT(x) (((x) & 0x1) << 5)
+#define G_00000D_TV_MAX_DYN_STOP_LAT(x) (((x) >> 5) & 0x1)
+#define C_00000D_TV_MAX_DYN_STOP_LAT 0xFFFFFFDF
+#define S_00000D_E2_MAX_DYN_STOP_LAT(x) (((x) & 0x1) << 6)
+#define G_00000D_E2_MAX_DYN_STOP_LAT(x) (((x) >> 6) & 0x1)
+#define C_00000D_E2_MAX_DYN_STOP_LAT 0xFFFFFFBF
+#define S_00000D_SE_MAX_DYN_STOP_LAT(x) (((x) & 0x1) << 7)
+#define G_00000D_SE_MAX_DYN_STOP_LAT(x) (((x) >> 7) & 0x1)
+#define C_00000D_SE_MAX_DYN_STOP_LAT 0xFFFFFF7F
+#define S_00000D_IDCT_MAX_DYN_STOP_LAT(x) (((x) & 0x1) << 8)
+#define G_00000D_IDCT_MAX_DYN_STOP_LAT(x) (((x) >> 8) & 0x1)
+#define C_00000D_IDCT_MAX_DYN_STOP_LAT 0xFFFFFEFF
+#define S_00000D_VIP_MAX_DYN_STOP_LAT(x) (((x) & 0x1) << 9)
+#define G_00000D_VIP_MAX_DYN_STOP_LAT(x) (((x) >> 9) & 0x1)
+#define C_00000D_VIP_MAX_DYN_STOP_LAT 0xFFFFFDFF
+#define S_00000D_RE_MAX_DYN_STOP_LAT(x) (((x) & 0x1) << 10)
+#define G_00000D_RE_MAX_DYN_STOP_LAT(x) (((x) >> 10) & 0x1)
+#define C_00000D_RE_MAX_DYN_STOP_LAT 0xFFFFFBFF
+#define S_00000D_PB_MAX_DYN_STOP_LAT(x) (((x) & 0x1) << 11)
+#define G_00000D_PB_MAX_DYN_STOP_LAT(x) (((x) >> 11) & 0x1)
+#define C_00000D_PB_MAX_DYN_STOP_LAT 0xFFFFF7FF
+#define S_00000D_TAM_MAX_DYN_STOP_LAT(x) (((x) & 0x1) << 12)
+#define G_00000D_TAM_MAX_DYN_STOP_LAT(x) (((x) >> 12) & 0x1)
+#define C_00000D_TAM_MAX_DYN_STOP_LAT 0xFFFFEFFF
+#define S_00000D_TDM_MAX_DYN_STOP_LAT(x) (((x) & 0x1) << 13)
+#define G_00000D_TDM_MAX_DYN_STOP_LAT(x) (((x) >> 13) & 0x1)
+#define C_00000D_TDM_MAX_DYN_STOP_LAT 0xFFFFDFFF
+#define S_00000D_RB_MAX_DYN_STOP_LAT(x) (((x) & 0x1) << 14)
+#define G_00000D_RB_MAX_DYN_STOP_LAT(x) (((x) >> 14) & 0x1)
+#define C_00000D_RB_MAX_DYN_STOP_LAT 0xFFFFBFFF
+#define S_00000D_FORCE_DISP2(x) (((x) & 0x1) << 15)
+#define G_00000D_FORCE_DISP2(x) (((x) >> 15) & 0x1)
+#define C_00000D_FORCE_DISP2 0xFFFF7FFF
+#define S_00000D_FORCE_CP(x) (((x) & 0x1) << 16)
+#define G_00000D_FORCE_CP(x) (((x) >> 16) & 0x1)
+#define C_00000D_FORCE_CP 0xFFFEFFFF
+#define S_00000D_FORCE_HDP(x) (((x) & 0x1) << 17)
+#define G_00000D_FORCE_HDP(x) (((x) >> 17) & 0x1)
+#define C_00000D_FORCE_HDP 0xFFFDFFFF
+#define S_00000D_FORCE_DISP1(x) (((x) & 0x1) << 18)
+#define G_00000D_FORCE_DISP1(x) (((x) >> 18) & 0x1)
+#define C_00000D_FORCE_DISP1 0xFFFBFFFF
+#define S_00000D_FORCE_TOP(x) (((x) & 0x1) << 19)
+#define G_00000D_FORCE_TOP(x) (((x) >> 19) & 0x1)
+#define C_00000D_FORCE_TOP 0xFFF7FFFF
+#define S_00000D_FORCE_E2(x) (((x) & 0x1) << 20)
+#define G_00000D_FORCE_E2(x) (((x) >> 20) & 0x1)
+#define C_00000D_FORCE_E2 0xFFEFFFFF
+#define S_00000D_FORCE_SE(x) (((x) & 0x1) << 21)
+#define G_00000D_FORCE_SE(x) (((x) >> 21) & 0x1)
+#define C_00000D_FORCE_SE 0xFFDFFFFF
+#define S_00000D_FORCE_IDCT(x) (((x) & 0x1) << 22)
+#define G_00000D_FORCE_IDCT(x) (((x) >> 22) & 0x1)
+#define C_00000D_FORCE_IDCT 0xFFBFFFFF
+#define S_00000D_FORCE_VIP(x) (((x) & 0x1) << 23)
+#define G_00000D_FORCE_VIP(x) (((x) >> 23) & 0x1)
+#define C_00000D_FORCE_VIP 0xFF7FFFFF
+#define S_00000D_FORCE_RE(x) (((x) & 0x1) << 24)
+#define G_00000D_FORCE_RE(x) (((x) >> 24) & 0x1)
+#define C_00000D_FORCE_RE 0xFEFFFFFF
+#define S_00000D_FORCE_PB(x) (((x) & 0x1) << 25)
+#define G_00000D_FORCE_PB(x) (((x) >> 25) & 0x1)
+#define C_00000D_FORCE_PB 0xFDFFFFFF
+#define S_00000D_FORCE_TAM(x) (((x) & 0x1) << 26)
+#define G_00000D_FORCE_TAM(x) (((x) >> 26) & 0x1)
+#define C_00000D_FORCE_TAM 0xFBFFFFFF
+#define S_00000D_FORCE_TDM(x) (((x) & 0x1) << 27)
+#define G_00000D_FORCE_TDM(x) (((x) >> 27) & 0x1)
+#define C_00000D_FORCE_TDM 0xF7FFFFFF
+#define S_00000D_FORCE_RB(x) (((x) & 0x1) << 28)
+#define G_00000D_FORCE_RB(x) (((x) >> 28) & 0x1)
+#define C_00000D_FORCE_RB 0xEFFFFFFF
+#define S_00000D_FORCE_TV_SCLK(x) (((x) & 0x1) << 29)
+#define G_00000D_FORCE_TV_SCLK(x) (((x) >> 29) & 0x1)
+#define C_00000D_FORCE_TV_SCLK 0xDFFFFFFF
+#define S_00000D_FORCE_SUBPIC(x) (((x) & 0x1) << 30)
+#define G_00000D_FORCE_SUBPIC(x) (((x) >> 30) & 0x1)
+#define C_00000D_FORCE_SUBPIC 0xBFFFFFFF
+#define S_00000D_FORCE_OV0(x) (((x) & 0x1) << 31)
+#define G_00000D_FORCE_OV0(x) (((x) >> 31) & 0x1)
+#define C_00000D_FORCE_OV0 0x7FFFFFFF
+
+#endif
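
The SCLK_CNTL_M6 bits above gate the various source clocks on RV250-class
parts. A hedged sketch of forcing the CP clock on with the same S_/G_/C_
pattern; pll_rreg()/pll_wreg() are hypothetical PLL-block accessors, assumed
here because this register sits behind the clock PLL index rather than plain
MMIO:

	static void force_cp_sclk(void)
	{
		uint32_t tmp = pll_rreg(R_00000D_SCLK_CNTL_M6);

		if (!G_00000D_FORCE_CP(tmp)) {
			tmp &= C_00000D_FORCE_CP;	/* clear, then set the bit */
			tmp |= S_00000D_FORCE_CP(1);
			pll_wreg(R_00000D_SCLK_CNTL_M6, tmp);
		}
	}
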
diff --git a/drivers/gpu/drm/radeon/rv350d.h b/drivers/gpu/drm/radeon/rv350d.h
new file mode 100644
index 00000000000..c75c5ed9e65
--- /dev/null
+++ b/drivers/gpu/drm/radeon/rv350d.h
@@ -0,0 +1,52 @@
+/*
+ * Copyright 2008 Advanced Micro Devices, Inc.
+ * Copyright 2008 Red Hat Inc.
+ * Copyright 2009 Jerome Glisse.
+ *
+ * Permission is hereby granted, free of charge, to any person obtaining a
+ * copy of this software and associated documentation files (the "Software"),
+ * to deal in the Software without restriction, including without limitation
+ * the rights to use, copy, modify, merge, publish, distribute, sublicense,
+ * and/or sell copies of the Software, and to permit persons to whom the
+ * Software is furnished to do so, subject to the following conditions:
+ *
+ * The above copyright notice and this permission notice shall be included in
+ * all copies or substantial portions of the Software.
+ *
+ * THE SOFTWARE IS PROVIDED "AS IS", WITHOUT WARRANTY OF ANY KIND, EXPRESS OR
+ * IMPLIED, INCLUDING BUT NOT LIMITED TO THE WARRANTIES OF MERCHANTABILITY,
+ * FITNESS FOR A PARTICULAR PURPOSE AND NONINFRINGEMENT. IN NO EVENT SHALL
+ * THE COPYRIGHT HOLDER(S) OR AUTHOR(S) BE LIABLE FOR ANY CLAIM, DAMAGES OR
+ * OTHER LIABILITY, WHETHER IN AN ACTION OF CONTRACT, TORT OR OTHERWISE,
+ * ARISING FROM, OUT OF OR IN CONNECTION WITH THE SOFTWARE OR THE USE OR
+ * OTHER DEALINGS IN THE SOFTWARE.
+ *
+ * Authors: Dave Airlie
+ * Alex Deucher
+ * Jerome Glisse
+ */
+#ifndef __RV350D_H__
+#define __RV350D_H__
+
+/* RV350, RV380 registers */
+/* #define R_00000D_SCLK_CNTL 0x00000D */
+#define S_00000D_FORCE_VAP(x) (((x) & 0x1) << 21)
+#define G_00000D_FORCE_VAP(x) (((x) >> 21) & 0x1)
+#define C_00000D_FORCE_VAP 0xFFDFFFFF
+#define S_00000D_FORCE_SR(x) (((x) & 0x1) << 25)
+#define G_00000D_FORCE_SR(x) (((x) >> 25) & 0x1)
+#define C_00000D_FORCE_SR 0xFDFFFFFF
+#define S_00000D_FORCE_PX(x) (((x) & 0x1) << 26)
+#define G_00000D_FORCE_PX(x) (((x) >> 26) & 0x1)
+#define C_00000D_FORCE_PX 0xFBFFFFFF
+#define S_00000D_FORCE_TX(x) (((x) & 0x1) << 27)
+#define G_00000D_FORCE_TX(x) (((x) >> 27) & 0x1)
+#define C_00000D_FORCE_TX 0xF7FFFFFF
+#define S_00000D_FORCE_US(x) (((x) & 0x1) << 28)
+#define G_00000D_FORCE_US(x) (((x) >> 28) & 0x1)
+#define C_00000D_FORCE_US 0xEFFFFFFF
+#define S_00000D_FORCE_SU(x) (((x) & 0x1) << 30)
+#define G_00000D_FORCE_SU(x) (((x) >> 30) & 0x1)
+#define C_00000D_FORCE_SU 0xBFFFFFFF
+
+#endif
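
These RV350/RV380 force bits live in the same SCLK_CNTL word as the RV250
definitions above, so clear masks from both headers can be combined in one
read-modify-write. A small sketch, again assuming hypothetical
pll_rreg()/pll_wreg() helpers:

	/* Release the forced VAP and TX clocks in a single update. */
	static void release_vap_tx_force(void)
	{
		uint32_t tmp = pll_rreg(R_00000D_SCLK_CNTL_M6);

		tmp &= C_00000D_FORCE_VAP & C_00000D_FORCE_TX;
		pll_wreg(R_00000D_SCLK_CNTL_M6, tmp);
	}
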
diff --git a/drivers/gpu/drm/radeon/rv515.c b/drivers/gpu/drm/radeon/rv515.c
index e53b5ca7a25..7935f793bf6 100644
--- a/drivers/gpu/drm/radeon/rv515.c
+++ b/drivers/gpu/drm/radeon/rv515.c
@@ -137,6 +137,8 @@ int rv515_mc_wait_for_idle(struct radeon_device *rdev)
void rv515_vga_render_disable(struct radeon_device *rdev)
{
+ WREG32(R_000330_D1VGA_CONTROL, 0);
+ WREG32(R_000338_D2VGA_CONTROL, 0);
WREG32(R_000300_VGA_RENDER_CONTROL,
RREG32(R_000300_VGA_RENDER_CONTROL) & C_000300_VGA_VSTATUS_CNTL);
}
@@ -478,7 +480,7 @@ static int rv515_startup(struct radeon_device *rdev)
}
/* Enable IRQ */
rdev->irq.sw_int = true;
- r100_irq_set(rdev);
+ rs600_irq_set(rdev);
/* 1M ring buffer */
r = r100_cp_init(rdev, 1024 * 1024);
if (r) {
@@ -520,7 +522,7 @@ int rv515_suspend(struct radeon_device *rdev)
{
r100_cp_disable(rdev);
r100_wb_disable(rdev);
- r100_irq_disable(rdev);
+ rs600_irq_disable(rdev);
if (rdev->flags & RADEON_IS_PCIE)
rv370_pcie_gart_disable(rdev);
return 0;
@@ -553,7 +555,6 @@ int rv515_init(struct radeon_device *rdev)
{
int r;
- rdev->new_init_path = true;
/* Initialize scratch registers */
radeon_scratch_init(rdev);
/* Initialize surface registers */
@@ -586,6 +587,8 @@ int rv515_init(struct radeon_device *rdev)
}
/* Initialize clocks */
radeon_get_clock_info(rdev->ddev);
+ /* Initialize power management */
+ radeon_pm_init(rdev);
/* Get vram informations */
rv515_vram_info(rdev);
/* Initialize memory controller (also test AGP) */
diff --git a/drivers/gpu/drm/radeon/rv770.c b/drivers/gpu/drm/radeon/rv770.c
index e0b97d16139..b0efd0ddae7 100644
--- a/drivers/gpu/drm/radeon/rv770.c
+++ b/drivers/gpu/drm/radeon/rv770.c
@@ -75,7 +75,7 @@ int rv770_pcie_gart_enable(struct radeon_device *rdev)
WREG32(MC_VM_MB_L1_TLB2_CNTL, tmp);
WREG32(MC_VM_MB_L1_TLB3_CNTL, tmp);
WREG32(VM_CONTEXT0_PAGE_TABLE_START_ADDR, rdev->mc.gtt_start >> 12);
- WREG32(VM_CONTEXT0_PAGE_TABLE_END_ADDR, (rdev->mc.gtt_end - 1) >> 12);
+ WREG32(VM_CONTEXT0_PAGE_TABLE_END_ADDR, rdev->mc.gtt_end >> 12);
WREG32(VM_CONTEXT0_PAGE_TABLE_BASE_ADDR, rdev->gart.table_addr >> 12);
WREG32(VM_CONTEXT0_CNTL, ENABLE_CONTEXT | PAGE_TABLE_DEPTH(0) |
RANGE_PROTECTION_FAULT_ENABLE_DEFAULT);
@@ -126,17 +126,36 @@ void rv770_pcie_gart_fini(struct radeon_device *rdev)
}
-/*
- * MC
- */
-static void rv770_mc_resume(struct radeon_device *rdev)
+void rv770_agp_enable(struct radeon_device *rdev)
+{
+ u32 tmp;
+ int i;
+
+ /* Setup L2 cache */
+ WREG32(VM_L2_CNTL, ENABLE_L2_CACHE | ENABLE_L2_FRAGMENT_PROCESSING |
+ ENABLE_L2_PTE_CACHE_LRU_UPDATE_BY_WRITE |
+ EFFECTIVE_L2_QUEUE_SIZE(7));
+ WREG32(VM_L2_CNTL2, 0);
+ WREG32(VM_L2_CNTL3, BANK_SELECT(0) | CACHE_UPDATE_MODE(2));
+ /* Setup TLB control */
+ tmp = ENABLE_L1_TLB | ENABLE_L1_FRAGMENT_PROCESSING |
+ SYSTEM_ACCESS_MODE_NOT_IN_SYS |
+ SYSTEM_APERTURE_UNMAPPED_ACCESS_PASS_THRU |
+ EFFECTIVE_L1_TLB_SIZE(5) | EFFECTIVE_L1_QUEUE_SIZE(5);
+ WREG32(MC_VM_MD_L1_TLB0_CNTL, tmp);
+ WREG32(MC_VM_MD_L1_TLB1_CNTL, tmp);
+ WREG32(MC_VM_MD_L1_TLB2_CNTL, tmp);
+ WREG32(MC_VM_MB_L1_TLB0_CNTL, tmp);
+ WREG32(MC_VM_MB_L1_TLB1_CNTL, tmp);
+ WREG32(MC_VM_MB_L1_TLB2_CNTL, tmp);
+ WREG32(MC_VM_MB_L1_TLB3_CNTL, tmp);
+ for (i = 0; i < 7; i++)
+ WREG32(VM_CONTEXT0_CNTL + (i * 4), 0);
+}
+
+static void rv770_mc_program(struct radeon_device *rdev)
{
- u32 d1vga_control, d2vga_control;
- u32 vga_render_control, vga_hdp_control;
- u32 d1crtc_control, d2crtc_control;
- u32 new_d1grph_primary, new_d1grph_secondary;
- u32 new_d2grph_primary, new_d2grph_secondary;
- u64 old_vram_start;
+ struct rv515_mc_save save;
u32 tmp;
int i, j;
@@ -150,53 +169,42 @@ static void rv770_mc_resume(struct radeon_device *rdev)
}
WREG32(HDP_REG_COHERENCY_FLUSH_CNTL, 0);
- d1vga_control = RREG32(D1VGA_CONTROL);
- d2vga_control = RREG32(D2VGA_CONTROL);
- vga_render_control = RREG32(VGA_RENDER_CONTROL);
- vga_hdp_control = RREG32(VGA_HDP_CONTROL);
- d1crtc_control = RREG32(D1CRTC_CONTROL);
- d2crtc_control = RREG32(D2CRTC_CONTROL);
- old_vram_start = (u64)(RREG32(MC_VM_FB_LOCATION) & 0xFFFF) << 24;
- new_d1grph_primary = RREG32(D1GRPH_PRIMARY_SURFACE_ADDRESS);
- new_d1grph_secondary = RREG32(D1GRPH_SECONDARY_SURFACE_ADDRESS);
- new_d1grph_primary += rdev->mc.vram_start - old_vram_start;
- new_d1grph_secondary += rdev->mc.vram_start - old_vram_start;
- new_d2grph_primary = RREG32(D2GRPH_PRIMARY_SURFACE_ADDRESS);
- new_d2grph_secondary = RREG32(D2GRPH_SECONDARY_SURFACE_ADDRESS);
- new_d2grph_primary += rdev->mc.vram_start - old_vram_start;
- new_d2grph_secondary += rdev->mc.vram_start - old_vram_start;
-
- /* Stop all video */
- WREG32(D1VGA_CONTROL, 0);
- WREG32(D2VGA_CONTROL, 0);
- WREG32(VGA_RENDER_CONTROL, 0);
- WREG32(D1CRTC_UPDATE_LOCK, 1);
- WREG32(D2CRTC_UPDATE_LOCK, 1);
- WREG32(D1CRTC_CONTROL, 0);
- WREG32(D2CRTC_CONTROL, 0);
- WREG32(D1CRTC_UPDATE_LOCK, 0);
- WREG32(D2CRTC_UPDATE_LOCK, 0);
-
- mdelay(1);
+ rv515_mc_stop(rdev, &save);
if (r600_mc_wait_for_idle(rdev)) {
- printk(KERN_WARNING "[drm] MC not idle !\n");
+		dev_warn(rdev->dev, "Wait for MC idle timed out!\n");
}
-
/* Lockout access through VGA aperture*/
WREG32(VGA_HDP_CONTROL, VGA_MEMORY_DISABLE);
-
/* Update configuration */
- WREG32(MC_VM_SYSTEM_APERTURE_LOW_ADDR, rdev->mc.vram_start >> 12);
- WREG32(MC_VM_SYSTEM_APERTURE_HIGH_ADDR, (rdev->mc.vram_end - 1) >> 12);
+ if (rdev->flags & RADEON_IS_AGP) {
+ if (rdev->mc.vram_start < rdev->mc.gtt_start) {
+ /* VRAM before AGP */
+ WREG32(MC_VM_SYSTEM_APERTURE_LOW_ADDR,
+ rdev->mc.vram_start >> 12);
+ WREG32(MC_VM_SYSTEM_APERTURE_HIGH_ADDR,
+ rdev->mc.gtt_end >> 12);
+ } else {
+ /* VRAM after AGP */
+ WREG32(MC_VM_SYSTEM_APERTURE_LOW_ADDR,
+ rdev->mc.gtt_start >> 12);
+ WREG32(MC_VM_SYSTEM_APERTURE_HIGH_ADDR,
+ rdev->mc.vram_end >> 12);
+ }
+ } else {
+ WREG32(MC_VM_SYSTEM_APERTURE_LOW_ADDR,
+ rdev->mc.vram_start >> 12);
+ WREG32(MC_VM_SYSTEM_APERTURE_HIGH_ADDR,
+ rdev->mc.vram_end >> 12);
+ }
WREG32(MC_VM_SYSTEM_APERTURE_DEFAULT_ADDR, 0);
- tmp = (((rdev->mc.vram_end - 1) >> 24) & 0xFFFF) << 16;
+ tmp = ((rdev->mc.vram_end >> 24) & 0xFFFF) << 16;
tmp |= ((rdev->mc.vram_start >> 24) & 0xFFFF);
WREG32(MC_VM_FB_LOCATION, tmp);
WREG32(HDP_NONSURFACE_BASE, (rdev->mc.vram_start >> 8));
WREG32(HDP_NONSURFACE_INFO, (2 << 7));
WREG32(HDP_NONSURFACE_SIZE, (rdev->mc.mc_vram_size - 1) | 0x3FF);
if (rdev->flags & RADEON_IS_AGP) {
- WREG32(MC_VM_AGP_TOP, (rdev->mc.gtt_end - 1) >> 16);
+ WREG32(MC_VM_AGP_TOP, rdev->mc.gtt_end >> 16);
WREG32(MC_VM_AGP_BOT, rdev->mc.gtt_start >> 16);
WREG32(MC_VM_AGP_BASE, rdev->mc.agp_base >> 22);
} else {
@@ -204,31 +212,10 @@ static void rv770_mc_resume(struct radeon_device *rdev)
WREG32(MC_VM_AGP_TOP, 0x0FFFFFFF);
WREG32(MC_VM_AGP_BOT, 0x0FFFFFFF);
}
- WREG32(D1GRPH_PRIMARY_SURFACE_ADDRESS, new_d1grph_primary);
- WREG32(D1GRPH_SECONDARY_SURFACE_ADDRESS, new_d1grph_secondary);
- WREG32(D2GRPH_PRIMARY_SURFACE_ADDRESS, new_d2grph_primary);
- WREG32(D2GRPH_SECONDARY_SURFACE_ADDRESS, new_d2grph_secondary);
- WREG32(VGA_MEMORY_BASE_ADDRESS, rdev->mc.vram_start);
-
- /* Unlock host access */
- WREG32(VGA_HDP_CONTROL, vga_hdp_control);
-
- mdelay(1);
if (r600_mc_wait_for_idle(rdev)) {
- printk(KERN_WARNING "[drm] MC not idle !\n");
+		dev_warn(rdev->dev, "Wait for MC idle timed out!\n");
}
-
- /* Restore video state */
- WREG32(D1CRTC_UPDATE_LOCK, 1);
- WREG32(D2CRTC_UPDATE_LOCK, 1);
- WREG32(D1CRTC_CONTROL, d1crtc_control);
- WREG32(D2CRTC_CONTROL, d2crtc_control);
- WREG32(D1CRTC_UPDATE_LOCK, 0);
- WREG32(D2CRTC_UPDATE_LOCK, 0);
- WREG32(D1VGA_CONTROL, d1vga_control);
- WREG32(D2VGA_CONTROL, d2vga_control);
- WREG32(VGA_RENDER_CONTROL, vga_render_control);
-
+ rv515_mc_resume(rdev, &save);
/* we need to own VRAM, so turn off the VGA renderer here
* to stop it overwriting our objects */
rv515_vga_render_disable(rdev);
@@ -542,11 +529,11 @@ static void rv770_gpu_init(struct radeon_device *rdev)
if (rdev->family == CHIP_RV770)
gb_tiling_config |= BANK_TILING(1);
else
- gb_tiling_config |= BANK_TILING((mc_arb_ramcfg & NOOFBANK_SHIFT) >> NOOFBANK_MASK);
+ gb_tiling_config |= BANK_TILING((mc_arb_ramcfg & NOOFBANK_MASK) >> NOOFBANK_SHIFT);
gb_tiling_config |= GROUP_SIZE(0);
- if (((mc_arb_ramcfg & NOOFROWS_MASK) & NOOFROWS_SHIFT) > 3) {
+ if (((mc_arb_ramcfg & NOOFROWS_MASK) >> NOOFROWS_SHIFT) > 3) {
gb_tiling_config |= ROW_TILING(3);
gb_tiling_config |= SAMPLE_SPLIT(3);
} else {
@@ -592,14 +579,14 @@ static void rv770_gpu_init(struct radeon_device *rdev)
/* set HW defaults for 3D engine */
WREG32(CP_QUEUE_THRESHOLDS, (ROQ_IB1_START(0x16) |
- ROQ_IB2_START(0x2b)));
+ ROQ_IB2_START(0x2b)));
WREG32(CP_MEQ_THRESHOLDS, STQ_SPLIT(0x30));
WREG32(TA_CNTL_AUX, (DISABLE_CUBE_ANISO |
- SYNC_GRADIENT |
- SYNC_WALKER |
- SYNC_ALIGNER));
+ SYNC_GRADIENT |
+ SYNC_WALKER |
+ SYNC_ALIGNER));
sx_debug_1 = RREG32(SX_DEBUG_1);
sx_debug_1 |= ENABLE_NEW_SMX_ADDRESS;
@@ -611,9 +598,9 @@ static void rv770_gpu_init(struct radeon_device *rdev)
WREG32(SMX_DC_CTL0, smx_dc_ctl0);
WREG32(SMX_EVENT_CTL, (ES_FLUSH_CTL(4) |
- GS_FLUSH_CTL(4) |
- ACK_FLUSH_CTL(3) |
- SYNC_FLUSH_CTL));
+ GS_FLUSH_CTL(4) |
+ ACK_FLUSH_CTL(3) |
+ SYNC_FLUSH_CTL));
if (rdev->family == CHIP_RV770)
WREG32(DB_DEBUG3, DB_CLK_OFF_DELAY(0x1f));
@@ -624,12 +611,12 @@ static void rv770_gpu_init(struct radeon_device *rdev)
}
WREG32(SX_EXPORT_BUFFER_SIZES, (COLOR_BUFFER_SIZE((rdev->config.rv770.sx_max_export_size / 4) - 1) |
- POSITION_BUFFER_SIZE((rdev->config.rv770.sx_max_export_pos_size / 4) - 1) |
- SMX_BUFFER_SIZE((rdev->config.rv770.sx_max_export_smx_size / 4) - 1)));
+ POSITION_BUFFER_SIZE((rdev->config.rv770.sx_max_export_pos_size / 4) - 1) |
+ SMX_BUFFER_SIZE((rdev->config.rv770.sx_max_export_smx_size / 4) - 1)));
WREG32(PA_SC_FIFO_SIZE, (SC_PRIM_FIFO_SIZE(rdev->config.rv770.sc_prim_fifo_size) |
- SC_HIZ_TILE_FIFO_SIZE(rdev->config.rv770.sc_hiz_tile_fifo_size) |
- SC_EARLYZ_TILE_FIFO_SIZE(rdev->config.rv770.sc_earlyz_tile_fifo_fize)));
+ SC_HIZ_TILE_FIFO_SIZE(rdev->config.rv770.sc_hiz_tile_fifo_size) |
+ SC_EARLYZ_TILE_FIFO_SIZE(rdev->config.rv770.sc_earlyz_tile_fifo_fize)));
WREG32(PA_SC_MULTI_CHIP_CNTL, 0);
@@ -787,14 +774,36 @@ int rv770_mc_init(struct radeon_device *rdev)
{
fixed20_12 a;
u32 tmp;
+ int chansize, numchan;
int r;
/* Get VRAM informations */
- /* FIXME: Don't know how to determine vram width, need to check
- * vram_width usage
- */
- rdev->mc.vram_width = 128;
rdev->mc.vram_is_ddr = true;
+ tmp = RREG32(MC_ARB_RAMCFG);
+ if (tmp & CHANSIZE_OVERRIDE) {
+ chansize = 16;
+ } else if (tmp & CHANSIZE_MASK) {
+ chansize = 64;
+ } else {
+ chansize = 32;
+ }
+ tmp = RREG32(MC_SHARED_CHMAP);
+ switch ((tmp & NOOFCHAN_MASK) >> NOOFCHAN_SHIFT) {
+ case 0:
+ default:
+ numchan = 1;
+ break;
+ case 1:
+ numchan = 2;
+ break;
+ case 2:
+ numchan = 4;
+ break;
+ case 3:
+ numchan = 8;
+ break;
+ }
+ rdev->mc.vram_width = numchan * chansize;
/* Could aper size report 0 ? */
rdev->mc.aper_base = drm_get_resource_start(rdev->ddev, 0);
rdev->mc.aper_size = drm_get_resource_len(rdev->ddev, 0);
@@ -840,9 +849,9 @@ int rv770_mc_init(struct radeon_device *rdev)
rdev->mc.gtt_size = radeon_gart_size * 1024 * 1024;
}
rdev->mc.vram_start = rdev->mc.vram_location;
- rdev->mc.vram_end = rdev->mc.vram_location + rdev->mc.mc_vram_size;
+ rdev->mc.vram_end = rdev->mc.vram_location + rdev->mc.mc_vram_size - 1;
rdev->mc.gtt_start = rdev->mc.gtt_location;
- rdev->mc.gtt_end = rdev->mc.gtt_location + rdev->mc.gtt_size;
+ rdev->mc.gtt_end = rdev->mc.gtt_location + rdev->mc.gtt_size - 1;
/* FIXME: we should enforce default clock in case GPU is not in
* default setup
*/
@@ -861,11 +870,14 @@ static int rv770_startup(struct radeon_device *rdev)
{
int r;
- radeon_gpu_reset(rdev);
- rv770_mc_resume(rdev);
- r = rv770_pcie_gart_enable(rdev);
- if (r)
- return r;
+ rv770_mc_program(rdev);
+ if (rdev->flags & RADEON_IS_AGP) {
+ rv770_agp_enable(rdev);
+ } else {
+ r = rv770_pcie_gart_enable(rdev);
+ if (r)
+ return r;
+ }
rv770_gpu_init(rdev);
r = radeon_object_pin(rdev->r600_blit.shader_obj, RADEON_GEM_DOMAIN_VRAM,
@@ -884,9 +896,8 @@ static int rv770_startup(struct radeon_device *rdev)
r = r600_cp_resume(rdev);
if (r)
return r;
- r = r600_wb_init(rdev);
- if (r)
- return r;
+	/* write back buffers are not vital so don't worry about failure */
+ r600_wb_enable(rdev);
return 0;
}
@@ -894,15 +905,12 @@ int rv770_resume(struct radeon_device *rdev)
{
int r;
- if (radeon_gpu_reset(rdev)) {
- /* FIXME: what do we want to do here ? */
- }
+	/* Do not reset the GPU before posting: on rv770 hardware, unlike
+	 * r500, posting performs the tasks needed to bring the GPU back
+	 * into good shape.
+	 */
/* post card */
- if (rdev->is_atom_bios) {
- atom_asic_init(rdev->mode_info.atom_context);
- } else {
- radeon_combios_asic_init(rdev->ddev);
- }
+ atom_asic_init(rdev->mode_info.atom_context);
/* Initialize clocks */
r = radeon_clocks_init(rdev);
if (r) {
@@ -915,7 +923,7 @@ int rv770_resume(struct radeon_device *rdev)
return r;
}
- r = radeon_ib_test(rdev);
+ r = r600_ib_test(rdev);
if (r) {
DRM_ERROR("radeon: failled testing IB (%d).\n", r);
return r;
@@ -929,8 +937,8 @@ int rv770_suspend(struct radeon_device *rdev)
/* FIXME: we should wait for ring to be empty */
r700_cp_stop(rdev);
rdev->cp.ready = false;
+ r600_wb_disable(rdev);
rv770_pcie_gart_disable(rdev);
-
/* unpin shaders bo */
radeon_object_unpin(rdev->r600_blit.shader_obj);
return 0;
@@ -946,7 +954,6 @@ int rv770_init(struct radeon_device *rdev)
{
int r;
- rdev->new_init_path = true;
r = radeon_dummy_page_init(rdev);
if (r)
return r;
@@ -960,8 +967,10 @@ int rv770_init(struct radeon_device *rdev)
return -EINVAL;
}
/* Must be an ATOMBIOS */
- if (!rdev->is_atom_bios)
+ if (!rdev->is_atom_bios) {
+		dev_err(rdev->dev, "Expecting atombios for RV770 GPU\n");
return -EINVAL;
+ }
r = radeon_atombios_init(rdev);
if (r)
return r;
@@ -974,24 +983,20 @@ int rv770_init(struct radeon_device *rdev)
r600_scratch_init(rdev);
/* Initialize surface registers */
radeon_surface_init(rdev);
+ /* Initialize clocks */
radeon_get_clock_info(rdev->ddev);
r = radeon_clocks_init(rdev);
if (r)
return r;
+ /* Initialize power management */
+ radeon_pm_init(rdev);
/* Fence driver */
r = radeon_fence_driver_init(rdev);
if (r)
return r;
r = rv770_mc_init(rdev);
- if (r) {
- if (rdev->flags & RADEON_IS_AGP) {
- /* Retry with disabling AGP */
- rv770_fini(rdev);
- rdev->flags &= ~RADEON_IS_AGP;
- return rv770_init(rdev);
- }
+ if (r)
return r;
- }
/* Memory manager */
r = radeon_object_init(rdev);
if (r)
@@ -1020,12 +1025,10 @@ int rv770_init(struct radeon_device *rdev)
r = rv770_startup(rdev);
if (r) {
- if (rdev->flags & RADEON_IS_AGP) {
- /* Retry with disabling AGP */
- rv770_fini(rdev);
- rdev->flags &= ~RADEON_IS_AGP;
- return rv770_init(rdev);
- }
+ rv770_suspend(rdev);
+ r600_wb_fini(rdev);
+ radeon_ring_fini(rdev);
+ rv770_pcie_gart_fini(rdev);
rdev->accel_working = false;
}
if (rdev->accel_working) {
@@ -1034,7 +1037,7 @@ int rv770_init(struct radeon_device *rdev)
DRM_ERROR("radeon: failled initializing IB pool (%d).\n", r);
rdev->accel_working = false;
}
- r = radeon_ib_test(rdev);
+ r = r600_ib_test(rdev);
if (r) {
DRM_ERROR("radeon: failled testing IB (%d).\n", r);
rdev->accel_working = false;
@@ -1049,20 +1052,15 @@ void rv770_fini(struct radeon_device *rdev)
r600_blit_fini(rdev);
radeon_ring_fini(rdev);
+ r600_wb_fini(rdev);
rv770_pcie_gart_fini(rdev);
radeon_gem_fini(rdev);
radeon_fence_driver_fini(rdev);
radeon_clocks_fini(rdev);
-#if __OS_HAS_AGP
if (rdev->flags & RADEON_IS_AGP)
radeon_agp_fini(rdev);
-#endif
radeon_object_fini(rdev);
- if (rdev->is_atom_bios) {
- radeon_atombios_fini(rdev);
- } else {
- radeon_combios_fini(rdev);
- }
+ radeon_atombios_fini(rdev);
kfree(rdev->bios);
rdev->bios = NULL;
radeon_dummy_page_fini(rdev);
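
A recurring change in the rv770.c hunks above is the move to inclusive end
addresses: vram_end and gtt_end now name the last valid byte, so registers
that take an inclusive limit are programmed with end >> shift directly
instead of (end - 1) >> shift. A minimal sketch of the convention, with field
names chosen only to mirror the in-driver memory-controller bookkeeping:

	#include <stdint.h>

	struct mc_range {
		uint64_t start;
		uint64_t size;
		uint64_t end;	/* last valid byte: start + size - 1 */
	};

	static void mc_range_init(struct mc_range *r, uint64_t start, uint64_t size)
	{
		r->start = start;
		r->size = size;
		r->end = start + size - 1;
	}
	/* e.g. aperture high address = r->end >> 12, AGP top = r->end >> 16 */
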
diff --git a/drivers/gpu/drm/radeon/rv770d.h b/drivers/gpu/drm/radeon/rv770d.h
index 4b9c3d6396f..a1367ab6f26 100644
--- a/drivers/gpu/drm/radeon/rv770d.h
+++ b/drivers/gpu/drm/radeon/rv770d.h
@@ -129,6 +129,10 @@
#define HDP_REG_COHERENCY_FLUSH_CNTL 0x54A0
#define HDP_TILING_CONFIG 0x2F3C
+#define MC_SHARED_CHMAP 0x2004
+#define NOOFCHAN_SHIFT 12
+#define NOOFCHAN_MASK 0x00003000
+
#define MC_ARB_RAMCFG 0x2760
#define NOOFBANK_SHIFT 0
#define NOOFBANK_MASK 0x00000003
@@ -142,6 +146,7 @@
#define CHANSIZE_MASK 0x00000100
#define BURSTLENGTH_SHIFT 9
#define BURSTLENGTH_MASK 0x00000200
+#define CHANSIZE_OVERRIDE (1 << 11)
#define MC_VM_AGP_TOP 0x2028
#define MC_VM_AGP_BOT 0x202C
#define MC_VM_AGP_BASE 0x2030
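
The new MC_SHARED_CHMAP and CHANSIZE_OVERRIDE definitions feed the VRAM-width
calculation added to rv770_mc_init() above. A condensed sketch of that
calculation, with rreg32() standing in for the driver's RREG32:

	static int rv770_vram_width(void)
	{
		uint32_t tmp = rreg32(MC_ARB_RAMCFG);
		int chansize, numchan;

		if (tmp & CHANSIZE_OVERRIDE)
			chansize = 16;
		else if (tmp & CHANSIZE_MASK)
			chansize = 64;
		else
			chansize = 32;

		switch ((rreg32(MC_SHARED_CHMAP) & NOOFCHAN_MASK) >> NOOFCHAN_SHIFT) {
		case 1:  numchan = 2; break;
		case 2:  numchan = 4; break;
		case 3:  numchan = 8; break;
		default: numchan = 1; break;
		}
		return numchan * chansize;	/* bus width in bits */
	}
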
diff --git a/drivers/gpu/drm/ttm/ttm_global.c b/drivers/gpu/drm/ttm/ttm_global.c
index 541744d00d3..b17007178a3 100644
--- a/drivers/gpu/drm/ttm/ttm_global.c
+++ b/drivers/gpu/drm/ttm/ttm_global.c
@@ -82,8 +82,8 @@ int ttm_global_item_ref(struct ttm_global_reference *ref)
if (unlikely(ret != 0))
goto out_err;
- ++item->refcount;
}
+ ++item->refcount;
ref->object = item->object;
object = item->object;
mutex_unlock(&item->mutex);
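
The one-line move above makes the reference count increment unconditional, so
a caller that finds an already-initialized global item still takes a
reference, instead of only the allocating caller ever being counted. A
reduced sketch of the corrected pattern; the struct and item_init() are
hypothetical stand-ins, not the actual ttm types:

	struct item {
		struct mutex mutex;
		int refcount;
		void *object;
	};

	static int item_ref(struct item *item, void **object)
	{
		int ret = 0;

		mutex_lock(&item->mutex);
		if (item->refcount == 0) {
			ret = item_init(item);	/* first user initializes */
			if (ret)
				goto out;
		}
		++item->refcount;		/* counted for every caller */
		*object = item->object;
	out:
		mutex_unlock(&item->mutex);
		return ret;
	}
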
diff --git a/drivers/gpu/drm/ttm/ttm_tt.c b/drivers/gpu/drm/ttm/ttm_tt.c
index a55ee1a56c1..7bcb89f39ce 100644
--- a/drivers/gpu/drm/ttm/ttm_tt.c
+++ b/drivers/gpu/drm/ttm/ttm_tt.c
@@ -279,6 +279,7 @@ int ttm_tt_set_placement_caching(struct ttm_tt *ttm, uint32_t placement)
return ttm_tt_set_caching(ttm, state);
}
+EXPORT_SYMBOL(ttm_tt_set_placement_caching);
static void ttm_tt_free_alloced_pages(struct ttm_tt *ttm)
{