Diffstat (limited to 'drivers/gpu')
-rw-r--r--  drivers/gpu/drm/Kconfig | 1
-rw-r--r--  drivers/gpu/drm/drm_crtc_helper.c | 1
-rw-r--r--  drivers/gpu/drm/drm_edid.c | 6
-rw-r--r--  drivers/gpu/drm/drm_fb_helper.c | 2
-rw-r--r--  drivers/gpu/drm/drm_gem.c | 2
-rw-r--r--  drivers/gpu/drm/drm_mm.c | 9
-rw-r--r--  drivers/gpu/drm/i915/i915_debugfs.c | 4
-rw-r--r--  drivers/gpu/drm/i915/i915_drv.h | 12
-rw-r--r--  drivers/gpu/drm/i915/i915_irq.c | 10
-rw-r--r--  drivers/gpu/drm/i915/i915_suspend.c | 36
-rw-r--r--  drivers/gpu/drm/i915/intel_crt.c | 4
-rw-r--r--  drivers/gpu/drm/i915/intel_display.c | 15
-rw-r--r--  drivers/gpu/drm/i915/intel_hdmi.c | 26
-rw-r--r--  drivers/gpu/drm/radeon/Makefile | 2
-rw-r--r--  drivers/gpu/drm/radeon/atom.c | 1
-rw-r--r--  drivers/gpu/drm/radeon/atombios.h | 2
-rw-r--r--  drivers/gpu/drm/radeon/atombios_crtc.c | 407
-rw-r--r--  drivers/gpu/drm/radeon/mkregtable.c | 12
-rw-r--r--  drivers/gpu/drm/radeon/r100.c | 172
-rw-r--r--  drivers/gpu/drm/radeon/r100_track.h | 10
-rw-r--r--  drivers/gpu/drm/radeon/r300.c | 33
-rw-r--r--  drivers/gpu/drm/radeon/r420.c | 24
-rw-r--r--  drivers/gpu/drm/radeon/r500_reg.h | 9
-rw-r--r--  drivers/gpu/drm/radeon/r520.c | 8
-rw-r--r--  drivers/gpu/drm/radeon/r600.c | 764
-rw-r--r--  drivers/gpu/drm/radeon/r600_blit.c | 58
-rw-r--r--  drivers/gpu/drm/radeon/r600_blit_kms.c | 39
-rw-r--r--  drivers/gpu/drm/radeon/r600_cs.c | 18
-rw-r--r--  drivers/gpu/drm/radeon/r600d.h | 174
-rw-r--r--  drivers/gpu/drm/radeon/radeon.h | 161
-rw-r--r--  drivers/gpu/drm/radeon/radeon_agp.c | 12
-rw-r--r--  drivers/gpu/drm/radeon/radeon_asic.h | 37
-rw-r--r--  drivers/gpu/drm/radeon/radeon_atombios.c | 339
-rw-r--r--  drivers/gpu/drm/radeon/radeon_benchmark.c | 40
-rw-r--r--  drivers/gpu/drm/radeon/radeon_bios.c | 3
-rw-r--r--  drivers/gpu/drm/radeon/radeon_clocks.c | 25
-rw-r--r--  drivers/gpu/drm/radeon/radeon_combios.c | 679
-rw-r--r--  drivers/gpu/drm/radeon/radeon_connectors.c | 262
-rw-r--r--  drivers/gpu/drm/radeon/radeon_cs.c | 13
-rw-r--r--  drivers/gpu/drm/radeon/radeon_cursor.c | 10
-rw-r--r--  drivers/gpu/drm/radeon/radeon_device.c | 69
-rw-r--r--  drivers/gpu/drm/radeon/radeon_display.c | 57
-rw-r--r--  drivers/gpu/drm/radeon/radeon_drv.h | 1
-rw-r--r--  drivers/gpu/drm/radeon/radeon_encoders.c | 196
-rw-r--r--  drivers/gpu/drm/radeon/radeon_fb.c | 72
-rw-r--r--  drivers/gpu/drm/radeon/radeon_fence.c | 42
-rw-r--r--  drivers/gpu/drm/radeon/radeon_gart.c | 62
-rw-r--r--  drivers/gpu/drm/radeon/radeon_gem.c | 102
-rw-r--r--  drivers/gpu/drm/radeon/radeon_i2c.c | 109
-rw-r--r--  drivers/gpu/drm/radeon/radeon_irq_kms.c | 45
-rw-r--r--  drivers/gpu/drm/radeon/radeon_legacy_crtc.c | 144
-rw-r--r--  drivers/gpu/drm/radeon/radeon_legacy_encoders.c | 140
-rw-r--r--  drivers/gpu/drm/radeon/radeon_mode.h | 110
-rw-r--r--  drivers/gpu/drm/radeon/radeon_object.c | 538
-rw-r--r--  drivers/gpu/drm/radeon/radeon_object.h | 157
-rw-r--r--  drivers/gpu/drm/radeon/radeon_pm.c | 65
-rw-r--r--  drivers/gpu/drm/radeon/radeon_reg.h | 49
-rw-r--r--  drivers/gpu/drm/radeon/radeon_ring.c | 67
-rw-r--r--  drivers/gpu/drm/radeon/radeon_test.c | 61
-rw-r--r--  drivers/gpu/drm/radeon/radeon_ttm.c | 42
-rw-r--r--  drivers/gpu/drm/radeon/rs400.c | 17
-rw-r--r--  drivers/gpu/drm/radeon/rs600.c | 119
-rw-r--r--  drivers/gpu/drm/radeon/rs690.c | 50
-rw-r--r--  drivers/gpu/drm/radeon/rv515.c | 22
-rw-r--r--  drivers/gpu/drm/radeon/rv770.c | 118
-rw-r--r--  drivers/gpu/drm/radeon/rv770d.h | 5
-rw-r--r--  drivers/gpu/drm/ttm/ttm_tt.c | 1
67 files changed, 4137 insertions, 1765 deletions
diff --git a/drivers/gpu/drm/Kconfig b/drivers/gpu/drm/Kconfig
index f831ea15929..96eddd17e05 100644
--- a/drivers/gpu/drm/Kconfig
+++ b/drivers/gpu/drm/Kconfig
@@ -92,6 +92,7 @@ config DRM_I830
config DRM_I915
tristate "i915 driver"
depends on AGP_INTEL
+ select SHMEM
select DRM_KMS_HELPER
select FB_CFB_FILLRECT
select FB_CFB_COPYAREA
diff --git a/drivers/gpu/drm/drm_crtc_helper.c b/drivers/gpu/drm/drm_crtc_helper.c
index c5bd50ce78e..3963b3c1081 100644
--- a/drivers/gpu/drm/drm_crtc_helper.c
+++ b/drivers/gpu/drm/drm_crtc_helper.c
@@ -331,6 +331,7 @@ create_mode:
cmdline_mode->refresh_specified ? cmdline_mode->refresh : 60,
cmdline_mode->rb, cmdline_mode->interlace,
cmdline_mode->margins);
+ drm_mode_set_crtcinfo(mode, CRTC_INTERLACE_HALVE_V);
list_add(&mode->head, &connector->modes);
return mode;
}
diff --git a/drivers/gpu/drm/drm_edid.c b/drivers/gpu/drm/drm_edid.c
index 30af8f3e642..c39b26f1abe 100644
--- a/drivers/gpu/drm/drm_edid.c
+++ b/drivers/gpu/drm/drm_edid.c
@@ -673,6 +673,12 @@ static struct drm_display_mode *drm_mode_detailed(struct drm_device *dev,
return NULL;
}
+ /* Some EDIDs have bogus h/vtotal values */
+ if (mode->hsync_end > mode->htotal)
+ mode->htotal = mode->hsync_end + 1;
+ if (mode->vsync_end > mode->vtotal)
+ mode->vtotal = mode->vsync_end + 1;
+
drm_mode_set_name(mode);
if (pt->misc & DRM_EDID_PT_INTERLACED)
diff --git a/drivers/gpu/drm/drm_fb_helper.c b/drivers/gpu/drm/drm_fb_helper.c
index 515b838006d..34afe15ebc0 100644
--- a/drivers/gpu/drm/drm_fb_helper.c
+++ b/drivers/gpu/drm/drm_fb_helper.c
@@ -710,7 +710,7 @@ int drm_fb_helper_set_par(struct fb_info *info)
if (crtc->fb == fb_helper->crtc_info[i].mode_set.fb) {
mutex_lock(&dev->mode_config.mutex);
- ret = crtc->funcs->set_config(&fb_helper->crtc_info->mode_set);
+ ret = crtc->funcs->set_config(&fb_helper->crtc_info[i].mode_set);
mutex_unlock(&dev->mode_config.mutex);
if (ret)
return ret;
diff --git a/drivers/gpu/drm/drm_gem.c b/drivers/gpu/drm/drm_gem.c
index 80391995bde..e9dbb481c46 100644
--- a/drivers/gpu/drm/drm_gem.c
+++ b/drivers/gpu/drm/drm_gem.c
@@ -552,7 +552,7 @@ int drm_gem_mmap(struct file *filp, struct vm_area_struct *vma)
vma->vm_flags |= VM_RESERVED | VM_IO | VM_PFNMAP | VM_DONTEXPAND;
vma->vm_ops = obj->dev->driver->gem_vm_ops;
vma->vm_private_data = map->handle;
- vma->vm_page_prot = pgprot_writecombine(vma->vm_page_prot);
+ vma->vm_page_prot = pgprot_writecombine(vm_get_page_prot(vma->vm_flags));
/* Take a ref for this mapping of the object, so that the fault
* handler can dereference the mmap offset's pointer to the object.
diff --git a/drivers/gpu/drm/drm_mm.c b/drivers/gpu/drm/drm_mm.c
index 59cebd7d3fc..1f0d717dbad 100644
--- a/drivers/gpu/drm/drm_mm.c
+++ b/drivers/gpu/drm/drm_mm.c
@@ -103,6 +103,11 @@ static struct drm_mm_node *drm_mm_kmalloc(struct drm_mm *mm, int atomic)
return child;
}
+/* drm_mm_pre_get() - pre allocate drm_mm_node structure
+ * drm_mm: memory manager struct we are pre-allocating for
+ *
+ * Returns 0 on success or -ENOMEM if allocation fails.
+ */
int drm_mm_pre_get(struct drm_mm *mm)
{
struct drm_mm_node *node;
@@ -253,12 +258,14 @@ void drm_mm_put_block(struct drm_mm_node *cur)
prev_node->size += next_node->size;
list_del(&next_node->ml_entry);
list_del(&next_node->fl_entry);
+ spin_lock(&mm->unused_lock);
if (mm->num_unused < MM_UNUSED_TARGET) {
list_add(&next_node->fl_entry,
&mm->unused_nodes);
++mm->num_unused;
} else
kfree(next_node);
+ spin_unlock(&mm->unused_lock);
} else {
next_node->size += cur->size;
next_node->start = cur->start;
@@ -271,11 +278,13 @@ void drm_mm_put_block(struct drm_mm_node *cur)
list_add(&cur->fl_entry, &mm->fl_entry);
} else {
list_del(&cur->ml_entry);
+ spin_lock(&mm->unused_lock);
if (mm->num_unused < MM_UNUSED_TARGET) {
list_add(&cur->fl_entry, &mm->unused_nodes);
++mm->num_unused;
} else
kfree(cur);
+ spin_unlock(&mm->unused_lock);
}
}
diff --git a/drivers/gpu/drm/i915/i915_debugfs.c b/drivers/gpu/drm/i915/i915_debugfs.c
index f8ce9a3a420..26bf0552b3c 100644
--- a/drivers/gpu/drm/i915/i915_debugfs.c
+++ b/drivers/gpu/drm/i915/i915_debugfs.c
@@ -267,10 +267,10 @@ static void i915_dump_pages(struct seq_file *m, struct page **pages, int page_co
uint32_t *mem;
for (page = 0; page < page_count; page++) {
- mem = kmap(pages[page]);
+ mem = kmap_atomic(pages[page], KM_USER0);
for (i = 0; i < PAGE_SIZE; i += 4)
seq_printf(m, "%08x : %08x\n", i, mem[i / 4]);
- kunmap(pages[page]);
+ kunmap_atomic(pages[page], KM_USER0);
}
}
diff --git a/drivers/gpu/drm/i915/i915_drv.h b/drivers/gpu/drm/i915/i915_drv.h
index 57204e29897..a725f659119 100644
--- a/drivers/gpu/drm/i915/i915_drv.h
+++ b/drivers/gpu/drm/i915/i915_drv.h
@@ -296,6 +296,7 @@ typedef struct drm_i915_private {
u32 saveVBLANK_A;
u32 saveVSYNC_A;
u32 saveBCLRPAT_A;
+ u32 saveTRANSACONF;
u32 saveTRANS_HTOTAL_A;
u32 saveTRANS_HBLANK_A;
u32 saveTRANS_HSYNC_A;
@@ -326,6 +327,7 @@ typedef struct drm_i915_private {
u32 saveVBLANK_B;
u32 saveVSYNC_B;
u32 saveBCLRPAT_B;
+ u32 saveTRANSBCONF;
u32 saveTRANS_HTOTAL_B;
u32 saveTRANS_HBLANK_B;
u32 saveTRANS_HSYNC_B;
@@ -414,6 +416,16 @@ typedef struct drm_i915_private {
u32 savePFB_WIN_SZ;
u32 savePFA_WIN_POS;
u32 savePFB_WIN_POS;
+ u32 savePCH_DREF_CONTROL;
+ u32 saveDISP_ARB_CTL;
+ u32 savePIPEA_DATA_M1;
+ u32 savePIPEA_DATA_N1;
+ u32 savePIPEA_LINK_M1;
+ u32 savePIPEA_LINK_N1;
+ u32 savePIPEB_DATA_M1;
+ u32 savePIPEB_DATA_N1;
+ u32 savePIPEB_LINK_M1;
+ u32 savePIPEB_LINK_N1;
struct {
struct drm_mm gtt_space;
diff --git a/drivers/gpu/drm/i915/i915_irq.c b/drivers/gpu/drm/i915/i915_irq.c
index c3ceffa46ea..aa7fd82aa6e 100644
--- a/drivers/gpu/drm/i915/i915_irq.c
+++ b/drivers/gpu/drm/i915/i915_irq.c
@@ -254,10 +254,15 @@ irqreturn_t igdng_irq_handler(struct drm_device *dev)
{
drm_i915_private_t *dev_priv = (drm_i915_private_t *) dev->dev_private;
int ret = IRQ_NONE;
- u32 de_iir, gt_iir;
+ u32 de_iir, gt_iir, de_ier;
u32 new_de_iir, new_gt_iir;
struct drm_i915_master_private *master_priv;
+ /* disable master interrupt before clearing iir */
+ de_ier = I915_READ(DEIER);
+ I915_WRITE(DEIER, de_ier & ~DE_MASTER_IRQ_CONTROL);
+ (void)I915_READ(DEIER);
+
de_iir = I915_READ(DEIIR);
gt_iir = I915_READ(GTIIR);
@@ -290,6 +295,9 @@ irqreturn_t igdng_irq_handler(struct drm_device *dev)
gt_iir = new_gt_iir;
}
+ I915_WRITE(DEIER, de_ier);
+ (void)I915_READ(DEIER);
+
return ret;
}
diff --git a/drivers/gpu/drm/i915/i915_suspend.c b/drivers/gpu/drm/i915/i915_suspend.c
index 992d5617e79..6eec8171a44 100644
--- a/drivers/gpu/drm/i915/i915_suspend.c
+++ b/drivers/gpu/drm/i915/i915_suspend.c
@@ -239,6 +239,11 @@ static void i915_save_modeset_reg(struct drm_device *dev)
if (drm_core_check_feature(dev, DRIVER_MODESET))
return;
+ if (IS_IGDNG(dev)) {
+ dev_priv->savePCH_DREF_CONTROL = I915_READ(PCH_DREF_CONTROL);
+ dev_priv->saveDISP_ARB_CTL = I915_READ(DISP_ARB_CTL);
+ }
+
/* Pipe & plane A info */
dev_priv->savePIPEACONF = I915_READ(PIPEACONF);
dev_priv->savePIPEASRC = I915_READ(PIPEASRC);
@@ -263,6 +268,11 @@ static void i915_save_modeset_reg(struct drm_device *dev)
dev_priv->saveBCLRPAT_A = I915_READ(BCLRPAT_A);
if (IS_IGDNG(dev)) {
+ dev_priv->savePIPEA_DATA_M1 = I915_READ(PIPEA_DATA_M1);
+ dev_priv->savePIPEA_DATA_N1 = I915_READ(PIPEA_DATA_N1);
+ dev_priv->savePIPEA_LINK_M1 = I915_READ(PIPEA_LINK_M1);
+ dev_priv->savePIPEA_LINK_N1 = I915_READ(PIPEA_LINK_N1);
+
dev_priv->saveFDI_TXA_CTL = I915_READ(FDI_TXA_CTL);
dev_priv->saveFDI_RXA_CTL = I915_READ(FDI_RXA_CTL);
@@ -270,6 +280,7 @@ static void i915_save_modeset_reg(struct drm_device *dev)
dev_priv->savePFA_WIN_SZ = I915_READ(PFA_WIN_SZ);
dev_priv->savePFA_WIN_POS = I915_READ(PFA_WIN_POS);
+ dev_priv->saveTRANSACONF = I915_READ(TRANSACONF);
dev_priv->saveTRANS_HTOTAL_A = I915_READ(TRANS_HTOTAL_A);
dev_priv->saveTRANS_HBLANK_A = I915_READ(TRANS_HBLANK_A);
dev_priv->saveTRANS_HSYNC_A = I915_READ(TRANS_HSYNC_A);
@@ -314,6 +325,11 @@ static void i915_save_modeset_reg(struct drm_device *dev)
dev_priv->saveBCLRPAT_B = I915_READ(BCLRPAT_B);
if (IS_IGDNG(dev)) {
+ dev_priv->savePIPEB_DATA_M1 = I915_READ(PIPEB_DATA_M1);
+ dev_priv->savePIPEB_DATA_N1 = I915_READ(PIPEB_DATA_N1);
+ dev_priv->savePIPEB_LINK_M1 = I915_READ(PIPEB_LINK_M1);
+ dev_priv->savePIPEB_LINK_N1 = I915_READ(PIPEB_LINK_N1);
+
dev_priv->saveFDI_TXB_CTL = I915_READ(FDI_TXB_CTL);
dev_priv->saveFDI_RXB_CTL = I915_READ(FDI_RXB_CTL);
@@ -321,6 +337,7 @@ static void i915_save_modeset_reg(struct drm_device *dev)
dev_priv->savePFB_WIN_SZ = I915_READ(PFB_WIN_SZ);
dev_priv->savePFB_WIN_POS = I915_READ(PFB_WIN_POS);
+ dev_priv->saveTRANSBCONF = I915_READ(TRANSBCONF);
dev_priv->saveTRANS_HTOTAL_B = I915_READ(TRANS_HTOTAL_B);
dev_priv->saveTRANS_HBLANK_B = I915_READ(TRANS_HBLANK_B);
dev_priv->saveTRANS_HSYNC_B = I915_READ(TRANS_HSYNC_B);
@@ -368,6 +385,11 @@ static void i915_restore_modeset_reg(struct drm_device *dev)
fpb1_reg = FPB1;
}
+ if (IS_IGDNG(dev)) {
+ I915_WRITE(PCH_DREF_CONTROL, dev_priv->savePCH_DREF_CONTROL);
+ I915_WRITE(DISP_ARB_CTL, dev_priv->saveDISP_ARB_CTL);
+ }
+
/* Pipe & plane A info */
/* Prime the clock */
if (dev_priv->saveDPLL_A & DPLL_VCO_ENABLE) {
@@ -395,6 +417,11 @@ static void i915_restore_modeset_reg(struct drm_device *dev)
I915_WRITE(BCLRPAT_A, dev_priv->saveBCLRPAT_A);
if (IS_IGDNG(dev)) {
+ I915_WRITE(PIPEA_DATA_M1, dev_priv->savePIPEA_DATA_M1);
+ I915_WRITE(PIPEA_DATA_N1, dev_priv->savePIPEA_DATA_N1);
+ I915_WRITE(PIPEA_LINK_M1, dev_priv->savePIPEA_LINK_M1);
+ I915_WRITE(PIPEA_LINK_N1, dev_priv->savePIPEA_LINK_N1);
+
I915_WRITE(FDI_RXA_CTL, dev_priv->saveFDI_RXA_CTL);
I915_WRITE(FDI_TXA_CTL, dev_priv->saveFDI_TXA_CTL);
@@ -402,6 +429,7 @@ static void i915_restore_modeset_reg(struct drm_device *dev)
I915_WRITE(PFA_WIN_SZ, dev_priv->savePFA_WIN_SZ);
I915_WRITE(PFA_WIN_POS, dev_priv->savePFA_WIN_POS);
+ I915_WRITE(TRANSACONF, dev_priv->saveTRANSACONF);
I915_WRITE(TRANS_HTOTAL_A, dev_priv->saveTRANS_HTOTAL_A);
I915_WRITE(TRANS_HBLANK_A, dev_priv->saveTRANS_HBLANK_A);
I915_WRITE(TRANS_HSYNC_A, dev_priv->saveTRANS_HSYNC_A);
@@ -439,7 +467,7 @@ static void i915_restore_modeset_reg(struct drm_device *dev)
/* Actually enable it */
I915_WRITE(dpll_b_reg, dev_priv->saveDPLL_B);
DRM_UDELAY(150);
- if (IS_I965G(dev))
+ if (IS_I965G(dev) && !IS_IGDNG(dev))
I915_WRITE(DPLL_B_MD, dev_priv->saveDPLL_B_MD);
DRM_UDELAY(150);
@@ -454,6 +482,11 @@ static void i915_restore_modeset_reg(struct drm_device *dev)
I915_WRITE(BCLRPAT_B, dev_priv->saveBCLRPAT_B);
if (IS_IGDNG(dev)) {
+ I915_WRITE(PIPEB_DATA_M1, dev_priv->savePIPEB_DATA_M1);
+ I915_WRITE(PIPEB_DATA_N1, dev_priv->savePIPEB_DATA_N1);
+ I915_WRITE(PIPEB_LINK_M1, dev_priv->savePIPEB_LINK_M1);
+ I915_WRITE(PIPEB_LINK_N1, dev_priv->savePIPEB_LINK_N1);
+
I915_WRITE(FDI_RXB_CTL, dev_priv->saveFDI_RXB_CTL);
I915_WRITE(FDI_TXB_CTL, dev_priv->saveFDI_TXB_CTL);
@@ -461,6 +494,7 @@ static void i915_restore_modeset_reg(struct drm_device *dev)
I915_WRITE(PFB_WIN_SZ, dev_priv->savePFB_WIN_SZ);
I915_WRITE(PFB_WIN_POS, dev_priv->savePFB_WIN_POS);
+ I915_WRITE(TRANSBCONF, dev_priv->saveTRANSBCONF);
I915_WRITE(TRANS_HTOTAL_B, dev_priv->saveTRANS_HTOTAL_B);
I915_WRITE(TRANS_HBLANK_B, dev_priv->saveTRANS_HBLANK_B);
I915_WRITE(TRANS_HSYNC_B, dev_priv->saveTRANS_HSYNC_B);
diff --git a/drivers/gpu/drm/i915/intel_crt.c b/drivers/gpu/drm/i915/intel_crt.c
index 212e22740fc..e5051446c48 100644
--- a/drivers/gpu/drm/i915/intel_crt.c
+++ b/drivers/gpu/drm/i915/intel_crt.c
@@ -262,8 +262,8 @@ static bool intel_crt_detect_hotplug(struct drm_connector *connector)
} while (time_after(timeout, jiffies));
}
- if ((I915_READ(PORT_HOTPLUG_STAT) & CRT_HOTPLUG_MONITOR_MASK) ==
- CRT_HOTPLUG_MONITOR_COLOR)
+ if ((I915_READ(PORT_HOTPLUG_STAT) & CRT_HOTPLUG_MONITOR_MASK) !=
+ CRT_HOTPLUG_MONITOR_NONE)
return true;
return false;
diff --git a/drivers/gpu/drm/i915/intel_display.c b/drivers/gpu/drm/i915/intel_display.c
index ccd180dce4c..897230832c8 100644
--- a/drivers/gpu/drm/i915/intel_display.c
+++ b/drivers/gpu/drm/i915/intel_display.c
@@ -863,10 +863,8 @@ intel_igdng_find_best_PLL(const intel_limit_t *limit, struct drm_crtc *crtc,
struct drm_device *dev = crtc->dev;
struct drm_i915_private *dev_priv = dev->dev_private;
intel_clock_t clock;
- int max_n;
- bool found;
int err_most = 47;
- found = false;
+ int err_min = 10000;
/* eDP has only 2 clock choice, no n/m/p setting */
if (HAS_eDP)
@@ -890,10 +888,9 @@ intel_igdng_find_best_PLL(const intel_limit_t *limit, struct drm_crtc *crtc,
}
memset(best_clock, 0, sizeof(*best_clock));
- max_n = limit->n.max;
for (clock.p1 = limit->p1.max; clock.p1 >= limit->p1.min; clock.p1--) {
/* based on hardware requriment prefer smaller n to precision */
- for (clock.n = limit->n.min; clock.n <= max_n; clock.n++) {
+ for (clock.n = limit->n.min; clock.n <= limit->n.max; clock.n++) {
/* based on hardware requirment prefere larger m1,m2 */
for (clock.m1 = limit->m1.max;
clock.m1 >= limit->m1.min; clock.m1--) {
@@ -907,18 +904,18 @@ intel_igdng_find_best_PLL(const intel_limit_t *limit, struct drm_crtc *crtc,
this_err = abs((10000 - (target*10000/clock.dot)));
if (this_err < err_most) {
*best_clock = clock;
- err_most = this_err;
- max_n = clock.n;
- found = true;
/* found on first matching */
goto out;
+ } else if (this_err < err_min) {
+ *best_clock = clock;
+ err_min = this_err;
}
}
}
}
}
out:
- return found;
+ return true;
}
/* DisplayPort has only two frequencies, 162MHz and 270MHz */
diff --git a/drivers/gpu/drm/i915/intel_hdmi.c b/drivers/gpu/drm/i915/intel_hdmi.c
index 663ab6de0b5..c33451aec1b 100644
--- a/drivers/gpu/drm/i915/intel_hdmi.c
+++ b/drivers/gpu/drm/i915/intel_hdmi.c
@@ -77,14 +77,32 @@ static void intel_hdmi_dpms(struct drm_encoder *encoder, int mode)
struct intel_hdmi_priv *hdmi_priv = intel_output->dev_priv;
u32 temp;
- if (mode != DRM_MODE_DPMS_ON) {
- temp = I915_READ(hdmi_priv->sdvox_reg);
+ temp = I915_READ(hdmi_priv->sdvox_reg);
+
+ /* HW workaround, need to toggle enable bit off and on for 12bpc, but
+ * we do this anyway which shows more stable in testing.
+ */
+ if (IS_IGDNG(dev)) {
I915_WRITE(hdmi_priv->sdvox_reg, temp & ~SDVO_ENABLE);
+ POSTING_READ(hdmi_priv->sdvox_reg);
+ }
+
+ if (mode != DRM_MODE_DPMS_ON) {
+ temp &= ~SDVO_ENABLE;
} else {
- temp = I915_READ(hdmi_priv->sdvox_reg);
- I915_WRITE(hdmi_priv->sdvox_reg, temp | SDVO_ENABLE);
+ temp |= SDVO_ENABLE;
}
+
+ I915_WRITE(hdmi_priv->sdvox_reg, temp);
POSTING_READ(hdmi_priv->sdvox_reg);
+
+ /* HW workaround, need to write this twice for issue that may result
+ * in first write getting masked.
+ */
+ if (IS_IGDNG(dev)) {
+ I915_WRITE(hdmi_priv->sdvox_reg, temp);
+ POSTING_READ(hdmi_priv->sdvox_reg);
+ }
}
static void intel_hdmi_save(struct drm_connector *connector)
diff --git a/drivers/gpu/drm/radeon/Makefile b/drivers/gpu/drm/radeon/Makefile
index 09a28923f46..b5713eedd6e 100644
--- a/drivers/gpu/drm/radeon/Makefile
+++ b/drivers/gpu/drm/radeon/Makefile
@@ -49,7 +49,7 @@ radeon-y += radeon_device.o radeon_kms.o \
radeon_cs.o radeon_bios.o radeon_benchmark.o r100.o r300.o r420.o \
rs400.o rs600.o rs690.o rv515.o r520.o r600.o rv770.o radeon_test.o \
r200.o radeon_legacy_tv.o r600_cs.o r600_blit.o r600_blit_shaders.o \
- r600_blit_kms.o
+ r600_blit_kms.o radeon_pm.o
radeon-$(CONFIG_COMPAT) += radeon_ioc32.o
diff --git a/drivers/gpu/drm/radeon/atom.c b/drivers/gpu/drm/radeon/atom.c
index 901befe03da..d67c42555ab 100644
--- a/drivers/gpu/drm/radeon/atom.c
+++ b/drivers/gpu/drm/radeon/atom.c
@@ -107,6 +107,7 @@ static uint32_t atom_iio_execute(struct atom_context *ctx, int base,
base += 3;
break;
case ATOM_IIO_WRITE:
+ (void)ctx->card->reg_read(ctx->card, CU16(base + 1));
ctx->card->reg_write(ctx->card, CU16(base + 1), temp);
base += 3;
break;
diff --git a/drivers/gpu/drm/radeon/atombios.h b/drivers/gpu/drm/radeon/atombios.h
index 5d402086bc4..c11ddddfb3b 100644
--- a/drivers/gpu/drm/radeon/atombios.h
+++ b/drivers/gpu/drm/radeon/atombios.h
@@ -2314,7 +2314,7 @@ typedef struct _ATOM_SPREAD_SPECTRUM_ASSIGNMENT {
UCHAR ucSS_Step;
UCHAR ucSS_Delay;
UCHAR ucSS_Id;
- UCHAR ucRecommandedRef_Div;
+ UCHAR ucRecommendedRef_Div;
UCHAR ucSS_Range; /* it was reserved for V11 */
} ATOM_SPREAD_SPECTRUM_ASSIGNMENT;
diff --git a/drivers/gpu/drm/radeon/atombios_crtc.c b/drivers/gpu/drm/radeon/atombios_crtc.c
index 14fa9701aeb..fba3c96b915 100644
--- a/drivers/gpu/drm/radeon/atombios_crtc.c
+++ b/drivers/gpu/drm/radeon/atombios_crtc.c
@@ -31,10 +31,6 @@
#include "atom.h"
#include "atom-bits.h"
-/* evil but including atombios.h is much worse */
-bool radeon_atom_get_tv_timings(struct radeon_device *rdev, int index,
- SET_CRTC_TIMING_PARAMETERS_PS_ALLOCATION *crtc_timing,
- int32_t *pixel_clock);
static void atombios_overscan_setup(struct drm_crtc *crtc,
struct drm_display_mode *mode,
struct drm_display_mode *adjusted_mode)
@@ -245,84 +241,172 @@ void atombios_crtc_dpms(struct drm_crtc *crtc, int mode)
{
struct drm_device *dev = crtc->dev;
struct radeon_device *rdev = dev->dev_private;
+ struct radeon_crtc *radeon_crtc = to_radeon_crtc(crtc);
switch (mode) {
case DRM_MODE_DPMS_ON:
+ atombios_enable_crtc(crtc, 1);
if (ASIC_IS_DCE3(rdev))
atombios_enable_crtc_memreq(crtc, 1);
- atombios_enable_crtc(crtc, 1);
atombios_blank_crtc(crtc, 0);
+ drm_vblank_post_modeset(dev, radeon_crtc->crtc_id);
+ radeon_crtc_load_lut(crtc);
break;
case DRM_MODE_DPMS_STANDBY:
case DRM_MODE_DPMS_SUSPEND:
case DRM_MODE_DPMS_OFF:
+ drm_vblank_pre_modeset(dev, radeon_crtc->crtc_id);
atombios_blank_crtc(crtc, 1);
- atombios_enable_crtc(crtc, 0);
if (ASIC_IS_DCE3(rdev))
atombios_enable_crtc_memreq(crtc, 0);
+ atombios_enable_crtc(crtc, 0);
break;
}
-
- if (mode != DRM_MODE_DPMS_OFF) {
- radeon_crtc_load_lut(crtc);
- }
}
static void
atombios_set_crtc_dtd_timing(struct drm_crtc *crtc,
- SET_CRTC_USING_DTD_TIMING_PARAMETERS * crtc_param)
+ struct drm_display_mode *mode)
{
+ struct radeon_crtc *radeon_crtc = to_radeon_crtc(crtc);
struct drm_device *dev = crtc->dev;
struct radeon_device *rdev = dev->dev_private;
- SET_CRTC_USING_DTD_TIMING_PARAMETERS conv_param;
+ SET_CRTC_USING_DTD_TIMING_PARAMETERS args;
int index = GetIndexIntoMasterTable(COMMAND, SetCRTC_UsingDTDTiming);
+ u16 misc = 0;
- conv_param.usH_Size = cpu_to_le16(crtc_param->usH_Size);
- conv_param.usH_Blanking_Time =
- cpu_to_le16(crtc_param->usH_Blanking_Time);
- conv_param.usV_Size = cpu_to_le16(crtc_param->usV_Size);
- conv_param.usV_Blanking_Time =
- cpu_to_le16(crtc_param->usV_Blanking_Time);
- conv_param.usH_SyncOffset = cpu_to_le16(crtc_param->usH_SyncOffset);
- conv_param.usH_SyncWidth = cpu_to_le16(crtc_param->usH_SyncWidth);
- conv_param.usV_SyncOffset = cpu_to_le16(crtc_param->usV_SyncOffset);
- conv_param.usV_SyncWidth = cpu_to_le16(crtc_param->usV_SyncWidth);
- conv_param.susModeMiscInfo.usAccess =
- cpu_to_le16(crtc_param->susModeMiscInfo.usAccess);
- conv_param.ucCRTC = crtc_param->ucCRTC;
+ memset(&args, 0, sizeof(args));
+ args.usH_Size = cpu_to_le16(mode->crtc_hdisplay);
+ args.usH_Blanking_Time =
+ cpu_to_le16(mode->crtc_hblank_end - mode->crtc_hdisplay);
+ args.usV_Size = cpu_to_le16(mode->crtc_vdisplay);
+ args.usV_Blanking_Time =
+ cpu_to_le16(mode->crtc_vblank_end - mode->crtc_vdisplay);
+ args.usH_SyncOffset =
+ cpu_to_le16(mode->crtc_hsync_start - mode->crtc_hdisplay);
+ args.usH_SyncWidth =
+ cpu_to_le16(mode->crtc_hsync_end - mode->crtc_hsync_start);
+ args.usV_SyncOffset =
+ cpu_to_le16(mode->crtc_vsync_start - mode->crtc_vdisplay);
+ args.usV_SyncWidth =
+ cpu_to_le16(mode->crtc_vsync_end - mode->crtc_vsync_start);
+ /*args.ucH_Border = mode->hborder;*/
+ /*args.ucV_Border = mode->vborder;*/
+
+ if (mode->flags & DRM_MODE_FLAG_NVSYNC)
+ misc |= ATOM_VSYNC_POLARITY;
+ if (mode->flags & DRM_MODE_FLAG_NHSYNC)
+ misc |= ATOM_HSYNC_POLARITY;
+ if (mode->flags & DRM_MODE_FLAG_CSYNC)
+ misc |= ATOM_COMPOSITESYNC;
+ if (mode->flags & DRM_MODE_FLAG_INTERLACE)
+ misc |= ATOM_INTERLACE;
+ if (mode->flags & DRM_MODE_FLAG_DBLSCAN)
+ misc |= ATOM_DOUBLE_CLOCK_MODE;
+
+ args.susModeMiscInfo.usAccess = cpu_to_le16(misc);
+ args.ucCRTC = radeon_crtc->crtc_id;
printk("executing set crtc dtd timing\n");
- atom_execute_table(rdev->mode_info.atom_context, index, (uint32_t *)&conv_param);
+ atom_execute_table(rdev->mode_info.atom_context, index, (uint32_t *)&args);
}
-void atombios_crtc_set_timing(struct drm_crtc *crtc,
- SET_CRTC_TIMING_PARAMETERS_PS_ALLOCATION *
- crtc_param)
+static void atombios_crtc_set_timing(struct drm_crtc *crtc,
+ struct drm_display_mode *mode)
{
+ struct radeon_crtc *radeon_crtc = to_radeon_crtc(crtc);
struct drm_device *dev = crtc->dev;
struct radeon_device *rdev = dev->dev_private;
- SET_CRTC_TIMING_PARAMETERS_PS_ALLOCATION conv_param;
+ SET_CRTC_TIMING_PARAMETERS_PS_ALLOCATION args;
int index = GetIndexIntoMasterTable(COMMAND, SetCRTC_Timing);
+ u16 misc = 0;
- conv_param.usH_Total = cpu_to_le16(crtc_param->usH_Total);
- conv_param.usH_Disp = cpu_to_le16(crtc_param->usH_Disp);
- conv_param.usH_SyncStart = cpu_to_le16(crtc_param->usH_SyncStart);
- conv_param.usH_SyncWidth = cpu_to_le16(crtc_param->usH_SyncWidth);
- conv_param.usV_Total = cpu_to_le16(crtc_param->usV_Total);
- conv_param.usV_Disp = cpu_to_le16(crtc_param->usV_Disp);
- conv_param.usV_SyncStart = cpu_to_le16(crtc_param->usV_SyncStart);
- conv_param.usV_SyncWidth = cpu_to_le16(crtc_param->usV_SyncWidth);
- conv_param.susModeMiscInfo.usAccess =
- cpu_to_le16(crtc_param->susModeMiscInfo.usAccess);
- conv_param.ucCRTC = crtc_param->ucCRTC;
- conv_param.ucOverscanRight = crtc_param->ucOverscanRight;
- conv_param.ucOverscanLeft = crtc_param->ucOverscanLeft;
- conv_param.ucOverscanBottom = crtc_param->ucOverscanBottom;
- conv_param.ucOverscanTop = crtc_param->ucOverscanTop;
- conv_param.ucReserved = crtc_param->ucReserved;
+ memset(&args, 0, sizeof(args));
+ args.usH_Total = cpu_to_le16(mode->crtc_htotal);
+ args.usH_Disp = cpu_to_le16(mode->crtc_hdisplay);
+ args.usH_SyncStart = cpu_to_le16(mode->crtc_hsync_start);
+ args.usH_SyncWidth =
+ cpu_to_le16(mode->crtc_hsync_end - mode->crtc_hsync_start);
+ args.usV_Total = cpu_to_le16(mode->crtc_vtotal);
+ args.usV_Disp = cpu_to_le16(mode->crtc_vdisplay);
+ args.usV_SyncStart = cpu_to_le16(mode->crtc_vsync_start);
+ args.usV_SyncWidth =
+ cpu_to_le16(mode->crtc_vsync_end - mode->crtc_vsync_start);
+
+ if (mode->flags & DRM_MODE_FLAG_NVSYNC)
+ misc |= ATOM_VSYNC_POLARITY;
+ if (mode->flags & DRM_MODE_FLAG_NHSYNC)
+ misc |= ATOM_HSYNC_POLARITY;
+ if (mode->flags & DRM_MODE_FLAG_CSYNC)
+ misc |= ATOM_COMPOSITESYNC;
+ if (mode->flags & DRM_MODE_FLAG_INTERLACE)
+ misc |= ATOM_INTERLACE;
+ if (mode->flags & DRM_MODE_FLAG_DBLSCAN)
+ misc |= ATOM_DOUBLE_CLOCK_MODE;
+
+ args.susModeMiscInfo.usAccess = cpu_to_le16(misc);
+ args.ucCRTC = radeon_crtc->crtc_id;
printk("executing set crtc timing\n");
- atom_execute_table(rdev->mode_info.atom_context, index, (uint32_t *)&conv_param);
+ atom_execute_table(rdev->mode_info.atom_context, index, (uint32_t *)&args);
+}
+
+static void atombios_set_ss(struct drm_crtc *crtc, int enable)
+{
+ struct radeon_crtc *radeon_crtc = to_radeon_crtc(crtc);
+ struct drm_device *dev = crtc->dev;
+ struct radeon_device *rdev = dev->dev_private;
+ struct drm_encoder *encoder = NULL;
+ struct radeon_encoder *radeon_encoder = NULL;
+ struct radeon_encoder_atom_dig *dig = NULL;
+ int index = GetIndexIntoMasterTable(COMMAND, EnableSpreadSpectrumOnPPLL);
+ ENABLE_SPREAD_SPECTRUM_ON_PPLL_PS_ALLOCATION args;
+ ENABLE_LVDS_SS_PARAMETERS legacy_args;
+ uint16_t percentage = 0;
+ uint8_t type = 0, step = 0, delay = 0, range = 0;
+
+ list_for_each_entry(encoder, &dev->mode_config.encoder_list, head) {
+ if (encoder->crtc == crtc) {
+ radeon_encoder = to_radeon_encoder(encoder);
+ /* only enable spread spectrum on LVDS */
+ if (radeon_encoder->devices & (ATOM_DEVICE_LCD_SUPPORT)) {
+ dig = radeon_encoder->enc_priv;
+ if (dig && dig->ss) {
+ percentage = dig->ss->percentage;
+ type = dig->ss->type;
+ step = dig->ss->step;
+ delay = dig->ss->delay;
+ range = dig->ss->range;
+ } else if (enable)
+ return;
+ } else if (enable)
+ return;
+ break;
+ }
+ }
+
+ if (!radeon_encoder)
+ return;
+
+ if (ASIC_IS_AVIVO(rdev)) {
+ memset(&args, 0, sizeof(args));
+ args.usSpreadSpectrumPercentage = cpu_to_le16(percentage);
+ args.ucSpreadSpectrumType = type;
+ args.ucSpreadSpectrumStep = step;
+ args.ucSpreadSpectrumDelay = delay;
+ args.ucSpreadSpectrumRange = range;
+ args.ucPpll = radeon_crtc->crtc_id ? ATOM_PPLL2 : ATOM_PPLL1;
+ args.ucEnable = enable;
+ atom_execute_table(rdev->mode_info.atom_context, index, (uint32_t *)&args);
+ } else {
+ memset(&legacy_args, 0, sizeof(legacy_args));
+ legacy_args.usSpreadSpectrumPercentage = cpu_to_le16(percentage);
+ legacy_args.ucSpreadSpectrumType = type;
+ legacy_args.ucSpreadSpectrumStepSize_Delay = (step & 3) << 2;
+ legacy_args.ucSpreadSpectrumStepSize_Delay |= (delay & 7) << 4;
+ legacy_args.ucEnable = enable;
+ atom_execute_table(rdev->mode_info.atom_context, index, (uint32_t *)&legacy_args);
+ }
}
void atombios_crtc_set_pll(struct drm_crtc *crtc, struct drm_display_mode *mode)
@@ -333,12 +417,13 @@ void atombios_crtc_set_pll(struct drm_crtc *crtc, struct drm_display_mode *mode)
struct drm_encoder *encoder = NULL;
struct radeon_encoder *radeon_encoder = NULL;
uint8_t frev, crev;
- int index = GetIndexIntoMasterTable(COMMAND, SetPixelClock);
+ int index;
SET_PIXEL_CLOCK_PS_ALLOCATION args;
PIXEL_CLOCK_PARAMETERS *spc1_ptr;
PIXEL_CLOCK_PARAMETERS_V2 *spc2_ptr;
PIXEL_CLOCK_PARAMETERS_V3 *spc3_ptr;
- uint32_t sclock = mode->clock;
+ uint32_t pll_clock = mode->clock;
+ uint32_t adjusted_clock;
uint32_t ref_div = 0, fb_div = 0, frac_fb_div = 0, post_div = 0;
struct radeon_pll *pll;
int pll_flags = 0;
@@ -346,8 +431,6 @@ void atombios_crtc_set_pll(struct drm_crtc *crtc, struct drm_display_mode *mode)
memset(&args, 0, sizeof(args));
if (ASIC_IS_AVIVO(rdev)) {
- uint32_t ss_cntl;
-
if ((rdev->family == CHIP_RS600) ||
(rdev->family == CHIP_RS690) ||
(rdev->family == CHIP_RS740))
@@ -358,15 +441,6 @@ void atombios_crtc_set_pll(struct drm_crtc *crtc, struct drm_display_mode *mode)
pll_flags |= RADEON_PLL_PREFER_HIGH_FB_DIV;
else
pll_flags |= RADEON_PLL_PREFER_LOW_REF_DIV;
-
- /* disable spread spectrum clocking for now -- thanks Hedy Lamarr */
- if (radeon_crtc->crtc_id == 0) {
- ss_cntl = RREG32(AVIVO_P1PLL_INT_SS_CNTL);
- WREG32(AVIVO_P1PLL_INT_SS_CNTL, ss_cntl & ~1);
- } else {
- ss_cntl = RREG32(AVIVO_P2PLL_INT_SS_CNTL);
- WREG32(AVIVO_P2PLL_INT_SS_CNTL, ss_cntl & ~1);
- }
} else {
pll_flags |= RADEON_PLL_LEGACY;
@@ -383,9 +457,8 @@ void atombios_crtc_set_pll(struct drm_crtc *crtc, struct drm_display_mode *mode)
if (encoder->encoder_type !=
DRM_MODE_ENCODER_DAC)
pll_flags |= RADEON_PLL_NO_ODD_POST_DIV;
- if (!ASIC_IS_AVIVO(rdev)
- && (encoder->encoder_type ==
- DRM_MODE_ENCODER_LVDS))
+ if (encoder->encoder_type ==
+ DRM_MODE_ENCODER_LVDS)
pll_flags |= RADEON_PLL_USE_REF_DIV;
}
radeon_encoder = to_radeon_encoder(encoder);
@@ -393,14 +466,43 @@ void atombios_crtc_set_pll(struct drm_crtc *crtc, struct drm_display_mode *mode)
}
}
+ /* DCE3+ has an AdjustDisplayPll that will adjust the pixel clock
+ * accordingly based on the encoder/transmitter to work around
+ * special hw requirements.
+ */
+ if (ASIC_IS_DCE3(rdev)) {
+ ADJUST_DISPLAY_PLL_PS_ALLOCATION adjust_pll_args;
+
+ if (!encoder)
+ return;
+
+ memset(&adjust_pll_args, 0, sizeof(adjust_pll_args));
+ adjust_pll_args.usPixelClock = cpu_to_le16(mode->clock / 10);
+ adjust_pll_args.ucTransmitterID = radeon_encoder->encoder_id;
+ adjust_pll_args.ucEncodeMode = atombios_get_encoder_mode(encoder);
+
+ index = GetIndexIntoMasterTable(COMMAND, AdjustDisplayPll);
+ atom_execute_table(rdev->mode_info.atom_context,
+ index, (uint32_t *)&adjust_pll_args);
+ adjusted_clock = le16_to_cpu(adjust_pll_args.usPixelClock) * 10;
+ } else {
+ /* DVO wants 2x pixel clock if the DVO chip is in 12 bit mode */
+ if (ASIC_IS_AVIVO(rdev) &&
+ (radeon_encoder->encoder_id == ENCODER_OBJECT_ID_INTERNAL_KLDSCP_DVO1))
+ adjusted_clock = mode->clock * 2;
+ else
+ adjusted_clock = mode->clock;
+ }
+
if (radeon_crtc->crtc_id == 0)
pll = &rdev->clock.p1pll;
else
pll = &rdev->clock.p2pll;
- radeon_compute_pll(pll, mode->clock, &sclock, &fb_div, &frac_fb_div,
+ radeon_compute_pll(pll, adjusted_clock, &pll_clock, &fb_div, &frac_fb_div,
&ref_div, &post_div, pll_flags);
+ index = GetIndexIntoMasterTable(COMMAND, SetPixelClock);
atom_parse_cmd_header(rdev->mode_info.atom_context, index, &frev,
&crev);
@@ -409,7 +511,7 @@ void atombios_crtc_set_pll(struct drm_crtc *crtc, struct drm_display_mode *mode)
switch (crev) {
case 1:
spc1_ptr = (PIXEL_CLOCK_PARAMETERS *) & args.sPCLKInput;
- spc1_ptr->usPixelClock = cpu_to_le16(sclock);
+ spc1_ptr->usPixelClock = cpu_to_le16(mode->clock / 10);
spc1_ptr->usRefDiv = cpu_to_le16(ref_div);
spc1_ptr->usFbDiv = cpu_to_le16(fb_div);
spc1_ptr->ucFracFbDiv = frac_fb_div;
@@ -422,7 +524,7 @@ void atombios_crtc_set_pll(struct drm_crtc *crtc, struct drm_display_mode *mode)
case 2:
spc2_ptr =
(PIXEL_CLOCK_PARAMETERS_V2 *) & args.sPCLKInput;
- spc2_ptr->usPixelClock = cpu_to_le16(sclock);
+ spc2_ptr->usPixelClock = cpu_to_le16(mode->clock / 10);
spc2_ptr->usRefDiv = cpu_to_le16(ref_div);
spc2_ptr->usFbDiv = cpu_to_le16(fb_div);
spc2_ptr->ucFracFbDiv = frac_fb_div;
@@ -437,7 +539,7 @@ void atombios_crtc_set_pll(struct drm_crtc *crtc, struct drm_display_mode *mode)
return;
spc3_ptr =
(PIXEL_CLOCK_PARAMETERS_V3 *) & args.sPCLKInput;
- spc3_ptr->usPixelClock = cpu_to_le16(sclock);
+ spc3_ptr->usPixelClock = cpu_to_le16(mode->clock / 10);
spc3_ptr->usRefDiv = cpu_to_le16(ref_div);
spc3_ptr->usFbDiv = cpu_to_le16(fb_div);
spc3_ptr->ucFracFbDiv = frac_fb_div;
@@ -471,21 +573,34 @@ int atombios_crtc_set_base(struct drm_crtc *crtc, int x, int y,
struct radeon_device *rdev = dev->dev_private;
struct radeon_framebuffer *radeon_fb;
struct drm_gem_object *obj;
- struct drm_radeon_gem_object *obj_priv;
+ struct radeon_bo *rbo;
uint64_t fb_location;
uint32_t fb_format, fb_pitch_pixels, tiling_flags;
+ int r;
- if (!crtc->fb)
- return -EINVAL;
+ /* no fb bound */
+ if (!crtc->fb) {
+ DRM_DEBUG("No FB bound\n");
+ return 0;
+ }
radeon_fb = to_radeon_framebuffer(crtc->fb);
+ /* Pin framebuffer & get tilling informations */
obj = radeon_fb->obj;
- obj_priv = obj->driver_private;
-
- if (radeon_gem_object_pin(obj, RADEON_GEM_DOMAIN_VRAM, &fb_location)) {
+ rbo = obj->driver_private;
+ r = radeon_bo_reserve(rbo, false);
+ if (unlikely(r != 0))
+ return r;
+ r = radeon_bo_pin(rbo, RADEON_GEM_DOMAIN_VRAM, &fb_location);
+ if (unlikely(r != 0)) {
+ radeon_bo_unreserve(rbo);
return -EINVAL;
}
+ radeon_bo_get_tiling_flags(rbo, &tiling_flags, NULL);
+ radeon_bo_unreserve(rbo);
+ if (tiling_flags & RADEON_TILING_MACRO)
+ fb_format |= AVIVO_D1GRPH_MACRO_ADDRESS_MODE;
switch (crtc->fb->bits_per_pixel) {
case 8:
@@ -515,11 +630,6 @@ int atombios_crtc_set_base(struct drm_crtc *crtc, int x, int y,
return -EINVAL;
}
- radeon_object_get_tiling_flags(obj->driver_private,
- &tiling_flags, NULL);
- if (tiling_flags & RADEON_TILING_MACRO)
- fb_format |= AVIVO_D1GRPH_MACRO_ADDRESS_MODE;
-
if (tiling_flags & RADEON_TILING_MICRO)
fb_format |= AVIVO_D1GRPH_TILED;
@@ -527,6 +637,16 @@ int atombios_crtc_set_base(struct drm_crtc *crtc, int x, int y,
WREG32(AVIVO_D1VGA_CONTROL, 0);
else
WREG32(AVIVO_D2VGA_CONTROL, 0);
+
+ if (rdev->family >= CHIP_RV770) {
+ if (radeon_crtc->crtc_id) {
+ WREG32(R700_D2GRPH_PRIMARY_SURFACE_ADDRESS_HIGH, 0);
+ WREG32(R700_D2GRPH_SECONDARY_SURFACE_ADDRESS_HIGH, 0);
+ } else {
+ WREG32(R700_D1GRPH_PRIMARY_SURFACE_ADDRESS_HIGH, 0);
+ WREG32(R700_D1GRPH_SECONDARY_SURFACE_ADDRESS_HIGH, 0);
+ }
+ }
WREG32(AVIVO_D1GRPH_PRIMARY_SURFACE_ADDRESS + radeon_crtc->crtc_offset,
(u32) fb_location);
WREG32(AVIVO_D1GRPH_SECONDARY_SURFACE_ADDRESS +
@@ -561,8 +681,17 @@ int atombios_crtc_set_base(struct drm_crtc *crtc, int x, int y,
if (old_fb && old_fb != crtc->fb) {
radeon_fb = to_radeon_framebuffer(old_fb);
- radeon_gem_object_unpin(radeon_fb->obj);
+ rbo = radeon_fb->obj->driver_private;
+ r = radeon_bo_reserve(rbo, false);
+ if (unlikely(r != 0))
+ return r;
+ radeon_bo_unpin(rbo);
+ radeon_bo_unreserve(rbo);
}
+
+ /* Bytes per pixel may have changed */
+ radeon_bandwidth_update(rdev);
+
return 0;
}
@@ -574,134 +703,24 @@ int atombios_crtc_mode_set(struct drm_crtc *crtc,
struct radeon_crtc *radeon_crtc = to_radeon_crtc(crtc);
struct drm_device *dev = crtc->dev;
struct radeon_device *rdev = dev->dev_private;
- struct drm_encoder *encoder;
- SET_CRTC_TIMING_PARAMETERS_PS_ALLOCATION crtc_timing;
- int need_tv_timings = 0;
- bool ret;
/* TODO color tiling */
- memset(&crtc_timing, 0, sizeof(crtc_timing));
-
- list_for_each_entry(encoder, &dev->mode_config.encoder_list, head) {
- /* find tv std */
- if (encoder->crtc == crtc) {
- struct radeon_encoder *radeon_encoder = to_radeon_encoder(encoder);
-
- if (radeon_encoder->active_device & ATOM_DEVICE_TV_SUPPORT) {
- struct radeon_encoder_atom_dac *tv_dac = radeon_encoder->enc_priv;
- if (tv_dac) {
- if (tv_dac->tv_std == TV_STD_NTSC ||
- tv_dac->tv_std == TV_STD_NTSC_J ||
- tv_dac->tv_std == TV_STD_PAL_M)
- need_tv_timings = 1;
- else
- need_tv_timings = 2;
- break;
- }
- }
- }
- }
-
- crtc_timing.ucCRTC = radeon_crtc->crtc_id;
- if (need_tv_timings) {
- ret = radeon_atom_get_tv_timings(rdev, need_tv_timings - 1,
- &crtc_timing, &adjusted_mode->clock);
- if (ret == false)
- need_tv_timings = 0;
- }
-
- if (!need_tv_timings) {
- crtc_timing.usH_Total = adjusted_mode->crtc_htotal;
- crtc_timing.usH_Disp = adjusted_mode->crtc_hdisplay;
- crtc_timing.usH_SyncStart = adjusted_mode->crtc_hsync_start;
- crtc_timing.usH_SyncWidth =
- adjusted_mode->crtc_hsync_end - adjusted_mode->crtc_hsync_start;
-
- crtc_timing.usV_Total = adjusted_mode->crtc_vtotal;
- crtc_timing.usV_Disp = adjusted_mode->crtc_vdisplay;
- crtc_timing.usV_SyncStart = adjusted_mode->crtc_vsync_start;
- crtc_timing.usV_SyncWidth =
- adjusted_mode->crtc_vsync_end - adjusted_mode->crtc_vsync_start;
-
- if (adjusted_mode->flags & DRM_MODE_FLAG_NVSYNC)
- crtc_timing.susModeMiscInfo.usAccess |= ATOM_VSYNC_POLARITY;
-
- if (adjusted_mode->flags & DRM_MODE_FLAG_NHSYNC)
- crtc_timing.susModeMiscInfo.usAccess |= ATOM_HSYNC_POLARITY;
-
- if (adjusted_mode->flags & DRM_MODE_FLAG_CSYNC)
- crtc_timing.susModeMiscInfo.usAccess |= ATOM_COMPOSITESYNC;
-
- if (adjusted_mode->flags & DRM_MODE_FLAG_INTERLACE)
- crtc_timing.susModeMiscInfo.usAccess |= ATOM_INTERLACE;
-
- if (adjusted_mode->flags & DRM_MODE_FLAG_DBLSCAN)
- crtc_timing.susModeMiscInfo.usAccess |= ATOM_DOUBLE_CLOCK_MODE;
- }
+ atombios_set_ss(crtc, 0);
atombios_crtc_set_pll(crtc, adjusted_mode);
- atombios_crtc_set_timing(crtc, &crtc_timing);
+ atombios_set_ss(crtc, 1);
+ atombios_crtc_set_timing(crtc, adjusted_mode);
if (ASIC_IS_AVIVO(rdev))
atombios_crtc_set_base(crtc, x, y, old_fb);
else {
- if (radeon_crtc->crtc_id == 0) {
- SET_CRTC_USING_DTD_TIMING_PARAMETERS crtc_dtd_timing;
- memset(&crtc_dtd_timing, 0, sizeof(crtc_dtd_timing));
-
- /* setup FP shadow regs on R4xx */
- crtc_dtd_timing.ucCRTC = radeon_crtc->crtc_id;
- crtc_dtd_timing.usH_Size = adjusted_mode->crtc_hdisplay;
- crtc_dtd_timing.usV_Size = adjusted_mode->crtc_vdisplay;
- crtc_dtd_timing.usH_Blanking_Time =
- adjusted_mode->crtc_hblank_end -
- adjusted_mode->crtc_hdisplay;
- crtc_dtd_timing.usV_Blanking_Time =
- adjusted_mode->crtc_vblank_end -
- adjusted_mode->crtc_vdisplay;
- crtc_dtd_timing.usH_SyncOffset =
- adjusted_mode->crtc_hsync_start -
- adjusted_mode->crtc_hdisplay;
- crtc_dtd_timing.usV_SyncOffset =
- adjusted_mode->crtc_vsync_start -
- adjusted_mode->crtc_vdisplay;
- crtc_dtd_timing.usH_SyncWidth =
- adjusted_mode->crtc_hsync_end -
- adjusted_mode->crtc_hsync_start;
- crtc_dtd_timing.usV_SyncWidth =
- adjusted_mode->crtc_vsync_end -
- adjusted_mode->crtc_vsync_start;
- /* crtc_dtd_timing.ucH_Border = adjusted_mode->crtc_hborder; */
- /* crtc_dtd_timing.ucV_Border = adjusted_mode->crtc_vborder; */
-
- if (adjusted_mode->flags & DRM_MODE_FLAG_NVSYNC)
- crtc_dtd_timing.susModeMiscInfo.usAccess |=
- ATOM_VSYNC_POLARITY;
-
- if (adjusted_mode->flags & DRM_MODE_FLAG_NHSYNC)
- crtc_dtd_timing.susModeMiscInfo.usAccess |=
- ATOM_HSYNC_POLARITY;
-
- if (adjusted_mode->flags & DRM_MODE_FLAG_CSYNC)
- crtc_dtd_timing.susModeMiscInfo.usAccess |=
- ATOM_COMPOSITESYNC;
-
- if (adjusted_mode->flags & DRM_MODE_FLAG_INTERLACE)
- crtc_dtd_timing.susModeMiscInfo.usAccess |=
- ATOM_INTERLACE;
-
- if (adjusted_mode->flags & DRM_MODE_FLAG_DBLSCAN)
- crtc_dtd_timing.susModeMiscInfo.usAccess |=
- ATOM_DOUBLE_CLOCK_MODE;
-
- atombios_set_crtc_dtd_timing(crtc, &crtc_dtd_timing);
- }
+ if (radeon_crtc->crtc_id == 0)
+ atombios_set_crtc_dtd_timing(crtc, adjusted_mode);
radeon_crtc_set_base(crtc, x, y, old_fb);
radeon_legacy_atom_set_surface(crtc);
}
atombios_overscan_setup(crtc, mode, adjusted_mode);
atombios_scaler_setup(crtc);
- radeon_bandwidth_update(rdev);
return 0;
}
diff --git a/drivers/gpu/drm/radeon/mkregtable.c b/drivers/gpu/drm/radeon/mkregtable.c
index fb211e585de..0d79577c157 100644
--- a/drivers/gpu/drm/radeon/mkregtable.c
+++ b/drivers/gpu/drm/radeon/mkregtable.c
@@ -561,7 +561,7 @@ struct table {
char *gpu_prefix;
};
-struct offset *offset_new(unsigned o)
+static struct offset *offset_new(unsigned o)
{
struct offset *offset;
@@ -573,12 +573,12 @@ struct offset *offset_new(unsigned o)
return offset;
}
-void table_offset_add(struct table *t, struct offset *offset)
+static void table_offset_add(struct table *t, struct offset *offset)
{
list_add_tail(&offset->list, &t->offsets);
}
-void table_init(struct table *t)
+static void table_init(struct table *t)
{
INIT_LIST_HEAD(&t->offsets);
t->offset_max = 0;
@@ -586,7 +586,7 @@ void table_init(struct table *t)
t->table = NULL;
}
-void table_print(struct table *t)
+static void table_print(struct table *t)
{
unsigned nlloop, i, j, n, c, id;
@@ -611,7 +611,7 @@ void table_print(struct table *t)
printf("};\n");
}
-int table_build(struct table *t)
+static int table_build(struct table *t)
{
struct offset *offset;
unsigned i, m;
@@ -631,7 +631,7 @@ int table_build(struct table *t)
}
static char gpu_name[10];
-int parser_auth(struct table *t, const char *filename)
+static int parser_auth(struct table *t, const char *filename)
{
FILE *file;
regex_t mask_rex;
diff --git a/drivers/gpu/drm/radeon/r100.c b/drivers/gpu/drm/radeon/r100.c
index 161094c07d9..109096802e6 100644
--- a/drivers/gpu/drm/radeon/r100.c
+++ b/drivers/gpu/drm/radeon/r100.c
@@ -94,6 +94,15 @@ int r100_pci_gart_init(struct radeon_device *rdev)
return radeon_gart_table_ram_alloc(rdev);
}
+/* required on r1xx, r2xx, r300, r(v)350, r420/r481, rs400/rs480 */
+void r100_enable_bm(struct radeon_device *rdev)
+{
+ uint32_t tmp;
+ /* Enable bus mastering */
+ tmp = RREG32(RADEON_BUS_CNTL) & ~RADEON_BUS_MASTER_DIS;
+ WREG32(RADEON_BUS_CNTL, tmp);
+}
+
int r100_pci_gart_enable(struct radeon_device *rdev)
{
uint32_t tmp;
@@ -105,9 +114,6 @@ int r100_pci_gart_enable(struct radeon_device *rdev)
WREG32(RADEON_AIC_LO_ADDR, rdev->mc.gtt_location);
tmp = rdev->mc.gtt_location + rdev->mc.gtt_size - 1;
WREG32(RADEON_AIC_HI_ADDR, tmp);
- /* Enable bus mastering */
- tmp = RREG32(RADEON_BUS_CNTL) & ~RADEON_BUS_MASTER_DIS;
- WREG32(RADEON_BUS_CNTL, tmp);
/* set PCI GART page-table base address */
WREG32(RADEON_AIC_PT_BASE, rdev->gart.table_addr);
tmp = RREG32(RADEON_AIC_CNTL) | RADEON_PCIGART_TRANSLATE_EN;
@@ -186,7 +192,7 @@ static inline uint32_t r100_irq_ack(struct radeon_device *rdev)
int r100_irq_process(struct radeon_device *rdev)
{
- uint32_t status;
+ uint32_t status, msi_rearm;
status = r100_irq_ack(rdev);
if (!status) {
@@ -209,6 +215,21 @@ int r100_irq_process(struct radeon_device *rdev)
}
status = r100_irq_ack(rdev);
}
+ if (rdev->msi_enabled) {
+ switch (rdev->family) {
+ case CHIP_RS400:
+ case CHIP_RS480:
+ msi_rearm = RREG32(RADEON_AIC_CNTL) & ~RS400_MSI_REARM;
+ WREG32(RADEON_AIC_CNTL, msi_rearm);
+ WREG32(RADEON_AIC_CNTL, msi_rearm | RS400_MSI_REARM);
+ break;
+ default:
+ msi_rearm = RREG32(RADEON_MSI_REARM_EN) & ~RV370_MSI_REARM_EN;
+ WREG32(RADEON_MSI_REARM_EN, msi_rearm);
+ WREG32(RADEON_MSI_REARM_EN, msi_rearm | RV370_MSI_REARM_EN);
+ break;
+ }
+ }
return IRQ_HANDLED;
}
@@ -240,24 +261,27 @@ int r100_wb_init(struct radeon_device *rdev)
int r;
if (rdev->wb.wb_obj == NULL) {
- r = radeon_object_create(rdev, NULL, 4096,
- true,
- RADEON_GEM_DOMAIN_GTT,
- false, &rdev->wb.wb_obj);
+ r = radeon_bo_create(rdev, NULL, RADEON_GPU_PAGE_SIZE, true,
+ RADEON_GEM_DOMAIN_GTT,
+ &rdev->wb.wb_obj);
if (r) {
- DRM_ERROR("radeon: failed to create WB buffer (%d).\n", r);
+ dev_err(rdev->dev, "(%d) create WB buffer failed\n", r);
return r;
}
- r = radeon_object_pin(rdev->wb.wb_obj,
- RADEON_GEM_DOMAIN_GTT,
- &rdev->wb.gpu_addr);
+ r = radeon_bo_reserve(rdev->wb.wb_obj, false);
+ if (unlikely(r != 0))
+ return r;
+ r = radeon_bo_pin(rdev->wb.wb_obj, RADEON_GEM_DOMAIN_GTT,
+ &rdev->wb.gpu_addr);
if (r) {
- DRM_ERROR("radeon: failed to pin WB buffer (%d).\n", r);
+ dev_err(rdev->dev, "(%d) pin WB buffer failed\n", r);
+ radeon_bo_unreserve(rdev->wb.wb_obj);
return r;
}
- r = radeon_object_kmap(rdev->wb.wb_obj, (void **)&rdev->wb.wb);
+ r = radeon_bo_kmap(rdev->wb.wb_obj, (void **)&rdev->wb.wb);
+ radeon_bo_unreserve(rdev->wb.wb_obj);
if (r) {
- DRM_ERROR("radeon: failed to map WB buffer (%d).\n", r);
+ dev_err(rdev->dev, "(%d) map WB buffer failed\n", r);
return r;
}
}
@@ -275,11 +299,19 @@ void r100_wb_disable(struct radeon_device *rdev)
void r100_wb_fini(struct radeon_device *rdev)
{
+ int r;
+
r100_wb_disable(rdev);
if (rdev->wb.wb_obj) {
- radeon_object_kunmap(rdev->wb.wb_obj);
- radeon_object_unpin(rdev->wb.wb_obj);
- radeon_object_unref(&rdev->wb.wb_obj);
+ r = radeon_bo_reserve(rdev->wb.wb_obj, false);
+ if (unlikely(r != 0)) {
+ dev_err(rdev->dev, "(%d) can't finish WB\n", r);
+ return;
+ }
+ radeon_bo_kunmap(rdev->wb.wb_obj);
+ radeon_bo_unpin(rdev->wb.wb_obj);
+ radeon_bo_unreserve(rdev->wb.wb_obj);
+ radeon_bo_unref(&rdev->wb.wb_obj);
rdev->wb.wb = NULL;
rdev->wb.wb_obj = NULL;
}
@@ -563,19 +595,19 @@ int r100_cp_init(struct radeon_device *rdev, unsigned ring_size)
indirect1_start = 16;
/* cp setup */
WREG32(0x718, pre_write_timer | (pre_write_limit << 28));
- WREG32(RADEON_CP_RB_CNTL,
-#ifdef __BIG_ENDIAN
- RADEON_BUF_SWAP_32BIT |
-#endif
- REG_SET(RADEON_RB_BUFSZ, rb_bufsz) |
+ tmp = (REG_SET(RADEON_RB_BUFSZ, rb_bufsz) |
REG_SET(RADEON_RB_BLKSZ, rb_blksz) |
REG_SET(RADEON_MAX_FETCH, max_fetch) |
RADEON_RB_NO_UPDATE);
+#ifdef __BIG_ENDIAN
+ tmp |= RADEON_BUF_SWAP_32BIT;
+#endif
+ WREG32(RADEON_CP_RB_CNTL, tmp);
+
/* Set ring address */
DRM_INFO("radeon: ring at 0x%016lX\n", (unsigned long)rdev->cp.gpu_addr);
WREG32(RADEON_CP_RB_BASE, rdev->cp.gpu_addr);
/* Force read & write ptr to 0 */
- tmp = RREG32(RADEON_CP_RB_CNTL);
WREG32(RADEON_CP_RB_CNTL, tmp | RADEON_RB_RPTR_WR_ENA);
WREG32(RADEON_CP_RB_RPTR_WR, 0);
WREG32(RADEON_CP_RB_WPTR, 0);
@@ -1273,17 +1305,17 @@ static int r100_packet0_check(struct radeon_cs_parser *p,
int r100_cs_track_check_pkt3_indx_buffer(struct radeon_cs_parser *p,
struct radeon_cs_packet *pkt,
- struct radeon_object *robj)
+ struct radeon_bo *robj)
{
unsigned idx;
u32 value;
idx = pkt->idx + 1;
value = radeon_get_ib_value(p, idx + 2);
- if ((value + 1) > radeon_object_size(robj)) {
+ if ((value + 1) > radeon_bo_size(robj)) {
DRM_ERROR("[drm] Buffer too small for PACKET3 INDX_BUFFER "
"(need %u have %lu) !\n",
value + 1,
- radeon_object_size(robj));
+ radeon_bo_size(robj));
return -EINVAL;
}
return 0;
@@ -1568,6 +1600,14 @@ void r100_gpu_init(struct radeon_device *rdev)
r100_hdp_reset(rdev);
}
+void r100_hdp_flush(struct radeon_device *rdev)
+{
+ u32 tmp;
+ tmp = RREG32(RADEON_HOST_PATH_CNTL);
+ tmp |= RADEON_HDP_READ_BUFFER_INVALIDATE;
+ WREG32(RADEON_HOST_PATH_CNTL, tmp);
+}
+
void r100_hdp_reset(struct radeon_device *rdev)
{
uint32_t tmp;
@@ -1635,6 +1675,17 @@ int r100_gpu_reset(struct radeon_device *rdev)
return 0;
}
+void r100_set_common_regs(struct radeon_device *rdev)
+{
+ /* set these so they don't interfere with anything */
+ WREG32(RADEON_OV0_SCALE_CNTL, 0);
+ WREG32(RADEON_SUBPIC_CNTL, 0);
+ WREG32(RADEON_VIPH_CONTROL, 0);
+ WREG32(RADEON_I2C_CNTL_1, 0);
+ WREG32(RADEON_DVI_I2C_CNTL_1, 0);
+ WREG32(RADEON_CAP0_TRIG_CNTL, 0);
+ WREG32(RADEON_CAP1_TRIG_CNTL, 0);
+}
/*
* VRAM info
@@ -2364,7 +2415,7 @@ void r100_bandwidth_update(struct radeon_device *rdev)
/*
Find the total latency for the display data.
*/
- disp_latency_overhead.full = rfixed_const(80);
+ disp_latency_overhead.full = rfixed_const(8);
disp_latency_overhead.full = rfixed_div(disp_latency_overhead, sclk_ff);
mc_latency_mclk.full += disp_latency_overhead.full + cur_latency_mclk.full;
mc_latency_sclk.full += disp_latency_overhead.full + cur_latency_sclk.full;
@@ -2562,8 +2613,11 @@ void r100_bandwidth_update(struct radeon_device *rdev)
static inline void r100_cs_track_texture_print(struct r100_cs_track_texture *t)
{
DRM_ERROR("pitch %d\n", t->pitch);
+ DRM_ERROR("use_pitch %d\n", t->use_pitch);
DRM_ERROR("width %d\n", t->width);
+ DRM_ERROR("width_11 %d\n", t->width_11);
DRM_ERROR("height %d\n", t->height);
+ DRM_ERROR("height_11 %d\n", t->height_11);
DRM_ERROR("num levels %d\n", t->num_levels);
DRM_ERROR("depth %d\n", t->txdepth);
DRM_ERROR("bpp %d\n", t->cpp);
@@ -2576,7 +2630,7 @@ static int r100_cs_track_cube(struct radeon_device *rdev,
struct r100_cs_track *track, unsigned idx)
{
unsigned face, w, h;
- struct radeon_object *cube_robj;
+ struct radeon_bo *cube_robj;
unsigned long size;
for (face = 0; face < 5; face++) {
@@ -2589,9 +2643,9 @@ static int r100_cs_track_cube(struct radeon_device *rdev,
size += track->textures[idx].cube_info[face].offset;
- if (size > radeon_object_size(cube_robj)) {
+ if (size > radeon_bo_size(cube_robj)) {
DRM_ERROR("Cube texture offset greater than object size %lu %lu\n",
- size, radeon_object_size(cube_robj));
+ size, radeon_bo_size(cube_robj));
r100_cs_track_texture_print(&track->textures[idx]);
return -1;
}
@@ -2602,7 +2656,7 @@ static int r100_cs_track_cube(struct radeon_device *rdev,
static int r100_cs_track_texture_check(struct radeon_device *rdev,
struct r100_cs_track *track)
{
- struct radeon_object *robj;
+ struct radeon_bo *robj;
unsigned long size;
unsigned u, i, w, h;
int ret;
@@ -2623,15 +2677,17 @@ static int r100_cs_track_texture_check(struct radeon_device *rdev,
else
w = track->textures[u].pitch / (1 << i);
} else {
- w = track->textures[u].width / (1 << i);
+ w = track->textures[u].width;
if (rdev->family >= CHIP_RV515)
w |= track->textures[u].width_11;
+ w = w / (1 << i);
if (track->textures[u].roundup_w)
w = roundup_pow_of_two(w);
}
- h = track->textures[u].height / (1 << i);
+ h = track->textures[u].height;
if (rdev->family >= CHIP_RV515)
h |= track->textures[u].height_11;
+ h = h / (1 << i);
if (track->textures[u].roundup_h)
h = roundup_pow_of_two(h);
size += w * h;
@@ -2656,9 +2712,9 @@ static int r100_cs_track_texture_check(struct radeon_device *rdev,
"%u\n", track->textures[u].tex_coord_type, u);
return -EINVAL;
}
- if (size > radeon_object_size(robj)) {
+ if (size > radeon_bo_size(robj)) {
DRM_ERROR("Texture of unit %u needs %lu bytes but is "
- "%lu\n", u, size, radeon_object_size(robj));
+ "%lu\n", u, size, radeon_bo_size(robj));
r100_cs_track_texture_print(&track->textures[u]);
return -EINVAL;
}
@@ -2680,10 +2736,10 @@ int r100_cs_track_check(struct radeon_device *rdev, struct r100_cs_track *track)
}
size = track->cb[i].pitch * track->cb[i].cpp * track->maxy;
size += track->cb[i].offset;
- if (size > radeon_object_size(track->cb[i].robj)) {
+ if (size > radeon_bo_size(track->cb[i].robj)) {
DRM_ERROR("[drm] Buffer too small for color buffer %d "
"(need %lu have %lu) !\n", i, size,
- radeon_object_size(track->cb[i].robj));
+ radeon_bo_size(track->cb[i].robj));
DRM_ERROR("[drm] color buffer %d (%u %u %u %u)\n",
i, track->cb[i].pitch, track->cb[i].cpp,
track->cb[i].offset, track->maxy);
@@ -2697,10 +2753,10 @@ int r100_cs_track_check(struct radeon_device *rdev, struct r100_cs_track *track)
}
size = track->zb.pitch * track->zb.cpp * track->maxy;
size += track->zb.offset;
- if (size > radeon_object_size(track->zb.robj)) {
+ if (size > radeon_bo_size(track->zb.robj)) {
DRM_ERROR("[drm] Buffer too small for z buffer "
"(need %lu have %lu) !\n", size,
- radeon_object_size(track->zb.robj));
+ radeon_bo_size(track->zb.robj));
DRM_ERROR("[drm] zbuffer (%u %u %u %u)\n",
track->zb.pitch, track->zb.cpp,
track->zb.offset, track->maxy);
@@ -2718,11 +2774,12 @@ int r100_cs_track_check(struct radeon_device *rdev, struct r100_cs_track *track)
"bound\n", prim_walk, i);
return -EINVAL;
}
- if (size > radeon_object_size(track->arrays[i].robj)) {
- DRM_ERROR("(PW %u) Vertex array %u need %lu dwords "
- "have %lu dwords\n", prim_walk, i,
- size >> 2,
- radeon_object_size(track->arrays[i].robj) >> 2);
+ if (size > radeon_bo_size(track->arrays[i].robj)) {
+ dev_err(rdev->dev, "(PW %u) Vertex array %u "
+ "need %lu dwords have %lu dwords\n",
+ prim_walk, i, size >> 2,
+ radeon_bo_size(track->arrays[i].robj)
+ >> 2);
DRM_ERROR("Max indices %u\n", track->max_indx);
return -EINVAL;
}
@@ -2736,10 +2793,12 @@ int r100_cs_track_check(struct radeon_device *rdev, struct r100_cs_track *track)
"bound\n", prim_walk, i);
return -EINVAL;
}
- if (size > radeon_object_size(track->arrays[i].robj)) {
- DRM_ERROR("(PW %u) Vertex array %u need %lu dwords "
- "have %lu dwords\n", prim_walk, i, size >> 2,
- radeon_object_size(track->arrays[i].robj) >> 2);
+ if (size > radeon_bo_size(track->arrays[i].robj)) {
+ dev_err(rdev->dev, "(PW %u) Vertex array %u "
+ "need %lu dwords have %lu dwords\n",
+ prim_walk, i, size >> 2,
+ radeon_bo_size(track->arrays[i].robj)
+ >> 2);
return -EINVAL;
}
}
@@ -3081,6 +3140,9 @@ static int r100_startup(struct radeon_device *rdev)
{
int r;
+ /* set common regs */
+ r100_set_common_regs(rdev);
+ /* program mc */
r100_mc_program(rdev);
/* Resume clock */
r100_clock_startup(rdev);
@@ -3088,13 +3150,13 @@ static int r100_startup(struct radeon_device *rdev)
r100_gpu_init(rdev);
/* Initialize GART (initialize after TTM so we can allocate
* memory through TTM but finalize after TTM) */
+ r100_enable_bm(rdev);
if (rdev->flags & RADEON_IS_PCI) {
r = r100_pci_gart_enable(rdev);
if (r)
return r;
}
/* Enable IRQ */
- rdev->irq.sw_int = true;
r100_irq_set(rdev);
/* 1M ring buffer */
r = r100_cp_init(rdev, 1024 * 1024);
@@ -3154,7 +3216,7 @@ void r100_fini(struct radeon_device *rdev)
r100_pci_gart_fini(rdev);
radeon_irq_kms_fini(rdev);
radeon_fence_driver_fini(rdev);
- radeon_object_fini(rdev);
+ radeon_bo_fini(rdev);
radeon_atombios_fini(rdev);
kfree(rdev->bios);
rdev->bios = NULL;
@@ -3222,10 +3284,8 @@ int r100_init(struct radeon_device *rdev)
RREG32(R_0007C0_CP_STAT));
}
/* check if cards are posted or not */
- if (!radeon_card_posted(rdev) && rdev->bios) {
- DRM_INFO("GPU not posted. posting now...\n");
- radeon_combios_asic_init(rdev->ddev);
- }
+ if (radeon_boot_test_post_card(rdev) == false)
+ return -EINVAL;
/* Set asic errata */
r100_errata(rdev);
/* Initialize clocks */
@@ -3244,7 +3304,7 @@ int r100_init(struct radeon_device *rdev)
if (r)
return r;
/* Memory manager */
- r = radeon_object_init(rdev);
+ r = radeon_bo_init(rdev);
if (r)
return r;
if (rdev->flags & RADEON_IS_PCI) {
diff --git a/drivers/gpu/drm/radeon/r100_track.h b/drivers/gpu/drm/radeon/r100_track.h
index 0daf0d76a89..ca50903dd2b 100644
--- a/drivers/gpu/drm/radeon/r100_track.h
+++ b/drivers/gpu/drm/radeon/r100_track.h
@@ -10,26 +10,26 @@
* CS functions
*/
struct r100_cs_track_cb {
- struct radeon_object *robj;
+ struct radeon_bo *robj;
unsigned pitch;
unsigned cpp;
unsigned offset;
};
struct r100_cs_track_array {
- struct radeon_object *robj;
+ struct radeon_bo *robj;
unsigned esize;
};
struct r100_cs_cube_info {
- struct radeon_object *robj;
- unsigned offset;
+ struct radeon_bo *robj;
+ unsigned offset;
unsigned width;
unsigned height;
};
struct r100_cs_track_texture {
- struct radeon_object *robj;
+ struct radeon_bo *robj;
struct r100_cs_cube_info cube_info[5]; /* info for 5 non-primary faces */
unsigned pitch;
unsigned width;
diff --git a/drivers/gpu/drm/radeon/r300.c b/drivers/gpu/drm/radeon/r300.c
index e08c4a8974c..86065dcc198 100644
--- a/drivers/gpu/drm/radeon/r300.c
+++ b/drivers/gpu/drm/radeon/r300.c
@@ -113,7 +113,7 @@ int rv370_pcie_gart_enable(struct radeon_device *rdev)
tmp = RADEON_PCIE_TX_GART_UNMAPPED_ACCESS_DISCARD;
WREG32_PCIE(RADEON_PCIE_TX_GART_CNTL, tmp);
WREG32_PCIE(RADEON_PCIE_TX_GART_START_LO, rdev->mc.gtt_location);
- tmp = rdev->mc.gtt_location + rdev->mc.gtt_size - 4096;
+ tmp = rdev->mc.gtt_location + rdev->mc.gtt_size - RADEON_GPU_PAGE_SIZE;
WREG32_PCIE(RADEON_PCIE_TX_GART_END_LO, tmp);
WREG32_PCIE(RADEON_PCIE_TX_GART_START_HI, 0);
WREG32_PCIE(RADEON_PCIE_TX_GART_END_HI, 0);
@@ -137,14 +137,19 @@ int rv370_pcie_gart_enable(struct radeon_device *rdev)
void rv370_pcie_gart_disable(struct radeon_device *rdev)
{
- uint32_t tmp;
+ u32 tmp;
+ int r;
tmp = RREG32_PCIE(RADEON_PCIE_TX_GART_CNTL);
tmp |= RADEON_PCIE_TX_GART_UNMAPPED_ACCESS_DISCARD;
WREG32_PCIE(RADEON_PCIE_TX_GART_CNTL, tmp & ~RADEON_PCIE_TX_GART_EN);
if (rdev->gart.table.vram.robj) {
- radeon_object_kunmap(rdev->gart.table.vram.robj);
- radeon_object_unpin(rdev->gart.table.vram.robj);
+ r = radeon_bo_reserve(rdev->gart.table.vram.robj, false);
+ if (likely(r == 0)) {
+ radeon_bo_kunmap(rdev->gart.table.vram.robj);
+ radeon_bo_unpin(rdev->gart.table.vram.robj);
+ radeon_bo_unreserve(rdev->gart.table.vram.robj);
+ }
}
}
@@ -1181,6 +1186,9 @@ static int r300_startup(struct radeon_device *rdev)
{
int r;
+ /* set common regs */
+ r100_set_common_regs(rdev);
+ /* program mc */
r300_mc_program(rdev);
/* Resume clock */
r300_clock_startup(rdev);
@@ -1193,13 +1201,18 @@ static int r300_startup(struct radeon_device *rdev)
if (r)
return r;
}
+
+ if (rdev->family == CHIP_R300 ||
+ rdev->family == CHIP_R350 ||
+ rdev->family == CHIP_RV350)
+ r100_enable_bm(rdev);
+
if (rdev->flags & RADEON_IS_PCI) {
r = r100_pci_gart_enable(rdev);
if (r)
return r;
}
/* Enable IRQ */
- rdev->irq.sw_int = true;
r100_irq_set(rdev);
/* 1M ring buffer */
r = r100_cp_init(rdev, 1024 * 1024);
@@ -1265,7 +1278,7 @@ void r300_fini(struct radeon_device *rdev)
r100_pci_gart_fini(rdev);
radeon_irq_kms_fini(rdev);
radeon_fence_driver_fini(rdev);
- radeon_object_fini(rdev);
+ radeon_bo_fini(rdev);
radeon_atombios_fini(rdev);
kfree(rdev->bios);
rdev->bios = NULL;
@@ -1303,10 +1316,8 @@ int r300_init(struct radeon_device *rdev)
RREG32(R_0007C0_CP_STAT));
}
/* check if cards are posted or not */
- if (!radeon_card_posted(rdev) && rdev->bios) {
- DRM_INFO("GPU not posted. posting now...\n");
- radeon_combios_asic_init(rdev->ddev);
- }
+ if (radeon_boot_test_post_card(rdev) == false)
+ return -EINVAL;
/* Set asic errata */
r300_errata(rdev);
/* Initialize clocks */
@@ -1325,7 +1336,7 @@ int r300_init(struct radeon_device *rdev)
if (r)
return r;
/* Memory manager */
- r = radeon_object_init(rdev);
+ r = radeon_bo_init(rdev);
if (r)
return r;
if (rdev->flags & RADEON_IS_PCIE) {
diff --git a/drivers/gpu/drm/radeon/r420.c b/drivers/gpu/drm/radeon/r420.c
index 5c7fe52de30..162c3902fe6 100644
--- a/drivers/gpu/drm/radeon/r420.c
+++ b/drivers/gpu/drm/radeon/r420.c
@@ -169,6 +169,9 @@ static int r420_startup(struct radeon_device *rdev)
{
int r;
+ /* set common regs */
+ r100_set_common_regs(rdev);
+ /* program mc */
r300_mc_program(rdev);
/* Resume clock */
r420_clock_resume(rdev);
@@ -186,7 +189,6 @@ static int r420_startup(struct radeon_device *rdev)
}
r420_pipes_init(rdev);
/* Enable IRQ */
- rdev->irq.sw_int = true;
r100_irq_set(rdev);
/* 1M ring buffer */
r = r100_cp_init(rdev, 1024 * 1024);
@@ -258,7 +260,7 @@ void r420_fini(struct radeon_device *rdev)
radeon_agp_fini(rdev);
radeon_irq_kms_fini(rdev);
radeon_fence_driver_fini(rdev);
- radeon_object_fini(rdev);
+ radeon_bo_fini(rdev);
if (rdev->is_atom_bios) {
radeon_atombios_fini(rdev);
} else {
@@ -301,16 +303,13 @@ int r420_init(struct radeon_device *rdev)
RREG32(R_0007C0_CP_STAT));
}
/* check if cards are posted or not */
- if (!radeon_card_posted(rdev) && rdev->bios) {
- DRM_INFO("GPU not posted. posting now...\n");
- if (rdev->is_atom_bios) {
- atom_asic_init(rdev->mode_info.atom_context);
- } else {
- radeon_combios_asic_init(rdev->ddev);
- }
- }
+ if (radeon_boot_test_post_card(rdev) == false)
+ return -EINVAL;
+
/* Initialize clocks */
radeon_get_clock_info(rdev->ddev);
+ /* Initialize power management */
+ radeon_pm_init(rdev);
/* Get vram informations */
r300_vram_info(rdev);
/* Initialize memory controller (also test AGP) */
@@ -329,10 +328,13 @@ int r420_init(struct radeon_device *rdev)
return r;
}
/* Memory manager */
- r = radeon_object_init(rdev);
+ r = radeon_bo_init(rdev);
if (r) {
return r;
}
+ if (rdev->family == CHIP_R420)
+ r100_enable_bm(rdev);
+
if (rdev->flags & RADEON_IS_PCIE) {
r = rv370_pcie_gart_init(rdev);
if (r)
diff --git a/drivers/gpu/drm/radeon/r500_reg.h b/drivers/gpu/drm/radeon/r500_reg.h
index 868add6e166..7baa7395556 100644
--- a/drivers/gpu/drm/radeon/r500_reg.h
+++ b/drivers/gpu/drm/radeon/r500_reg.h
@@ -384,9 +384,16 @@
# define AVIVO_D1GRPH_TILED (1 << 20)
# define AVIVO_D1GRPH_MACRO_ADDRESS_MODE (1 << 21)
+/* The R7xx *_HIGH surface regs are backwards; the D1 regs are in the D2
+ * block and vice versa. This applies to GRPH, CUR, etc.
+ */
#define AVIVO_D1GRPH_LUT_SEL 0x6108
#define AVIVO_D1GRPH_PRIMARY_SURFACE_ADDRESS 0x6110
+#define R700_D1GRPH_PRIMARY_SURFACE_ADDRESS_HIGH 0x6914
+#define R700_D2GRPH_PRIMARY_SURFACE_ADDRESS_HIGH 0x6114
#define AVIVO_D1GRPH_SECONDARY_SURFACE_ADDRESS 0x6118
+#define R700_D1GRPH_SECONDARY_SURFACE_ADDRESS_HIGH 0x691c
+#define R700_D2GRPH_SECONDARY_SURFACE_ADDRESS_HIGH 0x611c
#define AVIVO_D1GRPH_PITCH 0x6120
#define AVIVO_D1GRPH_SURFACE_OFFSET_X 0x6124
#define AVIVO_D1GRPH_SURFACE_OFFSET_Y 0x6128
@@ -404,6 +411,8 @@
# define AVIVO_D1CURSOR_MODE_MASK (3 << 8)
# define AVIVO_D1CURSOR_MODE_24BPP 2
#define AVIVO_D1CUR_SURFACE_ADDRESS 0x6408
+#define R700_D1CUR_SURFACE_ADDRESS_HIGH 0x6c0c
+#define R700_D2CUR_SURFACE_ADDRESS_HIGH 0x640c
#define AVIVO_D1CUR_SIZE 0x6410
#define AVIVO_D1CUR_POSITION 0x6414
#define AVIVO_D1CUR_HOT_SPOT 0x6418
diff --git a/drivers/gpu/drm/radeon/r520.c b/drivers/gpu/drm/radeon/r520.c
index a555b7b19b4..788eef5c2a0 100644
--- a/drivers/gpu/drm/radeon/r520.c
+++ b/drivers/gpu/drm/radeon/r520.c
@@ -185,7 +185,6 @@ static int r520_startup(struct radeon_device *rdev)
return r;
}
/* Enable IRQ */
- rdev->irq.sw_int = true;
rs600_irq_set(rdev);
/* 1M ring buffer */
r = r100_cp_init(rdev, 1024 * 1024);
@@ -254,12 +253,17 @@ int r520_init(struct radeon_device *rdev)
RREG32(R_0007C0_CP_STAT));
}
/* check if cards are posted or not */
+ if (radeon_boot_test_post_card(rdev) == false)
+ return -EINVAL;
+
if (!radeon_card_posted(rdev) && rdev->bios) {
DRM_INFO("GPU not posted. posting now...\n");
atom_asic_init(rdev->mode_info.atom_context);
}
/* Initialize clocks */
radeon_get_clock_info(rdev->ddev);
+ /* Initialize power management */
+ radeon_pm_init(rdev);
/* Get vram informations */
r520_vram_info(rdev);
/* Initialize memory controller (also test AGP) */
@@ -275,7 +279,7 @@ int r520_init(struct radeon_device *rdev)
if (r)
return r;
/* Memory manager */
- r = radeon_object_init(rdev);
+ r = radeon_bo_init(rdev);
if (r)
return r;
r = rv370_pcie_gart_init(rdev);
diff --git a/drivers/gpu/drm/radeon/r600.c b/drivers/gpu/drm/radeon/r600.c
index 609719490ec..94e7fd2f59e 100644
--- a/drivers/gpu/drm/radeon/r600.c
+++ b/drivers/gpu/drm/radeon/r600.c
@@ -38,8 +38,10 @@
#define PFP_UCODE_SIZE 576
#define PM4_UCODE_SIZE 1792
+#define RLC_UCODE_SIZE 768
#define R700_PFP_UCODE_SIZE 848
#define R700_PM4_UCODE_SIZE 1360
+#define R700_RLC_UCODE_SIZE 1024
/* Firmware Names */
MODULE_FIRMWARE("radeon/R600_pfp.bin");
@@ -62,6 +64,8 @@ MODULE_FIRMWARE("radeon/RV730_pfp.bin");
MODULE_FIRMWARE("radeon/RV730_me.bin");
MODULE_FIRMWARE("radeon/RV710_pfp.bin");
MODULE_FIRMWARE("radeon/RV710_me.bin");
+MODULE_FIRMWARE("radeon/R600_rlc.bin");
+MODULE_FIRMWARE("radeon/R700_rlc.bin");
int r600_debugfs_mc_info_init(struct radeon_device *rdev);
@@ -180,7 +184,7 @@ int r600_pcie_gart_enable(struct radeon_device *rdev)
void r600_pcie_gart_disable(struct radeon_device *rdev)
{
u32 tmp;
- int i;
+ int i, r;
/* Disable all tables */
for (i = 0; i < 7; i++)
@@ -208,8 +212,12 @@ void r600_pcie_gart_disable(struct radeon_device *rdev)
WREG32(MC_VM_L1_TLB_MCB_RD_HDP_CNTL, tmp);
WREG32(MC_VM_L1_TLB_MCB_WR_HDP_CNTL, tmp);
if (rdev->gart.table.vram.robj) {
- radeon_object_kunmap(rdev->gart.table.vram.robj);
- radeon_object_unpin(rdev->gart.table.vram.robj);
+ r = radeon_bo_reserve(rdev->gart.table.vram.robj, false);
+ if (likely(r == 0)) {
+ radeon_bo_kunmap(rdev->gart.table.vram.robj);
+ radeon_bo_unpin(rdev->gart.table.vram.robj);
+ radeon_bo_unreserve(rdev->gart.table.vram.robj);
+ }
}
}
@@ -339,11 +347,10 @@ int r600_mc_init(struct radeon_device *rdev)
{
fixed20_12 a;
u32 tmp;
- int chansize;
+ int chansize, numchan;
int r;
/* Get VRAM informations */
- rdev->mc.vram_width = 128;
rdev->mc.vram_is_ddr = true;
tmp = RREG32(RAMCFG);
if (tmp & CHANSIZE_OVERRIDE) {
@@ -353,17 +360,23 @@ int r600_mc_init(struct radeon_device *rdev)
} else {
chansize = 32;
}
- if (rdev->family == CHIP_R600) {
- rdev->mc.vram_width = 8 * chansize;
- } else if (rdev->family == CHIP_RV670) {
- rdev->mc.vram_width = 4 * chansize;
- } else if ((rdev->family == CHIP_RV610) ||
- (rdev->family == CHIP_RV620)) {
- rdev->mc.vram_width = chansize;
- } else if ((rdev->family == CHIP_RV630) ||
- (rdev->family == CHIP_RV635)) {
- rdev->mc.vram_width = 2 * chansize;
+ tmp = RREG32(CHMAP);
+ switch ((tmp & NOOFCHAN_MASK) >> NOOFCHAN_SHIFT) {
+ case 0:
+ default:
+ numchan = 1;
+ break;
+ case 1:
+ numchan = 2;
+ break;
+ case 2:
+ numchan = 4;
+ break;
+ case 3:
+ numchan = 8;
+ break;
}
+ rdev->mc.vram_width = numchan * chansize;
/* Could aper size report 0 ? */
rdev->mc.aper_base = drm_get_resource_start(rdev->ddev, 0);
rdev->mc.aper_size = drm_get_resource_len(rdev->ddev, 0);
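The VRAM bus width is now derived from the CHMAP channel-count field times the per-channel width from RAMCFG, instead of a per-family table. A standalone check of that arithmetic (example register value assumed):

#include <stdint.h>
#include <stdio.h>

#define NOOFCHAN_SHIFT 12
#define NOOFCHAN_MASK  0x00003000

/* Mirrors the switch added to r600_mc_init(): field 0..3 selects 1/2/4/8
 * memory channels. */
static unsigned r600_numchan(uint32_t chmap)
{
	switch ((chmap & NOOFCHAN_MASK) >> NOOFCHAN_SHIFT) {
	default:
	case 0: return 1;
	case 1: return 2;
	case 2: return 4;
	case 3: return 8;
	}
}

int main(void)
{
	uint32_t chmap = 2 << NOOFCHAN_SHIFT;	/* assumed value: 4 channels */
	unsigned chansize = 32;			/* 32-bit channels, the default case above */

	printf("vram_width = %u bits\n", r600_numchan(chmap) * chansize);	/* 128 */
	return 0;
}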
@@ -404,35 +417,29 @@ int r600_mc_init(struct radeon_device *rdev)
rdev->mc.gtt_location = rdev->mc.mc_vram_size;
}
} else {
- if (rdev->family == CHIP_RS780 || rdev->family == CHIP_RS880) {
- rdev->mc.vram_location = (RREG32(MC_VM_FB_LOCATION) &
- 0xFFFF) << 24;
- rdev->mc.gtt_size = radeon_gart_size * 1024 * 1024;
- tmp = rdev->mc.vram_location + rdev->mc.mc_vram_size;
- if ((0xFFFFFFFFUL - tmp) >= rdev->mc.gtt_size) {
- /* Enough place after vram */
- rdev->mc.gtt_location = tmp;
- } else if (rdev->mc.vram_location >= rdev->mc.gtt_size) {
- /* Enough place before vram */
+ rdev->mc.gtt_size = radeon_gart_size * 1024 * 1024;
+ rdev->mc.vram_location = (RREG32(MC_VM_FB_LOCATION) &
+ 0xFFFF) << 24;
+ tmp = rdev->mc.vram_location + rdev->mc.mc_vram_size;
+ if ((0xFFFFFFFFUL - tmp) >= rdev->mc.gtt_size) {
+ /* Enough place after vram */
+ rdev->mc.gtt_location = tmp;
+ } else if (rdev->mc.vram_location >= rdev->mc.gtt_size) {
+ /* Enough place before vram */
+ rdev->mc.gtt_location = 0;
+ } else {
+ /* Not enough place after or before shrink
+ * gart size
+ */
+ if (rdev->mc.vram_location > (0xFFFFFFFFUL - tmp)) {
rdev->mc.gtt_location = 0;
+ rdev->mc.gtt_size = rdev->mc.vram_location;
} else {
- /* Not enough place after or before shrink
- * gart size
- */
- if (rdev->mc.vram_location > (0xFFFFFFFFUL - tmp)) {
- rdev->mc.gtt_location = 0;
- rdev->mc.gtt_size = rdev->mc.vram_location;
- } else {
- rdev->mc.gtt_location = tmp;
- rdev->mc.gtt_size = 0xFFFFFFFFUL - tmp;
- }
+ rdev->mc.gtt_location = tmp;
+ rdev->mc.gtt_size = 0xFFFFFFFFUL - tmp;
}
- rdev->mc.gtt_location = rdev->mc.mc_vram_size;
- } else {
- rdev->mc.vram_location = 0x00000000UL;
- rdev->mc.gtt_location = rdev->mc.mc_vram_size;
- rdev->mc.gtt_size = radeon_gart_size * 1024 * 1024;
}
+ rdev->mc.gtt_location = rdev->mc.mc_vram_size;
}
rdev->mc.vram_start = rdev->mc.vram_location;
rdev->mc.vram_end = rdev->mc.vram_location + rdev->mc.mc_vram_size - 1;
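For the non-AGP case the comments above describe a three-way placement policy for the GTT aperture: after VRAM if it fits below 4GB, before VRAM otherwise, else shrink it into whichever hole is larger. A simplified standalone sketch of that decision follows (note the committed code then forces gtt_location back to mc_vram_size at the end of the branch):

#include <stdint.h>

struct mc_layout {
	uint64_t vram_location, vram_size;
	uint64_t gtt_location, gtt_size;
};

/* Same decision tree as the comments in r600_mc_init() describe. */
static void place_gtt(struct mc_layout *mc)
{
	uint64_t end = mc->vram_location + mc->vram_size;

	if ((0xFFFFFFFFULL - end) >= mc->gtt_size) {
		mc->gtt_location = end;			/* enough room after VRAM */
	} else if (mc->vram_location >= mc->gtt_size) {
		mc->gtt_location = 0;			/* enough room before VRAM */
	} else if (mc->vram_location > (0xFFFFFFFFULL - end)) {
		mc->gtt_location = 0;			/* shrink into the hole before VRAM */
		mc->gtt_size = mc->vram_location;
	} else {
		mc->gtt_location = end;			/* shrink into the hole after VRAM */
		mc->gtt_size = 0xFFFFFFFFULL - end;
	}
}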
@@ -859,7 +866,8 @@ void r600_gpu_init(struct radeon_device *rdev)
((rdev->family) == CHIP_RV630) ||
((rdev->family) == CHIP_RV610) ||
((rdev->family) == CHIP_RV620) ||
- ((rdev->family) == CHIP_RS780)) {
+ ((rdev->family) == CHIP_RS780) ||
+ ((rdev->family) == CHIP_RS880)) {
WREG32(DB_DEBUG, PREZ_MUST_WAIT_FOR_POSTZ_DONE);
} else {
WREG32(DB_DEBUG, 0);
@@ -876,7 +884,8 @@ void r600_gpu_init(struct radeon_device *rdev)
tmp = RREG32(SQ_MS_FIFO_SIZES);
if (((rdev->family) == CHIP_RV610) ||
((rdev->family) == CHIP_RV620) ||
- ((rdev->family) == CHIP_RS780)) {
+ ((rdev->family) == CHIP_RS780) ||
+ ((rdev->family) == CHIP_RS880)) {
tmp = (CACHE_FIFO_SIZE(0xa) |
FETCH_FIFO_HIWATER(0xa) |
DONE_FIFO_HIWATER(0xe0) |
@@ -919,7 +928,8 @@ void r600_gpu_init(struct radeon_device *rdev)
NUM_ES_STACK_ENTRIES(0));
} else if (((rdev->family) == CHIP_RV610) ||
((rdev->family) == CHIP_RV620) ||
- ((rdev->family) == CHIP_RS780)) {
+ ((rdev->family) == CHIP_RS780) ||
+ ((rdev->family) == CHIP_RS880)) {
/* no vertex cache */
sq_config &= ~VC_ENABLE;
@@ -976,7 +986,8 @@ void r600_gpu_init(struct radeon_device *rdev)
if (((rdev->family) == CHIP_RV610) ||
((rdev->family) == CHIP_RV620) ||
- ((rdev->family) == CHIP_RS780)) {
+ ((rdev->family) == CHIP_RS780) ||
+ ((rdev->family) == CHIP_RS880)) {
WREG32(VGT_CACHE_INVALIDATION, CACHE_INVALIDATION(TC_ONLY));
} else {
WREG32(VGT_CACHE_INVALIDATION, CACHE_INVALIDATION(VC_AND_TC));
@@ -1002,8 +1013,9 @@ void r600_gpu_init(struct radeon_device *rdev)
tmp = rdev->config.r600.max_pipes * 16;
switch (rdev->family) {
case CHIP_RV610:
- case CHIP_RS780:
case CHIP_RV620:
+ case CHIP_RS780:
+ case CHIP_RS880:
tmp += 32;
break;
case CHIP_RV670:
@@ -1044,8 +1056,9 @@ void r600_gpu_init(struct radeon_device *rdev)
switch (rdev->family) {
case CHIP_RV610:
- case CHIP_RS780:
case CHIP_RV620:
+ case CHIP_RS780:
+ case CHIP_RS880:
tmp = TC_L2_SIZE(8);
break;
case CHIP_RV630:
@@ -1096,6 +1109,10 @@ void r600_pciep_wreg(struct radeon_device *rdev, u32 reg, u32 v)
(void)RREG32(PCIE_PORT_DATA);
}
+void r600_hdp_flush(struct radeon_device *rdev)
+{
+ WREG32(R_005480_HDP_MEM_COHERENCY_FLUSH_CNTL, 0x1);
+}
/*
* CP & Ring
@@ -1105,11 +1122,12 @@ void r600_cp_stop(struct radeon_device *rdev)
WREG32(R_0086D8_CP_ME_CNTL, S_0086D8_CP_ME_HALT(1));
}
-int r600_cp_init_microcode(struct radeon_device *rdev)
+int r600_init_microcode(struct radeon_device *rdev)
{
struct platform_device *pdev;
const char *chip_name;
- size_t pfp_req_size, me_req_size;
+ const char *rlc_chip_name;
+ size_t pfp_req_size, me_req_size, rlc_req_size;
char fw_name[30];
int err;
@@ -1123,30 +1141,62 @@ int r600_cp_init_microcode(struct radeon_device *rdev)
}
switch (rdev->family) {
- case CHIP_R600: chip_name = "R600"; break;
- case CHIP_RV610: chip_name = "RV610"; break;
- case CHIP_RV630: chip_name = "RV630"; break;
- case CHIP_RV620: chip_name = "RV620"; break;
- case CHIP_RV635: chip_name = "RV635"; break;
- case CHIP_RV670: chip_name = "RV670"; break;
+ case CHIP_R600:
+ chip_name = "R600";
+ rlc_chip_name = "R600";
+ break;
+ case CHIP_RV610:
+ chip_name = "RV610";
+ rlc_chip_name = "R600";
+ break;
+ case CHIP_RV630:
+ chip_name = "RV630";
+ rlc_chip_name = "R600";
+ break;
+ case CHIP_RV620:
+ chip_name = "RV620";
+ rlc_chip_name = "R600";
+ break;
+ case CHIP_RV635:
+ chip_name = "RV635";
+ rlc_chip_name = "R600";
+ break;
+ case CHIP_RV670:
+ chip_name = "RV670";
+ rlc_chip_name = "R600";
+ break;
case CHIP_RS780:
- case CHIP_RS880: chip_name = "RS780"; break;
- case CHIP_RV770: chip_name = "RV770"; break;
+ case CHIP_RS880:
+ chip_name = "RS780";
+ rlc_chip_name = "R600";
+ break;
+ case CHIP_RV770:
+ chip_name = "RV770";
+ rlc_chip_name = "R700";
+ break;
case CHIP_RV730:
- case CHIP_RV740: chip_name = "RV730"; break;
- case CHIP_RV710: chip_name = "RV710"; break;
+ case CHIP_RV740:
+ chip_name = "RV730";
+ rlc_chip_name = "R700";
+ break;
+ case CHIP_RV710:
+ chip_name = "RV710";
+ rlc_chip_name = "R700";
+ break;
default: BUG();
}
if (rdev->family >= CHIP_RV770) {
pfp_req_size = R700_PFP_UCODE_SIZE * 4;
me_req_size = R700_PM4_UCODE_SIZE * 4;
+ rlc_req_size = R700_RLC_UCODE_SIZE * 4;
} else {
pfp_req_size = PFP_UCODE_SIZE * 4;
me_req_size = PM4_UCODE_SIZE * 12;
+ rlc_req_size = RLC_UCODE_SIZE * 4;
}
- DRM_INFO("Loading %s CP Microcode\n", chip_name);
+ DRM_INFO("Loading %s Microcode\n", chip_name);
snprintf(fw_name, sizeof(fw_name), "radeon/%s_pfp.bin", chip_name);
err = request_firmware(&rdev->pfp_fw, fw_name, &pdev->dev);
@@ -1170,6 +1220,18 @@ int r600_cp_init_microcode(struct radeon_device *rdev)
rdev->me_fw->size, fw_name);
err = -EINVAL;
}
+
+ snprintf(fw_name, sizeof(fw_name), "radeon/%s_rlc.bin", rlc_chip_name);
+ err = request_firmware(&rdev->rlc_fw, fw_name, &pdev->dev);
+ if (err)
+ goto out;
+ if (rdev->rlc_fw->size != rlc_req_size) {
+ printk(KERN_ERR
+ "r600_rlc: Bogus length %zu in firmware \"%s\"\n",
+ rdev->rlc_fw->size, fw_name);
+ err = -EINVAL;
+ }
+
out:
platform_device_unregister(pdev);
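The RLC image is fetched with the same request_firmware()/length-check idiom already used for the PFP and ME blobs. Stripped of driver state (device pointer and expected size are placeholders, not values from the patch):

/* Sketch of the fetch idiom used above, not a function from the patch. */
static int fetch_ucode(struct device *dev, const char *name,
		       size_t expected, const struct firmware **fw)
{
	int err;

	err = request_firmware(fw, name, dev);	/* may sleep */
	if (err)
		return err;
	if ((*fw)->size != expected) {		/* reject truncated or mismatched blobs */
		release_firmware(*fw);
		*fw = NULL;
		return -EINVAL;
	}
	return 0;
}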
@@ -1182,6 +1244,8 @@ out:
rdev->pfp_fw = NULL;
release_firmware(rdev->me_fw);
rdev->me_fw = NULL;
+ release_firmware(rdev->rlc_fw);
+ rdev->rlc_fw = NULL;
}
return err;
}
@@ -1267,19 +1331,17 @@ int r600_cp_resume(struct radeon_device *rdev)
/* Set ring buffer size */
rb_bufsz = drm_order(rdev->cp.ring_size / 8);
+ tmp = RB_NO_UPDATE | (drm_order(RADEON_GPU_PAGE_SIZE/8) << 8) | rb_bufsz;
#ifdef __BIG_ENDIAN
- WREG32(CP_RB_CNTL, BUF_SWAP_32BIT | RB_NO_UPDATE |
- (drm_order(4096/8) << 8) | rb_bufsz);
-#else
- WREG32(CP_RB_CNTL, RB_NO_UPDATE | (drm_order(4096/8) << 8) | rb_bufsz);
+ tmp |= BUF_SWAP_32BIT;
#endif
+ WREG32(CP_RB_CNTL, tmp);
WREG32(CP_SEM_WAIT_TIMER, 0x4);
/* Set the write pointer delay */
WREG32(CP_RB_WPTR_DELAY, 0);
/* Initialize the ring buffer's read and write pointers */
- tmp = RREG32(CP_RB_CNTL);
WREG32(CP_RB_CNTL, tmp | RB_RPTR_WR_ENA);
WREG32(CP_RB_RPTR_WR, 0);
WREG32(CP_RB_WPTR, 0);
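CP_RB_CNTL takes log2-encoded sizes, which is why the ring and page sizes go through drm_order() above. A plain-C stand-in for that helper (not the DRM implementation itself) to sanity-check the encoded values:

#include <stdio.h>

/* ceil(log2(size)): smallest order with (1UL << order) >= size, which is
 * what drm_order() is used for above. */
static int order_base_2_ceil(unsigned long size)
{
	int order = 0;

	while ((1UL << order) < size)
		order++;
	return order;
}

int main(void)
{
	unsigned long ring_size = 1024 * 1024;	/* 1M ring, as set up in r600_init() */

	printf("rb_bufsz = %d\n", order_base_2_ceil(ring_size / 8));	/* 17 */
	printf("blk_size = %d\n", order_base_2_ceil(4096 / 8));		/* 9, from RADEON_GPU_PAGE_SIZE/8 */
	return 0;
}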
@@ -1378,10 +1440,16 @@ int r600_ring_test(struct radeon_device *rdev)
void r600_wb_disable(struct radeon_device *rdev)
{
+ int r;
+
WREG32(SCRATCH_UMSK, 0);
if (rdev->wb.wb_obj) {
- radeon_object_kunmap(rdev->wb.wb_obj);
- radeon_object_unpin(rdev->wb.wb_obj);
+ r = radeon_bo_reserve(rdev->wb.wb_obj, false);
+ if (unlikely(r != 0))
+ return;
+ radeon_bo_kunmap(rdev->wb.wb_obj);
+ radeon_bo_unpin(rdev->wb.wb_obj);
+ radeon_bo_unreserve(rdev->wb.wb_obj);
}
}
@@ -1389,7 +1457,7 @@ void r600_wb_fini(struct radeon_device *rdev)
{
r600_wb_disable(rdev);
if (rdev->wb.wb_obj) {
- radeon_object_unref(&rdev->wb.wb_obj);
+ radeon_bo_unref(&rdev->wb.wb_obj);
rdev->wb.wb = NULL;
rdev->wb.wb_obj = NULL;
}
@@ -1400,22 +1468,29 @@ int r600_wb_enable(struct radeon_device *rdev)
int r;
if (rdev->wb.wb_obj == NULL) {
- r = radeon_object_create(rdev, NULL, 4096, true,
- RADEON_GEM_DOMAIN_GTT, false, &rdev->wb.wb_obj);
+ r = radeon_bo_create(rdev, NULL, RADEON_GPU_PAGE_SIZE, true,
+ RADEON_GEM_DOMAIN_GTT, &rdev->wb.wb_obj);
if (r) {
- dev_warn(rdev->dev, "failed to create WB buffer (%d).\n", r);
+ dev_warn(rdev->dev, "(%d) create WB bo failed\n", r);
+ return r;
+ }
+ r = radeon_bo_reserve(rdev->wb.wb_obj, false);
+ if (unlikely(r != 0)) {
+ r600_wb_fini(rdev);
return r;
}
- r = radeon_object_pin(rdev->wb.wb_obj, RADEON_GEM_DOMAIN_GTT,
+ r = radeon_bo_pin(rdev->wb.wb_obj, RADEON_GEM_DOMAIN_GTT,
&rdev->wb.gpu_addr);
if (r) {
- dev_warn(rdev->dev, "failed to pin WB buffer (%d).\n", r);
+ radeon_bo_unreserve(rdev->wb.wb_obj);
+ dev_warn(rdev->dev, "(%d) pin WB bo failed\n", r);
r600_wb_fini(rdev);
return r;
}
- r = radeon_object_kmap(rdev->wb.wb_obj, (void **)&rdev->wb.wb);
+ r = radeon_bo_kmap(rdev->wb.wb_obj, (void **)&rdev->wb.wb);
+ radeon_bo_unreserve(rdev->wb.wb_obj);
if (r) {
- dev_warn(rdev->dev, "failed to map WB buffer (%d).\n", r);
+ dev_warn(rdev->dev, "(%d) map WB bo failed\n", r);
r600_wb_fini(rdev);
return r;
}
@@ -1430,10 +1505,14 @@ int r600_wb_enable(struct radeon_device *rdev)
void r600_fence_ring_emit(struct radeon_device *rdev,
struct radeon_fence *fence)
{
+ /* Also consider EVENT_WRITE_EOP. It handles the interrupts + timestamps + events */
/* Emit fence sequence & fire IRQ */
radeon_ring_write(rdev, PACKET3(PACKET3_SET_CONFIG_REG, 1));
radeon_ring_write(rdev, ((rdev->fence_drv.scratch_reg - PACKET3_SET_CONFIG_REG_OFFSET) >> 2));
radeon_ring_write(rdev, fence->seq);
+ /* CP_INTERRUPT packet 3 no longer exists, use packet 0 */
+ radeon_ring_write(rdev, PACKET0(CP_INT_STATUS, 0));
+ radeon_ring_write(rdev, RB_INT_STAT);
}
int r600_copy_dma(struct radeon_device *rdev,
@@ -1450,24 +1529,12 @@ int r600_copy_blit(struct radeon_device *rdev,
uint64_t src_offset, uint64_t dst_offset,
unsigned num_pages, struct radeon_fence *fence)
{
- r600_blit_prepare_copy(rdev, num_pages * 4096);
- r600_kms_blit_copy(rdev, src_offset, dst_offset, num_pages * 4096);
+ r600_blit_prepare_copy(rdev, num_pages * RADEON_GPU_PAGE_SIZE);
+ r600_kms_blit_copy(rdev, src_offset, dst_offset, num_pages * RADEON_GPU_PAGE_SIZE);
r600_blit_done_copy(rdev, fence);
return 0;
}
-int r600_irq_process(struct radeon_device *rdev)
-{
- /* FIXME: implement */
- return 0;
-}
-
-int r600_irq_set(struct radeon_device *rdev)
-{
- /* FIXME: implement */
- return 0;
-}
-
int r600_set_surface_reg(struct radeon_device *rdev, int reg,
uint32_t tiling_flags, uint32_t pitch,
uint32_t offset, uint32_t obj_size)
@@ -1513,12 +1580,25 @@ int r600_startup(struct radeon_device *rdev)
}
r600_gpu_init(rdev);
- r = radeon_object_pin(rdev->r600_blit.shader_obj, RADEON_GEM_DOMAIN_VRAM,
- &rdev->r600_blit.shader_gpu_addr);
+ r = radeon_bo_reserve(rdev->r600_blit.shader_obj, false);
+ if (unlikely(r != 0))
+ return r;
+ r = radeon_bo_pin(rdev->r600_blit.shader_obj, RADEON_GEM_DOMAIN_VRAM,
+ &rdev->r600_blit.shader_gpu_addr);
+ radeon_bo_unreserve(rdev->r600_blit.shader_obj);
+ if (r) {
+ dev_err(rdev->dev, "(%d) pin blit object failed\n", r);
+ return r;
+ }
+
+ /* Enable IRQ */
+ r = r600_irq_init(rdev);
if (r) {
- DRM_ERROR("failed to pin blit object %d\n", r);
+ DRM_ERROR("radeon: IH init failed (%d).\n", r);
+ radeon_irq_kms_fini(rdev);
return r;
}
+ r600_irq_set(rdev);
r = radeon_ring_init(rdev, rdev->cp.ring_size);
if (r)
@@ -1580,13 +1660,19 @@ int r600_resume(struct radeon_device *rdev)
int r600_suspend(struct radeon_device *rdev)
{
+ int r;
+
/* FIXME: we should wait for ring to be empty */
r600_cp_stop(rdev);
rdev->cp.ready = false;
r600_wb_disable(rdev);
r600_pcie_gart_disable(rdev);
/* unpin shaders bo */
- radeon_object_unpin(rdev->r600_blit.shader_obj);
+ r = radeon_bo_reserve(rdev->r600_blit.shader_obj, false);
+ if (unlikely(r != 0))
+ return r;
+ radeon_bo_unpin(rdev->r600_blit.shader_obj);
+ radeon_bo_unreserve(rdev->r600_blit.shader_obj);
return 0;
}
@@ -1624,7 +1710,11 @@ int r600_init(struct radeon_device *rdev)
if (r)
return r;
/* Post card if necessary */
- if (!r600_card_posted(rdev) && rdev->bios) {
+ if (!r600_card_posted(rdev)) {
+ if (!rdev->bios) {
+ dev_err(rdev->dev, "Card not posted and no BIOS - ignoring\n");
+ return -EINVAL;
+ }
DRM_INFO("GPU not posted. posting now...\n");
atom_asic_init(rdev->mode_info.atom_context);
}
@@ -1632,10 +1722,13 @@ int r600_init(struct radeon_device *rdev)
r600_scratch_init(rdev);
/* Initialize surface registers */
radeon_surface_init(rdev);
+ /* Initialize clocks */
radeon_get_clock_info(rdev->ddev);
r = radeon_clocks_init(rdev);
if (r)
return r;
+ /* Initialize power management */
+ radeon_pm_init(rdev);
/* Fence driver */
r = radeon_fence_driver_init(rdev);
if (r)
@@ -1644,14 +1737,22 @@ int r600_init(struct radeon_device *rdev)
if (r)
return r;
/* Memory manager */
- r = radeon_object_init(rdev);
+ r = radeon_bo_init(rdev);
if (r)
return r;
+
+ r = radeon_irq_kms_init(rdev);
+ if (r)
+ return r;
+
rdev->cp.ring_obj = NULL;
r600_ring_init(rdev, 1024 * 1024);
- if (!rdev->me_fw || !rdev->pfp_fw) {
- r = r600_cp_init_microcode(rdev);
+ rdev->ih.ring_obj = NULL;
+ r600_ih_ring_init(rdev, 64 * 1024);
+
+ if (!rdev->me_fw || !rdev->pfp_fw || !rdev->rlc_fw) {
+ r = r600_init_microcode(rdev);
if (r) {
DRM_ERROR("Failed to load firmware!\n");
return r;
@@ -1698,6 +1799,8 @@ void r600_fini(struct radeon_device *rdev)
r600_suspend(rdev);
r600_blit_fini(rdev);
+ r600_irq_fini(rdev);
+ radeon_irq_kms_fini(rdev);
radeon_ring_fini(rdev);
r600_wb_fini(rdev);
r600_pcie_gart_fini(rdev);
@@ -1706,7 +1809,7 @@ void r600_fini(struct radeon_device *rdev)
radeon_clocks_fini(rdev);
if (rdev->flags & RADEON_IS_AGP)
radeon_agp_fini(rdev);
- radeon_object_fini(rdev);
+ radeon_bo_fini(rdev);
radeon_atombios_fini(rdev);
kfree(rdev->bios);
rdev->bios = NULL;
@@ -1792,8 +1895,461 @@ int r600_ib_test(struct radeon_device *rdev)
return r;
}
+/*
+ * Interrupts
+ *
+ * Interrupts use a ring buffer on r6xx/r7xx hardware. It works pretty
+ * much the same as the CP ring buffer, but in reverse. Rather than the CPU
+ * writing to the ring and the GPU consuming, the GPU writes to the ring
+ * and host consumes. As the host irq handler processes interrupts, it
+ * increments the rptr. When the rptr catches up with the wptr, all the
+ * current interrupts have been processed.
+ */
+
+void r600_ih_ring_init(struct radeon_device *rdev, unsigned ring_size)
+{
+ u32 rb_bufsz;
+ /* Align ring size */
+ rb_bufsz = drm_order(ring_size / 4);
+ ring_size = (1 << rb_bufsz) * 4;
+ rdev->ih.ring_size = ring_size;
+ rdev->ih.align_mask = 4 - 1;
+}
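The comment above describes a single-producer ring: the GPU advances the write pointer and the host walks 16-byte entries until its read pointer catches up, which is what r600_irq_process() below implements. Stripped of the hardware specifics, the host side reduces to the following sketch (the entry handler is a placeholder):

#include <stdint.h>

/* Placeholder for the src_id/src_data switch in r600_irq_process(). */
static void handle_entry(const volatile uint32_t *entry)
{
	(void)entry;
}

/* Consume-loop sketch: walk 16-byte entries until rptr catches wptr,
 * then hand rptr back to the hardware (IH_RB_RPTR). */
static uint32_t ih_consume(volatile uint32_t *ring, uint32_t ring_bytes,
			   uint32_t rptr, uint32_t wptr)
{
	while (rptr != wptr) {
		handle_entry(&ring[rptr / 4]);
		rptr = (rptr + 16) % ring_bytes;	/* wrap at the end of the ring */
	}
	return rptr;
}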
+static int r600_ih_ring_alloc(struct radeon_device *rdev, unsigned ring_size)
+{
+ int r;
+
+ rdev->ih.ring_size = ring_size;
+ /* Allocate ring buffer */
+ if (rdev->ih.ring_obj == NULL) {
+ r = radeon_bo_create(rdev, NULL, rdev->ih.ring_size,
+ true,
+ RADEON_GEM_DOMAIN_GTT,
+ &rdev->ih.ring_obj);
+ if (r) {
+ DRM_ERROR("radeon: failed to create ih ring buffer (%d).\n", r);
+ return r;
+ }
+ r = radeon_bo_reserve(rdev->ih.ring_obj, false);
+ if (unlikely(r != 0))
+ return r;
+ r = radeon_bo_pin(rdev->ih.ring_obj,
+ RADEON_GEM_DOMAIN_GTT,
+ &rdev->ih.gpu_addr);
+ if (r) {
+ radeon_bo_unreserve(rdev->ih.ring_obj);
+ DRM_ERROR("radeon: failed to pin ih ring buffer (%d).\n", r);
+ return r;
+ }
+ r = radeon_bo_kmap(rdev->ih.ring_obj,
+ (void **)&rdev->ih.ring);
+ radeon_bo_unreserve(rdev->ih.ring_obj);
+ if (r) {
+ DRM_ERROR("radeon: failed to map ih ring buffer (%d).\n", r);
+ return r;
+ }
+ }
+ rdev->ih.ptr_mask = (rdev->cp.ring_size / 4) - 1;
+ rdev->ih.rptr = 0;
+
+ return 0;
+}
+
+static void r600_ih_ring_fini(struct radeon_device *rdev)
+{
+ int r;
+ if (rdev->ih.ring_obj) {
+ r = radeon_bo_reserve(rdev->ih.ring_obj, false);
+ if (likely(r == 0)) {
+ radeon_bo_kunmap(rdev->ih.ring_obj);
+ radeon_bo_unpin(rdev->ih.ring_obj);
+ radeon_bo_unreserve(rdev->ih.ring_obj);
+ }
+ radeon_bo_unref(&rdev->ih.ring_obj);
+ rdev->ih.ring = NULL;
+ rdev->ih.ring_obj = NULL;
+ }
+}
+
+static void r600_rlc_stop(struct radeon_device *rdev)
+{
+
+ if (rdev->family >= CHIP_RV770) {
+ /* r7xx asics need to soft reset RLC before halting */
+ WREG32(SRBM_SOFT_RESET, SOFT_RESET_RLC);
+ RREG32(SRBM_SOFT_RESET);
+ udelay(15000);
+ WREG32(SRBM_SOFT_RESET, 0);
+ RREG32(SRBM_SOFT_RESET);
+ }
+
+ WREG32(RLC_CNTL, 0);
+}
+
+static void r600_rlc_start(struct radeon_device *rdev)
+{
+ WREG32(RLC_CNTL, RLC_ENABLE);
+}
+
+static int r600_rlc_init(struct radeon_device *rdev)
+{
+ u32 i;
+ const __be32 *fw_data;
+
+ if (!rdev->rlc_fw)
+ return -EINVAL;
+
+ r600_rlc_stop(rdev);
+
+ WREG32(RLC_HB_BASE, 0);
+ WREG32(RLC_HB_CNTL, 0);
+ WREG32(RLC_HB_RPTR, 0);
+ WREG32(RLC_HB_WPTR, 0);
+ WREG32(RLC_HB_WPTR_LSB_ADDR, 0);
+ WREG32(RLC_HB_WPTR_MSB_ADDR, 0);
+ WREG32(RLC_MC_CNTL, 0);
+ WREG32(RLC_UCODE_CNTL, 0);
+
+ fw_data = (const __be32 *)rdev->rlc_fw->data;
+ if (rdev->family >= CHIP_RV770) {
+ for (i = 0; i < R700_RLC_UCODE_SIZE; i++) {
+ WREG32(RLC_UCODE_ADDR, i);
+ WREG32(RLC_UCODE_DATA, be32_to_cpup(fw_data++));
+ }
+ } else {
+ for (i = 0; i < RLC_UCODE_SIZE; i++) {
+ WREG32(RLC_UCODE_ADDR, i);
+ WREG32(RLC_UCODE_DATA, be32_to_cpup(fw_data++));
+ }
+ }
+ WREG32(RLC_UCODE_ADDR, 0);
+
+ r600_rlc_start(rdev);
+
+ return 0;
+}
+
+static void r600_enable_interrupts(struct radeon_device *rdev)
+{
+ u32 ih_cntl = RREG32(IH_CNTL);
+ u32 ih_rb_cntl = RREG32(IH_RB_CNTL);
+
+ ih_cntl |= ENABLE_INTR;
+ ih_rb_cntl |= IH_RB_ENABLE;
+ WREG32(IH_CNTL, ih_cntl);
+ WREG32(IH_RB_CNTL, ih_rb_cntl);
+ rdev->ih.enabled = true;
+}
+
+static void r600_disable_interrupts(struct radeon_device *rdev)
+{
+ u32 ih_rb_cntl = RREG32(IH_RB_CNTL);
+ u32 ih_cntl = RREG32(IH_CNTL);
+
+ ih_rb_cntl &= ~IH_RB_ENABLE;
+ ih_cntl &= ~ENABLE_INTR;
+ WREG32(IH_RB_CNTL, ih_rb_cntl);
+ WREG32(IH_CNTL, ih_cntl);
+ /* set rptr, wptr to 0 */
+ WREG32(IH_RB_RPTR, 0);
+ WREG32(IH_RB_WPTR, 0);
+ rdev->ih.enabled = false;
+ rdev->ih.wptr = 0;
+ rdev->ih.rptr = 0;
+}
+
+int r600_irq_init(struct radeon_device *rdev)
+{
+ int ret = 0;
+ int rb_bufsz;
+ u32 interrupt_cntl, ih_cntl, ih_rb_cntl;
+
+ /* allocate ring */
+ ret = r600_ih_ring_alloc(rdev, rdev->ih.ring_size);
+ if (ret)
+ return ret;
+
+ /* disable irqs */
+ r600_disable_interrupts(rdev);
+
+ /* init rlc */
+ ret = r600_rlc_init(rdev);
+ if (ret) {
+ r600_ih_ring_fini(rdev);
+ return ret;
+ }
+
+ /* setup interrupt control */
+ /* set dummy read address to ring address */
+ WREG32(INTERRUPT_CNTL2, rdev->ih.gpu_addr >> 8);
+ interrupt_cntl = RREG32(INTERRUPT_CNTL);
+ /* IH_DUMMY_RD_OVERRIDE=0 - dummy read disabled with msi, enabled without msi
+ * IH_DUMMY_RD_OVERRIDE=1 - dummy read controlled by IH_DUMMY_RD_EN
+ */
+ interrupt_cntl &= ~IH_DUMMY_RD_OVERRIDE;
+ /* IH_REQ_NONSNOOP_EN=1 if ring is in non-cacheable memory, e.g., vram */
+ interrupt_cntl &= ~IH_REQ_NONSNOOP_EN;
+ WREG32(INTERRUPT_CNTL, interrupt_cntl);
+
+ WREG32(IH_RB_BASE, rdev->ih.gpu_addr >> 8);
+ rb_bufsz = drm_order(rdev->ih.ring_size / 4);
+
+ ih_rb_cntl = (IH_WPTR_OVERFLOW_ENABLE |
+ IH_WPTR_OVERFLOW_CLEAR |
+ (rb_bufsz << 1));
+ /* WPTR writeback, not yet */
+ /*ih_rb_cntl |= IH_WPTR_WRITEBACK_ENABLE;*/
+ WREG32(IH_RB_WPTR_ADDR_LO, 0);
+ WREG32(IH_RB_WPTR_ADDR_HI, 0);
+
+ WREG32(IH_RB_CNTL, ih_rb_cntl);
+
+ /* set rptr, wptr to 0 */
+ WREG32(IH_RB_RPTR, 0);
+ WREG32(IH_RB_WPTR, 0);
+
+ /* Default settings for IH_CNTL (disabled at first) */
+ ih_cntl = MC_WRREQ_CREDIT(0x10) | MC_WR_CLEAN_CNT(0x10);
+ /* RPTR_REARM only works if msi's are enabled */
+ if (rdev->msi_enabled)
+ ih_cntl |= RPTR_REARM;
+
+#ifdef __BIG_ENDIAN
+ ih_cntl |= IH_MC_SWAP(IH_MC_SWAP_32BIT);
+#endif
+ WREG32(IH_CNTL, ih_cntl);
+
+ /* force the active interrupt state to all disabled */
+ WREG32(CP_INT_CNTL, 0);
+ WREG32(GRBM_INT_CNTL, 0);
+ WREG32(DxMODE_INT_MASK, 0);
+
+ /* enable irqs */
+ r600_enable_interrupts(rdev);
+
+ return ret;
+}
+
+void r600_irq_fini(struct radeon_device *rdev)
+{
+ r600_disable_interrupts(rdev);
+ r600_rlc_stop(rdev);
+ r600_ih_ring_fini(rdev);
+}
+
+int r600_irq_set(struct radeon_device *rdev)
+{
+ uint32_t cp_int_cntl = CNTX_BUSY_INT_ENABLE | CNTX_EMPTY_INT_ENABLE;
+ uint32_t mode_int = 0;
+
+ /* don't enable anything if the ih is disabled */
+ if (!rdev->ih.enabled)
+ return 0;
+
+ if (rdev->irq.sw_int) {
+ DRM_DEBUG("r600_irq_set: sw int\n");
+ cp_int_cntl |= RB_INT_ENABLE;
+ }
+ if (rdev->irq.crtc_vblank_int[0]) {
+ DRM_DEBUG("r600_irq_set: vblank 0\n");
+ mode_int |= D1MODE_VBLANK_INT_MASK;
+ }
+ if (rdev->irq.crtc_vblank_int[1]) {
+ DRM_DEBUG("r600_irq_set: vblank 1\n");
+ mode_int |= D2MODE_VBLANK_INT_MASK;
+ }
+
+ WREG32(CP_INT_CNTL, cp_int_cntl);
+ WREG32(DxMODE_INT_MASK, mode_int);
+
+ return 0;
+}
+
+static inline void r600_irq_ack(struct radeon_device *rdev, u32 disp_int)
+{
+
+ if (disp_int & LB_D1_VBLANK_INTERRUPT)
+ WREG32(D1MODE_VBLANK_STATUS, DxMODE_VBLANK_ACK);
+ if (disp_int & LB_D1_VLINE_INTERRUPT)
+ WREG32(D1MODE_VLINE_STATUS, DxMODE_VLINE_ACK);
+ if (disp_int & LB_D2_VBLANK_INTERRUPT)
+ WREG32(D2MODE_VBLANK_STATUS, DxMODE_VBLANK_ACK);
+ if (disp_int & LB_D2_VLINE_INTERRUPT)
+ WREG32(D2MODE_VLINE_STATUS, DxMODE_VLINE_ACK);
+
+}
+
+void r600_irq_disable(struct radeon_device *rdev)
+{
+ u32 disp_int;
+
+ r600_disable_interrupts(rdev);
+ /* Wait and acknowledge irq */
+ mdelay(1);
+ if (ASIC_IS_DCE3(rdev))
+ disp_int = RREG32(DCE3_DISP_INTERRUPT_STATUS);
+ else
+ disp_int = RREG32(DISP_INTERRUPT_STATUS);
+ r600_irq_ack(rdev, disp_int);
+}
+
+static inline u32 r600_get_ih_wptr(struct radeon_device *rdev)
+{
+ u32 wptr, tmp;
+
+ /* XXX use writeback */
+ wptr = RREG32(IH_RB_WPTR);
+
+ if (wptr & RB_OVERFLOW) {
+ WARN_ON(1);
+ /* XXX deal with overflow */
+ DRM_ERROR("IH RB overflow\n");
+ tmp = RREG32(IH_RB_CNTL);
+ tmp |= IH_WPTR_OVERFLOW_CLEAR;
+ WREG32(IH_RB_CNTL, tmp);
+ }
+ wptr = wptr & WPTR_OFFSET_MASK;
+
+ return wptr;
+}
+
+/* r600 IV Ring
+ * Each IV ring entry is 128 bits:
+ * [7:0] - interrupt source id
+ * [31:8] - reserved
+ * [59:32] - interrupt source data
+ * [127:60] - reserved
+ *
+ * The basic interrupt vector entries
+ * are decoded as follows:
+ * src_id src_data description
+ * 1 0 D1 Vblank
+ * 1 1 D1 Vline
+ * 5 0 D2 Vblank
+ * 5 1 D2 Vline
+ * 19 0 FP Hot plug detection A
+ * 19 1 FP Hot plug detection B
+ * 19 2 DAC A auto-detection
+ * 19 3 DAC B auto-detection
+ * 176 - CP_INT RB
+ * 177 - CP_INT IB1
+ * 178 - CP_INT IB2
+ * 181 - EOP Interrupt
+ * 233 - GUI Idle
+ *
+ * Note, these are based on r600 and may need to be
+ * adjusted or added to on newer asics
+ */
+
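Per the layout above, only the low byte of dword 0 and the low 28 bits of dword 1 of each 128-bit entry carry information; decoding one entry reduces to:

#include <stdint.h>

struct iv_entry {
	uint8_t  src_id;	/* entry bits [7:0] */
	uint32_t src_data;	/* entry bits [59:32] */
};

/* Decode one IH vector entry (four dwords), matching the masks used in
 * r600_irq_process() below. */
static struct iv_entry decode_iv(const uint32_t dw[4])
{
	struct iv_entry e = {
		.src_id   = dw[0] & 0xff,
		.src_data = dw[1] & 0x0fffffff,
	};
	return e;
}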
+int r600_irq_process(struct radeon_device *rdev)
+{
+ u32 wptr = r600_get_ih_wptr(rdev);
+ u32 rptr = rdev->ih.rptr;
+ u32 src_id, src_data;
+ u32 last_entry = rdev->ih.ring_size - 16;
+ u32 ring_index, disp_int;
+ unsigned long flags;
+
+ DRM_DEBUG("r600_irq_process start: rptr %d, wptr %d\n", rptr, wptr);
+
+ spin_lock_irqsave(&rdev->ih.lock, flags);
+
+ if (rptr == wptr) {
+ spin_unlock_irqrestore(&rdev->ih.lock, flags);
+ return IRQ_NONE;
+ }
+ if (rdev->shutdown) {
+ spin_unlock_irqrestore(&rdev->ih.lock, flags);
+ return IRQ_NONE;
+ }
+
+restart_ih:
+ /* display interrupts */
+ if (ASIC_IS_DCE3(rdev))
+ disp_int = RREG32(DCE3_DISP_INTERRUPT_STATUS);
+ else
+ disp_int = RREG32(DISP_INTERRUPT_STATUS);
+ r600_irq_ack(rdev, disp_int);
+
+ rdev->ih.wptr = wptr;
+ while (rptr != wptr) {
+ /* wptr/rptr are in bytes! */
+ ring_index = rptr / 4;
+ src_id = rdev->ih.ring[ring_index] & 0xff;
+ src_data = rdev->ih.ring[ring_index + 1] & 0xfffffff;
+
+ switch (src_id) {
+ case 1: /* D1 vblank/vline */
+ switch (src_data) {
+ case 0: /* D1 vblank */
+ if (disp_int & LB_D1_VBLANK_INTERRUPT) {
+ drm_handle_vblank(rdev->ddev, 0);
+ disp_int &= ~LB_D1_VBLANK_INTERRUPT;
+ DRM_DEBUG("IH: D1 vblank\n");
+ }
+ break;
+ case 1: /* D1 vline */
+ if (disp_int & LB_D1_VLINE_INTERRUPT) {
+ disp_int &= ~LB_D1_VLINE_INTERRUPT;
+ DRM_DEBUG("IH: D1 vline\n");
+ }
+ break;
+ default:
+ DRM_ERROR("Unhandled interrupt: %d %d\n", src_id, src_data);
+ break;
+ }
+ break;
+ case 5: /* D2 vblank/vline */
+ switch (src_data) {
+ case 0: /* D2 vblank */
+ if (disp_int & LB_D2_VBLANK_INTERRUPT) {
+ drm_handle_vblank(rdev->ddev, 1);
+ disp_int &= ~LB_D2_VBLANK_INTERRUPT;
+ DRM_DEBUG("IH: D2 vblank\n");
+ }
+ break;
+ case 1: /* D2 vline */
+ if (disp_int & LB_D2_VLINE_INTERRUPT) {
+ disp_int &= ~LB_D2_VLINE_INTERRUPT;
+ DRM_DEBUG("IH: D2 vline\n");
+ }
+ break;
+ default:
+ DRM_ERROR("Unhandled interrupt: %d %d\n", src_id, src_data);
+ break;
+ }
+ break;
+ case 176: /* CP_INT in ring buffer */
+ case 177: /* CP_INT in IB1 */
+ case 178: /* CP_INT in IB2 */
+ DRM_DEBUG("IH: CP int: 0x%08x\n", src_data);
+ radeon_fence_process(rdev);
+ break;
+ case 181: /* CP EOP event */
+ DRM_DEBUG("IH: CP EOP\n");
+ break;
+ default:
+ DRM_ERROR("Unhandled interrupt: %d %d\n", src_id, src_data);
+ break;
+ }
+
+ /* wptr/rptr are in bytes! */
+ if (rptr == last_entry)
+ rptr = 0;
+ else
+ rptr += 16;
+ }
+ /* make sure wptr hasn't changed while processing */
+ wptr = r600_get_ih_wptr(rdev);
+ if (wptr != rdev->ih.wptr)
+ goto restart_ih;
+ rdev->ih.rptr = rptr;
+ WREG32(IH_RB_RPTR, rdev->ih.rptr);
+ spin_unlock_irqrestore(&rdev->ih.lock, flags);
+ return IRQ_HANDLED;
+}
/*
* Debugfs info
@@ -1805,21 +2361,21 @@ static int r600_debugfs_cp_ring_info(struct seq_file *m, void *data)
struct drm_info_node *node = (struct drm_info_node *) m->private;
struct drm_device *dev = node->minor->dev;
struct radeon_device *rdev = dev->dev_private;
- uint32_t rdp, wdp;
unsigned count, i, j;
radeon_ring_free_size(rdev);
- rdp = RREG32(CP_RB_RPTR);
- wdp = RREG32(CP_RB_WPTR);
- count = (rdp + rdev->cp.ring_size - wdp) & rdev->cp.ptr_mask;
+ count = (rdev->cp.ring_size / 4) - rdev->cp.ring_free_dw;
seq_printf(m, "CP_STAT 0x%08x\n", RREG32(CP_STAT));
- seq_printf(m, "CP_RB_WPTR 0x%08x\n", wdp);
- seq_printf(m, "CP_RB_RPTR 0x%08x\n", rdp);
+ seq_printf(m, "CP_RB_WPTR 0x%08x\n", RREG32(CP_RB_WPTR));
+ seq_printf(m, "CP_RB_RPTR 0x%08x\n", RREG32(CP_RB_RPTR));
+ seq_printf(m, "driver's copy of the CP_RB_WPTR 0x%08x\n", rdev->cp.wptr);
+ seq_printf(m, "driver's copy of the CP_RB_RPTR 0x%08x\n", rdev->cp.rptr);
seq_printf(m, "%u free dwords in ring\n", rdev->cp.ring_free_dw);
seq_printf(m, "%u dwords in ring\n", count);
+ i = rdev->cp.rptr;
for (j = 0; j <= count; j++) {
- i = (rdp + j) & rdev->cp.ptr_mask;
seq_printf(m, "r[%04d]=0x%08x\n", i, rdev->cp.ring[i]);
+ i = (i + 1) & rdev->cp.ptr_mask;
}
return 0;
}
diff --git a/drivers/gpu/drm/radeon/r600_blit.c b/drivers/gpu/drm/radeon/r600_blit.c
index dec50108160..5ea43234758 100644
--- a/drivers/gpu/drm/radeon/r600_blit.c
+++ b/drivers/gpu/drm/radeon/r600_blit.c
@@ -582,6 +582,8 @@ r600_blit_copy(struct drm_device *dev,
u64 vb_addr;
u32 *vb;
+ vb = r600_nomm_get_vb_ptr(dev);
+
if ((size_bytes & 3) || (src_gpu_addr & 3) || (dst_gpu_addr & 3)) {
max_bytes = 8192;
@@ -617,8 +619,8 @@ r600_blit_copy(struct drm_device *dev,
if (!dev_priv->blit_vb)
return;
set_shaders(dev);
+ vb = r600_nomm_get_vb_ptr(dev);
}
- vb = r600_nomm_get_vb_ptr(dev);
vb[0] = i2f(dst_x);
vb[1] = 0;
@@ -706,8 +708,8 @@ r600_blit_copy(struct drm_device *dev,
return;
set_shaders(dev);
+ vb = r600_nomm_get_vb_ptr(dev);
}
- vb = r600_nomm_get_vb_ptr(dev);
vb[0] = i2f(dst_x / 4);
vb[1] = 0;
@@ -772,6 +774,7 @@ r600_blit_swap(struct drm_device *dev,
{
drm_radeon_private_t *dev_priv = dev->dev_private;
int cb_format, tex_format;
+ int sx2, sy2, dx2, dy2;
u64 vb_addr;
u32 *vb;
@@ -786,16 +789,10 @@ r600_blit_swap(struct drm_device *dev,
}
vb = r600_nomm_get_vb_ptr(dev);
- if (cpp == 4) {
- cb_format = COLOR_8_8_8_8;
- tex_format = FMT_8_8_8_8;
- } else if (cpp == 2) {
- cb_format = COLOR_5_6_5;
- tex_format = FMT_5_6_5;
- } else {
- cb_format = COLOR_8;
- tex_format = FMT_8;
- }
+ sx2 = sx + w;
+ sy2 = sy + h;
+ dx2 = dx + w;
+ dy2 = dy + h;
vb[0] = i2f(dx);
vb[1] = i2f(dy);
@@ -803,31 +800,46 @@ r600_blit_swap(struct drm_device *dev,
vb[3] = i2f(sy);
vb[4] = i2f(dx);
- vb[5] = i2f(dy + h);
+ vb[5] = i2f(dy2);
vb[6] = i2f(sx);
- vb[7] = i2f(sy + h);
+ vb[7] = i2f(sy2);
+
+ vb[8] = i2f(dx2);
+ vb[9] = i2f(dy2);
+ vb[10] = i2f(sx2);
+ vb[11] = i2f(sy2);
- vb[8] = i2f(dx + w);
- vb[9] = i2f(dy + h);
- vb[10] = i2f(sx + w);
- vb[11] = i2f(sy + h);
+ switch(cpp) {
+ case 4:
+ cb_format = COLOR_8_8_8_8;
+ tex_format = FMT_8_8_8_8;
+ break;
+ case 2:
+ cb_format = COLOR_5_6_5;
+ tex_format = FMT_5_6_5;
+ break;
+ default:
+ cb_format = COLOR_8;
+ tex_format = FMT_8;
+ break;
+ }
/* src */
set_tex_resource(dev_priv, tex_format,
src_pitch / cpp,
- sy + h, src_pitch / cpp,
+ sy2, src_pitch / cpp,
src_gpu_addr);
cp_set_surface_sync(dev_priv,
- R600_TC_ACTION_ENA, (src_pitch * (sy + h)), src_gpu_addr);
+ R600_TC_ACTION_ENA, src_pitch * sy2, src_gpu_addr);
/* dst */
set_render_target(dev_priv, cb_format,
- dst_pitch / cpp, dy + h,
+ dst_pitch / cpp, dy2,
dst_gpu_addr);
/* scissors */
- set_scissors(dev_priv, dx, dy, dx + w, dy + h);
+ set_scissors(dev_priv, dx, dy, dx2, dy2);
/* Vertex buffer setup */
vb_addr = dev_priv->gart_buffers_offset +
@@ -840,7 +852,7 @@ r600_blit_swap(struct drm_device *dev,
cp_set_surface_sync(dev_priv,
R600_CB_ACTION_ENA | R600_CB0_DEST_BASE_ENA,
- dst_pitch * (dy + h), dst_gpu_addr);
+ dst_pitch * dy2, dst_gpu_addr);
dev_priv->blit_vb->used += 12 * 4;
}
diff --git a/drivers/gpu/drm/radeon/r600_blit_kms.c b/drivers/gpu/drm/radeon/r600_blit_kms.c
index 93108bb31d1..9aecafb51b6 100644
--- a/drivers/gpu/drm/radeon/r600_blit_kms.c
+++ b/drivers/gpu/drm/radeon/r600_blit_kms.c
@@ -368,7 +368,7 @@ set_default_state(struct radeon_device *rdev)
if ((rdev->family == CHIP_RV610) ||
(rdev->family == CHIP_RV620) ||
(rdev->family == CHIP_RS780) ||
- (rdev->family == CHIP_RS780) ||
+ (rdev->family == CHIP_RS880) ||
(rdev->family == CHIP_RV710))
sq_config = 0;
else
@@ -473,9 +473,8 @@ int r600_blit_init(struct radeon_device *rdev)
obj_size += r6xx_ps_size * 4;
obj_size = ALIGN(obj_size, 256);
- r = radeon_object_create(rdev, NULL, obj_size,
- true, RADEON_GEM_DOMAIN_VRAM,
- false, &rdev->r600_blit.shader_obj);
+ r = radeon_bo_create(rdev, NULL, obj_size, true, RADEON_GEM_DOMAIN_VRAM,
+ &rdev->r600_blit.shader_obj);
if (r) {
DRM_ERROR("r600 failed to allocate shader\n");
return r;
@@ -485,12 +484,14 @@ int r600_blit_init(struct radeon_device *rdev)
obj_size,
rdev->r600_blit.vs_offset, rdev->r600_blit.ps_offset);
- r = radeon_object_kmap(rdev->r600_blit.shader_obj, &ptr);
+ r = radeon_bo_reserve(rdev->r600_blit.shader_obj, false);
+ if (unlikely(r != 0))
+ return r;
+ r = radeon_bo_kmap(rdev->r600_blit.shader_obj, &ptr);
if (r) {
DRM_ERROR("failed to map blit object %d\n", r);
return r;
}
-
if (rdev->family >= CHIP_RV770)
memcpy_toio(ptr + rdev->r600_blit.state_offset,
r7xx_default_state, rdev->r600_blit.state_len * 4);
@@ -500,19 +501,26 @@ int r600_blit_init(struct radeon_device *rdev)
if (num_packet2s)
memcpy_toio(ptr + rdev->r600_blit.state_offset + (rdev->r600_blit.state_len * 4),
packet2s, num_packet2s * 4);
-
-
memcpy(ptr + rdev->r600_blit.vs_offset, r6xx_vs, r6xx_vs_size * 4);
memcpy(ptr + rdev->r600_blit.ps_offset, r6xx_ps, r6xx_ps_size * 4);
-
- radeon_object_kunmap(rdev->r600_blit.shader_obj);
+ radeon_bo_kunmap(rdev->r600_blit.shader_obj);
+ radeon_bo_unreserve(rdev->r600_blit.shader_obj);
return 0;
}
void r600_blit_fini(struct radeon_device *rdev)
{
- radeon_object_unpin(rdev->r600_blit.shader_obj);
- radeon_object_unref(&rdev->r600_blit.shader_obj);
+ int r;
+
+ r = radeon_bo_reserve(rdev->r600_blit.shader_obj, false);
+ if (unlikely(r != 0)) {
+ dev_err(rdev->dev, "(%d) can't finish r600 blit\n", r);
+ goto out_unref;
+ }
+ radeon_bo_unpin(rdev->r600_blit.shader_obj);
+ radeon_bo_unreserve(rdev->r600_blit.shader_obj);
+out_unref:
+ radeon_bo_unref(&rdev->r600_blit.shader_obj);
}
int r600_vb_ib_get(struct radeon_device *rdev)
@@ -569,9 +577,9 @@ int r600_blit_prepare_copy(struct radeon_device *rdev, int size_bytes)
ring_size = num_loops * dwords_per_loop;
/* set default + shaders */
ring_size += 40; /* shaders + def state */
- ring_size += 3; /* fence emit for VB IB */
+ ring_size += 5; /* fence emit for VB IB */
ring_size += 5; /* done copy */
- ring_size += 3; /* fence emit for done copy */
+ ring_size += 5; /* fence emit for done copy */
r = radeon_ring_lock(rdev, ring_size);
WARN_ON(r);
@@ -610,6 +618,7 @@ void r600_kms_blit_copy(struct radeon_device *rdev,
DRM_DEBUG("emitting copy %16llx %16llx %d %d\n", src_gpu_addr, dst_gpu_addr,
size_bytes, rdev->r600_blit.vb_used);
+ vb = (u32 *)(rdev->r600_blit.vb_ib->ptr + rdev->r600_blit.vb_used);
if ((size_bytes & 3) || (src_gpu_addr & 3) || (dst_gpu_addr & 3)) {
max_bytes = 8192;
@@ -652,7 +661,6 @@ void r600_kms_blit_copy(struct radeon_device *rdev,
vb = r600_nomm_get_vb_ptr(dev);
#endif
}
- vb = (u32 *)(rdev->r600_blit.vb_ib->ptr + rdev->r600_blit.vb_used);
vb[0] = i2f(dst_x);
vb[1] = 0;
@@ -747,7 +755,6 @@ void r600_kms_blit_copy(struct radeon_device *rdev,
vb = r600_nomm_get_vb_ptr(dev);
}
#endif
- vb = (u32 *)(rdev->r600_blit.vb_ib->ptr + rdev->r600_blit.vb_used);
vb[0] = i2f(dst_x / 4);
vb[1] = 0;
diff --git a/drivers/gpu/drm/radeon/r600_cs.c b/drivers/gpu/drm/radeon/r600_cs.c
index 17e42195c63..0d820764f34 100644
--- a/drivers/gpu/drm/radeon/r600_cs.c
+++ b/drivers/gpu/drm/radeon/r600_cs.c
@@ -466,6 +466,23 @@ static int r600_packet3_check(struct radeon_cs_parser *p,
for (i = 0; i < pkt->count; i++) {
reg = start_reg + (4 * i);
switch (reg) {
+ case SQ_ESGS_RING_BASE:
+ case SQ_GSVS_RING_BASE:
+ case SQ_ESTMP_RING_BASE:
+ case SQ_GSTMP_RING_BASE:
+ case SQ_VSTMP_RING_BASE:
+ case SQ_PSTMP_RING_BASE:
+ case SQ_FBUF_RING_BASE:
+ case SQ_REDUC_RING_BASE:
+ case SX_MEMORY_EXPORT_BASE:
+ r = r600_cs_packet_next_reloc(p, &reloc);
+ if (r) {
+ DRM_ERROR("bad SET_CONFIG_REG "
+ "0x%04X\n", reg);
+ return -EINVAL;
+ }
+ ib[idx+1+i] += (u32)((reloc->lobj.gpu_offset >> 8) & 0xffffffff);
+ break;
case CP_COHER_BASE:
/* use PACKET3_SURFACE_SYNC */
return -EINVAL;
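The new SET_CONFIG_REG cases patch a relocated GPU address into the IB dword; the >> 8 reflects that these ring-base registers take a 256-byte-granular address. In isolation the fixup is:

#include <stdint.h>

/* Add the relocated, 256-byte-aligned base (offset >> 8) into the command
 * stream dword, as r600_packet3_check() does above. */
static uint32_t patch_ring_base(uint32_t dword, uint64_t gpu_offset)
{
	return dword + (uint32_t)((gpu_offset >> 8) & 0xffffffff);
}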
@@ -487,6 +504,7 @@ static int r600_packet3_check(struct radeon_cs_parser *p,
reg = start_reg + (4 * i);
switch (reg) {
case DB_DEPTH_BASE:
+ case DB_HTILE_DATA_BASE:
case CB_COLOR0_BASE:
case CB_COLOR1_BASE:
case CB_COLOR2_BASE:
diff --git a/drivers/gpu/drm/radeon/r600d.h b/drivers/gpu/drm/radeon/r600d.h
index 9b64d47f1f8..61ccde5637d 100644
--- a/drivers/gpu/drm/radeon/r600d.h
+++ b/drivers/gpu/drm/radeon/r600d.h
@@ -119,6 +119,7 @@
#define DB_DEBUG 0x9830
#define PREZ_MUST_WAIT_FOR_POSTZ_DONE (1 << 31)
#define DB_DEPTH_BASE 0x2800C
+#define DB_HTILE_DATA_BASE 0x28014
#define DB_WATERMARKS 0x9838
#define DEPTH_FREE(x) ((x) << 0)
#define DEPTH_FLUSH(x) ((x) << 5)
@@ -171,6 +172,14 @@
#define SQ_STACK_RESOURCE_MGMT_2 0x8c14
# define NUM_GS_STACK_ENTRIES(x) ((x) << 0)
# define NUM_ES_STACK_ENTRIES(x) ((x) << 16)
+#define SQ_ESGS_RING_BASE 0x8c40
+#define SQ_GSVS_RING_BASE 0x8c48
+#define SQ_ESTMP_RING_BASE 0x8c50
+#define SQ_GSTMP_RING_BASE 0x8c58
+#define SQ_VSTMP_RING_BASE 0x8c60
+#define SQ_PSTMP_RING_BASE 0x8c68
+#define SQ_FBUF_RING_BASE 0x8c70
+#define SQ_REDUC_RING_BASE 0x8c78
#define GRBM_CNTL 0x8000
# define GRBM_READ_TIMEOUT(x) ((x) << 0)
@@ -271,6 +280,10 @@
#define PCIE_PORT_INDEX 0x0038
#define PCIE_PORT_DATA 0x003C
+#define CHMAP 0x2004
+#define NOOFCHAN_SHIFT 12
+#define NOOFCHAN_MASK 0x00003000
+
#define RAMCFG 0x2408
#define NOOFBANK_SHIFT 0
#define NOOFBANK_MASK 0x00000001
@@ -352,6 +365,7 @@
#define SX_MISC 0x28350
+#define SX_MEMORY_EXPORT_BASE 0x9010
#define SX_DEBUG_1 0x9054
#define SMX_EVENT_RELEASE (1 << 0)
#define ENABLE_NEW_SMX_ADDRESS (1 << 16)
@@ -442,7 +456,163 @@
#define WAIT_2D_IDLECLEAN_bit (1 << 16)
#define WAIT_3D_IDLECLEAN_bit (1 << 17)
-
+#define IH_RB_CNTL 0x3e00
+# define IH_RB_ENABLE (1 << 0)
+# define IH_IB_SIZE(x) ((x) << 1) /* log2 */
+# define IH_RB_FULL_DRAIN_ENABLE (1 << 6)
+# define IH_WPTR_WRITEBACK_ENABLE (1 << 8)
+# define IH_WPTR_WRITEBACK_TIMER(x) ((x) << 9) /* log2 */
+# define IH_WPTR_OVERFLOW_ENABLE (1 << 16)
+# define IH_WPTR_OVERFLOW_CLEAR (1 << 31)
+#define IH_RB_BASE 0x3e04
+#define IH_RB_RPTR 0x3e08
+#define IH_RB_WPTR 0x3e0c
+# define RB_OVERFLOW (1 << 0)
+# define WPTR_OFFSET_MASK 0x3fffc
+#define IH_RB_WPTR_ADDR_HI 0x3e10
+#define IH_RB_WPTR_ADDR_LO 0x3e14
+#define IH_CNTL 0x3e18
+# define ENABLE_INTR (1 << 0)
+# define IH_MC_SWAP(x) ((x) << 2)
+# define IH_MC_SWAP_NONE 0
+# define IH_MC_SWAP_16BIT 1
+# define IH_MC_SWAP_32BIT 2
+# define IH_MC_SWAP_64BIT 3
+# define RPTR_REARM (1 << 4)
+# define MC_WRREQ_CREDIT(x) ((x) << 15)
+# define MC_WR_CLEAN_CNT(x) ((x) << 20)
+
+#define RLC_CNTL 0x3f00
+# define RLC_ENABLE (1 << 0)
+#define RLC_HB_BASE 0x3f10
+#define RLC_HB_CNTL 0x3f0c
+#define RLC_HB_RPTR 0x3f20
+#define RLC_HB_WPTR 0x3f1c
+#define RLC_HB_WPTR_LSB_ADDR 0x3f14
+#define RLC_HB_WPTR_MSB_ADDR 0x3f18
+#define RLC_MC_CNTL 0x3f44
+#define RLC_UCODE_CNTL 0x3f48
+#define RLC_UCODE_ADDR 0x3f2c
+#define RLC_UCODE_DATA 0x3f30
+
+#define SRBM_SOFT_RESET 0xe60
+# define SOFT_RESET_RLC (1 << 13)
+
+#define CP_INT_CNTL 0xc124
+# define CNTX_BUSY_INT_ENABLE (1 << 19)
+# define CNTX_EMPTY_INT_ENABLE (1 << 20)
+# define SCRATCH_INT_ENABLE (1 << 25)
+# define TIME_STAMP_INT_ENABLE (1 << 26)
+# define IB2_INT_ENABLE (1 << 29)
+# define IB1_INT_ENABLE (1 << 30)
+# define RB_INT_ENABLE (1 << 31)
+#define CP_INT_STATUS 0xc128
+# define SCRATCH_INT_STAT (1 << 25)
+# define TIME_STAMP_INT_STAT (1 << 26)
+# define IB2_INT_STAT (1 << 29)
+# define IB1_INT_STAT (1 << 30)
+# define RB_INT_STAT (1 << 31)
+
+#define GRBM_INT_CNTL 0x8060
+# define RDERR_INT_ENABLE (1 << 0)
+# define WAIT_COUNT_TIMEOUT_INT_ENABLE (1 << 1)
+# define GUI_IDLE_INT_ENABLE (1 << 19)
+
+#define INTERRUPT_CNTL 0x5468
+# define IH_DUMMY_RD_OVERRIDE (1 << 0)
+# define IH_DUMMY_RD_EN (1 << 1)
+# define IH_REQ_NONSNOOP_EN (1 << 3)
+# define GEN_IH_INT_EN (1 << 8)
+#define INTERRUPT_CNTL2 0x546c
+
+#define D1MODE_VBLANK_STATUS 0x6534
+#define D2MODE_VBLANK_STATUS 0x6d34
+# define DxMODE_VBLANK_OCCURRED (1 << 0)
+# define DxMODE_VBLANK_ACK (1 << 4)
+# define DxMODE_VBLANK_STAT (1 << 12)
+# define DxMODE_VBLANK_INTERRUPT (1 << 16)
+# define DxMODE_VBLANK_INTERRUPT_TYPE (1 << 17)
+#define D1MODE_VLINE_STATUS 0x653c
+#define D2MODE_VLINE_STATUS 0x6d3c
+# define DxMODE_VLINE_OCCURRED (1 << 0)
+# define DxMODE_VLINE_ACK (1 << 4)
+# define DxMODE_VLINE_STAT (1 << 12)
+# define DxMODE_VLINE_INTERRUPT (1 << 16)
+# define DxMODE_VLINE_INTERRUPT_TYPE (1 << 17)
+#define DxMODE_INT_MASK 0x6540
+# define D1MODE_VBLANK_INT_MASK (1 << 0)
+# define D1MODE_VLINE_INT_MASK (1 << 4)
+# define D2MODE_VBLANK_INT_MASK (1 << 8)
+# define D2MODE_VLINE_INT_MASK (1 << 12)
+#define DCE3_DISP_INTERRUPT_STATUS 0x7ddc
+# define DC_HPD1_INTERRUPT (1 << 18)
+# define DC_HPD2_INTERRUPT (1 << 19)
+#define DISP_INTERRUPT_STATUS 0x7edc
+# define LB_D1_VLINE_INTERRUPT (1 << 2)
+# define LB_D2_VLINE_INTERRUPT (1 << 3)
+# define LB_D1_VBLANK_INTERRUPT (1 << 4)
+# define LB_D2_VBLANK_INTERRUPT (1 << 5)
+# define DACA_AUTODETECT_INTERRUPT (1 << 16)
+# define DACB_AUTODETECT_INTERRUPT (1 << 17)
+# define DC_HOT_PLUG_DETECT1_INTERRUPT (1 << 18)
+# define DC_HOT_PLUG_DETECT2_INTERRUPT (1 << 19)
+# define DC_I2C_SW_DONE_INTERRUPT (1 << 20)
+# define DC_I2C_HW_DONE_INTERRUPT (1 << 21)
+#define DCE3_DISP_INTERRUPT_STATUS_CONTINUE 0x7de8
+# define DC_HPD4_INTERRUPT (1 << 14)
+# define DC_HPD4_RX_INTERRUPT (1 << 15)
+# define DC_HPD3_INTERRUPT (1 << 28)
+# define DC_HPD1_RX_INTERRUPT (1 << 29)
+# define DC_HPD2_RX_INTERRUPT (1 << 30)
+#define DCE3_DISP_INTERRUPT_STATUS_CONTINUE2 0x7dec
+# define DC_HPD3_RX_INTERRUPT (1 << 0)
+# define DIGA_DP_VID_STREAM_DISABLE_INTERRUPT (1 << 1)
+# define DIGA_DP_STEER_FIFO_OVERFLOW_INTERRUPT (1 << 2)
+# define DIGB_DP_VID_STREAM_DISABLE_INTERRUPT (1 << 3)
+# define DIGB_DP_STEER_FIFO_OVERFLOW_INTERRUPT (1 << 4)
+# define AUX1_SW_DONE_INTERRUPT (1 << 5)
+# define AUX1_LS_DONE_INTERRUPT (1 << 6)
+# define AUX2_SW_DONE_INTERRUPT (1 << 7)
+# define AUX2_LS_DONE_INTERRUPT (1 << 8)
+# define AUX3_SW_DONE_INTERRUPT (1 << 9)
+# define AUX3_LS_DONE_INTERRUPT (1 << 10)
+# define AUX4_SW_DONE_INTERRUPT (1 << 11)
+# define AUX4_LS_DONE_INTERRUPT (1 << 12)
+# define DIGA_DP_FAST_TRAINING_COMPLETE_INTERRUPT (1 << 13)
+# define DIGB_DP_FAST_TRAINING_COMPLETE_INTERRUPT (1 << 14)
+/* DCE 3.2 */
+# define AUX5_SW_DONE_INTERRUPT (1 << 15)
+# define AUX5_LS_DONE_INTERRUPT (1 << 16)
+# define AUX6_SW_DONE_INTERRUPT (1 << 17)
+# define AUX6_LS_DONE_INTERRUPT (1 << 18)
+# define DC_HPD5_INTERRUPT (1 << 19)
+# define DC_HPD5_RX_INTERRUPT (1 << 20)
+# define DC_HPD6_INTERRUPT (1 << 21)
+# define DC_HPD6_RX_INTERRUPT (1 << 22)
+
+#define DCE3_DACA_AUTODETECT_INT_CONTROL 0x7038
+#define DCE3_DACB_AUTODETECT_INT_CONTROL 0x7138
+#define DACA_AUTODETECT_INT_CONTROL 0x7838
+#define DACB_AUTODETECT_INT_CONTROL 0x7a38
+# define DACx_AUTODETECT_ACK (1 << 0)
+# define DACx_AUTODETECT_INT_ENABLE (1 << 16)
+
+#define DC_HOT_PLUG_DETECT1_INT_CONTROL 0x7d08
+#define DC_HOT_PLUG_DETECT2_INT_CONTROL 0x7d18
+#define DC_HOT_PLUG_DETECT3_INT_CONTROL 0x7d2c
+# define DC_HOT_PLUG_DETECTx_INT_ACK (1 << 0)
+# define DC_HOT_PLUG_DETECTx_INT_POLARITY (1 << 8)
+# define DC_HOT_PLUG_DETECTx_INT_EN (1 << 16)
+/* DCE 3.2 */
+#define DC_HPD1_INT_CONTROL 0x7d04
+#define DC_HPD2_INT_CONTROL 0x7d10
+#define DC_HPD3_INT_CONTROL 0x7d1c
+#define DC_HPD4_INT_CONTROL 0x7d28
+# define DC_HPDx_INT_ACK (1 << 0)
+# define DC_HPDx_INT_POLARITY (1 << 8)
+# define DC_HPDx_INT_EN (1 << 16)
+# define DC_HPDx_RX_INT_ACK (1 << 20)
+# define DC_HPDx_RX_INT_EN (1 << 24)
/*
* PM4
@@ -486,7 +656,6 @@
#define PACKET3_WAIT_REG_MEM 0x3C
#define PACKET3_MEM_WRITE 0x3D
#define PACKET3_INDIRECT_BUFFER 0x32
-#define PACKET3_CP_INTERRUPT 0x40
#define PACKET3_SURFACE_SYNC 0x43
# define PACKET3_CB0_DEST_BASE_ENA (1 << 6)
# define PACKET3_TC_ACTION_ENA (1 << 23)
@@ -660,4 +829,5 @@
#define S_000E60_SOFT_RESET_TSC(x) (((x) & 1) << 16)
#define S_000E60_SOFT_RESET_VMC(x) (((x) & 1) << 17)
+#define R_005480_HDP_MEM_COHERENCY_FLUSH_CNTL 0x5480
#endif
diff --git a/drivers/gpu/drm/radeon/radeon.h b/drivers/gpu/drm/radeon/radeon.h
index 5ab35b81c86..f3deb4982b2 100644
--- a/drivers/gpu/drm/radeon/radeon.h
+++ b/drivers/gpu/drm/radeon/radeon.h
@@ -28,8 +28,6 @@
#ifndef __RADEON_H__
#define __RADEON_H__
-#include "radeon_object.h"
-
/* TODO: Here are things that needs to be done :
* - surface allocator & initializer : (bit like scratch reg) should
* initialize HDP_ stuff on RS600, R600, R700 hw, well anythings
@@ -67,6 +65,11 @@
#include <linux/list.h>
#include <linux/kref.h>
+#include <ttm/ttm_bo_api.h>
+#include <ttm/ttm_bo_driver.h>
+#include <ttm/ttm_placement.h>
+#include <ttm/ttm_module.h>
+
#include "radeon_family.h"
#include "radeon_mode.h"
#include "radeon_reg.h"
@@ -139,6 +142,10 @@ struct radeon_clock {
uint32_t default_sclk;
};
+/*
+ * Power management
+ */
+int radeon_pm_init(struct radeon_device *rdev);
/*
* Fences.
@@ -182,76 +189,60 @@ void radeon_fence_unref(struct radeon_fence **fence);
* Tiling registers
*/
struct radeon_surface_reg {
- struct radeon_object *robj;
+ struct radeon_bo *bo;
};
#define RADEON_GEM_MAX_SURFACES 8
/*
- * Radeon buffer.
+ * TTM.
*/
-struct radeon_object;
+struct radeon_mman {
+ struct ttm_bo_global_ref bo_global_ref;
+ struct ttm_global_reference mem_global_ref;
+ bool mem_global_referenced;
+ struct ttm_bo_device bdev;
+};
-struct radeon_object_list {
+struct radeon_bo {
+ /* Protected by gem.mutex */
+ struct list_head list;
+ /* Protected by tbo.reserved */
+ struct ttm_buffer_object tbo;
+ struct ttm_bo_kmap_obj kmap;
+ unsigned pin_count;
+ void *kptr;
+ u32 tiling_flags;
+ u32 pitch;
+ int surface_reg;
+ /* Constant after initialization */
+ struct radeon_device *rdev;
+ struct drm_gem_object *gobj;
+};
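Because struct radeon_bo now embeds the ttm_buffer_object directly, TTM callbacks can recover the wrapper with a container_of(); a sketch of the accessor this layout implies (helper name assumed, not taken from the patch):

/* Assumed helper -- the point is only that embedding tbo makes the
 * TTM-to-radeon conversion a container_of(). */
static inline struct radeon_bo *ttm_to_radeon_bo(struct ttm_buffer_object *tbo)
{
	return container_of(tbo, struct radeon_bo, tbo);
}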
+
+struct radeon_bo_list {
struct list_head list;
- struct radeon_object *robj;
+ struct radeon_bo *bo;
uint64_t gpu_offset;
unsigned rdomain;
unsigned wdomain;
- uint32_t tiling_flags;
+ u32 tiling_flags;
};
-int radeon_object_init(struct radeon_device *rdev);
-void radeon_object_fini(struct radeon_device *rdev);
-int radeon_object_create(struct radeon_device *rdev,
- struct drm_gem_object *gobj,
- unsigned long size,
- bool kernel,
- uint32_t domain,
- bool interruptible,
- struct radeon_object **robj_ptr);
-int radeon_object_kmap(struct radeon_object *robj, void **ptr);
-void radeon_object_kunmap(struct radeon_object *robj);
-void radeon_object_unref(struct radeon_object **robj);
-int radeon_object_pin(struct radeon_object *robj, uint32_t domain,
- uint64_t *gpu_addr);
-void radeon_object_unpin(struct radeon_object *robj);
-int radeon_object_wait(struct radeon_object *robj);
-int radeon_object_busy_domain(struct radeon_object *robj, uint32_t *cur_placement);
-int radeon_object_evict_vram(struct radeon_device *rdev);
-int radeon_object_mmap(struct radeon_object *robj, uint64_t *offset);
-void radeon_object_force_delete(struct radeon_device *rdev);
-void radeon_object_list_add_object(struct radeon_object_list *lobj,
- struct list_head *head);
-int radeon_object_list_validate(struct list_head *head, void *fence);
-void radeon_object_list_unvalidate(struct list_head *head);
-void radeon_object_list_clean(struct list_head *head);
-int radeon_object_fbdev_mmap(struct radeon_object *robj,
- struct vm_area_struct *vma);
-unsigned long radeon_object_size(struct radeon_object *robj);
-void radeon_object_clear_surface_reg(struct radeon_object *robj);
-int radeon_object_check_tiling(struct radeon_object *robj, bool has_moved,
- bool force_drop);
-void radeon_object_set_tiling_flags(struct radeon_object *robj,
- uint32_t tiling_flags, uint32_t pitch);
-void radeon_object_get_tiling_flags(struct radeon_object *robj, uint32_t *tiling_flags, uint32_t *pitch);
-void radeon_bo_move_notify(struct ttm_buffer_object *bo,
- struct ttm_mem_reg *mem);
-void radeon_bo_fault_reserve_notify(struct ttm_buffer_object *bo);
/*
* GEM objects.
*/
struct radeon_gem {
+ struct mutex mutex;
struct list_head objects;
};
int radeon_gem_init(struct radeon_device *rdev);
void radeon_gem_fini(struct radeon_device *rdev);
int radeon_gem_object_create(struct radeon_device *rdev, int size,
- int alignment, int initial_domain,
- bool discardable, bool kernel,
- bool interruptible,
- struct drm_gem_object **obj);
+ int alignment, int initial_domain,
+ bool discardable, bool kernel,
+ struct drm_gem_object **obj);
int radeon_gem_object_pin(struct drm_gem_object *obj, uint32_t pin_domain,
uint64_t *gpu_addr);
void radeon_gem_object_unpin(struct drm_gem_object *obj);
@@ -267,7 +258,7 @@ struct radeon_gart_table_ram {
};
struct radeon_gart_table_vram {
- struct radeon_object *robj;
+ struct radeon_bo *robj;
volatile uint32_t *ptr;
};
@@ -276,6 +267,8 @@ union radeon_gart_table {
struct radeon_gart_table_vram vram;
};
+#define RADEON_GPU_PAGE_SIZE 4096
+
struct radeon_gart {
dma_addr_t table_addr;
unsigned num_gpu_pages;
@@ -346,11 +339,14 @@ struct radeon_irq {
bool sw_int;
/* FIXME: use a define max crtc rather than hardcode it */
bool crtc_vblank_int[2];
+ spinlock_t sw_lock;
+ int sw_refcount;
};
int radeon_irq_kms_init(struct radeon_device *rdev);
void radeon_irq_kms_fini(struct radeon_device *rdev);
-
+void radeon_irq_kms_sw_irq_get(struct radeon_device *rdev);
+void radeon_irq_kms_sw_irq_put(struct radeon_device *rdev);
/*
* CP & ring.
@@ -370,7 +366,7 @@ struct radeon_ib {
*/
struct radeon_ib_pool {
struct mutex mutex;
- struct radeon_object *robj;
+ struct radeon_bo *robj;
struct list_head scheduled_ibs;
struct radeon_ib ibs[RADEON_IB_POOL_SIZE];
bool ready;
@@ -378,7 +374,7 @@ struct radeon_ib_pool {
};
struct radeon_cp {
- struct radeon_object *ring_obj;
+ struct radeon_bo *ring_obj;
volatile uint32_t *ring;
unsigned rptr;
unsigned wptr;
@@ -393,8 +389,25 @@ struct radeon_cp {
bool ready;
};
+/*
+ * R6xx+ IH ring
+ */
+struct r600_ih {
+ struct radeon_bo *ring_obj;
+ volatile uint32_t *ring;
+ unsigned rptr;
+ unsigned wptr;
+ unsigned wptr_old;
+ unsigned ring_size;
+ uint64_t gpu_addr;
+ uint32_t align_mask;
+ uint32_t ptr_mask;
+ spinlock_t lock;
+ bool enabled;
+};
+
struct r600_blit {
- struct radeon_object *shader_obj;
+ struct radeon_bo *shader_obj;
u64 shader_gpu_addr;
u32 vs_offset, ps_offset;
u32 state_offset;
@@ -424,8 +437,8 @@ void radeon_ring_fini(struct radeon_device *rdev);
*/
struct radeon_cs_reloc {
struct drm_gem_object *gobj;
- struct radeon_object *robj;
- struct radeon_object_list lobj;
+ struct radeon_bo *robj;
+ struct radeon_bo_list lobj;
uint32_t handle;
uint32_t flags;
};
@@ -513,6 +526,7 @@ typedef int (*radeon_packet3_check_t)(struct radeon_cs_parser *p,
* AGP
*/
int radeon_agp_init(struct radeon_device *rdev);
+void radeon_agp_resume(struct radeon_device *rdev);
void radeon_agp_fini(struct radeon_device *rdev);
@@ -520,7 +534,7 @@ void radeon_agp_fini(struct radeon_device *rdev);
* Writeback
*/
struct radeon_wb {
- struct radeon_object *wb_obj;
+ struct radeon_bo *wb_obj;
volatile uint32_t *wb;
uint64_t gpu_addr;
};
@@ -621,7 +635,9 @@ struct radeon_asic {
uint64_t dst_offset,
unsigned num_pages,
struct radeon_fence *fence);
+ uint32_t (*get_engine_clock)(struct radeon_device *rdev);
void (*set_engine_clock)(struct radeon_device *rdev, uint32_t eng_clock);
+ uint32_t (*get_memory_clock)(struct radeon_device *rdev);
void (*set_memory_clock)(struct radeon_device *rdev, uint32_t mem_clock);
void (*set_pcie_lanes)(struct radeon_device *rdev, int lanes);
void (*set_clock_gating)(struct radeon_device *rdev, int enable);
@@ -630,6 +646,7 @@ struct radeon_asic {
uint32_t offset, uint32_t obj_size);
int (*clear_surface_reg)(struct radeon_device *rdev, int reg);
void (*bandwidth_update)(struct radeon_device *rdev);
+ void (*hdp_flush)(struct radeon_device *rdev);
};
/*
@@ -742,9 +759,9 @@ struct radeon_device {
uint8_t *bios;
bool is_atom_bios;
uint16_t bios_header_start;
- struct radeon_object *stollen_vga_memory;
+ struct radeon_bo *stollen_vga_memory;
struct fb_info *fbdev_info;
- struct radeon_object *fbdev_robj;
+ struct radeon_bo *fbdev_rbo;
struct radeon_framebuffer *fbdev_rfb;
/* Register mmio */
resource_size_t rmmio_base;
@@ -782,7 +799,10 @@ struct radeon_device {
struct radeon_surface_reg surface_regs[RADEON_GEM_MAX_SURFACES];
const struct firmware *me_fw; /* all family ME firmware */
const struct firmware *pfp_fw; /* r6/700 PFP firmware */
+ const struct firmware *rlc_fw; /* r6/700 RLC firmware */
struct r600_blit r600_blit;
+ int msi_enabled; /* msi enabled */
+ struct r600_ih ih; /* r6/700 interrupt ring */
};
int radeon_device_init(struct radeon_device *rdev,
@@ -819,6 +839,10 @@ static inline void r100_mm_wreg(struct radeon_device *rdev, uint32_t reg, uint32
}
}
+/*
+ * Cast helper
+ */
+#define to_radeon_fence(p) ((struct radeon_fence *)(p))
/*
* Registers read & write functions.
@@ -952,19 +976,23 @@ static inline void radeon_ring_write(struct radeon_device *rdev, uint32_t v)
#define radeon_copy_blit(rdev, s, d, np, f) (rdev)->asic->copy_blit((rdev), (s), (d), (np), (f))
#define radeon_copy_dma(rdev, s, d, np, f) (rdev)->asic->copy_dma((rdev), (s), (d), (np), (f))
#define radeon_copy(rdev, s, d, np, f) (rdev)->asic->copy((rdev), (s), (d), (np), (f))
+#define radeon_get_engine_clock(rdev) (rdev)->asic->get_engine_clock((rdev))
#define radeon_set_engine_clock(rdev, e) (rdev)->asic->set_engine_clock((rdev), (e))
-#define radeon_set_memory_clock(rdev, e) (rdev)->asic->set_engine_clock((rdev), (e))
+#define radeon_get_memory_clock(rdev) (rdev)->asic->get_memory_clock((rdev))
+#define radeon_set_memory_clock(rdev, e) (rdev)->asic->set_memory_clock((rdev), (e))
#define radeon_set_pcie_lanes(rdev, l) (rdev)->asic->set_pcie_lanes((rdev), (l))
#define radeon_set_clock_gating(rdev, e) (rdev)->asic->set_clock_gating((rdev), (e))
#define radeon_set_surface_reg(rdev, r, f, p, o, s) ((rdev)->asic->set_surface_reg((rdev), (r), (f), (p), (o), (s)))
#define radeon_clear_surface_reg(rdev, r) ((rdev)->asic->clear_surface_reg((rdev), (r)))
#define radeon_bandwidth_update(rdev) (rdev)->asic->bandwidth_update((rdev))
+#define radeon_hdp_flush(rdev) (rdev)->asic->hdp_flush((rdev))
/* Common functions */
extern int radeon_gart_table_vram_pin(struct radeon_device *rdev);
extern int radeon_modeset_init(struct radeon_device *rdev);
extern void radeon_modeset_fini(struct radeon_device *rdev);
extern bool radeon_card_posted(struct radeon_device *rdev);
+extern bool radeon_boot_test_post_card(struct radeon_device *rdev);
extern int radeon_clocks_init(struct radeon_device *rdev);
extern void radeon_clocks_fini(struct radeon_device *rdev);
extern void radeon_scratch_init(struct radeon_device *rdev);
@@ -1009,7 +1037,7 @@ extern int r100_cp_reset(struct radeon_device *rdev);
extern void r100_vga_render_disable(struct radeon_device *rdev);
extern int r100_cs_track_check_pkt3_indx_buffer(struct radeon_cs_parser *p,
struct radeon_cs_packet *pkt,
- struct radeon_object *robj);
+ struct radeon_bo *robj);
extern int r100_cs_parse_packet0(struct radeon_cs_parser *p,
struct radeon_cs_packet *pkt,
const unsigned *auth, unsigned n,
@@ -1017,6 +1045,8 @@ extern int r100_cs_parse_packet0(struct radeon_cs_parser *p,
extern int r100_cs_packet_parse(struct radeon_cs_parser *p,
struct radeon_cs_packet *pkt,
unsigned idx);
+extern void r100_enable_bm(struct radeon_device *rdev);
+extern void r100_set_common_regs(struct radeon_device *rdev);
/* rv200,rv250,rv280 */
extern void r200_set_safe_registers(struct radeon_device *rdev);
@@ -1092,7 +1122,14 @@ extern void r600_wb_disable(struct radeon_device *rdev);
extern void r600_scratch_init(struct radeon_device *rdev);
extern int r600_blit_init(struct radeon_device *rdev);
extern void r600_blit_fini(struct radeon_device *rdev);
-extern int r600_cp_init_microcode(struct radeon_device *rdev);
+extern int r600_init_microcode(struct radeon_device *rdev);
extern int r600_gpu_reset(struct radeon_device *rdev);
+/* r600 irq */
+extern int r600_irq_init(struct radeon_device *rdev);
+extern void r600_irq_fini(struct radeon_device *rdev);
+extern void r600_ih_ring_init(struct radeon_device *rdev, unsigned ring_size);
+extern int r600_irq_set(struct radeon_device *rdev);
+
+#include "radeon_object.h"
#endif
diff --git a/drivers/gpu/drm/radeon/radeon_agp.c b/drivers/gpu/drm/radeon/radeon_agp.c
index 23ea9955ac5..54bf49a6d67 100644
--- a/drivers/gpu/drm/radeon/radeon_agp.c
+++ b/drivers/gpu/drm/radeon/radeon_agp.c
@@ -237,6 +237,18 @@ int radeon_agp_init(struct radeon_device *rdev)
#endif
}
+void radeon_agp_resume(struct radeon_device *rdev)
+{
+#if __OS_HAS_AGP
+ int r;
+ if (rdev->flags & RADEON_IS_AGP) {
+ r = radeon_agp_init(rdev);
+ if (r)
+ dev_warn(rdev->dev, "radeon AGP reinit failed\n");
+ }
+#endif
+}
+
void radeon_agp_fini(struct radeon_device *rdev)
{
#if __OS_HAS_AGP
diff --git a/drivers/gpu/drm/radeon/radeon_asic.h b/drivers/gpu/drm/radeon/radeon_asic.h
index c3532c7a6f3..755f50555c3 100644
--- a/drivers/gpu/drm/radeon/radeon_asic.h
+++ b/drivers/gpu/drm/radeon/radeon_asic.h
@@ -31,10 +31,13 @@
/*
* common functions
*/
+uint32_t radeon_legacy_get_engine_clock(struct radeon_device *rdev);
void radeon_legacy_set_engine_clock(struct radeon_device *rdev, uint32_t eng_clock);
void radeon_legacy_set_clock_gating(struct radeon_device *rdev, int enable);
+uint32_t radeon_atom_get_engine_clock(struct radeon_device *rdev);
void radeon_atom_set_engine_clock(struct radeon_device *rdev, uint32_t eng_clock);
+uint32_t radeon_atom_get_memory_clock(struct radeon_device *rdev);
void radeon_atom_set_memory_clock(struct radeon_device *rdev, uint32_t mem_clock);
void radeon_atom_set_clock_gating(struct radeon_device *rdev, int enable);
@@ -73,6 +76,7 @@ int r100_clear_surface_reg(struct radeon_device *rdev, int reg);
void r100_bandwidth_update(struct radeon_device *rdev);
void r100_ring_ib_execute(struct radeon_device *rdev, struct radeon_ib *ib);
int r100_ring_test(struct radeon_device *rdev);
+void r100_hdp_flush(struct radeon_device *rdev);
static struct radeon_asic r100_asic = {
.init = &r100_init,
@@ -95,13 +99,16 @@ static struct radeon_asic r100_asic = {
.copy_blit = &r100_copy_blit,
.copy_dma = NULL,
.copy = &r100_copy_blit,
+ .get_engine_clock = &radeon_legacy_get_engine_clock,
.set_engine_clock = &radeon_legacy_set_engine_clock,
+ .get_memory_clock = NULL,
.set_memory_clock = NULL,
.set_pcie_lanes = NULL,
.set_clock_gating = &radeon_legacy_set_clock_gating,
.set_surface_reg = r100_set_surface_reg,
.clear_surface_reg = r100_clear_surface_reg,
.bandwidth_update = &r100_bandwidth_update,
+ .hdp_flush = &r100_hdp_flush,
};
@@ -148,13 +155,16 @@ static struct radeon_asic r300_asic = {
.copy_blit = &r100_copy_blit,
.copy_dma = &r300_copy_dma,
.copy = &r100_copy_blit,
+ .get_engine_clock = &radeon_legacy_get_engine_clock,
.set_engine_clock = &radeon_legacy_set_engine_clock,
+ .get_memory_clock = NULL,
.set_memory_clock = NULL,
.set_pcie_lanes = &rv370_set_pcie_lanes,
.set_clock_gating = &radeon_legacy_set_clock_gating,
.set_surface_reg = r100_set_surface_reg,
.clear_surface_reg = r100_clear_surface_reg,
.bandwidth_update = &r100_bandwidth_update,
+ .hdp_flush = &r100_hdp_flush,
};
/*
@@ -185,13 +195,16 @@ static struct radeon_asic r420_asic = {
.copy_blit = &r100_copy_blit,
.copy_dma = &r300_copy_dma,
.copy = &r100_copy_blit,
+ .get_engine_clock = &radeon_atom_get_engine_clock,
.set_engine_clock = &radeon_atom_set_engine_clock,
+ .get_memory_clock = &radeon_atom_get_memory_clock,
.set_memory_clock = &radeon_atom_set_memory_clock,
.set_pcie_lanes = &rv370_set_pcie_lanes,
.set_clock_gating = &radeon_atom_set_clock_gating,
.set_surface_reg = r100_set_surface_reg,
.clear_surface_reg = r100_clear_surface_reg,
.bandwidth_update = &r100_bandwidth_update,
+ .hdp_flush = &r100_hdp_flush,
};
@@ -227,13 +240,16 @@ static struct radeon_asic rs400_asic = {
.copy_blit = &r100_copy_blit,
.copy_dma = &r300_copy_dma,
.copy = &r100_copy_blit,
+ .get_engine_clock = &radeon_legacy_get_engine_clock,
.set_engine_clock = &radeon_legacy_set_engine_clock,
+ .get_memory_clock = NULL,
.set_memory_clock = NULL,
.set_pcie_lanes = NULL,
.set_clock_gating = &radeon_legacy_set_clock_gating,
.set_surface_reg = r100_set_surface_reg,
.clear_surface_reg = r100_clear_surface_reg,
.bandwidth_update = &r100_bandwidth_update,
+ .hdp_flush = &r100_hdp_flush,
};
@@ -273,11 +289,14 @@ static struct radeon_asic rs600_asic = {
.copy_blit = &r100_copy_blit,
.copy_dma = &r300_copy_dma,
.copy = &r100_copy_blit,
+ .get_engine_clock = &radeon_atom_get_engine_clock,
.set_engine_clock = &radeon_atom_set_engine_clock,
+ .get_memory_clock = &radeon_atom_get_memory_clock,
.set_memory_clock = &radeon_atom_set_memory_clock,
.set_pcie_lanes = NULL,
.set_clock_gating = &radeon_atom_set_clock_gating,
.bandwidth_update = &rs600_bandwidth_update,
+ .hdp_flush = &r100_hdp_flush,
};
@@ -312,13 +331,16 @@ static struct radeon_asic rs690_asic = {
.copy_blit = &r100_copy_blit,
.copy_dma = &r300_copy_dma,
.copy = &r300_copy_dma,
+ .get_engine_clock = &radeon_atom_get_engine_clock,
.set_engine_clock = &radeon_atom_set_engine_clock,
+ .get_memory_clock = &radeon_atom_get_memory_clock,
.set_memory_clock = &radeon_atom_set_memory_clock,
.set_pcie_lanes = NULL,
.set_clock_gating = &radeon_atom_set_clock_gating,
.set_surface_reg = r100_set_surface_reg,
.clear_surface_reg = r100_clear_surface_reg,
.bandwidth_update = &rs690_bandwidth_update,
+ .hdp_flush = &r100_hdp_flush,
};
@@ -357,13 +379,16 @@ static struct radeon_asic rv515_asic = {
.copy_blit = &r100_copy_blit,
.copy_dma = &r300_copy_dma,
.copy = &r100_copy_blit,
+ .get_engine_clock = &radeon_atom_get_engine_clock,
.set_engine_clock = &radeon_atom_set_engine_clock,
+ .get_memory_clock = &radeon_atom_get_memory_clock,
.set_memory_clock = &radeon_atom_set_memory_clock,
.set_pcie_lanes = &rv370_set_pcie_lanes,
.set_clock_gating = &radeon_atom_set_clock_gating,
.set_surface_reg = r100_set_surface_reg,
.clear_surface_reg = r100_clear_surface_reg,
.bandwidth_update = &rv515_bandwidth_update,
+ .hdp_flush = &r100_hdp_flush,
};
@@ -393,13 +418,16 @@ static struct radeon_asic r520_asic = {
.copy_blit = &r100_copy_blit,
.copy_dma = &r300_copy_dma,
.copy = &r100_copy_blit,
+ .get_engine_clock = &radeon_atom_get_engine_clock,
.set_engine_clock = &radeon_atom_set_engine_clock,
+ .get_memory_clock = &radeon_atom_get_memory_clock,
.set_memory_clock = &radeon_atom_set_memory_clock,
.set_pcie_lanes = &rv370_set_pcie_lanes,
.set_clock_gating = &radeon_atom_set_clock_gating,
.set_surface_reg = r100_set_surface_reg,
.clear_surface_reg = r100_clear_surface_reg,
.bandwidth_update = &rv515_bandwidth_update,
+ .hdp_flush = &r100_hdp_flush,
};
/*
@@ -436,6 +464,7 @@ int r600_ring_test(struct radeon_device *rdev);
int r600_copy_blit(struct radeon_device *rdev,
uint64_t src_offset, uint64_t dst_offset,
unsigned num_pages, struct radeon_fence *fence);
+void r600_hdp_flush(struct radeon_device *rdev);
static struct radeon_asic r600_asic = {
.init = &r600_init,
@@ -451,18 +480,22 @@ static struct radeon_asic r600_asic = {
.ring_ib_execute = &r600_ring_ib_execute,
.irq_set = &r600_irq_set,
.irq_process = &r600_irq_process,
+ .get_vblank_counter = &rs600_get_vblank_counter,
.fence_ring_emit = &r600_fence_ring_emit,
.cs_parse = &r600_cs_parse,
.copy_blit = &r600_copy_blit,
.copy_dma = &r600_copy_blit,
.copy = &r600_copy_blit,
+ .get_engine_clock = &radeon_atom_get_engine_clock,
.set_engine_clock = &radeon_atom_set_engine_clock,
+ .get_memory_clock = &radeon_atom_get_memory_clock,
.set_memory_clock = &radeon_atom_set_memory_clock,
.set_pcie_lanes = NULL,
.set_clock_gating = &radeon_atom_set_clock_gating,
.set_surface_reg = r600_set_surface_reg,
.clear_surface_reg = r600_clear_surface_reg,
.bandwidth_update = &rv515_bandwidth_update,
+ .hdp_flush = &r600_hdp_flush,
};
/*
@@ -488,18 +521,22 @@ static struct radeon_asic rv770_asic = {
.ring_ib_execute = &r600_ring_ib_execute,
.irq_set = &r600_irq_set,
.irq_process = &r600_irq_process,
+ .get_vblank_counter = &rs600_get_vblank_counter,
.fence_ring_emit = &r600_fence_ring_emit,
.cs_parse = &r600_cs_parse,
.copy_blit = &r600_copy_blit,
.copy_dma = &r600_copy_blit,
.copy = &r600_copy_blit,
+ .get_engine_clock = &radeon_atom_get_engine_clock,
.set_engine_clock = &radeon_atom_set_engine_clock,
+ .get_memory_clock = &radeon_atom_get_memory_clock,
.set_memory_clock = &radeon_atom_set_memory_clock,
.set_pcie_lanes = NULL,
.set_clock_gating = &radeon_atom_set_clock_gating,
.set_surface_reg = r600_set_surface_reg,
.clear_surface_reg = r600_clear_surface_reg,
.bandwidth_update = &rv515_bandwidth_update,
+ .hdp_flush = &r600_hdp_flush,
};
#endif
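
A note for readers following the new per-ASIC callbacks: the r100/r300/rs400 tables above intentionally leave .get_memory_clock NULL, while the wrapper macros added to radeon.h expand directly to rdev->asic-><callback>(rdev). A minimal sketch of a guarded query, assuming only the struct fields and macros shown in this diff (the helper itself is hypothetical):

/* Hypothetical caller, for illustration only: optional callbacks such as
 * get_memory_clock must be NULL-checked before going through the macro. */
static uint32_t example_current_memory_clock(struct radeon_device *rdev)
{
	if (rdev->asic->get_memory_clock)
		return radeon_get_memory_clock(rdev);	/* AtomBIOS families */
	return 0;	/* legacy families (r100/r300/rs400) provide no callback */
}
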
diff --git a/drivers/gpu/drm/radeon/radeon_atombios.c b/drivers/gpu/drm/radeon/radeon_atombios.c
index 5b6c08cee40..5e414102c87 100644
--- a/drivers/gpu/drm/radeon/radeon_atombios.c
+++ b/drivers/gpu/drm/radeon/radeon_atombios.c
@@ -46,7 +46,8 @@ radeon_add_atom_connector(struct drm_device *dev,
uint32_t supported_device,
int connector_type,
struct radeon_i2c_bus_rec *i2c_bus,
- bool linkb, uint32_t igp_lane_info);
+ bool linkb, uint32_t igp_lane_info,
+ uint16_t connector_object_id);
/* from radeon_legacy_encoder.c */
extern void
@@ -81,18 +82,18 @@ static inline struct radeon_i2c_bus_rec radeon_lookup_gpio(struct drm_device
i2c.mask_clk_reg = le16_to_cpu(gpio.usClkMaskRegisterIndex) * 4;
i2c.mask_data_reg = le16_to_cpu(gpio.usDataMaskRegisterIndex) * 4;
- i2c.put_clk_reg = le16_to_cpu(gpio.usClkEnRegisterIndex) * 4;
- i2c.put_data_reg = le16_to_cpu(gpio.usDataEnRegisterIndex) * 4;
- i2c.get_clk_reg = le16_to_cpu(gpio.usClkY_RegisterIndex) * 4;
- i2c.get_data_reg = le16_to_cpu(gpio.usDataY_RegisterIndex) * 4;
+ i2c.en_clk_reg = le16_to_cpu(gpio.usClkEnRegisterIndex) * 4;
+ i2c.en_data_reg = le16_to_cpu(gpio.usDataEnRegisterIndex) * 4;
+ i2c.y_clk_reg = le16_to_cpu(gpio.usClkY_RegisterIndex) * 4;
+ i2c.y_data_reg = le16_to_cpu(gpio.usDataY_RegisterIndex) * 4;
i2c.a_clk_reg = le16_to_cpu(gpio.usClkA_RegisterIndex) * 4;
i2c.a_data_reg = le16_to_cpu(gpio.usDataA_RegisterIndex) * 4;
i2c.mask_clk_mask = (1 << gpio.ucClkMaskShift);
i2c.mask_data_mask = (1 << gpio.ucDataMaskShift);
- i2c.put_clk_mask = (1 << gpio.ucClkEnShift);
- i2c.put_data_mask = (1 << gpio.ucDataEnShift);
- i2c.get_clk_mask = (1 << gpio.ucClkY_Shift);
- i2c.get_data_mask = (1 << gpio.ucDataY_Shift);
+ i2c.en_clk_mask = (1 << gpio.ucClkEnShift);
+ i2c.en_data_mask = (1 << gpio.ucDataEnShift);
+ i2c.y_clk_mask = (1 << gpio.ucClkY_Shift);
+ i2c.y_data_mask = (1 << gpio.ucDataY_Shift);
i2c.a_clk_mask = (1 << gpio.ucClkA_Shift);
i2c.a_data_mask = (1 << gpio.ucDataA_Shift);
i2c.valid = true;
@@ -134,6 +135,23 @@ static bool radeon_atom_apply_quirks(struct drm_device *dev,
}
}
+ /* HIS X1300 is DVI+VGA, not DVI+DVI */
+ if ((dev->pdev->device == 0x7146) &&
+ (dev->pdev->subsystem_vendor == 0x17af) &&
+ (dev->pdev->subsystem_device == 0x2058)) {
+ if (supported_device == ATOM_DEVICE_DFP1_SUPPORT)
+ return false;
+ }
+
+ /* Gigabyte X1300 is DVI+VGA, not DVI+DVI */
+ if ((dev->pdev->device == 0x7142) &&
+ (dev->pdev->subsystem_vendor == 0x1458) &&
+ (dev->pdev->subsystem_device == 0x2134)) {
+ if (supported_device == ATOM_DEVICE_DFP1_SUPPORT)
+ return false;
+ }
+
/* Funky macbooks */
if ((dev->pdev->device == 0x71C5) &&
(dev->pdev->subsystem_vendor == 0x106b) &&
@@ -171,6 +189,15 @@ static bool radeon_atom_apply_quirks(struct drm_device *dev,
}
}
+ /* Acer laptop reports DVI-D as DVI-I */
+ if ((dev->pdev->device == 0x95c4) &&
+ (dev->pdev->subsystem_vendor == 0x1025) &&
+ (dev->pdev->subsystem_device == 0x013c)) {
+ if ((*connector_type == DRM_MODE_CONNECTOR_DVII) &&
+ (supported_device == ATOM_DEVICE_DFP1_SUPPORT))
+ *connector_type = DRM_MODE_CONNECTOR_DVID;
+ }
+
return true;
}
@@ -193,6 +220,23 @@ const int supported_devices_connector_convert[] = {
DRM_MODE_CONNECTOR_DisplayPort
};
+const uint16_t supported_devices_connector_object_id_convert[] = {
+ CONNECTOR_OBJECT_ID_NONE,
+ CONNECTOR_OBJECT_ID_VGA,
+ CONNECTOR_OBJECT_ID_DUAL_LINK_DVI_I, /* not all boards support DL */
+ CONNECTOR_OBJECT_ID_DUAL_LINK_DVI_D, /* not all boards support DL */
+ CONNECTOR_OBJECT_ID_VGA, /* technically DVI-A */
+ CONNECTOR_OBJECT_ID_COMPOSITE,
+ CONNECTOR_OBJECT_ID_SVIDEO,
+ CONNECTOR_OBJECT_ID_LVDS,
+ CONNECTOR_OBJECT_ID_9PIN_DIN,
+ CONNECTOR_OBJECT_ID_9PIN_DIN,
+ CONNECTOR_OBJECT_ID_DISPLAYPORT,
+ CONNECTOR_OBJECT_ID_HDMI_TYPE_A,
+ CONNECTOR_OBJECT_ID_HDMI_TYPE_B,
+ CONNECTOR_OBJECT_ID_SVIDEO
+};
+
const int object_connector_convert[] = {
DRM_MODE_CONNECTOR_Unknown,
DRM_MODE_CONNECTOR_DVII,
@@ -229,7 +273,7 @@ bool radeon_get_atom_connector_info_from_object_table(struct drm_device *dev)
ATOM_OBJECT_HEADER *obj_header;
int i, j, path_size, device_support;
int connector_type;
- uint16_t igp_lane_info, conn_id;
+ uint16_t igp_lane_info, conn_id, connector_object_id;
bool linkb;
struct radeon_i2c_bus_rec ddc_bus;
@@ -277,7 +321,8 @@ bool radeon_get_atom_connector_info_from_object_table(struct drm_device *dev)
ATOM_DEVICE_CV_SUPPORT)
continue;
- if ((rdev->family == CHIP_RS780) &&
+ /* IGP chips */
+ if ((rdev->flags & RADEON_IS_IGP) &&
(con_obj_id ==
CONNECTOR_OBJECT_ID_PCIE_CONNECTOR)) {
uint16_t igp_offset = 0;
@@ -311,6 +356,7 @@ bool radeon_get_atom_connector_info_from_object_table(struct drm_device *dev)
connector_type =
object_connector_convert
[ct];
+ connector_object_id = ct;
igp_lane_info =
slot_config & 0xffff;
} else
@@ -321,6 +367,7 @@ bool radeon_get_atom_connector_info_from_object_table(struct drm_device *dev)
igp_lane_info = 0;
connector_type =
object_connector_convert[con_obj_id];
+ connector_object_id = con_obj_id;
}
if (connector_type == DRM_MODE_CONNECTOR_Unknown)
@@ -425,7 +472,8 @@ bool radeon_get_atom_connector_info_from_object_table(struct drm_device *dev)
le16_to_cpu(path->
usDeviceTag),
connector_type, &ddc_bus,
- linkb, igp_lane_info);
+ linkb, igp_lane_info,
+ connector_object_id);
}
}
@@ -435,6 +483,45 @@ bool radeon_get_atom_connector_info_from_object_table(struct drm_device *dev)
return true;
}
+static uint16_t atombios_get_connector_object_id(struct drm_device *dev,
+ int connector_type,
+ uint16_t devices)
+{
+ struct radeon_device *rdev = dev->dev_private;
+
+ if (rdev->flags & RADEON_IS_IGP) {
+ return supported_devices_connector_object_id_convert
+ [connector_type];
+ } else if (((connector_type == DRM_MODE_CONNECTOR_DVII) ||
+ (connector_type == DRM_MODE_CONNECTOR_DVID)) &&
+ (devices & ATOM_DEVICE_DFP2_SUPPORT)) {
+ struct radeon_mode_info *mode_info = &rdev->mode_info;
+ struct atom_context *ctx = mode_info->atom_context;
+ int index = GetIndexIntoMasterTable(DATA, XTMDS_Info);
+ uint16_t size, data_offset;
+ uint8_t frev, crev;
+ ATOM_XTMDS_INFO *xtmds;
+
+ atom_parse_data_header(ctx, index, &size, &frev, &crev, &data_offset);
+ xtmds = (ATOM_XTMDS_INFO *)(ctx->bios + data_offset);
+
+ if (xtmds->ucSupportedLink & ATOM_XTMDS_SUPPORTED_DUALLINK) {
+ if (connector_type == DRM_MODE_CONNECTOR_DVII)
+ return CONNECTOR_OBJECT_ID_DUAL_LINK_DVI_I;
+ else
+ return CONNECTOR_OBJECT_ID_DUAL_LINK_DVI_D;
+ } else {
+ if (connector_type == DRM_MODE_CONNECTOR_DVII)
+ return CONNECTOR_OBJECT_ID_SINGLE_LINK_DVI_I;
+ else
+ return CONNECTOR_OBJECT_ID_SINGLE_LINK_DVI_D;
+ }
+ } else {
+ return supported_devices_connector_object_id_convert
+ [connector_type];
+ }
+}
+
struct bios_connector {
bool valid;
uint16_t line_mux;
@@ -593,14 +680,20 @@ bool radeon_get_atom_connector_info_from_supported_devices_table(struct
/* add the connectors */
for (i = 0; i < ATOM_MAX_SUPPORTED_DEVICE; i++) {
- if (bios_connectors[i].valid)
+ if (bios_connectors[i].valid) {
+ uint16_t connector_object_id =
+ atombios_get_connector_object_id(dev,
+ bios_connectors[i].connector_type,
+ bios_connectors[i].devices);
radeon_add_atom_connector(dev,
bios_connectors[i].line_mux,
bios_connectors[i].devices,
bios_connectors[i].
connector_type,
&bios_connectors[i].ddc_bus,
- false, 0);
+ false, 0,
+ connector_object_id);
+ }
}
radeon_link_encoder_connector(dev);
@@ -641,8 +734,12 @@ bool radeon_atom_get_clock_info(struct drm_device *dev)
le16_to_cpu(firmware_info->info.usReferenceClock);
p1pll->reference_div = 0;
- p1pll->pll_out_min =
- le16_to_cpu(firmware_info->info.usMinPixelClockPLL_Output);
+ if (crev < 2)
+ p1pll->pll_out_min =
+ le16_to_cpu(firmware_info->info.usMinPixelClockPLL_Output);
+ else
+ p1pll->pll_out_min =
+ le32_to_cpu(firmware_info->info_12.ulMinPixelClockPLL_Output);
p1pll->pll_out_max =
le32_to_cpu(firmware_info->info.ulMaxPixelClockPLL_Output);
@@ -651,6 +748,16 @@ bool radeon_atom_get_clock_info(struct drm_device *dev)
p1pll->pll_out_min = 64800;
else
p1pll->pll_out_min = 20000;
+ } else if (p1pll->pll_out_min > 64800) {
+ /* Limiting the pll output range is a good thing generally as
+ * it limits the number of possible pll combinations for a given
+ * frequency presumably to the ones that work best on each card.
+ * However, certain duallink DVI monitors seem to like
+ * pll combinations that would be limited by this at least on
+ * pre-DCE 3.0 r6xx hardware. This might need to be adjusted per
+ * family.
+ */
+ p1pll->pll_out_min = 64800;
}
p1pll->pll_in_min =
@@ -767,6 +874,46 @@ bool radeon_atombios_get_tmds_info(struct radeon_encoder *encoder,
return false;
}
+static struct radeon_atom_ss *radeon_atombios_get_ss_info(struct
+ radeon_encoder
+ *encoder,
+ int id)
+{
+ struct drm_device *dev = encoder->base.dev;
+ struct radeon_device *rdev = dev->dev_private;
+ struct radeon_mode_info *mode_info = &rdev->mode_info;
+ int index = GetIndexIntoMasterTable(DATA, PPLL_SS_Info);
+ uint16_t data_offset;
+ struct _ATOM_SPREAD_SPECTRUM_INFO *ss_info;
+ uint8_t frev, crev;
+ struct radeon_atom_ss *ss = NULL;
+
+ if (id > ATOM_MAX_SS_ENTRY)
+ return NULL;
+
+ atom_parse_data_header(mode_info->atom_context, index, NULL, &frev,
+ &crev, &data_offset);
+
+ ss_info =
+ (struct _ATOM_SPREAD_SPECTRUM_INFO *)(mode_info->atom_context->bios + data_offset);
+
+ if (ss_info) {
+ ss =
+ kzalloc(sizeof(struct radeon_atom_ss), GFP_KERNEL);
+
+ if (!ss)
+ return NULL;
+
+ ss->percentage = le16_to_cpu(ss_info->asSS_Info[id].usSpreadSpectrumPercentage);
+ ss->type = ss_info->asSS_Info[id].ucSpreadSpectrumType;
+ ss->step = ss_info->asSS_Info[id].ucSS_Step;
+ ss->delay = ss_info->asSS_Info[id].ucSS_Delay;
+ ss->range = ss_info->asSS_Info[id].ucSS_Range;
+ ss->refdiv = ss_info->asSS_Info[id].ucRecommendedRef_Div;
+ }
+ return ss;
+}
+
union lvds_info {
struct _ATOM_LVDS_INFO info;
struct _ATOM_LVDS_INFO_V12 info_12;
@@ -780,7 +927,7 @@ struct radeon_encoder_atom_dig *radeon_atombios_get_lvds_info(struct
struct radeon_device *rdev = dev->dev_private;
struct radeon_mode_info *mode_info = &rdev->mode_info;
int index = GetIndexIntoMasterTable(DATA, LVDS_Info);
- uint16_t data_offset;
+ uint16_t data_offset, misc;
union lvds_info *lvds_info;
uint8_t frev, crev;
struct radeon_encoder_atom_dig *lvds = NULL;
@@ -798,28 +945,45 @@ struct radeon_encoder_atom_dig *radeon_atombios_get_lvds_info(struct
if (!lvds)
return NULL;
- lvds->native_mode.dotclock =
+ lvds->native_mode.clock =
le16_to_cpu(lvds_info->info.sLCDTiming.usPixClk) * 10;
- lvds->native_mode.panel_xres =
+ lvds->native_mode.hdisplay =
le16_to_cpu(lvds_info->info.sLCDTiming.usHActive);
- lvds->native_mode.panel_yres =
+ lvds->native_mode.vdisplay =
le16_to_cpu(lvds_info->info.sLCDTiming.usVActive);
- lvds->native_mode.hblank =
- le16_to_cpu(lvds_info->info.sLCDTiming.usHBlanking_Time);
- lvds->native_mode.hoverplus =
- le16_to_cpu(lvds_info->info.sLCDTiming.usHSyncOffset);
- lvds->native_mode.hsync_width =
- le16_to_cpu(lvds_info->info.sLCDTiming.usHSyncWidth);
- lvds->native_mode.vblank =
- le16_to_cpu(lvds_info->info.sLCDTiming.usVBlanking_Time);
- lvds->native_mode.voverplus =
- le16_to_cpu(lvds_info->info.sLCDTiming.usVSyncOffset);
- lvds->native_mode.vsync_width =
- le16_to_cpu(lvds_info->info.sLCDTiming.usVSyncWidth);
+ lvds->native_mode.htotal = lvds->native_mode.hdisplay +
+ le16_to_cpu(lvds_info->info.sLCDTiming.usHBlanking_Time);
+ lvds->native_mode.hsync_start = lvds->native_mode.hdisplay +
+ le16_to_cpu(lvds_info->info.sLCDTiming.usHSyncOffset);
+ lvds->native_mode.hsync_end = lvds->native_mode.hsync_start +
+ le16_to_cpu(lvds_info->info.sLCDTiming.usHSyncWidth);
+ lvds->native_mode.vtotal = lvds->native_mode.vdisplay +
+ le16_to_cpu(lvds_info->info.sLCDTiming.usVBlanking_Time);
+ lvds->native_mode.vsync_start = lvds->native_mode.vdisplay +
+ le16_to_cpu(lvds_info->info.sLCDTiming.usVSyncOffset);
+ lvds->native_mode.vsync_end = lvds->native_mode.vsync_start +
+ le16_to_cpu(lvds_info->info.sLCDTiming.usVSyncWidth);
lvds->panel_pwr_delay =
le16_to_cpu(lvds_info->info.usOffDelayInMs);
lvds->lvds_misc = lvds_info->info.ucLVDS_Misc;
+ misc = le16_to_cpu(lvds_info->info.sLCDTiming.susModeMiscInfo.usAccess);
+ if (misc & ATOM_VSYNC_POLARITY)
+ lvds->native_mode.flags |= DRM_MODE_FLAG_NVSYNC;
+ if (misc & ATOM_HSYNC_POLARITY)
+ lvds->native_mode.flags |= DRM_MODE_FLAG_NHSYNC;
+ if (misc & ATOM_COMPOSITESYNC)
+ lvds->native_mode.flags |= DRM_MODE_FLAG_CSYNC;
+ if (misc & ATOM_INTERLACE)
+ lvds->native_mode.flags |= DRM_MODE_FLAG_INTERLACE;
+ if (misc & ATOM_DOUBLE_CLOCK_MODE)
+ lvds->native_mode.flags |= DRM_MODE_FLAG_DBLSCAN;
+
+ /* set crtc values */
+ drm_mode_set_crtcinfo(&lvds->native_mode, CRTC_INTERLACE_HALVE_V);
+
+ lvds->ss = radeon_atombios_get_ss_info(encoder, lvds_info->info.ucSS_Id);
+
encoder->native_mode = lvds->native_mode;
}
return lvds;
@@ -857,8 +1021,7 @@ radeon_atombios_get_primary_dac_info(struct radeon_encoder *encoder)
}
bool radeon_atom_get_tv_timings(struct radeon_device *rdev, int index,
- SET_CRTC_TIMING_PARAMETERS_PS_ALLOCATION *crtc_timing,
- int32_t *pixel_clock)
+ struct drm_display_mode *mode)
{
struct radeon_mode_info *mode_info = &rdev->mode_info;
ATOM_ANALOG_TV_INFO *tv_info;
@@ -866,7 +1029,7 @@ bool radeon_atom_get_tv_timings(struct radeon_device *rdev, int index,
ATOM_DTD_FORMAT *dtd_timings;
int data_index = GetIndexIntoMasterTable(DATA, AnalogTV_Info);
u8 frev, crev;
- uint16_t data_offset;
+ u16 data_offset, misc;
atom_parse_data_header(mode_info->atom_context, data_index, NULL, &frev, &crev, &data_offset);
@@ -876,28 +1039,37 @@ bool radeon_atom_get_tv_timings(struct radeon_device *rdev, int index,
if (index > MAX_SUPPORTED_TV_TIMING)
return false;
- crtc_timing->usH_Total = le16_to_cpu(tv_info->aModeTimings[index].usCRTC_H_Total);
- crtc_timing->usH_Disp = le16_to_cpu(tv_info->aModeTimings[index].usCRTC_H_Disp);
- crtc_timing->usH_SyncStart = le16_to_cpu(tv_info->aModeTimings[index].usCRTC_H_SyncStart);
- crtc_timing->usH_SyncWidth = le16_to_cpu(tv_info->aModeTimings[index].usCRTC_H_SyncWidth);
-
- crtc_timing->usV_Total = le16_to_cpu(tv_info->aModeTimings[index].usCRTC_V_Total);
- crtc_timing->usV_Disp = le16_to_cpu(tv_info->aModeTimings[index].usCRTC_V_Disp);
- crtc_timing->usV_SyncStart = le16_to_cpu(tv_info->aModeTimings[index].usCRTC_V_SyncStart);
- crtc_timing->usV_SyncWidth = le16_to_cpu(tv_info->aModeTimings[index].usCRTC_V_SyncWidth);
-
- crtc_timing->susModeMiscInfo = tv_info->aModeTimings[index].susModeMiscInfo;
-
- crtc_timing->ucOverscanRight = le16_to_cpu(tv_info->aModeTimings[index].usCRTC_OverscanRight);
- crtc_timing->ucOverscanLeft = le16_to_cpu(tv_info->aModeTimings[index].usCRTC_OverscanLeft);
- crtc_timing->ucOverscanBottom = le16_to_cpu(tv_info->aModeTimings[index].usCRTC_OverscanBottom);
- crtc_timing->ucOverscanTop = le16_to_cpu(tv_info->aModeTimings[index].usCRTC_OverscanTop);
- *pixel_clock = le16_to_cpu(tv_info->aModeTimings[index].usPixelClock) * 10;
+ mode->crtc_htotal = le16_to_cpu(tv_info->aModeTimings[index].usCRTC_H_Total);
+ mode->crtc_hdisplay = le16_to_cpu(tv_info->aModeTimings[index].usCRTC_H_Disp);
+ mode->crtc_hsync_start = le16_to_cpu(tv_info->aModeTimings[index].usCRTC_H_SyncStart);
+ mode->crtc_hsync_end = le16_to_cpu(tv_info->aModeTimings[index].usCRTC_H_SyncStart) +
+ le16_to_cpu(tv_info->aModeTimings[index].usCRTC_H_SyncWidth);
+
+ mode->crtc_vtotal = le16_to_cpu(tv_info->aModeTimings[index].usCRTC_V_Total);
+ mode->crtc_vdisplay = le16_to_cpu(tv_info->aModeTimings[index].usCRTC_V_Disp);
+ mode->crtc_vsync_start = le16_to_cpu(tv_info->aModeTimings[index].usCRTC_V_SyncStart);
+ mode->crtc_vsync_end = le16_to_cpu(tv_info->aModeTimings[index].usCRTC_V_SyncStart) +
+ le16_to_cpu(tv_info->aModeTimings[index].usCRTC_V_SyncWidth);
+
+ mode->flags = 0;
+ misc = le16_to_cpu(tv_info->aModeTimings[index].susModeMiscInfo.usAccess);
+ if (misc & ATOM_VSYNC_POLARITY)
+ mode->flags |= DRM_MODE_FLAG_NVSYNC;
+ if (misc & ATOM_HSYNC_POLARITY)
+ mode->flags |= DRM_MODE_FLAG_NHSYNC;
+ if (misc & ATOM_COMPOSITESYNC)
+ mode->flags |= DRM_MODE_FLAG_CSYNC;
+ if (misc & ATOM_INTERLACE)
+ mode->flags |= DRM_MODE_FLAG_INTERLACE;
+ if (misc & ATOM_DOUBLE_CLOCK_MODE)
+ mode->flags |= DRM_MODE_FLAG_DBLSCAN;
+
+ mode->clock = le16_to_cpu(tv_info->aModeTimings[index].usPixelClock) * 10;
if (index == 1) {
/* PAL timings appear to have wrong values for totals */
- crtc_timing->usH_Total -= 1;
- crtc_timing->usV_Total -= 1;
+ mode->crtc_htotal -= 1;
+ mode->crtc_vtotal -= 1;
}
break;
case 2:
@@ -906,17 +1078,36 @@ bool radeon_atom_get_tv_timings(struct radeon_device *rdev, int index,
return false;
dtd_timings = &tv_info_v1_2->aModeTimings[index];
- crtc_timing->usH_Total = le16_to_cpu(dtd_timings->usHActive) + le16_to_cpu(dtd_timings->usHBlanking_Time);
- crtc_timing->usH_Disp = le16_to_cpu(dtd_timings->usHActive);
- crtc_timing->usH_SyncStart = le16_to_cpu(dtd_timings->usHActive) + le16_to_cpu(dtd_timings->usHSyncOffset);
- crtc_timing->usH_SyncWidth = le16_to_cpu(dtd_timings->usHSyncWidth);
- crtc_timing->usV_Total = le16_to_cpu(dtd_timings->usVActive) + le16_to_cpu(dtd_timings->usVBlanking_Time);
- crtc_timing->usV_Disp = le16_to_cpu(dtd_timings->usVActive);
- crtc_timing->usV_SyncStart = le16_to_cpu(dtd_timings->usVActive) + le16_to_cpu(dtd_timings->usVSyncOffset);
- crtc_timing->usV_SyncWidth = le16_to_cpu(dtd_timings->usVSyncWidth);
-
- crtc_timing->susModeMiscInfo.usAccess = le16_to_cpu(dtd_timings->susModeMiscInfo.usAccess);
- *pixel_clock = le16_to_cpu(dtd_timings->usPixClk) * 10;
+ mode->crtc_htotal = le16_to_cpu(dtd_timings->usHActive) +
+ le16_to_cpu(dtd_timings->usHBlanking_Time);
+ mode->crtc_hdisplay = le16_to_cpu(dtd_timings->usHActive);
+ mode->crtc_hsync_start = le16_to_cpu(dtd_timings->usHActive) +
+ le16_to_cpu(dtd_timings->usHSyncOffset);
+ mode->crtc_hsync_end = mode->crtc_hsync_start +
+ le16_to_cpu(dtd_timings->usHSyncWidth);
+
+ mode->crtc_vtotal = le16_to_cpu(dtd_timings->usVActive) +
+ le16_to_cpu(dtd_timings->usVBlanking_Time);
+ mode->crtc_vdisplay = le16_to_cpu(dtd_timings->usVActive);
+ mode->crtc_vsync_start = le16_to_cpu(dtd_timings->usVActive) +
+ le16_to_cpu(dtd_timings->usVSyncOffset);
+ mode->crtc_vsync_end = mode->crtc_vsync_start +
+ le16_to_cpu(dtd_timings->usVSyncWidth);
+
+ mode->flags = 0;
+ misc = le16_to_cpu(dtd_timings->susModeMiscInfo.usAccess);
+ if (misc & ATOM_VSYNC_POLARITY)
+ mode->flags |= DRM_MODE_FLAG_NVSYNC;
+ if (misc & ATOM_HSYNC_POLARITY)
+ mode->flags |= DRM_MODE_FLAG_NHSYNC;
+ if (misc & ATOM_COMPOSITESYNC)
+ mode->flags |= DRM_MODE_FLAG_CSYNC;
+ if (misc & ATOM_INTERLACE)
+ mode->flags |= DRM_MODE_FLAG_INTERLACE;
+ if (misc & ATOM_DOUBLE_CLOCK_MODE)
+ mode->flags |= DRM_MODE_FLAG_DBLSCAN;
+
+ mode->clock = le16_to_cpu(dtd_timings->usPixClk) * 10;
break;
}
return true;
@@ -981,6 +1172,24 @@ void radeon_atom_static_pwrmgt_setup(struct radeon_device *rdev, int enable)
atom_execute_table(rdev->mode_info.atom_context, index, (uint32_t *)&args);
}
+uint32_t radeon_atom_get_engine_clock(struct radeon_device *rdev)
+{
+ GET_ENGINE_CLOCK_PS_ALLOCATION args;
+ int index = GetIndexIntoMasterTable(COMMAND, GetEngineClock);
+
+ atom_execute_table(rdev->mode_info.atom_context, index, (uint32_t *)&args);
+ return args.ulReturnEngineClock;
+}
+
+uint32_t radeon_atom_get_memory_clock(struct radeon_device *rdev)
+{
+ GET_MEMORY_CLOCK_PS_ALLOCATION args;
+ int index = GetIndexIntoMasterTable(COMMAND, GetMemoryClock);
+
+ atom_execute_table(rdev->mode_info.atom_context, index, (uint32_t *)&args);
+ return args.ulReturnMemoryClock;
+}
+
void radeon_atom_set_engine_clock(struct radeon_device *rdev,
uint32_t eng_clock)
{
diff --git a/drivers/gpu/drm/radeon/radeon_benchmark.c b/drivers/gpu/drm/radeon/radeon_benchmark.c
index 2e938f7496f..4ddfd4b5bc5 100644
--- a/drivers/gpu/drm/radeon/radeon_benchmark.c
+++ b/drivers/gpu/drm/radeon/radeon_benchmark.c
@@ -29,8 +29,8 @@
void radeon_benchmark_move(struct radeon_device *rdev, unsigned bsize,
unsigned sdomain, unsigned ddomain)
{
- struct radeon_object *dobj = NULL;
- struct radeon_object *sobj = NULL;
+ struct radeon_bo *dobj = NULL;
+ struct radeon_bo *sobj = NULL;
struct radeon_fence *fence = NULL;
uint64_t saddr, daddr;
unsigned long start_jiffies;
@@ -41,19 +41,27 @@ void radeon_benchmark_move(struct radeon_device *rdev, unsigned bsize,
size = bsize;
n = 1024;
- r = radeon_object_create(rdev, NULL, size, true, sdomain, false, &sobj);
+ r = radeon_bo_create(rdev, NULL, size, true, sdomain, &sobj);
if (r) {
goto out_cleanup;
}
- r = radeon_object_pin(sobj, sdomain, &saddr);
+ r = radeon_bo_reserve(sobj, false);
+ if (unlikely(r != 0))
+ goto out_cleanup;
+ r = radeon_bo_pin(sobj, sdomain, &saddr);
+ radeon_bo_unreserve(sobj);
if (r) {
goto out_cleanup;
}
- r = radeon_object_create(rdev, NULL, size, true, ddomain, false, &dobj);
+ r = radeon_bo_create(rdev, NULL, size, true, ddomain, &dobj);
if (r) {
goto out_cleanup;
}
- r = radeon_object_pin(dobj, ddomain, &daddr);
+ r = radeon_bo_reserve(dobj, false);
+ if (unlikely(r != 0))
+ goto out_cleanup;
+ r = radeon_bo_pin(dobj, ddomain, &daddr);
+ radeon_bo_unreserve(dobj);
if (r) {
goto out_cleanup;
}
@@ -63,7 +71,7 @@ void radeon_benchmark_move(struct radeon_device *rdev, unsigned bsize,
if (r) {
goto out_cleanup;
}
- r = radeon_copy_dma(rdev, saddr, daddr, size / 4096, fence);
+ r = radeon_copy_dma(rdev, saddr, daddr, size / RADEON_GPU_PAGE_SIZE, fence);
if (r) {
goto out_cleanup;
}
@@ -88,7 +96,7 @@ void radeon_benchmark_move(struct radeon_device *rdev, unsigned bsize,
if (r) {
goto out_cleanup;
}
- r = radeon_copy_blit(rdev, saddr, daddr, size / 4096, fence);
+ r = radeon_copy_blit(rdev, saddr, daddr, size / RADEON_GPU_PAGE_SIZE, fence);
if (r) {
goto out_cleanup;
}
@@ -109,12 +117,20 @@ void radeon_benchmark_move(struct radeon_device *rdev, unsigned bsize,
}
out_cleanup:
if (sobj) {
- radeon_object_unpin(sobj);
- radeon_object_unref(&sobj);
+ r = radeon_bo_reserve(sobj, false);
+ if (likely(r == 0)) {
+ radeon_bo_unpin(sobj);
+ radeon_bo_unreserve(sobj);
+ }
+ radeon_bo_unref(&sobj);
}
if (dobj) {
- radeon_object_unpin(dobj);
- radeon_object_unref(&dobj);
+ r = radeon_bo_reserve(dobj, false);
+ if (likely(r == 0)) {
+ radeon_bo_unpin(dobj);
+ radeon_bo_unreserve(dobj);
+ }
+ radeon_bo_unref(&dobj);
}
if (fence) {
radeon_fence_unref(&fence);
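
As the benchmark changes above show, the radeon_object → radeon_bo conversion requires pin and unpin to be bracketed by reserve/unreserve. A compressed sketch of that idiom, using only the radeon_bo_* calls that appear in this diff (the wrapper function is hypothetical):

/* Illustration of the new pin idiom; error handling trimmed. */
static int example_pin_bo(struct radeon_bo *bo, u32 domain, u64 *gpu_addr)
{
	int r;

	r = radeon_bo_reserve(bo, false);		/* take the BO reservation */
	if (unlikely(r != 0))
		return r;
	r = radeon_bo_pin(bo, domain, gpu_addr);	/* pin while reserved */
	radeon_bo_unreserve(bo);			/* drop the reservation */
	return r;
}
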
diff --git a/drivers/gpu/drm/radeon/radeon_bios.c b/drivers/gpu/drm/radeon/radeon_bios.c
index 34a9b911951..906921740c6 100644
--- a/drivers/gpu/drm/radeon/radeon_bios.c
+++ b/drivers/gpu/drm/radeon/radeon_bios.c
@@ -50,19 +50,16 @@ static bool igp_read_bios_from_vram(struct radeon_device *rdev)
vram_base = drm_get_resource_start(rdev->ddev, 0);
bios = ioremap(vram_base, size);
if (!bios) {
- DRM_ERROR("Unable to mmap vram\n");
return false;
}
if (size == 0 || bios[0] != 0x55 || bios[1] != 0xaa) {
iounmap(bios);
- DRM_ERROR("bad rom signature\n");
return false;
}
rdev->bios = kmalloc(size, GFP_KERNEL);
if (rdev->bios == NULL) {
iounmap(bios);
- DRM_ERROR("kmalloc failed\n");
return false;
}
memcpy(rdev->bios, bios, size);
diff --git a/drivers/gpu/drm/radeon/radeon_clocks.c b/drivers/gpu/drm/radeon/radeon_clocks.c
index f5c32a766b1..b062109efbe 100644
--- a/drivers/gpu/drm/radeon/radeon_clocks.c
+++ b/drivers/gpu/drm/radeon/radeon_clocks.c
@@ -32,7 +32,7 @@
#include "atom.h"
/* 10 khz */
-static uint32_t radeon_legacy_get_engine_clock(struct radeon_device *rdev)
+uint32_t radeon_legacy_get_engine_clock(struct radeon_device *rdev)
{
struct radeon_pll *spll = &rdev->clock.spll;
uint32_t fb_div, ref_div, post_div, sclk;
@@ -44,6 +44,10 @@ static uint32_t radeon_legacy_get_engine_clock(struct radeon_device *rdev)
ref_div =
RREG32_PLL(RADEON_M_SPLL_REF_FB_DIV) & RADEON_M_SPLL_REF_DIV_MASK;
+
+ if (ref_div == 0)
+ return 0;
+
sclk = fb_div / ref_div;
post_div = RREG32_PLL(RADEON_SCLK_CNTL) & RADEON_SCLK_SRC_SEL_MASK;
@@ -70,6 +74,10 @@ static uint32_t radeon_legacy_get_memory_clock(struct radeon_device *rdev)
ref_div =
RREG32_PLL(RADEON_M_SPLL_REF_FB_DIV) & RADEON_M_SPLL_REF_DIV_MASK;
+
+ if (ref_div == 0)
+ return 0;
+
mclk = fb_div / ref_div;
post_div = RREG32_PLL(RADEON_MCLK_CNTL) & 0x7;
@@ -98,8 +106,19 @@ void radeon_get_clock_info(struct drm_device *dev)
ret = radeon_combios_get_clock_info(dev);
if (ret) {
- if (p1pll->reference_div < 2)
- p1pll->reference_div = 12;
+ if (p1pll->reference_div < 2) {
+ if (!ASIC_IS_AVIVO(rdev)) {
+ u32 tmp = RREG32_PLL(RADEON_PPLL_REF_DIV);
+ if (ASIC_IS_R300(rdev))
+ p1pll->reference_div =
+ (tmp & R300_PPLL_REF_DIV_ACC_MASK) >> R300_PPLL_REF_DIV_ACC_SHIFT;
+ else
+ p1pll->reference_div = tmp & RADEON_PPLL_REF_DIV_MASK;
+ if (p1pll->reference_div < 2)
+ p1pll->reference_div = 12;
+ } else
+ p1pll->reference_div = 12;
+ }
if (p2pll->reference_div < 2)
p2pll->reference_div = 12;
if (rdev->family < CHIP_RS600) {
diff --git a/drivers/gpu/drm/radeon/radeon_combios.c b/drivers/gpu/drm/radeon/radeon_combios.c
index 748265a105b..14d3555e4af 100644
--- a/drivers/gpu/drm/radeon/radeon_combios.c
+++ b/drivers/gpu/drm/radeon/radeon_combios.c
@@ -49,7 +49,8 @@ radeon_add_legacy_connector(struct drm_device *dev,
uint32_t connector_id,
uint32_t supported_device,
int connector_type,
- struct radeon_i2c_bus_rec *i2c_bus);
+ struct radeon_i2c_bus_rec *i2c_bus,
+ uint16_t connector_object_id);
/* from radeon_legacy_encoder.c */
extern void
@@ -449,29 +450,29 @@ struct radeon_i2c_bus_rec combios_setup_i2c_bus(int ddc_line)
i2c.mask_data_mask = RADEON_GPIO_EN_0;
i2c.a_clk_mask = RADEON_GPIO_A_1;
i2c.a_data_mask = RADEON_GPIO_A_0;
- i2c.put_clk_mask = RADEON_GPIO_EN_1;
- i2c.put_data_mask = RADEON_GPIO_EN_0;
- i2c.get_clk_mask = RADEON_GPIO_Y_1;
- i2c.get_data_mask = RADEON_GPIO_Y_0;
+ i2c.en_clk_mask = RADEON_GPIO_EN_1;
+ i2c.en_data_mask = RADEON_GPIO_EN_0;
+ i2c.y_clk_mask = RADEON_GPIO_Y_1;
+ i2c.y_data_mask = RADEON_GPIO_Y_0;
if ((ddc_line == RADEON_LCD_GPIO_MASK) ||
(ddc_line == RADEON_MDGPIO_EN_REG)) {
i2c.mask_clk_reg = ddc_line;
i2c.mask_data_reg = ddc_line;
i2c.a_clk_reg = ddc_line;
i2c.a_data_reg = ddc_line;
- i2c.put_clk_reg = ddc_line;
- i2c.put_data_reg = ddc_line;
- i2c.get_clk_reg = ddc_line + 4;
- i2c.get_data_reg = ddc_line + 4;
+ i2c.en_clk_reg = ddc_line;
+ i2c.en_data_reg = ddc_line;
+ i2c.y_clk_reg = ddc_line + 4;
+ i2c.y_data_reg = ddc_line + 4;
} else {
i2c.mask_clk_reg = ddc_line;
i2c.mask_data_reg = ddc_line;
i2c.a_clk_reg = ddc_line;
i2c.a_data_reg = ddc_line;
- i2c.put_clk_reg = ddc_line;
- i2c.put_data_reg = ddc_line;
- i2c.get_clk_reg = ddc_line;
- i2c.get_data_reg = ddc_line;
+ i2c.en_clk_reg = ddc_line;
+ i2c.en_data_reg = ddc_line;
+ i2c.y_clk_reg = ddc_line;
+ i2c.y_data_reg = ddc_line;
}
if (ddc_line)
@@ -494,7 +495,7 @@ bool radeon_combios_get_clock_info(struct drm_device *dev)
uint16_t sclk, mclk;
if (rdev->bios == NULL)
- return NULL;
+ return false;
pll_info = combios_get_table_offset(dev, COMBIOS_PLL_INFO_TABLE);
if (pll_info) {
@@ -808,25 +809,25 @@ static struct radeon_encoder_lvds *radeon_legacy_get_lvds_info_from_regs(struct
lvds->panel_blon_delay = (lvds_ss_gen_cntl >> RADEON_LVDS_PWRSEQ_DELAY2_SHIFT) & 0xf;
if (fp_vert_stretch & RADEON_VERT_STRETCH_ENABLE)
- lvds->native_mode.panel_yres =
+ lvds->native_mode.vdisplay =
((fp_vert_stretch & RADEON_VERT_PANEL_SIZE) >>
RADEON_VERT_PANEL_SHIFT) + 1;
else
- lvds->native_mode.panel_yres =
+ lvds->native_mode.vdisplay =
(RREG32(RADEON_CRTC_V_TOTAL_DISP) >> 16) + 1;
if (fp_horz_stretch & RADEON_HORZ_STRETCH_ENABLE)
- lvds->native_mode.panel_xres =
+ lvds->native_mode.hdisplay =
(((fp_horz_stretch & RADEON_HORZ_PANEL_SIZE) >>
RADEON_HORZ_PANEL_SHIFT) + 1) * 8;
else
- lvds->native_mode.panel_xres =
+ lvds->native_mode.hdisplay =
((RREG32(RADEON_CRTC_H_TOTAL_DISP) >> 16) + 1) * 8;
- if ((lvds->native_mode.panel_xres < 640) ||
- (lvds->native_mode.panel_yres < 480)) {
- lvds->native_mode.panel_xres = 640;
- lvds->native_mode.panel_yres = 480;
+ if ((lvds->native_mode.hdisplay < 640) ||
+ (lvds->native_mode.vdisplay < 480)) {
+ lvds->native_mode.hdisplay = 640;
+ lvds->native_mode.vdisplay = 480;
}
ppll_div_sel = RREG8(RADEON_CLOCK_CNTL_INDEX + 1) & 0x3;
@@ -846,8 +847,8 @@ static struct radeon_encoder_lvds *radeon_legacy_get_lvds_info_from_regs(struct
lvds->panel_vcc_delay = 200;
DRM_INFO("Panel info derived from registers\n");
- DRM_INFO("Panel Size %dx%d\n", lvds->native_mode.panel_xres,
- lvds->native_mode.panel_yres);
+ DRM_INFO("Panel Size %dx%d\n", lvds->native_mode.hdisplay,
+ lvds->native_mode.vdisplay);
return lvds;
}
@@ -882,11 +883,11 @@ struct radeon_encoder_lvds *radeon_combios_get_lvds_info(struct radeon_encoder
DRM_INFO("Panel ID String: %s\n", stmp);
- lvds->native_mode.panel_xres = RBIOS16(lcd_info + 0x19);
- lvds->native_mode.panel_yres = RBIOS16(lcd_info + 0x1b);
+ lvds->native_mode.hdisplay = RBIOS16(lcd_info + 0x19);
+ lvds->native_mode.vdisplay = RBIOS16(lcd_info + 0x1b);
- DRM_INFO("Panel Size %dx%d\n", lvds->native_mode.panel_xres,
- lvds->native_mode.panel_yres);
+ DRM_INFO("Panel Size %dx%d\n", lvds->native_mode.hdisplay,
+ lvds->native_mode.vdisplay);
lvds->panel_vcc_delay = RBIOS16(lcd_info + 0x2c);
if (lvds->panel_vcc_delay > 2000 || lvds->panel_vcc_delay < 0)
@@ -944,27 +945,25 @@ struct radeon_encoder_lvds *radeon_combios_get_lvds_info(struct radeon_encoder
if (tmp == 0)
break;
- if ((RBIOS16(tmp) == lvds->native_mode.panel_xres) &&
+ if ((RBIOS16(tmp) == lvds->native_mode.hdisplay) &&
(RBIOS16(tmp + 2) ==
- lvds->native_mode.panel_yres)) {
- lvds->native_mode.hblank =
- (RBIOS16(tmp + 17) - RBIOS16(tmp + 19)) * 8;
- lvds->native_mode.hoverplus =
- (RBIOS16(tmp + 21) - RBIOS16(tmp + 19) -
- 1) * 8;
- lvds->native_mode.hsync_width =
- RBIOS8(tmp + 23) * 8;
-
- lvds->native_mode.vblank = (RBIOS16(tmp + 24) -
- RBIOS16(tmp + 26));
- lvds->native_mode.voverplus =
- ((RBIOS16(tmp + 28) & 0x7ff) -
- RBIOS16(tmp + 26));
- lvds->native_mode.vsync_width =
- ((RBIOS16(tmp + 28) & 0xf800) >> 11);
- lvds->native_mode.dotclock =
- RBIOS16(tmp + 9) * 10;
+ lvds->native_mode.vdisplay)) {
+ lvds->native_mode.htotal = RBIOS16(tmp + 17) * 8;
+ lvds->native_mode.hsync_start = RBIOS16(tmp + 21) * 8;
+ lvds->native_mode.hsync_end = (RBIOS8(tmp + 23) +
+ RBIOS16(tmp + 21)) * 8;
+
+ lvds->native_mode.vtotal = RBIOS16(tmp + 24);
+ lvds->native_mode.vsync_start = RBIOS16(tmp + 28) & 0x7ff;
+ lvds->native_mode.vsync_end =
+ ((RBIOS16(tmp + 28) & 0xf800) >> 11) +
+ (RBIOS16(tmp + 28) & 0x7ff);
+
+ lvds->native_mode.clock = RBIOS16(tmp + 9) * 10;
lvds->native_mode.flags = 0;
+ /* set crtc values */
+ drm_mode_set_crtcinfo(&lvds->native_mode, CRTC_INTERLACE_HALVE_V);
+
}
}
} else {
@@ -994,8 +993,8 @@ static const struct radeon_tmds_pll default_tmds_pll[CHIP_LAST][4] = {
{{0xffffffff, 0xb01cb}, {0, 0}, {0, 0}, {0, 0}}, /* CHIP_R420 */
{{0xffffffff, 0xb01cb}, {0, 0}, {0, 0}, {0, 0}}, /* CHIP_R423 */
{{0xffffffff, 0xb01cb}, {0, 0}, {0, 0}, {0, 0}}, /* CHIP_RV410 */
- {{15000, 0xb0155}, {0xffffffff, 0xb01cb}, {0, 0}, {0, 0}}, /* CHIP_RS400 */
- {{15000, 0xb0155}, {0xffffffff, 0xb01cb}, {0, 0}, {0, 0}}, /* CHIP_RS480 */
+ { {0, 0}, {0, 0}, {0, 0}, {0, 0} }, /* CHIP_RS400 */
+ { {0, 0}, {0, 0}, {0, 0}, {0, 0} }, /* CHIP_RS480 */
};
bool radeon_legacy_get_tmds_info_from_table(struct radeon_encoder *encoder,
@@ -1029,7 +1028,6 @@ bool radeon_legacy_get_tmds_info_from_combios(struct radeon_encoder *encoder,
tmds_info = combios_get_table_offset(dev, COMBIOS_DFP_INFO_TABLE);
if (tmds_info) {
-
ver = RBIOS8(tmds_info);
DRM_INFO("DFP table revision: %d\n", ver);
if (ver == 3) {
@@ -1064,45 +1062,132 @@ bool radeon_legacy_get_tmds_info_from_combios(struct radeon_encoder *encoder,
tmds->tmds_pll[i].value);
}
}
- } else
+ } else {
DRM_INFO("No TMDS info found in BIOS\n");
+ return false;
+ }
return true;
}
-struct radeon_encoder_int_tmds *radeon_combios_get_tmds_info(struct radeon_encoder *encoder)
+bool radeon_legacy_get_ext_tmds_info_from_table(struct radeon_encoder *encoder,
+ struct radeon_encoder_ext_tmds *tmds)
{
- struct radeon_encoder_int_tmds *tmds = NULL;
- bool ret;
-
- tmds = kzalloc(sizeof(struct radeon_encoder_int_tmds), GFP_KERNEL);
+ struct drm_device *dev = encoder->base.dev;
+ struct radeon_device *rdev = dev->dev_private;
+ struct radeon_i2c_bus_rec i2c_bus;
- if (!tmds)
- return NULL;
+ /* default for macs */
+ i2c_bus = combios_setup_i2c_bus(RADEON_GPIO_MONID);
+ tmds->i2c_bus = radeon_i2c_create(dev, &i2c_bus, "DVO");
- ret = radeon_legacy_get_tmds_info_from_combios(encoder, tmds);
- if (ret == false)
- radeon_legacy_get_tmds_info_from_table(encoder, tmds);
+ /* XXX some macs have duallink chips */
+ switch (rdev->mode_info.connector_table) {
+ case CT_POWERBOOK_EXTERNAL:
+ case CT_MINI_EXTERNAL:
+ default:
+ tmds->dvo_chip = DVO_SIL164;
+ tmds->slave_addr = 0x70 >> 1; /* 7 bit addressing */
+ break;
+ }
- return tmds;
+ return true;
}
-void radeon_combios_get_ext_tmds_info(struct radeon_encoder *encoder)
+bool radeon_legacy_get_ext_tmds_info_from_combios(struct radeon_encoder *encoder,
+ struct radeon_encoder_ext_tmds *tmds)
{
struct drm_device *dev = encoder->base.dev;
struct radeon_device *rdev = dev->dev_private;
- uint16_t ext_tmds_info;
- uint8_t ver;
+ uint16_t offset;
+ uint8_t ver, id, blocks, clk, data;
+ int i;
+ enum radeon_combios_ddc gpio;
+ struct radeon_i2c_bus_rec i2c_bus;
if (rdev->bios == NULL)
- return;
+ return false;
- ext_tmds_info =
- combios_get_table_offset(dev, COMBIOS_EXT_TMDS_INFO_TABLE);
- if (ext_tmds_info) {
- ver = RBIOS8(ext_tmds_info);
- DRM_INFO("External TMDS Table revision: %d\n", ver);
- // TODO
+ tmds->i2c_bus = NULL;
+ if (rdev->flags & RADEON_IS_IGP) {
+ offset = combios_get_table_offset(dev, COMBIOS_I2C_INFO_TABLE);
+ if (offset) {
+ ver = RBIOS8(offset);
+ DRM_INFO("GPIO Table revision: %d\n", ver);
+ blocks = RBIOS8(offset + 2);
+ for (i = 0; i < blocks; i++) {
+ id = RBIOS8(offset + 3 + (i * 5) + 0);
+ if (id == 136) {
+ clk = RBIOS8(offset + 3 + (i * 5) + 3);
+ data = RBIOS8(offset + 3 + (i * 5) + 4);
+ i2c_bus.valid = true;
+ i2c_bus.mask_clk_mask = (1 << clk);
+ i2c_bus.mask_data_mask = (1 << data);
+ i2c_bus.a_clk_mask = (1 << clk);
+ i2c_bus.a_data_mask = (1 << data);
+ i2c_bus.en_clk_mask = (1 << clk);
+ i2c_bus.en_data_mask = (1 << data);
+ i2c_bus.y_clk_mask = (1 << clk);
+ i2c_bus.y_data_mask = (1 << data);
+ i2c_bus.mask_clk_reg = RADEON_GPIOPAD_MASK;
+ i2c_bus.mask_data_reg = RADEON_GPIOPAD_MASK;
+ i2c_bus.a_clk_reg = RADEON_GPIOPAD_A;
+ i2c_bus.a_data_reg = RADEON_GPIOPAD_A;
+ i2c_bus.en_clk_reg = RADEON_GPIOPAD_EN;
+ i2c_bus.en_data_reg = RADEON_GPIOPAD_EN;
+ i2c_bus.y_clk_reg = RADEON_GPIOPAD_Y;
+ i2c_bus.y_data_reg = RADEON_GPIOPAD_Y;
+ tmds->i2c_bus = radeon_i2c_create(dev, &i2c_bus, "DVO");
+ tmds->dvo_chip = DVO_SIL164;
+ tmds->slave_addr = 0x70 >> 1; /* 7 bit addressing */
+ break;
+ }
+ }
+ }
+ } else {
+ offset = combios_get_table_offset(dev, COMBIOS_EXT_TMDS_INFO_TABLE);
+ if (offset) {
+ ver = RBIOS8(offset);
+ DRM_INFO("External TMDS Table revision: %d\n", ver);
+ tmds->slave_addr = RBIOS8(offset + 4 + 2);
+ tmds->slave_addr >>= 1; /* 7 bit addressing */
+ gpio = RBIOS8(offset + 4 + 3);
+ switch (gpio) {
+ case DDC_MONID:
+ i2c_bus = combios_setup_i2c_bus(RADEON_GPIO_MONID);
+ tmds->i2c_bus = radeon_i2c_create(dev, &i2c_bus, "DVO");
+ break;
+ case DDC_DVI:
+ i2c_bus = combios_setup_i2c_bus(RADEON_GPIO_DVI_DDC);
+ tmds->i2c_bus = radeon_i2c_create(dev, &i2c_bus, "DVO");
+ break;
+ case DDC_VGA:
+ i2c_bus = combios_setup_i2c_bus(RADEON_GPIO_VGA_DDC);
+ tmds->i2c_bus = radeon_i2c_create(dev, &i2c_bus, "DVO");
+ break;
+ case DDC_CRT2:
+ /* R3xx+ chips don't have GPIO_CRT2_DDC gpio pad */
+ if (rdev->family >= CHIP_R300)
+ i2c_bus = combios_setup_i2c_bus(RADEON_GPIO_MONID);
+ else
+ i2c_bus = combios_setup_i2c_bus(RADEON_GPIO_CRT2_DDC);
+ tmds->i2c_bus = radeon_i2c_create(dev, &i2c_bus, "DVO");
+ break;
+ case DDC_LCD: /* MM i2c */
+ DRM_ERROR("MM i2c requires hw i2c engine\n");
+ break;
+ default:
+ DRM_ERROR("Unsupported gpio %d\n", gpio);
+ break;
+ }
+ }
}
+
+ if (!tmds->i2c_bus) {
+ DRM_INFO("No valid Ext TMDS info found in BIOS\n");
+ return false;
+ }
+
+ return true;
}
bool radeon_get_legacy_connector_info_from_table(struct drm_device *dev)
@@ -1178,7 +1263,8 @@ bool radeon_get_legacy_connector_info_from_table(struct drm_device *dev)
radeon_add_legacy_connector(dev, 0,
ATOM_DEVICE_CRT1_SUPPORT,
DRM_MODE_CONNECTOR_VGA,
- &ddc_i2c);
+ &ddc_i2c,
+ CONNECTOR_OBJECT_ID_VGA);
} else if (rdev->flags & RADEON_IS_MOBILITY) {
/* LVDS */
ddc_i2c = combios_setup_i2c_bus(RADEON_LCD_GPIO_MASK);
@@ -1190,7 +1276,8 @@ bool radeon_get_legacy_connector_info_from_table(struct drm_device *dev)
radeon_add_legacy_connector(dev, 0,
ATOM_DEVICE_LCD1_SUPPORT,
DRM_MODE_CONNECTOR_LVDS,
- &ddc_i2c);
+ &ddc_i2c,
+ CONNECTOR_OBJECT_ID_LVDS);
/* VGA - primary dac */
ddc_i2c = combios_setup_i2c_bus(RADEON_GPIO_VGA_DDC);
@@ -1202,7 +1289,8 @@ bool radeon_get_legacy_connector_info_from_table(struct drm_device *dev)
radeon_add_legacy_connector(dev, 1,
ATOM_DEVICE_CRT1_SUPPORT,
DRM_MODE_CONNECTOR_VGA,
- &ddc_i2c);
+ &ddc_i2c,
+ CONNECTOR_OBJECT_ID_VGA);
} else {
/* DVI-I - tv dac, int tmds */
ddc_i2c = combios_setup_i2c_bus(RADEON_GPIO_DVI_DDC);
@@ -1220,7 +1308,8 @@ bool radeon_get_legacy_connector_info_from_table(struct drm_device *dev)
ATOM_DEVICE_DFP1_SUPPORT |
ATOM_DEVICE_CRT2_SUPPORT,
DRM_MODE_CONNECTOR_DVII,
- &ddc_i2c);
+ &ddc_i2c,
+ CONNECTOR_OBJECT_ID_SINGLE_LINK_DVI_I);
/* VGA - primary dac */
ddc_i2c = combios_setup_i2c_bus(RADEON_GPIO_VGA_DDC);
@@ -1232,7 +1321,8 @@ bool radeon_get_legacy_connector_info_from_table(struct drm_device *dev)
radeon_add_legacy_connector(dev, 1,
ATOM_DEVICE_CRT1_SUPPORT,
DRM_MODE_CONNECTOR_VGA,
- &ddc_i2c);
+ &ddc_i2c,
+ CONNECTOR_OBJECT_ID_VGA);
}
if (rdev->family != CHIP_R100 && rdev->family != CHIP_R200) {
@@ -1245,7 +1335,8 @@ bool radeon_get_legacy_connector_info_from_table(struct drm_device *dev)
radeon_add_legacy_connector(dev, 2,
ATOM_DEVICE_TV1_SUPPORT,
DRM_MODE_CONNECTOR_SVIDEO,
- &ddc_i2c);
+ &ddc_i2c,
+ CONNECTOR_OBJECT_ID_SVIDEO);
}
break;
case CT_IBOOK:
@@ -1259,7 +1350,8 @@ bool radeon_get_legacy_connector_info_from_table(struct drm_device *dev)
0),
ATOM_DEVICE_LCD1_SUPPORT);
radeon_add_legacy_connector(dev, 0, ATOM_DEVICE_LCD1_SUPPORT,
- DRM_MODE_CONNECTOR_LVDS, &ddc_i2c);
+ DRM_MODE_CONNECTOR_LVDS, &ddc_i2c,
+ CONNECTOR_OBJECT_ID_LVDS);
/* VGA - TV DAC */
ddc_i2c = combios_setup_i2c_bus(RADEON_GPIO_VGA_DDC);
radeon_add_legacy_encoder(dev,
@@ -1268,7 +1360,8 @@ bool radeon_get_legacy_connector_info_from_table(struct drm_device *dev)
2),
ATOM_DEVICE_CRT2_SUPPORT);
radeon_add_legacy_connector(dev, 1, ATOM_DEVICE_CRT2_SUPPORT,
- DRM_MODE_CONNECTOR_VGA, &ddc_i2c);
+ DRM_MODE_CONNECTOR_VGA, &ddc_i2c,
+ CONNECTOR_OBJECT_ID_VGA);
/* TV - TV DAC */
radeon_add_legacy_encoder(dev,
radeon_get_encoder_id(dev,
@@ -1277,7 +1370,8 @@ bool radeon_get_legacy_connector_info_from_table(struct drm_device *dev)
ATOM_DEVICE_TV1_SUPPORT);
radeon_add_legacy_connector(dev, 2, ATOM_DEVICE_TV1_SUPPORT,
DRM_MODE_CONNECTOR_SVIDEO,
- &ddc_i2c);
+ &ddc_i2c,
+ CONNECTOR_OBJECT_ID_SVIDEO);
break;
case CT_POWERBOOK_EXTERNAL:
DRM_INFO("Connector Table: %d (powerbook external tmds)\n",
@@ -1290,7 +1384,8 @@ bool radeon_get_legacy_connector_info_from_table(struct drm_device *dev)
0),
ATOM_DEVICE_LCD1_SUPPORT);
radeon_add_legacy_connector(dev, 0, ATOM_DEVICE_LCD1_SUPPORT,
- DRM_MODE_CONNECTOR_LVDS, &ddc_i2c);
+ DRM_MODE_CONNECTOR_LVDS, &ddc_i2c,
+ CONNECTOR_OBJECT_ID_LVDS);
/* DVI-I - primary dac, ext tmds */
ddc_i2c = combios_setup_i2c_bus(RADEON_GPIO_VGA_DDC);
radeon_add_legacy_encoder(dev,
@@ -1303,10 +1398,12 @@ bool radeon_get_legacy_connector_info_from_table(struct drm_device *dev)
ATOM_DEVICE_CRT1_SUPPORT,
1),
ATOM_DEVICE_CRT1_SUPPORT);
+ /* XXX some are SL */
radeon_add_legacy_connector(dev, 1,
ATOM_DEVICE_DFP2_SUPPORT |
ATOM_DEVICE_CRT1_SUPPORT,
- DRM_MODE_CONNECTOR_DVII, &ddc_i2c);
+ DRM_MODE_CONNECTOR_DVII, &ddc_i2c,
+ CONNECTOR_OBJECT_ID_DUAL_LINK_DVI_I);
/* TV - TV DAC */
radeon_add_legacy_encoder(dev,
radeon_get_encoder_id(dev,
@@ -1315,7 +1412,8 @@ bool radeon_get_legacy_connector_info_from_table(struct drm_device *dev)
ATOM_DEVICE_TV1_SUPPORT);
radeon_add_legacy_connector(dev, 2, ATOM_DEVICE_TV1_SUPPORT,
DRM_MODE_CONNECTOR_SVIDEO,
- &ddc_i2c);
+ &ddc_i2c,
+ CONNECTOR_OBJECT_ID_SVIDEO);
break;
case CT_POWERBOOK_INTERNAL:
DRM_INFO("Connector Table: %d (powerbook internal tmds)\n",
@@ -1328,7 +1426,8 @@ bool radeon_get_legacy_connector_info_from_table(struct drm_device *dev)
0),
ATOM_DEVICE_LCD1_SUPPORT);
radeon_add_legacy_connector(dev, 0, ATOM_DEVICE_LCD1_SUPPORT,
- DRM_MODE_CONNECTOR_LVDS, &ddc_i2c);
+ DRM_MODE_CONNECTOR_LVDS, &ddc_i2c,
+ CONNECTOR_OBJECT_ID_LVDS);
/* DVI-I - primary dac, int tmds */
ddc_i2c = combios_setup_i2c_bus(RADEON_GPIO_VGA_DDC);
radeon_add_legacy_encoder(dev,
@@ -1344,7 +1443,8 @@ bool radeon_get_legacy_connector_info_from_table(struct drm_device *dev)
radeon_add_legacy_connector(dev, 1,
ATOM_DEVICE_DFP1_SUPPORT |
ATOM_DEVICE_CRT1_SUPPORT,
- DRM_MODE_CONNECTOR_DVII, &ddc_i2c);
+ DRM_MODE_CONNECTOR_DVII, &ddc_i2c,
+ CONNECTOR_OBJECT_ID_SINGLE_LINK_DVI_I);
/* TV - TV DAC */
radeon_add_legacy_encoder(dev,
radeon_get_encoder_id(dev,
@@ -1353,7 +1453,8 @@ bool radeon_get_legacy_connector_info_from_table(struct drm_device *dev)
ATOM_DEVICE_TV1_SUPPORT);
radeon_add_legacy_connector(dev, 2, ATOM_DEVICE_TV1_SUPPORT,
DRM_MODE_CONNECTOR_SVIDEO,
- &ddc_i2c);
+ &ddc_i2c,
+ CONNECTOR_OBJECT_ID_SVIDEO);
break;
case CT_POWERBOOK_VGA:
DRM_INFO("Connector Table: %d (powerbook vga)\n",
@@ -1366,7 +1467,8 @@ bool radeon_get_legacy_connector_info_from_table(struct drm_device *dev)
0),
ATOM_DEVICE_LCD1_SUPPORT);
radeon_add_legacy_connector(dev, 0, ATOM_DEVICE_LCD1_SUPPORT,
- DRM_MODE_CONNECTOR_LVDS, &ddc_i2c);
+ DRM_MODE_CONNECTOR_LVDS, &ddc_i2c,
+ CONNECTOR_OBJECT_ID_LVDS);
/* VGA - primary dac */
ddc_i2c = combios_setup_i2c_bus(RADEON_GPIO_VGA_DDC);
radeon_add_legacy_encoder(dev,
@@ -1375,7 +1477,8 @@ bool radeon_get_legacy_connector_info_from_table(struct drm_device *dev)
1),
ATOM_DEVICE_CRT1_SUPPORT);
radeon_add_legacy_connector(dev, 1, ATOM_DEVICE_CRT1_SUPPORT,
- DRM_MODE_CONNECTOR_VGA, &ddc_i2c);
+ DRM_MODE_CONNECTOR_VGA, &ddc_i2c,
+ CONNECTOR_OBJECT_ID_VGA);
/* TV - TV DAC */
radeon_add_legacy_encoder(dev,
radeon_get_encoder_id(dev,
@@ -1384,7 +1487,8 @@ bool radeon_get_legacy_connector_info_from_table(struct drm_device *dev)
ATOM_DEVICE_TV1_SUPPORT);
radeon_add_legacy_connector(dev, 2, ATOM_DEVICE_TV1_SUPPORT,
DRM_MODE_CONNECTOR_SVIDEO,
- &ddc_i2c);
+ &ddc_i2c,
+ CONNECTOR_OBJECT_ID_SVIDEO);
break;
case CT_MINI_EXTERNAL:
DRM_INFO("Connector Table: %d (mini external tmds)\n",
@@ -1401,10 +1505,12 @@ bool radeon_get_legacy_connector_info_from_table(struct drm_device *dev)
ATOM_DEVICE_CRT2_SUPPORT,
2),
ATOM_DEVICE_CRT2_SUPPORT);
+ /* XXX are any DL? */
radeon_add_legacy_connector(dev, 0,
ATOM_DEVICE_DFP2_SUPPORT |
ATOM_DEVICE_CRT2_SUPPORT,
- DRM_MODE_CONNECTOR_DVII, &ddc_i2c);
+ DRM_MODE_CONNECTOR_DVII, &ddc_i2c,
+ CONNECTOR_OBJECT_ID_SINGLE_LINK_DVI_I);
/* TV - TV DAC */
radeon_add_legacy_encoder(dev,
radeon_get_encoder_id(dev,
@@ -1413,7 +1519,8 @@ bool radeon_get_legacy_connector_info_from_table(struct drm_device *dev)
ATOM_DEVICE_TV1_SUPPORT);
radeon_add_legacy_connector(dev, 1, ATOM_DEVICE_TV1_SUPPORT,
DRM_MODE_CONNECTOR_SVIDEO,
- &ddc_i2c);
+ &ddc_i2c,
+ CONNECTOR_OBJECT_ID_SVIDEO);
break;
case CT_MINI_INTERNAL:
DRM_INFO("Connector Table: %d (mini internal tmds)\n",
@@ -1433,7 +1540,8 @@ bool radeon_get_legacy_connector_info_from_table(struct drm_device *dev)
radeon_add_legacy_connector(dev, 0,
ATOM_DEVICE_DFP1_SUPPORT |
ATOM_DEVICE_CRT2_SUPPORT,
- DRM_MODE_CONNECTOR_DVII, &ddc_i2c);
+ DRM_MODE_CONNECTOR_DVII, &ddc_i2c,
+ CONNECTOR_OBJECT_ID_SINGLE_LINK_DVI_I);
/* TV - TV DAC */
radeon_add_legacy_encoder(dev,
radeon_get_encoder_id(dev,
@@ -1442,7 +1550,8 @@ bool radeon_get_legacy_connector_info_from_table(struct drm_device *dev)
ATOM_DEVICE_TV1_SUPPORT);
radeon_add_legacy_connector(dev, 1, ATOM_DEVICE_TV1_SUPPORT,
DRM_MODE_CONNECTOR_SVIDEO,
- &ddc_i2c);
+ &ddc_i2c,
+ CONNECTOR_OBJECT_ID_SVIDEO);
break;
case CT_IMAC_G5_ISIGHT:
DRM_INFO("Connector Table: %d (imac g5 isight)\n",
@@ -1455,7 +1564,8 @@ bool radeon_get_legacy_connector_info_from_table(struct drm_device *dev)
0),
ATOM_DEVICE_DFP1_SUPPORT);
radeon_add_legacy_connector(dev, 0, ATOM_DEVICE_DFP1_SUPPORT,
- DRM_MODE_CONNECTOR_DVID, &ddc_i2c);
+ DRM_MODE_CONNECTOR_DVID, &ddc_i2c,
+ CONNECTOR_OBJECT_ID_SINGLE_LINK_DVI_D);
/* VGA - tv dac */
ddc_i2c = combios_setup_i2c_bus(RADEON_GPIO_DVI_DDC);
radeon_add_legacy_encoder(dev,
@@ -1464,7 +1574,8 @@ bool radeon_get_legacy_connector_info_from_table(struct drm_device *dev)
2),
ATOM_DEVICE_CRT2_SUPPORT);
radeon_add_legacy_connector(dev, 1, ATOM_DEVICE_CRT2_SUPPORT,
- DRM_MODE_CONNECTOR_VGA, &ddc_i2c);
+ DRM_MODE_CONNECTOR_VGA, &ddc_i2c,
+ CONNECTOR_OBJECT_ID_VGA);
/* TV - TV DAC */
radeon_add_legacy_encoder(dev,
radeon_get_encoder_id(dev,
@@ -1473,7 +1584,8 @@ bool radeon_get_legacy_connector_info_from_table(struct drm_device *dev)
ATOM_DEVICE_TV1_SUPPORT);
radeon_add_legacy_connector(dev, 2, ATOM_DEVICE_TV1_SUPPORT,
DRM_MODE_CONNECTOR_SVIDEO,
- &ddc_i2c);
+ &ddc_i2c,
+ CONNECTOR_OBJECT_ID_SVIDEO);
break;
case CT_EMAC:
DRM_INFO("Connector Table: %d (emac)\n",
@@ -1486,7 +1598,8 @@ bool radeon_get_legacy_connector_info_from_table(struct drm_device *dev)
1),
ATOM_DEVICE_CRT1_SUPPORT);
radeon_add_legacy_connector(dev, 0, ATOM_DEVICE_CRT1_SUPPORT,
- DRM_MODE_CONNECTOR_VGA, &ddc_i2c);
+ DRM_MODE_CONNECTOR_VGA, &ddc_i2c,
+ CONNECTOR_OBJECT_ID_VGA);
/* VGA - tv dac */
ddc_i2c = combios_setup_i2c_bus(RADEON_GPIO_CRT2_DDC);
radeon_add_legacy_encoder(dev,
@@ -1495,7 +1608,8 @@ bool radeon_get_legacy_connector_info_from_table(struct drm_device *dev)
2),
ATOM_DEVICE_CRT2_SUPPORT);
radeon_add_legacy_connector(dev, 1, ATOM_DEVICE_CRT2_SUPPORT,
- DRM_MODE_CONNECTOR_VGA, &ddc_i2c);
+ DRM_MODE_CONNECTOR_VGA, &ddc_i2c,
+ CONNECTOR_OBJECT_ID_VGA);
/* TV - TV DAC */
radeon_add_legacy_encoder(dev,
radeon_get_encoder_id(dev,
@@ -1504,7 +1618,8 @@ bool radeon_get_legacy_connector_info_from_table(struct drm_device *dev)
ATOM_DEVICE_TV1_SUPPORT);
radeon_add_legacy_connector(dev, 2, ATOM_DEVICE_TV1_SUPPORT,
DRM_MODE_CONNECTOR_SVIDEO,
- &ddc_i2c);
+ &ddc_i2c,
+ CONNECTOR_OBJECT_ID_SVIDEO);
break;
default:
DRM_INFO("Connector table: %d (invalid)\n",
@@ -1538,20 +1653,25 @@ static bool radeon_apply_legacy_quirks(struct drm_device *dev,
ddc_i2c->mask_data_mask = 0x80;
ddc_i2c->a_clk_mask = (0x20 << 8);
ddc_i2c->a_data_mask = 0x80;
- ddc_i2c->put_clk_mask = (0x20 << 8);
- ddc_i2c->put_data_mask = 0x80;
- ddc_i2c->get_clk_mask = (0x20 << 8);
- ddc_i2c->get_data_mask = 0x80;
+ ddc_i2c->en_clk_mask = (0x20 << 8);
+ ddc_i2c->en_data_mask = 0x80;
+ ddc_i2c->y_clk_mask = (0x20 << 8);
+ ddc_i2c->y_data_mask = 0x80;
ddc_i2c->mask_clk_reg = RADEON_GPIOPAD_MASK;
ddc_i2c->mask_data_reg = RADEON_GPIOPAD_MASK;
ddc_i2c->a_clk_reg = RADEON_GPIOPAD_A;
ddc_i2c->a_data_reg = RADEON_GPIOPAD_A;
- ddc_i2c->put_clk_reg = RADEON_GPIOPAD_EN;
- ddc_i2c->put_data_reg = RADEON_GPIOPAD_EN;
- ddc_i2c->get_clk_reg = RADEON_LCD_GPIO_Y_REG;
- ddc_i2c->get_data_reg = RADEON_LCD_GPIO_Y_REG;
+ ddc_i2c->en_clk_reg = RADEON_GPIOPAD_EN;
+ ddc_i2c->en_data_reg = RADEON_GPIOPAD_EN;
+ ddc_i2c->y_clk_reg = RADEON_GPIOPAD_Y;
+ ddc_i2c->y_data_reg = RADEON_GPIOPAD_Y;
}
+ /* R3xx+ chips don't have GPIO_CRT2_DDC gpio pad */
+ if ((rdev->family >= CHIP_R300) &&
+ ddc_i2c->mask_clk_reg == RADEON_GPIO_CRT2_DDC)
+ *ddc_i2c = combios_setup_i2c_bus(RADEON_GPIO_DVI_DDC);
+
/* Certain IBM chipset RN50s have a BIOS reporting two VGAs,
one with VGA DDC and one with CRT2 DDC. - kill the CRT2 DDC one */
if (dev->pdev->device == 0x515e &&
@@ -1581,11 +1701,69 @@ static bool radeon_apply_legacy_quirks(struct drm_device *dev,
return true;
}
+static bool radeon_apply_legacy_tv_quirks(struct drm_device *dev)
+{
+ /* Acer 5102 has non-existent TV port */
+ if (dev->pdev->device == 0x5975 &&
+ dev->pdev->subsystem_vendor == 0x1025 &&
+ dev->pdev->subsystem_device == 0x009f)
+ return false;
+
+ /* HP dc5750 has non-existent TV port */
+ if (dev->pdev->device == 0x5974 &&
+ dev->pdev->subsystem_vendor == 0x103c &&
+ dev->pdev->subsystem_device == 0x280a)
+ return false;
+
+ /* MSI S270 has non-existent TV port */
+ if (dev->pdev->device == 0x5955 &&
+ dev->pdev->subsystem_vendor == 0x1462 &&
+ dev->pdev->subsystem_device == 0x0131)
+ return false;
+
+ return true;
+}
+
+static uint16_t combios_check_dl_dvi(struct drm_device *dev, int is_dvi_d)
+{
+ struct radeon_device *rdev = dev->dev_private;
+ uint32_t ext_tmds_info;
+
+ if (rdev->flags & RADEON_IS_IGP) {
+ if (is_dvi_d)
+ return CONNECTOR_OBJECT_ID_SINGLE_LINK_DVI_D;
+ else
+ return CONNECTOR_OBJECT_ID_SINGLE_LINK_DVI_I;
+ }
+ ext_tmds_info = combios_get_table_offset(dev, COMBIOS_EXT_TMDS_INFO_TABLE);
+ if (ext_tmds_info) {
+ uint8_t rev = RBIOS8(ext_tmds_info);
+ uint8_t flags = RBIOS8(ext_tmds_info + 4 + 5);
+ if (rev >= 3) {
+ if (is_dvi_d)
+ return CONNECTOR_OBJECT_ID_DUAL_LINK_DVI_D;
+ else
+ return CONNECTOR_OBJECT_ID_DUAL_LINK_DVI_I;
+ } else {
+ if (flags & 1) {
+ if (is_dvi_d)
+ return CONNECTOR_OBJECT_ID_DUAL_LINK_DVI_D;
+ else
+ return CONNECTOR_OBJECT_ID_DUAL_LINK_DVI_I;
+ }
+ }
+ }
+ if (is_dvi_d)
+ return CONNECTOR_OBJECT_ID_SINGLE_LINK_DVI_D;
+ else
+ return CONNECTOR_OBJECT_ID_SINGLE_LINK_DVI_I;
+}
+
bool radeon_get_legacy_connector_info_from_bios(struct drm_device *dev)
{
struct radeon_device *rdev = dev->dev_private;
uint32_t conn_info, entry, devices;
- uint16_t tmp;
+ uint16_t tmp, connector_object_id;
enum radeon_combios_ddc ddc_type;
enum radeon_combios_connector connector;
int i = 0;
@@ -1628,8 +1806,9 @@ bool radeon_get_legacy_connector_info_from_bios(struct drm_device *dev)
break;
}
- radeon_apply_legacy_quirks(dev, i, &connector,
- &ddc_i2c);
+ if (!radeon_apply_legacy_quirks(dev, i, &connector,
+ &ddc_i2c))
+ continue;
switch (connector) {
case CONNECTOR_PROPRIETARY_LEGACY:
@@ -1644,7 +1823,8 @@ bool radeon_get_legacy_connector_info_from_bios(struct drm_device *dev)
radeon_add_legacy_connector(dev, i, devices,
legacy_connector_convert
[connector],
- &ddc_i2c);
+ &ddc_i2c,
+ CONNECTOR_OBJECT_ID_SINGLE_LINK_DVI_D);
break;
case CONNECTOR_CRT_LEGACY:
if (tmp & 0x1) {
@@ -1669,7 +1849,8 @@ bool radeon_get_legacy_connector_info_from_bios(struct drm_device *dev)
devices,
legacy_connector_convert
[connector],
- &ddc_i2c);
+ &ddc_i2c,
+ CONNECTOR_OBJECT_ID_VGA);
break;
case CONNECTOR_DVI_I_LEGACY:
devices = 0;
@@ -1698,6 +1879,7 @@ bool radeon_get_legacy_connector_info_from_bios(struct drm_device *dev)
ATOM_DEVICE_DFP2_SUPPORT,
0),
ATOM_DEVICE_DFP2_SUPPORT);
+ connector_object_id = combios_check_dl_dvi(dev, 0);
} else {
devices |= ATOM_DEVICE_DFP1_SUPPORT;
radeon_add_legacy_encoder(dev,
@@ -1706,19 +1888,24 @@ bool radeon_get_legacy_connector_info_from_bios(struct drm_device *dev)
ATOM_DEVICE_DFP1_SUPPORT,
0),
ATOM_DEVICE_DFP1_SUPPORT);
+ connector_object_id = CONNECTOR_OBJECT_ID_SINGLE_LINK_DVI_I;
}
radeon_add_legacy_connector(dev,
i,
devices,
legacy_connector_convert
[connector],
- &ddc_i2c);
+ &ddc_i2c,
+ connector_object_id);
break;
case CONNECTOR_DVI_D_LEGACY:
- if ((tmp >> 4) & 0x1)
+ if ((tmp >> 4) & 0x1) {
devices = ATOM_DEVICE_DFP2_SUPPORT;
- else
+ connector_object_id = combios_check_dl_dvi(dev, 1);
+ } else {
devices = ATOM_DEVICE_DFP1_SUPPORT;
+ connector_object_id = CONNECTOR_OBJECT_ID_SINGLE_LINK_DVI_I;
+ }
radeon_add_legacy_encoder(dev,
radeon_get_encoder_id
(dev, devices, 0),
@@ -1726,7 +1913,8 @@ bool radeon_get_legacy_connector_info_from_bios(struct drm_device *dev)
radeon_add_legacy_connector(dev, i, devices,
legacy_connector_convert
[connector],
- &ddc_i2c);
+ &ddc_i2c,
+ connector_object_id);
break;
case CONNECTOR_CTV_LEGACY:
case CONNECTOR_STV_LEGACY:
@@ -1740,7 +1928,8 @@ bool radeon_get_legacy_connector_info_from_bios(struct drm_device *dev)
ATOM_DEVICE_TV1_SUPPORT,
legacy_connector_convert
[connector],
- &ddc_i2c);
+ &ddc_i2c,
+ CONNECTOR_OBJECT_ID_SVIDEO);
break;
default:
DRM_ERROR("Unknown connector type: %d\n",
@@ -1772,10 +1961,29 @@ bool radeon_get_legacy_connector_info_from_bios(struct drm_device *dev)
ATOM_DEVICE_CRT1_SUPPORT |
ATOM_DEVICE_DFP1_SUPPORT,
DRM_MODE_CONNECTOR_DVII,
- &ddc_i2c);
+ &ddc_i2c,
+ CONNECTOR_OBJECT_ID_SINGLE_LINK_DVI_I);
} else {
- DRM_DEBUG("No connector info found\n");
- return false;
+ uint16_t crt_info =
+ combios_get_table_offset(dev, COMBIOS_CRT_INFO_TABLE);
+ DRM_DEBUG("Found CRT table, assuming VGA connector\n");
+ if (crt_info) {
+ radeon_add_legacy_encoder(dev,
+ radeon_get_encoder_id(dev,
+ ATOM_DEVICE_CRT1_SUPPORT,
+ 1),
+ ATOM_DEVICE_CRT1_SUPPORT);
+ ddc_i2c = combios_setup_i2c_bus(RADEON_GPIO_VGA_DDC);
+ radeon_add_legacy_connector(dev,
+ 0,
+ ATOM_DEVICE_CRT1_SUPPORT,
+ DRM_MODE_CONNECTOR_VGA,
+ &ddc_i2c,
+ CONNECTOR_OBJECT_ID_VGA);
+ } else {
+ DRM_DEBUG("No connector info found\n");
+ return false;
+ }
}
}
@@ -1828,13 +2036,13 @@ bool radeon_get_legacy_connector_info_from_bios(struct drm_device *dev)
RBIOS32(lcd_ddc_info + 3);
ddc_i2c.a_data_mask =
RBIOS32(lcd_ddc_info + 7);
- ddc_i2c.put_clk_mask =
+ ddc_i2c.en_clk_mask =
RBIOS32(lcd_ddc_info + 3);
- ddc_i2c.put_data_mask =
+ ddc_i2c.en_data_mask =
RBIOS32(lcd_ddc_info + 7);
- ddc_i2c.get_clk_mask =
+ ddc_i2c.y_clk_mask =
RBIOS32(lcd_ddc_info + 3);
- ddc_i2c.get_data_mask =
+ ddc_i2c.y_data_mask =
RBIOS32(lcd_ddc_info + 7);
break;
case DDC_GPIO:
@@ -1849,13 +2057,13 @@ bool radeon_get_legacy_connector_info_from_bios(struct drm_device *dev)
RBIOS32(lcd_ddc_info + 3);
ddc_i2c.a_data_mask =
RBIOS32(lcd_ddc_info + 7);
- ddc_i2c.put_clk_mask =
+ ddc_i2c.en_clk_mask =
RBIOS32(lcd_ddc_info + 3);
- ddc_i2c.put_data_mask =
+ ddc_i2c.en_data_mask =
RBIOS32(lcd_ddc_info + 7);
- ddc_i2c.get_clk_mask =
+ ddc_i2c.y_clk_mask =
RBIOS32(lcd_ddc_info + 3);
- ddc_i2c.get_data_mask =
+ ddc_i2c.y_data_mask =
RBIOS32(lcd_ddc_info + 7);
break;
default:
@@ -1870,7 +2078,8 @@ bool radeon_get_legacy_connector_info_from_bios(struct drm_device *dev)
5,
ATOM_DEVICE_LCD1_SUPPORT,
DRM_MODE_CONNECTOR_LVDS,
- &ddc_i2c);
+ &ddc_i2c,
+ CONNECTOR_OBJECT_ID_LVDS);
}
}
@@ -1880,16 +2089,19 @@ bool radeon_get_legacy_connector_info_from_bios(struct drm_device *dev)
combios_get_table_offset(dev, COMBIOS_TV_INFO_TABLE);
if (tv_info) {
if (RBIOS8(tv_info + 6) == 'T') {
- radeon_add_legacy_encoder(dev,
- radeon_get_encoder_id
- (dev,
- ATOM_DEVICE_TV1_SUPPORT,
- 2),
- ATOM_DEVICE_TV1_SUPPORT);
- radeon_add_legacy_connector(dev, 6,
- ATOM_DEVICE_TV1_SUPPORT,
- DRM_MODE_CONNECTOR_SVIDEO,
- &ddc_i2c);
+ if (radeon_apply_legacy_tv_quirks(dev)) {
+ radeon_add_legacy_encoder(dev,
+ radeon_get_encoder_id
+ (dev,
+ ATOM_DEVICE_TV1_SUPPORT,
+ 2),
+ ATOM_DEVICE_TV1_SUPPORT);
+ radeon_add_legacy_connector(dev, 6,
+ ATOM_DEVICE_TV1_SUPPORT,
+ DRM_MODE_CONNECTOR_SVIDEO,
+ &ddc_i2c,
+ CONNECTOR_OBJECT_ID_SVIDEO);
+ }
}
}
}
@@ -1899,6 +2111,193 @@ bool radeon_get_legacy_connector_info_from_bios(struct drm_device *dev)
return true;
}
+void radeon_external_tmds_setup(struct drm_encoder *encoder)
+{
+ struct radeon_encoder *radeon_encoder = to_radeon_encoder(encoder);
+ struct radeon_encoder_ext_tmds *tmds = radeon_encoder->enc_priv;
+
+ if (!tmds)
+ return;
+
+ switch (tmds->dvo_chip) {
+ case DVO_SIL164:
+ /* sil 164 */
+ radeon_i2c_do_lock(tmds->i2c_bus, 1);
+ radeon_i2c_sw_put_byte(tmds->i2c_bus,
+ tmds->slave_addr,
+ 0x08, 0x30);
+ radeon_i2c_sw_put_byte(tmds->i2c_bus,
+ tmds->slave_addr,
+ 0x09, 0x00);
+ radeon_i2c_sw_put_byte(tmds->i2c_bus,
+ tmds->slave_addr,
+ 0x0a, 0x90);
+ radeon_i2c_sw_put_byte(tmds->i2c_bus,
+ tmds->slave_addr,
+ 0x0c, 0x89);
+ radeon_i2c_sw_put_byte(tmds->i2c_bus,
+ tmds->slave_addr,
+ 0x08, 0x3b);
+ radeon_i2c_do_lock(tmds->i2c_bus, 0);
+ break;
+ case DVO_SIL1178:
+ /* sil 1178 - untested */
+ /*
+ * 0x0f, 0x44
+ * 0x0f, 0x4c
+ * 0x0e, 0x01
+ * 0x0a, 0x80
+ * 0x09, 0x30
+ * 0x0c, 0xc9
+ * 0x0d, 0x70
+ * 0x08, 0x32
+ * 0x08, 0x33
+ */
+ break;
+ default:
+ break;
+ }
+
+}
+
+bool radeon_combios_external_tmds_setup(struct drm_encoder *encoder)
+{
+ struct drm_device *dev = encoder->dev;
+ struct radeon_device *rdev = dev->dev_private;
+ struct radeon_encoder *radeon_encoder = to_radeon_encoder(encoder);
+ uint16_t offset;
+ uint8_t blocks, slave_addr, rev;
+ uint32_t index, id;
+ uint32_t reg, val, and_mask, or_mask;
+ struct radeon_encoder_ext_tmds *tmds = radeon_encoder->enc_priv;
+
+ if (rdev->bios == NULL)
+ return false;
+
+ if (!tmds)
+ return false;
+
+ if (rdev->flags & RADEON_IS_IGP) {
+ offset = combios_get_table_offset(dev, COMBIOS_TMDS_POWER_ON_TABLE);
+ if (offset) {
+ rev = RBIOS8(offset);
+ if (rev > 1) {
+ blocks = RBIOS8(offset + 3);
+ index = offset + 4;
+ while (blocks > 0) {
+ id = RBIOS16(index);
+ index += 2;
+ switch (id >> 13) {
+ case 0:
+ reg = (id & 0x1fff) * 4;
+ val = RBIOS32(index);
+ index += 4;
+ WREG32(reg, val);
+ break;
+ case 2:
+ reg = (id & 0x1fff) * 4;
+ and_mask = RBIOS32(index);
+ index += 4;
+ or_mask = RBIOS32(index);
+ index += 4;
+ val = RREG32(reg);
+ val = (val & and_mask) | or_mask;
+ WREG32(reg, val);
+ break;
+ case 3:
+ val = RBIOS16(index);
+ index += 2;
+ udelay(val);
+ break;
+ case 4:
+ val = RBIOS16(index);
+ index += 2;
+ udelay(val * 1000);
+ break;
+ case 6:
+ slave_addr = id & 0xff;
+ slave_addr >>= 1; /* 7 bit addressing */
+ index++;
+ reg = RBIOS8(index);
+ index++;
+ val = RBIOS8(index);
+ index++;
+ radeon_i2c_do_lock(tmds->i2c_bus, 1);
+ radeon_i2c_sw_put_byte(tmds->i2c_bus,
+ slave_addr,
+ reg, val);
+ radeon_i2c_do_lock(tmds->i2c_bus, 0);
+ break;
+ default:
+ DRM_ERROR("Unknown id %d\n", id >> 13);
+ break;
+ }
+ blocks--;
+ }
+ return true;
+ }
+ }
+ } else {
+ offset = combios_get_table_offset(dev, COMBIOS_EXT_TMDS_INFO_TABLE);
+ if (offset) {
+ index = offset + 10;
+ id = RBIOS16(index);
+ while (id != 0xffff) {
+ index += 2;
+ switch (id >> 13) {
+ case 0:
+ reg = (id & 0x1fff) * 4;
+ val = RBIOS32(index);
+ index += 4;
+ WREG32(reg, val);
+ break;
+ case 2:
+ reg = (id & 0x1fff) * 4;
+ and_mask = RBIOS32(index);
+ index += 4;
+ or_mask = RBIOS32(index);
+ index += 4;
+ val = RREG32(reg);
+ val = (val & and_mask) | or_mask;
+ WREG32(reg, val);
+ break;
+ case 4:
+ val = RBIOS16(index);
+ index += 2;
+ udelay(val);
+ break;
+ case 5:
+ reg = id & 0x1fff;
+ and_mask = RBIOS32(index);
+ index += 4;
+ or_mask = RBIOS32(index);
+ index += 4;
+ val = RREG32_PLL(reg);
+ val = (val & and_mask) | or_mask;
+ WREG32_PLL(reg, val);
+ break;
+ case 6:
+ reg = id & 0x1fff;
+ val = RBIOS8(index);
+ index += 1;
+ radeon_i2c_do_lock(tmds->i2c_bus, 1);
+ radeon_i2c_sw_put_byte(tmds->i2c_bus,
+ tmds->slave_addr,
+ reg, val);
+ radeon_i2c_do_lock(tmds->i2c_bus, 0);
+ break;
+ default:
+ DRM_ERROR("Unknown id %d\n", id >> 13);
+ break;
+ }
+ id = RBIOS16(index);
+ }
+ return true;
+ }
+ }
+ return false;
+}
+
static void combios_parse_mmio_table(struct drm_device *dev, uint16_t offset)
{
struct radeon_device *rdev = dev->dev_private;
diff --git a/drivers/gpu/drm/radeon/radeon_connectors.c b/drivers/gpu/drm/radeon/radeon_connectors.c
index e376be47a4a..7ab3c501b4d 100644
--- a/drivers/gpu/drm/radeon/radeon_connectors.c
+++ b/drivers/gpu/drm/radeon/radeon_connectors.c
@@ -178,25 +178,12 @@ static struct drm_display_mode *radeon_fp_native_mode(struct drm_encoder *encode
struct drm_device *dev = encoder->dev;
struct radeon_encoder *radeon_encoder = to_radeon_encoder(encoder);
struct drm_display_mode *mode = NULL;
- struct radeon_native_mode *native_mode = &radeon_encoder->native_mode;
-
- if (native_mode->panel_xres != 0 &&
- native_mode->panel_yres != 0 &&
- native_mode->dotclock != 0) {
- mode = drm_mode_create(dev);
-
- mode->hdisplay = native_mode->panel_xres;
- mode->vdisplay = native_mode->panel_yres;
-
- mode->htotal = mode->hdisplay + native_mode->hblank;
- mode->hsync_start = mode->hdisplay + native_mode->hoverplus;
- mode->hsync_end = mode->hsync_start + native_mode->hsync_width;
- mode->vtotal = mode->vdisplay + native_mode->vblank;
- mode->vsync_start = mode->vdisplay + native_mode->voverplus;
- mode->vsync_end = mode->vsync_start + native_mode->vsync_width;
- mode->clock = native_mode->dotclock;
- mode->flags = 0;
+ struct drm_display_mode *native_mode = &radeon_encoder->native_mode;
+ if (native_mode->hdisplay != 0 &&
+ native_mode->vdisplay != 0 &&
+ native_mode->clock != 0) {
+ mode = drm_mode_duplicate(dev, native_mode);
mode->type = DRM_MODE_TYPE_PREFERRED | DRM_MODE_TYPE_DRIVER;
drm_mode_set_name(mode);
@@ -210,7 +197,7 @@ static void radeon_add_common_modes(struct drm_encoder *encoder, struct drm_conn
struct drm_device *dev = encoder->dev;
struct radeon_encoder *radeon_encoder = to_radeon_encoder(encoder);
struct drm_display_mode *mode = NULL;
- struct radeon_native_mode *native_mode = &radeon_encoder->native_mode;
+ struct drm_display_mode *native_mode = &radeon_encoder->native_mode;
int i;
struct mode_size {
int w;
@@ -236,11 +223,16 @@ static void radeon_add_common_modes(struct drm_encoder *encoder, struct drm_conn
};
for (i = 0; i < 17; i++) {
+ if (radeon_encoder->devices & (ATOM_DEVICE_TV_SUPPORT)) {
+ if (common_modes[i].w > 1024 ||
+ common_modes[i].h > 768)
+ continue;
+ }
if (radeon_encoder->devices & (ATOM_DEVICE_LCD_SUPPORT)) {
- if (common_modes[i].w > native_mode->panel_xres ||
- common_modes[i].h > native_mode->panel_yres ||
- (common_modes[i].w == native_mode->panel_xres &&
- common_modes[i].h == native_mode->panel_yres))
+ if (common_modes[i].w > native_mode->hdisplay ||
+ common_modes[i].h > native_mode->vdisplay ||
+ (common_modes[i].w == native_mode->hdisplay &&
+ common_modes[i].h == native_mode->vdisplay))
continue;
}
if (common_modes[i].w < 320 || common_modes[i].h < 200)
@@ -344,28 +336,23 @@ static void radeon_fixup_lvds_native_mode(struct drm_encoder *encoder,
struct drm_connector *connector)
{
struct radeon_encoder *radeon_encoder = to_radeon_encoder(encoder);
- struct radeon_native_mode *native_mode = &radeon_encoder->native_mode;
+ struct drm_display_mode *native_mode = &radeon_encoder->native_mode;
/* Try to get native mode details from EDID if necessary */
- if (!native_mode->dotclock) {
+ if (!native_mode->clock) {
struct drm_display_mode *t, *mode;
list_for_each_entry_safe(mode, t, &connector->probed_modes, head) {
- if (mode->hdisplay == native_mode->panel_xres &&
- mode->vdisplay == native_mode->panel_yres) {
- native_mode->hblank = mode->htotal - mode->hdisplay;
- native_mode->hoverplus = mode->hsync_start - mode->hdisplay;
- native_mode->hsync_width = mode->hsync_end - mode->hsync_start;
- native_mode->vblank = mode->vtotal - mode->vdisplay;
- native_mode->voverplus = mode->vsync_start - mode->vdisplay;
- native_mode->vsync_width = mode->vsync_end - mode->vsync_start;
- native_mode->dotclock = mode->clock;
+ if (mode->hdisplay == native_mode->hdisplay &&
+ mode->vdisplay == native_mode->vdisplay) {
+ *native_mode = *mode;
+ drm_mode_set_crtcinfo(native_mode, CRTC_INTERLACE_HALVE_V);
DRM_INFO("Determined LVDS native mode details from EDID\n");
break;
}
}
}
- if (!native_mode->dotclock) {
+ if (!native_mode->clock) {
DRM_INFO("No LVDS native mode details, disabling RMX\n");
radeon_encoder->rmx_type = RMX_OFF;
}
@@ -410,13 +397,64 @@ static int radeon_lvds_get_modes(struct drm_connector *connector)
static int radeon_lvds_mode_valid(struct drm_connector *connector,
struct drm_display_mode *mode)
{
+ struct drm_encoder *encoder = radeon_best_single_encoder(connector);
+
+ if ((mode->hdisplay < 320) || (mode->vdisplay < 240))
+ return MODE_PANEL;
+
+ if (encoder) {
+ struct radeon_encoder *radeon_encoder = to_radeon_encoder(encoder);
+ struct drm_display_mode *native_mode = &radeon_encoder->native_mode;
+
+ /* AVIVO hardware supports downscaling modes larger than the panel
+ * to the panel size, but I'm not sure this is desirable.
+ */
+ if ((mode->hdisplay > native_mode->hdisplay) ||
+ (mode->vdisplay > native_mode->vdisplay))
+ return MODE_PANEL;
+
+ /* if scaling is disabled, block non-native modes */
+ if (radeon_encoder->rmx_type == RMX_OFF) {
+ if ((mode->hdisplay != native_mode->hdisplay) ||
+ (mode->vdisplay != native_mode->vdisplay))
+ return MODE_PANEL;
+ }
+ }
+
return MODE_OK;
}
static enum drm_connector_status radeon_lvds_detect(struct drm_connector *connector)
{
- enum drm_connector_status ret = connector_status_connected;
+ struct radeon_connector *radeon_connector = to_radeon_connector(connector);
+ struct drm_encoder *encoder = radeon_best_single_encoder(connector);
+ enum drm_connector_status ret = connector_status_disconnected;
+
+ if (encoder) {
+ struct radeon_encoder *radeon_encoder = to_radeon_encoder(encoder);
+ struct drm_display_mode *native_mode = &radeon_encoder->native_mode;
+
+ /* check if panel is valid */
+ if (native_mode->hdisplay >= 320 && native_mode->vdisplay >= 240)
+ ret = connector_status_connected;
+
+ }
+
+ /* check for edid as well */
+ if (radeon_connector->edid)
+ ret = connector_status_connected;
+ else {
+ if (radeon_connector->ddc_bus) {
+ radeon_i2c_do_lock(radeon_connector->ddc_bus, 1);
+ radeon_connector->edid = drm_get_edid(&radeon_connector->base,
+ &radeon_connector->ddc_bus->adapter);
+ radeon_i2c_do_lock(radeon_connector->ddc_bus, 0);
+ if (radeon_connector->edid)
+ ret = connector_status_connected;
+ }
+ }
/* check acpi lid status ??? */
+
radeon_connector_update_scratch_regs(connector, ret);
return ret;
}
@@ -427,6 +465,8 @@ static void radeon_connector_destroy(struct drm_connector *connector)
if (radeon_connector->ddc_bus)
radeon_i2c_destroy(radeon_connector->ddc_bus);
+ if (radeon_connector->edid)
+ kfree(radeon_connector->edid);
kfree(radeon_connector->con_priv);
drm_sysfs_connector_remove(connector);
drm_connector_cleanup(connector);
@@ -496,6 +536,8 @@ static int radeon_vga_get_modes(struct drm_connector *connector)
static int radeon_vga_mode_valid(struct drm_connector *connector,
struct drm_display_mode *mode)
{
+ /* XXX check mode bandwidth */
+ /* XXX verify against max DAC output frequency */
return MODE_OK;
}
@@ -511,12 +553,36 @@ static enum drm_connector_status radeon_vga_detect(struct drm_connector *connect
if (!encoder)
ret = connector_status_disconnected;
- radeon_i2c_do_lock(radeon_connector, 1);
+ radeon_i2c_do_lock(radeon_connector->ddc_bus, 1);
dret = radeon_ddc_probe(radeon_connector);
- radeon_i2c_do_lock(radeon_connector, 0);
- if (dret)
- ret = connector_status_connected;
- else {
+ radeon_i2c_do_lock(radeon_connector->ddc_bus, 0);
+ if (dret) {
+ if (radeon_connector->edid) {
+ kfree(radeon_connector->edid);
+ radeon_connector->edid = NULL;
+ }
+ radeon_i2c_do_lock(radeon_connector->ddc_bus, 1);
+ radeon_connector->edid = drm_get_edid(&radeon_connector->base, &radeon_connector->ddc_bus->adapter);
+ radeon_i2c_do_lock(radeon_connector->ddc_bus, 0);
+
+ if (!radeon_connector->edid) {
+ DRM_ERROR("%s: probed a monitor but no|invalid EDID\n",
+ drm_get_connector_name(connector));
+ ret = connector_status_connected;
+ } else {
+ radeon_connector->use_digital = !!(radeon_connector->edid->input & DRM_EDID_INPUT_DIGITAL);
+
+ /* some oems have boards with separate digital and analog connectors
+ * with a shared ddc line (often vga + hdmi)
+ */
+ if (radeon_connector->use_digital && radeon_connector->shared_ddc) {
+ kfree(radeon_connector->edid);
+ radeon_connector->edid = NULL;
+ ret = connector_status_disconnected;
+ } else
+ ret = connector_status_connected;
+ }
+ } else {
if (radeon_connector->dac_load_detect) {
encoder_funcs = encoder->helper_private;
ret = encoder_funcs->detect(encoder, connector);
@@ -570,6 +636,8 @@ static int radeon_tv_get_modes(struct drm_connector *connector)
static int radeon_tv_mode_valid(struct drm_connector *connector,
struct drm_display_mode *mode)
{
+ if ((mode->hdisplay > 1024) || (mode->vdisplay > 768))
+ return MODE_CLOCK_RANGE;
return MODE_OK;
}
@@ -640,24 +708,66 @@ static enum drm_connector_status radeon_dvi_detect(struct drm_connector *connect
enum drm_connector_status ret = connector_status_disconnected;
bool dret;
- radeon_i2c_do_lock(radeon_connector, 1);
+ radeon_i2c_do_lock(radeon_connector->ddc_bus, 1);
dret = radeon_ddc_probe(radeon_connector);
- radeon_i2c_do_lock(radeon_connector, 0);
+ radeon_i2c_do_lock(radeon_connector->ddc_bus, 0);
if (dret) {
- radeon_i2c_do_lock(radeon_connector, 1);
+ if (radeon_connector->edid) {
+ kfree(radeon_connector->edid);
+ radeon_connector->edid = NULL;
+ }
+ radeon_i2c_do_lock(radeon_connector->ddc_bus, 1);
radeon_connector->edid = drm_get_edid(&radeon_connector->base, &radeon_connector->ddc_bus->adapter);
- radeon_i2c_do_lock(radeon_connector, 0);
+ radeon_i2c_do_lock(radeon_connector->ddc_bus, 0);
if (!radeon_connector->edid) {
- DRM_ERROR("DDC responded but not EDID found for %s\n",
- drm_get_connector_name(connector));
+ DRM_ERROR("%s: probed a monitor but no|invalid EDID\n",
+ drm_get_connector_name(connector));
} else {
radeon_connector->use_digital = !!(radeon_connector->edid->input & DRM_EDID_INPUT_DIGITAL);
- /* if this isn't a digital monitor
- then we need to make sure we don't have any
- TV conflicts */
- ret = connector_status_connected;
+ /* some oems have boards with separate digital and analog connectors
+ * with a shared ddc line (often vga + hdmi)
+ */
+ if ((!radeon_connector->use_digital) && radeon_connector->shared_ddc) {
+ kfree(radeon_connector->edid);
+ radeon_connector->edid = NULL;
+ ret = connector_status_disconnected;
+ } else
+ ret = connector_status_connected;
+
+ /* multiple connectors on the same encoder with the same ddc line
+ * This tends to be HDMI and DVI on the same encoder with the
+ * same ddc line. If the edid says HDMI, consider the HDMI port
+ * connected and the DVI port disconnected. If the edid doesn't
+ * say HDMI, vice versa.
+ */
+ if (radeon_connector->shared_ddc && (ret == connector_status_connected)) {
+ struct drm_device *dev = connector->dev;
+ struct drm_connector *list_connector;
+ struct radeon_connector *list_radeon_connector;
+ list_for_each_entry(list_connector, &dev->mode_config.connector_list, head) {
+ if (connector == list_connector)
+ continue;
+ list_radeon_connector = to_radeon_connector(list_connector);
+ if (radeon_connector->devices == list_radeon_connector->devices) {
+ if (drm_detect_hdmi_monitor(radeon_connector->edid)) {
+ if (connector->connector_type == DRM_MODE_CONNECTOR_DVID) {
+ kfree(radeon_connector->edid);
+ radeon_connector->edid = NULL;
+ ret = connector_status_disconnected;
+ }
+ } else {
+ if ((connector->connector_type == DRM_MODE_CONNECTOR_HDMIA) ||
+ (connector->connector_type == DRM_MODE_CONNECTOR_HDMIB)) {
+ kfree(radeon_connector->edid);
+ radeon_connector->edid = NULL;
+ ret = connector_status_disconnected;
+ }
+ }
+ }
+ }
+ }
}
}
@@ -753,9 +863,27 @@ static void radeon_dvi_force(struct drm_connector *connector)
radeon_connector->use_digital = true;
}
+static int radeon_dvi_mode_valid(struct drm_connector *connector,
+ struct drm_display_mode *mode)
+{
+ struct radeon_connector *radeon_connector = to_radeon_connector(connector);
+
+ /* XXX check mode bandwidth */
+
+ if (radeon_connector->use_digital && (mode->clock > 165000)) {
+ if ((radeon_connector->connector_object_id == CONNECTOR_OBJECT_ID_DUAL_LINK_DVI_I) ||
+ (radeon_connector->connector_object_id == CONNECTOR_OBJECT_ID_DUAL_LINK_DVI_D) ||
+ (radeon_connector->connector_object_id == CONNECTOR_OBJECT_ID_HDMI_TYPE_B))
+ return MODE_OK;
+ else
+ return MODE_CLOCK_HIGH;
+ }
+ return MODE_OK;
+}
+
struct drm_connector_helper_funcs radeon_dvi_connector_helper_funcs = {
.get_modes = radeon_dvi_get_modes,
- .mode_valid = radeon_vga_mode_valid,
+ .mode_valid = radeon_dvi_mode_valid,
.best_encoder = radeon_dvi_encoder,
};
@@ -775,13 +903,15 @@ radeon_add_atom_connector(struct drm_device *dev,
int connector_type,
struct radeon_i2c_bus_rec *i2c_bus,
bool linkb,
- uint32_t igp_lane_info)
+ uint32_t igp_lane_info,
+ uint16_t connector_object_id)
{
struct radeon_device *rdev = dev->dev_private;
struct drm_connector *connector;
struct radeon_connector *radeon_connector;
struct radeon_connector_atom_dig *radeon_dig_connector;
uint32_t subpixel_order = SubPixelNone;
+ bool shared_ddc = false;
int ret;
/* fixme - tv/cv/din */
@@ -795,6 +925,13 @@ radeon_add_atom_connector(struct drm_device *dev,
radeon_connector->devices |= supported_device;
return;
}
+ if (radeon_connector->ddc_bus && i2c_bus->valid) {
+ if (memcmp(&radeon_connector->ddc_bus->rec, i2c_bus,
+ sizeof(struct radeon_i2c_bus_rec)) == 0) {
+ radeon_connector->shared_ddc = true;
+ shared_ddc = true;
+ }
+ }
}
radeon_connector = kzalloc(sizeof(struct radeon_connector), GFP_KERNEL);
@@ -805,6 +942,8 @@ radeon_add_atom_connector(struct drm_device *dev,
radeon_connector->connector_id = connector_id;
radeon_connector->devices = supported_device;
+ radeon_connector->shared_ddc = shared_ddc;
+ radeon_connector->connector_object_id = connector_object_id;
switch (connector_type) {
case DRM_MODE_CONNECTOR_VGA:
drm_connector_init(dev, &radeon_connector->base, &radeon_vga_connector_funcs, connector_type);
@@ -914,6 +1053,9 @@ radeon_add_atom_connector(struct drm_device *dev,
drm_connector_attach_property(&radeon_connector->base,
rdev->mode_info.load_detect_property,
1);
+ drm_connector_attach_property(&radeon_connector->base,
+ rdev->mode_info.tv_std_property,
+ 1);
}
break;
case DRM_MODE_CONNECTOR_LVDS:
@@ -956,7 +1098,8 @@ radeon_add_legacy_connector(struct drm_device *dev,
uint32_t connector_id,
uint32_t supported_device,
int connector_type,
- struct radeon_i2c_bus_rec *i2c_bus)
+ struct radeon_i2c_bus_rec *i2c_bus,
+ uint16_t connector_object_id)
{
struct radeon_device *rdev = dev->dev_private;
struct drm_connector *connector;
@@ -985,6 +1128,7 @@ radeon_add_legacy_connector(struct drm_device *dev,
radeon_connector->connector_id = connector_id;
radeon_connector->devices = supported_device;
+ radeon_connector->connector_object_id = connector_object_id;
switch (connector_type) {
case DRM_MODE_CONNECTOR_VGA:
drm_connector_init(dev, &radeon_connector->base, &radeon_vga_connector_funcs, connector_type);
@@ -1042,9 +1186,19 @@ radeon_add_legacy_connector(struct drm_device *dev,
if (ret)
goto failed;
radeon_connector->dac_load_detect = true;
+ /* RS400, RC410 and RS480 chipsets seem to report a lot
+ * of false positives on load detect; we haven't yet
+ * found a way to make load detect reliable on those
+ * chipsets, so just disable it for TV.
+ */
+ if (rdev->family == CHIP_RS400 || rdev->family == CHIP_RS480)
+ radeon_connector->dac_load_detect = false;
drm_connector_attach_property(&radeon_connector->base,
rdev->mode_info.load_detect_property,
1);
+ drm_connector_attach_property(&radeon_connector->base,
+ rdev->mode_info.tv_std_property,
+ 1);
}
break;
case DRM_MODE_CONNECTOR_LVDS:
diff --git a/drivers/gpu/drm/radeon/radeon_cs.c b/drivers/gpu/drm/radeon/radeon_cs.c
index 5ab2cf96a26..65590a0f1d9 100644
--- a/drivers/gpu/drm/radeon/radeon_cs.c
+++ b/drivers/gpu/drm/radeon/radeon_cs.c
@@ -76,17 +76,17 @@ int radeon_cs_parser_relocs(struct radeon_cs_parser *p)
}
p->relocs_ptr[i] = &p->relocs[i];
p->relocs[i].robj = p->relocs[i].gobj->driver_private;
- p->relocs[i].lobj.robj = p->relocs[i].robj;
+ p->relocs[i].lobj.bo = p->relocs[i].robj;
p->relocs[i].lobj.rdomain = r->read_domains;
p->relocs[i].lobj.wdomain = r->write_domain;
p->relocs[i].handle = r->handle;
p->relocs[i].flags = r->flags;
INIT_LIST_HEAD(&p->relocs[i].lobj.list);
- radeon_object_list_add_object(&p->relocs[i].lobj,
- &p->validated);
+ radeon_bo_list_add_object(&p->relocs[i].lobj,
+ &p->validated);
}
}
- return radeon_object_list_validate(&p->validated, p->ib->fence);
+ return radeon_bo_list_validate(&p->validated, p->ib->fence);
}
int radeon_cs_parser_init(struct radeon_cs_parser *p, void *data)
@@ -190,9 +190,10 @@ static void radeon_cs_parser_fini(struct radeon_cs_parser *parser, int error)
unsigned i;
if (error) {
- radeon_object_list_unvalidate(&parser->validated);
+ radeon_bo_list_unvalidate(&parser->validated,
+ parser->ib->fence);
} else {
- radeon_object_list_clean(&parser->validated);
+ radeon_bo_list_unreserve(&parser->validated);
}
for (i = 0; i < parser->nrelocs; i++) {
if (parser->relocs[i].gobj) {
diff --git a/drivers/gpu/drm/radeon/radeon_cursor.c b/drivers/gpu/drm/radeon/radeon_cursor.c
index b13c79e38bc..28772a37009 100644
--- a/drivers/gpu/drm/radeon/radeon_cursor.c
+++ b/drivers/gpu/drm/radeon/radeon_cursor.c
@@ -109,9 +109,15 @@ static void radeon_set_cursor(struct drm_crtc *crtc, struct drm_gem_object *obj,
struct radeon_crtc *radeon_crtc = to_radeon_crtc(crtc);
struct radeon_device *rdev = crtc->dev->dev_private;
- if (ASIC_IS_AVIVO(rdev))
+ if (ASIC_IS_AVIVO(rdev)) {
+ if (rdev->family >= CHIP_RV770) {
+ if (radeon_crtc->crtc_id)
+ WREG32(R700_D2CUR_SURFACE_ADDRESS_HIGH, 0);
+ else
+ WREG32(R700_D1CUR_SURFACE_ADDRESS_HIGH, 0);
+ }
WREG32(AVIVO_D1CUR_SURFACE_ADDRESS + radeon_crtc->crtc_offset, gpu_addr);
- else {
+ } else {
radeon_crtc->legacy_cursor_offset = gpu_addr - radeon_crtc->legacy_display_base_addr;
/* offset is from DISP(2)_BASE_ADDRESS */
WREG32(RADEON_CUR_OFFSET + radeon_crtc->crtc_offset, radeon_crtc->legacy_cursor_offset);
diff --git a/drivers/gpu/drm/radeon/radeon_device.c b/drivers/gpu/drm/radeon/radeon_device.c
index df988142e6b..a014ba4cc97 100644
--- a/drivers/gpu/drm/radeon/radeon_device.c
+++ b/drivers/gpu/drm/radeon/radeon_device.c
@@ -208,6 +208,24 @@ bool radeon_card_posted(struct radeon_device *rdev)
}
+bool radeon_boot_test_post_card(struct radeon_device *rdev)
+{
+ if (radeon_card_posted(rdev))
+ return true;
+
+ if (rdev->bios) {
+ DRM_INFO("GPU not posted. posting now...\n");
+ if (rdev->is_atom_bios)
+ atom_asic_init(rdev->mode_info.atom_context);
+ else
+ radeon_combios_asic_init(rdev->ddev);
+ return true;
+ } else {
+ dev_err(rdev->dev, "Card not posted and no BIOS - ignoring\n");
+ return false;
+ }
+}
+
int radeon_dummy_page_init(struct radeon_device *rdev)
{
rdev->dummy_page.page = alloc_page(GFP_DMA32 | GFP_KERNEL | __GFP_ZERO);
@@ -444,20 +462,24 @@ static uint32_t cail_reg_read(struct card_info *info, uint32_t reg)
return r;
}
-static struct card_info atom_card_info = {
- .dev = NULL,
- .reg_read = cail_reg_read,
- .reg_write = cail_reg_write,
- .mc_read = cail_mc_read,
- .mc_write = cail_mc_write,
- .pll_read = cail_pll_read,
- .pll_write = cail_pll_write,
-};
-
int radeon_atombios_init(struct radeon_device *rdev)
{
- atom_card_info.dev = rdev->ddev;
- rdev->mode_info.atom_context = atom_parse(&atom_card_info, rdev->bios);
+ struct card_info *atom_card_info =
+ kzalloc(sizeof(struct card_info), GFP_KERNEL);
+
+ if (!atom_card_info)
+ return -ENOMEM;
+
+ rdev->mode_info.atom_card_info = atom_card_info;
+ atom_card_info->dev = rdev->ddev;
+ atom_card_info->reg_read = cail_reg_read;
+ atom_card_info->reg_write = cail_reg_write;
+ atom_card_info->mc_read = cail_mc_read;
+ atom_card_info->mc_write = cail_mc_write;
+ atom_card_info->pll_read = cail_pll_read;
+ atom_card_info->pll_write = cail_pll_write;
+
+ rdev->mode_info.atom_context = atom_parse(atom_card_info, rdev->bios);
radeon_atom_initialize_bios_scratch_regs(rdev->ddev);
return 0;
}
@@ -465,6 +487,7 @@ int radeon_atombios_init(struct radeon_device *rdev)
void radeon_atombios_fini(struct radeon_device *rdev)
{
kfree(rdev->mode_info.atom_context);
+ kfree(rdev->mode_info.atom_card_info);
}
int radeon_combios_init(struct radeon_device *rdev)
@@ -539,6 +562,9 @@ int radeon_device_init(struct radeon_device *rdev,
mutex_init(&rdev->cs_mutex);
mutex_init(&rdev->ib_pool.mutex);
mutex_init(&rdev->cp.mutex);
+ if (rdev->family >= CHIP_R600)
+ spin_lock_init(&rdev->ih.lock);
+ mutex_init(&rdev->gem.mutex);
rwlock_init(&rdev->fence_drv.lock);
INIT_LIST_HEAD(&rdev->gem.objects);
@@ -548,7 +574,7 @@ int radeon_device_init(struct radeon_device *rdev,
return r;
}
- if (radeon_agpmode == -1) {
+ if (rdev->flags & RADEON_IS_AGP && radeon_agpmode == -1) {
radeon_agp_disable(rdev);
}
@@ -628,6 +654,7 @@ int radeon_suspend_kms(struct drm_device *dev, pm_message_t state)
{
struct radeon_device *rdev = dev->dev_private;
struct drm_crtc *crtc;
+ int r;
if (dev == NULL || rdev == NULL) {
return -ENODEV;
@@ -638,18 +665,22 @@ int radeon_suspend_kms(struct drm_device *dev, pm_message_t state)
/* unpin the front buffers */
list_for_each_entry(crtc, &dev->mode_config.crtc_list, head) {
struct radeon_framebuffer *rfb = to_radeon_framebuffer(crtc->fb);
- struct radeon_object *robj;
+ struct radeon_bo *robj;
if (rfb == NULL || rfb->obj == NULL) {
continue;
}
robj = rfb->obj->driver_private;
- if (robj != rdev->fbdev_robj) {
- radeon_object_unpin(robj);
+ if (robj != rdev->fbdev_rbo) {
+ r = radeon_bo_reserve(robj, false);
+ if (unlikely(r == 0)) {
+ radeon_bo_unpin(robj);
+ radeon_bo_unreserve(robj);
+ }
}
}
/* evict vram memory */
- radeon_object_evict_vram(rdev);
+ radeon_bo_evict_vram(rdev);
/* wait for gpu to finish processing current batch */
radeon_fence_wait_last(rdev);
@@ -657,7 +688,7 @@ int radeon_suspend_kms(struct drm_device *dev, pm_message_t state)
radeon_suspend(rdev);
/* evict remaining vram memory */
- radeon_object_evict_vram(rdev);
+ radeon_bo_evict_vram(rdev);
pci_save_state(dev->pdev);
if (state.event == PM_EVENT_SUSPEND) {
@@ -683,6 +714,8 @@ int radeon_resume_kms(struct drm_device *dev)
return -1;
}
pci_set_master(dev->pdev);
+ /* resume AGP if in use */
+ radeon_agp_resume(rdev);
radeon_resume(rdev);
radeon_restore_bios_scratch_regs(rdev);
fb_set_suspend(rdev->fbdev_info, 0);
diff --git a/drivers/gpu/drm/radeon/radeon_display.c b/drivers/gpu/drm/radeon/radeon_display.c
index 3655d91993a..62b02372cb0 100644
--- a/drivers/gpu/drm/radeon/radeon_display.c
+++ b/drivers/gpu/drm/radeon/radeon_display.c
@@ -137,9 +137,6 @@ static void radeon_crtc_gamma_set(struct drm_crtc *crtc, u16 *red, u16 *green,
if (size != 256) {
return;
}
- if (crtc->fb == NULL) {
- return;
- }
/* userspace palettes are always correct as is */
for (i = 0; i < 256; i++) {
@@ -147,7 +144,6 @@ static void radeon_crtc_gamma_set(struct drm_crtc *crtc, u16 *red, u16 *green,
radeon_crtc->lut_g[i] = green[i] >> 6;
radeon_crtc->lut_b[i] = blue[i] >> 6;
}
-
radeon_crtc_load_lut(crtc);
}
@@ -274,10 +270,10 @@ static void radeon_print_display_setup(struct drm_device *dev)
radeon_connector->ddc_bus->rec.mask_data_reg,
radeon_connector->ddc_bus->rec.a_clk_reg,
radeon_connector->ddc_bus->rec.a_data_reg,
- radeon_connector->ddc_bus->rec.put_clk_reg,
- radeon_connector->ddc_bus->rec.put_data_reg,
- radeon_connector->ddc_bus->rec.get_clk_reg,
- radeon_connector->ddc_bus->rec.get_data_reg);
+ radeon_connector->ddc_bus->rec.en_clk_reg,
+ radeon_connector->ddc_bus->rec.en_data_reg,
+ radeon_connector->ddc_bus->rec.y_clk_reg,
+ radeon_connector->ddc_bus->rec.y_data_reg);
DRM_INFO(" Encoders:\n");
list_for_each_entry(encoder, &dev->mode_config.encoder_list, head) {
radeon_encoder = to_radeon_encoder(encoder);
@@ -328,6 +324,7 @@ static bool radeon_setup_enc_conn(struct drm_device *dev)
ret = radeon_get_legacy_connector_info_from_table(dev);
}
if (ret) {
+ radeon_setup_encoder_clones(dev);
radeon_print_display_setup(dev);
list_for_each_entry(drm_connector, &dev->mode_config.connector_list, head)
radeon_ddc_dump(drm_connector);
@@ -338,27 +335,19 @@ static bool radeon_setup_enc_conn(struct drm_device *dev)
int radeon_ddc_get_modes(struct radeon_connector *radeon_connector)
{
- struct edid *edid;
int ret = 0;
if (!radeon_connector->ddc_bus)
return -1;
if (!radeon_connector->edid) {
- radeon_i2c_do_lock(radeon_connector, 1);
- edid = drm_get_edid(&radeon_connector->base, &radeon_connector->ddc_bus->adapter);
- radeon_i2c_do_lock(radeon_connector, 0);
- } else
- edid = radeon_connector->edid;
+ radeon_i2c_do_lock(radeon_connector->ddc_bus, 1);
+ radeon_connector->edid = drm_get_edid(&radeon_connector->base, &radeon_connector->ddc_bus->adapter);
+ radeon_i2c_do_lock(radeon_connector->ddc_bus, 0);
+ }
- if (edid) {
- /* update digital bits here */
- if (edid->input & DRM_EDID_INPUT_DIGITAL)
- radeon_connector->use_digital = 1;
- else
- radeon_connector->use_digital = 0;
- drm_mode_connector_update_edid_property(&radeon_connector->base, edid);
- ret = drm_add_edid_modes(&radeon_connector->base, edid);
- kfree(edid);
+ if (radeon_connector->edid) {
+ drm_mode_connector_update_edid_property(&radeon_connector->base, radeon_connector->edid);
+ ret = drm_add_edid_modes(&radeon_connector->base, radeon_connector->edid);
return ret;
}
drm_mode_connector_update_edid_property(&radeon_connector->base, NULL);
@@ -373,9 +362,9 @@ static int radeon_ddc_dump(struct drm_connector *connector)
if (!radeon_connector->ddc_bus)
return -1;
- radeon_i2c_do_lock(radeon_connector, 1);
+ radeon_i2c_do_lock(radeon_connector->ddc_bus, 1);
edid = drm_get_edid(connector, &radeon_connector->ddc_bus->adapter);
- radeon_i2c_do_lock(radeon_connector, 0);
+ radeon_i2c_do_lock(radeon_connector->ddc_bus, 0);
if (edid) {
kfree(edid);
}
@@ -762,10 +751,18 @@ bool radeon_crtc_scaling_mode_fixup(struct drm_crtc *crtc,
if (encoder->crtc != crtc)
continue;
if (first) {
- radeon_crtc->rmx_type = radeon_encoder->rmx_type;
+ /* set scaling */
+ if (radeon_encoder->rmx_type == RMX_OFF)
+ radeon_crtc->rmx_type = RMX_OFF;
+ else if (mode->hdisplay < radeon_encoder->native_mode.hdisplay ||
+ mode->vdisplay < radeon_encoder->native_mode.vdisplay)
+ radeon_crtc->rmx_type = radeon_encoder->rmx_type;
+ else
+ radeon_crtc->rmx_type = RMX_OFF;
+ /* copy native mode */
memcpy(&radeon_crtc->native_mode,
- &radeon_encoder->native_mode,
- sizeof(struct radeon_native_mode));
+ &radeon_encoder->native_mode,
+ sizeof(struct drm_display_mode));
first = false;
} else {
if (radeon_crtc->rmx_type != radeon_encoder->rmx_type) {
@@ -783,10 +780,10 @@ bool radeon_crtc_scaling_mode_fixup(struct drm_crtc *crtc,
if (radeon_crtc->rmx_type != RMX_OFF) {
fixed20_12 a, b;
a.full = rfixed_const(crtc->mode.vdisplay);
- b.full = rfixed_const(radeon_crtc->native_mode.panel_xres);
+ b.full = rfixed_const(radeon_crtc->native_mode.hdisplay);
radeon_crtc->vsc.full = rfixed_div(a, b);
a.full = rfixed_const(crtc->mode.hdisplay);
- b.full = rfixed_const(radeon_crtc->native_mode.panel_yres);
+ b.full = rfixed_const(radeon_crtc->native_mode.vdisplay);
radeon_crtc->hsc.full = rfixed_div(a, b);
} else {
radeon_crtc->vsc.full = rfixed_const(1);
diff --git a/drivers/gpu/drm/radeon/radeon_drv.h b/drivers/gpu/drm/radeon/radeon_drv.h
index 350962e0f34..e13785282a8 100644
--- a/drivers/gpu/drm/radeon/radeon_drv.h
+++ b/drivers/gpu/drm/radeon/radeon_drv.h
@@ -1104,7 +1104,6 @@ extern u32 radeon_get_scratch(drm_radeon_private_t *dev_priv, int index);
# define R600_IT_WAIT_REG_MEM 0x00003C00
# define R600_IT_MEM_WRITE 0x00003D00
# define R600_IT_INDIRECT_BUFFER 0x00003200
-# define R600_IT_CP_INTERRUPT 0x00004000
# define R600_IT_SURFACE_SYNC 0x00004300
# define R600_CB0_DEST_BASE_ENA (1 << 6)
# define R600_TC_ACTION_ENA (1 << 23)
diff --git a/drivers/gpu/drm/radeon/radeon_encoders.c b/drivers/gpu/drm/radeon/radeon_encoders.c
index a65ab1a0dad..291f6dd3683 100644
--- a/drivers/gpu/drm/radeon/radeon_encoders.c
+++ b/drivers/gpu/drm/radeon/radeon_encoders.c
@@ -31,6 +31,55 @@
extern int atom_debug;
+/* evil but including atombios.h is much worse */
+bool radeon_atom_get_tv_timings(struct radeon_device *rdev, int index,
+ struct drm_display_mode *mode);
+
+static uint32_t radeon_encoder_clones(struct drm_encoder *encoder)
+{
+ struct drm_device *dev = encoder->dev;
+ struct radeon_device *rdev = dev->dev_private;
+ struct radeon_encoder *radeon_encoder = to_radeon_encoder(encoder);
+ struct drm_encoder *clone_encoder;
+ uint32_t index_mask = 0;
+ int count;
+
+ /* DIG routing gets problematic */
+ if (rdev->family >= CHIP_R600)
+ return index_mask;
+ /* LVDS/TV are too wacky */
+ if (radeon_encoder->devices & ATOM_DEVICE_LCD_SUPPORT)
+ return index_mask;
+ /* DVO requires 2x ppll clocks depending on tmds chip */
+ if (radeon_encoder->devices & ATOM_DEVICE_DFP2_SUPPORT)
+ return index_mask;
+
+ count = -1;
+ list_for_each_entry(clone_encoder, &dev->mode_config.encoder_list, head) {
+ struct radeon_encoder *radeon_clone = to_radeon_encoder(clone_encoder);
+ count++;
+
+ if (clone_encoder == encoder)
+ continue;
+ if (radeon_clone->devices & (ATOM_DEVICE_LCD_SUPPORT))
+ continue;
+ if (radeon_clone->devices & ATOM_DEVICE_DFP2_SUPPORT)
+ continue;
+ else
+ index_mask |= (1 << count);
+ }
+ return index_mask;
+}
+
+void radeon_setup_encoder_clones(struct drm_device *dev)
+{
+ struct drm_encoder *encoder;
+
+ list_for_each_entry(encoder, &dev->mode_config.encoder_list, head) {
+ encoder->possible_clones = radeon_encoder_clones(encoder);
+ }
+}
+
uint32_t
radeon_get_encoder_id(struct drm_device *dev, uint32_t supported_device, uint8_t dac)
{
@@ -159,77 +208,48 @@ radeon_get_connector_for_encoder(struct drm_encoder *encoder)
return NULL;
}
-/* used for both atom and legacy */
-void radeon_rmx_mode_fixup(struct drm_encoder *encoder,
- struct drm_display_mode *mode,
- struct drm_display_mode *adjusted_mode)
-{
- struct radeon_encoder *radeon_encoder = to_radeon_encoder(encoder);
- struct drm_device *dev = encoder->dev;
- struct radeon_device *rdev = dev->dev_private;
- struct radeon_native_mode *native_mode = &radeon_encoder->native_mode;
-
- if (mode->hdisplay < native_mode->panel_xres ||
- mode->vdisplay < native_mode->panel_yres) {
- if (ASIC_IS_AVIVO(rdev)) {
- adjusted_mode->hdisplay = native_mode->panel_xres;
- adjusted_mode->vdisplay = native_mode->panel_yres;
- adjusted_mode->htotal = native_mode->panel_xres + native_mode->hblank;
- adjusted_mode->hsync_start = native_mode->panel_xres + native_mode->hoverplus;
- adjusted_mode->hsync_end = adjusted_mode->hsync_start + native_mode->hsync_width;
- adjusted_mode->vtotal = native_mode->panel_yres + native_mode->vblank;
- adjusted_mode->vsync_start = native_mode->panel_yres + native_mode->voverplus;
- adjusted_mode->vsync_end = adjusted_mode->vsync_start + native_mode->vsync_width;
- /* update crtc values */
- drm_mode_set_crtcinfo(adjusted_mode, CRTC_INTERLACE_HALVE_V);
- /* adjust crtc values */
- adjusted_mode->crtc_hdisplay = native_mode->panel_xres;
- adjusted_mode->crtc_vdisplay = native_mode->panel_yres;
- adjusted_mode->crtc_htotal = adjusted_mode->crtc_hdisplay + native_mode->hblank;
- adjusted_mode->crtc_hsync_start = adjusted_mode->crtc_hdisplay + native_mode->hoverplus;
- adjusted_mode->crtc_hsync_end = adjusted_mode->crtc_hsync_start + native_mode->hsync_width;
- adjusted_mode->crtc_vtotal = adjusted_mode->crtc_vdisplay + native_mode->vblank;
- adjusted_mode->crtc_vsync_start = adjusted_mode->crtc_vdisplay + native_mode->voverplus;
- adjusted_mode->crtc_vsync_end = adjusted_mode->crtc_vsync_start + native_mode->vsync_width;
- } else {
- adjusted_mode->htotal = native_mode->panel_xres + native_mode->hblank;
- adjusted_mode->hsync_start = native_mode->panel_xres + native_mode->hoverplus;
- adjusted_mode->hsync_end = adjusted_mode->hsync_start + native_mode->hsync_width;
- adjusted_mode->vtotal = native_mode->panel_yres + native_mode->vblank;
- adjusted_mode->vsync_start = native_mode->panel_yres + native_mode->voverplus;
- adjusted_mode->vsync_end = adjusted_mode->vsync_start + native_mode->vsync_width;
- /* update crtc values */
- drm_mode_set_crtcinfo(adjusted_mode, CRTC_INTERLACE_HALVE_V);
- /* adjust crtc values */
- adjusted_mode->crtc_htotal = adjusted_mode->crtc_hdisplay + native_mode->hblank;
- adjusted_mode->crtc_hsync_start = adjusted_mode->crtc_hdisplay + native_mode->hoverplus;
- adjusted_mode->crtc_hsync_end = adjusted_mode->crtc_hsync_start + native_mode->hsync_width;
- adjusted_mode->crtc_vtotal = adjusted_mode->crtc_vdisplay + native_mode->vblank;
- adjusted_mode->crtc_vsync_start = adjusted_mode->crtc_vdisplay + native_mode->voverplus;
- adjusted_mode->crtc_vsync_end = adjusted_mode->crtc_vsync_start + native_mode->vsync_width;
- }
- adjusted_mode->flags = native_mode->flags;
- adjusted_mode->clock = native_mode->dotclock;
- }
-}
-
-
static bool radeon_atom_mode_fixup(struct drm_encoder *encoder,
struct drm_display_mode *mode,
struct drm_display_mode *adjusted_mode)
{
struct radeon_encoder *radeon_encoder = to_radeon_encoder(encoder);
+ struct drm_device *dev = encoder->dev;
+ struct radeon_device *rdev = dev->dev_private;
+ /* set the active encoder to connector routing */
+ radeon_encoder_set_active_device(encoder);
drm_mode_set_crtcinfo(adjusted_mode, 0);
- if (radeon_encoder->rmx_type != RMX_OFF)
- radeon_rmx_mode_fixup(encoder, mode, adjusted_mode);
-
/* hw bug */
if ((mode->flags & DRM_MODE_FLAG_INTERLACE)
&& (mode->crtc_vsync_start < (mode->crtc_vdisplay + 2)))
adjusted_mode->crtc_vsync_start = adjusted_mode->crtc_vdisplay + 2;
+ /* get the native mode for LVDS */
+ if (radeon_encoder->active_device & (ATOM_DEVICE_LCD_SUPPORT)) {
+ struct drm_display_mode *native_mode = &radeon_encoder->native_mode;
+ int mode_id = adjusted_mode->base.id;
+ *adjusted_mode = *native_mode;
+ if (!ASIC_IS_AVIVO(rdev)) {
+ adjusted_mode->hdisplay = mode->hdisplay;
+ adjusted_mode->vdisplay = mode->vdisplay;
+ }
+ adjusted_mode->base.id = mode_id;
+ }
+
+ /* get the native mode for TV */
+ if (radeon_encoder->active_device & (ATOM_DEVICE_TV_SUPPORT)) {
+ struct radeon_encoder_atom_dac *tv_dac = radeon_encoder->enc_priv;
+ if (tv_dac) {
+ if (tv_dac->tv_std == TV_STD_NTSC ||
+ tv_dac->tv_std == TV_STD_NTSC_J ||
+ tv_dac->tv_std == TV_STD_PAL_M)
+ radeon_atom_get_tv_timings(rdev, 0, adjusted_mode);
+ else
+ radeon_atom_get_tv_timings(rdev, 1, adjusted_mode);
+ }
+ }
+
return true;
}
@@ -404,7 +424,7 @@ union lvds_encoder_control {
LVDS_ENCODER_CONTROL_PS_ALLOCATION_V2 v2;
};
-static void
+void
atombios_digital_setup(struct drm_encoder *encoder, int action)
{
struct drm_device *dev = encoder->dev;
@@ -461,7 +481,7 @@ atombios_digital_setup(struct drm_encoder *encoder, int action)
case 1:
args.v1.ucMisc = 0;
args.v1.ucAction = action;
- if (drm_detect_hdmi_monitor((struct edid *)connector->edid_blob_ptr))
+ if (drm_detect_hdmi_monitor(radeon_connector->edid))
args.v1.ucMisc |= PANEL_ENCODER_MISC_HDMI_TYPE;
args.v1.usPixelClock = cpu_to_le16(radeon_encoder->pixel_clock / 10);
if (radeon_encoder->devices & (ATOM_DEVICE_LCD_SUPPORT)) {
@@ -486,7 +506,7 @@ atombios_digital_setup(struct drm_encoder *encoder, int action)
if (dig->coherent_mode)
args.v2.ucMisc |= PANEL_ENCODER_MISC_COHERENT;
}
- if (drm_detect_hdmi_monitor((struct edid *)connector->edid_blob_ptr))
+ if (drm_detect_hdmi_monitor(radeon_connector->edid))
args.v2.ucMisc |= PANEL_ENCODER_MISC_HDMI_TYPE;
args.v2.usPixelClock = cpu_to_le16(radeon_encoder->pixel_clock / 10);
args.v2.ucTruncate = 0;
@@ -544,7 +564,7 @@ atombios_get_encoder_mode(struct drm_encoder *encoder)
switch (connector->connector_type) {
case DRM_MODE_CONNECTOR_DVII:
case DRM_MODE_CONNECTOR_HDMIB: /* HDMI-B is basically DL-DVI; analog works fine */
- if (drm_detect_hdmi_monitor((struct edid *)connector->edid_blob_ptr))
+ if (drm_detect_hdmi_monitor(radeon_connector->edid))
return ATOM_ENCODER_MODE_HDMI;
else if (radeon_connector->use_digital)
return ATOM_ENCODER_MODE_DVI;
@@ -554,7 +574,7 @@ atombios_get_encoder_mode(struct drm_encoder *encoder)
case DRM_MODE_CONNECTOR_DVID:
case DRM_MODE_CONNECTOR_HDMIA:
default:
- if (drm_detect_hdmi_monitor((struct edid *)connector->edid_blob_ptr))
+ if (drm_detect_hdmi_monitor(radeon_connector->edid))
return ATOM_ENCODER_MODE_HDMI;
else
return ATOM_ENCODER_MODE_DVI;
@@ -566,7 +586,7 @@ atombios_get_encoder_mode(struct drm_encoder *encoder)
/*if (radeon_output->MonType == MT_DP)
return ATOM_ENCODER_MODE_DP;
else*/
- if (drm_detect_hdmi_monitor((struct edid *)connector->edid_blob_ptr))
+ if (drm_detect_hdmi_monitor(radeon_connector->edid))
return ATOM_ENCODER_MODE_HDMI;
else
return ATOM_ENCODER_MODE_DVI;
@@ -734,14 +754,17 @@ atombios_dig_transmitter_setup(struct drm_encoder *encoder, int action)
atom_parse_cmd_header(rdev->mode_info.atom_context, index, &frev, &crev);
args.v1.ucAction = action;
-
+ if (action == ATOM_TRANSMITTER_ACTION_INIT) {
+ args.v1.usInitInfo = radeon_connector->connector_object_id;
+ } else {
+ if (radeon_encoder->pixel_clock > 165000)
+ args.v1.usPixelClock = cpu_to_le16((radeon_encoder->pixel_clock / 2) / 10);
+ else
+ args.v1.usPixelClock = cpu_to_le16(radeon_encoder->pixel_clock / 10);
+ }
if (ASIC_IS_DCE32(rdev)) {
- if (radeon_encoder->pixel_clock > 165000) {
- args.v2.usPixelClock = cpu_to_le16((radeon_encoder->pixel_clock * 10 * 2) / 100);
- args.v2.acConfig.fDualLinkConnector = 1;
- } else {
- args.v2.usPixelClock = cpu_to_le16((radeon_encoder->pixel_clock * 10 * 4) / 100);
- }
+ if (radeon_encoder->pixel_clock > 165000)
+ args.v2.usPixelClock = cpu_to_le16((radeon_encoder->pixel_clock / 2) / 10);
if (dig->dig_block)
args.v2.acConfig.ucEncoderSel = 1;
@@ -766,7 +789,6 @@ atombios_dig_transmitter_setup(struct drm_encoder *encoder, int action)
}
} else {
args.v1.ucConfig = ATOM_TRANSMITTER_CONFIG_CLKSRC_PPLL;
- args.v1.usPixelClock = cpu_to_le16((radeon_encoder->pixel_clock) / 10);
switch (radeon_encoder->encoder_id) {
case ENCODER_OBJECT_ID_INTERNAL_UNIPHY:
@@ -874,16 +896,9 @@ radeon_atom_encoder_dpms(struct drm_encoder *encoder, int mode)
DISPLAY_DEVICE_OUTPUT_CONTROL_PS_ALLOCATION args;
int index = 0;
bool is_dig = false;
- int devices;
memset(&args, 0, sizeof(args));
- /* on DPMS off we have no idea if active device is meaningful */
- if (mode != DRM_MODE_DPMS_ON && !radeon_encoder->active_device)
- devices = radeon_encoder->devices;
- else
- devices = radeon_encoder->active_device;
-
DRM_DEBUG("encoder dpms %d to mode %d, devices %08x, active_devices %08x\n",
radeon_encoder->encoder_id, mode, radeon_encoder->devices,
radeon_encoder->active_device);
@@ -914,18 +929,18 @@ radeon_atom_encoder_dpms(struct drm_encoder *encoder, int mode)
break;
case ENCODER_OBJECT_ID_INTERNAL_DAC1:
case ENCODER_OBJECT_ID_INTERNAL_KLDSCP_DAC1:
- if (devices & (ATOM_DEVICE_TV_SUPPORT))
+ if (radeon_encoder->active_device & (ATOM_DEVICE_TV_SUPPORT))
index = GetIndexIntoMasterTable(COMMAND, TV1OutputControl);
- else if (devices & (ATOM_DEVICE_CV_SUPPORT))
+ else if (radeon_encoder->active_device & (ATOM_DEVICE_CV_SUPPORT))
index = GetIndexIntoMasterTable(COMMAND, CV1OutputControl);
else
index = GetIndexIntoMasterTable(COMMAND, DAC1OutputControl);
break;
case ENCODER_OBJECT_ID_INTERNAL_DAC2:
case ENCODER_OBJECT_ID_INTERNAL_KLDSCP_DAC2:
- if (devices & (ATOM_DEVICE_TV_SUPPORT))
+ if (radeon_encoder->active_device & (ATOM_DEVICE_TV_SUPPORT))
index = GetIndexIntoMasterTable(COMMAND, TV1OutputControl);
- else if (devices & (ATOM_DEVICE_CV_SUPPORT))
+ else if (radeon_encoder->active_device & (ATOM_DEVICE_CV_SUPPORT))
index = GetIndexIntoMasterTable(COMMAND, CV1OutputControl);
else
index = GetIndexIntoMasterTable(COMMAND, DAC2OutputControl);
@@ -935,12 +950,12 @@ radeon_atom_encoder_dpms(struct drm_encoder *encoder, int mode)
if (is_dig) {
switch (mode) {
case DRM_MODE_DPMS_ON:
- atombios_dig_transmitter_setup(encoder, ATOM_TRANSMITTER_ACTION_ENABLE);
+ atombios_dig_transmitter_setup(encoder, ATOM_TRANSMITTER_ACTION_ENABLE_OUTPUT);
break;
case DRM_MODE_DPMS_STANDBY:
case DRM_MODE_DPMS_SUSPEND:
case DRM_MODE_DPMS_OFF:
- atombios_dig_transmitter_setup(encoder, ATOM_TRANSMITTER_ACTION_DISABLE);
+ atombios_dig_transmitter_setup(encoder, ATOM_TRANSMITTER_ACTION_DISABLE_OUTPUT);
break;
}
} else {
@@ -1104,8 +1119,11 @@ atombios_apply_encoder_quirks(struct drm_encoder *encoder,
}
/* set scaler clears this on some chips */
- if (ASIC_IS_AVIVO(rdev) && (mode->flags & DRM_MODE_FLAG_INTERLACE))
- WREG32(AVIVO_D1MODE_DATA_FORMAT + radeon_crtc->crtc_offset, AVIVO_D1MODE_INTERLEAVE_EN);
+ if (!(radeon_encoder->active_device & (ATOM_DEVICE_TV_SUPPORT))) {
+ if (ASIC_IS_AVIVO(rdev) && (mode->flags & DRM_MODE_FLAG_INTERLACE))
+ WREG32(AVIVO_D1MODE_DATA_FORMAT + radeon_crtc->crtc_offset,
+ AVIVO_D1MODE_INTERLEAVE_EN);
+ }
}
static void
@@ -1153,6 +1171,7 @@ radeon_atom_encoder_mode_set(struct drm_encoder *encoder,
/* setup and enable the encoder and transmitter */
atombios_dig_encoder_setup(encoder, ATOM_ENABLE);
+ atombios_dig_transmitter_setup(encoder, ATOM_TRANSMITTER_ACTION_INIT);
atombios_dig_transmitter_setup(encoder, ATOM_TRANSMITTER_ACTION_SETUP);
atombios_dig_transmitter_setup(encoder, ATOM_TRANSMITTER_ACTION_ENABLE);
break;
@@ -1268,8 +1287,6 @@ static void radeon_atom_encoder_prepare(struct drm_encoder *encoder)
{
radeon_atom_output_lock(encoder, true);
radeon_atom_encoder_dpms(encoder, DRM_MODE_DPMS_OFF);
-
- radeon_encoder_set_active_device(encoder);
}
static void radeon_atom_encoder_commit(struct drm_encoder *encoder)
@@ -1369,7 +1386,6 @@ radeon_add_atom_encoder(struct drm_device *dev, uint32_t encoder_id, uint32_t su
encoder->possible_crtcs = 0x1;
else
encoder->possible_crtcs = 0x3;
- encoder->possible_clones = 0;
radeon_encoder->enc_priv = NULL;
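
The DCE3.2 transmitter hunk above now programs usPixelClock in 10 kHz units and halves the clock for dual-link DVI (pixel clocks above 165 MHz). A standalone sketch of just that conversion, leaving out the cpu_to_le16() byte-swap the driver applies:

#include <stdio.h>
#include <stdint.h>

/* pixel_clock is in kHz, as in the driver; the ATOM table wants 10 kHz units */
static uint16_t transmitter_pixel_clock(uint32_t pixel_clock_khz)
{
	if (pixel_clock_khz > 165000)		/* dual link: each link carries half */
		return (uint16_t)((pixel_clock_khz / 2) / 10);
	return (uint16_t)(pixel_clock_khz / 10);
}

int main(void)
{
	printf("%u\n", transmitter_pixel_clock(162000));	/* 16200: 162 MHz, single link */
	printf("%u\n", transmitter_pixel_clock(269000));	/* 13450: 269 MHz, dual link */
	return 0;
}
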
diff --git a/drivers/gpu/drm/radeon/radeon_fb.c b/drivers/gpu/drm/radeon/radeon_fb.c
index b38c4c8e2c6..66055b3d866 100644
--- a/drivers/gpu/drm/radeon/radeon_fb.c
+++ b/drivers/gpu/drm/radeon/radeon_fb.c
@@ -140,7 +140,7 @@ int radeonfb_create(struct drm_device *dev,
struct radeon_framebuffer *rfb;
struct drm_mode_fb_cmd mode_cmd;
struct drm_gem_object *gobj = NULL;
- struct radeon_object *robj = NULL;
+ struct radeon_bo *rbo = NULL;
struct device *device = &rdev->pdev->dev;
int size, aligned_size, ret;
u64 fb_gpuaddr;
@@ -168,14 +168,14 @@ int radeonfb_create(struct drm_device *dev,
ret = radeon_gem_object_create(rdev, aligned_size, 0,
RADEON_GEM_DOMAIN_VRAM,
false, ttm_bo_type_kernel,
- false, &gobj);
+ &gobj);
if (ret) {
printk(KERN_ERR "failed to allocate framebuffer (%d %d)\n",
surface_width, surface_height);
ret = -ENOMEM;
goto out;
}
- robj = gobj->driver_private;
+ rbo = gobj->driver_private;
if (fb_tiled)
tiling_flags = RADEON_TILING_MACRO;
@@ -192,8 +192,13 @@ int radeonfb_create(struct drm_device *dev,
}
#endif
- if (tiling_flags)
- radeon_object_set_tiling_flags(robj, tiling_flags | RADEON_TILING_SURFACE, mode_cmd.pitch);
+ if (tiling_flags) {
+ ret = radeon_bo_set_tiling_flags(rbo,
+ tiling_flags | RADEON_TILING_SURFACE,
+ mode_cmd.pitch);
+ if (ret)
+ dev_err(rdev->dev, "FB failed to set tiling flags\n");
+ }
mutex_lock(&rdev->ddev->struct_mutex);
fb = radeon_framebuffer_create(rdev->ddev, &mode_cmd, gobj);
if (fb == NULL) {
@@ -201,10 +206,19 @@ int radeonfb_create(struct drm_device *dev,
ret = -ENOMEM;
goto out_unref;
}
- ret = radeon_object_pin(robj, RADEON_GEM_DOMAIN_VRAM, &fb_gpuaddr);
+ ret = radeon_bo_reserve(rbo, false);
+ if (unlikely(ret != 0))
+ goto out_unref;
+ ret = radeon_bo_pin(rbo, RADEON_GEM_DOMAIN_VRAM, &fb_gpuaddr);
+ if (ret) {
+ radeon_bo_unreserve(rbo);
+ goto out_unref;
+ }
+ if (fb_tiled)
+ radeon_bo_check_tiling(rbo, 0, 0);
+ ret = radeon_bo_kmap(rbo, &fbptr);
+ radeon_bo_unreserve(rbo);
if (ret) {
- printk(KERN_ERR "failed to pin framebuffer\n");
- ret = -ENOMEM;
goto out_unref;
}
@@ -213,7 +227,7 @@ int radeonfb_create(struct drm_device *dev,
*fb_p = fb;
rfb = to_radeon_framebuffer(fb);
rdev->fbdev_rfb = rfb;
- rdev->fbdev_robj = robj;
+ rdev->fbdev_rbo = rbo;
info = framebuffer_alloc(sizeof(struct radeon_fb_device), device);
if (info == NULL) {
@@ -234,15 +248,7 @@ int radeonfb_create(struct drm_device *dev,
if (ret)
goto out_unref;
- if (fb_tiled)
- radeon_object_check_tiling(robj, 0, 0);
-
- ret = radeon_object_kmap(robj, &fbptr);
- if (ret) {
- goto out_unref;
- }
-
- memset_io(fbptr, 0, aligned_size);
+ memset_io(fbptr, 0xff, aligned_size);
strcpy(info->fix.id, "radeondrmfb");
@@ -288,8 +294,12 @@ int radeonfb_create(struct drm_device *dev,
return 0;
out_unref:
- if (robj) {
- radeon_object_kunmap(robj);
+ if (rbo) {
+ ret = radeon_bo_reserve(rbo, false);
+ if (likely(ret == 0)) {
+ radeon_bo_kunmap(rbo);
+ radeon_bo_unreserve(rbo);
+ }
}
if (fb && ret) {
list_del(&fb->filp_head);
@@ -321,14 +331,22 @@ int radeon_parse_options(char *options)
int radeonfb_probe(struct drm_device *dev)
{
- return drm_fb_helper_single_fb_probe(dev, 32, &radeonfb_create);
+ struct radeon_device *rdev = dev->dev_private;
+ int bpp_sel = 32;
+
+ /* select 8 bpp console on RN50 or cards with 32MB of VRAM or less */

+ if (ASIC_IS_RN50(rdev) || rdev->mc.real_vram_size <= (32*1024*1024))
+ bpp_sel = 8;
+
+ return drm_fb_helper_single_fb_probe(dev, bpp_sel, &radeonfb_create);
}
int radeonfb_remove(struct drm_device *dev, struct drm_framebuffer *fb)
{
struct fb_info *info;
struct radeon_framebuffer *rfb = to_radeon_framebuffer(fb);
- struct radeon_object *robj;
+ struct radeon_bo *rbo;
+ int r;
if (!fb) {
return -EINVAL;
@@ -336,10 +354,14 @@ int radeonfb_remove(struct drm_device *dev, struct drm_framebuffer *fb)
info = fb->fbdev;
if (info) {
struct radeon_fb_device *rfbdev = info->par;
- robj = rfb->obj->driver_private;
+ rbo = rfb->obj->driver_private;
unregister_framebuffer(info);
- radeon_object_kunmap(robj);
- radeon_object_unpin(robj);
+ r = radeon_bo_reserve(rbo, false);
+ if (likely(r == 0)) {
+ radeon_bo_kunmap(rbo);
+ radeon_bo_unpin(rbo);
+ radeon_bo_unreserve(rbo);
+ }
drm_fb_helper_free(&rfbdev->helper);
framebuffer_release(info);
}
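
radeonfb_create()/radeonfb_probe() now reserve the BO around pin/kmap and drop the console depth on RN50 or low-memory cards instead of always asking for 32 bpp. The depth selection on its own, with plain parameters standing in for ASIC_IS_RN50(rdev) and rdev->mc.real_vram_size:

#include <stdio.h>
#include <stdint.h>

/* is_rn50 and vram_bytes stand in for ASIC_IS_RN50(rdev) and rdev->mc.real_vram_size */
static int console_bpp(int is_rn50, uint64_t vram_bytes)
{
	if (is_rn50 || vram_bytes <= 32ULL * 1024 * 1024)
		return 8;
	return 32;
}

int main(void)
{
	printf("%d\n", console_bpp(0, 256ULL << 20));	/* 32 */
	printf("%d\n", console_bpp(0, 16ULL << 20));	/* 8 */
	return 0;
}
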
diff --git a/drivers/gpu/drm/radeon/radeon_fence.c b/drivers/gpu/drm/radeon/radeon_fence.c
index 3beb26d7471..2ac31633d72 100644
--- a/drivers/gpu/drm/radeon/radeon_fence.c
+++ b/drivers/gpu/drm/radeon/radeon_fence.c
@@ -168,37 +168,6 @@ bool radeon_fence_signaled(struct radeon_fence *fence)
return signaled;
}
-int r600_fence_wait(struct radeon_fence *fence, bool intr, bool lazy)
-{
- struct radeon_device *rdev;
- int ret = 0;
-
- rdev = fence->rdev;
-
- __set_current_state(intr ? TASK_INTERRUPTIBLE : TASK_UNINTERRUPTIBLE);
-
- while (1) {
- if (radeon_fence_signaled(fence))
- break;
-
- if (time_after_eq(jiffies, fence->timeout)) {
- ret = -EBUSY;
- break;
- }
-
- if (lazy)
- schedule_timeout(1);
-
- if (intr && signal_pending(current)) {
- ret = -ERESTARTSYS;
- break;
- }
- }
- __set_current_state(TASK_RUNNING);
- return ret;
-}
-
-
int radeon_fence_wait(struct radeon_fence *fence, bool intr)
{
struct radeon_device *rdev;
@@ -216,13 +185,6 @@ int radeon_fence_wait(struct radeon_fence *fence, bool intr)
return 0;
}
- if (rdev->family >= CHIP_R600) {
- r = r600_fence_wait(fence, intr, 0);
- if (r == -ERESTARTSYS)
- return -EBUSY;
- return r;
- }
-
retry:
cur_jiffies = jiffies;
timeout = HZ / 100;
@@ -231,14 +193,18 @@ retry:
}
if (intr) {
+ radeon_irq_kms_sw_irq_get(rdev);
r = wait_event_interruptible_timeout(rdev->fence_drv.queue,
radeon_fence_signaled(fence), timeout);
+ radeon_irq_kms_sw_irq_put(rdev);
if (unlikely(r == -ERESTARTSYS)) {
return -EBUSY;
}
} else {
+ radeon_irq_kms_sw_irq_get(rdev);
r = wait_event_timeout(rdev->fence_drv.queue,
radeon_fence_signaled(fence), timeout);
+ radeon_irq_kms_sw_irq_put(rdev);
}
if (unlikely(!radeon_fence_signaled(fence))) {
if (unlikely(r == 0)) {
diff --git a/drivers/gpu/drm/radeon/radeon_gart.c b/drivers/gpu/drm/radeon/radeon_gart.c
index a931af065dd..e73d56e83fa 100644
--- a/drivers/gpu/drm/radeon/radeon_gart.c
+++ b/drivers/gpu/drm/radeon/radeon_gart.c
@@ -78,11 +78,9 @@ int radeon_gart_table_vram_alloc(struct radeon_device *rdev)
int r;
if (rdev->gart.table.vram.robj == NULL) {
- r = radeon_object_create(rdev, NULL,
- rdev->gart.table_size,
- true,
- RADEON_GEM_DOMAIN_VRAM,
- false, &rdev->gart.table.vram.robj);
+ r = radeon_bo_create(rdev, NULL, rdev->gart.table_size,
+ true, RADEON_GEM_DOMAIN_VRAM,
+ &rdev->gart.table.vram.robj);
if (r) {
return r;
}
@@ -95,32 +93,38 @@ int radeon_gart_table_vram_pin(struct radeon_device *rdev)
uint64_t gpu_addr;
int r;
- r = radeon_object_pin(rdev->gart.table.vram.robj,
- RADEON_GEM_DOMAIN_VRAM, &gpu_addr);
- if (r) {
- radeon_object_unref(&rdev->gart.table.vram.robj);
+ r = radeon_bo_reserve(rdev->gart.table.vram.robj, false);
+ if (unlikely(r != 0))
return r;
- }
- r = radeon_object_kmap(rdev->gart.table.vram.robj,
- (void **)&rdev->gart.table.vram.ptr);
+ r = radeon_bo_pin(rdev->gart.table.vram.robj,
+ RADEON_GEM_DOMAIN_VRAM, &gpu_addr);
if (r) {
- radeon_object_unpin(rdev->gart.table.vram.robj);
- radeon_object_unref(&rdev->gart.table.vram.robj);
- DRM_ERROR("radeon: failed to map gart vram table.\n");
+ radeon_bo_unreserve(rdev->gart.table.vram.robj);
return r;
}
+ r = radeon_bo_kmap(rdev->gart.table.vram.robj,
+ (void **)&rdev->gart.table.vram.ptr);
+ if (r)
+ radeon_bo_unpin(rdev->gart.table.vram.robj);
+ radeon_bo_unreserve(rdev->gart.table.vram.robj);
rdev->gart.table_addr = gpu_addr;
- return 0;
+ return r;
}
void radeon_gart_table_vram_free(struct radeon_device *rdev)
{
+ int r;
+
if (rdev->gart.table.vram.robj == NULL) {
return;
}
- radeon_object_kunmap(rdev->gart.table.vram.robj);
- radeon_object_unpin(rdev->gart.table.vram.robj);
- radeon_object_unref(&rdev->gart.table.vram.robj);
+ r = radeon_bo_reserve(rdev->gart.table.vram.robj, false);
+ if (likely(r == 0)) {
+ radeon_bo_kunmap(rdev->gart.table.vram.robj);
+ radeon_bo_unpin(rdev->gart.table.vram.robj);
+ radeon_bo_unreserve(rdev->gart.table.vram.robj);
+ }
+ radeon_bo_unref(&rdev->gart.table.vram.robj);
}
@@ -140,15 +144,15 @@ void radeon_gart_unbind(struct radeon_device *rdev, unsigned offset,
WARN(1, "trying to unbind memory to unitialized GART !\n");
return;
}
- t = offset / 4096;
- p = t / (PAGE_SIZE / 4096);
+ t = offset / RADEON_GPU_PAGE_SIZE;
+ p = t / (PAGE_SIZE / RADEON_GPU_PAGE_SIZE);
for (i = 0; i < pages; i++, p++) {
if (rdev->gart.pages[p]) {
pci_unmap_page(rdev->pdev, rdev->gart.pages_addr[p],
PAGE_SIZE, PCI_DMA_BIDIRECTIONAL);
rdev->gart.pages[p] = NULL;
rdev->gart.pages_addr[p] = 0;
- for (j = 0; j < (PAGE_SIZE / 4096); j++, t++) {
+ for (j = 0; j < (PAGE_SIZE / RADEON_GPU_PAGE_SIZE); j++, t++) {
radeon_gart_set_page(rdev, t, 0);
}
}
@@ -169,8 +173,8 @@ int radeon_gart_bind(struct radeon_device *rdev, unsigned offset,
DRM_ERROR("trying to bind memory to unitialized GART !\n");
return -EINVAL;
}
- t = offset / 4096;
- p = t / (PAGE_SIZE / 4096);
+ t = offset / RADEON_GPU_PAGE_SIZE;
+ p = t / (PAGE_SIZE / RADEON_GPU_PAGE_SIZE);
for (i = 0; i < pages; i++, p++) {
/* we need to support large memory configurations */
@@ -185,9 +189,9 @@ int radeon_gart_bind(struct radeon_device *rdev, unsigned offset,
}
rdev->gart.pages[p] = pagelist[i];
page_base = rdev->gart.pages_addr[p];
- for (j = 0; j < (PAGE_SIZE / 4096); j++, t++) {
+ for (j = 0; j < (PAGE_SIZE / RADEON_GPU_PAGE_SIZE); j++, t++) {
radeon_gart_set_page(rdev, t, page_base);
- page_base += 4096;
+ page_base += RADEON_GPU_PAGE_SIZE;
}
}
mb();
@@ -200,14 +204,14 @@ int radeon_gart_init(struct radeon_device *rdev)
if (rdev->gart.pages) {
return 0;
}
- /* We need PAGE_SIZE >= 4096 */
- if (PAGE_SIZE < 4096) {
+ /* We need PAGE_SIZE >= RADEON_GPU_PAGE_SIZE */
+ if (PAGE_SIZE < RADEON_GPU_PAGE_SIZE) {
DRM_ERROR("Page size is smaller than GPU page size!\n");
return -EINVAL;
}
/* Compute table size */
rdev->gart.num_cpu_pages = rdev->mc.gtt_size / PAGE_SIZE;
- rdev->gart.num_gpu_pages = rdev->mc.gtt_size / 4096;
+ rdev->gart.num_gpu_pages = rdev->mc.gtt_size / RADEON_GPU_PAGE_SIZE;
DRM_INFO("GART: num cpu pages %u, num gpu pages %u\n",
rdev->gart.num_cpu_pages, rdev->gart.num_gpu_pages);
/* Allocate pages table */
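
With the hard-coded 4096 replaced by RADEON_GPU_PAGE_SIZE, the offset-to-page translation in radeon_gart_bind()/radeon_gart_unbind() keeps the same arithmetic. A quick standalone check of it, assuming a 4 KiB GPU page and an 8 KiB CPU page so one CPU page covers two GPU pages:

#include <stdio.h>

#define RADEON_GPU_PAGE_SIZE 4096
#define CPU_PAGE_SIZE        8192	/* assumed host PAGE_SIZE, for illustration */

int main(void)
{
	unsigned offset = 3 * CPU_PAGE_SIZE + RADEON_GPU_PAGE_SIZE;	/* arbitrary example */
	unsigned t = offset / RADEON_GPU_PAGE_SIZE;			/* GPU page index */
	unsigned p = t / (CPU_PAGE_SIZE / RADEON_GPU_PAGE_SIZE);	/* CPU page index */

	printf("gpu page %u lives in cpu page %u\n", t, p);	/* 7 and 3 */
	return 0;
}
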
diff --git a/drivers/gpu/drm/radeon/radeon_gem.c b/drivers/gpu/drm/radeon/radeon_gem.c
index d880edf254d..e927f998f76 100644
--- a/drivers/gpu/drm/radeon/radeon_gem.c
+++ b/drivers/gpu/drm/radeon/radeon_gem.c
@@ -38,22 +38,21 @@ int radeon_gem_object_init(struct drm_gem_object *obj)
void radeon_gem_object_free(struct drm_gem_object *gobj)
{
- struct radeon_object *robj = gobj->driver_private;
+ struct radeon_bo *robj = gobj->driver_private;
gobj->driver_private = NULL;
if (robj) {
- radeon_object_unref(&robj);
+ radeon_bo_unref(&robj);
}
}
int radeon_gem_object_create(struct radeon_device *rdev, int size,
- int alignment, int initial_domain,
- bool discardable, bool kernel,
- bool interruptible,
- struct drm_gem_object **obj)
+ int alignment, int initial_domain,
+ bool discardable, bool kernel,
+ struct drm_gem_object **obj)
{
struct drm_gem_object *gobj;
- struct radeon_object *robj;
+ struct radeon_bo *robj;
int r;
*obj = NULL;
@@ -65,8 +64,7 @@ int radeon_gem_object_create(struct radeon_device *rdev, int size,
if (alignment < PAGE_SIZE) {
alignment = PAGE_SIZE;
}
- r = radeon_object_create(rdev, gobj, size, kernel, initial_domain,
- interruptible, &robj);
+ r = radeon_bo_create(rdev, gobj, size, kernel, initial_domain, &robj);
if (r) {
DRM_ERROR("Failed to allocate GEM object (%d, %d, %u)\n",
size, initial_domain, alignment);
@@ -83,33 +81,33 @@ int radeon_gem_object_create(struct radeon_device *rdev, int size,
int radeon_gem_object_pin(struct drm_gem_object *obj, uint32_t pin_domain,
uint64_t *gpu_addr)
{
- struct radeon_object *robj = obj->driver_private;
- uint32_t flags;
+ struct radeon_bo *robj = obj->driver_private;
+ int r;
- switch (pin_domain) {
- case RADEON_GEM_DOMAIN_VRAM:
- flags = TTM_PL_FLAG_VRAM;
- break;
- case RADEON_GEM_DOMAIN_GTT:
- flags = TTM_PL_FLAG_TT;
- break;
- default:
- flags = TTM_PL_FLAG_SYSTEM;
- break;
- }
- return radeon_object_pin(robj, flags, gpu_addr);
+ r = radeon_bo_reserve(robj, false);
+ if (unlikely(r != 0))
+ return r;
+ r = radeon_bo_pin(robj, pin_domain, gpu_addr);
+ radeon_bo_unreserve(robj);
+ return r;
}
void radeon_gem_object_unpin(struct drm_gem_object *obj)
{
- struct radeon_object *robj = obj->driver_private;
- radeon_object_unpin(robj);
+ struct radeon_bo *robj = obj->driver_private;
+ int r;
+
+ r = radeon_bo_reserve(robj, false);
+ if (likely(r == 0)) {
+ radeon_bo_unpin(robj);
+ radeon_bo_unreserve(robj);
+ }
}
int radeon_gem_set_domain(struct drm_gem_object *gobj,
uint32_t rdomain, uint32_t wdomain)
{
- struct radeon_object *robj;
+ struct radeon_bo *robj;
uint32_t domain;
int r;
@@ -127,11 +125,12 @@ int radeon_gem_set_domain(struct drm_gem_object *gobj,
}
if (domain == RADEON_GEM_DOMAIN_CPU) {
/* Asking for cpu access wait for object idle */
- r = radeon_object_wait(robj);
+ r = radeon_bo_wait(robj, NULL, false);
if (r) {
printk(KERN_ERR "Failed to wait for object !\n");
return r;
}
+ radeon_hdp_flush(robj->rdev);
}
return 0;
}
@@ -144,7 +143,7 @@ int radeon_gem_init(struct radeon_device *rdev)
void radeon_gem_fini(struct radeon_device *rdev)
{
- radeon_object_force_delete(rdev);
+ radeon_bo_force_delete(rdev);
}
@@ -158,9 +157,13 @@ int radeon_gem_info_ioctl(struct drm_device *dev, void *data,
struct drm_radeon_gem_info *args = data;
args->vram_size = rdev->mc.real_vram_size;
- /* FIXME: report somethings that makes sense */
- args->vram_visible = rdev->mc.real_vram_size - (4 * 1024 * 1024);
- args->gart_size = rdev->mc.gtt_size;
+ args->vram_visible = rdev->mc.real_vram_size;
+ if (rdev->stollen_vga_memory)
+ args->vram_visible -= radeon_bo_size(rdev->stollen_vga_memory);
+ if (rdev->fbdev_rbo)
+ args->vram_visible -= radeon_bo_size(rdev->fbdev_rbo);
+ args->gart_size = rdev->mc.gtt_size - rdev->cp.ring_size - 4096 -
+ RADEON_IB_POOL_SIZE*64*1024;
return 0;
}
@@ -192,8 +195,8 @@ int radeon_gem_create_ioctl(struct drm_device *dev, void *data,
/* create a gem object to contain this object in */
args->size = roundup(args->size, PAGE_SIZE);
r = radeon_gem_object_create(rdev, args->size, args->alignment,
- args->initial_domain, false,
- false, true, &gobj);
+ args->initial_domain, false,
+ false, &gobj);
if (r) {
return r;
}
@@ -218,7 +221,7 @@ int radeon_gem_set_domain_ioctl(struct drm_device *dev, void *data,
* just validate the BO into a certain domain */
struct drm_radeon_gem_set_domain *args = data;
struct drm_gem_object *gobj;
- struct radeon_object *robj;
+ struct radeon_bo *robj;
int r;
/* for now if someone requests domain CPU -
@@ -244,19 +247,18 @@ int radeon_gem_mmap_ioctl(struct drm_device *dev, void *data,
{
struct drm_radeon_gem_mmap *args = data;
struct drm_gem_object *gobj;
- struct radeon_object *robj;
- int r;
+ struct radeon_bo *robj;
gobj = drm_gem_object_lookup(dev, filp, args->handle);
if (gobj == NULL) {
return -EINVAL;
}
robj = gobj->driver_private;
- r = radeon_object_mmap(robj, &args->addr_ptr);
+ args->addr_ptr = radeon_bo_mmap_offset(robj);
mutex_lock(&dev->struct_mutex);
drm_gem_object_unreference(gobj);
mutex_unlock(&dev->struct_mutex);
- return r;
+ return 0;
}
int radeon_gem_busy_ioctl(struct drm_device *dev, void *data,
@@ -264,7 +266,7 @@ int radeon_gem_busy_ioctl(struct drm_device *dev, void *data,
{
struct drm_radeon_gem_busy *args = data;
struct drm_gem_object *gobj;
- struct radeon_object *robj;
+ struct radeon_bo *robj;
int r;
uint32_t cur_placement;
@@ -273,7 +275,7 @@ int radeon_gem_busy_ioctl(struct drm_device *dev, void *data,
return -EINVAL;
}
robj = gobj->driver_private;
- r = radeon_object_busy_domain(robj, &cur_placement);
+ r = radeon_bo_wait(robj, &cur_placement, true);
switch (cur_placement) {
case TTM_PL_VRAM:
args->domain = RADEON_GEM_DOMAIN_VRAM;
@@ -297,7 +299,7 @@ int radeon_gem_wait_idle_ioctl(struct drm_device *dev, void *data,
{
struct drm_radeon_gem_wait_idle *args = data;
struct drm_gem_object *gobj;
- struct radeon_object *robj;
+ struct radeon_bo *robj;
int r;
gobj = drm_gem_object_lookup(dev, filp, args->handle);
@@ -305,10 +307,11 @@ int radeon_gem_wait_idle_ioctl(struct drm_device *dev, void *data,
return -EINVAL;
}
robj = gobj->driver_private;
- r = radeon_object_wait(robj);
+ r = radeon_bo_wait(robj, NULL, false);
mutex_lock(&dev->struct_mutex);
drm_gem_object_unreference(gobj);
mutex_unlock(&dev->struct_mutex);
+ radeon_hdp_flush(robj->rdev);
return r;
}
@@ -317,7 +320,7 @@ int radeon_gem_set_tiling_ioctl(struct drm_device *dev, void *data,
{
struct drm_radeon_gem_set_tiling *args = data;
struct drm_gem_object *gobj;
- struct radeon_object *robj;
+ struct radeon_bo *robj;
int r = 0;
DRM_DEBUG("%d \n", args->handle);
@@ -325,7 +328,7 @@ int radeon_gem_set_tiling_ioctl(struct drm_device *dev, void *data,
if (gobj == NULL)
return -EINVAL;
robj = gobj->driver_private;
- radeon_object_set_tiling_flags(robj, args->tiling_flags, args->pitch);
+ r = radeon_bo_set_tiling_flags(robj, args->tiling_flags, args->pitch);
mutex_lock(&dev->struct_mutex);
drm_gem_object_unreference(gobj);
mutex_unlock(&dev->struct_mutex);
@@ -337,16 +340,19 @@ int radeon_gem_get_tiling_ioctl(struct drm_device *dev, void *data,
{
struct drm_radeon_gem_get_tiling *args = data;
struct drm_gem_object *gobj;
- struct radeon_object *robj;
+ struct radeon_bo *rbo;
int r = 0;
DRM_DEBUG("\n");
gobj = drm_gem_object_lookup(dev, filp, args->handle);
if (gobj == NULL)
return -EINVAL;
- robj = gobj->driver_private;
- radeon_object_get_tiling_flags(robj, &args->tiling_flags,
- &args->pitch);
+ rbo = gobj->driver_private;
+ r = radeon_bo_reserve(rbo, false);
+ if (unlikely(r != 0))
+ return r;
+ radeon_bo_get_tiling_flags(rbo, &args->tiling_flags, &args->pitch);
+ radeon_bo_unreserve(rbo);
mutex_lock(&dev->struct_mutex);
drm_gem_object_unreference(gobj);
mutex_unlock(&dev->struct_mutex);
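
radeon_gem_info_ioctl() now subtracts the stolen VGA and fbdev buffers from the visible VRAM it reports, and carves the ring, a 4 KiB scratch page and the IB pool out of the reported GTT size. The same arithmetic with made-up sizes; RADEON_IB_POOL_SIZE is assumed to be 16 here purely for illustration:

#include <stdio.h>
#include <stdint.h>

#define RADEON_IB_POOL_SIZE 16	/* assumed value, for illustration only */

int main(void)
{
	uint64_t real_vram_size = 256ULL << 20;
	uint64_t stollen_vga    = 8ULL << 20;	/* radeon_bo_size(rdev->stollen_vga_memory) */
	uint64_t fbdev_bo       = 8ULL << 20;	/* radeon_bo_size(rdev->fbdev_rbo) */
	uint64_t gtt_size       = 512ULL << 20;
	uint64_t ring_size      = 1ULL << 20;

	uint64_t vram_visible = real_vram_size - stollen_vga - fbdev_bo;
	uint64_t gart_size    = gtt_size - ring_size - 4096 -
				RADEON_IB_POOL_SIZE * 64ULL * 1024;

	printf("vram_visible=%llu MiB gart=%llu KiB\n",
	       (unsigned long long)(vram_visible >> 20),
	       (unsigned long long)(gart_size >> 10));
	return 0;
}
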
diff --git a/drivers/gpu/drm/radeon/radeon_i2c.c b/drivers/gpu/drm/radeon/radeon_i2c.c
index dd438d32e5c..6c645fb4dad 100644
--- a/drivers/gpu/drm/radeon/radeon_i2c.c
+++ b/drivers/gpu/drm/radeon/radeon_i2c.c
@@ -59,11 +59,11 @@ bool radeon_ddc_probe(struct radeon_connector *radeon_connector)
}
-void radeon_i2c_do_lock(struct radeon_connector *radeon_connector, int lock_state)
+void radeon_i2c_do_lock(struct radeon_i2c_chan *i2c, int lock_state)
{
- struct radeon_device *rdev = radeon_connector->base.dev->dev_private;
+ struct radeon_device *rdev = i2c->dev->dev_private;
+ struct radeon_i2c_bus_rec *rec = &i2c->rec;
uint32_t temp;
- struct radeon_i2c_bus_rec *rec = &radeon_connector->ddc_bus->rec;
/* RV410 appears to have a bug where the hw i2c in reset
* holds the i2c port in a bad state - switch hw i2c away before
@@ -78,16 +78,16 @@ void radeon_i2c_do_lock(struct radeon_connector *radeon_connector, int lock_stat
R200_DVI_I2C_PIN_SEL(R200_SEL_DDC3)));
}
}
- if (lock_state) {
- temp = RREG32(rec->a_clk_reg);
- temp &= ~(rec->a_clk_mask);
- WREG32(rec->a_clk_reg, temp);
-
- temp = RREG32(rec->a_data_reg);
- temp &= ~(rec->a_data_mask);
- WREG32(rec->a_data_reg, temp);
- }
+ /* clear the output pin values */
+ temp = RREG32(rec->a_clk_reg) & ~rec->a_clk_mask;
+ WREG32(rec->a_clk_reg, temp);
+
+ temp = RREG32(rec->a_data_reg) & ~rec->a_data_mask;
+ WREG32(rec->a_data_reg, temp);
+
+
+ /* mask the gpio pins for software use */
temp = RREG32(rec->mask_clk_reg);
if (lock_state)
temp |= rec->mask_clk_mask;
@@ -112,8 +112,9 @@ static int get_clock(void *i2c_priv)
struct radeon_i2c_bus_rec *rec = &i2c->rec;
uint32_t val;
- val = RREG32(rec->get_clk_reg);
- val &= rec->get_clk_mask;
+ /* read the value off the pin */
+ val = RREG32(rec->y_clk_reg);
+ val &= rec->y_clk_mask;
return (val != 0);
}
@@ -126,8 +127,10 @@ static int get_data(void *i2c_priv)
struct radeon_i2c_bus_rec *rec = &i2c->rec;
uint32_t val;
- val = RREG32(rec->get_data_reg);
- val &= rec->get_data_mask;
+ /* read the value off the pin */
+ val = RREG32(rec->y_data_reg);
+ val &= rec->y_data_mask;
+
return (val != 0);
}
@@ -138,9 +141,10 @@ static void set_clock(void *i2c_priv, int clock)
struct radeon_i2c_bus_rec *rec = &i2c->rec;
uint32_t val;
- val = RREG32(rec->put_clk_reg) & (uint32_t)~(rec->put_clk_mask);
- val |= clock ? 0 : rec->put_clk_mask;
- WREG32(rec->put_clk_reg, val);
+ /* set pin direction */
+ val = RREG32(rec->en_clk_reg) & ~rec->en_clk_mask;
+ val |= clock ? 0 : rec->en_clk_mask;
+ WREG32(rec->en_clk_reg, val);
}
static void set_data(void *i2c_priv, int data)
@@ -150,14 +154,15 @@ static void set_data(void *i2c_priv, int data)
struct radeon_i2c_bus_rec *rec = &i2c->rec;
uint32_t val;
- val = RREG32(rec->put_data_reg) & (uint32_t)~(rec->put_data_mask);
- val |= data ? 0 : rec->put_data_mask;
- WREG32(rec->put_data_reg, val);
+ /* set pin direction */
+ val = RREG32(rec->en_data_reg) & ~rec->en_data_mask;
+ val |= data ? 0 : rec->en_data_mask;
+ WREG32(rec->en_data_reg, val);
}
struct radeon_i2c_chan *radeon_i2c_create(struct drm_device *dev,
- struct radeon_i2c_bus_rec *rec,
- const char *name)
+ struct radeon_i2c_bus_rec *rec,
+ const char *name)
{
struct radeon_i2c_chan *i2c;
int ret;
@@ -207,3 +212,59 @@ struct drm_encoder *radeon_best_encoder(struct drm_connector *connector)
{
return NULL;
}
+
+void radeon_i2c_sw_get_byte(struct radeon_i2c_chan *i2c_bus,
+ u8 slave_addr,
+ u8 addr,
+ u8 *val)
+{
+ u8 out_buf[2];
+ u8 in_buf[2];
+ struct i2c_msg msgs[] = {
+ {
+ .addr = slave_addr,
+ .flags = 0,
+ .len = 1,
+ .buf = out_buf,
+ },
+ {
+ .addr = slave_addr,
+ .flags = I2C_M_RD,
+ .len = 1,
+ .buf = in_buf,
+ }
+ };
+
+ out_buf[0] = addr;
+ out_buf[1] = 0;
+
+ if (i2c_transfer(&i2c_bus->adapter, msgs, 2) == 2) {
+ *val = in_buf[0];
+ DRM_DEBUG("val = 0x%02x\n", *val);
+ } else {
+ DRM_ERROR("i2c 0x%02x 0x%02x read failed\n",
+ addr, *val);
+ }
+}
+
+void radeon_i2c_sw_put_byte(struct radeon_i2c_chan *i2c_bus,
+ u8 slave_addr,
+ u8 addr,
+ u8 val)
+{
+ uint8_t out_buf[2];
+ struct i2c_msg msg = {
+ .addr = slave_addr,
+ .flags = 0,
+ .len = 2,
+ .buf = out_buf,
+ };
+
+ out_buf[0] = addr;
+ out_buf[1] = val;
+
+ if (i2c_transfer(&i2c_bus->adapter, &msg, 1) != 1)
+ DRM_ERROR("i2c 0x%02x 0x%02x write failed\n",
+ addr, val);
+}
+
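
radeon_i2c_sw_get_byte() above does a two-message transfer: a one-byte write selecting the register, then a one-byte read. The sketch below is a rough userspace analogue via i2c-dev, not the in-kernel helper; the bus node and addresses are placeholders:

#include <stdio.h>
#include <stdint.h>
#include <fcntl.h>
#include <unistd.h>
#include <sys/ioctl.h>
#include <linux/i2c.h>
#include <linux/i2c-dev.h>

static int read_reg(int fd, uint8_t slave, uint8_t reg, uint8_t *val)
{
	struct i2c_msg msgs[2] = {
		{ .addr = slave, .flags = 0,        .len = 1, .buf = &reg },	/* select register */
		{ .addr = slave, .flags = I2C_M_RD, .len = 1, .buf = val },	/* read it back */
	};
	struct i2c_rdwr_ioctl_data xfer = { .msgs = msgs, .nmsgs = 2 };

	return ioctl(fd, I2C_RDWR, &xfer) == 2 ? 0 : -1;
}

int main(void)
{
	uint8_t val;
	int fd = open("/dev/i2c-0", O_RDWR);	/* placeholder bus */

	if (fd >= 0 && read_reg(fd, 0x50, 0x00, &val) == 0)
		printf("reg 0x00 = 0x%02x\n", val);
	if (fd >= 0)
		close(fd);
	return 0;
}
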
diff --git a/drivers/gpu/drm/radeon/radeon_irq_kms.c b/drivers/gpu/drm/radeon/radeon_irq_kms.c
index 8e0a8759e42..26789970c5c 100644
--- a/drivers/gpu/drm/radeon/radeon_irq_kms.c
+++ b/drivers/gpu/drm/radeon/radeon_irq_kms.c
@@ -87,11 +87,26 @@ int radeon_irq_kms_init(struct radeon_device *rdev)
if (rdev->flags & RADEON_SINGLE_CRTC)
num_crtc = 1;
-
+ spin_lock_init(&rdev->irq.sw_lock);
r = drm_vblank_init(rdev->ddev, num_crtc);
if (r) {
return r;
}
+ /* enable msi */
+ rdev->msi_enabled = 0;
+ /* MSIs don't seem to work on my rs780;
+ * not sure about rs880 or other rs780s.
+ * Needs more investigation.
+ */
+ if ((rdev->family >= CHIP_RV380) &&
+ (rdev->family != CHIP_RS780) &&
+ (rdev->family != CHIP_RS880)) {
+ int ret = pci_enable_msi(rdev->pdev);
+ if (!ret) {
+ rdev->msi_enabled = 1;
+ DRM_INFO("radeon: using MSI.\n");
+ }
+ }
drm_irq_install(rdev->ddev);
rdev->irq.installed = true;
DRM_INFO("radeon: irq initialized.\n");
@@ -103,5 +118,33 @@ void radeon_irq_kms_fini(struct radeon_device *rdev)
if (rdev->irq.installed) {
rdev->irq.installed = false;
drm_irq_uninstall(rdev->ddev);
+ if (rdev->msi_enabled)
+ pci_disable_msi(rdev->pdev);
+ }
+}
+
+void radeon_irq_kms_sw_irq_get(struct radeon_device *rdev)
+{
+ unsigned long irqflags;
+
+ spin_lock_irqsave(&rdev->irq.sw_lock, irqflags);
+ if (rdev->ddev->irq_enabled && (++rdev->irq.sw_refcount == 1)) {
+ rdev->irq.sw_int = true;
+ radeon_irq_set(rdev);
}
+ spin_unlock_irqrestore(&rdev->irq.sw_lock, irqflags);
}
+
+void radeon_irq_kms_sw_irq_put(struct radeon_device *rdev)
+{
+ unsigned long irqflags;
+
+ spin_lock_irqsave(&rdev->irq.sw_lock, irqflags);
+ BUG_ON(rdev->ddev->irq_enabled && rdev->irq.sw_refcount <= 0);
+ if (rdev->ddev->irq_enabled && (--rdev->irq.sw_refcount == 0)) {
+ rdev->irq.sw_int = false;
+ radeon_irq_set(rdev);
+ }
+ spin_unlock_irqrestore(&rdev->irq.sw_lock, irqflags);
+}
+
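
radeon_irq_kms_sw_irq_get()/put() refcount the software interrupt under irq.sw_lock, so only the first get and the last put actually touch the hardware; radeon_fence_wait() uses the pair to bracket its wait_event calls. A userspace model of that behaviour, with a pthread mutex standing in for the spinlock and a printf in place of radeon_irq_set():

#include <stdio.h>
#include <pthread.h>

static pthread_mutex_t lock = PTHREAD_MUTEX_INITIALIZER;
static int refcount;
static int sw_int_enabled;

static void sw_irq_get(void)
{
	pthread_mutex_lock(&lock);
	if (++refcount == 1) {			/* first user programs the hw */
		sw_int_enabled = 1;
		printf("enable sw interrupt\n");
	}
	pthread_mutex_unlock(&lock);
}

static void sw_irq_put(void)
{
	pthread_mutex_lock(&lock);
	if (--refcount == 0) {			/* last user disables it again */
		sw_int_enabled = 0;
		printf("disable sw interrupt\n");
	}
	pthread_mutex_unlock(&lock);
}

int main(void)
{
	sw_irq_get();	/* enables */
	sw_irq_get();	/* nested user, no hw access */
	sw_irq_put();
	sw_irq_put();	/* disables */
	return 0;
}
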
diff --git a/drivers/gpu/drm/radeon/radeon_legacy_crtc.c b/drivers/gpu/drm/radeon/radeon_legacy_crtc.c
index 36410f85d70..b82ede98e15 100644
--- a/drivers/gpu/drm/radeon/radeon_legacy_crtc.c
+++ b/drivers/gpu/drm/radeon/radeon_legacy_crtc.c
@@ -30,6 +30,18 @@
#include "radeon.h"
#include "atom.h"
+static void radeon_overscan_setup(struct drm_crtc *crtc,
+ struct drm_display_mode *mode)
+{
+ struct drm_device *dev = crtc->dev;
+ struct radeon_device *rdev = dev->dev_private;
+ struct radeon_crtc *radeon_crtc = to_radeon_crtc(crtc);
+
+ WREG32(RADEON_OVR_CLR + radeon_crtc->crtc_offset, 0);
+ WREG32(RADEON_OVR_WID_LEFT_RIGHT + radeon_crtc->crtc_offset, 0);
+ WREG32(RADEON_OVR_WID_TOP_BOTTOM + radeon_crtc->crtc_offset, 0);
+}
+
static void radeon_legacy_rmx_mode_set(struct drm_crtc *crtc,
struct drm_display_mode *mode,
struct drm_display_mode *adjusted_mode)
@@ -48,7 +60,7 @@ static void radeon_legacy_rmx_mode_set(struct drm_crtc *crtc,
u32 fp_horz_stretch, fp_vert_stretch, fp_horz_vert_active;
u32 fp_h_sync_strt_wid, fp_crtc_h_total_disp;
u32 fp_v_sync_strt_wid, fp_crtc_v_total_disp;
- struct radeon_native_mode *native_mode = &radeon_crtc->native_mode;
+ struct drm_display_mode *native_mode = &radeon_crtc->native_mode;
fp_vert_stretch = RREG32(RADEON_FP_VERT_STRETCH) &
(RADEON_VERT_STRETCH_RESERVED |
@@ -95,19 +107,19 @@ static void radeon_legacy_rmx_mode_set(struct drm_crtc *crtc,
fp_horz_vert_active = 0;
- if (native_mode->panel_xres == 0 ||
- native_mode->panel_yres == 0) {
+ if (native_mode->hdisplay == 0 ||
+ native_mode->vdisplay == 0) {
hscale = false;
vscale = false;
} else {
- if (xres > native_mode->panel_xres)
- xres = native_mode->panel_xres;
- if (yres > native_mode->panel_yres)
- yres = native_mode->panel_yres;
+ if (xres > native_mode->hdisplay)
+ xres = native_mode->hdisplay;
+ if (yres > native_mode->vdisplay)
+ yres = native_mode->vdisplay;
- if (xres == native_mode->panel_xres)
+ if (xres == native_mode->hdisplay)
hscale = false;
- if (yres == native_mode->panel_yres)
+ if (yres == native_mode->vdisplay)
vscale = false;
}
@@ -119,11 +131,11 @@ static void radeon_legacy_rmx_mode_set(struct drm_crtc *crtc,
else {
inc = (fp_horz_stretch & RADEON_HORZ_AUTO_RATIO_INC) ? 1 : 0;
scale = ((xres + inc) * RADEON_HORZ_STRETCH_RATIO_MAX)
- / native_mode->panel_xres + 1;
+ / native_mode->hdisplay + 1;
fp_horz_stretch |= (((scale) & RADEON_HORZ_STRETCH_RATIO_MASK) |
RADEON_HORZ_STRETCH_BLEND |
RADEON_HORZ_STRETCH_ENABLE |
- ((native_mode->panel_xres/8-1) << 16));
+ ((native_mode->hdisplay/8-1) << 16));
}
if (!vscale)
@@ -131,11 +143,11 @@ static void radeon_legacy_rmx_mode_set(struct drm_crtc *crtc,
else {
inc = (fp_vert_stretch & RADEON_VERT_AUTO_RATIO_INC) ? 1 : 0;
scale = ((yres + inc) * RADEON_VERT_STRETCH_RATIO_MAX)
- / native_mode->panel_yres + 1;
+ / native_mode->vdisplay + 1;
fp_vert_stretch |= (((scale) & RADEON_VERT_STRETCH_RATIO_MASK) |
RADEON_VERT_STRETCH_ENABLE |
RADEON_VERT_STRETCH_BLEND |
- ((native_mode->panel_yres-1) << 12));
+ ((native_mode->vdisplay-1) << 12));
}
break;
case RMX_CENTER:
@@ -175,8 +187,8 @@ static void radeon_legacy_rmx_mode_set(struct drm_crtc *crtc,
? RADEON_CRTC_V_SYNC_POL
: 0)));
- fp_horz_vert_active = (((native_mode->panel_yres) & 0xfff) |
- (((native_mode->panel_xres / 8) & 0x1ff) << 16));
+ fp_horz_vert_active = (((native_mode->vdisplay) & 0xfff) |
+ (((native_mode->hdisplay / 8) & 0x1ff) << 16));
break;
case RMX_OFF:
default:
@@ -292,8 +304,7 @@ void radeon_crtc_dpms(struct drm_crtc *crtc, int mode)
uint32_t mask;
if (radeon_crtc->crtc_id)
- mask = (RADEON_CRTC2_EN |
- RADEON_CRTC2_DISP_DIS |
+ mask = (RADEON_CRTC2_DISP_DIS |
RADEON_CRTC2_VSYNC_DIS |
RADEON_CRTC2_HSYNC_DIS |
RADEON_CRTC2_DISP_REQ_EN_B);
@@ -305,7 +316,7 @@ void radeon_crtc_dpms(struct drm_crtc *crtc, int mode)
switch (mode) {
case DRM_MODE_DPMS_ON:
if (radeon_crtc->crtc_id)
- WREG32_P(RADEON_CRTC2_GEN_CNTL, RADEON_CRTC2_EN, ~mask);
+ WREG32_P(RADEON_CRTC2_GEN_CNTL, RADEON_CRTC2_EN, ~(RADEON_CRTC2_EN | mask));
else {
WREG32_P(RADEON_CRTC_GEN_CNTL, RADEON_CRTC_EN, ~(RADEON_CRTC_EN |
RADEON_CRTC_DISP_REQ_EN_B));
@@ -319,7 +330,7 @@ void radeon_crtc_dpms(struct drm_crtc *crtc, int mode)
case DRM_MODE_DPMS_OFF:
drm_vblank_pre_modeset(dev, radeon_crtc->crtc_id);
if (radeon_crtc->crtc_id)
- WREG32_P(RADEON_CRTC2_GEN_CNTL, mask, ~mask);
+ WREG32_P(RADEON_CRTC2_GEN_CNTL, mask, ~(RADEON_CRTC2_EN | mask));
else {
WREG32_P(RADEON_CRTC_GEN_CNTL, RADEON_CRTC_DISP_REQ_EN_B, ~(RADEON_CRTC_EN |
RADEON_CRTC_DISP_REQ_EN_B));
@@ -400,14 +411,21 @@ int radeon_crtc_set_base(struct drm_crtc *crtc, int x, int y,
struct radeon_crtc *radeon_crtc = to_radeon_crtc(crtc);
struct radeon_framebuffer *radeon_fb;
struct drm_gem_object *obj;
+ struct radeon_bo *rbo;
uint64_t base;
uint32_t crtc_offset, crtc_offset_cntl, crtc_tile_x0_y0 = 0;
uint32_t crtc_pitch, pitch_pixels;
uint32_t tiling_flags;
int format;
uint32_t gen_cntl_reg, gen_cntl_val;
+ int r;
DRM_DEBUG("\n");
+ /* no fb bound */
+ if (!crtc->fb) {
+ DRM_DEBUG("No FB bound\n");
+ return 0;
+ }
radeon_fb = to_radeon_framebuffer(crtc->fb);
@@ -431,10 +449,22 @@ int radeon_crtc_set_base(struct drm_crtc *crtc, int x, int y,
return false;
}
+ /* Pin framebuffer & get tiling information */
obj = radeon_fb->obj;
- if (radeon_gem_object_pin(obj, RADEON_GEM_DOMAIN_VRAM, &base)) {
+ rbo = obj->driver_private;
+ r = radeon_bo_reserve(rbo, false);
+ if (unlikely(r != 0))
+ return r;
+ r = radeon_bo_pin(rbo, RADEON_GEM_DOMAIN_VRAM, &base);
+ if (unlikely(r != 0)) {
+ radeon_bo_unreserve(rbo);
return -EINVAL;
}
+ radeon_bo_get_tiling_flags(rbo, &tiling_flags, NULL);
+ radeon_bo_unreserve(rbo);
+ if (tiling_flags & RADEON_TILING_MICRO)
+ DRM_ERROR("trying to scanout microtiled buffer\n");
+
/* if scanout was in GTT this really wouldn't work */
/* crtc offset is from display base addr not FB location */
radeon_crtc->legacy_display_base_addr = rdev->mc.vram_location;
@@ -449,10 +479,6 @@ int radeon_crtc_set_base(struct drm_crtc *crtc, int x, int y,
(crtc->fb->bits_per_pixel * 8));
crtc_pitch |= crtc_pitch << 16;
- radeon_object_get_tiling_flags(obj->driver_private,
- &tiling_flags, NULL);
- if (tiling_flags & RADEON_TILING_MICRO)
- DRM_ERROR("trying to scanout microtiled buffer\n");
if (tiling_flags & RADEON_TILING_MACRO) {
if (ASIC_IS_R300(rdev))
@@ -530,8 +556,17 @@ int radeon_crtc_set_base(struct drm_crtc *crtc, int x, int y,
if (old_fb && old_fb != crtc->fb) {
radeon_fb = to_radeon_framebuffer(old_fb);
- radeon_gem_object_unpin(radeon_fb->obj);
+ rbo = radeon_fb->obj->driver_private;
+ r = radeon_bo_reserve(rbo, false);
+ if (unlikely(r != 0))
+ return r;
+ radeon_bo_unpin(rbo);
+ radeon_bo_unreserve(rbo);
}
+
+ /* Bytes per pixel may have changed */
+ radeon_bandwidth_update(rdev);
+
return 0;
}
@@ -638,12 +673,8 @@ static bool radeon_set_crtc_timing(struct drm_crtc *crtc, struct drm_display_mod
uint32_t crtc2_gen_cntl;
uint32_t disp2_merge_cntl;
- /* check to see if TV DAC is enabled for another crtc and keep it enabled */
- if (RREG32(RADEON_CRTC2_GEN_CNTL) & RADEON_CRTC2_CRT2_ON)
- crtc2_gen_cntl = RADEON_CRTC2_CRT2_ON;
- else
- crtc2_gen_cntl = 0;
-
+ /* keep the TV DAC enabled if it is already on for another crtc */
+ crtc2_gen_cntl = RREG32(RADEON_CRTC2_GEN_CNTL) & 0x00718080;
crtc2_gen_cntl |= ((format << 8)
| RADEON_CRTC2_VSYNC_DIS
| RADEON_CRTC2_HSYNC_DIS
@@ -664,12 +695,16 @@ static bool radeon_set_crtc_timing(struct drm_crtc *crtc, struct drm_display_mod
WREG32(RADEON_DISP2_MERGE_CNTL, disp2_merge_cntl);
WREG32(RADEON_CRTC2_GEN_CNTL, crtc2_gen_cntl);
+
+ WREG32(RADEON_FP_H2_SYNC_STRT_WID, crtc_h_sync_strt_wid);
+ WREG32(RADEON_FP_V2_SYNC_STRT_WID, crtc_v_sync_strt_wid);
} else {
uint32_t crtc_gen_cntl;
uint32_t crtc_ext_cntl;
uint32_t disp_merge_cntl;
- crtc_gen_cntl = (RADEON_CRTC_EXT_DISP_EN
+ crtc_gen_cntl = RREG32(RADEON_CRTC_GEN_CNTL) & 0x00718000;
+ crtc_gen_cntl |= (RADEON_CRTC_EXT_DISP_EN
| (format << 8)
| RADEON_CRTC_DISP_REQ_EN_B
| ((mode->flags & DRM_MODE_FLAG_DBLSCAN)
@@ -772,15 +807,17 @@ static void radeon_set_pll(struct drm_crtc *crtc, struct drm_display_mode *mode)
if (encoder->encoder_type != DRM_MODE_ENCODER_DAC)
pll_flags |= RADEON_PLL_NO_ODD_POST_DIV;
if (encoder->encoder_type == DRM_MODE_ENCODER_LVDS) {
- struct radeon_encoder *radeon_encoder = to_radeon_encoder(encoder);
- struct radeon_encoder_lvds *lvds = (struct radeon_encoder_lvds *)radeon_encoder->enc_priv;
- if (lvds) {
- if (lvds->use_bios_dividers) {
- pll_ref_div = lvds->panel_ref_divider;
- pll_fb_post_div = (lvds->panel_fb_divider |
- (lvds->panel_post_divider << 16));
- htotal_cntl = 0;
- use_bios_divs = true;
+ if (!rdev->is_atom_bios) {
+ struct radeon_encoder *radeon_encoder = to_radeon_encoder(encoder);
+ struct radeon_encoder_lvds *lvds = (struct radeon_encoder_lvds *)radeon_encoder->enc_priv;
+ if (lvds) {
+ if (lvds->use_bios_dividers) {
+ pll_ref_div = lvds->panel_ref_divider;
+ pll_fb_post_div = (lvds->panel_fb_divider |
+ (lvds->panel_post_divider << 16));
+ htotal_cntl = 0;
+ use_bios_divs = true;
+ }
}
}
pll_flags |= RADEON_PLL_USE_REF_DIV;
@@ -1015,14 +1052,12 @@ static int radeon_crtc_mode_set(struct drm_crtc *crtc,
int x, int y, struct drm_framebuffer *old_fb)
{
struct radeon_crtc *radeon_crtc = to_radeon_crtc(crtc);
- struct drm_device *dev = crtc->dev;
- struct radeon_device *rdev = dev->dev_private;
/* TODO TV */
radeon_crtc_set_base(crtc, x, y, old_fb);
radeon_set_crtc_timing(crtc, adjusted_mode);
radeon_set_pll(crtc, adjusted_mode);
- radeon_bandwidth_update(rdev);
+ radeon_overscan_setup(crtc, adjusted_mode);
if (radeon_crtc->crtc_id == 0) {
radeon_legacy_rmx_mode_set(crtc, mode, adjusted_mode);
} else {
@@ -1038,12 +1073,29 @@ static int radeon_crtc_mode_set(struct drm_crtc *crtc,
static void radeon_crtc_prepare(struct drm_crtc *crtc)
{
- radeon_crtc_dpms(crtc, DRM_MODE_DPMS_OFF);
+ struct drm_device *dev = crtc->dev;
+ struct drm_crtc *crtci;
+
+ /*
+ * The hardware wedges sometimes if you reconfigure one CRTC
+ * whilst another is running (see fdo bug #24611).
+ */
+ list_for_each_entry(crtci, &dev->mode_config.crtc_list, head)
+ radeon_crtc_dpms(crtci, DRM_MODE_DPMS_OFF);
}
static void radeon_crtc_commit(struct drm_crtc *crtc)
{
- radeon_crtc_dpms(crtc, DRM_MODE_DPMS_ON);
+ struct drm_device *dev = crtc->dev;
+ struct drm_crtc *crtci;
+
+ /*
+ * Reenable the CRTCs that should be running.
+ */
+ list_for_each_entry(crtci, &dev->mode_config.crtc_list, head) {
+ if (crtci->enabled)
+ radeon_crtc_dpms(crtci, DRM_MODE_DPMS_ON);
+ }
}
static const struct drm_crtc_helper_funcs legacy_helper_funcs = {
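
The RMX hunks above switch the panel size over to drm_display_mode hdisplay/vdisplay but keep the stretch-ratio arithmetic. A standalone check of that ratio; the RATIO_MAX constant is assumed to be 4096 here for illustration only:

#include <stdio.h>

#define HORZ_STRETCH_RATIO_MAX 4096	/* assumed value, for illustration only */

int main(void)
{
	unsigned xres = 1024;		/* requested mode width */
	unsigned native = 1280;		/* panel native hdisplay */
	unsigned inc = 0;		/* RADEON_HORZ_AUTO_RATIO_INC bit */
	unsigned scale = ((xres + inc) * HORZ_STRETCH_RATIO_MAX) / native + 1;

	printf("stretch ratio field: %u of %u\n", scale, HORZ_STRETCH_RATIO_MAX);
	return 0;
}
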
diff --git a/drivers/gpu/drm/radeon/radeon_legacy_encoders.c b/drivers/gpu/drm/radeon/radeon_legacy_encoders.c
index 6ceb958fd19..df00515e81f 100644
--- a/drivers/gpu/drm/radeon/radeon_legacy_encoders.c
+++ b/drivers/gpu/drm/radeon/radeon_legacy_encoders.c
@@ -107,8 +107,6 @@ static void radeon_legacy_lvds_prepare(struct drm_encoder *encoder)
else
radeon_combios_output_lock(encoder, true);
radeon_legacy_lvds_dpms(encoder, DRM_MODE_DPMS_OFF);
-
- radeon_encoder_set_active_device(encoder);
}
static void radeon_legacy_lvds_commit(struct drm_encoder *encoder)
@@ -138,7 +136,14 @@ static void radeon_legacy_lvds_mode_set(struct drm_encoder *encoder,
lvds_pll_cntl &= ~RADEON_LVDS_PLL_EN;
lvds_ss_gen_cntl = RREG32(RADEON_LVDS_SS_GEN_CNTL);
- if ((!rdev->is_atom_bios)) {
+ if (rdev->is_atom_bios) {
+ /* LVDS_GEN_CNTL parameters are computed in LVDSEncoderControl;
+ * we need to call that on resume to set the register up properly.
+ */
+ radeon_encoder->pixel_clock = adjusted_mode->clock;
+ atombios_digital_setup(encoder, PANEL_ENCODER_ACTION_ENABLE);
+ lvds_gen_cntl = RREG32(RADEON_LVDS_GEN_CNTL);
+ } else {
struct radeon_encoder_lvds *lvds = (struct radeon_encoder_lvds *)radeon_encoder->enc_priv;
if (lvds) {
DRM_DEBUG("bios LVDS_GEN_CNTL: 0x%x\n", lvds->lvds_gen_cntl);
@@ -149,8 +154,7 @@ static void radeon_legacy_lvds_mode_set(struct drm_encoder *encoder,
(lvds->panel_blon_delay << RADEON_LVDS_PWRSEQ_DELAY2_SHIFT));
} else
lvds_gen_cntl = RREG32(RADEON_LVDS_GEN_CNTL);
- } else
- lvds_gen_cntl = RREG32(RADEON_LVDS_GEN_CNTL);
+ }
lvds_gen_cntl |= RADEON_LVDS_DISPLAY_DIS;
lvds_gen_cntl &= ~(RADEON_LVDS_ON |
RADEON_LVDS_BLON |
@@ -186,23 +190,32 @@ static void radeon_legacy_lvds_mode_set(struct drm_encoder *encoder,
radeon_combios_encoder_crtc_scratch_regs(encoder, radeon_crtc->crtc_id);
}
-static bool radeon_legacy_lvds_mode_fixup(struct drm_encoder *encoder,
- struct drm_display_mode *mode,
- struct drm_display_mode *adjusted_mode)
+static bool radeon_legacy_mode_fixup(struct drm_encoder *encoder,
+ struct drm_display_mode *mode,
+ struct drm_display_mode *adjusted_mode)
{
struct radeon_encoder *radeon_encoder = to_radeon_encoder(encoder);
+ /* set the active encoder to connector routing */
+ radeon_encoder_set_active_device(encoder);
drm_mode_set_crtcinfo(adjusted_mode, 0);
- if (radeon_encoder->rmx_type != RMX_OFF)
- radeon_rmx_mode_fixup(encoder, mode, adjusted_mode);
+ /* get the native mode for LVDS */
+ if (radeon_encoder->active_device & (ATOM_DEVICE_LCD_SUPPORT)) {
+ struct drm_display_mode *native_mode = &radeon_encoder->native_mode;
+ int mode_id = adjusted_mode->base.id;
+ *adjusted_mode = *native_mode;
+ adjusted_mode->hdisplay = mode->hdisplay;
+ adjusted_mode->vdisplay = mode->vdisplay;
+ adjusted_mode->base.id = mode_id;
+ }
return true;
}
static const struct drm_encoder_helper_funcs radeon_legacy_lvds_helper_funcs = {
.dpms = radeon_legacy_lvds_dpms,
- .mode_fixup = radeon_legacy_lvds_mode_fixup,
+ .mode_fixup = radeon_legacy_mode_fixup,
.prepare = radeon_legacy_lvds_prepare,
.mode_set = radeon_legacy_lvds_mode_set,
.commit = radeon_legacy_lvds_commit,
@@ -214,16 +227,6 @@ static const struct drm_encoder_funcs radeon_legacy_lvds_enc_funcs = {
.destroy = radeon_enc_destroy,
};
-static bool radeon_legacy_primary_dac_mode_fixup(struct drm_encoder *encoder,
- struct drm_display_mode *mode,
- struct drm_display_mode *adjusted_mode)
-{
-
- drm_mode_set_crtcinfo(adjusted_mode, 0);
-
- return true;
-}
-
static void radeon_legacy_primary_dac_dpms(struct drm_encoder *encoder, int mode)
{
struct drm_device *dev = encoder->dev;
@@ -272,7 +275,6 @@ static void radeon_legacy_primary_dac_prepare(struct drm_encoder *encoder)
else
radeon_combios_output_lock(encoder, true);
radeon_legacy_primary_dac_dpms(encoder, DRM_MODE_DPMS_OFF);
- radeon_encoder_set_active_device(encoder);
}
static void radeon_legacy_primary_dac_commit(struct drm_encoder *encoder)
@@ -410,7 +412,7 @@ static enum drm_connector_status radeon_legacy_primary_dac_detect(struct drm_enc
static const struct drm_encoder_helper_funcs radeon_legacy_primary_dac_helper_funcs = {
.dpms = radeon_legacy_primary_dac_dpms,
- .mode_fixup = radeon_legacy_primary_dac_mode_fixup,
+ .mode_fixup = radeon_legacy_mode_fixup,
.prepare = radeon_legacy_primary_dac_prepare,
.mode_set = radeon_legacy_primary_dac_mode_set,
.commit = radeon_legacy_primary_dac_commit,
@@ -423,16 +425,6 @@ static const struct drm_encoder_funcs radeon_legacy_primary_dac_enc_funcs = {
.destroy = radeon_enc_destroy,
};
-static bool radeon_legacy_tmds_int_mode_fixup(struct drm_encoder *encoder,
- struct drm_display_mode *mode,
- struct drm_display_mode *adjusted_mode)
-{
-
- drm_mode_set_crtcinfo(adjusted_mode, 0);
-
- return true;
-}
-
static void radeon_legacy_tmds_int_dpms(struct drm_encoder *encoder, int mode)
{
struct drm_device *dev = encoder->dev;
@@ -468,7 +460,6 @@ static void radeon_legacy_tmds_int_prepare(struct drm_encoder *encoder)
else
radeon_combios_output_lock(encoder, true);
radeon_legacy_tmds_int_dpms(encoder, DRM_MODE_DPMS_OFF);
- radeon_encoder_set_active_device(encoder);
}
static void radeon_legacy_tmds_int_commit(struct drm_encoder *encoder)
@@ -543,6 +534,14 @@ static void radeon_legacy_tmds_int_mode_set(struct drm_encoder *encoder,
fp_gen_cntl &= ~(RADEON_FP_FPON | RADEON_FP_TMDS_EN);
+ fp_gen_cntl &= ~(RADEON_FP_RMX_HVSYNC_CONTROL_EN |
+ RADEON_FP_DFP_SYNC_SEL |
+ RADEON_FP_CRT_SYNC_SEL |
+ RADEON_FP_CRTC_LOCK_8DOT |
+ RADEON_FP_USE_SHADOW_EN |
+ RADEON_FP_CRTC_USE_SHADOW_VEND |
+ RADEON_FP_CRT_SYNC_ALT);
+
if (1) /* FIXME rgbBits == 8 */
fp_gen_cntl |= RADEON_FP_PANEL_FORMAT; /* 24 bit format */
else
@@ -556,7 +555,7 @@ static void radeon_legacy_tmds_int_mode_set(struct drm_encoder *encoder,
else
fp_gen_cntl |= R200_FP_SOURCE_SEL_CRTC1;
} else
- fp_gen_cntl |= RADEON_FP_SEL_CRTC1;
+ fp_gen_cntl &= ~RADEON_FP_SEL_CRTC2;
} else {
if (ASIC_IS_R300(rdev) || rdev->family == CHIP_R200) {
fp_gen_cntl &= ~R200_FP_SOURCE_SEL_MASK;
@@ -577,7 +576,7 @@ static void radeon_legacy_tmds_int_mode_set(struct drm_encoder *encoder,
static const struct drm_encoder_helper_funcs radeon_legacy_tmds_int_helper_funcs = {
.dpms = radeon_legacy_tmds_int_dpms,
- .mode_fixup = radeon_legacy_tmds_int_mode_fixup,
+ .mode_fixup = radeon_legacy_mode_fixup,
.prepare = radeon_legacy_tmds_int_prepare,
.mode_set = radeon_legacy_tmds_int_mode_set,
.commit = radeon_legacy_tmds_int_commit,
@@ -589,16 +588,6 @@ static const struct drm_encoder_funcs radeon_legacy_tmds_int_enc_funcs = {
.destroy = radeon_enc_destroy,
};
-static bool radeon_legacy_tmds_ext_mode_fixup(struct drm_encoder *encoder,
- struct drm_display_mode *mode,
- struct drm_display_mode *adjusted_mode)
-{
-
- drm_mode_set_crtcinfo(adjusted_mode, 0);
-
- return true;
-}
-
static void radeon_legacy_tmds_ext_dpms(struct drm_encoder *encoder, int mode)
{
struct drm_device *dev = encoder->dev;
@@ -636,7 +625,6 @@ static void radeon_legacy_tmds_ext_prepare(struct drm_encoder *encoder)
else
radeon_combios_output_lock(encoder, true);
radeon_legacy_tmds_ext_dpms(encoder, DRM_MODE_DPMS_OFF);
- radeon_encoder_set_active_device(encoder);
}
static void radeon_legacy_tmds_ext_commit(struct drm_encoder *encoder)
@@ -690,6 +678,8 @@ static void radeon_legacy_tmds_ext_mode_set(struct drm_encoder *encoder,
/*if (mode->clock > 165000)
fp2_gen_cntl |= R300_FP2_DVO_DUAL_CHANNEL_EN;*/
}
+ if (!radeon_combios_external_tmds_setup(encoder))
+ radeon_external_tmds_setup(encoder);
}
if (radeon_crtc->crtc_id == 0) {
@@ -717,9 +707,22 @@ static void radeon_legacy_tmds_ext_mode_set(struct drm_encoder *encoder,
radeon_combios_encoder_crtc_scratch_regs(encoder, radeon_crtc->crtc_id);
}
+static void radeon_ext_tmds_enc_destroy(struct drm_encoder *encoder)
+{
+ struct radeon_encoder *radeon_encoder = to_radeon_encoder(encoder);
+ struct radeon_encoder_ext_tmds *tmds = radeon_encoder->enc_priv;
+ if (tmds) {
+ if (tmds->i2c_bus)
+ radeon_i2c_destroy(tmds->i2c_bus);
+ }
+ kfree(radeon_encoder->enc_priv);
+ drm_encoder_cleanup(encoder);
+ kfree(radeon_encoder);
+}
+
static const struct drm_encoder_helper_funcs radeon_legacy_tmds_ext_helper_funcs = {
.dpms = radeon_legacy_tmds_ext_dpms,
- .mode_fixup = radeon_legacy_tmds_ext_mode_fixup,
+ .mode_fixup = radeon_legacy_mode_fixup,
.prepare = radeon_legacy_tmds_ext_prepare,
.mode_set = radeon_legacy_tmds_ext_mode_set,
.commit = radeon_legacy_tmds_ext_commit,
@@ -728,19 +731,9 @@ static const struct drm_encoder_helper_funcs radeon_legacy_tmds_ext_helper_funcs
static const struct drm_encoder_funcs radeon_legacy_tmds_ext_enc_funcs = {
- .destroy = radeon_enc_destroy,
+ .destroy = radeon_ext_tmds_enc_destroy,
};
-static bool radeon_legacy_tv_dac_mode_fixup(struct drm_encoder *encoder,
- struct drm_display_mode *mode,
- struct drm_display_mode *adjusted_mode)
-{
-
- drm_mode_set_crtcinfo(adjusted_mode, 0);
-
- return true;
-}
-
static void radeon_legacy_tv_dac_dpms(struct drm_encoder *encoder, int mode)
{
struct drm_device *dev = encoder->dev;
@@ -839,7 +832,6 @@ static void radeon_legacy_tv_dac_prepare(struct drm_encoder *encoder)
else
radeon_combios_output_lock(encoder, true);
radeon_legacy_tv_dac_dpms(encoder, DRM_MODE_DPMS_OFF);
- radeon_encoder_set_active_device(encoder);
}
static void radeon_legacy_tv_dac_commit(struct drm_encoder *encoder)
@@ -1258,7 +1250,7 @@ static enum drm_connector_status radeon_legacy_tv_dac_detect(struct drm_encoder
static const struct drm_encoder_helper_funcs radeon_legacy_tv_dac_helper_funcs = {
.dpms = radeon_legacy_tv_dac_dpms,
- .mode_fixup = radeon_legacy_tv_dac_mode_fixup,
+ .mode_fixup = radeon_legacy_mode_fixup,
.prepare = radeon_legacy_tv_dac_prepare,
.mode_set = radeon_legacy_tv_dac_mode_set,
.commit = radeon_legacy_tv_dac_commit,
@@ -1295,6 +1287,29 @@ static struct radeon_encoder_int_tmds *radeon_legacy_get_tmds_info(struct radeon
return tmds;
}
+static struct radeon_encoder_ext_tmds *radeon_legacy_get_ext_tmds_info(struct radeon_encoder *encoder)
+{
+ struct drm_device *dev = encoder->base.dev;
+ struct radeon_device *rdev = dev->dev_private;
+ struct radeon_encoder_ext_tmds *tmds = NULL;
+ bool ret;
+
+ if (rdev->is_atom_bios)
+ return NULL;
+
+ tmds = kzalloc(sizeof(struct radeon_encoder_ext_tmds), GFP_KERNEL);
+
+ if (!tmds)
+ return NULL;
+
+ ret = radeon_legacy_get_ext_tmds_info_from_combios(encoder, tmds);
+
+ if (ret == false)
+ radeon_legacy_get_ext_tmds_info_from_table(encoder, tmds);
+
+ return tmds;
+}
+
void
radeon_add_legacy_encoder(struct drm_device *dev, uint32_t encoder_id, uint32_t supported_device)
{
@@ -1322,7 +1337,6 @@ radeon_add_legacy_encoder(struct drm_device *dev, uint32_t encoder_id, uint32_t
encoder->possible_crtcs = 0x1;
else
encoder->possible_crtcs = 0x3;
- encoder->possible_clones = 0;
radeon_encoder->enc_priv = NULL;
@@ -1366,7 +1380,7 @@ radeon_add_legacy_encoder(struct drm_device *dev, uint32_t encoder_id, uint32_t
drm_encoder_init(dev, encoder, &radeon_legacy_tmds_ext_enc_funcs, DRM_MODE_ENCODER_TMDS);
drm_encoder_helper_add(encoder, &radeon_legacy_tmds_ext_helper_funcs);
if (!rdev->is_atom_bios)
- radeon_combios_get_ext_tmds_info(radeon_encoder);
+ radeon_encoder->enc_priv = radeon_legacy_get_ext_tmds_info(radeon_encoder);
break;
}
}
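
radeon_legacy_mode_fixup() folds the old per-encoder fixups into one helper and, for LVDS, copies the stored native mode into adjusted_mode while keeping the requested active size and the original mode id. A trimmed model of that copy-and-restore step, using a small stand-in struct rather than struct drm_display_mode:

#include <stdio.h>

/* stand-in for the handful of drm_display_mode fields the fixup touches */
struct mode {
	int id;
	int hdisplay, vdisplay;
	int clock;
};

static void fixup_lvds(struct mode *adjusted, const struct mode *requested,
		       const struct mode *native)
{
	int id = adjusted->id;		/* preserve the mode object's identity */

	*adjusted = *native;		/* start from the panel's native timing */
	adjusted->hdisplay = requested->hdisplay;
	adjusted->vdisplay = requested->vdisplay;
	adjusted->id = id;
}

int main(void)
{
	struct mode native = { .id = 7, .hdisplay = 1280, .vdisplay = 800, .clock = 71000 };
	struct mode req    = { .id = 3, .hdisplay = 1024, .vdisplay = 768, .clock = 65000 };
	struct mode adj    = req;

	fixup_lvds(&adj, &req, &native);
	printf("id=%d %dx%d clock=%d\n", adj.id, adj.hdisplay, adj.vdisplay, adj.clock);
	return 0;
}
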
diff --git a/drivers/gpu/drm/radeon/radeon_mode.h b/drivers/gpu/drm/radeon/radeon_mode.h
index e61226817cc..135693d5437 100644
--- a/drivers/gpu/drm/radeon/radeon_mode.h
+++ b/drivers/gpu/drm/radeon/radeon_mode.h
@@ -89,24 +89,38 @@ enum radeon_tv_std {
TV_STD_PAL_CN,
};
+/* radeon gpio-based i2c
+ * 1. "mask" reg and bits
+ * grabs the gpio pins for software use
+ * 0=not held 1=held
+ * 2. "a" reg and bits
+ * output pin value
+ * 0=low 1=high
+ * 3. "en" reg and bits
+ * sets the pin direction
+ * 0=input 1=output
+ * 4. "y" reg and bits
+ * input pin value
+ * 0=low 1=high
+ */
struct radeon_i2c_bus_rec {
bool valid;
uint32_t mask_clk_reg;
uint32_t mask_data_reg;
uint32_t a_clk_reg;
uint32_t a_data_reg;
- uint32_t put_clk_reg;
- uint32_t put_data_reg;
- uint32_t get_clk_reg;
- uint32_t get_data_reg;
+ uint32_t en_clk_reg;
+ uint32_t en_data_reg;
+ uint32_t y_clk_reg;
+ uint32_t y_data_reg;
uint32_t mask_clk_mask;
uint32_t mask_data_mask;
- uint32_t put_clk_mask;
- uint32_t put_data_mask;
- uint32_t get_clk_mask;
- uint32_t get_data_mask;
uint32_t a_clk_mask;
uint32_t a_data_mask;
+ uint32_t en_clk_mask;
+ uint32_t en_data_mask;
+ uint32_t y_clk_mask;
+ uint32_t y_data_mask;
};
struct radeon_tmds_pll {
@@ -170,8 +184,14 @@ enum radeon_connector_table {
CT_EMAC,
};
+enum radeon_dvo_chip {
+ DVO_SIL164,
+ DVO_SIL1178,
+};
+
struct radeon_mode_info {
struct atom_context *atom_context;
+ struct card_info *atom_card_info;
enum radeon_connector_table connector_table;
bool mode_config_initialized;
struct radeon_crtc *crtcs[2];
@@ -186,17 +206,6 @@ struct radeon_mode_info {
};
-struct radeon_native_mode {
- /* preferred mode */
- uint32_t panel_xres, panel_yres;
- uint32_t hoverplus, hsync_width;
- uint32_t hblank;
- uint32_t voverplus, vsync_width;
- uint32_t vblank;
- uint32_t dotclock;
- uint32_t flags;
-};
-
#define MAX_H_CODE_TIMING_LEN 32
#define MAX_V_CODE_TIMING_LEN 32
@@ -228,7 +237,7 @@ struct radeon_crtc {
enum radeon_rmx_type rmx_type;
fixed20_12 vsc;
fixed20_12 hsc;
- struct radeon_native_mode native_mode;
+ struct drm_display_mode native_mode;
};
struct radeon_encoder_primary_dac {
@@ -248,7 +257,7 @@ struct radeon_encoder_lvds {
bool use_bios_dividers;
uint32_t lvds_gen_cntl;
/* panel mode */
- struct radeon_native_mode native_mode;
+ struct drm_display_mode native_mode;
};
struct radeon_encoder_tv_dac {
@@ -271,6 +280,23 @@ struct radeon_encoder_int_tmds {
struct radeon_tmds_pll tmds_pll[4];
};
+struct radeon_encoder_ext_tmds {
+ /* tmds over dvo */
+ struct radeon_i2c_chan *i2c_bus;
+ uint8_t slave_addr;
+ enum radeon_dvo_chip dvo_chip;
+};
+
+/* spread spectrum */
+struct radeon_atom_ss {
+ uint16_t percentage;
+ uint8_t type;
+ uint8_t step;
+ uint8_t delay;
+ uint8_t range;
+ uint8_t refdiv;
+};
+
struct radeon_encoder_atom_dig {
/* atom dig */
bool coherent_mode;
@@ -278,8 +304,9 @@ struct radeon_encoder_atom_dig {
/* atom lvds */
uint32_t lvds_misc;
uint16_t panel_pwr_delay;
+ struct radeon_atom_ss *ss;
/* panel mode */
- struct radeon_native_mode native_mode;
+ struct drm_display_mode native_mode;
};
struct radeon_encoder_atom_dac {
@@ -294,7 +321,7 @@ struct radeon_encoder {
uint32_t flags;
uint32_t pixel_clock;
enum radeon_rmx_type rmx_type;
- struct radeon_native_mode native_mode;
+ struct drm_display_mode native_mode;
void *enc_priv;
};
@@ -308,12 +335,15 @@ struct radeon_connector {
uint32_t connector_id;
uint32_t devices;
struct radeon_i2c_chan *ddc_bus;
+ /* some systems have an hdmi and vga port with a shared ddc line */
+ bool shared_ddc;
bool use_digital;
/* we need to mind the EDID between detect
and get modes due to analog/digital/tvencoder */
struct edid *edid;
void *con_priv;
bool dac_load_detect;
+ uint16_t connector_object_id;
};
struct radeon_framebuffer {
@@ -325,6 +355,14 @@ extern struct radeon_i2c_chan *radeon_i2c_create(struct drm_device *dev,
struct radeon_i2c_bus_rec *rec,
const char *name);
extern void radeon_i2c_destroy(struct radeon_i2c_chan *i2c);
+extern void radeon_i2c_sw_get_byte(struct radeon_i2c_chan *i2c_bus,
+ u8 slave_addr,
+ u8 addr,
+ u8 *val);
+extern void radeon_i2c_sw_put_byte(struct radeon_i2c_chan *i2c,
+ u8 slave_addr,
+ u8 addr,
+ u8 val);
extern bool radeon_ddc_probe(struct radeon_connector *radeon_connector);
extern int radeon_ddc_get_modes(struct radeon_connector *radeon_connector);
@@ -339,12 +377,15 @@ extern void radeon_compute_pll(struct radeon_pll *pll,
uint32_t *post_div_p,
int flags);
+extern void radeon_setup_encoder_clones(struct drm_device *dev);
+
struct drm_encoder *radeon_encoder_legacy_lvds_add(struct drm_device *dev, int bios_index);
struct drm_encoder *radeon_encoder_legacy_primary_dac_add(struct drm_device *dev, int bios_index, int with_tv);
struct drm_encoder *radeon_encoder_legacy_tv_dac_add(struct drm_device *dev, int bios_index, int with_tv);
struct drm_encoder *radeon_encoder_legacy_tmds_int_add(struct drm_device *dev, int bios_index);
struct drm_encoder *radeon_encoder_legacy_tmds_ext_add(struct drm_device *dev, int bios_index);
extern void atombios_external_tmds_setup(struct drm_encoder *encoder, int action);
+extern void atombios_digital_setup(struct drm_encoder *encoder, int action);
extern int atombios_get_encoder_mode(struct drm_encoder *encoder);
extern void radeon_encoder_set_active_device(struct drm_encoder *encoder);
@@ -374,12 +415,16 @@ extern bool radeon_atom_get_clock_info(struct drm_device *dev);
extern bool radeon_combios_get_clock_info(struct drm_device *dev);
extern struct radeon_encoder_atom_dig *
radeon_atombios_get_lvds_info(struct radeon_encoder *encoder);
-bool radeon_atombios_get_tmds_info(struct radeon_encoder *encoder,
- struct radeon_encoder_int_tmds *tmds);
-bool radeon_legacy_get_tmds_info_from_combios(struct radeon_encoder *encoder,
- struct radeon_encoder_int_tmds *tmds);
-bool radeon_legacy_get_tmds_info_from_table(struct radeon_encoder *encoder,
- struct radeon_encoder_int_tmds *tmds);
+extern bool radeon_atombios_get_tmds_info(struct radeon_encoder *encoder,
+ struct radeon_encoder_int_tmds *tmds);
+extern bool radeon_legacy_get_tmds_info_from_combios(struct radeon_encoder *encoder,
+ struct radeon_encoder_int_tmds *tmds);
+extern bool radeon_legacy_get_tmds_info_from_table(struct radeon_encoder *encoder,
+ struct radeon_encoder_int_tmds *tmds);
+extern bool radeon_legacy_get_ext_tmds_info_from_combios(struct radeon_encoder *encoder,
+ struct radeon_encoder_ext_tmds *tmds);
+extern bool radeon_legacy_get_ext_tmds_info_from_table(struct radeon_encoder *encoder,
+ struct radeon_encoder_ext_tmds *tmds);
extern struct radeon_encoder_primary_dac *
radeon_atombios_get_primary_dac_info(struct radeon_encoder *encoder);
extern struct radeon_encoder_tv_dac *
@@ -391,6 +436,8 @@ extern struct radeon_encoder_tv_dac *
radeon_combios_get_tv_dac_info(struct radeon_encoder *encoder);
extern struct radeon_encoder_primary_dac *
radeon_combios_get_primary_dac_info(struct radeon_encoder *encoder);
+extern bool radeon_combios_external_tmds_setup(struct drm_encoder *encoder);
+extern void radeon_external_tmds_setup(struct drm_encoder *encoder);
extern void radeon_combios_output_lock(struct drm_encoder *encoder, bool lock);
extern void radeon_combios_initialize_bios_scratch_regs(struct drm_device *dev);
extern void radeon_atom_output_lock(struct drm_encoder *encoder, bool lock);
@@ -422,16 +469,13 @@ void radeon_atombios_init_crtc(struct drm_device *dev,
struct radeon_crtc *radeon_crtc);
void radeon_legacy_init_crtc(struct drm_device *dev,
struct radeon_crtc *radeon_crtc);
-void radeon_i2c_do_lock(struct radeon_connector *radeon_connector, int lock_state);
+extern void radeon_i2c_do_lock(struct radeon_i2c_chan *i2c, int lock_state);
void radeon_get_clock_info(struct drm_device *dev);
extern bool radeon_get_atom_connector_info_from_object_table(struct drm_device *dev);
extern bool radeon_get_atom_connector_info_from_supported_devices_table(struct drm_device *dev);
-void radeon_rmx_mode_fixup(struct drm_encoder *encoder,
- struct drm_display_mode *mode,
- struct drm_display_mode *adjusted_mode);
void radeon_enc_destroy(struct drm_encoder *encoder);
void radeon_copy_fb(struct drm_device *dev, struct drm_gem_object *dst_obj);
void radeon_combios_asic_init(struct drm_device *dev);
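
The renamed radeon_i2c_bus_rec fields follow the comment block above: with the 'a' output bits held at zero, a line is pulled low by enabling the output ('en') and released by switching it back to an input, and the level is read back through 'y'. A tiny open-drain simulation of that scheme against a fake register, assuming ideal pull-ups and no other bus drivers:

#include <stdio.h>
#include <stdint.h>

#define EN_CLK  0x1	/* direction bit in a pretend register */

static uint32_t en_reg;	/* bit set = output; it drives low since the 'a' bits stay 0 */

static void set_clock(int clock)
{
	/* clock=1 releases the line (input), clock=0 drives it low (output) */
	en_reg = (en_reg & ~EN_CLK) | (clock ? 0 : EN_CLK);
}

static int get_clock(void)
{
	/* a released line floats high through the bus pull-up */
	return (en_reg & EN_CLK) ? 0 : 1;
}

int main(void)
{
	set_clock(1);
	printf("released: %d\n", get_clock());	/* 1 */
	set_clock(0);
	printf("driven:   %d\n", get_clock());	/* 0 */
	return 0;
}
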
diff --git a/drivers/gpu/drm/radeon/radeon_object.c b/drivers/gpu/drm/radeon/radeon_object.c
index 1f056dadc5c..bec49438482 100644
--- a/drivers/gpu/drm/radeon/radeon_object.c
+++ b/drivers/gpu/drm/radeon/radeon_object.c
@@ -34,74 +34,32 @@
#include "radeon_drm.h"
#include "radeon.h"
-struct radeon_object {
- struct ttm_buffer_object tobj;
- struct list_head list;
- struct radeon_device *rdev;
- struct drm_gem_object *gobj;
- struct ttm_bo_kmap_obj kmap;
- unsigned pin_count;
- uint64_t gpu_addr;
- void *kptr;
- bool is_iomem;
- uint32_t tiling_flags;
- uint32_t pitch;
- int surface_reg;
-};
int radeon_ttm_init(struct radeon_device *rdev);
void radeon_ttm_fini(struct radeon_device *rdev);
+static void radeon_bo_clear_surface_reg(struct radeon_bo *bo);
/*
* To exclude mutual BO access we rely on bo_reserve exclusion, as all
* functions call it.
*/
-static int radeon_object_reserve(struct radeon_object *robj, bool interruptible)
+static void radeon_ttm_bo_destroy(struct ttm_buffer_object *tbo)
{
- return ttm_bo_reserve(&robj->tobj, interruptible, false, false, 0);
-}
+ struct radeon_bo *bo;
-static void radeon_object_unreserve(struct radeon_object *robj)
-{
- ttm_bo_unreserve(&robj->tobj);
+ bo = container_of(tbo, struct radeon_bo, tbo);
+ mutex_lock(&bo->rdev->gem.mutex);
+ list_del_init(&bo->list);
+ mutex_unlock(&bo->rdev->gem.mutex);
+ radeon_bo_clear_surface_reg(bo);
+ kfree(bo);
}
-static void radeon_ttm_object_object_destroy(struct ttm_buffer_object *tobj)
+static inline u32 radeon_ttm_flags_from_domain(u32 domain)
{
- struct radeon_object *robj;
-
- robj = container_of(tobj, struct radeon_object, tobj);
- list_del_init(&robj->list);
- radeon_object_clear_surface_reg(robj);
- kfree(robj);
-}
-
-static inline void radeon_object_gpu_addr(struct radeon_object *robj)
-{
- /* Default gpu address */
- robj->gpu_addr = 0xFFFFFFFFFFFFFFFFULL;
- if (robj->tobj.mem.mm_node == NULL) {
- return;
- }
- robj->gpu_addr = ((u64)robj->tobj.mem.mm_node->start) << PAGE_SHIFT;
- switch (robj->tobj.mem.mem_type) {
- case TTM_PL_VRAM:
- robj->gpu_addr += (u64)robj->rdev->mc.vram_location;
- break;
- case TTM_PL_TT:
- robj->gpu_addr += (u64)robj->rdev->mc.gtt_location;
- break;
- default:
- DRM_ERROR("Unknown placement %d\n", robj->tobj.mem.mem_type);
- robj->gpu_addr = 0xFFFFFFFFFFFFFFFFULL;
- return;
- }
-}
+ u32 flags = 0;
-static inline uint32_t radeon_object_flags_from_domain(uint32_t domain)
-{
- uint32_t flags = 0;
if (domain & RADEON_GEM_DOMAIN_VRAM) {
flags |= TTM_PL_FLAG_VRAM | TTM_PL_FLAG_WC | TTM_PL_FLAG_UNCACHED;
}
@@ -117,17 +75,13 @@ static inline uint32_t radeon_object_flags_from_domain(uint32_t domain)
return flags;
}
-int radeon_object_create(struct radeon_device *rdev,
- struct drm_gem_object *gobj,
- unsigned long size,
- bool kernel,
- uint32_t domain,
- bool interruptible,
- struct radeon_object **robj_ptr)
+int radeon_bo_create(struct radeon_device *rdev, struct drm_gem_object *gobj,
+ unsigned long size, bool kernel, u32 domain,
+ struct radeon_bo **bo_ptr)
{
- struct radeon_object *robj;
+ struct radeon_bo *bo;
enum ttm_bo_type type;
- uint32_t flags;
+ u32 flags;
int r;
if (unlikely(rdev->mman.bdev.dev_mapping == NULL)) {
@@ -138,206 +92,140 @@ int radeon_object_create(struct radeon_device *rdev,
} else {
type = ttm_bo_type_device;
}
- *robj_ptr = NULL;
- robj = kzalloc(sizeof(struct radeon_object), GFP_KERNEL);
- if (robj == NULL) {
+ *bo_ptr = NULL;
+ bo = kzalloc(sizeof(struct radeon_bo), GFP_KERNEL);
+ if (bo == NULL)
return -ENOMEM;
- }
- robj->rdev = rdev;
- robj->gobj = gobj;
- robj->surface_reg = -1;
- INIT_LIST_HEAD(&robj->list);
-
- flags = radeon_object_flags_from_domain(domain);
- r = ttm_buffer_object_init(&rdev->mman.bdev, &robj->tobj, size, type, flags,
- 0, 0, false, NULL, size,
- &radeon_ttm_object_object_destroy);
+ bo->rdev = rdev;
+ bo->gobj = gobj;
+ bo->surface_reg = -1;
+ INIT_LIST_HEAD(&bo->list);
+
+ flags = radeon_ttm_flags_from_domain(domain);
+retry:
+ r = ttm_buffer_object_init(&rdev->mman.bdev, &bo->tbo, size, type,
+ flags, 0, 0, true, NULL, size,
+ &radeon_ttm_bo_destroy);
if (unlikely(r != 0)) {
+ if (r == -ERESTART)
+ goto retry;
 /* ttm calls radeon_ttm_bo_destroy if an error happens */
- DRM_ERROR("Failed to allocate TTM object (%ld, 0x%08X, %u)\n",
- size, flags, 0);
+ dev_err(rdev->dev, "object_init failed for (%ld, 0x%08X)\n",
+ size, flags);
return r;
}
- *robj_ptr = robj;
+ *bo_ptr = bo;
if (gobj) {
- list_add_tail(&robj->list, &rdev->gem.objects);
+ mutex_lock(&bo->rdev->gem.mutex);
+ list_add_tail(&bo->list, &rdev->gem.objects);
+ mutex_unlock(&bo->rdev->gem.mutex);
}
return 0;
}
-int radeon_object_kmap(struct radeon_object *robj, void **ptr)
+int radeon_bo_kmap(struct radeon_bo *bo, void **ptr)
{
+ bool is_iomem;
int r;
- spin_lock(&robj->tobj.lock);
- if (robj->kptr) {
+ if (bo->kptr) {
if (ptr) {
- *ptr = robj->kptr;
+ *ptr = bo->kptr;
}
- spin_unlock(&robj->tobj.lock);
return 0;
}
- spin_unlock(&robj->tobj.lock);
- r = ttm_bo_kmap(&robj->tobj, 0, robj->tobj.num_pages, &robj->kmap);
+ r = ttm_bo_kmap(&bo->tbo, 0, bo->tbo.num_pages, &bo->kmap);
if (r) {
return r;
}
- spin_lock(&robj->tobj.lock);
- robj->kptr = ttm_kmap_obj_virtual(&robj->kmap, &robj->is_iomem);
- spin_unlock(&robj->tobj.lock);
+ bo->kptr = ttm_kmap_obj_virtual(&bo->kmap, &is_iomem);
if (ptr) {
- *ptr = robj->kptr;
+ *ptr = bo->kptr;
}
- radeon_object_check_tiling(robj, 0, 0);
+ radeon_bo_check_tiling(bo, 0, 0);
return 0;
}
-void radeon_object_kunmap(struct radeon_object *robj)
+void radeon_bo_kunmap(struct radeon_bo *bo)
{
- spin_lock(&robj->tobj.lock);
- if (robj->kptr == NULL) {
- spin_unlock(&robj->tobj.lock);
+ if (bo->kptr == NULL)
return;
- }
- robj->kptr = NULL;
- spin_unlock(&robj->tobj.lock);
- radeon_object_check_tiling(robj, 0, 0);
- ttm_bo_kunmap(&robj->kmap);
+ bo->kptr = NULL;
+ radeon_bo_check_tiling(bo, 0, 0);
+ ttm_bo_kunmap(&bo->kmap);
}
-void radeon_object_unref(struct radeon_object **robj)
+void radeon_bo_unref(struct radeon_bo **bo)
{
- struct ttm_buffer_object *tobj;
+ struct ttm_buffer_object *tbo;
- if ((*robj) == NULL) {
+ if ((*bo) == NULL)
return;
- }
- tobj = &((*robj)->tobj);
- ttm_bo_unref(&tobj);
- if (tobj == NULL) {
- *robj = NULL;
- }
-}
-
-int radeon_object_mmap(struct radeon_object *robj, uint64_t *offset)
-{
- *offset = robj->tobj.addr_space_offset;
- return 0;
+ tbo = &((*bo)->tbo);
+ ttm_bo_unref(&tbo);
+ if (tbo == NULL)
+ *bo = NULL;
}
-int radeon_object_pin(struct radeon_object *robj, uint32_t domain,
- uint64_t *gpu_addr)
+int radeon_bo_pin(struct radeon_bo *bo, u32 domain, u64 *gpu_addr)
{
- uint32_t flags;
- uint32_t tmp;
+ u32 flags;
+ u32 tmp;
int r;
- flags = radeon_object_flags_from_domain(domain);
- spin_lock(&robj->tobj.lock);
- if (robj->pin_count) {
- robj->pin_count++;
- if (gpu_addr != NULL) {
- *gpu_addr = robj->gpu_addr;
- }
- spin_unlock(&robj->tobj.lock);
+ flags = radeon_ttm_flags_from_domain(domain);
+ if (bo->pin_count) {
+ bo->pin_count++;
+ if (gpu_addr)
+ *gpu_addr = radeon_bo_gpu_offset(bo);
return 0;
}
- spin_unlock(&robj->tobj.lock);
- r = radeon_object_reserve(robj, false);
- if (unlikely(r != 0)) {
- DRM_ERROR("radeon: failed to reserve object for pinning it.\n");
- return r;
- }
- tmp = robj->tobj.mem.placement;
+ tmp = bo->tbo.mem.placement;
ttm_flag_masked(&tmp, flags, TTM_PL_MASK_MEM);
- robj->tobj.proposed_placement = tmp | TTM_PL_FLAG_NO_EVICT | TTM_PL_MASK_CACHING;
- r = ttm_buffer_object_validate(&robj->tobj,
- robj->tobj.proposed_placement,
- false, false);
- radeon_object_gpu_addr(robj);
- if (gpu_addr != NULL) {
- *gpu_addr = robj->gpu_addr;
+ bo->tbo.proposed_placement = tmp | TTM_PL_FLAG_NO_EVICT |
+ TTM_PL_MASK_CACHING;
+retry:
+ r = ttm_buffer_object_validate(&bo->tbo, bo->tbo.proposed_placement,
+ true, false);
+ if (likely(r == 0)) {
+ bo->pin_count = 1;
+ if (gpu_addr != NULL)
+ *gpu_addr = radeon_bo_gpu_offset(bo);
}
- robj->pin_count = 1;
if (unlikely(r != 0)) {
- DRM_ERROR("radeon: failed to pin object.\n");
+ if (r == -ERESTART)
+ goto retry;
+ dev_err(bo->rdev->dev, "%p pin failed\n", bo);
}
- radeon_object_unreserve(robj);
return r;
}
-void radeon_object_unpin(struct radeon_object *robj)
+int radeon_bo_unpin(struct radeon_bo *bo)
{
- uint32_t flags;
int r;
- spin_lock(&robj->tobj.lock);
- if (!robj->pin_count) {
- spin_unlock(&robj->tobj.lock);
- printk(KERN_WARNING "Unpin not necessary for %p !\n", robj);
- return;
- }
- robj->pin_count--;
- if (robj->pin_count) {
- spin_unlock(&robj->tobj.lock);
- return;
- }
- spin_unlock(&robj->tobj.lock);
- r = radeon_object_reserve(robj, false);
- if (unlikely(r != 0)) {
- DRM_ERROR("radeon: failed to reserve object for unpinning it.\n");
- return;
- }
- flags = robj->tobj.mem.placement;
- robj->tobj.proposed_placement = flags & ~TTM_PL_FLAG_NO_EVICT;
- r = ttm_buffer_object_validate(&robj->tobj,
- robj->tobj.proposed_placement,
- false, false);
- if (unlikely(r != 0)) {
- DRM_ERROR("radeon: failed to unpin buffer.\n");
- }
- radeon_object_unreserve(robj);
-}
-
-int radeon_object_wait(struct radeon_object *robj)
-{
- int r = 0;
-
- /* FIXME: should use block reservation instead */
- r = radeon_object_reserve(robj, true);
- if (unlikely(r != 0)) {
- DRM_ERROR("radeon: failed to reserve object for waiting.\n");
- return r;
- }
- spin_lock(&robj->tobj.lock);
- if (robj->tobj.sync_obj) {
- r = ttm_bo_wait(&robj->tobj, true, true, false);
+ if (!bo->pin_count) {
+ dev_warn(bo->rdev->dev, "%p unpin not necessary\n", bo);
+ return 0;
}
- spin_unlock(&robj->tobj.lock);
- radeon_object_unreserve(robj);
- return r;
-}
-
-int radeon_object_busy_domain(struct radeon_object *robj, uint32_t *cur_placement)
-{
- int r = 0;
-
- r = radeon_object_reserve(robj, true);
+ bo->pin_count--;
+ if (bo->pin_count)
+ return 0;
+ bo->tbo.proposed_placement = bo->tbo.mem.placement &
+ ~TTM_PL_FLAG_NO_EVICT;
+retry:
+ r = ttm_buffer_object_validate(&bo->tbo, bo->tbo.proposed_placement,
+ true, false);
if (unlikely(r != 0)) {
- DRM_ERROR("radeon: failed to reserve object for waiting.\n");
+ if (r == -ERESTART)
+ goto retry;
+ dev_err(bo->rdev->dev, "%p validate failed for unpin\n", bo);
return r;
}
- spin_lock(&robj->tobj.lock);
- *cur_placement = robj->tobj.mem.mem_type;
- if (robj->tobj.sync_obj) {
- r = ttm_bo_wait(&robj->tobj, true, true, true);
- }
- spin_unlock(&robj->tobj.lock);
- radeon_object_unreserve(robj);
- return r;
+ return 0;
}
-int radeon_object_evict_vram(struct radeon_device *rdev)
+int radeon_bo_evict_vram(struct radeon_device *rdev)
{
if (rdev->flags & RADEON_IS_IGP) {
/* Useless to evict on IGP chips */
@@ -346,30 +234,32 @@ int radeon_object_evict_vram(struct radeon_device *rdev)
return ttm_bo_evict_mm(&rdev->mman.bdev, TTM_PL_VRAM);
}
-void radeon_object_force_delete(struct radeon_device *rdev)
+void radeon_bo_force_delete(struct radeon_device *rdev)
{
- struct radeon_object *robj, *n;
+ struct radeon_bo *bo, *n;
struct drm_gem_object *gobj;
if (list_empty(&rdev->gem.objects)) {
return;
}
- DRM_ERROR("Userspace still has active objects !\n");
- list_for_each_entry_safe(robj, n, &rdev->gem.objects, list) {
+ dev_err(rdev->dev, "Userspace still has active objects !\n");
+ list_for_each_entry_safe(bo, n, &rdev->gem.objects, list) {
mutex_lock(&rdev->ddev->struct_mutex);
- gobj = robj->gobj;
- DRM_ERROR("Force free for (%p,%p,%lu,%lu)\n",
- gobj, robj, (unsigned long)gobj->size,
- *((unsigned long *)&gobj->refcount));
- list_del_init(&robj->list);
- radeon_object_unref(&robj);
+ gobj = bo->gobj;
+ dev_err(rdev->dev, "%p %p %lu %lu force free\n",
+ gobj, bo, (unsigned long)gobj->size,
+ *((unsigned long *)&gobj->refcount));
+ mutex_lock(&bo->rdev->gem.mutex);
+ list_del_init(&bo->list);
+ mutex_unlock(&bo->rdev->gem.mutex);
+ radeon_bo_unref(&bo);
gobj->driver_private = NULL;
drm_gem_object_unreference(gobj);
mutex_unlock(&rdev->ddev->struct_mutex);
}
}
-int radeon_object_init(struct radeon_device *rdev)
+int radeon_bo_init(struct radeon_device *rdev)
{
/* Add an MTRR for the VRAM */
rdev->mc.vram_mtrr = mtrr_add(rdev->mc.aper_base, rdev->mc.aper_size,
@@ -382,13 +272,13 @@ int radeon_object_init(struct radeon_device *rdev)
return radeon_ttm_init(rdev);
}
-void radeon_object_fini(struct radeon_device *rdev)
+void radeon_bo_fini(struct radeon_device *rdev)
{
radeon_ttm_fini(rdev);
}
-void radeon_object_list_add_object(struct radeon_object_list *lobj,
- struct list_head *head)
+void radeon_bo_list_add_object(struct radeon_bo_list *lobj,
+ struct list_head *head)
{
if (lobj->wdomain) {
list_add(&lobj->list, head);
@@ -397,72 +287,67 @@ void radeon_object_list_add_object(struct radeon_object_list *lobj,
}
}
-int radeon_object_list_reserve(struct list_head *head)
+int radeon_bo_list_reserve(struct list_head *head)
{
- struct radeon_object_list *lobj;
+ struct radeon_bo_list *lobj;
int r;
list_for_each_entry(lobj, head, list){
- if (!lobj->robj->pin_count) {
- r = radeon_object_reserve(lobj->robj, true);
- if (unlikely(r != 0)) {
- DRM_ERROR("radeon: failed to reserve object.\n");
- return r;
- }
- } else {
- }
+ r = radeon_bo_reserve(lobj->bo, false);
+ if (unlikely(r != 0))
+ return r;
}
return 0;
}
-void radeon_object_list_unreserve(struct list_head *head)
+void radeon_bo_list_unreserve(struct list_head *head)
{
- struct radeon_object_list *lobj;
+ struct radeon_bo_list *lobj;
list_for_each_entry(lobj, head, list) {
- if (!lobj->robj->pin_count) {
- radeon_object_unreserve(lobj->robj);
- }
+ /* only unreserve objects we successfully reserved */
+ if (radeon_bo_is_reserved(lobj->bo))
+ radeon_bo_unreserve(lobj->bo);
}
}
-int radeon_object_list_validate(struct list_head *head, void *fence)
+int radeon_bo_list_validate(struct list_head *head, void *fence)
{
- struct radeon_object_list *lobj;
- struct radeon_object *robj;
+ struct radeon_bo_list *lobj;
+ struct radeon_bo *bo;
struct radeon_fence *old_fence = NULL;
int r;
- r = radeon_object_list_reserve(head);
+ r = radeon_bo_list_reserve(head);
if (unlikely(r != 0)) {
- radeon_object_list_unreserve(head);
return r;
}
list_for_each_entry(lobj, head, list) {
- robj = lobj->robj;
- if (!robj->pin_count) {
+ bo = lobj->bo;
+ if (!bo->pin_count) {
if (lobj->wdomain) {
- robj->tobj.proposed_placement =
- radeon_object_flags_from_domain(lobj->wdomain);
+ bo->tbo.proposed_placement =
+ radeon_ttm_flags_from_domain(lobj->wdomain);
} else {
- robj->tobj.proposed_placement =
- radeon_object_flags_from_domain(lobj->rdomain);
+ bo->tbo.proposed_placement =
+ radeon_ttm_flags_from_domain(lobj->rdomain);
}
- r = ttm_buffer_object_validate(&robj->tobj,
- robj->tobj.proposed_placement,
- true, false);
+retry:
+ r = ttm_buffer_object_validate(&bo->tbo,
+ bo->tbo.proposed_placement,
+ true, false);
if (unlikely(r)) {
- DRM_ERROR("radeon: failed to validate.\n");
+ if (r == -ERESTART)
+ goto retry;
return r;
}
- radeon_object_gpu_addr(robj);
}
- lobj->gpu_offset = robj->gpu_addr;
- lobj->tiling_flags = robj->tiling_flags;
+ lobj->gpu_offset = radeon_bo_gpu_offset(bo);
+ lobj->tiling_flags = bo->tiling_flags;
if (fence) {
- old_fence = (struct radeon_fence *)robj->tobj.sync_obj;
- robj->tobj.sync_obj = radeon_fence_ref(fence);
- robj->tobj.sync_obj_arg = NULL;
+ old_fence = (struct radeon_fence *)bo->tbo.sync_obj;
+ bo->tbo.sync_obj = radeon_fence_ref(fence);
+ bo->tbo.sync_obj_arg = NULL;
}
if (old_fence) {
radeon_fence_unref(&old_fence);
@@ -471,51 +356,44 @@ int radeon_object_list_validate(struct list_head *head, void *fence)
return 0;
}
-void radeon_object_list_unvalidate(struct list_head *head)
+void radeon_bo_list_unvalidate(struct list_head *head, void *fence)
{
- struct radeon_object_list *lobj;
- struct radeon_fence *old_fence = NULL;
+ struct radeon_bo_list *lobj;
+ struct radeon_fence *old_fence;
- list_for_each_entry(lobj, head, list) {
- old_fence = (struct radeon_fence *)lobj->robj->tobj.sync_obj;
- lobj->robj->tobj.sync_obj = NULL;
- if (old_fence) {
- radeon_fence_unref(&old_fence);
+ if (fence)
+ list_for_each_entry(lobj, head, list) {
+ old_fence = to_radeon_fence(lobj->bo->tbo.sync_obj);
+ if (old_fence == fence) {
+ lobj->bo->tbo.sync_obj = NULL;
+ radeon_fence_unref(&old_fence);
+ }
}
- }
- radeon_object_list_unreserve(head);
-}
-
-void radeon_object_list_clean(struct list_head *head)
-{
- radeon_object_list_unreserve(head);
+ radeon_bo_list_unreserve(head);
}
-int radeon_object_fbdev_mmap(struct radeon_object *robj,
+int radeon_bo_fbdev_mmap(struct radeon_bo *bo,
struct vm_area_struct *vma)
{
- return ttm_fbdev_mmap(vma, &robj->tobj);
+ return ttm_fbdev_mmap(vma, &bo->tbo);
}
-unsigned long radeon_object_size(struct radeon_object *robj)
+static int radeon_bo_get_surface_reg(struct radeon_bo *bo)
{
- return robj->tobj.num_pages << PAGE_SHIFT;
-}
-
-int radeon_object_get_surface_reg(struct radeon_object *robj)
-{
- struct radeon_device *rdev = robj->rdev;
+ struct radeon_device *rdev = bo->rdev;
struct radeon_surface_reg *reg;
- struct radeon_object *old_object;
+ struct radeon_bo *old_object;
int steal;
int i;
- if (!robj->tiling_flags)
+ BUG_ON(!atomic_read(&bo->tbo.reserved));
+
+ if (!bo->tiling_flags)
return 0;
- if (robj->surface_reg >= 0) {
- reg = &rdev->surface_regs[robj->surface_reg];
- i = robj->surface_reg;
+ if (bo->surface_reg >= 0) {
+ reg = &rdev->surface_regs[bo->surface_reg];
+ i = bo->surface_reg;
goto out;
}
@@ -523,10 +401,10 @@ int radeon_object_get_surface_reg(struct radeon_object *robj)
for (i = 0; i < RADEON_GEM_MAX_SURFACES; i++) {
reg = &rdev->surface_regs[i];
- if (!reg->robj)
+ if (!reg->bo)
break;
- old_object = reg->robj;
+ old_object = reg->bo;
if (old_object->pin_count == 0)
steal = i;
}
@@ -537,91 +415,101 @@ int radeon_object_get_surface_reg(struct radeon_object *robj)
return -ENOMEM;
/* find someone with a surface reg and nuke their BO */
reg = &rdev->surface_regs[steal];
- old_object = reg->robj;
+ old_object = reg->bo;
/* blow away the mapping */
DRM_DEBUG("stealing surface reg %d from %p\n", steal, old_object);
- ttm_bo_unmap_virtual(&old_object->tobj);
+ ttm_bo_unmap_virtual(&old_object->tbo);
old_object->surface_reg = -1;
i = steal;
}
- robj->surface_reg = i;
- reg->robj = robj;
+ bo->surface_reg = i;
+ reg->bo = bo;
out:
- radeon_set_surface_reg(rdev, i, robj->tiling_flags, robj->pitch,
- robj->tobj.mem.mm_node->start << PAGE_SHIFT,
- robj->tobj.num_pages << PAGE_SHIFT);
+ radeon_set_surface_reg(rdev, i, bo->tiling_flags, bo->pitch,
+ bo->tbo.mem.mm_node->start << PAGE_SHIFT,
+ bo->tbo.num_pages << PAGE_SHIFT);
return 0;
}
-void radeon_object_clear_surface_reg(struct radeon_object *robj)
+static void radeon_bo_clear_surface_reg(struct radeon_bo *bo)
{
- struct radeon_device *rdev = robj->rdev;
+ struct radeon_device *rdev = bo->rdev;
struct radeon_surface_reg *reg;
- if (robj->surface_reg == -1)
+ if (bo->surface_reg == -1)
return;
- reg = &rdev->surface_regs[robj->surface_reg];
- radeon_clear_surface_reg(rdev, robj->surface_reg);
+ reg = &rdev->surface_regs[bo->surface_reg];
+ radeon_clear_surface_reg(rdev, bo->surface_reg);
- reg->robj = NULL;
- robj->surface_reg = -1;
+ reg->bo = NULL;
+ bo->surface_reg = -1;
}
-void radeon_object_set_tiling_flags(struct radeon_object *robj,
- uint32_t tiling_flags, uint32_t pitch)
+int radeon_bo_set_tiling_flags(struct radeon_bo *bo,
+ uint32_t tiling_flags, uint32_t pitch)
{
- robj->tiling_flags = tiling_flags;
- robj->pitch = pitch;
+ int r;
+
+ r = radeon_bo_reserve(bo, false);
+ if (unlikely(r != 0))
+ return r;
+ bo->tiling_flags = tiling_flags;
+ bo->pitch = pitch;
+ radeon_bo_unreserve(bo);
+ return 0;
}
-void radeon_object_get_tiling_flags(struct radeon_object *robj,
- uint32_t *tiling_flags,
- uint32_t *pitch)
+void radeon_bo_get_tiling_flags(struct radeon_bo *bo,
+ uint32_t *tiling_flags,
+ uint32_t *pitch)
{
+ BUG_ON(!atomic_read(&bo->tbo.reserved));
if (tiling_flags)
- *tiling_flags = robj->tiling_flags;
+ *tiling_flags = bo->tiling_flags;
if (pitch)
- *pitch = robj->pitch;
+ *pitch = bo->pitch;
}
-int radeon_object_check_tiling(struct radeon_object *robj, bool has_moved,
- bool force_drop)
+int radeon_bo_check_tiling(struct radeon_bo *bo, bool has_moved,
+ bool force_drop)
{
- if (!(robj->tiling_flags & RADEON_TILING_SURFACE))
+ BUG_ON(!atomic_read(&bo->tbo.reserved));
+
+ if (!(bo->tiling_flags & RADEON_TILING_SURFACE))
return 0;
if (force_drop) {
- radeon_object_clear_surface_reg(robj);
+ radeon_bo_clear_surface_reg(bo);
return 0;
}
- if (robj->tobj.mem.mem_type != TTM_PL_VRAM) {
+ if (bo->tbo.mem.mem_type != TTM_PL_VRAM) {
if (!has_moved)
return 0;
- if (robj->surface_reg >= 0)
- radeon_object_clear_surface_reg(robj);
+ if (bo->surface_reg >= 0)
+ radeon_bo_clear_surface_reg(bo);
return 0;
}
- if ((robj->surface_reg >= 0) && !has_moved)
+ if ((bo->surface_reg >= 0) && !has_moved)
return 0;
- return radeon_object_get_surface_reg(robj);
+ return radeon_bo_get_surface_reg(bo);
}
void radeon_bo_move_notify(struct ttm_buffer_object *bo,
- struct ttm_mem_reg *mem)
+ struct ttm_mem_reg *mem)
{
- struct radeon_object *robj = container_of(bo, struct radeon_object, tobj);
- radeon_object_check_tiling(robj, 0, 1);
+ struct radeon_bo *rbo = container_of(bo, struct radeon_bo, tbo);
+ radeon_bo_check_tiling(rbo, 0, 1);
}
void radeon_bo_fault_reserve_notify(struct ttm_buffer_object *bo)
{
- struct radeon_object *robj = container_of(bo, struct radeon_object, tobj);
- radeon_object_check_tiling(robj, 0, 0);
+ struct radeon_bo *rbo = container_of(bo, struct radeon_bo, tbo);
+ radeon_bo_check_tiling(rbo, 0, 0);
}
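
The radeon_bo_create(), radeon_bo_pin() and radeon_bo_unpin() paths above all follow the same convention: TTM is now called interruptibly, and -ERESTART (a wait interrupted by a signal) is retried locally instead of being returned to the caller. A minimal sketch of that pattern, with do_validate() as a hypothetical stand-in for ttm_buffer_object_validate():

/*
 * Sketch only: retry-on-ERESTART convention used throughout the new
 * radeon_bo code; do_validate() is a placeholder, not a real kernel API.
 */
static int radeon_bo_validate_sketch(struct radeon_bo *bo, u32 placement)
{
	int r;

retry:
	r = do_validate(bo, placement);		/* interruptible TTM call */
	if (unlikely(r != 0)) {
		if (r == -ERESTART)
			goto retry;		/* signal interrupted the wait, try again */
		dev_err(bo->rdev->dev, "%p validate failed\n", bo);
	}
	return r;
}
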
diff --git a/drivers/gpu/drm/radeon/radeon_object.h b/drivers/gpu/drm/radeon/radeon_object.h
index 10e8af6bb45..e9da13077e2 100644
--- a/drivers/gpu/drm/radeon/radeon_object.h
+++ b/drivers/gpu/drm/radeon/radeon_object.h
@@ -28,19 +28,152 @@
#ifndef __RADEON_OBJECT_H__
#define __RADEON_OBJECT_H__
-#include <ttm/ttm_bo_api.h>
-#include <ttm/ttm_bo_driver.h>
-#include <ttm/ttm_placement.h>
-#include <ttm/ttm_module.h>
+#include <drm/radeon_drm.h>
+#include "radeon.h"
-/*
- * TTM.
+/**
+ * radeon_mem_type_to_domain - return domain corresponding to mem_type
+ * @mem_type: ttm memory type
+ *
+ * Returns the domain corresponding to the ttm mem_type
+ */
+static inline unsigned radeon_mem_type_to_domain(u32 mem_type)
+{
+ switch (mem_type) {
+ case TTM_PL_VRAM:
+ return RADEON_GEM_DOMAIN_VRAM;
+ case TTM_PL_TT:
+ return RADEON_GEM_DOMAIN_GTT;
+ case TTM_PL_SYSTEM:
+ return RADEON_GEM_DOMAIN_CPU;
+ default:
+ break;
+ }
+ return 0;
+}
+
+/**
+ * radeon_bo_reserve - reserve bo
+ * @bo: bo structure
+ * @no_wait: don't sleep while trying to reserve (return -EBUSY)
+ *
+ * Returns:
+ * -EBUSY: buffer is busy and @no_wait is true
+ * -ERESTART: A wait for the buffer to become unreserved was interrupted by
+ * a signal. Release all buffer reservations and return to user-space.
+ */
+static inline int radeon_bo_reserve(struct radeon_bo *bo, bool no_wait)
+{
+ int r;
+
+retry:
+ r = ttm_bo_reserve(&bo->tbo, true, no_wait, false, 0);
+ if (unlikely(r != 0)) {
+ if (r == -ERESTART)
+ goto retry;
+ dev_err(bo->rdev->dev, "%p reserve failed\n", bo);
+ return r;
+ }
+ return 0;
+}
+
+static inline void radeon_bo_unreserve(struct radeon_bo *bo)
+{
+ ttm_bo_unreserve(&bo->tbo);
+}
+
+/**
+ * radeon_bo_gpu_offset - return GPU offset of bo
+ * @bo: radeon object for which we query the offset
+ *
+ * Returns current GPU offset of the object.
+ *
+ * Note: the object should either be pinned or reserved when calling this
+ * function; it might be useful to add a check for this for debugging.
+ */
+static inline u64 radeon_bo_gpu_offset(struct radeon_bo *bo)
+{
+ return bo->tbo.offset;
+}
+
+static inline unsigned long radeon_bo_size(struct radeon_bo *bo)
+{
+ return bo->tbo.num_pages << PAGE_SHIFT;
+}
+
+static inline bool radeon_bo_is_reserved(struct radeon_bo *bo)
+{
+ return !!atomic_read(&bo->tbo.reserved);
+}
+
+/**
+ * radeon_bo_mmap_offset - return mmap offset of bo
+ * @bo: radeon object for which we query the offset
+ *
+ * Returns mmap offset of the object.
+ *
+ * Note: addr_space_offset is constant after ttm bo init, so it isn't protected
+ * by any lock.
*/
-struct radeon_mman {
- struct ttm_bo_global_ref bo_global_ref;
- struct ttm_global_reference mem_global_ref;
- bool mem_global_referenced;
- struct ttm_bo_device bdev;
-};
+static inline u64 radeon_bo_mmap_offset(struct radeon_bo *bo)
+{
+ return bo->tbo.addr_space_offset;
+}
+
+static inline int radeon_bo_wait(struct radeon_bo *bo, u32 *mem_type,
+ bool no_wait)
+{
+ int r;
+
+retry:
+ r = ttm_bo_reserve(&bo->tbo, true, no_wait, false, 0);
+ if (unlikely(r != 0)) {
+ if (r == -ERESTART)
+ goto retry;
+ dev_err(bo->rdev->dev, "%p reserve failed for wait\n", bo);
+ return r;
+ }
+ spin_lock(&bo->tbo.lock);
+ if (mem_type)
+ *mem_type = bo->tbo.mem.mem_type;
+ if (bo->tbo.sync_obj)
+ r = ttm_bo_wait(&bo->tbo, true, true, no_wait);
+ spin_unlock(&bo->tbo.lock);
+ ttm_bo_unreserve(&bo->tbo);
+ if (unlikely(r == -ERESTART))
+ goto retry;
+ return r;
+}
+
+extern int radeon_bo_create(struct radeon_device *rdev,
+ struct drm_gem_object *gobj, unsigned long size,
+ bool kernel, u32 domain,
+ struct radeon_bo **bo_ptr);
+extern int radeon_bo_kmap(struct radeon_bo *bo, void **ptr);
+extern void radeon_bo_kunmap(struct radeon_bo *bo);
+extern void radeon_bo_unref(struct radeon_bo **bo);
+extern int radeon_bo_pin(struct radeon_bo *bo, u32 domain, u64 *gpu_addr);
+extern int radeon_bo_unpin(struct radeon_bo *bo);
+extern int radeon_bo_evict_vram(struct radeon_device *rdev);
+extern void radeon_bo_force_delete(struct radeon_device *rdev);
+extern int radeon_bo_init(struct radeon_device *rdev);
+extern void radeon_bo_fini(struct radeon_device *rdev);
+extern void radeon_bo_list_add_object(struct radeon_bo_list *lobj,
+ struct list_head *head);
+extern int radeon_bo_list_reserve(struct list_head *head);
+extern void radeon_bo_list_unreserve(struct list_head *head);
+extern int radeon_bo_list_validate(struct list_head *head, void *fence);
+extern void radeon_bo_list_unvalidate(struct list_head *head, void *fence);
+extern int radeon_bo_fbdev_mmap(struct radeon_bo *bo,
+ struct vm_area_struct *vma);
+extern int radeon_bo_set_tiling_flags(struct radeon_bo *bo,
+ u32 tiling_flags, u32 pitch);
+extern void radeon_bo_get_tiling_flags(struct radeon_bo *bo,
+ u32 *tiling_flags, u32 *pitch);
+extern int radeon_bo_check_tiling(struct radeon_bo *bo, bool has_moved,
+ bool force_drop);
+extern void radeon_bo_move_notify(struct ttm_buffer_object *bo,
+ struct ttm_mem_reg *mem);
+extern void radeon_bo_fault_reserve_notify(struct ttm_buffer_object *bo);
#endif
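
The inline helpers above spell out the locking rules (reserve before pin/kmap/tiling queries; -ERESTART is handled inside radeon_bo_reserve()). The rest of the patch, e.g. radeon_ring_init() and radeon_ttm_init() below, uses the same reserve/pin/kmap sequence. A minimal usage sketch, assuming an already-created struct radeon_bo *bo and abbreviated error unwinding:

/* Sketch only: pin a BO into GTT and CPU-map it with the new API. */
static int radeon_bo_pin_and_map_sketch(struct radeon_bo *bo,
					void **cpu_ptr, u64 *gpu_addr)
{
	int r;

	r = radeon_bo_reserve(bo, false);	/* may sleep; retries -ERESTART itself */
	if (unlikely(r != 0))
		return r;
	r = radeon_bo_pin(bo, RADEON_GEM_DOMAIN_GTT, gpu_addr);
	if (r == 0)
		r = radeon_bo_kmap(bo, cpu_ptr);	/* must be called while reserved */
	radeon_bo_unreserve(bo);		/* mapping and pin survive unreserve */
	return r;
}
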
diff --git a/drivers/gpu/drm/radeon/radeon_pm.c b/drivers/gpu/drm/radeon/radeon_pm.c
new file mode 100644
index 00000000000..34b08d307c8
--- /dev/null
+++ b/drivers/gpu/drm/radeon/radeon_pm.c
@@ -0,0 +1,65 @@
+/*
+ * Permission is hereby granted, free of charge, to any person obtaining a
+ * copy of this software and associated documentation files (the "Software"),
+ * to deal in the Software without restriction, including without limitation
+ * the rights to use, copy, modify, merge, publish, distribute, sublicense,
+ * and/or sell copies of the Software, and to permit persons to whom the
+ * Software is furnished to do so, subject to the following conditions:
+ *
+ * The above copyright notice and this permission notice shall be included in
+ * all copies or substantial portions of the Software.
+ *
+ * THE SOFTWARE IS PROVIDED "AS IS", WITHOUT WARRANTY OF ANY KIND, EXPRESS OR
+ * IMPLIED, INCLUDING BUT NOT LIMITED TO THE WARRANTIES OF MERCHANTABILITY,
+ * FITNESS FOR A PARTICULAR PURPOSE AND NONINFRINGEMENT. IN NO EVENT SHALL
+ * THE COPYRIGHT HOLDER(S) OR AUTHOR(S) BE LIABLE FOR ANY CLAIM, DAMAGES OR
+ * OTHER LIABILITY, WHETHER IN AN ACTION OF CONTRACT, TORT OR OTHERWISE,
+ * ARISING FROM, OUT OF OR IN CONNECTION WITH THE SOFTWARE OR THE USE OR
+ * OTHER DEALINGS IN THE SOFTWARE.
+ *
+ * Authors: Rafał Miłecki <zajec5@gmail.com>
+ */
+#include "drmP.h"
+#include "radeon.h"
+
+int radeon_debugfs_pm_init(struct radeon_device *rdev);
+
+int radeon_pm_init(struct radeon_device *rdev)
+{
+ if (radeon_debugfs_pm_init(rdev)) {
+ DRM_ERROR("Failed to register debugfs file for PM!\n");
+ }
+
+ return 0;
+}
+
+/*
+ * Debugfs info
+ */
+#if defined(CONFIG_DEBUG_FS)
+
+static int radeon_debugfs_pm_info(struct seq_file *m, void *data)
+{
+ struct drm_info_node *node = (struct drm_info_node *) m->private;
+ struct drm_device *dev = node->minor->dev;
+ struct radeon_device *rdev = dev->dev_private;
+
+ seq_printf(m, "engine clock: %u0 kHz\n", radeon_get_engine_clock(rdev));
+ seq_printf(m, "memory clock: %u0 kHz\n", radeon_get_memory_clock(rdev));
+
+ return 0;
+}
+
+static struct drm_info_list radeon_pm_info_list[] = {
+ {"radeon_pm_info", radeon_debugfs_pm_info, 0, NULL},
+};
+#endif
+
+int radeon_debugfs_pm_init(struct radeon_device *rdev)
+{
+#if defined(CONFIG_DEBUG_FS)
+ return radeon_debugfs_add_files(rdev, radeon_pm_info_list, ARRAY_SIZE(radeon_pm_info_list));
+#else
+ return 0;
+#endif
+}
diff --git a/drivers/gpu/drm/radeon/radeon_reg.h b/drivers/gpu/drm/radeon/radeon_reg.h
index bfa1ab9c93e..c4c41c8d908 100644
--- a/drivers/gpu/drm/radeon/radeon_reg.h
+++ b/drivers/gpu/drm/radeon/radeon_reg.h
@@ -290,6 +290,8 @@
#define RADEON_BUS_CNTL 0x0030
# define RADEON_BUS_MASTER_DIS (1 << 6)
# define RADEON_BUS_BIOS_DIS_ROM (1 << 12)
+# define RS600_BUS_MASTER_DIS (1 << 14)
+# define RS600_MSI_REARM (1 << 20) /* rs600/rs690/rs740 */
# define RADEON_BUS_RD_DISCARD_EN (1 << 24)
# define RADEON_BUS_RD_ABORT_EN (1 << 25)
# define RADEON_BUS_MSTR_DISCONNECT_EN (1 << 28)
@@ -297,6 +299,9 @@
# define RADEON_BUS_READ_BURST (1 << 30)
#define RADEON_BUS_CNTL1 0x0034
# define RADEON_BUS_WAIT_ON_LOCK_EN (1 << 4)
+/* rv370/rv380, rv410, r423/r430/r480, r5xx */
+#define RADEON_MSI_REARM_EN 0x0160
+# define RV370_MSI_REARM_EN (1 << 0)
/* #define RADEON_PCIE_INDEX 0x0030 */
/* #define RADEON_PCIE_DATA 0x0034 */
@@ -1046,20 +1051,25 @@
/* Multimedia I2C bus */
#define RADEON_I2C_CNTL_0 0x0090
-#define RADEON_I2C_DONE (1<<0)
-#define RADEON_I2C_NACK (1<<1)
-#define RADEON_I2C_HALT (1<<2)
-#define RADEON_I2C_SOFT_RST (1<<5)
-#define RADEON_I2C_DRIVE_EN (1<<6)
-#define RADEON_I2C_DRIVE_SEL (1<<7)
-#define RADEON_I2C_START (1<<8)
-#define RADEON_I2C_STOP (1<<9)
-#define RADEON_I2C_RECEIVE (1<<10)
-#define RADEON_I2C_ABORT (1<<11)
-#define RADEON_I2C_GO (1<<12)
+#define RADEON_I2C_DONE (1 << 0)
+#define RADEON_I2C_NACK (1 << 1)
+#define RADEON_I2C_HALT (1 << 2)
+#define RADEON_I2C_SOFT_RST (1 << 5)
+#define RADEON_I2C_DRIVE_EN (1 << 6)
+#define RADEON_I2C_DRIVE_SEL (1 << 7)
+#define RADEON_I2C_START (1 << 8)
+#define RADEON_I2C_STOP (1 << 9)
+#define RADEON_I2C_RECEIVE (1 << 10)
+#define RADEON_I2C_ABORT (1 << 11)
+#define RADEON_I2C_GO (1 << 12)
+#define RADEON_I2C_PRESCALE_SHIFT 16
#define RADEON_I2C_CNTL_1 0x0094
-#define RADEON_I2C_SEL (1<<16)
-#define RADEON_I2C_EN (1<<17)
+#define RADEON_I2C_DATA_COUNT_SHIFT 0
+#define RADEON_I2C_ADDR_COUNT_SHIFT 4
+#define RADEON_I2C_INTRA_BYTE_DELAY_SHIFT 8
+#define RADEON_I2C_SEL (1 << 16)
+#define RADEON_I2C_EN (1 << 17)
+#define RADEON_I2C_TIME_LIMIT_SHIFT 24
#define RADEON_I2C_DATA 0x0098
#define RADEON_DVI_I2C_CNTL_0 0x02e0
@@ -1067,7 +1077,7 @@
# define R200_SEL_DDC1 0 /* 0x60 - VGA_DDC */
# define R200_SEL_DDC2 1 /* 0x64 - DVI_DDC */
# define R200_SEL_DDC3 2 /* 0x68 - MONID_DDC */
-#define RADEON_DVI_I2C_CNTL_1 0x02e4 /* ? */
+#define RADEON_DVI_I2C_CNTL_1 0x02e4
#define RADEON_DVI_I2C_DATA 0x02e8
#define RADEON_INTERRUPT_LINE 0x0f3c /* PCI */
@@ -1138,14 +1148,15 @@
# define RADEON_IO_MCLK_MAX_DYN_STOP_LAT (1 << 13)
# define RADEON_MC_MCLK_DYN_ENABLE (1 << 14)
# define RADEON_IO_MCLK_DYN_ENABLE (1 << 15)
-#define RADEON_LCD_GPIO_MASK 0x01a0
+#define RADEON_GPIOPAD_MASK 0x0198
+#define RADEON_GPIOPAD_A 0x019c
#define RADEON_GPIOPAD_EN 0x01a0
+#define RADEON_GPIOPAD_Y 0x01a4
+#define RADEON_LCD_GPIO_MASK 0x01a0
#define RADEON_LCD_GPIO_Y_REG 0x01a4
#define RADEON_MDGPIO_A_REG 0x01ac
#define RADEON_MDGPIO_EN_REG 0x01b0
#define RADEON_MDGPIO_MASK 0x0198
-#define RADEON_GPIOPAD_MASK 0x0198
-#define RADEON_GPIOPAD_A 0x019c
#define RADEON_MDGPIO_Y_REG 0x01b4
#define RADEON_MEM_ADDR_CONFIG 0x0148
#define RADEON_MEM_BASE 0x0f10 /* PCI */
@@ -1355,6 +1366,9 @@
#define RADEON_OVR_CLR 0x0230
#define RADEON_OVR_WID_LEFT_RIGHT 0x0234
#define RADEON_OVR_WID_TOP_BOTTOM 0x0238
+#define RADEON_OVR2_CLR 0x0330
+#define RADEON_OVR2_WID_LEFT_RIGHT 0x0334
+#define RADEON_OVR2_WID_TOP_BOTTOM 0x0338
/* first capture unit */
@@ -3311,6 +3325,7 @@
#define RADEON_AIC_CNTL 0x01d0
# define RADEON_PCIGART_TRANSLATE_EN (1 << 0)
# define RADEON_DIS_OUT_OF_PCI_GART_ACCESS (1 << 1)
+# define RS400_MSI_REARM (1 << 3) /* rs400/rs480 */
#define RADEON_AIC_LO_ADDR 0x01dc
#define RADEON_AIC_PT_BASE 0x01d8
#define RADEON_AIC_HI_ADDR 0x01e0
diff --git a/drivers/gpu/drm/radeon/radeon_ring.c b/drivers/gpu/drm/radeon/radeon_ring.c
index 747b4bffb84..4d12b2d17b4 100644
--- a/drivers/gpu/drm/radeon/radeon_ring.c
+++ b/drivers/gpu/drm/radeon/radeon_ring.c
@@ -165,19 +165,24 @@ int radeon_ib_pool_init(struct radeon_device *rdev)
return 0;
/* Allocate 1M object buffer */
INIT_LIST_HEAD(&rdev->ib_pool.scheduled_ibs);
- r = radeon_object_create(rdev, NULL, RADEON_IB_POOL_SIZE*64*1024,
- true, RADEON_GEM_DOMAIN_GTT,
- false, &rdev->ib_pool.robj);
+ r = radeon_bo_create(rdev, NULL, RADEON_IB_POOL_SIZE*64*1024,
+ true, RADEON_GEM_DOMAIN_GTT,
+ &rdev->ib_pool.robj);
if (r) {
DRM_ERROR("radeon: failed to ib pool (%d).\n", r);
return r;
}
- r = radeon_object_pin(rdev->ib_pool.robj, RADEON_GEM_DOMAIN_GTT, &gpu_addr);
+ r = radeon_bo_reserve(rdev->ib_pool.robj, false);
+ if (unlikely(r != 0))
+ return r;
+ r = radeon_bo_pin(rdev->ib_pool.robj, RADEON_GEM_DOMAIN_GTT, &gpu_addr);
if (r) {
+ radeon_bo_unreserve(rdev->ib_pool.robj);
DRM_ERROR("radeon: failed to pin ib pool (%d).\n", r);
return r;
}
- r = radeon_object_kmap(rdev->ib_pool.robj, &ptr);
+ r = radeon_bo_kmap(rdev->ib_pool.robj, &ptr);
+ radeon_bo_unreserve(rdev->ib_pool.robj);
if (r) {
DRM_ERROR("radeon: failed to map ib poll (%d).\n", r);
return r;
@@ -203,14 +208,21 @@ int radeon_ib_pool_init(struct radeon_device *rdev)
void radeon_ib_pool_fini(struct radeon_device *rdev)
{
+ int r;
+
if (!rdev->ib_pool.ready) {
return;
}
mutex_lock(&rdev->ib_pool.mutex);
bitmap_zero(rdev->ib_pool.alloc_bm, RADEON_IB_POOL_SIZE);
if (rdev->ib_pool.robj) {
- radeon_object_kunmap(rdev->ib_pool.robj);
- radeon_object_unref(&rdev->ib_pool.robj);
+ r = radeon_bo_reserve(rdev->ib_pool.robj, false);
+ if (likely(r == 0)) {
+ radeon_bo_kunmap(rdev->ib_pool.robj);
+ radeon_bo_unpin(rdev->ib_pool.robj);
+ radeon_bo_unreserve(rdev->ib_pool.robj);
+ }
+ radeon_bo_unref(&rdev->ib_pool.robj);
rdev->ib_pool.robj = NULL;
}
mutex_unlock(&rdev->ib_pool.mutex);
@@ -288,29 +300,28 @@ int radeon_ring_init(struct radeon_device *rdev, unsigned ring_size)
rdev->cp.ring_size = ring_size;
/* Allocate ring buffer */
if (rdev->cp.ring_obj == NULL) {
- r = radeon_object_create(rdev, NULL, rdev->cp.ring_size,
- true,
- RADEON_GEM_DOMAIN_GTT,
- false,
- &rdev->cp.ring_obj);
+ r = radeon_bo_create(rdev, NULL, rdev->cp.ring_size, true,
+ RADEON_GEM_DOMAIN_GTT,
+ &rdev->cp.ring_obj);
if (r) {
- DRM_ERROR("radeon: failed to create ring buffer (%d).\n", r);
- mutex_unlock(&rdev->cp.mutex);
+ dev_err(rdev->dev, "(%d) ring create failed\n", r);
return r;
}
- r = radeon_object_pin(rdev->cp.ring_obj,
- RADEON_GEM_DOMAIN_GTT,
- &rdev->cp.gpu_addr);
+ r = radeon_bo_reserve(rdev->cp.ring_obj, false);
+ if (unlikely(r != 0))
+ return r;
+ r = radeon_bo_pin(rdev->cp.ring_obj, RADEON_GEM_DOMAIN_GTT,
+ &rdev->cp.gpu_addr);
if (r) {
- DRM_ERROR("radeon: failed to pin ring buffer (%d).\n", r);
- mutex_unlock(&rdev->cp.mutex);
+ radeon_bo_unreserve(rdev->cp.ring_obj);
+ dev_err(rdev->dev, "(%d) ring pin failed\n", r);
return r;
}
- r = radeon_object_kmap(rdev->cp.ring_obj,
+ r = radeon_bo_kmap(rdev->cp.ring_obj,
(void **)&rdev->cp.ring);
+ radeon_bo_unreserve(rdev->cp.ring_obj);
if (r) {
- DRM_ERROR("radeon: failed to map ring buffer (%d).\n", r);
- mutex_unlock(&rdev->cp.mutex);
+ dev_err(rdev->dev, "(%d) ring map failed\n", r);
return r;
}
}
@@ -321,11 +332,17 @@ int radeon_ring_init(struct radeon_device *rdev, unsigned ring_size)
void radeon_ring_fini(struct radeon_device *rdev)
{
+ int r;
+
mutex_lock(&rdev->cp.mutex);
if (rdev->cp.ring_obj) {
- radeon_object_kunmap(rdev->cp.ring_obj);
- radeon_object_unpin(rdev->cp.ring_obj);
- radeon_object_unref(&rdev->cp.ring_obj);
+ r = radeon_bo_reserve(rdev->cp.ring_obj, false);
+ if (likely(r == 0)) {
+ radeon_bo_kunmap(rdev->cp.ring_obj);
+ radeon_bo_unpin(rdev->cp.ring_obj);
+ radeon_bo_unreserve(rdev->cp.ring_obj);
+ }
+ radeon_bo_unref(&rdev->cp.ring_obj);
rdev->cp.ring = NULL;
rdev->cp.ring_obj = NULL;
}
diff --git a/drivers/gpu/drm/radeon/radeon_test.c b/drivers/gpu/drm/radeon/radeon_test.c
index 03c33cf4e14..391c973ec4d 100644
--- a/drivers/gpu/drm/radeon/radeon_test.c
+++ b/drivers/gpu/drm/radeon/radeon_test.c
@@ -30,8 +30,8 @@
/* Test BO GTT->VRAM and VRAM->GTT GPU copies across the whole GTT aperture */
void radeon_test_moves(struct radeon_device *rdev)
{
- struct radeon_object *vram_obj = NULL;
- struct radeon_object **gtt_obj = NULL;
+ struct radeon_bo *vram_obj = NULL;
+ struct radeon_bo **gtt_obj = NULL;
struct radeon_fence *fence = NULL;
uint64_t gtt_addr, vram_addr;
unsigned i, n, size;
@@ -42,7 +42,7 @@ void radeon_test_moves(struct radeon_device *rdev)
/* Number of tests =
* (Total GTT - IB pool - writeback page - ring buffer) / test size
*/
- n = (rdev->mc.gtt_size - RADEON_IB_POOL_SIZE*64*1024 - 4096 -
+ n = (rdev->mc.gtt_size - RADEON_IB_POOL_SIZE*64*1024 - RADEON_GPU_PAGE_SIZE -
rdev->cp.ring_size) / size;
gtt_obj = kzalloc(n * sizeof(*gtt_obj), GFP_KERNEL);
@@ -52,38 +52,42 @@ void radeon_test_moves(struct radeon_device *rdev)
goto out_cleanup;
}
- r = radeon_object_create(rdev, NULL, size, true, RADEON_GEM_DOMAIN_VRAM,
- false, &vram_obj);
+ r = radeon_bo_create(rdev, NULL, size, true, RADEON_GEM_DOMAIN_VRAM,
+ &vram_obj);
if (r) {
DRM_ERROR("Failed to create VRAM object\n");
goto out_cleanup;
}
-
- r = radeon_object_pin(vram_obj, RADEON_GEM_DOMAIN_VRAM, &vram_addr);
+ r = radeon_bo_reserve(vram_obj, false);
+ if (unlikely(r != 0))
+ goto out_cleanup;
+ r = radeon_bo_pin(vram_obj, RADEON_GEM_DOMAIN_VRAM, &vram_addr);
if (r) {
DRM_ERROR("Failed to pin VRAM object\n");
goto out_cleanup;
}
-
for (i = 0; i < n; i++) {
void *gtt_map, *vram_map;
void **gtt_start, **gtt_end;
void **vram_start, **vram_end;
- r = radeon_object_create(rdev, NULL, size, true,
- RADEON_GEM_DOMAIN_GTT, false, gtt_obj + i);
+ r = radeon_bo_create(rdev, NULL, size, true,
+ RADEON_GEM_DOMAIN_GTT, gtt_obj + i);
if (r) {
DRM_ERROR("Failed to create GTT object %d\n", i);
goto out_cleanup;
}
- r = radeon_object_pin(gtt_obj[i], RADEON_GEM_DOMAIN_GTT, &gtt_addr);
+ r = radeon_bo_reserve(gtt_obj[i], false);
+ if (unlikely(r != 0))
+ goto out_cleanup;
+ r = radeon_bo_pin(gtt_obj[i], RADEON_GEM_DOMAIN_GTT, &gtt_addr);
if (r) {
DRM_ERROR("Failed to pin GTT object %d\n", i);
goto out_cleanup;
}
- r = radeon_object_kmap(gtt_obj[i], &gtt_map);
+ r = radeon_bo_kmap(gtt_obj[i], &gtt_map);
if (r) {
DRM_ERROR("Failed to map GTT object %d\n", i);
goto out_cleanup;
@@ -94,7 +98,7 @@ void radeon_test_moves(struct radeon_device *rdev)
gtt_start++)
*gtt_start = gtt_start;
- radeon_object_kunmap(gtt_obj[i]);
+ radeon_bo_kunmap(gtt_obj[i]);
r = radeon_fence_create(rdev, &fence);
if (r) {
@@ -102,7 +106,7 @@ void radeon_test_moves(struct radeon_device *rdev)
goto out_cleanup;
}
- r = radeon_copy(rdev, gtt_addr, vram_addr, size / 4096, fence);
+ r = radeon_copy(rdev, gtt_addr, vram_addr, size / RADEON_GPU_PAGE_SIZE, fence);
if (r) {
DRM_ERROR("Failed GTT->VRAM copy %d\n", i);
goto out_cleanup;
@@ -116,7 +120,7 @@ void radeon_test_moves(struct radeon_device *rdev)
radeon_fence_unref(&fence);
- r = radeon_object_kmap(vram_obj, &vram_map);
+ r = radeon_bo_kmap(vram_obj, &vram_map);
if (r) {
DRM_ERROR("Failed to map VRAM object after copy %d\n", i);
goto out_cleanup;
@@ -131,13 +135,13 @@ void radeon_test_moves(struct radeon_device *rdev)
"expected 0x%p (GTT map 0x%p-0x%p)\n",
i, *vram_start, gtt_start, gtt_map,
gtt_end);
- radeon_object_kunmap(vram_obj);
+ radeon_bo_kunmap(vram_obj);
goto out_cleanup;
}
*vram_start = vram_start;
}
- radeon_object_kunmap(vram_obj);
+ radeon_bo_kunmap(vram_obj);
r = radeon_fence_create(rdev, &fence);
if (r) {
@@ -145,7 +149,7 @@ void radeon_test_moves(struct radeon_device *rdev)
goto out_cleanup;
}
- r = radeon_copy(rdev, vram_addr, gtt_addr, size / 4096, fence);
+ r = radeon_copy(rdev, vram_addr, gtt_addr, size / RADEON_GPU_PAGE_SIZE, fence);
if (r) {
DRM_ERROR("Failed VRAM->GTT copy %d\n", i);
goto out_cleanup;
@@ -159,7 +163,7 @@ void radeon_test_moves(struct radeon_device *rdev)
radeon_fence_unref(&fence);
- r = radeon_object_kmap(gtt_obj[i], &gtt_map);
+ r = radeon_bo_kmap(gtt_obj[i], &gtt_map);
if (r) {
DRM_ERROR("Failed to map GTT object after copy %d\n", i);
goto out_cleanup;
@@ -174,12 +178,12 @@ void radeon_test_moves(struct radeon_device *rdev)
"expected 0x%p (VRAM map 0x%p-0x%p)\n",
i, *gtt_start, vram_start, vram_map,
vram_end);
- radeon_object_kunmap(gtt_obj[i]);
+ radeon_bo_kunmap(gtt_obj[i]);
goto out_cleanup;
}
}
- radeon_object_kunmap(gtt_obj[i]);
+ radeon_bo_kunmap(gtt_obj[i]);
DRM_INFO("Tested GTT->VRAM and VRAM->GTT copy for GTT offset 0x%llx\n",
gtt_addr - rdev->mc.gtt_location);
@@ -187,14 +191,20 @@ void radeon_test_moves(struct radeon_device *rdev)
out_cleanup:
if (vram_obj) {
- radeon_object_unpin(vram_obj);
- radeon_object_unref(&vram_obj);
+ if (radeon_bo_is_reserved(vram_obj)) {
+ radeon_bo_unpin(vram_obj);
+ radeon_bo_unreserve(vram_obj);
+ }
+ radeon_bo_unref(&vram_obj);
}
if (gtt_obj) {
for (i = 0; i < n; i++) {
if (gtt_obj[i]) {
- radeon_object_unpin(gtt_obj[i]);
- radeon_object_unref(&gtt_obj[i]);
+ if (radeon_bo_is_reserved(gtt_obj[i])) {
+ radeon_bo_unpin(gtt_obj[i]);
+ radeon_bo_unreserve(gtt_obj[i]);
+ }
+ radeon_bo_unref(&gtt_obj[i]);
}
}
kfree(gtt_obj);
@@ -206,4 +216,3 @@ out_cleanup:
printk(KERN_WARNING "Error while testing BO move.\n");
}
}
-
diff --git a/drivers/gpu/drm/radeon/radeon_ttm.c b/drivers/gpu/drm/radeon/radeon_ttm.c
index 765bd184b6f..bdb46c8cadd 100644
--- a/drivers/gpu/drm/radeon/radeon_ttm.c
+++ b/drivers/gpu/drm/radeon/radeon_ttm.c
@@ -150,7 +150,7 @@ static int radeon_init_mem_type(struct ttm_bo_device *bdev, uint32_t type,
man->default_caching = TTM_PL_FLAG_CACHED;
break;
case TTM_PL_TT:
- man->gpu_offset = 0;
+ man->gpu_offset = rdev->mc.gtt_location;
man->available_caching = TTM_PL_MASK_CACHING;
man->default_caching = TTM_PL_FLAG_CACHED;
man->flags = TTM_MEMTYPE_FLAG_MAPPABLE | TTM_MEMTYPE_FLAG_CMA;
@@ -180,7 +180,7 @@ static int radeon_init_mem_type(struct ttm_bo_device *bdev, uint32_t type,
break;
case TTM_PL_VRAM:
/* "On-card" video ram */
- man->gpu_offset = 0;
+ man->gpu_offset = rdev->mc.vram_location;
man->flags = TTM_MEMTYPE_FLAG_FIXED |
TTM_MEMTYPE_FLAG_NEEDS_IOREMAP |
TTM_MEMTYPE_FLAG_MAPPABLE;
@@ -295,6 +295,12 @@ static int radeon_move_vram_ram(struct ttm_buffer_object *bo,
if (unlikely(r)) {
return r;
}
+
+ r = ttm_tt_set_placement_caching(bo->ttm, tmp_mem.placement);
+ if (unlikely(r)) {
+ goto out_cleanup;
+ }
+
r = ttm_tt_bind(bo->ttm, &tmp_mem);
if (unlikely(r)) {
goto out_cleanup;
@@ -476,27 +482,31 @@ int radeon_ttm_init(struct radeon_device *rdev)
DRM_ERROR("failed initializing buffer object driver(%d).\n", r);
return r;
}
- r = ttm_bo_init_mm(&rdev->mman.bdev, TTM_PL_VRAM, 0,
- ((rdev->mc.real_vram_size) >> PAGE_SHIFT));
+ r = ttm_bo_init_mm(&rdev->mman.bdev, TTM_PL_VRAM,
+ 0, rdev->mc.real_vram_size >> PAGE_SHIFT);
if (r) {
DRM_ERROR("Failed initializing VRAM heap.\n");
return r;
}
- r = radeon_object_create(rdev, NULL, 256 * 1024, true,
- RADEON_GEM_DOMAIN_VRAM, false,
- &rdev->stollen_vga_memory);
+ r = radeon_bo_create(rdev, NULL, 256 * 1024, true,
+ RADEON_GEM_DOMAIN_VRAM,
+ &rdev->stollen_vga_memory);
if (r) {
return r;
}
- r = radeon_object_pin(rdev->stollen_vga_memory, RADEON_GEM_DOMAIN_VRAM, NULL);
+ r = radeon_bo_reserve(rdev->stollen_vga_memory, false);
+ if (r)
+ return r;
+ r = radeon_bo_pin(rdev->stollen_vga_memory, RADEON_GEM_DOMAIN_VRAM, NULL);
+ radeon_bo_unreserve(rdev->stollen_vga_memory);
if (r) {
- radeon_object_unref(&rdev->stollen_vga_memory);
+ radeon_bo_unref(&rdev->stollen_vga_memory);
return r;
}
DRM_INFO("radeon: %uM of VRAM memory ready\n",
(unsigned)rdev->mc.real_vram_size / (1024 * 1024));
- r = ttm_bo_init_mm(&rdev->mman.bdev, TTM_PL_TT, 0,
- ((rdev->mc.gtt_size) >> PAGE_SHIFT));
+ r = ttm_bo_init_mm(&rdev->mman.bdev, TTM_PL_TT,
+ 0, rdev->mc.gtt_size >> PAGE_SHIFT);
if (r) {
DRM_ERROR("Failed initializing GTT heap.\n");
return r;
@@ -517,9 +527,15 @@ int radeon_ttm_init(struct radeon_device *rdev)
void radeon_ttm_fini(struct radeon_device *rdev)
{
+ int r;
+
if (rdev->stollen_vga_memory) {
- radeon_object_unpin(rdev->stollen_vga_memory);
- radeon_object_unref(&rdev->stollen_vga_memory);
+ r = radeon_bo_reserve(rdev->stollen_vga_memory, false);
+ if (r == 0) {
+ radeon_bo_unpin(rdev->stollen_vga_memory);
+ radeon_bo_unreserve(rdev->stollen_vga_memory);
+ }
+ radeon_bo_unref(&rdev->stollen_vga_memory);
}
ttm_bo_clean_mm(&rdev->mman.bdev, TTM_PL_VRAM);
ttm_bo_clean_mm(&rdev->mman.bdev, TTM_PL_TT);
diff --git a/drivers/gpu/drm/radeon/rs400.c b/drivers/gpu/drm/radeon/rs400.c
index a769c296f6a..eda6d757b5c 100644
--- a/drivers/gpu/drm/radeon/rs400.c
+++ b/drivers/gpu/drm/radeon/rs400.c
@@ -352,7 +352,7 @@ static int rs400_mc_init(struct radeon_device *rdev)
u32 tmp;
/* Setup GPU memory space */
- tmp = G_00015C_MC_FB_START(RREG32(R_00015C_NB_TOM));
+ tmp = RREG32(R_00015C_NB_TOM);
rdev->mc.vram_location = G_00015C_MC_FB_START(tmp) << 16;
rdev->mc.gtt_location = 0xFFFFFFFFUL;
r = radeon_mc_setup(rdev);
@@ -387,13 +387,13 @@ static int rs400_startup(struct radeon_device *rdev)
r300_clock_startup(rdev);
/* Initialize GPU configuration (# pipes, ...) */
rs400_gpu_init(rdev);
+ r100_enable_bm(rdev);
/* Initialize GART (initialize after TTM so we can allocate
* memory through TTM but finalize after TTM) */
r = rs400_gart_enable(rdev);
if (r)
return r;
/* Enable IRQ */
- rdev->irq.sw_int = true;
r100_irq_set(rdev);
/* 1M ring buffer */
r = r100_cp_init(rdev, 1024 * 1024);
@@ -418,6 +418,8 @@ int rs400_resume(struct radeon_device *rdev)
rs400_gart_disable(rdev);
/* Resume clock before doing reset */
r300_clock_startup(rdev);
+ /* setup MC before calling post tables */
+ rs400_mc_program(rdev);
/* Reset gpu before posting otherwise ATOM will enter infinite loop */
if (radeon_gpu_reset(rdev)) {
dev_warn(rdev->dev, "GPU reset failed ! (0xE40=0x%08X, 0x7C0=0x%08X)\n",
@@ -450,7 +452,7 @@ void rs400_fini(struct radeon_device *rdev)
rs400_gart_fini(rdev);
radeon_irq_kms_fini(rdev);
radeon_fence_driver_fini(rdev);
- radeon_object_fini(rdev);
+ radeon_bo_fini(rdev);
radeon_atombios_fini(rdev);
kfree(rdev->bios);
rdev->bios = NULL;
@@ -488,10 +490,9 @@ int rs400_init(struct radeon_device *rdev)
RREG32(R_0007C0_CP_STAT));
}
/* check if cards are posted or not */
- if (!radeon_card_posted(rdev) && rdev->bios) {
- DRM_INFO("GPU not posted. posting now...\n");
- radeon_combios_asic_init(rdev->ddev);
- }
+ if (radeon_boot_test_post_card(rdev) == false)
+ return -EINVAL;
+
/* Initialize clocks */
radeon_get_clock_info(rdev->ddev);
/* Get vram informations */
@@ -508,7 +509,7 @@ int rs400_init(struct radeon_device *rdev)
if (r)
return r;
/* Memory manager */
- r = radeon_object_init(rdev);
+ r = radeon_bo_init(rdev);
if (r)
return r;
r = rs400_gart_init(rdev);
diff --git a/drivers/gpu/drm/radeon/rs600.c b/drivers/gpu/drm/radeon/rs600.c
index 10dfa78762d..84b26376027 100644
--- a/drivers/gpu/drm/radeon/rs600.c
+++ b/drivers/gpu/drm/radeon/rs600.c
@@ -45,6 +45,21 @@
void rs600_gpu_init(struct radeon_device *rdev);
int rs600_mc_wait_for_idle(struct radeon_device *rdev);
+int rs600_mc_init(struct radeon_device *rdev)
+{
+ /* read back the MC value from the hw */
+ int r;
+ u32 tmp;
+
+ /* Setup GPU memory space */
+ tmp = RREG32_MC(R_000004_MC_FB_LOCATION);
+ rdev->mc.vram_location = G_000004_MC_FB_START(tmp) << 16;
+ rdev->mc.gtt_location = 0xffffffffUL;
+ r = radeon_mc_setup(rdev);
+ if (r)
+ return r;
+ return 0;
+}
/*
* GART.
*/
@@ -100,40 +115,40 @@ int rs600_gart_enable(struct radeon_device *rdev)
WREG32(R_00004C_BUS_CNTL, tmp);
/* FIXME: setup default page */
WREG32_MC(R_000100_MC_PT0_CNTL,
- (S_000100_EFFECTIVE_L2_CACHE_SIZE(6) |
- S_000100_EFFECTIVE_L2_QUEUE_SIZE(6)));
+ (S_000100_EFFECTIVE_L2_CACHE_SIZE(6) |
+ S_000100_EFFECTIVE_L2_QUEUE_SIZE(6)));
+
for (i = 0; i < 19; i++) {
WREG32_MC(R_00016C_MC_PT0_CLIENT0_CNTL + i,
- S_00016C_ENABLE_TRANSLATION_MODE_OVERRIDE(1) |
- S_00016C_SYSTEM_ACCESS_MODE_MASK(
- V_00016C_SYSTEM_ACCESS_MODE_IN_SYS) |
- S_00016C_SYSTEM_APERTURE_UNMAPPED_ACCESS(
- V_00016C_SYSTEM_APERTURE_UNMAPPED_DEFAULT_PAGE) |
- S_00016C_EFFECTIVE_L1_CACHE_SIZE(1) |
- S_00016C_ENABLE_FRAGMENT_PROCESSING(1) |
- S_00016C_EFFECTIVE_L1_QUEUE_SIZE(1));
+ S_00016C_ENABLE_TRANSLATION_MODE_OVERRIDE(1) |
+ S_00016C_SYSTEM_ACCESS_MODE_MASK(
+ V_00016C_SYSTEM_ACCESS_MODE_NOT_IN_SYS) |
+ S_00016C_SYSTEM_APERTURE_UNMAPPED_ACCESS(
+ V_00016C_SYSTEM_APERTURE_UNMAPPED_PASSTHROUGH) |
+ S_00016C_EFFECTIVE_L1_CACHE_SIZE(3) |
+ S_00016C_ENABLE_FRAGMENT_PROCESSING(1) |
+ S_00016C_EFFECTIVE_L1_QUEUE_SIZE(3));
}
-
- /* System context map to GART space */
- WREG32_MC(R_000112_MC_PT0_SYSTEM_APERTURE_LOW_ADDR, rdev->mc.gtt_start);
- WREG32_MC(R_000114_MC_PT0_SYSTEM_APERTURE_HIGH_ADDR, rdev->mc.gtt_end);
-
/* enable first context */
- WREG32_MC(R_00013C_MC_PT0_CONTEXT0_FLAT_START_ADDR, rdev->mc.gtt_start);
- WREG32_MC(R_00014C_MC_PT0_CONTEXT0_FLAT_END_ADDR, rdev->mc.gtt_end);
WREG32_MC(R_000102_MC_PT0_CONTEXT0_CNTL,
- S_000102_ENABLE_PAGE_TABLE(1) |
- S_000102_PAGE_TABLE_DEPTH(V_000102_PAGE_TABLE_FLAT));
+ S_000102_ENABLE_PAGE_TABLE(1) |
+ S_000102_PAGE_TABLE_DEPTH(V_000102_PAGE_TABLE_FLAT));
+
/* disable all other contexts */
- for (i = 1; i < 8; i++) {
+ for (i = 1; i < 8; i++)
WREG32_MC(R_000102_MC_PT0_CONTEXT0_CNTL + i, 0);
- }
/* setup the page table */
WREG32_MC(R_00012C_MC_PT0_CONTEXT0_FLAT_BASE_ADDR,
- rdev->gart.table_addr);
+ rdev->gart.table_addr);
+ WREG32_MC(R_00013C_MC_PT0_CONTEXT0_FLAT_START_ADDR, rdev->mc.gtt_start);
+ WREG32_MC(R_00014C_MC_PT0_CONTEXT0_FLAT_END_ADDR, rdev->mc.gtt_end);
WREG32_MC(R_00011C_MC_PT0_CONTEXT0_DEFAULT_READ_ADDR, 0);
+ /* System context maps to VRAM space */
+ WREG32_MC(R_000112_MC_PT0_SYSTEM_APERTURE_LOW_ADDR, rdev->mc.vram_start);
+ WREG32_MC(R_000114_MC_PT0_SYSTEM_APERTURE_HIGH_ADDR, rdev->mc.vram_end);
+
/* enable page tables */
tmp = RREG32_MC(R_000100_MC_PT0_CNTL);
WREG32_MC(R_000100_MC_PT0_CNTL, (tmp | S_000100_ENABLE_PT(1)));
@@ -146,15 +161,20 @@ int rs600_gart_enable(struct radeon_device *rdev)
void rs600_gart_disable(struct radeon_device *rdev)
{
- uint32_t tmp;
+ u32 tmp;
+ int r;
/* FIXME: disable out of gart access */
WREG32_MC(R_000100_MC_PT0_CNTL, 0);
tmp = RREG32_MC(R_000009_MC_CNTL1);
WREG32_MC(R_000009_MC_CNTL1, tmp & C_000009_ENABLE_PAGE_TABLES);
if (rdev->gart.table.vram.robj) {
- radeon_object_kunmap(rdev->gart.table.vram.robj);
- radeon_object_unpin(rdev->gart.table.vram.robj);
+ r = radeon_bo_reserve(rdev->gart.table.vram.robj, false);
+ if (r == 0) {
+ radeon_bo_kunmap(rdev->gart.table.vram.robj);
+ radeon_bo_unpin(rdev->gart.table.vram.robj);
+ radeon_bo_unreserve(rdev->gart.table.vram.robj);
+ }
}
}
@@ -242,7 +262,7 @@ void rs600_irq_disable(struct radeon_device *rdev)
int rs600_irq_process(struct radeon_device *rdev)
{
- uint32_t status;
+ uint32_t status, msi_rearm;
uint32_t r500_disp_int;
status = rs600_irq_ack(rdev, &r500_disp_int);
@@ -260,6 +280,22 @@ int rs600_irq_process(struct radeon_device *rdev)
drm_handle_vblank(rdev->ddev, 1);
status = rs600_irq_ack(rdev, &r500_disp_int);
}
+ if (rdev->msi_enabled) {
+ switch (rdev->family) {
+ case CHIP_RS600:
+ case CHIP_RS690:
+ case CHIP_RS740:
+ msi_rearm = RREG32(RADEON_BUS_CNTL) & ~RS600_MSI_REARM;
+ WREG32(RADEON_BUS_CNTL, msi_rearm);
+ WREG32(RADEON_BUS_CNTL, msi_rearm | RS600_MSI_REARM);
+ break;
+ default:
+ msi_rearm = RREG32(RADEON_MSI_REARM_EN) & ~RV370_MSI_REARM_EN;
+ WREG32(RADEON_MSI_REARM_EN, msi_rearm);
+ WREG32(RADEON_MSI_REARM_EN, msi_rearm | RV370_MSI_REARM_EN);
+ break;
+ }
+ }
return IRQ_HANDLED;
}
@@ -285,9 +321,7 @@ int rs600_mc_wait_for_idle(struct radeon_device *rdev)
void rs600_gpu_init(struct radeon_device *rdev)
{
- /* FIXME: HDP same place on rs600 ? */
r100_hdp_reset(rdev);
- /* FIXME: is this correct ? */
r420_pipes_init(rdev);
/* Wait for mc idle */
if (rs600_mc_wait_for_idle(rdev))
@@ -296,9 +330,20 @@ void rs600_gpu_init(struct radeon_device *rdev)
void rs600_vram_info(struct radeon_device *rdev)
{
- /* FIXME: to do or is these values sane ? */
rdev->mc.vram_is_ddr = true;
rdev->mc.vram_width = 128;
+
+ rdev->mc.real_vram_size = RREG32(RADEON_CONFIG_MEMSIZE);
+ rdev->mc.mc_vram_size = rdev->mc.real_vram_size;
+
+ rdev->mc.aper_base = drm_get_resource_start(rdev->ddev, 0);
+ rdev->mc.aper_size = drm_get_resource_len(rdev->ddev, 0);
+
+ if (rdev->mc.mc_vram_size > rdev->mc.aper_size)
+ rdev->mc.mc_vram_size = rdev->mc.aper_size;
+
+ if (rdev->mc.real_vram_size > rdev->mc.aper_size)
+ rdev->mc.real_vram_size = rdev->mc.aper_size;
}
void rs600_bandwidth_update(struct radeon_device *rdev)
@@ -372,7 +417,6 @@ static int rs600_startup(struct radeon_device *rdev)
if (r)
return r;
/* Enable IRQ */
- rdev->irq.sw_int = true;
rs600_irq_set(rdev);
/* 1M ring buffer */
r = r100_cp_init(rdev, 1024 * 1024);
@@ -429,7 +473,7 @@ void rs600_fini(struct radeon_device *rdev)
rs600_gart_fini(rdev);
radeon_irq_kms_fini(rdev);
radeon_fence_driver_fini(rdev);
- radeon_object_fini(rdev);
+ radeon_bo_fini(rdev);
radeon_atombios_fini(rdev);
kfree(rdev->bios);
rdev->bios = NULL;
@@ -466,16 +510,17 @@ int rs600_init(struct radeon_device *rdev)
RREG32(R_0007C0_CP_STAT));
}
/* check if cards are posted or not */
- if (!radeon_card_posted(rdev) && rdev->bios) {
- DRM_INFO("GPU not posted. posting now...\n");
- atom_asic_init(rdev->mode_info.atom_context);
- }
+ if (radeon_boot_test_post_card(rdev) == false)
+ return -EINVAL;
+
/* Initialize clocks */
radeon_get_clock_info(rdev->ddev);
+ /* Initialize power management */
+ radeon_pm_init(rdev);
/* Get vram informations */
rs600_vram_info(rdev);
/* Initialize memory controller (also test AGP) */
- r = r420_mc_init(rdev);
+ r = rs600_mc_init(rdev);
if (r)
return r;
rs600_debugfs(rdev);
@@ -487,7 +532,7 @@ int rs600_init(struct radeon_device *rdev)
if (r)
return r;
/* Memory manager */
- r = radeon_object_init(rdev);
+ r = radeon_bo_init(rdev);
if (r)
return r;
r = rs600_gart_init(rdev);
diff --git a/drivers/gpu/drm/radeon/rs690.c b/drivers/gpu/drm/radeon/rs690.c
index 025e3225346..eb486ee7ea0 100644
--- a/drivers/gpu/drm/radeon/rs690.c
+++ b/drivers/gpu/drm/radeon/rs690.c
@@ -131,24 +131,25 @@ void rs690_pm_info(struct radeon_device *rdev)
void rs690_vram_info(struct radeon_device *rdev)
{
- uint32_t tmp;
fixed20_12 a;
rs400_gart_adjust_size(rdev);
- /* DDR for all card after R300 & IGP */
+
rdev->mc.vram_is_ddr = true;
- /* FIXME: is this correct for RS690/RS740 ? */
- tmp = RREG32(RADEON_MEM_CNTL);
- if (tmp & R300_MEM_NUM_CHANNELS_MASK) {
- rdev->mc.vram_width = 128;
- } else {
- rdev->mc.vram_width = 64;
- }
+ rdev->mc.vram_width = 128;
+
rdev->mc.real_vram_size = RREG32(RADEON_CONFIG_MEMSIZE);
rdev->mc.mc_vram_size = rdev->mc.real_vram_size;
rdev->mc.aper_base = drm_get_resource_start(rdev->ddev, 0);
rdev->mc.aper_size = drm_get_resource_len(rdev->ddev, 0);
+
+ if (rdev->mc.mc_vram_size > rdev->mc.aper_size)
+ rdev->mc.mc_vram_size = rdev->mc.aper_size;
+
+ if (rdev->mc.real_vram_size > rdev->mc.aper_size)
+ rdev->mc.real_vram_size = rdev->mc.aper_size;
+
rs690_pm_info(rdev);
/* FIXME: we should enforce default clock in case GPU is not in
* default setup
@@ -161,6 +162,21 @@ void rs690_vram_info(struct radeon_device *rdev)
rdev->pm.core_bandwidth.full = rfixed_div(rdev->pm.sclk, a);
}
+static int rs690_mc_init(struct radeon_device *rdev)
+{
+ int r;
+ u32 tmp;
+
+ /* Setup GPU memory space */
+ tmp = RREG32_MC(R_000100_MCCFG_FB_LOCATION);
+ rdev->mc.vram_location = G_000100_MC_FB_START(tmp) << 16;
+ rdev->mc.gtt_location = 0xFFFFFFFFUL;
+ r = radeon_mc_setup(rdev);
+ if (r)
+ return r;
+ return 0;
+}
+
void rs690_line_buffer_adjust(struct radeon_device *rdev,
struct drm_display_mode *mode1,
struct drm_display_mode *mode2)
@@ -605,7 +621,6 @@ static int rs690_startup(struct radeon_device *rdev)
if (r)
return r;
/* Enable IRQ */
- rdev->irq.sw_int = true;
rs600_irq_set(rdev);
/* 1M ring buffer */
r = r100_cp_init(rdev, 1024 * 1024);
@@ -662,7 +677,7 @@ void rs690_fini(struct radeon_device *rdev)
rs400_gart_fini(rdev);
radeon_irq_kms_fini(rdev);
radeon_fence_driver_fini(rdev);
- radeon_object_fini(rdev);
+ radeon_bo_fini(rdev);
radeon_atombios_fini(rdev);
kfree(rdev->bios);
rdev->bios = NULL;
@@ -700,16 +715,17 @@ int rs690_init(struct radeon_device *rdev)
RREG32(R_0007C0_CP_STAT));
}
/* check if cards are posted or not */
- if (!radeon_card_posted(rdev) && rdev->bios) {
- DRM_INFO("GPU not posted. posting now...\n");
- atom_asic_init(rdev->mode_info.atom_context);
- }
+ if (radeon_boot_test_post_card(rdev) == false)
+ return -EINVAL;
+
/* Initialize clocks */
radeon_get_clock_info(rdev->ddev);
+ /* Initialize power management */
+ radeon_pm_init(rdev);
/* Get vram informations */
rs690_vram_info(rdev);
/* Initialize memory controller (also test AGP) */
- r = r420_mc_init(rdev);
+ r = rs690_mc_init(rdev);
if (r)
return r;
rv515_debugfs(rdev);
@@ -721,7 +737,7 @@ int rs690_init(struct radeon_device *rdev)
if (r)
return r;
/* Memory manager */
- r = radeon_object_init(rdev);
+ r = radeon_bo_init(rdev);
if (r)
return r;
r = rs400_gart_init(rdev);
diff --git a/drivers/gpu/drm/radeon/rv515.c b/drivers/gpu/drm/radeon/rv515.c
index 41a34c23e6d..7793239e24b 100644
--- a/drivers/gpu/drm/radeon/rv515.c
+++ b/drivers/gpu/drm/radeon/rv515.c
@@ -380,7 +380,6 @@ void rv515_mc_stop(struct radeon_device *rdev, struct rv515_mc_save *save)
save->d2crtc_control = RREG32(R_006880_D2CRTC_CONTROL);
/* Stop all video */
- WREG32(R_000330_D1VGA_CONTROL, 0);
WREG32(R_0068E8_D2CRTC_UPDATE_LOCK, 0);
WREG32(R_000300_VGA_RENDER_CONTROL, 0);
WREG32(R_0060E8_D1CRTC_UPDATE_LOCK, 1);
@@ -389,6 +388,8 @@ void rv515_mc_stop(struct radeon_device *rdev, struct rv515_mc_save *save)
WREG32(R_006880_D2CRTC_CONTROL, 0);
WREG32(R_0060E8_D1CRTC_UPDATE_LOCK, 0);
WREG32(R_0068E8_D2CRTC_UPDATE_LOCK, 0);
+ WREG32(R_000330_D1VGA_CONTROL, 0);
+ WREG32(R_000338_D2VGA_CONTROL, 0);
}
void rv515_mc_resume(struct radeon_device *rdev, struct rv515_mc_save *save)
@@ -402,14 +403,14 @@ void rv515_mc_resume(struct radeon_device *rdev, struct rv515_mc_save *save)
WREG32(R_000328_VGA_HDP_CONTROL, save->vga_hdp_control);
mdelay(1);
/* Restore video state */
+ WREG32(R_000330_D1VGA_CONTROL, save->d1vga_control);
+ WREG32(R_000338_D2VGA_CONTROL, save->d2vga_control);
WREG32(R_0060E8_D1CRTC_UPDATE_LOCK, 1);
WREG32(R_0068E8_D2CRTC_UPDATE_LOCK, 1);
WREG32(R_006080_D1CRTC_CONTROL, save->d1crtc_control);
WREG32(R_006880_D2CRTC_CONTROL, save->d2crtc_control);
WREG32(R_0060E8_D1CRTC_UPDATE_LOCK, 0);
WREG32(R_0068E8_D2CRTC_UPDATE_LOCK, 0);
- WREG32(R_000330_D1VGA_CONTROL, save->d1vga_control);
- WREG32(R_000338_D2VGA_CONTROL, save->d2vga_control);
WREG32(R_000300_VGA_RENDER_CONTROL, save->vga_render_control);
}
@@ -477,7 +478,6 @@ static int rv515_startup(struct radeon_device *rdev)
return r;
}
/* Enable IRQ */
- rdev->irq.sw_int = true;
rs600_irq_set(rdev);
/* 1M ring buffer */
r = r100_cp_init(rdev, 1024 * 1024);
@@ -539,11 +539,11 @@ void rv515_fini(struct radeon_device *rdev)
r100_wb_fini(rdev);
r100_ib_fini(rdev);
radeon_gem_fini(rdev);
- rv370_pcie_gart_fini(rdev);
+ rv370_pcie_gart_fini(rdev);
radeon_agp_fini(rdev);
radeon_irq_kms_fini(rdev);
radeon_fence_driver_fini(rdev);
- radeon_object_fini(rdev);
+ radeon_bo_fini(rdev);
radeon_atombios_fini(rdev);
kfree(rdev->bios);
rdev->bios = NULL;
@@ -579,12 +579,12 @@ int rv515_init(struct radeon_device *rdev)
RREG32(R_0007C0_CP_STAT));
}
/* check if cards are posted or not */
- if (!radeon_card_posted(rdev) && rdev->bios) {
- DRM_INFO("GPU not posted. posting now...\n");
- atom_asic_init(rdev->mode_info.atom_context);
- }
+ if (radeon_boot_test_post_card(rdev) == false)
+ return -EINVAL;
/* Initialize clocks */
radeon_get_clock_info(rdev->ddev);
+ /* Initialize power management */
+ radeon_pm_init(rdev);
/* Get vram information */
rv515_vram_info(rdev);
/* Initialize memory controller (also test AGP) */
@@ -600,7 +600,7 @@ int rv515_init(struct radeon_device *rdev)
if (r)
return r;
/* Memory manager */
- r = radeon_object_init(rdev);
+ r = radeon_bo_init(rdev);
if (r)
return r;
r = rv370_pcie_gart_init(rdev);
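
Both rs690_init() and rv515_init() above (and rv770_init() further down) now bail out with -EINVAL instead of silently continuing when the card has not been POSTed and no BIOS is available. Inferred from these call sites only, the new helper behaves roughly as sketched below for the AtomBIOS path; this is not its exact implementation, which also has to cover legacy COMBIOS cards:

	/* Rough shape of radeon_boot_test_post_card(), reconstructed from
	 * its call sites in this patch (AtomBIOS path only). */
	bool radeon_boot_test_post_card(struct radeon_device *rdev)
	{
		if (radeon_card_posted(rdev))
			return true;	/* BIOS already initialised the ASIC */
		if (rdev->bios) {
			DRM_INFO("GPU not posted. posting now...\n");
			atom_asic_init(rdev->mode_info.atom_context);
			return true;
		}
		dev_err(rdev->dev, "Card not posted and no BIOS - ignoring\n");
		return false;
	}
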
diff --git a/drivers/gpu/drm/radeon/rv770.c b/drivers/gpu/drm/radeon/rv770.c
index 595ac638039..dd4f02096a8 100644
--- a/drivers/gpu/drm/radeon/rv770.c
+++ b/drivers/gpu/drm/radeon/rv770.c
@@ -92,7 +92,7 @@ int rv770_pcie_gart_enable(struct radeon_device *rdev)
void rv770_pcie_gart_disable(struct radeon_device *rdev)
{
u32 tmp;
- int i;
+ int i, r;
/* Disable all tables */
for (i = 0; i < 7; i++)
@@ -113,8 +113,12 @@ void rv770_pcie_gart_disable(struct radeon_device *rdev)
WREG32(MC_VM_MB_L1_TLB2_CNTL, tmp);
WREG32(MC_VM_MB_L1_TLB3_CNTL, tmp);
if (rdev->gart.table.vram.robj) {
- radeon_object_kunmap(rdev->gart.table.vram.robj);
- radeon_object_unpin(rdev->gart.table.vram.robj);
+ r = radeon_bo_reserve(rdev->gart.table.vram.robj, false);
+ if (likely(r == 0)) {
+ radeon_bo_kunmap(rdev->gart.table.vram.robj);
+ radeon_bo_unpin(rdev->gart.table.vram.robj);
+ radeon_bo_unreserve(rdev->gart.table.vram.robj);
+ }
}
}
@@ -529,11 +533,11 @@ static void rv770_gpu_init(struct radeon_device *rdev)
if (rdev->family == CHIP_RV770)
gb_tiling_config |= BANK_TILING(1);
else
- gb_tiling_config |= BANK_TILING((mc_arb_ramcfg & NOOFBANK_SHIFT) >> NOOFBANK_MASK);
+ gb_tiling_config |= BANK_TILING((mc_arb_ramcfg & NOOFBANK_MASK) >> NOOFBANK_SHIFT);
gb_tiling_config |= GROUP_SIZE(0);
- if (((mc_arb_ramcfg & NOOFROWS_MASK) & NOOFROWS_SHIFT) > 3) {
+ if (((mc_arb_ramcfg & NOOFROWS_MASK) >> NOOFROWS_SHIFT) > 3) {
gb_tiling_config |= ROW_TILING(3);
gb_tiling_config |= SAMPLE_SPLIT(3);
} else {
@@ -579,14 +583,14 @@ static void rv770_gpu_init(struct radeon_device *rdev)
/* set HW defaults for 3D engine */
WREG32(CP_QUEUE_THRESHOLDS, (ROQ_IB1_START(0x16) |
- ROQ_IB2_START(0x2b)));
+ ROQ_IB2_START(0x2b)));
WREG32(CP_MEQ_THRESHOLDS, STQ_SPLIT(0x30));
WREG32(TA_CNTL_AUX, (DISABLE_CUBE_ANISO |
- SYNC_GRADIENT |
- SYNC_WALKER |
- SYNC_ALIGNER));
+ SYNC_GRADIENT |
+ SYNC_WALKER |
+ SYNC_ALIGNER));
sx_debug_1 = RREG32(SX_DEBUG_1);
sx_debug_1 |= ENABLE_NEW_SMX_ADDRESS;
@@ -598,9 +602,9 @@ static void rv770_gpu_init(struct radeon_device *rdev)
WREG32(SMX_DC_CTL0, smx_dc_ctl0);
WREG32(SMX_EVENT_CTL, (ES_FLUSH_CTL(4) |
- GS_FLUSH_CTL(4) |
- ACK_FLUSH_CTL(3) |
- SYNC_FLUSH_CTL));
+ GS_FLUSH_CTL(4) |
+ ACK_FLUSH_CTL(3) |
+ SYNC_FLUSH_CTL));
if (rdev->family == CHIP_RV770)
WREG32(DB_DEBUG3, DB_CLK_OFF_DELAY(0x1f));
@@ -611,12 +615,12 @@ static void rv770_gpu_init(struct radeon_device *rdev)
}
WREG32(SX_EXPORT_BUFFER_SIZES, (COLOR_BUFFER_SIZE((rdev->config.rv770.sx_max_export_size / 4) - 1) |
- POSITION_BUFFER_SIZE((rdev->config.rv770.sx_max_export_pos_size / 4) - 1) |
- SMX_BUFFER_SIZE((rdev->config.rv770.sx_max_export_smx_size / 4) - 1)));
+ POSITION_BUFFER_SIZE((rdev->config.rv770.sx_max_export_pos_size / 4) - 1) |
+ SMX_BUFFER_SIZE((rdev->config.rv770.sx_max_export_smx_size / 4) - 1)));
WREG32(PA_SC_FIFO_SIZE, (SC_PRIM_FIFO_SIZE(rdev->config.rv770.sc_prim_fifo_size) |
- SC_HIZ_TILE_FIFO_SIZE(rdev->config.rv770.sc_hiz_tile_fifo_size) |
- SC_EARLYZ_TILE_FIFO_SIZE(rdev->config.rv770.sc_earlyz_tile_fifo_fize)));
+ SC_HIZ_TILE_FIFO_SIZE(rdev->config.rv770.sc_hiz_tile_fifo_size) |
+ SC_EARLYZ_TILE_FIFO_SIZE(rdev->config.rv770.sc_earlyz_tile_fifo_fize)));
WREG32(PA_SC_MULTI_CHIP_CNTL, 0);
@@ -774,14 +778,36 @@ int rv770_mc_init(struct radeon_device *rdev)
{
fixed20_12 a;
u32 tmp;
+ int chansize, numchan;
int r;
/* Get VRAM information */
- /* FIXME: Don't know how to determine vram width, need to check
- * vram_width usage
- */
- rdev->mc.vram_width = 128;
rdev->mc.vram_is_ddr = true;
+ tmp = RREG32(MC_ARB_RAMCFG);
+ if (tmp & CHANSIZE_OVERRIDE) {
+ chansize = 16;
+ } else if (tmp & CHANSIZE_MASK) {
+ chansize = 64;
+ } else {
+ chansize = 32;
+ }
+ tmp = RREG32(MC_SHARED_CHMAP);
+ switch ((tmp & NOOFCHAN_MASK) >> NOOFCHAN_SHIFT) {
+ case 0:
+ default:
+ numchan = 1;
+ break;
+ case 1:
+ numchan = 2;
+ break;
+ case 2:
+ numchan = 4;
+ break;
+ case 3:
+ numchan = 8;
+ break;
+ }
+ rdev->mc.vram_width = numchan * chansize;
/* Could aper size report 0 ? */
rdev->mc.aper_base = drm_get_resource_start(rdev->ddev, 0);
rdev->mc.aper_size = drm_get_resource_len(rdev->ddev, 0);
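
The hard-coded 128-bit VRAM width is replaced by decoding the memory controller straps; the CHANSIZE_*/NOOFCHAN_* defines are added to rv770d.h later in this diff. The decode in the hunk above boils down to:

	/* Per-channel width from MC_ARB_RAMCFG, channel count from
	 * MC_SHARED_CHMAP; the NOOFCHAN field encodes 1/2/4/8 channels
	 * as the values 0/1/2/3. */
	u32 ramcfg = RREG32(MC_ARB_RAMCFG);
	u32 chmap  = RREG32(MC_SHARED_CHMAP);
	int chansize = (ramcfg & CHANSIZE_OVERRIDE) ? 16 :
		       (ramcfg & CHANSIZE_MASK)     ? 64 : 32;
	int numchan  = 1 << ((chmap & NOOFCHAN_MASK) >> NOOFCHAN_SHIFT);

	rdev->mc.vram_width = numchan * chansize;  /* e.g. 4 channels x 64 bits = 256 */
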
@@ -858,13 +884,26 @@ static int rv770_startup(struct radeon_device *rdev)
}
rv770_gpu_init(rdev);
- r = radeon_object_pin(rdev->r600_blit.shader_obj, RADEON_GEM_DOMAIN_VRAM,
- &rdev->r600_blit.shader_gpu_addr);
+ r = radeon_bo_reserve(rdev->r600_blit.shader_obj, false);
+ if (unlikely(r != 0))
+ return r;
+ r = radeon_bo_pin(rdev->r600_blit.shader_obj, RADEON_GEM_DOMAIN_VRAM,
+ &rdev->r600_blit.shader_gpu_addr);
+ radeon_bo_unreserve(rdev->r600_blit.shader_obj);
if (r) {
DRM_ERROR("failed to pin blit object %d\n", r);
return r;
}
+ /* Enable IRQ */
+ r = r600_irq_init(rdev);
+ if (r) {
+ DRM_ERROR("radeon: IH init failed (%d).\n", r);
+ radeon_irq_kms_fini(rdev);
+ return r;
+ }
+ r600_irq_set(rdev);
+
r = radeon_ring_init(rdev, rdev->cp.ring_size);
if (r)
return r;
@@ -912,13 +951,19 @@ int rv770_resume(struct radeon_device *rdev)
int rv770_suspend(struct radeon_device *rdev)
{
+ int r;
+
/* FIXME: we should wait for ring to be empty */
r700_cp_stop(rdev);
rdev->cp.ready = false;
r600_wb_disable(rdev);
rv770_pcie_gart_disable(rdev);
/* unpin shaders bo */
- radeon_object_unpin(rdev->r600_blit.shader_obj);
+ r = radeon_bo_reserve(rdev->r600_blit.shader_obj, false);
+ if (likely(r == 0)) {
+ radeon_bo_unpin(rdev->r600_blit.shader_obj);
+ radeon_bo_unreserve(rdev->r600_blit.shader_obj);
+ }
return 0;
}
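
Wherever the old radeon_object_{pin,unpin,kunmap} calls are converted to the radeon_bo API in this patch, the caller now takes the buffer reservation explicitly around the operation. A minimal sketch of the two recurring shapes, with bo and gpu_addr standing in for the driver's own objects:

	/* Pin path (startup): a failed reserve is unexpected but handled. */
	r = radeon_bo_reserve(bo, false);
	if (unlikely(r != 0))
		return r;
	r = radeon_bo_pin(bo, RADEON_GEM_DOMAIN_VRAM, &gpu_addr);
	radeon_bo_unreserve(bo);
	if (r)
		return r;

	/* Unpin path (suspend/teardown): best effort, errors not propagated. */
	r = radeon_bo_reserve(bo, false);
	if (likely(r == 0)) {
		radeon_bo_unpin(bo);
		radeon_bo_unreserve(bo);
	}
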
@@ -953,7 +998,11 @@ int rv770_init(struct radeon_device *rdev)
if (r)
return r;
/* Post card if necessary */
- if (!r600_card_posted(rdev) && rdev->bios) {
+ if (!r600_card_posted(rdev)) {
+ if (!rdev->bios) {
+ dev_err(rdev->dev, "Card not posted and no BIOS - ignoring\n");
+ return -EINVAL;
+ }
DRM_INFO("GPU not posted. posting now...\n");
atom_asic_init(rdev->mode_info.atom_context);
}
@@ -961,10 +1010,13 @@ int rv770_init(struct radeon_device *rdev)
r600_scratch_init(rdev);
/* Initialize surface registers */
radeon_surface_init(rdev);
+ /* Initialize clocks */
radeon_get_clock_info(rdev->ddev);
r = radeon_clocks_init(rdev);
if (r)
return r;
+ /* Initialize power management */
+ radeon_pm_init(rdev);
/* Fence driver */
r = radeon_fence_driver_init(rdev);
if (r)
@@ -973,14 +1025,22 @@ int rv770_init(struct radeon_device *rdev)
if (r)
return r;
/* Memory manager */
- r = radeon_object_init(rdev);
+ r = radeon_bo_init(rdev);
if (r)
return r;
+
+ r = radeon_irq_kms_init(rdev);
+ if (r)
+ return r;
+
rdev->cp.ring_obj = NULL;
r600_ring_init(rdev, 1024 * 1024);
- if (!rdev->me_fw || !rdev->pfp_fw) {
- r = r600_cp_init_microcode(rdev);
+ rdev->ih.ring_obj = NULL;
+ r600_ih_ring_init(rdev, 64 * 1024);
+
+ if (!rdev->me_fw || !rdev->pfp_fw || !rdev->rlc_fw) {
+ r = r600_init_microcode(rdev);
if (r) {
DRM_ERROR("Failed to load firmware!\n");
return r;
@@ -1026,6 +1086,8 @@ void rv770_fini(struct radeon_device *rdev)
rv770_suspend(rdev);
r600_blit_fini(rdev);
+ r600_irq_fini(rdev);
+ radeon_irq_kms_fini(rdev);
radeon_ring_fini(rdev);
r600_wb_fini(rdev);
rv770_pcie_gart_fini(rdev);
@@ -1034,7 +1096,7 @@ void rv770_fini(struct radeon_device *rdev)
radeon_clocks_fini(rdev);
if (rdev->flags & RADEON_IS_AGP)
radeon_agp_fini(rdev);
- radeon_object_fini(rdev);
+ radeon_bo_fini(rdev);
radeon_atombios_fini(rdev);
kfree(rdev->bios);
rdev->bios = NULL;
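
rv770 also gains r600-style interrupt handling in this patch: radeon_irq_kms_init() and a 64 KB IH ring are set up in rv770_init(), the interrupt handler is programmed and its sources unmasked in rv770_startup(), and both are torn down again in rv770_fini(). A condensed view of the startup side added above:

	/* IRQ bring-up added to rv770_startup(); DRM_ERROR reporting trimmed. */
	r = r600_irq_init(rdev);	/* set up and program the IH ring */
	if (r) {
		radeon_irq_kms_fini(rdev);
		return r;
	}
	r600_irq_set(rdev);		/* unmask the interrupt sources the driver uses */
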
diff --git a/drivers/gpu/drm/radeon/rv770d.h b/drivers/gpu/drm/radeon/rv770d.h
index 4b9c3d6396f..a1367ab6f26 100644
--- a/drivers/gpu/drm/radeon/rv770d.h
+++ b/drivers/gpu/drm/radeon/rv770d.h
@@ -129,6 +129,10 @@
#define HDP_REG_COHERENCY_FLUSH_CNTL 0x54A0
#define HDP_TILING_CONFIG 0x2F3C
+#define MC_SHARED_CHMAP 0x2004
+#define NOOFCHAN_SHIFT 12
+#define NOOFCHAN_MASK 0x00003000
+
#define MC_ARB_RAMCFG 0x2760
#define NOOFBANK_SHIFT 0
#define NOOFBANK_MASK 0x00000003
@@ -142,6 +146,7 @@
#define CHANSIZE_MASK 0x00000100
#define BURSTLENGTH_SHIFT 9
#define BURSTLENGTH_MASK 0x00000200
+#define CHANSIZE_OVERRIDE (1 << 11)
#define MC_VM_AGP_TOP 0x2028
#define MC_VM_AGP_BOT 0x202C
#define MC_VM_AGP_BASE 0x2030
diff --git a/drivers/gpu/drm/ttm/ttm_tt.c b/drivers/gpu/drm/ttm/ttm_tt.c
index 90ea46aed05..9c2b1cc5dba 100644
--- a/drivers/gpu/drm/ttm/ttm_tt.c
+++ b/drivers/gpu/drm/ttm/ttm_tt.c
@@ -280,6 +280,7 @@ int ttm_tt_set_placement_caching(struct ttm_tt *ttm, uint32_t placement)
return ttm_tt_set_caching(ttm, state);
}
+EXPORT_SYMBOL(ttm_tt_set_placement_caching);
static void ttm_tt_free_alloced_pages(struct ttm_tt *ttm)
{
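
Finally, exporting ttm_tt_set_placement_caching() lets drivers built as modules (presumably radeon, given the rest of this series) adjust a TTM's page caching state to match a destination placement during buffer moves. A hypothetical call site, with ttm and new_mem standing in for the driver's own objects:

	/* Ask TTM to switch the pages' caching attributes to whatever the
	 * destination placement flags require (cached/WC/uncached). */
	r = ttm_tt_set_placement_caching(ttm, new_mem->placement);
	if (r)
		return r;
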