Diffstat (limited to 'drivers/gpu/drm/radeon')
-rw-r--r--  drivers/gpu/drm/radeon/atombios_crtc.c     |  41
-rw-r--r--  drivers/gpu/drm/radeon/atombios_dp.c       |   2
-rw-r--r--  drivers/gpu/drm/radeon/evergreen.c          |  60
-rw-r--r--  drivers/gpu/drm/radeon/evergreen_cs.c       | 195
-rw-r--r--  drivers/gpu/drm/radeon/evergreen_reg.h      |  29
-rw-r--r--  drivers/gpu/drm/radeon/evergreend.h         |  31
-rw-r--r--  drivers/gpu/drm/radeon/r100.c               |   7
-rw-r--r--  drivers/gpu/drm/radeon/r300.c               |  94
-rw-r--r--  drivers/gpu/drm/radeon/r600.c               | 118
-rw-r--r--  drivers/gpu/drm/radeon/r600_cs.c            |  26
-rw-r--r--  drivers/gpu/drm/radeon/radeon.h             |  53
-rw-r--r--  drivers/gpu/drm/radeon/radeon_acpi.c        |  11
-rw-r--r--  drivers/gpu/drm/radeon/radeon_asic.c        |   2
-rw-r--r--  drivers/gpu/drm/radeon/radeon_asic.h        |   1
-rw-r--r--  drivers/gpu/drm/radeon/radeon_atombios.c    | 304
-rw-r--r--  drivers/gpu/drm/radeon/radeon_benchmark.c   |   2
-rw-r--r--  drivers/gpu/drm/radeon/radeon_combios.c     |  27
-rw-r--r--  drivers/gpu/drm/radeon/radeon_cs.c          |  25
-rw-r--r--  drivers/gpu/drm/radeon/radeon_device.c      |  16
-rw-r--r--  drivers/gpu/drm/radeon/radeon_drv.c         |   3
-rw-r--r--  drivers/gpu/drm/radeon/radeon_encoders.c    |   7
-rw-r--r--  drivers/gpu/drm/radeon/radeon_legacy_crtc.c |   6
-rw-r--r--  drivers/gpu/drm/radeon/radeon_pm.c          |  18
-rw-r--r--  drivers/gpu/drm/radeon/rs600.c              |   7
-rw-r--r--  drivers/gpu/drm/radeon/rv770.c              |   7
25 files changed, 708 insertions(+), 384 deletions(-)
diff --git a/drivers/gpu/drm/radeon/atombios_crtc.c b/drivers/gpu/drm/radeon/atombios_crtc.c
index 87921c88a95..2b97262e3ab 100644
--- a/drivers/gpu/drm/radeon/atombios_crtc.c
+++ b/drivers/gpu/drm/radeon/atombios_crtc.c
@@ -1107,9 +1107,40 @@ static int dce4_crtc_do_set_base(struct drm_crtc *crtc,
return -EINVAL;
}
- if (tiling_flags & RADEON_TILING_MACRO)
+ if (tiling_flags & RADEON_TILING_MACRO) {
+ if (rdev->family >= CHIP_CAYMAN)
+ tmp = rdev->config.cayman.tile_config;
+ else
+ tmp = rdev->config.evergreen.tile_config;
+
+ switch ((tmp & 0xf0) >> 4) {
+ case 0: /* 4 banks */
+ fb_format |= EVERGREEN_GRPH_NUM_BANKS(EVERGREEN_ADDR_SURF_4_BANK);
+ break;
+ case 1: /* 8 banks */
+ default:
+ fb_format |= EVERGREEN_GRPH_NUM_BANKS(EVERGREEN_ADDR_SURF_8_BANK);
+ break;
+ case 2: /* 16 banks */
+ fb_format |= EVERGREEN_GRPH_NUM_BANKS(EVERGREEN_ADDR_SURF_16_BANK);
+ break;
+ }
+
+ switch ((tmp & 0xf000) >> 12) {
+ case 0: /* 1KB rows */
+ default:
+ fb_format |= EVERGREEN_GRPH_TILE_SPLIT(EVERGREEN_ADDR_SURF_TILE_SPLIT_1KB);
+ break;
+ case 1: /* 2KB rows */
+ fb_format |= EVERGREEN_GRPH_TILE_SPLIT(EVERGREEN_ADDR_SURF_TILE_SPLIT_2KB);
+ break;
+ case 2: /* 4KB rows */
+ fb_format |= EVERGREEN_GRPH_TILE_SPLIT(EVERGREEN_ADDR_SURF_TILE_SPLIT_4KB);
+ break;
+ }
+
fb_format |= EVERGREEN_GRPH_ARRAY_MODE(EVERGREEN_GRPH_ARRAY_2D_TILED_THIN1);
- else if (tiling_flags & RADEON_TILING_MICRO)
+ } else if (tiling_flags & RADEON_TILING_MICRO)
fb_format |= EVERGREEN_GRPH_ARRAY_MODE(EVERGREEN_GRPH_ARRAY_1D_TILED_THIN1);
switch (radeon_crtc->crtc_id) {
@@ -1522,12 +1553,6 @@ static bool atombios_crtc_mode_fixup(struct drm_crtc *crtc,
struct drm_display_mode *mode,
struct drm_display_mode *adjusted_mode)
{
- struct drm_device *dev = crtc->dev;
- struct radeon_device *rdev = dev->dev_private;
-
- /* adjust pm to upcoming mode change */
- radeon_pm_compute_clocks(rdev);
-
if (!radeon_crtc_scaling_mode_fixup(crtc, mode, adjusted_mode))
return false;
return true;
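
The new dce4_crtc_do_set_base() hunk above stops hardcoding the scanout tiling parameters and instead derives the bank count and tile split from the GPU's tile_config word. A minimal standalone sketch of that decode, with the field positions taken from the switch statements in the hunk rather than from hardware documentation:

static unsigned tile_config_num_banks(u32 tile_config)
{
	/* bits [7:4]: number of DRAM banks */
	switch ((tile_config & 0xf0) >> 4) {
	case 0:  return 4;
	case 2:  return 16;
	case 1:
	default: return 8;
	}
}

static unsigned tile_config_row_size_kb(u32 tile_config)
{
	/* bits [15:12]: row size in KB, used as the tile split */
	switch ((tile_config & 0xf000) >> 12) {
	case 1:  return 2;
	case 2:  return 4;
	case 0:
	default: return 1;
	}
}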
diff --git a/drivers/gpu/drm/radeon/atombios_dp.c b/drivers/gpu/drm/radeon/atombios_dp.c
index a0de48542f7..6fb335a4fdd 100644
--- a/drivers/gpu/drm/radeon/atombios_dp.c
+++ b/drivers/gpu/drm/radeon/atombios_dp.c
@@ -283,7 +283,7 @@ int radeon_dp_i2c_aux_ch(struct i2c_adapter *adapter, int mode,
}
}
- DRM_ERROR("aux i2c too many retries, giving up\n");
+ DRM_DEBUG_KMS("aux i2c too many retries, giving up\n");
return -EREMOTEIO;
}
diff --git a/drivers/gpu/drm/radeon/evergreen.c b/drivers/gpu/drm/radeon/evergreen.c
index e4c384b9511..5e00d1670aa 100644
--- a/drivers/gpu/drm/radeon/evergreen.c
+++ b/drivers/gpu/drm/radeon/evergreen.c
@@ -82,6 +82,7 @@ u32 evergreen_page_flip(struct radeon_device *rdev, int crtc_id, u64 crtc_base)
{
struct radeon_crtc *radeon_crtc = rdev->mode_info.crtcs[crtc_id];
u32 tmp = RREG32(EVERGREEN_GRPH_UPDATE + radeon_crtc->crtc_offset);
+ int i;
/* Lock the graphics update lock */
tmp |= EVERGREEN_GRPH_UPDATE_LOCK;
@@ -99,7 +100,11 @@ u32 evergreen_page_flip(struct radeon_device *rdev, int crtc_id, u64 crtc_base)
(u32)crtc_base);
/* Wait for update_pending to go high. */
- while (!(RREG32(EVERGREEN_GRPH_UPDATE + radeon_crtc->crtc_offset) & EVERGREEN_GRPH_SURFACE_UPDATE_PENDING));
+ for (i = 0; i < rdev->usec_timeout; i++) {
+ if (RREG32(EVERGREEN_GRPH_UPDATE + radeon_crtc->crtc_offset) & EVERGREEN_GRPH_SURFACE_UPDATE_PENDING)
+ break;
+ udelay(1);
+ }
DRM_DEBUG("Update pending now high. Unlocking vupdate_lock.\n");
/* Unlock the lock, so double-buffering can take place inside vblank */
@@ -157,6 +162,57 @@ int sumo_get_temp(struct radeon_device *rdev)
return actual_temp * 1000;
}
+void sumo_pm_init_profile(struct radeon_device *rdev)
+{
+ int idx;
+
+ /* default */
+ rdev->pm.profiles[PM_PROFILE_DEFAULT_IDX].dpms_off_ps_idx = rdev->pm.default_power_state_index;
+ rdev->pm.profiles[PM_PROFILE_DEFAULT_IDX].dpms_on_ps_idx = rdev->pm.default_power_state_index;
+ rdev->pm.profiles[PM_PROFILE_DEFAULT_IDX].dpms_off_cm_idx = 0;
+ rdev->pm.profiles[PM_PROFILE_DEFAULT_IDX].dpms_on_cm_idx = 0;
+
+ /* low,mid sh/mh */
+ if (rdev->flags & RADEON_IS_MOBILITY)
+ idx = radeon_pm_get_type_index(rdev, POWER_STATE_TYPE_BATTERY, 0);
+ else
+ idx = radeon_pm_get_type_index(rdev, POWER_STATE_TYPE_PERFORMANCE, 0);
+
+ rdev->pm.profiles[PM_PROFILE_LOW_SH_IDX].dpms_off_ps_idx = idx;
+ rdev->pm.profiles[PM_PROFILE_LOW_SH_IDX].dpms_on_ps_idx = idx;
+ rdev->pm.profiles[PM_PROFILE_LOW_SH_IDX].dpms_off_cm_idx = 0;
+ rdev->pm.profiles[PM_PROFILE_LOW_SH_IDX].dpms_on_cm_idx = 0;
+
+ rdev->pm.profiles[PM_PROFILE_LOW_MH_IDX].dpms_off_ps_idx = idx;
+ rdev->pm.profiles[PM_PROFILE_LOW_MH_IDX].dpms_on_ps_idx = idx;
+ rdev->pm.profiles[PM_PROFILE_LOW_MH_IDX].dpms_off_cm_idx = 0;
+ rdev->pm.profiles[PM_PROFILE_LOW_MH_IDX].dpms_on_cm_idx = 0;
+
+ rdev->pm.profiles[PM_PROFILE_MID_SH_IDX].dpms_off_ps_idx = idx;
+ rdev->pm.profiles[PM_PROFILE_MID_SH_IDX].dpms_on_ps_idx = idx;
+ rdev->pm.profiles[PM_PROFILE_MID_SH_IDX].dpms_off_cm_idx = 0;
+ rdev->pm.profiles[PM_PROFILE_MID_SH_IDX].dpms_on_cm_idx = 0;
+
+ rdev->pm.profiles[PM_PROFILE_MID_MH_IDX].dpms_off_ps_idx = idx;
+ rdev->pm.profiles[PM_PROFILE_MID_MH_IDX].dpms_on_ps_idx = idx;
+ rdev->pm.profiles[PM_PROFILE_MID_MH_IDX].dpms_off_cm_idx = 0;
+ rdev->pm.profiles[PM_PROFILE_MID_MH_IDX].dpms_on_cm_idx = 0;
+
+ /* high sh/mh */
+ idx = radeon_pm_get_type_index(rdev, POWER_STATE_TYPE_PERFORMANCE, 0);
+ rdev->pm.profiles[PM_PROFILE_HIGH_SH_IDX].dpms_off_ps_idx = idx;
+ rdev->pm.profiles[PM_PROFILE_HIGH_SH_IDX].dpms_on_ps_idx = idx;
+ rdev->pm.profiles[PM_PROFILE_HIGH_SH_IDX].dpms_off_cm_idx = 0;
+ rdev->pm.profiles[PM_PROFILE_HIGH_SH_IDX].dpms_on_cm_idx =
+ rdev->pm.power_state[idx].num_clock_modes - 1;
+
+ rdev->pm.profiles[PM_PROFILE_HIGH_MH_IDX].dpms_off_ps_idx = idx;
+ rdev->pm.profiles[PM_PROFILE_HIGH_MH_IDX].dpms_on_ps_idx = idx;
+ rdev->pm.profiles[PM_PROFILE_HIGH_MH_IDX].dpms_off_cm_idx = 0;
+ rdev->pm.profiles[PM_PROFILE_HIGH_MH_IDX].dpms_on_cm_idx =
+ rdev->pm.power_state[idx].num_clock_modes - 1;
+}
+
void evergreen_pm_misc(struct radeon_device *rdev)
{
int req_ps_idx = rdev->pm.requested_power_state_index;
@@ -1219,7 +1275,7 @@ void evergreen_mc_program(struct radeon_device *rdev)
WREG32(MC_VM_SYSTEM_APERTURE_HIGH_ADDR,
rdev->mc.vram_end >> 12);
}
- WREG32(MC_VM_SYSTEM_APERTURE_DEFAULT_ADDR, 0);
+ WREG32(MC_VM_SYSTEM_APERTURE_DEFAULT_ADDR, rdev->vram_scratch.gpu_addr >> 12);
if (rdev->flags & RADEON_IS_IGP) {
tmp = RREG32(MC_FUS_VM_FB_OFFSET) & 0x000FFFFF;
tmp |= ((rdev->mc.vram_end >> 20) & 0xF) << 24;
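
Both page-flip hunks in this series (here and in r100.c further down) replace an unbounded busy-wait on the update-pending bit with a poll bounded by rdev->usec_timeout. The shape of that pattern as a generic helper, with placeholder register/bit names rather than real radeon ones:

/* Bounded-poll sketch: never spin forever on a status bit.
 * 'reg' and 'bit' are placeholders, not real radeon registers. */
static bool wait_for_status_bit(struct radeon_device *rdev, u32 reg, u32 bit)
{
	int i;

	for (i = 0; i < rdev->usec_timeout; i++) {
		if (RREG32(reg) & bit)
			return true;
		udelay(1);
	}
	return false; /* the caller decides whether a timeout is fatal */
}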
diff --git a/drivers/gpu/drm/radeon/evergreen_cs.c b/drivers/gpu/drm/radeon/evergreen_cs.c
index 7fdfa8ea757..cd4590aae15 100644
--- a/drivers/gpu/drm/radeon/evergreen_cs.c
+++ b/drivers/gpu/drm/radeon/evergreen_cs.c
@@ -38,6 +38,7 @@ struct evergreen_cs_track {
u32 group_size;
u32 nbanks;
u32 npipes;
+ u32 row_size;
/* value we track */
u32 nsamples;
u32 cb_color_base_last[12];
@@ -77,6 +78,44 @@ struct evergreen_cs_track {
struct radeon_bo *db_s_write_bo;
};
+static u32 evergreen_cs_get_aray_mode(u32 tiling_flags)
+{
+ if (tiling_flags & RADEON_TILING_MACRO)
+ return ARRAY_2D_TILED_THIN1;
+ else if (tiling_flags & RADEON_TILING_MICRO)
+ return ARRAY_1D_TILED_THIN1;
+ else
+ return ARRAY_LINEAR_GENERAL;
+}
+
+static u32 evergreen_cs_get_num_banks(u32 nbanks)
+{
+ switch (nbanks) {
+ case 2:
+ return ADDR_SURF_2_BANK;
+ case 4:
+ return ADDR_SURF_4_BANK;
+ case 8:
+ default:
+ return ADDR_SURF_8_BANK;
+ case 16:
+ return ADDR_SURF_16_BANK;
+ }
+}
+
+static u32 evergreen_cs_get_tile_split(u32 row_size)
+{
+ switch (row_size) {
+ case 1:
+ default:
+ return ADDR_SURF_TILE_SPLIT_1KB;
+ case 2:
+ return ADDR_SURF_TILE_SPLIT_2KB;
+ case 4:
+ return ADDR_SURF_TILE_SPLIT_4KB;
+ }
+}
+
static void evergreen_cs_track_init(struct evergreen_cs_track *track)
{
int i;
@@ -480,21 +519,22 @@ static int evergreen_cs_check_reg(struct radeon_cs_parser *p, u32 reg, u32 idx)
}
break;
case DB_Z_INFO:
- r = evergreen_cs_packet_next_reloc(p, &reloc);
- if (r) {
- dev_warn(p->dev, "bad SET_CONTEXT_REG "
- "0x%04X\n", reg);
- return -EINVAL;
- }
track->db_z_info = radeon_get_ib_value(p, idx);
- ib[idx] &= ~Z_ARRAY_MODE(0xf);
- track->db_z_info &= ~Z_ARRAY_MODE(0xf);
- if (reloc->lobj.tiling_flags & RADEON_TILING_MACRO) {
- ib[idx] |= Z_ARRAY_MODE(ARRAY_2D_TILED_THIN1);
- track->db_z_info |= Z_ARRAY_MODE(ARRAY_2D_TILED_THIN1);
- } else {
- ib[idx] |= Z_ARRAY_MODE(ARRAY_1D_TILED_THIN1);
- track->db_z_info |= Z_ARRAY_MODE(ARRAY_1D_TILED_THIN1);
+ if (!p->keep_tiling_flags) {
+ r = evergreen_cs_packet_next_reloc(p, &reloc);
+ if (r) {
+ dev_warn(p->dev, "bad SET_CONTEXT_REG "
+ "0x%04X\n", reg);
+ return -EINVAL;
+ }
+ ib[idx] &= ~Z_ARRAY_MODE(0xf);
+ track->db_z_info &= ~Z_ARRAY_MODE(0xf);
+ ib[idx] |= Z_ARRAY_MODE(evergreen_cs_get_aray_mode(reloc->lobj.tiling_flags));
+ track->db_z_info |= Z_ARRAY_MODE(evergreen_cs_get_aray_mode(reloc->lobj.tiling_flags));
+ if (reloc->lobj.tiling_flags & RADEON_TILING_MACRO) {
+ ib[idx] |= DB_NUM_BANKS(evergreen_cs_get_num_banks(track->nbanks));
+ ib[idx] |= DB_TILE_SPLIT(evergreen_cs_get_tile_split(track->row_size));
+ }
}
break;
case DB_STENCIL_INFO:
@@ -607,40 +647,34 @@ static int evergreen_cs_check_reg(struct radeon_cs_parser *p, u32 reg, u32 idx)
case CB_COLOR5_INFO:
case CB_COLOR6_INFO:
case CB_COLOR7_INFO:
- r = evergreen_cs_packet_next_reloc(p, &reloc);
- if (r) {
- dev_warn(p->dev, "bad SET_CONTEXT_REG "
- "0x%04X\n", reg);
- return -EINVAL;
- }
tmp = (reg - CB_COLOR0_INFO) / 0x3c;
track->cb_color_info[tmp] = radeon_get_ib_value(p, idx);
- if (reloc->lobj.tiling_flags & RADEON_TILING_MACRO) {
- ib[idx] |= CB_ARRAY_MODE(ARRAY_2D_TILED_THIN1);
- track->cb_color_info[tmp] |= CB_ARRAY_MODE(ARRAY_2D_TILED_THIN1);
- } else if (reloc->lobj.tiling_flags & RADEON_TILING_MICRO) {
- ib[idx] |= CB_ARRAY_MODE(ARRAY_1D_TILED_THIN1);
- track->cb_color_info[tmp] |= CB_ARRAY_MODE(ARRAY_1D_TILED_THIN1);
+ if (!p->keep_tiling_flags) {
+ r = evergreen_cs_packet_next_reloc(p, &reloc);
+ if (r) {
+ dev_warn(p->dev, "bad SET_CONTEXT_REG "
+ "0x%04X\n", reg);
+ return -EINVAL;
+ }
+ ib[idx] |= CB_ARRAY_MODE(evergreen_cs_get_aray_mode(reloc->lobj.tiling_flags));
+ track->cb_color_info[tmp] |= CB_ARRAY_MODE(evergreen_cs_get_aray_mode(reloc->lobj.tiling_flags));
}
break;
case CB_COLOR8_INFO:
case CB_COLOR9_INFO:
case CB_COLOR10_INFO:
case CB_COLOR11_INFO:
- r = evergreen_cs_packet_next_reloc(p, &reloc);
- if (r) {
- dev_warn(p->dev, "bad SET_CONTEXT_REG "
- "0x%04X\n", reg);
- return -EINVAL;
- }
tmp = ((reg - CB_COLOR8_INFO) / 0x1c) + 8;
track->cb_color_info[tmp] = radeon_get_ib_value(p, idx);
- if (reloc->lobj.tiling_flags & RADEON_TILING_MACRO) {
- ib[idx] |= CB_ARRAY_MODE(ARRAY_2D_TILED_THIN1);
- track->cb_color_info[tmp] |= CB_ARRAY_MODE(ARRAY_2D_TILED_THIN1);
- } else if (reloc->lobj.tiling_flags & RADEON_TILING_MICRO) {
- ib[idx] |= CB_ARRAY_MODE(ARRAY_1D_TILED_THIN1);
- track->cb_color_info[tmp] |= CB_ARRAY_MODE(ARRAY_1D_TILED_THIN1);
+ if (!p->keep_tiling_flags) {
+ r = evergreen_cs_packet_next_reloc(p, &reloc);
+ if (r) {
+ dev_warn(p->dev, "bad SET_CONTEXT_REG "
+ "0x%04X\n", reg);
+ return -EINVAL;
+ }
+ ib[idx] |= CB_ARRAY_MODE(evergreen_cs_get_aray_mode(reloc->lobj.tiling_flags));
+ track->cb_color_info[tmp] |= CB_ARRAY_MODE(evergreen_cs_get_aray_mode(reloc->lobj.tiling_flags));
}
break;
case CB_COLOR0_PITCH:
@@ -695,6 +729,16 @@ static int evergreen_cs_check_reg(struct radeon_cs_parser *p, u32 reg, u32 idx)
case CB_COLOR9_ATTRIB:
case CB_COLOR10_ATTRIB:
case CB_COLOR11_ATTRIB:
+ r = evergreen_cs_packet_next_reloc(p, &reloc);
+ if (r) {
+ dev_warn(p->dev, "bad SET_CONTEXT_REG "
+ "0x%04X\n", reg);
+ return -EINVAL;
+ }
+ if (reloc->lobj.tiling_flags & RADEON_TILING_MACRO) {
+ ib[idx] |= CB_NUM_BANKS(evergreen_cs_get_num_banks(track->nbanks));
+ ib[idx] |= CB_TILE_SPLIT(evergreen_cs_get_tile_split(track->row_size));
+ }
break;
case CB_COLOR0_DIM:
case CB_COLOR1_DIM:
@@ -1311,10 +1355,16 @@ static int evergreen_packet3_check(struct radeon_cs_parser *p,
return -EINVAL;
}
ib[idx+1+(i*8)+2] += (u32)((reloc->lobj.gpu_offset >> 8) & 0xffffffff);
- if (reloc->lobj.tiling_flags & RADEON_TILING_MACRO)
- ib[idx+1+(i*8)+1] |= TEX_ARRAY_MODE(ARRAY_2D_TILED_THIN1);
- else if (reloc->lobj.tiling_flags & RADEON_TILING_MICRO)
- ib[idx+1+(i*8)+1] |= TEX_ARRAY_MODE(ARRAY_1D_TILED_THIN1);
+ if (!p->keep_tiling_flags) {
+ ib[idx+1+(i*8)+1] |=
+ TEX_ARRAY_MODE(evergreen_cs_get_aray_mode(reloc->lobj.tiling_flags));
+ if (reloc->lobj.tiling_flags & RADEON_TILING_MACRO) {
+ ib[idx+1+(i*8)+6] |=
+ TEX_TILE_SPLIT(evergreen_cs_get_tile_split(track->row_size));
+ ib[idx+1+(i*8)+7] |=
+ TEX_NUM_BANKS(evergreen_cs_get_num_banks(track->nbanks));
+ }
+ }
texture = reloc->robj;
/* tex mip base */
r = evergreen_cs_packet_next_reloc(p, &reloc);
@@ -1414,6 +1464,7 @@ int evergreen_cs_parse(struct radeon_cs_parser *p)
{
struct radeon_cs_packet pkt;
struct evergreen_cs_track *track;
+ u32 tmp;
int r;
if (p->track == NULL) {
@@ -1422,9 +1473,63 @@ int evergreen_cs_parse(struct radeon_cs_parser *p)
if (track == NULL)
return -ENOMEM;
evergreen_cs_track_init(track);
- track->npipes = p->rdev->config.evergreen.tiling_npipes;
- track->nbanks = p->rdev->config.evergreen.tiling_nbanks;
- track->group_size = p->rdev->config.evergreen.tiling_group_size;
+ if (p->rdev->family >= CHIP_CAYMAN)
+ tmp = p->rdev->config.cayman.tile_config;
+ else
+ tmp = p->rdev->config.evergreen.tile_config;
+
+ switch (tmp & 0xf) {
+ case 0:
+ track->npipes = 1;
+ break;
+ case 1:
+ default:
+ track->npipes = 2;
+ break;
+ case 2:
+ track->npipes = 4;
+ break;
+ case 3:
+ track->npipes = 8;
+ break;
+ }
+
+ switch ((tmp & 0xf0) >> 4) {
+ case 0:
+ track->nbanks = 4;
+ break;
+ case 1:
+ default:
+ track->nbanks = 8;
+ break;
+ case 2:
+ track->nbanks = 16;
+ break;
+ }
+
+ switch ((tmp & 0xf00) >> 8) {
+ case 0:
+ track->group_size = 256;
+ break;
+ case 1:
+ default:
+ track->group_size = 512;
+ break;
+ }
+
+ switch ((tmp & 0xf000) >> 12) {
+ case 0:
+ track->row_size = 1;
+ break;
+ case 1:
+ default:
+ track->row_size = 2;
+ break;
+ case 2:
+ track->row_size = 4;
+ break;
+ }
+
p->track = track;
}
do {
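
With track->nbanks and track->row_size now filled in from tile_config as above, the register patching in evergreen_cs_check_reg() composes the bank and tile-split fields through the new helpers. An illustrative composition for a macro-tiled color buffer, assuming an 8-bank part with 1KB rows:

/* Illustrative only: how the helpers above build the CB attrib bits
 * for a macro-tiled surface, assuming 8 banks and 1KB rows. */
u32 attrib = 0;

attrib |= CB_NUM_BANKS(evergreen_cs_get_num_banks(8));    /* ADDR_SURF_8_BANK */
attrib |= CB_TILE_SPLIT(evergreen_cs_get_tile_split(1));  /* ADDR_SURF_TILE_SPLIT_1KB */
/* attrib is then OR'ed into ib[idx] for CB_COLORn_ATTRIB, as in the hunk above. */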
diff --git a/drivers/gpu/drm/radeon/evergreen_reg.h b/drivers/gpu/drm/radeon/evergreen_reg.h
index c781c92c345..7d7f2155e34 100644
--- a/drivers/gpu/drm/radeon/evergreen_reg.h
+++ b/drivers/gpu/drm/radeon/evergreen_reg.h
@@ -42,6 +42,17 @@
# define EVERGREEN_GRPH_DEPTH_8BPP 0
# define EVERGREEN_GRPH_DEPTH_16BPP 1
# define EVERGREEN_GRPH_DEPTH_32BPP 2
+# define EVERGREEN_GRPH_NUM_BANKS(x) (((x) & 0x3) << 2)
+# define EVERGREEN_ADDR_SURF_2_BANK 0
+# define EVERGREEN_ADDR_SURF_4_BANK 1
+# define EVERGREEN_ADDR_SURF_8_BANK 2
+# define EVERGREEN_ADDR_SURF_16_BANK 3
+# define EVERGREEN_GRPH_Z(x) (((x) & 0x3) << 4)
+# define EVERGREEN_GRPH_BANK_WIDTH(x) (((x) & 0x3) << 6)
+# define EVERGREEN_ADDR_SURF_BANK_WIDTH_1 0
+# define EVERGREEN_ADDR_SURF_BANK_WIDTH_2 1
+# define EVERGREEN_ADDR_SURF_BANK_WIDTH_4 2
+# define EVERGREEN_ADDR_SURF_BANK_WIDTH_8 3
# define EVERGREEN_GRPH_FORMAT(x) (((x) & 0x7) << 8)
/* 8 BPP */
# define EVERGREEN_GRPH_FORMAT_INDEXED 0
@@ -61,6 +72,24 @@
# define EVERGREEN_GRPH_FORMAT_8B_BGRA1010102 5
# define EVERGREEN_GRPH_FORMAT_RGB111110 6
# define EVERGREEN_GRPH_FORMAT_BGR101111 7
+# define EVERGREEN_GRPH_BANK_HEIGHT(x) (((x) & 0x3) << 11)
+# define EVERGREEN_ADDR_SURF_BANK_HEIGHT_1 0
+# define EVERGREEN_ADDR_SURF_BANK_HEIGHT_2 1
+# define EVERGREEN_ADDR_SURF_BANK_HEIGHT_4 2
+# define EVERGREEN_ADDR_SURF_BANK_HEIGHT_8 3
+# define EVERGREEN_GRPH_TILE_SPLIT(x) (((x) & 0x7) << 13)
+# define EVERGREEN_ADDR_SURF_TILE_SPLIT_64B 0
+# define EVERGREEN_ADDR_SURF_TILE_SPLIT_128B 1
+# define EVERGREEN_ADDR_SURF_TILE_SPLIT_256B 2
+# define EVERGREEN_ADDR_SURF_TILE_SPLIT_512B 3
+# define EVERGREEN_ADDR_SURF_TILE_SPLIT_1KB 4
+# define EVERGREEN_ADDR_SURF_TILE_SPLIT_2KB 5
+# define EVERGREEN_ADDR_SURF_TILE_SPLIT_4KB 6
+# define EVERGREEN_GRPH_MACRO_TILE_ASPECT(x) (((x) & 0x3) << 18)
+# define EVERGREEN_ADDR_SURF_MACRO_TILE_ASPECT_1 0
+# define EVERGREEN_ADDR_SURF_MACRO_TILE_ASPECT_2 1
+# define EVERGREEN_ADDR_SURF_MACRO_TILE_ASPECT_4 2
+# define EVERGREEN_ADDR_SURF_MACRO_TILE_ASPECT_8 3
# define EVERGREEN_GRPH_ARRAY_MODE(x) (((x) & 0x7) << 20)
# define EVERGREEN_GRPH_ARRAY_LINEAR_GENERAL 0
# define EVERGREEN_GRPH_ARRAY_LINEAR_ALIGNED 1
diff --git a/drivers/gpu/drm/radeon/evergreend.h b/drivers/gpu/drm/radeon/evergreend.h
index b937c49054d..e00039e59a7 100644
--- a/drivers/gpu/drm/radeon/evergreend.h
+++ b/drivers/gpu/drm/radeon/evergreend.h
@@ -899,6 +899,10 @@
#define DB_HTILE_DATA_BASE 0x28014
#define DB_Z_INFO 0x28040
# define Z_ARRAY_MODE(x) ((x) << 4)
+# define DB_TILE_SPLIT(x) (((x) & 0x7) << 8)
+# define DB_NUM_BANKS(x) (((x) & 0x3) << 12)
+# define DB_BANK_WIDTH(x) (((x) & 0x3) << 16)
+# define DB_BANK_HEIGHT(x) (((x) & 0x3) << 20)
#define DB_STENCIL_INFO 0x28044
#define DB_Z_READ_BASE 0x28048
#define DB_STENCIL_READ_BASE 0x2804c
@@ -951,6 +955,29 @@
# define CB_SF_EXPORT_FULL 0
# define CB_SF_EXPORT_NORM 1
#define CB_COLOR0_ATTRIB 0x28c74
+# define CB_TILE_SPLIT(x) (((x) & 0x7) << 5)
+# define ADDR_SURF_TILE_SPLIT_64B 0
+# define ADDR_SURF_TILE_SPLIT_128B 1
+# define ADDR_SURF_TILE_SPLIT_256B 2
+# define ADDR_SURF_TILE_SPLIT_512B 3
+# define ADDR_SURF_TILE_SPLIT_1KB 4
+# define ADDR_SURF_TILE_SPLIT_2KB 5
+# define ADDR_SURF_TILE_SPLIT_4KB 6
+# define CB_NUM_BANKS(x) (((x) & 0x3) << 10)
+# define ADDR_SURF_2_BANK 0
+# define ADDR_SURF_4_BANK 1
+# define ADDR_SURF_8_BANK 2
+# define ADDR_SURF_16_BANK 3
+# define CB_BANK_WIDTH(x) (((x) & 0x3) << 13)
+# define ADDR_SURF_BANK_WIDTH_1 0
+# define ADDR_SURF_BANK_WIDTH_2 1
+# define ADDR_SURF_BANK_WIDTH_4 2
+# define ADDR_SURF_BANK_WIDTH_8 3
+# define CB_BANK_HEIGHT(x) (((x) & 0x3) << 16)
+# define ADDR_SURF_BANK_HEIGHT_1 0
+# define ADDR_SURF_BANK_HEIGHT_2 1
+# define ADDR_SURF_BANK_HEIGHT_4 2
+# define ADDR_SURF_BANK_HEIGHT_8 3
#define CB_COLOR0_DIM 0x28c78
/* only CB0-7 blocks have these regs */
#define CB_COLOR0_CMASK 0x28c7c
@@ -1137,7 +1164,11 @@
# define SQ_SEL_1 5
#define SQ_TEX_RESOURCE_WORD5_0 0x30014
#define SQ_TEX_RESOURCE_WORD6_0 0x30018
+# define TEX_TILE_SPLIT(x) (((x) & 0x7) << 29)
#define SQ_TEX_RESOURCE_WORD7_0 0x3001c
+# define TEX_BANK_WIDTH(x) (((x) & 0x3) << 8)
+# define TEX_BANK_HEIGHT(x) (((x) & 0x3) << 10)
+# define TEX_NUM_BANKS(x) (((x) & 0x3) << 16)
#define SQ_VTX_CONSTANT_WORD0_0 0x30000
#define SQ_VTX_CONSTANT_WORD1_0 0x30004
diff --git a/drivers/gpu/drm/radeon/r100.c b/drivers/gpu/drm/radeon/r100.c
index ad158ea4990..bfc08f6320f 100644
--- a/drivers/gpu/drm/radeon/r100.c
+++ b/drivers/gpu/drm/radeon/r100.c
@@ -187,13 +187,18 @@ u32 r100_page_flip(struct radeon_device *rdev, int crtc_id, u64 crtc_base)
{
struct radeon_crtc *radeon_crtc = rdev->mode_info.crtcs[crtc_id];
u32 tmp = ((u32)crtc_base) | RADEON_CRTC_OFFSET__OFFSET_LOCK;
+ int i;
/* Lock the graphics update lock */
/* update the scanout addresses */
WREG32(RADEON_CRTC_OFFSET + radeon_crtc->crtc_offset, tmp);
/* Wait for update_pending to go high. */
- while (!(RREG32(RADEON_CRTC_OFFSET + radeon_crtc->crtc_offset) & RADEON_CRTC_OFFSET__GUI_TRIG_OFFSET));
+ for (i = 0; i < rdev->usec_timeout; i++) {
+ if (RREG32(RADEON_CRTC_OFFSET + radeon_crtc->crtc_offset) & RADEON_CRTC_OFFSET__GUI_TRIG_OFFSET)
+ break;
+ udelay(1);
+ }
DRM_DEBUG("Update pending now high. Unlocking vupdate_lock.\n");
/* Unlock the lock, so double-buffering can take place inside vblank */
diff --git a/drivers/gpu/drm/radeon/r300.c b/drivers/gpu/drm/radeon/r300.c
index 400b26df652..c93bc64707e 100644
--- a/drivers/gpu/drm/radeon/r300.c
+++ b/drivers/gpu/drm/radeon/r300.c
@@ -701,16 +701,21 @@ static int r300_packet0_check(struct radeon_cs_parser *p,
return r;
}
- if (reloc->lobj.tiling_flags & RADEON_TILING_MACRO)
- tile_flags |= R300_TXO_MACRO_TILE;
- if (reloc->lobj.tiling_flags & RADEON_TILING_MICRO)
- tile_flags |= R300_TXO_MICRO_TILE;
- else if (reloc->lobj.tiling_flags & RADEON_TILING_MICRO_SQUARE)
- tile_flags |= R300_TXO_MICRO_TILE_SQUARE;
-
- tmp = idx_value + ((u32)reloc->lobj.gpu_offset);
- tmp |= tile_flags;
- ib[idx] = tmp;
+ if (p->keep_tiling_flags) {
+ ib[idx] = (idx_value & 31) | /* keep the 1st 5 bits */
+ ((idx_value & ~31) + (u32)reloc->lobj.gpu_offset);
+ } else {
+ if (reloc->lobj.tiling_flags & RADEON_TILING_MACRO)
+ tile_flags |= R300_TXO_MACRO_TILE;
+ if (reloc->lobj.tiling_flags & RADEON_TILING_MICRO)
+ tile_flags |= R300_TXO_MICRO_TILE;
+ else if (reloc->lobj.tiling_flags & RADEON_TILING_MICRO_SQUARE)
+ tile_flags |= R300_TXO_MICRO_TILE_SQUARE;
+
+ tmp = idx_value + ((u32)reloc->lobj.gpu_offset);
+ tmp |= tile_flags;
+ ib[idx] = tmp;
+ }
track->textures[i].robj = reloc->robj;
track->tex_dirty = true;
break;
@@ -760,24 +765,26 @@ static int r300_packet0_check(struct radeon_cs_parser *p,
/* RB3D_COLORPITCH1 */
/* RB3D_COLORPITCH2 */
/* RB3D_COLORPITCH3 */
- r = r100_cs_packet_next_reloc(p, &reloc);
- if (r) {
- DRM_ERROR("No reloc for ib[%d]=0x%04X\n",
- idx, reg);
- r100_cs_dump_packet(p, pkt);
- return r;
- }
+ if (!p->keep_tiling_flags) {
+ r = r100_cs_packet_next_reloc(p, &reloc);
+ if (r) {
+ DRM_ERROR("No reloc for ib[%d]=0x%04X\n",
+ idx, reg);
+ r100_cs_dump_packet(p, pkt);
+ return r;
+ }
- if (reloc->lobj.tiling_flags & RADEON_TILING_MACRO)
- tile_flags |= R300_COLOR_TILE_ENABLE;
- if (reloc->lobj.tiling_flags & RADEON_TILING_MICRO)
- tile_flags |= R300_COLOR_MICROTILE_ENABLE;
- else if (reloc->lobj.tiling_flags & RADEON_TILING_MICRO_SQUARE)
- tile_flags |= R300_COLOR_MICROTILE_SQUARE_ENABLE;
+ if (reloc->lobj.tiling_flags & RADEON_TILING_MACRO)
+ tile_flags |= R300_COLOR_TILE_ENABLE;
+ if (reloc->lobj.tiling_flags & RADEON_TILING_MICRO)
+ tile_flags |= R300_COLOR_MICROTILE_ENABLE;
+ else if (reloc->lobj.tiling_flags & RADEON_TILING_MICRO_SQUARE)
+ tile_flags |= R300_COLOR_MICROTILE_SQUARE_ENABLE;
- tmp = idx_value & ~(0x7 << 16);
- tmp |= tile_flags;
- ib[idx] = tmp;
+ tmp = idx_value & ~(0x7 << 16);
+ tmp |= tile_flags;
+ ib[idx] = tmp;
+ }
i = (reg - 0x4E38) >> 2;
track->cb[i].pitch = idx_value & 0x3FFE;
switch (((idx_value >> 21) & 0xF)) {
@@ -843,25 +850,26 @@ static int r300_packet0_check(struct radeon_cs_parser *p,
break;
case 0x4F24:
/* ZB_DEPTHPITCH */
- r = r100_cs_packet_next_reloc(p, &reloc);
- if (r) {
- DRM_ERROR("No reloc for ib[%d]=0x%04X\n",
- idx, reg);
- r100_cs_dump_packet(p, pkt);
- return r;
- }
-
- if (reloc->lobj.tiling_flags & RADEON_TILING_MACRO)
- tile_flags |= R300_DEPTHMACROTILE_ENABLE;
- if (reloc->lobj.tiling_flags & RADEON_TILING_MICRO)
- tile_flags |= R300_DEPTHMICROTILE_TILED;
- else if (reloc->lobj.tiling_flags & RADEON_TILING_MICRO_SQUARE)
- tile_flags |= R300_DEPTHMICROTILE_TILED_SQUARE;
+ if (!p->keep_tiling_flags) {
+ r = r100_cs_packet_next_reloc(p, &reloc);
+ if (r) {
+ DRM_ERROR("No reloc for ib[%d]=0x%04X\n",
+ idx, reg);
+ r100_cs_dump_packet(p, pkt);
+ return r;
+ }
- tmp = idx_value & ~(0x7 << 16);
- tmp |= tile_flags;
- ib[idx] = tmp;
+ if (reloc->lobj.tiling_flags & RADEON_TILING_MACRO)
+ tile_flags |= R300_DEPTHMACROTILE_ENABLE;
+ if (reloc->lobj.tiling_flags & RADEON_TILING_MICRO)
+ tile_flags |= R300_DEPTHMICROTILE_TILED;
+ else if (reloc->lobj.tiling_flags & RADEON_TILING_MICRO_SQUARE)
+ tile_flags |= R300_DEPTHMICROTILE_TILED_SQUARE;
+ tmp = idx_value & ~(0x7 << 16);
+ tmp |= tile_flags;
+ ib[idx] = tmp;
+ }
track->zb.pitch = idx_value & 0x3FFC;
track->zb_dirty = true;
break;
diff --git a/drivers/gpu/drm/radeon/r600.c b/drivers/gpu/drm/radeon/r600.c
index 19afc43ad17..9cdda0b3b08 100644
--- a/drivers/gpu/drm/radeon/r600.c
+++ b/drivers/gpu/drm/radeon/r600.c
@@ -288,24 +288,6 @@ void r600_pm_get_dynpm_state(struct radeon_device *rdev)
pcie_lanes);
}
-static int r600_pm_get_type_index(struct radeon_device *rdev,
- enum radeon_pm_state_type ps_type,
- int instance)
-{
- int i;
- int found_instance = -1;
-
- for (i = 0; i < rdev->pm.num_power_states; i++) {
- if (rdev->pm.power_state[i].type == ps_type) {
- found_instance++;
- if (found_instance == instance)
- return i;
- }
- }
- /* return default if no match */
- return rdev->pm.default_power_state_index;
-}
-
void rs780_pm_init_profile(struct radeon_device *rdev)
{
if (rdev->pm.num_power_states == 2) {
@@ -421,6 +403,8 @@ void rs780_pm_init_profile(struct radeon_device *rdev)
void r600_pm_init_profile(struct radeon_device *rdev)
{
+ int idx;
+
if (rdev->family == CHIP_R600) {
/* XXX */
/* default */
@@ -502,81 +486,43 @@ void r600_pm_init_profile(struct radeon_device *rdev)
rdev->pm.profiles[PM_PROFILE_DEFAULT_IDX].dpms_off_cm_idx = 0;
rdev->pm.profiles[PM_PROFILE_DEFAULT_IDX].dpms_on_cm_idx = 2;
/* low sh */
- if (rdev->flags & RADEON_IS_MOBILITY) {
- rdev->pm.profiles[PM_PROFILE_LOW_SH_IDX].dpms_off_ps_idx =
- r600_pm_get_type_index(rdev, POWER_STATE_TYPE_BATTERY, 0);
- rdev->pm.profiles[PM_PROFILE_LOW_SH_IDX].dpms_on_ps_idx =
- r600_pm_get_type_index(rdev, POWER_STATE_TYPE_BATTERY, 0);
- rdev->pm.profiles[PM_PROFILE_LOW_SH_IDX].dpms_off_cm_idx = 0;
- rdev->pm.profiles[PM_PROFILE_LOW_SH_IDX].dpms_on_cm_idx = 0;
- } else {
- rdev->pm.profiles[PM_PROFILE_LOW_SH_IDX].dpms_off_ps_idx =
- r600_pm_get_type_index(rdev, POWER_STATE_TYPE_PERFORMANCE, 0);
- rdev->pm.profiles[PM_PROFILE_LOW_SH_IDX].dpms_on_ps_idx =
- r600_pm_get_type_index(rdev, POWER_STATE_TYPE_PERFORMANCE, 0);
- rdev->pm.profiles[PM_PROFILE_LOW_SH_IDX].dpms_off_cm_idx = 0;
- rdev->pm.profiles[PM_PROFILE_LOW_SH_IDX].dpms_on_cm_idx = 0;
- }
+ if (rdev->flags & RADEON_IS_MOBILITY)
+ idx = radeon_pm_get_type_index(rdev, POWER_STATE_TYPE_BATTERY, 0);
+ else
+ idx = radeon_pm_get_type_index(rdev, POWER_STATE_TYPE_PERFORMANCE, 0);
+ rdev->pm.profiles[PM_PROFILE_LOW_SH_IDX].dpms_off_ps_idx = idx;
+ rdev->pm.profiles[PM_PROFILE_LOW_SH_IDX].dpms_on_ps_idx = idx;
+ rdev->pm.profiles[PM_PROFILE_LOW_SH_IDX].dpms_off_cm_idx = 0;
+ rdev->pm.profiles[PM_PROFILE_LOW_SH_IDX].dpms_on_cm_idx = 0;
/* mid sh */
- if (rdev->flags & RADEON_IS_MOBILITY) {
- rdev->pm.profiles[PM_PROFILE_MID_SH_IDX].dpms_off_ps_idx =
- r600_pm_get_type_index(rdev, POWER_STATE_TYPE_BATTERY, 0);
- rdev->pm.profiles[PM_PROFILE_MID_SH_IDX].dpms_on_ps_idx =
- r600_pm_get_type_index(rdev, POWER_STATE_TYPE_BATTERY, 0);
- rdev->pm.profiles[PM_PROFILE_MID_SH_IDX].dpms_off_cm_idx = 0;
- rdev->pm.profiles[PM_PROFILE_MID_SH_IDX].dpms_on_cm_idx = 1;
- } else {
- rdev->pm.profiles[PM_PROFILE_MID_SH_IDX].dpms_off_ps_idx =
- r600_pm_get_type_index(rdev, POWER_STATE_TYPE_PERFORMANCE, 0);
- rdev->pm.profiles[PM_PROFILE_MID_SH_IDX].dpms_on_ps_idx =
- r600_pm_get_type_index(rdev, POWER_STATE_TYPE_PERFORMANCE, 0);
- rdev->pm.profiles[PM_PROFILE_MID_SH_IDX].dpms_off_cm_idx = 0;
- rdev->pm.profiles[PM_PROFILE_MID_SH_IDX].dpms_on_cm_idx = 1;
- }
+ rdev->pm.profiles[PM_PROFILE_MID_SH_IDX].dpms_off_ps_idx = idx;
+ rdev->pm.profiles[PM_PROFILE_MID_SH_IDX].dpms_on_ps_idx = idx;
+ rdev->pm.profiles[PM_PROFILE_MID_SH_IDX].dpms_off_cm_idx = 0;
+ rdev->pm.profiles[PM_PROFILE_MID_SH_IDX].dpms_on_cm_idx = 1;
/* high sh */
- rdev->pm.profiles[PM_PROFILE_HIGH_SH_IDX].dpms_off_ps_idx =
- r600_pm_get_type_index(rdev, POWER_STATE_TYPE_PERFORMANCE, 0);
- rdev->pm.profiles[PM_PROFILE_HIGH_SH_IDX].dpms_on_ps_idx =
- r600_pm_get_type_index(rdev, POWER_STATE_TYPE_PERFORMANCE, 0);
+ idx = radeon_pm_get_type_index(rdev, POWER_STATE_TYPE_PERFORMANCE, 0);
+ rdev->pm.profiles[PM_PROFILE_HIGH_SH_IDX].dpms_off_ps_idx = idx;
+ rdev->pm.profiles[PM_PROFILE_HIGH_SH_IDX].dpms_on_ps_idx = idx;
rdev->pm.profiles[PM_PROFILE_HIGH_SH_IDX].dpms_off_cm_idx = 0;
rdev->pm.profiles[PM_PROFILE_HIGH_SH_IDX].dpms_on_cm_idx = 2;
/* low mh */
- if (rdev->flags & RADEON_IS_MOBILITY) {
- rdev->pm.profiles[PM_PROFILE_LOW_MH_IDX].dpms_off_ps_idx =
- r600_pm_get_type_index(rdev, POWER_STATE_TYPE_BATTERY, 1);
- rdev->pm.profiles[PM_PROFILE_LOW_MH_IDX].dpms_on_ps_idx =
- r600_pm_get_type_index(rdev, POWER_STATE_TYPE_BATTERY, 1);
- rdev->pm.profiles[PM_PROFILE_LOW_MH_IDX].dpms_off_cm_idx = 0;
- rdev->pm.profiles[PM_PROFILE_LOW_MH_IDX].dpms_on_cm_idx = 0;
- } else {
- rdev->pm.profiles[PM_PROFILE_LOW_MH_IDX].dpms_off_ps_idx =
- r600_pm_get_type_index(rdev, POWER_STATE_TYPE_PERFORMANCE, 1);
- rdev->pm.profiles[PM_PROFILE_LOW_MH_IDX].dpms_on_ps_idx =
- r600_pm_get_type_index(rdev, POWER_STATE_TYPE_PERFORMANCE, 1);
- rdev->pm.profiles[PM_PROFILE_LOW_MH_IDX].dpms_off_cm_idx = 0;
- rdev->pm.profiles[PM_PROFILE_LOW_MH_IDX].dpms_on_cm_idx = 0;
- }
+ if (rdev->flags & RADEON_IS_MOBILITY)
+ idx = radeon_pm_get_type_index(rdev, POWER_STATE_TYPE_BATTERY, 1);
+ else
+ idx = radeon_pm_get_type_index(rdev, POWER_STATE_TYPE_PERFORMANCE, 1);
+ rdev->pm.profiles[PM_PROFILE_LOW_MH_IDX].dpms_off_ps_idx = idx;
+ rdev->pm.profiles[PM_PROFILE_LOW_MH_IDX].dpms_on_ps_idx = idx;
+ rdev->pm.profiles[PM_PROFILE_LOW_MH_IDX].dpms_off_cm_idx = 0;
+ rdev->pm.profiles[PM_PROFILE_LOW_MH_IDX].dpms_on_cm_idx = 0;
/* mid mh */
- if (rdev->flags & RADEON_IS_MOBILITY) {
- rdev->pm.profiles[PM_PROFILE_MID_MH_IDX].dpms_off_ps_idx =
- r600_pm_get_type_index(rdev, POWER_STATE_TYPE_BATTERY, 1);
- rdev->pm.profiles[PM_PROFILE_MID_MH_IDX].dpms_on_ps_idx =
- r600_pm_get_type_index(rdev, POWER_STATE_TYPE_BATTERY, 1);
- rdev->pm.profiles[PM_PROFILE_MID_MH_IDX].dpms_off_cm_idx = 0;
- rdev->pm.profiles[PM_PROFILE_MID_MH_IDX].dpms_on_cm_idx = 1;
- } else {
- rdev->pm.profiles[PM_PROFILE_MID_MH_IDX].dpms_off_ps_idx =
- r600_pm_get_type_index(rdev, POWER_STATE_TYPE_PERFORMANCE, 1);
- rdev->pm.profiles[PM_PROFILE_MID_MH_IDX].dpms_on_ps_idx =
- r600_pm_get_type_index(rdev, POWER_STATE_TYPE_PERFORMANCE, 1);
- rdev->pm.profiles[PM_PROFILE_MID_MH_IDX].dpms_off_cm_idx = 0;
- rdev->pm.profiles[PM_PROFILE_MID_MH_IDX].dpms_on_cm_idx = 1;
- }
+ rdev->pm.profiles[PM_PROFILE_MID_MH_IDX].dpms_off_ps_idx = idx;
+ rdev->pm.profiles[PM_PROFILE_MID_MH_IDX].dpms_on_ps_idx = idx;
+ rdev->pm.profiles[PM_PROFILE_MID_MH_IDX].dpms_off_cm_idx = 0;
+ rdev->pm.profiles[PM_PROFILE_MID_MH_IDX].dpms_on_cm_idx = 1;
/* high mh */
- rdev->pm.profiles[PM_PROFILE_HIGH_MH_IDX].dpms_off_ps_idx =
- r600_pm_get_type_index(rdev, POWER_STATE_TYPE_PERFORMANCE, 1);
- rdev->pm.profiles[PM_PROFILE_HIGH_MH_IDX].dpms_on_ps_idx =
- r600_pm_get_type_index(rdev, POWER_STATE_TYPE_PERFORMANCE, 1);
+ idx = radeon_pm_get_type_index(rdev, POWER_STATE_TYPE_PERFORMANCE, 1);
+ rdev->pm.profiles[PM_PROFILE_HIGH_MH_IDX].dpms_off_ps_idx = idx;
+ rdev->pm.profiles[PM_PROFILE_HIGH_MH_IDX].dpms_on_ps_idx = idx;
rdev->pm.profiles[PM_PROFILE_HIGH_MH_IDX].dpms_off_cm_idx = 0;
rdev->pm.profiles[PM_PROFILE_HIGH_MH_IDX].dpms_on_cm_idx = 2;
}
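
The per-ASIC r600_pm_get_type_index() removed above is replaced by a shared radeon_pm_get_type_index(), declared in radeon.h later in this diff. A sketch of the shared helper, assuming it keeps the removed function's semantics (return the Nth power state of the requested type, falling back to the default state when there is no match):

int radeon_pm_get_type_index(struct radeon_device *rdev,
			     enum radeon_pm_state_type ps_type,
			     int instance)
{
	int i;
	int found_instance = -1;

	for (i = 0; i < rdev->pm.num_power_states; i++) {
		if (rdev->pm.power_state[i].type == ps_type) {
			found_instance++;
			if (found_instance == instance)
				return i;
		}
	}
	/* return default if no match */
	return rdev->pm.default_power_state_index;
}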
diff --git a/drivers/gpu/drm/radeon/r600_cs.c b/drivers/gpu/drm/radeon/r600_cs.c
index 0a2e023c155..cb1acffd243 100644
--- a/drivers/gpu/drm/radeon/r600_cs.c
+++ b/drivers/gpu/drm/radeon/r600_cs.c
@@ -941,7 +941,8 @@ static int r600_cs_check_reg(struct radeon_cs_parser *p, u32 reg, u32 idx)
track->db_depth_control = radeon_get_ib_value(p, idx);
break;
case R_028010_DB_DEPTH_INFO:
- if (r600_cs_packet_next_is_pkt3_nop(p)) {
+ if (!p->keep_tiling_flags &&
+ r600_cs_packet_next_is_pkt3_nop(p)) {
r = r600_cs_packet_next_reloc(p, &reloc);
if (r) {
dev_warn(p->dev, "bad SET_CONTEXT_REG "
@@ -992,7 +993,8 @@ static int r600_cs_check_reg(struct radeon_cs_parser *p, u32 reg, u32 idx)
case R_0280B4_CB_COLOR5_INFO:
case R_0280B8_CB_COLOR6_INFO:
case R_0280BC_CB_COLOR7_INFO:
- if (r600_cs_packet_next_is_pkt3_nop(p)) {
+ if (!p->keep_tiling_flags &&
+ r600_cs_packet_next_is_pkt3_nop(p)) {
r = r600_cs_packet_next_reloc(p, &reloc);
if (r) {
dev_err(p->dev, "bad SET_CONTEXT_REG 0x%04X\n", reg);
@@ -1291,10 +1293,12 @@ static int r600_check_texture_resource(struct radeon_cs_parser *p, u32 idx,
mip_offset <<= 8;
word0 = radeon_get_ib_value(p, idx + 0);
- if (tiling_flags & RADEON_TILING_MACRO)
- word0 |= S_038000_TILE_MODE(V_038000_ARRAY_2D_TILED_THIN1);
- else if (tiling_flags & RADEON_TILING_MICRO)
- word0 |= S_038000_TILE_MODE(V_038000_ARRAY_1D_TILED_THIN1);
+ if (!p->keep_tiling_flags) {
+ if (tiling_flags & RADEON_TILING_MACRO)
+ word0 |= S_038000_TILE_MODE(V_038000_ARRAY_2D_TILED_THIN1);
+ else if (tiling_flags & RADEON_TILING_MICRO)
+ word0 |= S_038000_TILE_MODE(V_038000_ARRAY_1D_TILED_THIN1);
+ }
word1 = radeon_get_ib_value(p, idx + 1);
w0 = G_038000_TEX_WIDTH(word0) + 1;
h0 = G_038004_TEX_HEIGHT(word1) + 1;
@@ -1621,10 +1625,12 @@ static int r600_packet3_check(struct radeon_cs_parser *p,
return -EINVAL;
}
base_offset = (u32)((reloc->lobj.gpu_offset >> 8) & 0xffffffff);
- if (reloc->lobj.tiling_flags & RADEON_TILING_MACRO)
- ib[idx+1+(i*7)+0] |= S_038000_TILE_MODE(V_038000_ARRAY_2D_TILED_THIN1);
- else if (reloc->lobj.tiling_flags & RADEON_TILING_MICRO)
- ib[idx+1+(i*7)+0] |= S_038000_TILE_MODE(V_038000_ARRAY_1D_TILED_THIN1);
+ if (!p->keep_tiling_flags) {
+ if (reloc->lobj.tiling_flags & RADEON_TILING_MACRO)
+ ib[idx+1+(i*7)+0] |= S_038000_TILE_MODE(V_038000_ARRAY_2D_TILED_THIN1);
+ else if (reloc->lobj.tiling_flags & RADEON_TILING_MICRO)
+ ib[idx+1+(i*7)+0] |= S_038000_TILE_MODE(V_038000_ARRAY_1D_TILED_THIN1);
+ }
texture = reloc->robj;
/* tex mip base */
r = r600_cs_packet_next_reloc(p, &reloc);
diff --git a/drivers/gpu/drm/radeon/radeon.h b/drivers/gpu/drm/radeon/radeon.h
index b316b301152..8227e76b5c7 100644
--- a/drivers/gpu/drm/radeon/radeon.h
+++ b/drivers/gpu/drm/radeon/radeon.h
@@ -611,7 +611,8 @@ struct radeon_cs_parser {
struct radeon_ib *ib;
void *track;
unsigned family;
- int parser_error;
+ int parser_error;
+ bool keep_tiling_flags;
};
extern int radeon_cs_update_pages(struct radeon_cs_parser *p, int pg_idx);
@@ -784,8 +785,7 @@ struct radeon_pm_clock_info {
struct radeon_power_state {
enum radeon_pm_state_type type;
- /* XXX: use a define for num clock modes */
- struct radeon_pm_clock_info clock_info[8];
+ struct radeon_pm_clock_info *clock_info;
/* number of valid clock modes in this power state */
int num_clock_modes;
struct radeon_pm_clock_info *default_clock_mode;
@@ -855,6 +855,9 @@ struct radeon_pm {
struct device *int_hwmon_dev;
};
+int radeon_pm_get_type_index(struct radeon_device *rdev,
+ enum radeon_pm_state_type ps_type,
+ int instance);
/*
* Benchmarking
@@ -1142,6 +1145,48 @@ struct r600_vram_scratch {
u64 gpu_addr;
};
+
+/*
+ * Mutex which allows recursive locking from the same process.
+ */
+struct radeon_mutex {
+ struct mutex mutex;
+ struct task_struct *owner;
+ int level;
+};
+
+static inline void radeon_mutex_init(struct radeon_mutex *mutex)
+{
+ mutex_init(&mutex->mutex);
+ mutex->owner = NULL;
+ mutex->level = 0;
+}
+
+static inline void radeon_mutex_lock(struct radeon_mutex *mutex)
+{
+ if (mutex_trylock(&mutex->mutex)) {
+ /* The mutex was unlocked before, so it's ours now */
+ mutex->owner = current;
+ } else if (mutex->owner != current) {
+ /* Another process locked the mutex, take it */
+ mutex_lock(&mutex->mutex);
+ mutex->owner = current;
+ }
+ /* Otherwise the mutex was already locked by this process */
+
+ mutex->level++;
+}
+
+static inline void radeon_mutex_unlock(struct radeon_mutex *mutex)
+{
+ if (--mutex->level > 0)
+ return;
+
+ mutex->owner = NULL;
+ mutex_unlock(&mutex->mutex);
+}
+
+
/*
* Core structure, functions and helpers.
*/
@@ -1197,7 +1242,7 @@ struct radeon_device {
struct radeon_gem gem;
struct radeon_pm pm;
uint32_t bios_scratch[RADEON_BIOS_NUM_SCRATCH];
- struct mutex cs_mutex;
+ struct radeon_mutex cs_mutex;
struct radeon_wb wb;
struct radeon_dummy_page dummy_page;
bool gpu_lockup;
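
The radeon_mutex wrapper above lets the owning process take cs_mutex again without deadlocking, which is what allows radeon_gpu_reset() (now locking cs_mutex in radeon_device.c below) to be reached from a CS ioctl path that already holds it. A minimal usage sketch:

/* Nested locking from the same process; the second lock only bumps the level. */
radeon_mutex_lock(&rdev->cs_mutex);     /* level 1, owner = current */
radeon_mutex_lock(&rdev->cs_mutex);     /* same owner: no deadlock, level 2 */
/* ... e.g. a GPU reset triggered while parsing a command stream ... */
radeon_mutex_unlock(&rdev->cs_mutex);   /* back to level 1, still held */
radeon_mutex_unlock(&rdev->cs_mutex);   /* level 0: owner cleared, mutex released */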
diff --git a/drivers/gpu/drm/radeon/radeon_acpi.c b/drivers/gpu/drm/radeon/radeon_acpi.c
index 3f6636bb2d7..3516a6081dc 100644
--- a/drivers/gpu/drm/radeon/radeon_acpi.c
+++ b/drivers/gpu/drm/radeon/radeon_acpi.c
@@ -35,7 +35,8 @@ static int radeon_atif_call(acpi_handle handle)
/* Fail only if calling the method fails and ATIF is supported */
if (ACPI_FAILURE(status) && status != AE_NOT_FOUND) {
- printk(KERN_DEBUG "failed to evaluate ATIF got %s\n", acpi_format_exception(status));
+ DRM_DEBUG_DRIVER("failed to evaluate ATIF got %s\n",
+ acpi_format_exception(status));
kfree(buffer.pointer);
return 1;
}
@@ -50,13 +51,13 @@ int radeon_acpi_init(struct radeon_device *rdev)
acpi_handle handle;
int ret;
- /* No need to proceed if we're sure that ATIF is not supported */
- if (!ASIC_IS_AVIVO(rdev) || !rdev->bios)
- return 0;
-
/* Get the device handle */
handle = DEVICE_ACPI_HANDLE(&rdev->pdev->dev);
+ /* No need to proceed if we're sure that ATIF is not supported */
+ if (!ASIC_IS_AVIVO(rdev) || !rdev->bios || !handle)
+ return 0;
+
/* Call the ATIF method */
ret = radeon_atif_call(handle);
if (ret)
diff --git a/drivers/gpu/drm/radeon/radeon_asic.c b/drivers/gpu/drm/radeon/radeon_asic.c
index e2944566ffe..a2e1eae114e 100644
--- a/drivers/gpu/drm/radeon/radeon_asic.c
+++ b/drivers/gpu/drm/radeon/radeon_asic.c
@@ -834,7 +834,7 @@ static struct radeon_asic sumo_asic = {
.pm_misc = &evergreen_pm_misc,
.pm_prepare = &evergreen_pm_prepare,
.pm_finish = &evergreen_pm_finish,
- .pm_init_profile = &rs780_pm_init_profile,
+ .pm_init_profile = &sumo_pm_init_profile,
.pm_get_dynpm_state = &r600_pm_get_dynpm_state,
.pre_page_flip = &evergreen_pre_page_flip,
.page_flip = &evergreen_page_flip,
diff --git a/drivers/gpu/drm/radeon/radeon_asic.h b/drivers/gpu/drm/radeon/radeon_asic.h
index 85f14f0337e..59914842a72 100644
--- a/drivers/gpu/drm/radeon/radeon_asic.h
+++ b/drivers/gpu/drm/radeon/radeon_asic.h
@@ -413,6 +413,7 @@ extern int evergreen_cs_parse(struct radeon_cs_parser *p);
extern void evergreen_pm_misc(struct radeon_device *rdev);
extern void evergreen_pm_prepare(struct radeon_device *rdev);
extern void evergreen_pm_finish(struct radeon_device *rdev);
+extern void sumo_pm_init_profile(struct radeon_device *rdev);
extern void evergreen_pre_page_flip(struct radeon_device *rdev, int crtc);
extern u32 evergreen_page_flip(struct radeon_device *rdev, int crtc, u64 crtc_base);
extern void evergreen_post_page_flip(struct radeon_device *rdev, int crtc);
diff --git a/drivers/gpu/drm/radeon/radeon_atombios.c b/drivers/gpu/drm/radeon/radeon_atombios.c
index 08d0b94332e..d24baf30efc 100644
--- a/drivers/gpu/drm/radeon/radeon_atombios.c
+++ b/drivers/gpu/drm/radeon/radeon_atombios.c
@@ -62,6 +62,87 @@ union atom_supported_devices {
struct _ATOM_SUPPORTED_DEVICES_INFO_2d1 info_2d1;
};
+static void radeon_lookup_i2c_gpio_quirks(struct radeon_device *rdev,
+ ATOM_GPIO_I2C_ASSIGMENT *gpio,
+ u8 index)
+{
+ /* r4xx mask is technically not used by the hw, so patch in the legacy mask bits */
+ if ((rdev->family == CHIP_R420) ||
+ (rdev->family == CHIP_R423) ||
+ (rdev->family == CHIP_RV410)) {
+ if ((le16_to_cpu(gpio->usClkMaskRegisterIndex) == 0x0018) ||
+ (le16_to_cpu(gpio->usClkMaskRegisterIndex) == 0x0019) ||
+ (le16_to_cpu(gpio->usClkMaskRegisterIndex) == 0x001a)) {
+ gpio->ucClkMaskShift = 0x19;
+ gpio->ucDataMaskShift = 0x18;
+ }
+ }
+
+ /* some evergreen boards have bad data for this entry */
+ if (ASIC_IS_DCE4(rdev)) {
+ if ((index == 7) &&
+ (le16_to_cpu(gpio->usClkMaskRegisterIndex) == 0x1936) &&
+ (gpio->sucI2cId.ucAccess == 0)) {
+ gpio->sucI2cId.ucAccess = 0x97;
+ gpio->ucDataMaskShift = 8;
+ gpio->ucDataEnShift = 8;
+ gpio->ucDataY_Shift = 8;
+ gpio->ucDataA_Shift = 8;
+ }
+ }
+
+ /* some DCE3 boards have bad data for this entry */
+ if (ASIC_IS_DCE3(rdev)) {
+ if ((index == 4) &&
+ (le16_to_cpu(gpio->usClkMaskRegisterIndex) == 0x1fda) &&
+ (gpio->sucI2cId.ucAccess == 0x94))
+ gpio->sucI2cId.ucAccess = 0x14;
+ }
+}
+
+static struct radeon_i2c_bus_rec radeon_get_bus_rec_for_i2c_gpio(ATOM_GPIO_I2C_ASSIGMENT *gpio)
+{
+ struct radeon_i2c_bus_rec i2c;
+
+ memset(&i2c, 0, sizeof(struct radeon_i2c_bus_rec));
+
+ i2c.mask_clk_reg = le16_to_cpu(gpio->usClkMaskRegisterIndex) * 4;
+ i2c.mask_data_reg = le16_to_cpu(gpio->usDataMaskRegisterIndex) * 4;
+ i2c.en_clk_reg = le16_to_cpu(gpio->usClkEnRegisterIndex) * 4;
+ i2c.en_data_reg = le16_to_cpu(gpio->usDataEnRegisterIndex) * 4;
+ i2c.y_clk_reg = le16_to_cpu(gpio->usClkY_RegisterIndex) * 4;
+ i2c.y_data_reg = le16_to_cpu(gpio->usDataY_RegisterIndex) * 4;
+ i2c.a_clk_reg = le16_to_cpu(gpio->usClkA_RegisterIndex) * 4;
+ i2c.a_data_reg = le16_to_cpu(gpio->usDataA_RegisterIndex) * 4;
+ i2c.mask_clk_mask = (1 << gpio->ucClkMaskShift);
+ i2c.mask_data_mask = (1 << gpio->ucDataMaskShift);
+ i2c.en_clk_mask = (1 << gpio->ucClkEnShift);
+ i2c.en_data_mask = (1 << gpio->ucDataEnShift);
+ i2c.y_clk_mask = (1 << gpio->ucClkY_Shift);
+ i2c.y_data_mask = (1 << gpio->ucDataY_Shift);
+ i2c.a_clk_mask = (1 << gpio->ucClkA_Shift);
+ i2c.a_data_mask = (1 << gpio->ucDataA_Shift);
+
+ if (gpio->sucI2cId.sbfAccess.bfHW_Capable)
+ i2c.hw_capable = true;
+ else
+ i2c.hw_capable = false;
+
+ if (gpio->sucI2cId.ucAccess == 0xa0)
+ i2c.mm_i2c = true;
+ else
+ i2c.mm_i2c = false;
+
+ i2c.i2c_id = gpio->sucI2cId.ucAccess;
+
+ if (i2c.mask_clk_reg)
+ i2c.valid = true;
+ else
+ i2c.valid = false;
+
+ return i2c;
+}
+
static struct radeon_i2c_bus_rec radeon_lookup_i2c_gpio(struct radeon_device *rdev,
uint8_t id)
{
@@ -85,59 +166,10 @@ static struct radeon_i2c_bus_rec radeon_lookup_i2c_gpio(struct radeon_device *rd
for (i = 0; i < num_indices; i++) {
gpio = &i2c_info->asGPIO_Info[i];
- /* some evergreen boards have bad data for this entry */
- if (ASIC_IS_DCE4(rdev)) {
- if ((i == 7) &&
- (le16_to_cpu(gpio->usClkMaskRegisterIndex) == 0x1936) &&
- (gpio->sucI2cId.ucAccess == 0)) {
- gpio->sucI2cId.ucAccess = 0x97;
- gpio->ucDataMaskShift = 8;
- gpio->ucDataEnShift = 8;
- gpio->ucDataY_Shift = 8;
- gpio->ucDataA_Shift = 8;
- }
- }
-
- /* some DCE3 boards have bad data for this entry */
- if (ASIC_IS_DCE3(rdev)) {
- if ((i == 4) &&
- (le16_to_cpu(gpio->usClkMaskRegisterIndex) == 0x1fda) &&
- (gpio->sucI2cId.ucAccess == 0x94))
- gpio->sucI2cId.ucAccess = 0x14;
- }
+ radeon_lookup_i2c_gpio_quirks(rdev, gpio, i);
if (gpio->sucI2cId.ucAccess == id) {
- i2c.mask_clk_reg = le16_to_cpu(gpio->usClkMaskRegisterIndex) * 4;
- i2c.mask_data_reg = le16_to_cpu(gpio->usDataMaskRegisterIndex) * 4;
- i2c.en_clk_reg = le16_to_cpu(gpio->usClkEnRegisterIndex) * 4;
- i2c.en_data_reg = le16_to_cpu(gpio->usDataEnRegisterIndex) * 4;
- i2c.y_clk_reg = le16_to_cpu(gpio->usClkY_RegisterIndex) * 4;
- i2c.y_data_reg = le16_to_cpu(gpio->usDataY_RegisterIndex) * 4;
- i2c.a_clk_reg = le16_to_cpu(gpio->usClkA_RegisterIndex) * 4;
- i2c.a_data_reg = le16_to_cpu(gpio->usDataA_RegisterIndex) * 4;
- i2c.mask_clk_mask = (1 << gpio->ucClkMaskShift);
- i2c.mask_data_mask = (1 << gpio->ucDataMaskShift);
- i2c.en_clk_mask = (1 << gpio->ucClkEnShift);
- i2c.en_data_mask = (1 << gpio->ucDataEnShift);
- i2c.y_clk_mask = (1 << gpio->ucClkY_Shift);
- i2c.y_data_mask = (1 << gpio->ucDataY_Shift);
- i2c.a_clk_mask = (1 << gpio->ucClkA_Shift);
- i2c.a_data_mask = (1 << gpio->ucDataA_Shift);
-
- if (gpio->sucI2cId.sbfAccess.bfHW_Capable)
- i2c.hw_capable = true;
- else
- i2c.hw_capable = false;
-
- if (gpio->sucI2cId.ucAccess == 0xa0)
- i2c.mm_i2c = true;
- else
- i2c.mm_i2c = false;
-
- i2c.i2c_id = gpio->sucI2cId.ucAccess;
-
- if (i2c.mask_clk_reg)
- i2c.valid = true;
+ i2c = radeon_get_bus_rec_for_i2c_gpio(gpio);
break;
}
}
@@ -157,8 +189,6 @@ void radeon_atombios_i2c_init(struct radeon_device *rdev)
int i, num_indices;
char stmp[32];
- memset(&i2c, 0, sizeof(struct radeon_i2c_bus_rec));
-
if (atom_parse_data_header(ctx, index, &size, NULL, NULL, &data_offset)) {
i2c_info = (struct _ATOM_GPIO_I2C_INFO *)(ctx->bios + data_offset);
@@ -167,60 +197,12 @@ void radeon_atombios_i2c_init(struct radeon_device *rdev)
for (i = 0; i < num_indices; i++) {
gpio = &i2c_info->asGPIO_Info[i];
- i2c.valid = false;
-
- /* some evergreen boards have bad data for this entry */
- if (ASIC_IS_DCE4(rdev)) {
- if ((i == 7) &&
- (le16_to_cpu(gpio->usClkMaskRegisterIndex) == 0x1936) &&
- (gpio->sucI2cId.ucAccess == 0)) {
- gpio->sucI2cId.ucAccess = 0x97;
- gpio->ucDataMaskShift = 8;
- gpio->ucDataEnShift = 8;
- gpio->ucDataY_Shift = 8;
- gpio->ucDataA_Shift = 8;
- }
- }
- /* some DCE3 boards have bad data for this entry */
- if (ASIC_IS_DCE3(rdev)) {
- if ((i == 4) &&
- (le16_to_cpu(gpio->usClkMaskRegisterIndex) == 0x1fda) &&
- (gpio->sucI2cId.ucAccess == 0x94))
- gpio->sucI2cId.ucAccess = 0x14;
- }
+ radeon_lookup_i2c_gpio_quirks(rdev, gpio, i);
- i2c.mask_clk_reg = le16_to_cpu(gpio->usClkMaskRegisterIndex) * 4;
- i2c.mask_data_reg = le16_to_cpu(gpio->usDataMaskRegisterIndex) * 4;
- i2c.en_clk_reg = le16_to_cpu(gpio->usClkEnRegisterIndex) * 4;
- i2c.en_data_reg = le16_to_cpu(gpio->usDataEnRegisterIndex) * 4;
- i2c.y_clk_reg = le16_to_cpu(gpio->usClkY_RegisterIndex) * 4;
- i2c.y_data_reg = le16_to_cpu(gpio->usDataY_RegisterIndex) * 4;
- i2c.a_clk_reg = le16_to_cpu(gpio->usClkA_RegisterIndex) * 4;
- i2c.a_data_reg = le16_to_cpu(gpio->usDataA_RegisterIndex) * 4;
- i2c.mask_clk_mask = (1 << gpio->ucClkMaskShift);
- i2c.mask_data_mask = (1 << gpio->ucDataMaskShift);
- i2c.en_clk_mask = (1 << gpio->ucClkEnShift);
- i2c.en_data_mask = (1 << gpio->ucDataEnShift);
- i2c.y_clk_mask = (1 << gpio->ucClkY_Shift);
- i2c.y_data_mask = (1 << gpio->ucDataY_Shift);
- i2c.a_clk_mask = (1 << gpio->ucClkA_Shift);
- i2c.a_data_mask = (1 << gpio->ucDataA_Shift);
-
- if (gpio->sucI2cId.sbfAccess.bfHW_Capable)
- i2c.hw_capable = true;
- else
- i2c.hw_capable = false;
-
- if (gpio->sucI2cId.ucAccess == 0xa0)
- i2c.mm_i2c = true;
- else
- i2c.mm_i2c = false;
+ i2c = radeon_get_bus_rec_for_i2c_gpio(gpio);
- i2c.i2c_id = gpio->sucI2cId.ucAccess;
-
- if (i2c.mask_clk_reg) {
- i2c.valid = true;
+ if (i2c.valid) {
sprintf(stmp, "0x%x", i2c.i2c_id);
rdev->i2c_bus[i] = radeon_i2c_create(rdev->ddev, &i2c, stmp);
}
@@ -1996,10 +1978,14 @@ static int radeon_atombios_parse_power_table_1_3(struct radeon_device *rdev)
return state_index;
/* last mode is usually default, array is low to high */
for (i = 0; i < num_modes; i++) {
+ rdev->pm.power_state[state_index].clock_info =
+ kzalloc(sizeof(struct radeon_pm_clock_info) * 1, GFP_KERNEL);
+ if (!rdev->pm.power_state[state_index].clock_info)
+ return state_index;
+ rdev->pm.power_state[state_index].num_clock_modes = 1;
rdev->pm.power_state[state_index].clock_info[0].voltage.type = VOLTAGE_NONE;
switch (frev) {
case 1:
- rdev->pm.power_state[state_index].num_clock_modes = 1;
rdev->pm.power_state[state_index].clock_info[0].mclk =
le16_to_cpu(power_info->info.asPowerPlayInfo[i].usMemoryClock);
rdev->pm.power_state[state_index].clock_info[0].sclk =
@@ -2035,7 +2021,6 @@ static int radeon_atombios_parse_power_table_1_3(struct radeon_device *rdev)
state_index++;
break;
case 2:
- rdev->pm.power_state[state_index].num_clock_modes = 1;
rdev->pm.power_state[state_index].clock_info[0].mclk =
le32_to_cpu(power_info->info_2.asPowerPlayInfo[i].ulMemoryClock);
rdev->pm.power_state[state_index].clock_info[0].sclk =
@@ -2072,7 +2057,6 @@ static int radeon_atombios_parse_power_table_1_3(struct radeon_device *rdev)
state_index++;
break;
case 3:
- rdev->pm.power_state[state_index].num_clock_modes = 1;
rdev->pm.power_state[state_index].clock_info[0].mclk =
le32_to_cpu(power_info->info_3.asPowerPlayInfo[i].ulMemoryClock);
rdev->pm.power_state[state_index].clock_info[0].sclk =
@@ -2257,7 +2241,7 @@ static void radeon_atombios_parse_pplib_non_clock_info(struct radeon_device *rde
rdev->pm.default_power_state_index = state_index;
rdev->pm.power_state[state_index].default_clock_mode =
&rdev->pm.power_state[state_index].clock_info[mode_index - 1];
- if (ASIC_IS_DCE5(rdev)) {
+ if (ASIC_IS_DCE5(rdev) && !(rdev->flags & RADEON_IS_IGP)) {
/* NI chips post without MC ucode, so default clocks are strobe mode only */
rdev->pm.default_sclk = rdev->pm.power_state[state_index].clock_info[0].sclk;
rdev->pm.default_mclk = rdev->pm.power_state[state_index].clock_info[0].mclk;
@@ -2377,17 +2361,31 @@ static int radeon_atombios_parse_power_table_4_5(struct radeon_device *rdev)
le16_to_cpu(power_info->pplib.usNonClockInfoArrayOffset) +
(power_state->v1.ucNonClockStateIndex *
power_info->pplib.ucNonClockSize));
- for (j = 0; j < (power_info->pplib.ucStateEntrySize - 1); j++) {
- clock_info = (union pplib_clock_info *)
- (mode_info->atom_context->bios + data_offset +
- le16_to_cpu(power_info->pplib.usClockInfoArrayOffset) +
- (power_state->v1.ucClockStateIndices[j] *
- power_info->pplib.ucClockInfoSize));
- valid = radeon_atombios_parse_pplib_clock_info(rdev,
- state_index, mode_index,
- clock_info);
- if (valid)
- mode_index++;
+ rdev->pm.power_state[i].clock_info = kzalloc(sizeof(struct radeon_pm_clock_info) *
+ ((power_info->pplib.ucStateEntrySize - 1) ?
+ (power_info->pplib.ucStateEntrySize - 1) : 1),
+ GFP_KERNEL);
+ if (!rdev->pm.power_state[i].clock_info)
+ return state_index;
+ if (power_info->pplib.ucStateEntrySize - 1) {
+ for (j = 0; j < (power_info->pplib.ucStateEntrySize - 1); j++) {
+ clock_info = (union pplib_clock_info *)
+ (mode_info->atom_context->bios + data_offset +
+ le16_to_cpu(power_info->pplib.usClockInfoArrayOffset) +
+ (power_state->v1.ucClockStateIndices[j] *
+ power_info->pplib.ucClockInfoSize));
+ valid = radeon_atombios_parse_pplib_clock_info(rdev,
+ state_index, mode_index,
+ clock_info);
+ if (valid)
+ mode_index++;
+ }
+ } else {
+ rdev->pm.power_state[state_index].clock_info[0].mclk =
+ rdev->clock.default_mclk;
+ rdev->pm.power_state[state_index].clock_info[0].sclk =
+ rdev->clock.default_sclk;
+ mode_index++;
}
rdev->pm.power_state[state_index].num_clock_modes = mode_index;
if (mode_index) {
@@ -2456,18 +2454,32 @@ static int radeon_atombios_parse_power_table_6(struct radeon_device *rdev)
non_clock_array_index = i; /* power_state->v2.nonClockInfoIndex */
non_clock_info = (struct _ATOM_PPLIB_NONCLOCK_INFO *)
&non_clock_info_array->nonClockInfo[non_clock_array_index];
- for (j = 0; j < power_state->v2.ucNumDPMLevels; j++) {
- clock_array_index = power_state->v2.clockInfoIndex[j];
- /* XXX this might be an inagua bug... */
- if (clock_array_index >= clock_info_array->ucNumEntries)
- continue;
- clock_info = (union pplib_clock_info *)
- &clock_info_array->clockInfo[clock_array_index];
- valid = radeon_atombios_parse_pplib_clock_info(rdev,
- state_index, mode_index,
- clock_info);
- if (valid)
- mode_index++;
+ rdev->pm.power_state[i].clock_info = kzalloc(sizeof(struct radeon_pm_clock_info) *
+ (power_state->v2.ucNumDPMLevels ?
+ power_state->v2.ucNumDPMLevels : 1),
+ GFP_KERNEL);
+ if (!rdev->pm.power_state[i].clock_info)
+ return state_index;
+ if (power_state->v2.ucNumDPMLevels) {
+ for (j = 0; j < power_state->v2.ucNumDPMLevels; j++) {
+ clock_array_index = power_state->v2.clockInfoIndex[j];
+ /* XXX this might be an inagua bug... */
+ if (clock_array_index >= clock_info_array->ucNumEntries)
+ continue;
+ clock_info = (union pplib_clock_info *)
+ &clock_info_array->clockInfo[clock_array_index];
+ valid = radeon_atombios_parse_pplib_clock_info(rdev,
+ state_index, mode_index,
+ clock_info);
+ if (valid)
+ mode_index++;
+ }
+ } else {
+ rdev->pm.power_state[state_index].clock_info[0].mclk =
+ rdev->clock.default_mclk;
+ rdev->pm.power_state[state_index].clock_info[0].sclk =
+ rdev->clock.default_sclk;
+ mode_index++;
}
rdev->pm.power_state[state_index].num_clock_modes = mode_index;
if (mode_index) {
@@ -2524,19 +2536,23 @@ void radeon_atombios_get_power_modes(struct radeon_device *rdev)
} else {
rdev->pm.power_state = kzalloc(sizeof(struct radeon_power_state), GFP_KERNEL);
if (rdev->pm.power_state) {
- /* add the default mode */
- rdev->pm.power_state[state_index].type =
- POWER_STATE_TYPE_DEFAULT;
- rdev->pm.power_state[state_index].num_clock_modes = 1;
- rdev->pm.power_state[state_index].clock_info[0].mclk = rdev->clock.default_mclk;
- rdev->pm.power_state[state_index].clock_info[0].sclk = rdev->clock.default_sclk;
- rdev->pm.power_state[state_index].default_clock_mode =
- &rdev->pm.power_state[state_index].clock_info[0];
- rdev->pm.power_state[state_index].clock_info[0].voltage.type = VOLTAGE_NONE;
- rdev->pm.power_state[state_index].pcie_lanes = 16;
- rdev->pm.default_power_state_index = state_index;
- rdev->pm.power_state[state_index].flags = 0;
- state_index++;
+ rdev->pm.power_state[0].clock_info =
+ kzalloc(sizeof(struct radeon_pm_clock_info) * 1, GFP_KERNEL);
+ if (rdev->pm.power_state[0].clock_info) {
+ /* add the default mode */
+ rdev->pm.power_state[state_index].type =
+ POWER_STATE_TYPE_DEFAULT;
+ rdev->pm.power_state[state_index].num_clock_modes = 1;
+ rdev->pm.power_state[state_index].clock_info[0].mclk = rdev->clock.default_mclk;
+ rdev->pm.power_state[state_index].clock_info[0].sclk = rdev->clock.default_sclk;
+ rdev->pm.power_state[state_index].default_clock_mode =
+ &rdev->pm.power_state[state_index].clock_info[0];
+ rdev->pm.power_state[state_index].clock_info[0].voltage.type = VOLTAGE_NONE;
+ rdev->pm.power_state[state_index].pcie_lanes = 16;
+ rdev->pm.default_power_state_index = state_index;
+ rdev->pm.power_state[state_index].flags = 0;
+ state_index++;
+ }
}
}
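
Each power-table parser now allocates clock_info per power state instead of relying on the fixed clock_info[8] array removed from struct radeon_power_state in radeon.h. The common shape of that change, simplified (here 'ps' stands in for rdev->pm.power_state[i] and 'levels' for the level count read from the table):

/* Allocation pattern introduced above, simplified: allocate at least one
 * clock mode, and fall back to the BIOS default clocks when the table
 * reports zero DPM levels for this state. */
num = levels ? levels : 1;
ps->clock_info = kzalloc(num * sizeof(struct radeon_pm_clock_info), GFP_KERNEL);
if (!ps->clock_info)
	return state_index;              /* keep whatever was parsed so far */
if (!levels) {
	ps->clock_info[0].mclk = rdev->clock.default_mclk;
	ps->clock_info[0].sclk = rdev->clock.default_sclk;
}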
diff --git a/drivers/gpu/drm/radeon/radeon_benchmark.c b/drivers/gpu/drm/radeon/radeon_benchmark.c
index 5cafc90de7f..17e1a9b2d8f 100644
--- a/drivers/gpu/drm/radeon/radeon_benchmark.c
+++ b/drivers/gpu/drm/radeon/radeon_benchmark.c
@@ -98,7 +98,7 @@ static void radeon_benchmark_move(struct radeon_device *rdev, unsigned size,
struct radeon_bo *sobj = NULL;
uint64_t saddr, daddr;
int r, n;
- unsigned int time;
+ int time;
n = RADEON_BENCHMARK_ITERATIONS;
r = radeon_bo_create(rdev, size, PAGE_SIZE, true, sdomain, &sobj);
diff --git a/drivers/gpu/drm/radeon/radeon_combios.c b/drivers/gpu/drm/radeon/radeon_combios.c
index 8bf83c4b414..81fc100be7e 100644
--- a/drivers/gpu/drm/radeon/radeon_combios.c
+++ b/drivers/gpu/drm/radeon/radeon_combios.c
@@ -2563,14 +2563,17 @@ void radeon_combios_get_power_modes(struct radeon_device *rdev)
/* allocate 2 power states */
rdev->pm.power_state = kzalloc(sizeof(struct radeon_power_state) * 2, GFP_KERNEL);
- if (!rdev->pm.power_state) {
- rdev->pm.default_power_state_index = state_index;
- rdev->pm.num_power_states = 0;
-
- rdev->pm.current_power_state_index = rdev->pm.default_power_state_index;
- rdev->pm.current_clock_mode_index = 0;
- return;
- }
+ if (rdev->pm.power_state) {
+ /* allocate 1 clock mode per state */
+ rdev->pm.power_state[0].clock_info =
+ kzalloc(sizeof(struct radeon_pm_clock_info) * 1, GFP_KERNEL);
+ rdev->pm.power_state[1].clock_info =
+ kzalloc(sizeof(struct radeon_pm_clock_info) * 1, GFP_KERNEL);
+ if (!rdev->pm.power_state[0].clock_info ||
+ !rdev->pm.power_state[1].clock_info)
+ goto pm_failed;
+ } else
+ goto pm_failed;
/* check for a thermal chip */
offset = combios_get_table_offset(dev, COMBIOS_OVERDRIVE_INFO_TABLE);
@@ -2735,6 +2738,14 @@ default_mode:
rdev->pm.current_power_state_index = rdev->pm.default_power_state_index;
rdev->pm.current_clock_mode_index = 0;
+ return;
+
+pm_failed:
+ rdev->pm.default_power_state_index = state_index;
+ rdev->pm.num_power_states = 0;
+
+ rdev->pm.current_power_state_index = rdev->pm.default_power_state_index;
+ rdev->pm.current_clock_mode_index = 0;
}
void radeon_external_tmds_setup(struct drm_encoder *encoder)
diff --git a/drivers/gpu/drm/radeon/radeon_cs.c b/drivers/gpu/drm/radeon/radeon_cs.c
index fae00c0d75a..29afd71e084 100644
--- a/drivers/gpu/drm/radeon/radeon_cs.c
+++ b/drivers/gpu/drm/radeon/radeon_cs.c
@@ -93,7 +93,7 @@ int radeon_cs_parser_init(struct radeon_cs_parser *p, void *data)
{
struct drm_radeon_cs *cs = data;
uint64_t *chunk_array_ptr;
- unsigned size, i;
+ unsigned size, i, flags = 0;
if (!cs->num_chunks) {
return 0;
@@ -140,6 +140,10 @@ int radeon_cs_parser_init(struct radeon_cs_parser *p, void *data)
if (p->chunks[i].length_dw == 0)
return -EINVAL;
}
+ if (p->chunks[i].chunk_id == RADEON_CHUNK_ID_FLAGS &&
+ !p->chunks[i].length_dw) {
+ return -EINVAL;
+ }
p->chunks[i].length_dw = user_chunk.length_dw;
p->chunks[i].user_ptr = (void __user *)(unsigned long)user_chunk.chunk_data;
@@ -155,6 +159,9 @@ int radeon_cs_parser_init(struct radeon_cs_parser *p, void *data)
p->chunks[i].user_ptr, size)) {
return -EFAULT;
}
+ if (p->chunks[i].chunk_id == RADEON_CHUNK_ID_FLAGS) {
+ flags = p->chunks[i].kdata[0];
+ }
} else {
p->chunks[i].kpage[0] = kmalloc(PAGE_SIZE, GFP_KERNEL);
p->chunks[i].kpage[1] = kmalloc(PAGE_SIZE, GFP_KERNEL);
@@ -174,6 +181,8 @@ int radeon_cs_parser_init(struct radeon_cs_parser *p, void *data)
p->chunks[p->chunk_ib_idx].length_dw);
return -EINVAL;
}
+
+ p->keep_tiling_flags = (flags & RADEON_CS_KEEP_TILING_FLAGS) != 0;
return 0;
}
@@ -222,7 +231,7 @@ int radeon_cs_ioctl(struct drm_device *dev, void *data, struct drm_file *filp)
struct radeon_cs_chunk *ib_chunk;
int r;
- mutex_lock(&rdev->cs_mutex);
+ radeon_mutex_lock(&rdev->cs_mutex);
/* initialize parser */
memset(&parser, 0, sizeof(struct radeon_cs_parser));
parser.filp = filp;
@@ -233,14 +242,14 @@ int radeon_cs_ioctl(struct drm_device *dev, void *data, struct drm_file *filp)
if (r) {
DRM_ERROR("Failed to initialize parser !\n");
radeon_cs_parser_fini(&parser, r);
- mutex_unlock(&rdev->cs_mutex);
+ radeon_mutex_unlock(&rdev->cs_mutex);
return r;
}
r = radeon_ib_get(rdev, &parser.ib);
if (r) {
DRM_ERROR("Failed to get ib !\n");
radeon_cs_parser_fini(&parser, r);
- mutex_unlock(&rdev->cs_mutex);
+ radeon_mutex_unlock(&rdev->cs_mutex);
return r;
}
r = radeon_cs_parser_relocs(&parser);
@@ -248,7 +257,7 @@ int radeon_cs_ioctl(struct drm_device *dev, void *data, struct drm_file *filp)
if (r != -ERESTARTSYS)
DRM_ERROR("Failed to parse relocation %d!\n", r);
radeon_cs_parser_fini(&parser, r);
- mutex_unlock(&rdev->cs_mutex);
+ radeon_mutex_unlock(&rdev->cs_mutex);
return r;
}
/* Copy the packet into the IB, the parser will read from the
@@ -260,14 +269,14 @@ int radeon_cs_ioctl(struct drm_device *dev, void *data, struct drm_file *filp)
if (r || parser.parser_error) {
DRM_ERROR("Invalid command stream !\n");
radeon_cs_parser_fini(&parser, r);
- mutex_unlock(&rdev->cs_mutex);
+ radeon_mutex_unlock(&rdev->cs_mutex);
return r;
}
r = radeon_cs_finish_pages(&parser);
if (r) {
DRM_ERROR("Invalid command stream !\n");
radeon_cs_parser_fini(&parser, r);
- mutex_unlock(&rdev->cs_mutex);
+ radeon_mutex_unlock(&rdev->cs_mutex);
return r;
}
r = radeon_ib_schedule(rdev, parser.ib);
@@ -275,7 +284,7 @@ int radeon_cs_ioctl(struct drm_device *dev, void *data, struct drm_file *filp)
DRM_ERROR("Failed to schedule IB !\n");
}
radeon_cs_parser_fini(&parser, r);
- mutex_unlock(&rdev->cs_mutex);
+ radeon_mutex_unlock(&rdev->cs_mutex);
return r;
}
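The parser changes add an optional RADEON_CHUNK_ID_FLAGS chunk whose first dword is a flags word; RADEON_CS_KEEP_TILING_FLAGS tells the checker to leave the buffers' tiling flags untouched. A hedged sketch of how userspace could supply that chunk (struct drm_radeon_cs_chunk as in the radeon UAPI; the IB and reloc chunks are omitted):

	uint32_t cs_flags = RADEON_CS_KEEP_TILING_FLAGS;
	struct drm_radeon_cs_chunk flags_chunk = {
		.chunk_id   = RADEON_CHUNK_ID_FLAGS,
		.length_dw  = 1,
		.chunk_data = (uint64_t)(uintptr_t)&cs_flags,
	};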
diff --git a/drivers/gpu/drm/radeon/radeon_device.c b/drivers/gpu/drm/radeon/radeon_device.c
index c33bc914d93..c4d00a17141 100644
--- a/drivers/gpu/drm/radeon/radeon_device.c
+++ b/drivers/gpu/drm/radeon/radeon_device.c
@@ -716,7 +716,7 @@ int radeon_device_init(struct radeon_device *rdev,
/* mutex initialization are all done here so we
* can recall function without having locking issues */
- mutex_init(&rdev->cs_mutex);
+ radeon_mutex_init(&rdev->cs_mutex);
mutex_init(&rdev->ib_pool.mutex);
mutex_init(&rdev->cp.mutex);
mutex_init(&rdev->dc_hw_i2c_mutex);
@@ -955,6 +955,9 @@ int radeon_gpu_reset(struct radeon_device *rdev)
int r;
int resched;
+ /* Prevent CS ioctl from interfering */
+ radeon_mutex_lock(&rdev->cs_mutex);
+
radeon_save_bios_scratch_regs(rdev);
/* block TTM */
resched = ttm_bo_lock_delayed_workqueue(&rdev->mman.bdev);
@@ -967,10 +970,15 @@ int radeon_gpu_reset(struct radeon_device *rdev)
radeon_restore_bios_scratch_regs(rdev);
drm_helper_resume_force_mode(rdev->ddev);
ttm_bo_unlock_delayed_workqueue(&rdev->mman.bdev, resched);
- return 0;
}
- /* bad news, how to tell it to userspace ? */
- dev_info(rdev->dev, "GPU reset failed\n");
+
+ radeon_mutex_unlock(&rdev->cs_mutex);
+
+ if (r) {
+ /* bad news, how to tell it to userspace ? */
+ dev_info(rdev->dev, "GPU reset failed\n");
+ }
+
return r;
}
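With cs_mutex converted to the radeon_mutex wrapper, radeon_gpu_reset() can hold it across the whole reset and release it before reporting the result, so a concurrent CS ioctl cannot submit work mid-reset. The ordering of the changed path, condensed:

	radeon_mutex_lock(&rdev->cs_mutex);	/* block the CS ioctl */
	/* save scratch regs, block TTM, suspend, asic reset, resume ... */
	radeon_mutex_unlock(&rdev->cs_mutex);
	if (r)
		dev_info(rdev->dev, "GPU reset failed\n");
	return r;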
diff --git a/drivers/gpu/drm/radeon/radeon_drv.c b/drivers/gpu/drm/radeon/radeon_drv.c
index a0b35e90948..71499fc3daf 100644
--- a/drivers/gpu/drm/radeon/radeon_drv.c
+++ b/drivers/gpu/drm/radeon/radeon_drv.c
@@ -53,9 +53,10 @@
* 2.9.0 - r600 tiling (s3tc,rgtc) working, SET_PREDICATION packet 3 on r600 + eg, backend query
* 2.10.0 - fusion 2D tiling
* 2.11.0 - backend map, initial compute support for the CS checker
+ * 2.12.0 - RADEON_CS_KEEP_TILING_FLAGS
*/
#define KMS_DRIVER_MAJOR 2
-#define KMS_DRIVER_MINOR 11
+#define KMS_DRIVER_MINOR 12
#define KMS_DRIVER_PATCHLEVEL 0
int radeon_driver_load_kms(struct drm_device *dev, unsigned long flags);
int radeon_driver_unload_kms(struct drm_device *dev);
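Since the flags chunk is only understood from KMS interface 2.12.0 on, userspace would typically gate its use on the reported driver version. A sketch using libdrm's drmGetVersion() (the use_cs_flags variable is hypothetical):

	drmVersionPtr ver = drmGetVersion(fd);
	bool use_cs_flags = ver &&
		(ver->version_major > 2 ||
		 (ver->version_major == 2 && ver->version_minor >= 12));
	drmFreeVersion(ver);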
diff --git a/drivers/gpu/drm/radeon/radeon_encoders.c b/drivers/gpu/drm/radeon/radeon_encoders.c
index 06e413e6a92..4b27efa4405 100644
--- a/drivers/gpu/drm/radeon/radeon_encoders.c
+++ b/drivers/gpu/drm/radeon/radeon_encoders.c
@@ -233,13 +233,12 @@ u16 radeon_encoder_get_dp_bridge_encoder_id(struct drm_encoder *encoder)
switch (radeon_encoder->encoder_id) {
case ENCODER_OBJECT_ID_TRAVIS:
case ENCODER_OBJECT_ID_NUTMEG:
- return true;
+ return radeon_encoder->encoder_id;
default:
- return false;
+ return ENCODER_OBJECT_ID_NONE;
}
}
-
- return false;
+ return ENCODER_OBJECT_ID_NONE;
}
void radeon_panel_mode_fixup(struct drm_encoder *encoder,
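Returning the encoder object id instead of a bool lets callers both detect a DP bridge and know which chip it is. A sketch of the caller pattern the new return type implies:

	u16 bridge = radeon_encoder_get_dp_bridge_encoder_id(encoder);

	if (bridge != ENCODER_OBJECT_ID_NONE) {
		/* bridge is ENCODER_OBJECT_ID_TRAVIS or ENCODER_OBJECT_ID_NUTMEG */
	}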
diff --git a/drivers/gpu/drm/radeon/radeon_legacy_crtc.c b/drivers/gpu/drm/radeon/radeon_legacy_crtc.c
index 41a5d48e657..daadf211104 100644
--- a/drivers/gpu/drm/radeon/radeon_legacy_crtc.c
+++ b/drivers/gpu/drm/radeon/radeon_legacy_crtc.c
@@ -991,12 +991,6 @@ static bool radeon_crtc_mode_fixup(struct drm_crtc *crtc,
struct drm_display_mode *mode,
struct drm_display_mode *adjusted_mode)
{
- struct drm_device *dev = crtc->dev;
- struct radeon_device *rdev = dev->dev_private;
-
- /* adjust pm to upcoming mode change */
- radeon_pm_compute_clocks(rdev);
-
if (!radeon_crtc_scaling_mode_fixup(crtc, mode, adjusted_mode))
return false;
return true;
diff --git a/drivers/gpu/drm/radeon/radeon_pm.c b/drivers/gpu/drm/radeon/radeon_pm.c
index 6fabe89fa6a..78a665bd951 100644
--- a/drivers/gpu/drm/radeon/radeon_pm.c
+++ b/drivers/gpu/drm/radeon/radeon_pm.c
@@ -53,6 +53,24 @@ static void radeon_pm_set_clocks(struct radeon_device *rdev);
#define ACPI_AC_CLASS "ac_adapter"
+int radeon_pm_get_type_index(struct radeon_device *rdev,
+ enum radeon_pm_state_type ps_type,
+ int instance)
+{
+ int i;
+ int found_instance = -1;
+
+ for (i = 0; i < rdev->pm.num_power_states; i++) {
+ if (rdev->pm.power_state[i].type == ps_type) {
+ found_instance++;
+ if (found_instance == instance)
+ return i;
+ }
+ }
+ /* return default if no match */
+ return rdev->pm.default_power_state_index;
+}
+
#ifdef CONFIG_ACPI
static int radeon_acpi_event(struct notifier_block *nb,
unsigned long val,
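radeon_pm_get_type_index() returns the index of the Nth power state of a given type, or the default state index when none matches, so callers can pick, for example, the first performance state without walking the table themselves. A minimal usage sketch (POWER_STATE_TYPE_PERFORMANCE is assumed to be one of the radeon_pm_state_type values):

	int idx = radeon_pm_get_type_index(rdev, POWER_STATE_TYPE_PERFORMANCE, 0);

	/* idx always indexes a valid entry of rdev->pm.power_state[]: the
	 * default state index is returned when no performance state exists */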
diff --git a/drivers/gpu/drm/radeon/rs600.c b/drivers/gpu/drm/radeon/rs600.c
index 481b99e89f6..b1053d64042 100644
--- a/drivers/gpu/drm/radeon/rs600.c
+++ b/drivers/gpu/drm/radeon/rs600.c
@@ -62,6 +62,7 @@ u32 rs600_page_flip(struct radeon_device *rdev, int crtc_id, u64 crtc_base)
{
struct radeon_crtc *radeon_crtc = rdev->mode_info.crtcs[crtc_id];
u32 tmp = RREG32(AVIVO_D1GRPH_UPDATE + radeon_crtc->crtc_offset);
+ int i;
/* Lock the graphics update lock */
tmp |= AVIVO_D1GRPH_UPDATE_LOCK;
@@ -74,7 +75,11 @@ u32 rs600_page_flip(struct radeon_device *rdev, int crtc_id, u64 crtc_base)
(u32)crtc_base);
/* Wait for update_pending to go high. */
- while (!(RREG32(AVIVO_D1GRPH_UPDATE + radeon_crtc->crtc_offset) & AVIVO_D1GRPH_SURFACE_UPDATE_PENDING));
+ for (i = 0; i < rdev->usec_timeout; i++) {
+ if (RREG32(AVIVO_D1GRPH_UPDATE + radeon_crtc->crtc_offset) & AVIVO_D1GRPH_SURFACE_UPDATE_PENDING)
+ break;
+ udelay(1);
+ }
DRM_DEBUG("Update pending now high. Unlocking vupdate_lock.\n");
/* Unlock the lock, so double-buffering can take place inside vblank */
diff --git a/drivers/gpu/drm/radeon/rv770.c b/drivers/gpu/drm/radeon/rv770.c
index a983f410ab8..23ae1c60ab3 100644
--- a/drivers/gpu/drm/radeon/rv770.c
+++ b/drivers/gpu/drm/radeon/rv770.c
@@ -47,6 +47,7 @@ u32 rv770_page_flip(struct radeon_device *rdev, int crtc_id, u64 crtc_base)
{
struct radeon_crtc *radeon_crtc = rdev->mode_info.crtcs[crtc_id];
u32 tmp = RREG32(AVIVO_D1GRPH_UPDATE + radeon_crtc->crtc_offset);
+ int i;
/* Lock the graphics update lock */
tmp |= AVIVO_D1GRPH_UPDATE_LOCK;
@@ -66,7 +67,11 @@ u32 rv770_page_flip(struct radeon_device *rdev, int crtc_id, u64 crtc_base)
(u32)crtc_base);
/* Wait for update_pending to go high. */
- while (!(RREG32(AVIVO_D1GRPH_UPDATE + radeon_crtc->crtc_offset) & AVIVO_D1GRPH_SURFACE_UPDATE_PENDING));
+ for (i = 0; i < rdev->usec_timeout; i++) {
+ if (RREG32(AVIVO_D1GRPH_UPDATE + radeon_crtc->crtc_offset) & AVIVO_D1GRPH_SURFACE_UPDATE_PENDING)
+ break;
+ udelay(1);
+ }
DRM_DEBUG("Update pending now high. Unlocking vupdate_lock.\n");
/* Unlock the lock, so double-buffering can take place inside vblank */
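Both rs600_page_flip() and rv770_page_flip() now bound the wait on the surface-update-pending bit by rdev->usec_timeout instead of spinning forever, so a bit that never latches can no longer hang the flip path. The idiom, reduced to its core (reg and pending_bit stand in for the AVIVO register and flag used above):

	for (i = 0; i < rdev->usec_timeout; i++) {
		if (RREG32(reg) & pending_bit)
			break;		/* bit latched, proceed with the flip */
		udelay(1);		/* back off ~1us between reads */
	}
	/* on timeout the code continues anyway; only the unbounded spin is gone */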