author		Grant Likely <grant.likely@secretlab.ca>	2010-05-22 00:36:56 -0600
committer	Grant Likely <grant.likely@secretlab.ca>	2010-05-22 00:36:56 -0600
commit		cf9b59e9d3e008591d1f54830f570982bb307a0d (patch)
tree		113478ce8fd8c832ba726ffdf59b82cb46356476 /drivers/gpu/drm
parent		44504b2bebf8b5823c59484e73096a7d6574471d (diff)
parent		f4b87dee923342505e1ddba8d34ce9de33e75050 (diff)
Merge remote branch 'origin' into secretlab/next-devicetree
Merging in current state of Linus' tree to deal with merge conflicts and
build failures in vio.c after merge.

Conflicts:
	drivers/i2c/busses/i2c-cpm.c
	drivers/i2c/busses/i2c-mpc.c
	drivers/net/gianfar.c

Also fixed up one line in arch/powerpc/kernel/vio.c to use the correct
node pointer.

Signed-off-by: Grant Likely <grant.likely@secretlab.ca>
Diffstat (limited to 'drivers/gpu/drm')
-rw-r--r--  drivers/gpu/drm/Kconfig | 2
-rw-r--r--  drivers/gpu/drm/drm_auth.c | 3
-rw-r--r--  drivers/gpu/drm/drm_bufs.c | 2
-rw-r--r--  drivers/gpu/drm/drm_crtc.c | 9
-rw-r--r--  drivers/gpu/drm/drm_crtc_helper.c | 490
-rw-r--r--  drivers/gpu/drm/drm_dma.c | 4
-rw-r--r--  drivers/gpu/drm/drm_edid.c | 802
-rw-r--r--  drivers/gpu/drm/drm_fb_helper.c | 908
-rw-r--r--  drivers/gpu/drm/drm_fops.c | 3
-rw-r--r--  drivers/gpu/drm/drm_gem.c | 49
-rw-r--r--  drivers/gpu/drm/drm_irq.c | 1
-rw-r--r--  drivers/gpu/drm/drm_memory.c | 2
-rw-r--r--  drivers/gpu/drm/drm_modes.c | 105
-rw-r--r--  drivers/gpu/drm/drm_stub.c | 4
-rw-r--r--  drivers/gpu/drm/drm_sysfs.c | 28
-rw-r--r--  drivers/gpu/drm/i915/Makefile | 2
-rw-r--r--  drivers/gpu/drm/i915/dvo.h | 10
-rw-r--r--  drivers/gpu/drm/i915/dvo_ch7017.c | 46
-rw-r--r--  drivers/gpu/drm/i915/dvo_ch7xxx.c | 44
-rw-r--r--  drivers/gpu/drm/i915/dvo_ivch.c | 21
-rw-r--r--  drivers/gpu/drm/i915/dvo_sil164.c | 38
-rw-r--r--  drivers/gpu/drm/i915/dvo_tfp410.c | 32
-rw-r--r--  drivers/gpu/drm/i915/i915_debugfs.c | 28
-rw-r--r--  drivers/gpu/drm/i915/i915_dma.c | 29
-rw-r--r--  drivers/gpu/drm/i915/i915_drv.c | 34
-rw-r--r--  drivers/gpu/drm/i915/i915_drv.h | 48
-rw-r--r--  drivers/gpu/drm/i915/i915_gem.c | 303
-rw-r--r--  drivers/gpu/drm/i915/i915_gem_debug.c | 2
-rw-r--r--  drivers/gpu/drm/i915/i915_gem_tiling.c | 27
-rw-r--r--  drivers/gpu/drm/i915/i915_irq.c | 39
-rw-r--r--  drivers/gpu/drm/i915/i915_opregion.c | 54
-rw-r--r--  drivers/gpu/drm/i915/i915_reg.h | 156
-rw-r--r--  drivers/gpu/drm/i915/i915_suspend.c | 41
-rw-r--r--  drivers/gpu/drm/i915/i915_trace.h | 88
-rw-r--r--  drivers/gpu/drm/i915/intel_bios.c | 1
-rw-r--r--  drivers/gpu/drm/i915/intel_crt.c | 93
-rw-r--r--  drivers/gpu/drm/i915/intel_display.c | 1079
-rw-r--r--  drivers/gpu/drm/i915/intel_dp.c | 256
-rw-r--r--  drivers/gpu/drm/i915/intel_drv.h | 31
-rw-r--r--  drivers/gpu/drm/i915/intel_dvo.c | 103
-rw-r--r--  drivers/gpu/drm/i915/intel_fb.c | 217
-rw-r--r--  drivers/gpu/drm/i915/intel_hdmi.c | 71
-rw-r--r--  drivers/gpu/drm/i915/intel_lvds.c | 111
-rw-r--r--  drivers/gpu/drm/i915/intel_modes.c | 21
-rw-r--r--  drivers/gpu/drm/i915/intel_overlay.c | 8
-rw-r--r--  drivers/gpu/drm/i915/intel_sdvo.c | 1009
-rw-r--r--  drivers/gpu/drm/i915/intel_tv.c | 185
-rw-r--r--  drivers/gpu/drm/nouveau/Makefile | 3
-rw-r--r--  drivers/gpu/drm/nouveau/nouveau_bios.c | 500
-rw-r--r--  drivers/gpu/drm/nouveau/nouveau_bios.h | 1
-rw-r--r--  drivers/gpu/drm/nouveau/nouveau_bo.c | 116
-rw-r--r--  drivers/gpu/drm/nouveau/nouveau_connector.c | 12
-rw-r--r--  drivers/gpu/drm/nouveau/nouveau_debugfs.c | 3
-rw-r--r--  drivers/gpu/drm/nouveau/nouveau_display.c | 42
-rw-r--r--  drivers/gpu/drm/nouveau/nouveau_drv.c | 19
-rw-r--r--  drivers/gpu/drm/nouveau/nouveau_drv.h | 10
-rw-r--r--  drivers/gpu/drm/nouveau/nouveau_encoder.h | 2
-rw-r--r--  drivers/gpu/drm/nouveau/nouveau_fb.h | 6
-rw-r--r--  drivers/gpu/drm/nouveau/nouveau_fbcon.c | 259
-rw-r--r--  drivers/gpu/drm/nouveau/nouveau_fbcon.h | 19
-rw-r--r--  drivers/gpu/drm/nouveau/nouveau_gem.c | 5
-rw-r--r--  drivers/gpu/drm/nouveau/nouveau_grctx.c | 6
-rw-r--r--  drivers/gpu/drm/nouveau/nouveau_i2c.c | 21
-rw-r--r--  drivers/gpu/drm/nouveau/nouveau_irq.c | 10
-rw-r--r--  drivers/gpu/drm/nouveau/nouveau_reg.h | 1
-rw-r--r--  drivers/gpu/drm/nouveau/nouveau_state.c | 58
-rw-r--r--  drivers/gpu/drm/nouveau/nv04_fbcon.c | 18
-rw-r--r--  drivers/gpu/drm/nouveau/nv04_graph.c | 566
-rw-r--r--  drivers/gpu/drm/nouveau/nv40_grctx.c | 5
-rw-r--r--  drivers/gpu/drm/nouveau/nv50_calc.c | 87
-rw-r--r--  drivers/gpu/drm/nouveau/nv50_crtc.c | 46
-rw-r--r--  drivers/gpu/drm/nouveau/nv50_display.c | 36
-rw-r--r--  drivers/gpu/drm/nouveau/nv50_fbcon.c | 16
-rw-r--r--  drivers/gpu/drm/nouveau/nv50_sor.c | 15
-rw-r--r--  drivers/gpu/drm/radeon/atombios.h | 82
-rw-r--r--  drivers/gpu/drm/radeon/atombios_crtc.c | 22
-rw-r--r--  drivers/gpu/drm/radeon/atombios_dp.c | 2
-rw-r--r--  drivers/gpu/drm/radeon/evergreen.c | 1549
-rw-r--r--  drivers/gpu/drm/radeon/evergreen_reg.h | 4
-rw-r--r--  drivers/gpu/drm/radeon/evergreend.h | 556
-rw-r--r--  drivers/gpu/drm/radeon/r100.c | 731
-rw-r--r--  drivers/gpu/drm/radeon/r100_track.h | 2
-rw-r--r--  drivers/gpu/drm/radeon/r100d.h | 164
-rw-r--r--  drivers/gpu/drm/radeon/r300.c | 158
-rw-r--r--  drivers/gpu/drm/radeon/r300_cmdbuf.c | 2
-rw-r--r--  drivers/gpu/drm/radeon/r300d.h | 47
-rw-r--r--  drivers/gpu/drm/radeon/r420.c | 42
-rw-r--r--  drivers/gpu/drm/radeon/r500_reg.h | 3
-rw-r--r--  drivers/gpu/drm/radeon/r520.c | 7
-rw-r--r--  drivers/gpu/drm/radeon/r600.c | 600
-rw-r--r--  drivers/gpu/drm/radeon/r600_audio.c | 58
-rw-r--r--  drivers/gpu/drm/radeon/r600_blit_kms.c | 3
-rw-r--r--  drivers/gpu/drm/radeon/r600_hdmi.c | 65
-rw-r--r--  drivers/gpu/drm/radeon/r600_reg.h | 57
-rw-r--r--  drivers/gpu/drm/radeon/radeon.h | 258
-rw-r--r--  drivers/gpu/drm/radeon/radeon_agp.c | 10
-rw-r--r--  drivers/gpu/drm/radeon/radeon_asic.c | 140
-rw-r--r--  drivers/gpu/drm/radeon/radeon_asic.h | 44
-rw-r--r--  drivers/gpu/drm/radeon/radeon_atombios.c | 246
-rw-r--r--  drivers/gpu/drm/radeon/radeon_bios.c | 3
-rw-r--r--  drivers/gpu/drm/radeon/radeon_combios.c | 64
-rw-r--r--  drivers/gpu/drm/radeon/radeon_connectors.c | 65
-rw-r--r--  drivers/gpu/drm/radeon/radeon_cp.c | 9
-rw-r--r--  drivers/gpu/drm/radeon/radeon_cs.c | 4
-rw-r--r--  drivers/gpu/drm/radeon/radeon_device.c | 61
-rw-r--r--  drivers/gpu/drm/radeon/radeon_display.c | 137
-rw-r--r--  drivers/gpu/drm/radeon/radeon_drv.c | 9
-rw-r--r--  drivers/gpu/drm/radeon/radeon_encoders.c | 107
-rw-r--r--  drivers/gpu/drm/radeon/radeon_fb.c | 358
-rw-r--r--  drivers/gpu/drm/radeon/radeon_fence.c | 107
-rw-r--r--  drivers/gpu/drm/radeon/radeon_fixed.h | 67
-rw-r--r--  drivers/gpu/drm/radeon/radeon_gart.c | 2
-rw-r--r--  drivers/gpu/drm/radeon/radeon_gem.c | 6
-rw-r--r--  drivers/gpu/drm/radeon/radeon_irq_kms.c | 5
-rw-r--r--  drivers/gpu/drm/radeon/radeon_kms.c | 24
-rw-r--r--  drivers/gpu/drm/radeon/radeon_legacy_crtc.c | 14
-rw-r--r--  drivers/gpu/drm/radeon/radeon_legacy_encoders.c | 27
-rw-r--r--  drivers/gpu/drm/radeon/radeon_mode.h | 51
-rw-r--r--  drivers/gpu/drm/radeon/radeon_object.c | 44
-rw-r--r--  drivers/gpu/drm/radeon/radeon_object.h | 2
-rw-r--r--  drivers/gpu/drm/radeon/radeon_pm.c | 814
-rw-r--r--  drivers/gpu/drm/radeon/radeon_reg.h | 4
-rw-r--r--  drivers/gpu/drm/radeon/radeon_ring.c | 66
-rw-r--r--  drivers/gpu/drm/radeon/radeon_state.c | 19
-rw-r--r--  drivers/gpu/drm/radeon/radeon_ttm.c | 122
-rw-r--r--  drivers/gpu/drm/radeon/rs400.c | 9
-rw-r--r--  drivers/gpu/drm/radeon/rs600.c | 231
-rw-r--r--  drivers/gpu/drm/radeon/rs600d.h | 80
-rw-r--r--  drivers/gpu/drm/radeon/rs690.c | 289
-rw-r--r--  drivers/gpu/drm/radeon/rv515.c | 287
-rw-r--r--  drivers/gpu/drm/radeon/rv515d.h | 46
-rw-r--r--  drivers/gpu/drm/radeon/rv770.c | 28
-rw-r--r--  drivers/gpu/drm/savage/savage_bci.c | 3
-rw-r--r--  drivers/gpu/drm/ttm/Makefile | 2
-rw-r--r--  drivers/gpu/drm/ttm/ttm_bo.c | 128
-rw-r--r--  drivers/gpu/drm/ttm/ttm_bo_util.c | 122
-rw-r--r--  drivers/gpu/drm/ttm/ttm_bo_vm.c | 41
-rw-r--r--  drivers/gpu/drm/ttm/ttm_lock.c | 5
-rw-r--r--  drivers/gpu/drm/ttm/ttm_memory.c | 7
-rw-r--r--  drivers/gpu/drm/ttm/ttm_page_alloc.c | 845
-rw-r--r--  drivers/gpu/drm/ttm/ttm_tt.c | 44
-rw-r--r--  drivers/gpu/drm/via/via_video.c | 2
-rw-r--r--  drivers/gpu/drm/vmwgfx/vmwgfx_buffer.c | 50
-rw-r--r--  drivers/gpu/drm/vmwgfx/vmwgfx_execbuf.c | 4
-rw-r--r--  drivers/gpu/drm/vmwgfx/vmwgfx_fb.c | 14
-rw-r--r--  drivers/gpu/drm/vmwgfx/vmwgfx_kms.c | 6
-rw-r--r--  drivers/gpu/drm/vmwgfx/vmwgfx_overlay.c | 2
147 files changed, 12368 insertions, 5441 deletions
diff --git a/drivers/gpu/drm/Kconfig b/drivers/gpu/drm/Kconfig
index 305c5900396..88910e5a2c7 100644
--- a/drivers/gpu/drm/Kconfig
+++ b/drivers/gpu/drm/Kconfig
@@ -9,6 +9,7 @@ menuconfig DRM
depends on (AGP || AGP=n) && PCI && !EMULATED_CMPXCHG && MMU
select I2C
select I2C_ALGOBIT
+ select SLOW_WORK
help
Kernel-level support for the Direct Rendering Infrastructure (DRI)
introduced in XFree86 4.0. If you say Y here, you need to select
@@ -59,6 +60,7 @@ config DRM_RADEON
select FW_LOADER
select DRM_KMS_HELPER
select DRM_TTM
+ select POWER_SUPPLY
help
Choose this option if you have an ATI Radeon graphics card. There
are both PCI and AGP versions. You don't need to choose this to
diff --git a/drivers/gpu/drm/drm_auth.c b/drivers/gpu/drm/drm_auth.c
index 932b5aa96a6..3f46772f0cb 100644
--- a/drivers/gpu/drm/drm_auth.c
+++ b/drivers/gpu/drm/drm_auth.c
@@ -79,10 +79,9 @@ static int drm_add_magic(struct drm_master *master, struct drm_file *priv,
struct drm_device *dev = master->minor->dev;
DRM_DEBUG("%d\n", magic);
- entry = kmalloc(sizeof(*entry), GFP_KERNEL);
+ entry = kzalloc(sizeof(*entry), GFP_KERNEL);
if (!entry)
return -ENOMEM;
- memset(entry, 0, sizeof(*entry));
entry->priv = priv;
entry->hash_item.key = (unsigned long)magic;
mutex_lock(&dev->struct_mutex);
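
The drm_auth.c hunk above collapses an open-coded kmalloc()-plus-memset() pair into a single kzalloc() call. A minimal sketch of the pattern, outside the patch (struct example is a stand-in, not a real DRM type):

#include <linux/slab.h>
#include <linux/string.h>

struct example {
	void *priv;
	unsigned long key;
};

/* old style: allocate, then zero by hand */
static struct example *example_alloc_old(void)
{
	struct example *e = kmalloc(sizeof(*e), GFP_KERNEL);

	if (!e)
		return NULL;
	memset(e, 0, sizeof(*e));
	return e;
}

/* new style: kzalloc() hands back memory that is already zeroed */
static struct example *example_alloc_new(void)
{
	return kzalloc(sizeof(struct example), GFP_KERNEL);
}

The same simplification is applied to drm_dma.c further down.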
diff --git a/drivers/gpu/drm/drm_bufs.c b/drivers/gpu/drm/drm_bufs.c
index f7ba82ebf65..2092e7bb788 100644
--- a/drivers/gpu/drm/drm_bufs.c
+++ b/drivers/gpu/drm/drm_bufs.c
@@ -961,7 +961,7 @@ int drm_addbufs_pci(struct drm_device * dev, struct drm_buf_desc * request)
dma->buflist[i + dma->buf_count] = &entry->buflist[i];
}
- /* No allocations failed, so now we can replace the orginal pagelist
+ /* No allocations failed, so now we can replace the original pagelist
* with the new one.
*/
if (dma->page_count) {
diff --git a/drivers/gpu/drm/drm_crtc.c b/drivers/gpu/drm/drm_crtc.c
index 61b9bcfdf04..994d23beeb1 100644
--- a/drivers/gpu/drm/drm_crtc.c
+++ b/drivers/gpu/drm/drm_crtc.c
@@ -34,6 +34,7 @@
#include "drm.h"
#include "drmP.h"
#include "drm_crtc.h"
+#include "drm_edid.h"
struct drm_prop_enum_list {
int type;
@@ -494,7 +495,6 @@ void drm_connector_cleanup(struct drm_connector *connector)
list_for_each_entry_safe(mode, t, &connector->user_modes, head)
drm_mode_remove(connector, mode);
- kfree(connector->fb_helper_private);
mutex_lock(&dev->mode_config.mutex);
drm_mode_object_put(dev, &connector->base);
list_del(&connector->head);
@@ -858,7 +858,6 @@ void drm_mode_config_init(struct drm_device *dev)
mutex_init(&dev->mode_config.mutex);
mutex_init(&dev->mode_config.idr_mutex);
INIT_LIST_HEAD(&dev->mode_config.fb_list);
- INIT_LIST_HEAD(&dev->mode_config.fb_kernel_list);
INIT_LIST_HEAD(&dev->mode_config.crtc_list);
INIT_LIST_HEAD(&dev->mode_config.connector_list);
INIT_LIST_HEAD(&dev->mode_config.encoder_list);
@@ -2350,7 +2349,7 @@ int drm_mode_connector_update_edid_property(struct drm_connector *connector,
struct edid *edid)
{
struct drm_device *dev = connector->dev;
- int ret = 0;
+ int ret = 0, size;
if (connector->edid_blob_ptr)
drm_property_destroy_blob(dev, connector->edid_blob_ptr);
@@ -2362,7 +2361,9 @@ int drm_mode_connector_update_edid_property(struct drm_connector *connector,
return ret;
}
- connector->edid_blob_ptr = drm_property_create_blob(connector->dev, 128, edid);
+ size = EDID_LENGTH * (1 + edid->extensions);
+ connector->edid_blob_ptr = drm_property_create_blob(connector->dev,
+ size, edid);
ret = drm_connector_property_set_value(connector,
dev->mode_config.edid_property,
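
The drm_crtc.c change above sizes the EDID property blob from the actual record instead of assuming a single 128-byte base block. A sketch of the sizing rule it adopts (byte 0x7e of the base block counts the extension blocks that follow; edid_total_size is an illustrative helper, not from the patch):

#include <linux/types.h>

#define EDID_LENGTH 128	/* bytes per EDID block */

/* total record size = base block + extension blocks */
static size_t edid_total_size(const u8 *raw_edid)
{
	return EDID_LENGTH * (size_t)(1 + raw_edid[0x7e]);
}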
diff --git a/drivers/gpu/drm/drm_crtc_helper.c b/drivers/gpu/drm/drm_crtc_helper.c
index 51103aa469f..76440195104 100644
--- a/drivers/gpu/drm/drm_crtc_helper.c
+++ b/drivers/gpu/drm/drm_crtc_helper.c
@@ -55,7 +55,7 @@ static void drm_mode_validate_flag(struct drm_connector *connector,
}
/**
- * drm_helper_probe_connector_modes - get complete set of display modes
+ * drm_helper_probe_single_connector_modes - get complete set of display modes
* @dev: DRM device
* @maxX: max width for modes
* @maxY: max height for modes
@@ -154,21 +154,6 @@ prune:
}
EXPORT_SYMBOL(drm_helper_probe_single_connector_modes);
-int drm_helper_probe_connector_modes(struct drm_device *dev, uint32_t maxX,
- uint32_t maxY)
-{
- struct drm_connector *connector;
- int count = 0;
-
- list_for_each_entry(connector, &dev->mode_config.connector_list, head) {
- count += drm_helper_probe_single_connector_modes(connector,
- maxX, maxY);
- }
-
- return count;
-}
-EXPORT_SYMBOL(drm_helper_probe_connector_modes);
-
/**
* drm_helper_encoder_in_use - check if a given encoder is in use
* @encoder: encoder to check
@@ -263,302 +248,6 @@ void drm_helper_disable_unused_functions(struct drm_device *dev)
}
EXPORT_SYMBOL(drm_helper_disable_unused_functions);
-static struct drm_display_mode *drm_has_preferred_mode(struct drm_connector *connector, int width, int height)
-{
- struct drm_display_mode *mode;
-
- list_for_each_entry(mode, &connector->modes, head) {
- if (drm_mode_width(mode) > width ||
- drm_mode_height(mode) > height)
- continue;
- if (mode->type & DRM_MODE_TYPE_PREFERRED)
- return mode;
- }
- return NULL;
-}
-
-static bool drm_has_cmdline_mode(struct drm_connector *connector)
-{
- struct drm_fb_helper_connector *fb_help_conn = connector->fb_helper_private;
- struct drm_fb_helper_cmdline_mode *cmdline_mode;
-
- if (!fb_help_conn)
- return false;
-
- cmdline_mode = &fb_help_conn->cmdline_mode;
- return cmdline_mode->specified;
-}
-
-static struct drm_display_mode *drm_pick_cmdline_mode(struct drm_connector *connector, int width, int height)
-{
- struct drm_fb_helper_connector *fb_help_conn = connector->fb_helper_private;
- struct drm_fb_helper_cmdline_mode *cmdline_mode;
- struct drm_display_mode *mode = NULL;
-
- if (!fb_help_conn)
- return mode;
-
- cmdline_mode = &fb_help_conn->cmdline_mode;
- if (cmdline_mode->specified == false)
- return mode;
-
- /* attempt to find a matching mode in the list of modes
- * we have gotten so far, if not add a CVT mode that conforms
- */
- if (cmdline_mode->rb || cmdline_mode->margins)
- goto create_mode;
-
- list_for_each_entry(mode, &connector->modes, head) {
- /* check width/height */
- if (mode->hdisplay != cmdline_mode->xres ||
- mode->vdisplay != cmdline_mode->yres)
- continue;
-
- if (cmdline_mode->refresh_specified) {
- if (mode->vrefresh != cmdline_mode->refresh)
- continue;
- }
-
- if (cmdline_mode->interlace) {
- if (!(mode->flags & DRM_MODE_FLAG_INTERLACE))
- continue;
- }
- return mode;
- }
-
-create_mode:
- mode = drm_cvt_mode(connector->dev, cmdline_mode->xres,
- cmdline_mode->yres,
- cmdline_mode->refresh_specified ? cmdline_mode->refresh : 60,
- cmdline_mode->rb, cmdline_mode->interlace,
- cmdline_mode->margins);
- drm_mode_set_crtcinfo(mode, CRTC_INTERLACE_HALVE_V);
- list_add(&mode->head, &connector->modes);
- return mode;
-}
-
-static bool drm_connector_enabled(struct drm_connector *connector, bool strict)
-{
- bool enable;
-
- if (strict) {
- enable = connector->status == connector_status_connected;
- } else {
- enable = connector->status != connector_status_disconnected;
- }
- return enable;
-}
-
-static void drm_enable_connectors(struct drm_device *dev, bool *enabled)
-{
- bool any_enabled = false;
- struct drm_connector *connector;
- int i = 0;
-
- list_for_each_entry(connector, &dev->mode_config.connector_list, head) {
- enabled[i] = drm_connector_enabled(connector, true);
- DRM_DEBUG_KMS("connector %d enabled? %s\n", connector->base.id,
- enabled[i] ? "yes" : "no");
- any_enabled |= enabled[i];
- i++;
- }
-
- if (any_enabled)
- return;
-
- i = 0;
- list_for_each_entry(connector, &dev->mode_config.connector_list, head) {
- enabled[i] = drm_connector_enabled(connector, false);
- i++;
- }
-}
-
-static bool drm_target_preferred(struct drm_device *dev,
- struct drm_display_mode **modes,
- bool *enabled, int width, int height)
-{
- struct drm_connector *connector;
- int i = 0;
-
- list_for_each_entry(connector, &dev->mode_config.connector_list, head) {
-
- if (enabled[i] == false) {
- i++;
- continue;
- }
-
- DRM_DEBUG_KMS("looking for cmdline mode on connector %d\n",
- connector->base.id);
-
- /* got for command line mode first */
- modes[i] = drm_pick_cmdline_mode(connector, width, height);
- if (!modes[i]) {
- DRM_DEBUG_KMS("looking for preferred mode on connector %d\n",
- connector->base.id);
- modes[i] = drm_has_preferred_mode(connector, width, height);
- }
- /* No preferred modes, pick one off the list */
- if (!modes[i] && !list_empty(&connector->modes)) {
- list_for_each_entry(modes[i], &connector->modes, head)
- break;
- }
- DRM_DEBUG_KMS("found mode %s\n", modes[i] ? modes[i]->name :
- "none");
- i++;
- }
- return true;
-}
-
-static int drm_pick_crtcs(struct drm_device *dev,
- struct drm_crtc **best_crtcs,
- struct drm_display_mode **modes,
- int n, int width, int height)
-{
- int c, o;
- struct drm_connector *connector;
- struct drm_connector_helper_funcs *connector_funcs;
- struct drm_encoder *encoder;
- struct drm_crtc *best_crtc;
- int my_score, best_score, score;
- struct drm_crtc **crtcs, *crtc;
-
- if (n == dev->mode_config.num_connector)
- return 0;
- c = 0;
- list_for_each_entry(connector, &dev->mode_config.connector_list, head) {
- if (c == n)
- break;
- c++;
- }
-
- best_crtcs[n] = NULL;
- best_crtc = NULL;
- best_score = drm_pick_crtcs(dev, best_crtcs, modes, n+1, width, height);
- if (modes[n] == NULL)
- return best_score;
-
- crtcs = kmalloc(dev->mode_config.num_connector *
- sizeof(struct drm_crtc *), GFP_KERNEL);
- if (!crtcs)
- return best_score;
-
- my_score = 1;
- if (connector->status == connector_status_connected)
- my_score++;
- if (drm_has_cmdline_mode(connector))
- my_score++;
- if (drm_has_preferred_mode(connector, width, height))
- my_score++;
-
- connector_funcs = connector->helper_private;
- encoder = connector_funcs->best_encoder(connector);
- if (!encoder)
- goto out;
-
- connector->encoder = encoder;
-
- /* select a crtc for this connector and then attempt to configure
- remaining connectors */
- c = 0;
- list_for_each_entry(crtc, &dev->mode_config.crtc_list, head) {
-
- if ((encoder->possible_crtcs & (1 << c)) == 0) {
- c++;
- continue;
- }
-
- for (o = 0; o < n; o++)
- if (best_crtcs[o] == crtc)
- break;
-
- if (o < n) {
- /* ignore cloning for now */
- c++;
- continue;
- }
-
- crtcs[n] = crtc;
- memcpy(crtcs, best_crtcs, n * sizeof(struct drm_crtc *));
- score = my_score + drm_pick_crtcs(dev, crtcs, modes, n + 1,
- width, height);
- if (score > best_score) {
- best_crtc = crtc;
- best_score = score;
- memcpy(best_crtcs, crtcs,
- dev->mode_config.num_connector *
- sizeof(struct drm_crtc *));
- }
- c++;
- }
-out:
- kfree(crtcs);
- return best_score;
-}
-
-static void drm_setup_crtcs(struct drm_device *dev)
-{
- struct drm_crtc **crtcs;
- struct drm_display_mode **modes;
- struct drm_encoder *encoder;
- struct drm_connector *connector;
- bool *enabled;
- int width, height;
- int i, ret;
-
- DRM_DEBUG_KMS("\n");
-
- width = dev->mode_config.max_width;
- height = dev->mode_config.max_height;
-
- /* clean out all the encoder/crtc combos */
- list_for_each_entry(encoder, &dev->mode_config.encoder_list, head) {
- encoder->crtc = NULL;
- }
-
- crtcs = kcalloc(dev->mode_config.num_connector,
- sizeof(struct drm_crtc *), GFP_KERNEL);
- modes = kcalloc(dev->mode_config.num_connector,
- sizeof(struct drm_display_mode *), GFP_KERNEL);
- enabled = kcalloc(dev->mode_config.num_connector,
- sizeof(bool), GFP_KERNEL);
-
- drm_enable_connectors(dev, enabled);
-
- ret = drm_target_preferred(dev, modes, enabled, width, height);
- if (!ret)
- DRM_ERROR("Unable to find initial modes\n");
-
- DRM_DEBUG_KMS("picking CRTCs for %dx%d config\n", width, height);
-
- drm_pick_crtcs(dev, crtcs, modes, 0, width, height);
-
- i = 0;
- list_for_each_entry(connector, &dev->mode_config.connector_list, head) {
- struct drm_display_mode *mode = modes[i];
- struct drm_crtc *crtc = crtcs[i];
-
- if (connector->encoder == NULL) {
- i++;
- continue;
- }
-
- if (mode && crtc) {
- DRM_DEBUG_KMS("desired mode %s set on crtc %d\n",
- mode->name, crtc->base.id);
- crtc->desired_mode = mode;
- connector->encoder->crtc = crtc;
- } else {
- connector->encoder->crtc = NULL;
- connector->encoder = NULL;
- }
- i++;
- }
-
- kfree(crtcs);
- kfree(modes);
- kfree(enabled);
-}
-
/**
* drm_encoder_crtc_ok - can a given crtc drive a given encoder?
* @encoder: encoder to test
@@ -936,10 +625,6 @@ int drm_crtc_helper_set_config(struct drm_mode_set *set)
ret = -EINVAL;
goto fail;
}
- /* TODO are these needed? */
- set->crtc->desired_x = set->x;
- set->crtc->desired_y = set->y;
- set->crtc->desired_mode = set->mode;
}
drm_helper_disable_unused_functions(dev);
} else if (fb_changed) {
@@ -984,63 +669,6 @@ fail:
}
EXPORT_SYMBOL(drm_crtc_helper_set_config);
-bool drm_helper_plugged_event(struct drm_device *dev)
-{
- DRM_DEBUG_KMS("\n");
-
- drm_helper_probe_connector_modes(dev, dev->mode_config.max_width,
- dev->mode_config.max_height);
-
- drm_setup_crtcs(dev);
-
- /* alert the driver fb layer */
- dev->mode_config.funcs->fb_changed(dev);
-
- /* FIXME: send hotplug event */
- return true;
-}
-/**
- * drm_initial_config - setup a sane initial connector configuration
- * @dev: DRM device
- *
- * LOCKING:
- * Called at init time, must take mode config lock.
- *
- * Scan the CRTCs and connectors and try to put together an initial setup.
- * At the moment, this is a cloned configuration across all heads with
- * a new framebuffer object as the backing store.
- *
- * RETURNS:
- * Zero if everything went ok, nonzero otherwise.
- */
-bool drm_helper_initial_config(struct drm_device *dev)
-{
- int count = 0;
-
- /* disable all the possible outputs/crtcs before entering KMS mode */
- drm_helper_disable_unused_functions(dev);
-
- drm_fb_helper_parse_command_line(dev);
-
- count = drm_helper_probe_connector_modes(dev,
- dev->mode_config.max_width,
- dev->mode_config.max_height);
-
- /*
- * we shouldn't end up with no modes here.
- */
- if (count == 0)
- printk(KERN_INFO "No connectors reported connected with modes\n");
-
- drm_setup_crtcs(dev);
-
- /* alert the driver fb layer */
- dev->mode_config.funcs->fb_changed(dev);
-
- return 0;
-}
-EXPORT_SYMBOL(drm_helper_initial_config);
-
static int drm_helper_choose_encoder_dpms(struct drm_encoder *encoder)
{
int dpms = DRM_MODE_DPMS_OFF;
@@ -1123,27 +751,6 @@ void drm_helper_connector_dpms(struct drm_connector *connector, int mode)
}
EXPORT_SYMBOL(drm_helper_connector_dpms);
-/**
- * drm_hotplug_stage_two
- * @dev DRM device
- * @connector hotpluged connector
- *
- * LOCKING.
- * Caller must hold mode config lock, function might grab struct lock.
- *
- * Stage two of a hotplug.
- *
- * RETURNS:
- * Zero on success, errno on failure.
- */
-int drm_helper_hotplug_stage_two(struct drm_device *dev)
-{
- drm_helper_plugged_event(dev);
-
- return 0;
-}
-EXPORT_SYMBOL(drm_helper_hotplug_stage_two);
-
int drm_helper_mode_fill_fb_struct(struct drm_framebuffer *fb,
struct drm_mode_fb_cmd *mode_cmd)
{
@@ -1200,3 +807,98 @@ int drm_helper_resume_force_mode(struct drm_device *dev)
return 0;
}
EXPORT_SYMBOL(drm_helper_resume_force_mode);
+
+static struct slow_work_ops output_poll_ops;
+
+#define DRM_OUTPUT_POLL_PERIOD (10*HZ)
+static void output_poll_execute(struct slow_work *work)
+{
+ struct delayed_slow_work *delayed_work = container_of(work, struct delayed_slow_work, work);
+ struct drm_device *dev = container_of(delayed_work, struct drm_device, mode_config.output_poll_slow_work);
+ struct drm_connector *connector;
+ enum drm_connector_status old_status, status;
+ bool repoll = false, changed = false;
+ int ret;
+
+ mutex_lock(&dev->mode_config.mutex);
+ list_for_each_entry(connector, &dev->mode_config.connector_list, head) {
+
+ /* if this is HPD or polled don't check it -
+ TV out for instance */
+ if (!connector->polled)
+ continue;
+
+ else if (connector->polled & (DRM_CONNECTOR_POLL_CONNECT | DRM_CONNECTOR_POLL_DISCONNECT))
+ repoll = true;
+
+ old_status = connector->status;
+ /* if we are connected and don't want to poll for disconnect
+ skip it */
+ if (old_status == connector_status_connected &&
+ !(connector->polled & DRM_CONNECTOR_POLL_DISCONNECT) &&
+ !(connector->polled & DRM_CONNECTOR_POLL_HPD))
+ continue;
+
+ status = connector->funcs->detect(connector);
+ if (old_status != status)
+ changed = true;
+ }
+
+ mutex_unlock(&dev->mode_config.mutex);
+
+ if (changed) {
+ /* send a uevent + call fbdev */
+ drm_sysfs_hotplug_event(dev);
+ if (dev->mode_config.funcs->output_poll_changed)
+ dev->mode_config.funcs->output_poll_changed(dev);
+ }
+
+ if (repoll) {
+ ret = delayed_slow_work_enqueue(delayed_work, DRM_OUTPUT_POLL_PERIOD);
+ if (ret)
+ DRM_ERROR("delayed enqueue failed %d\n", ret);
+ }
+}
+
+void drm_kms_helper_poll_init(struct drm_device *dev)
+{
+ struct drm_connector *connector;
+ bool poll = false;
+ int ret;
+
+ list_for_each_entry(connector, &dev->mode_config.connector_list, head) {
+ if (connector->polled)
+ poll = true;
+ }
+ slow_work_register_user(THIS_MODULE);
+ delayed_slow_work_init(&dev->mode_config.output_poll_slow_work,
+ &output_poll_ops);
+
+ if (poll) {
+ ret = delayed_slow_work_enqueue(&dev->mode_config.output_poll_slow_work, DRM_OUTPUT_POLL_PERIOD);
+ if (ret)
+ DRM_ERROR("delayed enqueue failed %d\n", ret);
+ }
+}
+EXPORT_SYMBOL(drm_kms_helper_poll_init);
+
+void drm_kms_helper_poll_fini(struct drm_device *dev)
+{
+ delayed_slow_work_cancel(&dev->mode_config.output_poll_slow_work);
+ slow_work_unregister_user(THIS_MODULE);
+}
+EXPORT_SYMBOL(drm_kms_helper_poll_fini);
+
+void drm_helper_hpd_irq_event(struct drm_device *dev)
+{
+ if (!dev->mode_config.poll_enabled)
+ return;
+ delayed_slow_work_cancel(&dev->mode_config.output_poll_slow_work);
+ /* schedule a slow work asap */
+ delayed_slow_work_enqueue(&dev->mode_config.output_poll_slow_work, 0);
+}
+EXPORT_SYMBOL(drm_helper_hpd_irq_event);
+
+static struct slow_work_ops output_poll_ops = {
+ .execute = output_poll_execute,
+};
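
The new polling code in drm_crtc_helper.c uses the slow_work infrastructure of this kernel era as a self-rearming timer: the execute callback runs a detection pass and, while any connector still needs polling, re-enqueues itself one period later. A stripped-down sketch of that pattern (my_poll, my_poll_execute and MY_PERIOD are illustrative names, not from the patch):

#include <linux/jiffies.h>
#include <linux/module.h>
#include <linux/slow-work.h>

#define MY_PERIOD (10 * HZ)

static struct delayed_slow_work my_poll;

static void my_poll_execute(struct slow_work *work)
{
	/* ... probe hardware state here ... */

	/* rearm so the check runs again one period from now */
	delayed_slow_work_enqueue(&my_poll, MY_PERIOD);
}

static struct slow_work_ops my_poll_ops = {
	.execute = my_poll_execute,
};

static int my_poll_start(void)
{
	int ret = slow_work_register_user(THIS_MODULE);

	if (ret)
		return ret;
	delayed_slow_work_init(&my_poll, &my_poll_ops);
	return delayed_slow_work_enqueue(&my_poll, MY_PERIOD);
}

static void my_poll_stop(void)
{
	delayed_slow_work_cancel(&my_poll);
	slow_work_unregister_user(THIS_MODULE);
}

Cancelling before unregistering, as drm_kms_helper_poll_fini() does above, ensures the work cannot re-enqueue itself after teardown.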
diff --git a/drivers/gpu/drm/drm_dma.c b/drivers/gpu/drm/drm_dma.c
index 13f1537413f..252cbd74df0 100644
--- a/drivers/gpu/drm/drm_dma.c
+++ b/drivers/gpu/drm/drm_dma.c
@@ -47,12 +47,10 @@ int drm_dma_setup(struct drm_device *dev)
{
int i;
- dev->dma = kmalloc(sizeof(*dev->dma), GFP_KERNEL);
+ dev->dma = kzalloc(sizeof(*dev->dma), GFP_KERNEL);
if (!dev->dma)
return -ENOMEM;
- memset(dev->dma, 0, sizeof(*dev->dma));
-
for (i = 0; i <= DRM_MAX_ORDER; i++)
memset(&dev->dma->bufs[i], 0, sizeof(dev->dma->bufs[0]));
diff --git a/drivers/gpu/drm/drm_edid.c b/drivers/gpu/drm/drm_edid.c
index 18f41d7061f..f569ae88ab3 100644
--- a/drivers/gpu/drm/drm_edid.c
+++ b/drivers/gpu/drm/drm_edid.c
@@ -2,6 +2,7 @@
* Copyright (c) 2006 Luc Verhaegen (quirks list)
* Copyright (c) 2007-2008 Intel Corporation
* Jesse Barnes <jesse.barnes@intel.com>
+ * Copyright 2010 Red Hat, Inc.
*
* DDC probing routines (drm_ddc_read & drm_do_probe_ddc_edid) originally from
* FB layer.
@@ -33,10 +34,9 @@
#include "drmP.h"
#include "drm_edid.h"
-/*
- * TODO:
- * - support EDID 1.4 (incl. CE blocks)
- */
+#define EDID_EST_TIMINGS 16
+#define EDID_STD_TIMINGS 8
+#define EDID_DETAILED_TIMINGS 4
/*
* EDID blocks out in the wild have a variety of bugs, try to collect
@@ -65,7 +65,8 @@
#define LEVEL_DMT 0
#define LEVEL_GTF 1
-#define LEVEL_CVT 2
+#define LEVEL_GTF2 2
+#define LEVEL_CVT 3
static struct edid_quirk {
char *vendor;
@@ -109,36 +110,38 @@ static struct edid_quirk {
{ "SAM", 638, EDID_QUIRK_PREFER_LARGE_60 },
};
+/*** DDC fetch and block validation ***/
-/* Valid EDID header has these bytes */
static const u8 edid_header[] = {
0x00, 0xff, 0xff, 0xff, 0xff, 0xff, 0xff, 0x00
};
-/**
- * drm_edid_is_valid - sanity check EDID data
- * @edid: EDID data
- *
- * Sanity check the EDID block by looking at the header, the version number
- * and the checksum. Return 0 if the EDID doesn't check out, or 1 if it's
- * valid.
+/*
+ * Sanity check the EDID block (base or extension). Return 0 if the block
+ * doesn't check out, or 1 if it's valid.
*/
-bool drm_edid_is_valid(struct edid *edid)
+static bool
+drm_edid_block_valid(u8 *raw_edid)
{
- int i, score = 0;
+ int i;
u8 csum = 0;
- u8 *raw_edid = (u8 *)edid;
+ struct edid *edid = (struct edid *)raw_edid;
- for (i = 0; i < sizeof(edid_header); i++)
- if (raw_edid[i] == edid_header[i])
- score++;
+ if (raw_edid[0] == 0x00) {
+ int score = 0;
- if (score == 8) ;
- else if (score >= 6) {
- DRM_DEBUG("Fixing EDID header, your hardware may be failing\n");
- memcpy(raw_edid, edid_header, sizeof(edid_header));
- } else
- goto bad;
+ for (i = 0; i < sizeof(edid_header); i++)
+ if (raw_edid[i] == edid_header[i])
+ score++;
+
+ if (score == 8) ;
+ else if (score >= 6) {
+ DRM_DEBUG("Fixing EDID header, your hardware may be failing\n");
+ memcpy(raw_edid, edid_header, sizeof(edid_header));
+ } else {
+ goto bad;
+ }
+ }
for (i = 0; i < EDID_LENGTH; i++)
csum += raw_edid[i];
@@ -147,13 +150,21 @@ bool drm_edid_is_valid(struct edid *edid)
goto bad;
}
- if (edid->version != 1) {
- DRM_ERROR("EDID has major version %d, instead of 1\n", edid->version);
- goto bad;
- }
+ /* per-block-type checks */
+ switch (raw_edid[0]) {
+ case 0: /* base */
+ if (edid->version != 1) {
+ DRM_ERROR("EDID has major version %d, instead of 1\n", edid->version);
+ goto bad;
+ }
- if (edid->revision > 4)
- DRM_DEBUG("EDID minor > 4, assuming backward compatibility\n");
+ if (edid->revision > 4)
+ DRM_DEBUG("EDID minor > 4, assuming backward compatibility\n");
+ break;
+
+ default:
+ break;
+ }
return 1;
@@ -165,8 +176,158 @@ bad:
}
return 0;
}
+
+/**
+ * drm_edid_is_valid - sanity check EDID data
+ * @edid: EDID data
+ *
+ * Sanity-check an entire EDID record (including extensions)
+ */
+bool drm_edid_is_valid(struct edid *edid)
+{
+ int i;
+ u8 *raw = (u8 *)edid;
+
+ if (!edid)
+ return false;
+
+ for (i = 0; i <= edid->extensions; i++)
+ if (!drm_edid_block_valid(raw + i * EDID_LENGTH))
+ return false;
+
+ return true;
+}
EXPORT_SYMBOL(drm_edid_is_valid);
+#define DDC_ADDR 0x50
+#define DDC_SEGMENT_ADDR 0x30
+/**
+ * Get EDID information via I2C.
+ *
+ * \param adapter : i2c device adaptor
+ * \param buf : EDID data buffer to be filled
+ * \param len : EDID data buffer length
+ * \return 0 on success or -1 on failure.
+ *
+ * Try to fetch EDID information by calling i2c driver function.
+ */
+static int
+drm_do_probe_ddc_edid(struct i2c_adapter *adapter, unsigned char *buf,
+ int block, int len)
+{
+ unsigned char start = block * EDID_LENGTH;
+ struct i2c_msg msgs[] = {
+ {
+ .addr = DDC_ADDR,
+ .flags = 0,
+ .len = 1,
+ .buf = &start,
+ }, {
+ .addr = DDC_ADDR,
+ .flags = I2C_M_RD,
+ .len = len,
+ .buf = buf + start,
+ }
+ };
+
+ if (i2c_transfer(adapter, msgs, 2) == 2)
+ return 0;
+
+ return -1;
+}
+
+static u8 *
+drm_do_get_edid(struct drm_connector *connector, struct i2c_adapter *adapter)
+{
+ int i, j = 0;
+ u8 *block, *new;
+
+ if ((block = kmalloc(EDID_LENGTH, GFP_KERNEL)) == NULL)
+ return NULL;
+
+ /* base block fetch */
+ for (i = 0; i < 4; i++) {
+ if (drm_do_probe_ddc_edid(adapter, block, 0, EDID_LENGTH))
+ goto out;
+ if (drm_edid_block_valid(block))
+ break;
+ }
+ if (i == 4)
+ goto carp;
+
+ /* if there's no extensions, we're done */
+ if (block[0x7e] == 0)
+ return block;
+
+ new = krealloc(block, (block[0x7e] + 1) * EDID_LENGTH, GFP_KERNEL);
+ if (!new)
+ goto out;
+ block = new;
+
+ for (j = 1; j <= block[0x7e]; j++) {
+ for (i = 0; i < 4; i++) {
+ if (drm_do_probe_ddc_edid(adapter, block, j,
+ EDID_LENGTH))
+ goto out;
+ if (drm_edid_block_valid(block + j * EDID_LENGTH))
+ break;
+ }
+ if (i == 4)
+ goto carp;
+ }
+
+ return block;
+
+carp:
+ dev_warn(&connector->dev->pdev->dev, "%s: EDID block %d invalid.\n",
+ drm_get_connector_name(connector), j);
+
+out:
+ kfree(block);
+ return NULL;
+}
+
+/**
+ * Probe DDC presence.
+ *
+ * \param adapter : i2c device adaptor
+ * \return 1 on success
+ */
+static bool
+drm_probe_ddc(struct i2c_adapter *adapter)
+{
+ unsigned char out;
+
+ return (drm_do_probe_ddc_edid(adapter, &out, 0, 1) == 0);
+}
+
+/**
+ * drm_get_edid - get EDID data, if available
+ * @connector: connector we're probing
+ * @adapter: i2c adapter to use for DDC
+ *
+ * Poke the given i2c channel to grab EDID data if possible. If found,
+ * attach it to the connector.
+ *
+ * Return edid data or NULL if we couldn't find any.
+ */
+struct edid *drm_get_edid(struct drm_connector *connector,
+ struct i2c_adapter *adapter)
+{
+ struct edid *edid = NULL;
+
+ if (drm_probe_ddc(adapter))
+ edid = (struct edid *)drm_do_get_edid(connector, adapter);
+
+ connector->display_info.raw_edid = (char *)edid;
+
+ return edid;
+
+}
+EXPORT_SYMBOL(drm_get_edid);
+
+/*** EDID parsing ***/
+
/**
* edid_vendor - match a string against EDID's obfuscated vendor field
* @edid: EDID to match
@@ -335,7 +496,7 @@ static struct drm_display_mode drm_dmt_modes[] = {
DRM_MODE_FLAG_PHSYNC | DRM_MODE_FLAG_PVSYNC) },
/* 1024x768@85Hz */
{ DRM_MODE("1024x768", DRM_MODE_TYPE_DRIVER, 94500, 1024, 1072,
- 1072, 1376, 0, 768, 769, 772, 808, 0,
+ 1168, 1376, 0, 768, 769, 772, 808, 0,
DRM_MODE_FLAG_PHSYNC | DRM_MODE_FLAG_PVSYNC) },
/* 1152x864@75Hz */
{ DRM_MODE("1152x864", DRM_MODE_TYPE_DRIVER, 108000, 1152, 1216,
@@ -426,7 +587,7 @@ static struct drm_display_mode drm_dmt_modes[] = {
1856, 2160, 0, 1200, 1201, 1204, 1250, 0,
DRM_MODE_FLAG_PHSYNC | DRM_MODE_FLAG_PVSYNC) },
/* 1600x1200@75Hz */
- { DRM_MODE("1600x1200", DRM_MODE_TYPE_DRIVER, 2025000, 1600, 1664,
+ { DRM_MODE("1600x1200", DRM_MODE_TYPE_DRIVER, 202500, 1600, 1664,
1856, 2160, 0, 1200, 1201, 1204, 1250, 0,
DRM_MODE_FLAG_PHSYNC | DRM_MODE_FLAG_PVSYNC) },
/* 1600x1200@85Hz */
@@ -497,8 +658,8 @@ static struct drm_display_mode drm_dmt_modes[] = {
static const int drm_num_dmt_modes =
sizeof(drm_dmt_modes) / sizeof(struct drm_display_mode);
-static struct drm_display_mode *drm_find_dmt(struct drm_device *dev,
- int hsize, int vsize, int fresh)
+struct drm_display_mode *drm_mode_find_dmt(struct drm_device *dev,
+ int hsize, int vsize, int fresh)
{
int i;
struct drm_display_mode *ptr, *mode;
@@ -516,6 +677,111 @@ static struct drm_display_mode *drm_find_dmt(struct drm_device *dev,
}
return mode;
}
+EXPORT_SYMBOL(drm_mode_find_dmt);
+
+typedef void detailed_cb(struct detailed_timing *timing, void *closure);
+
+static void
+drm_for_each_detailed_block(u8 *raw_edid, detailed_cb *cb, void *closure)
+{
+ int i;
+ struct edid *edid = (struct edid *)raw_edid;
+
+ if (edid == NULL)
+ return;
+
+ for (i = 0; i < EDID_DETAILED_TIMINGS; i++)
+ cb(&(edid->detailed_timings[i]), closure);
+
+ /* XXX extension block walk */
+}
+
+static void
+is_rb(struct detailed_timing *t, void *data)
+{
+ u8 *r = (u8 *)t;
+ if (r[3] == EDID_DETAIL_MONITOR_RANGE)
+ if (r[15] & 0x10)
+ *(bool *)data = true;
+}
+
+/* EDID 1.4 defines this explicitly. For EDID 1.3, we guess, badly. */
+static bool
+drm_monitor_supports_rb(struct edid *edid)
+{
+ if (edid->revision >= 4) {
+ bool ret;
+ drm_for_each_detailed_block((u8 *)edid, is_rb, &ret);
+ return ret;
+ }
+
+ return ((edid->input & DRM_EDID_INPUT_DIGITAL) != 0);
+}
+
+static void
+find_gtf2(struct detailed_timing *t, void *data)
+{
+ u8 *r = (u8 *)t;
+ if (r[3] == EDID_DETAIL_MONITOR_RANGE && r[10] == 0x02)
+ *(u8 **)data = r;
+}
+
+/* Secondary GTF curve kicks in above some break frequency */
+static int
+drm_gtf2_hbreak(struct edid *edid)
+{
+ u8 *r = NULL;
+ drm_for_each_detailed_block((u8 *)edid, find_gtf2, &r);
+ return r ? (r[12] * 2) : 0;
+}
+
+static int
+drm_gtf2_2c(struct edid *edid)
+{
+ u8 *r = NULL;
+ drm_for_each_detailed_block((u8 *)edid, find_gtf2, &r);
+ return r ? r[13] : 0;
+}
+
+static int
+drm_gtf2_m(struct edid *edid)
+{
+ u8 *r = NULL;
+ drm_for_each_detailed_block((u8 *)edid, find_gtf2, &r);
+ return r ? (r[15] << 8) + r[14] : 0;
+}
+
+static int
+drm_gtf2_k(struct edid *edid)
+{
+ u8 *r = NULL;
+ drm_for_each_detailed_block((u8 *)edid, find_gtf2, &r);
+ return r ? r[16] : 0;
+}
+
+static int
+drm_gtf2_2j(struct edid *edid)
+{
+ u8 *r = NULL;
+ drm_for_each_detailed_block((u8 *)edid, find_gtf2, &r);
+ return r ? r[17] : 0;
+}
+
+/**
+ * standard_timing_level - get std. timing level(CVT/GTF/DMT)
+ * @edid: EDID block to scan
+ */
+static int standard_timing_level(struct edid *edid)
+{
+ if (edid->revision >= 2) {
+ if (edid->revision >= 4 && (edid->features & DRM_EDID_FEATURE_DEFAULT_GTF))
+ return LEVEL_CVT;
+ if (drm_gtf2_hbreak(edid))
+ return LEVEL_GTF2;
+ return LEVEL_GTF;
+ }
+ return LEVEL_DMT;
+}
/*
* 0 is reserved. The spec says 0x01 fill for unused timings. Some old
@@ -536,22 +802,20 @@ bad_std_timing(u8 a, u8 b)
*
* Take the standard timing params (in this case width, aspect, and refresh)
* and convert them into a real mode using CVT/GTF/DMT.
- *
- * Punts for now, but should eventually use the FB layer's CVT based mode
- * generation code.
*/
-struct drm_display_mode *drm_mode_std(struct drm_device *dev,
- struct std_timing *t,
- int revision,
- int timing_level)
+static struct drm_display_mode *
+drm_mode_std(struct drm_connector *connector, struct edid *edid,
+ struct std_timing *t, int revision)
{
- struct drm_display_mode *mode;
+ struct drm_device *dev = connector->dev;
+ struct drm_display_mode *m, *mode = NULL;
int hsize, vsize;
int vrefresh_rate;
unsigned aspect_ratio = (t->vfreq_aspect & EDID_TIMING_ASPECT_MASK)
>> EDID_TIMING_ASPECT_SHIFT;
unsigned vfreq = (t->vfreq_aspect & EDID_TIMING_VFREQ_MASK)
>> EDID_TIMING_VFREQ_SHIFT;
+ int timing_level = standard_timing_level(edid);
if (bad_std_timing(t->hsize, t->vfreq_aspect))
return NULL;
@@ -572,18 +836,38 @@ struct drm_display_mode *drm_mode_std(struct drm_device *dev,
vsize = (hsize * 4) / 5;
else
vsize = (hsize * 9) / 16;
- /* HDTV hack */
- if (hsize == 1360 && vsize == 765 && vrefresh_rate == 60) {
- mode = drm_cvt_mode(dev, hsize, vsize, vrefresh_rate, 0, 0,
+
+ /* HDTV hack, part 1 */
+ if (vrefresh_rate == 60 &&
+ ((hsize == 1360 && vsize == 765) ||
+ (hsize == 1368 && vsize == 769))) {
+ hsize = 1366;
+ vsize = 768;
+ }
+
+ /*
+ * If this connector already has a mode for this size and refresh
+ * rate (because it came from detailed or CVT info), use that
+ * instead. This way we don't have to guess at interlace or
+ * reduced blanking.
+ */
+ list_for_each_entry(m, &connector->probed_modes, head)
+ if (m->hdisplay == hsize && m->vdisplay == vsize &&
+ drm_mode_vrefresh(m) == vrefresh_rate)
+ return NULL;
+
+ /* HDTV hack, part 2 */
+ if (hsize == 1366 && vsize == 768 && vrefresh_rate == 60) {
+ mode = drm_cvt_mode(dev, 1366, 768, vrefresh_rate, 0, 0,
false);
mode->hdisplay = 1366;
mode->vsync_start = mode->vsync_start - 1;
mode->vsync_end = mode->vsync_end - 1;
return mode;
}
- mode = NULL;
+
/* check whether it can be found in default mode table */
- mode = drm_find_dmt(dev, hsize, vsize, vrefresh_rate);
+ mode = drm_mode_find_dmt(dev, hsize, vsize, vrefresh_rate);
if (mode)
return mode;
@@ -593,6 +877,23 @@ struct drm_display_mode *drm_mode_std(struct drm_device *dev,
case LEVEL_GTF:
mode = drm_gtf_mode(dev, hsize, vsize, vrefresh_rate, 0, 0);
break;
+ case LEVEL_GTF2:
+ /*
+ * This is potentially wrong if there's ever a monitor with
+ * more than one ranges section, each claiming a different
+ * secondary GTF curve. Please don't do that.
+ */
+ mode = drm_gtf_mode(dev, hsize, vsize, vrefresh_rate, 0, 0);
+ if (drm_mode_hsync(mode) > drm_gtf2_hbreak(edid)) {
+ kfree(mode);
+ mode = drm_gtf_mode_complex(dev, hsize, vsize,
+ vrefresh_rate, 0, 0,
+ drm_gtf2_m(edid),
+ drm_gtf2_2c(edid),
+ drm_gtf2_k(edid),
+ drm_gtf2_2j(edid));
+ }
+ break;
case LEVEL_CVT:
mode = drm_cvt_mode(dev, hsize, vsize, vrefresh_rate, 0, 0,
false);
@@ -716,10 +1017,10 @@ static struct drm_display_mode *drm_mode_detailed(struct drm_device *dev,
if (mode->vsync_end > mode->vtotal)
mode->vtotal = mode->vsync_end + 1;
- drm_mode_set_name(mode);
-
drm_mode_do_interlace_quirk(mode, pt);
+ drm_mode_set_name(mode);
+
if (quirks & EDID_QUIRK_DETAILED_SYNC_PP) {
pt->misc |= DRM_EDID_PT_HSYNC_POSITIVE | DRM_EDID_PT_VSYNC_POSITIVE;
}
@@ -802,10 +1103,6 @@ static struct drm_display_mode edid_est_modes[] = {
DRM_MODE_FLAG_PHSYNC | DRM_MODE_FLAG_PVSYNC) }, /* 1152x864@75Hz */
};
-#define EDID_EST_TIMINGS 16
-#define EDID_STD_TIMINGS 8
-#define EDID_DETAILED_TIMINGS 4
-
/**
* add_established_modes - get est. modes from EDID and add them
* @edid: EDID block to scan
@@ -833,19 +1130,6 @@ static int add_established_modes(struct drm_connector *connector, struct edid *e
return modes;
}
-/**
- * stanard_timing_level - get std. timing level(CVT/GTF/DMT)
- * @edid: EDID block to scan
- */
-static int standard_timing_level(struct edid *edid)
-{
- if (edid->revision >= 2) {
- if (edid->revision >= 4 && (edid->features & DRM_EDID_FEATURE_DEFAULT_GTF))
- return LEVEL_CVT;
- return LEVEL_GTF;
- }
- return LEVEL_DMT;
-}
/**
* add_standard_modes - get std. modes from EDID and add them
@@ -856,22 +1140,14 @@ static int standard_timing_level(struct edid *edid)
*/
static int add_standard_modes(struct drm_connector *connector, struct edid *edid)
{
- struct drm_device *dev = connector->dev;
int i, modes = 0;
- int timing_level;
-
- timing_level = standard_timing_level(edid);
for (i = 0; i < EDID_STD_TIMINGS; i++) {
- struct std_timing *t = &edid->standard_timings[i];
struct drm_display_mode *newmode;
- /* If std timings bytes are 1, 1 it's empty */
- if (t->hsize == 1 && t->vfreq_aspect == 1)
- continue;
-
- newmode = drm_mode_std(dev, &edid->standard_timings[i],
- edid->revision, timing_level);
+ newmode = drm_mode_std(connector, edid,
+ &edid->standard_timings[i],
+ edid->revision);
if (newmode) {
drm_mode_probed_add(connector, newmode);
modes++;
@@ -881,36 +1157,86 @@ static int add_standard_modes(struct drm_connector *connector, struct edid *edid
return modes;
}
-/*
- * XXX fix this for:
- * - GTF secondary curve formula
- * - EDID 1.4 range offsets
- * - CVT extended bits
- */
static bool
-mode_in_range(struct drm_display_mode *mode, struct detailed_timing *timing)
+mode_is_rb(struct drm_display_mode *mode)
{
- struct detailed_data_monitor_range *range;
- int hsync, vrefresh;
-
- range = &timing->data.other_data.data.range;
+ return (mode->htotal - mode->hdisplay == 160) &&
+ (mode->hsync_end - mode->hdisplay == 80) &&
+ (mode->hsync_end - mode->hsync_start == 32) &&
+ (mode->vsync_start - mode->vdisplay == 3);
+}
+static bool
+mode_in_hsync_range(struct drm_display_mode *mode, struct edid *edid, u8 *t)
+{
+ int hsync, hmin, hmax;
+
+ hmin = t[7];
+ if (edid->revision >= 4)
+ hmin += ((t[4] & 0x04) ? 255 : 0);
+ hmax = t[8];
+ if (edid->revision >= 4)
+ hmax += ((t[4] & 0x08) ? 255 : 0);
hsync = drm_mode_hsync(mode);
- vrefresh = drm_mode_vrefresh(mode);
- if (hsync < range->min_hfreq_khz || hsync > range->max_hfreq_khz)
+ return (hsync <= hmax && hsync >= hmin);
+}
+
+static bool
+mode_in_vsync_range(struct drm_display_mode *mode, struct edid *edid, u8 *t)
+{
+ int vsync, vmin, vmax;
+
+ vmin = t[5];
+ if (edid->revision >= 4)
+ vmin += ((t[4] & 0x01) ? 255 : 0);
+ vmax = t[6];
+ if (edid->revision >= 4)
+ vmax += ((t[4] & 0x02) ? 255 : 0);
+ vsync = drm_mode_vrefresh(mode);
+
+ return (vsync <= vmax && vsync >= vmin);
+}
+
+static u32
+range_pixel_clock(struct edid *edid, u8 *t)
+{
+ /* unspecified */
+ if (t[9] == 0 || t[9] == 255)
+ return 0;
+
+ /* 1.4 with CVT support gives us real precision, yay */
+ if (edid->revision >= 4 && t[10] == 0x04)
+ return (t[9] * 10000) - ((t[12] >> 2) * 250);
+
+ /* 1.3 is pathetic, so fuzz up a bit */
+ return t[9] * 10000 + 5001;
+}
+
+static bool
+mode_in_range(struct drm_display_mode *mode, struct edid *edid,
+ struct detailed_timing *timing)
+{
+ u32 max_clock;
+ u8 *t = (u8 *)timing;
+
+ if (!mode_in_hsync_range(mode, edid, t))
return false;
- if (vrefresh < range->min_vfreq || vrefresh > range->max_vfreq)
+ if (!mode_in_vsync_range(mode, edid, t))
return false;
- if (range->pixel_clock_mhz && range->pixel_clock_mhz != 0xff) {
- /* be forgiving since it's in units of 10MHz */
- int max_clock = range->pixel_clock_mhz * 10 + 9;
- max_clock *= 1000;
+ if ((max_clock = range_pixel_clock(edid, t)))
if (mode->clock > max_clock)
return false;
- }
+
+ /* 1.4 max horizontal check */
+ if (edid->revision >= 4 && t[10] == 0x04)
+ if (t[13] && mode->hdisplay > 8 * (t[13] + (256 * (t[12]&0x3))))
+ return false;
+
+ if (mode_is_rb(mode) && !drm_monitor_supports_rb(edid))
+ return false;
return true;
}
@@ -919,15 +1245,16 @@ mode_in_range(struct drm_display_mode *mode, struct detailed_timing *timing)
* XXX If drm_dmt_modes ever regrows the CVT-R modes (and it will) this will
* need to account for them.
*/
-static int drm_gtf_modes_for_range(struct drm_connector *connector,
- struct detailed_timing *timing)
+static int
+drm_gtf_modes_for_range(struct drm_connector *connector, struct edid *edid,
+ struct detailed_timing *timing)
{
int i, modes = 0;
struct drm_display_mode *newmode;
struct drm_device *dev = connector->dev;
for (i = 0; i < drm_num_dmt_modes; i++) {
- if (mode_in_range(drm_dmt_modes + i, timing)) {
+ if (mode_in_range(drm_dmt_modes + i, edid, timing)) {
newmode = drm_mode_duplicate(dev, &drm_dmt_modes[i]);
if (newmode) {
drm_mode_probed_add(connector, newmode);
@@ -988,13 +1315,100 @@ static int drm_cvt_modes(struct drm_connector *connector,
return modes;
}
+static const struct {
+ short w;
+ short h;
+ short r;
+ short rb;
+} est3_modes[] = {
+ /* byte 6 */
+ { 640, 350, 85, 0 },
+ { 640, 400, 85, 0 },
+ { 720, 400, 85, 0 },
+ { 640, 480, 85, 0 },
+ { 848, 480, 60, 0 },
+ { 800, 600, 85, 0 },
+ { 1024, 768, 85, 0 },
+ { 1152, 864, 75, 0 },
+ /* byte 7 */
+ { 1280, 768, 60, 1 },
+ { 1280, 768, 60, 0 },
+ { 1280, 768, 75, 0 },
+ { 1280, 768, 85, 0 },
+ { 1280, 960, 60, 0 },
+ { 1280, 960, 85, 0 },
+ { 1280, 1024, 60, 0 },
+ { 1280, 1024, 85, 0 },
+ /* byte 8 */
+ { 1360, 768, 60, 0 },
+ { 1440, 900, 60, 1 },
+ { 1440, 900, 60, 0 },
+ { 1440, 900, 75, 0 },
+ { 1440, 900, 85, 0 },
+ { 1400, 1050, 60, 1 },
+ { 1400, 1050, 60, 0 },
+ { 1400, 1050, 75, 0 },
+ /* byte 9 */
+ { 1400, 1050, 85, 0 },
+ { 1680, 1050, 60, 1 },
+ { 1680, 1050, 60, 0 },
+ { 1680, 1050, 75, 0 },
+ { 1680, 1050, 85, 0 },
+ { 1600, 1200, 60, 0 },
+ { 1600, 1200, 65, 0 },
+ { 1600, 1200, 70, 0 },
+ /* byte 10 */
+ { 1600, 1200, 75, 0 },
+ { 1600, 1200, 85, 0 },
+ { 1792, 1344, 60, 0 },
+ { 1792, 1344, 85, 0 },
+ { 1856, 1392, 60, 0 },
+ { 1856, 1392, 75, 0 },
+ { 1920, 1200, 60, 1 },
+ { 1920, 1200, 60, 0 },
+ /* byte 11 */
+ { 1920, 1200, 75, 0 },
+ { 1920, 1200, 85, 0 },
+ { 1920, 1440, 60, 0 },
+ { 1920, 1440, 75, 0 },
+};
+static const int num_est3_modes = sizeof(est3_modes) / sizeof(est3_modes[0]);
+
+static int
+drm_est3_modes(struct drm_connector *connector, struct detailed_timing *timing)
+{
+ int i, j, m, modes = 0;
+ struct drm_display_mode *mode;
+ u8 *est = ((u8 *)timing) + 5;
+
+ for (i = 0; i < 6; i++) {
+ for (j = 7; j > 0; j--) {
+ m = (i * 8) + (7 - j);
+ if (m >= num_est3_modes)
+ break;
+ if (est[i] & (1 << j)) {
+ mode = drm_mode_find_dmt(connector->dev,
+ est3_modes[m].w,
+ est3_modes[m].h,
+ est3_modes[m].r
+ /*, est3_modes[m].rb */);
+ if (mode) {
+ drm_mode_probed_add(connector, mode);
+ modes++;
+ }
+ }
+ }
+ }
+
+ return modes;
+}
+
static int add_detailed_modes(struct drm_connector *connector,
struct detailed_timing *timing,
struct edid *edid, u32 quirks, int preferred)
{
int i, modes = 0;
struct detailed_non_pixel *data = &timing->data.other_data;
- int timing_level = standard_timing_level(edid);
int gtf = (edid->features & DRM_EDID_FEATURE_DEFAULT_GTF);
struct drm_display_mode *newmode;
struct drm_device *dev = connector->dev;
@@ -1015,7 +1429,8 @@ static int add_detailed_modes(struct drm_connector *connector,
switch (data->type) {
case EDID_DETAIL_MONITOR_RANGE:
if (gtf)
- modes += drm_gtf_modes_for_range(connector, timing);
+ modes += drm_gtf_modes_for_range(connector, edid,
+ timing);
break;
case EDID_DETAIL_STD_MODES:
/* Six modes per detailed section */
@@ -1024,8 +1439,8 @@ static int add_detailed_modes(struct drm_connector *connector,
struct drm_display_mode *newmode;
std = &data->data.timings[i];
- newmode = drm_mode_std(dev, std, edid->revision,
- timing_level);
+ newmode = drm_mode_std(connector, edid, std,
+ edid->revision);
if (newmode) {
drm_mode_probed_add(connector, newmode);
modes++;
@@ -1035,6 +1450,9 @@ static int add_detailed_modes(struct drm_connector *connector,
case EDID_DETAIL_CVT_3BYTE:
modes += drm_cvt_modes(connector, timing);
break;
+ case EDID_DETAIL_EST_TIMINGS:
+ modes += drm_est3_modes(connector, timing);
+ break;
default:
break;
}
@@ -1058,7 +1476,10 @@ static int add_detailed_info(struct drm_connector *connector,
for (i = 0; i < EDID_DETAILED_TIMINGS; i++) {
struct detailed_timing *timing = &edid->detailed_timings[i];
- int preferred = (i == 0) && (edid->features & DRM_EDID_FEATURE_PREFERRED_TIMING);
+ int preferred = (i == 0);
+
+ if (preferred && edid->version == 1 && edid->revision < 4)
+ preferred = (edid->features & DRM_EDID_FEATURE_PREFERRED_TIMING);
/* In 1.0, only timings are allowed */
if (!timing->pixel_clock && edid->version == 1 &&
@@ -1088,39 +1509,22 @@ static int add_detailed_info_eedid(struct drm_connector *connector,
int i, modes = 0;
char *edid_ext = NULL;
struct detailed_timing *timing;
- int edid_ext_num;
int start_offset, end_offset;
- int timing_level;
- if (edid->version == 1 && edid->revision < 3) {
- /* If the EDID version is less than 1.3, there is no
- * extension EDID.
- */
+ if (edid->version == 1 && edid->revision < 3)
return 0;
- }
- if (!edid->extensions) {
- /* if there is no extension EDID, it is unnecessary to
- * parse the E-EDID to get detailed info
- */
+ if (!edid->extensions)
return 0;
- }
-
- /* Chose real EDID extension number */
- edid_ext_num = edid->extensions > DRM_MAX_EDID_EXT_NUM ?
- DRM_MAX_EDID_EXT_NUM : edid->extensions;
/* Find CEA extension */
- for (i = 0; i < edid_ext_num; i++) {
+ for (i = 0; i < edid->extensions; i++) {
edid_ext = (char *)edid + EDID_LENGTH * (i + 1);
- /* This block is CEA extension */
if (edid_ext[0] == 0x02)
break;
}
- if (i == edid_ext_num) {
- /* if there is no additional timing EDID block, return */
+ if (i == edid->extensions)
return 0;
- }
/* Get the start offset of detailed timing block */
start_offset = edid_ext[2];
@@ -1132,7 +1536,6 @@ static int add_detailed_info_eedid(struct drm_connector *connector,
return 0;
}
- timing_level = standard_timing_level(edid);
end_offset = EDID_LENGTH;
end_offset -= sizeof(struct detailed_timing);
for (i = start_offset; i < end_offset;
@@ -1144,123 +1547,6 @@ static int add_detailed_info_eedid(struct drm_connector *connector,
return modes;
}
-#define DDC_ADDR 0x50
-/**
- * Get EDID information via I2C.
- *
- * \param adapter : i2c device adaptor
- * \param buf : EDID data buffer to be filled
- * \param len : EDID data buffer length
- * \return 0 on success or -1 on failure.
- *
- * Try to fetch EDID information by calling i2c driver function.
- */
-int drm_do_probe_ddc_edid(struct i2c_adapter *adapter,
- unsigned char *buf, int len)
-{
- unsigned char start = 0x0;
- struct i2c_msg msgs[] = {
- {
- .addr = DDC_ADDR,
- .flags = 0,
- .len = 1,
- .buf = &start,
- }, {
- .addr = DDC_ADDR,
- .flags = I2C_M_RD,
- .len = len,
- .buf = buf,
- }
- };
-
- if (i2c_transfer(adapter, msgs, 2) == 2)
- return 0;
-
- return -1;
-}
-EXPORT_SYMBOL(drm_do_probe_ddc_edid);
-
-static int drm_ddc_read_edid(struct drm_connector *connector,
- struct i2c_adapter *adapter,
- char *buf, int len)
-{
- int i;
-
- for (i = 0; i < 4; i++) {
- if (drm_do_probe_ddc_edid(adapter, buf, len))
- return -1;
- if (drm_edid_is_valid((struct edid *)buf))
- return 0;
- }
-
- /* repeated checksum failures; warn, but carry on */
- dev_warn(&connector->dev->pdev->dev, "%s: EDID invalid.\n",
- drm_get_connector_name(connector));
- return -1;
-}
-
-/**
- * drm_get_edid - get EDID data, if available
- * @connector: connector we're probing
- * @adapter: i2c adapter to use for DDC
- *
- * Poke the given connector's i2c channel to grab EDID data if possible.
- *
- * Return edid data or NULL if we couldn't find any.
- */
-struct edid *drm_get_edid(struct drm_connector *connector,
- struct i2c_adapter *adapter)
-{
- int ret;
- struct edid *edid;
-
- edid = kmalloc(EDID_LENGTH * (DRM_MAX_EDID_EXT_NUM + 1),
- GFP_KERNEL);
- if (edid == NULL) {
- dev_warn(&connector->dev->pdev->dev,
- "Failed to allocate EDID\n");
- goto end;
- }
-
- /* Read first EDID block */
- ret = drm_ddc_read_edid(connector, adapter,
- (unsigned char *)edid, EDID_LENGTH);
- if (ret != 0)
- goto clean_up;
-
- /* There are EDID extensions to be read */
- if (edid->extensions != 0) {
- int edid_ext_num = edid->extensions;
-
- if (edid_ext_num > DRM_MAX_EDID_EXT_NUM) {
- dev_warn(&connector->dev->pdev->dev,
- "The number of extension(%d) is "
- "over max (%d), actually read number (%d)\n",
- edid_ext_num, DRM_MAX_EDID_EXT_NUM,
- DRM_MAX_EDID_EXT_NUM);
- /* Reset EDID extension number to be read */
- edid_ext_num = DRM_MAX_EDID_EXT_NUM;
- }
- /* Read EDID including extensions too */
- ret = drm_ddc_read_edid(connector, adapter, (char *)edid,
- EDID_LENGTH * (edid_ext_num + 1));
- if (ret != 0)
- goto clean_up;
-
- }
-
- connector->display_info.raw_edid = (char *)edid;
- goto end;
-
-clean_up:
- kfree(edid);
- edid = NULL;
-end:
- return edid;
-
-}
-EXPORT_SYMBOL(drm_get_edid);
-
#define HDMI_IDENTIFIER 0x000C03
#define VENDOR_BLOCK 0x03
/**
@@ -1273,7 +1559,7 @@ EXPORT_SYMBOL(drm_get_edid);
bool drm_detect_hdmi_monitor(struct edid *edid)
{
char *edid_ext = NULL;
- int i, hdmi_id, edid_ext_num;
+ int i, hdmi_id;
int start_offset, end_offset;
bool is_hdmi = false;
@@ -1281,19 +1567,15 @@ bool drm_detect_hdmi_monitor(struct edid *edid)
if (edid == NULL || edid->extensions == 0)
goto end;
- /* Chose real EDID extension number */
- edid_ext_num = edid->extensions > DRM_MAX_EDID_EXT_NUM ?
- DRM_MAX_EDID_EXT_NUM : edid->extensions;
-
/* Find CEA extension */
- for (i = 0; i < edid_ext_num; i++) {
+ for (i = 0; i < edid->extensions; i++) {
edid_ext = (char *)edid + EDID_LENGTH * (i + 1);
/* This block is CEA extension */
if (edid_ext[0] == 0x02)
break;
}
- if (i == edid_ext_num)
+ if (i == edid->extensions)
goto end;
/* Data block offset in CEA extension block */
@@ -1348,10 +1630,24 @@ int drm_add_edid_modes(struct drm_connector *connector, struct edid *edid)
quirks = edid_get_quirks(edid);
- num_modes += add_established_modes(connector, edid);
- num_modes += add_standard_modes(connector, edid);
+ /*
+ * EDID spec says modes should be preferred in this order:
+ * - preferred detailed mode
+ * - other detailed modes from base block
+ * - detailed modes from extension blocks
+ * - CVT 3-byte code modes
+ * - standard timing codes
+ * - established timing codes
+ * - modes inferred from GTF or CVT range information
+ *
+ * We don't quite implement this yet, but we're close.
+ *
+ * XXX order for additional mode types in extension blocks?
+ */
num_modes += add_detailed_info(connector, edid, quirks);
num_modes += add_detailed_info_eedid(connector, edid, quirks);
+ num_modes += add_standard_modes(connector, edid);
+ num_modes += add_established_modes(connector, edid);
if (quirks & (EDID_QUIRK_PREFER_LARGE_60 | EDID_QUIRK_PREFER_LARGE_75))
edid_fixup_preferred(connector, quirks);
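
The drm_edid.c rework above moves validation to per-block granularity: every 128-byte block, base or extension, must checksum to zero, and the new drm_edid_is_valid() simply walks all blocks. A minimal sketch of that invariant, with the base-block header and version checks omitted for brevity (helper names here are illustrative):

#include <linux/types.h>

#define EDID_LENGTH 128

/* each EDID block must sum to 0 modulo 256 */
static bool edid_block_checksum_ok(const u8 *block)
{
	u8 csum = 0;
	int i;

	for (i = 0; i < EDID_LENGTH; i++)
		csum += block[i];
	return csum == 0;
}

/* whole-record check: base block plus every extension block */
static bool edid_record_ok(const u8 *raw_edid)
{
	int i;

	for (i = 0; i <= raw_edid[0x7e]; i++)
		if (!edid_block_checksum_ok(raw_edid + i * EDID_LENGTH))
			return false;
	return true;
}

Checking per block is what lets the new drm_do_get_edid() retry a single bad extension read instead of discarding the whole record.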
diff --git a/drivers/gpu/drm/drm_fb_helper.c b/drivers/gpu/drm/drm_fb_helper.c
index 288ea2f3277..b3779d243ae 100644
--- a/drivers/gpu/drm/drm_fb_helper.c
+++ b/drivers/gpu/drm/drm_fb_helper.c
@@ -42,15 +42,33 @@ MODULE_LICENSE("GPL and additional rights");
static LIST_HEAD(kernel_fb_helper_list);
-int drm_fb_helper_add_connector(struct drm_connector *connector)
+/* simple single crtc case helper function */
+int drm_fb_helper_single_add_all_connectors(struct drm_fb_helper *fb_helper)
{
- connector->fb_helper_private = kzalloc(sizeof(struct drm_fb_helper_connector), GFP_KERNEL);
- if (!connector->fb_helper_private)
- return -ENOMEM;
+ struct drm_device *dev = fb_helper->dev;
+ struct drm_connector *connector;
+ int i;
+
+ list_for_each_entry(connector, &dev->mode_config.connector_list, head) {
+ struct drm_fb_helper_connector *fb_helper_connector;
+
+ fb_helper_connector = kzalloc(sizeof(struct drm_fb_helper_connector), GFP_KERNEL);
+ if (!fb_helper_connector)
+ goto fail;
+ fb_helper_connector->connector = connector;
+ fb_helper->connector_info[fb_helper->connector_count++] = fb_helper_connector;
+ }
return 0;
+fail:
+ for (i = 0; i < fb_helper->connector_count; i++) {
+ kfree(fb_helper->connector_info[i]);
+ fb_helper->connector_info[i] = NULL;
+ }
+ fb_helper->connector_count = 0;
+ return -ENOMEM;
}
-EXPORT_SYMBOL(drm_fb_helper_add_connector);
+EXPORT_SYMBOL(drm_fb_helper_single_add_all_connectors);
/**
* drm_fb_helper_connector_parse_command_line - parse command line for connector
@@ -65,7 +83,7 @@ EXPORT_SYMBOL(drm_fb_helper_add_connector);
*
* enable/enable Digital/disable bit at the end
*/
-static bool drm_fb_helper_connector_parse_command_line(struct drm_connector *connector,
+static bool drm_fb_helper_connector_parse_command_line(struct drm_fb_helper_connector *fb_helper_conn,
const char *mode_option)
{
const char *name;
@@ -75,13 +93,13 @@ static bool drm_fb_helper_connector_parse_command_line(struct drm_connector *con
int yres_specified = 0, cvt = 0, rb = 0, interlace = 0, margins = 0;
int i;
enum drm_connector_force force = DRM_FORCE_UNSPECIFIED;
- struct drm_fb_helper_connector *fb_help_conn = connector->fb_helper_private;
struct drm_fb_helper_cmdline_mode *cmdline_mode;
+ struct drm_connector *connector = fb_helper_conn->connector;
- if (!fb_help_conn)
+ if (!fb_helper_conn)
return false;
- cmdline_mode = &fb_help_conn->cmdline_mode;
+ cmdline_mode = &fb_helper_conn->cmdline_mode;
if (!mode_option)
mode_option = fb_mode_option;
@@ -204,18 +222,21 @@ done:
return true;
}
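
For reference, an assumed worked example of what the parser extracts (the field names follow the code above; the option string and values are illustrative):

    /*
     * video=DVI-I-1:1024x768@60e would leave, for that connector:
     *   cmdline_mode->xres = 1024, cmdline_mode->yres = 768
     *   cmdline_mode->refresh = 60, cmdline_mode->refresh_specified = true
     *   force = DRM_FORCE_ON (from the trailing 'e')
     */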
-int drm_fb_helper_parse_command_line(struct drm_device *dev)
+static int drm_fb_helper_parse_command_line(struct drm_fb_helper *fb_helper)
{
- struct drm_connector *connector;
+ struct drm_fb_helper_connector *fb_helper_conn;
+ int i;
- list_for_each_entry(connector, &dev->mode_config.connector_list, head) {
+ for (i = 0; i < fb_helper->connector_count; i++) {
char *option = NULL;
+ fb_helper_conn = fb_helper->connector_info[i];
+
/* do something on return - turn off connector maybe */
- if (fb_get_options(drm_get_connector_name(connector), &option))
+ if (fb_get_options(drm_get_connector_name(fb_helper_conn->connector), &option))
continue;
- drm_fb_helper_connector_parse_command_line(connector, option);
+ drm_fb_helper_connector_parse_command_line(fb_helper_conn, option);
}
return 0;
}
@@ -293,6 +314,7 @@ static void drm_fb_helper_on(struct fb_info *info)
struct drm_fb_helper *fb_helper = info->par;
struct drm_device *dev = fb_helper->dev;
struct drm_crtc *crtc;
+ struct drm_crtc_helper_funcs *crtc_funcs;
struct drm_encoder *encoder;
int i;
@@ -300,33 +322,28 @@ static void drm_fb_helper_on(struct fb_info *info)
* For each CRTC in this fb, turn the crtc on then,
* find all associated encoders and turn them on.
*/
+ mutex_lock(&dev->mode_config.mutex);
for (i = 0; i < fb_helper->crtc_count; i++) {
- list_for_each_entry(crtc, &dev->mode_config.crtc_list, head) {
- struct drm_crtc_helper_funcs *crtc_funcs =
- crtc->helper_private;
+ crtc = fb_helper->crtc_info[i].mode_set.crtc;
+ crtc_funcs = crtc->helper_private;
- /* Only mess with CRTCs in this fb */
- if (crtc->base.id != fb_helper->crtc_info[i].crtc_id ||
- !crtc->enabled)
- continue;
+ if (!crtc->enabled)
+ continue;
- mutex_lock(&dev->mode_config.mutex);
- crtc_funcs->dpms(crtc, DRM_MODE_DPMS_ON);
- mutex_unlock(&dev->mode_config.mutex);
+ crtc_funcs->dpms(crtc, DRM_MODE_DPMS_ON);
- /* Found a CRTC on this fb, now find encoders */
- list_for_each_entry(encoder, &dev->mode_config.encoder_list, head) {
- if (encoder->crtc == crtc) {
- struct drm_encoder_helper_funcs *encoder_funcs;
- encoder_funcs = encoder->helper_private;
- mutex_lock(&dev->mode_config.mutex);
- encoder_funcs->dpms(encoder, DRM_MODE_DPMS_ON);
- mutex_unlock(&dev->mode_config.mutex);
- }
+ /* Found a CRTC on this fb, now find encoders */
+ list_for_each_entry(encoder, &dev->mode_config.encoder_list, head) {
+ if (encoder->crtc == crtc) {
+ struct drm_encoder_helper_funcs *encoder_funcs;
+
+ encoder_funcs = encoder->helper_private;
+ encoder_funcs->dpms(encoder, DRM_MODE_DPMS_ON);
}
}
}
+ mutex_unlock(&dev->mode_config.mutex);
}
static void drm_fb_helper_off(struct fb_info *info, int dpms_mode)
@@ -334,6 +351,7 @@ static void drm_fb_helper_off(struct fb_info *info, int dpms_mode)
struct drm_fb_helper *fb_helper = info->par;
struct drm_device *dev = fb_helper->dev;
struct drm_crtc *crtc;
+ struct drm_crtc_helper_funcs *crtc_funcs;
struct drm_encoder *encoder;
int i;
@@ -341,32 +359,26 @@ static void drm_fb_helper_off(struct fb_info *info, int dpms_mode)
* For each CRTC in this fb, find all associated encoders
* and turn them off, then turn off the CRTC.
*/
+ mutex_lock(&dev->mode_config.mutex);
for (i = 0; i < fb_helper->crtc_count; i++) {
- list_for_each_entry(crtc, &dev->mode_config.crtc_list, head) {
- struct drm_crtc_helper_funcs *crtc_funcs =
- crtc->helper_private;
+ crtc = fb_helper->crtc_info[i].mode_set.crtc;
+ crtc_funcs = crtc->helper_private;
- /* Only mess with CRTCs in this fb */
- if (crtc->base.id != fb_helper->crtc_info[i].crtc_id ||
- !crtc->enabled)
- continue;
+ if (!crtc->enabled)
+ continue;
- /* Found a CRTC on this fb, now find encoders */
- list_for_each_entry(encoder, &dev->mode_config.encoder_list, head) {
- if (encoder->crtc == crtc) {
- struct drm_encoder_helper_funcs *encoder_funcs;
+ /* Found a CRTC on this fb, now find encoders */
+ list_for_each_entry(encoder, &dev->mode_config.encoder_list, head) {
+ if (encoder->crtc == crtc) {
+ struct drm_encoder_helper_funcs *encoder_funcs;
- encoder_funcs = encoder->helper_private;
- mutex_lock(&dev->mode_config.mutex);
- encoder_funcs->dpms(encoder, dpms_mode);
- mutex_unlock(&dev->mode_config.mutex);
- }
+ encoder_funcs = encoder->helper_private;
+ encoder_funcs->dpms(encoder, dpms_mode);
}
- mutex_lock(&dev->mode_config.mutex);
- crtc_funcs->dpms(crtc, DRM_MODE_DPMS_OFF);
- mutex_unlock(&dev->mode_config.mutex);
}
+ crtc_funcs->dpms(crtc, DRM_MODE_DPMS_OFF);
}
+ mutex_unlock(&dev->mode_config.mutex);
}
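
The on and off paths above differ only in the DPMS value and in whether the CRTC is touched before or after its encoders. A minimal sketch of how the two could fold into a single helper (hypothetical, not part of this patch; the demo_ prefix marks the assumption):

    static void demo_fb_helper_dpms(struct fb_info *info, int dpms_mode)
    {
            struct drm_fb_helper *fb_helper = info->par;
            struct drm_device *dev = fb_helper->dev;
            struct drm_crtc_helper_funcs *crtc_funcs;
            struct drm_encoder_helper_funcs *encoder_funcs;
            struct drm_encoder *encoder;
            struct drm_crtc *crtc;
            bool on = (dpms_mode == DRM_MODE_DPMS_ON);
            int i;

            mutex_lock(&dev->mode_config.mutex);
            for (i = 0; i < fb_helper->crtc_count; i++) {
                    crtc = fb_helper->crtc_info[i].mode_set.crtc;
                    crtc_funcs = crtc->helper_private;
                    if (!crtc->enabled)
                            continue;
                    if (on) /* light the CRTC before its encoders */
                            crtc_funcs->dpms(crtc, DRM_MODE_DPMS_ON);
                    list_for_each_entry(encoder, &dev->mode_config.encoder_list, head) {
                            if (encoder->crtc != crtc)
                                    continue;
                            encoder_funcs = encoder->helper_private;
                            encoder_funcs->dpms(encoder, dpms_mode);
                    }
                    if (!on) /* encoders first, then the CRTC */
                            crtc_funcs->dpms(crtc, dpms_mode);
            }
            mutex_unlock(&dev->mode_config.mutex);
    }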
int drm_fb_helper_blank(int blank, struct fb_info *info)
@@ -401,50 +413,81 @@ static void drm_fb_helper_crtc_free(struct drm_fb_helper *helper)
{
int i;
+ for (i = 0; i < helper->connector_count; i++)
+ kfree(helper->connector_info[i]);
+ kfree(helper->connector_info);
for (i = 0; i < helper->crtc_count; i++)
kfree(helper->crtc_info[i].mode_set.connectors);
kfree(helper->crtc_info);
}
-int drm_fb_helper_init_crtc_count(struct drm_fb_helper *helper, int crtc_count, int max_conn_count)
+int drm_fb_helper_init(struct drm_device *dev,
+ struct drm_fb_helper *fb_helper,
+ int crtc_count, int max_conn_count)
{
- struct drm_device *dev = helper->dev;
struct drm_crtc *crtc;
int ret = 0;
int i;
- helper->crtc_info = kcalloc(crtc_count, sizeof(struct drm_fb_helper_crtc), GFP_KERNEL);
- if (!helper->crtc_info)
+ fb_helper->dev = dev;
+
+ INIT_LIST_HEAD(&fb_helper->kernel_fb_list);
+
+ fb_helper->crtc_info = kcalloc(crtc_count, sizeof(struct drm_fb_helper_crtc), GFP_KERNEL);
+ if (!fb_helper->crtc_info)
return -ENOMEM;
- helper->crtc_count = crtc_count;
+ fb_helper->crtc_count = crtc_count;
+ fb_helper->connector_info = kcalloc(dev->mode_config.num_connector, sizeof(struct drm_fb_helper_connector *), GFP_KERNEL);
+ if (!fb_helper->connector_info) {
+ kfree(fb_helper->crtc_info);
+ return -ENOMEM;
+ }
+ fb_helper->connector_count = 0;
for (i = 0; i < crtc_count; i++) {
- helper->crtc_info[i].mode_set.connectors =
+ fb_helper->crtc_info[i].mode_set.connectors =
kcalloc(max_conn_count,
sizeof(struct drm_connector *),
GFP_KERNEL);
- if (!helper->crtc_info[i].mode_set.connectors) {
+ if (!fb_helper->crtc_info[i].mode_set.connectors) {
ret = -ENOMEM;
goto out_free;
}
- helper->crtc_info[i].mode_set.num_connectors = 0;
+ fb_helper->crtc_info[i].mode_set.num_connectors = 0;
}
i = 0;
list_for_each_entry(crtc, &dev->mode_config.crtc_list, head) {
- helper->crtc_info[i].crtc_id = crtc->base.id;
- helper->crtc_info[i].mode_set.crtc = crtc;
+ fb_helper->crtc_info[i].crtc_id = crtc->base.id;
+ fb_helper->crtc_info[i].mode_set.crtc = crtc;
i++;
}
- helper->conn_limit = max_conn_count;
+ fb_helper->conn_limit = max_conn_count;
return 0;
out_free:
- drm_fb_helper_crtc_free(helper);
+ drm_fb_helper_crtc_free(fb_helper);
return -ENOMEM;
}
-EXPORT_SYMBOL(drm_fb_helper_init_crtc_count);
+EXPORT_SYMBOL(drm_fb_helper_init);
+
+void drm_fb_helper_fini(struct drm_fb_helper *fb_helper)
+{
+ if (!list_empty(&fb_helper->kernel_fb_list)) {
+ list_del(&fb_helper->kernel_fb_list);
+ if (list_empty(&kernel_fb_helper_list)) {
+ printk(KERN_INFO "drm: unregistered panic notifier\n");
+ atomic_notifier_chain_unregister(&panic_notifier_list,
+ &paniced);
+ unregister_sysrq_key('v', &sysrq_drm_fb_helper_restore_op);
+ }
+ }
+
+ drm_fb_helper_crtc_free(fb_helper);
+
+}
+EXPORT_SYMBOL(drm_fb_helper_fini);
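
Putting the new entry points together, a driver's fbdev setup and teardown would look roughly like this (a sketch under assumptions: the demo_ names, the CRTC/connector counts, and the abbreviated error handling are all illustrative):

    static int demo_fbdev_init(struct drm_device *dev,
                               struct drm_fb_helper *helper)
    {
            int ret;

            /* helper->funcs (the fb_probe hook) must already be set */
            /* 2 CRTCs, up to 4 connectors per CRTC - driver-specific */
            ret = drm_fb_helper_init(dev, helper, 2, 4);
            if (ret)
                    return ret;

            ret = drm_fb_helper_single_add_all_connectors(helper);
            if (ret) {
                    drm_fb_helper_fini(helper);
                    return ret;
            }

            drm_fb_helper_initial_config(helper, 32 /* preferred bpp */);
            return 0;
    }

    static void demo_fbdev_fini(struct drm_fb_helper *helper)
    {
            drm_fb_helper_fini(helper); /* also frees connector_info */
    }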
static int setcolreg(struct drm_crtc *crtc, u16 red, u16 green,
u16 blue, u16 regno, struct fb_info *info)
@@ -508,20 +551,15 @@ static int setcolreg(struct drm_crtc *crtc, u16 red, u16 green,
int drm_fb_helper_setcmap(struct fb_cmap *cmap, struct fb_info *info)
{
struct drm_fb_helper *fb_helper = info->par;
- struct drm_device *dev = fb_helper->dev;
+ struct drm_crtc_helper_funcs *crtc_funcs;
u16 *red, *green, *blue, *transp;
struct drm_crtc *crtc;
int i, rc = 0;
int start;
- list_for_each_entry(crtc, &dev->mode_config.crtc_list, head) {
- struct drm_crtc_helper_funcs *crtc_funcs = crtc->helper_private;
- for (i = 0; i < fb_helper->crtc_count; i++) {
- if (crtc->base.id == fb_helper->crtc_info[i].crtc_id)
- break;
- }
- if (i == fb_helper->crtc_count)
- continue;
+ for (i = 0; i < fb_helper->crtc_count; i++) {
+ crtc = fb_helper->crtc_info[i].mode_set.crtc;
+ crtc_funcs = crtc->helper_private;
red = cmap->red;
green = cmap->green;
@@ -549,41 +587,6 @@ int drm_fb_helper_setcmap(struct fb_cmap *cmap, struct fb_info *info)
}
EXPORT_SYMBOL(drm_fb_helper_setcmap);
-int drm_fb_helper_setcolreg(unsigned regno,
- unsigned red,
- unsigned green,
- unsigned blue,
- unsigned transp,
- struct fb_info *info)
-{
- struct drm_fb_helper *fb_helper = info->par;
- struct drm_device *dev = fb_helper->dev;
- struct drm_crtc *crtc;
- int i;
- int ret;
-
- if (regno > 255)
- return 1;
-
- list_for_each_entry(crtc, &dev->mode_config.crtc_list, head) {
- struct drm_crtc_helper_funcs *crtc_funcs = crtc->helper_private;
- for (i = 0; i < fb_helper->crtc_count; i++) {
- if (crtc->base.id == fb_helper->crtc_info[i].crtc_id)
- break;
- }
- if (i == fb_helper->crtc_count)
- continue;
-
- ret = setcolreg(crtc, red, green, blue, regno, info);
- if (ret)
- return ret;
-
- crtc_funcs->load_lut(crtc);
- }
- return 0;
-}
-EXPORT_SYMBOL(drm_fb_helper_setcolreg);
-
int drm_fb_helper_check_var(struct fb_var_screeninfo *var,
struct fb_info *info)
{
@@ -687,23 +690,21 @@ int drm_fb_helper_set_par(struct fb_info *info)
return -EINVAL;
}
- list_for_each_entry(crtc, &dev->mode_config.crtc_list, head) {
-
- for (i = 0; i < fb_helper->crtc_count; i++) {
- if (crtc->base.id == fb_helper->crtc_info[i].crtc_id)
- break;
- }
- if (i == fb_helper->crtc_count)
- continue;
-
- if (crtc->fb == fb_helper->crtc_info[i].mode_set.fb) {
- mutex_lock(&dev->mode_config.mutex);
- ret = crtc->funcs->set_config(&fb_helper->crtc_info[i].mode_set);
+ mutex_lock(&dev->mode_config.mutex);
+ for (i = 0; i < fb_helper->crtc_count; i++) {
+ crtc = fb_helper->crtc_info[i].mode_set.crtc;
+ ret = crtc->funcs->set_config(&fb_helper->crtc_info[i].mode_set);
+ if (ret) {
mutex_unlock(&dev->mode_config.mutex);
- if (ret)
- return ret;
+ return ret;
}
}
+ mutex_unlock(&dev->mode_config.mutex);
+
+ if (fb_helper->delayed_hotplug) {
+ fb_helper->delayed_hotplug = false;
+ drm_fb_helper_hotplug_event(fb_helper);
+ }
return 0;
}
EXPORT_SYMBOL(drm_fb_helper_set_par);
@@ -718,14 +719,9 @@ int drm_fb_helper_pan_display(struct fb_var_screeninfo *var,
int ret = 0;
int i;
- list_for_each_entry(crtc, &dev->mode_config.crtc_list, head) {
- for (i = 0; i < fb_helper->crtc_count; i++) {
- if (crtc->base.id == fb_helper->crtc_info[i].crtc_id)
- break;
- }
-
- if (i == fb_helper->crtc_count)
- continue;
+ mutex_lock(&dev->mode_config.mutex);
+ for (i = 0; i < fb_helper->crtc_count; i++) {
+ crtc = fb_helper->crtc_info[i].mode_set.crtc;
modeset = &fb_helper->crtc_info[i].mode_set;
@@ -733,209 +729,138 @@ int drm_fb_helper_pan_display(struct fb_var_screeninfo *var,
modeset->y = var->yoffset;
if (modeset->num_connectors) {
- mutex_lock(&dev->mode_config.mutex);
ret = crtc->funcs->set_config(modeset);
- mutex_unlock(&dev->mode_config.mutex);
if (!ret) {
info->var.xoffset = var->xoffset;
info->var.yoffset = var->yoffset;
}
}
}
+ mutex_unlock(&dev->mode_config.mutex);
return ret;
}
EXPORT_SYMBOL(drm_fb_helper_pan_display);
-int drm_fb_helper_single_fb_probe(struct drm_device *dev,
- int preferred_bpp,
- int (*fb_create)(struct drm_device *dev,
- uint32_t fb_width,
- uint32_t fb_height,
- uint32_t surface_width,
- uint32_t surface_height,
- uint32_t surface_depth,
- uint32_t surface_bpp,
- struct drm_framebuffer **fb_ptr))
+int drm_fb_helper_single_fb_probe(struct drm_fb_helper *fb_helper,
+ int preferred_bpp)
{
- struct drm_crtc *crtc;
- struct drm_connector *connector;
- unsigned int fb_width = (unsigned)-1, fb_height = (unsigned)-1;
- unsigned int surface_width = 0, surface_height = 0;
int new_fb = 0;
int crtc_count = 0;
- int ret, i, conn_count = 0;
+ int i;
struct fb_info *info;
- struct drm_framebuffer *fb;
- struct drm_mode_set *modeset = NULL;
- struct drm_fb_helper *fb_helper;
- uint32_t surface_depth = 24, surface_bpp = 32;
+ struct drm_fb_helper_surface_size sizes;
+ int gamma_size = 0;
+
+ memset(&sizes, 0, sizeof(struct drm_fb_helper_surface_size));
+ sizes.surface_depth = 24;
+ sizes.surface_bpp = 32;
+ sizes.fb_width = (unsigned)-1;
+ sizes.fb_height = (unsigned)-1;
/* if driver picks 8 or 16 by default use that
for both depth/bpp */
- if (preferred_bpp != surface_bpp) {
- surface_depth = surface_bpp = preferred_bpp;
+ if (preferred_bpp != sizes.surface_bpp) {
+ sizes.surface_depth = sizes.surface_bpp = preferred_bpp;
}
/* first up get a count of crtcs now in use and new min/maxes width/heights */
- list_for_each_entry(connector, &dev->mode_config.connector_list, head) {
- struct drm_fb_helper_connector *fb_help_conn = connector->fb_helper_private;
-
+ for (i = 0; i < fb_helper->connector_count; i++) {
+ struct drm_fb_helper_connector *fb_helper_conn = fb_helper->connector_info[i];
struct drm_fb_helper_cmdline_mode *cmdline_mode;
- if (!fb_help_conn)
- continue;
-
- cmdline_mode = &fb_help_conn->cmdline_mode;
+ cmdline_mode = &fb_helper_conn->cmdline_mode;
if (cmdline_mode->bpp_specified) {
switch (cmdline_mode->bpp) {
case 8:
- surface_depth = surface_bpp = 8;
+ sizes.surface_depth = sizes.surface_bpp = 8;
break;
case 15:
- surface_depth = 15;
- surface_bpp = 16;
+ sizes.surface_depth = 15;
+ sizes.surface_bpp = 16;
break;
case 16:
- surface_depth = surface_bpp = 16;
+ sizes.surface_depth = sizes.surface_bpp = 16;
break;
case 24:
- surface_depth = surface_bpp = 24;
+ sizes.surface_depth = sizes.surface_bpp = 24;
break;
case 32:
- surface_depth = 24;
- surface_bpp = 32;
+ sizes.surface_depth = 24;
+ sizes.surface_bpp = 32;
break;
}
break;
}
}
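
The switch above encodes a fixed mapping from a command-line bpp to the (surface_depth, surface_bpp) pair; summarized for reference:

    /*
     *  cmdline bpp | surface_depth | surface_bpp
     *       8      |       8       |      8
     *      15      |      15       |     16
     *      16      |      16       |     16
     *      24      |      24       |     24
     *      32      |      24       |     32
     */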
- list_for_each_entry(crtc, &dev->mode_config.crtc_list, head) {
- if (drm_helper_crtc_in_use(crtc)) {
- if (crtc->desired_mode) {
- if (crtc->desired_mode->hdisplay < fb_width)
- fb_width = crtc->desired_mode->hdisplay;
-
- if (crtc->desired_mode->vdisplay < fb_height)
- fb_height = crtc->desired_mode->vdisplay;
-
- if (crtc->desired_mode->hdisplay > surface_width)
- surface_width = crtc->desired_mode->hdisplay;
-
- if (crtc->desired_mode->vdisplay > surface_height)
- surface_height = crtc->desired_mode->vdisplay;
- }
+ crtc_count = 0;
+ for (i = 0; i < fb_helper->crtc_count; i++) {
+ struct drm_display_mode *desired_mode;
+ desired_mode = fb_helper->crtc_info[i].desired_mode;
+
+ if (desired_mode) {
+ if (gamma_size == 0)
+ gamma_size = fb_helper->crtc_info[i].mode_set.crtc->gamma_size;
+ if (desired_mode->hdisplay < sizes.fb_width)
+ sizes.fb_width = desired_mode->hdisplay;
+ if (desired_mode->vdisplay < sizes.fb_height)
+ sizes.fb_height = desired_mode->vdisplay;
+ if (desired_mode->hdisplay > sizes.surface_width)
+ sizes.surface_width = desired_mode->hdisplay;
+ if (desired_mode->vdisplay > sizes.surface_height)
+ sizes.surface_height = desired_mode->vdisplay;
crtc_count++;
}
}
- if (crtc_count == 0 || fb_width == -1 || fb_height == -1) {
+ if (crtc_count == 0 || sizes.fb_width == -1 || sizes.fb_height == -1) {
/* hmm everyone went away - assume VGA cable just fell out
and will come back later. */
- return 0;
+ DRM_INFO("Cannot find any crtc or sizes - going 1024x768\n");
+ sizes.fb_width = sizes.surface_width = 1024;
+ sizes.fb_height = sizes.surface_height = 768;
}
- /* do we have an fb already? */
- if (list_empty(&dev->mode_config.fb_kernel_list)) {
- ret = (*fb_create)(dev, fb_width, fb_height, surface_width,
- surface_height, surface_depth, surface_bpp,
- &fb);
- if (ret)
- return -EINVAL;
- new_fb = 1;
- } else {
- fb = list_first_entry(&dev->mode_config.fb_kernel_list,
- struct drm_framebuffer, filp_head);
-
- /* if someone hotplugs something bigger than we have already allocated, we are pwned.
- As really we can't resize an fbdev that is in the wild currently due to fbdev
- not really being designed for the lower layers moving stuff around under it.
- - so in the grand style of things - punt. */
- if ((fb->width < surface_width) ||
- (fb->height < surface_height)) {
- DRM_ERROR("Framebuffer not large enough to scale console onto.\n");
- return -EINVAL;
- }
- }
-
- info = fb->fbdev;
- fb_helper = info->par;
-
- crtc_count = 0;
- /* okay we need to setup new connector sets in the crtcs */
- list_for_each_entry(crtc, &dev->mode_config.crtc_list, head) {
- modeset = &fb_helper->crtc_info[crtc_count].mode_set;
- modeset->fb = fb;
- conn_count = 0;
- list_for_each_entry(connector, &dev->mode_config.connector_list, head) {
- if (connector->encoder)
- if (connector->encoder->crtc == modeset->crtc) {
- modeset->connectors[conn_count] = connector;
- conn_count++;
- if (conn_count > fb_helper->conn_limit)
- BUG();
- }
- }
-
- for (i = conn_count; i < fb_helper->conn_limit; i++)
- modeset->connectors[i] = NULL;
+ /* push the fb allocation down into the driver's ->fb_probe hook */
+ new_fb = (*fb_helper->funcs->fb_probe)(fb_helper, &sizes);
+ if (new_fb < 0)
+ return new_fb;
- modeset->crtc = crtc;
- crtc_count++;
+ info = fb_helper->fbdev;
- modeset->num_connectors = conn_count;
- if (modeset->crtc->desired_mode) {
- if (modeset->mode)
- drm_mode_destroy(dev, modeset->mode);
- modeset->mode = drm_mode_duplicate(dev,
- modeset->crtc->desired_mode);
- }
+ /* set the fb pointer */
+ for (i = 0; i < fb_helper->crtc_count; i++) {
+ fb_helper->crtc_info[i].mode_set.fb = fb_helper->fb;
}
- fb_helper->crtc_count = crtc_count;
- fb_helper->fb = fb;
if (new_fb) {
info->var.pixclock = 0;
- ret = fb_alloc_cmap(&info->cmap, modeset->crtc->gamma_size, 0);
- if (ret)
- return ret;
if (register_framebuffer(info) < 0) {
- fb_dealloc_cmap(&info->cmap);
return -EINVAL;
}
+
+ printk(KERN_INFO "fb%d: %s frame buffer device\n", info->node,
+ info->fix.id);
+
} else {
drm_fb_helper_set_par(info);
}
- printk(KERN_INFO "fb%d: %s frame buffer device\n", info->node,
- info->fix.id);
/* Switch back to kernel console on panic */
/* multi card linked list maybe */
if (list_empty(&kernel_fb_helper_list)) {
- printk(KERN_INFO "registered panic notifier\n");
+ printk(KERN_INFO "drm: registered panic notifier\n");
atomic_notifier_chain_register(&panic_notifier_list,
&paniced);
register_sysrq_key('v', &sysrq_drm_fb_helper_restore_op);
}
- list_add(&fb_helper->kernel_fb_list, &kernel_fb_helper_list);
+ if (new_fb)
+ list_add(&fb_helper->kernel_fb_list, &kernel_fb_helper_list);
+
return 0;
}
EXPORT_SYMBOL(drm_fb_helper_single_fb_probe);
-void drm_fb_helper_free(struct drm_fb_helper *helper)
-{
- list_del(&helper->kernel_fb_list);
- if (list_empty(&kernel_fb_helper_list)) {
- printk(KERN_INFO "unregistered panic notifier\n");
- atomic_notifier_chain_unregister(&panic_notifier_list,
- &paniced);
- unregister_sysrq_key('v', &sysrq_drm_fb_helper_restore_op);
- }
- drm_fb_helper_crtc_free(helper);
- fb_dealloc_cmap(&helper->fb->fbdev->cmap);
-}
-EXPORT_SYMBOL(drm_fb_helper_free);
-
void drm_fb_helper_fill_fix(struct fb_info *info, uint32_t pitch,
uint32_t depth)
{
@@ -954,10 +879,11 @@ void drm_fb_helper_fill_fix(struct fb_info *info, uint32_t pitch,
}
EXPORT_SYMBOL(drm_fb_helper_fill_fix);
-void drm_fb_helper_fill_var(struct fb_info *info, struct drm_framebuffer *fb,
+void drm_fb_helper_fill_var(struct fb_info *info, struct drm_fb_helper *fb_helper,
uint32_t fb_width, uint32_t fb_height)
{
- info->pseudo_palette = fb->pseudo_palette;
+ struct drm_framebuffer *fb = fb_helper->fb;
+ info->pseudo_palette = fb_helper->pseudo_palette;
info->var.xres_virtual = fb->width;
info->var.yres_virtual = fb->height;
info->var.bits_per_pixel = fb->bits_per_pixel;
@@ -1025,3 +951,457 @@ void drm_fb_helper_fill_var(struct fb_info *info, struct drm_framebuffer *fb,
info->var.yres = fb_height;
}
EXPORT_SYMBOL(drm_fb_helper_fill_var);
+
+static int drm_fb_helper_probe_connector_modes(struct drm_fb_helper *fb_helper,
+ uint32_t maxX,
+ uint32_t maxY)
+{
+ struct drm_connector *connector;
+ int count = 0;
+ int i;
+
+ for (i = 0; i < fb_helper->connector_count; i++) {
+ connector = fb_helper->connector_info[i]->connector;
+ count += connector->funcs->fill_modes(connector, maxX, maxY);
+ }
+
+ return count;
+}
+
+static struct drm_display_mode *drm_has_preferred_mode(struct drm_fb_helper_connector *fb_connector, int width, int height)
+{
+ struct drm_display_mode *mode;
+
+ list_for_each_entry(mode, &fb_connector->connector->modes, head) {
+ if (drm_mode_width(mode) > width ||
+ drm_mode_height(mode) > height)
+ continue;
+ if (mode->type & DRM_MODE_TYPE_PREFERRED)
+ return mode;
+ }
+ return NULL;
+}
+
+static bool drm_has_cmdline_mode(struct drm_fb_helper_connector *fb_connector)
+{
+ struct drm_fb_helper_cmdline_mode *cmdline_mode;
+ cmdline_mode = &fb_connector->cmdline_mode;
+ return cmdline_mode->specified;
+}
+
+static struct drm_display_mode *drm_pick_cmdline_mode(struct drm_fb_helper_connector *fb_helper_conn,
+ int width, int height)
+{
+ struct drm_fb_helper_cmdline_mode *cmdline_mode;
+ struct drm_display_mode *mode = NULL;
+
+ cmdline_mode = &fb_helper_conn->cmdline_mode;
+ if (cmdline_mode->specified == false)
+ return mode;
+
+ /* attempt to find a matching mode in the list of modes we
+ * have gathered so far; if none matches, add a CVT mode that conforms
+ */
+ if (cmdline_mode->rb || cmdline_mode->margins)
+ goto create_mode;
+
+ list_for_each_entry(mode, &fb_helper_conn->connector->modes, head) {
+ /* check width/height */
+ if (mode->hdisplay != cmdline_mode->xres ||
+ mode->vdisplay != cmdline_mode->yres)
+ continue;
+
+ if (cmdline_mode->refresh_specified) {
+ if (mode->vrefresh != cmdline_mode->refresh)
+ continue;
+ }
+
+ if (cmdline_mode->interlace) {
+ if (!(mode->flags & DRM_MODE_FLAG_INTERLACE))
+ continue;
+ }
+ return mode;
+ }
+
+create_mode:
+ mode = drm_cvt_mode(fb_helper_conn->connector->dev, cmdline_mode->xres,
+ cmdline_mode->yres,
+ cmdline_mode->refresh_specified ? cmdline_mode->refresh : 60,
+ cmdline_mode->rb, cmdline_mode->interlace,
+ cmdline_mode->margins);
+ drm_mode_set_crtcinfo(mode, CRTC_INTERLACE_HALVE_V);
+ list_add(&mode->head, &fb_helper_conn->connector->modes);
+ return mode;
+}
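
When no probed mode matches the request, the create_mode path synthesizes a CVT timing instead of failing. An illustrative call for an assumed 'video=...:1280x1024@75' option that matched nothing:

    mode = drm_cvt_mode(fb_helper_conn->connector->dev,
                        1280, 1024, 75,
                        false /* reduced blanking */,
                        false /* interlaced */,
                        false /* margins */);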
+
+static bool drm_connector_enabled(struct drm_connector *connector, bool strict)
+{
+ bool enable;
+
+ if (strict) {
+ enable = connector->status == connector_status_connected;
+ } else {
+ enable = connector->status != connector_status_disconnected;
+ }
+ return enable;
+}
+
+static void drm_enable_connectors(struct drm_fb_helper *fb_helper,
+ bool *enabled)
+{
+ bool any_enabled = false;
+ struct drm_connector *connector;
+ int i = 0;
+
+ for (i = 0; i < fb_helper->connector_count; i++) {
+ connector = fb_helper->connector_info[i]->connector;
+ enabled[i] = drm_connector_enabled(connector, true);
+ DRM_DEBUG_KMS("connector %d enabled? %s\n", connector->base.id,
+ enabled[i] ? "yes" : "no");
+ any_enabled |= enabled[i];
+ }
+
+ if (any_enabled)
+ return;
+
+ for (i = 0; i < fb_helper->connector_count; i++) {
+ connector = fb_helper->connector_info[i]->connector;
+ enabled[i] = drm_connector_enabled(connector, false);
+ }
+}
+
+static bool drm_target_cloned(struct drm_fb_helper *fb_helper,
+ struct drm_display_mode **modes,
+ bool *enabled, int width, int height)
+{
+ int count, i, j;
+ bool can_clone = false;
+ struct drm_fb_helper_connector *fb_helper_conn;
+ struct drm_display_mode *dmt_mode, *mode;
+
+ /* only contemplate cloning in the single crtc case */
+ if (fb_helper->crtc_count > 1)
+ return false;
+
+ count = 0;
+ for (i = 0; i < fb_helper->connector_count; i++) {
+ if (enabled[i])
+ count++;
+ }
+
+ /* only contemplate cloning if more than one connector is enabled */
+ if (count <= 1)
+ return false;
+
+ /* check the command line or if nothing common pick 1024x768 */
+ can_clone = true;
+ for (i = 0; i < fb_helper->connector_count; i++) {
+ if (!enabled[i])
+ continue;
+ fb_helper_conn = fb_helper->connector_info[i];
+ modes[i] = drm_pick_cmdline_mode(fb_helper_conn, width, height);
+ if (!modes[i]) {
+ can_clone = false;
+ break;
+ }
+ for (j = 0; j < i; j++) {
+ if (!enabled[j])
+ continue;
+ if (!drm_mode_equal(modes[j], modes[i]))
+ can_clone = false;
+ }
+ }
+
+ if (can_clone) {
+ DRM_DEBUG_KMS("can clone using command line\n");
+ return true;
+ }
+
+ /* try and find a 1024x768 mode on each connector */
+ can_clone = true;
+ dmt_mode = drm_mode_find_dmt(fb_helper->dev, 1024, 768, 60);
+
+ for (i = 0; i < fb_helper->connector_count; i++) {
+
+ if (!enabled[i])
+ continue;
+
+ fb_helper_conn = fb_helper->connector_info[i];
+ list_for_each_entry(mode, &fb_helper_conn->connector->modes, head) {
+ if (drm_mode_equal(mode, dmt_mode))
+ modes[i] = mode;
+ }
+ if (!modes[i])
+ can_clone = false;
+ }
+
+ if (can_clone) {
+ DRM_DEBUG_KMS("can clone using 1024x768\n");
+ return true;
+ }
+ DRM_INFO("kms: can't enable cloning when we probably wanted to.\n");
+ return false;
+}
+
+static bool drm_target_preferred(struct drm_fb_helper *fb_helper,
+ struct drm_display_mode **modes,
+ bool *enabled, int width, int height)
+{
+ struct drm_fb_helper_connector *fb_helper_conn;
+ int i;
+
+ for (i = 0; i < fb_helper->connector_count; i++) {
+ fb_helper_conn = fb_helper->connector_info[i];
+
+ if (enabled[i] == false)
+ continue;
+
+ DRM_DEBUG_KMS("looking for cmdline mode on connector %d\n",
+ fb_helper_conn->connector->base.id);
+
+ /* go for the command line mode first */
+ modes[i] = drm_pick_cmdline_mode(fb_helper_conn, width, height);
+ if (!modes[i]) {
+ DRM_DEBUG_KMS("looking for preferred mode on connector %d\n",
+ fb_helper_conn->connector->base.id);
+ modes[i] = drm_has_preferred_mode(fb_helper_conn, width, height);
+ }
+ /* No preferred modes, pick one off the list */
+ if (!modes[i] && !list_empty(&fb_helper_conn->connector->modes)) {
+ list_for_each_entry(modes[i], &fb_helper_conn->connector->modes, head)
+ break;
+ }
+ DRM_DEBUG_KMS("found mode %s\n", modes[i] ? modes[i]->name :
+ "none");
+ }
+ return true;
+}
+
+static int drm_pick_crtcs(struct drm_fb_helper *fb_helper,
+ struct drm_fb_helper_crtc **best_crtcs,
+ struct drm_display_mode **modes,
+ int n, int width, int height)
+{
+ int c, o;
+ struct drm_device *dev = fb_helper->dev;
+ struct drm_connector *connector;
+ struct drm_connector_helper_funcs *connector_funcs;
+ struct drm_encoder *encoder;
+ struct drm_fb_helper_crtc *best_crtc;
+ int my_score, best_score, score;
+ struct drm_fb_helper_crtc **crtcs, *crtc;
+ struct drm_fb_helper_connector *fb_helper_conn;
+
+ if (n == fb_helper->connector_count)
+ return 0;
+
+ fb_helper_conn = fb_helper->connector_info[n];
+ connector = fb_helper_conn->connector;
+
+ best_crtcs[n] = NULL;
+ best_crtc = NULL;
+ best_score = drm_pick_crtcs(fb_helper, best_crtcs, modes, n+1, width, height);
+ if (modes[n] == NULL)
+ return best_score;
+
+ crtcs = kzalloc(dev->mode_config.num_connector *
+ sizeof(struct drm_fb_helper_crtc *), GFP_KERNEL);
+ if (!crtcs)
+ return best_score;
+
+ my_score = 1;
+ if (connector->status == connector_status_connected)
+ my_score++;
+ if (drm_has_cmdline_mode(fb_helper_conn))
+ my_score++;
+ if (drm_has_preferred_mode(fb_helper_conn, width, height))
+ my_score++;
+
+ connector_funcs = connector->helper_private;
+ encoder = connector_funcs->best_encoder(connector);
+ if (!encoder)
+ goto out;
+
+ /* select a crtc for this connector and then attempt to configure
+ remaining connectors */
+ for (c = 0; c < fb_helper->crtc_count; c++) {
+ crtc = &fb_helper->crtc_info[c];
+
+ if ((encoder->possible_crtcs & (1 << c)) == 0) {
+ continue;
+ }
+
+ for (o = 0; o < n; o++)
+ if (best_crtcs[o] == crtc)
+ break;
+
+ if (o < n) {
+ /* ignore cloning unless only a single crtc */
+ if (fb_helper->crtc_count > 1)
+ continue;
+
+ if (!drm_mode_equal(modes[o], modes[n]))
+ continue;
+ }
+
+ crtcs[n] = crtc;
+ memcpy(crtcs, best_crtcs, n * sizeof(struct drm_fb_helper_crtc *));
+ score = my_score + drm_pick_crtcs(fb_helper, crtcs, modes, n + 1,
+ width, height);
+ if (score > best_score) {
+ best_crtc = crtc;
+ best_score = score;
+ memcpy(best_crtcs, crtcs,
+ dev->mode_config.num_connector *
+ sizeof(struct drm_fb_helper_crtc *));
+ }
+ }
+out:
+ kfree(crtcs);
+ return best_score;
+}
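
drm_pick_crtcs() is an exhaustive recursive search over connector-to-CRTC assignments: each connector contributes a score, totals are compared, and the best mapping wins. The per-connector weights, restated from the code above:

    /*
     * my_score starts at 1 (the connector exists), then:
     *   +1 if the connector reports connected
     *   +1 if a command-line mode was specified for it
     *   +1 if it has a preferred mode fitting the fb size
     */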
+
+static void drm_setup_crtcs(struct drm_fb_helper *fb_helper)
+{
+ struct drm_device *dev = fb_helper->dev;
+ struct drm_fb_helper_crtc **crtcs;
+ struct drm_display_mode **modes;
+ struct drm_encoder *encoder;
+ struct drm_mode_set *modeset;
+ bool *enabled;
+ int width, height;
+ int i, ret;
+
+ DRM_DEBUG_KMS("\n");
+
+ width = dev->mode_config.max_width;
+ height = dev->mode_config.max_height;
+
+ /* clean out all the encoder/crtc combos */
+ list_for_each_entry(encoder, &dev->mode_config.encoder_list, head) {
+ encoder->crtc = NULL;
+ }
+
+ crtcs = kcalloc(dev->mode_config.num_connector,
+ sizeof(struct drm_fb_helper_crtc *), GFP_KERNEL);
+ modes = kcalloc(dev->mode_config.num_connector,
+ sizeof(struct drm_display_mode *), GFP_KERNEL);
+ enabled = kcalloc(dev->mode_config.num_connector,
+ sizeof(bool), GFP_KERNEL);
+
+ drm_enable_connectors(fb_helper, enabled);
+
+ ret = drm_target_cloned(fb_helper, modes, enabled, width, height);
+ if (!ret) {
+ ret = drm_target_preferred(fb_helper, modes, enabled, width, height);
+ if (!ret)
+ DRM_ERROR("Unable to find initial modes\n");
+ }
+
+ DRM_DEBUG_KMS("picking CRTCs for %dx%d config\n", width, height);
+
+ drm_pick_crtcs(fb_helper, crtcs, modes, 0, width, height);
+
+ /* need to set the modesets up here for use later */
+ /* fill out the connector<->crtc mappings into the modesets */
+ for (i = 0; i < fb_helper->crtc_count; i++) {
+ modeset = &fb_helper->crtc_info[i].mode_set;
+ modeset->num_connectors = 0;
+ }
+
+ for (i = 0; i < fb_helper->connector_count; i++) {
+ struct drm_display_mode *mode = modes[i];
+ struct drm_fb_helper_crtc *fb_crtc = crtcs[i];
+ modeset = &fb_crtc->mode_set;
+
+ if (mode && fb_crtc) {
+ DRM_DEBUG_KMS("desired mode %s set on crtc %d\n",
+ mode->name, fb_crtc->mode_set.crtc->base.id);
+ fb_crtc->desired_mode = mode;
+ if (modeset->mode)
+ drm_mode_destroy(dev, modeset->mode);
+ modeset->mode = drm_mode_duplicate(dev,
+ fb_crtc->desired_mode);
+ modeset->connectors[modeset->num_connectors++] = fb_helper->connector_info[i]->connector;
+ }
+ }
+
+ kfree(crtcs);
+ kfree(modes);
+ kfree(enabled);
+}
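
Note that the three kcalloc() results above are consumed without a NULL check. A defensive variant (assumed, not in the patch) could bail out before probing; kfree(NULL) is a no-op, so unconditional frees are safe:

    if (!crtcs || !modes || !enabled) {
            kfree(crtcs);
            kfree(modes);
            kfree(enabled);
            return;
    }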
+
+/**
+ * drm_fb_helper_initial_config - set up a sane initial connector configuration
+ * @fb_helper: fb helper to configure
+ * @bpp_sel: preferred bits per pixel for the framebuffer
+ *
+ * LOCKING:
+ * Called at init time, must take mode config lock.
+ *
+ * Scan the CRTCs and connectors and try to put together an initial setup.
+ * At the moment, this is a cloned configuration across all heads with
+ * a new framebuffer object as the backing store.
+ *
+ * RETURNS:
+ * Zero if everything went ok, nonzero otherwise.
+ */
+bool drm_fb_helper_initial_config(struct drm_fb_helper *fb_helper, int bpp_sel)
+{
+ struct drm_device *dev = fb_helper->dev;
+ int count = 0;
+
+ /* disable all the possible outputs/crtcs before entering KMS mode */
+ drm_helper_disable_unused_functions(fb_helper->dev);
+
+ drm_fb_helper_parse_command_line(fb_helper);
+
+ count = drm_fb_helper_probe_connector_modes(fb_helper,
+ dev->mode_config.max_width,
+ dev->mode_config.max_height);
+ /*
+ * we shouldn't end up with no modes here.
+ */
+ if (count == 0) {
+ printk(KERN_INFO "No connectors reported connected with modes\n");
+ }
+ drm_setup_crtcs(fb_helper);
+
+ return drm_fb_helper_single_fb_probe(fb_helper, bpp_sel);
+}
+EXPORT_SYMBOL(drm_fb_helper_initial_config);
+
+bool drm_fb_helper_hotplug_event(struct drm_fb_helper *fb_helper)
+{
+ int count = 0;
+ u32 max_width, max_height, bpp_sel;
+ bool bound = false, crtcs_bound = false;
+ struct drm_crtc *crtc;
+
+ if (!fb_helper->fb)
+ return false;
+
+ list_for_each_entry(crtc, &fb_helper->dev->mode_config.crtc_list, head) {
+ if (crtc->fb)
+ crtcs_bound = true;
+ if (crtc->fb == fb_helper->fb)
+ bound = true;
+ }
+
+ if (!bound && crtcs_bound) {
+ fb_helper->delayed_hotplug = true;
+ return false;
+ }
+ DRM_DEBUG_KMS("\n");
+
+ max_width = fb_helper->fb->width;
+ max_height = fb_helper->fb->height;
+ bpp_sel = fb_helper->fb->bits_per_pixel;
+
+ count = drm_fb_helper_probe_connector_modes(fb_helper, max_width,
+ max_height);
+ drm_setup_crtcs(fb_helper);
+
+ return drm_fb_helper_single_fb_probe(fb_helper, bpp_sel);
+}
+EXPORT_SYMBOL(drm_fb_helper_hotplug_event);
+
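
drm_fb_helper_hotplug_event() is meant to be driven from a driver's output-poll hook so the fbdev config follows connector changes; a hedged wiring sketch (the demo_ names are assumptions):

    static void demo_output_poll_changed(struct drm_device *dev)
    {
            struct demo_private *priv = dev->dev_private;

            drm_fb_helper_hotplug_event(&priv->fbdev->helper);
    }

    static const struct drm_mode_config_funcs demo_mode_funcs = {
            /* .fb_create etc. elided */
            .output_poll_changed = demo_output_poll_changed,
    };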
diff --git a/drivers/gpu/drm/drm_fops.c b/drivers/gpu/drm/drm_fops.c
index 9d532d7fdf5..e7aace20981 100644
--- a/drivers/gpu/drm/drm_fops.c
+++ b/drivers/gpu/drm/drm_fops.c
@@ -243,11 +243,10 @@ static int drm_open_helper(struct inode *inode, struct file *filp,
DRM_DEBUG("pid = %d, minor = %d\n", task_pid_nr(current), minor_id);
- priv = kmalloc(sizeof(*priv), GFP_KERNEL);
+ priv = kzalloc(sizeof(*priv), GFP_KERNEL);
if (!priv)
return -ENOMEM;
- memset(priv, 0, sizeof(*priv));
filp->private_data = priv;
priv->filp = filp;
priv->uid = current_euid();
diff --git a/drivers/gpu/drm/drm_gem.c b/drivers/gpu/drm/drm_gem.c
index aa89d4b0b4c..33dad3fa604 100644
--- a/drivers/gpu/drm/drm_gem.c
+++ b/drivers/gpu/drm/drm_gem.c
@@ -124,6 +124,31 @@ drm_gem_destroy(struct drm_device *dev)
}
/**
+ * Initialize an already allocated GEM object of the specified size with
+ * shmfs backing store.
+ */
+int drm_gem_object_init(struct drm_device *dev,
+ struct drm_gem_object *obj, size_t size)
+{
+ BUG_ON((size & (PAGE_SIZE - 1)) != 0);
+
+ obj->dev = dev;
+ obj->filp = shmem_file_setup("drm mm object", size, VM_NORESERVE);
+ if (IS_ERR(obj->filp))
+ return -ENOMEM;
+
+ kref_init(&obj->refcount);
+ kref_init(&obj->handlecount);
+ obj->size = size;
+
+ atomic_inc(&dev->object_count);
+ atomic_add(obj->size, &dev->object_memory);
+
+ return 0;
+}
+EXPORT_SYMBOL(drm_gem_object_init);
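
drm_gem_object_init() exists so a driver can embed the GEM object inside its own buffer structure instead of having drm_gem_object_alloc() kzalloc one for it; a minimal sketch (demo_bo is an assumed driver type):

    struct demo_bo {
            struct drm_gem_object base;
            /* driver-private state follows */
    };

    static struct demo_bo *demo_bo_create(struct drm_device *dev, size_t size)
    {
            struct demo_bo *bo;

            bo = kzalloc(sizeof(*bo), GFP_KERNEL);
            if (!bo)
                    return NULL;

            if (drm_gem_object_init(dev, &bo->base, size) != 0) {
                    kfree(bo);
                    return NULL;
            }
            return bo;
    }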
+
+/**
* Allocate a GEM object of the specified size with shmfs backing store
*/
struct drm_gem_object *
@@ -131,28 +156,22 @@ drm_gem_object_alloc(struct drm_device *dev, size_t size)
{
struct drm_gem_object *obj;
- BUG_ON((size & (PAGE_SIZE - 1)) != 0);
-
obj = kzalloc(sizeof(*obj), GFP_KERNEL);
if (!obj)
goto free;
- obj->dev = dev;
- obj->filp = shmem_file_setup("drm mm object", size, VM_NORESERVE);
- if (IS_ERR(obj->filp))
+ if (drm_gem_object_init(dev, obj, size) != 0)
goto free;
- kref_init(&obj->refcount);
- kref_init(&obj->handlecount);
- obj->size = size;
if (dev->driver->gem_init_object != NULL &&
dev->driver->gem_init_object(obj) != 0) {
goto fput;
}
- atomic_inc(&dev->object_count);
- atomic_add(obj->size, &dev->object_memory);
return obj;
fput:
+ /* drm_gem_object_init() bumped the global counters; readjust them. */
+ atomic_dec(&dev->object_count);
+ atomic_sub(obj->size, &dev->object_memory);
fput(obj->filp);
free:
kfree(obj);
@@ -403,15 +422,15 @@ drm_gem_release(struct drm_device *dev, struct drm_file *file_private)
idr_destroy(&file_private->object_idr);
}
-static void
-drm_gem_object_free_common(struct drm_gem_object *obj)
+void
+drm_gem_object_release(struct drm_gem_object *obj)
{
struct drm_device *dev = obj->dev;
fput(obj->filp);
atomic_dec(&dev->object_count);
atomic_sub(obj->size, &dev->object_memory);
- kfree(obj);
}
+EXPORT_SYMBOL(drm_gem_object_release);
/**
* Called after the last reference to the object has been lost.
@@ -429,8 +448,6 @@ drm_gem_object_free(struct kref *kref)
if (dev->driver->gem_free_object != NULL)
dev->driver->gem_free_object(obj);
-
- drm_gem_object_free_common(obj);
}
EXPORT_SYMBOL(drm_gem_object_free);
@@ -453,8 +470,6 @@ drm_gem_object_free_unlocked(struct kref *kref)
dev->driver->gem_free_object(obj);
mutex_unlock(&dev->struct_mutex);
}
-
- drm_gem_object_free_common(obj);
}
EXPORT_SYMBOL(drm_gem_object_free_unlocked);
diff --git a/drivers/gpu/drm/drm_irq.c b/drivers/gpu/drm/drm_irq.c
index 3bd87276156..a263b7070fc 100644
--- a/drivers/gpu/drm/drm_irq.c
+++ b/drivers/gpu/drm/drm_irq.c
@@ -476,6 +476,7 @@ void drm_vblank_off(struct drm_device *dev, int crtc)
unsigned long irqflags;
spin_lock_irqsave(&dev->vbl_lock, irqflags);
+ dev->driver->disable_vblank(dev, crtc);
DRM_WAKEUP(&dev->vbl_queue[crtc]);
dev->vblank_enabled[crtc] = 0;
dev->last_vblank[crtc] = dev->driver->get_vblank_counter(dev, crtc);
diff --git a/drivers/gpu/drm/drm_memory.c b/drivers/gpu/drm/drm_memory.c
index e4865f99989..7732268eced 100644
--- a/drivers/gpu/drm/drm_memory.c
+++ b/drivers/gpu/drm/drm_memory.c
@@ -77,7 +77,7 @@ static void *agp_remap(unsigned long offset, unsigned long size,
&& (agpmem->bound + (agpmem->pages << PAGE_SHIFT)) >=
(offset + size))
break;
- if (!agpmem)
+ if (&agpmem->head == &dev->agp->memory)
return NULL;
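
The agp_remap() fix above corrects a classic list_for_each_entry() pitfall: if the loop runs to completion without a break, the cursor is never NULL, it ends up pointing at the container of the list head itself. The idiomatic "not found" test, as a generic sketch (pos, head, node and matches() are placeholders):

    list_for_each_entry(pos, &head, node) {
            if (matches(pos))
                    break; /* found */
    }
    if (&pos->node == &head)
            return NULL; /* loop completed: no match */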
/*
diff --git a/drivers/gpu/drm/drm_modes.c b/drivers/gpu/drm/drm_modes.c
index 76d63394c77..f1f473ea97d 100644
--- a/drivers/gpu/drm/drm_modes.c
+++ b/drivers/gpu/drm/drm_modes.c
@@ -258,8 +258,10 @@ struct drm_display_mode *drm_cvt_mode(struct drm_device *dev, int hdisplay,
drm_mode->clock -= drm_mode->clock % CVT_CLOCK_STEP;
/* 18/16. Find actual vertical frame frequency */
/* ignore - just set the mode flag for interlaced */
- if (interlaced)
+ if (interlaced) {
drm_mode->vtotal *= 2;
+ drm_mode->flags |= DRM_MODE_FLAG_INTERLACE;
+ }
/* Fill the mode line name */
drm_mode_set_name(drm_mode);
if (reduced)
@@ -268,43 +270,35 @@ struct drm_display_mode *drm_cvt_mode(struct drm_device *dev, int hdisplay,
else
drm_mode->flags |= (DRM_MODE_FLAG_PVSYNC |
DRM_MODE_FLAG_NHSYNC);
- if (interlaced)
- drm_mode->flags |= DRM_MODE_FLAG_INTERLACE;
- return drm_mode;
+ return drm_mode;
}
EXPORT_SYMBOL(drm_cvt_mode);
/**
- * drm_gtf_mode - create the modeline based on GTF algorithm
+ * drm_gtf_mode_complex - create the modeline based on full GTF algorithm
*
* @dev: drm device
* @hdisplay: hdisplay size
* @vdisplay: vdisplay size
* @vrefresh: vrefresh rate
* @interlaced: whether interlace is supported
- * @margins :whether the margin is supported
+ * @margins: desired margin size
+ * @GTF_[MCKJ]: extended GTF formula parameters
*
* LOCKING:
* none.
*
- * return the modeline based on GTF algorithm
- *
- * This function is to create the modeline based on the GTF algorithm.
- * Generalized Timing Formula is derived from:
- * GTF Spreadsheet by Andy Morrish (1/5/97)
- * available at http://www.vesa.org
+ * Returns the modeline based on the full GTF algorithm.
*
- * And it is copied from the file of xserver/hw/xfree86/modes/xf86gtf.c.
- * What I have done is to translate it by using integer calculation.
- * I also refer to the function of fb_get_mode in the file of
- * drivers/video/fbmon.c
+ * GTF feature blocks specify C and J in multiples of 0.5, so we pass them
+ * in here multiplied by two. For a C of 40, pass in 80.
*/
-struct drm_display_mode *drm_gtf_mode(struct drm_device *dev, int hdisplay,
- int vdisplay, int vrefresh,
- bool interlaced, int margins)
-{
- /* 1) top/bottom margin size (% of height) - default: 1.8, */
+struct drm_display_mode *
+drm_gtf_mode_complex(struct drm_device *dev, int hdisplay, int vdisplay,
+ int vrefresh, bool interlaced, int margins,
+ int GTF_M, int GTF_2C, int GTF_K, int GTF_2J)
+{ /* 1) top/bottom margin size (% of height) - default: 1.8, */
#define GTF_MARGIN_PERCENTAGE 18
/* 2) character cell horizontal granularity (pixels) - default 8 */
#define GTF_CELL_GRAN 8
@@ -316,17 +310,9 @@ struct drm_display_mode *drm_gtf_mode(struct drm_device *dev, int hdisplay,
#define H_SYNC_PERCENT 8
/* min time of vsync + back porch (microsec) */
#define MIN_VSYNC_PLUS_BP 550
- /* blanking formula gradient */
-#define GTF_M 600
- /* blanking formula offset */
-#define GTF_C 40
- /* blanking formula scaling factor */
-#define GTF_K 128
- /* blanking formula scaling factor */
-#define GTF_J 20
/* C' and M' are part of the Blanking Duty Cycle computation */
-#define GTF_C_PRIME (((GTF_C - GTF_J) * GTF_K / 256) + GTF_J)
-#define GTF_M_PRIME (GTF_K * GTF_M / 256)
+#define GTF_C_PRIME ((((GTF_2C - GTF_2J) * GTF_K / 256) + GTF_2J) / 2)
+#define GTF_M_PRIME (GTF_K * GTF_M / 256)
struct drm_display_mode *drm_mode;
unsigned int hdisplay_rnd, vdisplay_rnd, vfieldrate_rqd;
int top_margin, bottom_margin;
@@ -460,17 +446,61 @@ struct drm_display_mode *drm_gtf_mode(struct drm_device *dev, int hdisplay,
drm_mode->clock = pixel_freq;
- drm_mode_set_name(drm_mode);
- drm_mode->flags = DRM_MODE_FLAG_NHSYNC | DRM_MODE_FLAG_PVSYNC;
-
if (interlaced) {
drm_mode->vtotal *= 2;
drm_mode->flags |= DRM_MODE_FLAG_INTERLACE;
}
+ drm_mode_set_name(drm_mode);
+ if (GTF_M == 600 && GTF_2C == 80 && GTF_K == 128 && GTF_2J == 40)
+ drm_mode->flags = DRM_MODE_FLAG_NHSYNC | DRM_MODE_FLAG_PVSYNC;
+ else
+ drm_mode->flags = DRM_MODE_FLAG_PHSYNC | DRM_MODE_FLAG_NVSYNC;
+
return drm_mode;
}
+EXPORT_SYMBOL(drm_gtf_mode_complex);
+
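
With the standard parameters (M = 600, C = 40, K = 128, J = 20, i.e. GTF_2C = 80 and GTF_2J = 40) the primed constants reduce to the classic GTF values, which is why the wrapper below can keep the old behaviour:

    /*
     * C' = (((GTF_2C - GTF_2J) * GTF_K / 256) + GTF_2J) / 2
     *    = (((80 - 40) * 128 / 256) + 40) / 2 = (20 + 40) / 2 = 30
     * M' = GTF_K * GTF_M / 256 = 128 * 600 / 256 = 300
     */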
+/**
+ * drm_gtf_mode - create the modeline based on GTF algorithm
+ *
+ * @dev: drm device
+ * @hdisplay: hdisplay size
+ * @vdisplay: vdisplay size
+ * @vrefresh: vrefresh rate
+ * @interlaced: whether interlace is supported
+ * @margins: whether margins are supported
+ *
+ * LOCKING:
+ * none.
+ *
+ * Returns the modeline based on the GTF algorithm.
+ *
+ * This function creates a modeline based on the Generalized Timing
+ * Formula, which is derived from:
+ * GTF Spreadsheet by Andy Morrish (1/5/97)
+ * available at http://www.vesa.org
+ *
+ * It was ported from xserver/hw/xfree86/modes/xf86gtf.c and translated
+ * to integer calculation; see also fb_get_mode() in
+ * drivers/video/fbmon.c.
+ *
+ * Standard GTF parameters:
+ * M = 600
+ * C = 40
+ * K = 128
+ * J = 20
+ */
+struct drm_display_mode *
+drm_gtf_mode(struct drm_device *dev, int hdisplay, int vdisplay, int vrefresh,
+ bool lace, int margins)
+{
+ return drm_gtf_mode_complex(dev, hdisplay, vdisplay, vrefresh, lace,
+ margins, 600, 40 * 2, 128, 20 * 2);
+}
EXPORT_SYMBOL(drm_gtf_mode);
+
/**
* drm_mode_set_name - set the name on a mode
* @mode: name will be set in this mode
@@ -482,8 +512,11 @@ EXPORT_SYMBOL(drm_gtf_mode);
*/
void drm_mode_set_name(struct drm_display_mode *mode)
{
- snprintf(mode->name, DRM_DISPLAY_MODE_LEN, "%dx%d", mode->hdisplay,
- mode->vdisplay);
+ bool interlaced = !!(mode->flags & DRM_MODE_FLAG_INTERLACE);
+
+ snprintf(mode->name, DRM_DISPLAY_MODE_LEN, "%dx%d%s",
+ mode->hdisplay, mode->vdisplay,
+ interlaced ? "i" : "");
}
EXPORT_SYMBOL(drm_mode_set_name);
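
With this change an interlaced mode becomes distinguishable by name alone; for example:

    /* 1920x1080 with DRM_MODE_FLAG_INTERLACE set is named "1920x1080i";
     * a progressive mode keeps the plain "1920x1080". */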
diff --git a/drivers/gpu/drm/drm_stub.c b/drivers/gpu/drm/drm_stub.c
index b743411d814..a0c365f2e52 100644
--- a/drivers/gpu/drm/drm_stub.c
+++ b/drivers/gpu/drm/drm_stub.c
@@ -516,8 +516,6 @@ void drm_put_dev(struct drm_device *dev)
}
driver = dev->driver;
- drm_vblank_cleanup(dev);
-
drm_lastclose(dev);
if (drm_core_has_MTRR(dev) && drm_core_has_AGP(dev) &&
@@ -537,6 +535,8 @@ void drm_put_dev(struct drm_device *dev)
dev->agp = NULL;
}
+ drm_vblank_cleanup(dev);
+
list_for_each_entry_safe(r_list, list_temp, &dev->maplist, head)
drm_rmmap(dev, r_list->map);
drm_ht_remove(&dev->map_hash);
diff --git a/drivers/gpu/drm/drm_sysfs.c b/drivers/gpu/drm/drm_sysfs.c
index 1a1825b29f5..101d381e9d8 100644
--- a/drivers/gpu/drm/drm_sysfs.c
+++ b/drivers/gpu/drm/drm_sysfs.c
@@ -193,8 +193,9 @@ static ssize_t enabled_show(struct device *device,
"disabled");
}
-static ssize_t edid_show(struct kobject *kobj, struct bin_attribute *attr,
- char *buf, loff_t off, size_t count)
+static ssize_t edid_show(struct file *filp, struct kobject *kobj,
+ struct bin_attribute *attr, char *buf, loff_t off,
+ size_t count)
{
struct device *connector_dev = container_of(kobj, struct device, kobj);
struct drm_connector *connector = to_drm_connector(connector_dev);
@@ -333,7 +334,7 @@ static struct device_attribute connector_attrs_opt1[] = {
static struct bin_attribute edid_attr = {
.attr.name = "edid",
.attr.mode = 0444,
- .size = 128,
+ .size = 0,
.read = edid_show,
};
@@ -354,7 +355,10 @@ static struct bin_attribute edid_attr = {
int drm_sysfs_connector_add(struct drm_connector *connector)
{
struct drm_device *dev = connector->dev;
- int ret = 0, i, j;
+ int attr_cnt = 0;
+ int opt_cnt = 0;
+ int i;
+ int ret = 0;
/* We shouldn't get called more than once for the same connector */
BUG_ON(device_is_registered(&connector->kdev));
@@ -377,8 +381,8 @@ int drm_sysfs_connector_add(struct drm_connector *connector)
/* Standard attributes */
- for (i = 0; i < ARRAY_SIZE(connector_attrs); i++) {
- ret = device_create_file(&connector->kdev, &connector_attrs[i]);
+ for (attr_cnt = 0; attr_cnt < ARRAY_SIZE(connector_attrs); attr_cnt++) {
+ ret = device_create_file(&connector->kdev, &connector_attrs[attr_cnt]);
if (ret)
goto err_out_files;
}
@@ -394,8 +398,8 @@ int drm_sysfs_connector_add(struct drm_connector *connector)
case DRM_MODE_CONNECTOR_SVIDEO:
case DRM_MODE_CONNECTOR_Component:
case DRM_MODE_CONNECTOR_TV:
- for (i = 0; i < ARRAY_SIZE(connector_attrs_opt1); i++) {
- ret = device_create_file(&connector->kdev, &connector_attrs_opt1[i]);
+ for (opt_cnt = 0; opt_cnt < ARRAY_SIZE(connector_attrs_opt1); opt_cnt++) {
+ ret = device_create_file(&connector->kdev, &connector_attrs_opt1[opt_cnt]);
if (ret)
goto err_out_files;
}
@@ -414,10 +418,10 @@ int drm_sysfs_connector_add(struct drm_connector *connector)
return 0;
err_out_files:
- if (i > 0)
- for (j = 0; j < i; j++)
- device_remove_file(&connector->kdev,
- &connector_attrs[i]);
+ for (i = 0; i < opt_cnt; i++)
+ device_remove_file(&connector->kdev, &connector_attrs_opt1[i]);
+ for (i = 0; i < attr_cnt; i++)
+ device_remove_file(&connector->kdev, &connector_attrs[i]);
device_unregister(&connector->kdev);
out:
diff --git a/drivers/gpu/drm/i915/Makefile b/drivers/gpu/drm/i915/Makefile
index 9929f84ec3e..95639017bdb 100644
--- a/drivers/gpu/drm/i915/Makefile
+++ b/drivers/gpu/drm/i915/Makefile
@@ -33,3 +33,5 @@ i915-$(CONFIG_ACPI) += i915_opregion.o
i915-$(CONFIG_COMPAT) += i915_ioc32.o
obj-$(CONFIG_DRM_I915) += i915.o
+
+CFLAGS_i915_trace_points.o := -I$(src)
diff --git a/drivers/gpu/drm/i915/dvo.h b/drivers/gpu/drm/i915/dvo.h
index 288fc50627e..0d6ff640e1c 100644
--- a/drivers/gpu/drm/i915/dvo.h
+++ b/drivers/gpu/drm/i915/dvo.h
@@ -70,16 +70,6 @@ struct intel_dvo_dev_ops {
void (*dpms)(struct intel_dvo_device *dvo, int mode);
/*
- * Saves the output's state for restoration on VT switch.
- */
- void (*save)(struct intel_dvo_device *dvo);
-
- /*
- * Restore's the output's state at VT switch.
- */
- void (*restore)(struct intel_dvo_device *dvo);
-
- /*
* Callback for testing a video mode for a given output.
*
* This function should only check for cases where a mode can't
diff --git a/drivers/gpu/drm/i915/dvo_ch7017.c b/drivers/gpu/drm/i915/dvo_ch7017.c
index 1184c14ba87..14d59804acd 100644
--- a/drivers/gpu/drm/i915/dvo_ch7017.c
+++ b/drivers/gpu/drm/i915/dvo_ch7017.c
@@ -159,16 +159,7 @@
#define CH7017_BANG_LIMIT_CONTROL 0x7f
struct ch7017_priv {
- uint8_t save_hapi;
- uint8_t save_vali;
- uint8_t save_valo;
- uint8_t save_ailo;
- uint8_t save_lvds_pll_vco;
- uint8_t save_feedback_div;
- uint8_t save_lvds_control_2;
- uint8_t save_outputs_enable;
- uint8_t save_lvds_power_down;
- uint8_t save_power_management;
+ uint8_t dummy;
};
static void ch7017_dump_regs(struct intel_dvo_device *dvo);
@@ -401,39 +392,6 @@ do { \
DUMP(CH7017_LVDS_POWER_DOWN);
}
-static void ch7017_save(struct intel_dvo_device *dvo)
-{
- struct ch7017_priv *priv = dvo->dev_priv;
-
- ch7017_read(dvo, CH7017_HORIZONTAL_ACTIVE_PIXEL_INPUT, &priv->save_hapi);
- ch7017_read(dvo, CH7017_VERTICAL_ACTIVE_LINE_OUTPUT, &priv->save_valo);
- ch7017_read(dvo, CH7017_ACTIVE_INPUT_LINE_OUTPUT, &priv->save_ailo);
- ch7017_read(dvo, CH7017_LVDS_PLL_VCO_CONTROL, &priv->save_lvds_pll_vco);
- ch7017_read(dvo, CH7017_LVDS_PLL_FEEDBACK_DIV, &priv->save_feedback_div);
- ch7017_read(dvo, CH7017_LVDS_CONTROL_2, &priv->save_lvds_control_2);
- ch7017_read(dvo, CH7017_OUTPUTS_ENABLE, &priv->save_outputs_enable);
- ch7017_read(dvo, CH7017_LVDS_POWER_DOWN, &priv->save_lvds_power_down);
- ch7017_read(dvo, CH7017_POWER_MANAGEMENT, &priv->save_power_management);
-}
-
-static void ch7017_restore(struct intel_dvo_device *dvo)
-{
- struct ch7017_priv *priv = dvo->dev_priv;
-
- /* Power down before changing mode */
- ch7017_dpms(dvo, DRM_MODE_DPMS_OFF);
-
- ch7017_write(dvo, CH7017_HORIZONTAL_ACTIVE_PIXEL_INPUT, priv->save_hapi);
- ch7017_write(dvo, CH7017_VERTICAL_ACTIVE_LINE_OUTPUT, priv->save_valo);
- ch7017_write(dvo, CH7017_ACTIVE_INPUT_LINE_OUTPUT, priv->save_ailo);
- ch7017_write(dvo, CH7017_LVDS_PLL_VCO_CONTROL, priv->save_lvds_pll_vco);
- ch7017_write(dvo, CH7017_LVDS_PLL_FEEDBACK_DIV, priv->save_feedback_div);
- ch7017_write(dvo, CH7017_LVDS_CONTROL_2, priv->save_lvds_control_2);
- ch7017_write(dvo, CH7017_OUTPUTS_ENABLE, priv->save_outputs_enable);
- ch7017_write(dvo, CH7017_LVDS_POWER_DOWN, priv->save_lvds_power_down);
- ch7017_write(dvo, CH7017_POWER_MANAGEMENT, priv->save_power_management);
-}
-
static void ch7017_destroy(struct intel_dvo_device *dvo)
{
struct ch7017_priv *priv = dvo->dev_priv;
@@ -451,7 +409,5 @@ struct intel_dvo_dev_ops ch7017_ops = {
.mode_set = ch7017_mode_set,
.dpms = ch7017_dpms,
.dump_regs = ch7017_dump_regs,
- .save = ch7017_save,
- .restore = ch7017_restore,
.destroy = ch7017_destroy,
};
diff --git a/drivers/gpu/drm/i915/dvo_ch7xxx.c b/drivers/gpu/drm/i915/dvo_ch7xxx.c
index d56ff5cc22b..6f1944b2444 100644
--- a/drivers/gpu/drm/i915/dvo_ch7xxx.c
+++ b/drivers/gpu/drm/i915/dvo_ch7xxx.c
@@ -92,21 +92,10 @@ static struct ch7xxx_id_struct {
{ CH7301_VID, "CH7301" },
};
-struct ch7xxx_reg_state {
- uint8_t regs[CH7xxx_NUM_REGS];
-};
-
struct ch7xxx_priv {
bool quiet;
-
- struct ch7xxx_reg_state save_reg;
- struct ch7xxx_reg_state mode_reg;
- uint8_t save_TCTL, save_TPCP, save_TPD, save_TPVT;
- uint8_t save_TLPF, save_TCT, save_PM, save_IDF;
};
-static void ch7xxx_save(struct intel_dvo_device *dvo);
-
static char *ch7xxx_get_id(uint8_t vid)
{
int i;
@@ -312,42 +301,17 @@ static void ch7xxx_dpms(struct intel_dvo_device *dvo, int mode)
static void ch7xxx_dump_regs(struct intel_dvo_device *dvo)
{
- struct ch7xxx_priv *ch7xxx = dvo->dev_priv;
int i;
for (i = 0; i < CH7xxx_NUM_REGS; i++) {
+ uint8_t val;
if ((i % 8) == 0 )
DRM_LOG_KMS("\n %02X: ", i);
- DRM_LOG_KMS("%02X ", ch7xxx->mode_reg.regs[i]);
+ ch7xxx_readb(dvo, i, &val);
+ DRM_LOG_KMS("%02X ", val);
}
}
-static void ch7xxx_save(struct intel_dvo_device *dvo)
-{
- struct ch7xxx_priv *ch7xxx= dvo->dev_priv;
-
- ch7xxx_readb(dvo, CH7xxx_TCTL, &ch7xxx->save_TCTL);
- ch7xxx_readb(dvo, CH7xxx_TPCP, &ch7xxx->save_TPCP);
- ch7xxx_readb(dvo, CH7xxx_TPD, &ch7xxx->save_TPD);
- ch7xxx_readb(dvo, CH7xxx_TPVT, &ch7xxx->save_TPVT);
- ch7xxx_readb(dvo, CH7xxx_TLPF, &ch7xxx->save_TLPF);
- ch7xxx_readb(dvo, CH7xxx_PM, &ch7xxx->save_PM);
- ch7xxx_readb(dvo, CH7xxx_IDF, &ch7xxx->save_IDF);
-}
-
-static void ch7xxx_restore(struct intel_dvo_device *dvo)
-{
- struct ch7xxx_priv *ch7xxx = dvo->dev_priv;
-
- ch7xxx_writeb(dvo, CH7xxx_TCTL, ch7xxx->save_TCTL);
- ch7xxx_writeb(dvo, CH7xxx_TPCP, ch7xxx->save_TPCP);
- ch7xxx_writeb(dvo, CH7xxx_TPD, ch7xxx->save_TPD);
- ch7xxx_writeb(dvo, CH7xxx_TPVT, ch7xxx->save_TPVT);
- ch7xxx_writeb(dvo, CH7xxx_TLPF, ch7xxx->save_TLPF);
- ch7xxx_writeb(dvo, CH7xxx_IDF, ch7xxx->save_IDF);
- ch7xxx_writeb(dvo, CH7xxx_PM, ch7xxx->save_PM);
-}
-
static void ch7xxx_destroy(struct intel_dvo_device *dvo)
{
struct ch7xxx_priv *ch7xxx = dvo->dev_priv;
@@ -365,7 +329,5 @@ struct intel_dvo_dev_ops ch7xxx_ops = {
.mode_set = ch7xxx_mode_set,
.dpms = ch7xxx_dpms,
.dump_regs = ch7xxx_dump_regs,
- .save = ch7xxx_save,
- .restore = ch7xxx_restore,
.destroy = ch7xxx_destroy,
};
diff --git a/drivers/gpu/drm/i915/dvo_ivch.c b/drivers/gpu/drm/i915/dvo_ivch.c
index 24169e528f0..a2ec3f48720 100644
--- a/drivers/gpu/drm/i915/dvo_ivch.c
+++ b/drivers/gpu/drm/i915/dvo_ivch.c
@@ -153,9 +153,6 @@ struct ivch_priv {
bool quiet;
uint16_t width, height;
-
- uint16_t save_VR01;
- uint16_t save_VR40;
};
@@ -405,22 +402,6 @@ static void ivch_dump_regs(struct intel_dvo_device *dvo)
DRM_LOG_KMS("VR8F: 0x%04x\n", val);
}
-static void ivch_save(struct intel_dvo_device *dvo)
-{
- struct ivch_priv *priv = dvo->dev_priv;
-
- ivch_read(dvo, VR01, &priv->save_VR01);
- ivch_read(dvo, VR40, &priv->save_VR40);
-}
-
-static void ivch_restore(struct intel_dvo_device *dvo)
-{
- struct ivch_priv *priv = dvo->dev_priv;
-
- ivch_write(dvo, VR01, priv->save_VR01);
- ivch_write(dvo, VR40, priv->save_VR40);
-}
-
static void ivch_destroy(struct intel_dvo_device *dvo)
{
struct ivch_priv *priv = dvo->dev_priv;
@@ -434,8 +415,6 @@ static void ivch_destroy(struct intel_dvo_device *dvo)
struct intel_dvo_dev_ops ivch_ops= {
.init = ivch_init,
.dpms = ivch_dpms,
- .save = ivch_save,
- .restore = ivch_restore,
.mode_valid = ivch_mode_valid,
.mode_set = ivch_mode_set,
.detect = ivch_detect,
diff --git a/drivers/gpu/drm/i915/dvo_sil164.c b/drivers/gpu/drm/i915/dvo_sil164.c
index 0001c13f0a8..9b8e6765cf2 100644
--- a/drivers/gpu/drm/i915/dvo_sil164.c
+++ b/drivers/gpu/drm/i915/dvo_sil164.c
@@ -58,17 +58,9 @@ SOFTWARE OR THE USE OR OTHER DEALINGS IN THE SOFTWARE.
#define SIL164_REGC 0x0c
-struct sil164_save_rec {
- uint8_t reg8;
- uint8_t reg9;
- uint8_t regc;
-};
-
struct sil164_priv {
//I2CDevRec d;
bool quiet;
- struct sil164_save_rec save_regs;
- struct sil164_save_rec mode_regs;
};
#define SILPTR(d) ((SIL164Ptr)(d->DriverPrivate.ptr))
@@ -252,34 +244,6 @@ static void sil164_dump_regs(struct intel_dvo_device *dvo)
DRM_LOG_KMS("SIL164_REGC: 0x%02x\n", val);
}
-static void sil164_save(struct intel_dvo_device *dvo)
-{
- struct sil164_priv *sil= dvo->dev_priv;
-
- if (!sil164_readb(dvo, SIL164_REG8, &sil->save_regs.reg8))
- return;
-
- if (!sil164_readb(dvo, SIL164_REG9, &sil->save_regs.reg9))
- return;
-
- if (!sil164_readb(dvo, SIL164_REGC, &sil->save_regs.regc))
- return;
-
- return;
-}
-
-static void sil164_restore(struct intel_dvo_device *dvo)
-{
- struct sil164_priv *sil = dvo->dev_priv;
-
- /* Restore it powered down initially */
- sil164_writeb(dvo, SIL164_REG8, sil->save_regs.reg8 & ~0x1);
-
- sil164_writeb(dvo, SIL164_REG9, sil->save_regs.reg9);
- sil164_writeb(dvo, SIL164_REGC, sil->save_regs.regc);
- sil164_writeb(dvo, SIL164_REG8, sil->save_regs.reg8);
-}
-
static void sil164_destroy(struct intel_dvo_device *dvo)
{
struct sil164_priv *sil = dvo->dev_priv;
@@ -297,7 +261,5 @@ struct intel_dvo_dev_ops sil164_ops = {
.mode_set = sil164_mode_set,
.dpms = sil164_dpms,
.dump_regs = sil164_dump_regs,
- .save = sil164_save,
- .restore = sil164_restore,
.destroy = sil164_destroy,
};
diff --git a/drivers/gpu/drm/i915/dvo_tfp410.c b/drivers/gpu/drm/i915/dvo_tfp410.c
index c7c391bc116..66c697bc9b2 100644
--- a/drivers/gpu/drm/i915/dvo_tfp410.c
+++ b/drivers/gpu/drm/i915/dvo_tfp410.c
@@ -86,16 +86,8 @@
#define TFP410_V_RES_LO 0x3C
#define TFP410_V_RES_HI 0x3D
-struct tfp410_save_rec {
- uint8_t ctl1;
- uint8_t ctl2;
-};
-
struct tfp410_priv {
bool quiet;
-
- struct tfp410_save_rec saved_reg;
- struct tfp410_save_rec mode_reg;
};
static bool tfp410_readb(struct intel_dvo_device *dvo, int addr, uint8_t *ch)
@@ -293,28 +285,6 @@ static void tfp410_dump_regs(struct intel_dvo_device *dvo)
DRM_LOG_KMS("TFP410_V_RES: 0x%02X%02X\n", val2, val);
}
-static void tfp410_save(struct intel_dvo_device *dvo)
-{
- struct tfp410_priv *tfp = dvo->dev_priv;
-
- if (!tfp410_readb(dvo, TFP410_CTL_1, &tfp->saved_reg.ctl1))
- return;
-
- if (!tfp410_readb(dvo, TFP410_CTL_2, &tfp->saved_reg.ctl2))
- return;
-}
-
-static void tfp410_restore(struct intel_dvo_device *dvo)
-{
- struct tfp410_priv *tfp = dvo->dev_priv;
-
- /* Restore it powered down initially */
- tfp410_writeb(dvo, TFP410_CTL_1, tfp->saved_reg.ctl1 & ~0x1);
-
- tfp410_writeb(dvo, TFP410_CTL_2, tfp->saved_reg.ctl2);
- tfp410_writeb(dvo, TFP410_CTL_1, tfp->saved_reg.ctl1);
-}
-
static void tfp410_destroy(struct intel_dvo_device *dvo)
{
struct tfp410_priv *tfp = dvo->dev_priv;
@@ -332,7 +302,5 @@ struct intel_dvo_dev_ops tfp410_ops = {
.mode_set = tfp410_mode_set,
.dpms = tfp410_dpms,
.dump_regs = tfp410_dump_regs,
- .save = tfp410_save,
- .restore = tfp410_restore,
.destroy = tfp410_destroy,
};
diff --git a/drivers/gpu/drm/i915/i915_debugfs.c b/drivers/gpu/drm/i915/i915_debugfs.c
index a0b8447b06e..322070c0c63 100644
--- a/drivers/gpu/drm/i915/i915_debugfs.c
+++ b/drivers/gpu/drm/i915/i915_debugfs.c
@@ -96,19 +96,18 @@ static int i915_gem_object_list_info(struct seq_file *m, void *data)
spin_lock(lock);
list_for_each_entry(obj_priv, head, list)
{
- struct drm_gem_object *obj = obj_priv->obj;
-
seq_printf(m, " %p: %s %8zd %08x %08x %d%s%s",
- obj,
+ &obj_priv->base,
get_pin_flag(obj_priv),
- obj->size,
- obj->read_domains, obj->write_domain,
+ obj_priv->base.size,
+ obj_priv->base.read_domains,
+ obj_priv->base.write_domain,
obj_priv->last_rendering_seqno,
obj_priv->dirty ? " dirty" : "",
obj_priv->madv == I915_MADV_DONTNEED ? " purgeable" : "");
- if (obj->name)
- seq_printf(m, " (name: %d)", obj->name);
+ if (obj_priv->base.name)
+ seq_printf(m, " (name: %d)", obj_priv->base.name);
if (obj_priv->fence_reg != I915_FENCE_REG_NONE)
seq_printf(m, " (fence: %d)", obj_priv->fence_reg);
if (obj_priv->gtt_space != NULL)
@@ -289,7 +288,7 @@ static int i915_batchbuffer_info(struct seq_file *m, void *data)
spin_lock(&dev_priv->mm.active_list_lock);
list_for_each_entry(obj_priv, &dev_priv->mm.active_list, list) {
- obj = obj_priv->obj;
+ obj = &obj_priv->base;
if (obj->read_domains & I915_GEM_DOMAIN_COMMAND) {
ret = i915_gem_object_get_pages(obj, 0);
if (ret) {
@@ -567,23 +566,14 @@ static int i915_fbc_status(struct seq_file *m, void *unused)
{
struct drm_info_node *node = (struct drm_info_node *) m->private;
struct drm_device *dev = node->minor->dev;
- struct drm_crtc *crtc;
drm_i915_private_t *dev_priv = dev->dev_private;
- bool fbc_enabled = false;
- if (!dev_priv->display.fbc_enabled) {
+ if (!I915_HAS_FBC(dev)) {
seq_printf(m, "FBC unsupported on this chipset\n");
return 0;
}
- list_for_each_entry(crtc, &dev->mode_config.crtc_list, head) {
- if (!crtc->enabled)
- continue;
- if (dev_priv->display.fbc_enabled(crtc))
- fbc_enabled = true;
- }
-
- if (fbc_enabled) {
+ if (intel_fbc_enabled(dev)) {
seq_printf(m, "FBC enabled\n");
} else {
seq_printf(m, "FBC disabled: ");
diff --git a/drivers/gpu/drm/i915/i915_dma.c b/drivers/gpu/drm/i915/i915_dma.c
index 2dc93939507..2a6b5de5ae5 100644
--- a/drivers/gpu/drm/i915/i915_dma.c
+++ b/drivers/gpu/drm/i915/i915_dma.c
@@ -1357,19 +1357,30 @@ static void i915_setup_compression(struct drm_device *dev, int size)
dev_priv->cfb_size = size;
+ intel_disable_fbc(dev);
+ dev_priv->compressed_fb = compressed_fb;
+
if (IS_GM45(dev)) {
- g4x_disable_fbc(dev);
I915_WRITE(DPFC_CB_BASE, compressed_fb->start);
} else {
- i8xx_disable_fbc(dev);
I915_WRITE(FBC_CFB_BASE, cfb_base);
I915_WRITE(FBC_LL_BASE, ll_base);
+ dev_priv->compressed_llb = compressed_llb;
}
DRM_DEBUG("FBC base 0x%08lx, ll base 0x%08lx, size %dM\n", cfb_base,
ll_base, size >> 20);
}
+static void i915_cleanup_compression(struct drm_device *dev)
+{
+ struct drm_i915_private *dev_priv = dev->dev_private;
+
+ drm_mm_put_block(dev_priv->compressed_fb);
+ if (!IS_GM45(dev))
+ drm_mm_put_block(dev_priv->compressed_llb);
+}
+
/* true = enable decode, false = disable decode */
static unsigned int i915_vga_set_decode(void *cookie, bool state)
{
@@ -1492,8 +1503,8 @@ static int i915_load_modeset_init(struct drm_device *dev,
I915_WRITE(INSTPM, (1 << 5) | (1 << 21));
- drm_helper_initial_config(dev);
-
+ intel_fbdev_init(dev);
+ drm_kms_helper_poll_init(dev);
return 0;
destroy_ringbuffer:
@@ -1579,7 +1590,7 @@ static void i915_get_mem_freq(struct drm_device *dev)
*/
int i915_driver_load(struct drm_device *dev, unsigned long flags)
{
- struct drm_i915_private *dev_priv = dev->dev_private;
+ struct drm_i915_private *dev_priv;
resource_size_t base, size;
int ret = 0, mmio_bar;
uint32_t agp_size, prealloc_size, prealloc_start;
@@ -1711,6 +1722,8 @@ int i915_driver_load(struct drm_device *dev, unsigned long flags)
/* Start out suspended */
dev_priv->mm.suspended = 1;
+ intel_detect_pch(dev);
+
if (drm_core_check_feature(dev, DRIVER_MODESET)) {
ret = i915_load_modeset_init(dev, prealloc_start,
prealloc_size, agp_size);
@@ -1757,6 +1770,8 @@ int i915_driver_unload(struct drm_device *dev)
}
if (drm_core_check_feature(dev, DRIVER_MODESET)) {
+ intel_modeset_cleanup(dev);
+
/*
* free the memory space allocated for the child device
* config parsed from VBT
@@ -1780,13 +1795,13 @@ int i915_driver_unload(struct drm_device *dev)
intel_opregion_free(dev, 0);
if (drm_core_check_feature(dev, DRIVER_MODESET)) {
- intel_modeset_cleanup(dev);
-
i915_gem_free_all_phys_object(dev);
mutex_lock(&dev->struct_mutex);
i915_gem_cleanup_ringbuffer(dev);
mutex_unlock(&dev->struct_mutex);
+ if (I915_HAS_FBC(dev) && i915_powersave)
+ i915_cleanup_compression(dev);
drm_mm_takedown(&dev_priv->vram);
i915_gem_lastclose(dev);
diff --git a/drivers/gpu/drm/i915/i915_drv.c b/drivers/gpu/drm/i915/i915_drv.c
index 0af3dcc85ce..5c51e45ab68 100644
--- a/drivers/gpu/drm/i915/i915_drv.c
+++ b/drivers/gpu/drm/i915/i915_drv.c
@@ -69,7 +69,8 @@ const static struct intel_device_info intel_845g_info = {
};
const static struct intel_device_info intel_i85x_info = {
- .is_i8xx = 1, .is_mobile = 1, .cursor_needs_physical = 1,
+ .is_i8xx = 1, .is_i85x = 1, .is_mobile = 1,
+ .cursor_needs_physical = 1,
};
const static struct intel_device_info intel_i865g_info = {
@@ -151,7 +152,7 @@ const static struct pci_device_id pciidlist[] = {
INTEL_VGA_DEVICE(0x3577, &intel_i830_info),
INTEL_VGA_DEVICE(0x2562, &intel_845g_info),
INTEL_VGA_DEVICE(0x3582, &intel_i85x_info),
- INTEL_VGA_DEVICE(0x35e8, &intel_i85x_info),
+ INTEL_VGA_DEVICE(0x358e, &intel_i85x_info),
INTEL_VGA_DEVICE(0x2572, &intel_i865g_info),
INTEL_VGA_DEVICE(0x2582, &intel_i915g_info),
INTEL_VGA_DEVICE(0x258a, &intel_i915g_info),
@@ -187,6 +188,35 @@ const static struct pci_device_id pciidlist[] = {
MODULE_DEVICE_TABLE(pci, pciidlist);
#endif
+#define INTEL_PCH_DEVICE_ID_MASK 0xff00
+#define INTEL_PCH_CPT_DEVICE_ID_TYPE 0x1c00
+
+void intel_detect_pch(struct drm_device *dev)
+{
+ struct drm_i915_private *dev_priv = dev->dev_private;
+ struct pci_dev *pch;
+
+ /*
+ * We probe the ISA bridge instead of Dev31:Fun0 to make graphics
+ * device passthrough easy for a VMM, which then only needs to
+ * expose the ISA bridge for the driver to identify the real
+ * hardware underneath. This is a requirement from the
+ * virtualization team.
+ */
+ pch = pci_get_class(PCI_CLASS_BRIDGE_ISA << 8, NULL);
+ if (pch) {
+ if (pch->vendor == PCI_VENDOR_ID_INTEL) {
+ int id;
+ id = pch->device & INTEL_PCH_DEVICE_ID_MASK;
+
+ if (id == INTEL_PCH_CPT_DEVICE_ID_TYPE) {
+ dev_priv->pch_type = PCH_CPT;
+ DRM_DEBUG_KMS("Found CougarPoint PCH\n");
+ }
+ }
+ pci_dev_put(pch);
+ }
+}
+
static int i915_drm_freeze(struct drm_device *dev)
{
struct drm_i915_private *dev_priv = dev->dev_private;
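The intel_detect_pch() hunk above matches the ISA bridge by device-ID range rather than by exact ID: masking with 0xff00 lets every CougarPoint LPC variant (0x1c00..0x1cff) hit a single comparison. A minimal userspace sketch of that masking, with an illustrative device ID and a stand-in for struct pci_dev:

#include <stdint.h>
#include <stdio.h>

#define INTEL_PCH_DEVICE_ID_MASK	0xff00
#define INTEL_PCH_CPT_DEVICE_ID_TYPE	0x1c00

/* Stand-in for the two struct pci_dev fields the check needs. */
struct fake_pci_dev {
	uint16_t vendor;
	uint16_t device;
};

int main(void)
{
	/* Illustrative CougarPoint ISA bridge; any 0x1cxx ID matches. */
	struct fake_pci_dev pch = { .vendor = 0x8086, .device = 0x1c46 };

	if (pch.vendor == 0x8086 &&
	    (pch.device & INTEL_PCH_DEVICE_ID_MASK) ==
	     INTEL_PCH_CPT_DEVICE_ID_TYPE)
		printf("Found CougarPoint PCH\n");
	return 0;
}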
diff --git a/drivers/gpu/drm/i915/i915_drv.h b/drivers/gpu/drm/i915/i915_drv.h
index 6960849522f..7f797ef1ab3 100644
--- a/drivers/gpu/drm/i915/i915_drv.h
+++ b/drivers/gpu/drm/i915/i915_drv.h
@@ -128,6 +128,7 @@ struct drm_i915_master_private {
struct drm_i915_fence_reg {
struct drm_gem_object *obj;
+ struct list_head lru_list;
};
struct sdvo_device_mapping {
@@ -135,6 +136,7 @@ struct sdvo_device_mapping {
u8 slave_addr;
u8 dvo_wiring;
u8 initialized;
+ u8 ddc_pin;
};
struct drm_i915_error_state {
@@ -175,7 +177,7 @@ struct drm_i915_error_state {
struct drm_i915_display_funcs {
void (*dpms)(struct drm_crtc *crtc, int mode);
- bool (*fbc_enabled)(struct drm_crtc *crtc);
+ bool (*fbc_enabled)(struct drm_device *dev);
void (*enable_fbc)(struct drm_crtc *crtc, unsigned long interval);
void (*disable_fbc)(struct drm_device *dev);
int (*get_display_clock_speed)(struct drm_device *dev);
@@ -195,6 +197,7 @@ struct intel_overlay;
struct intel_device_info {
u8 is_mobile : 1;
u8 is_i8xx : 1;
+ u8 is_i85x : 1;
u8 is_i915g : 1;
u8 is_i9xx : 1;
u8 is_i945gm : 1;
@@ -221,6 +224,13 @@ enum no_fbc_reason {
FBC_NOT_TILED, /* buffer not tiled */
};
+enum intel_pch {
+ PCH_IBX, /* Ibexpeak PCH */
+ PCH_CPT, /* Cougarpoint PCH */
+};
+
+struct intel_fbdev;
+
typedef struct drm_i915_private {
struct drm_device *dev;
@@ -235,11 +245,14 @@ typedef struct drm_i915_private {
drm_dma_handle_t *status_page_dmah;
void *hw_status_page;
+ void *seqno_page;
dma_addr_t dma_status_page;
uint32_t counter;
unsigned int status_gfx_addr;
+ unsigned int seqno_gfx_addr;
drm_local_map_t hws_map;
struct drm_gem_object *hws_obj;
+ struct drm_gem_object *seqno_obj;
struct drm_gem_object *pwrctx;
struct resource mch_res;
@@ -331,6 +344,9 @@ typedef struct drm_i915_private {
/* Display functions */
struct drm_i915_display_funcs display;
+ /* PCH chipset type */
+ enum intel_pch pch_type;
+
/* Register state */
bool modeset_on_lid;
u8 saveLBB;
@@ -630,11 +646,17 @@ typedef struct drm_i915_private {
u8 max_delay;
enum no_fbc_reason no_fbc_reason;
+
+ struct drm_mm_node *compressed_fb;
+ struct drm_mm_node *compressed_llb;
+
+ /* fbdev registered on this device */
+ struct intel_fbdev *fbdev;
} drm_i915_private_t;
/** driver private structure attached to each drm_gem_object */
struct drm_i915_gem_object {
- struct drm_gem_object *obj;
+ struct drm_gem_object base;
/** Current space allocated to this object in the GTT, if any. */
struct drm_mm_node *gtt_space;
@@ -644,9 +666,6 @@ struct drm_i915_gem_object {
/** This object's place on GPU write list */
struct list_head gpu_write_list;
- /** This object's place on the fenced object LRU */
- struct list_head fence_list;
-
/**
* This is set if the object is on the active or flushing lists
* (has pending rendering), and is not set if it's on inactive (ready
@@ -733,7 +752,7 @@ struct drm_i915_gem_object {
atomic_t pending_flip;
};
-#define to_intel_bo(x) ((struct drm_i915_gem_object *) (x)->driver_private)
+#define to_intel_bo(x) container_of(x, struct drm_i915_gem_object, base)
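The to_intel_bo() change is the heart of this refactor: struct drm_gem_object is now embedded in struct drm_i915_gem_object as "base", so the driver object is recovered by pointer arithmetic instead of a driver_private indirection. A self-contained userspace sketch of the container_of() idiom, with illustrative structure names:

#include <stddef.h>
#include <stdio.h>

#define container_of(ptr, type, member) \
	((type *)((char *)(ptr) - offsetof(type, member)))

struct base_obj { size_t size; };

struct priv_obj {
	struct base_obj base;	/* embedded, not pointed to */
	int fence_reg;
};

int main(void)
{
	struct priv_obj bo = { .base = { .size = 4096 }, .fence_reg = 7 };
	struct base_obj *base = &bo.base;

	/* One subtraction recovers the containing object; no extra field. */
	struct priv_obj *back = container_of(base, struct priv_obj, base);
	printf("fence_reg=%d\n", back->fence_reg);
	return 0;
}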
/**
* Request queue structure.
@@ -895,6 +914,8 @@ int i915_gem_get_aperture_ioctl(struct drm_device *dev, void *data,
struct drm_file *file_priv);
void i915_gem_load(struct drm_device *dev);
int i915_gem_init_object(struct drm_gem_object *obj);
+struct drm_gem_object * i915_gem_alloc_object(struct drm_device *dev,
+ size_t size);
void i915_gem_free_object(struct drm_gem_object *obj);
int i915_gem_object_pin(struct drm_gem_object *obj, uint32_t alignment);
void i915_gem_object_unpin(struct drm_gem_object *obj);
@@ -991,6 +1012,12 @@ extern void intel_modeset_cleanup(struct drm_device *dev);
extern int intel_modeset_vga_set_state(struct drm_device *dev, bool state);
extern void i8xx_disable_fbc(struct drm_device *dev);
extern void g4x_disable_fbc(struct drm_device *dev);
+extern void intel_disable_fbc(struct drm_device *dev);
+extern void intel_enable_fbc(struct drm_crtc *crtc, unsigned long interval);
+extern bool intel_fbc_enabled(struct drm_device *dev);
+
+extern void intel_detect_pch(struct drm_device *dev);
+extern int intel_trans_dp_port_sel(struct drm_crtc *crtc);
/**
* Lock test for when it's just for synchronization of ring access.
@@ -1070,7 +1097,7 @@ extern int i915_wait_ring(struct drm_device * dev, int n, const char *caller);
#define IS_I830(dev) ((dev)->pci_device == 0x3577)
#define IS_845G(dev) ((dev)->pci_device == 0x2562)
-#define IS_I85X(dev) ((dev)->pci_device == 0x3582)
+#define IS_I85X(dev) (INTEL_INFO(dev)->is_i85x)
#define IS_I865G(dev) ((dev)->pci_device == 0x2572)
#define IS_GEN2(dev) (INTEL_INFO(dev)->is_i8xx)
#define IS_I915G(dev) (INTEL_INFO(dev)->is_i915g)
@@ -1123,7 +1150,8 @@ extern int i915_wait_ring(struct drm_device * dev, int n, const char *caller);
#define SUPPORTS_INTEGRATED_DP(dev) (IS_G4X(dev) || IS_IRONLAKE(dev))
#define SUPPORTS_EDP(dev) (IS_IRONLAKE_M(dev))
#define SUPPORTS_TV(dev) (IS_I9XX(dev) && IS_MOBILE(dev) && \
- !IS_IRONLAKE(dev) && !IS_PINEVIEW(dev))
+ !IS_IRONLAKE(dev) && !IS_PINEVIEW(dev) && \
+ !IS_GEN6(dev))
#define I915_HAS_HOTPLUG(dev) (INTEL_INFO(dev)->has_hotplug)
/* dsparb controlled by hw only */
#define DSPARB_HWCONTROL(dev) (IS_G4X(dev) || IS_IRONLAKE(dev))
@@ -1135,6 +1163,10 @@ extern int i915_wait_ring(struct drm_device * dev, int n, const char *caller);
#define HAS_PCH_SPLIT(dev) (IS_IRONLAKE(dev) || \
IS_GEN6(dev))
+#define HAS_PIPE_CONTROL(dev) (IS_IRONLAKE(dev) || IS_GEN6(dev))
+
+#define INTEL_PCH_TYPE(dev) (((struct drm_i915_private *)(dev)->dev_private)->pch_type)
+#define HAS_PCH_CPT(dev) (INTEL_PCH_TYPE(dev) == PCH_CPT)
#define PRIMARY_RINGBUFFER_SIZE (128*1024)
diff --git a/drivers/gpu/drm/i915/i915_gem.c b/drivers/gpu/drm/i915/i915_gem.c
index 80871c62a57..112699f71fa 100644
--- a/drivers/gpu/drm/i915/i915_gem.c
+++ b/drivers/gpu/drm/i915/i915_gem.c
@@ -124,7 +124,7 @@ i915_gem_create_ioctl(struct drm_device *dev, void *data,
args->size = roundup(args->size, PAGE_SIZE);
/* Allocate the new object */
- obj = drm_gem_object_alloc(dev, args->size);
+ obj = i915_gem_alloc_object(dev, args->size);
if (obj == NULL)
return -ENOMEM;
@@ -1051,7 +1051,9 @@ i915_gem_set_domain_ioctl(struct drm_device *dev, void *data,
* about to occur.
*/
if (obj_priv->fence_reg != I915_FENCE_REG_NONE) {
- list_move_tail(&obj_priv->fence_list,
+ struct drm_i915_fence_reg *reg =
+ &dev_priv->fence_regs[obj_priv->fence_reg];
+ list_move_tail(&reg->lru_list,
&dev_priv->mm.fence_list);
}
@@ -1566,7 +1568,7 @@ i915_gem_process_flushing_list(struct drm_device *dev,
list_for_each_entry_safe(obj_priv, next,
&dev_priv->mm.gpu_write_list,
gpu_write_list) {
- struct drm_gem_object *obj = obj_priv->obj;
+ struct drm_gem_object *obj = &obj_priv->base;
if ((obj->write_domain & flush_domains) ==
obj->write_domain) {
@@ -1577,9 +1579,12 @@ i915_gem_process_flushing_list(struct drm_device *dev,
i915_gem_object_move_to_active(obj, seqno);
/* update the fence lru list */
- if (obj_priv->fence_reg != I915_FENCE_REG_NONE)
- list_move_tail(&obj_priv->fence_list,
+ if (obj_priv->fence_reg != I915_FENCE_REG_NONE) {
+ struct drm_i915_fence_reg *reg =
+ &dev_priv->fence_regs[obj_priv->fence_reg];
+ list_move_tail(&reg->lru_list,
&dev_priv->mm.fence_list);
+ }
trace_i915_gem_object_change_domain(obj,
obj->read_domains,
@@ -1588,6 +1593,13 @@ i915_gem_process_flushing_list(struct drm_device *dev,
}
}
+#define PIPE_CONTROL_FLUSH(addr) \
+ OUT_RING(GFX_OP_PIPE_CONTROL | PIPE_CONTROL_QW_WRITE | \
+ PIPE_CONTROL_DEPTH_STALL); \
+ OUT_RING(addr | PIPE_CONTROL_GLOBAL_GTT); \
+ OUT_RING(0); \
+ OUT_RING(0);
+
/**
* Creates a new sequence number, emitting a write of it to the status page
* plus an interrupt, which will trigger i915_user_interrupt_handler.
@@ -1622,13 +1634,47 @@ i915_add_request(struct drm_device *dev, struct drm_file *file_priv,
if (dev_priv->mm.next_gem_seqno == 0)
dev_priv->mm.next_gem_seqno++;
- BEGIN_LP_RING(4);
- OUT_RING(MI_STORE_DWORD_INDEX);
- OUT_RING(I915_GEM_HWS_INDEX << MI_STORE_DWORD_INDEX_SHIFT);
- OUT_RING(seqno);
+ if (HAS_PIPE_CONTROL(dev)) {
+ u32 scratch_addr = dev_priv->seqno_gfx_addr + 128;
- OUT_RING(MI_USER_INTERRUPT);
- ADVANCE_LP_RING();
+ /*
+ * Workaround qword write incoherence by flushing the
+ * PIPE_NOTIFY buffers out to memory before requesting
+ * an interrupt.
+ */
+ BEGIN_LP_RING(32);
+ OUT_RING(GFX_OP_PIPE_CONTROL | PIPE_CONTROL_QW_WRITE |
+ PIPE_CONTROL_WC_FLUSH | PIPE_CONTROL_TC_FLUSH);
+ OUT_RING(dev_priv->seqno_gfx_addr | PIPE_CONTROL_GLOBAL_GTT);
+ OUT_RING(seqno);
+ OUT_RING(0);
+ PIPE_CONTROL_FLUSH(scratch_addr);
+ scratch_addr += 128; /* write to separate cachelines */
+ PIPE_CONTROL_FLUSH(scratch_addr);
+ scratch_addr += 128;
+ PIPE_CONTROL_FLUSH(scratch_addr);
+ scratch_addr += 128;
+ PIPE_CONTROL_FLUSH(scratch_addr);
+ scratch_addr += 128;
+ PIPE_CONTROL_FLUSH(scratch_addr);
+ scratch_addr += 128;
+ PIPE_CONTROL_FLUSH(scratch_addr);
+ OUT_RING(GFX_OP_PIPE_CONTROL | PIPE_CONTROL_QW_WRITE |
+ PIPE_CONTROL_WC_FLUSH | PIPE_CONTROL_TC_FLUSH |
+ PIPE_CONTROL_NOTIFY);
+ OUT_RING(dev_priv->seqno_gfx_addr | PIPE_CONTROL_GLOBAL_GTT);
+ OUT_RING(seqno);
+ OUT_RING(0);
+ ADVANCE_LP_RING();
+ } else {
+ BEGIN_LP_RING(4);
+ OUT_RING(MI_STORE_DWORD_INDEX);
+ OUT_RING(I915_GEM_HWS_INDEX << MI_STORE_DWORD_INDEX_SHIFT);
+ OUT_RING(seqno);
+
+ OUT_RING(MI_USER_INTERRUPT);
+ ADVANCE_LP_RING();
+ }
DRM_DEBUG_DRIVER("%d\n", seqno);
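The ring budget in the hunk above checks out by simple arithmetic: the opening PIPE_CONTROL emits 4 dwords, each of the six PIPE_CONTROL_FLUSH expansions emits 4 more, and the closing notify emits 4, exactly the BEGIN_LP_RING(32) reservation. The scratch address also steps by 128 bytes so every flush lands in its own cacheline. A small sketch of that accounting (the base address is illustrative):

#include <stdio.h>

int main(void)
{
	unsigned int base = 0x1000;	/* illustrative seqno_gfx_addr */
	unsigned int scratch = base + 128;
	int dwords = 4;			/* opening PIPE_CONTROL */

	for (int i = 0; i < 6; i++) {	/* six PIPE_CONTROL_FLUSHes */
		printf("flush %d writes cacheline 0x%x\n", i, scratch);
		scratch += 128;		/* one cacheline apart */
		dwords += 4;
	}
	dwords += 4;			/* closing PIPE_CONTROL + NOTIFY */
	printf("dwords emitted: %d\n", dwords);	/* prints 32 */
	return 0;
}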
@@ -1704,7 +1750,7 @@ i915_gem_retire_request(struct drm_device *dev,
obj_priv = list_first_entry(&dev_priv->mm.active_list,
struct drm_i915_gem_object,
list);
- obj = obj_priv->obj;
+ obj = &obj_priv->base;
/* If the seqno being retired doesn't match the oldest in the
* list, then the oldest in the list must still be newer than
@@ -1752,7 +1798,10 @@ i915_get_gem_seqno(struct drm_device *dev)
{
drm_i915_private_t *dev_priv = dev->dev_private;
- return READ_HWSP(dev_priv, I915_GEM_HWS_INDEX);
+ if (HAS_PIPE_CONTROL(dev))
+ return ((volatile u32 *)(dev_priv->seqno_page))[0];
+ else
+ return READ_HWSP(dev_priv, I915_GEM_HWS_INDEX);
}
/**
@@ -2075,7 +2124,7 @@ i915_gem_find_inactive_object(struct drm_device *dev, int min_size)
/* Try to find the smallest clean object */
list_for_each_entry(obj_priv, &dev_priv->mm.inactive_list, list) {
- struct drm_gem_object *obj = obj_priv->obj;
+ struct drm_gem_object *obj = &obj_priv->base;
if (obj->size >= min_size) {
if ((!obj_priv->dirty ||
i915_gem_object_is_purgeable(obj_priv)) &&
@@ -2209,7 +2258,7 @@ i915_gem_evict_something(struct drm_device *dev, int min_size)
/* Find an object that we can immediately reuse */
list_for_each_entry(obj_priv, &dev_priv->mm.flushing_list, list) {
- obj = obj_priv->obj;
+ obj = &obj_priv->base;
if (obj->size >= min_size)
break;
@@ -2362,6 +2411,12 @@ static void i915_write_fence_reg(struct drm_i915_fence_reg *reg)
pitch_val = obj_priv->stride / tile_width;
pitch_val = ffs(pitch_val) - 1;
+ if (obj_priv->tiling_mode == I915_TILING_Y &&
+ HAS_128_BYTE_Y_TILING(dev))
+ WARN_ON(pitch_val > I830_FENCE_MAX_PITCH_VAL);
+ else
+ WARN_ON(pitch_val > I915_FENCE_MAX_PITCH_VAL);
+
val = obj_priv->gtt_offset;
if (obj_priv->tiling_mode == I915_TILING_Y)
val |= 1 << I830_FENCE_TILING_Y_SHIFT;
@@ -2435,9 +2490,10 @@ static int i915_find_fence_reg(struct drm_device *dev)
/* None available, try to steal one or wait for a user to finish */
i = I915_FENCE_REG_NONE;
- list_for_each_entry(obj_priv, &dev_priv->mm.fence_list,
- fence_list) {
- obj = obj_priv->obj;
+ list_for_each_entry(reg, &dev_priv->mm.fence_list,
+ lru_list) {
+ obj = reg->obj;
+ obj_priv = to_intel_bo(obj);
if (obj_priv->pin_count)
continue;
@@ -2486,7 +2542,8 @@ i915_gem_object_get_fence_reg(struct drm_gem_object *obj)
/* Just update our place in the LRU if our fence is getting used. */
if (obj_priv->fence_reg != I915_FENCE_REG_NONE) {
- list_move_tail(&obj_priv->fence_list, &dev_priv->mm.fence_list);
+ reg = &dev_priv->fence_regs[obj_priv->fence_reg];
+ list_move_tail(&reg->lru_list, &dev_priv->mm.fence_list);
return 0;
}
@@ -2516,7 +2573,7 @@ i915_gem_object_get_fence_reg(struct drm_gem_object *obj)
obj_priv->fence_reg = ret;
reg = &dev_priv->fence_regs[obj_priv->fence_reg];
- list_add_tail(&obj_priv->fence_list, &dev_priv->mm.fence_list);
+ list_add_tail(&reg->lru_list, &dev_priv->mm.fence_list);
reg->obj = obj;
@@ -2548,6 +2605,8 @@ i915_gem_clear_fence_reg(struct drm_gem_object *obj)
struct drm_device *dev = obj->dev;
drm_i915_private_t *dev_priv = dev->dev_private;
struct drm_i915_gem_object *obj_priv = to_intel_bo(obj);
+ struct drm_i915_fence_reg *reg =
+ &dev_priv->fence_regs[obj_priv->fence_reg];
if (IS_GEN6(dev)) {
I915_WRITE64(FENCE_REG_SANDYBRIDGE_0 +
@@ -2566,9 +2625,9 @@ i915_gem_clear_fence_reg(struct drm_gem_object *obj)
I915_WRITE(fence_reg, 0);
}
- dev_priv->fence_regs[obj_priv->fence_reg].obj = NULL;
+ reg->obj = NULL;
obj_priv->fence_reg = I915_FENCE_REG_NONE;
- list_del_init(&obj_priv->fence_list);
+ list_del_init(&reg->lru_list);
}
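These hunks move the LRU linkage from the object (fence_list) onto the fence register itself (lru_list), but the discipline stays the same: a touched node moves to the tail, and the head is the steal candidate. A compact userspace sketch with simplified versions of the list helpers:

#include <stddef.h>
#include <stdio.h>

struct list_head { struct list_head *prev, *next; };

static void INIT_LIST_HEAD(struct list_head *h) { h->prev = h->next = h; }
static void __list_del(struct list_head *e)
{
	e->prev->next = e->next;
	e->next->prev = e->prev;
}
static void list_add_tail(struct list_head *e, struct list_head *h)
{
	e->prev = h->prev;
	e->next = h;
	h->prev->next = e;
	h->prev = e;
}
static void list_move_tail(struct list_head *e, struct list_head *h)
{
	__list_del(e);
	list_add_tail(e, h);
}

struct fence_reg { int id; struct list_head lru_list; };

int main(void)
{
	struct list_head lru;
	struct fence_reg regs[3];

	INIT_LIST_HEAD(&lru);
	for (int i = 0; i < 3; i++) {
		regs[i].id = i;
		list_add_tail(&regs[i].lru_list, &lru);
	}

	/* Reusing fence 0 makes it most-recently-used. */
	list_move_tail(&regs[0].lru_list, &lru);

	/* The least-recently-used register sits at the head. */
	struct fence_reg *victim = (struct fence_reg *)
		((char *)lru.next - offsetof(struct fence_reg, lru_list));
	printf("steal fence %d\n", victim->id);	/* prints 1 */
	return 0;
}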
/**
@@ -4421,34 +4480,38 @@ i915_gem_madvise_ioctl(struct drm_device *dev, void *data,
return 0;
}
-int i915_gem_init_object(struct drm_gem_object *obj)
+struct drm_gem_object * i915_gem_alloc_object(struct drm_device *dev,
+ size_t size)
{
- struct drm_i915_gem_object *obj_priv;
+ struct drm_i915_gem_object *obj;
- obj_priv = kzalloc(sizeof(*obj_priv), GFP_KERNEL);
- if (obj_priv == NULL)
- return -ENOMEM;
+ obj = kzalloc(sizeof(*obj), GFP_KERNEL);
+ if (obj == NULL)
+ return NULL;
- /*
- * We've just allocated pages from the kernel,
- * so they've just been written by the CPU with
- * zeros. They'll need to be clflushed before we
- * use them with the GPU.
- */
- obj->write_domain = I915_GEM_DOMAIN_CPU;
- obj->read_domains = I915_GEM_DOMAIN_CPU;
+ if (drm_gem_object_init(dev, &obj->base, size) != 0) {
+ kfree(obj);
+ return NULL;
+ }
- obj_priv->agp_type = AGP_USER_MEMORY;
+ obj->base.write_domain = I915_GEM_DOMAIN_CPU;
+ obj->base.read_domains = I915_GEM_DOMAIN_CPU;
- obj->driver_private = obj_priv;
- obj_priv->obj = obj;
- obj_priv->fence_reg = I915_FENCE_REG_NONE;
- INIT_LIST_HEAD(&obj_priv->list);
- INIT_LIST_HEAD(&obj_priv->gpu_write_list);
- INIT_LIST_HEAD(&obj_priv->fence_list);
- obj_priv->madv = I915_MADV_WILLNEED;
+ obj->agp_type = AGP_USER_MEMORY;
+ obj->base.driver_private = NULL;
+ obj->fence_reg = I915_FENCE_REG_NONE;
+ INIT_LIST_HEAD(&obj->list);
+ INIT_LIST_HEAD(&obj->gpu_write_list);
+ obj->madv = I915_MADV_WILLNEED;
+
+ trace_i915_gem_object_create(&obj->base);
- trace_i915_gem_object_create(obj);
+ return &obj->base;
+}
+
+int i915_gem_init_object(struct drm_gem_object *obj)
+{
+ BUG();
return 0;
}
@@ -4471,9 +4534,11 @@ void i915_gem_free_object(struct drm_gem_object *obj)
if (obj_priv->mmap_offset)
i915_gem_free_mmap_offset(obj);
+ drm_gem_object_release(obj);
+
kfree(obj_priv->page_cpu_valid);
kfree(obj_priv->bit_17);
- kfree(obj->driver_private);
+ kfree(obj_priv);
}
/** Unbinds all inactive objects. */
@@ -4486,9 +4551,9 @@ i915_gem_evict_from_inactive_list(struct drm_device *dev)
struct drm_gem_object *obj;
int ret;
- obj = list_first_entry(&dev_priv->mm.inactive_list,
- struct drm_i915_gem_object,
- list)->obj;
+ obj = &list_first_entry(&dev_priv->mm.inactive_list,
+ struct drm_i915_gem_object,
+ list)->base;
ret = i915_gem_object_unbind(obj);
if (ret != 0) {
@@ -4546,6 +4611,49 @@ i915_gem_idle(struct drm_device *dev)
return 0;
}
+/*
+ * 965+ chips support PIPE_CONTROL commands, which provide finer-grained
+ * control over cache flushing.
+ */
+static int
+i915_gem_init_pipe_control(struct drm_device *dev)
+{
+ drm_i915_private_t *dev_priv = dev->dev_private;
+ struct drm_gem_object *obj;
+ struct drm_i915_gem_object *obj_priv;
+ int ret;
+
+ obj = i915_gem_alloc_object(dev, 4096);
+ if (obj == NULL) {
+ DRM_ERROR("Failed to allocate seqno page\n");
+ ret = -ENOMEM;
+ goto err;
+ }
+ obj_priv = to_intel_bo(obj);
+ obj_priv->agp_type = AGP_USER_CACHED_MEMORY;
+
+ ret = i915_gem_object_pin(obj, 4096);
+ if (ret)
+ goto err_unref;
+
+ dev_priv->seqno_gfx_addr = obj_priv->gtt_offset;
+ dev_priv->seqno_page = kmap(obj_priv->pages[0]);
+ if (dev_priv->seqno_page == NULL) {
+ ret = -ENOMEM;
+ goto err_unpin;
+ }
+
+ dev_priv->seqno_obj = obj;
+ memset(dev_priv->seqno_page, 0, PAGE_SIZE);
+
+ return 0;
+
+err_unpin:
+ i915_gem_object_unpin(obj);
+err_unref:
+ drm_gem_object_unreference(obj);
+err:
+ return ret;
+}
+
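i915_gem_init_pipe_control() above uses the standard kernel error-unwind shape: each failure jumps to a label that releases exactly what has been acquired so far, in reverse order. A minimal userspace sketch of the same shape, with illustrative resources:

#include <stdio.h>
#include <stdlib.h>

static int setup(void)
{
	void *obj = NULL, *map = NULL;
	int ret;

	obj = malloc(4096);		/* analogue of allocating the object */
	if (!obj) {
		ret = -1;
		goto err;
	}

	map = malloc(64);		/* analogue of pin + kmap */
	if (!map) {
		ret = -2;
		goto err_free_obj;
	}

	free(map);			/* success path (sketch only) */
	free(obj);
	return 0;

err_free_obj:
	free(obj);			/* undo only the steps that succeeded */
err:
	return ret;
}

int main(void)
{
	printf("setup: %d\n", setup());
	return 0;
}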
static int
i915_gem_init_hws(struct drm_device *dev)
{
@@ -4560,10 +4668,11 @@ i915_gem_init_hws(struct drm_device *dev)
if (!I915_NEED_GFX_HWS(dev))
return 0;
- obj = drm_gem_object_alloc(dev, 4096);
+ obj = i915_gem_alloc_object(dev, 4096);
if (obj == NULL) {
DRM_ERROR("Failed to allocate status page\n");
- return -ENOMEM;
+ ret = -ENOMEM;
+ goto err;
}
obj_priv = to_intel_bo(obj);
obj_priv->agp_type = AGP_USER_CACHED_MEMORY;
@@ -4571,7 +4680,7 @@ i915_gem_init_hws(struct drm_device *dev)
ret = i915_gem_object_pin(obj, 4096);
if (ret != 0) {
- drm_gem_object_unreference(obj);
- return ret;
+ goto err_unref;
}
dev_priv->status_gfx_addr = obj_priv->gtt_offset;
@@ -4580,10 +4689,16 @@ i915_gem_init_hws(struct drm_device *dev)
if (dev_priv->hw_status_page == NULL) {
DRM_ERROR("Failed to map status page.\n");
memset(&dev_priv->hws_map, 0, sizeof(dev_priv->hws_map));
- i915_gem_object_unpin(obj);
- drm_gem_object_unreference(obj);
- return -EINVAL;
+ ret = -EINVAL;
+ goto err_unpin;
}
+
+ if (HAS_PIPE_CONTROL(dev)) {
+ ret = i915_gem_init_pipe_control(dev);
+ if (ret)
+ goto err_unpin;
+ }
+
dev_priv->hws_obj = obj;
memset(dev_priv->hw_status_page, 0, PAGE_SIZE);
if (IS_GEN6(dev)) {
@@ -4596,6 +4711,30 @@ i915_gem_init_hws(struct drm_device *dev)
DRM_DEBUG_DRIVER("hws offset: 0x%08x\n", dev_priv->status_gfx_addr);
return 0;
+
+err_unpin:
+ i915_gem_object_unpin(obj);
+err_unref:
+ drm_gem_object_unreference(obj);
+err:
+ return ret;
+}
+
+static void
+i915_gem_cleanup_pipe_control(struct drm_device *dev)
+{
+ drm_i915_private_t *dev_priv = dev->dev_private;
+ struct drm_gem_object *obj;
+ struct drm_i915_gem_object *obj_priv;
+
+ obj = dev_priv->seqno_obj;
+ obj_priv = to_intel_bo(obj);
+ kunmap(obj_priv->pages[0]);
+ i915_gem_object_unpin(obj);
+ drm_gem_object_unreference(obj);
+ dev_priv->seqno_obj = NULL;
+
+ dev_priv->seqno_page = NULL;
}
static void
@@ -4619,6 +4758,9 @@ i915_gem_cleanup_hws(struct drm_device *dev)
memset(&dev_priv->hws_map, 0, sizeof(dev_priv->hws_map));
dev_priv->hw_status_page = NULL;
+ if (HAS_PIPE_CONTROL(dev))
+ i915_gem_cleanup_pipe_control(dev);
+
/* Write high address into HWS_PGA when disabling. */
I915_WRITE(HWS_PGA, 0x1ffff000);
}
@@ -4637,7 +4779,7 @@ i915_gem_init_ringbuffer(struct drm_device *dev)
if (ret != 0)
return ret;
- obj = drm_gem_object_alloc(dev, 128 * 1024);
+ obj = i915_gem_alloc_object(dev, 128 * 1024);
if (obj == NULL) {
DRM_ERROR("Failed to allocate ringbuffer\n");
i915_gem_cleanup_hws(dev);
@@ -4830,6 +4972,8 @@ i915_gem_load(struct drm_device *dev)
INIT_LIST_HEAD(&dev_priv->mm.inactive_list);
INIT_LIST_HEAD(&dev_priv->mm.request_list);
INIT_LIST_HEAD(&dev_priv->mm.fence_list);
+ for (i = 0; i < 16; i++)
+ INIT_LIST_HEAD(&dev_priv->fence_regs[i].lru_list);
INIT_DELAYED_WORK(&dev_priv->mm.retire_work,
i915_gem_retire_work_handler);
dev_priv->mm.next_gem_seqno = 1;
@@ -5058,6 +5202,20 @@ void i915_gem_release(struct drm_device * dev, struct drm_file *file_priv)
}
static int
+i915_gpu_is_active(struct drm_device *dev)
+{
+ drm_i915_private_t *dev_priv = dev->dev_private;
+ int lists_empty;
+
+ spin_lock(&dev_priv->mm.active_list_lock);
+ lists_empty = list_empty(&dev_priv->mm.flushing_list) &&
+ list_empty(&dev_priv->mm.active_list);
+ spin_unlock(&dev_priv->mm.active_list_lock);
+
+ return !lists_empty;
+}
+
+static int
i915_gem_shrink(int nr_to_scan, gfp_t gfp_mask)
{
drm_i915_private_t *dev_priv, *next_dev;
@@ -5086,6 +5244,7 @@ i915_gem_shrink(int nr_to_scan, gfp_t gfp_mask)
spin_lock(&shrink_list_lock);
+rescan:
/* first scan for clean buffers */
list_for_each_entry_safe(dev_priv, next_dev,
&shrink_list, mm.shrink_list) {
@@ -5102,7 +5261,7 @@ i915_gem_shrink(int nr_to_scan, gfp_t gfp_mask)
&dev_priv->mm.inactive_list,
list) {
if (i915_gem_object_is_purgeable(obj_priv)) {
- i915_gem_object_unbind(obj_priv->obj);
+ i915_gem_object_unbind(&obj_priv->base);
if (--nr_to_scan <= 0)
break;
}
@@ -5131,7 +5290,7 @@ i915_gem_shrink(int nr_to_scan, gfp_t gfp_mask)
&dev_priv->mm.inactive_list,
list) {
if (nr_to_scan > 0) {
- i915_gem_object_unbind(obj_priv->obj);
+ i915_gem_object_unbind(&obj_priv->base);
nr_to_scan--;
} else
cnt++;
@@ -5143,6 +5302,36 @@ i915_gem_shrink(int nr_to_scan, gfp_t gfp_mask)
would_deadlock = 0;
}
+ if (nr_to_scan) {
+ int active = 0;
+
+ /*
+ * We are desperate for pages, so as a last resort, wait
+ * for the GPU to finish and discard whatever we can.
+ * This dramatically reduces the number of OOM-killer events
+ * while running the GPU aggressively.
+ */
+ list_for_each_entry(dev_priv, &shrink_list, mm.shrink_list) {
+ struct drm_device *dev = dev_priv->dev;
+
+ if (!mutex_trylock(&dev->struct_mutex))
+ continue;
+
+ spin_unlock(&shrink_list_lock);
+
+ if (i915_gpu_is_active(dev)) {
+ i915_gpu_idle(dev);
+ active++;
+ }
+
+ spin_lock(&shrink_list_lock);
+ mutex_unlock(&dev->struct_mutex);
+ }
+
+ if (active)
+ goto rescan;
+ }
+
spin_unlock(&shrink_list_lock);
if (would_deadlock)
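The hunk above adds a third, last-resort phase to the shrinker: if purging clean and then dirty inactive buffers still leaves pages owed, wait for the GPU to idle and rescan from the top, which turns previously active buffers into reclaimable ones. A hedged sketch of that control flow, where every helper is a stand-in:

#include <stdbool.h>
#include <stdio.h>

/* Stand-in reclaim helpers: each returns how many units it freed. */
static int reclaim_clean(int want) { return want > 2 ? 2 : want; }
static int reclaim_dirty(int want) { return want > 1 ? 1 : want; }

static bool gpu_active = true;
static void gpu_idle(void) { gpu_active = false; }

int shrink(int nr_to_scan)
{
rescan:
	nr_to_scan -= reclaim_clean(nr_to_scan);	/* phase 1: cheap */
	if (nr_to_scan <= 0)
		return 0;
	nr_to_scan -= reclaim_dirty(nr_to_scan);	/* phase 2: costly */
	if (nr_to_scan > 0 && gpu_active) {
		gpu_idle();	/* last resort: retire GPU work... */
		goto rescan;	/* ...then scan again from the top */
	}
	return nr_to_scan;	/* > 0 means we still owe pages */
}

int main(void)
{
	printf("still owed: %d\n", shrink(10));
	return 0;
}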
diff --git a/drivers/gpu/drm/i915/i915_gem_debug.c b/drivers/gpu/drm/i915/i915_gem_debug.c
index 35507cf53fa..80f380b1d95 100644
--- a/drivers/gpu/drm/i915/i915_gem_debug.c
+++ b/drivers/gpu/drm/i915/i915_gem_debug.c
@@ -39,7 +39,7 @@ i915_verify_inactive(struct drm_device *dev, char *file, int line)
struct drm_i915_gem_object *obj_priv;
list_for_each_entry(obj_priv, &dev_priv->mm.inactive_list, list) {
- obj = obj_priv->obj;
+ obj = &obj_priv->base;
if (obj_priv->pin_count || obj_priv->active ||
(obj->write_domain & ~(I915_GEM_DOMAIN_CPU |
I915_GEM_DOMAIN_GTT)))
diff --git a/drivers/gpu/drm/i915/i915_gem_tiling.c b/drivers/gpu/drm/i915/i915_gem_tiling.c
index 449157f7161..4b7c49d4257 100644
--- a/drivers/gpu/drm/i915/i915_gem_tiling.c
+++ b/drivers/gpu/drm/i915/i915_gem_tiling.c
@@ -202,21 +202,17 @@ i915_tiling_ok(struct drm_device *dev, int stride, int size, int tiling_mode)
 * reg, so don't bother to check the size */
if (stride / 128 > I965_FENCE_MAX_PITCH_VAL)
return false;
- } else if (IS_I9XX(dev)) {
- uint32_t pitch_val = ffs(stride / tile_width) - 1;
-
- /* XXX: For Y tiling, FENCE_MAX_PITCH_VAL is actually 6 (8KB)
- * instead of 4 (2KB) on 945s.
- */
- if (pitch_val > I915_FENCE_MAX_PITCH_VAL ||
- size > (I830_FENCE_MAX_SIZE_VAL << 20))
+ } else if (IS_GEN3(dev) || IS_GEN2(dev)) {
+ if (stride > 8192)
return false;
- } else {
- uint32_t pitch_val = ffs(stride / tile_width) - 1;
- if (pitch_val > I830_FENCE_MAX_PITCH_VAL ||
- size > (I830_FENCE_MAX_SIZE_VAL << 19))
- return false;
+ if (IS_GEN3(dev)) {
+ if (size > I830_FENCE_MAX_SIZE_VAL << 20)
+ return false;
+ } else {
+ if (size > I830_FENCE_MAX_SIZE_VAL << 19)
+ return false;
+ }
}
/* 965+ just needs multiples of tile width */
@@ -287,6 +283,11 @@ i915_gem_set_tiling(struct drm_device *dev, void *data,
return -EINVAL;
}
+ if (obj_priv->pin_count) {
+ drm_gem_object_unreference_unlocked(obj);
+ return -EBUSY;
+ }
+
if (args->tiling_mode == I915_TILING_NONE) {
args->swizzle_mode = I915_BIT_6_SWIZZLE_NONE;
args->stride = 0;
diff --git a/drivers/gpu/drm/i915/i915_irq.c b/drivers/gpu/drm/i915/i915_irq.c
index 6421481d622..8c3f0802686 100644
--- a/drivers/gpu/drm/i915/i915_irq.c
+++ b/drivers/gpu/drm/i915/i915_irq.c
@@ -169,9 +169,13 @@ void intel_enable_asle (struct drm_device *dev)
if (HAS_PCH_SPLIT(dev))
ironlake_enable_display_irq(dev_priv, DE_GSE);
- else
+ else {
i915_enable_pipestat(dev_priv, 1,
I915_LEGACY_BLC_EVENT_ENABLE);
+ if (IS_I965G(dev))
+ i915_enable_pipestat(dev_priv, 0,
+ I915_LEGACY_BLC_EVENT_ENABLE);
+ }
}
/**
@@ -256,18 +260,18 @@ static void i915_hotplug_work_func(struct work_struct *work)
hotplug_work);
struct drm_device *dev = dev_priv->dev;
struct drm_mode_config *mode_config = &dev->mode_config;
- struct drm_connector *connector;
+ struct drm_encoder *encoder;
- if (mode_config->num_connector) {
- list_for_each_entry(connector, &mode_config->connector_list, head) {
- struct intel_encoder *intel_encoder = to_intel_encoder(connector);
+ if (mode_config->num_encoder) {
+ list_for_each_entry(encoder, &mode_config->encoder_list, head) {
+ struct intel_encoder *intel_encoder = enc_to_intel_encoder(encoder);
if (intel_encoder->hot_plug)
(*intel_encoder->hot_plug) (intel_encoder);
}
}
/* Just fire off a uevent and let userspace tell us what to do */
- drm_sysfs_hotplug_event(dev);
+ drm_helper_hpd_irq_event(dev);
}
static void i915_handle_rps_change(struct drm_device *dev)
@@ -349,7 +353,7 @@ irqreturn_t ironlake_irq_handler(struct drm_device *dev)
READ_BREADCRUMB(dev_priv);
}
- if (gt_iir & GT_USER_INTERRUPT) {
+ if (gt_iir & GT_PIPE_NOTIFY) {
u32 seqno = i915_get_gem_seqno(dev);
dev_priv->mm.irq_gem_seqno = seqno;
trace_i915_gem_request_complete(dev, seqno);
@@ -456,11 +460,15 @@ i915_error_object_create(struct drm_device *dev,
for (page = 0; page < page_count; page++) {
void *s, *d = kmalloc(PAGE_SIZE, GFP_ATOMIC);
+ unsigned long flags;
+
if (d == NULL)
goto unwind;
- s = kmap_atomic(src_priv->pages[page], KM_USER0);
+ local_irq_save(flags);
+ s = kmap_atomic(src_priv->pages[page], KM_IRQ0);
memcpy(d, s, PAGE_SIZE);
- kunmap_atomic(s, KM_USER0);
+ kunmap_atomic(s, KM_IRQ0);
+ local_irq_restore(flags);
dst->pages[page] = d;
}
dst->page_count = page_count;
@@ -608,7 +616,7 @@ static void i915_capture_error_state(struct drm_device *dev)
batchbuffer[1] = NULL;
count = 0;
list_for_each_entry(obj_priv, &dev_priv->mm.active_list, list) {
- struct drm_gem_object *obj = obj_priv->obj;
+ struct drm_gem_object *obj = &obj_priv->base;
if (batchbuffer[0] == NULL &&
bbaddr >= obj_priv->gtt_offset &&
@@ -644,7 +652,7 @@ static void i915_capture_error_state(struct drm_device *dev)
if (error->active_bo) {
int i = 0;
list_for_each_entry(obj_priv, &dev_priv->mm.active_list, list) {
- struct drm_gem_object *obj = obj_priv->obj;
+ struct drm_gem_object *obj = &obj_priv->base;
error->active_bo[i].size = obj->size;
error->active_bo[i].name = obj->name;
@@ -946,7 +954,8 @@ irqreturn_t i915_driver_irq_handler(DRM_IRQ_ARGS)
intel_finish_page_flip(dev, 1);
}
- if ((pipeb_stats & I915_LEGACY_BLC_EVENT_STATUS) ||
+ if ((pipea_stats & I915_LEGACY_BLC_EVENT_STATUS) ||
+ (pipeb_stats & I915_LEGACY_BLC_EVENT_STATUS) ||
(iir & I915_ASLE_INTERRUPT))
opregion_asle_intr(dev);
@@ -1005,7 +1014,7 @@ void i915_user_irq_get(struct drm_device *dev)
spin_lock_irqsave(&dev_priv->user_irq_lock, irqflags);
if (dev->irq_enabled && (++dev_priv->user_irq_refcount == 1)) {
if (HAS_PCH_SPLIT(dev))
- ironlake_enable_graphics_irq(dev_priv, GT_USER_INTERRUPT);
+ ironlake_enable_graphics_irq(dev_priv, GT_PIPE_NOTIFY);
else
i915_enable_irq(dev_priv, I915_USER_INTERRUPT);
}
@@ -1021,7 +1030,7 @@ void i915_user_irq_put(struct drm_device *dev)
BUG_ON(dev->irq_enabled && dev_priv->user_irq_refcount <= 0);
if (dev->irq_enabled && (--dev_priv->user_irq_refcount == 0)) {
if (HAS_PCH_SPLIT(dev))
- ironlake_disable_graphics_irq(dev_priv, GT_USER_INTERRUPT);
+ ironlake_disable_graphics_irq(dev_priv, GT_PIPE_NOTIFY);
else
i915_disable_irq(dev_priv, I915_USER_INTERRUPT);
}
@@ -1305,7 +1314,7 @@ static int ironlake_irq_postinstall(struct drm_device *dev)
/* enable kind of interrupts always enabled */
u32 display_mask = DE_MASTER_IRQ_CONTROL | DE_GSE | DE_PCH_EVENT |
DE_PLANEA_FLIP_DONE | DE_PLANEB_FLIP_DONE;
- u32 render_mask = GT_USER_INTERRUPT;
+ u32 render_mask = GT_PIPE_NOTIFY;
u32 hotplug_mask = SDE_CRT_HOTPLUG | SDE_PORTB_HOTPLUG |
SDE_PORTC_HOTPLUG | SDE_PORTD_HOTPLUG;
diff --git a/drivers/gpu/drm/i915/i915_opregion.c b/drivers/gpu/drm/i915/i915_opregion.c
index 7cc8410239c..8fcc75c1aa2 100644
--- a/drivers/gpu/drm/i915/i915_opregion.c
+++ b/drivers/gpu/drm/i915/i915_opregion.c
@@ -382,8 +382,57 @@ static void intel_didl_outputs(struct drm_device *dev)
struct drm_i915_private *dev_priv = dev->dev_private;
struct intel_opregion *opregion = &dev_priv->opregion;
struct drm_connector *connector;
+ acpi_handle handle;
+ struct acpi_device *acpi_dev, *acpi_cdev, *acpi_video_bus = NULL;
+ unsigned long long device_id;
+ acpi_status status;
int i = 0;
+ handle = DEVICE_ACPI_HANDLE(&dev->pdev->dev);
+ if (!handle || ACPI_FAILURE(acpi_bus_get_device(handle, &acpi_dev)))
+ return;
+
+ if (acpi_is_video_device(acpi_dev))
+ acpi_video_bus = acpi_dev;
+ else {
+ list_for_each_entry(acpi_cdev, &acpi_dev->children, node) {
+ if (acpi_is_video_device(acpi_cdev)) {
+ acpi_video_bus = acpi_cdev;
+ break;
+ }
+ }
+ }
+
+ if (!acpi_video_bus) {
+ printk(KERN_WARNING "No ACPI video bus found\n");
+ return;
+ }
+
+ list_for_each_entry(acpi_cdev, &acpi_video_bus->children, node) {
+ if (i >= 8) {
+ dev_printk(KERN_ERR, &dev->pdev->dev,
+ "More than 8 outputs detected\n");
+ return;
+ }
+ status =
+ acpi_evaluate_integer(acpi_cdev->handle, "_ADR",
+ NULL, &device_id);
+ if (ACPI_SUCCESS(status)) {
+ if (!device_id)
+ goto blind_set;
+ opregion->acpi->didl[i] = (u32)(device_id & 0x0f0f);
+ i++;
+ }
+ }
+
+end:
+ /* If fewer than 8 outputs, the list must be null terminated */
+ if (i < 8)
+ opregion->acpi->didl[i] = 0;
+ return;
+
+blind_set:
+ i = 0;
list_for_each_entry(connector, &dev->mode_config.connector_list, head) {
int output_type = ACPI_OTHER_OUTPUT;
if (i >= 8) {
@@ -416,10 +465,7 @@ static void intel_didl_outputs(struct drm_device *dev)
opregion->acpi->didl[i] |= (1<<31) | output_type | i;
i++;
}
-
- /* If fewer than 8 outputs, the list must be null terminated */
- if (i < 8)
- opregion->acpi->didl[i] = 0;
+ goto end;
}
int intel_opregion_init(struct drm_device *dev, int resume)
diff --git a/drivers/gpu/drm/i915/i915_reg.h b/drivers/gpu/drm/i915/i915_reg.h
index cbbf59f56df..f3e39cc46f0 100644
--- a/drivers/gpu/drm/i915/i915_reg.h
+++ b/drivers/gpu/drm/i915/i915_reg.h
@@ -230,6 +230,16 @@
#define ASYNC_FLIP (1<<22)
#define DISPLAY_PLANE_A (0<<20)
#define DISPLAY_PLANE_B (1<<20)
+#define GFX_OP_PIPE_CONTROL ((0x3<<29)|(0x3<<27)|(0x2<<24)|2)
+#define PIPE_CONTROL_QW_WRITE (1<<14)
+#define PIPE_CONTROL_DEPTH_STALL (1<<13)
+#define PIPE_CONTROL_WC_FLUSH (1<<12)
+#define PIPE_CONTROL_IS_FLUSH (1<<11) /* MBZ on Ironlake */
+#define PIPE_CONTROL_TC_FLUSH (1<<10) /* GM45+ only */
+#define PIPE_CONTROL_ISP_DIS (1<<9)
+#define PIPE_CONTROL_NOTIFY (1<<8)
+#define PIPE_CONTROL_GLOBAL_GTT (1<<2) /* in addr dword */
+#define PIPE_CONTROL_STALL_EN (1<<1) /* in addr word, Ironlake+ only */
/*
* Fence registers
@@ -241,7 +251,7 @@
#define I830_FENCE_SIZE_BITS(size) ((ffs((size) >> 19) - 1) << 8)
#define I830_FENCE_PITCH_SHIFT 4
#define I830_FENCE_REG_VALID (1<<0)
-#define I915_FENCE_MAX_PITCH_VAL 0x10
+#define I915_FENCE_MAX_PITCH_VAL 4
#define I830_FENCE_MAX_PITCH_VAL 6
#define I830_FENCE_MAX_SIZE_VAL (1<<8)
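The 0x10 -> 4 change for I915_FENCE_MAX_PITCH_VAL follows from the encoding the write path uses: the register stores ffs(stride / tile_width) - 1, a log2-style value, so the largest legal field for an 8192-byte X-tiled stride (512-byte tile width) is 4, not a raw 0x10. A quick check:

#include <stdio.h>
#include <strings.h>	/* ffs() */

int main(void)
{
	int tile_width = 512;	/* X-tiled stride unit on gen3 */

	for (int stride = 512; stride <= 8192; stride <<= 1) {
		int pitch_val = ffs(stride / tile_width) - 1;
		printf("stride %4d -> pitch_val %d\n", stride, pitch_val);
	}
	/* stride 8192 -> pitch_val 4 == I915_FENCE_MAX_PITCH_VAL */
	return 0;
}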
@@ -1754,6 +1764,14 @@
#define DP_LINK_TRAIN_MASK (3 << 28)
#define DP_LINK_TRAIN_SHIFT 28
+/* CPT Link training mode */
+#define DP_LINK_TRAIN_PAT_1_CPT (0 << 8)
+#define DP_LINK_TRAIN_PAT_2_CPT (1 << 8)
+#define DP_LINK_TRAIN_PAT_IDLE_CPT (2 << 8)
+#define DP_LINK_TRAIN_OFF_CPT (3 << 8)
+#define DP_LINK_TRAIN_MASK_CPT (7 << 8)
+#define DP_LINK_TRAIN_SHIFT_CPT 8
+
/* Signal voltages. These are mostly controlled by the other end */
#define DP_VOLTAGE_0_4 (0 << 25)
#define DP_VOLTAGE_0_6 (1 << 25)
@@ -1914,7 +1932,10 @@
/* Display & cursor control */
/* dithering flag on Ironlake */
-#define PIPE_ENABLE_DITHER (1 << 4)
+#define PIPE_ENABLE_DITHER (1 << 4)
+#define PIPE_DITHER_TYPE_MASK (3 << 2)
+#define PIPE_DITHER_TYPE_SPATIAL (0 << 2)
+#define PIPE_DITHER_TYPE_ST01 (1 << 2)
/* Pipe A */
#define PIPEADSL 0x70000
#define PIPEACONF 0x70008
@@ -1978,15 +1999,24 @@
#define DSPFW1 0x70034
#define DSPFW_SR_SHIFT 23
+#define DSPFW_SR_MASK (0x1ff<<23)
#define DSPFW_CURSORB_SHIFT 16
+#define DSPFW_CURSORB_MASK (0x3f<<16)
#define DSPFW_PLANEB_SHIFT 8
+#define DSPFW_PLANEB_MASK (0x7f<<8)
+#define DSPFW_PLANEA_MASK (0x7f)
#define DSPFW2 0x70038
#define DSPFW_CURSORA_MASK 0x00003f00
#define DSPFW_CURSORA_SHIFT 8
+#define DSPFW_PLANEC_MASK (0x7f)
#define DSPFW3 0x7003c
#define DSPFW_HPLL_SR_EN (1<<31)
#define DSPFW_CURSOR_SR_SHIFT 24
#define PINEVIEW_SELF_REFRESH_EN (1<<30)
+#define DSPFW_CURSOR_SR_MASK (0x3f<<24)
+#define DSPFW_HPLL_CURSOR_SHIFT 16
+#define DSPFW_HPLL_CURSOR_MASK (0x3f<<16)
+#define DSPFW_HPLL_SR_MASK (0x1ff)
/* FIFO watermark sizes etc */
#define G4X_FIFO_LINE_SIZE 64
@@ -2013,6 +2043,43 @@
#define PINEVIEW_CURSOR_DFT_WM 0
#define PINEVIEW_CURSOR_GUARD_WM 5
+
+/* define the Watermark register on Ironlake */
+#define WM0_PIPEA_ILK 0x45100
+#define WM0_PIPE_PLANE_MASK (0x7f<<16)
+#define WM0_PIPE_PLANE_SHIFT 16
+#define WM0_PIPE_SPRITE_MASK (0x3f<<8)
+#define WM0_PIPE_SPRITE_SHIFT 8
+#define WM0_PIPE_CURSOR_MASK (0x1f)
+
+#define WM0_PIPEB_ILK 0x45104
+#define WM1_LP_ILK 0x45108
+#define WM1_LP_SR_EN (1<<31)
+#define WM1_LP_LATENCY_SHIFT 24
+#define WM1_LP_LATENCY_MASK (0x7f<<24)
+#define WM1_LP_SR_MASK (0x1ff<<8)
+#define WM1_LP_SR_SHIFT 8
+#define WM1_LP_CURSOR_MASK (0x3f)
+
+/* Memory latency timer register */
+#define MLTR_ILK 0x11222
+/* memory self-refresh latency is specified in units of 0.5us */
+#define ILK_SRLT_MASK 0x3f
+
+/* define the fifo size on Ironlake */
+#define ILK_DISPLAY_FIFO 128
+#define ILK_DISPLAY_MAXWM 64
+#define ILK_DISPLAY_DFTWM 8
+
+#define ILK_DISPLAY_SR_FIFO 512
+#define ILK_DISPLAY_MAX_SRWM 0x1ff
+#define ILK_DISPLAY_DFT_SRWM 0x3f
+#define ILK_CURSOR_SR_FIFO 64
+#define ILK_CURSOR_MAX_SRWM 0x3f
+#define ILK_CURSOR_DFT_SRWM 8
+
+#define ILK_FIFO_LINE_SIZE 64
+
/*
* The two pipe frame counter registers are not synchronized, so
* reading a stable value is somewhat tricky. The following code
@@ -2285,6 +2352,7 @@
#define DEIER 0x4400c
/* GT interrupt */
+#define GT_PIPE_NOTIFY (1 << 4)
#define GT_SYNC_STATUS (1 << 2)
#define GT_USER_INTERRUPT (1 << 0)
@@ -2293,8 +2361,15 @@
#define GTIIR 0x44018
#define GTIER 0x4401c
+#define ILK_DISPLAY_CHICKEN2 0x42004
+#define ILK_DPARB_GATE (1<<22)
+#define ILK_VSDPFD_FULL (1<<21)
+#define ILK_DSPCLK_GATE 0x42020
+#define ILK_DPARB_CLK_GATE (1<<5)
+
#define DISP_ARB_CTL 0x45000
#define DISP_TILE_SURFACE_SWIZZLING (1<<13)
+#define DISP_FBC_WM_DIS (1<<15)
/* PCH */
@@ -2305,6 +2380,11 @@
#define SDE_PORTB_HOTPLUG (1 << 8)
#define SDE_SDVOB_HOTPLUG (1 << 6)
#define SDE_HOTPLUG_MASK (0xf << 8)
+/* CPT */
+#define SDE_CRT_HOTPLUG_CPT (1 << 19)
+#define SDE_PORTD_HOTPLUG_CPT (1 << 23)
+#define SDE_PORTC_HOTPLUG_CPT (1 << 22)
+#define SDE_PORTB_HOTPLUG_CPT (1 << 21)
#define SDEISR 0xc4000
#define SDEIMR 0xc4004
@@ -2396,6 +2476,17 @@
#define PCH_SSC4_PARMS 0xc6210
#define PCH_SSC4_AUX_PARMS 0xc6214
+#define PCH_DPLL_SEL 0xc7000
+#define TRANSA_DPLL_ENABLE (1<<3)
+#define TRANSA_DPLLB_SEL (1<<0)
+#define TRANSA_DPLLA_SEL 0
+#define TRANSB_DPLL_ENABLE (1<<7)
+#define TRANSB_DPLLB_SEL (1<<4)
+#define TRANSB_DPLLA_SEL (0)
+#define TRANSC_DPLL_ENABLE (1<<11)
+#define TRANSC_DPLLB_SEL (1<<8)
+#define TRANSC_DPLLA_SEL (0)
+
/* transcoder */
#define TRANS_HTOTAL_A 0xe0000
@@ -2482,6 +2573,19 @@
#define FDI_LINK_TRAIN_PRE_EMPHASIS_1_5X (1<<22)
#define FDI_LINK_TRAIN_PRE_EMPHASIS_2X (2<<22)
#define FDI_LINK_TRAIN_PRE_EMPHASIS_3X (3<<22)
+/* ILK always uses 400mV 0dB for voltage swing and pre-emphasis level.
+ SNB has different settings. */
+/* SNB A-stepping */
+#define FDI_LINK_TRAIN_400MV_0DB_SNB_A (0x38<<22)
+#define FDI_LINK_TRAIN_400MV_6DB_SNB_A (0x02<<22)
+#define FDI_LINK_TRAIN_600MV_3_5DB_SNB_A (0x01<<22)
+#define FDI_LINK_TRAIN_800MV_0DB_SNB_A (0x0<<22)
+/* SNB B-stepping */
+#define FDI_LINK_TRAIN_400MV_0DB_SNB_B (0x0<<22)
+#define FDI_LINK_TRAIN_400MV_6DB_SNB_B (0x3a<<22)
+#define FDI_LINK_TRAIN_600MV_3_5DB_SNB_B (0x39<<22)
+#define FDI_LINK_TRAIN_800MV_0DB_SNB_B (0x38<<22)
+#define FDI_LINK_TRAIN_VOL_EMP_MASK (0x3f<<22)
#define FDI_DP_PORT_WIDTH_X1 (0<<19)
#define FDI_DP_PORT_WIDTH_X2 (1<<19)
#define FDI_DP_PORT_WIDTH_X3 (2<<19)
@@ -2514,6 +2618,13 @@
#define FDI_RX_ENHANCE_FRAME_ENABLE (1<<6)
#define FDI_SEL_RAWCLK (0<<4)
#define FDI_SEL_PCDCLK (1<<4)
+/* CPT */
+#define FDI_AUTO_TRAINING (1<<10)
+#define FDI_LINK_TRAIN_PATTERN_1_CPT (0<<8)
+#define FDI_LINK_TRAIN_PATTERN_2_CPT (1<<8)
+#define FDI_LINK_TRAIN_PATTERN_IDLE_CPT (2<<8)
+#define FDI_LINK_TRAIN_NORMAL_CPT (3<<8)
+#define FDI_LINK_TRAIN_PATTERN_MASK_CPT (3<<8)
#define FDI_RXA_MISC 0xf0010
#define FDI_RXB_MISC 0xf1010
@@ -2585,6 +2696,9 @@
#define HSYNC_ACTIVE_HIGH (1 << 3)
#define PORT_DETECTED (1 << 2)
+/* PCH SDVOB is multiplexed with HDMIB */
+#define PCH_SDVOB HDMIB
+
#define HDMIC 0xe1150
#define HDMID 0xe1160
@@ -2642,4 +2756,42 @@
#define PCH_DPD_AUX_CH_DATA4 0xe4320
#define PCH_DPD_AUX_CH_DATA5 0xe4324
+/* CPT */
+#define PORT_TRANS_A_SEL_CPT 0
+#define PORT_TRANS_B_SEL_CPT (1<<29)
+#define PORT_TRANS_C_SEL_CPT (2<<29)
+#define PORT_TRANS_SEL_MASK (3<<29)
+
+#define TRANS_DP_CTL_A 0xe0300
+#define TRANS_DP_CTL_B 0xe1300
+#define TRANS_DP_CTL_C 0xe2300
+#define TRANS_DP_OUTPUT_ENABLE (1<<31)
+#define TRANS_DP_PORT_SEL_B (0<<29)
+#define TRANS_DP_PORT_SEL_C (1<<29)
+#define TRANS_DP_PORT_SEL_D (2<<29)
+#define TRANS_DP_PORT_SEL_MASK (3<<29)
+#define TRANS_DP_AUDIO_ONLY (1<<26)
+#define TRANS_DP_ENH_FRAMING (1<<18)
+#define TRANS_DP_8BPC (0<<9)
+#define TRANS_DP_10BPC (1<<9)
+#define TRANS_DP_6BPC (2<<9)
+#define TRANS_DP_12BPC (3<<9)
+#define TRANS_DP_VSYNC_ACTIVE_HIGH (1<<4)
+#define TRANS_DP_VSYNC_ACTIVE_LOW 0
+#define TRANS_DP_HSYNC_ACTIVE_HIGH (1<<3)
+#define TRANS_DP_HSYNC_ACTIVE_LOW 0
+
+/* SNB eDP training params */
+/* SNB A-stepping */
+#define EDP_LINK_TRAIN_400MV_0DB_SNB_A (0x38<<22)
+#define EDP_LINK_TRAIN_400MV_6DB_SNB_A (0x02<<22)
+#define EDP_LINK_TRAIN_600MV_3_5DB_SNB_A (0x01<<22)
+#define EDP_LINK_TRAIN_800MV_0DB_SNB_A (0x0<<22)
+/* SNB B-stepping */
+#define EDP_LINK_TRAIN_400MV_0DB_SNB_B (0x0<<22)
+#define EDP_LINK_TRAIN_400MV_6DB_SNB_B (0x3a<<22)
+#define EDP_LINK_TRAIN_600MV_3_5DB_SNB_B (0x39<<22)
+#define EDP_LINK_TRAIN_800MV_0DB_SNB_B (0x38<<22)
+#define EDP_LINK_TRAIN_VOL_EMP_MASK_SNB (0x3f<<22)
+
#endif /* _I915_REG_H_ */
diff --git a/drivers/gpu/drm/i915/i915_suspend.c b/drivers/gpu/drm/i915/i915_suspend.c
index ac0d1a73ac2..60a5800fba6 100644
--- a/drivers/gpu/drm/i915/i915_suspend.c
+++ b/drivers/gpu/drm/i915/i915_suspend.c
@@ -600,14 +600,16 @@ void i915_save_display(struct drm_device *dev)
}
/* FIXME: save TV & SDVO state */
- /* FBC state */
- if (IS_GM45(dev)) {
- dev_priv->saveDPFC_CB_BASE = I915_READ(DPFC_CB_BASE);
- } else {
- dev_priv->saveFBC_CFB_BASE = I915_READ(FBC_CFB_BASE);
- dev_priv->saveFBC_LL_BASE = I915_READ(FBC_LL_BASE);
- dev_priv->saveFBC_CONTROL2 = I915_READ(FBC_CONTROL2);
- dev_priv->saveFBC_CONTROL = I915_READ(FBC_CONTROL);
+ /* Only save FBC state on platforms that support FBC */
+ if (I915_HAS_FBC(dev)) {
+ if (IS_GM45(dev)) {
+ dev_priv->saveDPFC_CB_BASE = I915_READ(DPFC_CB_BASE);
+ } else {
+ dev_priv->saveFBC_CFB_BASE = I915_READ(FBC_CFB_BASE);
+ dev_priv->saveFBC_LL_BASE = I915_READ(FBC_LL_BASE);
+ dev_priv->saveFBC_CONTROL2 = I915_READ(FBC_CONTROL2);
+ dev_priv->saveFBC_CONTROL = I915_READ(FBC_CONTROL);
+ }
}
/* VGA state */
@@ -702,18 +704,19 @@ void i915_restore_display(struct drm_device *dev)
}
/* FIXME: restore TV & SDVO state */
- /* FBC info */
- if (IS_GM45(dev)) {
- g4x_disable_fbc(dev);
- I915_WRITE(DPFC_CB_BASE, dev_priv->saveDPFC_CB_BASE);
- } else {
- i8xx_disable_fbc(dev);
- I915_WRITE(FBC_CFB_BASE, dev_priv->saveFBC_CFB_BASE);
- I915_WRITE(FBC_LL_BASE, dev_priv->saveFBC_LL_BASE);
- I915_WRITE(FBC_CONTROL2, dev_priv->saveFBC_CONTROL2);
- I915_WRITE(FBC_CONTROL, dev_priv->saveFBC_CONTROL);
+ /* Only restore FBC state on platforms that support FBC */
+ if (I915_HAS_FBC(dev)) {
+ if (IS_GM45(dev)) {
+ g4x_disable_fbc(dev);
+ I915_WRITE(DPFC_CB_BASE, dev_priv->saveDPFC_CB_BASE);
+ } else {
+ i8xx_disable_fbc(dev);
+ I915_WRITE(FBC_CFB_BASE, dev_priv->saveFBC_CFB_BASE);
+ I915_WRITE(FBC_LL_BASE, dev_priv->saveFBC_LL_BASE);
+ I915_WRITE(FBC_CONTROL2, dev_priv->saveFBC_CONTROL2);
+ I915_WRITE(FBC_CONTROL, dev_priv->saveFBC_CONTROL);
+ }
}
-
/* VGA state */
if (IS_IRONLAKE(dev))
I915_WRITE(CPU_VGACNTRL, dev_priv->saveVGACNTRL);
diff --git a/drivers/gpu/drm/i915/i915_trace.h b/drivers/gpu/drm/i915/i915_trace.h
index 01840d9bc38..9e4c45f68d6 100644
--- a/drivers/gpu/drm/i915/i915_trace.h
+++ b/drivers/gpu/drm/i915/i915_trace.h
@@ -115,7 +115,7 @@ TRACE_EVENT(i915_gem_object_get_fence,
__entry->obj, __entry->fence, __entry->tiling_mode)
);
-TRACE_EVENT(i915_gem_object_unbind,
+DECLARE_EVENT_CLASS(i915_gem_object,
TP_PROTO(struct drm_gem_object *obj),
@@ -132,21 +132,18 @@ TRACE_EVENT(i915_gem_object_unbind,
TP_printk("obj=%p", __entry->obj)
);
-TRACE_EVENT(i915_gem_object_destroy,
+DEFINE_EVENT(i915_gem_object, i915_gem_object_unbind,
TP_PROTO(struct drm_gem_object *obj),
- TP_ARGS(obj),
+ TP_ARGS(obj)
+);
- TP_STRUCT__entry(
- __field(struct drm_gem_object *, obj)
- ),
+DEFINE_EVENT(i915_gem_object, i915_gem_object_destroy,
- TP_fast_assign(
- __entry->obj = obj;
- ),
+ TP_PROTO(struct drm_gem_object *obj),
- TP_printk("obj=%p", __entry->obj)
+ TP_ARGS(obj)
);
/* batch tracing */
@@ -197,8 +194,7 @@ TRACE_EVENT(i915_gem_request_flush,
__entry->flush_domains, __entry->invalidate_domains)
);
-
-TRACE_EVENT(i915_gem_request_complete,
+DECLARE_EVENT_CLASS(i915_gem_request,
TP_PROTO(struct drm_device *dev, u32 seqno),
@@ -217,64 +213,35 @@ TRACE_EVENT(i915_gem_request_complete,
TP_printk("dev=%u, seqno=%u", __entry->dev, __entry->seqno)
);
-TRACE_EVENT(i915_gem_request_retire,
+DEFINE_EVENT(i915_gem_request, i915_gem_request_complete,
TP_PROTO(struct drm_device *dev, u32 seqno),
- TP_ARGS(dev, seqno),
-
- TP_STRUCT__entry(
- __field(u32, dev)
- __field(u32, seqno)
- ),
-
- TP_fast_assign(
- __entry->dev = dev->primary->index;
- __entry->seqno = seqno;
- ),
-
- TP_printk("dev=%u, seqno=%u", __entry->dev, __entry->seqno)
+ TP_ARGS(dev, seqno)
);
-TRACE_EVENT(i915_gem_request_wait_begin,
+DEFINE_EVENT(i915_gem_request, i915_gem_request_retire,
TP_PROTO(struct drm_device *dev, u32 seqno),
- TP_ARGS(dev, seqno),
-
- TP_STRUCT__entry(
- __field(u32, dev)
- __field(u32, seqno)
- ),
-
- TP_fast_assign(
- __entry->dev = dev->primary->index;
- __entry->seqno = seqno;
- ),
-
- TP_printk("dev=%u, seqno=%u", __entry->dev, __entry->seqno)
+ TP_ARGS(dev, seqno)
);
-TRACE_EVENT(i915_gem_request_wait_end,
+DEFINE_EVENT(i915_gem_request, i915_gem_request_wait_begin,
TP_PROTO(struct drm_device *dev, u32 seqno),
- TP_ARGS(dev, seqno),
+ TP_ARGS(dev, seqno)
+);
- TP_STRUCT__entry(
- __field(u32, dev)
- __field(u32, seqno)
- ),
+DEFINE_EVENT(i915_gem_request, i915_gem_request_wait_end,
- TP_fast_assign(
- __entry->dev = dev->primary->index;
- __entry->seqno = seqno;
- ),
+ TP_PROTO(struct drm_device *dev, u32 seqno),
- TP_printk("dev=%u, seqno=%u", __entry->dev, __entry->seqno)
+ TP_ARGS(dev, seqno)
);
-TRACE_EVENT(i915_ring_wait_begin,
+DECLARE_EVENT_CLASS(i915_ring,
TP_PROTO(struct drm_device *dev),
@@ -291,26 +258,23 @@ TRACE_EVENT(i915_ring_wait_begin,
TP_printk("dev=%u", __entry->dev)
);
-TRACE_EVENT(i915_ring_wait_end,
+DEFINE_EVENT(i915_ring, i915_ring_wait_begin,
TP_PROTO(struct drm_device *dev),
- TP_ARGS(dev),
+ TP_ARGS(dev)
+);
- TP_STRUCT__entry(
- __field(u32, dev)
- ),
+DEFINE_EVENT(i915_ring, i915_ring_wait_end,
- TP_fast_assign(
- __entry->dev = dev->primary->index;
- ),
+ TP_PROTO(struct drm_device *dev),
- TP_printk("dev=%u", __entry->dev)
+ TP_ARGS(dev)
);
#endif /* _I915_TRACE_H_ */
/* This part must be outside protection */
#undef TRACE_INCLUDE_PATH
-#define TRACE_INCLUDE_PATH ../../drivers/gpu/drm/i915
+#define TRACE_INCLUDE_PATH .
#include <trace/define_trace.h>
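The i915_trace.h conversion above deduplicates tracepoints that share a layout: DECLARE_EVENT_CLASS defines the record format, assignment, and print format once, and each DEFINE_EVENT stamps out a named event that reuses them. The shape, in the same macro style (class and event names are illustrative, and this fragment only compiles inside a kernel trace header, not standalone):

DECLARE_EVENT_CLASS(example_dev_event,

	    TP_PROTO(struct drm_device *dev),

	    TP_ARGS(dev),

	    TP_STRUCT__entry(
			     __field(u32, dev)
			     ),

	    TP_fast_assign(
			   __entry->dev = dev->primary->index;
			   ),

	    TP_printk("dev=%u", __entry->dev)
);

/* Two events, one shared layout: no duplicated TP_STRUCT__entry. */
DEFINE_EVENT(example_dev_event, example_begin,
	    TP_PROTO(struct drm_device *dev),
	    TP_ARGS(dev)
);

DEFINE_EVENT(example_dev_event, example_end,
	    TP_PROTO(struct drm_device *dev),
	    TP_ARGS(dev)
);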
diff --git a/drivers/gpu/drm/i915/intel_bios.c b/drivers/gpu/drm/i915/intel_bios.c
index f9ba452f0cb..4c748d8f73d 100644
--- a/drivers/gpu/drm/i915/intel_bios.c
+++ b/drivers/gpu/drm/i915/intel_bios.c
@@ -366,6 +366,7 @@ parse_sdvo_device_mapping(struct drm_i915_private *dev_priv,
p_mapping->dvo_port = p_child->dvo_port;
p_mapping->slave_addr = p_child->slave_addr;
p_mapping->dvo_wiring = p_child->dvo_wiring;
+ p_mapping->ddc_pin = p_child->ddc_pin;
p_mapping->initialized = 1;
} else {
DRM_DEBUG_KMS("Maybe one SDVO port is shared by "
diff --git a/drivers/gpu/drm/i915/intel_crt.c b/drivers/gpu/drm/i915/intel_crt.c
index 759c2ef72ef..e16ac5a28c3 100644
--- a/drivers/gpu/drm/i915/intel_crt.c
+++ b/drivers/gpu/drm/i915/intel_crt.c
@@ -136,11 +136,17 @@ static void intel_crt_mode_set(struct drm_encoder *encoder,
adpa |= ADPA_VSYNC_ACTIVE_HIGH;
if (intel_crtc->pipe == 0) {
- adpa |= ADPA_PIPE_A_SELECT;
+ if (HAS_PCH_CPT(dev))
+ adpa |= PORT_TRANS_A_SEL_CPT;
+ else
+ adpa |= ADPA_PIPE_A_SELECT;
if (!HAS_PCH_SPLIT(dev))
I915_WRITE(BCLRPAT_A, 0);
} else {
- adpa |= ADPA_PIPE_B_SELECT;
+ if (HAS_PCH_CPT(dev))
+ adpa |= PORT_TRANS_B_SEL_CPT;
+ else
+ adpa |= ADPA_PIPE_B_SELECT;
if (!HAS_PCH_SPLIT(dev))
I915_WRITE(BCLRPAT_B, 0);
}
@@ -152,15 +158,21 @@ static bool intel_ironlake_crt_detect_hotplug(struct drm_connector *connector)
{
struct drm_device *dev = connector->dev;
struct drm_i915_private *dev_priv = dev->dev_private;
- u32 adpa;
+ u32 adpa, temp;
bool ret;
- adpa = I915_READ(PCH_ADPA);
+ temp = adpa = I915_READ(PCH_ADPA);
- adpa &= ~ADPA_CRT_HOTPLUG_MASK;
- /* disable HPD first */
- I915_WRITE(PCH_ADPA, adpa);
- (void)I915_READ(PCH_ADPA);
+ if (HAS_PCH_CPT(dev)) {
+ /* Disable DAC before force detect */
+ I915_WRITE(PCH_ADPA, adpa & ~ADPA_DAC_ENABLE);
+ (void)I915_READ(PCH_ADPA);
+ } else {
+ adpa &= ~ADPA_CRT_HOTPLUG_MASK;
+ /* disable HPD first */
+ I915_WRITE(PCH_ADPA, adpa);
+ (void)I915_READ(PCH_ADPA);
+ }
adpa |= (ADPA_CRT_HOTPLUG_PERIOD_128 |
ADPA_CRT_HOTPLUG_WARMUP_10MS |
@@ -176,6 +188,11 @@ static bool intel_ironlake_crt_detect_hotplug(struct drm_connector *connector)
while ((I915_READ(PCH_ADPA) & ADPA_CRT_HOTPLUG_FORCE_TRIGGER) != 0)
;
+ if (HAS_PCH_CPT(dev)) {
+ I915_WRITE(PCH_ADPA, temp);
+ (void)I915_READ(PCH_ADPA);
+ }
+
/* Check the status to see if both blue and green are on now */
adpa = I915_READ(PCH_ADPA);
adpa &= ADPA_CRT_HOTPLUG_MONITOR_MASK;
@@ -245,9 +262,9 @@ static bool intel_crt_detect_hotplug(struct drm_connector *connector)
return false;
}
-static bool intel_crt_detect_ddc(struct drm_connector *connector)
+static bool intel_crt_detect_ddc(struct drm_encoder *encoder)
{
- struct intel_encoder *intel_encoder = to_intel_encoder(connector);
+ struct intel_encoder *intel_encoder = enc_to_intel_encoder(encoder);
/* CRT should always be at 0, but check anyway */
if (intel_encoder->type != INTEL_OUTPUT_ANALOG)
@@ -387,8 +404,8 @@ intel_crt_load_detect(struct drm_crtc *crtc, struct intel_encoder *intel_encoder
static enum drm_connector_status intel_crt_detect(struct drm_connector *connector)
{
struct drm_device *dev = connector->dev;
- struct intel_encoder *intel_encoder = to_intel_encoder(connector);
- struct drm_encoder *encoder = &intel_encoder->enc;
+ struct drm_encoder *encoder = intel_attached_encoder(connector);
+ struct intel_encoder *intel_encoder = enc_to_intel_encoder(encoder);
struct drm_crtc *crtc;
int dpms_mode;
enum drm_connector_status status;
@@ -400,18 +417,19 @@ static enum drm_connector_status intel_crt_detect(struct drm_connector *connecto
return connector_status_disconnected;
}
- if (intel_crt_detect_ddc(connector))
+ if (intel_crt_detect_ddc(encoder))
return connector_status_connected;
/* for pre-945g platforms use load detect */
if (encoder->crtc && encoder->crtc->enabled) {
status = intel_crt_load_detect(encoder->crtc, intel_encoder);
} else {
- crtc = intel_get_load_detect_pipe(intel_encoder,
+ crtc = intel_get_load_detect_pipe(intel_encoder, connector,
NULL, &dpms_mode);
if (crtc) {
status = intel_crt_load_detect(crtc, intel_encoder);
- intel_release_load_detect_pipe(intel_encoder, dpms_mode);
+ intel_release_load_detect_pipe(intel_encoder,
+ connector, dpms_mode);
} else
status = connector_status_unknown;
}
@@ -421,9 +439,6 @@ static enum drm_connector_status intel_crt_detect(struct drm_connector *connecto
static void intel_crt_destroy(struct drm_connector *connector)
{
- struct intel_encoder *intel_encoder = to_intel_encoder(connector);
-
- intel_i2c_destroy(intel_encoder->ddc_bus);
drm_sysfs_connector_remove(connector);
drm_connector_cleanup(connector);
kfree(connector);
@@ -432,29 +447,27 @@ static void intel_crt_destroy(struct drm_connector *connector)
static int intel_crt_get_modes(struct drm_connector *connector)
{
int ret;
- struct intel_encoder *intel_encoder = to_intel_encoder(connector);
- struct i2c_adapter *ddcbus;
+ struct drm_encoder *encoder = intel_attached_encoder(connector);
+ struct intel_encoder *intel_encoder = enc_to_intel_encoder(encoder);
+ struct i2c_adapter *ddc_bus;
struct drm_device *dev = connector->dev;
- ret = intel_ddc_get_modes(intel_encoder);
+ ret = intel_ddc_get_modes(connector, intel_encoder->ddc_bus);
if (ret || !IS_G4X(dev))
goto end;
- ddcbus = intel_encoder->ddc_bus;
/* Try to probe digital port for output in DVI-I -> VGA mode. */
- intel_encoder->ddc_bus =
- intel_i2c_create(connector->dev, GPIOD, "CRTDDC_D");
+ ddc_bus = intel_i2c_create(connector->dev, GPIOD, "CRTDDC_D");
- if (!intel_encoder->ddc_bus) {
- intel_encoder->ddc_bus = ddcbus;
+ if (!ddc_bus) {
dev_printk(KERN_ERR, &connector->dev->pdev->dev,
"DDC bus registration failed for CRTDDC_D.\n");
goto end;
}
/* Try to get modes by GPIOD port */
- ret = intel_ddc_get_modes(intel_encoder);
- intel_i2c_destroy(ddcbus);
+ ret = intel_ddc_get_modes(connector, ddc_bus);
+ intel_i2c_destroy(ddc_bus);
end:
return ret;
@@ -491,12 +504,16 @@ static const struct drm_connector_funcs intel_crt_connector_funcs = {
static const struct drm_connector_helper_funcs intel_crt_connector_helper_funcs = {
.mode_valid = intel_crt_mode_valid,
.get_modes = intel_crt_get_modes,
- .best_encoder = intel_best_encoder,
+ .best_encoder = intel_attached_encoder,
};
static void intel_crt_enc_destroy(struct drm_encoder *encoder)
{
+ struct intel_encoder *intel_encoder = enc_to_intel_encoder(encoder);
+
+ intel_i2c_destroy(intel_encoder->ddc_bus);
drm_encoder_cleanup(encoder);
+ kfree(intel_encoder);
}
static const struct drm_encoder_funcs intel_crt_enc_funcs = {
@@ -507,6 +524,7 @@ void intel_crt_init(struct drm_device *dev)
{
struct drm_connector *connector;
struct intel_encoder *intel_encoder;
+ struct intel_connector *intel_connector;
struct drm_i915_private *dev_priv = dev->dev_private;
u32 i2c_reg;
@@ -514,14 +532,20 @@ void intel_crt_init(struct drm_device *dev)
if (!intel_encoder)
return;
- connector = &intel_encoder->base;
- drm_connector_init(dev, &intel_encoder->base,
+ intel_connector = kzalloc(sizeof(struct intel_connector), GFP_KERNEL);
+ if (!intel_connector) {
+ kfree(intel_encoder);
+ return;
+ }
+
+ connector = &intel_connector->base;
+ drm_connector_init(dev, &intel_connector->base,
&intel_crt_connector_funcs, DRM_MODE_CONNECTOR_VGA);
drm_encoder_init(dev, &intel_encoder->enc, &intel_crt_enc_funcs,
DRM_MODE_ENCODER_DAC);
- drm_mode_connector_attach_encoder(&intel_encoder->base,
+ drm_mode_connector_attach_encoder(&intel_connector->base,
&intel_encoder->enc);
/* Set up the DDC bus. */
@@ -553,5 +577,10 @@ void intel_crt_init(struct drm_device *dev)
drm_sysfs_connector_add(connector);
+ if (I915_HAS_HOTPLUG(dev))
+ connector->polled = DRM_CONNECTOR_POLL_HPD;
+ else
+ connector->polled = DRM_CONNECTOR_POLL_CONNECT;
+
dev_priv->hotplug_supported_mask |= CRT_HOTPLUG_INT_STATUS;
}
diff --git a/drivers/gpu/drm/i915/intel_display.c b/drivers/gpu/drm/i915/intel_display.c
index e7356fb6c91..f469a84cacf 100644
--- a/drivers/gpu/drm/i915/intel_display.c
+++ b/drivers/gpu/drm/i915/intel_display.c
@@ -742,12 +742,11 @@ bool intel_pipe_has_type (struct drm_crtc *crtc, int type)
{
struct drm_device *dev = crtc->dev;
struct drm_mode_config *mode_config = &dev->mode_config;
- struct drm_connector *l_entry;
+ struct drm_encoder *l_entry;
- list_for_each_entry(l_entry, &mode_config->connector_list, head) {
- if (l_entry->encoder &&
- l_entry->encoder->crtc == crtc) {
- struct intel_encoder *intel_encoder = to_intel_encoder(l_entry);
+ list_for_each_entry(l_entry, &mode_config->encoder_list, head) {
+ if (l_entry && l_entry->crtc == crtc) {
+ struct intel_encoder *intel_encoder = enc_to_intel_encoder(l_entry);
if (intel_encoder->type == type)
return true;
}
@@ -755,23 +754,6 @@ bool intel_pipe_has_type (struct drm_crtc *crtc, int type)
return false;
}
-static struct drm_connector *
-intel_pipe_get_connector (struct drm_crtc *crtc)
-{
- struct drm_device *dev = crtc->dev;
- struct drm_mode_config *mode_config = &dev->mode_config;
- struct drm_connector *l_entry, *ret = NULL;
-
- list_for_each_entry(l_entry, &mode_config->connector_list, head) {
- if (l_entry->encoder &&
- l_entry->encoder->crtc == crtc) {
- ret = l_entry;
- break;
- }
- }
- return ret;
-}
-
#define INTELPllInvalid(s) do { /* DRM_DEBUG(s); */ return false; } while (0)
/**
* Returns whether the given set of divisors are valid for a given refclk with
@@ -905,9 +887,9 @@ intel_g4x_find_best_PLL(const intel_limit_t *limit, struct drm_crtc *crtc,
memset(best_clock, 0, sizeof(*best_clock));
max_n = limit->n.max;
- /* based on hardware requriment prefer smaller n to precision */
+ /* based on hardware requirement, prefer smaller n to precision */
for (clock.n = limit->n.min; clock.n <= max_n; clock.n++) {
- /* based on hardware requirment prefere larger m1,m2 */
+ /* based on hardware requirement, prefer larger m1,m2 */
for (clock.m1 = limit->m1.max;
clock.m1 >= limit->m1.min; clock.m1--) {
for (clock.m2 = limit->m2.max;
@@ -1066,9 +1048,8 @@ void i8xx_disable_fbc(struct drm_device *dev)
DRM_DEBUG_KMS("disabled FBC\n");
}
-static bool i8xx_fbc_enabled(struct drm_crtc *crtc)
+static bool i8xx_fbc_enabled(struct drm_device *dev)
{
- struct drm_device *dev = crtc->dev;
struct drm_i915_private *dev_priv = dev->dev_private;
return I915_READ(FBC_CONTROL) & FBC_CTL_EN;
@@ -1125,14 +1106,43 @@ void g4x_disable_fbc(struct drm_device *dev)
DRM_DEBUG_KMS("disabled FBC\n");
}
-static bool g4x_fbc_enabled(struct drm_crtc *crtc)
+static bool g4x_fbc_enabled(struct drm_device *dev)
{
- struct drm_device *dev = crtc->dev;
struct drm_i915_private *dev_priv = dev->dev_private;
return I915_READ(DPFC_CONTROL) & DPFC_CTL_EN;
}
+bool intel_fbc_enabled(struct drm_device *dev)
+{
+ struct drm_i915_private *dev_priv = dev->dev_private;
+
+ if (!dev_priv->display.fbc_enabled)
+ return false;
+
+ return dev_priv->display.fbc_enabled(dev);
+}
+
+void intel_enable_fbc(struct drm_crtc *crtc, unsigned long interval)
+{
+ struct drm_i915_private *dev_priv = crtc->dev->dev_private;
+
+ if (!dev_priv->display.enable_fbc)
+ return;
+
+ dev_priv->display.enable_fbc(crtc, interval);
+}
+
+void intel_disable_fbc(struct drm_device *dev)
+{
+ struct drm_i915_private *dev_priv = dev->dev_private;
+
+ if (!dev_priv->display.disable_fbc)
+ return;
+
+ dev_priv->display.disable_fbc(dev);
+}
+
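/*
 * A minimal standalone sketch of the null-guarded function-pointer
 * dispatch the three wrappers above share: platforms without FBC simply
 * leave the hook NULL. Names and types here are hypothetical, not from
 * this patch.
 */
#include <stdbool.h>
#include <stddef.h>
#include <stdio.h>

struct display_ops {
        bool (*fbc_enabled)(void *dev); /* NULL on platforms without FBC */
};

static bool sketch_fbc_enabled(const struct display_ops *ops, void *dev)
{
        if (!ops->fbc_enabled) /* no hook registered: report disabled */
                return false;
        return ops->fbc_enabled(dev);
}

static bool always_on(void *dev) { (void)dev; return true; }

int main(void)
{
        struct display_ops none = { .fbc_enabled = NULL };
        struct display_ops gm45 = { .fbc_enabled = always_on };

        /* prints "0 1": missing hook reads as disabled, real hook dispatches */
        printf("%d %d\n", sketch_fbc_enabled(&none, NULL),
               sketch_fbc_enabled(&gm45, NULL));
        return 0;
}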
/**
* intel_update_fbc - enable/disable FBC as needed
* @crtc: CRTC to point the compressor at
@@ -1167,9 +1177,7 @@ static void intel_update_fbc(struct drm_crtc *crtc,
if (!i915_powersave)
return;
- if (!dev_priv->display.fbc_enabled ||
- !dev_priv->display.enable_fbc ||
- !dev_priv->display.disable_fbc)
+ if (!I915_HAS_FBC(dev))
return;
if (!crtc->fb)
@@ -1216,28 +1224,25 @@ static void intel_update_fbc(struct drm_crtc *crtc,
goto out_disable;
}
- if (dev_priv->display.fbc_enabled(crtc)) {
+ if (intel_fbc_enabled(dev)) {
/* We can re-enable it in this case, but need to update pitch */
- if (fb->pitch > dev_priv->cfb_pitch)
- dev_priv->display.disable_fbc(dev);
- if (obj_priv->fence_reg != dev_priv->cfb_fence)
- dev_priv->display.disable_fbc(dev);
- if (plane != dev_priv->cfb_plane)
- dev_priv->display.disable_fbc(dev);
+ if ((fb->pitch > dev_priv->cfb_pitch) ||
+ (obj_priv->fence_reg != dev_priv->cfb_fence) ||
+ (plane != dev_priv->cfb_plane))
+ intel_disable_fbc(dev);
}
- if (!dev_priv->display.fbc_enabled(crtc)) {
- /* Now try to turn it back on if possible */
- dev_priv->display.enable_fbc(crtc, 500);
- }
+ /* Now try to turn it back on if possible */
+ if (!intel_fbc_enabled(dev))
+ intel_enable_fbc(crtc, 500);
return;
out_disable:
DRM_DEBUG_KMS("unsupported config, disabling FBC\n");
/* Multiple disables should be harmless */
- if (dev_priv->display.fbc_enabled(crtc))
- dev_priv->display.disable_fbc(dev);
+ if (intel_fbc_enabled(dev))
+ intel_disable_fbc(dev);
}
static int
@@ -1510,6 +1515,219 @@ static void ironlake_set_pll_edp (struct drm_crtc *crtc, int clock)
udelay(500);
}
+/* The FDI link training functions for ILK/Ibexpeak. */
+static void ironlake_fdi_link_train(struct drm_crtc *crtc)
+{
+ struct drm_device *dev = crtc->dev;
+ struct drm_i915_private *dev_priv = dev->dev_private;
+ struct intel_crtc *intel_crtc = to_intel_crtc(crtc);
+ int pipe = intel_crtc->pipe;
+ int fdi_tx_reg = (pipe == 0) ? FDI_TXA_CTL : FDI_TXB_CTL;
+ int fdi_rx_reg = (pipe == 0) ? FDI_RXA_CTL : FDI_RXB_CTL;
+ int fdi_rx_iir_reg = (pipe == 0) ? FDI_RXA_IIR : FDI_RXB_IIR;
+ int fdi_rx_imr_reg = (pipe == 0) ? FDI_RXA_IMR : FDI_RXB_IMR;
+ u32 temp, tries = 0;
+
+ /* enable CPU FDI TX and PCH FDI RX */
+ temp = I915_READ(fdi_tx_reg);
+ temp |= FDI_TX_ENABLE;
+ temp &= ~(7 << 19);
+ temp |= (intel_crtc->fdi_lanes - 1) << 19;
+ temp &= ~FDI_LINK_TRAIN_NONE;
+ temp |= FDI_LINK_TRAIN_PATTERN_1;
+ I915_WRITE(fdi_tx_reg, temp);
+ I915_READ(fdi_tx_reg);
+
+ temp = I915_READ(fdi_rx_reg);
+ temp &= ~FDI_LINK_TRAIN_NONE;
+ temp |= FDI_LINK_TRAIN_PATTERN_1;
+ I915_WRITE(fdi_rx_reg, temp | FDI_RX_ENABLE);
+ I915_READ(fdi_rx_reg);
+ udelay(150);
+
+ /* Train 1: unmask FDI RX Interrupt symbol_lock and bit_lock bits
+ for the train result */
+ temp = I915_READ(fdi_rx_imr_reg);
+ temp &= ~FDI_RX_SYMBOL_LOCK;
+ temp &= ~FDI_RX_BIT_LOCK;
+ I915_WRITE(fdi_rx_imr_reg, temp);
+ I915_READ(fdi_rx_imr_reg);
+ udelay(150);
+
+ for (;;) {
+ temp = I915_READ(fdi_rx_iir_reg);
+ DRM_DEBUG_KMS("FDI_RX_IIR 0x%x\n", temp);
+
+ if (temp & FDI_RX_BIT_LOCK) {
+ DRM_DEBUG_KMS("FDI train 1 done.\n");
+ I915_WRITE(fdi_rx_iir_reg,
+ temp | FDI_RX_BIT_LOCK);
+ break;
+ }
+
+ tries++;
+
+ if (tries > 5) {
+ DRM_DEBUG_KMS("FDI train 1 fail!\n");
+ break;
+ }
+ }
+
+ /* Train 2 */
+ temp = I915_READ(fdi_tx_reg);
+ temp &= ~FDI_LINK_TRAIN_NONE;
+ temp |= FDI_LINK_TRAIN_PATTERN_2;
+ I915_WRITE(fdi_tx_reg, temp);
+
+ temp = I915_READ(fdi_rx_reg);
+ temp &= ~FDI_LINK_TRAIN_NONE;
+ temp |= FDI_LINK_TRAIN_PATTERN_2;
+ I915_WRITE(fdi_rx_reg, temp);
+ udelay(150);
+
+ tries = 0;
+
+ for (;;) {
+ temp = I915_READ(fdi_rx_iir_reg);
+ DRM_DEBUG_KMS("FDI_RX_IIR 0x%x\n", temp);
+
+ if (temp & FDI_RX_SYMBOL_LOCK) {
+ I915_WRITE(fdi_rx_iir_reg,
+ temp | FDI_RX_SYMBOL_LOCK);
+ DRM_DEBUG_KMS("FDI train 2 done.\n");
+ break;
+ }
+
+ tries++;
+
+ if (tries > 5) {
+ DRM_DEBUG_KMS("FDI train 2 fail!\n");
+ break;
+ }
+ }
+
+ DRM_DEBUG_KMS("FDI train done\n");
+}
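/*
 * Both training stages above reduce to the same bounded poll: program a
 * pattern, then spin on a status register until a lock bit appears or the
 * retry budget runs out. A standalone sketch of that loop shape; the
 * register accessor and bit layout are made up, not the driver's API.
 */
#include <stdbool.h>
#include <stdint.h>
#include <stdio.h>

#define LOCK_BIT (1u << 8)

static uint32_t fake_iir = LOCK_BIT; /* stands in for the FDI RX IIR register */

static uint32_t read_iir(void) { return fake_iir; }

static bool poll_for_lock(int budget)
{
        for (int tries = 0; tries <= budget; tries++) {
                if (read_iir() & LOCK_BIT)
                        return true; /* locked; the driver then acks the bit */
        }
        return false; /* budget spent: this training stage failed */
}

int main(void)
{
        printf("locked: %d\n", poll_for_lock(5)); /* prints "locked: 1" */
        return 0;
}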
+
+static int snb_b_fdi_train_param[] = {
+ FDI_LINK_TRAIN_400MV_0DB_SNB_B,
+ FDI_LINK_TRAIN_400MV_6DB_SNB_B,
+ FDI_LINK_TRAIN_600MV_3_5DB_SNB_B,
+ FDI_LINK_TRAIN_800MV_0DB_SNB_B,
+};
+
+/* The FDI link training functions for SNB/Cougarpoint. */
+static void gen6_fdi_link_train(struct drm_crtc *crtc)
+{
+ struct drm_device *dev = crtc->dev;
+ struct drm_i915_private *dev_priv = dev->dev_private;
+ struct intel_crtc *intel_crtc = to_intel_crtc(crtc);
+ int pipe = intel_crtc->pipe;
+ int fdi_tx_reg = (pipe == 0) ? FDI_TXA_CTL : FDI_TXB_CTL;
+ int fdi_rx_reg = (pipe == 0) ? FDI_RXA_CTL : FDI_RXB_CTL;
+ int fdi_rx_iir_reg = (pipe == 0) ? FDI_RXA_IIR : FDI_RXB_IIR;
+ int fdi_rx_imr_reg = (pipe == 0) ? FDI_RXA_IMR : FDI_RXB_IMR;
+ u32 temp, i;
+
+ /* enable CPU FDI TX and PCH FDI RX */
+ temp = I915_READ(fdi_tx_reg);
+ temp |= FDI_TX_ENABLE;
+ temp &= ~(7 << 19);
+ temp |= (intel_crtc->fdi_lanes - 1) << 19;
+ temp &= ~FDI_LINK_TRAIN_NONE;
+ temp |= FDI_LINK_TRAIN_PATTERN_1;
+ temp &= ~FDI_LINK_TRAIN_VOL_EMP_MASK;
+ /* SNB-B */
+ temp |= FDI_LINK_TRAIN_400MV_0DB_SNB_B;
+ I915_WRITE(fdi_tx_reg, temp);
+ I915_READ(fdi_tx_reg);
+
+ temp = I915_READ(fdi_rx_reg);
+ if (HAS_PCH_CPT(dev)) {
+ temp &= ~FDI_LINK_TRAIN_PATTERN_MASK_CPT;
+ temp |= FDI_LINK_TRAIN_PATTERN_1_CPT;
+ } else {
+ temp &= ~FDI_LINK_TRAIN_NONE;
+ temp |= FDI_LINK_TRAIN_PATTERN_1;
+ }
+ I915_WRITE(fdi_rx_reg, temp | FDI_RX_ENABLE);
+ I915_READ(fdi_rx_reg);
+ udelay(150);
+
+ /* Train 1: unmask FDI RX Interrupt symbol_lock and bit_lock bits
+ for the train result */
+ temp = I915_READ(fdi_rx_imr_reg);
+ temp &= ~FDI_RX_SYMBOL_LOCK;
+ temp &= ~FDI_RX_BIT_LOCK;
+ I915_WRITE(fdi_rx_imr_reg, temp);
+ I915_READ(fdi_rx_imr_reg);
+ udelay(150);
+
+ for (i = 0; i < 4; i++) {
+ temp = I915_READ(fdi_tx_reg);
+ temp &= ~FDI_LINK_TRAIN_VOL_EMP_MASK;
+ temp |= snb_b_fdi_train_param[i];
+ I915_WRITE(fdi_tx_reg, temp);
+ udelay(500);
+
+ temp = I915_READ(fdi_rx_iir_reg);
+ DRM_DEBUG_KMS("FDI_RX_IIR 0x%x\n", temp);
+
+ if (temp & FDI_RX_BIT_LOCK) {
+ I915_WRITE(fdi_rx_iir_reg,
+ temp | FDI_RX_BIT_LOCK);
+ DRM_DEBUG_KMS("FDI train 1 done.\n");
+ break;
+ }
+ }
+ if (i == 4)
+ DRM_DEBUG_KMS("FDI train 1 fail!\n");
+
+ /* Train 2 */
+ temp = I915_READ(fdi_tx_reg);
+ temp &= ~FDI_LINK_TRAIN_NONE;
+ temp |= FDI_LINK_TRAIN_PATTERN_2;
+ if (IS_GEN6(dev)) {
+ temp &= ~FDI_LINK_TRAIN_VOL_EMP_MASK;
+ /* SNB-B */
+ temp |= FDI_LINK_TRAIN_400MV_0DB_SNB_B;
+ }
+ I915_WRITE(fdi_tx_reg, temp);
+
+ temp = I915_READ(fdi_rx_reg);
+ if (HAS_PCH_CPT(dev)) {
+ temp &= ~FDI_LINK_TRAIN_PATTERN_MASK_CPT;
+ temp |= FDI_LINK_TRAIN_PATTERN_2_CPT;
+ } else {
+ temp &= ~FDI_LINK_TRAIN_NONE;
+ temp |= FDI_LINK_TRAIN_PATTERN_2;
+ }
+ I915_WRITE(fdi_rx_reg, temp);
+ udelay(150);
+
+ for (i = 0; i < 4; i++) {
+ temp = I915_READ(fdi_tx_reg);
+ temp &= ~FDI_LINK_TRAIN_VOL_EMP_MASK;
+ temp |= snb_b_fdi_train_param[i];
+ I915_WRITE(fdi_tx_reg, temp);
+ udelay(500);
+
+ temp = I915_READ(fdi_rx_iir_reg);
+ DRM_DEBUG_KMS("FDI_RX_IIR 0x%x\n", temp);
+
+ if (temp & FDI_RX_SYMBOL_LOCK) {
+ I915_WRITE(fdi_rx_iir_reg,
+ temp | FDI_RX_SYMBOL_LOCK);
+ DRM_DEBUG_KMS("FDI train 2 done.\n");
+ break;
+ }
+ }
+ if (i == 4)
+ DRM_DEBUG_KMS("FDI train 2 fail!\n");
+
+ DRM_DEBUG_KMS("FDI train done.\n");
+}
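/*
 * Unlike the ILK loop above, gen6 steps through the four voltage-swing/
 * pre-emphasis combinations in snb_b_fdi_train_param on each stage before
 * declaring failure. A standalone sketch of that sweep, with a simulated
 * lock condition in place of real hardware state.
 */
#include <stdbool.h>
#include <stdio.h>

static const int train_param[] = { 0, 1, 2, 3 }; /* stand-ins for the 4 combos */

static bool link_locks(int param) { return param == 2; } /* pretend combo 2 works */

int main(void)
{
        int i;

        for (i = 0; i < 4; i++) {
                /* program the next vswing/emphasis combo, then test for lock */
                if (link_locks(train_param[i])) {
                        printf("train done with param %d\n", i);
                        break;
                }
        }
        if (i == 4)
                printf("train fail\n"); /* same i == 4 sentinel as the driver */
        return 0;
}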
+
static void ironlake_crtc_dpms(struct drm_crtc *crtc, int mode)
{
struct drm_device *dev = crtc->dev;
@@ -1523,8 +1741,6 @@ static void ironlake_crtc_dpms(struct drm_crtc *crtc, int mode)
int dspbase_reg = (plane == 0) ? DSPAADDR : DSPBADDR;
int fdi_tx_reg = (pipe == 0) ? FDI_TXA_CTL : FDI_TXB_CTL;
int fdi_rx_reg = (pipe == 0) ? FDI_RXA_CTL : FDI_RXB_CTL;
- int fdi_rx_iir_reg = (pipe == 0) ? FDI_RXA_IIR : FDI_RXB_IIR;
- int fdi_rx_imr_reg = (pipe == 0) ? FDI_RXA_IMR : FDI_RXB_IMR;
int transconf_reg = (pipe == 0) ? TRANSACONF : TRANSBCONF;
int pf_ctl_reg = (pipe == 0) ? PFA_CTL_1 : PFB_CTL_1;
int pf_win_size = (pipe == 0) ? PFA_WIN_SZ : PFB_WIN_SZ;
@@ -1541,8 +1757,9 @@ static void ironlake_crtc_dpms(struct drm_crtc *crtc, int mode)
int trans_vtot_reg = (pipe == 0) ? TRANS_VTOTAL_A : TRANS_VTOTAL_B;
int trans_vblank_reg = (pipe == 0) ? TRANS_VBLANK_A : TRANS_VBLANK_B;
int trans_vsync_reg = (pipe == 0) ? TRANS_VSYNC_A : TRANS_VSYNC_B;
+ int trans_dpll_sel = (pipe == 0) ? 0 : 1;
u32 temp;
- int tries = 5, j, n;
+ int n;
u32 pipe_bpc;
temp = I915_READ(pipeconf_reg);
@@ -1569,12 +1786,6 @@ static void ironlake_crtc_dpms(struct drm_crtc *crtc, int mode)
/* enable eDP PLL */
ironlake_enable_pll_edp(crtc);
} else {
- /* enable PCH DPLL */
- temp = I915_READ(pch_dpll_reg);
- if ((temp & DPLL_VCO_ENABLE) == 0) {
- I915_WRITE(pch_dpll_reg, temp | DPLL_VCO_ENABLE);
- I915_READ(pch_dpll_reg);
- }
/* enable PCH FDI RX PLL, wait warmup plus DMI latency */
temp = I915_READ(fdi_rx_reg);
@@ -1584,9 +1795,15 @@ static void ironlake_crtc_dpms(struct drm_crtc *crtc, int mode)
*/
temp &= ~(0x7 << 16);
temp |= (pipe_bpc << 11);
- I915_WRITE(fdi_rx_reg, temp | FDI_RX_PLL_ENABLE |
- FDI_SEL_PCDCLK |
- FDI_DP_PORT_WIDTH_X4); /* default 4 lanes */
+ temp &= ~(7 << 19);
+ temp |= (intel_crtc->fdi_lanes - 1) << 19;
+ I915_WRITE(fdi_rx_reg, temp | FDI_RX_PLL_ENABLE);
+ I915_READ(fdi_rx_reg);
+ udelay(200);
+
+ /* Switch from Rawclk to PCDclk */
+ temp = I915_READ(fdi_rx_reg);
+ I915_WRITE(fdi_rx_reg, temp | FDI_SEL_PCDCLK);
I915_READ(fdi_rx_reg);
udelay(200);
@@ -1629,91 +1846,32 @@ static void ironlake_crtc_dpms(struct drm_crtc *crtc, int mode)
}
if (!HAS_eDP) {
- /* enable CPU FDI TX and PCH FDI RX */
- temp = I915_READ(fdi_tx_reg);
- temp |= FDI_TX_ENABLE;
- temp |= FDI_DP_PORT_WIDTH_X4; /* default */
- temp &= ~FDI_LINK_TRAIN_NONE;
- temp |= FDI_LINK_TRAIN_PATTERN_1;
- I915_WRITE(fdi_tx_reg, temp);
- I915_READ(fdi_tx_reg);
-
- temp = I915_READ(fdi_rx_reg);
- temp &= ~FDI_LINK_TRAIN_NONE;
- temp |= FDI_LINK_TRAIN_PATTERN_1;
- I915_WRITE(fdi_rx_reg, temp | FDI_RX_ENABLE);
- I915_READ(fdi_rx_reg);
-
- udelay(150);
-
- /* Train FDI. */
- /* umask FDI RX Interrupt symbol_lock and bit_lock bit
- for train result */
- temp = I915_READ(fdi_rx_imr_reg);
- temp &= ~FDI_RX_SYMBOL_LOCK;
- temp &= ~FDI_RX_BIT_LOCK;
- I915_WRITE(fdi_rx_imr_reg, temp);
- I915_READ(fdi_rx_imr_reg);
- udelay(150);
+ /* For PCH output, train the FDI link */
+ if (IS_GEN6(dev))
+ gen6_fdi_link_train(crtc);
+ else
+ ironlake_fdi_link_train(crtc);
- temp = I915_READ(fdi_rx_iir_reg);
- DRM_DEBUG_KMS("FDI_RX_IIR 0x%x\n", temp);
-
- if ((temp & FDI_RX_BIT_LOCK) == 0) {
- for (j = 0; j < tries; j++) {
- temp = I915_READ(fdi_rx_iir_reg);
- DRM_DEBUG_KMS("FDI_RX_IIR 0x%x\n",
- temp);
- if (temp & FDI_RX_BIT_LOCK)
- break;
- udelay(200);
- }
- if (j != tries)
- I915_WRITE(fdi_rx_iir_reg,
- temp | FDI_RX_BIT_LOCK);
- else
- DRM_DEBUG_KMS("train 1 fail\n");
- } else {
- I915_WRITE(fdi_rx_iir_reg,
- temp | FDI_RX_BIT_LOCK);
- DRM_DEBUG_KMS("train 1 ok 2!\n");
+ /* enable PCH DPLL */
+ temp = I915_READ(pch_dpll_reg);
+ if ((temp & DPLL_VCO_ENABLE) == 0) {
+ I915_WRITE(pch_dpll_reg, temp | DPLL_VCO_ENABLE);
+ I915_READ(pch_dpll_reg);
}
- temp = I915_READ(fdi_tx_reg);
- temp &= ~FDI_LINK_TRAIN_NONE;
- temp |= FDI_LINK_TRAIN_PATTERN_2;
- I915_WRITE(fdi_tx_reg, temp);
-
- temp = I915_READ(fdi_rx_reg);
- temp &= ~FDI_LINK_TRAIN_NONE;
- temp |= FDI_LINK_TRAIN_PATTERN_2;
- I915_WRITE(fdi_rx_reg, temp);
-
- udelay(150);
+ udelay(200);
- temp = I915_READ(fdi_rx_iir_reg);
- DRM_DEBUG_KMS("FDI_RX_IIR 0x%x\n", temp);
-
- if ((temp & FDI_RX_SYMBOL_LOCK) == 0) {
- for (j = 0; j < tries; j++) {
- temp = I915_READ(fdi_rx_iir_reg);
- DRM_DEBUG_KMS("FDI_RX_IIR 0x%x\n",
- temp);
- if (temp & FDI_RX_SYMBOL_LOCK)
- break;
- udelay(200);
- }
- if (j != tries) {
- I915_WRITE(fdi_rx_iir_reg,
- temp | FDI_RX_SYMBOL_LOCK);
- DRM_DEBUG_KMS("train 2 ok 1!\n");
- } else
- DRM_DEBUG_KMS("train 2 fail\n");
- } else {
- I915_WRITE(fdi_rx_iir_reg,
- temp | FDI_RX_SYMBOL_LOCK);
- DRM_DEBUG_KMS("train 2 ok 2!\n");
+ if (HAS_PCH_CPT(dev)) {
+ /* Be sure PCH DPLL SEL is set */
+ temp = I915_READ(PCH_DPLL_SEL);
+ if (trans_dpll_sel == 0 &&
+ (temp & TRANSA_DPLL_ENABLE) == 0)
+ temp |= (TRANSA_DPLL_ENABLE | TRANSA_DPLLA_SEL);
+ else if (trans_dpll_sel == 1 &&
+ (temp & TRANSB_DPLL_ENABLE) == 0)
+ temp |= (TRANSB_DPLL_ENABLE | TRANSB_DPLLB_SEL);
+ I915_WRITE(PCH_DPLL_SEL, temp);
+ I915_READ(PCH_DPLL_SEL);
}
- DRM_DEBUG_KMS("train done\n");
/* set transcoder timing */
I915_WRITE(trans_htot_reg, I915_READ(cpu_htot_reg));
@@ -1724,6 +1882,60 @@ static void ironlake_crtc_dpms(struct drm_crtc *crtc, int mode)
I915_WRITE(trans_vblank_reg, I915_READ(cpu_vblank_reg));
I915_WRITE(trans_vsync_reg, I915_READ(cpu_vsync_reg));
+ /* enable normal train */
+ temp = I915_READ(fdi_tx_reg);
+ temp &= ~FDI_LINK_TRAIN_NONE;
+ I915_WRITE(fdi_tx_reg, temp | FDI_LINK_TRAIN_NONE |
+ FDI_TX_ENHANCE_FRAME_ENABLE);
+ I915_READ(fdi_tx_reg);
+
+ temp = I915_READ(fdi_rx_reg);
+ if (HAS_PCH_CPT(dev)) {
+ temp &= ~FDI_LINK_TRAIN_PATTERN_MASK_CPT;
+ temp |= FDI_LINK_TRAIN_NORMAL_CPT;
+ } else {
+ temp &= ~FDI_LINK_TRAIN_NONE;
+ temp |= FDI_LINK_TRAIN_NONE;
+ }
+ I915_WRITE(fdi_rx_reg, temp | FDI_RX_ENHANCE_FRAME_ENABLE);
+ I915_READ(fdi_rx_reg);
+
+ /* wait one idle pattern time */
+ udelay(100);
+
+ /* For PCH DP, enable TRANS_DP_CTL */
+ if (HAS_PCH_CPT(dev) &&
+ intel_pipe_has_type(crtc, INTEL_OUTPUT_DISPLAYPORT)) {
+ int trans_dp_ctl = (pipe == 0) ? TRANS_DP_CTL_A : TRANS_DP_CTL_B;
+ int reg;
+
+ reg = I915_READ(trans_dp_ctl);
+ reg &= ~TRANS_DP_PORT_SEL_MASK;
+ reg = TRANS_DP_OUTPUT_ENABLE |
+ TRANS_DP_ENH_FRAMING |
+ TRANS_DP_VSYNC_ACTIVE_HIGH |
+ TRANS_DP_HSYNC_ACTIVE_HIGH;
+
+ switch (intel_trans_dp_port_sel(crtc)) {
+ case PCH_DP_B:
+ reg |= TRANS_DP_PORT_SEL_B;
+ break;
+ case PCH_DP_C:
+ reg |= TRANS_DP_PORT_SEL_C;
+ break;
+ case PCH_DP_D:
+ reg |= TRANS_DP_PORT_SEL_D;
+ break;
+ default:
+ DRM_DEBUG_KMS("Wrong PCH DP port return. Guess port B\n");
+ reg |= TRANS_DP_PORT_SEL_B;
+ break;
+ }
+
+ I915_WRITE(trans_dp_ctl, reg);
+ POSTING_READ(trans_dp_ctl);
+ }
+
/* enable PCH transcoder */
temp = I915_READ(transconf_reg);
/*
@@ -1738,23 +1950,6 @@ static void ironlake_crtc_dpms(struct drm_crtc *crtc, int mode)
while ((I915_READ(transconf_reg) & TRANS_STATE_ENABLE) == 0)
;
- /* enable normal */
-
- temp = I915_READ(fdi_tx_reg);
- temp &= ~FDI_LINK_TRAIN_NONE;
- I915_WRITE(fdi_tx_reg, temp | FDI_LINK_TRAIN_NONE |
- FDI_TX_ENHANCE_FRAME_ENABLE);
- I915_READ(fdi_tx_reg);
-
- temp = I915_READ(fdi_rx_reg);
- temp &= ~FDI_LINK_TRAIN_NONE;
- I915_WRITE(fdi_rx_reg, temp | FDI_LINK_TRAIN_NONE |
- FDI_RX_ENHANCE_FRAME_ENABLE);
- I915_READ(fdi_rx_reg);
-
- /* wait one idle pattern time */
- udelay(100);
-
}
intel_crtc_load_lut(crtc);
@@ -1805,6 +2000,8 @@ static void ironlake_crtc_dpms(struct drm_crtc *crtc, int mode)
I915_READ(pf_ctl_reg);
}
I915_WRITE(pf_win_size, 0);
+ POSTING_READ(pf_win_size);
+
/* disable CPU FDI tx and PCH FDI rx */
temp = I915_READ(fdi_tx_reg);
@@ -1825,11 +2022,18 @@ static void ironlake_crtc_dpms(struct drm_crtc *crtc, int mode)
temp &= ~FDI_LINK_TRAIN_NONE;
temp |= FDI_LINK_TRAIN_PATTERN_1;
I915_WRITE(fdi_tx_reg, temp);
+ POSTING_READ(fdi_tx_reg);
temp = I915_READ(fdi_rx_reg);
- temp &= ~FDI_LINK_TRAIN_NONE;
- temp |= FDI_LINK_TRAIN_PATTERN_1;
+ if (HAS_PCH_CPT(dev)) {
+ temp &= ~FDI_LINK_TRAIN_PATTERN_MASK_CPT;
+ temp |= FDI_LINK_TRAIN_PATTERN_1_CPT;
+ } else {
+ temp &= ~FDI_LINK_TRAIN_NONE;
+ temp |= FDI_LINK_TRAIN_PATTERN_1;
+ }
I915_WRITE(fdi_rx_reg, temp);
+ POSTING_READ(fdi_rx_reg);
udelay(100);
@@ -1859,6 +2063,7 @@ static void ironlake_crtc_dpms(struct drm_crtc *crtc, int mode)
}
}
}
+
temp = I915_READ(transconf_reg);
/* BPC in transcoder is consistent with that in pipeconf */
temp &= ~PIPE_BPC_MASK;
@@ -1867,35 +2072,53 @@ static void ironlake_crtc_dpms(struct drm_crtc *crtc, int mode)
I915_READ(transconf_reg);
udelay(100);
+ if (HAS_PCH_CPT(dev)) {
+ /* disable TRANS_DP_CTL */
+ int trans_dp_ctl = (pipe == 0) ? TRANS_DP_CTL_A : TRANS_DP_CTL_B;
+ int reg;
+
+ reg = I915_READ(trans_dp_ctl);
+ reg &= ~(TRANS_DP_OUTPUT_ENABLE | TRANS_DP_PORT_SEL_MASK);
+ I915_WRITE(trans_dp_ctl, reg);
+ POSTING_READ(trans_dp_ctl);
+
+ /* disable DPLL_SEL */
+ temp = I915_READ(PCH_DPLL_SEL);
+ if (trans_dpll_sel == 0)
+ temp &= ~(TRANSA_DPLL_ENABLE | TRANSA_DPLLB_SEL);
+ else
+ temp &= ~(TRANSB_DPLL_ENABLE | TRANSB_DPLLB_SEL);
+ I915_WRITE(PCH_DPLL_SEL, temp);
+ I915_READ(PCH_DPLL_SEL);
+
+ }
+
/* disable PCH DPLL */
temp = I915_READ(pch_dpll_reg);
- if ((temp & DPLL_VCO_ENABLE) != 0) {
- I915_WRITE(pch_dpll_reg, temp & ~DPLL_VCO_ENABLE);
- I915_READ(pch_dpll_reg);
- }
+ I915_WRITE(pch_dpll_reg, temp & ~DPLL_VCO_ENABLE);
+ I915_READ(pch_dpll_reg);
if (HAS_eDP) {
ironlake_disable_pll_edp(crtc);
}
+ /* Switch from PCDclk to Rawclk */
temp = I915_READ(fdi_rx_reg);
temp &= ~FDI_SEL_PCDCLK;
I915_WRITE(fdi_rx_reg, temp);
I915_READ(fdi_rx_reg);
+ /* Disable CPU FDI TX PLL */
+ temp = I915_READ(fdi_tx_reg);
+ I915_WRITE(fdi_tx_reg, temp & ~FDI_TX_PLL_ENABLE);
+ I915_READ(fdi_tx_reg);
+ udelay(100);
+
temp = I915_READ(fdi_rx_reg);
temp &= ~FDI_RX_PLL_ENABLE;
I915_WRITE(fdi_rx_reg, temp);
I915_READ(fdi_rx_reg);
- /* Disable CPU FDI TX PLL */
- temp = I915_READ(fdi_tx_reg);
- if ((temp & FDI_TX_PLL_ENABLE) != 0) {
- I915_WRITE(fdi_tx_reg, temp & ~FDI_TX_PLL_ENABLE);
- I915_READ(fdi_tx_reg);
- udelay(100);
- }
-
/* Wait for the clocks to turn off. */
udelay(100);
break;
@@ -2331,6 +2554,30 @@ static struct intel_watermark_params i830_wm_info = {
I830_FIFO_LINE_SIZE
};
+static struct intel_watermark_params ironlake_display_wm_info = {
+ ILK_DISPLAY_FIFO,
+ ILK_DISPLAY_MAXWM,
+ ILK_DISPLAY_DFTWM,
+ 2,
+ ILK_FIFO_LINE_SIZE
+};
+
+static struct intel_watermark_params ironlake_display_srwm_info = {
+ ILK_DISPLAY_SR_FIFO,
+ ILK_DISPLAY_MAX_SRWM,
+ ILK_DISPLAY_DFT_SRWM,
+ 2,
+ ILK_FIFO_LINE_SIZE
+};
+
+static struct intel_watermark_params ironlake_cursor_srwm_info = {
+ ILK_CURSOR_SR_FIFO,
+ ILK_CURSOR_MAX_SRWM,
+ ILK_CURSOR_DFT_SRWM,
+ 2,
+ ILK_FIFO_LINE_SIZE
+};
+
/**
* intel_calculate_wm - calculate watermark level
* @clock_in_khz: pixel clock
@@ -2449,66 +2696,6 @@ static void pineview_disable_cxsr(struct drm_device *dev)
DRM_INFO("Big FIFO is disabled\n");
}
-static void pineview_enable_cxsr(struct drm_device *dev, unsigned long clock,
- int pixel_size)
-{
- struct drm_i915_private *dev_priv = dev->dev_private;
- u32 reg;
- unsigned long wm;
- struct cxsr_latency *latency;
-
- latency = intel_get_cxsr_latency(IS_PINEVIEW_G(dev), dev_priv->fsb_freq,
- dev_priv->mem_freq);
- if (!latency) {
- DRM_DEBUG_KMS("Unknown FSB/MEM found, disable CxSR\n");
- pineview_disable_cxsr(dev);
- return;
- }
-
- /* Display SR */
- wm = intel_calculate_wm(clock, &pineview_display_wm, pixel_size,
- latency->display_sr);
- reg = I915_READ(DSPFW1);
- reg &= 0x7fffff;
- reg |= wm << 23;
- I915_WRITE(DSPFW1, reg);
- DRM_DEBUG_KMS("DSPFW1 register is %x\n", reg);
-
- /* cursor SR */
- wm = intel_calculate_wm(clock, &pineview_cursor_wm, pixel_size,
- latency->cursor_sr);
- reg = I915_READ(DSPFW3);
- reg &= ~(0x3f << 24);
- reg |= (wm & 0x3f) << 24;
- I915_WRITE(DSPFW3, reg);
-
- /* Display HPLL off SR */
- wm = intel_calculate_wm(clock, &pineview_display_hplloff_wm,
- latency->display_hpll_disable, I915_FIFO_LINE_SIZE);
- reg = I915_READ(DSPFW3);
- reg &= 0xfffffe00;
- reg |= wm & 0x1ff;
- I915_WRITE(DSPFW3, reg);
-
- /* cursor HPLL off SR */
- wm = intel_calculate_wm(clock, &pineview_cursor_hplloff_wm, pixel_size,
- latency->cursor_hpll_disable);
- reg = I915_READ(DSPFW3);
- reg &= ~(0x3f << 16);
- reg |= (wm & 0x3f) << 16;
- I915_WRITE(DSPFW3, reg);
- DRM_DEBUG_KMS("DSPFW3 register is %x\n", reg);
-
- /* activate cxsr */
- reg = I915_READ(DSPFW3);
- reg |= PINEVIEW_SELF_REFRESH_EN;
- I915_WRITE(DSPFW3, reg);
-
- DRM_INFO("Big FIFO is enabled\n");
-
- return;
-}
-
/*
* Latency for FIFO fetches is dependent on several factors:
* - memory configuration (speed, channels)
@@ -2593,6 +2780,71 @@ static int i830_get_fifo_size(struct drm_device *dev, int plane)
return size;
}
+static void pineview_update_wm(struct drm_device *dev, int planea_clock,
+ int planeb_clock, int sr_hdisplay, int pixel_size)
+{
+ struct drm_i915_private *dev_priv = dev->dev_private;
+ u32 reg;
+ unsigned long wm;
+ struct cxsr_latency *latency;
+ int sr_clock;
+
+ latency = intel_get_cxsr_latency(IS_PINEVIEW_G(dev), dev_priv->fsb_freq,
+ dev_priv->mem_freq);
+ if (!latency) {
+ DRM_DEBUG_KMS("Unknown FSB/MEM found, disable CxSR\n");
+ pineview_disable_cxsr(dev);
+ return;
+ }
+
+ if (!planea_clock || !planeb_clock) {
+ sr_clock = planea_clock ? planea_clock : planeb_clock;
+
+ /* Display SR */
+ wm = intel_calculate_wm(sr_clock, &pineview_display_wm,
+ pixel_size, latency->display_sr);
+ reg = I915_READ(DSPFW1);
+ reg &= ~DSPFW_SR_MASK;
+ reg |= wm << DSPFW_SR_SHIFT;
+ I915_WRITE(DSPFW1, reg);
+ DRM_DEBUG_KMS("DSPFW1 register is %x\n", reg);
+
+ /* cursor SR */
+ wm = intel_calculate_wm(sr_clock, &pineview_cursor_wm,
+ pixel_size, latency->cursor_sr);
+ reg = I915_READ(DSPFW3);
+ reg &= ~DSPFW_CURSOR_SR_MASK;
+ reg |= (wm & 0x3f) << DSPFW_CURSOR_SR_SHIFT;
+ I915_WRITE(DSPFW3, reg);
+
+ /* Display HPLL off SR */
+ wm = intel_calculate_wm(sr_clock, &pineview_display_hplloff_wm,
+ pixel_size, latency->display_hpll_disable);
+ reg = I915_READ(DSPFW3);
+ reg &= ~DSPFW_HPLL_SR_MASK;
+ reg |= wm & DSPFW_HPLL_SR_MASK;
+ I915_WRITE(DSPFW3, reg);
+
+ /* cursor HPLL off SR */
+ wm = intel_calculate_wm(sr_clock, &pineview_cursor_hplloff_wm,
+ pixel_size, latency->cursor_hpll_disable);
+ reg = I915_READ(DSPFW3);
+ reg &= ~DSPFW_HPLL_CURSOR_MASK;
+ reg |= (wm & 0x3f) << DSPFW_HPLL_CURSOR_SHIFT;
+ I915_WRITE(DSPFW3, reg);
+ DRM_DEBUG_KMS("DSPFW3 register is %x\n", reg);
+
+ /* activate cxsr */
+ reg = I915_READ(DSPFW3);
+ reg |= PINEVIEW_SELF_REFRESH_EN;
+ I915_WRITE(DSPFW3, reg);
+ DRM_DEBUG_KMS("Self-refresh is enabled\n");
+ } else {
+ pineview_disable_cxsr(dev);
+ DRM_DEBUG_KMS("Self-refresh is disabled\n");
+ }
+}
+
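/*
 * Every DSPFW update above is the same read-modify-write on a packed
 * bit-field: clear the field's mask, then OR in the new watermark shifted
 * into place. A standalone sketch of that update; the field layout below
 * is invented, not the real DSPFW encoding.
 */
#include <stdint.h>
#include <stdio.h>

#define SR_MASK  (0x1ffu << 23)
#define SR_SHIFT 23

static uint32_t set_sr_wm(uint32_t reg, uint32_t wm)
{
        reg &= ~SR_MASK;                   /* drop the old watermark */
        reg |= (wm << SR_SHIFT) & SR_MASK; /* insert the new one */
        return reg;
}

int main(void)
{
        /* prints 0x217fffff: only the 9-bit field changed, the rest kept */
        printf("0x%08x\n", set_sr_wm(0xffffffffu, 0x42));
        return 0;
}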
static void g4x_update_wm(struct drm_device *dev, int planea_clock,
int planeb_clock, int sr_hdisplay, int pixel_size)
{
@@ -2813,6 +3065,108 @@ static void i830_update_wm(struct drm_device *dev, int planea_clock, int unused,
I915_WRITE(FW_BLC, fwater_lo);
}
+#define ILK_LP0_PLANE_LATENCY 700
+
+static void ironlake_update_wm(struct drm_device *dev, int planea_clock,
+ int planeb_clock, int sr_hdisplay, int pixel_size)
+{
+ struct drm_i915_private *dev_priv = dev->dev_private;
+ int planea_wm, planeb_wm, cursora_wm, cursorb_wm;
+ int sr_wm, cursor_wm;
+ unsigned long line_time_us;
+ int sr_clock, entries_required;
+ u32 reg_value;
+
+ /* Calculate and update the watermark for plane A */
+ if (planea_clock) {
+ entries_required = ((planea_clock / 1000) * pixel_size *
+ ILK_LP0_PLANE_LATENCY) / 1000;
+ entries_required = DIV_ROUND_UP(entries_required,
+ ironlake_display_wm_info.cacheline_size);
+ planea_wm = entries_required +
+ ironlake_display_wm_info.guard_size;
+
+ if (planea_wm > (int)ironlake_display_wm_info.max_wm)
+ planea_wm = ironlake_display_wm_info.max_wm;
+
+ cursora_wm = 16;
+ reg_value = I915_READ(WM0_PIPEA_ILK);
+ reg_value &= ~(WM0_PIPE_PLANE_MASK | WM0_PIPE_CURSOR_MASK);
+ reg_value |= (planea_wm << WM0_PIPE_PLANE_SHIFT) |
+ (cursora_wm & WM0_PIPE_CURSOR_MASK);
+ I915_WRITE(WM0_PIPEA_ILK, reg_value);
+ DRM_DEBUG_KMS("FIFO watermarks For pipe A - plane %d, "
+ "cursor: %d\n", planea_wm, cursora_wm);
+ }
+ /* Calculate and update the watermark for plane B */
+ if (planeb_clock) {
+ entries_required = ((planeb_clock / 1000) * pixel_size *
+ ILK_LP0_PLANE_LATENCY) / 1000;
+ entries_required = DIV_ROUND_UP(entries_required,
+ ironlake_display_wm_info.cacheline_size);
+ planeb_wm = entries_required +
+ ironlake_display_wm_info.guard_size;
+
+ if (planeb_wm > (int)ironlake_display_wm_info.max_wm)
+ planeb_wm = ironlake_display_wm_info.max_wm;
+
+ cursorb_wm = 16;
+ reg_value = I915_READ(WM0_PIPEB_ILK);
+ reg_value &= ~(WM0_PIPE_PLANE_MASK | WM0_PIPE_CURSOR_MASK);
+ reg_value |= (planeb_wm << WM0_PIPE_PLANE_SHIFT) |
+ (cursorb_wm & WM0_PIPE_CURSOR_MASK);
+ I915_WRITE(WM0_PIPEB_ILK, reg_value);
+ DRM_DEBUG_KMS("FIFO watermarks For pipe B - plane %d, "
+ "cursor: %d\n", planeb_wm, cursorb_wm);
+ }
+
+ /*
+ * Calculate and update the self-refresh watermark only when one
+ * display plane is used.
+ */
+ if (!planea_clock || !planeb_clock) {
+ int line_count;
+ /* Read the self-refresh latency. The unit is 0.5us */
+ int ilk_sr_latency = I915_READ(MLTR_ILK) & ILK_SRLT_MASK;
+
+ sr_clock = planea_clock ? planea_clock : planeb_clock;
+ line_time_us = ((sr_hdisplay * 1000) / sr_clock);
+
+ /* Use ns/us then divide to preserve precision */
+ line_count = ((ilk_sr_latency * 500) / line_time_us + 1000)
+ / 1000;
+
+ /* calculate the self-refresh watermark for display plane */
+ entries_required = line_count * sr_hdisplay * pixel_size;
+ entries_required = DIV_ROUND_UP(entries_required,
+ ironlake_display_srwm_info.cacheline_size);
+ sr_wm = entries_required +
+ ironlake_display_srwm_info.guard_size;
+
+ /* calculate the self-refresh watermark for display cursor */
+ entries_required = line_count * pixel_size * 64;
+ entries_required = DIV_ROUND_UP(entries_required,
+ ironlake_cursor_srwm_info.cacheline_size);
+ cursor_wm = entries_required +
+ ironlake_cursor_srwm_info.guard_size;
+
+ /* configure watermark and enable self-refresh */
+ reg_value = I915_READ(WM1_LP_ILK);
+ reg_value &= ~(WM1_LP_LATENCY_MASK | WM1_LP_SR_MASK |
+ WM1_LP_CURSOR_MASK);
+ reg_value |= WM1_LP_SR_EN |
+ (ilk_sr_latency << WM1_LP_LATENCY_SHIFT) |
+ (sr_wm << WM1_LP_SR_SHIFT) | cursor_wm;
+
+ I915_WRITE(WM1_LP_ILK, reg_value);
+ DRM_DEBUG_KMS("self-refresh watermark: display plane %d "
+ "cursor %d\n", sr_wm, cursor_wm);
+
+ } else {
+ /* Turn off self refresh if both pipes are enabled */
+ I915_WRITE(WM1_LP_ILK, I915_READ(WM1_LP_ILK) & ~WM1_LP_SR_EN);
+ }
+}
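/*
 * The LP0 plane watermark above is plain integer arithmetic. A worked
 * standalone example for a 148.5 MHz pixel clock at 4 bytes/pixel; the
 * 64-byte line size and guard of 2 are assumed ironlake_display_wm_info
 * values, chosen for illustration.
 */
#include <stdio.h>

#define DIV_ROUND_UP(n, d) (((n) + (d) - 1) / (d))

int main(void)
{
        int clock = 148500;            /* pixel clock in kHz */
        int pixel_size = 4;            /* bytes per pixel */
        int latency = 700;             /* ILK_LP0_PLANE_LATENCY */
        int line_size = 64, guard = 2; /* assumed wm_info fields */

        int entries = ((clock / 1000) * pixel_size * latency) / 1000;
        int wm = DIV_ROUND_UP(entries, line_size) + guard;

        printf("entries=%d wm=%d\n", entries, wm); /* entries=414 wm=9 */
        return 0;
}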
/**
* intel_update_watermarks - update FIFO watermark values based on current modes
*
@@ -2882,12 +3236,6 @@ static void intel_update_watermarks(struct drm_device *dev)
if (enabled <= 0)
return;
- /* Single plane configs can enable self refresh */
- if (enabled == 1 && IS_PINEVIEW(dev))
- pineview_enable_cxsr(dev, sr_clock, pixel_size);
- else if (IS_PINEVIEW(dev))
- pineview_disable_cxsr(dev);
-
dev_priv->display.update_wm(dev, planea_clock, planeb_clock,
sr_hdisplay, pixel_size);
}
@@ -2924,7 +3272,8 @@ static int intel_crtc_mode_set(struct drm_crtc *crtc,
bool is_crt = false, is_lvds = false, is_tv = false, is_dp = false;
bool is_edp = false;
struct drm_mode_config *mode_config = &dev->mode_config;
- struct drm_connector *connector;
+ struct drm_encoder *encoder;
+ struct intel_encoder *intel_encoder = NULL;
const intel_limit_t *limit;
int ret;
struct fdi_m_n m_n = {0};
@@ -2935,6 +3284,8 @@ static int intel_crtc_mode_set(struct drm_crtc *crtc,
int pch_fp_reg = (pipe == 0) ? PCH_FPA0 : PCH_FPB0;
int pch_dpll_reg = (pipe == 0) ? PCH_DPLL_A : PCH_DPLL_B;
int fdi_rx_reg = (pipe == 0) ? FDI_RXA_CTL : FDI_RXB_CTL;
+ int fdi_tx_reg = (pipe == 0) ? FDI_TXA_CTL : FDI_TXB_CTL;
+ int trans_dpll_sel = (pipe == 0) ? 0 : 1;
int lvds_reg = LVDS;
u32 temp;
int sdvo_pixel_multiply;
@@ -2942,12 +3293,13 @@ static int intel_crtc_mode_set(struct drm_crtc *crtc,
drm_vblank_pre_modeset(dev, pipe);
- list_for_each_entry(connector, &mode_config->connector_list, head) {
- struct intel_encoder *intel_encoder = to_intel_encoder(connector);
+ list_for_each_entry(encoder, &mode_config->encoder_list, head) {
- if (!connector->encoder || connector->encoder->crtc != crtc)
+ if (!encoder || encoder->crtc != crtc)
continue;
+ intel_encoder = enc_to_intel_encoder(encoder);
+
switch (intel_encoder->type) {
case INTEL_OUTPUT_LVDS:
is_lvds = true;
@@ -3043,14 +3395,12 @@ static int intel_crtc_mode_set(struct drm_crtc *crtc,
/* FDI link */
if (HAS_PCH_SPLIT(dev)) {
- int lane, link_bw, bpp;
+ int lane = 0, link_bw, bpp;
/* eDP doesn't require FDI link, so just set DP M/N
according to current link config */
if (is_edp) {
- struct drm_connector *edp;
target_clock = mode->clock;
- edp = intel_pipe_get_connector(crtc);
- intel_edp_link_config(to_intel_encoder(edp),
+ intel_edp_link_config(intel_encoder,
&lane, &link_bw);
} else {
/* DP over FDI requires target mode clock
@@ -3059,7 +3409,6 @@ static int intel_crtc_mode_set(struct drm_crtc *crtc,
target_clock = mode->clock;
else
target_clock = adjusted_mode->clock;
- lane = 4;
link_bw = 270000;
}
@@ -3111,6 +3460,18 @@ static int intel_crtc_mode_set(struct drm_crtc *crtc,
bpp = 24;
}
+ if (!lane) {
+ /*
+ * Account for spread spectrum to avoid
+ * oversubscribing the link. Max center spread
+ * is 2.5%; use 5% for safety's sake.
+ */
+ u32 bps = target_clock * bpp * 21 / 20;
+ lane = bps / (link_bw * 8) + 1;
+ }
+
+ intel_crtc->fdi_lanes = lane;
+
ironlake_compute_m_n(bpp, lane, target_clock, link_bw, &m_n);
}
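/*
 * The lane calculation above pads the target bandwidth by 5% to cover
 * spread spectrum, then rounds up to the next whole lane. A worked
 * standalone example for a 148.5 MHz mode at 24 bpp on a 270 MHz FDI
 * link; the numbers are illustrative, not taken from the patch.
 */
#include <stdio.h>

int main(void)
{
        unsigned int target_clock = 148500; /* kHz */
        unsigned int bpp = 24;
        unsigned int link_bw = 270000;      /* kHz; x8 gives usable bits/s per lane */

        unsigned int bps = target_clock * bpp * 21 / 20; /* +5% margin */
        unsigned int lane = bps / (link_bw * 8) + 1;

        printf("bps=%u lane=%u\n", bps, lane); /* bps=3742200 lane=2 */
        return 0;
}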
@@ -3265,11 +3626,6 @@ static int intel_crtc_mode_set(struct drm_crtc *crtc,
pipeconf &= ~PIPEACONF_DOUBLE_WIDE;
}
- dspcntr |= DISPLAY_PLANE_ENABLE;
- pipeconf |= PIPEACONF_ENABLE;
- dpll |= DPLL_VCO_ENABLE;
-
-
/* Disable the panel fitter if it was on our pipe */
if (!HAS_PCH_SPLIT(dev) && intel_panel_fitter_pipe(dev) == pipe)
I915_WRITE(PFIT_CONTROL, 0);
@@ -3292,6 +3648,18 @@ static int intel_crtc_mode_set(struct drm_crtc *crtc,
udelay(150);
}
+ /* enable transcoder DPLL */
+ if (HAS_PCH_CPT(dev)) {
+ temp = I915_READ(PCH_DPLL_SEL);
+ if (trans_dpll_sel == 0)
+ temp |= (TRANSA_DPLL_ENABLE | TRANSA_DPLLA_SEL);
+ else
+ temp |= (TRANSB_DPLL_ENABLE | TRANSB_DPLLB_SEL);
+ I915_WRITE(PCH_DPLL_SEL, temp);
+ I915_READ(PCH_DPLL_SEL);
+ udelay(150);
+ }
+
/* The LVDS pin pair needs to be on before the DPLLs are enabled.
* This is an exception to the general rule that mode_set doesn't turn
* things on.
@@ -3303,7 +3671,18 @@ static int intel_crtc_mode_set(struct drm_crtc *crtc,
lvds_reg = PCH_LVDS;
lvds = I915_READ(lvds_reg);
- lvds |= LVDS_PORT_EN | LVDS_A0A2_CLKA_POWER_UP | LVDS_PIPEB_SELECT;
+ lvds |= LVDS_PORT_EN | LVDS_A0A2_CLKA_POWER_UP;
+ if (pipe == 1) {
+ if (HAS_PCH_CPT(dev))
+ lvds |= PORT_TRANS_B_SEL_CPT;
+ else
+ lvds |= LVDS_PIPEB_SELECT;
+ } else {
+ if (HAS_PCH_CPT(dev))
+ lvds &= ~PORT_TRANS_SEL_MASK;
+ else
+ lvds &= ~LVDS_PIPEB_SELECT;
+ }
/* set the corresponding LVDS_BORDER bit */
lvds |= dev_priv->lvds_border_bits;
/* Set the B0-B3 data pairs corresponding to whether we're going to
@@ -3321,14 +3700,16 @@ static int intel_crtc_mode_set(struct drm_crtc *crtc,
/* set the dithering flag */
if (IS_I965G(dev)) {
if (dev_priv->lvds_dither) {
- if (HAS_PCH_SPLIT(dev))
+ if (HAS_PCH_SPLIT(dev)) {
pipeconf |= PIPE_ENABLE_DITHER;
- else
+ pipeconf |= PIPE_DITHER_TYPE_ST01;
+ } else
lvds |= LVDS_ENABLE_DITHER;
} else {
- if (HAS_PCH_SPLIT(dev))
+ if (HAS_PCH_SPLIT(dev)) {
pipeconf &= ~PIPE_ENABLE_DITHER;
- else
+ pipeconf &= ~PIPE_DITHER_TYPE_MASK;
+ } else
lvds &= ~LVDS_ENABLE_DITHER;
}
}
@@ -3337,6 +3718,20 @@ static int intel_crtc_mode_set(struct drm_crtc *crtc,
}
if (is_dp)
intel_dp_set_m_n(crtc, mode, adjusted_mode);
+ else if (HAS_PCH_SPLIT(dev)) {
+ /* For non-DP output, clear any trans DP clock recovery setting. */
+ if (pipe == 0) {
+ I915_WRITE(TRANSA_DATA_M1, 0);
+ I915_WRITE(TRANSA_DATA_N1, 0);
+ I915_WRITE(TRANSA_DP_LINK_M1, 0);
+ I915_WRITE(TRANSA_DP_LINK_N1, 0);
+ } else {
+ I915_WRITE(TRANSB_DATA_M1, 0);
+ I915_WRITE(TRANSB_DATA_N1, 0);
+ I915_WRITE(TRANSB_DP_LINK_M1, 0);
+ I915_WRITE(TRANSB_DP_LINK_N1, 0);
+ }
+ }
if (!is_edp) {
I915_WRITE(fp_reg, fp);
@@ -3411,6 +3806,18 @@ static int intel_crtc_mode_set(struct drm_crtc *crtc,
/* enable FDI RX PLL too */
temp = I915_READ(fdi_rx_reg);
I915_WRITE(fdi_rx_reg, temp | FDI_RX_PLL_ENABLE);
+ I915_READ(fdi_rx_reg);
+ udelay(200);
+
+ /* enable FDI TX PLL too */
+ temp = I915_READ(fdi_tx_reg);
+ I915_WRITE(fdi_tx_reg, temp | FDI_TX_PLL_ENABLE);
+ I915_READ(fdi_tx_reg);
+
+ /* enable FDI RX PCDCLK */
+ temp = I915_READ(fdi_rx_reg);
+ I915_WRITE(fdi_rx_reg, temp | FDI_SEL_PCDCLK);
+ I915_READ(fdi_rx_reg);
udelay(200);
}
}
@@ -3671,6 +4078,7 @@ static struct drm_display_mode load_detect_mode = {
};
struct drm_crtc *intel_get_load_detect_pipe(struct intel_encoder *intel_encoder,
+ struct drm_connector *connector,
struct drm_display_mode *mode,
int *dpms_mode)
{
@@ -3729,7 +4137,7 @@ struct drm_crtc *intel_get_load_detect_pipe(struct intel_encoder *intel_encoder,
}
encoder->crtc = crtc;
- intel_encoder->base.encoder = encoder;
+ connector->encoder = encoder;
intel_encoder->load_detect_temp = true;
intel_crtc = to_intel_crtc(crtc);
@@ -3755,7 +4163,8 @@ struct drm_crtc *intel_get_load_detect_pipe(struct intel_encoder *intel_encoder,
return crtc;
}
-void intel_release_load_detect_pipe(struct intel_encoder *intel_encoder, int dpms_mode)
+void intel_release_load_detect_pipe(struct intel_encoder *intel_encoder,
+ struct drm_connector *connector, int dpms_mode)
{
struct drm_encoder *encoder = &intel_encoder->enc;
struct drm_device *dev = encoder->dev;
@@ -3765,7 +4174,7 @@ void intel_release_load_detect_pipe(struct intel_encoder *intel_encoder, int dpm
if (intel_encoder->load_detect_temp) {
encoder->crtc = NULL;
- intel_encoder->base.encoder = NULL;
+ connector->encoder = NULL;
intel_encoder->load_detect_temp = false;
crtc->enabled = drm_helper_crtc_in_use(crtc);
drm_helper_disable_unused_functions(dev);
@@ -4392,14 +4801,14 @@ struct drm_crtc *intel_get_crtc_from_pipe(struct drm_device *dev, int pipe)
return crtc;
}
-static int intel_connector_clones(struct drm_device *dev, int type_mask)
+static int intel_encoder_clones(struct drm_device *dev, int type_mask)
{
int index_mask = 0;
- struct drm_connector *connector;
+ struct drm_encoder *encoder;
int entry = 0;
- list_for_each_entry(connector, &dev->mode_config.connector_list, head) {
- struct intel_encoder *intel_encoder = to_intel_encoder(connector);
+ list_for_each_entry(encoder, &dev->mode_config.encoder_list, head) {
+ struct intel_encoder *intel_encoder = enc_to_intel_encoder(encoder);
if (type_mask & intel_encoder->clone_mask)
index_mask |= (1 << entry);
entry++;
@@ -4411,7 +4820,7 @@ static int intel_connector_clones(struct drm_device *dev, int type_mask)
static void intel_setup_outputs(struct drm_device *dev)
{
struct drm_i915_private *dev_priv = dev->dev_private;
- struct drm_connector *connector;
+ struct drm_encoder *encoder;
intel_crt_init(dev);
@@ -4426,9 +4835,8 @@ static void intel_setup_outputs(struct drm_device *dev)
intel_dp_init(dev, DP_A);
if (I915_READ(HDMIB) & PORT_DETECTED) {
- /* check SDVOB */
- /* found = intel_sdvo_init(dev, HDMIB); */
- found = 0;
+ /* PCH SDVOB multiplex with HDMIB */
+ found = intel_sdvo_init(dev, PCH_SDVOB);
if (!found)
intel_hdmi_init(dev, HDMIB);
if (!found && (I915_READ(PCH_DP_B) & DP_DETECTED))
@@ -4494,12 +4902,11 @@ static void intel_setup_outputs(struct drm_device *dev)
if (SUPPORTS_TV(dev))
intel_tv_init(dev);
- list_for_each_entry(connector, &dev->mode_config.connector_list, head) {
- struct intel_encoder *intel_encoder = to_intel_encoder(connector);
- struct drm_encoder *encoder = &intel_encoder->enc;
+ list_for_each_entry(encoder, &dev->mode_config.encoder_list, head) {
+ struct intel_encoder *intel_encoder = enc_to_intel_encoder(encoder);
encoder->possible_crtcs = intel_encoder->crtc_mask;
- encoder->possible_clones = intel_connector_clones(dev,
+ encoder->possible_clones = intel_encoder_clones(dev,
intel_encoder->clone_mask);
}
}
@@ -4507,10 +4914,6 @@ static void intel_setup_outputs(struct drm_device *dev)
static void intel_user_framebuffer_destroy(struct drm_framebuffer *fb)
{
struct intel_framebuffer *intel_fb = to_intel_framebuffer(fb);
- struct drm_device *dev = fb->dev;
-
- if (fb->fbdev)
- intelfb_remove(dev, fb);
drm_framebuffer_cleanup(fb);
drm_gem_object_unreference_unlocked(intel_fb->obj);
@@ -4533,18 +4936,13 @@ static const struct drm_framebuffer_funcs intel_fb_funcs = {
.create_handle = intel_user_framebuffer_create_handle,
};
-int intel_framebuffer_create(struct drm_device *dev,
- struct drm_mode_fb_cmd *mode_cmd,
- struct drm_framebuffer **fb,
- struct drm_gem_object *obj)
+int intel_framebuffer_init(struct drm_device *dev,
+ struct intel_framebuffer *intel_fb,
+ struct drm_mode_fb_cmd *mode_cmd,
+ struct drm_gem_object *obj)
{
- struct intel_framebuffer *intel_fb;
int ret;
- intel_fb = kzalloc(sizeof(*intel_fb), GFP_KERNEL);
- if (!intel_fb)
- return -ENOMEM;
-
ret = drm_framebuffer_init(dev, &intel_fb->base, &intel_fb_funcs);
if (ret) {
DRM_ERROR("framebuffer init failed %d\n", ret);
@@ -4552,40 +4950,41 @@ int intel_framebuffer_create(struct drm_device *dev,
}
drm_helper_mode_fill_fb_struct(&intel_fb->base, mode_cmd);
-
intel_fb->obj = obj;
-
- *fb = &intel_fb->base;
-
return 0;
}
-
static struct drm_framebuffer *
intel_user_framebuffer_create(struct drm_device *dev,
struct drm_file *filp,
struct drm_mode_fb_cmd *mode_cmd)
{
struct drm_gem_object *obj;
- struct drm_framebuffer *fb;
+ struct intel_framebuffer *intel_fb;
int ret;
obj = drm_gem_object_lookup(dev, filp, mode_cmd->handle);
if (!obj)
return NULL;
- ret = intel_framebuffer_create(dev, mode_cmd, &fb, obj);
+ intel_fb = kzalloc(sizeof(*intel_fb), GFP_KERNEL);
+ if (!intel_fb)
+ return NULL;
+
+ ret = intel_framebuffer_init(dev, intel_fb,
+ mode_cmd, obj);
if (ret) {
drm_gem_object_unreference_unlocked(obj);
+ kfree(intel_fb);
return NULL;
}
- return fb;
+ return &intel_fb->base;
}
static const struct drm_mode_config_funcs intel_mode_funcs = {
.fb_create = intel_user_framebuffer_create,
- .fb_changed = intelfb_probe,
+ .output_poll_changed = intel_fb_output_poll_changed,
};
static struct drm_gem_object *
@@ -4594,7 +4993,7 @@ intel_alloc_power_context(struct drm_device *dev)
struct drm_gem_object *pwrctx;
int ret;
- pwrctx = drm_gem_object_alloc(dev, 4096);
+ pwrctx = i915_gem_alloc_object(dev, 4096);
if (!pwrctx) {
DRM_DEBUG("failed to alloc power context, RC6 disabled\n");
return NULL;
@@ -4732,6 +5131,25 @@ void intel_init_clock_gating(struct drm_device *dev)
}
I915_WRITE(PCH_DSPCLK_GATE_D, dspclk_gate);
+
+ /*
+ * According to the spec, the following bits should be set in
+ * order to enable memory self-refresh:
+ * bit 22/21 of 0x42004
+ * bit 5 of 0x42020
+ * bit 15 of 0x45000
+ */
+ if (IS_IRONLAKE(dev)) {
+ I915_WRITE(ILK_DISPLAY_CHICKEN2,
+ (I915_READ(ILK_DISPLAY_CHICKEN2) |
+ ILK_DPARB_GATE | ILK_VSDPFD_FULL));
+ I915_WRITE(ILK_DSPCLK_GATE,
+ (I915_READ(ILK_DSPCLK_GATE) |
+ ILK_DPARB_CLK_GATE));
+ I915_WRITE(DISP_ARB_CTL,
+ (I915_READ(DISP_ARB_CTL) |
+ DISP_FBC_WM_DIS));
+ }
return;
} else if (IS_G4X(dev)) {
uint32_t dspclk_gate;
@@ -4809,8 +5227,7 @@ static void intel_init_display(struct drm_device *dev)
else
dev_priv->display.dpms = i9xx_crtc_dpms;
- /* Only mobile has FBC, leave pointers NULL for other chips */
- if (IS_MOBILE(dev)) {
+ if (I915_HAS_FBC(dev)) {
if (IS_GM45(dev)) {
dev_priv->display.fbc_enabled = g4x_fbc_enabled;
dev_priv->display.enable_fbc = g4x_enable_fbc;
@@ -4847,23 +5264,46 @@ static void intel_init_display(struct drm_device *dev)
i830_get_display_clock_speed;
/* For FIFO watermark updates */
- if (HAS_PCH_SPLIT(dev))
- dev_priv->display.update_wm = NULL;
- else if (IS_G4X(dev))
+ if (HAS_PCH_SPLIT(dev)) {
+ if (IS_IRONLAKE(dev)) {
+ if (I915_READ(MLTR_ILK) & ILK_SRLT_MASK)
+ dev_priv->display.update_wm = ironlake_update_wm;
+ else {
+ DRM_DEBUG_KMS("Failed to get proper latency. "
+ "Disable CxSR\n");
+ dev_priv->display.update_wm = NULL;
+ }
+ } else
+ dev_priv->display.update_wm = NULL;
+ } else if (IS_PINEVIEW(dev)) {
+ if (!intel_get_cxsr_latency(IS_PINEVIEW_G(dev),
+ dev_priv->fsb_freq,
+ dev_priv->mem_freq)) {
+ DRM_INFO("failed to find known CxSR latency "
+ "(found fsb freq %d, mem freq %d), "
+ "disabling CxSR\n",
+ dev_priv->fsb_freq, dev_priv->mem_freq);
+ /* Disable CxSR and never update its watermark again */
+ pineview_disable_cxsr(dev);
+ dev_priv->display.update_wm = NULL;
+ } else
+ dev_priv->display.update_wm = pineview_update_wm;
+ } else if (IS_G4X(dev))
dev_priv->display.update_wm = g4x_update_wm;
else if (IS_I965G(dev))
dev_priv->display.update_wm = i965_update_wm;
- else if (IS_I9XX(dev) || IS_MOBILE(dev)) {
+ else if (IS_I9XX(dev)) {
dev_priv->display.update_wm = i9xx_update_wm;
dev_priv->display.get_fifo_size = i9xx_get_fifo_size;
+ } else if (IS_I85X(dev)) {
+ dev_priv->display.update_wm = i9xx_update_wm;
+ dev_priv->display.get_fifo_size = i85x_get_fifo_size;
} else {
- if (IS_I85X(dev))
- dev_priv->display.get_fifo_size = i85x_get_fifo_size;
- else if (IS_845G(dev))
+ dev_priv->display.update_wm = i830_update_wm;
+ if (IS_845G(dev))
dev_priv->display.get_fifo_size = i845_get_fifo_size;
else
dev_priv->display.get_fifo_size = i830_get_fifo_size;
- dev_priv->display.update_wm = i830_update_wm;
}
}
@@ -4922,13 +5362,6 @@ void intel_modeset_init(struct drm_device *dev)
(unsigned long)dev);
intel_setup_overlay(dev);
-
- if (IS_PINEVIEW(dev) && !intel_get_cxsr_latency(IS_PINEVIEW_G(dev),
- dev_priv->fsb_freq,
- dev_priv->mem_freq))
- DRM_INFO("failed to find known CxSR latency "
- "(found fsb freq %d, mem freq %d), disabling CxSR\n",
- dev_priv->fsb_freq, dev_priv->mem_freq);
}
void intel_modeset_cleanup(struct drm_device *dev)
@@ -4939,6 +5372,9 @@ void intel_modeset_cleanup(struct drm_device *dev)
mutex_lock(&dev->struct_mutex);
+ drm_kms_helper_poll_fini(dev);
+ intel_fbdev_fini(dev);
+
list_for_each_entry(crtc, &dev->mode_config.crtc_list, head) {
/* Skip inactive CRTCs */
if (!crtc->fb)
@@ -4973,14 +5409,29 @@ void intel_modeset_cleanup(struct drm_device *dev)
}
-/* current intel driver doesn't take advantage of encoders
- always give back the encoder for the connector
-*/
-struct drm_encoder *intel_best_encoder(struct drm_connector *connector)
+/*
+ * Return the encoder currently attached to the connector.
+ */
+struct drm_encoder *intel_attached_encoder(struct drm_connector *connector)
{
- struct intel_encoder *intel_encoder = to_intel_encoder(connector);
+ struct drm_mode_object *obj;
+ struct drm_encoder *encoder;
+ int i;
- return &intel_encoder->enc;
+ for (i = 0; i < DRM_CONNECTOR_MAX_ENCODER; i++) {
+ if (connector->encoder_ids[i] == 0)
+ break;
+
+ obj = drm_mode_object_find(connector->dev,
+ connector->encoder_ids[i],
+ DRM_MODE_OBJECT_ENCODER);
+ if (!obj)
+ continue;
+
+ encoder = obj_to_encoder(obj);
+ return encoder;
+ }
+ return NULL;
}
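/*
 * Connector callbacks elsewhere in this patch recover their encoder
 * through this lookup rather than the old container_of()-style cast.
 * The usual call shape (a sketch; NULL handling is the caller's job):
 *
 *	struct drm_encoder *encoder = intel_attached_encoder(connector);
 *	struct intel_encoder *intel_encoder = enc_to_intel_encoder(encoder);
 */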
/*
diff --git a/drivers/gpu/drm/i915/intel_dp.c b/drivers/gpu/drm/i915/intel_dp.c
index 77e40cfcf21..6b1c9a27c27 100644
--- a/drivers/gpu/drm/i915/intel_dp.c
+++ b/drivers/gpu/drm/i915/intel_dp.c
@@ -48,8 +48,6 @@ struct intel_dp_priv {
uint32_t output_reg;
uint32_t DP;
uint8_t link_configuration[DP_LINK_CONFIGURATION_SIZE];
- uint32_t save_DP;
- uint8_t save_link_configuration[DP_LINK_CONFIGURATION_SIZE];
bool has_audio;
int dpms_mode;
uint8_t link_bw;
@@ -141,7 +139,8 @@ static int
intel_dp_mode_valid(struct drm_connector *connector,
struct drm_display_mode *mode)
{
- struct intel_encoder *intel_encoder = to_intel_encoder(connector);
+ struct drm_encoder *encoder = intel_attached_encoder(connector);
+ struct intel_encoder *intel_encoder = enc_to_intel_encoder(encoder);
int max_link_clock = intel_dp_link_clock(intel_dp_max_link_bw(intel_encoder));
int max_lanes = intel_dp_max_lane_count(intel_encoder);
@@ -215,7 +214,7 @@ intel_dp_aux_ch(struct intel_encoder *intel_encoder,
{
struct intel_dp_priv *dp_priv = intel_encoder->dev_priv;
uint32_t output_reg = dp_priv->output_reg;
- struct drm_device *dev = intel_encoder->base.dev;
+ struct drm_device *dev = intel_encoder->enc.dev;
struct drm_i915_private *dev_priv = dev->dev_private;
uint32_t ch_ctl = output_reg + 0x10;
uint32_t ch_data = ch_ctl + 4;
@@ -224,19 +223,27 @@ intel_dp_aux_ch(struct intel_encoder *intel_encoder,
uint32_t ctl;
uint32_t status;
uint32_t aux_clock_divider;
- int try;
+ int try, precharge;
/* The clock divider is based on the hrawclk,
* and should run at 2MHz. So, take the
* hrawclk value and divide it by 2 and use that.
*/
- if (IS_eDP(intel_encoder))
- aux_clock_divider = 225; /* eDP input clock at 450Mhz */
- else if (HAS_PCH_SPLIT(dev))
+ if (IS_eDP(intel_encoder)) {
+ if (IS_GEN6(dev))
+ aux_clock_divider = 200; /* SNB eDP input clock at 400MHz */
+ else
+ aux_clock_divider = 225; /* eDP input clock at 450MHz */
+ } else if (HAS_PCH_SPLIT(dev))
aux_clock_divider = 62; /* IRL input clock fixed at 125MHz */
else
aux_clock_divider = intel_hrawclk(dev) / 2;
+ if (IS_GEN6(dev))
+ precharge = 3;
+ else
+ precharge = 5;
+
/* Must try at least 3 times according to DP spec */
for (try = 0; try < 5; try++) {
/* Load the send data into the aux channel data registers */
@@ -249,7 +256,7 @@ intel_dp_aux_ch(struct intel_encoder *intel_encoder,
ctl = (DP_AUX_CH_CTL_SEND_BUSY |
DP_AUX_CH_CTL_TIME_OUT_400us |
(send_bytes << DP_AUX_CH_CTL_MESSAGE_SIZE_SHIFT) |
- (5 << DP_AUX_CH_CTL_PRECHARGE_2US_SHIFT) |
+ (precharge << DP_AUX_CH_CTL_PRECHARGE_2US_SHIFT) |
(aux_clock_divider << DP_AUX_CH_CTL_BIT_CLOCK_2X_SHIFT) |
DP_AUX_CH_CTL_DONE |
DP_AUX_CH_CTL_TIME_OUT_ERROR |
@@ -465,7 +472,8 @@ intel_dp_i2c_aux_ch(struct i2c_adapter *adapter, int mode,
}
static int
-intel_dp_i2c_init(struct intel_encoder *intel_encoder, const char *name)
+intel_dp_i2c_init(struct intel_encoder *intel_encoder,
+ struct intel_connector *intel_connector, const char *name)
{
struct intel_dp_priv *dp_priv = intel_encoder->dev_priv;
@@ -480,7 +488,7 @@ intel_dp_i2c_init(struct intel_encoder *intel_encoder, const char *name)
strncpy (dp_priv->adapter.name, name, sizeof(dp_priv->adapter.name) - 1);
dp_priv->adapter.name[sizeof(dp_priv->adapter.name) - 1] = '\0';
dp_priv->adapter.algo_data = &dp_priv->algo;
- dp_priv->adapter.dev.parent = &intel_encoder->base.kdev;
+ dp_priv->adapter.dev.parent = &intel_connector->base.kdev;
return i2c_dp_aux_add_bus(&dp_priv->adapter);
}
@@ -555,7 +563,7 @@ intel_dp_set_m_n(struct drm_crtc *crtc, struct drm_display_mode *mode,
{
struct drm_device *dev = crtc->dev;
struct drm_mode_config *mode_config = &dev->mode_config;
- struct drm_connector *connector;
+ struct drm_encoder *encoder;
struct drm_i915_private *dev_priv = dev->dev_private;
struct intel_crtc *intel_crtc = to_intel_crtc(crtc);
int lane_count = 4;
@@ -564,13 +572,16 @@ intel_dp_set_m_n(struct drm_crtc *crtc, struct drm_display_mode *mode,
/*
* Find the lane count in the intel_encoder private
*/
- list_for_each_entry(connector, &mode_config->connector_list, head) {
- struct intel_encoder *intel_encoder = to_intel_encoder(connector);
- struct intel_dp_priv *dp_priv = intel_encoder->dev_priv;
+ list_for_each_entry(encoder, &mode_config->encoder_list, head) {
+ struct intel_encoder *intel_encoder;
+ struct intel_dp_priv *dp_priv;
- if (!connector->encoder || connector->encoder->crtc != crtc)
+ if (!encoder || encoder->crtc != crtc)
continue;
+ intel_encoder = enc_to_intel_encoder(encoder);
+ dp_priv = intel_encoder->dev_priv;
+
if (intel_encoder->type == INTEL_OUTPUT_DISPLAYPORT) {
lane_count = dp_priv->lane_count;
break;
@@ -626,16 +637,24 @@ static void
intel_dp_mode_set(struct drm_encoder *encoder, struct drm_display_mode *mode,
struct drm_display_mode *adjusted_mode)
{
+ struct drm_device *dev = encoder->dev;
struct intel_encoder *intel_encoder = enc_to_intel_encoder(encoder);
struct intel_dp_priv *dp_priv = intel_encoder->dev_priv;
struct drm_crtc *crtc = intel_encoder->enc.crtc;
struct intel_crtc *intel_crtc = to_intel_crtc(crtc);
- dp_priv->DP = (DP_LINK_TRAIN_OFF |
- DP_VOLTAGE_0_4 |
- DP_PRE_EMPHASIS_0 |
- DP_SYNC_VS_HIGH |
- DP_SYNC_HS_HIGH);
+ dp_priv->DP = (DP_VOLTAGE_0_4 |
+ DP_PRE_EMPHASIS_0);
+
+ if (adjusted_mode->flags & DRM_MODE_FLAG_PHSYNC)
+ dp_priv->DP |= DP_SYNC_HS_HIGH;
+ if (adjusted_mode->flags & DRM_MODE_FLAG_PVSYNC)
+ dp_priv->DP |= DP_SYNC_VS_HIGH;
+
+ if (HAS_PCH_CPT(dev) && !IS_eDP(intel_encoder))
+ dp_priv->DP |= DP_LINK_TRAIN_OFF_CPT;
+ else
+ dp_priv->DP |= DP_LINK_TRAIN_OFF;
switch (dp_priv->lane_count) {
case 1:
@@ -664,7 +683,8 @@ intel_dp_mode_set(struct drm_encoder *encoder, struct drm_display_mode *mode,
dp_priv->DP |= DP_ENHANCED_FRAMING;
}
- if (intel_crtc->pipe == 1)
+ /* CPT DP's pipe select is decided in TRANS_DP_CTL */
+ if (intel_crtc->pipe == 1 && !HAS_PCH_CPT(dev))
dp_priv->DP |= DP_PIPEB_SELECT;
if (IS_eDP(intel_encoder)) {
@@ -704,7 +724,7 @@ intel_dp_dpms(struct drm_encoder *encoder, int mode)
{
struct intel_encoder *intel_encoder = enc_to_intel_encoder(encoder);
struct intel_dp_priv *dp_priv = intel_encoder->dev_priv;
- struct drm_device *dev = intel_encoder->base.dev;
+ struct drm_device *dev = encoder->dev;
struct drm_i915_private *dev_priv = dev->dev_private;
uint32_t dp_reg = I915_READ(dp_priv->output_reg);
@@ -749,20 +769,6 @@ intel_dp_link_status(uint8_t link_status[DP_LINK_STATUS_SIZE],
return link_status[r - DP_LANE0_1_STATUS];
}
-static void
-intel_dp_save(struct drm_connector *connector)
-{
- struct intel_encoder *intel_encoder = to_intel_encoder(connector);
- struct drm_device *dev = intel_encoder->base.dev;
- struct drm_i915_private *dev_priv = dev->dev_private;
- struct intel_dp_priv *dp_priv = intel_encoder->dev_priv;
-
- dp_priv->save_DP = I915_READ(dp_priv->output_reg);
- intel_dp_aux_native_read(intel_encoder, DP_LINK_BW_SET,
- dp_priv->save_link_configuration,
- sizeof (dp_priv->save_link_configuration));
-}
-
static uint8_t
intel_get_adjust_request_voltage(uint8_t link_status[DP_LINK_STATUS_SIZE],
int lane)
@@ -892,6 +898,25 @@ intel_dp_signal_levels(uint8_t train_set, int lane_count)
return signal_levels;
}
+/* Gen6's DP voltage swing and pre-emphasis control */
+static uint32_t
+intel_gen6_edp_signal_levels(uint8_t train_set)
+{
+ switch (train_set & (DP_TRAIN_VOLTAGE_SWING_MASK|DP_TRAIN_PRE_EMPHASIS_MASK)) {
+ case DP_TRAIN_VOLTAGE_SWING_400 | DP_TRAIN_PRE_EMPHASIS_0:
+ return EDP_LINK_TRAIN_400MV_0DB_SNB_B;
+ case DP_TRAIN_VOLTAGE_SWING_400 | DP_TRAIN_PRE_EMPHASIS_6:
+ return EDP_LINK_TRAIN_400MV_6DB_SNB_B;
+ case DP_TRAIN_VOLTAGE_SWING_600 | DP_TRAIN_PRE_EMPHASIS_3_5:
+ return EDP_LINK_TRAIN_600MV_3_5DB_SNB_B;
+ case DP_TRAIN_VOLTAGE_SWING_800 | DP_TRAIN_PRE_EMPHASIS_0:
+ return EDP_LINK_TRAIN_800MV_0DB_SNB_B;
+ default:
+ DRM_DEBUG_KMS("Unsupported voltage swing/pre-emphasis level\n");
+ return EDP_LINK_TRAIN_400MV_0DB_SNB_B;
+ }
+}
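/*
 * The switch above keys on the OR of the two masked fields so each
 * voltage/pre-emphasis pair gets a single case label. A standalone sketch
 * of that dispatch shape with made-up mask values, not the real DP
 * training-set encoding.
 */
#include <stdint.h>
#include <stdio.h>

#define VSWING_MASK  0x3
#define PREEMPH_MASK 0xc
#define VSWING_400   0x0
#define VSWING_600   0x1
#define PREEMPH_0    0x0
#define PREEMPH_3_5  0x8

static const char *level_name(uint8_t train_set)
{
        switch (train_set & (VSWING_MASK | PREEMPH_MASK)) {
        case VSWING_400 | PREEMPH_0:
                return "400mV/0dB";
        case VSWING_600 | PREEMPH_3_5:
                return "600mV/3.5dB";
        default: /* unsupported combination: fall back, as the driver does */
                return "400mV/0dB (fallback)";
        }
}

int main(void)
{
        printf("%s\n", level_name(VSWING_600 | PREEMPH_3_5)); /* 600mV/3.5dB */
        return 0;
}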
+
static uint8_t
intel_get_lane_status(uint8_t link_status[DP_LINK_STATUS_SIZE],
int lane)
@@ -948,7 +973,7 @@ intel_dp_set_link_train(struct intel_encoder *intel_encoder,
uint8_t train_set[4],
bool first)
{
- struct drm_device *dev = intel_encoder->base.dev;
+ struct drm_device *dev = intel_encoder->enc.dev;
struct drm_i915_private *dev_priv = dev->dev_private;
struct intel_dp_priv *dp_priv = intel_encoder->dev_priv;
int ret;
@@ -974,7 +999,7 @@ static void
intel_dp_link_train(struct intel_encoder *intel_encoder, uint32_t DP,
uint8_t link_configuration[DP_LINK_CONFIGURATION_SIZE])
{
- struct drm_device *dev = intel_encoder->base.dev;
+ struct drm_device *dev = intel_encoder->enc.dev;
struct drm_i915_private *dev_priv = dev->dev_private;
struct intel_dp_priv *dp_priv = intel_encoder->dev_priv;
uint8_t train_set[4];
@@ -985,23 +1010,38 @@ intel_dp_link_train(struct intel_encoder *intel_encoder, uint32_t DP,
bool channel_eq = false;
bool first = true;
int tries;
+ u32 reg;
/* Write the link configuration data */
- intel_dp_aux_native_write(intel_encoder, 0x100,
+ intel_dp_aux_native_write(intel_encoder, DP_LINK_BW_SET,
link_configuration, DP_LINK_CONFIGURATION_SIZE);
DP |= DP_PORT_EN;
- DP &= ~DP_LINK_TRAIN_MASK;
+ if (HAS_PCH_CPT(dev) && !IS_eDP(intel_encoder))
+ DP &= ~DP_LINK_TRAIN_MASK_CPT;
+ else
+ DP &= ~DP_LINK_TRAIN_MASK;
memset(train_set, 0, 4);
voltage = 0xff;
tries = 0;
clock_recovery = false;
for (;;) {
/* Use train_set[0] to set the voltage and pre emphasis values */
- uint32_t signal_levels = intel_dp_signal_levels(train_set[0], dp_priv->lane_count);
- DP = (DP & ~(DP_VOLTAGE_MASK|DP_PRE_EMPHASIS_MASK)) | signal_levels;
+ uint32_t signal_levels;
+ if (IS_GEN6(dev) && IS_eDP(intel_encoder)) {
+ signal_levels = intel_gen6_edp_signal_levels(train_set[0]);
+ DP = (DP & ~EDP_LINK_TRAIN_VOL_EMP_MASK_SNB) | signal_levels;
+ } else {
+ signal_levels = intel_dp_signal_levels(train_set[0], dp_priv->lane_count);
+ DP = (DP & ~(DP_VOLTAGE_MASK|DP_PRE_EMPHASIS_MASK)) | signal_levels;
+ }
- if (!intel_dp_set_link_train(intel_encoder, DP | DP_LINK_TRAIN_PAT_1,
+ if (HAS_PCH_CPT(dev) && !IS_eDP(intel_encoder))
+ reg = DP | DP_LINK_TRAIN_PAT_1_CPT;
+ else
+ reg = DP | DP_LINK_TRAIN_PAT_1;
+
+ if (!intel_dp_set_link_train(intel_encoder, reg,
DP_TRAINING_PATTERN_1, train_set, first))
break;
first = false;
@@ -1041,11 +1081,23 @@ intel_dp_link_train(struct intel_encoder *intel_encoder, uint32_t DP,
channel_eq = false;
for (;;) {
/* Use train_set[0] to set the voltage and pre emphasis values */
- uint32_t signal_levels = intel_dp_signal_levels(train_set[0], dp_priv->lane_count);
- DP = (DP & ~(DP_VOLTAGE_MASK|DP_PRE_EMPHASIS_MASK)) | signal_levels;
+ uint32_t signal_levels;
+
+ if (IS_GEN6(dev) && IS_eDP(intel_encoder)) {
+ signal_levels = intel_gen6_edp_signal_levels(train_set[0]);
+ DP = (DP & ~EDP_LINK_TRAIN_VOL_EMP_MASK_SNB) | signal_levels;
+ } else {
+ signal_levels = intel_dp_signal_levels(train_set[0], dp_priv->lane_count);
+ DP = (DP & ~(DP_VOLTAGE_MASK|DP_PRE_EMPHASIS_MASK)) | signal_levels;
+ }
+
+ if (HAS_PCH_CPT(dev) && !IS_eDP(intel_encoder))
+ reg = DP | DP_LINK_TRAIN_PAT_2_CPT;
+ else
+ reg = DP | DP_LINK_TRAIN_PAT_2;
/* channel eq pattern */
- if (!intel_dp_set_link_train(intel_encoder, DP | DP_LINK_TRAIN_PAT_2,
+ if (!intel_dp_set_link_train(intel_encoder, reg,
DP_TRAINING_PATTERN_2, train_set,
false))
break;
@@ -1068,7 +1120,12 @@ intel_dp_link_train(struct intel_encoder *intel_encoder, uint32_t DP,
++tries;
}
- I915_WRITE(dp_priv->output_reg, DP | DP_LINK_TRAIN_OFF);
+ if (HAS_PCH_CPT(dev) && !IS_eDP(intel_encoder))
+ reg = DP | DP_LINK_TRAIN_OFF_CPT;
+ else
+ reg = DP | DP_LINK_TRAIN_OFF;
+
+ I915_WRITE(dp_priv->output_reg, reg);
POSTING_READ(dp_priv->output_reg);
intel_dp_aux_native_write_1(intel_encoder,
DP_TRAINING_PATTERN_SET, DP_TRAINING_PATTERN_DISABLE);
@@ -1077,7 +1134,7 @@ intel_dp_link_train(struct intel_encoder *intel_encoder, uint32_t DP,
static void
intel_dp_link_down(struct intel_encoder *intel_encoder, uint32_t DP)
{
- struct drm_device *dev = intel_encoder->base.dev;
+ struct drm_device *dev = intel_encoder->enc.dev;
struct drm_i915_private *dev_priv = dev->dev_private;
struct intel_dp_priv *dp_priv = intel_encoder->dev_priv;
@@ -1090,9 +1147,15 @@ intel_dp_link_down(struct intel_encoder *intel_encoder, uint32_t DP)
udelay(100);
}
- DP &= ~DP_LINK_TRAIN_MASK;
- I915_WRITE(dp_priv->output_reg, DP | DP_LINK_TRAIN_PAT_IDLE);
- POSTING_READ(dp_priv->output_reg);
+ if (HAS_PCH_CPT(dev) && !IS_eDP(intel_encoder)) {
+ DP &= ~DP_LINK_TRAIN_MASK_CPT;
+ I915_WRITE(dp_priv->output_reg, DP | DP_LINK_TRAIN_PAT_IDLE_CPT);
+ POSTING_READ(dp_priv->output_reg);
+ } else {
+ DP &= ~DP_LINK_TRAIN_MASK;
+ I915_WRITE(dp_priv->output_reg, DP | DP_LINK_TRAIN_PAT_IDLE);
+ POSTING_READ(dp_priv->output_reg);
+ }
udelay(17000);
@@ -1102,18 +1165,6 @@ intel_dp_link_down(struct intel_encoder *intel_encoder, uint32_t DP)
POSTING_READ(dp_priv->output_reg);
}
-static void
-intel_dp_restore(struct drm_connector *connector)
-{
- struct intel_encoder *intel_encoder = to_intel_encoder(connector);
- struct intel_dp_priv *dp_priv = intel_encoder->dev_priv;
-
- if (dp_priv->save_DP & DP_PORT_EN)
- intel_dp_link_train(intel_encoder, dp_priv->save_DP, dp_priv->save_link_configuration);
- else
- intel_dp_link_down(intel_encoder, dp_priv->save_DP);
-}
-
/*
* According to DP spec
* 5.1.2:
@@ -1144,7 +1195,8 @@ intel_dp_check_link_status(struct intel_encoder *intel_encoder)
static enum drm_connector_status
ironlake_dp_detect(struct drm_connector *connector)
{
- struct intel_encoder *intel_encoder = to_intel_encoder(connector);
+ struct drm_encoder *encoder = intel_attached_encoder(connector);
+ struct intel_encoder *intel_encoder = enc_to_intel_encoder(encoder);
struct intel_dp_priv *dp_priv = intel_encoder->dev_priv;
enum drm_connector_status status;
@@ -1168,8 +1220,9 @@ ironlake_dp_detect(struct drm_connector *connector)
static enum drm_connector_status
intel_dp_detect(struct drm_connector *connector)
{
- struct intel_encoder *intel_encoder = to_intel_encoder(connector);
- struct drm_device *dev = intel_encoder->base.dev;
+ struct drm_encoder *encoder = intel_attached_encoder(connector);
+ struct intel_encoder *intel_encoder = enc_to_intel_encoder(encoder);
+ struct drm_device *dev = intel_encoder->enc.dev;
struct drm_i915_private *dev_priv = dev->dev_private;
struct intel_dp_priv *dp_priv = intel_encoder->dev_priv;
uint32_t temp, bit;
@@ -1180,16 +1233,6 @@ intel_dp_detect(struct drm_connector *connector)
if (HAS_PCH_SPLIT(dev))
return ironlake_dp_detect(connector);
- temp = I915_READ(PORT_HOTPLUG_EN);
-
- I915_WRITE(PORT_HOTPLUG_EN,
- temp |
- DPB_HOTPLUG_INT_EN |
- DPC_HOTPLUG_INT_EN |
- DPD_HOTPLUG_INT_EN);
-
- POSTING_READ(PORT_HOTPLUG_EN);
-
switch (dp_priv->output_reg) {
case DP_B:
bit = DPB_HOTPLUG_INT_STATUS;
@@ -1222,15 +1265,16 @@ intel_dp_detect(struct drm_connector *connector)
static int intel_dp_get_modes(struct drm_connector *connector)
{
- struct intel_encoder *intel_encoder = to_intel_encoder(connector);
- struct drm_device *dev = intel_encoder->base.dev;
+ struct drm_encoder *encoder = intel_attached_encoder(connector);
+ struct intel_encoder *intel_encoder = enc_to_intel_encoder(encoder);
+ struct drm_device *dev = intel_encoder->enc.dev;
struct drm_i915_private *dev_priv = dev->dev_private;
int ret;
/* We should parse the EDID data and find out if it has an audio sink
*/
- ret = intel_ddc_get_modes(intel_encoder);
+ ret = intel_ddc_get_modes(connector, intel_encoder->ddc_bus);
if (ret)
return ret;
@@ -1249,13 +1293,9 @@ static int intel_dp_get_modes(struct drm_connector *connector)
static void
intel_dp_destroy (struct drm_connector *connector)
{
- struct intel_encoder *intel_encoder = to_intel_encoder(connector);
-
- if (intel_encoder->i2c_bus)
- intel_i2c_destroy(intel_encoder->i2c_bus);
drm_sysfs_connector_remove(connector);
drm_connector_cleanup(connector);
- kfree(intel_encoder);
+ kfree(connector);
}
static const struct drm_encoder_helper_funcs intel_dp_helper_funcs = {
@@ -1268,8 +1308,6 @@ static const struct drm_encoder_helper_funcs intel_dp_helper_funcs = {
static const struct drm_connector_funcs intel_dp_connector_funcs = {
.dpms = drm_helper_connector_dpms,
- .save = intel_dp_save,
- .restore = intel_dp_restore,
.detect = intel_dp_detect,
.fill_modes = drm_helper_probe_single_connector_modes,
.destroy = intel_dp_destroy,
@@ -1278,12 +1316,17 @@ static const struct drm_connector_funcs intel_dp_connector_funcs = {
static const struct drm_connector_helper_funcs intel_dp_connector_helper_funcs = {
.get_modes = intel_dp_get_modes,
.mode_valid = intel_dp_mode_valid,
- .best_encoder = intel_best_encoder,
+ .best_encoder = intel_attached_encoder,
};
static void intel_dp_enc_destroy(struct drm_encoder *encoder)
{
+ struct intel_encoder *intel_encoder = enc_to_intel_encoder(encoder);
+
+ if (intel_encoder->i2c_bus)
+ intel_i2c_destroy(intel_encoder->i2c_bus);
drm_encoder_cleanup(encoder);
+ kfree(intel_encoder);
}
static const struct drm_encoder_funcs intel_dp_enc_funcs = {
@@ -1299,12 +1342,35 @@ intel_dp_hot_plug(struct intel_encoder *intel_encoder)
intel_dp_check_link_status(intel_encoder);
}
+/* Return which DP Port should be selected for Transcoder DP control */
+int
+intel_trans_dp_port_sel (struct drm_crtc *crtc)
+{
+ struct drm_device *dev = crtc->dev;
+ struct drm_mode_config *mode_config = &dev->mode_config;
+ struct drm_encoder *encoder;
+ struct intel_encoder *intel_encoder = NULL;
+
+ list_for_each_entry(encoder, &mode_config->encoder_list, head) {
+ if (!encoder || encoder->crtc != crtc)
+ continue;
+
+ intel_encoder = enc_to_intel_encoder(encoder);
+ if (intel_encoder->type == INTEL_OUTPUT_DISPLAYPORT) {
+ struct intel_dp_priv *dp_priv = intel_encoder->dev_priv;
+ return dp_priv->output_reg;
+ }
+ }
+ return -1;
+}
+
void
intel_dp_init(struct drm_device *dev, int output_reg)
{
struct drm_i915_private *dev_priv = dev->dev_private;
struct drm_connector *connector;
struct intel_encoder *intel_encoder;
+ struct intel_connector *intel_connector;
struct intel_dp_priv *dp_priv;
const char *name = NULL;
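
[annotation] intel_trans_dp_port_sel(), added above, walks the device's encoder list to find the DisplayPort encoder currently driving a CRTC so the transcoder's DP port-select field can be programmed; -1 means no DP output hangs off that pipe. The lookup reduces to a linear scan, sketched here over a plain array standing in for dev->mode_config.encoder_list:

#include <stddef.h>
#include <stdio.h>

#define OUTPUT_DISPLAYPORT 1

struct enc {
    int type;        /* INTEL_OUTPUT_* in the driver */
    int crtc_id;     /* stand-in for encoder->crtc */
    int output_reg;  /* DP_B/DP_C/DP_D register offset */
};

/* Hypothetical flat array standing in for the kernel's linked list. */
static int trans_dp_port_sel(const struct enc *encs, size_t n, int crtc_id)
{
    for (size_t i = 0; i < n; i++) {
        if (encs[i].crtc_id != crtc_id)
            continue;
        if (encs[i].type == OUTPUT_DISPLAYPORT)
            return encs[i].output_reg;
    }
    return -1;    /* no DP encoder drives this CRTC */
}

int main(void)
{
    struct enc encs[] = { { 0, 0, 0x100 }, { OUTPUT_DISPLAYPORT, 1, 0x200 } };
    printf("%d\n", trans_dp_port_sel(encs, 2, 1));  /* prints 512 */
    return 0;
}
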
@@ -1313,13 +1379,21 @@ intel_dp_init(struct drm_device *dev, int output_reg)
if (!intel_encoder)
return;
+ intel_connector = kzalloc(sizeof(struct intel_connector), GFP_KERNEL);
+ if (!intel_connector) {
+ kfree(intel_encoder);
+ return;
+ }
+
dp_priv = (struct intel_dp_priv *)(intel_encoder + 1);
- connector = &intel_encoder->base;
+ connector = &intel_connector->base;
drm_connector_init(dev, connector, &intel_dp_connector_funcs,
DRM_MODE_CONNECTOR_DisplayPort);
drm_connector_helper_add(connector, &intel_dp_connector_helper_funcs);
+ connector->polled = DRM_CONNECTOR_POLL_HPD;
+
if (output_reg == DP_A)
intel_encoder->type = INTEL_OUTPUT_EDP;
else
@@ -1349,7 +1423,7 @@ intel_dp_init(struct drm_device *dev, int output_reg)
DRM_MODE_ENCODER_TMDS);
drm_encoder_helper_add(&intel_encoder->enc, &intel_dp_helper_funcs);
- drm_mode_connector_attach_encoder(&intel_encoder->base,
+ drm_mode_connector_attach_encoder(&intel_connector->base,
&intel_encoder->enc);
drm_sysfs_connector_add(connector);
@@ -1378,7 +1452,7 @@ intel_dp_init(struct drm_device *dev, int output_reg)
break;
}
- intel_dp_i2c_init(intel_encoder, name);
+ intel_dp_i2c_init(intel_encoder, intel_connector, name);
intel_encoder->ddc_bus = &dp_priv->adapter;
intel_encoder->hot_plug = intel_dp_hot_plug;
diff --git a/drivers/gpu/drm/i915/intel_drv.h b/drivers/gpu/drm/i915/intel_drv.h
index e30253755f1..df931f78766 100644
--- a/drivers/gpu/drm/i915/intel_drv.h
+++ b/drivers/gpu/drm/i915/intel_drv.h
@@ -96,8 +96,6 @@ struct intel_framebuffer {
struct intel_encoder {
- struct drm_connector base;
-
struct drm_encoder enc;
int type;
struct i2c_adapter *i2c_bus;
@@ -110,6 +108,11 @@ struct intel_encoder {
int clone_mask;
};
+struct intel_connector {
+ struct drm_connector base;
+ void *dev_priv;
+};
+
struct intel_crtc;
struct intel_overlay {
struct drm_device *dev;
@@ -149,17 +152,18 @@ struct intel_crtc {
bool lowfreq_avail;
struct intel_overlay *overlay;
struct intel_unpin_work *unpin_work;
+ int fdi_lanes;
};
#define to_intel_crtc(x) container_of(x, struct intel_crtc, base)
-#define to_intel_encoder(x) container_of(x, struct intel_encoder, base)
+#define to_intel_connector(x) container_of(x, struct intel_connector, base)
#define enc_to_intel_encoder(x) container_of(x, struct intel_encoder, enc)
#define to_intel_framebuffer(x) container_of(x, struct intel_framebuffer, base)
struct i2c_adapter *intel_i2c_create(struct drm_device *dev, const u32 reg,
const char *name);
void intel_i2c_destroy(struct i2c_adapter *adapter);
-int intel_ddc_get_modes(struct intel_encoder *intel_encoder);
+int intel_ddc_get_modes(struct drm_connector *c, struct i2c_adapter *adapter);
extern bool intel_ddc_probe(struct intel_encoder *intel_encoder);
void intel_i2c_quirk_set(struct drm_device *dev, bool enable);
void intel_i2c_reset_gmbus(struct drm_device *dev);
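
[annotation] This header hunk is the heart of the refactor: the old combined object — an intel_encoder that embedded the drm_connector — splits into separate intel_encoder and intel_connector wrappers, and to_intel_connector()/enc_to_intel_encoder() recover the wrapper from the embedded base via container_of. A self-contained illustration of that idiom, with struct fields trimmed to the minimum:

#include <stddef.h>
#include <stdio.h>

#define container_of(ptr, type, member) \
    ((type *)((char *)(ptr) - offsetof(type, member)))

struct drm_connector { int id; };                 /* stand-in */
struct intel_connector {
    struct drm_connector base;
    void *dev_priv;
};

#define to_intel_connector(x) container_of(x, struct intel_connector, base)

int main(void)
{
    struct intel_connector ic = { .base = { .id = 7 }, .dev_priv = NULL };
    struct drm_connector *c = &ic.base;   /* what the DRM core hands around */
    /* Recover the driver wrapper from the embedded base object. */
    printf("%d\n", to_intel_connector(c)->base.id);
    return 0;
}

Because the macro is pure pointer arithmetic over offsetof, the DRM core keeps passing bare drm_connector pointers while the driver still reaches its private state.
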
@@ -183,7 +187,7 @@ extern void intel_crtc_load_lut(struct drm_crtc *crtc);
extern void intel_encoder_prepare (struct drm_encoder *encoder);
extern void intel_encoder_commit (struct drm_encoder *encoder);
-extern struct drm_encoder *intel_best_encoder(struct drm_connector *connector);
+extern struct drm_encoder *intel_attached_encoder(struct drm_connector *connector);
extern struct drm_display_mode *intel_crtc_mode_get(struct drm_device *dev,
struct drm_crtc *crtc);
@@ -192,17 +196,16 @@ int intel_get_pipe_from_crtc_id(struct drm_device *dev, void *data,
extern void intel_wait_for_vblank(struct drm_device *dev);
extern struct drm_crtc *intel_get_crtc_from_pipe(struct drm_device *dev, int pipe);
extern struct drm_crtc *intel_get_load_detect_pipe(struct intel_encoder *intel_encoder,
+ struct drm_connector *connector,
struct drm_display_mode *mode,
int *dpms_mode);
extern void intel_release_load_detect_pipe(struct intel_encoder *intel_encoder,
+ struct drm_connector *connector,
int dpms_mode);
extern struct drm_connector* intel_sdvo_find(struct drm_device *dev, int sdvoB);
extern int intel_sdvo_supports_hotplug(struct drm_connector *connector);
extern void intel_sdvo_set_hotplug(struct drm_connector *connector, int enable);
-extern int intelfb_probe(struct drm_device *dev);
-extern int intelfb_remove(struct drm_device *dev, struct drm_framebuffer *fb);
-extern int intelfb_resize(struct drm_device *dev, struct drm_crtc *crtc);
extern void intelfb_restore(void);
extern void intel_crtc_fb_gamma_set(struct drm_crtc *crtc, u16 red, u16 green,
u16 blue, int regno);
@@ -212,10 +215,12 @@ extern void intel_init_clock_gating(struct drm_device *dev);
extern void ironlake_enable_drps(struct drm_device *dev);
extern void ironlake_disable_drps(struct drm_device *dev);
-extern int intel_framebuffer_create(struct drm_device *dev,
- struct drm_mode_fb_cmd *mode_cmd,
- struct drm_framebuffer **fb,
- struct drm_gem_object *obj);
+extern int intel_framebuffer_init(struct drm_device *dev,
+ struct intel_framebuffer *ifb,
+ struct drm_mode_fb_cmd *mode_cmd,
+ struct drm_gem_object *obj);
+extern int intel_fbdev_init(struct drm_device *dev);
+extern void intel_fbdev_fini(struct drm_device *dev);
extern void intel_prepare_page_flip(struct drm_device *dev, int plane);
extern void intel_finish_page_flip(struct drm_device *dev, int pipe);
@@ -229,4 +234,6 @@ extern int intel_overlay_put_image(struct drm_device *dev, void *data,
struct drm_file *file_priv);
extern int intel_overlay_attrs(struct drm_device *dev, void *data,
struct drm_file *file_priv);
+
+extern void intel_fb_output_poll_changed(struct drm_device *dev);
#endif /* __INTEL_DRV_H__ */
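
[annotation] The fbdev entry points exported from this header also change shape: intelfb_probe/intelfb_remove/intelfb_resize give way to intel_fbdev_init()/intel_fbdev_fini(), which own a single intel_fbdev hung off dev_priv. A sketch of the pairing, with calloc/free standing in for kzalloc/kfree and the drm_fb_helper calls elided:

#include <stdlib.h>

struct intel_fbdev { int cookie; };
struct dev_priv { struct intel_fbdev *fbdev; };

static int fbdev_init(struct dev_priv *p)
{
    p->fbdev = calloc(1, sizeof(*p->fbdev));
    if (!p->fbdev)
        return -1;                  /* -ENOMEM in the driver */
    /* ... drm_fb_helper_init(), add connectors, initial config ... */
    return 0;
}

static void fbdev_fini(struct dev_priv *p)
{
    if (!p->fbdev)
        return;                     /* tolerate fini without init */
    /* ... intel_fbdev_destroy() tears down the helper first ... */
    free(p->fbdev);
    p->fbdev = NULL;
}

int main(void)
{
    struct dev_priv p = { 0 };
    if (fbdev_init(&p) == 0)
        fbdev_fini(&p);
    return 0;
}
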
diff --git a/drivers/gpu/drm/i915/intel_dvo.c b/drivers/gpu/drm/i915/intel_dvo.c
index ebf213c96b9..227feca7cf8 100644
--- a/drivers/gpu/drm/i915/intel_dvo.c
+++ b/drivers/gpu/drm/i915/intel_dvo.c
@@ -96,39 +96,11 @@ static void intel_dvo_dpms(struct drm_encoder *encoder, int mode)
}
}
-static void intel_dvo_save(struct drm_connector *connector)
-{
- struct drm_i915_private *dev_priv = connector->dev->dev_private;
- struct intel_encoder *intel_encoder = to_intel_encoder(connector);
- struct intel_dvo_device *dvo = intel_encoder->dev_priv;
-
- /* Each output should probably just save the registers it touches,
- * but for now, use more overkill.
- */
- dev_priv->saveDVOA = I915_READ(DVOA);
- dev_priv->saveDVOB = I915_READ(DVOB);
- dev_priv->saveDVOC = I915_READ(DVOC);
-
- dvo->dev_ops->save(dvo);
-}
-
-static void intel_dvo_restore(struct drm_connector *connector)
-{
- struct drm_i915_private *dev_priv = connector->dev->dev_private;
- struct intel_encoder *intel_encoder = to_intel_encoder(connector);
- struct intel_dvo_device *dvo = intel_encoder->dev_priv;
-
- dvo->dev_ops->restore(dvo);
-
- I915_WRITE(DVOA, dev_priv->saveDVOA);
- I915_WRITE(DVOB, dev_priv->saveDVOB);
- I915_WRITE(DVOC, dev_priv->saveDVOC);
-}
-
static int intel_dvo_mode_valid(struct drm_connector *connector,
struct drm_display_mode *mode)
{
- struct intel_encoder *intel_encoder = to_intel_encoder(connector);
+ struct drm_encoder *encoder = intel_attached_encoder(connector);
+ struct intel_encoder *intel_encoder = enc_to_intel_encoder(encoder);
struct intel_dvo_device *dvo = intel_encoder->dev_priv;
if (mode->flags & DRM_MODE_FLAG_DBLSCAN)
@@ -241,7 +213,8 @@ static void intel_dvo_mode_set(struct drm_encoder *encoder,
*/
static enum drm_connector_status intel_dvo_detect(struct drm_connector *connector)
{
- struct intel_encoder *intel_encoder = to_intel_encoder(connector);
+ struct drm_encoder *encoder = intel_attached_encoder(connector);
+ struct intel_encoder *intel_encoder = enc_to_intel_encoder(encoder);
struct intel_dvo_device *dvo = intel_encoder->dev_priv;
return dvo->dev_ops->detect(dvo);
@@ -249,7 +222,8 @@ static enum drm_connector_status intel_dvo_detect(struct drm_connector *connecto
@@ -249,7 +222,8 @@ static enum drm_connector_status intel_dvo_detect(struct drm_connector *connector)
static int intel_dvo_get_modes(struct drm_connector *connector)
{
- struct intel_encoder *intel_encoder = to_intel_encoder(connector);
+ struct drm_encoder *encoder = intel_attached_encoder(connector);
+ struct intel_encoder *intel_encoder = enc_to_intel_encoder(encoder);
struct intel_dvo_device *dvo = intel_encoder->dev_priv;
/* We should probably have an i2c driver get_modes function for those
@@ -257,7 +231,7 @@ static int intel_dvo_get_modes(struct drm_connector *connector)
* (TV-out, for example), but for now with just TMDS and LVDS,
* that's not the case.
*/
- intel_ddc_get_modes(intel_encoder);
+ intel_ddc_get_modes(connector, intel_encoder->ddc_bus);
if (!list_empty(&connector->probed_modes))
return 1;
@@ -275,38 +249,10 @@ static int intel_dvo_get_modes(struct drm_connector *connector)
static void intel_dvo_destroy (struct drm_connector *connector)
{
- struct intel_encoder *intel_encoder = to_intel_encoder(connector);
- struct intel_dvo_device *dvo = intel_encoder->dev_priv;
-
- if (dvo) {
- if (dvo->dev_ops->destroy)
- dvo->dev_ops->destroy(dvo);
- if (dvo->panel_fixed_mode)
- kfree(dvo->panel_fixed_mode);
- /* no need, in i830_dvoices[] now */
- //kfree(dvo);
- }
- if (intel_encoder->i2c_bus)
- intel_i2c_destroy(intel_encoder->i2c_bus);
- if (intel_encoder->ddc_bus)
- intel_i2c_destroy(intel_encoder->ddc_bus);
drm_sysfs_connector_remove(connector);
drm_connector_cleanup(connector);
- kfree(intel_encoder);
-}
-
-#ifdef RANDR_GET_CRTC_INTERFACE
-static struct drm_crtc *intel_dvo_get_crtc(struct drm_connector *connector)
-{
- struct drm_device *dev = connector->dev;
- struct drm_i915_private *dev_priv = dev->dev_private;
- struct intel_encoder *intel_encoder = to_intel_encoder(connector);
- struct intel_dvo_device *dvo = intel_encoder->dev_priv;
- int pipe = !!(I915_READ(dvo->dvo_reg) & SDVO_PIPE_B_SELECT);
-
- return intel_pipe_to_crtc(pScrn, pipe);
+ kfree(connector);
}
-#endif
static const struct drm_encoder_helper_funcs intel_dvo_helper_funcs = {
.dpms = intel_dvo_dpms,
@@ -318,8 +264,6 @@ static const struct drm_encoder_helper_funcs intel_dvo_helper_funcs = {
static const struct drm_connector_funcs intel_dvo_connector_funcs = {
.dpms = drm_helper_connector_dpms,
- .save = intel_dvo_save,
- .restore = intel_dvo_restore,
.detect = intel_dvo_detect,
.destroy = intel_dvo_destroy,
.fill_modes = drm_helper_probe_single_connector_modes,
@@ -328,12 +272,26 @@ static const struct drm_connector_funcs intel_dvo_connector_funcs = {
static const struct drm_connector_helper_funcs intel_dvo_connector_helper_funcs = {
.mode_valid = intel_dvo_mode_valid,
.get_modes = intel_dvo_get_modes,
- .best_encoder = intel_best_encoder,
+ .best_encoder = intel_attached_encoder,
};
static void intel_dvo_enc_destroy(struct drm_encoder *encoder)
{
+ struct intel_encoder *intel_encoder = enc_to_intel_encoder(encoder);
+ struct intel_dvo_device *dvo = intel_encoder->dev_priv;
+
+ if (dvo) {
+ if (dvo->dev_ops->destroy)
+ dvo->dev_ops->destroy(dvo);
+ if (dvo->panel_fixed_mode)
+ kfree(dvo->panel_fixed_mode);
+ }
+ if (intel_encoder->i2c_bus)
+ intel_i2c_destroy(intel_encoder->i2c_bus);
+ if (intel_encoder->ddc_bus)
+ intel_i2c_destroy(intel_encoder->ddc_bus);
drm_encoder_cleanup(encoder);
+ kfree(intel_encoder);
}
static const struct drm_encoder_funcs intel_dvo_enc_funcs = {
@@ -352,7 +310,8 @@ intel_dvo_get_current_mode (struct drm_connector *connector)
{
struct drm_device *dev = connector->dev;
struct drm_i915_private *dev_priv = dev->dev_private;
- struct intel_encoder *intel_encoder = to_intel_encoder(connector);
+ struct drm_encoder *encoder = intel_attached_encoder(connector);
+ struct intel_encoder *intel_encoder = enc_to_intel_encoder(encoder);
struct intel_dvo_device *dvo = intel_encoder->dev_priv;
uint32_t dvo_reg = dvo->dvo_reg;
uint32_t dvo_val = I915_READ(dvo_reg);
@@ -384,6 +343,7 @@ intel_dvo_get_current_mode (struct drm_connector *connector)
void intel_dvo_init(struct drm_device *dev)
{
struct intel_encoder *intel_encoder;
+ struct intel_connector *intel_connector;
struct intel_dvo_device *dvo;
struct i2c_adapter *i2cbus = NULL;
int ret = 0;
@@ -393,6 +353,12 @@ void intel_dvo_init(struct drm_device *dev)
if (!intel_encoder)
return;
+ intel_connector = kzalloc(sizeof(struct intel_connector), GFP_KERNEL);
+ if (!intel_connector) {
+ kfree(intel_encoder);
+ return;
+ }
+
/* Set up the DDC bus */
intel_encoder->ddc_bus = intel_i2c_create(dev, GPIOD, "DVODDC_D");
if (!intel_encoder->ddc_bus)
@@ -400,7 +366,7 @@ void intel_dvo_init(struct drm_device *dev)
/* Now, try to find a controller */
for (i = 0; i < ARRAY_SIZE(intel_dvo_devices); i++) {
- struct drm_connector *connector = &intel_encoder->base;
+ struct drm_connector *connector = &intel_connector->base;
int gpio;
dvo = &intel_dvo_devices[i];
@@ -471,7 +437,7 @@ void intel_dvo_init(struct drm_device *dev)
drm_encoder_helper_add(&intel_encoder->enc,
&intel_dvo_helper_funcs);
- drm_mode_connector_attach_encoder(&intel_encoder->base,
+ drm_mode_connector_attach_encoder(&intel_connector->base,
&intel_encoder->enc);
if (dvo->type == INTEL_DVO_CHIP_LVDS) {
/* For our LVDS chipsets, we should hopefully be able
@@ -496,4 +462,5 @@ void intel_dvo_init(struct drm_device *dev)
intel_i2c_destroy(i2cbus);
free_intel:
kfree(intel_encoder);
+ kfree(intel_connector);
}
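
[annotation] A pattern repeated across these connector files: init now allocates two objects, the encoder and the connector, and every failure path must release both — hence the new kfree(intel_connector) on the free_intel label above. Reduced to its core, with calloc standing in for kzalloc:

#include <stdlib.h>

struct intel_encoder { int a; };
struct intel_connector { int b; };

static int dvo_init(void)
{
    struct intel_encoder *enc = calloc(1, sizeof(*enc));
    struct intel_connector *con;

    if (!enc)
        return -1;
    con = calloc(1, sizeof(*con));
    if (!con) {
        free(enc);          /* never leak the first object */
        return -1;
    }
    /* ... probe controllers; any later failure must free both ... */
    free(con);              /* freed here only because this sketch */
    free(enc);              /* registers nothing with a core       */
    return 0;
}

int main(void) { return dvo_init(); }
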
diff --git a/drivers/gpu/drm/i915/intel_fb.c b/drivers/gpu/drm/i915/intel_fb.c
index 8a0b3bcdc7b..6f53cf7fbc5 100644
--- a/drivers/gpu/drm/i915/intel_fb.c
+++ b/drivers/gpu/drm/i915/intel_fb.c
@@ -44,9 +44,10 @@
#include "i915_drm.h"
#include "i915_drv.h"
-struct intelfb_par {
+struct intel_fbdev {
struct drm_fb_helper helper;
- struct intel_framebuffer *intel_fb;
+ struct intel_framebuffer ifb;
+ struct list_head fbdev_list;
struct drm_display_mode *our_mode;
};
@@ -54,7 +55,6 @@ static struct fb_ops intelfb_ops = {
.owner = THIS_MODULE,
.fb_check_var = drm_fb_helper_check_var,
.fb_set_par = drm_fb_helper_set_par,
- .fb_setcolreg = drm_fb_helper_setcolreg,
.fb_fillrect = cfb_fillrect,
.fb_copyarea = cfb_copyarea,
.fb_imageblit = cfb_imageblit,
@@ -63,62 +63,12 @@ static struct fb_ops intelfb_ops = {
.fb_setcmap = drm_fb_helper_setcmap,
};
-static struct drm_fb_helper_funcs intel_fb_helper_funcs = {
- .gamma_set = intel_crtc_fb_gamma_set,
- .gamma_get = intel_crtc_fb_gamma_get,
-};
-
-
-/**
- * Currently it is assumed that the old framebuffer is reused.
- *
- * LOCKING
- * caller should hold the mode config lock.
- *
- */
-int intelfb_resize(struct drm_device *dev, struct drm_crtc *crtc)
-{
- struct fb_info *info;
- struct drm_framebuffer *fb;
- struct drm_display_mode *mode = crtc->desired_mode;
-
- fb = crtc->fb;
- if (!fb)
- return 1;
-
- info = fb->fbdev;
- if (!info)
- return 1;
-
- if (!mode)
- return 1;
-
- info->var.xres = mode->hdisplay;
- info->var.right_margin = mode->hsync_start - mode->hdisplay;
- info->var.hsync_len = mode->hsync_end - mode->hsync_start;
- info->var.left_margin = mode->htotal - mode->hsync_end;
- info->var.yres = mode->vdisplay;
- info->var.lower_margin = mode->vsync_start - mode->vdisplay;
- info->var.vsync_len = mode->vsync_end - mode->vsync_start;
- info->var.upper_margin = mode->vtotal - mode->vsync_end;
- info->var.pixclock = 10000000 / mode->htotal * 1000 / mode->vtotal * 100;
- /* avoid overflow */
- info->var.pixclock = info->var.pixclock * 1000 / mode->vrefresh;
-
- return 0;
-}
-EXPORT_SYMBOL(intelfb_resize);
-
-static int intelfb_create(struct drm_device *dev, uint32_t fb_width,
- uint32_t fb_height, uint32_t surface_width,
- uint32_t surface_height,
- uint32_t surface_depth, uint32_t surface_bpp,
- struct drm_framebuffer **fb_p)
+static int intelfb_create(struct intel_fbdev *ifbdev,
+ struct drm_fb_helper_surface_size *sizes)
{
+ struct drm_device *dev = ifbdev->helper.dev;
struct fb_info *info;
- struct intelfb_par *par;
struct drm_framebuffer *fb;
- struct intel_framebuffer *intel_fb;
struct drm_mode_fb_cmd mode_cmd;
struct drm_gem_object *fbo = NULL;
struct drm_i915_gem_object *obj_priv;
@@ -126,19 +76,19 @@ static int intelfb_create(struct drm_device *dev, uint32_t fb_width,
int size, ret, mmio_bar = IS_I9XX(dev) ? 0 : 1;
/* we don't do packed 24bpp */
- if (surface_bpp == 24)
- surface_bpp = 32;
+ if (sizes->surface_bpp == 24)
+ sizes->surface_bpp = 32;
- mode_cmd.width = surface_width;
- mode_cmd.height = surface_height;
+ mode_cmd.width = sizes->surface_width;
+ mode_cmd.height = sizes->surface_height;
- mode_cmd.bpp = surface_bpp;
+ mode_cmd.bpp = sizes->surface_bpp;
mode_cmd.pitch = ALIGN(mode_cmd.width * ((mode_cmd.bpp + 1) / 8), 64);
- mode_cmd.depth = surface_depth;
+ mode_cmd.depth = sizes->surface_depth;
size = mode_cmd.pitch * mode_cmd.height;
size = ALIGN(size, PAGE_SIZE);
- fbo = drm_gem_object_alloc(dev, size);
+ fbo = i915_gem_alloc_object(dev, size);
if (!fbo) {
DRM_ERROR("failed to allocate framebuffer\n");
ret = -ENOMEM;
@@ -157,45 +107,37 @@ static int intelfb_create(struct drm_device *dev, uint32_t fb_width,
/* Flush everything out, we'll be doing GTT only from now on */
i915_gem_object_set_to_gtt_domain(fbo, 1);
- ret = intel_framebuffer_create(dev, &mode_cmd, &fb, fbo);
- if (ret) {
- DRM_ERROR("failed to allocate fb.\n");
- goto out_unpin;
- }
-
- list_add(&fb->filp_head, &dev->mode_config.fb_kernel_list);
-
- intel_fb = to_intel_framebuffer(fb);
- *fb_p = fb;
-
- info = framebuffer_alloc(sizeof(struct intelfb_par), device);
+ info = framebuffer_alloc(0, device);
if (!info) {
ret = -ENOMEM;
goto out_unpin;
}
- par = info->par;
+ info->par = ifbdev;
- par->helper.funcs = &intel_fb_helper_funcs;
- par->helper.dev = dev;
- ret = drm_fb_helper_init_crtc_count(&par->helper, 2,
- INTELFB_CONN_LIMIT);
- if (ret)
- goto out_unref;
+ intel_framebuffer_init(dev, &ifbdev->ifb, &mode_cmd, fbo);
+
+ fb = &ifbdev->ifb.base;
+
+ ifbdev->helper.fb = fb;
+ ifbdev->helper.fbdev = info;
strcpy(info->fix.id, "inteldrmfb");
info->flags = FBINFO_DEFAULT;
-
info->fbops = &intelfb_ops;
-
/* setup aperture base/size for vesafb takeover */
- info->aperture_base = dev->mode_config.fb_base;
+ info->apertures = alloc_apertures(1);
+ if (!info->apertures) {
+ ret = -ENOMEM;
+ goto out_unpin;
+ }
+ info->apertures->ranges[0].base = dev->mode_config.fb_base;
if (IS_I9XX(dev))
- info->aperture_size = pci_resource_len(dev->pdev, 2);
+ info->apertures->ranges[0].size = pci_resource_len(dev->pdev, 2);
else
- info->aperture_size = pci_resource_len(dev->pdev, 0);
+ info->apertures->ranges[0].size = pci_resource_len(dev->pdev, 0);
info->fix.smem_start = dev->mode_config.fb_base + obj_priv->gtt_offset;
info->fix.smem_len = size;
@@ -208,12 +150,18 @@ static int intelfb_create(struct drm_device *dev, uint32_t fb_width,
ret = -ENOSPC;
goto out_unpin;
}
+
+ ret = fb_alloc_cmap(&info->cmap, 256, 0);
+ if (ret) {
+ ret = -ENOMEM;
+ goto out_unpin;
+ }
info->screen_size = size;
// memset(info->screen_base, 0, size);
drm_fb_helper_fill_fix(info, fb->pitch, fb->depth);
- drm_fb_helper_fill_var(info, fb, fb_width, fb_height);
+ drm_fb_helper_fill_var(info, &ifbdev->helper, sizes->fb_width, sizes->fb_height);
/* FIXME: we really shouldn't expose mmio space at all */
info->fix.mmio_start = pci_resource_start(dev->pdev, mmio_bar);
@@ -225,14 +173,10 @@ static int intelfb_create(struct drm_device *dev, uint32_t fb_width,
info->pixmap.flags = FB_PIXMAP_SYSTEM;
info->pixmap.scan_align = 1;
- fb->fbdev = info;
-
- par->intel_fb = intel_fb;
-
- /* To allow resizeing without swapping buffers */
DRM_DEBUG_KMS("allocated %dx%d fb: 0x%08x, bo %p\n",
- intel_fb->base.width, intel_fb->base.height,
- obj_priv->gtt_offset, fbo);
+ fb->width, fb->height,
+ obj_priv->gtt_offset, fbo);
+
mutex_unlock(&dev->struct_mutex);
vga_switcheroo_client_fb_set(dev->pdev, info);
@@ -247,35 +191,86 @@ out:
return ret;
}
-int intelfb_probe(struct drm_device *dev)
+static int intel_fb_find_or_create_single(struct drm_fb_helper *helper,
+ struct drm_fb_helper_surface_size *sizes)
{
+ struct intel_fbdev *ifbdev = (struct intel_fbdev *)helper;
+ int new_fb = 0;
int ret;
- DRM_DEBUG_KMS("\n");
- ret = drm_fb_helper_single_fb_probe(dev, 32, intelfb_create);
- return ret;
+ if (!helper->fb) {
+ ret = intelfb_create(ifbdev, sizes);
+ if (ret)
+ return ret;
+ new_fb = 1;
+ }
+ return new_fb;
}
-EXPORT_SYMBOL(intelfb_probe);
-int intelfb_remove(struct drm_device *dev, struct drm_framebuffer *fb)
+static struct drm_fb_helper_funcs intel_fb_helper_funcs = {
+ .gamma_set = intel_crtc_fb_gamma_set,
+ .gamma_get = intel_crtc_fb_gamma_get,
+ .fb_probe = intel_fb_find_or_create_single,
+};
+
+int intel_fbdev_destroy(struct drm_device *dev,
+ struct intel_fbdev *ifbdev)
{
struct fb_info *info;
+ struct intel_framebuffer *ifb = &ifbdev->ifb;
- if (!fb)
- return -EINVAL;
-
- info = fb->fbdev;
-
- if (info) {
- struct intelfb_par *par = info->par;
+ if (ifbdev->helper.fbdev) {
+ info = ifbdev->helper.fbdev;
unregister_framebuffer(info);
iounmap(info->screen_base);
- if (info->par)
- drm_fb_helper_free(&par->helper);
+ if (info->cmap.len)
+ fb_dealloc_cmap(&info->cmap);
framebuffer_release(info);
}
+ drm_fb_helper_fini(&ifbdev->helper);
+
+ drm_framebuffer_cleanup(&ifb->base);
+ if (ifb->obj)
+ drm_gem_object_unreference_unlocked(ifb->obj);
+
+ return 0;
+}
+
+int intel_fbdev_init(struct drm_device *dev)
+{
+ struct intel_fbdev *ifbdev;
+ drm_i915_private_t *dev_priv = dev->dev_private;
+
+ ifbdev = kzalloc(sizeof(struct intel_fbdev), GFP_KERNEL);
+ if (!ifbdev)
+ return -ENOMEM;
+
+ dev_priv->fbdev = ifbdev;
+ ifbdev->helper.funcs = &intel_fb_helper_funcs;
+
+ drm_fb_helper_init(dev, &ifbdev->helper, 2,
+ INTELFB_CONN_LIMIT);
+
+ drm_fb_helper_single_add_all_connectors(&ifbdev->helper);
+ drm_fb_helper_initial_config(&ifbdev->helper, 32);
return 0;
}
-EXPORT_SYMBOL(intelfb_remove);
+
+void intel_fbdev_fini(struct drm_device *dev)
+{
+ drm_i915_private_t *dev_priv = dev->dev_private;
+ if (!dev_priv->fbdev)
+ return;
+
+ intel_fbdev_destroy(dev, dev_priv->fbdev);
+ kfree(dev_priv->fbdev);
+ dev_priv->fbdev = NULL;
+}
MODULE_LICENSE("GPL and additional rights");
+
+void intel_fb_output_poll_changed(struct drm_device *dev)
+{
+ drm_i915_private_t *dev_priv = dev->dev_private;
+ drm_fb_helper_hotplug_event(&dev_priv->fbdev->helper);
+}
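
[annotation] The intel_fb.c rework moves framebuffer creation behind the drm_fb_helper .fb_probe hook: intel_fb_find_or_create_single() returns 1 when it just built a framebuffer and 0 when the existing one is reused, and the helper registers a new fb_info only in the first case. The contract, sketched without any of the GEM plumbing:

struct fb { int width, height; };
struct helper { struct fb *fb; };

static struct fb initial_fb = { 1024, 768 };

/* Returns <0 on error, 1 if a framebuffer was created, 0 if reused. */
static int find_or_create_single(struct helper *h)
{
    if (h->fb)
        return 0;              /* keep the existing framebuffer */
    h->fb = &initial_fb;       /* stand-in for intelfb_create() */
    return 1;                  /* helper will register a new fb_info */
}

int main(void)
{
    struct helper h = { 0 };
    int first = find_or_create_single(&h);   /* 1: created */
    int second = find_or_create_single(&h);  /* 0: reused  */
    return (first == 1 && second == 0) ? 0 : 1;
}
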
diff --git a/drivers/gpu/drm/i915/intel_hdmi.c b/drivers/gpu/drm/i915/intel_hdmi.c
index 48cade0cf7b..65727f0a79a 100644
--- a/drivers/gpu/drm/i915/intel_hdmi.c
+++ b/drivers/gpu/drm/i915/intel_hdmi.c
@@ -39,7 +39,6 @@
struct intel_hdmi_priv {
u32 sdvox_reg;
- u32 save_SDVOX;
bool has_hdmi_sink;
};
@@ -63,8 +62,12 @@ static void intel_hdmi_mode_set(struct drm_encoder *encoder,
if (hdmi_priv->has_hdmi_sink)
sdvox |= SDVO_AUDIO_ENABLE;
- if (intel_crtc->pipe == 1)
- sdvox |= SDVO_PIPE_B_SELECT;
+ if (intel_crtc->pipe == 1) {
+ if (HAS_PCH_CPT(dev))
+ sdvox |= PORT_TRANS_B_SEL_CPT;
+ else
+ sdvox |= SDVO_PIPE_B_SELECT;
+ }
I915_WRITE(hdmi_priv->sdvox_reg, sdvox);
POSTING_READ(hdmi_priv->sdvox_reg);
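
[annotation] Same CPT split as on the DP side: with a Cougar Point PCH the HDMI/SDVO port is routed to pipe B through a transcoder-select field rather than the legacy SDVO_PIPE_B_SELECT bit. Sketch with illustrative bit positions (the real ones are in i915_reg.h):

#include <stdbool.h>
#include <stdint.h>
#include <stdio.h>

/* Illustrative bits only. */
#define SDVO_PIPE_B_SELECT    (1u << 30)
#define PORT_TRANS_B_SEL_CPT  (1u << 29)

static uint32_t route_pipe_b(uint32_t sdvox, bool has_pch_cpt)
{
    /* CPT selects the transcoder; older parts select the pipe directly. */
    return sdvox | (has_pch_cpt ? PORT_TRANS_B_SEL_CPT : SDVO_PIPE_B_SELECT);
}

int main(void)
{
    printf("0x%08x\n", route_pipe_b(0, true));
    return 0;
}
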
@@ -106,27 +109,6 @@ static void intel_hdmi_dpms(struct drm_encoder *encoder, int mode)
}
}
-static void intel_hdmi_save(struct drm_connector *connector)
-{
- struct drm_device *dev = connector->dev;
- struct drm_i915_private *dev_priv = dev->dev_private;
- struct intel_encoder *intel_encoder = to_intel_encoder(connector);
- struct intel_hdmi_priv *hdmi_priv = intel_encoder->dev_priv;
-
- hdmi_priv->save_SDVOX = I915_READ(hdmi_priv->sdvox_reg);
-}
-
-static void intel_hdmi_restore(struct drm_connector *connector)
-{
- struct drm_device *dev = connector->dev;
- struct drm_i915_private *dev_priv = dev->dev_private;
- struct intel_encoder *intel_encoder = to_intel_encoder(connector);
- struct intel_hdmi_priv *hdmi_priv = intel_encoder->dev_priv;
-
- I915_WRITE(hdmi_priv->sdvox_reg, hdmi_priv->save_SDVOX);
- POSTING_READ(hdmi_priv->sdvox_reg);
-}
-
static int intel_hdmi_mode_valid(struct drm_connector *connector,
struct drm_display_mode *mode)
{
@@ -151,13 +133,14 @@ static bool intel_hdmi_mode_fixup(struct drm_encoder *encoder,
static enum drm_connector_status
intel_hdmi_detect(struct drm_connector *connector)
{
- struct intel_encoder *intel_encoder = to_intel_encoder(connector);
+ struct drm_encoder *encoder = intel_attached_encoder(connector);
+ struct intel_encoder *intel_encoder = enc_to_intel_encoder(encoder);
struct intel_hdmi_priv *hdmi_priv = intel_encoder->dev_priv;
struct edid *edid = NULL;
enum drm_connector_status status = connector_status_disconnected;
hdmi_priv->has_hdmi_sink = false;
- edid = drm_get_edid(&intel_encoder->base,
+ edid = drm_get_edid(connector,
intel_encoder->ddc_bus);
if (edid) {
@@ -165,7 +148,7 @@ intel_hdmi_detect(struct drm_connector *connector)
status = connector_status_connected;
hdmi_priv->has_hdmi_sink = drm_detect_hdmi_monitor(edid);
}
- intel_encoder->base.display_info.raw_edid = NULL;
+ connector->display_info.raw_edid = NULL;
kfree(edid);
}
@@ -174,24 +157,21 @@ intel_hdmi_detect(struct drm_connector *connector)
static int intel_hdmi_get_modes(struct drm_connector *connector)
{
- struct intel_encoder *intel_encoder = to_intel_encoder(connector);
+ struct drm_encoder *encoder = intel_attached_encoder(connector);
+ struct intel_encoder *intel_encoder = enc_to_intel_encoder(encoder);
/* We should parse the EDID data and find out if it's an HDMI sink so
* we can send audio to it.
*/
- return intel_ddc_get_modes(intel_encoder);
+ return intel_ddc_get_modes(connector, intel_encoder->ddc_bus);
}
static void intel_hdmi_destroy(struct drm_connector *connector)
{
- struct intel_encoder *intel_encoder = to_intel_encoder(connector);
-
- if (intel_encoder->i2c_bus)
- intel_i2c_destroy(intel_encoder->i2c_bus);
drm_sysfs_connector_remove(connector);
drm_connector_cleanup(connector);
- kfree(intel_encoder);
+ kfree(connector);
}
static const struct drm_encoder_helper_funcs intel_hdmi_helper_funcs = {
@@ -204,8 +184,6 @@ static const struct drm_encoder_helper_funcs intel_hdmi_helper_funcs = {
static const struct drm_connector_funcs intel_hdmi_connector_funcs = {
.dpms = drm_helper_connector_dpms,
- .save = intel_hdmi_save,
- .restore = intel_hdmi_restore,
.detect = intel_hdmi_detect,
.fill_modes = drm_helper_probe_single_connector_modes,
.destroy = intel_hdmi_destroy,
@@ -214,12 +192,17 @@ static const struct drm_connector_funcs intel_hdmi_connector_funcs = {
static const struct drm_connector_helper_funcs intel_hdmi_connector_helper_funcs = {
.get_modes = intel_hdmi_get_modes,
.mode_valid = intel_hdmi_mode_valid,
- .best_encoder = intel_best_encoder,
+ .best_encoder = intel_attached_encoder,
};
static void intel_hdmi_enc_destroy(struct drm_encoder *encoder)
{
+ struct intel_encoder *intel_encoder = enc_to_intel_encoder(encoder);
+
+ if (intel_encoder->i2c_bus)
+ intel_i2c_destroy(intel_encoder->i2c_bus);
drm_encoder_cleanup(encoder);
+ kfree(intel_encoder);
}
static const struct drm_encoder_funcs intel_hdmi_enc_funcs = {
@@ -231,21 +214,30 @@ void intel_hdmi_init(struct drm_device *dev, int sdvox_reg)
struct drm_i915_private *dev_priv = dev->dev_private;
struct drm_connector *connector;
struct intel_encoder *intel_encoder;
+ struct intel_connector *intel_connector;
struct intel_hdmi_priv *hdmi_priv;
intel_encoder = kcalloc(sizeof(struct intel_encoder) +
sizeof(struct intel_hdmi_priv), 1, GFP_KERNEL);
if (!intel_encoder)
return;
+
+ intel_connector = kzalloc(sizeof(struct intel_connector), GFP_KERNEL);
+ if (!intel_connector) {
+ kfree(intel_encoder);
+ return;
+ }
+
hdmi_priv = (struct intel_hdmi_priv *)(intel_encoder + 1);
- connector = &intel_encoder->base;
+ connector = &intel_connector->base;
drm_connector_init(dev, connector, &intel_hdmi_connector_funcs,
DRM_MODE_CONNECTOR_HDMIA);
drm_connector_helper_add(connector, &intel_hdmi_connector_helper_funcs);
intel_encoder->type = INTEL_OUTPUT_HDMI;
+ connector->polled = DRM_CONNECTOR_POLL_HPD;
connector->interlace_allowed = 0;
connector->doublescan_allowed = 0;
intel_encoder->crtc_mask = (1 << 0) | (1 << 1);
@@ -285,7 +277,7 @@ void intel_hdmi_init(struct drm_device *dev, int sdvox_reg)
DRM_MODE_ENCODER_TMDS);
drm_encoder_helper_add(&intel_encoder->enc, &intel_hdmi_helper_funcs);
- drm_mode_connector_attach_encoder(&intel_encoder->base,
+ drm_mode_connector_attach_encoder(&intel_connector->base,
&intel_encoder->enc);
drm_sysfs_connector_add(connector);
@@ -303,6 +295,7 @@ void intel_hdmi_init(struct drm_device *dev, int sdvox_reg)
err_connector:
drm_connector_cleanup(connector);
kfree(intel_encoder);
+ kfree(intel_connector);
return;
}
diff --git a/drivers/gpu/drm/i915/intel_lvds.c b/drivers/gpu/drm/i915/intel_lvds.c
index b66806a37d3..6a1accd83ae 100644
--- a/drivers/gpu/drm/i915/intel_lvds.c
+++ b/drivers/gpu/drm/i915/intel_lvds.c
@@ -139,75 +139,6 @@ static void intel_lvds_dpms(struct drm_encoder *encoder, int mode)
/* XXX: We never power down the LVDS pairs. */
}
-static void intel_lvds_save(struct drm_connector *connector)
-{
- struct drm_device *dev = connector->dev;
- struct drm_i915_private *dev_priv = dev->dev_private;
- u32 pp_on_reg, pp_off_reg, pp_ctl_reg, pp_div_reg;
- u32 pwm_ctl_reg;
-
- if (HAS_PCH_SPLIT(dev)) {
- pp_on_reg = PCH_PP_ON_DELAYS;
- pp_off_reg = PCH_PP_OFF_DELAYS;
- pp_ctl_reg = PCH_PP_CONTROL;
- pp_div_reg = PCH_PP_DIVISOR;
- pwm_ctl_reg = BLC_PWM_CPU_CTL;
- } else {
- pp_on_reg = PP_ON_DELAYS;
- pp_off_reg = PP_OFF_DELAYS;
- pp_ctl_reg = PP_CONTROL;
- pp_div_reg = PP_DIVISOR;
- pwm_ctl_reg = BLC_PWM_CTL;
- }
-
- dev_priv->savePP_ON = I915_READ(pp_on_reg);
- dev_priv->savePP_OFF = I915_READ(pp_off_reg);
- dev_priv->savePP_CONTROL = I915_READ(pp_ctl_reg);
- dev_priv->savePP_DIVISOR = I915_READ(pp_div_reg);
- dev_priv->saveBLC_PWM_CTL = I915_READ(pwm_ctl_reg);
- dev_priv->backlight_duty_cycle = (dev_priv->saveBLC_PWM_CTL &
- BACKLIGHT_DUTY_CYCLE_MASK);
-
- /*
- * If the light is off at server startup, just make it full brightness
- */
- if (dev_priv->backlight_duty_cycle == 0)
- dev_priv->backlight_duty_cycle =
- intel_lvds_get_max_backlight(dev);
-}
-
-static void intel_lvds_restore(struct drm_connector *connector)
-{
- struct drm_device *dev = connector->dev;
- struct drm_i915_private *dev_priv = dev->dev_private;
- u32 pp_on_reg, pp_off_reg, pp_ctl_reg, pp_div_reg;
- u32 pwm_ctl_reg;
-
- if (HAS_PCH_SPLIT(dev)) {
- pp_on_reg = PCH_PP_ON_DELAYS;
- pp_off_reg = PCH_PP_OFF_DELAYS;
- pp_ctl_reg = PCH_PP_CONTROL;
- pp_div_reg = PCH_PP_DIVISOR;
- pwm_ctl_reg = BLC_PWM_CPU_CTL;
- } else {
- pp_on_reg = PP_ON_DELAYS;
- pp_off_reg = PP_OFF_DELAYS;
- pp_ctl_reg = PP_CONTROL;
- pp_div_reg = PP_DIVISOR;
- pwm_ctl_reg = BLC_PWM_CTL;
- }
-
- I915_WRITE(pwm_ctl_reg, dev_priv->saveBLC_PWM_CTL);
- I915_WRITE(pp_on_reg, dev_priv->savePP_ON);
- I915_WRITE(pp_off_reg, dev_priv->savePP_OFF);
- I915_WRITE(pp_div_reg, dev_priv->savePP_DIVISOR);
- I915_WRITE(pp_ctl_reg, dev_priv->savePP_CONTROL);
- if (dev_priv->savePP_CONTROL & POWER_TARGET_ON)
- intel_lvds_set_power(dev, true);
- else
- intel_lvds_set_power(dev, false);
-}
-
static int intel_lvds_mode_valid(struct drm_connector *connector,
struct drm_display_mode *mode)
{
@@ -635,12 +566,13 @@ static enum drm_connector_status intel_lvds_detect(struct drm_connector *connect
static int intel_lvds_get_modes(struct drm_connector *connector)
{
struct drm_device *dev = connector->dev;
- struct intel_encoder *intel_encoder = to_intel_encoder(connector);
+ struct drm_encoder *encoder = intel_attached_encoder(connector);
+ struct intel_encoder *intel_encoder = enc_to_intel_encoder(encoder);
struct drm_i915_private *dev_priv = dev->dev_private;
int ret = 0;
if (dev_priv->lvds_edid_good) {
- ret = intel_ddc_get_modes(intel_encoder);
+ ret = intel_ddc_get_modes(connector, intel_encoder->ddc_bus);
if (ret)
return ret;
@@ -717,11 +649,8 @@ static int intel_lid_notify(struct notifier_block *nb, unsigned long val,
static void intel_lvds_destroy(struct drm_connector *connector)
{
struct drm_device *dev = connector->dev;
- struct intel_encoder *intel_encoder = to_intel_encoder(connector);
struct drm_i915_private *dev_priv = dev->dev_private;
- if (intel_encoder->ddc_bus)
- intel_i2c_destroy(intel_encoder->ddc_bus);
if (dev_priv->lid_notifier.notifier_call)
acpi_lid_notifier_unregister(&dev_priv->lid_notifier);
drm_sysfs_connector_remove(connector);
@@ -734,13 +663,14 @@ static int intel_lvds_set_property(struct drm_connector *connector,
uint64_t value)
{
struct drm_device *dev = connector->dev;
- struct intel_encoder *intel_encoder =
- to_intel_encoder(connector);
if (property == dev->mode_config.scaling_mode_property &&
connector->encoder) {
struct drm_crtc *crtc = connector->encoder->crtc;
+ struct drm_encoder *encoder = connector->encoder;
+ struct intel_encoder *intel_encoder = enc_to_intel_encoder(encoder);
struct intel_lvds_priv *lvds_priv = intel_encoder->dev_priv;
+
if (value == DRM_MODE_SCALE_NONE) {
DRM_DEBUG_KMS("no scaling not supported\n");
return 0;
@@ -774,13 +704,11 @@ static const struct drm_encoder_helper_funcs intel_lvds_helper_funcs = {
static const struct drm_connector_helper_funcs intel_lvds_connector_helper_funcs = {
.get_modes = intel_lvds_get_modes,
.mode_valid = intel_lvds_mode_valid,
- .best_encoder = intel_best_encoder,
+ .best_encoder = intel_attached_encoder,
};
static const struct drm_connector_funcs intel_lvds_connector_funcs = {
.dpms = drm_helper_connector_dpms,
- .save = intel_lvds_save,
- .restore = intel_lvds_restore,
.detect = intel_lvds_detect,
.fill_modes = drm_helper_probe_single_connector_modes,
.set_property = intel_lvds_set_property,
@@ -790,7 +718,12 @@ static const struct drm_connector_funcs intel_lvds_connector_funcs = {
static void intel_lvds_enc_destroy(struct drm_encoder *encoder)
{
+ struct intel_encoder *intel_encoder = enc_to_intel_encoder(encoder);
+
+ if (intel_encoder->ddc_bus)
+ intel_i2c_destroy(intel_encoder->ddc_bus);
drm_encoder_cleanup(encoder);
+ kfree(intel_encoder);
}
static const struct drm_encoder_funcs intel_lvds_enc_funcs = {
@@ -979,6 +912,7 @@ void intel_lvds_init(struct drm_device *dev)
{
struct drm_i915_private *dev_priv = dev->dev_private;
struct intel_encoder *intel_encoder;
+ struct intel_connector *intel_connector;
struct drm_connector *connector;
struct drm_encoder *encoder;
struct drm_display_mode *scan; /* *modes, *bios_mode; */
@@ -1012,19 +946,27 @@ void intel_lvds_init(struct drm_device *dev)
return;
}
- connector = &intel_encoder->base;
+ intel_connector = kzalloc(sizeof(struct intel_connector), GFP_KERNEL);
+ if (!intel_connector) {
+ kfree(intel_encoder);
+ return;
+ }
+
+ connector = &intel_connector->base;
encoder = &intel_encoder->enc;
- drm_connector_init(dev, &intel_encoder->base, &intel_lvds_connector_funcs,
+ drm_connector_init(dev, &intel_connector->base, &intel_lvds_connector_funcs,
DRM_MODE_CONNECTOR_LVDS);
drm_encoder_init(dev, &intel_encoder->enc, &intel_lvds_enc_funcs,
DRM_MODE_ENCODER_LVDS);
- drm_mode_connector_attach_encoder(&intel_encoder->base, &intel_encoder->enc);
+ drm_mode_connector_attach_encoder(&intel_connector->base, &intel_encoder->enc);
intel_encoder->type = INTEL_OUTPUT_LVDS;
intel_encoder->clone_mask = (1 << INTEL_LVDS_CLONE_BIT);
intel_encoder->crtc_mask = (1 << 1);
+ if (IS_I965G(dev))
+ intel_encoder->crtc_mask |= (1 << 0);
drm_encoder_helper_add(encoder, &intel_lvds_helper_funcs);
drm_connector_helper_add(connector, &intel_lvds_connector_helper_funcs);
connector->display_info.subpixel_order = SubPixelHorizontalRGB;
@@ -1039,7 +981,7 @@ void intel_lvds_init(struct drm_device *dev)
* the initial panel fitting mode will be FULL_SCREEN.
*/
- drm_connector_attach_property(&intel_encoder->base,
+ drm_connector_attach_property(&intel_connector->base,
dev->mode_config.scaling_mode_property,
DRM_MODE_SCALE_FULLSCREEN);
lvds_priv->fitting_mode = DRM_MODE_SCALE_FULLSCREEN;
@@ -1067,7 +1009,7 @@ void intel_lvds_init(struct drm_device *dev)
*/
dev_priv->lvds_edid_good = true;
- if (!intel_ddc_get_modes(intel_encoder))
+ if (!intel_ddc_get_modes(connector, intel_encoder->ddc_bus))
dev_priv->lvds_edid_good = false;
list_for_each_entry(scan, &connector->probed_modes, head) {
@@ -1151,4 +1093,5 @@ failed:
drm_connector_cleanup(connector);
drm_encoder_cleanup(encoder);
kfree(intel_encoder);
+ kfree(intel_connector);
}
diff --git a/drivers/gpu/drm/i915/intel_modes.c b/drivers/gpu/drm/i915/intel_modes.c
index 8e5c83b2d12..4b1fd3d9c73 100644
--- a/drivers/gpu/drm/i915/intel_modes.c
+++ b/drivers/gpu/drm/i915/intel_modes.c
@@ -54,9 +54,9 @@ bool intel_ddc_probe(struct intel_encoder *intel_encoder)
}
};
- intel_i2c_quirk_set(intel_encoder->base.dev, true);
+ intel_i2c_quirk_set(intel_encoder->enc.dev, true);
ret = i2c_transfer(intel_encoder->ddc_bus, msgs, 2);
- intel_i2c_quirk_set(intel_encoder->base.dev, false);
+ intel_i2c_quirk_set(intel_encoder->enc.dev, false);
if (ret == 2)
return true;
@@ -66,22 +66,23 @@ bool intel_ddc_probe(struct intel_encoder *intel_encoder)
/**
* intel_ddc_get_modes - get modelist from monitor
* @connector: DRM connector device to use
+ * @adapter: i2c adapter
*
* Fetch the EDID information from @connector using the DDC bus.
*/
-int intel_ddc_get_modes(struct intel_encoder *intel_encoder)
+int intel_ddc_get_modes(struct drm_connector *connector,
+ struct i2c_adapter *adapter)
{
struct edid *edid;
int ret = 0;
- intel_i2c_quirk_set(intel_encoder->base.dev, true);
- edid = drm_get_edid(&intel_encoder->base, intel_encoder->ddc_bus);
- intel_i2c_quirk_set(intel_encoder->base.dev, false);
+ intel_i2c_quirk_set(connector->dev, true);
+ edid = drm_get_edid(connector, adapter);
+ intel_i2c_quirk_set(connector->dev, false);
if (edid) {
- drm_mode_connector_update_edid_property(&intel_encoder->base,
- edid);
- ret = drm_add_edid_modes(&intel_encoder->base, edid);
- intel_encoder->base.display_info.raw_edid = NULL;
+ drm_mode_connector_update_edid_property(connector, edid);
+ ret = drm_add_edid_modes(connector, edid);
+ connector->display_info.raw_edid = NULL;
kfree(edid);
}
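
[annotation] intel_ddc_get_modes() now takes the connector plus an explicit i2c adapter, so a caller can probe any DDC bus (e.g. the Mac mini's shared analog bus in the SDVO code) on behalf of any connector. Underneath, drm_get_edid() validates the EDID base block; the two checks every base block must pass — the fixed 8-byte header and the zero checksum — are simple enough to show standalone:

#include <stdbool.h>
#include <stdint.h>
#include <string.h>

/* Every EDID base block starts with this fixed 8-byte signature. */
static const uint8_t edid_header[8] =
    { 0x00, 0xff, 0xff, 0xff, 0xff, 0xff, 0xff, 0x00 };

static bool edid_base_block_ok(const uint8_t blk[128])
{
    uint8_t sum = 0;

    if (memcmp(blk, edid_header, sizeof(edid_header)) != 0)
        return false;
    for (int i = 0; i < 128; i++)
        sum += blk[i];
    return sum == 0;    /* all 128 bytes sum to 0 modulo 256 */
}

int main(void)
{
    uint8_t blk[128] = { 0x00, 0xff, 0xff, 0xff, 0xff, 0xff, 0xff, 0x00 };
    blk[127] = 6;       /* checksum byte: makes the block sum to 0 */
    return edid_base_block_ok(blk) ? 0 : 1;
}
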
diff --git a/drivers/gpu/drm/i915/intel_overlay.c b/drivers/gpu/drm/i915/intel_overlay.c
index 6d524a1fc27..b0e17b06eb6 100644
--- a/drivers/gpu/drm/i915/intel_overlay.c
+++ b/drivers/gpu/drm/i915/intel_overlay.c
@@ -373,7 +373,7 @@ static void intel_overlay_off_tail(struct intel_overlay *overlay)
/* never have the overlay hw on without showing a frame */
BUG_ON(!overlay->vid_bo);
- obj = overlay->vid_bo->obj;
+ obj = &overlay->vid_bo->base;
i915_gem_object_unpin(obj);
drm_gem_object_unreference(obj);
@@ -411,7 +411,7 @@ int intel_overlay_recover_from_interrupt(struct intel_overlay *overlay,
switch (overlay->hw_wedged) {
case RELEASE_OLD_VID:
- obj = overlay->old_vid_bo->obj;
+ obj = &overlay->old_vid_bo->base;
i915_gem_object_unpin(obj);
drm_gem_object_unreference(obj);
overlay->old_vid_bo = NULL;
@@ -467,7 +467,7 @@ static int intel_overlay_release_old_vid(struct intel_overlay *overlay)
if (ret != 0)
return ret;
- obj = overlay->old_vid_bo->obj;
+ obj = &overlay->old_vid_bo->base;
i915_gem_object_unpin(obj);
drm_gem_object_unreference(obj);
overlay->old_vid_bo = NULL;
@@ -1341,7 +1341,7 @@ void intel_setup_overlay(struct drm_device *dev)
return;
overlay->dev = dev;
- reg_bo = drm_gem_object_alloc(dev, PAGE_SIZE);
+ reg_bo = i915_gem_alloc_object(dev, PAGE_SIZE);
if (!reg_bo)
goto out_free;
overlay->reg_bo = to_intel_bo(reg_bo);
diff --git a/drivers/gpu/drm/i915/intel_sdvo.c b/drivers/gpu/drm/i915/intel_sdvo.c
index 87d953664cb..aba72c489a2 100644
--- a/drivers/gpu/drm/i915/intel_sdvo.c
+++ b/drivers/gpu/drm/i915/intel_sdvo.c
@@ -36,7 +36,18 @@
#include "i915_drm.h"
#include "i915_drv.h"
#include "intel_sdvo_regs.h"
-#include <linux/dmi.h>
+
+#define SDVO_TMDS_MASK (SDVO_OUTPUT_TMDS0 | SDVO_OUTPUT_TMDS1)
+#define SDVO_RGB_MASK (SDVO_OUTPUT_RGB0 | SDVO_OUTPUT_RGB1)
+#define SDVO_LVDS_MASK (SDVO_OUTPUT_LVDS0 | SDVO_OUTPUT_LVDS1)
+#define SDVO_TV_MASK (SDVO_OUTPUT_CVBS0 | SDVO_OUTPUT_SVID0)
+
+#define SDVO_OUTPUT_MASK (SDVO_TMDS_MASK | SDVO_RGB_MASK | SDVO_LVDS_MASK |\
+ SDVO_TV_MASK)
+
+#define IS_TV(c) (c->output_flag & SDVO_TV_MASK)
+#define IS_LVDS(c) (c->output_flag & SDVO_LVDS_MASK)
+
static char *tv_format_names[] = {
"NTSC_M" , "NTSC_J" , "NTSC_443",
@@ -86,12 +97,6 @@ struct intel_sdvo_priv {
/* This is for current tv format name */
char *tv_format_name;
- /* This contains all current supported TV format */
- char *tv_format_supported[TV_FORMAT_NUM];
- int format_supported_num;
- struct drm_property *tv_format_property;
- struct drm_property *tv_format_name_property[TV_FORMAT_NUM];
-
/**
* This is set if we treat the device as HDMI, instead of DVI.
*/
@@ -112,12 +117,6 @@ struct intel_sdvo_priv {
*/
struct drm_display_mode *sdvo_lvds_fixed_mode;
- /**
- * Returned SDTV resolutions allowed for the current format, if the
- * device reported it.
- */
- struct intel_sdvo_sdtv_resolution_reply sdtv_resolutions;
-
/*
* supported encoding mode, used to determine whether HDMI is
* supported
@@ -130,11 +129,24 @@ struct intel_sdvo_priv {
/* Mac mini hack -- use the same DDC as the analog connector */
struct i2c_adapter *analog_ddc_bus;
- int save_sdvo_mult;
- u16 save_active_outputs;
- struct intel_sdvo_dtd save_input_dtd_1, save_input_dtd_2;
- struct intel_sdvo_dtd save_output_dtd[16];
- u32 save_SDVOX;
+};
+
+struct intel_sdvo_connector {
+ /* Mark the type of connector */
+ uint16_t output_flag;
+
+ /* This contains all current supported TV format */
+ char *tv_format_supported[TV_FORMAT_NUM];
+ int format_supported_num;
+ struct drm_property *tv_format_property;
+ struct drm_property *tv_format_name_property[TV_FORMAT_NUM];
+
+ /**
+ * Returned SDTV resolutions allowed for the current format, if the
+ * device reported it.
+ */
+ struct intel_sdvo_sdtv_resolution_reply sdtv_resolutions;
+
/* add the property for the SDVO-TV */
struct drm_property *left_property;
struct drm_property *right_property;
@@ -162,7 +174,12 @@ struct intel_sdvo_priv {
};
static bool
-intel_sdvo_output_setup(struct intel_encoder *intel_encoder, uint16_t flags);
+intel_sdvo_output_setup(struct intel_encoder *intel_encoder,
+ uint16_t flags);
+static void
+intel_sdvo_tv_create_property(struct drm_connector *connector, int type);
+static void
+intel_sdvo_create_enhance_property(struct drm_connector *connector);
/**
* Writes the SDVOB or SDVOC with the given value, but always writes both
@@ -171,12 +188,18 @@ intel_sdvo_output_setup(struct intel_encoder *intel_encoder, uint16_t flags);
*/
static void intel_sdvo_write_sdvox(struct intel_encoder *intel_encoder, u32 val)
{
- struct drm_device *dev = intel_encoder->base.dev;
+ struct drm_device *dev = intel_encoder->enc.dev;
struct drm_i915_private *dev_priv = dev->dev_private;
struct intel_sdvo_priv *sdvo_priv = intel_encoder->dev_priv;
u32 bval = val, cval = val;
int i;
+ if (sdvo_priv->sdvo_reg == PCH_SDVOB) {
+ I915_WRITE(sdvo_priv->sdvo_reg, val);
+ I915_READ(sdvo_priv->sdvo_reg);
+ return;
+ }
+
if (sdvo_priv->sdvo_reg == SDVOB) {
cval = I915_READ(SDVOC);
} else {
@@ -353,7 +376,8 @@ static const struct _sdvo_cmd_name {
SDVO_CMD_NAME_ENTRY(SDVO_CMD_GET_HBUF_DATA),
};
-#define SDVO_NAME(dev_priv) ((dev_priv)->sdvo_reg == SDVOB ? "SDVOB" : "SDVOC")
+#define IS_SDVOB(reg) (reg == SDVOB || reg == PCH_SDVOB)
+#define SDVO_NAME(dev_priv) (IS_SDVOB((dev_priv)->sdvo_reg) ? "SDVOB" : "SDVOC")
#define SDVO_PRIV(encoder) ((struct intel_sdvo_priv *) (encoder)->dev_priv)
static void intel_sdvo_debug_write(struct intel_encoder *intel_encoder, u8 cmd,
@@ -563,17 +587,6 @@ static bool intel_sdvo_get_trained_inputs(struct intel_encoder *intel_encoder, b
return true;
}
-static bool intel_sdvo_get_active_outputs(struct intel_encoder *intel_encoder,
- u16 *outputs)
-{
- u8 status;
-
- intel_sdvo_write_cmd(intel_encoder, SDVO_CMD_GET_ACTIVE_OUTPUTS, NULL, 0);
- status = intel_sdvo_read_response(intel_encoder, outputs, sizeof(*outputs));
-
- return (status == SDVO_CMD_STATUS_SUCCESS);
-}
-
static bool intel_sdvo_set_active_outputs(struct intel_encoder *intel_encoder,
u16 outputs)
{
@@ -646,40 +659,6 @@ static bool intel_sdvo_set_target_output(struct intel_encoder *intel_encoder,
return (status == SDVO_CMD_STATUS_SUCCESS);
}
-static bool intel_sdvo_get_timing(struct intel_encoder *intel_encoder, u8 cmd,
- struct intel_sdvo_dtd *dtd)
-{
- u8 status;
-
- intel_sdvo_write_cmd(intel_encoder, cmd, NULL, 0);
- status = intel_sdvo_read_response(intel_encoder, &dtd->part1,
- sizeof(dtd->part1));
- if (status != SDVO_CMD_STATUS_SUCCESS)
- return false;
-
- intel_sdvo_write_cmd(intel_encoder, cmd + 1, NULL, 0);
- status = intel_sdvo_read_response(intel_encoder, &dtd->part2,
- sizeof(dtd->part2));
- if (status != SDVO_CMD_STATUS_SUCCESS)
- return false;
-
- return true;
-}
-
-static bool intel_sdvo_get_input_timing(struct intel_encoder *intel_encoder,
- struct intel_sdvo_dtd *dtd)
-{
- return intel_sdvo_get_timing(intel_encoder,
- SDVO_CMD_GET_INPUT_TIMINGS_PART1, dtd);
-}
-
-static bool intel_sdvo_get_output_timing(struct intel_encoder *intel_encoder,
- struct intel_sdvo_dtd *dtd)
-{
- return intel_sdvo_get_timing(intel_encoder,
- SDVO_CMD_GET_OUTPUT_TIMINGS_PART1, dtd);
-}
-
static bool intel_sdvo_set_timing(struct intel_encoder *intel_encoder, u8 cmd,
struct intel_sdvo_dtd *dtd)
{
@@ -767,23 +746,6 @@ static bool intel_sdvo_get_preferred_input_timing(struct intel_encoder *intel_en
return false;
}
-static int intel_sdvo_get_clock_rate_mult(struct intel_encoder *intel_encoder)
-{
- u8 response, status;
-
- intel_sdvo_write_cmd(intel_encoder, SDVO_CMD_GET_CLOCK_RATE_MULT, NULL, 0);
- status = intel_sdvo_read_response(intel_encoder, &response, 1);
-
- if (status != SDVO_CMD_STATUS_SUCCESS) {
- DRM_DEBUG_KMS("Couldn't get SDVO clock rate multiplier\n");
- return SDVO_CLOCK_RATE_MULT_1X;
- } else {
- DRM_DEBUG_KMS("Current clock rate multiplier: %d\n", response);
- }
-
- return response;
-}
-
static bool intel_sdvo_set_clock_rate_mult(struct intel_encoder *intel_encoder, u8 val)
{
u8 status;
@@ -1071,7 +1033,7 @@ static void intel_sdvo_set_tv_format(struct intel_encoder *intel_encoder)
memcpy(&format, &format_map, sizeof(format_map) > sizeof(format) ?
sizeof(format) : sizeof(format_map));
- intel_sdvo_write_cmd(intel_encoder, SDVO_CMD_SET_TV_FORMAT, &format_map,
+ intel_sdvo_write_cmd(intel_encoder, SDVO_CMD_SET_TV_FORMAT, &format,
sizeof(format));
status = intel_sdvo_read_response(intel_encoder, NULL, 0);
@@ -1101,7 +1063,7 @@ static bool intel_sdvo_mode_fixup(struct drm_encoder *encoder,
/* Set output timings */
intel_sdvo_get_dtd_from_mode(&output_dtd, mode);
intel_sdvo_set_target_output(intel_encoder,
- dev_priv->controlled_output);
+ dev_priv->attached_output);
intel_sdvo_set_output_timing(intel_encoder, &output_dtd);
/* Set the input timing to the screen. Assume always input 0. */
@@ -1139,7 +1101,7 @@ static bool intel_sdvo_mode_fixup(struct drm_encoder *encoder,
dev_priv->sdvo_lvds_fixed_mode);
intel_sdvo_set_target_output(intel_encoder,
- dev_priv->controlled_output);
+ dev_priv->attached_output);
intel_sdvo_set_output_timing(intel_encoder, &output_dtd);
/* Set the input timing to the screen. Assume always input 0. */
@@ -1204,7 +1166,7 @@ static void intel_sdvo_mode_set(struct drm_encoder *encoder,
* channel on the motherboard. In a two-input device, the first input
* will be SDVOB and the second SDVOC.
*/
- in_out.in0 = sdvo_priv->controlled_output;
+ in_out.in0 = sdvo_priv->attached_output;
in_out.in1 = 0;
intel_sdvo_write_cmd(intel_encoder, SDVO_CMD_SET_IN_OUT_MAP,
@@ -1230,7 +1192,7 @@ static void intel_sdvo_mode_set(struct drm_encoder *encoder,
if (!sdvo_priv->is_tv && !sdvo_priv->is_lvds) {
/* Set the output timing to the screen */
intel_sdvo_set_target_output(intel_encoder,
- sdvo_priv->controlled_output);
+ sdvo_priv->attached_output);
intel_sdvo_set_output_timing(intel_encoder, &input_dtd);
}
@@ -1352,107 +1314,16 @@ static void intel_sdvo_dpms(struct drm_encoder *encoder, int mode)
if (0)
intel_sdvo_set_encoder_power_state(intel_encoder, mode);
- intel_sdvo_set_active_outputs(intel_encoder, sdvo_priv->controlled_output);
+ intel_sdvo_set_active_outputs(intel_encoder, sdvo_priv->attached_output);
}
return;
}
-static void intel_sdvo_save(struct drm_connector *connector)
-{
- struct drm_device *dev = connector->dev;
- struct drm_i915_private *dev_priv = dev->dev_private;
- struct intel_encoder *intel_encoder = to_intel_encoder(connector);
- struct intel_sdvo_priv *sdvo_priv = intel_encoder->dev_priv;
- int o;
-
- sdvo_priv->save_sdvo_mult = intel_sdvo_get_clock_rate_mult(intel_encoder);
- intel_sdvo_get_active_outputs(intel_encoder, &sdvo_priv->save_active_outputs);
-
- if (sdvo_priv->caps.sdvo_inputs_mask & 0x1) {
- intel_sdvo_set_target_input(intel_encoder, true, false);
- intel_sdvo_get_input_timing(intel_encoder,
- &sdvo_priv->save_input_dtd_1);
- }
-
- if (sdvo_priv->caps.sdvo_inputs_mask & 0x2) {
- intel_sdvo_set_target_input(intel_encoder, false, true);
- intel_sdvo_get_input_timing(intel_encoder,
- &sdvo_priv->save_input_dtd_2);
- }
-
- for (o = SDVO_OUTPUT_FIRST; o <= SDVO_OUTPUT_LAST; o++)
- {
- u16 this_output = (1 << o);
- if (sdvo_priv->caps.output_flags & this_output)
- {
- intel_sdvo_set_target_output(intel_encoder, this_output);
- intel_sdvo_get_output_timing(intel_encoder,
- &sdvo_priv->save_output_dtd[o]);
- }
- }
- if (sdvo_priv->is_tv) {
- /* XXX: Save TV format/enhancements. */
- }
-
- sdvo_priv->save_SDVOX = I915_READ(sdvo_priv->sdvo_reg);
-}
-
-static void intel_sdvo_restore(struct drm_connector *connector)
-{
- struct drm_device *dev = connector->dev;
- struct intel_encoder *intel_encoder = to_intel_encoder(connector);
- struct intel_sdvo_priv *sdvo_priv = intel_encoder->dev_priv;
- int o;
- int i;
- bool input1, input2;
- u8 status;
-
- intel_sdvo_set_active_outputs(intel_encoder, 0);
-
- for (o = SDVO_OUTPUT_FIRST; o <= SDVO_OUTPUT_LAST; o++)
- {
- u16 this_output = (1 << o);
- if (sdvo_priv->caps.output_flags & this_output) {
- intel_sdvo_set_target_output(intel_encoder, this_output);
- intel_sdvo_set_output_timing(intel_encoder, &sdvo_priv->save_output_dtd[o]);
- }
- }
-
- if (sdvo_priv->caps.sdvo_inputs_mask & 0x1) {
- intel_sdvo_set_target_input(intel_encoder, true, false);
- intel_sdvo_set_input_timing(intel_encoder, &sdvo_priv->save_input_dtd_1);
- }
-
- if (sdvo_priv->caps.sdvo_inputs_mask & 0x2) {
- intel_sdvo_set_target_input(intel_encoder, false, true);
- intel_sdvo_set_input_timing(intel_encoder, &sdvo_priv->save_input_dtd_2);
- }
-
- intel_sdvo_set_clock_rate_mult(intel_encoder, sdvo_priv->save_sdvo_mult);
-
- if (sdvo_priv->is_tv) {
- /* XXX: Restore TV format/enhancements. */
- }
-
- intel_sdvo_write_sdvox(intel_encoder, sdvo_priv->save_SDVOX);
-
- if (sdvo_priv->save_SDVOX & SDVO_ENABLE)
- {
- for (i = 0; i < 2; i++)
- intel_wait_for_vblank(dev);
- status = intel_sdvo_get_trained_inputs(intel_encoder, &input1, &input2);
- if (status == SDVO_CMD_STATUS_SUCCESS && !input1)
- DRM_DEBUG_KMS("First %s output reported failure to "
- "sync\n", SDVO_NAME(sdvo_priv));
- }
-
- intel_sdvo_set_active_outputs(intel_encoder, sdvo_priv->save_active_outputs);
-}
-
static int intel_sdvo_mode_valid(struct drm_connector *connector,
struct drm_display_mode *mode)
{
- struct intel_encoder *intel_encoder = to_intel_encoder(connector);
+ struct drm_encoder *encoder = intel_attached_encoder(connector);
+ struct intel_encoder *intel_encoder = enc_to_intel_encoder(encoder);
struct intel_sdvo_priv *sdvo_priv = intel_encoder->dev_priv;
if (mode->flags & DRM_MODE_FLAG_DBLSCAN)
@@ -1490,6 +1361,8 @@ static bool intel_sdvo_get_capabilities(struct intel_encoder *intel_encoder, str
return true;
}
+/* Unused: compiled out below. */
+#if 0
struct drm_connector* intel_sdvo_find(struct drm_device *dev, int sdvoB)
{
struct drm_connector *connector = NULL;
@@ -1560,6 +1433,7 @@ void intel_sdvo_set_hotplug(struct drm_connector *connector, int on)
intel_sdvo_write_cmd(intel_encoder, SDVO_CMD_GET_ACTIVE_HOT_PLUG, NULL, 0);
intel_sdvo_read_response(intel_encoder, &response, 2);
}
+#endif
static bool
intel_sdvo_multifunc_encoder(struct intel_encoder *intel_encoder)
@@ -1598,12 +1472,17 @@ static struct drm_connector *
intel_find_analog_connector(struct drm_device *dev)
{
struct drm_connector *connector;
+ struct drm_encoder *encoder;
struct intel_encoder *intel_encoder;
- list_for_each_entry(connector, &dev->mode_config.connector_list, head) {
- intel_encoder = to_intel_encoder(connector);
- if (intel_encoder->type == INTEL_OUTPUT_ANALOG)
- return connector;
+ list_for_each_entry(encoder, &dev->mode_config.encoder_list, head) {
+ intel_encoder = enc_to_intel_encoder(encoder);
+ if (intel_encoder->type == INTEL_OUTPUT_ANALOG) {
+ list_for_each_entry(connector, &dev->mode_config.connector_list, head) {
+ if (connector && encoder == intel_attached_encoder(connector))
+ return connector;
+ }
+ }
}
return NULL;
}
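The rewritten lookup reflects the new connector/encoder split: analog capability now lives on the encoder, so the walk goes encoder-first and then matches connectors back through intel_attached_encoder(). A standalone sketch of the same two-level search, using hypothetical Encoder/Connector types rather than the DRM lists:

	#include <stddef.h>

	enum enc_type { ENC_ANALOG, ENC_TMDS, ENC_TV };

	struct encoder   { enum enc_type type; };
	struct connector { struct encoder *attached; };

	static struct connector *
	find_analog_connector(struct encoder *encs, size_t n_enc,
			      struct connector *cons, size_t n_con)
	{
		size_t e, c;

		for (e = 0; e < n_enc; e++) {
			if (encs[e].type != ENC_ANALOG)
				continue;
			/* Match connectors back to the encoder, mirroring
			 * intel_attached_encoder() above. */
			for (c = 0; c < n_con; c++)
				if (cons[c].attached == &encs[e])
					return &cons[c];
		}
		return NULL;
	}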
@@ -1625,15 +1504,17 @@ intel_analog_is_connected(struct drm_device *dev)
}
enum drm_connector_status
-intel_sdvo_hdmi_sink_detect(struct drm_connector *connector, u16 response)
+intel_sdvo_hdmi_sink_detect(struct drm_connector *connector)
{
- struct intel_encoder *intel_encoder = to_intel_encoder(connector);
+ struct drm_encoder *encoder = intel_attached_encoder(connector);
+ struct intel_encoder *intel_encoder = enc_to_intel_encoder(encoder);
struct intel_sdvo_priv *sdvo_priv = intel_encoder->dev_priv;
+ struct intel_connector *intel_connector = to_intel_connector(connector);
+ struct intel_sdvo_connector *sdvo_connector = intel_connector->dev_priv;
enum drm_connector_status status = connector_status_connected;
struct edid *edid = NULL;
- edid = drm_get_edid(&intel_encoder->base,
- intel_encoder->ddc_bus);
+ edid = drm_get_edid(connector, intel_encoder->ddc_bus);
/* This is only applied to SDVO cards with multiple outputs */
if (edid == NULL && intel_sdvo_multifunc_encoder(intel_encoder)) {
@@ -1646,8 +1527,7 @@ intel_sdvo_hdmi_sink_detect(struct drm_connector *connector, u16 response)
*/
while(temp_ddc > 1) {
sdvo_priv->ddc_bus = temp_ddc;
- edid = drm_get_edid(&intel_encoder->base,
- intel_encoder->ddc_bus);
+ edid = drm_get_edid(connector, intel_encoder->ddc_bus);
if (edid) {
/*
* When we can get the EDID, maybe it is the
@@ -1664,28 +1544,25 @@ intel_sdvo_hdmi_sink_detect(struct drm_connector *connector, u16 response)
/* when there is no edid and no monitor is connected with VGA
* port, try to use the CRT ddc to read the EDID for DVI-connector
*/
- if (edid == NULL &&
- sdvo_priv->analog_ddc_bus &&
- !intel_analog_is_connected(intel_encoder->base.dev))
- edid = drm_get_edid(&intel_encoder->base,
- sdvo_priv->analog_ddc_bus);
+ if (edid == NULL && sdvo_priv->analog_ddc_bus &&
+ !intel_analog_is_connected(connector->dev))
+ edid = drm_get_edid(connector, sdvo_priv->analog_ddc_bus);
+
if (edid != NULL) {
- /* Don't report the output as connected if it's a DVI-I
- * connector with a non-digital EDID coming out.
- */
- if (response & (SDVO_OUTPUT_TMDS0 | SDVO_OUTPUT_TMDS1)) {
- if (edid->input & DRM_EDID_INPUT_DIGITAL)
- sdvo_priv->is_hdmi =
- drm_detect_hdmi_monitor(edid);
- else
- status = connector_status_disconnected;
- }
+ bool is_digital = !!(edid->input & DRM_EDID_INPUT_DIGITAL);
+ bool need_digital = !!(sdvo_connector->output_flag & SDVO_TMDS_MASK);
- kfree(edid);
- intel_encoder->base.display_info.raw_edid = NULL;
+ /* The DDC bus is shared; match the EDID type to the connector type. */
+ if (is_digital && need_digital)
+ sdvo_priv->is_hdmi = drm_detect_hdmi_monitor(edid);
+ else if (is_digital != need_digital)
+ status = connector_status_disconnected;
- } else if (response & (SDVO_OUTPUT_TMDS0 | SDVO_OUTPUT_TMDS1))
+ connector->display_info.raw_edid = NULL;
+ } else
status = connector_status_disconnected;
+
+ kfree(edid);
return status;
}
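Because a DVI-I connector's digital and analog halves can answer on the same DDC bus, the new code classifies a probe by comparing what the EDID reports against what the connector needs: a digital EDID on a digital connector may be HDMI, while a mismatch means the EDID belongs to the other half. A minimal sketch of that predicate (names hypothetical):

	#include <stdbool.h>

	enum det_status { DET_CONNECTED, DET_DISCONNECTED };

	/* edid_digital: the EDID input byte reports a digital sink.
	 * need_digital: this connector is one of the TMDS outputs. */
	static enum det_status classify_edid(bool edid_digital, bool need_digital,
					     bool *check_hdmi)
	{
		*check_hdmi = edid_digital && need_digital;	/* maybe HDMI */
		if (edid_digital != need_digital)
			return DET_DISCONNECTED;  /* EDID from the other half */
		return DET_CONNECTED;
	}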
@@ -1694,8 +1571,12 @@ static enum drm_connector_status intel_sdvo_detect(struct drm_connector *connect
{
uint16_t response;
u8 status;
- struct intel_encoder *intel_encoder = to_intel_encoder(connector);
+ struct drm_encoder *encoder = intel_attached_encoder(connector);
+ struct intel_encoder *intel_encoder = enc_to_intel_encoder(encoder);
+ struct intel_connector *intel_connector = to_intel_connector(connector);
struct intel_sdvo_priv *sdvo_priv = intel_encoder->dev_priv;
+ struct intel_sdvo_connector *sdvo_connector = intel_connector->dev_priv;
+ enum drm_connector_status ret;
intel_sdvo_write_cmd(intel_encoder,
SDVO_CMD_GET_ATTACHED_DISPLAYS, NULL, 0);
@@ -1713,24 +1594,41 @@ static enum drm_connector_status intel_sdvo_detect(struct drm_connector *connect
if (response == 0)
return connector_status_disconnected;
- if (intel_sdvo_multifunc_encoder(intel_encoder) &&
- sdvo_priv->attached_output != response) {
- if (sdvo_priv->controlled_output != response &&
- intel_sdvo_output_setup(intel_encoder, response) != true)
- return connector_status_unknown;
- sdvo_priv->attached_output = response;
+ sdvo_priv->attached_output = response;
+
+ if ((sdvo_connector->output_flag & response) == 0)
+ ret = connector_status_disconnected;
+ else if (response & SDVO_TMDS_MASK)
+ ret = intel_sdvo_hdmi_sink_detect(connector);
+ else
+ ret = connector_status_connected;
+
+ /* On a confirmed connection, update encoder flags, e.g. the TV clock requirement for SDVO TV. */
+ if (ret == connector_status_connected) {
+ sdvo_priv->is_tv = false;
+ sdvo_priv->is_lvds = false;
+ intel_encoder->needs_tv_clock = false;
+
+ if (response & SDVO_TV_MASK) {
+ sdvo_priv->is_tv = true;
+ intel_encoder->needs_tv_clock = true;
+ }
+ if (response & SDVO_LVDS_MASK)
+ sdvo_priv->is_lvds = true;
}
- return intel_sdvo_hdmi_sink_detect(connector, response);
+
+ return ret;
}
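Detection now happens per connector: the GET_ATTACHED_DISPLAYS response is a bitmask, each connector claims it only if its own output_flag bit is set, and digital outputs are double-checked against the EDID. A sketch of the classification step, with illustrative bit positions rather than the real SDVO register layout:

	#include <stdint.h>

	/* Bit positions are illustrative only. */
	#define OUT_TMDS0	(1u << 0)
	#define OUT_TMDS1	(1u << 1)
	#define OUT_TMDS_MASK	(OUT_TMDS0 | OUT_TMDS1)

	enum det_result { DET_OK, DET_GONE, DET_CHECK_EDID };

	/* response: GET_ATTACHED_DISPLAYS bitmask; output_flag: the single
	 * output this connector was created for. */
	static enum det_result classify_response(uint16_t response,
						 uint16_t output_flag)
	{
		if ((output_flag & response) == 0)
			return DET_GONE;	/* a different output lit up */
		if (response & OUT_TMDS_MASK)
			return DET_CHECK_EDID;	/* digital: verify via EDID */
		return DET_OK;
	}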
static void intel_sdvo_get_ddc_modes(struct drm_connector *connector)
{
- struct intel_encoder *intel_encoder = to_intel_encoder(connector);
+ struct drm_encoder *encoder = intel_attached_encoder(connector);
+ struct intel_encoder *intel_encoder = enc_to_intel_encoder(encoder);
struct intel_sdvo_priv *sdvo_priv = intel_encoder->dev_priv;
int num_modes;
/* set the bus switch and get the modes */
- num_modes = intel_ddc_get_modes(intel_encoder);
+ num_modes = intel_ddc_get_modes(connector, intel_encoder->ddc_bus);
/*
* Mac mini hack. On this device, the DVI-I connector shares one DDC
@@ -1740,17 +1638,10 @@ static void intel_sdvo_get_ddc_modes(struct drm_connector *connector)
*/
if (num_modes == 0 &&
sdvo_priv->analog_ddc_bus &&
- !intel_analog_is_connected(intel_encoder->base.dev)) {
- struct i2c_adapter *digital_ddc_bus;
-
+ !intel_analog_is_connected(connector->dev)) {
/* Switch to the analog ddc bus and try that
*/
- digital_ddc_bus = intel_encoder->ddc_bus;
- intel_encoder->ddc_bus = sdvo_priv->analog_ddc_bus;
-
- (void) intel_ddc_get_modes(intel_encoder);
-
- intel_encoder->ddc_bus = digital_ddc_bus;
+ (void) intel_ddc_get_modes(connector, sdvo_priv->analog_ddc_bus);
}
}
@@ -1821,8 +1712,9 @@ struct drm_display_mode sdvo_tv_modes[] = {
static void intel_sdvo_get_tv_modes(struct drm_connector *connector)
{
- struct intel_encoder *output = to_intel_encoder(connector);
- struct intel_sdvo_priv *sdvo_priv = output->dev_priv;
+ struct drm_encoder *encoder = intel_attached_encoder(connector);
+ struct intel_encoder *intel_encoder = enc_to_intel_encoder(encoder);
+ struct intel_sdvo_priv *sdvo_priv = intel_encoder->dev_priv;
struct intel_sdvo_sdtv_resolution_request tv_res;
uint32_t reply = 0, format_map = 0;
int i;
@@ -1842,11 +1734,11 @@ static void intel_sdvo_get_tv_modes(struct drm_connector *connector)
sizeof(format_map) ? sizeof(format_map) :
sizeof(struct intel_sdvo_sdtv_resolution_request));
- intel_sdvo_set_target_output(output, sdvo_priv->controlled_output);
+ intel_sdvo_set_target_output(intel_encoder, sdvo_priv->attached_output);
- intel_sdvo_write_cmd(output, SDVO_CMD_GET_SDTV_RESOLUTION_SUPPORT,
+ intel_sdvo_write_cmd(intel_encoder, SDVO_CMD_GET_SDTV_RESOLUTION_SUPPORT,
&tv_res, sizeof(tv_res));
- status = intel_sdvo_read_response(output, &reply, 3);
+ status = intel_sdvo_read_response(intel_encoder, &reply, 3);
if (status != SDVO_CMD_STATUS_SUCCESS)
return;
@@ -1863,7 +1755,8 @@ static void intel_sdvo_get_tv_modes(struct drm_connector *connector)
static void intel_sdvo_get_lvds_modes(struct drm_connector *connector)
{
- struct intel_encoder *intel_encoder = to_intel_encoder(connector);
+ struct drm_encoder *encoder = intel_attached_encoder(connector);
+ struct intel_encoder *intel_encoder = enc_to_intel_encoder(encoder);
struct drm_i915_private *dev_priv = connector->dev->dev_private;
struct intel_sdvo_priv *sdvo_priv = intel_encoder->dev_priv;
struct drm_display_mode *newmode;
@@ -1873,7 +1766,7 @@ static void intel_sdvo_get_lvds_modes(struct drm_connector *connector)
* Assume that the preferred modes are
* arranged in priority order.
*/
- intel_ddc_get_modes(intel_encoder);
+ intel_ddc_get_modes(connector, intel_encoder->ddc_bus);
if (list_empty(&connector->probed_modes) == false)
goto end;
@@ -1902,12 +1795,12 @@ end:
static int intel_sdvo_get_modes(struct drm_connector *connector)
{
- struct intel_encoder *output = to_intel_encoder(connector);
- struct intel_sdvo_priv *sdvo_priv = output->dev_priv;
+ struct intel_connector *intel_connector = to_intel_connector(connector);
+ struct intel_sdvo_connector *sdvo_connector = intel_connector->dev_priv;
- if (sdvo_priv->is_tv)
+ if (IS_TV(sdvo_connector))
intel_sdvo_get_tv_modes(connector);
- else if (sdvo_priv->is_lvds == true)
+ else if (IS_LVDS(sdvo_connector))
intel_sdvo_get_lvds_modes(connector);
else
intel_sdvo_get_ddc_modes(connector);
@@ -1920,11 +1813,11 @@ static int intel_sdvo_get_modes(struct drm_connector *connector)
static
void intel_sdvo_destroy_enhance_property(struct drm_connector *connector)
{
- struct intel_encoder *intel_encoder = to_intel_encoder(connector);
- struct intel_sdvo_priv *sdvo_priv = intel_encoder->dev_priv;
+ struct intel_connector *intel_connector = to_intel_connector(connector);
+ struct intel_sdvo_connector *sdvo_priv = intel_connector->dev_priv;
struct drm_device *dev = connector->dev;
- if (sdvo_priv->is_tv) {
+ if (IS_TV(sdvo_priv)) {
if (sdvo_priv->left_property)
drm_property_destroy(dev, sdvo_priv->left_property);
if (sdvo_priv->right_property)
@@ -1937,8 +1830,6 @@ void intel_sdvo_destroy_enhance_property(struct drm_connector *connector)
drm_property_destroy(dev, sdvo_priv->hpos_property);
if (sdvo_priv->vpos_property)
drm_property_destroy(dev, sdvo_priv->vpos_property);
- }
- if (sdvo_priv->is_tv) {
if (sdvo_priv->saturation_property)
drm_property_destroy(dev,
sdvo_priv->saturation_property);
@@ -1948,7 +1839,7 @@ void intel_sdvo_destroy_enhance_property(struct drm_connector *connector)
if (sdvo_priv->hue_property)
drm_property_destroy(dev, sdvo_priv->hue_property);
}
- if (sdvo_priv->is_tv || sdvo_priv->is_lvds) {
+ if (IS_TV(sdvo_priv) || IS_LVDS(sdvo_priv)) {
if (sdvo_priv->brightness_property)
drm_property_destroy(dev,
sdvo_priv->brightness_property);
@@ -1958,31 +1849,17 @@ void intel_sdvo_destroy_enhance_property(struct drm_connector *connector)
static void intel_sdvo_destroy(struct drm_connector *connector)
{
- struct intel_encoder *intel_encoder = to_intel_encoder(connector);
- struct intel_sdvo_priv *sdvo_priv = intel_encoder->dev_priv;
-
- if (intel_encoder->i2c_bus)
- intel_i2c_destroy(intel_encoder->i2c_bus);
- if (intel_encoder->ddc_bus)
- intel_i2c_destroy(intel_encoder->ddc_bus);
- if (sdvo_priv->analog_ddc_bus)
- intel_i2c_destroy(sdvo_priv->analog_ddc_bus);
-
- if (sdvo_priv->sdvo_lvds_fixed_mode != NULL)
- drm_mode_destroy(connector->dev,
- sdvo_priv->sdvo_lvds_fixed_mode);
+ struct intel_connector *intel_connector = to_intel_connector(connector);
+ struct intel_sdvo_connector *sdvo_connector = intel_connector->dev_priv;
- if (sdvo_priv->tv_format_property)
+ if (sdvo_connector->tv_format_property)
drm_property_destroy(connector->dev,
- sdvo_priv->tv_format_property);
-
- if (sdvo_priv->is_tv || sdvo_priv->is_lvds)
- intel_sdvo_destroy_enhance_property(connector);
+ sdvo_connector->tv_format_property);
+ intel_sdvo_destroy_enhance_property(connector);
drm_sysfs_connector_remove(connector);
drm_connector_cleanup(connector);
-
- kfree(intel_encoder);
+ kfree(connector);
}
static int
@@ -1990,9 +1867,11 @@ intel_sdvo_set_property(struct drm_connector *connector,
struct drm_property *property,
uint64_t val)
{
- struct intel_encoder *intel_encoder = to_intel_encoder(connector);
+ struct drm_encoder *encoder = intel_attached_encoder(connector);
+ struct intel_encoder *intel_encoder = enc_to_intel_encoder(encoder);
struct intel_sdvo_priv *sdvo_priv = intel_encoder->dev_priv;
- struct drm_encoder *encoder = &intel_encoder->enc;
+ struct intel_connector *intel_connector = to_intel_connector(connector);
+ struct intel_sdvo_connector *sdvo_connector = intel_connector->dev_priv;
struct drm_crtc *crtc = encoder->crtc;
int ret = 0;
bool changed = false;
@@ -2003,101 +1882,101 @@ intel_sdvo_set_property(struct drm_connector *connector,
if (ret < 0)
goto out;
- if (property == sdvo_priv->tv_format_property) {
+ if (property == sdvo_connector->tv_format_property) {
if (val >= TV_FORMAT_NUM) {
ret = -EINVAL;
goto out;
}
if (sdvo_priv->tv_format_name ==
- sdvo_priv->tv_format_supported[val])
+ sdvo_connector->tv_format_supported[val])
goto out;
- sdvo_priv->tv_format_name = sdvo_priv->tv_format_supported[val];
+ sdvo_priv->tv_format_name = sdvo_connector->tv_format_supported[val];
changed = true;
}
- if (sdvo_priv->is_tv || sdvo_priv->is_lvds) {
+ if (IS_TV(sdvo_connector) || IS_LVDS(sdvo_connector)) {
cmd = 0;
temp_value = val;
- if (sdvo_priv->left_property == property) {
+ if (sdvo_connector->left_property == property) {
drm_connector_property_set_value(connector,
- sdvo_priv->right_property, val);
- if (sdvo_priv->left_margin == temp_value)
+ sdvo_connector->right_property, val);
+ if (sdvo_connector->left_margin == temp_value)
goto out;
- sdvo_priv->left_margin = temp_value;
- sdvo_priv->right_margin = temp_value;
- temp_value = sdvo_priv->max_hscan -
- sdvo_priv->left_margin;
+ sdvo_connector->left_margin = temp_value;
+ sdvo_connector->right_margin = temp_value;
+ temp_value = sdvo_connector->max_hscan -
+ sdvo_connector->left_margin;
cmd = SDVO_CMD_SET_OVERSCAN_H;
- } else if (sdvo_priv->right_property == property) {
+ } else if (sdvo_connector->right_property == property) {
drm_connector_property_set_value(connector,
- sdvo_priv->left_property, val);
- if (sdvo_priv->right_margin == temp_value)
+ sdvo_connector->left_property, val);
+ if (sdvo_connector->right_margin == temp_value)
goto out;
- sdvo_priv->left_margin = temp_value;
- sdvo_priv->right_margin = temp_value;
- temp_value = sdvo_priv->max_hscan -
- sdvo_priv->left_margin;
+ sdvo_connector->left_margin = temp_value;
+ sdvo_connector->right_margin = temp_value;
+ temp_value = sdvo_connector->max_hscan -
+ sdvo_connector->left_margin;
cmd = SDVO_CMD_SET_OVERSCAN_H;
- } else if (sdvo_priv->top_property == property) {
+ } else if (sdvo_connector->top_property == property) {
drm_connector_property_set_value(connector,
- sdvo_priv->bottom_property, val);
- if (sdvo_priv->top_margin == temp_value)
+ sdvo_connector->bottom_property, val);
+ if (sdvo_connector->top_margin == temp_value)
goto out;
- sdvo_priv->top_margin = temp_value;
- sdvo_priv->bottom_margin = temp_value;
- temp_value = sdvo_priv->max_vscan -
- sdvo_priv->top_margin;
+ sdvo_connector->top_margin = temp_value;
+ sdvo_connector->bottom_margin = temp_value;
+ temp_value = sdvo_connector->max_vscan -
+ sdvo_connector->top_margin;
cmd = SDVO_CMD_SET_OVERSCAN_V;
- } else if (sdvo_priv->bottom_property == property) {
+ } else if (sdvo_connector->bottom_property == property) {
drm_connector_property_set_value(connector,
- sdvo_priv->top_property, val);
- if (sdvo_priv->bottom_margin == temp_value)
+ sdvo_connector->top_property, val);
+ if (sdvo_connector->bottom_margin == temp_value)
goto out;
- sdvo_priv->top_margin = temp_value;
- sdvo_priv->bottom_margin = temp_value;
- temp_value = sdvo_priv->max_vscan -
- sdvo_priv->top_margin;
+ sdvo_connector->top_margin = temp_value;
+ sdvo_connector->bottom_margin = temp_value;
+ temp_value = sdvo_connector->max_vscan -
+ sdvo_connector->top_margin;
cmd = SDVO_CMD_SET_OVERSCAN_V;
- } else if (sdvo_priv->hpos_property == property) {
- if (sdvo_priv->cur_hpos == temp_value)
+ } else if (sdvo_connector->hpos_property == property) {
+ if (sdvo_connector->cur_hpos == temp_value)
goto out;
cmd = SDVO_CMD_SET_POSITION_H;
- sdvo_priv->cur_hpos = temp_value;
- } else if (sdvo_priv->vpos_property == property) {
- if (sdvo_priv->cur_vpos == temp_value)
+ sdvo_connector->cur_hpos = temp_value;
+ } else if (sdvo_connector->vpos_property == property) {
+ if (sdvo_connector->cur_vpos == temp_value)
goto out;
cmd = SDVO_CMD_SET_POSITION_V;
- sdvo_priv->cur_vpos = temp_value;
- } else if (sdvo_priv->saturation_property == property) {
- if (sdvo_priv->cur_saturation == temp_value)
+ sdvo_connector->cur_vpos = temp_value;
+ } else if (sdvo_connector->saturation_property == property) {
+ if (sdvo_connector->cur_saturation == temp_value)
goto out;
cmd = SDVO_CMD_SET_SATURATION;
- sdvo_priv->cur_saturation = temp_value;
- } else if (sdvo_priv->contrast_property == property) {
- if (sdvo_priv->cur_contrast == temp_value)
+ sdvo_connector->cur_saturation = temp_value;
+ } else if (sdvo_connector->contrast_property == property) {
+ if (sdvo_connector->cur_contrast == temp_value)
goto out;
cmd = SDVO_CMD_SET_CONTRAST;
- sdvo_priv->cur_contrast = temp_value;
- } else if (sdvo_priv->hue_property == property) {
- if (sdvo_priv->cur_hue == temp_value)
+ sdvo_connector->cur_contrast = temp_value;
+ } else if (sdvo_connector->hue_property == property) {
+ if (sdvo_connector->cur_hue == temp_value)
goto out;
cmd = SDVO_CMD_SET_HUE;
- sdvo_priv->cur_hue = temp_value;
- } else if (sdvo_priv->brightness_property == property) {
- if (sdvo_priv->cur_brightness == temp_value)
+ sdvo_connector->cur_hue = temp_value;
+ } else if (sdvo_connector->brightness_property == property) {
+ if (sdvo_connector->cur_brightness == temp_value)
goto out;
cmd = SDVO_CMD_SET_BRIGHTNESS;
- sdvo_priv->cur_brightness = temp_value;
+ sdvo_connector->cur_brightness = temp_value;
}
if (cmd) {
intel_sdvo_write_cmd(intel_encoder, cmd, &temp_value, 2);
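The overscan properties are deliberately symmetric: setting the left margin also writes the right one, and the value actually sent to the hardware is the remaining scan length (max_hscan minus the margin). A worked sketch of that arithmetic, with hypothetical names:

	#include <assert.h>
	#include <stdint.h>

	struct overscan { uint16_t max_hscan, left_margin, right_margin; };

	/* Returns the value written with SDVO_CMD_SET_OVERSCAN_H: the scan
	 * width remaining after the (symmetric) margin is applied. */
	static uint16_t apply_h_margin(struct overscan *o, uint16_t val)
	{
		o->left_margin = o->right_margin = val;	/* kept in lockstep */
		return o->max_hscan - o->left_margin;
	}

	int main(void)
	{
		struct overscan o = { .max_hscan = 100 };
		assert(apply_h_margin(&o, 10) == 90);
		return 0;
	}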
@@ -2127,8 +2006,6 @@ static const struct drm_encoder_helper_funcs intel_sdvo_helper_funcs = {
static const struct drm_connector_funcs intel_sdvo_connector_funcs = {
.dpms = drm_helper_connector_dpms,
- .save = intel_sdvo_save,
- .restore = intel_sdvo_restore,
.detect = intel_sdvo_detect,
.fill_modes = drm_helper_probe_single_connector_modes,
.set_property = intel_sdvo_set_property,
@@ -2138,12 +2015,27 @@ static const struct drm_connector_funcs intel_sdvo_connector_funcs = {
static const struct drm_connector_helper_funcs intel_sdvo_connector_helper_funcs = {
.get_modes = intel_sdvo_get_modes,
.mode_valid = intel_sdvo_mode_valid,
- .best_encoder = intel_best_encoder,
+ .best_encoder = intel_attached_encoder,
};
static void intel_sdvo_enc_destroy(struct drm_encoder *encoder)
{
+ struct intel_encoder *intel_encoder = enc_to_intel_encoder(encoder);
+ struct intel_sdvo_priv *sdvo_priv = intel_encoder->dev_priv;
+
+ if (intel_encoder->i2c_bus)
+ intel_i2c_destroy(intel_encoder->i2c_bus);
+ if (intel_encoder->ddc_bus)
+ intel_i2c_destroy(intel_encoder->ddc_bus);
+ if (sdvo_priv->analog_ddc_bus)
+ intel_i2c_destroy(sdvo_priv->analog_ddc_bus);
+
+ if (sdvo_priv->sdvo_lvds_fixed_mode != NULL)
+ drm_mode_destroy(encoder->dev,
+ sdvo_priv->sdvo_lvds_fixed_mode);
+
drm_encoder_cleanup(encoder);
+ kfree(intel_encoder);
}
static const struct drm_encoder_funcs intel_sdvo_enc_funcs = {
@@ -2159,49 +2051,29 @@ static const struct drm_encoder_funcs intel_sdvo_enc_funcs = {
* outputs, then LVDS outputs.
*/
static void
-intel_sdvo_select_ddc_bus(struct intel_sdvo_priv *dev_priv)
+intel_sdvo_select_ddc_bus(struct drm_i915_private *dev_priv,
+ struct intel_sdvo_priv *sdvo, u32 reg)
{
- uint16_t mask = 0;
- unsigned int num_bits;
-
- /* Make a mask of outputs less than or equal to our own priority in the
- * list.
- */
- switch (dev_priv->controlled_output) {
- case SDVO_OUTPUT_LVDS1:
- mask |= SDVO_OUTPUT_LVDS1;
- case SDVO_OUTPUT_LVDS0:
- mask |= SDVO_OUTPUT_LVDS0;
- case SDVO_OUTPUT_TMDS1:
- mask |= SDVO_OUTPUT_TMDS1;
- case SDVO_OUTPUT_TMDS0:
- mask |= SDVO_OUTPUT_TMDS0;
- case SDVO_OUTPUT_RGB1:
- mask |= SDVO_OUTPUT_RGB1;
- case SDVO_OUTPUT_RGB0:
- mask |= SDVO_OUTPUT_RGB0;
- break;
- }
+ struct sdvo_device_mapping *mapping;
- /* Count bits to find what number we are in the priority list. */
- mask &= dev_priv->caps.output_flags;
- num_bits = hweight16(mask);
- if (num_bits > 3) {
- /* if more than 3 outputs, default to DDC bus 3 for now */
- num_bits = 3;
- }
+ if (IS_SDVOB(reg))
+ mapping = &(dev_priv->sdvo_mappings[0]);
+ else
+ mapping = &(dev_priv->sdvo_mappings[1]);
- /* Corresponds to SDVO_CONTROL_BUS_DDCx */
- dev_priv->ddc_bus = 1 << num_bits;
+ sdvo->ddc_bus = 1 << ((mapping->ddc_pin & 0xf0) >> 4);
}
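Bus selection now comes straight from the BIOS-parsed SDVO device mapping: the high nibble of ddc_pin names the DDC bus, and shifting 1 by it yields the SDVO_CONTROL_BUS_DDCx bit. A small worked example of the bit math:

	#include <stdint.h>
	#include <stdio.h>

	/* Corresponds to SDVO_CONTROL_BUS_DDCx: bus n is bit (1 << n). */
	static uint8_t ddc_bus_from_pin(uint8_t ddc_pin)
	{
		return (uint8_t)(1 << ((ddc_pin & 0xf0) >> 4));
	}

	int main(void)
	{
		printf("pin 0x21 -> 0x%02x\n", ddc_bus_from_pin(0x21)); /* 0x04 */
		printf("pin 0x10 -> 0x%02x\n", ddc_bus_from_pin(0x10)); /* 0x02 */
		return 0;
	}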
static bool
-intel_sdvo_get_digital_encoding_mode(struct intel_encoder *output)
+intel_sdvo_get_digital_encoding_mode(struct intel_encoder *output, int device)
{
struct intel_sdvo_priv *sdvo_priv = output->dev_priv;
uint8_t status;
- intel_sdvo_set_target_output(output, sdvo_priv->controlled_output);
+ if (device == 0)
+ intel_sdvo_set_target_output(output, SDVO_OUTPUT_TMDS0);
+ else
+ intel_sdvo_set_target_output(output, SDVO_OUTPUT_TMDS1);
intel_sdvo_write_cmd(output, SDVO_CMD_GET_ENCODE, NULL, 0);
status = intel_sdvo_read_response(output, &sdvo_priv->is_hdmi, 1);
@@ -2214,15 +2086,13 @@ static struct intel_encoder *
intel_sdvo_chan_to_intel_encoder(struct intel_i2c_chan *chan)
{
struct drm_device *dev = chan->drm_dev;
- struct drm_connector *connector;
+ struct drm_encoder *encoder;
struct intel_encoder *intel_encoder = NULL;
- list_for_each_entry(connector,
- &dev->mode_config.connector_list, head) {
- if (to_intel_encoder(connector)->ddc_bus == &chan->adapter) {
- intel_encoder = to_intel_encoder(connector);
+ list_for_each_entry(encoder, &dev->mode_config.encoder_list, head) {
+ intel_encoder = enc_to_intel_encoder(encoder);
+ if (intel_encoder->ddc_bus == &chan->adapter)
break;
- }
}
return intel_encoder;
}
@@ -2259,7 +2129,7 @@ intel_sdvo_get_slave_addr(struct drm_device *dev, int sdvo_reg)
struct drm_i915_private *dev_priv = dev->dev_private;
struct sdvo_device_mapping *my_mapping, *other_mapping;
- if (sdvo_reg == SDVOB) {
+ if (IS_SDVOB(sdvo_reg)) {
my_mapping = &dev_priv->sdvo_mappings[0];
other_mapping = &dev_priv->sdvo_mappings[1];
} else {
@@ -2284,120 +2154,237 @@ intel_sdvo_get_slave_addr(struct drm_device *dev, int sdvo_reg)
/* No SDVO device info is found for another DVO port,
* so use mapping assumption we had before BIOS parsing.
*/
- if (sdvo_reg == SDVOB)
+ if (IS_SDVOB(sdvo_reg))
return 0x70;
else
return 0x72;
}
-static int intel_sdvo_bad_tv_callback(const struct dmi_system_id *id)
+static bool
+intel_sdvo_connector_alloc (struct intel_connector **ret)
{
- DRM_DEBUG_KMS("Ignoring bad SDVO TV connector for %s\n", id->ident);
- return 1;
+ struct intel_connector *intel_connector;
+ struct intel_sdvo_connector *sdvo_connector;
+
+ *ret = kzalloc(sizeof(*intel_connector) +
+ sizeof(*sdvo_connector), GFP_KERNEL);
+ if (!*ret)
+ return false;
+
+ intel_connector = *ret;
+ sdvo_connector = (struct intel_sdvo_connector *)(intel_connector + 1);
+ intel_connector->dev_priv = sdvo_connector;
+
+ return true;
}
-static struct dmi_system_id intel_sdvo_bad_tv[] = {
- {
- .callback = intel_sdvo_bad_tv_callback,
- .ident = "IntelG45/ICH10R/DME1737",
- .matches = {
- DMI_MATCH(DMI_SYS_VENDOR, "IBM CORPORATION"),
- DMI_MATCH(DMI_PRODUCT_NAME, "4800784"),
- },
- },
+static void
+intel_sdvo_connector_create (struct drm_encoder *encoder,
+ struct drm_connector *connector)
+{
+ drm_connector_init(encoder->dev, connector, &intel_sdvo_connector_funcs,
+ connector->connector_type);
- { } /* terminating entry */
-};
+ drm_connector_helper_add(connector, &intel_sdvo_connector_helper_funcs);
+
+ connector->interlace_allowed = 0;
+ connector->doublescan_allowed = 0;
+ connector->display_info.subpixel_order = SubPixelHorizontalRGB;
+
+ drm_mode_connector_attach_encoder(connector, encoder);
+ drm_sysfs_connector_add(connector);
+}
static bool
-intel_sdvo_output_setup(struct intel_encoder *intel_encoder, uint16_t flags)
+intel_sdvo_dvi_init(struct intel_encoder *intel_encoder, int device)
{
- struct drm_connector *connector = &intel_encoder->base;
struct drm_encoder *encoder = &intel_encoder->enc;
struct intel_sdvo_priv *sdvo_priv = intel_encoder->dev_priv;
- bool ret = true, registered = false;
+ struct drm_connector *connector;
+ struct intel_connector *intel_connector;
+ struct intel_sdvo_connector *sdvo_connector;
+
+ if (!intel_sdvo_connector_alloc(&intel_connector))
+ return false;
+
+ sdvo_connector = intel_connector->dev_priv;
+
+ if (device == 0) {
+ sdvo_priv->controlled_output |= SDVO_OUTPUT_TMDS0;
+ sdvo_connector->output_flag = SDVO_OUTPUT_TMDS0;
+ } else if (device == 1) {
+ sdvo_priv->controlled_output |= SDVO_OUTPUT_TMDS1;
+ sdvo_connector->output_flag = SDVO_OUTPUT_TMDS1;
+ }
+
+ connector = &intel_connector->base;
+ connector->polled = DRM_CONNECTOR_POLL_CONNECT | DRM_CONNECTOR_POLL_DISCONNECT;
+ encoder->encoder_type = DRM_MODE_ENCODER_TMDS;
+ connector->connector_type = DRM_MODE_CONNECTOR_DVID;
+
+ if (intel_sdvo_get_supp_encode(intel_encoder, &sdvo_priv->encode)
+ && intel_sdvo_get_digital_encoding_mode(intel_encoder, device)
+ && sdvo_priv->is_hdmi) {
+ /* enable hdmi encoding mode if supported */
+ intel_sdvo_set_encode(intel_encoder, SDVO_ENCODE_HDMI);
+ intel_sdvo_set_colorimetry(intel_encoder,
+ SDVO_COLORIMETRY_RGB256);
+ connector->connector_type = DRM_MODE_CONNECTOR_HDMIA;
+ }
+ intel_encoder->clone_mask = (1 << INTEL_SDVO_NON_TV_CLONE_BIT) |
+ (1 << INTEL_ANALOG_CLONE_BIT);
+
+ intel_sdvo_connector_create(encoder, connector);
+
+ return true;
+}
+
+static bool
+intel_sdvo_tv_init(struct intel_encoder *intel_encoder, int type)
+{
+ struct drm_encoder *encoder = &intel_encoder->enc;
+ struct intel_sdvo_priv *sdvo_priv = intel_encoder->dev_priv;
+ struct drm_connector *connector;
+ struct intel_connector *intel_connector;
+ struct intel_sdvo_connector *sdvo_connector;
+
+ if (!intel_sdvo_connector_alloc(&intel_connector))
+ return false;
+
+ connector = &intel_connector->base;
+ encoder->encoder_type = DRM_MODE_ENCODER_TVDAC;
+ connector->connector_type = DRM_MODE_CONNECTOR_SVIDEO;
+ sdvo_connector = intel_connector->dev_priv;
+
+ sdvo_priv->controlled_output |= type;
+ sdvo_connector->output_flag = type;
+
+ sdvo_priv->is_tv = true;
+ intel_encoder->needs_tv_clock = true;
+ intel_encoder->clone_mask = 1 << INTEL_SDVO_TV_CLONE_BIT;
+
+ intel_sdvo_connector_create(encoder, connector);
+
+ intel_sdvo_tv_create_property(connector, type);
+
+ intel_sdvo_create_enhance_property(connector);
+
+ return true;
+}
+
+static bool
+intel_sdvo_analog_init(struct intel_encoder *intel_encoder, int device)
+{
+ struct drm_encoder *encoder = &intel_encoder->enc;
+ struct intel_sdvo_priv *sdvo_priv = intel_encoder->dev_priv;
+ struct drm_connector *connector;
+ struct intel_connector *intel_connector;
+ struct intel_sdvo_connector *sdvo_connector;
+
+ if (!intel_sdvo_connector_alloc(&intel_connector))
+ return false;
+
+ connector = &intel_connector->base;
+ connector->polled = DRM_CONNECTOR_POLL_CONNECT;
+ encoder->encoder_type = DRM_MODE_ENCODER_DAC;
+ connector->connector_type = DRM_MODE_CONNECTOR_VGA;
+ sdvo_connector = intel_connector->dev_priv;
+
+ if (device == 0) {
+ sdvo_priv->controlled_output |= SDVO_OUTPUT_RGB0;
+ sdvo_connector->output_flag = SDVO_OUTPUT_RGB0;
+ } else if (device == 1) {
+ sdvo_priv->controlled_output |= SDVO_OUTPUT_RGB1;
+ sdvo_connector->output_flag = SDVO_OUTPUT_RGB1;
+ }
+
+ intel_encoder->clone_mask = (1 << INTEL_SDVO_NON_TV_CLONE_BIT) |
+ (1 << INTEL_ANALOG_CLONE_BIT);
+
+ intel_sdvo_connector_create(encoder, connector);
+ return true;
+}
+
+static bool
+intel_sdvo_lvds_init(struct intel_encoder *intel_encoder, int device)
+{
+ struct drm_encoder *encoder = &intel_encoder->enc;
+ struct intel_sdvo_priv *sdvo_priv = intel_encoder->dev_priv;
+ struct drm_connector *connector;
+ struct intel_connector *intel_connector;
+ struct intel_sdvo_connector *sdvo_connector;
+
+ if (!intel_sdvo_connector_alloc(&intel_connector))
+ return false;
+
+ connector = &intel_connector->base;
+ encoder->encoder_type = DRM_MODE_ENCODER_LVDS;
+ connector->connector_type = DRM_MODE_CONNECTOR_LVDS;
+ sdvo_connector = intel_connector->dev_priv;
+
+ sdvo_priv->is_lvds = true;
+
+ if (device == 0) {
+ sdvo_priv->controlled_output |= SDVO_OUTPUT_LVDS0;
+ sdvo_connector->output_flag = SDVO_OUTPUT_LVDS0;
+ } else if (device == 1) {
+ sdvo_priv->controlled_output |= SDVO_OUTPUT_LVDS1;
+ sdvo_connector->output_flag = SDVO_OUTPUT_LVDS1;
+ }
+
+ intel_encoder->clone_mask = (1 << INTEL_ANALOG_CLONE_BIT) |
+ (1 << INTEL_SDVO_LVDS_CLONE_BIT);
+
+ intel_sdvo_connector_create(encoder, connector);
+ intel_sdvo_create_enhance_property(connector);
+ return true;
+}
+
+static bool
+intel_sdvo_output_setup(struct intel_encoder *intel_encoder, uint16_t flags)
+{
+ struct intel_sdvo_priv *sdvo_priv = intel_encoder->dev_priv;
sdvo_priv->is_tv = false;
intel_encoder->needs_tv_clock = false;
sdvo_priv->is_lvds = false;
- if (device_is_registered(&connector->kdev)) {
- drm_sysfs_connector_remove(connector);
- registered = true;
- }
+ /* SDVO rule: an XXX1 function block may not exist unless the device also has the matching XXX0 block. */
- if (flags &
- (SDVO_OUTPUT_TMDS0 | SDVO_OUTPUT_TMDS1)) {
- if (sdvo_priv->caps.output_flags & SDVO_OUTPUT_TMDS0)
- sdvo_priv->controlled_output = SDVO_OUTPUT_TMDS0;
- else
- sdvo_priv->controlled_output = SDVO_OUTPUT_TMDS1;
-
- encoder->encoder_type = DRM_MODE_ENCODER_TMDS;
- connector->connector_type = DRM_MODE_CONNECTOR_DVID;
-
- if (intel_sdvo_get_supp_encode(intel_encoder,
- &sdvo_priv->encode) &&
- intel_sdvo_get_digital_encoding_mode(intel_encoder) &&
- sdvo_priv->is_hdmi) {
- /* enable hdmi encoding mode if supported */
- intel_sdvo_set_encode(intel_encoder, SDVO_ENCODE_HDMI);
- intel_sdvo_set_colorimetry(intel_encoder,
- SDVO_COLORIMETRY_RGB256);
- connector->connector_type = DRM_MODE_CONNECTOR_HDMIA;
- intel_encoder->clone_mask =
- (1 << INTEL_SDVO_NON_TV_CLONE_BIT) |
- (1 << INTEL_ANALOG_CLONE_BIT);
- }
- } else if ((flags & SDVO_OUTPUT_SVID0) &&
- !dmi_check_system(intel_sdvo_bad_tv)) {
-
- sdvo_priv->controlled_output = SDVO_OUTPUT_SVID0;
- encoder->encoder_type = DRM_MODE_ENCODER_TVDAC;
- connector->connector_type = DRM_MODE_CONNECTOR_SVIDEO;
- sdvo_priv->is_tv = true;
- intel_encoder->needs_tv_clock = true;
- intel_encoder->clone_mask = 1 << INTEL_SDVO_TV_CLONE_BIT;
- } else if (flags & SDVO_OUTPUT_RGB0) {
-
- sdvo_priv->controlled_output = SDVO_OUTPUT_RGB0;
- encoder->encoder_type = DRM_MODE_ENCODER_DAC;
- connector->connector_type = DRM_MODE_CONNECTOR_VGA;
- intel_encoder->clone_mask = (1 << INTEL_SDVO_NON_TV_CLONE_BIT) |
- (1 << INTEL_ANALOG_CLONE_BIT);
- } else if (flags & SDVO_OUTPUT_RGB1) {
-
- sdvo_priv->controlled_output = SDVO_OUTPUT_RGB1;
- encoder->encoder_type = DRM_MODE_ENCODER_DAC;
- connector->connector_type = DRM_MODE_CONNECTOR_VGA;
- intel_encoder->clone_mask = (1 << INTEL_SDVO_NON_TV_CLONE_BIT) |
- (1 << INTEL_ANALOG_CLONE_BIT);
- } else if (flags & SDVO_OUTPUT_CVBS0) {
-
- sdvo_priv->controlled_output = SDVO_OUTPUT_CVBS0;
- encoder->encoder_type = DRM_MODE_ENCODER_TVDAC;
- connector->connector_type = DRM_MODE_CONNECTOR_SVIDEO;
- sdvo_priv->is_tv = true;
- intel_encoder->needs_tv_clock = true;
- intel_encoder->clone_mask = 1 << INTEL_SDVO_TV_CLONE_BIT;
- } else if (flags & SDVO_OUTPUT_LVDS0) {
-
- sdvo_priv->controlled_output = SDVO_OUTPUT_LVDS0;
- encoder->encoder_type = DRM_MODE_ENCODER_LVDS;
- connector->connector_type = DRM_MODE_CONNECTOR_LVDS;
- sdvo_priv->is_lvds = true;
- intel_encoder->clone_mask = (1 << INTEL_ANALOG_CLONE_BIT) |
- (1 << INTEL_SDVO_LVDS_CLONE_BIT);
- } else if (flags & SDVO_OUTPUT_LVDS1) {
-
- sdvo_priv->controlled_output = SDVO_OUTPUT_LVDS1;
- encoder->encoder_type = DRM_MODE_ENCODER_LVDS;
- connector->connector_type = DRM_MODE_CONNECTOR_LVDS;
- sdvo_priv->is_lvds = true;
- intel_encoder->clone_mask = (1 << INTEL_ANALOG_CLONE_BIT) |
- (1 << INTEL_SDVO_LVDS_CLONE_BIT);
- } else {
+ if (flags & SDVO_OUTPUT_TMDS0)
+ if (!intel_sdvo_dvi_init(intel_encoder, 0))
+ return false;
+
+ if ((flags & SDVO_TMDS_MASK) == SDVO_TMDS_MASK)
+ if (!intel_sdvo_dvi_init(intel_encoder, 1))
+ return false;
+
+ /* TV outputs have no XXX1 function block */
+ if (flags & SDVO_OUTPUT_SVID0)
+ if (!intel_sdvo_tv_init(intel_encoder, SDVO_OUTPUT_SVID0))
+ return false;
+
+ if (flags & SDVO_OUTPUT_CVBS0)
+ if (!intel_sdvo_tv_init(intel_encoder, SDVO_OUTPUT_CVBS0))
+ return false;
+
+ if (flags & SDVO_OUTPUT_RGB0)
+ if (!intel_sdvo_analog_init(intel_encoder, 0))
+ return false;
+
+ if ((flags & SDVO_RGB_MASK) == SDVO_RGB_MASK)
+ if (!intel_sdvo_analog_init(intel_encoder, 1))
+ return false;
+
+ if (flags & SDVO_OUTPUT_LVDS0)
+ if (!intel_sdvo_lvds_init(intel_encoder, 0))
+ return false;
+ if ((flags & SDVO_LVDS_MASK) == SDVO_LVDS_MASK)
+ if (!intel_sdvo_lvds_init(intel_encoder, 1))
+ return false;
+
+ if ((flags & SDVO_OUTPUT_MASK) == 0) {
unsigned char bytes[2];
sdvo_priv->controlled_output = 0;
@@ -2405,28 +2392,25 @@ intel_sdvo_output_setup(struct intel_encoder *intel_encoder, uint16_t flags)
DRM_DEBUG_KMS("%s: Unknown SDVO output type (0x%02x%02x)\n",
SDVO_NAME(sdvo_priv),
bytes[0], bytes[1]);
- ret = false;
+ return false;
}
intel_encoder->crtc_mask = (1 << 0) | (1 << 1);
- if (ret && registered)
- ret = drm_sysfs_connector_add(connector) == 0 ? true : false;
-
-
- return ret;
-
+ return true;
}
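The paired-output checks rely on masks: (flags & SDVO_TMDS_MASK) == SDVO_TMDS_MASK is true only when both TMDS bits are set, so the second DVI/RGB/LVDS connector is created only when the first also exists. A tiny sketch of the test, with an illustrative bit layout:

	#include <stdbool.h>
	#include <stdint.h>

	#define OUT0	 (1u << 0)	/* illustrative bit layout */
	#define OUT1	 (1u << 1)
	#define OUT_MASK (OUT0 | OUT1)

	/* The second function block is initialized only when both bits are
	 * present, enforcing "XXX1 never exists without XXX0". */
	static bool init_second_output(uint16_t flags)
	{
		return (flags & OUT_MASK) == OUT_MASK;
	}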
-static void intel_sdvo_tv_create_property(struct drm_connector *connector)
+static void intel_sdvo_tv_create_property(struct drm_connector *connector, int type)
{
- struct intel_encoder *intel_encoder = to_intel_encoder(connector);
+ struct drm_encoder *encoder = intel_attached_encoder(connector);
+ struct intel_encoder *intel_encoder = enc_to_intel_encoder(encoder);
struct intel_sdvo_priv *sdvo_priv = intel_encoder->dev_priv;
+ struct intel_connector *intel_connector = to_intel_connector(connector);
+ struct intel_sdvo_connector *sdvo_connector = intel_connector->dev_priv;
struct intel_sdvo_tv_format format;
uint32_t format_map, i;
uint8_t status;
- intel_sdvo_set_target_output(intel_encoder,
- sdvo_priv->controlled_output);
+ intel_sdvo_set_target_output(intel_encoder, type);
intel_sdvo_write_cmd(intel_encoder,
SDVO_CMD_GET_SUPPORTED_TV_FORMATS, NULL, 0);
@@ -2441,35 +2425,37 @@ static void intel_sdvo_tv_create_property(struct drm_connector *connector)
if (format_map == 0)
return;
- sdvo_priv->format_supported_num = 0;
+ sdvo_connector->format_supported_num = 0;
for (i = 0 ; i < TV_FORMAT_NUM; i++)
if (format_map & (1 << i)) {
- sdvo_priv->tv_format_supported
- [sdvo_priv->format_supported_num++] =
+ sdvo_connector->tv_format_supported
+ [sdvo_connector->format_supported_num++] =
tv_format_names[i];
}
- sdvo_priv->tv_format_property =
+ sdvo_connector->tv_format_property =
drm_property_create(
connector->dev, DRM_MODE_PROP_ENUM,
- "mode", sdvo_priv->format_supported_num);
+ "mode", sdvo_connector->format_supported_num);
- for (i = 0; i < sdvo_priv->format_supported_num; i++)
+ for (i = 0; i < sdvo_connector->format_supported_num; i++)
drm_property_add_enum(
- sdvo_priv->tv_format_property, i,
- i, sdvo_priv->tv_format_supported[i]);
+ sdvo_connector->tv_format_property, i,
+ i, sdvo_connector->tv_format_supported[i]);
- sdvo_priv->tv_format_name = sdvo_priv->tv_format_supported[0];
+ sdvo_priv->tv_format_name = sdvo_connector->tv_format_supported[0];
drm_connector_attach_property(
- connector, sdvo_priv->tv_format_property, 0);
+ connector, sdvo_connector->tv_format_property, 0);
}
static void intel_sdvo_create_enhance_property(struct drm_connector *connector)
{
- struct intel_encoder *intel_encoder = to_intel_encoder(connector);
- struct intel_sdvo_priv *sdvo_priv = intel_encoder->dev_priv;
+ struct drm_encoder *encoder = intel_attached_encoder(connector);
+ struct intel_encoder *intel_encoder = enc_to_intel_encoder(encoder);
+ struct intel_connector *intel_connector = to_intel_connector(connector);
+ struct intel_sdvo_connector *sdvo_priv = intel_connector->dev_priv;
struct intel_sdvo_enhancements_reply sdvo_data;
struct drm_device *dev = connector->dev;
uint8_t status;
@@ -2488,7 +2474,7 @@ static void intel_sdvo_create_enhance_property(struct drm_connector *connector)
DRM_DEBUG_KMS("No enhancement is supported\n");
return;
}
- if (sdvo_priv->is_tv) {
+ if (IS_TV(sdvo_priv)) {
/* when horizontal overscan is supported, Add the left/right
* property
*/
@@ -2636,8 +2622,6 @@ static void intel_sdvo_create_enhance_property(struct drm_connector *connector)
"default %d, current %d\n",
data_value[0], data_value[1], response);
}
- }
- if (sdvo_priv->is_tv) {
if (sdvo_data.saturation) {
intel_sdvo_write_cmd(intel_encoder,
SDVO_CMD_GET_MAX_SATURATION, NULL, 0);
@@ -2733,7 +2717,7 @@ static void intel_sdvo_create_enhance_property(struct drm_connector *connector)
data_value[0], data_value[1], response);
}
}
- if (sdvo_priv->is_tv || sdvo_priv->is_lvds) {
+ if (IS_TV(sdvo_priv) || IS_LVDS(sdvo_priv)) {
if (sdvo_data.brightness) {
intel_sdvo_write_cmd(intel_encoder,
SDVO_CMD_GET_MAX_BRIGHTNESS, NULL, 0);
@@ -2773,12 +2757,11 @@ static void intel_sdvo_create_enhance_property(struct drm_connector *connector)
bool intel_sdvo_init(struct drm_device *dev, int sdvo_reg)
{
struct drm_i915_private *dev_priv = dev->dev_private;
- struct drm_connector *connector;
struct intel_encoder *intel_encoder;
struct intel_sdvo_priv *sdvo_priv;
-
u8 ch[0x40];
int i;
+ u32 i2c_reg, ddc_reg, analog_ddc_reg;
intel_encoder = kcalloc(sizeof(struct intel_encoder)+sizeof(struct intel_sdvo_priv), 1, GFP_KERNEL);
if (!intel_encoder) {
@@ -2791,11 +2774,21 @@ bool intel_sdvo_init(struct drm_device *dev, int sdvo_reg)
intel_encoder->dev_priv = sdvo_priv;
intel_encoder->type = INTEL_OUTPUT_SDVO;
+ if (HAS_PCH_SPLIT(dev)) {
+ i2c_reg = PCH_GPIOE;
+ ddc_reg = PCH_GPIOE;
+ analog_ddc_reg = PCH_GPIOA;
+ } else {
+ i2c_reg = GPIOE;
+ ddc_reg = GPIOE;
+ analog_ddc_reg = GPIOA;
+ }
+
/* setup the DDC bus. */
- if (sdvo_reg == SDVOB)
- intel_encoder->i2c_bus = intel_i2c_create(dev, GPIOE, "SDVOCTRL_E for SDVOB");
+ if (IS_SDVOB(sdvo_reg))
+ intel_encoder->i2c_bus = intel_i2c_create(dev, i2c_reg, "SDVOCTRL_E for SDVOB");
else
- intel_encoder->i2c_bus = intel_i2c_create(dev, GPIOE, "SDVOCTRL_E for SDVOC");
+ intel_encoder->i2c_bus = intel_i2c_create(dev, i2c_reg, "SDVOCTRL_E for SDVOC");
if (!intel_encoder->i2c_bus)
goto err_inteloutput;
@@ -2809,20 +2802,20 @@ bool intel_sdvo_init(struct drm_device *dev, int sdvo_reg)
for (i = 0; i < 0x40; i++) {
if (!intel_sdvo_read_byte(intel_encoder, i, &ch[i])) {
DRM_DEBUG_KMS("No SDVO device found on SDVO%c\n",
- sdvo_reg == SDVOB ? 'B' : 'C');
+ IS_SDVOB(sdvo_reg) ? 'B' : 'C');
goto err_i2c;
}
}
/* setup the DDC bus. */
- if (sdvo_reg == SDVOB) {
- intel_encoder->ddc_bus = intel_i2c_create(dev, GPIOE, "SDVOB DDC BUS");
- sdvo_priv->analog_ddc_bus = intel_i2c_create(dev, GPIOA,
+ if (IS_SDVOB(sdvo_reg)) {
+ intel_encoder->ddc_bus = intel_i2c_create(dev, ddc_reg, "SDVOB DDC BUS");
+ sdvo_priv->analog_ddc_bus = intel_i2c_create(dev, analog_ddc_reg,
"SDVOB/VGA DDC BUS");
dev_priv->hotplug_supported_mask |= SDVOB_HOTPLUG_INT_STATUS;
} else {
- intel_encoder->ddc_bus = intel_i2c_create(dev, GPIOE, "SDVOC DDC BUS");
- sdvo_priv->analog_ddc_bus = intel_i2c_create(dev, GPIOA,
+ intel_encoder->ddc_bus = intel_i2c_create(dev, ddc_reg, "SDVOC DDC BUS");
+ sdvo_priv->analog_ddc_bus = intel_i2c_create(dev, analog_ddc_reg,
"SDVOC/VGA DDC BUS");
dev_priv->hotplug_supported_mask |= SDVOC_HOTPLUG_INT_STATUS;
}
@@ -2833,41 +2826,21 @@ bool intel_sdvo_init(struct drm_device *dev, int sdvo_reg)
/* Wrap with our custom algo which switches to DDC mode */
intel_encoder->ddc_bus->algo = &intel_sdvo_i2c_bit_algo;
+ /* encoder type will be decided later */
+ drm_encoder_init(dev, &intel_encoder->enc, &intel_sdvo_enc_funcs, 0);
+ drm_encoder_helper_add(&intel_encoder->enc, &intel_sdvo_helper_funcs);
+
/* In default case sdvo lvds is false */
intel_sdvo_get_capabilities(intel_encoder, &sdvo_priv->caps);
if (intel_sdvo_output_setup(intel_encoder,
sdvo_priv->caps.output_flags) != true) {
DRM_DEBUG_KMS("SDVO output failed to setup on SDVO%c\n",
- sdvo_reg == SDVOB ? 'B' : 'C');
+ IS_SDVOB(sdvo_reg) ? 'B' : 'C');
goto err_i2c;
}
-
- connector = &intel_encoder->base;
- drm_connector_init(dev, connector, &intel_sdvo_connector_funcs,
- connector->connector_type);
-
- drm_connector_helper_add(connector, &intel_sdvo_connector_helper_funcs);
- connector->interlace_allowed = 0;
- connector->doublescan_allowed = 0;
- connector->display_info.subpixel_order = SubPixelHorizontalRGB;
-
- drm_encoder_init(dev, &intel_encoder->enc,
- &intel_sdvo_enc_funcs, intel_encoder->enc.encoder_type);
-
- drm_encoder_helper_add(&intel_encoder->enc, &intel_sdvo_helper_funcs);
-
- drm_mode_connector_attach_encoder(&intel_encoder->base, &intel_encoder->enc);
- if (sdvo_priv->is_tv)
- intel_sdvo_tv_create_property(connector);
-
- if (sdvo_priv->is_tv || sdvo_priv->is_lvds)
- intel_sdvo_create_enhance_property(connector);
-
- drm_sysfs_connector_add(connector);
-
- intel_sdvo_select_ddc_bus(sdvo_priv);
+ intel_sdvo_select_ddc_bus(dev_priv, sdvo_priv, sdvo_reg);
/* Set the input timing to the screen. Assume always input 0. */
intel_sdvo_set_target_input(intel_encoder, true, false);
diff --git a/drivers/gpu/drm/i915/intel_tv.c b/drivers/gpu/drm/i915/intel_tv.c
index d7d39b2327d..6d553c29d10 100644
--- a/drivers/gpu/drm/i915/intel_tv.c
+++ b/drivers/gpu/drm/i915/intel_tv.c
@@ -916,143 +916,6 @@ intel_tv_dpms(struct drm_encoder *encoder, int mode)
}
}
-static void
-intel_tv_save(struct drm_connector *connector)
-{
- struct drm_device *dev = connector->dev;
- struct drm_i915_private *dev_priv = dev->dev_private;
- struct intel_encoder *intel_encoder = to_intel_encoder(connector);
- struct intel_tv_priv *tv_priv = intel_encoder->dev_priv;
- int i;
-
- tv_priv->save_TV_H_CTL_1 = I915_READ(TV_H_CTL_1);
- tv_priv->save_TV_H_CTL_2 = I915_READ(TV_H_CTL_2);
- tv_priv->save_TV_H_CTL_3 = I915_READ(TV_H_CTL_3);
- tv_priv->save_TV_V_CTL_1 = I915_READ(TV_V_CTL_1);
- tv_priv->save_TV_V_CTL_2 = I915_READ(TV_V_CTL_2);
- tv_priv->save_TV_V_CTL_3 = I915_READ(TV_V_CTL_3);
- tv_priv->save_TV_V_CTL_4 = I915_READ(TV_V_CTL_4);
- tv_priv->save_TV_V_CTL_5 = I915_READ(TV_V_CTL_5);
- tv_priv->save_TV_V_CTL_6 = I915_READ(TV_V_CTL_6);
- tv_priv->save_TV_V_CTL_7 = I915_READ(TV_V_CTL_7);
- tv_priv->save_TV_SC_CTL_1 = I915_READ(TV_SC_CTL_1);
- tv_priv->save_TV_SC_CTL_2 = I915_READ(TV_SC_CTL_2);
- tv_priv->save_TV_SC_CTL_3 = I915_READ(TV_SC_CTL_3);
-
- tv_priv->save_TV_CSC_Y = I915_READ(TV_CSC_Y);
- tv_priv->save_TV_CSC_Y2 = I915_READ(TV_CSC_Y2);
- tv_priv->save_TV_CSC_U = I915_READ(TV_CSC_U);
- tv_priv->save_TV_CSC_U2 = I915_READ(TV_CSC_U2);
- tv_priv->save_TV_CSC_V = I915_READ(TV_CSC_V);
- tv_priv->save_TV_CSC_V2 = I915_READ(TV_CSC_V2);
- tv_priv->save_TV_CLR_KNOBS = I915_READ(TV_CLR_KNOBS);
- tv_priv->save_TV_CLR_LEVEL = I915_READ(TV_CLR_LEVEL);
- tv_priv->save_TV_WIN_POS = I915_READ(TV_WIN_POS);
- tv_priv->save_TV_WIN_SIZE = I915_READ(TV_WIN_SIZE);
- tv_priv->save_TV_FILTER_CTL_1 = I915_READ(TV_FILTER_CTL_1);
- tv_priv->save_TV_FILTER_CTL_2 = I915_READ(TV_FILTER_CTL_2);
- tv_priv->save_TV_FILTER_CTL_3 = I915_READ(TV_FILTER_CTL_3);
-
- for (i = 0; i < 60; i++)
- tv_priv->save_TV_H_LUMA[i] = I915_READ(TV_H_LUMA_0 + (i <<2));
- for (i = 0; i < 60; i++)
- tv_priv->save_TV_H_CHROMA[i] = I915_READ(TV_H_CHROMA_0 + (i <<2));
- for (i = 0; i < 43; i++)
- tv_priv->save_TV_V_LUMA[i] = I915_READ(TV_V_LUMA_0 + (i <<2));
- for (i = 0; i < 43; i++)
- tv_priv->save_TV_V_CHROMA[i] = I915_READ(TV_V_CHROMA_0 + (i <<2));
-
- tv_priv->save_TV_DAC = I915_READ(TV_DAC);
- tv_priv->save_TV_CTL = I915_READ(TV_CTL);
-}
-
-static void
-intel_tv_restore(struct drm_connector *connector)
-{
- struct drm_device *dev = connector->dev;
- struct drm_i915_private *dev_priv = dev->dev_private;
- struct intel_encoder *intel_encoder = to_intel_encoder(connector);
- struct intel_tv_priv *tv_priv = intel_encoder->dev_priv;
- struct drm_crtc *crtc = connector->encoder->crtc;
- struct intel_crtc *intel_crtc;
- int i;
-
- /* FIXME: No CRTC? */
- if (!crtc)
- return;
-
- intel_crtc = to_intel_crtc(crtc);
- I915_WRITE(TV_H_CTL_1, tv_priv->save_TV_H_CTL_1);
- I915_WRITE(TV_H_CTL_2, tv_priv->save_TV_H_CTL_2);
- I915_WRITE(TV_H_CTL_3, tv_priv->save_TV_H_CTL_3);
- I915_WRITE(TV_V_CTL_1, tv_priv->save_TV_V_CTL_1);
- I915_WRITE(TV_V_CTL_2, tv_priv->save_TV_V_CTL_2);
- I915_WRITE(TV_V_CTL_3, tv_priv->save_TV_V_CTL_3);
- I915_WRITE(TV_V_CTL_4, tv_priv->save_TV_V_CTL_4);
- I915_WRITE(TV_V_CTL_5, tv_priv->save_TV_V_CTL_5);
- I915_WRITE(TV_V_CTL_6, tv_priv->save_TV_V_CTL_6);
- I915_WRITE(TV_V_CTL_7, tv_priv->save_TV_V_CTL_7);
- I915_WRITE(TV_SC_CTL_1, tv_priv->save_TV_SC_CTL_1);
- I915_WRITE(TV_SC_CTL_2, tv_priv->save_TV_SC_CTL_2);
- I915_WRITE(TV_SC_CTL_3, tv_priv->save_TV_SC_CTL_3);
-
- I915_WRITE(TV_CSC_Y, tv_priv->save_TV_CSC_Y);
- I915_WRITE(TV_CSC_Y2, tv_priv->save_TV_CSC_Y2);
- I915_WRITE(TV_CSC_U, tv_priv->save_TV_CSC_U);
- I915_WRITE(TV_CSC_U2, tv_priv->save_TV_CSC_U2);
- I915_WRITE(TV_CSC_V, tv_priv->save_TV_CSC_V);
- I915_WRITE(TV_CSC_V2, tv_priv->save_TV_CSC_V2);
- I915_WRITE(TV_CLR_KNOBS, tv_priv->save_TV_CLR_KNOBS);
- I915_WRITE(TV_CLR_LEVEL, tv_priv->save_TV_CLR_LEVEL);
-
- {
- int pipeconf_reg = (intel_crtc->pipe == 0) ?
- PIPEACONF : PIPEBCONF;
- int dspcntr_reg = (intel_crtc->plane == 0) ?
- DSPACNTR : DSPBCNTR;
- int pipeconf = I915_READ(pipeconf_reg);
- int dspcntr = I915_READ(dspcntr_reg);
- int dspbase_reg = (intel_crtc->plane == 0) ?
- DSPAADDR : DSPBADDR;
- /* Pipe must be off here */
- I915_WRITE(dspcntr_reg, dspcntr & ~DISPLAY_PLANE_ENABLE);
- /* Flush the plane changes */
- I915_WRITE(dspbase_reg, I915_READ(dspbase_reg));
-
- if (!IS_I9XX(dev)) {
- /* Wait for vblank for the disable to take effect */
- intel_wait_for_vblank(dev);
- }
-
- I915_WRITE(pipeconf_reg, pipeconf & ~PIPEACONF_ENABLE);
- /* Wait for vblank for the disable to take effect. */
- intel_wait_for_vblank(dev);
-
- /* Filter ctl must be set before TV_WIN_SIZE */
- I915_WRITE(TV_FILTER_CTL_1, tv_priv->save_TV_FILTER_CTL_1);
- I915_WRITE(TV_FILTER_CTL_2, tv_priv->save_TV_FILTER_CTL_2);
- I915_WRITE(TV_FILTER_CTL_3, tv_priv->save_TV_FILTER_CTL_3);
- I915_WRITE(TV_WIN_POS, tv_priv->save_TV_WIN_POS);
- I915_WRITE(TV_WIN_SIZE, tv_priv->save_TV_WIN_SIZE);
- I915_WRITE(pipeconf_reg, pipeconf);
- I915_WRITE(dspcntr_reg, dspcntr);
- /* Flush the plane changes */
- I915_WRITE(dspbase_reg, I915_READ(dspbase_reg));
- }
-
- for (i = 0; i < 60; i++)
- I915_WRITE(TV_H_LUMA_0 + (i <<2), tv_priv->save_TV_H_LUMA[i]);
- for (i = 0; i < 60; i++)
- I915_WRITE(TV_H_CHROMA_0 + (i <<2), tv_priv->save_TV_H_CHROMA[i]);
- for (i = 0; i < 43; i++)
- I915_WRITE(TV_V_LUMA_0 + (i <<2), tv_priv->save_TV_V_LUMA[i]);
- for (i = 0; i < 43; i++)
- I915_WRITE(TV_V_CHROMA_0 + (i <<2), tv_priv->save_TV_V_CHROMA[i]);
-
- I915_WRITE(TV_DAC, tv_priv->save_TV_DAC);
- I915_WRITE(TV_CTL, tv_priv->save_TV_CTL);
-}
-
static const struct tv_mode *
intel_tv_mode_lookup (char *tv_format)
{
@@ -1078,7 +941,8 @@ intel_tv_mode_find (struct intel_encoder *intel_encoder)
static enum drm_mode_status
intel_tv_mode_valid(struct drm_connector *connector, struct drm_display_mode *mode)
{
- struct intel_encoder *intel_encoder = to_intel_encoder(connector);
+ struct drm_encoder *encoder = intel_attached_encoder(connector);
+ struct intel_encoder *intel_encoder = enc_to_intel_encoder(encoder);
const struct tv_mode *tv_mode = intel_tv_mode_find(intel_encoder);
/* Ensure TV refresh is close to desired refresh */
@@ -1441,7 +1305,8 @@ intel_tv_detect_type (struct drm_crtc *crtc, struct intel_encoder *intel_encoder
*/
static void intel_tv_find_better_format(struct drm_connector *connector)
{
- struct intel_encoder *intel_encoder = to_intel_encoder(connector);
+ struct drm_encoder *encoder = intel_attached_encoder(connector);
+ struct intel_encoder *intel_encoder = enc_to_intel_encoder(encoder);
struct intel_tv_priv *tv_priv = intel_encoder->dev_priv;
const struct tv_mode *tv_mode = intel_tv_mode_find(intel_encoder);
int i;
@@ -1475,9 +1340,9 @@ intel_tv_detect(struct drm_connector *connector)
{
struct drm_crtc *crtc;
struct drm_display_mode mode;
- struct intel_encoder *intel_encoder = to_intel_encoder(connector);
+ struct drm_encoder *encoder = intel_attached_encoder(connector);
+ struct intel_encoder *intel_encoder = enc_to_intel_encoder(encoder);
struct intel_tv_priv *tv_priv = intel_encoder->dev_priv;
- struct drm_encoder *encoder = &intel_encoder->enc;
int dpms_mode;
int type = tv_priv->type;
@@ -1487,10 +1352,12 @@ intel_tv_detect(struct drm_connector *connector)
if (encoder->crtc && encoder->crtc->enabled) {
type = intel_tv_detect_type(encoder->crtc, intel_encoder);
} else {
- crtc = intel_get_load_detect_pipe(intel_encoder, &mode, &dpms_mode);
+ crtc = intel_get_load_detect_pipe(intel_encoder, connector,
+ &mode, &dpms_mode);
if (crtc) {
type = intel_tv_detect_type(crtc, intel_encoder);
- intel_release_load_detect_pipe(intel_encoder, dpms_mode);
+ intel_release_load_detect_pipe(intel_encoder, connector,
+ dpms_mode);
} else
type = -1;
}
@@ -1525,7 +1392,8 @@ static void
intel_tv_chose_preferred_modes(struct drm_connector *connector,
struct drm_display_mode *mode_ptr)
{
- struct intel_encoder *intel_encoder = to_intel_encoder(connector);
+ struct drm_encoder *encoder = intel_attached_encoder(connector);
+ struct intel_encoder *intel_encoder = enc_to_intel_encoder(encoder);
const struct tv_mode *tv_mode = intel_tv_mode_find(intel_encoder);
if (tv_mode->nbr_end < 480 && mode_ptr->vdisplay == 480)
@@ -1550,7 +1418,8 @@ static int
intel_tv_get_modes(struct drm_connector *connector)
{
struct drm_display_mode *mode_ptr;
- struct intel_encoder *intel_encoder = to_intel_encoder(connector);
+ struct drm_encoder *encoder = intel_attached_encoder(connector);
+ struct intel_encoder *intel_encoder = enc_to_intel_encoder(encoder);
const struct tv_mode *tv_mode = intel_tv_mode_find(intel_encoder);
int j, count = 0;
u64 tmp;
@@ -1604,11 +1473,9 @@ intel_tv_get_modes(struct drm_connector *connector)
static void
intel_tv_destroy (struct drm_connector *connector)
{
- struct intel_encoder *intel_encoder = to_intel_encoder(connector);
-
drm_sysfs_connector_remove(connector);
drm_connector_cleanup(connector);
- kfree(intel_encoder);
+ kfree(connector);
}
@@ -1617,9 +1484,9 @@ intel_tv_set_property(struct drm_connector *connector, struct drm_property *prop
uint64_t val)
{
struct drm_device *dev = connector->dev;
- struct intel_encoder *intel_encoder = to_intel_encoder(connector);
+ struct drm_encoder *encoder = intel_attached_encoder(connector);
+ struct intel_encoder *intel_encoder = enc_to_intel_encoder(encoder);
struct intel_tv_priv *tv_priv = intel_encoder->dev_priv;
- struct drm_encoder *encoder = &intel_encoder->enc;
struct drm_crtc *crtc = encoder->crtc;
int ret = 0;
bool changed = false;
@@ -1676,8 +1543,6 @@ static const struct drm_encoder_helper_funcs intel_tv_helper_funcs = {
static const struct drm_connector_funcs intel_tv_connector_funcs = {
.dpms = drm_helper_connector_dpms,
- .save = intel_tv_save,
- .restore = intel_tv_restore,
.detect = intel_tv_detect,
.destroy = intel_tv_destroy,
.set_property = intel_tv_set_property,
@@ -1687,12 +1552,15 @@ static const struct drm_connector_funcs intel_tv_connector_funcs = {
static const struct drm_connector_helper_funcs intel_tv_connector_helper_funcs = {
.mode_valid = intel_tv_mode_valid,
.get_modes = intel_tv_get_modes,
- .best_encoder = intel_best_encoder,
+ .best_encoder = intel_attached_encoder,
};
static void intel_tv_enc_destroy(struct drm_encoder *encoder)
{
+ struct intel_encoder *intel_encoder = enc_to_intel_encoder(encoder);
+
drm_encoder_cleanup(encoder);
+ kfree(intel_encoder);
}
static const struct drm_encoder_funcs intel_tv_enc_funcs = {
@@ -1741,6 +1609,7 @@ intel_tv_init(struct drm_device *dev)
struct drm_i915_private *dev_priv = dev->dev_private;
struct drm_connector *connector;
struct intel_encoder *intel_encoder;
+ struct intel_connector *intel_connector;
struct intel_tv_priv *tv_priv;
u32 tv_dac_on, tv_dac_off, save_tv_dac;
char **tv_format_names;
@@ -1786,7 +1655,13 @@ intel_tv_init(struct drm_device *dev)
return;
}
- connector = &intel_encoder->base;
+ intel_connector = kzalloc(sizeof(struct intel_connector), GFP_KERNEL);
+ if (!intel_connector) {
+ kfree(intel_encoder);
+ return;
+ }
+
+ connector = &intel_connector->base;
drm_connector_init(dev, connector, &intel_tv_connector_funcs,
DRM_MODE_CONNECTOR_SVIDEO);
@@ -1794,7 +1669,7 @@ intel_tv_init(struct drm_device *dev)
drm_encoder_init(dev, &intel_encoder->enc, &intel_tv_enc_funcs,
DRM_MODE_ENCODER_TVDAC);
- drm_mode_connector_attach_encoder(&intel_encoder->base, &intel_encoder->enc);
+ drm_mode_connector_attach_encoder(&intel_connector->base, &intel_encoder->enc);
tv_priv = (struct intel_tv_priv *)(intel_encoder + 1);
intel_encoder->type = INTEL_OUTPUT_TVOUT;
intel_encoder->crtc_mask = (1 << 0) | (1 << 1);
diff --git a/drivers/gpu/drm/nouveau/Makefile b/drivers/gpu/drm/nouveau/Makefile
index 453df3f6053..acd31ed861e 100644
--- a/drivers/gpu/drm/nouveau/Makefile
+++ b/drivers/gpu/drm/nouveau/Makefile
@@ -22,7 +22,8 @@ nouveau-y := nouveau_drv.o nouveau_state.o nouveau_channel.o nouveau_mem.o \
nv50_cursor.o nv50_display.o nv50_fbcon.o \
nv04_dac.o nv04_dfp.o nv04_tv.o nv17_tv.o nv17_tv_modes.o \
nv04_crtc.o nv04_display.o nv04_cursor.o nv04_fbcon.o \
- nv17_gpio.o nv50_gpio.o
+ nv17_gpio.o nv50_gpio.o \
+ nv50_calc.o
nouveau-$(CONFIG_DRM_NOUVEAU_DEBUG) += nouveau_debugfs.o
nouveau-$(CONFIG_COMPAT) += nouveau_ioc32.o
diff --git a/drivers/gpu/drm/nouveau/nouveau_bios.c b/drivers/gpu/drm/nouveau/nouveau_bios.c
index abc382a9918..e7e69ccce5c 100644
--- a/drivers/gpu/drm/nouveau/nouveau_bios.c
+++ b/drivers/gpu/drm/nouveau/nouveau_bios.c
@@ -26,6 +26,7 @@
#define NV_DEBUG_NOTRACE
#include "nouveau_drv.h"
#include "nouveau_hw.h"
+#include "nouveau_encoder.h"
/* these defines are made up */
#define NV_CIO_CRE_44_HEADA 0x0
@@ -256,6 +257,11 @@ static bool NVShadowVBIOS(struct drm_device *dev, uint8_t *data)
struct init_tbl_entry {
char *name;
uint8_t id;
+ /* Return:
+ * > 0: success, length of opcode
+ * 0: success, but abort further parsing of table (INIT_DONE etc)
+ * < 0: failure, table parsing will be aborted
+ */
int (*handler)(struct nvbios *, uint16_t, struct init_exec *);
};
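That tri-state return contract is what the opcode interpreter keys off: a positive value advances parsing by the opcode's length, zero stops the table cleanly, and a negative value aborts with an error. A sketch of a dispatch loop honoring the contract (table layout and names hypothetical, not nouveau's actual parser):

	#include <stdint.h>

	struct init_op {
		uint8_t id;
		int (*handler)(const uint8_t *tbl, uint16_t off);
	};

	/* Returns 0 on clean termination (INIT_DONE etc.), <0 on failure. */
	static int run_init_table(const uint8_t *tbl, uint16_t off,
				  const struct init_op *ops, int n_ops)
	{
		for (;;) {
			int i, len = -1;

			for (i = 0; i < n_ops; i++) {
				if (ops[i].id != tbl[off])
					continue;
				len = ops[i].handler(tbl, off);
				break;
			}
			if (i == n_ops)
				return -1;	/* unknown opcode */
			if (len < 0)
				return len;	/* handler failed, abort */
			if (len == 0)
				return 0;	/* clean stop */
			off += (uint16_t)len;	/* advance by opcode length */
		}
	}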
@@ -709,6 +715,83 @@ static int dcb_entry_idx_from_crtchead(struct drm_device *dev)
return dcb_entry;
}
+static int
+read_dcb_i2c_entry(struct drm_device *dev, int dcb_version, uint8_t *i2ctable, int index, struct dcb_i2c_entry *i2c)
+{
+ uint8_t dcb_i2c_ver = dcb_version, headerlen = 0, entry_len = 4;
+ int i2c_entries = DCB_MAX_NUM_I2C_ENTRIES;
+ int recordoffset = 0, rdofs = 1, wrofs = 0;
+ uint8_t port_type = 0;
+
+ if (!i2ctable)
+ return -EINVAL;
+
+ if (dcb_version >= 0x30) {
+ if (i2ctable[0] != dcb_version) /* necessary? */
+ NV_WARN(dev,
+ "DCB I2C table version mismatch (%02X vs %02X)\n",
+ i2ctable[0], dcb_version);
+ dcb_i2c_ver = i2ctable[0];
+ headerlen = i2ctable[1];
+ if (i2ctable[2] <= DCB_MAX_NUM_I2C_ENTRIES)
+ i2c_entries = i2ctable[2];
+ else
+ NV_WARN(dev,
+ "DCB I2C table has more entries than indexable "
+ "(%d entries, max %d)\n", i2ctable[2],
+ DCB_MAX_NUM_I2C_ENTRIES);
+ entry_len = i2ctable[3];
+ /* [4] is i2c_default_indices, read in parse_dcb_table() */
+ }
+ /*
+ * It's your own fault if you call this function on a DCB 1.1 BIOS --
+ * the test below is for DCB 1.2
+ */
+ if (dcb_version < 0x14) {
+ recordoffset = 2;
+ rdofs = 0;
+ wrofs = 1;
+ }
+
+ if (index == 0xf)
+ return 0;
+ if (index >= i2c_entries) {
+ NV_ERROR(dev, "DCB I2C index too big (%d >= %d)\n",
+ index, i2ctable[2]);
+ return -ENOENT;
+ }
+ if (i2ctable[headerlen + entry_len * index + 3] == 0xff) {
+ NV_ERROR(dev, "DCB I2C entry invalid\n");
+ return -EINVAL;
+ }
+
+ if (dcb_i2c_ver >= 0x30) {
+ port_type = i2ctable[headerlen + recordoffset + 3 + entry_len * index];
+
+ /*
+ * Fixup for chips using same address offset for read and
+ * write.
+ */
+ if (port_type == 4) /* seen on C51 */
+ rdofs = wrofs = 1;
+ if (port_type >= 5) /* G80+ */
+ rdofs = wrofs = 0;
+ }
+
+ if (dcb_i2c_ver >= 0x40) {
+ if (port_type != 5 && port_type != 6)
+ NV_WARN(dev, "DCB I2C table has port type %d\n", port_type);
+
+ i2c->entry = ROM32(i2ctable[headerlen + recordoffset + entry_len * index]);
+ }
+
+ i2c->port_type = port_type;
+ i2c->read = i2ctable[headerlen + recordoffset + rdofs + entry_len * index];
+ i2c->write = i2ctable[headerlen + recordoffset + wrofs + entry_len * index];
+
+ return 0;
+}
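
The indexing above follows a header-plus-fixed-size-records layout. As a sketch using the locals computed in this function (recordoffset is only nonzero for pre-DCB-1.2 tables), the bytes of entry 'index' live at:

	uint8_t *entry = &i2ctable[headerlen + entry_len * index];
	uint8_t rd   = entry[recordoffset + rdofs];	/* read port */
	uint8_t wr   = entry[recordoffset + wrofs];	/* write port */
	uint8_t type = entry[recordoffset + 3];		/* port type, DCB 3.0+ */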
+
static struct nouveau_i2c_chan *
init_i2c_device_find(struct drm_device *dev, int i2c_index)
{
@@ -727,6 +810,20 @@ init_i2c_device_find(struct drm_device *dev, int i2c_index)
}
if (i2c_index == 0x80) /* g80+ */
i2c_index = dcb->i2c_default_indices & 0xf;
+ else
+ if (i2c_index == 0x81)
+ i2c_index = (dcb->i2c_default_indices & 0xf0) >> 4;
+
+ if (i2c_index > DCB_MAX_NUM_I2C_ENTRIES) {
+ NV_ERROR(dev, "invalid i2c_index 0x%x\n", i2c_index);
+ return NULL;
+ }
+
+ /* Make sure the i2c table entry has been parsed; it may not
+ * have been if this bus isn't referenced by any DCB encoder
+ */
+ read_dcb_i2c_entry(dev, dcb->version, dcb->i2c_table,
+ i2c_index, &dcb->i2c[i2c_index]);
return nouveau_i2c_find(dev, i2c_index);
}
@@ -818,7 +915,7 @@ init_io_restrict_prog(struct nvbios *bios, uint16_t offset,
NV_ERROR(bios->dev,
"0x%04X: Config 0x%02X exceeds maximal bound 0x%02X\n",
offset, config, count);
- return 0;
+ return -EINVAL;
}
configval = ROM32(bios->data[offset + 11 + config * 4]);
@@ -920,7 +1017,7 @@ init_io_restrict_pll(struct nvbios *bios, uint16_t offset,
NV_ERROR(bios->dev,
"0x%04X: Config 0x%02X exceeds maximal bound 0x%02X\n",
offset, config, count);
- return 0;
+ return -EINVAL;
}
freq = ROM16(bios->data[offset + 12 + config * 2]);
@@ -1067,6 +1164,126 @@ init_io_flag_condition(struct nvbios *bios, uint16_t offset,
}
static int
+init_dp_condition(struct nvbios *bios, uint16_t offset, struct init_exec *iexec)
+{
+ /*
+ * INIT_DP_CONDITION opcode: 0x3A ('')
+ *
+ * offset (8 bit): opcode
+ * offset + 1 (8 bit): "sub" opcode
+ * offset + 2 (8 bit): unknown
+ *
+ */
+
+ struct bit_displayport_encoder_table *dpe = NULL;
+ struct dcb_entry *dcb = bios->display.output;
+ struct drm_device *dev = bios->dev;
+ uint8_t cond = bios->data[offset + 1];
+ int dummy;
+
+ BIOSLOG(bios, "0x%04X: subop 0x%02X\n", offset, cond);
+
+ if (!iexec->execute)
+ return 3;
+
+ dpe = nouveau_bios_dp_table(dev, dcb, &dummy);
+ if (!dpe) {
+ NV_ERROR(dev, "0x%04X: INIT_3A: no encoder table!!\n", offset);
+ return -EINVAL;
+ }
+
+ switch (cond) {
+ case 0:
+ {
+ struct dcb_connector_table_entry *ent =
+ &bios->dcb.connector.entry[dcb->connector];
+
+ if (ent->type != DCB_CONNECTOR_eDP)
+ iexec->execute = false;
+ }
+ break;
+ case 1:
+ case 2:
+ if (!(dpe->unknown & cond))
+ iexec->execute = false;
+ break;
+ case 5:
+ {
+ struct nouveau_i2c_chan *auxch;
+ int ret;
+
+ auxch = nouveau_i2c_find(dev, bios->display.output->i2c_index);
+ if (!auxch)
+ return -ENODEV;
+
+ ret = nouveau_dp_auxch(auxch, 9, 0xd, &cond, 1);
+ if (ret)
+ return ret;
+
+ if (cond & 1)
+ iexec->execute = false;
+ }
+ break;
+ default:
+ NV_WARN(dev, "0x%04X: unknown INIT_3A op: %d\n", offset, cond);
+ break;
+ }
+
+ if (iexec->execute)
+ BIOSLOG(bios, "0x%04X: continuing to execute\n", offset);
+ else
+ BIOSLOG(bios, "0x%04X: skipping following commands\n", offset);
+
+ return 3;
+}
+
+static int
+init_op_3b(struct nvbios *bios, uint16_t offset, struct init_exec *iexec)
+{
+ /*
+ * INIT_3B opcode: 0x3B ('')
+ *
+ * offset (8 bit): opcode
+ * offset + 1 (8 bit): crtc index
+ *
+ */
+
+ uint8_t or = ffs(bios->display.output->or) - 1;
+ uint8_t index = bios->data[offset + 1];
+ uint8_t data;
+
+ if (!iexec->execute)
+ return 2;
+
+ data = bios_idxprt_rd(bios, 0x3d4, index);
+ bios_idxprt_wr(bios, 0x3d4, index, data & ~(1 << or));
+ return 2;
+}
+
+static int
+init_op_3c(struct nvbios *bios, uint16_t offset, struct init_exec *iexec)
+{
+ /*
+ * INIT_3C opcode: 0x3C ('')
+ *
+ * offset (8 bit): opcode
+ * offset + 1 (8 bit): crtc index
+ *
+ */
+
+ uint8_t or = ffs(bios->display.output->or) - 1;
+ uint8_t index = bios->data[offset + 1];
+ uint8_t data;
+
+ if (!iexec->execute)
+ return 2;
+
+ data = bios_idxprt_rd(bios, 0x3d4, index);
+ bios_idxprt_wr(bios, 0x3d4, index, data | (1 << or));
+ return 2;
+}
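
The two handlers above differ only in whether the output resource's bit in the indexed CRTC register is cleared (INIT_3B) or set (INIT_3C). A hypothetical shared helper, sketched here purely to make the pairing explicit:

static void
crtc_or_bit(struct nvbios *bios, uint8_t index, bool set)
{
	uint8_t or = ffs(bios->display.output->or) - 1;
	uint8_t data = bios_idxprt_rd(bios, 0x3d4, index);

	if (set)
		data |= 1 << or;	/* INIT_3C */
	else
		data &= ~(1 << or);	/* INIT_3B */
	bios_idxprt_wr(bios, 0x3d4, index, data);
}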
+
+static int
init_idx_addr_latched(struct nvbios *bios, uint16_t offset,
struct init_exec *iexec)
{
@@ -1170,7 +1387,7 @@ init_io_restrict_pll2(struct nvbios *bios, uint16_t offset,
NV_ERROR(bios->dev,
"0x%04X: Config 0x%02X exceeds maximal bound 0x%02X\n",
offset, config, count);
- return 0;
+ return -EINVAL;
}
freq = ROM32(bios->data[offset + 11 + config * 4]);
@@ -1231,12 +1448,11 @@ init_i2c_byte(struct nvbios *bios, uint16_t offset, struct init_exec *iexec)
*/
uint8_t i2c_index = bios->data[offset + 1];
- uint8_t i2c_address = bios->data[offset + 2];
+ uint8_t i2c_address = bios->data[offset + 2] >> 1;
uint8_t count = bios->data[offset + 3];
- int len = 4 + count * 3;
struct nouveau_i2c_chan *chan;
- struct i2c_msg msg;
- int i;
+ int len = 4 + count * 3;
+ int ret, i;
if (!iexec->execute)
return len;
@@ -1247,35 +1463,34 @@ init_i2c_byte(struct nvbios *bios, uint16_t offset, struct init_exec *iexec)
chan = init_i2c_device_find(bios->dev, i2c_index);
if (!chan)
- return 0;
+ return -ENODEV;
for (i = 0; i < count; i++) {
- uint8_t i2c_reg = bios->data[offset + 4 + i * 3];
+ uint8_t reg = bios->data[offset + 4 + i * 3];
uint8_t mask = bios->data[offset + 5 + i * 3];
uint8_t data = bios->data[offset + 6 + i * 3];
- uint8_t value;
+ union i2c_smbus_data val;
- msg.addr = i2c_address;
- msg.flags = I2C_M_RD;
- msg.len = 1;
- msg.buf = &value;
- if (i2c_transfer(&chan->adapter, &msg, 1) != 1)
- return 0;
+ ret = i2c_smbus_xfer(&chan->adapter, i2c_address, 0,
+ I2C_SMBUS_READ, reg,
+ I2C_SMBUS_BYTE_DATA, &val);
+ if (ret < 0)
+ return ret;
BIOSLOG(bios, "0x%04X: I2CReg: 0x%02X, Value: 0x%02X, "
"Mask: 0x%02X, Data: 0x%02X\n",
- offset, i2c_reg, value, mask, data);
+ offset, reg, val.byte, mask, data);
- value = (value & mask) | data;
+ if (!bios->execute)
+ continue;
- if (bios->execute) {
- msg.addr = i2c_address;
- msg.flags = 0;
- msg.len = 1;
- msg.buf = &value;
- if (i2c_transfer(&chan->adapter, &msg, 1) != 1)
- return 0;
- }
+ val.byte &= mask;
+ val.byte |= data;
+ ret = i2c_smbus_xfer(&chan->adapter, i2c_address, 0,
+ I2C_SMBUS_WRITE, reg,
+ I2C_SMBUS_BYTE_DATA, &val);
+ if (ret < 0)
+ return ret;
}
return len;
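
The '>> 1' on the address byte is the functional fix in this hunk: the VBIOS stores an 8-bit bus address with the R/W bit in bit 0, while Linux's i2c core expects a bare 7-bit address. For example:

	uint8_t vbios_addr = 0xa2;		/* as stored in the BIOS image */
	uint8_t linux_addr = vbios_addr >> 1;	/* 0x51, what i2c_smbus_xfer() takes */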
@@ -1301,12 +1516,11 @@ init_zm_i2c_byte(struct nvbios *bios, uint16_t offset, struct init_exec *iexec)
*/
uint8_t i2c_index = bios->data[offset + 1];
- uint8_t i2c_address = bios->data[offset + 2];
+ uint8_t i2c_address = bios->data[offset + 2] >> 1;
uint8_t count = bios->data[offset + 3];
- int len = 4 + count * 2;
struct nouveau_i2c_chan *chan;
- struct i2c_msg msg;
- int i;
+ int len = 4 + count * 2;
+ int ret, i;
if (!iexec->execute)
return len;
@@ -1317,23 +1531,25 @@ init_zm_i2c_byte(struct nvbios *bios, uint16_t offset, struct init_exec *iexec)
chan = init_i2c_device_find(bios->dev, i2c_index);
if (!chan)
- return 0;
+ return -ENODEV;
for (i = 0; i < count; i++) {
- uint8_t i2c_reg = bios->data[offset + 4 + i * 2];
- uint8_t data = bios->data[offset + 5 + i * 2];
+ uint8_t reg = bios->data[offset + 4 + i * 2];
+ union i2c_smbus_data val;
+
+ val.byte = bios->data[offset + 5 + i * 2];
BIOSLOG(bios, "0x%04X: I2CReg: 0x%02X, Data: 0x%02X\n",
- offset, i2c_reg, data);
-
- if (bios->execute) {
- msg.addr = i2c_address;
- msg.flags = 0;
- msg.len = 1;
- msg.buf = &data;
- if (i2c_transfer(&chan->adapter, &msg, 1) != 1)
- return 0;
- }
+ offset, reg, val.byte);
+
+ if (!bios->execute)
+ continue;
+
+ ret = i2c_smbus_xfer(&chan->adapter, i2c_address, 0,
+ I2C_SMBUS_WRITE, reg,
+ I2C_SMBUS_BYTE_DATA, &val);
+ if (ret < 0)
+ return ret;
}
return len;
@@ -1357,7 +1573,7 @@ init_zm_i2c(struct nvbios *bios, uint16_t offset, struct init_exec *iexec)
*/
uint8_t i2c_index = bios->data[offset + 1];
- uint8_t i2c_address = bios->data[offset + 2];
+ uint8_t i2c_address = bios->data[offset + 2] >> 1;
uint8_t count = bios->data[offset + 3];
int len = 4 + count;
struct nouveau_i2c_chan *chan;
@@ -1374,7 +1590,7 @@ init_zm_i2c(struct nvbios *bios, uint16_t offset, struct init_exec *iexec)
chan = init_i2c_device_find(bios->dev, i2c_index);
if (!chan)
- return 0;
+ return -ENODEV;
for (i = 0; i < count; i++) {
data[i] = bios->data[offset + 4 + i];
@@ -1388,7 +1604,7 @@ init_zm_i2c(struct nvbios *bios, uint16_t offset, struct init_exec *iexec)
msg.len = count;
msg.buf = data;
if (i2c_transfer(&chan->adapter, &msg, 1) != 1)
- return 0;
+ return -EIO;
}
return len;
@@ -1427,7 +1643,7 @@ init_tmds(struct nvbios *bios, uint16_t offset, struct init_exec *iexec)
reg = get_tmds_index_reg(bios->dev, mlv);
if (!reg)
- return 0;
+ return -EINVAL;
bios_wr32(bios, reg,
tmdsaddr | NV_PRAMDAC_FP_TMDS_CONTROL_WRITE_DISABLE);
@@ -1471,7 +1687,7 @@ init_zm_tmds_group(struct nvbios *bios, uint16_t offset,
reg = get_tmds_index_reg(bios->dev, mlv);
if (!reg)
- return 0;
+ return -EINVAL;
for (i = 0; i < count; i++) {
uint8_t tmdsaddr = bios->data[offset + 3 + i * 2];
@@ -1946,7 +2162,7 @@ init_configure_mem(struct nvbios *bios, uint16_t offset,
uint32_t reg, data;
if (bios->major_version > 2)
- return 0;
+ return -ENODEV;
bios_idxprt_wr(bios, NV_VIO_SRX, NV_VIO_SR_CLOCK_INDEX, bios_idxprt_rd(
bios, NV_VIO_SRX, NV_VIO_SR_CLOCK_INDEX) | 0x20);
@@ -2001,7 +2217,7 @@ init_configure_clk(struct nvbios *bios, uint16_t offset,
int clock;
if (bios->major_version > 2)
- return 0;
+ return -ENODEV;
clock = ROM16(bios->data[meminitoffs + 4]) * 10;
setPLL(bios, NV_PRAMDAC_NVPLL_COEFF, clock);
@@ -2034,7 +2250,7 @@ init_configure_preinit(struct nvbios *bios, uint16_t offset,
uint8_t cr3c = ((straps << 2) & 0xf0) | (straps & (1 << 6));
if (bios->major_version > 2)
- return 0;
+ return -ENODEV;
bios_idxprt_wr(bios, NV_CIO_CRX__COLOR,
NV_CIO_CRE_SCRATCH4__INDEX, cr3c);
@@ -2656,7 +2872,7 @@ init_ram_restrict_zm_reg_group(struct nvbios *bios, uint16_t offset,
NV_ERROR(bios->dev,
"0x%04X: Zero block length - has the M table "
"been parsed?\n", offset);
- return 0;
+ return -EINVAL;
}
strap_ramcfg = (bios_rd32(bios, NV_PEXTDEV_BOOT_0) >> 2) & 0xf;
@@ -2840,14 +3056,14 @@ init_auxch(struct nvbios *bios, uint16_t offset, struct init_exec *iexec)
if (!bios->display.output) {
NV_ERROR(dev, "INIT_AUXCH: no active output\n");
- return 0;
+ return -EINVAL;
}
auxch = init_i2c_device_find(dev, bios->display.output->i2c_index);
if (!auxch) {
NV_ERROR(dev, "INIT_AUXCH: couldn't get auxch %d\n",
bios->display.output->i2c_index);
- return 0;
+ return -ENODEV;
}
if (!iexec->execute)
@@ -2860,7 +3076,7 @@ init_auxch(struct nvbios *bios, uint16_t offset, struct init_exec *iexec)
ret = nouveau_dp_auxch(auxch, 9, addr, &data, 1);
if (ret) {
NV_ERROR(dev, "INIT_AUXCH: rd auxch fail %d\n", ret);
- return 0;
+ return ret;
}
data &= bios->data[offset + 0];
@@ -2869,7 +3085,7 @@ init_auxch(struct nvbios *bios, uint16_t offset, struct init_exec *iexec)
ret = nouveau_dp_auxch(auxch, 8, addr, &data, 1);
if (ret) {
NV_ERROR(dev, "INIT_AUXCH: wr auxch fail %d\n", ret);
- return 0;
+ return ret;
}
}
@@ -2899,14 +3115,14 @@ init_zm_auxch(struct nvbios *bios, uint16_t offset, struct init_exec *iexec)
if (!bios->display.output) {
NV_ERROR(dev, "INIT_ZM_AUXCH: no active output\n");
- return 0;
+ return -EINVAL;
}
auxch = init_i2c_device_find(dev, bios->display.output->i2c_index);
if (!auxch) {
NV_ERROR(dev, "INIT_ZM_AUXCH: couldn't get auxch %d\n",
bios->display.output->i2c_index);
- return 0;
+ return -ENODEV;
}
if (!iexec->execute)
@@ -2917,7 +3133,7 @@ init_zm_auxch(struct nvbios *bios, uint16_t offset, struct init_exec *iexec)
ret = nouveau_dp_auxch(auxch, 8, addr, &bios->data[offset], 1);
if (ret) {
NV_ERROR(dev, "INIT_ZM_AUXCH: wr auxch fail %d\n", ret);
- return 0;
+ return ret;
}
}
@@ -2934,6 +3150,9 @@ static struct init_tbl_entry itbl_entry[] = {
{ "INIT_COPY" , 0x37, init_copy },
{ "INIT_NOT" , 0x38, init_not },
{ "INIT_IO_FLAG_CONDITION" , 0x39, init_io_flag_condition },
+ { "INIT_DP_CONDITION" , 0x3A, init_dp_condition },
+ { "INIT_OP_3B" , 0x3B, init_op_3b },
+ { "INIT_OP_3C" , 0x3C, init_op_3c },
{ "INIT_INDEX_ADDRESS_LATCHED" , 0x49, init_idx_addr_latched },
{ "INIT_IO_RESTRICT_PLL2" , 0x4A, init_io_restrict_pll2 },
{ "INIT_PLL2" , 0x4B, init_pll2 },
@@ -3001,7 +3220,7 @@ parse_init_table(struct nvbios *bios, unsigned int offset,
* is changed back to EXECUTE.
*/
- int count = 0, i, res;
+ int count = 0, i, ret;
uint8_t id;
/*
@@ -3016,26 +3235,33 @@ parse_init_table(struct nvbios *bios, unsigned int offset,
for (i = 0; itbl_entry[i].name && (itbl_entry[i].id != id); i++)
;
- if (itbl_entry[i].name) {
- BIOSLOG(bios, "0x%04X: [ (0x%02X) - %s ]\n",
- offset, itbl_entry[i].id, itbl_entry[i].name);
-
- /* execute eventual command handler */
- res = (*itbl_entry[i].handler)(bios, offset, iexec);
- if (!res)
- break;
- /*
- * Add the offset of the current command including all data
- * of that command. The offset will then be pointing on the
- * next op code.
- */
- offset += res;
- } else {
+ if (!itbl_entry[i].name) {
NV_ERROR(bios->dev,
"0x%04X: Init table command not found: "
"0x%02X\n", offset, id);
return -ENOENT;
}
+
+ BIOSLOG(bios, "0x%04X: [ (0x%02X) - %s ]\n", offset,
+ itbl_entry[i].id, itbl_entry[i].name);
+
+ /* execute the command handler, if any */
+ ret = (*itbl_entry[i].handler)(bios, offset, iexec);
+ if (ret < 0) {
+ NV_ERROR(bios->dev, "0x%04X: Failed parsing init "
+ "table opcode: %s %d\n", offset,
+ itbl_entry[i].name, ret);
+ }
+
+ if (ret <= 0)
+ break;
+
+ /*
+ * Advance past the current command, including all of its data,
+ * so that the offset ends up pointing at the next opcode.
+ */
+ offset += ret;
}
if (offset >= bios->length)
@@ -4285,31 +4511,32 @@ int get_pll_limits(struct drm_device *dev, uint32_t limit_match, struct pll_lims
break;
}
-#if 0 /* for easy debugging */
- ErrorF("pll.vco1.minfreq: %d\n", pll_lim->vco1.minfreq);
- ErrorF("pll.vco1.maxfreq: %d\n", pll_lim->vco1.maxfreq);
- ErrorF("pll.vco2.minfreq: %d\n", pll_lim->vco2.minfreq);
- ErrorF("pll.vco2.maxfreq: %d\n", pll_lim->vco2.maxfreq);
-
- ErrorF("pll.vco1.min_inputfreq: %d\n", pll_lim->vco1.min_inputfreq);
- ErrorF("pll.vco1.max_inputfreq: %d\n", pll_lim->vco1.max_inputfreq);
- ErrorF("pll.vco2.min_inputfreq: %d\n", pll_lim->vco2.min_inputfreq);
- ErrorF("pll.vco2.max_inputfreq: %d\n", pll_lim->vco2.max_inputfreq);
-
- ErrorF("pll.vco1.min_n: %d\n", pll_lim->vco1.min_n);
- ErrorF("pll.vco1.max_n: %d\n", pll_lim->vco1.max_n);
- ErrorF("pll.vco1.min_m: %d\n", pll_lim->vco1.min_m);
- ErrorF("pll.vco1.max_m: %d\n", pll_lim->vco1.max_m);
- ErrorF("pll.vco2.min_n: %d\n", pll_lim->vco2.min_n);
- ErrorF("pll.vco2.max_n: %d\n", pll_lim->vco2.max_n);
- ErrorF("pll.vco2.min_m: %d\n", pll_lim->vco2.min_m);
- ErrorF("pll.vco2.max_m: %d\n", pll_lim->vco2.max_m);
-
- ErrorF("pll.max_log2p: %d\n", pll_lim->max_log2p);
- ErrorF("pll.log2p_bias: %d\n", pll_lim->log2p_bias);
-
- ErrorF("pll.refclk: %d\n", pll_lim->refclk);
-#endif
+ NV_DEBUG(dev, "pll.vco1.minfreq: %d\n", pll_lim->vco1.minfreq);
+ NV_DEBUG(dev, "pll.vco1.maxfreq: %d\n", pll_lim->vco1.maxfreq);
+ NV_DEBUG(dev, "pll.vco1.min_inputfreq: %d\n", pll_lim->vco1.min_inputfreq);
+ NV_DEBUG(dev, "pll.vco1.max_inputfreq: %d\n", pll_lim->vco1.max_inputfreq);
+ NV_DEBUG(dev, "pll.vco1.min_n: %d\n", pll_lim->vco1.min_n);
+ NV_DEBUG(dev, "pll.vco1.max_n: %d\n", pll_lim->vco1.max_n);
+ NV_DEBUG(dev, "pll.vco1.min_m: %d\n", pll_lim->vco1.min_m);
+ NV_DEBUG(dev, "pll.vco1.max_m: %d\n", pll_lim->vco1.max_m);
+ if (pll_lim->vco2.maxfreq) {
+ NV_DEBUG(dev, "pll.vco2.minfreq: %d\n", pll_lim->vco2.minfreq);
+ NV_DEBUG(dev, "pll.vco2.maxfreq: %d\n", pll_lim->vco2.maxfreq);
+ NV_DEBUG(dev, "pll.vco2.min_inputfreq: %d\n", pll_lim->vco2.min_inputfreq);
+ NV_DEBUG(dev, "pll.vco2.max_inputfreq: %d\n", pll_lim->vco2.max_inputfreq);
+ NV_DEBUG(dev, "pll.vco2.min_n: %d\n", pll_lim->vco2.min_n);
+ NV_DEBUG(dev, "pll.vco2.max_n: %d\n", pll_lim->vco2.max_n);
+ NV_DEBUG(dev, "pll.vco2.min_m: %d\n", pll_lim->vco2.min_m);
+ NV_DEBUG(dev, "pll.vco2.max_m: %d\n", pll_lim->vco2.max_m);
+ }
+ if (!pll_lim->max_p) {
+ NV_DEBUG(dev, "pll.max_log2p: %d\n", pll_lim->max_log2p);
+ NV_DEBUG(dev, "pll.log2p_bias: %d\n", pll_lim->log2p_bias);
+ } else {
+ NV_DEBUG(dev, "pll.min_p: %d\n", pll_lim->min_p);
+ NV_DEBUG(dev, "pll.max_p: %d\n", pll_lim->max_p);
+ }
+ NV_DEBUG(dev, "pll.refclk: %d\n", pll_lim->refclk);
return 0;
}
@@ -4953,79 +5180,6 @@ static uint16_t findstr(uint8_t *data, int n, const uint8_t *str, int len)
return 0;
}
-static int
-read_dcb_i2c_entry(struct drm_device *dev, int dcb_version, uint8_t *i2ctable, int index, struct dcb_i2c_entry *i2c)
-{
- uint8_t dcb_i2c_ver = dcb_version, headerlen = 0, entry_len = 4;
- int i2c_entries = DCB_MAX_NUM_I2C_ENTRIES;
- int recordoffset = 0, rdofs = 1, wrofs = 0;
- uint8_t port_type = 0;
-
- if (!i2ctable)
- return -EINVAL;
-
- if (dcb_version >= 0x30) {
- if (i2ctable[0] != dcb_version) /* necessary? */
- NV_WARN(dev,
- "DCB I2C table version mismatch (%02X vs %02X)\n",
- i2ctable[0], dcb_version);
- dcb_i2c_ver = i2ctable[0];
- headerlen = i2ctable[1];
- if (i2ctable[2] <= DCB_MAX_NUM_I2C_ENTRIES)
- i2c_entries = i2ctable[2];
- else
- NV_WARN(dev,
- "DCB I2C table has more entries than indexable "
- "(%d entries, max %d)\n", i2ctable[2],
- DCB_MAX_NUM_I2C_ENTRIES);
- entry_len = i2ctable[3];
- /* [4] is i2c_default_indices, read in parse_dcb_table() */
- }
- /*
- * It's your own fault if you call this function on a DCB 1.1 BIOS --
- * the test below is for DCB 1.2
- */
- if (dcb_version < 0x14) {
- recordoffset = 2;
- rdofs = 0;
- wrofs = 1;
- }
-
- if (index == 0xf)
- return 0;
- if (index >= i2c_entries) {
- NV_ERROR(dev, "DCB I2C index too big (%d >= %d)\n",
- index, i2ctable[2]);
- return -ENOENT;
- }
- if (i2ctable[headerlen + entry_len * index + 3] == 0xff) {
- NV_ERROR(dev, "DCB I2C entry invalid\n");
- return -EINVAL;
- }
-
- if (dcb_i2c_ver >= 0x30) {
- port_type = i2ctable[headerlen + recordoffset + 3 + entry_len * index];
-
- /*
- * Fixup for chips using same address offset for read and
- * write.
- */
- if (port_type == 4) /* seen on C51 */
- rdofs = wrofs = 1;
- if (port_type >= 5) /* G80+ */
- rdofs = wrofs = 0;
- }
-
- if (dcb_i2c_ver >= 0x40 && port_type != 5 && port_type != 6)
- NV_WARN(dev, "DCB I2C table has port type %d\n", port_type);
-
- i2c->port_type = port_type;
- i2c->read = i2ctable[headerlen + recordoffset + rdofs + entry_len * index];
- i2c->write = i2ctable[headerlen + recordoffset + wrofs + entry_len * index];
-
- return 0;
-}
-
static struct dcb_gpio_entry *
new_gpio_entry(struct nvbios *bios)
{
diff --git a/drivers/gpu/drm/nouveau/nouveau_bios.h b/drivers/gpu/drm/nouveau/nouveau_bios.h
index c0d7b0a3ece..adf4ec2d06c 100644
--- a/drivers/gpu/drm/nouveau/nouveau_bios.h
+++ b/drivers/gpu/drm/nouveau/nouveau_bios.h
@@ -35,6 +35,7 @@
#define DCB_LOC_ON_CHIP 0
struct dcb_i2c_entry {
+ uint32_t entry;
uint8_t port_type;
uint8_t read, write;
struct nouveau_i2c_chan *chan;
diff --git a/drivers/gpu/drm/nouveau/nouveau_bo.c b/drivers/gpu/drm/nouveau/nouveau_bo.c
index 957d1762984..6f3c1952237 100644
--- a/drivers/gpu/drm/nouveau/nouveau_bo.c
+++ b/drivers/gpu/drm/nouveau/nouveau_bo.c
@@ -160,11 +160,11 @@ nouveau_bo_new(struct drm_device *dev, struct nouveau_channel *chan,
ret = ttm_bo_init(&dev_priv->ttm.bdev, &nvbo->bo, size,
ttm_bo_type_device, &nvbo->placement, align, 0,
false, NULL, size, nouveau_bo_del_ttm);
- nvbo->channel = NULL;
if (ret) {
/* ttm will call nouveau_bo_del_ttm if it fails.. */
return ret;
}
+ nvbo->channel = NULL;
spin_lock(&dev_priv->ttm.bo_list_lock);
list_add_tail(&nvbo->head, &dev_priv->ttm.bo_list);
@@ -225,7 +225,7 @@ nouveau_bo_pin(struct nouveau_bo *nvbo, uint32_t memtype)
nouveau_bo_placement_set(nvbo, memtype, 0);
- ret = ttm_bo_validate(bo, &nvbo->placement, false, false);
+ ret = ttm_bo_validate(bo, &nvbo->placement, false, false, false);
if (ret == 0) {
switch (bo->mem.mem_type) {
case TTM_PL_VRAM:
@@ -261,7 +261,7 @@ nouveau_bo_unpin(struct nouveau_bo *nvbo)
nouveau_bo_placement_set(nvbo, bo->mem.placement, 0);
- ret = ttm_bo_validate(bo, &nvbo->placement, false, false);
+ ret = ttm_bo_validate(bo, &nvbo->placement, false, false, false);
if (ret == 0) {
switch (bo->mem.mem_type) {
case TTM_PL_VRAM:
@@ -391,25 +391,16 @@ nouveau_bo_init_mem_type(struct ttm_bo_device *bdev, uint32_t type,
break;
case TTM_PL_VRAM:
man->flags = TTM_MEMTYPE_FLAG_FIXED |
- TTM_MEMTYPE_FLAG_MAPPABLE |
- TTM_MEMTYPE_FLAG_NEEDS_IOREMAP;
+ TTM_MEMTYPE_FLAG_MAPPABLE;
man->available_caching = TTM_PL_FLAG_UNCACHED |
TTM_PL_FLAG_WC;
man->default_caching = TTM_PL_FLAG_WC;
-
- man->io_addr = NULL;
- man->io_offset = drm_get_resource_start(dev, 1);
- man->io_size = drm_get_resource_len(dev, 1);
- if (man->io_size > dev_priv->vram_size)
- man->io_size = dev_priv->vram_size;
-
man->gpu_offset = dev_priv->vm_vram_base;
break;
case TTM_PL_TT:
switch (dev_priv->gart_info.type) {
case NOUVEAU_GART_AGP:
- man->flags = TTM_MEMTYPE_FLAG_MAPPABLE |
- TTM_MEMTYPE_FLAG_NEEDS_IOREMAP;
+ man->flags = TTM_MEMTYPE_FLAG_MAPPABLE;
man->available_caching = TTM_PL_FLAG_UNCACHED;
man->default_caching = TTM_PL_FLAG_UNCACHED;
break;
@@ -424,10 +415,6 @@ nouveau_bo_init_mem_type(struct ttm_bo_device *bdev, uint32_t type,
dev_priv->gart_info.type);
return -EINVAL;
}
-
- man->io_offset = dev_priv->gart_info.aper_base;
- man->io_size = dev_priv->gart_info.aper_size;
- man->io_addr = NULL;
man->gpu_offset = dev_priv->vm_gart_base;
break;
default:
@@ -462,7 +449,8 @@ nouveau_bo_evict_flags(struct ttm_buffer_object *bo, struct ttm_placement *pl)
static int
nouveau_bo_move_accel_cleanup(struct nouveau_channel *chan,
- struct nouveau_bo *nvbo, bool evict, bool no_wait,
+ struct nouveau_bo *nvbo, bool evict,
+ bool no_wait_reserve, bool no_wait_gpu,
struct ttm_mem_reg *new_mem)
{
struct nouveau_fence *fence = NULL;
@@ -473,7 +461,7 @@ nouveau_bo_move_accel_cleanup(struct nouveau_channel *chan,
return ret;
ret = ttm_bo_move_accel_cleanup(&nvbo->bo, fence, NULL,
- evict, no_wait, new_mem);
+ evict, no_wait_reserve, no_wait_gpu, new_mem);
if (nvbo->channel && nvbo->channel != chan)
ret = nouveau_fence_wait(fence, NULL, false, false);
nouveau_fence_unref((void *)&fence);
@@ -497,7 +485,8 @@ nouveau_bo_mem_ctxdma(struct nouveau_bo *nvbo, struct nouveau_channel *chan,
static int
nouveau_bo_move_m2mf(struct ttm_buffer_object *bo, int evict, bool intr,
- int no_wait, struct ttm_mem_reg *new_mem)
+ bool no_wait_reserve, bool no_wait_gpu,
+ struct ttm_mem_reg *new_mem)
{
struct nouveau_bo *nvbo = nouveau_bo(bo);
struct drm_nouveau_private *dev_priv = nouveau_bdev(bo->bdev);
@@ -575,12 +564,13 @@ nouveau_bo_move_m2mf(struct ttm_buffer_object *bo, int evict, bool intr,
dst_offset += (PAGE_SIZE * line_count);
}
- return nouveau_bo_move_accel_cleanup(chan, nvbo, evict, no_wait, new_mem);
+ return nouveau_bo_move_accel_cleanup(chan, nvbo, evict, no_wait_reserve, no_wait_gpu, new_mem);
}
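
This hunk and the ones that follow track a TTM interface change that splits the old no_wait flag in two, so callers can choose independently whether to block on buffer reservation and on GPU activity. Sketch of the new call shape, assuming those flag semantics:

	ret = ttm_bo_validate(bo, &nvbo->placement,
			      false,	/* interruptible */
			      false,	/* no_wait_reserve: fail rather than block on reservation */
			      false);	/* no_wait_gpu: fail rather than wait for the GPU */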
static int
nouveau_bo_move_flipd(struct ttm_buffer_object *bo, bool evict, bool intr,
- bool no_wait, struct ttm_mem_reg *new_mem)
+ bool no_wait_reserve, bool no_wait_gpu,
+ struct ttm_mem_reg *new_mem)
{
u32 placement_memtype = TTM_PL_FLAG_TT | TTM_PL_MASK_CACHING;
struct ttm_placement placement;
@@ -593,7 +583,7 @@ nouveau_bo_move_flipd(struct ttm_buffer_object *bo, bool evict, bool intr,
tmp_mem = *new_mem;
tmp_mem.mm_node = NULL;
- ret = ttm_bo_mem_space(bo, &placement, &tmp_mem, intr, no_wait);
+ ret = ttm_bo_mem_space(bo, &placement, &tmp_mem, intr, no_wait_reserve, no_wait_gpu);
if (ret)
return ret;
@@ -601,11 +591,11 @@ nouveau_bo_move_flipd(struct ttm_buffer_object *bo, bool evict, bool intr,
if (ret)
goto out;
- ret = nouveau_bo_move_m2mf(bo, true, intr, no_wait, &tmp_mem);
+ ret = nouveau_bo_move_m2mf(bo, true, intr, no_wait_reserve, no_wait_gpu, &tmp_mem);
if (ret)
goto out;
- ret = ttm_bo_move_ttm(bo, evict, no_wait, new_mem);
+ ret = ttm_bo_move_ttm(bo, evict, no_wait_reserve, no_wait_gpu, new_mem);
out:
if (tmp_mem.mm_node) {
spin_lock(&bo->bdev->glob->lru_lock);
@@ -618,7 +608,8 @@ out:
static int
nouveau_bo_move_flips(struct ttm_buffer_object *bo, bool evict, bool intr,
- bool no_wait, struct ttm_mem_reg *new_mem)
+ bool no_wait_reserve, bool no_wait_gpu,
+ struct ttm_mem_reg *new_mem)
{
u32 placement_memtype = TTM_PL_FLAG_TT | TTM_PL_MASK_CACHING;
struct ttm_placement placement;
@@ -631,15 +622,15 @@ nouveau_bo_move_flips(struct ttm_buffer_object *bo, bool evict, bool intr,
tmp_mem = *new_mem;
tmp_mem.mm_node = NULL;
- ret = ttm_bo_mem_space(bo, &placement, &tmp_mem, intr, no_wait);
+ ret = ttm_bo_mem_space(bo, &placement, &tmp_mem, intr, no_wait_reserve, no_wait_gpu);
if (ret)
return ret;
- ret = ttm_bo_move_ttm(bo, evict, no_wait, &tmp_mem);
+ ret = ttm_bo_move_ttm(bo, evict, no_wait_reserve, no_wait_gpu, &tmp_mem);
if (ret)
goto out;
- ret = nouveau_bo_move_m2mf(bo, evict, intr, no_wait, new_mem);
+ ret = nouveau_bo_move_m2mf(bo, evict, intr, no_wait_reserve, no_wait_gpu, new_mem);
if (ret)
goto out;
@@ -706,7 +697,8 @@ nouveau_bo_vm_cleanup(struct ttm_buffer_object *bo,
static int
nouveau_bo_move(struct ttm_buffer_object *bo, bool evict, bool intr,
- bool no_wait, struct ttm_mem_reg *new_mem)
+ bool no_wait_reserve, bool no_wait_gpu,
+ struct ttm_mem_reg *new_mem)
{
struct drm_nouveau_private *dev_priv = nouveau_bdev(bo->bdev);
struct nouveau_bo *nvbo = nouveau_bo(bo);
@@ -721,7 +713,7 @@ nouveau_bo_move(struct ttm_buffer_object *bo, bool evict, bool intr,
/* Software copy if the card isn't up and running yet. */
if (dev_priv->init_state != NOUVEAU_CARD_INIT_DONE ||
!dev_priv->channel) {
- ret = ttm_bo_move_memcpy(bo, evict, no_wait, new_mem);
+ ret = ttm_bo_move_memcpy(bo, evict, no_wait_reserve, no_wait_gpu, new_mem);
goto out;
}
@@ -735,17 +727,17 @@ nouveau_bo_move(struct ttm_buffer_object *bo, bool evict, bool intr,
/* Hardware assisted copy. */
if (new_mem->mem_type == TTM_PL_SYSTEM)
- ret = nouveau_bo_move_flipd(bo, evict, intr, no_wait, new_mem);
+ ret = nouveau_bo_move_flipd(bo, evict, intr, no_wait_reserve, no_wait_gpu, new_mem);
else if (old_mem->mem_type == TTM_PL_SYSTEM)
- ret = nouveau_bo_move_flips(bo, evict, intr, no_wait, new_mem);
+ ret = nouveau_bo_move_flips(bo, evict, intr, no_wait_reserve, no_wait_gpu, new_mem);
else
- ret = nouveau_bo_move_m2mf(bo, evict, intr, no_wait, new_mem);
+ ret = nouveau_bo_move_m2mf(bo, evict, intr, no_wait_reserve, no_wait_gpu, new_mem);
if (!ret)
goto out;
/* Fallback to software copy. */
- ret = ttm_bo_move_memcpy(bo, evict, no_wait, new_mem);
+ ret = ttm_bo_move_memcpy(bo, evict, no_wait_reserve, no_wait_gpu, new_mem);
out:
if (ret)
@@ -762,6 +754,55 @@ nouveau_bo_verify_access(struct ttm_buffer_object *bo, struct file *filp)
return 0;
}
+static int
+nouveau_ttm_io_mem_reserve(struct ttm_bo_device *bdev, struct ttm_mem_reg *mem)
+{
+ struct ttm_mem_type_manager *man = &bdev->man[mem->mem_type];
+ struct drm_nouveau_private *dev_priv = nouveau_bdev(bdev);
+ struct drm_device *dev = dev_priv->dev;
+
+ mem->bus.addr = NULL;
+ mem->bus.offset = 0;
+ mem->bus.size = mem->num_pages << PAGE_SHIFT;
+ mem->bus.base = 0;
+ mem->bus.is_iomem = false;
+ if (!(man->flags & TTM_MEMTYPE_FLAG_MAPPABLE))
+ return -EINVAL;
+ switch (mem->mem_type) {
+ case TTM_PL_SYSTEM:
+ /* System memory */
+ return 0;
+ case TTM_PL_TT:
+#if __OS_HAS_AGP
+ if (dev_priv->gart_info.type == NOUVEAU_GART_AGP) {
+ mem->bus.offset = mem->mm_node->start << PAGE_SHIFT;
+ mem->bus.base = dev_priv->gart_info.aper_base;
+ mem->bus.is_iomem = true;
+ }
+#endif
+ break;
+ case TTM_PL_VRAM:
+ mem->bus.offset = mem->mm_node->start << PAGE_SHIFT;
+ mem->bus.base = drm_get_resource_start(dev, 1);
+ mem->bus.is_iomem = true;
+ break;
+ default:
+ return -EINVAL;
+ }
+ return 0;
+}
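
With bus.base, bus.offset and bus.size filled in here, TTM itself performs the mapping instead of the driver pre-ioremapping whole apertures (the old io_addr/io_offset fields removed above). Roughly, and only as an illustration of the address arithmetic rather than the exact TTM call path:

	if (mem->bus.is_iomem) {
		void __iomem *map = ioremap_wc(mem->bus.base + mem->bus.offset,
					       mem->bus.size);
		/* ... CPU access through 'map' ... */
		iounmap(map);
	}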
+
+static void
+nouveau_ttm_io_mem_free(struct ttm_bo_device *bdev, struct ttm_mem_reg *mem)
+{
+}
+
+static int
+nouveau_ttm_fault_reserve_notify(struct ttm_buffer_object *bo)
+{
+ return 0;
+}
+
struct ttm_bo_driver nouveau_bo_driver = {
.create_ttm_backend_entry = nouveau_bo_create_ttm_backend_entry,
.invalidate_caches = nouveau_bo_invalidate_caches,
@@ -774,5 +815,8 @@ struct ttm_bo_driver nouveau_bo_driver = {
.sync_obj_flush = nouveau_fence_flush,
.sync_obj_unref = nouveau_fence_unref,
.sync_obj_ref = nouveau_fence_ref,
+ .fault_reserve_notify = &nouveau_ttm_fault_reserve_notify,
+ .io_mem_reserve = &nouveau_ttm_io_mem_reserve,
+ .io_mem_free = &nouveau_ttm_io_mem_free,
};
diff --git a/drivers/gpu/drm/nouveau/nouveau_connector.c b/drivers/gpu/drm/nouveau/nouveau_connector.c
index 14afe1e47e5..7e663a79829 100644
--- a/drivers/gpu/drm/nouveau/nouveau_connector.c
+++ b/drivers/gpu/drm/nouveau/nouveau_connector.c
@@ -843,6 +843,7 @@ nouveau_connector_create(struct drm_device *dev,
switch (dcb->type) {
case DCB_CONNECTOR_VGA:
+ connector->polled = DRM_CONNECTOR_POLL_CONNECT;
if (dev_priv->card_type >= NV_50) {
drm_connector_attach_property(connector,
dev->mode_config.scaling_mode_property,
@@ -854,6 +855,17 @@ nouveau_connector_create(struct drm_device *dev,
case DCB_CONNECTOR_TV_3:
nv_connector->scaling_mode = DRM_MODE_SCALE_NONE;
break;
+ case DCB_CONNECTOR_DP:
+ case DCB_CONNECTOR_eDP:
+ case DCB_CONNECTOR_HDMI_0:
+ case DCB_CONNECTOR_HDMI_1:
+ case DCB_CONNECTOR_DVI_I:
+ case DCB_CONNECTOR_DVI_D:
+ if (dev_priv->card_type >= NV_50)
+ connector->polled = DRM_CONNECTOR_POLL_HPD;
+ else
+ connector->polled = DRM_CONNECTOR_POLL_CONNECT;
+ /* fall-through */
default:
nv_connector->scaling_mode = DRM_MODE_SCALE_FULLSCREEN;
diff --git a/drivers/gpu/drm/nouveau/nouveau_debugfs.c b/drivers/gpu/drm/nouveau/nouveau_debugfs.c
index a251886a0ce..7933de4aff2 100644
--- a/drivers/gpu/drm/nouveau/nouveau_debugfs.c
+++ b/drivers/gpu/drm/nouveau/nouveau_debugfs.c
@@ -33,6 +33,8 @@
#include "drmP.h"
#include "nouveau_drv.h"
+#include <ttm/ttm_page_alloc.h>
+
static int
nouveau_debugfs_channel_info(struct seq_file *m, void *data)
{
@@ -159,6 +161,7 @@ static struct drm_info_list nouveau_debugfs_list[] = {
{ "chipset", nouveau_debugfs_chipset_info, 0, NULL },
{ "memory", nouveau_debugfs_memory_info, 0, NULL },
{ "vbios.rom", nouveau_debugfs_vbios_image, 0, NULL },
+ { "ttm_page_pool", ttm_page_alloc_debugfs, 0, NULL },
};
#define NOUVEAU_DEBUGFS_ENTRIES ARRAY_SIZE(nouveau_debugfs_list)
diff --git a/drivers/gpu/drm/nouveau/nouveau_display.c b/drivers/gpu/drm/nouveau/nouveau_display.c
index cf1c5c0a0ab..74e6b4ed12c 100644
--- a/drivers/gpu/drm/nouveau/nouveau_display.c
+++ b/drivers/gpu/drm/nouveau/nouveau_display.c
@@ -34,10 +34,6 @@ static void
nouveau_user_framebuffer_destroy(struct drm_framebuffer *drm_fb)
{
struct nouveau_framebuffer *fb = nouveau_framebuffer(drm_fb);
- struct drm_device *dev = drm_fb->dev;
-
- if (drm_fb->fbdev)
- nouveau_fbcon_remove(dev, drm_fb);
if (fb->nvbo)
drm_gem_object_unreference_unlocked(fb->nvbo->gem);
@@ -61,27 +57,20 @@ static const struct drm_framebuffer_funcs nouveau_framebuffer_funcs = {
.create_handle = nouveau_user_framebuffer_create_handle,
};
-struct drm_framebuffer *
-nouveau_framebuffer_create(struct drm_device *dev, struct nouveau_bo *nvbo,
- struct drm_mode_fb_cmd *mode_cmd)
+int
+nouveau_framebuffer_init(struct drm_device *dev, struct nouveau_framebuffer *nouveau_fb,
+ struct drm_mode_fb_cmd *mode_cmd, struct nouveau_bo *nvbo)
{
- struct nouveau_framebuffer *fb;
int ret;
- fb = kzalloc(sizeof(struct nouveau_framebuffer), GFP_KERNEL);
- if (!fb)
- return NULL;
-
- ret = drm_framebuffer_init(dev, &fb->base, &nouveau_framebuffer_funcs);
+ ret = drm_framebuffer_init(dev, &nouveau_fb->base, &nouveau_framebuffer_funcs);
if (ret) {
- kfree(fb);
- return NULL;
+ return ret;
}
- drm_helper_mode_fill_fb_struct(&fb->base, mode_cmd);
-
- fb->nvbo = nvbo;
- return &fb->base;
+ drm_helper_mode_fill_fb_struct(&nouveau_fb->base, mode_cmd);
+ nouveau_fb->nvbo = nvbo;
+ return 0;
}
static struct drm_framebuffer *
@@ -89,24 +78,29 @@ nouveau_user_framebuffer_create(struct drm_device *dev,
struct drm_file *file_priv,
struct drm_mode_fb_cmd *mode_cmd)
{
- struct drm_framebuffer *fb;
+ struct nouveau_framebuffer *nouveau_fb;
struct drm_gem_object *gem;
+ int ret;
gem = drm_gem_object_lookup(dev, file_priv, mode_cmd->handle);
if (!gem)
return NULL;
- fb = nouveau_framebuffer_create(dev, nouveau_gem_object(gem), mode_cmd);
- if (!fb) {
+ nouveau_fb = kzalloc(sizeof(struct nouveau_framebuffer), GFP_KERNEL);
+ if (!nouveau_fb)
+ return NULL;
+
+ ret = nouveau_framebuffer_init(dev, nouveau_fb, mode_cmd, nouveau_gem_object(gem));
+ if (ret) {
drm_gem_object_unreference(gem);
return NULL;
}
- return fb;
+ return &nouveau_fb->base;
}
const struct drm_mode_config_funcs nouveau_mode_config_funcs = {
.fb_create = nouveau_user_framebuffer_create,
- .fb_changed = nouveau_fbcon_probe,
+ .output_poll_changed = nouveau_fbcon_output_poll_changed,
};
diff --git a/drivers/gpu/drm/nouveau/nouveau_drv.c b/drivers/gpu/drm/nouveau/nouveau_drv.c
index 1de974acbc6..c6079e36669 100644
--- a/drivers/gpu/drm/nouveau/nouveau_drv.c
+++ b/drivers/gpu/drm/nouveau/nouveau_drv.c
@@ -153,7 +153,6 @@ nouveau_pci_suspend(struct pci_dev *pdev, pm_message_t pm_state)
struct nouveau_fifo_engine *pfifo = &dev_priv->engine.fifo;
struct nouveau_channel *chan;
struct drm_crtc *crtc;
- uint32_t fbdev_flags;
int ret, i;
if (!drm_core_check_feature(dev, DRIVER_MODESET))
@@ -163,8 +162,7 @@ nouveau_pci_suspend(struct pci_dev *pdev, pm_message_t pm_state)
return 0;
NV_INFO(dev, "Disabling fbcon acceleration...\n");
- fbdev_flags = dev_priv->fbdev_info->flags;
- dev_priv->fbdev_info->flags |= FBINFO_HWACCEL_DISABLED;
+ nouveau_fbcon_save_disable_accel(dev);
NV_INFO(dev, "Unpinning framebuffer(s)...\n");
list_for_each_entry(crtc, &dev->mode_config.crtc_list, head) {
@@ -230,9 +228,9 @@ nouveau_pci_suspend(struct pci_dev *pdev, pm_message_t pm_state)
}
acquire_console_sem();
- fb_set_suspend(dev_priv->fbdev_info, 1);
+ nouveau_fbcon_set_suspend(dev, 1);
release_console_sem();
- dev_priv->fbdev_info->flags = fbdev_flags;
+ nouveau_fbcon_restore_accel(dev);
return 0;
out_abort:
@@ -250,14 +248,12 @@ nouveau_pci_resume(struct pci_dev *pdev)
struct drm_nouveau_private *dev_priv = dev->dev_private;
struct nouveau_engine *engine = &dev_priv->engine;
struct drm_crtc *crtc;
- uint32_t fbdev_flags;
int ret, i;
if (!drm_core_check_feature(dev, DRIVER_MODESET))
return -ENODEV;
- fbdev_flags = dev_priv->fbdev_info->flags;
- dev_priv->fbdev_info->flags |= FBINFO_HWACCEL_DISABLED;
+ nouveau_fbcon_save_disable_accel(dev);
NV_INFO(dev, "We're back, enabling device...\n");
pci_set_power_state(pdev, PCI_D0);
@@ -332,13 +328,14 @@ nouveau_pci_resume(struct pci_dev *pdev)
}
acquire_console_sem();
- fb_set_suspend(dev_priv->fbdev_info, 0);
+ nouveau_fbcon_set_suspend(dev, 0);
release_console_sem();
- nouveau_fbcon_zfill(dev);
+ nouveau_fbcon_zfill_all(dev);
drm_helper_resume_force_mode(dev);
- dev_priv->fbdev_info->flags = fbdev_flags;
+
+ nouveau_fbcon_restore_accel(dev);
return 0;
}
diff --git a/drivers/gpu/drm/nouveau/nouveau_drv.h b/drivers/gpu/drm/nouveau/nouveau_drv.h
index ace630aa89e..5b134438eff 100644
--- a/drivers/gpu/drm/nouveau/nouveau_drv.h
+++ b/drivers/gpu/drm/nouveau/nouveau_drv.h
@@ -535,6 +535,7 @@ struct drm_nouveau_private {
struct fb_info *fbdev_info;
+ int fifo_alloc_count;
struct nouveau_channel *fifos[NOUVEAU_MAX_CHANNEL_NR];
struct nouveau_engine engine;
@@ -621,6 +622,9 @@ struct drm_nouveau_private {
struct {
struct dentry *channel_root;
} debugfs;
+
+ struct nouveau_fbdev *nfbdev;
+ struct apertures_struct *apertures;
};
static inline struct drm_nouveau_private *
@@ -1166,6 +1170,12 @@ int nv17_gpio_set(struct drm_device *dev, enum dcb_gpio_tag tag, int state);
int nv50_gpio_get(struct drm_device *dev, enum dcb_gpio_tag tag);
int nv50_gpio_set(struct drm_device *dev, enum dcb_gpio_tag tag, int state);
+/* nv50_calc. */
+int nv50_calc_pll(struct drm_device *, struct pll_lims *, int clk,
+ int *N1, int *M1, int *N2, int *M2, int *P);
+int nv50_calc_pll2(struct drm_device *, struct pll_lims *,
+ int clk, int *N, int *fN, int *M, int *P);
+
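
A hypothetical caller sketch for the first helper; 'pll_reg' and 'target_khz' are made-up names here, and the return convention (achieved clock on success, <= 0 on failure) is assumed rather than taken from the patch:

	struct pll_lims limits;
	int N1, M1, N2, M2, P;

	if (get_pll_limits(dev, pll_reg, &limits) == 0) {
		int clk = nv50_calc_pll(dev, &limits, target_khz,
					&N1, &M1, &N2, &M2, &P);
		if (clk > 0)
			/* program N1/M1, N2/M2 and P into the PLL */;
	}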
#ifndef ioread32_native
#ifdef __BIG_ENDIAN
#define ioread16_native ioread16be
diff --git a/drivers/gpu/drm/nouveau/nouveau_encoder.h b/drivers/gpu/drm/nouveau/nouveau_encoder.h
index 9f28b94e479..e1df8209cd0 100644
--- a/drivers/gpu/drm/nouveau/nouveau_encoder.h
+++ b/drivers/gpu/drm/nouveau/nouveau_encoder.h
@@ -48,6 +48,8 @@ struct nouveau_encoder {
union {
struct {
int mc_unknown;
+ uint32_t unk0;
+ uint32_t unk1;
int dpcd_version;
int link_nr;
int link_bw;
diff --git a/drivers/gpu/drm/nouveau/nouveau_fb.h b/drivers/gpu/drm/nouveau/nouveau_fb.h
index 4a3f31aa194..d432134b71e 100644
--- a/drivers/gpu/drm/nouveau/nouveau_fb.h
+++ b/drivers/gpu/drm/nouveau/nouveau_fb.h
@@ -40,8 +40,6 @@ nouveau_framebuffer(struct drm_framebuffer *fb)
extern const struct drm_mode_config_funcs nouveau_mode_config_funcs;
-struct drm_framebuffer *
-nouveau_framebuffer_create(struct drm_device *, struct nouveau_bo *,
- struct drm_mode_fb_cmd *);
-
+int nouveau_framebuffer_init(struct drm_device *dev, struct nouveau_framebuffer *nouveau_fb,
+ struct drm_mode_fb_cmd *mode_cmd, struct nouveau_bo *nvbo);
#endif /* __NOUVEAU_FB_H__ */
diff --git a/drivers/gpu/drm/nouveau/nouveau_fbcon.c b/drivers/gpu/drm/nouveau/nouveau_fbcon.c
index 8e7dc1d4912..fd4a2df715e 100644
--- a/drivers/gpu/drm/nouveau/nouveau_fbcon.c
+++ b/drivers/gpu/drm/nouveau/nouveau_fbcon.c
@@ -52,8 +52,8 @@
static int
nouveau_fbcon_sync(struct fb_info *info)
{
- struct nouveau_fbcon_par *par = info->par;
- struct drm_device *dev = par->dev;
+ struct nouveau_fbdev *nfbdev = info->par;
+ struct drm_device *dev = nfbdev->dev;
struct drm_nouveau_private *dev_priv = dev->dev_private;
struct nouveau_channel *chan = dev_priv->channel;
int ret, i;
@@ -97,7 +97,6 @@ static struct fb_ops nouveau_fbcon_ops = {
.owner = THIS_MODULE,
.fb_check_var = drm_fb_helper_check_var,
.fb_set_par = drm_fb_helper_set_par,
- .fb_setcolreg = drm_fb_helper_setcolreg,
.fb_fillrect = cfb_fillrect,
.fb_copyarea = cfb_copyarea,
.fb_imageblit = cfb_imageblit,
@@ -111,7 +110,6 @@ static struct fb_ops nv04_fbcon_ops = {
.owner = THIS_MODULE,
.fb_check_var = drm_fb_helper_check_var,
.fb_set_par = drm_fb_helper_set_par,
- .fb_setcolreg = drm_fb_helper_setcolreg,
.fb_fillrect = nv04_fbcon_fillrect,
.fb_copyarea = nv04_fbcon_copyarea,
.fb_imageblit = nv04_fbcon_imageblit,
@@ -125,7 +123,6 @@ static struct fb_ops nv50_fbcon_ops = {
.owner = THIS_MODULE,
.fb_check_var = drm_fb_helper_check_var,
.fb_set_par = drm_fb_helper_set_par,
- .fb_setcolreg = drm_fb_helper_setcolreg,
.fb_fillrect = nv50_fbcon_fillrect,
.fb_copyarea = nv50_fbcon_copyarea,
.fb_imageblit = nv50_fbcon_imageblit,
@@ -155,54 +152,10 @@ static void nouveau_fbcon_gamma_get(struct drm_crtc *crtc, u16 *red, u16 *green,
*blue = nv_crtc->lut.b[regno];
}
-static struct drm_fb_helper_funcs nouveau_fbcon_helper_funcs = {
- .gamma_set = nouveau_fbcon_gamma_set,
- .gamma_get = nouveau_fbcon_gamma_get
-};
-
-#if defined(__i386__) || defined(__x86_64__)
-static bool
-nouveau_fbcon_has_vesafb_or_efifb(struct drm_device *dev)
-{
- struct pci_dev *pdev = dev->pdev;
- int ramin;
-
- if (screen_info.orig_video_isVGA != VIDEO_TYPE_VLFB &&
- screen_info.orig_video_isVGA != VIDEO_TYPE_EFI)
- return false;
-
- if (screen_info.lfb_base < pci_resource_start(pdev, 1))
- goto not_fb;
-
- if (screen_info.lfb_base + screen_info.lfb_size >=
- pci_resource_start(pdev, 1) + pci_resource_len(pdev, 1))
- goto not_fb;
-
- return true;
-not_fb:
- ramin = 2;
- if (pci_resource_len(pdev, ramin) == 0) {
- ramin = 3;
- if (pci_resource_len(pdev, ramin) == 0)
- return false;
- }
-
- if (screen_info.lfb_base < pci_resource_start(pdev, ramin))
- return false;
-
- if (screen_info.lfb_base + screen_info.lfb_size >=
- pci_resource_start(pdev, ramin) + pci_resource_len(pdev, ramin))
- return false;
-
- return true;
-}
-#endif
-
-void
-nouveau_fbcon_zfill(struct drm_device *dev)
+static void
+nouveau_fbcon_zfill(struct drm_device *dev, struct nouveau_fbdev *nfbdev)
{
- struct drm_nouveau_private *dev_priv = dev->dev_private;
- struct fb_info *info = dev_priv->fbdev_info;
+ struct fb_info *info = nfbdev->helper.fbdev;
struct fb_fillrect rect;
/* Clear the entire fbcon. The drm will program every connector
@@ -218,28 +171,27 @@ nouveau_fbcon_zfill(struct drm_device *dev)
}
static int
-nouveau_fbcon_create(struct drm_device *dev, uint32_t fb_width,
- uint32_t fb_height, uint32_t surface_width,
- uint32_t surface_height, uint32_t surface_depth,
- uint32_t surface_bpp, struct drm_framebuffer **pfb)
+nouveau_fbcon_create(struct nouveau_fbdev *nfbdev,
+ struct drm_fb_helper_surface_size *sizes)
{
+ struct drm_device *dev = nfbdev->dev;
struct drm_nouveau_private *dev_priv = dev->dev_private;
struct fb_info *info;
- struct nouveau_fbcon_par *par;
struct drm_framebuffer *fb;
struct nouveau_framebuffer *nouveau_fb;
struct nouveau_bo *nvbo;
struct drm_mode_fb_cmd mode_cmd;
- struct device *device = &dev->pdev->dev;
+ struct pci_dev *pdev = dev->pdev;
+ struct device *device = &pdev->dev;
int size, ret;
- mode_cmd.width = surface_width;
- mode_cmd.height = surface_height;
+ mode_cmd.width = sizes->surface_width;
+ mode_cmd.height = sizes->surface_height;
- mode_cmd.bpp = surface_bpp;
+ mode_cmd.bpp = sizes->surface_bpp;
mode_cmd.pitch = mode_cmd.width * (mode_cmd.bpp >> 3);
mode_cmd.pitch = roundup(mode_cmd.pitch, 256);
- mode_cmd.depth = surface_depth;
+ mode_cmd.depth = sizes->surface_depth;
size = mode_cmd.pitch * mode_cmd.height;
size = roundup(size, PAGE_SIZE);
@@ -268,31 +220,28 @@ nouveau_fbcon_create(struct drm_device *dev, uint32_t fb_width,
mutex_lock(&dev->struct_mutex);
- fb = nouveau_framebuffer_create(dev, nvbo, &mode_cmd);
- if (!fb) {
+ info = framebuffer_alloc(0, device);
+ if (!info) {
ret = -ENOMEM;
- NV_ERROR(dev, "failed to allocate fb.\n");
goto out_unref;
}
- list_add(&fb->filp_head, &dev->mode_config.fb_kernel_list);
-
- nouveau_fb = nouveau_framebuffer(fb);
- *pfb = fb;
-
- info = framebuffer_alloc(sizeof(struct nouveau_fbcon_par), device);
- if (!info) {
+ ret = fb_alloc_cmap(&info->cmap, 256, 0);
+ if (ret) {
ret = -ENOMEM;
goto out_unref;
}
- par = info->par;
- par->helper.funcs = &nouveau_fbcon_helper_funcs;
- par->helper.dev = dev;
- ret = drm_fb_helper_init_crtc_count(&par->helper, 2, 4);
- if (ret)
- goto out_unref;
- dev_priv->fbdev_info = info;
+ info->par = nfbdev;
+
+ nouveau_framebuffer_init(dev, &nfbdev->nouveau_fb, &mode_cmd, nvbo);
+
+ nouveau_fb = &nfbdev->nouveau_fb;
+ fb = &nouveau_fb->base;
+
+ /* setup helper */
+ nfbdev->helper.fb = fb;
+ nfbdev->helper.fbdev = info;
strcpy(info->fix.id, "nouveaufb");
if (nouveau_nofbaccel)
@@ -310,31 +259,17 @@ nouveau_fbcon_create(struct drm_device *dev, uint32_t fb_width,
info->screen_size = size;
drm_fb_helper_fill_fix(info, fb->pitch, fb->depth);
- drm_fb_helper_fill_var(info, fb, fb_width, fb_height);
+ drm_fb_helper_fill_var(info, &nfbdev->helper, sizes->fb_width, sizes->fb_height);
/* FIXME: we really shouldn't expose mmio space at all */
- info->fix.mmio_start = pci_resource_start(dev->pdev, 1);
- info->fix.mmio_len = pci_resource_len(dev->pdev, 1);
+ info->fix.mmio_start = pci_resource_start(pdev, 1);
+ info->fix.mmio_len = pci_resource_len(pdev, 1);
/* Set aperture base/size for vesafb takeover */
-#if defined(__i386__) || defined(__x86_64__)
- if (nouveau_fbcon_has_vesafb_or_efifb(dev)) {
- /* Some NVIDIA VBIOS' are stupid and decide to put the
- * framebuffer in the middle of the PRAMIN BAR for
- * whatever reason. We need to know the exact lfb_base
- * to get vesafb kicked off, and the only reliable way
- * we have left is to find out lfb_base the same way
- * vesafb did.
- */
- info->aperture_base = screen_info.lfb_base;
- info->aperture_size = screen_info.lfb_size;
- if (screen_info.orig_video_isVGA == VIDEO_TYPE_VLFB)
- info->aperture_size *= 65536;
- } else
-#endif
- {
- info->aperture_base = info->fix.mmio_start;
- info->aperture_size = info->fix.mmio_len;
+ info->apertures = dev_priv->apertures;
+ if (!info->apertures) {
+ ret = -ENOMEM;
+ goto out_unref;
}
info->pixmap.size = 64*1024;
@@ -343,11 +278,6 @@ nouveau_fbcon_create(struct drm_device *dev, uint32_t fb_width,
info->pixmap.flags = FB_PIXMAP_SYSTEM;
info->pixmap.scan_align = 1;
- fb->fbdev = info;
-
- par->nouveau_fb = nouveau_fb;
- par->dev = dev;
-
if (dev_priv->channel && !nouveau_nofbaccel) {
switch (dev_priv->card_type) {
case NV_50:
@@ -361,7 +291,7 @@ nouveau_fbcon_create(struct drm_device *dev, uint32_t fb_width,
};
}
- nouveau_fbcon_zfill(dev);
+ nouveau_fbcon_zfill(dev, nfbdev);
/* To allow resizing without swapping buffers */
NV_INFO(dev, "allocated %dx%d fb: 0x%lx, bo %p\n",
@@ -379,44 +309,123 @@ out:
return ret;
}
-int
-nouveau_fbcon_probe(struct drm_device *dev)
+static int
+nouveau_fbcon_find_or_create_single(struct drm_fb_helper *helper,
+ struct drm_fb_helper_surface_size *sizes)
{
- NV_DEBUG_KMS(dev, "\n");
+ struct nouveau_fbdev *nfbdev = (struct nouveau_fbdev *)helper;
+ int new_fb = 0;
+ int ret;
+
+ if (!helper->fb) {
+ ret = nouveau_fbcon_create(nfbdev, sizes);
+ if (ret)
+ return ret;
+ new_fb = 1;
+ }
+ return new_fb;
+}
- return drm_fb_helper_single_fb_probe(dev, 32, nouveau_fbcon_create);
+void
+nouveau_fbcon_output_poll_changed(struct drm_device *dev)
+{
+ struct drm_nouveau_private *dev_priv = dev->dev_private;
+ drm_fb_helper_hotplug_event(&dev_priv->nfbdev->helper);
}
int
-nouveau_fbcon_remove(struct drm_device *dev, struct drm_framebuffer *fb)
+nouveau_fbcon_destroy(struct drm_device *dev, struct nouveau_fbdev *nfbdev)
{
- struct nouveau_framebuffer *nouveau_fb = nouveau_framebuffer(fb);
+ struct nouveau_framebuffer *nouveau_fb = &nfbdev->nouveau_fb;
struct fb_info *info;
- if (!fb)
- return -EINVAL;
-
- info = fb->fbdev;
- if (info) {
- struct nouveau_fbcon_par *par = info->par;
-
+ if (nfbdev->helper.fbdev) {
+ info = nfbdev->helper.fbdev;
unregister_framebuffer(info);
+ if (info->cmap.len)
+ fb_dealloc_cmap(&info->cmap);
+ framebuffer_release(info);
+ }
+
+ if (nouveau_fb->nvbo) {
nouveau_bo_unmap(nouveau_fb->nvbo);
drm_gem_object_unreference_unlocked(nouveau_fb->nvbo->gem);
nouveau_fb->nvbo = NULL;
- if (par)
- drm_fb_helper_free(&par->helper);
- framebuffer_release(info);
}
-
+ drm_fb_helper_fini(&nfbdev->helper);
+ drm_framebuffer_cleanup(&nouveau_fb->base);
return 0;
}
void nouveau_fbcon_gpu_lockup(struct fb_info *info)
{
- struct nouveau_fbcon_par *par = info->par;
- struct drm_device *dev = par->dev;
+ struct nouveau_fbdev *nfbdev = info->par;
+ struct drm_device *dev = nfbdev->dev;
NV_ERROR(dev, "GPU lockup - switching to software fbcon\n");
info->flags |= FBINFO_HWACCEL_DISABLED;
}
+
+static struct drm_fb_helper_funcs nouveau_fbcon_helper_funcs = {
+ .gamma_set = nouveau_fbcon_gamma_set,
+ .gamma_get = nouveau_fbcon_gamma_get,
+ .fb_probe = nouveau_fbcon_find_or_create_single,
+};
+
+
+int nouveau_fbcon_init(struct drm_device *dev)
+{
+ struct drm_nouveau_private *dev_priv = dev->dev_private;
+ struct nouveau_fbdev *nfbdev;
+
+ nfbdev = kzalloc(sizeof(struct nouveau_fbdev), GFP_KERNEL);
+ if (!nfbdev)
+ return -ENOMEM;
+
+ nfbdev->dev = dev;
+ dev_priv->nfbdev = nfbdev;
+ nfbdev->helper.funcs = &nouveau_fbcon_helper_funcs;
+
+ drm_fb_helper_init(dev, &nfbdev->helper, 2, 4);
+ drm_fb_helper_single_add_all_connectors(&nfbdev->helper);
+ drm_fb_helper_initial_config(&nfbdev->helper, 32);
+ return 0;
+}
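
drm_fb_helper_init() can fail, and this version ignores its return value; a more defensive sketch of the same opening sequence (illustrative only, not what the patch does):

	int ret = drm_fb_helper_init(dev, &nfbdev->helper, 2, 4);
	if (ret) {
		kfree(nfbdev);
		dev_priv->nfbdev = NULL;
		return ret;
	}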
+
+void nouveau_fbcon_fini(struct drm_device *dev)
+{
+ struct drm_nouveau_private *dev_priv = dev->dev_private;
+
+ if (!dev_priv->nfbdev)
+ return;
+
+ nouveau_fbcon_destroy(dev, dev_priv->nfbdev);
+ kfree(dev_priv->nfbdev);
+ dev_priv->nfbdev = NULL;
+}
+
+void nouveau_fbcon_save_disable_accel(struct drm_device *dev)
+{
+ struct drm_nouveau_private *dev_priv = dev->dev_private;
+
+ dev_priv->nfbdev->saved_flags = dev_priv->nfbdev->helper.fbdev->flags;
+ dev_priv->nfbdev->helper.fbdev->flags |= FBINFO_HWACCEL_DISABLED;
+}
+
+void nouveau_fbcon_restore_accel(struct drm_device *dev)
+{
+ struct drm_nouveau_private *dev_priv = dev->dev_private;
+ dev_priv->nfbdev->helper.fbdev->flags = dev_priv->nfbdev->saved_flags;
+}
+
+void nouveau_fbcon_set_suspend(struct drm_device *dev, int state)
+{
+ struct drm_nouveau_private *dev_priv = dev->dev_private;
+ fb_set_suspend(dev_priv->nfbdev->helper.fbdev, state);
+}
+
+void nouveau_fbcon_zfill_all(struct drm_device *dev)
+{
+ struct drm_nouveau_private *dev_priv = dev->dev_private;
+ nouveau_fbcon_zfill(dev, dev_priv->nfbdev);
+}
diff --git a/drivers/gpu/drm/nouveau/nouveau_fbcon.h b/drivers/gpu/drm/nouveau/nouveau_fbcon.h
index f9c34e1a8c1..e7e12684c37 100644
--- a/drivers/gpu/drm/nouveau/nouveau_fbcon.h
+++ b/drivers/gpu/drm/nouveau/nouveau_fbcon.h
@@ -29,16 +29,16 @@
#include "drm_fb_helper.h"
-struct nouveau_fbcon_par {
+#include "nouveau_fb.h"
+struct nouveau_fbdev {
struct drm_fb_helper helper;
+ struct nouveau_framebuffer nouveau_fb;
+ struct list_head fbdev_list;
struct drm_device *dev;
- struct nouveau_framebuffer *nouveau_fb;
+ unsigned int saved_flags;
};
-int nouveau_fbcon_probe(struct drm_device *dev);
-int nouveau_fbcon_remove(struct drm_device *dev, struct drm_framebuffer *fb);
void nouveau_fbcon_restore(void);
-void nouveau_fbcon_zfill(struct drm_device *dev);
void nv04_fbcon_copyarea(struct fb_info *info, const struct fb_copyarea *region);
void nv04_fbcon_fillrect(struct fb_info *info, const struct fb_fillrect *rect);
@@ -50,5 +50,14 @@ void nv50_fbcon_imageblit(struct fb_info *info, const struct fb_image *image);
int nv50_fbcon_accel_init(struct fb_info *info);
void nouveau_fbcon_gpu_lockup(struct fb_info *info);
+
+int nouveau_fbcon_init(struct drm_device *dev);
+void nouveau_fbcon_fini(struct drm_device *dev);
+void nouveau_fbcon_set_suspend(struct drm_device *dev, int state);
+void nouveau_fbcon_zfill_all(struct drm_device *dev);
+void nouveau_fbcon_save_disable_accel(struct drm_device *dev);
+void nouveau_fbcon_restore_accel(struct drm_device *dev);
+
+void nouveau_fbcon_output_poll_changed(struct drm_device *dev);
#endif /* __NV50_FBCON_H__ */
diff --git a/drivers/gpu/drm/nouveau/nouveau_gem.c b/drivers/gpu/drm/nouveau/nouveau_gem.c
index 1bc0b38a516..69c76cf9340 100644
--- a/drivers/gpu/drm/nouveau/nouveau_gem.c
+++ b/drivers/gpu/drm/nouveau/nouveau_gem.c
@@ -57,6 +57,9 @@ nouveau_gem_object_del(struct drm_gem_object *gem)
}
ttm_bo_unref(&bo);
+
+ drm_gem_object_release(gem);
+ kfree(gem);
}
int
@@ -382,7 +385,7 @@ validate_list(struct nouveau_channel *chan, struct list_head *list,
nvbo->channel = chan;
ret = ttm_bo_validate(&nvbo->bo, &nvbo->placement,
- false, false);
+ false, false, false);
nvbo->channel = NULL;
if (unlikely(ret)) {
NV_ERROR(dev, "fail ttm_validate\n");
diff --git a/drivers/gpu/drm/nouveau/nouveau_grctx.c b/drivers/gpu/drm/nouveau/nouveau_grctx.c
index 32f0e495464..f731c5f6053 100644
--- a/drivers/gpu/drm/nouveau/nouveau_grctx.c
+++ b/drivers/gpu/drm/nouveau/nouveau_grctx.c
@@ -68,13 +68,12 @@ nouveau_grctx_prog_load(struct drm_device *dev)
return ret;
}
- pgraph->ctxprog = kmalloc(fw->size, GFP_KERNEL);
+ pgraph->ctxprog = kmemdup(fw->data, fw->size, GFP_KERNEL);
if (!pgraph->ctxprog) {
NV_ERROR(dev, "OOM copying ctxprog\n");
release_firmware(fw);
return -ENOMEM;
}
- memcpy(pgraph->ctxprog, fw->data, fw->size);
cp = pgraph->ctxprog;
if (le32_to_cpu(cp->signature) != 0x5043564e ||
@@ -97,14 +96,13 @@ nouveau_grctx_prog_load(struct drm_device *dev)
return ret;
}
- pgraph->ctxvals = kmalloc(fw->size, GFP_KERNEL);
+ pgraph->ctxvals = kmemdup(fw->data, fw->size, GFP_KERNEL);
if (!pgraph->ctxvals) {
NV_ERROR(dev, "OOM copying ctxvals\n");
release_firmware(fw);
nouveau_grctx_fini(dev);
return -ENOMEM;
}
- memcpy(pgraph->ctxvals, fw->data, fw->size);
cv = (void *)pgraph->ctxvals;
if (le32_to_cpu(cv->signature) != 0x5643564e ||
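
Both hunks above are the same mechanical conversion: kmemdup() folds the allocate-then-copy pair into a single call. The equivalence, as a sketch:

	void *p = kmemdup(fw->data, fw->size, GFP_KERNEL);
	/* behaves like: */
	void *q = kmalloc(fw->size, GFP_KERNEL);
	if (q)
		memcpy(q, fw->data, fw->size);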
diff --git a/drivers/gpu/drm/nouveau/nouveau_i2c.c b/drivers/gpu/drm/nouveau/nouveau_i2c.c
index 88583e7bf65..316a3c7e6eb 100644
--- a/drivers/gpu/drm/nouveau/nouveau_i2c.c
+++ b/drivers/gpu/drm/nouveau/nouveau_i2c.c
@@ -254,16 +254,27 @@ struct nouveau_i2c_chan *
nouveau_i2c_find(struct drm_device *dev, int index)
{
struct drm_nouveau_private *dev_priv = dev->dev_private;
- struct nvbios *bios = &dev_priv->vbios;
+ struct dcb_i2c_entry *i2c = &dev_priv->vbios.dcb.i2c[index];
if (index >= DCB_MAX_NUM_I2C_ENTRIES)
return NULL;
- if (!bios->dcb.i2c[index].chan) {
- if (nouveau_i2c_init(dev, &bios->dcb.i2c[index], index))
- return NULL;
+ if (dev_priv->chipset >= NV_50 && (i2c->entry & 0x00000100)) {
+ uint32_t reg = 0xe500, val;
+
+ if (i2c->port_type == 6) {
+ reg += i2c->read * 0x50;
+ val = 0x2002;
+ } else {
+ reg += ((i2c->entry & 0x1e00) >> 9) * 0x50;
+ val = 0xe001;
+ }
+
+ nv_wr32(dev, reg, (nv_rd32(dev, reg) & ~0xf003) | val);
}
- return bios->dcb.i2c[index].chan;
+ if (!i2c->chan && nouveau_i2c_init(dev, i2c, index))
+ return NULL;
+ return i2c->chan;
}
diff --git a/drivers/gpu/drm/nouveau/nouveau_irq.c b/drivers/gpu/drm/nouveau/nouveau_irq.c
index 13e73cee4c4..53360f15606 100644
--- a/drivers/gpu/drm/nouveau/nouveau_irq.c
+++ b/drivers/gpu/drm/nouveau/nouveau_irq.c
@@ -1204,7 +1204,7 @@ nouveau_irq_handler(DRM_IRQ_ARGS)
{
struct drm_device *dev = (struct drm_device *)arg;
struct drm_nouveau_private *dev_priv = dev->dev_private;
- uint32_t status, fbdev_flags = 0;
+ uint32_t status;
unsigned long flags;
status = nv_rd32(dev, NV03_PMC_INTR_0);
@@ -1213,11 +1213,6 @@ nouveau_irq_handler(DRM_IRQ_ARGS)
spin_lock_irqsave(&dev_priv->context_switch_lock, flags);
- if (dev_priv->fbdev_info) {
- fbdev_flags = dev_priv->fbdev_info->flags;
- dev_priv->fbdev_info->flags |= FBINFO_HWACCEL_DISABLED;
- }
-
if (status & NV_PMC_INTR_0_PFIFO_PENDING) {
nouveau_fifo_irq_handler(dev);
status &= ~NV_PMC_INTR_0_PFIFO_PENDING;
@@ -1247,9 +1242,6 @@ nouveau_irq_handler(DRM_IRQ_ARGS)
if (status)
NV_ERROR(dev, "Unhandled PMC INTR status bits 0x%08x\n", status);
- if (dev_priv->fbdev_info)
- dev_priv->fbdev_info->flags = fbdev_flags;
-
spin_unlock_irqrestore(&dev_priv->context_switch_lock, flags);
return IRQ_HANDLED;
diff --git a/drivers/gpu/drm/nouveau/nouveau_reg.h b/drivers/gpu/drm/nouveau/nouveau_reg.h
index aa9b310e41b..6ca80a3fe70 100644
--- a/drivers/gpu/drm/nouveau/nouveau_reg.h
+++ b/drivers/gpu/drm/nouveau/nouveau_reg.h
@@ -826,6 +826,7 @@
#define NV50_SOR_DP_CTRL_TRAINING_PATTERN_2 0x02000000
#define NV50_SOR_DP_UNK118(i,l) (0x0061c118 + (i) * 0x800 + (l) * 0x80)
#define NV50_SOR_DP_UNK120(i,l) (0x0061c120 + (i) * 0x800 + (l) * 0x80)
+#define NV50_SOR_DP_UNK128(i,l) (0x0061c128 + (i) * 0x800 + (l) * 0x80)
#define NV50_SOR_DP_UNK130(i,l) (0x0061c130 + (i) * 0x800 + (l) * 0x80)
#define NV50_PDISPLAY_USER(i) ((i) * 0x1000 + 0x00640000)
diff --git a/drivers/gpu/drm/nouveau/nouveau_state.c b/drivers/gpu/drm/nouveau/nouveau_state.c
index e1710640a27..e632339c323 100644
--- a/drivers/gpu/drm/nouveau/nouveau_state.c
+++ b/drivers/gpu/drm/nouveau/nouveau_state.c
@@ -34,6 +34,7 @@
#include "nouveau_drv.h"
#include "nouveau_drm.h"
+#include "nouveau_fbcon.h"
#include "nv50_display.h"
static void nouveau_stub_takedown(struct drm_device *dev) {}
@@ -515,8 +516,10 @@ nouveau_card_init(struct drm_device *dev)
dev_priv->init_state = NOUVEAU_CARD_INIT_DONE;
- if (drm_core_check_feature(dev, DRIVER_MODESET))
- drm_helper_initial_config(dev);
+ if (drm_core_check_feature(dev, DRIVER_MODESET)) {
+ nouveau_fbcon_init(dev);
+ drm_kms_helper_poll_init(dev);
+ }
return 0;
@@ -563,6 +566,7 @@ static void nouveau_card_takedown(struct drm_device *dev)
NV_DEBUG(dev, "prev state = %d\n", dev_priv->init_state);
if (dev_priv->init_state != NOUVEAU_CARD_INIT_DOWN) {
+
nouveau_backlight_exit(dev);
if (dev_priv->channel) {
@@ -637,6 +641,48 @@ static void nouveau_OF_copy_vbios_to_ramin(struct drm_device *dev)
#endif
}
+static struct apertures_struct *nouveau_get_apertures(struct drm_device *dev)
+{
+ struct pci_dev *pdev = dev->pdev;
+ struct apertures_struct *aper = alloc_apertures(3);
+ if (!aper)
+ return NULL;
+
+ aper->ranges[0].base = pci_resource_start(pdev, 1);
+ aper->ranges[0].size = pci_resource_len(pdev, 1);
+ aper->count = 1;
+
+ if (pci_resource_len(pdev, 2)) {
+ aper->ranges[aper->count].base = pci_resource_start(pdev, 2);
+ aper->ranges[aper->count].size = pci_resource_len(pdev, 2);
+ aper->count++;
+ }
+
+ if (pci_resource_len(pdev, 3)) {
+ aper->ranges[aper->count].base = pci_resource_start(pdev, 3);
+ aper->ranges[aper->count].size = pci_resource_len(pdev, 3);
+ aper->count++;
+ }
+
+ return aper;
+}
+
+static int nouveau_remove_conflicting_drivers(struct drm_device *dev)
+{
+ struct drm_nouveau_private *dev_priv = dev->dev_private;
+ bool primary = false;
+ dev_priv->apertures = nouveau_get_apertures(dev);
+ if (!dev_priv->apertures)
+ return -ENOMEM;
+
+#ifdef CONFIG_X86
+ primary = dev->pdev->resource[PCI_ROM_RESOURCE].flags & IORESOURCE_ROM_SHADOW;
+#endif
+
+ remove_conflicting_framebuffers(dev_priv->apertures, "nouveaufb", primary);
+ return 0;
+}
+
int nouveau_load(struct drm_device *dev, unsigned long flags)
{
struct drm_nouveau_private *dev_priv;
@@ -724,6 +770,12 @@ int nouveau_load(struct drm_device *dev, unsigned long flags)
NV_INFO(dev, "Detected an NV%2x generation card (0x%08x)\n",
dev_priv->card_type, reg0);
+ if (drm_core_check_feature(dev, DRIVER_MODESET)) {
+ int ret = nouveau_remove_conflicting_drivers(dev);
+ if (ret)
+ return ret;
+ }
+
/* map larger RAMIN aperture on NV40 cards */
dev_priv->ramin = NULL;
if (dev_priv->card_type >= NV_40) {
@@ -794,6 +846,8 @@ int nouveau_unload(struct drm_device *dev)
struct drm_nouveau_private *dev_priv = dev->dev_private;
if (drm_core_check_feature(dev, DRIVER_MODESET)) {
+ drm_kms_helper_poll_fini(dev);
+ nouveau_fbcon_fini(dev);
if (dev_priv->card_type >= NV_50)
nv50_display_destroy(dev);
else
diff --git a/drivers/gpu/drm/nouveau/nv04_fbcon.c b/drivers/gpu/drm/nouveau/nv04_fbcon.c
index 813b25cec72..1eeac4fae73 100644
--- a/drivers/gpu/drm/nouveau/nv04_fbcon.c
+++ b/drivers/gpu/drm/nouveau/nv04_fbcon.c
@@ -30,8 +30,8 @@
void
nv04_fbcon_copyarea(struct fb_info *info, const struct fb_copyarea *region)
{
- struct nouveau_fbcon_par *par = info->par;
- struct drm_device *dev = par->dev;
+ struct nouveau_fbdev *nfbdev = info->par;
+ struct drm_device *dev = nfbdev->dev;
struct drm_nouveau_private *dev_priv = dev->dev_private;
struct nouveau_channel *chan = dev_priv->channel;
@@ -57,8 +57,8 @@ nv04_fbcon_copyarea(struct fb_info *info, const struct fb_copyarea *region)
void
nv04_fbcon_fillrect(struct fb_info *info, const struct fb_fillrect *rect)
{
- struct nouveau_fbcon_par *par = info->par;
- struct drm_device *dev = par->dev;
+ struct nouveau_fbdev *nfbdev = info->par;
+ struct drm_device *dev = nfbdev->dev;
struct drm_nouveau_private *dev_priv = dev->dev_private;
struct nouveau_channel *chan = dev_priv->channel;
@@ -91,8 +91,8 @@ nv04_fbcon_fillrect(struct fb_info *info, const struct fb_fillrect *rect)
void
nv04_fbcon_imageblit(struct fb_info *info, const struct fb_image *image)
{
- struct nouveau_fbcon_par *par = info->par;
- struct drm_device *dev = par->dev;
+ struct nouveau_fbdev *nfbdev = info->par;
+ struct drm_device *dev = nfbdev->dev;
struct drm_nouveau_private *dev_priv = dev->dev_private;
struct nouveau_channel *chan = dev_priv->channel;
uint32_t fg;
@@ -179,8 +179,8 @@ nv04_fbcon_grobj_new(struct drm_device *dev, int class, uint32_t handle)
int
nv04_fbcon_accel_init(struct fb_info *info)
{
- struct nouveau_fbcon_par *par = info->par;
- struct drm_device *dev = par->dev;
+ struct nouveau_fbdev *nfbdev = info->par;
+ struct drm_device *dev = nfbdev->dev;
struct drm_nouveau_private *dev_priv = dev->dev_private;
struct nouveau_channel *chan = dev_priv->channel;
const int sub = NvSubCtxSurf2D;
@@ -236,7 +236,7 @@ nv04_fbcon_accel_init(struct fb_info *info)
if (ret)
return ret;
- ret = nv04_fbcon_grobj_new(dev, dev_priv->card_type >= NV_10 ?
+ ret = nv04_fbcon_grobj_new(dev, dev_priv->chipset >= 0x11 ?
0x009f : 0x005f, NvImageBlit);
if (ret)
return ret;
diff --git a/drivers/gpu/drm/nouveau/nv04_graph.c b/drivers/gpu/drm/nouveau/nv04_graph.c
index e260986ea65..618355e9cdd 100644
--- a/drivers/gpu/drm/nouveau/nv04_graph.c
+++ b/drivers/gpu/drm/nouveau/nv04_graph.c
@@ -532,9 +532,82 @@ nv04_graph_mthd_set_ref(struct nouveau_channel *chan, int grclass,
return 0;
}
-static int
-nv04_graph_mthd_set_operation(struct nouveau_channel *chan, int grclass,
- int mthd, uint32_t data)
+/*
+ * Software methods, why they are needed, and how they all work:
+ *
+ * NV04 and NV05 keep most of the state in PGRAPH context itself, but some
+ * 2d engine settings are kept inside the grobjs themselves. The grobjs are
+ * 3 words long on both. grobj format on NV04 is:
+ *
+ * word 0:
+ * - bits 0-7: class
+ * - bit 12: color key active
+ * - bit 13: clip rect active
+ * - bit 14: if set, destination surface is swizzled and taken from buffer 5
+ * [set by NV04_SWIZZLED_SURFACE], otherwise it's linear and taken
+ * from buffer 0 [set by NV04_CONTEXT_SURFACES_2D or
+ * NV03_CONTEXT_SURFACE_DST].
+ * - bits 15-17: 2d operation [aka patch config]
+ * - bit 24: patch valid [enables rendering using this object]
+ * - bit 25: surf3d valid [for tex_tri and multitex_tri only]
+ * word 1:
+ * - bits 0-1: mono format
+ * - bits 8-13: color format
+ * - bits 16-31: DMA_NOTIFY instance
+ * word 2:
+ * - bits 0-15: DMA_A instance
+ * - bits 16-31: DMA_B instance
+ *
+ * On NV05 it's:
+ *
+ * word 0:
+ * - bits 0-7: class
+ * - bit 12: color key active
+ * - bit 13: clip rect active
+ * - bit 14: if set, destination surface is swizzled and taken from buffer 5
+ * [set by NV04_SWIZZLED_SURFACE], otherwise it's linear and taken
+ * from buffer 0 [set by NV04_CONTEXT_SURFACES_2D or
+ * NV03_CONTEXT_SURFACE_DST].
+ * - bits 15-17: 2d operation [aka patch config]
+ * - bits 20-22: dither mode
+ * - bit 24: patch valid [enables rendering using this object]
+ * - bit 25: surface_dst/surface_color/surf2d/surf3d valid
+ * - bit 26: surface_src/surface_zeta valid
+ * - bit 27: pattern valid
+ * - bit 28: rop valid
+ * - bit 29: beta1 valid
+ * - bit 30: beta4 valid
+ * word 1:
+ * - bits 0-1: mono format
+ * - bits 8-13: color format
+ * - bits 16-31: DMA_NOTIFY instance
+ * word 2:
+ * - bits 0-15: DMA_A instance
+ * - bits 16-31: DMA_B instance
+ *
+ * NV05 will set/unset the relevant valid bits when you poke the relevant
+ * object-binding methods with object of the proper type, or with the NULL
+ * type. It'll only allow rendering using the grobj if all needed objects
+ * are bound. The needed set of objects depends on selected operation: for
+ * example rop object is needed by ROP_AND, but not by SRCCOPY_AND.
+ *
+ * NV04 doesn't have these methods implemented at all, and doesn't have the
+ * relevant bits in grobj. Instead, it'll allow rendering whenever bit 24
+ * is set. So we have to emulate them in software, internally keeping the
+ * same bits as NV05 does. Since grobjs are aligned to 16 bytes on nv04,
+ * but the last word isn't actually used for anything, we abuse it for this
+ * purpose.
+ *
+ * Actually, NV05 can optionally check bit 24 too, but we disable this since
+ * there's no use for it.
+ *
+ * For unknown reasons, NV04 implements surf3d binding in hardware as an
+ * exception. Also for unknown reasons, NV04 doesn't implement the clipping
+ * methods on the surf3d object, so we have to emulate them too.
+ */
+
+static void
+nv04_graph_set_ctx1(struct nouveau_channel *chan, uint32_t mask, uint32_t value)
{
struct drm_device *dev = chan->dev;
uint32_t instance = (nv_rd32(dev, NV04_PGRAPH_CTX_SWITCH4) & 0xffff) << 4;
@@ -542,42 +615,509 @@ nv04_graph_mthd_set_operation(struct nouveau_channel *chan, int grclass,
uint32_t tmp;
tmp = nv_ri32(dev, instance);
- tmp &= ~0x00038000;
- tmp |= ((data & 7) << 15);
+ tmp &= ~mask;
+ tmp |= value;
nv_wi32(dev, instance, tmp);
nv_wr32(dev, NV04_PGRAPH_CTX_SWITCH1, tmp);
nv_wr32(dev, NV04_PGRAPH_CTX_CACHE1 + (subc<<2), tmp);
+}
+
+static void
+nv04_graph_set_ctx_val(struct nouveau_channel *chan, uint32_t mask, uint32_t value)
+{
+ struct drm_device *dev = chan->dev;
+ uint32_t instance = (nv_rd32(dev, NV04_PGRAPH_CTX_SWITCH4) & 0xffff) << 4;
+ uint32_t tmp, ctx1;
+ int class, op, valid = 1;
+
+ ctx1 = nv_ri32(dev, instance);
+ class = ctx1 & 0xff;
+ op = (ctx1 >> 15) & 7;
+ tmp = nv_ri32(dev, instance + 0xc);
+ tmp &= ~mask;
+ tmp |= value;
+ nv_wi32(dev, instance + 0xc, tmp);
+
+ /* check for valid surf2d/surf_dst/surf_color */
+ if (!(tmp & 0x02000000))
+ valid = 0;
+ /* check for valid surf_src/surf_zeta */
+ if ((class == 0x1f || class == 0x48) && !(tmp & 0x04000000))
+ valid = 0;
+
+ switch (op) {
+ /* SRCCOPY_AND, SRCCOPY: no extra objects required */
+ case 0:
+ case 3:
+ break;
+ /* ROP_AND: requires pattern and rop */
+ case 1:
+ if (!(tmp & 0x18000000))
+ valid = 0;
+ break;
+ /* BLEND_AND: requires beta1 */
+ case 2:
+ if (!(tmp & 0x20000000))
+ valid = 0;
+ break;
+ /* SRCCOPY_PREMULT, BLEND_PREMULT: beta4 required */
+ case 4:
+ case 5:
+ if (!(tmp & 0x40000000))
+ valid = 0;
+ break;
+ }
+
+ nv04_graph_set_ctx1(chan, 0x01000000, valid << 24);
+}
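
The comment block above fully determines the validity rule, so it can be modelled stand-alone. The sketch below mirrors the masks and class special-cases of nv04_graph_set_ctx_val() exactly; it is a pure software model with no hardware access:

#include <stdbool.h>
#include <stdint.h>
#include <stdio.h>

/* ctx1 is grobj word 0 (class + operation); ctx_val holds the emulated
 * NV05-style valid bits kept in the otherwise unused fourth word. */
static bool grobj_may_render(uint32_t ctx1, uint32_t ctx_val)
{
	int class = ctx1 & 0xff;
	int op = (ctx1 >> 15) & 7;

	if (!(ctx_val & 0x02000000))		/* surf2d/surf_dst/surf_color */
		return false;
	if ((class == 0x1f || class == 0x48) && !(ctx_val & 0x04000000))
		return false;			/* surf_src/surf_zeta */

	switch (op) {
	case 0:					/* SRCCOPY_AND */
	case 3:					/* SRCCOPY */
		return true;
	case 1:					/* ROP_AND: pattern/rop bits */
		return ctx_val & 0x18000000;
	case 2:					/* BLEND_AND: beta1 */
		return ctx_val & 0x20000000;
	case 4:					/* SRCCOPY_PREMULT */
	case 5:					/* BLEND_PREMULT: beta4 */
		return ctx_val & 0x40000000;
	}
	return false;
}

int main(void)
{
	/* nv04 gdirect (0x4a), op ROP_AND, surf2d + pattern + rop bound */
	printf("%d\n", grobj_may_render(0x4a | (1 << 15),
					0x02000000 | 0x18000000));
	return 0;
}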
+
+static int
+nv04_graph_mthd_set_operation(struct nouveau_channel *chan, int grclass,
+ int mthd, uint32_t data)
+{
+ if (data > 5)
+ return 1;
+ /* Old versions of the objects only accept the first three operations. */
+ if (data > 2 && grclass < 0x40)
+ return 1;
+ nv04_graph_set_ctx1(chan, 0x00038000, data << 15);
+ /* changing operation changes set of objects needed for validation */
+ nv04_graph_set_ctx_val(chan, 0, 0);
+ return 0;
+}
+
+static int
+nv04_graph_mthd_surf3d_clip_h(struct nouveau_channel *chan, int grclass,
+ int mthd, uint32_t data)
+{
+ uint32_t min = data & 0xffff, max;
+ uint32_t w = data >> 16;
+ if (min & 0x8000)
+ /* too large */
+ return 1;
+ if (w & 0x8000)
+ /* yes, it accepts negative widths for some reason. */
+ w |= 0xffff0000;
+ max = min + w;
+ max &= 0x3ffff;
+ nv_wr32(chan->dev, 0x40053c, min);
+ nv_wr32(chan->dev, 0x400544, max);
+ return 0;
+}
+
+static int
+nv04_graph_mthd_surf3d_clip_v(struct nouveau_channel *chan, int grclass,
+ int mthd, uint32_t data)
+{
+ uint32_t min = data & 0xffff, max;
+ uint32_t w = data >> 16;
+ if (min & 0x8000)
+ /* too large */
+ return 1;
+ if (w & 0x8000)
+ /* yes, it accepts negative widths for some reason. */
+ w |= 0xffff0000;
+ max = min + w;
+ max &= 0x3ffff;
+ nv_wr32(chan->dev, 0x400540, min);
+ nv_wr32(chan->dev, 0x400548, max);
return 0;
}
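
Both clip methods above decode the same packed word: the low 16 bits carry the minimum, the high 16 bits a width that, oddly, may be negative. A small worked model of the decode (pure arithmetic, no register writes):

#include <stdint.h>
#include <stdio.h>

static int decode_clip(uint32_t data, uint32_t *min, uint32_t *max)
{
	uint32_t w = data >> 16;

	*min = data & 0xffff;
	if (*min & 0x8000)		/* minimum out of range */
		return 1;
	if (w & 0x8000)			/* sign-extend the 16-bit width */
		w |= 0xffff0000;
	*max = (*min + w) & 0x3ffff;
	return 0;
}

int main(void)
{
	uint32_t min, max;

	decode_clip((0xfff0u << 16) | 0x0100, &min, &max);	/* width -16 */
	printf("min=0x%x max=0x%x\n", min, max);		/* max=0xf0 */
	return 0;
}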
+static int
+nv04_graph_mthd_bind_surf2d(struct nouveau_channel *chan, int grclass,
+ int mthd, uint32_t data)
+{
+ switch (nv_ri32(chan->dev, data << 4) & 0xff) {
+ case 0x30:
+ nv04_graph_set_ctx1(chan, 0x00004000, 0);
+ nv04_graph_set_ctx_val(chan, 0x02000000, 0);
+ return 0;
+ case 0x42:
+ nv04_graph_set_ctx1(chan, 0x00004000, 0);
+ nv04_graph_set_ctx_val(chan, 0x02000000, 0x02000000);
+ return 0;
+ }
+ return 1;
+}
+
+static int
+nv04_graph_mthd_bind_surf2d_swzsurf(struct nouveau_channel *chan, int grclass,
+ int mthd, uint32_t data)
+{
+ switch (nv_ri32(chan->dev, data << 4) & 0xff) {
+ case 0x30:
+ nv04_graph_set_ctx1(chan, 0x00004000, 0);
+ nv04_graph_set_ctx_val(chan, 0x02000000, 0);
+ return 0;
+ case 0x42:
+ nv04_graph_set_ctx1(chan, 0x00004000, 0);
+ nv04_graph_set_ctx_val(chan, 0x02000000, 0x02000000);
+ return 0;
+ case 0x52:
+ nv04_graph_set_ctx1(chan, 0x00004000, 0x00004000);
+ nv04_graph_set_ctx_val(chan, 0x02000000, 0x02000000);
+ return 0;
+ }
+ return 1;
+}
+
+static int
+nv04_graph_mthd_bind_nv01_patt(struct nouveau_channel *chan, int grclass,
+ int mthd, uint32_t data)
+{
+ switch (nv_ri32(chan->dev, data << 4) & 0xff) {
+ case 0x30:
+ nv04_graph_set_ctx_val(chan, 0x08000000, 0);
+ return 0;
+ case 0x18:
+ nv04_graph_set_ctx_val(chan, 0x08000000, 0x08000000);
+ return 0;
+ }
+ return 1;
+}
+
+static int
+nv04_graph_mthd_bind_nv04_patt(struct nouveau_channel *chan, int grclass,
+ int mthd, uint32_t data)
+{
+ switch (nv_ri32(chan->dev, data << 4) & 0xff) {
+ case 0x30:
+ nv04_graph_set_ctx_val(chan, 0x08000000, 0);
+ return 0;
+ case 0x44:
+ nv04_graph_set_ctx_val(chan, 0x08000000, 0x08000000);
+ return 0;
+ }
+ return 1;
+}
+
+static int
+nv04_graph_mthd_bind_rop(struct nouveau_channel *chan, int grclass,
+ int mthd, uint32_t data)
+{
+ switch (nv_ri32(chan->dev, data << 4) & 0xff) {
+ case 0x30:
+ nv04_graph_set_ctx_val(chan, 0x10000000, 0);
+ return 0;
+ case 0x43:
+ nv04_graph_set_ctx_val(chan, 0x10000000, 0x10000000);
+ return 0;
+ }
+ return 1;
+}
+
+static int
+nv04_graph_mthd_bind_beta1(struct nouveau_channel *chan, int grclass,
+ int mthd, uint32_t data)
+{
+ switch (nv_ri32(chan->dev, data << 4) & 0xff) {
+ case 0x30:
+ nv04_graph_set_ctx_val(chan, 0x20000000, 0);
+ return 0;
+ case 0x12:
+ nv04_graph_set_ctx_val(chan, 0x20000000, 0x20000000);
+ return 0;
+ }
+ return 1;
+}
+
+static int
+nv04_graph_mthd_bind_beta4(struct nouveau_channel *chan, int grclass,
+ int mthd, uint32_t data)
+{
+ switch (nv_ri32(chan->dev, data << 4) & 0xff) {
+ case 0x30:
+ nv04_graph_set_ctx_val(chan, 0x40000000, 0);
+ return 0;
+ case 0x72:
+ nv04_graph_set_ctx_val(chan, 0x40000000, 0x40000000);
+ return 0;
+ }
+ return 1;
+}
+
+static int
+nv04_graph_mthd_bind_surf_dst(struct nouveau_channel *chan, int grclass,
+ int mthd, uint32_t data)
+{
+ switch (nv_ri32(chan->dev, data << 4) & 0xff) {
+ case 0x30:
+ nv04_graph_set_ctx_val(chan, 0x02000000, 0);
+ return 0;
+ case 0x58:
+ nv04_graph_set_ctx_val(chan, 0x02000000, 0x02000000);
+ return 0;
+ }
+ return 1;
+}
+
+static int
+nv04_graph_mthd_bind_surf_src(struct nouveau_channel *chan, int grclass,
+ int mthd, uint32_t data)
+{
+ switch (nv_ri32(chan->dev, data << 4) & 0xff) {
+ case 0x30:
+ nv04_graph_set_ctx_val(chan, 0x04000000, 0);
+ return 0;
+ case 0x59:
+ nv04_graph_set_ctx_val(chan, 0x04000000, 0x04000000);
+ return 0;
+ }
+ return 1;
+}
+
+static int
+nv04_graph_mthd_bind_surf_color(struct nouveau_channel *chan, int grclass,
+ int mthd, uint32_t data)
+{
+ switch (nv_ri32(chan->dev, data << 4) & 0xff) {
+ case 0x30:
+ nv04_graph_set_ctx_val(chan, 0x02000000, 0);
+ return 0;
+ case 0x5a:
+ nv04_graph_set_ctx_val(chan, 0x02000000, 0x02000000);
+ return 0;
+ }
+ return 1;
+}
+
+static int
+nv04_graph_mthd_bind_surf_zeta(struct nouveau_channel *chan, int grclass,
+ int mthd, uint32_t data)
+{
+ switch (nv_ri32(chan->dev, data << 4) & 0xff) {
+ case 0x30:
+ nv04_graph_set_ctx_val(chan, 0x04000000, 0);
+ return 0;
+ case 0x5b:
+ nv04_graph_set_ctx_val(chan, 0x04000000, 0x04000000);
+ return 0;
+ }
+ return 1;
+}
+
+static int
+nv04_graph_mthd_bind_clip(struct nouveau_channel *chan, int grclass,
+ int mthd, uint32_t data)
+{
+ switch (nv_ri32(chan->dev, data << 4) & 0xff) {
+ case 0x30:
+ nv04_graph_set_ctx1(chan, 0x2000, 0);
+ return 0;
+ case 0x19:
+ nv04_graph_set_ctx1(chan, 0x2000, 0x2000);
+ return 0;
+ }
+ return 1;
+}
+
+static int
+nv04_graph_mthd_bind_chroma(struct nouveau_channel *chan, int grclass,
+ int mthd, uint32_t data)
+{
+ switch (nv_ri32(chan->dev, data << 4) & 0xff) {
+ case 0x30:
+ nv04_graph_set_ctx1(chan, 0x1000, 0);
+ return 0;
+ /* Yes, for some reason even the old versions of objects
+ * accept 0x57 and not 0x17. Consistency be damned.
+ */
+ case 0x57:
+ nv04_graph_set_ctx1(chan, 0x1000, 0x1000);
+ return 0;
+ }
+ return 1;
+}
+
static struct nouveau_pgraph_object_method nv04_graph_mthds_sw[] = {
{ 0x0150, nv04_graph_mthd_set_ref },
{}
};
-static struct nouveau_pgraph_object_method nv04_graph_mthds_set_operation[] = {
+static struct nouveau_pgraph_object_method nv04_graph_mthds_nv03_gdirect[] = {
+ { 0x0184, nv04_graph_mthd_bind_nv01_patt },
+ { 0x0188, nv04_graph_mthd_bind_rop },
+ { 0x018c, nv04_graph_mthd_bind_beta1 },
+ { 0x0190, nv04_graph_mthd_bind_surf_dst },
+ { 0x02fc, nv04_graph_mthd_set_operation },
+ {},
+};
+
+static struct nouveau_pgraph_object_method nv04_graph_mthds_nv04_gdirect[] = {
+ { 0x0188, nv04_graph_mthd_bind_nv04_patt },
+ { 0x018c, nv04_graph_mthd_bind_rop },
+ { 0x0190, nv04_graph_mthd_bind_beta1 },
+ { 0x0194, nv04_graph_mthd_bind_beta4 },
+ { 0x0198, nv04_graph_mthd_bind_surf2d },
+ { 0x02fc, nv04_graph_mthd_set_operation },
+ {},
+};
+
+static struct nouveau_pgraph_object_method nv04_graph_mthds_nv01_imageblit[] = {
+ { 0x0184, nv04_graph_mthd_bind_chroma },
+ { 0x0188, nv04_graph_mthd_bind_clip },
+ { 0x018c, nv04_graph_mthd_bind_nv01_patt },
+ { 0x0190, nv04_graph_mthd_bind_rop },
+ { 0x0194, nv04_graph_mthd_bind_beta1 },
+ { 0x0198, nv04_graph_mthd_bind_surf_dst },
+ { 0x019c, nv04_graph_mthd_bind_surf_src },
+ { 0x02fc, nv04_graph_mthd_set_operation },
+ {},
+};
+
+static struct nouveau_pgraph_object_method nv04_graph_mthds_nv04_imageblit_ifc[] = {
+ { 0x0184, nv04_graph_mthd_bind_chroma },
+ { 0x0188, nv04_graph_mthd_bind_clip },
+ { 0x018c, nv04_graph_mthd_bind_nv04_patt },
+ { 0x0190, nv04_graph_mthd_bind_rop },
+ { 0x0194, nv04_graph_mthd_bind_beta1 },
+ { 0x0198, nv04_graph_mthd_bind_beta4 },
+ { 0x019c, nv04_graph_mthd_bind_surf2d },
+ { 0x02fc, nv04_graph_mthd_set_operation },
+ {},
+};
+
+static struct nouveau_pgraph_object_method nv04_graph_mthds_nv04_iifc[] = {
+ { 0x0188, nv04_graph_mthd_bind_chroma },
+ { 0x018c, nv04_graph_mthd_bind_clip },
+ { 0x0190, nv04_graph_mthd_bind_nv04_patt },
+ { 0x0194, nv04_graph_mthd_bind_rop },
+ { 0x0198, nv04_graph_mthd_bind_beta1 },
+ { 0x019c, nv04_graph_mthd_bind_beta4 },
+ { 0x01a0, nv04_graph_mthd_bind_surf2d_swzsurf },
+ { 0x03e4, nv04_graph_mthd_set_operation },
+ {},
+};
+
+static struct nouveau_pgraph_object_method nv04_graph_mthds_nv01_ifc[] = {
+ { 0x0184, nv04_graph_mthd_bind_chroma },
+ { 0x0188, nv04_graph_mthd_bind_clip },
+ { 0x018c, nv04_graph_mthd_bind_nv01_patt },
+ { 0x0190, nv04_graph_mthd_bind_rop },
+ { 0x0194, nv04_graph_mthd_bind_beta1 },
+ { 0x0198, nv04_graph_mthd_bind_surf_dst },
+ { 0x02fc, nv04_graph_mthd_set_operation },
+ {},
+};
+
+static struct nouveau_pgraph_object_method nv04_graph_mthds_nv03_sifc[] = {
+ { 0x0184, nv04_graph_mthd_bind_chroma },
+ { 0x0188, nv04_graph_mthd_bind_nv01_patt },
+ { 0x018c, nv04_graph_mthd_bind_rop },
+ { 0x0190, nv04_graph_mthd_bind_beta1 },
+ { 0x0194, nv04_graph_mthd_bind_surf_dst },
{ 0x02fc, nv04_graph_mthd_set_operation },
{},
};
+static struct nouveau_pgraph_object_method nv04_graph_mthds_nv04_sifc[] = {
+ { 0x0184, nv04_graph_mthd_bind_chroma },
+ { 0x0188, nv04_graph_mthd_bind_nv04_patt },
+ { 0x018c, nv04_graph_mthd_bind_rop },
+ { 0x0190, nv04_graph_mthd_bind_beta1 },
+ { 0x0194, nv04_graph_mthd_bind_beta4 },
+ { 0x0198, nv04_graph_mthd_bind_surf2d },
+ { 0x02fc, nv04_graph_mthd_set_operation },
+ {},
+};
+
+static struct nouveau_pgraph_object_method nv04_graph_mthds_nv03_sifm[] = {
+ { 0x0188, nv04_graph_mthd_bind_nv01_patt },
+ { 0x018c, nv04_graph_mthd_bind_rop },
+ { 0x0190, nv04_graph_mthd_bind_beta1 },
+ { 0x0194, nv04_graph_mthd_bind_surf_dst },
+ { 0x0304, nv04_graph_mthd_set_operation },
+ {},
+};
+
+static struct nouveau_pgraph_object_method nv04_graph_mthds_nv04_sifm[] = {
+ { 0x0188, nv04_graph_mthd_bind_nv04_patt },
+ { 0x018c, nv04_graph_mthd_bind_rop },
+ { 0x0190, nv04_graph_mthd_bind_beta1 },
+ { 0x0194, nv04_graph_mthd_bind_beta4 },
+ { 0x0198, nv04_graph_mthd_bind_surf2d_swzsurf },
+ { 0x0304, nv04_graph_mthd_set_operation },
+ {},
+};
+
+static struct nouveau_pgraph_object_method nv04_graph_mthds_nv01_shape[] = {
+ { 0x0184, nv04_graph_mthd_bind_clip },
+ { 0x0188, nv04_graph_mthd_bind_nv01_patt },
+ { 0x018c, nv04_graph_mthd_bind_rop },
+ { 0x0190, nv04_graph_mthd_bind_beta1 },
+ { 0x0194, nv04_graph_mthd_bind_surf_dst },
+ { 0x02fc, nv04_graph_mthd_set_operation },
+ {},
+};
+
+static struct nouveau_pgraph_object_method nv04_graph_mthds_nv04_shape[] = {
+ { 0x0184, nv04_graph_mthd_bind_clip },
+ { 0x0188, nv04_graph_mthd_bind_nv04_patt },
+ { 0x018c, nv04_graph_mthd_bind_rop },
+ { 0x0190, nv04_graph_mthd_bind_beta1 },
+ { 0x0194, nv04_graph_mthd_bind_beta4 },
+ { 0x0198, nv04_graph_mthd_bind_surf2d },
+ { 0x02fc, nv04_graph_mthd_set_operation },
+ {},
+};
+
+static struct nouveau_pgraph_object_method nv04_graph_mthds_nv03_tex_tri[] = {
+ { 0x0188, nv04_graph_mthd_bind_clip },
+ { 0x018c, nv04_graph_mthd_bind_surf_color },
+ { 0x0190, nv04_graph_mthd_bind_surf_zeta },
+ {},
+};
+
+static struct nouveau_pgraph_object_method nv04_graph_mthds_surf3d[] = {
+ { 0x02f8, nv04_graph_mthd_surf3d_clip_h },
+ { 0x02fc, nv04_graph_mthd_surf3d_clip_v },
+ {},
+};
+
struct nouveau_pgraph_object_class nv04_graph_grclass[] = {
- { 0x0039, false, NULL },
- { 0x004a, false, nv04_graph_mthds_set_operation }, /* gdirect */
- { 0x005f, false, nv04_graph_mthds_set_operation }, /* imageblit */
- { 0x0061, false, nv04_graph_mthds_set_operation }, /* ifc */
- { 0x0077, false, nv04_graph_mthds_set_operation }, /* sifm */
+ { 0x0038, false, NULL }, /* dvd subpicture */
+ { 0x0039, false, NULL }, /* m2mf */
+ { 0x004b, false, nv04_graph_mthds_nv03_gdirect }, /* nv03 gdirect */
+ { 0x004a, false, nv04_graph_mthds_nv04_gdirect }, /* nv04 gdirect */
+ { 0x001f, false, nv04_graph_mthds_nv01_imageblit }, /* nv01 imageblit */
+ { 0x005f, false, nv04_graph_mthds_nv04_imageblit_ifc }, /* nv04 imageblit */
+ { 0x0060, false, nv04_graph_mthds_nv04_iifc }, /* nv04 iifc */
+ { 0x0064, false, NULL }, /* nv05 iifc */
+ { 0x0021, false, nv04_graph_mthds_nv01_ifc }, /* nv01 ifc */
+ { 0x0061, false, nv04_graph_mthds_nv04_imageblit_ifc }, /* nv04 ifc */
+ { 0x0065, false, NULL }, /* nv05 ifc */
+ { 0x0036, false, nv04_graph_mthds_nv03_sifc }, /* nv03 sifc */
+ { 0x0076, false, nv04_graph_mthds_nv04_sifc }, /* nv04 sifc */
+ { 0x0066, false, NULL }, /* nv05 sifc */
+ { 0x0037, false, nv04_graph_mthds_nv03_sifm }, /* nv03 sifm */
+ { 0x0077, false, nv04_graph_mthds_nv04_sifm }, /* nv04 sifm */
{ 0x0030, false, NULL }, /* null */
{ 0x0042, false, NULL }, /* surf2d */
{ 0x0043, false, NULL }, /* rop */
{ 0x0012, false, NULL }, /* beta1 */
{ 0x0072, false, NULL }, /* beta4 */
{ 0x0019, false, NULL }, /* cliprect */
- { 0x0044, false, NULL }, /* pattern */
+ { 0x0018, false, NULL }, /* nv01 pattern */
+ { 0x0044, false, NULL }, /* nv04 pattern */
{ 0x0052, false, NULL }, /* swzsurf */
- { 0x0053, false, NULL }, /* surf3d */
+ { 0x0053, false, nv04_graph_mthds_surf3d }, /* surf3d */
+ { 0x0048, false, nv04_graph_mthds_nv03_tex_tri }, /* nv03 tex_tri */
{ 0x0054, false, NULL }, /* tex_tri */
{ 0x0055, false, NULL }, /* multitex_tri */
+ { 0x0017, false, NULL }, /* nv01 chroma */
+ { 0x0057, false, NULL }, /* nv04 chroma */
+ { 0x0058, false, NULL }, /* surf_dst */
+ { 0x0059, false, NULL }, /* surf_src */
+ { 0x005a, false, NULL }, /* surf_color */
+ { 0x005b, false, NULL }, /* surf_zeta */
+ { 0x001c, false, nv04_graph_mthds_nv01_shape }, /* nv01 line */
+ { 0x005c, false, nv04_graph_mthds_nv04_shape }, /* nv04 line */
+ { 0x001d, false, nv04_graph_mthds_nv01_shape }, /* nv01 tri */
+ { 0x005d, false, nv04_graph_mthds_nv04_shape }, /* nv04 tri */
+ { 0x001e, false, nv04_graph_mthds_nv01_shape }, /* nv01 rect */
+ { 0x005e, false, nv04_graph_mthds_nv04_shape }, /* nv04 rect */
{ 0x506e, true, nv04_graph_mthds_sw },
{}
};
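
The expanded table pairs each object class id with its software-method list; dispatch is a linear scan over both. A sketch of how such a table is consumed (types and handler simplified; the real lookup lives in the PGRAPH method handling path):

#include <stdbool.h>
#include <stdint.h>
#include <stdio.h>

struct method_sketch { uint32_t mthd; int (*handler)(uint32_t data); };
struct grclass_sketch {
	uint32_t id;
	bool software;
	const struct method_sketch *methods;
};

static int set_operation(uint32_t data) { return data > 5; }

static const struct method_sketch gdirect_mthds[] = {
	{ 0x02fc, set_operation },
	{ 0 },
};
static const struct grclass_sketch grclass[] = {
	{ 0x004a, false, gdirect_mthds },
	{ 0x0030, false, NULL },	/* null object: no sw methods */
	{ 0 },
};

static int dispatch(uint32_t id, uint32_t mthd, uint32_t data)
{
	const struct grclass_sketch *c;
	const struct method_sketch *m;

	for (c = grclass; c->id; c++)
		if (c->id == id)
			for (m = c->methods; m && m->mthd; m++)
				if (m->mthd == mthd)
					return m->handler(data);
	return 1;	/* not handled in software */
}

int main(void)
{
	printf("%d\n", dispatch(0x004a, 0x02fc, 3));	/* 0: accepted */
	return 0;
}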
diff --git a/drivers/gpu/drm/nouveau/nv40_grctx.c b/drivers/gpu/drm/nouveau/nv40_grctx.c
index 11b11c31f54..9b5c9746958 100644
--- a/drivers/gpu/drm/nouveau/nv40_grctx.c
+++ b/drivers/gpu/drm/nouveau/nv40_grctx.c
@@ -115,11 +115,6 @@
/* TODO:
* - get vs count from 0x1540
- * - document unimplemented bits compared to nvidia
- * - nsource handling
- * - R0 & 0x0200 handling
- * - single-vs handling
- * - 400314 bit 0
*/
static int
diff --git a/drivers/gpu/drm/nouveau/nv50_calc.c b/drivers/gpu/drm/nouveau/nv50_calc.c
new file mode 100644
index 00000000000..2cdc2bfe717
--- /dev/null
+++ b/drivers/gpu/drm/nouveau/nv50_calc.c
@@ -0,0 +1,87 @@
+/*
+ * Copyright 2010 Red Hat Inc.
+ *
+ * Permission is hereby granted, free of charge, to any person obtaining a
+ * copy of this software and associated documentation files (the "Software"),
+ * to deal in the Software without restriction, including without limitation
+ * the rights to use, copy, modify, merge, publish, distribute, sublicense,
+ * and/or sell copies of the Software, and to permit persons to whom the
+ * Software is furnished to do so, subject to the following conditions:
+ *
+ * The above copyright notice and this permission notice shall be included in
+ * all copies or substantial portions of the Software.
+ *
+ * THE SOFTWARE IS PROVIDED "AS IS", WITHOUT WARRANTY OF ANY KIND, EXPRESS OR
+ * IMPLIED, INCLUDING BUT NOT LIMITED TO THE WARRANTIES OF MERCHANTABILITY,
+ * FITNESS FOR A PARTICULAR PURPOSE AND NONINFRINGEMENT. IN NO EVENT SHALL
+ * THE COPYRIGHT HOLDER(S) OR AUTHOR(S) BE LIABLE FOR ANY CLAIM, DAMAGES OR
+ * OTHER LIABILITY, WHETHER IN AN ACTION OF CONTRACT, TORT OR OTHERWISE,
+ * ARISING FROM, OUT OF OR IN CONNECTION WITH THE SOFTWARE OR THE USE OR
+ * OTHER DEALINGS IN THE SOFTWARE.
+ *
+ * Authors: Ben Skeggs
+ */
+
+#include "drmP.h"
+#include "drm_fixed.h"
+#include "nouveau_drv.h"
+#include "nouveau_hw.h"
+
+int
+nv50_calc_pll(struct drm_device *dev, struct pll_lims *pll, int clk,
+ int *N1, int *M1, int *N2, int *M2, int *P)
+{
+ struct nouveau_pll_vals pll_vals;
+ int ret;
+
+ ret = nouveau_calc_pll_mnp(dev, pll, clk, &pll_vals);
+ if (ret <= 0)
+ return ret;
+
+ *N1 = pll_vals.N1;
+ *M1 = pll_vals.M1;
+ *N2 = pll_vals.N2;
+ *M2 = pll_vals.M2;
+ *P = pll_vals.log2P;
+ return ret;
+}
+
+int
+nv50_calc_pll2(struct drm_device *dev, struct pll_lims *pll, int clk,
+ int *N, int *fN, int *M, int *P)
+{
+ fixed20_12 fb_div, a, b;
+
+ *P = pll->vco1.maxfreq / clk;
+ if (*P > pll->max_p)
+ *P = pll->max_p;
+ if (*P < pll->min_p)
+ *P = pll->min_p;
+
+ /* *M = ceil(refclk / pll->vco.max_inputfreq); */
+ a.full = dfixed_const(pll->refclk);
+ b.full = dfixed_const(pll->vco1.max_inputfreq);
+ a.full = dfixed_div(a, b);
+ a.full = dfixed_ceil(a);
+ *M = dfixed_trunc(a);
+
+ /* fb_div = (vco * *M) / refclk; */
+ fb_div.full = dfixed_const(clk * *P);
+ fb_div.full = dfixed_mul(fb_div, a);
+ a.full = dfixed_const(pll->refclk);
+ fb_div.full = dfixed_div(fb_div, a);
+
+ /* *N = floor(fb_div); */
+ a.full = dfixed_floor(fb_div);
+ *N = dfixed_trunc(fb_div);
+
+ /* *fN = (fmod(fb_div, 1.0) * 8192) - 4096; */
+ b.full = dfixed_const(8192);
+ a.full = dfixed_mul(a, b);
+ fb_div.full = dfixed_mul(fb_div, b);
+ fb_div.full = fb_div.full - a.full;
+ *fN = dfixed_trunc(fb_div) - 4096;
+ *fN &= 0xffff;
+
+ return clk;
+}
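
The fixed20_12 arithmetic above is easier to read as plain floating point. The model below restates nv50_calc_pll2() step for step; the limit values in main() are made-up examples, not real board data:

#include <math.h>
#include <stdio.h>

struct lims_sketch { int vco1_maxfreq, vco1_max_inputfreq, refclk, min_p, max_p; };

static int calc_pll2_model(const struct lims_sketch *pll, int clk,
			   int *N, int *fN, int *M, int *P)
{
	double fb_div;

	*P = pll->vco1_maxfreq / clk;
	if (*P > pll->max_p) *P = pll->max_p;
	if (*P < pll->min_p) *P = pll->min_p;

	/* M = ceil(refclk / max_inputfreq) */
	*M = (int)ceil((double)pll->refclk / pll->vco1_max_inputfreq);
	/* fb_div = (vco * M) / refclk, with vco = clk * P */
	fb_div = (double)clk * *P * *M / pll->refclk;
	*N = (int)floor(fb_div);
	/* fractional remainder, biased: fN = frac * 8192 - 4096, 16 bits */
	*fN = ((int)((fb_div - *N) * 8192) - 4096) & 0xffff;
	return clk;
}

int main(void)
{
	struct lims_sketch pll = { 2100000, 400000, 27000, 1, 10 };	/* example */
	int N, fN, M, P;

	calc_pll2_model(&pll, 270000, &N, &fN, &M, &P);
	printf("N=%d fN=0x%04x M=%d P=%d\n", N, fN, M, P);
	return 0;
}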
diff --git a/drivers/gpu/drm/nouveau/nv50_crtc.c b/drivers/gpu/drm/nouveau/nv50_crtc.c
index cfabeb974a5..b4e4a3b05ea 100644
--- a/drivers/gpu/drm/nouveau/nv50_crtc.c
+++ b/drivers/gpu/drm/nouveau/nv50_crtc.c
@@ -264,32 +264,40 @@ nv50_crtc_set_scale(struct nouveau_crtc *nv_crtc, int scaling_mode, bool update)
int
nv50_crtc_set_clock(struct drm_device *dev, int head, int pclk)
{
- uint32_t pll_reg = NV50_PDISPLAY_CRTC_CLK_CTRL1(head);
- struct nouveau_pll_vals pll;
- struct pll_lims limits;
+ uint32_t reg = NV50_PDISPLAY_CRTC_CLK_CTRL1(head);
+ struct pll_lims pll;
uint32_t reg1, reg2;
- int ret;
+ int ret, N1, M1, N2, M2, P;
- ret = get_pll_limits(dev, pll_reg, &limits);
+ ret = get_pll_limits(dev, reg, &pll);
if (ret)
return ret;
- ret = nouveau_calc_pll_mnp(dev, &limits, pclk, &pll);
- if (ret <= 0)
- return ret;
+ if (pll.vco2.maxfreq) {
+ ret = nv50_calc_pll(dev, &pll, pclk, &N1, &M1, &N2, &M2, &P);
+ if (ret <= 0)
+ return 0;
+
+ NV_DEBUG(dev, "pclk %d out %d NM1 %d %d NM2 %d %d P %d\n",
+ pclk, ret, N1, M1, N2, M2, P);
- if (limits.vco2.maxfreq) {
- reg1 = nv_rd32(dev, pll_reg + 4) & 0xff00ff00;
- reg2 = nv_rd32(dev, pll_reg + 8) & 0x8000ff00;
- nv_wr32(dev, pll_reg, 0x10000611);
- nv_wr32(dev, pll_reg + 4, reg1 | (pll.M1 << 16) | pll.N1);
- nv_wr32(dev, pll_reg + 8,
- reg2 | (pll.log2P << 28) | (pll.M2 << 16) | pll.N2);
+ reg1 = nv_rd32(dev, reg + 4) & 0xff00ff00;
+ reg2 = nv_rd32(dev, reg + 8) & 0x8000ff00;
+ nv_wr32(dev, reg, 0x10000611);
+ nv_wr32(dev, reg + 4, reg1 | (M1 << 16) | N1);
+ nv_wr32(dev, reg + 8, reg2 | (P << 28) | (M2 << 16) | N2);
} else {
- reg1 = nv_rd32(dev, pll_reg + 4) & 0xffc00000;
- nv_wr32(dev, pll_reg, 0x50000610);
- nv_wr32(dev, pll_reg + 4, reg1 |
- (pll.log2P << 16) | (pll.M1 << 8) | pll.N1);
+ ret = nv50_calc_pll2(dev, &pll, pclk, &N1, &N2, &M1, &P);
+ if (ret <= 0)
+ return 0;
+
+ NV_DEBUG(dev, "pclk %d out %d N %d fN 0x%04x M %d P %d\n",
+ pclk, ret, N1, N2, M1, P);
+
+ reg1 = nv_rd32(dev, reg + 4) & 0xffc00000;
+ nv_wr32(dev, reg, 0x50000610);
+ nv_wr32(dev, reg + 4, reg1 | (P << 16) | (M1 << 8) | N1);
+ nv_wr32(dev, reg + 8, N2);
}
return 0;
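
On the write side, both branches pack the coefficients into the CLK_CTRL words with the same shifts as the diff above; for the two-stage layout, reg+4 keeps M1/N1 and reg+8 keeps P/M2/N2. A packing sketch of that case (values in main() are arbitrary):

#include <stdint.h>
#include <stdio.h>

static void pack_two_stage(int N1, int M1, int N2, int M2, int P,
			   uint32_t *r4, uint32_t *r8)
{
	*r4 = (*r4 & 0xff00ff00) | (M1 << 16) | N1;
	*r8 = (*r8 & 0x8000ff00) | ((uint32_t)P << 28) | (M2 << 16) | N2;
}

int main(void)
{
	uint32_t r4 = 0, r8 = 0;

	pack_two_stage(0x3c, 2, 0x28, 5, 1, &r4, &r8);
	printf("reg+4=0x%08x reg+8=0x%08x\n", r4, r8);
	return 0;
}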
diff --git a/drivers/gpu/drm/nouveau/nv50_display.c b/drivers/gpu/drm/nouveau/nv50_display.c
index 649db4c1b69..580a5d10be9 100644
--- a/drivers/gpu/drm/nouveau/nv50_display.c
+++ b/drivers/gpu/drm/nouveau/nv50_display.c
@@ -29,6 +29,7 @@
#include "nouveau_encoder.h"
#include "nouveau_connector.h"
#include "nouveau_fb.h"
+#include "nouveau_fbcon.h"
#include "drm_crtc_helper.h"
static void
@@ -783,6 +784,37 @@ ack:
}
static void
+nv50_display_unk20_dp_hack(struct drm_device *dev, struct dcb_entry *dcb)
+{
+ int or = ffs(dcb->or) - 1, link = !(dcb->dpconf.sor.link & 1);
+ struct drm_encoder *encoder;
+ uint32_t tmp, unk0 = 0, unk1 = 0;
+
+ if (dcb->type != OUTPUT_DP)
+ return;
+
+ list_for_each_entry(encoder, &dev->mode_config.encoder_list, head) {
+ struct nouveau_encoder *nv_encoder = nouveau_encoder(encoder);
+
+ if (nv_encoder->dcb == dcb) {
+ unk0 = nv_encoder->dp.unk0;
+ unk1 = nv_encoder->dp.unk1;
+ break;
+ }
+ }
+
+ if (unk0 || unk1) {
+ tmp = nv_rd32(dev, NV50_SOR_DP_CTRL(or, link));
+ tmp &= 0xfffffe03;
+ nv_wr32(dev, NV50_SOR_DP_CTRL(or, link), tmp | unk0);
+
+ tmp = nv_rd32(dev, NV50_SOR_DP_UNK128(or, link));
+ tmp &= 0xfef080c0;
+ nv_wr32(dev, NV50_SOR_DP_UNK128(or, link), tmp | unk1);
+ }
+}
+
+static void
nv50_display_unk20_handler(struct drm_device *dev)
{
struct dcb_entry *dcbent;
@@ -805,6 +837,8 @@ nv50_display_unk20_handler(struct drm_device *dev)
nouveau_bios_run_display_table(dev, dcbent, script, pclk);
+ nv50_display_unk20_dp_hack(dev, dcbent);
+
tmp = nv_rd32(dev, NV50_PDISPLAY_CRTC_CLK_CTRL2(head));
tmp &= ~0x000000f;
nv_wr32(dev, NV50_PDISPLAY_CRTC_CLK_CTRL2(head), tmp);
@@ -945,6 +979,8 @@ nv50_display_irq_hotplug_bh(struct work_struct *work)
nv_wr32(dev, 0xe054, nv_rd32(dev, 0xe054));
if (dev_priv->chipset >= 0x90)
nv_wr32(dev, 0xe074, nv_rd32(dev, 0xe074));
+
+ drm_helper_hpd_irq_event(dev);
}
void
diff --git a/drivers/gpu/drm/nouveau/nv50_fbcon.c b/drivers/gpu/drm/nouveau/nv50_fbcon.c
index a8c70e7e918..6bf025c6fc6 100644
--- a/drivers/gpu/drm/nouveau/nv50_fbcon.c
+++ b/drivers/gpu/drm/nouveau/nv50_fbcon.c
@@ -6,8 +6,8 @@
void
nv50_fbcon_fillrect(struct fb_info *info, const struct fb_fillrect *rect)
{
- struct nouveau_fbcon_par *par = info->par;
- struct drm_device *dev = par->dev;
+ struct nouveau_fbdev *nfbdev = info->par;
+ struct drm_device *dev = nfbdev->dev;
struct drm_nouveau_private *dev_priv = dev->dev_private;
struct nouveau_channel *chan = dev_priv->channel;
@@ -49,8 +49,8 @@ nv50_fbcon_fillrect(struct fb_info *info, const struct fb_fillrect *rect)
void
nv50_fbcon_copyarea(struct fb_info *info, const struct fb_copyarea *region)
{
- struct nouveau_fbcon_par *par = info->par;
- struct drm_device *dev = par->dev;
+ struct nouveau_fbdev *nfbdev = info->par;
+ struct drm_device *dev = nfbdev->dev;
struct drm_nouveau_private *dev_priv = dev->dev_private;
struct nouveau_channel *chan = dev_priv->channel;
@@ -84,8 +84,8 @@ nv50_fbcon_copyarea(struct fb_info *info, const struct fb_copyarea *region)
void
nv50_fbcon_imageblit(struct fb_info *info, const struct fb_image *image)
{
- struct nouveau_fbcon_par *par = info->par;
- struct drm_device *dev = par->dev;
+ struct nouveau_fbdev *nfbdev = info->par;
+ struct drm_device *dev = nfbdev->dev;
struct drm_nouveau_private *dev_priv = dev->dev_private;
struct nouveau_channel *chan = dev_priv->channel;
uint32_t width, dwords, *data = (uint32_t *)image->data;
@@ -152,8 +152,8 @@ nv50_fbcon_imageblit(struct fb_info *info, const struct fb_image *image)
int
nv50_fbcon_accel_init(struct fb_info *info)
{
- struct nouveau_fbcon_par *par = info->par;
- struct drm_device *dev = par->dev;
+ struct nouveau_fbdev *nfbdev = info->par;
+ struct drm_device *dev = nfbdev->dev;
struct drm_nouveau_private *dev_priv = dev->dev_private;
struct nouveau_channel *chan = dev_priv->channel;
struct nouveau_gpuobj *eng2d = NULL;
diff --git a/drivers/gpu/drm/nouveau/nv50_sor.c b/drivers/gpu/drm/nouveau/nv50_sor.c
index 0c68698f23d..b11eaf9c5c7 100644
--- a/drivers/gpu/drm/nouveau/nv50_sor.c
+++ b/drivers/gpu/drm/nouveau/nv50_sor.c
@@ -321,18 +321,23 @@ nv50_sor_create(struct drm_device *dev, struct dcb_entry *entry)
encoder->possible_clones = 0;
if (nv_encoder->dcb->type == OUTPUT_DP) {
- uint32_t mc, or = nv_encoder->or;
+ int or = nv_encoder->or, link = !(entry->dpconf.sor.link & 1);
+ uint32_t tmp;
if (dev_priv->chipset < 0x90 ||
dev_priv->chipset == 0x92 || dev_priv->chipset == 0xa0)
- mc = nv_rd32(dev, NV50_PDISPLAY_SOR_MODE_CTRL_C(or));
+ tmp = nv_rd32(dev, NV50_PDISPLAY_SOR_MODE_CTRL_C(or));
else
- mc = nv_rd32(dev, NV90_PDISPLAY_SOR_MODE_CTRL_C(or));
+ tmp = nv_rd32(dev, NV90_PDISPLAY_SOR_MODE_CTRL_C(or));
- switch ((mc & 0x00000f00) >> 8) {
+ switch ((tmp & 0x00000f00) >> 8) {
case 8:
case 9:
- nv_encoder->dp.mc_unknown = (mc & 0x000f0000) >> 16;
+ nv_encoder->dp.mc_unknown = (tmp & 0x000f0000) >> 16;
+ tmp = nv_rd32(dev, NV50_SOR_DP_CTRL(or, link));
+ nv_encoder->dp.unk0 = tmp & 0x000001fc;
+ tmp = nv_rd32(dev, NV50_SOR_DP_UNK128(or, link));
+ nv_encoder->dp.unk1 = tmp & 0x010f7f3f;
break;
default:
break;
diff --git a/drivers/gpu/drm/radeon/atombios.h b/drivers/gpu/drm/radeon/atombios.h
index 6732b5dd8ff..1bc72c3190a 100644
--- a/drivers/gpu/drm/radeon/atombios.h
+++ b/drivers/gpu/drm/radeon/atombios.h
@@ -2912,7 +2912,7 @@ typedef struct _ATOM_ANALOG_TV_INFO_V1_2
UCHAR ucTV_BootUpDefaultStandard;
UCHAR ucExt_TV_ASIC_ID;
UCHAR ucExt_TV_ASIC_SlaveAddr;
- ATOM_DTD_FORMAT aModeTimings[MAX_SUPPORTED_TV_TIMING];
+ ATOM_DTD_FORMAT aModeTimings[MAX_SUPPORTED_TV_TIMING_V1_2];
}ATOM_ANALOG_TV_INFO_V1_2;
typedef struct _ATOM_DPCD_INFO
@@ -3780,7 +3780,7 @@ typedef struct _ATOM_ASIC_SS_ASSIGNMENT
UCHAR ucReserved[2];
}ATOM_ASIC_SS_ASSIGNMENT;
-//Define ucClockIndication, SW uses the IDs below to search if the SS is requried/enabled on a clock branch/signal type.
+//Define ucClockIndication, SW uses the IDs below to search if the SS is required/enabled on a clock branch/signal type.
//SS is not required or enabled if a match is not found.
#define ASIC_INTERNAL_MEMORY_SS 1
#define ASIC_INTERNAL_ENGINE_SS 2
@@ -5742,6 +5742,9 @@ typedef struct _ATOM_PPLIB_THERMALCONTROLLER
#define ATOM_PP_THERMALCONTROLLER_RV6xx 7
#define ATOM_PP_THERMALCONTROLLER_RV770 8
#define ATOM_PP_THERMALCONTROLLER_ADT7473 9
+#define ATOM_PP_THERMALCONTROLLER_EXTERNAL_GPIO 11
+#define ATOM_PP_THERMALCONTROLLER_EVERGREEN 12
+#define ATOM_PP_THERMALCONTROLLER_ADT7473_WITH_INTERNAL 0x89 // ADT7473 Fan Control + Internal Thermal Controller
typedef struct _ATOM_PPLIB_STATE
{
@@ -5749,6 +5752,26 @@ typedef struct _ATOM_PPLIB_STATE
UCHAR ucClockStateIndices[1]; // variable-sized
} ATOM_PPLIB_STATE;
+typedef struct _ATOM_PPLIB_FANTABLE
+{
+ UCHAR ucFanTableFormat; // Bump this when the table format or version changes so the remaining fields are reinterpreted correctly.
+ UCHAR ucTHyst; // Temperature hysteresis. Integer.
+ USHORT usTMin; // The temperature, in 0.01 degrees Celsius, below which we just run at a minimal PWM.
+ USHORT usTMed; // The middle temperature where we change slopes.
+ USHORT usTHigh; // The high point above TMed for adjusting the second slope.
+ USHORT usPWMMin; // The minimum PWM value in percent (0.01% increments).
+ USHORT usPWMMed; // The PWM value (in percent) at TMed.
+ USHORT usPWMHigh; // The PWM value at THigh.
+} ATOM_PPLIB_FANTABLE;
+
+typedef struct _ATOM_PPLIB_EXTENDEDHEADER
+{
+ USHORT usSize;
+ ULONG ulMaxEngineClock; // For Overdrive.
+ ULONG ulMaxMemoryClock; // For Overdrive.
+ // Add extra system parameters here, always adjust size to include all fields.
+} ATOM_PPLIB_EXTENDEDHEADER;
+
//// ATOM_PPLIB_POWERPLAYTABLE::ulPlatformCaps
#define ATOM_PP_PLATFORM_CAP_BACKBIAS 1
#define ATOM_PP_PLATFORM_CAP_POWERPLAY 2
@@ -5762,6 +5785,12 @@ typedef struct _ATOM_PPLIB_STATE
#define ATOM_PP_PLATFORM_CAP_SIDEPORTCONTROL 512
#define ATOM_PP_PLATFORM_CAP_TURNOFFPLL_ASPML1 1024
#define ATOM_PP_PLATFORM_CAP_HTLINKCONTROL 2048
+#define ATOM_PP_PLATFORM_CAP_MVDDCONTROL 4096
+#define ATOM_PP_PLATFORM_CAP_GOTO_BOOT_ON_ALERT 0x2000 // Go to boot state on alerts, e.g. on an AC->DC transition.
+#define ATOM_PP_PLATFORM_CAP_DONT_WAIT_FOR_VBLANK_ON_ALERT 0x4000 // Do NOT wait for VBLANK during an alert (e.g. AC->DC transition).
+#define ATOM_PP_PLATFORM_CAP_VDDCI_CONTROL 0x8000 // Whether the driver controls VDDCI independently of VDDC.
+#define ATOM_PP_PLATFORM_CAP_REGULATOR_HOT 0x00010000 // Enable the 'regulator hot' feature.
+#define ATOM_PP_PLATFORM_CAP_BACO 0x00020000 // Whether the driver supports the BACO state.
typedef struct _ATOM_PPLIB_POWERPLAYTABLE
{
@@ -5797,6 +5826,21 @@ typedef struct _ATOM_PPLIB_POWERPLAYTABLE
} ATOM_PPLIB_POWERPLAYTABLE;
+typedef struct _ATOM_PPLIB_POWERPLAYTABLE2
+{
+ ATOM_PPLIB_POWERPLAYTABLE basicTable;
+ UCHAR ucNumCustomThermalPolicy;
+ USHORT usCustomThermalPolicyArrayOffset;
+}ATOM_PPLIB_POWERPLAYTABLE2, *LPATOM_PPLIB_POWERPLAYTABLE2;
+
+typedef struct _ATOM_PPLIB_POWERPLAYTABLE3
+{
+ ATOM_PPLIB_POWERPLAYTABLE2 basicTable2;
+ USHORT usFormatID; // To be used ONLY by PPGen.
+ USHORT usFanTableOffset;
+ USHORT usExtendendedHeaderOffset;
+} ATOM_PPLIB_POWERPLAYTABLE3, *LPATOM_PPLIB_POWERPLAYTABLE3;
+
//// ATOM_PPLIB_NONCLOCK_INFO::usClassification
#define ATOM_PPLIB_CLASSIFICATION_UI_MASK 0x0007
#define ATOM_PPLIB_CLASSIFICATION_UI_SHIFT 0
@@ -5816,7 +5860,9 @@ typedef struct _ATOM_PPLIB_POWERPLAYTABLE
#define ATOM_PPLIB_CLASSIFICATION_UVDSTATE 0x0400
#define ATOM_PPLIB_CLASSIFICATION_3DLOW 0x0800
#define ATOM_PPLIB_CLASSIFICATION_ACPI 0x1000
-// remaining 3 bits are reserved
+#define ATOM_PPLIB_CLASSIFICATION_HD2STATE 0x2000
+#define ATOM_PPLIB_CLASSIFICATION_HDSTATE 0x4000
+#define ATOM_PPLIB_CLASSIFICATION_SDSTATE 0x8000
//// ATOM_PPLIB_NONCLOCK_INFO::ulCapsAndSettings
#define ATOM_PPLIB_SINGLE_DISPLAY_ONLY 0x00000001
@@ -5840,9 +5886,15 @@ typedef struct _ATOM_PPLIB_POWERPLAYTABLE
#define ATOM_PPLIB_SOFTWARE_DISABLE_LOADBALANCING 0x00001000
#define ATOM_PPLIB_SOFTWARE_ENABLE_SLEEP_FOR_TIMESTAMPS 0x00002000
+#define ATOM_PPLIB_DISALLOW_ON_DC 0x00004000
#define ATOM_PPLIB_ENABLE_VARIBRIGHT 0x00008000
-#define ATOM_PPLIB_DISALLOW_ON_DC 0x00004000
+//memory related flags
+#define ATOM_PPLIB_SWSTATE_MEMORY_DLL_OFF 0x000010000
+
+// M3 Arb: 2 bits; currently 3 sets of parameters in total
+#define ATOM_PPLIB_M3ARB_MASK 0x00060000
+#define ATOM_PPLIB_M3ARB_SHIFT 17
// Contained in an array starting at the offset
// in ATOM_PPLIB_POWERPLAYTABLE::usNonClockInfoArrayOffset.
@@ -5860,6 +5912,9 @@ typedef struct _ATOM_PPLIB_NONCLOCK_INFO
// Contained in an array starting at the offset
// in ATOM_PPLIB_POWERPLAYTABLE::usClockInfoArrayOffset.
// referenced from ATOM_PPLIB_STATE::ucClockStateIndices
+#define ATOM_PPLIB_NONCLOCKINFO_VER1 12
+#define ATOM_PPLIB_NONCLOCKINFO_VER2 24
+
typedef struct _ATOM_PPLIB_R600_CLOCK_INFO
{
USHORT usEngineClockLow;
@@ -5882,6 +5937,23 @@ typedef struct _ATOM_PPLIB_R600_CLOCK_INFO
#define ATOM_PPLIB_R600_FLAGS_BACKBIASENABLE 4
#define ATOM_PPLIB_R600_FLAGS_MEMORY_ODT_OFF 8
#define ATOM_PPLIB_R600_FLAGS_MEMORY_DLL_OFF 16
+#define ATOM_PPLIB_R600_FLAGS_LOWPOWER 32 // On the RV770 use 'low power' setting (sequencer S0).
+
+typedef struct _ATOM_PPLIB_EVERGREEN_CLOCK_INFO
+{
+ USHORT usEngineClockLow;
+ UCHAR ucEngineClockHigh;
+
+ USHORT usMemoryClockLow;
+ UCHAR ucMemoryClockHigh;
+
+ USHORT usVDDC;
+ USHORT usVDDCI;
+ USHORT usUnused;
+
+ ULONG ulFlags; // ATOM_PPLIB_R600_FLAGS_*
+
+} ATOM_PPLIB_EVERGREEN_CLOCK_INFO;
typedef struct _ATOM_PPLIB_RS780_CLOCK_INFO
@@ -5895,7 +5967,7 @@ typedef struct _ATOM_PPLIB_RS780_CLOCK_INFO
UCHAR ucPadding; // For proper alignment and size.
USHORT usVDDC; // For the 780, use: None, Low, High, Variable
UCHAR ucMaxHTLinkWidth; // From SBIOS - {2, 4, 8, 16}
- UCHAR ucMinHTLinkWidth; // From SBIOS - {2, 4, 8, 16}. Effective only if CDLW enabled. Minimum down stream width could be bigger as display BW requriement.
+ UCHAR ucMinHTLinkWidth; // From SBIOS - {2, 4, 8, 16}. Effective only if CDLW enabled. Minimum down stream width could be bigger as display BW requirement.
USHORT usHTLinkFreq; // See definition ATOM_PPLIB_RS780_HTLINKFREQ_xxx or in MHz(>=200).
ULONG ulFlags;
} ATOM_PPLIB_RS780_CLOCK_INFO;
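
The three POWERPLAYTABLE structs above nest, so a parser checks the table content revision and only then reaches the fan table and extended header through the V3 offsets (which the radeon code resolves relative to the PowerPlay data table base). A sketch of that walk, with simplified stand-in types and an illustrative buffer rather than a real ROM image:

#include <stdint.h>
#include <stdio.h>

struct pp_table_sketch {
	uint8_t  content_rev;		/* ucTableContentRevision */
	uint16_t fan_table_offset;	/* usFanTableOffset (V3+) */
	uint16_t ext_header_offset;	/* usExtendendedHeaderOffset (V3+) */
};

static void describe(const struct pp_table_sketch *t, const uint8_t *base)
{
	if (t->content_rev < 3) {
		printf("pre-V3 table: no fan table, no extended header\n");
		return;
	}
	if (t->fan_table_offset)
		printf("fan table at base+0x%x (format byte 0x%02x)\n",
		       t->fan_table_offset, base[t->fan_table_offset]);
	if (t->ext_header_offset)
		printf("extended header at base+0x%x\n", t->ext_header_offset);
}

int main(void)
{
	uint8_t blob[64] = { 0 };
	struct pp_table_sketch t = { 3, 0x20, 0x30 };

	blob[0x20] = 0x01;	/* pretend ucFanTableFormat */
	describe(&t, blob);
	return 0;
}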
diff --git a/drivers/gpu/drm/radeon/atombios_crtc.c b/drivers/gpu/drm/radeon/atombios_crtc.c
index a87990b3ae8..03dd6c41dc1 100644
--- a/drivers/gpu/drm/radeon/atombios_crtc.c
+++ b/drivers/gpu/drm/radeon/atombios_crtc.c
@@ -26,7 +26,7 @@
#include <drm/drmP.h>
#include <drm/drm_crtc_helper.h>
#include <drm/radeon_drm.h>
-#include "radeon_fixed.h"
+#include <drm/drm_fixed.h>
#include "radeon.h"
#include "atom.h"
#include "atom-bits.h"
@@ -245,25 +245,27 @@ void atombios_crtc_dpms(struct drm_crtc *crtc, int mode)
switch (mode) {
case DRM_MODE_DPMS_ON:
+ radeon_crtc->enabled = true;
+ /* adjust pm to dpms changes BEFORE enabling crtcs */
+ radeon_pm_compute_clocks(rdev);
atombios_enable_crtc(crtc, ATOM_ENABLE);
if (ASIC_IS_DCE3(rdev))
atombios_enable_crtc_memreq(crtc, ATOM_ENABLE);
atombios_blank_crtc(crtc, ATOM_DISABLE);
- /* XXX re-enable when interrupt support is added */
- if (!ASIC_IS_DCE4(rdev))
- drm_vblank_post_modeset(dev, radeon_crtc->crtc_id);
+ drm_vblank_post_modeset(dev, radeon_crtc->crtc_id);
radeon_crtc_load_lut(crtc);
break;
case DRM_MODE_DPMS_STANDBY:
case DRM_MODE_DPMS_SUSPEND:
case DRM_MODE_DPMS_OFF:
- /* XXX re-enable when interrupt support is added */
- if (!ASIC_IS_DCE4(rdev))
- drm_vblank_pre_modeset(dev, radeon_crtc->crtc_id);
+ drm_vblank_pre_modeset(dev, radeon_crtc->crtc_id);
atombios_blank_crtc(crtc, ATOM_ENABLE);
if (ASIC_IS_DCE3(rdev))
atombios_enable_crtc_memreq(crtc, ATOM_DISABLE);
atombios_enable_crtc(crtc, ATOM_DISABLE);
+ radeon_crtc->enabled = false;
+ /* adjust pm to dpms changes AFTER disabling crtcs */
+ radeon_pm_compute_clocks(rdev);
break;
}
}
@@ -1160,6 +1162,12 @@ static bool atombios_crtc_mode_fixup(struct drm_crtc *crtc,
struct drm_display_mode *mode,
struct drm_display_mode *adjusted_mode)
{
+ struct drm_device *dev = crtc->dev;
+ struct radeon_device *rdev = dev->dev_private;
+
+ /* adjust pm to upcoming mode change */
+ radeon_pm_compute_clocks(rdev);
+
if (!radeon_crtc_scaling_mode_fixup(crtc, mode, adjusted_mode))
return false;
return true;
diff --git a/drivers/gpu/drm/radeon/atombios_dp.c b/drivers/gpu/drm/radeon/atombios_dp.c
index 28b31c64f48..abffb1499e2 100644
--- a/drivers/gpu/drm/radeon/atombios_dp.c
+++ b/drivers/gpu/drm/radeon/atombios_dp.c
@@ -351,7 +351,7 @@ retry:
args.v1.ucChannelID = chan->rec.i2c_id;
args.v1.ucDelay = delay / 10;
if (ASIC_IS_DCE4(rdev))
- args.v2.ucHPD_ID = chan->rec.hpd_id;
+ args.v2.ucHPD_ID = chan->rec.hpd;
atom_execute_table(rdev->mode_info.atom_context, index, (uint32_t *)&args);
diff --git a/drivers/gpu/drm/radeon/evergreen.c b/drivers/gpu/drm/radeon/evergreen.c
index e8f447e2050..8c8e4d3cbaa 100644
--- a/drivers/gpu/drm/radeon/evergreen.c
+++ b/drivers/gpu/drm/radeon/evergreen.c
@@ -28,39 +28,235 @@
#include "radeon.h"
#include "radeon_asic.h"
#include "radeon_drm.h"
-#include "rv770d.h"
+#include "evergreend.h"
#include "atom.h"
#include "avivod.h"
#include "evergreen_reg.h"
+#define EVERGREEN_PFP_UCODE_SIZE 1120
+#define EVERGREEN_PM4_UCODE_SIZE 1376
+
static void evergreen_gpu_init(struct radeon_device *rdev);
void evergreen_fini(struct radeon_device *rdev);
+void evergreen_pm_misc(struct radeon_device *rdev)
+{
+
+}
+
+void evergreen_pm_prepare(struct radeon_device *rdev)
+{
+ struct drm_device *ddev = rdev->ddev;
+ struct drm_crtc *crtc;
+ struct radeon_crtc *radeon_crtc;
+ u32 tmp;
+
+ /* disable any active CRTCs */
+ list_for_each_entry(crtc, &ddev->mode_config.crtc_list, head) {
+ radeon_crtc = to_radeon_crtc(crtc);
+ if (radeon_crtc->enabled) {
+ tmp = RREG32(EVERGREEN_CRTC_CONTROL + radeon_crtc->crtc_offset);
+ tmp |= EVERGREEN_CRTC_DISP_READ_REQUEST_DISABLE;
+ WREG32(EVERGREEN_CRTC_CONTROL + radeon_crtc->crtc_offset, tmp);
+ }
+ }
+}
+
+void evergreen_pm_finish(struct radeon_device *rdev)
+{
+ struct drm_device *ddev = rdev->ddev;
+ struct drm_crtc *crtc;
+ struct radeon_crtc *radeon_crtc;
+ u32 tmp;
+
+ /* enable any active CRTCs */
+ list_for_each_entry(crtc, &ddev->mode_config.crtc_list, head) {
+ radeon_crtc = to_radeon_crtc(crtc);
+ if (radeon_crtc->enabled) {
+ tmp = RREG32(EVERGREEN_CRTC_CONTROL + radeon_crtc->crtc_offset);
+ tmp &= ~EVERGREEN_CRTC_DISP_READ_REQUEST_DISABLE;
+ WREG32(EVERGREEN_CRTC_CONTROL + radeon_crtc->crtc_offset, tmp);
+ }
+ }
+}
+
bool evergreen_hpd_sense(struct radeon_device *rdev, enum radeon_hpd_id hpd)
{
bool connected = false;
- /* XXX */
+
+ switch (hpd) {
+ case RADEON_HPD_1:
+ if (RREG32(DC_HPD1_INT_STATUS) & DC_HPDx_SENSE)
+ connected = true;
+ break;
+ case RADEON_HPD_2:
+ if (RREG32(DC_HPD2_INT_STATUS) & DC_HPDx_SENSE)
+ connected = true;
+ break;
+ case RADEON_HPD_3:
+ if (RREG32(DC_HPD3_INT_STATUS) & DC_HPDx_SENSE)
+ connected = true;
+ break;
+ case RADEON_HPD_4:
+ if (RREG32(DC_HPD4_INT_STATUS) & DC_HPDx_SENSE)
+ connected = true;
+ break;
+ case RADEON_HPD_5:
+ if (RREG32(DC_HPD5_INT_STATUS) & DC_HPDx_SENSE)
+ connected = true;
+ break;
+ case RADEON_HPD_6:
+ if (RREG32(DC_HPD6_INT_STATUS) & DC_HPDx_SENSE)
+ connected = true;
+ break;
+ default:
+ break;
+ }
+
return connected;
}
void evergreen_hpd_set_polarity(struct radeon_device *rdev,
enum radeon_hpd_id hpd)
{
- /* XXX */
+ u32 tmp;
+ bool connected = evergreen_hpd_sense(rdev, hpd);
+
+ switch (hpd) {
+ case RADEON_HPD_1:
+ tmp = RREG32(DC_HPD1_INT_CONTROL);
+ if (connected)
+ tmp &= ~DC_HPDx_INT_POLARITY;
+ else
+ tmp |= DC_HPDx_INT_POLARITY;
+ WREG32(DC_HPD1_INT_CONTROL, tmp);
+ break;
+ case RADEON_HPD_2:
+ tmp = RREG32(DC_HPD2_INT_CONTROL);
+ if (connected)
+ tmp &= ~DC_HPDx_INT_POLARITY;
+ else
+ tmp |= DC_HPDx_INT_POLARITY;
+ WREG32(DC_HPD2_INT_CONTROL, tmp);
+ break;
+ case RADEON_HPD_3:
+ tmp = RREG32(DC_HPD3_INT_CONTROL);
+ if (connected)
+ tmp &= ~DC_HPDx_INT_POLARITY;
+ else
+ tmp |= DC_HPDx_INT_POLARITY;
+ WREG32(DC_HPD3_INT_CONTROL, tmp);
+ break;
+ case RADEON_HPD_4:
+ tmp = RREG32(DC_HPD4_INT_CONTROL);
+ if (connected)
+ tmp &= ~DC_HPDx_INT_POLARITY;
+ else
+ tmp |= DC_HPDx_INT_POLARITY;
+ WREG32(DC_HPD4_INT_CONTROL, tmp);
+ break;
+ case RADEON_HPD_5:
+ tmp = RREG32(DC_HPD5_INT_CONTROL);
+ if (connected)
+ tmp &= ~DC_HPDx_INT_POLARITY;
+ else
+ tmp |= DC_HPDx_INT_POLARITY;
+ WREG32(DC_HPD5_INT_CONTROL, tmp);
+ break;
+ case RADEON_HPD_6:
+ tmp = RREG32(DC_HPD6_INT_CONTROL);
+ if (connected)
+ tmp &= ~DC_HPDx_INT_POLARITY;
+ else
+ tmp |= DC_HPDx_INT_POLARITY;
+ WREG32(DC_HPD6_INT_CONTROL, tmp);
+ break;
+ default:
+ break;
+ }
}
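
The six cases in evergreen_hpd_sense() and evergreen_hpd_set_polarity() differ only in which per-pad register they touch, so the same logic can be written table-driven. A sketch of that shape, with the register file modelled as plain arrays; the bit values are placeholders standing in for DC_HPDx_SENSE and DC_HPDx_INT_POLARITY, not the real evergreen register map:

#include <stdbool.h>
#include <stdint.h>
#include <stdio.h>

#define HPD_PADS	6
#define SENSE		0x00000002	/* models DC_HPDx_SENSE */
#define INT_POLARITY	0x00000100	/* models DC_HPDx_INT_POLARITY */

/* fake register file: one INT_STATUS and one INT_CONTROL word per pad */
static uint32_t int_status[HPD_PADS], int_control[HPD_PADS];

static bool hpd_sense(int pad)
{
	return pad < HPD_PADS && (int_status[pad] & SENSE);
}

static void hpd_set_polarity(int pad)
{
	if (pad >= HPD_PADS)
		return;
	if (hpd_sense(pad))
		int_control[pad] &= ~INT_POLARITY;
	else
		int_control[pad] |= INT_POLARITY;
}

int main(void)
{
	int_status[2] = SENSE;		/* pretend pad 3 is connected */
	hpd_set_polarity(2);
	printf("pad3 ctl=0x%08x\n", int_control[2]);
	return 0;
}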
void evergreen_hpd_init(struct radeon_device *rdev)
{
- /* XXX */
+ struct drm_device *dev = rdev->ddev;
+ struct drm_connector *connector;
+ u32 tmp = DC_HPDx_CONNECTION_TIMER(0x9c4) |
+ DC_HPDx_RX_INT_TIMER(0xfa) | DC_HPDx_EN;
+
+ list_for_each_entry(connector, &dev->mode_config.connector_list, head) {
+ struct radeon_connector *radeon_connector = to_radeon_connector(connector);
+ switch (radeon_connector->hpd.hpd) {
+ case RADEON_HPD_1:
+ WREG32(DC_HPD1_CONTROL, tmp);
+ rdev->irq.hpd[0] = true;
+ break;
+ case RADEON_HPD_2:
+ WREG32(DC_HPD2_CONTROL, tmp);
+ rdev->irq.hpd[1] = true;
+ break;
+ case RADEON_HPD_3:
+ WREG32(DC_HPD3_CONTROL, tmp);
+ rdev->irq.hpd[2] = true;
+ break;
+ case RADEON_HPD_4:
+ WREG32(DC_HPD4_CONTROL, tmp);
+ rdev->irq.hpd[3] = true;
+ break;
+ case RADEON_HPD_5:
+ WREG32(DC_HPD5_CONTROL, tmp);
+ rdev->irq.hpd[4] = true;
+ break;
+ case RADEON_HPD_6:
+ WREG32(DC_HPD6_CONTROL, tmp);
+ rdev->irq.hpd[5] = true;
+ break;
+ default:
+ break;
+ }
+ }
+ if (rdev->irq.installed)
+ evergreen_irq_set(rdev);
}
-
-void evergreen_bandwidth_update(struct radeon_device *rdev)
+void evergreen_hpd_fini(struct radeon_device *rdev)
{
- /* XXX */
+ struct drm_device *dev = rdev->ddev;
+ struct drm_connector *connector;
+
+ list_for_each_entry(connector, &dev->mode_config.connector_list, head) {
+ struct radeon_connector *radeon_connector = to_radeon_connector(connector);
+ switch (radeon_connector->hpd.hpd) {
+ case RADEON_HPD_1:
+ WREG32(DC_HPD1_CONTROL, 0);
+ rdev->irq.hpd[0] = false;
+ break;
+ case RADEON_HPD_2:
+ WREG32(DC_HPD2_CONTROL, 0);
+ rdev->irq.hpd[1] = false;
+ break;
+ case RADEON_HPD_3:
+ WREG32(DC_HPD3_CONTROL, 0);
+ rdev->irq.hpd[2] = false;
+ break;
+ case RADEON_HPD_4:
+ WREG32(DC_HPD4_CONTROL, 0);
+ rdev->irq.hpd[3] = false;
+ break;
+ case RADEON_HPD_5:
+ WREG32(DC_HPD5_CONTROL, 0);
+ rdev->irq.hpd[4] = false;
+ break;
+ case RADEON_HPD_6:
+ WREG32(DC_HPD6_CONTROL, 0);
+ rdev->irq.hpd[5] = false;
+ break;
+ default:
+ break;
+ }
+ }
}
-void evergreen_hpd_fini(struct radeon_device *rdev)
+void evergreen_bandwidth_update(struct radeon_device *rdev)
{
/* XXX */
}
@@ -83,10 +279,31 @@ static int evergreen_mc_wait_for_idle(struct radeon_device *rdev)
/*
* GART
*/
+void evergreen_pcie_gart_tlb_flush(struct radeon_device *rdev)
+{
+ unsigned i;
+ u32 tmp;
+
+ WREG32(VM_CONTEXT0_REQUEST_RESPONSE, REQUEST_TYPE(1));
+ for (i = 0; i < rdev->usec_timeout; i++) {
+ /* read MC_STATUS */
+ tmp = RREG32(VM_CONTEXT0_REQUEST_RESPONSE);
+ tmp = (tmp & RESPONSE_TYPE_MASK) >> RESPONSE_TYPE_SHIFT;
+ if (tmp == 2) {
+ printk(KERN_WARNING "[drm] evergreen flush TLB failed\n");
+ return;
+ }
+ if (tmp) {
+ return;
+ }
+ udelay(1);
+ }
+}
+
int evergreen_pcie_gart_enable(struct radeon_device *rdev)
{
u32 tmp;
- int r, i;
+ int r;
if (rdev->gart.table.vram.robj == NULL) {
dev_err(rdev->dev, "No VRAM object for PCIE GART.\n");
@@ -121,10 +338,9 @@ int evergreen_pcie_gart_enable(struct radeon_device *rdev)
RANGE_PROTECTION_FAULT_ENABLE_DEFAULT);
WREG32(VM_CONTEXT0_PROTECTION_FAULT_DEFAULT_ADDR,
(u32)(rdev->dummy_page.addr >> 12));
- for (i = 1; i < 7; i++)
- WREG32(VM_CONTEXT0_CNTL + (i * 4), 0);
+ WREG32(VM_CONTEXT1_CNTL, 0);
- r600_pcie_gart_tlb_flush(rdev);
+ evergreen_pcie_gart_tlb_flush(rdev);
rdev->gart.ready = true;
return 0;
}
@@ -132,11 +348,11 @@ int evergreen_pcie_gart_enable(struct radeon_device *rdev)
void evergreen_pcie_gart_disable(struct radeon_device *rdev)
{
u32 tmp;
- int i, r;
+ int r;
/* Disable all tables */
- for (i = 0; i < 7; i++)
- WREG32(VM_CONTEXT0_CNTL + (i * 4), 0);
+ WREG32(VM_CONTEXT0_CNTL, 0);
+ WREG32(VM_CONTEXT1_CNTL, 0);
/* Setup L2 cache */
WREG32(VM_L2_CNTL, ENABLE_L2_FRAGMENT_PROCESSING |
@@ -173,7 +389,6 @@ void evergreen_pcie_gart_fini(struct radeon_device *rdev)
void evergreen_agp_enable(struct radeon_device *rdev)
{
u32 tmp;
- int i;
/* Setup L2 cache */
WREG32(VM_L2_CNTL, ENABLE_L2_CACHE | ENABLE_L2_FRAGMENT_PROCESSING |
@@ -193,8 +408,8 @@ void evergreen_agp_enable(struct radeon_device *rdev)
WREG32(MC_VM_MB_L1_TLB1_CNTL, tmp);
WREG32(MC_VM_MB_L1_TLB2_CNTL, tmp);
WREG32(MC_VM_MB_L1_TLB3_CNTL, tmp);
- for (i = 0; i < 7; i++)
- WREG32(VM_CONTEXT0_CNTL + (i * 4), 0);
+ WREG32(VM_CONTEXT0_CNTL, 0);
+ WREG32(VM_CONTEXT1_CNTL, 0);
}
static void evergreen_mc_stop(struct radeon_device *rdev, struct evergreen_mc_save *save)
@@ -400,40 +615,656 @@ static void evergreen_mc_program(struct radeon_device *rdev)
rv515_vga_render_disable(rdev);
}
-#if 0
/*
* CP.
*/
-static void evergreen_cp_stop(struct radeon_device *rdev)
-{
- /* XXX */
-}
-
static int evergreen_cp_load_microcode(struct radeon_device *rdev)
{
- /* XXX */
+ const __be32 *fw_data;
+ int i;
+ if (!rdev->me_fw || !rdev->pfp_fw)
+ return -EINVAL;
+
+ r700_cp_stop(rdev);
+ WREG32(CP_RB_CNTL, RB_NO_UPDATE | (15 << 8) | (3 << 0));
+
+ fw_data = (const __be32 *)rdev->pfp_fw->data;
+ WREG32(CP_PFP_UCODE_ADDR, 0);
+ for (i = 0; i < EVERGREEN_PFP_UCODE_SIZE; i++)
+ WREG32(CP_PFP_UCODE_DATA, be32_to_cpup(fw_data++));
+ WREG32(CP_PFP_UCODE_ADDR, 0);
+
+ fw_data = (const __be32 *)rdev->me_fw->data;
+ WREG32(CP_ME_RAM_WADDR, 0);
+ for (i = 0; i < EVERGREEN_PM4_UCODE_SIZE; i++)
+ WREG32(CP_ME_RAM_DATA, be32_to_cpup(fw_data++));
+
+ WREG32(CP_PFP_UCODE_ADDR, 0);
+ WREG32(CP_ME_RAM_WADDR, 0);
+ WREG32(CP_ME_RAM_RADDR, 0);
return 0;
}
+int evergreen_cp_resume(struct radeon_device *rdev)
+{
+ u32 tmp;
+ u32 rb_bufsz;
+ int r;
+
+ /* Reset cp; if cp is reset, then PA, SH, VGT also need to be reset */
+ WREG32(GRBM_SOFT_RESET, (SOFT_RESET_CP |
+ SOFT_RESET_PA |
+ SOFT_RESET_SH |
+ SOFT_RESET_VGT |
+ SOFT_RESET_SX));
+ RREG32(GRBM_SOFT_RESET);
+ mdelay(15);
+ WREG32(GRBM_SOFT_RESET, 0);
+ RREG32(GRBM_SOFT_RESET);
+
+ /* Set ring buffer size */
+ rb_bufsz = drm_order(rdev->cp.ring_size / 8);
+ tmp = RB_NO_UPDATE | (drm_order(RADEON_GPU_PAGE_SIZE/8) << 8) | rb_bufsz;
+#ifdef __BIG_ENDIAN
+ tmp |= BUF_SWAP_32BIT;
+#endif
+ WREG32(CP_RB_CNTL, tmp);
+ WREG32(CP_SEM_WAIT_TIMER, 0x4);
+
+ /* Set the write pointer delay */
+ WREG32(CP_RB_WPTR_DELAY, 0);
+
+ /* Initialize the ring buffer's read and write pointers */
+ WREG32(CP_RB_CNTL, tmp | RB_RPTR_WR_ENA);
+ WREG32(CP_RB_RPTR_WR, 0);
+ WREG32(CP_RB_WPTR, 0);
+ WREG32(CP_RB_RPTR_ADDR, rdev->cp.gpu_addr & 0xFFFFFFFF);
+ WREG32(CP_RB_RPTR_ADDR_HI, upper_32_bits(rdev->cp.gpu_addr));
+ mdelay(1);
+ WREG32(CP_RB_CNTL, tmp);
+
+ WREG32(CP_RB_BASE, rdev->cp.gpu_addr >> 8);
+ WREG32(CP_DEBUG, (1 << 27) | (1 << 28));
+
+ rdev->cp.rptr = RREG32(CP_RB_RPTR);
+ rdev->cp.wptr = RREG32(CP_RB_WPTR);
+
+ r600_cp_start(rdev);
+ rdev->cp.ready = true;
+ r = radeon_ring_test(rdev);
+ if (r) {
+ rdev->cp.ready = false;
+ return r;
+ }
+ return 0;
+}
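
evergreen_cp_resume() sizes the ring with drm_order(), which returns the smallest power-of-two exponent covering its argument; the low byte of CP_RB_CNTL then carries that order. A minimal model of the computation (the 1 MiB ring size is just an example):

#include <stdio.h>

/* ceiling log2, matching the semantics of the kernel's drm_order() */
static int order(unsigned long size)
{
	int n = 0;

	while ((1UL << n) < size)
		n++;
	return n;
}

int main(void)
{
	unsigned long ring_size = 1024 * 1024;	/* example: 1 MiB ring */
	int rb_bufsz = order(ring_size / 8);

	/* CP_RB_CNTL packs this order into its low bits, as above */
	printf("rb_bufsz=%d (ring/8=%lu)\n", rb_bufsz, ring_size / 8);
	return 0;
}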
/*
* Core functions
*/
-static u32 evergreen_get_tile_pipe_to_backend_map(u32 num_tile_pipes,
+static u32 evergreen_get_tile_pipe_to_backend_map(struct radeon_device *rdev,
+ u32 num_tile_pipes,
u32 num_backends,
u32 backend_disable_mask)
{
u32 backend_map = 0;
+ u32 enabled_backends_mask = 0;
+ u32 enabled_backends_count = 0;
+ u32 cur_pipe;
+ u32 swizzle_pipe[EVERGREEN_MAX_PIPES];
+ u32 cur_backend = 0;
+ u32 i;
+ bool force_no_swizzle;
+
+ if (num_tile_pipes > EVERGREEN_MAX_PIPES)
+ num_tile_pipes = EVERGREEN_MAX_PIPES;
+ if (num_tile_pipes < 1)
+ num_tile_pipes = 1;
+ if (num_backends > EVERGREEN_MAX_BACKENDS)
+ num_backends = EVERGREEN_MAX_BACKENDS;
+ if (num_backends < 1)
+ num_backends = 1;
+
+ for (i = 0; i < EVERGREEN_MAX_BACKENDS; ++i) {
+ if (((backend_disable_mask >> i) & 1) == 0) {
+ enabled_backends_mask |= (1 << i);
+ ++enabled_backends_count;
+ }
+ if (enabled_backends_count == num_backends)
+ break;
+ }
+
+ if (enabled_backends_count == 0) {
+ enabled_backends_mask = 1;
+ enabled_backends_count = 1;
+ }
+
+ if (enabled_backends_count != num_backends)
+ num_backends = enabled_backends_count;
+
+ memset((uint8_t *)&swizzle_pipe[0], 0, sizeof(u32) * EVERGREEN_MAX_PIPES);
+ switch (rdev->family) {
+ case CHIP_CEDAR:
+ case CHIP_REDWOOD:
+ force_no_swizzle = false;
+ break;
+ case CHIP_CYPRESS:
+ case CHIP_HEMLOCK:
+ case CHIP_JUNIPER:
+ default:
+ force_no_swizzle = true;
+ break;
+ }
+ if (force_no_swizzle) {
+ bool last_backend_enabled = false;
+
+ force_no_swizzle = false;
+ for (i = 0; i < EVERGREEN_MAX_BACKENDS; ++i) {
+ if (((enabled_backends_mask >> i) & 1) == 1) {
+ if (last_backend_enabled)
+ force_no_swizzle = true;
+ last_backend_enabled = true;
+ } else
+ last_backend_enabled = false;
+ }
+ }
+
+ switch (num_tile_pipes) {
+ case 1:
+ case 3:
+ case 5:
+ case 7:
+ DRM_ERROR("odd number of pipes!\n");
+ break;
+ case 2:
+ swizzle_pipe[0] = 0;
+ swizzle_pipe[1] = 1;
+ break;
+ case 4:
+ if (force_no_swizzle) {
+ swizzle_pipe[0] = 0;
+ swizzle_pipe[1] = 1;
+ swizzle_pipe[2] = 2;
+ swizzle_pipe[3] = 3;
+ } else {
+ swizzle_pipe[0] = 0;
+ swizzle_pipe[1] = 2;
+ swizzle_pipe[2] = 1;
+ swizzle_pipe[3] = 3;
+ }
+ break;
+ case 6:
+ if (force_no_swizzle) {
+ swizzle_pipe[0] = 0;
+ swizzle_pipe[1] = 1;
+ swizzle_pipe[2] = 2;
+ swizzle_pipe[3] = 3;
+ swizzle_pipe[4] = 4;
+ swizzle_pipe[5] = 5;
+ } else {
+ swizzle_pipe[0] = 0;
+ swizzle_pipe[1] = 2;
+ swizzle_pipe[2] = 4;
+ swizzle_pipe[3] = 1;
+ swizzle_pipe[4] = 3;
+ swizzle_pipe[5] = 5;
+ }
+ break;
+ case 8:
+ if (force_no_swizzle) {
+ swizzle_pipe[0] = 0;
+ swizzle_pipe[1] = 1;
+ swizzle_pipe[2] = 2;
+ swizzle_pipe[3] = 3;
+ swizzle_pipe[4] = 4;
+ swizzle_pipe[5] = 5;
+ swizzle_pipe[6] = 6;
+ swizzle_pipe[7] = 7;
+ } else {
+ swizzle_pipe[0] = 0;
+ swizzle_pipe[1] = 2;
+ swizzle_pipe[2] = 4;
+ swizzle_pipe[3] = 6;
+ swizzle_pipe[4] = 1;
+ swizzle_pipe[5] = 3;
+ swizzle_pipe[6] = 5;
+ swizzle_pipe[7] = 7;
+ }
+ break;
+ }
+
+ for (cur_pipe = 0; cur_pipe < num_tile_pipes; ++cur_pipe) {
+ while (((1 << cur_backend) & enabled_backends_mask) == 0)
+ cur_backend = (cur_backend + 1) % EVERGREEN_MAX_BACKENDS;
+
+ backend_map |= (((cur_backend & 0xf) << (swizzle_pipe[cur_pipe] * 4)));
+
+ cur_backend = (cur_backend + 1) % EVERGREEN_MAX_BACKENDS;
+ }
return backend_map;
}
-#endif
static void evergreen_gpu_init(struct radeon_device *rdev)
{
- /* XXX */
+ u32 cc_rb_backend_disable = 0;
+ u32 cc_gc_shader_pipe_config;
+ u32 gb_addr_config = 0;
+ u32 mc_shared_chmap, mc_arb_ramcfg;
+ u32 gb_backend_map;
+ u32 grbm_gfx_index;
+ u32 sx_debug_1;
+ u32 smx_dc_ctl0;
+ u32 sq_config;
+ u32 sq_lds_resource_mgmt;
+ u32 sq_gpr_resource_mgmt_1;
+ u32 sq_gpr_resource_mgmt_2;
+ u32 sq_gpr_resource_mgmt_3;
+ u32 sq_thread_resource_mgmt;
+ u32 sq_thread_resource_mgmt_2;
+ u32 sq_stack_resource_mgmt_1;
+ u32 sq_stack_resource_mgmt_2;
+ u32 sq_stack_resource_mgmt_3;
+ u32 vgt_cache_invalidation;
+ u32 hdp_host_path_cntl;
+ int i, j, num_shader_engines, ps_thread_count;
+
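+ /* per-family shader core limits: shader engines, pipes, SIMDs,
+ * backends, GPRs, threads and scan converter FIFO sizes.
+ */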
+ switch (rdev->family) {
+ case CHIP_CYPRESS:
+ case CHIP_HEMLOCK:
+ rdev->config.evergreen.num_ses = 2;
+ rdev->config.evergreen.max_pipes = 4;
+ rdev->config.evergreen.max_tile_pipes = 8;
+ rdev->config.evergreen.max_simds = 10;
+ rdev->config.evergreen.max_backends = 4 * rdev->config.evergreen.num_ses;
+ rdev->config.evergreen.max_gprs = 256;
+ rdev->config.evergreen.max_threads = 248;
+ rdev->config.evergreen.max_gs_threads = 32;
+ rdev->config.evergreen.max_stack_entries = 512;
+ rdev->config.evergreen.sx_num_of_sets = 4;
+ rdev->config.evergreen.sx_max_export_size = 256;
+ rdev->config.evergreen.sx_max_export_pos_size = 64;
+ rdev->config.evergreen.sx_max_export_smx_size = 192;
+ rdev->config.evergreen.max_hw_contexts = 8;
+ rdev->config.evergreen.sq_num_cf_insts = 2;
+
+ rdev->config.evergreen.sc_prim_fifo_size = 0x100;
+ rdev->config.evergreen.sc_hiz_tile_fifo_size = 0x30;
+ rdev->config.evergreen.sc_earlyz_tile_fifo_size = 0x130;
+ break;
+ case CHIP_JUNIPER:
+ rdev->config.evergreen.num_ses = 1;
+ rdev->config.evergreen.max_pipes = 4;
+ rdev->config.evergreen.max_tile_pipes = 4;
+ rdev->config.evergreen.max_simds = 10;
+ rdev->config.evergreen.max_backends = 4 * rdev->config.evergreen.num_ses;
+ rdev->config.evergreen.max_gprs = 256;
+ rdev->config.evergreen.max_threads = 248;
+ rdev->config.evergreen.max_gs_threads = 32;
+ rdev->config.evergreen.max_stack_entries = 512;
+ rdev->config.evergreen.sx_num_of_sets = 4;
+ rdev->config.evergreen.sx_max_export_size = 256;
+ rdev->config.evergreen.sx_max_export_pos_size = 64;
+ rdev->config.evergreen.sx_max_export_smx_size = 192;
+ rdev->config.evergreen.max_hw_contexts = 8;
+ rdev->config.evergreen.sq_num_cf_insts = 2;
+
+ rdev->config.evergreen.sc_prim_fifo_size = 0x100;
+ rdev->config.evergreen.sc_hiz_tile_fifo_size = 0x30;
+ rdev->config.evergreen.sc_earlyz_tile_fifo_size = 0x130;
+ break;
+ case CHIP_REDWOOD:
+ rdev->config.evergreen.num_ses = 1;
+ rdev->config.evergreen.max_pipes = 4;
+ rdev->config.evergreen.max_tile_pipes = 4;
+ rdev->config.evergreen.max_simds = 5;
+ rdev->config.evergreen.max_backends = 2 * rdev->config.evergreen.num_ses;
+ rdev->config.evergreen.max_gprs = 256;
+ rdev->config.evergreen.max_threads = 248;
+ rdev->config.evergreen.max_gs_threads = 32;
+ rdev->config.evergreen.max_stack_entries = 256;
+ rdev->config.evergreen.sx_num_of_sets = 4;
+ rdev->config.evergreen.sx_max_export_size = 256;
+ rdev->config.evergreen.sx_max_export_pos_size = 64;
+ rdev->config.evergreen.sx_max_export_smx_size = 192;
+ rdev->config.evergreen.max_hw_contexts = 8;
+ rdev->config.evergreen.sq_num_cf_insts = 2;
+
+ rdev->config.evergreen.sc_prim_fifo_size = 0x100;
+ rdev->config.evergreen.sc_hiz_tile_fifo_size = 0x30;
+ rdev->config.evergreen.sc_earlyz_tile_fifo_size = 0x130;
+ break;
+ case CHIP_CEDAR:
+ default:
+ rdev->config.evergreen.num_ses = 1;
+ rdev->config.evergreen.max_pipes = 2;
+ rdev->config.evergreen.max_tile_pipes = 2;
+ rdev->config.evergreen.max_simds = 2;
+ rdev->config.evergreen.max_backends = 1 * rdev->config.evergreen.num_ses;
+ rdev->config.evergreen.max_gprs = 256;
+ rdev->config.evergreen.max_threads = 192;
+ rdev->config.evergreen.max_gs_threads = 16;
+ rdev->config.evergreen.max_stack_entries = 256;
+ rdev->config.evergreen.sx_num_of_sets = 4;
+ rdev->config.evergreen.sx_max_export_size = 128;
+ rdev->config.evergreen.sx_max_export_pos_size = 32;
+ rdev->config.evergreen.sx_max_export_smx_size = 96;
+ rdev->config.evergreen.max_hw_contexts = 4;
+ rdev->config.evergreen.sq_num_cf_insts = 1;
+
+ rdev->config.evergreen.sc_prim_fifo_size = 0x40;
+ rdev->config.evergreen.sc_hiz_tile_fifo_size = 0x30;
+ rdev->config.evergreen.sc_earlyz_tile_fifo_size = 0x130;
+ break;
+ }
+
+ /* Initialize HDP */
+ for (i = 0, j = 0; i < 32; i++, j += 0x18) {
+ WREG32((0x2c14 + j), 0x00000000);
+ WREG32((0x2c18 + j), 0x00000000);
+ WREG32((0x2c1c + j), 0x00000000);
+ WREG32((0x2c20 + j), 0x00000000);
+ WREG32((0x2c24 + j), 0x00000000);
+ }
+
+ WREG32(GRBM_CNTL, GRBM_READ_TIMEOUT(0xff));
+
+ cc_gc_shader_pipe_config = RREG32(CC_GC_SHADER_PIPE_CONFIG) & ~2;
+
+ cc_gc_shader_pipe_config |=
+ INACTIVE_QD_PIPES((EVERGREEN_MAX_PIPES_MASK << rdev->config.evergreen.max_pipes)
+ & EVERGREEN_MAX_PIPES_MASK);
+ cc_gc_shader_pipe_config |=
+ INACTIVE_SIMDS((EVERGREEN_MAX_SIMDS_MASK << rdev->config.evergreen.max_simds)
+ & EVERGREEN_MAX_SIMDS_MASK);
+
+ cc_rb_backend_disable =
+ BACKEND_DISABLE((EVERGREEN_MAX_BACKENDS_MASK << rdev->config.evergreen.max_backends)
+ & EVERGREEN_MAX_BACKENDS_MASK);
+
+ mc_shared_chmap = RREG32(MC_SHARED_CHMAP);
+ mc_arb_ramcfg = RREG32(MC_ARB_RAMCFG);
+
+ switch (rdev->config.evergreen.max_tile_pipes) {
+ case 1:
+ default:
+ gb_addr_config |= NUM_PIPES(0);
+ break;
+ case 2:
+ gb_addr_config |= NUM_PIPES(1);
+ break;
+ case 4:
+ gb_addr_config |= NUM_PIPES(2);
+ break;
+ case 8:
+ gb_addr_config |= NUM_PIPES(3);
+ break;
+ }
+
+ gb_addr_config |= PIPE_INTERLEAVE_SIZE((mc_arb_ramcfg & BURSTLENGTH_MASK) >> BURSTLENGTH_SHIFT);
+ gb_addr_config |= BANK_INTERLEAVE_SIZE(0);
+ gb_addr_config |= NUM_SHADER_ENGINES(rdev->config.evergreen.num_ses - 1);
+ gb_addr_config |= SHADER_ENGINE_TILE_SIZE(1);
+ gb_addr_config |= NUM_GPUS(0); /* Hemlock? */
+ gb_addr_config |= MULTI_GPU_TILE_SIZE(2);
+
+ if (((mc_arb_ramcfg & NOOFCOLS_MASK) >> NOOFCOLS_SHIFT) > 2)
+ gb_addr_config |= ROW_SIZE(2);
+ else
+ gb_addr_config |= ROW_SIZE((mc_arb_ramcfg & NOOFCOLS_MASK) >> NOOFCOLS_SHIFT);
+
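+ /* some boards fuse off render backends; for these device ids the
+ * backend map is read back from the efuse straps instead.
+ */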
+ if (rdev->ddev->pdev->device == 0x689e) {
+ u32 efuse_straps_4;
+ u32 efuse_straps_3;
+ u8 efuse_box_bit_131_124;
+
+ WREG32(RCU_IND_INDEX, 0x204);
+ efuse_straps_4 = RREG32(RCU_IND_DATA);
+ WREG32(RCU_IND_INDEX, 0x203);
+ efuse_straps_3 = RREG32(RCU_IND_DATA);
+ efuse_box_bit_131_124 = (u8)(((efuse_straps_4 & 0xf) << 4) | ((efuse_straps_3 & 0xf0000000) >> 28));
+
+ switch(efuse_box_bit_131_124) {
+ case 0x00:
+ gb_backend_map = 0x76543210;
+ break;
+ case 0x55:
+ gb_backend_map = 0x77553311;
+ break;
+ case 0x56:
+ gb_backend_map = 0x77553300;
+ break;
+ case 0x59:
+ gb_backend_map = 0x77552211;
+ break;
+ case 0x66:
+ gb_backend_map = 0x77443300;
+ break;
+ case 0x99:
+ gb_backend_map = 0x66552211;
+ break;
+ case 0x5a:
+ gb_backend_map = 0x77552200;
+ break;
+ case 0xaa:
+ gb_backend_map = 0x66442200;
+ break;
+ case 0x95:
+ gb_backend_map = 0x66553311;
+ break;
+ default:
+ DRM_ERROR("bad backend map, using default\n");
+ gb_backend_map =
+ evergreen_get_tile_pipe_to_backend_map(rdev,
+ rdev->config.evergreen.max_tile_pipes,
+ rdev->config.evergreen.max_backends,
+ ((EVERGREEN_MAX_BACKENDS_MASK <<
+ rdev->config.evergreen.max_backends) &
+ EVERGREEN_MAX_BACKENDS_MASK));
+ break;
+ }
+ } else if (rdev->ddev->pdev->device == 0x68b9) {
+ u32 efuse_straps_3;
+ u8 efuse_box_bit_127_124;
+
+ WREG32(RCU_IND_INDEX, 0x203);
+ efuse_straps_3 = RREG32(RCU_IND_DATA);
+ efuse_box_bit_127_124 = (u8)((efuse_straps_3 & 0xF0000000) >> 28);
+
+ switch(efuse_box_bit_127_124) {
+ case 0x0:
+ gb_backend_map = 0x00003210;
+ break;
+ case 0x5:
+ case 0x6:
+ case 0x9:
+ case 0xa:
+ gb_backend_map = 0x00003311;
+ break;
+ default:
+ DRM_ERROR("bad backend map, using default\n");
+ gb_backend_map =
+ evergreen_get_tile_pipe_to_backend_map(rdev,
+ rdev->config.evergreen.max_tile_pipes,
+ rdev->config.evergreen.max_backends,
+ ((EVERGREEN_MAX_BACKENDS_MASK <<
+ rdev->config.evergreen.max_backends) &
+ EVERGREEN_MAX_BACKENDS_MASK));
+ break;
+ }
+ } else
+ gb_backend_map =
+ evergreen_get_tile_pipe_to_backend_map(rdev,
+ rdev->config.evergreen.max_tile_pipes,
+ rdev->config.evergreen.max_backends,
+ ((EVERGREEN_MAX_BACKENDS_MASK <<
+ rdev->config.evergreen.max_backends) &
+ EVERGREEN_MAX_BACKENDS_MASK));
+
+ WREG32(GB_BACKEND_MAP, gb_backend_map);
+ WREG32(GB_ADDR_CONFIG, gb_addr_config);
+ WREG32(DMIF_ADDR_CONFIG, gb_addr_config);
+ WREG32(HDP_ADDR_CONFIG, gb_addr_config);
+
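+ /* NUM_SHADER_ENGINES occupies bits 13:12 of GB_ADDR_CONFIG */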
+ num_shader_engines = ((RREG32(GB_ADDR_CONFIG) & NUM_SHADER_ENGINES(3)) >> 12) + 1;
+ grbm_gfx_index = INSTANCE_BROADCAST_WRITES;
+
+ for (i = 0; i < rdev->config.evergreen.num_ses; i++) {
+ u32 rb = cc_rb_backend_disable | (0xf0 << 16);
+ u32 sp = cc_gc_shader_pipe_config;
+ u32 gfx = grbm_gfx_index | SE_INDEX(i);
+
+ if (i == num_shader_engines) {
+ rb |= BACKEND_DISABLE(EVERGREEN_MAX_BACKENDS_MASK);
+ sp |= INACTIVE_SIMDS(EVERGREEN_MAX_SIMDS_MASK);
+ }
+
+ WREG32(GRBM_GFX_INDEX, gfx);
+ WREG32(RLC_GFX_INDEX, gfx);
+
+ WREG32(CC_RB_BACKEND_DISABLE, rb);
+ WREG32(CC_SYS_RB_BACKEND_DISABLE, rb);
+ WREG32(GC_USER_RB_BACKEND_DISABLE, rb);
+ WREG32(CC_GC_SHADER_PIPE_CONFIG, sp);
+ }
+
+ grbm_gfx_index |= SE_BROADCAST_WRITES;
+ WREG32(GRBM_GFX_INDEX, grbm_gfx_index);
+ WREG32(RLC_GFX_INDEX, grbm_gfx_index);
+
+ WREG32(CGTS_SYS_TCC_DISABLE, 0);
+ WREG32(CGTS_TCC_DISABLE, 0);
+ WREG32(CGTS_USER_SYS_TCC_DISABLE, 0);
+ WREG32(CGTS_USER_TCC_DISABLE, 0);
+
+ /* set HW defaults for 3D engine */
+ WREG32(CP_QUEUE_THRESHOLDS, (ROQ_IB1_START(0x16) |
+ ROQ_IB2_START(0x2b)));
+
+ WREG32(CP_MEQ_THRESHOLDS, STQ_SPLIT(0x30));
+
+ WREG32(TA_CNTL_AUX, (DISABLE_CUBE_ANISO |
+ SYNC_GRADIENT |
+ SYNC_WALKER |
+ SYNC_ALIGNER));
+
+ sx_debug_1 = RREG32(SX_DEBUG_1);
+ sx_debug_1 |= ENABLE_NEW_SMX_ADDRESS;
+ WREG32(SX_DEBUG_1, sx_debug_1);
+
+ smx_dc_ctl0 = RREG32(SMX_DC_CTL0);
+ smx_dc_ctl0 &= ~NUMBER_OF_SETS(0x1ff);
+ smx_dc_ctl0 |= NUMBER_OF_SETS(rdev->config.evergreen.sx_num_of_sets);
+ WREG32(SMX_DC_CTL0, smx_dc_ctl0);
+
+ WREG32(SX_EXPORT_BUFFER_SIZES, (COLOR_BUFFER_SIZE((rdev->config.evergreen.sx_max_export_size / 4) - 1) |
+ POSITION_BUFFER_SIZE((rdev->config.evergreen.sx_max_export_pos_size / 4) - 1) |
+ SMX_BUFFER_SIZE((rdev->config.evergreen.sx_max_export_smx_size / 4) - 1)));
+
+ WREG32(PA_SC_FIFO_SIZE, (SC_PRIM_FIFO_SIZE(rdev->config.evergreen.sc_prim_fifo_size) |
+ SC_HIZ_TILE_FIFO_SIZE(rdev->config.evergreen.sc_hiz_tile_fifo_size) |
+ SC_EARLYZ_TILE_FIFO_SIZE(rdev->config.evergreen.sc_earlyz_tile_fifo_size)));
+
+ WREG32(VGT_NUM_INSTANCES, 1);
+ WREG32(SPI_CONFIG_CNTL, 0);
+ WREG32(SPI_CONFIG_CNTL_1, VTX_DONE_DELAY(4));
+ WREG32(CP_PERFMON_CNTL, 0);
+
+ WREG32(SQ_MS_FIFO_SIZES, (CACHE_FIFO_SIZE(16 * rdev->config.evergreen.sq_num_cf_insts) |
+ FETCH_FIFO_HIWATER(0x4) |
+ DONE_FIFO_HIWATER(0xe0) |
+ ALU_UPDATE_FIFO_HIWATER(0x8)));
+
+ sq_config = RREG32(SQ_CONFIG);
+ sq_config &= ~(PS_PRIO(3) |
+ VS_PRIO(3) |
+ GS_PRIO(3) |
+ ES_PRIO(3));
+ sq_config |= (VC_ENABLE |
+ EXPORT_SRC_C |
+ PS_PRIO(0) |
+ VS_PRIO(1) |
+ GS_PRIO(2) |
+ ES_PRIO(3));
+
+ if (rdev->family == CHIP_CEDAR)
+ /* no vertex cache */
+ sq_config &= ~VC_ENABLE;
+
+ sq_lds_resource_mgmt = RREG32(SQ_LDS_RESOURCE_MGMT);
+
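+ /* split the GPR pool: 4 * 2 GPRs are held back for clause
+ * temporaries, the rest is dealt out in 32nds (PS 12, VS 6,
+ * GS/ES 4, HS/LS 3).
+ */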
+ sq_gpr_resource_mgmt_1 = NUM_PS_GPRS((rdev->config.evergreen.max_gprs - (4 * 2)) * 12 / 32);
+ sq_gpr_resource_mgmt_1 |= NUM_VS_GPRS((rdev->config.evergreen.max_gprs - (4 * 2)) * 6 / 32);
+ sq_gpr_resource_mgmt_1 |= NUM_CLAUSE_TEMP_GPRS(4);
+ sq_gpr_resource_mgmt_2 = NUM_GS_GPRS((rdev->config.evergreen.max_gprs - (4 * 2)) * 4 / 32);
+ sq_gpr_resource_mgmt_2 |= NUM_ES_GPRS((rdev->config.evergreen.max_gprs - (4 * 2)) * 4 / 32);
+ sq_gpr_resource_mgmt_3 = NUM_HS_GPRS((rdev->config.evergreen.max_gprs - (4 * 2)) * 3 / 32);
+ sq_gpr_resource_mgmt_3 |= NUM_LS_GPRS((rdev->config.evergreen.max_gprs - (4 * 2)) * 3 / 32);
+
+ if (rdev->family == CHIP_CEDAR)
+ ps_thread_count = 96;
+ else
+ ps_thread_count = 128;
+
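+ /* PS keeps a fixed thread budget; the leftover threads are shared
+ * by VS/GS/ES/HS/LS in sixths, rounded down to multiples of 8.
+ */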
+ sq_thread_resource_mgmt = NUM_PS_THREADS(ps_thread_count);
+ sq_thread_resource_mgmt |= NUM_VS_THREADS((((rdev->config.evergreen.max_threads - ps_thread_count) / 6) / 8) * 8);
+ sq_thread_resource_mgmt |= NUM_GS_THREADS((((rdev->config.evergreen.max_threads - ps_thread_count) / 6) / 8) * 8);
+ sq_thread_resource_mgmt |= NUM_ES_THREADS((((rdev->config.evergreen.max_threads - ps_thread_count) / 6) / 8) * 8);
+ sq_thread_resource_mgmt_2 = NUM_HS_THREADS((((rdev->config.evergreen.max_threads - ps_thread_count) / 6) / 8) * 8);
+ sq_thread_resource_mgmt_2 |= NUM_LS_THREADS((((rdev->config.evergreen.max_threads - ps_thread_count) / 6) / 8) * 8);
+
+ sq_stack_resource_mgmt_1 = NUM_PS_STACK_ENTRIES((rdev->config.evergreen.max_stack_entries * 1) / 6);
+ sq_stack_resource_mgmt_1 |= NUM_VS_STACK_ENTRIES((rdev->config.evergreen.max_stack_entries * 1) / 6);
+ sq_stack_resource_mgmt_2 = NUM_GS_STACK_ENTRIES((rdev->config.evergreen.max_stack_entries * 1) / 6);
+ sq_stack_resource_mgmt_2 |= NUM_ES_STACK_ENTRIES((rdev->config.evergreen.max_stack_entries * 1) / 6);
+ sq_stack_resource_mgmt_3 = NUM_HS_STACK_ENTRIES((rdev->config.evergreen.max_stack_entries * 1) / 6);
+ sq_stack_resource_mgmt_3 |= NUM_LS_STACK_ENTRIES((rdev->config.evergreen.max_stack_entries * 1) / 6);
+
+ WREG32(SQ_CONFIG, sq_config);
+ WREG32(SQ_GPR_RESOURCE_MGMT_1, sq_gpr_resource_mgmt_1);
+ WREG32(SQ_GPR_RESOURCE_MGMT_2, sq_gpr_resource_mgmt_2);
+ WREG32(SQ_GPR_RESOURCE_MGMT_3, sq_gpr_resource_mgmt_3);
+ WREG32(SQ_THREAD_RESOURCE_MGMT, sq_thread_resource_mgmt);
+ WREG32(SQ_THREAD_RESOURCE_MGMT_2, sq_thread_resource_mgmt_2);
+ WREG32(SQ_STACK_RESOURCE_MGMT_1, sq_stack_resource_mgmt_1);
+ WREG32(SQ_STACK_RESOURCE_MGMT_2, sq_stack_resource_mgmt_2);
+ WREG32(SQ_STACK_RESOURCE_MGMT_3, sq_stack_resource_mgmt_3);
+ WREG32(SQ_DYN_GPR_CNTL_PS_FLUSH_REQ, 0);
+ WREG32(SQ_LDS_RESOURCE_MGMT, sq_lds_resource_mgmt);
+
+ WREG32(PA_SC_FORCE_EOV_MAX_CNTS, (FORCE_EOV_MAX_CLK_CNT(4095) |
+ FORCE_EOV_MAX_REZ_CNT(255)));
+
+ if (rdev->family == CHIP_CEDAR)
+ vgt_cache_invalidation = CACHE_INVALIDATION(TC_ONLY);
+ else
+ vgt_cache_invalidation = CACHE_INVALIDATION(VC_AND_TC);
+ vgt_cache_invalidation |= AUTO_INVLD_EN(ES_AND_GS_AUTO);
+ WREG32(VGT_CACHE_INVALIDATION, vgt_cache_invalidation);
+
+ WREG32(VGT_GS_VERTEX_REUSE, 16);
+ WREG32(PA_SC_LINE_STIPPLE_STATE, 0);
+
+ WREG32(CB_PERF_CTR0_SEL_0, 0);
+ WREG32(CB_PERF_CTR0_SEL_1, 0);
+ WREG32(CB_PERF_CTR1_SEL_0, 0);
+ WREG32(CB_PERF_CTR1_SEL_1, 0);
+ WREG32(CB_PERF_CTR2_SEL_0, 0);
+ WREG32(CB_PERF_CTR2_SEL_1, 0);
+ WREG32(CB_PERF_CTR3_SEL_0, 0);
+ WREG32(CB_PERF_CTR3_SEL_1, 0);
+
+ hdp_host_path_cntl = RREG32(HDP_HOST_PATH_CNTL);
+ WREG32(HDP_HOST_PATH_CNTL, hdp_host_path_cntl);
+
+ WREG32(PA_CL_ENHANCE, CLIP_VTX_REORDER_ENA | NUM_CLIP_SEQ(3));
+
+ udelay(50);
}
int evergreen_mc_init(struct radeon_device *rdev)
@@ -476,26 +1307,627 @@ int evergreen_mc_init(struct radeon_device *rdev)
rdev->mc.mc_vram_size = RREG32(CONFIG_MEMSIZE) * 1024 * 1024;
rdev->mc.real_vram_size = RREG32(CONFIG_MEMSIZE) * 1024 * 1024;
rdev->mc.visible_vram_size = rdev->mc.aper_size;
- /* FIXME remove this once we support unmappable VRAM */
- if (rdev->mc.mc_vram_size > rdev->mc.aper_size) {
- rdev->mc.mc_vram_size = rdev->mc.aper_size;
- rdev->mc.real_vram_size = rdev->mc.aper_size;
- }
r600_vram_gtt_location(rdev, &rdev->mc);
radeon_update_bandwidth_info(rdev);
return 0;
}
-int evergreen_gpu_reset(struct radeon_device *rdev)
+bool evergreen_gpu_is_lockup(struct radeon_device *rdev)
{
/* FIXME: implement for evergreen */
+ return false;
+}
+
+static int evergreen_gpu_soft_reset(struct radeon_device *rdev)
+{
+ struct evergreen_mc_save save;
+ u32 srbm_reset = 0;
+ u32 grbm_reset = 0;
+
+ dev_info(rdev->dev, "GPU softreset\n");
+ dev_info(rdev->dev, " GRBM_STATUS=0x%08X\n",
+ RREG32(GRBM_STATUS));
+ dev_info(rdev->dev, " GRBM_STATUS_SE0=0x%08X\n",
+ RREG32(GRBM_STATUS_SE0));
+ dev_info(rdev->dev, " GRBM_STATUS_SE1=0x%08X\n",
+ RREG32(GRBM_STATUS_SE1));
+ dev_info(rdev->dev, " SRBM_STATUS=0x%08X\n",
+ RREG32(SRBM_STATUS));
+ evergreen_mc_stop(rdev, &save);
+ if (evergreen_mc_wait_for_idle(rdev)) {
+ dev_warn(rdev->dev, "Wait for MC idle timed out!\n");
+ }
+ /* Disable CP parsing/prefetching */
+ WREG32(CP_ME_CNTL, CP_ME_HALT | CP_PFP_HALT);
+
+ /* reset all the gfx blocks */
+ grbm_reset = (SOFT_RESET_CP |
+ SOFT_RESET_CB |
+ SOFT_RESET_DB |
+ SOFT_RESET_PA |
+ SOFT_RESET_SC |
+ SOFT_RESET_SPI |
+ SOFT_RESET_SH |
+ SOFT_RESET_SX |
+ SOFT_RESET_TC |
+ SOFT_RESET_TA |
+ SOFT_RESET_VC |
+ SOFT_RESET_VGT);
+
+ dev_info(rdev->dev, " GRBM_SOFT_RESET=0x%08X\n", grbm_reset);
+ WREG32(GRBM_SOFT_RESET, grbm_reset);
+ (void)RREG32(GRBM_SOFT_RESET);
+ udelay(50);
+ WREG32(GRBM_SOFT_RESET, 0);
+ (void)RREG32(GRBM_SOFT_RESET);
+
+ /* reset all the system blocks */
+ srbm_reset = SRBM_SOFT_RESET_ALL_MASK;
+
+ dev_info(rdev->dev, " SRBM_SOFT_RESET=0x%08X\n", srbm_reset);
+ WREG32(SRBM_SOFT_RESET, srbm_reset);
+ (void)RREG32(SRBM_SOFT_RESET);
+ udelay(50);
+ WREG32(SRBM_SOFT_RESET, 0);
+ (void)RREG32(SRBM_SOFT_RESET);
+ /* Wait a little for things to settle down */
+ udelay(50);
+ dev_info(rdev->dev, " GRBM_STATUS=0x%08X\n",
+ RREG32(GRBM_STATUS));
+ dev_info(rdev->dev, " GRBM_STATUS_SE0=0x%08X\n",
+ RREG32(GRBM_STATUS_SE0));
+ dev_info(rdev->dev, " GRBM_STATUS_SE1=0x%08X\n",
+ RREG32(GRBM_STATUS_SE1));
+ dev_info(rdev->dev, " SRBM_STATUS=0x%08X\n",
+ RREG32(SRBM_STATUS));
+ /* After reset we need to reinit the asic as the GPU often ends up
+ * in an incoherent state.
+ */
+ atom_asic_init(rdev->mode_info.atom_context);
+ evergreen_mc_resume(rdev, &save);
+ return 0;
+}
+
+int evergreen_asic_reset(struct radeon_device *rdev)
+{
+ return evergreen_gpu_soft_reset(rdev);
+}
+
+/* Interrupts */
+
+u32 evergreen_get_vblank_counter(struct radeon_device *rdev, int crtc)
+{
+ switch (crtc) {
+ case 0:
+ return RREG32(CRTC_STATUS_FRAME_COUNT + EVERGREEN_CRTC0_REGISTER_OFFSET);
+ case 1:
+ return RREG32(CRTC_STATUS_FRAME_COUNT + EVERGREEN_CRTC1_REGISTER_OFFSET);
+ case 2:
+ return RREG32(CRTC_STATUS_FRAME_COUNT + EVERGREEN_CRTC2_REGISTER_OFFSET);
+ case 3:
+ return RREG32(CRTC_STATUS_FRAME_COUNT + EVERGREEN_CRTC3_REGISTER_OFFSET);
+ case 4:
+ return RREG32(CRTC_STATUS_FRAME_COUNT + EVERGREEN_CRTC4_REGISTER_OFFSET);
+ case 5:
+ return RREG32(CRTC_STATUS_FRAME_COUNT + EVERGREEN_CRTC5_REGISTER_OFFSET);
+ default:
+ return 0;
+ }
+}
+
+void evergreen_disable_interrupt_state(struct radeon_device *rdev)
+{
+ u32 tmp;
+
+ WREG32(CP_INT_CNTL, 0);
+ WREG32(GRBM_INT_CNTL, 0);
+ WREG32(INT_MASK + EVERGREEN_CRTC0_REGISTER_OFFSET, 0);
+ WREG32(INT_MASK + EVERGREEN_CRTC1_REGISTER_OFFSET, 0);
+ WREG32(INT_MASK + EVERGREEN_CRTC2_REGISTER_OFFSET, 0);
+ WREG32(INT_MASK + EVERGREEN_CRTC3_REGISTER_OFFSET, 0);
+ WREG32(INT_MASK + EVERGREEN_CRTC4_REGISTER_OFFSET, 0);
+ WREG32(INT_MASK + EVERGREEN_CRTC5_REGISTER_OFFSET, 0);
+
+ WREG32(GRPH_INT_CONTROL + EVERGREEN_CRTC0_REGISTER_OFFSET, 0);
+ WREG32(GRPH_INT_CONTROL + EVERGREEN_CRTC1_REGISTER_OFFSET, 0);
+ WREG32(GRPH_INT_CONTROL + EVERGREEN_CRTC2_REGISTER_OFFSET, 0);
+ WREG32(GRPH_INT_CONTROL + EVERGREEN_CRTC3_REGISTER_OFFSET, 0);
+ WREG32(GRPH_INT_CONTROL + EVERGREEN_CRTC4_REGISTER_OFFSET, 0);
+ WREG32(GRPH_INT_CONTROL + EVERGREEN_CRTC5_REGISTER_OFFSET, 0);
+
+ WREG32(DACA_AUTODETECT_INT_CONTROL, 0);
+ WREG32(DACB_AUTODETECT_INT_CONTROL, 0);
+
+ tmp = RREG32(DC_HPD1_INT_CONTROL) & DC_HPDx_INT_POLARITY;
+ WREG32(DC_HPD1_INT_CONTROL, tmp);
+ tmp = RREG32(DC_HPD2_INT_CONTROL) & DC_HPDx_INT_POLARITY;
+ WREG32(DC_HPD2_INT_CONTROL, tmp);
+ tmp = RREG32(DC_HPD3_INT_CONTROL) & DC_HPDx_INT_POLARITY;
+ WREG32(DC_HPD3_INT_CONTROL, tmp);
+ tmp = RREG32(DC_HPD4_INT_CONTROL) & DC_HPDx_INT_POLARITY;
+ WREG32(DC_HPD4_INT_CONTROL, tmp);
+ tmp = RREG32(DC_HPD5_INT_CONTROL) & DC_HPDx_INT_POLARITY;
+ WREG32(DC_HPD5_INT_CONTROL, tmp);
+ tmp = RREG32(DC_HPD6_INT_CONTROL) & DC_HPDx_INT_POLARITY;
+ WREG32(DC_HPD6_INT_CONTROL, tmp);
+}
+
+int evergreen_irq_set(struct radeon_device *rdev)
+{
+ u32 cp_int_cntl = CNTX_BUSY_INT_ENABLE | CNTX_EMPTY_INT_ENABLE;
+ u32 crtc1 = 0, crtc2 = 0, crtc3 = 0, crtc4 = 0, crtc5 = 0, crtc6 = 0;
+ u32 hpd1, hpd2, hpd3, hpd4, hpd5, hpd6;
+ u32 grbm_int_cntl = 0;
+
+ if (!rdev->irq.installed) {
+ WARN(1, "Can't enable IRQ/MSI because no handler is installed.\n");
+ return -EINVAL;
+ }
+ /* don't enable anything if the ih is disabled */
+ if (!rdev->ih.enabled) {
+ r600_disable_interrupts(rdev);
+ /* force the active interrupt state to all disabled */
+ evergreen_disable_interrupt_state(rdev);
+ return 0;
+ }
+
+ hpd1 = RREG32(DC_HPD1_INT_CONTROL) & ~DC_HPDx_INT_EN;
+ hpd2 = RREG32(DC_HPD2_INT_CONTROL) & ~DC_HPDx_INT_EN;
+ hpd3 = RREG32(DC_HPD3_INT_CONTROL) & ~DC_HPDx_INT_EN;
+ hpd4 = RREG32(DC_HPD4_INT_CONTROL) & ~DC_HPDx_INT_EN;
+ hpd5 = RREG32(DC_HPD5_INT_CONTROL) & ~DC_HPDx_INT_EN;
+ hpd6 = RREG32(DC_HPD6_INT_CONTROL) & ~DC_HPDx_INT_EN;
+
+ if (rdev->irq.sw_int) {
+ DRM_DEBUG("evergreen_irq_set: sw int\n");
+ cp_int_cntl |= RB_INT_ENABLE;
+ }
+ if (rdev->irq.crtc_vblank_int[0]) {
+ DRM_DEBUG("evergreen_irq_set: vblank 0\n");
+ crtc1 |= VBLANK_INT_MASK;
+ }
+ if (rdev->irq.crtc_vblank_int[1]) {
+ DRM_DEBUG("evergreen_irq_set: vblank 1\n");
+ crtc2 |= VBLANK_INT_MASK;
+ }
+ if (rdev->irq.crtc_vblank_int[2]) {
+ DRM_DEBUG("evergreen_irq_set: vblank 2\n");
+ crtc3 |= VBLANK_INT_MASK;
+ }
+ if (rdev->irq.crtc_vblank_int[3]) {
+ DRM_DEBUG("evergreen_irq_set: vblank 3\n");
+ crtc4 |= VBLANK_INT_MASK;
+ }
+ if (rdev->irq.crtc_vblank_int[4]) {
+ DRM_DEBUG("evergreen_irq_set: vblank 4\n");
+ crtc5 |= VBLANK_INT_MASK;
+ }
+ if (rdev->irq.crtc_vblank_int[5]) {
+ DRM_DEBUG("evergreen_irq_set: vblank 5\n");
+ crtc6 |= VBLANK_INT_MASK;
+ }
+ if (rdev->irq.hpd[0]) {
+ DRM_DEBUG("evergreen_irq_set: hpd 1\n");
+ hpd1 |= DC_HPDx_INT_EN;
+ }
+ if (rdev->irq.hpd[1]) {
+ DRM_DEBUG("evergreen_irq_set: hpd 2\n");
+ hpd2 |= DC_HPDx_INT_EN;
+ }
+ if (rdev->irq.hpd[2]) {
+ DRM_DEBUG("evergreen_irq_set: hpd 3\n");
+ hpd3 |= DC_HPDx_INT_EN;
+ }
+ if (rdev->irq.hpd[3]) {
+ DRM_DEBUG("evergreen_irq_set: hpd 4\n");
+ hpd4 |= DC_HPDx_INT_EN;
+ }
+ if (rdev->irq.hpd[4]) {
+ DRM_DEBUG("evergreen_irq_set: hpd 5\n");
+ hpd5 |= DC_HPDx_INT_EN;
+ }
+ if (rdev->irq.hpd[5]) {
+ DRM_DEBUG("evergreen_irq_set: hpd 6\n");
+ hpd6 |= DC_HPDx_INT_EN;
+ }
+ if (rdev->irq.gui_idle) {
+ DRM_DEBUG("gui idle\n");
+ grbm_int_cntl |= GUI_IDLE_INT_ENABLE;
+ }
+
+ WREG32(CP_INT_CNTL, cp_int_cntl);
+ WREG32(GRBM_INT_CNTL, grbm_int_cntl);
+
+ WREG32(INT_MASK + EVERGREEN_CRTC0_REGISTER_OFFSET, crtc1);
+ WREG32(INT_MASK + EVERGREEN_CRTC1_REGISTER_OFFSET, crtc2);
+ WREG32(INT_MASK + EVERGREEN_CRTC2_REGISTER_OFFSET, crtc3);
+ WREG32(INT_MASK + EVERGREEN_CRTC3_REGISTER_OFFSET, crtc4);
+ WREG32(INT_MASK + EVERGREEN_CRTC4_REGISTER_OFFSET, crtc5);
+ WREG32(INT_MASK + EVERGREEN_CRTC5_REGISTER_OFFSET, crtc6);
+
+ WREG32(DC_HPD1_INT_CONTROL, hpd1);
+ WREG32(DC_HPD2_INT_CONTROL, hpd2);
+ WREG32(DC_HPD3_INT_CONTROL, hpd3);
+ WREG32(DC_HPD4_INT_CONTROL, hpd4);
+ WREG32(DC_HPD5_INT_CONTROL, hpd5);
+ WREG32(DC_HPD6_INT_CONTROL, hpd6);
+
return 0;
}
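+
+/* latch the display interrupt status words and ack the vblank, vline
+ * and hotplug sources that fired so they can retrigger.
+ */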
+static inline void evergreen_irq_ack(struct radeon_device *rdev,
+ u32 *disp_int,
+ u32 *disp_int_cont,
+ u32 *disp_int_cont2,
+ u32 *disp_int_cont3,
+ u32 *disp_int_cont4,
+ u32 *disp_int_cont5)
+{
+ u32 tmp;
+
+ *disp_int = RREG32(DISP_INTERRUPT_STATUS);
+ *disp_int_cont = RREG32(DISP_INTERRUPT_STATUS_CONTINUE);
+ *disp_int_cont2 = RREG32(DISP_INTERRUPT_STATUS_CONTINUE2);
+ *disp_int_cont3 = RREG32(DISP_INTERRUPT_STATUS_CONTINUE3);
+ *disp_int_cont4 = RREG32(DISP_INTERRUPT_STATUS_CONTINUE4);
+ *disp_int_cont5 = RREG32(DISP_INTERRUPT_STATUS_CONTINUE5);
+
+ if (*disp_int & LB_D1_VBLANK_INTERRUPT)
+ WREG32(VBLANK_STATUS + EVERGREEN_CRTC0_REGISTER_OFFSET, VBLANK_ACK);
+ if (*disp_int & LB_D1_VLINE_INTERRUPT)
+ WREG32(VLINE_STATUS + EVERGREEN_CRTC0_REGISTER_OFFSET, VLINE_ACK);
+
+ if (*disp_int_cont & LB_D2_VBLANK_INTERRUPT)
+ WREG32(VBLANK_STATUS + EVERGREEN_CRTC1_REGISTER_OFFSET, VBLANK_ACK);
+ if (*disp_int_cont & LB_D2_VLINE_INTERRUPT)
+ WREG32(VLINE_STATUS + EVERGREEN_CRTC1_REGISTER_OFFSET, VLINE_ACK);
+
+ if (*disp_int_cont2 & LB_D3_VBLANK_INTERRUPT)
+ WREG32(VBLANK_STATUS + EVERGREEN_CRTC2_REGISTER_OFFSET, VBLANK_ACK);
+ if (*disp_int_cont2 & LB_D3_VLINE_INTERRUPT)
+ WREG32(VLINE_STATUS + EVERGREEN_CRTC2_REGISTER_OFFSET, VLINE_ACK);
+
+ if (*disp_int_cont3 & LB_D4_VBLANK_INTERRUPT)
+ WREG32(VBLANK_STATUS + EVERGREEN_CRTC3_REGISTER_OFFSET, VBLANK_ACK);
+ if (*disp_int_cont3 & LB_D4_VLINE_INTERRUPT)
+ WREG32(VLINE_STATUS + EVERGREEN_CRTC3_REGISTER_OFFSET, VLINE_ACK);
+
+ if (*disp_int_cont4 & LB_D5_VBLANK_INTERRUPT)
+ WREG32(VBLANK_STATUS + EVERGREEN_CRTC4_REGISTER_OFFSET, VBLANK_ACK);
+ if (*disp_int_cont4 & LB_D5_VLINE_INTERRUPT)
+ WREG32(VLINE_STATUS + EVERGREEN_CRTC4_REGISTER_OFFSET, VLINE_ACK);
+
+ if (*disp_int_cont5 & LB_D6_VBLANK_INTERRUPT)
+ WREG32(VBLANK_STATUS + EVERGREEN_CRTC5_REGISTER_OFFSET, VBLANK_ACK);
+ if (*disp_int_cont5 & LB_D6_VLINE_INTERRUPT)
+ WREG32(VLINE_STATUS + EVERGREEN_CRTC5_REGISTER_OFFSET, VLINE_ACK);
+
+ if (*disp_int & DC_HPD1_INTERRUPT) {
+ tmp = RREG32(DC_HPD1_INT_CONTROL);
+ tmp |= DC_HPDx_INT_ACK;
+ WREG32(DC_HPD1_INT_CONTROL, tmp);
+ }
+ if (*disp_int_cont & DC_HPD2_INTERRUPT) {
+ tmp = RREG32(DC_HPD2_INT_CONTROL);
+ tmp |= DC_HPDx_INT_ACK;
+ WREG32(DC_HPD2_INT_CONTROL, tmp);
+ }
+ if (*disp_int_cont2 & DC_HPD3_INTERRUPT) {
+ tmp = RREG32(DC_HPD3_INT_CONTROL);
+ tmp |= DC_HPDx_INT_ACK;
+ WREG32(DC_HPD3_INT_CONTROL, tmp);
+ }
+ if (*disp_int_cont3 & DC_HPD4_INTERRUPT) {
+ tmp = RREG32(DC_HPD4_INT_CONTROL);
+ tmp |= DC_HPDx_INT_ACK;
+ WREG32(DC_HPD4_INT_CONTROL, tmp);
+ }
+ if (*disp_int_cont4 & DC_HPD5_INTERRUPT) {
+ tmp = RREG32(DC_HPD5_INT_CONTROL);
+ tmp |= DC_HPDx_INT_ACK;
+ WREG32(DC_HPD5_INT_CONTROL, tmp);
+ }
+ if (*disp_int_cont5 & DC_HPD6_INTERRUPT) {
+ tmp = RREG32(DC_HPD6_INT_CONTROL);
+ tmp |= DC_HPDx_INT_ACK;
+ WREG32(DC_HPD6_INT_CONTROL, tmp);
+ }
+}
+
+void evergreen_irq_disable(struct radeon_device *rdev)
+{
+ u32 disp_int, disp_int_cont, disp_int_cont2;
+ u32 disp_int_cont3, disp_int_cont4, disp_int_cont5;
+
+ r600_disable_interrupts(rdev);
+ /* Wait and acknowledge irq */
+ mdelay(1);
+ evergreen_irq_ack(rdev, &disp_int, &disp_int_cont, &disp_int_cont2,
+ &disp_int_cont3, &disp_int_cont4, &disp_int_cont5);
+ evergreen_disable_interrupt_state(rdev);
+}
+
+static void evergreen_irq_suspend(struct radeon_device *rdev)
+{
+ evergreen_irq_disable(rdev);
+ r600_rlc_stop(rdev);
+}
+
+static inline u32 evergreen_get_ih_wptr(struct radeon_device *rdev)
+{
+ u32 wptr, tmp;
+
+ /* XXX use writeback */
+ wptr = RREG32(IH_RB_WPTR);
+
+ if (wptr & RB_OVERFLOW) {
+ /* When a ring buffer overflow happens, start parsing interrupts
+ * from the last vector that was not overwritten (wptr + 16).
+ * Hopefully this lets us catch up.
+ */
+ dev_warn(rdev->dev, "IH ring buffer overflow (0x%08X, %d, %d)\n",
+ wptr, rdev->ih.rptr, (wptr + 16) & rdev->ih.ptr_mask);
+ rdev->ih.rptr = (wptr + 16) & rdev->ih.ptr_mask;
+ tmp = RREG32(IH_RB_CNTL);
+ tmp |= IH_WPTR_OVERFLOW_CLEAR;
+ WREG32(IH_RB_CNTL, tmp);
+ }
+ return (wptr & rdev->ih.ptr_mask);
+}
+
+int evergreen_irq_process(struct radeon_device *rdev)
+{
+ u32 wptr = evergreen_get_ih_wptr(rdev);
+ u32 rptr = rdev->ih.rptr;
+ u32 src_id, src_data;
+ u32 ring_index;
+ u32 disp_int, disp_int_cont, disp_int_cont2;
+ u32 disp_int_cont3, disp_int_cont4, disp_int_cont5;
+ unsigned long flags;
+ bool queue_hotplug = false;
+
+ DRM_DEBUG("evergreen_irq_process start: rptr %d, wptr %d\n", rptr, wptr);
+ if (!rdev->ih.enabled)
+ return IRQ_NONE;
+
+ spin_lock_irqsave(&rdev->ih.lock, flags);
+
+ if (rptr == wptr) {
+ spin_unlock_irqrestore(&rdev->ih.lock, flags);
+ return IRQ_NONE;
+ }
+ if (rdev->shutdown) {
+ spin_unlock_irqrestore(&rdev->ih.lock, flags);
+ return IRQ_NONE;
+ }
+
+restart_ih:
+ /* display interrupts */
+ evergreen_irq_ack(rdev, &disp_int, &disp_int_cont, &disp_int_cont2,
+ &disp_int_cont3, &disp_int_cont4, &disp_int_cont5);
+
+ rdev->ih.wptr = wptr;
+ while (rptr != wptr) {
+ /* wptr/rptr are in bytes! */
+ ring_index = rptr / 4;
+ src_id = rdev->ih.ring[ring_index] & 0xff;
+ src_data = rdev->ih.ring[ring_index + 1] & 0xfffffff;
+
+ switch (src_id) {
+ case 1: /* D1 vblank/vline */
+ switch (src_data) {
+ case 0: /* D1 vblank */
+ if (disp_int & LB_D1_VBLANK_INTERRUPT) {
+ drm_handle_vblank(rdev->ddev, 0);
+ wake_up(&rdev->irq.vblank_queue);
+ disp_int &= ~LB_D1_VBLANK_INTERRUPT;
+ DRM_DEBUG("IH: D1 vblank\n");
+ }
+ break;
+ case 1: /* D1 vline */
+ if (disp_int & LB_D1_VLINE_INTERRUPT) {
+ disp_int &= ~LB_D1_VLINE_INTERRUPT;
+ DRM_DEBUG("IH: D1 vline\n");
+ }
+ break;
+ default:
+ DRM_DEBUG("Unhandled interrupt: %d %d\n", src_id, src_data);
+ break;
+ }
+ break;
+ case 2: /* D2 vblank/vline */
+ switch (src_data) {
+ case 0: /* D2 vblank */
+ if (disp_int_cont & LB_D2_VBLANK_INTERRUPT) {
+ drm_handle_vblank(rdev->ddev, 1);
+ wake_up(&rdev->irq.vblank_queue);
+ disp_int_cont &= ~LB_D2_VBLANK_INTERRUPT;
+ DRM_DEBUG("IH: D2 vblank\n");
+ }
+ break;
+ case 1: /* D2 vline */
+ if (disp_int_cont & LB_D2_VLINE_INTERRUPT) {
+ disp_int_cont &= ~LB_D2_VLINE_INTERRUPT;
+ DRM_DEBUG("IH: D2 vline\n");
+ }
+ break;
+ default:
+ DRM_DEBUG("Unhandled interrupt: %d %d\n", src_id, src_data);
+ break;
+ }
+ break;
+ case 3: /* D3 vblank/vline */
+ switch (src_data) {
+ case 0: /* D3 vblank */
+ if (disp_int_cont2 & LB_D3_VBLANK_INTERRUPT) {
+ drm_handle_vblank(rdev->ddev, 2);
+ wake_up(&rdev->irq.vblank_queue);
+ disp_int_cont2 &= ~LB_D3_VBLANK_INTERRUPT;
+ DRM_DEBUG("IH: D3 vblank\n");
+ }
+ break;
+ case 1: /* D3 vline */
+ if (disp_int_cont2 & LB_D3_VLINE_INTERRUPT) {
+ disp_int_cont2 &= ~LB_D3_VLINE_INTERRUPT;
+ DRM_DEBUG("IH: D3 vline\n");
+ }
+ break;
+ default:
+ DRM_DEBUG("Unhandled interrupt: %d %d\n", src_id, src_data);
+ break;
+ }
+ break;
+ case 4: /* D4 vblank/vline */
+ switch (src_data) {
+ case 0: /* D4 vblank */
+ if (disp_int_cont3 & LB_D4_VBLANK_INTERRUPT) {
+ drm_handle_vblank(rdev->ddev, 3);
+ wake_up(&rdev->irq.vblank_queue);
+ disp_int_cont3 &= ~LB_D4_VBLANK_INTERRUPT;
+ DRM_DEBUG("IH: D4 vblank\n");
+ }
+ break;
+ case 1: /* D4 vline */
+ if (disp_int_cont3 & LB_D4_VLINE_INTERRUPT) {
+ disp_int_cont3 &= ~LB_D4_VLINE_INTERRUPT;
+ DRM_DEBUG("IH: D4 vline\n");
+ }
+ break;
+ default:
+ DRM_DEBUG("Unhandled interrupt: %d %d\n", src_id, src_data);
+ break;
+ }
+ break;
+ case 5: /* D5 vblank/vline */
+ switch (src_data) {
+ case 0: /* D5 vblank */
+ if (disp_int_cont4 & LB_D5_VBLANK_INTERRUPT) {
+ drm_handle_vblank(rdev->ddev, 4);
+ wake_up(&rdev->irq.vblank_queue);
+ disp_int_cont4 &= ~LB_D5_VBLANK_INTERRUPT;
+ DRM_DEBUG("IH: D5 vblank\n");
+ }
+ break;
+ case 1: /* D5 vline */
+ if (disp_int_cont4 & LB_D5_VLINE_INTERRUPT) {
+ disp_int_cont4 &= ~LB_D5_VLINE_INTERRUPT;
+ DRM_DEBUG("IH: D5 vline\n");
+ }
+ break;
+ default:
+ DRM_DEBUG("Unhandled interrupt: %d %d\n", src_id, src_data);
+ break;
+ }
+ break;
+ case 6: /* D6 vblank/vline */
+ switch (src_data) {
+ case 0: /* D6 vblank */
+ if (disp_int_cont5 & LB_D6_VBLANK_INTERRUPT) {
+ drm_handle_vblank(rdev->ddev, 5);
+ wake_up(&rdev->irq.vblank_queue);
+ disp_int_cont5 &= ~LB_D6_VBLANK_INTERRUPT;
+ DRM_DEBUG("IH: D6 vblank\n");
+ }
+ break;
+ case 1: /* D6 vline */
+ if (disp_int_cont5 & LB_D6_VLINE_INTERRUPT) {
+ disp_int_cont5 &= ~LB_D6_VLINE_INTERRUPT;
+ DRM_DEBUG("IH: D6 vline\n");
+ }
+ break;
+ default:
+ DRM_DEBUG("Unhandled interrupt: %d %d\n", src_id, src_data);
+ break;
+ }
+ break;
+ case 42: /* HPD hotplug */
+ switch (src_data) {
+ case 0:
+ if (disp_int & DC_HPD1_INTERRUPT) {
+ disp_int &= ~DC_HPD1_INTERRUPT;
+ queue_hotplug = true;
+ DRM_DEBUG("IH: HPD1\n");
+ }
+ break;
+ case 1:
+ if (disp_int_cont & DC_HPD2_INTERRUPT) {
+ disp_int_cont &= ~DC_HPD2_INTERRUPT;
+ queue_hotplug = true;
+ DRM_DEBUG("IH: HPD2\n");
+ }
+ break;
+ case 2:
+ if (disp_int_cont2 & DC_HPD3_INTERRUPT) {
+ disp_int_cont2 &= ~DC_HPD3_INTERRUPT;
+ queue_hotplug = true;
+ DRM_DEBUG("IH: HPD3\n");
+ }
+ break;
+ case 3:
+ if (disp_int_cont3 & DC_HPD4_INTERRUPT) {
+ disp_int_cont3 &= ~DC_HPD4_INTERRUPT;
+ queue_hotplug = true;
+ DRM_DEBUG("IH: HPD4\n");
+ }
+ break;
+ case 4:
+ if (disp_int_cont4 & DC_HPD5_INTERRUPT) {
+ disp_int_cont4 &= ~DC_HPD5_INTERRUPT;
+ queue_hotplug = true;
+ DRM_DEBUG("IH: HPD5\n");
+ }
+ break;
+ case 5:
+ if (disp_int_cont5 & DC_HPD6_INTERRUPT) {
+ disp_int_cont5 &= ~DC_HPD6_INTERRUPT;
+ queue_hotplug = true;
+ DRM_DEBUG("IH: HPD6\n");
+ }
+ break;
+ default:
+ DRM_DEBUG("Unhandled interrupt: %d %d\n", src_id, src_data);
+ break;
+ }
+ break;
+ case 176: /* CP_INT in ring buffer */
+ case 177: /* CP_INT in IB1 */
+ case 178: /* CP_INT in IB2 */
+ DRM_DEBUG("IH: CP int: 0x%08x\n", src_data);
+ radeon_fence_process(rdev);
+ break;
+ case 181: /* CP EOP event */
+ DRM_DEBUG("IH: CP EOP\n");
+ break;
+ case 233: /* GUI IDLE */
+ DRM_DEBUG("IH: GUI idle\n");
+ rdev->pm.gui_idle = true;
+ wake_up(&rdev->irq.idle_queue);
+ break;
+ default:
+ DRM_DEBUG("Unhandled interrupt: %d %d\n", src_id, src_data);
+ break;
+ }
+
+ /* wptr/rptr are in bytes! */
+ rptr += 16;
+ rptr &= rdev->ih.ptr_mask;
+ }
+ /* make sure wptr hasn't changed while processing */
+ wptr = evergreen_get_ih_wptr(rdev);
+ if (wptr != rdev->ih.wptr)
+ goto restart_ih;
+ if (queue_hotplug)
+ queue_work(rdev->wq, &rdev->hotplug_work);
+ rdev->ih.rptr = rptr;
+ WREG32(IH_RB_RPTR, rdev->ih.rptr);
+ spin_unlock_irqrestore(&rdev->ih.lock, flags);
+ return IRQ_HANDLED;
+}
+
static int evergreen_startup(struct radeon_device *rdev)
{
-#if 0
int r;
if (!rdev->me_fw || !rdev->pfp_fw || !rdev->rlc_fw) {
@@ -505,17 +1937,15 @@ static int evergreen_startup(struct radeon_device *rdev)
return r;
}
}
-#endif
+
evergreen_mc_program(rdev);
-#if 0
if (rdev->flags & RADEON_IS_AGP) {
- evergreem_agp_enable(rdev);
+ evergreen_agp_enable(rdev);
} else {
r = evergreen_pcie_gart_enable(rdev);
if (r)
return r;
}
-#endif
evergreen_gpu_init(rdev);
#if 0
if (!rdev->r600_blit.shader_obj) {
@@ -536,6 +1966,7 @@ static int evergreen_startup(struct radeon_device *rdev)
DRM_ERROR("failed to pin blit object %d\n", r);
return r;
}
+#endif
/* Enable IRQ */
r = r600_irq_init(rdev);
@@ -544,7 +1975,7 @@ static int evergreen_startup(struct radeon_device *rdev)
radeon_irq_kms_fini(rdev);
return r;
}
- r600_irq_set(rdev);
+ evergreen_irq_set(rdev);
r = radeon_ring_init(rdev, rdev->cp.ring_size);
if (r)
@@ -552,12 +1983,12 @@ static int evergreen_startup(struct radeon_device *rdev)
r = evergreen_cp_load_microcode(rdev);
if (r)
return r;
- r = r600_cp_resume(rdev);
+ r = evergreen_cp_resume(rdev);
if (r)
return r;
/* the write back buffer is not vital, so don't worry about failure */
r600_wb_enable(rdev);
-#endif
+
return 0;
}
@@ -582,13 +2013,13 @@ int evergreen_resume(struct radeon_device *rdev)
DRM_ERROR("evergreen startup failed on resume\n");
return r;
}
-#if 0
+
r = r600_ib_test(rdev);
if (r) {
DRM_ERROR("radeon: failed testing IB (%d).\n", r);
return r;
}
-#endif
+
return r;
}
@@ -597,12 +2028,14 @@ int evergreen_suspend(struct radeon_device *rdev)
{
#if 0
int r;
-
+#endif
/* FIXME: we should wait for ring to be empty */
r700_cp_stop(rdev);
rdev->cp.ready = false;
+ evergreen_irq_suspend(rdev);
r600_wb_disable(rdev);
evergreen_pcie_gart_disable(rdev);
+#if 0
/* unpin shaders bo */
r = radeon_bo_reserve(rdev->r600_blit.shader_obj, false);
if (likely(r == 0)) {
@@ -682,8 +2115,6 @@ int evergreen_init(struct radeon_device *rdev)
r = radeon_clocks_init(rdev);
if (r)
return r;
- /* Initialize power management */
- radeon_pm_init(rdev);
/* Fence driver */
r = radeon_fence_driver_init(rdev);
if (r)
@@ -702,7 +2133,7 @@ int evergreen_init(struct radeon_device *rdev)
r = radeon_bo_init(rdev);
if (r)
return r;
-#if 0
+
r = radeon_irq_kms_init(rdev);
if (r)
return r;
@@ -716,14 +2147,16 @@ int evergreen_init(struct radeon_device *rdev)
r = r600_pcie_gart_init(rdev);
if (r)
return r;
-#endif
+
rdev->accel_working = false;
r = evergreen_startup(rdev);
if (r) {
- evergreen_suspend(rdev);
- /*r600_wb_fini(rdev);*/
- /*radeon_ring_fini(rdev);*/
- /*evergreen_pcie_gart_fini(rdev);*/
+ dev_err(rdev->dev, "disabling GPU acceleration\n");
+ r700_cp_fini(rdev);
+ r600_wb_fini(rdev);
+ r600_irq_fini(rdev);
+ radeon_irq_kms_fini(rdev);
+ evergreen_pcie_gart_fini(rdev);
rdev->accel_working = false;
}
if (rdev->accel_working) {
@@ -743,16 +2176,12 @@ int evergreen_init(struct radeon_device *rdev)
void evergreen_fini(struct radeon_device *rdev)
{
- radeon_pm_fini(rdev);
- evergreen_suspend(rdev);
-#if 0
- r600_blit_fini(rdev);
+ /*r600_blit_fini(rdev);*/
+ r700_cp_fini(rdev);
+ r600_wb_fini(rdev);
r600_irq_fini(rdev);
radeon_irq_kms_fini(rdev);
- radeon_ring_fini(rdev);
- r600_wb_fini(rdev);
evergreen_pcie_gart_fini(rdev);
-#endif
radeon_gem_fini(rdev);
radeon_fence_driver_fini(rdev);
radeon_clocks_fini(rdev);
diff --git a/drivers/gpu/drm/radeon/evergreen_reg.h b/drivers/gpu/drm/radeon/evergreen_reg.h
index f7c7c964343..af86af836f1 100644
--- a/drivers/gpu/drm/radeon/evergreen_reg.h
+++ b/drivers/gpu/drm/radeon/evergreen_reg.h
@@ -164,8 +164,12 @@
#define EVERGREEN_CRTC5_REGISTER_OFFSET (0x129f0 - 0x6df0)
/* CRTC blocks at 0x6df0, 0x79f0, 0x105f0, 0x111f0, 0x11df0, 0x129f0 */
+#define EVERGREEN_CRTC_V_BLANK_START_END 0x6e34
#define EVERGREEN_CRTC_CONTROL 0x6e70
# define EVERGREEN_CRTC_MASTER_EN (1 << 0)
+# define EVERGREEN_CRTC_DISP_READ_REQUEST_DISABLE (1 << 24)
+#define EVERGREEN_CRTC_STATUS 0x6e8c
+#define EVERGREEN_CRTC_STATUS_POSITION 0x6e90
#define EVERGREEN_CRTC_UPDATE_LOCK 0x6ed4
#define EVERGREEN_DC_GPIO_HPD_MASK 0x64b0
diff --git a/drivers/gpu/drm/radeon/evergreend.h b/drivers/gpu/drm/radeon/evergreend.h
new file mode 100644
index 00000000000..93e9e17ad54
--- /dev/null
+++ b/drivers/gpu/drm/radeon/evergreend.h
@@ -0,0 +1,556 @@
+/*
+ * Copyright 2010 Advanced Micro Devices, Inc.
+ *
+ * Permission is hereby granted, free of charge, to any person obtaining a
+ * copy of this software and associated documentation files (the "Software"),
+ * to deal in the Software without restriction, including without limitation
+ * the rights to use, copy, modify, merge, publish, distribute, sublicense,
+ * and/or sell copies of the Software, and to permit persons to whom the
+ * Software is furnished to do so, subject to the following conditions:
+ *
+ * The above copyright notice and this permission notice shall be included in
+ * all copies or substantial portions of the Software.
+ *
+ * THE SOFTWARE IS PROVIDED "AS IS", WITHOUT WARRANTY OF ANY KIND, EXPRESS OR
+ * IMPLIED, INCLUDING BUT NOT LIMITED TO THE WARRANTIES OF MERCHANTABILITY,
+ * FITNESS FOR A PARTICULAR PURPOSE AND NONINFRINGEMENT. IN NO EVENT SHALL
+ * THE COPYRIGHT HOLDER(S) OR AUTHOR(S) BE LIABLE FOR ANY CLAIM, DAMAGES OR
+ * OTHER LIABILITY, WHETHER IN AN ACTION OF CONTRACT, TORT OR OTHERWISE,
+ * ARISING FROM, OUT OF OR IN CONNECTION WITH THE SOFTWARE OR THE USE OR
+ * OTHER DEALINGS IN THE SOFTWARE.
+ *
+ * Authors: Alex Deucher
+ */
+#ifndef EVERGREEND_H
+#define EVERGREEND_H
+
+#define EVERGREEN_MAX_SH_GPRS 256
+#define EVERGREEN_MAX_TEMP_GPRS 16
+#define EVERGREEN_MAX_SH_THREADS 256
+#define EVERGREEN_MAX_SH_STACK_ENTRIES 4096
+#define EVERGREEN_MAX_FRC_EOV_CNT 16384
+#define EVERGREEN_MAX_BACKENDS 8
+#define EVERGREEN_MAX_BACKENDS_MASK 0xFF
+#define EVERGREEN_MAX_SIMDS 16
+#define EVERGREEN_MAX_SIMDS_MASK 0xFFFF
+#define EVERGREEN_MAX_PIPES 8
+#define EVERGREEN_MAX_PIPES_MASK 0xFF
+#define EVERGREEN_MAX_LDS_NUM 0xFFFF
+
+/* Registers */
+
+#define RCU_IND_INDEX 0x100
+#define RCU_IND_DATA 0x104
+
+#define GRBM_GFX_INDEX 0x802C
+#define INSTANCE_INDEX(x) ((x) << 0)
+#define SE_INDEX(x) ((x) << 16)
+#define INSTANCE_BROADCAST_WRITES (1 << 30)
+#define SE_BROADCAST_WRITES (1 << 31)
+#define RLC_GFX_INDEX 0x3fC4
+#define CC_GC_SHADER_PIPE_CONFIG 0x8950
+#define WRITE_DIS (1 << 0)
+#define CC_RB_BACKEND_DISABLE 0x98F4
+#define BACKEND_DISABLE(x) ((x) << 16)
+#define GB_ADDR_CONFIG 0x98F8
+#define NUM_PIPES(x) ((x) << 0)
+#define PIPE_INTERLEAVE_SIZE(x) ((x) << 4)
+#define BANK_INTERLEAVE_SIZE(x) ((x) << 8)
+#define NUM_SHADER_ENGINES(x) ((x) << 12)
+#define SHADER_ENGINE_TILE_SIZE(x) ((x) << 16)
+#define NUM_GPUS(x) ((x) << 20)
+#define MULTI_GPU_TILE_SIZE(x) ((x) << 24)
+#define ROW_SIZE(x) ((x) << 28)
+#define GB_BACKEND_MAP 0x98FC
+#define DMIF_ADDR_CONFIG 0xBD4
+#define HDP_ADDR_CONFIG 0x2F48
+
+#define CC_SYS_RB_BACKEND_DISABLE 0x3F88
+#define GC_USER_RB_BACKEND_DISABLE 0x9B7C
+
+#define CGTS_SYS_TCC_DISABLE 0x3F90
+#define CGTS_TCC_DISABLE 0x9148
+#define CGTS_USER_SYS_TCC_DISABLE 0x3F94
+#define CGTS_USER_TCC_DISABLE 0x914C
+
+#define CONFIG_MEMSIZE 0x5428
+
+#define CP_ME_CNTL 0x86D8
+#define CP_ME_HALT (1 << 28)
+#define CP_PFP_HALT (1 << 26)
+#define CP_ME_RAM_DATA 0xC160
+#define CP_ME_RAM_RADDR 0xC158
+#define CP_ME_RAM_WADDR 0xC15C
+#define CP_MEQ_THRESHOLDS 0x8764
+#define STQ_SPLIT(x) ((x) << 0)
+#define CP_PERFMON_CNTL 0x87FC
+#define CP_PFP_UCODE_ADDR 0xC150
+#define CP_PFP_UCODE_DATA 0xC154
+#define CP_QUEUE_THRESHOLDS 0x8760
+#define ROQ_IB1_START(x) ((x) << 0)
+#define ROQ_IB2_START(x) ((x) << 8)
+#define CP_RB_BASE 0xC100
+#define CP_RB_CNTL 0xC104
+#define RB_BUFSZ(x) ((x) << 0)
+#define RB_BLKSZ(x) ((x) << 8)
+#define RB_NO_UPDATE (1 << 27)
+#define RB_RPTR_WR_ENA (1 << 31)
+#define BUF_SWAP_32BIT (2 << 16)
+#define CP_RB_RPTR 0x8700
+#define CP_RB_RPTR_ADDR 0xC10C
+#define CP_RB_RPTR_ADDR_HI 0xC110
+#define CP_RB_RPTR_WR 0xC108
+#define CP_RB_WPTR 0xC114
+#define CP_RB_WPTR_ADDR 0xC118
+#define CP_RB_WPTR_ADDR_HI 0xC11C
+#define CP_RB_WPTR_DELAY 0x8704
+#define CP_SEM_WAIT_TIMER 0x85BC
+#define CP_DEBUG 0xC1FC
+
+
+#define GC_USER_SHADER_PIPE_CONFIG 0x8954
+#define INACTIVE_QD_PIPES(x) ((x) << 8)
+#define INACTIVE_QD_PIPES_MASK 0x0000FF00
+#define INACTIVE_SIMDS(x) ((x) << 16)
+#define INACTIVE_SIMDS_MASK 0x00FF0000
+
+#define GRBM_CNTL 0x8000
+#define GRBM_READ_TIMEOUT(x) ((x) << 0)
+#define GRBM_SOFT_RESET 0x8020
+#define SOFT_RESET_CP (1 << 0)
+#define SOFT_RESET_CB (1 << 1)
+#define SOFT_RESET_DB (1 << 3)
+#define SOFT_RESET_PA (1 << 5)
+#define SOFT_RESET_SC (1 << 6)
+#define SOFT_RESET_SPI (1 << 8)
+#define SOFT_RESET_SH (1 << 9)
+#define SOFT_RESET_SX (1 << 10)
+#define SOFT_RESET_TC (1 << 11)
+#define SOFT_RESET_TA (1 << 12)
+#define SOFT_RESET_VC (1 << 13)
+#define SOFT_RESET_VGT (1 << 14)
+
+#define GRBM_STATUS 0x8010
+#define CMDFIFO_AVAIL_MASK 0x0000000F
+#define SRBM_RQ_PENDING (1 << 5)
+#define CF_RQ_PENDING (1 << 7)
+#define PF_RQ_PENDING (1 << 8)
+#define GRBM_EE_BUSY (1 << 10)
+#define SX_CLEAN (1 << 11)
+#define DB_CLEAN (1 << 12)
+#define CB_CLEAN (1 << 13)
+#define TA_BUSY (1 << 14)
+#define VGT_BUSY_NO_DMA (1 << 16)
+#define VGT_BUSY (1 << 17)
+#define SX_BUSY (1 << 20)
+#define SH_BUSY (1 << 21)
+#define SPI_BUSY (1 << 22)
+#define SC_BUSY (1 << 24)
+#define PA_BUSY (1 << 25)
+#define DB_BUSY (1 << 26)
+#define CP_COHERENCY_BUSY (1 << 28)
+#define CP_BUSY (1 << 29)
+#define CB_BUSY (1 << 30)
+#define GUI_ACTIVE (1 << 31)
+#define GRBM_STATUS_SE0 0x8014
+#define GRBM_STATUS_SE1 0x8018
+#define SE_SX_CLEAN (1 << 0)
+#define SE_DB_CLEAN (1 << 1)
+#define SE_CB_CLEAN (1 << 2)
+#define SE_TA_BUSY (1 << 25)
+#define SE_SX_BUSY (1 << 26)
+#define SE_SPI_BUSY (1 << 27)
+#define SE_SH_BUSY (1 << 28)
+#define SE_SC_BUSY (1 << 29)
+#define SE_DB_BUSY (1 << 30)
+#define SE_CB_BUSY (1 << 31)
+
+#define HDP_HOST_PATH_CNTL 0x2C00
+#define HDP_NONSURFACE_BASE 0x2C04
+#define HDP_NONSURFACE_INFO 0x2C08
+#define HDP_NONSURFACE_SIZE 0x2C0C
+#define HDP_REG_COHERENCY_FLUSH_CNTL 0x54A0
+#define HDP_TILING_CONFIG 0x2F3C
+
+#define MC_SHARED_CHMAP 0x2004
+#define NOOFCHAN_SHIFT 12
+#define NOOFCHAN_MASK 0x00003000
+
+#define MC_ARB_RAMCFG 0x2760
+#define NOOFBANK_SHIFT 0
+#define NOOFBANK_MASK 0x00000003
+#define NOOFRANK_SHIFT 2
+#define NOOFRANK_MASK 0x00000004
+#define NOOFROWS_SHIFT 3
+#define NOOFROWS_MASK 0x00000038
+#define NOOFCOLS_SHIFT 6
+#define NOOFCOLS_MASK 0x000000C0
+#define CHANSIZE_SHIFT 8
+#define CHANSIZE_MASK 0x00000100
+#define BURSTLENGTH_SHIFT 9
+#define BURSTLENGTH_MASK 0x00000200
+#define CHANSIZE_OVERRIDE (1 << 11)
+#define MC_VM_AGP_TOP 0x2028
+#define MC_VM_AGP_BOT 0x202C
+#define MC_VM_AGP_BASE 0x2030
+#define MC_VM_FB_LOCATION 0x2024
+#define MC_VM_MB_L1_TLB0_CNTL 0x2234
+#define MC_VM_MB_L1_TLB1_CNTL 0x2238
+#define MC_VM_MB_L1_TLB2_CNTL 0x223C
+#define MC_VM_MB_L1_TLB3_CNTL 0x2240
+#define ENABLE_L1_TLB (1 << 0)
+#define ENABLE_L1_FRAGMENT_PROCESSING (1 << 1)
+#define SYSTEM_ACCESS_MODE_PA_ONLY (0 << 3)
+#define SYSTEM_ACCESS_MODE_USE_SYS_MAP (1 << 3)
+#define SYSTEM_ACCESS_MODE_IN_SYS (2 << 3)
+#define SYSTEM_ACCESS_MODE_NOT_IN_SYS (3 << 3)
+#define SYSTEM_APERTURE_UNMAPPED_ACCESS_PASS_THRU (0 << 5)
+#define EFFECTIVE_L1_TLB_SIZE(x) ((x)<<15)
+#define EFFECTIVE_L1_QUEUE_SIZE(x) ((x)<<18)
+#define MC_VM_MD_L1_TLB0_CNTL 0x2654
+#define MC_VM_MD_L1_TLB1_CNTL 0x2658
+#define MC_VM_MD_L1_TLB2_CNTL 0x265C
+#define MC_VM_SYSTEM_APERTURE_DEFAULT_ADDR 0x203C
+#define MC_VM_SYSTEM_APERTURE_HIGH_ADDR 0x2038
+#define MC_VM_SYSTEM_APERTURE_LOW_ADDR 0x2034
+
+#define PA_CL_ENHANCE 0x8A14
+#define CLIP_VTX_REORDER_ENA (1 << 0)
+#define NUM_CLIP_SEQ(x) ((x) << 1)
+#define PA_SC_AA_CONFIG 0x28C04
+#define PA_SC_CLIPRECT_RULE 0x2820C
+#define PA_SC_EDGERULE 0x28230
+#define PA_SC_FIFO_SIZE 0x8BCC
+#define SC_PRIM_FIFO_SIZE(x) ((x) << 0)
+#define SC_HIZ_TILE_FIFO_SIZE(x) ((x) << 12)
+#define SC_EARLYZ_TILE_FIFO_SIZE(x) ((x) << 20)
+#define PA_SC_FORCE_EOV_MAX_CNTS 0x8B24
+#define FORCE_EOV_MAX_CLK_CNT(x) ((x) << 0)
+#define FORCE_EOV_MAX_REZ_CNT(x) ((x) << 16)
+#define PA_SC_LINE_STIPPLE 0x28A0C
+#define PA_SC_LINE_STIPPLE_STATE 0x8B10
+
+#define SCRATCH_REG0 0x8500
+#define SCRATCH_REG1 0x8504
+#define SCRATCH_REG2 0x8508
+#define SCRATCH_REG3 0x850C
+#define SCRATCH_REG4 0x8510
+#define SCRATCH_REG5 0x8514
+#define SCRATCH_REG6 0x8518
+#define SCRATCH_REG7 0x851C
+#define SCRATCH_UMSK 0x8540
+#define SCRATCH_ADDR 0x8544
+
+#define SMX_DC_CTL0 0xA020
+#define USE_HASH_FUNCTION (1 << 0)
+#define NUMBER_OF_SETS(x) ((x) << 1)
+#define FLUSH_ALL_ON_EVENT (1 << 10)
+#define STALL_ON_EVENT (1 << 11)
+#define SMX_EVENT_CTL 0xA02C
+#define ES_FLUSH_CTL(x) ((x) << 0)
+#define GS_FLUSH_CTL(x) ((x) << 3)
+#define ACK_FLUSH_CTL(x) ((x) << 6)
+#define SYNC_FLUSH_CTL (1 << 8)
+
+#define SPI_CONFIG_CNTL 0x9100
+#define GPR_WRITE_PRIORITY(x) ((x) << 0)
+#define SPI_CONFIG_CNTL_1 0x913C
+#define VTX_DONE_DELAY(x) ((x) << 0)
+#define INTERP_ONE_PRIM_PER_ROW (1 << 4)
+#define SPI_INPUT_Z 0x286D8
+#define SPI_PS_IN_CONTROL_0 0x286CC
+#define NUM_INTERP(x) ((x)<<0)
+#define POSITION_ENA (1<<8)
+#define POSITION_CENTROID (1<<9)
+#define POSITION_ADDR(x) ((x)<<10)
+#define PARAM_GEN(x) ((x)<<15)
+#define PARAM_GEN_ADDR(x) ((x)<<19)
+#define BARYC_SAMPLE_CNTL(x) ((x)<<26)
+#define PERSP_GRADIENT_ENA (1<<28)
+#define LINEAR_GRADIENT_ENA (1<<29)
+#define POSITION_SAMPLE (1<<30)
+#define BARYC_AT_SAMPLE_ENA (1<<31)
+
+#define SQ_CONFIG 0x8C00
+#define VC_ENABLE (1 << 0)
+#define EXPORT_SRC_C (1 << 1)
+#define CS_PRIO(x) ((x) << 18)
+#define LS_PRIO(x) ((x) << 20)
+#define HS_PRIO(x) ((x) << 22)
+#define PS_PRIO(x) ((x) << 24)
+#define VS_PRIO(x) ((x) << 26)
+#define GS_PRIO(x) ((x) << 28)
+#define ES_PRIO(x) ((x) << 30)
+#define SQ_GPR_RESOURCE_MGMT_1 0x8C04
+#define NUM_PS_GPRS(x) ((x) << 0)
+#define NUM_VS_GPRS(x) ((x) << 16)
+#define NUM_CLAUSE_TEMP_GPRS(x) ((x) << 28)
+#define SQ_GPR_RESOURCE_MGMT_2 0x8C08
+#define NUM_GS_GPRS(x) ((x) << 0)
+#define NUM_ES_GPRS(x) ((x) << 16)
+#define SQ_GPR_RESOURCE_MGMT_3 0x8C0C
+#define NUM_HS_GPRS(x) ((x) << 0)
+#define NUM_LS_GPRS(x) ((x) << 16)
+#define SQ_THREAD_RESOURCE_MGMT 0x8C18
+#define NUM_PS_THREADS(x) ((x) << 0)
+#define NUM_VS_THREADS(x) ((x) << 8)
+#define NUM_GS_THREADS(x) ((x) << 16)
+#define NUM_ES_THREADS(x) ((x) << 24)
+#define SQ_THREAD_RESOURCE_MGMT_2 0x8C1C
+#define NUM_HS_THREADS(x) ((x) << 0)
+#define NUM_LS_THREADS(x) ((x) << 8)
+#define SQ_STACK_RESOURCE_MGMT_1 0x8C20
+#define NUM_PS_STACK_ENTRIES(x) ((x) << 0)
+#define NUM_VS_STACK_ENTRIES(x) ((x) << 16)
+#define SQ_STACK_RESOURCE_MGMT_2 0x8C24
+#define NUM_GS_STACK_ENTRIES(x) ((x) << 0)
+#define NUM_ES_STACK_ENTRIES(x) ((x) << 16)
+#define SQ_STACK_RESOURCE_MGMT_3 0x8C28
+#define NUM_HS_STACK_ENTRIES(x) ((x) << 0)
+#define NUM_LS_STACK_ENTRIES(x) ((x) << 16)
+#define SQ_DYN_GPR_CNTL_PS_FLUSH_REQ 0x8D8C
+#define SQ_LDS_RESOURCE_MGMT 0x8E2C
+
+#define SQ_MS_FIFO_SIZES 0x8CF0
+#define CACHE_FIFO_SIZE(x) ((x) << 0)
+#define FETCH_FIFO_HIWATER(x) ((x) << 8)
+#define DONE_FIFO_HIWATER(x) ((x) << 16)
+#define ALU_UPDATE_FIFO_HIWATER(x) ((x) << 24)
+
+#define SX_DEBUG_1 0x9058
+#define ENABLE_NEW_SMX_ADDRESS (1 << 16)
+#define SX_EXPORT_BUFFER_SIZES 0x900C
+#define COLOR_BUFFER_SIZE(x) ((x) << 0)
+#define POSITION_BUFFER_SIZE(x) ((x) << 8)
+#define SMX_BUFFER_SIZE(x) ((x) << 16)
+#define SX_MISC 0x28350
+
+#define CB_PERF_CTR0_SEL_0 0x9A20
+#define CB_PERF_CTR0_SEL_1 0x9A24
+#define CB_PERF_CTR1_SEL_0 0x9A28
+#define CB_PERF_CTR1_SEL_1 0x9A2C
+#define CB_PERF_CTR2_SEL_0 0x9A30
+#define CB_PERF_CTR2_SEL_1 0x9A34
+#define CB_PERF_CTR3_SEL_0 0x9A38
+#define CB_PERF_CTR3_SEL_1 0x9A3C
+
+#define TA_CNTL_AUX 0x9508
+#define DISABLE_CUBE_WRAP (1 << 0)
+#define DISABLE_CUBE_ANISO (1 << 1)
+#define SYNC_GRADIENT (1 << 24)
+#define SYNC_WALKER (1 << 25)
+#define SYNC_ALIGNER (1 << 26)
+
+#define VGT_CACHE_INVALIDATION 0x88C4
+#define CACHE_INVALIDATION(x) ((x) << 0)
+#define VC_ONLY 0
+#define TC_ONLY 1
+#define VC_AND_TC 2
+#define AUTO_INVLD_EN(x) ((x) << 6)
+#define NO_AUTO 0
+#define ES_AUTO 1
+#define GS_AUTO 2
+#define ES_AND_GS_AUTO 3
+#define VGT_GS_VERTEX_REUSE 0x88D4
+#define VGT_NUM_INSTANCES 0x8974
+#define VGT_OUT_DEALLOC_CNTL 0x28C5C
+#define DEALLOC_DIST_MASK 0x0000007F
+#define VGT_VERTEX_REUSE_BLOCK_CNTL 0x28C58
+#define VTX_REUSE_DEPTH_MASK 0x000000FF
+
+#define VM_CONTEXT0_CNTL 0x1410
+#define ENABLE_CONTEXT (1 << 0)
+#define PAGE_TABLE_DEPTH(x) (((x) & 3) << 1)
+#define RANGE_PROTECTION_FAULT_ENABLE_DEFAULT (1 << 4)
+#define VM_CONTEXT1_CNTL 0x1414
+#define VM_CONTEXT0_PAGE_TABLE_BASE_ADDR 0x153C
+#define VM_CONTEXT0_PAGE_TABLE_END_ADDR 0x157C
+#define VM_CONTEXT0_PAGE_TABLE_START_ADDR 0x155C
+#define VM_CONTEXT0_PROTECTION_FAULT_DEFAULT_ADDR 0x1518
+#define VM_CONTEXT0_REQUEST_RESPONSE 0x1470
+#define REQUEST_TYPE(x) (((x) & 0xf) << 0)
+#define RESPONSE_TYPE_MASK 0x000000F0
+#define RESPONSE_TYPE_SHIFT 4
+#define VM_L2_CNTL 0x1400
+#define ENABLE_L2_CACHE (1 << 0)
+#define ENABLE_L2_FRAGMENT_PROCESSING (1 << 1)
+#define ENABLE_L2_PTE_CACHE_LRU_UPDATE_BY_WRITE (1 << 9)
+#define EFFECTIVE_L2_QUEUE_SIZE(x) (((x) & 7) << 14)
+#define VM_L2_CNTL2 0x1404
+#define INVALIDATE_ALL_L1_TLBS (1 << 0)
+#define INVALIDATE_L2_CACHE (1 << 1)
+#define VM_L2_CNTL3 0x1408
+#define BANK_SELECT(x) ((x) << 0)
+#define CACHE_UPDATE_MODE(x) ((x) << 6)
+#define VM_L2_STATUS 0x140C
+#define L2_BUSY (1 << 0)
+
+#define WAIT_UNTIL 0x8040
+
+#define SRBM_STATUS 0x0E50
+#define SRBM_SOFT_RESET 0x0E60
+#define SRBM_SOFT_RESET_ALL_MASK 0x00FEEFA6
+#define SOFT_RESET_BIF (1 << 1)
+#define SOFT_RESET_CG (1 << 2)
+#define SOFT_RESET_DC (1 << 5)
+#define SOFT_RESET_GRBM (1 << 8)
+#define SOFT_RESET_HDP (1 << 9)
+#define SOFT_RESET_IH (1 << 10)
+#define SOFT_RESET_MC (1 << 11)
+#define SOFT_RESET_RLC (1 << 13)
+#define SOFT_RESET_ROM (1 << 14)
+#define SOFT_RESET_SEM (1 << 15)
+#define SOFT_RESET_VMC (1 << 17)
+#define SOFT_RESET_TST (1 << 21)
+#define SOFT_RESET_REGBB (1 << 22)
+#define SOFT_RESET_ORB (1 << 23)
+
+#define IH_RB_CNTL 0x3e00
+# define IH_RB_ENABLE (1 << 0)
+# define IH_IB_SIZE(x) ((x) << 1) /* log2 */
+# define IH_RB_FULL_DRAIN_ENABLE (1 << 6)
+# define IH_WPTR_WRITEBACK_ENABLE (1 << 8)
+# define IH_WPTR_WRITEBACK_TIMER(x) ((x) << 9) /* log2 */
+# define IH_WPTR_OVERFLOW_ENABLE (1 << 16)
+# define IH_WPTR_OVERFLOW_CLEAR (1 << 31)
+#define IH_RB_BASE 0x3e04
+#define IH_RB_RPTR 0x3e08
+#define IH_RB_WPTR 0x3e0c
+# define RB_OVERFLOW (1 << 0)
+# define WPTR_OFFSET_MASK 0x3fffc
+#define IH_RB_WPTR_ADDR_HI 0x3e10
+#define IH_RB_WPTR_ADDR_LO 0x3e14
+#define IH_CNTL 0x3e18
+# define ENABLE_INTR (1 << 0)
+# define IH_MC_SWAP(x) ((x) << 2)
+# define IH_MC_SWAP_NONE 0
+# define IH_MC_SWAP_16BIT 1
+# define IH_MC_SWAP_32BIT 2
+# define IH_MC_SWAP_64BIT 3
+# define RPTR_REARM (1 << 4)
+# define MC_WRREQ_CREDIT(x) ((x) << 15)
+# define MC_WR_CLEAN_CNT(x) ((x) << 20)
+
+#define CP_INT_CNTL 0xc124
+# define CNTX_BUSY_INT_ENABLE (1 << 19)
+# define CNTX_EMPTY_INT_ENABLE (1 << 20)
+# define SCRATCH_INT_ENABLE (1 << 25)
+# define TIME_STAMP_INT_ENABLE (1 << 26)
+# define IB2_INT_ENABLE (1 << 29)
+# define IB1_INT_ENABLE (1 << 30)
+# define RB_INT_ENABLE (1 << 31)
+#define CP_INT_STATUS 0xc128
+# define SCRATCH_INT_STAT (1 << 25)
+# define TIME_STAMP_INT_STAT (1 << 26)
+# define IB2_INT_STAT (1 << 29)
+# define IB1_INT_STAT (1 << 30)
+# define RB_INT_STAT (1 << 31)
+
+#define GRBM_INT_CNTL 0x8060
+# define RDERR_INT_ENABLE (1 << 0)
+# define GUI_IDLE_INT_ENABLE (1 << 19)
+
+/* 0x6e98, 0x7a98, 0x10698, 0x11298, 0x11e98, 0x12a98 */
+#define CRTC_STATUS_FRAME_COUNT 0x6e98
+
+/* 0x6bb8, 0x77b8, 0x103b8, 0x10fb8, 0x11bb8, 0x127b8 */
+#define VLINE_STATUS 0x6bb8
+# define VLINE_OCCURRED (1 << 0)
+# define VLINE_ACK (1 << 4)
+# define VLINE_STAT (1 << 12)
+# define VLINE_INTERRUPT (1 << 16)
+# define VLINE_INTERRUPT_TYPE (1 << 17)
+/* 0x6bbc, 0x77bc, 0x103bc, 0x10fbc, 0x11bbc, 0x127bc */
+#define VBLANK_STATUS 0x6bbc
+# define VBLANK_OCCURRED (1 << 0)
+# define VBLANK_ACK (1 << 4)
+# define VBLANK_STAT (1 << 12)
+# define VBLANK_INTERRUPT (1 << 16)
+# define VBLANK_INTERRUPT_TYPE (1 << 17)
+
+/* 0x6b40, 0x7740, 0x10340, 0x10f40, 0x11b40, 0x12740 */
+#define INT_MASK 0x6b40
+# define VBLANK_INT_MASK (1 << 0)
+# define VLINE_INT_MASK (1 << 4)
+
+#define DISP_INTERRUPT_STATUS 0x60f4
+# define LB_D1_VLINE_INTERRUPT (1 << 2)
+# define LB_D1_VBLANK_INTERRUPT (1 << 3)
+# define DC_HPD1_INTERRUPT (1 << 17)
+# define DC_HPD1_RX_INTERRUPT (1 << 18)
+# define DACA_AUTODETECT_INTERRUPT (1 << 22)
+# define DACB_AUTODETECT_INTERRUPT (1 << 23)
+# define DC_I2C_SW_DONE_INTERRUPT (1 << 24)
+# define DC_I2C_HW_DONE_INTERRUPT (1 << 25)
+#define DISP_INTERRUPT_STATUS_CONTINUE 0x60f8
+# define LB_D2_VLINE_INTERRUPT (1 << 2)
+# define LB_D2_VBLANK_INTERRUPT (1 << 3)
+# define DC_HPD2_INTERRUPT (1 << 17)
+# define DC_HPD2_RX_INTERRUPT (1 << 18)
+# define DISP_TIMER_INTERRUPT (1 << 24)
+#define DISP_INTERRUPT_STATUS_CONTINUE2 0x60fc
+# define LB_D3_VLINE_INTERRUPT (1 << 2)
+# define LB_D3_VBLANK_INTERRUPT (1 << 3)
+# define DC_HPD3_INTERRUPT (1 << 17)
+# define DC_HPD3_RX_INTERRUPT (1 << 18)
+#define DISP_INTERRUPT_STATUS_CONTINUE3 0x6100
+# define LB_D4_VLINE_INTERRUPT (1 << 2)
+# define LB_D4_VBLANK_INTERRUPT (1 << 3)
+# define DC_HPD4_INTERRUPT (1 << 17)
+# define DC_HPD4_RX_INTERRUPT (1 << 18)
+#define DISP_INTERRUPT_STATUS_CONTINUE4 0x614c
+# define LB_D5_VLINE_INTERRUPT (1 << 2)
+# define LB_D5_VBLANK_INTERRUPT (1 << 3)
+# define DC_HPD5_INTERRUPT (1 << 17)
+# define DC_HPD5_RX_INTERRUPT (1 << 18)
+#define DISP_INTERRUPT_STATUS_CONTINUE5 0x6150
+# define LB_D6_VLINE_INTERRUPT (1 << 2)
+# define LB_D6_VBLANK_INTERRUPT (1 << 3)
+# define DC_HPD6_INTERRUPT (1 << 17)
+# define DC_HPD6_RX_INTERRUPT (1 << 18)
+
+/* 0x6858, 0x7458, 0x10058, 0x10c58, 0x11858, 0x12458 */
+#define GRPH_INT_STATUS 0x6858
+# define GRPH_PFLIP_INT_OCCURRED (1 << 0)
+# define GRPH_PFLIP_INT_CLEAR (1 << 8)
+/* 0x685c, 0x745c, 0x1005c, 0x10c5c, 0x1185c, 0x1245c */
+#define GRPH_INT_CONTROL 0x685c
+# define GRPH_PFLIP_INT_MASK (1 << 0)
+# define GRPH_PFLIP_INT_TYPE (1 << 8)
+
+#define DACA_AUTODETECT_INT_CONTROL 0x66c8
+#define DACB_AUTODETECT_INT_CONTROL 0x67c8
+
+#define DC_HPD1_INT_STATUS 0x601c
+#define DC_HPD2_INT_STATUS 0x6028
+#define DC_HPD3_INT_STATUS 0x6034
+#define DC_HPD4_INT_STATUS 0x6040
+#define DC_HPD5_INT_STATUS 0x604c
+#define DC_HPD6_INT_STATUS 0x6058
+# define DC_HPDx_INT_STATUS (1 << 0)
+# define DC_HPDx_SENSE (1 << 1)
+# define DC_HPDx_RX_INT_STATUS (1 << 8)
+
+#define DC_HPD1_INT_CONTROL 0x6020
+#define DC_HPD2_INT_CONTROL 0x602c
+#define DC_HPD3_INT_CONTROL 0x6038
+#define DC_HPD4_INT_CONTROL 0x6044
+#define DC_HPD5_INT_CONTROL 0x6050
+#define DC_HPD6_INT_CONTROL 0x605c
+# define DC_HPDx_INT_ACK (1 << 0)
+# define DC_HPDx_INT_POLARITY (1 << 8)
+# define DC_HPDx_INT_EN (1 << 16)
+# define DC_HPDx_RX_INT_ACK (1 << 20)
+# define DC_HPDx_RX_INT_EN (1 << 24)
+
+#define DC_HPD1_CONTROL 0x6024
+#define DC_HPD2_CONTROL 0x6030
+#define DC_HPD3_CONTROL 0x603c
+#define DC_HPD4_CONTROL 0x6048
+#define DC_HPD5_CONTROL 0x6054
+#define DC_HPD6_CONTROL 0x6060
+# define DC_HPDx_CONNECTION_TIMER(x) ((x) << 0)
+# define DC_HPDx_RX_INT_TIMER(x) ((x) << 16)
+# define DC_HPDx_EN (1 << 28)
+
+#endif
diff --git a/drivers/gpu/drm/radeon/r100.c b/drivers/gpu/drm/radeon/r100.c
index d7388fdb6d0..cc004b05d63 100644
--- a/drivers/gpu/drm/radeon/r100.c
+++ b/drivers/gpu/drm/radeon/r100.c
@@ -37,6 +37,7 @@
#include "rs100d.h"
#include "rv200d.h"
#include "rv250d.h"
+#include "atom.h"
#include <linux/firmware.h>
#include <linux/platform_device.h>
@@ -67,6 +68,264 @@ MODULE_FIRMWARE(FIRMWARE_R520);
* r100,rv100,rs100,rv200,rs200,r200,rv250,rs300,rv280
*/
+void r100_pm_get_dynpm_state(struct radeon_device *rdev)
+{
+ int i;
+ rdev->pm.dynpm_can_upclock = true;
+ rdev->pm.dynpm_can_downclock = true;
+
+ switch (rdev->pm.dynpm_planned_action) {
+ case DYNPM_ACTION_MINIMUM:
+ rdev->pm.requested_power_state_index = 0;
+ rdev->pm.dynpm_can_downclock = false;
+ break;
+ case DYNPM_ACTION_DOWNCLOCK:
+ if (rdev->pm.current_power_state_index == 0) {
+ rdev->pm.requested_power_state_index = rdev->pm.current_power_state_index;
+ rdev->pm.dynpm_can_downclock = false;
+ } else {
+ if (rdev->pm.active_crtc_count > 1) {
+ for (i = 0; i < rdev->pm.num_power_states; i++) {
+ if (rdev->pm.power_state[i].flags & RADEON_PM_STATE_SINGLE_DISPLAY_ONLY)
+ continue;
+ else if (i >= rdev->pm.current_power_state_index) {
+ rdev->pm.requested_power_state_index = rdev->pm.current_power_state_index;
+ break;
+ } else {
+ rdev->pm.requested_power_state_index = i;
+ break;
+ }
+ }
+ } else
+ rdev->pm.requested_power_state_index =
+ rdev->pm.current_power_state_index - 1;
+ }
+ /* don't use a power state flagged "no display" while crtcs are active */
+ if ((rdev->pm.active_crtc_count > 0) &&
+ (rdev->pm.power_state[rdev->pm.requested_power_state_index].clock_info[0].flags &
+ RADEON_PM_MODE_NO_DISPLAY)) {
+ rdev->pm.requested_power_state_index++;
+ }
+ break;
+ case DYNPM_ACTION_UPCLOCK:
+ if (rdev->pm.current_power_state_index == (rdev->pm.num_power_states - 1)) {
+ rdev->pm.requested_power_state_index = rdev->pm.current_power_state_index;
+ rdev->pm.dynpm_can_upclock = false;
+ } else {
+ if (rdev->pm.active_crtc_count > 1) {
+ for (i = (rdev->pm.num_power_states - 1); i >= 0; i--) {
+ if (rdev->pm.power_state[i].flags & RADEON_PM_STATE_SINGLE_DISPLAY_ONLY)
+ continue;
+ else if (i <= rdev->pm.current_power_state_index) {
+ rdev->pm.requested_power_state_index = rdev->pm.current_power_state_index;
+ break;
+ } else {
+ rdev->pm.requested_power_state_index = i;
+ break;
+ }
+ }
+ } else
+ rdev->pm.requested_power_state_index =
+ rdev->pm.current_power_state_index + 1;
+ }
+ break;
+ case DYNPM_ACTION_DEFAULT:
+ rdev->pm.requested_power_state_index = rdev->pm.default_power_state_index;
+ rdev->pm.dynpm_can_upclock = false;
+ break;
+ case DYNPM_ACTION_NONE:
+ default:
+ DRM_ERROR("Requested mode for not defined action\n");
+ return;
+ }
+ /* only one clock mode per power state */
+ rdev->pm.requested_clock_mode_index = 0;
+
+ DRM_DEBUG("Requested: e: %d m: %d p: %d\n",
+ rdev->pm.power_state[rdev->pm.requested_power_state_index].
+ clock_info[rdev->pm.requested_clock_mode_index].sclk,
+ rdev->pm.power_state[rdev->pm.requested_power_state_index].
+ clock_info[rdev->pm.requested_clock_mode_index].mclk,
+ rdev->pm.power_state[rdev->pm.requested_power_state_index].
+ pcie_lanes);
+}
+
+void r100_pm_init_profile(struct radeon_device *rdev)
+{
+ /* default */
+ rdev->pm.profiles[PM_PROFILE_DEFAULT_IDX].dpms_off_ps_idx = rdev->pm.default_power_state_index;
+ rdev->pm.profiles[PM_PROFILE_DEFAULT_IDX].dpms_on_ps_idx = rdev->pm.default_power_state_index;
+ rdev->pm.profiles[PM_PROFILE_DEFAULT_IDX].dpms_off_cm_idx = 0;
+ rdev->pm.profiles[PM_PROFILE_DEFAULT_IDX].dpms_on_cm_idx = 0;
+ /* low sh */
+ rdev->pm.profiles[PM_PROFILE_LOW_SH_IDX].dpms_off_ps_idx = 0;
+ rdev->pm.profiles[PM_PROFILE_LOW_SH_IDX].dpms_on_ps_idx = 0;
+ rdev->pm.profiles[PM_PROFILE_LOW_SH_IDX].dpms_off_cm_idx = 0;
+ rdev->pm.profiles[PM_PROFILE_LOW_SH_IDX].dpms_on_cm_idx = 0;
+ /* high sh */
+ rdev->pm.profiles[PM_PROFILE_HIGH_SH_IDX].dpms_off_ps_idx = 0;
+ rdev->pm.profiles[PM_PROFILE_HIGH_SH_IDX].dpms_on_ps_idx = rdev->pm.default_power_state_index;
+ rdev->pm.profiles[PM_PROFILE_HIGH_SH_IDX].dpms_off_cm_idx = 0;
+ rdev->pm.profiles[PM_PROFILE_HIGH_SH_IDX].dpms_on_cm_idx = 0;
+ /* low mh */
+ rdev->pm.profiles[PM_PROFILE_LOW_MH_IDX].dpms_off_ps_idx = 0;
+ rdev->pm.profiles[PM_PROFILE_LOW_MH_IDX].dpms_on_ps_idx = rdev->pm.default_power_state_index;
+ rdev->pm.profiles[PM_PROFILE_LOW_MH_IDX].dpms_off_cm_idx = 0;
+ rdev->pm.profiles[PM_PROFILE_LOW_MH_IDX].dpms_on_cm_idx = 0;
+ /* high mh */
+ rdev->pm.profiles[PM_PROFILE_HIGH_MH_IDX].dpms_off_ps_idx = 0;
+ rdev->pm.profiles[PM_PROFILE_HIGH_MH_IDX].dpms_on_ps_idx = rdev->pm.default_power_state_index;
+ rdev->pm.profiles[PM_PROFILE_HIGH_MH_IDX].dpms_off_cm_idx = 0;
+ rdev->pm.profiles[PM_PROFILE_HIGH_MH_IDX].dpms_on_cm_idx = 0;
+}
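+
+/* An illustrative sketch (an assumption about the PM core, not code from
+ * this patch) of how these profile entries are consumed:
+ *   p = &rdev->pm.profiles[rdev->pm.profile_index];
+ *   rdev->pm.requested_power_state_index =
+ *           displays_on ? p->dpms_on_ps_idx : p->dpms_off_ps_idx;
+ *   rdev->pm.requested_clock_mode_index =
+ *           displays_on ? p->dpms_on_cm_idx : p->dpms_off_cm_idx;
+ */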
+
+void r100_pm_misc(struct radeon_device *rdev)
+{
+ int requested_index = rdev->pm.requested_power_state_index;
+ struct radeon_power_state *ps = &rdev->pm.power_state[requested_index];
+ struct radeon_voltage *voltage = &ps->clock_info[0].voltage;
+ u32 tmp, sclk_cntl, sclk_cntl2, sclk_more_cntl;
+
+ if ((voltage->type == VOLTAGE_GPIO) && (voltage->gpio.valid)) {
+ if (ps->misc & ATOM_PM_MISCINFO_VOLTAGE_DROP_SUPPORT) {
+ tmp = RREG32(voltage->gpio.reg);
+ if (voltage->active_high)
+ tmp |= voltage->gpio.mask;
+ else
+ tmp &= ~(voltage->gpio.mask);
+ WREG32(voltage->gpio.reg, tmp);
+ if (voltage->delay)
+ udelay(voltage->delay);
+ } else {
+ tmp = RREG32(voltage->gpio.reg);
+ if (voltage->active_high)
+ tmp &= ~voltage->gpio.mask;
+ else
+ tmp |= voltage->gpio.mask;
+ WREG32(voltage->gpio.reg, tmp);
+ if (voltage->delay)
+ udelay(voltage->delay);
+ }
+ }
+
+ sclk_cntl = RREG32_PLL(SCLK_CNTL);
+ sclk_cntl2 = RREG32_PLL(SCLK_CNTL2);
+ sclk_cntl2 &= ~REDUCED_SPEED_SCLK_SEL(3);
+ sclk_more_cntl = RREG32_PLL(SCLK_MORE_CNTL);
+ sclk_more_cntl &= ~VOLTAGE_DELAY_SEL(3);
+ if (ps->misc & ATOM_PM_MISCINFO_ASIC_REDUCED_SPEED_SCLK_EN) {
+ sclk_more_cntl |= REDUCED_SPEED_SCLK_EN;
+ if (ps->misc & ATOM_PM_MISCINFO_DYN_CLK_3D_IDLE)
+ sclk_cntl2 |= REDUCED_SPEED_SCLK_MODE;
+ else
+ sclk_cntl2 &= ~REDUCED_SPEED_SCLK_MODE;
+ if (ps->misc & ATOM_PM_MISCINFO_DYNAMIC_CLOCK_DIVIDER_BY_2)
+ sclk_cntl2 |= REDUCED_SPEED_SCLK_SEL(0);
+ else if (ps->misc & ATOM_PM_MISCINFO_DYNAMIC_CLOCK_DIVIDER_BY_4)
+ sclk_cntl2 |= REDUCED_SPEED_SCLK_SEL(2);
+ } else
+ sclk_more_cntl &= ~REDUCED_SPEED_SCLK_EN;
+
+ if (ps->misc & ATOM_PM_MISCINFO_ASIC_DYNAMIC_VOLTAGE_EN) {
+ sclk_more_cntl |= IO_CG_VOLTAGE_DROP;
+ if (voltage->delay) {
+ sclk_more_cntl |= VOLTAGE_DROP_SYNC;
+ switch (voltage->delay) {
+ case 33:
+ sclk_more_cntl |= VOLTAGE_DELAY_SEL(0);
+ break;
+ case 66:
+ sclk_more_cntl |= VOLTAGE_DELAY_SEL(1);
+ break;
+ case 99:
+ sclk_more_cntl |= VOLTAGE_DELAY_SEL(2);
+ break;
+ case 132:
+ sclk_more_cntl |= VOLTAGE_DELAY_SEL(3);
+ break;
+ }
+ } else
+ sclk_more_cntl &= ~VOLTAGE_DROP_SYNC;
+ } else
+ sclk_more_cntl &= ~IO_CG_VOLTAGE_DROP;
+
+ if (ps->misc & ATOM_PM_MISCINFO_DYNAMIC_HDP_BLOCK_EN)
+ sclk_cntl &= ~FORCE_HDP;
+ else
+ sclk_cntl |= FORCE_HDP;
+
+ WREG32_PLL(SCLK_CNTL, sclk_cntl);
+ WREG32_PLL(SCLK_CNTL2, sclk_cntl2);
+ WREG32_PLL(SCLK_MORE_CNTL, sclk_more_cntl);
+
+ /* set pcie lanes */
+ if ((rdev->flags & RADEON_IS_PCIE) &&
+ !(rdev->flags & RADEON_IS_IGP) &&
+ rdev->asic->set_pcie_lanes &&
+ (ps->pcie_lanes !=
+ rdev->pm.power_state[rdev->pm.current_power_state_index].pcie_lanes)) {
+ radeon_set_pcie_lanes(rdev,
+ ps->pcie_lanes);
+ DRM_DEBUG("Setting: p: %d\n", ps->pcie_lanes);
+ }
+}
+
+void r100_pm_prepare(struct radeon_device *rdev)
+{
+ struct drm_device *ddev = rdev->ddev;
+ struct drm_crtc *crtc;
+ struct radeon_crtc *radeon_crtc;
+ u32 tmp;
+
+ /* stop display requests on all active CRTCs */
+ list_for_each_entry(crtc, &ddev->mode_config.crtc_list, head) {
+ radeon_crtc = to_radeon_crtc(crtc);
+ if (radeon_crtc->enabled) {
+ if (radeon_crtc->crtc_id) {
+ tmp = RREG32(RADEON_CRTC2_GEN_CNTL);
+ tmp |= RADEON_CRTC2_DISP_REQ_EN_B;
+ WREG32(RADEON_CRTC2_GEN_CNTL, tmp);
+ } else {
+ tmp = RREG32(RADEON_CRTC_GEN_CNTL);
+ tmp |= RADEON_CRTC_DISP_REQ_EN_B;
+ WREG32(RADEON_CRTC_GEN_CNTL, tmp);
+ }
+ }
+ }
+}
+
+void r100_pm_finish(struct radeon_device *rdev)
+{
+ struct drm_device *ddev = rdev->ddev;
+ struct drm_crtc *crtc;
+ struct radeon_crtc *radeon_crtc;
+ u32 tmp;
+
+ /* re-enable display requests on the active CRTCs */
+ list_for_each_entry(crtc, &ddev->mode_config.crtc_list, head) {
+ radeon_crtc = to_radeon_crtc(crtc);
+ if (radeon_crtc->enabled) {
+ if (radeon_crtc->crtc_id) {
+ tmp = RREG32(RADEON_CRTC2_GEN_CNTL);
+ tmp &= ~RADEON_CRTC2_DISP_REQ_EN_B;
+ WREG32(RADEON_CRTC2_GEN_CNTL, tmp);
+ } else {
+ tmp = RREG32(RADEON_CRTC_GEN_CNTL);
+ tmp &= ~RADEON_CRTC_DISP_REQ_EN_B;
+ WREG32(RADEON_CRTC_GEN_CNTL, tmp);
+ }
+ }
+ }
+}
+
+bool r100_gui_idle(struct radeon_device *rdev)
+{
+ return !(RREG32(RADEON_RBBM_STATUS) & RADEON_RBBM_ACTIVE);
+}
+
/* hpd for digital panel detect/disconnect */
bool r100_hpd_sense(struct radeon_device *rdev, enum radeon_hpd_id hpd)
{
@@ -254,6 +513,9 @@ int r100_irq_set(struct radeon_device *rdev)
if (rdev->irq.sw_int) {
tmp |= RADEON_SW_INT_ENABLE;
}
+ if (rdev->irq.gui_idle) {
+ tmp |= RADEON_GUI_IDLE_MASK;
+ }
if (rdev->irq.crtc_vblank_int[0]) {
tmp |= RADEON_CRTC_VBLANK_MASK;
}
@@ -288,6 +550,12 @@ static inline uint32_t r100_irq_ack(struct radeon_device *rdev)
RADEON_CRTC_VBLANK_STAT | RADEON_CRTC2_VBLANK_STAT |
RADEON_FP_DETECT_STAT | RADEON_FP2_DETECT_STAT;
+ /* the interrupt works, but the status bit is permanently asserted */
+ if (rdev->irq.gui_idle && radeon_gui_idle(rdev)) {
+ if (!rdev->irq.gui_idle_acked)
+ irq_mask |= RADEON_GUI_IDLE_STAT;
+ }
+
if (irqs) {
WREG32(RADEON_GEN_INT_STATUS, irqs);
}
@@ -299,6 +567,9 @@ int r100_irq_process(struct radeon_device *rdev)
uint32_t status, msi_rearm;
bool queue_hotplug = false;
+ /* reset gui idle ack. the status bit is broken */
+ rdev->irq.gui_idle_acked = false;
+
status = r100_irq_ack(rdev);
if (!status) {
return IRQ_NONE;
@@ -311,6 +582,12 @@ int r100_irq_process(struct radeon_device *rdev)
if (status & RADEON_SW_INT_TEST) {
radeon_fence_process(rdev);
}
+ /* gui idle interrupt */
+ if (status & RADEON_GUI_IDLE_STAT) {
+ rdev->irq.gui_idle_acked = true;
+ rdev->pm.gui_idle = true;
+ wake_up(&rdev->irq.idle_queue);
+ }
/* Vertical blank interrupts */
if (status & RADEON_CRTC_VBLANK_STAT) {
drm_handle_vblank(rdev->ddev, 0);
@@ -332,6 +609,8 @@ int r100_irq_process(struct radeon_device *rdev)
}
status = r100_irq_ack(rdev);
}
+ /* reset gui idle ack. the status bit is broken */
+ rdev->irq.gui_idle_acked = false;
if (queue_hotplug)
queue_work(rdev->wq, &rdev->hotplug_work);
if (rdev->msi_enabled) {
@@ -663,26 +942,6 @@ int r100_cp_init(struct radeon_device *rdev, unsigned ring_size)
if (r100_debugfs_cp_init(rdev)) {
DRM_ERROR("Failed to register debugfs file for CP !\n");
}
- /* Reset CP */
- tmp = RREG32(RADEON_CP_CSQ_STAT);
- if ((tmp & (1 << 31))) {
- DRM_INFO("radeon: cp busy (0x%08X) resetting\n", tmp);
- WREG32(RADEON_CP_CSQ_MODE, 0);
- WREG32(RADEON_CP_CSQ_CNTL, 0);
- WREG32(RADEON_RBBM_SOFT_RESET, RADEON_SOFT_RESET_CP);
- tmp = RREG32(RADEON_RBBM_SOFT_RESET);
- mdelay(2);
- WREG32(RADEON_RBBM_SOFT_RESET, 0);
- tmp = RREG32(RADEON_RBBM_SOFT_RESET);
- mdelay(2);
- tmp = RREG32(RADEON_CP_CSQ_STAT);
- if ((tmp & (1 << 31))) {
- DRM_INFO("radeon: cp reset failed (0x%08X)\n", tmp);
- }
- } else {
- DRM_INFO("radeon: cp idle (0x%08X)\n", tmp);
- }
-
if (!rdev->me_fw) {
r = r100_cp_init_microcode(rdev);
if (r) {
@@ -787,39 +1046,6 @@ void r100_cp_disable(struct radeon_device *rdev)
}
}
-int r100_cp_reset(struct radeon_device *rdev)
-{
- uint32_t tmp;
- bool reinit_cp;
- int i;
-
- reinit_cp = rdev->cp.ready;
- rdev->cp.ready = false;
- WREG32(RADEON_CP_CSQ_MODE, 0);
- WREG32(RADEON_CP_CSQ_CNTL, 0);
- WREG32(RADEON_RBBM_SOFT_RESET, RADEON_SOFT_RESET_CP);
- (void)RREG32(RADEON_RBBM_SOFT_RESET);
- udelay(200);
- WREG32(RADEON_RBBM_SOFT_RESET, 0);
- /* Wait to prevent race in RBBM_STATUS */
- mdelay(1);
- for (i = 0; i < rdev->usec_timeout; i++) {
- tmp = RREG32(RADEON_RBBM_STATUS);
- if (!(tmp & (1 << 16))) {
- DRM_INFO("CP reset succeed (RBBM_STATUS=0x%08X)\n",
- tmp);
- if (reinit_cp) {
- return r100_cp_init(rdev, rdev->cp.ring_size);
- }
- return 0;
- }
- DRM_UDELAY(1);
- }
- tmp = RREG32(RADEON_RBBM_STATUS);
- DRM_ERROR("Failed to reset CP (RBBM_STATUS=0x%08X)!\n", tmp);
- return -1;
-}
-
void r100_cp_commit(struct radeon_device *rdev)
{
WREG32(RADEON_CP_RB_WPTR, rdev->cp.wptr);
@@ -1733,76 +1959,163 @@ int r100_mc_wait_for_idle(struct radeon_device *rdev)
return -1;
}
-void r100_gpu_init(struct radeon_device *rdev)
+void r100_gpu_lockup_update(struct r100_gpu_lockup *lockup, struct radeon_cp *cp)
{
- /* TODO: anythings to do here ? pipes ? */
- r100_hdp_reset(rdev);
+ lockup->last_cp_rptr = cp->rptr;
+ lockup->last_jiffies = jiffies;
+}
+
+/**
+ * r100_gpu_cp_is_lockup() - check whether the CP is locked up
+ * @rdev: radeon device structure
+ * @lockup: r100_gpu_lockup structure holding CP lockup tracking information
+ * @cp: radeon_cp structure holding CP information
+ *
+ * The lockup tracking information does not need explicit initialization:
+ * either the CP rptr will differ from the recorded value, or jiffies will
+ * have wrapped around, and both cases force the tracking information to be
+ * (re)initialized.
+ *
+ * A false positive is possible if we are called again after a long while and
+ * last_cp_rptr happens to equal the current CP rptr; unlikely, but it can
+ * happen. To guard against it, if more than 3 seconds have elapsed since the
+ * last call we treat the sample as stale, update the tracking information and
+ * return false. A lockup is only reported once the rptr has been stuck for at
+ * least one second, so the caller must invoke r100_gpu_cp_is_lockup() several
+ * times within a 3 second window for a lockup to be reported; the fencing
+ * code should be cautious about that.
+ *
+ * The caller should also write to the ring to force CP activity, so that we
+ * don't get a false positive when the CP simply has nothing to do.
+ */
+bool r100_gpu_cp_is_lockup(struct radeon_device *rdev, struct r100_gpu_lockup *lockup, struct radeon_cp *cp)
+{
+ unsigned long cjiffies, elapsed;
+
+ cjiffies = jiffies;
+ if (!time_after(cjiffies, lockup->last_jiffies)) {
+ /* likely a wrap around */
+ lockup->last_cp_rptr = cp->rptr;
+ lockup->last_jiffies = jiffies;
+ return false;
+ }
+ if (cp->rptr != lockup->last_cp_rptr) {
+ /* CP is still working no lockup */
+ lockup->last_cp_rptr = cp->rptr;
+ lockup->last_jiffies = jiffies;
+ return false;
+ }
+ elapsed = jiffies_to_msecs(cjiffies - lockup->last_jiffies);
+ if (elapsed >= 3000) {
+ /* most likely the improbable case where the current rptr
+ * merely equals an rptr recorded a long while ago; treat it
+ * as a false positive and update the tracking information so
+ * that we get called again at a later point
+ */
+ lockup->last_cp_rptr = cp->rptr;
+ lockup->last_jiffies = jiffies;
+ return false;
+ }
+ if (elapsed >= 1000) {
+ dev_err(rdev->dev, "GPU lockup CP stall for more than %lumsec\n", elapsed);
+ return true;
+ }
+ /* give a chance to the GPU ... */
+ return false;
}
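+/* Illustrative caller pattern (an assumption, not part of this patch): the
+ * fence wait path emits work to the ring, then polls the per-asic
+ * gpu_is_lockup() hook at an interval well inside the 3 second staleness
+ * window above, and only escalates to a GPU reset once it returns true.
+ */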
-void r100_hdp_reset(struct radeon_device *rdev)
+bool r100_gpu_is_lockup(struct radeon_device *rdev)
{
- uint32_t tmp;
+ u32 rbbm_status;
+ int r;
- tmp = RREG32(RADEON_HOST_PATH_CNTL) & RADEON_HDP_APER_CNTL;
- tmp |= (7 << 28);
- WREG32(RADEON_HOST_PATH_CNTL, tmp | RADEON_HDP_SOFT_RESET | RADEON_HDP_READ_BUFFER_INVALIDATE);
- (void)RREG32(RADEON_HOST_PATH_CNTL);
- udelay(200);
- WREG32(RADEON_RBBM_SOFT_RESET, 0);
- WREG32(RADEON_HOST_PATH_CNTL, tmp);
- (void)RREG32(RADEON_HOST_PATH_CNTL);
+ rbbm_status = RREG32(R_000E40_RBBM_STATUS);
+ if (!G_000E40_GUI_ACTIVE(rbbm_status)) {
+ r100_gpu_lockup_update(&rdev->config.r100.lockup, &rdev->cp);
+ return false;
+ }
+ /* force CP activities */
+ r = radeon_ring_lock(rdev, 2);
+ if (!r) {
+ /* PACKET2 NOP */
+ radeon_ring_write(rdev, 0x80000000);
+ radeon_ring_write(rdev, 0x80000000);
+ radeon_ring_unlock_commit(rdev);
+ }
+ rdev->cp.rptr = RREG32(RADEON_CP_RB_RPTR);
+ return r100_gpu_cp_is_lockup(rdev, &rdev->config.r100.lockup, &rdev->cp);
}
-int r100_rb2d_reset(struct radeon_device *rdev)
+void r100_bm_disable(struct radeon_device *rdev)
{
- uint32_t tmp;
- int i;
+ u32 tmp;
- WREG32(RADEON_RBBM_SOFT_RESET, RADEON_SOFT_RESET_E2);
- (void)RREG32(RADEON_RBBM_SOFT_RESET);
- udelay(200);
- WREG32(RADEON_RBBM_SOFT_RESET, 0);
- /* Wait to prevent race in RBBM_STATUS */
+ /* disable bus mastering */
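+ /* 0x44 = BUS_MASTER_DIS | BUS_FLUSH_BUF, 0x42 = BUS_MASTER_DIS | BUS_MSTR_RESET,
+ * 0x40 = BUS_MASTER_DIS; field encodings per R_000030_BUS_CNTL in r100d.h */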
+ tmp = RREG32(R_000030_BUS_CNTL);
+ WREG32(R_000030_BUS_CNTL, (tmp & 0xFFFFFFFF) | 0x00000044);
+ mdelay(1);
+ WREG32(R_000030_BUS_CNTL, (tmp & 0xFFFFFFFF) | 0x00000042);
+ mdelay(1);
+ WREG32(R_000030_BUS_CNTL, (tmp & 0xFFFFFFFF) | 0x00000040);
+ tmp = RREG32(RADEON_BUS_CNTL);
+ mdelay(1);
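+ /* also clear the Bus Master Enable bit (bit 2) of the PCI_COMMAND register (0x4) */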
+ pci_read_config_word(rdev->pdev, 0x4, (u16*)&tmp);
+ pci_write_config_word(rdev->pdev, 0x4, tmp & 0xFFFB);
mdelay(1);
- for (i = 0; i < rdev->usec_timeout; i++) {
- tmp = RREG32(RADEON_RBBM_STATUS);
- if (!(tmp & (1 << 26))) {
- DRM_INFO("RB2D reset succeed (RBBM_STATUS=0x%08X)\n",
- tmp);
- return 0;
- }
- DRM_UDELAY(1);
- }
- tmp = RREG32(RADEON_RBBM_STATUS);
- DRM_ERROR("Failed to reset RB2D (RBBM_STATUS=0x%08X)!\n", tmp);
- return -1;
}
-int r100_gpu_reset(struct radeon_device *rdev)
+int r100_asic_reset(struct radeon_device *rdev)
{
- uint32_t status;
+ struct r100_mc_save save;
+ u32 status, tmp;
- /* reset order likely matter */
- status = RREG32(RADEON_RBBM_STATUS);
- /* reset HDP */
- r100_hdp_reset(rdev);
- /* reset rb2d */
- if (status & ((1 << 17) | (1 << 18) | (1 << 27))) {
- r100_rb2d_reset(rdev);
+ r100_mc_stop(rdev, &save);
+ status = RREG32(R_000E40_RBBM_STATUS);
+ if (!G_000E40_GUI_ACTIVE(status)) {
+ return 0;
}
- /* TODO: reset 3D engine */
+ status = RREG32(R_000E40_RBBM_STATUS);
+ dev_info(rdev->dev, "(%s:%d) RBBM_STATUS=0x%08X\n", __func__, __LINE__, status);
+ /* stop CP */
+ WREG32(RADEON_CP_CSQ_CNTL, 0);
+ tmp = RREG32(RADEON_CP_RB_CNTL);
+ WREG32(RADEON_CP_RB_CNTL, tmp | RADEON_RB_RPTR_WR_ENA);
+ WREG32(RADEON_CP_RB_RPTR_WR, 0);
+ WREG32(RADEON_CP_RB_WPTR, 0);
+ WREG32(RADEON_CP_RB_CNTL, tmp);
+ /* save PCI state */
+ pci_save_state(rdev->pdev);
+ /* disable bus mastering */
+ r100_bm_disable(rdev);
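+ /* soft reset the 3D engine blocks first; the CP gets its own reset pass below */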
+ WREG32(R_0000F0_RBBM_SOFT_RESET, S_0000F0_SOFT_RESET_SE(1) |
+ S_0000F0_SOFT_RESET_RE(1) |
+ S_0000F0_SOFT_RESET_PP(1) |
+ S_0000F0_SOFT_RESET_RB(1));
+ RREG32(R_0000F0_RBBM_SOFT_RESET);
+ mdelay(500);
+ WREG32(R_0000F0_RBBM_SOFT_RESET, 0);
+ mdelay(1);
+ status = RREG32(R_000E40_RBBM_STATUS);
+ dev_info(rdev->dev, "(%s:%d) RBBM_STATUS=0x%08X\n", __func__, __LINE__, status);
/* reset CP */
- status = RREG32(RADEON_RBBM_STATUS);
- if (status & (1 << 16)) {
- r100_cp_reset(rdev);
- }
+ WREG32(R_0000F0_RBBM_SOFT_RESET, S_0000F0_SOFT_RESET_CP(1));
+ RREG32(R_0000F0_RBBM_SOFT_RESET);
+ mdelay(500);
+ WREG32(R_0000F0_RBBM_SOFT_RESET, 0);
+ mdelay(1);
+ status = RREG32(R_000E40_RBBM_STATUS);
+ dev_info(rdev->dev, "(%s:%d) RBBM_STATUS=0x%08X\n", __func__, __LINE__, status);
+ /* restore PCI & busmastering */
+ pci_restore_state(rdev->pdev);
+ r100_enable_bm(rdev);
/* Check if GPU is idle */
- status = RREG32(RADEON_RBBM_STATUS);
- if (status & RADEON_RBBM_ACTIVE) {
- DRM_ERROR("Failed to reset GPU (RBBM_STATUS=0x%08X)\n", status);
+ if (G_000E40_SE_BUSY(status) || G_000E40_RE_BUSY(status) ||
+ G_000E40_TAM_BUSY(status) || G_000E40_PB_BUSY(status)) {
+ dev_err(rdev->dev, "failed to reset GPU\n");
+ rdev->gpu_lockup = true;
return -1;
}
- DRM_INFO("GPU reset succeed (RBBM_STATUS=0x%08X)\n", status);
+ r100_mc_resume(rdev, &save);
+ dev_info(rdev->dev, "GPU reset succeed\n");
return 0;
}
@@ -2002,11 +2315,6 @@ void r100_vram_init_sizes(struct radeon_device *rdev)
else
rdev->mc.mc_vram_size = rdev->mc.real_vram_size;
}
- /* FIXME remove this once we support unmappable VRAM */
- if (rdev->mc.mc_vram_size > rdev->mc.aper_size) {
- rdev->mc.mc_vram_size = rdev->mc.aper_size;
- rdev->mc.real_vram_size = rdev->mc.aper_size;
- }
}
void r100_vga_set_state(struct radeon_device *rdev, bool state)
@@ -2335,53 +2643,53 @@ void r100_bandwidth_update(struct radeon_device *rdev)
fixed20_12 peak_disp_bw, mem_bw, pix_clk, pix_clk2, temp_ff, crit_point_ff;
uint32_t temp, data, mem_trcd, mem_trp, mem_tras;
fixed20_12 memtcas_ff[8] = {
- fixed_init(1),
- fixed_init(2),
- fixed_init(3),
- fixed_init(0),
- fixed_init_half(1),
- fixed_init_half(2),
- fixed_init(0),
+ dfixed_init(1),
+ dfixed_init(2),
+ dfixed_init(3),
+ dfixed_init(0),
+ dfixed_init_half(1),
+ dfixed_init_half(2),
+ dfixed_init(0),
};
fixed20_12 memtcas_rs480_ff[8] = {
- fixed_init(0),
- fixed_init(1),
- fixed_init(2),
- fixed_init(3),
- fixed_init(0),
- fixed_init_half(1),
- fixed_init_half(2),
- fixed_init_half(3),
+ dfixed_init(0),
+ dfixed_init(1),
+ dfixed_init(2),
+ dfixed_init(3),
+ dfixed_init(0),
+ dfixed_init_half(1),
+ dfixed_init_half(2),
+ dfixed_init_half(3),
};
fixed20_12 memtcas2_ff[8] = {
- fixed_init(0),
- fixed_init(1),
- fixed_init(2),
- fixed_init(3),
- fixed_init(4),
- fixed_init(5),
- fixed_init(6),
- fixed_init(7),
+ dfixed_init(0),
+ dfixed_init(1),
+ dfixed_init(2),
+ dfixed_init(3),
+ dfixed_init(4),
+ dfixed_init(5),
+ dfixed_init(6),
+ dfixed_init(7),
};
fixed20_12 memtrbs[8] = {
- fixed_init(1),
- fixed_init_half(1),
- fixed_init(2),
- fixed_init_half(2),
- fixed_init(3),
- fixed_init_half(3),
- fixed_init(4),
- fixed_init_half(4)
+ dfixed_init(1),
+ dfixed_init_half(1),
+ dfixed_init(2),
+ dfixed_init_half(2),
+ dfixed_init(3),
+ dfixed_init_half(3),
+ dfixed_init(4),
+ dfixed_init_half(4)
};
fixed20_12 memtrbs_r4xx[8] = {
- fixed_init(4),
- fixed_init(5),
- fixed_init(6),
- fixed_init(7),
- fixed_init(8),
- fixed_init(9),
- fixed_init(10),
- fixed_init(11)
+ dfixed_init(4),
+ dfixed_init(5),
+ dfixed_init(6),
+ dfixed_init(7),
+ dfixed_init(8),
+ dfixed_init(9),
+ dfixed_init(10),
+ dfixed_init(11)
};
fixed20_12 min_mem_eff;
fixed20_12 mc_latency_sclk, mc_latency_mclk, k1;
@@ -2412,7 +2720,7 @@ void r100_bandwidth_update(struct radeon_device *rdev)
}
}
- min_mem_eff.full = rfixed_const_8(0);
+ min_mem_eff.full = dfixed_const_8(0);
/* get modes */
if ((rdev->disp_priority == 2) && ASIC_IS_R300(rdev)) {
uint32_t mc_init_misc_lat_timer = RREG32(R300_MC_INIT_MISC_LAT_TIMER);
@@ -2433,28 +2741,28 @@ void r100_bandwidth_update(struct radeon_device *rdev)
mclk_ff = rdev->pm.mclk;
temp = (rdev->mc.vram_width / 8) * (rdev->mc.vram_is_ddr ? 2 : 1);
- temp_ff.full = rfixed_const(temp);
- mem_bw.full = rfixed_mul(mclk_ff, temp_ff);
+ temp_ff.full = dfixed_const(temp);
+ mem_bw.full = dfixed_mul(mclk_ff, temp_ff);
pix_clk.full = 0;
pix_clk2.full = 0;
peak_disp_bw.full = 0;
if (mode1) {
- temp_ff.full = rfixed_const(1000);
- pix_clk.full = rfixed_const(mode1->clock); /* convert to fixed point */
- pix_clk.full = rfixed_div(pix_clk, temp_ff);
- temp_ff.full = rfixed_const(pixel_bytes1);
- peak_disp_bw.full += rfixed_mul(pix_clk, temp_ff);
+ temp_ff.full = dfixed_const(1000);
+ pix_clk.full = dfixed_const(mode1->clock); /* convert to fixed point */
+ pix_clk.full = dfixed_div(pix_clk, temp_ff);
+ temp_ff.full = dfixed_const(pixel_bytes1);
+ peak_disp_bw.full += dfixed_mul(pix_clk, temp_ff);
}
if (mode2) {
- temp_ff.full = rfixed_const(1000);
- pix_clk2.full = rfixed_const(mode2->clock); /* convert to fixed point */
- pix_clk2.full = rfixed_div(pix_clk2, temp_ff);
- temp_ff.full = rfixed_const(pixel_bytes2);
- peak_disp_bw.full += rfixed_mul(pix_clk2, temp_ff);
+ temp_ff.full = dfixed_const(1000);
+ pix_clk2.full = dfixed_const(mode2->clock); /* convert to fixed point */
+ pix_clk2.full = dfixed_div(pix_clk2, temp_ff);
+ temp_ff.full = dfixed_const(pixel_bytes2);
+ peak_disp_bw.full += dfixed_mul(pix_clk2, temp_ff);
}
- mem_bw.full = rfixed_mul(mem_bw, min_mem_eff);
+ mem_bw.full = dfixed_mul(mem_bw, min_mem_eff);
if (peak_disp_bw.full >= mem_bw.full) {
DRM_ERROR("You may not have enough display bandwidth for current mode\n"
"If you have flickering problem, try to lower resolution, refresh rate, or color depth\n");
@@ -2496,9 +2804,9 @@ void r100_bandwidth_update(struct radeon_device *rdev)
mem_tras = ((temp >> 12) & 0xf) + 4;
}
/* convert to FF */
- trcd_ff.full = rfixed_const(mem_trcd);
- trp_ff.full = rfixed_const(mem_trp);
- tras_ff.full = rfixed_const(mem_tras);
+ trcd_ff.full = dfixed_const(mem_trcd);
+ trp_ff.full = dfixed_const(mem_trp);
+ tras_ff.full = dfixed_const(mem_tras);
/* Get values from the MEM_SDRAM_MODE_REG register...converting its */
temp = RREG32(RADEON_MEM_SDRAM_MODE_REG);
@@ -2516,7 +2824,7 @@ void r100_bandwidth_update(struct radeon_device *rdev)
/* extra cas latency stored in bits 23-25 0-4 clocks */
data = (temp >> 23) & 0x7;
if (data < 5)
- tcas_ff.full += rfixed_const(data);
+ tcas_ff.full += dfixed_const(data);
}
if (ASIC_IS_R300(rdev) && !(rdev->flags & RADEON_IS_IGP)) {
@@ -2553,72 +2861,72 @@ void r100_bandwidth_update(struct radeon_device *rdev)
if (rdev->flags & RADEON_IS_AGP) {
fixed20_12 agpmode_ff;
- agpmode_ff.full = rfixed_const(radeon_agpmode);
- temp_ff.full = rfixed_const_666(16);
- sclk_eff_ff.full -= rfixed_mul(agpmode_ff, temp_ff);
+ agpmode_ff.full = dfixed_const(radeon_agpmode);
+ temp_ff.full = dfixed_const_666(16);
+ sclk_eff_ff.full -= dfixed_mul(agpmode_ff, temp_ff);
}
/* TODO PCIE lanes may affect this - agpmode == 16?? */
if (ASIC_IS_R300(rdev)) {
- sclk_delay_ff.full = rfixed_const(250);
+ sclk_delay_ff.full = dfixed_const(250);
} else {
if ((rdev->family == CHIP_RV100) ||
rdev->flags & RADEON_IS_IGP) {
if (rdev->mc.vram_is_ddr)
- sclk_delay_ff.full = rfixed_const(41);
+ sclk_delay_ff.full = dfixed_const(41);
else
- sclk_delay_ff.full = rfixed_const(33);
+ sclk_delay_ff.full = dfixed_const(33);
} else {
if (rdev->mc.vram_width == 128)
- sclk_delay_ff.full = rfixed_const(57);
+ sclk_delay_ff.full = dfixed_const(57);
else
- sclk_delay_ff.full = rfixed_const(41);
+ sclk_delay_ff.full = dfixed_const(41);
}
}
- mc_latency_sclk.full = rfixed_div(sclk_delay_ff, sclk_eff_ff);
+ mc_latency_sclk.full = dfixed_div(sclk_delay_ff, sclk_eff_ff);
if (rdev->mc.vram_is_ddr) {
if (rdev->mc.vram_width == 32) {
- k1.full = rfixed_const(40);
+ k1.full = dfixed_const(40);
c = 3;
} else {
- k1.full = rfixed_const(20);
+ k1.full = dfixed_const(20);
c = 1;
}
} else {
- k1.full = rfixed_const(40);
+ k1.full = dfixed_const(40);
c = 3;
}
- temp_ff.full = rfixed_const(2);
- mc_latency_mclk.full = rfixed_mul(trcd_ff, temp_ff);
- temp_ff.full = rfixed_const(c);
- mc_latency_mclk.full += rfixed_mul(tcas_ff, temp_ff);
- temp_ff.full = rfixed_const(4);
- mc_latency_mclk.full += rfixed_mul(tras_ff, temp_ff);
- mc_latency_mclk.full += rfixed_mul(trp_ff, temp_ff);
+ temp_ff.full = dfixed_const(2);
+ mc_latency_mclk.full = dfixed_mul(trcd_ff, temp_ff);
+ temp_ff.full = dfixed_const(c);
+ mc_latency_mclk.full += dfixed_mul(tcas_ff, temp_ff);
+ temp_ff.full = dfixed_const(4);
+ mc_latency_mclk.full += dfixed_mul(tras_ff, temp_ff);
+ mc_latency_mclk.full += dfixed_mul(trp_ff, temp_ff);
mc_latency_mclk.full += k1.full;
- mc_latency_mclk.full = rfixed_div(mc_latency_mclk, mclk_ff);
- mc_latency_mclk.full += rfixed_div(temp_ff, sclk_eff_ff);
+ mc_latency_mclk.full = dfixed_div(mc_latency_mclk, mclk_ff);
+ mc_latency_mclk.full += dfixed_div(temp_ff, sclk_eff_ff);
/*
HW cursor time assuming worst case of full size colour cursor.
*/
- temp_ff.full = rfixed_const((2 * (cur_size - (rdev->mc.vram_is_ddr + 1))));
+ temp_ff.full = dfixed_const((2 * (cur_size - (rdev->mc.vram_is_ddr + 1))));
temp_ff.full += trcd_ff.full;
if (temp_ff.full < tras_ff.full)
temp_ff.full = tras_ff.full;
- cur_latency_mclk.full = rfixed_div(temp_ff, mclk_ff);
+ cur_latency_mclk.full = dfixed_div(temp_ff, mclk_ff);
- temp_ff.full = rfixed_const(cur_size);
- cur_latency_sclk.full = rfixed_div(temp_ff, sclk_eff_ff);
+ temp_ff.full = dfixed_const(cur_size);
+ cur_latency_sclk.full = dfixed_div(temp_ff, sclk_eff_ff);
/*
Find the total latency for the display data.
*/
- disp_latency_overhead.full = rfixed_const(8);
- disp_latency_overhead.full = rfixed_div(disp_latency_overhead, sclk_ff);
+ disp_latency_overhead.full = dfixed_const(8);
+ disp_latency_overhead.full = dfixed_div(disp_latency_overhead, sclk_ff);
mc_latency_mclk.full += disp_latency_overhead.full + cur_latency_mclk.full;
mc_latency_sclk.full += disp_latency_overhead.full + cur_latency_sclk.full;
@@ -2646,16 +2954,16 @@ void r100_bandwidth_update(struct radeon_device *rdev)
/*
Find the drain rate of the display buffer.
*/
- temp_ff.full = rfixed_const((16/pixel_bytes1));
- disp_drain_rate.full = rfixed_div(pix_clk, temp_ff);
+ temp_ff.full = dfixed_const((16/pixel_bytes1));
+ disp_drain_rate.full = dfixed_div(pix_clk, temp_ff);
/*
Find the critical point of the display buffer.
*/
- crit_point_ff.full = rfixed_mul(disp_drain_rate, disp_latency);
- crit_point_ff.full += rfixed_const_half(0);
+ crit_point_ff.full = dfixed_mul(disp_drain_rate, disp_latency);
+ crit_point_ff.full += dfixed_const_half(0);
- critical_point = rfixed_trunc(crit_point_ff);
+ critical_point = dfixed_trunc(crit_point_ff);
if (rdev->disp_priority == 2) {
critical_point = 0;
@@ -2726,8 +3034,8 @@ void r100_bandwidth_update(struct radeon_device *rdev)
/*
Find the drain rate of the display buffer.
*/
- temp_ff.full = rfixed_const((16/pixel_bytes2));
- disp_drain_rate2.full = rfixed_div(pix_clk2, temp_ff);
+ temp_ff.full = dfixed_const((16/pixel_bytes2));
+ disp_drain_rate2.full = dfixed_div(pix_clk2, temp_ff);
grph2_cntl = RREG32(RADEON_GRPH2_BUFFER_CNTL);
grph2_cntl &= ~(RADEON_GRPH_STOP_REQ_MASK);
@@ -2748,8 +3056,8 @@ void r100_bandwidth_update(struct radeon_device *rdev)
critical_point2 = 0;
else {
temp = (rdev->mc.vram_width * rdev->mc.vram_is_ddr + 1)/128;
- temp_ff.full = rfixed_const(temp);
- temp_ff.full = rfixed_mul(mclk_ff, temp_ff);
+ temp_ff.full = dfixed_const(temp);
+ temp_ff.full = dfixed_mul(mclk_ff, temp_ff);
if (sclk_ff.full < temp_ff.full)
temp_ff.full = sclk_ff.full;
@@ -2757,15 +3065,15 @@ void r100_bandwidth_update(struct radeon_device *rdev)
if (mode1) {
temp_ff.full = read_return_rate.full - disp_drain_rate.full;
- time_disp1_drop_priority.full = rfixed_div(crit_point_ff, temp_ff);
+ time_disp1_drop_priority.full = dfixed_div(crit_point_ff, temp_ff);
} else {
time_disp1_drop_priority.full = 0;
}
crit_point_ff.full = disp_latency.full + time_disp1_drop_priority.full + disp_latency.full;
- crit_point_ff.full = rfixed_mul(crit_point_ff, disp_drain_rate2);
- crit_point_ff.full += rfixed_const_half(0);
+ crit_point_ff.full = dfixed_mul(crit_point_ff, disp_drain_rate2);
+ crit_point_ff.full += dfixed_const_half(0);
- critical_point2 = rfixed_trunc(crit_point_ff);
+ critical_point2 = dfixed_trunc(crit_point_ff);
if (rdev->disp_priority == 2) {
critical_point2 = 0;
@@ -2975,7 +3283,7 @@ int r100_cs_track_check(struct radeon_device *rdev, struct r100_cs_track *track)
for (i = 0; i < track->num_cb; i++) {
if (track->cb[i].robj == NULL) {
- if (!(track->fastfill || track->color_channel_mask ||
+ if (!(track->zb_cb_clear || track->color_channel_mask ||
track->blend_read_enable)) {
continue;
}
@@ -3399,7 +3707,7 @@ static int r100_startup(struct radeon_device *rdev)
/* Resume clock */
r100_clock_startup(rdev);
/* Initialize GPU configuration (# pipes, ...) */
- r100_gpu_init(rdev);
+ /* r100_gpu_init(rdev); */
/* Initialize GART (initialize after TTM so we can allocate
* memory through TTM but finalize after TTM) */
r100_enable_bm(rdev);
@@ -3436,7 +3744,7 @@ int r100_resume(struct radeon_device *rdev)
/* Resume clock before doing reset */
r100_clock_startup(rdev);
/* Reset gpu before posting otherwise ATOM will enter infinite loop */
- if (radeon_gpu_reset(rdev)) {
+ if (radeon_asic_reset(rdev)) {
dev_warn(rdev->dev, "GPU reset failed ! (0xE40=0x%08X, 0x7C0=0x%08X)\n",
RREG32(R_000E40_RBBM_STATUS),
RREG32(R_0007C0_CP_STAT));
@@ -3462,7 +3770,6 @@ int r100_suspend(struct radeon_device *rdev)
void r100_fini(struct radeon_device *rdev)
{
- radeon_pm_fini(rdev);
r100_cp_fini(rdev);
r100_wb_fini(rdev);
r100_ib_fini(rdev);
@@ -3505,7 +3812,7 @@ int r100_init(struct radeon_device *rdev)
return r;
}
/* Reset gpu before posting otherwise ATOM will enter infinite loop */
- if (radeon_gpu_reset(rdev)) {
+ if (radeon_asic_reset(rdev)) {
dev_warn(rdev->dev,
"GPU reset failed ! (0xE40=0x%08X, 0x7C0=0x%08X)\n",
RREG32(R_000E40_RBBM_STATUS),
@@ -3518,8 +3825,6 @@ int r100_init(struct radeon_device *rdev)
r100_errata(rdev);
/* Initialize clocks */
radeon_get_clock_info(rdev->ddev);
- /* Initialize power management */
- radeon_pm_init(rdev);
/* initialize AGP */
if (rdev->flags & RADEON_IS_AGP) {
r = radeon_agp_init(rdev);
diff --git a/drivers/gpu/drm/radeon/r100_track.h b/drivers/gpu/drm/radeon/r100_track.h
index fadfe68de9c..f47cdca1c00 100644
--- a/drivers/gpu/drm/radeon/r100_track.h
+++ b/drivers/gpu/drm/radeon/r100_track.h
@@ -75,7 +75,7 @@ struct r100_cs_track {
struct r100_cs_track_texture textures[R300_TRACK_MAX_TEXTURE];
bool z_enabled;
bool separate_cube;
- bool fastfill;
+ bool zb_cb_clear;
bool blend_read_enable;
};
diff --git a/drivers/gpu/drm/radeon/r100d.h b/drivers/gpu/drm/radeon/r100d.h
index df29a630c46..d016b16fa11 100644
--- a/drivers/gpu/drm/radeon/r100d.h
+++ b/drivers/gpu/drm/radeon/r100d.h
@@ -74,6 +74,134 @@
#define CP_PACKET3_GET_OPCODE(h) (((h) >> 8) & 0xFF)
/* Registers */
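+/* Field helper convention used throughout: for a field FOO of register
+ * R_xxxxxx, S_xxxxxx_FOO(x) shifts a value into the field, G_xxxxxx_FOO(x)
+ * extracts the field from a register value, and C_xxxxxx_FOO is the AND
+ * mask that clears it. */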
+#define R_0000F0_RBBM_SOFT_RESET 0x0000F0
+#define S_0000F0_SOFT_RESET_CP(x) (((x) & 0x1) << 0)
+#define G_0000F0_SOFT_RESET_CP(x) (((x) >> 0) & 0x1)
+#define C_0000F0_SOFT_RESET_CP 0xFFFFFFFE
+#define S_0000F0_SOFT_RESET_HI(x) (((x) & 0x1) << 1)
+#define G_0000F0_SOFT_RESET_HI(x) (((x) >> 1) & 0x1)
+#define C_0000F0_SOFT_RESET_HI 0xFFFFFFFD
+#define S_0000F0_SOFT_RESET_SE(x) (((x) & 0x1) << 2)
+#define G_0000F0_SOFT_RESET_SE(x) (((x) >> 2) & 0x1)
+#define C_0000F0_SOFT_RESET_SE 0xFFFFFFFB
+#define S_0000F0_SOFT_RESET_RE(x) (((x) & 0x1) << 3)
+#define G_0000F0_SOFT_RESET_RE(x) (((x) >> 3) & 0x1)
+#define C_0000F0_SOFT_RESET_RE 0xFFFFFFF7
+#define S_0000F0_SOFT_RESET_PP(x) (((x) & 0x1) << 4)
+#define G_0000F0_SOFT_RESET_PP(x) (((x) >> 4) & 0x1)
+#define C_0000F0_SOFT_RESET_PP 0xFFFFFFEF
+#define S_0000F0_SOFT_RESET_E2(x) (((x) & 0x1) << 5)
+#define G_0000F0_SOFT_RESET_E2(x) (((x) >> 5) & 0x1)
+#define C_0000F0_SOFT_RESET_E2 0xFFFFFFDF
+#define S_0000F0_SOFT_RESET_RB(x) (((x) & 0x1) << 6)
+#define G_0000F0_SOFT_RESET_RB(x) (((x) >> 6) & 0x1)
+#define C_0000F0_SOFT_RESET_RB 0xFFFFFFBF
+#define S_0000F0_SOFT_RESET_HDP(x) (((x) & 0x1) << 7)
+#define G_0000F0_SOFT_RESET_HDP(x) (((x) >> 7) & 0x1)
+#define C_0000F0_SOFT_RESET_HDP 0xFFFFFF7F
+#define S_0000F0_SOFT_RESET_MC(x) (((x) & 0x1) << 8)
+#define G_0000F0_SOFT_RESET_MC(x) (((x) >> 8) & 0x1)
+#define C_0000F0_SOFT_RESET_MC 0xFFFFFEFF
+#define S_0000F0_SOFT_RESET_AIC(x) (((x) & 0x1) << 9)
+#define G_0000F0_SOFT_RESET_AIC(x) (((x) >> 9) & 0x1)
+#define C_0000F0_SOFT_RESET_AIC 0xFFFFFDFF
+#define S_0000F0_SOFT_RESET_VIP(x) (((x) & 0x1) << 10)
+#define G_0000F0_SOFT_RESET_VIP(x) (((x) >> 10) & 0x1)
+#define C_0000F0_SOFT_RESET_VIP 0xFFFFFBFF
+#define S_0000F0_SOFT_RESET_DISP(x) (((x) & 0x1) << 11)
+#define G_0000F0_SOFT_RESET_DISP(x) (((x) >> 11) & 0x1)
+#define C_0000F0_SOFT_RESET_DISP 0xFFFFF7FF
+#define S_0000F0_SOFT_RESET_CG(x) (((x) & 0x1) << 12)
+#define G_0000F0_SOFT_RESET_CG(x) (((x) >> 12) & 0x1)
+#define C_0000F0_SOFT_RESET_CG 0xFFFFEFFF
+#define R_000030_BUS_CNTL 0x000030
+#define S_000030_BUS_DBL_RESYNC(x) (((x) & 0x1) << 0)
+#define G_000030_BUS_DBL_RESYNC(x) (((x) >> 0) & 0x1)
+#define C_000030_BUS_DBL_RESYNC 0xFFFFFFFE
+#define S_000030_BUS_MSTR_RESET(x) (((x) & 0x1) << 1)
+#define G_000030_BUS_MSTR_RESET(x) (((x) >> 1) & 0x1)
+#define C_000030_BUS_MSTR_RESET 0xFFFFFFFD
+#define S_000030_BUS_FLUSH_BUF(x) (((x) & 0x1) << 2)
+#define G_000030_BUS_FLUSH_BUF(x) (((x) >> 2) & 0x1)
+#define C_000030_BUS_FLUSH_BUF 0xFFFFFFFB
+#define S_000030_BUS_STOP_REQ_DIS(x) (((x) & 0x1) << 3)
+#define G_000030_BUS_STOP_REQ_DIS(x) (((x) >> 3) & 0x1)
+#define C_000030_BUS_STOP_REQ_DIS 0xFFFFFFF7
+#define S_000030_BUS_PM4_READ_COMBINE_EN(x) (((x) & 0x1) << 4)
+#define G_000030_BUS_PM4_READ_COMBINE_EN(x) (((x) >> 4) & 0x1)
+#define C_000030_BUS_PM4_READ_COMBINE_EN 0xFFFFFFEF
+#define S_000030_BUS_WRT_COMBINE_EN(x) (((x) & 0x1) << 5)
+#define G_000030_BUS_WRT_COMBINE_EN(x) (((x) >> 5) & 0x1)
+#define C_000030_BUS_WRT_COMBINE_EN 0xFFFFFFDF
+#define S_000030_BUS_MASTER_DIS(x) (((x) & 0x1) << 6)
+#define G_000030_BUS_MASTER_DIS(x) (((x) >> 6) & 0x1)
+#define C_000030_BUS_MASTER_DIS 0xFFFFFFBF
+#define S_000030_BIOS_ROM_WRT_EN(x) (((x) & 0x1) << 7)
+#define G_000030_BIOS_ROM_WRT_EN(x) (((x) >> 7) & 0x1)
+#define C_000030_BIOS_ROM_WRT_EN 0xFFFFFF7F
+#define S_000030_BM_DAC_CRIPPLE(x) (((x) & 0x1) << 8)
+#define G_000030_BM_DAC_CRIPPLE(x) (((x) >> 8) & 0x1)
+#define C_000030_BM_DAC_CRIPPLE 0xFFFFFEFF
+#define S_000030_BUS_NON_PM4_READ_COMBINE_EN(x) (((x) & 0x1) << 9)
+#define G_000030_BUS_NON_PM4_READ_COMBINE_EN(x) (((x) >> 9) & 0x1)
+#define C_000030_BUS_NON_PM4_READ_COMBINE_EN 0xFFFFFDFF
+#define S_000030_BUS_XFERD_DISCARD_EN(x) (((x) & 0x1) << 10)
+#define G_000030_BUS_XFERD_DISCARD_EN(x) (((x) >> 10) & 0x1)
+#define C_000030_BUS_XFERD_DISCARD_EN 0xFFFFFBFF
+#define S_000030_BUS_SGL_READ_DISABLE(x) (((x) & 0x1) << 11)
+#define G_000030_BUS_SGL_READ_DISABLE(x) (((x) >> 11) & 0x1)
+#define C_000030_BUS_SGL_READ_DISABLE 0xFFFFF7FF
+#define S_000030_BIOS_DIS_ROM(x) (((x) & 0x1) << 12)
+#define G_000030_BIOS_DIS_ROM(x) (((x) >> 12) & 0x1)
+#define C_000030_BIOS_DIS_ROM 0xFFFFEFFF
+#define S_000030_BUS_PCI_READ_RETRY_EN(x) (((x) & 0x1) << 13)
+#define G_000030_BUS_PCI_READ_RETRY_EN(x) (((x) >> 13) & 0x1)
+#define C_000030_BUS_PCI_READ_RETRY_EN 0xFFFFDFFF
+#define S_000030_BUS_AGP_AD_STEPPING_EN(x) (((x) & 0x1) << 14)
+#define G_000030_BUS_AGP_AD_STEPPING_EN(x) (((x) >> 14) & 0x1)
+#define C_000030_BUS_AGP_AD_STEPPING_EN 0xFFFFBFFF
+#define S_000030_BUS_PCI_WRT_RETRY_EN(x) (((x) & 0x1) << 15)
+#define G_000030_BUS_PCI_WRT_RETRY_EN(x) (((x) >> 15) & 0x1)
+#define C_000030_BUS_PCI_WRT_RETRY_EN 0xFFFF7FFF
+#define S_000030_BUS_RETRY_WS(x) (((x) & 0xF) << 16)
+#define G_000030_BUS_RETRY_WS(x) (((x) >> 16) & 0xF)
+#define C_000030_BUS_RETRY_WS 0xFFF0FFFF
+#define S_000030_BUS_MSTR_RD_MULT(x) (((x) & 0x1) << 20)
+#define G_000030_BUS_MSTR_RD_MULT(x) (((x) >> 20) & 0x1)
+#define C_000030_BUS_MSTR_RD_MULT 0xFFEFFFFF
+#define S_000030_BUS_MSTR_RD_LINE(x) (((x) & 0x1) << 21)
+#define G_000030_BUS_MSTR_RD_LINE(x) (((x) >> 21) & 0x1)
+#define C_000030_BUS_MSTR_RD_LINE 0xFFDFFFFF
+#define S_000030_BUS_SUSPEND(x) (((x) & 0x1) << 22)
+#define G_000030_BUS_SUSPEND(x) (((x) >> 22) & 0x1)
+#define C_000030_BUS_SUSPEND 0xFFBFFFFF
+#define S_000030_LAT_16X(x) (((x) & 0x1) << 23)
+#define G_000030_LAT_16X(x) (((x) >> 23) & 0x1)
+#define C_000030_LAT_16X 0xFF7FFFFF
+#define S_000030_BUS_RD_DISCARD_EN(x) (((x) & 0x1) << 24)
+#define G_000030_BUS_RD_DISCARD_EN(x) (((x) >> 24) & 0x1)
+#define C_000030_BUS_RD_DISCARD_EN 0xFEFFFFFF
+#define S_000030_ENFRCWRDY(x) (((x) & 0x1) << 25)
+#define G_000030_ENFRCWRDY(x) (((x) >> 25) & 0x1)
+#define C_000030_ENFRCWRDY 0xFDFFFFFF
+#define S_000030_BUS_MSTR_WS(x) (((x) & 0x1) << 26)
+#define G_000030_BUS_MSTR_WS(x) (((x) >> 26) & 0x1)
+#define C_000030_BUS_MSTR_WS 0xFBFFFFFF
+#define S_000030_BUS_PARKING_DIS(x) (((x) & 0x1) << 27)
+#define G_000030_BUS_PARKING_DIS(x) (((x) >> 27) & 0x1)
+#define C_000030_BUS_PARKING_DIS 0xF7FFFFFF
+#define S_000030_BUS_MSTR_DISCONNECT_EN(x) (((x) & 0x1) << 28)
+#define G_000030_BUS_MSTR_DISCONNECT_EN(x) (((x) >> 28) & 0x1)
+#define C_000030_BUS_MSTR_DISCONNECT_EN 0xEFFFFFFF
+#define S_000030_SERR_EN(x) (((x) & 0x1) << 29)
+#define G_000030_SERR_EN(x) (((x) >> 29) & 0x1)
+#define C_000030_SERR_EN 0xDFFFFFFF
+#define S_000030_BUS_READ_BURST(x) (((x) & 0x1) << 30)
+#define G_000030_BUS_READ_BURST(x) (((x) >> 30) & 0x1)
+#define C_000030_BUS_READ_BURST 0xBFFFFFFF
+#define S_000030_BUS_RDY_READ_DLY(x) (((x) & 0x1) << 31)
+#define G_000030_BUS_RDY_READ_DLY(x) (((x) >> 31) & 0x1)
+#define C_000030_BUS_RDY_READ_DLY 0x7FFFFFFF
#define R_000040_GEN_INT_CNTL 0x000040
#define S_000040_CRTC_VBLANK(x) (((x) & 0x1) << 0)
#define G_000040_CRTC_VBLANK(x) (((x) >> 0) & 0x1)
@@ -710,5 +838,41 @@
#define G_00000D_FORCE_RB(x) (((x) >> 28) & 0x1)
#define C_00000D_FORCE_RB 0xEFFFFFFF
+/* PLL regs */
+#define SCLK_CNTL 0xd
+#define FORCE_HDP (1 << 17)
+#define CLK_PWRMGT_CNTL 0x14
+#define GLOBAL_PMAN_EN (1 << 10)
+#define DISP_PM (1 << 20)
+#define PLL_PWRMGT_CNTL 0x15
+#define MPLL_TURNOFF (1 << 0)
+#define SPLL_TURNOFF (1 << 1)
+#define PPLL_TURNOFF (1 << 2)
+#define P2PLL_TURNOFF (1 << 3)
+#define TVPLL_TURNOFF (1 << 4)
+#define MOBILE_SU (1 << 16)
+#define SU_SCLK_USE_BCLK (1 << 17)
+#define SCLK_CNTL2 0x1e
+#define REDUCED_SPEED_SCLK_MODE (1 << 16)
+#define REDUCED_SPEED_SCLK_SEL(x) ((x) << 17)
+#define MCLK_MISC 0x1f
+#define EN_MCLK_TRISTATE_IN_SUSPEND (1 << 18)
+#define SCLK_MORE_CNTL 0x35
+#define REDUCED_SPEED_SCLK_EN (1 << 16)
+#define IO_CG_VOLTAGE_DROP (1 << 17)
+#define VOLTAGE_DELAY_SEL(x) ((x) << 20)
+#define VOLTAGE_DROP_SYNC (1 << 19)
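+/* the PLL registers above live in the indirect PLL address space and are
+ * accessed via RREG32_PLL()/WREG32_PLL(), cf. r100_pm_misc() */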
+
+/* mmreg */
+#define DISP_PWR_MAN 0xd08
+#define DISP_D3_GRPH_RST (1 << 18)
+#define DISP_D3_SUBPIC_RST (1 << 19)
+#define DISP_D3_OV0_RST (1 << 20)
+#define DISP_D1D2_GRPH_RST (1 << 21)
+#define DISP_D1D2_SUBPIC_RST (1 << 22)
+#define DISP_D1D2_OV0_RST (1 << 23)
+#define DISP_DVO_ENABLE_RST (1 << 24)
+#define TV_ENABLE_RST (1 << 25)
+#define AUTO_PWRUP_EN (1 << 26)
#endif
diff --git a/drivers/gpu/drm/radeon/r300.c b/drivers/gpu/drm/radeon/r300.c
index bd75f99bd65..b2f9efe2897 100644
--- a/drivers/gpu/drm/radeon/r300.c
+++ b/drivers/gpu/drm/radeon/r300.c
@@ -27,8 +27,9 @@
*/
#include <linux/seq_file.h>
#include <linux/slab.h>
-#include "drmP.h"
-#include "drm.h"
+#include <drm/drmP.h>
+#include <drm/drm.h>
+#include <drm/drm_crtc_helper.h>
#include "radeon_reg.h"
#include "radeon.h"
#include "radeon_asic.h"
@@ -151,6 +152,10 @@ void rv370_pcie_gart_disable(struct radeon_device *rdev)
u32 tmp;
int r;
+ WREG32_PCIE(RADEON_PCIE_TX_GART_START_LO, 0);
+ WREG32_PCIE(RADEON_PCIE_TX_GART_END_LO, 0);
+ WREG32_PCIE(RADEON_PCIE_TX_GART_START_HI, 0);
+ WREG32_PCIE(RADEON_PCIE_TX_GART_END_HI, 0);
tmp = RREG32_PCIE(RADEON_PCIE_TX_GART_CNTL);
tmp |= RADEON_PCIE_TX_GART_UNMAPPED_ACCESS_DISCARD;
WREG32_PCIE(RADEON_PCIE_TX_GART_CNTL, tmp & ~RADEON_PCIE_TX_GART_EN);
@@ -323,14 +328,12 @@ void r300_gpu_init(struct radeon_device *rdev)
{
uint32_t gb_tile_config, tmp;
- r100_hdp_reset(rdev);
- /* FIXME: rv380 one pipes ? */
if ((rdev->family == CHIP_R300 && rdev->pdev->device != 0x4144) ||
- (rdev->family == CHIP_R350)) {
+ (rdev->family == CHIP_R350 && rdev->pdev->device != 0x4148)) {
/* r300,r350 */
rdev->num_gb_pipes = 2;
} else {
- /* rv350,rv370,rv380,r300 AD */
+ /* rv350,rv370,rv380,r300 AD, r350 AH */
rdev->num_gb_pipes = 1;
}
rdev->num_z_pipes = 1;
@@ -376,89 +379,85 @@ void r300_gpu_init(struct radeon_device *rdev)
rdev->num_gb_pipes, rdev->num_z_pipes);
}
-int r300_ga_reset(struct radeon_device *rdev)
+bool r300_gpu_is_lockup(struct radeon_device *rdev)
{
- uint32_t tmp;
- bool reinit_cp;
- int i;
+ u32 rbbm_status;
+ int r;
- reinit_cp = rdev->cp.ready;
- rdev->cp.ready = false;
- for (i = 0; i < rdev->usec_timeout; i++) {
- WREG32(RADEON_CP_CSQ_MODE, 0);
- WREG32(RADEON_CP_CSQ_CNTL, 0);
- WREG32(RADEON_RBBM_SOFT_RESET, 0x32005);
- (void)RREG32(RADEON_RBBM_SOFT_RESET);
- udelay(200);
- WREG32(RADEON_RBBM_SOFT_RESET, 0);
- /* Wait to prevent race in RBBM_STATUS */
- mdelay(1);
- tmp = RREG32(RADEON_RBBM_STATUS);
- if (tmp & ((1 << 20) | (1 << 26))) {
- DRM_ERROR("VAP & CP still busy (RBBM_STATUS=0x%08X)", tmp);
- /* GA still busy soft reset it */
- WREG32(0x429C, 0x200);
- WREG32(R300_VAP_PVS_STATE_FLUSH_REG, 0);
- WREG32(R300_RE_SCISSORS_TL, 0);
- WREG32(R300_RE_SCISSORS_BR, 0);
- WREG32(0x24AC, 0);
- }
- /* Wait to prevent race in RBBM_STATUS */
- mdelay(1);
- tmp = RREG32(RADEON_RBBM_STATUS);
- if (!(tmp & ((1 << 20) | (1 << 26)))) {
- break;
- }
+ rbbm_status = RREG32(R_000E40_RBBM_STATUS);
+ if (!G_000E40_GUI_ACTIVE(rbbm_status)) {
+ r100_gpu_lockup_update(&rdev->config.r300.lockup, &rdev->cp);
+ return false;
}
- for (i = 0; i < rdev->usec_timeout; i++) {
- tmp = RREG32(RADEON_RBBM_STATUS);
- if (!(tmp & ((1 << 20) | (1 << 26)))) {
- DRM_INFO("GA reset succeed (RBBM_STATUS=0x%08X)\n",
- tmp);
- if (reinit_cp) {
- return r100_cp_init(rdev, rdev->cp.ring_size);
- }
- return 0;
- }
- DRM_UDELAY(1);
+ /* force CP activities */
+ r = radeon_ring_lock(rdev, 2);
+ if (!r) {
+ /* PACKET2 NOP */
+ radeon_ring_write(rdev, 0x80000000);
+ radeon_ring_write(rdev, 0x80000000);
+ radeon_ring_unlock_commit(rdev);
}
- tmp = RREG32(RADEON_RBBM_STATUS);
- DRM_ERROR("Failed to reset GA ! (RBBM_STATUS=0x%08X)\n", tmp);
- return -1;
+ rdev->cp.rptr = RREG32(RADEON_CP_RB_RPTR);
+ return r100_gpu_cp_is_lockup(rdev, &rdev->config.r300.lockup, &rdev->cp);
}
-int r300_gpu_reset(struct radeon_device *rdev)
+int r300_asic_reset(struct radeon_device *rdev)
{
- uint32_t status;
-
- /* reset order likely matter */
- status = RREG32(RADEON_RBBM_STATUS);
- /* reset HDP */
- r100_hdp_reset(rdev);
- /* reset rb2d */
- if (status & ((1 << 17) | (1 << 18) | (1 << 27))) {
- r100_rb2d_reset(rdev);
- }
- /* reset GA */
- if (status & ((1 << 20) | (1 << 26))) {
- r300_ga_reset(rdev);
- }
- /* reset CP */
- status = RREG32(RADEON_RBBM_STATUS);
- if (status & (1 << 16)) {
- r100_cp_reset(rdev);
+ struct r100_mc_save save;
+ u32 status, tmp;
+
+ r100_mc_stop(rdev, &save);
+ status = RREG32(R_000E40_RBBM_STATUS);
+ if (!G_000E40_GUI_ACTIVE(status)) {
+ return 0;
}
+ status = RREG32(R_000E40_RBBM_STATUS);
+ dev_info(rdev->dev, "(%s:%d) RBBM_STATUS=0x%08X\n", __func__, __LINE__, status);
+ /* stop CP */
+ WREG32(RADEON_CP_CSQ_CNTL, 0);
+ tmp = RREG32(RADEON_CP_RB_CNTL);
+ WREG32(RADEON_CP_RB_CNTL, tmp | RADEON_RB_RPTR_WR_ENA);
+ WREG32(RADEON_CP_RB_RPTR_WR, 0);
+ WREG32(RADEON_CP_RB_WPTR, 0);
+ WREG32(RADEON_CP_RB_CNTL, tmp);
+ /* save PCI state */
+ pci_save_state(rdev->pdev);
+ /* disable bus mastering */
+ r100_bm_disable(rdev);
+ WREG32(R_0000F0_RBBM_SOFT_RESET, S_0000F0_SOFT_RESET_VAP(1) |
+ S_0000F0_SOFT_RESET_GA(1));
+ RREG32(R_0000F0_RBBM_SOFT_RESET);
+ mdelay(500);
+ WREG32(R_0000F0_RBBM_SOFT_RESET, 0);
+ mdelay(1);
+ status = RREG32(R_000E40_RBBM_STATUS);
+ dev_info(rdev->dev, "(%s:%d) RBBM_STATUS=0x%08X\n", __func__, __LINE__, status);
+ /* resetting the CP seems to be problematic: sometimes it ends up
+ * hard-locking the machine, yet it is necessary for a successful
+ * reset. More testing and experimentation is needed on R3XX/R4XX
+ * to find a reliable solution, if one exists.
+ */
+ WREG32(R_0000F0_RBBM_SOFT_RESET, S_0000F0_SOFT_RESET_CP(1));
+ RREG32(R_0000F0_RBBM_SOFT_RESET);
+ mdelay(500);
+ WREG32(R_0000F0_RBBM_SOFT_RESET, 0);
+ mdelay(1);
+ status = RREG32(R_000E40_RBBM_STATUS);
+ dev_info(rdev->dev, "(%s:%d) RBBM_STATUS=0x%08X\n", __func__, __LINE__, status);
+ /* restore PCI & busmastering */
+ pci_restore_state(rdev->pdev);
+ r100_enable_bm(rdev);
/* Check if GPU is idle */
- status = RREG32(RADEON_RBBM_STATUS);
- if (status & RADEON_RBBM_ACTIVE) {
- DRM_ERROR("Failed to reset GPU (RBBM_STATUS=0x%08X)\n", status);
+ if (G_000E40_GA_BUSY(status) || G_000E40_VAP_BUSY(status)) {
+ dev_err(rdev->dev, "failed to reset GPU\n");
+ rdev->gpu_lockup = true;
return -1;
}
- DRM_INFO("GPU reset succeed (RBBM_STATUS=0x%08X)\n", status);
+ r100_mc_resume(rdev, &save);
+ dev_info(rdev->dev, "GPU reset succeed\n");
return 0;
}
-
/*
* r300,r350,rv350,rv380 VRAM info
*/
@@ -1045,7 +1044,7 @@ static int r300_packet0_check(struct radeon_cs_parser *p,
break;
case 0x4d1c:
/* ZB_BW_CNTL */
- track->fastfill = !!(idx_value & (1 << 2));
+ track->zb_cb_clear = !!(idx_value & (1 << 5));
break;
case 0x4e04:
/* RB3D_BLENDCNTL */
@@ -1317,7 +1316,7 @@ int r300_resume(struct radeon_device *rdev)
/* Resume clock before doing reset */
r300_clock_startup(rdev);
/* Reset gpu before posting otherwise ATOM will enter infinite loop */
- if (radeon_gpu_reset(rdev)) {
+ if (radeon_asic_reset(rdev)) {
dev_warn(rdev->dev, "GPU reset failed ! (0xE40=0x%08X, 0x7C0=0x%08X)\n",
RREG32(R_000E40_RBBM_STATUS),
RREG32(R_0007C0_CP_STAT));
@@ -1345,7 +1344,6 @@ int r300_suspend(struct radeon_device *rdev)
void r300_fini(struct radeon_device *rdev)
{
- radeon_pm_fini(rdev);
r100_cp_fini(rdev);
r100_wb_fini(rdev);
r100_ib_fini(rdev);
@@ -1388,7 +1386,7 @@ int r300_init(struct radeon_device *rdev)
return r;
}
/* Reset gpu before posting otherwise ATOM will enter infinite loop */
- if (radeon_gpu_reset(rdev)) {
+ if (radeon_asic_reset(rdev)) {
dev_warn(rdev->dev,
"GPU reset failed ! (0xE40=0x%08X, 0x7C0=0x%08X)\n",
RREG32(R_000E40_RBBM_STATUS),
@@ -1401,8 +1399,6 @@ int r300_init(struct radeon_device *rdev)
r300_errata(rdev);
/* Initialize clocks */
radeon_get_clock_info(rdev->ddev);
- /* Initialize power management */
- radeon_pm_init(rdev);
/* initialize AGP */
if (rdev->flags & RADEON_IS_AGP) {
r = radeon_agp_init(rdev);
diff --git a/drivers/gpu/drm/radeon/r300_cmdbuf.c b/drivers/gpu/drm/radeon/r300_cmdbuf.c
index ea46d558e8f..c5c2742e414 100644
--- a/drivers/gpu/drm/radeon/r300_cmdbuf.c
+++ b/drivers/gpu/drm/radeon/r300_cmdbuf.c
@@ -921,7 +921,7 @@ static int r300_scratch(drm_radeon_private_t *dev_priv,
ptr_addr = drm_buffer_read_object(cmdbuf->buffer,
sizeof(stack_ptr_addr), &stack_ptr_addr);
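+ /* ptr_addr points into the command buffer and may not be naturally
+ * aligned, hence get_unaligned() below */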
- ref_age_base = (u32 *)(unsigned long)*ptr_addr;
+ ref_age_base = (u32 *)(unsigned long)get_unaligned(ptr_addr);
for (i=0; i < header.scratch.n_bufs; i++) {
buf_idx = drm_buffer_pointer_to_dword(cmdbuf->buffer, 0);
diff --git a/drivers/gpu/drm/radeon/r300d.h b/drivers/gpu/drm/radeon/r300d.h
index 4c73114f0de..968a33317fb 100644
--- a/drivers/gpu/drm/radeon/r300d.h
+++ b/drivers/gpu/drm/radeon/r300d.h
@@ -209,7 +209,52 @@
#define S_000E40_GUI_ACTIVE(x) (((x) & 0x1) << 31)
#define G_000E40_GUI_ACTIVE(x) (((x) >> 31) & 0x1)
#define C_000E40_GUI_ACTIVE 0x7FFFFFFF
-
+#define R_0000F0_RBBM_SOFT_RESET 0x0000F0
+#define S_0000F0_SOFT_RESET_CP(x) (((x) & 0x1) << 0)
+#define G_0000F0_SOFT_RESET_CP(x) (((x) >> 0) & 0x1)
+#define C_0000F0_SOFT_RESET_CP 0xFFFFFFFE
+#define S_0000F0_SOFT_RESET_HI(x) (((x) & 0x1) << 1)
+#define G_0000F0_SOFT_RESET_HI(x) (((x) >> 1) & 0x1)
+#define C_0000F0_SOFT_RESET_HI 0xFFFFFFFD
+#define S_0000F0_SOFT_RESET_VAP(x) (((x) & 0x1) << 2)
+#define G_0000F0_SOFT_RESET_VAP(x) (((x) >> 2) & 0x1)
+#define C_0000F0_SOFT_RESET_VAP 0xFFFFFFFB
+#define S_0000F0_SOFT_RESET_RE(x) (((x) & 0x1) << 3)
+#define G_0000F0_SOFT_RESET_RE(x) (((x) >> 3) & 0x1)
+#define C_0000F0_SOFT_RESET_RE 0xFFFFFFF7
+#define S_0000F0_SOFT_RESET_PP(x) (((x) & 0x1) << 4)
+#define G_0000F0_SOFT_RESET_PP(x) (((x) >> 4) & 0x1)
+#define C_0000F0_SOFT_RESET_PP 0xFFFFFFEF
+#define S_0000F0_SOFT_RESET_E2(x) (((x) & 0x1) << 5)
+#define G_0000F0_SOFT_RESET_E2(x) (((x) >> 5) & 0x1)
+#define C_0000F0_SOFT_RESET_E2 0xFFFFFFDF
+#define S_0000F0_SOFT_RESET_RB(x) (((x) & 0x1) << 6)
+#define G_0000F0_SOFT_RESET_RB(x) (((x) >> 6) & 0x1)
+#define C_0000F0_SOFT_RESET_RB 0xFFFFFFBF
+#define S_0000F0_SOFT_RESET_HDP(x) (((x) & 0x1) << 7)
+#define G_0000F0_SOFT_RESET_HDP(x) (((x) >> 7) & 0x1)
+#define C_0000F0_SOFT_RESET_HDP 0xFFFFFF7F
+#define S_0000F0_SOFT_RESET_MC(x) (((x) & 0x1) << 8)
+#define G_0000F0_SOFT_RESET_MC(x) (((x) >> 8) & 0x1)
+#define C_0000F0_SOFT_RESET_MC 0xFFFFFEFF
+#define S_0000F0_SOFT_RESET_AIC(x) (((x) & 0x1) << 9)
+#define G_0000F0_SOFT_RESET_AIC(x) (((x) >> 9) & 0x1)
+#define C_0000F0_SOFT_RESET_AIC 0xFFFFFDFF
+#define S_0000F0_SOFT_RESET_VIP(x) (((x) & 0x1) << 10)
+#define G_0000F0_SOFT_RESET_VIP(x) (((x) >> 10) & 0x1)
+#define C_0000F0_SOFT_RESET_VIP 0xFFFFFBFF
+#define S_0000F0_SOFT_RESET_DISP(x) (((x) & 0x1) << 11)
+#define G_0000F0_SOFT_RESET_DISP(x) (((x) >> 11) & 0x1)
+#define C_0000F0_SOFT_RESET_DISP 0xFFFFF7FF
+#define S_0000F0_SOFT_RESET_CG(x) (((x) & 0x1) << 12)
+#define G_0000F0_SOFT_RESET_CG(x) (((x) >> 12) & 0x1)
+#define C_0000F0_SOFT_RESET_CG 0xFFFFEFFF
+#define S_0000F0_SOFT_RESET_GA(x) (((x) & 0x1) << 13)
+#define G_0000F0_SOFT_RESET_GA(x) (((x) >> 13) & 0x1)
+#define C_0000F0_SOFT_RESET_GA 0xFFFFDFFF
+#define S_0000F0_SOFT_RESET_IDCT(x) (((x) & 0x1) << 14)
+#define G_0000F0_SOFT_RESET_IDCT(x) (((x) >> 14) & 0x1)
+#define C_0000F0_SOFT_RESET_IDCT 0xFFFFBFFF
#define R_00000D_SCLK_CNTL 0x00000D
#define S_00000D_SCLK_SRC_SEL(x) (((x) & 0x7) << 0)
diff --git a/drivers/gpu/drm/radeon/r420.c b/drivers/gpu/drm/radeon/r420.c
index 3dc968c9f5a..4415a5ee587 100644
--- a/drivers/gpu/drm/radeon/r420.c
+++ b/drivers/gpu/drm/radeon/r420.c
@@ -36,6 +36,35 @@
#include "r420d.h"
#include "r420_reg_safe.h"
+void r420_pm_init_profile(struct radeon_device *rdev)
+{
+ /* default */
+ rdev->pm.profiles[PM_PROFILE_DEFAULT_IDX].dpms_off_ps_idx = rdev->pm.default_power_state_index;
+ rdev->pm.profiles[PM_PROFILE_DEFAULT_IDX].dpms_on_ps_idx = rdev->pm.default_power_state_index;
+ rdev->pm.profiles[PM_PROFILE_DEFAULT_IDX].dpms_off_cm_idx = 0;
+ rdev->pm.profiles[PM_PROFILE_DEFAULT_IDX].dpms_on_cm_idx = 0;
+ /* low sh */
+ rdev->pm.profiles[PM_PROFILE_LOW_SH_IDX].dpms_off_ps_idx = 0;
+ rdev->pm.profiles[PM_PROFILE_LOW_SH_IDX].dpms_on_ps_idx = 1;
+ rdev->pm.profiles[PM_PROFILE_LOW_SH_IDX].dpms_off_cm_idx = 0;
+ rdev->pm.profiles[PM_PROFILE_LOW_SH_IDX].dpms_on_cm_idx = 0;
+ /* high sh */
+ rdev->pm.profiles[PM_PROFILE_HIGH_SH_IDX].dpms_off_ps_idx = 0;
+ rdev->pm.profiles[PM_PROFILE_HIGH_SH_IDX].dpms_on_ps_idx = rdev->pm.default_power_state_index;
+ rdev->pm.profiles[PM_PROFILE_HIGH_SH_IDX].dpms_off_cm_idx = 0;
+ rdev->pm.profiles[PM_PROFILE_HIGH_SH_IDX].dpms_on_cm_idx = 0;
+ /* low mh */
+ rdev->pm.profiles[PM_PROFILE_LOW_MH_IDX].dpms_off_ps_idx = 0;
+ rdev->pm.profiles[PM_PROFILE_LOW_MH_IDX].dpms_on_ps_idx = rdev->pm.default_power_state_index;
+ rdev->pm.profiles[PM_PROFILE_LOW_MH_IDX].dpms_off_cm_idx = 0;
+ rdev->pm.profiles[PM_PROFILE_LOW_MH_IDX].dpms_on_cm_idx = 0;
+ /* high mh */
+ rdev->pm.profiles[PM_PROFILE_HIGH_MH_IDX].dpms_off_ps_idx = 0;
+ rdev->pm.profiles[PM_PROFILE_HIGH_MH_IDX].dpms_on_ps_idx = rdev->pm.default_power_state_index;
+ rdev->pm.profiles[PM_PROFILE_HIGH_MH_IDX].dpms_off_cm_idx = 0;
+ rdev->pm.profiles[PM_PROFILE_HIGH_MH_IDX].dpms_on_cm_idx = 0;
+}
+
static void r420_set_reg_safe(struct radeon_device *rdev)
{
rdev->config.r300.reg_safe_bm = r420_reg_safe_bm;
@@ -59,6 +88,12 @@ void r420_pipes_init(struct radeon_device *rdev)
/* get max number of pipes */
gb_pipe_select = RREG32(0x402C);
num_pipes = ((gb_pipe_select >> 12) & 3) + 1;
+
+ /* SE chips have 1 pipe */
+ if ((rdev->pdev->device == 0x5e4c) ||
+ (rdev->pdev->device == 0x5e4f))
+ num_pipes = 1;
+
rdev->num_gb_pipes = num_pipes;
tmp = 0;
switch (num_pipes) {
@@ -235,7 +270,7 @@ int r420_resume(struct radeon_device *rdev)
/* Resume clock before doing reset */
r420_clock_resume(rdev);
/* Reset gpu before posting otherwise ATOM will enter infinite loop */
- if (radeon_gpu_reset(rdev)) {
+ if (radeon_asic_reset(rdev)) {
dev_warn(rdev->dev, "GPU reset failed ! (0xE40=0x%08X, 0x7C0=0x%08X)\n",
RREG32(R_000E40_RBBM_STATUS),
RREG32(R_0007C0_CP_STAT));
@@ -268,7 +303,6 @@ int r420_suspend(struct radeon_device *rdev)
void r420_fini(struct radeon_device *rdev)
{
- radeon_pm_fini(rdev);
r100_cp_fini(rdev);
r100_wb_fini(rdev);
r100_ib_fini(rdev);
@@ -316,7 +350,7 @@ int r420_init(struct radeon_device *rdev)
}
}
/* Reset gpu before posting otherwise ATOM will enter infinite loop */
- if (radeon_gpu_reset(rdev)) {
+ if (radeon_asic_reset(rdev)) {
dev_warn(rdev->dev,
"GPU reset failed ! (0xE40=0x%08X, 0x7C0=0x%08X)\n",
RREG32(R_000E40_RBBM_STATUS),
@@ -328,8 +362,6 @@ int r420_init(struct radeon_device *rdev)
/* Initialize clocks */
radeon_get_clock_info(rdev->ddev);
- /* Initialize power management */
- radeon_pm_init(rdev);
/* initialize AGP */
if (rdev->flags & RADEON_IS_AGP) {
r = radeon_agp_init(rdev);
diff --git a/drivers/gpu/drm/radeon/r500_reg.h b/drivers/gpu/drm/radeon/r500_reg.h
index 0cf2ad2a558..93c9a2bbccf 100644
--- a/drivers/gpu/drm/radeon/r500_reg.h
+++ b/drivers/gpu/drm/radeon/r500_reg.h
@@ -347,9 +347,11 @@
#define AVIVO_D1CRTC_CONTROL 0x6080
# define AVIVO_CRTC_EN (1 << 0)
+# define AVIVO_CRTC_DISP_READ_REQUEST_DISABLE (1 << 24)
#define AVIVO_D1CRTC_BLANK_CONTROL 0x6084
#define AVIVO_D1CRTC_INTERLACE_CONTROL 0x6088
#define AVIVO_D1CRTC_INTERLACE_STATUS 0x608c
+#define AVIVO_D1CRTC_STATUS_POSITION 0x60a0
#define AVIVO_D1CRTC_FRAME_COUNT 0x60a4
#define AVIVO_D1CRTC_STEREO_CONTROL 0x60c4
@@ -488,6 +490,7 @@
#define AVIVO_D2CRTC_BLANK_CONTROL 0x6884
#define AVIVO_D2CRTC_INTERLACE_CONTROL 0x6888
#define AVIVO_D2CRTC_INTERLACE_STATUS 0x688c
+#define AVIVO_D2CRTC_STATUS_POSITION 0x68a0
#define AVIVO_D2CRTC_FRAME_COUNT 0x68a4
#define AVIVO_D2CRTC_STEREO_CONTROL 0x68c4
diff --git a/drivers/gpu/drm/radeon/r520.c b/drivers/gpu/drm/radeon/r520.c
index 3c44b8d3931..34330df2848 100644
--- a/drivers/gpu/drm/radeon/r520.c
+++ b/drivers/gpu/drm/radeon/r520.c
@@ -53,7 +53,6 @@ static void r520_gpu_init(struct radeon_device *rdev)
{
unsigned pipe_select_current, gb_pipe_select, tmp;
- r100_hdp_reset(rdev);
rv515_vga_render_disable(rdev);
/*
* DST_PIPE_CONFIG 0x170C
@@ -209,7 +208,7 @@ int r520_resume(struct radeon_device *rdev)
/* Resume clock before doing reset */
rv515_clock_startup(rdev);
/* Reset gpu before posting otherwise ATOM will enter infinite loop */
- if (radeon_gpu_reset(rdev)) {
+ if (radeon_asic_reset(rdev)) {
dev_warn(rdev->dev, "GPU reset failed ! (0xE40=0x%08X, 0x7C0=0x%08X)\n",
RREG32(R_000E40_RBBM_STATUS),
RREG32(R_0007C0_CP_STAT));
@@ -246,7 +245,7 @@ int r520_init(struct radeon_device *rdev)
return -EINVAL;
}
/* Reset gpu before posting otherwise ATOM will enter infinite loop */
- if (radeon_gpu_reset(rdev)) {
+ if (radeon_asic_reset(rdev)) {
dev_warn(rdev->dev,
"GPU reset failed ! (0xE40=0x%08X, 0x7C0=0x%08X)\n",
RREG32(R_000E40_RBBM_STATUS),
@@ -262,8 +261,6 @@ int r520_init(struct radeon_device *rdev)
}
/* Initialize clocks */
radeon_get_clock_info(rdev->ddev);
- /* Initialize power management */
- radeon_pm_init(rdev);
/* initialize AGP */
if (rdev->flags & RADEON_IS_AGP) {
r = radeon_agp_init(rdev);
diff --git a/drivers/gpu/drm/radeon/r600.c b/drivers/gpu/drm/radeon/r600.c
index 8f3454e2056..44e96a2ae25 100644
--- a/drivers/gpu/drm/radeon/r600.c
+++ b/drivers/gpu/drm/radeon/r600.c
@@ -44,6 +44,9 @@
#define R700_PFP_UCODE_SIZE 848
#define R700_PM4_UCODE_SIZE 1360
#define R700_RLC_UCODE_SIZE 1024
+#define EVERGREEN_PFP_UCODE_SIZE 1120
+#define EVERGREEN_PM4_UCODE_SIZE 1376
+#define EVERGREEN_RLC_UCODE_SIZE 768
/* Firmware Names */
MODULE_FIRMWARE("radeon/R600_pfp.bin");
@@ -68,6 +71,18 @@ MODULE_FIRMWARE("radeon/RV710_pfp.bin");
MODULE_FIRMWARE("radeon/RV710_me.bin");
MODULE_FIRMWARE("radeon/R600_rlc.bin");
MODULE_FIRMWARE("radeon/R700_rlc.bin");
+MODULE_FIRMWARE("radeon/CEDAR_pfp.bin");
+MODULE_FIRMWARE("radeon/CEDAR_me.bin");
+MODULE_FIRMWARE("radeon/CEDAR_rlc.bin");
+MODULE_FIRMWARE("radeon/REDWOOD_pfp.bin");
+MODULE_FIRMWARE("radeon/REDWOOD_me.bin");
+MODULE_FIRMWARE("radeon/REDWOOD_rlc.bin");
+MODULE_FIRMWARE("radeon/JUNIPER_pfp.bin");
+MODULE_FIRMWARE("radeon/JUNIPER_me.bin");
+MODULE_FIRMWARE("radeon/JUNIPER_rlc.bin");
+MODULE_FIRMWARE("radeon/CYPRESS_pfp.bin");
+MODULE_FIRMWARE("radeon/CYPRESS_me.bin");
+MODULE_FIRMWARE("radeon/CYPRESS_rlc.bin");
int r600_debugfs_mc_info_init(struct radeon_device *rdev);
@@ -75,6 +90,401 @@ int r600_debugfs_mc_info_init(struct radeon_device *rdev);
int r600_mc_wait_for_idle(struct radeon_device *rdev);
void r600_gpu_init(struct radeon_device *rdev);
void r600_fini(struct radeon_device *rdev);
+void r600_irq_disable(struct radeon_device *rdev);
+
+void r600_pm_get_dynpm_state(struct radeon_device *rdev)
+{
+ int i;
+
+ rdev->pm.dynpm_can_upclock = true;
+ rdev->pm.dynpm_can_downclock = true;
+
+ /* power state array is low to high, default is first */
+ if ((rdev->flags & RADEON_IS_IGP) || (rdev->family == CHIP_R600)) {
+ int min_power_state_index = 0;
+
+ if (rdev->pm.num_power_states > 2)
+ min_power_state_index = 1;
+
+ switch (rdev->pm.dynpm_planned_action) {
+ case DYNPM_ACTION_MINIMUM:
+ rdev->pm.requested_power_state_index = min_power_state_index;
+ rdev->pm.requested_clock_mode_index = 0;
+ rdev->pm.dynpm_can_downclock = false;
+ break;
+ case DYNPM_ACTION_DOWNCLOCK:
+ if (rdev->pm.current_power_state_index == min_power_state_index) {
+ rdev->pm.requested_power_state_index = rdev->pm.current_power_state_index;
+ rdev->pm.dynpm_can_downclock = false;
+ } else {
+ if (rdev->pm.active_crtc_count > 1) {
+ for (i = 0; i < rdev->pm.num_power_states; i++) {
+ if (rdev->pm.power_state[i].flags & RADEON_PM_STATE_SINGLE_DISPLAY_ONLY)
+ continue;
+ else if (i >= rdev->pm.current_power_state_index) {
+ rdev->pm.requested_power_state_index =
+ rdev->pm.current_power_state_index;
+ break;
+ } else {
+ rdev->pm.requested_power_state_index = i;
+ break;
+ }
+ }
+ } else
+ rdev->pm.requested_power_state_index =
+ rdev->pm.current_power_state_index - 1;
+ }
+ rdev->pm.requested_clock_mode_index = 0;
+ /* skip this state if crtcs are active and its no-display flag is set */
+ if ((rdev->pm.active_crtc_count > 0) &&
+ (rdev->pm.power_state[rdev->pm.requested_power_state_index].
+ clock_info[rdev->pm.requested_clock_mode_index].flags &
+ RADEON_PM_MODE_NO_DISPLAY)) {
+ rdev->pm.requested_power_state_index++;
+ }
+ break;
+ case DYNPM_ACTION_UPCLOCK:
+ if (rdev->pm.current_power_state_index == (rdev->pm.num_power_states - 1)) {
+ rdev->pm.requested_power_state_index = rdev->pm.current_power_state_index;
+ rdev->pm.dynpm_can_upclock = false;
+ } else {
+ if (rdev->pm.active_crtc_count > 1) {
+ for (i = (rdev->pm.num_power_states - 1); i >= 0; i--) {
+ if (rdev->pm.power_state[i].flags & RADEON_PM_STATE_SINGLE_DISPLAY_ONLY)
+ continue;
+ else if (i <= rdev->pm.current_power_state_index) {
+ rdev->pm.requested_power_state_index =
+ rdev->pm.current_power_state_index;
+ break;
+ } else {
+ rdev->pm.requested_power_state_index = i;
+ break;
+ }
+ }
+ } else
+ rdev->pm.requested_power_state_index =
+ rdev->pm.current_power_state_index + 1;
+ }
+ rdev->pm.requested_clock_mode_index = 0;
+ break;
+ case DYNPM_ACTION_DEFAULT:
+ rdev->pm.requested_power_state_index = rdev->pm.default_power_state_index;
+ rdev->pm.requested_clock_mode_index = 0;
+ rdev->pm.dynpm_can_upclock = false;
+ break;
+ case DYNPM_ACTION_NONE:
+ default:
+ DRM_ERROR("Requested mode for not defined action\n");
+ return;
+ }
+ } else {
+ /* XXX select a power state based on AC/DC, single/dualhead, etc. */
+ /* for now just select the first power state and switch between clock modes */
+ /* power state array is low to high, default is first (0) */
+ if (rdev->pm.active_crtc_count > 1) {
+ rdev->pm.requested_power_state_index = -1;
+ /* start at 1 as we don't want the default mode */
+ for (i = 1; i < rdev->pm.num_power_states; i++) {
+ if (rdev->pm.power_state[i].flags & RADEON_PM_STATE_SINGLE_DISPLAY_ONLY)
+ continue;
+ else if ((rdev->pm.power_state[i].type == POWER_STATE_TYPE_PERFORMANCE) ||
+ (rdev->pm.power_state[i].type == POWER_STATE_TYPE_BATTERY)) {
+ rdev->pm.requested_power_state_index = i;
+ break;
+ }
+ }
+ /* if nothing selected, grab the default state. */
+ if (rdev->pm.requested_power_state_index == -1)
+ rdev->pm.requested_power_state_index = 0;
+ } else
+ rdev->pm.requested_power_state_index = 1;
+
+ switch (rdev->pm.dynpm_planned_action) {
+ case DYNPM_ACTION_MINIMUM:
+ rdev->pm.requested_clock_mode_index = 0;
+ rdev->pm.dynpm_can_downclock = false;
+ break;
+ case DYNPM_ACTION_DOWNCLOCK:
+ if (rdev->pm.requested_power_state_index == rdev->pm.current_power_state_index) {
+ if (rdev->pm.current_clock_mode_index == 0) {
+ rdev->pm.requested_clock_mode_index = 0;
+ rdev->pm.dynpm_can_downclock = false;
+ } else
+ rdev->pm.requested_clock_mode_index =
+ rdev->pm.current_clock_mode_index - 1;
+ } else {
+ rdev->pm.requested_clock_mode_index = 0;
+ rdev->pm.dynpm_can_downclock = false;
+ }
+ /* skip this state if crtcs are active and its no-display flag is set */
+ if ((rdev->pm.active_crtc_count > 0) &&
+ (rdev->pm.power_state[rdev->pm.requested_power_state_index].
+ clock_info[rdev->pm.requested_clock_mode_index].flags &
+ RADEON_PM_MODE_NO_DISPLAY)) {
+ rdev->pm.requested_clock_mode_index++;
+ }
+ break;
+ case DYNPM_ACTION_UPCLOCK:
+ if (rdev->pm.requested_power_state_index == rdev->pm.current_power_state_index) {
+ if (rdev->pm.current_clock_mode_index ==
+ (rdev->pm.power_state[rdev->pm.requested_power_state_index].num_clock_modes - 1)) {
+ rdev->pm.requested_clock_mode_index = rdev->pm.current_clock_mode_index;
+ rdev->pm.dynpm_can_upclock = false;
+ } else
+ rdev->pm.requested_clock_mode_index =
+ rdev->pm.current_clock_mode_index + 1;
+ } else {
+ rdev->pm.requested_clock_mode_index =
+ rdev->pm.power_state[rdev->pm.requested_power_state_index].num_clock_modes - 1;
+ rdev->pm.dynpm_can_upclock = false;
+ }
+ break;
+ case DYNPM_ACTION_DEFAULT:
+ rdev->pm.requested_power_state_index = rdev->pm.default_power_state_index;
+ rdev->pm.requested_clock_mode_index = 0;
+ rdev->pm.dynpm_can_upclock = false;
+ break;
+ case DYNPM_ACTION_NONE:
+ default:
+ DRM_ERROR("Requested mode for not defined action\n");
+ return;
+ }
+ }
+
+ DRM_DEBUG("Requested: e: %d m: %d p: %d\n",
+ rdev->pm.power_state[rdev->pm.requested_power_state_index].
+ clock_info[rdev->pm.requested_clock_mode_index].sclk,
+ rdev->pm.power_state[rdev->pm.requested_power_state_index].
+ clock_info[rdev->pm.requested_clock_mode_index].mclk,
+ rdev->pm.power_state[rdev->pm.requested_power_state_index].
+ pcie_lanes);
+}
+
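For readers tracing the clamp logic above: on downclock with more than one active crtc, the loop walks the low-to-high state array, skips single-display-only states, and clamps at the current index. A minimal userspace sketch of that walk, with hypothetical names and a plain flag array standing in for the driver structures:

    #include <stdbool.h>
    #include <stdio.h>

    #define STATE_SINGLE_DISPLAY_ONLY (1 << 0)

    struct state { unsigned flags; };

    /* Walk from low to high, pick the first state usable with
     * multiple displays, and clamp at the current index. */
    static int pick_downclock(const struct state *s, int n, int cur, int ncrtc)
    {
        int i;

        if (ncrtc <= 1)
            return cur > 0 ? cur - 1 : cur; /* guard added for the sketch */
        for (i = 0; i < n; i++) {
            if (s[i].flags & STATE_SINGLE_DISPLAY_ONLY)
                continue;       /* unusable with two active crtcs */
            if (i >= cur)
                return cur;     /* nothing lower qualifies: stay put */
            return i;           /* first (lowest) usable state */
        }
        return cur;
    }

    int main(void)
    {
        struct state s[4] = { {0}, {STATE_SINGLE_DISPLAY_ONLY}, {0}, {0} };

        printf("dual-head downclock from 3 -> %d\n", pick_downclock(s, 4, 3, 2));
        return 0;
    }
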
+static int r600_pm_get_type_index(struct radeon_device *rdev,
+ enum radeon_pm_state_type ps_type,
+ int instance)
+{
+ int i;
+ int found_instance = -1;
+
+ for (i = 0; i < rdev->pm.num_power_states; i++) {
+ if (rdev->pm.power_state[i].type == ps_type) {
+ found_instance++;
+ if (found_instance == instance)
+ return i;
+ }
+ }
+ /* return default if no match */
+ return rdev->pm.default_power_state_index;
+}
+
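r600_pm_get_type_index() above is a first-N-matches scan with a default fallback; the same behaviour in a standalone sketch (types and names are illustrative):

    #include <stdio.h>

    enum ps_type { PS_BATTERY, PS_BALANCED, PS_PERFORMANCE };

    /* Return the index of the instance'th state of the given type,
     * or def if there are not that many matches. */
    static int get_type_index(const enum ps_type *ps, int n,
                              enum ps_type type, int instance, int def)
    {
        int i, found = -1;

        for (i = 0; i < n; i++) {
            if (ps[i] == type && ++found == instance)
                return i;
        }
        return def; /* fall back to the default state */
    }

    int main(void)
    {
        enum ps_type ps[] = { PS_BALANCED, PS_PERFORMANCE, PS_PERFORMANCE };

        printf("2nd perf state at %d\n",
               get_type_index(ps, 3, PS_PERFORMANCE, 1, 0));
        return 0;
    }
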
+void rs780_pm_init_profile(struct radeon_device *rdev)
+{
+ if (rdev->pm.num_power_states == 2) {
+ /* default */
+ rdev->pm.profiles[PM_PROFILE_DEFAULT_IDX].dpms_off_ps_idx = rdev->pm.default_power_state_index;
+ rdev->pm.profiles[PM_PROFILE_DEFAULT_IDX].dpms_on_ps_idx = rdev->pm.default_power_state_index;
+ rdev->pm.profiles[PM_PROFILE_DEFAULT_IDX].dpms_off_cm_idx = 0;
+ rdev->pm.profiles[PM_PROFILE_DEFAULT_IDX].dpms_on_cm_idx = 0;
+ /* low sh */
+ rdev->pm.profiles[PM_PROFILE_LOW_SH_IDX].dpms_off_ps_idx = 0;
+ rdev->pm.profiles[PM_PROFILE_LOW_SH_IDX].dpms_on_ps_idx = 0;
+ rdev->pm.profiles[PM_PROFILE_LOW_SH_IDX].dpms_off_cm_idx = 0;
+ rdev->pm.profiles[PM_PROFILE_LOW_SH_IDX].dpms_on_cm_idx = 0;
+ /* high sh */
+ rdev->pm.profiles[PM_PROFILE_HIGH_SH_IDX].dpms_off_ps_idx = 0;
+ rdev->pm.profiles[PM_PROFILE_HIGH_SH_IDX].dpms_on_ps_idx = 1;
+ rdev->pm.profiles[PM_PROFILE_HIGH_SH_IDX].dpms_off_cm_idx = 0;
+ rdev->pm.profiles[PM_PROFILE_HIGH_SH_IDX].dpms_on_cm_idx = 0;
+ /* low mh */
+ rdev->pm.profiles[PM_PROFILE_LOW_MH_IDX].dpms_off_ps_idx = 0;
+ rdev->pm.profiles[PM_PROFILE_LOW_MH_IDX].dpms_on_ps_idx = 0;
+ rdev->pm.profiles[PM_PROFILE_LOW_MH_IDX].dpms_off_cm_idx = 0;
+ rdev->pm.profiles[PM_PROFILE_LOW_MH_IDX].dpms_on_cm_idx = 0;
+ /* high mh */
+ rdev->pm.profiles[PM_PROFILE_HIGH_MH_IDX].dpms_off_ps_idx = 0;
+ rdev->pm.profiles[PM_PROFILE_HIGH_MH_IDX].dpms_on_ps_idx = 1;
+ rdev->pm.profiles[PM_PROFILE_HIGH_MH_IDX].dpms_off_cm_idx = 0;
+ rdev->pm.profiles[PM_PROFILE_HIGH_MH_IDX].dpms_on_cm_idx = 0;
+ } else if (rdev->pm.num_power_states == 3) {
+ /* default */
+ rdev->pm.profiles[PM_PROFILE_DEFAULT_IDX].dpms_off_ps_idx = rdev->pm.default_power_state_index;
+ rdev->pm.profiles[PM_PROFILE_DEFAULT_IDX].dpms_on_ps_idx = rdev->pm.default_power_state_index;
+ rdev->pm.profiles[PM_PROFILE_DEFAULT_IDX].dpms_off_cm_idx = 0;
+ rdev->pm.profiles[PM_PROFILE_DEFAULT_IDX].dpms_on_cm_idx = 0;
+ /* low sh */
+ rdev->pm.profiles[PM_PROFILE_LOW_SH_IDX].dpms_off_ps_idx = 1;
+ rdev->pm.profiles[PM_PROFILE_LOW_SH_IDX].dpms_on_ps_idx = 1;
+ rdev->pm.profiles[PM_PROFILE_LOW_SH_IDX].dpms_off_cm_idx = 0;
+ rdev->pm.profiles[PM_PROFILE_LOW_SH_IDX].dpms_on_cm_idx = 0;
+ /* high sh */
+ rdev->pm.profiles[PM_PROFILE_HIGH_SH_IDX].dpms_off_ps_idx = 1;
+ rdev->pm.profiles[PM_PROFILE_HIGH_SH_IDX].dpms_on_ps_idx = 2;
+ rdev->pm.profiles[PM_PROFILE_HIGH_SH_IDX].dpms_off_cm_idx = 0;
+ rdev->pm.profiles[PM_PROFILE_HIGH_SH_IDX].dpms_on_cm_idx = 0;
+ /* low mh */
+ rdev->pm.profiles[PM_PROFILE_LOW_MH_IDX].dpms_off_ps_idx = 1;
+ rdev->pm.profiles[PM_PROFILE_LOW_MH_IDX].dpms_on_ps_idx = 1;
+ rdev->pm.profiles[PM_PROFILE_LOW_MH_IDX].dpms_off_cm_idx = 0;
+ rdev->pm.profiles[PM_PROFILE_LOW_MH_IDX].dpms_on_cm_idx = 0;
+ /* high mh */
+ rdev->pm.profiles[PM_PROFILE_HIGH_MH_IDX].dpms_off_ps_idx = 1;
+ rdev->pm.profiles[PM_PROFILE_HIGH_MH_IDX].dpms_on_ps_idx = 2;
+ rdev->pm.profiles[PM_PROFILE_HIGH_MH_IDX].dpms_off_cm_idx = 0;
+ rdev->pm.profiles[PM_PROFILE_HIGH_MH_IDX].dpms_on_cm_idx = 0;
+ } else {
+ /* default */
+ rdev->pm.profiles[PM_PROFILE_DEFAULT_IDX].dpms_off_ps_idx = rdev->pm.default_power_state_index;
+ rdev->pm.profiles[PM_PROFILE_DEFAULT_IDX].dpms_on_ps_idx = rdev->pm.default_power_state_index;
+ rdev->pm.profiles[PM_PROFILE_DEFAULT_IDX].dpms_off_cm_idx = 0;
+ rdev->pm.profiles[PM_PROFILE_DEFAULT_IDX].dpms_on_cm_idx = 0;
+ /* low sh */
+ rdev->pm.profiles[PM_PROFILE_LOW_SH_IDX].dpms_off_ps_idx = 2;
+ rdev->pm.profiles[PM_PROFILE_LOW_SH_IDX].dpms_on_ps_idx = 2;
+ rdev->pm.profiles[PM_PROFILE_LOW_SH_IDX].dpms_off_cm_idx = 0;
+ rdev->pm.profiles[PM_PROFILE_LOW_SH_IDX].dpms_on_cm_idx = 0;
+ /* high sh */
+ rdev->pm.profiles[PM_PROFILE_HIGH_SH_IDX].dpms_off_ps_idx = 2;
+ rdev->pm.profiles[PM_PROFILE_HIGH_SH_IDX].dpms_on_ps_idx = 3;
+ rdev->pm.profiles[PM_PROFILE_HIGH_SH_IDX].dpms_off_cm_idx = 0;
+ rdev->pm.profiles[PM_PROFILE_HIGH_SH_IDX].dpms_on_cm_idx = 0;
+ /* low mh */
+ rdev->pm.profiles[PM_PROFILE_LOW_MH_IDX].dpms_off_ps_idx = 2;
+ rdev->pm.profiles[PM_PROFILE_LOW_MH_IDX].dpms_on_ps_idx = 0;
+ rdev->pm.profiles[PM_PROFILE_LOW_MH_IDX].dpms_off_cm_idx = 0;
+ rdev->pm.profiles[PM_PROFILE_LOW_MH_IDX].dpms_on_cm_idx = 0;
+ /* high mh */
+ rdev->pm.profiles[PM_PROFILE_HIGH_MH_IDX].dpms_off_ps_idx = 2;
+ rdev->pm.profiles[PM_PROFILE_HIGH_MH_IDX].dpms_on_ps_idx = 3;
+ rdev->pm.profiles[PM_PROFILE_HIGH_MH_IDX].dpms_off_cm_idx = 0;
+ rdev->pm.profiles[PM_PROFILE_HIGH_MH_IDX].dpms_on_cm_idx = 0;
+ }
+}
+
+void r600_pm_init_profile(struct radeon_device *rdev)
+{
+ if (rdev->family == CHIP_R600) {
+ /* XXX */
+ /* default */
+ rdev->pm.profiles[PM_PROFILE_DEFAULT_IDX].dpms_off_ps_idx = rdev->pm.default_power_state_index;
+ rdev->pm.profiles[PM_PROFILE_DEFAULT_IDX].dpms_on_ps_idx = rdev->pm.default_power_state_index;
+ rdev->pm.profiles[PM_PROFILE_DEFAULT_IDX].dpms_off_cm_idx = 0;
+ rdev->pm.profiles[PM_PROFILE_DEFAULT_IDX].dpms_on_cm_idx = 0;
+ /* low sh */
+ rdev->pm.profiles[PM_PROFILE_LOW_SH_IDX].dpms_off_ps_idx = rdev->pm.default_power_state_index;
+ rdev->pm.profiles[PM_PROFILE_LOW_SH_IDX].dpms_on_ps_idx = rdev->pm.default_power_state_index;
+ rdev->pm.profiles[PM_PROFILE_LOW_SH_IDX].dpms_off_cm_idx = 0;
+ rdev->pm.profiles[PM_PROFILE_LOW_SH_IDX].dpms_on_cm_idx = 0;
+ /* high sh */
+ rdev->pm.profiles[PM_PROFILE_HIGH_SH_IDX].dpms_off_ps_idx = rdev->pm.default_power_state_index;
+ rdev->pm.profiles[PM_PROFILE_HIGH_SH_IDX].dpms_on_ps_idx = rdev->pm.default_power_state_index;
+ rdev->pm.profiles[PM_PROFILE_HIGH_SH_IDX].dpms_off_cm_idx = 0;
+ rdev->pm.profiles[PM_PROFILE_HIGH_SH_IDX].dpms_on_cm_idx = 0;
+ /* low mh */
+ rdev->pm.profiles[PM_PROFILE_LOW_MH_IDX].dpms_off_ps_idx = rdev->pm.default_power_state_index;
+ rdev->pm.profiles[PM_PROFILE_LOW_MH_IDX].dpms_on_ps_idx = rdev->pm.default_power_state_index;
+ rdev->pm.profiles[PM_PROFILE_LOW_MH_IDX].dpms_off_cm_idx = 0;
+ rdev->pm.profiles[PM_PROFILE_LOW_MH_IDX].dpms_on_cm_idx = 0;
+ /* high mh */
+ rdev->pm.profiles[PM_PROFILE_HIGH_MH_IDX].dpms_off_ps_idx = rdev->pm.default_power_state_index;
+ rdev->pm.profiles[PM_PROFILE_HIGH_MH_IDX].dpms_on_ps_idx = rdev->pm.default_power_state_index;
+ rdev->pm.profiles[PM_PROFILE_HIGH_MH_IDX].dpms_off_cm_idx = 0;
+ rdev->pm.profiles[PM_PROFILE_HIGH_MH_IDX].dpms_on_cm_idx = 0;
+ } else {
+ if (rdev->pm.num_power_states < 4) {
+ /* default */
+ rdev->pm.profiles[PM_PROFILE_DEFAULT_IDX].dpms_off_ps_idx = rdev->pm.default_power_state_index;
+ rdev->pm.profiles[PM_PROFILE_DEFAULT_IDX].dpms_on_ps_idx = rdev->pm.default_power_state_index;
+ rdev->pm.profiles[PM_PROFILE_DEFAULT_IDX].dpms_off_cm_idx = 0;
+ rdev->pm.profiles[PM_PROFILE_DEFAULT_IDX].dpms_on_cm_idx = 2;
+ /* low sh */
+ rdev->pm.profiles[PM_PROFILE_LOW_SH_IDX].dpms_off_ps_idx = 1;
+ rdev->pm.profiles[PM_PROFILE_LOW_SH_IDX].dpms_on_ps_idx = 1;
+ rdev->pm.profiles[PM_PROFILE_LOW_SH_IDX].dpms_off_cm_idx = 0;
+ rdev->pm.profiles[PM_PROFILE_LOW_SH_IDX].dpms_on_cm_idx = 1;
+ /* high sh */
+ rdev->pm.profiles[PM_PROFILE_HIGH_SH_IDX].dpms_off_ps_idx = 1;
+ rdev->pm.profiles[PM_PROFILE_HIGH_SH_IDX].dpms_on_ps_idx = 1;
+ rdev->pm.profiles[PM_PROFILE_HIGH_SH_IDX].dpms_off_cm_idx = 0;
+ rdev->pm.profiles[PM_PROFILE_HIGH_SH_IDX].dpms_on_cm_idx = 2;
+ /* low mh */
+ rdev->pm.profiles[PM_PROFILE_LOW_MH_IDX].dpms_off_ps_idx = 2;
+ rdev->pm.profiles[PM_PROFILE_LOW_MH_IDX].dpms_on_ps_idx = 2;
+ rdev->pm.profiles[PM_PROFILE_LOW_MH_IDX].dpms_off_cm_idx = 0;
+ rdev->pm.profiles[PM_PROFILE_LOW_MH_IDX].dpms_on_cm_idx = 1;
+ /* high mh */
+ rdev->pm.profiles[PM_PROFILE_HIGH_MH_IDX].dpms_off_ps_idx = 2;
+ rdev->pm.profiles[PM_PROFILE_HIGH_MH_IDX].dpms_on_ps_idx = 2;
+ rdev->pm.profiles[PM_PROFILE_HIGH_MH_IDX].dpms_off_cm_idx = 0;
+ rdev->pm.profiles[PM_PROFILE_HIGH_MH_IDX].dpms_on_cm_idx = 2;
+ } else {
+ /* default */
+ rdev->pm.profiles[PM_PROFILE_DEFAULT_IDX].dpms_off_ps_idx = rdev->pm.default_power_state_index;
+ rdev->pm.profiles[PM_PROFILE_DEFAULT_IDX].dpms_on_ps_idx = rdev->pm.default_power_state_index;
+ rdev->pm.profiles[PM_PROFILE_DEFAULT_IDX].dpms_off_cm_idx = 0;
+ rdev->pm.profiles[PM_PROFILE_DEFAULT_IDX].dpms_on_cm_idx = 2;
+ /* low sh */
+ if (rdev->flags & RADEON_IS_MOBILITY) {
+ rdev->pm.profiles[PM_PROFILE_LOW_SH_IDX].dpms_off_ps_idx =
+ r600_pm_get_type_index(rdev, POWER_STATE_TYPE_BATTERY, 0);
+ rdev->pm.profiles[PM_PROFILE_LOW_SH_IDX].dpms_on_ps_idx =
+ r600_pm_get_type_index(rdev, POWER_STATE_TYPE_BATTERY, 0);
+ rdev->pm.profiles[PM_PROFILE_LOW_SH_IDX].dpms_off_cm_idx = 0;
+ rdev->pm.profiles[PM_PROFILE_LOW_SH_IDX].dpms_on_cm_idx = 1;
+ } else {
+ rdev->pm.profiles[PM_PROFILE_LOW_SH_IDX].dpms_off_ps_idx =
+ r600_pm_get_type_index(rdev, POWER_STATE_TYPE_PERFORMANCE, 0);
+ rdev->pm.profiles[PM_PROFILE_LOW_SH_IDX].dpms_on_ps_idx =
+ r600_pm_get_type_index(rdev, POWER_STATE_TYPE_PERFORMANCE, 0);
+ rdev->pm.profiles[PM_PROFILE_LOW_SH_IDX].dpms_off_cm_idx = 0;
+ rdev->pm.profiles[PM_PROFILE_LOW_SH_IDX].dpms_on_cm_idx = 1;
+ }
+ /* high sh */
+ rdev->pm.profiles[PM_PROFILE_HIGH_SH_IDX].dpms_off_ps_idx =
+ r600_pm_get_type_index(rdev, POWER_STATE_TYPE_PERFORMANCE, 0);
+ rdev->pm.profiles[PM_PROFILE_HIGH_SH_IDX].dpms_on_ps_idx =
+ r600_pm_get_type_index(rdev, POWER_STATE_TYPE_PERFORMANCE, 0);
+ rdev->pm.profiles[PM_PROFILE_HIGH_SH_IDX].dpms_off_cm_idx = 0;
+ rdev->pm.profiles[PM_PROFILE_HIGH_SH_IDX].dpms_on_cm_idx = 2;
+ /* low mh */
+ if (rdev->flags & RADEON_IS_MOBILITY) {
+ rdev->pm.profiles[PM_PROFILE_LOW_MH_IDX].dpms_off_ps_idx =
+ r600_pm_get_type_index(rdev, POWER_STATE_TYPE_BATTERY, 1);
+ rdev->pm.profiles[PM_PROFILE_LOW_MH_IDX].dpms_on_ps_idx =
+ r600_pm_get_type_index(rdev, POWER_STATE_TYPE_BATTERY, 1);
+ rdev->pm.profiles[PM_PROFILE_LOW_MH_IDX].dpms_off_cm_idx = 0;
+ rdev->pm.profiles[PM_PROFILE_LOW_MH_IDX].dpms_on_cm_idx = 2;
+ } else {
+ rdev->pm.profiles[PM_PROFILE_LOW_MH_IDX].dpms_off_ps_idx =
+ r600_pm_get_type_index(rdev, POWER_STATE_TYPE_PERFORMANCE, 1);
+ rdev->pm.profiles[PM_PROFILE_LOW_MH_IDX].dpms_on_ps_idx =
+ r600_pm_get_type_index(rdev, POWER_STATE_TYPE_PERFORMANCE, 1);
+ rdev->pm.profiles[PM_PROFILE_LOW_MH_IDX].dpms_off_cm_idx = 0;
+ rdev->pm.profiles[PM_PROFILE_LOW_MH_IDX].dpms_on_cm_idx = 1;
+ }
+ /* high mh */
+ rdev->pm.profiles[PM_PROFILE_HIGH_MH_IDX].dpms_off_ps_idx =
+ r600_pm_get_type_index(rdev, POWER_STATE_TYPE_PERFORMANCE, 1);
+ rdev->pm.profiles[PM_PROFILE_HIGH_MH_IDX].dpms_on_ps_idx =
+ r600_pm_get_type_index(rdev, POWER_STATE_TYPE_PERFORMANCE, 1);
+ rdev->pm.profiles[PM_PROFILE_HIGH_MH_IDX].dpms_off_cm_idx = 0;
+ rdev->pm.profiles[PM_PROFILE_HIGH_MH_IDX].dpms_on_cm_idx = 2;
+ }
+ }
+}
+
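Both init_profile functions above only populate a lookup table; at DPMS time the driver reads the table instead of recomputing anything. A sketch of that indexing, reusing the PM_PROFILE_*_IDX constants this patch introduces (everything else is illustrative):

    #include <stdio.h>

    #define PM_PROFILE_DEFAULT_IDX 0
    #define PM_PROFILE_LOW_SH_IDX  1
    #define PM_PROFILE_HIGH_SH_IDX 2
    #define PM_PROFILE_LOW_MH_IDX  3
    #define PM_PROFILE_HIGH_MH_IDX 4
    #define PM_PROFILE_MAX         5

    struct pm_profile {
        int dpms_off_ps_idx; /* power state when all displays are off */
        int dpms_on_ps_idx;  /* power state when a display is on */
        int dpms_off_cm_idx; /* clock mode within that state */
        int dpms_on_cm_idx;
    };

    int main(void)
    {
        struct pm_profile profiles[PM_PROFILE_MAX] = {
            [PM_PROFILE_HIGH_SH_IDX] = { .dpms_off_ps_idx = 1,
                                         .dpms_on_ps_idx = 2 },
        };
        int idx = PM_PROFILE_HIGH_SH_IDX;
        int display_on = 1;

        /* the whole runtime "selection" is one table read */
        printf("use power state %d\n",
               display_on ? profiles[idx].dpms_on_ps_idx
                          : profiles[idx].dpms_off_ps_idx);
        return 0;
    }

The per-chip differences above (2, 3, or more power states) then live entirely in how the table is filled, not in the DPMS path.
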
+void r600_pm_misc(struct radeon_device *rdev)
+{
+}
+
+bool r600_gui_idle(struct radeon_device *rdev)
+{
+ return !(RREG32(GRBM_STATUS) & GUI_ACTIVE);
+}
/* hpd for digital panel detect/disconnect */
bool r600_hpd_sense(struct radeon_device *rdev, enum radeon_hpd_id hpd)
@@ -714,11 +1124,6 @@ int r600_mc_init(struct radeon_device *rdev)
rdev->mc.mc_vram_size = RREG32(CONFIG_MEMSIZE);
rdev->mc.real_vram_size = RREG32(CONFIG_MEMSIZE);
rdev->mc.visible_vram_size = rdev->mc.aper_size;
- /* FIXME remove this once we support unmappable VRAM */
- if (rdev->mc.mc_vram_size > rdev->mc.aper_size) {
- rdev->mc.mc_vram_size = rdev->mc.aper_size;
- rdev->mc.real_vram_size = rdev->mc.aper_size;
- }
r600_vram_gtt_location(rdev, &rdev->mc);
if (rdev->flags & RADEON_IS_IGP)
@@ -750,7 +1155,6 @@ int r600_gpu_soft_reset(struct radeon_device *rdev)
S_008014_DB2_BUSY(1) | S_008014_DB3_BUSY(1) |
S_008014_CB0_BUSY(1) | S_008014_CB1_BUSY(1) |
S_008014_CB2_BUSY(1) | S_008014_CB3_BUSY(1);
- u32 srbm_reset = 0;
u32 tmp;
dev_info(rdev->dev, "GPU softreset \n");
@@ -765,7 +1169,7 @@ int r600_gpu_soft_reset(struct radeon_device *rdev)
dev_warn(rdev->dev, "Wait for MC idle timedout !\n");
}
/* Disable CP parsing/prefetching */
- WREG32(R_0086D8_CP_ME_CNTL, S_0086D8_CP_ME_HALT(0xff));
+ WREG32(R_0086D8_CP_ME_CNTL, S_0086D8_CP_ME_HALT(1));
/* Check if any of the rendering block is busy and reset it */
if ((RREG32(R_008010_GRBM_STATUS) & grbm_busy_mask) ||
(RREG32(R_008014_GRBM_STATUS2) & grbm2_busy_mask)) {
@@ -784,72 +1188,56 @@ int r600_gpu_soft_reset(struct radeon_device *rdev)
S_008020_SOFT_RESET_VGT(1);
dev_info(rdev->dev, " R_008020_GRBM_SOFT_RESET=0x%08X\n", tmp);
WREG32(R_008020_GRBM_SOFT_RESET, tmp);
- (void)RREG32(R_008020_GRBM_SOFT_RESET);
- udelay(50);
+ RREG32(R_008020_GRBM_SOFT_RESET);
+ mdelay(15);
WREG32(R_008020_GRBM_SOFT_RESET, 0);
- (void)RREG32(R_008020_GRBM_SOFT_RESET);
}
/* Reset CP (we always reset CP) */
tmp = S_008020_SOFT_RESET_CP(1);
dev_info(rdev->dev, "R_008020_GRBM_SOFT_RESET=0x%08X\n", tmp);
WREG32(R_008020_GRBM_SOFT_RESET, tmp);
- (void)RREG32(R_008020_GRBM_SOFT_RESET);
- udelay(50);
+ RREG32(R_008020_GRBM_SOFT_RESET);
+ mdelay(15);
WREG32(R_008020_GRBM_SOFT_RESET, 0);
- (void)RREG32(R_008020_GRBM_SOFT_RESET);
- /* Reset others GPU block if necessary */
- if (G_000E50_RLC_BUSY(RREG32(R_000E50_SRBM_STATUS)))
- srbm_reset |= S_000E60_SOFT_RESET_RLC(1);
- if (G_000E50_GRBM_RQ_PENDING(RREG32(R_000E50_SRBM_STATUS)))
- srbm_reset |= S_000E60_SOFT_RESET_GRBM(1);
- if (G_000E50_HI_RQ_PENDING(RREG32(R_000E50_SRBM_STATUS)))
- srbm_reset |= S_000E60_SOFT_RESET_IH(1);
- if (G_000E50_VMC_BUSY(RREG32(R_000E50_SRBM_STATUS)))
- srbm_reset |= S_000E60_SOFT_RESET_VMC(1);
- if (G_000E50_MCB_BUSY(RREG32(R_000E50_SRBM_STATUS)))
- srbm_reset |= S_000E60_SOFT_RESET_MC(1);
- if (G_000E50_MCDZ_BUSY(RREG32(R_000E50_SRBM_STATUS)))
- srbm_reset |= S_000E60_SOFT_RESET_MC(1);
- if (G_000E50_MCDY_BUSY(RREG32(R_000E50_SRBM_STATUS)))
- srbm_reset |= S_000E60_SOFT_RESET_MC(1);
- if (G_000E50_MCDX_BUSY(RREG32(R_000E50_SRBM_STATUS)))
- srbm_reset |= S_000E60_SOFT_RESET_MC(1);
- if (G_000E50_MCDW_BUSY(RREG32(R_000E50_SRBM_STATUS)))
- srbm_reset |= S_000E60_SOFT_RESET_MC(1);
- if (G_000E50_RLC_BUSY(RREG32(R_000E50_SRBM_STATUS)))
- srbm_reset |= S_000E60_SOFT_RESET_RLC(1);
- if (G_000E50_SEM_BUSY(RREG32(R_000E50_SRBM_STATUS)))
- srbm_reset |= S_000E60_SOFT_RESET_SEM(1);
- if (G_000E50_BIF_BUSY(RREG32(R_000E50_SRBM_STATUS)))
- srbm_reset |= S_000E60_SOFT_RESET_BIF(1);
- dev_info(rdev->dev, " R_000E60_SRBM_SOFT_RESET=0x%08X\n", srbm_reset);
- WREG32(R_000E60_SRBM_SOFT_RESET, srbm_reset);
- (void)RREG32(R_000E60_SRBM_SOFT_RESET);
- udelay(50);
- WREG32(R_000E60_SRBM_SOFT_RESET, 0);
- (void)RREG32(R_000E60_SRBM_SOFT_RESET);
- WREG32(R_000E60_SRBM_SOFT_RESET, srbm_reset);
- (void)RREG32(R_000E60_SRBM_SOFT_RESET);
- udelay(50);
- WREG32(R_000E60_SRBM_SOFT_RESET, 0);
- (void)RREG32(R_000E60_SRBM_SOFT_RESET);
/* Wait a little for things to settle down */
- udelay(50);
+ mdelay(1);
dev_info(rdev->dev, " R_008010_GRBM_STATUS=0x%08X\n",
RREG32(R_008010_GRBM_STATUS));
dev_info(rdev->dev, " R_008014_GRBM_STATUS2=0x%08X\n",
RREG32(R_008014_GRBM_STATUS2));
dev_info(rdev->dev, " R_000E50_SRBM_STATUS=0x%08X\n",
RREG32(R_000E50_SRBM_STATUS));
- /* After reset we need to reinit the asic as GPU often endup in an
- * incoherent state.
- */
- atom_asic_init(rdev->mode_info.atom_context);
rv515_mc_resume(rdev, &save);
return 0;
}
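Every reset in this function follows the same pulse: write the soft-reset bits, read the register back so the write posts before the delay, hold, then clear. Sketched with a stubbed MMIO register (names illustrative):

    #include <stdint.h>
    #include <stdio.h>
    #include <unistd.h>

    static uint32_t grbm_soft_reset; /* stand-in for the MMIO register */

    static void wreg32(uint32_t v) { grbm_soft_reset = v; }
    static uint32_t rreg32(void)   { return grbm_soft_reset; }

    static void reset_pulse(uint32_t bits)
    {
        wreg32(bits);            /* assert the per-block reset lines */
        (void)rreg32();          /* read back: make sure the write has posted */
        usleep(15000);           /* hold reset, like the mdelay(15) above */
        wreg32(0);               /* release */
    }

    int main(void)
    {
        reset_pulse(1u << 0);    /* e.g. the always-reset CP bit */
        printf("reset released: 0x%08x\n", rreg32());
        return 0;
    }
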
-int r600_gpu_reset(struct radeon_device *rdev)
+bool r600_gpu_is_lockup(struct radeon_device *rdev)
+{
+ u32 srbm_status;
+ u32 grbm_status;
+ u32 grbm_status2;
+ int r;
+
+ srbm_status = RREG32(R_000E50_SRBM_STATUS);
+ grbm_status = RREG32(R_008010_GRBM_STATUS);
+ grbm_status2 = RREG32(R_008014_GRBM_STATUS2);
+ if (!G_008010_GUI_ACTIVE(grbm_status)) {
+ r100_gpu_lockup_update(&rdev->config.r300.lockup, &rdev->cp);
+ return false;
+ }
+ /* force CP activity */
+ r = radeon_ring_lock(rdev, 2);
+ if (!r) {
+ /* PACKET2 NOP */
+ radeon_ring_write(rdev, 0x80000000);
+ radeon_ring_write(rdev, 0x80000000);
+ radeon_ring_unlock_commit(rdev);
+ }
+ rdev->cp.rptr = RREG32(R600_CP_RB_RPTR);
+ return r100_gpu_cp_is_lockup(rdev, &rdev->config.r300.lockup, &rdev->cp);
+}
+
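The lockup test above records the CP read pointer plus a timestamp and only reports a lockup once the pointer has been frozen long enough. A userspace approximation of that bookkeeping, using monotonic milliseconds in place of jiffies (the 10 second threshold is an assumption for illustration):

    #include <stdbool.h>
    #include <stdio.h>
    #include <time.h>

    struct gpu_lockup {
        long last_ms;
        unsigned last_cp_rptr;
    };

    static long now_ms(void)
    {
        struct timespec ts;

        clock_gettime(CLOCK_MONOTONIC, &ts);
        return ts.tv_sec * 1000L + ts.tv_nsec / 1000000L;
    }

    /* Called while the GPU looks busy: declare a lockup only if the
     * read pointer has not advanced for more than 10 seconds. */
    static bool cp_is_lockup(struct gpu_lockup *l, unsigned rptr)
    {
        if (rptr != l->last_cp_rptr) {
            l->last_cp_rptr = rptr;   /* CP still consuming: not locked up */
            l->last_ms = now_ms();
            return false;
        }
        return now_ms() - l->last_ms > 10 * 1000;
    }

    int main(void)
    {
        struct gpu_lockup l = { now_ms(), 0 };

        printf("lockup? %d\n", cp_is_lockup(&l, 0)); /* too early: prints 0 */
        return 0;
    }
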
+int r600_asic_reset(struct radeon_device *rdev)
{
return r600_gpu_soft_reset(rdev);
}
@@ -1467,10 +1855,31 @@ int r600_init_microcode(struct radeon_device *rdev)
chip_name = "RV710";
rlc_chip_name = "R700";
break;
+ case CHIP_CEDAR:
+ chip_name = "CEDAR";
+ rlc_chip_name = "CEDAR";
+ break;
+ case CHIP_REDWOOD:
+ chip_name = "REDWOOD";
+ rlc_chip_name = "REDWOOD";
+ break;
+ case CHIP_JUNIPER:
+ chip_name = "JUNIPER";
+ rlc_chip_name = "JUNIPER";
+ break;
+ case CHIP_CYPRESS:
+ case CHIP_HEMLOCK:
+ chip_name = "CYPRESS";
+ rlc_chip_name = "CYPRESS";
+ break;
default: BUG();
}
- if (rdev->family >= CHIP_RV770) {
+ if (rdev->family >= CHIP_CEDAR) {
+ pfp_req_size = EVERGREEN_PFP_UCODE_SIZE * 4;
+ me_req_size = EVERGREEN_PM4_UCODE_SIZE * 4;
+ rlc_req_size = EVERGREEN_RLC_UCODE_SIZE * 4;
+ } else if (rdev->family >= CHIP_RV770) {
pfp_req_size = R700_PFP_UCODE_SIZE * 4;
me_req_size = R700_PM4_UCODE_SIZE * 4;
rlc_req_size = R700_RLC_UCODE_SIZE * 4;
@@ -1584,12 +1993,15 @@ int r600_cp_start(struct radeon_device *rdev)
}
radeon_ring_write(rdev, PACKET3(PACKET3_ME_INITIALIZE, 5));
radeon_ring_write(rdev, 0x1);
- if (rdev->family < CHIP_RV770) {
- radeon_ring_write(rdev, 0x3);
- radeon_ring_write(rdev, rdev->config.r600.max_hw_contexts - 1);
- } else {
+ if (rdev->family >= CHIP_CEDAR) {
+ radeon_ring_write(rdev, 0x0);
+ radeon_ring_write(rdev, rdev->config.evergreen.max_hw_contexts - 1);
+ } else if (rdev->family >= CHIP_RV770) {
radeon_ring_write(rdev, 0x0);
radeon_ring_write(rdev, rdev->config.rv770.max_hw_contexts - 1);
+ } else {
+ radeon_ring_write(rdev, 0x3);
+ radeon_ring_write(rdev, rdev->config.r600.max_hw_contexts - 1);
}
radeon_ring_write(rdev, PACKET3_ME_INITIALIZE_DEVICE_ID(1));
radeon_ring_write(rdev, 0);
@@ -2051,8 +2463,6 @@ int r600_init(struct radeon_device *rdev)
r = radeon_clocks_init(rdev);
if (r)
return r;
- /* Initialize power management */
- radeon_pm_init(rdev);
/* Fence driver */
r = radeon_fence_driver_init(rdev);
if (r)
@@ -2117,7 +2527,6 @@ int r600_init(struct radeon_device *rdev)
void r600_fini(struct radeon_device *rdev)
{
- radeon_pm_fini(rdev);
r600_audio_fini(rdev);
r600_blit_fini(rdev);
r600_cp_fini(rdev);
@@ -2290,10 +2699,11 @@ static void r600_ih_ring_fini(struct radeon_device *rdev)
}
}
-static void r600_rlc_stop(struct radeon_device *rdev)
+void r600_rlc_stop(struct radeon_device *rdev)
{
- if (rdev->family >= CHIP_RV770) {
+ if ((rdev->family >= CHIP_RV770) &&
+ (rdev->family <= CHIP_RV740)) {
/* r7xx asics need to soft reset RLC before halting */
WREG32(SRBM_SOFT_RESET, SOFT_RESET_RLC);
RREG32(SRBM_SOFT_RESET);
@@ -2330,7 +2740,12 @@ static int r600_rlc_init(struct radeon_device *rdev)
WREG32(RLC_UCODE_CNTL, 0);
fw_data = (const __be32 *)rdev->rlc_fw->data;
- if (rdev->family >= CHIP_RV770) {
+ if (rdev->family >= CHIP_CEDAR) {
+ for (i = 0; i < EVERGREEN_RLC_UCODE_SIZE; i++) {
+ WREG32(RLC_UCODE_ADDR, i);
+ WREG32(RLC_UCODE_DATA, be32_to_cpup(fw_data++));
+ }
+ } else if (rdev->family >= CHIP_RV770) {
for (i = 0; i < R700_RLC_UCODE_SIZE; i++) {
WREG32(RLC_UCODE_ADDR, i);
WREG32(RLC_UCODE_DATA, be32_to_cpup(fw_data++));
@@ -2360,7 +2775,7 @@ static void r600_enable_interrupts(struct radeon_device *rdev)
rdev->ih.enabled = true;
}
-static void r600_disable_interrupts(struct radeon_device *rdev)
+void r600_disable_interrupts(struct radeon_device *rdev)
{
u32 ih_rb_cntl = RREG32(IH_RB_CNTL);
u32 ih_cntl = RREG32(IH_CNTL);
@@ -2475,7 +2890,10 @@ int r600_irq_init(struct radeon_device *rdev)
WREG32(IH_CNTL, ih_cntl);
/* force the active interrupt state to all disabled */
- r600_disable_interrupt_state(rdev);
+ if (rdev->family >= CHIP_CEDAR)
+ evergreen_disable_interrupt_state(rdev);
+ else
+ r600_disable_interrupt_state(rdev);
/* enable irqs */
r600_enable_interrupts(rdev);
@@ -2485,7 +2903,7 @@ int r600_irq_init(struct radeon_device *rdev)
void r600_irq_suspend(struct radeon_device *rdev)
{
- r600_disable_interrupts(rdev);
+ r600_irq_disable(rdev);
r600_rlc_stop(rdev);
}
@@ -2500,6 +2918,8 @@ int r600_irq_set(struct radeon_device *rdev)
u32 cp_int_cntl = CNTX_BUSY_INT_ENABLE | CNTX_EMPTY_INT_ENABLE;
u32 mode_int = 0;
u32 hpd1, hpd2, hpd3, hpd4 = 0, hpd5 = 0, hpd6 = 0;
+ u32 grbm_int_cntl = 0;
+ u32 hdmi1, hdmi2;
if (!rdev->irq.installed) {
WARN(1, "Can't enable IRQ/MSI because no handler is installed.\n");
@@ -2513,7 +2933,9 @@ int r600_irq_set(struct radeon_device *rdev)
return 0;
}
+ hdmi1 = RREG32(R600_HDMI_BLOCK1 + R600_HDMI_CNTL) & ~R600_HDMI_INT_EN;
if (ASIC_IS_DCE3(rdev)) {
+ hdmi2 = RREG32(R600_HDMI_BLOCK3 + R600_HDMI_CNTL) & ~R600_HDMI_INT_EN;
hpd1 = RREG32(DC_HPD1_INT_CONTROL) & ~DC_HPDx_INT_EN;
hpd2 = RREG32(DC_HPD2_INT_CONTROL) & ~DC_HPDx_INT_EN;
hpd3 = RREG32(DC_HPD3_INT_CONTROL) & ~DC_HPDx_INT_EN;
@@ -2523,6 +2945,7 @@ int r600_irq_set(struct radeon_device *rdev)
hpd6 = RREG32(DC_HPD6_INT_CONTROL) & ~DC_HPDx_INT_EN;
}
} else {
+ hdmi2 = RREG32(R600_HDMI_BLOCK2 + R600_HDMI_CNTL) & ~R600_HDMI_INT_EN;
hpd1 = RREG32(DC_HOT_PLUG_DETECT1_INT_CONTROL) & ~DC_HPDx_INT_EN;
hpd2 = RREG32(DC_HOT_PLUG_DETECT2_INT_CONTROL) & ~DC_HPDx_INT_EN;
hpd3 = RREG32(DC_HOT_PLUG_DETECT3_INT_CONTROL) & ~DC_HPDx_INT_EN;
@@ -2564,10 +2987,25 @@ int r600_irq_set(struct radeon_device *rdev)
DRM_DEBUG("r600_irq_set: hpd 6\n");
hpd6 |= DC_HPDx_INT_EN;
}
+ if (rdev->irq.hdmi[0]) {
+ DRM_DEBUG("r600_irq_set: hdmi 1\n");
+ hdmi1 |= R600_HDMI_INT_EN;
+ }
+ if (rdev->irq.hdmi[1]) {
+ DRM_DEBUG("r600_irq_set: hdmi 2\n");
+ hdmi2 |= R600_HDMI_INT_EN;
+ }
+ if (rdev->irq.gui_idle) {
+ DRM_DEBUG("gui idle\n");
+ grbm_int_cntl |= GUI_IDLE_INT_ENABLE;
+ }
WREG32(CP_INT_CNTL, cp_int_cntl);
WREG32(DxMODE_INT_MASK, mode_int);
+ WREG32(GRBM_INT_CNTL, grbm_int_cntl);
+ WREG32(R600_HDMI_BLOCK1 + R600_HDMI_CNTL, hdmi1);
if (ASIC_IS_DCE3(rdev)) {
+ WREG32(R600_HDMI_BLOCK3 + R600_HDMI_CNTL, hdmi2);
WREG32(DC_HPD1_INT_CONTROL, hpd1);
WREG32(DC_HPD2_INT_CONTROL, hpd2);
WREG32(DC_HPD3_INT_CONTROL, hpd3);
@@ -2577,6 +3015,7 @@ int r600_irq_set(struct radeon_device *rdev)
WREG32(DC_HPD6_INT_CONTROL, hpd6);
}
} else {
+ WREG32(R600_HDMI_BLOCK2 + R600_HDMI_CNTL, hdmi2);
WREG32(DC_HOT_PLUG_DETECT1_INT_CONTROL, hpd1);
WREG32(DC_HOT_PLUG_DETECT2_INT_CONTROL, hpd2);
WREG32(DC_HOT_PLUG_DETECT3_INT_CONTROL, hpd3);
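The hunk above extends the usual r600_irq_set() pattern: read each control register with its enable bit masked off, set the bit back only for requested sources, then write everything out. Reduced to a single fake register (accessor names are stand-ins):

    #include <stdint.h>
    #include <stdio.h>

    #define HDMI_INT_EN (1u << 28)

    static uint32_t mmio_hdmi_cntl = HDMI_INT_EN | 0x5; /* fake register */

    static uint32_t rreg32(void) { return mmio_hdmi_cntl; }
    static void wreg32(uint32_t v) { mmio_hdmi_cntl = v; }

    int main(void)
    {
        int want_hdmi_irq = 0;

        /* start from the current value with the enable bit cleared ... */
        uint32_t hdmi = rreg32() & ~HDMI_INT_EN;

        /* ... and set it again only if the source is requested */
        if (want_hdmi_irq)
            hdmi |= HDMI_INT_EN;
        wreg32(hdmi);

        printf("HDMI_CNTL = 0x%08x\n", mmio_hdmi_cntl); /* 0x00000005 */
        return 0;
    }
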
@@ -2660,6 +3099,18 @@ static inline void r600_irq_ack(struct radeon_device *rdev,
WREG32(DC_HPD6_INT_CONTROL, tmp);
}
}
+ if (RREG32(R600_HDMI_BLOCK1 + R600_HDMI_STATUS) & R600_HDMI_INT_PENDING) {
+ WREG32_P(R600_HDMI_BLOCK1 + R600_HDMI_CNTL, R600_HDMI_INT_ACK, ~R600_HDMI_INT_ACK);
+ }
+ if (ASIC_IS_DCE3(rdev)) {
+ if (RREG32(R600_HDMI_BLOCK3 + R600_HDMI_STATUS) & R600_HDMI_INT_PENDING) {
+ WREG32_P(R600_HDMI_BLOCK3 + R600_HDMI_CNTL, R600_HDMI_INT_ACK, ~R600_HDMI_INT_ACK);
+ }
+ } else {
+ if (RREG32(R600_HDMI_BLOCK2 + R600_HDMI_STATUS) & R600_HDMI_INT_PENDING) {
+ WREG32_P(R600_HDMI_BLOCK2 + R600_HDMI_CNTL, R600_HDMI_INT_ACK, ~R600_HDMI_INT_ACK);
+ }
+ }
}
void r600_irq_disable(struct radeon_device *rdev)
@@ -2713,6 +3164,8 @@ static inline u32 r600_get_ih_wptr(struct radeon_device *rdev)
* 19 1 FP Hot plug detection B
* 19 2 DAC A auto-detection
* 19 3 DAC B auto-detection
+ * 21 4 HDMI block A
+ * 21 5 HDMI block B
* 176 - CP_INT RB
* 177 - CP_INT IB1
* 178 - CP_INT IB2
@@ -2852,6 +3305,10 @@ restart_ih:
break;
}
break;
+ case 21: /* HDMI */
+ DRM_DEBUG("IH: HDMI: 0x%x\n", src_data);
+ r600_audio_schedule_polling(rdev);
+ break;
case 176: /* CP_INT in ring buffer */
case 177: /* CP_INT in IB1 */
case 178: /* CP_INT in IB2 */
@@ -2861,6 +3318,11 @@ restart_ih:
case 181: /* CP EOP event */
DRM_DEBUG("IH: CP EOP\n");
break;
+ case 233: /* GUI IDLE */
+ DRM_DEBUG("IH: CP EOP\n");
+ rdev->pm.gui_idle = true;
+ wake_up(&rdev->irq.idle_queue);
+ break;
default:
DRM_DEBUG("Unhandled interrupt: %d %d\n", src_id, src_data);
break;
diff --git a/drivers/gpu/drm/radeon/r600_audio.c b/drivers/gpu/drm/radeon/r600_audio.c
index 1d898051c63..2b26553c352 100644
--- a/drivers/gpu/drm/radeon/r600_audio.c
+++ b/drivers/gpu/drm/radeon/r600_audio.c
@@ -44,7 +44,7 @@ static int r600_audio_chipset_supported(struct radeon_device *rdev)
/*
* current number of channels
*/
-static int r600_audio_channels(struct radeon_device *rdev)
+int r600_audio_channels(struct radeon_device *rdev)
{
return (RREG32(R600_AUDIO_RATE_BPS_CHANNEL) & 0x7) + 1;
}
@@ -52,7 +52,7 @@ static int r600_audio_channels(struct radeon_device *rdev)
/*
* current bits per sample
*/
-static int r600_audio_bits_per_sample(struct radeon_device *rdev)
+int r600_audio_bits_per_sample(struct radeon_device *rdev)
{
uint32_t value = (RREG32(R600_AUDIO_RATE_BPS_CHANNEL) & 0xF0) >> 4;
switch (value) {
@@ -71,7 +71,7 @@ static int r600_audio_bits_per_sample(struct radeon_device *rdev)
/*
* current sampling rate in Hz
*/
-static int r600_audio_rate(struct radeon_device *rdev)
+int r600_audio_rate(struct radeon_device *rdev)
{
uint32_t value = RREG32(R600_AUDIO_RATE_BPS_CHANNEL);
uint32_t result;
@@ -90,7 +90,7 @@ static int r600_audio_rate(struct radeon_device *rdev)
/*
* iec 60958 status bits
*/
-static uint8_t r600_audio_status_bits(struct radeon_device *rdev)
+uint8_t r600_audio_status_bits(struct radeon_device *rdev)
{
return RREG32(R600_AUDIO_STATUS_BITS) & 0xff;
}
@@ -98,12 +98,21 @@ static uint8_t r600_audio_status_bits(struct radeon_device *rdev)
/*
* iec 60958 category code
*/
-static uint8_t r600_audio_category_code(struct radeon_device *rdev)
+uint8_t r600_audio_category_code(struct radeon_device *rdev)
{
return (RREG32(R600_AUDIO_STATUS_BITS) >> 8) & 0xff;
}
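These audio getters all decode fields of packed status registers with shift-and-mask, e.g. channels in bits 2:0 (biased by one) and the bits-per-sample code in bits 7:4 of R600_AUDIO_RATE_BPS_CHANNEL. The idiom in isolation, with a hardcoded register value standing in for the RREG32:

    #include <stdint.h>
    #include <stdio.h>

    int main(void)
    {
        /* pretend RREG32(R600_AUDIO_RATE_BPS_CHANNEL) returned this */
        uint32_t v = 0x000000b1;

        int channels = (v & 0x7) + 1;      /* bits 2:0, biased by one */
        int bps_code = (v & 0xf0) >> 4;    /* bits 7:4, encoded value */

        printf("channels=%d bps_code=0x%x\n", channels, bps_code);
        return 0;
    }
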
/*
+ * schedule next audio update event
+ */
+void r600_audio_schedule_polling(struct radeon_device *rdev)
+{
+ mod_timer(&rdev->audio_timer,
+ jiffies + msecs_to_jiffies(AUDIO_TIMER_INTERVALL));
+}
+
+/*
* update all hdmi interfaces with current audio parameters
*/
static void r600_audio_update_hdmi(unsigned long param)
@@ -118,7 +127,7 @@ static void r600_audio_update_hdmi(unsigned long param)
uint8_t category_code = r600_audio_category_code(rdev);
struct drm_encoder *encoder;
- int changes = 0;
+ int changes = 0, still_going = 0;
changes |= channels != rdev->audio_channels;
changes |= rate != rdev->audio_rate;
@@ -135,15 +144,13 @@ static void r600_audio_update_hdmi(unsigned long param)
}
list_for_each_entry(encoder, &dev->mode_config.encoder_list, head) {
+ struct radeon_encoder *radeon_encoder = to_radeon_encoder(encoder);
+ still_going |= radeon_encoder->audio_polling_active;
if (changes || r600_hdmi_buffer_status_changed(encoder))
- r600_hdmi_update_audio_settings(
- encoder, channels,
- rate, bps, status_bits,
- category_code);
+ r600_hdmi_update_audio_settings(encoder);
}
- mod_timer(&rdev->audio_timer,
- jiffies + msecs_to_jiffies(AUDIO_TIMER_INTERVALL));
+ if (still_going)
+ r600_audio_schedule_polling(rdev);
}
/*
@@ -176,9 +183,34 @@ int r600_audio_init(struct radeon_device *rdev)
r600_audio_update_hdmi,
(unsigned long)rdev);
+ return 0;
+}
+
+/*
+ * enable the polling timer, to check for status changes
+ */
+void r600_audio_enable_polling(struct drm_encoder *encoder)
+{
+ struct drm_device *dev = encoder->dev;
+ struct radeon_device *rdev = dev->dev_private;
+ struct radeon_encoder *radeon_encoder = to_radeon_encoder(encoder);
+
+ DRM_DEBUG("r600_audio_enable_polling: %d", radeon_encoder->audio_polling_active);
+ if (radeon_encoder->audio_polling_active)
+ return;
+
+ radeon_encoder->audio_polling_active = 1;
mod_timer(&rdev->audio_timer, jiffies + 1);
+}
- return 0;
+/*
+ * disable the polling timer, so we get no more status updates
+ */
+void r600_audio_disable_polling(struct drm_encoder *encoder)
+{
+ struct radeon_encoder *radeon_encoder = to_radeon_encoder(encoder);
+ DRM_DEBUG("r600_audio_disable_polling: %d", radeon_encoder->audio_polling_active);
+ radeon_encoder->audio_polling_active = 0;
}
/*
diff --git a/drivers/gpu/drm/radeon/r600_blit_kms.c b/drivers/gpu/drm/radeon/r600_blit_kms.c
index f6c6c77db7e..d13622ae74e 100644
--- a/drivers/gpu/drm/radeon/r600_blit_kms.c
+++ b/drivers/gpu/drm/radeon/r600_blit_kms.c
@@ -447,6 +447,9 @@ int r600_blit_init(struct radeon_device *rdev)
u32 packet2s[16];
int num_packet2s = 0;
+ /* don't reinitialize blit */
+ if (rdev->r600_blit.shader_obj)
+ return 0;
mutex_init(&rdev->r600_blit.mutex);
rdev->r600_blit.state_offset = 0;
diff --git a/drivers/gpu/drm/radeon/r600_hdmi.c b/drivers/gpu/drm/radeon/r600_hdmi.c
index 2616b822ba6..26b4bc9d89a 100644
--- a/drivers/gpu/drm/radeon/r600_hdmi.c
+++ b/drivers/gpu/drm/radeon/r600_hdmi.c
@@ -290,17 +290,15 @@ void r600_hdmi_audio_workaround(struct drm_encoder *encoder)
if (!offset)
return;
- if (r600_hdmi_is_audio_buffer_filled(encoder)) {
- /* disable audio workaround and start delivering of audio frames */
- WREG32_P(offset+R600_HDMI_CNTL, 0x00000001, ~0x00001001);
+ if (!radeon_encoder->hdmi_audio_workaround ||
+ r600_hdmi_is_audio_buffer_filled(encoder)) {
- } else if (radeon_encoder->hdmi_audio_workaround) {
- /* enable audio workaround and start delivering of audio frames */
- WREG32_P(offset+R600_HDMI_CNTL, 0x00001001, ~0x00001001);
+ /* disable audio workaround */
+ WREG32_P(offset+R600_HDMI_CNTL, 0x00000001, ~0x00001001);
} else {
- /* disable audio workaround and stop delivering of audio frames */
- WREG32_P(offset+R600_HDMI_CNTL, 0x00000000, ~0x00001001);
+ /* enable audio workaround */
+ WREG32_P(offset+R600_HDMI_CNTL, 0x00001001, ~0x00001001);
}
}
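WREG32_P(reg, val, mask) as used throughout this file is a read-modify-write where the bits set in mask are preserved and the bits cleared in mask are replaced from val (assuming the usual radeon definition). A one-function sketch:

    #include <stdint.h>
    #include <stdio.h>

    static uint32_t reg; /* stand-in for one MMIO register */

    static uint32_t rreg32(void) { return reg; }
    static void wreg32(uint32_t v) { reg = v; }

    /* Same shape as the driver's WREG32_P: keep the bits set in mask,
     * replace the bits cleared in mask with the matching bits of val. */
    static void wreg32_p(uint32_t val, uint32_t mask)
    {
        uint32_t tmp = rreg32();

        tmp &= mask;
        tmp |= val & ~mask;
        wreg32(tmp);
    }

    int main(void)
    {
        reg = 0xffffffff;
        wreg32_p(0x00000001, ~0x00001001u); /* as in the audio workaround */
        printf("reg = 0x%08x\n", reg);      /* bit 12 cleared, bit 0 kept: 0xffffefff */
        return 0;
    }

So `WREG32_P(offset+R600_HDMI_CNTL, 0x00000001, ~0x00001001)` above rewrites only bits 0 and 12 of the control register and leaves the rest untouched.
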
@@ -345,25 +343,23 @@ void r600_hdmi_setmode(struct drm_encoder *encoder, struct drm_display_mode *mod
/* audio packets per line, does anyone know how to calc this? */
WREG32_P(offset+R600_HDMI_CNTL, 0x00040000, ~0x001F0000);
-
- /* update? reset? don't realy know */
- WREG32_P(offset+R600_HDMI_CNTL, 0x14000000, ~0x14000000);
}
/*
* update settings with current parameters from audio engine
*/
-void r600_hdmi_update_audio_settings(struct drm_encoder *encoder,
- int channels,
- int rate,
- int bps,
- uint8_t status_bits,
- uint8_t category_code)
+void r600_hdmi_update_audio_settings(struct drm_encoder *encoder)
{
struct drm_device *dev = encoder->dev;
struct radeon_device *rdev = dev->dev_private;
uint32_t offset = to_radeon_encoder(encoder)->hdmi_offset;
+ int channels = r600_audio_channels(rdev);
+ int rate = r600_audio_rate(rdev);
+ int bps = r600_audio_bits_per_sample(rdev);
+ uint8_t status_bits = r600_audio_status_bits(rdev);
+ uint8_t category_code = r600_audio_category_code(rdev);
+
uint32_t iec;
if (!offset)
@@ -415,9 +411,6 @@ void r600_hdmi_update_audio_settings(struct drm_encoder *encoder,
r600_hdmi_audioinfoframe(encoder, channels-1, 0, 0, 0, 0, 0, 0, 0);
r600_hdmi_audio_workaround(encoder);
-
- /* update? reset? don't realy know */
- WREG32_P(offset+R600_HDMI_CNTL, 0x04000000, ~0x04000000);
}
static int r600_hdmi_find_free_block(struct drm_device *dev)
@@ -486,6 +479,7 @@ void r600_hdmi_enable(struct drm_encoder *encoder)
struct drm_device *dev = encoder->dev;
struct radeon_device *rdev = dev->dev_private;
struct radeon_encoder *radeon_encoder = to_radeon_encoder(encoder);
+ uint32_t offset;
if (ASIC_IS_DCE4(rdev))
return;
@@ -499,10 +493,10 @@ void r600_hdmi_enable(struct drm_encoder *encoder)
}
}
+ offset = radeon_encoder->hdmi_offset;
if (ASIC_IS_DCE32(rdev) && !ASIC_IS_DCE4(rdev)) {
WREG32_P(radeon_encoder->hdmi_config_offset + 0x4, 0x1, ~0x1);
} else if (rdev->family >= CHIP_R600 && !ASIC_IS_DCE3(rdev)) {
- int offset = radeon_encoder->hdmi_offset;
switch (radeon_encoder->encoder_id) {
case ENCODER_OBJECT_ID_INTERNAL_KLDSCP_TMDS1:
WREG32_P(AVIVO_TMDSA_CNTL, 0x4, ~0x4);
@@ -518,6 +512,21 @@ void r600_hdmi_enable(struct drm_encoder *encoder)
}
}
+ if (rdev->irq.installed
+ && rdev->family != CHIP_RS600
+ && rdev->family != CHIP_RS690
+ && rdev->family != CHIP_RS740) {
+
+ /* if an irq is available, use it */
+ rdev->irq.hdmi[offset == R600_HDMI_BLOCK1 ? 0 : 1] = true;
+ radeon_irq_set(rdev);
+
+ r600_audio_disable_polling(encoder);
+ } else {
+ /* if not fallback to polling */
+ r600_audio_enable_polling(encoder);
+ }
+
DRM_DEBUG("Enabling HDMI interface @ 0x%04X for encoder 0x%x\n",
radeon_encoder->hdmi_offset, radeon_encoder->encoder_id);
}
@@ -530,22 +539,30 @@ void r600_hdmi_disable(struct drm_encoder *encoder)
struct drm_device *dev = encoder->dev;
struct radeon_device *rdev = dev->dev_private;
struct radeon_encoder *radeon_encoder = to_radeon_encoder(encoder);
+ uint32_t offset;
if (ASIC_IS_DCE4(rdev))
return;
- if (!radeon_encoder->hdmi_offset) {
+ offset = radeon_encoder->hdmi_offset;
+ if (!offset) {
dev_err(rdev->dev, "Disabling not enabled HDMI\n");
return;
}
DRM_DEBUG("Disabling HDMI interface @ 0x%04X for encoder 0x%x\n",
- radeon_encoder->hdmi_offset, radeon_encoder->encoder_id);
+ offset, radeon_encoder->encoder_id);
+
+ /* disable irq */
+ rdev->irq.hdmi[offset == R600_HDMI_BLOCK1 ? 0 : 1] = false;
+ radeon_irq_set(rdev);
+
+ /* disable polling */
+ r600_audio_disable_polling(encoder);
if (ASIC_IS_DCE32(rdev) && !ASIC_IS_DCE4(rdev)) {
WREG32_P(radeon_encoder->hdmi_config_offset + 0x4, 0, ~0x1);
} else if (rdev->family >= CHIP_R600 && !ASIC_IS_DCE3(rdev)) {
- int offset = radeon_encoder->hdmi_offset;
switch (radeon_encoder->encoder_id) {
case ENCODER_OBJECT_ID_INTERNAL_KLDSCP_TMDS1:
WREG32_P(AVIVO_TMDSA_CNTL, 0, ~0x4);
diff --git a/drivers/gpu/drm/radeon/r600_reg.h b/drivers/gpu/drm/radeon/r600_reg.h
index 7b1d22370f6..d84612ae47e 100644
--- a/drivers/gpu/drm/radeon/r600_reg.h
+++ b/drivers/gpu/drm/radeon/r600_reg.h
@@ -157,33 +157,36 @@
#define R600_HDMI_BLOCK3 0x7800
/* HDMI registers */
-#define R600_HDMI_ENABLE 0x00
-#define R600_HDMI_STATUS 0x04
-#define R600_HDMI_CNTL 0x08
-#define R600_HDMI_UNKNOWN_0 0x0C
-#define R600_HDMI_AUDIOCNTL 0x10
-#define R600_HDMI_VIDEOCNTL 0x14
-#define R600_HDMI_VERSION 0x18
-#define R600_HDMI_UNKNOWN_1 0x28
-#define R600_HDMI_VIDEOINFOFRAME_0 0x54
-#define R600_HDMI_VIDEOINFOFRAME_1 0x58
-#define R600_HDMI_VIDEOINFOFRAME_2 0x5c
-#define R600_HDMI_VIDEOINFOFRAME_3 0x60
-#define R600_HDMI_32kHz_CTS 0xac
-#define R600_HDMI_32kHz_N 0xb0
-#define R600_HDMI_44_1kHz_CTS 0xb4
-#define R600_HDMI_44_1kHz_N 0xb8
-#define R600_HDMI_48kHz_CTS 0xbc
-#define R600_HDMI_48kHz_N 0xc0
-#define R600_HDMI_AUDIOINFOFRAME_0 0xcc
-#define R600_HDMI_AUDIOINFOFRAME_1 0xd0
-#define R600_HDMI_IEC60958_1 0xd4
-#define R600_HDMI_IEC60958_2 0xd8
-#define R600_HDMI_UNKNOWN_2 0xdc
-#define R600_HDMI_AUDIO_DEBUG_0 0xe0
-#define R600_HDMI_AUDIO_DEBUG_1 0xe4
-#define R600_HDMI_AUDIO_DEBUG_2 0xe8
-#define R600_HDMI_AUDIO_DEBUG_3 0xec
+#define R600_HDMI_ENABLE 0x00
+#define R600_HDMI_STATUS 0x04
+# define R600_HDMI_INT_PENDING (1 << 29)
+#define R600_HDMI_CNTL 0x08
+# define R600_HDMI_INT_EN (1 << 28)
+# define R600_HDMI_INT_ACK (1 << 29)
+#define R600_HDMI_UNKNOWN_0 0x0C
+#define R600_HDMI_AUDIOCNTL 0x10
+#define R600_HDMI_VIDEOCNTL 0x14
+#define R600_HDMI_VERSION 0x18
+#define R600_HDMI_UNKNOWN_1 0x28
+#define R600_HDMI_VIDEOINFOFRAME_0 0x54
+#define R600_HDMI_VIDEOINFOFRAME_1 0x58
+#define R600_HDMI_VIDEOINFOFRAME_2 0x5c
+#define R600_HDMI_VIDEOINFOFRAME_3 0x60
+#define R600_HDMI_32kHz_CTS 0xac
+#define R600_HDMI_32kHz_N 0xb0
+#define R600_HDMI_44_1kHz_CTS 0xb4
+#define R600_HDMI_44_1kHz_N 0xb8
+#define R600_HDMI_48kHz_CTS 0xbc
+#define R600_HDMI_48kHz_N 0xc0
+#define R600_HDMI_AUDIOINFOFRAME_0 0xcc
+#define R600_HDMI_AUDIOINFOFRAME_1 0xd0
+#define R600_HDMI_IEC60958_1 0xd4
+#define R600_HDMI_IEC60958_2 0xd8
+#define R600_HDMI_UNKNOWN_2 0xdc
+#define R600_HDMI_AUDIO_DEBUG_0 0xe0
+#define R600_HDMI_AUDIO_DEBUG_1 0xe4
+#define R600_HDMI_AUDIO_DEBUG_2 0xe8
+#define R600_HDMI_AUDIO_DEBUG_3 0xec
/* HDMI additional config base register addresses */
#define R600_HDMI_CONFIG1 0x7600
diff --git a/drivers/gpu/drm/radeon/radeon.h b/drivers/gpu/drm/radeon/radeon.h
index 034218c3dbb..66a37fb7583 100644
--- a/drivers/gpu/drm/radeon/radeon.h
+++ b/drivers/gpu/drm/radeon/radeon.h
@@ -89,7 +89,6 @@ extern int radeon_testing;
extern int radeon_connector_table;
extern int radeon_tv;
extern int radeon_new_pll;
-extern int radeon_dynpm;
extern int radeon_audio;
extern int radeon_disp_priority;
extern int radeon_hw_i2c;
@@ -99,6 +98,7 @@ extern int radeon_hw_i2c;
* symbol;
*/
#define RADEON_MAX_USEC_TIMEOUT 100000 /* 100 ms */
+#define RADEON_FENCE_JIFFIES_TIMEOUT (HZ / 2)
/* RADEON_IB_POOL_SIZE must be a power of 2 */
#define RADEON_IB_POOL_SIZE 16
#define RADEON_DEBUGFS_MAX_NUM_FILES 32
@@ -172,6 +172,8 @@ struct radeon_clock {
int radeon_pm_init(struct radeon_device *rdev);
void radeon_pm_fini(struct radeon_device *rdev);
void radeon_pm_compute_clocks(struct radeon_device *rdev);
+void radeon_pm_suspend(struct radeon_device *rdev);
+void radeon_pm_resume(struct radeon_device *rdev);
void radeon_combios_get_power_modes(struct radeon_device *rdev);
void radeon_atombios_get_power_modes(struct radeon_device *rdev);
@@ -182,7 +184,8 @@ struct radeon_fence_driver {
uint32_t scratch_reg;
atomic_t seq;
uint32_t last_seq;
- unsigned long count_timeout;
+ unsigned long last_jiffies;
+ unsigned long last_timeout;
wait_queue_head_t queue;
rwlock_t lock;
struct list_head created;
@@ -197,7 +200,6 @@ struct radeon_fence {
struct list_head list;
/* protected by radeon_fence.lock */
uint32_t seq;
- unsigned long timeout;
bool emited;
bool signaled;
};
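The fence hunk above drops the per-fence absolute timeout in favour of last_jiffies/last_timeout bookkeeping against RADEON_FENCE_JIFFIES_TIMEOUT, so waits are measured in elapsed slices rather than a wrap-prone deadline. A sketch of how such a wait budget can be carried between rounds; the arithmetic here is an assumption for illustration, not lifted from the driver:

    #include <stdio.h>

    #define HZ 250
    #define FENCE_JIFFIES_TIMEOUT (HZ / 2)

    int main(void)
    {
        long last_jiffies = 1000;   /* when activity was last observed */
        long last_timeout = FENCE_JIFFIES_TIMEOUT; /* budget for this round */
        long jiffies = 1080;        /* "now" */

        /* time burnt since the last observed activity ... */
        long elapsed = jiffies - last_jiffies;

        /* ... shrinks the remaining budget; <= 0 means go check for a lockup */
        long remaining = last_timeout - elapsed;

        printf("remaining wait: %ld jiffies\n", remaining);
        return 0;
    }
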
@@ -259,6 +261,7 @@ struct radeon_bo_list {
unsigned rdomain;
unsigned wdomain;
u32 tiling_flags;
+ bool reserved;
};
/*
@@ -371,10 +374,15 @@ struct radeon_irq {
bool installed;
bool sw_int;
/* FIXME: use a define max crtc rather than hardcode it */
- bool crtc_vblank_int[2];
+ bool crtc_vblank_int[6];
wait_queue_head_t vblank_queue;
/* FIXME: use defines for max hpd/dacs */
bool hpd[6];
+ bool gui_idle;
+ bool gui_idle_acked;
+ wait_queue_head_t idle_queue;
+ /* FIXME: use defines for max HDMI blocks */
+ bool hdmi[2];
spinlock_t sw_lock;
int sw_refcount;
};
@@ -462,7 +470,9 @@ int radeon_ib_test(struct radeon_device *rdev);
extern void radeon_ib_bogus_add(struct radeon_device *rdev, struct radeon_ib *ib);
/* Ring access between begin & end cannot sleep */
void radeon_ring_free_size(struct radeon_device *rdev);
+int radeon_ring_alloc(struct radeon_device *rdev, unsigned ndw);
int radeon_ring_lock(struct radeon_device *rdev, unsigned ndw);
+void radeon_ring_commit(struct radeon_device *rdev);
void radeon_ring_unlock_commit(struct radeon_device *rdev);
void radeon_ring_unlock_undo(struct radeon_device *rdev);
int radeon_ring_test(struct radeon_device *rdev);
@@ -597,17 +607,24 @@ struct radeon_wb {
* Equation between gpu/memory clock and available bandwidth is hw dependent
* (type of memory, bus size, efficiency, ...)
*/
-enum radeon_pm_state {
- PM_STATE_DISABLED,
- PM_STATE_MINIMUM,
- PM_STATE_PAUSED,
- PM_STATE_ACTIVE
+
+enum radeon_pm_method {
+ PM_METHOD_PROFILE,
+ PM_METHOD_DYNPM,
+};
+
+enum radeon_dynpm_state {
+ DYNPM_STATE_DISABLED,
+ DYNPM_STATE_MINIMUM,
+ DYNPM_STATE_PAUSED,
+ DYNPM_STATE_ACTIVE
};
-enum radeon_pm_action {
- PM_ACTION_NONE,
- PM_ACTION_MINIMUM,
- PM_ACTION_DOWNCLOCK,
- PM_ACTION_UPCLOCK
+enum radeon_dynpm_action {
+ DYNPM_ACTION_NONE,
+ DYNPM_ACTION_MINIMUM,
+ DYNPM_ACTION_DOWNCLOCK,
+ DYNPM_ACTION_UPCLOCK,
+ DYNPM_ACTION_DEFAULT
};
enum radeon_voltage_type {
@@ -625,11 +642,25 @@ enum radeon_pm_state_type {
POWER_STATE_TYPE_PERFORMANCE,
};
-enum radeon_pm_clock_mode_type {
- POWER_MODE_TYPE_DEFAULT,
- POWER_MODE_TYPE_LOW,
- POWER_MODE_TYPE_MID,
- POWER_MODE_TYPE_HIGH,
+enum radeon_pm_profile_type {
+ PM_PROFILE_DEFAULT,
+ PM_PROFILE_AUTO,
+ PM_PROFILE_LOW,
+ PM_PROFILE_HIGH,
+};
+
+#define PM_PROFILE_DEFAULT_IDX 0
+#define PM_PROFILE_LOW_SH_IDX 1
+#define PM_PROFILE_HIGH_SH_IDX 2
+#define PM_PROFILE_LOW_MH_IDX 3
+#define PM_PROFILE_HIGH_MH_IDX 4
+#define PM_PROFILE_MAX 5
+
+struct radeon_pm_profile {
+ int dpms_off_ps_idx;
+ int dpms_on_ps_idx;
+ int dpms_off_cm_idx;
+ int dpms_on_cm_idx;
};
struct radeon_voltage {
@@ -646,12 +677,8 @@ struct radeon_voltage {
u32 voltage;
};
-struct radeon_pm_non_clock_info {
- /* pcie lanes */
- int pcie_lanes;
- /* standardized non-clock flags */
- u32 flags;
-};
+/* clock mode flags */
+#define RADEON_PM_MODE_NO_DISPLAY (1 << 0)
struct radeon_pm_clock_info {
/* memory clock */
@@ -660,10 +687,13 @@ struct radeon_pm_clock_info {
u32 sclk;
/* voltage info */
struct radeon_voltage voltage;
- /* standardized clock flags - not sure we'll need these */
+ /* standardized clock flags */
u32 flags;
};
+/* state flags */
+#define RADEON_PM_STATE_SINGLE_DISPLAY_ONLY (1 << 0)
+
struct radeon_power_state {
enum radeon_pm_state_type type;
/* XXX: use a define for num clock modes */
@@ -671,9 +701,11 @@ struct radeon_power_state {
/* number of valid clock modes in this power state */
int num_clock_modes;
struct radeon_pm_clock_info *default_clock_mode;
- /* non clock info about this state */
- struct radeon_pm_non_clock_info non_clock_info;
- bool voltage_drop_active;
+ /* standardized state flags */
+ u32 flags;
+ u32 misc; /* vbios specific flags */
+ u32 misc2; /* vbios specific flags */
+ int pcie_lanes; /* pcie lanes */
};
/*
@@ -683,14 +715,11 @@ struct radeon_power_state {
struct radeon_pm {
struct mutex mutex;
- struct delayed_work idle_work;
- enum radeon_pm_state state;
- enum radeon_pm_action planned_action;
- unsigned long action_timeout;
- bool downclocked;
- int active_crtcs;
+ u32 active_crtcs;
+ int active_crtc_count;
int req_vblank;
bool vblank_sync;
+ bool gui_idle;
fixed20_12 max_bandwidth;
fixed20_12 igp_sideport_mclk;
fixed20_12 igp_system_mclk;
@@ -707,12 +736,27 @@ struct radeon_pm {
struct radeon_power_state power_state[8];
/* number of valid power states */
int num_power_states;
- struct radeon_power_state *current_power_state;
- struct radeon_pm_clock_info *current_clock_mode;
- struct radeon_power_state *requested_power_state;
- struct radeon_pm_clock_info *requested_clock_mode;
- struct radeon_power_state *default_power_state;
+ int current_power_state_index;
+ int current_clock_mode_index;
+ int requested_power_state_index;
+ int requested_clock_mode_index;
+ int default_power_state_index;
+ u32 current_sclk;
+ u32 current_mclk;
struct radeon_i2c_chan *i2c_bus;
+ /* selected pm method */
+ enum radeon_pm_method pm_method;
+ /* dynpm power management */
+ struct delayed_work dynpm_idle_work;
+ enum radeon_dynpm_state dynpm_state;
+ enum radeon_dynpm_action dynpm_planned_action;
+ unsigned long dynpm_action_timeout;
+ bool dynpm_can_upclock;
+ bool dynpm_can_downclock;
+ /* profile-based power management */
+ enum radeon_pm_profile_type profile;
+ int profile_index;
+ struct radeon_pm_profile profiles[PM_PROFILE_MAX];
};
@@ -746,7 +790,8 @@ struct radeon_asic {
int (*resume)(struct radeon_device *rdev);
int (*suspend)(struct radeon_device *rdev);
void (*vga_set_state)(struct radeon_device *rdev, bool state);
- int (*gpu_reset)(struct radeon_device *rdev);
+ bool (*gpu_is_lockup)(struct radeon_device *rdev);
+ int (*asic_reset)(struct radeon_device *rdev);
void (*gart_tlb_flush)(struct radeon_device *rdev);
int (*gart_set_page)(struct radeon_device *rdev, int i, uint64_t addr);
int (*cp_init)(struct radeon_device *rdev, unsigned ring_size);
@@ -799,44 +844,84 @@ struct radeon_asic {
* through ring.
*/
void (*ioctl_wait_idle)(struct radeon_device *rdev, struct radeon_bo *bo);
+ bool (*gui_idle)(struct radeon_device *rdev);
+ /* power management */
+ void (*pm_misc)(struct radeon_device *rdev);
+ void (*pm_prepare)(struct radeon_device *rdev);
+ void (*pm_finish)(struct radeon_device *rdev);
+ void (*pm_init_profile)(struct radeon_device *rdev);
+ void (*pm_get_dynpm_state)(struct radeon_device *rdev);
};
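The new gui_idle and pm_* members extend the per-ASIC ops table that the radeon_* wrapper macros further down dispatch through. The same pattern in miniature, with toy types:

    #include <stdbool.h>
    #include <stdio.h>

    struct device;

    struct asic_ops {
        bool (*gui_idle)(struct device *dev);
        void (*pm_misc)(struct device *dev);
    };

    static bool toy_gui_idle(struct device *dev) { (void)dev; return true; }
    static void toy_pm_misc(struct device *dev) { (void)dev; }

    struct device { const struct asic_ops *ops; };

    /* mirror of the wrapper-macro style used by the driver */
    #define dev_gui_idle(d) ((d)->ops->gui_idle((d)))

    int main(void)
    {
        static const struct asic_ops ops = { toy_gui_idle, toy_pm_misc };
        struct device dev = { &ops };

        printf("gui idle: %d\n", dev_gui_idle(&dev));
        return 0;
    }
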
/*
* Asic structures
*/
+struct r100_gpu_lockup {
+ unsigned long last_jiffies;
+ u32 last_cp_rptr;
+};
+
struct r100_asic {
- const unsigned *reg_safe_bm;
- unsigned reg_safe_bm_size;
- u32 hdp_cntl;
+ const unsigned *reg_safe_bm;
+ unsigned reg_safe_bm_size;
+ u32 hdp_cntl;
+ struct r100_gpu_lockup lockup;
};
struct r300_asic {
- const unsigned *reg_safe_bm;
- unsigned reg_safe_bm_size;
- u32 resync_scratch;
- u32 hdp_cntl;
+ const unsigned *reg_safe_bm;
+ unsigned reg_safe_bm_size;
+ u32 resync_scratch;
+ u32 hdp_cntl;
+ struct r100_gpu_lockup lockup;
};
struct r600_asic {
- unsigned max_pipes;
- unsigned max_tile_pipes;
- unsigned max_simds;
- unsigned max_backends;
- unsigned max_gprs;
- unsigned max_threads;
- unsigned max_stack_entries;
- unsigned max_hw_contexts;
- unsigned max_gs_threads;
- unsigned sx_max_export_size;
- unsigned sx_max_export_pos_size;
- unsigned sx_max_export_smx_size;
- unsigned sq_num_cf_insts;
- unsigned tiling_nbanks;
- unsigned tiling_npipes;
- unsigned tiling_group_size;
+ unsigned max_pipes;
+ unsigned max_tile_pipes;
+ unsigned max_simds;
+ unsigned max_backends;
+ unsigned max_gprs;
+ unsigned max_threads;
+ unsigned max_stack_entries;
+ unsigned max_hw_contexts;
+ unsigned max_gs_threads;
+ unsigned sx_max_export_size;
+ unsigned sx_max_export_pos_size;
+ unsigned sx_max_export_smx_size;
+ unsigned sq_num_cf_insts;
+ unsigned tiling_nbanks;
+ unsigned tiling_npipes;
+ unsigned tiling_group_size;
+ struct r100_gpu_lockup lockup;
};
struct rv770_asic {
+ unsigned max_pipes;
+ unsigned max_tile_pipes;
+ unsigned max_simds;
+ unsigned max_backends;
+ unsigned max_gprs;
+ unsigned max_threads;
+ unsigned max_stack_entries;
+ unsigned max_hw_contexts;
+ unsigned max_gs_threads;
+ unsigned sx_max_export_size;
+ unsigned sx_max_export_pos_size;
+ unsigned sx_max_export_smx_size;
+ unsigned sq_num_cf_insts;
+ unsigned sx_num_of_sets;
+ unsigned sc_prim_fifo_size;
+ unsigned sc_hiz_tile_fifo_size;
+ unsigned sc_earlyz_tile_fifo_fize;
+ unsigned tiling_nbanks;
+ unsigned tiling_npipes;
+ unsigned tiling_group_size;
+ struct r100_gpu_lockup lockup;
+};
+
+struct evergreen_asic {
+ unsigned num_ses;
unsigned max_pipes;
unsigned max_tile_pipes;
unsigned max_simds;
@@ -853,7 +938,7 @@ struct rv770_asic {
unsigned sx_num_of_sets;
unsigned sc_prim_fifo_size;
unsigned sc_hiz_tile_fifo_size;
- unsigned sc_earlyz_tile_fifo_fize;
+ unsigned sc_earlyz_tile_fifo_size;
unsigned tiling_nbanks;
unsigned tiling_npipes;
unsigned tiling_group_size;
@@ -864,6 +949,7 @@ union radeon_asic_config {
struct r100_asic r100;
struct r600_asic r600;
struct rv770_asic rv770;
+ struct evergreen_asic evergreen;
};
/*
@@ -927,9 +1013,6 @@ struct radeon_device {
bool is_atom_bios;
uint16_t bios_header_start;
struct radeon_bo *stollen_vga_memory;
- struct fb_info *fbdev_info;
- struct radeon_bo *fbdev_rbo;
- struct radeon_framebuffer *fbdev_rfb;
/* Register mmio */
resource_size_t rmmio_base;
resource_size_t rmmio_size;
@@ -974,6 +1057,7 @@ struct radeon_device {
struct work_struct hotplug_work;
int num_crtc; /* number of crtcs */
struct mutex dc_hw_i2c_mutex; /* display controller hw i2c mutex */
+ struct mutex vram_mutex;
/* audio stuff */
struct timer_list audio_timer;
@@ -984,6 +1068,7 @@ struct radeon_device {
uint8_t audio_category_code;
bool powered_down;
+ struct notifier_block acpi_nb;
};
int radeon_device_init(struct radeon_device *rdev,
@@ -1145,7 +1230,8 @@ static inline void radeon_ring_write(struct radeon_device *rdev, uint32_t v)
#define radeon_suspend(rdev) (rdev)->asic->suspend((rdev))
#define radeon_cs_parse(p) rdev->asic->cs_parse((p))
#define radeon_vga_set_state(rdev, state) (rdev)->asic->vga_set_state((rdev), (state))
-#define radeon_gpu_reset(rdev) (rdev)->asic->gpu_reset((rdev))
+#define radeon_gpu_is_lockup(rdev) (rdev)->asic->gpu_is_lockup((rdev))
+#define radeon_asic_reset(rdev) (rdev)->asic->asic_reset((rdev))
#define radeon_gart_tlb_flush(rdev) (rdev)->asic->gart_tlb_flush((rdev))
#define radeon_gart_set_page(rdev, i, p) (rdev)->asic->gart_set_page((rdev), (i), (p))
#define radeon_cp_commit(rdev) (rdev)->asic->cp_commit((rdev))
@@ -1173,9 +1259,16 @@ static inline void radeon_ring_write(struct radeon_device *rdev, uint32_t v)
#define radeon_hpd_fini(rdev) (rdev)->asic->hpd_fini((rdev))
#define radeon_hpd_sense(rdev, hpd) (rdev)->asic->hpd_sense((rdev), (hpd))
#define radeon_hpd_set_polarity(rdev, hpd) (rdev)->asic->hpd_set_polarity((rdev), (hpd))
+#define radeon_gui_idle(rdev) (rdev)->asic->gui_idle((rdev))
+#define radeon_pm_misc(rdev) (rdev)->asic->pm_misc((rdev))
+#define radeon_pm_prepare(rdev) (rdev)->asic->pm_prepare((rdev))
+#define radeon_pm_finish(rdev) (rdev)->asic->pm_finish((rdev))
+#define radeon_pm_init_profile(rdev) (rdev)->asic->pm_init_profile((rdev))
+#define radeon_pm_get_dynpm_state(rdev) (rdev)->asic->pm_get_dynpm_state((rdev))
/* Common functions */
/* AGP */
+extern int radeon_gpu_reset(struct radeon_device *rdev);
extern void radeon_agp_disable(struct radeon_device *rdev);
extern int radeon_gart_table_vram_pin(struct radeon_device *rdev);
extern void radeon_gart_restore(struct radeon_device *rdev);
@@ -1200,6 +1293,8 @@ extern int radeon_resume_kms(struct drm_device *dev);
extern int radeon_suspend_kms(struct drm_device *dev, pm_message_t state);
/* r100,rv100,rs100,rv200,rs200,r200,rv250,rs300,rv280 */
+extern void r100_gpu_lockup_update(struct r100_gpu_lockup *lockup, struct radeon_cp *cp);
+extern bool r100_gpu_cp_is_lockup(struct radeon_device *rdev, struct r100_gpu_lockup *lockup, struct radeon_cp *cp);
/* rv200,rv250,rv280 */
extern void r200_set_safe_registers(struct radeon_device *rdev);
@@ -1260,6 +1355,7 @@ extern void rs690_line_buffer_adjust(struct radeon_device *rdev,
extern void r600_vram_gtt_location(struct radeon_device *rdev, struct radeon_mc *mc);
extern bool r600_card_posted(struct radeon_device *rdev);
extern void r600_cp_stop(struct radeon_device *rdev);
+extern int r600_cp_start(struct radeon_device *rdev);
extern void r600_ring_init(struct radeon_device *rdev, unsigned ring_size);
extern int r600_cp_resume(struct radeon_device *rdev);
extern void r600_cp_fini(struct radeon_device *rdev);
@@ -1276,29 +1372,39 @@ extern void r600_scratch_init(struct radeon_device *rdev);
extern int r600_blit_init(struct radeon_device *rdev);
extern void r600_blit_fini(struct radeon_device *rdev);
extern int r600_init_microcode(struct radeon_device *rdev);
-extern int r600_gpu_reset(struct radeon_device *rdev);
+extern int r600_asic_reset(struct radeon_device *rdev);
/* r600 irq */
extern int r600_irq_init(struct radeon_device *rdev);
extern void r600_irq_fini(struct radeon_device *rdev);
extern void r600_ih_ring_init(struct radeon_device *rdev, unsigned ring_size);
extern int r600_irq_set(struct radeon_device *rdev);
extern void r600_irq_suspend(struct radeon_device *rdev);
+extern void r600_disable_interrupts(struct radeon_device *rdev);
+extern void r600_rlc_stop(struct radeon_device *rdev);
/* r600 audio */
extern int r600_audio_init(struct radeon_device *rdev);
extern int r600_audio_tmds_index(struct drm_encoder *encoder);
extern void r600_audio_set_clock(struct drm_encoder *encoder, int clock);
+extern int r600_audio_channels(struct radeon_device *rdev);
+extern int r600_audio_bits_per_sample(struct radeon_device *rdev);
+extern int r600_audio_rate(struct radeon_device *rdev);
+extern uint8_t r600_audio_status_bits(struct radeon_device *rdev);
+extern uint8_t r600_audio_category_code(struct radeon_device *rdev);
+extern void r600_audio_schedule_polling(struct radeon_device *rdev);
+extern void r600_audio_enable_polling(struct drm_encoder *encoder);
+extern void r600_audio_disable_polling(struct drm_encoder *encoder);
extern void r600_audio_fini(struct radeon_device *rdev);
extern void r600_hdmi_init(struct drm_encoder *encoder);
extern void r600_hdmi_enable(struct drm_encoder *encoder);
extern void r600_hdmi_disable(struct drm_encoder *encoder);
extern void r600_hdmi_setmode(struct drm_encoder *encoder, struct drm_display_mode *mode);
extern int r600_hdmi_buffer_status_changed(struct drm_encoder *encoder);
-extern void r600_hdmi_update_audio_settings(struct drm_encoder *encoder,
- int channels,
- int rate,
- int bps,
- uint8_t status_bits,
- uint8_t category_code);
+extern void r600_hdmi_update_audio_settings(struct drm_encoder *encoder);
+
+extern void r700_cp_stop(struct radeon_device *rdev);
+extern void r700_cp_fini(struct radeon_device *rdev);
+extern void evergreen_disable_interrupt_state(struct radeon_device *rdev);
+extern int evergreen_irq_set(struct radeon_device *rdev);
/* evergreen */
struct evergreen_mc_save {
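
The changes above split the old gpu_reset() hook in two: gpu_is_lockup() decides whether the command processor has really stopped making progress, while asic_reset() performs the hardware reset, with the new radeon_gpu_reset() wrapper driving both. A stand-alone C sketch of that control flow (the hook bodies and the reset policy shown are illustrative stand-ins, not the driver's code):

#include <stdbool.h>
#include <stdio.h>

/* Minimal stand-ins for the driver's per-ASIC function table. */
struct radeon_device;
struct radeon_asic {
	bool (*gpu_is_lockup)(struct radeon_device *rdev);
	int  (*asic_reset)(struct radeon_device *rdev);
};
struct radeon_device { const struct radeon_asic *asic; };

/* Sketch of radeon_gpu_reset(): reset only once a lockup is confirmed. */
static int gpu_reset_model(struct radeon_device *rdev)
{
	if (!rdev->asic->gpu_is_lockup(rdev))
		return 0;	/* still making progress: nothing to do */
	return rdev->asic->asic_reset(rdev);
}

static bool stub_is_lockup(struct radeon_device *rdev) { (void)rdev; return true; }
static int stub_reset(struct radeon_device *rdev) { (void)rdev; puts("reset"); return 0; }

int main(void)
{
	const struct radeon_asic asic = { stub_is_lockup, stub_reset };
	struct radeon_device rdev = { &asic };

	return gpu_reset_model(&rdev);
}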
diff --git a/drivers/gpu/drm/radeon/radeon_agp.c b/drivers/gpu/drm/radeon/radeon_agp.c
index c4457791dff..28e473f1f56 100644
--- a/drivers/gpu/drm/radeon/radeon_agp.c
+++ b/drivers/gpu/drm/radeon/radeon_agp.c
@@ -134,12 +134,10 @@ int radeon_agp_init(struct radeon_device *rdev)
int ret;
/* Acquire AGP. */
- if (!rdev->ddev->agp->acquired) {
- ret = drm_agp_acquire(rdev->ddev);
- if (ret) {
- DRM_ERROR("Unable to acquire AGP: %d\n", ret);
- return ret;
- }
+ ret = drm_agp_acquire(rdev->ddev);
+ if (ret) {
+ DRM_ERROR("Unable to acquire AGP: %d\n", ret);
+ return ret;
}
ret = drm_agp_info(rdev->ddev, &info);
diff --git a/drivers/gpu/drm/radeon/radeon_asic.c b/drivers/gpu/drm/radeon/radeon_asic.c
index a4b4bc9fa32..e57df08d4ae 100644
--- a/drivers/gpu/drm/radeon/radeon_asic.c
+++ b/drivers/gpu/drm/radeon/radeon_asic.c
@@ -134,7 +134,8 @@ static struct radeon_asic r100_asic = {
.suspend = &r100_suspend,
.resume = &r100_resume,
.vga_set_state = &r100_vga_set_state,
- .gpu_reset = &r100_gpu_reset,
+ .gpu_is_lockup = &r100_gpu_is_lockup,
+ .asic_reset = &r100_asic_reset,
.gart_tlb_flush = &r100_pci_gart_tlb_flush,
.gart_set_page = &r100_pci_gart_set_page,
.cp_commit = &r100_cp_commit,
@@ -164,6 +165,12 @@ static struct radeon_asic r100_asic = {
.hpd_sense = &r100_hpd_sense,
.hpd_set_polarity = &r100_hpd_set_polarity,
.ioctl_wait_idle = NULL,
+ .gui_idle = &r100_gui_idle,
+ .pm_misc = &r100_pm_misc,
+ .pm_prepare = &r100_pm_prepare,
+ .pm_finish = &r100_pm_finish,
+ .pm_init_profile = &r100_pm_init_profile,
+ .pm_get_dynpm_state = &r100_pm_get_dynpm_state,
};
static struct radeon_asic r200_asic = {
@@ -172,7 +179,8 @@ static struct radeon_asic r200_asic = {
.suspend = &r100_suspend,
.resume = &r100_resume,
.vga_set_state = &r100_vga_set_state,
- .gpu_reset = &r100_gpu_reset,
+ .gpu_is_lockup = &r100_gpu_is_lockup,
+ .asic_reset = &r100_asic_reset,
.gart_tlb_flush = &r100_pci_gart_tlb_flush,
.gart_set_page = &r100_pci_gart_set_page,
.cp_commit = &r100_cp_commit,
@@ -201,6 +209,12 @@ static struct radeon_asic r200_asic = {
.hpd_sense = &r100_hpd_sense,
.hpd_set_polarity = &r100_hpd_set_polarity,
.ioctl_wait_idle = NULL,
+ .gui_idle = &r100_gui_idle,
+ .pm_misc = &r100_pm_misc,
+ .pm_prepare = &r100_pm_prepare,
+ .pm_finish = &r100_pm_finish,
+ .pm_init_profile = &r100_pm_init_profile,
+ .pm_get_dynpm_state = &r100_pm_get_dynpm_state,
};
static struct radeon_asic r300_asic = {
@@ -209,7 +223,8 @@ static struct radeon_asic r300_asic = {
.suspend = &r300_suspend,
.resume = &r300_resume,
.vga_set_state = &r100_vga_set_state,
- .gpu_reset = &r300_gpu_reset,
+ .gpu_is_lockup = &r300_gpu_is_lockup,
+ .asic_reset = &r300_asic_reset,
.gart_tlb_flush = &r100_pci_gart_tlb_flush,
.gart_set_page = &r100_pci_gart_set_page,
.cp_commit = &r100_cp_commit,
@@ -239,6 +254,12 @@ static struct radeon_asic r300_asic = {
.hpd_sense = &r100_hpd_sense,
.hpd_set_polarity = &r100_hpd_set_polarity,
.ioctl_wait_idle = NULL,
+ .gui_idle = &r100_gui_idle,
+ .pm_misc = &r100_pm_misc,
+ .pm_prepare = &r100_pm_prepare,
+ .pm_finish = &r100_pm_finish,
+ .pm_init_profile = &r100_pm_init_profile,
+ .pm_get_dynpm_state = &r100_pm_get_dynpm_state,
};
static struct radeon_asic r300_asic_pcie = {
@@ -247,7 +268,8 @@ static struct radeon_asic r300_asic_pcie = {
.suspend = &r300_suspend,
.resume = &r300_resume,
.vga_set_state = &r100_vga_set_state,
- .gpu_reset = &r300_gpu_reset,
+ .gpu_is_lockup = &r300_gpu_is_lockup,
+ .asic_reset = &r300_asic_reset,
.gart_tlb_flush = &rv370_pcie_gart_tlb_flush,
.gart_set_page = &rv370_pcie_gart_set_page,
.cp_commit = &r100_cp_commit,
@@ -276,6 +298,12 @@ static struct radeon_asic r300_asic_pcie = {
.hpd_sense = &r100_hpd_sense,
.hpd_set_polarity = &r100_hpd_set_polarity,
.ioctl_wait_idle = NULL,
+ .gui_idle = &r100_gui_idle,
+ .pm_misc = &r100_pm_misc,
+ .pm_prepare = &r100_pm_prepare,
+ .pm_finish = &r100_pm_finish,
+ .pm_init_profile = &r100_pm_init_profile,
+ .pm_get_dynpm_state = &r100_pm_get_dynpm_state,
};
static struct radeon_asic r420_asic = {
@@ -284,7 +312,8 @@ static struct radeon_asic r420_asic = {
.suspend = &r420_suspend,
.resume = &r420_resume,
.vga_set_state = &r100_vga_set_state,
- .gpu_reset = &r300_gpu_reset,
+ .gpu_is_lockup = &r300_gpu_is_lockup,
+ .asic_reset = &r300_asic_reset,
.gart_tlb_flush = &rv370_pcie_gart_tlb_flush,
.gart_set_page = &rv370_pcie_gart_set_page,
.cp_commit = &r100_cp_commit,
@@ -314,6 +343,12 @@ static struct radeon_asic r420_asic = {
.hpd_sense = &r100_hpd_sense,
.hpd_set_polarity = &r100_hpd_set_polarity,
.ioctl_wait_idle = NULL,
+ .gui_idle = &r100_gui_idle,
+ .pm_misc = &r100_pm_misc,
+ .pm_prepare = &r100_pm_prepare,
+ .pm_finish = &r100_pm_finish,
+ .pm_init_profile = &r420_pm_init_profile,
+ .pm_get_dynpm_state = &r100_pm_get_dynpm_state,
};
static struct radeon_asic rs400_asic = {
@@ -322,7 +357,8 @@ static struct radeon_asic rs400_asic = {
.suspend = &rs400_suspend,
.resume = &rs400_resume,
.vga_set_state = &r100_vga_set_state,
- .gpu_reset = &r300_gpu_reset,
+ .gpu_is_lockup = &r300_gpu_is_lockup,
+ .asic_reset = &r300_asic_reset,
.gart_tlb_flush = &rs400_gart_tlb_flush,
.gart_set_page = &rs400_gart_set_page,
.cp_commit = &r100_cp_commit,
@@ -352,6 +388,12 @@ static struct radeon_asic rs400_asic = {
.hpd_sense = &r100_hpd_sense,
.hpd_set_polarity = &r100_hpd_set_polarity,
.ioctl_wait_idle = NULL,
+ .gui_idle = &r100_gui_idle,
+ .pm_misc = &r100_pm_misc,
+ .pm_prepare = &r100_pm_prepare,
+ .pm_finish = &r100_pm_finish,
+ .pm_init_profile = &r100_pm_init_profile,
+ .pm_get_dynpm_state = &r100_pm_get_dynpm_state,
};
static struct radeon_asic rs600_asic = {
@@ -360,7 +402,8 @@ static struct radeon_asic rs600_asic = {
.suspend = &rs600_suspend,
.resume = &rs600_resume,
.vga_set_state = &r100_vga_set_state,
- .gpu_reset = &r300_gpu_reset,
+ .gpu_is_lockup = &r300_gpu_is_lockup,
+ .asic_reset = &rs600_asic_reset,
.gart_tlb_flush = &rs600_gart_tlb_flush,
.gart_set_page = &rs600_gart_set_page,
.cp_commit = &r100_cp_commit,
@@ -390,6 +433,12 @@ static struct radeon_asic rs600_asic = {
.hpd_sense = &rs600_hpd_sense,
.hpd_set_polarity = &rs600_hpd_set_polarity,
.ioctl_wait_idle = NULL,
+ .gui_idle = &r100_gui_idle,
+ .pm_misc = &rs600_pm_misc,
+ .pm_prepare = &rs600_pm_prepare,
+ .pm_finish = &rs600_pm_finish,
+ .pm_init_profile = &r420_pm_init_profile,
+ .pm_get_dynpm_state = &r100_pm_get_dynpm_state,
};
static struct radeon_asic rs690_asic = {
@@ -398,7 +447,8 @@ static struct radeon_asic rs690_asic = {
.suspend = &rs690_suspend,
.resume = &rs690_resume,
.vga_set_state = &r100_vga_set_state,
- .gpu_reset = &r300_gpu_reset,
+ .gpu_is_lockup = &r300_gpu_is_lockup,
+ .asic_reset = &rs600_asic_reset,
.gart_tlb_flush = &rs400_gart_tlb_flush,
.gart_set_page = &rs400_gart_set_page,
.cp_commit = &r100_cp_commit,
@@ -428,6 +478,12 @@ static struct radeon_asic rs690_asic = {
.hpd_sense = &rs600_hpd_sense,
.hpd_set_polarity = &rs600_hpd_set_polarity,
.ioctl_wait_idle = NULL,
+ .gui_idle = &r100_gui_idle,
+ .pm_misc = &rs600_pm_misc,
+ .pm_prepare = &rs600_pm_prepare,
+ .pm_finish = &rs600_pm_finish,
+ .pm_init_profile = &r420_pm_init_profile,
+ .pm_get_dynpm_state = &r100_pm_get_dynpm_state,
};
static struct radeon_asic rv515_asic = {
@@ -436,7 +492,8 @@ static struct radeon_asic rv515_asic = {
.suspend = &rv515_suspend,
.resume = &rv515_resume,
.vga_set_state = &r100_vga_set_state,
- .gpu_reset = &rv515_gpu_reset,
+ .gpu_is_lockup = &r300_gpu_is_lockup,
+ .asic_reset = &rs600_asic_reset,
.gart_tlb_flush = &rv370_pcie_gart_tlb_flush,
.gart_set_page = &rv370_pcie_gart_set_page,
.cp_commit = &r100_cp_commit,
@@ -466,6 +523,12 @@ static struct radeon_asic rv515_asic = {
.hpd_sense = &rs600_hpd_sense,
.hpd_set_polarity = &rs600_hpd_set_polarity,
.ioctl_wait_idle = NULL,
+ .gui_idle = &r100_gui_idle,
+ .pm_misc = &rs600_pm_misc,
+ .pm_prepare = &rs600_pm_prepare,
+ .pm_finish = &rs600_pm_finish,
+ .pm_init_profile = &r420_pm_init_profile,
+ .pm_get_dynpm_state = &r100_pm_get_dynpm_state,
};
static struct radeon_asic r520_asic = {
@@ -474,7 +537,8 @@ static struct radeon_asic r520_asic = {
.suspend = &rv515_suspend,
.resume = &r520_resume,
.vga_set_state = &r100_vga_set_state,
- .gpu_reset = &rv515_gpu_reset,
+ .gpu_is_lockup = &r300_gpu_is_lockup,
+ .asic_reset = &rs600_asic_reset,
.gart_tlb_flush = &rv370_pcie_gart_tlb_flush,
.gart_set_page = &rv370_pcie_gart_set_page,
.cp_commit = &r100_cp_commit,
@@ -504,6 +568,12 @@ static struct radeon_asic r520_asic = {
.hpd_sense = &rs600_hpd_sense,
.hpd_set_polarity = &rs600_hpd_set_polarity,
.ioctl_wait_idle = NULL,
+ .gui_idle = &r100_gui_idle,
+ .pm_misc = &rs600_pm_misc,
+ .pm_prepare = &rs600_pm_prepare,
+ .pm_finish = &rs600_pm_finish,
+ .pm_init_profile = &r420_pm_init_profile,
+ .pm_get_dynpm_state = &r100_pm_get_dynpm_state,
};
static struct radeon_asic r600_asic = {
@@ -513,7 +583,8 @@ static struct radeon_asic r600_asic = {
.resume = &r600_resume,
.cp_commit = &r600_cp_commit,
.vga_set_state = &r600_vga_set_state,
- .gpu_reset = &r600_gpu_reset,
+ .gpu_is_lockup = &r600_gpu_is_lockup,
+ .asic_reset = &r600_asic_reset,
.gart_tlb_flush = &r600_pcie_gart_tlb_flush,
.gart_set_page = &rs600_gart_set_page,
.ring_test = &r600_ring_test,
@@ -541,6 +612,12 @@ static struct radeon_asic r600_asic = {
.hpd_sense = &r600_hpd_sense,
.hpd_set_polarity = &r600_hpd_set_polarity,
.ioctl_wait_idle = r600_ioctl_wait_idle,
+ .gui_idle = &r600_gui_idle,
+ .pm_misc = &r600_pm_misc,
+ .pm_prepare = &rs600_pm_prepare,
+ .pm_finish = &rs600_pm_finish,
+ .pm_init_profile = &r600_pm_init_profile,
+ .pm_get_dynpm_state = &r600_pm_get_dynpm_state,
};
static struct radeon_asic rs780_asic = {
@@ -549,8 +626,9 @@ static struct radeon_asic rs780_asic = {
.suspend = &r600_suspend,
.resume = &r600_resume,
.cp_commit = &r600_cp_commit,
+ .gpu_is_lockup = &r600_gpu_is_lockup,
.vga_set_state = &r600_vga_set_state,
- .gpu_reset = &r600_gpu_reset,
+ .asic_reset = &r600_asic_reset,
.gart_tlb_flush = &r600_pcie_gart_tlb_flush,
.gart_set_page = &rs600_gart_set_page,
.ring_test = &r600_ring_test,
@@ -578,6 +656,12 @@ static struct radeon_asic rs780_asic = {
.hpd_sense = &r600_hpd_sense,
.hpd_set_polarity = &r600_hpd_set_polarity,
.ioctl_wait_idle = r600_ioctl_wait_idle,
+ .gui_idle = &r600_gui_idle,
+ .pm_misc = &r600_pm_misc,
+ .pm_prepare = &rs600_pm_prepare,
+ .pm_finish = &rs600_pm_finish,
+ .pm_init_profile = &rs780_pm_init_profile,
+ .pm_get_dynpm_state = &r600_pm_get_dynpm_state,
};
static struct radeon_asic rv770_asic = {
@@ -586,7 +670,8 @@ static struct radeon_asic rv770_asic = {
.suspend = &rv770_suspend,
.resume = &rv770_resume,
.cp_commit = &r600_cp_commit,
- .gpu_reset = &rv770_gpu_reset,
+ .asic_reset = &r600_asic_reset,
+ .gpu_is_lockup = &r600_gpu_is_lockup,
.vga_set_state = &r600_vga_set_state,
.gart_tlb_flush = &r600_pcie_gart_tlb_flush,
.gart_set_page = &rs600_gart_set_page,
@@ -615,6 +700,12 @@ static struct radeon_asic rv770_asic = {
.hpd_sense = &r600_hpd_sense,
.hpd_set_polarity = &r600_hpd_set_polarity,
.ioctl_wait_idle = r600_ioctl_wait_idle,
+ .gui_idle = &r600_gui_idle,
+ .pm_misc = &rv770_pm_misc,
+ .pm_prepare = &rs600_pm_prepare,
+ .pm_finish = &rs600_pm_finish,
+ .pm_init_profile = &r600_pm_init_profile,
+ .pm_get_dynpm_state = &r600_pm_get_dynpm_state,
};
static struct radeon_asic evergreen_asic = {
@@ -622,16 +713,17 @@ static struct radeon_asic evergreen_asic = {
.fini = &evergreen_fini,
.suspend = &evergreen_suspend,
.resume = &evergreen_resume,
- .cp_commit = NULL,
- .gpu_reset = &evergreen_gpu_reset,
+ .cp_commit = &r600_cp_commit,
+ .gpu_is_lockup = &evergreen_gpu_is_lockup,
+ .asic_reset = &evergreen_asic_reset,
.vga_set_state = &r600_vga_set_state,
- .gart_tlb_flush = &r600_pcie_gart_tlb_flush,
+ .gart_tlb_flush = &evergreen_pcie_gart_tlb_flush,
.gart_set_page = &rs600_gart_set_page,
- .ring_test = NULL,
- .ring_ib_execute = NULL,
- .irq_set = NULL,
- .irq_process = NULL,
- .get_vblank_counter = NULL,
+ .ring_test = &r600_ring_test,
+ .ring_ib_execute = &r600_ring_ib_execute,
+ .irq_set = &evergreen_irq_set,
+ .irq_process = &evergreen_irq_process,
+ .get_vblank_counter = &evergreen_get_vblank_counter,
.fence_ring_emit = NULL,
.cs_parse = NULL,
.copy_blit = NULL,
@@ -650,6 +742,12 @@ static struct radeon_asic evergreen_asic = {
.hpd_fini = &evergreen_hpd_fini,
.hpd_sense = &evergreen_hpd_sense,
.hpd_set_polarity = &evergreen_hpd_set_polarity,
+ .gui_idle = &r600_gui_idle,
+ .pm_misc = &evergreen_pm_misc,
+ .pm_prepare = &evergreen_pm_prepare,
+ .pm_finish = &evergreen_pm_finish,
+ .pm_init_profile = &r600_pm_init_profile,
+ .pm_get_dynpm_state = &r600_pm_get_dynpm_state,
};
int radeon_asic_init(struct radeon_device *rdev)
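
Every ASIC table in this file gains the same six power-management hooks (gui_idle, pm_misc, pm_prepare, pm_finish, pm_init_profile, pm_get_dynpm_state), so the core power code can reclock any generation through one interface. A sketch of the call ordering those hooks imply, with stubbed bodies (the sequencing is the point here; the real driver also waits for the GUI and outstanding fences to idle before touching clocks):

#include <stdio.h>

struct radeon_device;

/* The three hooks that bracket a clock change in the new tables. */
struct radeon_pm_hooks {
	void (*pm_prepare)(struct radeon_device *rdev);
	void (*pm_misc)(struct radeon_device *rdev);
	void (*pm_finish)(struct radeon_device *rdev);
};

struct radeon_device { const struct radeon_pm_hooks *pm; };

/* Illustrative ordering only: quiesce, apply chip quirks, write the
 * new clocks, then undo whatever pm_prepare disabled. */
static void set_power_state_model(struct radeon_device *rdev)
{
	rdev->pm->pm_prepare(rdev);	/* e.g. stop CRTC memory requests */
	rdev->pm->pm_misc(rdev);	/* chip-specific voltage setup */
	/* engine/memory clock registers would be written here */
	rdev->pm->pm_finish(rdev);	/* re-enable what prepare stopped */
}

static void prep(struct radeon_device *r) { (void)r; puts("prepare"); }
static void misc(struct radeon_device *r) { (void)r; puts("misc"); }
static void fini(struct radeon_device *r) { (void)r; puts("finish"); }

int main(void)
{
	const struct radeon_pm_hooks hooks = { prep, misc, fini };
	struct radeon_device rdev = { &hooks };

	set_power_state_model(&rdev);
	return 0;
}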
diff --git a/drivers/gpu/drm/radeon/radeon_asic.h b/drivers/gpu/drm/radeon/radeon_asic.h
index a0b8280663d..5c40a3dfaca 100644
--- a/drivers/gpu/drm/radeon/radeon_asic.h
+++ b/drivers/gpu/drm/radeon/radeon_asic.h
@@ -60,7 +60,8 @@ int r100_resume(struct radeon_device *rdev);
uint32_t r100_mm_rreg(struct radeon_device *rdev, uint32_t reg);
void r100_mm_wreg(struct radeon_device *rdev, uint32_t reg, uint32_t v);
void r100_vga_set_state(struct radeon_device *rdev, bool state);
-int r100_gpu_reset(struct radeon_device *rdev);
+bool r100_gpu_is_lockup(struct radeon_device *rdev);
+int r100_asic_reset(struct radeon_device *rdev);
u32 r100_get_vblank_counter(struct radeon_device *rdev, int crtc);
void r100_pci_gart_tlb_flush(struct radeon_device *rdev);
int r100_pci_gart_set_page(struct radeon_device *rdev, int i, uint64_t addr);
@@ -110,8 +111,6 @@ void r100_vram_init_sizes(struct radeon_device *rdev);
void r100_wb_disable(struct radeon_device *rdev);
void r100_wb_fini(struct radeon_device *rdev);
int r100_wb_init(struct radeon_device *rdev);
-void r100_hdp_reset(struct radeon_device *rdev);
-int r100_rb2d_reset(struct radeon_device *rdev);
int r100_cp_reset(struct radeon_device *rdev);
void r100_vga_render_disable(struct radeon_device *rdev);
int r100_cs_track_check_pkt3_indx_buffer(struct radeon_cs_parser *p,
@@ -126,6 +125,13 @@ int r100_cs_packet_parse(struct radeon_cs_parser *p,
unsigned idx);
void r100_enable_bm(struct radeon_device *rdev);
void r100_set_common_regs(struct radeon_device *rdev);
+void r100_bm_disable(struct radeon_device *rdev);
+extern bool r100_gui_idle(struct radeon_device *rdev);
+extern void r100_pm_misc(struct radeon_device *rdev);
+extern void r100_pm_prepare(struct radeon_device *rdev);
+extern void r100_pm_finish(struct radeon_device *rdev);
+extern void r100_pm_init_profile(struct radeon_device *rdev);
+extern void r100_pm_get_dynpm_state(struct radeon_device *rdev);
/*
* r200,rv250,rs300,rv280
@@ -134,7 +140,7 @@ extern int r200_copy_dma(struct radeon_device *rdev,
uint64_t src_offset,
uint64_t dst_offset,
unsigned num_pages,
- struct radeon_fence *fence);
+ struct radeon_fence *fence);
/*
* r300,r350,rv350,rv380
@@ -143,7 +149,8 @@ extern int r300_init(struct radeon_device *rdev);
extern void r300_fini(struct radeon_device *rdev);
extern int r300_suspend(struct radeon_device *rdev);
extern int r300_resume(struct radeon_device *rdev);
-extern int r300_gpu_reset(struct radeon_device *rdev);
+extern bool r300_gpu_is_lockup(struct radeon_device *rdev);
+extern int r300_asic_reset(struct radeon_device *rdev);
extern void r300_ring_start(struct radeon_device *rdev);
extern void r300_fence_ring_emit(struct radeon_device *rdev,
struct radeon_fence *fence);
@@ -162,6 +169,7 @@ extern int r420_init(struct radeon_device *rdev);
extern void r420_fini(struct radeon_device *rdev);
extern int r420_suspend(struct radeon_device *rdev);
extern int r420_resume(struct radeon_device *rdev);
+extern void r420_pm_init_profile(struct radeon_device *rdev);
/*
* rs400,rs480
@@ -178,6 +186,7 @@ void rs400_mc_wreg(struct radeon_device *rdev, uint32_t reg, uint32_t v);
/*
* rs600.
*/
+extern int rs600_asic_reset(struct radeon_device *rdev);
extern int rs600_init(struct radeon_device *rdev);
extern void rs600_fini(struct radeon_device *rdev);
extern int rs600_suspend(struct radeon_device *rdev);
@@ -195,6 +204,9 @@ void rs600_hpd_fini(struct radeon_device *rdev);
bool rs600_hpd_sense(struct radeon_device *rdev, enum radeon_hpd_id hpd);
void rs600_hpd_set_polarity(struct radeon_device *rdev,
enum radeon_hpd_id hpd);
+extern void rs600_pm_misc(struct radeon_device *rdev);
+extern void rs600_pm_prepare(struct radeon_device *rdev);
+extern void rs600_pm_finish(struct radeon_device *rdev);
/*
* rs690,rs740
@@ -212,7 +224,6 @@ void rs690_bandwidth_update(struct radeon_device *rdev);
*/
int rv515_init(struct radeon_device *rdev);
void rv515_fini(struct radeon_device *rdev);
-int rv515_gpu_reset(struct radeon_device *rdev);
uint32_t rv515_mc_rreg(struct radeon_device *rdev, uint32_t reg);
void rv515_mc_wreg(struct radeon_device *rdev, uint32_t reg, uint32_t v);
void rv515_ring_start(struct radeon_device *rdev);
@@ -252,7 +263,8 @@ int r600_copy_dma(struct radeon_device *rdev,
struct radeon_fence *fence);
int r600_irq_process(struct radeon_device *rdev);
int r600_irq_set(struct radeon_device *rdev);
-int r600_gpu_reset(struct radeon_device *rdev);
+bool r600_gpu_is_lockup(struct radeon_device *rdev);
+int r600_asic_reset(struct radeon_device *rdev);
int r600_set_surface_reg(struct radeon_device *rdev, int reg,
uint32_t tiling_flags, uint32_t pitch,
uint32_t offset, uint32_t obj_size);
@@ -268,6 +280,11 @@ bool r600_hpd_sense(struct radeon_device *rdev, enum radeon_hpd_id hpd);
void r600_hpd_set_polarity(struct radeon_device *rdev,
enum radeon_hpd_id hpd);
extern void r600_ioctl_wait_idle(struct radeon_device *rdev, struct radeon_bo *bo);
+extern bool r600_gui_idle(struct radeon_device *rdev);
+extern void r600_pm_misc(struct radeon_device *rdev);
+extern void r600_pm_init_profile(struct radeon_device *rdev);
+extern void rs780_pm_init_profile(struct radeon_device *rdev);
+extern void r600_pm_get_dynpm_state(struct radeon_device *rdev);
/*
* rv770,rv730,rv710,rv740
@@ -276,20 +293,29 @@ int rv770_init(struct radeon_device *rdev);
void rv770_fini(struct radeon_device *rdev);
int rv770_suspend(struct radeon_device *rdev);
int rv770_resume(struct radeon_device *rdev);
-int rv770_gpu_reset(struct radeon_device *rdev);
+extern void rv770_pm_misc(struct radeon_device *rdev);
/*
* evergreen
*/
+void evergreen_pcie_gart_tlb_flush(struct radeon_device *rdev);
int evergreen_init(struct radeon_device *rdev);
void evergreen_fini(struct radeon_device *rdev);
int evergreen_suspend(struct radeon_device *rdev);
int evergreen_resume(struct radeon_device *rdev);
-int evergreen_gpu_reset(struct radeon_device *rdev);
+bool evergreen_gpu_is_lockup(struct radeon_device *rdev);
+int evergreen_asic_reset(struct radeon_device *rdev);
void evergreen_bandwidth_update(struct radeon_device *rdev);
void evergreen_hpd_init(struct radeon_device *rdev);
void evergreen_hpd_fini(struct radeon_device *rdev);
bool evergreen_hpd_sense(struct radeon_device *rdev, enum radeon_hpd_id hpd);
void evergreen_hpd_set_polarity(struct radeon_device *rdev,
enum radeon_hpd_id hpd);
+u32 evergreen_get_vblank_counter(struct radeon_device *rdev, int crtc);
+int evergreen_irq_set(struct radeon_device *rdev);
+int evergreen_irq_process(struct radeon_device *rdev);
+extern void evergreen_pm_misc(struct radeon_device *rdev);
+extern void evergreen_pm_prepare(struct radeon_device *rdev);
+extern void evergreen_pm_finish(struct radeon_device *rdev);
+
#endif
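
The r100_gpu_lockup_update()/r100_gpu_cp_is_lockup() helpers shared by the *_gpu_is_lockup hooks above suggest one common progress tracker: remember the ring read pointer and when it last moved, and declare a lockup only once it has been stuck for a while. A self-contained model of that idea (the field names and the ten-second threshold are assumptions for illustration):

#include <stdbool.h>
#include <stdint.h>
#include <stdio.h>

struct lockup_tracker {
	uint32_t last_rptr;	/* ring read pointer at the last observation */
	uint64_t last_ms;	/* timestamp of the last observed progress */
};

static void lockup_update(struct lockup_tracker *t, uint32_t rptr, uint64_t now_ms)
{
	t->last_rptr = rptr;
	t->last_ms = now_ms;
}

static bool cp_is_lockup(struct lockup_tracker *t, uint32_t rptr, uint64_t now_ms)
{
	if (rptr != t->last_rptr) {	/* the CP consumed commands */
		lockup_update(t, rptr, now_ms);
		return false;
	}
	return now_ms - t->last_ms > 10000;	/* stuck for over 10 s */
}

int main(void)
{
	struct lockup_tracker t;

	lockup_update(&t, 0, 0);
	printf("%d %d\n", cp_is_lockup(&t, 0, 5000), cp_is_lockup(&t, 0, 15000));
	return 0;
}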
diff --git a/drivers/gpu/drm/radeon/radeon_atombios.c b/drivers/gpu/drm/radeon/radeon_atombios.c
index 5673665ff21..6e733fdc334 100644
--- a/drivers/gpu/drm/radeon/radeon_atombios.c
+++ b/drivers/gpu/drm/radeon/radeon_atombios.c
@@ -530,6 +530,8 @@ bool radeon_get_atom_connector_info_from_object_table(struct drm_device *dev)
}
/* look up gpio for ddc, hpd */
+ ddc_bus.valid = false;
+ hpd.hpd = RADEON_HPD_NONE;
if ((le16_to_cpu(path->usDeviceTag) &
(ATOM_DEVICE_TV_SUPPORT | ATOM_DEVICE_CV_SUPPORT)) == 0) {
for (j = 0; j < con_obj->ucNumberOfObjects; j++) {
@@ -547,7 +549,6 @@ bool radeon_get_atom_connector_info_from_object_table(struct drm_device *dev)
ATOM_I2C_RECORD *i2c_record;
ATOM_HPD_INT_RECORD *hpd_record;
ATOM_I2C_ID_CONFIG_ACCESS *i2c_config;
- hpd.hpd = RADEON_HPD_NONE;
while (record->ucRecordType > 0
&& record->
@@ -585,13 +586,10 @@ bool radeon_get_atom_connector_info_from_object_table(struct drm_device *dev)
break;
}
}
- } else {
- hpd.hpd = RADEON_HPD_NONE;
- ddc_bus.valid = false;
}
/* needed for aux chan transactions */
- ddc_bus.hpd_id = hpd.hpd ? (hpd.hpd - 1) : 0;
+ ddc_bus.hpd = hpd.hpd;
conn_id = le16_to_cpu(path->usConnObjectId);
@@ -1174,7 +1172,7 @@ struct radeon_encoder_atom_dig *radeon_atombios_get_lvds_info(struct
lvds->native_mode.vtotal = lvds->native_mode.vdisplay +
le16_to_cpu(lvds_info->info.sLCDTiming.usVBlanking_Time);
lvds->native_mode.vsync_start = lvds->native_mode.vdisplay +
- le16_to_cpu(lvds_info->info.sLCDTiming.usVSyncWidth);
+ le16_to_cpu(lvds_info->info.sLCDTiming.usVSyncOffset);
lvds->native_mode.vsync_end = lvds->native_mode.vsync_start +
le16_to_cpu(lvds_info->info.sLCDTiming.usVSyncWidth);
lvds->panel_pwr_delay =
@@ -1264,7 +1262,7 @@ bool radeon_atom_get_tv_timings(struct radeon_device *rdev, int index,
switch (crev) {
case 1:
tv_info = (ATOM_ANALOG_TV_INFO *)(mode_info->atom_context->bios + data_offset);
- if (index > MAX_SUPPORTED_TV_TIMING)
+ if (index >= MAX_SUPPORTED_TV_TIMING)
return false;
mode->crtc_htotal = le16_to_cpu(tv_info->aModeTimings[index].usCRTC_H_Total);
@@ -1302,7 +1300,7 @@ bool radeon_atom_get_tv_timings(struct radeon_device *rdev, int index,
break;
case 2:
tv_info_v1_2 = (ATOM_ANALOG_TV_INFO_V1_2 *)(mode_info->atom_context->bios + data_offset);
- if (index > MAX_SUPPORTED_TV_TIMING_V1_2)
+ if (index >= MAX_SUPPORTED_TV_TIMING_V1_2)
return false;
dtd_timings = &tv_info_v1_2->aModeTimings[index];
@@ -1442,26 +1440,30 @@ radeon_atombios_get_tv_dac_info(struct radeon_encoder *encoder)
static const char *thermal_controller_names[] = {
"NONE",
- "LM63",
- "ADM1032",
- "ADM1030",
- "MUA6649",
- "LM64",
- "F75375",
- "ASC7512",
+ "lm63",
+ "adm1032",
+ "adm1030",
+ "max6649",
+ "lm64",
+ "f75375",
+ "asc7xxx",
};
static const char *pp_lib_thermal_controller_names[] = {
"NONE",
- "LM63",
- "ADM1032",
- "ADM1030",
- "MUA6649",
- "LM64",
- "F75375",
+ "lm63",
+ "adm1032",
+ "adm1030",
+ "max6649",
+ "lm64",
+ "f75375",
"RV6xx",
"RV770",
- "ADT7473",
+ "adt7473",
+ "External GPIO",
+ "Evergreen",
+ "adt7473 with internal",
};
union power_info {
@@ -1485,7 +1487,7 @@ void radeon_atombios_get_power_modes(struct radeon_device *rdev)
int state_index = 0, mode_index = 0;
struct radeon_i2c_bus_rec i2c_bus;
- rdev->pm.default_power_state = NULL;
+ rdev->pm.default_power_state_index = -1;
if (atom_parse_data_header(mode_info->atom_context, index, NULL,
&frev, &crev, &data_offset)) {
@@ -1498,10 +1500,19 @@ void radeon_atombios_get_power_modes(struct radeon_device *rdev)
power_info->info.ucOverdriveControllerAddress >> 1);
i2c_bus = radeon_lookup_i2c_gpio(rdev, power_info->info.ucOverdriveI2cLine);
rdev->pm.i2c_bus = radeon_i2c_create(rdev->ddev, &i2c_bus, "Thermal");
+ if (rdev->pm.i2c_bus) {
+ struct i2c_board_info info = { };
+ const char *name = thermal_controller_names[power_info->info.
+ ucOverdriveThermalController];
+ info.addr = power_info->info.ucOverdriveControllerAddress >> 1;
+ strlcpy(info.type, name, sizeof(info.type));
+ i2c_new_device(&rdev->pm.i2c_bus->adapter, &info);
+ }
}
num_modes = power_info->info.ucNumOfPowerModeEntries;
if (num_modes > ATOM_MAX_NUMBEROF_POWER_BLOCK)
num_modes = ATOM_MAX_NUMBEROF_POWER_BLOCK;
+ /* last mode is usually default, array is low to high */
for (i = 0; i < num_modes; i++) {
rdev->pm.power_state[state_index].clock_info[0].voltage.type = VOLTAGE_NONE;
switch (frev) {
@@ -1515,13 +1526,7 @@ void radeon_atombios_get_power_modes(struct radeon_device *rdev)
if ((rdev->pm.power_state[state_index].clock_info[0].mclk == 0) ||
(rdev->pm.power_state[state_index].clock_info[0].sclk == 0))
continue;
- /* skip overclock modes for now */
- if ((rdev->pm.power_state[state_index].clock_info[0].mclk >
- rdev->clock.default_mclk + RADEON_MODE_OVERCLOCK_MARGIN) ||
- (rdev->pm.power_state[state_index].clock_info[0].sclk >
- rdev->clock.default_sclk + RADEON_MODE_OVERCLOCK_MARGIN))
- continue;
- rdev->pm.power_state[state_index].non_clock_info.pcie_lanes =
+ rdev->pm.power_state[state_index].pcie_lanes =
power_info->info.asPowerPlayInfo[i].ucNumPciELanes;
misc = le32_to_cpu(power_info->info.asPowerPlayInfo[i].ulMiscInfo);
if (misc & ATOM_PM_MISCINFO_VOLTAGE_DROP_SUPPORT) {
@@ -1542,6 +1547,8 @@ void radeon_atombios_get_power_modes(struct radeon_device *rdev)
rdev->pm.power_state[state_index].clock_info[0].voltage.vddc_id =
power_info->info.asPowerPlayInfo[i].ucVoltageDropIndex;
}
+ rdev->pm.power_state[state_index].flags = RADEON_PM_STATE_SINGLE_DISPLAY_ONLY;
+ rdev->pm.power_state[state_index].misc = misc;
/* order matters! */
if (misc & ATOM_PM_MISCINFO_POWER_SAVING_MODE)
rdev->pm.power_state[state_index].type =
@@ -1555,15 +1562,23 @@ void radeon_atombios_get_power_modes(struct radeon_device *rdev)
if (misc & ATOM_PM_MISCINFO_LOAD_BALANCE_EN)
rdev->pm.power_state[state_index].type =
POWER_STATE_TYPE_BALANCED;
- if (misc & ATOM_PM_MISCINFO_3D_ACCELERATION_EN)
+ if (misc & ATOM_PM_MISCINFO_3D_ACCELERATION_EN) {
rdev->pm.power_state[state_index].type =
POWER_STATE_TYPE_PERFORMANCE;
+ rdev->pm.power_state[state_index].flags &=
+ ~RADEON_PM_STATE_SINGLE_DISPLAY_ONLY;
+ }
if (misc & ATOM_PM_MISCINFO_DRIVER_DEFAULT_MODE) {
rdev->pm.power_state[state_index].type =
POWER_STATE_TYPE_DEFAULT;
- rdev->pm.default_power_state = &rdev->pm.power_state[state_index];
+ rdev->pm.default_power_state_index = state_index;
rdev->pm.power_state[state_index].default_clock_mode =
&rdev->pm.power_state[state_index].clock_info[0];
+ rdev->pm.power_state[state_index].flags &=
+ ~RADEON_PM_STATE_SINGLE_DISPLAY_ONLY;
+ } else if (state_index == 0) {
+ rdev->pm.power_state[state_index].clock_info[0].flags |=
+ RADEON_PM_MODE_NO_DISPLAY;
}
state_index++;
break;
@@ -1577,13 +1592,7 @@ void radeon_atombios_get_power_modes(struct radeon_device *rdev)
if ((rdev->pm.power_state[state_index].clock_info[0].mclk == 0) ||
(rdev->pm.power_state[state_index].clock_info[0].sclk == 0))
continue;
- /* skip overclock modes for now */
- if ((rdev->pm.power_state[state_index].clock_info[0].mclk >
- rdev->clock.default_mclk + RADEON_MODE_OVERCLOCK_MARGIN) ||
- (rdev->pm.power_state[state_index].clock_info[0].sclk >
- rdev->clock.default_sclk + RADEON_MODE_OVERCLOCK_MARGIN))
- continue;
- rdev->pm.power_state[state_index].non_clock_info.pcie_lanes =
+ rdev->pm.power_state[state_index].pcie_lanes =
power_info->info_2.asPowerPlayInfo[i].ucNumPciELanes;
misc = le32_to_cpu(power_info->info_2.asPowerPlayInfo[i].ulMiscInfo);
misc2 = le32_to_cpu(power_info->info_2.asPowerPlayInfo[i].ulMiscInfo2);
@@ -1605,6 +1614,9 @@ void radeon_atombios_get_power_modes(struct radeon_device *rdev)
rdev->pm.power_state[state_index].clock_info[0].voltage.vddc_id =
power_info->info_2.asPowerPlayInfo[i].ucVoltageDropIndex;
}
+ rdev->pm.power_state[state_index].flags = RADEON_PM_STATE_SINGLE_DISPLAY_ONLY;
+ rdev->pm.power_state[state_index].misc = misc;
+ rdev->pm.power_state[state_index].misc2 = misc2;
/* order matters! */
if (misc & ATOM_PM_MISCINFO_POWER_SAVING_MODE)
rdev->pm.power_state[state_index].type =
@@ -1618,18 +1630,29 @@ void radeon_atombios_get_power_modes(struct radeon_device *rdev)
if (misc & ATOM_PM_MISCINFO_LOAD_BALANCE_EN)
rdev->pm.power_state[state_index].type =
POWER_STATE_TYPE_BALANCED;
- if (misc & ATOM_PM_MISCINFO_3D_ACCELERATION_EN)
+ if (misc & ATOM_PM_MISCINFO_3D_ACCELERATION_EN) {
rdev->pm.power_state[state_index].type =
POWER_STATE_TYPE_PERFORMANCE;
+ rdev->pm.power_state[state_index].flags &=
+ ~RADEON_PM_STATE_SINGLE_DISPLAY_ONLY;
+ }
if (misc2 & ATOM_PM_MISCINFO2_SYSTEM_AC_LITE_MODE)
rdev->pm.power_state[state_index].type =
POWER_STATE_TYPE_BALANCED;
+ if (misc2 & ATOM_PM_MISCINFO2_MULTI_DISPLAY_SUPPORT)
+ rdev->pm.power_state[state_index].flags &=
+ ~RADEON_PM_STATE_SINGLE_DISPLAY_ONLY;
if (misc & ATOM_PM_MISCINFO_DRIVER_DEFAULT_MODE) {
rdev->pm.power_state[state_index].type =
POWER_STATE_TYPE_DEFAULT;
- rdev->pm.default_power_state = &rdev->pm.power_state[state_index];
+ rdev->pm.default_power_state_index = state_index;
rdev->pm.power_state[state_index].default_clock_mode =
&rdev->pm.power_state[state_index].clock_info[0];
+ rdev->pm.power_state[state_index].flags &=
+ ~RADEON_PM_STATE_SINGLE_DISPLAY_ONLY;
+ } else if (state_index == 0) {
+ rdev->pm.power_state[state_index].clock_info[0].flags |=
+ RADEON_PM_MODE_NO_DISPLAY;
}
state_index++;
break;
@@ -1643,13 +1666,7 @@ void radeon_atombios_get_power_modes(struct radeon_device *rdev)
if ((rdev->pm.power_state[state_index].clock_info[0].mclk == 0) ||
(rdev->pm.power_state[state_index].clock_info[0].sclk == 0))
continue;
- /* skip overclock modes for now */
- if ((rdev->pm.power_state[state_index].clock_info[0].mclk >
- rdev->clock.default_mclk + RADEON_MODE_OVERCLOCK_MARGIN) ||
- (rdev->pm.power_state[state_index].clock_info[0].sclk >
- rdev->clock.default_sclk + RADEON_MODE_OVERCLOCK_MARGIN))
- continue;
- rdev->pm.power_state[state_index].non_clock_info.pcie_lanes =
+ rdev->pm.power_state[state_index].pcie_lanes =
power_info->info_3.asPowerPlayInfo[i].ucNumPciELanes;
misc = le32_to_cpu(power_info->info_3.asPowerPlayInfo[i].ulMiscInfo);
misc2 = le32_to_cpu(power_info->info_3.asPowerPlayInfo[i].ulMiscInfo2);
@@ -1677,6 +1694,9 @@ void radeon_atombios_get_power_modes(struct radeon_device *rdev)
power_info->info_3.asPowerPlayInfo[i].ucVDDCI_VoltageDropIndex;
}
}
+ rdev->pm.power_state[state_index].flags = RADEON_PM_STATE_SINGLE_DISPLAY_ONLY;
+ rdev->pm.power_state[state_index].misc = misc;
+ rdev->pm.power_state[state_index].misc2 = misc2;
/* order matters! */
if (misc & ATOM_PM_MISCINFO_POWER_SAVING_MODE)
rdev->pm.power_state[state_index].type =
@@ -1690,42 +1710,76 @@ void radeon_atombios_get_power_modes(struct radeon_device *rdev)
if (misc & ATOM_PM_MISCINFO_LOAD_BALANCE_EN)
rdev->pm.power_state[state_index].type =
POWER_STATE_TYPE_BALANCED;
- if (misc & ATOM_PM_MISCINFO_3D_ACCELERATION_EN)
+ if (misc & ATOM_PM_MISCINFO_3D_ACCELERATION_EN) {
rdev->pm.power_state[state_index].type =
POWER_STATE_TYPE_PERFORMANCE;
+ rdev->pm.power_state[state_index].flags &=
+ ~RADEON_PM_STATE_SINGLE_DISPLAY_ONLY;
+ }
if (misc2 & ATOM_PM_MISCINFO2_SYSTEM_AC_LITE_MODE)
rdev->pm.power_state[state_index].type =
POWER_STATE_TYPE_BALANCED;
if (misc & ATOM_PM_MISCINFO_DRIVER_DEFAULT_MODE) {
rdev->pm.power_state[state_index].type =
POWER_STATE_TYPE_DEFAULT;
- rdev->pm.default_power_state = &rdev->pm.power_state[state_index];
+ rdev->pm.default_power_state_index = state_index;
rdev->pm.power_state[state_index].default_clock_mode =
&rdev->pm.power_state[state_index].clock_info[0];
+ } else if (state_index == 0) {
+ rdev->pm.power_state[state_index].clock_info[0].flags |=
+ RADEON_PM_MODE_NO_DISPLAY;
}
state_index++;
break;
}
}
- } else if (frev == 4) {
+ /* last mode is usually default */
+ if (rdev->pm.default_power_state_index == -1) {
+ rdev->pm.power_state[state_index - 1].type =
+ POWER_STATE_TYPE_DEFAULT;
+ rdev->pm.default_power_state_index = state_index - 1;
+ rdev->pm.power_state[state_index - 1].default_clock_mode =
+ &rdev->pm.power_state[state_index - 1].clock_info[0];
+ rdev->pm.power_state[state_index - 1].flags &=
+ ~RADEON_PM_STATE_SINGLE_DISPLAY_ONLY;
+ rdev->pm.power_state[state_index - 1].misc = 0;
+ rdev->pm.power_state[state_index - 1].misc2 = 0;
+ }
+ } else {
/* add the i2c bus for thermal/fan chip */
/* no support for internal controller yet */
- if (power_info->info_4.sThermalController.ucType > 0) {
- if ((power_info->info_4.sThermalController.ucType == ATOM_PP_THERMALCONTROLLER_RV6xx) ||
- (power_info->info_4.sThermalController.ucType == ATOM_PP_THERMALCONTROLLER_RV770)) {
+ ATOM_PPLIB_THERMALCONTROLLER *controller = &power_info->info_4.sThermalController;
+ if (controller->ucType > 0) {
+ if ((controller->ucType == ATOM_PP_THERMALCONTROLLER_RV6xx) ||
+ (controller->ucType == ATOM_PP_THERMALCONTROLLER_RV770) ||
+ (controller->ucType == ATOM_PP_THERMALCONTROLLER_EVERGREEN)) {
DRM_INFO("Internal thermal controller %s fan control\n",
- (power_info->info_4.sThermalController.ucFanParameters &
+ (controller->ucFanParameters &
ATOM_PP_FANPARAMETERS_NOFAN) ? "without" : "with");
+ } else if ((controller->ucType ==
+ ATOM_PP_THERMALCONTROLLER_EXTERNAL_GPIO) ||
+ (controller->ucType ==
+ ATOM_PP_THERMALCONTROLLER_ADT7473_WITH_INTERNAL)) {
+ DRM_INFO("Special thermal controller config\n");
} else {
DRM_INFO("Possible %s thermal controller at 0x%02x %s fan control\n",
- pp_lib_thermal_controller_names[power_info->info_4.sThermalController.ucType],
- power_info->info_4.sThermalController.ucI2cAddress >> 1,
- (power_info->info_4.sThermalController.ucFanParameters &
+ pp_lib_thermal_controller_names[controller->ucType],
+ controller->ucI2cAddress >> 1,
+ (controller->ucFanParameters &
ATOM_PP_FANPARAMETERS_NOFAN) ? "without" : "with");
- i2c_bus = radeon_lookup_i2c_gpio(rdev, power_info->info_4.sThermalController.ucI2cLine);
+ i2c_bus = radeon_lookup_i2c_gpio(rdev, controller->ucI2cLine);
rdev->pm.i2c_bus = radeon_i2c_create(rdev->ddev, &i2c_bus, "Thermal");
+ if (rdev->pm.i2c_bus) {
+ struct i2c_board_info info = { };
+ const char *name = pp_lib_thermal_controller_names[controller->ucType];
+ info.addr = controller->ucI2cAddress >> 1;
+ strlcpy(info.type, name, sizeof(info.type));
+ i2c_new_device(&rdev->pm.i2c_bus->adapter, &info);
+ }
}
}
+ /* first mode is usually default, followed by low to high */
for (i = 0; i < power_info->info_4.ucNumStates; i++) {
mode_index = 0;
power_state = (struct _ATOM_PPLIB_STATE *)
@@ -1754,14 +1808,34 @@ void radeon_atombios_get_power_modes(struct radeon_device *rdev)
/* skip invalid modes */
if (rdev->pm.power_state[state_index].clock_info[mode_index].sclk == 0)
continue;
- /* skip overclock modes for now */
- if (rdev->pm.power_state[state_index].clock_info[mode_index].sclk >
- rdev->clock.default_sclk + RADEON_MODE_OVERCLOCK_MARGIN)
+ rdev->pm.power_state[state_index].clock_info[mode_index].voltage.type =
+ VOLTAGE_SW;
+ rdev->pm.power_state[state_index].clock_info[mode_index].voltage.voltage =
+ clock_info->usVDDC;
+ mode_index++;
+ } else if (ASIC_IS_DCE4(rdev)) {
+ struct _ATOM_PPLIB_EVERGREEN_CLOCK_INFO *clock_info =
+ (struct _ATOM_PPLIB_EVERGREEN_CLOCK_INFO *)
+ (mode_info->atom_context->bios +
+ data_offset +
+ le16_to_cpu(power_info->info_4.usClockInfoArrayOffset) +
+ (power_state->ucClockStateIndices[j] *
+ power_info->info_4.ucClockInfoSize));
+ sclk = le16_to_cpu(clock_info->usEngineClockLow);
+ sclk |= clock_info->ucEngineClockHigh << 16;
+ mclk = le16_to_cpu(clock_info->usMemoryClockLow);
+ mclk |= clock_info->ucMemoryClockHigh << 16;
+ rdev->pm.power_state[state_index].clock_info[mode_index].mclk = mclk;
+ rdev->pm.power_state[state_index].clock_info[mode_index].sclk = sclk;
+ /* skip invalid modes */
+ if ((rdev->pm.power_state[state_index].clock_info[mode_index].mclk == 0) ||
+ (rdev->pm.power_state[state_index].clock_info[mode_index].sclk == 0))
continue;
rdev->pm.power_state[state_index].clock_info[mode_index].voltage.type =
VOLTAGE_SW;
rdev->pm.power_state[state_index].clock_info[mode_index].voltage.voltage =
clock_info->usVDDC;
+ /* XXX usVDDCI */
mode_index++;
} else {
struct _ATOM_PPLIB_R600_CLOCK_INFO *clock_info =
@@ -1781,12 +1855,6 @@ void radeon_atombios_get_power_modes(struct radeon_device *rdev)
if ((rdev->pm.power_state[state_index].clock_info[mode_index].mclk == 0) ||
(rdev->pm.power_state[state_index].clock_info[mode_index].sclk == 0))
continue;
- /* skip overclock modes for now */
- if ((rdev->pm.power_state[state_index].clock_info[mode_index].mclk >
- rdev->clock.default_mclk + RADEON_MODE_OVERCLOCK_MARGIN) ||
- (rdev->pm.power_state[state_index].clock_info[mode_index].sclk >
- rdev->clock.default_sclk + RADEON_MODE_OVERCLOCK_MARGIN))
- continue;
rdev->pm.power_state[state_index].clock_info[mode_index].voltage.type =
VOLTAGE_SW;
rdev->pm.power_state[state_index].clock_info[mode_index].voltage.voltage =
@@ -1798,7 +1866,9 @@ void radeon_atombios_get_power_modes(struct radeon_device *rdev)
if (mode_index) {
misc = le32_to_cpu(non_clock_info->ulCapsAndSettings);
misc2 = le16_to_cpu(non_clock_info->usClassification);
- rdev->pm.power_state[state_index].non_clock_info.pcie_lanes =
+ rdev->pm.power_state[state_index].misc = misc;
+ rdev->pm.power_state[state_index].misc2 = misc2;
+ rdev->pm.power_state[state_index].pcie_lanes =
((misc & ATOM_PPLIB_PCIE_LINK_WIDTH_MASK) >>
ATOM_PPLIB_PCIE_LINK_WIDTH_SHIFT) + 1;
switch (misc2 & ATOM_PPLIB_CLASSIFICATION_UI_MASK) {
@@ -1815,22 +1885,36 @@ void radeon_atombios_get_power_modes(struct radeon_device *rdev)
POWER_STATE_TYPE_PERFORMANCE;
break;
}
+ rdev->pm.power_state[state_index].flags = 0;
+ if (misc & ATOM_PPLIB_SINGLE_DISPLAY_ONLY)
+ rdev->pm.power_state[state_index].flags |=
+ RADEON_PM_STATE_SINGLE_DISPLAY_ONLY;
if (misc2 & ATOM_PPLIB_CLASSIFICATION_BOOT) {
rdev->pm.power_state[state_index].type =
POWER_STATE_TYPE_DEFAULT;
- rdev->pm.default_power_state = &rdev->pm.power_state[state_index];
+ rdev->pm.default_power_state_index = state_index;
rdev->pm.power_state[state_index].default_clock_mode =
&rdev->pm.power_state[state_index].clock_info[mode_index - 1];
}
state_index++;
}
}
+ /* if multiple clock modes, mark the lowest as no display */
+ for (i = 0; i < state_index; i++) {
+ if (rdev->pm.power_state[i].num_clock_modes > 1)
+ rdev->pm.power_state[i].clock_info[0].flags |=
+ RADEON_PM_MODE_NO_DISPLAY;
+ }
+ /* first mode is usually default */
+ if (rdev->pm.default_power_state_index == -1) {
+ rdev->pm.power_state[0].type =
+ POWER_STATE_TYPE_DEFAULT;
+ rdev->pm.default_power_state_index = 0;
+ rdev->pm.power_state[0].default_clock_mode =
+ &rdev->pm.power_state[0].clock_info[0];
+ }
}
} else {
- /* XXX figure out some good default low power mode for cards w/out power tables */
- }
-
- if (rdev->pm.default_power_state == NULL) {
/* add the default mode */
rdev->pm.power_state[state_index].type =
POWER_STATE_TYPE_DEFAULT;
@@ -1840,18 +1924,16 @@ void radeon_atombios_get_power_modes(struct radeon_device *rdev)
rdev->pm.power_state[state_index].default_clock_mode =
&rdev->pm.power_state[state_index].clock_info[0];
rdev->pm.power_state[state_index].clock_info[0].voltage.type = VOLTAGE_NONE;
- if (rdev->asic->get_pcie_lanes)
- rdev->pm.power_state[state_index].non_clock_info.pcie_lanes = radeon_get_pcie_lanes(rdev);
- else
- rdev->pm.power_state[state_index].non_clock_info.pcie_lanes = 16;
- rdev->pm.default_power_state = &rdev->pm.power_state[state_index];
+ rdev->pm.power_state[state_index].pcie_lanes = 16;
+ rdev->pm.default_power_state_index = state_index;
+ rdev->pm.power_state[state_index].flags = 0;
state_index++;
}
+
rdev->pm.num_power_states = state_index;
- rdev->pm.current_power_state = rdev->pm.default_power_state;
- rdev->pm.current_clock_mode =
- rdev->pm.default_power_state->default_clock_mode;
+ rdev->pm.current_power_state_index = rdev->pm.default_power_state_index;
+ rdev->pm.current_clock_mode_index = 0;
}
void radeon_atom_set_clock_gating(struct radeon_device *rdev, int enable)
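
A theme running through the power-table parsing above is the switch from cached pointers (default_power_state, current_power_state) to indices into rdev->pm.power_state[] (default_power_state_index, current_power_state_index). Indices stay valid if the array is re-filled on a re-parse and make "pick another state" plain arithmetic. A compact model, with the structures trimmed to what the example needs:

#include <stdio.h>

enum power_state_type { POWER_STATE_TYPE_BATTERY, POWER_STATE_TYPE_DEFAULT };

struct power_state {
	enum power_state_type type;
	int sclk;	/* engine clock */
};

/* Indices into the state array replace the old cached pointers. */
struct pm_model {
	struct power_state power_state[8];
	int num_power_states;
	int default_power_state_index;
	int current_power_state_index;
};

int main(void)
{
	struct pm_model pm = { .num_power_states = 2 };

	pm.power_state[0] = (struct power_state){ POWER_STATE_TYPE_BATTERY, 10000 };
	pm.power_state[1] = (struct power_state){ POWER_STATE_TYPE_DEFAULT, 40000 };
	pm.default_power_state_index = 1;
	pm.current_power_state_index = pm.default_power_state_index;
	printf("current sclk=%d\n",
	       pm.power_state[pm.current_power_state_index].sclk);
	return 0;
}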
diff --git a/drivers/gpu/drm/radeon/radeon_bios.c b/drivers/gpu/drm/radeon/radeon_bios.c
index 8ad71f70131..fbba938f804 100644
--- a/drivers/gpu/drm/radeon/radeon_bios.c
+++ b/drivers/gpu/drm/radeon/radeon_bios.c
@@ -85,12 +85,11 @@ static bool radeon_read_bios(struct radeon_device *rdev)
pci_unmap_rom(rdev->pdev, bios);
return false;
}
- rdev->bios = kmalloc(size, GFP_KERNEL);
+ rdev->bios = kmemdup(bios, size, GFP_KERNEL);
if (rdev->bios == NULL) {
pci_unmap_rom(rdev->pdev, bios);
return false;
}
- memcpy(rdev->bios, bios, size);
pci_unmap_rom(rdev->pdev, bios);
return true;
}
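
The radeon_bios.c hunk folds the kmalloc()-plus-memcpy() pair into kmemdup(), which allocates and copies in one step and leaves one less error path to get wrong. A userspace analogue of what kmemdup() does (memdup() here is a local name, not a kernel API):

#include <stdio.h>
#include <stdlib.h>
#include <string.h>

/* Userspace analogue of kmemdup(): allocate len bytes and fill them
 * with a copy of src, or return NULL if the allocation fails. */
static void *memdup(const void *src, size_t len)
{
	void *p = malloc(len);

	if (p)
		memcpy(p, src, len);
	return p;
}

int main(void)
{
	const char rom[] = "bios image";
	char *copy = memdup(rom, sizeof(rom));

	if (!copy)
		return 1;
	puts(copy);
	free(copy);
	return 0;
}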
diff --git a/drivers/gpu/drm/radeon/radeon_combios.c b/drivers/gpu/drm/radeon/radeon_combios.c
index 37db8adb274..7b5e10d3e9c 100644
--- a/drivers/gpu/drm/radeon/radeon_combios.c
+++ b/drivers/gpu/drm/radeon/radeon_combios.c
@@ -450,17 +450,17 @@ bool radeon_combios_check_hardcoded_edid(struct radeon_device *rdev)
{
int edid_info;
struct edid *edid;
+ unsigned char *raw;
edid_info = combios_get_table_offset(rdev->ddev, COMBIOS_HARDCODED_EDID_TABLE);
if (!edid_info)
return false;
- edid = kmalloc(EDID_LENGTH * (DRM_MAX_EDID_EXT_NUM + 1),
- GFP_KERNEL);
+ raw = rdev->bios + edid_info;
+ edid = kmalloc(EDID_LENGTH * (raw[0x7e] + 1), GFP_KERNEL);
if (edid == NULL)
return false;
- memcpy((unsigned char *)edid,
- (unsigned char *)(rdev->bios + edid_info), EDID_LENGTH);
+ memcpy((unsigned char *)edid, raw, EDID_LENGTH * (raw[0x7e] + 1));
if (!drm_edid_is_valid(edid)) {
kfree(edid);
@@ -600,7 +600,7 @@ static struct radeon_i2c_bus_rec combios_setup_i2c_bus(struct radeon_device *rde
}
i2c.mm_i2c = false;
i2c.i2c_id = 0;
- i2c.hpd_id = 0;
+ i2c.hpd = RADEON_HPD_NONE;
if (ddc_line)
i2c.valid = true;
@@ -1113,18 +1113,20 @@ struct radeon_encoder_lvds *radeon_combios_get_lvds_info(struct radeon_encoder
break;
if ((RBIOS16(tmp) == lvds->native_mode.hdisplay) &&
- (RBIOS16(tmp + 2) ==
- lvds->native_mode.vdisplay)) {
- lvds->native_mode.htotal = RBIOS16(tmp + 17) * 8;
- lvds->native_mode.hsync_start = RBIOS16(tmp + 21) * 8;
- lvds->native_mode.hsync_end = (RBIOS8(tmp + 23) +
- RBIOS16(tmp + 21)) * 8;
-
- lvds->native_mode.vtotal = RBIOS16(tmp + 24);
- lvds->native_mode.vsync_start = RBIOS16(tmp + 28) & 0x7ff;
- lvds->native_mode.vsync_end =
- ((RBIOS16(tmp + 28) & 0xf800) >> 11) +
- (RBIOS16(tmp + 28) & 0x7ff);
+ (RBIOS16(tmp + 2) == lvds->native_mode.vdisplay)) {
+ lvds->native_mode.htotal = lvds->native_mode.hdisplay +
+ (RBIOS16(tmp + 17) - RBIOS16(tmp + 19)) * 8;
+ lvds->native_mode.hsync_start = lvds->native_mode.hdisplay +
+ (RBIOS16(tmp + 21) - RBIOS16(tmp + 19) - 1) * 8;
+ lvds->native_mode.hsync_end = lvds->native_mode.hsync_start +
+ (RBIOS8(tmp + 23) * 8);
+
+ lvds->native_mode.vtotal = lvds->native_mode.vdisplay +
+ (RBIOS16(tmp + 24) - RBIOS16(tmp + 26));
+ lvds->native_mode.vsync_start = lvds->native_mode.vdisplay +
+ ((RBIOS16(tmp + 28) & 0x7ff) - RBIOS16(tmp + 26));
+ lvds->native_mode.vsync_end = lvds->native_mode.vsync_start +
+ ((RBIOS16(tmp + 28) & 0xf800) >> 11);
lvds->native_mode.clock = RBIOS16(tmp + 9) * 10;
lvds->native_mode.flags = 0;
@@ -2196,7 +2198,7 @@ bool radeon_get_legacy_connector_info_from_bios(struct drm_device *dev)
ATOM_DEVICE_DFP1_SUPPORT);
ddc_i2c = combios_setup_i2c_bus(rdev, RADEON_GPIO_DVI_DDC);
- hpd.hpd = RADEON_HPD_NONE;
+ hpd.hpd = RADEON_HPD_1;
radeon_add_legacy_connector(dev,
0,
ATOM_DEVICE_CRT1_SUPPORT |
@@ -2366,7 +2368,7 @@ void radeon_combios_get_power_modes(struct radeon_device *rdev)
u8 rev, blocks, tmp;
int state_index = 0;
- rdev->pm.default_power_state = NULL;
+ rdev->pm.default_power_state_index = -1;
if (rdev->flags & RADEON_IS_MOBILITY) {
offset = combios_get_table_offset(dev, COMBIOS_POWERPLAY_INFO_TABLE);
@@ -2380,17 +2382,13 @@ void radeon_combios_get_power_modes(struct radeon_device *rdev)
if ((rdev->pm.power_state[state_index].clock_info[0].mclk == 0) ||
(rdev->pm.power_state[state_index].clock_info[0].sclk == 0))
goto default_mode;
- /* skip overclock modes for now */
- if ((rdev->pm.power_state[state_index].clock_info[0].mclk >
- rdev->clock.default_mclk + RADEON_MODE_OVERCLOCK_MARGIN) ||
- (rdev->pm.power_state[state_index].clock_info[0].sclk >
- rdev->clock.default_sclk + RADEON_MODE_OVERCLOCK_MARGIN))
- goto default_mode;
rdev->pm.power_state[state_index].type =
POWER_STATE_TYPE_BATTERY;
misc = RBIOS16(offset + 0x5 + 0x0);
if (rev > 4)
misc2 = RBIOS16(offset + 0x5 + 0xe);
+ rdev->pm.power_state[state_index].misc = misc;
+ rdev->pm.power_state[state_index].misc2 = misc2;
if (misc & 0x4) {
rdev->pm.power_state[state_index].clock_info[0].voltage.type = VOLTAGE_GPIO;
if (misc & 0x8)
@@ -2437,8 +2435,9 @@ void radeon_combios_get_power_modes(struct radeon_device *rdev)
} else
rdev->pm.power_state[state_index].clock_info[0].voltage.type = VOLTAGE_NONE;
if (rev > 6)
- rdev->pm.power_state[state_index].non_clock_info.pcie_lanes =
+ rdev->pm.power_state[state_index].pcie_lanes =
RBIOS8(offset + 0x5 + 0x10);
+ rdev->pm.power_state[state_index].flags = RADEON_PM_STATE_SINGLE_DISPLAY_ONLY;
state_index++;
} else {
/* XXX figure out some good default low power mode for mobility cards w/out power tables */
@@ -2456,16 +2455,13 @@ default_mode:
rdev->pm.power_state[state_index].clock_info[0].sclk = rdev->clock.default_sclk;
rdev->pm.power_state[state_index].default_clock_mode = &rdev->pm.power_state[state_index].clock_info[0];
rdev->pm.power_state[state_index].clock_info[0].voltage.type = VOLTAGE_NONE;
- if (rdev->asic->get_pcie_lanes)
- rdev->pm.power_state[state_index].non_clock_info.pcie_lanes = radeon_get_pcie_lanes(rdev);
- else
- rdev->pm.power_state[state_index].non_clock_info.pcie_lanes = 16;
- rdev->pm.default_power_state = &rdev->pm.power_state[state_index];
+ rdev->pm.power_state[state_index].pcie_lanes = 16;
+ rdev->pm.power_state[state_index].flags = 0;
+ rdev->pm.default_power_state_index = state_index;
rdev->pm.num_power_states = state_index + 1;
- rdev->pm.current_power_state = rdev->pm.default_power_state;
- rdev->pm.current_clock_mode =
- rdev->pm.default_power_state->default_clock_mode;
+ rdev->pm.current_power_state_index = rdev->pm.default_power_state_index;
+ rdev->pm.current_clock_mode_index = 0;
}
void radeon_external_tmds_setup(struct drm_encoder *encoder)
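
The hardcoded-EDID fix above relies on byte 0x7e of the EDID base block, which holds the count of 128-byte extension blocks that follow, so a complete copy needs EDID_LENGTH * (raw[0x7e] + 1) bytes instead of a single block. A minimal sketch of that sizing (error handling trimmed to the essentials):

#include <stdint.h>
#include <stdio.h>
#include <stdlib.h>
#include <string.h>

#define EDID_LENGTH 128	/* size of one EDID block */

/* Copy a complete EDID: the base block plus raw[0x7e] extensions. */
static uint8_t *dup_edid(const uint8_t *raw, size_t *out_len)
{
	size_t len = (size_t)EDID_LENGTH * (raw[0x7e] + 1);
	uint8_t *edid = malloc(len);

	if (edid) {
		memcpy(edid, raw, len);
		*out_len = len;
	}
	return edid;
}

int main(void)
{
	uint8_t raw[EDID_LENGTH * 2] = { 0 };
	size_t len = 0;

	raw[0x7e] = 1;	/* one extension block follows the base block */
	free(dup_edid(raw, &len));
	printf("%zu\n", len);	/* 256 */
	return 0;
}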
diff --git a/drivers/gpu/drm/radeon/radeon_connectors.c b/drivers/gpu/drm/radeon/radeon_connectors.c
index 1331351c517..0c7ccc6961a 100644
--- a/drivers/gpu/drm/radeon/radeon_connectors.c
+++ b/drivers/gpu/drm/radeon/radeon_connectors.c
@@ -1041,7 +1041,6 @@ radeon_add_atom_connector(struct drm_device *dev,
struct radeon_connector_atom_dig *radeon_dig_connector;
uint32_t subpixel_order = SubPixelNone;
bool shared_ddc = false;
- int ret;
/* fixme - tv/cv/din */
if (connector_type == DRM_MODE_CONNECTOR_Unknown)
@@ -1076,9 +1075,7 @@ radeon_add_atom_connector(struct drm_device *dev,
switch (connector_type) {
case DRM_MODE_CONNECTOR_VGA:
drm_connector_init(dev, &radeon_connector->base, &radeon_vga_connector_funcs, connector_type);
- ret = drm_connector_helper_add(&radeon_connector->base, &radeon_vga_connector_helper_funcs);
- if (ret)
- goto failed;
+ drm_connector_helper_add(&radeon_connector->base, &radeon_vga_connector_helper_funcs);
if (i2c_bus->valid) {
radeon_connector->ddc_bus = radeon_i2c_create(dev, i2c_bus, "VGA");
if (!radeon_connector->ddc_bus)
@@ -1088,12 +1085,11 @@ radeon_add_atom_connector(struct drm_device *dev,
drm_connector_attach_property(&radeon_connector->base,
rdev->mode_info.load_detect_property,
1);
+ connector->polled = DRM_CONNECTOR_POLL_CONNECT;
break;
case DRM_MODE_CONNECTOR_DVIA:
drm_connector_init(dev, &radeon_connector->base, &radeon_vga_connector_funcs, connector_type);
- ret = drm_connector_helper_add(&radeon_connector->base, &radeon_vga_connector_helper_funcs);
- if (ret)
- goto failed;
+ drm_connector_helper_add(&radeon_connector->base, &radeon_vga_connector_helper_funcs);
if (i2c_bus->valid) {
radeon_connector->ddc_bus = radeon_i2c_create(dev, i2c_bus, "DVI");
if (!radeon_connector->ddc_bus)
@@ -1113,9 +1109,7 @@ radeon_add_atom_connector(struct drm_device *dev,
radeon_dig_connector->igp_lane_info = igp_lane_info;
radeon_connector->con_priv = radeon_dig_connector;
drm_connector_init(dev, &radeon_connector->base, &radeon_dvi_connector_funcs, connector_type);
- ret = drm_connector_helper_add(&radeon_connector->base, &radeon_dvi_connector_helper_funcs);
- if (ret)
- goto failed;
+ drm_connector_helper_add(&radeon_connector->base, &radeon_dvi_connector_helper_funcs);
if (i2c_bus->valid) {
radeon_connector->ddc_bus = radeon_i2c_create(dev, i2c_bus, "DVI");
if (!radeon_connector->ddc_bus)
@@ -1141,9 +1135,7 @@ radeon_add_atom_connector(struct drm_device *dev,
radeon_dig_connector->igp_lane_info = igp_lane_info;
radeon_connector->con_priv = radeon_dig_connector;
drm_connector_init(dev, &radeon_connector->base, &radeon_dvi_connector_funcs, connector_type);
- ret = drm_connector_helper_add(&radeon_connector->base, &radeon_dvi_connector_helper_funcs);
- if (ret)
- goto failed;
+ drm_connector_helper_add(&radeon_connector->base, &radeon_dvi_connector_helper_funcs);
if (i2c_bus->valid) {
radeon_connector->ddc_bus = radeon_i2c_create(dev, i2c_bus, "HDMI");
if (!radeon_connector->ddc_bus)
@@ -1163,9 +1155,7 @@ radeon_add_atom_connector(struct drm_device *dev,
radeon_dig_connector->igp_lane_info = igp_lane_info;
radeon_connector->con_priv = radeon_dig_connector;
drm_connector_init(dev, &radeon_connector->base, &radeon_dp_connector_funcs, connector_type);
- ret = drm_connector_helper_add(&radeon_connector->base, &radeon_dp_connector_helper_funcs);
- if (ret)
- goto failed;
+ drm_connector_helper_add(&radeon_connector->base, &radeon_dp_connector_helper_funcs);
if (i2c_bus->valid) {
/* add DP i2c bus */
if (connector_type == DRM_MODE_CONNECTOR_eDP)
@@ -1191,9 +1181,7 @@ radeon_add_atom_connector(struct drm_device *dev,
case DRM_MODE_CONNECTOR_9PinDIN:
if (radeon_tv == 1) {
drm_connector_init(dev, &radeon_connector->base, &radeon_tv_connector_funcs, connector_type);
- ret = drm_connector_helper_add(&radeon_connector->base, &radeon_tv_connector_helper_funcs);
- if (ret)
- goto failed;
+ drm_connector_helper_add(&radeon_connector->base, &radeon_tv_connector_helper_funcs);
radeon_connector->dac_load_detect = true;
drm_connector_attach_property(&radeon_connector->base,
rdev->mode_info.load_detect_property,
@@ -1211,9 +1199,7 @@ radeon_add_atom_connector(struct drm_device *dev,
radeon_dig_connector->igp_lane_info = igp_lane_info;
radeon_connector->con_priv = radeon_dig_connector;
drm_connector_init(dev, &radeon_connector->base, &radeon_lvds_connector_funcs, connector_type);
- ret = drm_connector_helper_add(&radeon_connector->base, &radeon_lvds_connector_helper_funcs);
- if (ret)
- goto failed;
+ drm_connector_helper_add(&radeon_connector->base, &radeon_lvds_connector_helper_funcs);
if (i2c_bus->valid) {
radeon_connector->ddc_bus = radeon_i2c_create(dev, i2c_bus, "LVDS");
if (!radeon_connector->ddc_bus)
@@ -1226,6 +1212,12 @@ radeon_add_atom_connector(struct drm_device *dev,
break;
}
+ if (hpd->hpd == RADEON_HPD_NONE) {
+ if (i2c_bus->valid)
+ connector->polled = DRM_CONNECTOR_POLL_CONNECT;
+ } else
+ connector->polled = DRM_CONNECTOR_POLL_HPD;
+
connector->display_info.subpixel_order = subpixel_order;
drm_sysfs_connector_add(connector);
return;
@@ -1250,7 +1242,6 @@ radeon_add_legacy_connector(struct drm_device *dev,
struct drm_connector *connector;
struct radeon_connector *radeon_connector;
uint32_t subpixel_order = SubPixelNone;
- int ret;
/* fixme - tv/cv/din */
if (connector_type == DRM_MODE_CONNECTOR_Unknown)
@@ -1278,9 +1269,7 @@ radeon_add_legacy_connector(struct drm_device *dev,
switch (connector_type) {
case DRM_MODE_CONNECTOR_VGA:
drm_connector_init(dev, &radeon_connector->base, &radeon_vga_connector_funcs, connector_type);
- ret = drm_connector_helper_add(&radeon_connector->base, &radeon_vga_connector_helper_funcs);
- if (ret)
- goto failed;
+ drm_connector_helper_add(&radeon_connector->base, &radeon_vga_connector_helper_funcs);
if (i2c_bus->valid) {
radeon_connector->ddc_bus = radeon_i2c_create(dev, i2c_bus, "VGA");
if (!radeon_connector->ddc_bus)
@@ -1290,12 +1279,11 @@ radeon_add_legacy_connector(struct drm_device *dev,
drm_connector_attach_property(&radeon_connector->base,
rdev->mode_info.load_detect_property,
1);
+ connector->polled = DRM_CONNECTOR_POLL_CONNECT;
break;
case DRM_MODE_CONNECTOR_DVIA:
drm_connector_init(dev, &radeon_connector->base, &radeon_vga_connector_funcs, connector_type);
- ret = drm_connector_helper_add(&radeon_connector->base, &radeon_vga_connector_helper_funcs);
- if (ret)
- goto failed;
+ drm_connector_helper_add(&radeon_connector->base, &radeon_vga_connector_helper_funcs);
if (i2c_bus->valid) {
radeon_connector->ddc_bus = radeon_i2c_create(dev, i2c_bus, "DVI");
if (!radeon_connector->ddc_bus)
@@ -1309,13 +1297,13 @@ radeon_add_legacy_connector(struct drm_device *dev,
case DRM_MODE_CONNECTOR_DVII:
case DRM_MODE_CONNECTOR_DVID:
drm_connector_init(dev, &radeon_connector->base, &radeon_dvi_connector_funcs, connector_type);
- ret = drm_connector_helper_add(&radeon_connector->base, &radeon_dvi_connector_helper_funcs);
- if (ret)
- goto failed;
+ drm_connector_helper_add(&radeon_connector->base, &radeon_dvi_connector_helper_funcs);
if (i2c_bus->valid) {
radeon_connector->ddc_bus = radeon_i2c_create(dev, i2c_bus, "DVI");
if (!radeon_connector->ddc_bus)
goto failed;
+ }
+ if (connector_type == DRM_MODE_CONNECTOR_DVII) {
radeon_connector->dac_load_detect = true;
drm_connector_attach_property(&radeon_connector->base,
rdev->mode_info.load_detect_property,
@@ -1328,9 +1316,7 @@ radeon_add_legacy_connector(struct drm_device *dev,
case DRM_MODE_CONNECTOR_9PinDIN:
if (radeon_tv == 1) {
drm_connector_init(dev, &radeon_connector->base, &radeon_tv_connector_funcs, connector_type);
- ret = drm_connector_helper_add(&radeon_connector->base, &radeon_tv_connector_helper_funcs);
- if (ret)
- goto failed;
+ drm_connector_helper_add(&radeon_connector->base, &radeon_tv_connector_helper_funcs);
radeon_connector->dac_load_detect = true;
/* RS400,RC410,RS480 chipset seems to report a lot
* of false positive on load detect, we haven't yet
@@ -1349,9 +1335,7 @@ radeon_add_legacy_connector(struct drm_device *dev,
break;
case DRM_MODE_CONNECTOR_LVDS:
drm_connector_init(dev, &radeon_connector->base, &radeon_lvds_connector_funcs, connector_type);
- ret = drm_connector_helper_add(&radeon_connector->base, &radeon_lvds_connector_helper_funcs);
- if (ret)
- goto failed;
+ drm_connector_helper_add(&radeon_connector->base, &radeon_lvds_connector_helper_funcs);
if (i2c_bus->valid) {
radeon_connector->ddc_bus = radeon_i2c_create(dev, i2c_bus, "LVDS");
if (!radeon_connector->ddc_bus)
@@ -1364,6 +1348,11 @@ radeon_add_legacy_connector(struct drm_device *dev,
break;
}
+ if (hpd->hpd == RADEON_HPD_NONE) {
+ if (i2c_bus->valid)
+ connector->polled = DRM_CONNECTOR_POLL_CONNECT;
+ } else
+ connector->polled = DRM_CONNECTOR_POLL_HPD;
connector->display_info.subpixel_order = subpixel_order;
drm_sysfs_connector_add(connector);
return;
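
Both connector constructors now end with the same polling policy: a connector backed by a real HPD pin is detected via the hotplug interrupt, while an HPD-less connector is polled for connects only if it at least has a usable DDC bus. A minimal sketch of that shared decision, assuming the structures from radeon_mode.h later in this diff (the helper name is hypothetical; in the series the logic is simply open-coded at the end of both functions):

/* Polling policy shared by radeon_add_atom_connector() and
 * radeon_add_legacy_connector() in this series.
 */
static void radeon_connector_set_polling(struct drm_connector *connector,
                                         struct radeon_hpd *hpd,
                                         struct radeon_i2c_bus_rec *i2c_bus)
{
        if (hpd->hpd == RADEON_HPD_NONE) {
                /* no hotplug pin: poll over DDC if i2c is available */
                if (i2c_bus->valid)
                        connector->polled = DRM_CONNECTOR_POLL_CONNECT;
        } else {
                /* hotplug pin present: the HPD interrupt drives detection */
                connector->polled = DRM_CONNECTOR_POLL_HPD;
        }
}
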
diff --git a/drivers/gpu/drm/radeon/radeon_cp.c b/drivers/gpu/drm/radeon/radeon_cp.c
index 419630dd207..2f042a3c0e6 100644
--- a/drivers/gpu/drm/radeon/radeon_cp.c
+++ b/drivers/gpu/drm/radeon/radeon_cp.c
@@ -435,14 +435,19 @@ static void radeon_init_pipes(struct drm_device *dev)
if ((dev_priv->flags & RADEON_FAMILY_MASK) >= CHIP_R420) {
gb_pipe_sel = RADEON_READ(R400_GB_PIPE_SELECT);
dev_priv->num_gb_pipes = ((gb_pipe_sel >> 12) & 0x3) + 1;
+ /* SE cards have 1 pipe */
+ if ((dev->pdev->device == 0x5e4c) ||
+ (dev->pdev->device == 0x5e4f))
+ dev_priv->num_gb_pipes = 1;
} else {
/* R3xx */
if (((dev_priv->flags & RADEON_FAMILY_MASK) == CHIP_R300 &&
dev->pdev->device != 0x4144) ||
- ((dev_priv->flags & RADEON_FAMILY_MASK) == CHIP_R350)) {
+ ((dev_priv->flags & RADEON_FAMILY_MASK) == CHIP_R350 &&
+ dev->pdev->device != 0x4148)) {
dev_priv->num_gb_pipes = 2;
} else {
- /* RV3xx/R300 AD */
+ /* RV3xx/R300 AD/R350 AH */
dev_priv->num_gb_pipes = 1;
}
}
diff --git a/drivers/gpu/drm/radeon/radeon_cs.c b/drivers/gpu/drm/radeon/radeon_cs.c
index f9b0fe002c0..ae0fb7356e6 100644
--- a/drivers/gpu/drm/radeon/radeon_cs.c
+++ b/drivers/gpu/drm/radeon/radeon_cs.c
@@ -220,10 +220,6 @@ int radeon_cs_ioctl(struct drm_device *dev, void *data, struct drm_file *filp)
int r;
mutex_lock(&rdev->cs_mutex);
- if (rdev->gpu_lockup) {
- mutex_unlock(&rdev->cs_mutex);
- return -EINVAL;
- }
/* initialize parser */
memset(&parser, 0, sizeof(struct radeon_cs_parser));
parser.filp = filp;
diff --git a/drivers/gpu/drm/radeon/radeon_device.c b/drivers/gpu/drm/radeon/radeon_device.c
index 7b629e30556..a20b612ffe7 100644
--- a/drivers/gpu/drm/radeon/radeon_device.c
+++ b/drivers/gpu/drm/radeon/radeon_device.c
@@ -299,24 +299,24 @@ void radeon_update_bandwidth_info(struct radeon_device *rdev)
sclk = radeon_get_engine_clock(rdev);
mclk = rdev->clock.default_mclk;
- a.full = rfixed_const(100);
- rdev->pm.sclk.full = rfixed_const(sclk);
- rdev->pm.sclk.full = rfixed_div(rdev->pm.sclk, a);
- rdev->pm.mclk.full = rfixed_const(mclk);
- rdev->pm.mclk.full = rfixed_div(rdev->pm.mclk, a);
+ a.full = dfixed_const(100);
+ rdev->pm.sclk.full = dfixed_const(sclk);
+ rdev->pm.sclk.full = dfixed_div(rdev->pm.sclk, a);
+ rdev->pm.mclk.full = dfixed_const(mclk);
+ rdev->pm.mclk.full = dfixed_div(rdev->pm.mclk, a);
- a.full = rfixed_const(16);
+ a.full = dfixed_const(16);
/* core_bandwidth = sclk(MHz) * 16 */
- rdev->pm.core_bandwidth.full = rfixed_div(rdev->pm.sclk, a);
+ rdev->pm.core_bandwidth.full = dfixed_div(rdev->pm.sclk, a);
} else {
sclk = radeon_get_engine_clock(rdev);
mclk = radeon_get_memory_clock(rdev);
- a.full = rfixed_const(100);
- rdev->pm.sclk.full = rfixed_const(sclk);
- rdev->pm.sclk.full = rfixed_div(rdev->pm.sclk, a);
- rdev->pm.mclk.full = rfixed_const(mclk);
- rdev->pm.mclk.full = rfixed_div(rdev->pm.mclk, a);
+ a.full = dfixed_const(100);
+ rdev->pm.sclk.full = dfixed_const(sclk);
+ rdev->pm.sclk.full = dfixed_div(rdev->pm.sclk, a);
+ rdev->pm.mclk.full = dfixed_const(mclk);
+ rdev->pm.mclk.full = dfixed_div(rdev->pm.mclk, a);
}
}
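
The rfixed_* to dfixed_* churn here and in the radeon_display.c hunks below is a mechanical rename: the radeon-private 20.12 fixed-point helpers (radeon_fixed.h, deleted at the end of this diff) are promoted to the shared <drm/drm_fixed.h>. A minimal kernel-context sketch of the arithmetic these hunks perform, assuming the dfixed_* helpers keep the rfixed_* semantics:

#include <drm/drm_fixed.h>      /* fixed20_12, dfixed_const(), dfixed_div() */

/* Convert a clock reported in 10 kHz units to MHz, kept as 20.12
 * fixed point - this is exactly what the sclk/mclk hunks above do.
 */
static fixed20_12 clk_10khz_to_mhz(u32 clk)
{
        fixed20_12 a, f;

        a.full = dfixed_const(100);
        f.full = dfixed_const(clk);
        f.full = dfixed_div(f, a);      /* clk / 100 */
        return f;
}
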
@@ -599,9 +599,11 @@ int radeon_device_init(struct radeon_device *rdev,
spin_lock_init(&rdev->ih.lock);
mutex_init(&rdev->gem.mutex);
mutex_init(&rdev->pm.mutex);
+ mutex_init(&rdev->vram_mutex);
rwlock_init(&rdev->fence_drv.lock);
INIT_LIST_HEAD(&rdev->gem.objects);
init_waitqueue_head(&rdev->irq.vblank_queue);
+ init_waitqueue_head(&rdev->irq.idle_queue);
/* setup workqueue */
rdev->wq = create_workqueue("radeon");
@@ -671,7 +673,7 @@ int radeon_device_init(struct radeon_device *rdev,
/* Acceleration not working on AGP card try again
* with fallback to PCI or PCIE GART
*/
- radeon_gpu_reset(rdev);
+ radeon_asic_reset(rdev);
radeon_fini(rdev);
radeon_agp_disable(rdev);
r = radeon_init(rdev);
@@ -691,6 +693,8 @@ void radeon_device_fini(struct radeon_device *rdev)
{
DRM_INFO("radeon: finishing device.\n");
rdev->shutdown = true;
+ /* evict vram memory */
+ radeon_bo_evict_vram(rdev);
radeon_fini(rdev);
destroy_workqueue(rdev->wq);
vga_switcheroo_unregister_client(rdev->pdev);
@@ -728,9 +732,10 @@ int radeon_suspend_kms(struct drm_device *dev, pm_message_t state)
continue;
}
robj = rfb->obj->driver_private;
- if (robj != rdev->fbdev_rbo) {
+ /* don't unpin kernel fb objects */
+ if (!radeon_fbdev_robj_is_fb(rdev, robj)) {
r = radeon_bo_reserve(robj, false);
- if (unlikely(r == 0)) {
+ if (r == 0) {
radeon_bo_unpin(robj);
radeon_bo_unreserve(robj);
}
@@ -743,6 +748,7 @@ int radeon_suspend_kms(struct drm_device *dev, pm_message_t state)
radeon_save_bios_scratch_regs(rdev);
+ radeon_pm_suspend(rdev);
radeon_suspend(rdev);
radeon_hpd_fini(rdev);
/* evict remaining vram memory */
@@ -755,7 +761,7 @@ int radeon_suspend_kms(struct drm_device *dev, pm_message_t state)
pci_set_power_state(dev->pdev, PCI_D3hot);
}
acquire_console_sem();
- fb_set_suspend(rdev->fbdev_info, 1);
+ radeon_fbdev_set_suspend(rdev, 1);
release_console_sem();
return 0;
}
@@ -778,8 +784,9 @@ int radeon_resume_kms(struct drm_device *dev)
/* resume AGP if in use */
radeon_agp_resume(rdev);
radeon_resume(rdev);
+ radeon_pm_resume(rdev);
radeon_restore_bios_scratch_regs(rdev);
- fb_set_suspend(rdev->fbdev_info, 0);
+ radeon_fbdev_set_suspend(rdev, 0);
release_console_sem();
/* reset hpd state */
@@ -789,6 +796,26 @@ int radeon_resume_kms(struct drm_device *dev)
return 0;
}
+int radeon_gpu_reset(struct radeon_device *rdev)
+{
+ int r;
+
+ radeon_save_bios_scratch_regs(rdev);
+ radeon_suspend(rdev);
+
+ r = radeon_asic_reset(rdev);
+ if (!r) {
+ dev_info(rdev->dev, "GPU reset succeeded\n");
+ radeon_resume(rdev);
+ radeon_restore_bios_scratch_regs(rdev);
+ drm_helper_resume_force_mode(rdev->ddev);
+ return 0;
+ }
+ /* bad news: no good way to report this to userspace yet */
+ dev_info(rdev->dev, "GPU reset failed\n");
+ return r;
+}
+
/*
* Debugfs
diff --git a/drivers/gpu/drm/radeon/radeon_display.c b/drivers/gpu/drm/radeon/radeon_display.c
index b8d67282824..1006549d157 100644
--- a/drivers/gpu/drm/radeon/radeon_display.c
+++ b/drivers/gpu/drm/radeon/radeon_display.c
@@ -86,12 +86,12 @@ static void evergreen_crtc_load_lut(struct drm_crtc *crtc)
WREG32(EVERGREEN_DC_LUT_WHITE_OFFSET_GREEN + radeon_crtc->crtc_offset, 0xffff);
WREG32(EVERGREEN_DC_LUT_WHITE_OFFSET_RED + radeon_crtc->crtc_offset, 0xffff);
- WREG32(EVERGREEN_DC_LUT_RW_MODE, radeon_crtc->crtc_id);
- WREG32(EVERGREEN_DC_LUT_WRITE_EN_MASK, 0x00000007);
+ WREG32(EVERGREEN_DC_LUT_RW_MODE + radeon_crtc->crtc_offset, 0);
+ WREG32(EVERGREEN_DC_LUT_WRITE_EN_MASK + radeon_crtc->crtc_offset, 0x00000007);
- WREG32(EVERGREEN_DC_LUT_RW_INDEX, 0);
+ WREG32(EVERGREEN_DC_LUT_RW_INDEX + radeon_crtc->crtc_offset, 0);
for (i = 0; i < 256; i++) {
- WREG32(EVERGREEN_DC_LUT_30_COLOR,
+ WREG32(EVERGREEN_DC_LUT_30_COLOR + radeon_crtc->crtc_offset,
(radeon_crtc->lut_r[i] << 20) |
(radeon_crtc->lut_g[i] << 10) |
(radeon_crtc->lut_b[i] << 0));
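
This hunk is a correctness fix, not part of the rename churn: on Evergreen the LUT RW_MODE / WRITE_EN_MASK / RW_INDEX / 30_COLOR registers exist once per display controller and are addressed at base + crtc_offset. The old code wrote the bare base registers (and stuffed the crtc id into RW_MODE), so loading CRTC 1's palette scribbled over CRTC 0's. The corrected addressing pattern, condensed:

u32 off = radeon_crtc->crtc_offset;     /* selects the per-CRTC register bank */

WREG32(EVERGREEN_DC_LUT_RW_MODE + off, 0);      /* was the crtc id; now 0 */
WREG32(EVERGREEN_DC_LUT_WRITE_EN_MASK + off, 0x00000007);
WREG32(EVERGREEN_DC_LUT_RW_INDEX + off, 0);
for (i = 0; i < 256; i++)
        WREG32(EVERGREEN_DC_LUT_30_COLOR + off,
               (lut_r[i] << 20) | (lut_g[i] << 10) | lut_b[i]);
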
@@ -633,37 +633,37 @@ calc_fb_div(struct radeon_pll *pll,
vco_freq = freq * post_div;
/* feedback_divider = vco_freq * ref_div / pll->reference_freq; */
- a.full = rfixed_const(pll->reference_freq);
- feedback_divider.full = rfixed_const(vco_freq);
- feedback_divider.full = rfixed_div(feedback_divider, a);
- a.full = rfixed_const(ref_div);
- feedback_divider.full = rfixed_mul(feedback_divider, a);
+ a.full = dfixed_const(pll->reference_freq);
+ feedback_divider.full = dfixed_const(vco_freq);
+ feedback_divider.full = dfixed_div(feedback_divider, a);
+ a.full = dfixed_const(ref_div);
+ feedback_divider.full = dfixed_mul(feedback_divider, a);
if (pll->flags & RADEON_PLL_USE_FRAC_FB_DIV) {
/* feedback_divider = floor((feedback_divider * 10.0) + 0.5) * 0.1; */
- a.full = rfixed_const(10);
- feedback_divider.full = rfixed_mul(feedback_divider, a);
- feedback_divider.full += rfixed_const_half(0);
- feedback_divider.full = rfixed_floor(feedback_divider);
- feedback_divider.full = rfixed_div(feedback_divider, a);
+ a.full = dfixed_const(10);
+ feedback_divider.full = dfixed_mul(feedback_divider, a);
+ feedback_divider.full += dfixed_const_half(0);
+ feedback_divider.full = dfixed_floor(feedback_divider);
+ feedback_divider.full = dfixed_div(feedback_divider, a);
/* *fb_div = floor(feedback_divider); */
- a.full = rfixed_floor(feedback_divider);
- *fb_div = rfixed_trunc(a);
+ a.full = dfixed_floor(feedback_divider);
+ *fb_div = dfixed_trunc(a);
/* *fb_div_frac = fmod(feedback_divider, 1.0) * 10.0; */
- a.full = rfixed_const(10);
- b.full = rfixed_mul(feedback_divider, a);
+ a.full = dfixed_const(10);
+ b.full = dfixed_mul(feedback_divider, a);
- feedback_divider.full = rfixed_floor(feedback_divider);
- feedback_divider.full = rfixed_mul(feedback_divider, a);
+ feedback_divider.full = dfixed_floor(feedback_divider);
+ feedback_divider.full = dfixed_mul(feedback_divider, a);
feedback_divider.full = b.full - feedback_divider.full;
- *fb_div_frac = rfixed_trunc(feedback_divider);
+ *fb_div_frac = dfixed_trunc(feedback_divider);
} else {
/* *fb_div = floor(feedback_divider + 0.5); */
- feedback_divider.full += rfixed_const_half(0);
- feedback_divider.full = rfixed_floor(feedback_divider);
+ feedback_divider.full += dfixed_const_half(0);
+ feedback_divider.full = dfixed_floor(feedback_divider);
- *fb_div = rfixed_trunc(feedback_divider);
+ *fb_div = dfixed_trunc(feedback_divider);
*fb_div_frac = 0;
}
@@ -693,10 +693,10 @@ calc_fb_ref_div(struct radeon_pll *pll,
pll_out_max = pll->pll_out_max;
}
- ffreq.full = rfixed_const(freq);
+ ffreq.full = dfixed_const(freq);
/* max_error = ffreq * 0.0025; */
- a.full = rfixed_const(400);
- max_error.full = rfixed_div(ffreq, a);
+ a.full = dfixed_const(400);
+ max_error.full = dfixed_div(ffreq, a);
for ((*ref_div) = pll->min_ref_div; (*ref_div) < pll->max_ref_div; ++(*ref_div)) {
if (calc_fb_div(pll, freq, post_div, (*ref_div), fb_div, fb_div_frac)) {
@@ -707,9 +707,9 @@ calc_fb_ref_div(struct radeon_pll *pll,
continue;
/* pll_out = vco / post_div; */
- a.full = rfixed_const(post_div);
- pll_out.full = rfixed_const(vco);
- pll_out.full = rfixed_div(pll_out, a);
+ a.full = dfixed_const(post_div);
+ pll_out.full = dfixed_const(vco);
+ pll_out.full = dfixed_div(pll_out, a);
if (pll_out.full >= ffreq.full) {
error.full = pll_out.full - ffreq.full;
@@ -831,10 +831,6 @@ void radeon_compute_pll(struct radeon_pll *pll,
static void radeon_user_framebuffer_destroy(struct drm_framebuffer *fb)
{
struct radeon_framebuffer *radeon_fb = to_radeon_framebuffer(fb);
- struct drm_device *dev = fb->dev;
-
- if (fb->fbdev)
- radeonfb_remove(dev, fb);
if (radeon_fb->obj)
drm_gem_object_unreference_unlocked(radeon_fb->obj);
@@ -856,21 +852,15 @@ static const struct drm_framebuffer_funcs radeon_fb_funcs = {
.create_handle = radeon_user_framebuffer_create_handle,
};
-struct drm_framebuffer *
-radeon_framebuffer_create(struct drm_device *dev,
- struct drm_mode_fb_cmd *mode_cmd,
- struct drm_gem_object *obj)
+void
+radeon_framebuffer_init(struct drm_device *dev,
+ struct radeon_framebuffer *rfb,
+ struct drm_mode_fb_cmd *mode_cmd,
+ struct drm_gem_object *obj)
{
- struct radeon_framebuffer *radeon_fb;
-
- radeon_fb = kzalloc(sizeof(*radeon_fb), GFP_KERNEL);
- if (radeon_fb == NULL) {
- return NULL;
- }
- drm_framebuffer_init(dev, &radeon_fb->base, &radeon_fb_funcs);
- drm_helper_mode_fill_fb_struct(&radeon_fb->base, mode_cmd);
- radeon_fb->obj = obj;
- return &radeon_fb->base;
+ rfb->obj = obj;
+ drm_framebuffer_init(dev, &rfb->base, &radeon_fb_funcs);
+ drm_helper_mode_fill_fb_struct(&rfb->base, mode_cmd);
}
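
radeon_framebuffer_create() becomes radeon_framebuffer_init(), which only wires up caller-provided storage. The point of the split is that the fbdev rework in radeon_fb.c below can embed the radeon_framebuffer by value in struct radeon_fbdev instead of heap-allocating it, while the userspace-facing path keeps its kzalloc. The two call patterns, both taken from this diff:

/* userspace fb, as in radeon_user_framebuffer_create(): caller allocates */
radeon_fb = kzalloc(sizeof(*radeon_fb), GFP_KERNEL);
if (radeon_fb == NULL)
        return NULL;
radeon_framebuffer_init(dev, radeon_fb, mode_cmd, obj);

/* fbdev console: the framebuffer is embedded, no extra allocation */
radeon_framebuffer_init(rdev->ddev, &rfbdev->rfb, &mode_cmd, gobj);
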
static struct drm_framebuffer *
@@ -879,6 +869,7 @@ radeon_user_framebuffer_create(struct drm_device *dev,
struct drm_mode_fb_cmd *mode_cmd)
{
struct drm_gem_object *obj;
+ struct radeon_framebuffer *radeon_fb;
obj = drm_gem_object_lookup(dev, file_priv, mode_cmd->handle);
if (obj == NULL) {
@@ -886,12 +877,26 @@ radeon_user_framebuffer_create(struct drm_device *dev,
"can't create framebuffer\n", mode_cmd->handle);
return NULL;
}
- return radeon_framebuffer_create(dev, mode_cmd, obj);
+
+ radeon_fb = kzalloc(sizeof(*radeon_fb), GFP_KERNEL);
+ if (radeon_fb == NULL) {
+ return NULL;
+ }
+
+ radeon_framebuffer_init(dev, radeon_fb, mode_cmd, obj);
+
+ return &radeon_fb->base;
+}
+
+static void radeon_output_poll_changed(struct drm_device *dev)
+{
+ struct radeon_device *rdev = dev->dev_private;
+ radeon_fb_output_poll_changed(rdev);
}
static const struct drm_mode_config_funcs radeon_mode_funcs = {
.fb_create = radeon_user_framebuffer_create,
- .fb_changed = radeonfb_probe,
+ .output_poll_changed = radeon_output_poll_changed
};
struct drm_prop_enum_list {
@@ -978,8 +983,11 @@ void radeon_update_display_priority(struct radeon_device *rdev)
/* set display priority to high for r3xx, rv515 chips
* this avoids flickering due to underflow to the
* display controllers during heavy acceleration.
+ * Don't force high on rs4xx igp chips as it seems to
+ * affect the sound card. See kernel bug 15982.
*/
- if (ASIC_IS_R300(rdev) || (rdev->family == CHIP_RV515))
+ if ((ASIC_IS_R300(rdev) || (rdev->family == CHIP_RV515)) &&
+ !(rdev->flags & RADEON_IS_IGP))
rdev->disp_priority = 2;
else
rdev->disp_priority = 0;
@@ -1031,15 +1039,24 @@ int radeon_modeset_init(struct radeon_device *rdev)
}
/* initialize hpd */
radeon_hpd_init(rdev);
- drm_helper_initial_config(rdev->ddev);
+
+ /* Initialize power management */
+ radeon_pm_init(rdev);
+
+ radeon_fbdev_init(rdev);
+ drm_kms_helper_poll_init(rdev->ddev);
+
return 0;
}
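
drm_helper_initial_config() is gone; the driver now owns its fbdev and output-polling lifecycle explicitly, and the fini hunk below unwinds the same steps in reverse order. The pairing, using only calls that appear in this diff:

/* radeon_modeset_init(), tail end */
radeon_hpd_init(rdev);
radeon_pm_init(rdev);
radeon_fbdev_init(rdev);                /* replaces the fb_changed/probe path */
drm_kms_helper_poll_init(rdev->ddev);   /* starts connector polling */

/* radeon_modeset_fini(), mirror image */
radeon_fbdev_fini(rdev);
radeon_pm_fini(rdev);
drm_kms_helper_poll_fini(rdev->ddev);
radeon_hpd_fini(rdev);
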
void radeon_modeset_fini(struct radeon_device *rdev)
{
+ radeon_fbdev_fini(rdev);
kfree(rdev->mode_info.bios_hardcoded_edid);
+ radeon_pm_fini(rdev);
if (rdev->mode_info.mode_config_initialized) {
+ drm_kms_helper_poll_fini(rdev->ddev);
radeon_hpd_fini(rdev);
drm_mode_config_cleanup(rdev->ddev);
rdev->mode_info.mode_config_initialized = false;
@@ -1089,15 +1106,15 @@ bool radeon_crtc_scaling_mode_fixup(struct drm_crtc *crtc,
}
if (radeon_crtc->rmx_type != RMX_OFF) {
fixed20_12 a, b;
- a.full = rfixed_const(crtc->mode.vdisplay);
- b.full = rfixed_const(radeon_crtc->native_mode.hdisplay);
- radeon_crtc->vsc.full = rfixed_div(a, b);
- a.full = rfixed_const(crtc->mode.hdisplay);
- b.full = rfixed_const(radeon_crtc->native_mode.vdisplay);
- radeon_crtc->hsc.full = rfixed_div(a, b);
+ a.full = dfixed_const(crtc->mode.vdisplay);
+ b.full = dfixed_const(radeon_crtc->native_mode.hdisplay);
+ radeon_crtc->vsc.full = dfixed_div(a, b);
+ a.full = dfixed_const(crtc->mode.hdisplay);
+ b.full = dfixed_const(radeon_crtc->native_mode.vdisplay);
+ radeon_crtc->hsc.full = dfixed_div(a, b);
} else {
- radeon_crtc->vsc.full = rfixed_const(1);
- radeon_crtc->hsc.full = rfixed_const(1);
+ radeon_crtc->vsc.full = dfixed_const(1);
+ radeon_crtc->hsc.full = dfixed_const(1);
}
return true;
}
diff --git a/drivers/gpu/drm/radeon/radeon_drv.c b/drivers/gpu/drm/radeon/radeon_drv.c
index 4b05563d99e..902d1731a65 100644
--- a/drivers/gpu/drm/radeon/radeon_drv.c
+++ b/drivers/gpu/drm/radeon/radeon_drv.c
@@ -44,9 +44,10 @@
* - 2.1.0 - add square tiling interface
* - 2.2.0 - add r6xx/r7xx const buffer support
* - 2.3.0 - add MSPOS + 3D texture + r500 VAP regs
+ * - 2.4.0 - add crtc id query
*/
#define KMS_DRIVER_MAJOR 2
-#define KMS_DRIVER_MINOR 3
+#define KMS_DRIVER_MINOR 4
#define KMS_DRIVER_PATCHLEVEL 0
int radeon_driver_load_kms(struct drm_device *dev, unsigned long flags);
int radeon_driver_unload_kms(struct drm_device *dev);
@@ -91,7 +92,6 @@ int radeon_testing = 0;
int radeon_connector_table = 0;
int radeon_tv = 1;
int radeon_new_pll = -1;
-int radeon_dynpm = -1;
int radeon_audio = 1;
int radeon_disp_priority = 0;
int radeon_hw_i2c = 0;
@@ -132,9 +132,6 @@ module_param_named(tv, radeon_tv, int, 0444);
MODULE_PARM_DESC(new_pll, "Select new PLL code");
module_param_named(new_pll, radeon_new_pll, int, 0444);
-MODULE_PARM_DESC(dynpm, "Disable/Enable dynamic power management (1 = enable)");
-module_param_named(dynpm, radeon_dynpm, int, 0444);
-
MODULE_PARM_DESC(audio, "Audio enable (0 = disable)");
module_param_named(audio, radeon_audio, int, 0444);
@@ -216,6 +213,7 @@ static struct drm_driver driver_old = {
.mmap = drm_mmap,
.poll = drm_poll,
.fasync = drm_fasync,
+ .read = drm_read,
#ifdef CONFIG_COMPAT
.compat_ioctl = radeon_compat_ioctl,
#endif
@@ -304,6 +302,7 @@ static struct drm_driver kms_driver = {
.mmap = radeon_mmap,
.poll = drm_poll,
.fasync = drm_fasync,
+ .read = drm_read,
#ifdef CONFIG_COMPAT
.compat_ioctl = radeon_kms_compat_ioctl,
#endif
diff --git a/drivers/gpu/drm/radeon/radeon_encoders.c b/drivers/gpu/drm/radeon/radeon_encoders.c
index 30293bec080..1ebb100015b 100644
--- a/drivers/gpu/drm/radeon/radeon_encoders.c
+++ b/drivers/gpu/drm/radeon/radeon_encoders.c
@@ -254,6 +254,53 @@ radeon_get_atom_connector_priv_from_encoder(struct drm_encoder *encoder)
return dig_connector;
}
+void radeon_panel_mode_fixup(struct drm_encoder *encoder,
+ struct drm_display_mode *adjusted_mode)
+{
+ struct radeon_encoder *radeon_encoder = to_radeon_encoder(encoder);
+ struct drm_device *dev = encoder->dev;
+ struct radeon_device *rdev = dev->dev_private;
+ struct drm_display_mode *native_mode = &radeon_encoder->native_mode;
+ unsigned hblank = native_mode->htotal - native_mode->hdisplay;
+ unsigned vblank = native_mode->vtotal - native_mode->vdisplay;
+ unsigned hover = native_mode->hsync_start - native_mode->hdisplay;
+ unsigned vover = native_mode->vsync_start - native_mode->vdisplay;
+ unsigned hsync_width = native_mode->hsync_end - native_mode->hsync_start;
+ unsigned vsync_width = native_mode->vsync_end - native_mode->vsync_start;
+
+ adjusted_mode->clock = native_mode->clock;
+ adjusted_mode->flags = native_mode->flags;
+
+ if (ASIC_IS_AVIVO(rdev)) {
+ adjusted_mode->hdisplay = native_mode->hdisplay;
+ adjusted_mode->vdisplay = native_mode->vdisplay;
+ }
+
+ adjusted_mode->htotal = native_mode->hdisplay + hblank;
+ adjusted_mode->hsync_start = native_mode->hdisplay + hover;
+ adjusted_mode->hsync_end = adjusted_mode->hsync_start + hsync_width;
+
+ adjusted_mode->vtotal = native_mode->vdisplay + vblank;
+ adjusted_mode->vsync_start = native_mode->vdisplay + vover;
+ adjusted_mode->vsync_end = adjusted_mode->vsync_start + vsync_width;
+
+ drm_mode_set_crtcinfo(adjusted_mode, CRTC_INTERLACE_HALVE_V);
+
+ if (ASIC_IS_AVIVO(rdev)) {
+ adjusted_mode->crtc_hdisplay = native_mode->hdisplay;
+ adjusted_mode->crtc_vdisplay = native_mode->vdisplay;
+ }
+
+ adjusted_mode->crtc_htotal = adjusted_mode->crtc_hdisplay + hblank;
+ adjusted_mode->crtc_hsync_start = adjusted_mode->crtc_hdisplay + hover;
+ adjusted_mode->crtc_hsync_end = adjusted_mode->crtc_hsync_start + hsync_width;
+
+ adjusted_mode->crtc_vtotal = adjusted_mode->crtc_vdisplay + vblank;
+ adjusted_mode->crtc_vsync_start = adjusted_mode->crtc_vdisplay + vover;
+ adjusted_mode->crtc_vsync_end = adjusted_mode->crtc_vsync_start + vsync_width;
+
+}
+
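
radeon_panel_mode_fixup() replaces the old wholesale "*adjusted_mode = *native_mode" copy (and the base.id save/restore hack it required). Instead of overwriting the whole mode, it lifts only the panel's clock, flags, blanking and sync intervals, and rebuilds the timings around whatever size the crtc will actually scan out - the native size on AVIVO+, the user mode's size on older parts where the RMX scaler bridges the difference. The horizontal arithmetic in miniature (vertical is identical):

/* intervals taken from the panel's native mode */
hblank      = native->htotal      - native->hdisplay;
hover       = native->hsync_start - native->hdisplay;  /* front porch */
hsync_width = native->hsync_end   - native->hsync_start;

/* crtc timings rebuilt around the real scan-out width */
adjusted->crtc_htotal      = adjusted->crtc_hdisplay + hblank;
adjusted->crtc_hsync_start = adjusted->crtc_hdisplay + hover;
adjusted->crtc_hsync_end   = adjusted->crtc_hsync_start + hsync_width;
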
static bool radeon_atom_mode_fixup(struct drm_encoder *encoder,
struct drm_display_mode *mode,
struct drm_display_mode *adjusted_mode)
@@ -262,9 +309,6 @@ static bool radeon_atom_mode_fixup(struct drm_encoder *encoder,
struct drm_device *dev = encoder->dev;
struct radeon_device *rdev = dev->dev_private;
- /* adjust pm to upcoming mode change */
- radeon_pm_compute_clocks(rdev);
-
/* set the active encoder to connector routing */
radeon_encoder_set_active_device(encoder);
drm_mode_set_crtcinfo(adjusted_mode, 0);
@@ -275,18 +319,8 @@ static bool radeon_atom_mode_fixup(struct drm_encoder *encoder,
adjusted_mode->crtc_vsync_start = adjusted_mode->crtc_vdisplay + 2;
/* get the native mode for LVDS */
- if (radeon_encoder->active_device & (ATOM_DEVICE_LCD_SUPPORT)) {
- struct drm_display_mode *native_mode = &radeon_encoder->native_mode;
- int mode_id = adjusted_mode->base.id;
- *adjusted_mode = *native_mode;
- if (!ASIC_IS_AVIVO(rdev)) {
- adjusted_mode->hdisplay = mode->hdisplay;
- adjusted_mode->vdisplay = mode->vdisplay;
- adjusted_mode->crtc_hdisplay = mode->hdisplay;
- adjusted_mode->crtc_vdisplay = mode->vdisplay;
- }
- adjusted_mode->base.id = mode_id;
- }
+ if (radeon_encoder->active_device & (ATOM_DEVICE_LCD_SUPPORT))
+ radeon_panel_mode_fixup(encoder, adjusted_mode);
/* get the native mode for TV */
if (radeon_encoder->active_device & (ATOM_DEVICE_TV_SUPPORT)) {
@@ -1074,8 +1108,6 @@ radeon_atom_encoder_dpms(struct drm_encoder *encoder, int mode)
}
radeon_atombios_encoder_dpms_scratch_regs(encoder, (mode == DRM_MODE_DPMS_ON) ? true : false);
- /* adjust pm to dpms change */
- radeon_pm_compute_clocks(rdev);
}
union crtc_source_param {
@@ -1326,7 +1358,7 @@ radeon_atom_encoder_mode_set(struct drm_encoder *encoder,
radeon_encoder->pixel_clock = adjusted_mode->clock;
- if (ASIC_IS_AVIVO(rdev)) {
+ if (ASIC_IS_AVIVO(rdev) && !ASIC_IS_DCE4(rdev)) {
if (radeon_encoder->active_device & (ATOM_DEVICE_CV_SUPPORT | ATOM_DEVICE_TV_SUPPORT))
atombios_yuv_setup(encoder, true);
else
@@ -1509,10 +1541,49 @@ static void radeon_atom_encoder_commit(struct drm_encoder *encoder)
static void radeon_atom_encoder_disable(struct drm_encoder *encoder)
{
+ struct drm_device *dev = encoder->dev;
+ struct radeon_device *rdev = dev->dev_private;
struct radeon_encoder *radeon_encoder = to_radeon_encoder(encoder);
struct radeon_encoder_atom_dig *dig;
radeon_atom_encoder_dpms(encoder, DRM_MODE_DPMS_OFF);
+ switch (radeon_encoder->encoder_id) {
+ case ENCODER_OBJECT_ID_INTERNAL_TMDS1:
+ case ENCODER_OBJECT_ID_INTERNAL_KLDSCP_TMDS1:
+ case ENCODER_OBJECT_ID_INTERNAL_LVDS:
+ case ENCODER_OBJECT_ID_INTERNAL_LVTM1:
+ atombios_digital_setup(encoder, PANEL_ENCODER_ACTION_DISABLE);
+ break;
+ case ENCODER_OBJECT_ID_INTERNAL_UNIPHY:
+ case ENCODER_OBJECT_ID_INTERNAL_UNIPHY1:
+ case ENCODER_OBJECT_ID_INTERNAL_UNIPHY2:
+ case ENCODER_OBJECT_ID_INTERNAL_KLDSCP_LVTMA:
+ if (ASIC_IS_DCE4(rdev))
+ /* disable the transmitter */
+ atombios_dig_transmitter_setup(encoder, ATOM_TRANSMITTER_ACTION_DISABLE, 0, 0);
+ else {
+ /* disable the encoder and transmitter */
+ atombios_dig_transmitter_setup(encoder, ATOM_TRANSMITTER_ACTION_DISABLE, 0, 0);
+ atombios_dig_encoder_setup(encoder, ATOM_DISABLE);
+ }
+ break;
+ case ENCODER_OBJECT_ID_INTERNAL_DDI:
+ atombios_ddia_setup(encoder, ATOM_DISABLE);
+ break;
+ case ENCODER_OBJECT_ID_INTERNAL_DVO1:
+ case ENCODER_OBJECT_ID_INTERNAL_KLDSCP_DVO1:
+ atombios_external_tmds_setup(encoder, ATOM_DISABLE);
+ break;
+ case ENCODER_OBJECT_ID_INTERNAL_DAC1:
+ case ENCODER_OBJECT_ID_INTERNAL_KLDSCP_DAC1:
+ case ENCODER_OBJECT_ID_INTERNAL_DAC2:
+ case ENCODER_OBJECT_ID_INTERNAL_KLDSCP_DAC2:
+ atombios_dac_setup(encoder, ATOM_DISABLE);
+ if (radeon_encoder->devices & (ATOM_DEVICE_TV_SUPPORT | ATOM_DEVICE_CV_SUPPORT))
+ atombios_tv_setup(encoder, ATOM_DISABLE);
+ break;
+ }
+
if (radeon_encoder_is_digital(encoder)) {
if (atombios_get_encoder_mode(encoder) == ATOM_ENCODER_MODE_HDMI)
r600_hdmi_disable(encoder);
diff --git a/drivers/gpu/drm/radeon/radeon_fb.c b/drivers/gpu/drm/radeon/radeon_fb.c
index 9ac57a09784..e192acfbf0c 100644
--- a/drivers/gpu/drm/radeon/radeon_fb.c
+++ b/drivers/gpu/drm/radeon/radeon_fb.c
@@ -23,10 +23,6 @@
* Authors:
* David Airlie
*/
- /*
- * Modularization
- */
-
#include <linux/module.h>
#include <linux/slab.h>
#include <linux/fb.h>
@@ -42,17 +38,21 @@
#include <linux/vga_switcheroo.h>
-struct radeon_fb_device {
+/* object hierarchy -
+ this contains a drm_fb_helper plus a radeon framebuffer;
+ the helper holds a pointer to the radeon framebuffer baseclass.
+*/
+struct radeon_fbdev {
struct drm_fb_helper helper;
- struct radeon_framebuffer *rfb;
- struct radeon_device *rdev;
+ struct radeon_framebuffer rfb;
+ struct list_head fbdev_list;
+ struct radeon_device *rdev;
};
static struct fb_ops radeonfb_ops = {
.owner = THIS_MODULE,
.fb_check_var = drm_fb_helper_check_var,
.fb_set_par = drm_fb_helper_set_par,
- .fb_setcolreg = drm_fb_helper_setcolreg,
.fb_fillrect = cfb_fillrect,
.fb_copyarea = cfb_copyarea,
.fb_imageblit = cfb_imageblit,
@@ -61,45 +61,6 @@ static struct fb_ops radeonfb_ops = {
.fb_setcmap = drm_fb_helper_setcmap,
};
-/**
- * Currently it is assumed that the old framebuffer is reused.
- *
- * LOCKING
- * caller should hold the mode config lock.
- *
- */
-int radeonfb_resize(struct drm_device *dev, struct drm_crtc *crtc)
-{
- struct fb_info *info;
- struct drm_framebuffer *fb;
- struct drm_display_mode *mode = crtc->desired_mode;
-
- fb = crtc->fb;
- if (fb == NULL) {
- return 1;
- }
- info = fb->fbdev;
- if (info == NULL) {
- return 1;
- }
- if (mode == NULL) {
- return 1;
- }
- info->var.xres = mode->hdisplay;
- info->var.right_margin = mode->hsync_start - mode->hdisplay;
- info->var.hsync_len = mode->hsync_end - mode->hsync_start;
- info->var.left_margin = mode->htotal - mode->hsync_end;
- info->var.yres = mode->vdisplay;
- info->var.lower_margin = mode->vsync_start - mode->vdisplay;
- info->var.vsync_len = mode->vsync_end - mode->vsync_start;
- info->var.upper_margin = mode->vtotal - mode->vsync_end;
- info->var.pixclock = 10000000 / mode->htotal * 1000 / mode->vtotal * 100;
- /* avoid overflow */
- info->var.pixclock = info->var.pixclock * 1000 / mode->vrefresh;
-
- return 0;
-}
-EXPORT_SYMBOL(radeonfb_resize);
static int radeon_align_pitch(struct radeon_device *rdev, int width, int bpp, bool tiled)
{
@@ -125,57 +86,44 @@ static int radeon_align_pitch(struct radeon_device *rdev, int width, int bpp, bo
return aligned;
}
-static struct drm_fb_helper_funcs radeon_fb_helper_funcs = {
- .gamma_set = radeon_crtc_fb_gamma_set,
- .gamma_get = radeon_crtc_fb_gamma_get,
-};
+static void radeonfb_destroy_pinned_object(struct drm_gem_object *gobj)
+{
+ struct radeon_bo *rbo = gobj->driver_private;
+ int ret;
+
+ ret = radeon_bo_reserve(rbo, false);
+ if (likely(ret == 0)) {
+ radeon_bo_kunmap(rbo);
+ radeon_bo_unreserve(rbo);
+ }
+ drm_gem_object_unreference_unlocked(gobj);
+}
-int radeonfb_create(struct drm_device *dev,
- uint32_t fb_width, uint32_t fb_height,
- uint32_t surface_width, uint32_t surface_height,
- uint32_t surface_depth, uint32_t surface_bpp,
- struct drm_framebuffer **fb_p)
+static int radeonfb_create_pinned_object(struct radeon_fbdev *rfbdev,
+ struct drm_mode_fb_cmd *mode_cmd,
+ struct drm_gem_object **gobj_p)
{
- struct radeon_device *rdev = dev->dev_private;
- struct fb_info *info;
- struct radeon_fb_device *rfbdev;
- struct drm_framebuffer *fb = NULL;
- struct radeon_framebuffer *rfb;
- struct drm_mode_fb_cmd mode_cmd;
+ struct radeon_device *rdev = rfbdev->rdev;
struct drm_gem_object *gobj = NULL;
struct radeon_bo *rbo = NULL;
- struct device *device = &rdev->pdev->dev;
- int size, aligned_size, ret;
- u64 fb_gpuaddr;
- void *fbptr = NULL;
- unsigned long tmp;
bool fb_tiled = false; /* useful for testing */
u32 tiling_flags = 0;
+ int ret;
+ int aligned_size, size;
- mode_cmd.width = surface_width;
- mode_cmd.height = surface_height;
-
- /* avivo can't scanout real 24bpp */
- if ((surface_bpp == 24) && ASIC_IS_AVIVO(rdev))
- surface_bpp = 32;
-
- mode_cmd.bpp = surface_bpp;
/* need to align pitch with crtc limits */
- mode_cmd.pitch = radeon_align_pitch(rdev, mode_cmd.width, mode_cmd.bpp, fb_tiled) * ((mode_cmd.bpp + 1) / 8);
- mode_cmd.depth = surface_depth;
+ mode_cmd->pitch = radeon_align_pitch(rdev, mode_cmd->width, mode_cmd->bpp, fb_tiled) * ((mode_cmd->bpp + 1) / 8);
- size = mode_cmd.pitch * mode_cmd.height;
+ size = mode_cmd->pitch * mode_cmd->height;
aligned_size = ALIGN(size, PAGE_SIZE);
-
ret = radeon_gem_object_create(rdev, aligned_size, 0,
- RADEON_GEM_DOMAIN_VRAM,
- false, ttm_bo_type_kernel,
- &gobj);
+ RADEON_GEM_DOMAIN_VRAM,
+ false, ttm_bo_type_kernel,
+ &gobj);
if (ret) {
- printk(KERN_ERR "failed to allocate framebuffer (%d %d)\n",
- surface_width, surface_height);
- ret = -ENOMEM;
- goto out;
+ printk(KERN_ERR "failed to allocate framebuffer (%d)\n",
+ aligned_size);
+ return -ENOMEM;
}
rbo = gobj->driver_private;
@@ -183,7 +131,7 @@ int radeonfb_create(struct drm_device *dev,
tiling_flags = RADEON_TILING_MACRO;
#ifdef __BIG_ENDIAN
- switch (mode_cmd.bpp) {
+ switch (mode_cmd->bpp) {
case 32:
tiling_flags |= RADEON_TILING_SWAP_32BIT;
break;
@@ -196,57 +144,81 @@ int radeonfb_create(struct drm_device *dev,
if (tiling_flags) {
ret = radeon_bo_set_tiling_flags(rbo,
- tiling_flags | RADEON_TILING_SURFACE,
- mode_cmd.pitch);
+ tiling_flags | RADEON_TILING_SURFACE,
+ mode_cmd->pitch);
if (ret)
dev_err(rdev->dev, "FB failed to set tiling flags\n");
}
- mutex_lock(&rdev->ddev->struct_mutex);
- fb = radeon_framebuffer_create(rdev->ddev, &mode_cmd, gobj);
- if (fb == NULL) {
- DRM_ERROR("failed to allocate fb.\n");
- ret = -ENOMEM;
- goto out_unref;
- }
+
+
ret = radeon_bo_reserve(rbo, false);
if (unlikely(ret != 0))
goto out_unref;
- ret = radeon_bo_pin(rbo, RADEON_GEM_DOMAIN_VRAM, &fb_gpuaddr);
+ ret = radeon_bo_pin(rbo, RADEON_GEM_DOMAIN_VRAM, NULL);
if (ret) {
radeon_bo_unreserve(rbo);
goto out_unref;
}
if (fb_tiled)
radeon_bo_check_tiling(rbo, 0, 0);
- ret = radeon_bo_kmap(rbo, &fbptr);
+ ret = radeon_bo_kmap(rbo, NULL);
radeon_bo_unreserve(rbo);
if (ret) {
goto out_unref;
}
- list_add(&fb->filp_head, &rdev->ddev->mode_config.fb_kernel_list);
+ *gobj_p = gobj;
+ return 0;
+out_unref:
+ radeonfb_destroy_pinned_object(gobj);
+ *gobj_p = NULL;
+ return ret;
+}
+
+static int radeonfb_create(struct radeon_fbdev *rfbdev,
+ struct drm_fb_helper_surface_size *sizes)
+{
+ struct radeon_device *rdev = rfbdev->rdev;
+ struct fb_info *info;
+ struct drm_framebuffer *fb = NULL;
+ struct drm_mode_fb_cmd mode_cmd;
+ struct drm_gem_object *gobj = NULL;
+ struct radeon_bo *rbo = NULL;
+ struct device *device = &rdev->pdev->dev;
+ int ret;
+ unsigned long tmp;
+
+ mode_cmd.width = sizes->surface_width;
+ mode_cmd.height = sizes->surface_height;
+
+ /* avivo can't scanout real 24bpp */
+ if ((sizes->surface_bpp == 24) && ASIC_IS_AVIVO(rdev))
+ sizes->surface_bpp = 32;
+
+ mode_cmd.bpp = sizes->surface_bpp;
+ mode_cmd.depth = sizes->surface_depth;
- *fb_p = fb;
- rfb = to_radeon_framebuffer(fb);
- rdev->fbdev_rfb = rfb;
- rdev->fbdev_rbo = rbo;
+ ret = radeonfb_create_pinned_object(rfbdev, &mode_cmd, &gobj);
+ rbo = gobj->driver_private;
- info = framebuffer_alloc(sizeof(struct radeon_fb_device), device);
+ /* okay, we have an object; now allocate the framebuffer */
+ info = framebuffer_alloc(0, device);
if (info == NULL) {
ret = -ENOMEM;
goto out_unref;
}
- rdev->fbdev_info = info;
- rfbdev = info->par;
- rfbdev->helper.funcs = &radeon_fb_helper_funcs;
- rfbdev->helper.dev = dev;
- ret = drm_fb_helper_init_crtc_count(&rfbdev->helper, rdev->num_crtc,
- RADEONFB_CONN_LIMIT);
- if (ret)
- goto out_unref;
+ info->par = rfbdev;
+
+ radeon_framebuffer_init(rdev->ddev, &rfbdev->rfb, &mode_cmd, gobj);
- memset_io(fbptr, 0x0, aligned_size);
+ fb = &rfbdev->rfb.base;
+
+ /* setup helper */
+ rfbdev->helper.fb = fb;
+ rfbdev->helper.fbdev = info;
+
+ memset_io(rbo->kptr, 0x0, radeon_bo_size(rbo));
strcpy(info->fix.id, "radeondrmfb");
@@ -255,17 +227,22 @@ int radeonfb_create(struct drm_device *dev,
info->flags = FBINFO_DEFAULT;
info->fbops = &radeonfb_ops;
- tmp = fb_gpuaddr - rdev->mc.vram_start;
+ tmp = radeon_bo_gpu_offset(rbo) - rdev->mc.vram_start;
info->fix.smem_start = rdev->mc.aper_base + tmp;
- info->fix.smem_len = size;
- info->screen_base = fbptr;
- info->screen_size = size;
+ info->fix.smem_len = radeon_bo_size(rbo);
+ info->screen_base = rbo->kptr;
+ info->screen_size = radeon_bo_size(rbo);
- drm_fb_helper_fill_var(info, fb, fb_width, fb_height);
+ drm_fb_helper_fill_var(info, &rfbdev->helper, sizes->fb_width, sizes->fb_height);
/* setup aperture base/size for vesafb takeover */
- info->aperture_base = rdev->ddev->mode_config.fb_base;
- info->aperture_size = rdev->mc.real_vram_size;
+ info->apertures = alloc_apertures(1);
+ if (!info->apertures) {
+ ret = -ENOMEM;
+ goto out_unref;
+ }
+ info->apertures->ranges[0].base = rdev->ddev->mode_config.fb_base;
+ info->apertures->ranges[0].size = rdev->mc.real_vram_size;
info->fix.mmio_start = 0;
info->fix.mmio_len = 0;
@@ -274,44 +251,55 @@ int radeonfb_create(struct drm_device *dev,
info->pixmap.access_align = 32;
info->pixmap.flags = FB_PIXMAP_SYSTEM;
info->pixmap.scan_align = 1;
+
if (info->screen_base == NULL) {
ret = -ENOSPC;
goto out_unref;
}
+
+ ret = fb_alloc_cmap(&info->cmap, 256, 0);
+ if (ret) {
+ ret = -ENOMEM;
+ goto out_unref;
+ }
+
DRM_INFO("fb mappable at 0x%lX\n", info->fix.smem_start);
DRM_INFO("vram aperture at 0x%lX\n", (unsigned long)rdev->mc.aper_base);
- DRM_INFO("size %lu\n", (unsigned long)size);
+ DRM_INFO("size %lu\n", (unsigned long)radeon_bo_size(rbo));
DRM_INFO("fb depth is %d\n", fb->depth);
DRM_INFO(" pitch is %d\n", fb->pitch);
- fb->fbdev = info;
- rfbdev->rfb = rfb;
- rfbdev->rdev = rdev;
-
- mutex_unlock(&rdev->ddev->struct_mutex);
vga_switcheroo_client_fb_set(rdev->ddev->pdev, info);
return 0;
out_unref:
if (rbo) {
- ret = radeon_bo_reserve(rbo, false);
- if (likely(ret == 0)) {
- radeon_bo_kunmap(rbo);
- radeon_bo_unreserve(rbo);
- }
+
}
if (fb && ret) {
- list_del(&fb->filp_head);
drm_gem_object_unreference(gobj);
drm_framebuffer_cleanup(fb);
kfree(fb);
}
- drm_gem_object_unreference(gobj);
- mutex_unlock(&rdev->ddev->struct_mutex);
-out:
return ret;
}
+static int radeon_fb_find_or_create_single(struct drm_fb_helper *helper,
+ struct drm_fb_helper_surface_size *sizes)
+{
+ struct radeon_fbdev *rfbdev = (struct radeon_fbdev *)helper;
+ int new_fb = 0;
+ int ret;
+
+ if (!helper->fb) {
+ ret = radeonfb_create(rfbdev, sizes);
+ if (ret)
+ return ret;
+ new_fb = 1;
+ }
+ return new_fb;
+}
+
static char *mode_option;
int radeon_parse_options(char *options)
{
@@ -328,46 +316,102 @@ int radeon_parse_options(char *options)
return 0;
}
-int radeonfb_probe(struct drm_device *dev)
+void radeon_fb_output_poll_changed(struct radeon_device *rdev)
{
- struct radeon_device *rdev = dev->dev_private;
- int bpp_sel = 32;
-
- /* select 8 bpp console on RN50 or 16MB cards */
- if (ASIC_IS_RN50(rdev) || rdev->mc.real_vram_size <= (32*1024*1024))
- bpp_sel = 8;
-
- return drm_fb_helper_single_fb_probe(dev, bpp_sel, &radeonfb_create);
+ drm_fb_helper_hotplug_event(&rdev->mode_info.rfbdev->helper);
}
-int radeonfb_remove(struct drm_device *dev, struct drm_framebuffer *fb)
+static int radeon_fbdev_destroy(struct drm_device *dev, struct radeon_fbdev *rfbdev)
{
struct fb_info *info;
- struct radeon_framebuffer *rfb = to_radeon_framebuffer(fb);
+ struct radeon_framebuffer *rfb = &rfbdev->rfb;
struct radeon_bo *rbo;
int r;
- if (!fb) {
- return -EINVAL;
+ if (rfbdev->helper.fbdev) {
+ info = rfbdev->helper.fbdev;
+
+ unregister_framebuffer(info);
+ if (info->cmap.len)
+ fb_dealloc_cmap(&info->cmap);
+ framebuffer_release(info);
}
- info = fb->fbdev;
- if (info) {
- struct radeon_fb_device *rfbdev = info->par;
+
+ if (rfb->obj) {
rbo = rfb->obj->driver_private;
- unregister_framebuffer(info);
r = radeon_bo_reserve(rbo, false);
if (likely(r == 0)) {
radeon_bo_kunmap(rbo);
radeon_bo_unpin(rbo);
radeon_bo_unreserve(rbo);
}
- drm_fb_helper_free(&rfbdev->helper);
- framebuffer_release(info);
+ drm_gem_object_unreference_unlocked(rfb->obj);
}
+ drm_fb_helper_fini(&rfbdev->helper);
+ drm_framebuffer_cleanup(&rfb->base);
- printk(KERN_INFO "unregistered panic notifier\n");
+ return 0;
+}
+
+static struct drm_fb_helper_funcs radeon_fb_helper_funcs = {
+ .gamma_set = radeon_crtc_fb_gamma_set,
+ .gamma_get = radeon_crtc_fb_gamma_get,
+ .fb_probe = radeon_fb_find_or_create_single,
+};
+
+int radeon_fbdev_init(struct radeon_device *rdev)
+{
+ struct radeon_fbdev *rfbdev;
+ int bpp_sel = 32;
+
+ /* select 8 bpp console on RN50 or 16MB cards */
+ if (ASIC_IS_RN50(rdev) || rdev->mc.real_vram_size <= (32*1024*1024))
+ bpp_sel = 8;
+
+ rfbdev = kzalloc(sizeof(struct radeon_fbdev), GFP_KERNEL);
+ if (!rfbdev)
+ return -ENOMEM;
+
+ rfbdev->rdev = rdev;
+ rdev->mode_info.rfbdev = rfbdev;
+ rfbdev->helper.funcs = &radeon_fb_helper_funcs;
+ drm_fb_helper_init(rdev->ddev, &rfbdev->helper,
+ rdev->num_crtc,
+ RADEONFB_CONN_LIMIT);
+ drm_fb_helper_single_add_all_connectors(&rfbdev->helper);
+ drm_fb_helper_initial_config(&rfbdev->helper, bpp_sel);
return 0;
}
-EXPORT_SYMBOL(radeonfb_remove);
-MODULE_LICENSE("GPL");
+
+void radeon_fbdev_fini(struct radeon_device *rdev)
+{
+ if (!rdev->mode_info.rfbdev)
+ return;
+
+ radeon_fbdev_destroy(rdev->ddev, rdev->mode_info.rfbdev);
+ kfree(rdev->mode_info.rfbdev);
+ rdev->mode_info.rfbdev = NULL;
+}
+
+void radeon_fbdev_set_suspend(struct radeon_device *rdev, int state)
+{
+ fb_set_suspend(rdev->mode_info.rfbdev->helper.fbdev, state);
+}
+
+int radeon_fbdev_total_size(struct radeon_device *rdev)
+{
+ struct radeon_bo *robj;
+ int size = 0;
+
+ robj = rdev->mode_info.rfbdev->rfb.obj->driver_private;
+ size += radeon_bo_size(robj);
+ return size;
+}
+
+bool radeon_fbdev_robj_is_fb(struct radeon_device *rdev, struct radeon_bo *robj)
+{
+ if (robj == rdev->mode_info.rfbdev->rfb.obj->driver_private)
+ return true;
+ return false;
+}
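
With rdev->fbdev_rbo and rdev->fbdev_info removed, the rest of the driver reaches the console framebuffer only through these small accessors. The two call sites converted earlier in this diff:

/* radeon_device.c suspend path: only unpin userspace framebuffers */
if (!radeon_fbdev_robj_is_fb(rdev, robj)) {
        r = radeon_bo_reserve(robj, false);
        if (r == 0) {
                radeon_bo_unpin(robj);
                radeon_bo_unreserve(robj);
        }
}

/* radeon_gem.c info ioctl: account the console BO against visible VRAM */
args->vram_visible -= radeon_fbdev_total_size(rdev);
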
diff --git a/drivers/gpu/drm/radeon/radeon_fence.c b/drivers/gpu/drm/radeon/radeon_fence.c
index d90f95b405c..b1f9a81b5d1 100644
--- a/drivers/gpu/drm/radeon/radeon_fence.c
+++ b/drivers/gpu/drm/radeon/radeon_fence.c
@@ -58,7 +58,6 @@ int radeon_fence_emit(struct radeon_device *rdev, struct radeon_fence *fence)
radeon_fence_ring_emit(rdev, fence);
fence->emited = true;
- fence->timeout = jiffies + ((2000 * HZ) / 1000);
list_del(&fence->list);
list_add_tail(&fence->list, &rdev->fence_drv.emited);
write_unlock_irqrestore(&rdev->fence_drv.lock, irq_flags);
@@ -71,15 +70,34 @@ static bool radeon_fence_poll_locked(struct radeon_device *rdev)
struct list_head *i, *n;
uint32_t seq;
bool wake = false;
+ unsigned long cjiffies;
- if (rdev == NULL) {
- return true;
- }
- if (rdev->shutdown) {
- return true;
- }
seq = RREG32(rdev->fence_drv.scratch_reg);
- rdev->fence_drv.last_seq = seq;
+ if (seq != rdev->fence_drv.last_seq) {
+ rdev->fence_drv.last_seq = seq;
+ rdev->fence_drv.last_jiffies = jiffies;
+ rdev->fence_drv.last_timeout = RADEON_FENCE_JIFFIES_TIMEOUT;
+ } else {
+ cjiffies = jiffies;
+ if (time_after(cjiffies, rdev->fence_drv.last_jiffies)) {
+ cjiffies -= rdev->fence_drv.last_jiffies;
+ if (time_after(rdev->fence_drv.last_timeout, cjiffies)) {
+ /* update the timeout */
+ rdev->fence_drv.last_timeout -= cjiffies;
+ } else {
+ /* the 500ms timeout has elapsed, we should test
+ * for GPU lockup
+ */
+ rdev->fence_drv.last_timeout = 1;
+ }
+ } else {
+ /* jiffies wrapped around; update last_jiffies, we will just wait
+ * a little longer
+ */
+ rdev->fence_drv.last_jiffies = cjiffies;
+ }
+ return false;
+ }
n = NULL;
list_for_each(i, &rdev->fence_drv.emited) {
fence = list_entry(i, struct radeon_fence, list);
@@ -171,9 +189,8 @@ bool radeon_fence_signaled(struct radeon_fence *fence)
int radeon_fence_wait(struct radeon_fence *fence, bool intr)
{
struct radeon_device *rdev;
- unsigned long cur_jiffies;
- unsigned long timeout;
- bool expired = false;
+ unsigned long irq_flags, timeout;
+ u32 seq;
int r;
if (fence == NULL) {
@@ -184,21 +201,18 @@ int radeon_fence_wait(struct radeon_fence *fence, bool intr)
if (radeon_fence_signaled(fence)) {
return 0;
}
-
+ timeout = rdev->fence_drv.last_timeout;
retry:
- cur_jiffies = jiffies;
- timeout = HZ / 100;
- if (time_after(fence->timeout, cur_jiffies)) {
- timeout = fence->timeout - cur_jiffies;
- }
-
+ /* save current sequence used to check for GPU lockup */
+ seq = rdev->fence_drv.last_seq;
if (intr) {
radeon_irq_kms_sw_irq_get(rdev);
r = wait_event_interruptible_timeout(rdev->fence_drv.queue,
radeon_fence_signaled(fence), timeout);
radeon_irq_kms_sw_irq_put(rdev);
- if (unlikely(r < 0))
+ if (unlikely(r < 0)) {
return r;
+ }
} else {
radeon_irq_kms_sw_irq_get(rdev);
r = wait_event_timeout(rdev->fence_drv.queue,
@@ -206,38 +220,36 @@ retry:
radeon_irq_kms_sw_irq_put(rdev);
}
if (unlikely(!radeon_fence_signaled(fence))) {
- if (unlikely(r == 0)) {
- expired = true;
+ /* we were interrupted for some reason and the fence
+ * isn't signaled yet, resume the wait
+ */
+ if (r) {
+ timeout = r;
+ goto retry;
}
- if (unlikely(expired)) {
- timeout = 1;
- if (time_after(cur_jiffies, fence->timeout)) {
- timeout = cur_jiffies - fence->timeout;
- }
- timeout = jiffies_to_msecs(timeout);
- if (timeout > 500) {
- DRM_ERROR("fence(%p:0x%08X) %lums timeout "
- "going to reset GPU\n",
- fence, fence->seq, timeout);
- radeon_gpu_reset(rdev);
- WREG32(rdev->fence_drv.scratch_reg, fence->seq);
- }
+ /* no need to protect read access to rdev->fence_drv.last_seq:
+ * if we are experiencing a lockup the value doesn't change
+ */
+ if (seq == rdev->fence_drv.last_seq && radeon_gpu_is_lockup(rdev)) {
+ /* good news we believe it's a lockup */
+ WARN(1, "GPU lockup (waiting for 0x%08X last fence id 0x%08X)\n", fence->seq, seq);
+ /* FIXME: what should we do ? marking everyone
+ * as signaled for now
+ */
+ rdev->gpu_lockup = true;
+ r = radeon_gpu_reset(rdev);
+ if (r)
+ return r;
+ WREG32(rdev->fence_drv.scratch_reg, fence->seq);
+ rdev->gpu_lockup = false;
}
+ timeout = RADEON_FENCE_JIFFIES_TIMEOUT;
+ write_lock_irqsave(&rdev->fence_drv.lock, irq_flags);
+ rdev->fence_drv.last_timeout = RADEON_FENCE_JIFFIES_TIMEOUT;
+ rdev->fence_drv.last_jiffies = jiffies;
+ write_unlock_irqrestore(&rdev->fence_drv.lock, irq_flags);
goto retry;
}
- if (unlikely(expired)) {
- rdev->fence_drv.count_timeout++;
- cur_jiffies = jiffies;
- timeout = 1;
- if (time_after(cur_jiffies, fence->timeout)) {
- timeout = cur_jiffies - fence->timeout;
- }
- timeout = jiffies_to_msecs(timeout);
- DRM_ERROR("fence(%p:0x%08X) %lums timeout\n",
- fence, fence->seq, timeout);
- DRM_ERROR("last signaled fence(0x%08X)\n",
- rdev->fence_drv.last_seq);
- }
return 0;
}
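
The rework drops the per-fence absolute deadline (fence->timeout is deleted above). radeon_fence_poll_locked() now tracks whether the sequence counter is still advancing, and radeon_fence_wait() sleeps in bounded slices, declaring a lockup only when a full slice elapses with no sequence movement and the per-asic radeon_gpu_is_lockup() hook agrees; a slow-but-alive GPU simply keeps the wait going instead of triggering a spurious reset. A condensed sketch of the loop (uninterruptible variant, with the sw-irq refcounting and fence_drv locking elided):

timeout = rdev->fence_drv.last_timeout;
for (;;) {
        seq = rdev->fence_drv.last_seq;         /* snapshot before sleeping */
        r = wait_event_timeout(rdev->fence_drv.queue,
                               radeon_fence_signaled(fence), timeout);
        if (radeon_fence_signaled(fence))
                return 0;
        if (r) {                                /* woke early: resume the wait */
                timeout = r;
                continue;
        }
        /* a full slice passed; if the seq never moved, ask the asic */
        if (seq == rdev->fence_drv.last_seq && radeon_gpu_is_lockup(rdev)) {
                rdev->gpu_lockup = true;
                r = radeon_gpu_reset(rdev);
                if (r)
                        return r;
                WREG32(rdev->fence_drv.scratch_reg, fence->seq);
                rdev->gpu_lockup = false;
        }
        timeout = RADEON_FENCE_JIFFIES_TIMEOUT; /* arm the next slice */
}
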
@@ -333,7 +345,6 @@ int radeon_fence_driver_init(struct radeon_device *rdev)
INIT_LIST_HEAD(&rdev->fence_drv.created);
INIT_LIST_HEAD(&rdev->fence_drv.emited);
INIT_LIST_HEAD(&rdev->fence_drv.signaled);
- rdev->fence_drv.count_timeout = 0;
init_waitqueue_head(&rdev->fence_drv.queue);
rdev->fence_drv.initialized = true;
write_unlock_irqrestore(&rdev->fence_drv.lock, irq_flags);
diff --git a/drivers/gpu/drm/radeon/radeon_fixed.h b/drivers/gpu/drm/radeon/radeon_fixed.h
deleted file mode 100644
index 3d4d84e078a..00000000000
--- a/drivers/gpu/drm/radeon/radeon_fixed.h
+++ /dev/null
@@ -1,67 +0,0 @@
-/*
- * Copyright 2009 Red Hat Inc.
- *
- * Permission is hereby granted, free of charge, to any person obtaining a
- * copy of this software and associated documentation files (the "Software"),
- * to deal in the Software without restriction, including without limitation
- * the rights to use, copy, modify, merge, publish, distribute, sublicense,
- * and/or sell copies of the Software, and to permit persons to whom the
- * Software is furnished to do so, subject to the following conditions:
- *
- * The above copyright notice and this permission notice shall be included in
- * all copies or substantial portions of the Software.
- *
- * THE SOFTWARE IS PROVIDED "AS IS", WITHOUT WARRANTY OF ANY KIND, EXPRESS OR
- * IMPLIED, INCLUDING BUT NOT LIMITED TO THE WARRANTIES OF MERCHANTABILITY,
- * FITNESS FOR A PARTICULAR PURPOSE AND NONINFRINGEMENT. IN NO EVENT SHALL
- * THE COPYRIGHT HOLDER(S) OR AUTHOR(S) BE LIABLE FOR ANY CLAIM, DAMAGES OR
- * OTHER LIABILITY, WHETHER IN AN ACTION OF CONTRACT, TORT OR OTHERWISE,
- * ARISING FROM, OUT OF OR IN CONNECTION WITH THE SOFTWARE OR THE USE OR
- * OTHER DEALINGS IN THE SOFTWARE.
- *
- * Authors: Dave Airlie
- */
-#ifndef RADEON_FIXED_H
-#define RADEON_FIXED_H
-
-typedef union rfixed {
- u32 full;
-} fixed20_12;
-
-
-#define rfixed_const(A) (u32)(((A) << 12))/* + ((B + 0.000122)*4096)) */
-#define rfixed_const_half(A) (u32)(((A) << 12) + 2048)
-#define rfixed_const_666(A) (u32)(((A) << 12) + 2731)
-#define rfixed_const_8(A) (u32)(((A) << 12) + 3277)
-#define rfixed_mul(A, B) ((u64)((u64)(A).full * (B).full + 2048) >> 12)
-#define fixed_init(A) { .full = rfixed_const((A)) }
-#define fixed_init_half(A) { .full = rfixed_const_half((A)) }
-#define rfixed_trunc(A) ((A).full >> 12)
-
-static inline u32 rfixed_floor(fixed20_12 A)
-{
- u32 non_frac = rfixed_trunc(A);
-
- return rfixed_const(non_frac);
-}
-
-static inline u32 rfixed_ceil(fixed20_12 A)
-{
- u32 non_frac = rfixed_trunc(A);
-
- if (A.full > rfixed_const(non_frac))
- return rfixed_const(non_frac + 1);
- else
- return rfixed_const(non_frac);
-}
-
-static inline u32 rfixed_div(fixed20_12 A, fixed20_12 B)
-{
- u64 tmp = ((u64)A.full << 13);
-
- do_div(tmp, B.full);
- tmp += 1;
- tmp /= 2;
- return lower_32_bits(tmp);
-}
-#endif
diff --git a/drivers/gpu/drm/radeon/radeon_gart.c b/drivers/gpu/drm/radeon/radeon_gart.c
index 1770d3c07fd..e65b90317fa 100644
--- a/drivers/gpu/drm/radeon/radeon_gart.c
+++ b/drivers/gpu/drm/radeon/radeon_gart.c
@@ -173,7 +173,7 @@ int radeon_gart_bind(struct radeon_device *rdev, unsigned offset,
int i, j;
if (!rdev->gart.ready) {
- DRM_ERROR("trying to bind memory to unitialized GART !\n");
+ WARN(1, "trying to bind memory to uninitialized GART!\n");
return -EINVAL;
}
t = offset / RADEON_GPU_PAGE_SIZE;
diff --git a/drivers/gpu/drm/radeon/radeon_gem.c b/drivers/gpu/drm/radeon/radeon_gem.c
index ef92d147d8f..a72a3ee5d69 100644
--- a/drivers/gpu/drm/radeon/radeon_gem.c
+++ b/drivers/gpu/drm/radeon/radeon_gem.c
@@ -44,6 +44,9 @@ void radeon_gem_object_free(struct drm_gem_object *gobj)
if (robj) {
radeon_bo_unref(&robj);
}
+
+ drm_gem_object_release(gobj);
+ kfree(gobj);
}
int radeon_gem_object_create(struct radeon_device *rdev, int size,
@@ -158,8 +161,7 @@ int radeon_gem_info_ioctl(struct drm_device *dev, void *data,
args->vram_visible = rdev->mc.real_vram_size;
if (rdev->stollen_vga_memory)
args->vram_visible -= radeon_bo_size(rdev->stollen_vga_memory);
- if (rdev->fbdev_rbo)
- args->vram_visible -= radeon_bo_size(rdev->fbdev_rbo);
+ args->vram_visible -= radeon_fbdev_total_size(rdev);
args->gart_size = rdev->mc.gtt_size - rdev->cp.ring_size - 4096 -
RADEON_IB_POOL_SIZE*64*1024;
return 0;
diff --git a/drivers/gpu/drm/radeon/radeon_irq_kms.c b/drivers/gpu/drm/radeon/radeon_irq_kms.c
index a212041e8b0..059bfa4098d 100644
--- a/drivers/gpu/drm/radeon/radeon_irq_kms.c
+++ b/drivers/gpu/drm/radeon/radeon_irq_kms.c
@@ -26,6 +26,7 @@
* Jerome Glisse
*/
#include "drmP.h"
+#include "drm_crtc_helper.h"
#include "radeon_drm.h"
#include "radeon_reg.h"
#include "radeon.h"
@@ -55,7 +56,7 @@ static void radeon_hotplug_work_func(struct work_struct *work)
radeon_connector_hotplug(connector);
}
/* Just fire off a uevent and let userspace tell us what to do */
- drm_sysfs_hotplug_event(dev);
+ drm_helper_hpd_irq_event(dev);
}
void radeon_driver_irq_preinstall_kms(struct drm_device *dev)
@@ -67,6 +68,7 @@ void radeon_driver_irq_preinstall_kms(struct drm_device *dev)
/* Disable *all* interrupts */
rdev->irq.sw_int = false;
+ rdev->irq.gui_idle = false;
for (i = 0; i < rdev->num_crtc; i++)
rdev->irq.crtc_vblank_int[i] = false;
for (i = 0; i < 6; i++)
@@ -96,6 +98,7 @@ void radeon_driver_irq_uninstall_kms(struct drm_device *dev)
}
/* Disable *all* interrupts */
rdev->irq.sw_int = false;
+ rdev->irq.gui_idle = false;
for (i = 0; i < rdev->num_crtc; i++)
rdev->irq.crtc_vblank_int[i] = false;
for (i = 0; i < 6; i++)
diff --git a/drivers/gpu/drm/radeon/radeon_kms.c b/drivers/gpu/drm/radeon/radeon_kms.c
index d3657dcfdd2..04068352ccd 100644
--- a/drivers/gpu/drm/radeon/radeon_kms.c
+++ b/drivers/gpu/drm/radeon/radeon_kms.c
@@ -98,11 +98,15 @@ int radeon_info_ioctl(struct drm_device *dev, void *data, struct drm_file *filp)
{
struct radeon_device *rdev = dev->dev_private;
struct drm_radeon_info *info;
+ struct radeon_mode_info *minfo = &rdev->mode_info;
uint32_t *value_ptr;
uint32_t value;
+ struct drm_crtc *crtc;
+ int i, found;
info = data;
value_ptr = (uint32_t *)((unsigned long)info->value);
+ value = *value_ptr;
switch (info->request) {
case RADEON_INFO_DEVICE_ID:
value = dev->pci_device;
@@ -116,6 +120,20 @@ int radeon_info_ioctl(struct drm_device *dev, void *data, struct drm_file *filp)
case RADEON_INFO_ACCEL_WORKING:
value = rdev->accel_working;
break;
+ case RADEON_INFO_CRTC_FROM_ID:
+ for (i = 0, found = 0; i < rdev->num_crtc; i++) {
+ crtc = (struct drm_crtc *)minfo->crtcs[i];
+ if (crtc && crtc->base.id == value) {
+ value = i;
+ found = 1;
+ break;
+ }
+ }
+ if (!found) {
+ DRM_DEBUG("unknown crtc id %d\n", value);
+ return -EINVAL;
+ }
+ break;
default:
DRM_DEBUG("Invalid request %d\n", info->request);
return -EINVAL;
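
This is the consumer of the 2.4.0 version bump in radeon_drv.c: RADEON_INFO_CRTC_FROM_ID maps a DRM mode-object CRTC id to the hardware crtc index. Note the hunk reads the request's incoming value through the same pointer it answers through, so userspace passes the object id in and gets the index back in place. A hedged userspace sketch using libdrm's generic command wrapper (struct layout and the RADEON_INFO_CRTC_FROM_ID define assumed from the radeon_drm.h side of this series, which is outside this section; error handling trimmed):

#include <stdint.h>
#include <xf86drm.h>
#include <radeon_drm.h>

static int radeon_crtc_index_from_id(int fd, uint32_t crtc_id)
{
        struct drm_radeon_info info = {0};
        uint32_t value = crtc_id;       /* in: object id, out: hw crtc index */

        info.request = RADEON_INFO_CRTC_FROM_ID;
        info.value = (uintptr_t)&value;
        if (drmCommandWriteRead(fd, DRM_RADEON_INFO, &info, sizeof(info)))
                return -1;
        return (int)value;
}
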
@@ -165,7 +183,7 @@ u32 radeon_get_vblank_counter_kms(struct drm_device *dev, int crtc)
{
struct radeon_device *rdev = dev->dev_private;
- if (crtc < 0 || crtc > 1) {
+ if (crtc < 0 || crtc >= rdev->num_crtc) {
DRM_ERROR("Invalid crtc %d\n", crtc);
return -EINVAL;
}
@@ -177,7 +195,7 @@ int radeon_enable_vblank_kms(struct drm_device *dev, int crtc)
{
struct radeon_device *rdev = dev->dev_private;
- if (crtc < 0 || crtc > 1) {
+ if (crtc < 0 || crtc >= rdev->num_crtc) {
DRM_ERROR("Invalid crtc %d\n", crtc);
return -EINVAL;
}
@@ -191,7 +209,7 @@ void radeon_disable_vblank_kms(struct drm_device *dev, int crtc)
{
struct radeon_device *rdev = dev->dev_private;
- if (crtc < 0 || crtc > 1) {
+ if (crtc < 0 || crtc >= rdev->num_crtc) {
DRM_ERROR("Invalid crtc %d\n", crtc);
return;
}
diff --git a/drivers/gpu/drm/radeon/radeon_legacy_crtc.c b/drivers/gpu/drm/radeon/radeon_legacy_crtc.c
index 88865e38fe3..e1e5255396a 100644
--- a/drivers/gpu/drm/radeon/radeon_legacy_crtc.c
+++ b/drivers/gpu/drm/radeon/radeon_legacy_crtc.c
@@ -26,7 +26,7 @@
#include <drm/drmP.h>
#include <drm/drm_crtc_helper.h>
#include <drm/radeon_drm.h>
-#include "radeon_fixed.h"
+#include <drm/drm_fixed.h>
#include "radeon.h"
#include "atom.h"
@@ -314,6 +314,9 @@ void radeon_crtc_dpms(struct drm_crtc *crtc, int mode)
switch (mode) {
case DRM_MODE_DPMS_ON:
+ radeon_crtc->enabled = true;
+ /* adjust pm to dpms changes BEFORE enabling crtcs */
+ radeon_pm_compute_clocks(rdev);
if (radeon_crtc->crtc_id)
WREG32_P(RADEON_CRTC2_GEN_CNTL, RADEON_CRTC2_EN, ~(RADEON_CRTC2_EN | mask));
else {
@@ -335,6 +338,9 @@ void radeon_crtc_dpms(struct drm_crtc *crtc, int mode)
RADEON_CRTC_DISP_REQ_EN_B));
WREG32_P(RADEON_CRTC_EXT_CNTL, mask, ~mask);
}
+ radeon_crtc->enabled = false;
+ /* adjust pm to dpms changes AFTER disabling crtcs */
+ radeon_pm_compute_clocks(rdev);
break;
}
}
@@ -966,6 +972,12 @@ static bool radeon_crtc_mode_fixup(struct drm_crtc *crtc,
struct drm_display_mode *mode,
struct drm_display_mode *adjusted_mode)
{
+ struct drm_device *dev = crtc->dev;
+ struct radeon_device *rdev = dev->dev_private;
+
+ /* adjust pm to upcoming mode change */
+ radeon_pm_compute_clocks(rdev);
+
if (!radeon_crtc_scaling_mode_fixup(crtc, mode, adjusted_mode))
return false;
return true;
diff --git a/drivers/gpu/drm/radeon/radeon_legacy_encoders.c b/drivers/gpu/drm/radeon/radeon_legacy_encoders.c
index 2441cca7d77..5a13b3eeef1 100644
--- a/drivers/gpu/drm/radeon/radeon_legacy_encoders.c
+++ b/drivers/gpu/drm/radeon/radeon_legacy_encoders.c
@@ -116,8 +116,6 @@ static void radeon_legacy_lvds_dpms(struct drm_encoder *encoder, int mode)
else
radeon_combios_encoder_dpms_scratch_regs(encoder, (mode == DRM_MODE_DPMS_ON) ? true : false);
- /* adjust pm to dpms change */
- radeon_pm_compute_clocks(rdev);
}
static void radeon_legacy_lvds_prepare(struct drm_encoder *encoder)
@@ -217,27 +215,14 @@ static bool radeon_legacy_mode_fixup(struct drm_encoder *encoder,
struct drm_display_mode *adjusted_mode)
{
struct radeon_encoder *radeon_encoder = to_radeon_encoder(encoder);
- struct drm_device *dev = encoder->dev;
- struct radeon_device *rdev = dev->dev_private;
-
- /* adjust pm to upcoming mode change */
- radeon_pm_compute_clocks(rdev);
/* set the active encoder to connector routing */
radeon_encoder_set_active_device(encoder);
drm_mode_set_crtcinfo(adjusted_mode, 0);
/* get the native mode for LVDS */
- if (radeon_encoder->active_device & (ATOM_DEVICE_LCD_SUPPORT)) {
- struct drm_display_mode *native_mode = &radeon_encoder->native_mode;
- int mode_id = adjusted_mode->base.id;
- *adjusted_mode = *native_mode;
- adjusted_mode->hdisplay = mode->hdisplay;
- adjusted_mode->vdisplay = mode->vdisplay;
- adjusted_mode->crtc_hdisplay = mode->hdisplay;
- adjusted_mode->crtc_vdisplay = mode->vdisplay;
- adjusted_mode->base.id = mode_id;
- }
+ if (radeon_encoder->active_device & (ATOM_DEVICE_LCD_SUPPORT))
+ radeon_panel_mode_fixup(encoder, adjusted_mode);
return true;
}
@@ -294,8 +279,6 @@ static void radeon_legacy_primary_dac_dpms(struct drm_encoder *encoder, int mode
else
radeon_combios_encoder_dpms_scratch_regs(encoder, (mode == DRM_MODE_DPMS_ON) ? true : false);
- /* adjust pm to dpms change */
- radeon_pm_compute_clocks(rdev);
}
static void radeon_legacy_primary_dac_prepare(struct drm_encoder *encoder)
@@ -482,8 +465,6 @@ static void radeon_legacy_tmds_int_dpms(struct drm_encoder *encoder, int mode)
else
radeon_combios_encoder_dpms_scratch_regs(encoder, (mode == DRM_MODE_DPMS_ON) ? true : false);
- /* adjust pm to dpms change */
- radeon_pm_compute_clocks(rdev);
}
static void radeon_legacy_tmds_int_prepare(struct drm_encoder *encoder)
@@ -650,8 +631,6 @@ static void radeon_legacy_tmds_ext_dpms(struct drm_encoder *encoder, int mode)
else
radeon_combios_encoder_dpms_scratch_regs(encoder, (mode == DRM_MODE_DPMS_ON) ? true : false);
- /* adjust pm to dpms change */
- radeon_pm_compute_clocks(rdev);
}
static void radeon_legacy_tmds_ext_prepare(struct drm_encoder *encoder)
@@ -860,8 +839,6 @@ static void radeon_legacy_tv_dac_dpms(struct drm_encoder *encoder, int mode)
else
radeon_combios_encoder_dpms_scratch_regs(encoder, (mode == DRM_MODE_DPMS_ON) ? true : false);
- /* adjust pm to dpms change */
- radeon_pm_compute_clocks(rdev);
}
static void radeon_legacy_tv_dac_prepare(struct drm_encoder *encoder)
diff --git a/drivers/gpu/drm/radeon/radeon_mode.h b/drivers/gpu/drm/radeon/radeon_mode.h
index 0b8e32776b1..67358baf28b 100644
--- a/drivers/gpu/drm/radeon/radeon_mode.h
+++ b/drivers/gpu/drm/radeon/radeon_mode.h
@@ -34,11 +34,12 @@
#include <drm_mode.h>
#include <drm_edid.h>
#include <drm_dp_helper.h>
+#include <drm_fixed.h>
#include <linux/i2c.h>
#include <linux/i2c-id.h>
#include <linux/i2c-algo-bit.h>
-#include "radeon_fixed.h"
+struct radeon_bo;
struct radeon_device;
#define to_radeon_crtc(x) container_of(x, struct radeon_crtc, base)
@@ -65,6 +66,16 @@ enum radeon_tv_std {
TV_STD_PAL_N,
};
+enum radeon_hpd_id {
+ RADEON_HPD_1 = 0,
+ RADEON_HPD_2,
+ RADEON_HPD_3,
+ RADEON_HPD_4,
+ RADEON_HPD_5,
+ RADEON_HPD_6,
+ RADEON_HPD_NONE = 0xff,
+};
+
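
Beyond moving above its first user (the i2c bus record below), the enum is renumbered: RADEON_HPD_1 is now 0, so the pin id can double as a register/array index, and RADEON_HPD_NONE moves to 0xff. The subtle consequence is that zero-initialized memory now reads as "HPD pin 1" rather than "no HPD", so any path that means "no pin" must say so explicitly, as the connector hunks at the top of this diff do:

/* zeroed structs no longer imply "no HPD pin" - be explicit */
struct radeon_hpd hpd = { .hpd = RADEON_HPD_NONE };

if (hpd.hpd != RADEON_HPD_NONE)
        setup_hpd_pin(rdev, hpd.hpd);   /* hypothetical helper; pin ids
                                           now start at 0 */
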
/* radeon gpio-based i2c
* 1. "mask" reg and bits
* grabs the gpio pins for software use
@@ -84,7 +95,7 @@ struct radeon_i2c_bus_rec {
/* id used by atom */
uint8_t i2c_id;
/* id used by atom */
- uint8_t hpd_id;
+ enum radeon_hpd_id hpd;
/* can be used with hw i2c engine */
bool hw_capable;
/* uses multi-media i2c engine */
@@ -202,6 +213,8 @@ enum radeon_dvo_chip {
DVO_SIL1178,
};
+struct radeon_fbdev;
+
struct radeon_mode_info {
struct atom_context *atom_context;
struct card_info *atom_card_info;
@@ -218,6 +231,9 @@ struct radeon_mode_info {
struct drm_property *tmds_pll_property;
/* hardcoded DFP edid from BIOS */
struct edid *bios_hardcoded_edid;
+
+ /* pointer to fbdev info structure */
+ struct radeon_fbdev *rfbdev;
};
#define MAX_H_CODE_TIMING_LEN 32
@@ -339,6 +355,7 @@ struct radeon_encoder {
enum radeon_rmx_type rmx_type;
struct drm_display_mode native_mode;
void *enc_priv;
+ int audio_polling_active;
int hdmi_offset;
int hdmi_config_offset;
int hdmi_audio_workaround;
@@ -363,16 +380,6 @@ struct radeon_gpio_rec {
u32 mask;
};
-enum radeon_hpd_id {
- RADEON_HPD_NONE = 0,
- RADEON_HPD_1,
- RADEON_HPD_2,
- RADEON_HPD_3,
- RADEON_HPD_4,
- RADEON_HPD_5,
- RADEON_HPD_6,
-};
-
struct radeon_hpd {
enum radeon_hpd_id hpd;
u8 plugged_state;
@@ -532,11 +539,10 @@ extern void radeon_crtc_fb_gamma_set(struct drm_crtc *crtc, u16 red, u16 green,
u16 blue, int regno);
extern void radeon_crtc_fb_gamma_get(struct drm_crtc *crtc, u16 *red, u16 *green,
u16 *blue, int regno);
-struct drm_framebuffer *radeon_framebuffer_create(struct drm_device *dev,
- struct drm_mode_fb_cmd *mode_cmd,
- struct drm_gem_object *obj);
-
-int radeonfb_probe(struct drm_device *dev);
+void radeon_framebuffer_init(struct drm_device *dev,
+ struct radeon_framebuffer *rfb,
+ struct drm_mode_fb_cmd *mode_cmd,
+ struct drm_gem_object *obj);
int radeonfb_remove(struct drm_device *dev, struct drm_framebuffer *fb);
bool radeon_get_legacy_connector_info_from_bios(struct drm_device *dev);
@@ -558,6 +564,8 @@ extern int radeon_static_clocks_init(struct drm_device *dev);
bool radeon_crtc_scaling_mode_fixup(struct drm_crtc *crtc,
struct drm_display_mode *mode,
struct drm_display_mode *adjusted_mode);
+void radeon_panel_mode_fixup(struct drm_encoder *encoder,
+ struct drm_display_mode *adjusted_mode);
void atom_rv515_force_tv_scaler(struct radeon_device *rdev, struct radeon_crtc *radeon_crtc);
/* legacy tv */
@@ -573,4 +581,13 @@ void radeon_legacy_tv_adjust_pll2(struct drm_encoder *encoder,
void radeon_legacy_tv_mode_set(struct drm_encoder *encoder,
struct drm_display_mode *mode,
struct drm_display_mode *adjusted_mode);
+
+/* fbdev layer */
+int radeon_fbdev_init(struct radeon_device *rdev);
+void radeon_fbdev_fini(struct radeon_device *rdev);
+void radeon_fbdev_set_suspend(struct radeon_device *rdev, int state);
+int radeon_fbdev_total_size(struct radeon_device *rdev);
+bool radeon_fbdev_robj_is_fb(struct radeon_device *rdev, struct radeon_bo *robj);
+
+void radeon_fb_output_poll_changed(struct radeon_device *rdev);
#endif
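radeon_mode.h now declares a self-contained fbdev layer in place of the old radeon_framebuffer_create()/radeonfb_probe() pair. A hedged sketch of the intended lifecycle, inferred purely from the prototypes above (the call sites and error handling are assumptions):

/* sketch: call order inferred from the prototypes, not verbatim driver code */
static int example_display_init(struct radeon_device *rdev)
{
	int r;

	r = radeon_fbdev_init(rdev);	/* populates rdev->mode_info.rfbdev */
	if (r)
		return r;
	return 0;
}

static void example_display_fini(struct radeon_device *rdev)
{
	radeon_fbdev_fini(rdev);	/* tears the fbdev emulation back down */
}

radeon_fbdev_set_suspend() and radeon_fb_output_poll_changed() slot into the suspend/resume and hotplug paths respectively.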
diff --git a/drivers/gpu/drm/radeon/radeon_object.c b/drivers/gpu/drm/radeon/radeon_object.c
index 122774742bd..d5b9373ce06 100644
--- a/drivers/gpu/drm/radeon/radeon_object.c
+++ b/drivers/gpu/drm/radeon/radeon_object.c
@@ -112,9 +112,11 @@ int radeon_bo_create(struct radeon_device *rdev, struct drm_gem_object *gobj,
radeon_ttm_placement_from_domain(bo, domain);
/* Kernel allocations are uninterruptible */
+ mutex_lock(&rdev->vram_mutex);
r = ttm_bo_init(&rdev->mman.bdev, &bo->tbo, size, type,
&bo->placement, 0, 0, !kernel, NULL, size,
&radeon_ttm_bo_destroy);
+ mutex_unlock(&rdev->vram_mutex);
if (unlikely(r != 0)) {
if (r != -ERESTARTSYS)
dev_err(rdev->dev,
@@ -166,11 +168,15 @@ void radeon_bo_kunmap(struct radeon_bo *bo)
void radeon_bo_unref(struct radeon_bo **bo)
{
struct ttm_buffer_object *tbo;
+ struct radeon_device *rdev;
if ((*bo) == NULL)
return;
+ rdev = (*bo)->rdev;
tbo = &((*bo)->tbo);
+ mutex_lock(&rdev->vram_mutex);
ttm_bo_unref(&tbo);
+ mutex_unlock(&rdev->vram_mutex);
if (tbo == NULL)
*bo = NULL;
}
@@ -192,7 +198,7 @@ int radeon_bo_pin(struct radeon_bo *bo, u32 domain, u64 *gpu_addr)
}
for (i = 0; i < bo->placement.num_placement; i++)
bo->placements[i] |= TTM_PL_FLAG_NO_EVICT;
- r = ttm_bo_validate(&bo->tbo, &bo->placement, false, false);
+ r = ttm_bo_validate(&bo->tbo, &bo->placement, false, false, false);
if (likely(r == 0)) {
bo->pin_count = 1;
if (gpu_addr != NULL)
@@ -216,7 +222,7 @@ int radeon_bo_unpin(struct radeon_bo *bo)
return 0;
for (i = 0; i < bo->placement.num_placement; i++)
bo->placements[i] &= ~TTM_PL_FLAG_NO_EVICT;
- r = ttm_bo_validate(&bo->tbo, &bo->placement, false, false);
+ r = ttm_bo_validate(&bo->tbo, &bo->placement, false, false, false);
if (unlikely(r != 0))
dev_err(bo->rdev->dev, "%p validate failed for unpin\n", bo);
return r;
@@ -295,6 +301,7 @@ int radeon_bo_list_reserve(struct list_head *head)
r = radeon_bo_reserve(lobj->bo, false);
if (unlikely(r != 0))
return r;
+ lobj->reserved = true;
}
return 0;
}
@@ -305,7 +312,7 @@ void radeon_bo_list_unreserve(struct list_head *head)
list_for_each_entry(lobj, head, list) {
/* only unreserve objects we successfully reserved */
- if (radeon_bo_is_reserved(lobj->bo))
+ if (lobj->reserved && radeon_bo_is_reserved(lobj->bo))
radeon_bo_unreserve(lobj->bo);
}
}
@@ -316,6 +323,9 @@ int radeon_bo_list_validate(struct list_head *head)
struct radeon_bo *bo;
int r;
+ list_for_each_entry(lobj, head, list) {
+ lobj->reserved = false;
+ }
r = radeon_bo_list_reserve(head);
if (unlikely(r != 0)) {
return r;
@@ -331,7 +341,7 @@ int radeon_bo_list_validate(struct list_head *head)
lobj->rdomain);
}
r = ttm_bo_validate(&bo->tbo, &bo->placement,
- true, false);
+ true, false, false);
if (unlikely(r))
return r;
}
@@ -499,11 +509,33 @@ void radeon_bo_move_notify(struct ttm_buffer_object *bo,
radeon_bo_check_tiling(rbo, 0, 1);
}
-void radeon_bo_fault_reserve_notify(struct ttm_buffer_object *bo)
+int radeon_bo_fault_reserve_notify(struct ttm_buffer_object *bo)
{
+ struct radeon_device *rdev;
struct radeon_bo *rbo;
+ unsigned long offset, size;
+ int r;
+
if (!radeon_ttm_bo_is_radeon_bo(bo))
- return;
+ return 0;
rbo = container_of(bo, struct radeon_bo, tbo);
radeon_bo_check_tiling(rbo, 0, 0);
+ rdev = rbo->rdev;
+ if (bo->mem.mem_type == TTM_PL_VRAM) {
+ size = bo->mem.num_pages << PAGE_SHIFT;
+ offset = bo->mem.mm_node->start << PAGE_SHIFT;
+ if ((offset + size) > rdev->mc.visible_vram_size) {
+ /* hurrah the memory is not visible! */
+ radeon_ttm_placement_from_domain(rbo, RADEON_GEM_DOMAIN_VRAM);
+ rbo->placement.lpfn = rdev->mc.visible_vram_size >> PAGE_SHIFT;
+ r = ttm_bo_validate(bo, &rbo->placement, false, true, false);
+ if (unlikely(r != 0))
+ return r;
+ offset = bo->mem.mm_node->start << PAGE_SHIFT;
+ /* this should not happen */
+ if ((offset + size) > rdev->mc.visible_vram_size)
+ return -EINVAL;
+ }
+ }
+ return 0;
}
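radeon_bo_fault_reserve_notify() above now returns an error and, on a CPU fault against a VRAM object that lies past the CPU-visible aperture, re-validates the object with its placement clamped so TTM can only choose visible pages. A sketch of the visibility test being applied, using the field names from the hunk:

/* sketch of the aperture check above; rbo/rdev as in the hunk */
static bool example_bo_cpu_visible(struct radeon_bo *rbo,
				   struct radeon_device *rdev)
{
	unsigned long size = rbo->tbo.mem.num_pages << PAGE_SHIFT;
	unsigned long offset = rbo->tbo.mem.mm_node->start << PAGE_SHIFT;

	return (offset + size) <= rdev->mc.visible_vram_size;
}

When this is false, the code sets placement.lpfn to visible_vram_size >> PAGE_SHIFT before re-validating, which bounds the allocation to the aperture.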
diff --git a/drivers/gpu/drm/radeon/radeon_object.h b/drivers/gpu/drm/radeon/radeon_object.h
index 7ab43de1e24..353998dc2c0 100644
--- a/drivers/gpu/drm/radeon/radeon_object.h
+++ b/drivers/gpu/drm/radeon/radeon_object.h
@@ -168,6 +168,6 @@ extern int radeon_bo_check_tiling(struct radeon_bo *bo, bool has_moved,
bool force_drop);
extern void radeon_bo_move_notify(struct ttm_buffer_object *bo,
struct ttm_mem_reg *mem);
-extern void radeon_bo_fault_reserve_notify(struct ttm_buffer_object *bo);
+extern int radeon_bo_fault_reserve_notify(struct ttm_buffer_object *bo);
extern int radeon_bo_get_surface_reg(struct radeon_bo *bo);
#endif
diff --git a/drivers/gpu/drm/radeon/radeon_pm.c b/drivers/gpu/drm/radeon/radeon_pm.c
index a4b57493aa7..a8d162c6f82 100644
--- a/drivers/gpu/drm/radeon/radeon_pm.c
+++ b/drivers/gpu/drm/radeon/radeon_pm.c
@@ -23,164 +23,122 @@
#include "drmP.h"
#include "radeon.h"
#include "avivod.h"
+#ifdef CONFIG_ACPI
+#include <linux/acpi.h>
+#endif
+#include <linux/power_supply.h>
#define RADEON_IDLE_LOOP_MS 100
#define RADEON_RECLOCK_DELAY_MS 200
#define RADEON_WAIT_VBLANK_TIMEOUT 200
+#define RADEON_WAIT_IDLE_TIMEOUT 200
+static void radeon_dynpm_idle_work_handler(struct work_struct *work);
+static int radeon_debugfs_pm_init(struct radeon_device *rdev);
+static bool radeon_pm_in_vbl(struct radeon_device *rdev);
static bool radeon_pm_debug_check_in_vbl(struct radeon_device *rdev, bool finish);
-static void radeon_pm_set_clocks_locked(struct radeon_device *rdev);
+static void radeon_pm_update_profile(struct radeon_device *rdev);
static void radeon_pm_set_clocks(struct radeon_device *rdev);
-static void radeon_pm_idle_work_handler(struct work_struct *work);
-static int radeon_debugfs_pm_init(struct radeon_device *rdev);
-
-static const char *pm_state_names[4] = {
- "PM_STATE_DISABLED",
- "PM_STATE_MINIMUM",
- "PM_STATE_PAUSED",
- "PM_STATE_ACTIVE"
-};
-static const char *pm_state_types[5] = {
- "Default",
- "Powersave",
- "Battery",
- "Balanced",
- "Performance",
-};
+#define ACPI_AC_CLASS "ac_adapter"
-static void radeon_print_power_mode_info(struct radeon_device *rdev)
+#ifdef CONFIG_ACPI
+static int radeon_acpi_event(struct notifier_block *nb,
+ unsigned long val,
+ void *data)
{
- int i, j;
- bool is_default;
+ struct radeon_device *rdev = container_of(nb, struct radeon_device, acpi_nb);
+ struct acpi_bus_event *entry = (struct acpi_bus_event *)data;
- DRM_INFO("%d Power State(s)\n", rdev->pm.num_power_states);
- for (i = 0; i < rdev->pm.num_power_states; i++) {
- if (rdev->pm.default_power_state == &rdev->pm.power_state[i])
- is_default = true;
+ if (strcmp(entry->device_class, ACPI_AC_CLASS) == 0) {
+ if (power_supply_is_system_supplied() > 0)
+ DRM_DEBUG("pm: AC\n");
else
- is_default = false;
- DRM_INFO("State %d %s %s\n", i,
- pm_state_types[rdev->pm.power_state[i].type],
- is_default ? "(default)" : "");
- if ((rdev->flags & RADEON_IS_PCIE) && !(rdev->flags & RADEON_IS_IGP))
- DRM_INFO("\t%d PCIE Lanes\n", rdev->pm.power_state[i].non_clock_info.pcie_lanes);
- DRM_INFO("\t%d Clock Mode(s)\n", rdev->pm.power_state[i].num_clock_modes);
- for (j = 0; j < rdev->pm.power_state[i].num_clock_modes; j++) {
- if (rdev->flags & RADEON_IS_IGP)
- DRM_INFO("\t\t%d engine: %d\n",
- j,
- rdev->pm.power_state[i].clock_info[j].sclk * 10);
- else
- DRM_INFO("\t\t%d engine/memory: %d/%d\n",
- j,
- rdev->pm.power_state[i].clock_info[j].sclk * 10,
- rdev->pm.power_state[i].clock_info[j].mclk * 10);
+ DRM_DEBUG("pm: DC\n");
+
+ if (rdev->pm.pm_method == PM_METHOD_PROFILE) {
+ if (rdev->pm.profile == PM_PROFILE_AUTO) {
+ mutex_lock(&rdev->pm.mutex);
+ radeon_pm_update_profile(rdev);
+ radeon_pm_set_clocks(rdev);
+ mutex_unlock(&rdev->pm.mutex);
+ }
}
}
+
+ return NOTIFY_OK;
}
+#endif
-static struct radeon_power_state * radeon_pick_power_state(struct radeon_device *rdev,
- enum radeon_pm_state_type type)
+static void radeon_pm_update_profile(struct radeon_device *rdev)
{
- int i, j;
- enum radeon_pm_state_type wanted_types[2];
- int wanted_count;
-
- switch (type) {
- case POWER_STATE_TYPE_DEFAULT:
- default:
- return rdev->pm.default_power_state;
- case POWER_STATE_TYPE_POWERSAVE:
- if (rdev->flags & RADEON_IS_MOBILITY) {
- wanted_types[0] = POWER_STATE_TYPE_POWERSAVE;
- wanted_types[1] = POWER_STATE_TYPE_BATTERY;
- wanted_count = 2;
- } else {
- wanted_types[0] = POWER_STATE_TYPE_PERFORMANCE;
- wanted_count = 1;
- }
+ switch (rdev->pm.profile) {
+ case PM_PROFILE_DEFAULT:
+ rdev->pm.profile_index = PM_PROFILE_DEFAULT_IDX;
break;
- case POWER_STATE_TYPE_BATTERY:
- if (rdev->flags & RADEON_IS_MOBILITY) {
- wanted_types[0] = POWER_STATE_TYPE_BATTERY;
- wanted_types[1] = POWER_STATE_TYPE_POWERSAVE;
- wanted_count = 2;
+ case PM_PROFILE_AUTO:
+ if (power_supply_is_system_supplied() > 0) {
+ if (rdev->pm.active_crtc_count > 1)
+ rdev->pm.profile_index = PM_PROFILE_HIGH_MH_IDX;
+ else
+ rdev->pm.profile_index = PM_PROFILE_HIGH_SH_IDX;
} else {
- wanted_types[0] = POWER_STATE_TYPE_PERFORMANCE;
- wanted_count = 1;
+ if (rdev->pm.active_crtc_count > 1)
+ rdev->pm.profile_index = PM_PROFILE_LOW_MH_IDX;
+ else
+ rdev->pm.profile_index = PM_PROFILE_LOW_SH_IDX;
}
break;
- case POWER_STATE_TYPE_BALANCED:
- case POWER_STATE_TYPE_PERFORMANCE:
- wanted_types[0] = type;
- wanted_count = 1;
+ case PM_PROFILE_LOW:
+ if (rdev->pm.active_crtc_count > 1)
+ rdev->pm.profile_index = PM_PROFILE_LOW_MH_IDX;
+ else
+ rdev->pm.profile_index = PM_PROFILE_LOW_SH_IDX;
break;
- }
-
- for (i = 0; i < wanted_count; i++) {
- for (j = 0; j < rdev->pm.num_power_states; j++) {
- if (rdev->pm.power_state[j].type == wanted_types[i])
- return &rdev->pm.power_state[j];
- }
- }
-
- return rdev->pm.default_power_state;
-}
-
-static struct radeon_pm_clock_info * radeon_pick_clock_mode(struct radeon_device *rdev,
- struct radeon_power_state *power_state,
- enum radeon_pm_clock_mode_type type)
-{
- switch (type) {
- case POWER_MODE_TYPE_DEFAULT:
- default:
- return power_state->default_clock_mode;
- case POWER_MODE_TYPE_LOW:
- return &power_state->clock_info[0];
- case POWER_MODE_TYPE_MID:
- if (power_state->num_clock_modes > 2)
- return &power_state->clock_info[1];
+ case PM_PROFILE_HIGH:
+ if (rdev->pm.active_crtc_count > 1)
+ rdev->pm.profile_index = PM_PROFILE_HIGH_MH_IDX;
else
- return &power_state->clock_info[0];
+ rdev->pm.profile_index = PM_PROFILE_HIGH_SH_IDX;
break;
- case POWER_MODE_TYPE_HIGH:
- return &power_state->clock_info[power_state->num_clock_modes - 1];
}
+ if (rdev->pm.active_crtc_count == 0) {
+ rdev->pm.requested_power_state_index =
+ rdev->pm.profiles[rdev->pm.profile_index].dpms_off_ps_idx;
+ rdev->pm.requested_clock_mode_index =
+ rdev->pm.profiles[rdev->pm.profile_index].dpms_off_cm_idx;
+ } else {
+ rdev->pm.requested_power_state_index =
+ rdev->pm.profiles[rdev->pm.profile_index].dpms_on_ps_idx;
+ rdev->pm.requested_clock_mode_index =
+ rdev->pm.profiles[rdev->pm.profile_index].dpms_on_cm_idx;
+ }
}
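The selection above reduces to a small table: AUTO resolves to HIGH on AC power and LOW on battery, and every profile then splits into a single-head (SH) or multi-head (MH) index by active CRTC count. The same logic as a pure function, a sketch using the enum names from the code above:

static int example_pick_profile_index(int profile, bool on_ac, int crtcs)
{
	if (profile == PM_PROFILE_DEFAULT)
		return PM_PROFILE_DEFAULT_IDX;
	if (profile == PM_PROFILE_AUTO)
		profile = on_ac ? PM_PROFILE_HIGH : PM_PROFILE_LOW;
	if (profile == PM_PROFILE_LOW)
		return crtcs > 1 ? PM_PROFILE_LOW_MH_IDX
				 : PM_PROFILE_LOW_SH_IDX;
	return crtcs > 1 ? PM_PROFILE_HIGH_MH_IDX
			 : PM_PROFILE_HIGH_SH_IDX;
}

Each profile entry then supplies separate dpms-on and dpms-off power state and clock mode indices, chosen by whether any CRTC is active.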
-static void radeon_get_power_state(struct radeon_device *rdev,
- enum radeon_pm_action action)
+static void radeon_unmap_vram_bos(struct radeon_device *rdev)
{
- switch (action) {
- case PM_ACTION_MINIMUM:
- rdev->pm.requested_power_state = radeon_pick_power_state(rdev, POWER_STATE_TYPE_BATTERY);
- rdev->pm.requested_clock_mode =
- radeon_pick_clock_mode(rdev, rdev->pm.requested_power_state, POWER_MODE_TYPE_LOW);
- break;
- case PM_ACTION_DOWNCLOCK:
- rdev->pm.requested_power_state = radeon_pick_power_state(rdev, POWER_STATE_TYPE_POWERSAVE);
- rdev->pm.requested_clock_mode =
- radeon_pick_clock_mode(rdev, rdev->pm.requested_power_state, POWER_MODE_TYPE_MID);
- break;
- case PM_ACTION_UPCLOCK:
- rdev->pm.requested_power_state = radeon_pick_power_state(rdev, POWER_STATE_TYPE_DEFAULT);
- rdev->pm.requested_clock_mode =
- radeon_pick_clock_mode(rdev, rdev->pm.requested_power_state, POWER_MODE_TYPE_HIGH);
- break;
- case PM_ACTION_NONE:
- default:
- DRM_ERROR("Requested mode for not defined action\n");
+ struct radeon_bo *bo, *n;
+
+ if (list_empty(&rdev->gem.objects))
return;
+
+ list_for_each_entry_safe(bo, n, &rdev->gem.objects, list) {
+ if (bo->tbo.mem.mem_type == TTM_PL_VRAM)
+ ttm_bo_unmap_virtual(&bo->tbo);
}
- DRM_INFO("Requested: e: %d m: %d p: %d\n",
- rdev->pm.requested_clock_mode->sclk,
- rdev->pm.requested_clock_mode->mclk,
- rdev->pm.requested_power_state->non_clock_info.pcie_lanes);
+
+ if (rdev->gart.table.vram.robj)
+ ttm_bo_unmap_virtual(&rdev->gart.table.vram.robj->tbo);
+
+ if (rdev->stollen_vga_memory)
+ ttm_bo_unmap_virtual(&rdev->stollen_vga_memory->tbo);
+
+ if (rdev->r600_blit.shader_obj)
+ ttm_bo_unmap_virtual(&rdev->r600_blit.shader_obj->tbo);
}
-static inline void radeon_sync_with_vblank(struct radeon_device *rdev)
+static void radeon_sync_with_vblank(struct radeon_device *rdev)
{
if (rdev->pm.active_crtcs) {
rdev->pm.vblank_sync = false;
@@ -192,73 +150,332 @@ static inline void radeon_sync_with_vblank(struct radeon_device *rdev)
static void radeon_set_power_state(struct radeon_device *rdev)
{
- /* if *_clock_mode are the same, *_power_state are as well */
- if (rdev->pm.requested_clock_mode == rdev->pm.current_clock_mode)
+ u32 sclk, mclk;
+
+ if ((rdev->pm.requested_clock_mode_index == rdev->pm.current_clock_mode_index) &&
+ (rdev->pm.requested_power_state_index == rdev->pm.current_power_state_index))
return;
- DRM_INFO("Setting: e: %d m: %d p: %d\n",
- rdev->pm.requested_clock_mode->sclk,
- rdev->pm.requested_clock_mode->mclk,
- rdev->pm.requested_power_state->non_clock_info.pcie_lanes);
-
- /* set pcie lanes */
- /* TODO */
-
- /* set voltage */
- /* TODO */
-
- /* set engine clock */
- radeon_sync_with_vblank(rdev);
- radeon_pm_debug_check_in_vbl(rdev, false);
- radeon_set_engine_clock(rdev, rdev->pm.requested_clock_mode->sclk);
- radeon_pm_debug_check_in_vbl(rdev, true);
-
-#if 0
- /* set memory clock */
- if (rdev->asic->set_memory_clock) {
- radeon_sync_with_vblank(rdev);
- radeon_pm_debug_check_in_vbl(rdev, false);
- radeon_set_memory_clock(rdev, rdev->pm.requested_clock_mode->mclk);
- radeon_pm_debug_check_in_vbl(rdev, true);
+ if (radeon_gui_idle(rdev)) {
+ sclk = rdev->pm.power_state[rdev->pm.requested_power_state_index].
+ clock_info[rdev->pm.requested_clock_mode_index].sclk;
+ if (sclk > rdev->clock.default_sclk)
+ sclk = rdev->clock.default_sclk;
+
+ mclk = rdev->pm.power_state[rdev->pm.requested_power_state_index].
+ clock_info[rdev->pm.requested_clock_mode_index].mclk;
+ if (mclk > rdev->clock.default_mclk)
+ mclk = rdev->clock.default_mclk;
+
+ /* voltage, pcie lanes, etc. */
+ radeon_pm_misc(rdev);
+
+ if (rdev->pm.pm_method == PM_METHOD_DYNPM) {
+ radeon_sync_with_vblank(rdev);
+
+ if (!radeon_pm_in_vbl(rdev))
+ return;
+
+ radeon_pm_prepare(rdev);
+ /* set engine clock */
+ if (sclk != rdev->pm.current_sclk) {
+ radeon_pm_debug_check_in_vbl(rdev, false);
+ radeon_set_engine_clock(rdev, sclk);
+ radeon_pm_debug_check_in_vbl(rdev, true);
+ rdev->pm.current_sclk = sclk;
+ DRM_DEBUG("Setting: e: %d\n", sclk);
+ }
+
+ /* set memory clock */
+ if (rdev->asic->set_memory_clock && (mclk != rdev->pm.current_mclk)) {
+ radeon_pm_debug_check_in_vbl(rdev, false);
+ radeon_set_memory_clock(rdev, mclk);
+ radeon_pm_debug_check_in_vbl(rdev, true);
+ rdev->pm.current_mclk = mclk;
+ DRM_DEBUG("Setting: m: %d\n", mclk);
+ }
+ radeon_pm_finish(rdev);
+ } else {
+ /* set engine clock */
+ if (sclk != rdev->pm.current_sclk) {
+ radeon_sync_with_vblank(rdev);
+ radeon_pm_prepare(rdev);
+ radeon_set_engine_clock(rdev, sclk);
+ radeon_pm_finish(rdev);
+ rdev->pm.current_sclk = sclk;
+ DRM_DEBUG("Setting: e: %d\n", sclk);
+ }
+ /* set memory clock */
+ if (rdev->asic->set_memory_clock && (mclk != rdev->pm.current_mclk)) {
+ radeon_sync_with_vblank(rdev);
+ radeon_pm_prepare(rdev);
+ radeon_set_memory_clock(rdev, mclk);
+ radeon_pm_finish(rdev);
+ rdev->pm.current_mclk = mclk;
+ DRM_DEBUG("Setting: m: %d\n", mclk);
+ }
+ }
+
+ rdev->pm.current_power_state_index = rdev->pm.requested_power_state_index;
+ rdev->pm.current_clock_mode_index = rdev->pm.requested_clock_mode_index;
+ } else
+ DRM_DEBUG("pm: GUI not idle!!!\n");
+}
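Two details in radeon_set_power_state() above are worth noting: requested clocks are clamped so the driver never programs above the BIOS defaults, and the dynpm path additionally refuses to reclock unless the active CRTCs are inside vblank, so switches never land mid-scanout. The clamp as a sketch:

/* sketch of the clamp above: never exceed the BIOS default clock */
static u32 example_clamp_clock(u32 requested, u32 bios_default)
{
	return requested > bios_default ? bios_default : requested;
}

Both paths bracket the actual switch with radeon_pm_prepare()/radeon_pm_finish(); rs600's implementations of those hooks appear later in this diff.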
+
+static void radeon_pm_set_clocks(struct radeon_device *rdev)
+{
+ int i;
+
+ mutex_lock(&rdev->ddev->struct_mutex);
+ mutex_lock(&rdev->vram_mutex);
+ mutex_lock(&rdev->cp.mutex);
+
+ /* gui idle int has issues on older chips it seems */
+ if (rdev->family >= CHIP_R600) {
+ if (rdev->irq.installed) {
+ /* wait for GPU idle */
+ rdev->pm.gui_idle = false;
+ rdev->irq.gui_idle = true;
+ radeon_irq_set(rdev);
+ wait_event_interruptible_timeout(
+ rdev->irq.idle_queue, rdev->pm.gui_idle,
+ msecs_to_jiffies(RADEON_WAIT_IDLE_TIMEOUT));
+ rdev->irq.gui_idle = false;
+ radeon_irq_set(rdev);
+ }
+ } else {
+ if (rdev->cp.ready) {
+ struct radeon_fence *fence;
+ radeon_ring_alloc(rdev, 64);
+ radeon_fence_create(rdev, &fence);
+ radeon_fence_emit(rdev, fence);
+ radeon_ring_commit(rdev);
+ radeon_fence_wait(fence, false);
+ radeon_fence_unref(&fence);
+ }
}
-#endif
+ radeon_unmap_vram_bos(rdev);
+
+ if (rdev->irq.installed) {
+ for (i = 0; i < rdev->num_crtc; i++) {
+ if (rdev->pm.active_crtcs & (1 << i)) {
+ rdev->pm.req_vblank |= (1 << i);
+ drm_vblank_get(rdev->ddev, i);
+ }
+ }
+ }
+
+ radeon_set_power_state(rdev);
+
+ if (rdev->irq.installed) {
+ for (i = 0; i < rdev->num_crtc; i++) {
+ if (rdev->pm.req_vblank & (1 << i)) {
+ rdev->pm.req_vblank &= ~(1 << i);
+ drm_vblank_put(rdev->ddev, i);
+ }
+ }
+ }
+
+ /* update display watermarks based on new power state */
+ radeon_update_bandwidth_info(rdev);
+ if (rdev->pm.active_crtc_count)
+ radeon_bandwidth_update(rdev);
+
+ rdev->pm.dynpm_planned_action = DYNPM_ACTION_NONE;
+
+ mutex_unlock(&rdev->cp.mutex);
+ mutex_unlock(&rdev->vram_mutex);
+ mutex_unlock(&rdev->ddev->struct_mutex);
+}
+
+static ssize_t radeon_get_pm_profile(struct device *dev,
+ struct device_attribute *attr,
+ char *buf)
+{
+ struct drm_device *ddev = pci_get_drvdata(to_pci_dev(dev));
+ struct radeon_device *rdev = ddev->dev_private;
+ int cp = rdev->pm.profile;
+
+ return snprintf(buf, PAGE_SIZE, "%s\n",
+ (cp == PM_PROFILE_AUTO) ? "auto" :
+ (cp == PM_PROFILE_LOW) ? "low" :
+ (cp == PM_PROFILE_HIGH) ? "high" : "default");
+}
+
+static ssize_t radeon_set_pm_profile(struct device *dev,
+ struct device_attribute *attr,
+ const char *buf,
+ size_t count)
+{
+ struct drm_device *ddev = pci_get_drvdata(to_pci_dev(dev));
+ struct radeon_device *rdev = ddev->dev_private;
+
+ mutex_lock(&rdev->pm.mutex);
+ if (rdev->pm.pm_method == PM_METHOD_PROFILE) {
+ if (strncmp("default", buf, strlen("default")) == 0)
+ rdev->pm.profile = PM_PROFILE_DEFAULT;
+ else if (strncmp("auto", buf, strlen("auto")) == 0)
+ rdev->pm.profile = PM_PROFILE_AUTO;
+ else if (strncmp("low", buf, strlen("low")) == 0)
+ rdev->pm.profile = PM_PROFILE_LOW;
+ else if (strncmp("high", buf, strlen("high")) == 0)
+ rdev->pm.profile = PM_PROFILE_HIGH;
+ else {
+ DRM_ERROR("invalid power profile!\n");
+ goto fail;
+ }
+ radeon_pm_update_profile(rdev);
+ radeon_pm_set_clocks(rdev);
+ }
+fail:
+ mutex_unlock(&rdev->pm.mutex);
+
+ return count;
+}
+
+static ssize_t radeon_get_pm_method(struct device *dev,
+ struct device_attribute *attr,
+ char *buf)
+{
+ struct drm_device *ddev = pci_get_drvdata(to_pci_dev(dev));
+ struct radeon_device *rdev = ddev->dev_private;
+ int pm = rdev->pm.pm_method;
+
+ return snprintf(buf, PAGE_SIZE, "%s\n",
+ (pm == PM_METHOD_DYNPM) ? "dynpm" : "profile");
+}
+
+static ssize_t radeon_set_pm_method(struct device *dev,
+ struct device_attribute *attr,
+ const char *buf,
+ size_t count)
+{
+ struct drm_device *ddev = pci_get_drvdata(to_pci_dev(dev));
+ struct radeon_device *rdev = ddev->dev_private;
+
+ if (strncmp("dynpm", buf, strlen("dynpm")) == 0) {
+ mutex_lock(&rdev->pm.mutex);
+ rdev->pm.pm_method = PM_METHOD_DYNPM;
+ rdev->pm.dynpm_state = DYNPM_STATE_PAUSED;
+ rdev->pm.dynpm_planned_action = DYNPM_ACTION_DEFAULT;
+ mutex_unlock(&rdev->pm.mutex);
+ } else if (strncmp("profile", buf, strlen("profile")) == 0) {
+ mutex_lock(&rdev->pm.mutex);
+ rdev->pm.pm_method = PM_METHOD_PROFILE;
+ /* disable dynpm */
+ rdev->pm.dynpm_state = DYNPM_STATE_DISABLED;
+ rdev->pm.dynpm_planned_action = DYNPM_ACTION_NONE;
+ cancel_delayed_work(&rdev->pm.dynpm_idle_work);
+ mutex_unlock(&rdev->pm.mutex);
+ } else {
+ DRM_ERROR("invalid power method!\n");
+ goto fail;
+ }
+ radeon_pm_compute_clocks(rdev);
+fail:
+ return count;
+}
+
+static DEVICE_ATTR(power_profile, S_IRUGO | S_IWUSR, radeon_get_pm_profile, radeon_set_pm_profile);
+static DEVICE_ATTR(power_method, S_IRUGO | S_IWUSR, radeon_get_pm_method, radeon_set_pm_method);
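The two DEVICE_ATTR lines publish this interface as power_profile and power_method in the device's sysfs directory. A small user-space sketch; the card0 path is an assumption, it varies with PCI topology:

#include <stdio.h>

int main(void)
{
	/* assumed path; adjust cardN for your system */
	const char *path = "/sys/class/drm/card0/device/power_method";
	FILE *f = fopen(path, "w");

	if (!f) {
		perror("fopen");
		return 1;
	}
	fputs("profile\n", f);	/* parser accepts "dynpm" or "profile" */
	fclose(f);
	return 0;
}

power_profile accepts default, auto, low, or high, exactly the strings matched by radeon_set_pm_profile() above.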
- rdev->pm.current_power_state = rdev->pm.requested_power_state;
- rdev->pm.current_clock_mode = rdev->pm.requested_clock_mode;
+void radeon_pm_suspend(struct radeon_device *rdev)
+{
+ mutex_lock(&rdev->pm.mutex);
+ cancel_delayed_work(&rdev->pm.dynpm_idle_work);
+ rdev->pm.current_power_state_index = -1;
+ rdev->pm.current_clock_mode_index = -1;
+ rdev->pm.current_sclk = 0;
+ rdev->pm.current_mclk = 0;
+ mutex_unlock(&rdev->pm.mutex);
+}
+
+void radeon_pm_resume(struct radeon_device *rdev)
+{
+ radeon_pm_compute_clocks(rdev);
}
int radeon_pm_init(struct radeon_device *rdev)
{
- rdev->pm.state = PM_STATE_DISABLED;
- rdev->pm.planned_action = PM_ACTION_NONE;
- rdev->pm.downclocked = false;
+ int ret;
+ /* default to profile method */
+ rdev->pm.pm_method = PM_METHOD_PROFILE;
+ rdev->pm.dynpm_state = DYNPM_STATE_DISABLED;
+ rdev->pm.dynpm_planned_action = DYNPM_ACTION_NONE;
+ rdev->pm.dynpm_can_upclock = true;
+ rdev->pm.dynpm_can_downclock = true;
+ rdev->pm.current_sclk = 0;
+ rdev->pm.current_mclk = 0;
if (rdev->bios) {
if (rdev->is_atom_bios)
radeon_atombios_get_power_modes(rdev);
else
radeon_combios_get_power_modes(rdev);
- radeon_print_power_mode_info(rdev);
+ radeon_pm_init_profile(rdev);
+ rdev->pm.current_power_state_index = -1;
+ rdev->pm.current_clock_mode_index = -1;
}
- if (radeon_debugfs_pm_init(rdev)) {
- DRM_ERROR("Failed to register debugfs file for PM!\n");
- }
+ if (rdev->pm.num_power_states > 1) {
+ if (rdev->pm.pm_method == PM_METHOD_PROFILE) {
+ mutex_lock(&rdev->pm.mutex);
+ rdev->pm.profile = PM_PROFILE_DEFAULT;
+ radeon_pm_update_profile(rdev);
+ radeon_pm_set_clocks(rdev);
+ mutex_unlock(&rdev->pm.mutex);
+ }
+
+ /* where's the best place to put these? */
+ ret = device_create_file(rdev->dev, &dev_attr_power_profile);
+ if (ret)
+ DRM_ERROR("failed to create device file for power profile\n");
+ ret = device_create_file(rdev->dev, &dev_attr_power_method);
+ if (ret)
+ DRM_ERROR("failed to create device file for power method\n");
+
+#ifdef CONFIG_ACPI
+ rdev->acpi_nb.notifier_call = radeon_acpi_event;
+ register_acpi_notifier(&rdev->acpi_nb);
+#endif
+ INIT_DELAYED_WORK(&rdev->pm.dynpm_idle_work, radeon_dynpm_idle_work_handler);
- INIT_DELAYED_WORK(&rdev->pm.idle_work, radeon_pm_idle_work_handler);
+ if (radeon_debugfs_pm_init(rdev)) {
+ DRM_ERROR("Failed to register debugfs file for PM!\n");
+ }
- if (radeon_dynpm != -1 && radeon_dynpm) {
- rdev->pm.state = PM_STATE_PAUSED;
- DRM_INFO("radeon: dynamic power management enabled\n");
+ DRM_INFO("radeon: power management initialized\n");
}
- DRM_INFO("radeon: power management initialized\n");
-
return 0;
}
void radeon_pm_fini(struct radeon_device *rdev)
{
+ if (rdev->pm.num_power_states > 1) {
+ mutex_lock(&rdev->pm.mutex);
+ if (rdev->pm.pm_method == PM_METHOD_PROFILE) {
+ rdev->pm.profile = PM_PROFILE_DEFAULT;
+ radeon_pm_update_profile(rdev);
+ radeon_pm_set_clocks(rdev);
+ } else if (rdev->pm.pm_method == PM_METHOD_DYNPM) {
+ /* cancel work */
+ cancel_delayed_work_sync(&rdev->pm.dynpm_idle_work);
+ /* reset default clocks */
+ rdev->pm.dynpm_state = DYNPM_STATE_DISABLED;
+ rdev->pm.dynpm_planned_action = DYNPM_ACTION_DEFAULT;
+ radeon_pm_set_clocks(rdev);
+ }
+ mutex_unlock(&rdev->pm.mutex);
+
+ device_remove_file(rdev->dev, &dev_attr_power_profile);
+ device_remove_file(rdev->dev, &dev_attr_power_method);
+#ifdef CONFIG_ACPI
+ unregister_acpi_notifier(&rdev->acpi_nb);
+#endif
+ }
+
if (rdev->pm.i2c_bus)
radeon_i2c_destroy(rdev->pm.i2c_bus);
}
@@ -266,146 +483,167 @@ void radeon_pm_fini(struct radeon_device *rdev)
void radeon_pm_compute_clocks(struct radeon_device *rdev)
{
struct drm_device *ddev = rdev->ddev;
- struct drm_connector *connector;
+ struct drm_crtc *crtc;
struct radeon_crtc *radeon_crtc;
- int count = 0;
- if (rdev->pm.state == PM_STATE_DISABLED)
+ if (rdev->pm.num_power_states < 2)
return;
mutex_lock(&rdev->pm.mutex);
rdev->pm.active_crtcs = 0;
- list_for_each_entry(connector,
- &ddev->mode_config.connector_list, head) {
- if (connector->encoder &&
- connector->encoder->crtc &&
- connector->dpms != DRM_MODE_DPMS_OFF) {
- radeon_crtc = to_radeon_crtc(connector->encoder->crtc);
+ rdev->pm.active_crtc_count = 0;
+ list_for_each_entry(crtc,
+ &ddev->mode_config.crtc_list, head) {
+ radeon_crtc = to_radeon_crtc(crtc);
+ if (radeon_crtc->enabled) {
rdev->pm.active_crtcs |= (1 << radeon_crtc->crtc_id);
- ++count;
+ rdev->pm.active_crtc_count++;
}
}
- if (count > 1) {
- if (rdev->pm.state == PM_STATE_ACTIVE) {
- cancel_delayed_work(&rdev->pm.idle_work);
-
- rdev->pm.state = PM_STATE_PAUSED;
- rdev->pm.planned_action = PM_ACTION_UPCLOCK;
- if (rdev->pm.downclocked)
- radeon_pm_set_clocks(rdev);
-
- DRM_DEBUG("radeon: dynamic power management deactivated\n");
- }
- } else if (count == 1) {
- /* TODO: Increase clocks if needed for current mode */
-
- if (rdev->pm.state == PM_STATE_MINIMUM) {
- rdev->pm.state = PM_STATE_ACTIVE;
- rdev->pm.planned_action = PM_ACTION_UPCLOCK;
- radeon_pm_set_clocks(rdev);
-
- queue_delayed_work(rdev->wq, &rdev->pm.idle_work,
- msecs_to_jiffies(RADEON_IDLE_LOOP_MS));
- }
- else if (rdev->pm.state == PM_STATE_PAUSED) {
- rdev->pm.state = PM_STATE_ACTIVE;
- queue_delayed_work(rdev->wq, &rdev->pm.idle_work,
- msecs_to_jiffies(RADEON_IDLE_LOOP_MS));
- DRM_DEBUG("radeon: dynamic power management activated\n");
- }
- }
- else { /* count == 0 */
- if (rdev->pm.state != PM_STATE_MINIMUM) {
- cancel_delayed_work(&rdev->pm.idle_work);
-
- rdev->pm.state = PM_STATE_MINIMUM;
- rdev->pm.planned_action = PM_ACTION_MINIMUM;
- radeon_pm_set_clocks(rdev);
+ if (rdev->pm.pm_method == PM_METHOD_PROFILE) {
+ radeon_pm_update_profile(rdev);
+ radeon_pm_set_clocks(rdev);
+ } else if (rdev->pm.pm_method == PM_METHOD_DYNPM) {
+ if (rdev->pm.dynpm_state != DYNPM_STATE_DISABLED) {
+ if (rdev->pm.active_crtc_count > 1) {
+ if (rdev->pm.dynpm_state == DYNPM_STATE_ACTIVE) {
+ cancel_delayed_work(&rdev->pm.dynpm_idle_work);
+
+ rdev->pm.dynpm_state = DYNPM_STATE_PAUSED;
+ rdev->pm.dynpm_planned_action = DYNPM_ACTION_DEFAULT;
+ radeon_pm_get_dynpm_state(rdev);
+ radeon_pm_set_clocks(rdev);
+
+ DRM_DEBUG("radeon: dynamic power management deactivated\n");
+ }
+ } else if (rdev->pm.active_crtc_count == 1) {
+ /* TODO: Increase clocks if needed for current mode */
+
+ if (rdev->pm.dynpm_state == DYNPM_STATE_MINIMUM) {
+ rdev->pm.dynpm_state = DYNPM_STATE_ACTIVE;
+ rdev->pm.dynpm_planned_action = DYNPM_ACTION_UPCLOCK;
+ radeon_pm_get_dynpm_state(rdev);
+ radeon_pm_set_clocks(rdev);
+
+ queue_delayed_work(rdev->wq, &rdev->pm.dynpm_idle_work,
+ msecs_to_jiffies(RADEON_IDLE_LOOP_MS));
+ } else if (rdev->pm.dynpm_state == DYNPM_STATE_PAUSED) {
+ rdev->pm.dynpm_state = DYNPM_STATE_ACTIVE;
+ queue_delayed_work(rdev->wq, &rdev->pm.dynpm_idle_work,
+ msecs_to_jiffies(RADEON_IDLE_LOOP_MS));
+ DRM_DEBUG("radeon: dynamic power management activated\n");
+ }
+ } else { /* count == 0 */
+ if (rdev->pm.dynpm_state != DYNPM_STATE_MINIMUM) {
+ cancel_delayed_work(&rdev->pm.dynpm_idle_work);
+
+ rdev->pm.dynpm_state = DYNPM_STATE_MINIMUM;
+ rdev->pm.dynpm_planned_action = DYNPM_ACTION_MINIMUM;
+ radeon_pm_get_dynpm_state(rdev);
+ radeon_pm_set_clocks(rdev);
+ }
+ }
}
}
mutex_unlock(&rdev->pm.mutex);
}
-static bool radeon_pm_debug_check_in_vbl(struct radeon_device *rdev, bool finish)
+static bool radeon_pm_in_vbl(struct radeon_device *rdev)
{
- u32 stat_crtc1 = 0, stat_crtc2 = 0;
+ u32 stat_crtc = 0, vbl = 0, position = 0;
bool in_vbl = true;
- if (ASIC_IS_AVIVO(rdev)) {
+ if (ASIC_IS_DCE4(rdev)) {
+ if (rdev->pm.active_crtcs & (1 << 0)) {
+ vbl = RREG32(EVERGREEN_CRTC_V_BLANK_START_END +
+ EVERGREEN_CRTC0_REGISTER_OFFSET) & 0xfff;
+ position = RREG32(EVERGREEN_CRTC_STATUS_POSITION +
+ EVERGREEN_CRTC0_REGISTER_OFFSET) & 0xfff;
+ }
+ if (rdev->pm.active_crtcs & (1 << 1)) {
+ vbl = RREG32(EVERGREEN_CRTC_V_BLANK_START_END +
+ EVERGREEN_CRTC1_REGISTER_OFFSET) & 0xfff;
+ position = RREG32(EVERGREEN_CRTC_STATUS_POSITION +
+ EVERGREEN_CRTC1_REGISTER_OFFSET) & 0xfff;
+ }
+ if (rdev->pm.active_crtcs & (1 << 2)) {
+ vbl = RREG32(EVERGREEN_CRTC_V_BLANK_START_END +
+ EVERGREEN_CRTC2_REGISTER_OFFSET) & 0xfff;
+ position = RREG32(EVERGREEN_CRTC_STATUS_POSITION +
+ EVERGREEN_CRTC2_REGISTER_OFFSET) & 0xfff;
+ }
+ if (rdev->pm.active_crtcs & (1 << 3)) {
+ vbl = RREG32(EVERGREEN_CRTC_V_BLANK_START_END +
+ EVERGREEN_CRTC3_REGISTER_OFFSET) & 0xfff;
+ position = RREG32(EVERGREEN_CRTC_STATUS_POSITION +
+ EVERGREEN_CRTC3_REGISTER_OFFSET) & 0xfff;
+ }
+ if (rdev->pm.active_crtcs & (1 << 4)) {
+ vbl = RREG32(EVERGREEN_CRTC_V_BLANK_START_END +
+ EVERGREEN_CRTC4_REGISTER_OFFSET) & 0xfff;
+ position = RREG32(EVERGREEN_CRTC_STATUS_POSITION +
+ EVERGREEN_CRTC4_REGISTER_OFFSET) & 0xfff;
+ }
+ if (rdev->pm.active_crtcs & (1 << 5)) {
+ vbl = RREG32(EVERGREEN_CRTC_V_BLANK_START_END +
+ EVERGREEN_CRTC5_REGISTER_OFFSET) & 0xfff;
+ position = RREG32(EVERGREEN_CRTC_STATUS_POSITION +
+ EVERGREEN_CRTC5_REGISTER_OFFSET) & 0xfff;
+ }
+ } else if (ASIC_IS_AVIVO(rdev)) {
+ if (rdev->pm.active_crtcs & (1 << 0)) {
+ vbl = RREG32(AVIVO_D1CRTC_V_BLANK_START_END) & 0xfff;
+ position = RREG32(AVIVO_D1CRTC_STATUS_POSITION) & 0xfff;
+ }
+ if (rdev->pm.active_crtcs & (1 << 1)) {
+ vbl = RREG32(AVIVO_D2CRTC_V_BLANK_START_END) & 0xfff;
+ position = RREG32(AVIVO_D2CRTC_STATUS_POSITION) & 0xfff;
+ }
+ if (position < vbl && position > 1)
+ in_vbl = false;
+ } else {
if (rdev->pm.active_crtcs & (1 << 0)) {
- stat_crtc1 = RREG32(D1CRTC_STATUS);
- if (!(stat_crtc1 & 1))
+ stat_crtc = RREG32(RADEON_CRTC_STATUS);
+ if (!(stat_crtc & 1))
in_vbl = false;
}
if (rdev->pm.active_crtcs & (1 << 1)) {
- stat_crtc2 = RREG32(D2CRTC_STATUS);
- if (!(stat_crtc2 & 1))
+ stat_crtc = RREG32(RADEON_CRTC2_STATUS);
+ if (!(stat_crtc & 1))
in_vbl = false;
}
}
- if (in_vbl == false)
- DRM_INFO("not in vbl for pm change %08x %08x at %s\n", stat_crtc1,
- stat_crtc2, finish ? "exit" : "entry");
- return in_vbl;
-}
-static void radeon_pm_set_clocks_locked(struct radeon_device *rdev)
-{
- /*radeon_fence_wait_last(rdev);*/
- switch (rdev->pm.planned_action) {
- case PM_ACTION_UPCLOCK:
- rdev->pm.downclocked = false;
- break;
- case PM_ACTION_DOWNCLOCK:
- rdev->pm.downclocked = true;
- break;
- case PM_ACTION_MINIMUM:
- break;
- case PM_ACTION_NONE:
- DRM_ERROR("%s: PM_ACTION_NONE\n", __func__);
- break;
- }
- radeon_set_power_state(rdev);
- rdev->pm.planned_action = PM_ACTION_NONE;
+ if (position < vbl && position > 1)
+ in_vbl = false;
+
+ return in_vbl;
}
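The AVIVO and DCE4 branches read the vblank start line (V_BLANK_START_END & 0xfff) and the current scanline (STATUS_POSITION & 0xfff), then treat any position between line 2 and the vblank start as active video. A sketch of that window test:

/* sketch: the CRTC is scanning active video when its position counter
 * sits between line 2 and the vblank start line; in_vbl is the negation */
static bool example_in_active_video(u32 position, u32 vbl_start)
{
	return position > 1 && position < vbl_start;
}

Pre-AVIVO parts skip the arithmetic and read the in-vblank status bit from CRTC_STATUS directly, as in the else branch above.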
-static void radeon_pm_set_clocks(struct radeon_device *rdev)
+static bool radeon_pm_debug_check_in_vbl(struct radeon_device *rdev, bool finish)
{
- radeon_get_power_state(rdev, rdev->pm.planned_action);
- mutex_lock(&rdev->cp.mutex);
+ u32 stat_crtc = 0;
+ bool in_vbl = radeon_pm_in_vbl(rdev);
- if (rdev->pm.active_crtcs & (1 << 0)) {
- rdev->pm.req_vblank |= (1 << 0);
- drm_vblank_get(rdev->ddev, 0);
- }
- if (rdev->pm.active_crtcs & (1 << 1)) {
- rdev->pm.req_vblank |= (1 << 1);
- drm_vblank_get(rdev->ddev, 1);
- }
- radeon_pm_set_clocks_locked(rdev);
- if (rdev->pm.req_vblank & (1 << 0)) {
- rdev->pm.req_vblank &= ~(1 << 0);
- drm_vblank_put(rdev->ddev, 0);
- }
- if (rdev->pm.req_vblank & (1 << 1)) {
- rdev->pm.req_vblank &= ~(1 << 1);
- drm_vblank_put(rdev->ddev, 1);
- }
-
- mutex_unlock(&rdev->cp.mutex);
+ if (in_vbl == false)
+ DRM_DEBUG("not in vbl for pm change %08x at %s\n", stat_crtc,
+ finish ? "exit" : "entry");
+ return in_vbl;
}
-static void radeon_pm_idle_work_handler(struct work_struct *work)
+static void radeon_dynpm_idle_work_handler(struct work_struct *work)
{
struct radeon_device *rdev;
+ int resched;
rdev = container_of(work, struct radeon_device,
- pm.idle_work.work);
+ pm.dynpm_idle_work.work);
+ resched = ttm_bo_lock_delayed_workqueue(&rdev->mman.bdev);
mutex_lock(&rdev->pm.mutex);
- if (rdev->pm.state == PM_STATE_ACTIVE) {
+ if (rdev->pm.dynpm_state == DYNPM_STATE_ACTIVE) {
unsigned long irq_flags;
int not_processed = 0;
@@ -421,35 +659,40 @@ static void radeon_pm_idle_work_handler(struct work_struct *work)
read_unlock_irqrestore(&rdev->fence_drv.lock, irq_flags);
if (not_processed >= 3) { /* should upclock */
- if (rdev->pm.planned_action == PM_ACTION_DOWNCLOCK) {
- rdev->pm.planned_action = PM_ACTION_NONE;
- } else if (rdev->pm.planned_action == PM_ACTION_NONE &&
- rdev->pm.downclocked) {
- rdev->pm.planned_action =
- PM_ACTION_UPCLOCK;
- rdev->pm.action_timeout = jiffies +
+ if (rdev->pm.dynpm_planned_action == DYNPM_ACTION_DOWNCLOCK) {
+ rdev->pm.dynpm_planned_action = DYNPM_ACTION_NONE;
+ } else if (rdev->pm.dynpm_planned_action == DYNPM_ACTION_NONE &&
+ rdev->pm.dynpm_can_upclock) {
+ rdev->pm.dynpm_planned_action =
+ DYNPM_ACTION_UPCLOCK;
+ rdev->pm.dynpm_action_timeout = jiffies +
msecs_to_jiffies(RADEON_RECLOCK_DELAY_MS);
}
} else if (not_processed == 0) { /* should downclock */
- if (rdev->pm.planned_action == PM_ACTION_UPCLOCK) {
- rdev->pm.planned_action = PM_ACTION_NONE;
- } else if (rdev->pm.planned_action == PM_ACTION_NONE &&
- !rdev->pm.downclocked) {
- rdev->pm.planned_action =
- PM_ACTION_DOWNCLOCK;
- rdev->pm.action_timeout = jiffies +
+ if (rdev->pm.dynpm_planned_action == DYNPM_ACTION_UPCLOCK) {
+ rdev->pm.dynpm_planned_action = DYNPM_ACTION_NONE;
+ } else if (rdev->pm.dynpm_planned_action == DYNPM_ACTION_NONE &&
+ rdev->pm.dynpm_can_downclock) {
+ rdev->pm.dynpm_planned_action =
+ DYNPM_ACTION_DOWNCLOCK;
+ rdev->pm.dynpm_action_timeout = jiffies +
msecs_to_jiffies(RADEON_RECLOCK_DELAY_MS);
}
}
- if (rdev->pm.planned_action != PM_ACTION_NONE &&
- jiffies > rdev->pm.action_timeout) {
+ /* Note, radeon_pm_set_clocks is called with static_switch set
+ * to false since we want to wait for vbl to avoid flicker.
+ */
+ if (rdev->pm.dynpm_planned_action != DYNPM_ACTION_NONE &&
+ jiffies > rdev->pm.dynpm_action_timeout) {
+ radeon_pm_get_dynpm_state(rdev);
radeon_pm_set_clocks(rdev);
}
}
mutex_unlock(&rdev->pm.mutex);
+ ttm_bo_unlock_delayed_workqueue(&rdev->mman.bdev, resched);
- queue_delayed_work(rdev->wq, &rdev->pm.idle_work,
+ queue_delayed_work(rdev->wq, &rdev->pm.dynpm_idle_work,
msecs_to_jiffies(RADEON_IDLE_LOOP_MS));
}
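The worker above implements a simple hysteresis: three or more unprocessed fences plan an upclock (or cancel a pending downclock), zero unprocessed fences plan the mirror-image downclock, and a planned action only fires once RADEON_RECLOCK_DELAY_MS (200 ms) has elapsed, so momentary spikes do not thrash the clocks. The decision restated as a pure function, a sketch using the enums from this patch:

static int example_plan_action(int not_processed, int planned,
			       bool can_up, bool can_down)
{
	if (not_processed >= 3) {		/* GPU falling behind */
		if (planned == DYNPM_ACTION_DOWNCLOCK)
			return DYNPM_ACTION_NONE;
		if (planned == DYNPM_ACTION_NONE && can_up)
			return DYNPM_ACTION_UPCLOCK;
	} else if (not_processed == 0) {	/* GPU idle */
		if (planned == DYNPM_ACTION_UPCLOCK)
			return DYNPM_ACTION_NONE;
		if (planned == DYNPM_ACTION_NONE && can_down)
			return DYNPM_ACTION_DOWNCLOCK;
	}
	return planned;
}

The surrounding ttm_bo_lock_delayed_workqueue()/unlock pair keeps TTM's delayed-delete work from racing with the reclock.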
@@ -464,7 +707,6 @@ static int radeon_debugfs_pm_info(struct seq_file *m, void *data)
struct drm_device *dev = node->minor->dev;
struct radeon_device *rdev = dev->dev_private;
- seq_printf(m, "state: %s\n", pm_state_names[rdev->pm.state]);
seq_printf(m, "default engine clock: %u0 kHz\n", rdev->clock.default_sclk);
seq_printf(m, "current engine clock: %u0 kHz\n", radeon_get_engine_clock(rdev));
seq_printf(m, "default memory clock: %u0 kHz\n", rdev->clock.default_mclk);
diff --git a/drivers/gpu/drm/radeon/radeon_reg.h b/drivers/gpu/drm/radeon/radeon_reg.h
index eabbc9cf30a..c332f46340d 100644
--- a/drivers/gpu/drm/radeon/radeon_reg.h
+++ b/drivers/gpu/drm/radeon/radeon_reg.h
@@ -553,7 +553,6 @@
# define RADEON_CRTC_CRNT_VLINE_MASK (0x7ff << 16)
#define RADEON_CRTC2_CRNT_FRAME 0x0314
#define RADEON_CRTC2_GUI_TRIG_VLINE 0x0318
-#define RADEON_CRTC2_STATUS 0x03fc
#define RADEON_CRTC2_VLINE_CRNT_VLINE 0x0310
#define RADEON_CRTC8_DATA 0x03d5 /* VGA, 0x3b5 */
#define RADEON_CRTC8_IDX 0x03d4 /* VGA, 0x3b4 */
@@ -995,6 +994,7 @@
# define RADEON_FP_DETECT_MASK (1 << 4)
# define RADEON_CRTC2_VBLANK_MASK (1 << 9)
# define RADEON_FP2_DETECT_MASK (1 << 10)
+# define RADEON_GUI_IDLE_MASK (1 << 19)
# define RADEON_SW_INT_ENABLE (1 << 25)
#define RADEON_GEN_INT_STATUS 0x0044
# define AVIVO_DISPLAY_INT_STATUS (1 << 0)
@@ -1006,6 +1006,8 @@
# define RADEON_CRTC2_VBLANK_STAT_ACK (1 << 9)
# define RADEON_FP2_DETECT_STAT (1 << 10)
# define RADEON_FP2_DETECT_STAT_ACK (1 << 10)
+# define RADEON_GUI_IDLE_STAT (1 << 19)
+# define RADEON_GUI_IDLE_STAT_ACK (1 << 19)
# define RADEON_SW_INT_FIRE (1 << 26)
# define RADEON_SW_INT_TEST (1 << 25)
# define RADEON_SW_INT_TEST_ACK (1 << 25)
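The three new defines all name bit 19: an enable mask for the interrupt control register and a status/ack bit in GEN_INT_STATUS. A sketch of the arm/ack sequence on pre-AVIVO parts; treating RADEON_GEN_INT_CNTL as the matching enable register is my reading of the header, not something this hunk shows:

/* arm the gui-idle interrupt, then acknowledge it once it fires */
u32 tmp = RREG32(RADEON_GEN_INT_CNTL);
WREG32(RADEON_GEN_INT_CNTL, tmp | RADEON_GUI_IDLE_MASK);
/* ... interrupt handler observes RADEON_GUI_IDLE_STAT ... */
WREG32(RADEON_GEN_INT_STATUS, RADEON_GUI_IDLE_STAT_ACK);

The rs600 equivalents (S_000040_GUI_IDLE and friends) appear in the rs600.c hunks below.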
diff --git a/drivers/gpu/drm/radeon/radeon_ring.c b/drivers/gpu/drm/radeon/radeon_ring.c
index f6e1e8d4d98..261e98a276d 100644
--- a/drivers/gpu/drm/radeon/radeon_ring.c
+++ b/drivers/gpu/drm/radeon/radeon_ring.c
@@ -219,24 +219,26 @@ int radeon_ib_pool_init(struct radeon_device *rdev)
void radeon_ib_pool_fini(struct radeon_device *rdev)
{
int r;
+ struct radeon_bo *robj;
if (!rdev->ib_pool.ready) {
return;
}
mutex_lock(&rdev->ib_pool.mutex);
radeon_ib_bogus_cleanup(rdev);
+ robj = rdev->ib_pool.robj;
+ rdev->ib_pool.robj = NULL;
+ mutex_unlock(&rdev->ib_pool.mutex);
- if (rdev->ib_pool.robj) {
- r = radeon_bo_reserve(rdev->ib_pool.robj, false);
+ if (robj) {
+ r = radeon_bo_reserve(robj, false);
if (likely(r == 0)) {
- radeon_bo_kunmap(rdev->ib_pool.robj);
- radeon_bo_unpin(rdev->ib_pool.robj);
- radeon_bo_unreserve(rdev->ib_pool.robj);
+ radeon_bo_kunmap(robj);
+ radeon_bo_unpin(robj);
+ radeon_bo_unreserve(robj);
}
- radeon_bo_unref(&rdev->ib_pool.robj);
- rdev->ib_pool.robj = NULL;
+ radeon_bo_unref(&robj);
}
- mutex_unlock(&rdev->ib_pool.mutex);
}
@@ -258,31 +260,41 @@ void radeon_ring_free_size(struct radeon_device *rdev)
}
}
-int radeon_ring_lock(struct radeon_device *rdev, unsigned ndw)
+int radeon_ring_alloc(struct radeon_device *rdev, unsigned ndw)
{
int r;
/* Align requested size with padding so unlock_commit can
* pad safely */
ndw = (ndw + rdev->cp.align_mask) & ~rdev->cp.align_mask;
- mutex_lock(&rdev->cp.mutex);
while (ndw > (rdev->cp.ring_free_dw - 1)) {
radeon_ring_free_size(rdev);
if (ndw < rdev->cp.ring_free_dw) {
break;
}
r = radeon_fence_wait_next(rdev);
- if (r) {
- mutex_unlock(&rdev->cp.mutex);
+ if (r)
return r;
- }
}
rdev->cp.count_dw = ndw;
rdev->cp.wptr_old = rdev->cp.wptr;
return 0;
}
-void radeon_ring_unlock_commit(struct radeon_device *rdev)
+int radeon_ring_lock(struct radeon_device *rdev, unsigned ndw)
+{
+ int r;
+
+ mutex_lock(&rdev->cp.mutex);
+ r = radeon_ring_alloc(rdev, ndw);
+ if (r) {
+ mutex_unlock(&rdev->cp.mutex);
+ return r;
+ }
+ return 0;
+}
+
+void radeon_ring_commit(struct radeon_device *rdev)
{
unsigned count_dw_pad;
unsigned i;
@@ -295,6 +307,11 @@ void radeon_ring_unlock_commit(struct radeon_device *rdev)
}
DRM_MEMORYBARRIER();
radeon_cp_commit(rdev);
+}
+
+void radeon_ring_unlock_commit(struct radeon_device *rdev)
+{
+ radeon_ring_commit(rdev);
mutex_unlock(&rdev->cp.mutex);
}
@@ -344,20 +361,23 @@ int radeon_ring_init(struct radeon_device *rdev, unsigned ring_size)
void radeon_ring_fini(struct radeon_device *rdev)
{
int r;
+ struct radeon_bo *ring_obj;
mutex_lock(&rdev->cp.mutex);
- if (rdev->cp.ring_obj) {
- r = radeon_bo_reserve(rdev->cp.ring_obj, false);
+ ring_obj = rdev->cp.ring_obj;
+ rdev->cp.ring = NULL;
+ rdev->cp.ring_obj = NULL;
+ mutex_unlock(&rdev->cp.mutex);
+
+ if (ring_obj) {
+ r = radeon_bo_reserve(ring_obj, false);
if (likely(r == 0)) {
- radeon_bo_kunmap(rdev->cp.ring_obj);
- radeon_bo_unpin(rdev->cp.ring_obj);
- radeon_bo_unreserve(rdev->cp.ring_obj);
+ radeon_bo_kunmap(ring_obj);
+ radeon_bo_unpin(ring_obj);
+ radeon_bo_unreserve(ring_obj);
}
- radeon_bo_unref(&rdev->cp.ring_obj);
- rdev->cp.ring = NULL;
- rdev->cp.ring_obj = NULL;
+ radeon_bo_unref(&ring_obj);
}
- mutex_unlock(&rdev->cp.mutex);
}
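radeon_ring_lock() is now mutex acquisition plus radeon_ring_alloc(), and radeon_ring_unlock_commit() is radeon_ring_commit() plus the unlock. The point of the split is callers that already hold cp.mutex; radeon_pm_set_clocks() earlier in this patch uses the lock-free halves to drain the GPU with a fence:

/* from the radeon_pm.c hunk above, with rdev->cp.mutex already held */
struct radeon_fence *fence;

radeon_ring_alloc(rdev, 64);
radeon_fence_create(rdev, &fence);
radeon_fence_emit(rdev, fence);
radeon_ring_commit(rdev);
radeon_fence_wait(fence, false);
radeon_fence_unref(&fence);

radeon_ring_fini() and radeon_ib_pool_fini() also now drop their mutexes before the slow reserve/unmap/unref sequence, holding them only while detaching the object pointers.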
diff --git a/drivers/gpu/drm/radeon/radeon_state.c b/drivers/gpu/drm/radeon/radeon_state.c
index 40ab6d9c373..cc5316dcf58 100644
--- a/drivers/gpu/drm/radeon/radeon_state.c
+++ b/drivers/gpu/drm/radeon/radeon_state.c
@@ -424,7 +424,7 @@ static __inline__ int radeon_check_and_fixup_packet3(drm_radeon_private_t *
if ((*cmd & RADEON_GMC_SRC_PITCH_OFFSET_CNTL) &&
(*cmd & RADEON_GMC_DST_PITCH_OFFSET_CNTL)) {
u32 *cmd3 = drm_buffer_pointer_to_dword(cmdbuf->buffer, 3);
- offset = *cmd << 10;
+ offset = *cmd3 << 10;
if (radeon_check_and_fixup_offset
(dev_priv, file_priv, &offset)) {
DRM_ERROR("Invalid second packet offset\n");
@@ -2895,9 +2895,12 @@ static int radeon_cp_cmdbuf(struct drm_device *dev, void *data,
return rv;
rv = drm_buffer_copy_from_user(cmdbuf->buffer, buffer,
cmdbuf->bufsz);
- if (rv)
+ if (rv) {
+ drm_buffer_free(cmdbuf->buffer);
return rv;
- }
+ }
+ } else
+ goto done;
orig_nbox = cmdbuf->nbox;
@@ -2905,8 +2908,7 @@ static int radeon_cp_cmdbuf(struct drm_device *dev, void *data,
int temp;
temp = r300_do_cp_cmdbuf(dev, file_priv, cmdbuf);
- if (cmdbuf->bufsz != 0)
- drm_buffer_free(cmdbuf->buffer);
+ drm_buffer_free(cmdbuf->buffer);
return temp;
}
@@ -3012,16 +3014,15 @@ static int radeon_cp_cmdbuf(struct drm_device *dev, void *data,
}
}
- if (cmdbuf->bufsz != 0)
- drm_buffer_free(cmdbuf->buffer);
+ drm_buffer_free(cmdbuf->buffer);
+ done:
DRM_DEBUG("DONE\n");
COMMIT_RING();
return 0;
err:
- if (cmdbuf->bufsz != 0)
- drm_buffer_free(cmdbuf->buffer);
+ drm_buffer_free(cmdbuf->buffer);
return -EINVAL;
}
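Two fixes here. First, the packet3 check fetched a pointer to dword 3 but then shifted dword 0, so the wrong word was validated:

u32 *cmd3 = drm_buffer_pointer_to_dword(cmdbuf->buffer, 3);
offset = *cmd3 << 10;	/* was '*cmd << 10', i.e. dword 0 */

Second, radeon_cp_cmdbuf() leaked cmdbuf->buffer when drm_buffer_copy_from_user() failed; the buffer is now freed on that path, every later exit frees it unconditionally, and the bufsz == 0 case jumps straight to the new done: label.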
diff --git a/drivers/gpu/drm/radeon/radeon_ttm.c b/drivers/gpu/drm/radeon/radeon_ttm.c
index d031b686308..e9918d88f5b 100644
--- a/drivers/gpu/drm/radeon/radeon_ttm.c
+++ b/drivers/gpu/drm/radeon/radeon_ttm.c
@@ -33,6 +33,7 @@
#include <ttm/ttm_bo_driver.h>
#include <ttm/ttm_placement.h>
#include <ttm/ttm_module.h>
+#include <ttm/ttm_page_alloc.h>
#include <drm/drmP.h>
#include <drm/radeon_drm.h>
#include <linux/seq_file.h>
@@ -162,34 +163,21 @@ static int radeon_init_mem_type(struct ttm_bo_device *bdev, uint32_t type,
(unsigned)type);
return -EINVAL;
}
- man->io_offset = rdev->mc.agp_base;
- man->io_size = rdev->mc.gtt_size;
- man->io_addr = NULL;
if (!rdev->ddev->agp->cant_use_aperture)
- man->flags = TTM_MEMTYPE_FLAG_NEEDS_IOREMAP |
- TTM_MEMTYPE_FLAG_MAPPABLE;
+ man->flags = TTM_MEMTYPE_FLAG_MAPPABLE;
man->available_caching = TTM_PL_FLAG_UNCACHED |
TTM_PL_FLAG_WC;
man->default_caching = TTM_PL_FLAG_WC;
- } else
-#endif
- {
- man->io_offset = 0;
- man->io_size = 0;
- man->io_addr = NULL;
}
+#endif
break;
case TTM_PL_VRAM:
/* "On-card" video ram */
man->gpu_offset = rdev->mc.vram_start;
man->flags = TTM_MEMTYPE_FLAG_FIXED |
- TTM_MEMTYPE_FLAG_NEEDS_IOREMAP |
TTM_MEMTYPE_FLAG_MAPPABLE;
man->available_caching = TTM_PL_FLAG_UNCACHED | TTM_PL_FLAG_WC;
man->default_caching = TTM_PL_FLAG_WC;
- man->io_addr = NULL;
- man->io_offset = rdev->mc.aper_base;
- man->io_size = rdev->mc.aper_size;
break;
default:
DRM_ERROR("Unsupported memory type %u\n", (unsigned)type);
@@ -244,9 +232,9 @@ static void radeon_move_null(struct ttm_buffer_object *bo,
}
static int radeon_move_blit(struct ttm_buffer_object *bo,
- bool evict, int no_wait,
- struct ttm_mem_reg *new_mem,
- struct ttm_mem_reg *old_mem)
+ bool evict, int no_wait_reserve, bool no_wait_gpu,
+ struct ttm_mem_reg *new_mem,
+ struct ttm_mem_reg *old_mem)
{
struct radeon_device *rdev;
uint64_t old_start, new_start;
@@ -290,13 +278,14 @@ static int radeon_move_blit(struct ttm_buffer_object *bo,
r = radeon_copy(rdev, old_start, new_start, new_mem->num_pages, fence);
/* FIXME: handle copy error */
r = ttm_bo_move_accel_cleanup(bo, (void *)fence, NULL,
- evict, no_wait, new_mem);
+ evict, no_wait_reserve, no_wait_gpu, new_mem);
radeon_fence_unref(&fence);
return r;
}
static int radeon_move_vram_ram(struct ttm_buffer_object *bo,
- bool evict, bool interruptible, bool no_wait,
+ bool evict, bool interruptible,
+ bool no_wait_reserve, bool no_wait_gpu,
struct ttm_mem_reg *new_mem)
{
struct radeon_device *rdev;
@@ -317,7 +306,7 @@ static int radeon_move_vram_ram(struct ttm_buffer_object *bo,
placement.busy_placement = &placements;
placements = TTM_PL_MASK_CACHING | TTM_PL_FLAG_TT;
r = ttm_bo_mem_space(bo, &placement, &tmp_mem,
- interruptible, no_wait);
+ interruptible, no_wait_reserve, no_wait_gpu);
if (unlikely(r)) {
return r;
}
@@ -331,11 +320,11 @@ static int radeon_move_vram_ram(struct ttm_buffer_object *bo,
if (unlikely(r)) {
goto out_cleanup;
}
- r = radeon_move_blit(bo, true, no_wait, &tmp_mem, old_mem);
+ r = radeon_move_blit(bo, true, no_wait_reserve, no_wait_gpu, &tmp_mem, old_mem);
if (unlikely(r)) {
goto out_cleanup;
}
- r = ttm_bo_move_ttm(bo, true, no_wait, new_mem);
+ r = ttm_bo_move_ttm(bo, true, no_wait_reserve, no_wait_gpu, new_mem);
out_cleanup:
if (tmp_mem.mm_node) {
struct ttm_bo_global *glob = rdev->mman.bdev.glob;
@@ -349,7 +338,8 @@ out_cleanup:
}
static int radeon_move_ram_vram(struct ttm_buffer_object *bo,
- bool evict, bool interruptible, bool no_wait,
+ bool evict, bool interruptible,
+ bool no_wait_reserve, bool no_wait_gpu,
struct ttm_mem_reg *new_mem)
{
struct radeon_device *rdev;
@@ -369,15 +359,15 @@ static int radeon_move_ram_vram(struct ttm_buffer_object *bo,
placement.num_busy_placement = 1;
placement.busy_placement = &placements;
placements = TTM_PL_MASK_CACHING | TTM_PL_FLAG_TT;
- r = ttm_bo_mem_space(bo, &placement, &tmp_mem, interruptible, no_wait);
+ r = ttm_bo_mem_space(bo, &placement, &tmp_mem, interruptible, no_wait_reserve, no_wait_gpu);
if (unlikely(r)) {
return r;
}
- r = ttm_bo_move_ttm(bo, true, no_wait, &tmp_mem);
+ r = ttm_bo_move_ttm(bo, true, no_wait_reserve, no_wait_gpu, &tmp_mem);
if (unlikely(r)) {
goto out_cleanup;
}
- r = radeon_move_blit(bo, true, no_wait, new_mem, old_mem);
+ r = radeon_move_blit(bo, true, no_wait_reserve, no_wait_gpu, new_mem, old_mem);
if (unlikely(r)) {
goto out_cleanup;
}
@@ -394,8 +384,9 @@ out_cleanup:
}
static int radeon_bo_move(struct ttm_buffer_object *bo,
- bool evict, bool interruptible, bool no_wait,
- struct ttm_mem_reg *new_mem)
+ bool evict, bool interruptible,
+ bool no_wait_reserve, bool no_wait_gpu,
+ struct ttm_mem_reg *new_mem)
{
struct radeon_device *rdev;
struct ttm_mem_reg *old_mem = &bo->mem;
@@ -422,23 +413,66 @@ static int radeon_bo_move(struct ttm_buffer_object *bo,
if (old_mem->mem_type == TTM_PL_VRAM &&
new_mem->mem_type == TTM_PL_SYSTEM) {
r = radeon_move_vram_ram(bo, evict, interruptible,
- no_wait, new_mem);
+ no_wait_reserve, no_wait_gpu, new_mem);
} else if (old_mem->mem_type == TTM_PL_SYSTEM &&
new_mem->mem_type == TTM_PL_VRAM) {
r = radeon_move_ram_vram(bo, evict, interruptible,
- no_wait, new_mem);
+ no_wait_reserve, no_wait_gpu, new_mem);
} else {
- r = radeon_move_blit(bo, evict, no_wait, new_mem, old_mem);
+ r = radeon_move_blit(bo, evict, no_wait_reserve, no_wait_gpu, new_mem, old_mem);
}
if (r) {
memcpy:
- r = ttm_bo_move_memcpy(bo, evict, no_wait, new_mem);
+ r = ttm_bo_move_memcpy(bo, evict, no_wait_reserve, no_wait_gpu, new_mem);
}
-
return r;
}
+static int radeon_ttm_io_mem_reserve(struct ttm_bo_device *bdev, struct ttm_mem_reg *mem)
+{
+ struct ttm_mem_type_manager *man = &bdev->man[mem->mem_type];
+ struct radeon_device *rdev = radeon_get_rdev(bdev);
+
+ mem->bus.addr = NULL;
+ mem->bus.offset = 0;
+ mem->bus.size = mem->num_pages << PAGE_SHIFT;
+ mem->bus.base = 0;
+ mem->bus.is_iomem = false;
+ if (!(man->flags & TTM_MEMTYPE_FLAG_MAPPABLE))
+ return -EINVAL;
+ switch (mem->mem_type) {
+ case TTM_PL_SYSTEM:
+ /* system memory */
+ return 0;
+ case TTM_PL_TT:
+#if __OS_HAS_AGP
+ if (rdev->flags & RADEON_IS_AGP) {
+ /* RADEON_IS_AGP is set only if AGP is active */
+ mem->bus.offset = mem->mm_node->start << PAGE_SHIFT;
+ mem->bus.base = rdev->mc.agp_base;
+ mem->bus.is_iomem = !rdev->ddev->agp->cant_use_aperture;
+ }
+#endif
+ break;
+ case TTM_PL_VRAM:
+ mem->bus.offset = mem->mm_node->start << PAGE_SHIFT;
+ /* check if it's visible */
+ if ((mem->bus.offset + mem->bus.size) > rdev->mc.visible_vram_size)
+ return -EINVAL;
+ mem->bus.base = rdev->mc.aper_base;
+ mem->bus.is_iomem = true;
+ break;
+ default:
+ return -EINVAL;
+ }
+ return 0;
+}
+
+static void radeon_ttm_io_mem_free(struct ttm_bo_device *bdev, struct ttm_mem_reg *mem)
+{
+}
+
static int radeon_sync_obj_wait(void *sync_obj, void *sync_arg,
bool lazy, bool interruptible)
{
@@ -479,6 +513,8 @@ static struct ttm_bo_driver radeon_bo_driver = {
.sync_obj_ref = &radeon_sync_obj_ref,
.move_notify = &radeon_bo_move_notify,
.fault_reserve_notify = &radeon_bo_fault_reserve_notify,
+ .io_mem_reserve = &radeon_ttm_io_mem_reserve,
+ .io_mem_free = &radeon_ttm_io_mem_free,
};
int radeon_ttm_init(struct radeon_device *rdev)
@@ -571,13 +607,17 @@ static const struct vm_operations_struct *ttm_vm_ops = NULL;
static int radeon_ttm_fault(struct vm_area_struct *vma, struct vm_fault *vmf)
{
struct ttm_buffer_object *bo;
+ struct radeon_device *rdev;
int r;
- bo = (struct ttm_buffer_object *)vma->vm_private_data;
+ bo = (struct ttm_buffer_object *)vma->vm_private_data;
if (bo == NULL) {
return VM_FAULT_NOPAGE;
}
+ rdev = radeon_get_rdev(bo->bdev);
+ mutex_lock(&rdev->vram_mutex);
r = ttm_vm_ops->fault(vma, vmf);
+ mutex_unlock(&rdev->vram_mutex);
return r;
}
@@ -745,8 +785,8 @@ static int radeon_mm_dump_table(struct seq_file *m, void *data)
static int radeon_ttm_debugfs_init(struct radeon_device *rdev)
{
#if defined(CONFIG_DEBUG_FS)
- static struct drm_info_list radeon_mem_types_list[RADEON_DEBUGFS_MEM_TYPES];
- static char radeon_mem_types_names[RADEON_DEBUGFS_MEM_TYPES][32];
+ static struct drm_info_list radeon_mem_types_list[RADEON_DEBUGFS_MEM_TYPES+1];
+ static char radeon_mem_types_names[RADEON_DEBUGFS_MEM_TYPES+1][32];
unsigned i;
for (i = 0; i < RADEON_DEBUGFS_MEM_TYPES; i++) {
@@ -763,7 +803,13 @@ static int radeon_ttm_debugfs_init(struct radeon_device *rdev)
radeon_mem_types_list[i].data = &rdev->mman.bdev.man[TTM_PL_TT].manager;
}
- return radeon_debugfs_add_files(rdev, radeon_mem_types_list, RADEON_DEBUGFS_MEM_TYPES);
+ /* Add ttm page pool to debugfs */
+ sprintf(radeon_mem_types_names[i], "ttm_page_pool");
+ radeon_mem_types_list[i].name = radeon_mem_types_names[i];
+ radeon_mem_types_list[i].show = &ttm_page_alloc_debugfs;
+ radeon_mem_types_list[i].driver_features = 0;
+ radeon_mem_types_list[i].data = NULL;
+ return radeon_debugfs_add_files(rdev, radeon_mem_types_list, RADEON_DEBUGFS_MEM_TYPES+1);
#endif
return 0;
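The static per-memory-type io_offset/io_size fields are gone; TTM now calls the driver's io_mem_reserve() hook per mapping to fill a bus descriptor, and the hook doubles as the visible-VRAM check (mappings past the aperture return -EINVAL). A sketch of the address that results:

/* sketch: CPU-visible physical address of a mapped page after
 * radeon_ttm_io_mem_reserve() has filled mem->bus */
static unsigned long example_bus_addr(struct ttm_mem_reg *mem)
{
	/* VRAM: aper_base + node offset; AGP TT: agp_base + node offset */
	return mem->bus.base + mem->bus.offset;
}

radeon_ttm_fault() wrapping TTM's fault handler in vram_mutex pairs with the locking added in radeon_object.c, so CPU faults cannot race object creation and teardown.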
diff --git a/drivers/gpu/drm/radeon/rs400.c b/drivers/gpu/drm/radeon/rs400.c
index 1a41cb268b7..9e4240b3bf0 100644
--- a/drivers/gpu/drm/radeon/rs400.c
+++ b/drivers/gpu/drm/radeon/rs400.c
@@ -243,8 +243,6 @@ int rs400_mc_wait_for_idle(struct radeon_device *rdev)
void rs400_gpu_init(struct radeon_device *rdev)
{
- /* FIXME: HDP same place on rs400 ? */
- r100_hdp_reset(rdev);
/* FIXME: is this correct ? */
r420_pipes_init(rdev);
if (rs400_mc_wait_for_idle(rdev)) {
@@ -433,7 +431,7 @@ int rs400_resume(struct radeon_device *rdev)
/* setup MC before calling post tables */
rs400_mc_program(rdev);
/* Reset gpu before posting otherwise ATOM will enter infinite loop */
- if (radeon_gpu_reset(rdev)) {
+ if (radeon_asic_reset(rdev)) {
dev_warn(rdev->dev, "GPU reset failed ! (0xE40=0x%08X, 0x7C0=0x%08X)\n",
RREG32(R_000E40_RBBM_STATUS),
RREG32(R_0007C0_CP_STAT));
@@ -458,7 +456,6 @@ int rs400_suspend(struct radeon_device *rdev)
void rs400_fini(struct radeon_device *rdev)
{
- radeon_pm_fini(rdev);
r100_cp_fini(rdev);
r100_wb_fini(rdev);
r100_ib_fini(rdev);
@@ -497,7 +494,7 @@ int rs400_init(struct radeon_device *rdev)
return r;
}
/* Reset gpu before posting otherwise ATOM will enter infinite loop */
- if (radeon_gpu_reset(rdev)) {
+ if (radeon_asic_reset(rdev)) {
dev_warn(rdev->dev,
"GPU reset failed ! (0xE40=0x%08X, 0x7C0=0x%08X)\n",
RREG32(R_000E40_RBBM_STATUS),
@@ -509,8 +506,6 @@ int rs400_init(struct radeon_device *rdev)
/* Initialize clocks */
radeon_get_clock_info(rdev->ddev);
- /* Initialize power management */
- radeon_pm_init(rdev);
/* initialize memory controller */
rs400_mc_init(rdev);
/* Fence driver */
diff --git a/drivers/gpu/drm/radeon/rs600.c b/drivers/gpu/drm/radeon/rs600.c
index a81bc7a21e1..79887cac5b5 100644
--- a/drivers/gpu/drm/radeon/rs600.c
+++ b/drivers/gpu/drm/radeon/rs600.c
@@ -46,6 +46,135 @@
void rs600_gpu_init(struct radeon_device *rdev);
int rs600_mc_wait_for_idle(struct radeon_device *rdev);
+void rs600_pm_misc(struct radeon_device *rdev)
+{
+ int requested_index = rdev->pm.requested_power_state_index;
+ struct radeon_power_state *ps = &rdev->pm.power_state[requested_index];
+ struct radeon_voltage *voltage = &ps->clock_info[0].voltage;
+ u32 tmp, dyn_pwrmgt_sclk_length, dyn_sclk_vol_cntl;
+ u32 hdp_dyn_cntl, /*mc_host_dyn_cntl,*/ dyn_backbias_cntl;
+
+ if ((voltage->type == VOLTAGE_GPIO) && (voltage->gpio.valid)) {
+ if (ps->misc & ATOM_PM_MISCINFO_VOLTAGE_DROP_SUPPORT) {
+ tmp = RREG32(voltage->gpio.reg);
+ if (voltage->active_high)
+ tmp |= voltage->gpio.mask;
+ else
+ tmp &= ~(voltage->gpio.mask);
+ WREG32(voltage->gpio.reg, tmp);
+ if (voltage->delay)
+ udelay(voltage->delay);
+ } else {
+ tmp = RREG32(voltage->gpio.reg);
+ if (voltage->active_high)
+ tmp &= ~voltage->gpio.mask;
+ else
+ tmp |= voltage->gpio.mask;
+ WREG32(voltage->gpio.reg, tmp);
+ if (voltage->delay)
+ udelay(voltage->delay);
+ }
+ }
+
+ dyn_pwrmgt_sclk_length = RREG32_PLL(DYN_PWRMGT_SCLK_LENGTH);
+ dyn_pwrmgt_sclk_length &= ~REDUCED_POWER_SCLK_HILEN(0xf);
+ dyn_pwrmgt_sclk_length &= ~REDUCED_POWER_SCLK_LOLEN(0xf);
+ if (ps->misc & ATOM_PM_MISCINFO_ASIC_REDUCED_SPEED_SCLK_EN) {
+ if (ps->misc & ATOM_PM_MISCINFO_DYNAMIC_CLOCK_DIVIDER_BY_2) {
+ dyn_pwrmgt_sclk_length |= REDUCED_POWER_SCLK_HILEN(2);
+ dyn_pwrmgt_sclk_length |= REDUCED_POWER_SCLK_LOLEN(2);
+ } else if (ps->misc & ATOM_PM_MISCINFO_DYNAMIC_CLOCK_DIVIDER_BY_4) {
+ dyn_pwrmgt_sclk_length |= REDUCED_POWER_SCLK_HILEN(4);
+ dyn_pwrmgt_sclk_length |= REDUCED_POWER_SCLK_LOLEN(4);
+ }
+ } else {
+ dyn_pwrmgt_sclk_length |= REDUCED_POWER_SCLK_HILEN(1);
+ dyn_pwrmgt_sclk_length |= REDUCED_POWER_SCLK_LOLEN(1);
+ }
+ WREG32_PLL(DYN_PWRMGT_SCLK_LENGTH, dyn_pwrmgt_sclk_length);
+
+ dyn_sclk_vol_cntl = RREG32_PLL(DYN_SCLK_VOL_CNTL);
+ if (ps->misc & ATOM_PM_MISCINFO_ASIC_DYNAMIC_VOLTAGE_EN) {
+ dyn_sclk_vol_cntl |= IO_CG_VOLTAGE_DROP;
+ if (voltage->delay) {
+ dyn_sclk_vol_cntl |= VOLTAGE_DROP_SYNC;
+ dyn_sclk_vol_cntl |= VOLTAGE_DELAY_SEL(voltage->delay);
+ } else
+ dyn_sclk_vol_cntl &= ~VOLTAGE_DROP_SYNC;
+ } else
+ dyn_sclk_vol_cntl &= ~IO_CG_VOLTAGE_DROP;
+ WREG32_PLL(DYN_SCLK_VOL_CNTL, dyn_sclk_vol_cntl);
+
+ hdp_dyn_cntl = RREG32_PLL(HDP_DYN_CNTL);
+ if (ps->misc & ATOM_PM_MISCINFO_DYNAMIC_HDP_BLOCK_EN)
+ hdp_dyn_cntl &= ~HDP_FORCEON;
+ else
+ hdp_dyn_cntl |= HDP_FORCEON;
+ WREG32_PLL(HDP_DYN_CNTL, hdp_dyn_cntl);
+#if 0
+ /* mc_host_dyn seems to cause hangs from time to time */
+ mc_host_dyn_cntl = RREG32_PLL(MC_HOST_DYN_CNTL);
+ if (ps->misc & ATOM_PM_MISCINFO_DYNAMIC_MC_HOST_BLOCK_EN)
+ mc_host_dyn_cntl &= ~MC_HOST_FORCEON;
+ else
+ mc_host_dyn_cntl |= MC_HOST_FORCEON;
+ WREG32_PLL(MC_HOST_DYN_CNTL, mc_host_dyn_cntl);
+#endif
+ dyn_backbias_cntl = RREG32_PLL(DYN_BACKBIAS_CNTL);
+ if (ps->misc & ATOM_PM_MISCINFO2_DYNAMIC_BACK_BIAS_EN)
+ dyn_backbias_cntl |= IO_CG_BACKBIAS_EN;
+ else
+ dyn_backbias_cntl &= ~IO_CG_BACKBIAS_EN;
+ WREG32_PLL(DYN_BACKBIAS_CNTL, dyn_backbias_cntl);
+
+ /* set pcie lanes */
+ if ((rdev->flags & RADEON_IS_PCIE) &&
+ !(rdev->flags & RADEON_IS_IGP) &&
+ rdev->asic->set_pcie_lanes &&
+ (ps->pcie_lanes !=
+ rdev->pm.power_state[rdev->pm.current_power_state_index].pcie_lanes)) {
+ radeon_set_pcie_lanes(rdev,
+ ps->pcie_lanes);
+ DRM_DEBUG("Setting: p: %d\n", ps->pcie_lanes);
+ }
+}
+
+void rs600_pm_prepare(struct radeon_device *rdev)
+{
+ struct drm_device *ddev = rdev->ddev;
+ struct drm_crtc *crtc;
+ struct radeon_crtc *radeon_crtc;
+ u32 tmp;
+
+ /* disable any active CRTCs */
+ list_for_each_entry(crtc, &ddev->mode_config.crtc_list, head) {
+ radeon_crtc = to_radeon_crtc(crtc);
+ if (radeon_crtc->enabled) {
+ tmp = RREG32(AVIVO_D1CRTC_CONTROL + radeon_crtc->crtc_offset);
+ tmp |= AVIVO_CRTC_DISP_READ_REQUEST_DISABLE;
+ WREG32(AVIVO_D1CRTC_CONTROL + radeon_crtc->crtc_offset, tmp);
+ }
+ }
+}
+
+void rs600_pm_finish(struct radeon_device *rdev)
+{
+ struct drm_device *ddev = rdev->ddev;
+ struct drm_crtc *crtc;
+ struct radeon_crtc *radeon_crtc;
+ u32 tmp;
+
+ /* enable any active CRTCs */
+ list_for_each_entry(crtc, &ddev->mode_config.crtc_list, head) {
+ radeon_crtc = to_radeon_crtc(crtc);
+ if (radeon_crtc->enabled) {
+ tmp = RREG32(AVIVO_D1CRTC_CONTROL + radeon_crtc->crtc_offset);
+ tmp &= ~AVIVO_CRTC_DISP_READ_REQUEST_DISABLE;
+ WREG32(AVIVO_D1CRTC_CONTROL + radeon_crtc->crtc_offset, tmp);
+ }
+ }
+}
+
/* hpd for digital panel detect/disconnect */
bool rs600_hpd_sense(struct radeon_device *rdev, enum radeon_hpd_id hpd)
{
@@ -147,6 +276,78 @@ void rs600_hpd_fini(struct radeon_device *rdev)
}
}
+void rs600_bm_disable(struct radeon_device *rdev)
+{
+ u32 tmp;
+
+ /* disable bus mastering */
+ pci_read_config_word(rdev->pdev, 0x4, (u16*)&tmp);
+ pci_write_config_word(rdev->pdev, 0x4, tmp & 0xFFFB);
+ mdelay(1);
+}
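
rs600_bm_disable() clears bit 2 of the PCI command register (Bus Master Enable; the 0xFFFB mask is ~0x0004) by poking offset 0x4 directly. For reference, a minimal sketch of the same operation through the generic constants from <linux/pci.h> — an illustration, not what the driver does:

        static void example_bm_disable(struct pci_dev *pdev)
        {
                u16 cmd;

                /* PCI_COMMAND is offset 0x4; PCI_COMMAND_MASTER is the
                 * 0x0004 bit that the 0xFFFB mask above clears. */
                pci_read_config_word(pdev, PCI_COMMAND, &cmd);
                pci_write_config_word(pdev, PCI_COMMAND,
                                      cmd & ~PCI_COMMAND_MASTER);
        }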
+
+int rs600_asic_reset(struct radeon_device *rdev)
+{
+ u32 status, tmp;
+
+ struct rv515_mc_save save;
+
+ /* Stop all MC clients */
+ rv515_mc_stop(rdev, &save);
+ status = RREG32(R_000E40_RBBM_STATUS);
+ if (!G_000E40_GUI_ACTIVE(status)) {
+ return 0;
+ }
+ status = RREG32(R_000E40_RBBM_STATUS);
+ dev_info(rdev->dev, "(%s:%d) RBBM_STATUS=0x%08X\n", __func__, __LINE__, status);
+ /* stop CP */
+ WREG32(RADEON_CP_CSQ_CNTL, 0);
+ tmp = RREG32(RADEON_CP_RB_CNTL);
+ WREG32(RADEON_CP_RB_CNTL, tmp | RADEON_RB_RPTR_WR_ENA);
+ WREG32(RADEON_CP_RB_RPTR_WR, 0);
+ WREG32(RADEON_CP_RB_WPTR, 0);
+ WREG32(RADEON_CP_RB_CNTL, tmp);
+ pci_save_state(rdev->pdev);
+ /* disable bus mastering */
+ rs600_bm_disable(rdev);
+ /* reset GA+VAP */
+ WREG32(R_0000F0_RBBM_SOFT_RESET, S_0000F0_SOFT_RESET_VAP(1) |
+ S_0000F0_SOFT_RESET_GA(1));
+ RREG32(R_0000F0_RBBM_SOFT_RESET);
+ mdelay(500);
+ WREG32(R_0000F0_RBBM_SOFT_RESET, 0);
+ mdelay(1);
+ status = RREG32(R_000E40_RBBM_STATUS);
+ dev_info(rdev->dev, "(%s:%d) RBBM_STATUS=0x%08X\n", __func__, __LINE__, status);
+ /* reset CP */
+ WREG32(R_0000F0_RBBM_SOFT_RESET, S_0000F0_SOFT_RESET_CP(1));
+ RREG32(R_0000F0_RBBM_SOFT_RESET);
+ mdelay(500);
+ WREG32(R_0000F0_RBBM_SOFT_RESET, 0);
+ mdelay(1);
+ status = RREG32(R_000E40_RBBM_STATUS);
+ dev_info(rdev->dev, "(%s:%d) RBBM_STATUS=0x%08X\n", __func__, __LINE__, status);
+ /* reset MC */
+ WREG32(R_0000F0_RBBM_SOFT_RESET, S_0000F0_SOFT_RESET_MC(1));
+ RREG32(R_0000F0_RBBM_SOFT_RESET);
+ mdelay(500);
+ WREG32(R_0000F0_RBBM_SOFT_RESET, 0);
+ mdelay(1);
+ status = RREG32(R_000E40_RBBM_STATUS);
+ dev_info(rdev->dev, "(%s:%d) RBBM_STATUS=0x%08X\n", __func__, __LINE__, status);
+ /* restore PCI & busmastering */
+ pci_restore_state(rdev->pdev);
+ /* Check if GPU is idle */
+ if (G_000E40_GA_BUSY(status) || G_000E40_VAP_BUSY(status)) {
+ dev_err(rdev->dev, "failed to reset GPU\n");
+ rdev->gpu_lockup = true;
+ return -1;
+ }
+ rv515_mc_resume(rdev, &save);
+ dev_info(rdev->dev, "GPU reset succeed\n");
+ return 0;
+}
+
/*
* GART.
*/
@@ -310,6 +511,9 @@ int rs600_irq_set(struct radeon_device *rdev)
if (rdev->irq.sw_int) {
tmp |= S_000040_SW_INT_EN(1);
}
+ if (rdev->irq.gui_idle) {
+ tmp |= S_000040_GUI_IDLE(1);
+ }
if (rdev->irq.crtc_vblank_int[0]) {
mode_int |= S_006540_D1MODE_VBLANK_INT_MASK(1);
}
@@ -332,9 +536,15 @@ int rs600_irq_set(struct radeon_device *rdev)
static inline uint32_t rs600_irq_ack(struct radeon_device *rdev, u32 *r500_disp_int)
{
uint32_t irqs = RREG32(R_000044_GEN_INT_STATUS);
- uint32_t irq_mask = ~C_000044_SW_INT;
+ uint32_t irq_mask = S_000044_SW_INT(1);
u32 tmp;
+ /* the interrupt works, but the status bit is permanently asserted */
+ if (rdev->irq.gui_idle && radeon_gui_idle(rdev)) {
+ if (!rdev->irq.gui_idle_acked)
+ irq_mask |= S_000044_GUI_IDLE_STAT(1);
+ }
+
if (G_000044_DISPLAY_INT_STAT(irqs)) {
*r500_disp_int = RREG32(R_007EDC_DISP_INTERRUPT_STATUS);
if (G_007EDC_LB_D1_VBLANK_INTERRUPT(*r500_disp_int)) {
@@ -382,6 +592,9 @@ int rs600_irq_process(struct radeon_device *rdev)
uint32_t r500_disp_int;
bool queue_hotplug = false;
+ /* reset gui idle ack. the status bit is broken */
+ rdev->irq.gui_idle_acked = false;
+
status = rs600_irq_ack(rdev, &r500_disp_int);
if (!status && !r500_disp_int) {
return IRQ_NONE;
@@ -390,6 +603,12 @@ int rs600_irq_process(struct radeon_device *rdev)
/* SW interrupt */
if (G_000044_SW_INT(status))
radeon_fence_process(rdev);
+ /* GUI idle */
+ if (G_000040_GUI_IDLE(status)) {
+ rdev->irq.gui_idle_acked = true;
+ rdev->pm.gui_idle = true;
+ wake_up(&rdev->irq.idle_queue);
+ }
/* Vertical blank interrupts */
if (G_007EDC_LB_D1_VBLANK_INTERRUPT(r500_disp_int)) {
drm_handle_vblank(rdev->ddev, 0);
@@ -411,6 +630,8 @@ int rs600_irq_process(struct radeon_device *rdev)
}
status = rs600_irq_ack(rdev, &r500_disp_int);
}
+ /* reset gui idle ack. the status bit is broken */
+ rdev->irq.gui_idle_acked = false;
if (queue_hotplug)
queue_work(rdev->wq, &rdev->hotplug_work);
if (rdev->msi_enabled) {
@@ -454,7 +675,6 @@ int rs600_mc_wait_for_idle(struct radeon_device *rdev)
void rs600_gpu_init(struct radeon_device *rdev)
{
- r100_hdp_reset(rdev);
r420_pipes_init(rdev);
/* Wait for mc idle */
if (rs600_mc_wait_for_idle(rdev))
@@ -601,7 +821,7 @@ int rs600_resume(struct radeon_device *rdev)
/* Resume clock before doing reset */
rv515_clock_startup(rdev);
/* Reset gpu before posting otherwise ATOM will enter infinite loop */
- if (radeon_gpu_reset(rdev)) {
+ if (radeon_asic_reset(rdev)) {
dev_warn(rdev->dev, "GPU reset failed ! (0xE40=0x%08X, 0x7C0=0x%08X)\n",
RREG32(R_000E40_RBBM_STATUS),
RREG32(R_0007C0_CP_STAT));
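
The radeon_gpu_reset() → radeon_asic_reset() switch in this and the following hunks routes the reset through the per-ASIC function table, so rs600 boards pick up the new rs600_asic_reset() above. A rough sketch of that dispatch shape — member and wrapper names here are illustrative; the real table lives in radeon_asic.{c,h}:

        struct example_asic {
                int (*asic_reset)(struct radeon_device *rdev);
                /* ... many more per-ASIC hooks ... */
        };

        static inline int example_asic_reset(struct radeon_device *rdev)
        {
                /* dispatch to whatever the probed ASIC registered */
                return rdev->asic->asic_reset(rdev);
        }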
@@ -626,7 +846,6 @@ int rs600_suspend(struct radeon_device *rdev)
void rs600_fini(struct radeon_device *rdev)
{
- radeon_pm_fini(rdev);
r100_cp_fini(rdev);
r100_wb_fini(rdev);
r100_ib_fini(rdev);
@@ -664,7 +883,7 @@ int rs600_init(struct radeon_device *rdev)
return -EINVAL;
}
/* Reset gpu before posting otherwise ATOM will enter infinite loop */
- if (radeon_gpu_reset(rdev)) {
+ if (radeon_asic_reset(rdev)) {
dev_warn(rdev->dev,
"GPU reset failed ! (0xE40=0x%08X, 0x7C0=0x%08X)\n",
RREG32(R_000E40_RBBM_STATUS),
@@ -676,8 +895,6 @@ int rs600_init(struct radeon_device *rdev)
/* Initialize clocks */
radeon_get_clock_info(rdev->ddev);
- /* Initialize power management */
- radeon_pm_init(rdev);
/* initialize memory controller */
rs600_mc_init(rdev);
rs600_debugfs(rdev);
diff --git a/drivers/gpu/drm/radeon/rs600d.h b/drivers/gpu/drm/radeon/rs600d.h
index e52d2695510..a27c13ac47c 100644
--- a/drivers/gpu/drm/radeon/rs600d.h
+++ b/drivers/gpu/drm/radeon/rs600d.h
@@ -178,6 +178,52 @@
#define S_000074_MC_IND_DATA(x) (((x) & 0xFFFFFFFF) << 0)
#define G_000074_MC_IND_DATA(x) (((x) >> 0) & 0xFFFFFFFF)
#define C_000074_MC_IND_DATA 0x00000000
+#define R_0000F0_RBBM_SOFT_RESET 0x0000F0
+#define S_0000F0_SOFT_RESET_CP(x) (((x) & 0x1) << 0)
+#define G_0000F0_SOFT_RESET_CP(x) (((x) >> 0) & 0x1)
+#define C_0000F0_SOFT_RESET_CP 0xFFFFFFFE
+#define S_0000F0_SOFT_RESET_HI(x) (((x) & 0x1) << 1)
+#define G_0000F0_SOFT_RESET_HI(x) (((x) >> 1) & 0x1)
+#define C_0000F0_SOFT_RESET_HI 0xFFFFFFFD
+#define S_0000F0_SOFT_RESET_VAP(x) (((x) & 0x1) << 2)
+#define G_0000F0_SOFT_RESET_VAP(x) (((x) >> 2) & 0x1)
+#define C_0000F0_SOFT_RESET_VAP 0xFFFFFFFB
+#define S_0000F0_SOFT_RESET_RE(x) (((x) & 0x1) << 3)
+#define G_0000F0_SOFT_RESET_RE(x) (((x) >> 3) & 0x1)
+#define C_0000F0_SOFT_RESET_RE 0xFFFFFFF7
+#define S_0000F0_SOFT_RESET_PP(x) (((x) & 0x1) << 4)
+#define G_0000F0_SOFT_RESET_PP(x) (((x) >> 4) & 0x1)
+#define C_0000F0_SOFT_RESET_PP 0xFFFFFFEF
+#define S_0000F0_SOFT_RESET_E2(x) (((x) & 0x1) << 5)
+#define G_0000F0_SOFT_RESET_E2(x) (((x) >> 5) & 0x1)
+#define C_0000F0_SOFT_RESET_E2 0xFFFFFFDF
+#define S_0000F0_SOFT_RESET_RB(x) (((x) & 0x1) << 6)
+#define G_0000F0_SOFT_RESET_RB(x) (((x) >> 6) & 0x1)
+#define C_0000F0_SOFT_RESET_RB 0xFFFFFFBF
+#define S_0000F0_SOFT_RESET_HDP(x) (((x) & 0x1) << 7)
+#define G_0000F0_SOFT_RESET_HDP(x) (((x) >> 7) & 0x1)
+#define C_0000F0_SOFT_RESET_HDP 0xFFFFFF7F
+#define S_0000F0_SOFT_RESET_MC(x) (((x) & 0x1) << 8)
+#define G_0000F0_SOFT_RESET_MC(x) (((x) >> 8) & 0x1)
+#define C_0000F0_SOFT_RESET_MC 0xFFFFFEFF
+#define S_0000F0_SOFT_RESET_AIC(x) (((x) & 0x1) << 9)
+#define G_0000F0_SOFT_RESET_AIC(x) (((x) >> 9) & 0x1)
+#define C_0000F0_SOFT_RESET_AIC 0xFFFFFDFF
+#define S_0000F0_SOFT_RESET_VIP(x) (((x) & 0x1) << 10)
+#define G_0000F0_SOFT_RESET_VIP(x) (((x) >> 10) & 0x1)
+#define C_0000F0_SOFT_RESET_VIP 0xFFFFFBFF
+#define S_0000F0_SOFT_RESET_DISP(x) (((x) & 0x1) << 11)
+#define G_0000F0_SOFT_RESET_DISP(x) (((x) >> 11) & 0x1)
+#define C_0000F0_SOFT_RESET_DISP 0xFFFFF7FF
+#define S_0000F0_SOFT_RESET_CG(x) (((x) & 0x1) << 12)
+#define G_0000F0_SOFT_RESET_CG(x) (((x) >> 12) & 0x1)
+#define C_0000F0_SOFT_RESET_CG 0xFFFFEFFF
+#define S_0000F0_SOFT_RESET_GA(x) (((x) & 0x1) << 13)
+#define G_0000F0_SOFT_RESET_GA(x) (((x) >> 13) & 0x1)
+#define C_0000F0_SOFT_RESET_GA 0xFFFFDFFF
+#define S_0000F0_SOFT_RESET_IDCT(x) (((x) & 0x1) << 14)
+#define G_0000F0_SOFT_RESET_IDCT(x) (((x) >> 14) & 0x1)
+#define C_0000F0_SOFT_RESET_IDCT 0xFFFFBFFF
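
Each field above comes with the usual helper triple: S_ shifts a value into the field, G_ extracts it, and C_ is the all-ones mask with the field cleared — so for a one-bit field ~C_X equals S_X(1), the identity behind the irq_mask rewrite in rs600_irq_ack(). A short usage sketch with the names just defined (reg is a hypothetical value):

        u32 reg = 0;

        reg |= S_0000F0_SOFT_RESET_GA(1);       /* place 1 in bit 13  */
        reg |= S_0000F0_SOFT_RESET_VAP(1);      /* place 1 in bit 2   */
        if (G_0000F0_SOFT_RESET_GA(reg))        /* extract the field  */
                reg &= C_0000F0_SOFT_RESET_GA;  /* clear it via mask  */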
#define R_000134_HDP_FB_LOCATION 0x000134
#define S_000134_HDP_FB_START(x) (((x) & 0xFFFF) << 0)
#define G_000134_HDP_FB_START(x) (((x) >> 0) & 0xFFFF)
@@ -588,4 +634,38 @@
#define G_006D4C_D2MODE_PRIORITY_B_FORCE_MASK(x) (((x) >> 24) & 0x1)
#define C_006D4C_D2MODE_PRIORITY_B_FORCE_MASK 0xFEFFFFFF
+/* PLL regs */
+#define GENERAL_PWRMGT 0x8
+#define GLOBAL_PWRMGT_EN (1 << 0)
+#define MOBILE_SU (1 << 2)
+#define DYN_PWRMGT_SCLK_LENGTH 0xc
+#define NORMAL_POWER_SCLK_HILEN(x) ((x) << 0)
+#define NORMAL_POWER_SCLK_LOLEN(x) ((x) << 4)
+#define REDUCED_POWER_SCLK_HILEN(x) ((x) << 8)
+#define REDUCED_POWER_SCLK_LOLEN(x) ((x) << 12)
+#define POWER_D1_SCLK_HILEN(x) ((x) << 16)
+#define POWER_D1_SCLK_LOLEN(x) ((x) << 20)
+#define STATIC_SCREEN_HILEN(x) ((x) << 24)
+#define STATIC_SCREEN_LOLEN(x) ((x) << 28)
+#define DYN_SCLK_VOL_CNTL 0xe
+#define IO_CG_VOLTAGE_DROP (1 << 0)
+#define VOLTAGE_DROP_SYNC (1 << 2)
+#define VOLTAGE_DELAY_SEL(x) ((x) << 3)
+#define HDP_DYN_CNTL 0x10
+#define HDP_FORCEON (1 << 0)
+#define MC_HOST_DYN_CNTL 0x1e
+#define MC_HOST_FORCEON (1 << 0)
+#define DYN_BACKBIAS_CNTL 0x29
+#define IO_CG_BACKBIAS_EN (1 << 0)
+
+/* mmreg */
+#define DOUT_POWER_MANAGEMENT_CNTL 0x7ee0
+#define PWRDN_WAIT_BUSY_OFF (1 << 0)
+#define PWRDN_WAIT_PWRSEQ_OFF (1 << 4)
+#define PWRDN_WAIT_PPLL_OFF (1 << 8)
+#define PWRUP_WAIT_PPLL_ON (1 << 12)
+#define PWRUP_WAIT_MEM_INIT_DONE (1 << 16)
+#define PM_ASSERT_RESET (1 << 20)
+#define PM_PWRDN_PPLL (1 << 24)
+
#endif
diff --git a/drivers/gpu/drm/radeon/rs690.c b/drivers/gpu/drm/radeon/rs690.c
index bbf3da790fd..bcc33195ebc 100644
--- a/drivers/gpu/drm/radeon/rs690.c
+++ b/drivers/gpu/drm/radeon/rs690.c
@@ -48,8 +48,6 @@ static int rs690_mc_wait_for_idle(struct radeon_device *rdev)
static void rs690_gpu_init(struct radeon_device *rdev)
{
- /* FIXME: HDP same place on rs690 ? */
- r100_hdp_reset(rdev);
/* FIXME: is this correct ? */
r420_pipes_init(rdev);
if (rs690_mc_wait_for_idle(rdev)) {
@@ -78,59 +76,59 @@ void rs690_pm_info(struct radeon_device *rdev)
/* Get various system information from the BIOS */
switch (crev) {
case 1:
- tmp.full = rfixed_const(100);
- rdev->pm.igp_sideport_mclk.full = rfixed_const(info->info.ulBootUpMemoryClock);
- rdev->pm.igp_sideport_mclk.full = rfixed_div(rdev->pm.igp_sideport_mclk, tmp);
- rdev->pm.igp_system_mclk.full = rfixed_const(le16_to_cpu(info->info.usK8MemoryClock));
- rdev->pm.igp_ht_link_clk.full = rfixed_const(le16_to_cpu(info->info.usFSBClock));
- rdev->pm.igp_ht_link_width.full = rfixed_const(info->info.ucHTLinkWidth);
+ tmp.full = dfixed_const(100);
+ rdev->pm.igp_sideport_mclk.full = dfixed_const(info->info.ulBootUpMemoryClock);
+ rdev->pm.igp_sideport_mclk.full = dfixed_div(rdev->pm.igp_sideport_mclk, tmp);
+ rdev->pm.igp_system_mclk.full = dfixed_const(le16_to_cpu(info->info.usK8MemoryClock));
+ rdev->pm.igp_ht_link_clk.full = dfixed_const(le16_to_cpu(info->info.usFSBClock));
+ rdev->pm.igp_ht_link_width.full = dfixed_const(info->info.ucHTLinkWidth);
break;
case 2:
- tmp.full = rfixed_const(100);
- rdev->pm.igp_sideport_mclk.full = rfixed_const(info->info_v2.ulBootUpSidePortClock);
- rdev->pm.igp_sideport_mclk.full = rfixed_div(rdev->pm.igp_sideport_mclk, tmp);
- rdev->pm.igp_system_mclk.full = rfixed_const(info->info_v2.ulBootUpUMAClock);
- rdev->pm.igp_system_mclk.full = rfixed_div(rdev->pm.igp_system_mclk, tmp);
- rdev->pm.igp_ht_link_clk.full = rfixed_const(info->info_v2.ulHTLinkFreq);
- rdev->pm.igp_ht_link_clk.full = rfixed_div(rdev->pm.igp_ht_link_clk, tmp);
- rdev->pm.igp_ht_link_width.full = rfixed_const(le16_to_cpu(info->info_v2.usMinHTLinkWidth));
+ tmp.full = dfixed_const(100);
+ rdev->pm.igp_sideport_mclk.full = dfixed_const(info->info_v2.ulBootUpSidePortClock);
+ rdev->pm.igp_sideport_mclk.full = dfixed_div(rdev->pm.igp_sideport_mclk, tmp);
+ rdev->pm.igp_system_mclk.full = dfixed_const(info->info_v2.ulBootUpUMAClock);
+ rdev->pm.igp_system_mclk.full = dfixed_div(rdev->pm.igp_system_mclk, tmp);
+ rdev->pm.igp_ht_link_clk.full = dfixed_const(info->info_v2.ulHTLinkFreq);
+ rdev->pm.igp_ht_link_clk.full = dfixed_div(rdev->pm.igp_ht_link_clk, tmp);
+ rdev->pm.igp_ht_link_width.full = dfixed_const(le16_to_cpu(info->info_v2.usMinHTLinkWidth));
break;
default:
- tmp.full = rfixed_const(100);
+ tmp.full = dfixed_const(100);
/* We assume the slowest possible clock, i.e. worst case */
/* DDR 333 MHz */
- rdev->pm.igp_sideport_mclk.full = rfixed_const(333);
+ rdev->pm.igp_sideport_mclk.full = dfixed_const(333);
/* FIXME: system clock ? */
- rdev->pm.igp_system_mclk.full = rfixed_const(100);
- rdev->pm.igp_system_mclk.full = rfixed_div(rdev->pm.igp_system_mclk, tmp);
- rdev->pm.igp_ht_link_clk.full = rfixed_const(200);
- rdev->pm.igp_ht_link_width.full = rfixed_const(8);
+ rdev->pm.igp_system_mclk.full = dfixed_const(100);
+ rdev->pm.igp_system_mclk.full = dfixed_div(rdev->pm.igp_system_mclk, tmp);
+ rdev->pm.igp_ht_link_clk.full = dfixed_const(200);
+ rdev->pm.igp_ht_link_width.full = dfixed_const(8);
DRM_ERROR("No integrated system info for your GPU, using safe default\n");
break;
}
} else {
- tmp.full = rfixed_const(100);
+ tmp.full = dfixed_const(100);
/* We assume the slowest possible clock, i.e. worst case */
/* DDR 333 MHz */
- rdev->pm.igp_sideport_mclk.full = rfixed_const(333);
+ rdev->pm.igp_sideport_mclk.full = dfixed_const(333);
/* FIXME: system clock ? */
- rdev->pm.igp_system_mclk.full = rfixed_const(100);
- rdev->pm.igp_system_mclk.full = rfixed_div(rdev->pm.igp_system_mclk, tmp);
- rdev->pm.igp_ht_link_clk.full = rfixed_const(200);
- rdev->pm.igp_ht_link_width.full = rfixed_const(8);
+ rdev->pm.igp_system_mclk.full = dfixed_const(100);
+ rdev->pm.igp_system_mclk.full = dfixed_div(rdev->pm.igp_system_mclk, tmp);
+ rdev->pm.igp_ht_link_clk.full = dfixed_const(200);
+ rdev->pm.igp_ht_link_width.full = dfixed_const(8);
DRM_ERROR("No integrated system info for your GPU, using safe default\n");
}
/* Compute various bandwidths */
/* k8_bandwidth = (memory_clk / 2) * 2 * 8 * 0.5 = memory_clk * 4 */
- tmp.full = rfixed_const(4);
- rdev->pm.k8_bandwidth.full = rfixed_mul(rdev->pm.igp_system_mclk, tmp);
+ tmp.full = dfixed_const(4);
+ rdev->pm.k8_bandwidth.full = dfixed_mul(rdev->pm.igp_system_mclk, tmp);
/* ht_bandwidth = ht_clk * 2 * ht_width / 8 * 0.8
* = ht_clk * ht_width / 5
*/
- tmp.full = rfixed_const(5);
- rdev->pm.ht_bandwidth.full = rfixed_mul(rdev->pm.igp_ht_link_clk,
+ tmp.full = dfixed_const(5);
+ rdev->pm.ht_bandwidth.full = dfixed_mul(rdev->pm.igp_ht_link_clk,
rdev->pm.igp_ht_link_width);
- rdev->pm.ht_bandwidth.full = rfixed_div(rdev->pm.ht_bandwidth, tmp);
+ rdev->pm.ht_bandwidth.full = dfixed_div(rdev->pm.ht_bandwidth, tmp);
if (tmp.full < rdev->pm.max_bandwidth.full) {
/* HT link is a limiting factor */
rdev->pm.max_bandwidth.full = tmp.full;
@@ -138,10 +136,10 @@ void rs690_pm_info(struct radeon_device *rdev)
/* sideport_bandwidth = (sideport_clk / 2) * 2 * 2 * 0.7
* = (sideport_clk * 14) / 10
*/
- tmp.full = rfixed_const(14);
- rdev->pm.sideport_bandwidth.full = rfixed_mul(rdev->pm.igp_sideport_mclk, tmp);
- tmp.full = rfixed_const(10);
- rdev->pm.sideport_bandwidth.full = rfixed_div(rdev->pm.sideport_bandwidth, tmp);
+ tmp.full = dfixed_const(14);
+ rdev->pm.sideport_bandwidth.full = dfixed_mul(rdev->pm.igp_sideport_mclk, tmp);
+ tmp.full = dfixed_const(10);
+ rdev->pm.sideport_bandwidth.full = dfixed_div(rdev->pm.sideport_bandwidth, tmp);
}
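
The mechanical rfixed_ → dfixed_ rename throughout this file (and rv515.c below) follows the radeon fixed-point helpers moving into a shared DRM header. A minimal sketch of the arithmetic, assuming the usual fixed20_12 representation with 12 fractional bits:

        fixed20_12 a, b;

        a.full = dfixed_const(3);       /* 3.0, stored as 3 << 12 */
        b.full = dfixed_const(2);       /* 2.0                    */
        a.full = dfixed_div(a, b);      /* 1.5 in 20.12           */
        /* dfixed_trunc(a) == 1; dfixed_ceil(a) rounds up to 2.0 */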
void rs690_mc_init(struct radeon_device *rdev)
@@ -241,20 +239,20 @@ void rs690_crtc_bandwidth_compute(struct radeon_device *rdev,
return;
}
- if (crtc->vsc.full > rfixed_const(2))
- wm->num_line_pair.full = rfixed_const(2);
+ if (crtc->vsc.full > dfixed_const(2))
+ wm->num_line_pair.full = dfixed_const(2);
else
- wm->num_line_pair.full = rfixed_const(1);
-
- b.full = rfixed_const(mode->crtc_hdisplay);
- c.full = rfixed_const(256);
- a.full = rfixed_div(b, c);
- request_fifo_depth.full = rfixed_mul(a, wm->num_line_pair);
- request_fifo_depth.full = rfixed_ceil(request_fifo_depth);
- if (a.full < rfixed_const(4)) {
+ wm->num_line_pair.full = dfixed_const(1);
+
+ b.full = dfixed_const(mode->crtc_hdisplay);
+ c.full = dfixed_const(256);
+ a.full = dfixed_div(b, c);
+ request_fifo_depth.full = dfixed_mul(a, wm->num_line_pair);
+ request_fifo_depth.full = dfixed_ceil(request_fifo_depth);
+ if (a.full < dfixed_const(4)) {
wm->lb_request_fifo_depth = 4;
} else {
- wm->lb_request_fifo_depth = rfixed_trunc(request_fifo_depth);
+ wm->lb_request_fifo_depth = dfixed_trunc(request_fifo_depth);
}
/* Determine consumption rate
@@ -263,23 +261,23 @@ void rs690_crtc_bandwidth_compute(struct radeon_device *rdev,
* vsc = vertical scaling ratio, defined as source/destination
* hsc = horizontal scaling ratio, defined as source/destination
*/
- a.full = rfixed_const(mode->clock);
- b.full = rfixed_const(1000);
- a.full = rfixed_div(a, b);
- pclk.full = rfixed_div(b, a);
+ a.full = dfixed_const(mode->clock);
+ b.full = dfixed_const(1000);
+ a.full = dfixed_div(a, b);
+ pclk.full = dfixed_div(b, a);
if (crtc->rmx_type != RMX_OFF) {
- b.full = rfixed_const(2);
+ b.full = dfixed_const(2);
if (crtc->vsc.full > b.full)
b.full = crtc->vsc.full;
- b.full = rfixed_mul(b, crtc->hsc);
- c.full = rfixed_const(2);
- b.full = rfixed_div(b, c);
- consumption_time.full = rfixed_div(pclk, b);
+ b.full = dfixed_mul(b, crtc->hsc);
+ c.full = dfixed_const(2);
+ b.full = dfixed_div(b, c);
+ consumption_time.full = dfixed_div(pclk, b);
} else {
consumption_time.full = pclk.full;
}
- a.full = rfixed_const(1);
- wm->consumption_rate.full = rfixed_div(a, consumption_time);
+ a.full = dfixed_const(1);
+ wm->consumption_rate.full = dfixed_div(a, consumption_time);
/* Determine line time
@@ -287,18 +285,18 @@ void rs690_crtc_bandwidth_compute(struct radeon_device *rdev,
* LineTime = total number of horizontal pixels
* pclk = pixel clock period(ns)
*/
- a.full = rfixed_const(crtc->base.mode.crtc_htotal);
- line_time.full = rfixed_mul(a, pclk);
+ a.full = dfixed_const(crtc->base.mode.crtc_htotal);
+ line_time.full = dfixed_mul(a, pclk);
/* Determine active time
* ActiveTime = time of active region of display within one line,
* hactive = total number of horizontal active pixels
* htotal = total number of horizontal pixels
*/
- a.full = rfixed_const(crtc->base.mode.crtc_htotal);
- b.full = rfixed_const(crtc->base.mode.crtc_hdisplay);
- wm->active_time.full = rfixed_mul(line_time, b);
- wm->active_time.full = rfixed_div(wm->active_time, a);
+ a.full = dfixed_const(crtc->base.mode.crtc_htotal);
+ b.full = dfixed_const(crtc->base.mode.crtc_hdisplay);
+ wm->active_time.full = dfixed_mul(line_time, b);
+ wm->active_time.full = dfixed_div(wm->active_time, a);
/* Maximum bandwidth is the minimum bandwidth of all components */
rdev->pm.max_bandwidth = rdev->pm.core_bandwidth;
@@ -306,8 +304,8 @@ void rs690_crtc_bandwidth_compute(struct radeon_device *rdev,
if (rdev->pm.max_bandwidth.full > rdev->pm.sideport_bandwidth.full &&
rdev->pm.sideport_bandwidth.full)
rdev->pm.max_bandwidth = rdev->pm.sideport_bandwidth;
- read_delay_latency.full = rfixed_const(370 * 800 * 1000);
- read_delay_latency.full = rfixed_div(read_delay_latency,
+ read_delay_latency.full = dfixed_const(370 * 800 * 1000);
+ read_delay_latency.full = dfixed_div(read_delay_latency,
rdev->pm.igp_sideport_mclk);
} else {
if (rdev->pm.max_bandwidth.full > rdev->pm.k8_bandwidth.full &&
@@ -316,23 +314,23 @@ void rs690_crtc_bandwidth_compute(struct radeon_device *rdev,
if (rdev->pm.max_bandwidth.full > rdev->pm.ht_bandwidth.full &&
rdev->pm.ht_bandwidth.full)
rdev->pm.max_bandwidth = rdev->pm.ht_bandwidth;
- read_delay_latency.full = rfixed_const(5000);
+ read_delay_latency.full = dfixed_const(5000);
}
/* sclk = system clocks(ns) = 1000 / max_bandwidth / 16 */
- a.full = rfixed_const(16);
- rdev->pm.sclk.full = rfixed_mul(rdev->pm.max_bandwidth, a);
- a.full = rfixed_const(1000);
- rdev->pm.sclk.full = rfixed_div(a, rdev->pm.sclk);
+ a.full = dfixed_const(16);
+ rdev->pm.sclk.full = dfixed_mul(rdev->pm.max_bandwidth, a);
+ a.full = dfixed_const(1000);
+ rdev->pm.sclk.full = dfixed_div(a, rdev->pm.sclk);
/* Determine chunk time
* ChunkTime = the time it takes the DCP to send one chunk of data
* to the LB which consists of pipeline delay and inter chunk gap
* sclk = system clock(ns)
*/
- a.full = rfixed_const(256 * 13);
- chunk_time.full = rfixed_mul(rdev->pm.sclk, a);
- a.full = rfixed_const(10);
- chunk_time.full = rfixed_div(chunk_time, a);
+ a.full = dfixed_const(256 * 13);
+ chunk_time.full = dfixed_mul(rdev->pm.sclk, a);
+ a.full = dfixed_const(10);
+ chunk_time.full = dfixed_div(chunk_time, a);
/* Determine the worst case latency
* NumLinePair = Number of line pairs to request(1=2 lines, 2=4 lines)
@@ -342,13 +340,13 @@ void rs690_crtc_bandwidth_compute(struct radeon_device *rdev,
* ChunkTime = time it takes the DCP to send one chunk of data to the LB
* which consists of pipeline delay and inter chunk gap
*/
- if (rfixed_trunc(wm->num_line_pair) > 1) {
- a.full = rfixed_const(3);
- wm->worst_case_latency.full = rfixed_mul(a, chunk_time);
+ if (dfixed_trunc(wm->num_line_pair) > 1) {
+ a.full = dfixed_const(3);
+ wm->worst_case_latency.full = dfixed_mul(a, chunk_time);
wm->worst_case_latency.full += read_delay_latency.full;
} else {
- a.full = rfixed_const(2);
- wm->worst_case_latency.full = rfixed_mul(a, chunk_time);
+ a.full = dfixed_const(2);
+ wm->worst_case_latency.full = dfixed_mul(a, chunk_time);
wm->worst_case_latency.full += read_delay_latency.full;
}
@@ -362,34 +360,34 @@ void rs690_crtc_bandwidth_compute(struct radeon_device *rdev,
* of data to the LB which consists of
* pipeline delay and inter chunk gap
*/
- if ((2+wm->lb_request_fifo_depth) >= rfixed_trunc(request_fifo_depth)) {
+ if ((2+wm->lb_request_fifo_depth) >= dfixed_trunc(request_fifo_depth)) {
tolerable_latency.full = line_time.full;
} else {
- tolerable_latency.full = rfixed_const(wm->lb_request_fifo_depth - 2);
+ tolerable_latency.full = dfixed_const(wm->lb_request_fifo_depth - 2);
tolerable_latency.full = request_fifo_depth.full - tolerable_latency.full;
- tolerable_latency.full = rfixed_mul(tolerable_latency, chunk_time);
+ tolerable_latency.full = dfixed_mul(tolerable_latency, chunk_time);
tolerable_latency.full = line_time.full - tolerable_latency.full;
}
/* We assume worst case 32 bits (4 bytes) */
- wm->dbpp.full = rfixed_const(4 * 8);
+ wm->dbpp.full = dfixed_const(4 * 8);
/* Determine the maximum priority mark
* width = viewport width in pixels
*/
- a.full = rfixed_const(16);
- wm->priority_mark_max.full = rfixed_const(crtc->base.mode.crtc_hdisplay);
- wm->priority_mark_max.full = rfixed_div(wm->priority_mark_max, a);
- wm->priority_mark_max.full = rfixed_ceil(wm->priority_mark_max);
+ a.full = dfixed_const(16);
+ wm->priority_mark_max.full = dfixed_const(crtc->base.mode.crtc_hdisplay);
+ wm->priority_mark_max.full = dfixed_div(wm->priority_mark_max, a);
+ wm->priority_mark_max.full = dfixed_ceil(wm->priority_mark_max);
/* Determine estimated width */
estimated_width.full = tolerable_latency.full - wm->worst_case_latency.full;
- estimated_width.full = rfixed_div(estimated_width, consumption_time);
- if (rfixed_trunc(estimated_width) > crtc->base.mode.crtc_hdisplay) {
- wm->priority_mark.full = rfixed_const(10);
+ estimated_width.full = dfixed_div(estimated_width, consumption_time);
+ if (dfixed_trunc(estimated_width) > crtc->base.mode.crtc_hdisplay) {
+ wm->priority_mark.full = dfixed_const(10);
} else {
- a.full = rfixed_const(16);
- wm->priority_mark.full = rfixed_div(estimated_width, a);
- wm->priority_mark.full = rfixed_ceil(wm->priority_mark);
+ a.full = dfixed_const(16);
+ wm->priority_mark.full = dfixed_div(estimated_width, a);
+ wm->priority_mark.full = dfixed_ceil(wm->priority_mark);
wm->priority_mark.full = wm->priority_mark_max.full - wm->priority_mark.full;
}
}
@@ -441,58 +439,58 @@ void rs690_bandwidth_update(struct radeon_device *rdev)
WREG32(R_006D58_LB_MAX_REQ_OUTSTANDING, tmp);
if (mode0 && mode1) {
- if (rfixed_trunc(wm0.dbpp) > 64)
- a.full = rfixed_mul(wm0.dbpp, wm0.num_line_pair);
+ if (dfixed_trunc(wm0.dbpp) > 64)
+ a.full = dfixed_mul(wm0.dbpp, wm0.num_line_pair);
else
a.full = wm0.num_line_pair.full;
- if (rfixed_trunc(wm1.dbpp) > 64)
- b.full = rfixed_mul(wm1.dbpp, wm1.num_line_pair);
+ if (dfixed_trunc(wm1.dbpp) > 64)
+ b.full = dfixed_mul(wm1.dbpp, wm1.num_line_pair);
else
b.full = wm1.num_line_pair.full;
a.full += b.full;
- fill_rate.full = rfixed_div(wm0.sclk, a);
+ fill_rate.full = dfixed_div(wm0.sclk, a);
if (wm0.consumption_rate.full > fill_rate.full) {
b.full = wm0.consumption_rate.full - fill_rate.full;
- b.full = rfixed_mul(b, wm0.active_time);
- a.full = rfixed_mul(wm0.worst_case_latency,
+ b.full = dfixed_mul(b, wm0.active_time);
+ a.full = dfixed_mul(wm0.worst_case_latency,
wm0.consumption_rate);
a.full = a.full + b.full;
- b.full = rfixed_const(16 * 1000);
- priority_mark02.full = rfixed_div(a, b);
+ b.full = dfixed_const(16 * 1000);
+ priority_mark02.full = dfixed_div(a, b);
} else {
- a.full = rfixed_mul(wm0.worst_case_latency,
+ a.full = dfixed_mul(wm0.worst_case_latency,
wm0.consumption_rate);
- b.full = rfixed_const(16 * 1000);
- priority_mark02.full = rfixed_div(a, b);
+ b.full = dfixed_const(16 * 1000);
+ priority_mark02.full = dfixed_div(a, b);
}
if (wm1.consumption_rate.full > fill_rate.full) {
b.full = wm1.consumption_rate.full - fill_rate.full;
- b.full = rfixed_mul(b, wm1.active_time);
- a.full = rfixed_mul(wm1.worst_case_latency,
+ b.full = dfixed_mul(b, wm1.active_time);
+ a.full = dfixed_mul(wm1.worst_case_latency,
wm1.consumption_rate);
a.full = a.full + b.full;
- b.full = rfixed_const(16 * 1000);
- priority_mark12.full = rfixed_div(a, b);
+ b.full = dfixed_const(16 * 1000);
+ priority_mark12.full = dfixed_div(a, b);
} else {
- a.full = rfixed_mul(wm1.worst_case_latency,
+ a.full = dfixed_mul(wm1.worst_case_latency,
wm1.consumption_rate);
- b.full = rfixed_const(16 * 1000);
- priority_mark12.full = rfixed_div(a, b);
+ b.full = dfixed_const(16 * 1000);
+ priority_mark12.full = dfixed_div(a, b);
}
if (wm0.priority_mark.full > priority_mark02.full)
priority_mark02.full = wm0.priority_mark.full;
- if (rfixed_trunc(priority_mark02) < 0)
+ if (dfixed_trunc(priority_mark02) < 0)
priority_mark02.full = 0;
if (wm0.priority_mark_max.full > priority_mark02.full)
priority_mark02.full = wm0.priority_mark_max.full;
if (wm1.priority_mark.full > priority_mark12.full)
priority_mark12.full = wm1.priority_mark.full;
- if (rfixed_trunc(priority_mark12) < 0)
+ if (dfixed_trunc(priority_mark12) < 0)
priority_mark12.full = 0;
if (wm1.priority_mark_max.full > priority_mark12.full)
priority_mark12.full = wm1.priority_mark_max.full;
- d1mode_priority_a_cnt = rfixed_trunc(priority_mark02);
- d2mode_priority_a_cnt = rfixed_trunc(priority_mark12);
+ d1mode_priority_a_cnt = dfixed_trunc(priority_mark02);
+ d2mode_priority_a_cnt = dfixed_trunc(priority_mark12);
if (rdev->disp_priority == 2) {
d1mode_priority_a_cnt |= S_006548_D1MODE_PRIORITY_A_ALWAYS_ON(1);
d2mode_priority_a_cnt |= S_006D48_D2MODE_PRIORITY_A_ALWAYS_ON(1);
@@ -502,32 +500,32 @@ void rs690_bandwidth_update(struct radeon_device *rdev)
WREG32(R_006D48_D2MODE_PRIORITY_A_CNT, d2mode_priority_a_cnt);
WREG32(R_006D4C_D2MODE_PRIORITY_B_CNT, d2mode_priority_a_cnt);
} else if (mode0) {
- if (rfixed_trunc(wm0.dbpp) > 64)
- a.full = rfixed_mul(wm0.dbpp, wm0.num_line_pair);
+ if (dfixed_trunc(wm0.dbpp) > 64)
+ a.full = dfixed_mul(wm0.dbpp, wm0.num_line_pair);
else
a.full = wm0.num_line_pair.full;
- fill_rate.full = rfixed_div(wm0.sclk, a);
+ fill_rate.full = dfixed_div(wm0.sclk, a);
if (wm0.consumption_rate.full > fill_rate.full) {
b.full = wm0.consumption_rate.full - fill_rate.full;
- b.full = rfixed_mul(b, wm0.active_time);
- a.full = rfixed_mul(wm0.worst_case_latency,
+ b.full = dfixed_mul(b, wm0.active_time);
+ a.full = dfixed_mul(wm0.worst_case_latency,
wm0.consumption_rate);
a.full = a.full + b.full;
- b.full = rfixed_const(16 * 1000);
- priority_mark02.full = rfixed_div(a, b);
+ b.full = dfixed_const(16 * 1000);
+ priority_mark02.full = dfixed_div(a, b);
} else {
- a.full = rfixed_mul(wm0.worst_case_latency,
+ a.full = dfixed_mul(wm0.worst_case_latency,
wm0.consumption_rate);
- b.full = rfixed_const(16 * 1000);
- priority_mark02.full = rfixed_div(a, b);
+ b.full = dfixed_const(16 * 1000);
+ priority_mark02.full = dfixed_div(a, b);
}
if (wm0.priority_mark.full > priority_mark02.full)
priority_mark02.full = wm0.priority_mark.full;
- if (rfixed_trunc(priority_mark02) < 0)
+ if (dfixed_trunc(priority_mark02) < 0)
priority_mark02.full = 0;
if (wm0.priority_mark_max.full > priority_mark02.full)
priority_mark02.full = wm0.priority_mark_max.full;
- d1mode_priority_a_cnt = rfixed_trunc(priority_mark02);
+ d1mode_priority_a_cnt = dfixed_trunc(priority_mark02);
if (rdev->disp_priority == 2)
d1mode_priority_a_cnt |= S_006548_D1MODE_PRIORITY_A_ALWAYS_ON(1);
WREG32(R_006548_D1MODE_PRIORITY_A_CNT, d1mode_priority_a_cnt);
@@ -537,32 +535,32 @@ void rs690_bandwidth_update(struct radeon_device *rdev)
WREG32(R_006D4C_D2MODE_PRIORITY_B_CNT,
S_006D4C_D2MODE_PRIORITY_B_OFF(1));
} else {
- if (rfixed_trunc(wm1.dbpp) > 64)
- a.full = rfixed_mul(wm1.dbpp, wm1.num_line_pair);
+ if (dfixed_trunc(wm1.dbpp) > 64)
+ a.full = dfixed_mul(wm1.dbpp, wm1.num_line_pair);
else
a.full = wm1.num_line_pair.full;
- fill_rate.full = rfixed_div(wm1.sclk, a);
+ fill_rate.full = dfixed_div(wm1.sclk, a);
if (wm1.consumption_rate.full > fill_rate.full) {
b.full = wm1.consumption_rate.full - fill_rate.full;
- b.full = rfixed_mul(b, wm1.active_time);
- a.full = rfixed_mul(wm1.worst_case_latency,
+ b.full = dfixed_mul(b, wm1.active_time);
+ a.full = dfixed_mul(wm1.worst_case_latency,
wm1.consumption_rate);
a.full = a.full + b.full;
- b.full = rfixed_const(16 * 1000);
- priority_mark12.full = rfixed_div(a, b);
+ b.full = dfixed_const(16 * 1000);
+ priority_mark12.full = dfixed_div(a, b);
} else {
- a.full = rfixed_mul(wm1.worst_case_latency,
+ a.full = dfixed_mul(wm1.worst_case_latency,
wm1.consumption_rate);
- b.full = rfixed_const(16 * 1000);
- priority_mark12.full = rfixed_div(a, b);
+ b.full = dfixed_const(16 * 1000);
+ priority_mark12.full = dfixed_div(a, b);
}
if (wm1.priority_mark.full > priority_mark12.full)
priority_mark12.full = wm1.priority_mark.full;
- if (rfixed_trunc(priority_mark12) < 0)
+ if (dfixed_trunc(priority_mark12) < 0)
priority_mark12.full = 0;
if (wm1.priority_mark_max.full > priority_mark12.full)
priority_mark12.full = wm1.priority_mark_max.full;
- d2mode_priority_a_cnt = rfixed_trunc(priority_mark12);
+ d2mode_priority_a_cnt = dfixed_trunc(priority_mark12);
if (rdev->disp_priority == 2)
d2mode_priority_a_cnt |= S_006D48_D2MODE_PRIORITY_A_ALWAYS_ON(1);
WREG32(R_006548_D1MODE_PRIORITY_A_CNT,
@@ -653,7 +651,7 @@ int rs690_resume(struct radeon_device *rdev)
/* Resume clock before doing reset */
rv515_clock_startup(rdev);
/* Reset gpu before posting otherwise ATOM will enter infinite loop */
- if (radeon_gpu_reset(rdev)) {
+ if (radeon_asic_reset(rdev)) {
dev_warn(rdev->dev, "GPU reset failed ! (0xE40=0x%08X, 0x7C0=0x%08X)\n",
RREG32(R_000E40_RBBM_STATUS),
RREG32(R_0007C0_CP_STAT));
@@ -678,7 +676,6 @@ int rs690_suspend(struct radeon_device *rdev)
void rs690_fini(struct radeon_device *rdev)
{
- radeon_pm_fini(rdev);
r100_cp_fini(rdev);
r100_wb_fini(rdev);
r100_ib_fini(rdev);
@@ -717,7 +714,7 @@ int rs690_init(struct radeon_device *rdev)
return -EINVAL;
}
/* Reset gpu before posting otherwise ATOM will enter infinite loop */
- if (radeon_gpu_reset(rdev)) {
+ if (radeon_asic_reset(rdev)) {
dev_warn(rdev->dev,
"GPU reset failed ! (0xE40=0x%08X, 0x7C0=0x%08X)\n",
RREG32(R_000E40_RBBM_STATUS),
@@ -729,8 +726,6 @@ int rs690_init(struct radeon_device *rdev)
/* Initialize clocks */
radeon_get_clock_info(rdev->ddev);
- /* Initialize power management */
- radeon_pm_init(rdev);
/* initialize memory controller */
rs690_mc_init(rdev);
rv515_debugfs(rdev);
diff --git a/drivers/gpu/drm/radeon/rv515.c b/drivers/gpu/drm/radeon/rv515.c
index 9035121f4b5..7d9a7b0a180 100644
--- a/drivers/gpu/drm/radeon/rv515.c
+++ b/drivers/gpu/drm/radeon/rv515.c
@@ -147,16 +147,11 @@ void rv515_gpu_init(struct radeon_device *rdev)
{
unsigned pipe_select_current, gb_pipe_select, tmp;
- r100_hdp_reset(rdev);
- r100_rb2d_reset(rdev);
-
if (r100_gui_wait_for_idle(rdev)) {
printk(KERN_WARNING "Failed to wait GUI idle while "
"reseting GPU. Bad things might happen.\n");
}
-
rv515_vga_render_disable(rdev);
-
r420_pipes_init(rdev);
gb_pipe_select = RREG32(0x402C);
tmp = RREG32(0x170C);
@@ -174,91 +169,6 @@ void rv515_gpu_init(struct radeon_device *rdev)
}
}
-int rv515_ga_reset(struct radeon_device *rdev)
-{
- uint32_t tmp;
- bool reinit_cp;
- int i;
-
- reinit_cp = rdev->cp.ready;
- rdev->cp.ready = false;
- for (i = 0; i < rdev->usec_timeout; i++) {
- WREG32(CP_CSQ_MODE, 0);
- WREG32(CP_CSQ_CNTL, 0);
- WREG32(RBBM_SOFT_RESET, 0x32005);
- (void)RREG32(RBBM_SOFT_RESET);
- udelay(200);
- WREG32(RBBM_SOFT_RESET, 0);
- /* Wait to prevent race in RBBM_STATUS */
- mdelay(1);
- tmp = RREG32(RBBM_STATUS);
- if (tmp & ((1 << 20) | (1 << 26))) {
- DRM_ERROR("VAP & CP still busy (RBBM_STATUS=0x%08X)\n", tmp);
- /* GA still busy soft reset it */
- WREG32(0x429C, 0x200);
- WREG32(VAP_PVS_STATE_FLUSH_REG, 0);
- WREG32(0x43E0, 0);
- WREG32(0x43E4, 0);
- WREG32(0x24AC, 0);
- }
- /* Wait to prevent race in RBBM_STATUS */
- mdelay(1);
- tmp = RREG32(RBBM_STATUS);
- if (!(tmp & ((1 << 20) | (1 << 26)))) {
- break;
- }
- }
- for (i = 0; i < rdev->usec_timeout; i++) {
- tmp = RREG32(RBBM_STATUS);
- if (!(tmp & ((1 << 20) | (1 << 26)))) {
- DRM_INFO("GA reset succeed (RBBM_STATUS=0x%08X)\n",
- tmp);
- DRM_INFO("GA_IDLE=0x%08X\n", RREG32(0x425C));
- DRM_INFO("RB3D_RESET_STATUS=0x%08X\n", RREG32(0x46f0));
- DRM_INFO("ISYNC_CNTL=0x%08X\n", RREG32(0x1724));
- if (reinit_cp) {
- return r100_cp_init(rdev, rdev->cp.ring_size);
- }
- return 0;
- }
- DRM_UDELAY(1);
- }
- tmp = RREG32(RBBM_STATUS);
- DRM_ERROR("Failed to reset GA ! (RBBM_STATUS=0x%08X)\n", tmp);
- return -1;
-}
-
-int rv515_gpu_reset(struct radeon_device *rdev)
-{
- uint32_t status;
-
- /* reset order likely matter */
- status = RREG32(RBBM_STATUS);
- /* reset HDP */
- r100_hdp_reset(rdev);
- /* reset rb2d */
- if (status & ((1 << 17) | (1 << 18) | (1 << 27))) {
- r100_rb2d_reset(rdev);
- }
- /* reset GA */
- if (status & ((1 << 20) | (1 << 26))) {
- rv515_ga_reset(rdev);
- }
- /* reset CP */
- status = RREG32(RBBM_STATUS);
- if (status & (1 << 16)) {
- r100_cp_reset(rdev);
- }
- /* Check if GPU is idle */
- status = RREG32(RBBM_STATUS);
- if (status & (1 << 31)) {
- DRM_ERROR("Failed to reset GPU (RBBM_STATUS=0x%08X)\n", status);
- return -1;
- }
- DRM_INFO("GPU reset succeed (RBBM_STATUS=0x%08X)\n", status);
- return 0;
-}
-
static void rv515_vram_get_type(struct radeon_device *rdev)
{
uint32_t tmp;
@@ -335,7 +245,7 @@ static int rv515_debugfs_ga_info(struct seq_file *m, void *data)
tmp = RREG32(0x2140);
seq_printf(m, "VAP_CNTL_STATUS 0x%08x\n", tmp);
- radeon_gpu_reset(rdev);
+ radeon_asic_reset(rdev);
tmp = RREG32(0x425C);
seq_printf(m, "GA_IDLE 0x%08x\n", tmp);
return 0;
@@ -503,7 +413,7 @@ int rv515_resume(struct radeon_device *rdev)
/* Resume clock before doing reset */
rv515_clock_startup(rdev);
/* Reset gpu before posting otherwise ATOM will enter infinite loop */
- if (radeon_gpu_reset(rdev)) {
+ if (radeon_asic_reset(rdev)) {
dev_warn(rdev->dev, "GPU reset failed ! (0xE40=0x%08X, 0x7C0=0x%08X)\n",
RREG32(R_000E40_RBBM_STATUS),
RREG32(R_0007C0_CP_STAT));
@@ -535,7 +445,6 @@ void rv515_set_safe_registers(struct radeon_device *rdev)
void rv515_fini(struct radeon_device *rdev)
{
- radeon_pm_fini(rdev);
r100_cp_fini(rdev);
r100_wb_fini(rdev);
r100_ib_fini(rdev);
@@ -573,7 +482,7 @@ int rv515_init(struct radeon_device *rdev)
return -EINVAL;
}
/* Reset gpu before posting otherwise ATOM will enter infinite loop */
- if (radeon_gpu_reset(rdev)) {
+ if (radeon_asic_reset(rdev)) {
dev_warn(rdev->dev,
"GPU reset failed ! (0xE40=0x%08X, 0x7C0=0x%08X)\n",
RREG32(R_000E40_RBBM_STATUS),
@@ -584,8 +493,6 @@ int rv515_init(struct radeon_device *rdev)
return -EINVAL;
/* Initialize clocks */
radeon_get_clock_info(rdev->ddev);
- /* Initialize power management */
- radeon_pm_init(rdev);
/* initialize AGP */
if (rdev->flags & RADEON_IS_AGP) {
r = radeon_agp_init(rdev);
@@ -885,20 +792,20 @@ void rv515_crtc_bandwidth_compute(struct radeon_device *rdev,
return;
}
- if (crtc->vsc.full > rfixed_const(2))
- wm->num_line_pair.full = rfixed_const(2);
+ if (crtc->vsc.full > dfixed_const(2))
+ wm->num_line_pair.full = dfixed_const(2);
else
- wm->num_line_pair.full = rfixed_const(1);
-
- b.full = rfixed_const(mode->crtc_hdisplay);
- c.full = rfixed_const(256);
- a.full = rfixed_div(b, c);
- request_fifo_depth.full = rfixed_mul(a, wm->num_line_pair);
- request_fifo_depth.full = rfixed_ceil(request_fifo_depth);
- if (a.full < rfixed_const(4)) {
+ wm->num_line_pair.full = dfixed_const(1);
+
+ b.full = dfixed_const(mode->crtc_hdisplay);
+ c.full = dfixed_const(256);
+ a.full = dfixed_div(b, c);
+ request_fifo_depth.full = dfixed_mul(a, wm->num_line_pair);
+ request_fifo_depth.full = dfixed_ceil(request_fifo_depth);
+ if (a.full < dfixed_const(4)) {
wm->lb_request_fifo_depth = 4;
} else {
- wm->lb_request_fifo_depth = rfixed_trunc(request_fifo_depth);
+ wm->lb_request_fifo_depth = dfixed_trunc(request_fifo_depth);
}
/* Determine consumption rate
@@ -907,23 +814,23 @@ void rv515_crtc_bandwidth_compute(struct radeon_device *rdev,
* vsc = vertical scaling ratio, defined as source/destination
* hsc = horizontal scaling ratio, defined as source/destination
*/
- a.full = rfixed_const(mode->clock);
- b.full = rfixed_const(1000);
- a.full = rfixed_div(a, b);
- pclk.full = rfixed_div(b, a);
+ a.full = dfixed_const(mode->clock);
+ b.full = dfixed_const(1000);
+ a.full = dfixed_div(a, b);
+ pclk.full = dfixed_div(b, a);
if (crtc->rmx_type != RMX_OFF) {
- b.full = rfixed_const(2);
+ b.full = dfixed_const(2);
if (crtc->vsc.full > b.full)
b.full = crtc->vsc.full;
- b.full = rfixed_mul(b, crtc->hsc);
- c.full = rfixed_const(2);
- b.full = rfixed_div(b, c);
- consumption_time.full = rfixed_div(pclk, b);
+ b.full = dfixed_mul(b, crtc->hsc);
+ c.full = dfixed_const(2);
+ b.full = dfixed_div(b, c);
+ consumption_time.full = dfixed_div(pclk, b);
} else {
consumption_time.full = pclk.full;
}
- a.full = rfixed_const(1);
- wm->consumption_rate.full = rfixed_div(a, consumption_time);
+ a.full = dfixed_const(1);
+ wm->consumption_rate.full = dfixed_div(a, consumption_time);
/* Determine line time
@@ -931,27 +838,27 @@ void rv515_crtc_bandwidth_compute(struct radeon_device *rdev,
* LineTime = total number of horizontal pixels
* pclk = pixel clock period(ns)
*/
- a.full = rfixed_const(crtc->base.mode.crtc_htotal);
- line_time.full = rfixed_mul(a, pclk);
+ a.full = dfixed_const(crtc->base.mode.crtc_htotal);
+ line_time.full = dfixed_mul(a, pclk);
/* Determine active time
* ActiveTime = time of active region of display within one line,
* hactive = total number of horizontal active pixels
* htotal = total number of horizontal pixels
*/
- a.full = rfixed_const(crtc->base.mode.crtc_htotal);
- b.full = rfixed_const(crtc->base.mode.crtc_hdisplay);
- wm->active_time.full = rfixed_mul(line_time, b);
- wm->active_time.full = rfixed_div(wm->active_time, a);
+ a.full = dfixed_const(crtc->base.mode.crtc_htotal);
+ b.full = dfixed_const(crtc->base.mode.crtc_hdisplay);
+ wm->active_time.full = dfixed_mul(line_time, b);
+ wm->active_time.full = dfixed_div(wm->active_time, a);
/* Determine chunk time
* ChunkTime = the time it takes the DCP to send one chunk of data
* to the LB which consists of pipeline delay and inter chunk gap
* sclk = system clock(MHz)
*/
- a.full = rfixed_const(600 * 1000);
- chunk_time.full = rfixed_div(a, rdev->pm.sclk);
- read_delay_latency.full = rfixed_const(1000);
+ a.full = dfixed_const(600 * 1000);
+ chunk_time.full = dfixed_div(a, rdev->pm.sclk);
+ read_delay_latency.full = dfixed_const(1000);
/* Determine the worst case latency
* NumLinePair = Number of line pairs to request(1=2 lines, 2=4 lines)
@@ -961,9 +868,9 @@ void rv515_crtc_bandwidth_compute(struct radeon_device *rdev,
* ChunkTime = time it takes the DCP to send one chunk of data to the LB
* which consists of pipeline delay and inter chunk gap
*/
- if (rfixed_trunc(wm->num_line_pair) > 1) {
- a.full = rfixed_const(3);
- wm->worst_case_latency.full = rfixed_mul(a, chunk_time);
+ if (dfixed_trunc(wm->num_line_pair) > 1) {
+ a.full = dfixed_const(3);
+ wm->worst_case_latency.full = dfixed_mul(a, chunk_time);
wm->worst_case_latency.full += read_delay_latency.full;
} else {
wm->worst_case_latency.full = chunk_time.full + read_delay_latency.full;
@@ -979,34 +886,34 @@ void rv515_crtc_bandwidth_compute(struct radeon_device *rdev,
* of data to the LB which consists of
* pipeline delay and inter chunk gap
*/
- if ((2+wm->lb_request_fifo_depth) >= rfixed_trunc(request_fifo_depth)) {
+ if ((2+wm->lb_request_fifo_depth) >= dfixed_trunc(request_fifo_depth)) {
tolerable_latency.full = line_time.full;
} else {
- tolerable_latency.full = rfixed_const(wm->lb_request_fifo_depth - 2);
+ tolerable_latency.full = dfixed_const(wm->lb_request_fifo_depth - 2);
tolerable_latency.full = request_fifo_depth.full - tolerable_latency.full;
- tolerable_latency.full = rfixed_mul(tolerable_latency, chunk_time);
+ tolerable_latency.full = dfixed_mul(tolerable_latency, chunk_time);
tolerable_latency.full = line_time.full - tolerable_latency.full;
}
/* We assume worst case 32 bits (4 bytes) */
- wm->dbpp.full = rfixed_const(2 * 16);
+ wm->dbpp.full = dfixed_const(2 * 16);
/* Determine the maximum priority mark
* width = viewport width in pixels
*/
- a.full = rfixed_const(16);
- wm->priority_mark_max.full = rfixed_const(crtc->base.mode.crtc_hdisplay);
- wm->priority_mark_max.full = rfixed_div(wm->priority_mark_max, a);
- wm->priority_mark_max.full = rfixed_ceil(wm->priority_mark_max);
+ a.full = dfixed_const(16);
+ wm->priority_mark_max.full = dfixed_const(crtc->base.mode.crtc_hdisplay);
+ wm->priority_mark_max.full = dfixed_div(wm->priority_mark_max, a);
+ wm->priority_mark_max.full = dfixed_ceil(wm->priority_mark_max);
/* Determine estimated width */
estimated_width.full = tolerable_latency.full - wm->worst_case_latency.full;
- estimated_width.full = rfixed_div(estimated_width, consumption_time);
- if (rfixed_trunc(estimated_width) > crtc->base.mode.crtc_hdisplay) {
+ estimated_width.full = dfixed_div(estimated_width, consumption_time);
+ if (dfixed_trunc(estimated_width) > crtc->base.mode.crtc_hdisplay) {
wm->priority_mark.full = wm->priority_mark_max.full;
} else {
- a.full = rfixed_const(16);
- wm->priority_mark.full = rfixed_div(estimated_width, a);
- wm->priority_mark.full = rfixed_ceil(wm->priority_mark);
+ a.full = dfixed_const(16);
+ wm->priority_mark.full = dfixed_div(estimated_width, a);
+ wm->priority_mark.full = dfixed_ceil(wm->priority_mark);
wm->priority_mark.full = wm->priority_mark_max.full - wm->priority_mark.full;
}
}
@@ -1035,58 +942,58 @@ void rv515_bandwidth_avivo_update(struct radeon_device *rdev)
WREG32(LB_MAX_REQ_OUTSTANDING, tmp);
if (mode0 && mode1) {
- if (rfixed_trunc(wm0.dbpp) > 64)
- a.full = rfixed_div(wm0.dbpp, wm0.num_line_pair);
+ if (dfixed_trunc(wm0.dbpp) > 64)
+ a.full = dfixed_div(wm0.dbpp, wm0.num_line_pair);
else
a.full = wm0.num_line_pair.full;
- if (rfixed_trunc(wm1.dbpp) > 64)
- b.full = rfixed_div(wm1.dbpp, wm1.num_line_pair);
+ if (dfixed_trunc(wm1.dbpp) > 64)
+ b.full = dfixed_div(wm1.dbpp, wm1.num_line_pair);
else
b.full = wm1.num_line_pair.full;
a.full += b.full;
- fill_rate.full = rfixed_div(wm0.sclk, a);
+ fill_rate.full = dfixed_div(wm0.sclk, a);
if (wm0.consumption_rate.full > fill_rate.full) {
b.full = wm0.consumption_rate.full - fill_rate.full;
- b.full = rfixed_mul(b, wm0.active_time);
- a.full = rfixed_const(16);
- b.full = rfixed_div(b, a);
- a.full = rfixed_mul(wm0.worst_case_latency,
+ b.full = dfixed_mul(b, wm0.active_time);
+ a.full = dfixed_const(16);
+ b.full = dfixed_div(b, a);
+ a.full = dfixed_mul(wm0.worst_case_latency,
wm0.consumption_rate);
priority_mark02.full = a.full + b.full;
} else {
- a.full = rfixed_mul(wm0.worst_case_latency,
+ a.full = dfixed_mul(wm0.worst_case_latency,
wm0.consumption_rate);
- b.full = rfixed_const(16 * 1000);
- priority_mark02.full = rfixed_div(a, b);
+ b.full = dfixed_const(16 * 1000);
+ priority_mark02.full = dfixed_div(a, b);
}
if (wm1.consumption_rate.full > fill_rate.full) {
b.full = wm1.consumption_rate.full - fill_rate.full;
- b.full = rfixed_mul(b, wm1.active_time);
- a.full = rfixed_const(16);
- b.full = rfixed_div(b, a);
- a.full = rfixed_mul(wm1.worst_case_latency,
+ b.full = dfixed_mul(b, wm1.active_time);
+ a.full = dfixed_const(16);
+ b.full = dfixed_div(b, a);
+ a.full = dfixed_mul(wm1.worst_case_latency,
wm1.consumption_rate);
priority_mark12.full = a.full + b.full;
} else {
- a.full = rfixed_mul(wm1.worst_case_latency,
+ a.full = dfixed_mul(wm1.worst_case_latency,
wm1.consumption_rate);
- b.full = rfixed_const(16 * 1000);
- priority_mark12.full = rfixed_div(a, b);
+ b.full = dfixed_const(16 * 1000);
+ priority_mark12.full = dfixed_div(a, b);
}
if (wm0.priority_mark.full > priority_mark02.full)
priority_mark02.full = wm0.priority_mark.full;
- if (rfixed_trunc(priority_mark02) < 0)
+ if (dfixed_trunc(priority_mark02) < 0)
priority_mark02.full = 0;
if (wm0.priority_mark_max.full > priority_mark02.full)
priority_mark02.full = wm0.priority_mark_max.full;
if (wm1.priority_mark.full > priority_mark12.full)
priority_mark12.full = wm1.priority_mark.full;
- if (rfixed_trunc(priority_mark12) < 0)
+ if (dfixed_trunc(priority_mark12) < 0)
priority_mark12.full = 0;
if (wm1.priority_mark_max.full > priority_mark12.full)
priority_mark12.full = wm1.priority_mark_max.full;
- d1mode_priority_a_cnt = rfixed_trunc(priority_mark02);
- d2mode_priority_a_cnt = rfixed_trunc(priority_mark12);
+ d1mode_priority_a_cnt = dfixed_trunc(priority_mark02);
+ d2mode_priority_a_cnt = dfixed_trunc(priority_mark12);
if (rdev->disp_priority == 2) {
d1mode_priority_a_cnt |= MODE_PRIORITY_ALWAYS_ON;
d2mode_priority_a_cnt |= MODE_PRIORITY_ALWAYS_ON;
@@ -1096,32 +1003,32 @@ void rv515_bandwidth_avivo_update(struct radeon_device *rdev)
WREG32(D2MODE_PRIORITY_A_CNT, d2mode_priority_a_cnt);
WREG32(D2MODE_PRIORITY_B_CNT, d2mode_priority_a_cnt);
} else if (mode0) {
- if (rfixed_trunc(wm0.dbpp) > 64)
- a.full = rfixed_div(wm0.dbpp, wm0.num_line_pair);
+ if (dfixed_trunc(wm0.dbpp) > 64)
+ a.full = dfixed_div(wm0.dbpp, wm0.num_line_pair);
else
a.full = wm0.num_line_pair.full;
- fill_rate.full = rfixed_div(wm0.sclk, a);
+ fill_rate.full = dfixed_div(wm0.sclk, a);
if (wm0.consumption_rate.full > fill_rate.full) {
b.full = wm0.consumption_rate.full - fill_rate.full;
- b.full = rfixed_mul(b, wm0.active_time);
- a.full = rfixed_const(16);
- b.full = rfixed_div(b, a);
- a.full = rfixed_mul(wm0.worst_case_latency,
+ b.full = dfixed_mul(b, wm0.active_time);
+ a.full = dfixed_const(16);
+ b.full = dfixed_div(b, a);
+ a.full = dfixed_mul(wm0.worst_case_latency,
wm0.consumption_rate);
priority_mark02.full = a.full + b.full;
} else {
- a.full = rfixed_mul(wm0.worst_case_latency,
+ a.full = dfixed_mul(wm0.worst_case_latency,
wm0.consumption_rate);
- b.full = rfixed_const(16);
- priority_mark02.full = rfixed_div(a, b);
+ b.full = dfixed_const(16);
+ priority_mark02.full = dfixed_div(a, b);
}
if (wm0.priority_mark.full > priority_mark02.full)
priority_mark02.full = wm0.priority_mark.full;
- if (rfixed_trunc(priority_mark02) < 0)
+ if (dfixed_trunc(priority_mark02) < 0)
priority_mark02.full = 0;
if (wm0.priority_mark_max.full > priority_mark02.full)
priority_mark02.full = wm0.priority_mark_max.full;
- d1mode_priority_a_cnt = rfixed_trunc(priority_mark02);
+ d1mode_priority_a_cnt = dfixed_trunc(priority_mark02);
if (rdev->disp_priority == 2)
d1mode_priority_a_cnt |= MODE_PRIORITY_ALWAYS_ON;
WREG32(D1MODE_PRIORITY_A_CNT, d1mode_priority_a_cnt);
@@ -1129,32 +1036,32 @@ void rv515_bandwidth_avivo_update(struct radeon_device *rdev)
WREG32(D2MODE_PRIORITY_A_CNT, MODE_PRIORITY_OFF);
WREG32(D2MODE_PRIORITY_B_CNT, MODE_PRIORITY_OFF);
} else {
- if (rfixed_trunc(wm1.dbpp) > 64)
- a.full = rfixed_div(wm1.dbpp, wm1.num_line_pair);
+ if (dfixed_trunc(wm1.dbpp) > 64)
+ a.full = dfixed_div(wm1.dbpp, wm1.num_line_pair);
else
a.full = wm1.num_line_pair.full;
- fill_rate.full = rfixed_div(wm1.sclk, a);
+ fill_rate.full = dfixed_div(wm1.sclk, a);
if (wm1.consumption_rate.full > fill_rate.full) {
b.full = wm1.consumption_rate.full - fill_rate.full;
- b.full = rfixed_mul(b, wm1.active_time);
- a.full = rfixed_const(16);
- b.full = rfixed_div(b, a);
- a.full = rfixed_mul(wm1.worst_case_latency,
+ b.full = dfixed_mul(b, wm1.active_time);
+ a.full = dfixed_const(16);
+ b.full = dfixed_div(b, a);
+ a.full = dfixed_mul(wm1.worst_case_latency,
wm1.consumption_rate);
priority_mark12.full = a.full + b.full;
} else {
- a.full = rfixed_mul(wm1.worst_case_latency,
+ a.full = dfixed_mul(wm1.worst_case_latency,
wm1.consumption_rate);
- b.full = rfixed_const(16 * 1000);
- priority_mark12.full = rfixed_div(a, b);
+ b.full = dfixed_const(16 * 1000);
+ priority_mark12.full = dfixed_div(a, b);
}
if (wm1.priority_mark.full > priority_mark12.full)
priority_mark12.full = wm1.priority_mark.full;
- if (rfixed_trunc(priority_mark12) < 0)
+ if (dfixed_trunc(priority_mark12) < 0)
priority_mark12.full = 0;
if (wm1.priority_mark_max.full > priority_mark12.full)
priority_mark12.full = wm1.priority_mark_max.full;
- d2mode_priority_a_cnt = rfixed_trunc(priority_mark12);
+ d2mode_priority_a_cnt = dfixed_trunc(priority_mark12);
if (rdev->disp_priority == 2)
d2mode_priority_a_cnt |= MODE_PRIORITY_ALWAYS_ON;
WREG32(D1MODE_PRIORITY_A_CNT, MODE_PRIORITY_OFF);
diff --git a/drivers/gpu/drm/radeon/rv515d.h b/drivers/gpu/drm/radeon/rv515d.h
index fc216e49384..590309a710b 100644
--- a/drivers/gpu/drm/radeon/rv515d.h
+++ b/drivers/gpu/drm/radeon/rv515d.h
@@ -217,6 +217,52 @@
#define CP_PACKET3_GET_OPCODE(h) (((h) >> 8) & 0xFF)
/* Registers */
+#define R_0000F0_RBBM_SOFT_RESET 0x0000F0
+#define S_0000F0_SOFT_RESET_CP(x) (((x) & 0x1) << 0)
+#define G_0000F0_SOFT_RESET_CP(x) (((x) >> 0) & 0x1)
+#define C_0000F0_SOFT_RESET_CP 0xFFFFFFFE
+#define S_0000F0_SOFT_RESET_HI(x) (((x) & 0x1) << 1)
+#define G_0000F0_SOFT_RESET_HI(x) (((x) >> 1) & 0x1)
+#define C_0000F0_SOFT_RESET_HI 0xFFFFFFFD
+#define S_0000F0_SOFT_RESET_VAP(x) (((x) & 0x1) << 2)
+#define G_0000F0_SOFT_RESET_VAP(x) (((x) >> 2) & 0x1)
+#define C_0000F0_SOFT_RESET_VAP 0xFFFFFFFB
+#define S_0000F0_SOFT_RESET_RE(x) (((x) & 0x1) << 3)
+#define G_0000F0_SOFT_RESET_RE(x) (((x) >> 3) & 0x1)
+#define C_0000F0_SOFT_RESET_RE 0xFFFFFFF7
+#define S_0000F0_SOFT_RESET_PP(x) (((x) & 0x1) << 4)
+#define G_0000F0_SOFT_RESET_PP(x) (((x) >> 4) & 0x1)
+#define C_0000F0_SOFT_RESET_PP 0xFFFFFFEF
+#define S_0000F0_SOFT_RESET_E2(x) (((x) & 0x1) << 5)
+#define G_0000F0_SOFT_RESET_E2(x) (((x) >> 5) & 0x1)
+#define C_0000F0_SOFT_RESET_E2 0xFFFFFFDF
+#define S_0000F0_SOFT_RESET_RB(x) (((x) & 0x1) << 6)
+#define G_0000F0_SOFT_RESET_RB(x) (((x) >> 6) & 0x1)
+#define C_0000F0_SOFT_RESET_RB 0xFFFFFFBF
+#define S_0000F0_SOFT_RESET_HDP(x) (((x) & 0x1) << 7)
+#define G_0000F0_SOFT_RESET_HDP(x) (((x) >> 7) & 0x1)
+#define C_0000F0_SOFT_RESET_HDP 0xFFFFFF7F
+#define S_0000F0_SOFT_RESET_MC(x) (((x) & 0x1) << 8)
+#define G_0000F0_SOFT_RESET_MC(x) (((x) >> 8) & 0x1)
+#define C_0000F0_SOFT_RESET_MC 0xFFFFFEFF
+#define S_0000F0_SOFT_RESET_AIC(x) (((x) & 0x1) << 9)
+#define G_0000F0_SOFT_RESET_AIC(x) (((x) >> 9) & 0x1)
+#define C_0000F0_SOFT_RESET_AIC 0xFFFFFDFF
+#define S_0000F0_SOFT_RESET_VIP(x) (((x) & 0x1) << 10)
+#define G_0000F0_SOFT_RESET_VIP(x) (((x) >> 10) & 0x1)
+#define C_0000F0_SOFT_RESET_VIP 0xFFFFFBFF
+#define S_0000F0_SOFT_RESET_DISP(x) (((x) & 0x1) << 11)
+#define G_0000F0_SOFT_RESET_DISP(x) (((x) >> 11) & 0x1)
+#define C_0000F0_SOFT_RESET_DISP 0xFFFFF7FF
+#define S_0000F0_SOFT_RESET_CG(x) (((x) & 0x1) << 12)
+#define G_0000F0_SOFT_RESET_CG(x) (((x) >> 12) & 0x1)
+#define C_0000F0_SOFT_RESET_CG 0xFFFFEFFF
+#define S_0000F0_SOFT_RESET_GA(x) (((x) & 0x1) << 13)
+#define G_0000F0_SOFT_RESET_GA(x) (((x) >> 13) & 0x1)
+#define C_0000F0_SOFT_RESET_GA 0xFFFFDFFF
+#define S_0000F0_SOFT_RESET_IDCT(x) (((x) & 0x1) << 14)
+#define G_0000F0_SOFT_RESET_IDCT(x) (((x) >> 14) & 0x1)
+#define C_0000F0_SOFT_RESET_IDCT 0xFFFFBFFF
#define R_0000F8_CONFIG_MEMSIZE 0x0000F8
#define S_0000F8_CONFIG_MEMSIZE(x) (((x) & 0xFFFFFFFF) << 0)
#define G_0000F8_CONFIG_MEMSIZE(x) (((x) >> 0) & 0xFFFFFFFF)
diff --git a/drivers/gpu/drm/radeon/rv770.c b/drivers/gpu/drm/radeon/rv770.c
index 97958a64df1..253f24aec03 100644
--- a/drivers/gpu/drm/radeon/rv770.c
+++ b/drivers/gpu/drm/radeon/rv770.c
@@ -42,6 +42,10 @@
static void rv770_gpu_init(struct radeon_device *rdev);
void rv770_fini(struct radeon_device *rdev);
+void rv770_pm_misc(struct radeon_device *rdev)
+{
+}
/*
* GART
@@ -237,7 +241,6 @@ void r700_cp_stop(struct radeon_device *rdev)
WREG32(CP_ME_CNTL, (CP_ME_HALT | CP_PFP_HALT));
}
-
static int rv770_cp_load_microcode(struct radeon_device *rdev)
{
const __be32 *fw_data;
@@ -272,6 +275,11 @@ static int rv770_cp_load_microcode(struct radeon_device *rdev)
return 0;
}
+void r700_cp_fini(struct radeon_device *rdev)
+{
+ r700_cp_stop(rdev);
+ radeon_ring_fini(rdev);
+}
/*
* Core functions
@@ -906,23 +914,12 @@ int rv770_mc_init(struct radeon_device *rdev)
rdev->mc.mc_vram_size = RREG32(CONFIG_MEMSIZE);
rdev->mc.real_vram_size = RREG32(CONFIG_MEMSIZE);
rdev->mc.visible_vram_size = rdev->mc.aper_size;
- /* FIXME remove this once we support unmappable VRAM */
- if (rdev->mc.mc_vram_size > rdev->mc.aper_size) {
- rdev->mc.mc_vram_size = rdev->mc.aper_size;
- rdev->mc.real_vram_size = rdev->mc.aper_size;
- }
r600_vram_gtt_location(rdev, &rdev->mc);
radeon_update_bandwidth_info(rdev);
return 0;
}
-int rv770_gpu_reset(struct radeon_device *rdev)
-{
- /* FIXME: implement any rv770 specific bits */
- return r600_gpu_reset(rdev);
-}
-
static int rv770_startup(struct radeon_device *rdev)
{
int r;
@@ -1094,8 +1091,6 @@ int rv770_init(struct radeon_device *rdev)
r = radeon_clocks_init(rdev);
if (r)
return r;
- /* Initialize power management */
- radeon_pm_init(rdev);
/* Fence driver */
r = radeon_fence_driver_init(rdev);
if (r)
@@ -1132,7 +1127,7 @@ int rv770_init(struct radeon_device *rdev)
r = rv770_startup(rdev);
if (r) {
dev_err(rdev->dev, "disabling GPU acceleration\n");
- r600_cp_fini(rdev);
+ r700_cp_fini(rdev);
r600_wb_fini(rdev);
r600_irq_fini(rdev);
radeon_irq_kms_fini(rdev);
@@ -1164,9 +1159,8 @@ int rv770_init(struct radeon_device *rdev)
void rv770_fini(struct radeon_device *rdev)
{
- radeon_pm_fini(rdev);
r600_blit_fini(rdev);
- r600_cp_fini(rdev);
+ r700_cp_fini(rdev);
r600_wb_fini(rdev);
r600_irq_fini(rdev);
radeon_irq_kms_fini(rdev);
diff --git a/drivers/gpu/drm/savage/savage_bci.c b/drivers/gpu/drm/savage/savage_bci.c
index bff6fc2524c..2d0c9ca484c 100644
--- a/drivers/gpu/drm/savage/savage_bci.c
+++ b/drivers/gpu/drm/savage/savage_bci.c
@@ -539,11 +539,10 @@ int savage_driver_load(struct drm_device *dev, unsigned long chipset)
{
drm_savage_private_t *dev_priv;
- dev_priv = kmalloc(sizeof(drm_savage_private_t), GFP_KERNEL);
+ dev_priv = kzalloc(sizeof(drm_savage_private_t), GFP_KERNEL);
if (dev_priv == NULL)
return -ENOMEM;
- memset(dev_priv, 0, sizeof(drm_savage_private_t));
dev->dev_private = (void *)dev_priv;
dev_priv->chipset = (enum savage_family)chipset;
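
kzalloc() returns memory that is already zeroed, so the separate memset() goes away; the two forms below are equivalent (and either may return NULL):

        ptr = kmalloc(size, GFP_KERNEL);        /* old: allocate ...      */
        if (ptr)
                memset(ptr, 0, size);           /* ... then clear by hand */

        ptr = kzalloc(size, GFP_KERNEL);        /* new: one zeroed alloc  */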
diff --git a/drivers/gpu/drm/ttm/Makefile b/drivers/gpu/drm/ttm/Makefile
index 1e138f5bae0..4256e200647 100644
--- a/drivers/gpu/drm/ttm/Makefile
+++ b/drivers/gpu/drm/ttm/Makefile
@@ -4,6 +4,6 @@
ccflags-y := -Iinclude/drm
ttm-y := ttm_agp_backend.o ttm_memory.o ttm_tt.o ttm_bo.o \
ttm_bo_util.o ttm_bo_vm.o ttm_module.o ttm_global.o \
- ttm_object.o ttm_lock.o ttm_execbuf_util.o
+ ttm_object.o ttm_lock.o ttm_execbuf_util.o ttm_page_alloc.o
obj-$(CONFIG_DRM_TTM) += ttm.o
diff --git a/drivers/gpu/drm/ttm/ttm_bo.c b/drivers/gpu/drm/ttm/ttm_bo.c
index dd47b2a9a79..555ebb12ace 100644
--- a/drivers/gpu/drm/ttm/ttm_bo.c
+++ b/drivers/gpu/drm/ttm/ttm_bo.c
@@ -79,8 +79,6 @@ static void ttm_mem_type_debug(struct ttm_bo_device *bdev, int mem_type)
printk(KERN_ERR TTM_PFX " use_type: %d\n", man->use_type);
printk(KERN_ERR TTM_PFX " flags: 0x%08X\n", man->flags);
printk(KERN_ERR TTM_PFX " gpu_offset: 0x%08lX\n", man->gpu_offset);
- printk(KERN_ERR TTM_PFX " io_offset: 0x%08lX\n", man->io_offset);
- printk(KERN_ERR TTM_PFX " io_size: %ld\n", man->io_size);
printk(KERN_ERR TTM_PFX " size: %llu\n", man->size);
printk(KERN_ERR TTM_PFX " available_caching: 0x%08X\n",
man->available_caching);
@@ -357,7 +355,8 @@ static int ttm_bo_add_ttm(struct ttm_buffer_object *bo, bool zero_alloc)
static int ttm_bo_handle_move_mem(struct ttm_buffer_object *bo,
struct ttm_mem_reg *mem,
- bool evict, bool interruptible, bool no_wait)
+ bool evict, bool interruptible,
+ bool no_wait_reserve, bool no_wait_gpu)
{
struct ttm_bo_device *bdev = bo->bdev;
bool old_is_pci = ttm_mem_reg_is_pci(bdev, &bo->mem);
@@ -402,12 +401,12 @@ static int ttm_bo_handle_move_mem(struct ttm_buffer_object *bo,
if (!(old_man->flags & TTM_MEMTYPE_FLAG_FIXED) &&
!(new_man->flags & TTM_MEMTYPE_FLAG_FIXED))
- ret = ttm_bo_move_ttm(bo, evict, no_wait, mem);
+ ret = ttm_bo_move_ttm(bo, evict, no_wait_reserve, no_wait_gpu, mem);
else if (bdev->driver->move)
ret = bdev->driver->move(bo, evict, interruptible,
- no_wait, mem);
+ no_wait_reserve, no_wait_gpu, mem);
else
- ret = ttm_bo_move_memcpy(bo, evict, no_wait, mem);
+ ret = ttm_bo_move_memcpy(bo, evict, no_wait_reserve, no_wait_gpu, mem);
if (ret)
goto out_err;
@@ -605,8 +604,22 @@ void ttm_bo_unref(struct ttm_buffer_object **p_bo)
}
EXPORT_SYMBOL(ttm_bo_unref);
+int ttm_bo_lock_delayed_workqueue(struct ttm_bo_device *bdev)
+{
+ return cancel_delayed_work_sync(&bdev->wq);
+}
+EXPORT_SYMBOL(ttm_bo_lock_delayed_workqueue);
+
+void ttm_bo_unlock_delayed_workqueue(struct ttm_bo_device *bdev, int resched)
+{
+ if (resched)
+ schedule_delayed_work(&bdev->wq,
+ ((HZ / 100) < 1) ? 1 : HZ / 100);
+}
+EXPORT_SYMBOL(ttm_bo_unlock_delayed_workqueue);
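+
+/*
+ * Typical pairing in a driver suspend/reset path (usage sketch): take the
+ * delayed-delete work offline, do the critical section, then reschedule it
+ * only if cancel_delayed_work_sync() reported work had been pending:
+ *
+ *	int pending = ttm_bo_lock_delayed_workqueue(bdev);
+ *	/\* ... tear down or reset hardware ... *\/
+ *	ttm_bo_unlock_delayed_workqueue(bdev, pending);
+ */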
+
static int ttm_bo_evict(struct ttm_buffer_object *bo, bool interruptible,
- bool no_wait)
+ bool no_wait_reserve, bool no_wait_gpu)
{
struct ttm_bo_device *bdev = bo->bdev;
struct ttm_bo_global *glob = bo->glob;
@@ -615,7 +628,7 @@ static int ttm_bo_evict(struct ttm_buffer_object *bo, bool interruptible,
int ret = 0;
spin_lock(&bo->lock);
- ret = ttm_bo_wait(bo, false, interruptible, no_wait);
+ ret = ttm_bo_wait(bo, false, interruptible, no_wait_gpu);
spin_unlock(&bo->lock);
if (unlikely(ret != 0)) {
@@ -631,6 +644,7 @@ static int ttm_bo_evict(struct ttm_buffer_object *bo, bool interruptible,
evict_mem = bo->mem;
evict_mem.mm_node = NULL;
+ evict_mem.bus.io_reserved = false;
placement.fpfn = 0;
placement.lpfn = 0;
@@ -638,7 +652,7 @@ static int ttm_bo_evict(struct ttm_buffer_object *bo, bool interruptible,
placement.num_busy_placement = 0;
bdev->driver->evict_flags(bo, &placement);
ret = ttm_bo_mem_space(bo, &placement, &evict_mem, interruptible,
- no_wait);
+ no_wait_reserve, no_wait_gpu);
if (ret) {
if (ret != -ERESTARTSYS) {
printk(KERN_ERR TTM_PFX
@@ -650,7 +664,7 @@ static int ttm_bo_evict(struct ttm_buffer_object *bo, bool interruptible,
}
ret = ttm_bo_handle_move_mem(bo, &evict_mem, true, interruptible,
- no_wait);
+ no_wait_reserve, no_wait_gpu);
if (ret) {
if (ret != -ERESTARTSYS)
printk(KERN_ERR TTM_PFX "Buffer eviction failed\n");
@@ -670,7 +684,8 @@ out:
static int ttm_mem_evict_first(struct ttm_bo_device *bdev,
uint32_t mem_type,
- bool interruptible, bool no_wait)
+ bool interruptible, bool no_wait_reserve,
+ bool no_wait_gpu)
{
struct ttm_bo_global *glob = bdev->glob;
struct ttm_mem_type_manager *man = &bdev->man[mem_type];
@@ -687,11 +702,11 @@ retry:
bo = list_first_entry(&man->lru, struct ttm_buffer_object, lru);
kref_get(&bo->list_kref);
- ret = ttm_bo_reserve_locked(bo, false, true, false, 0);
+ ret = ttm_bo_reserve_locked(bo, false, no_wait_reserve, false, 0);
if (unlikely(ret == -EBUSY)) {
spin_unlock(&glob->lru_lock);
- if (likely(!no_wait))
+ if (likely(!no_wait_gpu))
ret = ttm_bo_wait_unreserved(bo, interruptible);
kref_put(&bo->list_kref, ttm_bo_release_list);
@@ -713,7 +728,7 @@ retry:
while (put_count--)
kref_put(&bo->list_kref, ttm_bo_ref_bug);
- ret = ttm_bo_evict(bo, interruptible, no_wait);
+ ret = ttm_bo_evict(bo, interruptible, no_wait_reserve, no_wait_gpu);
ttm_bo_unreserve(bo);
kref_put(&bo->list_kref, ttm_bo_release_list);
@@ -764,7 +779,9 @@ static int ttm_bo_mem_force_space(struct ttm_buffer_object *bo,
uint32_t mem_type,
struct ttm_placement *placement,
struct ttm_mem_reg *mem,
- bool interruptible, bool no_wait)
+ bool interruptible,
+ bool no_wait_reserve,
+ bool no_wait_gpu)
{
struct ttm_bo_device *bdev = bo->bdev;
struct ttm_bo_global *glob = bdev->glob;
@@ -785,7 +802,7 @@ static int ttm_bo_mem_force_space(struct ttm_buffer_object *bo,
}
spin_unlock(&glob->lru_lock);
ret = ttm_mem_evict_first(bdev, mem_type, interruptible,
- no_wait);
+ no_wait_reserve, no_wait_gpu);
if (unlikely(ret != 0))
return ret;
} while (1);
@@ -855,7 +872,8 @@ static bool ttm_bo_mt_compatible(struct ttm_mem_type_manager *man,
int ttm_bo_mem_space(struct ttm_buffer_object *bo,
struct ttm_placement *placement,
struct ttm_mem_reg *mem,
- bool interruptible, bool no_wait)
+ bool interruptible, bool no_wait_reserve,
+ bool no_wait_gpu)
{
struct ttm_bo_device *bdev = bo->bdev;
struct ttm_mem_type_manager *man;
@@ -952,7 +970,7 @@ int ttm_bo_mem_space(struct ttm_buffer_object *bo,
}
ret = ttm_bo_mem_force_space(bo, mem_type, placement, mem,
- interruptible, no_wait);
+ interruptible, no_wait_reserve, no_wait_gpu);
if (ret == 0 && mem->mm_node) {
mem->placement = cur_flags;
mem->mm_node->private = bo;
@@ -978,7 +996,8 @@ EXPORT_SYMBOL(ttm_bo_wait_cpu);
int ttm_bo_move_buffer(struct ttm_buffer_object *bo,
struct ttm_placement *placement,
- bool interruptible, bool no_wait)
+ bool interruptible, bool no_wait_reserve,
+ bool no_wait_gpu)
{
struct ttm_bo_global *glob = bo->glob;
int ret = 0;
@@ -992,20 +1011,21 @@ int ttm_bo_move_buffer(struct ttm_buffer_object *bo,
* instead of doing it here.
*/
spin_lock(&bo->lock);
- ret = ttm_bo_wait(bo, false, interruptible, no_wait);
+ ret = ttm_bo_wait(bo, false, interruptible, no_wait_gpu);
spin_unlock(&bo->lock);
if (ret)
return ret;
mem.num_pages = bo->num_pages;
mem.size = mem.num_pages << PAGE_SHIFT;
mem.page_alignment = bo->mem.page_alignment;
+ mem.bus.io_reserved = false;
/*
* Determine where to move the buffer.
*/
- ret = ttm_bo_mem_space(bo, placement, &mem, interruptible, no_wait);
+ ret = ttm_bo_mem_space(bo, placement, &mem, interruptible, no_wait_reserve, no_wait_gpu);
if (ret)
goto out_unlock;
- ret = ttm_bo_handle_move_mem(bo, &mem, false, interruptible, no_wait);
+ ret = ttm_bo_handle_move_mem(bo, &mem, false, interruptible, no_wait_reserve, no_wait_gpu);
out_unlock:
if (ret && mem.mm_node) {
spin_lock(&glob->lru_lock);
@@ -1039,7 +1059,8 @@ static int ttm_bo_mem_compat(struct ttm_placement *placement,
int ttm_bo_validate(struct ttm_buffer_object *bo,
struct ttm_placement *placement,
- bool interruptible, bool no_wait)
+ bool interruptible, bool no_wait_reserve,
+ bool no_wait_gpu)
{
int ret;
@@ -1054,7 +1075,7 @@ int ttm_bo_validate(struct ttm_buffer_object *bo,
*/
ret = ttm_bo_mem_compat(placement, &bo->mem);
if (ret < 0) {
- ret = ttm_bo_move_buffer(bo, placement, interruptible, no_wait);
+ ret = ttm_bo_move_buffer(bo, placement, interruptible, no_wait_reserve, no_wait_gpu);
if (ret)
return ret;
} else {
@@ -1153,6 +1174,7 @@ int ttm_bo_init(struct ttm_bo_device *bdev,
bo->mem.num_pages = bo->num_pages;
bo->mem.mm_node = NULL;
bo->mem.page_alignment = page_alignment;
+ bo->mem.bus.io_reserved = false;
bo->buffer_start = buffer_start & PAGE_MASK;
bo->priv_flags = 0;
bo->mem.placement = (TTM_PL_FLAG_SYSTEM | TTM_PL_FLAG_CACHED);
@@ -1175,7 +1197,7 @@ int ttm_bo_init(struct ttm_bo_device *bdev,
goto out_err;
}
- ret = ttm_bo_validate(bo, placement, interruptible, false);
+ ret = ttm_bo_validate(bo, placement, interruptible, false, false);
if (ret)
goto out_err;
@@ -1249,7 +1271,7 @@ static int ttm_bo_force_list_clean(struct ttm_bo_device *bdev,
spin_lock(&glob->lru_lock);
while (!list_empty(&man->lru)) {
spin_unlock(&glob->lru_lock);
- ret = ttm_mem_evict_first(bdev, mem_type, false, false);
+ ret = ttm_mem_evict_first(bdev, mem_type, false, false, false);
if (ret) {
if (allow_errors) {
return ret;
@@ -1553,26 +1575,6 @@ bool ttm_mem_reg_is_pci(struct ttm_bo_device *bdev, struct ttm_mem_reg *mem)
return true;
}
-int ttm_bo_pci_offset(struct ttm_bo_device *bdev,
- struct ttm_mem_reg *mem,
- unsigned long *bus_base,
- unsigned long *bus_offset, unsigned long *bus_size)
-{
- struct ttm_mem_type_manager *man = &bdev->man[mem->mem_type];
-
- *bus_size = 0;
- if (!(man->flags & TTM_MEMTYPE_FLAG_MAPPABLE))
- return -EINVAL;
-
- if (ttm_mem_reg_is_pci(bdev, mem)) {
- *bus_offset = mem->mm_node->start << PAGE_SHIFT;
- *bus_size = mem->num_pages << PAGE_SHIFT;
- *bus_base = man->io_offset;
- }
-
- return 0;
-}
-
void ttm_bo_unmap_virtual(struct ttm_buffer_object *bo)
{
struct ttm_bo_device *bdev = bo->bdev;
@@ -1581,8 +1583,8 @@ void ttm_bo_unmap_virtual(struct ttm_buffer_object *bo)
if (!bdev->dev_mapping)
return;
-
unmap_mapping_range(bdev->dev_mapping, offset, holelen, 1);
+ ttm_mem_io_free(bdev, &bo->mem);
}
EXPORT_SYMBOL(ttm_bo_unmap_virtual);
@@ -1716,40 +1718,12 @@ int ttm_bo_wait(struct ttm_buffer_object *bo,
}
EXPORT_SYMBOL(ttm_bo_wait);
-void ttm_bo_unblock_reservation(struct ttm_buffer_object *bo)
-{
- atomic_set(&bo->reserved, 0);
- wake_up_all(&bo->event_queue);
-}
-
-int ttm_bo_block_reservation(struct ttm_buffer_object *bo, bool interruptible,
- bool no_wait)
-{
- int ret;
-
- while (unlikely(atomic_cmpxchg(&bo->reserved, 0, 1) != 0)) {
- if (no_wait)
- return -EBUSY;
- else if (interruptible) {
- ret = wait_event_interruptible
- (bo->event_queue, atomic_read(&bo->reserved) == 0);
- if (unlikely(ret != 0))
- return ret;
- } else {
- wait_event(bo->event_queue,
- atomic_read(&bo->reserved) == 0);
- }
- }
- return 0;
-}
-
int ttm_bo_synccpu_write_grab(struct ttm_buffer_object *bo, bool no_wait)
{
int ret = 0;
/*
- * Using ttm_bo_reserve instead of ttm_bo_block_reservation
- * makes sure the lru lists are updated.
+ * Using ttm_bo_reserve makes sure the lru lists are updated.
*/
ret = ttm_bo_reserve(bo, true, no_wait, false, 0);
@@ -1839,7 +1813,7 @@ static int ttm_bo_swapout(struct ttm_mem_shrink *shrink)
evict_mem.mem_type = TTM_PL_SYSTEM;
ret = ttm_bo_handle_move_mem(bo, &evict_mem, true,
- false, false);
+ false, false, false);
if (unlikely(ret != 0))
goto out;
}
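
The single no_wait parameter threaded through these paths is now split in
two: no_wait_reserve (do not sleep waiting to reserve the buffer) and
no_wait_gpu (do not sleep waiting on GPU fences). A hedged caller sketch,
assuming a buffer object and a placement already prepared by the driver:

	int ret;

	/* Wait for the reservation, but fail fast while the GPU is busy. */
	ret = ttm_bo_validate(bo, &placement, true /* interruptible */,
			      false /* no_wait_reserve */,
			      true /* no_wait_gpu */);
	if (ret == -EBUSY) {
		/* GPU still owns the buffer; fall back to a blocking call. */
		ret = ttm_bo_validate(bo, &placement, true, false, false);
	}
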
diff --git a/drivers/gpu/drm/ttm/ttm_bo_util.c b/drivers/gpu/drm/ttm/ttm_bo_util.c
index d764e82e799..13012a1f148 100644
--- a/drivers/gpu/drm/ttm/ttm_bo_util.c
+++ b/drivers/gpu/drm/ttm/ttm_bo_util.c
@@ -50,7 +50,8 @@ void ttm_bo_free_old_node(struct ttm_buffer_object *bo)
}
int ttm_bo_move_ttm(struct ttm_buffer_object *bo,
- bool evict, bool no_wait, struct ttm_mem_reg *new_mem)
+ bool evict, bool no_wait_reserve,
+ bool no_wait_gpu, struct ttm_mem_reg *new_mem)
{
struct ttm_tt *ttm = bo->ttm;
struct ttm_mem_reg *old_mem = &bo->mem;
@@ -81,30 +82,51 @@ int ttm_bo_move_ttm(struct ttm_buffer_object *bo,
}
EXPORT_SYMBOL(ttm_bo_move_ttm);
+int ttm_mem_io_reserve(struct ttm_bo_device *bdev, struct ttm_mem_reg *mem)
+{
+ int ret;
+
+ if (!mem->bus.io_reserved) {
+ mem->bus.io_reserved = true;
+ ret = bdev->driver->io_mem_reserve(bdev, mem);
+ if (unlikely(ret != 0))
+ return ret;
+ }
+ return 0;
+}
+
+void ttm_mem_io_free(struct ttm_bo_device *bdev, struct ttm_mem_reg *mem)
+{
+ if (bdev->driver->io_mem_reserve) {
+ if (mem->bus.io_reserved) {
+ mem->bus.io_reserved = false;
+ bdev->driver->io_mem_free(bdev, mem);
+ }
+ }
+}
+
int ttm_mem_reg_ioremap(struct ttm_bo_device *bdev, struct ttm_mem_reg *mem,
void **virtual)
{
- struct ttm_mem_type_manager *man = &bdev->man[mem->mem_type];
- unsigned long bus_offset;
- unsigned long bus_size;
- unsigned long bus_base;
int ret;
void *addr;
*virtual = NULL;
- ret = ttm_bo_pci_offset(bdev, mem, &bus_base, &bus_offset, &bus_size);
- if (ret || bus_size == 0)
+ ret = ttm_mem_io_reserve(bdev, mem);
+ if (ret || !mem->bus.is_iomem)
return ret;
- if (!(man->flags & TTM_MEMTYPE_FLAG_NEEDS_IOREMAP))
- addr = (void *)(((u8 *) man->io_addr) + bus_offset);
- else {
+ if (mem->bus.addr) {
+ addr = mem->bus.addr;
+ } else {
if (mem->placement & TTM_PL_FLAG_WC)
- addr = ioremap_wc(bus_base + bus_offset, bus_size);
+ addr = ioremap_wc(mem->bus.base + mem->bus.offset, mem->bus.size);
else
- addr = ioremap_nocache(bus_base + bus_offset, bus_size);
- if (!addr)
+ addr = ioremap_nocache(mem->bus.base + mem->bus.offset, mem->bus.size);
+ if (!addr) {
+ ttm_mem_io_free(bdev, mem);
return -ENOMEM;
+ }
}
*virtual = addr;
return 0;
@@ -117,8 +139,9 @@ void ttm_mem_reg_iounmap(struct ttm_bo_device *bdev, struct ttm_mem_reg *mem,
man = &bdev->man[mem->mem_type];
- if (virtual && (man->flags & TTM_MEMTYPE_FLAG_NEEDS_IOREMAP))
+ if (virtual && mem->bus.addr == NULL)
iounmap(virtual);
+ ttm_mem_io_free(bdev, mem);
}
static int ttm_copy_io_page(void *dst, void *src, unsigned long page)
@@ -208,7 +231,8 @@ static int ttm_copy_ttm_io_page(struct ttm_tt *ttm, void *dst,
}
int ttm_bo_move_memcpy(struct ttm_buffer_object *bo,
- bool evict, bool no_wait, struct ttm_mem_reg *new_mem)
+ bool evict, bool no_wait_reserve, bool no_wait_gpu,
+ struct ttm_mem_reg *new_mem)
{
struct ttm_bo_device *bdev = bo->bdev;
struct ttm_mem_type_manager *man = &bdev->man[new_mem->mem_type];
@@ -369,26 +393,23 @@ pgprot_t ttm_io_prot(uint32_t caching_flags, pgprot_t tmp)
EXPORT_SYMBOL(ttm_io_prot);
static int ttm_bo_ioremap(struct ttm_buffer_object *bo,
- unsigned long bus_base,
- unsigned long bus_offset,
- unsigned long bus_size,
+ unsigned long offset,
+ unsigned long size,
struct ttm_bo_kmap_obj *map)
{
- struct ttm_bo_device *bdev = bo->bdev;
struct ttm_mem_reg *mem = &bo->mem;
- struct ttm_mem_type_manager *man = &bdev->man[mem->mem_type];
- if (!(man->flags & TTM_MEMTYPE_FLAG_NEEDS_IOREMAP)) {
+ if (bo->mem.bus.addr) {
map->bo_kmap_type = ttm_bo_map_premapped;
- map->virtual = (void *)(((u8 *) man->io_addr) + bus_offset);
+ map->virtual = (void *)(((u8 *)bo->mem.bus.addr) + offset);
} else {
map->bo_kmap_type = ttm_bo_map_iomap;
if (mem->placement & TTM_PL_FLAG_WC)
- map->virtual = ioremap_wc(bus_base + bus_offset,
- bus_size);
+ map->virtual = ioremap_wc(bo->mem.bus.base + bo->mem.bus.offset + offset,
+ size);
else
- map->virtual = ioremap_nocache(bus_base + bus_offset,
- bus_size);
+ map->virtual = ioremap_nocache(bo->mem.bus.base + bo->mem.bus.offset + offset,
+ size);
}
return (!map->virtual) ? -ENOMEM : 0;
}
@@ -441,13 +462,12 @@ int ttm_bo_kmap(struct ttm_buffer_object *bo,
unsigned long start_page, unsigned long num_pages,
struct ttm_bo_kmap_obj *map)
{
+ unsigned long offset, size;
int ret;
- unsigned long bus_base;
- unsigned long bus_offset;
- unsigned long bus_size;
BUG_ON(!list_empty(&bo->swap));
map->virtual = NULL;
+ map->bo = bo;
if (num_pages > bo->num_pages)
return -EINVAL;
if (start_page > bo->num_pages)
@@ -456,16 +476,15 @@ int ttm_bo_kmap(struct ttm_buffer_object *bo,
if (num_pages > 1 && !DRM_SUSER(DRM_CURPROC))
return -EPERM;
#endif
- ret = ttm_bo_pci_offset(bo->bdev, &bo->mem, &bus_base,
- &bus_offset, &bus_size);
+ ret = ttm_mem_io_reserve(bo->bdev, &bo->mem);
if (ret)
return ret;
- if (bus_size == 0) {
+ if (!bo->mem.bus.is_iomem) {
return ttm_bo_kmap_ttm(bo, start_page, num_pages, map);
} else {
- bus_offset += start_page << PAGE_SHIFT;
- bus_size = num_pages << PAGE_SHIFT;
- return ttm_bo_ioremap(bo, bus_base, bus_offset, bus_size, map);
+ offset = start_page << PAGE_SHIFT;
+ size = num_pages << PAGE_SHIFT;
+ return ttm_bo_ioremap(bo, offset, size, map);
}
}
EXPORT_SYMBOL(ttm_bo_kmap);
@@ -477,6 +496,7 @@ void ttm_bo_kunmap(struct ttm_bo_kmap_obj *map)
switch (map->bo_kmap_type) {
case ttm_bo_map_iomap:
iounmap(map->virtual);
+ ttm_mem_io_free(map->bo->bdev, &map->bo->mem);
break;
case ttm_bo_map_vmap:
vunmap(map->virtual);
@@ -494,39 +514,11 @@ void ttm_bo_kunmap(struct ttm_bo_kmap_obj *map)
}
EXPORT_SYMBOL(ttm_bo_kunmap);
-int ttm_bo_pfn_prot(struct ttm_buffer_object *bo,
- unsigned long dst_offset,
- unsigned long *pfn, pgprot_t *prot)
-{
- struct ttm_mem_reg *mem = &bo->mem;
- struct ttm_bo_device *bdev = bo->bdev;
- unsigned long bus_offset;
- unsigned long bus_size;
- unsigned long bus_base;
- int ret;
- ret = ttm_bo_pci_offset(bdev, mem, &bus_base, &bus_offset,
- &bus_size);
- if (ret)
- return -EINVAL;
- if (bus_size != 0)
- *pfn = (bus_base + bus_offset + dst_offset) >> PAGE_SHIFT;
- else
- if (!bo->ttm)
- return -EINVAL;
- else
- *pfn = page_to_pfn(ttm_tt_get_page(bo->ttm,
- dst_offset >>
- PAGE_SHIFT));
- *prot = (mem->placement & TTM_PL_FLAG_CACHED) ?
- PAGE_KERNEL : ttm_io_prot(mem->placement, PAGE_KERNEL);
-
- return 0;
-}
-
int ttm_bo_move_accel_cleanup(struct ttm_buffer_object *bo,
void *sync_obj,
void *sync_obj_arg,
- bool evict, bool no_wait,
+ bool evict, bool no_wait_reserve,
+ bool no_wait_gpu,
struct ttm_mem_reg *new_mem)
{
struct ttm_bo_device *bdev = bo->bdev;
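
The new ttm_mem_io_reserve()/ttm_mem_io_free() pair brackets every mapping of
a buffer's bus address space, and mem->bus.io_reserved keeps the calls
idempotent. A minimal sketch of the pairing, assuming a mappable mem region
whose driver implements the io_mem hooks:

	void *virt = NULL;

	if (ttm_mem_io_reserve(bdev, mem) == 0 && mem->bus.is_iomem) {
		virt = ioremap_nocache(mem->bus.base + mem->bus.offset,
				       mem->bus.size);
		if (!virt)
			ttm_mem_io_free(bdev, mem);	/* undo on failure */
	}

	if (virt) {
		/* ... access the mapping ... */
		iounmap(virt);
		ttm_mem_io_free(bdev, mem);		/* balance the reserve */
	}
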
diff --git a/drivers/gpu/drm/ttm/ttm_bo_vm.c b/drivers/gpu/drm/ttm/ttm_bo_vm.c
index 668dbe8b8dd..fe6cb77899f 100644
--- a/drivers/gpu/drm/ttm/ttm_bo_vm.c
+++ b/drivers/gpu/drm/ttm/ttm_bo_vm.c
@@ -74,9 +74,6 @@ static int ttm_bo_vm_fault(struct vm_area_struct *vma, struct vm_fault *vmf)
struct ttm_buffer_object *bo = (struct ttm_buffer_object *)
vma->vm_private_data;
struct ttm_bo_device *bdev = bo->bdev;
- unsigned long bus_base;
- unsigned long bus_offset;
- unsigned long bus_size;
unsigned long page_offset;
unsigned long page_last;
unsigned long pfn;
@@ -84,7 +81,6 @@ static int ttm_bo_vm_fault(struct vm_area_struct *vma, struct vm_fault *vmf)
struct page *page;
int ret;
int i;
- bool is_iomem;
unsigned long address = (unsigned long)vmf->virtual_address;
int retval = VM_FAULT_NOPAGE;
@@ -101,8 +97,21 @@ static int ttm_bo_vm_fault(struct vm_area_struct *vma, struct vm_fault *vmf)
return VM_FAULT_NOPAGE;
}
- if (bdev->driver->fault_reserve_notify)
- bdev->driver->fault_reserve_notify(bo);
+ if (bdev->driver->fault_reserve_notify) {
+ ret = bdev->driver->fault_reserve_notify(bo);
+ switch (ret) {
+ case 0:
+ break;
+ case -EBUSY:
+ set_need_resched();
+ case -ERESTARTSYS:
+ retval = VM_FAULT_NOPAGE;
+ goto out_unlock;
+ default:
+ retval = VM_FAULT_SIGBUS;
+ goto out_unlock;
+ }
+ }
/*
* Wait for buffer data in transit, due to a pipelined
@@ -122,15 +131,12 @@ static int ttm_bo_vm_fault(struct vm_area_struct *vma, struct vm_fault *vmf)
spin_unlock(&bo->lock);
- ret = ttm_bo_pci_offset(bdev, &bo->mem, &bus_base, &bus_offset,
- &bus_size);
- if (unlikely(ret != 0)) {
+ ret = ttm_mem_io_reserve(bdev, &bo->mem);
+ if (ret) {
retval = VM_FAULT_SIGBUS;
goto out_unlock;
}
- is_iomem = (bus_size != 0);
-
page_offset = ((address - vma->vm_start) >> PAGE_SHIFT) +
bo->vm_node->start - vma->vm_pgoff;
page_last = ((vma->vm_end - vma->vm_start) >> PAGE_SHIFT) +
@@ -154,8 +160,7 @@ static int ttm_bo_vm_fault(struct vm_area_struct *vma, struct vm_fault *vmf)
* vma->vm_page_prot when the object changes caching policy, with
* the correct locks held.
*/
-
- if (is_iomem) {
+ if (bo->mem.bus.is_iomem) {
vma->vm_page_prot = ttm_io_prot(bo->mem.placement,
vma->vm_page_prot);
} else {
@@ -171,10 +176,8 @@ static int ttm_bo_vm_fault(struct vm_area_struct *vma, struct vm_fault *vmf)
*/
for (i = 0; i < TTM_BO_VM_NUM_PREFAULT; ++i) {
-
- if (is_iomem)
- pfn = ((bus_base + bus_offset) >> PAGE_SHIFT) +
- page_offset;
+ if (bo->mem.bus.is_iomem)
+ pfn = ((bo->mem.bus.base + bo->mem.bus.offset) >> PAGE_SHIFT) + page_offset;
else {
page = ttm_tt_get_page(ttm, page_offset);
if (unlikely(!page && i == 0)) {
@@ -198,7 +201,6 @@ static int ttm_bo_vm_fault(struct vm_area_struct *vma, struct vm_fault *vmf)
retval =
(ret == -ENOMEM) ? VM_FAULT_OOM : VM_FAULT_SIGBUS;
goto out_unlock;
-
}
address += PAGE_SIZE;
@@ -221,8 +223,7 @@ static void ttm_bo_vm_open(struct vm_area_struct *vma)
static void ttm_bo_vm_close(struct vm_area_struct *vma)
{
- struct ttm_buffer_object *bo =
- (struct ttm_buffer_object *)vma->vm_private_data;
+ struct ttm_buffer_object *bo = (struct ttm_buffer_object *)vma->vm_private_data;
ttm_bo_unref(&bo);
vma->vm_private_data = NULL;
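
fault_reserve_notify() now returns a status that the fault handler maps onto
VM_FAULT_* codes: 0 proceeds, -EBUSY and -ERESTARTSYS retry the fault,
anything else becomes SIGBUS. A hedged sketch of a driver hook, with a
hypothetical in_migration flag standing in for real driver state:

	#include <linux/errno.h>
	#include "ttm/ttm_bo_api.h"

	struct example_bo {
		struct ttm_buffer_object base;
		bool in_migration;
	};

	static int example_fault_reserve_notify(struct ttm_buffer_object *bo)
	{
		struct example_bo *ebo =
			container_of(bo, struct example_bo, base);

		/* -EBUSY reschedules the faulting task and retries later. */
		return ebo->in_migration ? -EBUSY : 0;
	}
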
diff --git a/drivers/gpu/drm/ttm/ttm_lock.c b/drivers/gpu/drm/ttm/ttm_lock.c
index 3d172ef04ee..de41e55a944 100644
--- a/drivers/gpu/drm/ttm/ttm_lock.c
+++ b/drivers/gpu/drm/ttm/ttm_lock.c
@@ -204,7 +204,6 @@ static int __ttm_vt_unlock(struct ttm_lock *lock)
lock->flags &= ~TTM_VT_LOCK;
wake_up_all(&lock->queue);
spin_unlock(&lock->lock);
- printk(KERN_INFO TTM_PFX "vt unlock.\n");
return ret;
}
@@ -265,10 +264,8 @@ int ttm_vt_lock(struct ttm_lock *lock,
ttm_lock_type, &ttm_vt_lock_remove, NULL);
if (ret)
(void)__ttm_vt_unlock(lock);
- else {
+ else
lock->vt_holder = tfile;
- printk(KERN_INFO TTM_PFX "vt lock.\n");
- }
return ret;
}
diff --git a/drivers/gpu/drm/ttm/ttm_memory.c b/drivers/gpu/drm/ttm/ttm_memory.c
index 801b702566e..e70ddd82dc0 100644
--- a/drivers/gpu/drm/ttm/ttm_memory.c
+++ b/drivers/gpu/drm/ttm/ttm_memory.c
@@ -27,6 +27,7 @@
#include "ttm/ttm_memory.h"
#include "ttm/ttm_module.h"
+#include "ttm/ttm_page_alloc.h"
#include <linux/spinlock.h>
#include <linux/sched.h>
#include <linux/wait.h>
@@ -393,6 +394,7 @@ int ttm_mem_global_init(struct ttm_mem_global *glob)
"Zone %7s: Available graphics memory: %llu kiB.\n",
zone->name, (unsigned long long) zone->max_mem >> 10);
}
+ ttm_page_alloc_init(glob, glob->zone_kernel->max_mem/(2*PAGE_SIZE));
return 0;
out_no_zone:
ttm_mem_global_release(glob);
@@ -405,6 +407,9 @@ void ttm_mem_global_release(struct ttm_mem_global *glob)
unsigned int i;
struct ttm_mem_zone *zone;
+ /* Let the page allocator stop the shrink work first. */
+ ttm_page_alloc_fini();
+
flush_workqueue(glob->swap_queue);
destroy_workqueue(glob->swap_queue);
glob->swap_queue = NULL;
@@ -412,7 +417,7 @@ void ttm_mem_global_release(struct ttm_mem_global *glob)
zone = glob->zones[i];
kobject_del(&zone->kobj);
kobject_put(&zone->kobj);
- }
+ }
kobject_del(&glob->kobj);
kobject_put(&glob->kobj);
}
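
The allocator is initialized with a cap of half the kernel zone, converted
from bytes to pages by max_mem / (2 * PAGE_SIZE). A worked example with
assumed numbers, a 2 GiB kernel zone and 4 KiB pages:

	/* (2 GiB) / (2 * 4 KiB) = 262144 pages, i.e. 1 GiB of pooled memory. */
	unsigned long max_pages = (2UL << 30) / (2 * 4096);
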
diff --git a/drivers/gpu/drm/ttm/ttm_page_alloc.c b/drivers/gpu/drm/ttm/ttm_page_alloc.c
new file mode 100644
index 00000000000..0d9a42c2394
--- /dev/null
+++ b/drivers/gpu/drm/ttm/ttm_page_alloc.c
@@ -0,0 +1,845 @@
+/*
+ * Copyright (c) Red Hat Inc.
+ *
+ * Permission is hereby granted, free of charge, to any person obtaining a
+ * copy of this software and associated documentation files (the "Software"),
+ * to deal in the Software without restriction, including without limitation
+ * the rights to use, copy, modify, merge, publish, distribute, sub license,
+ * and/or sell copies of the Software, and to permit persons to whom the
+ * Software is furnished to do so, subject to the following conditions:
+ *
+ * The above copyright notice and this permission notice (including the
+ * next paragraph) shall be included in all copies or substantial portions
+ * of the Software.
+ *
+ * THE SOFTWARE IS PROVIDED "AS IS", WITHOUT WARRANTY OF ANY KIND, EXPRESS OR
+ * IMPLIED, INCLUDING BUT NOT LIMITED TO THE WARRANTIES OF MERCHANTABILITY,
+ * FITNESS FOR A PARTICULAR PURPOSE AND NON-INFRINGEMENT. IN NO EVENT SHALL
+ * THE AUTHORS OR COPYRIGHT HOLDERS BE LIABLE FOR ANY CLAIM, DAMAGES OR OTHER
+ * LIABILITY, WHETHER IN AN ACTION OF CONTRACT, TORT OR OTHERWISE, ARISING
+ * FROM, OUT OF OR IN CONNECTION WITH THE SOFTWARE OR THE USE OR OTHER
+ * DEALINGS IN THE SOFTWARE.
+ *
+ * Authors: Dave Airlie <airlied@redhat.com>
+ * Jerome Glisse <jglisse@redhat.com>
+ * Pauli Nieminen <suokkos@gmail.com>
+ */
+
+/* Simple list-based uncached page pool
+ * - Pool collects recently freed pages for reuse
+ * - Uses page->lru to keep a free list
+ * - Doesn't track pages currently in use
+ */
+#include <linux/list.h>
+#include <linux/spinlock.h>
+#include <linux/highmem.h>
+#include <linux/mm_types.h>
+#include <linux/module.h>
+#include <linux/mm.h>
+#include <linux/seq_file.h> /* for seq_printf */
+#include <linux/slab.h>
+
+#include <asm/atomic.h>
+#include <asm/agp.h>
+
+#include "ttm/ttm_bo_driver.h"
+#include "ttm/ttm_page_alloc.h"
+
+
+#define NUM_PAGES_TO_ALLOC (PAGE_SIZE/sizeof(struct page *))
+#define SMALL_ALLOCATION 16
+#define FREE_ALL_PAGES (~0U)
+/* times are in msecs */
+#define PAGE_FREE_INTERVAL 1000
+
+/**
+ * struct ttm_page_pool - Pool to reuse recently allocated uc/wc pages.
+ *
+ * @lock: Protects the shared pool from concurrent access. Must be used with
+ * irqsave/irqrestore variants because the pool allocator may be called from
+ * delayed work.
+ * @fill_lock: Prevent concurrent calls to fill.
+ * @list: Pool of free uc/wc pages for fast reuse.
+ * @gfp_flags: Flags to pass for alloc_page.
+ * @npages: Number of pages in pool.
+ * @name: Pool name, used in debugfs output.
+ * @nfrees: Running count of pages freed from this pool.
+ * @nrefills: Running count of pool refill operations.
+ */
+struct ttm_page_pool {
+ spinlock_t lock;
+ bool fill_lock;
+ struct list_head list;
+ int gfp_flags;
+ unsigned npages;
+ char *name;
+ unsigned long nfrees;
+ unsigned long nrefills;
+};
+
+/**
+ * Limits for the pool. They are handled without locks because the only place
+ * they may change is the sysfs store. They won't have an immediate effect
+ * anyway, so forcing serialization to access them is pointless.
+ */
+
+struct ttm_pool_opts {
+ unsigned alloc_size;
+ unsigned max_size;
+ unsigned small;
+};
+
+#define NUM_POOLS 4
+
+/**
+ * struct ttm_pool_manager - Holds memory pools for fast allocation
+ *
+ * The manager is a read-only object for the pool code, so it doesn't need
+ * locking.
+ *
+ * @kobj: sysfs object exposing the pool limits.
+ * @mm_shrink: Shrinker callback the VM uses to ask the pools to free pages.
+ * @page_alloc_inited: Reference counting for pool allocator init/fini.
+ * @options: Tunable limits: allocation batch size, maximum pool size and the
+ * threshold below which an allocation counts as small.
+ *
+ * @pools: All pool objects in use.
+ **/
+struct ttm_pool_manager {
+ struct kobject kobj;
+ struct shrinker mm_shrink;
+ atomic_t page_alloc_inited;
+ struct ttm_pool_opts options;
+
+ union {
+ struct ttm_page_pool pools[NUM_POOLS];
+ struct {
+ struct ttm_page_pool wc_pool;
+ struct ttm_page_pool uc_pool;
+ struct ttm_page_pool wc_pool_dma32;
+ struct ttm_page_pool uc_pool_dma32;
+ } ;
+ };
+};
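+
+/*
+ * The named pools and the pools[] array alias the same storage, so the
+ * member order (wc, uc, wc_dma32, uc_dma32) must match the index computed
+ * in ttm_get_pool(): bit 0 selects uc over wc, bit 1 selects the dma32
+ * variant.
+ */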
+
+static struct attribute ttm_page_pool_max = {
+ .name = "pool_max_size",
+ .mode = S_IRUGO | S_IWUSR
+};
+static struct attribute ttm_page_pool_small = {
+ .name = "pool_small_allocation",
+ .mode = S_IRUGO | S_IWUSR
+};
+static struct attribute ttm_page_pool_alloc_size = {
+ .name = "pool_allocation_size",
+ .mode = S_IRUGO | S_IWUSR
+};
+
+static struct attribute *ttm_pool_attrs[] = {
+ &ttm_page_pool_max,
+ &ttm_page_pool_small,
+ &ttm_page_pool_alloc_size,
+ NULL
+};
+
+static void ttm_pool_kobj_release(struct kobject *kobj)
+{
+ struct ttm_pool_manager *m =
+ container_of(kobj, struct ttm_pool_manager, kobj);
+ (void)m;
+}
+
+static ssize_t ttm_pool_store(struct kobject *kobj,
+ struct attribute *attr, const char *buffer, size_t size)
+{
+ struct ttm_pool_manager *m =
+ container_of(kobj, struct ttm_pool_manager, kobj);
+ int chars;
+ unsigned val;
+ chars = sscanf(buffer, "%u", &val);
+ if (chars == 0)
+ return size;
+
+ /* Convert kb to number of pages */
+ val = val / (PAGE_SIZE >> 10);
+
+ if (attr == &ttm_page_pool_max)
+ m->options.max_size = val;
+ else if (attr == &ttm_page_pool_small)
+ m->options.small = val;
+ else if (attr == &ttm_page_pool_alloc_size) {
+ if (val > NUM_PAGES_TO_ALLOC*8) {
+ printk(KERN_ERR "[ttm] Setting allocation size to %lu "
+ "is not allowed. Recomended size is "
+ "%lu\n",
+ NUM_PAGES_TO_ALLOC*(PAGE_SIZE >> 7),
+ NUM_PAGES_TO_ALLOC*(PAGE_SIZE >> 10));
+ return size;
+ } else if (val > NUM_PAGES_TO_ALLOC) {
+ printk(KERN_WARNING "[ttm] Setting allocation size to "
+ "larger than %lu is not recomended.\n",
+ NUM_PAGES_TO_ALLOC*(PAGE_SIZE >> 10));
+ }
+ m->options.alloc_size = val;
+ }
+
+ return size;
+}
+
+static ssize_t ttm_pool_show(struct kobject *kobj,
+ struct attribute *attr, char *buffer)
+{
+ struct ttm_pool_manager *m =
+ container_of(kobj, struct ttm_pool_manager, kobj);
+ unsigned val = 0;
+
+ if (attr == &ttm_page_pool_max)
+ val = m->options.max_size;
+ else if (attr == &ttm_page_pool_small)
+ val = m->options.small;
+ else if (attr == &ttm_page_pool_alloc_size)
+ val = m->options.alloc_size;
+
+ val = val * (PAGE_SIZE >> 10);
+
+ return snprintf(buffer, PAGE_SIZE, "%u\n", val);
+}
+
+static const struct sysfs_ops ttm_pool_sysfs_ops = {
+ .show = &ttm_pool_show,
+ .store = &ttm_pool_store,
+};
+
+static struct kobj_type ttm_pool_kobj_type = {
+ .release = &ttm_pool_kobj_release,
+ .sysfs_ops = &ttm_pool_sysfs_ops,
+ .default_attrs = ttm_pool_attrs,
+};
+
+static struct ttm_pool_manager _manager = {
+ .page_alloc_inited = ATOMIC_INIT(0)
+};
+
+#ifndef CONFIG_X86
+static int set_pages_array_wb(struct page **pages, int addrinarray)
+{
+#ifdef TTM_HAS_AGP
+ int i;
+
+ for (i = 0; i < addrinarray; i++)
+ unmap_page_from_agp(pages[i]);
+#endif
+ return 0;
+}
+
+static int set_pages_array_wc(struct page **pages, int addrinarray)
+{
+#ifdef TTM_HAS_AGP
+ int i;
+
+ for (i = 0; i < addrinarray; i++)
+ map_page_into_agp(pages[i]);
+#endif
+ return 0;
+}
+
+static int set_pages_array_uc(struct page **pages, int addrinarray)
+{
+#ifdef TTM_HAS_AGP
+ int i;
+
+ for (i = 0; i < addrinarray; i++)
+ map_page_into_agp(pages[i]);
+#endif
+ return 0;
+}
+#endif
+
+/**
+ * Select the right pool for the requested caching state and ttm flags. */
+static struct ttm_page_pool *ttm_get_pool(int flags,
+ enum ttm_caching_state cstate)
+{
+ int pool_index;
+
+ if (cstate == tt_cached)
+ return NULL;
+
+ if (cstate == tt_wc)
+ pool_index = 0x0;
+ else
+ pool_index = 0x1;
+
+ if (flags & TTM_PAGE_FLAG_DMA32)
+ pool_index |= 0x2;
+
+ return &_manager.pools[pool_index];
+}
+
+/* set memory back to wb and free the pages. */
+static void ttm_pages_put(struct page *pages[], unsigned npages)
+{
+ unsigned i;
+ if (set_pages_array_wb(pages, npages))
+ printk(KERN_ERR "[ttm] Failed to set %d pages to wb!\n",
+ npages);
+ for (i = 0; i < npages; ++i)
+ __free_page(pages[i]);
+}
+
+static void ttm_pool_update_free_locked(struct ttm_page_pool *pool,
+ unsigned freed_pages)
+{
+ pool->npages -= freed_pages;
+ pool->nfrees += freed_pages;
+}
+
+/**
+ * Free pages from pool.
+ *
+ * To prevent hogging the ttm_swap process we only free NUM_PAGES_TO_ALLOC
+ * pages in one go.
+ *
+ * @pool: Pool to free the pages from.
+ * @nr_free: Number of pages to free, or FREE_ALL_PAGES to empty the pool.
+ **/
+static int ttm_page_pool_free(struct ttm_page_pool *pool, unsigned nr_free)
+{
+ unsigned long irq_flags;
+ struct page *p;
+ struct page **pages_to_free;
+ unsigned freed_pages = 0,
+ npages_to_free = nr_free;
+
+ if (NUM_PAGES_TO_ALLOC < nr_free)
+ npages_to_free = NUM_PAGES_TO_ALLOC;
+
+ pages_to_free = kmalloc(npages_to_free * sizeof(struct page *),
+ GFP_KERNEL);
+ if (!pages_to_free) {
+ printk(KERN_ERR "Failed to allocate memory for pool free operation.\n");
+ return 0;
+ }
+
+restart:
+ spin_lock_irqsave(&pool->lock, irq_flags);
+
+ list_for_each_entry_reverse(p, &pool->list, lru) {
+ if (freed_pages >= npages_to_free)
+ break;
+
+ pages_to_free[freed_pages++] = p;
+ /* We can only remove NUM_PAGES_TO_ALLOC at a time. */
+ if (freed_pages >= NUM_PAGES_TO_ALLOC) {
+ /* remove range of pages from the pool */
+ __list_del(p->lru.prev, &pool->list);
+
+ ttm_pool_update_free_locked(pool, freed_pages);
+ /**
+ * Because changing page caching is costly
+ * we unlock the pool to prevent stalling.
+ */
+ spin_unlock_irqrestore(&pool->lock, irq_flags);
+
+ ttm_pages_put(pages_to_free, freed_pages);
+ if (likely(nr_free != FREE_ALL_PAGES))
+ nr_free -= freed_pages;
+
+ if (NUM_PAGES_TO_ALLOC >= nr_free)
+ npages_to_free = nr_free;
+ else
+ npages_to_free = NUM_PAGES_TO_ALLOC;
+
+ freed_pages = 0;
+
+ /* free all so restart the processing */
+ if (nr_free)
+ goto restart;
+
+ /* Not allowed to fall through or break because
+ * the code that follows runs under the spinlock
+ * while we are outside it here.
+ */
+ goto out;
+
+ }
+ }
+
+ /* remove range of pages from the pool */
+ if (freed_pages) {
+ __list_del(&p->lru, &pool->list);
+
+ ttm_pool_update_free_locked(pool, freed_pages);
+ nr_free -= freed_pages;
+ }
+
+ spin_unlock_irqrestore(&pool->lock, irq_flags);
+
+ if (freed_pages)
+ ttm_pages_put(pages_to_free, freed_pages);
+out:
+ kfree(pages_to_free);
+ return nr_free;
+}
+
+/* Get a good estimate of how many pages are free in the pools */
+static int ttm_pool_get_num_unused_pages(void)
+{
+ unsigned i;
+ int total = 0;
+ for (i = 0; i < NUM_POOLS; ++i)
+ total += _manager.pools[i].npages;
+
+ return total;
+}
+
+/**
+ * Callback for mm to request that the pool reduce the number of pages held.
+ */
+static int ttm_pool_mm_shrink(int shrink_pages, gfp_t gfp_mask)
+{
+ static atomic_t start_pool = ATOMIC_INIT(0);
+ unsigned i;
+ unsigned pool_offset = atomic_add_return(1, &start_pool);
+ struct ttm_page_pool *pool;
+
+ pool_offset = pool_offset % NUM_POOLS;
+ /* select start pool in round robin fashion */
+ for (i = 0; i < NUM_POOLS; ++i) {
+ unsigned nr_free = shrink_pages;
+ if (shrink_pages == 0)
+ break;
+ pool = &_manager.pools[(i + pool_offset)%NUM_POOLS];
+ shrink_pages = ttm_page_pool_free(pool, nr_free);
+ }
+ /* return estimated number of unused pages in pool */
+ return ttm_pool_get_num_unused_pages();
+}
+
+static void ttm_pool_mm_shrink_init(struct ttm_pool_manager *manager)
+{
+ manager->mm_shrink.shrink = &ttm_pool_mm_shrink;
+ manager->mm_shrink.seeks = 1;
+ register_shrinker(&manager->mm_shrink);
+}
+
+static void ttm_pool_mm_shrink_fini(struct ttm_pool_manager *manager)
+{
+ unregister_shrinker(&manager->mm_shrink);
+}
+
+static int ttm_set_pages_caching(struct page **pages,
+ enum ttm_caching_state cstate, unsigned cpages)
+{
+ int r = 0;
+ /* Set page caching */
+ switch (cstate) {
+ case tt_uncached:
+ r = set_pages_array_uc(pages, cpages);
+ if (r)
+ printk(KERN_ERR "[ttm] Failed to set %d pages to uc!\n",
+ cpages);
+ break;
+ case tt_wc:
+ r = set_pages_array_wc(pages, cpages);
+ if (r)
+ printk(KERN_ERR "[ttm] Failed to set %d pages to wc!\n",
+ cpages);
+ break;
+ default:
+ break;
+ }
+ return r;
+}
+
+/**
+ * Free the pages that failed to change the caching state. If there are
+ * any pages that have already changed their caching state, put them back
+ * in the pool.
+ */
+static void ttm_handle_caching_state_failure(struct list_head *pages,
+ int ttm_flags, enum ttm_caching_state cstate,
+ struct page **failed_pages, unsigned cpages)
+{
+ unsigned i;
+ /* Failed pages have to be freed */
+ for (i = 0; i < cpages; ++i) {
+ list_del(&failed_pages[i]->lru);
+ __free_page(failed_pages[i]);
+ }
+}
+
+/**
+ * Allocate new pages with correct caching.
+ *
+ * This function is reentrant if the caller updates count depending on the
+ * number of pages returned in the pages array.
+ */
+static int ttm_alloc_new_pages(struct list_head *pages, int gfp_flags,
+ int ttm_flags, enum ttm_caching_state cstate, unsigned count)
+{
+ struct page **caching_array;
+ struct page *p;
+ int r = 0;
+ unsigned i, cpages;
+ unsigned max_cpages = min(count,
+ (unsigned)(PAGE_SIZE/sizeof(struct page *)));
+
+ /* allocate array for page caching change */
+ caching_array = kmalloc(max_cpages*sizeof(struct page *), GFP_KERNEL);
+
+ if (!caching_array) {
+ printk(KERN_ERR "[ttm] unable to allocate table for new pages.");
+ return -ENOMEM;
+ }
+
+ for (i = 0, cpages = 0; i < count; ++i) {
+ p = alloc_page(gfp_flags);
+
+ if (!p) {
+ printk(KERN_ERR "[ttm] unable to get page %u\n", i);
+
+ /* store already allocated pages in the pool after
+ * setting the caching state */
+ if (cpages) {
+ r = ttm_set_pages_caching(caching_array, cstate, cpages);
+ if (r)
+ ttm_handle_caching_state_failure(pages,
+ ttm_flags, cstate,
+ caching_array, cpages);
+ }
+ r = -ENOMEM;
+ goto out;
+ }
+
+#ifdef CONFIG_HIGHMEM
+ /* gfp flags of a highmem page should never include dma32,
+ * so we should be fine in that case
+ */
+ if (!PageHighMem(p))
+#endif
+ {
+ caching_array[cpages++] = p;
+ if (cpages == max_cpages) {
+
+ r = ttm_set_pages_caching(caching_array,
+ cstate, cpages);
+ if (r) {
+ ttm_handle_caching_state_failure(pages,
+ ttm_flags, cstate,
+ caching_array, cpages);
+ goto out;
+ }
+ cpages = 0;
+ }
+ }
+
+ list_add(&p->lru, pages);
+ }
+
+ if (cpages) {
+ r = ttm_set_pages_caching(caching_array, cstate, cpages);
+ if (r)
+ ttm_handle_caching_state_failure(pages,
+ ttm_flags, cstate,
+ caching_array, cpages);
+ }
+out:
+ kfree(caching_array);
+
+ return r;
+}
+
+/**
+ * Fill the given pool if there aren't enough pages and the requested number
+ * of pages is small.
+ */
+static void ttm_page_pool_fill_locked(struct ttm_page_pool *pool,
+ int ttm_flags, enum ttm_caching_state cstate, unsigned count,
+ unsigned long *irq_flags)
+{
+ struct page *p;
+ int r;
+ unsigned cpages = 0;
+ /**
+ * Only allow one pool fill operation at a time.
+ * If the pool doesn't have enough pages for the allocation, new pages
+ * are allocated from outside the pool.
+ */
+ if (pool->fill_lock)
+ return;
+
+ pool->fill_lock = true;
+
+ /* If the allocation request is small and there are not enough
+ * pages in the pool, we fill the pool first */
+ if (count < _manager.options.small
+ && count > pool->npages) {
+ struct list_head new_pages;
+ unsigned alloc_size = _manager.options.alloc_size;
+
+ /**
+ * Can't change page caching if in irqsave context. We have to
+ * drop the pool->lock.
+ */
+ spin_unlock_irqrestore(&pool->lock, *irq_flags);
+
+ INIT_LIST_HEAD(&new_pages);
+ r = ttm_alloc_new_pages(&new_pages, pool->gfp_flags, ttm_flags,
+ cstate, alloc_size);
+ spin_lock_irqsave(&pool->lock, *irq_flags);
+
+ if (!r) {
+ list_splice(&new_pages, &pool->list);
+ ++pool->nrefills;
+ pool->npages += alloc_size;
+ } else {
+ printk(KERN_ERR "[ttm] Failed to fill pool (%p).", pool);
+ /* If we have any pages left put them to the pool. */
+ list_for_each_entry(p, &pool->list, lru) {
+ ++cpages;
+ }
+ list_splice(&new_pages, &pool->list);
+ pool->npages += cpages;
+ }
+
+ }
+ pool->fill_lock = false;
+}
+
+/**
+ * Cut count number of pages from the pool and put them on the return list.
+ *
+ * @return count of pages still to allocate to fill the request.
+ */
+static unsigned ttm_page_pool_get_pages(struct ttm_page_pool *pool,
+ struct list_head *pages, int ttm_flags,
+ enum ttm_caching_state cstate, unsigned count)
+{
+ unsigned long irq_flags;
+ struct list_head *p;
+ unsigned i;
+
+ spin_lock_irqsave(&pool->lock, irq_flags);
+ ttm_page_pool_fill_locked(pool, ttm_flags, cstate, count, &irq_flags);
+
+ if (count >= pool->npages) {
+ /* take all pages from the pool */
+ list_splice_init(&pool->list, pages);
+ count -= pool->npages;
+ pool->npages = 0;
+ goto out;
+ }
+ /* Find the last page to include for the requested number of pages.
+ * Walk from whichever end of the pool is closer to halve the search. */
+ if (count <= pool->npages/2) {
+ i = 0;
+ list_for_each(p, &pool->list) {
+ if (++i == count)
+ break;
+ }
+ } else {
+ i = pool->npages + 1;
+ list_for_each_prev(p, &pool->list) {
+ if (--i == count)
+ break;
+ }
+ }
+ /* Cut count number of pages from pool */
+ list_cut_position(pages, &pool->list, p);
+ pool->npages -= count;
+ count = 0;
+out:
+ spin_unlock_irqrestore(&pool->lock, irq_flags);
+ return count;
+}
+
+/*
+ * On success pages list will hold count number of correctly
+ * cached pages.
+ */
+int ttm_get_pages(struct list_head *pages, int flags,
+ enum ttm_caching_state cstate, unsigned count)
+{
+ struct ttm_page_pool *pool = ttm_get_pool(flags, cstate);
+ struct page *p = NULL;
+ int gfp_flags = 0;
+ int r;
+
+ /* set zero flag for page allocation if required */
+ if (flags & TTM_PAGE_FLAG_ZERO_ALLOC)
+ gfp_flags |= __GFP_ZERO;
+
+ /* No pool for cached pages */
+ if (pool == NULL) {
+ if (flags & TTM_PAGE_FLAG_DMA32)
+ gfp_flags |= GFP_DMA32;
+ else
+ gfp_flags |= __GFP_HIGHMEM;
+
+ for (r = 0; r < count; ++r) {
+ p = alloc_page(gfp_flags);
+ if (!p) {
+
+ printk(KERN_ERR "[ttm] unable to allocate page.");
+ return -ENOMEM;
+ }
+
+ list_add(&p->lru, pages);
+ }
+ return 0;
+ }
+
+
+ /* combine zero flag to pool flags */
+ gfp_flags |= pool->gfp_flags;
+
+ /* First we take pages from the pool */
+ count = ttm_page_pool_get_pages(pool, pages, flags, cstate, count);
+
+ /* clear the pages coming from the pool if requested */
+ if (flags & TTM_PAGE_FLAG_ZERO_ALLOC) {
+ list_for_each_entry(p, pages, lru) {
+ clear_page(page_address(p));
+ }
+ }
+
+ /* If the pool didn't have enough pages, allocate new ones. */
+ if (count > 0) {
+ /* ttm_alloc_new_pages doesn't reference pool so we can run
+ * multiple requests in parallel.
+ **/
+ r = ttm_alloc_new_pages(pages, gfp_flags, flags, cstate, count);
+ if (r) {
+ /* If there are any pages in the list, put them back
+ * in the pool. */
+ printk(KERN_ERR "[ttm] Failed to allocate extra pages "
+ "for large request.");
+ ttm_put_pages(pages, 0, flags, cstate);
+ return r;
+ }
+ }
+
+
+ return 0;
+}
+
+/* Put all pages in pages list to correct pool to wait for reuse */
+void ttm_put_pages(struct list_head *pages, unsigned page_count, int flags,
+ enum ttm_caching_state cstate)
+{
+ unsigned long irq_flags;
+ struct ttm_page_pool *pool = ttm_get_pool(flags, cstate);
+ struct page *p, *tmp;
+
+ if (pool == NULL) {
+ /* No pool for this memory type so free the pages */
+
+ list_for_each_entry_safe(p, tmp, pages, lru) {
+ __free_page(p);
+ }
+ /* Make the pages list empty */
+ INIT_LIST_HEAD(pages);
+ return;
+ }
+ if (page_count == 0) {
+ list_for_each_entry_safe(p, tmp, pages, lru) {
+ ++page_count;
+ }
+ }
+
+ spin_lock_irqsave(&pool->lock, irq_flags);
+ list_splice_init(pages, &pool->list);
+ pool->npages += page_count;
+ /* Check that we don't go over the pool limit */
+ page_count = 0;
+ if (pool->npages > _manager.options.max_size) {
+ page_count = pool->npages - _manager.options.max_size;
+ /* free at least NUM_PAGES_TO_ALLOC pages
+ * to reduce calls to set_memory_wb */
+ if (page_count < NUM_PAGES_TO_ALLOC)
+ page_count = NUM_PAGES_TO_ALLOC;
+ }
+ spin_unlock_irqrestore(&pool->lock, irq_flags);
+ if (page_count)
+ ttm_page_pool_free(pool, page_count);
+}
+
+static void ttm_page_pool_init_locked(struct ttm_page_pool *pool, int flags,
+ char *name)
+{
+ spin_lock_init(&pool->lock);
+ pool->fill_lock = false;
+ INIT_LIST_HEAD(&pool->list);
+ pool->npages = pool->nfrees = 0;
+ pool->gfp_flags = flags;
+ pool->name = name;
+}
+
+int ttm_page_alloc_init(struct ttm_mem_global *glob, unsigned max_pages)
+{
+ int ret;
+ if (atomic_add_return(1, &_manager.page_alloc_inited) > 1)
+ return 0;
+
+ printk(KERN_INFO "[ttm] Initializing pool allocator.\n");
+
+ ttm_page_pool_init_locked(&_manager.wc_pool, GFP_HIGHUSER, "wc");
+
+ ttm_page_pool_init_locked(&_manager.uc_pool, GFP_HIGHUSER, "uc");
+
+ ttm_page_pool_init_locked(&_manager.wc_pool_dma32, GFP_USER | GFP_DMA32,
+ "wc dma");
+
+ ttm_page_pool_init_locked(&_manager.uc_pool_dma32, GFP_USER | GFP_DMA32,
+ "uc dma");
+
+ _manager.options.max_size = max_pages;
+ _manager.options.small = SMALL_ALLOCATION;
+ _manager.options.alloc_size = NUM_PAGES_TO_ALLOC;
+
+ kobject_init(&_manager.kobj, &ttm_pool_kobj_type);
+ ret = kobject_add(&_manager.kobj, &glob->kobj, "pool");
+ if (unlikely(ret != 0)) {
+ kobject_put(&_manager.kobj);
+ return ret;
+ }
+
+ ttm_pool_mm_shrink_init(&_manager);
+
+ return 0;
+}
+
+void ttm_page_alloc_fini(void)
+{
+ int i;
+
+ if (atomic_sub_return(1, &_manager.page_alloc_inited) > 0)
+ return;
+
+ printk(KERN_INFO "[ttm] Finilizing pool allocator.\n");
+ ttm_pool_mm_shrink_fini(&_manager);
+
+ for (i = 0; i < NUM_POOLS; ++i)
+ ttm_page_pool_free(&_manager.pools[i], FREE_ALL_PAGES);
+
+ kobject_put(&_manager.kobj);
+}
+
+int ttm_page_alloc_debugfs(struct seq_file *m, void *data)
+{
+ struct ttm_page_pool *p;
+ unsigned i;
+ char *h[] = {"pool", "refills", "pages freed", "size"};
+ if (atomic_read(&_manager.page_alloc_inited) == 0) {
+ seq_printf(m, "No pool allocator running.\n");
+ return 0;
+ }
+ seq_printf(m, "%6s %12s %13s %8s\n",
+ h[0], h[1], h[2], h[3]);
+ for (i = 0; i < NUM_POOLS; ++i) {
+ p = &_manager.pools[i];
+
+ seq_printf(m, "%6s %12ld %13ld %8d\n",
+ p->name, p->nrefills,
+ p->nfrees, p->npages);
+ }
+ return 0;
+}
+EXPORT_SYMBOL(ttm_page_alloc_debugfs);
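
The allocator hands pages back and forth on a list_head rather than an
array. A minimal usage sketch, assuming write-combined, zeroed pages are
wanted:

	LIST_HEAD(pages);
	struct page *p;
	int ret;

	ret = ttm_get_pages(&pages, TTM_PAGE_FLAG_ZERO_ALLOC, tt_wc, 16);
	if (ret == 0) {
		list_for_each_entry(p, &pages, lru) {
			/* ... use each page ... */
		}
		/* Return all 16 pages to the wc pool for reuse. */
		ttm_put_pages(&pages, 16, TTM_PAGE_FLAG_ZERO_ALLOC, tt_wc);
	}
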
diff --git a/drivers/gpu/drm/ttm/ttm_tt.c b/drivers/gpu/drm/ttm/ttm_tt.c
index d5fd5b8faeb..a7bab87a548 100644
--- a/drivers/gpu/drm/ttm/ttm_tt.c
+++ b/drivers/gpu/drm/ttm/ttm_tt.c
@@ -39,6 +39,7 @@
#include "ttm/ttm_module.h"
#include "ttm/ttm_bo_driver.h"
#include "ttm/ttm_placement.h"
+#include "ttm/ttm_page_alloc.h"
static int ttm_tt_swapin(struct ttm_tt *ttm);
@@ -56,21 +57,6 @@ static void ttm_tt_free_page_directory(struct ttm_tt *ttm)
ttm->pages = NULL;
}
-static struct page *ttm_tt_alloc_page(unsigned page_flags)
-{
- gfp_t gfp_flags = GFP_USER;
-
- if (page_flags & TTM_PAGE_FLAG_ZERO_ALLOC)
- gfp_flags |= __GFP_ZERO;
-
- if (page_flags & TTM_PAGE_FLAG_DMA32)
- gfp_flags |= __GFP_DMA32;
- else
- gfp_flags |= __GFP_HIGHMEM;
-
- return alloc_page(gfp_flags);
-}
-
static void ttm_tt_free_user_pages(struct ttm_tt *ttm)
{
int write;
@@ -111,15 +97,21 @@ static void ttm_tt_free_user_pages(struct ttm_tt *ttm)
static struct page *__ttm_tt_get_page(struct ttm_tt *ttm, int index)
{
struct page *p;
+ struct list_head h;
struct ttm_mem_global *mem_glob = ttm->glob->mem_glob;
int ret;
while (NULL == (p = ttm->pages[index])) {
- p = ttm_tt_alloc_page(ttm->page_flags);
- if (!p)
+ INIT_LIST_HEAD(&h);
+
+ ret = ttm_get_pages(&h, ttm->page_flags, ttm->caching_state, 1);
+
+ if (ret != 0)
return NULL;
+ p = list_first_entry(&h, struct page, lru);
+
ret = ttm_mem_global_alloc_page(mem_glob, p, false, false);
if (unlikely(ret != 0))
goto out_err;
@@ -228,10 +220,10 @@ static int ttm_tt_set_caching(struct ttm_tt *ttm,
if (ttm->caching_state == c_state)
return 0;
- if (c_state != tt_cached) {
- ret = ttm_tt_populate(ttm);
- if (unlikely(ret != 0))
- return ret;
+ if (ttm->state == tt_unpopulated) {
+ /* Change caching but don't populate */
+ ttm->caching_state = c_state;
+ return 0;
}
if (ttm->caching_state == tt_cached)
@@ -282,13 +274,17 @@ EXPORT_SYMBOL(ttm_tt_set_placement_caching);
static void ttm_tt_free_alloced_pages(struct ttm_tt *ttm)
{
int i;
+ unsigned count = 0;
+ struct list_head h;
struct page *cur_page;
struct ttm_backend *be = ttm->be;
+ INIT_LIST_HEAD(&h);
+
if (be)
be->func->clear(be);
- (void)ttm_tt_set_caching(ttm, tt_cached);
for (i = 0; i < ttm->num_pages; ++i) {
+
cur_page = ttm->pages[i];
ttm->pages[i] = NULL;
if (cur_page) {
@@ -298,9 +294,11 @@ static void ttm_tt_free_alloced_pages(struct ttm_tt *ttm)
"Leaking pages.\n");
ttm_mem_global_free_page(ttm->glob->mem_glob,
cur_page);
- __free_page(cur_page);
+ list_add(&cur_page->lru, &h);
+ count++;
}
}
+ ttm_put_pages(&h, count, ttm->page_flags, ttm->caching_state);
ttm->state = tt_unpopulated;
ttm->first_himem_page = ttm->num_pages;
ttm->last_lomem_page = -1;
diff --git a/drivers/gpu/drm/via/via_video.c b/drivers/gpu/drm/via/via_video.c
index 6ec04ac1245..6efac8117c9 100644
--- a/drivers/gpu/drm/via/via_video.c
+++ b/drivers/gpu/drm/via/via_video.c
@@ -75,7 +75,7 @@ int via_decoder_futex(struct drm_device *dev, void *data, struct drm_file *file_
DRM_DEBUG("\n");
- if (fx->lock > VIA_NR_XVMC_LOCKS)
+ if (fx->lock >= VIA_NR_XVMC_LOCKS)
return -EFAULT;
lock = (volatile int *)XVMCLOCKPTR(sAPriv, fx->lock);
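
The operator change closes an off-by-one: XVMCLOCKPTR() indexes a fixed
table of VIA_NR_XVMC_LOCKS locks, so valid indices run from 0 to
VIA_NR_XVMC_LOCKS - 1 and an index equal to the bound must be rejected as
well. The general bounds rule:

	/* For an array of N entries, reject idx >= N, not just idx > N. */
	if (idx >= N)
		return -EFAULT;
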
diff --git a/drivers/gpu/drm/vmwgfx/vmwgfx_buffer.c b/drivers/gpu/drm/vmwgfx/vmwgfx_buffer.c
index 825ebe3d89d..c4f5114aee7 100644
--- a/drivers/gpu/drm/vmwgfx/vmwgfx_buffer.c
+++ b/drivers/gpu/drm/vmwgfx/vmwgfx_buffer.c
@@ -137,9 +137,6 @@ int vmw_invalidate_caches(struct ttm_bo_device *bdev, uint32_t flags)
int vmw_init_mem_type(struct ttm_bo_device *bdev, uint32_t type,
struct ttm_mem_type_manager *man)
{
- struct vmw_private *dev_priv =
- container_of(bdev, struct vmw_private, bdev);
-
switch (type) {
case TTM_PL_SYSTEM:
/* System memory */
@@ -151,11 +148,7 @@ int vmw_init_mem_type(struct ttm_bo_device *bdev, uint32_t type,
case TTM_PL_VRAM:
/* "On-card" video ram */
man->gpu_offset = 0;
- man->io_offset = dev_priv->vram_start;
- man->io_size = dev_priv->vram_size;
- man->flags = TTM_MEMTYPE_FLAG_FIXED |
- TTM_MEMTYPE_FLAG_NEEDS_IOREMAP | TTM_MEMTYPE_FLAG_MAPPABLE;
- man->io_addr = NULL;
+ man->flags = TTM_MEMTYPE_FLAG_FIXED | TTM_MEMTYPE_FLAG_MAPPABLE;
man->available_caching = TTM_PL_MASK_CACHING;
man->default_caching = TTM_PL_FLAG_WC;
break;
@@ -193,6 +186,42 @@ static void vmw_swap_notify(struct ttm_buffer_object *bo)
vmw_dmabuf_gmr_unbind(bo);
}
+static int vmw_ttm_io_mem_reserve(struct ttm_bo_device *bdev, struct ttm_mem_reg *mem)
+{
+ struct ttm_mem_type_manager *man = &bdev->man[mem->mem_type];
+ struct vmw_private *dev_priv = container_of(bdev, struct vmw_private, bdev);
+
+ mem->bus.addr = NULL;
+ mem->bus.is_iomem = false;
+ mem->bus.offset = 0;
+ mem->bus.size = mem->num_pages << PAGE_SHIFT;
+ mem->bus.base = 0;
+ if (!(man->flags & TTM_MEMTYPE_FLAG_MAPPABLE))
+ return -EINVAL;
+ switch (mem->mem_type) {
+ case TTM_PL_SYSTEM:
+ /* System memory */
+ return 0;
+ case TTM_PL_VRAM:
+ mem->bus.offset = mem->mm_node->start << PAGE_SHIFT;
+ mem->bus.base = dev_priv->vram_start;
+ mem->bus.is_iomem = true;
+ break;
+ default:
+ return -EINVAL;
+ }
+ return 0;
+}
+
+static void vmw_ttm_io_mem_free(struct ttm_bo_device *bdev, struct ttm_mem_reg *mem)
+{
+}
+
+static int vmw_ttm_fault_reserve_notify(struct ttm_buffer_object *bo)
+{
+ return 0;
+}
+
/**
* FIXME: We're using the old vmware polling method to sync.
* Do this with fences instead.
@@ -248,5 +277,8 @@ struct ttm_bo_driver vmw_bo_driver = {
.sync_obj_unref = vmw_sync_obj_unref,
.sync_obj_ref = vmw_sync_obj_ref,
.move_notify = vmw_move_notify,
- .swap_notify = vmw_swap_notify
+ .swap_notify = vmw_swap_notify,
+ .fault_reserve_notify = &vmw_ttm_fault_reserve_notify,
+ .io_mem_reserve = &vmw_ttm_io_mem_reserve,
+ .io_mem_free = &vmw_ttm_io_mem_free,
};
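
With these hooks the TTM core no longer reads io_offset/io_size from the
memory-type manager; it maps base + offset as filled in by the driver's
io_mem_reserve(). For the VRAM case above that address works out to:

	/* phys = dev_priv->vram_start + (mem->mm_node->start << PAGE_SHIFT) */
	unsigned long phys = mem->bus.base + mem->bus.offset;
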
diff --git a/drivers/gpu/drm/vmwgfx/vmwgfx_execbuf.c b/drivers/gpu/drm/vmwgfx/vmwgfx_execbuf.c
index 0897359b3e4..dbd36b8910c 100644
--- a/drivers/gpu/drm/vmwgfx/vmwgfx_execbuf.c
+++ b/drivers/gpu/drm/vmwgfx/vmwgfx_execbuf.c
@@ -570,7 +570,7 @@ static int vmw_validate_single_buffer(struct vmw_private *dev_priv,
* Put BO in VRAM, only if there is space.
*/
- ret = ttm_bo_validate(bo, &vmw_vram_sys_placement, true, false);
+ ret = ttm_bo_validate(bo, &vmw_vram_sys_placement, true, false, false);
if (unlikely(ret == -ERESTARTSYS))
return ret;
@@ -590,7 +590,7 @@ static int vmw_validate_single_buffer(struct vmw_private *dev_priv,
* previous contents.
*/
- ret = ttm_bo_validate(bo, &vmw_vram_placement, true, false);
+ ret = ttm_bo_validate(bo, &vmw_vram_placement, true, false, false);
return ret;
}
diff --git a/drivers/gpu/drm/vmwgfx/vmwgfx_fb.c b/drivers/gpu/drm/vmwgfx/vmwgfx_fb.c
index a93367041cd..7421aaad8d0 100644
--- a/drivers/gpu/drm/vmwgfx/vmwgfx_fb.c
+++ b/drivers/gpu/drm/vmwgfx/vmwgfx_fb.c
@@ -559,8 +559,13 @@ int vmw_fb_init(struct vmw_private *vmw_priv)
info->pixmap.scan_align = 1;
#endif
- info->aperture_base = vmw_priv->vram_start;
- info->aperture_size = vmw_priv->vram_size;
+ info->apertures = alloc_apertures(1);
+ if (!info->apertures) {
+ ret = -ENOMEM;
+ goto err_aper;
+ }
+ info->apertures->ranges[0].base = vmw_priv->vram_start;
+ info->apertures->ranges[0].size = vmw_priv->vram_size;
/*
* Dirty & Deferred IO
@@ -580,6 +585,7 @@ int vmw_fb_init(struct vmw_private *vmw_priv)
err_defio:
fb_deferred_io_cleanup(info);
+err_aper:
ttm_bo_kunmap(&par->map);
err_unref:
ttm_bo_unref((struct ttm_buffer_object **)&par->vmw_bo);
@@ -628,7 +634,7 @@ int vmw_dmabuf_from_vram(struct vmw_private *vmw_priv,
if (unlikely(ret != 0))
return ret;
- ret = ttm_bo_validate(bo, &vmw_sys_placement, false, false);
+ ret = ttm_bo_validate(bo, &vmw_sys_placement, false, false, false);
ttm_bo_unreserve(bo);
return ret;
@@ -652,7 +658,7 @@ int vmw_dmabuf_to_start_of_vram(struct vmw_private *vmw_priv,
if (unlikely(ret != 0))
goto err_unlock;
- ret = ttm_bo_validate(bo, &ne_placement, false, false);
+ ret = ttm_bo_validate(bo, &ne_placement, false, false, false);
ttm_bo_unreserve(bo);
err_unlock:
ttm_write_unlock(&vmw_priv->active_master->lock);
diff --git a/drivers/gpu/drm/vmwgfx/vmwgfx_kms.c b/drivers/gpu/drm/vmwgfx/vmwgfx_kms.c
index 31f9afed0a6..bbc7c4c30bc 100644
--- a/drivers/gpu/drm/vmwgfx/vmwgfx_kms.c
+++ b/drivers/gpu/drm/vmwgfx/vmwgfx_kms.c
@@ -752,14 +752,8 @@ err_not_scanout:
return NULL;
}
-static int vmw_kms_fb_changed(struct drm_device *dev)
-{
- return 0;
-}
-
static struct drm_mode_config_funcs vmw_kms_funcs = {
.fb_create = vmw_kms_fb_create,
- .fb_changed = vmw_kms_fb_changed,
};
int vmw_kms_init(struct vmw_private *dev_priv)
diff --git a/drivers/gpu/drm/vmwgfx/vmwgfx_overlay.c b/drivers/gpu/drm/vmwgfx/vmwgfx_overlay.c
index 5b6eabeb7f5..ad566c85b07 100644
--- a/drivers/gpu/drm/vmwgfx/vmwgfx_overlay.c
+++ b/drivers/gpu/drm/vmwgfx/vmwgfx_overlay.c
@@ -118,7 +118,7 @@ static int vmw_dmabuf_pin_in_vram(struct vmw_private *dev_priv,
if (pin)
overlay_placement = &vmw_vram_ne_placement;
- ret = ttm_bo_validate(bo, overlay_placement, interruptible, false);
+ ret = ttm_bo_validate(bo, overlay_placement, interruptible, false, false);
ttm_bo_unreserve(bo);