Diffstat (limited to 'drivers/gpu')
89 files changed, 8135 insertions, 2354 deletions
diff --git a/drivers/gpu/drm/Kconfig b/drivers/gpu/drm/Kconfig index c961fe415ae..39b393d38bb 100644 --- a/drivers/gpu/drm/Kconfig +++ b/drivers/gpu/drm/Kconfig @@ -81,6 +81,7 @@ config DRM_I830 config DRM_I915 tristate "i915 driver" + depends on AGP_INTEL select FB_CFB_FILLRECT select FB_CFB_COPYAREA select FB_CFB_IMAGEBLIT diff --git a/drivers/gpu/drm/Makefile b/drivers/gpu/drm/Makefile index 4e89ab08b7b..fe23f29f7cb 100644 --- a/drivers/gpu/drm/Makefile +++ b/drivers/gpu/drm/Makefile @@ -16,6 +16,7 @@ drm-y := drm_auth.o drm_bufs.o drm_cache.o \ drm-$(CONFIG_COMPAT) += drm_ioc32.o obj-$(CONFIG_DRM) += drm.o +obj-$(CONFIG_DRM_TTM) += ttm/ obj-$(CONFIG_DRM_TDFX) += tdfx/ obj-$(CONFIG_DRM_R128) += r128/ obj-$(CONFIG_DRM_RADEON)+= radeon/ @@ -26,4 +27,3 @@ obj-$(CONFIG_DRM_I915) += i915/ obj-$(CONFIG_DRM_SIS) += sis/ obj-$(CONFIG_DRM_SAVAGE)+= savage/ obj-$(CONFIG_DRM_VIA) +=via/ -obj-$(CONFIG_DRM_TTM) += ttm/ diff --git a/drivers/gpu/drm/drm_crtc.c b/drivers/gpu/drm/drm_crtc.c index 8fab7890a36..33be210d672 100644 --- a/drivers/gpu/drm/drm_crtc.c +++ b/drivers/gpu/drm/drm_crtc.c @@ -1461,7 +1461,7 @@ int drm_mode_setcrtc(struct drm_device *dev, void *data, goto out; } - if (crtc_req->count_connectors > 0 && !mode && !fb) { + if (crtc_req->count_connectors > 0 && (!mode || !fb)) { DRM_DEBUG("Count connectors is %d but no mode or fb set\n", crtc_req->count_connectors); ret = -EINVAL; diff --git a/drivers/gpu/drm/drm_crtc_helper.c b/drivers/gpu/drm/drm_crtc_helper.c index a6f73f1e99d..6aaa2cb2336 100644 --- a/drivers/gpu/drm/drm_crtc_helper.c +++ b/drivers/gpu/drm/drm_crtc_helper.c @@ -706,8 +706,8 @@ int drm_crtc_helper_set_config(struct drm_mode_set *set) struct drm_encoder **save_encoders, *new_encoder; struct drm_framebuffer *old_fb = NULL; bool save_enabled; - bool mode_changed = false; - bool fb_changed = false; + bool mode_changed = false; /* if true do a full mode set */ + bool fb_changed = false; /* if true and !mode_changed just do a flip */ struct drm_connector *connector; int count = 0, ro, fail = 0; struct drm_crtc_helper_funcs *crtc_funcs; @@ -758,6 +758,8 @@ int drm_crtc_helper_set_config(struct drm_mode_set *set) if (set->crtc->fb == NULL) { DRM_DEBUG("crtc has no fb, full mode set\n"); mode_changed = true; + } else if (set->fb == NULL) { + mode_changed = true; } else if ((set->fb->bits_per_pixel != set->crtc->fb->bits_per_pixel) || set->fb->depth != set->crtc->fb->depth) @@ -1090,6 +1092,8 @@ int drm_helper_resume_force_mode(struct drm_device *dev) if (ret == false) DRM_ERROR("failed to set mode on crtc %p\n", crtc); } + /* disable the unused connectors while restoring the modesetting */ + drm_helper_disable_unused_functions(dev); return 0; } EXPORT_SYMBOL(drm_helper_resume_force_mode); diff --git a/drivers/gpu/drm/drm_debugfs.c b/drivers/gpu/drm/drm_debugfs.c index 2960b6d7345..9903f270e44 100644 --- a/drivers/gpu/drm/drm_debugfs.c +++ b/drivers/gpu/drm/drm_debugfs.c @@ -101,6 +101,10 @@ int drm_debugfs_create_files(struct drm_info_list *files, int count, continue; tmp = kmalloc(sizeof(struct drm_info_node), GFP_KERNEL); + if (tmp == NULL) { + ret = -1; + goto fail; + } ent = debugfs_create_file(files[i].name, S_IFREG | S_IRUGO, root, tmp, &drm_debugfs_fops); if (!ent) { diff --git a/drivers/gpu/drm/drm_edid.c b/drivers/gpu/drm/drm_edid.c index 7d0835226f6..80cc6d06d61 100644 --- a/drivers/gpu/drm/drm_edid.c +++ b/drivers/gpu/drm/drm_edid.c @@ -294,10 +294,10 @@ static struct drm_display_mode *drm_mode_detailed(struct drm_device *dev, unsigned vactive = 
(pt->vactive_vblank_hi & 0xf0) << 4 | pt->vactive_lo; unsigned hblank = (pt->hactive_hblank_hi & 0xf) << 8 | pt->hblank_lo; unsigned vblank = (pt->vactive_vblank_hi & 0xf) << 8 | pt->vblank_lo; - unsigned hsync_offset = (pt->hsync_vsync_offset_pulse_width_hi & 0x3) << 8 | pt->hsync_offset_lo; - unsigned hsync_pulse_width = (pt->hsync_vsync_offset_pulse_width_hi & 0xc) << 6 | pt->hsync_pulse_width_lo; - unsigned vsync_offset = (pt->hsync_vsync_offset_pulse_width_hi & 0x30) | (pt->vsync_offset_pulse_width_lo & 0xf); - unsigned vsync_pulse_width = (pt->hsync_vsync_offset_pulse_width_hi & 0xc0) >> 2 | pt->vsync_offset_pulse_width_lo >> 4; + unsigned hsync_offset = (pt->hsync_vsync_offset_pulse_width_hi & 0xc0) << 2 | pt->hsync_offset_lo; + unsigned hsync_pulse_width = (pt->hsync_vsync_offset_pulse_width_hi & 0x30) << 4 | pt->hsync_pulse_width_lo; + unsigned vsync_offset = (pt->hsync_vsync_offset_pulse_width_hi & 0xc) >> 2 | pt->vsync_offset_pulse_width_lo >> 4; + unsigned vsync_pulse_width = (pt->hsync_vsync_offset_pulse_width_hi & 0x3) << 4 | (pt->vsync_offset_pulse_width_lo & 0xf); /* ignore tiny modes */ if (hactive < 64 || vactive < 64) @@ -347,8 +347,8 @@ static struct drm_display_mode *drm_mode_detailed(struct drm_device *dev, mode->flags |= (pt->misc & DRM_EDID_PT_VSYNC_POSITIVE) ? DRM_MODE_FLAG_PVSYNC : DRM_MODE_FLAG_NVSYNC; - mode->width_mm = pt->width_mm_lo | (pt->width_height_mm_hi & 0xf) << 8; - mode->height_mm = pt->height_mm_lo | (pt->width_height_mm_hi & 0xf0) << 4; + mode->width_mm = pt->width_mm_lo | (pt->width_height_mm_hi & 0xf0) << 4; + mode->height_mm = pt->height_mm_lo | (pt->width_height_mm_hi & 0xf) << 8; if (quirks & EDID_QUIRK_DETAILED_IN_CM) { mode->width_mm *= 10; diff --git a/drivers/gpu/drm/drm_gem.c b/drivers/gpu/drm/drm_gem.c index 8104ecaea26..ffe8f4394d5 100644 --- a/drivers/gpu/drm/drm_gem.c +++ b/drivers/gpu/drm/drm_gem.c @@ -134,26 +134,29 @@ drm_gem_object_alloc(struct drm_device *dev, size_t size) BUG_ON((size & (PAGE_SIZE - 1)) != 0); obj = kzalloc(sizeof(*obj), GFP_KERNEL); + if (!obj) + goto free; obj->dev = dev; obj->filp = shmem_file_setup("drm mm object", size, VM_NORESERVE); - if (IS_ERR(obj->filp)) { - kfree(obj); - return NULL; - } + if (IS_ERR(obj->filp)) + goto free; kref_init(&obj->refcount); kref_init(&obj->handlecount); obj->size = size; if (dev->driver->gem_init_object != NULL && dev->driver->gem_init_object(obj) != 0) { - fput(obj->filp); - kfree(obj); - return NULL; + goto fput; } atomic_inc(&dev->object_count); atomic_add(obj->size, &dev->object_memory); return obj; +fput: + fput(obj->filp); +free: + kfree(obj); + return NULL; } EXPORT_SYMBOL(drm_gem_object_alloc); diff --git a/drivers/gpu/drm/drm_irq.c b/drivers/gpu/drm/drm_irq.c index b4a3dbcebe9..f85aaf21e78 100644 --- a/drivers/gpu/drm/drm_irq.c +++ b/drivers/gpu/drm/drm_irq.c @@ -566,7 +566,7 @@ int drm_wait_vblank(struct drm_device *dev, void *data, ret = drm_vblank_get(dev, crtc); if (ret) { - DRM_ERROR("failed to acquire vblank counter, %d\n", ret); + DRM_DEBUG("failed to acquire vblank counter, %d\n", ret); return ret; } seq = drm_vblank_count(dev, crtc); diff --git a/drivers/gpu/drm/drm_modes.c b/drivers/gpu/drm/drm_modes.c index 54f492a488a..7914097b09c 100644 --- a/drivers/gpu/drm/drm_modes.c +++ b/drivers/gpu/drm/drm_modes.c @@ -566,6 +566,8 @@ void drm_mode_connector_list_update(struct drm_connector *connector) found_it = 1; /* if equal delete the probed mode */ mode->status = pmode->status; + /* Merge type bits together */ + mode->type |= pmode->type; 
list_del(&pmode->head); drm_mode_destroy(connector->dev, pmode); break; diff --git a/drivers/gpu/drm/drm_stub.c b/drivers/gpu/drm/drm_stub.c index 155a5bbce68..55bb8a82d61 100644 --- a/drivers/gpu/drm/drm_stub.c +++ b/drivers/gpu/drm/drm_stub.c @@ -489,7 +489,7 @@ int drm_put_minor(struct drm_minor **minor_p) */ void drm_put_dev(struct drm_device *dev) { - struct drm_driver *driver = dev->driver; + struct drm_driver *driver; struct drm_map_list *r_list, *list_temp; DRM_DEBUG("\n"); @@ -498,6 +498,7 @@ void drm_put_dev(struct drm_device *dev) DRM_ERROR("cleanup called no dev\n"); return; } + driver = dev->driver; drm_vblank_cleanup(dev); diff --git a/drivers/gpu/drm/i915/Makefile b/drivers/gpu/drm/i915/Makefile index 51c5a050aa7..30d6b99fb30 100644 --- a/drivers/gpu/drm/i915/Makefile +++ b/drivers/gpu/drm/i915/Makefile @@ -13,6 +13,8 @@ i915-y := i915_drv.o i915_dma.o i915_irq.o i915_mem.o \ intel_crt.o \ intel_lvds.o \ intel_bios.o \ + intel_dp.o \ + intel_dp_i2c.o \ intel_hdmi.o \ intel_sdvo.o \ intel_modes.o \ diff --git a/drivers/gpu/drm/i915/dvo.h b/drivers/gpu/drm/i915/dvo.h index e747ac42fe3..288fc50627e 100644 --- a/drivers/gpu/drm/i915/dvo.h +++ b/drivers/gpu/drm/i915/dvo.h @@ -37,7 +37,7 @@ struct intel_dvo_device { /* GPIO register used for i2c bus to control this device */ u32 gpio; int slave_addr; - struct intel_i2c_chan *i2c_bus; + struct i2c_adapter *i2c_bus; const struct intel_dvo_dev_ops *dev_ops; void *dev_priv; @@ -52,7 +52,7 @@ struct intel_dvo_dev_ops { * Returns NULL if the device does not exist. */ bool (*init)(struct intel_dvo_device *dvo, - struct intel_i2c_chan *i2cbus); + struct i2c_adapter *i2cbus); /* * Called to allow the output a chance to create properties after the diff --git a/drivers/gpu/drm/i915/dvo_ch7017.c b/drivers/gpu/drm/i915/dvo_ch7017.c index 03d4b4973b0..621815b531d 100644 --- a/drivers/gpu/drm/i915/dvo_ch7017.c +++ b/drivers/gpu/drm/i915/dvo_ch7017.c @@ -176,19 +176,20 @@ static void ch7017_dpms(struct intel_dvo_device *dvo, int mode); static bool ch7017_read(struct intel_dvo_device *dvo, int addr, uint8_t *val) { - struct intel_i2c_chan *i2cbus = dvo->i2c_bus; + struct i2c_adapter *adapter = dvo->i2c_bus; + struct intel_i2c_chan *i2cbus = container_of(adapter, struct intel_i2c_chan, adapter); u8 out_buf[2]; u8 in_buf[2]; struct i2c_msg msgs[] = { { - .addr = i2cbus->slave_addr, + .addr = dvo->slave_addr, .flags = 0, .len = 1, .buf = out_buf, }, { - .addr = i2cbus->slave_addr, + .addr = dvo->slave_addr, .flags = I2C_M_RD, .len = 1, .buf = in_buf, @@ -208,10 +209,11 @@ static bool ch7017_read(struct intel_dvo_device *dvo, int addr, uint8_t *val) static bool ch7017_write(struct intel_dvo_device *dvo, int addr, uint8_t val) { - struct intel_i2c_chan *i2cbus = dvo->i2c_bus; + struct i2c_adapter *adapter = dvo->i2c_bus; + struct intel_i2c_chan *i2cbus = container_of(adapter, struct intel_i2c_chan, adapter); uint8_t out_buf[2]; struct i2c_msg msg = { - .addr = i2cbus->slave_addr, + .addr = dvo->slave_addr, .flags = 0, .len = 2, .buf = out_buf, @@ -228,8 +230,9 @@ static bool ch7017_write(struct intel_dvo_device *dvo, int addr, uint8_t val) /** Probes for a CH7017 on the given bus and slave address. 
*/ static bool ch7017_init(struct intel_dvo_device *dvo, - struct intel_i2c_chan *i2cbus) + struct i2c_adapter *adapter) { + struct intel_i2c_chan *i2cbus = container_of(adapter, struct intel_i2c_chan, adapter); struct ch7017_priv *priv; uint8_t val; @@ -237,8 +240,7 @@ static bool ch7017_init(struct intel_dvo_device *dvo, if (priv == NULL) return false; - dvo->i2c_bus = i2cbus; - dvo->i2c_bus->slave_addr = dvo->slave_addr; + dvo->i2c_bus = adapter; dvo->dev_priv = priv; if (!ch7017_read(dvo, CH7017_DEVICE_ID, &val)) @@ -248,7 +250,7 @@ static bool ch7017_init(struct intel_dvo_device *dvo, val != CH7018_DEVICE_ID_VALUE && val != CH7019_DEVICE_ID_VALUE) { DRM_DEBUG("ch701x not detected, got %d: from %s Slave %d.\n", - val, i2cbus->adapter.name,i2cbus->slave_addr); + val, i2cbus->adapter.name,dvo->slave_addr); goto fail; } diff --git a/drivers/gpu/drm/i915/dvo_ch7xxx.c b/drivers/gpu/drm/i915/dvo_ch7xxx.c index d2fd95dbd03..a9b89628968 100644 --- a/drivers/gpu/drm/i915/dvo_ch7xxx.c +++ b/drivers/gpu/drm/i915/dvo_ch7xxx.c @@ -123,19 +123,20 @@ static char *ch7xxx_get_id(uint8_t vid) static bool ch7xxx_readb(struct intel_dvo_device *dvo, int addr, uint8_t *ch) { struct ch7xxx_priv *ch7xxx= dvo->dev_priv; - struct intel_i2c_chan *i2cbus = dvo->i2c_bus; + struct i2c_adapter *adapter = dvo->i2c_bus; + struct intel_i2c_chan *i2cbus = container_of(adapter, struct intel_i2c_chan, adapter); u8 out_buf[2]; u8 in_buf[2]; struct i2c_msg msgs[] = { { - .addr = i2cbus->slave_addr, + .addr = dvo->slave_addr, .flags = 0, .len = 1, .buf = out_buf, }, { - .addr = i2cbus->slave_addr, + .addr = dvo->slave_addr, .flags = I2C_M_RD, .len = 1, .buf = in_buf, @@ -152,7 +153,7 @@ static bool ch7xxx_readb(struct intel_dvo_device *dvo, int addr, uint8_t *ch) if (!ch7xxx->quiet) { DRM_DEBUG("Unable to read register 0x%02x from %s:%02x.\n", - addr, i2cbus->adapter.name, i2cbus->slave_addr); + addr, i2cbus->adapter.name, dvo->slave_addr); } return false; } @@ -161,10 +162,11 @@ static bool ch7xxx_readb(struct intel_dvo_device *dvo, int addr, uint8_t *ch) static bool ch7xxx_writeb(struct intel_dvo_device *dvo, int addr, uint8_t ch) { struct ch7xxx_priv *ch7xxx = dvo->dev_priv; - struct intel_i2c_chan *i2cbus = dvo->i2c_bus; + struct i2c_adapter *adapter = dvo->i2c_bus; + struct intel_i2c_chan *i2cbus = container_of(adapter, struct intel_i2c_chan, adapter); uint8_t out_buf[2]; struct i2c_msg msg = { - .addr = i2cbus->slave_addr, + .addr = dvo->slave_addr, .flags = 0, .len = 2, .buf = out_buf, @@ -178,14 +180,14 @@ static bool ch7xxx_writeb(struct intel_dvo_device *dvo, int addr, uint8_t ch) if (!ch7xxx->quiet) { DRM_DEBUG("Unable to write register 0x%02x to %s:%d.\n", - addr, i2cbus->adapter.name, i2cbus->slave_addr); + addr, i2cbus->adapter.name, dvo->slave_addr); } return false; } static bool ch7xxx_init(struct intel_dvo_device *dvo, - struct intel_i2c_chan *i2cbus) + struct i2c_adapter *adapter) { /* this will detect the CH7xxx chip on the specified i2c bus */ struct ch7xxx_priv *ch7xxx; @@ -196,8 +198,7 @@ static bool ch7xxx_init(struct intel_dvo_device *dvo, if (ch7xxx == NULL) return false; - dvo->i2c_bus = i2cbus; - dvo->i2c_bus->slave_addr = dvo->slave_addr; + dvo->i2c_bus = adapter; dvo->dev_priv = ch7xxx; ch7xxx->quiet = true; @@ -207,7 +208,7 @@ static bool ch7xxx_init(struct intel_dvo_device *dvo, name = ch7xxx_get_id(vendor); if (!name) { DRM_DEBUG("ch7xxx not detected; got 0x%02x from %s slave %d.\n", - vendor, i2cbus->adapter.name, i2cbus->slave_addr); + vendor, adapter->name, dvo->slave_addr); goto out; } @@ 
-217,7 +218,7 @@ static bool ch7xxx_init(struct intel_dvo_device *dvo, if (device != CH7xxx_DID) { DRM_DEBUG("ch7xxx not detected; got 0x%02x from %s slave %d.\n", - vendor, i2cbus->adapter.name, i2cbus->slave_addr); + vendor, adapter->name, dvo->slave_addr); goto out; } diff --git a/drivers/gpu/drm/i915/dvo_ivch.c b/drivers/gpu/drm/i915/dvo_ivch.c index 0c8d375e8e3..aa176f9921f 100644 --- a/drivers/gpu/drm/i915/dvo_ivch.c +++ b/drivers/gpu/drm/i915/dvo_ivch.c @@ -169,13 +169,14 @@ static void ivch_dump_regs(struct intel_dvo_device *dvo); static bool ivch_read(struct intel_dvo_device *dvo, int addr, uint16_t *data) { struct ivch_priv *priv = dvo->dev_priv; - struct intel_i2c_chan *i2cbus = dvo->i2c_bus; + struct i2c_adapter *adapter = dvo->i2c_bus; + struct intel_i2c_chan *i2cbus = container_of(adapter, struct intel_i2c_chan, adapter); u8 out_buf[1]; u8 in_buf[2]; struct i2c_msg msgs[] = { { - .addr = i2cbus->slave_addr, + .addr = dvo->slave_addr, .flags = I2C_M_RD, .len = 0, }, @@ -186,7 +187,7 @@ static bool ivch_read(struct intel_dvo_device *dvo, int addr, uint16_t *data) .buf = out_buf, }, { - .addr = i2cbus->slave_addr, + .addr = dvo->slave_addr, .flags = I2C_M_RD | I2C_M_NOSTART, .len = 2, .buf = in_buf, @@ -202,7 +203,7 @@ static bool ivch_read(struct intel_dvo_device *dvo, int addr, uint16_t *data) if (!priv->quiet) { DRM_DEBUG("Unable to read register 0x%02x from %s:%02x.\n", - addr, i2cbus->adapter.name, i2cbus->slave_addr); + addr, i2cbus->adapter.name, dvo->slave_addr); } return false; } @@ -211,10 +212,11 @@ static bool ivch_read(struct intel_dvo_device *dvo, int addr, uint16_t *data) static bool ivch_write(struct intel_dvo_device *dvo, int addr, uint16_t data) { struct ivch_priv *priv = dvo->dev_priv; - struct intel_i2c_chan *i2cbus = dvo->i2c_bus; + struct i2c_adapter *adapter = dvo->i2c_bus; + struct intel_i2c_chan *i2cbus = container_of(adapter, struct intel_i2c_chan, adapter); u8 out_buf[3]; struct i2c_msg msg = { - .addr = i2cbus->slave_addr, + .addr = dvo->slave_addr, .flags = 0, .len = 3, .buf = out_buf, @@ -229,7 +231,7 @@ static bool ivch_write(struct intel_dvo_device *dvo, int addr, uint16_t data) if (!priv->quiet) { DRM_DEBUG("Unable to write register 0x%02x to %s:%d.\n", - addr, i2cbus->adapter.name, i2cbus->slave_addr); + addr, i2cbus->adapter.name, dvo->slave_addr); } return false; @@ -237,7 +239,7 @@ static bool ivch_write(struct intel_dvo_device *dvo, int addr, uint16_t data) /** Probes the given bus and slave address for an ivch */ static bool ivch_init(struct intel_dvo_device *dvo, - struct intel_i2c_chan *i2cbus) + struct i2c_adapter *adapter) { struct ivch_priv *priv; uint16_t temp; @@ -246,8 +248,7 @@ static bool ivch_init(struct intel_dvo_device *dvo, if (priv == NULL) return false; - dvo->i2c_bus = i2cbus; - dvo->i2c_bus->slave_addr = dvo->slave_addr; + dvo->i2c_bus = adapter; dvo->dev_priv = priv; priv->quiet = true; diff --git a/drivers/gpu/drm/i915/dvo_sil164.c b/drivers/gpu/drm/i915/dvo_sil164.c index 033a4bb070b..e1c1f7341e5 100644 --- a/drivers/gpu/drm/i915/dvo_sil164.c +++ b/drivers/gpu/drm/i915/dvo_sil164.c @@ -76,19 +76,20 @@ struct sil164_priv { static bool sil164_readb(struct intel_dvo_device *dvo, int addr, uint8_t *ch) { struct sil164_priv *sil = dvo->dev_priv; - struct intel_i2c_chan *i2cbus = dvo->i2c_bus; + struct i2c_adapter *adapter = dvo->i2c_bus; + struct intel_i2c_chan *i2cbus = container_of(adapter, struct intel_i2c_chan, adapter); u8 out_buf[2]; u8 in_buf[2]; struct i2c_msg msgs[] = { { - .addr = i2cbus->slave_addr, + .addr = 
dvo->slave_addr, .flags = 0, .len = 1, .buf = out_buf, }, { - .addr = i2cbus->slave_addr, + .addr = dvo->slave_addr, .flags = I2C_M_RD, .len = 1, .buf = in_buf, @@ -105,7 +106,7 @@ static bool sil164_readb(struct intel_dvo_device *dvo, int addr, uint8_t *ch) if (!sil->quiet) { DRM_DEBUG("Unable to read register 0x%02x from %s:%02x.\n", - addr, i2cbus->adapter.name, i2cbus->slave_addr); + addr, i2cbus->adapter.name, dvo->slave_addr); } return false; } @@ -113,10 +114,11 @@ static bool sil164_readb(struct intel_dvo_device *dvo, int addr, uint8_t *ch) static bool sil164_writeb(struct intel_dvo_device *dvo, int addr, uint8_t ch) { struct sil164_priv *sil= dvo->dev_priv; - struct intel_i2c_chan *i2cbus = dvo->i2c_bus; + struct i2c_adapter *adapter = dvo->i2c_bus; + struct intel_i2c_chan *i2cbus = container_of(adapter, struct intel_i2c_chan, adapter); uint8_t out_buf[2]; struct i2c_msg msg = { - .addr = i2cbus->slave_addr, + .addr = dvo->slave_addr, .flags = 0, .len = 2, .buf = out_buf, @@ -130,7 +132,7 @@ static bool sil164_writeb(struct intel_dvo_device *dvo, int addr, uint8_t ch) if (!sil->quiet) { DRM_DEBUG("Unable to write register 0x%02x to %s:%d.\n", - addr, i2cbus->adapter.name, i2cbus->slave_addr); + addr, i2cbus->adapter.name, dvo->slave_addr); } return false; @@ -138,7 +140,7 @@ static bool sil164_writeb(struct intel_dvo_device *dvo, int addr, uint8_t ch) /* Silicon Image 164 driver for chip on i2c bus */ static bool sil164_init(struct intel_dvo_device *dvo, - struct intel_i2c_chan *i2cbus) + struct i2c_adapter *adapter) { /* this will detect the SIL164 chip on the specified i2c bus */ struct sil164_priv *sil; @@ -148,8 +150,7 @@ static bool sil164_init(struct intel_dvo_device *dvo, if (sil == NULL) return false; - dvo->i2c_bus = i2cbus; - dvo->i2c_bus->slave_addr = dvo->slave_addr; + dvo->i2c_bus = adapter; dvo->dev_priv = sil; sil->quiet = true; @@ -158,7 +159,7 @@ static bool sil164_init(struct intel_dvo_device *dvo, if (ch != (SIL164_VID & 0xff)) { DRM_DEBUG("sil164 not detected got %d: from %s Slave %d.\n", - ch, i2cbus->adapter.name, i2cbus->slave_addr); + ch, adapter->name, dvo->slave_addr); goto out; } @@ -167,7 +168,7 @@ static bool sil164_init(struct intel_dvo_device *dvo, if (ch != (SIL164_DID & 0xff)) { DRM_DEBUG("sil164 not detected got %d: from %s Slave %d.\n", - ch, i2cbus->adapter.name, i2cbus->slave_addr); + ch, adapter->name, dvo->slave_addr); goto out; } sil->quiet = false; diff --git a/drivers/gpu/drm/i915/dvo_tfp410.c b/drivers/gpu/drm/i915/dvo_tfp410.c index 207fda806eb..9ecc907384e 100644 --- a/drivers/gpu/drm/i915/dvo_tfp410.c +++ b/drivers/gpu/drm/i915/dvo_tfp410.c @@ -101,19 +101,20 @@ struct tfp410_priv { static bool tfp410_readb(struct intel_dvo_device *dvo, int addr, uint8_t *ch) { struct tfp410_priv *tfp = dvo->dev_priv; - struct intel_i2c_chan *i2cbus = dvo->i2c_bus; + struct i2c_adapter *adapter = dvo->i2c_bus; + struct intel_i2c_chan *i2cbus = container_of(adapter, struct intel_i2c_chan, adapter); u8 out_buf[2]; u8 in_buf[2]; struct i2c_msg msgs[] = { { - .addr = i2cbus->slave_addr, + .addr = dvo->slave_addr, .flags = 0, .len = 1, .buf = out_buf, }, { - .addr = i2cbus->slave_addr, + .addr = dvo->slave_addr, .flags = I2C_M_RD, .len = 1, .buf = in_buf, @@ -130,7 +131,7 @@ static bool tfp410_readb(struct intel_dvo_device *dvo, int addr, uint8_t *ch) if (!tfp->quiet) { DRM_DEBUG("Unable to read register 0x%02x from %s:%02x.\n", - addr, i2cbus->adapter.name, i2cbus->slave_addr); + addr, i2cbus->adapter.name, dvo->slave_addr); } return false; } @@ -138,10 
+139,11 @@ static bool tfp410_readb(struct intel_dvo_device *dvo, int addr, uint8_t *ch) static bool tfp410_writeb(struct intel_dvo_device *dvo, int addr, uint8_t ch) { struct tfp410_priv *tfp = dvo->dev_priv; - struct intel_i2c_chan *i2cbus = dvo->i2c_bus; + struct i2c_adapter *adapter = dvo->i2c_bus; + struct intel_i2c_chan *i2cbus = container_of(adapter, struct intel_i2c_chan, adapter); uint8_t out_buf[2]; struct i2c_msg msg = { - .addr = i2cbus->slave_addr, + .addr = dvo->slave_addr, .flags = 0, .len = 2, .buf = out_buf, @@ -155,7 +157,7 @@ static bool tfp410_writeb(struct intel_dvo_device *dvo, int addr, uint8_t ch) if (!tfp->quiet) { DRM_DEBUG("Unable to write register 0x%02x to %s:%d.\n", - addr, i2cbus->adapter.name, i2cbus->slave_addr); + addr, i2cbus->adapter.name, dvo->slave_addr); } return false; @@ -174,7 +176,7 @@ static int tfp410_getid(struct intel_dvo_device *dvo, int addr) /* Ti TFP410 driver for chip on i2c bus */ static bool tfp410_init(struct intel_dvo_device *dvo, - struct intel_i2c_chan *i2cbus) + struct i2c_adapter *adapter) { /* this will detect the tfp410 chip on the specified i2c bus */ struct tfp410_priv *tfp; @@ -184,20 +186,19 @@ static bool tfp410_init(struct intel_dvo_device *dvo, if (tfp == NULL) return false; - dvo->i2c_bus = i2cbus; - dvo->i2c_bus->slave_addr = dvo->slave_addr; + dvo->i2c_bus = adapter; dvo->dev_priv = tfp; tfp->quiet = true; if ((id = tfp410_getid(dvo, TFP410_VID_LO)) != TFP410_VID) { DRM_DEBUG("tfp410 not detected got VID %X: from %s Slave %d.\n", - id, i2cbus->adapter.name, i2cbus->slave_addr); + id, adapter->name, dvo->slave_addr); goto out; } if ((id = tfp410_getid(dvo, TFP410_DID_LO)) != TFP410_DID) { DRM_DEBUG("tfp410 not detected got DID %X: from %s Slave %d.\n", - id, i2cbus->adapter.name, i2cbus->slave_addr); + id, adapter->name, dvo->slave_addr); goto out; } tfp->quiet = false; diff --git a/drivers/gpu/drm/i915/i915_dma.c b/drivers/gpu/drm/i915/i915_dma.c index f112c769d53..50d1f782768 100644 --- a/drivers/gpu/drm/i915/i915_dma.c +++ b/drivers/gpu/drm/i915/i915_dma.c @@ -846,7 +846,7 @@ static int i915_set_status_page(struct drm_device *dev, void *data, return 0; } - printk(KERN_DEBUG "set status page addr 0x%08x\n", (u32)hws->addr); + DRM_DEBUG("set status page addr 0x%08x\n", (u32)hws->addr); dev_priv->status_gfx_addr = hws->addr & (0x1ffff<<12); @@ -885,8 +885,8 @@ static int i915_set_status_page(struct drm_device *dev, void *data, * some RAM for the framebuffer at early boot. This code figures out * how much was set aside so we can use it for our own purposes. */ -static int i915_probe_agp(struct drm_device *dev, unsigned long *aperture_size, - unsigned long *preallocated_size) +static int i915_probe_agp(struct drm_device *dev, uint32_t *aperture_size, + uint32_t *preallocated_size) { struct pci_dev *bridge_dev; u16 tmp = 0; @@ -984,10 +984,11 @@ static int i915_probe_agp(struct drm_device *dev, unsigned long *aperture_size, return 0; } -static int i915_load_modeset_init(struct drm_device *dev) +static int i915_load_modeset_init(struct drm_device *dev, + unsigned long prealloc_size, + unsigned long agp_size) { struct drm_i915_private *dev_priv = dev->dev_private; - unsigned long agp_size, prealloc_size; int fb_bar = IS_I9XX(dev) ? 
2 : 0; int ret = 0; @@ -1002,10 +1003,6 @@ static int i915_load_modeset_init(struct drm_device *dev) if (IS_I965G(dev) || IS_G33(dev)) dev_priv->cursor_needs_physical = false; - ret = i915_probe_agp(dev, &agp_size, &prealloc_size); - if (ret) - goto out; - /* Basic memrange allocator for stolen space (aka vram) */ drm_mm_init(&dev_priv->vram, 0, prealloc_size); @@ -1082,6 +1079,44 @@ void i915_master_destroy(struct drm_device *dev, struct drm_master *master) master->driver_priv = NULL; } +static void i915_get_mem_freq(struct drm_device *dev) +{ + drm_i915_private_t *dev_priv = dev->dev_private; + u32 tmp; + + if (!IS_IGD(dev)) + return; + + tmp = I915_READ(CLKCFG); + + switch (tmp & CLKCFG_FSB_MASK) { + case CLKCFG_FSB_533: + dev_priv->fsb_freq = 533; /* 133*4 */ + break; + case CLKCFG_FSB_800: + dev_priv->fsb_freq = 800; /* 200*4 */ + break; + case CLKCFG_FSB_667: + dev_priv->fsb_freq = 667; /* 167*4 */ + break; + case CLKCFG_FSB_400: + dev_priv->fsb_freq = 400; /* 100*4 */ + break; + } + + switch (tmp & CLKCFG_MEM_MASK) { + case CLKCFG_MEM_533: + dev_priv->mem_freq = 533; + break; + case CLKCFG_MEM_667: + dev_priv->mem_freq = 667; + break; + case CLKCFG_MEM_800: + dev_priv->mem_freq = 800; + break; + } +} + /** * i915_driver_load - setup chip and create an initial config * @dev: DRM device @@ -1098,6 +1133,7 @@ int i915_driver_load(struct drm_device *dev, unsigned long flags) struct drm_i915_private *dev_priv = dev->dev_private; resource_size_t base, size; int ret = 0, mmio_bar = IS_I9XX(dev) ? 0 : 1; + uint32_t agp_size, prealloc_size; /* i915 has 4 more counters */ dev->counters += 4; @@ -1146,9 +1182,29 @@ int i915_driver_load(struct drm_device *dev, unsigned long flags) "performance may suffer.\n"); } + ret = i915_probe_agp(dev, &agp_size, &prealloc_size); + if (ret) + goto out_iomapfree; + + dev_priv->wq = create_workqueue("i915"); + if (dev_priv->wq == NULL) { + DRM_ERROR("Failed to create our workqueue.\n"); + ret = -ENOMEM; + goto out_iomapfree; + } + /* enable GEM by default */ dev_priv->has_gem = 1; + if (prealloc_size > agp_size * 3 / 4) { + DRM_ERROR("Detected broken video BIOS with %d/%dkB of video " + "memory stolen.\n", + prealloc_size / 1024, agp_size / 1024); + DRM_ERROR("Disabling GEM. (try reducing stolen memory or " + "updating the BIOS to fix).\n"); + dev_priv->has_gem = 0; + } + dev->driver->get_vblank_counter = i915_get_vblank_counter; dev->max_vblank_count = 0xffffff; /* only 24 bits of frame count */ if (IS_G4X(dev) || IS_IGDNG(dev)) { @@ -1162,9 +1218,11 @@ int i915_driver_load(struct drm_device *dev, unsigned long flags) if (!I915_NEED_GFX_HWS(dev)) { ret = i915_init_phys_hws(dev); if (ret != 0) - goto out_iomapfree; + goto out_workqueue_free; } + i915_get_mem_freq(dev); + /* On the 945G/GM, the chipset reports the MSI capability on the * integrated graphics even though the support isn't actually there * according to the published specs. 
It doesn't appear to function @@ -1180,6 +1238,7 @@ int i915_driver_load(struct drm_device *dev, unsigned long flags) pci_enable_msi(dev->pdev); spin_lock_init(&dev_priv->user_irq_lock); + spin_lock_init(&dev_priv->error_lock); dev_priv->user_irq_refcount = 0; ret = drm_vblank_init(dev, I915_NUM_PIPE); @@ -1190,10 +1249,10 @@ int i915_driver_load(struct drm_device *dev, unsigned long flags) } if (drm_core_check_feature(dev, DRIVER_MODESET)) { - ret = i915_load_modeset_init(dev); + ret = i915_load_modeset_init(dev, prealloc_size, agp_size); if (ret < 0) { DRM_ERROR("failed to init modeset\n"); - goto out_rmmap; + goto out_workqueue_free; } } @@ -1204,6 +1263,8 @@ int i915_driver_load(struct drm_device *dev, unsigned long flags) return 0; +out_workqueue_free: + destroy_workqueue(dev_priv->wq); out_iomapfree: io_mapping_free(dev_priv->mm.gtt_mapping); out_rmmap: @@ -1217,6 +1278,8 @@ int i915_driver_unload(struct drm_device *dev) { struct drm_i915_private *dev_priv = dev->dev_private; + destroy_workqueue(dev_priv->wq); + io_mapping_free(dev_priv->mm.gtt_mapping); if (dev_priv->mm.gtt_mtrr >= 0) { mtrr_del(dev_priv->mm.gtt_mtrr, dev->agp->base, diff --git a/drivers/gpu/drm/i915/i915_drv.c b/drivers/gpu/drm/i915/i915_drv.c index 98560e1e899..fc4b68aa2d0 100644 --- a/drivers/gpu/drm/i915/i915_drv.c +++ b/drivers/gpu/drm/i915/i915_drv.c @@ -35,6 +35,7 @@ #include "drm_pciids.h" #include <linux/console.h> +#include "drm_crtc_helper.h" static unsigned int i915_modeset = -1; module_param_named(modeset, i915_modeset, int, 0400); @@ -57,8 +58,8 @@ static int i915_suspend(struct drm_device *dev, pm_message_t state) struct drm_i915_private *dev_priv = dev->dev_private; if (!dev || !dev_priv) { - printk(KERN_ERR "dev: %p, dev_priv: %p\n", dev, dev_priv); - printk(KERN_ERR "DRM not initialized, aborting suspend.\n"); + DRM_ERROR("dev: %p, dev_priv: %p\n", dev, dev_priv); + DRM_ERROR("DRM not initialized, aborting suspend.\n"); return -ENODEV; } @@ -67,8 +68,6 @@ static int i915_suspend(struct drm_device *dev, pm_message_t state) pci_save_state(dev->pdev); - i915_save_state(dev); - /* If KMS is active, we do the leavevt stuff here */ if (drm_core_check_feature(dev, DRIVER_MODESET)) { if (i915_gem_idle(dev)) @@ -77,6 +76,8 @@ static int i915_suspend(struct drm_device *dev, pm_message_t state) drm_irq_uninstall(dev); } + i915_save_state(dev); + intel_opregion_free(dev, 1); if (state.event == PM_EVENT_SUSPEND) { @@ -115,6 +116,10 @@ static int i915_resume(struct drm_device *dev) drm_irq_install(dev); } + if (drm_core_check_feature(dev, DRIVER_MODESET)) { + /* Resume the modeset for every activated CRTC */ + drm_helper_resume_force_mode(dev); + } return ret; } diff --git a/drivers/gpu/drm/i915/i915_drv.h b/drivers/gpu/drm/i915/i915_drv.h index 7a84f04e843..7537f57d8a8 100644 --- a/drivers/gpu/drm/i915/i915_drv.h +++ b/drivers/gpu/drm/i915/i915_drv.h @@ -133,6 +133,22 @@ struct sdvo_device_mapping { u8 initialized; }; +struct drm_i915_error_state { + u32 eir; + u32 pgtbl_er; + u32 pipeastat; + u32 pipebstat; + u32 ipeir; + u32 ipehr; + u32 instdone; + u32 acthd; + u32 instpm; + u32 instps; + u32 instdone1; + u32 seqno; + struct timeval time; +}; + typedef struct drm_i915_private { struct drm_device *dev; @@ -203,12 +219,20 @@ typedef struct drm_i915_private { unsigned int lvds_vbt:1; unsigned int int_crt_support:1; unsigned int lvds_use_ssc:1; + unsigned int edp_support:1; int lvds_ssc_freq; struct drm_i915_fence_reg fence_regs[16]; /* assume 965 */ int fence_reg_start; /* 4 if userland hasn't ioctl'd us yet */ 
int num_fence_regs; /* 8 on pre-965, 16 otherwise */ + unsigned int fsb_freq, mem_freq; + + spinlock_t error_lock; + struct drm_i915_error_state *first_error; + struct work_struct error_work; + struct workqueue_struct *wq; + /* Register state */ u8 saveLBB; u32 saveDSPACNTR; @@ -306,6 +330,17 @@ typedef struct drm_i915_private { u32 saveCURBPOS; u32 saveCURBBASE; u32 saveCURSIZE; + u32 saveDP_B; + u32 saveDP_C; + u32 saveDP_D; + u32 savePIPEA_GMCH_DATA_M; + u32 savePIPEB_GMCH_DATA_M; + u32 savePIPEA_GMCH_DATA_N; + u32 savePIPEB_GMCH_DATA_N; + u32 savePIPEA_DP_LINK_M; + u32 savePIPEB_DP_LINK_M; + u32 savePIPEA_DP_LINK_N; + u32 savePIPEB_DP_LINK_N; struct { struct drm_mm gtt_space; @@ -457,9 +492,6 @@ struct drm_i915_gem_object { */ int fence_reg; - /** Boolean whether this object has a valid gtt offset. */ - int gtt_bound; - /** How many users have pinned this object in GTT space */ int pin_count; @@ -644,6 +676,7 @@ void i915_gem_free_object(struct drm_gem_object *obj); int i915_gem_object_pin(struct drm_gem_object *obj, uint32_t alignment); void i915_gem_object_unpin(struct drm_gem_object *obj); int i915_gem_object_unbind(struct drm_gem_object *obj); +void i915_gem_release_mmap(struct drm_gem_object *obj); void i915_gem_lastclose(struct drm_device *dev); uint32_t i915_get_gem_seqno(struct drm_device *dev); int i915_gem_object_get_fence_reg(struct drm_gem_object *obj); @@ -857,7 +890,11 @@ extern int i915_wait_ring(struct drm_device * dev, int n, const char *caller); #define HAS_128_BYTE_Y_TILING(dev) (IS_I9XX(dev) && !(IS_I915G(dev) || \ IS_I915GM(dev))) #define SUPPORTS_INTEGRATED_HDMI(dev) (IS_G4X(dev) || IS_IGDNG(dev)) +#define SUPPORTS_INTEGRATED_DP(dev) (IS_G4X(dev) || IS_IGDNG(dev)) +#define SUPPORTS_EDP(dev) (IS_IGDNG_M(dev)) #define I915_HAS_HOTPLUG(dev) (IS_I945G(dev) || IS_I945GM(dev) || IS_I965G(dev)) +/* dsparb controlled by hw only */ +#define DSPARB_HWCONTROL(dev) (IS_G4X(dev) || IS_IGDNG(dev)) #define PRIMARY_RINGBUFFER_SIZE (128*1024) diff --git a/drivers/gpu/drm/i915/i915_gem.c b/drivers/gpu/drm/i915/i915_gem.c index fd2b8bdffe3..140bee142fc 100644 --- a/drivers/gpu/drm/i915/i915_gem.c +++ b/drivers/gpu/drm/i915/i915_gem.c @@ -1006,7 +1006,7 @@ i915_gem_set_domain_ioctl(struct drm_device *dev, void *data, mutex_lock(&dev->struct_mutex); #if WATCH_BUF - DRM_INFO("set_domain_ioctl %p(%d), %08x %08x\n", + DRM_INFO("set_domain_ioctl %p(%zd), %08x %08x\n", obj, obj->size, read_domains, write_domain); #endif if (read_domains & I915_GEM_DOMAIN_GTT) { @@ -1050,7 +1050,7 @@ i915_gem_sw_finish_ioctl(struct drm_device *dev, void *data, } #if WATCH_BUF - DRM_INFO("%s: sw_finish %d (%p %d)\n", + DRM_INFO("%s: sw_finish %d (%p %zd)\n", __func__, args->handle, obj, obj->size); #endif obj_priv = obj->driver_private; @@ -1252,6 +1252,31 @@ out_free_list: return ret; } +/** + * i915_gem_release_mmap - remove physical page mappings + * @obj: obj in question + * + * Preserve the reservation of the mmaping with the DRM core code, but + * relinquish ownership of the pages back to the system. + * + * It is vital that we remove the page mapping if we have mapped a tiled + * object through the GTT and then lose the fence register due to + * resource pressure. Similarly if the object has been moved out of the + * aperture, than pages mapped into userspace must be revoked. Removing the + * mapping will then trigger a page fault on the next user access, allowing + * fixup by i915_gem_fault(). 
+ */ +void +i915_gem_release_mmap(struct drm_gem_object *obj) +{ + struct drm_device *dev = obj->dev; + struct drm_i915_gem_object *obj_priv = obj->driver_private; + + if (dev->dev_mapping) + unmap_mapping_range(dev->dev_mapping, + obj_priv->mmap_offset, obj->size, 1); +} + static void i915_gem_free_mmap_offset(struct drm_gem_object *obj) { @@ -1545,7 +1570,7 @@ i915_add_request(struct drm_device *dev, struct drm_file *file_priv, } if (was_empty && !dev_priv->mm.suspended) - schedule_delayed_work(&dev_priv->mm.retire_work, HZ); + queue_delayed_work(dev_priv->wq, &dev_priv->mm.retire_work, HZ); return seqno; } @@ -1694,7 +1719,7 @@ i915_gem_retire_work_handler(struct work_struct *work) i915_gem_retire_requests(dev); if (!dev_priv->mm.suspended && !list_empty(&dev_priv->mm.request_list)) - schedule_delayed_work(&dev_priv->mm.retire_work, HZ); + queue_delayed_work(dev_priv->wq, &dev_priv->mm.retire_work, HZ); mutex_unlock(&dev->struct_mutex); } @@ -1861,7 +1886,6 @@ i915_gem_object_unbind(struct drm_gem_object *obj) { struct drm_device *dev = obj->dev; struct drm_i915_gem_object *obj_priv = obj->driver_private; - loff_t offset; int ret = 0; #if WATCH_BUF @@ -1898,9 +1922,7 @@ i915_gem_object_unbind(struct drm_gem_object *obj) BUG_ON(obj_priv->active); /* blow away mappings if mapped through GTT */ - offset = ((loff_t) obj->map_list.hash.key) << PAGE_SHIFT; - if (dev->dev_mapping) - unmap_mapping_range(dev->dev_mapping, offset, obj->size, 1); + i915_gem_release_mmap(obj); if (obj_priv->fence_reg != I915_FENCE_REG_NONE) i915_gem_clear_fence_reg(obj); @@ -2222,7 +2244,6 @@ try_again: /* None available, try to steal one or wait for a user to finish */ if (i == dev_priv->num_fence_regs) { uint32_t seqno = dev_priv->mm.next_gem_seqno; - loff_t offset; if (avail == 0) return -ENOSPC; @@ -2274,10 +2295,7 @@ try_again: * Zap this virtual mapping so we can set up a fence again * for this object next time we need it. 
*/ - offset = ((loff_t) reg->obj->map_list.hash.key) << PAGE_SHIFT; - if (dev->dev_mapping) - unmap_mapping_range(dev->dev_mapping, offset, - reg->obj->size, 1); + i915_gem_release_mmap(reg->obj); old_obj_priv->fence_reg = I915_FENCE_REG_NONE; } @@ -2423,7 +2441,7 @@ i915_gem_object_bind_to_gtt(struct drm_gem_object *obj, unsigned alignment) } #if WATCH_BUF - DRM_INFO("Binding object of size %d at 0x%08x\n", + DRM_INFO("Binding object of size %zd at 0x%08x\n", obj->size, obj_priv->gtt_offset); #endif ret = i915_gem_object_get_pages(obj); @@ -4227,6 +4245,7 @@ i915_gem_lastclose(struct drm_device *dev) void i915_gem_load(struct drm_device *dev) { + int i; drm_i915_private_t *dev_priv = dev->dev_private; spin_lock_init(&dev_priv->mm.active_list_lock); @@ -4246,6 +4265,18 @@ i915_gem_load(struct drm_device *dev) else dev_priv->num_fence_regs = 8; + /* Initialize fence registers to zero */ + if (IS_I965G(dev)) { + for (i = 0; i < 16; i++) + I915_WRITE64(FENCE_REG_965_0 + (i * 8), 0); + } else { + for (i = 0; i < 8; i++) + I915_WRITE(FENCE_REG_830_0 + (i * 4), 0); + if (IS_I945G(dev) || IS_I945GM(dev) || IS_G33(dev)) + for (i = 0; i < 8; i++) + I915_WRITE(FENCE_REG_945_8 + (i * 4), 0); + } + i915_gem_detect_bit_6_swizzle(dev); } diff --git a/drivers/gpu/drm/i915/i915_gem_debug.c b/drivers/gpu/drm/i915/i915_gem_debug.c index 8d0b943e2c5..e602614bd3f 100644 --- a/drivers/gpu/drm/i915/i915_gem_debug.c +++ b/drivers/gpu/drm/i915/i915_gem_debug.c @@ -87,7 +87,7 @@ i915_gem_dump_object(struct drm_gem_object *obj, int len, chunk_len = page_len - chunk; if (chunk_len > 128) chunk_len = 128; - i915_gem_dump_page(obj_priv->page_list[page], + i915_gem_dump_page(obj_priv->pages[page], chunk, chunk + chunk_len, obj_priv->gtt_offset + page * PAGE_SIZE, @@ -143,7 +143,7 @@ i915_gem_object_check_coherency(struct drm_gem_object *obj, int handle) uint32_t *backing_map = NULL; int bad_count = 0; - DRM_INFO("%s: checking coherency of object %p@0x%08x (%d, %dkb):\n", + DRM_INFO("%s: checking coherency of object %p@0x%08x (%d, %zdkb):\n", __func__, obj, obj_priv->gtt_offset, handle, obj->size / 1024); @@ -157,7 +157,7 @@ i915_gem_object_check_coherency(struct drm_gem_object *obj, int handle) for (page = 0; page < obj->size / PAGE_SIZE; page++) { int i; - backing_map = kmap_atomic(obj_priv->page_list[page], KM_USER0); + backing_map = kmap_atomic(obj_priv->pages[page], KM_USER0); if (backing_map == NULL) { DRM_ERROR("failed to map backing page\n"); diff --git a/drivers/gpu/drm/i915/i915_gem_debugfs.c b/drivers/gpu/drm/i915/i915_gem_debugfs.c index 28146e405e8..cb3b97405fb 100644 --- a/drivers/gpu/drm/i915/i915_gem_debugfs.c +++ b/drivers/gpu/drm/i915/i915_gem_debugfs.c @@ -75,11 +75,10 @@ static int i915_gem_object_list_info(struct seq_file *m, void *data) case ACTIVE_LIST: seq_printf(m, "Active:\n"); lock = &dev_priv->mm.active_list_lock; - spin_lock(lock); head = &dev_priv->mm.active_list; break; case INACTIVE_LIST: - seq_printf(m, "Inctive:\n"); + seq_printf(m, "Inactive:\n"); head = &dev_priv->mm.inactive_list; break; case FLUSHING_LIST: @@ -91,6 +90,8 @@ static int i915_gem_object_list_info(struct seq_file *m, void *data) return 0; } + if (lock) + spin_lock(lock); list_for_each_entry(obj_priv, head, list) { struct drm_gem_object *obj = obj_priv->obj; @@ -104,7 +105,10 @@ static int i915_gem_object_list_info(struct seq_file *m, void *data) if (obj->name) seq_printf(m, " (name: %d)", obj->name); if (obj_priv->fence_reg != I915_FENCE_REG_NONE) - seq_printf(m, " (fence: %d)\n", obj_priv->fence_reg); + seq_printf(m, " 
(fence: %d)", obj_priv->fence_reg); + if (obj_priv->gtt_space != NULL) + seq_printf(m, " (gtt_offset: %08x)", obj_priv->gtt_offset); + seq_printf(m, "\n"); } @@ -323,6 +327,41 @@ static int i915_ringbuffer_info(struct seq_file *m, void *data) return 0; } +static int i915_error_state(struct seq_file *m, void *unused) +{ + struct drm_info_node *node = (struct drm_info_node *) m->private; + struct drm_device *dev = node->minor->dev; + drm_i915_private_t *dev_priv = dev->dev_private; + struct drm_i915_error_state *error; + unsigned long flags; + + spin_lock_irqsave(&dev_priv->error_lock, flags); + if (!dev_priv->first_error) { + seq_printf(m, "no error state collected\n"); + goto out; + } + + error = dev_priv->first_error; + + seq_printf(m, "Time: %ld s %ld us\n", error->time.tv_sec, + error->time.tv_usec); + seq_printf(m, "EIR: 0x%08x\n", error->eir); + seq_printf(m, " PGTBL_ER: 0x%08x\n", error->pgtbl_er); + seq_printf(m, " INSTPM: 0x%08x\n", error->instpm); + seq_printf(m, " IPEIR: 0x%08x\n", error->ipeir); + seq_printf(m, " IPEHR: 0x%08x\n", error->ipehr); + seq_printf(m, " INSTDONE: 0x%08x\n", error->instdone); + seq_printf(m, " ACTHD: 0x%08x\n", error->acthd); + if (IS_I965G(dev)) { + seq_printf(m, " INSTPS: 0x%08x\n", error->instps); + seq_printf(m, " INSTDONE1: 0x%08x\n", error->instdone1); + } + +out: + spin_unlock_irqrestore(&dev_priv->error_lock, flags); + + return 0; +} static struct drm_info_list i915_gem_debugfs_list[] = { {"i915_gem_active", i915_gem_object_list_info, 0, (void *) ACTIVE_LIST}, @@ -336,6 +375,7 @@ static struct drm_info_list i915_gem_debugfs_list[] = { {"i915_ringbuffer_data", i915_ringbuffer_data, 0}, {"i915_ringbuffer_info", i915_ringbuffer_info, 0}, {"i915_batchbuffers", i915_batchbuffer_info, 0}, + {"i915_error_state", i915_error_state, 0}, }; #define I915_GEM_DEBUGFS_ENTRIES ARRAY_SIZE(i915_gem_debugfs_list) diff --git a/drivers/gpu/drm/i915/i915_gem_tiling.c b/drivers/gpu/drm/i915/i915_gem_tiling.c index 5c1ceec49f5..a2d527b22ec 100644 --- a/drivers/gpu/drm/i915/i915_gem_tiling.c +++ b/drivers/gpu/drm/i915/i915_gem_tiling.c @@ -114,11 +114,13 @@ intel_alloc_mchbar_resource(struct drm_device *dev) mchbar_addr = ((u64)temp_hi << 32) | temp_lo; /* If ACPI doesn't have it, assume we need to allocate it ourselves */ +#ifdef CONFIG_PNP if (mchbar_addr && pnp_range_reserved(mchbar_addr, mchbar_addr + MCHBAR_SIZE)) { ret = 0; goto out_put; } +#endif /* Get some space for it */ ret = pci_bus_alloc_resource(bridge_dev->bus, &dev_priv->mch_res, @@ -519,6 +521,12 @@ i915_gem_set_tiling(struct drm_device *dev, void *data, goto err; } + /* If we've changed tiling, GTT-mappings of the object + * need to re-fault to ensure that the correct fence register + * setup is in place. + */ + i915_gem_release_mmap(obj); + obj_priv->tiling_mode = args->tiling_mode; obj_priv->stride = args->stride; } diff --git a/drivers/gpu/drm/i915/i915_irq.c b/drivers/gpu/drm/i915/i915_irq.c index b86b7b7130c..7ebc84c2881 100644 --- a/drivers/gpu/drm/i915/i915_irq.c +++ b/drivers/gpu/drm/i915/i915_irq.c @@ -26,6 +26,7 @@ * */ +#include <linux/sysrq.h> #include "drmP.h" #include "drm.h" #include "i915_drm.h" @@ -41,9 +42,10 @@ * we leave them always unmasked in IMR and then control enabling them through * PIPESTAT alone. 
*/ -#define I915_INTERRUPT_ENABLE_FIX (I915_ASLE_INTERRUPT | \ - I915_DISPLAY_PIPE_A_EVENT_INTERRUPT | \ - I915_DISPLAY_PIPE_B_EVENT_INTERRUPT) +#define I915_INTERRUPT_ENABLE_FIX (I915_ASLE_INTERRUPT | \ + I915_DISPLAY_PIPE_A_EVENT_INTERRUPT | \ + I915_DISPLAY_PIPE_B_EVENT_INTERRUPT | \ + I915_RENDER_COMMAND_PARSER_ERROR_INTERRUPT) /** Interrupts that we mask and unmask at runtime. */ #define I915_INTERRUPT_ENABLE_VAR (I915_USER_INTERRUPT) @@ -188,7 +190,7 @@ u32 i915_get_vblank_counter(struct drm_device *dev, int pipe) low_frame = pipe ? PIPEBFRAMEPIXEL : PIPEAFRAMEPIXEL; if (!i915_pipe_enabled(dev, pipe)) { - DRM_ERROR("trying to get vblank count for disabled pipe %d\n", pipe); + DRM_DEBUG("trying to get vblank count for disabled pipe %d\n", pipe); return 0; } @@ -217,7 +219,7 @@ u32 gm45_get_vblank_counter(struct drm_device *dev, int pipe) int reg = pipe ? PIPEB_FRMCOUNT_GM45 : PIPEA_FRMCOUNT_GM45; if (!i915_pipe_enabled(dev, pipe)) { - DRM_ERROR("trying to get vblank count for disabled pipe %d\n", pipe); + DRM_DEBUG("trying to get vblank count for disabled pipe %d\n", pipe); return 0; } @@ -232,7 +234,17 @@ static void i915_hotplug_work_func(struct work_struct *work) drm_i915_private_t *dev_priv = container_of(work, drm_i915_private_t, hotplug_work); struct drm_device *dev = dev_priv->dev; - + struct drm_mode_config *mode_config = &dev->mode_config; + struct drm_connector *connector; + + if (mode_config->num_connector) { + list_for_each_entry(connector, &mode_config->connector_list, head) { + struct intel_output *intel_output = to_intel_output(connector); + + if (intel_output->hot_plug) + (*intel_output->hot_plug) (intel_output); + } + } /* Just fire off a uevent and let userspace tell us what to do */ drm_sysfs_hotplug_event(dev); } @@ -278,6 +290,201 @@ irqreturn_t igdng_irq_handler(struct drm_device *dev) return ret; } +/** + * i915_error_work_func - do process context error handling work + * @work: work struct + * + * Fire an error uevent so userspace can see that a hang or error + * was detected. + */ +static void i915_error_work_func(struct work_struct *work) +{ + drm_i915_private_t *dev_priv = container_of(work, drm_i915_private_t, + error_work); + struct drm_device *dev = dev_priv->dev; + char *event_string = "ERROR=1"; + char *envp[] = { event_string, NULL }; + + DRM_DEBUG("generating error event\n"); + + kobject_uevent_env(&dev->primary->kdev.kobj, KOBJ_CHANGE, envp); +} + +/** + * i915_capture_error_state - capture an error record for later analysis + * @dev: drm device + * + * Should be called when an error is detected (either a hang or an error + * interrupt) to capture error state from the time of the error. Fills + * out a structure which becomes available in debugfs for user level tools + * to pick up. 
+ */ +static void i915_capture_error_state(struct drm_device *dev) +{ + struct drm_i915_private *dev_priv = dev->dev_private; + struct drm_i915_error_state *error; + unsigned long flags; + + spin_lock_irqsave(&dev_priv->error_lock, flags); + if (dev_priv->first_error) + goto out; + + error = kmalloc(sizeof(*error), GFP_ATOMIC); + if (!error) { + DRM_DEBUG("out ot memory, not capturing error state\n"); + goto out; + } + + error->eir = I915_READ(EIR); + error->pgtbl_er = I915_READ(PGTBL_ER); + error->pipeastat = I915_READ(PIPEASTAT); + error->pipebstat = I915_READ(PIPEBSTAT); + error->instpm = I915_READ(INSTPM); + if (!IS_I965G(dev)) { + error->ipeir = I915_READ(IPEIR); + error->ipehr = I915_READ(IPEHR); + error->instdone = I915_READ(INSTDONE); + error->acthd = I915_READ(ACTHD); + } else { + error->ipeir = I915_READ(IPEIR_I965); + error->ipehr = I915_READ(IPEHR_I965); + error->instdone = I915_READ(INSTDONE_I965); + error->instps = I915_READ(INSTPS); + error->instdone1 = I915_READ(INSTDONE1); + error->acthd = I915_READ(ACTHD_I965); + } + + do_gettimeofday(&error->time); + + dev_priv->first_error = error; + +out: + spin_unlock_irqrestore(&dev_priv->error_lock, flags); +} + +/** + * i915_handle_error - handle an error interrupt + * @dev: drm device + * + * Do some basic checking of regsiter state at error interrupt time and + * dump it to the syslog. Also call i915_capture_error_state() to make + * sure we get a record and make it available in debugfs. Fire a uevent + * so userspace knows something bad happened (should trigger collection + * of a ring dump etc.). + */ +static void i915_handle_error(struct drm_device *dev) +{ + struct drm_i915_private *dev_priv = dev->dev_private; + u32 eir = I915_READ(EIR); + u32 pipea_stats = I915_READ(PIPEASTAT); + u32 pipeb_stats = I915_READ(PIPEBSTAT); + + i915_capture_error_state(dev); + + printk(KERN_ERR "render error detected, EIR: 0x%08x\n", + eir); + + if (IS_G4X(dev)) { + if (eir & (GM45_ERROR_MEM_PRIV | GM45_ERROR_CP_PRIV)) { + u32 ipeir = I915_READ(IPEIR_I965); + + printk(KERN_ERR " IPEIR: 0x%08x\n", + I915_READ(IPEIR_I965)); + printk(KERN_ERR " IPEHR: 0x%08x\n", + I915_READ(IPEHR_I965)); + printk(KERN_ERR " INSTDONE: 0x%08x\n", + I915_READ(INSTDONE_I965)); + printk(KERN_ERR " INSTPS: 0x%08x\n", + I915_READ(INSTPS)); + printk(KERN_ERR " INSTDONE1: 0x%08x\n", + I915_READ(INSTDONE1)); + printk(KERN_ERR " ACTHD: 0x%08x\n", + I915_READ(ACTHD_I965)); + I915_WRITE(IPEIR_I965, ipeir); + (void)I915_READ(IPEIR_I965); + } + if (eir & GM45_ERROR_PAGE_TABLE) { + u32 pgtbl_err = I915_READ(PGTBL_ER); + printk(KERN_ERR "page table error\n"); + printk(KERN_ERR " PGTBL_ER: 0x%08x\n", + pgtbl_err); + I915_WRITE(PGTBL_ER, pgtbl_err); + (void)I915_READ(PGTBL_ER); + } + } + + if (IS_I9XX(dev)) { + if (eir & I915_ERROR_PAGE_TABLE) { + u32 pgtbl_err = I915_READ(PGTBL_ER); + printk(KERN_ERR "page table error\n"); + printk(KERN_ERR " PGTBL_ER: 0x%08x\n", + pgtbl_err); + I915_WRITE(PGTBL_ER, pgtbl_err); + (void)I915_READ(PGTBL_ER); + } + } + + if (eir & I915_ERROR_MEMORY_REFRESH) { + printk(KERN_ERR "memory refresh error\n"); + printk(KERN_ERR "PIPEASTAT: 0x%08x\n", + pipea_stats); + printk(KERN_ERR "PIPEBSTAT: 0x%08x\n", + pipeb_stats); + /* pipestat has already been acked */ + } + if (eir & I915_ERROR_INSTRUCTION) { + printk(KERN_ERR "instruction error\n"); + printk(KERN_ERR " INSTPM: 0x%08x\n", + I915_READ(INSTPM)); + if (!IS_I965G(dev)) { + u32 ipeir = I915_READ(IPEIR); + + printk(KERN_ERR " IPEIR: 0x%08x\n", + I915_READ(IPEIR)); + printk(KERN_ERR " IPEHR: 0x%08x\n", + 
I915_READ(IPEHR)); + printk(KERN_ERR " INSTDONE: 0x%08x\n", + I915_READ(INSTDONE)); + printk(KERN_ERR " ACTHD: 0x%08x\n", + I915_READ(ACTHD)); + I915_WRITE(IPEIR, ipeir); + (void)I915_READ(IPEIR); + } else { + u32 ipeir = I915_READ(IPEIR_I965); + + printk(KERN_ERR " IPEIR: 0x%08x\n", + I915_READ(IPEIR_I965)); + printk(KERN_ERR " IPEHR: 0x%08x\n", + I915_READ(IPEHR_I965)); + printk(KERN_ERR " INSTDONE: 0x%08x\n", + I915_READ(INSTDONE_I965)); + printk(KERN_ERR " INSTPS: 0x%08x\n", + I915_READ(INSTPS)); + printk(KERN_ERR " INSTDONE1: 0x%08x\n", + I915_READ(INSTDONE1)); + printk(KERN_ERR " ACTHD: 0x%08x\n", + I915_READ(ACTHD_I965)); + I915_WRITE(IPEIR_I965, ipeir); + (void)I915_READ(IPEIR_I965); + } + } + + I915_WRITE(EIR, eir); + (void)I915_READ(EIR); + eir = I915_READ(EIR); + if (eir) { + /* + * some errors might have become stuck, + * mask them. + */ + DRM_ERROR("EIR stuck: 0x%08x, masking\n", eir); + I915_WRITE(EMR, I915_READ(EMR) | eir); + I915_WRITE(IIR, I915_RENDER_COMMAND_PARSER_ERROR_INTERRUPT); + } + + queue_work(dev_priv->wq, &dev_priv->error_work); +} + irqreturn_t i915_driver_irq_handler(DRM_IRQ_ARGS) { struct drm_device *dev = (struct drm_device *) arg; @@ -319,15 +526,22 @@ irqreturn_t i915_driver_irq_handler(DRM_IRQ_ARGS) pipea_stats = I915_READ(PIPEASTAT); pipeb_stats = I915_READ(PIPEBSTAT); + if (iir & I915_RENDER_COMMAND_PARSER_ERROR_INTERRUPT) + i915_handle_error(dev); + /* * Clear the PIPE(A|B)STAT regs before the IIR */ if (pipea_stats & 0x8000ffff) { + if (pipea_stats & PIPE_FIFO_UNDERRUN_STATUS) + DRM_DEBUG("pipe a underrun\n"); I915_WRITE(PIPEASTAT, pipea_stats); irq_received = 1; } if (pipeb_stats & 0x8000ffff) { + if (pipeb_stats & PIPE_FIFO_UNDERRUN_STATUS) + DRM_DEBUG("pipe b underrun\n"); I915_WRITE(PIPEBSTAT, pipeb_stats); irq_received = 1; } @@ -346,7 +560,8 @@ irqreturn_t i915_driver_irq_handler(DRM_IRQ_ARGS) DRM_DEBUG("hotplug event received, stat 0x%08x\n", hotplug_status); if (hotplug_status & dev_priv->hotplug_supported_mask) - schedule_work(&dev_priv->hotplug_work); + queue_work(dev_priv->wq, + &dev_priv->hotplug_work); I915_WRITE(PORT_HOTPLUG_STAT, hotplug_status); I915_READ(PORT_HOTPLUG_STAT); @@ -699,6 +914,7 @@ void i915_driver_irq_preinstall(struct drm_device * dev) atomic_set(&dev_priv->irq_received, 0); INIT_WORK(&dev_priv->hotplug_work, i915_hotplug_work_func); + INIT_WORK(&dev_priv->error_work, i915_error_work_func); if (IS_IGDNG(dev)) { igdng_irq_preinstall(dev); @@ -722,6 +938,7 @@ int i915_driver_irq_postinstall(struct drm_device *dev) { drm_i915_private_t *dev_priv = (drm_i915_private_t *) dev->dev_private; u32 enable_mask = I915_INTERRUPT_ENABLE_FIX | I915_INTERRUPT_ENABLE_VAR; + u32 error_mask; DRM_INIT_WAITQUEUE(&dev_priv->irq_queue); @@ -758,6 +975,21 @@ int i915_driver_irq_postinstall(struct drm_device *dev) i915_enable_irq(dev_priv, I915_DISPLAY_PORT_INTERRUPT); } + /* + * Enable some error detection, note the instruction error mask + * bit is reserved, so we leave it masked. 
+ */ + if (IS_G4X(dev)) { + error_mask = ~(GM45_ERROR_PAGE_TABLE | + GM45_ERROR_MEM_PRIV | + GM45_ERROR_CP_PRIV | + I915_ERROR_MEMORY_REFRESH); + } else { + error_mask = ~(I915_ERROR_PAGE_TABLE | + I915_ERROR_MEMORY_REFRESH); + } + I915_WRITE(EMR, error_mask); + /* Disable pipe interrupt enables, clear pending pipe status */ I915_WRITE(PIPEASTAT, I915_READ(PIPEASTAT) & 0x8000ffff); I915_WRITE(PIPEBSTAT, I915_READ(PIPEBSTAT) & 0x8000ffff); diff --git a/drivers/gpu/drm/i915/i915_reg.h b/drivers/gpu/drm/i915/i915_reg.h index f6237a0b113..2955083aa47 100644 --- a/drivers/gpu/drm/i915/i915_reg.h +++ b/drivers/gpu/drm/i915/i915_reg.h @@ -206,6 +206,7 @@ /* * Instruction and interrupt control regs */ +#define PGTBL_ER 0x02024 #define PRB0_TAIL 0x02030 #define PRB0_HEAD 0x02034 #define PRB0_START 0x02038 @@ -226,11 +227,18 @@ #define PRB1_HEAD 0x02044 /* 915+ only */ #define PRB1_START 0x02048 /* 915+ only */ #define PRB1_CTL 0x0204c /* 915+ only */ +#define IPEIR_I965 0x02064 +#define IPEHR_I965 0x02068 +#define INSTDONE_I965 0x0206c +#define INSTPS 0x02070 /* 965+ only */ +#define INSTDONE1 0x0207c /* 965+ only */ #define ACTHD_I965 0x02074 #define HWS_PGA 0x02080 #define HWS_ADDRESS_MASK 0xfffff000 #define HWS_START_ADDRESS_SHIFT 4 #define IPEIR 0x02088 +#define IPEHR 0x0208c +#define INSTDONE 0x02090 #define NOPID 0x02094 #define HWSTAM 0x02098 #define SCPD0 0x0209c /* 915+ only */ @@ -258,10 +266,22 @@ #define EIR 0x020b0 #define EMR 0x020b4 #define ESR 0x020b8 +#define GM45_ERROR_PAGE_TABLE (1<<5) +#define GM45_ERROR_MEM_PRIV (1<<4) +#define I915_ERROR_PAGE_TABLE (1<<4) +#define GM45_ERROR_CP_PRIV (1<<3) +#define I915_ERROR_MEMORY_REFRESH (1<<1) +#define I915_ERROR_INSTRUCTION (1<<0) #define INSTPM 0x020c0 #define ACTHD 0x020c8 #define FW_BLC 0x020d8 +#define FW_BLC2 0x020dc #define FW_BLC_SELF 0x020e0 /* 915+ only */ +#define FW_BLC_SELF_EN (1<<15) +#define MM_BURST_LENGTH 0x00700000 +#define MM_FIFO_WATERMARK 0x0001F000 +#define LM_BURST_LENGTH 0x00000700 +#define LM_FIFO_WATERMARK 0x0000001F #define MI_ARB_STATE 0x020e4 /* 915+ only */ #define CACHE_MODE_0 0x02120 /* 915+ only */ #define CM0_MASK_SHIFT 16 @@ -569,6 +589,23 @@ #define C0DRB3 0x10206 #define C1DRB3 0x10606 +/* Clocking configuration register */ +#define CLKCFG 0x10c00 +#define CLKCFG_FSB_400 (5 << 0) /* hrawclk 100 */ +#define CLKCFG_FSB_533 (1 << 0) /* hrawclk 133 */ +#define CLKCFG_FSB_667 (3 << 0) /* hrawclk 166 */ +#define CLKCFG_FSB_800 (2 << 0) /* hrawclk 200 */ +#define CLKCFG_FSB_1067 (6 << 0) /* hrawclk 266 */ +#define CLKCFG_FSB_1333 (7 << 0) /* hrawclk 333 */ +/* Note, below two are guess */ +#define CLKCFG_FSB_1600 (4 << 0) /* hrawclk 400 */ +#define CLKCFG_FSB_1600_ALT (0 << 0) /* hrawclk 400 */ +#define CLKCFG_FSB_MASK (7 << 0) +#define CLKCFG_MEM_533 (1 << 4) +#define CLKCFG_MEM_667 (2 << 4) +#define CLKCFG_MEM_800 (3 << 4) +#define CLKCFG_MEM_MASK (7 << 4) + /** GM965 GM45 render standby register */ #define MCHBAR_RENDER_STANDBY 0x111B8 @@ -834,9 +871,25 @@ #define HORIZ_INTERP_MASK (3 << 6) #define HORIZ_AUTO_SCALE (1 << 5) #define PANEL_8TO6_DITHER_ENABLE (1 << 3) +#define PFIT_FILTER_FUZZY (0 << 24) +#define PFIT_SCALING_AUTO (0 << 26) +#define PFIT_SCALING_PROGRAMMED (1 << 26) +#define PFIT_SCALING_PILLAR (2 << 26) +#define PFIT_SCALING_LETTER (3 << 26) #define PFIT_PGM_RATIOS 0x61234 #define PFIT_VERT_SCALE_MASK 0xfff00000 #define PFIT_HORIZ_SCALE_MASK 0x0000fff0 +/* Pre-965 */ +#define PFIT_VERT_SCALE_SHIFT 20 +#define PFIT_VERT_SCALE_MASK 0xfff00000 +#define PFIT_HORIZ_SCALE_SHIFT 4 +#define 
PFIT_HORIZ_SCALE_MASK 0x0000fff0 +/* 965+ */ +#define PFIT_VERT_SCALE_SHIFT_965 16 +#define PFIT_VERT_SCALE_MASK_965 0x1fff0000 +#define PFIT_HORIZ_SCALE_SHIFT_965 0 +#define PFIT_HORIZ_SCALE_MASK_965 0x00001fff + #define PFIT_AUTO_RATIOS 0x61238 /* Backlight control */ @@ -1342,6 +1395,7 @@ #define TV_V_CHROMA_42 0x684a8 /* Display Port */ +#define DP_A 0x64000 /* eDP */ #define DP_B 0x64100 #define DP_C 0x64200 #define DP_D 0x64300 @@ -1384,13 +1438,22 @@ /* Mystic DPCD version 1.1 special mode */ #define DP_ENHANCED_FRAMING (1 << 18) +/* eDP */ +#define DP_PLL_FREQ_270MHZ (0 << 16) +#define DP_PLL_FREQ_160MHZ (1 << 16) +#define DP_PLL_FREQ_MASK (3 << 16) + /** locked once port is enabled */ #define DP_PORT_REVERSAL (1 << 15) +/* eDP */ +#define DP_PLL_ENABLE (1 << 14) + /** sends the clock on lane 15 of the PEG for debug */ #define DP_CLOCK_OUTPUT_ENABLE (1 << 13) #define DP_SCRAMBLING_DISABLE (1 << 12) +#define DP_SCRAMBLING_DISABLE_IGDNG (1 << 7) /** limit RGB values to avoid confusing TVs */ #define DP_COLOR_RANGE_16_235 (1 << 8) @@ -1410,6 +1473,13 @@ * is 20 bytes in each direction, hence the 5 fixed * data registers */ +#define DPA_AUX_CH_CTL 0x64010 +#define DPA_AUX_CH_DATA1 0x64014 +#define DPA_AUX_CH_DATA2 0x64018 +#define DPA_AUX_CH_DATA3 0x6401c +#define DPA_AUX_CH_DATA4 0x64020 +#define DPA_AUX_CH_DATA5 0x64024 + #define DPB_AUX_CH_CTL 0x64110 #define DPB_AUX_CH_DATA1 0x64114 #define DPB_AUX_CH_DATA2 0x64118 @@ -1552,6 +1622,34 @@ #define DSPARB_CSTART_SHIFT 7 #define DSPARB_BSTART_MASK (0x7f) #define DSPARB_BSTART_SHIFT 0 +#define DSPARB_BEND_SHIFT 9 /* on 855 */ +#define DSPARB_AEND_SHIFT 0 + +#define DSPFW1 0x70034 +#define DSPFW2 0x70038 +#define DSPFW3 0x7003c +#define IGD_SELF_REFRESH_EN (1<<30) + +/* FIFO watermark sizes etc */ +#define I915_FIFO_LINE_SIZE 64 +#define I830_FIFO_LINE_SIZE 32 +#define I945_FIFO_SIZE 127 /* 945 & 965 */ +#define I915_FIFO_SIZE 95 +#define I855GM_FIFO_SIZE 127 /* In cachelines */ +#define I830_FIFO_SIZE 95 +#define I915_MAX_WM 0x3f + +#define IGD_DISPLAY_FIFO 512 /* in 64byte unit */ +#define IGD_FIFO_LINE_SIZE 64 +#define IGD_MAX_WM 0x1ff +#define IGD_DFT_WM 0x3f +#define IGD_DFT_HPLLOFF_WM 0 +#define IGD_GUARD_WM 10 +#define IGD_CURSOR_FIFO 64 +#define IGD_CURSOR_MAX_WM 0x3f +#define IGD_CURSOR_DFT_WM 0 +#define IGD_CURSOR_GUARD_WM 5 + /* * The two pipe frame counter registers are not synchronized, so * reading a stable value is somewhat tricky. 
The following code @@ -1767,6 +1865,8 @@ #define PFA_CTL_1 0x68080 #define PFB_CTL_1 0x68880 #define PF_ENABLE (1<<31) +#define PFA_WIN_SZ 0x68074 +#define PFB_WIN_SZ 0x68874 /* legacy palette */ #define LGC_PALETTE_A 0x4a000 @@ -2127,4 +2227,28 @@ #define PCH_PP_OFF_DELAYS 0xc720c #define PCH_PP_DIVISOR 0xc7210 +#define PCH_DP_B 0xe4100 +#define PCH_DPB_AUX_CH_CTL 0xe4110 +#define PCH_DPB_AUX_CH_DATA1 0xe4114 +#define PCH_DPB_AUX_CH_DATA2 0xe4118 +#define PCH_DPB_AUX_CH_DATA3 0xe411c +#define PCH_DPB_AUX_CH_DATA4 0xe4120 +#define PCH_DPB_AUX_CH_DATA5 0xe4124 + +#define PCH_DP_C 0xe4200 +#define PCH_DPC_AUX_CH_CTL 0xe4210 +#define PCH_DPC_AUX_CH_DATA1 0xe4214 +#define PCH_DPC_AUX_CH_DATA2 0xe4218 +#define PCH_DPC_AUX_CH_DATA3 0xe421c +#define PCH_DPC_AUX_CH_DATA4 0xe4220 +#define PCH_DPC_AUX_CH_DATA5 0xe4224 + +#define PCH_DP_D 0xe4300 +#define PCH_DPD_AUX_CH_CTL 0xe4310 +#define PCH_DPD_AUX_CH_DATA1 0xe4314 +#define PCH_DPD_AUX_CH_DATA2 0xe4318 +#define PCH_DPD_AUX_CH_DATA3 0xe431c +#define PCH_DPD_AUX_CH_DATA4 0xe4320 +#define PCH_DPD_AUX_CH_DATA5 0xe4324 + #endif /* _I915_REG_H_ */ diff --git a/drivers/gpu/drm/i915/i915_suspend.c b/drivers/gpu/drm/i915/i915_suspend.c index a98e2831ed3..1d04e1904ac 100644 --- a/drivers/gpu/drm/i915/i915_suspend.c +++ b/drivers/gpu/drm/i915/i915_suspend.c @@ -222,23 +222,12 @@ static void i915_restore_vga(struct drm_device *dev) I915_WRITE8(VGA_DACMASK, dev_priv->saveDACMASK); } -int i915_save_state(struct drm_device *dev) +static void i915_save_modeset_reg(struct drm_device *dev) { struct drm_i915_private *dev_priv = dev->dev_private; - int i; - - pci_read_config_byte(dev->pdev, LBB, &dev_priv->saveLBB); - - /* Render Standby */ - if (IS_I965G(dev) && IS_MOBILE(dev)) - dev_priv->saveRENDERSTANDBY = I915_READ(MCHBAR_RENDER_STANDBY); - - /* Hardware status page */ - dev_priv->saveHWS = I915_READ(HWS_PGA); - - /* Display arbitration control */ - dev_priv->saveDSPARB = I915_READ(DSPARB); + if (drm_core_check_feature(dev, DRIVER_MODESET)) + return; /* Pipe & plane A info */ dev_priv->savePIPEACONF = I915_READ(PIPEACONF); dev_priv->savePIPEASRC = I915_READ(PIPEASRC); @@ -294,7 +283,122 @@ int i915_save_state(struct drm_device *dev) } i915_save_palette(dev, PIPE_B); dev_priv->savePIPEBSTAT = I915_READ(PIPEBSTAT); + return; +} +static void i915_restore_modeset_reg(struct drm_device *dev) +{ + struct drm_i915_private *dev_priv = dev->dev_private; + + if (drm_core_check_feature(dev, DRIVER_MODESET)) + return; + + /* Pipe & plane A info */ + /* Prime the clock */ + if (dev_priv->saveDPLL_A & DPLL_VCO_ENABLE) { + I915_WRITE(DPLL_A, dev_priv->saveDPLL_A & + ~DPLL_VCO_ENABLE); + DRM_UDELAY(150); + } + I915_WRITE(FPA0, dev_priv->saveFPA0); + I915_WRITE(FPA1, dev_priv->saveFPA1); + /* Actually enable it */ + I915_WRITE(DPLL_A, dev_priv->saveDPLL_A); + DRM_UDELAY(150); + if (IS_I965G(dev)) + I915_WRITE(DPLL_A_MD, dev_priv->saveDPLL_A_MD); + DRM_UDELAY(150); + + /* Restore mode */ + I915_WRITE(HTOTAL_A, dev_priv->saveHTOTAL_A); + I915_WRITE(HBLANK_A, dev_priv->saveHBLANK_A); + I915_WRITE(HSYNC_A, dev_priv->saveHSYNC_A); + I915_WRITE(VTOTAL_A, dev_priv->saveVTOTAL_A); + I915_WRITE(VBLANK_A, dev_priv->saveVBLANK_A); + I915_WRITE(VSYNC_A, dev_priv->saveVSYNC_A); + I915_WRITE(BCLRPAT_A, dev_priv->saveBCLRPAT_A); + + /* Restore plane info */ + I915_WRITE(DSPASIZE, dev_priv->saveDSPASIZE); + I915_WRITE(DSPAPOS, dev_priv->saveDSPAPOS); + I915_WRITE(PIPEASRC, dev_priv->savePIPEASRC); + I915_WRITE(DSPAADDR, dev_priv->saveDSPAADDR); + I915_WRITE(DSPASTRIDE, 
dev_priv->saveDSPASTRIDE); + if (IS_I965G(dev)) { + I915_WRITE(DSPASURF, dev_priv->saveDSPASURF); + I915_WRITE(DSPATILEOFF, dev_priv->saveDSPATILEOFF); + } + + I915_WRITE(PIPEACONF, dev_priv->savePIPEACONF); + + i915_restore_palette(dev, PIPE_A); + /* Enable the plane */ + I915_WRITE(DSPACNTR, dev_priv->saveDSPACNTR); + I915_WRITE(DSPAADDR, I915_READ(DSPAADDR)); + + /* Pipe & plane B info */ + if (dev_priv->saveDPLL_B & DPLL_VCO_ENABLE) { + I915_WRITE(DPLL_B, dev_priv->saveDPLL_B & + ~DPLL_VCO_ENABLE); + DRM_UDELAY(150); + } + I915_WRITE(FPB0, dev_priv->saveFPB0); + I915_WRITE(FPB1, dev_priv->saveFPB1); + /* Actually enable it */ + I915_WRITE(DPLL_B, dev_priv->saveDPLL_B); + DRM_UDELAY(150); + if (IS_I965G(dev)) + I915_WRITE(DPLL_B_MD, dev_priv->saveDPLL_B_MD); + DRM_UDELAY(150); + + /* Restore mode */ + I915_WRITE(HTOTAL_B, dev_priv->saveHTOTAL_B); + I915_WRITE(HBLANK_B, dev_priv->saveHBLANK_B); + I915_WRITE(HSYNC_B, dev_priv->saveHSYNC_B); + I915_WRITE(VTOTAL_B, dev_priv->saveVTOTAL_B); + I915_WRITE(VBLANK_B, dev_priv->saveVBLANK_B); + I915_WRITE(VSYNC_B, dev_priv->saveVSYNC_B); + I915_WRITE(BCLRPAT_B, dev_priv->saveBCLRPAT_B); + + /* Restore plane info */ + I915_WRITE(DSPBSIZE, dev_priv->saveDSPBSIZE); + I915_WRITE(DSPBPOS, dev_priv->saveDSPBPOS); + I915_WRITE(PIPEBSRC, dev_priv->savePIPEBSRC); + I915_WRITE(DSPBADDR, dev_priv->saveDSPBADDR); + I915_WRITE(DSPBSTRIDE, dev_priv->saveDSPBSTRIDE); + if (IS_I965G(dev)) { + I915_WRITE(DSPBSURF, dev_priv->saveDSPBSURF); + I915_WRITE(DSPBTILEOFF, dev_priv->saveDSPBTILEOFF); + } + + I915_WRITE(PIPEBCONF, dev_priv->savePIPEBCONF); + + i915_restore_palette(dev, PIPE_B); + /* Enable the plane */ + I915_WRITE(DSPBCNTR, dev_priv->saveDSPBCNTR); + I915_WRITE(DSPBADDR, I915_READ(DSPBADDR)); + return; +} +int i915_save_state(struct drm_device *dev) +{ + struct drm_i915_private *dev_priv = dev->dev_private; + int i; + + pci_read_config_byte(dev->pdev, LBB, &dev_priv->saveLBB); + + /* Render Standby */ + if (IS_I965G(dev) && IS_MOBILE(dev)) + dev_priv->saveRENDERSTANDBY = I915_READ(MCHBAR_RENDER_STANDBY); + + /* Hardware status page */ + dev_priv->saveHWS = I915_READ(HWS_PGA); + + /* Display arbitration control */ + dev_priv->saveDSPARB = I915_READ(DSPARB); + + /* This is only meaningful in non-KMS mode */ + /* Don't save them in KMS mode */ + i915_save_modeset_reg(dev); /* Cursor state */ dev_priv->saveCURACNTR = I915_READ(CURACNTR); dev_priv->saveCURAPOS = I915_READ(CURAPOS); @@ -322,6 +426,20 @@ int i915_save_state(struct drm_device *dev) dev_priv->savePP_OFF_DELAYS = I915_READ(PP_OFF_DELAYS); dev_priv->savePP_DIVISOR = I915_READ(PP_DIVISOR); + /* Display Port state */ + if (SUPPORTS_INTEGRATED_DP(dev)) { + dev_priv->saveDP_B = I915_READ(DP_B); + dev_priv->saveDP_C = I915_READ(DP_C); + dev_priv->saveDP_D = I915_READ(DP_D); + dev_priv->savePIPEA_GMCH_DATA_M = I915_READ(PIPEA_GMCH_DATA_M); + dev_priv->savePIPEB_GMCH_DATA_M = I915_READ(PIPEB_GMCH_DATA_M); + dev_priv->savePIPEA_GMCH_DATA_N = I915_READ(PIPEA_GMCH_DATA_N); + dev_priv->savePIPEB_GMCH_DATA_N = I915_READ(PIPEB_GMCH_DATA_N); + dev_priv->savePIPEA_DP_LINK_M = I915_READ(PIPEA_DP_LINK_M); + dev_priv->savePIPEB_DP_LINK_M = I915_READ(PIPEB_DP_LINK_M); + dev_priv->savePIPEA_DP_LINK_N = I915_READ(PIPEA_DP_LINK_N); + dev_priv->savePIPEB_DP_LINK_N = I915_READ(PIPEB_DP_LINK_N); + } /* FIXME: save TV & SDVO state */ /* FBC state */ @@ -404,92 +522,21 @@ int i915_restore_state(struct drm_device *dev) for (i = 0; i < 8; i++) I915_WRITE(FENCE_REG_945_8 + (i * 4), dev_priv->saveFENCE[i+8]); } - - /* Pipe & 
plane A info */ - /* Prime the clock */ - if (dev_priv->saveDPLL_A & DPLL_VCO_ENABLE) { - I915_WRITE(DPLL_A, dev_priv->saveDPLL_A & - ~DPLL_VCO_ENABLE); - DRM_UDELAY(150); + + /* Display port ratios (must be done before clock is set) */ + if (SUPPORTS_INTEGRATED_DP(dev)) { + I915_WRITE(PIPEA_GMCH_DATA_M, dev_priv->savePIPEA_GMCH_DATA_M); + I915_WRITE(PIPEB_GMCH_DATA_M, dev_priv->savePIPEB_GMCH_DATA_M); + I915_WRITE(PIPEA_GMCH_DATA_N, dev_priv->savePIPEA_GMCH_DATA_N); + I915_WRITE(PIPEB_GMCH_DATA_N, dev_priv->savePIPEB_GMCH_DATA_N); + I915_WRITE(PIPEA_DP_LINK_M, dev_priv->savePIPEA_DP_LINK_M); + I915_WRITE(PIPEB_DP_LINK_M, dev_priv->savePIPEB_DP_LINK_M); + I915_WRITE(PIPEA_DP_LINK_N, dev_priv->savePIPEA_DP_LINK_N); + I915_WRITE(PIPEB_DP_LINK_N, dev_priv->savePIPEB_DP_LINK_N); } - I915_WRITE(FPA0, dev_priv->saveFPA0); - I915_WRITE(FPA1, dev_priv->saveFPA1); - /* Actually enable it */ - I915_WRITE(DPLL_A, dev_priv->saveDPLL_A); - DRM_UDELAY(150); - if (IS_I965G(dev)) - I915_WRITE(DPLL_A_MD, dev_priv->saveDPLL_A_MD); - DRM_UDELAY(150); - - /* Restore mode */ - I915_WRITE(HTOTAL_A, dev_priv->saveHTOTAL_A); - I915_WRITE(HBLANK_A, dev_priv->saveHBLANK_A); - I915_WRITE(HSYNC_A, dev_priv->saveHSYNC_A); - I915_WRITE(VTOTAL_A, dev_priv->saveVTOTAL_A); - I915_WRITE(VBLANK_A, dev_priv->saveVBLANK_A); - I915_WRITE(VSYNC_A, dev_priv->saveVSYNC_A); - I915_WRITE(BCLRPAT_A, dev_priv->saveBCLRPAT_A); - - /* Restore plane info */ - I915_WRITE(DSPASIZE, dev_priv->saveDSPASIZE); - I915_WRITE(DSPAPOS, dev_priv->saveDSPAPOS); - I915_WRITE(PIPEASRC, dev_priv->savePIPEASRC); - I915_WRITE(DSPAADDR, dev_priv->saveDSPAADDR); - I915_WRITE(DSPASTRIDE, dev_priv->saveDSPASTRIDE); - if (IS_I965G(dev)) { - I915_WRITE(DSPASURF, dev_priv->saveDSPASURF); - I915_WRITE(DSPATILEOFF, dev_priv->saveDSPATILEOFF); - } - - I915_WRITE(PIPEACONF, dev_priv->savePIPEACONF); - - i915_restore_palette(dev, PIPE_A); - /* Enable the plane */ - I915_WRITE(DSPACNTR, dev_priv->saveDSPACNTR); - I915_WRITE(DSPAADDR, I915_READ(DSPAADDR)); - - /* Pipe & plane B info */ - if (dev_priv->saveDPLL_B & DPLL_VCO_ENABLE) { - I915_WRITE(DPLL_B, dev_priv->saveDPLL_B & - ~DPLL_VCO_ENABLE); - DRM_UDELAY(150); - } - I915_WRITE(FPB0, dev_priv->saveFPB0); - I915_WRITE(FPB1, dev_priv->saveFPB1); - /* Actually enable it */ - I915_WRITE(DPLL_B, dev_priv->saveDPLL_B); - DRM_UDELAY(150); - if (IS_I965G(dev)) - I915_WRITE(DPLL_B_MD, dev_priv->saveDPLL_B_MD); - DRM_UDELAY(150); - - /* Restore mode */ - I915_WRITE(HTOTAL_B, dev_priv->saveHTOTAL_B); - I915_WRITE(HBLANK_B, dev_priv->saveHBLANK_B); - I915_WRITE(HSYNC_B, dev_priv->saveHSYNC_B); - I915_WRITE(VTOTAL_B, dev_priv->saveVTOTAL_B); - I915_WRITE(VBLANK_B, dev_priv->saveVBLANK_B); - I915_WRITE(VSYNC_B, dev_priv->saveVSYNC_B); - I915_WRITE(BCLRPAT_B, dev_priv->saveBCLRPAT_B); - - /* Restore plane info */ - I915_WRITE(DSPBSIZE, dev_priv->saveDSPBSIZE); - I915_WRITE(DSPBPOS, dev_priv->saveDSPBPOS); - I915_WRITE(PIPEBSRC, dev_priv->savePIPEBSRC); - I915_WRITE(DSPBADDR, dev_priv->saveDSPBADDR); - I915_WRITE(DSPBSTRIDE, dev_priv->saveDSPBSTRIDE); - if (IS_I965G(dev)) { - I915_WRITE(DSPBSURF, dev_priv->saveDSPBSURF); - I915_WRITE(DSPBTILEOFF, dev_priv->saveDSPBTILEOFF); - } - - I915_WRITE(PIPEBCONF, dev_priv->savePIPEBCONF); - - i915_restore_palette(dev, PIPE_B); - /* Enable the plane */ - I915_WRITE(DSPBCNTR, dev_priv->saveDSPBCNTR); - I915_WRITE(DSPBADDR, I915_READ(DSPBADDR)); - + /* This is only meaningful in non-KMS mode */ + /* Don't restore them in KMS mode */ + i915_restore_modeset_reg(dev); /* Cursor state */ 
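Side note on the "must be done before clock is set" ordering above: the GMCH data M/N and DP link M/N pairs encode the ratio between pixel-stream bandwidth and link bandwidth, and the patch computes them further down in igdng_compute_m_n(). The following is only a rough standalone sketch of that ratio calculation; the DATA_SCALE constant, helper name and example numbers are assumptions for illustration, not the driver's actual symbols.

/* Illustrative sketch of the data M/N idea used by igdng_compute_m_n();
 * names and the fixed-point scale are assumptions. */
#include <stdint.h>
#include <stdio.h>

#define DATA_SCALE 0x800000ULL  /* assumed fixed N value for the data ratio */

static void compute_data_m_n(int bytes_per_pixel, int nlanes,
                             int pixel_clock_khz, int link_clock_khz,
                             uint64_t *m, uint64_t *n)
{
        /* data M/N ~= (pixel_clock * bytes_per_pixel) / (link_clock * nlanes) */
        uint64_t tmp = DATA_SCALE * (uint64_t)pixel_clock_khz;
        tmp /= link_clock_khz;              /* div_u64() in the kernel */
        *m = tmp * bytes_per_pixel / nlanes;
        *n = DATA_SCALE;
}

int main(void)
{
        uint64_t m, n;
        /* 1920x1200@60 (~154 MHz dot clock), 32 bpp, 4 lanes at 270 MHz */
        compute_data_m_n(4, 4, 154000, 270000, &m, &n);
        printf("data M/N = %llu/%llu\n",
               (unsigned long long)m, (unsigned long long)n);
        return 0;
}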
I915_WRITE(CURAPOS, dev_priv->saveCURAPOS); I915_WRITE(CURACNTR, dev_priv->saveCURACNTR); @@ -518,6 +565,12 @@ int i915_restore_state(struct drm_device *dev) I915_WRITE(PP_DIVISOR, dev_priv->savePP_DIVISOR); I915_WRITE(PP_CONTROL, dev_priv->savePP_CONTROL); + /* Display Port state */ + if (SUPPORTS_INTEGRATED_DP(dev)) { + I915_WRITE(DP_B, dev_priv->saveDP_B); + I915_WRITE(DP_C, dev_priv->saveDP_C); + I915_WRITE(DP_D, dev_priv->saveDP_D); + } /* FIXME: restore TV & SDVO state */ /* FBC info */ @@ -545,7 +598,7 @@ int i915_restore_state(struct drm_device *dev) for (i = 0; i < 16; i++) { I915_WRITE(SWF00 + (i << 2), dev_priv->saveSWF0[i]); - I915_WRITE(SWF10 + (i << 2), dev_priv->saveSWF1[i+7]); + I915_WRITE(SWF10 + (i << 2), dev_priv->saveSWF1[i]); } for (i = 0; i < 3; i++) I915_WRITE(SWF30 + (i << 2), dev_priv->saveSWF2[i]); diff --git a/drivers/gpu/drm/i915/intel_bios.c b/drivers/gpu/drm/i915/intel_bios.c index cdd126d068a..300aee3296c 100644 --- a/drivers/gpu/drm/i915/intel_bios.c +++ b/drivers/gpu/drm/i915/intel_bios.c @@ -99,9 +99,11 @@ parse_lfp_panel_data(struct drm_i915_private *dev_priv, { struct bdb_lvds_options *lvds_options; struct bdb_lvds_lfp_data *lvds_lfp_data; + struct bdb_lvds_lfp_data_ptrs *lvds_lfp_data_ptrs; struct bdb_lvds_lfp_data_entry *entry; struct lvds_dvo_timing *dvo_timing; struct drm_display_mode *panel_fixed_mode; + int lfp_data_size, dvo_timing_offset; /* Defaults if we can't find VBT info */ dev_priv->lvds_dither = 0; @@ -119,10 +121,27 @@ parse_lfp_panel_data(struct drm_i915_private *dev_priv, if (!lvds_lfp_data) return; + lvds_lfp_data_ptrs = find_section(bdb, BDB_LVDS_LFP_DATA_PTRS); + if (!lvds_lfp_data_ptrs) + return; + dev_priv->lvds_vbt = 1; - entry = &lvds_lfp_data->data[lvds_options->panel_type]; - dvo_timing = &entry->dvo_timing; + lfp_data_size = lvds_lfp_data_ptrs->ptr[1].dvo_timing_offset - + lvds_lfp_data_ptrs->ptr[0].dvo_timing_offset; + entry = (struct bdb_lvds_lfp_data_entry *) + ((uint8_t *)lvds_lfp_data->data + (lfp_data_size * + lvds_options->panel_type)); + dvo_timing_offset = lvds_lfp_data_ptrs->ptr[0].dvo_timing_offset - + lvds_lfp_data_ptrs->ptr[0].fp_timing_offset; + + /* + * the size of fp_timing varies on the different platform. + * So calculate the DVO timing relative offset in LVDS data + * entry to get the DVO timing entry + */ + dvo_timing = (struct lvds_dvo_timing *) + ((unsigned char *)entry + dvo_timing_offset); panel_fixed_mode = kzalloc(sizeof(*panel_fixed_mode), GFP_KERNEL); @@ -185,10 +204,12 @@ parse_general_features(struct drm_i915_private *dev_priv, dev_priv->lvds_use_ssc = general->enable_ssc; if (dev_priv->lvds_use_ssc) { - if (IS_I855(dev_priv->dev)) - dev_priv->lvds_ssc_freq = general->ssc_freq ? 66 : 48; - else - dev_priv->lvds_ssc_freq = general->ssc_freq ? 100 : 96; + if (IS_I85X(dev_priv->dev)) + dev_priv->lvds_ssc_freq = + general->ssc_freq ? 66 : 48; + else + dev_priv->lvds_ssc_freq = + general->ssc_freq ? 
100 : 96; } } } @@ -275,6 +296,25 @@ parse_sdvo_device_mapping(struct drm_i915_private *dev_priv, } return; } + +static void +parse_driver_features(struct drm_i915_private *dev_priv, + struct bdb_header *bdb) +{ + struct drm_device *dev = dev_priv->dev; + struct bdb_driver_features *driver; + + /* set default for chips without eDP */ + if (!SUPPORTS_EDP(dev)) { + dev_priv->edp_support = 0; + return; + } + + driver = find_section(bdb, BDB_DRIVER_FEATURES); + if (driver && driver->lvds_config == BDB_DRIVER_FEATURE_EDP) + dev_priv->edp_support = 1; +} + /** * intel_init_bios - initialize VBIOS settings & find VBT * @dev: DRM device @@ -325,6 +365,8 @@ intel_init_bios(struct drm_device *dev) parse_lfp_panel_data(dev_priv, bdb); parse_sdvo_panel_data(dev_priv, bdb); parse_sdvo_device_mapping(dev_priv, bdb); + parse_driver_features(dev_priv, bdb); + pci_unmap_rom(pdev, bios); return 0; diff --git a/drivers/gpu/drm/i915/intel_bios.h b/drivers/gpu/drm/i915/intel_bios.h index fe72e1c225d..0f8e5f69ac7 100644 --- a/drivers/gpu/drm/i915/intel_bios.h +++ b/drivers/gpu/drm/i915/intel_bios.h @@ -381,6 +381,51 @@ struct bdb_sdvo_lvds_options { } __attribute__((packed)); +#define BDB_DRIVER_FEATURE_NO_LVDS 0 +#define BDB_DRIVER_FEATURE_INT_LVDS 1 +#define BDB_DRIVER_FEATURE_SDVO_LVDS 2 +#define BDB_DRIVER_FEATURE_EDP 3 + +struct bdb_driver_features { + u8 boot_dev_algorithm:1; + u8 block_display_switch:1; + u8 allow_display_switch:1; + u8 hotplug_dvo:1; + u8 dual_view_zoom:1; + u8 int15h_hook:1; + u8 sprite_in_clone:1; + u8 primary_lfp_id:1; + + u16 boot_mode_x; + u16 boot_mode_y; + u8 boot_mode_bpp; + u8 boot_mode_refresh; + + u16 enable_lfp_primary:1; + u16 selective_mode_pruning:1; + u16 dual_frequency:1; + u16 render_clock_freq:1; /* 0: high freq; 1: low freq */ + u16 nt_clone_support:1; + u16 power_scheme_ui:1; /* 0: CUI; 1: 3rd party */ + u16 sprite_display_assign:1; /* 0: secondary; 1: primary */ + u16 cui_aspect_scaling:1; + u16 preserve_aspect_ratio:1; + u16 sdvo_device_power_down:1; + u16 crt_hotplug:1; + u16 lvds_config:2; + u16 tv_hotplug:1; + u16 hdmi_config:2; + + u8 static_display:1; + u8 reserved2:7; + u16 legacy_crt_max_x; + u16 legacy_crt_max_y; + u8 legacy_crt_max_refresh; + + u8 hdmi_termination; + u8 custom_vbt_version; +} __attribute__((packed)); + bool intel_init_bios(struct drm_device *dev); /* diff --git a/drivers/gpu/drm/i915/intel_crt.c b/drivers/gpu/drm/i915/intel_crt.c index 6de97fc6602..4cf8e2e88a4 100644 --- a/drivers/gpu/drm/i915/intel_crt.c +++ b/drivers/gpu/drm/i915/intel_crt.c @@ -46,7 +46,7 @@ static void intel_crt_dpms(struct drm_encoder *encoder, int mode) temp = I915_READ(reg); temp &= ~(ADPA_HSYNC_CNTL_DISABLE | ADPA_VSYNC_CNTL_DISABLE); - temp |= ADPA_DAC_ENABLE; + temp &= ~ADPA_DAC_ENABLE; switch(mode) { case DRM_MODE_DPMS_ON: @@ -156,6 +156,9 @@ static bool intel_igdng_crt_detect_hotplug(struct drm_connector *connector) temp = adpa = I915_READ(PCH_ADPA); + adpa &= ~ADPA_DAC_ENABLE; + I915_WRITE(PCH_ADPA, adpa); + adpa &= ~ADPA_CRT_HOTPLUG_MASK; adpa |= (ADPA_CRT_HOTPLUG_PERIOD_128 | @@ -169,13 +172,14 @@ static bool intel_igdng_crt_detect_hotplug(struct drm_connector *connector) DRM_DEBUG("pch crt adpa 0x%x", adpa); I915_WRITE(PCH_ADPA, adpa); - /* This might not be needed as not specified in spec...*/ - udelay(1000); + while ((I915_READ(PCH_ADPA) & ADPA_CRT_HOTPLUG_FORCE_TRIGGER) != 0) + ; /* Check the status to see if both blue and green are on now */ adpa = I915_READ(PCH_ADPA); - if ((adpa & ADPA_CRT_HOTPLUG_MONITOR_MASK) == - ADPA_CRT_HOTPLUG_MONITOR_COLOR) + 
adpa &= ADPA_CRT_HOTPLUG_MONITOR_MASK; + if ((adpa == ADPA_CRT_HOTPLUG_MONITOR_COLOR) || + (adpa == ADPA_CRT_HOTPLUG_MONITOR_MONO)) ret = true; else ret = false; @@ -428,8 +432,34 @@ static void intel_crt_destroy(struct drm_connector *connector) static int intel_crt_get_modes(struct drm_connector *connector) { + int ret; struct intel_output *intel_output = to_intel_output(connector); - return intel_ddc_get_modes(intel_output); + struct i2c_adapter *ddcbus; + struct drm_device *dev = connector->dev; + + + ret = intel_ddc_get_modes(intel_output); + if (ret || !IS_G4X(dev)) + goto end; + + ddcbus = intel_output->ddc_bus; + /* Try to probe digital port for output in DVI-I -> VGA mode. */ + intel_output->ddc_bus = + intel_i2c_create(connector->dev, GPIOD, "CRTDDC_D"); + + if (!intel_output->ddc_bus) { + intel_output->ddc_bus = ddcbus; + dev_printk(KERN_ERR, &connector->dev->pdev->dev, + "DDC bus registration failed for CRTDDC_D.\n"); + goto end; + } + /* Try to get modes by GPIOD port */ + ret = intel_ddc_get_modes(intel_output); + intel_i2c_destroy(ddcbus); + +end: + return ret; + } static int intel_crt_set_property(struct drm_connector *connector, diff --git a/drivers/gpu/drm/i915/intel_display.c b/drivers/gpu/drm/i915/intel_display.c index 3e1c7816211..d6fce213341 100644 --- a/drivers/gpu/drm/i915/intel_display.c +++ b/drivers/gpu/drm/i915/intel_display.c @@ -25,14 +25,19 @@ */ #include <linux/i2c.h> +#include <linux/kernel.h> #include "drmP.h" #include "intel_drv.h" #include "i915_drm.h" #include "i915_drv.h" +#include "intel_dp.h" #include "drm_crtc_helper.h" +#define HAS_eDP (intel_pipe_has_type(crtc, INTEL_OUTPUT_EDP)) + bool intel_pipe_has_type (struct drm_crtc *crtc, int type); +static void intel_update_watermarks(struct drm_device *dev); typedef struct { /* given values */ @@ -85,7 +90,7 @@ struct intel_limit { #define I8XX_P2_SLOW 4 #define I8XX_P2_FAST 2 #define I8XX_P2_LVDS_SLOW 14 -#define I8XX_P2_LVDS_FAST 14 /* No fast option */ +#define I8XX_P2_LVDS_FAST 7 #define I8XX_P2_SLOW_LIMIT 165000 #define I9XX_DOT_MIN 20000 @@ -127,19 +132,6 @@ struct intel_limit { #define I9XX_P2_LVDS_FAST 7 #define I9XX_P2_LVDS_SLOW_LIMIT 112000 -#define INTEL_LIMIT_I8XX_DVO_DAC 0 -#define INTEL_LIMIT_I8XX_LVDS 1 -#define INTEL_LIMIT_I9XX_SDVO_DAC 2 -#define INTEL_LIMIT_I9XX_LVDS 3 -#define INTEL_LIMIT_G4X_SDVO 4 -#define INTEL_LIMIT_G4X_HDMI_DAC 5 -#define INTEL_LIMIT_G4X_SINGLE_CHANNEL_LVDS 6 -#define INTEL_LIMIT_G4X_DUAL_CHANNEL_LVDS 7 -#define INTEL_LIMIT_IGD_SDVO_DAC 8 -#define INTEL_LIMIT_IGD_LVDS 9 -#define INTEL_LIMIT_IGDNG_SDVO_DAC 10 -#define INTEL_LIMIT_IGDNG_LVDS 11 - /*The parameter is for SDVO on G4x platform*/ #define G4X_DOT_SDVO_MIN 25000 #define G4X_DOT_SDVO_MAX 270000 @@ -218,6 +210,25 @@ struct intel_limit { #define G4X_P2_DUAL_CHANNEL_LVDS_FAST 7 #define G4X_P2_DUAL_CHANNEL_LVDS_LIMIT 0 +/*The parameter is for DISPLAY PORT on G4x platform*/ +#define G4X_DOT_DISPLAY_PORT_MIN 161670 +#define G4X_DOT_DISPLAY_PORT_MAX 227000 +#define G4X_N_DISPLAY_PORT_MIN 1 +#define G4X_N_DISPLAY_PORT_MAX 2 +#define G4X_M_DISPLAY_PORT_MIN 97 +#define G4X_M_DISPLAY_PORT_MAX 108 +#define G4X_M1_DISPLAY_PORT_MIN 0x10 +#define G4X_M1_DISPLAY_PORT_MAX 0x12 +#define G4X_M2_DISPLAY_PORT_MIN 0x05 +#define G4X_M2_DISPLAY_PORT_MAX 0x06 +#define G4X_P_DISPLAY_PORT_MIN 10 +#define G4X_P_DISPLAY_PORT_MAX 20 +#define G4X_P1_DISPLAY_PORT_MIN 1 +#define G4X_P1_DISPLAY_PORT_MAX 2 +#define G4X_P2_DISPLAY_PORT_SLOW 10 +#define G4X_P2_DISPLAY_PORT_FAST 10 +#define G4X_P2_DISPLAY_PORT_LIMIT 0 + /* IGDNG */ /* as we 
calculate clock using (register_value + 2) for N/M1/M2, so here the range value for them is (actual_value-2). @@ -256,8 +267,14 @@ static bool intel_igdng_find_best_PLL(const intel_limit_t *limit, struct drm_crtc *crtc, int target, int refclk, intel_clock_t *best_clock); -static const intel_limit_t intel_limits[] = { - { /* INTEL_LIMIT_I8XX_DVO_DAC */ +static bool +intel_find_pll_g4x_dp(const intel_limit_t *, struct drm_crtc *crtc, + int target, int refclk, intel_clock_t *best_clock); +static bool +intel_find_pll_igdng_dp(const intel_limit_t *, struct drm_crtc *crtc, + int target, int refclk, intel_clock_t *best_clock); + +static const intel_limit_t intel_limits_i8xx_dvo = { .dot = { .min = I8XX_DOT_MIN, .max = I8XX_DOT_MAX }, .vco = { .min = I8XX_VCO_MIN, .max = I8XX_VCO_MAX }, .n = { .min = I8XX_N_MIN, .max = I8XX_N_MAX }, @@ -269,8 +286,9 @@ static const intel_limit_t intel_limits[] = { .p2 = { .dot_limit = I8XX_P2_SLOW_LIMIT, .p2_slow = I8XX_P2_SLOW, .p2_fast = I8XX_P2_FAST }, .find_pll = intel_find_best_PLL, - }, - { /* INTEL_LIMIT_I8XX_LVDS */ +}; + +static const intel_limit_t intel_limits_i8xx_lvds = { .dot = { .min = I8XX_DOT_MIN, .max = I8XX_DOT_MAX }, .vco = { .min = I8XX_VCO_MIN, .max = I8XX_VCO_MAX }, .n = { .min = I8XX_N_MIN, .max = I8XX_N_MAX }, @@ -282,8 +300,9 @@ static const intel_limit_t intel_limits[] = { .p2 = { .dot_limit = I8XX_P2_SLOW_LIMIT, .p2_slow = I8XX_P2_LVDS_SLOW, .p2_fast = I8XX_P2_LVDS_FAST }, .find_pll = intel_find_best_PLL, - }, - { /* INTEL_LIMIT_I9XX_SDVO_DAC */ +}; + +static const intel_limit_t intel_limits_i9xx_sdvo = { .dot = { .min = I9XX_DOT_MIN, .max = I9XX_DOT_MAX }, .vco = { .min = I9XX_VCO_MIN, .max = I9XX_VCO_MAX }, .n = { .min = I9XX_N_MIN, .max = I9XX_N_MAX }, @@ -295,8 +314,9 @@ static const intel_limit_t intel_limits[] = { .p2 = { .dot_limit = I9XX_P2_SDVO_DAC_SLOW_LIMIT, .p2_slow = I9XX_P2_SDVO_DAC_SLOW, .p2_fast = I9XX_P2_SDVO_DAC_FAST }, .find_pll = intel_find_best_PLL, - }, - { /* INTEL_LIMIT_I9XX_LVDS */ +}; + +static const intel_limit_t intel_limits_i9xx_lvds = { .dot = { .min = I9XX_DOT_MIN, .max = I9XX_DOT_MAX }, .vco = { .min = I9XX_VCO_MIN, .max = I9XX_VCO_MAX }, .n = { .min = I9XX_N_MIN, .max = I9XX_N_MAX }, @@ -311,9 +331,10 @@ static const intel_limit_t intel_limits[] = { .p2 = { .dot_limit = I9XX_P2_LVDS_SLOW_LIMIT, .p2_slow = I9XX_P2_LVDS_SLOW, .p2_fast = I9XX_P2_LVDS_FAST }, .find_pll = intel_find_best_PLL, - }, +}; + /* below parameter and function is for G4X Chipset Family*/ - { /* INTEL_LIMIT_G4X_SDVO */ +static const intel_limit_t intel_limits_g4x_sdvo = { .dot = { .min = G4X_DOT_SDVO_MIN, .max = G4X_DOT_SDVO_MAX }, .vco = { .min = G4X_VCO_MIN, .max = G4X_VCO_MAX}, .n = { .min = G4X_N_SDVO_MIN, .max = G4X_N_SDVO_MAX }, @@ -327,8 +348,9 @@ static const intel_limit_t intel_limits[] = { .p2_fast = G4X_P2_SDVO_FAST }, .find_pll = intel_g4x_find_best_PLL, - }, - { /* INTEL_LIMIT_G4X_HDMI_DAC */ +}; + +static const intel_limit_t intel_limits_g4x_hdmi = { .dot = { .min = G4X_DOT_HDMI_DAC_MIN, .max = G4X_DOT_HDMI_DAC_MAX }, .vco = { .min = G4X_VCO_MIN, .max = G4X_VCO_MAX}, .n = { .min = G4X_N_HDMI_DAC_MIN, .max = G4X_N_HDMI_DAC_MAX }, @@ -342,8 +364,9 @@ static const intel_limit_t intel_limits[] = { .p2_fast = G4X_P2_HDMI_DAC_FAST }, .find_pll = intel_g4x_find_best_PLL, - }, - { /* INTEL_LIMIT_G4X_SINGLE_CHANNEL_LVDS */ +}; + +static const intel_limit_t intel_limits_g4x_single_channel_lvds = { .dot = { .min = G4X_DOT_SINGLE_CHANNEL_LVDS_MIN, .max = G4X_DOT_SINGLE_CHANNEL_LVDS_MAX }, .vco = { .min = G4X_VCO_MIN, @@ -365,8 +388,9 
@@ static const intel_limit_t intel_limits[] = { .p2_fast = G4X_P2_SINGLE_CHANNEL_LVDS_FAST }, .find_pll = intel_g4x_find_best_PLL, - }, - { /* INTEL_LIMIT_G4X_DUAL_CHANNEL_LVDS */ +}; + +static const intel_limit_t intel_limits_g4x_dual_channel_lvds = { .dot = { .min = G4X_DOT_DUAL_CHANNEL_LVDS_MIN, .max = G4X_DOT_DUAL_CHANNEL_LVDS_MAX }, .vco = { .min = G4X_VCO_MIN, @@ -388,8 +412,32 @@ static const intel_limit_t intel_limits[] = { .p2_fast = G4X_P2_DUAL_CHANNEL_LVDS_FAST }, .find_pll = intel_g4x_find_best_PLL, - }, - { /* INTEL_LIMIT_IGD_SDVO */ +}; + +static const intel_limit_t intel_limits_g4x_display_port = { + .dot = { .min = G4X_DOT_DISPLAY_PORT_MIN, + .max = G4X_DOT_DISPLAY_PORT_MAX }, + .vco = { .min = G4X_VCO_MIN, + .max = G4X_VCO_MAX}, + .n = { .min = G4X_N_DISPLAY_PORT_MIN, + .max = G4X_N_DISPLAY_PORT_MAX }, + .m = { .min = G4X_M_DISPLAY_PORT_MIN, + .max = G4X_M_DISPLAY_PORT_MAX }, + .m1 = { .min = G4X_M1_DISPLAY_PORT_MIN, + .max = G4X_M1_DISPLAY_PORT_MAX }, + .m2 = { .min = G4X_M2_DISPLAY_PORT_MIN, + .max = G4X_M2_DISPLAY_PORT_MAX }, + .p = { .min = G4X_P_DISPLAY_PORT_MIN, + .max = G4X_P_DISPLAY_PORT_MAX }, + .p1 = { .min = G4X_P1_DISPLAY_PORT_MIN, + .max = G4X_P1_DISPLAY_PORT_MAX}, + .p2 = { .dot_limit = G4X_P2_DISPLAY_PORT_LIMIT, + .p2_slow = G4X_P2_DISPLAY_PORT_SLOW, + .p2_fast = G4X_P2_DISPLAY_PORT_FAST }, + .find_pll = intel_find_pll_g4x_dp, +}; + +static const intel_limit_t intel_limits_igd_sdvo = { .dot = { .min = I9XX_DOT_MIN, .max = I9XX_DOT_MAX}, .vco = { .min = IGD_VCO_MIN, .max = IGD_VCO_MAX }, .n = { .min = IGD_N_MIN, .max = IGD_N_MAX }, @@ -401,8 +449,9 @@ static const intel_limit_t intel_limits[] = { .p2 = { .dot_limit = I9XX_P2_SDVO_DAC_SLOW_LIMIT, .p2_slow = I9XX_P2_SDVO_DAC_SLOW, .p2_fast = I9XX_P2_SDVO_DAC_FAST }, .find_pll = intel_find_best_PLL, - }, - { /* INTEL_LIMIT_IGD_LVDS */ +}; + +static const intel_limit_t intel_limits_igd_lvds = { .dot = { .min = I9XX_DOT_MIN, .max = I9XX_DOT_MAX }, .vco = { .min = IGD_VCO_MIN, .max = IGD_VCO_MAX }, .n = { .min = IGD_N_MIN, .max = IGD_N_MAX }, @@ -415,8 +464,9 @@ static const intel_limit_t intel_limits[] = { .p2 = { .dot_limit = I9XX_P2_LVDS_SLOW_LIMIT, .p2_slow = I9XX_P2_LVDS_SLOW, .p2_fast = I9XX_P2_LVDS_SLOW }, .find_pll = intel_find_best_PLL, - }, - { /* INTEL_LIMIT_IGDNG_SDVO_DAC */ +}; + +static const intel_limit_t intel_limits_igdng_sdvo = { .dot = { .min = IGDNG_DOT_MIN, .max = IGDNG_DOT_MAX }, .vco = { .min = IGDNG_VCO_MIN, .max = IGDNG_VCO_MAX }, .n = { .min = IGDNG_N_MIN, .max = IGDNG_N_MAX }, @@ -429,8 +479,9 @@ static const intel_limit_t intel_limits[] = { .p2_slow = IGDNG_P2_SDVO_DAC_SLOW, .p2_fast = IGDNG_P2_SDVO_DAC_FAST }, .find_pll = intel_igdng_find_best_PLL, - }, - { /* INTEL_LIMIT_IGDNG_LVDS */ +}; + +static const intel_limit_t intel_limits_igdng_lvds = { .dot = { .min = IGDNG_DOT_MIN, .max = IGDNG_DOT_MAX }, .vco = { .min = IGDNG_VCO_MIN, .max = IGDNG_VCO_MAX }, .n = { .min = IGDNG_N_MIN, .max = IGDNG_N_MAX }, @@ -443,16 +494,15 @@ static const intel_limit_t intel_limits[] = { .p2_slow = IGDNG_P2_LVDS_SLOW, .p2_fast = IGDNG_P2_LVDS_FAST }, .find_pll = intel_igdng_find_best_PLL, - }, }; static const intel_limit_t *intel_igdng_limit(struct drm_crtc *crtc) { const intel_limit_t *limit; if (intel_pipe_has_type(crtc, INTEL_OUTPUT_LVDS)) - limit = &intel_limits[INTEL_LIMIT_IGDNG_LVDS]; + limit = &intel_limits_igdng_lvds; else - limit = &intel_limits[INTEL_LIMIT_IGDNG_SDVO_DAC]; + limit = &intel_limits_igdng_sdvo; return limit; } @@ -467,19 +517,19 @@ static const intel_limit_t 
*intel_g4x_limit(struct drm_crtc *crtc) if ((I915_READ(LVDS) & LVDS_CLKB_POWER_MASK) == LVDS_CLKB_POWER_UP) /* LVDS with dual channel */ - limit = &intel_limits - [INTEL_LIMIT_G4X_DUAL_CHANNEL_LVDS]; + limit = &intel_limits_g4x_dual_channel_lvds; else /* LVDS with dual channel */ - limit = &intel_limits - [INTEL_LIMIT_G4X_SINGLE_CHANNEL_LVDS]; + limit = &intel_limits_g4x_single_channel_lvds; } else if (intel_pipe_has_type(crtc, INTEL_OUTPUT_HDMI) || intel_pipe_has_type(crtc, INTEL_OUTPUT_ANALOG)) { - limit = &intel_limits[INTEL_LIMIT_G4X_HDMI_DAC]; + limit = &intel_limits_g4x_hdmi; } else if (intel_pipe_has_type(crtc, INTEL_OUTPUT_SDVO)) { - limit = &intel_limits[INTEL_LIMIT_G4X_SDVO]; + limit = &intel_limits_g4x_sdvo; + } else if (intel_pipe_has_type (crtc, INTEL_OUTPUT_DISPLAYPORT)) { + limit = &intel_limits_g4x_display_port; } else /* The option is for other outputs */ - limit = &intel_limits[INTEL_LIMIT_I9XX_SDVO_DAC]; + limit = &intel_limits_i9xx_sdvo; return limit; } @@ -495,19 +545,19 @@ static const intel_limit_t *intel_limit(struct drm_crtc *crtc) limit = intel_g4x_limit(crtc); } else if (IS_I9XX(dev) && !IS_IGD(dev)) { if (intel_pipe_has_type(crtc, INTEL_OUTPUT_LVDS)) - limit = &intel_limits[INTEL_LIMIT_I9XX_LVDS]; + limit = &intel_limits_i9xx_lvds; else - limit = &intel_limits[INTEL_LIMIT_I9XX_SDVO_DAC]; + limit = &intel_limits_i9xx_sdvo; } else if (IS_IGD(dev)) { if (intel_pipe_has_type(crtc, INTEL_OUTPUT_LVDS)) - limit = &intel_limits[INTEL_LIMIT_IGD_LVDS]; + limit = &intel_limits_igd_lvds; else - limit = &intel_limits[INTEL_LIMIT_IGD_SDVO_DAC]; + limit = &intel_limits_igd_sdvo; } else { if (intel_pipe_has_type(crtc, INTEL_OUTPUT_LVDS)) - limit = &intel_limits[INTEL_LIMIT_I8XX_LVDS]; + limit = &intel_limits_i8xx_lvds; else - limit = &intel_limits[INTEL_LIMIT_I8XX_DVO_DAC]; + limit = &intel_limits_i8xx_dvo; } return limit; } @@ -553,6 +603,23 @@ bool intel_pipe_has_type (struct drm_crtc *crtc, int type) return false; } +struct drm_connector * +intel_pipe_get_output (struct drm_crtc *crtc) +{ + struct drm_device *dev = crtc->dev; + struct drm_mode_config *mode_config = &dev->mode_config; + struct drm_connector *l_entry, *ret = NULL; + + list_for_each_entry(l_entry, &mode_config->connector_list, head) { + if (l_entry->encoder && + l_entry->encoder->crtc == crtc) { + ret = l_entry; + break; + } + } + return ret; +} + #define INTELPllInvalid(s) do { /* DRM_DEBUG(s); */ return false; } while (0) /** * Returns whether the given set of divisors are valid for a given refclk with @@ -600,7 +667,7 @@ intel_find_best_PLL(const intel_limit_t *limit, struct drm_crtc *crtc, int err = target; if (IS_I9XX(dev) && intel_pipe_has_type(crtc, INTEL_OUTPUT_LVDS) && - (I915_READ(LVDS) & LVDS_PORT_EN) != 0) { + (I915_READ(LVDS)) != 0) { /* * For LVDS, if the panel is on, just rely on its current * settings for dual-channel. 
We haven't figured out how to @@ -707,6 +774,30 @@ intel_g4x_find_best_PLL(const intel_limit_t *limit, struct drm_crtc *crtc, } static bool +intel_find_pll_igdng_dp(const intel_limit_t *limit, struct drm_crtc *crtc, + int target, int refclk, intel_clock_t *best_clock) +{ + struct drm_device *dev = crtc->dev; + intel_clock_t clock; + if (target < 200000) { + clock.n = 1; + clock.p1 = 2; + clock.p2 = 10; + clock.m1 = 12; + clock.m2 = 9; + } else { + clock.n = 2; + clock.p1 = 1; + clock.p2 = 10; + clock.m1 = 14; + clock.m2 = 8; + } + intel_clock(dev, refclk, &clock); + memcpy(best_clock, &clock, sizeof(intel_clock_t)); + return true; +} + +static bool intel_igdng_find_best_PLL(const intel_limit_t *limit, struct drm_crtc *crtc, int target, int refclk, intel_clock_t *best_clock) { @@ -718,6 +809,14 @@ intel_igdng_find_best_PLL(const intel_limit_t *limit, struct drm_crtc *crtc, int err_most = 47; found = false; + /* eDP has only 2 clock choice, no n/m/p setting */ + if (HAS_eDP) + return true; + + if (intel_pipe_has_type(crtc, INTEL_OUTPUT_DISPLAYPORT)) + return intel_find_pll_igdng_dp(limit, crtc, target, + refclk, best_clock); + if (intel_pipe_has_type(crtc, INTEL_OUTPUT_LVDS)) { if ((I915_READ(LVDS) & LVDS_CLKB_POWER_MASK) == LVDS_CLKB_POWER_UP) @@ -764,6 +863,32 @@ out: return found; } +/* DisplayPort has only two frequencies, 162MHz and 270MHz */ +static bool +intel_find_pll_g4x_dp(const intel_limit_t *limit, struct drm_crtc *crtc, + int target, int refclk, intel_clock_t *best_clock) +{ + intel_clock_t clock; + if (target < 200000) { + clock.p1 = 2; + clock.p2 = 10; + clock.n = 2; + clock.m1 = 23; + clock.m2 = 8; + } else { + clock.p1 = 1; + clock.p2 = 10; + clock.n = 1; + clock.m1 = 14; + clock.m2 = 2; + } + clock.m = 5 * (clock.m1 + 2) + (clock.m2 + 2); + clock.p = (clock.p1 * clock.p2); + clock.dot = 96000 * clock.m / (clock.n + 2) / clock.p; + memcpy(best_clock, &clock, sizeof(intel_clock_t)); + return true; +} + void intel_wait_for_vblank(struct drm_device *dev) { @@ -927,13 +1052,97 @@ intel_pipe_set_base(struct drm_crtc *crtc, int x, int y, return 0; } +/* Disable the VGA plane that we never use */ +static void i915_disable_vga (struct drm_device *dev) +{ + struct drm_i915_private *dev_priv = dev->dev_private; + u8 sr1; + u32 vga_reg; + + if (IS_IGDNG(dev)) + vga_reg = CPU_VGACNTRL; + else + vga_reg = VGACNTRL; + + if (I915_READ(vga_reg) & VGA_DISP_DISABLE) + return; + + I915_WRITE8(VGA_SR_INDEX, 1); + sr1 = I915_READ8(VGA_SR_DATA); + I915_WRITE8(VGA_SR_DATA, sr1 | (1 << 5)); + udelay(100); + + I915_WRITE(vga_reg, VGA_DISP_DISABLE); +} + +static void igdng_disable_pll_edp (struct drm_crtc *crtc) +{ + struct drm_device *dev = crtc->dev; + struct drm_i915_private *dev_priv = dev->dev_private; + u32 dpa_ctl; + + DRM_DEBUG("\n"); + dpa_ctl = I915_READ(DP_A); + dpa_ctl &= ~DP_PLL_ENABLE; + I915_WRITE(DP_A, dpa_ctl); +} + +static void igdng_enable_pll_edp (struct drm_crtc *crtc) +{ + struct drm_device *dev = crtc->dev; + struct drm_i915_private *dev_priv = dev->dev_private; + u32 dpa_ctl; + + dpa_ctl = I915_READ(DP_A); + dpa_ctl |= DP_PLL_ENABLE; + I915_WRITE(DP_A, dpa_ctl); + udelay(200); +} + + +static void igdng_set_pll_edp (struct drm_crtc *crtc, int clock) +{ + struct drm_device *dev = crtc->dev; + struct drm_i915_private *dev_priv = dev->dev_private; + u32 dpa_ctl; + + DRM_DEBUG("eDP PLL enable for clock %d\n", clock); + dpa_ctl = I915_READ(DP_A); + dpa_ctl &= ~DP_PLL_FREQ_MASK; + + if (clock < 200000) { + u32 temp; + dpa_ctl |= DP_PLL_FREQ_160MHZ; + /* workaround for 160Mhz: + 1) 
program 0x4600c bits 15:0 = 0x8124 + 2) program 0x46010 bit 0 = 1 + 3) program 0x46034 bit 24 = 1 + 4) program 0x64000 bit 14 = 1 + */ + temp = I915_READ(0x4600c); + temp &= 0xffff0000; + I915_WRITE(0x4600c, temp | 0x8124); + + temp = I915_READ(0x46010); + I915_WRITE(0x46010, temp | 1); + + temp = I915_READ(0x46034); + I915_WRITE(0x46034, temp | (1 << 24)); + } else { + dpa_ctl |= DP_PLL_FREQ_270MHZ; + } + I915_WRITE(DP_A, dpa_ctl); + + udelay(500); +} + static void igdng_crtc_dpms(struct drm_crtc *crtc, int mode) { struct drm_device *dev = crtc->dev; struct drm_i915_private *dev_priv = dev->dev_private; struct intel_crtc *intel_crtc = to_intel_crtc(crtc); int pipe = intel_crtc->pipe; - int plane = intel_crtc->pipe; + int plane = intel_crtc->plane; int pch_dpll_reg = (pipe == 0) ? PCH_DPLL_A : PCH_DPLL_B; int pipeconf_reg = (pipe == 0) ? PIPEACONF : PIPEBCONF; int dspcntr_reg = (plane == 0) ? DSPACNTR : DSPBCNTR; @@ -944,6 +1153,7 @@ static void igdng_crtc_dpms(struct drm_crtc *crtc, int mode) int fdi_rx_imr_reg = (pipe == 0) ? FDI_RXA_IMR : FDI_RXB_IMR; int transconf_reg = (pipe == 0) ? TRANSACONF : TRANSBCONF; int pf_ctl_reg = (pipe == 0) ? PFA_CTL_1 : PFB_CTL_1; + int pf_win_size = (pipe == 0) ? PFA_WIN_SZ : PFB_WIN_SZ; int cpu_htot_reg = (pipe == 0) ? HTOTAL_A : HTOTAL_B; int cpu_hblank_reg = (pipe == 0) ? HBLANK_A : HBLANK_B; int cpu_hsync_reg = (pipe == 0) ? HSYNC_A : HSYNC_B; @@ -957,7 +1167,7 @@ static void igdng_crtc_dpms(struct drm_crtc *crtc, int mode) int trans_vblank_reg = (pipe == 0) ? TRANS_VBLANK_A : TRANS_VBLANK_B; int trans_vsync_reg = (pipe == 0) ? TRANS_VSYNC_A : TRANS_VSYNC_B; u32 temp; - int tries = 5, j; + int tries = 5, j, n; /* XXX: When our outputs are all unaware of DPMS modes other than off * and on, we should map those modes to DRM_MODE_DPMS_OFF in the CRTC. 
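For context on the two fixed eDP PLL settings chosen in igdng_set_pll_edp() above: the "160 MHz" and 270 MHz selections correspond to the two DisplayPort link symbol clocks of roughly 162 MHz and 270 MHz (1.62 and 2.7 Gbps per lane before 8b/10b coding), so a link clock below the 200000 kHz cutoff always means the low-rate link. The sketch below shows the bandwidth arithmetic implied by that choice; the helper names and example mode are assumptions, not functions from the patch.

/* Sketch only: relates the DP_PLL_FREQ_160MHZ/270MHZ choice to link
 * bandwidth; 8 data bits are carried per 10-bit symbol. */
#include <stdbool.h>
#include <stdio.h>

static long lane_payload_kbps(int link_clock_khz)
{
        return (long)link_clock_khz * 8;    /* one symbol per link clock */
}

static bool mode_fits_link(int pixel_clock_khz, int bpp,
                           int link_clock_khz, int lanes)
{
        long required  = (long)pixel_clock_khz * bpp;  /* kbit/s of pixels */
        long available = lane_payload_kbps(link_clock_khz) * lanes;
        return required <= available;
}

int main(void)
{
        /* assumed 1280x800@60 panel (~83.5 MHz dot clock), 24 bpp, 1 lane */
        printf("fits on 1 lane @ 162 MHz: %d\n",
               mode_fits_link(83500, 24, 162000, 1));
        printf("fits on 1 lane @ 270 MHz: %d\n",
               mode_fits_link(83500, 24, 270000, 1));
        return 0;
}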
@@ -967,27 +1177,32 @@ static void igdng_crtc_dpms(struct drm_crtc *crtc, int mode) case DRM_MODE_DPMS_STANDBY: case DRM_MODE_DPMS_SUSPEND: DRM_DEBUG("crtc %d dpms on\n", pipe); - /* enable PCH DPLL */ - temp = I915_READ(pch_dpll_reg); - if ((temp & DPLL_VCO_ENABLE) == 0) { - I915_WRITE(pch_dpll_reg, temp | DPLL_VCO_ENABLE); - I915_READ(pch_dpll_reg); - } - - /* enable PCH FDI RX PLL, wait warmup plus DMI latency */ - temp = I915_READ(fdi_rx_reg); - I915_WRITE(fdi_rx_reg, temp | FDI_RX_PLL_ENABLE | - FDI_SEL_PCDCLK | - FDI_DP_PORT_WIDTH_X4); /* default 4 lanes */ - I915_READ(fdi_rx_reg); - udelay(200); + if (HAS_eDP) { + /* enable eDP PLL */ + igdng_enable_pll_edp(crtc); + } else { + /* enable PCH DPLL */ + temp = I915_READ(pch_dpll_reg); + if ((temp & DPLL_VCO_ENABLE) == 0) { + I915_WRITE(pch_dpll_reg, temp | DPLL_VCO_ENABLE); + I915_READ(pch_dpll_reg); + } - /* Enable CPU FDI TX PLL, always on for IGDNG */ - temp = I915_READ(fdi_tx_reg); - if ((temp & FDI_TX_PLL_ENABLE) == 0) { - I915_WRITE(fdi_tx_reg, temp | FDI_TX_PLL_ENABLE); - I915_READ(fdi_tx_reg); - udelay(100); + /* enable PCH FDI RX PLL, wait warmup plus DMI latency */ + temp = I915_READ(fdi_rx_reg); + I915_WRITE(fdi_rx_reg, temp | FDI_RX_PLL_ENABLE | + FDI_SEL_PCDCLK | + FDI_DP_PORT_WIDTH_X4); /* default 4 lanes */ + I915_READ(fdi_rx_reg); + udelay(200); + + /* Enable CPU FDI TX PLL, always on for IGDNG */ + temp = I915_READ(fdi_tx_reg); + if ((temp & FDI_TX_PLL_ENABLE) == 0) { + I915_WRITE(fdi_tx_reg, temp | FDI_TX_PLL_ENABLE); + I915_READ(fdi_tx_reg); + udelay(100); + } } /* Enable CPU pipe */ @@ -1006,122 +1221,126 @@ static void igdng_crtc_dpms(struct drm_crtc *crtc, int mode) I915_WRITE(dspbase_reg, I915_READ(dspbase_reg)); } - /* enable CPU FDI TX and PCH FDI RX */ - temp = I915_READ(fdi_tx_reg); - temp |= FDI_TX_ENABLE; - temp |= FDI_DP_PORT_WIDTH_X4; /* default */ - temp &= ~FDI_LINK_TRAIN_NONE; - temp |= FDI_LINK_TRAIN_PATTERN_1; - I915_WRITE(fdi_tx_reg, temp); - I915_READ(fdi_tx_reg); + if (!HAS_eDP) { + /* enable CPU FDI TX and PCH FDI RX */ + temp = I915_READ(fdi_tx_reg); + temp |= FDI_TX_ENABLE; + temp |= FDI_DP_PORT_WIDTH_X4; /* default */ + temp &= ~FDI_LINK_TRAIN_NONE; + temp |= FDI_LINK_TRAIN_PATTERN_1; + I915_WRITE(fdi_tx_reg, temp); + I915_READ(fdi_tx_reg); - temp = I915_READ(fdi_rx_reg); - temp &= ~FDI_LINK_TRAIN_NONE; - temp |= FDI_LINK_TRAIN_PATTERN_1; - I915_WRITE(fdi_rx_reg, temp | FDI_RX_ENABLE); - I915_READ(fdi_rx_reg); + temp = I915_READ(fdi_rx_reg); + temp &= ~FDI_LINK_TRAIN_NONE; + temp |= FDI_LINK_TRAIN_PATTERN_1; + I915_WRITE(fdi_rx_reg, temp | FDI_RX_ENABLE); + I915_READ(fdi_rx_reg); - udelay(150); + udelay(150); - /* Train FDI. */ - /* umask FDI RX Interrupt symbol_lock and bit_lock bit - for train result */ - temp = I915_READ(fdi_rx_imr_reg); - temp &= ~FDI_RX_SYMBOL_LOCK; - temp &= ~FDI_RX_BIT_LOCK; - I915_WRITE(fdi_rx_imr_reg, temp); - I915_READ(fdi_rx_imr_reg); - udelay(150); + /* Train FDI. 
*/ + /* umask FDI RX Interrupt symbol_lock and bit_lock bit + for train result */ + temp = I915_READ(fdi_rx_imr_reg); + temp &= ~FDI_RX_SYMBOL_LOCK; + temp &= ~FDI_RX_BIT_LOCK; + I915_WRITE(fdi_rx_imr_reg, temp); + I915_READ(fdi_rx_imr_reg); + udelay(150); - temp = I915_READ(fdi_rx_iir_reg); - DRM_DEBUG("FDI_RX_IIR 0x%x\n", temp); + temp = I915_READ(fdi_rx_iir_reg); + DRM_DEBUG("FDI_RX_IIR 0x%x\n", temp); - if ((temp & FDI_RX_BIT_LOCK) == 0) { - for (j = 0; j < tries; j++) { - temp = I915_READ(fdi_rx_iir_reg); - DRM_DEBUG("FDI_RX_IIR 0x%x\n", temp); - if (temp & FDI_RX_BIT_LOCK) - break; - udelay(200); - } - if (j != tries) + if ((temp & FDI_RX_BIT_LOCK) == 0) { + for (j = 0; j < tries; j++) { + temp = I915_READ(fdi_rx_iir_reg); + DRM_DEBUG("FDI_RX_IIR 0x%x\n", temp); + if (temp & FDI_RX_BIT_LOCK) + break; + udelay(200); + } + if (j != tries) + I915_WRITE(fdi_rx_iir_reg, + temp | FDI_RX_BIT_LOCK); + else + DRM_DEBUG("train 1 fail\n"); + } else { I915_WRITE(fdi_rx_iir_reg, temp | FDI_RX_BIT_LOCK); - else - DRM_DEBUG("train 1 fail\n"); - } else { - I915_WRITE(fdi_rx_iir_reg, - temp | FDI_RX_BIT_LOCK); - DRM_DEBUG("train 1 ok 2!\n"); - } - temp = I915_READ(fdi_tx_reg); - temp &= ~FDI_LINK_TRAIN_NONE; - temp |= FDI_LINK_TRAIN_PATTERN_2; - I915_WRITE(fdi_tx_reg, temp); - - temp = I915_READ(fdi_rx_reg); - temp &= ~FDI_LINK_TRAIN_NONE; - temp |= FDI_LINK_TRAIN_PATTERN_2; - I915_WRITE(fdi_rx_reg, temp); + DRM_DEBUG("train 1 ok 2!\n"); + } + temp = I915_READ(fdi_tx_reg); + temp &= ~FDI_LINK_TRAIN_NONE; + temp |= FDI_LINK_TRAIN_PATTERN_2; + I915_WRITE(fdi_tx_reg, temp); + + temp = I915_READ(fdi_rx_reg); + temp &= ~FDI_LINK_TRAIN_NONE; + temp |= FDI_LINK_TRAIN_PATTERN_2; + I915_WRITE(fdi_rx_reg, temp); - udelay(150); + udelay(150); - temp = I915_READ(fdi_rx_iir_reg); - DRM_DEBUG("FDI_RX_IIR 0x%x\n", temp); + temp = I915_READ(fdi_rx_iir_reg); + DRM_DEBUG("FDI_RX_IIR 0x%x\n", temp); - if ((temp & FDI_RX_SYMBOL_LOCK) == 0) { - for (j = 0; j < tries; j++) { - temp = I915_READ(fdi_rx_iir_reg); - DRM_DEBUG("FDI_RX_IIR 0x%x\n", temp); - if (temp & FDI_RX_SYMBOL_LOCK) - break; - udelay(200); - } - if (j != tries) { + if ((temp & FDI_RX_SYMBOL_LOCK) == 0) { + for (j = 0; j < tries; j++) { + temp = I915_READ(fdi_rx_iir_reg); + DRM_DEBUG("FDI_RX_IIR 0x%x\n", temp); + if (temp & FDI_RX_SYMBOL_LOCK) + break; + udelay(200); + } + if (j != tries) { + I915_WRITE(fdi_rx_iir_reg, + temp | FDI_RX_SYMBOL_LOCK); + DRM_DEBUG("train 2 ok 1!\n"); + } else + DRM_DEBUG("train 2 fail\n"); + } else { I915_WRITE(fdi_rx_iir_reg, temp | FDI_RX_SYMBOL_LOCK); - DRM_DEBUG("train 2 ok 1!\n"); - } else - DRM_DEBUG("train 2 fail\n"); - } else { - I915_WRITE(fdi_rx_iir_reg, temp | FDI_RX_SYMBOL_LOCK); - DRM_DEBUG("train 2 ok 2!\n"); - } - DRM_DEBUG("train done\n"); + DRM_DEBUG("train 2 ok 2!\n"); + } + DRM_DEBUG("train done\n"); - /* set transcoder timing */ - I915_WRITE(trans_htot_reg, I915_READ(cpu_htot_reg)); - I915_WRITE(trans_hblank_reg, I915_READ(cpu_hblank_reg)); - I915_WRITE(trans_hsync_reg, I915_READ(cpu_hsync_reg)); + /* set transcoder timing */ + I915_WRITE(trans_htot_reg, I915_READ(cpu_htot_reg)); + I915_WRITE(trans_hblank_reg, I915_READ(cpu_hblank_reg)); + I915_WRITE(trans_hsync_reg, I915_READ(cpu_hsync_reg)); - I915_WRITE(trans_vtot_reg, I915_READ(cpu_vtot_reg)); - I915_WRITE(trans_vblank_reg, I915_READ(cpu_vblank_reg)); - I915_WRITE(trans_vsync_reg, I915_READ(cpu_vsync_reg)); + I915_WRITE(trans_vtot_reg, I915_READ(cpu_vtot_reg)); + I915_WRITE(trans_vblank_reg, I915_READ(cpu_vblank_reg)); + I915_WRITE(trans_vsync_reg, 
I915_READ(cpu_vsync_reg)); - /* enable PCH transcoder */ - temp = I915_READ(transconf_reg); - I915_WRITE(transconf_reg, temp | TRANS_ENABLE); - I915_READ(transconf_reg); + /* enable PCH transcoder */ + temp = I915_READ(transconf_reg); + I915_WRITE(transconf_reg, temp | TRANS_ENABLE); + I915_READ(transconf_reg); - while ((I915_READ(transconf_reg) & TRANS_STATE_ENABLE) == 0) - ; + while ((I915_READ(transconf_reg) & TRANS_STATE_ENABLE) == 0) + ; - /* enable normal */ + /* enable normal */ - temp = I915_READ(fdi_tx_reg); - temp &= ~FDI_LINK_TRAIN_NONE; - I915_WRITE(fdi_tx_reg, temp | FDI_LINK_TRAIN_NONE | - FDI_TX_ENHANCE_FRAME_ENABLE); - I915_READ(fdi_tx_reg); + temp = I915_READ(fdi_tx_reg); + temp &= ~FDI_LINK_TRAIN_NONE; + I915_WRITE(fdi_tx_reg, temp | FDI_LINK_TRAIN_NONE | + FDI_TX_ENHANCE_FRAME_ENABLE); + I915_READ(fdi_tx_reg); - temp = I915_READ(fdi_rx_reg); - temp &= ~FDI_LINK_TRAIN_NONE; - I915_WRITE(fdi_rx_reg, temp | FDI_LINK_TRAIN_NONE | - FDI_RX_ENHANCE_FRAME_ENABLE); - I915_READ(fdi_rx_reg); + temp = I915_READ(fdi_rx_reg); + temp &= ~FDI_LINK_TRAIN_NONE; + I915_WRITE(fdi_rx_reg, temp | FDI_LINK_TRAIN_NONE | + FDI_RX_ENHANCE_FRAME_ENABLE); + I915_READ(fdi_rx_reg); - /* wait one idle pattern time */ - udelay(100); + /* wait one idle pattern time */ + udelay(100); + + } intel_crtc_load_lut(crtc); @@ -1129,8 +1348,7 @@ static void igdng_crtc_dpms(struct drm_crtc *crtc, int mode) case DRM_MODE_DPMS_OFF: DRM_DEBUG("crtc %d dpms off\n", pipe); - /* Disable the VGA plane that we never use */ - I915_WRITE(CPU_VGACNTRL, VGA_DISP_DISABLE); + i915_disable_vga(dev); /* Disable display plane */ temp = I915_READ(dspcntr_reg); @@ -1146,17 +1364,23 @@ static void igdng_crtc_dpms(struct drm_crtc *crtc, int mode) if ((temp & PIPEACONF_ENABLE) != 0) { I915_WRITE(pipeconf_reg, temp & ~PIPEACONF_ENABLE); I915_READ(pipeconf_reg); + n = 0; /* wait for cpu pipe off, pipe state */ - while ((I915_READ(pipeconf_reg) & I965_PIPECONF_ACTIVE) != 0) - ; + while ((I915_READ(pipeconf_reg) & I965_PIPECONF_ACTIVE) != 0) { + n++; + if (n < 60) { + udelay(500); + continue; + } else { + DRM_DEBUG("pipe %d off delay\n", pipe); + break; + } + } } else DRM_DEBUG("crtc %d is disabled\n", pipe); - /* IGDNG-A : disable cpu panel fitter ? 
*/ - temp = I915_READ(pf_ctl_reg); - if ((temp & PF_ENABLE) != 0) { - I915_WRITE(pf_ctl_reg, temp & ~PF_ENABLE); - I915_READ(pf_ctl_reg); + if (HAS_eDP) { + igdng_disable_pll_edp(crtc); } /* disable CPU FDI tx and PCH FDI rx */ @@ -1168,6 +1392,8 @@ static void igdng_crtc_dpms(struct drm_crtc *crtc, int mode) I915_WRITE(fdi_rx_reg, temp & ~FDI_RX_ENABLE); I915_READ(fdi_rx_reg); + udelay(100); + /* still set train pattern 1 */ temp = I915_READ(fdi_tx_reg); temp &= ~FDI_LINK_TRAIN_NONE; @@ -1179,14 +1405,25 @@ static void igdng_crtc_dpms(struct drm_crtc *crtc, int mode) temp |= FDI_LINK_TRAIN_PATTERN_1; I915_WRITE(fdi_rx_reg, temp); + udelay(100); + /* disable PCH transcoder */ temp = I915_READ(transconf_reg); if ((temp & TRANS_ENABLE) != 0) { I915_WRITE(transconf_reg, temp & ~TRANS_ENABLE); I915_READ(transconf_reg); + n = 0; /* wait for PCH transcoder off, transcoder state */ - while ((I915_READ(transconf_reg) & TRANS_STATE_ENABLE) != 0) - ; + while ((I915_READ(transconf_reg) & TRANS_STATE_ENABLE) != 0) { + n++; + if (n < 60) { + udelay(500); + continue; + } else { + DRM_DEBUG("transcoder %d off delay\n", pipe); + break; + } + } } /* disable PCH DPLL */ @@ -1204,6 +1441,22 @@ static void igdng_crtc_dpms(struct drm_crtc *crtc, int mode) I915_READ(fdi_rx_reg); } + /* Disable CPU FDI TX PLL */ + temp = I915_READ(fdi_tx_reg); + if ((temp & FDI_TX_PLL_ENABLE) != 0) { + I915_WRITE(fdi_tx_reg, temp & ~FDI_TX_PLL_ENABLE); + I915_READ(fdi_tx_reg); + udelay(100); + } + + /* Disable PF */ + temp = I915_READ(pf_ctl_reg); + if ((temp & PF_ENABLE) != 0) { + I915_WRITE(pf_ctl_reg, temp & ~PF_ENABLE); + I915_READ(pf_ctl_reg); + } + I915_WRITE(pf_win_size, 0); + /* Wait for the clocks to turn off. */ udelay(150); break; @@ -1263,13 +1516,15 @@ static void i9xx_crtc_dpms(struct drm_crtc *crtc, int mode) /* Give the overlay scaler a chance to enable if it's on this pipe */ //intel_crtc_dpms_video(crtc, true); TODO + intel_update_watermarks(dev); break; case DRM_MODE_DPMS_OFF: + intel_update_watermarks(dev); /* Give the overlay scaler a chance to disable if it's on this pipe */ //intel_crtc_dpms_video(crtc, FALSE); TODO /* Disable the VGA plane that we never use */ - I915_WRITE(VGACNTRL, VGA_DISP_DISABLE); + i915_disable_vga(dev); /* Disable display plane */ temp = I915_READ(dspcntr_reg); @@ -1443,7 +1698,6 @@ static int intel_get_core_clock_speed(struct drm_device *dev) return 0; /* Silence gcc warning */ } - /** * Return the pipe currently connected to the panel fitter, * or -1 if the panel fitter is not present or not in use @@ -1502,7 +1756,7 @@ igdng_compute_m_n(int bytes_per_pixel, int nlanes, temp = (u64) DATA_N * pixel_clock; temp = div_u64(temp, link_clock); - m_n->gmch_m = (temp * bytes_per_pixel) / nlanes; + m_n->gmch_m = div_u64(temp * bytes_per_pixel, nlanes); m_n->gmch_n = DATA_N; fdi_reduce_ratio(&m_n->gmch_m, &m_n->gmch_n); @@ -1513,6 +1767,464 @@ igdng_compute_m_n(int bytes_per_pixel, int nlanes, } +struct intel_watermark_params { + unsigned long fifo_size; + unsigned long max_wm; + unsigned long default_wm; + unsigned long guard_size; + unsigned long cacheline_size; +}; + +/* IGD has different values for various configs */ +static struct intel_watermark_params igd_display_wm = { + IGD_DISPLAY_FIFO, + IGD_MAX_WM, + IGD_DFT_WM, + IGD_GUARD_WM, + IGD_FIFO_LINE_SIZE +}; +static struct intel_watermark_params igd_display_hplloff_wm = { + IGD_DISPLAY_FIFO, + IGD_MAX_WM, + IGD_DFT_HPLLOFF_WM, + IGD_GUARD_WM, + IGD_FIFO_LINE_SIZE +}; +static struct intel_watermark_params igd_cursor_wm = { + 
IGD_CURSOR_FIFO, + IGD_CURSOR_MAX_WM, + IGD_CURSOR_DFT_WM, + IGD_CURSOR_GUARD_WM, + IGD_FIFO_LINE_SIZE, +}; +static struct intel_watermark_params igd_cursor_hplloff_wm = { + IGD_CURSOR_FIFO, + IGD_CURSOR_MAX_WM, + IGD_CURSOR_DFT_WM, + IGD_CURSOR_GUARD_WM, + IGD_FIFO_LINE_SIZE +}; +static struct intel_watermark_params i945_wm_info = { + I945_FIFO_SIZE, + I915_MAX_WM, + 1, + 2, + I915_FIFO_LINE_SIZE +}; +static struct intel_watermark_params i915_wm_info = { + I915_FIFO_SIZE, + I915_MAX_WM, + 1, + 2, + I915_FIFO_LINE_SIZE +}; +static struct intel_watermark_params i855_wm_info = { + I855GM_FIFO_SIZE, + I915_MAX_WM, + 1, + 2, + I830_FIFO_LINE_SIZE +}; +static struct intel_watermark_params i830_wm_info = { + I830_FIFO_SIZE, + I915_MAX_WM, + 1, + 2, + I830_FIFO_LINE_SIZE +}; + +/** + * intel_calculate_wm - calculate watermark level + * @clock_in_khz: pixel clock + * @wm: chip FIFO params + * @pixel_size: display pixel size + * @latency_ns: memory latency for the platform + * + * Calculate the watermark level (the level at which the display plane will + * start fetching from memory again). Each chip has a different display + * FIFO size and allocation, so the caller needs to figure that out and pass + * in the correct intel_watermark_params structure. + * + * As the pixel clock runs, the FIFO will be drained at a rate that depends + * on the pixel size. When it reaches the watermark level, it'll start + * fetching FIFO line sized based chunks from memory until the FIFO fills + * past the watermark point. If the FIFO drains completely, a FIFO underrun + * will occur, and a display engine hang could result. + */ +static unsigned long intel_calculate_wm(unsigned long clock_in_khz, + struct intel_watermark_params *wm, + int pixel_size, + unsigned long latency_ns) +{ + long entries_required, wm_size; + + entries_required = (clock_in_khz * pixel_size * latency_ns) / 1000000; + entries_required /= wm->cacheline_size; + + DRM_DEBUG("FIFO entries required for mode: %d\n", entries_required); + + wm_size = wm->fifo_size - (entries_required + wm->guard_size); + + DRM_DEBUG("FIFO watermark level: %d\n", wm_size); + + /* Don't promote wm_size to unsigned... 
*/ + if (wm_size > (long)wm->max_wm) + wm_size = wm->max_wm; + if (wm_size <= 0) + wm_size = wm->default_wm; + return wm_size; +} + +struct cxsr_latency { + int is_desktop; + unsigned long fsb_freq; + unsigned long mem_freq; + unsigned long display_sr; + unsigned long display_hpll_disable; + unsigned long cursor_sr; + unsigned long cursor_hpll_disable; +}; + +static struct cxsr_latency cxsr_latency_table[] = { + {1, 800, 400, 3382, 33382, 3983, 33983}, /* DDR2-400 SC */ + {1, 800, 667, 3354, 33354, 3807, 33807}, /* DDR2-667 SC */ + {1, 800, 800, 3347, 33347, 3763, 33763}, /* DDR2-800 SC */ + + {1, 667, 400, 3400, 33400, 4021, 34021}, /* DDR2-400 SC */ + {1, 667, 667, 3372, 33372, 3845, 33845}, /* DDR2-667 SC */ + {1, 667, 800, 3386, 33386, 3822, 33822}, /* DDR2-800 SC */ + + {1, 400, 400, 3472, 33472, 4173, 34173}, /* DDR2-400 SC */ + {1, 400, 667, 3443, 33443, 3996, 33996}, /* DDR2-667 SC */ + {1, 400, 800, 3430, 33430, 3946, 33946}, /* DDR2-800 SC */ + + {0, 800, 400, 3438, 33438, 4065, 34065}, /* DDR2-400 SC */ + {0, 800, 667, 3410, 33410, 3889, 33889}, /* DDR2-667 SC */ + {0, 800, 800, 3403, 33403, 3845, 33845}, /* DDR2-800 SC */ + + {0, 667, 400, 3456, 33456, 4103, 34106}, /* DDR2-400 SC */ + {0, 667, 667, 3428, 33428, 3927, 33927}, /* DDR2-667 SC */ + {0, 667, 800, 3443, 33443, 3905, 33905}, /* DDR2-800 SC */ + + {0, 400, 400, 3528, 33528, 4255, 34255}, /* DDR2-400 SC */ + {0, 400, 667, 3500, 33500, 4079, 34079}, /* DDR2-667 SC */ + {0, 400, 800, 3487, 33487, 4029, 34029}, /* DDR2-800 SC */ +}; + +static struct cxsr_latency *intel_get_cxsr_latency(int is_desktop, int fsb, + int mem) +{ + int i; + struct cxsr_latency *latency; + + if (fsb == 0 || mem == 0) + return NULL; + + for (i = 0; i < ARRAY_SIZE(cxsr_latency_table); i++) { + latency = &cxsr_latency_table[i]; + if (is_desktop == latency->is_desktop && + fsb == latency->fsb_freq && mem == latency->mem_freq) + break; + } + if (i >= ARRAY_SIZE(cxsr_latency_table)) { + DRM_DEBUG("Unknown FSB/MEM found, disable CxSR\n"); + return NULL; + } + return latency; +} + +static void igd_disable_cxsr(struct drm_device *dev) +{ + struct drm_i915_private *dev_priv = dev->dev_private; + u32 reg; + + /* deactivate cxsr */ + reg = I915_READ(DSPFW3); + reg &= ~(IGD_SELF_REFRESH_EN); + I915_WRITE(DSPFW3, reg); + DRM_INFO("Big FIFO is disabled\n"); +} + +static void igd_enable_cxsr(struct drm_device *dev, unsigned long clock, + int pixel_size) +{ + struct drm_i915_private *dev_priv = dev->dev_private; + u32 reg; + unsigned long wm; + struct cxsr_latency *latency; + + latency = intel_get_cxsr_latency(IS_IGDG(dev), dev_priv->fsb_freq, + dev_priv->mem_freq); + if (!latency) { + DRM_DEBUG("Unknown FSB/MEM found, disable CxSR\n"); + igd_disable_cxsr(dev); + return; + } + + /* Display SR */ + wm = intel_calculate_wm(clock, &igd_display_wm, pixel_size, + latency->display_sr); + reg = I915_READ(DSPFW1); + reg &= 0x7fffff; + reg |= wm << 23; + I915_WRITE(DSPFW1, reg); + DRM_DEBUG("DSPFW1 register is %x\n", reg); + + /* cursor SR */ + wm = intel_calculate_wm(clock, &igd_cursor_wm, pixel_size, + latency->cursor_sr); + reg = I915_READ(DSPFW3); + reg &= ~(0x3f << 24); + reg |= (wm & 0x3f) << 24; + I915_WRITE(DSPFW3, reg); + + /* Display HPLL off SR */ + wm = intel_calculate_wm(clock, &igd_display_hplloff_wm, + latency->display_hpll_disable, I915_FIFO_LINE_SIZE); + reg = I915_READ(DSPFW3); + reg &= 0xfffffe00; + reg |= wm & 0x1ff; + I915_WRITE(DSPFW3, reg); + + /* cursor HPLL off SR */ + wm = intel_calculate_wm(clock, &igd_cursor_hplloff_wm, pixel_size, + 
latency->cursor_hpll_disable); + reg = I915_READ(DSPFW3); + reg &= ~(0x3f << 16); + reg |= (wm & 0x3f) << 16; + I915_WRITE(DSPFW3, reg); + DRM_DEBUG("DSPFW3 register is %x\n", reg); + + /* activate cxsr */ + reg = I915_READ(DSPFW3); + reg |= IGD_SELF_REFRESH_EN; + I915_WRITE(DSPFW3, reg); + + DRM_INFO("Big FIFO is enabled\n"); + + return; +} + +const static int latency_ns = 3000; /* default for non-igd platforms */ + +static int intel_get_fifo_size(struct drm_device *dev, int plane) +{ + struct drm_i915_private *dev_priv = dev->dev_private; + uint32_t dsparb = I915_READ(DSPARB); + int size; + + if (IS_I9XX(dev)) { + if (plane == 0) + size = dsparb & 0x7f; + else + size = ((dsparb >> DSPARB_CSTART_SHIFT) & 0x7f) - + (dsparb & 0x7f); + } else if (IS_I85X(dev)) { + if (plane == 0) + size = dsparb & 0x1ff; + else + size = ((dsparb >> DSPARB_BEND_SHIFT) & 0x1ff) - + (dsparb & 0x1ff); + size >>= 1; /* Convert to cachelines */ + } else if (IS_845G(dev)) { + size = dsparb & 0x7f; + size >>= 2; /* Convert to cachelines */ + } else { + size = dsparb & 0x7f; + size >>= 1; /* Convert to cachelines */ + } + + DRM_DEBUG("FIFO size - (0x%08x) %s: %d\n", dsparb, plane ? "B" : "A", + size); + + return size; +} + +static void i965_update_wm(struct drm_device *dev) +{ + struct drm_i915_private *dev_priv = dev->dev_private; + + DRM_DEBUG("Setting FIFO watermarks - A: 8, B: 8, C: 8, SR 8\n"); + + /* 965 has limitations... */ + I915_WRITE(DSPFW1, (8 << 16) | (8 << 8) | (8 << 0)); + I915_WRITE(DSPFW2, (8 << 8) | (8 << 0)); +} + +static void i9xx_update_wm(struct drm_device *dev, int planea_clock, + int planeb_clock, int sr_hdisplay, int pixel_size) +{ + struct drm_i915_private *dev_priv = dev->dev_private; + uint32_t fwater_lo; + uint32_t fwater_hi; + int total_size, cacheline_size, cwm, srwm = 1; + int planea_wm, planeb_wm; + struct intel_watermark_params planea_params, planeb_params; + unsigned long line_time_us; + int sr_clock, sr_entries = 0; + + /* Create copies of the base settings for each pipe */ + if (IS_I965GM(dev) || IS_I945GM(dev)) + planea_params = planeb_params = i945_wm_info; + else if (IS_I9XX(dev)) + planea_params = planeb_params = i915_wm_info; + else + planea_params = planeb_params = i855_wm_info; + + /* Grab a couple of global values before we overwrite them */ + total_size = planea_params.fifo_size; + cacheline_size = planea_params.cacheline_size; + + /* Update per-plane FIFO sizes */ + planea_params.fifo_size = intel_get_fifo_size(dev, 0); + planeb_params.fifo_size = intel_get_fifo_size(dev, 1); + + planea_wm = intel_calculate_wm(planea_clock, &planea_params, + pixel_size, latency_ns); + planeb_wm = intel_calculate_wm(planeb_clock, &planeb_params, + pixel_size, latency_ns); + DRM_DEBUG("FIFO watermarks - A: %d, B: %d\n", planea_wm, planeb_wm); + + /* + * Overlay gets an aggressive default since video jitter is bad. + */ + cwm = 2; + + /* Calc sr entries for one plane configs */ + if (sr_hdisplay && (!planea_clock || !planeb_clock)) { + /* self-refresh has much higher latency */ + const static int sr_latency_ns = 6000; + + sr_clock = planea_clock ? 
planea_clock : planeb_clock; + line_time_us = ((sr_hdisplay * 1000) / sr_clock); + + /* Use ns/us then divide to preserve precision */ + sr_entries = (((sr_latency_ns / line_time_us) + 1) * + pixel_size * sr_hdisplay) / 1000; + sr_entries = roundup(sr_entries / cacheline_size, 1); + DRM_DEBUG("self-refresh entries: %d\n", sr_entries); + srwm = total_size - sr_entries; + if (srwm < 0) + srwm = 1; + if (IS_I9XX(dev)) + I915_WRITE(FW_BLC_SELF, (srwm & 0x3f)); + } + + DRM_DEBUG("Setting FIFO watermarks - A: %d, B: %d, C: %d, SR %d\n", + planea_wm, planeb_wm, cwm, srwm); + + fwater_lo = ((planeb_wm & 0x3f) << 16) | (planea_wm & 0x3f); + fwater_hi = (cwm & 0x1f); + + /* Set request length to 8 cachelines per fetch */ + fwater_lo = fwater_lo | (1 << 24) | (1 << 8); + fwater_hi = fwater_hi | (1 << 8); + + I915_WRITE(FW_BLC, fwater_lo); + I915_WRITE(FW_BLC2, fwater_hi); +} + +static void i830_update_wm(struct drm_device *dev, int planea_clock, + int pixel_size) +{ + struct drm_i915_private *dev_priv = dev->dev_private; + uint32_t fwater_lo = I915_READ(FW_BLC) & ~0xfff; + int planea_wm; + + i830_wm_info.fifo_size = intel_get_fifo_size(dev, 0); + + planea_wm = intel_calculate_wm(planea_clock, &i830_wm_info, + pixel_size, latency_ns); + fwater_lo |= (3<<8) | planea_wm; + + DRM_DEBUG("Setting FIFO watermarks - A: %d\n", planea_wm); + + I915_WRITE(FW_BLC, fwater_lo); +} + +/** + * intel_update_watermarks - update FIFO watermark values based on current modes + * + * Calculate watermark values for the various WM regs based on current mode + * and plane configuration. + * + * There are several cases to deal with here: + * - normal (i.e. non-self-refresh) + * - self-refresh (SR) mode + * - lines are large relative to FIFO size (buffer can hold up to 2) + * - lines are small relative to FIFO size (buffer can hold more than 2 + * lines), so need to account for TLB latency + * + * The normal calculation is: + * watermark = dotclock * bytes per pixel * latency + * where latency is platform & configuration dependent (we assume pessimal + * values here). + * + * The SR calculation is: + * watermark = (trunc(latency/line time)+1) * surface width * + * bytes per pixel + * where + * line time = htotal / dotclock + * and latency is assumed to be high, as above. + * + * The final value programmed to the register should always be rounded up, + * and include an extra 2 entries to account for clock crossings. + * + * We don't use the sprite, so we can ignore that. And on Crestline we have + * to set the non-SR watermarks to 8. 
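
To make the two formulas in the comment above concrete, here is a small standalone userspace sketch (not driver code; the dotclock, FIFO, cacheline and latency numbers are made-up examples) that runs the same arithmetic for the normal and self-refresh cases:

    #include <stdio.h>

    #define DIV_ROUND_UP(n, d) (((n) + (d) - 1) / (d))

    int main(void)
    {
        /* assumed example values, not taken from any real platform table */
        unsigned long clock = 148500;     /* dotclock in kHz */
        unsigned long pixel_size = 4;     /* bytes per pixel */
        unsigned long latency_ns = 3000;
        unsigned long fifo = 96, cacheline = 64;

        /* normal: bytes scanned out while the memory request is in flight */
        unsigned long bytes = (clock * pixel_size / 1000) * latency_ns / 1000;
        unsigned long entries = DIV_ROUND_UP(bytes, cacheline);
        printf("normal wm = %lu\n", fifo - (entries + 2));   /* 96 - (28 + 2) = 66 */

        /* self-refresh: whole lines buffered while the latency elapses */
        unsigned long hdisplay = 1920, htotal = 2200, sr_latency_ns = 6000;
        unsigned long line_time_us = (htotal * 1000) / clock;            /* ~14 us */
        unsigned long lines = sr_latency_ns / (line_time_us * 1000) + 1;
        unsigned long sr_bytes = lines * hdisplay * pixel_size;
        printf("sr entries = %lu\n", DIV_ROUND_UP(sr_bytes, cacheline)); /* 120 */
        return 0;
    }

The "+ 2" is the clock-crossing allowance the comment asks for; the real helper additionally clamps the result against the per-platform max_wm/default_wm limits, as the tail of intel_calculate_wm() above shows.
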
+ */ +static void intel_update_watermarks(struct drm_device *dev) +{ + struct drm_crtc *crtc; + struct intel_crtc *intel_crtc; + int sr_hdisplay = 0; + unsigned long planea_clock = 0, planeb_clock = 0, sr_clock = 0; + int enabled = 0, pixel_size = 0; + + if (DSPARB_HWCONTROL(dev)) + return; + + /* Get the clock config from both planes */ + list_for_each_entry(crtc, &dev->mode_config.crtc_list, head) { + intel_crtc = to_intel_crtc(crtc); + if (crtc->enabled) { + enabled++; + if (intel_crtc->plane == 0) { + DRM_DEBUG("plane A (pipe %d) clock: %d\n", + intel_crtc->pipe, crtc->mode.clock); + planea_clock = crtc->mode.clock; + } else { + DRM_DEBUG("plane B (pipe %d) clock: %d\n", + intel_crtc->pipe, crtc->mode.clock); + planeb_clock = crtc->mode.clock; + } + sr_hdisplay = crtc->mode.hdisplay; + sr_clock = crtc->mode.clock; + if (crtc->fb) + pixel_size = crtc->fb->bits_per_pixel / 8; + else + pixel_size = 4; /* by default */ + } + } + + if (enabled <= 0) + return; + + /* Single plane configs can enable self refresh */ + if (enabled == 1 && IS_IGD(dev)) + igd_enable_cxsr(dev, sr_clock, pixel_size); + else if (IS_IGD(dev)) + igd_disable_cxsr(dev); + + if (IS_I965G(dev)) + i965_update_wm(dev); + else if (IS_I9XX(dev) || IS_MOBILE(dev)) + i9xx_update_wm(dev, planea_clock, planeb_clock, sr_hdisplay, + pixel_size); + else + i830_update_wm(dev, planea_clock, pixel_size); +} + static int intel_crtc_mode_set(struct drm_crtc *crtc, struct drm_display_mode *mode, struct drm_display_mode *adjusted_mode, @@ -1541,7 +2253,8 @@ static int intel_crtc_mode_set(struct drm_crtc *crtc, intel_clock_t clock; u32 dpll = 0, fp = 0, dspcntr, pipeconf; bool ok, is_sdvo = false, is_dvo = false; - bool is_crt = false, is_lvds = false, is_tv = false; + bool is_crt = false, is_lvds = false, is_tv = false, is_dp = false; + bool is_edp = false; struct drm_mode_config *mode_config = &dev->mode_config; struct drm_connector *connector; const intel_limit_t *limit; @@ -1557,6 +2270,7 @@ static int intel_crtc_mode_set(struct drm_crtc *crtc, int lvds_reg = LVDS; u32 temp; int sdvo_pixel_multiply; + int target_clock; drm_vblank_pre_modeset(dev, pipe); @@ -1585,6 +2299,12 @@ static int intel_crtc_mode_set(struct drm_crtc *crtc, case INTEL_OUTPUT_ANALOG: is_crt = true; break; + case INTEL_OUTPUT_DISPLAYPORT: + is_dp = true; + break; + case INTEL_OUTPUT_EDP: + is_edp = true; + break; } num_outputs++; @@ -1600,6 +2320,7 @@ static int intel_crtc_mode_set(struct drm_crtc *crtc, } else { refclk = 48000; } + /* * Returns a set of divisors for the desired target clock with the given @@ -1635,11 +2356,29 @@ static int intel_crtc_mode_set(struct drm_crtc *crtc, } /* FDI link */ - if (IS_IGDNG(dev)) - igdng_compute_m_n(3, 4, /* lane num 4 */ - adjusted_mode->clock, - 270000, /* lane clock */ - &m_n); + if (IS_IGDNG(dev)) { + int lane, link_bw; + /* eDP doesn't require FDI link, so just set DP M/N + according to current link config */ + if (is_edp) { + struct drm_connector *edp; + target_clock = mode->clock; + edp = intel_pipe_get_output(crtc); + intel_edp_link_config(to_intel_output(edp), + &lane, &link_bw); + } else { + /* DP over FDI requires target mode clock + instead of link clock */ + if (is_dp) + target_clock = mode->clock; + else + target_clock = adjusted_mode->clock; + lane = 4; + link_bw = 270000; + } + igdng_compute_m_n(3, lane, target_clock, + link_bw, &m_n); + } if (IS_IGD(dev)) fp = (1 << clock.n) << 16 | clock.m1 << 8 | clock.m2; @@ -1662,6 +2401,8 @@ static int intel_crtc_mode_set(struct drm_crtc *crtc, else if (IS_IGDNG(dev)) 
dpll |= (sdvo_pixel_multiply - 1) << PLL_REF_SDVO_HDMI_MULTIPLIER_SHIFT; } + if (is_dp) + dpll |= DPLL_DVO_HIGH_SPEED; /* compute bitmask from p1 value */ if (IS_IGD(dev)) @@ -1758,29 +2499,15 @@ static int intel_crtc_mode_set(struct drm_crtc *crtc, dpll_reg = pch_dpll_reg; } - if (dpll & DPLL_VCO_ENABLE) { + if (is_edp) { + igdng_disable_pll_edp(crtc); + } else if ((dpll & DPLL_VCO_ENABLE)) { I915_WRITE(fp_reg, fp); I915_WRITE(dpll_reg, dpll & ~DPLL_VCO_ENABLE); I915_READ(dpll_reg); udelay(150); } - if (IS_IGDNG(dev)) { - /* enable PCH clock reference source */ - /* XXX need to change the setting for other outputs */ - u32 temp; - temp = I915_READ(PCH_DREF_CONTROL); - temp &= ~DREF_NONSPREAD_SOURCE_MASK; - temp |= DREF_NONSPREAD_CK505_ENABLE; - temp &= ~DREF_SSC_SOURCE_MASK; - temp |= DREF_SSC_SOURCE_ENABLE; - temp &= ~DREF_SSC1_ENABLE; - /* if no eDP, disable source output to CPU */ - temp &= ~DREF_CPU_SOURCE_OUTPUT_MASK; - temp |= DREF_CPU_SOURCE_OUTPUT_DISABLE; - I915_WRITE(PCH_DREF_CONTROL, temp); - } - /* The LVDS pin pair needs to be on before the DPLLs are enabled. * This is an exception to the general rule that mode_set doesn't turn * things on. @@ -1809,24 +2536,28 @@ static int intel_crtc_mode_set(struct drm_crtc *crtc, I915_WRITE(lvds_reg, lvds); I915_READ(lvds_reg); } + if (is_dp) + intel_dp_set_m_n(crtc, mode, adjusted_mode); - I915_WRITE(fp_reg, fp); - I915_WRITE(dpll_reg, dpll); - I915_READ(dpll_reg); - /* Wait for the clocks to stabilize. */ - udelay(150); - - if (IS_I965G(dev) && !IS_IGDNG(dev)) { - sdvo_pixel_multiply = adjusted_mode->clock / mode->clock; - I915_WRITE(dpll_md_reg, (0 << DPLL_MD_UDI_DIVIDER_SHIFT) | - ((sdvo_pixel_multiply - 1) << DPLL_MD_UDI_MULTIPLIER_SHIFT)); - } else { - /* write it again -- the BIOS does, after all */ + if (!is_edp) { + I915_WRITE(fp_reg, fp); I915_WRITE(dpll_reg, dpll); + I915_READ(dpll_reg); + /* Wait for the clocks to stabilize. */ + udelay(150); + + if (IS_I965G(dev) && !IS_IGDNG(dev)) { + sdvo_pixel_multiply = adjusted_mode->clock / mode->clock; + I915_WRITE(dpll_md_reg, (0 << DPLL_MD_UDI_DIVIDER_SHIFT) | + ((sdvo_pixel_multiply - 1) << DPLL_MD_UDI_MULTIPLIER_SHIFT)); + } else { + /* write it again -- the BIOS does, after all */ + I915_WRITE(dpll_reg, dpll); + } + I915_READ(dpll_reg); + /* Wait for the clocks to stabilize. */ + udelay(150); } - I915_READ(dpll_reg); - /* Wait for the clocks to stabilize. 
*/ - udelay(150); I915_WRITE(htot_reg, (adjusted_mode->crtc_hdisplay - 1) | ((adjusted_mode->crtc_htotal - 1) << 16)); @@ -1856,10 +2587,14 @@ static int intel_crtc_mode_set(struct drm_crtc *crtc, I915_WRITE(link_m1_reg, m_n.link_m); I915_WRITE(link_n1_reg, m_n.link_n); - /* enable FDI RX PLL too */ - temp = I915_READ(fdi_rx_reg); - I915_WRITE(fdi_rx_reg, temp | FDI_RX_PLL_ENABLE); - udelay(200); + if (is_edp) { + igdng_set_pll_edp(crtc, adjusted_mode->clock); + } else { + /* enable FDI RX PLL too */ + temp = I915_READ(fdi_rx_reg); + I915_WRITE(fdi_rx_reg, temp | FDI_RX_PLL_ENABLE); + udelay(200); + } } I915_WRITE(pipeconf_reg, pipeconf); @@ -1871,6 +2606,9 @@ static int intel_crtc_mode_set(struct drm_crtc *crtc, /* Flush the plane changes */ ret = intel_pipe_set_base(crtc, x, y, old_fb); + + intel_update_watermarks(dev); + drm_vblank_post_modeset(dev, pipe); return ret; @@ -2359,6 +3097,7 @@ static void intel_crtc_init(struct drm_device *dev, int pipe) drm_mode_crtc_set_gamma_size(&intel_crtc->base, 256); intel_crtc->pipe = pipe; + intel_crtc->plane = pipe; for (i = 0; i < 256; i++) { intel_crtc->lut_r[i] = i; intel_crtc->lut_g[i] = i; @@ -2453,12 +3192,17 @@ static void intel_setup_outputs(struct drm_device *dev) if (IS_IGDNG(dev)) { int found; + if (IS_MOBILE(dev) && (I915_READ(DP_A) & DP_DETECTED)) + intel_dp_init(dev, DP_A); + if (I915_READ(HDMIB) & PORT_DETECTED) { /* check SDVOB */ /* found = intel_sdvo_init(dev, HDMIB); */ found = 0; if (!found) intel_hdmi_init(dev, HDMIB); + if (!found && (I915_READ(PCH_DP_B) & DP_DETECTED)) + intel_dp_init(dev, PCH_DP_B); } if (I915_READ(HDMIC) & PORT_DETECTED) @@ -2467,6 +3211,12 @@ static void intel_setup_outputs(struct drm_device *dev) if (I915_READ(HDMID) & PORT_DETECTED) intel_hdmi_init(dev, HDMID); + if (I915_READ(PCH_DP_C) & DP_DETECTED) + intel_dp_init(dev, PCH_DP_C); + + if (I915_READ(PCH_DP_D) & DP_DETECTED) + intel_dp_init(dev, PCH_DP_D); + } else if (IS_I9XX(dev)) { int found; u32 reg; @@ -2475,6 +3225,8 @@ static void intel_setup_outputs(struct drm_device *dev) found = intel_sdvo_init(dev, SDVOB); if (!found && SUPPORTS_INTEGRATED_HDMI(dev)) intel_hdmi_init(dev, SDVOB); + if (!found && SUPPORTS_INTEGRATED_DP(dev)) + intel_dp_init(dev, DP_B); } /* Before G4X SDVOC doesn't have its own detect register */ @@ -2487,7 +3239,11 @@ static void intel_setup_outputs(struct drm_device *dev) found = intel_sdvo_init(dev, SDVOC); if (!found && SUPPORTS_INTEGRATED_HDMI(dev)) intel_hdmi_init(dev, SDVOC); + if (!found && SUPPORTS_INTEGRATED_DP(dev)) + intel_dp_init(dev, DP_C); } + if (SUPPORTS_INTEGRATED_DP(dev) && (I915_READ(DP_D) & DP_DETECTED)) + intel_dp_init(dev, DP_D); } else intel_dvo_init(dev); @@ -2530,6 +3286,15 @@ static void intel_setup_outputs(struct drm_device *dev) (1 << 1)); clone_mask = (1 << INTEL_OUTPUT_TVOUT); break; + case INTEL_OUTPUT_DISPLAYPORT: + crtc_mask = ((1 << 0) | + (1 << 1)); + clone_mask = (1 << INTEL_OUTPUT_DISPLAYPORT); + break; + case INTEL_OUTPUT_EDP: + crtc_mask = (1 << 1); + clone_mask = (1 << INTEL_OUTPUT_EDP); + break; } encoder->possible_crtcs = crtc_mask; encoder->possible_clones = intel_connector_clones(dev, clone_mask); @@ -2639,6 +3404,9 @@ void intel_modeset_init(struct drm_device *dev) if (IS_I965G(dev)) { dev->mode_config.max_width = 8192; dev->mode_config.max_height = 8192; + } else if (IS_I9XX(dev)) { + dev->mode_config.max_width = 4096; + dev->mode_config.max_height = 4096; } else { dev->mode_config.max_width = 2048; dev->mode_config.max_height = 2048; diff --git a/drivers/gpu/drm/i915/intel_dp.c 
b/drivers/gpu/drm/i915/intel_dp.c new file mode 100644 index 00000000000..a6ff15ac548 --- /dev/null +++ b/drivers/gpu/drm/i915/intel_dp.c @@ -0,0 +1,1318 @@ +/* + * Copyright © 2008 Intel Corporation + * + * Permission is hereby granted, free of charge, to any person obtaining a + * copy of this software and associated documentation files (the "Software"), + * to deal in the Software without restriction, including without limitation + * the rights to use, copy, modify, merge, publish, distribute, sublicense, + * and/or sell copies of the Software, and to permit persons to whom the + * Software is furnished to do so, subject to the following conditions: + * + * The above copyright notice and this permission notice (including the next + * paragraph) shall be included in all copies or substantial portions of the + * Software. + * + * THE SOFTWARE IS PROVIDED "AS IS", WITHOUT WARRANTY OF ANY KIND, EXPRESS OR + * IMPLIED, INCLUDING BUT NOT LIMITED TO THE WARRANTIES OF MERCHANTABILITY, + * FITNESS FOR A PARTICULAR PURPOSE AND NONINFRINGEMENT. IN NO EVENT SHALL + * THE AUTHORS OR COPYRIGHT HOLDERS BE LIABLE FOR ANY CLAIM, DAMAGES OR OTHER + * LIABILITY, WHETHER IN AN ACTION OF CONTRACT, TORT OR OTHERWISE, ARISING + * FROM, OUT OF OR IN CONNECTION WITH THE SOFTWARE OR THE USE OR OTHER DEALINGS + * IN THE SOFTWARE. + * + * Authors: + * Keith Packard <keithp@keithp.com> + * + */ + +#include <linux/i2c.h> +#include "drmP.h" +#include "drm.h" +#include "drm_crtc.h" +#include "drm_crtc_helper.h" +#include "intel_drv.h" +#include "i915_drm.h" +#include "i915_drv.h" +#include "intel_dp.h" + +#define DP_LINK_STATUS_SIZE 6 +#define DP_LINK_CHECK_TIMEOUT (10 * 1000) + +#define DP_LINK_CONFIGURATION_SIZE 9 + +#define IS_eDP(i) ((i)->type == INTEL_OUTPUT_EDP) + +struct intel_dp_priv { + uint32_t output_reg; + uint32_t DP; + uint8_t link_configuration[DP_LINK_CONFIGURATION_SIZE]; + uint32_t save_DP; + uint8_t save_link_configuration[DP_LINK_CONFIGURATION_SIZE]; + bool has_audio; + int dpms_mode; + uint8_t link_bw; + uint8_t lane_count; + uint8_t dpcd[4]; + struct intel_output *intel_output; + struct i2c_adapter adapter; + struct i2c_algo_dp_aux_data algo; +}; + +static void +intel_dp_link_train(struct intel_output *intel_output, uint32_t DP, + uint8_t link_configuration[DP_LINK_CONFIGURATION_SIZE]); + +static void +intel_dp_link_down(struct intel_output *intel_output, uint32_t DP); + +void +intel_edp_link_config (struct intel_output *intel_output, + int *lane_num, int *link_bw) +{ + struct intel_dp_priv *dp_priv = intel_output->dev_priv; + + *lane_num = dp_priv->lane_count; + if (dp_priv->link_bw == DP_LINK_BW_1_62) + *link_bw = 162000; + else if (dp_priv->link_bw == DP_LINK_BW_2_7) + *link_bw = 270000; +} + +static int +intel_dp_max_lane_count(struct intel_output *intel_output) +{ + struct intel_dp_priv *dp_priv = intel_output->dev_priv; + int max_lane_count = 4; + + if (dp_priv->dpcd[0] >= 0x11) { + max_lane_count = dp_priv->dpcd[2] & 0x1f; + switch (max_lane_count) { + case 1: case 2: case 4: + break; + default: + max_lane_count = 4; + } + } + return max_lane_count; +} + +static int +intel_dp_max_link_bw(struct intel_output *intel_output) +{ + struct intel_dp_priv *dp_priv = intel_output->dev_priv; + int max_link_bw = dp_priv->dpcd[1]; + + switch (max_link_bw) { + case DP_LINK_BW_1_62: + case DP_LINK_BW_2_7: + break; + default: + max_link_bw = DP_LINK_BW_1_62; + break; + } + return max_link_bw; +} + +static int +intel_dp_link_clock(uint8_t link_bw) +{ + if (link_bw == DP_LINK_BW_2_7) + return 270000; + else 
+ return 162000; +} + +/* I think this is a fiction */ +static int +intel_dp_link_required(int pixel_clock) +{ + return pixel_clock * 3; +} + +static int +intel_dp_mode_valid(struct drm_connector *connector, + struct drm_display_mode *mode) +{ + struct intel_output *intel_output = to_intel_output(connector); + int max_link_clock = intel_dp_link_clock(intel_dp_max_link_bw(intel_output)); + int max_lanes = intel_dp_max_lane_count(intel_output); + + if (intel_dp_link_required(mode->clock) > max_link_clock * max_lanes) + return MODE_CLOCK_HIGH; + + if (mode->clock < 10000) + return MODE_CLOCK_LOW; + + return MODE_OK; +} + +static uint32_t +pack_aux(uint8_t *src, int src_bytes) +{ + int i; + uint32_t v = 0; + + if (src_bytes > 4) + src_bytes = 4; + for (i = 0; i < src_bytes; i++) + v |= ((uint32_t) src[i]) << ((3-i) * 8); + return v; +} + +static void +unpack_aux(uint32_t src, uint8_t *dst, int dst_bytes) +{ + int i; + if (dst_bytes > 4) + dst_bytes = 4; + for (i = 0; i < dst_bytes; i++) + dst[i] = src >> ((3-i) * 8); +} + +/* hrawclock is 1/4 the FSB frequency */ +static int +intel_hrawclk(struct drm_device *dev) +{ + struct drm_i915_private *dev_priv = dev->dev_private; + uint32_t clkcfg; + + clkcfg = I915_READ(CLKCFG); + switch (clkcfg & CLKCFG_FSB_MASK) { + case CLKCFG_FSB_400: + return 100; + case CLKCFG_FSB_533: + return 133; + case CLKCFG_FSB_667: + return 166; + case CLKCFG_FSB_800: + return 200; + case CLKCFG_FSB_1067: + return 266; + case CLKCFG_FSB_1333: + return 333; + /* these two are just a guess; one of them might be right */ + case CLKCFG_FSB_1600: + case CLKCFG_FSB_1600_ALT: + return 400; + default: + return 133; + } +} + +static int +intel_dp_aux_ch(struct intel_output *intel_output, + uint8_t *send, int send_bytes, + uint8_t *recv, int recv_size) +{ + struct intel_dp_priv *dp_priv = intel_output->dev_priv; + uint32_t output_reg = dp_priv->output_reg; + struct drm_device *dev = intel_output->base.dev; + struct drm_i915_private *dev_priv = dev->dev_private; + uint32_t ch_ctl = output_reg + 0x10; + uint32_t ch_data = ch_ctl + 4; + int i; + int recv_bytes; + uint32_t ctl; + uint32_t status; + uint32_t aux_clock_divider; + int try; + + /* The clock divider is based off the hrawclk, + * and would like to run at 2MHz. 
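
The MODE_OK/MODE_CLOCK_HIGH decision above is plain integer arithmetic on the sink's DPCD capabilities. A standalone userspace sketch (the DPCD bytes are invented, and the real code also validates the lane count down to 1, 2 or 4):

    #include <stdint.h>
    #include <stdio.h>

    #define DP_LINK_BW_1_62 0x06
    #define DP_LINK_BW_2_7  0x0a

    int main(void)
    {
        /* assumed receiver-capability bytes (DPCD 0x000..0x003), illustration only */
        uint8_t dpcd[4] = { 0x11, DP_LINK_BW_2_7, 0x84, 0x01 };

        int max_lanes = dpcd[2] & 0x1f;                                  /* 4 */
        int link_clock = (dpcd[1] == DP_LINK_BW_2_7) ? 270000 : 162000;

        int pixel_clock = 148500;          /* kHz, a 1080p-class mode */
        int required = pixel_clock * 3;    /* same 3x as intel_dp_link_required() */
        int available = link_clock * max_lanes;

        printf("required %d, available %d -> %s\n", required, available,
               required <= available ? "MODE_OK" : "MODE_CLOCK_HIGH");
        return 0;
    }
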
So, take the + * hrawclk value and divide by 2 and use that + */ + if (IS_eDP(intel_output)) + aux_clock_divider = 225; /* eDP input clock at 450Mhz */ + else if (IS_IGDNG(dev)) + aux_clock_divider = 62; /* IGDNG: input clock fixed at 125Mhz */ + else + aux_clock_divider = intel_hrawclk(dev) / 2; + + /* Must try at least 3 times according to DP spec */ + for (try = 0; try < 5; try++) { + /* Load the send data into the aux channel data registers */ + for (i = 0; i < send_bytes; i += 4) { + uint32_t d = pack_aux(send + i, send_bytes - i);; + + I915_WRITE(ch_data + i, d); + } + + ctl = (DP_AUX_CH_CTL_SEND_BUSY | + DP_AUX_CH_CTL_TIME_OUT_400us | + (send_bytes << DP_AUX_CH_CTL_MESSAGE_SIZE_SHIFT) | + (5 << DP_AUX_CH_CTL_PRECHARGE_2US_SHIFT) | + (aux_clock_divider << DP_AUX_CH_CTL_BIT_CLOCK_2X_SHIFT) | + DP_AUX_CH_CTL_DONE | + DP_AUX_CH_CTL_TIME_OUT_ERROR | + DP_AUX_CH_CTL_RECEIVE_ERROR); + + /* Send the command and wait for it to complete */ + I915_WRITE(ch_ctl, ctl); + (void) I915_READ(ch_ctl); + for (;;) { + udelay(100); + status = I915_READ(ch_ctl); + if ((status & DP_AUX_CH_CTL_SEND_BUSY) == 0) + break; + } + + /* Clear done status and any errors */ + I915_WRITE(ch_ctl, (status | + DP_AUX_CH_CTL_DONE | + DP_AUX_CH_CTL_TIME_OUT_ERROR | + DP_AUX_CH_CTL_RECEIVE_ERROR)); + (void) I915_READ(ch_ctl); + if ((status & DP_AUX_CH_CTL_TIME_OUT_ERROR) == 0) + break; + } + + if ((status & DP_AUX_CH_CTL_DONE) == 0) { + DRM_ERROR("dp_aux_ch not done status 0x%08x\n", status); + return -EBUSY; + } + + /* Check for timeout or receive error. + * Timeouts occur when the sink is not connected + */ + if (status & DP_AUX_CH_CTL_RECEIVE_ERROR) { + DRM_ERROR("dp_aux_ch receive error status 0x%08x\n", status); + return -EIO; + } + + /* Timeouts occur when the device isn't connected, so they're + * "normal" -- don't fill the kernel log with these */ + if (status & DP_AUX_CH_CTL_TIME_OUT_ERROR) { + DRM_DEBUG("dp_aux_ch timeout status 0x%08x\n", status); + return -ETIMEDOUT; + } + + /* Unload any bytes sent back from the other side */ + recv_bytes = ((status & DP_AUX_CH_CTL_MESSAGE_SIZE_MASK) >> + DP_AUX_CH_CTL_MESSAGE_SIZE_SHIFT); + + if (recv_bytes > recv_size) + recv_bytes = recv_size; + + for (i = 0; i < recv_bytes; i += 4) { + uint32_t d = I915_READ(ch_data + i); + + unpack_aux(d, recv + i, recv_bytes - i); + } + + return recv_bytes; +} + +/* Write data to the aux channel in native mode */ +static int +intel_dp_aux_native_write(struct intel_output *intel_output, + uint16_t address, uint8_t *send, int send_bytes) +{ + int ret; + uint8_t msg[20]; + int msg_bytes; + uint8_t ack; + + if (send_bytes > 16) + return -1; + msg[0] = AUX_NATIVE_WRITE << 4; + msg[1] = address >> 8; + msg[2] = address & 0xff; + msg[3] = send_bytes - 1; + memcpy(&msg[4], send, send_bytes); + msg_bytes = send_bytes + 4; + for (;;) { + ret = intel_dp_aux_ch(intel_output, msg, msg_bytes, &ack, 1); + if (ret < 0) + return ret; + if ((ack & AUX_NATIVE_REPLY_MASK) == AUX_NATIVE_REPLY_ACK) + break; + else if ((ack & AUX_NATIVE_REPLY_MASK) == AUX_NATIVE_REPLY_DEFER) + udelay(100); + else + return -EIO; + } + return send_bytes; +} + +/* Write a single byte to the aux channel in native mode */ +static int +intel_dp_aux_native_write_1(struct intel_output *intel_output, + uint16_t address, uint8_t byte) +{ + return intel_dp_aux_native_write(intel_output, address, &byte, 1); +} + +/* read bytes from a native aux channel */ +static int +intel_dp_aux_native_read(struct intel_output *intel_output, + uint16_t address, uint8_t *recv, int recv_bytes) +{ + 
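
The native AUX helpers above always build the same four-byte header ahead of any payload, and the hardware wants it packed big-endian into 32-bit data registers. A standalone sketch of just that framing (the request values are arbitrary; only the packing mirrors pack_aux() above):

    #include <stdint.h>
    #include <stdio.h>

    #define AUX_NATIVE_READ 0x9

    /* big-endian packing into one AUX data register, as in pack_aux() */
    static uint32_t pack_aux(const uint8_t *src, int n)
    {
        uint32_t v = 0;
        int i;

        if (n > 4)
            n = 4;
        for (i = 0; i < n; i++)
            v |= (uint32_t)src[i] << ((3 - i) * 8);
        return v;
    }

    int main(void)
    {
        /* native read of 6 bytes at DPCD address 0x202 (the link status block) */
        uint16_t addr = 0x202;
        uint8_t msg[4];

        msg[0] = AUX_NATIVE_READ << 4;  /* command nibble; top address bits stay 0 here */
        msg[1] = addr >> 8;
        msg[2] = addr & 0xff;
        msg[3] = 6 - 1;                 /* length is encoded as N-1 */

        printf("first data register: 0x%08x\n", pack_aux(msg, 4));  /* 0x90020205 */
        return 0;
    }
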
uint8_t msg[4]; + int msg_bytes; + uint8_t reply[20]; + int reply_bytes; + uint8_t ack; + int ret; + + msg[0] = AUX_NATIVE_READ << 4; + msg[1] = address >> 8; + msg[2] = address & 0xff; + msg[3] = recv_bytes - 1; + + msg_bytes = 4; + reply_bytes = recv_bytes + 1; + + for (;;) { + ret = intel_dp_aux_ch(intel_output, msg, msg_bytes, + reply, reply_bytes); + if (ret == 0) + return -EPROTO; + if (ret < 0) + return ret; + ack = reply[0]; + if ((ack & AUX_NATIVE_REPLY_MASK) == AUX_NATIVE_REPLY_ACK) { + memcpy(recv, reply + 1, ret - 1); + return ret - 1; + } + else if ((ack & AUX_NATIVE_REPLY_MASK) == AUX_NATIVE_REPLY_DEFER) + udelay(100); + else + return -EIO; + } +} + +static int +intel_dp_i2c_aux_ch(struct i2c_adapter *adapter, + uint8_t *send, int send_bytes, + uint8_t *recv, int recv_bytes) +{ + struct intel_dp_priv *dp_priv = container_of(adapter, + struct intel_dp_priv, + adapter); + struct intel_output *intel_output = dp_priv->intel_output; + + return intel_dp_aux_ch(intel_output, + send, send_bytes, recv, recv_bytes); +} + +static int +intel_dp_i2c_init(struct intel_output *intel_output, const char *name) +{ + struct intel_dp_priv *dp_priv = intel_output->dev_priv; + + DRM_ERROR("i2c_init %s\n", name); + dp_priv->algo.running = false; + dp_priv->algo.address = 0; + dp_priv->algo.aux_ch = intel_dp_i2c_aux_ch; + + memset(&dp_priv->adapter, '\0', sizeof (dp_priv->adapter)); + dp_priv->adapter.owner = THIS_MODULE; + dp_priv->adapter.class = I2C_CLASS_DDC; + strncpy (dp_priv->adapter.name, name, sizeof(dp_priv->adapter.name) - 1); + dp_priv->adapter.name[sizeof(dp_priv->adapter.name) - 1] = '\0'; + dp_priv->adapter.algo_data = &dp_priv->algo; + dp_priv->adapter.dev.parent = &intel_output->base.kdev; + + return i2c_dp_aux_add_bus(&dp_priv->adapter); +} + +static bool +intel_dp_mode_fixup(struct drm_encoder *encoder, struct drm_display_mode *mode, + struct drm_display_mode *adjusted_mode) +{ + struct intel_output *intel_output = enc_to_intel_output(encoder); + struct intel_dp_priv *dp_priv = intel_output->dev_priv; + int lane_count, clock; + int max_lane_count = intel_dp_max_lane_count(intel_output); + int max_clock = intel_dp_max_link_bw(intel_output) == DP_LINK_BW_2_7 ? 
1 : 0; + static int bws[2] = { DP_LINK_BW_1_62, DP_LINK_BW_2_7 }; + + for (lane_count = 1; lane_count <= max_lane_count; lane_count <<= 1) { + for (clock = 0; clock <= max_clock; clock++) { + int link_avail = intel_dp_link_clock(bws[clock]) * lane_count; + + if (intel_dp_link_required(mode->clock) <= link_avail) { + dp_priv->link_bw = bws[clock]; + dp_priv->lane_count = lane_count; + adjusted_mode->clock = intel_dp_link_clock(dp_priv->link_bw); + DRM_DEBUG("Display port link bw %02x lane count %d clock %d\n", + dp_priv->link_bw, dp_priv->lane_count, + adjusted_mode->clock); + return true; + } + } + } + return false; +} + +struct intel_dp_m_n { + uint32_t tu; + uint32_t gmch_m; + uint32_t gmch_n; + uint32_t link_m; + uint32_t link_n; +}; + +static void +intel_reduce_ratio(uint32_t *num, uint32_t *den) +{ + while (*num > 0xffffff || *den > 0xffffff) { + *num >>= 1; + *den >>= 1; + } +} + +static void +intel_dp_compute_m_n(int bytes_per_pixel, + int nlanes, + int pixel_clock, + int link_clock, + struct intel_dp_m_n *m_n) +{ + m_n->tu = 64; + m_n->gmch_m = pixel_clock * bytes_per_pixel; + m_n->gmch_n = link_clock * nlanes; + intel_reduce_ratio(&m_n->gmch_m, &m_n->gmch_n); + m_n->link_m = pixel_clock; + m_n->link_n = link_clock; + intel_reduce_ratio(&m_n->link_m, &m_n->link_n); +} + +void +intel_dp_set_m_n(struct drm_crtc *crtc, struct drm_display_mode *mode, + struct drm_display_mode *adjusted_mode) +{ + struct drm_device *dev = crtc->dev; + struct drm_mode_config *mode_config = &dev->mode_config; + struct drm_connector *connector; + struct drm_i915_private *dev_priv = dev->dev_private; + struct intel_crtc *intel_crtc = to_intel_crtc(crtc); + int lane_count = 4; + struct intel_dp_m_n m_n; + + /* + * Find the lane count in the intel_output private + */ + list_for_each_entry(connector, &mode_config->connector_list, head) { + struct intel_output *intel_output = to_intel_output(connector); + struct intel_dp_priv *dp_priv = intel_output->dev_priv; + + if (!connector->encoder || connector->encoder->crtc != crtc) + continue; + + if (intel_output->type == INTEL_OUTPUT_DISPLAYPORT) { + lane_count = dp_priv->lane_count; + break; + } + } + + /* + * Compute the GMCH and Link ratios. The '3' here is + * the number of bytes_per_pixel post-LUT, which we always + * set up for 8-bits of R/G/B, or 3 bytes total. 
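
The M/N values programmed below are two ratios, each shifted down until both terms fit in 24 bits. A standalone sketch of intel_dp_compute_m_n()'s arithmetic with example inputs (a 1080p-class pixel clock over two 2.7 Gbps lanes; the figures are illustrative only):

    #include <stdint.h>
    #include <stdio.h>

    /* shift both terms down until they fit in 24 bits, as intel_reduce_ratio() does */
    static void reduce_ratio(uint32_t *num, uint32_t *den)
    {
        while (*num > 0xffffff || *den > 0xffffff) {
            *num >>= 1;
            *den >>= 1;
        }
    }

    int main(void)
    {
        uint32_t pixel_clock = 148500;   /* kHz */
        uint32_t link_clock = 270000;    /* kHz (2.7 Gbps symbol rate / 10) */
        uint32_t nlanes = 2, bytes_per_pixel = 3;   /* 3 = post-LUT RGB, per the comment */

        uint32_t gmch_m = pixel_clock * bytes_per_pixel;
        uint32_t gmch_n = link_clock * nlanes;
        uint32_t link_m = pixel_clock;
        uint32_t link_n = link_clock;

        reduce_ratio(&gmch_m, &gmch_n);
        reduce_ratio(&link_m, &link_n);

        printf("data M/N = %u/%u\n", gmch_m, gmch_n);   /* 445500/540000 */
        printf("link M/N = %u/%u\n", link_m, link_n);   /* 148500/270000 */
        return 0;
    }

With these inputs nothing exceeds 24 bits, so the ratios are written out unreduced.
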
+ */ + intel_dp_compute_m_n(3, lane_count, + mode->clock, adjusted_mode->clock, &m_n); + + if (IS_IGDNG(dev)) { + if (intel_crtc->pipe == 0) { + I915_WRITE(TRANSA_DATA_M1, + ((m_n.tu - 1) << PIPE_GMCH_DATA_M_TU_SIZE_SHIFT) | + m_n.gmch_m); + I915_WRITE(TRANSA_DATA_N1, m_n.gmch_n); + I915_WRITE(TRANSA_DP_LINK_M1, m_n.link_m); + I915_WRITE(TRANSA_DP_LINK_N1, m_n.link_n); + } else { + I915_WRITE(TRANSB_DATA_M1, + ((m_n.tu - 1) << PIPE_GMCH_DATA_M_TU_SIZE_SHIFT) | + m_n.gmch_m); + I915_WRITE(TRANSB_DATA_N1, m_n.gmch_n); + I915_WRITE(TRANSB_DP_LINK_M1, m_n.link_m); + I915_WRITE(TRANSB_DP_LINK_N1, m_n.link_n); + } + } else { + if (intel_crtc->pipe == 0) { + I915_WRITE(PIPEA_GMCH_DATA_M, + ((m_n.tu - 1) << PIPE_GMCH_DATA_M_TU_SIZE_SHIFT) | + m_n.gmch_m); + I915_WRITE(PIPEA_GMCH_DATA_N, + m_n.gmch_n); + I915_WRITE(PIPEA_DP_LINK_M, m_n.link_m); + I915_WRITE(PIPEA_DP_LINK_N, m_n.link_n); + } else { + I915_WRITE(PIPEB_GMCH_DATA_M, + ((m_n.tu - 1) << PIPE_GMCH_DATA_M_TU_SIZE_SHIFT) | + m_n.gmch_m); + I915_WRITE(PIPEB_GMCH_DATA_N, + m_n.gmch_n); + I915_WRITE(PIPEB_DP_LINK_M, m_n.link_m); + I915_WRITE(PIPEB_DP_LINK_N, m_n.link_n); + } + } +} + +static void +intel_dp_mode_set(struct drm_encoder *encoder, struct drm_display_mode *mode, + struct drm_display_mode *adjusted_mode) +{ + struct intel_output *intel_output = enc_to_intel_output(encoder); + struct intel_dp_priv *dp_priv = intel_output->dev_priv; + struct drm_crtc *crtc = intel_output->enc.crtc; + struct intel_crtc *intel_crtc = to_intel_crtc(crtc); + + dp_priv->DP = (DP_LINK_TRAIN_OFF | + DP_VOLTAGE_0_4 | + DP_PRE_EMPHASIS_0 | + DP_SYNC_VS_HIGH | + DP_SYNC_HS_HIGH); + + switch (dp_priv->lane_count) { + case 1: + dp_priv->DP |= DP_PORT_WIDTH_1; + break; + case 2: + dp_priv->DP |= DP_PORT_WIDTH_2; + break; + case 4: + dp_priv->DP |= DP_PORT_WIDTH_4; + break; + } + if (dp_priv->has_audio) + dp_priv->DP |= DP_AUDIO_OUTPUT_ENABLE; + + memset(dp_priv->link_configuration, 0, DP_LINK_CONFIGURATION_SIZE); + dp_priv->link_configuration[0] = dp_priv->link_bw; + dp_priv->link_configuration[1] = dp_priv->lane_count; + + /* + * Check for DPCD version > 1.1, + * enable enahanced frame stuff in that case + */ + if (dp_priv->dpcd[0] >= 0x11) { + dp_priv->link_configuration[1] |= DP_LANE_COUNT_ENHANCED_FRAME_EN; + dp_priv->DP |= DP_ENHANCED_FRAMING; + } + + if (intel_crtc->pipe == 1) + dp_priv->DP |= DP_PIPEB_SELECT; + + if (IS_eDP(intel_output)) { + /* don't miss out required setting for eDP */ + dp_priv->DP |= DP_PLL_ENABLE; + if (adjusted_mode->clock < 200000) + dp_priv->DP |= DP_PLL_FREQ_160MHZ; + else + dp_priv->DP |= DP_PLL_FREQ_270MHZ; + } +} + +static void igdng_edp_backlight_on (struct drm_device *dev) +{ + struct drm_i915_private *dev_priv = dev->dev_private; + u32 pp; + + DRM_DEBUG("\n"); + pp = I915_READ(PCH_PP_CONTROL); + pp |= EDP_BLC_ENABLE; + I915_WRITE(PCH_PP_CONTROL, pp); +} + +static void igdng_edp_backlight_off (struct drm_device *dev) +{ + struct drm_i915_private *dev_priv = dev->dev_private; + u32 pp; + + DRM_DEBUG("\n"); + pp = I915_READ(PCH_PP_CONTROL); + pp &= ~EDP_BLC_ENABLE; + I915_WRITE(PCH_PP_CONTROL, pp); +} + +static void +intel_dp_dpms(struct drm_encoder *encoder, int mode) +{ + struct intel_output *intel_output = enc_to_intel_output(encoder); + struct intel_dp_priv *dp_priv = intel_output->dev_priv; + struct drm_device *dev = intel_output->base.dev; + struct drm_i915_private *dev_priv = dev->dev_private; + uint32_t dp_reg = I915_READ(dp_priv->output_reg); + + if (mode != DRM_MODE_DPMS_ON) { + if (dp_reg & DP_PORT_EN) { + 
intel_dp_link_down(intel_output, dp_priv->DP); + if (IS_eDP(intel_output)) + igdng_edp_backlight_off(dev); + } + } else { + if (!(dp_reg & DP_PORT_EN)) { + intel_dp_link_train(intel_output, dp_priv->DP, dp_priv->link_configuration); + if (IS_eDP(intel_output)) + igdng_edp_backlight_on(dev); + } + } + dp_priv->dpms_mode = mode; +} + +/* + * Fetch AUX CH registers 0x202 - 0x207 which contain + * link status information + */ +static bool +intel_dp_get_link_status(struct intel_output *intel_output, + uint8_t link_status[DP_LINK_STATUS_SIZE]) +{ + int ret; + + ret = intel_dp_aux_native_read(intel_output, + DP_LANE0_1_STATUS, + link_status, DP_LINK_STATUS_SIZE); + if (ret != DP_LINK_STATUS_SIZE) + return false; + return true; +} + +static uint8_t +intel_dp_link_status(uint8_t link_status[DP_LINK_STATUS_SIZE], + int r) +{ + return link_status[r - DP_LANE0_1_STATUS]; +} + +static void +intel_dp_save(struct drm_connector *connector) +{ + struct intel_output *intel_output = to_intel_output(connector); + struct drm_device *dev = intel_output->base.dev; + struct drm_i915_private *dev_priv = dev->dev_private; + struct intel_dp_priv *dp_priv = intel_output->dev_priv; + + dp_priv->save_DP = I915_READ(dp_priv->output_reg); + intel_dp_aux_native_read(intel_output, DP_LINK_BW_SET, + dp_priv->save_link_configuration, + sizeof (dp_priv->save_link_configuration)); +} + +static uint8_t +intel_get_adjust_request_voltage(uint8_t link_status[DP_LINK_STATUS_SIZE], + int lane) +{ + int i = DP_ADJUST_REQUEST_LANE0_1 + (lane >> 1); + int s = ((lane & 1) ? + DP_ADJUST_VOLTAGE_SWING_LANE1_SHIFT : + DP_ADJUST_VOLTAGE_SWING_LANE0_SHIFT); + uint8_t l = intel_dp_link_status(link_status, i); + + return ((l >> s) & 3) << DP_TRAIN_VOLTAGE_SWING_SHIFT; +} + +static uint8_t +intel_get_adjust_request_pre_emphasis(uint8_t link_status[DP_LINK_STATUS_SIZE], + int lane) +{ + int i = DP_ADJUST_REQUEST_LANE0_1 + (lane >> 1); + int s = ((lane & 1) ? 
+ DP_ADJUST_PRE_EMPHASIS_LANE1_SHIFT : + DP_ADJUST_PRE_EMPHASIS_LANE0_SHIFT); + uint8_t l = intel_dp_link_status(link_status, i); + + return ((l >> s) & 3) << DP_TRAIN_PRE_EMPHASIS_SHIFT; +} + + +#if 0 +static char *voltage_names[] = { + "0.4V", "0.6V", "0.8V", "1.2V" +}; +static char *pre_emph_names[] = { + "0dB", "3.5dB", "6dB", "9.5dB" +}; +static char *link_train_names[] = { + "pattern 1", "pattern 2", "idle", "off" +}; +#endif + +/* + * These are source-specific values; current Intel hardware supports + * a maximum voltage of 800mV and a maximum pre-emphasis of 6dB + */ +#define I830_DP_VOLTAGE_MAX DP_TRAIN_VOLTAGE_SWING_800 + +static uint8_t +intel_dp_pre_emphasis_max(uint8_t voltage_swing) +{ + switch (voltage_swing & DP_TRAIN_VOLTAGE_SWING_MASK) { + case DP_TRAIN_VOLTAGE_SWING_400: + return DP_TRAIN_PRE_EMPHASIS_6; + case DP_TRAIN_VOLTAGE_SWING_600: + return DP_TRAIN_PRE_EMPHASIS_6; + case DP_TRAIN_VOLTAGE_SWING_800: + return DP_TRAIN_PRE_EMPHASIS_3_5; + case DP_TRAIN_VOLTAGE_SWING_1200: + default: + return DP_TRAIN_PRE_EMPHASIS_0; + } +} + +static void +intel_get_adjust_train(struct intel_output *intel_output, + uint8_t link_status[DP_LINK_STATUS_SIZE], + int lane_count, + uint8_t train_set[4]) +{ + uint8_t v = 0; + uint8_t p = 0; + int lane; + + for (lane = 0; lane < lane_count; lane++) { + uint8_t this_v = intel_get_adjust_request_voltage(link_status, lane); + uint8_t this_p = intel_get_adjust_request_pre_emphasis(link_status, lane); + + if (this_v > v) + v = this_v; + if (this_p > p) + p = this_p; + } + + if (v >= I830_DP_VOLTAGE_MAX) + v = I830_DP_VOLTAGE_MAX | DP_TRAIN_MAX_SWING_REACHED; + + if (p >= intel_dp_pre_emphasis_max(v)) + p = intel_dp_pre_emphasis_max(v) | DP_TRAIN_MAX_PRE_EMPHASIS_REACHED; + + for (lane = 0; lane < 4; lane++) + train_set[lane] = v | p; +} + +static uint32_t +intel_dp_signal_levels(uint8_t train_set, int lane_count) +{ + uint32_t signal_levels = 0; + + switch (train_set & DP_TRAIN_VOLTAGE_SWING_MASK) { + case DP_TRAIN_VOLTAGE_SWING_400: + default: + signal_levels |= DP_VOLTAGE_0_4; + break; + case DP_TRAIN_VOLTAGE_SWING_600: + signal_levels |= DP_VOLTAGE_0_6; + break; + case DP_TRAIN_VOLTAGE_SWING_800: + signal_levels |= DP_VOLTAGE_0_8; + break; + case DP_TRAIN_VOLTAGE_SWING_1200: + signal_levels |= DP_VOLTAGE_1_2; + break; + } + switch (train_set & DP_TRAIN_PRE_EMPHASIS_MASK) { + case DP_TRAIN_PRE_EMPHASIS_0: + default: + signal_levels |= DP_PRE_EMPHASIS_0; + break; + case DP_TRAIN_PRE_EMPHASIS_3_5: + signal_levels |= DP_PRE_EMPHASIS_3_5; + break; + case DP_TRAIN_PRE_EMPHASIS_6: + signal_levels |= DP_PRE_EMPHASIS_6; + break; + case DP_TRAIN_PRE_EMPHASIS_9_5: + signal_levels |= DP_PRE_EMPHASIS_9_5; + break; + } + return signal_levels; +} + +static uint8_t +intel_get_lane_status(uint8_t link_status[DP_LINK_STATUS_SIZE], + int lane) +{ + int i = DP_LANE0_1_STATUS + (lane >> 1); + int s = (lane & 1) * 4; + uint8_t l = intel_dp_link_status(link_status, i); + + return (l >> s) & 0xf; +} + +/* Check for clock recovery is done on all channels */ +static bool +intel_clock_recovery_ok(uint8_t link_status[DP_LINK_STATUS_SIZE], int lane_count) +{ + int lane; + uint8_t lane_status; + + for (lane = 0; lane < lane_count; lane++) { + lane_status = intel_get_lane_status(link_status, lane); + if ((lane_status & DP_LANE_CR_DONE) == 0) + return false; + } + return true; +} + +/* Check to see if channel eq is done on all channels */ +#define CHANNEL_EQ_BITS (DP_LANE_CR_DONE|\ + DP_LANE_CHANNEL_EQ_DONE|\ + DP_LANE_SYMBOL_LOCKED) +static bool +intel_channel_eq_ok(uint8_t 
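
The adjust-request bytes read back from the sink pack two lanes per byte, and the driver trains every lane at the highest level any lane asked for. A standalone sketch of that decode (requested_swing() and requested_preemph() are illustrative helpers and the DPCD bytes are invented; intel_get_adjust_train() above additionally clamps to the source's maximum swing/pre-emphasis and sets the MAX_*_REACHED flags):

    #include <stdint.h>
    #include <stdio.h>

    #define DP_TRAIN_VOLTAGE_SWING_SHIFT 0
    #define DP_TRAIN_PRE_EMPHASIS_SHIFT  3

    /* one ADJUST_REQUEST byte holds two lanes: low nibble even lane, high nibble odd lane */
    static uint8_t requested_swing(const uint8_t adjust[2], int lane)
    {
        int shift = (lane & 1) ? 4 : 0;     /* swing is bits 0-1 / 4-5 */

        return ((adjust[lane >> 1] >> shift) & 3) << DP_TRAIN_VOLTAGE_SWING_SHIFT;
    }

    static uint8_t requested_preemph(const uint8_t adjust[2], int lane)
    {
        int shift = (lane & 1) ? 6 : 2;     /* pre-emphasis is bits 2-3 / 6-7 */

        return ((adjust[lane >> 1] >> shift) & 3) << DP_TRAIN_PRE_EMPHASIS_SHIFT;
    }

    int main(void)
    {
        /* made-up DPCD 0x206/0x207 contents: the lanes ask for mixed levels */
        uint8_t adjust[2] = { 0x21, 0x10 };
        uint8_t v = 0, p = 0;
        int lane;

        for (lane = 0; lane < 4; lane++) {
            if (requested_swing(adjust, lane) > v)
                v = requested_swing(adjust, lane);
            if (requested_preemph(adjust, lane) > p)
                p = requested_preemph(adjust, lane);
        }
        printf("train_set byte = 0x%02x\n", v | p);   /* 0x02: 800 mV, 0 dB */
        return 0;
    }
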
link_status[DP_LINK_STATUS_SIZE], int lane_count) +{ + uint8_t lane_align; + uint8_t lane_status; + int lane; + + lane_align = intel_dp_link_status(link_status, + DP_LANE_ALIGN_STATUS_UPDATED); + if ((lane_align & DP_INTERLANE_ALIGN_DONE) == 0) + return false; + for (lane = 0; lane < lane_count; lane++) { + lane_status = intel_get_lane_status(link_status, lane); + if ((lane_status & CHANNEL_EQ_BITS) != CHANNEL_EQ_BITS) + return false; + } + return true; +} + +static bool +intel_dp_set_link_train(struct intel_output *intel_output, + uint32_t dp_reg_value, + uint8_t dp_train_pat, + uint8_t train_set[4], + bool first) +{ + struct drm_device *dev = intel_output->base.dev; + struct drm_i915_private *dev_priv = dev->dev_private; + struct intel_dp_priv *dp_priv = intel_output->dev_priv; + int ret; + + I915_WRITE(dp_priv->output_reg, dp_reg_value); + POSTING_READ(dp_priv->output_reg); + if (first) + intel_wait_for_vblank(dev); + + intel_dp_aux_native_write_1(intel_output, + DP_TRAINING_PATTERN_SET, + dp_train_pat); + + ret = intel_dp_aux_native_write(intel_output, + DP_TRAINING_LANE0_SET, train_set, 4); + if (ret != 4) + return false; + + return true; +} + +static void +intel_dp_link_train(struct intel_output *intel_output, uint32_t DP, + uint8_t link_configuration[DP_LINK_CONFIGURATION_SIZE]) +{ + struct drm_device *dev = intel_output->base.dev; + struct drm_i915_private *dev_priv = dev->dev_private; + struct intel_dp_priv *dp_priv = intel_output->dev_priv; + uint8_t train_set[4]; + uint8_t link_status[DP_LINK_STATUS_SIZE]; + int i; + uint8_t voltage; + bool clock_recovery = false; + bool channel_eq = false; + bool first = true; + int tries; + + /* Write the link configuration data */ + intel_dp_aux_native_write(intel_output, 0x100, + link_configuration, DP_LINK_CONFIGURATION_SIZE); + + DP |= DP_PORT_EN; + DP &= ~DP_LINK_TRAIN_MASK; + memset(train_set, 0, 4); + voltage = 0xff; + tries = 0; + clock_recovery = false; + for (;;) { + /* Use train_set[0] to set the voltage and pre emphasis values */ + uint32_t signal_levels = intel_dp_signal_levels(train_set[0], dp_priv->lane_count); + DP = (DP & ~(DP_VOLTAGE_MASK|DP_PRE_EMPHASIS_MASK)) | signal_levels; + + if (!intel_dp_set_link_train(intel_output, DP | DP_LINK_TRAIN_PAT_1, + DP_TRAINING_PATTERN_1, train_set, first)) + break; + first = false; + /* Set training pattern 1 */ + + udelay(100); + if (!intel_dp_get_link_status(intel_output, link_status)) + break; + + if (intel_clock_recovery_ok(link_status, dp_priv->lane_count)) { + clock_recovery = true; + break; + } + + /* Check to see if we've tried the max voltage */ + for (i = 0; i < dp_priv->lane_count; i++) + if ((train_set[i] & DP_TRAIN_MAX_SWING_REACHED) == 0) + break; + if (i == dp_priv->lane_count) + break; + + /* Check to see if we've tried the same voltage 5 times */ + if ((train_set[0] & DP_TRAIN_VOLTAGE_SWING_MASK) == voltage) { + ++tries; + if (tries == 5) + break; + } else + tries = 0; + voltage = train_set[0] & DP_TRAIN_VOLTAGE_SWING_MASK; + + /* Compute new train_set as requested by target */ + intel_get_adjust_train(intel_output, link_status, dp_priv->lane_count, train_set); + } + + /* channel equalization */ + tries = 0; + channel_eq = false; + for (;;) { + /* Use train_set[0] to set the voltage and pre emphasis values */ + uint32_t signal_levels = intel_dp_signal_levels(train_set[0], dp_priv->lane_count); + DP = (DP & ~(DP_VOLTAGE_MASK|DP_PRE_EMPHASIS_MASK)) | signal_levels; + + /* channel eq pattern */ + if (!intel_dp_set_link_train(intel_output, DP | DP_LINK_TRAIN_PAT_2, + 
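
Channel equalization is considered done only when every active lane reports clock recovery, equalization and symbol lock. A standalone sketch of the per-lane nibble check with invented status bytes (the real intel_channel_eq_ok() above also requires the interlane-align bit from DPCD 0x204):

    #include <stdbool.h>
    #include <stdint.h>
    #include <stdio.h>

    #define DP_LANE_CR_DONE          (1 << 0)
    #define DP_LANE_CHANNEL_EQ_DONE  (1 << 1)
    #define DP_LANE_SYMBOL_LOCKED    (1 << 2)
    #define CHANNEL_EQ_BITS (DP_LANE_CR_DONE | DP_LANE_CHANNEL_EQ_DONE | DP_LANE_SYMBOL_LOCKED)

    /* link_status[0..1] mirror DPCD 0x202/0x203; each byte covers two lanes */
    static uint8_t lane_status(const uint8_t *link_status, int lane)
    {
        return (link_status[lane >> 1] >> ((lane & 1) * 4)) & 0xf;
    }

    static bool channel_eq_ok(const uint8_t *link_status, int lane_count)
    {
        int lane;

        for (lane = 0; lane < lane_count; lane++)
            if ((lane_status(link_status, lane) & CHANNEL_EQ_BITS) != CHANNEL_EQ_BITS)
                return false;
        return true;
    }

    int main(void)
    {
        /* made-up status: lanes 0, 1 and 3 fully trained, lane 2 lacks symbol lock */
        uint8_t link_status[2] = { 0x77, 0x73 };

        printf("2 lanes: %s\n", channel_eq_ok(link_status, 2) ? "done" : "keep training");
        printf("4 lanes: %s\n", channel_eq_ok(link_status, 4) ? "done" : "keep training");
        return 0;
    }
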
DP_TRAINING_PATTERN_2, train_set, + false)) + break; + + udelay(400); + if (!intel_dp_get_link_status(intel_output, link_status)) + break; + + if (intel_channel_eq_ok(link_status, dp_priv->lane_count)) { + channel_eq = true; + break; + } + + /* Try 5 times */ + if (tries > 5) + break; + + /* Compute new train_set as requested by target */ + intel_get_adjust_train(intel_output, link_status, dp_priv->lane_count, train_set); + ++tries; + } + + I915_WRITE(dp_priv->output_reg, DP | DP_LINK_TRAIN_OFF); + POSTING_READ(dp_priv->output_reg); + intel_dp_aux_native_write_1(intel_output, + DP_TRAINING_PATTERN_SET, DP_TRAINING_PATTERN_DISABLE); +} + +static void +intel_dp_link_down(struct intel_output *intel_output, uint32_t DP) +{ + struct drm_device *dev = intel_output->base.dev; + struct drm_i915_private *dev_priv = dev->dev_private; + struct intel_dp_priv *dp_priv = intel_output->dev_priv; + + DRM_DEBUG("\n"); + + if (IS_eDP(intel_output)) { + DP &= ~DP_PLL_ENABLE; + I915_WRITE(dp_priv->output_reg, DP); + POSTING_READ(dp_priv->output_reg); + udelay(100); + } + + DP &= ~DP_LINK_TRAIN_MASK; + I915_WRITE(dp_priv->output_reg, DP | DP_LINK_TRAIN_PAT_IDLE); + POSTING_READ(dp_priv->output_reg); + + udelay(17000); + + if (IS_eDP(intel_output)) + DP |= DP_LINK_TRAIN_OFF; + I915_WRITE(dp_priv->output_reg, DP & ~DP_PORT_EN); + POSTING_READ(dp_priv->output_reg); +} + +static void +intel_dp_restore(struct drm_connector *connector) +{ + struct intel_output *intel_output = to_intel_output(connector); + struct intel_dp_priv *dp_priv = intel_output->dev_priv; + + if (dp_priv->save_DP & DP_PORT_EN) + intel_dp_link_train(intel_output, dp_priv->save_DP, dp_priv->save_link_configuration); + else + intel_dp_link_down(intel_output, dp_priv->save_DP); +} + +/* + * According to DP spec + * 5.1.2: + * 1. Read DPCD + * 2. Configure link according to Receiver Capabilities + * 3. Use Link Training from 2.5.3.3 and 3.5.1.3 + * 4. Check link status on receipt of hot-plug interrupt + */ + +static void +intel_dp_check_link_status(struct intel_output *intel_output) +{ + struct intel_dp_priv *dp_priv = intel_output->dev_priv; + uint8_t link_status[DP_LINK_STATUS_SIZE]; + + if (!intel_output->enc.crtc) + return; + + if (!intel_dp_get_link_status(intel_output, link_status)) { + intel_dp_link_down(intel_output, dp_priv->DP); + return; + } + + if (!intel_channel_eq_ok(link_status, dp_priv->lane_count)) + intel_dp_link_train(intel_output, dp_priv->DP, dp_priv->link_configuration); +} + +static enum drm_connector_status +igdng_dp_detect(struct drm_connector *connector) +{ + struct intel_output *intel_output = to_intel_output(connector); + struct intel_dp_priv *dp_priv = intel_output->dev_priv; + enum drm_connector_status status; + + status = connector_status_disconnected; + if (intel_dp_aux_native_read(intel_output, + 0x000, dp_priv->dpcd, + sizeof (dp_priv->dpcd)) == sizeof (dp_priv->dpcd)) + { + if (dp_priv->dpcd[0] != 0) + status = connector_status_connected; + } + return status; +} + +/** + * Uses CRT_HOTPLUG_EN and CRT_HOTPLUG_STAT to detect DP connection. + * + * \return true if DP port is connected. + * \return false if DP port is disconnected. 
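
The IGDNG path above decides purely from a short DPCD read over AUX; the non-PCH path that follows first checks the port's hot-plug live-status bit and then does the same read. Stripped of the register access, that reduces to the sketch below (dp_detect() is an illustrative stand-in with the hardware state passed in as plain values, not a driver function):

    #include <stdbool.h>
    #include <stdint.h>
    #include <stdio.h>

    enum status { DISCONNECTED, CONNECTED };

    static enum status dp_detect(bool hotplug_live, bool aux_read_ok, uint8_t dpcd_rev)
    {
        if (!hotplug_live)
            return DISCONNECTED;              /* nothing on the port */
        if (aux_read_ok && dpcd_rev != 0)
            return CONNECTED;                 /* sink answered with a sane DPCD */
        return DISCONNECTED;                  /* AUX timed out or empty DPCD */
    }

    int main(void)
    {
        printf("%d\n", dp_detect(true, true, 0x11));    /* 1: connected */
        printf("%d\n", dp_detect(true, false, 0x00));   /* 0: AUX read failed */
        return 0;
    }
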
+ */ +static enum drm_connector_status +intel_dp_detect(struct drm_connector *connector) +{ + struct intel_output *intel_output = to_intel_output(connector); + struct drm_device *dev = intel_output->base.dev; + struct drm_i915_private *dev_priv = dev->dev_private; + struct intel_dp_priv *dp_priv = intel_output->dev_priv; + uint32_t temp, bit; + enum drm_connector_status status; + + dp_priv->has_audio = false; + + if (IS_IGDNG(dev)) + return igdng_dp_detect(connector); + + temp = I915_READ(PORT_HOTPLUG_EN); + + I915_WRITE(PORT_HOTPLUG_EN, + temp | + DPB_HOTPLUG_INT_EN | + DPC_HOTPLUG_INT_EN | + DPD_HOTPLUG_INT_EN); + + POSTING_READ(PORT_HOTPLUG_EN); + + switch (dp_priv->output_reg) { + case DP_B: + bit = DPB_HOTPLUG_INT_STATUS; + break; + case DP_C: + bit = DPC_HOTPLUG_INT_STATUS; + break; + case DP_D: + bit = DPD_HOTPLUG_INT_STATUS; + break; + default: + return connector_status_unknown; + } + + temp = I915_READ(PORT_HOTPLUG_STAT); + + if ((temp & bit) == 0) + return connector_status_disconnected; + + status = connector_status_disconnected; + if (intel_dp_aux_native_read(intel_output, + 0x000, dp_priv->dpcd, + sizeof (dp_priv->dpcd)) == sizeof (dp_priv->dpcd)) + { + if (dp_priv->dpcd[0] != 0) + status = connector_status_connected; + } + return status; +} + +static int intel_dp_get_modes(struct drm_connector *connector) +{ + struct intel_output *intel_output = to_intel_output(connector); + struct drm_device *dev = intel_output->base.dev; + struct drm_i915_private *dev_priv = dev->dev_private; + int ret; + + /* We should parse the EDID data and find out if it has an audio sink + */ + + ret = intel_ddc_get_modes(intel_output); + if (ret) + return ret; + + /* if eDP has no EDID, try to use fixed panel mode from VBT */ + if (IS_eDP(intel_output)) { + if (dev_priv->panel_fixed_mode != NULL) { + struct drm_display_mode *mode; + mode = drm_mode_duplicate(dev, dev_priv->panel_fixed_mode); + drm_mode_probed_add(connector, mode); + return 1; + } + } + return 0; +} + +static void +intel_dp_destroy (struct drm_connector *connector) +{ + struct intel_output *intel_output = to_intel_output(connector); + + if (intel_output->i2c_bus) + intel_i2c_destroy(intel_output->i2c_bus); + drm_sysfs_connector_remove(connector); + drm_connector_cleanup(connector); + kfree(intel_output); +} + +static const struct drm_encoder_helper_funcs intel_dp_helper_funcs = { + .dpms = intel_dp_dpms, + .mode_fixup = intel_dp_mode_fixup, + .prepare = intel_encoder_prepare, + .mode_set = intel_dp_mode_set, + .commit = intel_encoder_commit, +}; + +static const struct drm_connector_funcs intel_dp_connector_funcs = { + .dpms = drm_helper_connector_dpms, + .save = intel_dp_save, + .restore = intel_dp_restore, + .detect = intel_dp_detect, + .fill_modes = drm_helper_probe_single_connector_modes, + .destroy = intel_dp_destroy, +}; + +static const struct drm_connector_helper_funcs intel_dp_connector_helper_funcs = { + .get_modes = intel_dp_get_modes, + .mode_valid = intel_dp_mode_valid, + .best_encoder = intel_best_encoder, +}; + +static void intel_dp_enc_destroy(struct drm_encoder *encoder) +{ + drm_encoder_cleanup(encoder); +} + +static const struct drm_encoder_funcs intel_dp_enc_funcs = { + .destroy = intel_dp_enc_destroy, +}; + +void +intel_dp_hot_plug(struct intel_output *intel_output) +{ + struct intel_dp_priv *dp_priv = intel_output->dev_priv; + + if (dp_priv->dpms_mode == DRM_MODE_DPMS_ON) + intel_dp_check_link_status(intel_output); +} + +void +intel_dp_init(struct drm_device *dev, int output_reg) +{ + struct drm_i915_private 
*dev_priv = dev->dev_private; + struct drm_connector *connector; + struct intel_output *intel_output; + struct intel_dp_priv *dp_priv; + const char *name = NULL; + + intel_output = kcalloc(sizeof(struct intel_output) + + sizeof(struct intel_dp_priv), 1, GFP_KERNEL); + if (!intel_output) + return; + + dp_priv = (struct intel_dp_priv *)(intel_output + 1); + + connector = &intel_output->base; + drm_connector_init(dev, connector, &intel_dp_connector_funcs, + DRM_MODE_CONNECTOR_DisplayPort); + drm_connector_helper_add(connector, &intel_dp_connector_helper_funcs); + + if (output_reg == DP_A) + intel_output->type = INTEL_OUTPUT_EDP; + else + intel_output->type = INTEL_OUTPUT_DISPLAYPORT; + + connector->interlace_allowed = true; + connector->doublescan_allowed = 0; + + dp_priv->intel_output = intel_output; + dp_priv->output_reg = output_reg; + dp_priv->has_audio = false; + dp_priv->dpms_mode = DRM_MODE_DPMS_ON; + intel_output->dev_priv = dp_priv; + + drm_encoder_init(dev, &intel_output->enc, &intel_dp_enc_funcs, + DRM_MODE_ENCODER_TMDS); + drm_encoder_helper_add(&intel_output->enc, &intel_dp_helper_funcs); + + drm_mode_connector_attach_encoder(&intel_output->base, + &intel_output->enc); + drm_sysfs_connector_add(connector); + + /* Set up the DDC bus. */ + switch (output_reg) { + case DP_A: + name = "DPDDC-A"; + break; + case DP_B: + case PCH_DP_B: + name = "DPDDC-B"; + break; + case DP_C: + case PCH_DP_C: + name = "DPDDC-C"; + break; + case DP_D: + case PCH_DP_D: + name = "DPDDC-D"; + break; + } + + intel_dp_i2c_init(intel_output, name); + + intel_output->ddc_bus = &dp_priv->adapter; + intel_output->hot_plug = intel_dp_hot_plug; + + if (output_reg == DP_A) { + /* initialize panel mode from VBT if available for eDP */ + if (dev_priv->lfp_lvds_vbt_mode) { + dev_priv->panel_fixed_mode = + drm_mode_duplicate(dev, dev_priv->lfp_lvds_vbt_mode); + if (dev_priv->panel_fixed_mode) { + dev_priv->panel_fixed_mode->type |= + DRM_MODE_TYPE_PREFERRED; + } + } + } + + /* For G4X desktop chip, PEG_BAND_GAP_DATA 3:0 must first be written + * 0xd. Failure to do so will result in spurious interrupts being + * generated on the port when a cable is not attached. + */ + if (IS_G4X(dev) && !IS_GM45(dev)) { + u32 temp = I915_READ(PEG_BAND_GAP_DATA); + I915_WRITE(PEG_BAND_GAP_DATA, (temp & ~0xf) | 0xd); + } +} diff --git a/drivers/gpu/drm/i915/intel_dp.h b/drivers/gpu/drm/i915/intel_dp.h new file mode 100644 index 00000000000..2b38054d3b6 --- /dev/null +++ b/drivers/gpu/drm/i915/intel_dp.h @@ -0,0 +1,144 @@ +/* + * Copyright © 2008 Keith Packard + * + * Permission to use, copy, modify, distribute, and sell this software and its + * documentation for any purpose is hereby granted without fee, provided that + * the above copyright notice appear in all copies and that both that copyright + * notice and this permission notice appear in supporting documentation, and + * that the name of the copyright holders not be used in advertising or + * publicity pertaining to distribution of the software without specific, + * written prior permission. The copyright holders make no representations + * about the suitability of this software for any purpose. It is provided "as + * is" without express or implied warranty. 
+ * + * THE COPYRIGHT HOLDERS DISCLAIM ALL WARRANTIES WITH REGARD TO THIS SOFTWARE, + * INCLUDING ALL IMPLIED WARRANTIES OF MERCHANTABILITY AND FITNESS, IN NO + * EVENT SHALL THE COPYRIGHT HOLDERS BE LIABLE FOR ANY SPECIAL, INDIRECT OR + * CONSEQUENTIAL DAMAGES OR ANY DAMAGES WHATSOEVER RESULTING FROM LOSS OF USE, + * DATA OR PROFITS, WHETHER IN AN ACTION OF CONTRACT, NEGLIGENCE OR OTHER + * TORTIOUS ACTION, ARISING OUT OF OR IN CONNECTION WITH THE USE OR PERFORMANCE + * OF THIS SOFTWARE. + */ + +#ifndef _INTEL_DP_H_ +#define _INTEL_DP_H_ + +/* From the VESA DisplayPort spec */ + +#define AUX_NATIVE_WRITE 0x8 +#define AUX_NATIVE_READ 0x9 +#define AUX_I2C_WRITE 0x0 +#define AUX_I2C_READ 0x1 +#define AUX_I2C_STATUS 0x2 +#define AUX_I2C_MOT 0x4 + +#define AUX_NATIVE_REPLY_ACK (0x0 << 4) +#define AUX_NATIVE_REPLY_NACK (0x1 << 4) +#define AUX_NATIVE_REPLY_DEFER (0x2 << 4) +#define AUX_NATIVE_REPLY_MASK (0x3 << 4) + +#define AUX_I2C_REPLY_ACK (0x0 << 6) +#define AUX_I2C_REPLY_NACK (0x1 << 6) +#define AUX_I2C_REPLY_DEFER (0x2 << 6) +#define AUX_I2C_REPLY_MASK (0x3 << 6) + +/* AUX CH addresses */ +#define DP_LINK_BW_SET 0x100 +# define DP_LINK_BW_1_62 0x06 +# define DP_LINK_BW_2_7 0x0a + +#define DP_LANE_COUNT_SET 0x101 +# define DP_LANE_COUNT_MASK 0x0f +# define DP_LANE_COUNT_ENHANCED_FRAME_EN (1 << 7) + +#define DP_TRAINING_PATTERN_SET 0x102 + +# define DP_TRAINING_PATTERN_DISABLE 0 +# define DP_TRAINING_PATTERN_1 1 +# define DP_TRAINING_PATTERN_2 2 +# define DP_TRAINING_PATTERN_MASK 0x3 + +# define DP_LINK_QUAL_PATTERN_DISABLE (0 << 2) +# define DP_LINK_QUAL_PATTERN_D10_2 (1 << 2) +# define DP_LINK_QUAL_PATTERN_ERROR_RATE (2 << 2) +# define DP_LINK_QUAL_PATTERN_PRBS7 (3 << 2) +# define DP_LINK_QUAL_PATTERN_MASK (3 << 2) + +# define DP_RECOVERED_CLOCK_OUT_EN (1 << 4) +# define DP_LINK_SCRAMBLING_DISABLE (1 << 5) + +# define DP_SYMBOL_ERROR_COUNT_BOTH (0 << 6) +# define DP_SYMBOL_ERROR_COUNT_DISPARITY (1 << 6) +# define DP_SYMBOL_ERROR_COUNT_SYMBOL (2 << 6) +# define DP_SYMBOL_ERROR_COUNT_MASK (3 << 6) + +#define DP_TRAINING_LANE0_SET 0x103 +#define DP_TRAINING_LANE1_SET 0x104 +#define DP_TRAINING_LANE2_SET 0x105 +#define DP_TRAINING_LANE3_SET 0x106 + +# define DP_TRAIN_VOLTAGE_SWING_MASK 0x3 +# define DP_TRAIN_VOLTAGE_SWING_SHIFT 0 +# define DP_TRAIN_MAX_SWING_REACHED (1 << 2) +# define DP_TRAIN_VOLTAGE_SWING_400 (0 << 0) +# define DP_TRAIN_VOLTAGE_SWING_600 (1 << 0) +# define DP_TRAIN_VOLTAGE_SWING_800 (2 << 0) +# define DP_TRAIN_VOLTAGE_SWING_1200 (3 << 0) + +# define DP_TRAIN_PRE_EMPHASIS_MASK (3 << 3) +# define DP_TRAIN_PRE_EMPHASIS_0 (0 << 3) +# define DP_TRAIN_PRE_EMPHASIS_3_5 (1 << 3) +# define DP_TRAIN_PRE_EMPHASIS_6 (2 << 3) +# define DP_TRAIN_PRE_EMPHASIS_9_5 (3 << 3) + +# define DP_TRAIN_PRE_EMPHASIS_SHIFT 3 +# define DP_TRAIN_MAX_PRE_EMPHASIS_REACHED (1 << 5) + +#define DP_DOWNSPREAD_CTRL 0x107 +# define DP_SPREAD_AMP_0_5 (1 << 4) + +#define DP_MAIN_LINK_CHANNEL_CODING_SET 0x108 +# define DP_SET_ANSI_8B10B (1 << 0) + +#define DP_LANE0_1_STATUS 0x202 +#define DP_LANE2_3_STATUS 0x203 + +# define DP_LANE_CR_DONE (1 << 0) +# define DP_LANE_CHANNEL_EQ_DONE (1 << 1) +# define DP_LANE_SYMBOL_LOCKED (1 << 2) + +#define DP_LANE_ALIGN_STATUS_UPDATED 0x204 + +#define DP_INTERLANE_ALIGN_DONE (1 << 0) +#define DP_DOWNSTREAM_PORT_STATUS_CHANGED (1 << 6) +#define DP_LINK_STATUS_UPDATED (1 << 7) + +#define DP_SINK_STATUS 0x205 + +#define DP_RECEIVE_PORT_0_STATUS (1 << 0) +#define DP_RECEIVE_PORT_1_STATUS (1 << 1) + +#define DP_ADJUST_REQUEST_LANE0_1 0x206 +#define DP_ADJUST_REQUEST_LANE2_3 0x207 + 
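
The training-lane bytes written during link training are composed from the swing and pre-emphasis fields defined above plus the two "maximum reached" flags. A standalone sketch composing one such byte (the 800 mV / 3.5 dB pairing is just an example; it is a combination at which intel_dp.c in this patch sets both limit flags):

    #include <stdint.h>
    #include <stdio.h>

    /* same values as the defines above */
    #define DP_TRAIN_VOLTAGE_SWING_800         (2 << 0)
    #define DP_TRAIN_MAX_SWING_REACHED         (1 << 2)
    #define DP_TRAIN_PRE_EMPHASIS_3_5          (1 << 3)
    #define DP_TRAIN_MAX_PRE_EMPHASIS_REACHED  (1 << 5)

    int main(void)
    {
        uint8_t lane_set = DP_TRAIN_VOLTAGE_SWING_800 |
                           DP_TRAIN_MAX_SWING_REACHED |
                           DP_TRAIN_PRE_EMPHASIS_3_5 |
                           DP_TRAIN_MAX_PRE_EMPHASIS_REACHED;

        printf("DP_TRAINING_LANE0_SET = 0x%02x\n", lane_set);   /* 0x2e */
        return 0;
    }
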
+#define DP_ADJUST_VOLTAGE_SWING_LANE0_MASK 0x03 +#define DP_ADJUST_VOLTAGE_SWING_LANE0_SHIFT 0 +#define DP_ADJUST_PRE_EMPHASIS_LANE0_MASK 0x0c +#define DP_ADJUST_PRE_EMPHASIS_LANE0_SHIFT 2 +#define DP_ADJUST_VOLTAGE_SWING_LANE1_MASK 0x30 +#define DP_ADJUST_VOLTAGE_SWING_LANE1_SHIFT 4 +#define DP_ADJUST_PRE_EMPHASIS_LANE1_MASK 0xc0 +#define DP_ADJUST_PRE_EMPHASIS_LANE1_SHIFT 6 + +struct i2c_algo_dp_aux_data { + bool running; + u16 address; + int (*aux_ch) (struct i2c_adapter *adapter, + uint8_t *send, int send_bytes, + uint8_t *recv, int recv_bytes); +}; + +int +i2c_dp_aux_add_bus(struct i2c_adapter *adapter); + +#endif /* _INTEL_DP_H_ */ diff --git a/drivers/gpu/drm/i915/intel_dp_i2c.c b/drivers/gpu/drm/i915/intel_dp_i2c.c new file mode 100644 index 00000000000..a63b6f57d2d --- /dev/null +++ b/drivers/gpu/drm/i915/intel_dp_i2c.c @@ -0,0 +1,273 @@ +/* + * Copyright © 2009 Keith Packard + * + * Permission to use, copy, modify, distribute, and sell this software and its + * documentation for any purpose is hereby granted without fee, provided that + * the above copyright notice appear in all copies and that both that copyright + * notice and this permission notice appear in supporting documentation, and + * that the name of the copyright holders not be used in advertising or + * publicity pertaining to distribution of the software without specific, + * written prior permission. The copyright holders make no representations + * about the suitability of this software for any purpose. It is provided "as + * is" without express or implied warranty. + * + * THE COPYRIGHT HOLDERS DISCLAIM ALL WARRANTIES WITH REGARD TO THIS SOFTWARE, + * INCLUDING ALL IMPLIED WARRANTIES OF MERCHANTABILITY AND FITNESS, IN NO + * EVENT SHALL THE COPYRIGHT HOLDERS BE LIABLE FOR ANY SPECIAL, INDIRECT OR + * CONSEQUENTIAL DAMAGES OR ANY DAMAGES WHATSOEVER RESULTING FROM LOSS OF USE, + * DATA OR PROFITS, WHETHER IN AN ACTION OF CONTRACT, NEGLIGENCE OR OTHER + * TORTIOUS ACTION, ARISING OUT OF OR IN CONNECTION WITH THE USE OR PERFORMANCE + * OF THIS SOFTWARE. 
+ */ + +#include <linux/kernel.h> +#include <linux/module.h> +#include <linux/delay.h> +#include <linux/slab.h> +#include <linux/init.h> +#include <linux/errno.h> +#include <linux/sched.h> +#include <linux/i2c.h> +#include "intel_dp.h" +#include "drmP.h" + +/* Run a single AUX_CH I2C transaction, writing/reading data as necessary */ + +#define MODE_I2C_START 1 +#define MODE_I2C_WRITE 2 +#define MODE_I2C_READ 4 +#define MODE_I2C_STOP 8 + +static int +i2c_algo_dp_aux_transaction(struct i2c_adapter *adapter, int mode, + uint8_t write_byte, uint8_t *read_byte) +{ + struct i2c_algo_dp_aux_data *algo_data = adapter->algo_data; + uint16_t address = algo_data->address; + uint8_t msg[5]; + uint8_t reply[2]; + int msg_bytes; + int reply_bytes; + int ret; + + /* Set up the command byte */ + if (mode & MODE_I2C_READ) + msg[0] = AUX_I2C_READ << 4; + else + msg[0] = AUX_I2C_WRITE << 4; + + if (!(mode & MODE_I2C_STOP)) + msg[0] |= AUX_I2C_MOT << 4; + + msg[1] = address >> 8; + msg[2] = address; + + switch (mode) { + case MODE_I2C_WRITE: + msg[3] = 0; + msg[4] = write_byte; + msg_bytes = 5; + reply_bytes = 1; + break; + case MODE_I2C_READ: + msg[3] = 0; + msg_bytes = 4; + reply_bytes = 2; + break; + default: + msg_bytes = 3; + reply_bytes = 1; + break; + } + + for (;;) { + ret = (*algo_data->aux_ch)(adapter, + msg, msg_bytes, + reply, reply_bytes); + if (ret < 0) { + DRM_DEBUG("aux_ch failed %d\n", ret); + return ret; + } + switch (reply[0] & AUX_I2C_REPLY_MASK) { + case AUX_I2C_REPLY_ACK: + if (mode == MODE_I2C_READ) { + *read_byte = reply[1]; + } + return reply_bytes - 1; + case AUX_I2C_REPLY_NACK: + DRM_DEBUG("aux_ch nack\n"); + return -EREMOTEIO; + case AUX_I2C_REPLY_DEFER: + DRM_DEBUG("aux_ch defer\n"); + udelay(100); + break; + default: + DRM_ERROR("aux_ch invalid reply 0x%02x\n", reply[0]); + return -EREMOTEIO; + } + } +} + +/* + * I2C over AUX CH + */ + +/* + * Send the address. If the I2C link is running, this 'restarts' + * the connection with the new address, this is used for doing + * a write followed by a read (as needed for DDC) + */ +static int +i2c_algo_dp_aux_address(struct i2c_adapter *adapter, u16 address, bool reading) +{ + struct i2c_algo_dp_aux_data *algo_data = adapter->algo_data; + int mode = MODE_I2C_START; + int ret; + + if (reading) + mode |= MODE_I2C_READ; + else + mode |= MODE_I2C_WRITE; + algo_data->address = address; + algo_data->running = true; + ret = i2c_algo_dp_aux_transaction(adapter, mode, 0, NULL); + return ret; +} + +/* + * Stop the I2C transaction. 
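
Every byte moved over the DDC channel ends up as one of these AUX transactions, and the command byte is just read/write plus the MOT (middle-of-transaction) flag. A standalone sketch of that command-byte construction (i2c_aux_cmd() is an illustrative stand-in for the msg[0] logic above):

    #include <stdint.h>
    #include <stdio.h>

    #define AUX_I2C_WRITE 0x0
    #define AUX_I2C_READ  0x1
    #define AUX_I2C_MOT   0x4

    #define MODE_I2C_READ 4
    #define MODE_I2C_STOP 8

    static uint8_t i2c_aux_cmd(int mode)
    {
        uint8_t cmd = (mode & MODE_I2C_READ) ? AUX_I2C_READ << 4 : AUX_I2C_WRITE << 4;

        if (!(mode & MODE_I2C_STOP))
            cmd |= AUX_I2C_MOT << 4;   /* keep the I2C transaction open */
        return cmd;
    }

    int main(void)
    {
        printf("read, more to come: 0x%02x\n", i2c_aux_cmd(MODE_I2C_READ));                 /* 0x50 */
        printf("read, then stop:    0x%02x\n", i2c_aux_cmd(MODE_I2C_READ | MODE_I2C_STOP)); /* 0x10 */
        printf("bare address stop:  0x%02x\n", i2c_aux_cmd(MODE_I2C_STOP));                 /* 0x00 */
        return 0;
    }
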
This closes out the link, sending + * a bare address packet with the MOT bit turned off + */ +static void +i2c_algo_dp_aux_stop(struct i2c_adapter *adapter, bool reading) +{ + struct i2c_algo_dp_aux_data *algo_data = adapter->algo_data; + int mode = MODE_I2C_STOP; + + if (reading) + mode |= MODE_I2C_READ; + else + mode |= MODE_I2C_WRITE; + if (algo_data->running) { + (void) i2c_algo_dp_aux_transaction(adapter, mode, 0, NULL); + algo_data->running = false; + } +} + +/* + * Write a single byte to the current I2C address, the + * the I2C link must be running or this returns -EIO + */ +static int +i2c_algo_dp_aux_put_byte(struct i2c_adapter *adapter, u8 byte) +{ + struct i2c_algo_dp_aux_data *algo_data = adapter->algo_data; + int ret; + + if (!algo_data->running) + return -EIO; + + ret = i2c_algo_dp_aux_transaction(adapter, MODE_I2C_WRITE, byte, NULL); + return ret; +} + +/* + * Read a single byte from the current I2C address, the + * I2C link must be running or this returns -EIO + */ +static int +i2c_algo_dp_aux_get_byte(struct i2c_adapter *adapter, u8 *byte_ret) +{ + struct i2c_algo_dp_aux_data *algo_data = adapter->algo_data; + int ret; + + if (!algo_data->running) + return -EIO; + + ret = i2c_algo_dp_aux_transaction(adapter, MODE_I2C_READ, 0, byte_ret); + return ret; +} + +static int +i2c_algo_dp_aux_xfer(struct i2c_adapter *adapter, + struct i2c_msg *msgs, + int num) +{ + int ret = 0; + bool reading = false; + int m; + int b; + + for (m = 0; m < num; m++) { + u16 len = msgs[m].len; + u8 *buf = msgs[m].buf; + reading = (msgs[m].flags & I2C_M_RD) != 0; + ret = i2c_algo_dp_aux_address(adapter, msgs[m].addr, reading); + if (ret < 0) + break; + if (reading) { + for (b = 0; b < len; b++) { + ret = i2c_algo_dp_aux_get_byte(adapter, &buf[b]); + if (ret < 0) + break; + } + } else { + for (b = 0; b < len; b++) { + ret = i2c_algo_dp_aux_put_byte(adapter, buf[b]); + if (ret < 0) + break; + } + } + if (ret < 0) + break; + } + if (ret >= 0) + ret = num; + i2c_algo_dp_aux_stop(adapter, reading); + DRM_DEBUG("dp_aux_xfer return %d\n", ret); + return ret; +} + +static u32 +i2c_algo_dp_aux_functionality(struct i2c_adapter *adapter) +{ + return I2C_FUNC_I2C | I2C_FUNC_SMBUS_EMUL | + I2C_FUNC_SMBUS_READ_BLOCK_DATA | + I2C_FUNC_SMBUS_BLOCK_PROC_CALL | + I2C_FUNC_10BIT_ADDR; +} + +static const struct i2c_algorithm i2c_dp_aux_algo = { + .master_xfer = i2c_algo_dp_aux_xfer, + .functionality = i2c_algo_dp_aux_functionality, +}; + +static void +i2c_dp_aux_reset_bus(struct i2c_adapter *adapter) +{ + (void) i2c_algo_dp_aux_address(adapter, 0, false); + (void) i2c_algo_dp_aux_stop(adapter, false); + +} + +static int +i2c_dp_aux_prepare_bus(struct i2c_adapter *adapter) +{ + adapter->algo = &i2c_dp_aux_algo; + adapter->retries = 3; + i2c_dp_aux_reset_bus(adapter); + return 0; +} + +int +i2c_dp_aux_add_bus(struct i2c_adapter *adapter) +{ + int error; + + error = i2c_dp_aux_prepare_bus(adapter); + if (error) + return error; + error = i2c_add_adapter(adapter); + return error; +} +EXPORT_SYMBOL(i2c_dp_aux_add_bus); diff --git a/drivers/gpu/drm/i915/intel_drv.h b/drivers/gpu/drm/i915/intel_drv.h index cd4b9c5f715..d6f92ea1b55 100644 --- a/drivers/gpu/drm/i915/intel_drv.h +++ b/drivers/gpu/drm/i915/intel_drv.h @@ -54,6 +54,8 @@ #define INTEL_OUTPUT_LVDS 4 #define INTEL_OUTPUT_TVOUT 5 #define INTEL_OUTPUT_HDMI 6 +#define INTEL_OUTPUT_DISPLAYPORT 7 +#define INTEL_OUTPUT_EDP 8 #define INTEL_DVO_CHIP_NONE 0 #define INTEL_DVO_CHIP_LVDS 1 @@ -65,7 +67,6 @@ struct intel_i2c_chan { u32 reg; /* GPIO reg */ struct i2c_adapter adapter; 
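
For reference, this is roughly what an EDID fetch looks like by the time it reaches i2c_algo_dp_aux_xfer(): a one-byte offset write followed by a long read, both to I2C address 0x50. The struct below is a simplified userspace stand-in for struct i2c_msg, for illustration only:

    #include <stdint.h>
    #include <stdio.h>

    struct i2c_msg {          /* cut-down stand-in for the <linux/i2c.h> type */
        uint16_t addr;
        uint16_t flags;
        uint16_t len;
        uint8_t *buf;
    };
    #define I2C_M_RD 0x0001

    int main(void)
    {
        /*
         * Message 0 writes the EDID byte offset, message 1 reads 128 bytes back.
         * The algo above turns message 0 into an addressed AUX write with MOT
         * set, message 1 into a restart followed by 128 single-byte AUX reads,
         * and finishes with a bare address packet (MOT clear) as the stop.
         */
        uint8_t offset = 0;
        uint8_t edid[128];
        struct i2c_msg msgs[2] = {
            { .addr = 0x50, .flags = 0,        .len = 1,            .buf = &offset },
            { .addr = 0x50, .flags = I2C_M_RD, .len = sizeof(edid), .buf = edid },
        };

        printf("%d messages, last one reads %d bytes\n", 2, msgs[1].len);
        return 0;
    }
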
struct i2c_algo_bit_data algo; - u8 slave_addr; }; struct intel_framebuffer { @@ -79,11 +80,12 @@ struct intel_output { struct drm_encoder enc; int type; - struct intel_i2c_chan *i2c_bus; /* for control functions */ - struct intel_i2c_chan *ddc_bus; /* for DDC only stuff */ + struct i2c_adapter *i2c_bus; + struct i2c_adapter *ddc_bus; bool load_detect_temp; bool needs_tv_clock; void *dev_priv; + void (*hot_plug)(struct intel_output *); }; struct intel_crtc { @@ -104,9 +106,9 @@ struct intel_crtc { #define enc_to_intel_output(x) container_of(x, struct intel_output, enc) #define to_intel_framebuffer(x) container_of(x, struct intel_framebuffer, base) -struct intel_i2c_chan *intel_i2c_create(struct drm_device *dev, const u32 reg, - const char *name); -void intel_i2c_destroy(struct intel_i2c_chan *chan); +struct i2c_adapter *intel_i2c_create(struct drm_device *dev, const u32 reg, + const char *name); +void intel_i2c_destroy(struct i2c_adapter *adapter); int intel_ddc_get_modes(struct intel_output *intel_output); extern bool intel_ddc_probe(struct intel_output *intel_output); void intel_i2c_quirk_set(struct drm_device *dev, bool enable); @@ -116,6 +118,12 @@ extern bool intel_sdvo_init(struct drm_device *dev, int output_device); extern void intel_dvo_init(struct drm_device *dev); extern void intel_tv_init(struct drm_device *dev); extern void intel_lvds_init(struct drm_device *dev); +extern void intel_dp_init(struct drm_device *dev, int dp_reg); +void +intel_dp_set_m_n(struct drm_crtc *crtc, struct drm_display_mode *mode, + struct drm_display_mode *adjusted_mode); +extern void intel_edp_link_config (struct intel_output *, int *, int *); + extern void intel_crtc_load_lut(struct drm_crtc *crtc); extern void intel_encoder_prepare (struct drm_encoder *encoder); diff --git a/drivers/gpu/drm/i915/intel_dvo.c b/drivers/gpu/drm/i915/intel_dvo.c index 1ee3007d6ec..13bff20930e 100644 --- a/drivers/gpu/drm/i915/intel_dvo.c +++ b/drivers/gpu/drm/i915/intel_dvo.c @@ -384,10 +384,9 @@ void intel_dvo_init(struct drm_device *dev) { struct intel_output *intel_output; struct intel_dvo_device *dvo; - struct intel_i2c_chan *i2cbus = NULL; + struct i2c_adapter *i2cbus = NULL; int ret = 0; int i; - int gpio_inited = 0; int encoder_type = DRM_MODE_ENCODER_NONE; intel_output = kzalloc (sizeof(struct intel_output), GFP_KERNEL); if (!intel_output) @@ -420,14 +419,11 @@ void intel_dvo_init(struct drm_device *dev) * It appears that everything is on GPIOE except for panels * on i830 laptops, which are on GPIOB (DVOA). */ - if (gpio_inited != gpio) { - if (i2cbus != NULL) - intel_i2c_destroy(i2cbus); - if (!(i2cbus = intel_i2c_create(dev, gpio, - gpio == GPIOB ? "DVOI2C_B" : "DVOI2C_E"))) { - continue; - } - gpio_inited = gpio; + if (i2cbus != NULL) + intel_i2c_destroy(i2cbus); + if (!(i2cbus = intel_i2c_create(dev, gpio, + gpio == GPIOB ? 
"DVOI2C_B" : "DVOI2C_E"))) { + continue; } if (dvo->dev_ops!= NULL) diff --git a/drivers/gpu/drm/i915/intel_fb.c b/drivers/gpu/drm/i915/intel_fb.c index 1af7d68e380..1d30802e773 100644 --- a/drivers/gpu/drm/i915/intel_fb.c +++ b/drivers/gpu/drm/i915/intel_fb.c @@ -453,7 +453,7 @@ static int intelfb_create(struct drm_device *dev, uint32_t fb_width, size = ALIGN(size, PAGE_SIZE); fbo = drm_gem_object_alloc(dev, size); if (!fbo) { - printk(KERN_ERR "failed to allocate framebuffer\n"); + DRM_ERROR("failed to allocate framebuffer\n"); ret = -ENOMEM; goto out; } @@ -610,8 +610,8 @@ static int intelfb_create(struct drm_device *dev, uint32_t fb_width, par->dev = dev; /* To allow resizeing without swapping buffers */ - printk("allocated %dx%d fb: 0x%08x, bo %p\n", intel_fb->base.width, - intel_fb->base.height, obj_priv->gtt_offset, fbo); + DRM_DEBUG("allocated %dx%d fb: 0x%08x, bo %p\n", intel_fb->base.width, + intel_fb->base.height, obj_priv->gtt_offset, fbo); mutex_unlock(&dev->struct_mutex); return 0; @@ -698,13 +698,13 @@ static int intelfb_multi_fb_probe_crtc(struct drm_device *dev, struct drm_crtc * } else intelfb_set_par(info); - printk(KERN_INFO "fb%d: %s frame buffer device\n", info->node, + DRM_INFO("fb%d: %s frame buffer device\n", info->node, info->fix.id); /* Switch back to kernel console on panic */ kernelfb_mode = *modeset; atomic_notifier_chain_register(&panic_notifier_list, &paniced); - printk(KERN_INFO "registered panic notifier\n"); + DRM_DEBUG("registered panic notifier\n"); return 0; } @@ -852,13 +852,13 @@ static int intelfb_single_fb_probe(struct drm_device *dev) } else intelfb_set_par(info); - printk(KERN_INFO "fb%d: %s frame buffer device\n", info->node, + DRM_INFO("fb%d: %s frame buffer device\n", info->node, info->fix.id); /* Switch back to kernel console on panic */ kernelfb_mode = *modeset; atomic_notifier_chain_register(&panic_notifier_list, &paniced); - printk(KERN_INFO "registered panic notifier\n"); + DRM_DEBUG("registered panic notifier\n"); return 0; } @@ -872,8 +872,8 @@ void intelfb_restore(void) { int ret; if ((ret = drm_crtc_helper_set_config(&kernelfb_mode)) != 0) { - printk(KERN_ERR "Failed to restore crtc configuration: %d\n", - ret); + DRM_ERROR("Failed to restore crtc configuration: %d\n", + ret); } } diff --git a/drivers/gpu/drm/i915/intel_hdmi.c b/drivers/gpu/drm/i915/intel_hdmi.c index 4ea2a651b92..1842290cded 100644 --- a/drivers/gpu/drm/i915/intel_hdmi.c +++ b/drivers/gpu/drm/i915/intel_hdmi.c @@ -31,6 +31,7 @@ #include "drmP.h" #include "drm.h" #include "drm_crtc.h" +#include "drm_edid.h" #include "intel_drv.h" #include "i915_drm.h" #include "i915_drv.h" @@ -56,8 +57,7 @@ static void intel_hdmi_mode_set(struct drm_encoder *encoder, sdvox = SDVO_ENCODING_HDMI | SDVO_BORDER_ENABLE | SDVO_VSYNC_ACTIVE_HIGH | - SDVO_HSYNC_ACTIVE_HIGH | - SDVO_NULL_PACKETS_DURING_VSYNC; + SDVO_HSYNC_ACTIVE_HIGH; if (hdmi_priv->has_hdmi_sink) sdvox |= SDVO_AUDIO_ENABLE; @@ -129,83 +129,28 @@ static bool intel_hdmi_mode_fixup(struct drm_encoder *encoder, return true; } -static void -intel_hdmi_sink_detect(struct drm_connector *connector) -{ - struct intel_output *intel_output = to_intel_output(connector); - struct intel_hdmi_priv *hdmi_priv = intel_output->dev_priv; - struct edid *edid = NULL; - - edid = drm_get_edid(&intel_output->base, - &intel_output->ddc_bus->adapter); - if (edid != NULL) { - hdmi_priv->has_hdmi_sink = drm_detect_hdmi_monitor(edid); - kfree(edid); - intel_output->base.display_info.raw_edid = NULL; - } -} - -static enum drm_connector_status 
-igdng_hdmi_detect(struct drm_connector *connector) -{ - struct intel_output *intel_output = to_intel_output(connector); - struct intel_hdmi_priv *hdmi_priv = intel_output->dev_priv; - - /* FIXME hotplug detect */ - - hdmi_priv->has_hdmi_sink = false; - intel_hdmi_sink_detect(connector); - if (hdmi_priv->has_hdmi_sink) - return connector_status_connected; - else - return connector_status_disconnected; -} - static enum drm_connector_status intel_hdmi_detect(struct drm_connector *connector) { - struct drm_device *dev = connector->dev; - struct drm_i915_private *dev_priv = dev->dev_private; struct intel_output *intel_output = to_intel_output(connector); struct intel_hdmi_priv *hdmi_priv = intel_output->dev_priv; - u32 temp, bit; - - if (IS_IGDNG(dev)) - return igdng_hdmi_detect(connector); - - temp = I915_READ(PORT_HOTPLUG_EN); - - switch (hdmi_priv->sdvox_reg) { - case SDVOB: - temp |= HDMIB_HOTPLUG_INT_EN; - break; - case SDVOC: - temp |= HDMIC_HOTPLUG_INT_EN; - break; - default: - return connector_status_unknown; - } - - I915_WRITE(PORT_HOTPLUG_EN, temp); + struct edid *edid = NULL; + enum drm_connector_status status = connector_status_disconnected; - POSTING_READ(PORT_HOTPLUG_EN); + hdmi_priv->has_hdmi_sink = false; + edid = drm_get_edid(&intel_output->base, + intel_output->ddc_bus); - switch (hdmi_priv->sdvox_reg) { - case SDVOB: - bit = HDMIB_HOTPLUG_INT_STATUS; - break; - case SDVOC: - bit = HDMIC_HOTPLUG_INT_STATUS; - break; - default: - return connector_status_unknown; + if (edid) { + if (edid->input & DRM_EDID_INPUT_DIGITAL) { + status = connector_status_connected; + hdmi_priv->has_hdmi_sink = drm_detect_hdmi_monitor(edid); + } + intel_output->base.display_info.raw_edid = NULL; + kfree(edid); } - if ((I915_READ(PORT_HOTPLUG_STAT) & bit) != 0) { - intel_hdmi_sink_detect(connector); - return connector_status_connected; - } else - return connector_status_disconnected; + return status; } static int intel_hdmi_get_modes(struct drm_connector *connector) diff --git a/drivers/gpu/drm/i915/intel_i2c.c b/drivers/gpu/drm/i915/intel_i2c.c index f7061f68d05..62b8bead765 100644 --- a/drivers/gpu/drm/i915/intel_i2c.c +++ b/drivers/gpu/drm/i915/intel_i2c.c @@ -124,6 +124,7 @@ static void set_data(void *data, int state_high) * @output: driver specific output device * @reg: GPIO reg to use * @name: name for this bus + * @slave_addr: slave address (if fixed) * * Creates and registers a new i2c bus with the Linux i2c layer, for use * in output probing and control (e.g. DDC or SDVO control functions). @@ -139,8 +140,8 @@ static void set_data(void *data, int state_high) * %GPIOH * see PRM for details on how these different busses are used. */ -struct intel_i2c_chan *intel_i2c_create(struct drm_device *dev, const u32 reg, - const char *name) +struct i2c_adapter *intel_i2c_create(struct drm_device *dev, const u32 reg, + const char *name) { struct intel_i2c_chan *chan; @@ -174,7 +175,7 @@ struct intel_i2c_chan *intel_i2c_create(struct drm_device *dev, const u32 reg, intel_i2c_quirk_set(dev, false); udelay(20); - return chan; + return &chan->adapter; out_free: kfree(chan); @@ -187,11 +188,16 @@ out_free: * * Unregister the adapter from the i2c layer, then free the structure. 
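The i2c-over-AUX adapter registered by i2c_dp_aux_add_bus() earlier in this patch is meant to be driven through the ordinary i2c core. A minimal caller sketch, assuming only the standard i2c_transfer() interface and the conventional 0x50 EDID slave address; dp_aux_read_edid_block is a hypothetical helper, not part of the patch:

/*
 * Hypothetical caller sketch: fetch one EDID block through an adapter
 * registered with i2c_dp_aux_add_bus().  i2c_transfer() ends up in
 * i2c_algo_dp_aux_xfer(), which starts the AUX transaction, moves the
 * payload byte by byte and finishes with the MOT-off stop packet.
 */
#include <linux/i2c.h>

#define DDC_ADDR	0x50	/* standard EDID slave address */

static int dp_aux_read_edid_block(struct i2c_adapter *adapter, u8 *block)
{
	u8 offset = 0;
	struct i2c_msg msgs[] = {
		{	/* set the EDID offset pointer */
			.addr	= DDC_ADDR,
			.flags	= 0,
			.len	= 1,
			.buf	= &offset,
		},
		{	/* read back 128 bytes of EDID */
			.addr	= DDC_ADDR,
			.flags	= I2C_M_RD,
			.len	= 128,
			.buf	= block,
		},
	};

	/* i2c_algo_dp_aux_xfer() returns the message count on success */
	if (i2c_transfer(adapter, msgs, 2) == 2)
		return 0;
	return -EIO;
}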
*/ -void intel_i2c_destroy(struct intel_i2c_chan *chan) +void intel_i2c_destroy(struct i2c_adapter *adapter) { - if (!chan) + struct intel_i2c_chan *chan; + + if (!adapter) return; + chan = container_of(adapter, + struct intel_i2c_chan, + adapter); i2c_del_adapter(&chan->adapter); kfree(chan); } diff --git a/drivers/gpu/drm/i915/intel_lvds.c b/drivers/gpu/drm/i915/intel_lvds.c index f073ed8432e..3f445a80c55 100644 --- a/drivers/gpu/drm/i915/intel_lvds.c +++ b/drivers/gpu/drm/i915/intel_lvds.c @@ -36,9 +36,25 @@ #include "intel_drv.h" #include "i915_drm.h" #include "i915_drv.h" +#include <linux/acpi.h> #define I915_LVDS "i915_lvds" +/* + * the following four scaling options are defined. + * #define DRM_MODE_SCALE_NON_GPU 0 + * #define DRM_MODE_SCALE_FULLSCREEN 1 + * #define DRM_MODE_SCALE_NO_SCALE 2 + * #define DRM_MODE_SCALE_ASPECT 3 + */ + +/* Private structure for the integrated LVDS support */ +struct intel_lvds_priv { + int fitting_mode; + u32 pfit_control; + u32 pfit_pgm_ratios; +}; + /** * Sets the backlight level. * @@ -213,26 +229,45 @@ static bool intel_lvds_mode_fixup(struct drm_encoder *encoder, struct drm_display_mode *mode, struct drm_display_mode *adjusted_mode) { + /* + * float point operation is not supported . So the PANEL_RATIO_FACTOR + * is defined, which can avoid the float point computation when + * calculating the panel ratio. + */ +#define PANEL_RATIO_FACTOR 8192 struct drm_device *dev = encoder->dev; struct drm_i915_private *dev_priv = dev->dev_private; struct intel_crtc *intel_crtc = to_intel_crtc(encoder->crtc); struct drm_encoder *tmp_encoder; + struct intel_output *intel_output = enc_to_intel_output(encoder); + struct intel_lvds_priv *lvds_priv = intel_output->dev_priv; + u32 pfit_control = 0, pfit_pgm_ratios = 0; + int left_border = 0, right_border = 0, top_border = 0; + int bottom_border = 0; + bool border = 0; + int panel_ratio, desired_ratio, vert_scale, horiz_scale; + int horiz_ratio, vert_ratio; + u32 hsync_width, vsync_width; + u32 hblank_width, vblank_width; + u32 hsync_pos, vsync_pos; /* Should never happen!! */ if (!IS_I965G(dev) && intel_crtc->pipe == 0) { - printk(KERN_ERR "Can't support LVDS on pipe A\n"); + DRM_ERROR("Can't support LVDS on pipe A\n"); return false; } /* Should never happen!! */ list_for_each_entry(tmp_encoder, &dev->mode_config.encoder_list, head) { if (tmp_encoder != encoder && tmp_encoder->crtc == encoder->crtc) { - printk(KERN_ERR "Can't enable LVDS and another " + DRM_ERROR("Can't enable LVDS and another " "encoder on the same pipe\n"); return false; } } - + /* If we don't have a panel mode, there is nothing we can do */ + if (dev_priv->panel_fixed_mode == NULL) + return true; /* * If we have timings from the BIOS for the panel, put them in * to the adjusted mode. 
The CRTC will be set up for this mode, @@ -256,6 +291,243 @@ static bool intel_lvds_mode_fixup(struct drm_encoder *encoder, drm_mode_set_crtcinfo(adjusted_mode, CRTC_INTERLACE_HALVE_V); } + /* Make sure pre-965s set dither correctly */ + if (!IS_I965G(dev)) { + if (dev_priv->panel_wants_dither || dev_priv->lvds_dither) + pfit_control |= PANEL_8TO6_DITHER_ENABLE; + } + + /* Native modes don't need fitting */ + if (adjusted_mode->hdisplay == mode->hdisplay && + adjusted_mode->vdisplay == mode->vdisplay) { + pfit_pgm_ratios = 0; + border = 0; + goto out; + } + + /* 965+ wants fuzzy fitting */ + if (IS_I965G(dev)) + pfit_control |= (intel_crtc->pipe << PFIT_PIPE_SHIFT) | + PFIT_FILTER_FUZZY; + + hsync_width = adjusted_mode->crtc_hsync_end - + adjusted_mode->crtc_hsync_start; + vsync_width = adjusted_mode->crtc_vsync_end - + adjusted_mode->crtc_vsync_start; + hblank_width = adjusted_mode->crtc_hblank_end - + adjusted_mode->crtc_hblank_start; + vblank_width = adjusted_mode->crtc_vblank_end - + adjusted_mode->crtc_vblank_start; + /* + * Deal with panel fitting options. Figure out how to stretch the + * image based on its aspect ratio & the current panel fitting mode. + */ + panel_ratio = adjusted_mode->hdisplay * PANEL_RATIO_FACTOR / + adjusted_mode->vdisplay; + desired_ratio = mode->hdisplay * PANEL_RATIO_FACTOR / + mode->vdisplay; + /* + * Enable automatic panel scaling for non-native modes so that they fill + * the screen. Should be enabled before the pipe is enabled, according + * to register description and PRM. + * Change the value here to see the borders for debugging + */ + I915_WRITE(BCLRPAT_A, 0); + I915_WRITE(BCLRPAT_B, 0); + + switch (lvds_priv->fitting_mode) { + case DRM_MODE_SCALE_NO_SCALE: + /* + * For centered modes, we have to calculate border widths & + * heights and modify the values programmed into the CRTC. 
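The aspect-ratio handling above avoids floating point by scaling both ratios with PANEL_RATIO_FACTOR before comparing them. A standalone sketch of that arithmetic, assuming the same 8192 factor as the patch; the helper name and sample resolutions are illustrative only:

#include <stdio.h>

#define PANEL_RATIO_FACTOR 8192	/* fixed-point scale, as in the patch */

/* Decide pillarbox vs. letterbox vs. direct scale for a requested mode. */
static const char *lvds_fit_kind(int panel_h, int panel_v,
				 int mode_h, int mode_v)
{
	int panel_ratio = panel_h * PANEL_RATIO_FACTOR / panel_v;
	int desired_ratio = mode_h * PANEL_RATIO_FACTOR / mode_v;

	if (panel_ratio > desired_ratio)
		return "pillarbox (left/right borders)";
	if (panel_ratio < desired_ratio)
		return "letterbox (top/bottom borders)";
	return "same aspect, scale both axes";
}

int main(void)
{
	/* 1024x768 requested on a 1280x800 panel -> pillarbox */
	printf("%s\n", lvds_fit_kind(1280, 800, 1024, 768));
	return 0;
}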
+ */ + left_border = (adjusted_mode->hdisplay - mode->hdisplay) / 2; + right_border = left_border; + if (mode->hdisplay & 1) + right_border++; + top_border = (adjusted_mode->vdisplay - mode->vdisplay) / 2; + bottom_border = top_border; + if (mode->vdisplay & 1) + bottom_border++; + /* Set active & border values */ + adjusted_mode->crtc_hdisplay = mode->hdisplay; + /* Keep the boder be even */ + if (right_border & 1) + right_border++; + /* use the border directly instead of border minuse one */ + adjusted_mode->crtc_hblank_start = mode->hdisplay + + right_border; + /* keep the blank width constant */ + adjusted_mode->crtc_hblank_end = + adjusted_mode->crtc_hblank_start + hblank_width; + /* get the hsync pos relative to hblank start */ + hsync_pos = (hblank_width - hsync_width) / 2; + /* keep the hsync pos be even */ + if (hsync_pos & 1) + hsync_pos++; + adjusted_mode->crtc_hsync_start = + adjusted_mode->crtc_hblank_start + hsync_pos; + /* keep the hsync width constant */ + adjusted_mode->crtc_hsync_end = + adjusted_mode->crtc_hsync_start + hsync_width; + adjusted_mode->crtc_vdisplay = mode->vdisplay; + /* use the border instead of border minus one */ + adjusted_mode->crtc_vblank_start = mode->vdisplay + + bottom_border; + /* keep the vblank width constant */ + adjusted_mode->crtc_vblank_end = + adjusted_mode->crtc_vblank_start + vblank_width; + /* get the vsync start postion relative to vblank start */ + vsync_pos = (vblank_width - vsync_width) / 2; + adjusted_mode->crtc_vsync_start = + adjusted_mode->crtc_vblank_start + vsync_pos; + /* keep the vsync width constant */ + adjusted_mode->crtc_vsync_end = + adjusted_mode->crtc_vblank_start + vsync_width; + border = 1; + break; + case DRM_MODE_SCALE_ASPECT: + /* Scale but preserve the spect ratio */ + pfit_control |= PFIT_ENABLE; + if (IS_I965G(dev)) { + /* 965+ is easy, it does everything in hw */ + if (panel_ratio > desired_ratio) + pfit_control |= PFIT_SCALING_PILLAR; + else if (panel_ratio < desired_ratio) + pfit_control |= PFIT_SCALING_LETTER; + else + pfit_control |= PFIT_SCALING_AUTO; + } else { + /* + * For earlier chips we have to calculate the scaling + * ratio by hand and program it into the + * PFIT_PGM_RATIO register + */ + u32 horiz_bits, vert_bits, bits = 12; + horiz_ratio = mode->hdisplay * PANEL_RATIO_FACTOR/ + adjusted_mode->hdisplay; + vert_ratio = mode->vdisplay * PANEL_RATIO_FACTOR/ + adjusted_mode->vdisplay; + horiz_scale = adjusted_mode->hdisplay * + PANEL_RATIO_FACTOR / mode->hdisplay; + vert_scale = adjusted_mode->vdisplay * + PANEL_RATIO_FACTOR / mode->vdisplay; + + /* retain aspect ratio */ + if (panel_ratio > desired_ratio) { /* Pillar */ + u32 scaled_width; + scaled_width = mode->hdisplay * vert_scale / + PANEL_RATIO_FACTOR; + horiz_ratio = vert_ratio; + pfit_control |= (VERT_AUTO_SCALE | + VERT_INTERP_BILINEAR | + HORIZ_INTERP_BILINEAR); + /* Pillar will have left/right borders */ + left_border = (adjusted_mode->hdisplay - + scaled_width) / 2; + right_border = left_border; + if (mode->hdisplay & 1) /* odd resolutions */ + right_border++; + /* keep the border be even */ + if (right_border & 1) + right_border++; + adjusted_mode->crtc_hdisplay = scaled_width; + /* use border instead of border minus one */ + adjusted_mode->crtc_hblank_start = + scaled_width + right_border; + /* keep the hblank width constant */ + adjusted_mode->crtc_hblank_end = + adjusted_mode->crtc_hblank_start + + hblank_width; + /* + * get the hsync start pos relative to + * hblank start + */ + hsync_pos = (hblank_width - hsync_width) / 2; + /* 
keep the hsync_pos be even */ + if (hsync_pos & 1) + hsync_pos++; + adjusted_mode->crtc_hsync_start = + adjusted_mode->crtc_hblank_start + + hsync_pos; + /* keept hsync width constant */ + adjusted_mode->crtc_hsync_end = + adjusted_mode->crtc_hsync_start + + hsync_width; + border = 1; + } else if (panel_ratio < desired_ratio) { /* letter */ + u32 scaled_height = mode->vdisplay * + horiz_scale / PANEL_RATIO_FACTOR; + vert_ratio = horiz_ratio; + pfit_control |= (HORIZ_AUTO_SCALE | + VERT_INTERP_BILINEAR | + HORIZ_INTERP_BILINEAR); + /* Letterbox will have top/bottom border */ + top_border = (adjusted_mode->vdisplay - + scaled_height) / 2; + bottom_border = top_border; + if (mode->vdisplay & 1) + bottom_border++; + adjusted_mode->crtc_vdisplay = scaled_height; + /* use border instead of border minus one */ + adjusted_mode->crtc_vblank_start = + scaled_height + bottom_border; + /* keep the vblank width constant */ + adjusted_mode->crtc_vblank_end = + adjusted_mode->crtc_vblank_start + + vblank_width; + /* + * get the vsync start pos relative to + * vblank start + */ + vsync_pos = (vblank_width - vsync_width) / 2; + adjusted_mode->crtc_vsync_start = + adjusted_mode->crtc_vblank_start + + vsync_pos; + /* keep the vsync width constant */ + adjusted_mode->crtc_vsync_end = + adjusted_mode->crtc_vsync_start + + vsync_width; + border = 1; + } else { + /* Aspects match, Let hw scale both directions */ + pfit_control |= (VERT_AUTO_SCALE | + HORIZ_AUTO_SCALE | + VERT_INTERP_BILINEAR | + HORIZ_INTERP_BILINEAR); + } + horiz_bits = (1 << bits) * horiz_ratio / + PANEL_RATIO_FACTOR; + vert_bits = (1 << bits) * vert_ratio / + PANEL_RATIO_FACTOR; + pfit_pgm_ratios = + ((vert_bits << PFIT_VERT_SCALE_SHIFT) & + PFIT_VERT_SCALE_MASK) | + ((horiz_bits << PFIT_HORIZ_SCALE_SHIFT) & + PFIT_HORIZ_SCALE_MASK); + } + break; + + case DRM_MODE_SCALE_FULLSCREEN: + /* + * Full scaling, even if it changes the aspect ratio. + * Fortunately this is all done for us in hw. + */ + pfit_control |= PFIT_ENABLE; + if (IS_I965G(dev)) + pfit_control |= PFIT_SCALING_AUTO; + else + pfit_control |= (VERT_AUTO_SCALE | HORIZ_AUTO_SCALE | + VERT_INTERP_BILINEAR | + HORIZ_INTERP_BILINEAR); + break; + default: + break; + } + +out: + lvds_priv->pfit_control = pfit_control; + lvds_priv->pfit_pgm_ratios = pfit_pgm_ratios; /* * XXX: It would be nice to support lower refresh rates on the * panels to reduce power consumption, and perhaps match the @@ -301,8 +573,8 @@ static void intel_lvds_mode_set(struct drm_encoder *encoder, { struct drm_device *dev = encoder->dev; struct drm_i915_private *dev_priv = dev->dev_private; - struct intel_crtc *intel_crtc = to_intel_crtc(encoder->crtc); - u32 pfit_control; + struct intel_output *intel_output = enc_to_intel_output(encoder); + struct intel_lvds_priv *lvds_priv = intel_output->dev_priv; /* * The LVDS pin pair will already have been turned on in the @@ -319,22 +591,8 @@ static void intel_lvds_mode_set(struct drm_encoder *encoder, * screen. Should be enabled before the pipe is enabled, according to * register description and PRM. 
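On pre-965 hardware the scale ratios computed above are programmed as two 12-bit fields of PFIT_PGM_RATIOS. A compilable sketch of that packing; the shift and mask values here are assumptions written out for illustration, the authoritative ones live in i915_reg.h:

#include <stdint.h>
#include <stdio.h>

#define PANEL_RATIO_FACTOR	8192
/* Assumed register layout, for illustration only. */
#define PFIT_VERT_SCALE_SHIFT	20
#define PFIT_VERT_SCALE_MASK	0xfff00000
#define PFIT_HORIZ_SCALE_SHIFT	4
#define PFIT_HORIZ_SCALE_MASK	0x0000fff0

/* Pack the 12-bit horizontal/vertical ratios the way mode_fixup does above. */
static uint32_t pack_pfit_pgm_ratios(int mode_h, int mode_v,
				     int adj_h, int adj_v)
{
	const int bits = 12;
	int horiz_ratio = mode_h * PANEL_RATIO_FACTOR / adj_h;
	int vert_ratio  = mode_v * PANEL_RATIO_FACTOR / adj_v;
	uint32_t horiz_bits = (1 << bits) * horiz_ratio / PANEL_RATIO_FACTOR;
	uint32_t vert_bits  = (1 << bits) * vert_ratio  / PANEL_RATIO_FACTOR;

	return ((vert_bits << PFIT_VERT_SCALE_SHIFT) & PFIT_VERT_SCALE_MASK) |
	       ((horiz_bits << PFIT_HORIZ_SCALE_SHIFT) & PFIT_HORIZ_SCALE_MASK);
}

int main(void)
{
	/* 1024x768 stretched to a 1280x800 native panel */
	printf("PFIT_PGM_RATIOS = 0x%08x\n",
	       pack_pfit_pgm_ratios(1024, 768, 1280, 800));
	return 0;
}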
*/ - if (mode->hdisplay != adjusted_mode->hdisplay || - mode->vdisplay != adjusted_mode->vdisplay) - pfit_control = (PFIT_ENABLE | VERT_AUTO_SCALE | - HORIZ_AUTO_SCALE | VERT_INTERP_BILINEAR | - HORIZ_INTERP_BILINEAR); - else - pfit_control = 0; - - if (!IS_I965G(dev)) { - if (dev_priv->panel_wants_dither || dev_priv->lvds_dither) - pfit_control |= PANEL_8TO6_DITHER_ENABLE; - } - else - pfit_control |= intel_crtc->pipe << PFIT_PIPE_SHIFT; - - I915_WRITE(PFIT_CONTROL, pfit_control); + I915_WRITE(PFIT_PGM_RATIOS, lvds_priv->pfit_pgm_ratios); + I915_WRITE(PFIT_CONTROL, lvds_priv->pfit_control); } /** @@ -406,6 +664,34 @@ static int intel_lvds_set_property(struct drm_connector *connector, struct drm_property *property, uint64_t value) { + struct drm_device *dev = connector->dev; + struct intel_output *intel_output = + to_intel_output(connector); + + if (property == dev->mode_config.scaling_mode_property && + connector->encoder) { + struct drm_crtc *crtc = connector->encoder->crtc; + struct intel_lvds_priv *lvds_priv = intel_output->dev_priv; + if (value == DRM_MODE_SCALE_NON_GPU) { + DRM_DEBUG_KMS(I915_LVDS, + "non_GPU property is unsupported\n"); + return 0; + } + if (lvds_priv->fitting_mode == value) { + /* the LVDS scaling property is not changed */ + return 0; + } + lvds_priv->fitting_mode = value; + if (crtc && crtc->enabled) { + /* + * If the CRTC is enabled, the display will be changed + * according to the new panel fitting mode. + */ + drm_crtc_helper_set_mode(crtc, &crtc->mode, + crtc->x, crtc->y, crtc->fb); + } + } + return 0; } @@ -456,7 +742,7 @@ static const struct dmi_system_id intel_no_lvds[] = { .callback = intel_no_lvds_dmi_callback, .ident = "Apple Mac Mini (Core series)", .matches = { - DMI_MATCH(DMI_SYS_VENDOR, "Apple Inc."), + DMI_MATCH(DMI_SYS_VENDOR, "Apple"), DMI_MATCH(DMI_PRODUCT_NAME, "Macmini1,1"), }, }, @@ -464,7 +750,7 @@ static const struct dmi_system_id intel_no_lvds[] = { .callback = intel_no_lvds_dmi_callback, .ident = "Apple Mac Mini (Core 2 series)", .matches = { - DMI_MATCH(DMI_SYS_VENDOR, "Apple Inc."), + DMI_MATCH(DMI_SYS_VENDOR, "Apple"), DMI_MATCH(DMI_PRODUCT_NAME, "Macmini2,1"), }, }, @@ -494,6 +780,14 @@ static const struct dmi_system_id intel_no_lvds[] = { }, { .callback = intel_no_lvds_dmi_callback, + .ident = "AOpen Mini PC MP915", + .matches = { + DMI_MATCH(DMI_BOARD_VENDOR, "AOpen"), + DMI_MATCH(DMI_BOARD_NAME, "i915GMx-F"), + }, + }, + { + .callback = intel_no_lvds_dmi_callback, .ident = "Aopen i945GTt-VFA", .matches = { DMI_MATCH(DMI_PRODUCT_VERSION, "AO00001JW"), @@ -503,6 +797,65 @@ static const struct dmi_system_id intel_no_lvds[] = { { } /* terminating entry */ }; +#ifdef CONFIG_ACPI +/* + * check_lid_device -- check whether @handle is an ACPI LID device. + * @handle: ACPI device handle + * @level : depth in the ACPI namespace tree + * @context: the number of LID device when we find the device + * @rv: a return value to fill if desired (Not use) + */ +static acpi_status +check_lid_device(acpi_handle handle, u32 level, void *context, + void **return_value) +{ + struct acpi_device *acpi_dev; + int *lid_present = context; + + acpi_dev = NULL; + /* Get the acpi device for device handle */ + if (acpi_bus_get_device(handle, &acpi_dev) || !acpi_dev) { + /* If there is no ACPI device for handle, return */ + return AE_OK; + } + + if (!strncmp(acpi_device_hid(acpi_dev), "PNP0C0D", 7)) + *lid_present = 1; + + return AE_OK; +} + +/** + * check whether there exists the ACPI LID device by enumerating the ACPI + * device tree. 
+ */ +static int intel_lid_present(void) +{ + int lid_present = 0; + + if (acpi_disabled) { + /* If ACPI is disabled, there is no ACPI device tree to + * check, so assume the LID device would have been present. + */ + return 1; + } + + acpi_walk_namespace(ACPI_TYPE_DEVICE, ACPI_ROOT_OBJECT, + ACPI_UINT32_MAX, + check_lid_device, &lid_present, NULL); + + return lid_present; +} +#else +static int intel_lid_present(void) +{ + /* In the absence of ACPI built in, assume that the LID device would + * have been present. + */ + return 1; +} +#endif + /** * intel_lvds_init - setup LVDS connectors on this device * @dev: drm device @@ -518,6 +871,7 @@ void intel_lvds_init(struct drm_device *dev) struct drm_encoder *encoder; struct drm_display_mode *scan; /* *modes, *bios_mode; */ struct drm_crtc *crtc; + struct intel_lvds_priv *lvds_priv; u32 lvds; int pipe, gpio = GPIOC; @@ -525,13 +879,28 @@ void intel_lvds_init(struct drm_device *dev) if (dmi_check_system(intel_no_lvds)) return; + /* Assume that any device without an ACPI LID device also doesn't + * have an integrated LVDS. We would be better off parsing the BIOS + * to get a reliable indicator, but that code isn't written yet. + * + * In the case of all-in-one desktops using LVDS that we've seen, + * they're using SDVO LVDS. + */ + if (!intel_lid_present()) + return; + if (IS_IGDNG(dev)) { if ((I915_READ(PCH_LVDS) & LVDS_DETECTED) == 0) return; + if (dev_priv->edp_support) { + DRM_DEBUG("disable LVDS for eDP support\n"); + return; + } gpio = PCH_GPIOC; } - intel_output = kzalloc(sizeof(struct intel_output), GFP_KERNEL); + intel_output = kzalloc(sizeof(struct intel_output) + + sizeof(struct intel_lvds_priv), GFP_KERNEL); if (!intel_output) { return; } @@ -553,7 +922,18 @@ void intel_lvds_init(struct drm_device *dev) connector->interlace_allowed = false; connector->doublescan_allowed = false; + lvds_priv = (struct intel_lvds_priv *)(intel_output + 1); + intel_output->dev_priv = lvds_priv; + /* create the scaling mode property */ + drm_mode_create_scaling_mode_property(dev); + /* + * the initial panel fitting mode will be FULL_SCREEN. 
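intel_lvds_init now carves the LVDS private data out of the same allocation as struct intel_output, which is why the failure path later frees intel_output rather than the connector. A standalone model of that single-allocation, trailing-private-data pattern; the struct names are stand-ins, not the driver's:

#include <stdlib.h>

struct output {		/* stands in for struct intel_output */
	int type;
	void *dev_priv;
};

struct lvds_priv {	/* stands in for struct intel_lvds_priv */
	int fitting_mode;
};

static struct output *output_alloc_with_priv(void)
{
	/* One allocation: the private block sits right behind the output. */
	struct output *out = calloc(1, sizeof(*out) + sizeof(struct lvds_priv));

	if (!out)
		return NULL;
	out->dev_priv = (struct lvds_priv *)(out + 1);
	return out;
}

int main(void)
{
	struct output *out = output_alloc_with_priv();

	if (!out)
		return 1;
	/* A single free() releases both the output and its private data. */
	free(out);
	return 0;
}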
+ */ + drm_connector_attach_property(&intel_output->base, + dev->mode_config.scaling_mode_property, + DRM_MODE_SCALE_FULLSCREEN); + lvds_priv->fitting_mode = DRM_MODE_SCALE_FULLSCREEN; /* * LVDS discovery: * 1) check for EDID on DDC @@ -649,5 +1029,5 @@ failed: if (intel_output->ddc_bus) intel_i2c_destroy(intel_output->ddc_bus); drm_connector_cleanup(connector); - kfree(connector); + kfree(intel_output); } diff --git a/drivers/gpu/drm/i915/intel_modes.c b/drivers/gpu/drm/i915/intel_modes.c index e0910fefce8..67e2f4632a2 100644 --- a/drivers/gpu/drm/i915/intel_modes.c +++ b/drivers/gpu/drm/i915/intel_modes.c @@ -53,10 +53,9 @@ bool intel_ddc_probe(struct intel_output *intel_output) } }; - intel_i2c_quirk_set(intel_output->ddc_bus->drm_dev, true); - ret = i2c_transfer(&intel_output->ddc_bus->adapter, msgs, 2); - intel_i2c_quirk_set(intel_output->ddc_bus->drm_dev, false); - + intel_i2c_quirk_set(intel_output->base.dev, true); + ret = i2c_transfer(intel_output->ddc_bus, msgs, 2); + intel_i2c_quirk_set(intel_output->base.dev, false); if (ret == 2) return true; @@ -74,10 +73,9 @@ int intel_ddc_get_modes(struct intel_output *intel_output) struct edid *edid; int ret = 0; - intel_i2c_quirk_set(intel_output->ddc_bus->drm_dev, true); - edid = drm_get_edid(&intel_output->base, - &intel_output->ddc_bus->adapter); - intel_i2c_quirk_set(intel_output->ddc_bus->drm_dev, false); + intel_i2c_quirk_set(intel_output->base.dev, true); + edid = drm_get_edid(&intel_output->base, intel_output->ddc_bus); + intel_i2c_quirk_set(intel_output->base.dev, false); if (edid) { drm_mode_connector_update_edid_property(&intel_output->base, edid); diff --git a/drivers/gpu/drm/i915/intel_sdvo.c b/drivers/gpu/drm/i915/intel_sdvo.c index 9a00adb3a50..5371d933255 100644 --- a/drivers/gpu/drm/i915/intel_sdvo.c +++ b/drivers/gpu/drm/i915/intel_sdvo.c @@ -31,6 +31,7 @@ #include "drm.h" #include "drm_crtc.h" #include "intel_drv.h" +#include "drm_edid.h" #include "i915_drm.h" #include "i915_drv.h" #include "intel_sdvo_regs.h" @@ -38,8 +39,7 @@ #undef SDVO_DEBUG #define I915_SDVO "i915_sdvo" struct intel_sdvo_priv { - struct intel_i2c_chan *i2c_bus; - int slaveaddr; + u8 slave_addr; /* Register for the SDVO device: SDVOB or SDVOC */ int output_device; @@ -56,6 +56,12 @@ struct intel_sdvo_priv { /* Pixel clock limitations reported by the SDVO device, in kHz */ int pixel_clock_min, pixel_clock_max; + /* + * For multiple function SDVO device, + * this is for current attached outputs. + */ + uint16_t attached_output; + /** * This is set if we're going to treat the device as TV-out. * @@ -69,12 +75,23 @@ struct intel_sdvo_priv { * This is set if we treat the device as HDMI, instead of DVI. */ bool is_hdmi; + /** * This is set if we detect output of sdvo device as LVDS. */ bool is_lvds; /** + * This is sdvo flags for input timing. + */ + uint8_t sdvo_flags; + + /** + * This is sdvo fixed pannel mode pointer + */ + struct drm_display_mode *sdvo_lvds_fixed_mode; + + /** * Returned SDTV resolutions allowed for the current format, if the * device reported it. 
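The scaling mode property attached to the LVDS connector above is what a KMS client toggles from userspace. A hedged libdrm sketch, assuming the property keeps the name "scaling mode" and the value table quoted near the top of intel_lvds.c; the connector id and chosen value are illustrative:

#include <stdint.h>
#include <string.h>
#include <xf86drm.h>
#include <xf86drmMode.h>

/*
 * Set the LVDS "scaling mode" property; value 3 corresponds to
 * DRM_MODE_SCALE_ASPECT in the table quoted in intel_lvds.c above.
 */
static int set_scaling_mode(int fd, uint32_t connector_id, uint64_t value)
{
	drmModeConnectorPtr conn = drmModeGetConnector(fd, connector_id);
	int i, ret = -1;

	if (!conn)
		return -1;
	for (i = 0; i < conn->count_props; i++) {
		drmModePropertyPtr prop = drmModeGetProperty(fd, conn->props[i]);

		if (!prop)
			continue;
		if (!strcmp(prop->name, "scaling mode"))
			ret = drmModeConnectorSetProperty(fd, connector_id,
							  prop->prop_id, value);
		drmModeFreeProperty(prop);
	}
	drmModeFreeConnector(conn);
	return ret;
}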
*/ @@ -104,6 +121,9 @@ struct intel_sdvo_priv { u32 save_SDVOX; }; +static bool +intel_sdvo_output_setup(struct intel_output *intel_output, uint16_t flags); + /** * Writes the SDVOB or SDVOC with the given value, but always writes both * SDVOB and SDVOC to work around apparent hardware issues (according to @@ -146,13 +166,13 @@ static bool intel_sdvo_read_byte(struct intel_output *intel_output, u8 addr, struct i2c_msg msgs[] = { { - .addr = sdvo_priv->i2c_bus->slave_addr, + .addr = sdvo_priv->slave_addr >> 1, .flags = 0, .len = 1, .buf = out_buf, }, { - .addr = sdvo_priv->i2c_bus->slave_addr, + .addr = sdvo_priv->slave_addr >> 1, .flags = I2C_M_RD, .len = 1, .buf = buf, @@ -162,7 +182,7 @@ static bool intel_sdvo_read_byte(struct intel_output *intel_output, u8 addr, out_buf[0] = addr; out_buf[1] = 0; - if ((ret = i2c_transfer(&sdvo_priv->i2c_bus->adapter, msgs, 2)) == 2) + if ((ret = i2c_transfer(intel_output->i2c_bus, msgs, 2)) == 2) { *ch = buf[0]; return true; @@ -175,10 +195,11 @@ static bool intel_sdvo_read_byte(struct intel_output *intel_output, u8 addr, static bool intel_sdvo_write_byte(struct intel_output *intel_output, int addr, u8 ch) { + struct intel_sdvo_priv *sdvo_priv = intel_output->dev_priv; u8 out_buf[2]; struct i2c_msg msgs[] = { { - .addr = intel_output->i2c_bus->slave_addr, + .addr = sdvo_priv->slave_addr >> 1, .flags = 0, .len = 2, .buf = out_buf, @@ -188,7 +209,7 @@ static bool intel_sdvo_write_byte(struct intel_output *intel_output, int addr, out_buf[0] = addr; out_buf[1] = ch; - if (i2c_transfer(&intel_output->i2c_bus->adapter, msgs, 1) == 1) + if (i2c_transfer(intel_output->i2c_bus, msgs, 1) == 1) { return true; } @@ -592,6 +613,7 @@ intel_sdvo_create_preferred_input_timing(struct intel_output *output, uint16_t height) { struct intel_sdvo_preferred_input_timing_args args; + struct intel_sdvo_priv *sdvo_priv = output->dev_priv; uint8_t status; memset(&args, 0, sizeof(args)); @@ -599,7 +621,12 @@ intel_sdvo_create_preferred_input_timing(struct intel_output *output, args.width = width; args.height = height; args.interlace = 0; - args.scaled = 0; + + if (sdvo_priv->is_lvds && + (sdvo_priv->sdvo_lvds_fixed_mode->hdisplay != width || + sdvo_priv->sdvo_lvds_fixed_mode->vdisplay != height)) + args.scaled = 1; + intel_sdvo_write_cmd(output, SDVO_CMD_CREATE_PREFERRED_INPUT_TIMING, &args, sizeof(args)); status = intel_sdvo_read_response(output, NULL, 0); @@ -944,12 +971,7 @@ static bool intel_sdvo_mode_fixup(struct drm_encoder *encoder, struct intel_output *output = enc_to_intel_output(encoder); struct intel_sdvo_priv *dev_priv = output->dev_priv; - if (!dev_priv->is_tv) { - /* Make the CRTC code factor in the SDVO pixel multiplier. The - * SDVO device will be told of the multiplier during mode_set. 
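For outputs that are neither TV nor LVDS, the fixup path simply multiplies the adjusted clock by the SDVO pixel multiplier. A sketch of the underlying idea, assuming the usual rule that the multiplied dot clock must reach the SDVO link's 100 MHz minimum; the thresholds here are an assumption for illustration, not copied from the driver:

#include <stdio.h>

/*
 * Pick a pixel multiplier so that mode_clock_khz * multiplier lands in the
 * range the SDVO link can carry (assumed: the link wants >= 100 MHz).
 */
static int sdvo_pixel_multiplier(int mode_clock_khz)
{
	if (mode_clock_khz >= 100000)
		return 1;
	if (mode_clock_khz >= 50000)
		return 2;
	return 4;
}

int main(void)
{
	int clock = 65000;	/* e.g. 1024x768@60 */

	/* This models what adjusted_mode->clock *= ... does in mode_fixup. */
	printf("adjusted clock = %d kHz (x%d)\n",
	       clock * sdvo_pixel_multiplier(clock),
	       sdvo_pixel_multiplier(clock));
	return 0;
}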
- */ - adjusted_mode->clock *= intel_sdvo_get_pixel_multiplier(mode); - } else { + if (dev_priv->is_tv) { struct intel_sdvo_dtd output_dtd; bool success; @@ -980,6 +1002,47 @@ static bool intel_sdvo_mode_fixup(struct drm_encoder *encoder, intel_sdvo_get_preferred_input_timing(output, &input_dtd); intel_sdvo_get_mode_from_dtd(adjusted_mode, &input_dtd); + dev_priv->sdvo_flags = input_dtd.part2.sdvo_flags; + + drm_mode_set_crtcinfo(adjusted_mode, 0); + + mode->clock = adjusted_mode->clock; + + adjusted_mode->clock *= + intel_sdvo_get_pixel_multiplier(mode); + } else { + return false; + } + } else if (dev_priv->is_lvds) { + struct intel_sdvo_dtd output_dtd; + bool success; + + drm_mode_set_crtcinfo(dev_priv->sdvo_lvds_fixed_mode, 0); + /* Set output timings */ + intel_sdvo_get_dtd_from_mode(&output_dtd, + dev_priv->sdvo_lvds_fixed_mode); + + intel_sdvo_set_target_output(output, + dev_priv->controlled_output); + intel_sdvo_set_output_timing(output, &output_dtd); + + /* Set the input timing to the screen. Assume always input 0. */ + intel_sdvo_set_target_input(output, true, false); + + + success = intel_sdvo_create_preferred_input_timing( + output, + mode->clock / 10, + mode->hdisplay, + mode->vdisplay); + + if (success) { + struct intel_sdvo_dtd input_dtd; + + intel_sdvo_get_preferred_input_timing(output, + &input_dtd); + intel_sdvo_get_mode_from_dtd(adjusted_mode, &input_dtd); + dev_priv->sdvo_flags = input_dtd.part2.sdvo_flags; drm_mode_set_crtcinfo(adjusted_mode, 0); @@ -990,6 +1053,12 @@ static bool intel_sdvo_mode_fixup(struct drm_encoder *encoder, } else { return false; } + + } else { + /* Make the CRTC code factor in the SDVO pixel multiplier. The + * SDVO device will be told of the multiplier during mode_set. + */ + adjusted_mode->clock *= intel_sdvo_get_pixel_multiplier(mode); } return true; } @@ -1033,15 +1102,16 @@ static void intel_sdvo_mode_set(struct drm_encoder *encoder, /* We have tried to get input timing in mode_fixup, and filled into adjusted_mode */ - if (sdvo_priv->is_tv) + if (sdvo_priv->is_tv || sdvo_priv->is_lvds) { intel_sdvo_get_dtd_from_mode(&input_dtd, adjusted_mode); - else + input_dtd.part2.sdvo_flags = sdvo_priv->sdvo_flags; + } else intel_sdvo_get_dtd_from_mode(&input_dtd, mode); /* If it's a TV, we already set the output timing in mode_fixup. * Otherwise, the output timing is equal to the input timing. 
*/ - if (!sdvo_priv->is_tv) { + if (!sdvo_priv->is_tv && !sdvo_priv->is_lvds) { /* Set the output timing to the screen */ intel_sdvo_set_target_output(output, sdvo_priv->controlled_output); @@ -1116,6 +1186,8 @@ static void intel_sdvo_mode_set(struct drm_encoder *encoder, sdvox |= (sdvo_pixel_multiply - 1) << SDVO_PORT_MULTIPLY_SHIFT; } + if (sdvo_priv->sdvo_flags & SDVO_NEED_TO_STALL) + sdvox |= SDVO_STALL_SELECT; intel_sdvo_write_sdvox(output, sdvox); } @@ -1276,6 +1348,17 @@ static int intel_sdvo_mode_valid(struct drm_connector *connector, if (sdvo_priv->pixel_clock_max < mode->clock) return MODE_CLOCK_HIGH; + if (sdvo_priv->is_lvds == true) { + if (sdvo_priv->sdvo_lvds_fixed_mode == NULL) + return MODE_PANEL; + + if (mode->hdisplay > sdvo_priv->sdvo_lvds_fixed_mode->hdisplay) + return MODE_PANEL; + + if (mode->vdisplay > sdvo_priv->sdvo_lvds_fixed_mode->vdisplay) + return MODE_PANEL; + } + return MODE_OK; } @@ -1362,42 +1445,96 @@ void intel_sdvo_set_hotplug(struct drm_connector *connector, int on) intel_sdvo_read_response(intel_output, &response, 2); } -static void -intel_sdvo_hdmi_sink_detect(struct drm_connector *connector) +static bool +intel_sdvo_multifunc_encoder(struct intel_output *intel_output) +{ + struct intel_sdvo_priv *sdvo_priv = intel_output->dev_priv; + int caps = 0; + + if (sdvo_priv->caps.output_flags & + (SDVO_OUTPUT_TMDS0 | SDVO_OUTPUT_TMDS1)) + caps++; + if (sdvo_priv->caps.output_flags & + (SDVO_OUTPUT_RGB0 | SDVO_OUTPUT_RGB1)) + caps++; + if (sdvo_priv->caps.output_flags & + (SDVO_OUTPUT_SVID0 | SDVO_OUTPUT_SVID0)) + caps++; + if (sdvo_priv->caps.output_flags & + (SDVO_OUTPUT_CVBS0 | SDVO_OUTPUT_CVBS1)) + caps++; + if (sdvo_priv->caps.output_flags & + (SDVO_OUTPUT_YPRPB0 | SDVO_OUTPUT_YPRPB1)) + caps++; + + if (sdvo_priv->caps.output_flags & + (SDVO_OUTPUT_SCART0 | SDVO_OUTPUT_SCART1)) + caps++; + + if (sdvo_priv->caps.output_flags & + (SDVO_OUTPUT_LVDS0 | SDVO_OUTPUT_LVDS1)) + caps++; + + return (caps > 1); +} + +enum drm_connector_status +intel_sdvo_hdmi_sink_detect(struct drm_connector *connector, u16 response) { struct intel_output *intel_output = to_intel_output(connector); struct intel_sdvo_priv *sdvo_priv = intel_output->dev_priv; + enum drm_connector_status status = connector_status_connected; struct edid *edid = NULL; - intel_sdvo_set_control_bus_switch(intel_output, sdvo_priv->ddc_bus); edid = drm_get_edid(&intel_output->base, - &intel_output->ddc_bus->adapter); + intel_output->ddc_bus); if (edid != NULL) { - sdvo_priv->is_hdmi = drm_detect_hdmi_monitor(edid); + /* Don't report the output as connected if it's a DVI-I + * connector with a non-digital EDID coming out. 
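intel_sdvo_multifunc_encoder() above just counts how many output families the capability mask advertises and reports "multifunction" when more than one is present. An equivalent standalone sketch over an illustrative flag layout; the bit values are made up for the example, only the counting idea matters:

#include <stdbool.h>
#include <stdint.h>
#include <stdio.h>

/* Illustrative output-pair masks; the real ones come from intel_sdvo_regs.h. */
#define OUT_TMDS	0x0003
#define OUT_RGB		0x000c
#define OUT_SVID	0x0030
#define OUT_CVBS	0x00c0
#define OUT_YPRPB	0x0300
#define OUT_SCART	0x0c00
#define OUT_LVDS	0x3000

static bool is_multifunc_encoder(uint16_t output_flags)
{
	static const uint16_t families[] = {
		OUT_TMDS, OUT_RGB, OUT_SVID, OUT_CVBS,
		OUT_YPRPB, OUT_SCART, OUT_LVDS,
	};
	int caps = 0;
	unsigned int i;

	for (i = 0; i < sizeof(families) / sizeof(families[0]); i++)
		if (output_flags & families[i])
			caps++;
	return caps > 1;	/* more than one family => multifunction */
}

int main(void)
{
	printf("%d\n", is_multifunc_encoder(OUT_TMDS | OUT_LVDS));	/* 1 */
	return 0;
}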
+ */ + if (response & (SDVO_OUTPUT_TMDS0 | SDVO_OUTPUT_TMDS1)) { + if (edid->input & DRM_EDID_INPUT_DIGITAL) + sdvo_priv->is_hdmi = + drm_detect_hdmi_monitor(edid); + else + status = connector_status_disconnected; + } + kfree(edid); intel_output->base.display_info.raw_edid = NULL; - } + + } else if (response & (SDVO_OUTPUT_TMDS0 | SDVO_OUTPUT_TMDS1)) + status = connector_status_disconnected; + + return status; } static enum drm_connector_status intel_sdvo_detect(struct drm_connector *connector) { - u8 response[2]; + uint16_t response; u8 status; struct intel_output *intel_output = to_intel_output(connector); + struct intel_sdvo_priv *sdvo_priv = intel_output->dev_priv; intel_sdvo_write_cmd(intel_output, SDVO_CMD_GET_ATTACHED_DISPLAYS, NULL, 0); status = intel_sdvo_read_response(intel_output, &response, 2); - DRM_DEBUG("SDVO response %d %d\n", response[0], response[1]); + DRM_DEBUG("SDVO response %d %d\n", response & 0xff, response >> 8); if (status != SDVO_CMD_STATUS_SUCCESS) return connector_status_unknown; - if ((response[0] != 0) || (response[1] != 0)) { - intel_sdvo_hdmi_sink_detect(connector); - return connector_status_connected; - } else + if (response == 0) return connector_status_disconnected; + + if (intel_sdvo_multifunc_encoder(intel_output) && + sdvo_priv->attached_output != response) { + if (sdvo_priv->controlled_output != response && + intel_sdvo_output_setup(intel_output, response) != true) + return connector_status_unknown; + sdvo_priv->attached_output = response; + } + return intel_sdvo_hdmi_sink_detect(connector, response); } static void intel_sdvo_get_ddc_modes(struct drm_connector *connector) @@ -1549,23 +1686,21 @@ static void intel_sdvo_get_tv_modes(struct drm_connector *connector) static void intel_sdvo_get_lvds_modes(struct drm_connector *connector) { struct intel_output *intel_output = to_intel_output(connector); - struct intel_sdvo_priv *sdvo_priv = intel_output->dev_priv; struct drm_i915_private *dev_priv = connector->dev->dev_private; + struct intel_sdvo_priv *sdvo_priv = intel_output->dev_priv; + struct drm_display_mode *newmode; /* * Attempt to get the mode list from DDC. * Assume that the preferred modes are * arranged in priority order. 
*/ - /* set the bus switch and get the modes */ - intel_sdvo_set_control_bus_switch(intel_output, sdvo_priv->ddc_bus); intel_ddc_get_modes(intel_output); if (list_empty(&connector->probed_modes) == false) - return; + goto end; /* Fetch modes from VBT */ if (dev_priv->sdvo_lvds_vbt_mode != NULL) { - struct drm_display_mode *newmode; newmode = drm_mode_duplicate(connector->dev, dev_priv->sdvo_lvds_vbt_mode); if (newmode != NULL) { @@ -1575,6 +1710,16 @@ static void intel_sdvo_get_lvds_modes(struct drm_connector *connector) drm_mode_probed_add(connector, newmode); } } + +end: + list_for_each_entry(newmode, &connector->probed_modes, head) { + if (newmode->type & DRM_MODE_TYPE_PREFERRED) { + sdvo_priv->sdvo_lvds_fixed_mode = + drm_mode_duplicate(connector->dev, newmode); + break; + } + } + } static int intel_sdvo_get_modes(struct drm_connector *connector) @@ -1597,14 +1742,20 @@ static int intel_sdvo_get_modes(struct drm_connector *connector) static void intel_sdvo_destroy(struct drm_connector *connector) { struct intel_output *intel_output = to_intel_output(connector); + struct intel_sdvo_priv *sdvo_priv = intel_output->dev_priv; if (intel_output->i2c_bus) intel_i2c_destroy(intel_output->i2c_bus); if (intel_output->ddc_bus) intel_i2c_destroy(intel_output->ddc_bus); + if (sdvo_priv->sdvo_lvds_fixed_mode != NULL) + drm_mode_destroy(connector->dev, + sdvo_priv->sdvo_lvds_fixed_mode); + drm_sysfs_connector_remove(connector); drm_connector_cleanup(connector); + kfree(intel_output); } @@ -1709,7 +1860,7 @@ intel_sdvo_chan_to_intel_output(struct intel_i2c_chan *chan) list_for_each_entry(connector, &dev->mode_config.connector_list, head) { - if (to_intel_output(connector)->ddc_bus == chan) { + if (to_intel_output(connector)->ddc_bus == &chan->adapter) { intel_output = to_intel_output(connector); break; } @@ -1723,7 +1874,7 @@ static int intel_sdvo_master_xfer(struct i2c_adapter *i2c_adap, struct intel_output *intel_output; struct intel_sdvo_priv *sdvo_priv; struct i2c_algo_bit_data *algo_data; - struct i2c_algorithm *algo; + const struct i2c_algorithm *algo; algo_data = (struct i2c_algo_bit_data *)i2c_adap->algo_data; intel_output = @@ -1733,7 +1884,7 @@ static int intel_sdvo_master_xfer(struct i2c_adapter *i2c_adap, return -EINVAL; sdvo_priv = intel_output->dev_priv; - algo = (struct i2c_algorithm *)intel_output->i2c_bus->adapter.algo; + algo = intel_output->i2c_bus->algo; intel_sdvo_set_control_bus_switch(intel_output, sdvo_priv->ddc_bus); return algo->master_xfer(i2c_adap, msgs, num); @@ -1780,18 +1931,101 @@ intel_sdvo_get_slave_addr(struct drm_device *dev, int output_device) return 0x72; } +static bool +intel_sdvo_output_setup(struct intel_output *intel_output, uint16_t flags) +{ + struct drm_connector *connector = &intel_output->base; + struct drm_encoder *encoder = &intel_output->enc; + struct intel_sdvo_priv *sdvo_priv = intel_output->dev_priv; + bool ret = true, registered = false; + + sdvo_priv->is_tv = false; + intel_output->needs_tv_clock = false; + sdvo_priv->is_lvds = false; + + if (device_is_registered(&connector->kdev)) { + drm_sysfs_connector_remove(connector); + registered = true; + } + + if (flags & + (SDVO_OUTPUT_TMDS0 | SDVO_OUTPUT_TMDS1)) { + if (sdvo_priv->caps.output_flags & SDVO_OUTPUT_TMDS0) + sdvo_priv->controlled_output = SDVO_OUTPUT_TMDS0; + else + sdvo_priv->controlled_output = SDVO_OUTPUT_TMDS1; + + encoder->encoder_type = DRM_MODE_ENCODER_TMDS; + connector->connector_type = DRM_MODE_CONNECTOR_DVID; + + if (intel_sdvo_get_supp_encode(intel_output, + &sdvo_priv->encode) 
&& + intel_sdvo_get_digital_encoding_mode(intel_output) && + sdvo_priv->is_hdmi) { + /* enable hdmi encoding mode if supported */ + intel_sdvo_set_encode(intel_output, SDVO_ENCODE_HDMI); + intel_sdvo_set_colorimetry(intel_output, + SDVO_COLORIMETRY_RGB256); + connector->connector_type = DRM_MODE_CONNECTOR_HDMIA; + } + } else if (flags & SDVO_OUTPUT_SVID0) { + + sdvo_priv->controlled_output = SDVO_OUTPUT_SVID0; + encoder->encoder_type = DRM_MODE_ENCODER_TVDAC; + connector->connector_type = DRM_MODE_CONNECTOR_SVIDEO; + sdvo_priv->is_tv = true; + intel_output->needs_tv_clock = true; + } else if (flags & SDVO_OUTPUT_RGB0) { + + sdvo_priv->controlled_output = SDVO_OUTPUT_RGB0; + encoder->encoder_type = DRM_MODE_ENCODER_DAC; + connector->connector_type = DRM_MODE_CONNECTOR_VGA; + } else if (flags & SDVO_OUTPUT_RGB1) { + + sdvo_priv->controlled_output = SDVO_OUTPUT_RGB1; + encoder->encoder_type = DRM_MODE_ENCODER_DAC; + connector->connector_type = DRM_MODE_CONNECTOR_VGA; + } else if (flags & SDVO_OUTPUT_LVDS0) { + + sdvo_priv->controlled_output = SDVO_OUTPUT_LVDS0; + encoder->encoder_type = DRM_MODE_ENCODER_LVDS; + connector->connector_type = DRM_MODE_CONNECTOR_LVDS; + sdvo_priv->is_lvds = true; + } else if (flags & SDVO_OUTPUT_LVDS1) { + + sdvo_priv->controlled_output = SDVO_OUTPUT_LVDS1; + encoder->encoder_type = DRM_MODE_ENCODER_LVDS; + connector->connector_type = DRM_MODE_CONNECTOR_LVDS; + sdvo_priv->is_lvds = true; + } else { + + unsigned char bytes[2]; + + sdvo_priv->controlled_output = 0; + memcpy(bytes, &sdvo_priv->caps.output_flags, 2); + DRM_DEBUG_KMS(I915_SDVO, + "%s: Unknown SDVO output type (0x%02x%02x)\n", + SDVO_NAME(sdvo_priv), + bytes[0], bytes[1]); + ret = false; + } + + if (ret && registered) + ret = drm_sysfs_connector_add(connector) == 0 ? true : false; + + + return ret; + +} + bool intel_sdvo_init(struct drm_device *dev, int output_device) { struct drm_connector *connector; struct intel_output *intel_output; struct intel_sdvo_priv *sdvo_priv; - struct intel_i2c_chan *i2cbus = NULL; - struct intel_i2c_chan *ddcbus = NULL; - int connector_type; + u8 ch[0x40]; int i; - int encoder_type, output_id; - u8 slave_addr; intel_output = kcalloc(sizeof(struct intel_output)+sizeof(struct intel_sdvo_priv), 1, GFP_KERNEL); if (!intel_output) { @@ -1799,29 +2033,24 @@ bool intel_sdvo_init(struct drm_device *dev, int output_device) } sdvo_priv = (struct intel_sdvo_priv *)(intel_output + 1); + sdvo_priv->output_device = output_device; + + intel_output->dev_priv = sdvo_priv; intel_output->type = INTEL_OUTPUT_SDVO; /* setup the DDC bus. 
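intel_sdvo_output_setup() above walks the capability flags in a fixed priority order (TMDS first, then S-Video, RGB, LVDS) to decide the connector and encoder type. A standalone sketch of that selection; the flag values are illustrative and only the ordering mirrors the patch:

#include <stdint.h>
#include <stdio.h>

/* Illustrative flag values; only the priority order mirrors the patch. */
#define OUT_TMDS0	0x0001
#define OUT_TMDS1	0x0002
#define OUT_RGB0	0x0004
#define OUT_RGB1	0x0008
#define OUT_SVID0	0x0010
#define OUT_LVDS0	0x1000
#define OUT_LVDS1	0x2000

/* Same priority as intel_sdvo_output_setup(): TMDS, S-Video, RGB, LVDS. */
static const char *sdvo_pick_connector(uint16_t flags)
{
	if (flags & (OUT_TMDS0 | OUT_TMDS1))
		return "DVI-D/HDMI";
	if (flags & OUT_SVID0)
		return "S-Video (TV, needs TV clock)";
	if (flags & (OUT_RGB0 | OUT_RGB1))
		return "VGA";
	if (flags & (OUT_LVDS0 | OUT_LVDS1))
		return "LVDS";
	return "unknown";
}

int main(void)
{
	printf("%s\n", sdvo_pick_connector(OUT_RGB0 | OUT_TMDS0));
	return 0;
}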
*/ if (output_device == SDVOB) - i2cbus = intel_i2c_create(dev, GPIOE, "SDVOCTRL_E for SDVOB"); + intel_output->i2c_bus = intel_i2c_create(dev, GPIOE, "SDVOCTRL_E for SDVOB"); else - i2cbus = intel_i2c_create(dev, GPIOE, "SDVOCTRL_E for SDVOC"); + intel_output->i2c_bus = intel_i2c_create(dev, GPIOE, "SDVOCTRL_E for SDVOC"); - if (!i2cbus) + if (!intel_output->i2c_bus) goto err_inteloutput; - slave_addr = intel_sdvo_get_slave_addr(dev, output_device); - sdvo_priv->i2c_bus = i2cbus; + sdvo_priv->slave_addr = intel_sdvo_get_slave_addr(dev, output_device); - if (output_device == SDVOB) { - output_id = 1; - } else { - output_id = 2; - } - sdvo_priv->i2c_bus->slave_addr = slave_addr >> 1; - sdvo_priv->output_device = output_device; - intel_output->i2c_bus = i2cbus; - intel_output->dev_priv = sdvo_priv; + /* Save the bit-banging i2c functionality for use by the DDC wrapper */ + intel_sdvo_i2c_bit_algo.functionality = intel_output->i2c_bus->algo->functionality; /* Read the regs to test if we can talk to the device */ for (i = 0; i < 0x40; i++) { @@ -1835,101 +2064,39 @@ bool intel_sdvo_init(struct drm_device *dev, int output_device) /* setup the DDC bus. */ if (output_device == SDVOB) - ddcbus = intel_i2c_create(dev, GPIOE, "SDVOB DDC BUS"); + intel_output->ddc_bus = intel_i2c_create(dev, GPIOE, "SDVOB DDC BUS"); else - ddcbus = intel_i2c_create(dev, GPIOE, "SDVOC DDC BUS"); + intel_output->ddc_bus = intel_i2c_create(dev, GPIOE, "SDVOC DDC BUS"); - if (ddcbus == NULL) + if (intel_output->ddc_bus == NULL) goto err_i2c; - intel_sdvo_i2c_bit_algo.functionality = - intel_output->i2c_bus->adapter.algo->functionality; - ddcbus->adapter.algo = &intel_sdvo_i2c_bit_algo; - intel_output->ddc_bus = ddcbus; + /* Wrap with our custom algo which switches to DDC mode */ + intel_output->ddc_bus->algo = &intel_sdvo_i2c_bit_algo; /* In defaut case sdvo lvds is false */ - sdvo_priv->is_lvds = false; intel_sdvo_get_capabilities(intel_output, &sdvo_priv->caps); - if (sdvo_priv->caps.output_flags & - (SDVO_OUTPUT_TMDS0 | SDVO_OUTPUT_TMDS1)) { - if (sdvo_priv->caps.output_flags & SDVO_OUTPUT_TMDS0) - sdvo_priv->controlled_output = SDVO_OUTPUT_TMDS0; - else - sdvo_priv->controlled_output = SDVO_OUTPUT_TMDS1; - - encoder_type = DRM_MODE_ENCODER_TMDS; - connector_type = DRM_MODE_CONNECTOR_DVID; - - if (intel_sdvo_get_supp_encode(intel_output, - &sdvo_priv->encode) && - intel_sdvo_get_digital_encoding_mode(intel_output) && - sdvo_priv->is_hdmi) { - /* enable hdmi encoding mode if supported */ - intel_sdvo_set_encode(intel_output, SDVO_ENCODE_HDMI); - intel_sdvo_set_colorimetry(intel_output, - SDVO_COLORIMETRY_RGB256); - connector_type = DRM_MODE_CONNECTOR_HDMIA; - } - } - else if (sdvo_priv->caps.output_flags & SDVO_OUTPUT_SVID0) - { - sdvo_priv->controlled_output = SDVO_OUTPUT_SVID0; - encoder_type = DRM_MODE_ENCODER_TVDAC; - connector_type = DRM_MODE_CONNECTOR_SVIDEO; - sdvo_priv->is_tv = true; - intel_output->needs_tv_clock = true; - } - else if (sdvo_priv->caps.output_flags & SDVO_OUTPUT_RGB0) - { - sdvo_priv->controlled_output = SDVO_OUTPUT_RGB0; - encoder_type = DRM_MODE_ENCODER_DAC; - connector_type = DRM_MODE_CONNECTOR_VGA; - } - else if (sdvo_priv->caps.output_flags & SDVO_OUTPUT_RGB1) - { - sdvo_priv->controlled_output = SDVO_OUTPUT_RGB1; - encoder_type = DRM_MODE_ENCODER_DAC; - connector_type = DRM_MODE_CONNECTOR_VGA; - } - else if (sdvo_priv->caps.output_flags & SDVO_OUTPUT_LVDS0) - { - sdvo_priv->controlled_output = SDVO_OUTPUT_LVDS0; - encoder_type = DRM_MODE_ENCODER_LVDS; - connector_type = 
DRM_MODE_CONNECTOR_LVDS; - sdvo_priv->is_lvds = true; - } - else if (sdvo_priv->caps.output_flags & SDVO_OUTPUT_LVDS1) - { - sdvo_priv->controlled_output = SDVO_OUTPUT_LVDS1; - encoder_type = DRM_MODE_ENCODER_LVDS; - connector_type = DRM_MODE_CONNECTOR_LVDS; - sdvo_priv->is_lvds = true; - } - else - { - unsigned char bytes[2]; - - sdvo_priv->controlled_output = 0; - memcpy (bytes, &sdvo_priv->caps.output_flags, 2); - DRM_DEBUG_KMS(I915_SDVO, - "%s: Unknown SDVO output type (0x%02x%02x)\n", - SDVO_NAME(sdvo_priv), - bytes[0], bytes[1]); - encoder_type = DRM_MODE_ENCODER_NONE; - connector_type = DRM_MODE_CONNECTOR_Unknown; + if (intel_sdvo_output_setup(intel_output, + sdvo_priv->caps.output_flags) != true) { + DRM_DEBUG("SDVO output failed to setup on SDVO%c\n", + output_device == SDVOB ? 'B' : 'C'); goto err_i2c; } + connector = &intel_output->base; drm_connector_init(dev, connector, &intel_sdvo_connector_funcs, - connector_type); + connector->connector_type); + drm_connector_helper_add(connector, &intel_sdvo_connector_helper_funcs); connector->interlace_allowed = 0; connector->doublescan_allowed = 0; connector->display_info.subpixel_order = SubPixelHorizontalRGB; - drm_encoder_init(dev, &intel_output->enc, &intel_sdvo_enc_funcs, encoder_type); + drm_encoder_init(dev, &intel_output->enc, + &intel_sdvo_enc_funcs, intel_output->enc.encoder_type); + drm_encoder_helper_add(&intel_output->enc, &intel_sdvo_helper_funcs); drm_mode_connector_attach_encoder(&intel_output->base, &intel_output->enc); @@ -1965,9 +2132,10 @@ bool intel_sdvo_init(struct drm_device *dev, int output_device) return true; err_i2c: - if (ddcbus != NULL) + if (intel_output->ddc_bus != NULL) intel_i2c_destroy(intel_output->ddc_bus); - intel_i2c_destroy(intel_output->i2c_bus); + if (intel_output->i2c_bus != NULL) + intel_i2c_destroy(intel_output->i2c_bus); err_inteloutput: kfree(intel_output); diff --git a/drivers/gpu/drm/i915/intel_sdvo_regs.h b/drivers/gpu/drm/i915/intel_sdvo_regs.h index 193938b7d7f..ba5cdf8ae40 100644 --- a/drivers/gpu/drm/i915/intel_sdvo_regs.h +++ b/drivers/gpu/drm/i915/intel_sdvo_regs.h @@ -715,6 +715,7 @@ struct intel_sdvo_enhancements_arg { #define SDVO_HBUF_TX_ONCE (2 << 6) #define SDVO_HBUF_TX_VSYNC (3 << 6) #define SDVO_CMD_GET_AUDIO_TX_INFO 0x9c +#define SDVO_NEED_TO_STALL (1 << 7) struct intel_sdvo_encode{ u8 dvi_rev; diff --git a/drivers/gpu/drm/i915/intel_tv.c b/drivers/gpu/drm/i915/intel_tv.c index ea68992e441..da4ab4dc163 100644 --- a/drivers/gpu/drm/i915/intel_tv.c +++ b/drivers/gpu/drm/i915/intel_tv.c @@ -1383,34 +1383,31 @@ intel_tv_detect_type (struct drm_crtc *crtc, struct intel_output *intel_output) /* * Detect TV by polling) */ - if (intel_output->load_detect_temp) { - /* TV not currently running, prod it with destructive detect */ - save_tv_dac = tv_dac; - tv_ctl = I915_READ(TV_CTL); - save_tv_ctl = tv_ctl; - tv_ctl &= ~TV_ENC_ENABLE; - tv_ctl &= ~TV_TEST_MODE_MASK; - tv_ctl |= TV_TEST_MODE_MONITOR_DETECT; - tv_dac &= ~TVDAC_SENSE_MASK; - tv_dac &= ~DAC_A_MASK; - tv_dac &= ~DAC_B_MASK; - tv_dac &= ~DAC_C_MASK; - tv_dac |= (TVDAC_STATE_CHG_EN | - TVDAC_A_SENSE_CTL | - TVDAC_B_SENSE_CTL | - TVDAC_C_SENSE_CTL | - DAC_CTL_OVERRIDE | - DAC_A_0_7_V | - DAC_B_0_7_V | - DAC_C_0_7_V); - I915_WRITE(TV_CTL, tv_ctl); - I915_WRITE(TV_DAC, tv_dac); - intel_wait_for_vblank(dev); - tv_dac = I915_READ(TV_DAC); - I915_WRITE(TV_DAC, save_tv_dac); - I915_WRITE(TV_CTL, save_tv_ctl); - intel_wait_for_vblank(dev); - } + save_tv_dac = tv_dac; + tv_ctl = I915_READ(TV_CTL); + save_tv_ctl = tv_ctl; + tv_ctl &= 
~TV_ENC_ENABLE; + tv_ctl &= ~TV_TEST_MODE_MASK; + tv_ctl |= TV_TEST_MODE_MONITOR_DETECT; + tv_dac &= ~TVDAC_SENSE_MASK; + tv_dac &= ~DAC_A_MASK; + tv_dac &= ~DAC_B_MASK; + tv_dac &= ~DAC_C_MASK; + tv_dac |= (TVDAC_STATE_CHG_EN | + TVDAC_A_SENSE_CTL | + TVDAC_B_SENSE_CTL | + TVDAC_C_SENSE_CTL | + DAC_CTL_OVERRIDE | + DAC_A_0_7_V | + DAC_B_0_7_V | + DAC_C_0_7_V); + I915_WRITE(TV_CTL, tv_ctl); + I915_WRITE(TV_DAC, tv_dac); + intel_wait_for_vblank(dev); + tv_dac = I915_READ(TV_DAC); + I915_WRITE(TV_DAC, save_tv_dac); + I915_WRITE(TV_CTL, save_tv_ctl); + intel_wait_for_vblank(dev); /* * A B C * 0 1 1 Composite @@ -1493,6 +1490,27 @@ static struct input_res { {"1920x1080", 1920, 1080}, }; +/* + * Chose preferred mode according to line number of TV format + */ +static void +intel_tv_chose_preferred_modes(struct drm_connector *connector, + struct drm_display_mode *mode_ptr) +{ + struct intel_output *intel_output = to_intel_output(connector); + const struct tv_mode *tv_mode = intel_tv_mode_find(intel_output); + + if (tv_mode->nbr_end < 480 && mode_ptr->vdisplay == 480) + mode_ptr->type |= DRM_MODE_TYPE_PREFERRED; + else if (tv_mode->nbr_end > 480) { + if (tv_mode->progressive == true && tv_mode->nbr_end < 720) { + if (mode_ptr->vdisplay == 720) + mode_ptr->type |= DRM_MODE_TYPE_PREFERRED; + } else if (mode_ptr->vdisplay == 1080) + mode_ptr->type |= DRM_MODE_TYPE_PREFERRED; + } +} + /** * Stub get_modes function. * @@ -1547,6 +1565,7 @@ intel_tv_get_modes(struct drm_connector *connector) mode_ptr->clock = (int) tmp; mode_ptr->type = DRM_MODE_TYPE_DRIVER; + intel_tv_chose_preferred_modes(connector, mode_ptr); drm_mode_probed_add(connector, mode_ptr); count++; } diff --git a/drivers/gpu/drm/radeon/Makefile b/drivers/gpu/drm/radeon/Makefile index 5fae1e074b4..013d3805994 100644 --- a/drivers/gpu/drm/radeon/Makefile +++ b/drivers/gpu/drm/radeon/Makefile @@ -13,7 +13,8 @@ radeon-$(CONFIG_DRM_RADEON_KMS) += radeon_device.o radeon_kms.o \ radeon_encoders.o radeon_display.o radeon_cursor.o radeon_i2c.o \ radeon_clocks.o radeon_fb.o radeon_gem.o radeon_ring.o radeon_irq_kms.o \ radeon_cs.o radeon_bios.o radeon_benchmark.o r100.o r300.o r420.o \ - rs400.o rs600.o rs690.o rv515.o r520.o r600.o rs780.o rv770.o + rs400.o rs600.o rs690.o rv515.o r520.o r600.o rs780.o rv770.o \ + radeon_test.o radeon-$(CONFIG_COMPAT) += radeon_ioc32.o diff --git a/drivers/gpu/drm/radeon/atombios_crtc.c b/drivers/gpu/drm/radeon/atombios_crtc.c index c0080cc9bf8..74d034f77c6 100644 --- a/drivers/gpu/drm/radeon/atombios_crtc.c +++ b/drivers/gpu/drm/radeon/atombios_crtc.c @@ -31,6 +31,132 @@ #include "atom.h" #include "atom-bits.h" +static void atombios_overscan_setup(struct drm_crtc *crtc, + struct drm_display_mode *mode, + struct drm_display_mode *adjusted_mode) +{ + struct drm_device *dev = crtc->dev; + struct radeon_device *rdev = dev->dev_private; + struct radeon_crtc *radeon_crtc = to_radeon_crtc(crtc); + SET_CRTC_OVERSCAN_PS_ALLOCATION args; + int index = GetIndexIntoMasterTable(COMMAND, SetCRTC_OverScan); + int a1, a2; + + memset(&args, 0, sizeof(args)); + + args.usOverscanRight = 0; + args.usOverscanLeft = 0; + args.usOverscanBottom = 0; + args.usOverscanTop = 0; + args.ucCRTC = radeon_crtc->crtc_id; + + switch (radeon_crtc->rmx_type) { + case RMX_CENTER: + args.usOverscanTop = (adjusted_mode->crtc_vdisplay - mode->crtc_vdisplay) / 2; + args.usOverscanBottom = (adjusted_mode->crtc_vdisplay - mode->crtc_vdisplay) / 2; + args.usOverscanLeft = (adjusted_mode->crtc_hdisplay - mode->crtc_hdisplay) / 2; + args.usOverscanRight = 
(adjusted_mode->crtc_hdisplay - mode->crtc_hdisplay) / 2; + atom_execute_table(rdev->mode_info.atom_context, index, (uint32_t *)&args); + break; + case RMX_ASPECT: + a1 = mode->crtc_vdisplay * adjusted_mode->crtc_hdisplay; + a2 = adjusted_mode->crtc_vdisplay * mode->crtc_hdisplay; + + if (a1 > a2) { + args.usOverscanLeft = (adjusted_mode->crtc_hdisplay - (a2 / mode->crtc_vdisplay)) / 2; + args.usOverscanRight = (adjusted_mode->crtc_hdisplay - (a2 / mode->crtc_vdisplay)) / 2; + } else if (a2 > a1) { + args.usOverscanLeft = (adjusted_mode->crtc_vdisplay - (a1 / mode->crtc_hdisplay)) / 2; + args.usOverscanRight = (adjusted_mode->crtc_vdisplay - (a1 / mode->crtc_hdisplay)) / 2; + } + atom_execute_table(rdev->mode_info.atom_context, index, (uint32_t *)&args); + break; + case RMX_FULL: + default: + args.usOverscanRight = 0; + args.usOverscanLeft = 0; + args.usOverscanBottom = 0; + args.usOverscanTop = 0; + atom_execute_table(rdev->mode_info.atom_context, index, (uint32_t *)&args); + break; + } +} + +static void atombios_scaler_setup(struct drm_crtc *crtc) +{ + struct drm_device *dev = crtc->dev; + struct radeon_device *rdev = dev->dev_private; + struct radeon_crtc *radeon_crtc = to_radeon_crtc(crtc); + ENABLE_SCALER_PS_ALLOCATION args; + int index = GetIndexIntoMasterTable(COMMAND, EnableScaler); + /* fixme - fill in enc_priv for atom dac */ + enum radeon_tv_std tv_std = TV_STD_NTSC; + + if (!ASIC_IS_AVIVO(rdev) && radeon_crtc->crtc_id) + return; + + memset(&args, 0, sizeof(args)); + + args.ucScaler = radeon_crtc->crtc_id; + + if (radeon_crtc->devices & (ATOM_DEVICE_TV_SUPPORT)) { + switch (tv_std) { + case TV_STD_NTSC: + default: + args.ucTVStandard = ATOM_TV_NTSC; + break; + case TV_STD_PAL: + args.ucTVStandard = ATOM_TV_PAL; + break; + case TV_STD_PAL_M: + args.ucTVStandard = ATOM_TV_PALM; + break; + case TV_STD_PAL_60: + args.ucTVStandard = ATOM_TV_PAL60; + break; + case TV_STD_NTSC_J: + args.ucTVStandard = ATOM_TV_NTSCJ; + break; + case TV_STD_SCART_PAL: + args.ucTVStandard = ATOM_TV_PAL; /* ??? */ + break; + case TV_STD_SECAM: + args.ucTVStandard = ATOM_TV_SECAM; + break; + case TV_STD_PAL_CN: + args.ucTVStandard = ATOM_TV_PALCN; + break; + } + args.ucEnable = SCALER_ENABLE_MULTITAP_MODE; + } else if (radeon_crtc->devices & (ATOM_DEVICE_CV_SUPPORT)) { + args.ucTVStandard = ATOM_TV_CV; + args.ucEnable = SCALER_ENABLE_MULTITAP_MODE; + } else { + switch (radeon_crtc->rmx_type) { + case RMX_FULL: + args.ucEnable = ATOM_SCALER_EXPANSION; + break; + case RMX_CENTER: + args.ucEnable = ATOM_SCALER_CENTER; + break; + case RMX_ASPECT: + args.ucEnable = ATOM_SCALER_EXPANSION; + break; + default: + if (ASIC_IS_AVIVO(rdev)) + args.ucEnable = ATOM_SCALER_DISABLE; + else + args.ucEnable = ATOM_SCALER_CENTER; + break; + } + } + atom_execute_table(rdev->mode_info.atom_context, index, (uint32_t *)&args); + if (radeon_crtc->devices & (ATOM_DEVICE_CV_SUPPORT | ATOM_DEVICE_TV_SUPPORT) + && rdev->family >= CHIP_RV515 && rdev->family <= CHIP_RV570) { + atom_rv515_force_tv_scaler(rdev); + } +} + static void atombios_lock_crtc(struct drm_crtc *crtc, int lock) { struct radeon_crtc *radeon_crtc = to_radeon_crtc(crtc); @@ -203,6 +329,12 @@ void atombios_crtc_set_pll(struct drm_crtc *crtc, struct drm_display_mode *mode) if (ASIC_IS_AVIVO(rdev)) { uint32_t ss_cntl; + if ((rdev->family == CHIP_RS600) || + (rdev->family == CHIP_RS690) || + (rdev->family == CHIP_RS740)) + pll_flags |= (RADEON_PLL_USE_FRAC_FB_DIV | + RADEON_PLL_PREFER_CLOSEST_LOWER); + if (ASIC_IS_DCE32(rdev) && mode->clock > 200000) /* range limits??? 
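The RMX_ASPECT branch above cross-multiplies the two aspect ratios (a1 = mode_v * adj_h, a2 = adj_v * mode_h) so no division is needed to decide which axis gets borders. A standalone sketch of that computation; the struct and field names are illustrative, and for clarity this sketch puts the second case into top/bottom borders even though the patch writes both cases into the left/right overscan fields:

#include <stdio.h>

struct overscan {	/* models usOverscanLeft/Right/Top/Bottom */
	int left, right, top, bottom;
};

/* Aspect-preserving overscan: compare mode and panel ratios without dividing. */
static struct overscan aspect_overscan(int mode_h, int mode_v,
				       int adj_h, int adj_v)
{
	struct overscan o = { 0, 0, 0, 0 };
	int a1 = mode_v * adj_h;	/* panel aspect, cross-multiplied */
	int a2 = adj_v * mode_h;	/* mode aspect, cross-multiplied */

	if (a1 > a2)			/* panel is wider: pillarbox */
		o.left = o.right = (adj_h - a2 / mode_v) / 2;
	else if (a2 > a1)		/* panel is taller: letterbox */
		o.top = o.bottom = (adj_v - a1 / mode_h) / 2;
	return o;
}

int main(void)
{
	/* 1024x768 kept at aspect on a 1680x1050 panel -> 140px side borders */
	struct overscan o = aspect_overscan(1024, 768, 1680, 1050);

	printf("left/right %d, top/bottom %d\n", o.left, o.top);
	return 0;
}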
*/ pll_flags |= RADEON_PLL_PREFER_HIGH_FB_DIV; else @@ -321,7 +453,7 @@ int atombios_crtc_set_base(struct drm_crtc *crtc, int x, int y, struct drm_gem_object *obj; struct drm_radeon_gem_object *obj_priv; uint64_t fb_location; - uint32_t fb_format, fb_pitch_pixels; + uint32_t fb_format, fb_pitch_pixels, tiling_flags; if (!crtc->fb) return -EINVAL; @@ -358,7 +490,14 @@ int atombios_crtc_set_base(struct drm_crtc *crtc, int x, int y, return -EINVAL; } - /* TODO tiling */ + radeon_object_get_tiling_flags(obj->driver_private, + &tiling_flags, NULL); + if (tiling_flags & RADEON_TILING_MACRO) + fb_format |= AVIVO_D1GRPH_MACRO_ADDRESS_MODE; + + if (tiling_flags & RADEON_TILING_MICRO) + fb_format |= AVIVO_D1GRPH_TILED; + if (radeon_crtc->crtc_id == 0) WREG32(AVIVO_D1VGA_CONTROL, 0); else @@ -509,6 +648,9 @@ int atombios_crtc_mode_set(struct drm_crtc *crtc, radeon_crtc_set_base(crtc, x, y, old_fb); radeon_legacy_atom_set_surface(crtc); } + atombios_overscan_setup(crtc, mode, adjusted_mode); + atombios_scaler_setup(crtc); + radeon_bandwidth_update(rdev); return 0; } @@ -516,6 +658,8 @@ static bool atombios_crtc_mode_fixup(struct drm_crtc *crtc, struct drm_display_mode *mode, struct drm_display_mode *adjusted_mode) { + if (!radeon_crtc_scaling_mode_fixup(crtc, mode, adjusted_mode)) + return false; return true; } @@ -548,148 +692,3 @@ void radeon_atombios_init_crtc(struct drm_device *dev, AVIVO_D2CRTC_H_TOTAL - AVIVO_D1CRTC_H_TOTAL; drm_crtc_helper_add(&radeon_crtc->base, &atombios_helper_funcs); } - -void radeon_init_disp_bw_avivo(struct drm_device *dev, - struct drm_display_mode *mode1, - uint32_t pixel_bytes1, - struct drm_display_mode *mode2, - uint32_t pixel_bytes2) -{ - struct radeon_device *rdev = dev->dev_private; - fixed20_12 min_mem_eff; - fixed20_12 peak_disp_bw, mem_bw, pix_clk, pix_clk2, temp_ff; - fixed20_12 sclk_ff, mclk_ff; - uint32_t dc_lb_memory_split, temp; - - min_mem_eff.full = rfixed_const_8(0); - if (rdev->disp_priority == 2) { - uint32_t mc_init_misc_lat_timer = 0; - if (rdev->family == CHIP_RV515) - mc_init_misc_lat_timer = - RREG32_MC(RV515_MC_INIT_MISC_LAT_TIMER); - else if (rdev->family == CHIP_RS690) - mc_init_misc_lat_timer = - RREG32_MC(RS690_MC_INIT_MISC_LAT_TIMER); - - mc_init_misc_lat_timer &= - ~(R300_MC_DISP1R_INIT_LAT_MASK << - R300_MC_DISP1R_INIT_LAT_SHIFT); - mc_init_misc_lat_timer &= - ~(R300_MC_DISP0R_INIT_LAT_MASK << - R300_MC_DISP0R_INIT_LAT_SHIFT); - - if (mode2) - mc_init_misc_lat_timer |= - (1 << R300_MC_DISP1R_INIT_LAT_SHIFT); - if (mode1) - mc_init_misc_lat_timer |= - (1 << R300_MC_DISP0R_INIT_LAT_SHIFT); - - if (rdev->family == CHIP_RV515) - WREG32_MC(RV515_MC_INIT_MISC_LAT_TIMER, - mc_init_misc_lat_timer); - else if (rdev->family == CHIP_RS690) - WREG32_MC(RS690_MC_INIT_MISC_LAT_TIMER, - mc_init_misc_lat_timer); - } - - /* - * determine is there is enough bw for current mode - */ - temp_ff.full = rfixed_const(100); - mclk_ff.full = rfixed_const(rdev->clock.default_mclk); - mclk_ff.full = rfixed_div(mclk_ff, temp_ff); - sclk_ff.full = rfixed_const(rdev->clock.default_sclk); - sclk_ff.full = rfixed_div(sclk_ff, temp_ff); - - temp = (rdev->mc.vram_width / 8) * (rdev->mc.vram_is_ddr ? 
2 : 1); - temp_ff.full = rfixed_const(temp); - mem_bw.full = rfixed_mul(mclk_ff, temp_ff); - mem_bw.full = rfixed_mul(mem_bw, min_mem_eff); - - pix_clk.full = 0; - pix_clk2.full = 0; - peak_disp_bw.full = 0; - if (mode1) { - temp_ff.full = rfixed_const(1000); - pix_clk.full = rfixed_const(mode1->clock); /* convert to fixed point */ - pix_clk.full = rfixed_div(pix_clk, temp_ff); - temp_ff.full = rfixed_const(pixel_bytes1); - peak_disp_bw.full += rfixed_mul(pix_clk, temp_ff); - } - if (mode2) { - temp_ff.full = rfixed_const(1000); - pix_clk2.full = rfixed_const(mode2->clock); /* convert to fixed point */ - pix_clk2.full = rfixed_div(pix_clk2, temp_ff); - temp_ff.full = rfixed_const(pixel_bytes2); - peak_disp_bw.full += rfixed_mul(pix_clk2, temp_ff); - } - - if (peak_disp_bw.full >= mem_bw.full) { - DRM_ERROR - ("You may not have enough display bandwidth for current mode\n" - "If you have flickering problem, try to lower resolution, refresh rate, or color depth\n"); - printk("peak disp bw %d, mem_bw %d\n", - rfixed_trunc(peak_disp_bw), rfixed_trunc(mem_bw)); - } - - /* - * Line Buffer Setup - * There is a single line buffer shared by both display controllers. - * DC_LB_MEMORY_SPLIT controls how that line buffer is shared between the display - * controllers. The paritioning can either be done manually or via one of four - * preset allocations specified in bits 1:0: - * 0 - line buffer is divided in half and shared between each display controller - * 1 - D1 gets 3/4 of the line buffer, D2 gets 1/4 - * 2 - D1 gets the whole buffer - * 3 - D1 gets 1/4 of the line buffer, D2 gets 3/4 - * Setting bit 2 of DC_LB_MEMORY_SPLIT controls switches to manual allocation mode. - * In manual allocation mode, D1 always starts at 0, D1 end/2 is specified in bits - * 14:4; D2 allocation follows D1. - */ - - /* is auto or manual better ? 
*/ - dc_lb_memory_split = - RREG32(AVIVO_DC_LB_MEMORY_SPLIT) & ~AVIVO_DC_LB_MEMORY_SPLIT_MASK; - dc_lb_memory_split &= ~AVIVO_DC_LB_MEMORY_SPLIT_SHIFT_MODE; -#if 1 - /* auto */ - if (mode1 && mode2) { - if (mode1->hdisplay > mode2->hdisplay) { - if (mode1->hdisplay > 2560) - dc_lb_memory_split |= - AVIVO_DC_LB_MEMORY_SPLIT_D1_3Q_D2_1Q; - else - dc_lb_memory_split |= - AVIVO_DC_LB_MEMORY_SPLIT_D1HALF_D2HALF; - } else if (mode2->hdisplay > mode1->hdisplay) { - if (mode2->hdisplay > 2560) - dc_lb_memory_split |= - AVIVO_DC_LB_MEMORY_SPLIT_D1_1Q_D2_3Q; - else - dc_lb_memory_split |= - AVIVO_DC_LB_MEMORY_SPLIT_D1HALF_D2HALF; - } else - dc_lb_memory_split |= - AVIVO_DC_LB_MEMORY_SPLIT_D1HALF_D2HALF; - } else if (mode1) { - dc_lb_memory_split |= AVIVO_DC_LB_MEMORY_SPLIT_D1_ONLY; - } else if (mode2) { - dc_lb_memory_split |= AVIVO_DC_LB_MEMORY_SPLIT_D1_1Q_D2_3Q; - } -#else - /* manual */ - dc_lb_memory_split |= AVIVO_DC_LB_MEMORY_SPLIT_SHIFT_MODE; - dc_lb_memory_split &= - ~(AVIVO_DC_LB_DISP1_END_ADR_MASK << - AVIVO_DC_LB_DISP1_END_ADR_SHIFT); - if (mode1) { - dc_lb_memory_split |= - ((((mode1->hdisplay / 2) + 64) & AVIVO_DC_LB_DISP1_END_ADR_MASK) - << AVIVO_DC_LB_DISP1_END_ADR_SHIFT); - } else if (mode2) { - dc_lb_memory_split |= (0 << AVIVO_DC_LB_DISP1_END_ADR_SHIFT); - } -#endif - WREG32(AVIVO_DC_LB_MEMORY_SPLIT, dc_lb_memory_split); -} diff --git a/drivers/gpu/drm/radeon/r100.c b/drivers/gpu/drm/radeon/r100.c index c550932a108..f1ba8ff4113 100644 --- a/drivers/gpu/drm/radeon/r100.c +++ b/drivers/gpu/drm/radeon/r100.c @@ -110,7 +110,7 @@ int r100_pci_gart_set_page(struct radeon_device *rdev, int i, uint64_t addr) if (i < 0 || i > rdev->gart.num_gpu_pages) { return -EINVAL; } - rdev->gart.table.ram.ptr[i] = cpu_to_le32((uint32_t)addr); + rdev->gart.table.ram.ptr[i] = cpu_to_le32(lower_32_bits(addr)); return 0; } @@ -173,8 +173,12 @@ void r100_mc_setup(struct radeon_device *rdev) DRM_ERROR("Failed to register debugfs file for R100 MC !\n"); } /* Write VRAM size in case we are limiting it */ - WREG32(RADEON_CONFIG_MEMSIZE, rdev->mc.vram_size); - tmp = rdev->mc.vram_location + rdev->mc.vram_size - 1; + WREG32(RADEON_CONFIG_MEMSIZE, rdev->mc.real_vram_size); + /* Novell bug 204882 for RN50/M6/M7 with 8/16/32MB VRAM, + * if the aperture is 64MB but we have 32MB VRAM + * we report only 32MB VRAM but we have to set MC_FB_LOCATION + * to 64MB, otherwise the gpu accidentially dies */ + tmp = rdev->mc.vram_location + rdev->mc.mc_vram_size - 1; tmp = REG_SET(RADEON_MC_FB_TOP, tmp >> 16); tmp |= REG_SET(RADEON_MC_FB_START, rdev->mc.vram_location >> 16); WREG32(RADEON_MC_FB_LOCATION, tmp); @@ -215,7 +219,6 @@ int r100_mc_init(struct radeon_device *rdev) r100_pci_gart_disable(rdev); /* Setup GPU memory space */ - rdev->mc.vram_location = 0xFFFFFFFFUL; rdev->mc.gtt_location = 0xFFFFFFFFUL; if (rdev->flags & RADEON_IS_AGP) { r = radeon_agp_init(rdev); @@ -719,13 +722,14 @@ int r100_cs_packet_parse(struct radeon_cs_parser *p, unsigned idx) { struct radeon_cs_chunk *ib_chunk = &p->chunks[p->chunk_ib_idx]; - uint32_t header = ib_chunk->kdata[idx]; + uint32_t header; if (idx >= ib_chunk->length_dw) { DRM_ERROR("Can not parse packet at %d after CS end %d !\n", idx, ib_chunk->length_dw); return -EINVAL; } + header = ib_chunk->kdata[idx]; pkt->idx = idx; pkt->type = CP_PACKET_GET_TYPE(header); pkt->count = CP_PACKET_GET_COUNT(header); @@ -753,6 +757,102 @@ int r100_cs_packet_parse(struct radeon_cs_parser *p, } /** + * r100_cs_packet_next_vline() - parse userspace VLINE packet + * @parser: parser structure holding 
parsing context. + * + * Userspace sends a special sequence for VLINE waits. + * PACKET0 - VLINE_START_END + value + * PACKET0 - WAIT_UNTIL +_value + * RELOC (P3) - crtc_id in reloc. + * + * This function parses this and relocates the VLINE START END + * and WAIT UNTIL packets to the correct crtc. + * It also detects a switched off crtc and nulls out the + * wait in that case. + */ +int r100_cs_packet_parse_vline(struct radeon_cs_parser *p) +{ + struct radeon_cs_chunk *ib_chunk; + struct drm_mode_object *obj; + struct drm_crtc *crtc; + struct radeon_crtc *radeon_crtc; + struct radeon_cs_packet p3reloc, waitreloc; + int crtc_id; + int r; + uint32_t header, h_idx, reg; + + ib_chunk = &p->chunks[p->chunk_ib_idx]; + + /* parse the wait until */ + r = r100_cs_packet_parse(p, &waitreloc, p->idx); + if (r) + return r; + + /* check its a wait until and only 1 count */ + if (waitreloc.reg != RADEON_WAIT_UNTIL || + waitreloc.count != 0) { + DRM_ERROR("vline wait had illegal wait until segment\n"); + r = -EINVAL; + return r; + } + + if (ib_chunk->kdata[waitreloc.idx + 1] != RADEON_WAIT_CRTC_VLINE) { + DRM_ERROR("vline wait had illegal wait until\n"); + r = -EINVAL; + return r; + } + + /* jump over the NOP */ + r = r100_cs_packet_parse(p, &p3reloc, p->idx); + if (r) + return r; + + h_idx = p->idx - 2; + p->idx += waitreloc.count; + p->idx += p3reloc.count; + + header = ib_chunk->kdata[h_idx]; + crtc_id = ib_chunk->kdata[h_idx + 5]; + reg = ib_chunk->kdata[h_idx] >> 2; + mutex_lock(&p->rdev->ddev->mode_config.mutex); + obj = drm_mode_object_find(p->rdev->ddev, crtc_id, DRM_MODE_OBJECT_CRTC); + if (!obj) { + DRM_ERROR("cannot find crtc %d\n", crtc_id); + r = -EINVAL; + goto out; + } + crtc = obj_to_crtc(obj); + radeon_crtc = to_radeon_crtc(crtc); + crtc_id = radeon_crtc->crtc_id; + + if (!crtc->enabled) { + /* if the CRTC isn't enabled - we need to nop out the wait until */ + ib_chunk->kdata[h_idx + 2] = PACKET2(0); + ib_chunk->kdata[h_idx + 3] = PACKET2(0); + } else if (crtc_id == 1) { + switch (reg) { + case AVIVO_D1MODE_VLINE_START_END: + header &= R300_CP_PACKET0_REG_MASK; + header |= AVIVO_D2MODE_VLINE_START_END >> 2; + break; + case RADEON_CRTC_GUI_TRIG_VLINE: + header &= R300_CP_PACKET0_REG_MASK; + header |= RADEON_CRTC2_GUI_TRIG_VLINE >> 2; + break; + default: + DRM_ERROR("unknown crtc reloc\n"); + r = -EINVAL; + goto out; + } + ib_chunk->kdata[h_idx] = header; + ib_chunk->kdata[h_idx + 3] |= RADEON_ENG_DISPLAY_SELECT_CRTC1; + } +out: + mutex_unlock(&p->rdev->ddev->mode_config.mutex); + return r; +} + +/** * r100_cs_packet_next_reloc() - parse next packet which should be reloc packet3 * @parser: parser structure holding parsing context. * @data: pointer to relocation data @@ -814,6 +914,7 @@ static int r100_packet0_check(struct radeon_cs_parser *p, unsigned idx; bool onereg; int r; + u32 tile_flags = 0; ib = p->ib->ptr; ib_chunk = &p->chunks[p->chunk_ib_idx]; @@ -825,6 +926,15 @@ static int r100_packet0_check(struct radeon_cs_parser *p, } for (i = 0; i <= pkt->count; i++, idx++, reg += 4) { switch (reg) { + case RADEON_CRTC_GUI_TRIG_VLINE: + r = r100_cs_packet_parse_vline(p); + if (r) { + DRM_ERROR("No reloc for ib[%d]=0x%04X\n", + idx, reg); + r100_cs_dump_packet(p, pkt); + return r; + } + break; /* FIXME: only allow PACKET3 blit? 
easier to check for out of * range access */ case RADEON_DST_PITCH_OFFSET: @@ -838,7 +948,20 @@ static int r100_packet0_check(struct radeon_cs_parser *p, } tmp = ib_chunk->kdata[idx] & 0x003fffff; tmp += (((u32)reloc->lobj.gpu_offset) >> 10); - ib[idx] = (ib_chunk->kdata[idx] & 0xffc00000) | tmp; + + if (reloc->lobj.tiling_flags & RADEON_TILING_MACRO) + tile_flags |= RADEON_DST_TILE_MACRO; + if (reloc->lobj.tiling_flags & RADEON_TILING_MICRO) { + if (reg == RADEON_SRC_PITCH_OFFSET) { + DRM_ERROR("Cannot src blit from microtiled surface\n"); + r100_cs_dump_packet(p, pkt); + return -EINVAL; + } + tile_flags |= RADEON_DST_TILE_MICRO; + } + + tmp |= tile_flags; + ib[idx] = (ib_chunk->kdata[idx] & 0x3fc00000) | tmp; break; case RADEON_RB3D_DEPTHOFFSET: case RADEON_RB3D_COLOROFFSET: @@ -869,6 +992,11 @@ static int r100_packet0_check(struct radeon_cs_parser *p, case R300_TX_OFFSET_0+52: case R300_TX_OFFSET_0+56: case R300_TX_OFFSET_0+60: + /* rn50 has no 3D engine so fail on any 3d setup */ + if (ASIC_IS_RN50(p->rdev)) { + DRM_ERROR("attempt to use RN50 3D engine failed\n"); + return -EINVAL; + } r = r100_cs_packet_next_reloc(p, &reloc); if (r) { DRM_ERROR("No reloc for ib[%d]=0x%04X\n", @@ -878,6 +1006,25 @@ static int r100_packet0_check(struct radeon_cs_parser *p, } ib[idx] = ib_chunk->kdata[idx] + ((u32)reloc->lobj.gpu_offset); break; + case R300_RB3D_COLORPITCH0: + case RADEON_RB3D_COLORPITCH: + r = r100_cs_packet_next_reloc(p, &reloc); + if (r) { + DRM_ERROR("No reloc for ib[%d]=0x%04X\n", + idx, reg); + r100_cs_dump_packet(p, pkt); + return r; + } + + if (reloc->lobj.tiling_flags & RADEON_TILING_MACRO) + tile_flags |= RADEON_COLOR_TILE_ENABLE; + if (reloc->lobj.tiling_flags & RADEON_TILING_MICRO) + tile_flags |= RADEON_COLOR_MICROTILE_ENABLE; + + tmp = ib_chunk->kdata[idx] & ~(0x7 << 16); + tmp |= tile_flags; + ib[idx] = tmp; + break; default: /* FIXME: we don't want to allow anyothers packet */ break; @@ -1256,29 +1403,100 @@ static void r100_vram_get_type(struct radeon_device *rdev) } } -void r100_vram_info(struct radeon_device *rdev) +static u32 r100_get_accessible_vram(struct radeon_device *rdev) { - r100_vram_get_type(rdev); + u32 aper_size; + u8 byte; + + aper_size = RREG32(RADEON_CONFIG_APER_SIZE); + + /* Set HDP_APER_CNTL only on cards that are known not to be broken, + * that is has the 2nd generation multifunction PCI interface + */ + if (rdev->family == CHIP_RV280 || + rdev->family >= CHIP_RV350) { + WREG32_P(RADEON_HOST_PATH_CNTL, RADEON_HDP_APER_CNTL, + ~RADEON_HDP_APER_CNTL); + DRM_INFO("Generation 2 PCI interface, using max accessible memory\n"); + return aper_size * 2; + } + + /* Older cards have all sorts of funny issues to deal with. First + * check if it's a multifunction card by reading the PCI config + * header type... Limit those to one aperture size + */ + pci_read_config_byte(rdev->pdev, 0xe, &byte); + if (byte & 0x80) { + DRM_INFO("Generation 1 PCI interface in multifunction mode\n"); + DRM_INFO("Limiting VRAM to one aperture\n"); + return aper_size; + } + + /* Single function older card. We read HDP_APER_CNTL to see how the BIOS + * have set it up. We don't write this as it's broken on some ASICs but + * we expect the BIOS to have done the right thing (might be too optimistic...) 
+ */ + if (RREG32(RADEON_HOST_PATH_CNTL) & RADEON_HDP_APER_CNTL) + return aper_size * 2; + return aper_size; +} + +void r100_vram_init_sizes(struct radeon_device *rdev) +{ + u64 config_aper_size; + u32 accessible; + + config_aper_size = RREG32(RADEON_CONFIG_APER_SIZE); if (rdev->flags & RADEON_IS_IGP) { uint32_t tom; /* read NB_TOM to get the amount of ram stolen for the GPU */ tom = RREG32(RADEON_NB_TOM); - rdev->mc.vram_size = (((tom >> 16) - (tom & 0xffff) + 1) << 16); - WREG32(RADEON_CONFIG_MEMSIZE, rdev->mc.vram_size); + rdev->mc.real_vram_size = (((tom >> 16) - (tom & 0xffff) + 1) << 16); + /* for IGPs we need to keep VRAM where it was put by the BIOS */ + rdev->mc.vram_location = (tom & 0xffff) << 16; + WREG32(RADEON_CONFIG_MEMSIZE, rdev->mc.real_vram_size); + rdev->mc.mc_vram_size = rdev->mc.real_vram_size; } else { - rdev->mc.vram_size = RREG32(RADEON_CONFIG_MEMSIZE); + rdev->mc.real_vram_size = RREG32(RADEON_CONFIG_MEMSIZE); /* Some production boards of m6 will report 0 * if it's 8 MB */ - if (rdev->mc.vram_size == 0) { - rdev->mc.vram_size = 8192 * 1024; - WREG32(RADEON_CONFIG_MEMSIZE, rdev->mc.vram_size); + if (rdev->mc.real_vram_size == 0) { + rdev->mc.real_vram_size = 8192 * 1024; + WREG32(RADEON_CONFIG_MEMSIZE, rdev->mc.real_vram_size); } + /* let driver place VRAM */ + rdev->mc.vram_location = 0xFFFFFFFFUL; + /* Fix for RN50, M6, M7 with 8/16/32(??) MBs of VRAM - + * Novell bug 204882 + along with lots of ubuntu ones */ + if (config_aper_size > rdev->mc.real_vram_size) + rdev->mc.mc_vram_size = config_aper_size; + else + rdev->mc.mc_vram_size = rdev->mc.real_vram_size; } + /* work out accessible VRAM */ + accessible = r100_get_accessible_vram(rdev); + rdev->mc.aper_base = drm_get_resource_start(rdev->ddev, 0); rdev->mc.aper_size = drm_get_resource_len(rdev->ddev, 0); + + if (accessible > rdev->mc.aper_size) + accessible = rdev->mc.aper_size; + + if (rdev->mc.mc_vram_size > rdev->mc.aper_size) + rdev->mc.mc_vram_size = rdev->mc.aper_size; + + if (rdev->mc.real_vram_size > rdev->mc.aper_size) + rdev->mc.real_vram_size = rdev->mc.aper_size; +} + +void r100_vram_info(struct radeon_device *rdev) +{ + r100_vram_get_type(rdev); + + r100_vram_init_sizes(rdev); } @@ -1533,3 +1751,530 @@ int r100_debugfs_mc_info_init(struct radeon_device *rdev) return 0; #endif } + +int r100_set_surface_reg(struct radeon_device *rdev, int reg, + uint32_t tiling_flags, uint32_t pitch, + uint32_t offset, uint32_t obj_size) +{ + int surf_index = reg * 16; + int flags = 0; + + /* r100/r200 divide by 16 */ + if (rdev->family < CHIP_R300) + flags = pitch / 16; + else + flags = pitch / 8; + + if (rdev->family <= CHIP_RS200) { + if ((tiling_flags & (RADEON_TILING_MACRO|RADEON_TILING_MICRO)) + == (RADEON_TILING_MACRO|RADEON_TILING_MICRO)) + flags |= RADEON_SURF_TILE_COLOR_BOTH; + if (tiling_flags & RADEON_TILING_MACRO) + flags |= RADEON_SURF_TILE_COLOR_MACRO; + } else if (rdev->family <= CHIP_RV280) { + if (tiling_flags & (RADEON_TILING_MACRO)) + flags |= R200_SURF_TILE_COLOR_MACRO; + if (tiling_flags & RADEON_TILING_MICRO) + flags |= R200_SURF_TILE_COLOR_MICRO; + } else { + if (tiling_flags & RADEON_TILING_MACRO) + flags |= R300_SURF_TILE_MACRO; + if (tiling_flags & RADEON_TILING_MICRO) + flags |= R300_SURF_TILE_MICRO; + } + + DRM_DEBUG("writing surface %d %d %x %x\n", reg, flags, offset, offset+obj_size-1); + WREG32(RADEON_SURFACE0_INFO + surf_index, flags); + WREG32(RADEON_SURFACE0_LOWER_BOUND + surf_index, offset); + WREG32(RADEON_SURFACE0_UPPER_BOUND + surf_index, offset + obj_size - 1); + return 0; +} + 
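[Editorial note, not part of the patch: the hunks above introduce two distinct VRAM sizes (real_vram_size vs. mc_vram_size) plus a CPU-accessible clamp, and the reasoning is spread across comments such as the Novell bug 204882 workaround. The standalone C sketch below condenses that sizing decision for readability; the struct, function name, and example numbers are invented for illustration, while the real driver reads the values from CONFIG_MEMSIZE, CONFIG_APER_SIZE and the PCI BAR.]

/* Illustrative sketch of the r100 VRAM sizing rules, assuming the three
 * inputs have already been read from hardware. Not driver code. */
#include <stdint.h>
#include <stdio.h>

struct vram_sizes {
	uint64_t real_vram_size;  /* VRAM actually populated on the board */
	uint64_t mc_vram_size;    /* size used when programming MC_FB_LOCATION */
};

static struct vram_sizes init_sizes(uint64_t reported_vram,
				    uint64_t config_aper_size,
				    uint64_t pci_aper_size)
{
	struct vram_sizes mc;

	mc.real_vram_size = reported_vram;

	/* RN50/M6/M7 workaround: if the configured aperture is larger than
	 * the populated VRAM, MC_FB_LOCATION must still span the whole
	 * aperture, so the MC size is rounded up to the aperture size. */
	mc.mc_vram_size = (config_aper_size > reported_vram) ?
			  config_aper_size : reported_vram;

	/* neither size may exceed what the CPU can reach through the BAR */
	if (mc.mc_vram_size > pci_aper_size)
		mc.mc_vram_size = pci_aper_size;
	if (mc.real_vram_size > pci_aper_size)
		mc.real_vram_size = pci_aper_size;

	return mc;
}

int main(void)
{
	/* hypothetical 32MB board sitting behind a 64MB aperture */
	struct vram_sizes mc = init_sizes(32ULL << 20, 64ULL << 20, 64ULL << 20);

	printf("real %lluM, mc %lluM\n",
	       (unsigned long long)(mc.real_vram_size >> 20),
	       (unsigned long long)(mc.mc_vram_size >> 20));
	return 0;
}

[The design point the sketch captures: reporting only the populated 32MB to userspace while programming the MC range to the full 64MB aperture keeps the GPU's frame-buffer window consistent with what the BIOS decoded, which is why the patch tracks mc_vram_size separately instead of reusing real_vram_size.]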
+void r100_clear_surface_reg(struct radeon_device *rdev, int reg) +{ + int surf_index = reg * 16; + WREG32(RADEON_SURFACE0_INFO + surf_index, 0); +} + +void r100_bandwidth_update(struct radeon_device *rdev) +{ + fixed20_12 trcd_ff, trp_ff, tras_ff, trbs_ff, tcas_ff; + fixed20_12 sclk_ff, mclk_ff, sclk_eff_ff, sclk_delay_ff; + fixed20_12 peak_disp_bw, mem_bw, pix_clk, pix_clk2, temp_ff, crit_point_ff; + uint32_t temp, data, mem_trcd, mem_trp, mem_tras; + fixed20_12 memtcas_ff[8] = { + fixed_init(1), + fixed_init(2), + fixed_init(3), + fixed_init(0), + fixed_init_half(1), + fixed_init_half(2), + fixed_init(0), + }; + fixed20_12 memtcas_rs480_ff[8] = { + fixed_init(0), + fixed_init(1), + fixed_init(2), + fixed_init(3), + fixed_init(0), + fixed_init_half(1), + fixed_init_half(2), + fixed_init_half(3), + }; + fixed20_12 memtcas2_ff[8] = { + fixed_init(0), + fixed_init(1), + fixed_init(2), + fixed_init(3), + fixed_init(4), + fixed_init(5), + fixed_init(6), + fixed_init(7), + }; + fixed20_12 memtrbs[8] = { + fixed_init(1), + fixed_init_half(1), + fixed_init(2), + fixed_init_half(2), + fixed_init(3), + fixed_init_half(3), + fixed_init(4), + fixed_init_half(4) + }; + fixed20_12 memtrbs_r4xx[8] = { + fixed_init(4), + fixed_init(5), + fixed_init(6), + fixed_init(7), + fixed_init(8), + fixed_init(9), + fixed_init(10), + fixed_init(11) + }; + fixed20_12 min_mem_eff; + fixed20_12 mc_latency_sclk, mc_latency_mclk, k1; + fixed20_12 cur_latency_mclk, cur_latency_sclk; + fixed20_12 disp_latency, disp_latency_overhead, disp_drain_rate, + disp_drain_rate2, read_return_rate; + fixed20_12 time_disp1_drop_priority; + int c; + int cur_size = 16; /* in octawords */ + int critical_point = 0, critical_point2; +/* uint32_t read_return_rate, time_disp1_drop_priority; */ + int stop_req, max_stop_req; + struct drm_display_mode *mode1 = NULL; + struct drm_display_mode *mode2 = NULL; + uint32_t pixel_bytes1 = 0; + uint32_t pixel_bytes2 = 0; + + if (rdev->mode_info.crtcs[0]->base.enabled) { + mode1 = &rdev->mode_info.crtcs[0]->base.mode; + pixel_bytes1 = rdev->mode_info.crtcs[0]->base.fb->bits_per_pixel / 8; + } + if (rdev->mode_info.crtcs[1]->base.enabled) { + mode2 = &rdev->mode_info.crtcs[1]->base.mode; + pixel_bytes2 = rdev->mode_info.crtcs[1]->base.fb->bits_per_pixel / 8; + } + + min_mem_eff.full = rfixed_const_8(0); + /* get modes */ + if ((rdev->disp_priority == 2) && ASIC_IS_R300(rdev)) { + uint32_t mc_init_misc_lat_timer = RREG32(R300_MC_INIT_MISC_LAT_TIMER); + mc_init_misc_lat_timer &= ~(R300_MC_DISP1R_INIT_LAT_MASK << R300_MC_DISP1R_INIT_LAT_SHIFT); + mc_init_misc_lat_timer &= ~(R300_MC_DISP0R_INIT_LAT_MASK << R300_MC_DISP0R_INIT_LAT_SHIFT); + /* check crtc enables */ + if (mode2) + mc_init_misc_lat_timer |= (1 << R300_MC_DISP1R_INIT_LAT_SHIFT); + if (mode1) + mc_init_misc_lat_timer |= (1 << R300_MC_DISP0R_INIT_LAT_SHIFT); + WREG32(R300_MC_INIT_MISC_LAT_TIMER, mc_init_misc_lat_timer); + } + + /* + * determine is there is enough bw for current mode + */ + mclk_ff.full = rfixed_const(rdev->clock.default_mclk); + temp_ff.full = rfixed_const(100); + mclk_ff.full = rfixed_div(mclk_ff, temp_ff); + sclk_ff.full = rfixed_const(rdev->clock.default_sclk); + sclk_ff.full = rfixed_div(sclk_ff, temp_ff); + + temp = (rdev->mc.vram_width / 8) * (rdev->mc.vram_is_ddr ? 
2 : 1); + temp_ff.full = rfixed_const(temp); + mem_bw.full = rfixed_mul(mclk_ff, temp_ff); + + pix_clk.full = 0; + pix_clk2.full = 0; + peak_disp_bw.full = 0; + if (mode1) { + temp_ff.full = rfixed_const(1000); + pix_clk.full = rfixed_const(mode1->clock); /* convert to fixed point */ + pix_clk.full = rfixed_div(pix_clk, temp_ff); + temp_ff.full = rfixed_const(pixel_bytes1); + peak_disp_bw.full += rfixed_mul(pix_clk, temp_ff); + } + if (mode2) { + temp_ff.full = rfixed_const(1000); + pix_clk2.full = rfixed_const(mode2->clock); /* convert to fixed point */ + pix_clk2.full = rfixed_div(pix_clk2, temp_ff); + temp_ff.full = rfixed_const(pixel_bytes2); + peak_disp_bw.full += rfixed_mul(pix_clk2, temp_ff); + } + + mem_bw.full = rfixed_mul(mem_bw, min_mem_eff); + if (peak_disp_bw.full >= mem_bw.full) { + DRM_ERROR("You may not have enough display bandwidth for current mode\n" + "If you have flickering problem, try to lower resolution, refresh rate, or color depth\n"); + } + + /* Get values from the EXT_MEM_CNTL register...converting its contents. */ + temp = RREG32(RADEON_MEM_TIMING_CNTL); + if ((rdev->family == CHIP_RV100) || (rdev->flags & RADEON_IS_IGP)) { /* RV100, M6, IGPs */ + mem_trcd = ((temp >> 2) & 0x3) + 1; + mem_trp = ((temp & 0x3)) + 1; + mem_tras = ((temp & 0x70) >> 4) + 1; + } else if (rdev->family == CHIP_R300 || + rdev->family == CHIP_R350) { /* r300, r350 */ + mem_trcd = (temp & 0x7) + 1; + mem_trp = ((temp >> 8) & 0x7) + 1; + mem_tras = ((temp >> 11) & 0xf) + 4; + } else if (rdev->family == CHIP_RV350 || + rdev->family <= CHIP_RV380) { + /* rv3x0 */ + mem_trcd = (temp & 0x7) + 3; + mem_trp = ((temp >> 8) & 0x7) + 3; + mem_tras = ((temp >> 11) & 0xf) + 6; + } else if (rdev->family == CHIP_R420 || + rdev->family == CHIP_R423 || + rdev->family == CHIP_RV410) { + /* r4xx */ + mem_trcd = (temp & 0xf) + 3; + if (mem_trcd > 15) + mem_trcd = 15; + mem_trp = ((temp >> 8) & 0xf) + 3; + if (mem_trp > 15) + mem_trp = 15; + mem_tras = ((temp >> 12) & 0x1f) + 6; + if (mem_tras > 31) + mem_tras = 31; + } else { /* RV200, R200 */ + mem_trcd = (temp & 0x7) + 1; + mem_trp = ((temp >> 8) & 0x7) + 1; + mem_tras = ((temp >> 12) & 0xf) + 4; + } + /* convert to FF */ + trcd_ff.full = rfixed_const(mem_trcd); + trp_ff.full = rfixed_const(mem_trp); + tras_ff.full = rfixed_const(mem_tras); + + /* Get values from the MEM_SDRAM_MODE_REG register...converting its */ + temp = RREG32(RADEON_MEM_SDRAM_MODE_REG); + data = (temp & (7 << 20)) >> 20; + if ((rdev->family == CHIP_RV100) || rdev->flags & RADEON_IS_IGP) { + if (rdev->family == CHIP_RS480) /* don't think rs400 */ + tcas_ff = memtcas_rs480_ff[data]; + else + tcas_ff = memtcas_ff[data]; + } else + tcas_ff = memtcas2_ff[data]; + + if (rdev->family == CHIP_RS400 || + rdev->family == CHIP_RS480) { + /* extra cas latency stored in bits 23-25 0-4 clocks */ + data = (temp >> 23) & 0x7; + if (data < 5) + tcas_ff.full += rfixed_const(data); + } + + if (ASIC_IS_R300(rdev) && !(rdev->flags & RADEON_IS_IGP)) { + /* on the R300, Tcas is included in Trbs. 
+ */ + temp = RREG32(RADEON_MEM_CNTL); + data = (R300_MEM_NUM_CHANNELS_MASK & temp); + if (data == 1) { + if (R300_MEM_USE_CD_CH_ONLY & temp) { + temp = RREG32(R300_MC_IND_INDEX); + temp &= ~R300_MC_IND_ADDR_MASK; + temp |= R300_MC_READ_CNTL_CD_mcind; + WREG32(R300_MC_IND_INDEX, temp); + temp = RREG32(R300_MC_IND_DATA); + data = (R300_MEM_RBS_POSITION_C_MASK & temp); + } else { + temp = RREG32(R300_MC_READ_CNTL_AB); + data = (R300_MEM_RBS_POSITION_A_MASK & temp); + } + } else { + temp = RREG32(R300_MC_READ_CNTL_AB); + data = (R300_MEM_RBS_POSITION_A_MASK & temp); + } + if (rdev->family == CHIP_RV410 || + rdev->family == CHIP_R420 || + rdev->family == CHIP_R423) + trbs_ff = memtrbs_r4xx[data]; + else + trbs_ff = memtrbs[data]; + tcas_ff.full += trbs_ff.full; + } + + sclk_eff_ff.full = sclk_ff.full; + + if (rdev->flags & RADEON_IS_AGP) { + fixed20_12 agpmode_ff; + agpmode_ff.full = rfixed_const(radeon_agpmode); + temp_ff.full = rfixed_const_666(16); + sclk_eff_ff.full -= rfixed_mul(agpmode_ff, temp_ff); + } + /* TODO PCIE lanes may affect this - agpmode == 16?? */ + + if (ASIC_IS_R300(rdev)) { + sclk_delay_ff.full = rfixed_const(250); + } else { + if ((rdev->family == CHIP_RV100) || + rdev->flags & RADEON_IS_IGP) { + if (rdev->mc.vram_is_ddr) + sclk_delay_ff.full = rfixed_const(41); + else + sclk_delay_ff.full = rfixed_const(33); + } else { + if (rdev->mc.vram_width == 128) + sclk_delay_ff.full = rfixed_const(57); + else + sclk_delay_ff.full = rfixed_const(41); + } + } + + mc_latency_sclk.full = rfixed_div(sclk_delay_ff, sclk_eff_ff); + + if (rdev->mc.vram_is_ddr) { + if (rdev->mc.vram_width == 32) { + k1.full = rfixed_const(40); + c = 3; + } else { + k1.full = rfixed_const(20); + c = 1; + } + } else { + k1.full = rfixed_const(40); + c = 3; + } + + temp_ff.full = rfixed_const(2); + mc_latency_mclk.full = rfixed_mul(trcd_ff, temp_ff); + temp_ff.full = rfixed_const(c); + mc_latency_mclk.full += rfixed_mul(tcas_ff, temp_ff); + temp_ff.full = rfixed_const(4); + mc_latency_mclk.full += rfixed_mul(tras_ff, temp_ff); + mc_latency_mclk.full += rfixed_mul(trp_ff, temp_ff); + mc_latency_mclk.full += k1.full; + + mc_latency_mclk.full = rfixed_div(mc_latency_mclk, mclk_ff); + mc_latency_mclk.full += rfixed_div(temp_ff, sclk_eff_ff); + + /* + HW cursor time assuming worst case of full size colour cursor. + */ + temp_ff.full = rfixed_const((2 * (cur_size - (rdev->mc.vram_is_ddr + 1)))); + temp_ff.full += trcd_ff.full; + if (temp_ff.full < tras_ff.full) + temp_ff.full = tras_ff.full; + cur_latency_mclk.full = rfixed_div(temp_ff, mclk_ff); + + temp_ff.full = rfixed_const(cur_size); + cur_latency_sclk.full = rfixed_div(temp_ff, sclk_eff_ff); + /* + Find the total latency for the display data. + */ + disp_latency_overhead.full = rfixed_const(80); + disp_latency_overhead.full = rfixed_div(disp_latency_overhead, sclk_ff); + mc_latency_mclk.full += disp_latency_overhead.full + cur_latency_mclk.full; + mc_latency_sclk.full += disp_latency_overhead.full + cur_latency_sclk.full; + + if (mc_latency_mclk.full > mc_latency_sclk.full) + disp_latency.full = mc_latency_mclk.full; + else + disp_latency.full = mc_latency_sclk.full; + + /* setup Max GRPH_STOP_REQ default value */ + if (ASIC_IS_RV100(rdev)) + max_stop_req = 0x5c; + else + max_stop_req = 0x7c; + + if (mode1) { + /* CRTC1 + Set GRPH_BUFFER_CNTL register using h/w defined optimal values. 
+ GRPH_STOP_REQ <= MIN[ 0x7C, (CRTC_H_DISP + 1) * (bit depth) / 0x10 ] + */ + stop_req = mode1->hdisplay * pixel_bytes1 / 16; + + if (stop_req > max_stop_req) + stop_req = max_stop_req; + + /* + Find the drain rate of the display buffer. + */ + temp_ff.full = rfixed_const((16/pixel_bytes1)); + disp_drain_rate.full = rfixed_div(pix_clk, temp_ff); + + /* + Find the critical point of the display buffer. + */ + crit_point_ff.full = rfixed_mul(disp_drain_rate, disp_latency); + crit_point_ff.full += rfixed_const_half(0); + + critical_point = rfixed_trunc(crit_point_ff); + + if (rdev->disp_priority == 2) { + critical_point = 0; + } + + /* + The critical point should never be above max_stop_req-4. Setting + GRPH_CRITICAL_CNTL = 0 will thus force high priority all the time. + */ + if (max_stop_req - critical_point < 4) + critical_point = 0; + + if (critical_point == 0 && mode2 && rdev->family == CHIP_R300) { + /* some R300 cards have problem with this set to 0, when CRTC2 is enabled.*/ + critical_point = 0x10; + } + + temp = RREG32(RADEON_GRPH_BUFFER_CNTL); + temp &= ~(RADEON_GRPH_STOP_REQ_MASK); + temp |= (stop_req << RADEON_GRPH_STOP_REQ_SHIFT); + temp &= ~(RADEON_GRPH_START_REQ_MASK); + if ((rdev->family == CHIP_R350) && + (stop_req > 0x15)) { + stop_req -= 0x10; + } + temp |= (stop_req << RADEON_GRPH_START_REQ_SHIFT); + temp |= RADEON_GRPH_BUFFER_SIZE; + temp &= ~(RADEON_GRPH_CRITICAL_CNTL | + RADEON_GRPH_CRITICAL_AT_SOF | + RADEON_GRPH_STOP_CNTL); + /* + Write the result into the register. + */ + WREG32(RADEON_GRPH_BUFFER_CNTL, ((temp & ~RADEON_GRPH_CRITICAL_POINT_MASK) | + (critical_point << RADEON_GRPH_CRITICAL_POINT_SHIFT))); + +#if 0 + if ((rdev->family == CHIP_RS400) || + (rdev->family == CHIP_RS480)) { + /* attempt to program RS400 disp regs correctly ??? */ + temp = RREG32(RS400_DISP1_REG_CNTL); + temp &= ~(RS400_DISP1_START_REQ_LEVEL_MASK | + RS400_DISP1_STOP_REQ_LEVEL_MASK); + WREG32(RS400_DISP1_REQ_CNTL1, (temp | + (critical_point << RS400_DISP1_START_REQ_LEVEL_SHIFT) | + (critical_point << RS400_DISP1_STOP_REQ_LEVEL_SHIFT))); + temp = RREG32(RS400_DMIF_MEM_CNTL1); + temp &= ~(RS400_DISP1_CRITICAL_POINT_START_MASK | + RS400_DISP1_CRITICAL_POINT_STOP_MASK); + WREG32(RS400_DMIF_MEM_CNTL1, (temp | + (critical_point << RS400_DISP1_CRITICAL_POINT_START_SHIFT) | + (critical_point << RS400_DISP1_CRITICAL_POINT_STOP_SHIFT))); + } +#endif + + DRM_DEBUG("GRPH_BUFFER_CNTL from to %x\n", + /* (unsigned int)info->SavedReg->grph_buffer_cntl, */ + (unsigned int)RREG32(RADEON_GRPH_BUFFER_CNTL)); + } + + if (mode2) { + u32 grph2_cntl; + stop_req = mode2->hdisplay * pixel_bytes2 / 16; + + if (stop_req > max_stop_req) + stop_req = max_stop_req; + + /* + Find the drain rate of the display buffer. 
+ */ + temp_ff.full = rfixed_const((16/pixel_bytes2)); + disp_drain_rate2.full = rfixed_div(pix_clk2, temp_ff); + + grph2_cntl = RREG32(RADEON_GRPH2_BUFFER_CNTL); + grph2_cntl &= ~(RADEON_GRPH_STOP_REQ_MASK); + grph2_cntl |= (stop_req << RADEON_GRPH_STOP_REQ_SHIFT); + grph2_cntl &= ~(RADEON_GRPH_START_REQ_MASK); + if ((rdev->family == CHIP_R350) && + (stop_req > 0x15)) { + stop_req -= 0x10; + } + grph2_cntl |= (stop_req << RADEON_GRPH_START_REQ_SHIFT); + grph2_cntl |= RADEON_GRPH_BUFFER_SIZE; + grph2_cntl &= ~(RADEON_GRPH_CRITICAL_CNTL | + RADEON_GRPH_CRITICAL_AT_SOF | + RADEON_GRPH_STOP_CNTL); + + if ((rdev->family == CHIP_RS100) || + (rdev->family == CHIP_RS200)) + critical_point2 = 0; + else { + temp = (rdev->mc.vram_width * rdev->mc.vram_is_ddr + 1)/128; + temp_ff.full = rfixed_const(temp); + temp_ff.full = rfixed_mul(mclk_ff, temp_ff); + if (sclk_ff.full < temp_ff.full) + temp_ff.full = sclk_ff.full; + + read_return_rate.full = temp_ff.full; + + if (mode1) { + temp_ff.full = read_return_rate.full - disp_drain_rate.full; + time_disp1_drop_priority.full = rfixed_div(crit_point_ff, temp_ff); + } else { + time_disp1_drop_priority.full = 0; + } + crit_point_ff.full = disp_latency.full + time_disp1_drop_priority.full + disp_latency.full; + crit_point_ff.full = rfixed_mul(crit_point_ff, disp_drain_rate2); + crit_point_ff.full += rfixed_const_half(0); + + critical_point2 = rfixed_trunc(crit_point_ff); + + if (rdev->disp_priority == 2) { + critical_point2 = 0; + } + + if (max_stop_req - critical_point2 < 4) + critical_point2 = 0; + + } + + if (critical_point2 == 0 && rdev->family == CHIP_R300) { + /* some R300 cards have problem with this set to 0 */ + critical_point2 = 0x10; + } + + WREG32(RADEON_GRPH2_BUFFER_CNTL, ((grph2_cntl & ~RADEON_GRPH_CRITICAL_POINT_MASK) | + (critical_point2 << RADEON_GRPH_CRITICAL_POINT_SHIFT))); + + if ((rdev->family == CHIP_RS400) || + (rdev->family == CHIP_RS480)) { +#if 0 + /* attempt to program RS400 disp2 regs correctly ??? 
*/ + temp = RREG32(RS400_DISP2_REQ_CNTL1); + temp &= ~(RS400_DISP2_START_REQ_LEVEL_MASK | + RS400_DISP2_STOP_REQ_LEVEL_MASK); + WREG32(RS400_DISP2_REQ_CNTL1, (temp | + (critical_point2 << RS400_DISP1_START_REQ_LEVEL_SHIFT) | + (critical_point2 << RS400_DISP1_STOP_REQ_LEVEL_SHIFT))); + temp = RREG32(RS400_DISP2_REQ_CNTL2); + temp &= ~(RS400_DISP2_CRITICAL_POINT_START_MASK | + RS400_DISP2_CRITICAL_POINT_STOP_MASK); + WREG32(RS400_DISP2_REQ_CNTL2, (temp | + (critical_point2 << RS400_DISP2_CRITICAL_POINT_START_SHIFT) | + (critical_point2 << RS400_DISP2_CRITICAL_POINT_STOP_SHIFT))); +#endif + WREG32(RS400_DISP2_REQ_CNTL1, 0x105DC1CC); + WREG32(RS400_DISP2_REQ_CNTL2, 0x2749D000); + WREG32(RS400_DMIF_MEM_CNTL1, 0x29CA71DC); + WREG32(RS400_DISP1_REQ_CNTL1, 0x28FBC3AC); + } + + DRM_DEBUG("GRPH2_BUFFER_CNTL from to %x\n", + (unsigned int)RREG32(RADEON_GRPH2_BUFFER_CNTL)); + } +} diff --git a/drivers/gpu/drm/radeon/r300.c b/drivers/gpu/drm/radeon/r300.c index e2ed5bc0817..9c8d41534a5 100644 --- a/drivers/gpu/drm/radeon/r300.c +++ b/drivers/gpu/drm/radeon/r300.c @@ -30,6 +30,8 @@ #include "drm.h" #include "radeon_reg.h" #include "radeon.h" +#include "radeon_drm.h" +#include "radeon_share.h" /* r300,r350,rv350,rv370,rv380 depends on : */ void r100_hdp_reset(struct radeon_device *rdev); @@ -44,6 +46,7 @@ int r100_gui_wait_for_idle(struct radeon_device *rdev); int r100_cs_packet_parse(struct radeon_cs_parser *p, struct radeon_cs_packet *pkt, unsigned idx); +int r100_cs_packet_parse_vline(struct radeon_cs_parser *p); int r100_cs_packet_next_reloc(struct radeon_cs_parser *p, struct radeon_cs_reloc **cs_reloc); int r100_cs_parse_packet0(struct radeon_cs_parser *p, @@ -150,8 +153,13 @@ int rv370_pcie_gart_set_page(struct radeon_device *rdev, int i, uint64_t addr) if (i < 0 || i > rdev->gart.num_gpu_pages) { return -EINVAL; } - addr = (((u32)addr) >> 8) | ((upper_32_bits(addr) & 0xff) << 4) | 0xC; - writel(cpu_to_le32(addr), ((void __iomem *)ptr) + (i * 4)); + addr = (lower_32_bits(addr) >> 8) | + ((upper_32_bits(addr) & 0xff) << 24) | + 0xc; + /* on x86 we want this to be CPU endian, on powerpc + * on powerpc without HW swappers, it'll get swapped on way + * into VRAM - so no need for cpu_to_le32 on VRAM tables */ + writel(addr, ((void __iomem *)ptr) + (i * 4)); return 0; } @@ -579,10 +587,8 @@ void r300_vram_info(struct radeon_device *rdev) } else { rdev->mc.vram_width = 64; } - rdev->mc.vram_size = RREG32(RADEON_CONFIG_MEMSIZE); - rdev->mc.aper_base = drm_get_resource_start(rdev->ddev, 0); - rdev->mc.aper_size = drm_get_resource_len(rdev->ddev, 0); + r100_vram_init_sizes(rdev); } @@ -970,7 +976,7 @@ static inline void r300_cs_track_clear(struct r300_cs_track *track) static const unsigned r300_reg_safe_bm[159] = { 0xFFFFFFFF, 0xFFFFFFFF, 0xFFFFFFFF, 0xFFFFFFFF, - 0xFFFFFFBF, 0xFFFFFFFF, 0xFFFFFFBF, 0xFFFFFFFF, + 0xFFFFFFFF, 0xFFFFFFFF, 0xFFFFFFFF, 0xFFFFFFFF, 0xFFFFFFFF, 0xFFFFFFFF, 0xFFFFFFFF, 0xFFFFFFFF, 0xFFFFFFFF, 0xFFFFFFFF, 0xFFFFFFFF, 0xFFFFFFFF, 0xFFFFFFFF, 0xFFFFFFFF, 0xFFFFFFFF, 0xFFFFFFFF, @@ -1019,7 +1025,7 @@ static int r300_packet0_check(struct radeon_cs_parser *p, struct radeon_cs_reloc *reloc; struct r300_cs_track *track; volatile uint32_t *ib; - uint32_t tmp; + uint32_t tmp, tile_flags = 0; unsigned i; int r; @@ -1027,6 +1033,16 @@ static int r300_packet0_check(struct radeon_cs_parser *p, ib_chunk = &p->chunks[p->chunk_ib_idx]; track = (struct r300_cs_track*)p->track; switch(reg) { + case AVIVO_D1MODE_VLINE_START_END: + case RADEON_CRTC_GUI_TRIG_VLINE: + r = r100_cs_packet_parse_vline(p); + 
if (r) { + DRM_ERROR("No reloc for ib[%d]=0x%04X\n", + idx, reg); + r100_cs_dump_packet(p, pkt); + return r; + } + break; case RADEON_DST_PITCH_OFFSET: case RADEON_SRC_PITCH_OFFSET: r = r100_cs_packet_next_reloc(p, &reloc); @@ -1038,7 +1054,19 @@ static int r300_packet0_check(struct radeon_cs_parser *p, } tmp = ib_chunk->kdata[idx] & 0x003fffff; tmp += (((u32)reloc->lobj.gpu_offset) >> 10); - ib[idx] = (ib_chunk->kdata[idx] & 0xffc00000) | tmp; + + if (reloc->lobj.tiling_flags & RADEON_TILING_MACRO) + tile_flags |= RADEON_DST_TILE_MACRO; + if (reloc->lobj.tiling_flags & RADEON_TILING_MICRO) { + if (reg == RADEON_SRC_PITCH_OFFSET) { + DRM_ERROR("Cannot src blit from microtiled surface\n"); + r100_cs_dump_packet(p, pkt); + return -EINVAL; + } + tile_flags |= RADEON_DST_TILE_MICRO; + } + tmp |= tile_flags; + ib[idx] = (ib_chunk->kdata[idx] & 0x3fc00000) | tmp; break; case R300_RB3D_COLOROFFSET0: case R300_RB3D_COLOROFFSET1: @@ -1127,6 +1155,23 @@ static int r300_packet0_check(struct radeon_cs_parser *p, /* RB3D_COLORPITCH1 */ /* RB3D_COLORPITCH2 */ /* RB3D_COLORPITCH3 */ + r = r100_cs_packet_next_reloc(p, &reloc); + if (r) { + DRM_ERROR("No reloc for ib[%d]=0x%04X\n", + idx, reg); + r100_cs_dump_packet(p, pkt); + return r; + } + + if (reloc->lobj.tiling_flags & RADEON_TILING_MACRO) + tile_flags |= R300_COLOR_TILE_ENABLE; + if (reloc->lobj.tiling_flags & RADEON_TILING_MICRO) + tile_flags |= R300_COLOR_MICROTILE_ENABLE; + + tmp = ib_chunk->kdata[idx] & ~(0x7 << 16); + tmp |= tile_flags; + ib[idx] = tmp; + i = (reg - 0x4E38) >> 2; track->cb[i].pitch = ib_chunk->kdata[idx] & 0x3FFE; switch (((ib_chunk->kdata[idx] >> 21) & 0xF)) { @@ -1182,6 +1227,23 @@ static int r300_packet0_check(struct radeon_cs_parser *p, break; case 0x4F24: /* ZB_DEPTHPITCH */ + r = r100_cs_packet_next_reloc(p, &reloc); + if (r) { + DRM_ERROR("No reloc for ib[%d]=0x%04X\n", + idx, reg); + r100_cs_dump_packet(p, pkt); + return r; + } + + if (reloc->lobj.tiling_flags & RADEON_TILING_MACRO) + tile_flags |= R300_DEPTHMACROTILE_ENABLE; + if (reloc->lobj.tiling_flags & RADEON_TILING_MICRO) + tile_flags |= R300_DEPTHMICROTILE_TILED;; + + tmp = ib_chunk->kdata[idx] & ~(0x7 << 16); + tmp |= tile_flags; + ib[idx] = tmp; + track->zb.pitch = ib_chunk->kdata[idx] & 0x3FFC; break; case 0x4104: diff --git a/drivers/gpu/drm/radeon/r300_reg.h b/drivers/gpu/drm/radeon/r300_reg.h index 70f48609515..4b7afef35a6 100644 --- a/drivers/gpu/drm/radeon/r300_reg.h +++ b/drivers/gpu/drm/radeon/r300_reg.h @@ -27,7 +27,9 @@ #ifndef _R300_REG_H_ #define _R300_REG_H_ - +#define R300_SURF_TILE_MACRO (1<<16) +#define R300_SURF_TILE_MICRO (2<<16) +#define R300_SURF_TILE_BOTH (3<<16) #define R300_MC_INIT_MISC_LAT_TIMER 0x180 diff --git a/drivers/gpu/drm/radeon/r500_reg.h b/drivers/gpu/drm/radeon/r500_reg.h index 9070a1c2ce2..036691b38cb 100644 --- a/drivers/gpu/drm/radeon/r500_reg.h +++ b/drivers/gpu/drm/radeon/r500_reg.h @@ -445,6 +445,7 @@ #define AVIVO_D1MODE_DATA_FORMAT 0x6528 # define AVIVO_D1MODE_INTERLEAVE_EN (1 << 0) #define AVIVO_D1MODE_DESKTOP_HEIGHT 0x652C +#define AVIVO_D1MODE_VLINE_START_END 0x6538 #define AVIVO_D1MODE_VIEWPORT_START 0x6580 #define AVIVO_D1MODE_VIEWPORT_SIZE 0x6584 #define AVIVO_D1MODE_EXT_OVERSCAN_LEFT_RIGHT 0x6588 @@ -496,6 +497,7 @@ #define AVIVO_D2CUR_SIZE 0x6c10 #define AVIVO_D2CUR_POSITION 0x6c14 +#define AVIVO_D2MODE_VLINE_START_END 0x6d38 #define AVIVO_D2MODE_VIEWPORT_START 0x6d80 #define AVIVO_D2MODE_VIEWPORT_SIZE 0x6d84 #define AVIVO_D2MODE_EXT_OVERSCAN_LEFT_RIGHT 0x6d88 diff --git a/drivers/gpu/drm/radeon/r520.c 
b/drivers/gpu/drm/radeon/r520.c index 570a244bd88..09fb0b6ec7d 100644 --- a/drivers/gpu/drm/radeon/r520.c +++ b/drivers/gpu/drm/radeon/r520.c @@ -28,6 +28,7 @@ #include "drmP.h" #include "radeon_reg.h" #include "radeon.h" +#include "radeon_share.h" /* r520,rv530,rv560,rv570,r580 depends on : */ void r100_hdp_reset(struct radeon_device *rdev); @@ -94,8 +95,8 @@ int r520_mc_init(struct radeon_device *rdev) "programming pipes. Bad things might happen.\n"); } /* Write VRAM size in case we are limiting it */ - WREG32(RADEON_CONFIG_MEMSIZE, rdev->mc.vram_size); - tmp = rdev->mc.vram_location + rdev->mc.vram_size - 1; + WREG32(RADEON_CONFIG_MEMSIZE, rdev->mc.real_vram_size); + tmp = rdev->mc.vram_location + rdev->mc.mc_vram_size - 1; tmp = REG_SET(R520_MC_FB_TOP, tmp >> 16); tmp |= REG_SET(R520_MC_FB_START, rdev->mc.vram_location >> 16); WREG32_MC(R520_MC_FB_LOCATION, tmp); @@ -226,9 +227,20 @@ static void r520_vram_get_type(struct radeon_device *rdev) void r520_vram_info(struct radeon_device *rdev) { + fixed20_12 a; + r520_vram_get_type(rdev); - rdev->mc.vram_size = RREG32(RADEON_CONFIG_MEMSIZE); - rdev->mc.aper_base = drm_get_resource_start(rdev->ddev, 0); - rdev->mc.aper_size = drm_get_resource_len(rdev->ddev, 0); + r100_vram_init_sizes(rdev); + /* FIXME: we should enforce default clock in case GPU is not in + * default setup + */ + a.full = rfixed_const(100); + rdev->pm.sclk.full = rfixed_const(rdev->clock.default_sclk); + rdev->pm.sclk.full = rfixed_div(rdev->pm.sclk, a); +} + +void r520_bandwidth_update(struct radeon_device *rdev) +{ + rv515_bandwidth_avivo_update(rdev); } diff --git a/drivers/gpu/drm/radeon/r600.c b/drivers/gpu/drm/radeon/r600.c index c45559fc97f..538cd907df6 100644 --- a/drivers/gpu/drm/radeon/r600.c +++ b/drivers/gpu/drm/radeon/r600.c @@ -67,7 +67,7 @@ int r600_mc_init(struct radeon_device *rdev) "programming pipes. Bad things might happen.\n"); } - tmp = rdev->mc.vram_location + rdev->mc.vram_size - 1; + tmp = rdev->mc.vram_location + rdev->mc.mc_vram_size - 1; tmp = REG_SET(R600_MC_FB_TOP, tmp >> 24); tmp |= REG_SET(R600_MC_FB_BASE, rdev->mc.vram_location >> 24); WREG32(R600_MC_VM_FB_LOCATION, tmp); @@ -140,7 +140,8 @@ void r600_vram_get_type(struct radeon_device *rdev) void r600_vram_info(struct radeon_device *rdev) { r600_vram_get_type(rdev); - rdev->mc.vram_size = RREG32(R600_CONFIG_MEMSIZE); + rdev->mc.real_vram_size = RREG32(R600_CONFIG_MEMSIZE); + rdev->mc.mc_vram_size = rdev->mc.real_vram_size; /* Could aper size report 0 ? 
*/ rdev->mc.aper_base = drm_get_resource_start(rdev->ddev, 0); diff --git a/drivers/gpu/drm/radeon/r600_cp.c b/drivers/gpu/drm/radeon/r600_cp.c index 146f3570af8..20f17908b03 100644 --- a/drivers/gpu/drm/radeon/r600_cp.c +++ b/drivers/gpu/drm/radeon/r600_cp.c @@ -384,8 +384,9 @@ static void r600_cp_load_microcode(drm_radeon_private_t *dev_priv) DRM_INFO("Loading RV670 PFP Microcode\n"); for (i = 0; i < PFP_UCODE_SIZE; i++) RADEON_WRITE(R600_CP_PFP_UCODE_DATA, RV670_pfp_microcode[i]); - } else if (((dev_priv->flags & RADEON_FAMILY_MASK) == CHIP_RS780)) { - DRM_INFO("Loading RS780 CP Microcode\n"); + } else if (((dev_priv->flags & RADEON_FAMILY_MASK) == CHIP_RS780) || + ((dev_priv->flags & RADEON_FAMILY_MASK) == CHIP_RS880)) { + DRM_INFO("Loading RS780/RS880 CP Microcode\n"); for (i = 0; i < PM4_UCODE_SIZE; i++) { RADEON_WRITE(R600_CP_ME_RAM_DATA, RS780_cp_microcode[i][0]); @@ -396,7 +397,7 @@ static void r600_cp_load_microcode(drm_radeon_private_t *dev_priv) } RADEON_WRITE(R600_CP_PFP_UCODE_ADDR, 0); - DRM_INFO("Loading RS780 PFP Microcode\n"); + DRM_INFO("Loading RS780/RS880 PFP Microcode\n"); for (i = 0; i < PFP_UCODE_SIZE; i++) RADEON_WRITE(R600_CP_PFP_UCODE_DATA, RS780_pfp_microcode[i]); } @@ -783,6 +784,7 @@ static void r600_gfx_init(struct drm_device *dev, break; case CHIP_RV610: case CHIP_RS780: + case CHIP_RS880: case CHIP_RV620: dev_priv->r600_max_pipes = 1; dev_priv->r600_max_tile_pipes = 1; @@ -917,7 +919,8 @@ static void r600_gfx_init(struct drm_device *dev, ((dev_priv->flags & RADEON_FAMILY_MASK) == CHIP_RV630) || ((dev_priv->flags & RADEON_FAMILY_MASK) == CHIP_RV610) || ((dev_priv->flags & RADEON_FAMILY_MASK) == CHIP_RV620) || - ((dev_priv->flags & RADEON_FAMILY_MASK) == CHIP_RS780)) + ((dev_priv->flags & RADEON_FAMILY_MASK) == CHIP_RS780) || + ((dev_priv->flags & RADEON_FAMILY_MASK) == CHIP_RS880)) RADEON_WRITE(R600_DB_DEBUG, R600_PREZ_MUST_WAIT_FOR_POSTZ_DONE); else RADEON_WRITE(R600_DB_DEBUG, 0); @@ -935,7 +938,8 @@ static void r600_gfx_init(struct drm_device *dev, sq_ms_fifo_sizes = RADEON_READ(R600_SQ_MS_FIFO_SIZES); if (((dev_priv->flags & RADEON_FAMILY_MASK) == CHIP_RV610) || ((dev_priv->flags & RADEON_FAMILY_MASK) == CHIP_RV620) || - ((dev_priv->flags & RADEON_FAMILY_MASK) == CHIP_RS780)) { + ((dev_priv->flags & RADEON_FAMILY_MASK) == CHIP_RS780) || + ((dev_priv->flags & RADEON_FAMILY_MASK) == CHIP_RS880)) { sq_ms_fifo_sizes = (R600_CACHE_FIFO_SIZE(0xa) | R600_FETCH_FIFO_HIWATER(0xa) | R600_DONE_FIFO_HIWATER(0xe0) | @@ -978,7 +982,8 @@ static void r600_gfx_init(struct drm_device *dev, R600_NUM_ES_STACK_ENTRIES(0)); } else if (((dev_priv->flags & RADEON_FAMILY_MASK) == CHIP_RV610) || ((dev_priv->flags & RADEON_FAMILY_MASK) == CHIP_RV620) || - ((dev_priv->flags & RADEON_FAMILY_MASK) == CHIP_RS780)) { + ((dev_priv->flags & RADEON_FAMILY_MASK) == CHIP_RS780) || + ((dev_priv->flags & RADEON_FAMILY_MASK) == CHIP_RS880)) { /* no vertex cache */ sq_config &= ~R600_VC_ENABLE; @@ -1035,7 +1040,8 @@ static void r600_gfx_init(struct drm_device *dev, if (((dev_priv->flags & RADEON_FAMILY_MASK) == CHIP_RV610) || ((dev_priv->flags & RADEON_FAMILY_MASK) == CHIP_RV620) || - ((dev_priv->flags & RADEON_FAMILY_MASK) == CHIP_RS780)) + ((dev_priv->flags & RADEON_FAMILY_MASK) == CHIP_RS780) || + ((dev_priv->flags & RADEON_FAMILY_MASK) == CHIP_RS880)) RADEON_WRITE(R600_VGT_CACHE_INVALIDATION, R600_CACHE_INVALIDATION(R600_TC_ONLY)); else RADEON_WRITE(R600_VGT_CACHE_INVALIDATION, R600_CACHE_INVALIDATION(R600_VC_AND_TC)); @@ -1078,6 +1084,7 @@ static void r600_gfx_init(struct drm_device *dev, 
break; case CHIP_RV610: case CHIP_RS780: + case CHIP_RS880: case CHIP_RV620: gs_prim_buffer_depth = 32; break; @@ -1123,6 +1130,7 @@ static void r600_gfx_init(struct drm_device *dev, switch (dev_priv->flags & RADEON_FAMILY_MASK) { case CHIP_RV610: case CHIP_RS780: + case CHIP_RS880: case CHIP_RV620: tc_cntl = R600_TC_L2_SIZE(8); break; diff --git a/drivers/gpu/drm/radeon/radeon.h b/drivers/gpu/drm/radeon/radeon.h index d61f2fc61df..b1d945b8ed6 100644 --- a/drivers/gpu/drm/radeon/radeon.h +++ b/drivers/gpu/drm/radeon/radeon.h @@ -64,6 +64,7 @@ extern int radeon_agpmode; extern int radeon_vram_limit; extern int radeon_gart_size; extern int radeon_benchmarking; +extern int radeon_testing; extern int radeon_connector_table; /* @@ -113,6 +114,7 @@ enum radeon_family { CHIP_RV770, CHIP_RV730, CHIP_RV710, + CHIP_RS880, CHIP_LAST, }; @@ -201,6 +203,14 @@ int radeon_fence_wait_last(struct radeon_device *rdev); struct radeon_fence *radeon_fence_ref(struct radeon_fence *fence); void radeon_fence_unref(struct radeon_fence **fence); +/* + * Tiling registers + */ +struct radeon_surface_reg { + struct radeon_object *robj; +}; + +#define RADEON_GEM_MAX_SURFACES 8 /* * Radeon buffer. @@ -213,6 +223,7 @@ struct radeon_object_list { uint64_t gpu_offset; unsigned rdomain; unsigned wdomain; + uint32_t tiling_flags; }; int radeon_object_init(struct radeon_device *rdev); @@ -242,8 +253,15 @@ void radeon_object_list_clean(struct list_head *head); int radeon_object_fbdev_mmap(struct radeon_object *robj, struct vm_area_struct *vma); unsigned long radeon_object_size(struct radeon_object *robj); - - +void radeon_object_clear_surface_reg(struct radeon_object *robj); +int radeon_object_check_tiling(struct radeon_object *robj, bool has_moved, + bool force_drop); +void radeon_object_set_tiling_flags(struct radeon_object *robj, + uint32_t tiling_flags, uint32_t pitch); +void radeon_object_get_tiling_flags(struct radeon_object *robj, uint32_t *tiling_flags, uint32_t *pitch); +void radeon_bo_move_notify(struct ttm_buffer_object *bo, + struct ttm_mem_reg *mem); +void radeon_bo_fault_reserve_notify(struct ttm_buffer_object *bo); /* * GEM objects. */ @@ -315,8 +333,11 @@ struct radeon_mc { unsigned gtt_location; unsigned gtt_size; unsigned vram_location; - unsigned vram_size; + /* for some chips with <= 32MB we need to lie + * about vram size near mc fb location */ + unsigned mc_vram_size; unsigned vram_width; + unsigned real_vram_size; int vram_mtrr; bool vram_is_ddr; }; @@ -474,6 +495,39 @@ struct radeon_wb { uint64_t gpu_addr; }; +/** + * struct radeon_pm - power management datas + * @max_bandwidth: maximum bandwidth the gpu has (MByte/s) + * @igp_sideport_mclk: sideport memory clock Mhz (rs690,rs740,rs780,rs880) + * @igp_system_mclk: system clock Mhz (rs690,rs740,rs780,rs880) + * @igp_ht_link_clk: ht link clock Mhz (rs690,rs740,rs780,rs880) + * @igp_ht_link_width: ht link width in bits (rs690,rs740,rs780,rs880) + * @k8_bandwidth: k8 bandwidth the gpu has (MByte/s) (IGP) + * @sideport_bandwidth: sideport bandwidth the gpu has (MByte/s) (IGP) + * @ht_bandwidth: ht bandwidth the gpu has (MByte/s) (IGP) + * @core_bandwidth: core GPU bandwidth the gpu has (MByte/s) (IGP) + * @sclk: GPU clock Mhz (core bandwith depends of this clock) + * @needed_bandwidth: current bandwidth needs + * + * It keeps track of various data needed to take powermanagement decision. + * Bandwith need is used to determine minimun clock of the GPU and memory. 
+ * Equation between gpu/memory clock and available bandwidth is hw dependent + * (type of memory, bus size, efficiency, ...) + */ +struct radeon_pm { + fixed20_12 max_bandwidth; + fixed20_12 igp_sideport_mclk; + fixed20_12 igp_system_mclk; + fixed20_12 igp_ht_link_clk; + fixed20_12 igp_ht_link_width; + fixed20_12 k8_bandwidth; + fixed20_12 sideport_bandwidth; + fixed20_12 ht_bandwidth; + fixed20_12 core_bandwidth; + fixed20_12 sclk; + fixed20_12 needed_bandwidth; +}; + /* * Benchmarking @@ -482,6 +536,12 @@ void radeon_benchmark(struct radeon_device *rdev); /* + * Testing + */ +void radeon_test_moves(struct radeon_device *rdev); + + +/* * Debugfs */ int radeon_debugfs_add_files(struct radeon_device *rdev, @@ -535,6 +595,11 @@ struct radeon_asic { void (*set_memory_clock)(struct radeon_device *rdev, uint32_t mem_clock); void (*set_pcie_lanes)(struct radeon_device *rdev, int lanes); void (*set_clock_gating)(struct radeon_device *rdev, int enable); + int (*set_surface_reg)(struct radeon_device *rdev, int reg, + uint32_t tiling_flags, uint32_t pitch, + uint32_t offset, uint32_t obj_size); + int (*clear_surface_reg)(struct radeon_device *rdev, int reg); + void (*bandwidth_update)(struct radeon_device *rdev); }; union radeon_asic_config { @@ -566,6 +631,10 @@ int radeon_gem_busy_ioctl(struct drm_device *dev, void *data, int radeon_gem_wait_idle_ioctl(struct drm_device *dev, void *data, struct drm_file *filp); int radeon_cs_ioctl(struct drm_device *dev, void *data, struct drm_file *filp); +int radeon_gem_set_tiling_ioctl(struct drm_device *dev, void *data, + struct drm_file *filp); +int radeon_gem_get_tiling_ioctl(struct drm_device *dev, void *data, + struct drm_file *filp); /* @@ -594,8 +663,8 @@ struct radeon_device { struct radeon_object *fbdev_robj; struct radeon_framebuffer *fbdev_rfb; /* Register mmio */ - unsigned long rmmio_base; - unsigned long rmmio_size; + resource_size_t rmmio_base; + resource_size_t rmmio_size; void *rmmio; radeon_rreg_t mm_rreg; radeon_wreg_t mm_wreg; @@ -619,11 +688,14 @@ struct radeon_device { struct radeon_irq irq; struct radeon_asic *asic; struct radeon_gem gem; + struct radeon_pm pm; struct mutex cs_mutex; struct radeon_wb wb; bool gpu_lockup; bool shutdown; bool suspend; + bool need_dma32; + struct radeon_surface_reg surface_regs[RADEON_GEM_MAX_SURFACES]; }; int radeon_device_init(struct radeon_device *rdev, @@ -670,6 +742,8 @@ void r100_pll_errata_after_index(struct radeon_device *rdev); /* * ASICs helpers. 
*/ +#define ASIC_IS_RN50(rdev) ((rdev->pdev->device == 0x515e) || \ + (rdev->pdev->device == 0x5969)) #define ASIC_IS_RV100(rdev) ((rdev->family == CHIP_RV100) || \ (rdev->family == CHIP_RV200) || \ (rdev->family == CHIP_RS100) || \ @@ -796,5 +870,8 @@ static inline void radeon_ring_write(struct radeon_device *rdev, uint32_t v) #define radeon_set_memory_clock(rdev, e) (rdev)->asic->set_engine_clock((rdev), (e)) #define radeon_set_pcie_lanes(rdev, l) (rdev)->asic->set_pcie_lanes((rdev), (l)) #define radeon_set_clock_gating(rdev, e) (rdev)->asic->set_clock_gating((rdev), (e)) +#define radeon_set_surface_reg(rdev, r, f, p, o, s) ((rdev)->asic->set_surface_reg((rdev), (r), (f), (p), (o), (s))) +#define radeon_clear_surface_reg(rdev, r) ((rdev)->asic->clear_surface_reg((rdev), (r))) +#define radeon_bandwidth_update(rdev) (rdev)->asic->bandwidth_update((rdev)) #endif diff --git a/drivers/gpu/drm/radeon/radeon_asic.h b/drivers/gpu/drm/radeon/radeon_asic.h index e2e567395df..9a75876e0c3 100644 --- a/drivers/gpu/drm/radeon/radeon_asic.h +++ b/drivers/gpu/drm/radeon/radeon_asic.h @@ -71,6 +71,11 @@ int r100_copy_blit(struct radeon_device *rdev, uint64_t dst_offset, unsigned num_pages, struct radeon_fence *fence); +int r100_set_surface_reg(struct radeon_device *rdev, int reg, + uint32_t tiling_flags, uint32_t pitch, + uint32_t offset, uint32_t obj_size); +int r100_clear_surface_reg(struct radeon_device *rdev, int reg); +void r100_bandwidth_update(struct radeon_device *rdev); static struct radeon_asic r100_asic = { .init = &r100_init, @@ -100,6 +105,9 @@ static struct radeon_asic r100_asic = { .set_memory_clock = NULL, .set_pcie_lanes = NULL, .set_clock_gating = &radeon_legacy_set_clock_gating, + .set_surface_reg = r100_set_surface_reg, + .clear_surface_reg = r100_clear_surface_reg, + .bandwidth_update = &r100_bandwidth_update, }; @@ -128,6 +136,7 @@ int r300_copy_dma(struct radeon_device *rdev, uint64_t dst_offset, unsigned num_pages, struct radeon_fence *fence); + static struct radeon_asic r300_asic = { .init = &r300_init, .errata = &r300_errata, @@ -156,6 +165,9 @@ static struct radeon_asic r300_asic = { .set_memory_clock = NULL, .set_pcie_lanes = &rv370_set_pcie_lanes, .set_clock_gating = &radeon_legacy_set_clock_gating, + .set_surface_reg = r100_set_surface_reg, + .clear_surface_reg = r100_clear_surface_reg, + .bandwidth_update = &r100_bandwidth_update, }; /* @@ -193,6 +205,9 @@ static struct radeon_asic r420_asic = { .set_memory_clock = &radeon_atom_set_memory_clock, .set_pcie_lanes = &rv370_set_pcie_lanes, .set_clock_gating = &radeon_atom_set_clock_gating, + .set_surface_reg = r100_set_surface_reg, + .clear_surface_reg = r100_clear_surface_reg, + .bandwidth_update = &r100_bandwidth_update, }; @@ -237,6 +252,9 @@ static struct radeon_asic rs400_asic = { .set_memory_clock = NULL, .set_pcie_lanes = NULL, .set_clock_gating = &radeon_legacy_set_clock_gating, + .set_surface_reg = r100_set_surface_reg, + .clear_surface_reg = r100_clear_surface_reg, + .bandwidth_update = &r100_bandwidth_update, }; @@ -254,6 +272,7 @@ void rs600_gart_tlb_flush(struct radeon_device *rdev); int rs600_gart_set_page(struct radeon_device *rdev, int i, uint64_t addr); uint32_t rs600_mc_rreg(struct radeon_device *rdev, uint32_t reg); void rs600_mc_wreg(struct radeon_device *rdev, uint32_t reg, uint32_t v); +void rs600_bandwidth_update(struct radeon_device *rdev); static struct radeon_asic rs600_asic = { .init = &r300_init, .errata = &rs600_errata, @@ -282,6 +301,7 @@ static struct radeon_asic rs600_asic = { .set_memory_clock = 
&radeon_atom_set_memory_clock, .set_pcie_lanes = NULL, .set_clock_gating = &radeon_atom_set_clock_gating, + .bandwidth_update = &rs600_bandwidth_update, }; @@ -294,6 +314,7 @@ int rs690_mc_init(struct radeon_device *rdev); void rs690_mc_fini(struct radeon_device *rdev); uint32_t rs690_mc_rreg(struct radeon_device *rdev, uint32_t reg); void rs690_mc_wreg(struct radeon_device *rdev, uint32_t reg, uint32_t v); +void rs690_bandwidth_update(struct radeon_device *rdev); static struct radeon_asic rs690_asic = { .init = &r300_init, .errata = &rs690_errata, @@ -322,6 +343,9 @@ static struct radeon_asic rs690_asic = { .set_memory_clock = &radeon_atom_set_memory_clock, .set_pcie_lanes = NULL, .set_clock_gating = &radeon_atom_set_clock_gating, + .set_surface_reg = r100_set_surface_reg, + .clear_surface_reg = r100_clear_surface_reg, + .bandwidth_update = &rs690_bandwidth_update, }; @@ -339,6 +363,7 @@ void rv515_mc_wreg(struct radeon_device *rdev, uint32_t reg, uint32_t v); void rv515_ring_start(struct radeon_device *rdev); uint32_t rv515_pcie_rreg(struct radeon_device *rdev, uint32_t reg); void rv515_pcie_wreg(struct radeon_device *rdev, uint32_t reg, uint32_t v); +void rv515_bandwidth_update(struct radeon_device *rdev); static struct radeon_asic rv515_asic = { .init = &rv515_init, .errata = &rv515_errata, @@ -367,6 +392,9 @@ static struct radeon_asic rv515_asic = { .set_memory_clock = &radeon_atom_set_memory_clock, .set_pcie_lanes = &rv370_set_pcie_lanes, .set_clock_gating = &radeon_atom_set_clock_gating, + .set_surface_reg = r100_set_surface_reg, + .clear_surface_reg = r100_clear_surface_reg, + .bandwidth_update = &rv515_bandwidth_update, }; @@ -377,6 +405,7 @@ void r520_errata(struct radeon_device *rdev); void r520_vram_info(struct radeon_device *rdev); int r520_mc_init(struct radeon_device *rdev); void r520_mc_fini(struct radeon_device *rdev); +void r520_bandwidth_update(struct radeon_device *rdev); static struct radeon_asic r520_asic = { .init = &rv515_init, .errata = &r520_errata, @@ -405,6 +434,9 @@ static struct radeon_asic r520_asic = { .set_memory_clock = &radeon_atom_set_memory_clock, .set_pcie_lanes = &rv370_set_pcie_lanes, .set_clock_gating = &radeon_atom_set_clock_gating, + .set_surface_reg = r100_set_surface_reg, + .clear_surface_reg = r100_clear_surface_reg, + .bandwidth_update = &r520_bandwidth_update, }; /* diff --git a/drivers/gpu/drm/radeon/radeon_atombios.c b/drivers/gpu/drm/radeon/radeon_atombios.c index 1f5a1a49098..fcfe5c02d74 100644 --- a/drivers/gpu/drm/radeon/radeon_atombios.c +++ b/drivers/gpu/drm/radeon/radeon_atombios.c @@ -103,7 +103,8 @@ static inline struct radeon_i2c_bus_rec radeon_lookup_gpio(struct drm_device static bool radeon_atom_apply_quirks(struct drm_device *dev, uint32_t supported_device, int *connector_type, - struct radeon_i2c_bus_rec *i2c_bus) + struct radeon_i2c_bus_rec *i2c_bus, + uint8_t *line_mux) { /* Asus M2A-VM HDMI board lists the DVI port as HDMI */ @@ -127,8 +128,10 @@ static bool radeon_atom_apply_quirks(struct drm_device *dev, if ((dev->pdev->device == 0x5653) && (dev->pdev->subsystem_vendor == 0x1462) && (dev->pdev->subsystem_device == 0x0291)) { - if (*connector_type == DRM_MODE_CONNECTOR_LVDS) + if (*connector_type == DRM_MODE_CONNECTOR_LVDS) { i2c_bus->valid = false; + *line_mux = 53; + } } /* Funky macbooks */ @@ -526,7 +529,7 @@ bool radeon_get_atom_connector_info_from_supported_devices_table(struct if (!radeon_atom_apply_quirks (dev, (1 << i), &bios_connectors[i].connector_type, - &bios_connectors[i].ddc_bus)) + 
&bios_connectors[i].ddc_bus, &bios_connectors[i].line_mux)) continue; bios_connectors[i].valid = true; diff --git a/drivers/gpu/drm/radeon/radeon_benchmark.c b/drivers/gpu/drm/radeon/radeon_benchmark.c index c44403a2ca7..2e938f7496f 100644 --- a/drivers/gpu/drm/radeon/radeon_benchmark.c +++ b/drivers/gpu/drm/radeon/radeon_benchmark.c @@ -63,7 +63,7 @@ void radeon_benchmark_move(struct radeon_device *rdev, unsigned bsize, if (r) { goto out_cleanup; } - r = radeon_copy_dma(rdev, saddr, daddr, size >> 14, fence); + r = radeon_copy_dma(rdev, saddr, daddr, size / 4096, fence); if (r) { goto out_cleanup; } @@ -88,7 +88,7 @@ void radeon_benchmark_move(struct radeon_device *rdev, unsigned bsize, if (r) { goto out_cleanup; } - r = radeon_copy_blit(rdev, saddr, daddr, size >> 14, fence); + r = radeon_copy_blit(rdev, saddr, daddr, size / 4096, fence); if (r) { goto out_cleanup; } diff --git a/drivers/gpu/drm/radeon/radeon_cs.c b/drivers/gpu/drm/radeon/radeon_cs.c index b843f9bdfb1..a169067efc4 100644 --- a/drivers/gpu/drm/radeon/radeon_cs.c +++ b/drivers/gpu/drm/radeon/radeon_cs.c @@ -127,17 +127,23 @@ int radeon_cs_parser_init(struct radeon_cs_parser *p, void *data) sizeof(struct drm_radeon_cs_chunk))) { return -EFAULT; } + p->chunks[i].length_dw = user_chunk.length_dw; + p->chunks[i].kdata = NULL; p->chunks[i].chunk_id = user_chunk.chunk_id; + if (p->chunks[i].chunk_id == RADEON_CHUNK_ID_RELOCS) { p->chunk_relocs_idx = i; } if (p->chunks[i].chunk_id == RADEON_CHUNK_ID_IB) { p->chunk_ib_idx = i; + /* zero length IB isn't useful */ + if (p->chunks[i].length_dw == 0) + return -EINVAL; } + p->chunks[i].length_dw = user_chunk.length_dw; cdata = (uint32_t *)(unsigned long)user_chunk.chunk_data; - p->chunks[i].kdata = NULL; size = p->chunks[i].length_dw * sizeof(uint32_t); p->chunks[i].kdata = kzalloc(size, GFP_KERNEL); if (p->chunks[i].kdata == NULL) { diff --git a/drivers/gpu/drm/radeon/radeon_cursor.c b/drivers/gpu/drm/radeon/radeon_cursor.c index 5232441f119..b13c79e38bc 100644 --- a/drivers/gpu/drm/radeon/radeon_cursor.c +++ b/drivers/gpu/drm/radeon/radeon_cursor.c @@ -111,9 +111,11 @@ static void radeon_set_cursor(struct drm_crtc *crtc, struct drm_gem_object *obj, if (ASIC_IS_AVIVO(rdev)) WREG32(AVIVO_D1CUR_SURFACE_ADDRESS + radeon_crtc->crtc_offset, gpu_addr); - else + else { + radeon_crtc->legacy_cursor_offset = gpu_addr - radeon_crtc->legacy_display_base_addr; /* offset is from DISP(2)_BASE_ADDRESS */ - WREG32(RADEON_CUR_OFFSET + radeon_crtc->crtc_offset, gpu_addr); + WREG32(RADEON_CUR_OFFSET + radeon_crtc->crtc_offset, radeon_crtc->legacy_cursor_offset); + } } int radeon_crtc_cursor_set(struct drm_crtc *crtc, @@ -245,6 +247,9 @@ int radeon_crtc_cursor_move(struct drm_crtc *crtc, (RADEON_CUR_LOCK | ((xorigin ? 0 : x) << 16) | (yorigin ? 0 : y))); + /* offset is from DISP(2)_BASE_ADDRESS */ + WREG32(RADEON_CUR_OFFSET + radeon_crtc->crtc_offset, (radeon_crtc->legacy_cursor_offset + + (yorigin * 256))); } radeon_lock_cursor(crtc, false); diff --git a/drivers/gpu/drm/radeon/radeon_device.c b/drivers/gpu/drm/radeon/radeon_device.c index f30aa7274a5..9ff6dcb97f9 100644 --- a/drivers/gpu/drm/radeon/radeon_device.c +++ b/drivers/gpu/drm/radeon/radeon_device.c @@ -35,6 +35,25 @@ #include "atom.h" /* + * Clear GPU surface registers. 
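The benchmark hunk above replaces "size >> 14" with "size / 4096", i.e. it counts copies in 4 KiB GPU pages rather than dividing by 16384. A quick standalone check with example buffer sizes (the sizes are arbitrary) shows how far apart the two expressions are.

/* Quick check of the benchmark fix above: the copy routines take a count
 * of 4 KiB GPU pages, so the right expression is size / 4096; the old
 * "size >> 14" divided by 16384.  Buffer sizes here are arbitrary. */
#include <stdio.h>

int main(void)
{
        unsigned sizes[] = { 1024 * 1024, 8 * 1024 * 1024 };
        unsigned i;

        for (i = 0; i < sizeof(sizes) / sizeof(sizes[0]); i++)
                printf("size=%-8u old pages=%-4u fixed pages=%u\n",
                       sizes[i], sizes[i] >> 14, sizes[i] / 4096);
        return 0;
}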
+ */ +static void radeon_surface_init(struct radeon_device *rdev) +{ + /* FIXME: check this out */ + if (rdev->family < CHIP_R600) { + int i; + + for (i = 0; i < 8; i++) { + WREG32(RADEON_SURFACE0_INFO + + i * (RADEON_SURFACE1_INFO - RADEON_SURFACE0_INFO), + 0); + } + /* enable surfaces */ + WREG32(RADEON_SURFACE_CNTL, 0); + } +} + +/* * GPU scratch registers helpers function. */ static void radeon_scratch_init(struct radeon_device *rdev) @@ -102,7 +121,7 @@ int radeon_mc_setup(struct radeon_device *rdev) if (rdev->mc.vram_location != 0xFFFFFFFFUL) { /* vram location was already setup try to put gtt after * if it fits */ - tmp = rdev->mc.vram_location + rdev->mc.vram_size; + tmp = rdev->mc.vram_location + rdev->mc.mc_vram_size; tmp = (tmp + rdev->mc.gtt_size - 1) & ~(rdev->mc.gtt_size - 1); if ((0xFFFFFFFFUL - tmp) >= rdev->mc.gtt_size) { rdev->mc.gtt_location = tmp; @@ -117,13 +136,13 @@ int radeon_mc_setup(struct radeon_device *rdev) } else if (rdev->mc.gtt_location != 0xFFFFFFFFUL) { /* gtt location was already setup try to put vram before * if it fits */ - if (rdev->mc.vram_size < rdev->mc.gtt_location) { + if (rdev->mc.mc_vram_size < rdev->mc.gtt_location) { rdev->mc.vram_location = 0; } else { tmp = rdev->mc.gtt_location + rdev->mc.gtt_size; - tmp += (rdev->mc.vram_size - 1); - tmp &= ~(rdev->mc.vram_size - 1); - if ((0xFFFFFFFFUL - tmp) >= rdev->mc.vram_size) { + tmp += (rdev->mc.mc_vram_size - 1); + tmp &= ~(rdev->mc.mc_vram_size - 1); + if ((0xFFFFFFFFUL - tmp) >= rdev->mc.mc_vram_size) { rdev->mc.vram_location = tmp; } else { printk(KERN_ERR "[drm] vram too big to fit " @@ -133,12 +152,16 @@ int radeon_mc_setup(struct radeon_device *rdev) } } else { rdev->mc.vram_location = 0; - rdev->mc.gtt_location = rdev->mc.vram_size; + tmp = rdev->mc.mc_vram_size; + tmp = (tmp + rdev->mc.gtt_size - 1) & ~(rdev->mc.gtt_size - 1); + rdev->mc.gtt_location = tmp; } - DRM_INFO("radeon: VRAM %uM\n", rdev->mc.vram_size >> 20); + DRM_INFO("radeon: VRAM %uM\n", rdev->mc.real_vram_size >> 20); DRM_INFO("radeon: VRAM from 0x%08X to 0x%08X\n", rdev->mc.vram_location, - rdev->mc.vram_location + rdev->mc.vram_size - 1); + rdev->mc.vram_location + rdev->mc.mc_vram_size - 1); + if (rdev->mc.real_vram_size != rdev->mc.mc_vram_size) + DRM_INFO("radeon: VRAM less than aperture workaround enabled\n"); DRM_INFO("radeon: GTT %uM\n", rdev->mc.gtt_size >> 20); DRM_INFO("radeon: GTT from 0x%08X to 0x%08X\n", rdev->mc.gtt_location, @@ -433,6 +456,7 @@ int radeon_device_init(struct radeon_device *rdev, uint32_t flags) { int r, ret; + int dma_bits; DRM_INFO("radeon: Initializing kernel modesetting.\n"); rdev->shutdown = false; @@ -475,8 +499,20 @@ int radeon_device_init(struct radeon_device *rdev, return r; } - /* Report DMA addressing limitation */ - r = pci_set_dma_mask(rdev->pdev, DMA_BIT_MASK(32)); + /* set DMA mask + need_dma32 flags. + * PCIE - can handle 40-bits. + * IGP - can handle 40-bits (in theory) + * AGP - generally dma32 is safest + * PCI - only dma32 + */ + rdev->need_dma32 = false; + if (rdev->flags & RADEON_IS_AGP) + rdev->need_dma32 = true; + if (rdev->flags & RADEON_IS_PCI) + rdev->need_dma32 = true; + + dma_bits = rdev->need_dma32 ? 
32 : 40; + r = pci_set_dma_mask(rdev->pdev, DMA_BIT_MASK(dma_bits)); if (r) { printk(KERN_WARNING "radeon: No suitable DMA available.\n"); } @@ -496,6 +532,8 @@ int radeon_device_init(struct radeon_device *rdev, radeon_errata(rdev); /* Initialize scratch registers */ radeon_scratch_init(rdev); + /* Initialize surface registers */ + radeon_surface_init(rdev); /* TODO: disable VGA need to use VGA request */ /* BIOS*/ @@ -527,27 +565,22 @@ int radeon_device_init(struct radeon_device *rdev, radeon_combios_asic_init(rdev->ddev); } } + /* Initialize clocks */ + r = radeon_clocks_init(rdev); + if (r) { + return r; + } /* Get vram informations */ radeon_vram_info(rdev); - /* Device is severly broken if aper size > vram size. - * for RN50/M6/M7 - Novell bug 204882 ? - */ - if (rdev->mc.vram_size < rdev->mc.aper_size) { - rdev->mc.aper_size = rdev->mc.vram_size; - } + /* Add an MTRR for the VRAM */ rdev->mc.vram_mtrr = mtrr_add(rdev->mc.aper_base, rdev->mc.aper_size, MTRR_TYPE_WRCOMB, 1); DRM_INFO("Detected VRAM RAM=%uM, BAR=%uM\n", - rdev->mc.vram_size >> 20, + rdev->mc.real_vram_size >> 20, (unsigned)rdev->mc.aper_size >> 20); DRM_INFO("RAM width %dbits %cDR\n", rdev->mc.vram_width, rdev->mc.vram_is_ddr ? 'D' : 'S'); - /* Initialize clocks */ - r = radeon_clocks_init(rdev); - if (r) { - return r; - } /* Initialize memory controller (also test AGP) */ r = radeon_mc_init(rdev); if (r) { @@ -604,12 +637,12 @@ int radeon_device_init(struct radeon_device *rdev, if (r) { return r; } - if (rdev->fbdev_rfb && rdev->fbdev_rfb->obj) { - rdev->fbdev_robj = rdev->fbdev_rfb->obj->driver_private; - } if (!ret) { DRM_INFO("radeon: kernel modesetting successfully initialized.\n"); } + if (radeon_testing) { + radeon_test_moves(rdev); + } if (radeon_benchmarking) { radeon_benchmark(rdev); } diff --git a/drivers/gpu/drm/radeon/radeon_display.c b/drivers/gpu/drm/radeon/radeon_display.c index 3efcf1a526b..a8fa1bb84cf 100644 --- a/drivers/gpu/drm/radeon/radeon_display.c +++ b/drivers/gpu/drm/radeon/radeon_display.c @@ -187,6 +187,7 @@ static void radeon_crtc_init(struct drm_device *dev, int index) drm_mode_crtc_set_gamma_size(&radeon_crtc->base, 256); radeon_crtc->crtc_id = index; + rdev->mode_info.crtcs[index] = radeon_crtc; radeon_crtc->mode_set.crtc = &radeon_crtc->base; radeon_crtc->mode_set.connectors = (struct drm_connector **)(radeon_crtc + 1); @@ -491,7 +492,11 @@ void radeon_compute_pll(struct radeon_pll *pll, tmp += (uint64_t)pll->reference_freq * 1000 * frac_feedback_div; current_freq = radeon_div(tmp, ref_div * post_div); - error = abs(current_freq - freq); + if (flags & RADEON_PLL_PREFER_CLOSEST_LOWER) { + error = freq - current_freq; + error = error < 0 ? 
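The reworked radeon_mc_setup() above places the GTT right after VRAM, rounded up to a multiple of the GTT size, and radeon_device_init() now picks a 32- or 40-bit DMA mask from the bus type. A rough sketch of both choices with made-up sizes; the GTT size is assumed to be a power of two, which the mask arithmetic relies on.

/* Sketch of the placement and DMA-mask choices made above, with made-up
 * sizes; the GTT size is assumed to be a power of two. */
#include <stdint.h>
#include <stdio.h>

static uint32_t align_up_pow2(uint32_t addr, uint32_t size)
{
        return (addr + size - 1) & ~(size - 1);
}

int main(void)
{
        uint32_t vram_location = 0;
        uint32_t mc_vram_size  = 200u << 20;  /* 200 MiB in the MC map */
        uint32_t gtt_size      = 512u << 20;  /* 512 MiB GTT aperture */
        uint32_t gtt_location  = align_up_pow2(vram_location + mc_vram_size,
                                               gtt_size);
        int need_dma32;

        printf("VRAM 0x%08X-0x%08X, GTT at 0x%08X\n",
               (unsigned)vram_location,
               (unsigned)(vram_location + mc_vram_size - 1),
               (unsigned)gtt_location);

        /* AGP and plain PCI stay on a 32-bit mask, PCIE/IGP can use 40 bits */
        need_dma32 = 1;                       /* e.g. an AGP board */
        printf("DMA mask: %d bits\n", need_dma32 ? 32 : 40);
        return 0;
}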
0xffffffff : error; + } else + error = abs(current_freq - freq); vco_diff = abs(vco - best_vco); if ((best_vco == 0 && error < best_error) || @@ -657,36 +662,51 @@ void radeon_modeset_fini(struct radeon_device *rdev) } } -void radeon_init_disp_bandwidth(struct drm_device *dev) +bool radeon_crtc_scaling_mode_fixup(struct drm_crtc *crtc, + struct drm_display_mode *mode, + struct drm_display_mode *adjusted_mode) { - struct radeon_device *rdev = dev->dev_private; - struct drm_display_mode *modes[2]; - int pixel_bytes[2]; - struct drm_crtc *crtc; - - pixel_bytes[0] = pixel_bytes[1] = 0; - modes[0] = modes[1] = NULL; - - list_for_each_entry(crtc, &dev->mode_config.crtc_list, head) { - struct radeon_crtc *radeon_crtc = to_radeon_crtc(crtc); + struct drm_device *dev = crtc->dev; + struct drm_encoder *encoder; + struct radeon_crtc *radeon_crtc = to_radeon_crtc(crtc); + struct radeon_encoder *radeon_encoder; + bool first = true; - if (crtc->enabled && crtc->fb) { - modes[radeon_crtc->crtc_id] = &crtc->mode; - pixel_bytes[radeon_crtc->crtc_id] = crtc->fb->bits_per_pixel / 8; + list_for_each_entry(encoder, &dev->mode_config.encoder_list, head) { + radeon_encoder = to_radeon_encoder(encoder); + if (encoder->crtc != crtc) + continue; + if (first) { + radeon_crtc->rmx_type = radeon_encoder->rmx_type; + radeon_crtc->devices = radeon_encoder->devices; + memcpy(&radeon_crtc->native_mode, + &radeon_encoder->native_mode, + sizeof(struct radeon_native_mode)); + first = false; + } else { + if (radeon_crtc->rmx_type != radeon_encoder->rmx_type) { + /* WARNING: Right now this can't happen but + * in the future we need to check that scaling + * are consistent accross different encoder + * (ie all encoder can work with the same + * scaling). + */ + DRM_ERROR("Scaling not consistent accross encoder.\n"); + return false; + } } } - - if (ASIC_IS_AVIVO(rdev)) { - radeon_init_disp_bw_avivo(dev, - modes[0], - pixel_bytes[0], - modes[1], - pixel_bytes[1]); + if (radeon_crtc->rmx_type != RMX_OFF) { + fixed20_12 a, b; + a.full = rfixed_const(crtc->mode.vdisplay); + b.full = rfixed_const(radeon_crtc->native_mode.panel_xres); + radeon_crtc->vsc.full = rfixed_div(a, b); + a.full = rfixed_const(crtc->mode.hdisplay); + b.full = rfixed_const(radeon_crtc->native_mode.panel_yres); + radeon_crtc->hsc.full = rfixed_div(a, b); } else { - radeon_init_disp_bw_legacy(dev, - modes[0], - pixel_bytes[0], - modes[1], - pixel_bytes[1]); + radeon_crtc->vsc.full = rfixed_const(1); + radeon_crtc->hsc.full = rfixed_const(1); } + return true; } diff --git a/drivers/gpu/drm/radeon/radeon_drv.c b/drivers/gpu/drm/radeon/radeon_drv.c index 09c9fb9f621..0bd5879a495 100644 --- a/drivers/gpu/drm/radeon/radeon_drv.c +++ b/drivers/gpu/drm/radeon/radeon_drv.c @@ -89,6 +89,7 @@ int radeon_agpmode = 0; int radeon_vram_limit = 0; int radeon_gart_size = 512; /* default gart size */ int radeon_benchmarking = 0; +int radeon_testing = 0; int radeon_connector_table = 0; #endif @@ -117,6 +118,9 @@ module_param_named(gartsize, radeon_gart_size, int, 0600); MODULE_PARM_DESC(benchmark, "Run benchmark"); module_param_named(benchmark, radeon_benchmarking, int, 0444); +MODULE_PARM_DESC(test, "Run tests"); +module_param_named(test, radeon_testing, int, 0444); + MODULE_PARM_DESC(connector_table, "Force connector table"); module_param_named(connector_table, radeon_connector_table, int, 0444); #endif @@ -314,6 +318,14 @@ static int __init radeon_init(void) driver = &driver_old; driver->num_ioctls = radeon_max_ioctl; #if defined(CONFIG_DRM_RADEON_KMS) +#ifdef 
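The new scaling fixup above expresses the scale factors as 20.12 fixed-point values via rfixed_const() and rfixed_div(). A toy reimplementation of those two helpers, showing only the arithmetic; this is not the kernel's radeon_fixed.h.

/* Toy 20.12 fixed-point helpers showing the arithmetic behind the
 * rfixed_const()/rfixed_div() calls above. */
#include <stdint.h>
#include <stdio.h>

typedef struct { uint32_t full; } fixed20_12;

static fixed20_12 fx_const(uint32_t v)
{
        fixed20_12 f = { v << 12 };     /* 12 fractional bits */
        return f;
}

static fixed20_12 fx_div(fixed20_12 a, fixed20_12 b)
{
        fixed20_12 r = { (uint32_t)(((uint64_t)a.full << 12) / b.full) };
        return r;
}

int main(void)
{
        /* e.g. a 1024-pixel-wide mode scaled onto a 1280-pixel panel */
        fixed20_12 hsc = fx_div(fx_const(1024), fx_const(1280));

        printf("hsc = %u.%03u\n", (unsigned)(hsc.full >> 12),
               (unsigned)((hsc.full & 0xfff) * 1000 / 4096));
        return 0;
}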
CONFIG_VGA_CONSOLE + if (vgacon_text_force() && radeon_modeset == -1) { + DRM_INFO("VGACON disable radeon kernel modesetting.\n"); + driver = &driver_old; + driver->driver_features &= ~DRIVER_MODESET; + radeon_modeset = 0; + } +#endif /* if enabled by default */ if (radeon_modeset == -1) { DRM_INFO("radeon default to kernel modesetting.\n"); @@ -325,17 +337,8 @@ static int __init radeon_init(void) driver->driver_features |= DRIVER_MODESET; driver->num_ioctls = radeon_max_kms_ioctl; } - /* if the vga console setting is enabled still * let modprobe override it */ -#ifdef CONFIG_VGA_CONSOLE - if (vgacon_text_force() && radeon_modeset == -1) { - DRM_INFO("VGACON disable radeon kernel modesetting.\n"); - driver = &driver_old; - driver->driver_features &= ~DRIVER_MODESET; - radeon_modeset = 0; - } -#endif #endif return drm_init(driver); } @@ -345,7 +348,7 @@ static void __exit radeon_exit(void) drm_exit(driver); } -late_initcall(radeon_init); +module_init(radeon_init); module_exit(radeon_exit); MODULE_AUTHOR(DRIVER_AUTHOR); diff --git a/drivers/gpu/drm/radeon/radeon_drv.h b/drivers/gpu/drm/radeon/radeon_drv.h index 127d0456f62..3933f8216a3 100644 --- a/drivers/gpu/drm/radeon/radeon_drv.h +++ b/drivers/gpu/drm/radeon/radeon_drv.h @@ -143,6 +143,7 @@ enum radeon_family { CHIP_RV635, CHIP_RV670, CHIP_RS780, + CHIP_RS880, CHIP_RV770, CHIP_RV730, CHIP_RV710, diff --git a/drivers/gpu/drm/radeon/radeon_encoders.c b/drivers/gpu/drm/radeon/radeon_encoders.c index c8ef0d14ffa..0a92706eac1 100644 --- a/drivers/gpu/drm/radeon/radeon_encoders.c +++ b/drivers/gpu/drm/radeon/radeon_encoders.c @@ -154,7 +154,6 @@ void radeon_rmx_mode_fixup(struct drm_encoder *encoder, if (mode->hdisplay < native_mode->panel_xres || mode->vdisplay < native_mode->panel_yres) { - radeon_encoder->flags |= RADEON_USE_RMX; if (ASIC_IS_AVIVO(rdev)) { adjusted_mode->hdisplay = native_mode->panel_xres; adjusted_mode->vdisplay = native_mode->panel_yres; @@ -197,15 +196,13 @@ void radeon_rmx_mode_fixup(struct drm_encoder *encoder, } } + static bool radeon_atom_mode_fixup(struct drm_encoder *encoder, struct drm_display_mode *mode, struct drm_display_mode *adjusted_mode) { - struct radeon_encoder *radeon_encoder = to_radeon_encoder(encoder); - radeon_encoder->flags &= ~RADEON_USE_RMX; - drm_mode_set_crtcinfo(adjusted_mode, 0); if (radeon_encoder->rmx_type != RMX_OFF) @@ -808,234 +805,6 @@ atombios_dig_transmitter_setup(struct drm_encoder *encoder, int action) } -static void atom_rv515_force_tv_scaler(struct radeon_device *rdev) -{ - - WREG32(0x659C, 0x0); - WREG32(0x6594, 0x705); - WREG32(0x65A4, 0x10001); - WREG32(0x65D8, 0x0); - WREG32(0x65B0, 0x0); - WREG32(0x65C0, 0x0); - WREG32(0x65D4, 0x0); - WREG32(0x6578, 0x0); - WREG32(0x657C, 0x841880A8); - WREG32(0x6578, 0x1); - WREG32(0x657C, 0x84208680); - WREG32(0x6578, 0x2); - WREG32(0x657C, 0xBFF880B0); - WREG32(0x6578, 0x100); - WREG32(0x657C, 0x83D88088); - WREG32(0x6578, 0x101); - WREG32(0x657C, 0x84608680); - WREG32(0x6578, 0x102); - WREG32(0x657C, 0xBFF080D0); - WREG32(0x6578, 0x200); - WREG32(0x657C, 0x83988068); - WREG32(0x6578, 0x201); - WREG32(0x657C, 0x84A08680); - WREG32(0x6578, 0x202); - WREG32(0x657C, 0xBFF080F8); - WREG32(0x6578, 0x300); - WREG32(0x657C, 0x83588058); - WREG32(0x6578, 0x301); - WREG32(0x657C, 0x84E08660); - WREG32(0x6578, 0x302); - WREG32(0x657C, 0xBFF88120); - WREG32(0x6578, 0x400); - WREG32(0x657C, 0x83188040); - WREG32(0x6578, 0x401); - WREG32(0x657C, 0x85008660); - WREG32(0x6578, 0x402); - WREG32(0x657C, 0xBFF88150); - WREG32(0x6578, 0x500); - WREG32(0x657C, 
0x82D88030); - WREG32(0x6578, 0x501); - WREG32(0x657C, 0x85408640); - WREG32(0x6578, 0x502); - WREG32(0x657C, 0xBFF88180); - WREG32(0x6578, 0x600); - WREG32(0x657C, 0x82A08018); - WREG32(0x6578, 0x601); - WREG32(0x657C, 0x85808620); - WREG32(0x6578, 0x602); - WREG32(0x657C, 0xBFF081B8); - WREG32(0x6578, 0x700); - WREG32(0x657C, 0x82608010); - WREG32(0x6578, 0x701); - WREG32(0x657C, 0x85A08600); - WREG32(0x6578, 0x702); - WREG32(0x657C, 0x800081F0); - WREG32(0x6578, 0x800); - WREG32(0x657C, 0x8228BFF8); - WREG32(0x6578, 0x801); - WREG32(0x657C, 0x85E085E0); - WREG32(0x6578, 0x802); - WREG32(0x657C, 0xBFF88228); - WREG32(0x6578, 0x10000); - WREG32(0x657C, 0x82A8BF00); - WREG32(0x6578, 0x10001); - WREG32(0x657C, 0x82A08CC0); - WREG32(0x6578, 0x10002); - WREG32(0x657C, 0x8008BEF8); - WREG32(0x6578, 0x10100); - WREG32(0x657C, 0x81F0BF28); - WREG32(0x6578, 0x10101); - WREG32(0x657C, 0x83608CA0); - WREG32(0x6578, 0x10102); - WREG32(0x657C, 0x8018BED0); - WREG32(0x6578, 0x10200); - WREG32(0x657C, 0x8148BF38); - WREG32(0x6578, 0x10201); - WREG32(0x657C, 0x84408C80); - WREG32(0x6578, 0x10202); - WREG32(0x657C, 0x8008BEB8); - WREG32(0x6578, 0x10300); - WREG32(0x657C, 0x80B0BF78); - WREG32(0x6578, 0x10301); - WREG32(0x657C, 0x85008C20); - WREG32(0x6578, 0x10302); - WREG32(0x657C, 0x8020BEA0); - WREG32(0x6578, 0x10400); - WREG32(0x657C, 0x8028BF90); - WREG32(0x6578, 0x10401); - WREG32(0x657C, 0x85E08BC0); - WREG32(0x6578, 0x10402); - WREG32(0x657C, 0x8018BE90); - WREG32(0x6578, 0x10500); - WREG32(0x657C, 0xBFB8BFB0); - WREG32(0x6578, 0x10501); - WREG32(0x657C, 0x86C08B40); - WREG32(0x6578, 0x10502); - WREG32(0x657C, 0x8010BE90); - WREG32(0x6578, 0x10600); - WREG32(0x657C, 0xBF58BFC8); - WREG32(0x6578, 0x10601); - WREG32(0x657C, 0x87A08AA0); - WREG32(0x6578, 0x10602); - WREG32(0x657C, 0x8010BE98); - WREG32(0x6578, 0x10700); - WREG32(0x657C, 0xBF10BFF0); - WREG32(0x6578, 0x10701); - WREG32(0x657C, 0x886089E0); - WREG32(0x6578, 0x10702); - WREG32(0x657C, 0x8018BEB0); - WREG32(0x6578, 0x10800); - WREG32(0x657C, 0xBED8BFE8); - WREG32(0x6578, 0x10801); - WREG32(0x657C, 0x89408940); - WREG32(0x6578, 0x10802); - WREG32(0x657C, 0xBFE8BED8); - WREG32(0x6578, 0x20000); - WREG32(0x657C, 0x80008000); - WREG32(0x6578, 0x20001); - WREG32(0x657C, 0x90008000); - WREG32(0x6578, 0x20002); - WREG32(0x657C, 0x80008000); - WREG32(0x6578, 0x20003); - WREG32(0x657C, 0x80008000); - WREG32(0x6578, 0x20100); - WREG32(0x657C, 0x80108000); - WREG32(0x6578, 0x20101); - WREG32(0x657C, 0x8FE0BF70); - WREG32(0x6578, 0x20102); - WREG32(0x657C, 0xBFE880C0); - WREG32(0x6578, 0x20103); - WREG32(0x657C, 0x80008000); - WREG32(0x6578, 0x20200); - WREG32(0x657C, 0x8018BFF8); - WREG32(0x6578, 0x20201); - WREG32(0x657C, 0x8F80BF08); - WREG32(0x6578, 0x20202); - WREG32(0x657C, 0xBFD081A0); - WREG32(0x6578, 0x20203); - WREG32(0x657C, 0xBFF88000); - WREG32(0x6578, 0x20300); - WREG32(0x657C, 0x80188000); - WREG32(0x6578, 0x20301); - WREG32(0x657C, 0x8EE0BEC0); - WREG32(0x6578, 0x20302); - WREG32(0x657C, 0xBFB082A0); - WREG32(0x6578, 0x20303); - WREG32(0x657C, 0x80008000); - WREG32(0x6578, 0x20400); - WREG32(0x657C, 0x80188000); - WREG32(0x6578, 0x20401); - WREG32(0x657C, 0x8E00BEA0); - WREG32(0x6578, 0x20402); - WREG32(0x657C, 0xBF8883C0); - WREG32(0x6578, 0x20403); - WREG32(0x657C, 0x80008000); - WREG32(0x6578, 0x20500); - WREG32(0x657C, 0x80188000); - WREG32(0x6578, 0x20501); - WREG32(0x657C, 0x8D00BE90); - WREG32(0x6578, 0x20502); - WREG32(0x657C, 0xBF588500); - WREG32(0x6578, 0x20503); - WREG32(0x657C, 0x80008008); - WREG32(0x6578, 0x20600); - 
WREG32(0x657C, 0x80188000); - WREG32(0x6578, 0x20601); - WREG32(0x657C, 0x8BC0BE98); - WREG32(0x6578, 0x20602); - WREG32(0x657C, 0xBF308660); - WREG32(0x6578, 0x20603); - WREG32(0x657C, 0x80008008); - WREG32(0x6578, 0x20700); - WREG32(0x657C, 0x80108000); - WREG32(0x6578, 0x20701); - WREG32(0x657C, 0x8A80BEB0); - WREG32(0x6578, 0x20702); - WREG32(0x657C, 0xBF0087C0); - WREG32(0x6578, 0x20703); - WREG32(0x657C, 0x80008008); - WREG32(0x6578, 0x20800); - WREG32(0x657C, 0x80108000); - WREG32(0x6578, 0x20801); - WREG32(0x657C, 0x8920BED0); - WREG32(0x6578, 0x20802); - WREG32(0x657C, 0xBED08920); - WREG32(0x6578, 0x20803); - WREG32(0x657C, 0x80008010); - WREG32(0x6578, 0x30000); - WREG32(0x657C, 0x90008000); - WREG32(0x6578, 0x30001); - WREG32(0x657C, 0x80008000); - WREG32(0x6578, 0x30100); - WREG32(0x657C, 0x8FE0BF90); - WREG32(0x6578, 0x30101); - WREG32(0x657C, 0xBFF880A0); - WREG32(0x6578, 0x30200); - WREG32(0x657C, 0x8F60BF40); - WREG32(0x6578, 0x30201); - WREG32(0x657C, 0xBFE88180); - WREG32(0x6578, 0x30300); - WREG32(0x657C, 0x8EC0BF00); - WREG32(0x6578, 0x30301); - WREG32(0x657C, 0xBFC88280); - WREG32(0x6578, 0x30400); - WREG32(0x657C, 0x8DE0BEE0); - WREG32(0x6578, 0x30401); - WREG32(0x657C, 0xBFA083A0); - WREG32(0x6578, 0x30500); - WREG32(0x657C, 0x8CE0BED0); - WREG32(0x6578, 0x30501); - WREG32(0x657C, 0xBF7884E0); - WREG32(0x6578, 0x30600); - WREG32(0x657C, 0x8BA0BED8); - WREG32(0x6578, 0x30601); - WREG32(0x657C, 0xBF508640); - WREG32(0x6578, 0x30700); - WREG32(0x657C, 0x8A60BEE8); - WREG32(0x6578, 0x30701); - WREG32(0x657C, 0xBF2087A0); - WREG32(0x6578, 0x30800); - WREG32(0x657C, 0x8900BF00); - WREG32(0x6578, 0x30801); - WREG32(0x657C, 0xBF008900); -} - static void atombios_yuv_setup(struct drm_encoder *encoder, bool enable) { @@ -1074,129 +843,6 @@ atombios_yuv_setup(struct drm_encoder *encoder, bool enable) } static void -atombios_overscan_setup(struct drm_encoder *encoder, - struct drm_display_mode *mode, - struct drm_display_mode *adjusted_mode) -{ - struct drm_device *dev = encoder->dev; - struct radeon_device *rdev = dev->dev_private; - struct radeon_encoder *radeon_encoder = to_radeon_encoder(encoder); - struct radeon_crtc *radeon_crtc = to_radeon_crtc(encoder->crtc); - SET_CRTC_OVERSCAN_PS_ALLOCATION args; - int index = GetIndexIntoMasterTable(COMMAND, SetCRTC_OverScan); - - memset(&args, 0, sizeof(args)); - - args.usOverscanRight = 0; - args.usOverscanLeft = 0; - args.usOverscanBottom = 0; - args.usOverscanTop = 0; - args.ucCRTC = radeon_crtc->crtc_id; - - if (radeon_encoder->flags & RADEON_USE_RMX) { - if (radeon_encoder->rmx_type == RMX_FULL) { - args.usOverscanRight = 0; - args.usOverscanLeft = 0; - args.usOverscanBottom = 0; - args.usOverscanTop = 0; - } else if (radeon_encoder->rmx_type == RMX_CENTER) { - args.usOverscanTop = (adjusted_mode->crtc_vdisplay - mode->crtc_vdisplay) / 2; - args.usOverscanBottom = (adjusted_mode->crtc_vdisplay - mode->crtc_vdisplay) / 2; - args.usOverscanLeft = (adjusted_mode->crtc_hdisplay - mode->crtc_hdisplay) / 2; - args.usOverscanRight = (adjusted_mode->crtc_hdisplay - mode->crtc_hdisplay) / 2; - } else if (radeon_encoder->rmx_type == RMX_ASPECT) { - int a1 = mode->crtc_vdisplay * adjusted_mode->crtc_hdisplay; - int a2 = adjusted_mode->crtc_vdisplay * mode->crtc_hdisplay; - - if (a1 > a2) { - args.usOverscanLeft = (adjusted_mode->crtc_hdisplay - (a2 / mode->crtc_vdisplay)) / 2; - args.usOverscanRight = (adjusted_mode->crtc_hdisplay - (a2 / mode->crtc_vdisplay)) / 2; - } else if (a2 > a1) { - args.usOverscanLeft = 
(adjusted_mode->crtc_vdisplay - (a1 / mode->crtc_hdisplay)) / 2; - args.usOverscanRight = (adjusted_mode->crtc_vdisplay - (a1 / mode->crtc_hdisplay)) / 2; - } - } - } - - atom_execute_table(rdev->mode_info.atom_context, index, (uint32_t *)&args); - -} - -static void -atombios_scaler_setup(struct drm_encoder *encoder) -{ - struct drm_device *dev = encoder->dev; - struct radeon_device *rdev = dev->dev_private; - struct radeon_encoder *radeon_encoder = to_radeon_encoder(encoder); - struct radeon_crtc *radeon_crtc = to_radeon_crtc(encoder->crtc); - ENABLE_SCALER_PS_ALLOCATION args; - int index = GetIndexIntoMasterTable(COMMAND, EnableScaler); - /* fixme - fill in enc_priv for atom dac */ - enum radeon_tv_std tv_std = TV_STD_NTSC; - - if (!ASIC_IS_AVIVO(rdev) && radeon_crtc->crtc_id) - return; - - memset(&args, 0, sizeof(args)); - - args.ucScaler = radeon_crtc->crtc_id; - - if (radeon_encoder->devices & (ATOM_DEVICE_TV_SUPPORT)) { - switch (tv_std) { - case TV_STD_NTSC: - default: - args.ucTVStandard = ATOM_TV_NTSC; - break; - case TV_STD_PAL: - args.ucTVStandard = ATOM_TV_PAL; - break; - case TV_STD_PAL_M: - args.ucTVStandard = ATOM_TV_PALM; - break; - case TV_STD_PAL_60: - args.ucTVStandard = ATOM_TV_PAL60; - break; - case TV_STD_NTSC_J: - args.ucTVStandard = ATOM_TV_NTSCJ; - break; - case TV_STD_SCART_PAL: - args.ucTVStandard = ATOM_TV_PAL; /* ??? */ - break; - case TV_STD_SECAM: - args.ucTVStandard = ATOM_TV_SECAM; - break; - case TV_STD_PAL_CN: - args.ucTVStandard = ATOM_TV_PALCN; - break; - } - args.ucEnable = SCALER_ENABLE_MULTITAP_MODE; - } else if (radeon_encoder->devices & (ATOM_DEVICE_CV_SUPPORT)) { - args.ucTVStandard = ATOM_TV_CV; - args.ucEnable = SCALER_ENABLE_MULTITAP_MODE; - } else if (radeon_encoder->flags & RADEON_USE_RMX) { - if (radeon_encoder->rmx_type == RMX_FULL) - args.ucEnable = ATOM_SCALER_EXPANSION; - else if (radeon_encoder->rmx_type == RMX_CENTER) - args.ucEnable = ATOM_SCALER_CENTER; - else if (radeon_encoder->rmx_type == RMX_ASPECT) - args.ucEnable = ATOM_SCALER_EXPANSION; - } else { - if (ASIC_IS_AVIVO(rdev)) - args.ucEnable = ATOM_SCALER_DISABLE; - else - args.ucEnable = ATOM_SCALER_CENTER; - } - - atom_execute_table(rdev->mode_info.atom_context, index, (uint32_t *)&args); - - if (radeon_encoder->devices & (ATOM_DEVICE_CV_SUPPORT | ATOM_DEVICE_TV_SUPPORT) - && rdev->family >= CHIP_RV515 && rdev->family <= CHIP_RV570) { - atom_rv515_force_tv_scaler(rdev); - } - -} - -static void radeon_atom_encoder_dpms(struct drm_encoder *encoder, int mode) { struct drm_device *dev = encoder->dev; @@ -1448,8 +1094,6 @@ radeon_atom_encoder_mode_set(struct drm_encoder *encoder, radeon_encoder->pixel_clock = adjusted_mode->clock; radeon_atombios_encoder_crtc_scratch_regs(encoder, radeon_crtc->crtc_id); - atombios_overscan_setup(encoder, mode, adjusted_mode); - atombios_scaler_setup(encoder); atombios_set_encoder_crtc_source(encoder); if (ASIC_IS_AVIVO(rdev)) { @@ -1667,6 +1311,7 @@ radeon_add_atom_encoder(struct drm_device *dev, uint32_t encoder_id, uint32_t su radeon_encoder->encoder_id = encoder_id; radeon_encoder->devices = supported_device; + radeon_encoder->rmx_type = RMX_OFF; switch (radeon_encoder->encoder_id) { case ENCODER_OBJECT_ID_INTERNAL_LVDS: diff --git a/drivers/gpu/drm/radeon/radeon_fb.c b/drivers/gpu/drm/radeon/radeon_fb.c index fa86d398945..3206c0ad7b6 100644 --- a/drivers/gpu/drm/radeon/radeon_fb.c +++ b/drivers/gpu/drm/radeon/radeon_fb.c @@ -101,9 +101,10 @@ static int radeonfb_setcolreg(unsigned regno, break; case 24: case 32: - fb->pseudo_palette[regno] = 
((red & 0xff00) << 8) | - (green & 0xff00) | - ((blue & 0xff00) >> 8); + fb->pseudo_palette[regno] = + (((red >> 8) & 0xff) << info->var.red.offset) | + (((green >> 8) & 0xff) << info->var.green.offset) | + (((blue >> 8) & 0xff) << info->var.blue.offset); break; } } @@ -154,6 +155,7 @@ static int radeonfb_check_var(struct fb_var_screeninfo *var, var->transp.length = 0; var->transp.offset = 0; break; +#ifdef __LITTLE_ENDIAN case 15: var->red.offset = 10; var->green.offset = 5; @@ -194,6 +196,28 @@ static int radeonfb_check_var(struct fb_var_screeninfo *var, var->transp.length = 8; var->transp.offset = 24; break; +#else + case 24: + var->red.offset = 8; + var->green.offset = 16; + var->blue.offset = 24; + var->red.length = 8; + var->green.length = 8; + var->blue.length = 8; + var->transp.length = 0; + var->transp.offset = 0; + break; + case 32: + var->red.offset = 8; + var->green.offset = 16; + var->blue.offset = 24; + var->red.length = 8; + var->green.length = 8; + var->blue.length = 8; + var->transp.length = 8; + var->transp.offset = 0; + break; +#endif default: return -EINVAL; } @@ -447,10 +471,10 @@ static struct notifier_block paniced = { .notifier_call = radeonfb_panic, }; -static int radeon_align_pitch(struct radeon_device *rdev, int width, int bpp) +static int radeon_align_pitch(struct radeon_device *rdev, int width, int bpp, bool tiled) { int aligned = width; - int align_large = (ASIC_IS_AVIVO(rdev)); + int align_large = (ASIC_IS_AVIVO(rdev)) || tiled; int pitch_mask = 0; switch (bpp / 8) { @@ -478,36 +502,42 @@ int radeonfb_create(struct radeon_device *rdev, { struct fb_info *info; struct radeon_fb_device *rfbdev; - struct drm_framebuffer *fb; + struct drm_framebuffer *fb = NULL; struct radeon_framebuffer *rfb; struct drm_mode_fb_cmd mode_cmd; struct drm_gem_object *gobj = NULL; struct radeon_object *robj = NULL; struct device *device = &rdev->pdev->dev; int size, aligned_size, ret; + u64 fb_gpuaddr; void *fbptr = NULL; + unsigned long tmp; + bool fb_tiled = false; /* useful for testing */ mode_cmd.width = surface_width; mode_cmd.height = surface_height; mode_cmd.bpp = 32; /* need to align pitch with crtc limits */ - mode_cmd.pitch = radeon_align_pitch(rdev, mode_cmd.width, mode_cmd.bpp) * ((mode_cmd.bpp + 1) / 8); + mode_cmd.pitch = radeon_align_pitch(rdev, mode_cmd.width, mode_cmd.bpp, fb_tiled) * ((mode_cmd.bpp + 1) / 8); mode_cmd.depth = 24; size = mode_cmd.pitch * mode_cmd.height; aligned_size = ALIGN(size, PAGE_SIZE); ret = radeon_gem_object_create(rdev, aligned_size, 0, - RADEON_GEM_DOMAIN_VRAM, - false, ttm_bo_type_kernel, - false, &gobj); + RADEON_GEM_DOMAIN_VRAM, + false, ttm_bo_type_kernel, + false, &gobj); if (ret) { - printk(KERN_ERR "failed to allocate framebuffer\n"); + printk(KERN_ERR "failed to allocate framebuffer (%d %d)\n", + surface_width, surface_height); ret = -ENOMEM; goto out; } robj = gobj->driver_private; + if (fb_tiled) + radeon_object_set_tiling_flags(robj, RADEON_TILING_MACRO|RADEON_TILING_SURFACE, mode_cmd.pitch); mutex_lock(&rdev->ddev->struct_mutex); fb = radeon_framebuffer_create(rdev->ddev, &mode_cmd, gobj); if (fb == NULL) { @@ -515,12 +545,19 @@ int radeonfb_create(struct radeon_device *rdev, ret = -ENOMEM; goto out_unref; } + ret = radeon_object_pin(robj, RADEON_GEM_DOMAIN_VRAM, &fb_gpuaddr); + if (ret) { + printk(KERN_ERR "failed to pin framebuffer\n"); + ret = -ENOMEM; + goto out_unref; + } list_add(&fb->filp_head, &rdev->ddev->mode_config.fb_kernel_list); rfb = to_radeon_framebuffer(fb); *rfb_p = rfb; rdev->fbdev_rfb = rfb; + 
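The radeonfb_setcolreg() change above packs the truecolor pseudo-palette from the per-format red/green/blue offsets instead of a hard-coded layout. A small standalone version of that packing; the offsets used in main() are an XRGB8888-style example.

/* Standalone version of the truecolor pseudo-palette packing introduced
 * above: 16-bit fbdev components are reduced to 8 bits and shifted to the
 * per-format channel offsets. */
#include <stdint.h>
#include <stdio.h>

static uint32_t pack_pixel(uint16_t red, uint16_t green, uint16_t blue,
                           unsigned red_off, unsigned green_off,
                           unsigned blue_off)
{
        return (((red   >> 8) & 0xffu) << red_off)   |
               (((green >> 8) & 0xffu) << green_off) |
               (((blue  >> 8) & 0xffu) << blue_off);
}

int main(void)
{
        /* blue at bit 0, green at bit 8, red at bit 16 */
        uint32_t px = pack_pixel(0xffff, 0x8000, 0x0000, 16, 8, 0);

        printf("pixel = 0x%08x\n", (unsigned)px);   /* prints 0x00ff8000 */
        return 0;
}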
rdev->fbdev_robj = robj; info = framebuffer_alloc(sizeof(struct radeon_fb_device), device); if (info == NULL) { @@ -529,6 +566,9 @@ int radeonfb_create(struct radeon_device *rdev, } rfbdev = info->par; + if (fb_tiled) + radeon_object_check_tiling(robj, 0, 0); + ret = radeon_object_kmap(robj, &fbptr); if (ret) { goto out_unref; @@ -541,13 +581,13 @@ int radeonfb_create(struct radeon_device *rdev, info->fix.xpanstep = 1; /* doing it in hw */ info->fix.ypanstep = 1; /* doing it in hw */ info->fix.ywrapstep = 0; - info->fix.accel = FB_ACCEL_I830; + info->fix.accel = FB_ACCEL_NONE; info->fix.type_aux = 0; info->flags = FBINFO_DEFAULT; info->fbops = &radeonfb_ops; info->fix.line_length = fb->pitch; - info->screen_base = fbptr; - info->fix.smem_start = (unsigned long)fbptr; + tmp = fb_gpuaddr - rdev->mc.vram_location; + info->fix.smem_start = rdev->mc.aper_base + tmp; info->fix.smem_len = size; info->screen_base = fbptr; info->screen_size = size; @@ -562,8 +602,13 @@ int radeonfb_create(struct radeon_device *rdev, info->var.width = -1; info->var.xres = fb_width; info->var.yres = fb_height; - info->fix.mmio_start = pci_resource_start(rdev->pdev, 2); - info->fix.mmio_len = pci_resource_len(rdev->pdev, 2); + + /* setup aperture base/size for vesafb takeover */ + info->aperture_base = rdev->ddev->mode_config.fb_base; + info->aperture_size = rdev->mc.real_vram_size; + + info->fix.mmio_start = 0; + info->fix.mmio_len = 0; info->pixmap.size = 64*1024; info->pixmap.buf_align = 8; info->pixmap.access_align = 32; @@ -590,6 +635,7 @@ int radeonfb_create(struct radeon_device *rdev, info->var.transp.offset = 0; info->var.transp.length = 0; break; +#ifdef __LITTLE_ENDIAN case 15: info->var.red.offset = 10; info->var.green.offset = 5; @@ -629,7 +675,29 @@ int radeonfb_create(struct radeon_device *rdev, info->var.transp.offset = 24; info->var.transp.length = 8; break; +#else + case 24: + info->var.red.offset = 8; + info->var.green.offset = 16; + info->var.blue.offset = 24; + info->var.red.length = 8; + info->var.green.length = 8; + info->var.blue.length = 8; + info->var.transp.offset = 0; + info->var.transp.length = 0; + break; + case 32: + info->var.red.offset = 8; + info->var.green.offset = 16; + info->var.blue.offset = 24; + info->var.red.length = 8; + info->var.green.length = 8; + info->var.blue.length = 8; + info->var.transp.offset = 0; + info->var.transp.length = 8; + break; default: +#endif break; } @@ -644,7 +712,7 @@ out_unref: if (robj) { radeon_object_kunmap(robj); } - if (ret) { + if (fb && ret) { list_del(&fb->filp_head); drm_gem_object_unreference(gobj); drm_framebuffer_cleanup(fb); @@ -813,6 +881,7 @@ int radeonfb_remove(struct drm_device *dev, struct drm_framebuffer *fb) robj = rfb->obj->driver_private; unregister_framebuffer(info); radeon_object_kunmap(robj); + radeon_object_unpin(robj); framebuffer_release(info); } diff --git a/drivers/gpu/drm/radeon/radeon_fence.c b/drivers/gpu/drm/radeon/radeon_fence.c index 96afbf5ae2a..b4e48dd2e85 100644 --- a/drivers/gpu/drm/radeon/radeon_fence.c +++ b/drivers/gpu/drm/radeon/radeon_fence.c @@ -195,7 +195,7 @@ retry: r = wait_event_interruptible_timeout(rdev->fence_drv.queue, radeon_fence_signaled(fence), timeout); if (unlikely(r == -ERESTARTSYS)) { - return -ERESTART; + return -EBUSY; } } else { r = wait_event_timeout(rdev->fence_drv.queue, diff --git a/drivers/gpu/drm/radeon/radeon_gart.c b/drivers/gpu/drm/radeon/radeon_gart.c index d343a15316e..2977539880f 100644 --- a/drivers/gpu/drm/radeon/radeon_gart.c +++ b/drivers/gpu/drm/radeon/radeon_gart.c @@ 
-177,7 +177,7 @@ int radeon_gart_bind(struct radeon_device *rdev, unsigned offset, return -ENOMEM; } rdev->gart.pages[p] = pagelist[i]; - page_base = (uint32_t)rdev->gart.pages_addr[p]; + page_base = rdev->gart.pages_addr[p]; for (j = 0; j < (PAGE_SIZE / 4096); j++, t++) { radeon_gart_set_page(rdev, t, page_base); page_base += 4096; diff --git a/drivers/gpu/drm/radeon/radeon_gem.c b/drivers/gpu/drm/radeon/radeon_gem.c index eb516034235..cded5180c75 100644 --- a/drivers/gpu/drm/radeon/radeon_gem.c +++ b/drivers/gpu/drm/radeon/radeon_gem.c @@ -157,9 +157,9 @@ int radeon_gem_info_ioctl(struct drm_device *dev, void *data, struct radeon_device *rdev = dev->dev_private; struct drm_radeon_gem_info *args = data; - args->vram_size = rdev->mc.vram_size; + args->vram_size = rdev->mc.real_vram_size; /* FIXME: report somethings that makes sense */ - args->vram_visible = rdev->mc.vram_size - (4 * 1024 * 1024); + args->vram_visible = rdev->mc.real_vram_size - (4 * 1024 * 1024); args->gart_size = rdev->mc.gtt_size; return 0; } @@ -285,3 +285,44 @@ int radeon_gem_wait_idle_ioctl(struct drm_device *dev, void *data, mutex_unlock(&dev->struct_mutex); return r; } + +int radeon_gem_set_tiling_ioctl(struct drm_device *dev, void *data, + struct drm_file *filp) +{ + struct drm_radeon_gem_set_tiling *args = data; + struct drm_gem_object *gobj; + struct radeon_object *robj; + int r = 0; + + DRM_DEBUG("%d \n", args->handle); + gobj = drm_gem_object_lookup(dev, filp, args->handle); + if (gobj == NULL) + return -EINVAL; + robj = gobj->driver_private; + radeon_object_set_tiling_flags(robj, args->tiling_flags, args->pitch); + mutex_lock(&dev->struct_mutex); + drm_gem_object_unreference(gobj); + mutex_unlock(&dev->struct_mutex); + return r; +} + +int radeon_gem_get_tiling_ioctl(struct drm_device *dev, void *data, + struct drm_file *filp) +{ + struct drm_radeon_gem_get_tiling *args = data; + struct drm_gem_object *gobj; + struct radeon_object *robj; + int r = 0; + + DRM_DEBUG("\n"); + gobj = drm_gem_object_lookup(dev, filp, args->handle); + if (gobj == NULL) + return -EINVAL; + robj = gobj->driver_private; + radeon_object_get_tiling_flags(robj, &args->tiling_flags, + &args->pitch); + mutex_lock(&dev->struct_mutex); + drm_gem_object_unreference(gobj); + mutex_unlock(&dev->struct_mutex); + return r; +} diff --git a/drivers/gpu/drm/radeon/radeon_kms.c b/drivers/gpu/drm/radeon/radeon_kms.c index 4612a7c146d..3357110e30c 100644 --- a/drivers/gpu/drm/radeon/radeon_kms.c +++ b/drivers/gpu/drm/radeon/radeon_kms.c @@ -58,6 +58,8 @@ int radeon_driver_load_kms(struct drm_device *dev, unsigned long flags) if (r) { DRM_ERROR("Failed to initialize radeon, disabling IOCTL\n"); radeon_device_fini(rdev); + kfree(rdev); + dev->dev_private = NULL; return r; } return 0; @@ -291,5 +293,7 @@ struct drm_ioctl_desc radeon_ioctls_kms[] = { DRM_IOCTL_DEF(DRM_RADEON_GEM_WAIT_IDLE, radeon_gem_wait_idle_ioctl, DRM_AUTH), DRM_IOCTL_DEF(DRM_RADEON_CS, radeon_cs_ioctl, DRM_AUTH), DRM_IOCTL_DEF(DRM_RADEON_INFO, radeon_info_ioctl, DRM_AUTH), + DRM_IOCTL_DEF(DRM_RADEON_GEM_SET_TILING, radeon_gem_set_tiling_ioctl, DRM_AUTH), + DRM_IOCTL_DEF(DRM_RADEON_GEM_GET_TILING, radeon_gem_get_tiling_ioctl, DRM_AUTH), }; int radeon_max_kms_ioctl = DRM_ARRAY_SIZE(radeon_ioctls_kms); diff --git a/drivers/gpu/drm/radeon/radeon_legacy_crtc.c b/drivers/gpu/drm/radeon/radeon_legacy_crtc.c index 8086ecf7f03..7d06dc98a42 100644 --- a/drivers/gpu/drm/radeon/radeon_legacy_crtc.c +++ b/drivers/gpu/drm/radeon/radeon_legacy_crtc.c @@ -29,6 +29,171 @@ #include "radeon_fixed.h" 
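The radeon_gart_bind() hunk above keeps page_base at full width and advances it by 4096 per entry, since one system page can span several 4 KiB GPU pages. A sketch of that fill loop; a 16 KiB system page size is assumed below purely to show the multi-entry case.

/* Sketch of the GART fill loop touched above. */
#include <stdint.h>
#include <stdio.h>

#define SYS_PAGE_SIZE 16384u    /* e.g. a 16 KiB-page architecture */
#define GPU_PAGE_SIZE 4096u

int main(void)
{
        uint64_t page_base = 0x12340000ull;   /* made-up bus address */
        unsigned j, t = 0;

        for (j = 0; j < SYS_PAGE_SIZE / GPU_PAGE_SIZE; j++, t++) {
                printf("gart entry %u -> 0x%llx\n", t,
                       (unsigned long long)page_base);
                page_base += GPU_PAGE_SIZE;
        }
        return 0;
}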
#include "radeon.h" +static void radeon_legacy_rmx_mode_set(struct drm_crtc *crtc, + struct drm_display_mode *mode, + struct drm_display_mode *adjusted_mode) +{ + struct drm_device *dev = crtc->dev; + struct radeon_device *rdev = dev->dev_private; + struct radeon_crtc *radeon_crtc = to_radeon_crtc(crtc); + int xres = mode->hdisplay; + int yres = mode->vdisplay; + bool hscale = true, vscale = true; + int hsync_wid; + int vsync_wid; + int hsync_start; + int blank_width; + u32 scale, inc, crtc_more_cntl; + u32 fp_horz_stretch, fp_vert_stretch, fp_horz_vert_active; + u32 fp_h_sync_strt_wid, fp_crtc_h_total_disp; + u32 fp_v_sync_strt_wid, fp_crtc_v_total_disp; + struct radeon_native_mode *native_mode = &radeon_crtc->native_mode; + + fp_vert_stretch = RREG32(RADEON_FP_VERT_STRETCH) & + (RADEON_VERT_STRETCH_RESERVED | + RADEON_VERT_AUTO_RATIO_INC); + fp_horz_stretch = RREG32(RADEON_FP_HORZ_STRETCH) & + (RADEON_HORZ_FP_LOOP_STRETCH | + RADEON_HORZ_AUTO_RATIO_INC); + + crtc_more_cntl = 0; + if ((rdev->family == CHIP_RS100) || + (rdev->family == CHIP_RS200)) { + /* This is to workaround the asic bug for RMX, some versions + of BIOS dosen't have this register initialized correctly. */ + crtc_more_cntl |= RADEON_CRTC_H_CUTOFF_ACTIVE_EN; + } + + + fp_crtc_h_total_disp = ((((mode->crtc_htotal / 8) - 1) & 0x3ff) + | ((((mode->crtc_hdisplay / 8) - 1) & 0x1ff) << 16)); + + hsync_wid = (mode->crtc_hsync_end - mode->crtc_hsync_start) / 8; + if (!hsync_wid) + hsync_wid = 1; + hsync_start = mode->crtc_hsync_start - 8; + + fp_h_sync_strt_wid = ((hsync_start & 0x1fff) + | ((hsync_wid & 0x3f) << 16) + | ((mode->flags & DRM_MODE_FLAG_NHSYNC) + ? RADEON_CRTC_H_SYNC_POL + : 0)); + + fp_crtc_v_total_disp = (((mode->crtc_vtotal - 1) & 0xffff) + | ((mode->crtc_vdisplay - 1) << 16)); + + vsync_wid = mode->crtc_vsync_end - mode->crtc_vsync_start; + if (!vsync_wid) + vsync_wid = 1; + + fp_v_sync_strt_wid = (((mode->crtc_vsync_start - 1) & 0xfff) + | ((vsync_wid & 0x1f) << 16) + | ((mode->flags & DRM_MODE_FLAG_NVSYNC) + ? RADEON_CRTC_V_SYNC_POL + : 0)); + + fp_horz_vert_active = 0; + + if (native_mode->panel_xres == 0 || + native_mode->panel_yres == 0) { + hscale = false; + vscale = false; + } else { + if (xres > native_mode->panel_xres) + xres = native_mode->panel_xres; + if (yres > native_mode->panel_yres) + yres = native_mode->panel_yres; + + if (xres == native_mode->panel_xres) + hscale = false; + if (yres == native_mode->panel_yres) + vscale = false; + } + + switch (radeon_crtc->rmx_type) { + case RMX_FULL: + case RMX_ASPECT: + if (!hscale) + fp_horz_stretch |= ((xres/8-1) << 16); + else { + inc = (fp_horz_stretch & RADEON_HORZ_AUTO_RATIO_INC) ? 1 : 0; + scale = ((xres + inc) * RADEON_HORZ_STRETCH_RATIO_MAX) + / native_mode->panel_xres + 1; + fp_horz_stretch |= (((scale) & RADEON_HORZ_STRETCH_RATIO_MASK) | + RADEON_HORZ_STRETCH_BLEND | + RADEON_HORZ_STRETCH_ENABLE | + ((native_mode->panel_xres/8-1) << 16)); + } + + if (!vscale) + fp_vert_stretch |= ((yres-1) << 12); + else { + inc = (fp_vert_stretch & RADEON_VERT_AUTO_RATIO_INC) ? 
1 : 0; + scale = ((yres + inc) * RADEON_VERT_STRETCH_RATIO_MAX) + / native_mode->panel_yres + 1; + fp_vert_stretch |= (((scale) & RADEON_VERT_STRETCH_RATIO_MASK) | + RADEON_VERT_STRETCH_ENABLE | + RADEON_VERT_STRETCH_BLEND | + ((native_mode->panel_yres-1) << 12)); + } + break; + case RMX_CENTER: + fp_horz_stretch |= ((xres/8-1) << 16); + fp_vert_stretch |= ((yres-1) << 12); + + crtc_more_cntl |= (RADEON_CRTC_AUTO_HORZ_CENTER_EN | + RADEON_CRTC_AUTO_VERT_CENTER_EN); + + blank_width = (mode->crtc_hblank_end - mode->crtc_hblank_start) / 8; + if (blank_width > 110) + blank_width = 110; + + fp_crtc_h_total_disp = (((blank_width) & 0x3ff) + | ((((mode->crtc_hdisplay / 8) - 1) & 0x1ff) << 16)); + + hsync_wid = (mode->crtc_hsync_end - mode->crtc_hsync_start) / 8; + if (!hsync_wid) + hsync_wid = 1; + + fp_h_sync_strt_wid = ((((mode->crtc_hsync_start - mode->crtc_hblank_start) / 8) & 0x1fff) + | ((hsync_wid & 0x3f) << 16) + | ((mode->flags & DRM_MODE_FLAG_NHSYNC) + ? RADEON_CRTC_H_SYNC_POL + : 0)); + + fp_crtc_v_total_disp = (((mode->crtc_vblank_end - mode->crtc_vblank_start) & 0xffff) + | ((mode->crtc_vdisplay - 1) << 16)); + + vsync_wid = mode->crtc_vsync_end - mode->crtc_vsync_start; + if (!vsync_wid) + vsync_wid = 1; + + fp_v_sync_strt_wid = ((((mode->crtc_vsync_start - mode->crtc_vblank_start) & 0xfff) + | ((vsync_wid & 0x1f) << 16) + | ((mode->flags & DRM_MODE_FLAG_NVSYNC) + ? RADEON_CRTC_V_SYNC_POL + : 0))); + + fp_horz_vert_active = (((native_mode->panel_yres) & 0xfff) | + (((native_mode->panel_xres / 8) & 0x1ff) << 16)); + break; + case RMX_OFF: + default: + fp_horz_stretch |= ((xres/8-1) << 16); + fp_vert_stretch |= ((yres-1) << 12); + break; + } + + WREG32(RADEON_FP_HORZ_STRETCH, fp_horz_stretch); + WREG32(RADEON_FP_VERT_STRETCH, fp_vert_stretch); + WREG32(RADEON_CRTC_MORE_CNTL, crtc_more_cntl); + WREG32(RADEON_FP_HORZ_VERT_ACTIVE, fp_horz_vert_active); + WREG32(RADEON_FP_H_SYNC_STRT_WID, fp_h_sync_strt_wid); + WREG32(RADEON_FP_V_SYNC_STRT_WID, fp_v_sync_strt_wid); + WREG32(RADEON_FP_CRTC_H_TOTAL_DISP, fp_crtc_h_total_disp); + WREG32(RADEON_FP_CRTC_V_TOTAL_DISP, fp_crtc_v_total_disp); +} + void radeon_restore_common_regs(struct drm_device *dev) { /* don't need this yet */ @@ -235,6 +400,7 @@ int radeon_crtc_set_base(struct drm_crtc *crtc, int x, int y, uint64_t base; uint32_t crtc_offset, crtc_offset_cntl, crtc_tile_x0_y0 = 0; uint32_t crtc_pitch, pitch_pixels; + uint32_t tiling_flags; DRM_DEBUG("\n"); @@ -244,7 +410,12 @@ int radeon_crtc_set_base(struct drm_crtc *crtc, int x, int y, if (radeon_gem_object_pin(obj, RADEON_GEM_DOMAIN_VRAM, &base)) { return -EINVAL; } - crtc_offset = (u32)base; + /* if scanout was in GTT this really wouldn't work */ + /* crtc offset is from display base addr not FB location */ + radeon_crtc->legacy_display_base_addr = rdev->mc.vram_location; + + base -= radeon_crtc->legacy_display_base_addr; + crtc_offset_cntl = 0; pitch_pixels = crtc->fb->pitch / (crtc->fb->bits_per_pixel / 8); @@ -253,8 +424,12 @@ int radeon_crtc_set_base(struct drm_crtc *crtc, int x, int y, (crtc->fb->bits_per_pixel * 8)); crtc_pitch |= crtc_pitch << 16; - /* TODO tiling */ - if (0) { + radeon_object_get_tiling_flags(obj->driver_private, + &tiling_flags, NULL); + if (tiling_flags & RADEON_TILING_MICRO) + DRM_ERROR("trying to scanout microtiled buffer\n"); + + if (tiling_flags & RADEON_TILING_MACRO) { if (ASIC_IS_R300(rdev)) crtc_offset_cntl |= (R300_CRTC_X_Y_MODE_EN | R300_CRTC_MICRO_TILE_BUFFER_DIS | @@ -270,15 +445,13 @@ int radeon_crtc_set_base(struct drm_crtc *crtc, int x, int y, 
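The new radeon_legacy_rmx_mode_set() above programs the panel stretch ratio as ((xres + inc) * RATIO_MAX) / panel_xres + 1. A standalone version of the horizontal case; RATIO_MAX stands in for RADEON_HORZ_STRETCH_RATIO_MAX (assumed to be 4096 here) and the resolutions are examples.

/* Standalone version of the horizontal stretch-ratio arithmetic from the
 * new radeon_legacy_rmx_mode_set() above. */
#include <stdio.h>

#define RATIO_MAX 4096

int main(void)
{
        int xres = 1024;        /* active mode width */
        int panel_xres = 1280;  /* native panel width */
        int inc = 0;            /* auto-ratio increment bit, 0 or 1 */

        if (xres < panel_xres) {
                unsigned scale = ((xres + inc) * RATIO_MAX) / panel_xres + 1;

                printf("stretch ratio field = %u (mode is %d%% of panel width)\n",
                       scale, xres * 100 / panel_xres);
        } else {
                printf("no horizontal scaling needed\n");
        }
        return 0;
}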
crtc_offset_cntl &= ~RADEON_CRTC_TILE_EN; } - - /* TODO more tiling */ - if (0) { + if (tiling_flags & RADEON_TILING_MACRO) { if (ASIC_IS_R300(rdev)) { crtc_tile_x0_y0 = x | (y << 16); base &= ~0x7ff; } else { int byteshift = crtc->fb->bits_per_pixel >> 4; - int tile_addr = (((y >> 3) * crtc->fb->width + x) >> (8 - byteshift)) << 11; + int tile_addr = (((y >> 3) * pitch_pixels + x) >> (8 - byteshift)) << 11; base += tile_addr + ((x << byteshift) % 256) + ((y % 8) << 8); crtc_offset_cntl |= (y % 16); } @@ -303,11 +476,9 @@ int radeon_crtc_set_base(struct drm_crtc *crtc, int x, int y, base &= ~7; - /* update sarea TODO */ - crtc_offset = (u32)base; - WREG32(RADEON_DISPLAY_BASE_ADDR + radeon_crtc->crtc_offset, rdev->mc.vram_location); + WREG32(RADEON_DISPLAY_BASE_ADDR + radeon_crtc->crtc_offset, radeon_crtc->legacy_display_base_addr); if (ASIC_IS_R300(rdev)) { if (radeon_crtc->crtc_id) @@ -751,6 +922,8 @@ static bool radeon_crtc_mode_fixup(struct drm_crtc *crtc, struct drm_display_mode *mode, struct drm_display_mode *adjusted_mode) { + if (!radeon_crtc_scaling_mode_fixup(crtc, mode, adjusted_mode)) + return false; return true; } @@ -759,16 +932,25 @@ static int radeon_crtc_mode_set(struct drm_crtc *crtc, struct drm_display_mode *adjusted_mode, int x, int y, struct drm_framebuffer *old_fb) { - - DRM_DEBUG("\n"); + struct radeon_crtc *radeon_crtc = to_radeon_crtc(crtc); + struct drm_device *dev = crtc->dev; + struct radeon_device *rdev = dev->dev_private; /* TODO TV */ - radeon_crtc_set_base(crtc, x, y, old_fb); radeon_set_crtc_timing(crtc, adjusted_mode); radeon_set_pll(crtc, adjusted_mode); - radeon_init_disp_bandwidth(crtc->dev); - + radeon_bandwidth_update(rdev); + if (radeon_crtc->crtc_id == 0) { + radeon_legacy_rmx_mode_set(crtc, mode, adjusted_mode); + } else { + if (radeon_crtc->rmx_type != RMX_OFF) { + /* FIXME: only first crtc has rmx what should we + * do ? 
+ */ + DRM_ERROR("Mode need scaling but only first crtc can do that.\n"); + } + } return 0; } @@ -799,478 +981,3 @@ void radeon_legacy_init_crtc(struct drm_device *dev, radeon_crtc->crtc_offset = RADEON_CRTC2_H_TOTAL_DISP - RADEON_CRTC_H_TOTAL_DISP; drm_crtc_helper_add(&radeon_crtc->base, &legacy_helper_funcs); } - -void radeon_init_disp_bw_legacy(struct drm_device *dev, - struct drm_display_mode *mode1, - uint32_t pixel_bytes1, - struct drm_display_mode *mode2, - uint32_t pixel_bytes2) -{ - struct radeon_device *rdev = dev->dev_private; - fixed20_12 trcd_ff, trp_ff, tras_ff, trbs_ff, tcas_ff; - fixed20_12 sclk_ff, mclk_ff, sclk_eff_ff, sclk_delay_ff; - fixed20_12 peak_disp_bw, mem_bw, pix_clk, pix_clk2, temp_ff, crit_point_ff; - uint32_t temp, data, mem_trcd, mem_trp, mem_tras; - fixed20_12 memtcas_ff[8] = { - fixed_init(1), - fixed_init(2), - fixed_init(3), - fixed_init(0), - fixed_init_half(1), - fixed_init_half(2), - fixed_init(0), - }; - fixed20_12 memtcas_rs480_ff[8] = { - fixed_init(0), - fixed_init(1), - fixed_init(2), - fixed_init(3), - fixed_init(0), - fixed_init_half(1), - fixed_init_half(2), - fixed_init_half(3), - }; - fixed20_12 memtcas2_ff[8] = { - fixed_init(0), - fixed_init(1), - fixed_init(2), - fixed_init(3), - fixed_init(4), - fixed_init(5), - fixed_init(6), - fixed_init(7), - }; - fixed20_12 memtrbs[8] = { - fixed_init(1), - fixed_init_half(1), - fixed_init(2), - fixed_init_half(2), - fixed_init(3), - fixed_init_half(3), - fixed_init(4), - fixed_init_half(4) - }; - fixed20_12 memtrbs_r4xx[8] = { - fixed_init(4), - fixed_init(5), - fixed_init(6), - fixed_init(7), - fixed_init(8), - fixed_init(9), - fixed_init(10), - fixed_init(11) - }; - fixed20_12 min_mem_eff; - fixed20_12 mc_latency_sclk, mc_latency_mclk, k1; - fixed20_12 cur_latency_mclk, cur_latency_sclk; - fixed20_12 disp_latency, disp_latency_overhead, disp_drain_rate, - disp_drain_rate2, read_return_rate; - fixed20_12 time_disp1_drop_priority; - int c; - int cur_size = 16; /* in octawords */ - int critical_point = 0, critical_point2; -/* uint32_t read_return_rate, time_disp1_drop_priority; */ - int stop_req, max_stop_req; - - min_mem_eff.full = rfixed_const_8(0); - /* get modes */ - if ((rdev->disp_priority == 2) && ASIC_IS_R300(rdev)) { - uint32_t mc_init_misc_lat_timer = RREG32(R300_MC_INIT_MISC_LAT_TIMER); - mc_init_misc_lat_timer &= ~(R300_MC_DISP1R_INIT_LAT_MASK << R300_MC_DISP1R_INIT_LAT_SHIFT); - mc_init_misc_lat_timer &= ~(R300_MC_DISP0R_INIT_LAT_MASK << R300_MC_DISP0R_INIT_LAT_SHIFT); - /* check crtc enables */ - if (mode2) - mc_init_misc_lat_timer |= (1 << R300_MC_DISP1R_INIT_LAT_SHIFT); - if (mode1) - mc_init_misc_lat_timer |= (1 << R300_MC_DISP0R_INIT_LAT_SHIFT); - WREG32(R300_MC_INIT_MISC_LAT_TIMER, mc_init_misc_lat_timer); - } - - /* - * determine is there is enough bw for current mode - */ - mclk_ff.full = rfixed_const(rdev->clock.default_mclk); - temp_ff.full = rfixed_const(100); - mclk_ff.full = rfixed_div(mclk_ff, temp_ff); - sclk_ff.full = rfixed_const(rdev->clock.default_sclk); - sclk_ff.full = rfixed_div(sclk_ff, temp_ff); - - temp = (rdev->mc.vram_width / 8) * (rdev->mc.vram_is_ddr ? 
2 : 1); - temp_ff.full = rfixed_const(temp); - mem_bw.full = rfixed_mul(mclk_ff, temp_ff); - - pix_clk.full = 0; - pix_clk2.full = 0; - peak_disp_bw.full = 0; - if (mode1) { - temp_ff.full = rfixed_const(1000); - pix_clk.full = rfixed_const(mode1->clock); /* convert to fixed point */ - pix_clk.full = rfixed_div(pix_clk, temp_ff); - temp_ff.full = rfixed_const(pixel_bytes1); - peak_disp_bw.full += rfixed_mul(pix_clk, temp_ff); - } - if (mode2) { - temp_ff.full = rfixed_const(1000); - pix_clk2.full = rfixed_const(mode2->clock); /* convert to fixed point */ - pix_clk2.full = rfixed_div(pix_clk2, temp_ff); - temp_ff.full = rfixed_const(pixel_bytes2); - peak_disp_bw.full += rfixed_mul(pix_clk2, temp_ff); - } - - mem_bw.full = rfixed_mul(mem_bw, min_mem_eff); - if (peak_disp_bw.full >= mem_bw.full) { - DRM_ERROR("You may not have enough display bandwidth for current mode\n" - "If you have flickering problem, try to lower resolution, refresh rate, or color depth\n"); - } - - /* Get values from the EXT_MEM_CNTL register...converting its contents. */ - temp = RREG32(RADEON_MEM_TIMING_CNTL); - if ((rdev->family == CHIP_RV100) || (rdev->flags & RADEON_IS_IGP)) { /* RV100, M6, IGPs */ - mem_trcd = ((temp >> 2) & 0x3) + 1; - mem_trp = ((temp & 0x3)) + 1; - mem_tras = ((temp & 0x70) >> 4) + 1; - } else if (rdev->family == CHIP_R300 || - rdev->family == CHIP_R350) { /* r300, r350 */ - mem_trcd = (temp & 0x7) + 1; - mem_trp = ((temp >> 8) & 0x7) + 1; - mem_tras = ((temp >> 11) & 0xf) + 4; - } else if (rdev->family == CHIP_RV350 || - rdev->family <= CHIP_RV380) { - /* rv3x0 */ - mem_trcd = (temp & 0x7) + 3; - mem_trp = ((temp >> 8) & 0x7) + 3; - mem_tras = ((temp >> 11) & 0xf) + 6; - } else if (rdev->family == CHIP_R420 || - rdev->family == CHIP_R423 || - rdev->family == CHIP_RV410) { - /* r4xx */ - mem_trcd = (temp & 0xf) + 3; - if (mem_trcd > 15) - mem_trcd = 15; - mem_trp = ((temp >> 8) & 0xf) + 3; - if (mem_trp > 15) - mem_trp = 15; - mem_tras = ((temp >> 12) & 0x1f) + 6; - if (mem_tras > 31) - mem_tras = 31; - } else { /* RV200, R200 */ - mem_trcd = (temp & 0x7) + 1; - mem_trp = ((temp >> 8) & 0x7) + 1; - mem_tras = ((temp >> 12) & 0xf) + 4; - } - /* convert to FF */ - trcd_ff.full = rfixed_const(mem_trcd); - trp_ff.full = rfixed_const(mem_trp); - tras_ff.full = rfixed_const(mem_tras); - - /* Get values from the MEM_SDRAM_MODE_REG register...converting its */ - temp = RREG32(RADEON_MEM_SDRAM_MODE_REG); - data = (temp & (7 << 20)) >> 20; - if ((rdev->family == CHIP_RV100) || rdev->flags & RADEON_IS_IGP) { - if (rdev->family == CHIP_RS480) /* don't think rs400 */ - tcas_ff = memtcas_rs480_ff[data]; - else - tcas_ff = memtcas_ff[data]; - } else - tcas_ff = memtcas2_ff[data]; - - if (rdev->family == CHIP_RS400 || - rdev->family == CHIP_RS480) { - /* extra cas latency stored in bits 23-25 0-4 clocks */ - data = (temp >> 23) & 0x7; - if (data < 5) - tcas_ff.full += rfixed_const(data); - } - - if (ASIC_IS_R300(rdev) && !(rdev->flags & RADEON_IS_IGP)) { - /* on the R300, Tcas is included in Trbs. 
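The legacy bandwidth code being removed from this file ultimately compares peak scan-out demand against effective memory bandwidth. A rough scalar version of that comparison; the clocks and widths are made up and the memory-efficiency factor is ignored for simplicity.

/* Rough scalar version of the bandwidth comparison at the heart of the
 * code removed above. */
#include <stdio.h>

int main(void)
{
        unsigned mclk_mhz = 200, bus_width_bits = 128, is_ddr = 1;
        unsigned pixclk_khz = 108000, bytes_per_pixel = 4;

        /* MB/s the memory controller can move */
        unsigned mem_bw = mclk_mhz * (bus_width_bits / 8) * (is_ddr ? 2 : 1);
        /* MB/s one CRTC needs for scan-out */
        unsigned disp_bw = pixclk_khz / 1000 * bytes_per_pixel;

        printf("mem_bw=%u MB/s, disp_bw=%u MB/s -> %s\n", mem_bw, disp_bw,
               disp_bw >= mem_bw ? "not enough display bandwidth"
                                 : "mode fits");
        return 0;
}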
- */ - temp = RREG32(RADEON_MEM_CNTL); - data = (R300_MEM_NUM_CHANNELS_MASK & temp); - if (data == 1) { - if (R300_MEM_USE_CD_CH_ONLY & temp) { - temp = RREG32(R300_MC_IND_INDEX); - temp &= ~R300_MC_IND_ADDR_MASK; - temp |= R300_MC_READ_CNTL_CD_mcind; - WREG32(R300_MC_IND_INDEX, temp); - temp = RREG32(R300_MC_IND_DATA); - data = (R300_MEM_RBS_POSITION_C_MASK & temp); - } else { - temp = RREG32(R300_MC_READ_CNTL_AB); - data = (R300_MEM_RBS_POSITION_A_MASK & temp); - } - } else { - temp = RREG32(R300_MC_READ_CNTL_AB); - data = (R300_MEM_RBS_POSITION_A_MASK & temp); - } - if (rdev->family == CHIP_RV410 || - rdev->family == CHIP_R420 || - rdev->family == CHIP_R423) - trbs_ff = memtrbs_r4xx[data]; - else - trbs_ff = memtrbs[data]; - tcas_ff.full += trbs_ff.full; - } - - sclk_eff_ff.full = sclk_ff.full; - - if (rdev->flags & RADEON_IS_AGP) { - fixed20_12 agpmode_ff; - agpmode_ff.full = rfixed_const(radeon_agpmode); - temp_ff.full = rfixed_const_666(16); - sclk_eff_ff.full -= rfixed_mul(agpmode_ff, temp_ff); - } - /* TODO PCIE lanes may affect this - agpmode == 16?? */ - - if (ASIC_IS_R300(rdev)) { - sclk_delay_ff.full = rfixed_const(250); - } else { - if ((rdev->family == CHIP_RV100) || - rdev->flags & RADEON_IS_IGP) { - if (rdev->mc.vram_is_ddr) - sclk_delay_ff.full = rfixed_const(41); - else - sclk_delay_ff.full = rfixed_const(33); - } else { - if (rdev->mc.vram_width == 128) - sclk_delay_ff.full = rfixed_const(57); - else - sclk_delay_ff.full = rfixed_const(41); - } - } - - mc_latency_sclk.full = rfixed_div(sclk_delay_ff, sclk_eff_ff); - - if (rdev->mc.vram_is_ddr) { - if (rdev->mc.vram_width == 32) { - k1.full = rfixed_const(40); - c = 3; - } else { - k1.full = rfixed_const(20); - c = 1; - } - } else { - k1.full = rfixed_const(40); - c = 3; - } - - temp_ff.full = rfixed_const(2); - mc_latency_mclk.full = rfixed_mul(trcd_ff, temp_ff); - temp_ff.full = rfixed_const(c); - mc_latency_mclk.full += rfixed_mul(tcas_ff, temp_ff); - temp_ff.full = rfixed_const(4); - mc_latency_mclk.full += rfixed_mul(tras_ff, temp_ff); - mc_latency_mclk.full += rfixed_mul(trp_ff, temp_ff); - mc_latency_mclk.full += k1.full; - - mc_latency_mclk.full = rfixed_div(mc_latency_mclk, mclk_ff); - mc_latency_mclk.full += rfixed_div(temp_ff, sclk_eff_ff); - - /* - HW cursor time assuming worst case of full size colour cursor. - */ - temp_ff.full = rfixed_const((2 * (cur_size - (rdev->mc.vram_is_ddr + 1)))); - temp_ff.full += trcd_ff.full; - if (temp_ff.full < tras_ff.full) - temp_ff.full = tras_ff.full; - cur_latency_mclk.full = rfixed_div(temp_ff, mclk_ff); - - temp_ff.full = rfixed_const(cur_size); - cur_latency_sclk.full = rfixed_div(temp_ff, sclk_eff_ff); - /* - Find the total latency for the display data. - */ - disp_latency_overhead.full = rfixed_const(80); - disp_latency_overhead.full = rfixed_div(disp_latency_overhead, sclk_ff); - mc_latency_mclk.full += disp_latency_overhead.full + cur_latency_mclk.full; - mc_latency_sclk.full += disp_latency_overhead.full + cur_latency_sclk.full; - - if (mc_latency_mclk.full > mc_latency_sclk.full) - disp_latency.full = mc_latency_mclk.full; - else - disp_latency.full = mc_latency_sclk.full; - - /* setup Max GRPH_STOP_REQ default value */ - if (ASIC_IS_RV100(rdev)) - max_stop_req = 0x5c; - else - max_stop_req = 0x7c; - - if (mode1) { - /* CRTC1 - Set GRPH_BUFFER_CNTL register using h/w defined optimal values. 
- GRPH_STOP_REQ <= MIN[ 0x7C, (CRTC_H_DISP + 1) * (bit depth) / 0x10 ] - */ - stop_req = mode1->hdisplay * pixel_bytes1 / 16; - - if (stop_req > max_stop_req) - stop_req = max_stop_req; - - /* - Find the drain rate of the display buffer. - */ - temp_ff.full = rfixed_const((16/pixel_bytes1)); - disp_drain_rate.full = rfixed_div(pix_clk, temp_ff); - - /* - Find the critical point of the display buffer. - */ - crit_point_ff.full = rfixed_mul(disp_drain_rate, disp_latency); - crit_point_ff.full += rfixed_const_half(0); - - critical_point = rfixed_trunc(crit_point_ff); - - if (rdev->disp_priority == 2) { - critical_point = 0; - } - - /* - The critical point should never be above max_stop_req-4. Setting - GRPH_CRITICAL_CNTL = 0 will thus force high priority all the time. - */ - if (max_stop_req - critical_point < 4) - critical_point = 0; - - if (critical_point == 0 && mode2 && rdev->family == CHIP_R300) { - /* some R300 cards have problem with this set to 0, when CRTC2 is enabled.*/ - critical_point = 0x10; - } - - temp = RREG32(RADEON_GRPH_BUFFER_CNTL); - temp &= ~(RADEON_GRPH_STOP_REQ_MASK); - temp |= (stop_req << RADEON_GRPH_STOP_REQ_SHIFT); - temp &= ~(RADEON_GRPH_START_REQ_MASK); - if ((rdev->family == CHIP_R350) && - (stop_req > 0x15)) { - stop_req -= 0x10; - } - temp |= (stop_req << RADEON_GRPH_START_REQ_SHIFT); - temp |= RADEON_GRPH_BUFFER_SIZE; - temp &= ~(RADEON_GRPH_CRITICAL_CNTL | - RADEON_GRPH_CRITICAL_AT_SOF | - RADEON_GRPH_STOP_CNTL); - /* - Write the result into the register. - */ - WREG32(RADEON_GRPH_BUFFER_CNTL, ((temp & ~RADEON_GRPH_CRITICAL_POINT_MASK) | - (critical_point << RADEON_GRPH_CRITICAL_POINT_SHIFT))); - -#if 0 - if ((rdev->family == CHIP_RS400) || - (rdev->family == CHIP_RS480)) { - /* attempt to program RS400 disp regs correctly ??? */ - temp = RREG32(RS400_DISP1_REG_CNTL); - temp &= ~(RS400_DISP1_START_REQ_LEVEL_MASK | - RS400_DISP1_STOP_REQ_LEVEL_MASK); - WREG32(RS400_DISP1_REQ_CNTL1, (temp | - (critical_point << RS400_DISP1_START_REQ_LEVEL_SHIFT) | - (critical_point << RS400_DISP1_STOP_REQ_LEVEL_SHIFT))); - temp = RREG32(RS400_DMIF_MEM_CNTL1); - temp &= ~(RS400_DISP1_CRITICAL_POINT_START_MASK | - RS400_DISP1_CRITICAL_POINT_STOP_MASK); - WREG32(RS400_DMIF_MEM_CNTL1, (temp | - (critical_point << RS400_DISP1_CRITICAL_POINT_START_SHIFT) | - (critical_point << RS400_DISP1_CRITICAL_POINT_STOP_SHIFT))); - } -#endif - - DRM_DEBUG("GRPH_BUFFER_CNTL from to %x\n", - /* (unsigned int)info->SavedReg->grph_buffer_cntl, */ - (unsigned int)RREG32(RADEON_GRPH_BUFFER_CNTL)); - } - - if (mode2) { - u32 grph2_cntl; - stop_req = mode2->hdisplay * pixel_bytes2 / 16; - - if (stop_req > max_stop_req) - stop_req = max_stop_req; - - /* - Find the drain rate of the display buffer. 
- */ - temp_ff.full = rfixed_const((16/pixel_bytes2)); - disp_drain_rate2.full = rfixed_div(pix_clk2, temp_ff); - - grph2_cntl = RREG32(RADEON_GRPH2_BUFFER_CNTL); - grph2_cntl &= ~(RADEON_GRPH_STOP_REQ_MASK); - grph2_cntl |= (stop_req << RADEON_GRPH_STOP_REQ_SHIFT); - grph2_cntl &= ~(RADEON_GRPH_START_REQ_MASK); - if ((rdev->family == CHIP_R350) && - (stop_req > 0x15)) { - stop_req -= 0x10; - } - grph2_cntl |= (stop_req << RADEON_GRPH_START_REQ_SHIFT); - grph2_cntl |= RADEON_GRPH_BUFFER_SIZE; - grph2_cntl &= ~(RADEON_GRPH_CRITICAL_CNTL | - RADEON_GRPH_CRITICAL_AT_SOF | - RADEON_GRPH_STOP_CNTL); - - if ((rdev->family == CHIP_RS100) || - (rdev->family == CHIP_RS200)) - critical_point2 = 0; - else { - temp = (rdev->mc.vram_width * rdev->mc.vram_is_ddr + 1)/128; - temp_ff.full = rfixed_const(temp); - temp_ff.full = rfixed_mul(mclk_ff, temp_ff); - if (sclk_ff.full < temp_ff.full) - temp_ff.full = sclk_ff.full; - - read_return_rate.full = temp_ff.full; - - if (mode1) { - temp_ff.full = read_return_rate.full - disp_drain_rate.full; - time_disp1_drop_priority.full = rfixed_div(crit_point_ff, temp_ff); - } else { - time_disp1_drop_priority.full = 0; - } - crit_point_ff.full = disp_latency.full + time_disp1_drop_priority.full + disp_latency.full; - crit_point_ff.full = rfixed_mul(crit_point_ff, disp_drain_rate2); - crit_point_ff.full += rfixed_const_half(0); - - critical_point2 = rfixed_trunc(crit_point_ff); - - if (rdev->disp_priority == 2) { - critical_point2 = 0; - } - - if (max_stop_req - critical_point2 < 4) - critical_point2 = 0; - - } - - if (critical_point2 == 0 && rdev->family == CHIP_R300) { - /* some R300 cards have problem with this set to 0 */ - critical_point2 = 0x10; - } - - WREG32(RADEON_GRPH2_BUFFER_CNTL, ((grph2_cntl & ~RADEON_GRPH_CRITICAL_POINT_MASK) | - (critical_point2 << RADEON_GRPH_CRITICAL_POINT_SHIFT))); - - if ((rdev->family == CHIP_RS400) || - (rdev->family == CHIP_RS480)) { -#if 0 - /* attempt to program RS400 disp2 regs correctly ??? 
*/ - temp = RREG32(RS400_DISP2_REQ_CNTL1); - temp &= ~(RS400_DISP2_START_REQ_LEVEL_MASK | - RS400_DISP2_STOP_REQ_LEVEL_MASK); - WREG32(RS400_DISP2_REQ_CNTL1, (temp | - (critical_point2 << RS400_DISP1_START_REQ_LEVEL_SHIFT) | - (critical_point2 << RS400_DISP1_STOP_REQ_LEVEL_SHIFT))); - temp = RREG32(RS400_DISP2_REQ_CNTL2); - temp &= ~(RS400_DISP2_CRITICAL_POINT_START_MASK | - RS400_DISP2_CRITICAL_POINT_STOP_MASK); - WREG32(RS400_DISP2_REQ_CNTL2, (temp | - (critical_point2 << RS400_DISP2_CRITICAL_POINT_START_SHIFT) | - (critical_point2 << RS400_DISP2_CRITICAL_POINT_STOP_SHIFT))); -#endif - WREG32(RS400_DISP2_REQ_CNTL1, 0x105DC1CC); - WREG32(RS400_DISP2_REQ_CNTL2, 0x2749D000); - WREG32(RS400_DMIF_MEM_CNTL1, 0x29CA71DC); - WREG32(RS400_DISP1_REQ_CNTL1, 0x28FBC3AC); - } - - DRM_DEBUG("GRPH2_BUFFER_CNTL from to %x\n", - (unsigned int)RREG32(RADEON_GRPH2_BUFFER_CNTL)); - } -} diff --git a/drivers/gpu/drm/radeon/radeon_legacy_encoders.c b/drivers/gpu/drm/radeon/radeon_legacy_encoders.c index 2c2f42de1d4..34d0f58eb94 100644 --- a/drivers/gpu/drm/radeon/radeon_legacy_encoders.c +++ b/drivers/gpu/drm/radeon/radeon_legacy_encoders.c @@ -30,170 +30,6 @@ #include "atom.h" -static void radeon_legacy_rmx_mode_set(struct drm_encoder *encoder, - struct drm_display_mode *mode, - struct drm_display_mode *adjusted_mode) -{ - struct drm_device *dev = encoder->dev; - struct radeon_device *rdev = dev->dev_private; - struct radeon_encoder *radeon_encoder = to_radeon_encoder(encoder); - int xres = mode->hdisplay; - int yres = mode->vdisplay; - bool hscale = true, vscale = true; - int hsync_wid; - int vsync_wid; - int hsync_start; - uint32_t scale, inc; - uint32_t fp_horz_stretch, fp_vert_stretch, crtc_more_cntl, fp_horz_vert_active; - uint32_t fp_h_sync_strt_wid, fp_v_sync_strt_wid, fp_crtc_h_total_disp, fp_crtc_v_total_disp; - struct radeon_native_mode *native_mode = &radeon_encoder->native_mode; - - DRM_DEBUG("\n"); - - fp_vert_stretch = RREG32(RADEON_FP_VERT_STRETCH) & - (RADEON_VERT_STRETCH_RESERVED | - RADEON_VERT_AUTO_RATIO_INC); - fp_horz_stretch = RREG32(RADEON_FP_HORZ_STRETCH) & - (RADEON_HORZ_FP_LOOP_STRETCH | - RADEON_HORZ_AUTO_RATIO_INC); - - crtc_more_cntl = 0; - if ((rdev->family == CHIP_RS100) || - (rdev->family == CHIP_RS200)) { - /* This is to workaround the asic bug for RMX, some versions - of BIOS dosen't have this register initialized correctly. */ - crtc_more_cntl |= RADEON_CRTC_H_CUTOFF_ACTIVE_EN; - } - - - fp_crtc_h_total_disp = ((((mode->crtc_htotal / 8) - 1) & 0x3ff) - | ((((mode->crtc_hdisplay / 8) - 1) & 0x1ff) << 16)); - - hsync_wid = (mode->crtc_hsync_end - mode->crtc_hsync_start) / 8; - if (!hsync_wid) - hsync_wid = 1; - hsync_start = mode->crtc_hsync_start - 8; - - fp_h_sync_strt_wid = ((hsync_start & 0x1fff) - | ((hsync_wid & 0x3f) << 16) - | ((mode->flags & DRM_MODE_FLAG_NHSYNC) - ? RADEON_CRTC_H_SYNC_POL - : 0)); - - fp_crtc_v_total_disp = (((mode->crtc_vtotal - 1) & 0xffff) - | ((mode->crtc_vdisplay - 1) << 16)); - - vsync_wid = mode->crtc_vsync_end - mode->crtc_vsync_start; - if (!vsync_wid) - vsync_wid = 1; - - fp_v_sync_strt_wid = (((mode->crtc_vsync_start - 1) & 0xfff) - | ((vsync_wid & 0x1f) << 16) - | ((mode->flags & DRM_MODE_FLAG_NVSYNC) - ? 
RADEON_CRTC_V_SYNC_POL - : 0)); - - fp_horz_vert_active = 0; - - if (native_mode->panel_xres == 0 || - native_mode->panel_yres == 0) { - hscale = false; - vscale = false; - } else { - if (xres > native_mode->panel_xres) - xres = native_mode->panel_xres; - if (yres > native_mode->panel_yres) - yres = native_mode->panel_yres; - - if (xres == native_mode->panel_xres) - hscale = false; - if (yres == native_mode->panel_yres) - vscale = false; - } - - if (radeon_encoder->flags & RADEON_USE_RMX) { - if (radeon_encoder->rmx_type != RMX_CENTER) { - if (!hscale) - fp_horz_stretch |= ((xres/8-1) << 16); - else { - inc = (fp_horz_stretch & RADEON_HORZ_AUTO_RATIO_INC) ? 1 : 0; - scale = ((xres + inc) * RADEON_HORZ_STRETCH_RATIO_MAX) - / native_mode->panel_xres + 1; - fp_horz_stretch |= (((scale) & RADEON_HORZ_STRETCH_RATIO_MASK) | - RADEON_HORZ_STRETCH_BLEND | - RADEON_HORZ_STRETCH_ENABLE | - ((native_mode->panel_xres/8-1) << 16)); - } - - if (!vscale) - fp_vert_stretch |= ((yres-1) << 12); - else { - inc = (fp_vert_stretch & RADEON_VERT_AUTO_RATIO_INC) ? 1 : 0; - scale = ((yres + inc) * RADEON_VERT_STRETCH_RATIO_MAX) - / native_mode->panel_yres + 1; - fp_vert_stretch |= (((scale) & RADEON_VERT_STRETCH_RATIO_MASK) | - RADEON_VERT_STRETCH_ENABLE | - RADEON_VERT_STRETCH_BLEND | - ((native_mode->panel_yres-1) << 12)); - } - } else if (radeon_encoder->rmx_type == RMX_CENTER) { - int blank_width; - - fp_horz_stretch |= ((xres/8-1) << 16); - fp_vert_stretch |= ((yres-1) << 12); - - crtc_more_cntl |= (RADEON_CRTC_AUTO_HORZ_CENTER_EN | - RADEON_CRTC_AUTO_VERT_CENTER_EN); - - blank_width = (mode->crtc_hblank_end - mode->crtc_hblank_start) / 8; - if (blank_width > 110) - blank_width = 110; - - fp_crtc_h_total_disp = (((blank_width) & 0x3ff) - | ((((mode->crtc_hdisplay / 8) - 1) & 0x1ff) << 16)); - - hsync_wid = (mode->crtc_hsync_end - mode->crtc_hsync_start) / 8; - if (!hsync_wid) - hsync_wid = 1; - - fp_h_sync_strt_wid = ((((mode->crtc_hsync_start - mode->crtc_hblank_start) / 8) & 0x1fff) - | ((hsync_wid & 0x3f) << 16) - | ((mode->flags & DRM_MODE_FLAG_NHSYNC) - ? RADEON_CRTC_H_SYNC_POL - : 0)); - - fp_crtc_v_total_disp = (((mode->crtc_vblank_end - mode->crtc_vblank_start) & 0xffff) - | ((mode->crtc_vdisplay - 1) << 16)); - - vsync_wid = mode->crtc_vsync_end - mode->crtc_vsync_start; - if (!vsync_wid) - vsync_wid = 1; - - fp_v_sync_strt_wid = ((((mode->crtc_vsync_start - mode->crtc_vblank_start) & 0xfff) - | ((vsync_wid & 0x1f) << 16) - | ((mode->flags & DRM_MODE_FLAG_NVSYNC) - ? 
RADEON_CRTC_V_SYNC_POL - : 0))); - - fp_horz_vert_active = (((native_mode->panel_yres) & 0xfff) | - (((native_mode->panel_xres / 8) & 0x1ff) << 16)); - } - } else { - fp_horz_stretch |= ((xres/8-1) << 16); - fp_vert_stretch |= ((yres-1) << 12); - } - - WREG32(RADEON_FP_HORZ_STRETCH, fp_horz_stretch); - WREG32(RADEON_FP_VERT_STRETCH, fp_vert_stretch); - WREG32(RADEON_CRTC_MORE_CNTL, crtc_more_cntl); - WREG32(RADEON_FP_HORZ_VERT_ACTIVE, fp_horz_vert_active); - WREG32(RADEON_FP_H_SYNC_STRT_WID, fp_h_sync_strt_wid); - WREG32(RADEON_FP_V_SYNC_STRT_WID, fp_v_sync_strt_wid); - WREG32(RADEON_FP_CRTC_H_TOTAL_DISP, fp_crtc_h_total_disp); - WREG32(RADEON_FP_CRTC_V_TOTAL_DISP, fp_crtc_v_total_disp); - -} - static void radeon_legacy_lvds_dpms(struct drm_encoder *encoder, int mode) { struct drm_device *dev = encoder->dev; @@ -287,9 +123,6 @@ static void radeon_legacy_lvds_mode_set(struct drm_encoder *encoder, DRM_DEBUG("\n"); - if (radeon_crtc->crtc_id == 0) - radeon_legacy_rmx_mode_set(encoder, mode, adjusted_mode); - lvds_pll_cntl = RREG32(RADEON_LVDS_PLL_CNTL); lvds_pll_cntl &= ~RADEON_LVDS_PLL_EN; @@ -318,7 +151,7 @@ static void radeon_legacy_lvds_mode_set(struct drm_encoder *encoder, if (radeon_crtc->crtc_id == 0) { if (ASIC_IS_R300(rdev)) { - if (radeon_encoder->flags & RADEON_USE_RMX) + if (radeon_encoder->rmx_type != RMX_OFF) lvds_pll_cntl |= R300_LVDS_SRC_SEL_RMX; } else lvds_gen_cntl &= ~RADEON_LVDS_SEL_CRTC2; @@ -350,8 +183,6 @@ static bool radeon_legacy_lvds_mode_fixup(struct drm_encoder *encoder, drm_mode_set_crtcinfo(adjusted_mode, 0); - radeon_encoder->flags &= ~RADEON_USE_RMX; - if (radeon_encoder->rmx_type != RMX_OFF) radeon_rmx_mode_fixup(encoder, mode, adjusted_mode); @@ -455,9 +286,6 @@ static void radeon_legacy_primary_dac_mode_set(struct drm_encoder *encoder, DRM_DEBUG("\n"); - if (radeon_crtc->crtc_id == 0) - radeon_legacy_rmx_mode_set(encoder, mode, adjusted_mode); - if (radeon_crtc->crtc_id == 0) { if (rdev->family == CHIP_R200 || ASIC_IS_R300(rdev)) { disp_output_cntl = RREG32(RADEON_DISP_OUTPUT_CNTL) & @@ -653,9 +481,6 @@ static void radeon_legacy_tmds_int_mode_set(struct drm_encoder *encoder, DRM_DEBUG("\n"); - if (radeon_crtc->crtc_id == 0) - radeon_legacy_rmx_mode_set(encoder, mode, adjusted_mode); - tmp = tmds_pll_cntl = RREG32(RADEON_TMDS_PLL_CNTL); tmp &= 0xfffff; if (rdev->family == CHIP_RV280) { @@ -711,7 +536,7 @@ static void radeon_legacy_tmds_int_mode_set(struct drm_encoder *encoder, if (radeon_crtc->crtc_id == 0) { if (ASIC_IS_R300(rdev) || rdev->family == CHIP_R200) { fp_gen_cntl &= ~R200_FP_SOURCE_SEL_MASK; - if (radeon_encoder->flags & RADEON_USE_RMX) + if (radeon_encoder->rmx_type != RMX_OFF) fp_gen_cntl |= R200_FP_SOURCE_SEL_RMX; else fp_gen_cntl |= R200_FP_SOURCE_SEL_CRTC1; @@ -820,9 +645,6 @@ static void radeon_legacy_tmds_ext_mode_set(struct drm_encoder *encoder, DRM_DEBUG("\n"); - if (radeon_crtc->crtc_id == 0) - radeon_legacy_rmx_mode_set(encoder, mode, adjusted_mode); - if (rdev->is_atom_bios) { radeon_encoder->pixel_clock = adjusted_mode->clock; atombios_external_tmds_setup(encoder, ATOM_ENABLE); @@ -856,7 +678,7 @@ static void radeon_legacy_tmds_ext_mode_set(struct drm_encoder *encoder, if (radeon_crtc->crtc_id == 0) { if ((rdev->family == CHIP_R200) || ASIC_IS_R300(rdev)) { fp2_gen_cntl &= ~R200_FP2_SOURCE_SEL_MASK; - if (radeon_encoder->flags & RADEON_USE_RMX) + if (radeon_encoder->rmx_type != RMX_OFF) fp2_gen_cntl |= R200_FP2_SOURCE_SEL_RMX; else fp2_gen_cntl |= R200_FP2_SOURCE_SEL_CRTC1; @@ -1014,9 +836,6 @@ static void 
radeon_legacy_tv_dac_mode_set(struct drm_encoder *encoder, DRM_DEBUG("\n"); - if (radeon_crtc->crtc_id == 0) - radeon_legacy_rmx_mode_set(encoder, mode, adjusted_mode); - if (rdev->family != CHIP_R200) { tv_dac_cntl = RREG32(RADEON_TV_DAC_CNTL); if (rdev->family == CHIP_R420 || @@ -1243,6 +1062,7 @@ radeon_add_legacy_encoder(struct drm_device *dev, uint32_t encoder_id, uint32_t radeon_encoder->encoder_id = encoder_id; radeon_encoder->devices = supported_device; + radeon_encoder->rmx_type = RMX_OFF; switch (radeon_encoder->encoder_id) { case ENCODER_OBJECT_ID_INTERNAL_LVDS: diff --git a/drivers/gpu/drm/radeon/radeon_mode.h b/drivers/gpu/drm/radeon/radeon_mode.h index 9173b687462..3b09a1f2d8f 100644 --- a/drivers/gpu/drm/radeon/radeon_mode.h +++ b/drivers/gpu/drm/radeon/radeon_mode.h @@ -36,6 +36,9 @@ #include <linux/i2c.h> #include <linux/i2c-id.h> #include <linux/i2c-algo-bit.h> +#include "radeon_fixed.h" + +struct radeon_device; #define to_radeon_crtc(x) container_of(x, struct radeon_crtc, base) #define to_radeon_connector(x) container_of(x, struct radeon_connector, base) @@ -124,6 +127,7 @@ struct radeon_tmds_pll { #define RADEON_PLL_PREFER_LOW_POST_DIV (1 << 8) #define RADEON_PLL_PREFER_HIGH_POST_DIV (1 << 9) #define RADEON_PLL_USE_FRAC_FB_DIV (1 << 10) +#define RADEON_PLL_PREFER_CLOSEST_LOWER (1 << 11) struct radeon_pll { uint16_t reference_freq; @@ -170,6 +174,18 @@ struct radeon_mode_info { struct atom_context *atom_context; enum radeon_connector_table connector_table; bool mode_config_initialized; + struct radeon_crtc *crtcs[2]; +}; + +struct radeon_native_mode { + /* preferred mode */ + uint32_t panel_xres, panel_yres; + uint32_t hoverplus, hsync_width; + uint32_t hblank; + uint32_t voverplus, vsync_width; + uint32_t vblank; + uint32_t dotclock; + uint32_t flags; }; struct radeon_crtc { @@ -185,19 +201,13 @@ struct radeon_crtc { uint64_t cursor_addr; int cursor_width; int cursor_height; -}; - -#define RADEON_USE_RMX 1 - -struct radeon_native_mode { - /* preferred mode */ - uint32_t panel_xres, panel_yres; - uint32_t hoverplus, hsync_width; - uint32_t hblank; - uint32_t voverplus, vsync_width; - uint32_t vblank; - uint32_t dotclock; - uint32_t flags; + uint32_t legacy_display_base_addr; + uint32_t legacy_cursor_offset; + enum radeon_rmx_type rmx_type; + uint32_t devices; + fixed20_12 vsc; + fixed20_12 hsc; + struct radeon_native_mode native_mode; }; struct radeon_encoder_primary_dac { @@ -383,16 +393,9 @@ void radeon_enc_destroy(struct drm_encoder *encoder); void radeon_copy_fb(struct drm_device *dev, struct drm_gem_object *dst_obj); void radeon_combios_asic_init(struct drm_device *dev); extern int radeon_static_clocks_init(struct drm_device *dev); -void radeon_init_disp_bw_legacy(struct drm_device *dev, - struct drm_display_mode *mode1, - uint32_t pixel_bytes1, - struct drm_display_mode *mode2, - uint32_t pixel_bytes2); -void radeon_init_disp_bw_avivo(struct drm_device *dev, - struct drm_display_mode *mode1, - uint32_t pixel_bytes1, - struct drm_display_mode *mode2, - uint32_t pixel_bytes2); -void radeon_init_disp_bandwidth(struct drm_device *dev); +bool radeon_crtc_scaling_mode_fixup(struct drm_crtc *crtc, + struct drm_display_mode *mode, + struct drm_display_mode *adjusted_mode); +void atom_rv515_force_tv_scaler(struct radeon_device *rdev); #endif diff --git a/drivers/gpu/drm/radeon/radeon_object.c b/drivers/gpu/drm/radeon/radeon_object.c index 983e8df5e00..e98cae3bf4a 100644 --- a/drivers/gpu/drm/radeon/radeon_object.c +++ b/drivers/gpu/drm/radeon/radeon_object.c @@ -44,6 +44,9 
@@ struct radeon_object { uint64_t gpu_addr; void *kptr; bool is_iomem; + uint32_t tiling_flags; + uint32_t pitch; + int surface_reg; }; int radeon_ttm_init(struct radeon_device *rdev); @@ -70,6 +73,7 @@ static void radeon_ttm_object_object_destroy(struct ttm_buffer_object *tobj) robj = container_of(tobj, struct radeon_object, tobj); list_del_init(&robj->list); + radeon_object_clear_surface_reg(robj); kfree(robj); } @@ -99,16 +103,16 @@ static inline uint32_t radeon_object_flags_from_domain(uint32_t domain) { uint32_t flags = 0; if (domain & RADEON_GEM_DOMAIN_VRAM) { - flags |= TTM_PL_FLAG_VRAM; + flags |= TTM_PL_FLAG_VRAM | TTM_PL_FLAG_WC | TTM_PL_FLAG_UNCACHED; } if (domain & RADEON_GEM_DOMAIN_GTT) { - flags |= TTM_PL_FLAG_TT; + flags |= TTM_PL_FLAG_TT | TTM_PL_MASK_CACHING; } if (domain & RADEON_GEM_DOMAIN_CPU) { - flags |= TTM_PL_FLAG_SYSTEM; + flags |= TTM_PL_FLAG_SYSTEM | TTM_PL_MASK_CACHING; } if (!flags) { - flags |= TTM_PL_FLAG_SYSTEM; + flags |= TTM_PL_FLAG_SYSTEM | TTM_PL_MASK_CACHING; } return flags; } @@ -141,6 +145,7 @@ int radeon_object_create(struct radeon_device *rdev, } robj->rdev = rdev; robj->gobj = gobj; + robj->surface_reg = -1; INIT_LIST_HEAD(&robj->list); flags = radeon_object_flags_from_domain(domain); @@ -223,7 +228,6 @@ int radeon_object_pin(struct radeon_object *robj, uint32_t domain, { uint32_t flags; uint32_t tmp; - void *fbptr; int r; flags = radeon_object_flags_from_domain(domain); @@ -242,10 +246,6 @@ int radeon_object_pin(struct radeon_object *robj, uint32_t domain, DRM_ERROR("radeon: failed to reserve object for pinning it.\n"); return r; } - if (robj->rdev->fbdev_robj == robj) { - mutex_lock(&robj->rdev->fbdev_info->lock); - radeon_object_kunmap(robj); - } tmp = robj->tobj.mem.placement; ttm_flag_masked(&tmp, flags, TTM_PL_MASK_MEM); robj->tobj.proposed_placement = tmp | TTM_PL_FLAG_NO_EVICT | TTM_PL_MASK_CACHING; @@ -261,23 +261,12 @@ int radeon_object_pin(struct radeon_object *robj, uint32_t domain, DRM_ERROR("radeon: failed to pin object.\n"); } radeon_object_unreserve(robj); - if (robj->rdev->fbdev_robj == robj) { - if (!r) { - r = radeon_object_kmap(robj, &fbptr); - } - if (!r) { - robj->rdev->fbdev_info->screen_base = fbptr; - robj->rdev->fbdev_info->fix.smem_start = (unsigned long)fbptr; - } - mutex_unlock(&robj->rdev->fbdev_info->lock); - } return r; } void radeon_object_unpin(struct radeon_object *robj) { uint32_t flags; - void *fbptr; int r; spin_lock(&robj->tobj.lock); @@ -297,10 +286,6 @@ void radeon_object_unpin(struct radeon_object *robj) DRM_ERROR("radeon: failed to reserve object for unpinning it.\n"); return; } - if (robj->rdev->fbdev_robj == robj) { - mutex_lock(&robj->rdev->fbdev_info->lock); - radeon_object_kunmap(robj); - } flags = robj->tobj.mem.placement; robj->tobj.proposed_placement = flags & ~TTM_PL_FLAG_NO_EVICT; r = ttm_buffer_object_validate(&robj->tobj, @@ -310,16 +295,6 @@ void radeon_object_unpin(struct radeon_object *robj) DRM_ERROR("radeon: failed to unpin buffer.\n"); } radeon_object_unreserve(robj); - if (robj->rdev->fbdev_robj == robj) { - if (!r) { - r = radeon_object_kmap(robj, &fbptr); - } - if (!r) { - robj->rdev->fbdev_info->screen_base = fbptr; - robj->rdev->fbdev_info->fix.smem_start = (unsigned long)fbptr; - } - mutex_unlock(&robj->rdev->fbdev_info->lock); - } } int radeon_object_wait(struct radeon_object *robj) @@ -334,7 +309,7 @@ int radeon_object_wait(struct radeon_object *robj) } spin_lock(&robj->tobj.lock); if (robj->tobj.sync_obj) { - r = ttm_bo_wait(&robj->tobj, true, false, false); + r = 
ttm_bo_wait(&robj->tobj, true, true, false); } spin_unlock(&robj->tobj.lock); radeon_object_unreserve(robj); @@ -433,7 +408,6 @@ int radeon_object_list_validate(struct list_head *head, void *fence) struct radeon_object *robj; struct radeon_fence *old_fence = NULL; struct list_head *i; - uint32_t flags; int r; r = radeon_object_list_reserve(head); @@ -444,27 +418,25 @@ int radeon_object_list_validate(struct list_head *head, void *fence) list_for_each(i, head) { lobj = list_entry(i, struct radeon_object_list, list); robj = lobj->robj; - if (lobj->wdomain) { - flags = radeon_object_flags_from_domain(lobj->wdomain); - flags |= TTM_PL_FLAG_TT; - } else { - flags = radeon_object_flags_from_domain(lobj->rdomain); - flags |= TTM_PL_FLAG_TT; - flags |= TTM_PL_FLAG_VRAM; - } if (!robj->pin_count) { - robj->tobj.proposed_placement = flags | TTM_PL_MASK_CACHING; + if (lobj->wdomain) { + robj->tobj.proposed_placement = + radeon_object_flags_from_domain(lobj->wdomain); + } else { + robj->tobj.proposed_placement = + radeon_object_flags_from_domain(lobj->rdomain); + } r = ttm_buffer_object_validate(&robj->tobj, robj->tobj.proposed_placement, true, false); if (unlikely(r)) { - radeon_object_list_unreserve(head); DRM_ERROR("radeon: failed to validate.\n"); return r; } radeon_object_gpu_addr(robj); } lobj->gpu_offset = robj->gpu_addr; + lobj->tiling_flags = robj->tiling_flags; if (fence) { old_fence = (struct radeon_fence *)robj->tobj.sync_obj; robj->tobj.sync_obj = radeon_fence_ref(fence); @@ -509,3 +481,127 @@ unsigned long radeon_object_size(struct radeon_object *robj) { return robj->tobj.num_pages << PAGE_SHIFT; } + +int radeon_object_get_surface_reg(struct radeon_object *robj) +{ + struct radeon_device *rdev = robj->rdev; + struct radeon_surface_reg *reg; + struct radeon_object *old_object; + int steal; + int i; + + if (!robj->tiling_flags) + return 0; + + if (robj->surface_reg >= 0) { + reg = &rdev->surface_regs[robj->surface_reg]; + i = robj->surface_reg; + goto out; + } + + steal = -1; + for (i = 0; i < RADEON_GEM_MAX_SURFACES; i++) { + + reg = &rdev->surface_regs[i]; + if (!reg->robj) + break; + + old_object = reg->robj; + if (old_object->pin_count == 0) + steal = i; + } + + /* if we are all out */ + if (i == RADEON_GEM_MAX_SURFACES) { + if (steal == -1) + return -ENOMEM; + /* find someone with a surface reg and nuke their BO */ + reg = &rdev->surface_regs[steal]; + old_object = reg->robj; + /* blow away the mapping */ + DRM_DEBUG("stealing surface reg %d from %p\n", steal, old_object); + ttm_bo_unmap_virtual(&old_object->tobj); + old_object->surface_reg = -1; + i = steal; + } + + robj->surface_reg = i; + reg->robj = robj; + +out: + radeon_set_surface_reg(rdev, i, robj->tiling_flags, robj->pitch, + robj->tobj.mem.mm_node->start << PAGE_SHIFT, + robj->tobj.num_pages << PAGE_SHIFT); + return 0; +} + +void radeon_object_clear_surface_reg(struct radeon_object *robj) +{ + struct radeon_device *rdev = robj->rdev; + struct radeon_surface_reg *reg; + + if (robj->surface_reg == -1) + return; + + reg = &rdev->surface_regs[robj->surface_reg]; + radeon_clear_surface_reg(rdev, robj->surface_reg); + + reg->robj = NULL; + robj->surface_reg = -1; +} + +void radeon_object_set_tiling_flags(struct radeon_object *robj, + uint32_t tiling_flags, uint32_t pitch) +{ + robj->tiling_flags = tiling_flags; + robj->pitch = pitch; +} + +void radeon_object_get_tiling_flags(struct radeon_object *robj, + uint32_t *tiling_flags, + uint32_t *pitch) +{ + if (tiling_flags) + *tiling_flags = robj->tiling_flags; + if (pitch) + *pitch = 
robj->pitch; +} + +int radeon_object_check_tiling(struct radeon_object *robj, bool has_moved, + bool force_drop) +{ + if (!(robj->tiling_flags & RADEON_TILING_SURFACE)) + return 0; + + if (force_drop) { + radeon_object_clear_surface_reg(robj); + return 0; + } + + if (robj->tobj.mem.mem_type != TTM_PL_VRAM) { + if (!has_moved) + return 0; + + if (robj->surface_reg >= 0) + radeon_object_clear_surface_reg(robj); + return 0; + } + + if ((robj->surface_reg >= 0) && !has_moved) + return 0; + + return radeon_object_get_surface_reg(robj); +} + +void radeon_bo_move_notify(struct ttm_buffer_object *bo, + struct ttm_mem_reg *mem) +{ + struct radeon_object *robj = container_of(bo, struct radeon_object, tobj); + radeon_object_check_tiling(robj, 0, 1); +} + +void radeon_bo_fault_reserve_notify(struct ttm_buffer_object *bo) +{ + struct radeon_object *robj = container_of(bo, struct radeon_object, tobj); + radeon_object_check_tiling(robj, 0, 0); +} diff --git a/drivers/gpu/drm/radeon/radeon_ring.c b/drivers/gpu/drm/radeon/radeon_ring.c index a853261d188..60d159308b8 100644 --- a/drivers/gpu/drm/radeon/radeon_ring.c +++ b/drivers/gpu/drm/radeon/radeon_ring.c @@ -126,32 +126,19 @@ static void radeon_ib_align(struct radeon_device *rdev, struct radeon_ib *ib) } } -static void radeon_ib_cpu_flush(struct radeon_device *rdev, - struct radeon_ib *ib) -{ - unsigned long tmp; - unsigned i; - - /* To force CPU cache flush ugly but seems reliable */ - for (i = 0; i < ib->length_dw; i += (rdev->cp.align_mask + 1)) { - tmp = readl(&ib->ptr[i]); - } -} - int radeon_ib_schedule(struct radeon_device *rdev, struct radeon_ib *ib) { int r = 0; mutex_lock(&rdev->ib_pool.mutex); radeon_ib_align(rdev, ib); - radeon_ib_cpu_flush(rdev, ib); if (!ib->length_dw || !rdev->cp.ready) { /* TODO: Nothings in the ib we should report. */ mutex_unlock(&rdev->ib_pool.mutex); DRM_ERROR("radeon: couldn't schedule IB(%lu).\n", ib->idx); return -EINVAL; } - /* 64 dwords should be enought for fence too */ + /* 64 dwords should be enough for fence too */ r = radeon_ring_lock(rdev, 64); if (r) { DRM_ERROR("radeon: scheduling IB failled (%d).\n", r); diff --git a/drivers/gpu/drm/radeon/radeon_share.h b/drivers/gpu/drm/radeon/radeon_share.h new file mode 100644 index 00000000000..63a773578f1 --- /dev/null +++ b/drivers/gpu/drm/radeon/radeon_share.h @@ -0,0 +1,39 @@ +/* + * Copyright 2008 Advanced Micro Devices, Inc. + * Copyright 2008 Red Hat Inc. + * Copyright 2009 Jerome Glisse. + * + * Permission is hereby granted, free of charge, to any person obtaining a + * copy of this software and associated documentation files (the "Software"), + * to deal in the Software without restriction, including without limitation + * the rights to use, copy, modify, merge, publish, distribute, sublicense, + * and/or sell copies of the Software, and to permit persons to whom the + * Software is furnished to do so, subject to the following conditions: + * + * The above copyright notice and this permission notice shall be included in + * all copies or substantial portions of the Software. + * + * THE SOFTWARE IS PROVIDED "AS IS", WITHOUT WARRANTY OF ANY KIND, EXPRESS OR + * IMPLIED, INCLUDING BUT NOT LIMITED TO THE WARRANTIES OF MERCHANTABILITY, + * FITNESS FOR A PARTICULAR PURPOSE AND NONINFRINGEMENT. 
IN NO EVENT SHALL + * THE COPYRIGHT HOLDER(S) OR AUTHOR(S) BE LIABLE FOR ANY CLAIM, DAMAGES OR + * OTHER LIABILITY, WHETHER IN AN ACTION OF CONTRACT, TORT OR OTHERWISE, + * ARISING FROM, OUT OF OR IN CONNECTION WITH THE SOFTWARE OR THE USE OR + * OTHER DEALINGS IN THE SOFTWARE. + * + * Authors: Dave Airlie + * Alex Deucher + * Jerome Glisse + */ +#ifndef __RADEON_SHARE_H__ +#define __RADEON_SHARE_H__ + +void r100_vram_init_sizes(struct radeon_device *rdev); + +void rs690_line_buffer_adjust(struct radeon_device *rdev, + struct drm_display_mode *mode1, + struct drm_display_mode *mode2); + +void rv515_bandwidth_avivo_update(struct radeon_device *rdev); + +#endif diff --git a/drivers/gpu/drm/radeon/radeon_test.c b/drivers/gpu/drm/radeon/radeon_test.c new file mode 100644 index 00000000000..03c33cf4e14 --- /dev/null +++ b/drivers/gpu/drm/radeon/radeon_test.c @@ -0,0 +1,209 @@ +/* + * Copyright 2009 VMware, Inc. + * + * Permission is hereby granted, free of charge, to any person obtaining a + * copy of this software and associated documentation files (the "Software"), + * to deal in the Software without restriction, including without limitation + * the rights to use, copy, modify, merge, publish, distribute, sublicense, + * and/or sell copies of the Software, and to permit persons to whom the + * Software is furnished to do so, subject to the following conditions: + * + * The above copyright notice and this permission notice shall be included in + * all copies or substantial portions of the Software. + * + * THE SOFTWARE IS PROVIDED "AS IS", WITHOUT WARRANTY OF ANY KIND, EXPRESS OR + * IMPLIED, INCLUDING BUT NOT LIMITED TO THE WARRANTIES OF MERCHANTABILITY, + * FITNESS FOR A PARTICULAR PURPOSE AND NONINFRINGEMENT. IN NO EVENT SHALL + * THE COPYRIGHT HOLDER(S) OR AUTHOR(S) BE LIABLE FOR ANY CLAIM, DAMAGES OR + * OTHER LIABILITY, WHETHER IN AN ACTION OF CONTRACT, TORT OR OTHERWISE, + * ARISING FROM, OUT OF OR IN CONNECTION WITH THE SOFTWARE OR THE USE OR + * OTHER DEALINGS IN THE SOFTWARE. 
+ * + * Authors: Michel Dänzer + */ +#include <drm/drmP.h> +#include <drm/radeon_drm.h> +#include "radeon_reg.h" +#include "radeon.h" + + +/* Test BO GTT->VRAM and VRAM->GTT GPU copies across the whole GTT aperture */ +void radeon_test_moves(struct radeon_device *rdev) +{ + struct radeon_object *vram_obj = NULL; + struct radeon_object **gtt_obj = NULL; + struct radeon_fence *fence = NULL; + uint64_t gtt_addr, vram_addr; + unsigned i, n, size; + int r; + + size = 1024 * 1024; + + /* Number of tests = + * (Total GTT - IB pool - writeback page - ring buffer) / test size + */ + n = (rdev->mc.gtt_size - RADEON_IB_POOL_SIZE*64*1024 - 4096 - + rdev->cp.ring_size) / size; + + gtt_obj = kzalloc(n * sizeof(*gtt_obj), GFP_KERNEL); + if (!gtt_obj) { + DRM_ERROR("Failed to allocate %d pointers\n", n); + r = 1; + goto out_cleanup; + } + + r = radeon_object_create(rdev, NULL, size, true, RADEON_GEM_DOMAIN_VRAM, + false, &vram_obj); + if (r) { + DRM_ERROR("Failed to create VRAM object\n"); + goto out_cleanup; + } + + r = radeon_object_pin(vram_obj, RADEON_GEM_DOMAIN_VRAM, &vram_addr); + if (r) { + DRM_ERROR("Failed to pin VRAM object\n"); + goto out_cleanup; + } + + for (i = 0; i < n; i++) { + void *gtt_map, *vram_map; + void **gtt_start, **gtt_end; + void **vram_start, **vram_end; + + r = radeon_object_create(rdev, NULL, size, true, + RADEON_GEM_DOMAIN_GTT, false, gtt_obj + i); + if (r) { + DRM_ERROR("Failed to create GTT object %d\n", i); + goto out_cleanup; + } + + r = radeon_object_pin(gtt_obj[i], RADEON_GEM_DOMAIN_GTT, >t_addr); + if (r) { + DRM_ERROR("Failed to pin GTT object %d\n", i); + goto out_cleanup; + } + + r = radeon_object_kmap(gtt_obj[i], >t_map); + if (r) { + DRM_ERROR("Failed to map GTT object %d\n", i); + goto out_cleanup; + } + + for (gtt_start = gtt_map, gtt_end = gtt_map + size; + gtt_start < gtt_end; + gtt_start++) + *gtt_start = gtt_start; + + radeon_object_kunmap(gtt_obj[i]); + + r = radeon_fence_create(rdev, &fence); + if (r) { + DRM_ERROR("Failed to create GTT->VRAM fence %d\n", i); + goto out_cleanup; + } + + r = radeon_copy(rdev, gtt_addr, vram_addr, size / 4096, fence); + if (r) { + DRM_ERROR("Failed GTT->VRAM copy %d\n", i); + goto out_cleanup; + } + + r = radeon_fence_wait(fence, false); + if (r) { + DRM_ERROR("Failed to wait for GTT->VRAM fence %d\n", i); + goto out_cleanup; + } + + radeon_fence_unref(&fence); + + r = radeon_object_kmap(vram_obj, &vram_map); + if (r) { + DRM_ERROR("Failed to map VRAM object after copy %d\n", i); + goto out_cleanup; + } + + for (gtt_start = gtt_map, gtt_end = gtt_map + size, + vram_start = vram_map, vram_end = vram_map + size; + vram_start < vram_end; + gtt_start++, vram_start++) { + if (*vram_start != gtt_start) { + DRM_ERROR("Incorrect GTT->VRAM copy %d: Got 0x%p, " + "expected 0x%p (GTT map 0x%p-0x%p)\n", + i, *vram_start, gtt_start, gtt_map, + gtt_end); + radeon_object_kunmap(vram_obj); + goto out_cleanup; + } + *vram_start = vram_start; + } + + radeon_object_kunmap(vram_obj); + + r = radeon_fence_create(rdev, &fence); + if (r) { + DRM_ERROR("Failed to create VRAM->GTT fence %d\n", i); + goto out_cleanup; + } + + r = radeon_copy(rdev, vram_addr, gtt_addr, size / 4096, fence); + if (r) { + DRM_ERROR("Failed VRAM->GTT copy %d\n", i); + goto out_cleanup; + } + + r = radeon_fence_wait(fence, false); + if (r) { + DRM_ERROR("Failed to wait for VRAM->GTT fence %d\n", i); + goto out_cleanup; + } + + radeon_fence_unref(&fence); + + r = radeon_object_kmap(gtt_obj[i], >t_map); + if (r) { + DRM_ERROR("Failed to map GTT object after copy %d\n", i); 
+ goto out_cleanup; + } + + for (gtt_start = gtt_map, gtt_end = gtt_map + size, + vram_start = vram_map, vram_end = vram_map + size; + gtt_start < gtt_end; + gtt_start++, vram_start++) { + if (*gtt_start != vram_start) { + DRM_ERROR("Incorrect VRAM->GTT copy %d: Got 0x%p, " + "expected 0x%p (VRAM map 0x%p-0x%p)\n", + i, *gtt_start, vram_start, vram_map, + vram_end); + radeon_object_kunmap(gtt_obj[i]); + goto out_cleanup; + } + } + + radeon_object_kunmap(gtt_obj[i]); + + DRM_INFO("Tested GTT->VRAM and VRAM->GTT copy for GTT offset 0x%llx\n", + gtt_addr - rdev->mc.gtt_location); + } + +out_cleanup: + if (vram_obj) { + radeon_object_unpin(vram_obj); + radeon_object_unref(&vram_obj); + } + if (gtt_obj) { + for (i = 0; i < n; i++) { + if (gtt_obj[i]) { + radeon_object_unpin(gtt_obj[i]); + radeon_object_unref(>t_obj[i]); + } + } + kfree(gtt_obj); + } + if (fence) { + radeon_fence_unref(&fence); + } + if (r) { + printk(KERN_WARNING "Error while testing BO move.\n"); + } +} + diff --git a/drivers/gpu/drm/radeon/radeon_ttm.c b/drivers/gpu/drm/radeon/radeon_ttm.c index 1227a97f516..15c3531377e 100644 --- a/drivers/gpu/drm/radeon/radeon_ttm.c +++ b/drivers/gpu/drm/radeon/radeon_ttm.c @@ -355,23 +355,26 @@ static int radeon_bo_move(struct ttm_buffer_object *bo, if (!rdev->cp.ready) { /* use memcpy */ DRM_ERROR("CP is not ready use memcpy.\n"); - return ttm_bo_move_memcpy(bo, evict, no_wait, new_mem); + goto memcpy; } if (old_mem->mem_type == TTM_PL_VRAM && new_mem->mem_type == TTM_PL_SYSTEM) { - return radeon_move_vram_ram(bo, evict, interruptible, + r = radeon_move_vram_ram(bo, evict, interruptible, no_wait, new_mem); } else if (old_mem->mem_type == TTM_PL_SYSTEM && new_mem->mem_type == TTM_PL_VRAM) { - return radeon_move_ram_vram(bo, evict, interruptible, + r = radeon_move_ram_vram(bo, evict, interruptible, no_wait, new_mem); } else { r = radeon_move_blit(bo, evict, no_wait, new_mem, old_mem); - if (unlikely(r)) { - return r; - } } + + if (r) { +memcpy: + r = ttm_bo_move_memcpy(bo, evict, no_wait, new_mem); + } + return r; } @@ -429,6 +432,8 @@ static struct ttm_bo_driver radeon_bo_driver = { .sync_obj_flush = &radeon_sync_obj_flush, .sync_obj_unref = &radeon_sync_obj_unref, .sync_obj_ref = &radeon_sync_obj_ref, + .move_notify = &radeon_bo_move_notify, + .fault_reserve_notify = &radeon_bo_fault_reserve_notify, }; int radeon_ttm_init(struct radeon_device *rdev) @@ -442,13 +447,14 @@ int radeon_ttm_init(struct radeon_device *rdev) /* No others user of address space so set it to 0 */ r = ttm_bo_device_init(&rdev->mman.bdev, rdev->mman.mem_global_ref.object, - &radeon_bo_driver, DRM_FILE_PAGE_OFFSET); + &radeon_bo_driver, DRM_FILE_PAGE_OFFSET, + rdev->need_dma32); if (r) { DRM_ERROR("failed initializing buffer object driver(%d).\n", r); return r; } r = ttm_bo_init_mm(&rdev->mman.bdev, TTM_PL_VRAM, 0, - ((rdev->mc.aper_size) >> PAGE_SHIFT)); + ((rdev->mc.real_vram_size) >> PAGE_SHIFT)); if (r) { DRM_ERROR("Failed initializing VRAM heap.\n"); return r; @@ -465,7 +471,7 @@ int radeon_ttm_init(struct radeon_device *rdev) return r; } DRM_INFO("radeon: %uM of VRAM memory ready\n", - rdev->mc.vram_size / (1024 * 1024)); + rdev->mc.real_vram_size / (1024 * 1024)); r = ttm_bo_init_mm(&rdev->mman.bdev, TTM_PL_TT, 0, ((rdev->mc.gtt_size) >> PAGE_SHIFT)); if (r) { diff --git a/drivers/gpu/drm/radeon/rs400.c b/drivers/gpu/drm/radeon/rs400.c index cc074b5a8f7..b29affd9c5d 100644 --- a/drivers/gpu/drm/radeon/rs400.c +++ b/drivers/gpu/drm/radeon/rs400.c @@ -29,6 +29,7 @@ #include <drm/drmP.h> #include "radeon_reg.h" 
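The radeon_test_moves() function above sizes its working set so the 1 MiB test objects cover the whole GTT aperture minus the permanently resident users (IB pool, writeback page, ring buffer). A standalone sketch of that bookkeeping with made-up sizes; RADEON_IB_POOL_SIZE and the ring size are driver configuration, so the values here are assumptions:

#include <stdio.h>

int main(void)
{
	unsigned long gtt_size = 32UL << 20;     /* hypothetical 32 MiB GTT aperture */
	unsigned long ib_pool = 16 * 64 * 1024;  /* assumed IB pool: 16 IBs x 64 KiB */
	unsigned long wb_page = 4096;            /* writeback page */
	unsigned long ring_size = 1UL << 20;     /* assumed 1 MiB ring buffer */
	unsigned long test_size = 1024 * 1024;   /* matches "size" in the test */

	unsigned n = (gtt_size - ib_pool - wb_page - ring_size) / test_size;
	printf("would allocate %u GTT test objects of %lu bytes\n", n, test_size);
	return 0;
}

Each object is then filled with a self-referential pointer pattern, blitted to VRAM and back, and compared after every hop, so a single bad GART translation anywhere in the aperture shows up as a mismatching offset in the error message.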
#include "radeon.h" +#include "radeon_share.h" /* rs400,rs480 depends on : */ void r100_hdp_reset(struct radeon_device *rdev); @@ -164,7 +165,9 @@ int rs400_gart_enable(struct radeon_device *rdev) WREG32(RADEON_BUS_CNTL, tmp); } /* Table should be in 32bits address space so ignore bits above. */ - tmp = rdev->gart.table_addr & 0xfffff000; + tmp = (u32)rdev->gart.table_addr & 0xfffff000; + tmp |= (upper_32_bits(rdev->gart.table_addr) & 0xff) << 4; + WREG32_MC(RS480_GART_BASE, tmp); /* TODO: more tweaking here */ WREG32_MC(RS480_GART_FEATURE_ID, @@ -201,10 +204,17 @@ void rs400_gart_disable(struct radeon_device *rdev) int rs400_gart_set_page(struct radeon_device *rdev, int i, uint64_t addr) { + uint32_t entry; + if (i < 0 || i > rdev->gart.num_gpu_pages) { return -EINVAL; } - rdev->gart.table.ram.ptr[i] = cpu_to_le32(((uint32_t)addr) | 0xC); + + entry = (lower_32_bits(addr) & PAGE_MASK) | + ((upper_32_bits(addr) & 0xff) << 4) | + 0xc; + entry = cpu_to_le32(entry); + rdev->gart.table.ram.ptr[i] = entry; return 0; } @@ -223,10 +233,9 @@ int rs400_mc_init(struct radeon_device *rdev) rs400_gpu_init(rdev); rs400_gart_disable(rdev); - rdev->mc.gtt_location = rdev->mc.vram_size; + rdev->mc.gtt_location = rdev->mc.mc_vram_size; rdev->mc.gtt_location += (rdev->mc.gtt_size - 1); rdev->mc.gtt_location &= ~(rdev->mc.gtt_size - 1); - rdev->mc.vram_location = 0xFFFFFFFFUL; r = radeon_mc_setup(rdev); if (r) { return r; @@ -238,7 +247,7 @@ int rs400_mc_init(struct radeon_device *rdev) "programming pipes. Bad things might happen.\n"); } - tmp = rdev->mc.vram_location + rdev->mc.vram_size - 1; + tmp = rdev->mc.vram_location + rdev->mc.mc_vram_size - 1; tmp = REG_SET(RADEON_MC_FB_TOP, tmp >> 16); tmp |= REG_SET(RADEON_MC_FB_START, rdev->mc.vram_location >> 16); WREG32(RADEON_MC_FB_LOCATION, tmp); @@ -284,21 +293,12 @@ void rs400_gpu_init(struct radeon_device *rdev) */ void rs400_vram_info(struct radeon_device *rdev) { - uint32_t tom; - rs400_gart_adjust_size(rdev); /* DDR for all card after R300 & IGP */ rdev->mc.vram_is_ddr = true; rdev->mc.vram_width = 128; - /* read NB_TOM to get the amount of ram stolen for the GPU */ - tom = RREG32(RADEON_NB_TOM); - rdev->mc.vram_size = (((tom >> 16) - (tom & 0xffff) + 1) << 16); - WREG32(RADEON_CONFIG_MEMSIZE, rdev->mc.vram_size); - - /* Could aper size report 0 ? */ - rdev->mc.aper_base = drm_get_resource_start(rdev->ddev, 0); - rdev->mc.aper_size = drm_get_resource_len(rdev->ddev, 0); + r100_vram_init_sizes(rdev); } diff --git a/drivers/gpu/drm/radeon/rs600.c b/drivers/gpu/drm/radeon/rs600.c index ab0c967553e..bbea6dee4a9 100644 --- a/drivers/gpu/drm/radeon/rs600.c +++ b/drivers/gpu/drm/radeon/rs600.c @@ -223,7 +223,7 @@ int rs600_mc_init(struct radeon_device *rdev) printk(KERN_WARNING "Failed to wait MC idle while " "programming pipes. Bad things might happen.\n"); } - tmp = rdev->mc.vram_location + rdev->mc.vram_size - 1; + tmp = rdev->mc.vram_location + rdev->mc.mc_vram_size - 1; tmp = REG_SET(RS600_MC_FB_TOP, tmp >> 16); tmp |= REG_SET(RS600_MC_FB_START, rdev->mc.vram_location >> 16); WREG32_MC(RS600_MC_FB_LOCATION, tmp); @@ -301,6 +301,11 @@ void rs600_vram_info(struct radeon_device *rdev) rdev->mc.vram_width = 128; } +void rs600_bandwidth_update(struct radeon_device *rdev) +{ + /* FIXME: implement, should this be like rs690 ? 
*/ +} + /* * Indirect registers accessor diff --git a/drivers/gpu/drm/radeon/rs690.c b/drivers/gpu/drm/radeon/rs690.c index 79ba85042b5..839595b0072 100644 --- a/drivers/gpu/drm/radeon/rs690.c +++ b/drivers/gpu/drm/radeon/rs690.c @@ -28,6 +28,9 @@ #include "drmP.h" #include "radeon_reg.h" #include "radeon.h" +#include "rs690r.h" +#include "atom.h" +#include "atom-bits.h" /* rs690,rs740 depends on : */ void r100_hdp_reset(struct radeon_device *rdev); @@ -64,7 +67,7 @@ int rs690_mc_init(struct radeon_device *rdev) rs400_gart_disable(rdev); /* Setup GPU memory space */ - rdev->mc.gtt_location = rdev->mc.vram_size; + rdev->mc.gtt_location = rdev->mc.mc_vram_size; rdev->mc.gtt_location += (rdev->mc.gtt_size - 1); rdev->mc.gtt_location &= ~(rdev->mc.gtt_size - 1); rdev->mc.vram_location = 0xFFFFFFFFUL; @@ -79,7 +82,7 @@ int rs690_mc_init(struct radeon_device *rdev) printk(KERN_WARNING "Failed to wait MC idle while " "programming pipes. Bad things might happen.\n"); } - tmp = rdev->mc.vram_location + rdev->mc.vram_size - 1; + tmp = rdev->mc.vram_location + rdev->mc.mc_vram_size - 1; tmp = REG_SET(RS690_MC_FB_TOP, tmp >> 16); tmp |= REG_SET(RS690_MC_FB_START, rdev->mc.vram_location >> 16); WREG32_MC(RS690_MCCFG_FB_LOCATION, tmp); @@ -138,9 +141,82 @@ void rs690_gpu_init(struct radeon_device *rdev) /* * VRAM info. */ +void rs690_pm_info(struct radeon_device *rdev) +{ + int index = GetIndexIntoMasterTable(DATA, IntegratedSystemInfo); + struct _ATOM_INTEGRATED_SYSTEM_INFO *info; + struct _ATOM_INTEGRATED_SYSTEM_INFO_V2 *info_v2; + void *ptr; + uint16_t data_offset; + uint8_t frev, crev; + fixed20_12 tmp; + + atom_parse_data_header(rdev->mode_info.atom_context, index, NULL, + &frev, &crev, &data_offset); + ptr = rdev->mode_info.atom_context->bios + data_offset; + info = (struct _ATOM_INTEGRATED_SYSTEM_INFO *)ptr; + info_v2 = (struct _ATOM_INTEGRATED_SYSTEM_INFO_V2 *)ptr; + /* Get various system informations from bios */ + switch (crev) { + case 1: + tmp.full = rfixed_const(100); + rdev->pm.igp_sideport_mclk.full = rfixed_const(info->ulBootUpMemoryClock); + rdev->pm.igp_sideport_mclk.full = rfixed_div(rdev->pm.igp_sideport_mclk, tmp); + rdev->pm.igp_system_mclk.full = rfixed_const(le16_to_cpu(info->usK8MemoryClock)); + rdev->pm.igp_ht_link_clk.full = rfixed_const(le16_to_cpu(info->usFSBClock)); + rdev->pm.igp_ht_link_width.full = rfixed_const(info->ucHTLinkWidth); + break; + case 2: + tmp.full = rfixed_const(100); + rdev->pm.igp_sideport_mclk.full = rfixed_const(info_v2->ulBootUpSidePortClock); + rdev->pm.igp_sideport_mclk.full = rfixed_div(rdev->pm.igp_sideport_mclk, tmp); + rdev->pm.igp_system_mclk.full = rfixed_const(info_v2->ulBootUpUMAClock); + rdev->pm.igp_system_mclk.full = rfixed_div(rdev->pm.igp_system_mclk, tmp); + rdev->pm.igp_ht_link_clk.full = rfixed_const(info_v2->ulHTLinkFreq); + rdev->pm.igp_ht_link_clk.full = rfixed_div(rdev->pm.igp_ht_link_clk, tmp); + rdev->pm.igp_ht_link_width.full = rfixed_const(le16_to_cpu(info_v2->usMinHTLinkWidth)); + break; + default: + tmp.full = rfixed_const(100); + /* We assume the slower possible clock ie worst case */ + /* DDR 333Mhz */ + rdev->pm.igp_sideport_mclk.full = rfixed_const(333); + /* FIXME: system clock ? 
*/ + rdev->pm.igp_system_mclk.full = rfixed_const(100); + rdev->pm.igp_system_mclk.full = rfixed_div(rdev->pm.igp_system_mclk, tmp); + rdev->pm.igp_ht_link_clk.full = rfixed_const(200); + rdev->pm.igp_ht_link_width.full = rfixed_const(8); + DRM_ERROR("No integrated system info for your GPU, using safe default\n"); + break; + } + /* Compute various bandwidth */ + /* k8_bandwidth = (memory_clk / 2) * 2 * 8 * 0.5 = memory_clk * 4 */ + tmp.full = rfixed_const(4); + rdev->pm.k8_bandwidth.full = rfixed_mul(rdev->pm.igp_system_mclk, tmp); + /* ht_bandwidth = ht_clk * 2 * ht_width / 8 * 0.8 + * = ht_clk * ht_width / 5 + */ + tmp.full = rfixed_const(5); + rdev->pm.ht_bandwidth.full = rfixed_mul(rdev->pm.igp_ht_link_clk, + rdev->pm.igp_ht_link_width); + rdev->pm.ht_bandwidth.full = rfixed_div(rdev->pm.ht_bandwidth, tmp); + if (tmp.full < rdev->pm.max_bandwidth.full) { + /* HT link is a limiting factor */ + rdev->pm.max_bandwidth.full = tmp.full; + } + /* sideport_bandwidth = (sideport_clk / 2) * 2 * 2 * 0.7 + * = (sideport_clk * 14) / 10 + */ + tmp.full = rfixed_const(14); + rdev->pm.sideport_bandwidth.full = rfixed_mul(rdev->pm.igp_sideport_mclk, tmp); + tmp.full = rfixed_const(10); + rdev->pm.sideport_bandwidth.full = rfixed_div(rdev->pm.sideport_bandwidth, tmp); +} + void rs690_vram_info(struct radeon_device *rdev) { uint32_t tmp; + fixed20_12 a; rs400_gart_adjust_size(rdev); /* DDR for all card after R300 & IGP */ @@ -152,12 +228,409 @@ void rs690_vram_info(struct radeon_device *rdev) } else { rdev->mc.vram_width = 64; } - rdev->mc.vram_size = RREG32(RADEON_CONFIG_MEMSIZE); + rdev->mc.real_vram_size = RREG32(RADEON_CONFIG_MEMSIZE); + rdev->mc.mc_vram_size = rdev->mc.real_vram_size; rdev->mc.aper_base = drm_get_resource_start(rdev->ddev, 0); rdev->mc.aper_size = drm_get_resource_len(rdev->ddev, 0); + rs690_pm_info(rdev); + /* FIXME: we should enforce default clock in case GPU is not in + * default setup + */ + a.full = rfixed_const(100); + rdev->pm.sclk.full = rfixed_const(rdev->clock.default_sclk); + rdev->pm.sclk.full = rfixed_div(rdev->pm.sclk, a); + a.full = rfixed_const(16); + /* core_bandwidth = sclk(Mhz) * 16 */ + rdev->pm.core_bandwidth.full = rfixed_div(rdev->pm.sclk, a); +} + +void rs690_line_buffer_adjust(struct radeon_device *rdev, + struct drm_display_mode *mode1, + struct drm_display_mode *mode2) +{ + u32 tmp; + + /* + * Line Buffer Setup + * There is a single line buffer shared by both display controllers. + * DC_LB_MEMORY_SPLIT controls how that line buffer is shared between + * the display controllers. The paritioning can either be done + * manually or via one of four preset allocations specified in bits 1:0: + * 0 - line buffer is divided in half and shared between crtc + * 1 - D1 gets 3/4 of the line buffer, D2 gets 1/4 + * 2 - D1 gets the whole buffer + * 3 - D1 gets 1/4 of the line buffer, D2 gets 3/4 + * Setting bit 2 of DC_LB_MEMORY_SPLIT controls switches to manual + * allocation mode. In manual allocation mode, D1 always starts at 0, + * D1 end/2 is specified in bits 14:4; D2 allocation follows D1. 
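The rs690_pm_info() code above reduces each potential memory path to a single fixed-point bandwidth figure, using the simplifications spelled out in its comments. Plugging in made-up IGP clocks shows the relative scale of the three caps (values and units are purely illustrative):

#include <stdio.h>

int main(void)
{
	/* hypothetical IGP clocks */
	double sys_mclk = 400.0;        /* UMA memory clock, MHz */
	double ht_clk = 1000.0;         /* HT link clock, MHz */
	double ht_width = 16.0;         /* HT link width, bits */
	double sideport_mclk = 333.0;   /* sideport memory clock, MHz */

	/* same simplifications as the comments above */
	double k8_bw = sys_mclk * 4.0;                     /* (mclk/2) * 2 * 8 * 0.5  */
	double ht_bw = ht_clk * ht_width / 5.0;            /* clk * 2 * width/8 * 0.8 */
	double sideport_bw = sideport_mclk * 14.0 / 10.0;  /* (clk/2) * 2 * 2 * 0.7   */

	printf("k8=%.0f ht=%.0f sideport=%.0f\n", k8_bw, ht_bw, sideport_bw);
	return 0;
}

rs690_crtc_bandwidth_compute() later takes the minimum of core_bandwidth and whichever of these paths applies (sideport, or K8 plus HT) as max_bandwidth, which in turn sets the effective system clock period used for the watermarks.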
+ */ + tmp = RREG32(DC_LB_MEMORY_SPLIT) & ~DC_LB_MEMORY_SPLIT_MASK; + tmp &= ~DC_LB_MEMORY_SPLIT_SHIFT_MODE; + /* auto */ + if (mode1 && mode2) { + if (mode1->hdisplay > mode2->hdisplay) { + if (mode1->hdisplay > 2560) + tmp |= DC_LB_MEMORY_SPLIT_D1_3Q_D2_1Q; + else + tmp |= DC_LB_MEMORY_SPLIT_D1HALF_D2HALF; + } else if (mode2->hdisplay > mode1->hdisplay) { + if (mode2->hdisplay > 2560) + tmp |= DC_LB_MEMORY_SPLIT_D1_1Q_D2_3Q; + else + tmp |= DC_LB_MEMORY_SPLIT_D1HALF_D2HALF; + } else + tmp |= AVIVO_DC_LB_MEMORY_SPLIT_D1HALF_D2HALF; + } else if (mode1) { + tmp |= DC_LB_MEMORY_SPLIT_D1_ONLY; + } else if (mode2) { + tmp |= DC_LB_MEMORY_SPLIT_D1_1Q_D2_3Q; + } + WREG32(DC_LB_MEMORY_SPLIT, tmp); } +struct rs690_watermark { + u32 lb_request_fifo_depth; + fixed20_12 num_line_pair; + fixed20_12 estimated_width; + fixed20_12 worst_case_latency; + fixed20_12 consumption_rate; + fixed20_12 active_time; + fixed20_12 dbpp; + fixed20_12 priority_mark_max; + fixed20_12 priority_mark; + fixed20_12 sclk; +}; + +void rs690_crtc_bandwidth_compute(struct radeon_device *rdev, + struct radeon_crtc *crtc, + struct rs690_watermark *wm) +{ + struct drm_display_mode *mode = &crtc->base.mode; + fixed20_12 a, b, c; + fixed20_12 pclk, request_fifo_depth, tolerable_latency, estimated_width; + fixed20_12 consumption_time, line_time, chunk_time, read_delay_latency; + /* FIXME: detect IGP with sideport memory, i don't think there is any + * such product available + */ + bool sideport = false; + + if (!crtc->base.enabled) { + /* FIXME: wouldn't it better to set priority mark to maximum */ + wm->lb_request_fifo_depth = 4; + return; + } + + if (crtc->vsc.full > rfixed_const(2)) + wm->num_line_pair.full = rfixed_const(2); + else + wm->num_line_pair.full = rfixed_const(1); + + b.full = rfixed_const(mode->crtc_hdisplay); + c.full = rfixed_const(256); + a.full = rfixed_mul(wm->num_line_pair, b); + request_fifo_depth.full = rfixed_div(a, c); + if (a.full < rfixed_const(4)) { + wm->lb_request_fifo_depth = 4; + } else { + wm->lb_request_fifo_depth = rfixed_trunc(request_fifo_depth); + } + + /* Determine consumption rate + * pclk = pixel clock period(ns) = 1000 / (mode.clock / 1000) + * vtaps = number of vertical taps, + * vsc = vertical scaling ratio, defined as source/destination + * hsc = horizontal scaling ration, defined as source/destination + */ + a.full = rfixed_const(mode->clock); + b.full = rfixed_const(1000); + a.full = rfixed_div(a, b); + pclk.full = rfixed_div(b, a); + if (crtc->rmx_type != RMX_OFF) { + b.full = rfixed_const(2); + if (crtc->vsc.full > b.full) + b.full = crtc->vsc.full; + b.full = rfixed_mul(b, crtc->hsc); + c.full = rfixed_const(2); + b.full = rfixed_div(b, c); + consumption_time.full = rfixed_div(pclk, b); + } else { + consumption_time.full = pclk.full; + } + a.full = rfixed_const(1); + wm->consumption_rate.full = rfixed_div(a, consumption_time); + + + /* Determine line time + * LineTime = total time for one line of displayhtotal + * LineTime = total number of horizontal pixels + * pclk = pixel clock period(ns) + */ + a.full = rfixed_const(crtc->base.mode.crtc_htotal); + line_time.full = rfixed_mul(a, pclk); + + /* Determine active time + * ActiveTime = time of active region of display within one line, + * hactive = total number of horizontal active pixels + * htotal = total number of horizontal pixels + */ + a.full = rfixed_const(crtc->base.mode.crtc_htotal); + b.full = rfixed_const(crtc->base.mode.crtc_hdisplay); + wm->active_time.full = rfixed_mul(line_time, b); + wm->active_time.full = 
rfixed_div(wm->active_time, a); + + /* Maximun bandwidth is the minimun bandwidth of all component */ + rdev->pm.max_bandwidth = rdev->pm.core_bandwidth; + if (sideport) { + if (rdev->pm.max_bandwidth.full > rdev->pm.sideport_bandwidth.full && + rdev->pm.sideport_bandwidth.full) + rdev->pm.max_bandwidth = rdev->pm.sideport_bandwidth; + read_delay_latency.full = rfixed_const(370 * 800 * 1000); + read_delay_latency.full = rfixed_div(read_delay_latency, + rdev->pm.igp_sideport_mclk); + } else { + if (rdev->pm.max_bandwidth.full > rdev->pm.k8_bandwidth.full && + rdev->pm.k8_bandwidth.full) + rdev->pm.max_bandwidth = rdev->pm.k8_bandwidth; + if (rdev->pm.max_bandwidth.full > rdev->pm.ht_bandwidth.full && + rdev->pm.ht_bandwidth.full) + rdev->pm.max_bandwidth = rdev->pm.ht_bandwidth; + read_delay_latency.full = rfixed_const(5000); + } + + /* sclk = system clocks(ns) = 1000 / max_bandwidth / 16 */ + a.full = rfixed_const(16); + rdev->pm.sclk.full = rfixed_mul(rdev->pm.max_bandwidth, a); + a.full = rfixed_const(1000); + rdev->pm.sclk.full = rfixed_div(a, rdev->pm.sclk); + /* Determine chunk time + * ChunkTime = the time it takes the DCP to send one chunk of data + * to the LB which consists of pipeline delay and inter chunk gap + * sclk = system clock(ns) + */ + a.full = rfixed_const(256 * 13); + chunk_time.full = rfixed_mul(rdev->pm.sclk, a); + a.full = rfixed_const(10); + chunk_time.full = rfixed_div(chunk_time, a); + + /* Determine the worst case latency + * NumLinePair = Number of line pairs to request(1=2 lines, 2=4 lines) + * WorstCaseLatency = worst case time from urgent to when the MC starts + * to return data + * READ_DELAY_IDLE_MAX = constant of 1us + * ChunkTime = time it takes the DCP to send one chunk of data to the LB + * which consists of pipeline delay and inter chunk gap + */ + if (rfixed_trunc(wm->num_line_pair) > 1) { + a.full = rfixed_const(3); + wm->worst_case_latency.full = rfixed_mul(a, chunk_time); + wm->worst_case_latency.full += read_delay_latency.full; + } else { + a.full = rfixed_const(2); + wm->worst_case_latency.full = rfixed_mul(a, chunk_time); + wm->worst_case_latency.full += read_delay_latency.full; + } + + /* Determine the tolerable latency + * TolerableLatency = Any given request has only 1 line time + * for the data to be returned + * LBRequestFifoDepth = Number of chunk requests the LB can + * put into the request FIFO for a display + * LineTime = total time for one line of display + * ChunkTime = the time it takes the DCP to send one chunk + * of data to the LB which consists of + * pipeline delay and inter chunk gap + */ + if ((2+wm->lb_request_fifo_depth) >= rfixed_trunc(request_fifo_depth)) { + tolerable_latency.full = line_time.full; + } else { + tolerable_latency.full = rfixed_const(wm->lb_request_fifo_depth - 2); + tolerable_latency.full = request_fifo_depth.full - tolerable_latency.full; + tolerable_latency.full = rfixed_mul(tolerable_latency, chunk_time); + tolerable_latency.full = line_time.full - tolerable_latency.full; + } + /* We assume worst case 32bits (4 bytes) */ + wm->dbpp.full = rfixed_const(4 * 8); + + /* Determine the maximum priority mark + * width = viewport width in pixels + */ + a.full = rfixed_const(16); + wm->priority_mark_max.full = rfixed_const(crtc->base.mode.crtc_hdisplay); + wm->priority_mark_max.full = rfixed_div(wm->priority_mark_max, a); + + /* Determine estimated width */ + estimated_width.full = tolerable_latency.full - wm->worst_case_latency.full; + estimated_width.full = rfixed_div(estimated_width, consumption_time); + if 
(rfixed_trunc(estimated_width) > crtc->base.mode.crtc_hdisplay) { + wm->priority_mark.full = rfixed_const(10); + } else { + a.full = rfixed_const(16); + wm->priority_mark.full = rfixed_div(estimated_width, a); + wm->priority_mark.full = wm->priority_mark_max.full - wm->priority_mark.full; + } +} + +void rs690_bandwidth_update(struct radeon_device *rdev) +{ + struct drm_display_mode *mode0 = NULL; + struct drm_display_mode *mode1 = NULL; + struct rs690_watermark wm0; + struct rs690_watermark wm1; + u32 tmp; + fixed20_12 priority_mark02, priority_mark12, fill_rate; + fixed20_12 a, b; + + if (rdev->mode_info.crtcs[0]->base.enabled) + mode0 = &rdev->mode_info.crtcs[0]->base.mode; + if (rdev->mode_info.crtcs[1]->base.enabled) + mode1 = &rdev->mode_info.crtcs[1]->base.mode; + /* + * Set display0/1 priority up in the memory controller for + * modes if the user specifies HIGH for displaypriority + * option. + */ + if (rdev->disp_priority == 2) { + tmp = RREG32_MC(MC_INIT_MISC_LAT_TIMER); + tmp &= ~MC_DISP1R_INIT_LAT_MASK; + tmp &= ~MC_DISP0R_INIT_LAT_MASK; + if (mode1) + tmp |= (1 << MC_DISP1R_INIT_LAT_SHIFT); + if (mode0) + tmp |= (1 << MC_DISP0R_INIT_LAT_SHIFT); + WREG32_MC(MC_INIT_MISC_LAT_TIMER, tmp); + } + rs690_line_buffer_adjust(rdev, mode0, mode1); + + if ((rdev->family == CHIP_RS690) || (rdev->family == CHIP_RS740)) + WREG32(DCP_CONTROL, 0); + if ((rdev->family == CHIP_RS780) || (rdev->family == CHIP_RS880)) + WREG32(DCP_CONTROL, 2); + + rs690_crtc_bandwidth_compute(rdev, rdev->mode_info.crtcs[0], &wm0); + rs690_crtc_bandwidth_compute(rdev, rdev->mode_info.crtcs[1], &wm1); + + tmp = (wm0.lb_request_fifo_depth - 1); + tmp |= (wm1.lb_request_fifo_depth - 1) << 16; + WREG32(LB_MAX_REQ_OUTSTANDING, tmp); + + if (mode0 && mode1) { + if (rfixed_trunc(wm0.dbpp) > 64) + a.full = rfixed_mul(wm0.dbpp, wm0.num_line_pair); + else + a.full = wm0.num_line_pair.full; + if (rfixed_trunc(wm1.dbpp) > 64) + b.full = rfixed_mul(wm1.dbpp, wm1.num_line_pair); + else + b.full = wm1.num_line_pair.full; + a.full += b.full; + fill_rate.full = rfixed_div(wm0.sclk, a); + if (wm0.consumption_rate.full > fill_rate.full) { + b.full = wm0.consumption_rate.full - fill_rate.full; + b.full = rfixed_mul(b, wm0.active_time); + a.full = rfixed_mul(wm0.worst_case_latency, + wm0.consumption_rate); + a.full = a.full + b.full; + b.full = rfixed_const(16 * 1000); + priority_mark02.full = rfixed_div(a, b); + } else { + a.full = rfixed_mul(wm0.worst_case_latency, + wm0.consumption_rate); + b.full = rfixed_const(16 * 1000); + priority_mark02.full = rfixed_div(a, b); + } + if (wm1.consumption_rate.full > fill_rate.full) { + b.full = wm1.consumption_rate.full - fill_rate.full; + b.full = rfixed_mul(b, wm1.active_time); + a.full = rfixed_mul(wm1.worst_case_latency, + wm1.consumption_rate); + a.full = a.full + b.full; + b.full = rfixed_const(16 * 1000); + priority_mark12.full = rfixed_div(a, b); + } else { + a.full = rfixed_mul(wm1.worst_case_latency, + wm1.consumption_rate); + b.full = rfixed_const(16 * 1000); + priority_mark12.full = rfixed_div(a, b); + } + if (wm0.priority_mark.full > priority_mark02.full) + priority_mark02.full = wm0.priority_mark.full; + if (rfixed_trunc(priority_mark02) < 0) + priority_mark02.full = 0; + if (wm0.priority_mark_max.full > priority_mark02.full) + priority_mark02.full = wm0.priority_mark_max.full; + if (wm1.priority_mark.full > priority_mark12.full) + priority_mark12.full = wm1.priority_mark.full; + if (rfixed_trunc(priority_mark12) < 0) + priority_mark12.full = 0; + if (wm1.priority_mark_max.full > 
priority_mark12.full) + priority_mark12.full = wm1.priority_mark_max.full; + WREG32(D1MODE_PRIORITY_A_CNT, rfixed_trunc(priority_mark02)); + WREG32(D1MODE_PRIORITY_B_CNT, rfixed_trunc(priority_mark02)); + WREG32(D2MODE_PRIORITY_A_CNT, rfixed_trunc(priority_mark12)); + WREG32(D2MODE_PRIORITY_B_CNT, rfixed_trunc(priority_mark12)); + } else if (mode0) { + if (rfixed_trunc(wm0.dbpp) > 64) + a.full = rfixed_mul(wm0.dbpp, wm0.num_line_pair); + else + a.full = wm0.num_line_pair.full; + fill_rate.full = rfixed_div(wm0.sclk, a); + if (wm0.consumption_rate.full > fill_rate.full) { + b.full = wm0.consumption_rate.full - fill_rate.full; + b.full = rfixed_mul(b, wm0.active_time); + a.full = rfixed_mul(wm0.worst_case_latency, + wm0.consumption_rate); + a.full = a.full + b.full; + b.full = rfixed_const(16 * 1000); + priority_mark02.full = rfixed_div(a, b); + } else { + a.full = rfixed_mul(wm0.worst_case_latency, + wm0.consumption_rate); + b.full = rfixed_const(16 * 1000); + priority_mark02.full = rfixed_div(a, b); + } + if (wm0.priority_mark.full > priority_mark02.full) + priority_mark02.full = wm0.priority_mark.full; + if (rfixed_trunc(priority_mark02) < 0) + priority_mark02.full = 0; + if (wm0.priority_mark_max.full > priority_mark02.full) + priority_mark02.full = wm0.priority_mark_max.full; + WREG32(D1MODE_PRIORITY_A_CNT, rfixed_trunc(priority_mark02)); + WREG32(D1MODE_PRIORITY_B_CNT, rfixed_trunc(priority_mark02)); + WREG32(D2MODE_PRIORITY_A_CNT, MODE_PRIORITY_OFF); + WREG32(D2MODE_PRIORITY_B_CNT, MODE_PRIORITY_OFF); + } else { + if (rfixed_trunc(wm1.dbpp) > 64) + a.full = rfixed_mul(wm1.dbpp, wm1.num_line_pair); + else + a.full = wm1.num_line_pair.full; + fill_rate.full = rfixed_div(wm1.sclk, a); + if (wm1.consumption_rate.full > fill_rate.full) { + b.full = wm1.consumption_rate.full - fill_rate.full; + b.full = rfixed_mul(b, wm1.active_time); + a.full = rfixed_mul(wm1.worst_case_latency, + wm1.consumption_rate); + a.full = a.full + b.full; + b.full = rfixed_const(16 * 1000); + priority_mark12.full = rfixed_div(a, b); + } else { + a.full = rfixed_mul(wm1.worst_case_latency, + wm1.consumption_rate); + b.full = rfixed_const(16 * 1000); + priority_mark12.full = rfixed_div(a, b); + } + if (wm1.priority_mark.full > priority_mark12.full) + priority_mark12.full = wm1.priority_mark.full; + if (rfixed_trunc(priority_mark12) < 0) + priority_mark12.full = 0; + if (wm1.priority_mark_max.full > priority_mark12.full) + priority_mark12.full = wm1.priority_mark_max.full; + WREG32(D1MODE_PRIORITY_A_CNT, MODE_PRIORITY_OFF); + WREG32(D1MODE_PRIORITY_B_CNT, MODE_PRIORITY_OFF); + WREG32(D2MODE_PRIORITY_A_CNT, rfixed_trunc(priority_mark12)); + WREG32(D2MODE_PRIORITY_B_CNT, rfixed_trunc(priority_mark12)); + } +} /* * Indirect registers accessor diff --git a/drivers/gpu/drm/radeon/rs690r.h b/drivers/gpu/drm/radeon/rs690r.h new file mode 100644 index 00000000000..c0d9faa2175 --- /dev/null +++ b/drivers/gpu/drm/radeon/rs690r.h @@ -0,0 +1,99 @@ +/* + * Copyright 2008 Advanced Micro Devices, Inc. + * Copyright 2008 Red Hat Inc. + * Copyright 2009 Jerome Glisse. 
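rs690_bandwidth_update() above turns each watermark into a priority mark; stripped of the fixed-point plumbing the per-CRTC formula is small. Below is a rough double-precision model, assuming the units the comments imply (times in ns, rates in pixels per ns); every input value is invented.

#include <stdio.h>

/* Model of the priority-mark branch above: if the CRTC consumes pixels
 * faster than the line buffer can be refilled, the shortfall over the
 * active period is added on top of the worst-case latency term. */
static double priority_mark(double worst_latency, double consumption,
			    double fill_rate, double active_time)
{
	double a = worst_latency * consumption;

	if (consumption > fill_rate)
		a += (consumption - fill_rate) * active_time;
	return a / (16.0 * 1000.0);
}

int main(void)
{
	/* hypothetical watermark inputs for one CRTC */
	double worst_latency = 6000.0;  /* ns: read delay plus chunk times      */
	double consumption = 0.11;      /* pixels per ns at ~108 MHz, RMX off   */
	double fill_rate = 0.08;        /* pixels per ns the LB is refilled at  */
	double active_time = 11850.0;   /* ns of active pixels per line         */

	printf("priority mark ~ %.2f (truncated and clamped by the driver)\n",
	       priority_mark(worst_latency, consumption, fill_rate, active_time));
	return 0;
}

The driver then truncates the result, clamps it at zero, and compares it against the per-watermark priority_mark/priority_mark_max values before writing D1MODE/D2MODE_PRIORITY_*_CNT.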
+ * + * Permission is hereby granted, free of charge, to any person obtaining a + * copy of this software and associated documentation files (the "Software"), + * to deal in the Software without restriction, including without limitation + * the rights to use, copy, modify, merge, publish, distribute, sublicense, + * and/or sell copies of the Software, and to permit persons to whom the + * Software is furnished to do so, subject to the following conditions: + * + * The above copyright notice and this permission notice shall be included in + * all copies or substantial portions of the Software. + * + * THE SOFTWARE IS PROVIDED "AS IS", WITHOUT WARRANTY OF ANY KIND, EXPRESS OR + * IMPLIED, INCLUDING BUT NOT LIMITED TO THE WARRANTIES OF MERCHANTABILITY, + * FITNESS FOR A PARTICULAR PURPOSE AND NONINFRINGEMENT. IN NO EVENT SHALL + * THE COPYRIGHT HOLDER(S) OR AUTHOR(S) BE LIABLE FOR ANY CLAIM, DAMAGES OR + * OTHER LIABILITY, WHETHER IN AN ACTION OF CONTRACT, TORT OR OTHERWISE, + * ARISING FROM, OUT OF OR IN CONNECTION WITH THE SOFTWARE OR THE USE OR + * OTHER DEALINGS IN THE SOFTWARE. + * + * Authors: Dave Airlie + * Alex Deucher + * Jerome Glisse + */ +#ifndef RS690R_H +#define RS690R_H + +/* RS690/RS740 registers */ +#define MC_INDEX 0x0078 +# define MC_INDEX_MASK 0x1FF +# define MC_INDEX_WR_EN (1 << 9) +# define MC_INDEX_WR_ACK 0x7F +#define MC_DATA 0x007C +#define HDP_FB_LOCATION 0x0134 +#define DC_LB_MEMORY_SPLIT 0x6520 +#define DC_LB_MEMORY_SPLIT_MASK 0x00000003 +#define DC_LB_MEMORY_SPLIT_SHIFT 0 +#define DC_LB_MEMORY_SPLIT_D1HALF_D2HALF 0 +#define DC_LB_MEMORY_SPLIT_D1_3Q_D2_1Q 1 +#define DC_LB_MEMORY_SPLIT_D1_ONLY 2 +#define DC_LB_MEMORY_SPLIT_D1_1Q_D2_3Q 3 +#define DC_LB_MEMORY_SPLIT_SHIFT_MODE (1 << 2) +#define DC_LB_DISP1_END_ADR_SHIFT 4 +#define DC_LB_DISP1_END_ADR_MASK 0x00007FF0 +#define D1MODE_PRIORITY_A_CNT 0x6548 +#define MODE_PRIORITY_MARK_MASK 0x00007FFF +#define MODE_PRIORITY_OFF (1 << 16) +#define MODE_PRIORITY_ALWAYS_ON (1 << 20) +#define MODE_PRIORITY_FORCE_MASK (1 << 24) +#define D1MODE_PRIORITY_B_CNT 0x654C +#define LB_MAX_REQ_OUTSTANDING 0x6D58 +#define LB_D1_MAX_REQ_OUTSTANDING_MASK 0x0000000F +#define LB_D1_MAX_REQ_OUTSTANDING_SHIFT 0 +#define LB_D2_MAX_REQ_OUTSTANDING_MASK 0x000F0000 +#define LB_D2_MAX_REQ_OUTSTANDING_SHIFT 16 +#define DCP_CONTROL 0x6C9C +#define D2MODE_PRIORITY_A_CNT 0x6D48 +#define D2MODE_PRIORITY_B_CNT 0x6D4C + +/* MC indirect registers */ +#define MC_STATUS_IDLE (1 << 0) +#define MC_MISC_CNTL 0x18 +#define DISABLE_GTW (1 << 1) +#define GART_INDEX_REG_EN (1 << 12) +#define BLOCK_GFX_D3_EN (1 << 14) +#define GART_FEATURE_ID 0x2B +#define HANG_EN (1 << 11) +#define TLB_ENABLE (1 << 18) +#define P2P_ENABLE (1 << 19) +#define GTW_LAC_EN (1 << 25) +#define LEVEL2_GART (0 << 30) +#define LEVEL1_GART (1 << 30) +#define PDC_EN (1 << 31) +#define GART_BASE 0x2C +#define GART_CACHE_CNTRL 0x2E +# define GART_CACHE_INVALIDATE (1 << 0) +#define MC_STATUS 0x90 +#define MCCFG_FB_LOCATION 0x100 +#define MC_FB_START_MASK 0x0000FFFF +#define MC_FB_START_SHIFT 0 +#define MC_FB_TOP_MASK 0xFFFF0000 +#define MC_FB_TOP_SHIFT 16 +#define MCCFG_AGP_LOCATION 0x101 +#define MC_AGP_START_MASK 0x0000FFFF +#define MC_AGP_START_SHIFT 0 +#define MC_AGP_TOP_MASK 0xFFFF0000 +#define MC_AGP_TOP_SHIFT 16 +#define MCCFG_AGP_BASE 0x102 +#define MCCFG_AGP_BASE_2 0x103 +#define MC_INIT_MISC_LAT_TIMER 0x104 +#define MC_DISP0R_INIT_LAT_SHIFT 8 +#define MC_DISP0R_INIT_LAT_MASK 0x00000F00 +#define MC_DISP1R_INIT_LAT_SHIFT 12 +#define MC_DISP1R_INIT_LAT_MASK 0x0000F000 + +#endif diff --git 
a/drivers/gpu/drm/radeon/rv515.c b/drivers/gpu/drm/radeon/rv515.c index ffea37b1b3e..fd8f3ca716e 100644 --- a/drivers/gpu/drm/radeon/rv515.c +++ b/drivers/gpu/drm/radeon/rv515.c @@ -27,8 +27,9 @@ */ #include <linux/seq_file.h> #include "drmP.h" -#include "radeon_reg.h" +#include "rv515r.h" #include "radeon.h" +#include "radeon_share.h" /* rv515 depends on : */ void r100_hdp_reset(struct radeon_device *rdev); @@ -99,26 +100,26 @@ int rv515_mc_init(struct radeon_device *rdev) "programming pipes. Bad things might happen.\n"); } /* Write VRAM size in case we are limiting it */ - WREG32(RADEON_CONFIG_MEMSIZE, rdev->mc.vram_size); - tmp = REG_SET(RV515_MC_FB_START, rdev->mc.vram_location >> 16); + WREG32(RADEON_CONFIG_MEMSIZE, rdev->mc.real_vram_size); + tmp = REG_SET(MC_FB_START, rdev->mc.vram_location >> 16); WREG32(0x134, tmp); - tmp = rdev->mc.vram_location + rdev->mc.vram_size - 1; - tmp = REG_SET(RV515_MC_FB_TOP, tmp >> 16); - tmp |= REG_SET(RV515_MC_FB_START, rdev->mc.vram_location >> 16); - WREG32_MC(RV515_MC_FB_LOCATION, tmp); - WREG32(RS690_HDP_FB_LOCATION, rdev->mc.vram_location >> 16); + tmp = rdev->mc.vram_location + rdev->mc.mc_vram_size - 1; + tmp = REG_SET(MC_FB_TOP, tmp >> 16); + tmp |= REG_SET(MC_FB_START, rdev->mc.vram_location >> 16); + WREG32_MC(MC_FB_LOCATION, tmp); + WREG32(HDP_FB_LOCATION, rdev->mc.vram_location >> 16); WREG32(0x310, rdev->mc.vram_location); if (rdev->flags & RADEON_IS_AGP) { tmp = rdev->mc.gtt_location + rdev->mc.gtt_size - 1; - tmp = REG_SET(RV515_MC_AGP_TOP, tmp >> 16); - tmp |= REG_SET(RV515_MC_AGP_START, rdev->mc.gtt_location >> 16); - WREG32_MC(RV515_MC_AGP_LOCATION, tmp); - WREG32_MC(RV515_MC_AGP_BASE, rdev->mc.agp_base); - WREG32_MC(RV515_MC_AGP_BASE_2, 0); + tmp = REG_SET(MC_AGP_TOP, tmp >> 16); + tmp |= REG_SET(MC_AGP_START, rdev->mc.gtt_location >> 16); + WREG32_MC(MC_AGP_LOCATION, tmp); + WREG32_MC(MC_AGP_BASE, rdev->mc.agp_base); + WREG32_MC(MC_AGP_BASE_2, 0); } else { - WREG32_MC(RV515_MC_AGP_LOCATION, 0x0FFFFFFF); - WREG32_MC(RV515_MC_AGP_BASE, 0); - WREG32_MC(RV515_MC_AGP_BASE_2, 0); + WREG32_MC(MC_AGP_LOCATION, 0x0FFFFFFF); + WREG32_MC(MC_AGP_BASE, 0); + WREG32_MC(MC_AGP_BASE_2, 0); } return 0; } @@ -136,95 +137,67 @@ void rv515_mc_fini(struct radeon_device *rdev) */ void rv515_ring_start(struct radeon_device *rdev) { - unsigned gb_tile_config; int r; - /* Sub pixel 1/12 so we can have 4K rendering according to doc */ - gb_tile_config = R300_ENABLE_TILING | R300_TILE_SIZE_16; - switch (rdev->num_gb_pipes) { - case 2: - gb_tile_config |= R300_PIPE_COUNT_R300; - break; - case 3: - gb_tile_config |= R300_PIPE_COUNT_R420_3P; - break; - case 4: - gb_tile_config |= R300_PIPE_COUNT_R420; - break; - case 1: - default: - gb_tile_config |= R300_PIPE_COUNT_RV350; - break; - } - r = radeon_ring_lock(rdev, 64); if (r) { return; } - radeon_ring_write(rdev, PACKET0(RADEON_ISYNC_CNTL, 0)); - radeon_ring_write(rdev, - RADEON_ISYNC_ANY2D_IDLE3D | - RADEON_ISYNC_ANY3D_IDLE2D | - RADEON_ISYNC_WAIT_IDLEGUI | - RADEON_ISYNC_CPSCRATCH_IDLEGUI); - radeon_ring_write(rdev, PACKET0(R300_GB_TILE_CONFIG, 0)); - radeon_ring_write(rdev, gb_tile_config); - radeon_ring_write(rdev, PACKET0(RADEON_WAIT_UNTIL, 0)); + radeon_ring_write(rdev, PACKET0(ISYNC_CNTL, 0)); radeon_ring_write(rdev, - RADEON_WAIT_2D_IDLECLEAN | - RADEON_WAIT_3D_IDLECLEAN); + ISYNC_ANY2D_IDLE3D | + ISYNC_ANY3D_IDLE2D | + ISYNC_WAIT_IDLEGUI | + ISYNC_CPSCRATCH_IDLEGUI); + radeon_ring_write(rdev, PACKET0(WAIT_UNTIL, 0)); + radeon_ring_write(rdev, WAIT_2D_IDLECLEAN | WAIT_3D_IDLECLEAN); 
radeon_ring_write(rdev, PACKET0(0x170C, 0)); radeon_ring_write(rdev, 1 << 31); - radeon_ring_write(rdev, PACKET0(R300_GB_SELECT, 0)); + radeon_ring_write(rdev, PACKET0(GB_SELECT, 0)); radeon_ring_write(rdev, 0); - radeon_ring_write(rdev, PACKET0(R300_GB_ENABLE, 0)); + radeon_ring_write(rdev, PACKET0(GB_ENABLE, 0)); radeon_ring_write(rdev, 0); radeon_ring_write(rdev, PACKET0(0x42C8, 0)); radeon_ring_write(rdev, (1 << rdev->num_gb_pipes) - 1); - radeon_ring_write(rdev, PACKET0(R500_VAP_INDEX_OFFSET, 0)); + radeon_ring_write(rdev, PACKET0(VAP_INDEX_OFFSET, 0)); radeon_ring_write(rdev, 0); - radeon_ring_write(rdev, PACKET0(R300_RB3D_DSTCACHE_CTLSTAT, 0)); - radeon_ring_write(rdev, R300_RB3D_DC_FLUSH | R300_RB3D_DC_FREE); - radeon_ring_write(rdev, PACKET0(R300_RB3D_ZCACHE_CTLSTAT, 0)); - radeon_ring_write(rdev, R300_ZC_FLUSH | R300_ZC_FREE); - radeon_ring_write(rdev, PACKET0(RADEON_WAIT_UNTIL, 0)); - radeon_ring_write(rdev, - RADEON_WAIT_2D_IDLECLEAN | - RADEON_WAIT_3D_IDLECLEAN); - radeon_ring_write(rdev, PACKET0(R300_GB_AA_CONFIG, 0)); + radeon_ring_write(rdev, PACKET0(RB3D_DSTCACHE_CTLSTAT, 0)); + radeon_ring_write(rdev, RB3D_DC_FLUSH | RB3D_DC_FREE); + radeon_ring_write(rdev, PACKET0(ZB_ZCACHE_CTLSTAT, 0)); + radeon_ring_write(rdev, ZC_FLUSH | ZC_FREE); + radeon_ring_write(rdev, PACKET0(WAIT_UNTIL, 0)); + radeon_ring_write(rdev, WAIT_2D_IDLECLEAN | WAIT_3D_IDLECLEAN); + radeon_ring_write(rdev, PACKET0(GB_AA_CONFIG, 0)); radeon_ring_write(rdev, 0); - radeon_ring_write(rdev, PACKET0(R300_RB3D_DSTCACHE_CTLSTAT, 0)); - radeon_ring_write(rdev, R300_RB3D_DC_FLUSH | R300_RB3D_DC_FREE); - radeon_ring_write(rdev, PACKET0(R300_RB3D_ZCACHE_CTLSTAT, 0)); - radeon_ring_write(rdev, R300_ZC_FLUSH | R300_ZC_FREE); - radeon_ring_write(rdev, PACKET0(R300_GB_MSPOS0, 0)); - radeon_ring_write(rdev, - ((6 << R300_MS_X0_SHIFT) | - (6 << R300_MS_Y0_SHIFT) | - (6 << R300_MS_X1_SHIFT) | - (6 << R300_MS_Y1_SHIFT) | - (6 << R300_MS_X2_SHIFT) | - (6 << R300_MS_Y2_SHIFT) | - (6 << R300_MSBD0_Y_SHIFT) | - (6 << R300_MSBD0_X_SHIFT))); - radeon_ring_write(rdev, PACKET0(R300_GB_MSPOS1, 0)); + radeon_ring_write(rdev, PACKET0(RB3D_DSTCACHE_CTLSTAT, 0)); + radeon_ring_write(rdev, RB3D_DC_FLUSH | RB3D_DC_FREE); + radeon_ring_write(rdev, PACKET0(ZB_ZCACHE_CTLSTAT, 0)); + radeon_ring_write(rdev, ZC_FLUSH | ZC_FREE); + radeon_ring_write(rdev, PACKET0(GB_MSPOS0, 0)); radeon_ring_write(rdev, - ((6 << R300_MS_X3_SHIFT) | - (6 << R300_MS_Y3_SHIFT) | - (6 << R300_MS_X4_SHIFT) | - (6 << R300_MS_Y4_SHIFT) | - (6 << R300_MS_X5_SHIFT) | - (6 << R300_MS_Y5_SHIFT) | - (6 << R300_MSBD1_SHIFT))); - radeon_ring_write(rdev, PACKET0(R300_GA_ENHANCE, 0)); - radeon_ring_write(rdev, R300_GA_DEADLOCK_CNTL | R300_GA_FASTSYNC_CNTL); - radeon_ring_write(rdev, PACKET0(R300_GA_POLY_MODE, 0)); + ((6 << MS_X0_SHIFT) | + (6 << MS_Y0_SHIFT) | + (6 << MS_X1_SHIFT) | + (6 << MS_Y1_SHIFT) | + (6 << MS_X2_SHIFT) | + (6 << MS_Y2_SHIFT) | + (6 << MSBD0_Y_SHIFT) | + (6 << MSBD0_X_SHIFT))); + radeon_ring_write(rdev, PACKET0(GB_MSPOS1, 0)); radeon_ring_write(rdev, - R300_FRONT_PTYPE_TRIANGE | R300_BACK_PTYPE_TRIANGE); - radeon_ring_write(rdev, PACKET0(R300_GA_ROUND_MODE, 0)); - radeon_ring_write(rdev, - R300_GEOMETRY_ROUND_NEAREST | - R300_COLOR_ROUND_NEAREST); + ((6 << MS_X3_SHIFT) | + (6 << MS_Y3_SHIFT) | + (6 << MS_X4_SHIFT) | + (6 << MS_Y4_SHIFT) | + (6 << MS_X5_SHIFT) | + (6 << MS_Y5_SHIFT) | + (6 << MSBD1_SHIFT))); + radeon_ring_write(rdev, PACKET0(GA_ENHANCE, 0)); + radeon_ring_write(rdev, GA_DEADLOCK_CNTL | GA_FASTSYNC_CNTL); + radeon_ring_write(rdev, 
PACKET0(GA_POLY_MODE, 0)); + radeon_ring_write(rdev, FRONT_PTYPE_TRIANGE | BACK_PTYPE_TRIANGE); + radeon_ring_write(rdev, PACKET0(GA_ROUND_MODE, 0)); + radeon_ring_write(rdev, GEOMETRY_ROUND_NEAREST | COLOR_ROUND_NEAREST); radeon_ring_write(rdev, PACKET0(0x20C8, 0)); radeon_ring_write(rdev, 0); radeon_ring_unlock_commit(rdev); @@ -242,8 +215,8 @@ int rv515_mc_wait_for_idle(struct radeon_device *rdev) for (i = 0; i < rdev->usec_timeout; i++) { /* read MC_STATUS */ - tmp = RREG32_MC(RV515_MC_STATUS); - if (tmp & RV515_MC_STATUS_IDLE) { + tmp = RREG32_MC(MC_STATUS); + if (tmp & MC_STATUS_IDLE) { return 0; } DRM_UDELAY(1); @@ -291,33 +264,33 @@ int rv515_ga_reset(struct radeon_device *rdev) reinit_cp = rdev->cp.ready; rdev->cp.ready = false; for (i = 0; i < rdev->usec_timeout; i++) { - WREG32(RADEON_CP_CSQ_MODE, 0); - WREG32(RADEON_CP_CSQ_CNTL, 0); - WREG32(RADEON_RBBM_SOFT_RESET, 0x32005); - (void)RREG32(RADEON_RBBM_SOFT_RESET); + WREG32(CP_CSQ_MODE, 0); + WREG32(CP_CSQ_CNTL, 0); + WREG32(RBBM_SOFT_RESET, 0x32005); + (void)RREG32(RBBM_SOFT_RESET); udelay(200); - WREG32(RADEON_RBBM_SOFT_RESET, 0); + WREG32(RBBM_SOFT_RESET, 0); /* Wait to prevent race in RBBM_STATUS */ mdelay(1); - tmp = RREG32(RADEON_RBBM_STATUS); + tmp = RREG32(RBBM_STATUS); if (tmp & ((1 << 20) | (1 << 26))) { DRM_ERROR("VAP & CP still busy (RBBM_STATUS=0x%08X)\n", tmp); /* GA still busy soft reset it */ WREG32(0x429C, 0x200); - WREG32(R300_VAP_PVS_STATE_FLUSH_REG, 0); + WREG32(VAP_PVS_STATE_FLUSH_REG, 0); WREG32(0x43E0, 0); WREG32(0x43E4, 0); WREG32(0x24AC, 0); } /* Wait to prevent race in RBBM_STATUS */ mdelay(1); - tmp = RREG32(RADEON_RBBM_STATUS); + tmp = RREG32(RBBM_STATUS); if (!(tmp & ((1 << 20) | (1 << 26)))) { break; } } for (i = 0; i < rdev->usec_timeout; i++) { - tmp = RREG32(RADEON_RBBM_STATUS); + tmp = RREG32(RBBM_STATUS); if (!(tmp & ((1 << 20) | (1 << 26)))) { DRM_INFO("GA reset succeed (RBBM_STATUS=0x%08X)\n", tmp); @@ -331,7 +304,7 @@ int rv515_ga_reset(struct radeon_device *rdev) } DRM_UDELAY(1); } - tmp = RREG32(RADEON_RBBM_STATUS); + tmp = RREG32(RBBM_STATUS); DRM_ERROR("Failed to reset GA ! 
(RBBM_STATUS=0x%08X)\n", tmp); return -1; } @@ -341,7 +314,7 @@ int rv515_gpu_reset(struct radeon_device *rdev) uint32_t status; /* reset order likely matter */ - status = RREG32(RADEON_RBBM_STATUS); + status = RREG32(RBBM_STATUS); /* reset HDP */ r100_hdp_reset(rdev); /* reset rb2d */ @@ -353,12 +326,12 @@ int rv515_gpu_reset(struct radeon_device *rdev) rv515_ga_reset(rdev); } /* reset CP */ - status = RREG32(RADEON_RBBM_STATUS); + status = RREG32(RBBM_STATUS); if (status & (1 << 16)) { r100_cp_reset(rdev); } /* Check if GPU is idle */ - status = RREG32(RADEON_RBBM_STATUS); + status = RREG32(RBBM_STATUS); if (status & (1 << 31)) { DRM_ERROR("Failed to reset GPU (RBBM_STATUS=0x%08X)\n", status); return -1; @@ -377,8 +350,7 @@ static void rv515_vram_get_type(struct radeon_device *rdev) rdev->mc.vram_width = 128; rdev->mc.vram_is_ddr = true; - tmp = RREG32_MC(RV515_MC_CNTL); - tmp &= RV515_MEM_NUM_CHANNELS_MASK; + tmp = RREG32_MC(RV515_MC_CNTL) & MEM_NUM_CHANNELS_MASK; switch (tmp) { case 0: rdev->mc.vram_width = 64; @@ -394,11 +366,17 @@ static void rv515_vram_get_type(struct radeon_device *rdev) void rv515_vram_info(struct radeon_device *rdev) { + fixed20_12 a; + rv515_vram_get_type(rdev); - rdev->mc.vram_size = RREG32(RADEON_CONFIG_MEMSIZE); - rdev->mc.aper_base = drm_get_resource_start(rdev->ddev, 0); - rdev->mc.aper_size = drm_get_resource_len(rdev->ddev, 0); + r100_vram_init_sizes(rdev); + /* FIXME: we should enforce default clock in case GPU is not in + * default setup + */ + a.full = rfixed_const(100); + rdev->pm.sclk.full = rfixed_const(rdev->clock.default_sclk); + rdev->pm.sclk.full = rfixed_div(rdev->pm.sclk, a); } @@ -409,35 +387,35 @@ uint32_t rv515_mc_rreg(struct radeon_device *rdev, uint32_t reg) { uint32_t r; - WREG32(R520_MC_IND_INDEX, 0x7f0000 | (reg & 0xffff)); - r = RREG32(R520_MC_IND_DATA); - WREG32(R520_MC_IND_INDEX, 0); + WREG32(MC_IND_INDEX, 0x7f0000 | (reg & 0xffff)); + r = RREG32(MC_IND_DATA); + WREG32(MC_IND_INDEX, 0); return r; } void rv515_mc_wreg(struct radeon_device *rdev, uint32_t reg, uint32_t v) { - WREG32(R520_MC_IND_INDEX, 0xff0000 | ((reg) & 0xffff)); - WREG32(R520_MC_IND_DATA, (v)); - WREG32(R520_MC_IND_INDEX, 0); + WREG32(MC_IND_INDEX, 0xff0000 | ((reg) & 0xffff)); + WREG32(MC_IND_DATA, (v)); + WREG32(MC_IND_INDEX, 0); } uint32_t rv515_pcie_rreg(struct radeon_device *rdev, uint32_t reg) { uint32_t r; - WREG32(RADEON_PCIE_INDEX, ((reg) & 0x7ff)); - (void)RREG32(RADEON_PCIE_INDEX); - r = RREG32(RADEON_PCIE_DATA); + WREG32(PCIE_INDEX, ((reg) & 0x7ff)); + (void)RREG32(PCIE_INDEX); + r = RREG32(PCIE_DATA); return r; } void rv515_pcie_wreg(struct radeon_device *rdev, uint32_t reg, uint32_t v) { - WREG32(RADEON_PCIE_INDEX, ((reg) & 0x7ff)); - (void)RREG32(RADEON_PCIE_INDEX); - WREG32(RADEON_PCIE_DATA, (v)); - (void)RREG32(RADEON_PCIE_DATA); + WREG32(PCIE_INDEX, ((reg) & 0x7ff)); + (void)RREG32(PCIE_INDEX); + WREG32(PCIE_DATA, (v)); + (void)RREG32(PCIE_DATA); } @@ -452,13 +430,13 @@ static int rv515_debugfs_pipes_info(struct seq_file *m, void *data) struct radeon_device *rdev = dev->dev_private; uint32_t tmp; - tmp = RREG32(R400_GB_PIPE_SELECT); + tmp = RREG32(GB_PIPE_SELECT); seq_printf(m, "GB_PIPE_SELECT 0x%08x\n", tmp); - tmp = RREG32(R500_SU_REG_DEST); + tmp = RREG32(SU_REG_DEST); seq_printf(m, "SU_REG_DEST 0x%08x\n", tmp); - tmp = RREG32(R300_GB_TILE_CONFIG); + tmp = RREG32(GB_TILE_CONFIG); seq_printf(m, "GB_TILE_CONFIG 0x%08x\n", tmp); - tmp = RREG32(R300_DST_PIPE_CONFIG); + tmp = RREG32(DST_PIPE_CONFIG); seq_printf(m, "DST_PIPE_CONFIG 0x%08x\n", tmp); 
return 0; } @@ -509,9 +487,9 @@ int rv515_debugfs_ga_info_init(struct radeon_device *rdev) /* * Asic initialization */ -static const unsigned r500_reg_safe_bm[159] = { +static const unsigned r500_reg_safe_bm[219] = { + 0xFFFFFFFF, 0xFFFFFFFF, 0xFFFFFFFF, 0xFFFFFFFF, 0xFFFFFFFF, 0xFFFFFFFF, 0xFFFFFFFF, 0xFFFFFFFF, - 0xFFFFFFBF, 0xFFFFFFFF, 0xFFFFFFBF, 0xFFFFFFFF, 0xFFFFFFFF, 0xFFFFFFFF, 0xFFFFFFFF, 0xFFFFFFFF, 0xFFFFFFFF, 0xFFFFFFFF, 0xFFFFFFFF, 0xFFFFFFFF, 0xFFFFFFFF, 0xFFFFFFFF, 0xFFFFFFFF, 0xFFFFFFFF, @@ -549,14 +527,575 @@ static const unsigned r500_reg_safe_bm[159] = { 0xFFFFFFFF, 0xFFFFFFFF, 0xFFFFFFFF, 0xFFFFFFFF, 0xFFFFFFFF, 0xFFFFFFFF, 0xFFFFFFFF, 0xFF80FFFF, 0x00000000, 0x00000000, 0x00000000, 0x00000000, - 0x0003FC01, 0x3FFFFCF8, 0xFE800B19, + 0x0003FC01, 0x3FFFFCF8, 0xFE800B19, 0xFFFFFFFF, + 0xFFFFFFFF, 0xFFFFFFFF, 0xFFFFFFFF, 0xFFFFFFFF, + 0xFFFFFFFF, 0xFFFFFFFF, 0xFFFFFFFF, 0xFFFFFFFF, + 0xFFFFFFFF, 0xFFFFFFFF, 0xFFFFFFFF, 0xFFFFFFFF, + 0xFFFFFFFF, 0xFFFFFFFF, 0xFFFFFFFF, 0xFFFFFFFF, + 0xFFFFFFFF, 0xFFFFFFFF, 0xFFFFFFFF, 0xFFFFFFFF, + 0xFFFFFFFF, 0xFFFFFFFF, 0xFFFFFFFF, 0xFFFFFFFF, + 0xFFFFFFFF, 0xFFFFFFFF, 0xFFFFFFFF, 0xFFFFFFFF, + 0xFFFFFFFF, 0xFFFFFFFF, 0xFFFFFFFF, 0xFFFFFFFF, + 0xFFFFFFFF, 0xFFFFFFFF, 0xFFFFFFFF, 0xFFFFFFFF, + 0xFFFFFFFF, 0xFFFFFFFF, 0xFFFFFFFF, 0xFFFFFFFF, + 0xFFFFFFFF, 0xFFFFFFFF, 0xFFFFFFFF, 0xFFFFFFFF, + 0xFFFFFFFF, 0xFFFFFFFF, 0xFFFFFFFF, 0xFFFFFFFF, + 0xFFFFFFFF, 0xFFFFFFFF, 0xFFFFFFFF, 0xFFFFFFFF, + 0xFFFFFFFF, 0xFFFFFFFF, 0xFFFFFFFF, 0xFFFFFFFF, + 0xFFFFFFFF, 0xFFFFFFFF, 0xFFFFFFFF, }; - - int rv515_init(struct radeon_device *rdev) { rdev->config.r300.reg_safe_bm = r500_reg_safe_bm; rdev->config.r300.reg_safe_bm_size = ARRAY_SIZE(r500_reg_safe_bm); return 0; } + +void atom_rv515_force_tv_scaler(struct radeon_device *rdev) +{ + + WREG32(0x659C, 0x0); + WREG32(0x6594, 0x705); + WREG32(0x65A4, 0x10001); + WREG32(0x65D8, 0x0); + WREG32(0x65B0, 0x0); + WREG32(0x65C0, 0x0); + WREG32(0x65D4, 0x0); + WREG32(0x6578, 0x0); + WREG32(0x657C, 0x841880A8); + WREG32(0x6578, 0x1); + WREG32(0x657C, 0x84208680); + WREG32(0x6578, 0x2); + WREG32(0x657C, 0xBFF880B0); + WREG32(0x6578, 0x100); + WREG32(0x657C, 0x83D88088); + WREG32(0x6578, 0x101); + WREG32(0x657C, 0x84608680); + WREG32(0x6578, 0x102); + WREG32(0x657C, 0xBFF080D0); + WREG32(0x6578, 0x200); + WREG32(0x657C, 0x83988068); + WREG32(0x6578, 0x201); + WREG32(0x657C, 0x84A08680); + WREG32(0x6578, 0x202); + WREG32(0x657C, 0xBFF080F8); + WREG32(0x6578, 0x300); + WREG32(0x657C, 0x83588058); + WREG32(0x6578, 0x301); + WREG32(0x657C, 0x84E08660); + WREG32(0x6578, 0x302); + WREG32(0x657C, 0xBFF88120); + WREG32(0x6578, 0x400); + WREG32(0x657C, 0x83188040); + WREG32(0x6578, 0x401); + WREG32(0x657C, 0x85008660); + WREG32(0x6578, 0x402); + WREG32(0x657C, 0xBFF88150); + WREG32(0x6578, 0x500); + WREG32(0x657C, 0x82D88030); + WREG32(0x6578, 0x501); + WREG32(0x657C, 0x85408640); + WREG32(0x6578, 0x502); + WREG32(0x657C, 0xBFF88180); + WREG32(0x6578, 0x600); + WREG32(0x657C, 0x82A08018); + WREG32(0x6578, 0x601); + WREG32(0x657C, 0x85808620); + WREG32(0x6578, 0x602); + WREG32(0x657C, 0xBFF081B8); + WREG32(0x6578, 0x700); + WREG32(0x657C, 0x82608010); + WREG32(0x6578, 0x701); + WREG32(0x657C, 0x85A08600); + WREG32(0x6578, 0x702); + WREG32(0x657C, 0x800081F0); + WREG32(0x6578, 0x800); + WREG32(0x657C, 0x8228BFF8); + WREG32(0x6578, 0x801); + WREG32(0x657C, 0x85E085E0); + WREG32(0x6578, 0x802); + WREG32(0x657C, 0xBFF88228); + WREG32(0x6578, 0x10000); + WREG32(0x657C, 0x82A8BF00); + WREG32(0x6578, 0x10001); + WREG32(0x657C, 
0x82A08CC0); + WREG32(0x6578, 0x10002); + WREG32(0x657C, 0x8008BEF8); + WREG32(0x6578, 0x10100); + WREG32(0x657C, 0x81F0BF28); + WREG32(0x6578, 0x10101); + WREG32(0x657C, 0x83608CA0); + WREG32(0x6578, 0x10102); + WREG32(0x657C, 0x8018BED0); + WREG32(0x6578, 0x10200); + WREG32(0x657C, 0x8148BF38); + WREG32(0x6578, 0x10201); + WREG32(0x657C, 0x84408C80); + WREG32(0x6578, 0x10202); + WREG32(0x657C, 0x8008BEB8); + WREG32(0x6578, 0x10300); + WREG32(0x657C, 0x80B0BF78); + WREG32(0x6578, 0x10301); + WREG32(0x657C, 0x85008C20); + WREG32(0x6578, 0x10302); + WREG32(0x657C, 0x8020BEA0); + WREG32(0x6578, 0x10400); + WREG32(0x657C, 0x8028BF90); + WREG32(0x6578, 0x10401); + WREG32(0x657C, 0x85E08BC0); + WREG32(0x6578, 0x10402); + WREG32(0x657C, 0x8018BE90); + WREG32(0x6578, 0x10500); + WREG32(0x657C, 0xBFB8BFB0); + WREG32(0x6578, 0x10501); + WREG32(0x657C, 0x86C08B40); + WREG32(0x6578, 0x10502); + WREG32(0x657C, 0x8010BE90); + WREG32(0x6578, 0x10600); + WREG32(0x657C, 0xBF58BFC8); + WREG32(0x6578, 0x10601); + WREG32(0x657C, 0x87A08AA0); + WREG32(0x6578, 0x10602); + WREG32(0x657C, 0x8010BE98); + WREG32(0x6578, 0x10700); + WREG32(0x657C, 0xBF10BFF0); + WREG32(0x6578, 0x10701); + WREG32(0x657C, 0x886089E0); + WREG32(0x6578, 0x10702); + WREG32(0x657C, 0x8018BEB0); + WREG32(0x6578, 0x10800); + WREG32(0x657C, 0xBED8BFE8); + WREG32(0x6578, 0x10801); + WREG32(0x657C, 0x89408940); + WREG32(0x6578, 0x10802); + WREG32(0x657C, 0xBFE8BED8); + WREG32(0x6578, 0x20000); + WREG32(0x657C, 0x80008000); + WREG32(0x6578, 0x20001); + WREG32(0x657C, 0x90008000); + WREG32(0x6578, 0x20002); + WREG32(0x657C, 0x80008000); + WREG32(0x6578, 0x20003); + WREG32(0x657C, 0x80008000); + WREG32(0x6578, 0x20100); + WREG32(0x657C, 0x80108000); + WREG32(0x6578, 0x20101); + WREG32(0x657C, 0x8FE0BF70); + WREG32(0x6578, 0x20102); + WREG32(0x657C, 0xBFE880C0); + WREG32(0x6578, 0x20103); + WREG32(0x657C, 0x80008000); + WREG32(0x6578, 0x20200); + WREG32(0x657C, 0x8018BFF8); + WREG32(0x6578, 0x20201); + WREG32(0x657C, 0x8F80BF08); + WREG32(0x6578, 0x20202); + WREG32(0x657C, 0xBFD081A0); + WREG32(0x6578, 0x20203); + WREG32(0x657C, 0xBFF88000); + WREG32(0x6578, 0x20300); + WREG32(0x657C, 0x80188000); + WREG32(0x6578, 0x20301); + WREG32(0x657C, 0x8EE0BEC0); + WREG32(0x6578, 0x20302); + WREG32(0x657C, 0xBFB082A0); + WREG32(0x6578, 0x20303); + WREG32(0x657C, 0x80008000); + WREG32(0x6578, 0x20400); + WREG32(0x657C, 0x80188000); + WREG32(0x6578, 0x20401); + WREG32(0x657C, 0x8E00BEA0); + WREG32(0x6578, 0x20402); + WREG32(0x657C, 0xBF8883C0); + WREG32(0x6578, 0x20403); + WREG32(0x657C, 0x80008000); + WREG32(0x6578, 0x20500); + WREG32(0x657C, 0x80188000); + WREG32(0x6578, 0x20501); + WREG32(0x657C, 0x8D00BE90); + WREG32(0x6578, 0x20502); + WREG32(0x657C, 0xBF588500); + WREG32(0x6578, 0x20503); + WREG32(0x657C, 0x80008008); + WREG32(0x6578, 0x20600); + WREG32(0x657C, 0x80188000); + WREG32(0x6578, 0x20601); + WREG32(0x657C, 0x8BC0BE98); + WREG32(0x6578, 0x20602); + WREG32(0x657C, 0xBF308660); + WREG32(0x6578, 0x20603); + WREG32(0x657C, 0x80008008); + WREG32(0x6578, 0x20700); + WREG32(0x657C, 0x80108000); + WREG32(0x6578, 0x20701); + WREG32(0x657C, 0x8A80BEB0); + WREG32(0x6578, 0x20702); + WREG32(0x657C, 0xBF0087C0); + WREG32(0x6578, 0x20703); + WREG32(0x657C, 0x80008008); + WREG32(0x6578, 0x20800); + WREG32(0x657C, 0x80108000); + WREG32(0x6578, 0x20801); + WREG32(0x657C, 0x8920BED0); + WREG32(0x6578, 0x20802); + WREG32(0x657C, 0xBED08920); + WREG32(0x6578, 0x20803); + WREG32(0x657C, 0x80008010); + WREG32(0x6578, 0x30000); + WREG32(0x657C, 0x90008000); + 
WREG32(0x6578, 0x30001); + WREG32(0x657C, 0x80008000); + WREG32(0x6578, 0x30100); + WREG32(0x657C, 0x8FE0BF90); + WREG32(0x6578, 0x30101); + WREG32(0x657C, 0xBFF880A0); + WREG32(0x6578, 0x30200); + WREG32(0x657C, 0x8F60BF40); + WREG32(0x6578, 0x30201); + WREG32(0x657C, 0xBFE88180); + WREG32(0x6578, 0x30300); + WREG32(0x657C, 0x8EC0BF00); + WREG32(0x6578, 0x30301); + WREG32(0x657C, 0xBFC88280); + WREG32(0x6578, 0x30400); + WREG32(0x657C, 0x8DE0BEE0); + WREG32(0x6578, 0x30401); + WREG32(0x657C, 0xBFA083A0); + WREG32(0x6578, 0x30500); + WREG32(0x657C, 0x8CE0BED0); + WREG32(0x6578, 0x30501); + WREG32(0x657C, 0xBF7884E0); + WREG32(0x6578, 0x30600); + WREG32(0x657C, 0x8BA0BED8); + WREG32(0x6578, 0x30601); + WREG32(0x657C, 0xBF508640); + WREG32(0x6578, 0x30700); + WREG32(0x657C, 0x8A60BEE8); + WREG32(0x6578, 0x30701); + WREG32(0x657C, 0xBF2087A0); + WREG32(0x6578, 0x30800); + WREG32(0x657C, 0x8900BF00); + WREG32(0x6578, 0x30801); + WREG32(0x657C, 0xBF008900); +} + +struct rv515_watermark { + u32 lb_request_fifo_depth; + fixed20_12 num_line_pair; + fixed20_12 estimated_width; + fixed20_12 worst_case_latency; + fixed20_12 consumption_rate; + fixed20_12 active_time; + fixed20_12 dbpp; + fixed20_12 priority_mark_max; + fixed20_12 priority_mark; + fixed20_12 sclk; +}; + +void rv515_crtc_bandwidth_compute(struct radeon_device *rdev, + struct radeon_crtc *crtc, + struct rv515_watermark *wm) +{ + struct drm_display_mode *mode = &crtc->base.mode; + fixed20_12 a, b, c; + fixed20_12 pclk, request_fifo_depth, tolerable_latency, estimated_width; + fixed20_12 consumption_time, line_time, chunk_time, read_delay_latency; + + if (!crtc->base.enabled) { + /* FIXME: wouldn't it better to set priority mark to maximum */ + wm->lb_request_fifo_depth = 4; + return; + } + + if (crtc->vsc.full > rfixed_const(2)) + wm->num_line_pair.full = rfixed_const(2); + else + wm->num_line_pair.full = rfixed_const(1); + + b.full = rfixed_const(mode->crtc_hdisplay); + c.full = rfixed_const(256); + a.full = rfixed_mul(wm->num_line_pair, b); + request_fifo_depth.full = rfixed_div(a, c); + if (a.full < rfixed_const(4)) { + wm->lb_request_fifo_depth = 4; + } else { + wm->lb_request_fifo_depth = rfixed_trunc(request_fifo_depth); + } + + /* Determine consumption rate + * pclk = pixel clock period(ns) = 1000 / (mode.clock / 1000) + * vtaps = number of vertical taps, + * vsc = vertical scaling ratio, defined as source/destination + * hsc = horizontal scaling ration, defined as source/destination + */ + a.full = rfixed_const(mode->clock); + b.full = rfixed_const(1000); + a.full = rfixed_div(a, b); + pclk.full = rfixed_div(b, a); + if (crtc->rmx_type != RMX_OFF) { + b.full = rfixed_const(2); + if (crtc->vsc.full > b.full) + b.full = crtc->vsc.full; + b.full = rfixed_mul(b, crtc->hsc); + c.full = rfixed_const(2); + b.full = rfixed_div(b, c); + consumption_time.full = rfixed_div(pclk, b); + } else { + consumption_time.full = pclk.full; + } + a.full = rfixed_const(1); + wm->consumption_rate.full = rfixed_div(a, consumption_time); + + + /* Determine line time + * LineTime = total time for one line of displayhtotal + * LineTime = total number of horizontal pixels + * pclk = pixel clock period(ns) + */ + a.full = rfixed_const(crtc->base.mode.crtc_htotal); + line_time.full = rfixed_mul(a, pclk); + + /* Determine active time + * ActiveTime = time of active region of display within one line, + * hactive = total number of horizontal active pixels + * htotal = total number of horizontal pixels + */ + a.full = rfixed_const(crtc->base.mode.crtc_htotal); + 
b.full = rfixed_const(crtc->base.mode.crtc_hdisplay); + wm->active_time.full = rfixed_mul(line_time, b); + wm->active_time.full = rfixed_div(wm->active_time, a); + + /* Determine chunk time + * ChunkTime = the time it takes the DCP to send one chunk of data + * to the LB which consists of pipeline delay and inter chunk gap + * sclk = system clock(Mhz) + */ + a.full = rfixed_const(600 * 1000); + chunk_time.full = rfixed_div(a, rdev->pm.sclk); + read_delay_latency.full = rfixed_const(1000); + + /* Determine the worst case latency + * NumLinePair = Number of line pairs to request(1=2 lines, 2=4 lines) + * WorstCaseLatency = worst case time from urgent to when the MC starts + * to return data + * READ_DELAY_IDLE_MAX = constant of 1us + * ChunkTime = time it takes the DCP to send one chunk of data to the LB + * which consists of pipeline delay and inter chunk gap + */ + if (rfixed_trunc(wm->num_line_pair) > 1) { + a.full = rfixed_const(3); + wm->worst_case_latency.full = rfixed_mul(a, chunk_time); + wm->worst_case_latency.full += read_delay_latency.full; + } else { + wm->worst_case_latency.full = chunk_time.full + read_delay_latency.full; + } + + /* Determine the tolerable latency + * TolerableLatency = Any given request has only 1 line time + * for the data to be returned + * LBRequestFifoDepth = Number of chunk requests the LB can + * put into the request FIFO for a display + * LineTime = total time for one line of display + * ChunkTime = the time it takes the DCP to send one chunk + * of data to the LB which consists of + * pipeline delay and inter chunk gap + */ + if ((2+wm->lb_request_fifo_depth) >= rfixed_trunc(request_fifo_depth)) { + tolerable_latency.full = line_time.full; + } else { + tolerable_latency.full = rfixed_const(wm->lb_request_fifo_depth - 2); + tolerable_latency.full = request_fifo_depth.full - tolerable_latency.full; + tolerable_latency.full = rfixed_mul(tolerable_latency, chunk_time); + tolerable_latency.full = line_time.full - tolerable_latency.full; + } + /* We assume worst case 32bits (4 bytes) */ + wm->dbpp.full = rfixed_const(2 * 16); + + /* Determine the maximum priority mark + * width = viewport width in pixels + */ + a.full = rfixed_const(16); + wm->priority_mark_max.full = rfixed_const(crtc->base.mode.crtc_hdisplay); + wm->priority_mark_max.full = rfixed_div(wm->priority_mark_max, a); + + /* Determine estimated width */ + estimated_width.full = tolerable_latency.full - wm->worst_case_latency.full; + estimated_width.full = rfixed_div(estimated_width, consumption_time); + if (rfixed_trunc(estimated_width) > crtc->base.mode.crtc_hdisplay) { + wm->priority_mark.full = rfixed_const(10); + } else { + a.full = rfixed_const(16); + wm->priority_mark.full = rfixed_div(estimated_width, a); + wm->priority_mark.full = wm->priority_mark_max.full - wm->priority_mark.full; + } +} + +void rv515_bandwidth_avivo_update(struct radeon_device *rdev) +{ + struct drm_display_mode *mode0 = NULL; + struct drm_display_mode *mode1 = NULL; + struct rv515_watermark wm0; + struct rv515_watermark wm1; + u32 tmp; + fixed20_12 priority_mark02, priority_mark12, fill_rate; + fixed20_12 a, b; + + if (rdev->mode_info.crtcs[0]->base.enabled) + mode0 = &rdev->mode_info.crtcs[0]->base.mode; + if (rdev->mode_info.crtcs[1]->base.enabled) + mode1 = &rdev->mode_info.crtcs[1]->base.mode; + rs690_line_buffer_adjust(rdev, mode0, mode1); + + rv515_crtc_bandwidth_compute(rdev, rdev->mode_info.crtcs[0], &wm0); + rv515_crtc_bandwidth_compute(rdev, rdev->mode_info.crtcs[1], &wm1); + + tmp = 
wm0.lb_request_fifo_depth; + tmp |= wm1.lb_request_fifo_depth << 16; + WREG32(LB_MAX_REQ_OUTSTANDING, tmp); + + if (mode0 && mode1) { + if (rfixed_trunc(wm0.dbpp) > 64) + a.full = rfixed_div(wm0.dbpp, wm0.num_line_pair); + else + a.full = wm0.num_line_pair.full; + if (rfixed_trunc(wm1.dbpp) > 64) + b.full = rfixed_div(wm1.dbpp, wm1.num_line_pair); + else + b.full = wm1.num_line_pair.full; + a.full += b.full; + fill_rate.full = rfixed_div(wm0.sclk, a); + if (wm0.consumption_rate.full > fill_rate.full) { + b.full = wm0.consumption_rate.full - fill_rate.full; + b.full = rfixed_mul(b, wm0.active_time); + a.full = rfixed_const(16); + b.full = rfixed_div(b, a); + a.full = rfixed_mul(wm0.worst_case_latency, + wm0.consumption_rate); + priority_mark02.full = a.full + b.full; + } else { + a.full = rfixed_mul(wm0.worst_case_latency, + wm0.consumption_rate); + b.full = rfixed_const(16 * 1000); + priority_mark02.full = rfixed_div(a, b); + } + if (wm1.consumption_rate.full > fill_rate.full) { + b.full = wm1.consumption_rate.full - fill_rate.full; + b.full = rfixed_mul(b, wm1.active_time); + a.full = rfixed_const(16); + b.full = rfixed_div(b, a); + a.full = rfixed_mul(wm1.worst_case_latency, + wm1.consumption_rate); + priority_mark12.full = a.full + b.full; + } else { + a.full = rfixed_mul(wm1.worst_case_latency, + wm1.consumption_rate); + b.full = rfixed_const(16 * 1000); + priority_mark12.full = rfixed_div(a, b); + } + if (wm0.priority_mark.full > priority_mark02.full) + priority_mark02.full = wm0.priority_mark.full; + if (rfixed_trunc(priority_mark02) < 0) + priority_mark02.full = 0; + if (wm0.priority_mark_max.full > priority_mark02.full) + priority_mark02.full = wm0.priority_mark_max.full; + if (wm1.priority_mark.full > priority_mark12.full) + priority_mark12.full = wm1.priority_mark.full; + if (rfixed_trunc(priority_mark12) < 0) + priority_mark12.full = 0; + if (wm1.priority_mark_max.full > priority_mark12.full) + priority_mark12.full = wm1.priority_mark_max.full; + WREG32(D1MODE_PRIORITY_A_CNT, rfixed_trunc(priority_mark02)); + WREG32(D1MODE_PRIORITY_B_CNT, rfixed_trunc(priority_mark02)); + WREG32(D2MODE_PRIORITY_A_CNT, rfixed_trunc(priority_mark12)); + WREG32(D2MODE_PRIORITY_B_CNT, rfixed_trunc(priority_mark12)); + } else if (mode0) { + if (rfixed_trunc(wm0.dbpp) > 64) + a.full = rfixed_div(wm0.dbpp, wm0.num_line_pair); + else + a.full = wm0.num_line_pair.full; + fill_rate.full = rfixed_div(wm0.sclk, a); + if (wm0.consumption_rate.full > fill_rate.full) { + b.full = wm0.consumption_rate.full - fill_rate.full; + b.full = rfixed_mul(b, wm0.active_time); + a.full = rfixed_const(16); + b.full = rfixed_div(b, a); + a.full = rfixed_mul(wm0.worst_case_latency, + wm0.consumption_rate); + priority_mark02.full = a.full + b.full; + } else { + a.full = rfixed_mul(wm0.worst_case_latency, + wm0.consumption_rate); + b.full = rfixed_const(16); + priority_mark02.full = rfixed_div(a, b); + } + if (wm0.priority_mark.full > priority_mark02.full) + priority_mark02.full = wm0.priority_mark.full; + if (rfixed_trunc(priority_mark02) < 0) + priority_mark02.full = 0; + if (wm0.priority_mark_max.full > priority_mark02.full) + priority_mark02.full = wm0.priority_mark_max.full; + WREG32(D1MODE_PRIORITY_A_CNT, rfixed_trunc(priority_mark02)); + WREG32(D1MODE_PRIORITY_B_CNT, rfixed_trunc(priority_mark02)); + WREG32(D2MODE_PRIORITY_A_CNT, MODE_PRIORITY_OFF); + WREG32(D2MODE_PRIORITY_B_CNT, MODE_PRIORITY_OFF); + } else { + if (rfixed_trunc(wm1.dbpp) > 64) + a.full = rfixed_div(wm1.dbpp, wm1.num_line_pair); + else + a.full = 
wm1.num_line_pair.full; + fill_rate.full = rfixed_div(wm1.sclk, a); + if (wm1.consumption_rate.full > fill_rate.full) { + b.full = wm1.consumption_rate.full - fill_rate.full; + b.full = rfixed_mul(b, wm1.active_time); + a.full = rfixed_const(16); + b.full = rfixed_div(b, a); + a.full = rfixed_mul(wm1.worst_case_latency, + wm1.consumption_rate); + priority_mark12.full = a.full + b.full; + } else { + a.full = rfixed_mul(wm1.worst_case_latency, + wm1.consumption_rate); + b.full = rfixed_const(16 * 1000); + priority_mark12.full = rfixed_div(a, b); + } + if (wm1.priority_mark.full > priority_mark12.full) + priority_mark12.full = wm1.priority_mark.full; + if (rfixed_trunc(priority_mark12) < 0) + priority_mark12.full = 0; + if (wm1.priority_mark_max.full > priority_mark12.full) + priority_mark12.full = wm1.priority_mark_max.full; + WREG32(D1MODE_PRIORITY_A_CNT, MODE_PRIORITY_OFF); + WREG32(D1MODE_PRIORITY_B_CNT, MODE_PRIORITY_OFF); + WREG32(D2MODE_PRIORITY_A_CNT, rfixed_trunc(priority_mark12)); + WREG32(D2MODE_PRIORITY_B_CNT, rfixed_trunc(priority_mark12)); + } +} + +void rv515_bandwidth_update(struct radeon_device *rdev) +{ + uint32_t tmp; + struct drm_display_mode *mode0 = NULL; + struct drm_display_mode *mode1 = NULL; + + if (rdev->mode_info.crtcs[0]->base.enabled) + mode0 = &rdev->mode_info.crtcs[0]->base.mode; + if (rdev->mode_info.crtcs[1]->base.enabled) + mode1 = &rdev->mode_info.crtcs[1]->base.mode; + /* + * Set display0/1 priority up in the memory controller for + * modes if the user specifies HIGH for displaypriority + * option. + */ + if (rdev->disp_priority == 2) { + tmp = RREG32_MC(MC_MISC_LAT_TIMER); + tmp &= ~MC_DISP1R_INIT_LAT_MASK; + tmp &= ~MC_DISP0R_INIT_LAT_MASK; + if (mode1) + tmp |= (1 << MC_DISP1R_INIT_LAT_SHIFT); + if (mode0) + tmp |= (1 << MC_DISP0R_INIT_LAT_SHIFT); + WREG32_MC(MC_MISC_LAT_TIMER, tmp); + } + rv515_bandwidth_avivo_update(rdev); +} diff --git a/drivers/gpu/drm/radeon/rv515r.h b/drivers/gpu/drm/radeon/rv515r.h new file mode 100644 index 00000000000..f3cf8403990 --- /dev/null +++ b/drivers/gpu/drm/radeon/rv515r.h @@ -0,0 +1,170 @@ +/* + * Copyright 2008 Advanced Micro Devices, Inc. + * Copyright 2008 Red Hat Inc. + * Copyright 2009 Jerome Glisse. + * + * Permission is hereby granted, free of charge, to any person obtaining a + * copy of this software and associated documentation files (the "Software"), + * to deal in the Software without restriction, including without limitation + * the rights to use, copy, modify, merge, publish, distribute, sublicense, + * and/or sell copies of the Software, and to permit persons to whom the + * Software is furnished to do so, subject to the following conditions: + * + * The above copyright notice and this permission notice shall be included in + * all copies or substantial portions of the Software. + * + * THE SOFTWARE IS PROVIDED "AS IS", WITHOUT WARRANTY OF ANY KIND, EXPRESS OR + * IMPLIED, INCLUDING BUT NOT LIMITED TO THE WARRANTIES OF MERCHANTABILITY, + * FITNESS FOR A PARTICULAR PURPOSE AND NONINFRINGEMENT. IN NO EVENT SHALL + * THE COPYRIGHT HOLDER(S) OR AUTHOR(S) BE LIABLE FOR ANY CLAIM, DAMAGES OR + * OTHER LIABILITY, WHETHER IN AN ACTION OF CONTRACT, TORT OR OTHERWISE, + * ARISING FROM, OUT OF OR IN CONNECTION WITH THE SOFTWARE OR THE USE OR + * OTHER DEALINGS IN THE SOFTWARE. 
+ * + * Authors: Dave Airlie + * Alex Deucher + * Jerome Glisse + */ +#ifndef RV515R_H +#define RV515R_H + +/* RV515 registers */ +#define PCIE_INDEX 0x0030 +#define PCIE_DATA 0x0034 +#define MC_IND_INDEX 0x0070 +#define MC_IND_WR_EN (1 << 24) +#define MC_IND_DATA 0x0074 +#define RBBM_SOFT_RESET 0x00F0 +#define CONFIG_MEMSIZE 0x00F8 +#define HDP_FB_LOCATION 0x0134 +#define CP_CSQ_CNTL 0x0740 +#define CP_CSQ_MODE 0x0744 +#define CP_CSQ_ADDR 0x07F0 +#define CP_CSQ_DATA 0x07F4 +#define CP_CSQ_STAT 0x07F8 +#define CP_CSQ2_STAT 0x07FC +#define RBBM_STATUS 0x0E40 +#define DST_PIPE_CONFIG 0x170C +#define WAIT_UNTIL 0x1720 +#define WAIT_2D_IDLE (1 << 14) +#define WAIT_3D_IDLE (1 << 15) +#define WAIT_2D_IDLECLEAN (1 << 16) +#define WAIT_3D_IDLECLEAN (1 << 17) +#define ISYNC_CNTL 0x1724 +#define ISYNC_ANY2D_IDLE3D (1 << 0) +#define ISYNC_ANY3D_IDLE2D (1 << 1) +#define ISYNC_TRIG2D_IDLE3D (1 << 2) +#define ISYNC_TRIG3D_IDLE2D (1 << 3) +#define ISYNC_WAIT_IDLEGUI (1 << 4) +#define ISYNC_CPSCRATCH_IDLEGUI (1 << 5) +#define VAP_INDEX_OFFSET 0x208C +#define VAP_PVS_STATE_FLUSH_REG 0x2284 +#define GB_ENABLE 0x4008 +#define GB_MSPOS0 0x4010 +#define MS_X0_SHIFT 0 +#define MS_Y0_SHIFT 4 +#define MS_X1_SHIFT 8 +#define MS_Y1_SHIFT 12 +#define MS_X2_SHIFT 16 +#define MS_Y2_SHIFT 20 +#define MSBD0_Y_SHIFT 24 +#define MSBD0_X_SHIFT 28 +#define GB_MSPOS1 0x4014 +#define MS_X3_SHIFT 0 +#define MS_Y3_SHIFT 4 +#define MS_X4_SHIFT 8 +#define MS_Y4_SHIFT 12 +#define MS_X5_SHIFT 16 +#define MS_Y5_SHIFT 20 +#define MSBD1_SHIFT 24 +#define GB_TILE_CONFIG 0x4018 +#define ENABLE_TILING (1 << 0) +#define PIPE_COUNT_MASK 0x0000000E +#define PIPE_COUNT_SHIFT 1 +#define TILE_SIZE_8 (0 << 4) +#define TILE_SIZE_16 (1 << 4) +#define TILE_SIZE_32 (2 << 4) +#define SUBPIXEL_1_12 (0 << 16) +#define SUBPIXEL_1_16 (1 << 16) +#define GB_SELECT 0x401C +#define GB_AA_CONFIG 0x4020 +#define GB_PIPE_SELECT 0x402C +#define GA_ENHANCE 0x4274 +#define GA_DEADLOCK_CNTL (1 << 0) +#define GA_FASTSYNC_CNTL (1 << 1) +#define GA_POLY_MODE 0x4288 +#define FRONT_PTYPE_POINT (0 << 4) +#define FRONT_PTYPE_LINE (1 << 4) +#define FRONT_PTYPE_TRIANGE (2 << 4) +#define BACK_PTYPE_POINT (0 << 7) +#define BACK_PTYPE_LINE (1 << 7) +#define BACK_PTYPE_TRIANGE (2 << 7) +#define GA_ROUND_MODE 0x428C +#define GEOMETRY_ROUND_TRUNC (0 << 0) +#define GEOMETRY_ROUND_NEAREST (1 << 0) +#define COLOR_ROUND_TRUNC (0 << 2) +#define COLOR_ROUND_NEAREST (1 << 2) +#define SU_REG_DEST 0x42C8 +#define RB3D_DSTCACHE_CTLSTAT 0x4E4C +#define RB3D_DC_FLUSH (2 << 0) +#define RB3D_DC_FREE (2 << 2) +#define RB3D_DC_FINISH (1 << 4) +#define ZB_ZCACHE_CTLSTAT 0x4F18 +#define ZC_FLUSH (1 << 0) +#define ZC_FREE (1 << 1) +#define DC_LB_MEMORY_SPLIT 0x6520 +#define DC_LB_MEMORY_SPLIT_MASK 0x00000003 +#define DC_LB_MEMORY_SPLIT_SHIFT 0 +#define DC_LB_MEMORY_SPLIT_D1HALF_D2HALF 0 +#define DC_LB_MEMORY_SPLIT_D1_3Q_D2_1Q 1 +#define DC_LB_MEMORY_SPLIT_D1_ONLY 2 +#define DC_LB_MEMORY_SPLIT_D1_1Q_D2_3Q 3 +#define DC_LB_MEMORY_SPLIT_SHIFT_MODE (1 << 2) +#define DC_LB_DISP1_END_ADR_SHIFT 4 +#define DC_LB_DISP1_END_ADR_MASK 0x00007FF0 +#define D1MODE_PRIORITY_A_CNT 0x6548 +#define MODE_PRIORITY_MARK_MASK 0x00007FFF +#define MODE_PRIORITY_OFF (1 << 16) +#define MODE_PRIORITY_ALWAYS_ON (1 << 20) +#define MODE_PRIORITY_FORCE_MASK (1 << 24) +#define D1MODE_PRIORITY_B_CNT 0x654C +#define LB_MAX_REQ_OUTSTANDING 0x6D58 +#define LB_D1_MAX_REQ_OUTSTANDING_MASK 0x0000000F +#define LB_D1_MAX_REQ_OUTSTANDING_SHIFT 0 +#define LB_D2_MAX_REQ_OUTSTANDING_MASK 0x000F0000 +#define LB_D2_MAX_REQ_OUTSTANDING_SHIFT 16 
+#define D2MODE_PRIORITY_A_CNT 0x6D48 +#define D2MODE_PRIORITY_B_CNT 0x6D4C + +/* ix[MC] registers */ +#define MC_FB_LOCATION 0x01 +#define MC_FB_START_MASK 0x0000FFFF +#define MC_FB_START_SHIFT 0 +#define MC_FB_TOP_MASK 0xFFFF0000 +#define MC_FB_TOP_SHIFT 16 +#define MC_AGP_LOCATION 0x02 +#define MC_AGP_START_MASK 0x0000FFFF +#define MC_AGP_START_SHIFT 0 +#define MC_AGP_TOP_MASK 0xFFFF0000 +#define MC_AGP_TOP_SHIFT 16 +#define MC_AGP_BASE 0x03 +#define MC_AGP_BASE_2 0x04 +#define MC_CNTL 0x5 +#define MEM_NUM_CHANNELS_MASK 0x00000003 +#define MC_STATUS 0x08 +#define MC_STATUS_IDLE (1 << 4) +#define MC_MISC_LAT_TIMER 0x09 +#define MC_CPR_INIT_LAT_MASK 0x0000000F +#define MC_VF_INIT_LAT_MASK 0x000000F0 +#define MC_DISP0R_INIT_LAT_MASK 0x00000F00 +#define MC_DISP0R_INIT_LAT_SHIFT 8 +#define MC_DISP1R_INIT_LAT_MASK 0x0000F000 +#define MC_DISP1R_INIT_LAT_SHIFT 12 +#define MC_FIXED_INIT_LAT_MASK 0x000F0000 +#define MC_E2R_INIT_LAT_MASK 0x00F00000 +#define SAME_PAGE_PRIO_MASK 0x0F000000 +#define MC_GLOBW_INIT_LAT_MASK 0xF0000000 + + +#endif + diff --git a/drivers/gpu/drm/radeon/rv770.c b/drivers/gpu/drm/radeon/rv770.c index da50cc51ede..21d8ffd5730 100644 --- a/drivers/gpu/drm/radeon/rv770.c +++ b/drivers/gpu/drm/radeon/rv770.c @@ -67,7 +67,7 @@ int rv770_mc_init(struct radeon_device *rdev) "programming pipes. Bad things might happen.\n"); } - tmp = rdev->mc.vram_location + rdev->mc.vram_size - 1; + tmp = rdev->mc.vram_location + rdev->mc.mc_vram_size - 1; tmp = REG_SET(R700_MC_FB_TOP, tmp >> 24); tmp |= REG_SET(R700_MC_FB_BASE, rdev->mc.vram_location >> 24); WREG32(R700_MC_VM_FB_LOCATION, tmp); diff --git a/drivers/gpu/drm/ttm/ttm_bo.c b/drivers/gpu/drm/ttm/ttm_bo.c index c1c407f7cca..c2b0d710d10 100644 --- a/drivers/gpu/drm/ttm/ttm_bo.c +++ b/drivers/gpu/drm/ttm/ttm_bo.c @@ -43,7 +43,6 @@ #define TTM_BO_HASH_ORDER 13 static int ttm_bo_setup_vm(struct ttm_buffer_object *bo); -static void ttm_bo_unmap_virtual(struct ttm_buffer_object *bo); static int ttm_bo_swapout(struct ttm_mem_shrink *shrink); static inline uint32_t ttm_bo_type_flags(unsigned type) @@ -224,6 +223,9 @@ static int ttm_bo_add_ttm(struct ttm_buffer_object *bo, bool zero_alloc) TTM_ASSERT_LOCKED(&bo->mutex); bo->ttm = NULL; + if (bdev->need_dma32) + page_flags |= TTM_PAGE_FLAG_DMA32; + switch (bo->type) { case ttm_bo_type_device: if (zero_alloc) @@ -304,6 +306,9 @@ static int ttm_bo_handle_move_mem(struct ttm_buffer_object *bo, } + if (bdev->driver->move_notify) + bdev->driver->move_notify(bo, mem); + if (!(old_man->flags & TTM_MEMTYPE_FLAG_FIXED) && !(new_man->flags & TTM_MEMTYPE_FLAG_FIXED)) ret = ttm_bo_move_ttm(bo, evict, no_wait, mem); @@ -655,31 +660,52 @@ retry_pre_get: return 0; } +static uint32_t ttm_bo_select_caching(struct ttm_mem_type_manager *man, + uint32_t cur_placement, + uint32_t proposed_placement) +{ + uint32_t caching = proposed_placement & TTM_PL_MASK_CACHING; + uint32_t result = proposed_placement & ~TTM_PL_MASK_CACHING; + + /** + * Keep current caching if possible. 
+ */ + + if ((cur_placement & caching) != 0) + result |= (cur_placement & caching); + else if ((man->default_caching & caching) != 0) + result |= man->default_caching; + else if ((TTM_PL_FLAG_CACHED & caching) != 0) + result |= TTM_PL_FLAG_CACHED; + else if ((TTM_PL_FLAG_WC & caching) != 0) + result |= TTM_PL_FLAG_WC; + else if ((TTM_PL_FLAG_UNCACHED & caching) != 0) + result |= TTM_PL_FLAG_UNCACHED; + + return result; +} + + static bool ttm_bo_mt_compatible(struct ttm_mem_type_manager *man, bool disallow_fixed, uint32_t mem_type, - uint32_t mask, uint32_t *res_mask) + uint32_t proposed_placement, + uint32_t *masked_placement) { uint32_t cur_flags = ttm_bo_type_flags(mem_type); if ((man->flags & TTM_MEMTYPE_FLAG_FIXED) && disallow_fixed) return false; - if ((cur_flags & mask & TTM_PL_MASK_MEM) == 0) + if ((cur_flags & proposed_placement & TTM_PL_MASK_MEM) == 0) return false; - if ((mask & man->available_caching) == 0) + if ((proposed_placement & man->available_caching) == 0) return false; - if (mask & man->default_caching) - cur_flags |= man->default_caching; - else if (mask & TTM_PL_FLAG_CACHED) - cur_flags |= TTM_PL_FLAG_CACHED; - else if (mask & TTM_PL_FLAG_WC) - cur_flags |= TTM_PL_FLAG_WC; - else - cur_flags |= TTM_PL_FLAG_UNCACHED; - *res_mask = cur_flags; + cur_flags |= (proposed_placement & man->available_caching); + + *masked_placement = cur_flags; return true; } @@ -723,6 +749,9 @@ int ttm_bo_mem_space(struct ttm_buffer_object *bo, if (!type_ok) continue; + cur_flags = ttm_bo_select_caching(man, bo->mem.placement, + cur_flags); + if (mem_type == TTM_PL_SYSTEM) break; @@ -779,6 +808,9 @@ int ttm_bo_mem_space(struct ttm_buffer_object *bo, proposed_placement, &cur_flags)) continue; + cur_flags = ttm_bo_select_caching(man, bo->mem.placement, + cur_flags); + ret = ttm_bo_mem_force_space(bdev, mem, mem_type, interruptible, no_wait); @@ -1150,13 +1182,14 @@ static int ttm_bo_force_list_clean(struct ttm_bo_device *bdev, int ttm_bo_clean_mm(struct ttm_bo_device *bdev, unsigned mem_type) { - struct ttm_mem_type_manager *man = &bdev->man[mem_type]; + struct ttm_mem_type_manager *man; int ret = -EINVAL; if (mem_type >= TTM_NUM_MEM_TYPES) { printk(KERN_ERR TTM_PFX "Illegal memory type %d\n", mem_type); return ret; } + man = &bdev->man[mem_type]; if (!man->has_type) { printk(KERN_ERR TTM_PFX "Trying to take down uninitialized " @@ -1305,7 +1338,8 @@ EXPORT_SYMBOL(ttm_bo_device_release); int ttm_bo_device_init(struct ttm_bo_device *bdev, struct ttm_mem_global *mem_glob, - struct ttm_bo_driver *driver, uint64_t file_page_offset) + struct ttm_bo_driver *driver, uint64_t file_page_offset, + bool need_dma32) { int ret = -EINVAL; @@ -1342,6 +1376,7 @@ int ttm_bo_device_init(struct ttm_bo_device *bdev, INIT_LIST_HEAD(&bdev->ddestroy); INIT_LIST_HEAD(&bdev->swap_lru); bdev->dev_mapping = NULL; + bdev->need_dma32 = need_dma32; ttm_mem_init_shrink(&bdev->shrink, ttm_bo_swapout); ret = ttm_mem_register_shrink(mem_glob, &bdev->shrink); if (unlikely(ret != 0)) { @@ -1419,6 +1454,7 @@ void ttm_bo_unmap_virtual(struct ttm_buffer_object *bo) unmap_mapping_range(bdev->dev_mapping, offset, holelen, 1); } +EXPORT_SYMBOL(ttm_bo_unmap_virtual); static void ttm_bo_vm_insert_rb(struct ttm_buffer_object *bo) { @@ -1540,6 +1576,10 @@ int ttm_bo_wait(struct ttm_buffer_object *bo, driver->sync_obj_unref(&sync_obj); driver->sync_obj_unref(&tmp_obj); spin_lock(&bo->lock); + } else { + spin_unlock(&bo->lock); + driver->sync_obj_unref(&sync_obj); + spin_lock(&bo->lock); } } return 0; diff --git 
a/drivers/gpu/drm/ttm/ttm_bo_util.c b/drivers/gpu/drm/ttm/ttm_bo_util.c index 517c8455963..ad4ada07c6c 100644 --- a/drivers/gpu/drm/ttm/ttm_bo_util.c +++ b/drivers/gpu/drm/ttm/ttm_bo_util.c @@ -34,7 +34,6 @@ #include <linux/highmem.h> #include <linux/wait.h> #include <linux/vmalloc.h> -#include <linux/version.h> #include <linux/module.h> void ttm_bo_free_old_node(struct ttm_buffer_object *bo) @@ -137,7 +136,8 @@ static int ttm_copy_io_page(void *dst, void *src, unsigned long page) } static int ttm_copy_io_ttm_page(struct ttm_tt *ttm, void *src, - unsigned long page) + unsigned long page, + pgprot_t prot) { struct page *d = ttm_tt_get_page(ttm, page); void *dst; @@ -146,17 +146,35 @@ static int ttm_copy_io_ttm_page(struct ttm_tt *ttm, void *src, return -ENOMEM; src = (void *)((unsigned long)src + (page << PAGE_SHIFT)); - dst = kmap(d); + +#ifdef CONFIG_X86 + dst = kmap_atomic_prot(d, KM_USER0, prot); +#else + if (pgprot_val(prot) != pgprot_val(PAGE_KERNEL)) + dst = vmap(&d, 1, 0, prot); + else + dst = kmap(d); +#endif if (!dst) return -ENOMEM; memcpy_fromio(dst, src, PAGE_SIZE); - kunmap(d); + +#ifdef CONFIG_X86 + kunmap_atomic(dst, KM_USER0); +#else + if (pgprot_val(prot) != pgprot_val(PAGE_KERNEL)) + vunmap(dst); + else + kunmap(d); +#endif + return 0; } static int ttm_copy_ttm_io_page(struct ttm_tt *ttm, void *dst, - unsigned long page) + unsigned long page, + pgprot_t prot) { struct page *s = ttm_tt_get_page(ttm, page); void *src; @@ -165,12 +183,28 @@ static int ttm_copy_ttm_io_page(struct ttm_tt *ttm, void *dst, return -ENOMEM; dst = (void *)((unsigned long)dst + (page << PAGE_SHIFT)); - src = kmap(s); +#ifdef CONFIG_X86 + src = kmap_atomic_prot(s, KM_USER0, prot); +#else + if (pgprot_val(prot) != pgprot_val(PAGE_KERNEL)) + src = vmap(&s, 1, 0, prot); + else + src = kmap(s); +#endif if (!src) return -ENOMEM; memcpy_toio(dst, src, PAGE_SIZE); - kunmap(s); + +#ifdef CONFIG_X86 + kunmap_atomic(src, KM_USER0); +#else + if (pgprot_val(prot) != pgprot_val(PAGE_KERNEL)) + vunmap(src); + else + kunmap(s); +#endif + return 0; } @@ -215,11 +249,17 @@ int ttm_bo_move_memcpy(struct ttm_buffer_object *bo, for (i = 0; i < new_mem->num_pages; ++i) { page = i * dir + add; - if (old_iomap == NULL) - ret = ttm_copy_ttm_io_page(ttm, new_iomap, page); - else if (new_iomap == NULL) - ret = ttm_copy_io_ttm_page(ttm, old_iomap, page); - else + if (old_iomap == NULL) { + pgprot_t prot = ttm_io_prot(old_mem->placement, + PAGE_KERNEL); + ret = ttm_copy_ttm_io_page(ttm, new_iomap, page, + prot); + } else if (new_iomap == NULL) { + pgprot_t prot = ttm_io_prot(new_mem->placement, + PAGE_KERNEL); + ret = ttm_copy_io_ttm_page(ttm, old_iomap, page, + prot); + } else ret = ttm_copy_io_page(new_iomap, old_iomap, page); if (ret) goto out1; @@ -510,8 +550,8 @@ int ttm_bo_move_accel_cleanup(struct ttm_buffer_object *bo, if (evict) { ret = ttm_bo_wait(bo, false, false, false); spin_unlock(&bo->lock); - driver->sync_obj_unref(&bo->sync_obj); - + if (tmp_obj) + driver->sync_obj_unref(&tmp_obj); if (ret) return ret; @@ -533,6 +573,8 @@ int ttm_bo_move_accel_cleanup(struct ttm_buffer_object *bo, set_bit(TTM_BO_PRIV_FLAG_MOVING, &bo->priv_flags); spin_unlock(&bo->lock); + if (tmp_obj) + driver->sync_obj_unref(&tmp_obj); ret = ttm_buffer_object_transfer(bo, &ghost_obj); if (ret) diff --git a/drivers/gpu/drm/ttm/ttm_bo_vm.c b/drivers/gpu/drm/ttm/ttm_bo_vm.c index 27b146c54fb..33de7637c0c 100644 --- a/drivers/gpu/drm/ttm/ttm_bo_vm.c +++ b/drivers/gpu/drm/ttm/ttm_bo_vm.c @@ -32,7 +32,6 @@ #include <ttm/ttm_bo_driver.h> #include 
<ttm/ttm_placement.h> #include <linux/mm.h> -#include <linux/version.h> #include <linux/rbtree.h> #include <linux/module.h> #include <linux/uaccess.h> @@ -102,6 +101,9 @@ static int ttm_bo_vm_fault(struct vm_area_struct *vma, struct vm_fault *vmf) return VM_FAULT_NOPAGE; } + if (bdev->driver->fault_reserve_notify) + bdev->driver->fault_reserve_notify(bo); + /* * Wait for buffer data in transit, due to a pipelined * move. @@ -328,7 +330,7 @@ ssize_t ttm_bo_io(struct ttm_bo_device *bdev, struct file *filp, goto out_unref; kmap_offset = dev_offset - bo->vm_node->start; - if (unlikely(kmap_offset) >= bo->num_pages) { + if (unlikely(kmap_offset >= bo->num_pages)) { ret = -EFBIG; goto out_unref; } @@ -402,7 +404,7 @@ ssize_t ttm_bo_fbdev_io(struct ttm_buffer_object *bo, const char __user *wbuf, bool dummy; kmap_offset = (*f_pos >> PAGE_SHIFT); - if (unlikely(kmap_offset) >= bo->num_pages) + if (unlikely(kmap_offset >= bo->num_pages)) return -EFBIG; page_offset = *f_pos & ~PAGE_MASK; diff --git a/drivers/gpu/drm/ttm/ttm_tt.c b/drivers/gpu/drm/ttm/ttm_tt.c index 0331fa74cd3..b8b6c4a5f98 100644 --- a/drivers/gpu/drm/ttm/ttm_tt.c +++ b/drivers/gpu/drm/ttm/ttm_tt.c @@ -28,7 +28,6 @@ * Authors: Thomas Hellstrom <thellstrom-at-vmware-dot-com> */ -#include <linux/version.h> #include <linux/vmalloc.h> #include <linux/sched.h> #include <linux/highmem.h> @@ -87,10 +86,16 @@ void ttm_tt_cache_flush(struct page *pages[], unsigned long num_pages) unsigned long i; for (i = 0; i < num_pages; ++i) { - if (pages[i]) { - unsigned long start = (unsigned long)page_address(pages[i]); - flush_dcache_range(start, start + PAGE_SIZE); - } + struct page *page = pages[i]; + void *page_virtual; + + if (unlikely(page == NULL)) + continue; + + page_virtual = kmap_atomic(page, KM_USER0); + flush_dcache_range((unsigned long) page_virtual, + (unsigned long) page_virtual + PAGE_SIZE); + kunmap_atomic(page_virtual, KM_USER0); } #else if (on_each_cpu(ttm_tt_ipi_handler, NULL, 1) != 0) @@ -132,10 +137,17 @@ static void ttm_tt_free_page_directory(struct ttm_tt *ttm) static struct page *ttm_tt_alloc_page(unsigned page_flags) { + gfp_t gfp_flags = GFP_USER; + if (page_flags & TTM_PAGE_FLAG_ZERO_ALLOC) - return alloc_page(GFP_HIGHUSER | __GFP_ZERO); + gfp_flags |= __GFP_ZERO; + + if (page_flags & TTM_PAGE_FLAG_DMA32) + gfp_flags |= __GFP_DMA32; + else + gfp_flags |= __GFP_HIGHMEM; - return alloc_page(GFP_HIGHUSER); + return alloc_page(gfp_flags); } static void ttm_tt_free_user_pages(struct ttm_tt *ttm) diff --git a/drivers/gpu/drm/via/via_irq.c b/drivers/gpu/drm/via/via_irq.c index c248c1d3726..5935b8842e8 100644 --- a/drivers/gpu/drm/via/via_irq.c +++ b/drivers/gpu/drm/via/via_irq.c @@ -183,7 +183,7 @@ int via_enable_vblank(struct drm_device *dev, int crtc) } status = VIA_READ(VIA_REG_INTERRUPT); - VIA_WRITE(VIA_REG_INTERRUPT, status & VIA_IRQ_VBLANK_ENABLE); + VIA_WRITE(VIA_REG_INTERRUPT, status | VIA_IRQ_VBLANK_ENABLE); VIA_WRITE8(0x83d4, 0x11); VIA_WRITE8(0x83d5, VIA_READ8(0x83d5) | 0x30); @@ -194,6 +194,10 @@ int via_enable_vblank(struct drm_device *dev, int crtc) void via_disable_vblank(struct drm_device *dev, int crtc) { drm_via_private_t *dev_priv = dev->dev_private; + u32 status; + + status = VIA_READ(VIA_REG_INTERRUPT); + VIA_WRITE(VIA_REG_INTERRUPT, status & ~VIA_IRQ_VBLANK_ENABLE); VIA_WRITE8(0x83d4, 0x11); VIA_WRITE8(0x83d5, VIA_READ8(0x83d5) & ~0x30); |
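A note on the fixed-point math used throughout the rs690/rv515 bandwidth code earlier in this diff: every watermark quantity (consumption rate, active time, worst-case latency, priority mark) is carried in the driver's fixed20_12 type and combined with rfixed_const/rfixed_mul/rfixed_div/rfixed_trunc. The sketch below is a minimal, standalone 20.12 model of those helpers plus the basic priority-mark expression, intended only to make the arithmetic in rs690_bandwidth_update()/rv515_bandwidth_avivo_update() easier to follow. It is not the kernel's radeon_fixed.h (whose rounding details may differ), and the input numbers are invented.

/*
 * Illustrative sketch only: plain 20.12 fixed point (20 integer bits,
 * 12 fraction bits stored in a u32), with helper names mirroring the
 * driver's usage pattern (x.full = rfixed_mul(a, b); etc.).
 */
#include <stdint.h>
#include <stdio.h>

typedef struct { uint32_t full; } fixed20_12;

static uint32_t rfixed_const(uint32_t a)          /* integer -> 20.12 */
{
	return a << 12;
}

static uint32_t rfixed_trunc(fixed20_12 a)        /* 20.12 -> integer part */
{
	return a.full >> 12;
}

static uint32_t rfixed_mul(fixed20_12 a, fixed20_12 b)
{
	/* widen to 64 bits so the intermediate product cannot overflow */
	return (uint32_t)(((uint64_t)a.full * b.full) >> 12);
}

static uint32_t rfixed_div(fixed20_12 a, fixed20_12 b)
{
	/* pre-shift the numerator to keep 12 fractional bits in the result */
	return (uint32_t)(((uint64_t)a.full << 12) / b.full);
}

int main(void)
{
	fixed20_12 latency, rate, chunk, mark, a, b;

	/* Hypothetical numbers, picked only to exercise the arithmetic. */
	latency.full = rfixed_const(900);      /* worst-case latency, ns      */
	a.full = rfixed_const(1);
	b.full = rfixed_const(6);
	rate.full = rfixed_div(a, b);          /* ~0.167 pixels consumed / ns */

	/* priority_mark ~ latency * consumption_rate / 16: roughly how many
	 * 16-pixel units the CRTC drains while a worst-case request is
	 * still outstanding in the memory controller. */
	chunk.full = rfixed_mul(latency, rate);
	b.full = rfixed_const(16);
	mark.full = rfixed_div(chunk, b);

	printf("priority mark ~= %u\n", (unsigned)rfixed_trunc(mark)); /* ~9 here */
	return 0;
}

With 12 fractional bits a u32 still leaves 20 integer bits, which is presumably why the driver can keep nanosecond-scale latencies, pixel clocks and per-line rates in one representation without overflowing 32 bits in the common case.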