Diffstat (limited to 'drivers/gpu')
-rw-r--r--  drivers/gpu/drm/Kconfig | 17
-rw-r--r--  drivers/gpu/drm/Makefile | 3
-rw-r--r--  drivers/gpu/drm/drm_auth.c | 29
-rw-r--r--  drivers/gpu/drm/drm_bufs.c | 27
-rw-r--r--  drivers/gpu/drm/drm_context.c | 10
-rw-r--r--  drivers/gpu/drm/drm_crtc.c | 2446
-rw-r--r--  drivers/gpu/drm/drm_crtc_helper.c | 826
-rw-r--r--  drivers/gpu/drm/drm_drv.c | 97
-rw-r--r--  drivers/gpu/drm/drm_edid.c | 732
-rw-r--r--  drivers/gpu/drm/drm_fops.c | 227
-rw-r--r--  drivers/gpu/drm/drm_gem.c | 111
-rw-r--r--  drivers/gpu/drm/drm_hashtab.c | 2
-rw-r--r--  drivers/gpu/drm/drm_ioctl.c | 61
-rw-r--r--  drivers/gpu/drm/drm_irq.c | 77
-rw-r--r--  drivers/gpu/drm/drm_lock.c | 42
-rw-r--r--  drivers/gpu/drm/drm_mm.c | 1
-rw-r--r--  drivers/gpu/drm/drm_modes.c | 576
-rw-r--r--  drivers/gpu/drm/drm_proc.c | 71
-rw-r--r--  drivers/gpu/drm/drm_stub.c | 142
-rw-r--r--  drivers/gpu/drm/drm_sysfs.c | 329
-rw-r--r--  drivers/gpu/drm/drm_vm.c | 7
-rw-r--r--  drivers/gpu/drm/i915/Makefile | 17
-rw-r--r--  drivers/gpu/drm/i915/dvo.h | 157
-rw-r--r--  drivers/gpu/drm/i915/dvo_ch7017.c | 454
-rw-r--r--  drivers/gpu/drm/i915/dvo_ch7xxx.c | 368
-rw-r--r--  drivers/gpu/drm/i915/dvo_ivch.c | 442
-rw-r--r--  drivers/gpu/drm/i915/dvo_sil164.c | 302
-rw-r--r--  drivers/gpu/drm/i915/dvo_tfp410.c | 335
-rw-r--r--  drivers/gpu/drm/i915/i915_dma.c | 360
-rw-r--r--  drivers/gpu/drm/i915/i915_drv.c | 42
-rw-r--r--  drivers/gpu/drm/i915/i915_drv.h | 121
-rw-r--r--  drivers/gpu/drm/i915/i915_gem.c | 1340
-rw-r--r--  drivers/gpu/drm/i915/i915_gem_proc.c | 39
-rw-r--r--  drivers/gpu/drm/i915/i915_gem_tiling.c | 8
-rw-r--r--  drivers/gpu/drm/i915/i915_irq.c | 338
-rw-r--r--  drivers/gpu/drm/i915/i915_mem.c | 3
-rw-r--r--  drivers/gpu/drm/i915/i915_opregion.c | 22
-rw-r--r--  drivers/gpu/drm/i915/i915_reg.h | 21
-rw-r--r--  drivers/gpu/drm/i915/i915_suspend.c | 6
-rw-r--r--  drivers/gpu/drm/i915/intel_bios.c | 193
-rw-r--r--  drivers/gpu/drm/i915/intel_bios.h | 405
-rw-r--r--  drivers/gpu/drm/i915/intel_crt.c | 284
-rw-r--r--  drivers/gpu/drm/i915/intel_display.c | 1618
-rw-r--r--  drivers/gpu/drm/i915/intel_drv.h | 146
-rw-r--r--  drivers/gpu/drm/i915/intel_dvo.c | 495
-rw-r--r--  drivers/gpu/drm/i915/intel_fb.c | 925
-rw-r--r--  drivers/gpu/drm/i915/intel_i2c.c | 184
-rw-r--r--  drivers/gpu/drm/i915/intel_lvds.c | 525
-rw-r--r--  drivers/gpu/drm/i915/intel_modes.c | 83
-rw-r--r--  drivers/gpu/drm/i915/intel_sdvo.c | 1128
-rw-r--r--  drivers/gpu/drm/i915/intel_sdvo_regs.h | 327
-rw-r--r--  drivers/gpu/drm/i915/intel_tv.c | 1725
-rw-r--r--  drivers/gpu/drm/mga/mga_dma.c | 8
-rw-r--r--  drivers/gpu/drm/mga/mga_irq.c | 5
-rw-r--r--  drivers/gpu/drm/r128/r128_drv.c | 6
-rw-r--r--  drivers/gpu/drm/r128/r128_drv.h | 1
-rw-r--r--  drivers/gpu/drm/r128/r128_irq.c | 2
-rw-r--r--  drivers/gpu/drm/radeon/r300_cmdbuf.c | 11
-rw-r--r--  drivers/gpu/drm/radeon/radeon_cp.c | 79
-rw-r--r--  drivers/gpu/drm/radeon/radeon_drv.c | 2
-rw-r--r--  drivers/gpu/drm/radeon/radeon_drv.h | 15
-rw-r--r--  drivers/gpu/drm/radeon/radeon_irq.c | 13
-rw-r--r--  drivers/gpu/drm/radeon/radeon_state.c | 166
-rw-r--r--  drivers/gpu/drm/via/via_irq.c | 1
-rw-r--r--  drivers/gpu/drm/via/via_map.c | 11
65 files changed, 17670 insertions(+), 896 deletions(-)
diff --git a/drivers/gpu/drm/Kconfig b/drivers/gpu/drm/Kconfig
index a8b33c2ec8d..5130b72d593 100644
--- a/drivers/gpu/drm/Kconfig
+++ b/drivers/gpu/drm/Kconfig
@@ -7,6 +7,8 @@
menuconfig DRM
tristate "Direct Rendering Manager (XFree86 4.1.0 and higher DRI support)"
depends on (AGP || AGP=n) && PCI && !EMULATED_CMPXCHG && MMU
+ select I2C
+ select I2C_ALGOBIT
help
Kernel-level support for the Direct Rendering Infrastructure (DRI)
introduced in XFree86 4.0. If you say Y here, you need to select
@@ -65,6 +67,10 @@ config DRM_I830
will load the correct one.
config DRM_I915
+ select FB_CFB_FILLRECT
+ select FB_CFB_COPYAREA
+ select FB_CFB_IMAGEBLIT
+ depends on FB
tristate "i915 driver"
help
Choose this option if you have a system that has Intel 830M, 845G,
@@ -76,6 +82,17 @@ config DRM_I915
endchoice
+config DRM_I915_KMS
+ bool "Enable modesetting on intel by default"
+ depends on DRM_I915
+ help
+ Choose this option if you want kernel modesetting enabled by default,
+ and you have a new enough userspace to support this. Running old
+ userspaces with this enabled will cause pain. Note that this causes
+ the driver to bind to PCI devices, which precludes loading things
+ like intelfb.
+
+
config DRM_MGA
tristate "Matrox g200/g400"
depends on DRM
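
The new DRM_I915_KMS option above only sets a default; the actual decision is still made at driver load time. A hedged sketch (not part of the patch) of how a driver can turn that Kconfig default into a runtime choice through a module parameter — the i915 driver does something along these lines in i915_drv.c, but the names here (`modeset`, `example_i915_init`) are illustrative:

#include <linux/module.h>
#include "drmP.h"

static struct drm_driver driver;	/* ops and features filled in elsewhere */
static int modeset = -1;		/* -1: follow the Kconfig default */
module_param(modeset, int, 0400);

static int __init example_i915_init(void)
{
#if defined(CONFIG_DRM_I915_KMS)
	/* KMS is on by default unless the user boots with modeset=0 */
	if (modeset != 0)
		driver.driver_features |= DRIVER_MODESET;
#endif
	/* modeset=1 opts in even when CONFIG_DRM_I915_KMS is not set */
	if (modeset == 1)
		driver.driver_features |= DRIVER_MODESET;

	return drm_init(&driver);
}
module_init(example_i915_init);

With that arrangement, CONFIG_DRM_I915_KMS=y flips the default while booting with modeset=0 still disables kernel modesetting.
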
diff --git a/drivers/gpu/drm/Makefile b/drivers/gpu/drm/Makefile
index 74da99495e2..30022c4a5c1 100644
--- a/drivers/gpu/drm/Makefile
+++ b/drivers/gpu/drm/Makefile
@@ -9,7 +9,8 @@ drm-y := drm_auth.o drm_bufs.o drm_cache.o \
drm_drv.o drm_fops.o drm_gem.o drm_ioctl.o drm_irq.o \
drm_lock.o drm_memory.o drm_proc.o drm_stub.o drm_vm.o \
drm_agpsupport.o drm_scatter.o ati_pcigart.o drm_pci.o \
- drm_sysfs.o drm_hashtab.o drm_sman.o drm_mm.o
+ drm_sysfs.o drm_hashtab.o drm_sman.o drm_mm.o \
+ drm_crtc.o drm_crtc_helper.o drm_modes.o drm_edid.o
drm-$(CONFIG_COMPAT) += drm_ioc32.o
diff --git a/drivers/gpu/drm/drm_auth.c b/drivers/gpu/drm/drm_auth.c
index a73462723d2..ca7a9ef5007 100644
--- a/drivers/gpu/drm/drm_auth.c
+++ b/drivers/gpu/drm/drm_auth.c
@@ -45,14 +45,15 @@
* the one with matching magic number, while holding the drm_device::struct_mutex
* lock.
*/
-static struct drm_file *drm_find_file(struct drm_device * dev, drm_magic_t magic)
+static struct drm_file *drm_find_file(struct drm_master *master, drm_magic_t magic)
{
struct drm_file *retval = NULL;
struct drm_magic_entry *pt;
struct drm_hash_item *hash;
+ struct drm_device *dev = master->minor->dev;
mutex_lock(&dev->struct_mutex);
- if (!drm_ht_find_item(&dev->magiclist, (unsigned long)magic, &hash)) {
+ if (!drm_ht_find_item(&master->magiclist, (unsigned long)magic, &hash)) {
pt = drm_hash_entry(hash, struct drm_magic_entry, hash_item);
retval = pt->priv;
}
@@ -71,11 +72,11 @@ static struct drm_file *drm_find_file(struct drm_device * dev, drm_magic_t magic
* associated with the magic number hash key in drm_device::magiclist, while holding
* the drm_device::struct_mutex lock.
*/
-static int drm_add_magic(struct drm_device * dev, struct drm_file * priv,
+static int drm_add_magic(struct drm_master *master, struct drm_file *priv,
drm_magic_t magic)
{
struct drm_magic_entry *entry;
-
+ struct drm_device *dev = master->minor->dev;
DRM_DEBUG("%d\n", magic);
entry = drm_alloc(sizeof(*entry), DRM_MEM_MAGIC);
@@ -83,11 +84,10 @@ static int drm_add_magic(struct drm_device * dev, struct drm_file * priv,
return -ENOMEM;
memset(entry, 0, sizeof(*entry));
entry->priv = priv;
-
entry->hash_item.key = (unsigned long)magic;
mutex_lock(&dev->struct_mutex);
- drm_ht_insert_item(&dev->magiclist, &entry->hash_item);
- list_add_tail(&entry->head, &dev->magicfree);
+ drm_ht_insert_item(&master->magiclist, &entry->hash_item);
+ list_add_tail(&entry->head, &master->magicfree);
mutex_unlock(&dev->struct_mutex);
return 0;
@@ -102,20 +102,21 @@ static int drm_add_magic(struct drm_device * dev, struct drm_file * priv,
* Searches and unlinks the entry in drm_device::magiclist with the magic
* number hash key, while holding the drm_device::struct_mutex lock.
*/
-static int drm_remove_magic(struct drm_device * dev, drm_magic_t magic)
+static int drm_remove_magic(struct drm_master *master, drm_magic_t magic)
{
struct drm_magic_entry *pt;
struct drm_hash_item *hash;
+ struct drm_device *dev = master->minor->dev;
DRM_DEBUG("%d\n", magic);
mutex_lock(&dev->struct_mutex);
- if (drm_ht_find_item(&dev->magiclist, (unsigned long)magic, &hash)) {
+ if (drm_ht_find_item(&master->magiclist, (unsigned long)magic, &hash)) {
mutex_unlock(&dev->struct_mutex);
return -EINVAL;
}
pt = drm_hash_entry(hash, struct drm_magic_entry, hash_item);
- drm_ht_remove_item(&dev->magiclist, hash);
+ drm_ht_remove_item(&master->magiclist, hash);
list_del(&pt->head);
mutex_unlock(&dev->struct_mutex);
@@ -153,9 +154,9 @@ int drm_getmagic(struct drm_device *dev, void *data, struct drm_file *file_priv)
++sequence; /* reserve 0 */
auth->magic = sequence++;
spin_unlock(&lock);
- } while (drm_find_file(dev, auth->magic));
+ } while (drm_find_file(file_priv->master, auth->magic));
file_priv->magic = auth->magic;
- drm_add_magic(dev, file_priv, auth->magic);
+ drm_add_magic(file_priv->master, file_priv, auth->magic);
}
DRM_DEBUG("%u\n", auth->magic);
@@ -181,9 +182,9 @@ int drm_authmagic(struct drm_device *dev, void *data,
struct drm_file *file;
DRM_DEBUG("%u\n", auth->magic);
- if ((file = drm_find_file(dev, auth->magic))) {
+ if ((file = drm_find_file(file_priv->master, auth->magic))) {
file->authenticated = 1;
- drm_remove_magic(dev, auth->magic);
+ drm_remove_magic(file_priv->master, auth->magic);
return 0;
}
return -EINVAL;
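
The per-master rework above changes where the kernel looks up magic cookies, but the userspace handshake it implements is unchanged. A sketch of that flow using libdrm's drmGetMagic()/drmAuthMagic() wrappers (error handling omitted; function names here are illustrative):

#include <xf86drm.h>

/* Client side: fetch a magic cookie for our open DRM fd. */
int client_get_cookie(int client_fd, drm_magic_t *magic)
{
	return drmGetMagic(client_fd, magic);	/* DRM_IOCTL_GET_MAGIC */
}

/*
 * Master side (e.g. the X server): authenticate the cookie a client
 * handed over; with this patch the lookup happens in the per-master
 * magiclist rather than a per-device one.
 */
int master_auth_client(int master_fd, drm_magic_t magic)
{
	return drmAuthMagic(master_fd, magic);	/* DRM_IOCTL_AUTH_MAGIC */
}

drm_authmagic() above then marks the matching drm_file as authenticated.
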
diff --git a/drivers/gpu/drm/drm_bufs.c b/drivers/gpu/drm/drm_bufs.c
index bde64b84166..72c667f9bee 100644
--- a/drivers/gpu/drm/drm_bufs.c
+++ b/drivers/gpu/drm/drm_bufs.c
@@ -54,9 +54,9 @@ static struct drm_map_list *drm_find_matching_map(struct drm_device *dev,
{
struct drm_map_list *entry;
list_for_each_entry(entry, &dev->maplist, head) {
- if (entry->map && map->type == entry->map->type &&
+ if (entry->map && (entry->master == dev->primary->master) && (map->type == entry->map->type) &&
((entry->map->offset == map->offset) ||
- (map->type == _DRM_SHM && map->flags==_DRM_CONTAINS_LOCK))) {
+ ((map->type == _DRM_SHM) && (map->flags&_DRM_CONTAINS_LOCK)))) {
return entry;
}
}
@@ -210,12 +210,12 @@ static int drm_addmap_core(struct drm_device * dev, unsigned int offset,
map->offset = (unsigned long)map->handle;
if (map->flags & _DRM_CONTAINS_LOCK) {
/* Prevent a 2nd X Server from creating a 2nd lock */
- if (dev->lock.hw_lock != NULL) {
+ if (dev->primary->master->lock.hw_lock != NULL) {
vfree(map->handle);
drm_free(map, sizeof(*map), DRM_MEM_MAPS);
return -EBUSY;
}
- dev->sigdata.lock = dev->lock.hw_lock = map->handle; /* Pointer to lock */
+ dev->sigdata.lock = dev->primary->master->lock.hw_lock = map->handle; /* Pointer to lock */
}
break;
case _DRM_AGP: {
@@ -262,6 +262,9 @@ static int drm_addmap_core(struct drm_device * dev, unsigned int offset,
DRM_DEBUG("AGP offset = 0x%08lx, size = 0x%08lx\n", map->offset, map->size);
break;
+ case _DRM_GEM:
+ DRM_ERROR("tried to rmmap GEM object\n");
+ break;
}
case _DRM_SCATTER_GATHER:
if (!dev->sg) {
@@ -319,6 +322,7 @@ static int drm_addmap_core(struct drm_device * dev, unsigned int offset,
list->user_token = list->hash.key << PAGE_SHIFT;
mutex_unlock(&dev->struct_mutex);
+ list->master = dev->primary->master;
*maplist = list;
return 0;
}
@@ -345,7 +349,7 @@ int drm_addmap_ioctl(struct drm_device *dev, void *data,
struct drm_map_list *maplist;
int err;
- if (!(capable(CAP_SYS_ADMIN) || map->type == _DRM_AGP))
+ if (!(capable(CAP_SYS_ADMIN) || map->type == _DRM_AGP || map->type == _DRM_SHM))
return -EPERM;
err = drm_addmap_core(dev, map->offset, map->size, map->type,
@@ -380,10 +384,12 @@ int drm_rmmap_locked(struct drm_device *dev, drm_local_map_t *map)
struct drm_map_list *r_list = NULL, *list_t;
drm_dma_handle_t dmah;
int found = 0;
+ struct drm_master *master;
/* Find the list entry for the map and remove it */
list_for_each_entry_safe(r_list, list_t, &dev->maplist, head) {
if (r_list->map == map) {
+ master = r_list->master;
list_del(&r_list->head);
drm_ht_remove_key(&dev->map_hash,
r_list->user_token >> PAGE_SHIFT);
@@ -409,6 +415,13 @@ int drm_rmmap_locked(struct drm_device *dev, drm_local_map_t *map)
break;
case _DRM_SHM:
vfree(map->handle);
+ if (master) {
+ if (dev->sigdata.lock == master->lock.hw_lock)
+ dev->sigdata.lock = NULL;
+ master->lock.hw_lock = NULL; /* SHM removed */
+ master->lock.file_priv = NULL;
+ wake_up_interruptible(&master->lock.lock_queue);
+ }
break;
case _DRM_AGP:
case _DRM_SCATTER_GATHER:
@@ -419,11 +432,15 @@ int drm_rmmap_locked(struct drm_device *dev, drm_local_map_t *map)
dmah.size = map->size;
__drm_pci_free(dev, &dmah);
break;
+ case _DRM_GEM:
+ DRM_ERROR("tried to rmmap GEM object\n");
+ break;
}
drm_free(map, sizeof(*map), DRM_MEM_MAPS);
return 0;
}
+EXPORT_SYMBOL(drm_rmmap_locked);
int drm_rmmap(struct drm_device *dev, drm_local_map_t *map)
{
diff --git a/drivers/gpu/drm/drm_context.c b/drivers/gpu/drm/drm_context.c
index d505f695421..809ec0f0345 100644
--- a/drivers/gpu/drm/drm_context.c
+++ b/drivers/gpu/drm/drm_context.c
@@ -256,12 +256,13 @@ static int drm_context_switch(struct drm_device * dev, int old, int new)
* hardware lock is held, clears the drm_device::context_flag and wakes up
* drm_device::context_wait.
*/
-static int drm_context_switch_complete(struct drm_device * dev, int new)
+static int drm_context_switch_complete(struct drm_device *dev,
+ struct drm_file *file_priv, int new)
{
dev->last_context = new; /* PRE/POST: This is the _only_ writer. */
dev->last_switch = jiffies;
- if (!_DRM_LOCK_IS_HELD(dev->lock.hw_lock->lock)) {
+ if (!_DRM_LOCK_IS_HELD(file_priv->master->lock.hw_lock->lock)) {
DRM_ERROR("Lock isn't held after context switch\n");
}
@@ -420,7 +421,7 @@ int drm_newctx(struct drm_device *dev, void *data,
struct drm_ctx *ctx = data;
DRM_DEBUG("%d\n", ctx->handle);
- drm_context_switch_complete(dev, ctx->handle);
+ drm_context_switch_complete(dev, file_priv, ctx->handle);
return 0;
}
@@ -442,9 +443,6 @@ int drm_rmctx(struct drm_device *dev, void *data,
struct drm_ctx *ctx = data;
DRM_DEBUG("%d\n", ctx->handle);
- if (ctx->handle == DRM_KERNEL_CONTEXT + 1) {
- file_priv->remove_auth_on_close = 1;
- }
if (ctx->handle != DRM_KERNEL_CONTEXT) {
if (dev->driver->context_dtor)
dev->driver->context_dtor(dev, ctx->handle);
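
The new drm_crtc.c below provides the core KMS object model: framebuffers, CRTCs, encoders, connectors and properties. For orientation, a hypothetical driver would bring it up roughly as follows; the mydrv_* objects and callback tables are illustrative, only the drm_* entry points come from the code below:

#include "drmP.h"
#include "drm_crtc.h"

/* Callback tables; a real driver fills these with its implementations. */
static const struct drm_crtc_funcs mydrv_crtc_funcs;
static const struct drm_encoder_funcs mydrv_encoder_funcs;
static const struct drm_connector_funcs mydrv_connector_funcs;

static struct drm_crtc mydrv_crtc;
static struct drm_encoder mydrv_encoder;
static struct drm_connector mydrv_connector;

static void mydrv_modeset_init(struct drm_device *dev)
{
	drm_mode_config_init(dev);	/* idr, object lists, standard properties */

	dev->mode_config.min_width = 0;
	dev->mode_config.min_height = 0;
	dev->mode_config.max_width = 4096;
	dev->mode_config.max_height = 4096;
	/* dev->mode_config.funcs (fb_create etc.) would also be set here */

	drm_crtc_init(dev, &mydrv_crtc, &mydrv_crtc_funcs);
	drm_encoder_init(dev, &mydrv_encoder, &mydrv_encoder_funcs,
			 DRM_MODE_ENCODER_DAC);
	drm_connector_init(dev, &mydrv_connector, &mydrv_connector_funcs,
			   DRM_MODE_CONNECTOR_VGA);
}

The drm_mode_getresources() ioctl added below then reports these objects back to userspace.
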
diff --git a/drivers/gpu/drm/drm_crtc.c b/drivers/gpu/drm/drm_crtc.c
new file mode 100644
index 00000000000..53c87254be4
--- /dev/null
+++ b/drivers/gpu/drm/drm_crtc.c
@@ -0,0 +1,2446 @@
+/*
+ * Copyright (c) 2006-2008 Intel Corporation
+ * Copyright (c) 2007 Dave Airlie <airlied@linux.ie>
+ * Copyright (c) 2008 Red Hat Inc.
+ *
+ * DRM core CRTC related functions
+ *
+ * Permission to use, copy, modify, distribute, and sell this software and its
+ * documentation for any purpose is hereby granted without fee, provided that
+ * the above copyright notice appear in all copies and that both that copyright
+ * notice and this permission notice appear in supporting documentation, and
+ * that the name of the copyright holders not be used in advertising or
+ * publicity pertaining to distribution of the software without specific,
+ * written prior permission. The copyright holders make no representations
+ * about the suitability of this software for any purpose. It is provided "as
+ * is" without express or implied warranty.
+ *
+ * THE COPYRIGHT HOLDERS DISCLAIM ALL WARRANTIES WITH REGARD TO THIS SOFTWARE,
+ * INCLUDING ALL IMPLIED WARRANTIES OF MERCHANTABILITY AND FITNESS, IN NO
+ * EVENT SHALL THE COPYRIGHT HOLDERS BE LIABLE FOR ANY SPECIAL, INDIRECT OR
+ * CONSEQUENTIAL DAMAGES OR ANY DAMAGES WHATSOEVER RESULTING FROM LOSS OF USE,
+ * DATA OR PROFITS, WHETHER IN AN ACTION OF CONTRACT, NEGLIGENCE OR OTHER
+ * TORTIOUS ACTION, ARISING OUT OF OR IN CONNECTION WITH THE USE OR PERFORMANCE
+ * OF THIS SOFTWARE.
+ *
+ * Authors:
+ * Keith Packard
+ * Eric Anholt <eric@anholt.net>
+ * Dave Airlie <airlied@linux.ie>
+ * Jesse Barnes <jesse.barnes@intel.com>
+ */
+#include <linux/list.h>
+#include "drm.h"
+#include "drmP.h"
+#include "drm_crtc.h"
+
+struct drm_prop_enum_list {
+ int type;
+ char *name;
+};
+
+/* Avoid boilerplate. I'm tired of typing. */
+#define DRM_ENUM_NAME_FN(fnname, list) \
+ char *fnname(int val) \
+ { \
+ int i; \
+ for (i = 0; i < ARRAY_SIZE(list); i++) { \
+ if (list[i].type == val) \
+ return list[i].name; \
+ } \
+ return "(unknown)"; \
+ }
+
+/*
+ * Global properties
+ */
+static struct drm_prop_enum_list drm_dpms_enum_list[] =
+{ { DRM_MODE_DPMS_ON, "On" },
+ { DRM_MODE_DPMS_STANDBY, "Standby" },
+ { DRM_MODE_DPMS_SUSPEND, "Suspend" },
+ { DRM_MODE_DPMS_OFF, "Off" }
+};
+
+DRM_ENUM_NAME_FN(drm_get_dpms_name, drm_dpms_enum_list)
+
+/*
+ * Optional properties
+ */
+static struct drm_prop_enum_list drm_scaling_mode_enum_list[] =
+{
+ { DRM_MODE_SCALE_NON_GPU, "Non-GPU" },
+ { DRM_MODE_SCALE_FULLSCREEN, "Fullscreen" },
+ { DRM_MODE_SCALE_NO_SCALE, "No scale" },
+ { DRM_MODE_SCALE_ASPECT, "Aspect" },
+};
+
+static struct drm_prop_enum_list drm_dithering_mode_enum_list[] =
+{
+ { DRM_MODE_DITHERING_OFF, "Off" },
+ { DRM_MODE_DITHERING_ON, "On" },
+};
+
+/*
+ * Non-global properties, but "required" for certain connectors.
+ */
+static struct drm_prop_enum_list drm_dvi_i_select_enum_list[] =
+{
+ { DRM_MODE_SUBCONNECTOR_Automatic, "Automatic" }, /* DVI-I and TV-out */
+ { DRM_MODE_SUBCONNECTOR_DVID, "DVI-D" }, /* DVI-I */
+ { DRM_MODE_SUBCONNECTOR_DVIA, "DVI-A" }, /* DVI-I */
+};
+
+DRM_ENUM_NAME_FN(drm_get_dvi_i_select_name, drm_dvi_i_select_enum_list)
+
+static struct drm_prop_enum_list drm_dvi_i_subconnector_enum_list[] =
+{
+ { DRM_MODE_SUBCONNECTOR_Unknown, "Unknown" }, /* DVI-I and TV-out */
+ { DRM_MODE_SUBCONNECTOR_DVID, "DVI-D" }, /* DVI-I */
+ { DRM_MODE_SUBCONNECTOR_DVIA, "DVI-A" }, /* DVI-I */
+};
+
+DRM_ENUM_NAME_FN(drm_get_dvi_i_subconnector_name,
+ drm_dvi_i_subconnector_enum_list)
+
+static struct drm_prop_enum_list drm_tv_select_enum_list[] =
+{
+ { DRM_MODE_SUBCONNECTOR_Automatic, "Automatic" }, /* DVI-I and TV-out */
+ { DRM_MODE_SUBCONNECTOR_Composite, "Composite" }, /* TV-out */
+ { DRM_MODE_SUBCONNECTOR_SVIDEO, "SVIDEO" }, /* TV-out */
+ { DRM_MODE_SUBCONNECTOR_Component, "Component" }, /* TV-out */
+};
+
+DRM_ENUM_NAME_FN(drm_get_tv_select_name, drm_tv_select_enum_list)
+
+static struct drm_prop_enum_list drm_tv_subconnector_enum_list[] =
+{
+ { DRM_MODE_SUBCONNECTOR_Unknown, "Unknown" }, /* DVI-I and TV-out */
+ { DRM_MODE_SUBCONNECTOR_Composite, "Composite" }, /* TV-out */
+ { DRM_MODE_SUBCONNECTOR_SVIDEO, "SVIDEO" }, /* TV-out */
+ { DRM_MODE_SUBCONNECTOR_Component, "Component" }, /* TV-out */
+};
+
+DRM_ENUM_NAME_FN(drm_get_tv_subconnector_name,
+ drm_tv_subconnector_enum_list)
+
+struct drm_conn_prop_enum_list {
+ int type;
+ char *name;
+ int count;
+};
+
+/*
+ * Connector and encoder types.
+ */
+static struct drm_conn_prop_enum_list drm_connector_enum_list[] =
+{ { DRM_MODE_CONNECTOR_Unknown, "Unknown", 0 },
+ { DRM_MODE_CONNECTOR_VGA, "VGA", 0 },
+ { DRM_MODE_CONNECTOR_DVII, "DVI-I", 0 },
+ { DRM_MODE_CONNECTOR_DVID, "DVI-D", 0 },
+ { DRM_MODE_CONNECTOR_DVIA, "DVI-A", 0 },
+ { DRM_MODE_CONNECTOR_Composite, "Composite", 0 },
+ { DRM_MODE_CONNECTOR_SVIDEO, "SVIDEO", 0 },
+ { DRM_MODE_CONNECTOR_LVDS, "LVDS", 0 },
+ { DRM_MODE_CONNECTOR_Component, "Component", 0 },
+ { DRM_MODE_CONNECTOR_9PinDIN, "9-pin DIN", 0 },
+ { DRM_MODE_CONNECTOR_DisplayPort, "DisplayPort", 0 },
+ { DRM_MODE_CONNECTOR_HDMIA, "HDMI Type A", 0 },
+ { DRM_MODE_CONNECTOR_HDMIB, "HDMI Type B", 0 },
+};
+
+static struct drm_prop_enum_list drm_encoder_enum_list[] =
+{ { DRM_MODE_ENCODER_NONE, "None" },
+ { DRM_MODE_ENCODER_DAC, "DAC" },
+ { DRM_MODE_ENCODER_TMDS, "TMDS" },
+ { DRM_MODE_ENCODER_LVDS, "LVDS" },
+ { DRM_MODE_ENCODER_TVDAC, "TV" },
+};
+
+char *drm_get_encoder_name(struct drm_encoder *encoder)
+{
+ static char buf[32];
+
+ snprintf(buf, 32, "%s-%d",
+ drm_encoder_enum_list[encoder->encoder_type].name,
+ encoder->base.id);
+ return buf;
+}
+
+char *drm_get_connector_name(struct drm_connector *connector)
+{
+ static char buf[32];
+
+ snprintf(buf, 32, "%s-%d",
+ drm_connector_enum_list[connector->connector_type].name,
+ connector->connector_type_id);
+ return buf;
+}
+EXPORT_SYMBOL(drm_get_connector_name);
+
+char *drm_get_connector_status_name(enum drm_connector_status status)
+{
+ if (status == connector_status_connected)
+ return "connected";
+ else if (status == connector_status_disconnected)
+ return "disconnected";
+ else
+ return "unknown";
+}
+
+/**
+ * drm_mode_object_get - allocate a new identifier
+ * @dev: DRM device
+ * @ptr: object pointer, used to generate unique ID
+ * @type: object type
+ *
+ * LOCKING:
+ * Caller must hold DRM mode_config lock.
+ *
+ * Create a unique identifier based on @ptr in @dev's identifier space. Used
+ * for tracking modes, CRTCs and connectors.
+ *
+ * RETURNS:
+ * New unique (relative to other objects in @dev) integer identifier for the
+ * object.
+ */
+static int drm_mode_object_get(struct drm_device *dev,
+ struct drm_mode_object *obj, uint32_t obj_type)
+{
+ int new_id = 0;
+ int ret;
+
+ WARN(!mutex_is_locked(&dev->mode_config.mutex),
+ "%s called w/o mode_config lock\n", __FUNCTION__);
+again:
+ if (idr_pre_get(&dev->mode_config.crtc_idr, GFP_KERNEL) == 0) {
+ DRM_ERROR("Ran out memory getting a mode number\n");
+ return -EINVAL;
+ }
+
+ ret = idr_get_new_above(&dev->mode_config.crtc_idr, obj, 1, &new_id);
+ if (ret == -EAGAIN)
+ goto again;
+
+ obj->id = new_id;
+ obj->type = obj_type;
+ return 0;
+}
+
+/**
+ * drm_mode_object_put - free an identifier
+ * @dev: DRM device
+ * @id: ID to free
+ *
+ * LOCKING:
+ * Caller must hold DRM mode_config lock.
+ *
+ * Free @id from @dev's unique identifier pool.
+ */
+static void drm_mode_object_put(struct drm_device *dev,
+ struct drm_mode_object *object)
+{
+ idr_remove(&dev->mode_config.crtc_idr, object->id);
+}
+
+void *drm_mode_object_find(struct drm_device *dev, uint32_t id, uint32_t type)
+{
+ struct drm_mode_object *obj;
+
+ obj = idr_find(&dev->mode_config.crtc_idr, id);
+ if (!obj || (obj->type != type) || (obj->id != id))
+ return NULL;
+
+ return obj;
+}
+EXPORT_SYMBOL(drm_mode_object_find);
+
+/**
+ * drm_crtc_from_fb - find the CRTC structure associated with an fb
+ * @dev: DRM device
+ * @fb: framebuffer in question
+ *
+ * LOCKING:
+ * Caller must hold mode_config lock.
+ *
+ * Find CRTC in the mode_config structure that matches @fb.
+ *
+ * RETURNS:
+ * Pointer to the CRTC or NULL if it wasn't found.
+ */
+struct drm_crtc *drm_crtc_from_fb(struct drm_device *dev,
+ struct drm_framebuffer *fb)
+{
+ struct drm_crtc *crtc;
+
+ list_for_each_entry(crtc, &dev->mode_config.crtc_list, head) {
+ if (crtc->fb == fb)
+ return crtc;
+ }
+ return NULL;
+}
+
+/**
+ * drm_framebuffer_init - initialize a framebuffer
+ * @dev: DRM device
+ *
+ * LOCKING:
+ * Caller must hold mode config lock.
+ *
+ * Allocates an ID for the framebuffer's parent mode object, sets its mode
+ * functions & device file and adds it to the master fd list.
+ *
+ * RETURNS:
+ * Zero on success, error code on failure.
+ */
+int drm_framebuffer_init(struct drm_device *dev, struct drm_framebuffer *fb,
+ const struct drm_framebuffer_funcs *funcs)
+{
+ int ret;
+
+ ret = drm_mode_object_get(dev, &fb->base, DRM_MODE_OBJECT_FB);
+ if (ret) {
+ return ret;
+ }
+
+ fb->dev = dev;
+ fb->funcs = funcs;
+ dev->mode_config.num_fb++;
+ list_add(&fb->head, &dev->mode_config.fb_list);
+
+ return 0;
+}
+EXPORT_SYMBOL(drm_framebuffer_init);
+
+/**
+ * drm_framebuffer_cleanup - remove a framebuffer object
+ * @fb: framebuffer to remove
+ *
+ * LOCKING:
+ * Caller must hold mode config lock.
+ *
+ * Scans all the CRTCs in @dev's mode_config. If they're using @fb, removes
+ * it, setting it to NULL.
+ */
+void drm_framebuffer_cleanup(struct drm_framebuffer *fb)
+{
+ struct drm_device *dev = fb->dev;
+ struct drm_crtc *crtc;
+
+ /* remove from any CRTC */
+ list_for_each_entry(crtc, &dev->mode_config.crtc_list, head) {
+ if (crtc->fb == fb)
+ crtc->fb = NULL;
+ }
+
+ drm_mode_object_put(dev, &fb->base);
+ list_del(&fb->head);
+ dev->mode_config.num_fb--;
+}
+EXPORT_SYMBOL(drm_framebuffer_cleanup);
+
+/**
+ * drm_crtc_init - Initialise a new CRTC object
+ * @dev: DRM device
+ * @crtc: CRTC object to init
+ * @funcs: callbacks for the new CRTC
+ *
+ * LOCKING:
+ * Caller must hold mode config lock.
+ *
+ * Inits a new object created as the base part of a driver crtc object.
+ */
+void drm_crtc_init(struct drm_device *dev, struct drm_crtc *crtc,
+ const struct drm_crtc_funcs *funcs)
+{
+ crtc->dev = dev;
+ crtc->funcs = funcs;
+
+ mutex_lock(&dev->mode_config.mutex);
+ drm_mode_object_get(dev, &crtc->base, DRM_MODE_OBJECT_CRTC);
+
+ list_add_tail(&crtc->head, &dev->mode_config.crtc_list);
+ dev->mode_config.num_crtc++;
+ mutex_unlock(&dev->mode_config.mutex);
+}
+EXPORT_SYMBOL(drm_crtc_init);
+
+/**
+ * drm_crtc_cleanup - Cleans up the core crtc usage.
+ * @crtc: CRTC to cleanup
+ *
+ * LOCKING:
+ * Caller must hold mode config lock.
+ *
+ * Cleanup @crtc. Removes it from the DRM modesetting space; it does
+ * NOT free the object, the caller does that.
+ */
+void drm_crtc_cleanup(struct drm_crtc *crtc)
+{
+ struct drm_device *dev = crtc->dev;
+
+ if (crtc->gamma_store) {
+ kfree(crtc->gamma_store);
+ crtc->gamma_store = NULL;
+ }
+
+ drm_mode_object_put(dev, &crtc->base);
+ list_del(&crtc->head);
+ dev->mode_config.num_crtc--;
+}
+EXPORT_SYMBOL(drm_crtc_cleanup);
+
+/**
+ * drm_mode_probed_add - add a mode to a connector's probed mode list
+ * @connector: connector the new mode is for
+ * @mode: mode data
+ *
+ * LOCKING:
+ * Caller must hold mode config lock.
+ *
+ * Add @mode to @connector's mode list for later use.
+ */
+void drm_mode_probed_add(struct drm_connector *connector,
+ struct drm_display_mode *mode)
+{
+ list_add(&mode->head, &connector->probed_modes);
+}
+EXPORT_SYMBOL(drm_mode_probed_add);
+
+/**
+ * drm_mode_remove - remove and free a mode
+ * @connector: connector list to modify
+ * @mode: mode to remove
+ *
+ * LOCKING:
+ * Caller must hold mode config lock.
+ *
+ * Remove @mode from @connector's mode list, then free it.
+ */
+void drm_mode_remove(struct drm_connector *connector,
+ struct drm_display_mode *mode)
+{
+ list_del(&mode->head);
+ kfree(mode);
+}
+EXPORT_SYMBOL(drm_mode_remove);
+
+/**
+ * drm_connector_init - Init a preallocated connector
+ * @dev: DRM device
+ * @connector: the connector to init
+ * @funcs: callbacks for this connector
+ * @name: user visible name of the connector
+ *
+ * LOCKING:
+ * Caller must hold @dev's mode_config lock.
+ *
+ * Initialises a preallocated connector. Connectors should be
+ * subclassed as part of driver connector objects.
+ */
+void drm_connector_init(struct drm_device *dev,
+ struct drm_connector *connector,
+ const struct drm_connector_funcs *funcs,
+ int connector_type)
+{
+ mutex_lock(&dev->mode_config.mutex);
+
+ connector->dev = dev;
+ connector->funcs = funcs;
+ drm_mode_object_get(dev, &connector->base, DRM_MODE_OBJECT_CONNECTOR);
+ connector->connector_type = connector_type;
+ connector->connector_type_id =
+ ++drm_connector_enum_list[connector_type].count; /* TODO */
+ INIT_LIST_HEAD(&connector->user_modes);
+ INIT_LIST_HEAD(&connector->probed_modes);
+ INIT_LIST_HEAD(&connector->modes);
+ connector->edid_blob_ptr = NULL;
+
+ list_add_tail(&connector->head, &dev->mode_config.connector_list);
+ dev->mode_config.num_connector++;
+
+ drm_connector_attach_property(connector,
+ dev->mode_config.edid_property, 0);
+
+ drm_connector_attach_property(connector,
+ dev->mode_config.dpms_property, 0);
+
+ mutex_unlock(&dev->mode_config.mutex);
+}
+EXPORT_SYMBOL(drm_connector_init);
+
+/**
+ * drm_connector_cleanup - cleans up an initialised connector
+ * @connector: connector to cleanup
+ *
+ * LOCKING:
+ * Caller must hold @dev's mode_config lock.
+ *
+ * Cleans up the connector but doesn't free the object.
+ */
+void drm_connector_cleanup(struct drm_connector *connector)
+{
+ struct drm_device *dev = connector->dev;
+ struct drm_display_mode *mode, *t;
+
+ list_for_each_entry_safe(mode, t, &connector->probed_modes, head)
+ drm_mode_remove(connector, mode);
+
+ list_for_each_entry_safe(mode, t, &connector->modes, head)
+ drm_mode_remove(connector, mode);
+
+ list_for_each_entry_safe(mode, t, &connector->user_modes, head)
+ drm_mode_remove(connector, mode);
+
+ mutex_lock(&dev->mode_config.mutex);
+ drm_mode_object_put(dev, &connector->base);
+ list_del(&connector->head);
+ mutex_unlock(&dev->mode_config.mutex);
+}
+EXPORT_SYMBOL(drm_connector_cleanup);
+
+void drm_encoder_init(struct drm_device *dev,
+ struct drm_encoder *encoder,
+ const struct drm_encoder_funcs *funcs,
+ int encoder_type)
+{
+ mutex_lock(&dev->mode_config.mutex);
+
+ encoder->dev = dev;
+
+ drm_mode_object_get(dev, &encoder->base, DRM_MODE_OBJECT_ENCODER);
+ encoder->encoder_type = encoder_type;
+ encoder->funcs = funcs;
+
+ list_add_tail(&encoder->head, &dev->mode_config.encoder_list);
+ dev->mode_config.num_encoder++;
+
+ mutex_unlock(&dev->mode_config.mutex);
+}
+EXPORT_SYMBOL(drm_encoder_init);
+
+void drm_encoder_cleanup(struct drm_encoder *encoder)
+{
+ struct drm_device *dev = encoder->dev;
+ mutex_lock(&dev->mode_config.mutex);
+ drm_mode_object_put(dev, &encoder->base);
+ list_del(&encoder->head);
+ mutex_unlock(&dev->mode_config.mutex);
+}
+EXPORT_SYMBOL(drm_encoder_cleanup);
+
+/**
+ * drm_mode_create - create a new display mode
+ * @dev: DRM device
+ *
+ * LOCKING:
+ * Caller must hold DRM mode_config lock.
+ *
+ * Create a new drm_display_mode, give it an ID, and return it.
+ *
+ * RETURNS:
+ * Pointer to new mode on success, NULL on error.
+ */
+struct drm_display_mode *drm_mode_create(struct drm_device *dev)
+{
+ struct drm_display_mode *nmode;
+
+ nmode = kzalloc(sizeof(struct drm_display_mode), GFP_KERNEL);
+ if (!nmode)
+ return NULL;
+
+ drm_mode_object_get(dev, &nmode->base, DRM_MODE_OBJECT_MODE);
+ return nmode;
+}
+EXPORT_SYMBOL(drm_mode_create);
+
+/**
+ * drm_mode_destroy - remove a mode
+ * @dev: DRM device
+ * @mode: mode to remove
+ *
+ * LOCKING:
+ * Caller must hold mode config lock.
+ *
+ * Free @mode's unique identifier, then free it.
+ */
+void drm_mode_destroy(struct drm_device *dev, struct drm_display_mode *mode)
+{
+ drm_mode_object_put(dev, &mode->base);
+
+ kfree(mode);
+}
+EXPORT_SYMBOL(drm_mode_destroy);
+
+static int drm_mode_create_standard_connector_properties(struct drm_device *dev)
+{
+ struct drm_property *edid;
+ struct drm_property *dpms;
+ int i;
+
+ /*
+ * Standard properties (apply to all connectors)
+ */
+ edid = drm_property_create(dev, DRM_MODE_PROP_BLOB |
+ DRM_MODE_PROP_IMMUTABLE,
+ "EDID", 0);
+ dev->mode_config.edid_property = edid;
+
+ dpms = drm_property_create(dev, DRM_MODE_PROP_ENUM,
+ "DPMS", ARRAY_SIZE(drm_dpms_enum_list));
+ for (i = 0; i < ARRAY_SIZE(drm_dpms_enum_list); i++)
+ drm_property_add_enum(dpms, i, drm_dpms_enum_list[i].type,
+ drm_dpms_enum_list[i].name);
+ dev->mode_config.dpms_property = dpms;
+
+ return 0;
+}
+
+/**
+ * drm_mode_create_dvi_i_properties - create DVI-I specific connector properties
+ * @dev: DRM device
+ *
+ * Called by a driver the first time a DVI-I connector is made.
+ */
+int drm_mode_create_dvi_i_properties(struct drm_device *dev)
+{
+ struct drm_property *dvi_i_selector;
+ struct drm_property *dvi_i_subconnector;
+ int i;
+
+ if (dev->mode_config.dvi_i_select_subconnector_property)
+ return 0;
+
+ dvi_i_selector =
+ drm_property_create(dev, DRM_MODE_PROP_ENUM,
+ "select subconnector",
+ ARRAY_SIZE(drm_dvi_i_select_enum_list));
+ for (i = 0; i < ARRAY_SIZE(drm_dvi_i_select_enum_list); i++)
+ drm_property_add_enum(dvi_i_selector, i,
+ drm_dvi_i_select_enum_list[i].type,
+ drm_dvi_i_select_enum_list[i].name);
+ dev->mode_config.dvi_i_select_subconnector_property = dvi_i_selector;
+
+ dvi_i_subconnector =
+ drm_property_create(dev, DRM_MODE_PROP_ENUM |
+ DRM_MODE_PROP_IMMUTABLE,
+ "subconnector",
+ ARRAY_SIZE(drm_dvi_i_subconnector_enum_list));
+ for (i = 0; i < ARRAY_SIZE(drm_dvi_i_subconnector_enum_list); i++)
+ drm_property_add_enum(dvi_i_subconnector, i,
+ drm_dvi_i_subconnector_enum_list[i].type,
+ drm_dvi_i_subconnector_enum_list[i].name);
+ dev->mode_config.dvi_i_subconnector_property = dvi_i_subconnector;
+
+ return 0;
+}
+EXPORT_SYMBOL(drm_mode_create_dvi_i_properties);
+
+/**
+ * drm_create_tv_properties - create TV specific connector properties
+ * @dev: DRM device
+ * @num_modes: number of different TV formats (modes) supported
+ * @modes: array of pointers to strings containing name of each format
+ *
+ * Called by a driver's TV initialization routine, this function creates
+ * the TV specific connector properties for a given device. Caller is
+ * responsible for allocating a list of format names and passing them to
+ * this routine.
+ */
+int drm_mode_create_tv_properties(struct drm_device *dev, int num_modes,
+ char *modes[])
+{
+ struct drm_property *tv_selector;
+ struct drm_property *tv_subconnector;
+ int i;
+
+ if (dev->mode_config.tv_select_subconnector_property)
+ return 0;
+
+ /*
+ * Basic connector properties
+ */
+ tv_selector = drm_property_create(dev, DRM_MODE_PROP_ENUM,
+ "select subconnector",
+ ARRAY_SIZE(drm_tv_select_enum_list));
+ for (i = 0; i < ARRAY_SIZE(drm_tv_select_enum_list); i++)
+ drm_property_add_enum(tv_selector, i,
+ drm_tv_select_enum_list[i].type,
+ drm_tv_select_enum_list[i].name);
+ dev->mode_config.tv_select_subconnector_property = tv_selector;
+
+ tv_subconnector =
+ drm_property_create(dev, DRM_MODE_PROP_ENUM |
+ DRM_MODE_PROP_IMMUTABLE, "subconnector",
+ ARRAY_SIZE(drm_tv_subconnector_enum_list));
+ for (i = 0; i < ARRAY_SIZE(drm_tv_subconnector_enum_list); i++)
+ drm_property_add_enum(tv_subconnector, i,
+ drm_tv_subconnector_enum_list[i].type,
+ drm_tv_subconnector_enum_list[i].name);
+ dev->mode_config.tv_subconnector_property = tv_subconnector;
+
+ /*
+ * Other, TV specific properties: margins & TV modes.
+ */
+ dev->mode_config.tv_left_margin_property =
+ drm_property_create(dev, DRM_MODE_PROP_RANGE,
+ "left margin", 2);
+ dev->mode_config.tv_left_margin_property->values[0] = 0;
+ dev->mode_config.tv_left_margin_property->values[1] = 100;
+
+ dev->mode_config.tv_right_margin_property =
+ drm_property_create(dev, DRM_MODE_PROP_RANGE,
+ "right margin", 2);
+ dev->mode_config.tv_right_margin_property->values[0] = 0;
+ dev->mode_config.tv_right_margin_property->values[1] = 100;
+
+ dev->mode_config.tv_top_margin_property =
+ drm_property_create(dev, DRM_MODE_PROP_RANGE,
+ "top margin", 2);
+ dev->mode_config.tv_top_margin_property->values[0] = 0;
+ dev->mode_config.tv_top_margin_property->values[1] = 100;
+
+ dev->mode_config.tv_bottom_margin_property =
+ drm_property_create(dev, DRM_MODE_PROP_RANGE,
+ "bottom margin", 2);
+ dev->mode_config.tv_bottom_margin_property->values[0] = 0;
+ dev->mode_config.tv_bottom_margin_property->values[1] = 100;
+
+ dev->mode_config.tv_mode_property =
+ drm_property_create(dev, DRM_MODE_PROP_ENUM,
+ "mode", num_modes);
+ for (i = 0; i < num_modes; i++)
+ drm_property_add_enum(dev->mode_config.tv_mode_property, i,
+ i, modes[i]);
+
+ return 0;
+}
+EXPORT_SYMBOL(drm_mode_create_tv_properties);
+
+/**
+ * drm_mode_create_scaling_mode_property - create scaling mode property
+ * @dev: DRM device
+ *
+ * Called by a driver the first time it's needed, must be attached to desired
+ * connectors.
+ */
+int drm_mode_create_scaling_mode_property(struct drm_device *dev)
+{
+ struct drm_property *scaling_mode;
+ int i;
+
+ if (dev->mode_config.scaling_mode_property)
+ return 0;
+
+ scaling_mode =
+ drm_property_create(dev, DRM_MODE_PROP_ENUM, "scaling mode",
+ ARRAY_SIZE(drm_scaling_mode_enum_list));
+ for (i = 0; i < ARRAY_SIZE(drm_scaling_mode_enum_list); i++)
+ drm_property_add_enum(scaling_mode, i,
+ drm_scaling_mode_enum_list[i].type,
+ drm_scaling_mode_enum_list[i].name);
+
+ dev->mode_config.scaling_mode_property = scaling_mode;
+
+ return 0;
+}
+EXPORT_SYMBOL(drm_mode_create_scaling_mode_property);
+
+/**
+ * drm_mode_create_dithering_property - create dithering property
+ * @dev: DRM device
+ *
+ * Called by a driver the first time it's needed, must be attached to desired
+ * connectors.
+ */
+int drm_mode_create_dithering_property(struct drm_device *dev)
+{
+ struct drm_property *dithering_mode;
+ int i;
+
+ if (dev->mode_config.dithering_mode_property)
+ return 0;
+
+ dithering_mode =
+ drm_property_create(dev, DRM_MODE_PROP_ENUM, "dithering",
+ ARRAY_SIZE(drm_dithering_mode_enum_list));
+ for (i = 0; i < ARRAY_SIZE(drm_dithering_mode_enum_list); i++)
+ drm_property_add_enum(dithering_mode, i,
+ drm_dithering_mode_enum_list[i].type,
+ drm_dithering_mode_enum_list[i].name);
+ dev->mode_config.dithering_mode_property = dithering_mode;
+
+ return 0;
+}
+EXPORT_SYMBOL(drm_mode_create_dithering_property);
+
+/**
+ * drm_mode_config_init - initialize DRM mode_configuration structure
+ * @dev: DRM device
+ *
+ * LOCKING:
+ * None, should happen single threaded at init time.
+ *
+ * Initialize @dev's mode_config structure, used for tracking the graphics
+ * configuration of @dev.
+ */
+void drm_mode_config_init(struct drm_device *dev)
+{
+ mutex_init(&dev->mode_config.mutex);
+ INIT_LIST_HEAD(&dev->mode_config.fb_list);
+ INIT_LIST_HEAD(&dev->mode_config.fb_kernel_list);
+ INIT_LIST_HEAD(&dev->mode_config.crtc_list);
+ INIT_LIST_HEAD(&dev->mode_config.connector_list);
+ INIT_LIST_HEAD(&dev->mode_config.encoder_list);
+ INIT_LIST_HEAD(&dev->mode_config.property_list);
+ INIT_LIST_HEAD(&dev->mode_config.property_blob_list);
+ idr_init(&dev->mode_config.crtc_idr);
+
+ mutex_lock(&dev->mode_config.mutex);
+ drm_mode_create_standard_connector_properties(dev);
+ mutex_unlock(&dev->mode_config.mutex);
+
+ /* Just to be sure */
+ dev->mode_config.num_fb = 0;
+ dev->mode_config.num_connector = 0;
+ dev->mode_config.num_crtc = 0;
+ dev->mode_config.num_encoder = 0;
+}
+EXPORT_SYMBOL(drm_mode_config_init);
+
+int drm_mode_group_init(struct drm_device *dev, struct drm_mode_group *group)
+{
+ uint32_t total_objects = 0;
+
+ total_objects += dev->mode_config.num_crtc;
+ total_objects += dev->mode_config.num_connector;
+ total_objects += dev->mode_config.num_encoder;
+
+ if (total_objects == 0)
+ return -EINVAL;
+
+ group->id_list = kzalloc(total_objects * sizeof(uint32_t), GFP_KERNEL);
+ if (!group->id_list)
+ return -ENOMEM;
+
+ group->num_crtcs = 0;
+ group->num_connectors = 0;
+ group->num_encoders = 0;
+ return 0;
+}
+
+int drm_mode_group_init_legacy_group(struct drm_device *dev,
+ struct drm_mode_group *group)
+{
+ struct drm_crtc *crtc;
+ struct drm_encoder *encoder;
+ struct drm_connector *connector;
+ int ret;
+
+ if ((ret = drm_mode_group_init(dev, group)))
+ return ret;
+
+ list_for_each_entry(crtc, &dev->mode_config.crtc_list, head)
+ group->id_list[group->num_crtcs++] = crtc->base.id;
+
+ list_for_each_entry(encoder, &dev->mode_config.encoder_list, head)
+ group->id_list[group->num_crtcs + group->num_encoders++] =
+ encoder->base.id;
+
+ list_for_each_entry(connector, &dev->mode_config.connector_list, head)
+ group->id_list[group->num_crtcs + group->num_encoders +
+ group->num_connectors++] = connector->base.id;
+
+ return 0;
+}
+
+/**
+ * drm_mode_config_cleanup - free up DRM mode_config info
+ * @dev: DRM device
+ *
+ * LOCKING:
+ * Caller must hold mode config lock.
+ *
+ * Free up all the connectors and CRTCs associated with this DRM device, then
+ * free up the framebuffers and associated buffer objects.
+ *
+ * FIXME: cleanup any dangling user buffer objects too
+ */
+void drm_mode_config_cleanup(struct drm_device *dev)
+{
+ struct drm_connector *connector, *ot;
+ struct drm_crtc *crtc, *ct;
+ struct drm_encoder *encoder, *enct;
+ struct drm_framebuffer *fb, *fbt;
+ struct drm_property *property, *pt;
+
+ list_for_each_entry_safe(encoder, enct, &dev->mode_config.encoder_list,
+ head) {
+ encoder->funcs->destroy(encoder);
+ }
+
+ list_for_each_entry_safe(connector, ot,
+ &dev->mode_config.connector_list, head) {
+ connector->funcs->destroy(connector);
+ }
+
+ list_for_each_entry_safe(property, pt, &dev->mode_config.property_list,
+ head) {
+ drm_property_destroy(dev, property);
+ }
+
+ list_for_each_entry_safe(fb, fbt, &dev->mode_config.fb_list, head) {
+ fb->funcs->destroy(fb);
+ }
+
+ list_for_each_entry_safe(crtc, ct, &dev->mode_config.crtc_list, head) {
+ crtc->funcs->destroy(crtc);
+ }
+
+}
+EXPORT_SYMBOL(drm_mode_config_cleanup);
+
+/**
+ * drm_crtc_convert_to_umode - convert a drm_display_mode into a modeinfo
+ * @out: drm_mode_modeinfo struct to return to the user
+ * @in: drm_display_mode to use
+ *
+ * LOCKING:
+ * None.
+ *
+ * Convert a drm_display_mode into a drm_mode_modeinfo structure to return to
+ * the user.
+ */
+void drm_crtc_convert_to_umode(struct drm_mode_modeinfo *out,
+ struct drm_display_mode *in)
+{
+ out->clock = in->clock;
+ out->hdisplay = in->hdisplay;
+ out->hsync_start = in->hsync_start;
+ out->hsync_end = in->hsync_end;
+ out->htotal = in->htotal;
+ out->hskew = in->hskew;
+ out->vdisplay = in->vdisplay;
+ out->vsync_start = in->vsync_start;
+ out->vsync_end = in->vsync_end;
+ out->vtotal = in->vtotal;
+ out->vscan = in->vscan;
+ out->vrefresh = in->vrefresh;
+ out->flags = in->flags;
+ out->type = in->type;
+ strncpy(out->name, in->name, DRM_DISPLAY_MODE_LEN);
+ out->name[DRM_DISPLAY_MODE_LEN-1] = 0;
+}
+
+/**
+ * drm_crtc_convert_umode - convert a modeinfo into a drm_display_mode
+ * @out: drm_display_mode to return to the user
+ * @in: drm_mode_modeinfo to use
+ *
+ * LOCKING:
+ * None.
+ *
+ * Convert a drm_mode_modeinfo into a drm_display_mode structure to return to
+ * the caller.
+ */
+void drm_crtc_convert_umode(struct drm_display_mode *out,
+ struct drm_mode_modeinfo *in)
+{
+ out->clock = in->clock;
+ out->hdisplay = in->hdisplay;
+ out->hsync_start = in->hsync_start;
+ out->hsync_end = in->hsync_end;
+ out->htotal = in->htotal;
+ out->hskew = in->hskew;
+ out->vdisplay = in->vdisplay;
+ out->vsync_start = in->vsync_start;
+ out->vsync_end = in->vsync_end;
+ out->vtotal = in->vtotal;
+ out->vscan = in->vscan;
+ out->vrefresh = in->vrefresh;
+ out->flags = in->flags;
+ out->type = in->type;
+ strncpy(out->name, in->name, DRM_DISPLAY_MODE_LEN);
+ out->name[DRM_DISPLAY_MODE_LEN-1] = 0;
+}
+
+/**
+ * drm_mode_getresources - get graphics configuration
+ * @inode: inode from the ioctl
+ * @filp: file * from the ioctl
+ * @cmd: cmd from ioctl
+ * @arg: arg from ioctl
+ *
+ * LOCKING:
+ * Takes mode config lock.
+ *
+ * Construct a set of configuration description structures and return
+ * them to the user, including CRTC, connector and framebuffer configuration.
+ *
+ * Called by the user via ioctl.
+ *
+ * RETURNS:
+ * Zero on success, errno on failure.
+ */
+int drm_mode_getresources(struct drm_device *dev, void *data,
+ struct drm_file *file_priv)
+{
+ struct drm_mode_card_res *card_res = data;
+ struct list_head *lh;
+ struct drm_framebuffer *fb;
+ struct drm_connector *connector;
+ struct drm_crtc *crtc;
+ struct drm_encoder *encoder;
+ int ret = 0;
+ int connector_count = 0;
+ int crtc_count = 0;
+ int fb_count = 0;
+ int encoder_count = 0;
+ int copied = 0, i;
+ uint32_t __user *fb_id;
+ uint32_t __user *crtc_id;
+ uint32_t __user *connector_id;
+ uint32_t __user *encoder_id;
+ struct drm_mode_group *mode_group;
+
+ mutex_lock(&dev->mode_config.mutex);
+
+ /*
+ * For the non-control nodes we need to limit the list of resources
+ * by IDs in the group list for this node
+ */
+ list_for_each(lh, &file_priv->fbs)
+ fb_count++;
+
+ mode_group = &file_priv->master->minor->mode_group;
+ if (file_priv->master->minor->type == DRM_MINOR_CONTROL) {
+
+ list_for_each(lh, &dev->mode_config.crtc_list)
+ crtc_count++;
+
+ list_for_each(lh, &dev->mode_config.connector_list)
+ connector_count++;
+
+ list_for_each(lh, &dev->mode_config.encoder_list)
+ encoder_count++;
+ } else {
+
+ crtc_count = mode_group->num_crtcs;
+ connector_count = mode_group->num_connectors;
+ encoder_count = mode_group->num_encoders;
+ }
+
+ card_res->max_height = dev->mode_config.max_height;
+ card_res->min_height = dev->mode_config.min_height;
+ card_res->max_width = dev->mode_config.max_width;
+ card_res->min_width = dev->mode_config.min_width;
+
+ /* handle this in 4 parts */
+ /* FBs */
+ if (card_res->count_fbs >= fb_count) {
+ copied = 0;
+ fb_id = (uint32_t __user *)(unsigned long)card_res->fb_id_ptr;
+ list_for_each_entry(fb, &file_priv->fbs, head) {
+ if (put_user(fb->base.id, fb_id + copied)) {
+ ret = -EFAULT;
+ goto out;
+ }
+ copied++;
+ }
+ }
+ card_res->count_fbs = fb_count;
+
+ /* CRTCs */
+ if (card_res->count_crtcs >= crtc_count) {
+ copied = 0;
+ crtc_id = (uint32_t __user *)(unsigned long)card_res->crtc_id_ptr;
+ if (file_priv->master->minor->type == DRM_MINOR_CONTROL) {
+ list_for_each_entry(crtc, &dev->mode_config.crtc_list,
+ head) {
+ DRM_DEBUG("CRTC ID is %d\n", crtc->base.id);
+ if (put_user(crtc->base.id, crtc_id + copied)) {
+ ret = -EFAULT;
+ goto out;
+ }
+ copied++;
+ }
+ } else {
+ for (i = 0; i < mode_group->num_crtcs; i++) {
+ if (put_user(mode_group->id_list[i],
+ crtc_id + copied)) {
+ ret = -EFAULT;
+ goto out;
+ }
+ copied++;
+ }
+ }
+ }
+ card_res->count_crtcs = crtc_count;
+
+ /* Encoders */
+ if (card_res->count_encoders >= encoder_count) {
+ copied = 0;
+ encoder_id = (uint32_t __user *)(unsigned long)card_res->encoder_id_ptr;
+ if (file_priv->master->minor->type == DRM_MINOR_CONTROL) {
+ list_for_each_entry(encoder,
+ &dev->mode_config.encoder_list,
+ head) {
+ DRM_DEBUG("ENCODER ID is %d\n",
+ encoder->base.id);
+ if (put_user(encoder->base.id, encoder_id +
+ copied)) {
+ ret = -EFAULT;
+ goto out;
+ }
+ copied++;
+ }
+ } else {
+ for (i = mode_group->num_crtcs; i < mode_group->num_crtcs + mode_group->num_encoders; i++) {
+ if (put_user(mode_group->id_list[i],
+ encoder_id + copied)) {
+ ret = -EFAULT;
+ goto out;
+ }
+ copied++;
+ }
+
+ }
+ }
+ card_res->count_encoders = encoder_count;
+
+ /* Connectors */
+ if (card_res->count_connectors >= connector_count) {
+ copied = 0;
+ connector_id = (uint32_t __user *)(unsigned long)card_res->connector_id_ptr;
+ if (file_priv->master->minor->type == DRM_MINOR_CONTROL) {
+ list_for_each_entry(connector,
+ &dev->mode_config.connector_list,
+ head) {
+ DRM_DEBUG("CONNECTOR ID is %d\n",
+ connector->base.id);
+ if (put_user(connector->base.id,
+ connector_id + copied)) {
+ ret = -EFAULT;
+ goto out;
+ }
+ copied++;
+ }
+ } else {
+ int start = mode_group->num_crtcs +
+ mode_group->num_encoders;
+ for (i = start; i < start + mode_group->num_connectors; i++) {
+ if (put_user(mode_group->id_list[i],
+ connector_id + copied)) {
+ ret = -EFAULT;
+ goto out;
+ }
+ copied++;
+ }
+ }
+ }
+ card_res->count_connectors = connector_count;
+
+ DRM_DEBUG("Counted %d %d %d\n", card_res->count_crtcs,
+ card_res->count_connectors, card_res->count_encoders);
+
+out:
+ mutex_unlock(&dev->mode_config.mutex);
+ return ret;
+}
+
+/**
+ * drm_mode_getcrtc - get CRTC configuration
+ * @inode: inode from the ioctl
+ * @filp: file * from the ioctl
+ * @cmd: cmd from ioctl
+ * @arg: arg from ioctl
+ *
+ * LOCKING:
+ * Caller? (FIXME)
+ *
+ * Construct a CRTC configuration structure to return to the user.
+ *
+ * Called by the user via ioctl.
+ *
+ * RETURNS:
+ * Zero on success, errno on failure.
+ */
+int drm_mode_getcrtc(struct drm_device *dev,
+ void *data, struct drm_file *file_priv)
+{
+ struct drm_mode_crtc *crtc_resp = data;
+ struct drm_crtc *crtc;
+ struct drm_mode_object *obj;
+ int ret = 0;
+
+ mutex_lock(&dev->mode_config.mutex);
+
+ obj = drm_mode_object_find(dev, crtc_resp->crtc_id,
+ DRM_MODE_OBJECT_CRTC);
+ if (!obj) {
+ ret = -EINVAL;
+ goto out;
+ }
+ crtc = obj_to_crtc(obj);
+
+ crtc_resp->x = crtc->x;
+ crtc_resp->y = crtc->y;
+ crtc_resp->gamma_size = crtc->gamma_size;
+ if (crtc->fb)
+ crtc_resp->fb_id = crtc->fb->base.id;
+ else
+ crtc_resp->fb_id = 0;
+
+ if (crtc->enabled) {
+
+ drm_crtc_convert_to_umode(&crtc_resp->mode, &crtc->mode);
+ crtc_resp->mode_valid = 1;
+
+ } else {
+ crtc_resp->mode_valid = 0;
+ }
+
+out:
+ mutex_unlock(&dev->mode_config.mutex);
+ return ret;
+}
+
+/**
+ * drm_mode_getconnector - get connector configuration
+ * @inode: inode from the ioctl
+ * @filp: file * from the ioctl
+ * @cmd: cmd from ioctl
+ * @arg: arg from ioctl
+ *
+ * LOCKING:
+ * Caller? (FIXME)
+ *
+ * Construct a connector configuration structure to return to the user.
+ *
+ * Called by the user via ioctl.
+ *
+ * RETURNS:
+ * Zero on success, errno on failure.
+ */
+int drm_mode_getconnector(struct drm_device *dev, void *data,
+ struct drm_file *file_priv)
+{
+ struct drm_mode_get_connector *out_resp = data;
+ struct drm_mode_object *obj;
+ struct drm_connector *connector;
+ struct drm_display_mode *mode;
+ int mode_count = 0;
+ int props_count = 0;
+ int encoders_count = 0;
+ int ret = 0;
+ int copied = 0;
+ int i;
+ struct drm_mode_modeinfo u_mode;
+ struct drm_mode_modeinfo __user *mode_ptr;
+ uint32_t __user *prop_ptr;
+ uint64_t __user *prop_values;
+ uint32_t __user *encoder_ptr;
+
+ memset(&u_mode, 0, sizeof(struct drm_mode_modeinfo));
+
+ DRM_DEBUG("connector id %d:\n", out_resp->connector_id);
+
+ mutex_lock(&dev->mode_config.mutex);
+
+ obj = drm_mode_object_find(dev, out_resp->connector_id,
+ DRM_MODE_OBJECT_CONNECTOR);
+ if (!obj) {
+ ret = -EINVAL;
+ goto out;
+ }
+ connector = obj_to_connector(obj);
+
+ for (i = 0; i < DRM_CONNECTOR_MAX_PROPERTY; i++) {
+ if (connector->property_ids[i] != 0) {
+ props_count++;
+ }
+ }
+
+ for (i = 0; i < DRM_CONNECTOR_MAX_ENCODER; i++) {
+ if (connector->encoder_ids[i] != 0) {
+ encoders_count++;
+ }
+ }
+
+ if (out_resp->count_modes == 0) {
+ connector->funcs->fill_modes(connector,
+ dev->mode_config.max_width,
+ dev->mode_config.max_height);
+ }
+
+ /* delayed so we get modes regardless of pre-fill_modes state */
+ list_for_each_entry(mode, &connector->modes, head)
+ mode_count++;
+
+ out_resp->connector_id = connector->base.id;
+ out_resp->connector_type = connector->connector_type;
+ out_resp->connector_type_id = connector->connector_type_id;
+ out_resp->mm_width = connector->display_info.width_mm;
+ out_resp->mm_height = connector->display_info.height_mm;
+ out_resp->subpixel = connector->display_info.subpixel_order;
+ out_resp->connection = connector->status;
+ if (connector->encoder)
+ out_resp->encoder_id = connector->encoder->base.id;
+ else
+ out_resp->encoder_id = 0;
+
+ /*
+ * This ioctl is called twice, once to determine how much space is
+ * needed, and the 2nd time to fill it.
+ */
+ if ((out_resp->count_modes >= mode_count) && mode_count) {
+ copied = 0;
+ mode_ptr = (struct drm_mode_modeinfo *)(unsigned long)out_resp->modes_ptr;
+ list_for_each_entry(mode, &connector->modes, head) {
+ drm_crtc_convert_to_umode(&u_mode, mode);
+ if (copy_to_user(mode_ptr + copied,
+ &u_mode, sizeof(u_mode))) {
+ ret = -EFAULT;
+ goto out;
+ }
+ copied++;
+ }
+ }
+ out_resp->count_modes = mode_count;
+
+ if ((out_resp->count_props >= props_count) && props_count) {
+ copied = 0;
+ prop_ptr = (uint32_t *)(unsigned long)(out_resp->props_ptr);
+ prop_values = (uint64_t *)(unsigned long)(out_resp->prop_values_ptr);
+ for (i = 0; i < DRM_CONNECTOR_MAX_PROPERTY; i++) {
+ if (connector->property_ids[i] != 0) {
+ if (put_user(connector->property_ids[i],
+ prop_ptr + copied)) {
+ ret = -EFAULT;
+ goto out;
+ }
+
+ if (put_user(connector->property_values[i],
+ prop_values + copied)) {
+ ret = -EFAULT;
+ goto out;
+ }
+ copied++;
+ }
+ }
+ }
+ out_resp->count_props = props_count;
+
+ if ((out_resp->count_encoders >= encoders_count) && encoders_count) {
+ copied = 0;
+ encoder_ptr = (uint32_t *)(unsigned long)(out_resp->encoders_ptr);
+ for (i = 0; i < DRM_CONNECTOR_MAX_ENCODER; i++) {
+ if (connector->encoder_ids[i] != 0) {
+ if (put_user(connector->encoder_ids[i],
+ encoder_ptr + copied)) {
+ ret = -EFAULT;
+ goto out;
+ }
+ copied++;
+ }
+ }
+ }
+ out_resp->count_encoders = encoders_count;
+
+out:
+ mutex_unlock(&dev->mode_config.mutex);
+ return ret;
+}
+
+int drm_mode_getencoder(struct drm_device *dev, void *data,
+ struct drm_file *file_priv)
+{
+ struct drm_mode_get_encoder *enc_resp = data;
+ struct drm_mode_object *obj;
+ struct drm_encoder *encoder;
+ int ret = 0;
+
+ mutex_lock(&dev->mode_config.mutex);
+ obj = drm_mode_object_find(dev, enc_resp->encoder_id,
+ DRM_MODE_OBJECT_ENCODER);
+ if (!obj) {
+ ret = -EINVAL;
+ goto out;
+ }
+ encoder = obj_to_encoder(obj);
+
+ if (encoder->crtc)
+ enc_resp->crtc_id = encoder->crtc->base.id;
+ else
+ enc_resp->crtc_id = 0;
+ enc_resp->encoder_type = encoder->encoder_type;
+ enc_resp->encoder_id = encoder->base.id;
+ enc_resp->possible_crtcs = encoder->possible_crtcs;
+ enc_resp->possible_clones = encoder->possible_clones;
+
+out:
+ mutex_unlock(&dev->mode_config.mutex);
+ return ret;
+}
+
+/**
+ * drm_mode_setcrtc - set CRTC configuration
+ * @inode: inode from the ioctl
+ * @filp: file * from the ioctl
+ * @cmd: cmd from ioctl
+ * @arg: arg from ioctl
+ *
+ * LOCKING:
+ * Caller? (FIXME)
+ *
+ * Build a new CRTC configuration based on user request.
+ *
+ * Called by the user via ioctl.
+ *
+ * RETURNS:
+ * Zero on success, errno on failure.
+ */
+int drm_mode_setcrtc(struct drm_device *dev, void *data,
+ struct drm_file *file_priv)
+{
+ struct drm_mode_config *config = &dev->mode_config;
+ struct drm_mode_crtc *crtc_req = data;
+ struct drm_mode_object *obj;
+ struct drm_crtc *crtc, *crtcfb;
+ struct drm_connector **connector_set = NULL, *connector;
+ struct drm_framebuffer *fb = NULL;
+ struct drm_display_mode *mode = NULL;
+ struct drm_mode_set set;
+ uint32_t __user *set_connectors_ptr;
+ int ret = 0;
+ int i;
+
+ mutex_lock(&dev->mode_config.mutex);
+ obj = drm_mode_object_find(dev, crtc_req->crtc_id,
+ DRM_MODE_OBJECT_CRTC);
+ if (!obj) {
+ DRM_DEBUG("Unknown CRTC ID %d\n", crtc_req->crtc_id);
+ ret = -EINVAL;
+ goto out;
+ }
+ crtc = obj_to_crtc(obj);
+
+ if (crtc_req->mode_valid) {
+ /* If we have a mode we need a framebuffer. */
+ /* If we pass -1, set the mode with the currently bound fb */
+ if (crtc_req->fb_id == -1) {
+ list_for_each_entry(crtcfb,
+ &dev->mode_config.crtc_list, head) {
+ if (crtcfb == crtc) {
+ DRM_DEBUG("Using current fb for setmode\n");
+ fb = crtc->fb;
+ }
+ }
+ } else {
+ obj = drm_mode_object_find(dev, crtc_req->fb_id,
+ DRM_MODE_OBJECT_FB);
+ if (!obj) {
+ DRM_DEBUG("Unknown FB ID%d\n", crtc_req->fb_id);
+ ret = -EINVAL;
+ goto out;
+ }
+ fb = obj_to_fb(obj);
+ }
+
+ mode = drm_mode_create(dev);
+ drm_crtc_convert_umode(mode, &crtc_req->mode);
+ drm_mode_set_crtcinfo(mode, CRTC_INTERLACE_HALVE_V);
+ }
+
+ if (crtc_req->count_connectors == 0 && mode) {
+ DRM_DEBUG("Count connectors is 0 but mode set\n");
+ ret = -EINVAL;
+ goto out;
+ }
+
+ if (crtc_req->count_connectors > 0 && !mode && !fb) {
+ DRM_DEBUG("Count connectors is %d but no mode or fb set\n",
+ crtc_req->count_connectors);
+ ret = -EINVAL;
+ goto out;
+ }
+
+ if (crtc_req->count_connectors > 0) {
+ u32 out_id;
+
+ /* Avoid unbounded kernel memory allocation */
+ if (crtc_req->count_connectors > config->num_connector) {
+ ret = -EINVAL;
+ goto out;
+ }
+
+ connector_set = kmalloc(crtc_req->count_connectors *
+ sizeof(struct drm_connector *),
+ GFP_KERNEL);
+ if (!connector_set) {
+ ret = -ENOMEM;
+ goto out;
+ }
+
+ for (i = 0; i < crtc_req->count_connectors; i++) {
+ set_connectors_ptr = (uint32_t *)(unsigned long)crtc_req->set_connectors_ptr;
+ if (get_user(out_id, &set_connectors_ptr[i])) {
+ ret = -EFAULT;
+ goto out;
+ }
+
+ obj = drm_mode_object_find(dev, out_id,
+ DRM_MODE_OBJECT_CONNECTOR);
+ if (!obj) {
+ DRM_DEBUG("Connector id %d unknown\n", out_id);
+ ret = -EINVAL;
+ goto out;
+ }
+ connector = obj_to_connector(obj);
+
+ connector_set[i] = connector;
+ }
+ }
+
+ set.crtc = crtc;
+ set.x = crtc_req->x;
+ set.y = crtc_req->y;
+ set.mode = mode;
+ set.connectors = connector_set;
+ set.num_connectors = crtc_req->count_connectors;
+ set.fb = fb;
+ ret = crtc->funcs->set_config(&set);
+
+out:
+ kfree(connector_set);
+ mutex_unlock(&dev->mode_config.mutex);
+ return ret;
+}
+
+int drm_mode_cursor_ioctl(struct drm_device *dev,
+ void *data, struct drm_file *file_priv)
+{
+ struct drm_mode_cursor *req = data;
+ struct drm_mode_object *obj;
+ struct drm_crtc *crtc;
+ int ret = 0;
+
+ DRM_DEBUG("\n");
+
+ if (!req->flags) {
+ DRM_ERROR("no operation set\n");
+ return -EINVAL;
+ }
+
+ mutex_lock(&dev->mode_config.mutex);
+ obj = drm_mode_object_find(dev, req->crtc_id, DRM_MODE_OBJECT_CRTC);
+ if (!obj) {
+ DRM_DEBUG("Unknown CRTC ID %d\n", req->crtc_id);
+ ret = -EINVAL;
+ goto out;
+ }
+ crtc = obj_to_crtc(obj);
+
+ if (req->flags & DRM_MODE_CURSOR_BO) {
+ if (!crtc->funcs->cursor_set) {
+ DRM_ERROR("crtc does not support cursor\n");
+ ret = -ENXIO;
+ goto out;
+ }
+ /* Turns off the cursor if handle is 0 */
+ ret = crtc->funcs->cursor_set(crtc, file_priv, req->handle,
+ req->width, req->height);
+ }
+
+ if (req->flags & DRM_MODE_CURSOR_MOVE) {
+ if (crtc->funcs->cursor_move) {
+ ret = crtc->funcs->cursor_move(crtc, req->x, req->y);
+ } else {
+ DRM_ERROR("crtc does not support cursor\n");
+ ret = -EFAULT;
+ goto out;
+ }
+ }
+out:
+ mutex_unlock(&dev->mode_config.mutex);
+ return ret;
+}
+
+/**
+ * drm_mode_addfb - add an FB to the graphics configuration
+ * @inode: inode from the ioctl
+ * @filp: file * from the ioctl
+ * @cmd: cmd from ioctl
+ * @arg: arg from ioctl
+ *
+ * LOCKING:
+ * Takes mode config lock.
+ *
+ * Add a new FB to the specified CRTC, given a user request.
+ *
+ * Called by the user via ioctl.
+ *
+ * RETURNS:
+ * Zero on success, errno on failure.
+ */
+int drm_mode_addfb(struct drm_device *dev,
+ void *data, struct drm_file *file_priv)
+{
+ struct drm_mode_fb_cmd *r = data;
+ struct drm_mode_config *config = &dev->mode_config;
+ struct drm_framebuffer *fb;
+ int ret = 0;
+
+ if ((config->min_width > r->width) || (r->width > config->max_width)) {
+ DRM_ERROR("mode new framebuffer width not within limits\n");
+ return -EINVAL;
+ }
+ if ((config->min_height > r->height) || (r->height > config->max_height)) {
+ DRM_ERROR("mode new framebuffer height not within limits\n");
+ return -EINVAL;
+ }
+
+ mutex_lock(&dev->mode_config.mutex);
+
+	/* TODO check buffer is sufficiently large */
+ /* TODO setup destructor callback */
+
+ fb = dev->mode_config.funcs->fb_create(dev, file_priv, r);
+ if (!fb) {
+ DRM_ERROR("could not create framebuffer\n");
+ ret = -EINVAL;
+ goto out;
+ }
+
+ r->fb_id = fb->base.id;
+ list_add(&fb->filp_head, &file_priv->fbs);
+
+out:
+ mutex_unlock(&dev->mode_config.mutex);
+ return ret;
+}
+
+/**
+ * drm_mode_rmfb - remove an FB from the configuration
+ * @dev: DRM device
+ * @data: ioctl data (framebuffer id)
+ * @file_priv: DRM file info
+ *
+ * LOCKING:
+ * Takes mode config lock.
+ *
+ * Remove the FB specified by the user.
+ *
+ * Called by the user via ioctl.
+ *
+ * RETURNS:
+ * Zero on success, errno on failure.
+ */
+int drm_mode_rmfb(struct drm_device *dev,
+ void *data, struct drm_file *file_priv)
+{
+ struct drm_mode_object *obj;
+ struct drm_framebuffer *fb = NULL;
+ struct drm_framebuffer *fbl = NULL;
+ uint32_t *id = data;
+ int ret = 0;
+ int found = 0;
+
+ mutex_lock(&dev->mode_config.mutex);
+ obj = drm_mode_object_find(dev, *id, DRM_MODE_OBJECT_FB);
+	/* TODO check that we really get a framebuffer back. */
+ if (!obj) {
+ DRM_ERROR("mode invalid framebuffer id\n");
+ ret = -EINVAL;
+ goto out;
+ }
+ fb = obj_to_fb(obj);
+
+ list_for_each_entry(fbl, &file_priv->fbs, filp_head)
+ if (fb == fbl)
+ found = 1;
+
+ if (!found) {
+ DRM_ERROR("tried to remove a fb that we didn't own\n");
+ ret = -EINVAL;
+ goto out;
+ }
+
+	/* TODO release all crtcs connected to the framebuffer */
+	/* TODO unhook the destructor from the buffer object */
+
+ list_del(&fb->filp_head);
+ fb->funcs->destroy(fb);
+
+out:
+ mutex_unlock(&dev->mode_config.mutex);
+ return ret;
+}
+
+/**
+ * drm_mode_getfb - get FB info
+ * @dev: DRM device
+ * @data: ioctl data (struct drm_mode_fb_cmd)
+ * @file_priv: DRM file info
+ *
+ * LOCKING:
+ * Takes mode config lock.
+ *
+ * Lookup the FB given its ID and return info about it.
+ *
+ * Called by the user via ioctl.
+ *
+ * RETURNS:
+ * Zero on success, errno on failure.
+ */
+int drm_mode_getfb(struct drm_device *dev,
+ void *data, struct drm_file *file_priv)
+{
+ struct drm_mode_fb_cmd *r = data;
+ struct drm_mode_object *obj;
+ struct drm_framebuffer *fb;
+ int ret = 0;
+
+ mutex_lock(&dev->mode_config.mutex);
+ obj = drm_mode_object_find(dev, r->fb_id, DRM_MODE_OBJECT_FB);
+ if (!obj) {
+ DRM_ERROR("invalid framebuffer id\n");
+ ret = -EINVAL;
+ goto out;
+ }
+ fb = obj_to_fb(obj);
+
+ r->height = fb->height;
+ r->width = fb->width;
+ r->depth = fb->depth;
+ r->bpp = fb->bits_per_pixel;
+ r->pitch = fb->pitch;
+ fb->funcs->create_handle(fb, file_priv, &r->handle);
+
+out:
+ mutex_unlock(&dev->mode_config.mutex);
+ return ret;
+}
+
+/**
+ * drm_fb_release - remove and free the FBs on this file
+ * @filp: file * being released
+ *
+ * LOCKING:
+ * Takes mode config lock.
+ *
+ * Destroy all the FBs associated with @filp.
+ *
+ * Called when the DRM file is closed.
+ */
+void drm_fb_release(struct file *filp)
+{
+ struct drm_file *priv = filp->private_data;
+ struct drm_device *dev = priv->minor->dev;
+ struct drm_framebuffer *fb, *tfb;
+
+ mutex_lock(&dev->mode_config.mutex);
+ list_for_each_entry_safe(fb, tfb, &priv->fbs, filp_head) {
+ list_del(&fb->filp_head);
+ fb->funcs->destroy(fb);
+ }
+ mutex_unlock(&dev->mode_config.mutex);
+}
+
+/**
+ * drm_mode_attachmode - add a mode to the user mode list
+ * @dev: DRM device
+ * @connector: connector to add the mode to
+ * @mode: mode to add
+ *
+ * Add @mode to @connector's user mode list.
+ */
+static int drm_mode_attachmode(struct drm_device *dev,
+ struct drm_connector *connector,
+ struct drm_display_mode *mode)
+{
+ int ret = 0;
+
+ list_add_tail(&mode->head, &connector->user_modes);
+ return ret;
+}
+
+int drm_mode_attachmode_crtc(struct drm_device *dev, struct drm_crtc *crtc,
+ struct drm_display_mode *mode)
+{
+ struct drm_connector *connector;
+ int ret = 0;
+ struct drm_display_mode *dup_mode;
+ int need_dup = 0;
+ list_for_each_entry(connector, &dev->mode_config.connector_list, head) {
+ if (!connector->encoder)
+ break;
+ if (connector->encoder->crtc == crtc) {
+ if (need_dup)
+ dup_mode = drm_mode_duplicate(dev, mode);
+ else
+ dup_mode = mode;
+ ret = drm_mode_attachmode(dev, connector, dup_mode);
+ if (ret)
+ return ret;
+ need_dup = 1;
+ }
+ }
+ return 0;
+}
+EXPORT_SYMBOL(drm_mode_attachmode_crtc);
+
+static int drm_mode_detachmode(struct drm_device *dev,
+ struct drm_connector *connector,
+ struct drm_display_mode *mode)
+{
+ int found = 0;
+ int ret = 0;
+ struct drm_display_mode *match_mode, *t;
+
+ list_for_each_entry_safe(match_mode, t, &connector->user_modes, head) {
+ if (drm_mode_equal(match_mode, mode)) {
+ list_del(&match_mode->head);
+ drm_mode_destroy(dev, match_mode);
+ found = 1;
+ break;
+ }
+ }
+
+ if (!found)
+ ret = -EINVAL;
+
+ return ret;
+}
+
+int drm_mode_detachmode_crtc(struct drm_device *dev, struct drm_display_mode *mode)
+{
+ struct drm_connector *connector;
+
+ list_for_each_entry(connector, &dev->mode_config.connector_list, head) {
+ drm_mode_detachmode(dev, connector, mode);
+ }
+ return 0;
+}
+EXPORT_SYMBOL(drm_mode_detachmode_crtc);
+
+/**
+ * drm_mode_attachmode_ioctl - attach a user mode to a connector
+ * @dev: DRM device
+ * @data: ioctl data (struct drm_mode_mode_cmd)
+ * @file_priv: DRM file info
+ *
+ * This attaches a user specified mode to a connector.
+ * Called by the user via ioctl.
+ *
+ * RETURNS:
+ * Zero on success, errno on failure.
+ */
+int drm_mode_attachmode_ioctl(struct drm_device *dev,
+ void *data, struct drm_file *file_priv)
+{
+ struct drm_mode_mode_cmd *mode_cmd = data;
+ struct drm_connector *connector;
+ struct drm_display_mode *mode;
+ struct drm_mode_object *obj;
+ struct drm_mode_modeinfo *umode = &mode_cmd->mode;
+ int ret = 0;
+
+ mutex_lock(&dev->mode_config.mutex);
+
+ obj = drm_mode_object_find(dev, mode_cmd->connector_id, DRM_MODE_OBJECT_CONNECTOR);
+ if (!obj) {
+ ret = -EINVAL;
+ goto out;
+ }
+ connector = obj_to_connector(obj);
+
+ mode = drm_mode_create(dev);
+ if (!mode) {
+ ret = -ENOMEM;
+ goto out;
+ }
+
+ drm_crtc_convert_umode(mode, umode);
+
+ ret = drm_mode_attachmode(dev, connector, mode);
+out:
+ mutex_unlock(&dev->mode_config.mutex);
+ return ret;
+}
+
+
+/**
+ * drm_mode_detachmode_ioctl - detach a user specified mode from a connector
+ * @dev: DRM device
+ * @data: ioctl data (struct drm_mode_mode_cmd)
+ * @file_priv: DRM file info
+ *
+ * Called by the user via ioctl.
+ *
+ * RETURNS:
+ * Zero on success, errno on failure.
+ */
+int drm_mode_detachmode_ioctl(struct drm_device *dev,
+ void *data, struct drm_file *file_priv)
+{
+ struct drm_mode_object *obj;
+ struct drm_mode_mode_cmd *mode_cmd = data;
+ struct drm_connector *connector;
+ struct drm_display_mode mode;
+ struct drm_mode_modeinfo *umode = &mode_cmd->mode;
+ int ret = 0;
+
+ mutex_lock(&dev->mode_config.mutex);
+
+ obj = drm_mode_object_find(dev, mode_cmd->connector_id, DRM_MODE_OBJECT_CONNECTOR);
+ if (!obj) {
+ ret = -EINVAL;
+ goto out;
+ }
+ connector = obj_to_connector(obj);
+
+ drm_crtc_convert_umode(&mode, umode);
+ ret = drm_mode_detachmode(dev, connector, &mode);
+out:
+ mutex_unlock(&dev->mode_config.mutex);
+ return ret;
+}
+
+struct drm_property *drm_property_create(struct drm_device *dev, int flags,
+ const char *name, int num_values)
+{
+ struct drm_property *property = NULL;
+
+ property = kzalloc(sizeof(struct drm_property), GFP_KERNEL);
+ if (!property)
+ return NULL;
+
+ if (num_values) {
+ property->values = kzalloc(sizeof(uint64_t)*num_values, GFP_KERNEL);
+ if (!property->values)
+ goto fail;
+ }
+
+ drm_mode_object_get(dev, &property->base, DRM_MODE_OBJECT_PROPERTY);
+ property->flags = flags;
+ property->num_values = num_values;
+ INIT_LIST_HEAD(&property->enum_blob_list);
+
+	if (name) {
+		strncpy(property->name, name, DRM_PROP_NAME_LEN);
+		property->name[DRM_PROP_NAME_LEN-1] = '\0';
+	}
+
+ list_add_tail(&property->head, &dev->mode_config.property_list);
+ return property;
+fail:
+ kfree(property);
+ return NULL;
+}
+EXPORT_SYMBOL(drm_property_create);
+
+int drm_property_add_enum(struct drm_property *property, int index,
+ uint64_t value, const char *name)
+{
+ struct drm_property_enum *prop_enum;
+
+ if (!(property->flags & DRM_MODE_PROP_ENUM))
+ return -EINVAL;
+
+ if (!list_empty(&property->enum_blob_list)) {
+ list_for_each_entry(prop_enum, &property->enum_blob_list, head) {
+ if (prop_enum->value == value) {
+ strncpy(prop_enum->name, name, DRM_PROP_NAME_LEN);
+ prop_enum->name[DRM_PROP_NAME_LEN-1] = '\0';
+ return 0;
+ }
+ }
+ }
+
+ prop_enum = kzalloc(sizeof(struct drm_property_enum), GFP_KERNEL);
+ if (!prop_enum)
+ return -ENOMEM;
+
+ strncpy(prop_enum->name, name, DRM_PROP_NAME_LEN);
+ prop_enum->name[DRM_PROP_NAME_LEN-1] = '\0';
+ prop_enum->value = value;
+
+ property->values[index] = value;
+ list_add_tail(&prop_enum->head, &property->enum_blob_list);
+ return 0;
+}
+EXPORT_SYMBOL(drm_property_add_enum);
+
+void drm_property_destroy(struct drm_device *dev, struct drm_property *property)
+{
+ struct drm_property_enum *prop_enum, *pt;
+
+ list_for_each_entry_safe(prop_enum, pt, &property->enum_blob_list, head) {
+ list_del(&prop_enum->head);
+ kfree(prop_enum);
+ }
+
+ if (property->num_values)
+ kfree(property->values);
+ drm_mode_object_put(dev, &property->base);
+ list_del(&property->head);
+ kfree(property);
+}
+EXPORT_SYMBOL(drm_property_destroy);
+
+int drm_connector_attach_property(struct drm_connector *connector,
+ struct drm_property *property, uint64_t init_val)
+{
+ int i;
+
+ for (i = 0; i < DRM_CONNECTOR_MAX_PROPERTY; i++) {
+ if (connector->property_ids[i] == 0) {
+ connector->property_ids[i] = property->base.id;
+ connector->property_values[i] = init_val;
+ break;
+ }
+ }
+
+ if (i == DRM_CONNECTOR_MAX_PROPERTY)
+ return -EINVAL;
+ return 0;
+}
+EXPORT_SYMBOL(drm_connector_attach_property);
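+
+/*
+ * Illustrative driver-side sketch (not taken from any driver; the property
+ * name and its values are hypothetical):
+ *
+ *	prop = drm_property_create(dev, DRM_MODE_PROP_ENUM, "dithering", 2);
+ *	if (prop) {
+ *		drm_property_add_enum(prop, 0, 0, "off");
+ *		drm_property_add_enum(prop, 1, 1, "on");
+ *		drm_connector_attach_property(connector, prop, 0);
+ *	}
+ */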
+
+int drm_connector_property_set_value(struct drm_connector *connector,
+ struct drm_property *property, uint64_t value)
+{
+ int i;
+
+ for (i = 0; i < DRM_CONNECTOR_MAX_PROPERTY; i++) {
+ if (connector->property_ids[i] == property->base.id) {
+ connector->property_values[i] = value;
+ break;
+ }
+ }
+
+ if (i == DRM_CONNECTOR_MAX_PROPERTY)
+ return -EINVAL;
+ return 0;
+}
+EXPORT_SYMBOL(drm_connector_property_set_value);
+
+int drm_connector_property_get_value(struct drm_connector *connector,
+ struct drm_property *property, uint64_t *val)
+{
+ int i;
+
+ for (i = 0; i < DRM_CONNECTOR_MAX_PROPERTY; i++) {
+ if (connector->property_ids[i] == property->base.id) {
+ *val = connector->property_values[i];
+ break;
+ }
+ }
+
+ if (i == DRM_CONNECTOR_MAX_PROPERTY)
+ return -EINVAL;
+ return 0;
+}
+EXPORT_SYMBOL(drm_connector_property_get_value);
+
+int drm_mode_getproperty_ioctl(struct drm_device *dev,
+ void *data, struct drm_file *file_priv)
+{
+ struct drm_mode_object *obj;
+ struct drm_mode_get_property *out_resp = data;
+ struct drm_property *property;
+ int enum_count = 0;
+ int blob_count = 0;
+ int value_count = 0;
+ int ret = 0, i;
+ int copied;
+ struct drm_property_enum *prop_enum;
+ struct drm_mode_property_enum __user *enum_ptr;
+ struct drm_property_blob *prop_blob;
+ uint32_t *blob_id_ptr;
+ uint64_t __user *values_ptr;
+ uint32_t __user *blob_length_ptr;
+
+ mutex_lock(&dev->mode_config.mutex);
+ obj = drm_mode_object_find(dev, out_resp->prop_id, DRM_MODE_OBJECT_PROPERTY);
+ if (!obj) {
+ ret = -EINVAL;
+ goto done;
+ }
+ property = obj_to_property(obj);
+
+ if (property->flags & DRM_MODE_PROP_ENUM) {
+ list_for_each_entry(prop_enum, &property->enum_blob_list, head)
+ enum_count++;
+ } else if (property->flags & DRM_MODE_PROP_BLOB) {
+ list_for_each_entry(prop_blob, &property->enum_blob_list, head)
+ blob_count++;
+ }
+
+ value_count = property->num_values;
+
+ strncpy(out_resp->name, property->name, DRM_PROP_NAME_LEN);
+ out_resp->name[DRM_PROP_NAME_LEN-1] = 0;
+ out_resp->flags = property->flags;
+
+ if ((out_resp->count_values >= value_count) && value_count) {
+ values_ptr = (uint64_t *)(unsigned long)out_resp->values_ptr;
+ for (i = 0; i < value_count; i++) {
+ if (copy_to_user(values_ptr + i, &property->values[i], sizeof(uint64_t))) {
+ ret = -EFAULT;
+ goto done;
+ }
+ }
+ }
+ out_resp->count_values = value_count;
+
+ if (property->flags & DRM_MODE_PROP_ENUM) {
+ if ((out_resp->count_enum_blobs >= enum_count) && enum_count) {
+ copied = 0;
+ enum_ptr = (struct drm_mode_property_enum *)(unsigned long)out_resp->enum_blob_ptr;
+ list_for_each_entry(prop_enum, &property->enum_blob_list, head) {
+
+ if (copy_to_user(&enum_ptr[copied].value, &prop_enum->value, sizeof(uint64_t))) {
+ ret = -EFAULT;
+ goto done;
+ }
+
+ if (copy_to_user(&enum_ptr[copied].name,
+ &prop_enum->name, DRM_PROP_NAME_LEN)) {
+ ret = -EFAULT;
+ goto done;
+ }
+ copied++;
+ }
+ }
+ out_resp->count_enum_blobs = enum_count;
+ }
+
+ if (property->flags & DRM_MODE_PROP_BLOB) {
+ if ((out_resp->count_enum_blobs >= blob_count) && blob_count) {
+ copied = 0;
+ blob_id_ptr = (uint32_t *)(unsigned long)out_resp->enum_blob_ptr;
+ blob_length_ptr = (uint32_t *)(unsigned long)out_resp->values_ptr;
+
+ list_for_each_entry(prop_blob, &property->enum_blob_list, head) {
+ if (put_user(prop_blob->base.id, blob_id_ptr + copied)) {
+ ret = -EFAULT;
+ goto done;
+ }
+
+ if (put_user(prop_blob->length, blob_length_ptr + copied)) {
+ ret = -EFAULT;
+ goto done;
+ }
+
+ copied++;
+ }
+ }
+ out_resp->count_enum_blobs = blob_count;
+ }
+done:
+ mutex_unlock(&dev->mode_config.mutex);
+ return ret;
+}
+
+static struct drm_property_blob *drm_property_create_blob(struct drm_device *dev, int length,
+ void *data)
+{
+ struct drm_property_blob *blob;
+
+ if (!length || !data)
+ return NULL;
+
+ blob = kzalloc(sizeof(struct drm_property_blob)+length, GFP_KERNEL);
+ if (!blob)
+ return NULL;
+
+ blob->data = (void *)((char *)blob + sizeof(struct drm_property_blob));
+ blob->length = length;
+
+ memcpy(blob->data, data, length);
+
+ drm_mode_object_get(dev, &blob->base, DRM_MODE_OBJECT_BLOB);
+
+ list_add_tail(&blob->head, &dev->mode_config.property_blob_list);
+ return blob;
+}
+
+static void drm_property_destroy_blob(struct drm_device *dev,
+ struct drm_property_blob *blob)
+{
+ drm_mode_object_put(dev, &blob->base);
+ list_del(&blob->head);
+ kfree(blob);
+}
+
+int drm_mode_getblob_ioctl(struct drm_device *dev,
+ void *data, struct drm_file *file_priv)
+{
+ struct drm_mode_object *obj;
+ struct drm_mode_get_blob *out_resp = data;
+ struct drm_property_blob *blob;
+ int ret = 0;
+ void *blob_ptr;
+
+ mutex_lock(&dev->mode_config.mutex);
+ obj = drm_mode_object_find(dev, out_resp->blob_id, DRM_MODE_OBJECT_BLOB);
+ if (!obj) {
+ ret = -EINVAL;
+ goto done;
+ }
+ blob = obj_to_blob(obj);
+
+ if (out_resp->length == blob->length) {
+ blob_ptr = (void *)(unsigned long)out_resp->data;
+ if (copy_to_user(blob_ptr, blob->data, blob->length)){
+ ret = -EFAULT;
+ goto done;
+ }
+ }
+ out_resp->length = blob->length;
+
+done:
+ mutex_unlock(&dev->mode_config.mutex);
+ return ret;
+}
+
+int drm_mode_connector_update_edid_property(struct drm_connector *connector,
+ struct edid *edid)
+{
+ struct drm_device *dev = connector->dev;
+ int ret = 0;
+
+ if (connector->edid_blob_ptr)
+ drm_property_destroy_blob(dev, connector->edid_blob_ptr);
+
+	/* Clear the EDID property when there is no EDID. */
+ if (!edid) {
+ connector->edid_blob_ptr = NULL;
+ ret = drm_connector_property_set_value(connector, dev->mode_config.edid_property, 0);
+ return ret;
+ }
+
+ connector->edid_blob_ptr = drm_property_create_blob(connector->dev, 128, edid);
+
+ ret = drm_connector_property_set_value(connector,
+ dev->mode_config.edid_property,
+ connector->edid_blob_ptr->base.id);
+
+ return ret;
+}
+EXPORT_SYMBOL(drm_mode_connector_update_edid_property);
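+
+/*
+ * Typical use from a connector's mode probing path (sketch only; how the
+ * EDID block is actually fetched is up to the driver's DDC code):
+ *
+ *	edid = <read EDID block over DDC>;
+ *	drm_mode_connector_update_edid_property(connector, edid);
+ *
+ * Passing a NULL edid clears the property again.
+ */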
+
+int drm_mode_connector_property_set_ioctl(struct drm_device *dev,
+ void *data, struct drm_file *file_priv)
+{
+ struct drm_mode_connector_set_property *out_resp = data;
+ struct drm_mode_object *obj;
+ struct drm_property *property;
+ struct drm_connector *connector;
+ int ret = -EINVAL;
+ int i;
+
+ mutex_lock(&dev->mode_config.mutex);
+
+ obj = drm_mode_object_find(dev, out_resp->connector_id, DRM_MODE_OBJECT_CONNECTOR);
+ if (!obj) {
+ goto out;
+ }
+ connector = obj_to_connector(obj);
+
+ for (i = 0; i < DRM_CONNECTOR_MAX_PROPERTY; i++) {
+ if (connector->property_ids[i] == out_resp->prop_id)
+ break;
+ }
+
+ if (i == DRM_CONNECTOR_MAX_PROPERTY) {
+ goto out;
+ }
+
+ obj = drm_mode_object_find(dev, out_resp->prop_id, DRM_MODE_OBJECT_PROPERTY);
+ if (!obj) {
+ goto out;
+ }
+ property = obj_to_property(obj);
+
+ if (property->flags & DRM_MODE_PROP_IMMUTABLE)
+ goto out;
+
+ if (property->flags & DRM_MODE_PROP_RANGE) {
+ if (out_resp->value < property->values[0])
+ goto out;
+
+ if (out_resp->value > property->values[1])
+ goto out;
+ } else {
+ int found = 0;
+ for (i = 0; i < property->num_values; i++) {
+ if (property->values[i] == out_resp->value) {
+ found = 1;
+ break;
+ }
+ }
+ if (!found) {
+ goto out;
+ }
+ }
+
+ if (connector->funcs->set_property)
+ ret = connector->funcs->set_property(connector, property, out_resp->value);
+
+	/* store the property value if successful */
+ if (!ret)
+ drm_connector_property_set_value(connector, property, out_resp->value);
+out:
+ mutex_unlock(&dev->mode_config.mutex);
+ return ret;
+}
+
+int drm_mode_connector_attach_encoder(struct drm_connector *connector,
+ struct drm_encoder *encoder)
+{
+ int i;
+
+ for (i = 0; i < DRM_CONNECTOR_MAX_ENCODER; i++) {
+ if (connector->encoder_ids[i] == 0) {
+ connector->encoder_ids[i] = encoder->base.id;
+ return 0;
+ }
+ }
+ return -ENOMEM;
+}
+EXPORT_SYMBOL(drm_mode_connector_attach_encoder);
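+
+/*
+ * Drivers typically attach encoders while registering their outputs,
+ * e.g. (sketch, "output" being a hypothetical driver structure that
+ * embeds both objects):
+ *
+ *	drm_mode_connector_attach_encoder(&output->connector,
+ *					  &output->encoder);
+ */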
+
+void drm_mode_connector_detach_encoder(struct drm_connector *connector,
+ struct drm_encoder *encoder)
+{
+ int i;
+ for (i = 0; i < DRM_CONNECTOR_MAX_ENCODER; i++) {
+ if (connector->encoder_ids[i] == encoder->base.id) {
+ connector->encoder_ids[i] = 0;
+ if (connector->encoder == encoder)
+ connector->encoder = NULL;
+ break;
+ }
+ }
+}
+EXPORT_SYMBOL(drm_mode_connector_detach_encoder);
+
+bool drm_mode_crtc_set_gamma_size(struct drm_crtc *crtc,
+ int gamma_size)
+{
+ crtc->gamma_size = gamma_size;
+
+ crtc->gamma_store = kzalloc(gamma_size * sizeof(uint16_t) * 3, GFP_KERNEL);
+ if (!crtc->gamma_store) {
+ crtc->gamma_size = 0;
+ return false;
+ }
+
+ return true;
+}
+EXPORT_SYMBOL(drm_mode_crtc_set_gamma_size);
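+
+/*
+ * A driver's CRTC init path sizes the gamma table once, e.g. (sketch):
+ *
+ *	drm_mode_crtc_set_gamma_size(crtc, 256);
+ *
+ * The gamma ioctls below then treat gamma_store as three consecutive
+ * arrays of gamma_size u16 values (red, green, blue) when calling the
+ * driver's ->gamma_set() hook.
+ */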
+
+int drm_mode_gamma_set_ioctl(struct drm_device *dev,
+ void *data, struct drm_file *file_priv)
+{
+ struct drm_mode_crtc_lut *crtc_lut = data;
+ struct drm_mode_object *obj;
+ struct drm_crtc *crtc;
+ void *r_base, *g_base, *b_base;
+ int size;
+ int ret = 0;
+
+ mutex_lock(&dev->mode_config.mutex);
+ obj = drm_mode_object_find(dev, crtc_lut->crtc_id, DRM_MODE_OBJECT_CRTC);
+ if (!obj) {
+ ret = -EINVAL;
+ goto out;
+ }
+ crtc = obj_to_crtc(obj);
+
+ /* memcpy into gamma store */
+ if (crtc_lut->gamma_size != crtc->gamma_size) {
+ ret = -EINVAL;
+ goto out;
+ }
+
+ size = crtc_lut->gamma_size * (sizeof(uint16_t));
+ r_base = crtc->gamma_store;
+ if (copy_from_user(r_base, (void __user *)(unsigned long)crtc_lut->red, size)) {
+ ret = -EFAULT;
+ goto out;
+ }
+
+ g_base = r_base + size;
+ if (copy_from_user(g_base, (void __user *)(unsigned long)crtc_lut->green, size)) {
+ ret = -EFAULT;
+ goto out;
+ }
+
+ b_base = g_base + size;
+ if (copy_from_user(b_base, (void __user *)(unsigned long)crtc_lut->blue, size)) {
+ ret = -EFAULT;
+ goto out;
+ }
+
+ crtc->funcs->gamma_set(crtc, r_base, g_base, b_base, crtc->gamma_size);
+
+out:
+ mutex_unlock(&dev->mode_config.mutex);
+ return ret;
+
+}
+
+int drm_mode_gamma_get_ioctl(struct drm_device *dev,
+ void *data, struct drm_file *file_priv)
+{
+ struct drm_mode_crtc_lut *crtc_lut = data;
+ struct drm_mode_object *obj;
+ struct drm_crtc *crtc;
+ void *r_base, *g_base, *b_base;
+ int size;
+ int ret = 0;
+
+ mutex_lock(&dev->mode_config.mutex);
+ obj = drm_mode_object_find(dev, crtc_lut->crtc_id, DRM_MODE_OBJECT_CRTC);
+ if (!obj) {
+ ret = -EINVAL;
+ goto out;
+ }
+ crtc = obj_to_crtc(obj);
+
+ /* memcpy into gamma store */
+ if (crtc_lut->gamma_size != crtc->gamma_size) {
+ ret = -EINVAL;
+ goto out;
+ }
+
+ size = crtc_lut->gamma_size * (sizeof(uint16_t));
+ r_base = crtc->gamma_store;
+ if (copy_to_user((void __user *)(unsigned long)crtc_lut->red, r_base, size)) {
+ ret = -EFAULT;
+ goto out;
+ }
+
+ g_base = r_base + size;
+ if (copy_to_user((void __user *)(unsigned long)crtc_lut->green, g_base, size)) {
+ ret = -EFAULT;
+ goto out;
+ }
+
+ b_base = g_base + size;
+ if (copy_to_user((void __user *)(unsigned long)crtc_lut->blue, b_base, size)) {
+ ret = -EFAULT;
+ goto out;
+ }
+out:
+ mutex_unlock(&dev->mode_config.mutex);
+ return ret;
+}
diff --git a/drivers/gpu/drm/drm_crtc_helper.c b/drivers/gpu/drm/drm_crtc_helper.c
new file mode 100644
index 00000000000..d8a982b7129
--- /dev/null
+++ b/drivers/gpu/drm/drm_crtc_helper.c
@@ -0,0 +1,826 @@
+/*
+ * Copyright (c) 2006-2008 Intel Corporation
+ * Copyright (c) 2007 Dave Airlie <airlied@linux.ie>
+ *
+ * DRM core CRTC related functions
+ *
+ * Permission to use, copy, modify, distribute, and sell this software and its
+ * documentation for any purpose is hereby granted without fee, provided that
+ * the above copyright notice appear in all copies and that both that copyright
+ * notice and this permission notice appear in supporting documentation, and
+ * that the name of the copyright holders not be used in advertising or
+ * publicity pertaining to distribution of the software without specific,
+ * written prior permission. The copyright holders make no representations
+ * about the suitability of this software for any purpose. It is provided "as
+ * is" without express or implied warranty.
+ *
+ * THE COPYRIGHT HOLDERS DISCLAIM ALL WARRANTIES WITH REGARD TO THIS SOFTWARE,
+ * INCLUDING ALL IMPLIED WARRANTIES OF MERCHANTABILITY AND FITNESS, IN NO
+ * EVENT SHALL THE COPYRIGHT HOLDERS BE LIABLE FOR ANY SPECIAL, INDIRECT OR
+ * CONSEQUENTIAL DAMAGES OR ANY DAMAGES WHATSOEVER RESULTING FROM LOSS OF USE,
+ * DATA OR PROFITS, WHETHER IN AN ACTION OF CONTRACT, NEGLIGENCE OR OTHER
+ * TORTIOUS ACTION, ARISING OUT OF OR IN CONNECTION WITH THE USE OR PERFORMANCE
+ * OF THIS SOFTWARE.
+ *
+ * Authors:
+ * Keith Packard
+ * Eric Anholt <eric@anholt.net>
+ * Dave Airlie <airlied@linux.ie>
+ * Jesse Barnes <jesse.barnes@intel.com>
+ */
+
+#include "drmP.h"
+#include "drm_crtc.h"
+#include "drm_crtc_helper.h"
+
+/*
+ * Detailed mode info for 800x600@60Hz
+ */
+static struct drm_display_mode std_mode[] = {
+ { DRM_MODE("800x600", DRM_MODE_TYPE_DEFAULT, 40000, 800, 840,
+ 968, 1056, 0, 600, 601, 605, 628, 0,
+ DRM_MODE_FLAG_PHSYNC | DRM_MODE_FLAG_PVSYNC) },
+};
+
+/**
+ * drm_helper_probe_single_connector_modes - get complete set of display modes
+ * @connector: connector to probe
+ * @maxX: max width for modes
+ * @maxY: max height for modes
+ *
+ * LOCKING:
+ * Caller must hold mode config lock.
+ *
+ * Try to detect modes on @connector. Modes will first be added to the
+ * connector's probed_modes list, then culled (based on validity and the
+ * @maxX, @maxY parameters) and put into the normal modes list.
+ *
+ * Intended to be used either at bootup time or when major configuration
+ * changes have occurred.
+ *
+ * FIXME: take into account monitor limits
+ */
+void drm_helper_probe_single_connector_modes(struct drm_connector *connector,
+ uint32_t maxX, uint32_t maxY)
+{
+ struct drm_device *dev = connector->dev;
+ struct drm_display_mode *mode, *t;
+ struct drm_connector_helper_funcs *connector_funcs =
+ connector->helper_private;
+ int ret;
+
+ DRM_DEBUG("%s\n", drm_get_connector_name(connector));
+ /* set all modes to the unverified state */
+ list_for_each_entry_safe(mode, t, &connector->modes, head)
+ mode->status = MODE_UNVERIFIED;
+
+ connector->status = connector->funcs->detect(connector);
+
+ if (connector->status == connector_status_disconnected) {
+ DRM_DEBUG("%s is disconnected\n",
+ drm_get_connector_name(connector));
+ /* TODO set EDID to NULL */
+ return;
+ }
+
+ ret = (*connector_funcs->get_modes)(connector);
+
+	if (ret)
+		drm_mode_connector_list_update(connector);
+
+ if (maxX && maxY)
+ drm_mode_validate_size(dev, &connector->modes, maxX,
+ maxY, 0);
+ list_for_each_entry_safe(mode, t, &connector->modes, head) {
+ if (mode->status == MODE_OK)
+ mode->status = connector_funcs->mode_valid(connector,
+ mode);
+ }
+
+
+ drm_mode_prune_invalid(dev, &connector->modes, true);
+
+ if (list_empty(&connector->modes)) {
+ struct drm_display_mode *stdmode;
+
+ DRM_DEBUG("No valid modes on %s\n",
+ drm_get_connector_name(connector));
+
+		/* Should we do this here ???
+		 * When no valid EDID modes are available we end up
+		 * here and bailed in the past; now we add a standard
+		 * 800x600@60Hz mode and carry on.
+		 */
+ stdmode = drm_mode_duplicate(dev, &std_mode[0]);
+ drm_mode_probed_add(connector, stdmode);
+ drm_mode_list_concat(&connector->probed_modes,
+ &connector->modes);
+
+		DRM_DEBUG("Adding standard 800x600 @ 60Hz to %s\n",
+ drm_get_connector_name(connector));
+ }
+
+ drm_mode_sort(&connector->modes);
+
+ DRM_DEBUG("Probed modes for %s\n", drm_get_connector_name(connector));
+ list_for_each_entry_safe(mode, t, &connector->modes, head) {
+ mode->vrefresh = drm_mode_vrefresh(mode);
+
+ drm_mode_set_crtcinfo(mode, CRTC_INTERLACE_HALVE_V);
+ drm_mode_debug_printmodeline(mode);
+ }
+}
+EXPORT_SYMBOL(drm_helper_probe_single_connector_modes);
+
+void drm_helper_probe_connector_modes(struct drm_device *dev, uint32_t maxX,
+ uint32_t maxY)
+{
+ struct drm_connector *connector;
+
+ list_for_each_entry(connector, &dev->mode_config.connector_list, head) {
+ drm_helper_probe_single_connector_modes(connector, maxX, maxY);
+ }
+}
+EXPORT_SYMBOL(drm_helper_probe_connector_modes);
+
+
+/**
+ * drm_helper_crtc_in_use - check if a given CRTC is in a mode_config
+ * @crtc: CRTC to check
+ *
+ * LOCKING:
+ * Caller must hold mode config lock.
+ *
+ * Walk @crtc's DRM device's mode_config and see if it's in use.
+ *
+ * RETURNS:
+ * True if @crtc is part of the mode_config, false otherwise.
+ */
+bool drm_helper_crtc_in_use(struct drm_crtc *crtc)
+{
+ struct drm_encoder *encoder;
+ struct drm_device *dev = crtc->dev;
+ /* FIXME: Locking around list access? */
+ list_for_each_entry(encoder, &dev->mode_config.encoder_list, head)
+ if (encoder->crtc == crtc)
+ return true;
+ return false;
+}
+EXPORT_SYMBOL(drm_helper_crtc_in_use);
+
+/**
+ * drm_helper_disable_unused_functions - disable unused objects
+ * @dev: DRM device
+ *
+ * LOCKING:
+ * Caller must hold mode config lock.
+ *
+ * If a connector or CRTC isn't part of @dev's mode_config, it can be disabled
+ * by calling its dpms function, which should power it off.
+ */
+void drm_helper_disable_unused_functions(struct drm_device *dev)
+{
+ struct drm_encoder *encoder;
+ struct drm_encoder_helper_funcs *encoder_funcs;
+ struct drm_crtc *crtc;
+
+ list_for_each_entry(encoder, &dev->mode_config.encoder_list, head) {
+ encoder_funcs = encoder->helper_private;
+ if (!encoder->crtc)
+ (*encoder_funcs->dpms)(encoder, DRM_MODE_DPMS_OFF);
+ }
+
+ list_for_each_entry(crtc, &dev->mode_config.crtc_list, head) {
+ struct drm_crtc_helper_funcs *crtc_funcs = crtc->helper_private;
+ crtc->enabled = drm_helper_crtc_in_use(crtc);
+ if (!crtc->enabled) {
+ crtc_funcs->dpms(crtc, DRM_MODE_DPMS_OFF);
+ crtc->fb = NULL;
+ }
+ }
+}
+EXPORT_SYMBOL(drm_helper_disable_unused_functions);
+
+static struct drm_display_mode *drm_has_preferred_mode(struct drm_connector *connector, int width, int height)
+{
+ struct drm_display_mode *mode;
+
+ list_for_each_entry(mode, &connector->modes, head) {
+ if (drm_mode_width(mode) > width ||
+ drm_mode_height(mode) > height)
+ continue;
+ if (mode->type & DRM_MODE_TYPE_PREFERRED)
+ return mode;
+ }
+ return NULL;
+}
+
+static bool drm_connector_enabled(struct drm_connector *connector, bool strict)
+{
+ bool enable;
+
+ if (strict) {
+ enable = connector->status == connector_status_connected;
+ } else {
+ enable = connector->status != connector_status_disconnected;
+ }
+ return enable;
+}
+
+static void drm_enable_connectors(struct drm_device *dev, bool *enabled)
+{
+ bool any_enabled = false;
+ struct drm_connector *connector;
+ int i = 0;
+
+ list_for_each_entry(connector, &dev->mode_config.connector_list, head) {
+ enabled[i] = drm_connector_enabled(connector, true);
+ any_enabled |= enabled[i];
+ i++;
+ }
+
+ if (any_enabled)
+ return;
+
+ i = 0;
+ list_for_each_entry(connector, &dev->mode_config.connector_list, head) {
+ enabled[i] = drm_connector_enabled(connector, false);
+ i++;
+ }
+}
+
+static bool drm_target_preferred(struct drm_device *dev,
+ struct drm_display_mode **modes,
+ bool *enabled, int width, int height)
+{
+ struct drm_connector *connector;
+ int i = 0;
+
+ list_for_each_entry(connector, &dev->mode_config.connector_list, head) {
+
+ if (enabled[i] == false) {
+ i++;
+ continue;
+ }
+
+ modes[i] = drm_has_preferred_mode(connector, width, height);
+ if (!modes[i]) {
+ list_for_each_entry(modes[i], &connector->modes, head)
+ break;
+ }
+ i++;
+ }
+ return true;
+}
+
+static int drm_pick_crtcs(struct drm_device *dev,
+ struct drm_crtc **best_crtcs,
+ struct drm_display_mode **modes,
+ int n, int width, int height)
+{
+ int c, o;
+ struct drm_connector *connector;
+ struct drm_connector_helper_funcs *connector_funcs;
+ struct drm_encoder *encoder;
+ struct drm_crtc *best_crtc;
+ int my_score, best_score, score;
+ struct drm_crtc **crtcs, *crtc;
+
+ if (n == dev->mode_config.num_connector)
+ return 0;
+ c = 0;
+ list_for_each_entry(connector, &dev->mode_config.connector_list, head) {
+ if (c == n)
+ break;
+ c++;
+ }
+
+ best_crtcs[n] = NULL;
+ best_crtc = NULL;
+ best_score = drm_pick_crtcs(dev, best_crtcs, modes, n+1, width, height);
+ if (modes[n] == NULL)
+ return best_score;
+
+ crtcs = kmalloc(dev->mode_config.num_connector *
+ sizeof(struct drm_crtc *), GFP_KERNEL);
+ if (!crtcs)
+ return best_score;
+
+ my_score = 1;
+ if (connector->status == connector_status_connected)
+ my_score++;
+ if (drm_has_preferred_mode(connector, width, height))
+ my_score++;
+
+ connector_funcs = connector->helper_private;
+ encoder = connector_funcs->best_encoder(connector);
+ if (!encoder)
+ goto out;
+
+ connector->encoder = encoder;
+
+ /* select a crtc for this connector and then attempt to configure
+ remaining connectors */
+ c = 0;
+ list_for_each_entry(crtc, &dev->mode_config.crtc_list, head) {
+
+ if ((connector->encoder->possible_crtcs & (1 << c)) == 0) {
+ c++;
+ continue;
+ }
+
+ for (o = 0; o < n; o++)
+ if (best_crtcs[o] == crtc)
+ break;
+
+ if (o < n) {
+ /* ignore cloning for now */
+ c++;
+ continue;
+ }
+
+ crtcs[n] = crtc;
+ memcpy(crtcs, best_crtcs, n * sizeof(struct drm_crtc *));
+ score = my_score + drm_pick_crtcs(dev, crtcs, modes, n + 1,
+ width, height);
+ if (score > best_score) {
+ best_crtc = crtc;
+ best_score = score;
+ memcpy(best_crtcs, crtcs,
+ dev->mode_config.num_connector *
+ sizeof(struct drm_crtc *));
+ }
+ c++;
+ }
+out:
+ kfree(crtcs);
+ return best_score;
+}
+
+static void drm_setup_crtcs(struct drm_device *dev)
+{
+ struct drm_crtc **crtcs;
+ struct drm_display_mode **modes;
+ struct drm_encoder *encoder;
+ struct drm_connector *connector;
+ bool *enabled;
+ int width, height;
+ int i, ret;
+
+ width = dev->mode_config.max_width;
+ height = dev->mode_config.max_height;
+
+ /* clean out all the encoder/crtc combos */
+ list_for_each_entry(encoder, &dev->mode_config.encoder_list, head) {
+ encoder->crtc = NULL;
+ }
+
+ crtcs = kcalloc(dev->mode_config.num_connector,
+ sizeof(struct drm_crtc *), GFP_KERNEL);
+ modes = kcalloc(dev->mode_config.num_connector,
+ sizeof(struct drm_display_mode *), GFP_KERNEL);
+ enabled = kcalloc(dev->mode_config.num_connector,
+ sizeof(bool), GFP_KERNEL);
+
+ drm_enable_connectors(dev, enabled);
+
+ ret = drm_target_preferred(dev, modes, enabled, width, height);
+ if (!ret)
+ DRM_ERROR("Unable to find initial modes\n");
+
+ drm_pick_crtcs(dev, crtcs, modes, 0, width, height);
+
+ i = 0;
+ list_for_each_entry(connector, &dev->mode_config.connector_list, head) {
+ struct drm_display_mode *mode = modes[i];
+ struct drm_crtc *crtc = crtcs[i];
+
+ if (connector->encoder == NULL) {
+ i++;
+ continue;
+ }
+
+ if (mode && crtc) {
+ crtc->desired_mode = mode;
+ connector->encoder->crtc = crtc;
+ } else
+ connector->encoder->crtc = NULL;
+ i++;
+ }
+
+ kfree(crtcs);
+ kfree(modes);
+ kfree(enabled);
+}
+/**
+ * drm_crtc_helper_set_mode - set a mode on a CRTC
+ * @crtc: CRTC to program
+ * @mode: mode to use
+ * @x: horizontal offset into the framebuffer
+ * @y: vertical offset into the framebuffer
+ * @old_fb: previous framebuffer
+ *
+ * LOCKING:
+ * Caller must hold mode config lock.
+ *
+ * Try to set @mode on @crtc. Give @crtc and its associated connectors a chance
+ * to fixup or reject the mode prior to trying to set it.
+ *
+ * RETURNS:
+ * True if the mode was set successfully, or false otherwise.
+ */
+bool drm_crtc_helper_set_mode(struct drm_crtc *crtc,
+ struct drm_display_mode *mode,
+ int x, int y,
+ struct drm_framebuffer *old_fb)
+{
+ struct drm_device *dev = crtc->dev;
+ struct drm_display_mode *adjusted_mode, saved_mode;
+ struct drm_crtc_helper_funcs *crtc_funcs = crtc->helper_private;
+ struct drm_encoder_helper_funcs *encoder_funcs;
+ int saved_x, saved_y;
+ struct drm_encoder *encoder;
+ bool ret = true;
+
+ adjusted_mode = drm_mode_duplicate(dev, mode);
+
+ crtc->enabled = drm_helper_crtc_in_use(crtc);
+
+ if (!crtc->enabled)
+ return true;
+
+ saved_mode = crtc->mode;
+ saved_x = crtc->x;
+ saved_y = crtc->y;
+
+ /* Update crtc values up front so the driver can rely on them for mode
+ * setting.
+ */
+ crtc->mode = *mode;
+ crtc->x = x;
+ crtc->y = y;
+
+ if (drm_mode_equal(&saved_mode, &crtc->mode)) {
+ if (saved_x != crtc->x || saved_y != crtc->y) {
+ crtc_funcs->mode_set_base(crtc, crtc->x, crtc->y,
+ old_fb);
+ goto done;
+ }
+ }
+
+ /* Pass our mode to the connectors and the CRTC to give them a chance to
+ * adjust it according to limitations or connector properties, and also
+ * a chance to reject the mode entirely.
+ */
+ list_for_each_entry(encoder, &dev->mode_config.encoder_list, head) {
+
+ if (encoder->crtc != crtc)
+ continue;
+ encoder_funcs = encoder->helper_private;
+ if (!(ret = encoder_funcs->mode_fixup(encoder, mode,
+ adjusted_mode))) {
+ goto done;
+ }
+ }
+
+ if (!(ret = crtc_funcs->mode_fixup(crtc, mode, adjusted_mode))) {
+ goto done;
+ }
+
+ /* Prepare the encoders and CRTCs before setting the mode. */
+ list_for_each_entry(encoder, &dev->mode_config.encoder_list, head) {
+
+ if (encoder->crtc != crtc)
+ continue;
+ encoder_funcs = encoder->helper_private;
+ /* Disable the encoders as the first thing we do. */
+ encoder_funcs->prepare(encoder);
+ }
+
+ crtc_funcs->prepare(crtc);
+
+ /* Set up the DPLL and any encoders state that needs to adjust or depend
+ * on the DPLL.
+ */
+ crtc_funcs->mode_set(crtc, mode, adjusted_mode, x, y, old_fb);
+
+ list_for_each_entry(encoder, &dev->mode_config.encoder_list, head) {
+
+ if (encoder->crtc != crtc)
+ continue;
+
+ DRM_INFO("%s: set mode %s %x\n", drm_get_encoder_name(encoder),
+ mode->name, mode->base.id);
+ encoder_funcs = encoder->helper_private;
+ encoder_funcs->mode_set(encoder, mode, adjusted_mode);
+ }
+
+ /* Now enable the clocks, plane, pipe, and connectors that we set up. */
+ crtc_funcs->commit(crtc);
+
+ list_for_each_entry(encoder, &dev->mode_config.encoder_list, head) {
+
+ if (encoder->crtc != crtc)
+ continue;
+
+ encoder_funcs = encoder->helper_private;
+ encoder_funcs->commit(encoder);
+
+ }
+
+ /* XXX free adjustedmode */
+ drm_mode_destroy(dev, adjusted_mode);
+ /* FIXME: add subpixel order */
+done:
+ if (!ret) {
+ crtc->mode = saved_mode;
+ crtc->x = saved_x;
+ crtc->y = saved_y;
+ }
+
+ return ret;
+}
+EXPORT_SYMBOL(drm_crtc_helper_set_mode);
+
+
+/**
+ * drm_crtc_helper_set_config - set a new config from userspace
+ * @set: mode set configuration (CRTC, mode, x/y offsets, connectors, fb)
+ *
+ * LOCKING:
+ * Caller must hold mode config lock.
+ *
+ * Setup a new configuration, provided by the user in @set, and enable
+ * it.
+ *
+ * RETURNS:
+ * Zero on success, -errno on failure.
+ */
+int drm_crtc_helper_set_config(struct drm_mode_set *set)
+{
+ struct drm_device *dev;
+ struct drm_crtc **save_crtcs, *new_crtc;
+ struct drm_encoder **save_encoders, *new_encoder;
+ struct drm_framebuffer *old_fb;
+ bool save_enabled;
+ bool changed = false;
+ bool flip_or_move = false;
+ struct drm_connector *connector;
+ int count = 0, ro, fail = 0;
+ struct drm_crtc_helper_funcs *crtc_funcs;
+ int ret = 0;
+
+ DRM_DEBUG("\n");
+
+ if (!set)
+ return -EINVAL;
+
+ if (!set->crtc)
+ return -EINVAL;
+
+ if (!set->crtc->helper_private)
+ return -EINVAL;
+
+ crtc_funcs = set->crtc->helper_private;
+
+ DRM_DEBUG("crtc: %p %d fb: %p connectors: %p num_connectors: %d (x, y) (%i, %i)\n",
+ set->crtc, set->crtc->base.id, set->fb, set->connectors,
+ (int)set->num_connectors, set->x, set->y);
+
+ dev = set->crtc->dev;
+
+ /* save previous config */
+ save_enabled = set->crtc->enabled;
+
+ /* this is meant to be num_connector not num_crtc */
+ save_crtcs = kzalloc(dev->mode_config.num_connector *
+ sizeof(struct drm_crtc *), GFP_KERNEL);
+ if (!save_crtcs)
+ return -ENOMEM;
+
+ save_encoders = kzalloc(dev->mode_config.num_connector *
+				sizeof(struct drm_encoder *), GFP_KERNEL);
+ if (!save_encoders) {
+ kfree(save_crtcs);
+ return -ENOMEM;
+ }
+
+ /* We should be able to check here if the fb has the same properties
+ * and then just flip_or_move it */
+ if (set->crtc->fb != set->fb) {
+		/* if we have no fb then it's a change not a flip */
+ if (set->crtc->fb == NULL)
+ changed = true;
+ else
+ flip_or_move = true;
+ }
+
+ if (set->x != set->crtc->x || set->y != set->crtc->y)
+ flip_or_move = true;
+
+ if (set->mode && !drm_mode_equal(set->mode, &set->crtc->mode)) {
+ DRM_DEBUG("modes are different\n");
+ drm_mode_debug_printmodeline(&set->crtc->mode);
+ drm_mode_debug_printmodeline(set->mode);
+ changed = true;
+ }
+
+ /* a) traverse passed in connector list and get encoders for them */
+ count = 0;
+ list_for_each_entry(connector, &dev->mode_config.connector_list, head) {
+ struct drm_connector_helper_funcs *connector_funcs =
+ connector->helper_private;
+ save_encoders[count++] = connector->encoder;
+ new_encoder = connector->encoder;
+ for (ro = 0; ro < set->num_connectors; ro++) {
+ if (set->connectors[ro] == connector) {
+ new_encoder = connector_funcs->best_encoder(connector);
+				/* if we can't get an encoder for a connector
+				   we are setting now, then fail */
+ if (new_encoder == NULL)
+					/* don't break so fail path works correctly */
+ fail = 1;
+ break;
+ }
+ }
+
+ if (new_encoder != connector->encoder) {
+ changed = true;
+ connector->encoder = new_encoder;
+ }
+ }
+
+ if (fail) {
+ ret = -EINVAL;
+ goto fail_no_encoder;
+ }
+
+ count = 0;
+ list_for_each_entry(connector, &dev->mode_config.connector_list, head) {
+ if (!connector->encoder)
+ continue;
+
+ save_crtcs[count++] = connector->encoder->crtc;
+
+ if (connector->encoder->crtc == set->crtc)
+ new_crtc = NULL;
+ else
+ new_crtc = connector->encoder->crtc;
+
+ for (ro = 0; ro < set->num_connectors; ro++) {
+ if (set->connectors[ro] == connector)
+ new_crtc = set->crtc;
+ }
+ if (new_crtc != connector->encoder->crtc) {
+ changed = true;
+ connector->encoder->crtc = new_crtc;
+ }
+ }
+
+ /* mode_set_base is not a required function */
+ if (flip_or_move && !crtc_funcs->mode_set_base)
+ changed = true;
+
+ if (changed) {
+ old_fb = set->crtc->fb;
+ set->crtc->fb = set->fb;
+ set->crtc->enabled = (set->mode != NULL);
+ if (set->mode != NULL) {
+ DRM_DEBUG("attempting to set mode from userspace\n");
+ drm_mode_debug_printmodeline(set->mode);
+ if (!drm_crtc_helper_set_mode(set->crtc, set->mode,
+ set->x, set->y,
+ old_fb)) {
+ ret = -EINVAL;
+ goto fail_set_mode;
+ }
+ /* TODO are these needed? */
+ set->crtc->desired_x = set->x;
+ set->crtc->desired_y = set->y;
+ set->crtc->desired_mode = set->mode;
+ }
+ drm_helper_disable_unused_functions(dev);
+ } else if (flip_or_move) {
+ old_fb = set->crtc->fb;
+ if (set->crtc->fb != set->fb)
+ set->crtc->fb = set->fb;
+ crtc_funcs->mode_set_base(set->crtc, set->x, set->y, old_fb);
+ }
+
+ kfree(save_encoders);
+ kfree(save_crtcs);
+ return 0;
+
+fail_set_mode:
+ set->crtc->enabled = save_enabled;
+ count = 0;
+ list_for_each_entry(connector, &dev->mode_config.connector_list, head)
+ connector->encoder->crtc = save_crtcs[count++];
+fail_no_encoder:
+ kfree(save_crtcs);
+ count = 0;
+ list_for_each_entry(connector, &dev->mode_config.connector_list, head) {
+ connector->encoder = save_encoders[count++];
+ }
+ kfree(save_encoders);
+ return ret;
+}
+EXPORT_SYMBOL(drm_crtc_helper_set_config);
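+
+/*
+ * Drivers using the helper layer point the CRTC's set_config hook at this
+ * function, e.g. (sketch, the "foo" names are hypothetical):
+ *
+ *	static const struct drm_crtc_funcs foo_crtc_funcs = {
+ *		.cursor_set = foo_crtc_cursor_set,
+ *		.cursor_move = foo_crtc_cursor_move,
+ *		.gamma_set = foo_crtc_gamma_set,
+ *		.set_config = drm_crtc_helper_set_config,
+ *	};
+ */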
+
+bool drm_helper_plugged_event(struct drm_device *dev)
+{
+ DRM_DEBUG("\n");
+
+ drm_helper_probe_connector_modes(dev, dev->mode_config.max_width,
+ dev->mode_config.max_height);
+
+ drm_setup_crtcs(dev);
+
+ /* alert the driver fb layer */
+ dev->mode_config.funcs->fb_changed(dev);
+
+ /* FIXME: send hotplug event */
+ return true;
+}
+/**
+ * drm_helper_initial_config - setup a sane initial connector configuration
+ * @dev: DRM device
+ * @can_grow: this configuration is growable
+ *
+ * LOCKING:
+ * Called at init time, must take mode config lock.
+ *
+ * Scan the CRTCs and connectors and try to put together an initial setup.
+ * At the moment, this is a cloned configuration across all heads with
+ * a new framebuffer object as the backing store.
+ *
+ * RETURNS:
+ * Zero if everything went ok, nonzero otherwise.
+ */
+bool drm_helper_initial_config(struct drm_device *dev, bool can_grow)
+{
+ int ret = false;
+
+ drm_helper_plugged_event(dev);
+ return ret;
+}
+EXPORT_SYMBOL(drm_helper_initial_config);
+
+/**
+ * drm_hotplug_stage_two
+ * @dev DRM device
+ * @connector hotpluged connector
+ *
+ * LOCKING.
+ * Caller must hold mode config lock, function might grab struct lock.
+ *
+ * Stage two of a hotplug.
+ *
+ * RETURNS:
+ * Zero on success, errno on failure.
+ */
+int drm_helper_hotplug_stage_two(struct drm_device *dev)
+{
+ drm_helper_plugged_event(dev);
+
+ return 0;
+}
+EXPORT_SYMBOL(drm_helper_hotplug_stage_two);
+
+int drm_helper_mode_fill_fb_struct(struct drm_framebuffer *fb,
+ struct drm_mode_fb_cmd *mode_cmd)
+{
+ fb->width = mode_cmd->width;
+ fb->height = mode_cmd->height;
+ fb->pitch = mode_cmd->pitch;
+ fb->bits_per_pixel = mode_cmd->bpp;
+ fb->depth = mode_cmd->depth;
+
+ return 0;
+}
+EXPORT_SYMBOL(drm_helper_mode_fill_fb_struct);
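+
+/*
+ * Meant to be called from a driver's fb_create hook after it has allocated
+ * its framebuffer object, e.g. (sketch, names hypothetical):
+ *
+ *	drm_helper_mode_fill_fb_struct(&foo_fb->base, mode_cmd);
+ *
+ * so that width/height/pitch/bpp/depth are taken from userspace's
+ * struct drm_mode_fb_cmd.
+ */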
+
+int drm_helper_resume_force_mode(struct drm_device *dev)
+{
+ struct drm_crtc *crtc;
+ int ret;
+
+ list_for_each_entry(crtc, &dev->mode_config.crtc_list, head) {
+
+ if (!crtc->enabled)
+ continue;
+
+ ret = drm_crtc_helper_set_mode(crtc, &crtc->mode,
+ crtc->x, crtc->y, crtc->fb);
+
+ if (ret == false)
+ DRM_ERROR("failed to set mode on crtc %p\n", crtc);
+ }
+ return 0;
+}
+EXPORT_SYMBOL(drm_helper_resume_force_mode);
diff --git a/drivers/gpu/drm/drm_drv.c b/drivers/gpu/drm/drm_drv.c
index 3ab1e9cc469..febb517ee67 100644
--- a/drivers/gpu/drm/drm_drv.c
+++ b/drivers/gpu/drm/drm_drv.c
@@ -74,6 +74,9 @@ static struct drm_ioctl_desc drm_ioctls[] = {
DRM_IOCTL_DEF(DRM_IOCTL_SET_SAREA_CTX, drm_setsareactx, DRM_AUTH|DRM_MASTER|DRM_ROOT_ONLY),
DRM_IOCTL_DEF(DRM_IOCTL_GET_SAREA_CTX, drm_getsareactx, DRM_AUTH),
+ DRM_IOCTL_DEF(DRM_IOCTL_SET_MASTER, drm_setmaster_ioctl, DRM_ROOT_ONLY),
+ DRM_IOCTL_DEF(DRM_IOCTL_DROP_MASTER, drm_dropmaster_ioctl, DRM_ROOT_ONLY),
+
DRM_IOCTL_DEF(DRM_IOCTL_ADD_CTX, drm_addctx, DRM_AUTH|DRM_ROOT_ONLY),
DRM_IOCTL_DEF(DRM_IOCTL_RM_CTX, drm_rmctx, DRM_AUTH|DRM_MASTER|DRM_ROOT_ONLY),
DRM_IOCTL_DEF(DRM_IOCTL_MOD_CTX, drm_modctx, DRM_AUTH|DRM_MASTER|DRM_ROOT_ONLY),
@@ -123,6 +126,23 @@ static struct drm_ioctl_desc drm_ioctls[] = {
DRM_IOCTL_DEF(DRM_IOCTL_GEM_CLOSE, drm_gem_close_ioctl, 0),
DRM_IOCTL_DEF(DRM_IOCTL_GEM_FLINK, drm_gem_flink_ioctl, DRM_AUTH),
DRM_IOCTL_DEF(DRM_IOCTL_GEM_OPEN, drm_gem_open_ioctl, DRM_AUTH),
+
+ DRM_IOCTL_DEF(DRM_IOCTL_MODE_GETRESOURCES, drm_mode_getresources, DRM_MASTER|DRM_CONTROL_ALLOW),
+ DRM_IOCTL_DEF(DRM_IOCTL_MODE_GETCRTC, drm_mode_getcrtc, DRM_MASTER|DRM_CONTROL_ALLOW),
+ DRM_IOCTL_DEF(DRM_IOCTL_MODE_SETCRTC, drm_mode_setcrtc, DRM_MASTER|DRM_CONTROL_ALLOW),
+ DRM_IOCTL_DEF(DRM_IOCTL_MODE_CURSOR, drm_mode_cursor_ioctl, DRM_MASTER|DRM_CONTROL_ALLOW),
+ DRM_IOCTL_DEF(DRM_IOCTL_MODE_GETGAMMA, drm_mode_gamma_get_ioctl, DRM_MASTER),
+ DRM_IOCTL_DEF(DRM_IOCTL_MODE_SETGAMMA, drm_mode_gamma_set_ioctl, DRM_MASTER),
+ DRM_IOCTL_DEF(DRM_IOCTL_MODE_GETENCODER, drm_mode_getencoder, DRM_MASTER|DRM_CONTROL_ALLOW),
+ DRM_IOCTL_DEF(DRM_IOCTL_MODE_GETCONNECTOR, drm_mode_getconnector, DRM_MASTER|DRM_CONTROL_ALLOW),
+ DRM_IOCTL_DEF(DRM_IOCTL_MODE_ATTACHMODE, drm_mode_attachmode_ioctl, DRM_MASTER|DRM_CONTROL_ALLOW),
+ DRM_IOCTL_DEF(DRM_IOCTL_MODE_DETACHMODE, drm_mode_detachmode_ioctl, DRM_MASTER|DRM_CONTROL_ALLOW),
+ DRM_IOCTL_DEF(DRM_IOCTL_MODE_GETPROPERTY, drm_mode_getproperty_ioctl, DRM_MASTER | DRM_CONTROL_ALLOW),
+ DRM_IOCTL_DEF(DRM_IOCTL_MODE_SETPROPERTY, drm_mode_connector_property_set_ioctl, DRM_MASTER|DRM_CONTROL_ALLOW),
+ DRM_IOCTL_DEF(DRM_IOCTL_MODE_GETPROPBLOB, drm_mode_getblob_ioctl, DRM_MASTER|DRM_CONTROL_ALLOW),
+ DRM_IOCTL_DEF(DRM_IOCTL_MODE_GETFB, drm_mode_getfb, DRM_MASTER|DRM_CONTROL_ALLOW),
+ DRM_IOCTL_DEF(DRM_IOCTL_MODE_ADDFB, drm_mode_addfb, DRM_MASTER|DRM_CONTROL_ALLOW),
+ DRM_IOCTL_DEF(DRM_IOCTL_MODE_RMFB, drm_mode_rmfb, DRM_MASTER|DRM_CONTROL_ALLOW),
};
#define DRM_CORE_IOCTL_COUNT ARRAY_SIZE( drm_ioctls )
@@ -138,8 +158,6 @@ static struct drm_ioctl_desc drm_ioctls[] = {
*/
int drm_lastclose(struct drm_device * dev)
{
- struct drm_magic_entry *pt, *next;
- struct drm_map_list *r_list, *list_t;
struct drm_vma_entry *vma, *vma_temp;
int i;
@@ -149,13 +167,7 @@ int drm_lastclose(struct drm_device * dev)
dev->driver->lastclose(dev);
DRM_DEBUG("driver lastclose completed\n");
- if (dev->unique) {
- drm_free(dev->unique, strlen(dev->unique) + 1, DRM_MEM_DRIVER);
- dev->unique = NULL;
- dev->unique_len = 0;
- }
-
- if (dev->irq_enabled)
+ if (dev->irq_enabled && !drm_core_check_feature(dev, DRIVER_MODESET))
drm_irq_uninstall(dev);
mutex_lock(&dev->struct_mutex);
@@ -164,18 +176,9 @@ int drm_lastclose(struct drm_device * dev)
drm_drawable_free_all(dev);
del_timer(&dev->timer);
- /* Clear pid list */
- if (dev->magicfree.next) {
- list_for_each_entry_safe(pt, next, &dev->magicfree, head) {
- list_del(&pt->head);
- drm_ht_remove_item(&dev->magiclist, &pt->hash_item);
- drm_free(pt, sizeof(*pt), DRM_MEM_MAGIC);
- }
- drm_ht_remove(&dev->magiclist);
- }
-
/* Clear AGP information */
- if (drm_core_has_AGP(dev) && dev->agp) {
+ if (drm_core_has_AGP(dev) && dev->agp &&
+ !drm_core_check_feature(dev, DRIVER_MODESET)) {
struct drm_agp_mem *entry, *tempe;
/* Remove AGP resources, but leave dev->agp
@@ -194,7 +197,8 @@ int drm_lastclose(struct drm_device * dev)
dev->agp->acquired = 0;
dev->agp->enabled = 0;
}
- if (drm_core_check_feature(dev, DRIVER_SG) && dev->sg) {
+ if (drm_core_check_feature(dev, DRIVER_SG) && dev->sg &&
+ !drm_core_check_feature(dev, DRIVER_MODESET)) {
drm_sg_cleanup(dev->sg);
dev->sg = NULL;
}
@@ -205,13 +209,6 @@ int drm_lastclose(struct drm_device * dev)
drm_free(vma, sizeof(*vma), DRM_MEM_VMAS);
}
- list_for_each_entry_safe(r_list, list_t, &dev->maplist, head) {
- if (!(r_list->map->flags & _DRM_DRIVER)) {
- drm_rmmap_locked(dev, r_list->map);
- r_list = NULL;
- }
- }
-
if (drm_core_check_feature(dev, DRIVER_DMA_QUEUE) && dev->queuelist) {
for (i = 0; i < dev->queue_count; i++) {
if (dev->queuelist[i]) {
@@ -228,14 +225,11 @@ int drm_lastclose(struct drm_device * dev)
}
dev->queue_count = 0;
- if (drm_core_check_feature(dev, DRIVER_HAVE_DMA))
+ if (drm_core_check_feature(dev, DRIVER_HAVE_DMA) &&
+ !drm_core_check_feature(dev, DRIVER_MODESET))
drm_dma_takedown(dev);
- if (dev->lock.hw_lock) {
- dev->sigdata.lock = dev->lock.hw_lock = NULL; /* SHM removed */
- dev->lock.file_priv = NULL;
- wake_up_interruptible(&dev->lock.lock_queue);
- }
+ dev->dev_mapping = NULL;
mutex_unlock(&dev->struct_mutex);
DRM_DEBUG("lastclose completed\n");
@@ -263,6 +257,8 @@ int drm_init(struct drm_driver *driver)
DRM_DEBUG("\n");
+ INIT_LIST_HEAD(&driver->device_list);
+
for (i = 0; driver->pci_driver.id_table[i].vendor != 0; i++) {
pid = (struct pci_device_id *)&driver->pci_driver.id_table[i];
@@ -305,6 +301,8 @@ static void drm_cleanup(struct drm_device * dev)
return;
}
+ drm_vblank_cleanup(dev);
+
drm_lastclose(dev);
if (drm_core_has_MTRR(dev) && drm_core_has_AGP(dev) &&
@@ -327,35 +325,24 @@ static void drm_cleanup(struct drm_device * dev)
drm_ht_remove(&dev->map_hash);
drm_ctxbitmap_cleanup(dev);
+ if (drm_core_check_feature(dev, DRIVER_MODESET))
+ drm_put_minor(&dev->control);
+
+ if (dev->driver->driver_features & DRIVER_GEM)
+ drm_gem_destroy(dev);
+
drm_put_minor(&dev->primary);
if (drm_put_dev(dev))
DRM_ERROR("Cannot unload module\n");
}
-static int drm_minors_cleanup(int id, void *ptr, void *data)
-{
- struct drm_minor *minor = ptr;
- struct drm_device *dev;
- struct drm_driver *driver = data;
-
- dev = minor->dev;
- if (minor->dev->driver != driver)
- return 0;
-
- if (minor->type != DRM_MINOR_LEGACY)
- return 0;
-
- if (dev)
- pci_dev_put(dev->pdev);
- drm_cleanup(dev);
- return 1;
-}
-
void drm_exit(struct drm_driver *driver)
{
+ struct drm_device *dev, *tmp;
DRM_DEBUG("\n");
- idr_for_each(&drm_minors_idr, &drm_minors_cleanup, driver);
+ list_for_each_entry_safe(dev, tmp, &driver->device_list, driver_item)
+ drm_cleanup(dev);
DRM_INFO("Module unloaded\n");
}
@@ -501,7 +488,7 @@ int drm_ioctl(struct inode *inode, struct file *filp,
retcode = -EINVAL;
} else if (((ioctl->flags & DRM_ROOT_ONLY) && !capable(CAP_SYS_ADMIN)) ||
((ioctl->flags & DRM_AUTH) && !file_priv->authenticated) ||
- ((ioctl->flags & DRM_MASTER) && !file_priv->master)) {
+ ((ioctl->flags & DRM_MASTER) && !file_priv->is_master)) {
retcode = -EACCES;
} else {
if (cmd & (IOC_IN | IOC_OUT)) {
diff --git a/drivers/gpu/drm/drm_edid.c b/drivers/gpu/drm/drm_edid.c
new file mode 100644
index 00000000000..0fbb0da342c
--- /dev/null
+++ b/drivers/gpu/drm/drm_edid.c
@@ -0,0 +1,732 @@
+/*
+ * Copyright (c) 2006 Luc Verhaegen (quirks list)
+ * Copyright (c) 2007-2008 Intel Corporation
+ * Jesse Barnes <jesse.barnes@intel.com>
+ *
+ * DDC probing routines (drm_ddc_read & drm_do_probe_ddc_edid) originally from
+ * FB layer.
+ * Copyright (C) 2006 Dennis Munsie <dmunsie@cecropia.com>
+ *
+ * Permission is hereby granted, free of charge, to any person obtaining a
+ * copy of this software and associated documentation files (the "Software"),
+ * to deal in the Software without restriction, including without limitation
+ * the rights to use, copy, modify, merge, publish, distribute, sub license,
+ * and/or sell copies of the Software, and to permit persons to whom the
+ * Software is furnished to do so, subject to the following conditions:
+ *
+ * The above copyright notice and this permission notice (including the
+ * next paragraph) shall be included in all copies or substantial portions
+ * of the Software.
+ *
+ * THE SOFTWARE IS PROVIDED "AS IS", WITHOUT WARRANTY OF ANY KIND, EXPRESS OR
+ * IMPLIED, INCLUDING BUT NOT LIMITED TO THE WARRANTIES OF MERCHANTABILITY,
+ * FITNESS FOR A PARTICULAR PURPOSE AND NON-INFRINGEMENT. IN NO EVENT SHALL
+ * THE AUTHORS OR COPYRIGHT HOLDERS BE LIABLE FOR ANY CLAIM, DAMAGES OR OTHER
+ * LIABILITY, WHETHER IN AN ACTION OF CONTRACT, TORT OR OTHERWISE, ARISING
+ * FROM, OUT OF OR IN CONNECTION WITH THE SOFTWARE OR THE USE OR OTHER
+ * DEALINGS IN THE SOFTWARE.
+ */
+#include <linux/kernel.h>
+#include <linux/i2c.h>
+#include <linux/i2c-algo-bit.h>
+#include "drmP.h"
+#include "drm_edid.h"
+
+/*
+ * TODO:
+ * - support EDID 1.4 (incl. CE blocks)
+ */
+
+/*
+ * EDID blocks out in the wild have a variety of bugs, try to collect
+ * them here (note that userspace may work around broken monitors first,
+ * but fixes should make their way here so that the kernel "just works"
+ * on as many displays as possible).
+ */
+
+/* First detailed mode wrong, use largest 60Hz mode */
+#define EDID_QUIRK_PREFER_LARGE_60 (1 << 0)
+/* Reported 135MHz pixel clock is too high, needs adjustment */
+#define EDID_QUIRK_135_CLOCK_TOO_HIGH (1 << 1)
+/* Prefer the largest mode at 75 Hz */
+#define EDID_QUIRK_PREFER_LARGE_75 (1 << 2)
+/* Detail timing is in cm not mm */
+#define EDID_QUIRK_DETAILED_IN_CM (1 << 3)
+/* Detailed timing descriptors have bogus size values, so just take the
+ * maximum size and use that.
+ */
+#define EDID_QUIRK_DETAILED_USE_MAXIMUM_SIZE (1 << 4)
+/* Monitor forgot to set the first detailed is preferred bit. */
+#define EDID_QUIRK_FIRST_DETAILED_PREFERRED (1 << 5)
+/* use +hsync +vsync for detailed mode */
+#define EDID_QUIRK_DETAILED_SYNC_PP (1 << 6)
+
+static struct edid_quirk {
+ char *vendor;
+ int product_id;
+ u32 quirks;
+} edid_quirk_list[] = {
+ /* Acer AL1706 */
+ { "ACR", 44358, EDID_QUIRK_PREFER_LARGE_60 },
+ /* Acer F51 */
+ { "API", 0x7602, EDID_QUIRK_PREFER_LARGE_60 },
+ /* Unknown Acer */
+ { "ACR", 2423, EDID_QUIRK_FIRST_DETAILED_PREFERRED },
+
+ /* Belinea 10 15 55 */
+ { "MAX", 1516, EDID_QUIRK_PREFER_LARGE_60 },
+ { "MAX", 0x77e, EDID_QUIRK_PREFER_LARGE_60 },
+
+ /* Envision Peripherals, Inc. EN-7100e */
+ { "EPI", 59264, EDID_QUIRK_135_CLOCK_TOO_HIGH },
+
+ /* Funai Electronics PM36B */
+ { "FCM", 13600, EDID_QUIRK_PREFER_LARGE_75 |
+ EDID_QUIRK_DETAILED_IN_CM },
+
+ /* LG Philips LCD LP154W01-A5 */
+ { "LPL", 0, EDID_QUIRK_DETAILED_USE_MAXIMUM_SIZE },
+ { "LPL", 0x2a00, EDID_QUIRK_DETAILED_USE_MAXIMUM_SIZE },
+
+ /* Philips 107p5 CRT */
+ { "PHL", 57364, EDID_QUIRK_FIRST_DETAILED_PREFERRED },
+
+ /* Proview AY765C */
+ { "PTS", 765, EDID_QUIRK_FIRST_DETAILED_PREFERRED },
+
+ /* Samsung SyncMaster 205BW. Note: irony */
+ { "SAM", 541, EDID_QUIRK_DETAILED_SYNC_PP },
+ /* Samsung SyncMaster 22[5-6]BW */
+ { "SAM", 596, EDID_QUIRK_PREFER_LARGE_60 },
+ { "SAM", 638, EDID_QUIRK_PREFER_LARGE_60 },
+};
+
+
+/* Valid EDID header has these bytes */
+static u8 edid_header[] = { 0x00, 0xff, 0xff, 0xff, 0xff, 0xff, 0xff, 0x00 };
+
+/**
+ * edid_is_valid - sanity check EDID data
+ * @edid: EDID data
+ *
+ * Sanity check the EDID block by looking at the header, the version number
+ * and the checksum. Return 0 if the EDID doesn't check out, or 1 if it's
+ * valid.
+ */
+static bool edid_is_valid(struct edid *edid)
+{
+ int i;
+ u8 csum = 0;
+ u8 *raw_edid = (u8 *)edid;
+
+ if (memcmp(edid->header, edid_header, sizeof(edid_header)))
+ goto bad;
+ if (edid->version != 1) {
+ DRM_ERROR("EDID has major version %d, instead of 1\n", edid->version);
+ goto bad;
+ }
+ if (edid->revision <= 0 || edid->revision > 3) {
+ DRM_ERROR("EDID has minor version %d, which is not between 0-3\n", edid->revision);
+ goto bad;
+ }
+
+ for (i = 0; i < EDID_LENGTH; i++)
+ csum += raw_edid[i];
+ if (csum) {
+ DRM_ERROR("EDID checksum is invalid, remainder is %d\n", csum);
+ goto bad;
+ }
+
+ return 1;
+
+bad:
+ if (raw_edid) {
+ DRM_ERROR("Raw EDID:\n");
+ print_hex_dump_bytes(KERN_ERR, DUMP_PREFIX_NONE, raw_edid, EDID_LENGTH);
+ printk("\n");
+ }
+ return 0;
+}
+
+/**
+ * edid_vendor - match a string against EDID's obfuscated vendor field
+ * @edid: EDID to match
+ * @vendor: vendor string
+ *
+ * Returns true if @vendor is in @edid, false otherwise
+ */
+static bool edid_vendor(struct edid *edid, char *vendor)
+{
+ char edid_vendor[3];
+
+ edid_vendor[0] = ((edid->mfg_id[0] & 0x7c) >> 2) + '@';
+ edid_vendor[1] = (((edid->mfg_id[0] & 0x3) << 3) |
+ ((edid->mfg_id[1] & 0xe0) >> 5)) + '@';
+ edid_vendor[2] = (edid->mfg_id[2] & 0x1f) + '@';
+
+ return !strncmp(edid_vendor, vendor, 3);
+}
+
+/**
+ * edid_get_quirks - return quirk flags for a given EDID
+ * @edid: EDID to process
+ *
+ * This tells subsequent routines what fixes they need to apply.
+ */
+static u32 edid_get_quirks(struct edid *edid)
+{
+ struct edid_quirk *quirk;
+ int i;
+
+ for (i = 0; i < ARRAY_SIZE(edid_quirk_list); i++) {
+ quirk = &edid_quirk_list[i];
+
+ if (edid_vendor(edid, quirk->vendor) &&
+ (EDID_PRODUCT_ID(edid) == quirk->product_id))
+ return quirk->quirks;
+ }
+
+ return 0;
+}
+
+#define MODE_SIZE(m) ((m)->hdisplay * (m)->vdisplay)
+#define MODE_REFRESH_DIFF(m, r) (abs((m)->vrefresh - (r)))
+
+
+/**
+ * edid_fixup_preferred - set preferred modes based on quirk list
+ * @connector: has mode list to fix up
+ * @quirks: quirks list
+ *
+ * Walk the mode list for @connector, clearing the preferred status
+ * on existing modes and setting it anew for the right mode ala @quirks.
+ */
+static void edid_fixup_preferred(struct drm_connector *connector,
+ u32 quirks)
+{
+ struct drm_display_mode *t, *cur_mode, *preferred_mode;
+ int target_refresh = 0;
+
+ if (list_empty(&connector->probed_modes))
+ return;
+
+ if (quirks & EDID_QUIRK_PREFER_LARGE_60)
+ target_refresh = 60;
+ if (quirks & EDID_QUIRK_PREFER_LARGE_75)
+ target_refresh = 75;
+
+ preferred_mode = list_first_entry(&connector->probed_modes,
+ struct drm_display_mode, head);
+
+ list_for_each_entry_safe(cur_mode, t, &connector->probed_modes, head) {
+ cur_mode->type &= ~DRM_MODE_TYPE_PREFERRED;
+
+ if (cur_mode == preferred_mode)
+ continue;
+
+ /* Largest mode is preferred */
+ if (MODE_SIZE(cur_mode) > MODE_SIZE(preferred_mode))
+ preferred_mode = cur_mode;
+
+ /* At a given size, try to get closest to target refresh */
+ if ((MODE_SIZE(cur_mode) == MODE_SIZE(preferred_mode)) &&
+ MODE_REFRESH_DIFF(cur_mode, target_refresh) <
+ MODE_REFRESH_DIFF(preferred_mode, target_refresh)) {
+ preferred_mode = cur_mode;
+ }
+ }
+
+ preferred_mode->type |= DRM_MODE_TYPE_PREFERRED;
+}
+
+/**
+ * drm_mode_std - convert standard mode info (width, height, refresh) into mode
+ * @dev: DRM device
+ * @t: standard timing params
+ *
+ * Take the standard timing params (in this case width, aspect, and refresh)
+ * and convert them into a real mode using CVT.
+ *
+ * Punts for now, but should eventually use the FB layer's CVT based mode
+ * generation code.
+ */
+struct drm_display_mode *drm_mode_std(struct drm_device *dev,
+ struct std_timing *t)
+{
+ struct drm_display_mode *mode;
+ int hsize = t->hsize * 8 + 248, vsize;
+
+ mode = drm_mode_create(dev);
+ if (!mode)
+ return NULL;
+
+ if (t->aspect_ratio == 0)
+ vsize = (hsize * 10) / 16;
+ else if (t->aspect_ratio == 1)
+ vsize = (hsize * 3) / 4;
+ else if (t->aspect_ratio == 2)
+ vsize = (hsize * 4) / 5;
+ else
+ vsize = (hsize * 9) / 16;
+
+ drm_mode_set_name(mode);
+
+ return mode;
+}
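Worked example of the standard-timing arithmetic above, kept standalone: a stored hsize byte of 129 expands to 129 * 8 + 248 = 1280 active pixels, and aspect_ratio == 2 (treated as 5:4, per the mapping in drm_mode_std()) gives 1280 * 4 / 5 = 1024 lines.

#include <stdio.h>

int main(void)
{
	int stored_hsize = 129;			/* raw byte from the EDID */
	int aspect_ratio = 2;			/* same encoding drm_mode_std() uses */
	int hsize = stored_hsize * 8 + 248;	/* 1280 */
	int vsize;

	if (aspect_ratio == 0)
		vsize = (hsize * 10) / 16;
	else if (aspect_ratio == 1)
		vsize = (hsize * 3) / 4;
	else if (aspect_ratio == 2)
		vsize = (hsize * 4) / 5;
	else
		vsize = (hsize * 9) / 16;

	printf("%dx%d\n", hsize, vsize);	/* 1280x1024 */
	return 0;
}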
+
+/**
+ * drm_mode_detailed - create a new mode from an EDID detailed timing section
+ * @dev: DRM device (needed to create new mode)
+ * @edid: EDID block
+ * @timing: EDID detailed timing info
+ * @quirks: quirks to apply
+ *
+ * An EDID detailed timing block contains enough info for us to create and
+ * return a new struct drm_display_mode.
+ */
+static struct drm_display_mode *drm_mode_detailed(struct drm_device *dev,
+ struct edid *edid,
+ struct detailed_timing *timing,
+ u32 quirks)
+{
+ struct drm_display_mode *mode;
+ struct detailed_pixel_timing *pt = &timing->data.pixel_data;
+
+ if (pt->stereo) {
+ printk(KERN_WARNING "stereo mode not supported\n");
+ return NULL;
+ }
+ if (!pt->separate_sync) {
+ printk(KERN_WARNING "integrated sync not supported\n");
+ return NULL;
+ }
+
+ mode = drm_mode_create(dev);
+ if (!mode)
+ return NULL;
+
+ mode->type = DRM_MODE_TYPE_DRIVER;
+
+ if (quirks & EDID_QUIRK_135_CLOCK_TOO_HIGH)
+ timing->pixel_clock = 1088;
+
+ mode->clock = timing->pixel_clock * 10;
+
+ mode->hdisplay = (pt->hactive_hi << 8) | pt->hactive_lo;
+ mode->hsync_start = mode->hdisplay + ((pt->hsync_offset_hi << 8) |
+ pt->hsync_offset_lo);
+ mode->hsync_end = mode->hsync_start +
+ ((pt->hsync_pulse_width_hi << 8) |
+ pt->hsync_pulse_width_lo);
+ mode->htotal = mode->hdisplay + ((pt->hblank_hi << 8) | pt->hblank_lo);
+
+ mode->vdisplay = (pt->vactive_hi << 8) | pt->vactive_lo;
+ mode->vsync_start = mode->vdisplay + ((pt->vsync_offset_hi << 8) |
+ pt->vsync_offset_lo);
+ mode->vsync_end = mode->vsync_start +
+ ((pt->vsync_pulse_width_hi << 8) |
+ pt->vsync_pulse_width_lo);
+ mode->vtotal = mode->vdisplay + ((pt->vblank_hi << 8) | pt->vblank_lo);
+
+ drm_mode_set_name(mode);
+
+ if (pt->interlaced)
+ mode->flags |= DRM_MODE_FLAG_INTERLACE;
+
+ if (quirks & EDID_QUIRK_DETAILED_SYNC_PP) {
+ pt->hsync_positive = 1;
+ pt->vsync_positive = 1;
+ }
+
+ mode->flags |= pt->hsync_positive ? DRM_MODE_FLAG_PHSYNC : DRM_MODE_FLAG_NHSYNC;
+ mode->flags |= pt->vsync_positive ? DRM_MODE_FLAG_PVSYNC : DRM_MODE_FLAG_NVSYNC;
+
+ mode->width_mm = pt->width_mm_lo | (pt->width_mm_hi << 8);
+ mode->height_mm = pt->height_mm_lo | (pt->height_mm_hi << 8);
+
+ if (quirks & EDID_QUIRK_DETAILED_IN_CM) {
+ mode->width_mm *= 10;
+ mode->height_mm *= 10;
+ }
+
+ if (quirks & EDID_QUIRK_DETAILED_USE_MAXIMUM_SIZE) {
+ mode->width_mm = edid->width_cm * 10;
+ mode->height_mm = edid->height_cm * 10;
+ }
+
+ return mode;
+}
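The detailed-timing conversion above mostly splices high and low byte fields back together and rescales the pixel clock from its 10 kHz storage units. A standalone sketch with illustrative 1024x768@60 values, treating the *_hi fields as already-unpacked numbers the way the bitfield struct does:

#include <stdio.h>
#include <stdint.h>

int main(void)
{
	/* Values as a 1024x768@60 detailed descriptor might carry them. */
	uint16_t pixel_clock = 6500;	/* stored in 10 kHz units */
	uint8_t hactive_lo = 0x00, hactive_hi = 0x04;
	uint8_t hblank_lo = 0x40, hblank_hi = 0x01;	/* 320 blanking pixels */

	int clock_khz = pixel_clock * 10;			/* 65000 kHz */
	int hdisplay = (hactive_hi << 8) | hactive_lo;		/* 1024 */
	int htotal = hdisplay + ((hblank_hi << 8) | hblank_lo);	/* 1344 */

	printf("%d kHz, %d/%d\n", clock_khz, hdisplay, htotal);
	return 0;
}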
+
+/*
+ * Detailed mode definitions for the EDID "established modes" bitmap, one entry per bit.
+ */
+static struct drm_display_mode edid_est_modes[] = {
+ { DRM_MODE("800x600", DRM_MODE_TYPE_DRIVER, 40000, 800, 840,
+ 968, 1056, 0, 600, 601, 605, 628, 0,
+ DRM_MODE_FLAG_PHSYNC | DRM_MODE_FLAG_PVSYNC) }, /* 800x600@60Hz */
+ { DRM_MODE("800x600", DRM_MODE_TYPE_DRIVER, 36000, 800, 824,
+ 896, 1024, 0, 600, 601, 603, 625, 0,
+ DRM_MODE_FLAG_PHSYNC | DRM_MODE_FLAG_PVSYNC) }, /* 800x600@56Hz */
+ { DRM_MODE("640x480", DRM_MODE_TYPE_DRIVER, 31500, 640, 656,
+ 720, 840, 0, 480, 481, 484, 500, 0,
+ DRM_MODE_FLAG_NHSYNC | DRM_MODE_FLAG_NVSYNC) }, /* 640x480@75Hz */
+ { DRM_MODE("640x480", DRM_MODE_TYPE_DRIVER, 31500, 640, 664,
+ 704, 832, 0, 480, 489, 491, 520, 0,
+ DRM_MODE_FLAG_NHSYNC | DRM_MODE_FLAG_NVSYNC) }, /* 640x480@72Hz */
+ { DRM_MODE("640x480", DRM_MODE_TYPE_DRIVER, 30240, 640, 704,
+ 768, 864, 0, 480, 483, 486, 525, 0,
+ DRM_MODE_FLAG_NHSYNC | DRM_MODE_FLAG_NVSYNC) }, /* 640x480@67Hz */
+ { DRM_MODE("640x480", DRM_MODE_TYPE_DRIVER, 25200, 640, 656,
+ 752, 800, 0, 480, 490, 492, 525, 0,
+ DRM_MODE_FLAG_NHSYNC | DRM_MODE_FLAG_NVSYNC) }, /* 640x480@60Hz */
+ { DRM_MODE("720x400", DRM_MODE_TYPE_DRIVER, 35500, 720, 738,
+ 846, 900, 0, 400, 421, 423, 449, 0,
+ DRM_MODE_FLAG_NHSYNC | DRM_MODE_FLAG_NVSYNC) }, /* 720x400@88Hz */
+ { DRM_MODE("720x400", DRM_MODE_TYPE_DRIVER, 28320, 720, 738,
+ 846, 900, 0, 400, 412, 414, 449, 0,
+ DRM_MODE_FLAG_NHSYNC | DRM_MODE_FLAG_PVSYNC) }, /* 720x400@70Hz */
+ { DRM_MODE("1280x1024", DRM_MODE_TYPE_DRIVER, 135000, 1280, 1296,
+ 1440, 1688, 0, 1024, 1025, 1028, 1066, 0,
+ DRM_MODE_FLAG_PHSYNC | DRM_MODE_FLAG_PVSYNC) }, /* 1280x1024@75Hz */
+ { DRM_MODE("1024x768", DRM_MODE_TYPE_DRIVER, 78800, 1024, 1040,
+ 1136, 1312, 0, 768, 769, 772, 800, 0,
+ DRM_MODE_FLAG_PHSYNC | DRM_MODE_FLAG_PVSYNC) }, /* 1024x768@75Hz */
+ { DRM_MODE("1024x768", DRM_MODE_TYPE_DRIVER, 75000, 1024, 1048,
+ 1184, 1328, 0, 768, 771, 777, 806, 0,
+ DRM_MODE_FLAG_NHSYNC | DRM_MODE_FLAG_NVSYNC) }, /* 1024x768@70Hz */
+ { DRM_MODE("1024x768", DRM_MODE_TYPE_DRIVER, 65000, 1024, 1048,
+ 1184, 1344, 0, 768, 771, 777, 806, 0,
+ DRM_MODE_FLAG_NHSYNC | DRM_MODE_FLAG_NVSYNC) }, /* 1024x768@60Hz */
+ { DRM_MODE("1024x768", DRM_MODE_TYPE_DRIVER,44900, 1024, 1032,
+ 1208, 1264, 0, 768, 768, 776, 817, 0,
+ DRM_MODE_FLAG_PHSYNC | DRM_MODE_FLAG_PVSYNC | DRM_MODE_FLAG_INTERLACE) }, /* 1024x768@43Hz */
+ { DRM_MODE("832x624", DRM_MODE_TYPE_DRIVER, 57284, 832, 864,
+ 928, 1152, 0, 624, 625, 628, 667, 0,
+ DRM_MODE_FLAG_NHSYNC | DRM_MODE_FLAG_NVSYNC) }, /* 832x624@75Hz */
+ { DRM_MODE("800x600", DRM_MODE_TYPE_DRIVER, 49500, 800, 816,
+ 896, 1056, 0, 600, 601, 604, 625, 0,
+ DRM_MODE_FLAG_PHSYNC | DRM_MODE_FLAG_PVSYNC) }, /* 800x600@75Hz */
+ { DRM_MODE("800x600", DRM_MODE_TYPE_DRIVER, 50000, 800, 856,
+ 976, 1040, 0, 600, 637, 643, 666, 0,
+ DRM_MODE_FLAG_PHSYNC | DRM_MODE_FLAG_PVSYNC) }, /* 800x600@72Hz */
+ { DRM_MODE("1152x864", DRM_MODE_TYPE_DRIVER, 108000, 1152, 1216,
+ 1344, 1600, 0, 864, 865, 868, 900, 0,
+ DRM_MODE_FLAG_PHSYNC | DRM_MODE_FLAG_PVSYNC) }, /* 1152x864@75Hz */
+};
+
+#define EDID_EST_TIMINGS 16
+#define EDID_STD_TIMINGS 8
+#define EDID_DETAILED_TIMINGS 4
+
+/**
+ * add_established_modes - get est. modes from EDID and add them
+ * @connector: connector to add the modes to
+ * @edid: EDID block to scan
+ *
+ * Each EDID block contains a bitmap of the supported "established modes" list
+ * (defined above). Tease them out and add them to the global modes list.
+ */
+static int add_established_modes(struct drm_connector *connector, struct edid *edid)
+{
+ struct drm_device *dev = connector->dev;
+ unsigned long est_bits = edid->established_timings.t1 |
+ (edid->established_timings.t2 << 8) |
+ ((edid->established_timings.mfg_rsvd & 0x80) << 9);
+ int i, modes = 0;
+
+ for (i = 0; i <= EDID_EST_TIMINGS; i++)
+ if (est_bits & (1<<i)) {
+ struct drm_display_mode *newmode;
+ newmode = drm_mode_duplicate(dev, &edid_est_modes[i]);
+ if (newmode) {
+ drm_mode_probed_add(connector, newmode);
+ modes++;
+ }
+ }
+
+ return modes;
+}
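The est_bits word is built so that bit N selects edid_est_modes[N]; for example, a t1 byte of 0x20 sets only bit 5, which corresponds to the 640x480@60 entry. A tiny standalone sketch of the same bit walk:

#include <stdio.h>

int main(void)
{
	unsigned char t1 = 0x20, t2 = 0x00, mfg_rsvd = 0x00;
	unsigned long est_bits = t1 | (t2 << 8) | ((mfg_rsvd & 0x80) << 9);
	int i;

	for (i = 0; i <= 16; i++)
		if (est_bits & (1 << i))
			printf("established mode index %d is set\n", i);	/* prints 5 */
	return 0;
}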
+
+/**
+ * add_standard_modes - get std. modes from EDID and add them
+ * @connector: connector to add the modes to
+ * @edid: EDID block to scan
+ *
+ * Standard modes can be calculated using the CVT standard. Grab them from
+ * @edid, calculate them, and add them to the list.
+ */
+static int add_standard_modes(struct drm_connector *connector, struct edid *edid)
+{
+ struct drm_device *dev = connector->dev;
+ int i, modes = 0;
+
+ for (i = 0; i < EDID_STD_TIMINGS; i++) {
+ struct std_timing *t = &edid->standard_timings[i];
+ struct drm_display_mode *newmode;
+
+ /* A standard timing slot filled with 0x01 bytes is unused */
+ if (t->hsize == 1 && (t->aspect_ratio | t->vfreq) == 1)
+ continue;
+
+ newmode = drm_mode_std(dev, &edid->standard_timings[i]);
+ if (newmode) {
+ drm_mode_probed_add(connector, newmode);
+ modes++;
+ }
+ }
+
+ return modes;
+}
+
+/**
+ * add_detailed_info - get detailed mode info from EDID data
+ * @connector: attached connector
+ * @edid: EDID block to scan
+ * @quirks: quirks to apply
+ *
+ * Some of the detailed timing sections may contain mode information. Grab
+ * it and add it to the list.
+ */
+static int add_detailed_info(struct drm_connector *connector,
+ struct edid *edid, u32 quirks)
+{
+ struct drm_device *dev = connector->dev;
+ int i, j, modes = 0;
+
+ for (i = 0; i < EDID_DETAILED_TIMINGS; i++) {
+ struct detailed_timing *timing = &edid->detailed_timings[i];
+ struct detailed_non_pixel *data = &timing->data.other_data;
+ struct drm_display_mode *newmode;
+
+ /* EDID up to and including 1.2 may put monitor info here */
+ if (edid->version == 1 && edid->revision < 3)
+ continue;
+
+ /* Detailed mode timing */
+ if (timing->pixel_clock) {
+ newmode = drm_mode_detailed(dev, edid, timing, quirks);
+ if (!newmode)
+ continue;
+
+ /* First detailed mode is preferred */
+ if (i == 0 && edid->preferred_timing)
+ newmode->type |= DRM_MODE_TYPE_PREFERRED;
+ drm_mode_probed_add(connector, newmode);
+
+ modes++;
+ continue;
+ }
+
+ /* Other timing or info */
+ switch (data->type) {
+ case EDID_DETAIL_MONITOR_SERIAL:
+ break;
+ case EDID_DETAIL_MONITOR_STRING:
+ break;
+ case EDID_DETAIL_MONITOR_RANGE:
+ /* Get monitor range data */
+ break;
+ case EDID_DETAIL_MONITOR_NAME:
+ break;
+ case EDID_DETAIL_MONITOR_CPDATA:
+ break;
+ case EDID_DETAIL_STD_MODES:
+ /* Five modes per detailed section */
+ for (j = 0; j < 5; j++) {
+ struct std_timing *std;
+ struct drm_display_mode *newmode;
+
+ std = &data->data.timings[j];
+ newmode = drm_mode_std(dev, std);
+ if (newmode) {
+ drm_mode_probed_add(connector, newmode);
+ modes++;
+ }
+ }
+ break;
+ default:
+ break;
+ }
+ }
+
+ return modes;
+}
+
+#define DDC_ADDR 0x50
+
+unsigned char *drm_do_probe_ddc_edid(struct i2c_adapter *adapter)
+{
+ unsigned char start = 0x0;
+ unsigned char *buf = kmalloc(EDID_LENGTH, GFP_KERNEL);
+ struct i2c_msg msgs[] = {
+ {
+ .addr = DDC_ADDR,
+ .flags = 0,
+ .len = 1,
+ .buf = &start,
+ }, {
+ .addr = DDC_ADDR,
+ .flags = I2C_M_RD,
+ .len = EDID_LENGTH,
+ .buf = buf,
+ }
+ };
+
+ if (!buf) {
+ dev_warn(&adapter->dev, "unable to allocate memory for EDID "
+ "block.\n");
+ return NULL;
+ }
+
+ if (i2c_transfer(adapter, msgs, 2) == 2)
+ return buf;
+
+ dev_info(&adapter->dev, "unable to read EDID block.\n");
+ kfree(buf);
+ return NULL;
+}
+EXPORT_SYMBOL(drm_do_probe_ddc_edid);
+
+static unsigned char *drm_ddc_read(struct i2c_adapter *adapter)
+{
+ struct i2c_algo_bit_data *algo_data = adapter->algo_data;
+ unsigned char *edid = NULL;
+ int i, j;
+
+ algo_data->setscl(algo_data->data, 1);
+
+ for (i = 0; i < 1; i++) {
+ /* For some old monitors we need the
+ * following process to initialize/stop DDC
+ */
+ algo_data->setsda(algo_data->data, 1);
+ msleep(13);
+
+ algo_data->setscl(algo_data->data, 1);
+ for (j = 0; j < 5; j++) {
+ msleep(10);
+ if (algo_data->getscl(algo_data->data))
+ break;
+ }
+ if (j == 5)
+ continue;
+
+ algo_data->setsda(algo_data->data, 0);
+ msleep(15);
+ algo_data->setscl(algo_data->data, 0);
+ msleep(15);
+ algo_data->setsda(algo_data->data, 1);
+ msleep(15);
+
+ /* Do the real work */
+ edid = drm_do_probe_ddc_edid(adapter);
+ algo_data->setsda(algo_data->data, 0);
+ algo_data->setscl(algo_data->data, 0);
+ msleep(15);
+
+ algo_data->setscl(algo_data->data, 1);
+ for (j = 0; j < 10; j++) {
+ msleep(10);
+ if (algo_data->getscl(algo_data->data))
+ break;
+ }
+
+ algo_data->setsda(algo_data->data, 1);
+ msleep(15);
+ algo_data->setscl(algo_data->data, 0);
+ algo_data->setsda(algo_data->data, 0);
+ if (edid)
+ break;
+ }
+ /* Release the DDC lines when done or the Apple Cinema HD display
+ * will switch off
+ */
+ algo_data->setsda(algo_data->data, 1);
+ algo_data->setscl(algo_data->data, 1);
+
+ return edid;
+}
+
+/**
+ * drm_get_edid - get EDID data, if available
+ * @connector: connector we're probing
+ * @adapter: i2c adapter to use for DDC
+ *
+ * Poke the given connector's i2c channel to grab EDID data if possible.
+ *
+ * Return edid data or NULL if we couldn't find any.
+ */
+struct edid *drm_get_edid(struct drm_connector *connector,
+ struct i2c_adapter *adapter)
+{
+ struct edid *edid;
+
+ edid = (struct edid *)drm_ddc_read(adapter);
+ if (!edid) {
+ dev_warn(&connector->dev->pdev->dev, "%s: no EDID data\n",
+ drm_get_connector_name(connector));
+ return NULL;
+ }
+ if (!edid_is_valid(edid)) {
+ dev_warn(&connector->dev->pdev->dev, "%s: EDID invalid.\n",
+ drm_get_connector_name(connector));
+ kfree(edid);
+ return NULL;
+ }
+
+ connector->display_info.raw_edid = (char *)edid;
+
+ return edid;
+}
+EXPORT_SYMBOL(drm_get_edid);
+
+/**
+ * drm_add_edid_modes - add modes from EDID data, if available
+ * @connector: connector we're probing
+ * @edid: edid data
+ *
+ * Add the specified modes to the connector's mode list.
+ *
+ * Return number of modes added or 0 if we couldn't find any.
+ */
+int drm_add_edid_modes(struct drm_connector *connector, struct edid *edid)
+{
+ int num_modes = 0;
+ u32 quirks;
+
+ if (edid == NULL) {
+ return 0;
+ }
+ if (!edid_is_valid(edid)) {
+ dev_warn(&connector->dev->pdev->dev, "%s: EDID invalid.\n",
+ drm_get_connector_name(connector));
+ return 0;
+ }
+
+ quirks = edid_get_quirks(edid);
+
+ num_modes += add_established_modes(connector, edid);
+ num_modes += add_standard_modes(connector, edid);
+ num_modes += add_detailed_info(connector, edid, quirks);
+
+ if (quirks & (EDID_QUIRK_PREFER_LARGE_60 | EDID_QUIRK_PREFER_LARGE_75))
+ edid_fixup_preferred(connector, quirks);
+
+ connector->display_info.serration_vsync = edid->serration_vsync;
+ connector->display_info.sync_on_green = edid->sync_on_green;
+ connector->display_info.composite_sync = edid->composite_sync;
+ connector->display_info.separate_syncs = edid->separate_syncs;
+ connector->display_info.blank_to_black = edid->blank_to_black;
+ connector->display_info.video_level = edid->video_level;
+ connector->display_info.digital = edid->digital;
+ connector->display_info.width_mm = edid->width_cm * 10;
+ connector->display_info.height_mm = edid->height_cm * 10;
+ connector->display_info.gamma = edid->gamma;
+ connector->display_info.gtf_supported = edid->default_gtf;
+ connector->display_info.standard_color = edid->standard_color;
+ connector->display_info.display_type = edid->display_type;
+ connector->display_info.active_off_supported = edid->pm_active_off;
+ connector->display_info.suspend_supported = edid->pm_suspend;
+ connector->display_info.standby_supported = edid->pm_standby;
+
+ return num_modes;
+}
+EXPORT_SYMBOL(drm_add_edid_modes);
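A minimal sketch of how a connector's ->get_modes() hook is expected to combine the two exported helpers; the output structure, its ddc_bus field and the wrapper macro are illustrative assumptions, not symbols from this patch. drm_get_edid() kmallocs the block, so the caller frees it once the modes have been added.

static int example_output_get_modes(struct drm_connector *connector)
{
	struct example_output *output = to_example_output(connector);	/* assumed wrapper */
	struct edid *edid;
	int count = 0;

	edid = drm_get_edid(connector, &output->ddc_bus->adapter);	/* assumed i2c field */
	if (edid) {
		count = drm_add_edid_modes(connector, edid);
		kfree(edid);
	}

	return count;
}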
diff --git a/drivers/gpu/drm/drm_fops.c b/drivers/gpu/drm/drm_fops.c
index 78eeed5caaf..b06a5371585 100644
--- a/drivers/gpu/drm/drm_fops.c
+++ b/drivers/gpu/drm/drm_fops.c
@@ -35,7 +35,6 @@
*/
#include "drmP.h"
-#include "drm_sarea.h"
#include <linux/poll.h>
#include <linux/smp_lock.h>
@@ -44,10 +43,8 @@ static int drm_open_helper(struct inode *inode, struct file *filp,
static int drm_setup(struct drm_device * dev)
{
- drm_local_map_t *map;
int i;
int ret;
- u32 sareapage;
if (dev->driver->firstopen) {
ret = dev->driver->firstopen(dev);
@@ -55,20 +52,14 @@ static int drm_setup(struct drm_device * dev)
return ret;
}
- dev->magicfree.next = NULL;
-
- /* prebuild the SAREA */
- sareapage = max_t(unsigned, SAREA_MAX, PAGE_SIZE);
- i = drm_addmap(dev, 0, sareapage, _DRM_SHM, _DRM_CONTAINS_LOCK, &map);
- if (i != 0)
- return i;
-
atomic_set(&dev->ioctl_count, 0);
atomic_set(&dev->vma_count, 0);
- dev->buf_use = 0;
- atomic_set(&dev->buf_alloc, 0);
- if (drm_core_check_feature(dev, DRIVER_HAVE_DMA)) {
+ if (drm_core_check_feature(dev, DRIVER_HAVE_DMA) &&
+ !drm_core_check_feature(dev, DRIVER_MODESET)) {
+ dev->buf_use = 0;
+ atomic_set(&dev->buf_alloc, 0);
+
i = drm_dma_setup(dev);
if (i < 0)
return i;
@@ -77,16 +68,12 @@ static int drm_setup(struct drm_device * dev)
for (i = 0; i < ARRAY_SIZE(dev->counts); i++)
atomic_set(&dev->counts[i], 0);
- drm_ht_create(&dev->magiclist, DRM_MAGIC_HASH_ORDER);
- INIT_LIST_HEAD(&dev->magicfree);
-
dev->sigdata.lock = NULL;
- init_waitqueue_head(&dev->lock.lock_queue);
+
dev->queue_count = 0;
dev->queue_reserved = 0;
dev->queue_slots = 0;
dev->queuelist = NULL;
- dev->irq_enabled = 0;
dev->context_flag = 0;
dev->interrupt_flag = 0;
dev->dma_flag = 0;
@@ -147,10 +134,20 @@ int drm_open(struct inode *inode, struct file *filp)
spin_lock(&dev->count_lock);
if (!dev->open_count++) {
spin_unlock(&dev->count_lock);
- return drm_setup(dev);
+ retcode = drm_setup(dev);
+ goto out;
}
spin_unlock(&dev->count_lock);
}
+out:
+ mutex_lock(&dev->struct_mutex);
+ if (minor->type == DRM_MINOR_LEGACY) {
+ BUG_ON((dev->dev_mapping != NULL) &&
+ (dev->dev_mapping != inode->i_mapping));
+ if (dev->dev_mapping == NULL)
+ dev->dev_mapping = inode->i_mapping;
+ }
+ mutex_unlock(&dev->struct_mutex);
return retcode;
}
@@ -186,6 +183,10 @@ int drm_stub_open(struct inode *inode, struct file *filp)
old_fops = filp->f_op;
filp->f_op = fops_get(&dev->driver->fops);
+ if (filp->f_op == NULL) {
+ filp->f_op = old_fops;
+ goto out;
+ }
if (filp->f_op->open && (err = filp->f_op->open(inode, filp))) {
fops_put(filp->f_op);
filp->f_op = fops_get(old_fops);
@@ -255,6 +256,7 @@ static int drm_open_helper(struct inode *inode, struct file *filp,
priv->lock_count = 0;
INIT_LIST_HEAD(&priv->lhead);
+ INIT_LIST_HEAD(&priv->fbs);
if (dev->driver->driver_features & DRIVER_GEM)
drm_gem_open(dev, priv);
@@ -265,10 +267,42 @@ static int drm_open_helper(struct inode *inode, struct file *filp,
goto out_free;
}
+
+ /* if there is no current master make this fd it */
mutex_lock(&dev->struct_mutex);
- if (list_empty(&dev->filelist))
- priv->master = 1;
+ if (!priv->minor->master) {
+ /* create a new master */
+ priv->minor->master = drm_master_create(priv->minor);
+ if (!priv->minor->master) {
+ ret = -ENOMEM;
+ goto out_free;
+ }
+
+ priv->is_master = 1;
+ /* take another reference for the copy in the local file priv */
+ priv->master = drm_master_get(priv->minor->master);
+
+ priv->authenticated = 1;
+
+ mutex_unlock(&dev->struct_mutex);
+ if (dev->driver->master_create) {
+ ret = dev->driver->master_create(dev, priv->master);
+ if (ret) {
+ mutex_lock(&dev->struct_mutex);
+ /* drop both references if this fails */
+ drm_master_put(&priv->minor->master);
+ drm_master_put(&priv->master);
+ mutex_unlock(&dev->struct_mutex);
+ goto out_free;
+ }
+ }
+ } else {
+ /* get a reference to the master */
+ priv->master = drm_master_get(priv->minor->master);
+ mutex_unlock(&dev->struct_mutex);
+ }
+ mutex_lock(&dev->struct_mutex);
list_add(&priv->lhead, &dev->filelist);
mutex_unlock(&dev->struct_mutex);
@@ -314,6 +348,74 @@ int drm_fasync(int fd, struct file *filp, int on)
}
EXPORT_SYMBOL(drm_fasync);
+/*
+ * Reclaim locked buffers; note that this may be a bad idea if the current
+ * context doesn't have the hw lock...
+ */
+static void drm_reclaim_locked_buffers(struct drm_device *dev, struct file *f)
+{
+ struct drm_file *file_priv = f->private_data;
+
+ if (drm_i_have_hw_lock(dev, file_priv)) {
+ dev->driver->reclaim_buffers_locked(dev, file_priv);
+ } else {
+ unsigned long _end = jiffies + 3 * DRM_HZ;
+ int locked = 0;
+
+ drm_idlelock_take(&file_priv->master->lock);
+
+ /*
+ * Wait for a while.
+ */
+ do {
+ spin_lock_bh(&file_priv->master->lock.spinlock);
+ locked = file_priv->master->lock.idle_has_lock;
+ spin_unlock_bh(&file_priv->master->lock.spinlock);
+ if (locked)
+ break;
+ schedule();
+ } while (!time_after_eq(jiffies, _end));
+
+ if (!locked) {
+ DRM_ERROR("reclaim_buffers_locked() deadlock. Please rework this\n"
+ "\tdriver to use reclaim_buffers_idlelocked() instead.\n"
+ "\tI will go on reclaiming the buffers anyway.\n");
+ }
+
+ dev->driver->reclaim_buffers_locked(dev, file_priv);
+ drm_idlelock_release(&file_priv->master->lock);
+ }
+}
+
+static void drm_master_release(struct drm_device *dev, struct file *filp)
+{
+ struct drm_file *file_priv = filp->private_data;
+
+ if (dev->driver->reclaim_buffers_locked &&
+ file_priv->master->lock.hw_lock)
+ drm_reclaim_locked_buffers(dev, filp);
+
+ if (dev->driver->reclaim_buffers_idlelocked &&
+ file_priv->master->lock.hw_lock) {
+ drm_idlelock_take(&file_priv->master->lock);
+ dev->driver->reclaim_buffers_idlelocked(dev, file_priv);
+ drm_idlelock_release(&file_priv->master->lock);
+ }
+
+
+ if (drm_i_have_hw_lock(dev, file_priv)) {
+ DRM_DEBUG("File %p released, freeing lock for context %d\n",
+ filp, _DRM_LOCKING_CONTEXT(file_priv->master->lock.hw_lock->lock));
+ drm_lock_free(&file_priv->master->lock,
+ _DRM_LOCKING_CONTEXT(file_priv->master->lock.hw_lock->lock));
+ }
+
+ if (drm_core_check_feature(dev, DRIVER_HAVE_DMA) &&
+ !dev->driver->reclaim_buffers_locked) {
+ dev->driver->reclaim_buffers(dev, file_priv);
+ }
+}
+
/**
* Release file.
*
@@ -348,60 +450,9 @@ int drm_release(struct inode *inode, struct file *filp)
(long)old_encode_dev(file_priv->minor->device),
dev->open_count);
- if (dev->driver->reclaim_buffers_locked && dev->lock.hw_lock) {
- if (drm_i_have_hw_lock(dev, file_priv)) {
- dev->driver->reclaim_buffers_locked(dev, file_priv);
- } else {
- unsigned long endtime = jiffies + 3 * DRM_HZ;
- int locked = 0;
-
- drm_idlelock_take(&dev->lock);
-
- /*
- * Wait for a while.
- */
-
- do{
- spin_lock_bh(&dev->lock.spinlock);
- locked = dev->lock.idle_has_lock;
- spin_unlock_bh(&dev->lock.spinlock);
- if (locked)
- break;
- schedule();
- } while (!time_after_eq(jiffies, endtime));
-
- if (!locked) {
- DRM_ERROR("reclaim_buffers_locked() deadlock. Please rework this\n"
- "\tdriver to use reclaim_buffers_idlelocked() instead.\n"
- "\tI will go on reclaiming the buffers anyway.\n");
- }
-
- dev->driver->reclaim_buffers_locked(dev, file_priv);
- drm_idlelock_release(&dev->lock);
- }
- }
-
- if (dev->driver->reclaim_buffers_idlelocked && dev->lock.hw_lock) {
-
- drm_idlelock_take(&dev->lock);
- dev->driver->reclaim_buffers_idlelocked(dev, file_priv);
- drm_idlelock_release(&dev->lock);
-
- }
-
- if (drm_i_have_hw_lock(dev, file_priv)) {
- DRM_DEBUG("File %p released, freeing lock for context %d\n",
- filp, _DRM_LOCKING_CONTEXT(dev->lock.hw_lock->lock));
-
- drm_lock_free(&dev->lock,
- _DRM_LOCKING_CONTEXT(dev->lock.hw_lock->lock));
- }
-
-
- if (drm_core_check_feature(dev, DRIVER_HAVE_DMA) &&
- !dev->driver->reclaim_buffers_locked) {
- dev->driver->reclaim_buffers(dev, file_priv);
- }
+ /* if the master has gone away we can't do anything with the lock */
+ if (file_priv->minor->master)
+ drm_master_release(dev, filp);
if (dev->driver->driver_features & DRIVER_GEM)
drm_gem_release(dev, file_priv);
@@ -428,12 +479,24 @@ int drm_release(struct inode *inode, struct file *filp)
mutex_unlock(&dev->ctxlist_mutex);
mutex_lock(&dev->struct_mutex);
- if (file_priv->remove_auth_on_close == 1) {
+
+ if (file_priv->is_master) {
struct drm_file *temp;
+ list_for_each_entry(temp, &dev->filelist, lhead) {
+ if ((temp->master == file_priv->master) &&
+ (temp != file_priv))
+ temp->authenticated = 0;
+ }
- list_for_each_entry(temp, &dev->filelist, lhead)
- temp->authenticated = 0;
+ if (file_priv->minor->master == file_priv->master) {
+ /* drop the reference held by the minor */
+ drm_master_put(&file_priv->minor->master);
+ }
}
+
+ /* drop the reference held by the file priv */
+ drm_master_put(&file_priv->master);
+ file_priv->is_master = 0;
list_del(&file_priv->lhead);
mutex_unlock(&dev->struct_mutex);
@@ -448,9 +511,9 @@ int drm_release(struct inode *inode, struct file *filp)
atomic_inc(&dev->counts[_DRM_STAT_CLOSES]);
spin_lock(&dev->count_lock);
if (!--dev->open_count) {
- if (atomic_read(&dev->ioctl_count) || dev->blocked) {
- DRM_ERROR("Device busy: %d %d\n",
- atomic_read(&dev->ioctl_count), dev->blocked);
+ if (atomic_read(&dev->ioctl_count)) {
+ DRM_ERROR("Device busy: %d\n",
+ atomic_read(&dev->ioctl_count));
spin_unlock(&dev->count_lock);
unlock_kernel();
return -EBUSY;
diff --git a/drivers/gpu/drm/drm_gem.c b/drivers/gpu/drm/drm_gem.c
index ccd1afdede0..9da58145287 100644
--- a/drivers/gpu/drm/drm_gem.c
+++ b/drivers/gpu/drm/drm_gem.c
@@ -64,6 +64,13 @@
* up at a later date, and as our interface with shmfs for memory allocation.
*/
+/*
+ * We make up offsets for buffer objects so we can recognize them at
+ * mmap time.
+ */
+#define DRM_FILE_PAGE_OFFSET_START ((0xFFFFFFFFUL >> PAGE_SHIFT) + 1)
+#define DRM_FILE_PAGE_OFFSET_SIZE ((0xFFFFFFFFUL >> PAGE_SHIFT) * 16)
+
/**
* Initialize the GEM device fields
*/
@@ -71,6 +78,8 @@
int
drm_gem_init(struct drm_device *dev)
{
+ struct drm_gem_mm *mm;
+
spin_lock_init(&dev->object_name_lock);
idr_init(&dev->object_name_idr);
atomic_set(&dev->object_count, 0);
@@ -79,9 +88,41 @@ drm_gem_init(struct drm_device *dev)
atomic_set(&dev->pin_memory, 0);
atomic_set(&dev->gtt_count, 0);
atomic_set(&dev->gtt_memory, 0);
+
+ mm = drm_calloc(1, sizeof(struct drm_gem_mm), DRM_MEM_MM);
+ if (!mm) {
+ DRM_ERROR("out of memory\n");
+ return -ENOMEM;
+ }
+
+ dev->mm_private = mm;
+
+ if (drm_ht_create(&mm->offset_hash, 19)) {
+ drm_free(mm, sizeof(struct drm_gem_mm), DRM_MEM_MM);
+ return -ENOMEM;
+ }
+
+ if (drm_mm_init(&mm->offset_manager, DRM_FILE_PAGE_OFFSET_START,
+ DRM_FILE_PAGE_OFFSET_SIZE)) {
+ drm_ht_remove(&mm->offset_hash);
+ drm_free(mm, sizeof(struct drm_gem_mm), DRM_MEM_MM);
+ return -ENOMEM;
+ }
+
return 0;
}
+void
+drm_gem_destroy(struct drm_device *dev)
+{
+ struct drm_gem_mm *mm = dev->mm_private;
+
+ drm_mm_takedown(&mm->offset_manager);
+ drm_ht_remove(&mm->offset_hash);
+ drm_free(mm, sizeof(struct drm_gem_mm), DRM_MEM_MM);
+ dev->mm_private = NULL;
+}
+
/**
* Allocate a GEM object of the specified size with shmfs backing store
*/
@@ -419,3 +460,73 @@ drm_gem_object_handle_free(struct kref *kref)
}
EXPORT_SYMBOL(drm_gem_object_handle_free);
+/**
+ * drm_gem_mmap - memory map routine for GEM objects
+ * @filp: DRM file pointer
+ * @vma: VMA for the area to be mapped
+ *
+ * If a driver supports GEM object mapping, mmap calls on the DRM file
+ * descriptor will end up here.
+ *
+ * If we find the object based on the offset passed in (vma->vm_pgoff will
+ * contain the fake offset we created when the GTT map ioctl was called on
+ * the object), we set up the driver fault handler so that any accesses
+ * to the object can be trapped, to perform migration, GTT binding, surface
+ * register allocation, or performance monitoring.
+ */
+int drm_gem_mmap(struct file *filp, struct vm_area_struct *vma)
+{
+ struct drm_file *priv = filp->private_data;
+ struct drm_device *dev = priv->minor->dev;
+ struct drm_gem_mm *mm = dev->mm_private;
+ struct drm_map *map = NULL;
+ struct drm_gem_object *obj;
+ struct drm_hash_item *hash;
+ unsigned long prot;
+ int ret = 0;
+
+ mutex_lock(&dev->struct_mutex);
+
+ if (drm_ht_find_item(&mm->offset_hash, vma->vm_pgoff, &hash)) {
+ mutex_unlock(&dev->struct_mutex);
+ return drm_mmap(filp, vma);
+ }
+
+ map = drm_hash_entry(hash, struct drm_map_list, hash)->map;
+ if (!map ||
+ ((map->flags & _DRM_RESTRICTED) && !capable(CAP_SYS_ADMIN))) {
+ ret = -EPERM;
+ goto out_unlock;
+ }
+
+ /* Check for valid size. */
+ if (map->size < vma->vm_end - vma->vm_start) {
+ ret = -EINVAL;
+ goto out_unlock;
+ }
+
+ obj = map->handle;
+ if (!obj->dev->driver->gem_vm_ops) {
+ ret = -EINVAL;
+ goto out_unlock;
+ }
+
+ vma->vm_flags |= VM_RESERVED | VM_IO | VM_PFNMAP | VM_DONTEXPAND;
+ vma->vm_ops = obj->dev->driver->gem_vm_ops;
+ vma->vm_private_data = map->handle;
+ /* FIXME: use pgprot_writecombine when available */
+ prot = pgprot_val(vma->vm_page_prot);
+#ifdef CONFIG_X86
+ prot |= _PAGE_CACHE_WC;
+#endif
+ vma->vm_page_prot = __pgprot(prot);
+
+ vma->vm_file = filp; /* Needed for drm_vm_open() */
+ drm_vm_open_locked(vma);
+
+out_unlock:
+ mutex_unlock(&dev->struct_mutex);
+
+ return ret;
+}
+EXPORT_SYMBOL(drm_gem_mmap);
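From userspace, the fake offsets mean a GEM object is mapped by asking the driver for the object's offset and then calling mmap() on the DRM file descriptor with that offset; drm_gem_mmap() then recognizes it via the offset hash above. The ioctl number and argument struct below are purely illustrative placeholders for a driver-specific interface.

#include <stdint.h>
#include <sys/ioctl.h>
#include <sys/mman.h>

struct example_mmap_args {
	uint32_t handle;	/* GEM handle */
	uint64_t size;		/* object size in bytes */
	uint64_t offset;	/* fake mmap offset, filled in by the kernel */
};

/* EXAMPLE_IOCTL_MMAP_OFFSET stands in for a real driver-specific ioctl. */
#define EXAMPLE_IOCTL_MMAP_OFFSET 0

static void *map_gem_object(int drm_fd, struct example_mmap_args *args)
{
	if (ioctl(drm_fd, EXAMPLE_IOCTL_MMAP_OFFSET, args) < 0)
		return MAP_FAILED;

	return mmap(NULL, args->size, PROT_READ | PROT_WRITE,
		    MAP_SHARED, drm_fd, args->offset);
}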
diff --git a/drivers/gpu/drm/drm_hashtab.c b/drivers/gpu/drm/drm_hashtab.c
index 33160673a7b..af539f7d87d 100644
--- a/drivers/gpu/drm/drm_hashtab.c
+++ b/drivers/gpu/drm/drm_hashtab.c
@@ -127,6 +127,7 @@ int drm_ht_insert_item(struct drm_open_hash *ht, struct drm_hash_item *item)
}
return 0;
}
+EXPORT_SYMBOL(drm_ht_insert_item);
/*
* Just insert an item and return any "bits" bit key that hasn't been
@@ -188,6 +189,7 @@ int drm_ht_remove_item(struct drm_open_hash *ht, struct drm_hash_item *item)
ht->fill--;
return 0;
}
+EXPORT_SYMBOL(drm_ht_remove_item);
void drm_ht_remove(struct drm_open_hash *ht)
{
diff --git a/drivers/gpu/drm/drm_ioctl.c b/drivers/gpu/drm/drm_ioctl.c
index 16829fb3089..1fad76289e6 100644
--- a/drivers/gpu/drm/drm_ioctl.c
+++ b/drivers/gpu/drm/drm_ioctl.c
@@ -53,12 +53,13 @@ int drm_getunique(struct drm_device *dev, void *data,
struct drm_file *file_priv)
{
struct drm_unique *u = data;
+ struct drm_master *master = file_priv->master;
- if (u->unique_len >= dev->unique_len) {
- if (copy_to_user(u->unique, dev->unique, dev->unique_len))
+ if (u->unique_len >= master->unique_len) {
+ if (copy_to_user(u->unique, master->unique, master->unique_len))
return -EFAULT;
}
- u->unique_len = dev->unique_len;
+ u->unique_len = master->unique_len;
return 0;
}
@@ -81,36 +82,38 @@ int drm_setunique(struct drm_device *dev, void *data,
struct drm_file *file_priv)
{
struct drm_unique *u = data;
+ struct drm_master *master = file_priv->master;
int domain, bus, slot, func, ret;
- if (dev->unique_len || dev->unique)
+ if (master->unique_len || master->unique)
return -EBUSY;
if (!u->unique_len || u->unique_len > 1024)
return -EINVAL;
- dev->unique_len = u->unique_len;
- dev->unique = drm_alloc(u->unique_len + 1, DRM_MEM_DRIVER);
- if (!dev->unique)
+ master->unique_len = u->unique_len;
+ master->unique_size = u->unique_len + 1;
+ master->unique = drm_alloc(master->unique_size, DRM_MEM_DRIVER);
+ if (!master->unique)
return -ENOMEM;
- if (copy_from_user(dev->unique, u->unique, dev->unique_len))
+ if (copy_from_user(master->unique, u->unique, master->unique_len))
return -EFAULT;
- dev->unique[dev->unique_len] = '\0';
+ master->unique[master->unique_len] = '\0';
dev->devname =
drm_alloc(strlen(dev->driver->pci_driver.name) +
- strlen(dev->unique) + 2, DRM_MEM_DRIVER);
+ strlen(master->unique) + 2, DRM_MEM_DRIVER);
if (!dev->devname)
return -ENOMEM;
sprintf(dev->devname, "%s@%s", dev->driver->pci_driver.name,
- dev->unique);
+ master->unique);
/* Return error if the busid submitted doesn't match the device's actual
* busid.
*/
- ret = sscanf(dev->unique, "PCI:%d:%d:%d", &bus, &slot, &func);
+ ret = sscanf(master->unique, "PCI:%d:%d:%d", &bus, &slot, &func);
if (ret != 3)
return -EINVAL;
domain = bus >> 8;
@@ -125,34 +128,38 @@ int drm_setunique(struct drm_device *dev, void *data,
return 0;
}
-static int drm_set_busid(struct drm_device * dev)
+static int drm_set_busid(struct drm_device *dev, struct drm_file *file_priv)
{
+ struct drm_master *master = file_priv->master;
int len;
- if (dev->unique != NULL)
- return 0;
+ if (master->unique != NULL)
+ return -EBUSY;
- dev->unique_len = 40;
- dev->unique = drm_alloc(dev->unique_len + 1, DRM_MEM_DRIVER);
- if (dev->unique == NULL)
+ master->unique_len = 40;
+ master->unique_size = master->unique_len;
+ master->unique = drm_alloc(master->unique_size, DRM_MEM_DRIVER);
+ if (master->unique == NULL)
return -ENOMEM;
- len = snprintf(dev->unique, dev->unique_len, "pci:%04x:%02x:%02x.%d",
- drm_get_pci_domain(dev), dev->pdev->bus->number,
+ len = snprintf(master->unique, master->unique_len, "pci:%04x:%02x:%02x.%d",
+ drm_get_pci_domain(dev),
+ dev->pdev->bus->number,
PCI_SLOT(dev->pdev->devfn),
PCI_FUNC(dev->pdev->devfn));
-
- if (len > dev->unique_len)
- DRM_ERROR("Unique buffer overflowed\n");
+ if (len >= master->unique_len)
+ DRM_ERROR("buffer overflow");
+ else
+ master->unique_len = len;
dev->devname =
- drm_alloc(strlen(dev->driver->pci_driver.name) + dev->unique_len +
+ drm_alloc(strlen(dev->driver->pci_driver.name) + master->unique_len +
2, DRM_MEM_DRIVER);
if (dev->devname == NULL)
return -ENOMEM;
sprintf(dev->devname, "%s@%s", dev->driver->pci_driver.name,
- dev->unique);
+ master->unique);
return 0;
}
@@ -276,7 +283,7 @@ int drm_getstats(struct drm_device *dev, void *data,
for (i = 0; i < dev->counters; i++) {
if (dev->types[i] == _DRM_STAT_LOCK)
stats->data[i].value =
- (dev->lock.hw_lock ? dev->lock.hw_lock->lock : 0);
+ (file_priv->master->lock.hw_lock ? file_priv->master->lock.hw_lock->lock : 0);
else
stats->data[i].value = atomic_read(&dev->counts[i]);
stats->data[i].type = dev->types[i];
@@ -318,7 +325,7 @@ int drm_setversion(struct drm_device *dev, void *data, struct drm_file *file_pri
/*
* Version 1.1 includes tying of DRM to specific device
*/
- drm_set_busid(dev);
+ drm_set_busid(dev, file_priv);
}
}
diff --git a/drivers/gpu/drm/drm_irq.c b/drivers/gpu/drm/drm_irq.c
index 15c8dabc3e9..724e505873c 100644
--- a/drivers/gpu/drm/drm_irq.c
+++ b/drivers/gpu/drm/drm_irq.c
@@ -94,7 +94,7 @@ static void vblank_disable_fn(unsigned long arg)
}
}
-static void drm_vblank_cleanup(struct drm_device *dev)
+void drm_vblank_cleanup(struct drm_device *dev)
{
/* Bail if the driver didn't call drm_vblank_init() */
if (dev->num_crtcs == 0)
@@ -116,6 +116,9 @@ static void drm_vblank_cleanup(struct drm_device *dev)
dev->num_crtcs, DRM_MEM_DRIVER);
drm_free(dev->last_vblank, sizeof(*dev->last_vblank) * dev->num_crtcs,
DRM_MEM_DRIVER);
+ drm_free(dev->last_vblank_wait,
+ sizeof(*dev->last_vblank_wait) * dev->num_crtcs,
+ DRM_MEM_DRIVER);
drm_free(dev->vblank_inmodeset, sizeof(*dev->vblank_inmodeset) *
dev->num_crtcs, DRM_MEM_DRIVER);
@@ -161,6 +164,11 @@ int drm_vblank_init(struct drm_device *dev, int num_crtcs)
if (!dev->last_vblank)
goto err;
+ dev->last_vblank_wait = drm_calloc(num_crtcs, sizeof(u32),
+ DRM_MEM_DRIVER);
+ if (!dev->last_vblank_wait)
+ goto err;
+
dev->vblank_inmodeset = drm_calloc(num_crtcs, sizeof(int),
DRM_MEM_DRIVER);
if (!dev->vblank_inmodeset)
@@ -278,8 +286,6 @@ int drm_irq_uninstall(struct drm_device * dev)
free_irq(dev->pdev->irq, dev);
- drm_vblank_cleanup(dev);
-
return 0;
}
EXPORT_SYMBOL(drm_irq_uninstall);
@@ -307,6 +313,8 @@ int drm_control(struct drm_device *dev, void *data,
case DRM_INST_HANDLER:
if (!drm_core_check_feature(dev, DRIVER_HAVE_IRQ))
return 0;
+ if (drm_core_check_feature(dev, DRIVER_MODESET))
+ return 0;
if (dev->if_version < DRM_IF_VERSION(1, 2) &&
ctl->irq != dev->pdev->irq)
return -EINVAL;
@@ -314,6 +322,8 @@ int drm_control(struct drm_device *dev, void *data,
case DRM_UNINST_HANDLER:
if (!drm_core_check_feature(dev, DRIVER_HAVE_IRQ))
return 0;
+ if (drm_core_check_feature(dev, DRIVER_MODESET))
+ return 0;
return drm_irq_uninstall(dev);
default:
return -EINVAL;
@@ -429,6 +439,45 @@ void drm_vblank_put(struct drm_device *dev, int crtc)
EXPORT_SYMBOL(drm_vblank_put);
/**
+ * drm_vblank_pre_modeset - account for vblanks across mode sets
+ * @dev: DRM device
+ * @crtc: CRTC in question
+ * @post: post or pre mode set?
+ *
+ * Account for vblank events across mode setting events, which will likely
+ * reset the hardware frame counter.
+ */
+void drm_vblank_pre_modeset(struct drm_device *dev, int crtc)
+{
+ /*
+ * To avoid all the problems that might happen if interrupts
+ * were enabled/disabled around or between these calls, we just
+ * have the kernel take a reference on the CRTC (just once though
+ * to avoid corrupting the count if multiple, mismatched calls occur),
+ * so that interrupts remain enabled in the interim.
+ */
+ if (!dev->vblank_inmodeset[crtc]) {
+ dev->vblank_inmodeset[crtc] = 1;
+ drm_vblank_get(dev, crtc);
+ }
+}
+EXPORT_SYMBOL(drm_vblank_pre_modeset);
+
+void drm_vblank_post_modeset(struct drm_device *dev, int crtc)
+{
+ unsigned long irqflags;
+
+ if (dev->vblank_inmodeset[crtc]) {
+ spin_lock_irqsave(&dev->vbl_lock, irqflags);
+ dev->vblank_disable_allowed = 1;
+ dev->vblank_inmodeset[crtc] = 0;
+ spin_unlock_irqrestore(&dev->vbl_lock, irqflags);
+ drm_vblank_put(dev, crtc);
+ }
+}
+EXPORT_SYMBOL(drm_vblank_post_modeset);
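A short sketch of the intended pairing: a modesetting driver brackets its hardware reprogramming with the two helpers so the frame-counter discontinuity stays hidden from userspace. The function name is illustrative; the actual programming is elided.

static void example_crtc_mode_set(struct drm_device *dev, int pipe)
{
	drm_vblank_pre_modeset(dev, pipe);
	/* ... program PLL, timing and plane registers for the new mode ... */
	drm_vblank_post_modeset(dev, pipe);
}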
+
+/**
* drm_modeset_ctl - handle vblank event counter changes across mode switch
* @DRM_IOCTL_ARGS: standard ioctl arguments
*
@@ -443,7 +492,6 @@ int drm_modeset_ctl(struct drm_device *dev, void *data,
struct drm_file *file_priv)
{
struct drm_modeset_ctl *modeset = data;
- unsigned long irqflags;
int crtc, ret = 0;
/* If drm_vblank_init() hasn't been called yet, just no-op */
@@ -456,28 +504,12 @@ int drm_modeset_ctl(struct drm_device *dev, void *data,
goto out;
}
- /*
- * To avoid all the problems that might happen if interrupts
- * were enabled/disabled around or between these calls, we just
- * have the kernel take a reference on the CRTC (just once though
- * to avoid corrupting the count if multiple, mismatch calls occur),
- * so that interrupts remain enabled in the interim.
- */
switch (modeset->cmd) {
case _DRM_PRE_MODESET:
- if (!dev->vblank_inmodeset[crtc]) {
- dev->vblank_inmodeset[crtc] = 1;
- drm_vblank_get(dev, crtc);
- }
+ drm_vblank_pre_modeset(dev, crtc);
break;
case _DRM_POST_MODESET:
- if (dev->vblank_inmodeset[crtc]) {
- spin_lock_irqsave(&dev->vbl_lock, irqflags);
- dev->vblank_disable_allowed = 1;
- dev->vblank_inmodeset[crtc] = 0;
- spin_unlock_irqrestore(&dev->vbl_lock, irqflags);
- drm_vblank_put(dev, crtc);
- }
+ drm_vblank_post_modeset(dev, crtc);
break;
default:
ret = -EINVAL;
@@ -618,6 +650,7 @@ int drm_wait_vblank(struct drm_device *dev, void *data,
} else {
DRM_DEBUG("waiting on vblank count %d, crtc %d\n",
vblwait->request.sequence, crtc);
+ dev->last_vblank_wait[crtc] = vblwait->request.sequence;
DRM_WAIT_ON(ret, dev->vbl_queue[crtc], 3 * DRM_HZ,
((drm_vblank_count(dev, crtc)
- vblwait->request.sequence) <= (1 << 23)));
diff --git a/drivers/gpu/drm/drm_lock.c b/drivers/gpu/drm/drm_lock.c
index 1cfa72031f8..46e7b28f070 100644
--- a/drivers/gpu/drm/drm_lock.c
+++ b/drivers/gpu/drm/drm_lock.c
@@ -52,6 +52,7 @@ int drm_lock(struct drm_device *dev, void *data, struct drm_file *file_priv)
{
DECLARE_WAITQUEUE(entry, current);
struct drm_lock *lock = data;
+ struct drm_master *master = file_priv->master;
int ret = 0;
++file_priv->lock_count;
@@ -64,26 +65,27 @@ int drm_lock(struct drm_device *dev, void *data, struct drm_file *file_priv)
DRM_DEBUG("%d (pid %d) requests lock (0x%08x), flags = 0x%08x\n",
lock->context, task_pid_nr(current),
- dev->lock.hw_lock->lock, lock->flags);
+ master->lock.hw_lock->lock, lock->flags);
if (drm_core_check_feature(dev, DRIVER_DMA_QUEUE))
if (lock->context < 0)
return -EINVAL;
- add_wait_queue(&dev->lock.lock_queue, &entry);
- spin_lock_bh(&dev->lock.spinlock);
- dev->lock.user_waiters++;
- spin_unlock_bh(&dev->lock.spinlock);
+ add_wait_queue(&master->lock.lock_queue, &entry);
+ spin_lock_bh(&master->lock.spinlock);
+ master->lock.user_waiters++;
+ spin_unlock_bh(&master->lock.spinlock);
+
for (;;) {
__set_current_state(TASK_INTERRUPTIBLE);
- if (!dev->lock.hw_lock) {
+ if (!master->lock.hw_lock) {
/* Device has been unregistered */
ret = -EINTR;
break;
}
- if (drm_lock_take(&dev->lock, lock->context)) {
- dev->lock.file_priv = file_priv;
- dev->lock.lock_time = jiffies;
+ if (drm_lock_take(&master->lock, lock->context)) {
+ master->lock.file_priv = file_priv;
+ master->lock.lock_time = jiffies;
atomic_inc(&dev->counts[_DRM_STAT_LOCKS]);
break; /* Got lock */
}
@@ -95,11 +97,11 @@ int drm_lock(struct drm_device *dev, void *data, struct drm_file *file_priv)
break;
}
}
- spin_lock_bh(&dev->lock.spinlock);
- dev->lock.user_waiters--;
- spin_unlock_bh(&dev->lock.spinlock);
+ spin_lock_bh(&master->lock.spinlock);
+ master->lock.user_waiters--;
+ spin_unlock_bh(&master->lock.spinlock);
__set_current_state(TASK_RUNNING);
- remove_wait_queue(&dev->lock.lock_queue, &entry);
+ remove_wait_queue(&master->lock.lock_queue, &entry);
DRM_DEBUG("%d %s\n", lock->context,
ret ? "interrupted" : "has lock");
@@ -108,14 +110,14 @@ int drm_lock(struct drm_device *dev, void *data, struct drm_file *file_priv)
/* don't set the block all signals on the master process for now
* really probably not the correct answer but lets us debug xkb
* xserver for now */
- if (!file_priv->master) {
+ if (!file_priv->is_master) {
sigemptyset(&dev->sigmask);
sigaddset(&dev->sigmask, SIGSTOP);
sigaddset(&dev->sigmask, SIGTSTP);
sigaddset(&dev->sigmask, SIGTTIN);
sigaddset(&dev->sigmask, SIGTTOU);
dev->sigdata.context = lock->context;
- dev->sigdata.lock = dev->lock.hw_lock;
+ dev->sigdata.lock = master->lock.hw_lock;
block_all_signals(drm_notifier, &dev->sigdata, &dev->sigmask);
}
@@ -154,6 +156,7 @@ int drm_lock(struct drm_device *dev, void *data, struct drm_file *file_priv)
int drm_unlock(struct drm_device *dev, void *data, struct drm_file *file_priv)
{
struct drm_lock *lock = data;
+ struct drm_master *master = file_priv->master;
if (lock->context == DRM_KERNEL_CONTEXT) {
DRM_ERROR("Process %d using kernel context %d\n",
@@ -169,7 +172,7 @@ int drm_unlock(struct drm_device *dev, void *data, struct drm_file *file_priv)
if (dev->driver->kernel_context_switch_unlock)
dev->driver->kernel_context_switch_unlock(dev);
else {
- if (drm_lock_free(&dev->lock,lock->context)) {
+ if (drm_lock_free(&master->lock, lock->context)) {
/* FIXME: Should really bail out here. */
}
}
@@ -379,9 +382,10 @@ EXPORT_SYMBOL(drm_idlelock_release);
int drm_i_have_hw_lock(struct drm_device *dev, struct drm_file *file_priv)
{
- return (file_priv->lock_count && dev->lock.hw_lock &&
- _DRM_LOCK_IS_HELD(dev->lock.hw_lock->lock) &&
- dev->lock.file_priv == file_priv);
+ struct drm_master *master = file_priv->master;
+ return (file_priv->lock_count && master->lock.hw_lock &&
+ _DRM_LOCK_IS_HELD(master->lock.hw_lock->lock) &&
+ master->lock.file_priv == file_priv);
}
EXPORT_SYMBOL(drm_i_have_hw_lock);
diff --git a/drivers/gpu/drm/drm_mm.c b/drivers/gpu/drm/drm_mm.c
index 217ad7dc707..367c590ffbb 100644
--- a/drivers/gpu/drm/drm_mm.c
+++ b/drivers/gpu/drm/drm_mm.c
@@ -296,3 +296,4 @@ void drm_mm_takedown(struct drm_mm * mm)
drm_free(entry, sizeof(*entry), DRM_MEM_MM);
}
+EXPORT_SYMBOL(drm_mm_takedown);
diff --git a/drivers/gpu/drm/drm_modes.c b/drivers/gpu/drm/drm_modes.c
new file mode 100644
index 00000000000..c9b80fdd463
--- /dev/null
+++ b/drivers/gpu/drm/drm_modes.c
@@ -0,0 +1,576 @@
+/*
+ * The list_sort function is (presumably) licensed under the GPL (see the
+ * top level "COPYING" file for details).
+ *
+ * The remainder of this file is:
+ *
+ * Copyright © 1997-2003 by The XFree86 Project, Inc.
+ * Copyright © 2007 Dave Airlie
+ * Copyright © 2007-2008 Intel Corporation
+ * Jesse Barnes <jesse.barnes@intel.com>
+ *
+ * Permission is hereby granted, free of charge, to any person obtaining a
+ * copy of this software and associated documentation files (the "Software"),
+ * to deal in the Software without restriction, including without limitation
+ * the rights to use, copy, modify, merge, publish, distribute, sublicense,
+ * and/or sell copies of the Software, and to permit persons to whom the
+ * Software is furnished to do so, subject to the following conditions:
+ *
+ * The above copyright notice and this permission notice shall be included in
+ * all copies or substantial portions of the Software.
+ *
+ * THE SOFTWARE IS PROVIDED "AS IS", WITHOUT WARRANTY OF ANY KIND, EXPRESS OR
+ * IMPLIED, INCLUDING BUT NOT LIMITED TO THE WARRANTIES OF MERCHANTABILITY,
+ * FITNESS FOR A PARTICULAR PURPOSE AND NONINFRINGEMENT. IN NO EVENT SHALL
+ * THE COPYRIGHT HOLDER(S) OR AUTHOR(S) BE LIABLE FOR ANY CLAIM, DAMAGES OR
+ * OTHER LIABILITY, WHETHER IN AN ACTION OF CONTRACT, TORT OR OTHERWISE,
+ * ARISING FROM, OUT OF OR IN CONNECTION WITH THE SOFTWARE OR THE USE OR
+ * OTHER DEALINGS IN THE SOFTWARE.
+ *
+ * Except as contained in this notice, the name of the copyright holder(s)
+ * and author(s) shall not be used in advertising or otherwise to promote
+ * the sale, use or other dealings in this Software without prior written
+ * authorization from the copyright holder(s) and author(s).
+ */
+
+#include <linux/list.h>
+#include "drmP.h"
+#include "drm.h"
+#include "drm_crtc.h"
+
+/**
+ * drm_mode_debug_printmodeline - debug print a mode
+ * @dev: DRM device
+ * @mode: mode to print
+ *
+ * LOCKING:
+ * None.
+ *
+ * Describe @mode using DRM_DEBUG.
+ */
+void drm_mode_debug_printmodeline(struct drm_display_mode *mode)
+{
+ DRM_DEBUG("Modeline %d:\"%s\" %d %d %d %d %d %d %d %d %d %d 0x%x 0x%x\n",
+ mode->base.id, mode->name, mode->vrefresh, mode->clock,
+ mode->hdisplay, mode->hsync_start,
+ mode->hsync_end, mode->htotal,
+ mode->vdisplay, mode->vsync_start,
+ mode->vsync_end, mode->vtotal, mode->type, mode->flags);
+}
+EXPORT_SYMBOL(drm_mode_debug_printmodeline);
+
+/**
+ * drm_mode_set_name - set the name on a mode
+ * @mode: name will be set in this mode
+ *
+ * LOCKING:
+ * None.
+ *
+ * Set the name of @mode to a standard format.
+ */
+void drm_mode_set_name(struct drm_display_mode *mode)
+{
+ snprintf(mode->name, DRM_DISPLAY_MODE_LEN, "%dx%d", mode->hdisplay,
+ mode->vdisplay);
+}
+EXPORT_SYMBOL(drm_mode_set_name);
+
+/**
+ * drm_mode_list_concat - move modes from one list to another
+ * @head: source list
+ * @new: dst list
+ *
+ * LOCKING:
+ * Caller must ensure both lists are locked.
+ *
+ * Move all the modes from @head to @new.
+ */
+void drm_mode_list_concat(struct list_head *head, struct list_head *new)
+{
+
+ struct list_head *entry, *tmp;
+
+ list_for_each_safe(entry, tmp, head) {
+ list_move_tail(entry, new);
+ }
+}
+EXPORT_SYMBOL(drm_mode_list_concat);
+
+/**
+ * drm_mode_width - get the width of a mode
+ * @mode: mode
+ *
+ * LOCKING:
+ * None.
+ *
+ * Return @mode's width (hdisplay) value.
+ *
+ * FIXME: is this needed?
+ *
+ * RETURNS:
+ * @mode->hdisplay
+ */
+int drm_mode_width(struct drm_display_mode *mode)
+{
+ return mode->hdisplay;
+
+}
+EXPORT_SYMBOL(drm_mode_width);
+
+/**
+ * drm_mode_height - get the height of a mode
+ * @mode: mode
+ *
+ * LOCKING:
+ * None.
+ *
+ * Return @mode's height (vdisplay) value.
+ *
+ * FIXME: is this needed?
+ *
+ * RETURNS:
+ * @mode->vdisplay
+ */
+int drm_mode_height(struct drm_display_mode *mode)
+{
+ return mode->vdisplay;
+}
+EXPORT_SYMBOL(drm_mode_height);
+
+/**
+ * drm_mode_vrefresh - get the vrefresh of a mode
+ * @mode: mode
+ *
+ * LOCKING:
+ * None.
+ *
+ * Return @mode's vrefresh rate or calculate it if necessary.
+ *
+ * FIXME: why is this needed? shouldn't vrefresh be set already?
+ *
+ * RETURNS:
+ * Vertical refresh rate of @mode, multiplied by 1000 for precision.
+ */
+int drm_mode_vrefresh(struct drm_display_mode *mode)
+{
+ int refresh = 0;
+ unsigned int calc_val;
+
+ if (mode->vrefresh > 0)
+ refresh = mode->vrefresh;
+ else if (mode->htotal > 0 && mode->vtotal > 0) {
+ /* work out the vrefresh; the value will be x1000 */
+ calc_val = (mode->clock * 1000);
+
+ calc_val /= mode->htotal;
+ calc_val *= 1000;
+ calc_val /= mode->vtotal;
+
+ refresh = calc_val;
+ if (mode->flags & DRM_MODE_FLAG_INTERLACE)
+ refresh *= 2;
+ if (mode->flags & DRM_MODE_FLAG_DBLSCAN)
+ refresh /= 2;
+ if (mode->vscan > 1)
+ refresh /= mode->vscan;
+ }
+ return refresh;
+}
+EXPORT_SYMBOL(drm_mode_vrefresh);
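Worked example of the fallback calculation above for 1024x768@60 (clock 65000 kHz, htotal 1344, vtotal 806 from the established-modes table): the intermediate steps give 65,000,000 / 1344 = 48363, then 48,363,000 / 806 = 60003, i.e. about 60 Hz once the x1000 scaling is accounted for.

#include <stdio.h>

int main(void)
{
	/* 1024x768@60, same integer arithmetic as the fallback path */
	unsigned int clock = 65000;		/* kHz */
	unsigned int htotal = 1344, vtotal = 806;
	unsigned int calc_val = clock * 1000;	/* 65,000,000 */

	calc_val /= htotal;			/* 48363 */
	calc_val *= 1000;			/* 48,363,000 */
	calc_val /= vtotal;			/* 60003 -> ~60.00 Hz x 1000 */

	printf("%u\n", calc_val);
	return 0;
}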
+
+/**
+ * drm_mode_set_crtcinfo - set CRTC modesetting parameters
+ * @p: mode
+ * @adjust_flags: adjustment flags (e.g. CRTC_INTERLACE_HALVE_V)
+ *
+ * LOCKING:
+ * None.
+ *
+ * Setup the CRTC modesetting parameters for @p, adjusting if necessary.
+ */
+void drm_mode_set_crtcinfo(struct drm_display_mode *p, int adjust_flags)
+{
+ if ((p == NULL) || ((p->type & DRM_MODE_TYPE_CRTC_C) == DRM_MODE_TYPE_BUILTIN))
+ return;
+
+ p->crtc_hdisplay = p->hdisplay;
+ p->crtc_hsync_start = p->hsync_start;
+ p->crtc_hsync_end = p->hsync_end;
+ p->crtc_htotal = p->htotal;
+ p->crtc_hskew = p->hskew;
+ p->crtc_vdisplay = p->vdisplay;
+ p->crtc_vsync_start = p->vsync_start;
+ p->crtc_vsync_end = p->vsync_end;
+ p->crtc_vtotal = p->vtotal;
+
+ if (p->flags & DRM_MODE_FLAG_INTERLACE) {
+ if (adjust_flags & CRTC_INTERLACE_HALVE_V) {
+ p->crtc_vdisplay /= 2;
+ p->crtc_vsync_start /= 2;
+ p->crtc_vsync_end /= 2;
+ p->crtc_vtotal /= 2;
+ }
+
+ p->crtc_vtotal |= 1;
+ }
+
+ if (p->flags & DRM_MODE_FLAG_DBLSCAN) {
+ p->crtc_vdisplay *= 2;
+ p->crtc_vsync_start *= 2;
+ p->crtc_vsync_end *= 2;
+ p->crtc_vtotal *= 2;
+ }
+
+ if (p->vscan > 1) {
+ p->crtc_vdisplay *= p->vscan;
+ p->crtc_vsync_start *= p->vscan;
+ p->crtc_vsync_end *= p->vscan;
+ p->crtc_vtotal *= p->vscan;
+ }
+
+ p->crtc_vblank_start = min(p->crtc_vsync_start, p->crtc_vdisplay);
+ p->crtc_vblank_end = max(p->crtc_vsync_end, p->crtc_vtotal);
+ p->crtc_hblank_start = min(p->crtc_hsync_start, p->crtc_hdisplay);
+ p->crtc_hblank_end = max(p->crtc_hsync_end, p->crtc_htotal);
+
+ p->crtc_hadjusted = false;
+ p->crtc_vadjusted = false;
+}
+EXPORT_SYMBOL(drm_mode_set_crtcinfo);
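Worked example of the interlace handling above, using illustrative 1080i vertical timings: with the halve-V adjustment requested, the vertical values are cut to per-field numbers and the total is forced odd. The flag value below is a stand-in for CRTC_INTERLACE_HALVE_V.

#include <stdio.h>

#define EXAMPLE_HALVE_V 0x1	/* stands in for CRTC_INTERLACE_HALVE_V */

int main(void)
{
	/* illustrative frame-based 1080i vertical timings */
	int vdisplay = 1080, vsync_start = 1084, vsync_end = 1094, vtotal = 1125;
	int adjust_flags = EXAMPLE_HALVE_V;

	if (adjust_flags & EXAMPLE_HALVE_V) {
		vdisplay /= 2;		/* 540 */
		vsync_start /= 2;	/* 542 */
		vsync_end /= 2;		/* 547 */
		vtotal /= 2;		/* 562 */
	}
	vtotal |= 1;			/* 563: odd per-field total */

	printf("%d %d %d %d\n", vdisplay, vsync_start, vsync_end, vtotal);
	return 0;
}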
+
+
+/**
+ * drm_mode_duplicate - allocate and duplicate an existing mode
+ * @m: mode to duplicate
+ *
+ * LOCKING:
+ * None.
+ *
+ * Just allocate a new mode, copy the existing mode into it, and return
+ * a pointer to it. Used to create new instances of established modes.
+ */
+struct drm_display_mode *drm_mode_duplicate(struct drm_device *dev,
+ struct drm_display_mode *mode)
+{
+ struct drm_display_mode *nmode;
+ int new_id;
+
+ nmode = drm_mode_create(dev);
+ if (!nmode)
+ return NULL;
+
+ new_id = nmode->base.id;
+ *nmode = *mode;
+ nmode->base.id = new_id;
+ INIT_LIST_HEAD(&nmode->head);
+ return nmode;
+}
+EXPORT_SYMBOL(drm_mode_duplicate);
+
+/**
+ * drm_mode_equal - test modes for equality
+ * @mode1: first mode
+ * @mode2: second mode
+ *
+ * LOCKING:
+ * None.
+ *
+ * Check to see if @mode1 and @mode2 are equivalent.
+ *
+ * RETURNS:
+ * True if the modes are equal, false otherwise.
+ */
+bool drm_mode_equal(struct drm_display_mode *mode1, struct drm_display_mode *mode2)
+{
+ /* do the clock check; convert to PICOS so fb modes get matched
+ * the same way */
+ if (mode1->clock && mode2->clock) {
+ if (KHZ2PICOS(mode1->clock) != KHZ2PICOS(mode2->clock))
+ return false;
+ } else if (mode1->clock != mode2->clock)
+ return false;
+
+ if (mode1->hdisplay == mode2->hdisplay &&
+ mode1->hsync_start == mode2->hsync_start &&
+ mode1->hsync_end == mode2->hsync_end &&
+ mode1->htotal == mode2->htotal &&
+ mode1->hskew == mode2->hskew &&
+ mode1->vdisplay == mode2->vdisplay &&
+ mode1->vsync_start == mode2->vsync_start &&
+ mode1->vsync_end == mode2->vsync_end &&
+ mode1->vtotal == mode2->vtotal &&
+ mode1->vscan == mode2->vscan &&
+ mode1->flags == mode2->flags)
+ return true;
+
+ return false;
+}
+EXPORT_SYMBOL(drm_mode_equal);
+
+/**
+ * drm_mode_validate_size - make sure modes adhere to size constraints
+ * @dev: DRM device
+ * @mode_list: list of modes to check
+ * @maxX: maximum width
+ * @maxY: maximum height
+ * @maxPitch: max pitch
+ *
+ * LOCKING:
+ * Caller must hold a lock protecting @mode_list.
+ *
+ * The DRM device (@dev) has size and pitch limits. Here we validate the
+ * modes we probed for @dev against those limits and set their status as
+ * necessary.
+ */
+void drm_mode_validate_size(struct drm_device *dev,
+ struct list_head *mode_list,
+ int maxX, int maxY, int maxPitch)
+{
+ struct drm_display_mode *mode;
+
+ list_for_each_entry(mode, mode_list, head) {
+ if (maxPitch > 0 && mode->hdisplay > maxPitch)
+ mode->status = MODE_BAD_WIDTH;
+
+ if (maxX > 0 && mode->hdisplay > maxX)
+ mode->status = MODE_VIRTUAL_X;
+
+ if (maxY > 0 && mode->vdisplay > maxY)
+ mode->status = MODE_VIRTUAL_Y;
+ }
+}
+EXPORT_SYMBOL(drm_mode_validate_size);
+
+/**
+ * drm_mode_validate_clocks - validate modes against clock limits
+ * @dev: DRM device
+ * @mode_list: list of modes to check
+ * @min: minimum clock rate array
+ * @max: maximum clock rate array
+ * @n_ranges: number of clock ranges (size of arrays)
+ *
+ * LOCKING:
+ * Caller must hold a lock protecting @mode_list.
+ *
+ * Some code may need to check a mode list against the clock limits of the
+ * device in question. This function walks the mode list, testing to make
+ * sure each mode falls within a given range (defined by @min and @max
+ * arrays) and sets @mode->status as needed.
+ */
+void drm_mode_validate_clocks(struct drm_device *dev,
+ struct list_head *mode_list,
+ int *min, int *max, int n_ranges)
+{
+ struct drm_display_mode *mode;
+ int i;
+
+ list_for_each_entry(mode, mode_list, head) {
+ bool good = false;
+ for (i = 0; i < n_ranges; i++) {
+ if (mode->clock >= min[i] && mode->clock <= max[i]) {
+ good = true;
+ break;
+ }
+ }
+ if (!good)
+ mode->status = MODE_CLOCK_RANGE;
+ }
+}
+EXPORT_SYMBOL(drm_mode_validate_clocks);
+
+/**
+ * drm_mode_prune_invalid - remove invalid modes from mode list
+ * @dev: DRM device
+ * @mode_list: list of modes to check
+ * @verbose: be verbose about it
+ *
+ * LOCKING:
+ * Caller must hold a lock protecting @mode_list.
+ *
+ * Once mode list generation is complete, a caller can use this routine to
+ * remove invalid modes from a mode list. If any of the modes have a
+ * status other than %MODE_OK, they are removed from @mode_list and freed.
+ */
+void drm_mode_prune_invalid(struct drm_device *dev,
+ struct list_head *mode_list, bool verbose)
+{
+ struct drm_display_mode *mode, *t;
+
+ list_for_each_entry_safe(mode, t, mode_list, head) {
+ if (mode->status != MODE_OK) {
+ list_del(&mode->head);
+ if (verbose) {
+ drm_mode_debug_printmodeline(mode);
+ DRM_DEBUG("Not using %s mode %d\n", mode->name, mode->status);
+ }
+ drm_mode_destroy(dev, mode);
+ }
+ }
+}
+EXPORT_SYMBOL(drm_mode_prune_invalid);
+
+/**
+ * drm_mode_compare - compare modes for favorability
+ * @lh_a: list_head for first mode
+ * @lh_b: list_head for second mode
+ *
+ * LOCKING:
+ * None.
+ *
+ * Compare two modes, given by @lh_a and @lh_b, returning a value indicating
+ * which is better.
+ *
+ * RETURNS:
+ * Negative if @lh_a is better than @lh_b, zero if they're equivalent, or
+ * positive if @lh_b is better than @lh_a.
+ */
+static int drm_mode_compare(struct list_head *lh_a, struct list_head *lh_b)
+{
+ struct drm_display_mode *a = list_entry(lh_a, struct drm_display_mode, head);
+ struct drm_display_mode *b = list_entry(lh_b, struct drm_display_mode, head);
+ int diff;
+
+ diff = ((b->type & DRM_MODE_TYPE_PREFERRED) != 0) -
+ ((a->type & DRM_MODE_TYPE_PREFERRED) != 0);
+ if (diff)
+ return diff;
+ diff = b->hdisplay * b->vdisplay - a->hdisplay * a->vdisplay;
+ if (diff)
+ return diff;
+ diff = b->clock - a->clock;
+ return diff;
+}
+
+/* FIXME: what we don't have a list sort function? */
+/* list sort from Mark J Roberts (mjr@znex.org) */
+void list_sort(struct list_head *head,
+ int (*cmp)(struct list_head *a, struct list_head *b))
+{
+ struct list_head *p, *q, *e, *list, *tail, *oldhead;
+ int insize, nmerges, psize, qsize, i;
+
+ list = head->next;
+ list_del(head);
+ insize = 1;
+ for (;;) {
+ p = oldhead = list;
+ list = tail = NULL;
+ nmerges = 0;
+
+ while (p) {
+ nmerges++;
+ q = p;
+ psize = 0;
+ for (i = 0; i < insize; i++) {
+ psize++;
+ q = q->next == oldhead ? NULL : q->next;
+ if (!q)
+ break;
+ }
+
+ qsize = insize;
+ while (psize > 0 || (qsize > 0 && q)) {
+ if (!psize) {
+ e = q;
+ q = q->next;
+ qsize--;
+ if (q == oldhead)
+ q = NULL;
+ } else if (!qsize || !q) {
+ e = p;
+ p = p->next;
+ psize--;
+ if (p == oldhead)
+ p = NULL;
+ } else if (cmp(p, q) <= 0) {
+ e = p;
+ p = p->next;
+ psize--;
+ if (p == oldhead)
+ p = NULL;
+ } else {
+ e = q;
+ q = q->next;
+ qsize--;
+ if (q == oldhead)
+ q = NULL;
+ }
+ if (tail)
+ tail->next = e;
+ else
+ list = e;
+ e->prev = tail;
+ tail = e;
+ }
+ p = q;
+ }
+
+ tail->next = list;
+ list->prev = tail;
+
+ if (nmerges <= 1)
+ break;
+
+ insize *= 2;
+ }
+
+ head->next = list;
+ head->prev = list->prev;
+ list->prev->next = head;
+ list->prev = head;
+}
+
+/**
+ * drm_mode_sort - sort mode list
+ * @mode_list: list to sort
+ *
+ * LOCKING:
+ * Caller must hold a lock protecting @mode_list.
+ *
+ * Sort @mode_list by favorability, putting good modes first.
+ */
+void drm_mode_sort(struct list_head *mode_list)
+{
+ list_sort(mode_list, drm_mode_compare);
+}
+EXPORT_SYMBOL(drm_mode_sort);
+
+/**
+ * drm_mode_connector_list_update - update the mode list for the connector
+ * @connector: the connector to update
+ *
+ * LOCKING:
+ * Caller must hold a lock protecting @mode_list.
+ *
+ * This moves the modes from the @connector probed_modes list
+ * to the actual mode list. It compares each probed mode against the current
+ * list and only adds modes that are not already present. Any mode left
+ * unverified after this point will be removed by a subsequent call to
+ * drm_mode_prune_invalid().
+ */
+void drm_mode_connector_list_update(struct drm_connector *connector)
+{
+ struct drm_display_mode *mode;
+ struct drm_display_mode *pmode, *pt;
+ int found_it;
+
+ list_for_each_entry_safe(pmode, pt, &connector->probed_modes,
+ head) {
+ found_it = 0;
+ /* go through current modes checking for the new probed mode */
+ list_for_each_entry(mode, &connector->modes, head) {
+ if (drm_mode_equal(pmode, mode)) {
+ found_it = 1;
+ /* if equal delete the probed mode */
+ mode->status = pmode->status;
+ list_del(&pmode->head);
+ drm_mode_destroy(connector->dev, pmode);
+ break;
+ }
+ }
+
+ if (!found_it) {
+ list_move_tail(&pmode->head, &connector->modes);
+ }
+ }
+}
+EXPORT_SYMBOL(drm_mode_connector_list_update);
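+
+/*
+ * Hedged usage sketch (illustration only, not part of the code above): a
+ * driver's output probing path is expected to combine these helpers roughly
+ * as follows, where 'min'/'max' describe one supported pixel clock range
+ * (the exact drm_mode_validate_clocks() argument order is assumed here):
+ *
+ *	drm_mode_connector_list_update(connector);
+ *	drm_mode_validate_clocks(dev, &connector->modes, &min, &max, 1);
+ *	drm_mode_prune_invalid(dev, &connector->modes, true);
+ *	drm_mode_sort(&connector->modes);
+ */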
diff --git a/drivers/gpu/drm/drm_proc.c b/drivers/gpu/drm/drm_proc.c
index ae73b7f7249..8df849f6683 100644
--- a/drivers/gpu/drm/drm_proc.c
+++ b/drivers/gpu/drm/drm_proc.c
@@ -49,6 +49,8 @@ static int drm_queues_info(char *buf, char **start, off_t offset,
int request, int *eof, void *data);
static int drm_bufs_info(char *buf, char **start, off_t offset,
int request, int *eof, void *data);
+static int drm_vblank_info(char *buf, char **start, off_t offset,
+ int request, int *eof, void *data);
static int drm_gem_name_info(char *buf, char **start, off_t offset,
int request, int *eof, void *data);
static int drm_gem_object_info(char *buf, char **start, off_t offset,
@@ -72,6 +74,7 @@ static struct drm_proc_list {
{"clients", drm_clients_info, 0},
{"queues", drm_queues_info, 0},
{"bufs", drm_bufs_info, 0},
+ {"vblank", drm_vblank_info, 0},
{"gem_names", drm_gem_name_info, DRIVER_GEM},
{"gem_objects", drm_gem_object_info, DRIVER_GEM},
#if DRM_DEBUG_CODE
@@ -195,6 +198,7 @@ static int drm_name_info(char *buf, char **start, off_t offset, int request,
int *eof, void *data)
{
struct drm_minor *minor = (struct drm_minor *) data;
+ struct drm_master *master = minor->master;
struct drm_device *dev = minor->dev;
int len = 0;
@@ -203,13 +207,16 @@ static int drm_name_info(char *buf, char **start, off_t offset, int request,
return 0;
}
+ if (!master)
+ return 0;
+
*start = &buf[offset];
*eof = 0;
- if (dev->unique) {
+ if (master->unique) {
DRM_PROC_PRINT("%s %s %s\n",
dev->driver->pci_driver.name,
- pci_name(dev->pdev), dev->unique);
+ pci_name(dev->pdev), master->unique);
} else {
DRM_PROC_PRINT("%s %s\n", dev->driver->pci_driver.name,
pci_name(dev->pdev));
@@ -454,6 +461,66 @@ static int drm_bufs_info(char *buf, char **start, off_t offset, int request,
}
/**
+ * Called when "/proc/dri/.../vblank" is read.
+ *
+ * \param buf output buffer.
+ * \param start start of output data.
+ * \param offset requested start offset.
+ * \param request requested number of bytes.
+ * \param eof whether there is no more data to return.
+ * \param data private data.
+ * \return number of written bytes.
+ */
+static int drm__vblank_info(char *buf, char **start, off_t offset, int request,
+ int *eof, void *data)
+{
+ struct drm_minor *minor = (struct drm_minor *) data;
+ struct drm_device *dev = minor->dev;
+ int len = 0;
+ int crtc;
+
+ if (offset > DRM_PROC_LIMIT) {
+ *eof = 1;
+ return 0;
+ }
+
+ *start = &buf[offset];
+ *eof = 0;
+
+ for (crtc = 0; crtc < dev->num_crtcs; crtc++) {
+ DRM_PROC_PRINT("CRTC %d enable: %d\n",
+ crtc, atomic_read(&dev->vblank_refcount[crtc]));
+ DRM_PROC_PRINT("CRTC %d counter: %d\n",
+ crtc, drm_vblank_count(dev, crtc));
+ DRM_PROC_PRINT("CRTC %d last wait: %d\n",
+ crtc, dev->last_vblank_wait[crtc]);
+ DRM_PROC_PRINT("CRTC %d in modeset: %d\n",
+ crtc, dev->vblank_inmodeset[crtc]);
+ }
+
+ if (len > request + offset)
+ return request;
+ *eof = 1;
+ return len - offset;
+}
+
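+/*
+ * Example of the resulting /proc/dri/<minor>/vblank output (values are
+ * illustrative only):
+ *
+ *	CRTC 0 enable: 1
+ *	CRTC 0 counter: 12345
+ *	CRTC 0 last wait: 12340
+ *	CRTC 0 in modeset: 0
+ */
+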
+/**
+ * Simply calls _vblank_info() while holding the drm_device::struct_mutex lock.
+ */
+static int drm_vblank_info(char *buf, char **start, off_t offset, int request,
+ int *eof, void *data)
+{
+ struct drm_minor *minor = (struct drm_minor *) data;
+ struct drm_device *dev = minor->dev;
+ int ret;
+
+ mutex_lock(&dev->struct_mutex);
+ ret = drm__vblank_info(buf, start, offset, request, eof, data);
+ mutex_unlock(&dev->struct_mutex);
+ return ret;
+}
+
+/**
* Called when "/proc/dri/.../clients" is read.
*
* \param buf output buffer.
diff --git a/drivers/gpu/drm/drm_stub.c b/drivers/gpu/drm/drm_stub.c
index 66c96ec6667..5ca132afa4f 100644
--- a/drivers/gpu/drm/drm_stub.c
+++ b/drivers/gpu/drm/drm_stub.c
@@ -57,6 +57,14 @@ static int drm_minor_get_id(struct drm_device *dev, int type)
int ret;
int base = 0, limit = 63;
+ if (type == DRM_MINOR_CONTROL) {
+ base += 64;
+ limit = base + 127;
+ } else if (type == DRM_MINOR_RENDER) {
+ base += 128;
+ limit = base + 255;
+ }
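+	/*
+	 * Note: legacy minors are thus allocated starting at 0, control
+	 * minors starting at 64 and render minors starting at 128, so each
+	 * minor type gets its own id range.
+	 */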
+
again:
if (idr_pre_get(&drm_minors_idr, GFP_KERNEL) == 0) {
DRM_ERROR("Out of memory expanding drawable idr\n");
@@ -79,6 +87,104 @@ again:
return new_id;
}
+struct drm_master *drm_master_create(struct drm_minor *minor)
+{
+ struct drm_master *master;
+
+ master = drm_calloc(1, sizeof(*master), DRM_MEM_DRIVER);
+ if (!master)
+ return NULL;
+
+ kref_init(&master->refcount);
+ spin_lock_init(&master->lock.spinlock);
+ init_waitqueue_head(&master->lock.lock_queue);
+ drm_ht_create(&master->magiclist, DRM_MAGIC_HASH_ORDER);
+ INIT_LIST_HEAD(&master->magicfree);
+ master->minor = minor;
+
+ list_add_tail(&master->head, &minor->master_list);
+
+ return master;
+}
+
+struct drm_master *drm_master_get(struct drm_master *master)
+{
+ kref_get(&master->refcount);
+ return master;
+}
+
+static void drm_master_destroy(struct kref *kref)
+{
+ struct drm_master *master = container_of(kref, struct drm_master, refcount);
+ struct drm_magic_entry *pt, *next;
+ struct drm_device *dev = master->minor->dev;
+
+ list_del(&master->head);
+
+ if (dev->driver->master_destroy)
+ dev->driver->master_destroy(dev, master);
+
+ if (master->unique) {
+ drm_free(master->unique, master->unique_size, DRM_MEM_DRIVER);
+ master->unique = NULL;
+ master->unique_len = 0;
+ }
+
+ list_for_each_entry_safe(pt, next, &master->magicfree, head) {
+ list_del(&pt->head);
+ drm_ht_remove_item(&master->magiclist, &pt->hash_item);
+ drm_free(pt, sizeof(*pt), DRM_MEM_MAGIC);
+ }
+
+ drm_ht_remove(&master->magiclist);
+
+ if (master->lock.hw_lock) {
+ if (dev->sigdata.lock == master->lock.hw_lock)
+ dev->sigdata.lock = NULL;
+ master->lock.hw_lock = NULL;
+ master->lock.file_priv = NULL;
+ wake_up_interruptible(&master->lock.lock_queue);
+ }
+
+ drm_free(master, sizeof(*master), DRM_MEM_DRIVER);
+}
+
+void drm_master_put(struct drm_master **master)
+{
+ kref_put(&(*master)->refcount, drm_master_destroy);
+ *master = NULL;
+}
+
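+/*
+ * Hedged usage sketch (illustration only): holders take a reference on a
+ * master with drm_master_get() and drop it with drm_master_put(); the last
+ * put tears the master down via drm_master_destroy() and clears the caller's
+ * pointer, e.g.:
+ *
+ *	file_priv->minor->master = drm_master_get(file_priv->master);
+ *	...
+ *	drm_master_put(&file_priv->minor->master);
+ */
+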
+int drm_setmaster_ioctl(struct drm_device *dev, void *data,
+ struct drm_file *file_priv)
+{
+ if (file_priv->minor->master && file_priv->minor->master != file_priv->master)
+ return -EINVAL;
+
+ if (!file_priv->master)
+ return -EINVAL;
+
+ if (!file_priv->minor->master &&
+ file_priv->minor->master != file_priv->master) {
+ mutex_lock(&dev->struct_mutex);
+ file_priv->minor->master = drm_master_get(file_priv->master);
+		mutex_unlock(&dev->struct_mutex);
+ }
+
+ return 0;
+}
+
+int drm_dropmaster_ioctl(struct drm_device *dev, void *data,
+ struct drm_file *file_priv)
+{
+ if (!file_priv->master)
+ return -EINVAL;
+ mutex_lock(&dev->struct_mutex);
+ drm_master_put(&file_priv->minor->master);
+ mutex_unlock(&dev->struct_mutex);
+ return 0;
+}
+
static int drm_fill_in_dev(struct drm_device * dev, struct pci_dev *pdev,
const struct pci_device_id *ent,
struct drm_driver *driver)
@@ -92,7 +198,6 @@ static int drm_fill_in_dev(struct drm_device * dev, struct pci_dev *pdev,
spin_lock_init(&dev->count_lock);
spin_lock_init(&dev->drw_lock);
- spin_lock_init(&dev->lock.spinlock);
init_timer(&dev->timer);
mutex_init(&dev->struct_mutex);
mutex_init(&dev->ctxlist_mutex);
@@ -140,9 +245,6 @@ static int drm_fill_in_dev(struct drm_device * dev, struct pci_dev *pdev,
}
}
- if (dev->driver->load)
- if ((retcode = dev->driver->load(dev, ent->driver_data)))
- goto error_out_unreg;
retcode = drm_ctxbitmap_init(dev);
if (retcode) {
@@ -200,6 +302,7 @@ static int drm_get_minor(struct drm_device *dev, struct drm_minor **minor, int t
new_minor->device = MKDEV(DRM_MAJOR, minor_id);
new_minor->dev = dev;
new_minor->index = minor_id;
+ INIT_LIST_HEAD(&new_minor->master_list);
idr_replace(&drm_minors_idr, new_minor, minor_id);
@@ -267,8 +370,30 @@ int drm_get_dev(struct pci_dev *pdev, const struct pci_device_id *ent,
printk(KERN_ERR "DRM: Fill_in_dev failed.\n");
goto err_g2;
}
+
+ if (drm_core_check_feature(dev, DRIVER_MODESET)) {
+ ret = drm_get_minor(dev, &dev->control, DRM_MINOR_CONTROL);
+ if (ret)
+ goto err_g2;
+ }
+
if ((ret = drm_get_minor(dev, &dev->primary, DRM_MINOR_LEGACY)))
- goto err_g2;
+ goto err_g3;
+
+ if (dev->driver->load) {
+ ret = dev->driver->load(dev, ent->driver_data);
+ if (ret)
+ goto err_g3;
+ }
+
+ /* setup the grouping for the legacy output */
+ if (drm_core_check_feature(dev, DRIVER_MODESET)) {
+ ret = drm_mode_group_init_legacy_group(dev, &dev->primary->mode_group);
+ if (ret)
+ goto err_g3;
+ }
+
+ list_add_tail(&dev->driver_item, &driver->device_list);
DRM_INFO("Initialized %s %d.%d.%d %s on minor %d\n",
driver->name, driver->major, driver->minor, driver->patchlevel,
@@ -276,6 +401,8 @@ int drm_get_dev(struct pci_dev *pdev, const struct pci_device_id *ent,
return 0;
+err_g3:
+ drm_put_minor(&dev->primary);
err_g2:
pci_disable_device(pdev);
err_g1:
@@ -297,11 +424,6 @@ int drm_put_dev(struct drm_device * dev)
{
DRM_DEBUG("release primary %s\n", dev->driver->pci_driver.name);
- if (dev->unique) {
- drm_free(dev->unique, strlen(dev->unique) + 1, DRM_MEM_DRIVER);
- dev->unique = NULL;
- dev->unique_len = 0;
- }
if (dev->devname) {
drm_free(dev->devname, strlen(dev->devname) + 1,
DRM_MEM_DRIVER);
diff --git a/drivers/gpu/drm/drm_sysfs.c b/drivers/gpu/drm/drm_sysfs.c
index 1611b9bcbe7..65d72d094c8 100644
--- a/drivers/gpu/drm/drm_sysfs.c
+++ b/drivers/gpu/drm/drm_sysfs.c
@@ -20,6 +20,7 @@
#include "drmP.h"
#define to_drm_minor(d) container_of(d, struct drm_minor, kdev)
+#define to_drm_connector(d) container_of(d, struct drm_connector, kdev)
/**
* drm_sysfs_suspend - DRM class suspend hook
@@ -34,7 +35,7 @@ static int drm_sysfs_suspend(struct device *dev, pm_message_t state)
struct drm_minor *drm_minor = to_drm_minor(dev);
struct drm_device *drm_dev = drm_minor->dev;
- if (drm_dev->driver->suspend)
+ if (drm_minor->type == DRM_MINOR_LEGACY && drm_dev->driver->suspend)
return drm_dev->driver->suspend(drm_dev, state);
return 0;
@@ -52,7 +53,7 @@ static int drm_sysfs_resume(struct device *dev)
struct drm_minor *drm_minor = to_drm_minor(dev);
struct drm_device *drm_dev = drm_minor->dev;
- if (drm_dev->driver->resume)
+ if (drm_minor->type == DRM_MINOR_LEGACY && drm_dev->driver->resume)
return drm_dev->driver->resume(drm_dev);
return 0;
@@ -144,6 +145,323 @@ static void drm_sysfs_device_release(struct device *dev)
return;
}
+/*
+ * Connector properties
+ */
+static ssize_t status_show(struct device *device,
+ struct device_attribute *attr,
+ char *buf)
+{
+ struct drm_connector *connector = to_drm_connector(device);
+ enum drm_connector_status status;
+
+ status = connector->funcs->detect(connector);
+ return snprintf(buf, PAGE_SIZE, "%s",
+ drm_get_connector_status_name(status));
+}
+
+static ssize_t dpms_show(struct device *device,
+ struct device_attribute *attr,
+ char *buf)
+{
+ struct drm_connector *connector = to_drm_connector(device);
+ struct drm_device *dev = connector->dev;
+ uint64_t dpms_status;
+ int ret;
+
+ ret = drm_connector_property_get_value(connector,
+ dev->mode_config.dpms_property,
+ &dpms_status);
+ if (ret)
+ return 0;
+
+ return snprintf(buf, PAGE_SIZE, "%s",
+ drm_get_dpms_name((int)dpms_status));
+}
+
+static ssize_t enabled_show(struct device *device,
+ struct device_attribute *attr,
+ char *buf)
+{
+ struct drm_connector *connector = to_drm_connector(device);
+
+	return snprintf(buf, PAGE_SIZE, "%s",
+			connector->encoder ? "enabled" : "disabled");
+}
+
+static ssize_t edid_show(struct kobject *kobj, struct bin_attribute *attr,
+ char *buf, loff_t off, size_t count)
+{
+ struct device *connector_dev = container_of(kobj, struct device, kobj);
+ struct drm_connector *connector = to_drm_connector(connector_dev);
+ unsigned char *edid;
+ size_t size;
+
+ if (!connector->edid_blob_ptr)
+ return 0;
+
+ edid = connector->edid_blob_ptr->data;
+ size = connector->edid_blob_ptr->length;
+ if (!edid)
+ return 0;
+
+ if (off >= size)
+ return 0;
+
+ if (off + count > size)
+ count = size - off;
+ memcpy(buf, edid + off, count);
+
+ return count;
+}
+
+static ssize_t modes_show(struct device *device,
+ struct device_attribute *attr,
+ char *buf)
+{
+ struct drm_connector *connector = to_drm_connector(device);
+ struct drm_display_mode *mode;
+ int written = 0;
+
+ list_for_each_entry(mode, &connector->modes, head) {
+ written += snprintf(buf + written, PAGE_SIZE - written, "%s\n",
+ mode->name);
+ }
+
+ return written;
+}
+
+static ssize_t subconnector_show(struct device *device,
+ struct device_attribute *attr,
+ char *buf)
+{
+ struct drm_connector *connector = to_drm_connector(device);
+ struct drm_device *dev = connector->dev;
+ struct drm_property *prop = NULL;
+ uint64_t subconnector;
+ int is_tv = 0;
+ int ret;
+
+ switch (connector->connector_type) {
+ case DRM_MODE_CONNECTOR_DVII:
+ prop = dev->mode_config.dvi_i_subconnector_property;
+ break;
+ case DRM_MODE_CONNECTOR_Composite:
+ case DRM_MODE_CONNECTOR_SVIDEO:
+ case DRM_MODE_CONNECTOR_Component:
+ prop = dev->mode_config.tv_subconnector_property;
+ is_tv = 1;
+ break;
+ default:
+ DRM_ERROR("Wrong connector type for this property\n");
+ return 0;
+ }
+
+ if (!prop) {
+ DRM_ERROR("Unable to find subconnector property\n");
+ return 0;
+ }
+
+ ret = drm_connector_property_get_value(connector, prop, &subconnector);
+ if (ret)
+ return 0;
+
+ return snprintf(buf, PAGE_SIZE, "%s", is_tv ?
+ drm_get_tv_subconnector_name((int)subconnector) :
+ drm_get_dvi_i_subconnector_name((int)subconnector));
+}
+
+static ssize_t select_subconnector_show(struct device *device,
+ struct device_attribute *attr,
+ char *buf)
+{
+ struct drm_connector *connector = to_drm_connector(device);
+ struct drm_device *dev = connector->dev;
+ struct drm_property *prop = NULL;
+ uint64_t subconnector;
+ int is_tv = 0;
+ int ret;
+
+ switch (connector->connector_type) {
+ case DRM_MODE_CONNECTOR_DVII:
+ prop = dev->mode_config.dvi_i_select_subconnector_property;
+ break;
+ case DRM_MODE_CONNECTOR_Composite:
+ case DRM_MODE_CONNECTOR_SVIDEO:
+ case DRM_MODE_CONNECTOR_Component:
+ prop = dev->mode_config.tv_select_subconnector_property;
+ is_tv = 1;
+ break;
+ default:
+ DRM_ERROR("Wrong connector type for this property\n");
+ return 0;
+ }
+
+ if (!prop) {
+ DRM_ERROR("Unable to find select subconnector property\n");
+ return 0;
+ }
+
+ ret = drm_connector_property_get_value(connector, prop, &subconnector);
+ if (ret)
+ return 0;
+
+ return snprintf(buf, PAGE_SIZE, "%s", is_tv ?
+ drm_get_tv_select_name((int)subconnector) :
+ drm_get_dvi_i_select_name((int)subconnector));
+}
+
+static struct device_attribute connector_attrs[] = {
+ __ATTR_RO(status),
+ __ATTR_RO(enabled),
+ __ATTR_RO(dpms),
+ __ATTR_RO(modes),
+};
+
+/* These attributes are for both DVI-I connectors and all types of tv-out. */
+static struct device_attribute connector_attrs_opt1[] = {
+ __ATTR_RO(subconnector),
+ __ATTR_RO(select_subconnector),
+};
+
+static struct bin_attribute edid_attr = {
+ .attr.name = "edid",
+ .size = 128,
+ .read = edid_show,
+};
+
+/**
+ * drm_sysfs_connector_add - add a connector to sysfs
+ * @connector: connector to add
+ *
+ * Create a connector device in sysfs, along with its associated connector
+ * properties (so far, connection status, dpms, mode list & edid) and
+ * generate a hotplug event so userspace knows there's a new connector
+ * available.
+ *
+ * Note:
+ * This routine should only be called *once* for each registered connector.
+ * A second call for an already registered connector will trigger the BUG_ON
+ * below.
+ */
+int drm_sysfs_connector_add(struct drm_connector *connector)
+{
+ struct drm_device *dev = connector->dev;
+ int ret = 0, i, j;
+
+ /* We shouldn't get called more than once for the same connector */
+ BUG_ON(device_is_registered(&connector->kdev));
+
+ connector->kdev.parent = &dev->primary->kdev;
+ connector->kdev.class = drm_class;
+ connector->kdev.release = drm_sysfs_device_release;
+
+ DRM_DEBUG("adding \"%s\" to sysfs\n",
+ drm_get_connector_name(connector));
+
+ snprintf(connector->kdev.bus_id, BUS_ID_SIZE, "card%d-%s",
+ dev->primary->index, drm_get_connector_name(connector));
+ ret = device_register(&connector->kdev);
+
+ if (ret) {
+ DRM_ERROR("failed to register connector device: %d\n", ret);
+ goto out;
+ }
+
+ /* Standard attributes */
+
+ for (i = 0; i < ARRAY_SIZE(connector_attrs); i++) {
+ ret = device_create_file(&connector->kdev, &connector_attrs[i]);
+ if (ret)
+ goto err_out_files;
+ }
+
+ /* Optional attributes */
+ /*
+	 * In the long run it may be a good idea to make one set of
+	 * optional attributes per connector type.
+ */
+ switch (connector->connector_type) {
+ case DRM_MODE_CONNECTOR_DVII:
+ case DRM_MODE_CONNECTOR_Composite:
+ case DRM_MODE_CONNECTOR_SVIDEO:
+ case DRM_MODE_CONNECTOR_Component:
+ for (i = 0; i < ARRAY_SIZE(connector_attrs_opt1); i++) {
+ ret = device_create_file(&connector->kdev, &connector_attrs_opt1[i]);
+ if (ret)
+ goto err_out_files;
+ }
+ break;
+ default:
+ break;
+ }
+
+ ret = sysfs_create_bin_file(&connector->kdev.kobj, &edid_attr);
+ if (ret)
+ goto err_out_files;
+
+ /* Let userspace know we have a new connector */
+ drm_sysfs_hotplug_event(dev);
+
+ return 0;
+
+err_out_files:
+ if (i > 0)
+ for (j = 0; j < i; j++)
+			device_remove_file(&connector->kdev,
+					   &connector_attrs[j]);
+ device_unregister(&connector->kdev);
+
+out:
+ return ret;
+}
+EXPORT_SYMBOL(drm_sysfs_connector_add);
+
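+/*
+ * Illustration (not part of the code above): for a connector whose name
+ * resolves to "VGA-1" on minor 0, successful registration creates
+ *
+ *	/sys/class/drm/card0-VGA-1/status
+ *	/sys/class/drm/card0-VGA-1/enabled
+ *	/sys/class/drm/card0-VGA-1/dpms
+ *	/sys/class/drm/card0-VGA-1/modes
+ *	/sys/class/drm/card0-VGA-1/edid
+ *
+ * The "VGA-1" name is hypothetical; the real name comes from
+ * drm_get_connector_name().
+ */
+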
+/**
+ * drm_sysfs_connector_remove - remove a connector device from sysfs
+ * @connector: connector to remove
+ *
+ * Remove @connector and its associated attributes from sysfs. Note that
+ * the device model core will take care of sending the "remove" uevent
+ * at this time, so we don't need to do it.
+ *
+ * Note:
+ * This routine should only be called if the connector was previously
+ * successfully registered. If @connector hasn't been registered yet,
+ * you'll likely see a panic somewhere deep in sysfs code when called.
+ */
+void drm_sysfs_connector_remove(struct drm_connector *connector)
+{
+ int i;
+
+ DRM_DEBUG("removing \"%s\" from sysfs\n",
+ drm_get_connector_name(connector));
+
+ for (i = 0; i < ARRAY_SIZE(connector_attrs); i++)
+ device_remove_file(&connector->kdev, &connector_attrs[i]);
+ sysfs_remove_bin_file(&connector->kdev.kobj, &edid_attr);
+ device_unregister(&connector->kdev);
+}
+EXPORT_SYMBOL(drm_sysfs_connector_remove);
+
+/**
+ * drm_sysfs_hotplug_event - generate a DRM uevent
+ * @dev: DRM device
+ *
+ * Send a uevent for the DRM device specified by @dev. Currently we only
+ * set HOTPLUG=1 in the uevent environment, but this could be expanded to
+ * deal with other types of events.
+ */
+void drm_sysfs_hotplug_event(struct drm_device *dev)
+{
+ char *event_string = "HOTPLUG=1";
+ char *envp[] = { event_string, NULL };
+
+ DRM_DEBUG("generating hotplug event\n");
+
+ kobject_uevent_env(&dev->primary->kdev.kobj, KOBJ_CHANGE, envp);
+}
+
/**
* drm_sysfs_device_add - adds a class device to sysfs for a character driver
* @dev: DRM device to be added
@@ -163,7 +481,12 @@ int drm_sysfs_device_add(struct drm_minor *minor)
minor->kdev.class = drm_class;
minor->kdev.release = drm_sysfs_device_release;
minor->kdev.devt = minor->device;
- minor_str = "card%d";
+ if (minor->type == DRM_MINOR_CONTROL)
+ minor_str = "controlD%d";
+ else if (minor->type == DRM_MINOR_RENDER)
+ minor_str = "renderD%d";
+ else
+ minor_str = "card%d";
snprintf(minor->kdev.bus_id, BUS_ID_SIZE, minor_str, minor->index);
diff --git a/drivers/gpu/drm/drm_vm.c b/drivers/gpu/drm/drm_vm.c
index c234c6f24a8..3ffae021d28 100644
--- a/drivers/gpu/drm/drm_vm.c
+++ b/drivers/gpu/drm/drm_vm.c
@@ -267,6 +267,9 @@ static void drm_vm_shm_close(struct vm_area_struct *vma)
dmah.size = map->size;
__drm_pci_free(dev, &dmah);
break;
+ case _DRM_GEM:
+ DRM_ERROR("tried to rmmap GEM object\n");
+ break;
}
drm_free(map, sizeof(*map), DRM_MEM_MAPS);
}
@@ -399,7 +402,7 @@ static struct vm_operations_struct drm_vm_sg_ops = {
* Create a new drm_vma_entry structure as the \p vma private data entry and
* add it to drm_device::vmalist.
*/
-static void drm_vm_open_locked(struct vm_area_struct *vma)
+void drm_vm_open_locked(struct vm_area_struct *vma)
{
struct drm_file *priv = vma->vm_file->private_data;
struct drm_device *dev = priv->minor->dev;
@@ -540,7 +543,7 @@ EXPORT_SYMBOL(drm_core_get_reg_ofs);
* according to the mapping type and remaps the pages. Finally sets the file
* pointer and calls vm_open().
*/
-static int drm_mmap_locked(struct file *filp, struct vm_area_struct *vma)
+int drm_mmap_locked(struct file *filp, struct vm_area_struct *vma)
{
struct drm_file *priv = filp->private_data;
struct drm_device *dev = priv->minor->dev;
diff --git a/drivers/gpu/drm/i915/Makefile b/drivers/gpu/drm/i915/Makefile
index d8fb5d8ee7e..dd57a5bd457 100644
--- a/drivers/gpu/drm/i915/Makefile
+++ b/drivers/gpu/drm/i915/Makefile
@@ -8,7 +8,22 @@ i915-y := i915_drv.o i915_dma.o i915_irq.o i915_mem.o \
i915_gem.o \
i915_gem_debug.o \
i915_gem_proc.o \
- i915_gem_tiling.o
+ i915_gem_tiling.o \
+ intel_display.o \
+ intel_crt.o \
+ intel_lvds.o \
+ intel_bios.o \
+ intel_sdvo.o \
+ intel_modes.o \
+ intel_i2c.o \
+ intel_fb.o \
+ intel_tv.o \
+ intel_dvo.o \
+ dvo_ch7xxx.o \
+ dvo_ch7017.o \
+ dvo_ivch.o \
+ dvo_tfp410.o \
+ dvo_sil164.o
i915-$(CONFIG_ACPI) += i915_opregion.o
i915-$(CONFIG_COMPAT) += i915_ioc32.o
diff --git a/drivers/gpu/drm/i915/dvo.h b/drivers/gpu/drm/i915/dvo.h
new file mode 100644
index 00000000000..e747ac42fe3
--- /dev/null
+++ b/drivers/gpu/drm/i915/dvo.h
@@ -0,0 +1,157 @@
+/*
+ * Copyright © 2006 Eric Anholt
+ *
+ * Permission to use, copy, modify, distribute, and sell this software and its
+ * documentation for any purpose is hereby granted without fee, provided that
+ * the above copyright notice appear in all copies and that both that copyright
+ * notice and this permission notice appear in supporting documentation, and
+ * that the name of the copyright holders not be used in advertising or
+ * publicity pertaining to distribution of the software without specific,
+ * written prior permission. The copyright holders make no representations
+ * about the suitability of this software for any purpose. It is provided "as
+ * is" without express or implied warranty.
+ *
+ * THE COPYRIGHT HOLDERS DISCLAIM ALL WARRANTIES WITH REGARD TO THIS SOFTWARE,
+ * INCLUDING ALL IMPLIED WARRANTIES OF MERCHANTABILITY AND FITNESS, IN NO
+ * EVENT SHALL THE COPYRIGHT HOLDERS BE LIABLE FOR ANY SPECIAL, INDIRECT OR
+ * CONSEQUENTIAL DAMAGES OR ANY DAMAGES WHATSOEVER RESULTING FROM LOSS OF USE,
+ * DATA OR PROFITS, WHETHER IN AN ACTION OF CONTRACT, NEGLIGENCE OR OTHER
+ * TORTIOUS ACTION, ARISING OUT OF OR IN CONNECTION WITH THE USE OR PERFORMANCE
+ * OF THIS SOFTWARE.
+ */
+
+#ifndef _INTEL_DVO_H
+#define _INTEL_DVO_H
+
+#include <linux/i2c.h>
+#include "drmP.h"
+#include "drm.h"
+#include "drm_crtc.h"
+#include "intel_drv.h"
+
+struct intel_dvo_device {
+ char *name;
+ int type;
+ /* DVOA/B/C output register */
+ u32 dvo_reg;
+ /* GPIO register used for i2c bus to control this device */
+ u32 gpio;
+ int slave_addr;
+ struct intel_i2c_chan *i2c_bus;
+
+ const struct intel_dvo_dev_ops *dev_ops;
+ void *dev_priv;
+
+ struct drm_display_mode *panel_fixed_mode;
+ bool panel_wants_dither;
+};
+
+struct intel_dvo_dev_ops {
+ /*
+ * Initialize the device at startup time.
+	 * Returns false if the device does not exist.
+ */
+ bool (*init)(struct intel_dvo_device *dvo,
+ struct intel_i2c_chan *i2cbus);
+
+ /*
+ * Called to allow the output a chance to create properties after the
+ * RandR objects have been created.
+ */
+ void (*create_resources)(struct intel_dvo_device *dvo);
+
+ /*
+ * Turn on/off output or set intermediate power levels if available.
+ *
+ * Unsupported intermediate modes drop to the lower power setting.
+ * If the mode is DPMSModeOff, the output must be disabled,
+ * as the DPLL may be disabled afterwards.
+ */
+ void (*dpms)(struct intel_dvo_device *dvo, int mode);
+
+ /*
+ * Saves the output's state for restoration on VT switch.
+ */
+ void (*save)(struct intel_dvo_device *dvo);
+
+ /*
+	 * Restores the output's state at VT switch.
+ */
+ void (*restore)(struct intel_dvo_device *dvo);
+
+ /*
+ * Callback for testing a video mode for a given output.
+ *
+ * This function should only check for cases where a mode can't
+ * be supported on the output specifically, and not represent
+ * generic CRTC limitations.
+ *
+ * \return MODE_OK if the mode is valid, or another MODE_* otherwise.
+ */
+ int (*mode_valid)(struct intel_dvo_device *dvo,
+ struct drm_display_mode *mode);
+
+ /*
+ * Callback to adjust the mode to be set in the CRTC.
+ *
+ * This allows an output to adjust the clock or even the entire set of
+ * timings, which is used for panels with fixed timings or for
+ * buses with clock limitations.
+ */
+ bool (*mode_fixup)(struct intel_dvo_device *dvo,
+ struct drm_display_mode *mode,
+ struct drm_display_mode *adjusted_mode);
+
+ /*
+ * Callback for preparing mode changes on an output
+ */
+ void (*prepare)(struct intel_dvo_device *dvo);
+
+ /*
+ * Callback for committing mode changes on an output
+ */
+ void (*commit)(struct intel_dvo_device *dvo);
+
+ /*
+ * Callback for setting up a video mode after fixups have been made.
+ *
+	 * This is only called while the output is disabled.  The dpms callback
+	 * must be all that's necessary for the output to turn on after this
+	 * function is called.
+ */
+ void (*mode_set)(struct intel_dvo_device *dvo,
+ struct drm_display_mode *mode,
+ struct drm_display_mode *adjusted_mode);
+
+ /*
+ * Probe for a connected output, and return detect_status.
+ */
+ enum drm_connector_status (*detect)(struct intel_dvo_device *dvo);
+
+ /**
+ * Query the device for the modes it provides.
+ *
+ * This function may also update MonInfo, mm_width, and mm_height.
+ *
+ * \return singly-linked list of modes or NULL if no modes found.
+ */
+ struct drm_display_mode *(*get_modes)(struct intel_dvo_device *dvo);
+
+ /**
+ * Clean up driver-specific bits of the output
+ */
+ void (*destroy) (struct intel_dvo_device *dvo);
+
+ /**
+ * Debugging hook to dump device registers to log file
+ */
+ void (*dump_regs)(struct intel_dvo_device *dvo);
+};
+
+extern struct intel_dvo_dev_ops sil164_ops;
+extern struct intel_dvo_dev_ops ch7xxx_ops;
+extern struct intel_dvo_dev_ops ivch_ops;
+extern struct intel_dvo_dev_ops tfp410_ops;
+extern struct intel_dvo_dev_ops ch7017_ops;
+
+#endif /* _INTEL_DVO_H */
diff --git a/drivers/gpu/drm/i915/dvo_ch7017.c b/drivers/gpu/drm/i915/dvo_ch7017.c
new file mode 100644
index 00000000000..03d4b4973b0
--- /dev/null
+++ b/drivers/gpu/drm/i915/dvo_ch7017.c
@@ -0,0 +1,454 @@
+/*
+ * Copyright © 2006 Intel Corporation
+ *
+ * Permission is hereby granted, free of charge, to any person obtaining a
+ * copy of this software and associated documentation files (the "Software"),
+ * to deal in the Software without restriction, including without limitation
+ * the rights to use, copy, modify, merge, publish, distribute, sublicense,
+ * and/or sell copies of the Software, and to permit persons to whom the
+ * Software is furnished to do so, subject to the following conditions:
+ *
+ * The above copyright notice and this permission notice (including the next
+ * paragraph) shall be included in all copies or substantial portions of the
+ * Software.
+ *
+ * THE SOFTWARE IS PROVIDED "AS IS", WITHOUT WARRANTY OF ANY KIND, EXPRESS OR
+ * IMPLIED, INCLUDING BUT NOT LIMITED TO THE WARRANTIES OF MERCHANTABILITY,
+ * FITNESS FOR A PARTICULAR PURPOSE AND NONINFRINGEMENT. IN NO EVENT SHALL
+ * THE AUTHORS OR COPYRIGHT HOLDERS BE LIABLE FOR ANY CLAIM, DAMAGES OR OTHER
+ * LIABILITY, WHETHER IN AN ACTION OF CONTRACT, TORT OR OTHERWISE, ARISING
+ * FROM, OUT OF OR IN CONNECTION WITH THE SOFTWARE OR THE USE OR OTHER
+ * DEALINGS IN THE SOFTWARE.
+ *
+ * Authors:
+ * Eric Anholt <eric@anholt.net>
+ *
+ */
+
+#include "dvo.h"
+
+#define CH7017_TV_DISPLAY_MODE 0x00
+#define CH7017_FLICKER_FILTER 0x01
+#define CH7017_VIDEO_BANDWIDTH 0x02
+#define CH7017_TEXT_ENHANCEMENT 0x03
+#define CH7017_START_ACTIVE_VIDEO 0x04
+#define CH7017_HORIZONTAL_POSITION 0x05
+#define CH7017_VERTICAL_POSITION 0x06
+#define CH7017_BLACK_LEVEL 0x07
+#define CH7017_CONTRAST_ENHANCEMENT 0x08
+#define CH7017_TV_PLL 0x09
+#define CH7017_TV_PLL_M 0x0a
+#define CH7017_TV_PLL_N 0x0b
+#define CH7017_SUB_CARRIER_0 0x0c
+#define CH7017_CIV_CONTROL 0x10
+#define CH7017_CIV_0 0x11
+#define CH7017_CHROMA_BOOST 0x14
+#define CH7017_CLOCK_MODE 0x1c
+#define CH7017_INPUT_CLOCK 0x1d
+#define CH7017_GPIO_CONTROL 0x1e
+#define CH7017_INPUT_DATA_FORMAT 0x1f
+#define CH7017_CONNECTION_DETECT 0x20
+#define CH7017_DAC_CONTROL 0x21
+#define CH7017_BUFFERED_CLOCK_OUTPUT 0x22
+#define CH7017_DEFEAT_VSYNC 0x47
+#define CH7017_TEST_PATTERN 0x48
+
+#define CH7017_POWER_MANAGEMENT 0x49
+/** Enables the TV output path. */
+#define CH7017_TV_EN (1 << 0)
+#define CH7017_DAC0_POWER_DOWN (1 << 1)
+#define CH7017_DAC1_POWER_DOWN (1 << 2)
+#define CH7017_DAC2_POWER_DOWN (1 << 3)
+#define CH7017_DAC3_POWER_DOWN (1 << 4)
+/** Powers down the TV out block, and DAC0-3 */
+#define CH7017_TV_POWER_DOWN_EN (1 << 5)
+
+#define CH7017_VERSION_ID 0x4a
+
+#define CH7017_DEVICE_ID 0x4b
+#define CH7017_DEVICE_ID_VALUE 0x1b
+#define CH7018_DEVICE_ID_VALUE 0x1a
+#define CH7019_DEVICE_ID_VALUE 0x19
+
+#define CH7017_XCLK_D2_ADJUST 0x53
+#define CH7017_UP_SCALER_COEFF_0 0x55
+#define CH7017_UP_SCALER_COEFF_1 0x56
+#define CH7017_UP_SCALER_COEFF_2 0x57
+#define CH7017_UP_SCALER_COEFF_3 0x58
+#define CH7017_UP_SCALER_COEFF_4 0x59
+#define CH7017_UP_SCALER_VERTICAL_INC_0 0x5a
+#define CH7017_UP_SCALER_VERTICAL_INC_1 0x5b
+#define CH7017_GPIO_INVERT 0x5c
+#define CH7017_UP_SCALER_HORIZONTAL_INC_0 0x5d
+#define CH7017_UP_SCALER_HORIZONTAL_INC_1 0x5e
+
+#define CH7017_HORIZONTAL_ACTIVE_PIXEL_INPUT 0x5f
+/**< Low bits of horizontal active pixel input */
+
+#define CH7017_ACTIVE_INPUT_LINE_OUTPUT 0x60
+/** High bits of horizontal active pixel input */
+#define CH7017_LVDS_HAP_INPUT_MASK (0x7 << 0)
+/** High bits of vertical active line output */
+#define CH7017_LVDS_VAL_HIGH_MASK (0x7 << 3)
+
+#define CH7017_VERTICAL_ACTIVE_LINE_OUTPUT 0x61
+/**< Low bits of vertical active line output */
+
+#define CH7017_HORIZONTAL_ACTIVE_PIXEL_OUTPUT 0x62
+/**< Low bits of horizontal active pixel output */
+
+#define CH7017_LVDS_POWER_DOWN 0x63
+/** High bits of horizontal active pixel output */
+#define CH7017_LVDS_HAP_HIGH_MASK (0x7 << 0)
+/** Enables the LVDS power down state transition */
+#define CH7017_LVDS_POWER_DOWN_EN (1 << 6)
+/** Enables the LVDS upscaler */
+#define CH7017_LVDS_UPSCALER_EN (1 << 7)
+#define CH7017_LVDS_POWER_DOWN_DEFAULT_RESERVED 0x08
+
+#define CH7017_LVDS_ENCODING 0x64
+#define CH7017_LVDS_DITHER_2D (1 << 2)
+#define CH7017_LVDS_DITHER_DIS (1 << 3)
+#define CH7017_LVDS_DUAL_CHANNEL_EN (1 << 4)
+#define CH7017_LVDS_24_BIT (1 << 5)
+
+#define CH7017_LVDS_ENCODING_2 0x65
+
+#define CH7017_LVDS_PLL_CONTROL 0x66
+/** Enables the LVDS panel output path */
+#define CH7017_LVDS_PANEN (1 << 0)
+/** Enables the LVDS panel backlight */
+#define CH7017_LVDS_BKLEN (1 << 3)
+
+#define CH7017_POWER_SEQUENCING_T1 0x67
+#define CH7017_POWER_SEQUENCING_T2 0x68
+#define CH7017_POWER_SEQUENCING_T3 0x69
+#define CH7017_POWER_SEQUENCING_T4 0x6a
+#define CH7017_POWER_SEQUENCING_T5 0x6b
+#define CH7017_GPIO_DRIVER_TYPE 0x6c
+#define CH7017_GPIO_DATA 0x6d
+#define CH7017_GPIO_DIRECTION_CONTROL 0x6e
+
+#define CH7017_LVDS_PLL_FEEDBACK_DIV 0x71
+# define CH7017_LVDS_PLL_FEED_BACK_DIVIDER_SHIFT 4
+# define CH7017_LVDS_PLL_FEED_FORWARD_DIVIDER_SHIFT 0
+# define CH7017_LVDS_PLL_FEEDBACK_DEFAULT_RESERVED 0x80
+
+#define CH7017_LVDS_PLL_VCO_CONTROL 0x72
+# define CH7017_LVDS_PLL_VCO_DEFAULT_RESERVED 0x80
+# define CH7017_LVDS_PLL_VCO_SHIFT 4
+# define CH7017_LVDS_PLL_POST_SCALE_DIV_SHIFT 0
+
+#define CH7017_OUTPUTS_ENABLE 0x73
+# define CH7017_CHARGE_PUMP_LOW 0x0
+# define CH7017_CHARGE_PUMP_HIGH 0x3
+# define CH7017_LVDS_CHANNEL_A (1 << 3)
+# define CH7017_LVDS_CHANNEL_B (1 << 4)
+# define CH7017_TV_DAC_A (1 << 5)
+# define CH7017_TV_DAC_B (1 << 6)
+# define CH7017_DDC_SELECT_DC2 (1 << 7)
+
+#define CH7017_LVDS_OUTPUT_AMPLITUDE 0x74
+#define CH7017_LVDS_PLL_EMI_REDUCTION 0x75
+#define CH7017_LVDS_POWER_DOWN_FLICKER 0x76
+
+#define CH7017_LVDS_CONTROL_2 0x78
+# define CH7017_LOOP_FILTER_SHIFT 5
+# define CH7017_PHASE_DETECTOR_SHIFT 0
+
+#define CH7017_BANG_LIMIT_CONTROL 0x7f
+
+struct ch7017_priv {
+ uint8_t save_hapi;
+ uint8_t save_vali;
+ uint8_t save_valo;
+ uint8_t save_ailo;
+ uint8_t save_lvds_pll_vco;
+ uint8_t save_feedback_div;
+ uint8_t save_lvds_control_2;
+ uint8_t save_outputs_enable;
+ uint8_t save_lvds_power_down;
+ uint8_t save_power_management;
+};
+
+static void ch7017_dump_regs(struct intel_dvo_device *dvo);
+static void ch7017_dpms(struct intel_dvo_device *dvo, int mode);
+
+static bool ch7017_read(struct intel_dvo_device *dvo, int addr, uint8_t *val)
+{
+ struct intel_i2c_chan *i2cbus = dvo->i2c_bus;
+ u8 out_buf[2];
+ u8 in_buf[2];
+
+ struct i2c_msg msgs[] = {
+ {
+ .addr = i2cbus->slave_addr,
+ .flags = 0,
+ .len = 1,
+ .buf = out_buf,
+ },
+ {
+ .addr = i2cbus->slave_addr,
+ .flags = I2C_M_RD,
+ .len = 1,
+ .buf = in_buf,
+ }
+ };
+
+ out_buf[0] = addr;
+ out_buf[1] = 0;
+
+ if (i2c_transfer(&i2cbus->adapter, msgs, 2) == 2) {
+		*val = in_buf[0];
+		return true;
+	}
+
+ return false;
+}
+
+static bool ch7017_write(struct intel_dvo_device *dvo, int addr, uint8_t val)
+{
+ struct intel_i2c_chan *i2cbus = dvo->i2c_bus;
+ uint8_t out_buf[2];
+ struct i2c_msg msg = {
+ .addr = i2cbus->slave_addr,
+ .flags = 0,
+ .len = 2,
+ .buf = out_buf,
+ };
+
+ out_buf[0] = addr;
+ out_buf[1] = val;
+
+ if (i2c_transfer(&i2cbus->adapter, &msg, 1) == 1)
+ return true;
+
+ return false;
+}
+
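+/*
+ * Note on the accessors above: a register read is a combined i2c transaction
+ * (one message writing the register index, then one message reading the
+ * value back), while a register write sends the index and the new value in a
+ * single two-byte message.
+ */
+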
+/** Probes for a CH7017 on the given bus and slave address. */
+static bool ch7017_init(struct intel_dvo_device *dvo,
+ struct intel_i2c_chan *i2cbus)
+{
+ struct ch7017_priv *priv;
+ uint8_t val;
+
+ priv = kzalloc(sizeof(struct ch7017_priv), GFP_KERNEL);
+ if (priv == NULL)
+ return false;
+
+ dvo->i2c_bus = i2cbus;
+ dvo->i2c_bus->slave_addr = dvo->slave_addr;
+ dvo->dev_priv = priv;
+
+ if (!ch7017_read(dvo, CH7017_DEVICE_ID, &val))
+ goto fail;
+
+ if (val != CH7017_DEVICE_ID_VALUE &&
+ val != CH7018_DEVICE_ID_VALUE &&
+ val != CH7019_DEVICE_ID_VALUE) {
+		DRM_DEBUG("ch701x not detected, got %d from %s slave %d.\n",
+			  val, i2cbus->adapter.name, i2cbus->slave_addr);
+ goto fail;
+ }
+
+ return true;
+fail:
+ kfree(priv);
+ return false;
+}
+
+static enum drm_connector_status ch7017_detect(struct intel_dvo_device *dvo)
+{
+ return connector_status_unknown;
+}
+
+static enum drm_mode_status ch7017_mode_valid(struct intel_dvo_device *dvo,
+ struct drm_display_mode *mode)
+{
+ if (mode->clock > 160000)
+ return MODE_CLOCK_HIGH;
+
+ return MODE_OK;
+}
+
+static void ch7017_mode_set(struct intel_dvo_device *dvo,
+ struct drm_display_mode *mode,
+ struct drm_display_mode *adjusted_mode)
+{
+ uint8_t lvds_pll_feedback_div, lvds_pll_vco_control;
+ uint8_t outputs_enable, lvds_control_2, lvds_power_down;
+ uint8_t horizontal_active_pixel_input;
+ uint8_t horizontal_active_pixel_output, vertical_active_line_output;
+ uint8_t active_input_line_output;
+
+ DRM_DEBUG("Registers before mode setting\n");
+ ch7017_dump_regs(dvo);
+
+	/* LVDS PLL settings from page 75 of 7017-7017ds.pdf */
+ if (mode->clock < 100000) {
+ outputs_enable = CH7017_LVDS_CHANNEL_A | CH7017_CHARGE_PUMP_LOW;
+ lvds_pll_feedback_div = CH7017_LVDS_PLL_FEEDBACK_DEFAULT_RESERVED |
+ (2 << CH7017_LVDS_PLL_FEED_BACK_DIVIDER_SHIFT) |
+ (13 << CH7017_LVDS_PLL_FEED_FORWARD_DIVIDER_SHIFT);
+ lvds_pll_vco_control = CH7017_LVDS_PLL_VCO_DEFAULT_RESERVED |
+ (2 << CH7017_LVDS_PLL_VCO_SHIFT) |
+ (3 << CH7017_LVDS_PLL_POST_SCALE_DIV_SHIFT);
+ lvds_control_2 = (1 << CH7017_LOOP_FILTER_SHIFT) |
+ (0 << CH7017_PHASE_DETECTOR_SHIFT);
+ } else {
+ outputs_enable = CH7017_LVDS_CHANNEL_A | CH7017_CHARGE_PUMP_HIGH;
+ lvds_pll_feedback_div = CH7017_LVDS_PLL_FEEDBACK_DEFAULT_RESERVED |
+ (2 << CH7017_LVDS_PLL_FEED_BACK_DIVIDER_SHIFT) |
+ (3 << CH7017_LVDS_PLL_FEED_FORWARD_DIVIDER_SHIFT);
+ lvds_pll_feedback_div = 35;
+ lvds_control_2 = (3 << CH7017_LOOP_FILTER_SHIFT) |
+ (0 << CH7017_PHASE_DETECTOR_SHIFT);
+ if (1) { /* XXX: dual channel panel detection. Assume yes for now. */
+ outputs_enable |= CH7017_LVDS_CHANNEL_B;
+ lvds_pll_vco_control = CH7017_LVDS_PLL_VCO_DEFAULT_RESERVED |
+ (2 << CH7017_LVDS_PLL_VCO_SHIFT) |
+ (13 << CH7017_LVDS_PLL_POST_SCALE_DIV_SHIFT);
+ } else {
+ lvds_pll_vco_control = CH7017_LVDS_PLL_VCO_DEFAULT_RESERVED |
+ (1 << CH7017_LVDS_PLL_VCO_SHIFT) |
+ (13 << CH7017_LVDS_PLL_POST_SCALE_DIV_SHIFT);
+ }
+ }
+
+ horizontal_active_pixel_input = mode->hdisplay & 0x00ff;
+
+ vertical_active_line_output = mode->vdisplay & 0x00ff;
+ horizontal_active_pixel_output = mode->hdisplay & 0x00ff;
+
+ active_input_line_output = ((mode->hdisplay & 0x0700) >> 8) |
+ (((mode->vdisplay & 0x0700) >> 8) << 3);
+
+ lvds_power_down = CH7017_LVDS_POWER_DOWN_DEFAULT_RESERVED |
+ (mode->hdisplay & 0x0700) >> 8;
+
+ ch7017_dpms(dvo, DRM_MODE_DPMS_OFF);
+ ch7017_write(dvo, CH7017_HORIZONTAL_ACTIVE_PIXEL_INPUT,
+ horizontal_active_pixel_input);
+ ch7017_write(dvo, CH7017_HORIZONTAL_ACTIVE_PIXEL_OUTPUT,
+ horizontal_active_pixel_output);
+ ch7017_write(dvo, CH7017_VERTICAL_ACTIVE_LINE_OUTPUT,
+ vertical_active_line_output);
+ ch7017_write(dvo, CH7017_ACTIVE_INPUT_LINE_OUTPUT,
+ active_input_line_output);
+ ch7017_write(dvo, CH7017_LVDS_PLL_VCO_CONTROL, lvds_pll_vco_control);
+ ch7017_write(dvo, CH7017_LVDS_PLL_FEEDBACK_DIV, lvds_pll_feedback_div);
+ ch7017_write(dvo, CH7017_LVDS_CONTROL_2, lvds_control_2);
+ ch7017_write(dvo, CH7017_OUTPUTS_ENABLE, outputs_enable);
+
+ /* Turn the LVDS back on with new settings. */
+ ch7017_write(dvo, CH7017_LVDS_POWER_DOWN, lvds_power_down);
+
+ DRM_DEBUG("Registers after mode setting\n");
+ ch7017_dump_regs(dvo);
+}
+
+/* set the CH7017 power state */
+static void ch7017_dpms(struct intel_dvo_device *dvo, int mode)
+{
+ uint8_t val;
+
+ ch7017_read(dvo, CH7017_LVDS_POWER_DOWN, &val);
+
+ /* Turn off TV/VGA, and never turn it on since we don't support it. */
+ ch7017_write(dvo, CH7017_POWER_MANAGEMENT,
+ CH7017_DAC0_POWER_DOWN |
+ CH7017_DAC1_POWER_DOWN |
+ CH7017_DAC2_POWER_DOWN |
+ CH7017_DAC3_POWER_DOWN |
+ CH7017_TV_POWER_DOWN_EN);
+
+ if (mode == DRM_MODE_DPMS_ON) {
+ /* Turn on the LVDS */
+ ch7017_write(dvo, CH7017_LVDS_POWER_DOWN,
+ val & ~CH7017_LVDS_POWER_DOWN_EN);
+ } else {
+ /* Turn off the LVDS */
+ ch7017_write(dvo, CH7017_LVDS_POWER_DOWN,
+ val | CH7017_LVDS_POWER_DOWN_EN);
+ }
+
+	/* XXX: Should actually wait for the power status to update somehow */
+ udelay(20000);
+}
+
+static void ch7017_dump_regs(struct intel_dvo_device *dvo)
+{
+ uint8_t val;
+
+#define DUMP(reg) \
+do { \
+ ch7017_read(dvo, reg, &val); \
+ DRM_DEBUG(#reg ": %02x\n", val); \
+} while (0)
+
+ DUMP(CH7017_HORIZONTAL_ACTIVE_PIXEL_INPUT);
+ DUMP(CH7017_HORIZONTAL_ACTIVE_PIXEL_OUTPUT);
+ DUMP(CH7017_VERTICAL_ACTIVE_LINE_OUTPUT);
+ DUMP(CH7017_ACTIVE_INPUT_LINE_OUTPUT);
+ DUMP(CH7017_LVDS_PLL_VCO_CONTROL);
+ DUMP(CH7017_LVDS_PLL_FEEDBACK_DIV);
+ DUMP(CH7017_LVDS_CONTROL_2);
+ DUMP(CH7017_OUTPUTS_ENABLE);
+ DUMP(CH7017_LVDS_POWER_DOWN);
+}
+
+static void ch7017_save(struct intel_dvo_device *dvo)
+{
+ struct ch7017_priv *priv = dvo->dev_priv;
+
+ ch7017_read(dvo, CH7017_HORIZONTAL_ACTIVE_PIXEL_INPUT, &priv->save_hapi);
+ ch7017_read(dvo, CH7017_VERTICAL_ACTIVE_LINE_OUTPUT, &priv->save_valo);
+ ch7017_read(dvo, CH7017_ACTIVE_INPUT_LINE_OUTPUT, &priv->save_ailo);
+ ch7017_read(dvo, CH7017_LVDS_PLL_VCO_CONTROL, &priv->save_lvds_pll_vco);
+ ch7017_read(dvo, CH7017_LVDS_PLL_FEEDBACK_DIV, &priv->save_feedback_div);
+ ch7017_read(dvo, CH7017_LVDS_CONTROL_2, &priv->save_lvds_control_2);
+ ch7017_read(dvo, CH7017_OUTPUTS_ENABLE, &priv->save_outputs_enable);
+ ch7017_read(dvo, CH7017_LVDS_POWER_DOWN, &priv->save_lvds_power_down);
+ ch7017_read(dvo, CH7017_POWER_MANAGEMENT, &priv->save_power_management);
+}
+
+static void ch7017_restore(struct intel_dvo_device *dvo)
+{
+ struct ch7017_priv *priv = dvo->dev_priv;
+
+ /* Power down before changing mode */
+ ch7017_dpms(dvo, DRM_MODE_DPMS_OFF);
+
+ ch7017_write(dvo, CH7017_HORIZONTAL_ACTIVE_PIXEL_INPUT, priv->save_hapi);
+ ch7017_write(dvo, CH7017_VERTICAL_ACTIVE_LINE_OUTPUT, priv->save_valo);
+ ch7017_write(dvo, CH7017_ACTIVE_INPUT_LINE_OUTPUT, priv->save_ailo);
+ ch7017_write(dvo, CH7017_LVDS_PLL_VCO_CONTROL, priv->save_lvds_pll_vco);
+ ch7017_write(dvo, CH7017_LVDS_PLL_FEEDBACK_DIV, priv->save_feedback_div);
+ ch7017_write(dvo, CH7017_LVDS_CONTROL_2, priv->save_lvds_control_2);
+ ch7017_write(dvo, CH7017_OUTPUTS_ENABLE, priv->save_outputs_enable);
+ ch7017_write(dvo, CH7017_LVDS_POWER_DOWN, priv->save_lvds_power_down);
+ ch7017_write(dvo, CH7017_POWER_MANAGEMENT, priv->save_power_management);
+}
+
+static void ch7017_destroy(struct intel_dvo_device *dvo)
+{
+ struct ch7017_priv *priv = dvo->dev_priv;
+
+ if (priv) {
+ kfree(priv);
+ dvo->dev_priv = NULL;
+ }
+}
+
+struct intel_dvo_dev_ops ch7017_ops = {
+ .init = ch7017_init,
+ .detect = ch7017_detect,
+ .mode_valid = ch7017_mode_valid,
+ .mode_set = ch7017_mode_set,
+ .dpms = ch7017_dpms,
+ .dump_regs = ch7017_dump_regs,
+ .save = ch7017_save,
+ .restore = ch7017_restore,
+ .destroy = ch7017_destroy,
+};
diff --git a/drivers/gpu/drm/i915/dvo_ch7xxx.c b/drivers/gpu/drm/i915/dvo_ch7xxx.c
new file mode 100644
index 00000000000..d2fd95dbd03
--- /dev/null
+++ b/drivers/gpu/drm/i915/dvo_ch7xxx.c
@@ -0,0 +1,368 @@
+/**************************************************************************
+
+Copyright © 2006 Dave Airlie
+
+All Rights Reserved.
+
+Permission is hereby granted, free of charge, to any person obtaining a
+copy of this software and associated documentation files (the
+"Software"), to deal in the Software without restriction, including
+without limitation the rights to use, copy, modify, merge, publish,
+distribute, sub license, and/or sell copies of the Software, and to
+permit persons to whom the Software is furnished to do so, subject to
+the following conditions:
+
+The above copyright notice and this permission notice (including the
+next paragraph) shall be included in all copies or substantial portions
+of the Software.
+
+THE SOFTWARE IS PROVIDED "AS IS", WITHOUT WARRANTY OF ANY KIND, EXPRESS
+OR IMPLIED, INCLUDING BUT NOT LIMITED TO THE WARRANTIES OF
+MERCHANTABILITY, FITNESS FOR A PARTICULAR PURPOSE AND NON-INFRINGEMENT.
+IN NO EVENT SHALL THE AUTHOR BE LIABLE FOR
+ANY CLAIM, DAMAGES OR OTHER LIABILITY, WHETHER IN AN ACTION OF CONTRACT,
+TORT OR OTHERWISE, ARISING FROM, OUT OF OR IN CONNECTION WITH THE
+SOFTWARE OR THE USE OR OTHER DEALINGS IN THE SOFTWARE.
+
+**************************************************************************/
+
+#include "dvo.h"
+
+#define CH7xxx_REG_VID 0x4a
+#define CH7xxx_REG_DID 0x4b
+
+#define CH7011_VID 0x83 /* 7010 as well */
+#define CH7009A_VID 0x84
+#define CH7009B_VID 0x85
+#define CH7301_VID 0x95
+
+#define CH7xxx_VID 0x84
+#define CH7xxx_DID 0x17
+
+#define CH7xxx_NUM_REGS 0x4c
+
+#define CH7xxx_CM 0x1c
+#define CH7xxx_CM_XCM (1<<0)
+#define CH7xxx_CM_MCP (1<<2)
+#define CH7xxx_INPUT_CLOCK 0x1d
+#define CH7xxx_GPIO 0x1e
+#define CH7xxx_GPIO_HPIR (1<<3)
+#define CH7xxx_IDF 0x1f
+
+#define CH7xxx_IDF_HSP (1<<3)
+#define CH7xxx_IDF_VSP (1<<4)
+
+#define CH7xxx_CONNECTION_DETECT 0x20
+#define CH7xxx_CDET_DVI (1<<5)
+
+#define CH7301_DAC_CNTL 0x21
+#define CH7301_HOTPLUG 0x23
+#define CH7xxx_TCTL 0x31
+#define CH7xxx_TVCO 0x32
+#define CH7xxx_TPCP 0x33
+#define CH7xxx_TPD 0x34
+#define CH7xxx_TPVT 0x35
+#define CH7xxx_TLPF 0x36
+#define CH7xxx_TCT 0x37
+#define CH7301_TEST_PATTERN 0x48
+
+#define CH7xxx_PM 0x49
+#define CH7xxx_PM_FPD (1<<0)
+#define CH7301_PM_DACPD0 (1<<1)
+#define CH7301_PM_DACPD1 (1<<2)
+#define CH7301_PM_DACPD2 (1<<3)
+#define CH7xxx_PM_DVIL (1<<6)
+#define CH7xxx_PM_DVIP (1<<7)
+
+#define CH7301_SYNC_POLARITY 0x56
+#define CH7301_SYNC_RGB_YUV (1<<0)
+#define CH7301_SYNC_POL_DVI (1<<5)
+
+/** @file
+ * driver for the Chrontel 7xxx DVI chip over DVO.
+ */
+
+static struct ch7xxx_id_struct {
+ uint8_t vid;
+ char *name;
+} ch7xxx_ids[] = {
+ { CH7011_VID, "CH7011" },
+ { CH7009A_VID, "CH7009A" },
+ { CH7009B_VID, "CH7009B" },
+ { CH7301_VID, "CH7301" },
+};
+
+struct ch7xxx_reg_state {
+ uint8_t regs[CH7xxx_NUM_REGS];
+};
+
+struct ch7xxx_priv {
+ bool quiet;
+
+ struct ch7xxx_reg_state save_reg;
+ struct ch7xxx_reg_state mode_reg;
+ uint8_t save_TCTL, save_TPCP, save_TPD, save_TPVT;
+ uint8_t save_TLPF, save_TCT, save_PM, save_IDF;
+};
+
+static void ch7xxx_save(struct intel_dvo_device *dvo);
+
+static char *ch7xxx_get_id(uint8_t vid)
+{
+ int i;
+
+ for (i = 0; i < ARRAY_SIZE(ch7xxx_ids); i++) {
+ if (ch7xxx_ids[i].vid == vid)
+ return ch7xxx_ids[i].name;
+ }
+
+ return NULL;
+}
+
+/** Reads an 8 bit register */
+static bool ch7xxx_readb(struct intel_dvo_device *dvo, int addr, uint8_t *ch)
+{
+ struct ch7xxx_priv *ch7xxx= dvo->dev_priv;
+ struct intel_i2c_chan *i2cbus = dvo->i2c_bus;
+ u8 out_buf[2];
+ u8 in_buf[2];
+
+ struct i2c_msg msgs[] = {
+ {
+ .addr = i2cbus->slave_addr,
+ .flags = 0,
+ .len = 1,
+ .buf = out_buf,
+ },
+ {
+ .addr = i2cbus->slave_addr,
+ .flags = I2C_M_RD,
+ .len = 1,
+ .buf = in_buf,
+ }
+ };
+
+ out_buf[0] = addr;
+ out_buf[1] = 0;
+
+ if (i2c_transfer(&i2cbus->adapter, msgs, 2) == 2) {
+ *ch = in_buf[0];
+ return true;
+	}
+
+ if (!ch7xxx->quiet) {
+ DRM_DEBUG("Unable to read register 0x%02x from %s:%02x.\n",
+ addr, i2cbus->adapter.name, i2cbus->slave_addr);
+ }
+ return false;
+}
+
+/** Writes an 8 bit register */
+static bool ch7xxx_writeb(struct intel_dvo_device *dvo, int addr, uint8_t ch)
+{
+ struct ch7xxx_priv *ch7xxx = dvo->dev_priv;
+ struct intel_i2c_chan *i2cbus = dvo->i2c_bus;
+ uint8_t out_buf[2];
+ struct i2c_msg msg = {
+ .addr = i2cbus->slave_addr,
+ .flags = 0,
+ .len = 2,
+ .buf = out_buf,
+ };
+
+ out_buf[0] = addr;
+ out_buf[1] = ch;
+
+ if (i2c_transfer(&i2cbus->adapter, &msg, 1) == 1)
+ return true;
+
+ if (!ch7xxx->quiet) {
+ DRM_DEBUG("Unable to write register 0x%02x to %s:%d.\n",
+ addr, i2cbus->adapter.name, i2cbus->slave_addr);
+ }
+
+ return false;
+}
+
+static bool ch7xxx_init(struct intel_dvo_device *dvo,
+ struct intel_i2c_chan *i2cbus)
+{
+ /* this will detect the CH7xxx chip on the specified i2c bus */
+ struct ch7xxx_priv *ch7xxx;
+ uint8_t vendor, device;
+ char *name;
+
+ ch7xxx = kzalloc(sizeof(struct ch7xxx_priv), GFP_KERNEL);
+ if (ch7xxx == NULL)
+ return false;
+
+ dvo->i2c_bus = i2cbus;
+ dvo->i2c_bus->slave_addr = dvo->slave_addr;
+ dvo->dev_priv = ch7xxx;
+ ch7xxx->quiet = true;
+
+ if (!ch7xxx_readb(dvo, CH7xxx_REG_VID, &vendor))
+ goto out;
+
+ name = ch7xxx_get_id(vendor);
+ if (!name) {
+ DRM_DEBUG("ch7xxx not detected; got 0x%02x from %s slave %d.\n",
+ vendor, i2cbus->adapter.name, i2cbus->slave_addr);
+ goto out;
+ }
+
+
+ if (!ch7xxx_readb(dvo, CH7xxx_REG_DID, &device))
+ goto out;
+
+ if (device != CH7xxx_DID) {
+		DRM_DEBUG("ch7xxx not detected; got device id 0x%02x from %s slave %d.\n",
+			  device, i2cbus->adapter.name, i2cbus->slave_addr);
+ goto out;
+ }
+
+ ch7xxx->quiet = false;
+ DRM_DEBUG("Detected %s chipset, vendor/device ID 0x%02x/0x%02x\n",
+ name, vendor, device);
+ return true;
+out:
+ kfree(ch7xxx);
+ return false;
+}
+
+static enum drm_connector_status ch7xxx_detect(struct intel_dvo_device *dvo)
+{
+ uint8_t cdet, orig_pm, pm;
+
+ ch7xxx_readb(dvo, CH7xxx_PM, &orig_pm);
+
+ pm = orig_pm;
+ pm &= ~CH7xxx_PM_FPD;
+ pm |= CH7xxx_PM_DVIL | CH7xxx_PM_DVIP;
+
+ ch7xxx_writeb(dvo, CH7xxx_PM, pm);
+
+ ch7xxx_readb(dvo, CH7xxx_CONNECTION_DETECT, &cdet);
+
+ ch7xxx_writeb(dvo, CH7xxx_PM, orig_pm);
+
+ if (cdet & CH7xxx_CDET_DVI)
+ return connector_status_connected;
+ return connector_status_disconnected;
+}
+
+static enum drm_mode_status ch7xxx_mode_valid(struct intel_dvo_device *dvo,
+ struct drm_display_mode *mode)
+{
+ if (mode->clock > 165000)
+ return MODE_CLOCK_HIGH;
+
+ return MODE_OK;
+}
+
+static void ch7xxx_mode_set(struct intel_dvo_device *dvo,
+ struct drm_display_mode *mode,
+ struct drm_display_mode *adjusted_mode)
+{
+ uint8_t tvco, tpcp, tpd, tlpf, idf;
+
+ if (mode->clock <= 65000) {
+ tvco = 0x23;
+ tpcp = 0x08;
+ tpd = 0x16;
+ tlpf = 0x60;
+ } else {
+ tvco = 0x2d;
+ tpcp = 0x06;
+ tpd = 0x26;
+ tlpf = 0xa0;
+ }
+
+ ch7xxx_writeb(dvo, CH7xxx_TCTL, 0x00);
+ ch7xxx_writeb(dvo, CH7xxx_TVCO, tvco);
+ ch7xxx_writeb(dvo, CH7xxx_TPCP, tpcp);
+ ch7xxx_writeb(dvo, CH7xxx_TPD, tpd);
+ ch7xxx_writeb(dvo, CH7xxx_TPVT, 0x30);
+ ch7xxx_writeb(dvo, CH7xxx_TLPF, tlpf);
+ ch7xxx_writeb(dvo, CH7xxx_TCT, 0x00);
+
+ ch7xxx_readb(dvo, CH7xxx_IDF, &idf);
+
+ idf &= ~(CH7xxx_IDF_HSP | CH7xxx_IDF_VSP);
+ if (mode->flags & DRM_MODE_FLAG_PHSYNC)
+ idf |= CH7xxx_IDF_HSP;
+
+ if (mode->flags & DRM_MODE_FLAG_PVSYNC)
+		idf |= CH7xxx_IDF_VSP;
+
+ ch7xxx_writeb(dvo, CH7xxx_IDF, idf);
+}
+
+/* set the CH7xxx power state */
+static void ch7xxx_dpms(struct intel_dvo_device *dvo, int mode)
+{
+ if (mode == DRM_MODE_DPMS_ON)
+ ch7xxx_writeb(dvo, CH7xxx_PM, CH7xxx_PM_DVIL | CH7xxx_PM_DVIP);
+ else
+ ch7xxx_writeb(dvo, CH7xxx_PM, CH7xxx_PM_FPD);
+}
+
+static void ch7xxx_dump_regs(struct intel_dvo_device *dvo)
+{
+ struct ch7xxx_priv *ch7xxx = dvo->dev_priv;
+ int i;
+
+ for (i = 0; i < CH7xxx_NUM_REGS; i++) {
+		if ((i % 8) == 0)
+ DRM_DEBUG("\n %02X: ", i);
+ DRM_DEBUG("%02X ", ch7xxx->mode_reg.regs[i]);
+ }
+}
+
+static void ch7xxx_save(struct intel_dvo_device *dvo)
+{
+ struct ch7xxx_priv *ch7xxx= dvo->dev_priv;
+
+ ch7xxx_readb(dvo, CH7xxx_TCTL, &ch7xxx->save_TCTL);
+ ch7xxx_readb(dvo, CH7xxx_TPCP, &ch7xxx->save_TPCP);
+ ch7xxx_readb(dvo, CH7xxx_TPD, &ch7xxx->save_TPD);
+ ch7xxx_readb(dvo, CH7xxx_TPVT, &ch7xxx->save_TPVT);
+ ch7xxx_readb(dvo, CH7xxx_TLPF, &ch7xxx->save_TLPF);
+ ch7xxx_readb(dvo, CH7xxx_PM, &ch7xxx->save_PM);
+ ch7xxx_readb(dvo, CH7xxx_IDF, &ch7xxx->save_IDF);
+}
+
+static void ch7xxx_restore(struct intel_dvo_device *dvo)
+{
+ struct ch7xxx_priv *ch7xxx = dvo->dev_priv;
+
+ ch7xxx_writeb(dvo, CH7xxx_TCTL, ch7xxx->save_TCTL);
+ ch7xxx_writeb(dvo, CH7xxx_TPCP, ch7xxx->save_TPCP);
+ ch7xxx_writeb(dvo, CH7xxx_TPD, ch7xxx->save_TPD);
+ ch7xxx_writeb(dvo, CH7xxx_TPVT, ch7xxx->save_TPVT);
+ ch7xxx_writeb(dvo, CH7xxx_TLPF, ch7xxx->save_TLPF);
+ ch7xxx_writeb(dvo, CH7xxx_IDF, ch7xxx->save_IDF);
+ ch7xxx_writeb(dvo, CH7xxx_PM, ch7xxx->save_PM);
+}
+
+static void ch7xxx_destroy(struct intel_dvo_device *dvo)
+{
+ struct ch7xxx_priv *ch7xxx = dvo->dev_priv;
+
+ if (ch7xxx) {
+ kfree(ch7xxx);
+ dvo->dev_priv = NULL;
+ }
+}
+
+struct intel_dvo_dev_ops ch7xxx_ops = {
+ .init = ch7xxx_init,
+ .detect = ch7xxx_detect,
+ .mode_valid = ch7xxx_mode_valid,
+ .mode_set = ch7xxx_mode_set,
+ .dpms = ch7xxx_dpms,
+ .dump_regs = ch7xxx_dump_regs,
+ .save = ch7xxx_save,
+ .restore = ch7xxx_restore,
+ .destroy = ch7xxx_destroy,
+};
diff --git a/drivers/gpu/drm/i915/dvo_ivch.c b/drivers/gpu/drm/i915/dvo_ivch.c
new file mode 100644
index 00000000000..0c8d375e8e3
--- /dev/null
+++ b/drivers/gpu/drm/i915/dvo_ivch.c
@@ -0,0 +1,442 @@
+/*
+ * Copyright © 2006 Intel Corporation
+ *
+ * Permission is hereby granted, free of charge, to any person obtaining a
+ * copy of this software and associated documentation files (the "Software"),
+ * to deal in the Software without restriction, including without limitation
+ * the rights to use, copy, modify, merge, publish, distribute, sublicense,
+ * and/or sell copies of the Software, and to permit persons to whom the
+ * Software is furnished to do so, subject to the following conditions:
+ *
+ * The above copyright notice and this permission notice (including the next
+ * paragraph) shall be included in all copies or substantial portions of the
+ * Software.
+ *
+ * THE SOFTWARE IS PROVIDED "AS IS", WITHOUT WARRANTY OF ANY KIND, EXPRESS OR
+ * IMPLIED, INCLUDING BUT NOT LIMITED TO THE WARRANTIES OF MERCHANTABILITY,
+ * FITNESS FOR A PARTICULAR PURPOSE AND NONINFRINGEMENT. IN NO EVENT SHALL
+ * THE AUTHORS OR COPYRIGHT HOLDERS BE LIABLE FOR ANY CLAIM, DAMAGES OR OTHER
+ * LIABILITY, WHETHER IN AN ACTION OF CONTRACT, TORT OR OTHERWISE, ARISING
+ * FROM, OUT OF OR IN CONNECTION WITH THE SOFTWARE OR THE USE OR OTHER
+ * DEALINGS IN THE SOFTWARE.
+ *
+ * Authors:
+ * Eric Anholt <eric@anholt.net>
+ *
+ */
+
+#include "dvo.h"
+
+/*
+ * register definitions for the i82807aa.
+ *
+ * Documentation on this chipset can be found in datasheet #29069001 at
+ * intel.com.
+ */
+
+/*
+ * VCH Revision & GMBus Base Addr
+ */
+#define VR00 0x00
+# define VR00_BASE_ADDRESS_MASK 0x007f
+
+/*
+ * Functionality Enable
+ */
+#define VR01 0x01
+
+/*
+ * Enable the panel fitter
+ */
+# define VR01_PANEL_FIT_ENABLE (1 << 3)
+/*
+ * Enables the LCD display.
+ *
+ * This must not be set while VR01_DVO_BYPASS_ENABLE is set.
+ */
+# define VR01_LCD_ENABLE (1 << 2)
+/** Enables the DVO repeater. */
+# define VR01_DVO_BYPASS_ENABLE (1 << 1)
+/** Enables the DVO clock */
+# define VR01_DVO_ENABLE (1 << 0)
+
+/*
+ * LCD Interface Format
+ */
+#define VR10 0x10
+/** Enables LVDS output instead of CMOS */
+# define VR10_LVDS_ENABLE (1 << 4)
+/** Enables 18-bit LVDS output. */
+# define VR10_INTERFACE_1X18 (0 << 2)
+/** Enables 24-bit LVDS or CMOS output */
+# define VR10_INTERFACE_1X24 (1 << 2)
+/** Enables 2x18-bit LVDS or CMOS output. */
+# define VR10_INTERFACE_2X18 (2 << 2)
+/** Enables 2x24-bit LVDS output */
+# define VR10_INTERFACE_2X24 (3 << 2)
+
+/*
+ * VR20 LCD Horizontal Display Size
+ */
+#define VR20 0x20
+
+/*
+ * LCD Vertical Display Size
+ */
+#define VR21	0x21
+
+/*
+ * Panel power down status
+ */
+#define VR30 0x30
+/** Read only bit indicating that the panel is not in a safe poweroff state. */
+# define VR30_PANEL_ON (1 << 15)
+
+#define VR40 0x40
+# define VR40_STALL_ENABLE (1 << 13)
+# define VR40_VERTICAL_INTERP_ENABLE (1 << 12)
+# define VR40_ENHANCED_PANEL_FITTING (1 << 11)
+# define VR40_HORIZONTAL_INTERP_ENABLE (1 << 10)
+# define VR40_AUTO_RATIO_ENABLE (1 << 9)
+# define VR40_CLOCK_GATING_ENABLE (1 << 8)
+
+/*
+ * Panel Fitting Vertical Ratio
+ * (((image_height - 1) << 16) / ((panel_height - 1))) >> 2
+ */
+#define VR41 0x41
+
+/*
+ * Panel Fitting Horizontal Ratio
+ * (((image_width - 1) << 16) / ((panel_width - 1))) >> 2
+ */
+#define VR42 0x42
+
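+/*
+ * Worked example (values are illustrative, not from the datasheet): scaling
+ * a 1024x768 image to a 1400x1050 panel gives a vertical ratio of
+ * (((768 - 1) << 16) / (1050 - 1)) >> 2 = 11979 (0x2ecb), i.e. roughly
+ * 768/1050 expressed in units of 1/16384; VR42 is computed the same way
+ * from the widths.
+ */
+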
+/*
+ * Horizontal Image Size
+ */
+#define VR43 0x43
+
+/* VR80 GPIO 0
+ */
+#define VR80 0x80
+#define VR81 0x81
+#define VR82 0x82
+#define VR83 0x83
+#define VR84 0x84
+#define VR85 0x85
+#define VR86 0x86
+#define VR87 0x87
+
+/* VR88 GPIO 8
+ */
+#define VR88 0x88
+
+/* Graphics BIOS scratch 0
+ */
+#define VR8E 0x8E
+# define VR8E_PANEL_TYPE_MASK (0xf << 0)
+# define VR8E_PANEL_INTERFACE_CMOS (0 << 4)
+# define VR8E_PANEL_INTERFACE_LVDS (1 << 4)
+# define VR8E_FORCE_DEFAULT_PANEL (1 << 5)
+
+/* Graphics BIOS scratch 1
+ */
+#define VR8F 0x8F
+# define VR8F_VCH_PRESENT (1 << 0)
+# define VR8F_DISPLAY_CONN (1 << 1)
+# define VR8F_POWER_MASK (0x3c)
+# define VR8F_POWER_POS (2)
+
+
+struct ivch_priv {
+ bool quiet;
+
+ uint16_t width, height;
+
+ uint16_t save_VR01;
+ uint16_t save_VR40;
+};
+
+
+static void ivch_dump_regs(struct intel_dvo_device *dvo);
+
+/**
+ * Reads a register on the ivch.
+ *
+ * Each of the 256 registers are 16 bits long.
+ */
+static bool ivch_read(struct intel_dvo_device *dvo, int addr, uint16_t *data)
+{
+ struct ivch_priv *priv = dvo->dev_priv;
+ struct intel_i2c_chan *i2cbus = dvo->i2c_bus;
+ u8 out_buf[1];
+ u8 in_buf[2];
+
+ struct i2c_msg msgs[] = {
+ {
+ .addr = i2cbus->slave_addr,
+ .flags = I2C_M_RD,
+ .len = 0,
+ },
+ {
+ .addr = 0,
+ .flags = I2C_M_NOSTART,
+ .len = 1,
+ .buf = out_buf,
+ },
+ {
+ .addr = i2cbus->slave_addr,
+ .flags = I2C_M_RD | I2C_M_NOSTART,
+ .len = 2,
+ .buf = in_buf,
+ }
+ };
+
+ out_buf[0] = addr;
+
+ if (i2c_transfer(&i2cbus->adapter, msgs, 3) == 3) {
+ *data = (in_buf[1] << 8) | in_buf[0];
+ return true;
+ }
+
+ if (!priv->quiet) {
+ DRM_DEBUG("Unable to read register 0x%02x from %s:%02x.\n",
+ addr, i2cbus->adapter.name, i2cbus->slave_addr);
+ }
+ return false;
+}
+
+/** Writes a 16-bit register on the ivch */
+static bool ivch_write(struct intel_dvo_device *dvo, int addr, uint16_t data)
+{
+ struct ivch_priv *priv = dvo->dev_priv;
+ struct intel_i2c_chan *i2cbus = dvo->i2c_bus;
+ u8 out_buf[3];
+ struct i2c_msg msg = {
+ .addr = i2cbus->slave_addr,
+ .flags = 0,
+ .len = 3,
+ .buf = out_buf,
+ };
+
+ out_buf[0] = addr;
+ out_buf[1] = data & 0xff;
+ out_buf[2] = data >> 8;
+
+ if (i2c_transfer(&i2cbus->adapter, &msg, 1) == 1)
+ return true;
+
+ if (!priv->quiet) {
+ DRM_DEBUG("Unable to write register 0x%02x to %s:%d.\n",
+ addr, i2cbus->adapter.name, i2cbus->slave_addr);
+ }
+
+ return false;
+}
+
+/** Probes the given bus and slave address for an ivch */
+static bool ivch_init(struct intel_dvo_device *dvo,
+ struct intel_i2c_chan *i2cbus)
+{
+ struct ivch_priv *priv;
+ uint16_t temp;
+
+ priv = kzalloc(sizeof(struct ivch_priv), GFP_KERNEL);
+ if (priv == NULL)
+ return false;
+
+ dvo->i2c_bus = i2cbus;
+ dvo->i2c_bus->slave_addr = dvo->slave_addr;
+ dvo->dev_priv = priv;
+ priv->quiet = true;
+
+ if (!ivch_read(dvo, VR00, &temp))
+ goto out;
+ priv->quiet = false;
+
+ /* Since the identification bits are probably zeroes, which doesn't seem
+ * very unique, check that the value in the base address field matches
+ * the address it's responding on.
+ */
+ if ((temp & VR00_BASE_ADDRESS_MASK) != dvo->slave_addr) {
+ DRM_DEBUG("ivch detect failed due to address mismatch "
+ "(%d vs %d)\n",
+ (temp & VR00_BASE_ADDRESS_MASK), dvo->slave_addr);
+ goto out;
+ }
+
+ ivch_read(dvo, VR20, &priv->width);
+ ivch_read(dvo, VR21, &priv->height);
+
+ return true;
+
+out:
+ kfree(priv);
+ return false;
+}
+
+static enum drm_connector_status ivch_detect(struct intel_dvo_device *dvo)
+{
+ return connector_status_connected;
+}
+
+static enum drm_mode_status ivch_mode_valid(struct intel_dvo_device *dvo,
+ struct drm_display_mode *mode)
+{
+ if (mode->clock > 112000)
+ return MODE_CLOCK_HIGH;
+
+ return MODE_OK;
+}
+
+/** Sets the power state of the panel connected to the ivch */
+static void ivch_dpms(struct intel_dvo_device *dvo, int mode)
+{
+ int i;
+ uint16_t vr01, vr30, backlight;
+
+ /* Set the new power state of the panel. */
+ if (!ivch_read(dvo, VR01, &vr01))
+ return;
+
+ if (mode == DRM_MODE_DPMS_ON)
+ backlight = 1;
+ else
+ backlight = 0;
+ ivch_write(dvo, VR80, backlight);
+
+ if (mode == DRM_MODE_DPMS_ON)
+ vr01 |= VR01_LCD_ENABLE | VR01_DVO_ENABLE;
+ else
+ vr01 &= ~(VR01_LCD_ENABLE | VR01_DVO_ENABLE);
+
+ ivch_write(dvo, VR01, vr01);
+
+ /* Wait for the panel to make its state transition */
+ for (i = 0; i < 100; i++) {
+ if (!ivch_read(dvo, VR30, &vr30))
+ break;
+
+ if (((vr30 & VR30_PANEL_ON) != 0) == (mode == DRM_MODE_DPMS_ON))
+ break;
+ udelay(1000);
+ }
+ /* wait some more; vch may fail to resync sometimes without this */
+ udelay(16 * 1000);
+}
+
+static void ivch_mode_set(struct intel_dvo_device *dvo,
+ struct drm_display_mode *mode,
+ struct drm_display_mode *adjusted_mode)
+{
+ uint16_t vr40 = 0;
+ uint16_t vr01;
+
+ vr01 = 0;
+ vr40 = (VR40_STALL_ENABLE | VR40_VERTICAL_INTERP_ENABLE |
+ VR40_HORIZONTAL_INTERP_ENABLE);
+
+ if (mode->hdisplay != adjusted_mode->hdisplay ||
+ mode->vdisplay != adjusted_mode->vdisplay) {
+ uint16_t x_ratio, y_ratio;
+
+ vr01 |= VR01_PANEL_FIT_ENABLE;
+ vr40 |= VR40_CLOCK_GATING_ENABLE;
+ x_ratio = (((mode->hdisplay - 1) << 16) /
+ (adjusted_mode->hdisplay - 1)) >> 2;
+ y_ratio = (((mode->vdisplay - 1) << 16) /
+ (adjusted_mode->vdisplay - 1)) >> 2;
+ ivch_write(dvo, VR42, x_ratio);
+ ivch_write(dvo, VR41, y_ratio);
+ } else {
+ vr01 &= ~VR01_PANEL_FIT_ENABLE;
+ vr40 &= ~VR40_CLOCK_GATING_ENABLE;
+ }
+ vr40 &= ~VR40_AUTO_RATIO_ENABLE;
+
+ ivch_write(dvo, VR01, vr01);
+ ivch_write(dvo, VR40, vr40);
+
+ ivch_dump_regs(dvo);
+}
+
+static void ivch_dump_regs(struct intel_dvo_device *dvo)
+{
+ uint16_t val;
+
+ ivch_read(dvo, VR00, &val);
+ DRM_DEBUG("VR00: 0x%04x\n", val);
+ ivch_read(dvo, VR01, &val);
+ DRM_DEBUG("VR01: 0x%04x\n", val);
+ ivch_read(dvo, VR30, &val);
+ DRM_DEBUG("VR30: 0x%04x\n", val);
+ ivch_read(dvo, VR40, &val);
+ DRM_DEBUG("VR40: 0x%04x\n", val);
+
+ /* GPIO registers */
+ ivch_read(dvo, VR80, &val);
+ DRM_DEBUG("VR80: 0x%04x\n", val);
+ ivch_read(dvo, VR81, &val);
+ DRM_DEBUG("VR81: 0x%04x\n", val);
+ ivch_read(dvo, VR82, &val);
+ DRM_DEBUG("VR82: 0x%04x\n", val);
+ ivch_read(dvo, VR83, &val);
+ DRM_DEBUG("VR83: 0x%04x\n", val);
+ ivch_read(dvo, VR84, &val);
+ DRM_DEBUG("VR84: 0x%04x\n", val);
+ ivch_read(dvo, VR85, &val);
+ DRM_DEBUG("VR85: 0x%04x\n", val);
+ ivch_read(dvo, VR86, &val);
+ DRM_DEBUG("VR86: 0x%04x\n", val);
+ ivch_read(dvo, VR87, &val);
+ DRM_DEBUG("VR87: 0x%04x\n", val);
+ ivch_read(dvo, VR88, &val);
+ DRM_DEBUG("VR88: 0x%04x\n", val);
+
+ /* Scratch register 0 - AIM Panel type */
+ ivch_read(dvo, VR8E, &val);
+ DRM_DEBUG("VR8E: 0x%04x\n", val);
+
+ /* Scratch register 1 - Status register */
+ ivch_read(dvo, VR8F, &val);
+ DRM_DEBUG("VR8F: 0x%04x\n", val);
+}
+
+static void ivch_save(struct intel_dvo_device *dvo)
+{
+ struct ivch_priv *priv = dvo->dev_priv;
+
+ ivch_read(dvo, VR01, &priv->save_VR01);
+ ivch_read(dvo, VR40, &priv->save_VR40);
+}
+
+static void ivch_restore(struct intel_dvo_device *dvo)
+{
+ struct ivch_priv *priv = dvo->dev_priv;
+
+ ivch_write(dvo, VR01, priv->save_VR01);
+ ivch_write(dvo, VR40, priv->save_VR40);
+}
+
+static void ivch_destroy(struct intel_dvo_device *dvo)
+{
+ struct ivch_priv *priv = dvo->dev_priv;
+
+ if (priv) {
+ kfree(priv);
+ dvo->dev_priv = NULL;
+ }
+}
+
+struct intel_dvo_dev_ops ivch_ops = {
+ .init = ivch_init,
+ .dpms = ivch_dpms,
+ .save = ivch_save,
+ .restore = ivch_restore,
+ .mode_valid = ivch_mode_valid,
+ .mode_set = ivch_mode_set,
+ .detect = ivch_detect,
+ .dump_regs = ivch_dump_regs,
+ .destroy = ivch_destroy,
+};
diff --git a/drivers/gpu/drm/i915/dvo_sil164.c b/drivers/gpu/drm/i915/dvo_sil164.c
new file mode 100644
index 00000000000..033a4bb070b
--- /dev/null
+++ b/drivers/gpu/drm/i915/dvo_sil164.c
@@ -0,0 +1,302 @@
+/**************************************************************************
+
+Copyright © 2006 Dave Airlie
+
+All Rights Reserved.
+
+Permission is hereby granted, free of charge, to any person obtaining a
+copy of this software and associated documentation files (the
+"Software"), to deal in the Software without restriction, including
+without limitation the rights to use, copy, modify, merge, publish,
+distribute, sub license, and/or sell copies of the Software, and to
+permit persons to whom the Software is furnished to do so, subject to
+the following conditions:
+
+The above copyright notice and this permission notice (including the
+next paragraph) shall be included in all copies or substantial portions
+of the Software.
+
+THE SOFTWARE IS PROVIDED "AS IS", WITHOUT WARRANTY OF ANY KIND, EXPRESS
+OR IMPLIED, INCLUDING BUT NOT LIMITED TO THE WARRANTIES OF
+MERCHANTABILITY, FITNESS FOR A PARTICULAR PURPOSE AND NON-INFRINGEMENT.
+IN NO EVENT SHALL THE AUTHOR BE LIABLE FOR
+ANY CLAIM, DAMAGES OR OTHER LIABILITY, WHETHER IN AN ACTION OF CONTRACT,
+TORT OR OTHERWISE, ARISING FROM, OUT OF OR IN CONNECTION WITH THE
+SOFTWARE OR THE USE OR OTHER DEALINGS IN THE SOFTWARE.
+
+**************************************************************************/
+
+#include "dvo.h"
+
+#define SIL164_VID 0x0001
+#define SIL164_DID 0x0006
+
+#define SIL164_VID_LO 0x00
+#define SIL164_VID_HI 0x01
+#define SIL164_DID_LO 0x02
+#define SIL164_DID_HI 0x03
+#define SIL164_REV 0x04
+#define SIL164_RSVD 0x05
+#define SIL164_FREQ_LO 0x06
+#define SIL164_FREQ_HI 0x07
+
+#define SIL164_REG8 0x08
+#define SIL164_8_VEN (1<<5)
+#define SIL164_8_HEN (1<<4)
+#define SIL164_8_DSEL (1<<3)
+#define SIL164_8_BSEL (1<<2)
+#define SIL164_8_EDGE (1<<1)
+#define SIL164_8_PD (1<<0)
+
+#define SIL164_REG9 0x09
+#define SIL164_9_VLOW (1<<7)
+#define SIL164_9_MSEL_MASK (0x7<<4)
+#define SIL164_9_TSEL (1<<3)
+#define SIL164_9_RSEN (1<<2)
+#define SIL164_9_HTPLG (1<<1)
+#define SIL164_9_MDI (1<<0)
+
+#define SIL164_REGC 0x0c
+
+struct sil164_save_rec {
+ uint8_t reg8;
+ uint8_t reg9;
+ uint8_t regc;
+};
+
+struct sil164_priv {
+ //I2CDevRec d;
+ bool quiet;
+ struct sil164_save_rec save_regs;
+ struct sil164_save_rec mode_regs;
+};
+
+#define SILPTR(d) ((SIL164Ptr)(d->DriverPrivate.ptr))
+
+static bool sil164_readb(struct intel_dvo_device *dvo, int addr, uint8_t *ch)
+{
+ struct sil164_priv *sil = dvo->dev_priv;
+ struct intel_i2c_chan *i2cbus = dvo->i2c_bus;
+ u8 out_buf[2];
+ u8 in_buf[2];
+
+ struct i2c_msg msgs[] = {
+ {
+ .addr = i2cbus->slave_addr,
+ .flags = 0,
+ .len = 1,
+ .buf = out_buf,
+ },
+ {
+ .addr = i2cbus->slave_addr,
+ .flags = I2C_M_RD,
+ .len = 1,
+ .buf = in_buf,
+ }
+ };
+
+ out_buf[0] = addr;
+ out_buf[1] = 0;
+
+ if (i2c_transfer(&i2cbus->adapter, msgs, 2) == 2) {
+ *ch = in_buf[0];
+ return true;
+ }
+
+ if (!sil->quiet) {
+ DRM_DEBUG("Unable to read register 0x%02x from %s:%02x.\n",
+ addr, i2cbus->adapter.name, i2cbus->slave_addr);
+ }
+ return false;
+}
+
+static bool sil164_writeb(struct intel_dvo_device *dvo, int addr, uint8_t ch)
+{
+ struct sil164_priv *sil = dvo->dev_priv;
+ struct intel_i2c_chan *i2cbus = dvo->i2c_bus;
+ uint8_t out_buf[2];
+ struct i2c_msg msg = {
+ .addr = i2cbus->slave_addr,
+ .flags = 0,
+ .len = 2,
+ .buf = out_buf,
+ };
+
+ out_buf[0] = addr;
+ out_buf[1] = ch;
+
+ if (i2c_transfer(&i2cbus->adapter, &msg, 1) == 1)
+ return true;
+
+ if (!sil->quiet) {
+ DRM_DEBUG("Unable to write register 0x%02x to %s:%d.\n",
+ addr, i2cbus->adapter.name, i2cbus->slave_addr);
+ }
+
+ return false;
+}
+
+/* Silicon Image 164 driver for chip on i2c bus */
+static bool sil164_init(struct intel_dvo_device *dvo,
+ struct intel_i2c_chan *i2cbus)
+{
+ /* this will detect the SIL164 chip on the specified i2c bus */
+ struct sil164_priv *sil;
+ unsigned char ch;
+
+ sil = kzalloc(sizeof(struct sil164_priv), GFP_KERNEL);
+ if (sil == NULL)
+ return false;
+
+ dvo->i2c_bus = i2cbus;
+ dvo->i2c_bus->slave_addr = dvo->slave_addr;
+ dvo->dev_priv = sil;
+ sil->quiet = true;
+
+ if (!sil164_readb(dvo, SIL164_VID_LO, &ch))
+ goto out;
+
+ if (ch != (SIL164_VID & 0xff)) {
+ DRM_DEBUG("sil164 not detected got %d: from %s Slave %d.\n",
+ ch, i2cbus->adapter.name, i2cbus->slave_addr);
+ goto out;
+ }
+
+ if (!sil164_readb(dvo, SIL164_DID_LO, &ch))
+ goto out;
+
+ if (ch != (SIL164_DID & 0xff)) {
+ DRM_DEBUG("sil164 not detected got %d: from %s Slave %d.\n",
+ ch, i2cbus->adapter.name, i2cbus->slave_addr);
+ goto out;
+ }
+ sil->quiet = false;
+
+ DRM_DEBUG("init sil164 dvo controller successfully!\n");
+ return true;
+
+out:
+ kfree(sil);
+ return false;
+}
+
+static enum drm_connector_status sil164_detect(struct intel_dvo_device *dvo)
+{
+ uint8_t reg9;
+
+ sil164_readb(dvo, SIL164_REG9, &reg9);
+
+ if (reg9 & SIL164_9_HTPLG)
+ return connector_status_connected;
+ else
+ return connector_status_disconnected;
+}
+
+static enum drm_mode_status sil164_mode_valid(struct intel_dvo_device *dvo,
+ struct drm_display_mode *mode)
+{
+ return MODE_OK;
+}
+
+static void sil164_mode_set(struct intel_dvo_device *dvo,
+ struct drm_display_mode *mode,
+ struct drm_display_mode *adjusted_mode)
+{
+ /* As long as the basics are set up, since we don't have clock
+ * dependencies in the mode setup, we can just leave the
+ * registers alone and everything will work fine.
+ */
+ /* recommended programming sequence from doc */
+ /*sil164_writeb(sil, 0x08, 0x30);
+ sil164_writeb(sil, 0x09, 0x00);
+ sil164_writeb(sil, 0x0a, 0x90);
+ sil164_writeb(sil, 0x0c, 0x89);
+ sil164_writeb(sil, 0x08, 0x31);*/
+ /* don't do much */
+ return;
+}
+
+/* set the SIL164 power state */
+static void sil164_dpms(struct intel_dvo_device *dvo, int mode)
+{
+ int ret;
+ unsigned char ch;
+
+ ret = sil164_readb(dvo, SIL164_REG8, &ch);
+ if (ret == false)
+ return;
+
+ if (mode == DRM_MODE_DPMS_ON)
+ ch |= SIL164_8_PD;
+ else
+ ch &= ~SIL164_8_PD;
+
+ sil164_writeb(dvo, SIL164_REG8, ch);
+ return;
+}
+
+static void sil164_dump_regs(struct intel_dvo_device *dvo)
+{
+ uint8_t val;
+
+ sil164_readb(dvo, SIL164_FREQ_LO, &val);
+ DRM_DEBUG("SIL164_FREQ_LO: 0x%02x\n", val);
+ sil164_readb(dvo, SIL164_FREQ_HI, &val);
+ DRM_DEBUG("SIL164_FREQ_HI: 0x%02x\n", val);
+ sil164_readb(dvo, SIL164_REG8, &val);
+ DRM_DEBUG("SIL164_REG8: 0x%02x\n", val);
+ sil164_readb(dvo, SIL164_REG9, &val);
+ DRM_DEBUG("SIL164_REG9: 0x%02x\n", val);
+ sil164_readb(dvo, SIL164_REGC, &val);
+ DRM_DEBUG("SIL164_REGC: 0x%02x\n", val);
+}
+
+static void sil164_save(struct intel_dvo_device *dvo)
+{
+ struct sil164_priv *sil = dvo->dev_priv;
+
+ if (!sil164_readb(dvo, SIL164_REG8, &sil->save_regs.reg8))
+ return;
+
+ if (!sil164_readb(dvo, SIL164_REG9, &sil->save_regs.reg9))
+ return;
+
+ if (!sil164_readb(dvo, SIL164_REGC, &sil->save_regs.regc))
+ return;
+
+ return;
+}
+
+static void sil164_restore(struct intel_dvo_device *dvo)
+{
+ struct sil164_priv *sil = dvo->dev_priv;
+
+ /* Restore it powered down initially */
+ sil164_writeb(dvo, SIL164_REG8, sil->save_regs.reg8 & ~0x1);
+
+ sil164_writeb(dvo, SIL164_REG9, sil->save_regs.reg9);
+ sil164_writeb(dvo, SIL164_REGC, sil->save_regs.regc);
+ sil164_writeb(dvo, SIL164_REG8, sil->save_regs.reg8);
+}
+
+static void sil164_destroy(struct intel_dvo_device *dvo)
+{
+ struct sil164_priv *sil = dvo->dev_priv;
+
+ if (sil) {
+ kfree(sil);
+ dvo->dev_priv = NULL;
+ }
+}
+
+struct intel_dvo_dev_ops sil164_ops = {
+ .init = sil164_init,
+ .detect = sil164_detect,
+ .mode_valid = sil164_mode_valid,
+ .mode_set = sil164_mode_set,
+ .dpms = sil164_dpms,
+ .dump_regs = sil164_dump_regs,
+ .save = sil164_save,
+ .restore = sil164_restore,
+ .destroy = sil164_destroy,
+};
diff --git a/drivers/gpu/drm/i915/dvo_tfp410.c b/drivers/gpu/drm/i915/dvo_tfp410.c
new file mode 100644
index 00000000000..207fda806eb
--- /dev/null
+++ b/drivers/gpu/drm/i915/dvo_tfp410.c
@@ -0,0 +1,335 @@
+/*
+ * Copyright © 2007 Dave Mueller
+ *
+ * Permission is hereby granted, free of charge, to any person obtaining a
+ * copy of this software and associated documentation files (the "Software"),
+ * to deal in the Software without restriction, including without limitation
+ * the rights to use, copy, modify, merge, publish, distribute, sublicense,
+ * and/or sell copies of the Software, and to permit persons to whom the
+ * Software is furnished to do so, subject to the following conditions:
+ *
+ * The above copyright notice and this permission notice (including the next
+ * paragraph) shall be included in all copies or substantial portions of the
+ * Software.
+ *
+ * THE SOFTWARE IS PROVIDED "AS IS", WITHOUT WARRANTY OF ANY KIND, EXPRESS OR
+ * IMPLIED, INCLUDING BUT NOT LIMITED TO THE WARRANTIES OF MERCHANTABILITY,
+ * FITNESS FOR A PARTICULAR PURPOSE AND NONINFRINGEMENT. IN NO EVENT SHALL
+ * THE AUTHORS OR COPYRIGHT HOLDERS BE LIABLE FOR ANY CLAIM, DAMAGES OR OTHER
+ * LIABILITY, WHETHER IN AN ACTION OF CONTRACT, TORT OR OTHERWISE, ARISING
+ * FROM, OUT OF OR IN CONNECTION WITH THE SOFTWARE OR THE USE OR OTHER DEALINGS
+ * IN THE SOFTWARE.
+ *
+ * Authors:
+ * Dave Mueller <dave.mueller@gmx.ch>
+ *
+ */
+
+#include "dvo.h"
+
+/* register definitions according to the TFP410 data sheet */
+#define TFP410_VID 0x014C
+#define TFP410_DID 0x0410
+
+#define TFP410_VID_LO 0x00
+#define TFP410_VID_HI 0x01
+#define TFP410_DID_LO 0x02
+#define TFP410_DID_HI 0x03
+#define TFP410_REV 0x04
+
+#define TFP410_CTL_1 0x08
+#define TFP410_CTL_1_TDIS (1<<6)
+#define TFP410_CTL_1_VEN (1<<5)
+#define TFP410_CTL_1_HEN (1<<4)
+#define TFP410_CTL_1_DSEL (1<<3)
+#define TFP410_CTL_1_BSEL (1<<2)
+#define TFP410_CTL_1_EDGE (1<<1)
+#define TFP410_CTL_1_PD (1<<0)
+
+#define TFP410_CTL_2 0x09
+#define TFP410_CTL_2_VLOW (1<<7)
+#define TFP410_CTL_2_MSEL_MASK (0x7<<4)
+#define TFP410_CTL_2_MSEL (1<<4)
+#define TFP410_CTL_2_TSEL (1<<3)
+#define TFP410_CTL_2_RSEN (1<<2)
+#define TFP410_CTL_2_HTPLG (1<<1)
+#define TFP410_CTL_2_MDI (1<<0)
+
+#define TFP410_CTL_3 0x0A
+#define TFP410_CTL_3_DK_MASK (0x7<<5)
+#define TFP410_CTL_3_DK (1<<5)
+#define TFP410_CTL_3_DKEN (1<<4)
+#define TFP410_CTL_3_CTL_MASK (0x7<<1)
+#define TFP410_CTL_3_CTL (1<<1)
+
+#define TFP410_USERCFG 0x0B
+
+#define TFP410_DE_DLY 0x32
+
+#define TFP410_DE_CTL 0x33
+#define TFP410_DE_CTL_DEGEN (1<<6)
+#define TFP410_DE_CTL_VSPOL (1<<5)
+#define TFP410_DE_CTL_HSPOL (1<<4)
+#define TFP410_DE_CTL_DEDLY8 (1<<0)
+
+#define TFP410_DE_TOP 0x34
+
+#define TFP410_DE_CNT_LO 0x36
+#define TFP410_DE_CNT_HI 0x37
+
+#define TFP410_DE_LIN_LO 0x38
+#define TFP410_DE_LIN_HI 0x39
+
+#define TFP410_H_RES_LO 0x3A
+#define TFP410_H_RES_HI 0x3B
+
+#define TFP410_V_RES_LO 0x3C
+#define TFP410_V_RES_HI 0x3D
+
+struct tfp410_save_rec {
+ uint8_t ctl1;
+ uint8_t ctl2;
+};
+
+struct tfp410_priv {
+ bool quiet;
+
+ struct tfp410_save_rec saved_reg;
+ struct tfp410_save_rec mode_reg;
+};
+
+static bool tfp410_readb(struct intel_dvo_device *dvo, int addr, uint8_t *ch)
+{
+ struct tfp410_priv *tfp = dvo->dev_priv;
+ struct intel_i2c_chan *i2cbus = dvo->i2c_bus;
+ u8 out_buf[2];
+ u8 in_buf[2];
+
+ struct i2c_msg msgs[] = {
+ {
+ .addr = i2cbus->slave_addr,
+ .flags = 0,
+ .len = 1,
+ .buf = out_buf,
+ },
+ {
+ .addr = i2cbus->slave_addr,
+ .flags = I2C_M_RD,
+ .len = 1,
+ .buf = in_buf,
+ }
+ };
+
+ out_buf[0] = addr;
+ out_buf[1] = 0;
+
+ if (i2c_transfer(&i2cbus->adapter, msgs, 2) == 2) {
+ *ch = in_buf[0];
+ return true;
+ }
+
+ if (!tfp->quiet) {
+ DRM_DEBUG("Unable to read register 0x%02x from %s:%02x.\n",
+ addr, i2cbus->adapter.name, i2cbus->slave_addr);
+ }
+ return false;
+}
+
+static bool tfp410_writeb(struct intel_dvo_device *dvo, int addr, uint8_t ch)
+{
+ struct tfp410_priv *tfp = dvo->dev_priv;
+ struct intel_i2c_chan *i2cbus = dvo->i2c_bus;
+ uint8_t out_buf[2];
+ struct i2c_msg msg = {
+ .addr = i2cbus->slave_addr,
+ .flags = 0,
+ .len = 2,
+ .buf = out_buf,
+ };
+
+ out_buf[0] = addr;
+ out_buf[1] = ch;
+
+ if (i2c_transfer(&i2cbus->adapter, &msg, 1) == 1)
+ return true;
+
+ if (!tfp->quiet) {
+ DRM_DEBUG("Unable to write register 0x%02x to %s:%d.\n",
+ addr, i2cbus->adapter.name, i2cbus->slave_addr);
+ }
+
+ return false;
+}
+
+static int tfp410_getid(struct intel_dvo_device *dvo, int addr)
+{
+ uint8_t ch1, ch2;
+
+ if (tfp410_readb(dvo, addr+0, &ch1) &&
+ tfp410_readb(dvo, addr+1, &ch2))
+ return ((ch2 << 8) & 0xFF00) | (ch1 & 0x00FF);
+
+ return -1;
+}
+
+/* Ti TFP410 driver for chip on i2c bus */
+static bool tfp410_init(struct intel_dvo_device *dvo,
+ struct intel_i2c_chan *i2cbus)
+{
+ /* this will detect the tfp410 chip on the specified i2c bus */
+ struct tfp410_priv *tfp;
+ int id;
+
+ tfp = kzalloc(sizeof(struct tfp410_priv), GFP_KERNEL);
+ if (tfp == NULL)
+ return false;
+
+ dvo->i2c_bus = i2cbus;
+ dvo->i2c_bus->slave_addr = dvo->slave_addr;
+ dvo->dev_priv = tfp;
+ tfp->quiet = true;
+
+ if ((id = tfp410_getid(dvo, TFP410_VID_LO)) != TFP410_VID) {
+ DRM_DEBUG("tfp410 not detected got VID %X: from %s Slave %d.\n",
+ id, i2cbus->adapter.name, i2cbus->slave_addr);
+ goto out;
+ }
+
+ if ((id = tfp410_getid(dvo, TFP410_DID_LO)) != TFP410_DID) {
+ DRM_DEBUG("tfp410 not detected got DID %X: from %s Slave %d.\n",
+ id, i2cbus->adapter.name, i2cbus->slave_addr);
+ goto out;
+ }
+ tfp->quiet = false;
+ return true;
+out:
+ kfree(tfp);
+ return false;
+}
+
+static enum drm_connector_status tfp410_detect(struct intel_dvo_device *dvo)
+{
+ enum drm_connector_status ret = connector_status_disconnected;
+ uint8_t ctl2;
+
+ if (tfp410_readb(dvo, TFP410_CTL_2, &ctl2)) {
+ if (ctl2 & TFP410_CTL_2_HTPLG)
+ ret = connector_status_connected;
+ else
+ ret = connector_status_disconnected;
+ }
+
+ return ret;
+}
+
+static enum drm_mode_status tfp410_mode_valid(struct intel_dvo_device *dvo,
+ struct drm_display_mode *mode)
+{
+ return MODE_OK;
+}
+
+static void tfp410_mode_set(struct intel_dvo_device *dvo,
+ struct drm_display_mode *mode,
+ struct drm_display_mode *adjusted_mode)
+{
+ /* As long as the basics are set up, since we don't have clock dependencies
+ * in the mode setup, we can just leave the registers alone and everything
+ * will work fine.
+ */
+ /* don't do much */
+ return;
+}
+
+/* set the tfp410 power state */
+static void tfp410_dpms(struct intel_dvo_device *dvo, int mode)
+{
+ uint8_t ctl1;
+
+ if (!tfp410_readb(dvo, TFP410_CTL_1, &ctl1))
+ return;
+
+ if (mode == DRM_MODE_DPMS_ON)
+ ctl1 |= TFP410_CTL_1_PD;
+ else
+ ctl1 &= ~TFP410_CTL_1_PD;
+
+ tfp410_writeb(dvo, TFP410_CTL_1, ctl1);
+}
+
+static void tfp410_dump_regs(struct intel_dvo_device *dvo)
+{
+ uint8_t val, val2;
+
+ tfp410_readb(dvo, TFP410_REV, &val);
+ DRM_DEBUG("TFP410_REV: 0x%02X\n", val);
+ tfp410_readb(dvo, TFP410_CTL_1, &val);
+ DRM_DEBUG("TFP410_CTL1: 0x%02X\n", val);
+ tfp410_readb(dvo, TFP410_CTL_2, &val);
+ DRM_DEBUG("TFP410_CTL2: 0x%02X\n", val);
+ tfp410_readb(dvo, TFP410_CTL_3, &val);
+ DRM_DEBUG("TFP410_CTL3: 0x%02X\n", val);
+ tfp410_readb(dvo, TFP410_USERCFG, &val);
+ DRM_DEBUG("TFP410_USERCFG: 0x%02X\n", val);
+ tfp410_readb(dvo, TFP410_DE_DLY, &val);
+ DRM_DEBUG("TFP410_DE_DLY: 0x%02X\n", val);
+ tfp410_readb(dvo, TFP410_DE_CTL, &val);
+ DRM_DEBUG("TFP410_DE_CTL: 0x%02X\n", val);
+ tfp410_readb(dvo, TFP410_DE_TOP, &val);
+ DRM_DEBUG("TFP410_DE_TOP: 0x%02X\n", val);
+ tfp410_readb(dvo, TFP410_DE_CNT_LO, &val);
+ tfp410_readb(dvo, TFP410_DE_CNT_HI, &val2);
+ DRM_DEBUG("TFP410_DE_CNT: 0x%02X%02X\n", val2, val);
+ tfp410_readb(dvo, TFP410_DE_LIN_LO, &val);
+ tfp410_readb(dvo, TFP410_DE_LIN_HI, &val2);
+ DRM_DEBUG("TFP410_DE_LIN: 0x%02X%02X\n", val2, val);
+ tfp410_readb(dvo, TFP410_H_RES_LO, &val);
+ tfp410_readb(dvo, TFP410_H_RES_HI, &val2);
+ DRM_DEBUG("TFP410_H_RES: 0x%02X%02X\n", val2, val);
+ tfp410_readb(dvo, TFP410_V_RES_LO, &val);
+ tfp410_readb(dvo, TFP410_V_RES_HI, &val2);
+ DRM_DEBUG("TFP410_V_RES: 0x%02X%02X\n", val2, val);
+}
+
+static void tfp410_save(struct intel_dvo_device *dvo)
+{
+ struct tfp410_priv *tfp = dvo->dev_priv;
+
+ if (!tfp410_readb(dvo, TFP410_CTL_1, &tfp->saved_reg.ctl1))
+ return;
+
+ if (!tfp410_readb(dvo, TFP410_CTL_2, &tfp->saved_reg.ctl2))
+ return;
+}
+
+static void tfp410_restore(struct intel_dvo_device *dvo)
+{
+ struct tfp410_priv *tfp = dvo->dev_priv;
+
+ /* Restore it powered down initially */
+ tfp410_writeb(dvo, TFP410_CTL_1, tfp->saved_reg.ctl1 & ~0x1);
+
+ tfp410_writeb(dvo, TFP410_CTL_2, tfp->saved_reg.ctl2);
+ tfp410_writeb(dvo, TFP410_CTL_1, tfp->saved_reg.ctl1);
+}
+
+static void tfp410_destroy(struct intel_dvo_device *dvo)
+{
+ struct tfp410_priv *tfp = dvo->dev_priv;
+
+ if (tfp) {
+ kfree(tfp);
+ dvo->dev_priv = NULL;
+ }
+}
+
+struct intel_dvo_dev_ops tfp410_ops = {
+ .init = tfp410_init,
+ .detect = tfp410_detect,
+ .mode_valid = tfp410_mode_valid,
+ .mode_set = tfp410_mode_set,
+ .dpms = tfp410_dpms,
+ .dump_regs = tfp410_dump_regs,
+ .save = tfp410_save,
+ .restore = tfp410_restore,
+ .destroy = tfp410_destroy,
+};
diff --git a/drivers/gpu/drm/i915/i915_dma.c b/drivers/gpu/drm/i915/i915_dma.c
index 0d215e38606..3d7082af5b7 100644
--- a/drivers/gpu/drm/i915/i915_dma.c
+++ b/drivers/gpu/drm/i915/i915_dma.c
@@ -28,6 +28,8 @@
#include "drmP.h"
#include "drm.h"
+#include "drm_crtc_helper.h"
+#include "intel_drv.h"
#include "i915_drm.h"
#include "i915_drv.h"
@@ -39,6 +41,7 @@
int i915_wait_ring(struct drm_device * dev, int n, const char *caller)
{
drm_i915_private_t *dev_priv = dev->dev_private;
+ struct drm_i915_master_private *master_priv = dev->primary->master->driver_priv;
drm_i915_ring_buffer_t *ring = &(dev_priv->ring);
u32 acthd_reg = IS_I965G(dev) ? ACTHD_I965 : ACTHD;
u32 last_acthd = I915_READ(acthd_reg);
@@ -55,8 +58,8 @@ int i915_wait_ring(struct drm_device * dev, int n, const char *caller)
if (ring->space >= n)
return 0;
- if (dev_priv->sarea_priv)
- dev_priv->sarea_priv->perf_boxes |= I915_BOX_WAIT;
+ if (master_priv->sarea_priv)
+ master_priv->sarea_priv->perf_boxes |= I915_BOX_WAIT;
if (ring->head != last_head)
i = 0;
@@ -121,16 +124,28 @@ static void i915_free_hws(struct drm_device *dev)
void i915_kernel_lost_context(struct drm_device * dev)
{
drm_i915_private_t *dev_priv = dev->dev_private;
+ struct drm_i915_master_private *master_priv;
drm_i915_ring_buffer_t *ring = &(dev_priv->ring);
+ /*
+ * We should never lose context on the ring with modesetting
+ * as we don't expose it to userspace
+ */
+ if (drm_core_check_feature(dev, DRIVER_MODESET))
+ return;
+
ring->head = I915_READ(PRB0_HEAD) & HEAD_ADDR;
ring->tail = I915_READ(PRB0_TAIL) & TAIL_ADDR;
ring->space = ring->head - (ring->tail + 8);
if (ring->space < 0)
ring->space += ring->Size;
- if (ring->head == ring->tail && dev_priv->sarea_priv)
- dev_priv->sarea_priv->perf_boxes |= I915_BOX_RING_EMPTY;
+ if (!dev->primary->master)
+ return;
+
+ master_priv = dev->primary->master->driver_priv;
+ if (ring->head == ring->tail && master_priv->sarea_priv)
+ master_priv->sarea_priv->perf_boxes |= I915_BOX_RING_EMPTY;
}
static int i915_dma_cleanup(struct drm_device * dev)
@@ -154,25 +169,13 @@ static int i915_dma_cleanup(struct drm_device * dev)
if (I915_NEED_GFX_HWS(dev))
i915_free_hws(dev);
- dev_priv->sarea = NULL;
- dev_priv->sarea_priv = NULL;
-
return 0;
}
static int i915_initialize(struct drm_device * dev, drm_i915_init_t * init)
{
drm_i915_private_t *dev_priv = dev->dev_private;
-
- dev_priv->sarea = drm_getsarea(dev);
- if (!dev_priv->sarea) {
- DRM_ERROR("can not find sarea!\n");
- i915_dma_cleanup(dev);
- return -EINVAL;
- }
-
- dev_priv->sarea_priv = (drm_i915_sarea_t *)
- ((u8 *) dev_priv->sarea->handle + init->sarea_priv_offset);
+ struct drm_i915_master_private *master_priv = dev->primary->master->driver_priv;
if (init->ring_size != 0) {
if (dev_priv->ring.ring_obj != NULL) {
@@ -207,7 +210,8 @@ static int i915_initialize(struct drm_device * dev, drm_i915_init_t * init)
dev_priv->back_offset = init->back_offset;
dev_priv->front_offset = init->front_offset;
dev_priv->current_page = 0;
- dev_priv->sarea_priv->pf_current_page = dev_priv->current_page;
+ if (master_priv->sarea_priv)
+ master_priv->sarea_priv->pf_current_page = 0;
/* Allow hardware batchbuffers unless told otherwise.
*/
@@ -222,11 +226,6 @@ static int i915_dma_resume(struct drm_device * dev)
DRM_DEBUG("%s\n", __func__);
- if (!dev_priv->sarea) {
- DRM_ERROR("can not find sarea!\n");
- return -EINVAL;
- }
-
if (dev_priv->ring.map.handle == NULL) {
DRM_ERROR("can not ioremap virtual address for"
" ring buffer\n");
@@ -435,13 +434,14 @@ i915_emit_box(struct drm_device *dev,
static void i915_emit_breadcrumb(struct drm_device *dev)
{
drm_i915_private_t *dev_priv = dev->dev_private;
+ struct drm_i915_master_private *master_priv = dev->primary->master->driver_priv;
RING_LOCALS;
dev_priv->counter++;
if (dev_priv->counter > 0x7FFFFFFFUL)
dev_priv->counter = 0;
- if (dev_priv->sarea_priv)
- dev_priv->sarea_priv->last_enqueue = dev_priv->counter;
+ if (master_priv->sarea_priv)
+ master_priv->sarea_priv->last_enqueue = dev_priv->counter;
BEGIN_LP_RING(4);
OUT_RING(MI_STORE_DWORD_INDEX);
@@ -537,15 +537,17 @@ static int i915_dispatch_batchbuffer(struct drm_device * dev,
static int i915_dispatch_flip(struct drm_device * dev)
{
drm_i915_private_t *dev_priv = dev->dev_private;
+ struct drm_i915_master_private *master_priv =
+ dev->primary->master->driver_priv;
RING_LOCALS;
- if (!dev_priv->sarea_priv)
+ if (!master_priv->sarea_priv)
return -EINVAL;
DRM_DEBUG("%s: page=%d pfCurrentPage=%d\n",
__func__,
dev_priv->current_page,
- dev_priv->sarea_priv->pf_current_page);
+ master_priv->sarea_priv->pf_current_page);
i915_kernel_lost_context(dev);
@@ -572,7 +574,7 @@ static int i915_dispatch_flip(struct drm_device * dev)
OUT_RING(0);
ADVANCE_LP_RING();
- dev_priv->sarea_priv->last_enqueue = dev_priv->counter++;
+ master_priv->sarea_priv->last_enqueue = dev_priv->counter++;
BEGIN_LP_RING(4);
OUT_RING(MI_STORE_DWORD_INDEX);
@@ -581,7 +583,7 @@ static int i915_dispatch_flip(struct drm_device * dev)
OUT_RING(0);
ADVANCE_LP_RING();
- dev_priv->sarea_priv->pf_current_page = dev_priv->current_page;
+ master_priv->sarea_priv->pf_current_page = dev_priv->current_page;
return 0;
}
@@ -611,8 +613,9 @@ static int i915_batchbuffer(struct drm_device *dev, void *data,
struct drm_file *file_priv)
{
drm_i915_private_t *dev_priv = (drm_i915_private_t *) dev->dev_private;
+ struct drm_i915_master_private *master_priv = dev->primary->master->driver_priv;
drm_i915_sarea_t *sarea_priv = (drm_i915_sarea_t *)
- dev_priv->sarea_priv;
+ master_priv->sarea_priv;
drm_i915_batchbuffer_t *batch = data;
int ret;
@@ -644,8 +647,9 @@ static int i915_cmdbuffer(struct drm_device *dev, void *data,
struct drm_file *file_priv)
{
drm_i915_private_t *dev_priv = (drm_i915_private_t *) dev->dev_private;
+ struct drm_i915_master_private *master_priv = dev->primary->master->driver_priv;
drm_i915_sarea_t *sarea_priv = (drm_i915_sarea_t *)
- dev_priv->sarea_priv;
+ master_priv->sarea_priv;
drm_i915_cmdbuffer_t *cmdbuf = data;
int ret;
@@ -717,7 +721,7 @@ static int i915_getparam(struct drm_device *dev, void *data,
value = dev->pci_device;
break;
case I915_PARAM_HAS_GEM:
- value = 1;
+ value = dev_priv->has_gem;
break;
default:
DRM_ERROR("Unknown parameter %d\n", param->param);
@@ -774,6 +778,11 @@ static int i915_set_status_page(struct drm_device *dev, void *data,
return -EINVAL;
}
+ if (drm_core_check_feature(dev, DRIVER_MODESET)) {
+ WARN(1, "tried to set status page when mode setting active\n");
+ return 0;
+ }
+
printk(KERN_DEBUG "set status page addr 0x%08x\n", (u32)hws->addr);
dev_priv->status_gfx_addr = hws->addr & (0x1ffff<<12);
@@ -802,6 +811,214 @@ static int i915_set_status_page(struct drm_device *dev, void *data,
return 0;
}
+/**
+ * i915_probe_agp - get AGP bootup configuration
+ * @dev: DRM device
+ * @aperture_size: returns AGP aperture configured size
+ * @preallocated_size: returns size of BIOS preallocated AGP space
+ *
+ * Since Intel integrated graphics are UMA, the BIOS has to set aside
+ * some RAM for the framebuffer at early boot. This code figures out
+ * how much was set aside so we can use it for our own purposes.
+ */
+static int i915_probe_agp(struct drm_device *dev, unsigned long *aperture_size,
+ unsigned long *preallocated_size)
+{
+ struct pci_dev *bridge_dev;
+ u16 tmp = 0;
+ unsigned long overhead;
+
+ bridge_dev = pci_get_bus_and_slot(0, PCI_DEVFN(0,0));
+ if (!bridge_dev) {
+ DRM_ERROR("bridge device not found\n");
+ return -1;
+ }
+
+ /* Get the fb aperture size and "stolen" memory amount. */
+ pci_read_config_word(bridge_dev, INTEL_GMCH_CTRL, &tmp);
+ pci_dev_put(bridge_dev);
+
+ *aperture_size = 1024 * 1024;
+ *preallocated_size = 1024 * 1024;
+
+ switch (dev->pdev->device) {
+ case PCI_DEVICE_ID_INTEL_82830_CGC:
+ case PCI_DEVICE_ID_INTEL_82845G_IG:
+ case PCI_DEVICE_ID_INTEL_82855GM_IG:
+ case PCI_DEVICE_ID_INTEL_82865_IG:
+ if ((tmp & INTEL_GMCH_MEM_MASK) == INTEL_GMCH_MEM_64M)
+ *aperture_size *= 64;
+ else
+ *aperture_size *= 128;
+ break;
+ default:
+ /* 9xx supports large sizes, just look at the length */
+ *aperture_size = pci_resource_len(dev->pdev, 2);
+ break;
+ }
+
+ /*
+ * Some of the preallocated space is taken by the GTT
+ * and popup. GTT is 1K per MB of aperture size, and popup is 4K.
+ */
+ if (IS_G4X(dev))
+ overhead = 4096;
+ else
+ overhead = (*aperture_size / 1024) + 4096;
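+ /*
+ * Worked example with illustrative numbers: a 256MB aperture on
+ * pre-G4X hardware gives 256KB of GTT entries plus the 4KB popup,
+ * so about 260KB of overhead is subtracted from the preallocated
+ * size further down.
+ */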
+
+ switch (tmp & INTEL_855_GMCH_GMS_MASK) {
+ case INTEL_855_GMCH_GMS_STOLEN_1M:
+ break; /* 1M already */
+ case INTEL_855_GMCH_GMS_STOLEN_4M:
+ *preallocated_size *= 4;
+ break;
+ case INTEL_855_GMCH_GMS_STOLEN_8M:
+ *preallocated_size *= 8;
+ break;
+ case INTEL_855_GMCH_GMS_STOLEN_16M:
+ *preallocated_size *= 16;
+ break;
+ case INTEL_855_GMCH_GMS_STOLEN_32M:
+ *preallocated_size *= 32;
+ break;
+ case INTEL_915G_GMCH_GMS_STOLEN_48M:
+ *preallocated_size *= 48;
+ break;
+ case INTEL_915G_GMCH_GMS_STOLEN_64M:
+ *preallocated_size *= 64;
+ break;
+ case INTEL_855_GMCH_GMS_DISABLED:
+ DRM_ERROR("video memory is disabled\n");
+ return -1;
+ default:
+ DRM_ERROR("unexpected GMCH_GMS value: 0x%02x\n",
+ tmp & INTEL_855_GMCH_GMS_MASK);
+ return -1;
+ }
+ *preallocated_size -= overhead;
+
+ return 0;
+}
+
+static int i915_load_modeset_init(struct drm_device *dev)
+{
+ struct drm_i915_private *dev_priv = dev->dev_private;
+ unsigned long agp_size, prealloc_size;
+ int fb_bar = IS_I9XX(dev) ? 2 : 0;
+ int ret = 0;
+
+ dev->devname = kstrdup(DRIVER_NAME, GFP_KERNEL);
+ if (!dev->devname) {
+ ret = -ENOMEM;
+ goto out;
+ }
+
+ dev->mode_config.fb_base = drm_get_resource_start(dev, fb_bar) &
+ 0xff000000;
+
+ DRM_DEBUG("*** fb base 0x%08lx\n", dev->mode_config.fb_base);
+
+ if (IS_MOBILE(dev) || (IS_I9XX(dev) && !IS_I965G(dev) && !IS_G33(dev)))
+ dev_priv->cursor_needs_physical = true;
+ else
+ dev_priv->cursor_needs_physical = false;
+
+ ret = i915_probe_agp(dev, &agp_size, &prealloc_size);
+ if (ret)
+ goto kfree_devname;
+
+ /* Basic memrange allocator for stolen space (aka vram) */
+ drm_mm_init(&dev_priv->vram, 0, prealloc_size);
+
+ /* Let GEM Manage from end of prealloc space to end of aperture */
+ i915_gem_do_init(dev, prealloc_size, agp_size);
+
+ ret = i915_gem_init_ringbuffer(dev);
+ if (ret)
+ goto kfree_devname;
+
+ dev_priv->mm.gtt_mapping =
+ io_mapping_create_wc(dev->agp->base,
+ dev->agp->agp_info.aper_size * 1024*1024);
+
+ /* Allow hardware batchbuffers unless told otherwise.
+ */
+ dev_priv->allow_batchbuffer = 1;
+
+ ret = intel_init_bios(dev);
+ if (ret)
+ DRM_INFO("failed to find VBIOS tables\n");
+
+ ret = drm_irq_install(dev);
+ if (ret)
+ goto destroy_ringbuffer;
+
+ /* FIXME: re-add hotplug support */
+#if 0
+ ret = drm_hotplug_init(dev);
+ if (ret)
+ goto destroy_ringbuffer;
+#endif
+
+ /* Always safe in the mode setting case. */
+ /* FIXME: do pre/post-mode set stuff in core KMS code */
+ dev->vblank_disable_allowed = 1;
+
+ /*
+ * Initialize the hardware status page IRQ location.
+ */
+
+ I915_WRITE(INSTPM, (1 << 5) | (1 << 21));
+
+ intel_modeset_init(dev);
+
+ drm_helper_initial_config(dev, false);
+
+ return 0;
+
+destroy_ringbuffer:
+ i915_gem_cleanup_ringbuffer(dev);
+kfree_devname:
+ kfree(dev->devname);
+out:
+ return ret;
+}
+
+int i915_master_create(struct drm_device *dev, struct drm_master *master)
+{
+ struct drm_i915_master_private *master_priv;
+
+ master_priv = drm_calloc(1, sizeof(*master_priv), DRM_MEM_DRIVER);
+ if (!master_priv)
+ return -ENOMEM;
+
+ master->driver_priv = master_priv;
+ return 0;
+}
+
+void i915_master_destroy(struct drm_device *dev, struct drm_master *master)
+{
+ struct drm_i915_master_private *master_priv = master->driver_priv;
+
+ if (!master_priv)
+ return;
+
+ drm_free(master_priv, sizeof(*master_priv), DRM_MEM_DRIVER);
+
+ master->driver_priv = NULL;
+}
+
+/**
+ * i915_driver_load - setup chip and create an initial config
+ * @dev: DRM device
+ * @flags: startup flags
+ *
+ * The driver load routine has to do several things:
+ * - drive output discovery via intel_modeset_init()
+ * - initialize the memory manager
+ * - allocate initial config memory
+ * - setup the DRM framebuffer with the allocated memory
+ */
int i915_driver_load(struct drm_device *dev, unsigned long flags)
{
struct drm_i915_private *dev_priv = dev->dev_private;
@@ -829,6 +1046,19 @@ int i915_driver_load(struct drm_device *dev, unsigned long flags)
size = drm_get_resource_len(dev, mmio_bar);
dev_priv->regs = ioremap(base, size);
+ if (!dev_priv->regs) {
+ DRM_ERROR("failed to map registers\n");
+ ret = -EIO;
+ goto free_priv;
+ }
+
+#ifdef CONFIG_HIGHMEM64G
+ /* don't enable GEM on PAE - needs agp + set_memory_* interface fixes */
+ dev_priv->has_gem = 0;
+#else
+ /* enable GEM by default */
+ dev_priv->has_gem = 1;
+#endif
i915_gem_load(dev);
@@ -836,7 +1066,7 @@ int i915_driver_load(struct drm_device *dev, unsigned long flags)
if (!I915_NEED_GFX_HWS(dev)) {
ret = i915_init_phys_hws(dev);
if (ret != 0)
- return ret;
+ goto out_rmmap;
}
/* On the 945G/GM, the chipset reports the MSI capability on the
@@ -847,15 +1077,38 @@ int i915_driver_load(struct drm_device *dev, unsigned long flags)
* and the registers being closely associated.
*
* According to chipset errata, on the 965GM, MSI interrupts may
- * be lost or delayed
+ * be lost or delayed, but we use them anyway to avoid
+ * stuck interrupts on some machines.
*/
- if (!IS_I945G(dev) && !IS_I945GM(dev) && !IS_I965GM(dev))
+ if (!IS_I945G(dev) && !IS_I945GM(dev))
pci_enable_msi(dev->pdev);
intel_opregion_init(dev);
spin_lock_init(&dev_priv->user_irq_lock);
+ dev_priv->user_irq_refcount = 0;
+
+ ret = drm_vblank_init(dev, I915_NUM_PIPE);
+
+ if (ret) {
+ (void) i915_driver_unload(dev);
+ return ret;
+ }
+
+ if (drm_core_check_feature(dev, DRIVER_MODESET)) {
+ ret = i915_load_modeset_init(dev);
+ if (ret < 0) {
+ DRM_ERROR("failed to init modeset\n");
+ goto out_rmmap;
+ }
+ }
+ return 0;
+
+out_rmmap:
+ iounmap(dev_priv->regs);
+free_priv:
+ drm_free(dev_priv, sizeof(struct drm_i915_private), DRM_MEM_DRIVER);
return ret;
}
@@ -863,16 +1116,29 @@ int i915_driver_unload(struct drm_device *dev)
{
struct drm_i915_private *dev_priv = dev->dev_private;
+ if (drm_core_check_feature(dev, DRIVER_MODESET)) {
+ io_mapping_free(dev_priv->mm.gtt_mapping);
+ drm_irq_uninstall(dev);
+ }
+
if (dev->pdev->msi_enabled)
pci_disable_msi(dev->pdev);
- i915_free_hws(dev);
-
if (dev_priv->regs != NULL)
iounmap(dev_priv->regs);
intel_opregion_free(dev);
+ if (drm_core_check_feature(dev, DRIVER_MODESET)) {
+ intel_modeset_cleanup(dev);
+
+ mutex_lock(&dev->struct_mutex);
+ i915_gem_cleanup_ringbuffer(dev);
+ mutex_unlock(&dev->struct_mutex);
+ drm_mm_takedown(&dev_priv->vram);
+ i915_gem_lastclose(dev);
+ }
+
drm_free(dev->dev_private, sizeof(drm_i915_private_t),
DRM_MEM_DRIVER);
@@ -898,12 +1164,26 @@ int i915_driver_open(struct drm_device *dev, struct drm_file *file_priv)
return 0;
}
+/**
+ * i915_driver_lastclose - clean up after all DRM clients have exited
+ * @dev: DRM device
+ *
+ * Take care of cleaning up after all DRM clients have exited. In the
+ * mode setting case, we want to restore the kernel's initial mode (just
+ * in case the last client left us in a bad state).
+ *
+ * Additionally, in the non-mode setting case, we'll tear down the AGP
+ * and DMA structures, since the kernel won't be using them, and clean
+ * up any GEM state.
+ */
void i915_driver_lastclose(struct drm_device * dev)
{
drm_i915_private_t *dev_priv = dev->dev_private;
- if (!dev_priv)
+ if (!dev_priv || drm_core_check_feature(dev, DRIVER_MODESET)) {
+ intelfb_restore();
return;
+ }
i915_gem_lastclose(dev);
@@ -916,7 +1196,8 @@ void i915_driver_lastclose(struct drm_device * dev)
void i915_driver_preclose(struct drm_device * dev, struct drm_file *file_priv)
{
drm_i915_private_t *dev_priv = dev->dev_private;
- i915_mem_release(dev, file_priv, dev_priv->agp_heap);
+ if (!drm_core_check_feature(dev, DRIVER_MODESET))
+ i915_mem_release(dev, file_priv, dev_priv->agp_heap);
}
void i915_driver_postclose(struct drm_device *dev, struct drm_file *file_priv)
@@ -956,6 +1237,7 @@ struct drm_ioctl_desc i915_ioctls[] = {
DRM_IOCTL_DEF(DRM_I915_GEM_PREAD, i915_gem_pread_ioctl, 0),
DRM_IOCTL_DEF(DRM_I915_GEM_PWRITE, i915_gem_pwrite_ioctl, 0),
DRM_IOCTL_DEF(DRM_I915_GEM_MMAP, i915_gem_mmap_ioctl, 0),
+ DRM_IOCTL_DEF(DRM_I915_GEM_MMAP_GTT, i915_gem_mmap_gtt_ioctl, 0),
DRM_IOCTL_DEF(DRM_I915_GEM_SET_DOMAIN, i915_gem_set_domain_ioctl, 0),
DRM_IOCTL_DEF(DRM_I915_GEM_SW_FINISH, i915_gem_sw_finish_ioctl, 0),
DRM_IOCTL_DEF(DRM_I915_GEM_SET_TILING, i915_gem_set_tiling, 0),
diff --git a/drivers/gpu/drm/i915/i915_drv.c b/drivers/gpu/drm/i915/i915_drv.c
index a80ead21528..f8b3df0926c 100644
--- a/drivers/gpu/drm/i915/i915_drv.c
+++ b/drivers/gpu/drm/i915/i915_drv.c
@@ -33,11 +33,22 @@
#include "i915_drv.h"
#include "drm_pciids.h"
+#include <linux/console.h>
+
+static unsigned int i915_modeset = -1;
+module_param_named(modeset, i915_modeset, int, 0400);
+
+unsigned int i915_fbpercrtc = 0;
+module_param_named(fbpercrtc, i915_fbpercrtc, int, 0400);
static struct pci_device_id pciidlist[] = {
i915_PCI_IDS
};
+#if defined(CONFIG_DRM_I915_KMS)
+MODULE_DEVICE_TABLE(pci, pciidlist);
+#endif
+
static int i915_suspend(struct drm_device *dev, pm_message_t state)
{
struct drm_i915_private *dev_priv = dev->dev_private;
@@ -81,6 +92,10 @@ static int i915_resume(struct drm_device *dev)
return 0;
}
+static struct vm_operations_struct i915_gem_vm_ops = {
+ .fault = i915_gem_fault,
+};
+
static struct drm_driver driver = {
/* don't use mtrr's here, the Xserver or user space app should
* deal with them for intel hardware.
@@ -107,17 +122,20 @@ static struct drm_driver driver = {
.reclaim_buffers = drm_core_reclaim_buffers,
.get_map_ofs = drm_core_get_map_ofs,
.get_reg_ofs = drm_core_get_reg_ofs,
+ .master_create = i915_master_create,
+ .master_destroy = i915_master_destroy,
.proc_init = i915_gem_proc_init,
.proc_cleanup = i915_gem_proc_cleanup,
.gem_init_object = i915_gem_init_object,
.gem_free_object = i915_gem_free_object,
+ .gem_vm_ops = &i915_gem_vm_ops,
.ioctls = i915_ioctls,
.fops = {
.owner = THIS_MODULE,
.open = drm_open,
.release = drm_release,
.ioctl = drm_ioctl,
- .mmap = drm_mmap,
+ .mmap = drm_gem_mmap,
.poll = drm_poll,
.fasync = drm_fasync,
#ifdef CONFIG_COMPAT
@@ -141,6 +159,28 @@ static struct drm_driver driver = {
static int __init i915_init(void)
{
driver.num_ioctls = i915_max_ioctl;
+
+ /*
+ * If CONFIG_DRM_I915_KMS is set, default to KMS unless
+ * explicitly disabled with the module parameter.
+ *
+ * Otherwise, just follow the parameter (defaulting to off).
+ *
+ * Allow optional vga_text_mode_force boot option to override
+ * the default behavior.
+ */
+#if defined(CONFIG_DRM_I915_KMS)
+ if (i915_modeset != 0)
+ driver.driver_features |= DRIVER_MODESET;
+#endif
+ if (i915_modeset == 1)
+ driver.driver_features |= DRIVER_MODESET;
+
+#ifdef CONFIG_VGA_CONSOLE
+ if (vgacon_text_force() && i915_modeset == -1)
+ driver.driver_features &= ~DRIVER_MODESET;
+#endif
+
return drm_init(&driver);
}
diff --git a/drivers/gpu/drm/i915/i915_drv.h b/drivers/gpu/drm/i915/i915_drv.h
index ef1c0b8f8d0..4756e5cd6b5 100644
--- a/drivers/gpu/drm/i915/i915_drv.h
+++ b/drivers/gpu/drm/i915/i915_drv.h
@@ -31,6 +31,7 @@
#define _I915_DRV_H_
#include "i915_reg.h"
+#include "intel_bios.h"
#include <linux/io-mapping.h>
/* General customization:
@@ -47,6 +48,8 @@ enum pipe {
PIPE_B,
};
+#define I915_NUM_PIPE 2
+
/* Interface history:
*
* 1.1: Original.
@@ -101,13 +104,23 @@ struct intel_opregion {
int enabled;
};
+struct drm_i915_master_private {
+ drm_local_map_t *sarea;
+ struct _drm_i915_sarea *sarea_priv;
+};
+#define I915_FENCE_REG_NONE -1
+
+struct drm_i915_fence_reg {
+ struct drm_gem_object *obj;
+};
+
typedef struct drm_i915_private {
struct drm_device *dev;
+ int has_gem;
+
void __iomem *regs;
- drm_local_map_t *sarea;
- drm_i915_sarea_t *sarea_priv;
drm_i915_ring_buffer_t ring;
drm_dma_handle_t *status_page_dmah;
@@ -132,6 +145,7 @@ typedef struct drm_i915_private {
int user_irq_refcount;
/** Cached value of IMR to avoid reads in updating the bitfield */
u32 irq_mask_reg;
+ u32 pipestat[2];
int tex_lru_log_granularity;
int allow_batchbuffer;
@@ -139,14 +153,37 @@ typedef struct drm_i915_private {
unsigned int sr01, adpa, ppcr, dvob, dvoc, lvds;
int vblank_pipe;
+ bool cursor_needs_physical;
+
+ struct drm_mm vram;
+
+ int irq_enabled;
+
struct intel_opregion opregion;
+ /* LVDS info */
+ int backlight_duty_cycle; /* restore backlight to this value */
+ bool panel_wants_dither;
+ struct drm_display_mode *panel_fixed_mode;
+ struct drm_display_mode *vbt_mode; /* if any */
+
+ /* Feature bits from the VBIOS */
+ unsigned int int_tv_support:1;
+ unsigned int lvds_dither:1;
+ unsigned int lvds_vbt:1;
+ unsigned int int_crt_support:1;
+
+ struct drm_i915_fence_reg fence_regs[16]; /* assume 965 */
+ int fence_reg_start; /* 4 if userland hasn't ioctl'd us yet */
+ int num_fence_regs; /* 8 on pre-965, 16 otherwise */
+
/* Register state */
u8 saveLBB;
u32 saveDSPACNTR;
u32 saveDSPBCNTR;
u32 saveDSPARB;
u32 saveRENDERSTANDBY;
+ u32 saveHWS;
u32 savePIPEACONF;
u32 savePIPEBCONF;
u32 savePIPEASRC;
@@ -240,6 +277,10 @@ typedef struct drm_i915_private {
* List of objects currently involved in rendering from the
* ringbuffer.
*
+ * Includes buffers having the contents of their GPU caches
+ * flushed, not necessarily primitives. last_rendering_seqno
+ * represents when the rendering involved will be completed.
+ *
* A reference is held on the buffer while on this list.
*/
struct list_head active_list;
@@ -249,6 +290,8 @@ typedef struct drm_i915_private {
* still have a write_domain which needs to be flushed before
* unbinding.
*
+ * last_rendering_seqno is 0 while an object is in this list.
+ *
* A reference is held on the buffer while on this list.
*/
struct list_head flushing_list;
@@ -257,6 +300,8 @@ typedef struct drm_i915_private {
* LRU list of objects which are not in the ringbuffer and
* are ready to unbind, but are still in the GTT.
*
+ * last_rendering_seqno is 0 while an object is in this list.
+ *
* A reference is not held on the buffer while on this list,
* as merely being GTT-bound shouldn't prevent its being
* freed, and we'll pull it off the list in the free path.
@@ -350,6 +395,21 @@ struct drm_i915_gem_object {
* This is the same as gtt_space->start
*/
uint32_t gtt_offset;
+ /**
+ * Required alignment for the object
+ */
+ uint32_t gtt_alignment;
+ /**
+ * Fake offset for use by mmap(2)
+ */
+ uint64_t mmap_offset;
+
+ /**
+ * Fence register bits (if any) for this object. Will be set
+ * as needed when mapped into the GTT.
+ * Protected by dev->struct_mutex.
+ */
+ int fence_reg;
/** Boolean whether this object has a valid gtt offset. */
int gtt_bound;
@@ -362,15 +422,20 @@ struct drm_i915_gem_object {
/** Current tiling mode for the object. */
uint32_t tiling_mode;
+ uint32_t stride;
/** AGP mapping type (AGP_USER_MEMORY or AGP_USER_CACHED_MEMORY) */
uint32_t agp_type;
/**
- * Flagging of which individual pages are valid in GEM_DOMAIN_CPU when
- * GEM_DOMAIN_CPU is not in the object's read domain.
+ * If present, while GEM_DOMAIN_CPU is in the read domain this array
+ * flags which individual pages are valid.
*/
uint8_t *page_cpu_valid;
+
+ /** User space pin count and filp owning the pin */
+ uint32_t user_pin_count;
+ struct drm_file *pin_filp;
};
/**
@@ -390,9 +455,6 @@ struct drm_i915_gem_request {
/** Time at which this request was emitted, in jiffies. */
unsigned long emitted_jiffies;
- /** Cache domains that were flushed at the start of the request. */
- uint32_t flush_domains;
-
struct list_head list;
};
@@ -403,8 +465,19 @@ struct drm_i915_file_private {
} mm;
};
+enum intel_chip_family {
+ CHIP_I8XX = 0x01,
+ CHIP_I9XX = 0x02,
+ CHIP_I915 = 0x04,
+ CHIP_I965 = 0x08,
+};
+
extern struct drm_ioctl_desc i915_ioctls[];
extern int i915_max_ioctl;
+extern unsigned int i915_fbpercrtc;
+
+extern int i915_master_create(struct drm_device *dev, struct drm_master *master);
+extern void i915_master_destroy(struct drm_device *dev, struct drm_master *master);
/* i915_dma.c */
extern void i915_kernel_lost_context(struct drm_device * dev);
@@ -430,6 +503,7 @@ extern int i915_irq_wait(struct drm_device *dev, void *data,
struct drm_file *file_priv);
void i915_user_irq_get(struct drm_device *dev);
void i915_user_irq_put(struct drm_device *dev);
+extern void i915_enable_interrupt(struct drm_device *dev);
extern irqreturn_t i915_driver_irq_handler(DRM_IRQ_ARGS);
extern void i915_driver_irq_preinstall(struct drm_device * dev);
@@ -446,6 +520,13 @@ extern int i915_vblank_swap(struct drm_device *dev, void *data,
struct drm_file *file_priv);
extern void i915_enable_irq(drm_i915_private_t *dev_priv, u32 mask);
+void
+i915_enable_pipestat(drm_i915_private_t *dev_priv, int pipe, u32 mask);
+
+void
+i915_disable_pipestat(drm_i915_private_t *dev_priv, int pipe, u32 mask);
+
+
/* i915_mem.c */
extern int i915_mem_alloc(struct drm_device *dev, void *data,
struct drm_file *file_priv);
@@ -469,6 +550,8 @@ int i915_gem_pwrite_ioctl(struct drm_device *dev, void *data,
struct drm_file *file_priv);
int i915_gem_mmap_ioctl(struct drm_device *dev, void *data,
struct drm_file *file_priv);
+int i915_gem_mmap_gtt_ioctl(struct drm_device *dev, void *data,
+ struct drm_file *file_priv);
int i915_gem_set_domain_ioctl(struct drm_device *dev, void *data,
struct drm_file *file_priv);
int i915_gem_sw_finish_ioctl(struct drm_device *dev, void *data,
@@ -505,6 +588,16 @@ uint32_t i915_get_gem_seqno(struct drm_device *dev);
void i915_gem_retire_requests(struct drm_device *dev);
void i915_gem_retire_work_handler(struct work_struct *work);
void i915_gem_clflush_object(struct drm_gem_object *obj);
+int i915_gem_object_set_domain(struct drm_gem_object *obj,
+ uint32_t read_domains,
+ uint32_t write_domain);
+int i915_gem_init_ringbuffer(struct drm_device *dev);
+void i915_gem_cleanup_ringbuffer(struct drm_device *dev);
+int i915_gem_do_init(struct drm_device *dev, unsigned long start,
+ unsigned long end);
+int i915_gem_fault(struct vm_area_struct *vma, struct vm_fault *vmf);
+int i915_gem_object_set_to_gtt_domain(struct drm_gem_object *obj,
+ int write);
/* i915_gem_tiling.c */
void i915_gem_detect_bit_6_swizzle(struct drm_device *dev);
@@ -543,6 +636,10 @@ static inline void opregion_asle_intr(struct drm_device *dev) { return; }
static inline void opregion_enable_asle(struct drm_device *dev) { return; }
#endif
+/* modesetting */
+extern void intel_modeset_init(struct drm_device *dev);
+extern void intel_modeset_cleanup(struct drm_device *dev);
+
/**
* Lock test for when it's just for synchronization of ring access.
*
@@ -560,6 +657,13 @@ static inline void opregion_enable_asle(struct drm_device *dev) { return; }
#define I915_WRITE16(reg, val) writel(val, dev_priv->regs + (reg))
#define I915_READ8(reg) readb(dev_priv->regs + (reg))
#define I915_WRITE8(reg, val) writeb(val, dev_priv->regs + (reg))
+#ifdef writeq
+#define I915_WRITE64(reg, val) writeq(val, dev_priv->regs + (reg))
+#else
+#define I915_WRITE64(reg, val) (writel(val, dev_priv->regs + (reg)), \
+ writel(upper_32_bits(val), dev_priv->regs + \
+ (reg) + 4))
+#endif
#define I915_VERBOSE 0
@@ -642,7 +746,8 @@ extern int i915_wait_ring(struct drm_device * dev, int n, const char *caller);
#define IS_G4X(dev) ((dev)->pci_device == 0x2E02 || \
(dev)->pci_device == 0x2E12 || \
- (dev)->pci_device == 0x2E22)
+ (dev)->pci_device == 0x2E22 || \
+ IS_GM45(dev))
#define IS_G33(dev) ((dev)->pci_device == 0x29C2 || \
(dev)->pci_device == 0x29B2 || \
diff --git a/drivers/gpu/drm/i915/i915_gem.c b/drivers/gpu/drm/i915/i915_gem.c
index 6b4a2bd2064..cc2ca5561fe 100644
--- a/drivers/gpu/drm/i915/i915_gem.c
+++ b/drivers/gpu/drm/i915/i915_gem.c
@@ -30,73 +30,77 @@
#include "i915_drm.h"
#include "i915_drv.h"
#include <linux/swap.h>
+#include <linux/pci.h>
-static int
-i915_gem_object_set_domain(struct drm_gem_object *obj,
- uint32_t read_domains,
- uint32_t write_domain);
-static int
-i915_gem_object_set_domain_range(struct drm_gem_object *obj,
- uint64_t offset,
- uint64_t size,
- uint32_t read_domains,
- uint32_t write_domain);
-static int
-i915_gem_set_domain(struct drm_gem_object *obj,
- struct drm_file *file_priv,
- uint32_t read_domains,
- uint32_t write_domain);
+#define I915_GEM_GPU_DOMAINS (~(I915_GEM_DOMAIN_CPU | I915_GEM_DOMAIN_GTT))
+
+static void
+i915_gem_object_set_to_gpu_domain(struct drm_gem_object *obj,
+ uint32_t read_domains,
+ uint32_t write_domain);
+static void i915_gem_object_flush_gpu_write_domain(struct drm_gem_object *obj);
+static void i915_gem_object_flush_gtt_write_domain(struct drm_gem_object *obj);
+static void i915_gem_object_flush_cpu_write_domain(struct drm_gem_object *obj);
+static int i915_gem_object_set_to_cpu_domain(struct drm_gem_object *obj,
+ int write);
+static int i915_gem_object_set_cpu_read_domain_range(struct drm_gem_object *obj,
+ uint64_t offset,
+ uint64_t size);
+static void i915_gem_object_set_to_full_cpu_read_domain(struct drm_gem_object *obj);
static int i915_gem_object_get_page_list(struct drm_gem_object *obj);
static void i915_gem_object_free_page_list(struct drm_gem_object *obj);
static int i915_gem_object_wait_rendering(struct drm_gem_object *obj);
+static int i915_gem_object_bind_to_gtt(struct drm_gem_object *obj,
+ unsigned alignment);
+static void i915_gem_object_get_fence_reg(struct drm_gem_object *obj);
+static void i915_gem_clear_fence_reg(struct drm_gem_object *obj);
+static int i915_gem_evict_something(struct drm_device *dev);
+
+int i915_gem_do_init(struct drm_device *dev, unsigned long start,
+ unsigned long end)
+{
+ drm_i915_private_t *dev_priv = dev->dev_private;
-static void
-i915_gem_cleanup_ringbuffer(struct drm_device *dev);
+ if (start >= end ||
+ (start & (PAGE_SIZE - 1)) != 0 ||
+ (end & (PAGE_SIZE - 1)) != 0) {
+ return -EINVAL;
+ }
+
+ drm_mm_init(&dev_priv->mm.gtt_space, start,
+ end - start);
+
+ dev->gtt_total = (uint32_t) (end - start);
+
+ return 0;
+}
int
i915_gem_init_ioctl(struct drm_device *dev, void *data,
struct drm_file *file_priv)
{
- drm_i915_private_t *dev_priv = dev->dev_private;
struct drm_i915_gem_init *args = data;
+ int ret;
mutex_lock(&dev->struct_mutex);
-
- if (args->gtt_start >= args->gtt_end ||
- (args->gtt_start & (PAGE_SIZE - 1)) != 0 ||
- (args->gtt_end & (PAGE_SIZE - 1)) != 0) {
- mutex_unlock(&dev->struct_mutex);
- return -EINVAL;
- }
-
- drm_mm_init(&dev_priv->mm.gtt_space, args->gtt_start,
- args->gtt_end - args->gtt_start);
-
- dev->gtt_total = (uint32_t) (args->gtt_end - args->gtt_start);
-
+ ret = i915_gem_do_init(dev, args->gtt_start, args->gtt_end);
mutex_unlock(&dev->struct_mutex);
- return 0;
+ return ret;
}
int
i915_gem_get_aperture_ioctl(struct drm_device *dev, void *data,
struct drm_file *file_priv)
{
- drm_i915_private_t *dev_priv = dev->dev_private;
struct drm_i915_gem_get_aperture *args = data;
- struct drm_i915_gem_object *obj_priv;
if (!(dev->driver->driver_features & DRIVER_GEM))
return -ENODEV;
args->aper_size = dev->gtt_total;
- args->aper_available_size = args->aper_size;
-
- list_for_each_entry(obj_priv, &dev_priv->mm.active_list, list) {
- if (obj_priv->pin_count > 0)
- args->aper_available_size -= obj_priv->obj->size;
- }
+ args->aper_available_size = (args->aper_size -
+ atomic_read(&dev->pin_memory));
return 0;
}
@@ -166,8 +170,8 @@ i915_gem_pread_ioctl(struct drm_device *dev, void *data,
mutex_lock(&dev->struct_mutex);
- ret = i915_gem_object_set_domain_range(obj, args->offset, args->size,
- I915_GEM_DOMAIN_CPU, 0);
+ ret = i915_gem_object_set_cpu_read_domain_range(obj, args->offset,
+ args->size);
if (ret != 0) {
drm_gem_object_unreference(obj);
mutex_unlock(&dev->struct_mutex);
@@ -264,8 +268,7 @@ i915_gem_gtt_pwrite(struct drm_device *dev, struct drm_gem_object *obj,
mutex_unlock(&dev->struct_mutex);
return ret;
}
- ret = i915_gem_set_domain(obj, file_priv,
- I915_GEM_DOMAIN_GTT, I915_GEM_DOMAIN_GTT);
+ ret = i915_gem_object_set_to_gtt_domain(obj, 1);
if (ret)
goto fail;
@@ -324,8 +327,7 @@ i915_gem_shmem_pwrite(struct drm_device *dev, struct drm_gem_object *obj,
mutex_lock(&dev->struct_mutex);
- ret = i915_gem_set_domain(obj, file_priv,
- I915_GEM_DOMAIN_CPU, I915_GEM_DOMAIN_CPU);
+ ret = i915_gem_object_set_to_cpu_domain(obj, 1);
if (ret) {
mutex_unlock(&dev->struct_mutex);
return ret;
@@ -401,7 +403,8 @@ i915_gem_pwrite_ioctl(struct drm_device *dev, void *data,
}
/**
- * Called when user space prepares to use an object
+ * Called when user space prepares to use an object with the CPU, either
+ * through the mmap ioctl's mapping or a GTT mapping.
*/
int
i915_gem_set_domain_ioctl(struct drm_device *dev, void *data,
@@ -409,11 +412,26 @@ i915_gem_set_domain_ioctl(struct drm_device *dev, void *data,
{
struct drm_i915_gem_set_domain *args = data;
struct drm_gem_object *obj;
+ uint32_t read_domains = args->read_domains;
+ uint32_t write_domain = args->write_domain;
int ret;
if (!(dev->driver->driver_features & DRIVER_GEM))
return -ENODEV;
+ /* Only handle setting domains to types used by the CPU. */
+ if (write_domain & ~(I915_GEM_DOMAIN_CPU | I915_GEM_DOMAIN_GTT))
+ return -EINVAL;
+
+ if (read_domains & ~(I915_GEM_DOMAIN_CPU | I915_GEM_DOMAIN_GTT))
+ return -EINVAL;
+
+ /* Having something in the write domain implies it's in the read
+ * domain, and only that read domain. Enforce that in the request.
+ */
+ if (write_domain != 0 && read_domains != write_domain)
+ return -EINVAL;
+
obj = drm_gem_object_lookup(dev, file_priv, args->handle);
if (obj == NULL)
return -EBADF;
@@ -421,10 +439,21 @@ i915_gem_set_domain_ioctl(struct drm_device *dev, void *data,
mutex_lock(&dev->struct_mutex);
#if WATCH_BUF
DRM_INFO("set_domain_ioctl %p(%d), %08x %08x\n",
- obj, obj->size, args->read_domains, args->write_domain);
+ obj, obj->size, read_domains, write_domain);
#endif
- ret = i915_gem_set_domain(obj, file_priv,
- args->read_domains, args->write_domain);
+ if (read_domains & I915_GEM_DOMAIN_GTT) {
+ ret = i915_gem_object_set_to_gtt_domain(obj, write_domain != 0);
+
+ /* Silently promote "you're not bound, there was nothing to do"
+ * to success, since the client was just asking us to
+ * make sure everything was done.
+ */
+ if (ret == -EINVAL)
+ ret = 0;
+ } else {
+ ret = i915_gem_object_set_to_cpu_domain(obj, write_domain != 0);
+ }
+
drm_gem_object_unreference(obj);
mutex_unlock(&dev->struct_mutex);
return ret;
@@ -459,10 +488,9 @@ i915_gem_sw_finish_ioctl(struct drm_device *dev, void *data,
obj_priv = obj->driver_private;
/* Pinned buffers may be scanout, so flush the cache */
- if ((obj->write_domain & I915_GEM_DOMAIN_CPU) && obj_priv->pin_count) {
- i915_gem_clflush_object(obj);
- drm_agp_chipset_flush(dev);
- }
+ if (obj_priv->pin_count)
+ i915_gem_object_flush_cpu_write_domain(obj);
+
drm_gem_object_unreference(obj);
mutex_unlock(&dev->struct_mutex);
return ret;
@@ -509,6 +537,252 @@ i915_gem_mmap_ioctl(struct drm_device *dev, void *data,
return 0;
}
+/**
+ * i915_gem_fault - fault a page into the GTT
+ * vma: VMA in question
+ * vmf: fault info
+ *
+ * The fault handler is set up by drm_gem_mmap() when an object is GTT mapped
+ * from userspace. The fault handler takes care of binding the object to
+ * the GTT (if needed), allocating and programming a fence register (again,
+ * only if needed based on whether the old reg is still valid or the object
+ * is tiled) and inserting a new PTE into the faulting process.
+ *
+ * Note that the faulting process may involve evicting existing objects
+ * from the GTT and/or fence registers to make room. So performance may
+ * suffer if the GTT working set is large or there are few fence registers
+ * left.
+ */
+int i915_gem_fault(struct vm_area_struct *vma, struct vm_fault *vmf)
+{
+ struct drm_gem_object *obj = vma->vm_private_data;
+ struct drm_device *dev = obj->dev;
+ struct drm_i915_private *dev_priv = dev->dev_private;
+ struct drm_i915_gem_object *obj_priv = obj->driver_private;
+ pgoff_t page_offset;
+ unsigned long pfn;
+ int ret = 0;
+
+ /* We don't use vmf->pgoff since that has the fake offset */
+ page_offset = ((unsigned long)vmf->virtual_address - vma->vm_start) >>
+ PAGE_SHIFT;
+
+ /* Now bind it into the GTT if needed */
+ mutex_lock(&dev->struct_mutex);
+ if (!obj_priv->gtt_space) {
+ ret = i915_gem_object_bind_to_gtt(obj, obj_priv->gtt_alignment);
+ if (ret) {
+ mutex_unlock(&dev->struct_mutex);
+ return VM_FAULT_SIGBUS;
+ }
+ list_add(&obj_priv->list, &dev_priv->mm.inactive_list);
+ }
+
+ /* Need a new fence register? */
+ if (obj_priv->fence_reg == I915_FENCE_REG_NONE &&
+ obj_priv->tiling_mode != I915_TILING_NONE)
+ i915_gem_object_get_fence_reg(obj);
+
+ pfn = ((dev->agp->base + obj_priv->gtt_offset) >> PAGE_SHIFT) +
+ page_offset;
+
+ /* Finally, remap it using the new GTT offset */
+ ret = vm_insert_pfn(vma, (unsigned long)vmf->virtual_address, pfn);
+
+ mutex_unlock(&dev->struct_mutex);
+
+ switch (ret) {
+ case -ENOMEM:
+ case -EAGAIN:
+ return VM_FAULT_OOM;
+ case -EFAULT:
+ case -EBUSY:
+ DRM_ERROR("can't insert pfn?? fault or busy...\n");
+ return VM_FAULT_SIGBUS;
+ default:
+ return VM_FAULT_NOPAGE;
+ }
+}
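The PFN computation in the fault handler is plain linear arithmetic: the object sits at a fixed offset inside the AGP aperture, so the faulting page index maps directly onto an aperture page frame. A minimal user-space sketch of the same math, with the aperture base and GTT offset as made-up stand-in values:

#include <stdint.h>
#include <stdio.h>

#define PAGE_SHIFT 12	/* assumed 4 KiB pages, as in the fault handler */

/* Mirror the pfn computation: aperture base plus the object's GTT offset,
 * converted to a page frame, plus the page index of the fault inside the
 * mapping. The addresses below are purely illustrative. */
static unsigned long aperture_pfn(uint64_t agp_base, uint64_t gtt_offset,
				  unsigned long fault_addr,
				  unsigned long vm_start)
{
	unsigned long page_offset = (fault_addr - vm_start) >> PAGE_SHIFT;

	return (unsigned long)((agp_base + gtt_offset) >> PAGE_SHIFT) +
	       page_offset;
}

int main(void)
{
	printf("pfn=0x%lx\n",
	       aperture_pfn(0xd0000000, 0x100000,
			    0x7f0000001000, 0x7f0000000000));
	return 0;
}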
+
+/**
+ * i915_gem_create_mmap_offset - create a fake mmap offset for an object
+ * @obj: obj in question
+ *
+ * GEM memory mapping works by handing back to userspace a fake mmap offset
+ * it can use in a subsequent mmap(2) call. The DRM core code then looks
+ * up the object based on the offset and sets up the various memory mapping
+ * structures.
+ *
+ * This routine allocates and attaches a fake offset for @obj.
+ */
+static int
+i915_gem_create_mmap_offset(struct drm_gem_object *obj)
+{
+ struct drm_device *dev = obj->dev;
+ struct drm_gem_mm *mm = dev->mm_private;
+ struct drm_i915_gem_object *obj_priv = obj->driver_private;
+ struct drm_map_list *list;
+ struct drm_map *map;
+ int ret = 0;
+
+ /* Set the object up for mmap'ing */
+ list = &obj->map_list;
+ list->map = drm_calloc(1, sizeof(struct drm_map_list),
+ DRM_MEM_DRIVER);
+ if (!list->map)
+ return -ENOMEM;
+
+ map = list->map;
+ map->type = _DRM_GEM;
+ map->size = obj->size;
+ map->handle = obj;
+
+ /* Get a DRM GEM mmap offset allocated... */
+ list->file_offset_node = drm_mm_search_free(&mm->offset_manager,
+ obj->size / PAGE_SIZE, 0, 0);
+ if (!list->file_offset_node) {
+ DRM_ERROR("failed to allocate offset for bo %d\n", obj->name);
+ ret = -ENOMEM;
+ goto out_free_list;
+ }
+
+ list->file_offset_node = drm_mm_get_block(list->file_offset_node,
+ obj->size / PAGE_SIZE, 0);
+ if (!list->file_offset_node) {
+ ret = -ENOMEM;
+ goto out_free_list;
+ }
+
+ list->hash.key = list->file_offset_node->start;
+ if (drm_ht_insert_item(&mm->offset_hash, &list->hash)) {
+ DRM_ERROR("failed to add to map hash\n");
+		ret = -ENOMEM;
+		goto out_free_mm;
+ }
+
+	/* By now we should be all set; any drm_mmap request on the offset
+	 * below will reach our mmap and fault handlers. */
+ obj_priv->mmap_offset = ((uint64_t) list->hash.key) << PAGE_SHIFT;
+
+ return 0;
+
+out_free_mm:
+ drm_mm_put_block(list->file_offset_node);
+out_free_list:
+ drm_free(list->map, sizeof(struct drm_map_list), DRM_MEM_DRIVER);
+
+ return ret;
+}
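The "fake" offset is nothing more than the start page of a drm_mm allocation shifted into bytes; drm_gem_mmap() later hashes that page index back to the object. A toy sketch of the idea, deliberately replacing the drm_mm offset manager with a simple bump allocator:

#include <stdint.h>
#include <stdio.h>

#define PAGE_SHIFT 12

static uint64_t next_free_page;	/* toy, single-threaded stand-in for drm_mm */

/* Hand out a page-granular range and convert its start page into the byte
 * offset userspace will pass to mmap(2), the same shift applied to
 * list->hash.key above. */
static uint64_t alloc_fake_offset(uint64_t obj_size)
{
	uint64_t pages = (obj_size + (1ull << PAGE_SHIFT) - 1) >> PAGE_SHIFT;
	uint64_t start_page = next_free_page;

	next_free_page += pages;
	return start_page << PAGE_SHIFT;
}

int main(void)
{
	printf("obj A offset 0x%llx\n",
	       (unsigned long long)alloc_fake_offset(8192));
	printf("obj B offset 0x%llx\n",
	       (unsigned long long)alloc_fake_offset(4096));
	return 0;
}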
+
+/**
+ * i915_gem_get_gtt_alignment - return required GTT alignment for an object
+ * @obj: object to check
+ *
+ * Return the required GTT alignment for an object, taking into account
+ * potential fence register mapping if needed.
+ */
+static uint32_t
+i915_gem_get_gtt_alignment(struct drm_gem_object *obj)
+{
+ struct drm_device *dev = obj->dev;
+ struct drm_i915_gem_object *obj_priv = obj->driver_private;
+ int start, i;
+
+ /*
+ * Minimum alignment is 4k (GTT page size), but might be greater
+ * if a fence register is needed for the object.
+ */
+ if (IS_I965G(dev) || obj_priv->tiling_mode == I915_TILING_NONE)
+ return 4096;
+
+ /*
+ * Previous chips need to be aligned to the size of the smallest
+ * fence register that can contain the object.
+ */
+ if (IS_I9XX(dev))
+ start = 1024*1024;
+ else
+ start = 512*1024;
+
+ for (i = start; i < obj->size; i <<= 1)
+ ;
+
+ return i;
+}
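The empty-bodied loop above rounds the object size up to the next power of two at or above the minimum fence size, which is the alignment pre-965 fence registers require. The same computation as a standalone sketch:

#include <stdint.h>
#include <stdio.h>

/* Round a tiled object's size up to the smallest fence size that can cover
 * it (start is 1 MiB for 9xx-class hardware, 512 KiB for older parts). */
static uint32_t fence_alignment(uint32_t obj_size, uint32_t start)
{
	uint32_t i;

	for (i = start; i < obj_size; i <<= 1)
		;
	return i;
}

int main(void)
{
	printf("%u\n", fence_alignment(3 * 1024 * 1024, 1024 * 1024)); /* 4194304 */
	printf("%u\n", fence_alignment(256 * 1024, 512 * 1024));       /* 524288 */
	return 0;
}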
+
+/**
+ * i915_gem_mmap_gtt_ioctl - prepare an object for GTT mmap'ing
+ * @dev: DRM device
+ * @data: GTT mapping ioctl data
+ * @file_priv: GEM object info
+ *
+ * Simply returns the fake offset to userspace so it can mmap it.
+ * The mmap call will end up in drm_gem_mmap(), which will set things
+ * up so we can get faults in the handler above.
+ *
+ * The fault handler will take care of binding the object into the GTT
+ * (since it may have been evicted to make room for something), allocating
+ * a fence register, and mapping the appropriate aperture address into
+ * userspace.
+ */
+int
+i915_gem_mmap_gtt_ioctl(struct drm_device *dev, void *data,
+ struct drm_file *file_priv)
+{
+ struct drm_i915_gem_mmap_gtt *args = data;
+ struct drm_i915_private *dev_priv = dev->dev_private;
+ struct drm_gem_object *obj;
+ struct drm_i915_gem_object *obj_priv;
+ int ret;
+
+ if (!(dev->driver->driver_features & DRIVER_GEM))
+ return -ENODEV;
+
+ obj = drm_gem_object_lookup(dev, file_priv, args->handle);
+ if (obj == NULL)
+ return -EBADF;
+
+ mutex_lock(&dev->struct_mutex);
+
+ obj_priv = obj->driver_private;
+
+ if (!obj_priv->mmap_offset) {
+ ret = i915_gem_create_mmap_offset(obj);
+		if (ret) {
+			drm_gem_object_unreference(obj);
+			mutex_unlock(&dev->struct_mutex);
+			return ret;
+		}
+ }
+
+ args->offset = obj_priv->mmap_offset;
+
+ obj_priv->gtt_alignment = i915_gem_get_gtt_alignment(obj);
+
+ /* Make sure the alignment is correct for fence regs etc */
+ if (obj_priv->agp_mem &&
+ (obj_priv->gtt_offset & (obj_priv->gtt_alignment - 1))) {
+ drm_gem_object_unreference(obj);
+ mutex_unlock(&dev->struct_mutex);
+ return -EINVAL;
+ }
+
+ /*
+ * Pull it into the GTT so that we have a page list (makes the
+ * initial fault faster and any subsequent flushing possible).
+ */
+ if (!obj_priv->agp_mem) {
+ ret = i915_gem_object_bind_to_gtt(obj, obj_priv->gtt_alignment);
+ if (ret) {
+ drm_gem_object_unreference(obj);
+ mutex_unlock(&dev->struct_mutex);
+ return ret;
+ }
+ list_add(&obj_priv->list, &dev_priv->mm.inactive_list);
+ }
+
+ drm_gem_object_unreference(obj);
+ mutex_unlock(&dev->struct_mutex);
+
+ return 0;
+}
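From user space the flow is: ask for the fake offset with the new ioctl, then mmap the DRM fd at that offset and let the fault handler above do the rest. A rough sketch, assuming the struct drm_i915_gem_mmap_gtt and DRM_IOCTL_I915_GEM_MMAP_GTT definitions that the matching i915_drm.h change introduces (not shown in this diff):

#include <stdint.h>
#include <sys/ioctl.h>
#include <sys/mman.h>

#include "i915_drm.h"	/* assumed to provide DRM_IOCTL_I915_GEM_MMAP_GTT */

/* Map a GEM object through the GTT: fetch the fake offset, then mmap the
 * DRM fd at that offset. Accesses fault into i915_gem_fault(), which binds
 * the object and sets up a fence register if it is tiled. */
static void *map_bo_gtt(int fd, uint32_t handle, uint64_t size)
{
	struct drm_i915_gem_mmap_gtt arg = { .handle = handle };

	if (ioctl(fd, DRM_IOCTL_I915_GEM_MMAP_GTT, &arg))
		return MAP_FAILED;

	return mmap(NULL, size, PROT_READ | PROT_WRITE, MAP_SHARED,
		    fd, arg.offset);
}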
+
static void
i915_gem_object_free_page_list(struct drm_gem_object *obj)
{
@@ -536,7 +810,7 @@ i915_gem_object_free_page_list(struct drm_gem_object *obj)
}
static void
-i915_gem_object_move_to_active(struct drm_gem_object *obj)
+i915_gem_object_move_to_active(struct drm_gem_object *obj, uint32_t seqno)
{
struct drm_device *dev = obj->dev;
drm_i915_private_t *dev_priv = dev->dev_private;
@@ -550,8 +824,20 @@ i915_gem_object_move_to_active(struct drm_gem_object *obj)
/* Move from whatever list we were on to the tail of execution. */
list_move_tail(&obj_priv->list,
&dev_priv->mm.active_list);
+ obj_priv->last_rendering_seqno = seqno;
}
+static void
+i915_gem_object_move_to_flushing(struct drm_gem_object *obj)
+{
+ struct drm_device *dev = obj->dev;
+ drm_i915_private_t *dev_priv = dev->dev_private;
+ struct drm_i915_gem_object *obj_priv = obj->driver_private;
+
+ BUG_ON(!obj_priv->active);
+ list_move_tail(&obj_priv->list, &dev_priv->mm.flushing_list);
+ obj_priv->last_rendering_seqno = 0;
+}
static void
i915_gem_object_move_to_inactive(struct drm_gem_object *obj)
@@ -566,6 +852,7 @@ i915_gem_object_move_to_inactive(struct drm_gem_object *obj)
else
list_move_tail(&obj_priv->list, &dev_priv->mm.inactive_list);
+ obj_priv->last_rendering_seqno = 0;
if (obj_priv->active) {
obj_priv->active = 0;
drm_gem_object_unreference(obj);
@@ -614,10 +901,28 @@ i915_add_request(struct drm_device *dev, uint32_t flush_domains)
request->seqno = seqno;
request->emitted_jiffies = jiffies;
- request->flush_domains = flush_domains;
was_empty = list_empty(&dev_priv->mm.request_list);
list_add_tail(&request->list, &dev_priv->mm.request_list);
+ /* Associate any objects on the flushing list matching the write
+ * domain we're flushing with our flush.
+ */
+ if (flush_domains != 0) {
+ struct drm_i915_gem_object *obj_priv, *next;
+
+ list_for_each_entry_safe(obj_priv, next,
+ &dev_priv->mm.flushing_list, list) {
+ struct drm_gem_object *obj = obj_priv->obj;
+
+ if ((obj->write_domain & flush_domains) ==
+ obj->write_domain) {
+ obj->write_domain = 0;
+ i915_gem_object_move_to_active(obj, seqno);
+ }
+ }
+
+ }
+
if (was_empty && !dev_priv->mm.suspended)
schedule_delayed_work(&dev_priv->mm.retire_work, HZ);
return seqno;
@@ -675,35 +980,16 @@ i915_gem_retire_request(struct drm_device *dev,
*/
if (obj_priv->last_rendering_seqno != request->seqno)
return;
+
#if WATCH_LRU
DRM_INFO("%s: retire %d moves to inactive list %p\n",
__func__, request->seqno, obj);
#endif
- if (obj->write_domain != 0) {
- list_move_tail(&obj_priv->list,
- &dev_priv->mm.flushing_list);
- } else {
+ if (obj->write_domain != 0)
+ i915_gem_object_move_to_flushing(obj);
+ else
i915_gem_object_move_to_inactive(obj);
- }
- }
-
- if (request->flush_domains != 0) {
- struct drm_i915_gem_object *obj_priv, *next;
-
- /* Clear the write domain and activity from any buffers
- * that are just waiting for a flush matching the one retired.
- */
- list_for_each_entry_safe(obj_priv, next,
- &dev_priv->mm.flushing_list, list) {
- struct drm_gem_object *obj = obj_priv->obj;
-
- if (obj->write_domain & request->flush_domains) {
- obj->write_domain = 0;
- i915_gem_object_move_to_inactive(obj);
- }
- }
-
}
}
@@ -896,25 +1182,10 @@ i915_gem_object_wait_rendering(struct drm_gem_object *obj)
struct drm_i915_gem_object *obj_priv = obj->driver_private;
int ret;
- /* If there are writes queued to the buffer, flush and
- * create a new seqno to wait for.
+ /* This function only exists to support waiting for existing rendering,
+ * not for emitting required flushes.
*/
- if (obj->write_domain & ~(I915_GEM_DOMAIN_CPU|I915_GEM_DOMAIN_GTT)) {
- uint32_t write_domain = obj->write_domain;
-#if WATCH_BUF
- DRM_INFO("%s: flushing object %p from write domain %08x\n",
- __func__, obj, write_domain);
-#endif
- i915_gem_flush(dev, 0, write_domain);
-
- i915_gem_object_move_to_active(obj);
- obj_priv->last_rendering_seqno = i915_add_request(dev,
- write_domain);
- BUG_ON(obj_priv->last_rendering_seqno == 0);
-#if WATCH_LRU
- DRM_INFO("%s: flush moves to exec list %p\n", __func__, obj);
-#endif
- }
+ BUG_ON((obj->write_domain & I915_GEM_GPU_DOMAINS) != 0);
/* If there is rendering queued on the buffer being evicted, wait for
* it.
@@ -940,6 +1211,7 @@ i915_gem_object_unbind(struct drm_gem_object *obj)
{
struct drm_device *dev = obj->dev;
struct drm_i915_gem_object *obj_priv = obj->driver_private;
+ loff_t offset;
int ret = 0;
#if WATCH_BUF
@@ -954,24 +1226,16 @@ i915_gem_object_unbind(struct drm_gem_object *obj)
return -EINVAL;
}
- /* Wait for any rendering to complete
- */
- ret = i915_gem_object_wait_rendering(obj);
- if (ret) {
- DRM_ERROR("wait_rendering failed: %d\n", ret);
- return ret;
- }
-
/* Move the object to the CPU domain to ensure that
* any possible CPU writes while it's not in the GTT
* are flushed when we go to remap it. This will
* also ensure that all pending GPU writes are finished
* before we unbind.
*/
- ret = i915_gem_object_set_domain(obj, I915_GEM_DOMAIN_CPU,
- I915_GEM_DOMAIN_CPU);
+ ret = i915_gem_object_set_to_cpu_domain(obj, 1);
if (ret) {
- DRM_ERROR("set_domain failed: %d\n", ret);
+ if (ret != -ERESTARTSYS)
+ DRM_ERROR("set_domain failed: %d\n", ret);
return ret;
}
@@ -983,6 +1247,14 @@ i915_gem_object_unbind(struct drm_gem_object *obj)
BUG_ON(obj_priv->active);
+ /* blow away mappings if mapped through GTT */
+ offset = ((loff_t) obj->map_list.hash.key) << PAGE_SHIFT;
+ if (dev->dev_mapping)
+ unmap_mapping_range(dev->dev_mapping, offset, obj->size, 1);
+
+ if (obj_priv->fence_reg != I915_FENCE_REG_NONE)
+ i915_gem_clear_fence_reg(obj);
+
i915_gem_object_free_page_list(obj);
if (obj_priv->gtt_space) {
@@ -1087,6 +1359,21 @@ i915_gem_evict_something(struct drm_device *dev)
}
static int
+i915_gem_evict_everything(struct drm_device *dev)
+{
+ int ret;
+
+ for (;;) {
+ ret = i915_gem_evict_something(dev);
+ if (ret != 0)
+ break;
+ }
+ if (ret == -ENOMEM)
+ return 0;
+ return ret;
+}
+
+static int
i915_gem_object_get_page_list(struct drm_gem_object *obj)
{
struct drm_i915_gem_object *obj_priv = obj->driver_private;
@@ -1126,6 +1413,204 @@ i915_gem_object_get_page_list(struct drm_gem_object *obj)
return 0;
}
+static void i965_write_fence_reg(struct drm_i915_fence_reg *reg)
+{
+ struct drm_gem_object *obj = reg->obj;
+ struct drm_device *dev = obj->dev;
+ drm_i915_private_t *dev_priv = dev->dev_private;
+ struct drm_i915_gem_object *obj_priv = obj->driver_private;
+ int regnum = obj_priv->fence_reg;
+ uint64_t val;
+
+ val = (uint64_t)((obj_priv->gtt_offset + obj->size - 4096) &
+ 0xfffff000) << 32;
+ val |= obj_priv->gtt_offset & 0xfffff000;
+ val |= ((obj_priv->stride / 128) - 1) << I965_FENCE_PITCH_SHIFT;
+ if (obj_priv->tiling_mode == I915_TILING_Y)
+ val |= 1 << I965_FENCE_TILING_Y_SHIFT;
+ val |= I965_FENCE_REG_VALID;
+
+ I915_WRITE64(FENCE_REG_965_0 + (regnum * 8), val);
+}
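For reference, the 965 fence value packs the end-of-object page into the high dword, the start page into the low dword, the pitch in 128-byte units, a tiling-select bit and a valid bit. A user-space sketch rebuilding the same value; the shift/flag constants are copied from i915_reg.h and should be treated as assumptions here:

#include <stdint.h>
#include <stdio.h>

#define I965_FENCE_PITCH_SHIFT		2	/* values mirroring i915_reg.h */
#define I965_FENCE_TILING_Y_SHIFT	1
#define I965_FENCE_REG_VALID		(1 << 0)

/* Compose the 64-bit fence value exactly as i965_write_fence_reg() does. */
static uint64_t i965_fence_value(uint32_t gtt_offset, uint32_t size,
				 uint32_t stride, int tiling_y)
{
	uint64_t val;

	val = (uint64_t)((gtt_offset + size - 4096) & 0xfffff000) << 32;
	val |= gtt_offset & 0xfffff000;
	val |= (uint64_t)((stride / 128) - 1) << I965_FENCE_PITCH_SHIFT;
	if (tiling_y)
		val |= 1 << I965_FENCE_TILING_Y_SHIFT;
	val |= I965_FENCE_REG_VALID;

	return val;
}

int main(void)
{
	printf("0x%016llx\n",
	       (unsigned long long)i965_fence_value(0x100000, 0x100000,
						    4096, 0));
	return 0;
}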
+
+static void i915_write_fence_reg(struct drm_i915_fence_reg *reg)
+{
+ struct drm_gem_object *obj = reg->obj;
+ struct drm_device *dev = obj->dev;
+ drm_i915_private_t *dev_priv = dev->dev_private;
+ struct drm_i915_gem_object *obj_priv = obj->driver_private;
+ int regnum = obj_priv->fence_reg;
+ uint32_t val;
+ uint32_t pitch_val;
+
+ if ((obj_priv->gtt_offset & ~I915_FENCE_START_MASK) ||
+ (obj_priv->gtt_offset & (obj->size - 1))) {
+		WARN(1, "%s: object not 1M or size aligned\n", __func__);
+ return;
+ }
+
+ if (obj_priv->tiling_mode == I915_TILING_Y && (IS_I945G(dev) ||
+ IS_I945GM(dev) ||
+ IS_G33(dev)))
+ pitch_val = (obj_priv->stride / 128) - 1;
+ else
+ pitch_val = (obj_priv->stride / 512) - 1;
+
+ val = obj_priv->gtt_offset;
+ if (obj_priv->tiling_mode == I915_TILING_Y)
+ val |= 1 << I830_FENCE_TILING_Y_SHIFT;
+ val |= I915_FENCE_SIZE_BITS(obj->size);
+ val |= pitch_val << I830_FENCE_PITCH_SHIFT;
+ val |= I830_FENCE_REG_VALID;
+
+ I915_WRITE(FENCE_REG_830_0 + (regnum * 4), val);
+}
+
+static void i830_write_fence_reg(struct drm_i915_fence_reg *reg)
+{
+ struct drm_gem_object *obj = reg->obj;
+ struct drm_device *dev = obj->dev;
+ drm_i915_private_t *dev_priv = dev->dev_private;
+ struct drm_i915_gem_object *obj_priv = obj->driver_private;
+ int regnum = obj_priv->fence_reg;
+ uint32_t val;
+ uint32_t pitch_val;
+
+ if ((obj_priv->gtt_offset & ~I915_FENCE_START_MASK) ||
+ (obj_priv->gtt_offset & (obj->size - 1))) {
+		WARN(1, "%s: object not 1M or size aligned\n", __func__);
+ return;
+ }
+
+ pitch_val = (obj_priv->stride / 128) - 1;
+
+ val = obj_priv->gtt_offset;
+ if (obj_priv->tiling_mode == I915_TILING_Y)
+ val |= 1 << I830_FENCE_TILING_Y_SHIFT;
+ val |= I830_FENCE_SIZE_BITS(obj->size);
+ val |= pitch_val << I830_FENCE_PITCH_SHIFT;
+ val |= I830_FENCE_REG_VALID;
+
+ I915_WRITE(FENCE_REG_830_0 + (regnum * 4), val);
+}
+
+/**
+ * i915_gem_object_get_fence_reg - set up a fence reg for an object
+ * @obj: object to map through a fence reg
+ *
+ * When mapping objects through the GTT, userspace wants to be able to write
+ * to them without having to worry about swizzling if the object is tiled.
+ *
+ * This function walks the fence regs looking for a free one for @obj,
+ * stealing one if it can't find any.
+ *
+ * It then sets up the reg based on the object's properties: address, pitch
+ * and tiling format.
+ */
+static void
+i915_gem_object_get_fence_reg(struct drm_gem_object *obj)
+{
+ struct drm_device *dev = obj->dev;
+ struct drm_i915_private *dev_priv = dev->dev_private;
+ struct drm_i915_gem_object *obj_priv = obj->driver_private;
+ struct drm_i915_fence_reg *reg = NULL;
+ int i, ret;
+
+ switch (obj_priv->tiling_mode) {
+ case I915_TILING_NONE:
+ WARN(1, "allocating a fence for non-tiled object?\n");
+ break;
+ case I915_TILING_X:
+ WARN(obj_priv->stride & (512 - 1),
+ "object is X tiled but has non-512B pitch\n");
+ break;
+ case I915_TILING_Y:
+ WARN(obj_priv->stride & (128 - 1),
+ "object is Y tiled but has non-128B pitch\n");
+ break;
+ }
+
+ /* First try to find a free reg */
+ for (i = dev_priv->fence_reg_start; i < dev_priv->num_fence_regs; i++) {
+ reg = &dev_priv->fence_regs[i];
+ if (!reg->obj)
+ break;
+ }
+
+ /* None available, try to steal one or wait for a user to finish */
+ if (i == dev_priv->num_fence_regs) {
+ struct drm_i915_gem_object *old_obj_priv = NULL;
+ loff_t offset;
+
+try_again:
+ /* Could try to use LRU here instead... */
+ for (i = dev_priv->fence_reg_start;
+ i < dev_priv->num_fence_regs; i++) {
+ reg = &dev_priv->fence_regs[i];
+ old_obj_priv = reg->obj->driver_private;
+ if (!old_obj_priv->pin_count)
+ break;
+ }
+
+ /*
+ * Now things get ugly... we have to wait for one of the
+ * objects to finish before trying again.
+ */
+ if (i == dev_priv->num_fence_regs) {
+ ret = i915_gem_object_wait_rendering(reg->obj);
+ if (ret) {
+ WARN(ret, "wait_rendering failed: %d\n", ret);
+ return;
+ }
+ goto try_again;
+ }
+
+ /*
+ * Zap this virtual mapping so we can set up a fence again
+ * for this object next time we need it.
+ */
+ offset = ((loff_t) reg->obj->map_list.hash.key) << PAGE_SHIFT;
+ if (dev->dev_mapping)
+ unmap_mapping_range(dev->dev_mapping, offset,
+ reg->obj->size, 1);
+ old_obj_priv->fence_reg = I915_FENCE_REG_NONE;
+ }
+
+ obj_priv->fence_reg = i;
+ reg->obj = obj;
+
+ if (IS_I965G(dev))
+ i965_write_fence_reg(reg);
+ else if (IS_I9XX(dev))
+ i915_write_fence_reg(reg);
+ else
+ i830_write_fence_reg(reg);
+}
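The allocation policy above is: take the first free fence register, otherwise steal the first one whose owner isn't pinned (after waiting for its rendering and zapping its GTT mapping), and only block when every register belongs to a pinned object. A compressed sketch of just the selection step, with the wait-and-retry path left out:

#include <stddef.h>

struct toy_fence {		/* stand-in for drm_i915_fence_reg bookkeeping */
	int in_use;
	int owner_pinned;
};

/* First pass: prefer a free register. Second pass: pick one whose owner is
 * not pinned (the caller would then wait on its rendering and unmap it).
 * Returns -1 when every register is in use and pinned. */
static int pick_fence_reg(const struct toy_fence *regs, int start, int count)
{
	int i;

	for (i = start; i < count; i++)
		if (!regs[i].in_use)
			return i;

	for (i = start; i < count; i++)
		if (!regs[i].owner_pinned)
			return i;

	return -1;
}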
+
+/**
+ * i915_gem_clear_fence_reg - clear out fence register info
+ * @obj: object to clear
+ *
+ * Zeroes out the fence register itself and clears out the associated
+ * data structures in dev_priv and obj_priv.
+ */
+static void
+i915_gem_clear_fence_reg(struct drm_gem_object *obj)
+{
+ struct drm_device *dev = obj->dev;
+ drm_i915_private_t *dev_priv = dev->dev_private;
+ struct drm_i915_gem_object *obj_priv = obj->driver_private;
+
+ if (IS_I965G(dev))
+ I915_WRITE64(FENCE_REG_965_0 + (obj_priv->fence_reg * 8), 0);
+ else
+ I915_WRITE(FENCE_REG_830_0 + (obj_priv->fence_reg * 4), 0);
+
+ dev_priv->fence_regs[obj_priv->fence_reg].obj = NULL;
+ obj_priv->fence_reg = I915_FENCE_REG_NONE;
+}
+
/**
* Finds free space in the GTT aperture and binds the object there.
*/
@@ -1172,7 +1657,8 @@ i915_gem_object_bind_to_gtt(struct drm_gem_object *obj, unsigned alignment)
ret = i915_gem_evict_something(dev);
if (ret != 0) {
- DRM_ERROR("Failed to evict a buffer %d\n", ret);
+ if (ret != -ERESTARTSYS)
+ DRM_ERROR("Failed to evict a buffer %d\n", ret);
return ret;
}
goto search_free;
@@ -1232,6 +1718,143 @@ i915_gem_clflush_object(struct drm_gem_object *obj)
drm_clflush_pages(obj_priv->page_list, obj->size / PAGE_SIZE);
}
+/** Flushes any GPU write domain for the object if it's dirty. */
+static void
+i915_gem_object_flush_gpu_write_domain(struct drm_gem_object *obj)
+{
+ struct drm_device *dev = obj->dev;
+ uint32_t seqno;
+
+ if ((obj->write_domain & I915_GEM_GPU_DOMAINS) == 0)
+ return;
+
+ /* Queue the GPU write cache flushing we need. */
+ i915_gem_flush(dev, 0, obj->write_domain);
+ seqno = i915_add_request(dev, obj->write_domain);
+ obj->write_domain = 0;
+ i915_gem_object_move_to_active(obj, seqno);
+}
+
+/** Flushes the GTT write domain for the object if it's dirty. */
+static void
+i915_gem_object_flush_gtt_write_domain(struct drm_gem_object *obj)
+{
+ if (obj->write_domain != I915_GEM_DOMAIN_GTT)
+ return;
+
+ /* No actual flushing is required for the GTT write domain. Writes
+ * to it immediately go to main memory as far as we know, so there's
+ * no chipset flush. It also doesn't land in render cache.
+ */
+ obj->write_domain = 0;
+}
+
+/** Flushes the CPU write domain for the object if it's dirty. */
+static void
+i915_gem_object_flush_cpu_write_domain(struct drm_gem_object *obj)
+{
+ struct drm_device *dev = obj->dev;
+
+ if (obj->write_domain != I915_GEM_DOMAIN_CPU)
+ return;
+
+ i915_gem_clflush_object(obj);
+ drm_agp_chipset_flush(dev);
+ obj->write_domain = 0;
+}
+
+/**
+ * Moves a single object to the GTT read, and possibly write domain.
+ *
+ * This function returns when the move is complete, including waiting on
+ * flushes to occur.
+ */
+int
+i915_gem_object_set_to_gtt_domain(struct drm_gem_object *obj, int write)
+{
+ struct drm_i915_gem_object *obj_priv = obj->driver_private;
+ int ret;
+
+ /* Not valid to be called on unbound objects. */
+ if (obj_priv->gtt_space == NULL)
+ return -EINVAL;
+
+ i915_gem_object_flush_gpu_write_domain(obj);
+ /* Wait on any GPU rendering and flushing to occur. */
+ ret = i915_gem_object_wait_rendering(obj);
+ if (ret != 0)
+ return ret;
+
+ /* If we're writing through the GTT domain, then CPU and GPU caches
+ * will need to be invalidated at next use.
+ */
+ if (write)
+ obj->read_domains &= I915_GEM_DOMAIN_GTT;
+
+ i915_gem_object_flush_cpu_write_domain(obj);
+
+ /* It should now be out of any other write domains, and we can update
+ * the domain values for our changes.
+ */
+ BUG_ON((obj->write_domain & ~I915_GEM_DOMAIN_GTT) != 0);
+ obj->read_domains |= I915_GEM_DOMAIN_GTT;
+ if (write) {
+ obj->write_domain = I915_GEM_DOMAIN_GTT;
+ obj_priv->dirty = 1;
+ }
+
+ return 0;
+}
+
+/**
+ * Moves a single object to the CPU read, and possibly write domain.
+ *
+ * This function returns when the move is complete, including waiting on
+ * flushes to occur.
+ */
+static int
+i915_gem_object_set_to_cpu_domain(struct drm_gem_object *obj, int write)
+{
+ struct drm_device *dev = obj->dev;
+ int ret;
+
+ i915_gem_object_flush_gpu_write_domain(obj);
+ /* Wait on any GPU rendering and flushing to occur. */
+ ret = i915_gem_object_wait_rendering(obj);
+ if (ret != 0)
+ return ret;
+
+ i915_gem_object_flush_gtt_write_domain(obj);
+
+ /* If we have a partially-valid cache of the object in the CPU,
+ * finish invalidating it and free the per-page flags.
+ */
+ i915_gem_object_set_to_full_cpu_read_domain(obj);
+
+ /* Flush the CPU cache if it's still invalid. */
+ if ((obj->read_domains & I915_GEM_DOMAIN_CPU) == 0) {
+ i915_gem_clflush_object(obj);
+ drm_agp_chipset_flush(dev);
+
+ obj->read_domains |= I915_GEM_DOMAIN_CPU;
+ }
+
+ /* It should now be out of any other write domains, and we can update
+ * the domain values for our changes.
+ */
+ BUG_ON((obj->write_domain & ~I915_GEM_DOMAIN_CPU) != 0);
+
+ /* If we're writing through the CPU, then the GPU read domains will
+ * need to be invalidated at next use.
+ */
+ if (write) {
+ obj->read_domains &= I915_GEM_DOMAIN_CPU;
+ obj->write_domain = I915_GEM_DOMAIN_CPU;
+ }
+
+ return 0;
+}
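Both helpers maintain the invariant that i915_gem_set_domain_ioctl() checks on entry: an object has at most one write domain, and writing through a domain makes it the object's only read domain. A small model of that rule, with stand-in domain bits and the flush reduced to clearing the old write domain:

#include <assert.h>
#include <stdint.h>

#define DOMAIN_CPU	(1u << 0)	/* stand-ins for I915_GEM_DOMAIN_* */
#define DOMAIN_GTT	(1u << 1)

struct toy_obj {
	uint32_t read_domains;
	uint32_t write_domain;
};

/* Taking a new domain flushes any other write domain; writing through a
 * domain leaves the object readable only through that same domain. */
static void set_domain(struct toy_obj *obj, uint32_t domain, int write)
{
	if (obj->write_domain && obj->write_domain != domain)
		obj->write_domain = 0;	/* stands in for the flush above */

	if (write) {
		obj->read_domains = domain;
		obj->write_domain = domain;
	} else {
		obj->read_domains |= domain;
	}

	/* the write domain, if any, must be the sole read domain */
	assert(obj->write_domain == 0 ||
	       obj->read_domains == obj->write_domain);
}

int main(void)
{
	struct toy_obj o = { DOMAIN_CPU, 0 };

	set_domain(&o, DOMAIN_GTT, 1);	/* now GTT-only, writable */
	set_domain(&o, DOMAIN_CPU, 0);	/* implies flushing the GTT writes */
	return 0;
}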
+
/*
* Set the next domain for the specified object. This
 * may not actually perform the necessary flushing/invalidating though,
@@ -1343,16 +1966,18 @@ i915_gem_clflush_object(struct drm_gem_object *obj)
* MI_FLUSH
* drm_agp_chipset_flush
*/
-static int
-i915_gem_object_set_domain(struct drm_gem_object *obj,
- uint32_t read_domains,
- uint32_t write_domain)
+static void
+i915_gem_object_set_to_gpu_domain(struct drm_gem_object *obj,
+ uint32_t read_domains,
+ uint32_t write_domain)
{
struct drm_device *dev = obj->dev;
struct drm_i915_gem_object *obj_priv = obj->driver_private;
uint32_t invalidate_domains = 0;
uint32_t flush_domains = 0;
- int ret;
+
+ BUG_ON(read_domains & I915_GEM_DOMAIN_CPU);
+ BUG_ON(write_domain == I915_GEM_DOMAIN_CPU);
#if WATCH_BUF
DRM_INFO("%s: object %p read %08x -> %08x write %08x -> %08x\n",
@@ -1389,34 +2014,11 @@ i915_gem_object_set_domain(struct drm_gem_object *obj,
DRM_INFO("%s: CPU domain flush %08x invalidate %08x\n",
__func__, flush_domains, invalidate_domains);
#endif
- /*
- * If we're invaliding the CPU cache and flushing a GPU cache,
- * then pause for rendering so that the GPU caches will be
- * flushed before the cpu cache is invalidated
- */
- if ((invalidate_domains & I915_GEM_DOMAIN_CPU) &&
- (flush_domains & ~(I915_GEM_DOMAIN_CPU |
- I915_GEM_DOMAIN_GTT))) {
- ret = i915_gem_object_wait_rendering(obj);
- if (ret)
- return ret;
- }
i915_gem_clflush_object(obj);
}
if ((write_domain | flush_domains) != 0)
obj->write_domain = write_domain;
-
- /* If we're invalidating the CPU domain, clear the per-page CPU
- * domain list as well.
- */
- if (obj_priv->page_cpu_valid != NULL &&
- (write_domain != 0 ||
- read_domains & I915_GEM_DOMAIN_CPU)) {
- drm_free(obj_priv->page_cpu_valid, obj->size / PAGE_SIZE,
- DRM_MEM_DRIVER);
- obj_priv->page_cpu_valid = NULL;
- }
obj->read_domains = read_domains;
dev->invalidate_domains |= invalidate_domains;
@@ -1427,47 +2029,94 @@ i915_gem_object_set_domain(struct drm_gem_object *obj,
obj->read_domains, obj->write_domain,
dev->invalidate_domains, dev->flush_domains);
#endif
- return 0;
}
/**
- * Set the read/write domain on a range of the object.
+ * Moves the object from a partially CPU read to a full one.
*
- * Currently only implemented for CPU reads, otherwise drops to normal
- * i915_gem_object_set_domain().
+ * Note that this only resolves i915_gem_object_set_cpu_read_domain_range(),
+ * and doesn't handle transitioning from !(read_domains & I915_GEM_DOMAIN_CPU).
*/
-static int
-i915_gem_object_set_domain_range(struct drm_gem_object *obj,
- uint64_t offset,
- uint64_t size,
- uint32_t read_domains,
- uint32_t write_domain)
+static void
+i915_gem_object_set_to_full_cpu_read_domain(struct drm_gem_object *obj)
{
+ struct drm_device *dev = obj->dev;
struct drm_i915_gem_object *obj_priv = obj->driver_private;
- int ret, i;
- if (obj->read_domains & I915_GEM_DOMAIN_CPU)
- return 0;
+ if (!obj_priv->page_cpu_valid)
+ return;
+
+ /* If we're partially in the CPU read domain, finish moving it in.
+ */
+ if (obj->read_domains & I915_GEM_DOMAIN_CPU) {
+ int i;
+
+ for (i = 0; i <= (obj->size - 1) / PAGE_SIZE; i++) {
+ if (obj_priv->page_cpu_valid[i])
+ continue;
+ drm_clflush_pages(obj_priv->page_list + i, 1);
+ }
+ drm_agp_chipset_flush(dev);
+ }
- if (read_domains != I915_GEM_DOMAIN_CPU ||
- write_domain != 0)
- return i915_gem_object_set_domain(obj,
- read_domains, write_domain);
+ /* Free the page_cpu_valid mappings which are now stale, whether
+ * or not we've got I915_GEM_DOMAIN_CPU.
+ */
+ drm_free(obj_priv->page_cpu_valid, obj->size / PAGE_SIZE,
+ DRM_MEM_DRIVER);
+ obj_priv->page_cpu_valid = NULL;
+}
- /* Wait on any GPU rendering to the object to be flushed. */
+/**
+ * Set the CPU read domain on a range of the object.
+ *
+ * The object ends up with I915_GEM_DOMAIN_CPU in its read flags although it's
+ * not entirely valid. The page_cpu_valid member of the object flags which
+ * pages have been flushed, and will be respected by
+ * i915_gem_object_set_to_cpu_domain() if it's called on to get a valid mapping
+ * of the whole object.
+ *
+ * This function returns when the move is complete, including waiting on
+ * flushes to occur.
+ */
+static int
+i915_gem_object_set_cpu_read_domain_range(struct drm_gem_object *obj,
+ uint64_t offset, uint64_t size)
+{
+ struct drm_i915_gem_object *obj_priv = obj->driver_private;
+ int i, ret;
+
+ if (offset == 0 && size == obj->size)
+ return i915_gem_object_set_to_cpu_domain(obj, 0);
+
+ i915_gem_object_flush_gpu_write_domain(obj);
+ /* Wait on any GPU rendering and flushing to occur. */
ret = i915_gem_object_wait_rendering(obj);
- if (ret)
+ if (ret != 0)
return ret;
+ i915_gem_object_flush_gtt_write_domain(obj);
+ /* If we're already fully in the CPU read domain, we're done. */
+ if (obj_priv->page_cpu_valid == NULL &&
+ (obj->read_domains & I915_GEM_DOMAIN_CPU) != 0)
+ return 0;
+
+ /* Otherwise, create/clear the per-page CPU read domain flag if we're
+ * newly adding I915_GEM_DOMAIN_CPU
+ */
if (obj_priv->page_cpu_valid == NULL) {
obj_priv->page_cpu_valid = drm_calloc(1, obj->size / PAGE_SIZE,
DRM_MEM_DRIVER);
- }
+ if (obj_priv->page_cpu_valid == NULL)
+ return -ENOMEM;
+ } else if ((obj->read_domains & I915_GEM_DOMAIN_CPU) == 0)
+ memset(obj_priv->page_cpu_valid, 0, obj->size / PAGE_SIZE);
/* Flush the cache on any pages that are still invalid from the CPU's
* perspective.
*/
- for (i = offset / PAGE_SIZE; i <= (offset + size - 1) / PAGE_SIZE; i++) {
+ for (i = offset / PAGE_SIZE; i <= (offset + size - 1) / PAGE_SIZE;
+ i++) {
if (obj_priv->page_cpu_valid[i])
continue;
@@ -1476,39 +2125,14 @@ i915_gem_object_set_domain_range(struct drm_gem_object *obj,
obj_priv->page_cpu_valid[i] = 1;
}
- return 0;
-}
-
-/**
- * Once all of the objects have been set in the proper domain,
- * perform the necessary flush and invalidate operations.
- *
- * Returns the write domains flushed, for use in flush tracking.
- */
-static uint32_t
-i915_gem_dev_set_domain(struct drm_device *dev)
-{
- uint32_t flush_domains = dev->flush_domains;
-
- /*
- * Now that all the buffers are synced to the proper domains,
- * flush and invalidate the collected domains
+ /* It should now be out of any other write domains, and we can update
+ * the domain values for our changes.
*/
- if (dev->invalidate_domains | dev->flush_domains) {
-#if WATCH_EXEC
- DRM_INFO("%s: invalidate_domains %08x flush_domains %08x\n",
- __func__,
- dev->invalidate_domains,
- dev->flush_domains);
-#endif
- i915_gem_flush(dev,
- dev->invalidate_domains,
- dev->flush_domains);
- dev->invalidate_domains = 0;
- dev->flush_domains = 0;
- }
+ BUG_ON((obj->write_domain & ~I915_GEM_DOMAIN_CPU) != 0);
- return flush_domains;
+ obj->read_domains |= I915_GEM_DOMAIN_CPU;
+
+ return 0;
}
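page_cpu_valid is just a byte-per-page map: only pages in the requested range that are not already marked valid get clflushed. A standalone simulation of that bookkeeping:

#include <stdint.h>
#include <stdio.h>
#include <string.h>

#define PAGE_SIZE 4096u

/* Mark the pages covering [offset, offset + size) valid, "flushing" only
 * the ones that were not valid yet. Returns how many pages needed a flush. */
static int flush_range(uint8_t *page_valid, uint64_t offset, uint64_t size)
{
	uint64_t i;
	int flushed = 0;

	for (i = offset / PAGE_SIZE; i <= (offset + size - 1) / PAGE_SIZE; i++) {
		if (page_valid[i])
			continue;
		page_valid[i] = 1;	/* drm_clflush_pages() in the driver */
		flushed++;
	}
	return flushed;
}

int main(void)
{
	uint8_t valid[16];

	memset(valid, 0, sizeof(valid));
	printf("%d\n", flush_range(valid, 4096, 8192));	/* 2 */
	printf("%d\n", flush_range(valid, 0, 16384));	/* 2 more */
	return 0;
}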
/**
@@ -1589,6 +2213,18 @@ i915_gem_object_pin_and_relocate(struct drm_gem_object *obj,
return -EINVAL;
}
+ if (reloc.write_domain & I915_GEM_DOMAIN_CPU ||
+ reloc.read_domains & I915_GEM_DOMAIN_CPU) {
+ DRM_ERROR("reloc with read/write CPU domains: "
+ "obj %p target %d offset %d "
+ "read %08x write %08x",
+ obj, reloc.target_handle,
+ (int) reloc.offset,
+ reloc.read_domains,
+ reloc.write_domain);
+ return -EINVAL;
+ }
+
if (reloc.write_domain && target_obj->pending_write_domain &&
reloc.write_domain != target_obj->pending_write_domain) {
DRM_ERROR("Write domain conflict: "
@@ -1629,19 +2265,11 @@ i915_gem_object_pin_and_relocate(struct drm_gem_object *obj,
continue;
}
- /* Now that we're going to actually write some data in,
- * make sure that any rendering using this buffer's contents
- * is completed.
- */
- i915_gem_object_wait_rendering(obj);
-
- /* As we're writing through the gtt, flush
- * any CPU writes before we write the relocations
- */
- if (obj->write_domain & I915_GEM_DOMAIN_CPU) {
- i915_gem_clflush_object(obj);
- drm_agp_chipset_flush(dev);
- obj->write_domain = 0;
+ ret = i915_gem_object_set_to_gtt_domain(obj, 1);
+ if (ret != 0) {
+ drm_gem_object_unreference(target_obj);
+ i915_gem_object_unpin(obj);
+			return ret;
}
/* Map the page containing the relocation we're going to
@@ -1783,6 +2411,7 @@ i915_gem_execbuffer(struct drm_device *dev, void *data,
int ret, i, pinned = 0;
uint64_t exec_offset;
uint32_t seqno, flush_domains;
+ int pin_tries;
#if WATCH_EXEC
DRM_INFO("buffers_ptr %d buffer_count %d len %08x\n",
@@ -1831,14 +2460,7 @@ i915_gem_execbuffer(struct drm_device *dev, void *data,
return -EBUSY;
}
- /* Zero the gloabl flush/invalidate flags. These
- * will be modified as each object is bound to the
- * gtt
- */
- dev->invalidate_domains = 0;
- dev->flush_domains = 0;
-
- /* Look up object handles and perform the relocations */
+ /* Look up object handles */
for (i = 0; i < args->buffer_count; i++) {
object_list[i] = drm_gem_object_lookup(dev, file_priv,
exec_list[i].handle);
@@ -1848,17 +2470,41 @@ i915_gem_execbuffer(struct drm_device *dev, void *data,
ret = -EBADF;
goto err;
}
+ }
- object_list[i]->pending_read_domains = 0;
- object_list[i]->pending_write_domain = 0;
- ret = i915_gem_object_pin_and_relocate(object_list[i],
- file_priv,
- &exec_list[i]);
- if (ret) {
- DRM_ERROR("object bind and relocate failed %d\n", ret);
+ /* Pin and relocate */
+ for (pin_tries = 0; ; pin_tries++) {
+ ret = 0;
+ for (i = 0; i < args->buffer_count; i++) {
+ object_list[i]->pending_read_domains = 0;
+ object_list[i]->pending_write_domain = 0;
+ ret = i915_gem_object_pin_and_relocate(object_list[i],
+ file_priv,
+ &exec_list[i]);
+ if (ret)
+ break;
+ pinned = i + 1;
+ }
+ /* success */
+ if (ret == 0)
+ break;
+
+ /* error other than GTT full, or we've already tried again */
+ if (ret != -ENOMEM || pin_tries >= 1) {
+ if (ret != -ERESTARTSYS)
+ DRM_ERROR("Failed to pin buffers %d\n", ret);
goto err;
}
- pinned = i + 1;
+
+ /* unpin all of our buffers */
+ for (i = 0; i < pinned; i++)
+ i915_gem_object_unpin(object_list[i]);
+ pinned = 0;
+
+ /* evict everyone we can from the aperture */
+ ret = i915_gem_evict_everything(dev);
+ if (ret)
+ goto err;
}
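The pinning strategy here is two-pass: try to pin and relocate every buffer; if the GTT fills up, drop everything that was pinned, evict the whole aperture and retry exactly once before giving up. A compressed model of the control flow, with pin_one(), unpin_one() and evict_all() standing in for the driver calls:

#include <errno.h>

/* Compressed model of the pin/relocate retry loop above; the callbacks
 * stand in for i915_gem_object_pin_and_relocate(), i915_gem_object_unpin()
 * and i915_gem_evict_everything(). */
static int pin_all(int count, int (*pin_one)(int), void (*unpin_one)(int),
		   int (*evict_all)(void))
{
	int pin_tries, pinned = 0, ret, i;

	for (pin_tries = 0; ; pin_tries++) {
		ret = 0;
		for (i = 0; i < count; i++) {
			ret = pin_one(i);
			if (ret)
				break;
			pinned = i + 1;
		}
		if (ret == 0)
			return 0;		/* everything pinned */
		if (ret != -ENOMEM || pin_tries >= 1)
			break;			/* hard failure: bail out */
		/* GTT full on the first try: unpin, evict everything, retry */
		for (i = 0; i < pinned; i++)
			unpin_one(i);
		pinned = 0;
		ret = evict_all();
		if (ret)
			break;
	}
	for (i = 0; i < pinned; i++)
		unpin_one(i);
	return ret;
}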
/* Set the pending read domains for the batch buffer to COMMAND */
@@ -1868,32 +2514,37 @@ i915_gem_execbuffer(struct drm_device *dev, void *data,
i915_verify_inactive(dev, __FILE__, __LINE__);
+ /* Zero the global flush/invalidate flags. These
+ * will be modified as new domains are computed
+ * for each object
+ */
+ dev->invalidate_domains = 0;
+ dev->flush_domains = 0;
+
for (i = 0; i < args->buffer_count; i++) {
struct drm_gem_object *obj = object_list[i];
- struct drm_i915_gem_object *obj_priv = obj->driver_private;
- if (obj_priv->gtt_space == NULL) {
- /* We evicted the buffer in the process of validating
- * our set of buffers in. We could try to recover by
- * kicking them everything out and trying again from
- * the start.
- */
- ret = -ENOMEM;
- goto err;
- }
-
- /* make sure all previous memory operations have passed */
- ret = i915_gem_object_set_domain(obj,
- obj->pending_read_domains,
- obj->pending_write_domain);
- if (ret)
- goto err;
+ /* Compute new gpu domains and update invalidate/flush */
+ i915_gem_object_set_to_gpu_domain(obj,
+ obj->pending_read_domains,
+ obj->pending_write_domain);
}
i915_verify_inactive(dev, __FILE__, __LINE__);
- /* Flush/invalidate caches and chipset buffer */
- flush_domains = i915_gem_dev_set_domain(dev);
+ if (dev->invalidate_domains | dev->flush_domains) {
+#if WATCH_EXEC
+ DRM_INFO("%s: invalidate_domains %08x flush_domains %08x\n",
+ __func__,
+ dev->invalidate_domains,
+ dev->flush_domains);
+#endif
+ i915_gem_flush(dev,
+ dev->invalidate_domains,
+ dev->flush_domains);
+ if (dev->flush_domains)
+ (void)i915_add_request(dev, dev->flush_domains);
+ }
i915_verify_inactive(dev, __FILE__, __LINE__);
@@ -1913,8 +2564,6 @@ i915_gem_execbuffer(struct drm_device *dev, void *data,
~0);
#endif
- (void)i915_add_request(dev, flush_domains);
-
/* Exec the batchbuffer */
ret = i915_dispatch_gem_execbuffer(dev, args, exec_offset);
if (ret) {
@@ -1942,10 +2591,8 @@ i915_gem_execbuffer(struct drm_device *dev, void *data,
i915_file_priv->mm.last_gem_seqno = seqno;
for (i = 0; i < args->buffer_count; i++) {
struct drm_gem_object *obj = object_list[i];
- struct drm_i915_gem_object *obj_priv = obj->driver_private;
- i915_gem_object_move_to_active(obj);
- obj_priv->last_rendering_seqno = seqno;
+ i915_gem_object_move_to_active(obj, seqno);
#if WATCH_LRU
DRM_INFO("%s: move to exec list %p\n", __func__, obj);
#endif
@@ -1966,13 +2613,12 @@ i915_gem_execbuffer(struct drm_device *dev, void *data,
"back to user (%d)\n",
args->buffer_count, ret);
err:
- if (object_list != NULL) {
- for (i = 0; i < pinned; i++)
- i915_gem_object_unpin(object_list[i]);
+ for (i = 0; i < pinned; i++)
+ i915_gem_object_unpin(object_list[i]);
+
+ for (i = 0; i < args->buffer_count; i++)
+ drm_gem_object_unreference(object_list[i]);
- for (i = 0; i < args->buffer_count; i++)
- drm_gem_object_unreference(object_list[i]);
- }
mutex_unlock(&dev->struct_mutex);
pre_mutex_err:
@@ -1995,7 +2641,8 @@ i915_gem_object_pin(struct drm_gem_object *obj, uint32_t alignment)
if (obj_priv->gtt_space == NULL) {
ret = i915_gem_object_bind_to_gtt(obj, alignment);
if (ret != 0) {
- DRM_ERROR("Failure to bind: %d", ret);
+ if (ret != -ERESTARTSYS)
+ DRM_ERROR("Failure to bind: %d", ret);
return ret;
}
}
@@ -2066,21 +2713,28 @@ i915_gem_pin_ioctl(struct drm_device *dev, void *data,
}
obj_priv = obj->driver_private;
- ret = i915_gem_object_pin(obj, args->alignment);
- if (ret != 0) {
- drm_gem_object_unreference(obj);
+ if (obj_priv->pin_filp != NULL && obj_priv->pin_filp != file_priv) {
+ DRM_ERROR("Already pinned in i915_gem_pin_ioctl(): %d\n",
+ args->handle);
mutex_unlock(&dev->struct_mutex);
- return ret;
+ return -EINVAL;
+ }
+
+ obj_priv->user_pin_count++;
+ obj_priv->pin_filp = file_priv;
+ if (obj_priv->user_pin_count == 1) {
+ ret = i915_gem_object_pin(obj, args->alignment);
+ if (ret != 0) {
+ drm_gem_object_unreference(obj);
+ mutex_unlock(&dev->struct_mutex);
+ return ret;
+ }
}
/* XXX - flush the CPU caches for pinned objects
* as the X server doesn't manage domains yet
*/
- if (obj->write_domain & I915_GEM_DOMAIN_CPU) {
- i915_gem_clflush_object(obj);
- drm_agp_chipset_flush(dev);
- obj->write_domain = 0;
- }
+ i915_gem_object_flush_cpu_write_domain(obj);
args->offset = obj_priv->gtt_offset;
drm_gem_object_unreference(obj);
mutex_unlock(&dev->struct_mutex);
@@ -2094,6 +2748,7 @@ i915_gem_unpin_ioctl(struct drm_device *dev, void *data,
{
struct drm_i915_gem_pin *args = data;
struct drm_gem_object *obj;
+ struct drm_i915_gem_object *obj_priv;
mutex_lock(&dev->struct_mutex);
@@ -2105,7 +2760,19 @@ i915_gem_unpin_ioctl(struct drm_device *dev, void *data,
return -EBADF;
}
- i915_gem_object_unpin(obj);
+ obj_priv = obj->driver_private;
+ if (obj_priv->pin_filp != file_priv) {
+		DRM_ERROR("Not pinned by caller in i915_gem_unpin_ioctl(): %d\n",
+ args->handle);
+ drm_gem_object_unreference(obj);
+ mutex_unlock(&dev->struct_mutex);
+ return -EINVAL;
+ }
+ obj_priv->user_pin_count--;
+ if (obj_priv->user_pin_count == 0) {
+ obj_priv->pin_filp = NULL;
+ i915_gem_object_unpin(obj);
+ }
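User pins are now reference-counted per object and owned by the file that created them: the object is hardware-pinned on the first user pin, unpinned on the last, and only the owning file may add or drop pins. A toy model of that bookkeeping:

#include <errno.h>
#include <stddef.h>

struct toy_bo {				/* the user_pin_count/pin_filp pair */
	int user_pin_count;
	const void *pin_filp;
};

static int user_pin(struct toy_bo *bo, const void *filp)
{
	if (bo->pin_filp != NULL && bo->pin_filp != filp)
		return -EINVAL;		/* pinned by a different file */
	bo->pin_filp = filp;
	bo->user_pin_count++;
	if (bo->user_pin_count == 1) {
		/* i915_gem_object_pin() would run here */
	}
	return 0;
}

static int user_unpin(struct toy_bo *bo, const void *filp)
{
	if (bo->pin_filp != filp)
		return -EINVAL;		/* not pinned by this caller */
	bo->user_pin_count--;
	if (bo->user_pin_count == 0) {
		bo->pin_filp = NULL;
		/* i915_gem_object_unpin() would run here */
	}
	return 0;
}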
drm_gem_object_unreference(obj);
mutex_unlock(&dev->struct_mutex);
@@ -2130,7 +2797,14 @@ i915_gem_busy_ioctl(struct drm_device *dev, void *data,
}
obj_priv = obj->driver_private;
- args->busy = obj_priv->active;
+ /* Don't count being on the flushing list against the object being
+ * done. Otherwise, a buffer left on the flushing list but not getting
+ * flushed (because nobody's flushing that domain) won't ever return
+ * unbusy and get reused by libdrm's bo cache. The other expected
+	 * consumer of this interface, OpenGL's occlusion queries, also
+	 * specifies that the objects become unbusy "eventually" without any
+	 * interference.
+ */
+ args->busy = obj_priv->active && obj_priv->last_rendering_seqno != 0;
drm_gem_object_unreference(obj);
mutex_unlock(&dev->struct_mutex);
@@ -2165,12 +2839,18 @@ int i915_gem_init_object(struct drm_gem_object *obj)
obj->driver_private = obj_priv;
obj_priv->obj = obj;
+ obj_priv->fence_reg = I915_FENCE_REG_NONE;
INIT_LIST_HEAD(&obj_priv->list);
+
return 0;
}
void i915_gem_free_object(struct drm_gem_object *obj)
{
+ struct drm_device *dev = obj->dev;
+ struct drm_gem_mm *mm = dev->mm_private;
+ struct drm_map_list *list;
+ struct drm_map *map;
struct drm_i915_gem_object *obj_priv = obj->driver_private;
while (obj_priv->pin_count > 0)
@@ -2178,31 +2858,22 @@ void i915_gem_free_object(struct drm_gem_object *obj)
i915_gem_object_unbind(obj);
- drm_free(obj_priv->page_cpu_valid, 1, DRM_MEM_DRIVER);
- drm_free(obj->driver_private, 1, DRM_MEM_DRIVER);
-}
+ list = &obj->map_list;
+ drm_ht_remove_item(&mm->offset_hash, &list->hash);
-static int
-i915_gem_set_domain(struct drm_gem_object *obj,
- struct drm_file *file_priv,
- uint32_t read_domains,
- uint32_t write_domain)
-{
- struct drm_device *dev = obj->dev;
- int ret;
- uint32_t flush_domains;
-
- BUG_ON(!mutex_is_locked(&dev->struct_mutex));
-
- ret = i915_gem_object_set_domain(obj, read_domains, write_domain);
- if (ret)
- return ret;
- flush_domains = i915_gem_dev_set_domain(obj->dev);
+ if (list->file_offset_node) {
+ drm_mm_put_block(list->file_offset_node);
+ list->file_offset_node = NULL;
+ }
- if (flush_domains & ~(I915_GEM_DOMAIN_CPU|I915_GEM_DOMAIN_GTT))
- (void) i915_add_request(dev, flush_domains);
+ map = list->map;
+ if (map) {
+ drm_free(map, sizeof(*map), DRM_MEM_DRIVER);
+ list->map = NULL;
+ }
- return 0;
+ drm_free(obj_priv->page_cpu_valid, 1, DRM_MEM_DRIVER);
+ drm_free(obj->driver_private, 1, DRM_MEM_DRIVER);
}
/** Unbinds all objects that are on the given buffer list. */
@@ -2269,8 +2940,7 @@ i915_gem_idle(struct drm_device *dev)
*/
i915_gem_flush(dev, ~(I915_GEM_DOMAIN_CPU|I915_GEM_DOMAIN_GTT),
~(I915_GEM_DOMAIN_CPU|I915_GEM_DOMAIN_GTT));
- seqno = i915_add_request(dev, ~(I915_GEM_DOMAIN_CPU |
- I915_GEM_DOMAIN_GTT));
+ seqno = i915_add_request(dev, ~I915_GEM_DOMAIN_CPU);
if (seqno == 0) {
mutex_unlock(&dev->struct_mutex);
@@ -2299,29 +2969,52 @@ i915_gem_idle(struct drm_device *dev)
i915_gem_retire_requests(dev);
- /* Active and flushing should now be empty as we've
- * waited for a sequence higher than any pending execbuffer
- */
- BUG_ON(!list_empty(&dev_priv->mm.active_list));
- BUG_ON(!list_empty(&dev_priv->mm.flushing_list));
+ if (!dev_priv->mm.wedged) {
+ /* Active and flushing should now be empty as we've
+ * waited for a sequence higher than any pending execbuffer
+ */
+ WARN_ON(!list_empty(&dev_priv->mm.active_list));
+ WARN_ON(!list_empty(&dev_priv->mm.flushing_list));
+ /* Request should now be empty as we've also waited
+ * for the last request in the list
+ */
+ WARN_ON(!list_empty(&dev_priv->mm.request_list));
+ }
- /* Request should now be empty as we've also waited
- * for the last request in the list
+ /* Empty the active and flushing lists to inactive. If there's
+ * anything left at this point, it means that we're wedged and
+ * nothing good's going to happen by leaving them there. So strip
+ * the GPU domains and just stuff them onto inactive.
*/
- BUG_ON(!list_empty(&dev_priv->mm.request_list));
+ while (!list_empty(&dev_priv->mm.active_list)) {
+ struct drm_i915_gem_object *obj_priv;
- /* Move all buffers out of the GTT. */
+ obj_priv = list_first_entry(&dev_priv->mm.active_list,
+ struct drm_i915_gem_object,
+ list);
+ obj_priv->obj->write_domain &= ~I915_GEM_GPU_DOMAINS;
+ i915_gem_object_move_to_inactive(obj_priv->obj);
+ }
+
+ while (!list_empty(&dev_priv->mm.flushing_list)) {
+ struct drm_i915_gem_object *obj_priv;
+
+ obj_priv = list_first_entry(&dev_priv->mm.flushing_list,
+ struct drm_i915_gem_object,
+ list);
+ obj_priv->obj->write_domain &= ~I915_GEM_GPU_DOMAINS;
+ i915_gem_object_move_to_inactive(obj_priv->obj);
+ }
+
+ /* Move all inactive buffers out of the GTT. */
ret = i915_gem_evict_from_list(dev, &dev_priv->mm.inactive_list);
+ WARN_ON(!list_empty(&dev_priv->mm.inactive_list));
if (ret) {
mutex_unlock(&dev->struct_mutex);
return ret;
}
- BUG_ON(!list_empty(&dev_priv->mm.active_list));
- BUG_ON(!list_empty(&dev_priv->mm.flushing_list));
- BUG_ON(!list_empty(&dev_priv->mm.inactive_list));
- BUG_ON(!list_empty(&dev_priv->mm.request_list));
-
i915_gem_cleanup_ringbuffer(dev);
mutex_unlock(&dev->struct_mutex);
@@ -2374,12 +3067,13 @@ i915_gem_init_hws(struct drm_device *dev)
return 0;
}
-static int
+int
i915_gem_init_ringbuffer(struct drm_device *dev)
{
drm_i915_private_t *dev_priv = dev->dev_private;
struct drm_gem_object *obj;
struct drm_i915_gem_object *obj_priv;
+ drm_i915_ring_buffer_t *ring = &dev_priv->ring;
int ret;
u32 head;
@@ -2401,24 +3095,24 @@ i915_gem_init_ringbuffer(struct drm_device *dev)
}
/* Set up the kernel mapping for the ring. */
- dev_priv->ring.Size = obj->size;
- dev_priv->ring.tail_mask = obj->size - 1;
+ ring->Size = obj->size;
+ ring->tail_mask = obj->size - 1;
- dev_priv->ring.map.offset = dev->agp->base + obj_priv->gtt_offset;
- dev_priv->ring.map.size = obj->size;
- dev_priv->ring.map.type = 0;
- dev_priv->ring.map.flags = 0;
- dev_priv->ring.map.mtrr = 0;
+ ring->map.offset = dev->agp->base + obj_priv->gtt_offset;
+ ring->map.size = obj->size;
+ ring->map.type = 0;
+ ring->map.flags = 0;
+ ring->map.mtrr = 0;
- drm_core_ioremap_wc(&dev_priv->ring.map, dev);
- if (dev_priv->ring.map.handle == NULL) {
+ drm_core_ioremap_wc(&ring->map, dev);
+ if (ring->map.handle == NULL) {
DRM_ERROR("Failed to map ringbuffer.\n");
memset(&dev_priv->ring, 0, sizeof(dev_priv->ring));
drm_gem_object_unreference(obj);
return -EINVAL;
}
- dev_priv->ring.ring_obj = obj;
- dev_priv->ring.virtual_start = dev_priv->ring.map.handle;
+ ring->ring_obj = obj;
+ ring->virtual_start = ring->map.handle;
/* Stop the ring if it's running. */
I915_WRITE(PRB0_CTL, 0);
@@ -2466,12 +3160,20 @@ i915_gem_init_ringbuffer(struct drm_device *dev)
}
/* Update our cache of the ring state */
- i915_kernel_lost_context(dev);
+ if (!drm_core_check_feature(dev, DRIVER_MODESET))
+ i915_kernel_lost_context(dev);
+ else {
+ ring->head = I915_READ(PRB0_HEAD) & HEAD_ADDR;
+ ring->tail = I915_READ(PRB0_TAIL) & TAIL_ADDR;
+ ring->space = ring->head - (ring->tail + 8);
+ if (ring->space < 0)
+ ring->space += ring->Size;
+ }
return 0;
}
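When KMS is active the ring bookkeeping is refreshed straight from the hardware head/tail registers; free space is the distance from tail back around to head, less an 8-byte gap so that a completely full ring is never read as an empty one. The arithmetic as a standalone sketch:

#include <stdio.h>

/* Free space in a circular ring: distance from tail back around to head,
 * minus the 8-byte slack kept between tail and head. */
static int ring_space(unsigned int head, unsigned int tail, unsigned int size)
{
	int space = head - (tail + 8);

	if (space < 0)
		space += size;
	return space;
}

int main(void)
{
	printf("%d\n", ring_space(0, 0, 65536));	/* 65528: empty ring */
	printf("%d\n", ring_space(4096, 256, 65536));	/* 3832 */
	return 0;
}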
-static void
+void
i915_gem_cleanup_ringbuffer(struct drm_device *dev)
{
drm_i915_private_t *dev_priv = dev->dev_private;
@@ -2509,6 +3211,9 @@ i915_gem_entervt_ioctl(struct drm_device *dev, void *data,
drm_i915_private_t *dev_priv = dev->dev_private;
int ret;
+ if (drm_core_check_feature(dev, DRIVER_MODESET))
+ return 0;
+
if (dev_priv->mm.wedged) {
DRM_ERROR("Reenabling wedged hardware, good luck\n");
dev_priv->mm.wedged = 0;
@@ -2542,6 +3247,9 @@ i915_gem_leavevt_ioctl(struct drm_device *dev, void *data,
drm_i915_private_t *dev_priv = dev->dev_private;
int ret;
+ if (drm_core_check_feature(dev, DRIVER_MODESET))
+ return 0;
+
ret = i915_gem_idle(dev);
drm_irq_uninstall(dev);
@@ -2572,5 +3280,13 @@ i915_gem_load(struct drm_device *dev)
i915_gem_retire_work_handler);
dev_priv->mm.next_gem_seqno = 1;
+ /* Old X drivers will take 0-2 for front, back, depth buffers */
+ dev_priv->fence_reg_start = 3;
+
+ if (IS_I965G(dev))
+ dev_priv->num_fence_regs = 16;
+ else
+ dev_priv->num_fence_regs = 8;
+
i915_gem_detect_bit_6_swizzle(dev);
}
diff --git a/drivers/gpu/drm/i915/i915_gem_proc.c b/drivers/gpu/drm/i915/i915_gem_proc.c
index 93de15b4c9a..4d1b9de0cd8 100644
--- a/drivers/gpu/drm/i915/i915_gem_proc.c
+++ b/drivers/gpu/drm/i915/i915_gem_proc.c
@@ -166,10 +166,9 @@ static int i915_gem_request_info(char *buf, char **start, off_t offset,
list_for_each_entry(gem_request, &dev_priv->mm.request_list,
list)
{
- DRM_PROC_PRINT(" %d @ %d %08x\n",
+ DRM_PROC_PRINT(" %d @ %d\n",
gem_request->seqno,
- (int) (jiffies - gem_request->emitted_jiffies),
- gem_request->flush_domains);
+ (int) (jiffies - gem_request->emitted_jiffies));
}
if (len > request + offset)
return request;
@@ -251,6 +250,39 @@ static int i915_interrupt_info(char *buf, char **start, off_t offset,
return len - offset;
}
+static int i915_hws_info(char *buf, char **start, off_t offset,
+ int request, int *eof, void *data)
+{
+ struct drm_minor *minor = (struct drm_minor *) data;
+ struct drm_device *dev = minor->dev;
+ drm_i915_private_t *dev_priv = dev->dev_private;
+ int len = 0, i;
+ volatile u32 *hws;
+
+ if (offset > DRM_PROC_LIMIT) {
+ *eof = 1;
+ return 0;
+ }
+
+ hws = (volatile u32 *)dev_priv->hw_status_page;
+ if (hws == NULL) {
+ *eof = 1;
+ return 0;
+ }
+
+ *start = &buf[offset];
+ *eof = 0;
+ for (i = 0; i < 4096 / sizeof(u32) / 4; i += 4) {
+ DRM_PROC_PRINT("0x%08x: 0x%08x 0x%08x 0x%08x 0x%08x\n",
+ i * 4,
+ hws[i], hws[i + 1], hws[i + 2], hws[i + 3]);
+ }
+ if (len > request + offset)
+ return request;
+ *eof = 1;
+ return len - offset;
+}
+
static struct drm_proc_list {
/** file name */
const char *name;
@@ -263,6 +295,7 @@ static struct drm_proc_list {
{"i915_gem_request", i915_gem_request_info},
{"i915_gem_seqno", i915_gem_seqno_info},
{"i915_gem_interrupt", i915_interrupt_info},
+ {"i915_gem_hws", i915_hws_info},
};
#define I915_GEM_PROC_ENTRIES ARRAY_SIZE(i915_gem_proc_list)
diff --git a/drivers/gpu/drm/i915/i915_gem_tiling.c b/drivers/gpu/drm/i915/i915_gem_tiling.c
index e8b85ac4ca0..241f39b7f46 100644
--- a/drivers/gpu/drm/i915/i915_gem_tiling.c
+++ b/drivers/gpu/drm/i915/i915_gem_tiling.c
@@ -119,9 +119,10 @@ i915_gem_detect_bit_6_swizzle(struct drm_device *dev)
dcc & DCC_CHANNEL_XOR_DISABLE) {
swizzle_x = I915_BIT_6_SWIZZLE_9_10;
swizzle_y = I915_BIT_6_SWIZZLE_9;
- } else if (IS_I965GM(dev) || IS_GM45(dev)) {
- /* GM965 only does bit 11-based channel
- * randomization
+ } else if ((IS_I965GM(dev) || IS_GM45(dev)) &&
+ (dcc & DCC_CHANNEL_XOR_BIT_17) == 0) {
+ /* GM965/GM45 does either bit 11 or bit 17
+ * swizzling.
*/
swizzle_x = I915_BIT_6_SWIZZLE_9_10_11;
swizzle_y = I915_BIT_6_SWIZZLE_9_11;
@@ -207,6 +208,7 @@ i915_gem_set_tiling(struct drm_device *dev, void *data,
}
}
obj_priv->tiling_mode = args->tiling_mode;
+ obj_priv->stride = args->stride;
mutex_unlock(&dev->struct_mutex);
diff --git a/drivers/gpu/drm/i915/i915_irq.c b/drivers/gpu/drm/i915/i915_irq.c
index 82752d6177a..0cadafbef41 100644
--- a/drivers/gpu/drm/i915/i915_irq.c
+++ b/drivers/gpu/drm/i915/i915_irq.c
@@ -30,14 +30,36 @@
#include "drm.h"
#include "i915_drm.h"
#include "i915_drv.h"
+#include "intel_drv.h"
#define MAX_NOPID ((u32)~0)
-/** These are the interrupts used by the driver */
-#define I915_INTERRUPT_ENABLE_MASK (I915_USER_INTERRUPT | \
- I915_ASLE_INTERRUPT | \
- I915_DISPLAY_PIPE_A_EVENT_INTERRUPT | \
- I915_DISPLAY_PIPE_B_EVENT_INTERRUPT)
+/**
+ * Interrupts that are always left unmasked.
+ *
+ * Since pipe events are edge-triggered from the PIPESTAT register to IIR,
+ * we leave them always unmasked in IMR and then control enabling them through
+ * PIPESTAT alone.
+ */
+#define I915_INTERRUPT_ENABLE_FIX (I915_ASLE_INTERRUPT | \
+ I915_DISPLAY_PIPE_A_EVENT_INTERRUPT | \
+ I915_DISPLAY_PIPE_B_EVENT_INTERRUPT)
+
+/** Interrupts that we mask and unmask at runtime. */
+#define I915_INTERRUPT_ENABLE_VAR (I915_USER_INTERRUPT)
+
+/** These are all of the interrupts used by the driver */
+#define I915_INTERRUPT_ENABLE_MASK (I915_INTERRUPT_ENABLE_FIX | \
+ I915_INTERRUPT_ENABLE_VAR)
+
+#define I915_PIPE_VBLANK_STATUS (PIPE_START_VBLANK_INTERRUPT_STATUS |\
+ PIPE_VBLANK_INTERRUPT_STATUS)
+
+#define I915_PIPE_VBLANK_ENABLE (PIPE_START_VBLANK_INTERRUPT_ENABLE |\
+ PIPE_VBLANK_INTERRUPT_ENABLE)
+
+#define DRM_I915_VBLANK_PIPE_ALL (DRM_I915_VBLANK_PIPE_A | \
+ DRM_I915_VBLANK_PIPE_B)
void
i915_enable_irq(drm_i915_private_t *dev_priv, u32 mask)
@@ -59,6 +81,41 @@ i915_disable_irq(drm_i915_private_t *dev_priv, u32 mask)
}
}
+static inline u32
+i915_pipestat(int pipe)
+{
+ if (pipe == 0)
+ return PIPEASTAT;
+ if (pipe == 1)
+ return PIPEBSTAT;
+ BUG();
+}
+
+void
+i915_enable_pipestat(drm_i915_private_t *dev_priv, int pipe, u32 mask)
+{
+ if ((dev_priv->pipestat[pipe] & mask) != mask) {
+ u32 reg = i915_pipestat(pipe);
+
+ dev_priv->pipestat[pipe] |= mask;
+ /* Enable the interrupt, clear any pending status */
+ I915_WRITE(reg, dev_priv->pipestat[pipe] | (mask >> 16));
+ (void) I915_READ(reg);
+ }
+}
+
+void
+i915_disable_pipestat(drm_i915_private_t *dev_priv, int pipe, u32 mask)
+{
+ if ((dev_priv->pipestat[pipe] & mask) != 0) {
+ u32 reg = i915_pipestat(pipe);
+
+ dev_priv->pipestat[pipe] &= ~mask;
+ I915_WRITE(reg, dev_priv->pipestat[pipe]);
+ (void) I915_READ(reg);
+ }
+}
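PIPESTAT packs enable bits in the upper half of the register and write-one-to-clear status bits sixteen positions lower, which is why enabling an event also writes mask >> 16 to acknowledge any stale status in the same store. A small sketch of the value written on enable; the bit position is a stand-in mirroring PIPE_VBLANK_INTERRUPT_ENABLE and should be treated as an assumption:

#include <stdint.h>
#include <stdio.h>

/* Combine the new enable bits with the matching status-acknowledge bits,
 * as i915_enable_pipestat() does. */
static uint32_t pipestat_enable_write(uint32_t current_enables, uint32_t mask)
{
	return (current_enables | mask) | (mask >> 16);
}

int main(void)
{
	uint32_t vblank_enable = 1u << 17;	/* assumed enable-bit position */

	printf("0x%08x\n", pipestat_enable_write(0, vblank_enable));
	/* -> 0x00020002: enable bit plus its status-ack bit */
	return 0;
}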
+
/**
* i915_pipe_enabled - check if a pipe is enabled
* @dev: DRM device
@@ -121,85 +178,113 @@ irqreturn_t i915_driver_irq_handler(DRM_IRQ_ARGS)
{
struct drm_device *dev = (struct drm_device *) arg;
drm_i915_private_t *dev_priv = (drm_i915_private_t *) dev->dev_private;
- u32 iir;
+ struct drm_i915_master_private *master_priv;
+ u32 iir, new_iir;
u32 pipea_stats, pipeb_stats;
+ u32 vblank_status;
+ u32 vblank_enable;
int vblank = 0;
+ unsigned long irqflags;
+ int irq_received;
+ int ret = IRQ_NONE;
atomic_inc(&dev_priv->irq_received);
- if (dev->pdev->msi_enabled)
- I915_WRITE(IMR, ~0);
iir = I915_READ(IIR);
- if (iir == 0) {
- if (dev->pdev->msi_enabled) {
- I915_WRITE(IMR, dev_priv->irq_mask_reg);
- (void) I915_READ(IMR);
- }
- return IRQ_NONE;
+ if (IS_I965G(dev)) {
+ vblank_status = I915_START_VBLANK_INTERRUPT_STATUS;
+ vblank_enable = PIPE_START_VBLANK_INTERRUPT_ENABLE;
+ } else {
+ vblank_status = I915_VBLANK_INTERRUPT_STATUS;
+ vblank_enable = I915_VBLANK_INTERRUPT_ENABLE;
}
- /*
- * Clear the PIPE(A|B)STAT regs before the IIR otherwise
- * we may get extra interrupts.
- */
- if (iir & I915_DISPLAY_PIPE_A_EVENT_INTERRUPT) {
+ for (;;) {
+ irq_received = iir != 0;
+
+ /* Can't rely on pipestat interrupt bit in iir as it might
+ * have been cleared after the pipestat interrupt was received.
+ * It doesn't set the bit in iir again, but it still produces
+ * interrupts (for non-MSI).
+ */
+ spin_lock_irqsave(&dev_priv->user_irq_lock, irqflags);
pipea_stats = I915_READ(PIPEASTAT);
- if (!(dev_priv->vblank_pipe & DRM_I915_VBLANK_PIPE_A))
- pipea_stats &= ~(PIPE_START_VBLANK_INTERRUPT_ENABLE |
- PIPE_VBLANK_INTERRUPT_ENABLE);
- else if (pipea_stats & (PIPE_START_VBLANK_INTERRUPT_STATUS|
- PIPE_VBLANK_INTERRUPT_STATUS)) {
+ pipeb_stats = I915_READ(PIPEBSTAT);
+
+ /*
+ * Clear the PIPE(A|B)STAT regs before the IIR
+ */
+ if (pipea_stats & 0x8000ffff) {
+ I915_WRITE(PIPEASTAT, pipea_stats);
+ irq_received = 1;
+ }
+
+ if (pipeb_stats & 0x8000ffff) {
+ I915_WRITE(PIPEBSTAT, pipeb_stats);
+ irq_received = 1;
+ }
+ spin_unlock_irqrestore(&dev_priv->user_irq_lock, irqflags);
+
+ if (!irq_received)
+ break;
+
+ ret = IRQ_HANDLED;
+
+ I915_WRITE(IIR, iir);
+ new_iir = I915_READ(IIR); /* Flush posted writes */
+
+ if (dev->primary->master) {
+ master_priv = dev->primary->master->driver_priv;
+ if (master_priv->sarea_priv)
+ master_priv->sarea_priv->last_dispatch =
+ READ_BREADCRUMB(dev_priv);
+ }
+
+ if (iir & I915_USER_INTERRUPT) {
+ dev_priv->mm.irq_gem_seqno = i915_get_gem_seqno(dev);
+ DRM_WAKEUP(&dev_priv->irq_queue);
+ }
+
+ if (pipea_stats & vblank_status) {
vblank++;
drm_handle_vblank(dev, 0);
}
- I915_WRITE(PIPEASTAT, pipea_stats);
- }
- if (iir & I915_DISPLAY_PIPE_B_EVENT_INTERRUPT) {
- pipeb_stats = I915_READ(PIPEBSTAT);
- /* Ack the event */
- I915_WRITE(PIPEBSTAT, pipeb_stats);
-
- /* The vblank interrupt gets enabled even if we didn't ask for
- it, so make sure it's shut down again */
- if (!(dev_priv->vblank_pipe & DRM_I915_VBLANK_PIPE_B))
- pipeb_stats &= ~(PIPE_START_VBLANK_INTERRUPT_ENABLE |
- PIPE_VBLANK_INTERRUPT_ENABLE);
- else if (pipeb_stats & (PIPE_START_VBLANK_INTERRUPT_STATUS|
- PIPE_VBLANK_INTERRUPT_STATUS)) {
+ if (pipeb_stats & vblank_status) {
vblank++;
drm_handle_vblank(dev, 1);
}
- if (pipeb_stats & I915_LEGACY_BLC_EVENT_STATUS)
+ if ((pipeb_stats & I915_LEGACY_BLC_EVENT_STATUS) ||
+ (iir & I915_ASLE_INTERRUPT))
opregion_asle_intr(dev);
- I915_WRITE(PIPEBSTAT, pipeb_stats);
- }
-
- I915_WRITE(IIR, iir);
- if (dev->pdev->msi_enabled)
- I915_WRITE(IMR, dev_priv->irq_mask_reg);
- (void) I915_READ(IIR); /* Flush posted writes */
-
- if (dev_priv->sarea_priv)
- dev_priv->sarea_priv->last_dispatch =
- READ_BREADCRUMB(dev_priv);
- if (iir & I915_USER_INTERRUPT) {
- dev_priv->mm.irq_gem_seqno = i915_get_gem_seqno(dev);
- DRM_WAKEUP(&dev_priv->irq_queue);
+ /* With MSI, interrupts are only generated when iir
+ * transitions from zero to nonzero. If another bit got
+ * set while we were handling the existing iir bits, then
+ * we would never get another interrupt.
+ *
+ * This is fine on non-MSI as well, as if we hit this path
+ * we avoid exiting the interrupt handler only to generate
+ * another one.
+ *
+ * Note that for MSI this could cause a stray interrupt report
+ * if an interrupt landed in the time between writing IIR and
+ * the posting read. This should be rare enough to never
+ * trigger the 99% of 100,000 interrupts test for disabling
+ * stray interrupts.
+ */
+ iir = new_iir;
}
- if (iir & I915_ASLE_INTERRUPT)
- opregion_asle_intr(dev);
-
- return IRQ_HANDLED;
+ return ret;
}
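
The IIR handling above reduces to an ack-and-reread loop; a minimal sketch of that shape, with handle_bits() as a hypothetical placeholder for the per-bit processing (illustrative only, not part of the patch):

static irqreturn_t example_iir_ack_loop(struct drm_device *dev)
{
	drm_i915_private_t *dev_priv = (drm_i915_private_t *) dev->dev_private;
	irqreturn_t ret = IRQ_NONE;
	u32 iir, new_iir;

	iir = I915_READ(IIR);
	while (iir != 0) {
		ret = IRQ_HANDLED;

		I915_WRITE(IIR, iir);		/* ack the bits about to be handled */
		new_iir = I915_READ(IIR);	/* flush posted write, pick up new bits */

		/* handle_bits(dev, iir);	   hypothetical per-bit processing */

		/* With MSI the next interrupt only fires when IIR goes from
		 * zero to nonzero, so loop on anything that arrived while the
		 * previous bits were being handled instead of returning.
		 */
		iir = new_iir;
	}
	return ret;
}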
static int i915_emit_irq(struct drm_device * dev)
{
drm_i915_private_t *dev_priv = dev->dev_private;
+ struct drm_i915_master_private *master_priv = dev->primary->master->driver_priv;
RING_LOCALS;
i915_kernel_lost_context(dev);
@@ -209,8 +294,8 @@ static int i915_emit_irq(struct drm_device * dev)
dev_priv->counter++;
if (dev_priv->counter > 0x7FFFFFFFUL)
dev_priv->counter = 1;
- if (dev_priv->sarea_priv)
- dev_priv->sarea_priv->last_enqueue = dev_priv->counter;
+ if (master_priv->sarea_priv)
+ master_priv->sarea_priv->last_enqueue = dev_priv->counter;
BEGIN_LP_RING(4);
OUT_RING(MI_STORE_DWORD_INDEX);
@@ -248,21 +333,20 @@ void i915_user_irq_put(struct drm_device *dev)
static int i915_wait_irq(struct drm_device * dev, int irq_nr)
{
drm_i915_private_t *dev_priv = (drm_i915_private_t *) dev->dev_private;
+ struct drm_i915_master_private *master_priv = dev->primary->master->driver_priv;
int ret = 0;
DRM_DEBUG("irq_nr=%d breadcrumb=%d\n", irq_nr,
READ_BREADCRUMB(dev_priv));
if (READ_BREADCRUMB(dev_priv) >= irq_nr) {
- if (dev_priv->sarea_priv) {
- dev_priv->sarea_priv->last_dispatch =
- READ_BREADCRUMB(dev_priv);
- }
+ if (master_priv->sarea_priv)
+ master_priv->sarea_priv->last_dispatch = READ_BREADCRUMB(dev_priv);
return 0;
}
- if (dev_priv->sarea_priv)
- dev_priv->sarea_priv->perf_boxes |= I915_BOX_WAIT;
+ if (master_priv->sarea_priv)
+ master_priv->sarea_priv->perf_boxes |= I915_BOX_WAIT;
i915_user_irq_get(dev);
DRM_WAIT_ON(ret, dev_priv->irq_queue, 3 * DRM_HZ,
@@ -274,10 +358,6 @@ static int i915_wait_irq(struct drm_device * dev, int irq_nr)
READ_BREADCRUMB(dev_priv), (int)dev_priv->counter);
}
- if (dev_priv->sarea_priv)
- dev_priv->sarea_priv->last_dispatch =
- READ_BREADCRUMB(dev_priv);
-
return ret;
}
@@ -330,48 +410,16 @@ int i915_irq_wait(struct drm_device *dev, void *data,
int i915_enable_vblank(struct drm_device *dev, int pipe)
{
drm_i915_private_t *dev_priv = (drm_i915_private_t *) dev->dev_private;
- u32 pipestat_reg = 0;
- u32 pipestat;
- u32 interrupt = 0;
unsigned long irqflags;
- switch (pipe) {
- case 0:
- pipestat_reg = PIPEASTAT;
- interrupt = I915_DISPLAY_PIPE_A_EVENT_INTERRUPT;
- break;
- case 1:
- pipestat_reg = PIPEBSTAT;
- interrupt = I915_DISPLAY_PIPE_B_EVENT_INTERRUPT;
- break;
- default:
- DRM_ERROR("tried to enable vblank on non-existent pipe %d\n",
- pipe);
- return 0;
- }
-
spin_lock_irqsave(&dev_priv->user_irq_lock, irqflags);
- /* Enabling vblank events in IMR comes before PIPESTAT write, or
- * there's a race where the PIPESTAT vblank bit gets set to 1, so
- * the OR of enabled PIPESTAT bits goes to 1, so the PIPExEVENT in
- * ISR flashes to 1, but the IIR bit doesn't get set to 1 because
- * IMR masks it. It doesn't ever get set after we clear the masking
- * in IMR because the ISR bit is edge, not level-triggered, on the
- * OR of PIPESTAT bits.
- */
- i915_enable_irq(dev_priv, interrupt);
- pipestat = I915_READ(pipestat_reg);
if (IS_I965G(dev))
- pipestat |= PIPE_START_VBLANK_INTERRUPT_ENABLE;
+ i915_enable_pipestat(dev_priv, pipe,
+ PIPE_START_VBLANK_INTERRUPT_ENABLE);
else
- pipestat |= PIPE_VBLANK_INTERRUPT_ENABLE;
- /* Clear any stale interrupt status */
- pipestat |= (PIPE_START_VBLANK_INTERRUPT_STATUS |
- PIPE_VBLANK_INTERRUPT_STATUS);
- I915_WRITE(pipestat_reg, pipestat);
- (void) I915_READ(pipestat_reg); /* Posting read */
+ i915_enable_pipestat(dev_priv, pipe,
+ PIPE_VBLANK_INTERRUPT_ENABLE);
spin_unlock_irqrestore(&dev_priv->user_irq_lock, irqflags);
-
return 0;
}
@@ -381,40 +429,23 @@ int i915_enable_vblank(struct drm_device *dev, int pipe)
void i915_disable_vblank(struct drm_device *dev, int pipe)
{
drm_i915_private_t *dev_priv = (drm_i915_private_t *) dev->dev_private;
- u32 pipestat_reg = 0;
- u32 pipestat;
- u32 interrupt = 0;
unsigned long irqflags;
- switch (pipe) {
- case 0:
- pipestat_reg = PIPEASTAT;
- interrupt = I915_DISPLAY_PIPE_A_EVENT_INTERRUPT;
- break;
- case 1:
- pipestat_reg = PIPEBSTAT;
- interrupt = I915_DISPLAY_PIPE_B_EVENT_INTERRUPT;
- break;
- default:
- DRM_ERROR("tried to disable vblank on non-existent pipe %d\n",
- pipe);
- return;
- break;
- }
-
spin_lock_irqsave(&dev_priv->user_irq_lock, irqflags);
- i915_disable_irq(dev_priv, interrupt);
- pipestat = I915_READ(pipestat_reg);
- pipestat &= ~(PIPE_START_VBLANK_INTERRUPT_ENABLE |
- PIPE_VBLANK_INTERRUPT_ENABLE);
- /* Clear any stale interrupt status */
- pipestat |= (PIPE_START_VBLANK_INTERRUPT_STATUS |
- PIPE_VBLANK_INTERRUPT_STATUS);
- I915_WRITE(pipestat_reg, pipestat);
- (void) I915_READ(pipestat_reg); /* Posting read */
+ i915_disable_pipestat(dev_priv, pipe,
+ PIPE_VBLANK_INTERRUPT_ENABLE |
+ PIPE_START_VBLANK_INTERRUPT_ENABLE);
spin_unlock_irqrestore(&dev_priv->user_irq_lock, irqflags);
}
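
i915_enable_pipestat()/i915_disable_pipestat() used above are added elsewhere in i915_irq.c by this patch and are not shown in this hunk; a sketch of the expected shape, assuming the enable mask is cached in dev_priv->pipestat[] and that the pending status bits (low half of the register) are cleared by writing them back:

static void example_enable_pipestat(drm_i915_private_t *dev_priv, int pipe, u32 mask)
{
	u32 reg = (pipe == 0) ? PIPEASTAT : PIPEBSTAT;

	if ((dev_priv->pipestat[pipe] & mask) != mask) {
		dev_priv->pipestat[pipe] |= mask;
		/* Enable the interrupt and clear any pending status; the
		 * status bit sits 16 bits below its enable bit.
		 */
		I915_WRITE(reg, dev_priv->pipestat[pipe] | (mask >> 16));
		(void) I915_READ(reg);	/* posting read */
	}
}

The disable path would be the mirror image: clear the bits in dev_priv->pipestat[pipe] and write the updated value back. The caller holds user_irq_lock, as in i915_enable_vblank()/i915_disable_vblank() above.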
+void i915_enable_interrupt (struct drm_device *dev)
+{
+ struct drm_i915_private *dev_priv = dev->dev_private;
+ opregion_enable_asle(dev);
+ dev_priv->irq_enabled = 1;
+}
+
+
/* Set the vblank monitor pipe
*/
int i915_vblank_pipe_set(struct drm_device *dev, void *data,
@@ -475,33 +506,38 @@ void i915_driver_irq_preinstall(struct drm_device * dev)
{
drm_i915_private_t *dev_priv = (drm_i915_private_t *) dev->dev_private;
+ atomic_set(&dev_priv->irq_received, 0);
+
I915_WRITE(HWSTAM, 0xeffe);
+ I915_WRITE(PIPEASTAT, 0);
+ I915_WRITE(PIPEBSTAT, 0);
I915_WRITE(IMR, 0xffffffff);
I915_WRITE(IER, 0x0);
+ (void) I915_READ(IER);
}
int i915_driver_irq_postinstall(struct drm_device *dev)
{
drm_i915_private_t *dev_priv = (drm_i915_private_t *) dev->dev_private;
- int ret, num_pipes = 2;
-
- /* Set initial unmasked IRQs to just the selected vblank pipes. */
- dev_priv->irq_mask_reg = ~0;
-
- ret = drm_vblank_init(dev, num_pipes);
- if (ret)
- return ret;
dev_priv->vblank_pipe = DRM_I915_VBLANK_PIPE_A | DRM_I915_VBLANK_PIPE_B;
- dev_priv->irq_mask_reg &= ~I915_DISPLAY_PIPE_A_VBLANK_INTERRUPT;
- dev_priv->irq_mask_reg &= ~I915_DISPLAY_PIPE_B_VBLANK_INTERRUPT;
dev->max_vblank_count = 0xffffff; /* only 24 bits of frame count */
- dev_priv->irq_mask_reg &= I915_INTERRUPT_ENABLE_MASK;
+ /* Unmask the interrupts that we always want on. */
+ dev_priv->irq_mask_reg = ~I915_INTERRUPT_ENABLE_FIX;
+
+ dev_priv->pipestat[0] = 0;
+ dev_priv->pipestat[1] = 0;
+
+ /* Disable pipe interrupt enables, clear pending pipe status */
+ I915_WRITE(PIPEASTAT, I915_READ(PIPEASTAT) & 0x8000ffff);
+ I915_WRITE(PIPEBSTAT, I915_READ(PIPEBSTAT) & 0x8000ffff);
+ /* Clear pending interrupt status */
+ I915_WRITE(IIR, I915_READ(IIR));
- I915_WRITE(IMR, dev_priv->irq_mask_reg);
I915_WRITE(IER, I915_INTERRUPT_ENABLE_MASK);
+ I915_WRITE(IMR, dev_priv->irq_mask_reg);
(void) I915_READ(IER);
opregion_enable_asle(dev);
@@ -513,7 +549,6 @@ int i915_driver_irq_postinstall(struct drm_device *dev)
void i915_driver_irq_uninstall(struct drm_device * dev)
{
drm_i915_private_t *dev_priv = (drm_i915_private_t *) dev->dev_private;
- u32 temp;
if (!dev_priv)
return;
@@ -521,13 +556,12 @@ void i915_driver_irq_uninstall(struct drm_device * dev)
dev_priv->vblank_pipe = 0;
I915_WRITE(HWSTAM, 0xffffffff);
+ I915_WRITE(PIPEASTAT, 0);
+ I915_WRITE(PIPEBSTAT, 0);
I915_WRITE(IMR, 0xffffffff);
I915_WRITE(IER, 0x0);
- temp = I915_READ(PIPEASTAT);
- I915_WRITE(PIPEASTAT, temp);
- temp = I915_READ(PIPEBSTAT);
- I915_WRITE(PIPEBSTAT, temp);
- temp = I915_READ(IIR);
- I915_WRITE(IIR, temp);
+ I915_WRITE(PIPEASTAT, I915_READ(PIPEASTAT) & 0x8000ffff);
+ I915_WRITE(PIPEBSTAT, I915_READ(PIPEBSTAT) & 0x8000ffff);
+ I915_WRITE(IIR, I915_READ(IIR));
}
diff --git a/drivers/gpu/drm/i915/i915_mem.c b/drivers/gpu/drm/i915/i915_mem.c
index 6126a60dc9c..96e271986d2 100644
--- a/drivers/gpu/drm/i915/i915_mem.c
+++ b/drivers/gpu/drm/i915/i915_mem.c
@@ -46,7 +46,8 @@
static void mark_block(struct drm_device * dev, struct mem_block *p, int in_use)
{
drm_i915_private_t *dev_priv = dev->dev_private;
- drm_i915_sarea_t *sarea_priv = dev_priv->sarea_priv;
+ struct drm_i915_master_private *master_priv = dev->primary->master->driver_priv;
+ drm_i915_sarea_t *sarea_priv = master_priv->sarea_priv;
struct drm_tex_region *list;
unsigned shift, nr;
unsigned start;
diff --git a/drivers/gpu/drm/i915/i915_opregion.c b/drivers/gpu/drm/i915/i915_opregion.c
index 1787a0c7e3a..ff012835a38 100644
--- a/drivers/gpu/drm/i915/i915_opregion.c
+++ b/drivers/gpu/drm/i915/i915_opregion.c
@@ -235,17 +235,15 @@ void opregion_enable_asle(struct drm_device *dev)
struct opregion_asle *asle = dev_priv->opregion.asle;
if (asle) {
- u32 pipeb_stats = I915_READ(PIPEBSTAT);
if (IS_MOBILE(dev)) {
- /* Many devices trigger events with a write to the
- legacy backlight controller, so we need to ensure
- that it's able to generate interrupts */
- I915_WRITE(PIPEBSTAT, pipeb_stats |=
- I915_LEGACY_BLC_EVENT_ENABLE);
- i915_enable_irq(dev_priv, I915_ASLE_INTERRUPT |
- I915_DISPLAY_PIPE_B_EVENT_INTERRUPT);
- } else
- i915_enable_irq(dev_priv, I915_ASLE_INTERRUPT);
+ unsigned long irqflags;
+
+ spin_lock_irqsave(&dev_priv->user_irq_lock, irqflags);
+ i915_enable_pipestat(dev_priv, 1,
+ I915_LEGACY_BLC_EVENT_ENABLE);
+ spin_unlock_irqrestore(&dev_priv->user_irq_lock,
+ irqflags);
+ }
asle->tche = ASLE_ALS_EN | ASLE_BLC_EN | ASLE_PFIT_EN |
ASLE_PFMB_EN;
@@ -259,8 +257,8 @@ void opregion_enable_asle(struct drm_device *dev)
static struct intel_opregion *system_opregion;
-int intel_opregion_video_event(struct notifier_block *nb, unsigned long val,
- void *data)
+static int intel_opregion_video_event(struct notifier_block *nb,
+ unsigned long val, void *data)
{
/* The only video events relevant to opregion are 0x80. These indicate
either a docking event, lid switch or display switch request. In
diff --git a/drivers/gpu/drm/i915/i915_reg.h b/drivers/gpu/drm/i915/i915_reg.h
index 0e476eba36e..47e6bafeb74 100644
--- a/drivers/gpu/drm/i915/i915_reg.h
+++ b/drivers/gpu/drm/i915/i915_reg.h
@@ -175,9 +175,26 @@
#define DISPLAY_PLANE_B (1<<20)
/*
- * Instruction and interrupt control regs
+ * Fence registers
*/
+#define FENCE_REG_830_0 0x2000
+#define I830_FENCE_START_MASK 0x07f80000
+#define I830_FENCE_TILING_Y_SHIFT 12
+#define I830_FENCE_SIZE_BITS(size) ((get_order(size >> 19) - 1) << 8)
+#define I830_FENCE_PITCH_SHIFT 4
+#define I830_FENCE_REG_VALID (1<<0)
+
+#define I915_FENCE_START_MASK 0x0ff00000
+#define I915_FENCE_SIZE_BITS(size) ((get_order(size >> 20) - 1) << 8)
+#define FENCE_REG_965_0 0x03000
+#define I965_FENCE_PITCH_SHIFT 2
+#define I965_FENCE_TILING_Y_SHIFT 1
+#define I965_FENCE_REG_VALID (1<<0)
+
+/*
+ * Instruction and interrupt control regs
+ */
#define PRB0_TAIL 0x02030
#define PRB0_HEAD 0x02034
#define PRB0_START 0x02038
@@ -245,6 +262,7 @@
#define CM0_RC_OP_FLUSH_DISABLE (1<<0)
#define GFX_FLSH_CNTL 0x02170 /* 915+ only */
+
/*
* Framebuffer compression (915+ only)
*/
@@ -522,6 +540,7 @@
#define DCC_ADDRESSING_MODE_DUAL_CHANNEL_INTERLEAVED (2 << 0)
#define DCC_ADDRESSING_MODE_MASK (3 << 0)
#define DCC_CHANNEL_XOR_DISABLE (1 << 10)
+#define DCC_CHANNEL_XOR_BIT_17 (1 << 9)
/** 965 MCH register controlling DRAM channel configuration */
#define C0DRB3 0x10206
diff --git a/drivers/gpu/drm/i915/i915_suspend.c b/drivers/gpu/drm/i915/i915_suspend.c
index 5ddc6e595c0..5d84027ee8f 100644
--- a/drivers/gpu/drm/i915/i915_suspend.c
+++ b/drivers/gpu/drm/i915/i915_suspend.c
@@ -244,6 +244,9 @@ int i915_save_state(struct drm_device *dev)
if (IS_I965G(dev) && IS_MOBILE(dev))
dev_priv->saveRENDERSTANDBY = I915_READ(MCHBAR_RENDER_STANDBY);
+ /* Hardware status page */
+ dev_priv->saveHWS = I915_READ(HWS_PGA);
+
/* Display arbitration control */
dev_priv->saveDSPARB = I915_READ(DSPARB);
@@ -373,6 +376,9 @@ int i915_restore_state(struct drm_device *dev)
if (IS_I965G(dev) && IS_MOBILE(dev))
I915_WRITE(MCHBAR_RENDER_STANDBY, dev_priv->saveRENDERSTANDBY);
+ /* Hardware status page */
+ I915_WRITE(HWS_PGA, dev_priv->saveHWS);
+
/* Display arbitration */
I915_WRITE(DSPARB, dev_priv->saveDSPARB);
diff --git a/drivers/gpu/drm/i915/intel_bios.c b/drivers/gpu/drm/i915/intel_bios.c
new file mode 100644
index 00000000000..4ca82a02552
--- /dev/null
+++ b/drivers/gpu/drm/i915/intel_bios.c
@@ -0,0 +1,193 @@
+/*
+ * Copyright © 2006 Intel Corporation
+ *
+ * Permission is hereby granted, free of charge, to any person obtaining a
+ * copy of this software and associated documentation files (the "Software"),
+ * to deal in the Software without restriction, including without limitation
+ * the rights to use, copy, modify, merge, publish, distribute, sublicense,
+ * and/or sell copies of the Software, and to permit persons to whom the
+ * Software is furnished to do so, subject to the following conditions:
+ *
+ * The above copyright notice and this permission notice (including the next
+ * paragraph) shall be included in all copies or substantial portions of the
+ * Software.
+ *
+ * THE SOFTWARE IS PROVIDED "AS IS", WITHOUT WARRANTY OF ANY KIND, EXPRESS OR
+ * IMPLIED, INCLUDING BUT NOT LIMITED TO THE WARRANTIES OF MERCHANTABILITY,
+ * FITNESS FOR A PARTICULAR PURPOSE AND NONINFRINGEMENT. IN NO EVENT SHALL
+ * THE AUTHORS OR COPYRIGHT HOLDERS BE LIABLE FOR ANY CLAIM, DAMAGES OR OTHER
+ * LIABILITY, WHETHER IN AN ACTION OF CONTRACT, TORT OR OTHERWISE, ARISING FROM,
+ * OUT OF OR IN CONNECTION WITH THE SOFTWARE OR THE USE OR OTHER DEALINGS IN THE
+ * SOFTWARE.
+ *
+ * Authors:
+ * Eric Anholt <eric@anholt.net>
+ *
+ */
+#include "drmP.h"
+#include "drm.h"
+#include "i915_drm.h"
+#include "i915_drv.h"
+#include "intel_bios.h"
+
+
+static void *
+find_section(struct bdb_header *bdb, int section_id)
+{
+ u8 *base = (u8 *)bdb;
+ int index = 0;
+ u16 total, current_size;
+ u8 current_id;
+
+ /* skip to first section */
+ index += bdb->header_size;
+ total = bdb->bdb_size;
+
+ /* walk the sections looking for section_id */
+ while (index < total) {
+ current_id = *(base + index);
+ index++;
+ current_size = *((u16 *)(base + index));
+ index += 2;
+ if (current_id == section_id)
+ return base + index;
+ index += current_size;
+ }
+
+ return NULL;
+}
+
+/* Try to find panel data */
+static void
+parse_panel_data(struct drm_i915_private *dev_priv, struct bdb_header *bdb)
+{
+ struct bdb_lvds_options *lvds_options;
+ struct bdb_lvds_lfp_data *lvds_lfp_data;
+ struct bdb_lvds_lfp_data_entry *entry;
+ struct lvds_dvo_timing *dvo_timing;
+ struct drm_display_mode *panel_fixed_mode;
+
+ /* Defaults if we can't find VBT info */
+ dev_priv->lvds_dither = 0;
+ dev_priv->lvds_vbt = 0;
+
+ lvds_options = find_section(bdb, BDB_LVDS_OPTIONS);
+ if (!lvds_options)
+ return;
+
+ dev_priv->lvds_dither = lvds_options->pixel_dither;
+ if (lvds_options->panel_type == 0xff)
+ return;
+
+ lvds_lfp_data = find_section(bdb, BDB_LVDS_LFP_DATA);
+ if (!lvds_lfp_data)
+ return;
+
+ dev_priv->lvds_vbt = 1;
+
+ entry = &lvds_lfp_data->data[lvds_options->panel_type];
+ dvo_timing = &entry->dvo_timing;
+
+ panel_fixed_mode = drm_calloc(1, sizeof(*panel_fixed_mode),
+ DRM_MEM_DRIVER);
+
+ panel_fixed_mode->hdisplay = (dvo_timing->hactive_hi << 8) |
+ dvo_timing->hactive_lo;
+ panel_fixed_mode->hsync_start = panel_fixed_mode->hdisplay +
+ ((dvo_timing->hsync_off_hi << 8) | dvo_timing->hsync_off_lo);
+ panel_fixed_mode->hsync_end = panel_fixed_mode->hsync_start +
+ dvo_timing->hsync_pulse_width;
+ panel_fixed_mode->htotal = panel_fixed_mode->hdisplay +
+ ((dvo_timing->hblank_hi << 8) | dvo_timing->hblank_lo);
+
+ panel_fixed_mode->vdisplay = (dvo_timing->vactive_hi << 8) |
+ dvo_timing->vactive_lo;
+ panel_fixed_mode->vsync_start = panel_fixed_mode->vdisplay +
+ dvo_timing->vsync_off;
+ panel_fixed_mode->vsync_end = panel_fixed_mode->vsync_start +
+ dvo_timing->vsync_pulse_width;
+ panel_fixed_mode->vtotal = panel_fixed_mode->vdisplay +
+ ((dvo_timing->vblank_hi << 8) | dvo_timing->vblank_lo);
+ panel_fixed_mode->clock = dvo_timing->clock * 10;
+ panel_fixed_mode->type = DRM_MODE_TYPE_PREFERRED;
+
+ drm_mode_set_name(panel_fixed_mode);
+
+ dev_priv->vbt_mode = panel_fixed_mode;
+
+ DRM_DEBUG("Found panel mode in BIOS VBT tables:\n");
+ drm_mode_debug_printmodeline(panel_fixed_mode);
+
+ return;
+}
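
The hi/lo field splits above recombine as (hi << 8) | lo, and the clock field is stored in 10 kHz units; a worked example with illustrative values, not taken from any particular VBT:

/*
 *   hactive_hi = 0x04, hactive_lo = 0x00  ->  hdisplay = (0x04 << 8) | 0x00 = 1024
 *   vactive_hi = 0x03, vactive_lo = 0x00  ->  vdisplay = (0x03 << 8) | 0x00 = 768
 *   clock      = 6500 (10 kHz units)      ->  panel_fixed_mode->clock = 65000 (kHz)
 */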
+
+static void
+parse_general_features(struct drm_i915_private *dev_priv,
+ struct bdb_header *bdb)
+{
+ struct bdb_general_features *general;
+
+ /* Set sensible defaults in case we can't find the general block */
+ dev_priv->int_tv_support = 1;
+ dev_priv->int_crt_support = 1;
+
+ general = find_section(bdb, BDB_GENERAL_FEATURES);
+ if (general) {
+ dev_priv->int_tv_support = general->int_tv_support;
+ dev_priv->int_crt_support = general->int_crt_support;
+ }
+}
+
+/**
+ * intel_init_bios - initialize VBIOS settings & find VBT
+ * @dev: DRM device
+ *
+ * Loads the Video BIOS and checks that the VBT exists. Sets scratch registers
+ * to appropriate values.
+ *
+ * VBT existence is a sanity check that is relied on by other i830_bios.c code.
+ * Note that it would be better to use a BIOS call to get the VBT, as BIOSes may
+ * feed an updated VBT back through that, compared to what we'll fetch using
+ * this method of groping around in the BIOS data.
+ *
+ * Returns 0 on success, nonzero on failure.
+ */
+bool
+intel_init_bios(struct drm_device *dev)
+{
+ struct drm_i915_private *dev_priv = dev->dev_private;
+ struct pci_dev *pdev = dev->pdev;
+ struct vbt_header *vbt = NULL;
+ struct bdb_header *bdb;
+ u8 __iomem *bios;
+ size_t size;
+ int i;
+
+ bios = pci_map_rom(pdev, &size);
+ if (!bios)
+ return -1;
+
+ /* Scour memory looking for the VBT signature */
+ for (i = 0; i + 4 < size; i++) {
+ if (!memcmp(bios + i, "$VBT", 4)) {
+ vbt = (struct vbt_header *)(bios + i);
+ break;
+ }
+ }
+
+ if (!vbt) {
+ DRM_ERROR("VBT signature missing\n");
+ pci_unmap_rom(pdev, bios);
+ return -1;
+ }
+
+ bdb = (struct bdb_header *)(bios + i + vbt->bdb_offset);
+
+ /* Grab useful general definitions */
+ parse_general_features(dev_priv, bdb);
+ parse_panel_data(dev_priv, bdb);
+
+ pci_unmap_rom(pdev, bios);
+
+ return 0;
+}
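
find_section() above treats the BDB as a flat sequence of [1-byte id][2-byte little-endian size][payload] records starting after the header; a minimal sketch that walks the same layout and merely lists each section (illustrative only, not part of the patch):

static void example_dump_bdb_sections(struct bdb_header *bdb)
{
	u8 *base = (u8 *)bdb;
	int index = bdb->header_size;
	int total = bdb->bdb_size;

	while (index + 3 <= total) {
		u8 id = base[index];
		u16 size = *(u16 *)(base + index + 1);

		DRM_DEBUG("BDB section %d, %d bytes\n", id, size);
		index += 3 + size;
	}
}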
diff --git a/drivers/gpu/drm/i915/intel_bios.h b/drivers/gpu/drm/i915/intel_bios.h
new file mode 100644
index 00000000000..5ea715ace3a
--- /dev/null
+++ b/drivers/gpu/drm/i915/intel_bios.h
@@ -0,0 +1,405 @@
+/*
+ * Copyright © 2006 Intel Corporation
+ *
+ * Permission is hereby granted, free of charge, to any person obtaining a
+ * copy of this software and associated documentation files (the "Software"),
+ * to deal in the Software without restriction, including without limitation
+ * the rights to use, copy, modify, merge, publish, distribute, sublicense,
+ * and/or sell copies of the Software, and to permit persons to whom the
+ * Software is furnished to do so, subject to the following conditions:
+ *
+ * The above copyright notice and this permission notice (including the next
+ * paragraph) shall be included in all copies or substantial portions of the
+ * Software.
+ *
+ * THE SOFTWARE IS PROVIDED "AS IS", WITHOUT WARRANTY OF ANY KIND, EXPRESS OR
+ * IMPLIED, INCLUDING BUT NOT LIMITED TO THE WARRANTIES OF MERCHANTABILITY,
+ * FITNESS FOR A PARTICULAR PURPOSE AND NONINFRINGEMENT. IN NO EVENT SHALL
+ * THE AUTHORS OR COPYRIGHT HOLDERS BE LIABLE FOR ANY CLAIM, DAMAGES OR OTHER
+ * LIABILITY, WHETHER IN AN ACTION OF CONTRACT, TORT OR OTHERWISE, ARISING FROM,
+ * OUT OF OR IN CONNECTION WITH THE SOFTWARE OR THE USE OR OTHER DEALINGS IN THE
+ * SOFTWARE.
+ *
+ * Authors:
+ * Eric Anholt <eric@anholt.net>
+ *
+ */
+
+#ifndef _I830_BIOS_H_
+#define _I830_BIOS_H_
+
+#include "drmP.h"
+
+struct vbt_header {
+ u8 signature[20]; /**< Always starts with '$VBT' */
+ u16 version; /**< decimal */
+ u16 header_size; /**< in bytes */
+ u16 vbt_size; /**< in bytes */
+ u8 vbt_checksum;
+ u8 reserved0;
+ u32 bdb_offset; /**< from beginning of VBT */
+ u32 aim_offset[4]; /**< from beginning of VBT */
+} __attribute__((packed));
+
+struct bdb_header {
+ u8 signature[16]; /**< Always 'BIOS_DATA_BLOCK' */
+ u16 version; /**< decimal */
+ u16 header_size; /**< in bytes */
+ u16 bdb_size; /**< in bytes */
+};
+
+/* strictly speaking, this is a "skip" block, but it has interesting info */
+struct vbios_data {
+ u8 type; /* 0 == desktop, 1 == mobile */
+ u8 relstage;
+ u8 chipset;
+ u8 lvds_present:1;
+ u8 tv_present:1;
+ u8 rsvd2:6; /* finish byte */
+ u8 rsvd3[4];
+ u8 signon[155];
+ u8 copyright[61];
+ u16 code_segment;
+ u8 dos_boot_mode;
+ u8 bandwidth_percent;
+ u8 rsvd4; /* popup memory size */
+ u8 resize_pci_bios;
+ u8 rsvd5; /* is crt already on ddc2 */
+} __attribute__((packed));
+
+/*
+ * There are several types of BIOS data blocks (BDBs), each block has
+ * an ID and size in the first 3 bytes (ID in first, size in next 2).
+ * Known types are listed below.
+ */
+#define BDB_GENERAL_FEATURES 1
+#define BDB_GENERAL_DEFINITIONS 2
+#define BDB_OLD_TOGGLE_LIST 3
+#define BDB_MODE_SUPPORT_LIST 4
+#define BDB_GENERIC_MODE_TABLE 5
+#define BDB_EXT_MMIO_REGS 6
+#define BDB_SWF_IO 7
+#define BDB_SWF_MMIO 8
+#define BDB_DOT_CLOCK_TABLE 9
+#define BDB_MODE_REMOVAL_TABLE 10
+#define BDB_CHILD_DEVICE_TABLE 11
+#define BDB_DRIVER_FEATURES 12
+#define BDB_DRIVER_PERSISTENCE 13
+#define BDB_EXT_TABLE_PTRS 14
+#define BDB_DOT_CLOCK_OVERRIDE 15
+#define BDB_DISPLAY_SELECT 16
+/* 17 rsvd */
+#define BDB_DRIVER_ROTATION 18
+#define BDB_DISPLAY_REMOVE 19
+#define BDB_OEM_CUSTOM 20
+#define BDB_EFP_LIST 21 /* workarounds for VGA hsync/vsync */
+#define BDB_SDVO_LVDS_OPTIONS 22
+#define BDB_SDVO_PANEL_DTDS 23
+#define BDB_SDVO_LVDS_PNP_IDS 24
+#define BDB_SDVO_LVDS_POWER_SEQ 25
+#define BDB_TV_OPTIONS 26
+#define BDB_LVDS_OPTIONS 40
+#define BDB_LVDS_LFP_DATA_PTRS 41
+#define BDB_LVDS_LFP_DATA 42
+#define BDB_LVDS_BACKLIGHT 43
+#define BDB_LVDS_POWER 44
+#define BDB_SKIP 254 /* VBIOS private block, ignore */
+
+struct bdb_general_features {
+ /* bits 1 */
+ u8 panel_fitting:2;
+ u8 flexaim:1;
+ u8 msg_enable:1;
+ u8 clear_screen:3;
+ u8 color_flip:1;
+
+ /* bits 2 */
+ u8 download_ext_vbt:1;
+ u8 enable_ssc:1;
+ u8 ssc_freq:1;
+ u8 enable_lfp_on_override:1;
+ u8 disable_ssc_ddt:1;
+ u8 rsvd8:3; /* finish byte */
+
+ /* bits 3 */
+ u8 disable_smooth_vision:1;
+ u8 single_dvi:1;
+ u8 rsvd9:6; /* finish byte */
+
+ /* bits 4 */
+ u8 legacy_monitor_detect;
+
+ /* bits 5 */
+ u8 int_crt_support:1;
+ u8 int_tv_support:1;
+ u8 rsvd11:6; /* finish byte */
+} __attribute__((packed));
+
+struct bdb_general_definitions {
+ /* DDC GPIO */
+ u8 crt_ddc_gmbus_pin;
+
+ /* DPMS bits */
+ u8 dpms_acpi:1;
+ u8 skip_boot_crt_detect:1;
+ u8 dpms_aim:1;
+ u8 rsvd1:5; /* finish byte */
+
+ /* boot device bits */
+ u8 boot_display[2];
+ u8 child_dev_size;
+
+ /* device info */
+ u8 tv_or_lvds_info[33];
+ u8 dev1[33];
+ u8 dev2[33];
+ u8 dev3[33];
+ u8 dev4[33];
+ /* may be another device block here on some platforms */
+};
+
+struct bdb_lvds_options {
+ u8 panel_type;
+ u8 rsvd1;
+ /* LVDS capabilities, stored in a dword */
+ u8 rsvd2:1;
+ u8 lvds_edid:1;
+ u8 pixel_dither:1;
+ u8 pfit_ratio_auto:1;
+ u8 pfit_gfx_mode_enhanced:1;
+ u8 pfit_text_mode_enhanced:1;
+ u8 pfit_mode:2;
+ u8 rsvd4;
+} __attribute__((packed));
+
+/* LFP pointer table contains entries to the struct below */
+struct bdb_lvds_lfp_data_ptr {
+ u16 fp_timing_offset; /* offsets are from start of bdb */
+ u8 fp_table_size;
+ u16 dvo_timing_offset;
+ u8 dvo_table_size;
+ u16 panel_pnp_id_offset;
+ u8 pnp_table_size;
+} __attribute__((packed));
+
+struct bdb_lvds_lfp_data_ptrs {
+ u8 lvds_entries; /* followed by one or more lvds_data_ptr structs */
+ struct bdb_lvds_lfp_data_ptr ptr[16];
+} __attribute__((packed));
+
+/* LFP data has 3 blocks per entry */
+struct lvds_fp_timing {
+ u16 x_res;
+ u16 y_res;
+ u32 lvds_reg;
+ u32 lvds_reg_val;
+ u32 pp_on_reg;
+ u32 pp_on_reg_val;
+ u32 pp_off_reg;
+ u32 pp_off_reg_val;
+ u32 pp_cycle_reg;
+ u32 pp_cycle_reg_val;
+ u32 pfit_reg;
+ u32 pfit_reg_val;
+ u16 terminator;
+} __attribute__((packed));
+
+struct lvds_dvo_timing {
+ u16 clock; /**< In 10 kHz units */
+ u8 hactive_lo;
+ u8 hblank_lo;
+ u8 hblank_hi:4;
+ u8 hactive_hi:4;
+ u8 vactive_lo;
+ u8 vblank_lo;
+ u8 vblank_hi:4;
+ u8 vactive_hi:4;
+ u8 hsync_off_lo;
+ u8 hsync_pulse_width;
+ u8 vsync_pulse_width:4;
+ u8 vsync_off:4;
+ u8 rsvd0:6;
+ u8 hsync_off_hi:2;
+ u8 h_image;
+ u8 v_image;
+ u8 max_hv;
+ u8 h_border;
+ u8 v_border;
+ u8 rsvd1:3;
+ u8 digital:2;
+ u8 vsync_positive:1;
+ u8 hsync_positive:1;
+ u8 rsvd2:1;
+} __attribute__((packed));
+
+struct lvds_pnp_id {
+ u16 mfg_name;
+ u16 product_code;
+ u32 serial;
+ u8 mfg_week;
+ u8 mfg_year;
+} __attribute__((packed));
+
+struct bdb_lvds_lfp_data_entry {
+ struct lvds_fp_timing fp_timing;
+ struct lvds_dvo_timing dvo_timing;
+ struct lvds_pnp_id pnp_id;
+} __attribute__((packed));
+
+struct bdb_lvds_lfp_data {
+ struct bdb_lvds_lfp_data_entry data[16];
+} __attribute__((packed));
+
+struct aimdb_header {
+ char signature[16];
+ char oem_device[20];
+ u16 aimdb_version;
+ u16 aimdb_header_size;
+ u16 aimdb_size;
+} __attribute__((packed));
+
+struct aimdb_block {
+ u8 aimdb_id;
+ u16 aimdb_size;
+} __attribute__((packed));
+
+struct vch_panel_data {
+ u16 fp_timing_offset;
+ u8 fp_timing_size;
+ u16 dvo_timing_offset;
+ u8 dvo_timing_size;
+ u16 text_fitting_offset;
+ u8 text_fitting_size;
+ u16 graphics_fitting_offset;
+ u8 graphics_fitting_size;
+} __attribute__((packed));
+
+struct vch_bdb_22 {
+ struct aimdb_block aimdb_block;
+ struct vch_panel_data panels[16];
+} __attribute__((packed));
+
+bool intel_init_bios(struct drm_device *dev);
+
+/*
+ * Driver<->VBIOS interaction occurs through scratch bits in
+ * GR18 & SWF*.
+ */
+
+/* GR18 bits are set on display switch and hotkey events */
+#define GR18_DRIVER_SWITCH_EN (1<<7) /* 0: VBIOS control, 1: driver control */
+#define GR18_HOTKEY_MASK 0x78 /* See also SWF4 15:0 */
+#define GR18_HK_NONE (0x0<<3)
+#define GR18_HK_LFP_STRETCH (0x1<<3)
+#define GR18_HK_TOGGLE_DISP (0x2<<3)
+#define GR18_HK_DISP_SWITCH (0x4<<3) /* see SWF14 15:0 for what to enable */
+#define GR18_HK_POPUP_DISABLED (0x6<<3)
+#define GR18_HK_POPUP_ENABLED (0x7<<3)
+#define GR18_HK_PFIT (0x8<<3)
+#define GR18_HK_APM_CHANGE (0xa<<3)
+#define GR18_HK_MULTIPLE (0xc<<3)
+#define GR18_USER_INT_EN (1<<2)
+#define GR18_A0000_FLUSH_EN (1<<1)
+#define GR18_SMM_EN (1<<0)
+
+/* Set by driver, cleared by VBIOS */
+#define SWF00_YRES_SHIFT 16
+#define SWF00_XRES_SHIFT 0
+#define SWF00_RES_MASK 0xffff
+
+/* Set by VBIOS at boot time and driver at runtime */
+#define SWF01_TV2_FORMAT_SHIFT 8
+#define SWF01_TV1_FORMAT_SHIFT 0
+#define SWF01_TV_FORMAT_MASK 0xffff
+
+#define SWF10_VBIOS_BLC_I2C_EN (1<<29)
+#define SWF10_GTT_OVERRIDE_EN (1<<28)
+#define SWF10_LFP_DPMS_OVR (1<<27) /* override DPMS on display switch */
+#define SWF10_ACTIVE_TOGGLE_LIST_MASK (7<<24)
+#define SWF10_OLD_TOGGLE 0x0
+#define SWF10_TOGGLE_LIST_1 0x1
+#define SWF10_TOGGLE_LIST_2 0x2
+#define SWF10_TOGGLE_LIST_3 0x3
+#define SWF10_TOGGLE_LIST_4 0x4
+#define SWF10_PANNING_EN (1<<23)
+#define SWF10_DRIVER_LOADED (1<<22)
+#define SWF10_EXTENDED_DESKTOP (1<<21)
+#define SWF10_EXCLUSIVE_MODE (1<<20)
+#define SWF10_OVERLAY_EN (1<<19)
+#define SWF10_PLANEB_HOLDOFF (1<<18)
+#define SWF10_PLANEA_HOLDOFF (1<<17)
+#define SWF10_VGA_HOLDOFF (1<<16)
+#define SWF10_ACTIVE_DISP_MASK 0xffff
+#define SWF10_PIPEB_LFP2 (1<<15)
+#define SWF10_PIPEB_EFP2 (1<<14)
+#define SWF10_PIPEB_TV2 (1<<13)
+#define SWF10_PIPEB_CRT2 (1<<12)
+#define SWF10_PIPEB_LFP (1<<11)
+#define SWF10_PIPEB_EFP (1<<10)
+#define SWF10_PIPEB_TV (1<<9)
+#define SWF10_PIPEB_CRT (1<<8)
+#define SWF10_PIPEA_LFP2 (1<<7)
+#define SWF10_PIPEA_EFP2 (1<<6)
+#define SWF10_PIPEA_TV2 (1<<5)
+#define SWF10_PIPEA_CRT2 (1<<4)
+#define SWF10_PIPEA_LFP (1<<3)
+#define SWF10_PIPEA_EFP (1<<2)
+#define SWF10_PIPEA_TV (1<<1)
+#define SWF10_PIPEA_CRT (1<<0)
+
+#define SWF11_MEMORY_SIZE_SHIFT 16
+#define SWF11_SV_TEST_EN (1<<15)
+#define SWF11_IS_AGP (1<<14)
+#define SWF11_DISPLAY_HOLDOFF (1<<13)
+#define SWF11_DPMS_REDUCED (1<<12)
+#define SWF11_IS_VBE_MODE (1<<11)
+#define SWF11_PIPEB_ACCESS (1<<10) /* 0 here means pipe a */
+#define SWF11_DPMS_MASK 0x07
+#define SWF11_DPMS_OFF (1<<2)
+#define SWF11_DPMS_SUSPEND (1<<1)
+#define SWF11_DPMS_STANDBY (1<<0)
+#define SWF11_DPMS_ON 0
+
+#define SWF14_GFX_PFIT_EN (1<<31)
+#define SWF14_TEXT_PFIT_EN (1<<30)
+#define SWF14_LID_STATUS_CLOSED (1<<29) /* 0 here means open */
+#define SWF14_POPUP_EN (1<<28)
+#define SWF14_DISPLAY_HOLDOFF (1<<27)
+#define SWF14_DISP_DETECT_EN (1<<26)
+#define SWF14_DOCKING_STATUS_DOCKED (1<<25) /* 0 here means undocked */
+#define SWF14_DRIVER_STATUS (1<<24)
+#define SWF14_OS_TYPE_WIN9X (1<<23)
+#define SWF14_OS_TYPE_WINNT (1<<22)
+/* 21:19 rsvd */
+#define SWF14_PM_TYPE_MASK 0x00070000
+#define SWF14_PM_ACPI_VIDEO (0x4 << 16)
+#define SWF14_PM_ACPI (0x3 << 16)
+#define SWF14_PM_APM_12 (0x2 << 16)
+#define SWF14_PM_APM_11 (0x1 << 16)
+#define SWF14_HK_REQUEST_MASK 0x0000ffff /* see GR18 6:3 for event type */
+ /* if GR18 indicates a display switch */
+#define SWF14_DS_PIPEB_LFP2_EN (1<<15)
+#define SWF14_DS_PIPEB_EFP2_EN (1<<14)
+#define SWF14_DS_PIPEB_TV2_EN (1<<13)
+#define SWF14_DS_PIPEB_CRT2_EN (1<<12)
+#define SWF14_DS_PIPEB_LFP_EN (1<<11)
+#define SWF14_DS_PIPEB_EFP_EN (1<<10)
+#define SWF14_DS_PIPEB_TV_EN (1<<9)
+#define SWF14_DS_PIPEB_CRT_EN (1<<8)
+#define SWF14_DS_PIPEA_LFP2_EN (1<<7)
+#define SWF14_DS_PIPEA_EFP2_EN (1<<6)
+#define SWF14_DS_PIPEA_TV2_EN (1<<5)
+#define SWF14_DS_PIPEA_CRT2_EN (1<<4)
+#define SWF14_DS_PIPEA_LFP_EN (1<<3)
+#define SWF14_DS_PIPEA_EFP_EN (1<<2)
+#define SWF14_DS_PIPEA_TV_EN (1<<1)
+#define SWF14_DS_PIPEA_CRT_EN (1<<0)
+ /* if GR18 indicates a panel fitting request */
+#define SWF14_PFIT_EN (1<<0) /* 0 means disable */
+ /* if GR18 indicates an APM change request */
+#define SWF14_APM_HIBERNATE 0x4
+#define SWF14_APM_SUSPEND 0x3
+#define SWF14_APM_STANDBY 0x1
+#define SWF14_APM_RESTORE 0x0
+
+#endif /* _I830_BIOS_H_ */
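
The GR18/SWF scratch bits above are how the VBIOS reports hotkey and display-switch requests to the driver; a sketch of decoding a display-switch request from values already read out of the hardware (the read path itself is not part of this header, and example_handle_hotkey() is hypothetical):

static void example_handle_hotkey(u32 gr18, u32 swf14)
{
	if ((gr18 & GR18_HOTKEY_MASK) == GR18_HK_DISP_SWITCH) {
		u16 requested = swf14 & SWF14_HK_REQUEST_MASK;

		if (requested & SWF14_DS_PIPEA_CRT_EN)
			DRM_DEBUG("hotkey requested CRT on pipe A\n");
		if (requested & SWF14_DS_PIPEB_LFP_EN)
			DRM_DEBUG("hotkey requested LFP on pipe B\n");
	}
}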
diff --git a/drivers/gpu/drm/i915/intel_crt.c b/drivers/gpu/drm/i915/intel_crt.c
new file mode 100644
index 00000000000..dcaed3466e8
--- /dev/null
+++ b/drivers/gpu/drm/i915/intel_crt.c
@@ -0,0 +1,284 @@
+/*
+ * Copyright © 2006-2007 Intel Corporation
+ *
+ * Permission is hereby granted, free of charge, to any person obtaining a
+ * copy of this software and associated documentation files (the "Software"),
+ * to deal in the Software without restriction, including without limitation
+ * the rights to use, copy, modify, merge, publish, distribute, sublicense,
+ * and/or sell copies of the Software, and to permit persons to whom the
+ * Software is furnished to do so, subject to the following conditions:
+ *
+ * The above copyright notice and this permission notice (including the next
+ * paragraph) shall be included in all copies or substantial portions of the
+ * Software.
+ *
+ * THE SOFTWARE IS PROVIDED "AS IS", WITHOUT WARRANTY OF ANY KIND, EXPRESS OR
+ * IMPLIED, INCLUDING BUT NOT LIMITED TO THE WARRANTIES OF MERCHANTABILITY,
+ * FITNESS FOR A PARTICULAR PURPOSE AND NONINFRINGEMENT. IN NO EVENT SHALL
+ * THE AUTHORS OR COPYRIGHT HOLDERS BE LIABLE FOR ANY CLAIM, DAMAGES OR OTHER
+ * LIABILITY, WHETHER IN AN ACTION OF CONTRACT, TORT OR OTHERWISE, ARISING
+ * FROM, OUT OF OR IN CONNECTION WITH THE SOFTWARE OR THE USE OR OTHER
+ * DEALINGS IN THE SOFTWARE.
+ *
+ * Authors:
+ * Eric Anholt <eric@anholt.net>
+ */
+
+#include <linux/i2c.h>
+#include "drmP.h"
+#include "drm.h"
+#include "drm_crtc.h"
+#include "drm_crtc_helper.h"
+#include "intel_drv.h"
+#include "i915_drm.h"
+#include "i915_drv.h"
+
+static void intel_crt_dpms(struct drm_encoder *encoder, int mode)
+{
+ struct drm_device *dev = encoder->dev;
+ struct drm_i915_private *dev_priv = dev->dev_private;
+ u32 temp;
+
+ temp = I915_READ(ADPA);
+ temp &= ~(ADPA_HSYNC_CNTL_DISABLE | ADPA_VSYNC_CNTL_DISABLE);
+ temp &= ~ADPA_DAC_ENABLE;
+
+ switch(mode) {
+ case DRM_MODE_DPMS_ON:
+ temp |= ADPA_DAC_ENABLE;
+ break;
+ case DRM_MODE_DPMS_STANDBY:
+ temp |= ADPA_DAC_ENABLE | ADPA_HSYNC_CNTL_DISABLE;
+ break;
+ case DRM_MODE_DPMS_SUSPEND:
+ temp |= ADPA_DAC_ENABLE | ADPA_VSYNC_CNTL_DISABLE;
+ break;
+ case DRM_MODE_DPMS_OFF:
+ temp |= ADPA_HSYNC_CNTL_DISABLE | ADPA_VSYNC_CNTL_DISABLE;
+ break;
+ }
+
+ I915_WRITE(ADPA, temp);
+}
+
+static int intel_crt_mode_valid(struct drm_connector *connector,
+ struct drm_display_mode *mode)
+{
+ if (mode->flags & DRM_MODE_FLAG_DBLSCAN)
+ return MODE_NO_DBLESCAN;
+
+ if (mode->clock > 400000 || mode->clock < 25000)
+ return MODE_CLOCK_RANGE;
+
+ return MODE_OK;
+}
+
+static bool intel_crt_mode_fixup(struct drm_encoder *encoder,
+ struct drm_display_mode *mode,
+ struct drm_display_mode *adjusted_mode)
+{
+ return true;
+}
+
+static void intel_crt_mode_set(struct drm_encoder *encoder,
+ struct drm_display_mode *mode,
+ struct drm_display_mode *adjusted_mode)
+{
+
+ struct drm_device *dev = encoder->dev;
+ struct drm_crtc *crtc = encoder->crtc;
+ struct intel_crtc *intel_crtc = to_intel_crtc(crtc);
+ struct drm_i915_private *dev_priv = dev->dev_private;
+ int dpll_md_reg;
+ u32 adpa, dpll_md;
+
+ if (intel_crtc->pipe == 0)
+ dpll_md_reg = DPLL_A_MD;
+ else
+ dpll_md_reg = DPLL_B_MD;
+
+ /*
+ * Disable separate mode multiplier used when cloning SDVO to CRT
+ * XXX this needs to be adjusted when we really are cloning
+ */
+ if (IS_I965G(dev)) {
+ dpll_md = I915_READ(dpll_md_reg);
+ I915_WRITE(dpll_md_reg,
+ dpll_md & ~DPLL_MD_UDI_MULTIPLIER_MASK);
+ }
+
+ adpa = 0;
+ if (adjusted_mode->flags & DRM_MODE_FLAG_PHSYNC)
+ adpa |= ADPA_HSYNC_ACTIVE_HIGH;
+ if (adjusted_mode->flags & DRM_MODE_FLAG_PVSYNC)
+ adpa |= ADPA_VSYNC_ACTIVE_HIGH;
+
+ if (intel_crtc->pipe == 0)
+ adpa |= ADPA_PIPE_A_SELECT;
+ else
+ adpa |= ADPA_PIPE_B_SELECT;
+
+ I915_WRITE(ADPA, adpa);
+}
+
+/**
+ * Uses CRT_HOTPLUG_EN and CRT_HOTPLUG_STAT to detect CRT presence.
+ *
+ * Not for i915G/i915GM
+ *
+ * \return true if CRT is connected.
+ * \return false if CRT is disconnected.
+ */
+static bool intel_crt_detect_hotplug(struct drm_connector *connector)
+{
+ struct drm_device *dev = connector->dev;
+ struct drm_i915_private *dev_priv = dev->dev_private;
+ u32 temp;
+
+ unsigned long timeout = jiffies + msecs_to_jiffies(1000);
+
+ temp = I915_READ(PORT_HOTPLUG_EN);
+
+ I915_WRITE(PORT_HOTPLUG_EN,
+ temp | CRT_HOTPLUG_FORCE_DETECT | (1 << 5));
+
+ do {
+ if (!(I915_READ(PORT_HOTPLUG_EN) & CRT_HOTPLUG_FORCE_DETECT))
+ break;
+ msleep(1);
+ } while (time_after(timeout, jiffies));
+
+ if ((I915_READ(PORT_HOTPLUG_STAT) & CRT_HOTPLUG_MONITOR_MASK) ==
+ CRT_HOTPLUG_MONITOR_COLOR)
+ return true;
+
+ return false;
+}
+
+static bool intel_crt_detect_ddc(struct drm_connector *connector)
+{
+ struct intel_output *intel_output = to_intel_output(connector);
+
+ /* CRT should always be at 0, but check anyway */
+ if (intel_output->type != INTEL_OUTPUT_ANALOG)
+ return false;
+
+ return intel_ddc_probe(intel_output);
+}
+
+static enum drm_connector_status intel_crt_detect(struct drm_connector *connector)
+{
+ struct drm_device *dev = connector->dev;
+
+ if (IS_I9XX(dev) && !IS_I915G(dev) && !IS_I915GM(dev)) {
+ if (intel_crt_detect_hotplug(connector))
+ return connector_status_connected;
+ else
+ return connector_status_disconnected;
+ }
+
+ if (intel_crt_detect_ddc(connector))
+ return connector_status_connected;
+
+ /* TODO use load detect */
+ return connector_status_unknown;
+}
+
+static void intel_crt_destroy(struct drm_connector *connector)
+{
+ struct intel_output *intel_output = to_intel_output(connector);
+
+ intel_i2c_destroy(intel_output->ddc_bus);
+ drm_sysfs_connector_remove(connector);
+ drm_connector_cleanup(connector);
+ kfree(connector);
+}
+
+static int intel_crt_get_modes(struct drm_connector *connector)
+{
+ struct intel_output *intel_output = to_intel_output(connector);
+ return intel_ddc_get_modes(intel_output);
+}
+
+static int intel_crt_set_property(struct drm_connector *connector,
+ struct drm_property *property,
+ uint64_t value)
+{
+ struct drm_device *dev = connector->dev;
+
+ if (property == dev->mode_config.dpms_property && connector->encoder)
+ intel_crt_dpms(connector->encoder, (uint32_t)(value & 0xf));
+
+ return 0;
+}
+
+/*
+ * Routines for controlling stuff on the analog port
+ */
+
+static const struct drm_encoder_helper_funcs intel_crt_helper_funcs = {
+ .dpms = intel_crt_dpms,
+ .mode_fixup = intel_crt_mode_fixup,
+ .prepare = intel_encoder_prepare,
+ .commit = intel_encoder_commit,
+ .mode_set = intel_crt_mode_set,
+};
+
+static const struct drm_connector_funcs intel_crt_connector_funcs = {
+ .detect = intel_crt_detect,
+ .fill_modes = drm_helper_probe_single_connector_modes,
+ .destroy = intel_crt_destroy,
+ .set_property = intel_crt_set_property,
+};
+
+static const struct drm_connector_helper_funcs intel_crt_connector_helper_funcs = {
+ .mode_valid = intel_crt_mode_valid,
+ .get_modes = intel_crt_get_modes,
+ .best_encoder = intel_best_encoder,
+};
+
+static void intel_crt_enc_destroy(struct drm_encoder *encoder)
+{
+ drm_encoder_cleanup(encoder);
+}
+
+static const struct drm_encoder_funcs intel_crt_enc_funcs = {
+ .destroy = intel_crt_enc_destroy,
+};
+
+void intel_crt_init(struct drm_device *dev)
+{
+ struct drm_connector *connector;
+ struct intel_output *intel_output;
+
+ intel_output = kzalloc(sizeof(struct intel_output), GFP_KERNEL);
+ if (!intel_output)
+ return;
+
+ connector = &intel_output->base;
+ drm_connector_init(dev, &intel_output->base,
+ &intel_crt_connector_funcs, DRM_MODE_CONNECTOR_VGA);
+
+ drm_encoder_init(dev, &intel_output->enc, &intel_crt_enc_funcs,
+ DRM_MODE_ENCODER_DAC);
+
+ drm_mode_connector_attach_encoder(&intel_output->base,
+ &intel_output->enc);
+
+ /* Set up the DDC bus. */
+ intel_output->ddc_bus = intel_i2c_create(dev, GPIOA, "CRTDDC_A");
+ if (!intel_output->ddc_bus) {
+ dev_printk(KERN_ERR, &dev->pdev->dev, "DDC bus registration "
+ "failed.\n");
+ return;
+ }
+
+ intel_output->type = INTEL_OUTPUT_ANALOG;
+ connector->interlace_allowed = 0;
+ connector->doublescan_allowed = 0;
+
+ drm_encoder_helper_add(&intel_output->enc, &intel_crt_helper_funcs);
+ drm_connector_helper_add(connector, &intel_crt_connector_helper_funcs);
+
+ drm_sysfs_connector_add(connector);
+}
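
intel_crt_detect_hotplug() above forces a detect cycle and then polls for CRT_HOTPLUG_FORCE_DETECT to self-clear before reading the monitor status; the same poll-until-clear idiom as a stand-alone sketch (example_wait_for_bit_clear() is a hypothetical helper, not part of the patch):

static bool example_wait_for_bit_clear(struct drm_i915_private *dev_priv,
				       u32 reg, u32 bit, unsigned int msecs)
{
	unsigned long timeout = jiffies + msecs_to_jiffies(msecs);

	do {
		if (!(I915_READ(reg) & bit))
			return true;	/* hardware finished the cycle */
		msleep(1);
	} while (time_after(timeout, jiffies));

	return false;			/* timed out */
}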
diff --git a/drivers/gpu/drm/i915/intel_display.c b/drivers/gpu/drm/i915/intel_display.c
new file mode 100644
index 00000000000..e5c1c80d1f9
--- /dev/null
+++ b/drivers/gpu/drm/i915/intel_display.c
@@ -0,0 +1,1618 @@
+/*
+ * Copyright © 2006-2007 Intel Corporation
+ *
+ * Permission is hereby granted, free of charge, to any person obtaining a
+ * copy of this software and associated documentation files (the "Software"),
+ * to deal in the Software without restriction, including without limitation
+ * the rights to use, copy, modify, merge, publish, distribute, sublicense,
+ * and/or sell copies of the Software, and to permit persons to whom the
+ * Software is furnished to do so, subject to the following conditions:
+ *
+ * The above copyright notice and this permission notice (including the next
+ * paragraph) shall be included in all copies or substantial portions of the
+ * Software.
+ *
+ * THE SOFTWARE IS PROVIDED "AS IS", WITHOUT WARRANTY OF ANY KIND, EXPRESS OR
+ * IMPLIED, INCLUDING BUT NOT LIMITED TO THE WARRANTIES OF MERCHANTABILITY,
+ * FITNESS FOR A PARTICULAR PURPOSE AND NONINFRINGEMENT. IN NO EVENT SHALL
+ * THE AUTHORS OR COPYRIGHT HOLDERS BE LIABLE FOR ANY CLAIM, DAMAGES OR OTHER
+ * LIABILITY, WHETHER IN AN ACTION OF CONTRACT, TORT OR OTHERWISE, ARISING
+ * FROM, OUT OF OR IN CONNECTION WITH THE SOFTWARE OR THE USE OR OTHER
+ * DEALINGS IN THE SOFTWARE.
+ *
+ * Authors:
+ * Eric Anholt <eric@anholt.net>
+ */
+
+#include <linux/i2c.h>
+#include "drmP.h"
+#include "intel_drv.h"
+#include "i915_drm.h"
+#include "i915_drv.h"
+
+#include "drm_crtc_helper.h"
+
+bool intel_pipe_has_type (struct drm_crtc *crtc, int type);
+
+typedef struct {
+ /* given values */
+ int n;
+ int m1, m2;
+ int p1, p2;
+ /* derived values */
+ int dot;
+ int vco;
+ int m;
+ int p;
+} intel_clock_t;
+
+typedef struct {
+ int min, max;
+} intel_range_t;
+
+typedef struct {
+ int dot_limit;
+ int p2_slow, p2_fast;
+} intel_p2_t;
+
+#define INTEL_P2_NUM 2
+
+typedef struct {
+ intel_range_t dot, vco, n, m, m1, m2, p, p1;
+ intel_p2_t p2;
+} intel_limit_t;
+
+#define I8XX_DOT_MIN 25000
+#define I8XX_DOT_MAX 350000
+#define I8XX_VCO_MIN 930000
+#define I8XX_VCO_MAX 1400000
+#define I8XX_N_MIN 3
+#define I8XX_N_MAX 16
+#define I8XX_M_MIN 96
+#define I8XX_M_MAX 140
+#define I8XX_M1_MIN 18
+#define I8XX_M1_MAX 26
+#define I8XX_M2_MIN 6
+#define I8XX_M2_MAX 16
+#define I8XX_P_MIN 4
+#define I8XX_P_MAX 128
+#define I8XX_P1_MIN 2
+#define I8XX_P1_MAX 33
+#define I8XX_P1_LVDS_MIN 1
+#define I8XX_P1_LVDS_MAX 6
+#define I8XX_P2_SLOW 4
+#define I8XX_P2_FAST 2
+#define I8XX_P2_LVDS_SLOW 14
+#define I8XX_P2_LVDS_FAST 14 /* No fast option */
+#define I8XX_P2_SLOW_LIMIT 165000
+
+#define I9XX_DOT_MIN 20000
+#define I9XX_DOT_MAX 400000
+#define I9XX_VCO_MIN 1400000
+#define I9XX_VCO_MAX 2800000
+#define I9XX_N_MIN 3
+#define I9XX_N_MAX 8
+#define I9XX_M_MIN 70
+#define I9XX_M_MAX 120
+#define I9XX_M1_MIN 10
+#define I9XX_M1_MAX 20
+#define I9XX_M2_MIN 5
+#define I9XX_M2_MAX 9
+#define I9XX_P_SDVO_DAC_MIN 5
+#define I9XX_P_SDVO_DAC_MAX 80
+#define I9XX_P_LVDS_MIN 7
+#define I9XX_P_LVDS_MAX 98
+#define I9XX_P1_MIN 1
+#define I9XX_P1_MAX 8
+#define I9XX_P2_SDVO_DAC_SLOW 10
+#define I9XX_P2_SDVO_DAC_FAST 5
+#define I9XX_P2_SDVO_DAC_SLOW_LIMIT 200000
+#define I9XX_P2_LVDS_SLOW 14
+#define I9XX_P2_LVDS_FAST 7
+#define I9XX_P2_LVDS_SLOW_LIMIT 112000
+
+#define INTEL_LIMIT_I8XX_DVO_DAC 0
+#define INTEL_LIMIT_I8XX_LVDS 1
+#define INTEL_LIMIT_I9XX_SDVO_DAC 2
+#define INTEL_LIMIT_I9XX_LVDS 3
+
+static const intel_limit_t intel_limits[] = {
+ { /* INTEL_LIMIT_I8XX_DVO_DAC */
+ .dot = { .min = I8XX_DOT_MIN, .max = I8XX_DOT_MAX },
+ .vco = { .min = I8XX_VCO_MIN, .max = I8XX_VCO_MAX },
+ .n = { .min = I8XX_N_MIN, .max = I8XX_N_MAX },
+ .m = { .min = I8XX_M_MIN, .max = I8XX_M_MAX },
+ .m1 = { .min = I8XX_M1_MIN, .max = I8XX_M1_MAX },
+ .m2 = { .min = I8XX_M2_MIN, .max = I8XX_M2_MAX },
+ .p = { .min = I8XX_P_MIN, .max = I8XX_P_MAX },
+ .p1 = { .min = I8XX_P1_MIN, .max = I8XX_P1_MAX },
+ .p2 = { .dot_limit = I8XX_P2_SLOW_LIMIT,
+ .p2_slow = I8XX_P2_SLOW, .p2_fast = I8XX_P2_FAST },
+ },
+ { /* INTEL_LIMIT_I8XX_LVDS */
+ .dot = { .min = I8XX_DOT_MIN, .max = I8XX_DOT_MAX },
+ .vco = { .min = I8XX_VCO_MIN, .max = I8XX_VCO_MAX },
+ .n = { .min = I8XX_N_MIN, .max = I8XX_N_MAX },
+ .m = { .min = I8XX_M_MIN, .max = I8XX_M_MAX },
+ .m1 = { .min = I8XX_M1_MIN, .max = I8XX_M1_MAX },
+ .m2 = { .min = I8XX_M2_MIN, .max = I8XX_M2_MAX },
+ .p = { .min = I8XX_P_MIN, .max = I8XX_P_MAX },
+ .p1 = { .min = I8XX_P1_LVDS_MIN, .max = I8XX_P1_LVDS_MAX },
+ .p2 = { .dot_limit = I8XX_P2_SLOW_LIMIT,
+ .p2_slow = I8XX_P2_LVDS_SLOW, .p2_fast = I8XX_P2_LVDS_FAST },
+ },
+ { /* INTEL_LIMIT_I9XX_SDVO_DAC */
+ .dot = { .min = I9XX_DOT_MIN, .max = I9XX_DOT_MAX },
+ .vco = { .min = I9XX_VCO_MIN, .max = I9XX_VCO_MAX },
+ .n = { .min = I9XX_N_MIN, .max = I9XX_N_MAX },
+ .m = { .min = I9XX_M_MIN, .max = I9XX_M_MAX },
+ .m1 = { .min = I9XX_M1_MIN, .max = I9XX_M1_MAX },
+ .m2 = { .min = I9XX_M2_MIN, .max = I9XX_M2_MAX },
+ .p = { .min = I9XX_P_SDVO_DAC_MIN, .max = I9XX_P_SDVO_DAC_MAX },
+ .p1 = { .min = I9XX_P1_MIN, .max = I9XX_P1_MAX },
+ .p2 = { .dot_limit = I9XX_P2_SDVO_DAC_SLOW_LIMIT,
+ .p2_slow = I9XX_P2_SDVO_DAC_SLOW, .p2_fast = I9XX_P2_SDVO_DAC_FAST },
+ },
+ { /* INTEL_LIMIT_I9XX_LVDS */
+ .dot = { .min = I9XX_DOT_MIN, .max = I9XX_DOT_MAX },
+ .vco = { .min = I9XX_VCO_MIN, .max = I9XX_VCO_MAX },
+ .n = { .min = I9XX_N_MIN, .max = I9XX_N_MAX },
+ .m = { .min = I9XX_M_MIN, .max = I9XX_M_MAX },
+ .m1 = { .min = I9XX_M1_MIN, .max = I9XX_M1_MAX },
+ .m2 = { .min = I9XX_M2_MIN, .max = I9XX_M2_MAX },
+ .p = { .min = I9XX_P_LVDS_MIN, .max = I9XX_P_LVDS_MAX },
+ .p1 = { .min = I9XX_P1_MIN, .max = I9XX_P1_MAX },
+ /* The single-channel range is 25-112MHz, and dual-channel
+ * is 80-224MHz. Prefer single channel as much as possible.
+ */
+ .p2 = { .dot_limit = I9XX_P2_LVDS_SLOW_LIMIT,
+ .p2_slow = I9XX_P2_LVDS_SLOW, .p2_fast = I9XX_P2_LVDS_FAST },
+ },
+};
+
+static const intel_limit_t *intel_limit(struct drm_crtc *crtc)
+{
+ struct drm_device *dev = crtc->dev;
+ const intel_limit_t *limit;
+
+ if (IS_I9XX(dev)) {
+ if (intel_pipe_has_type(crtc, INTEL_OUTPUT_LVDS))
+ limit = &intel_limits[INTEL_LIMIT_I9XX_LVDS];
+ else
+ limit = &intel_limits[INTEL_LIMIT_I9XX_SDVO_DAC];
+ } else {
+ if (intel_pipe_has_type(crtc, INTEL_OUTPUT_LVDS))
+ limit = &intel_limits[INTEL_LIMIT_I8XX_LVDS];
+ else
+ limit = &intel_limits[INTEL_LIMIT_I8XX_DVO_DAC];
+ }
+ return limit;
+}
+
+/** Derive the pixel clock for the given refclk and divisors for 8xx chips. */
+
+static void i8xx_clock(int refclk, intel_clock_t *clock)
+{
+ clock->m = 5 * (clock->m1 + 2) + (clock->m2 + 2);
+ clock->p = clock->p1 * clock->p2;
+ clock->vco = refclk * clock->m / (clock->n + 2);
+ clock->dot = clock->vco / clock->p;
+}
+
+/** Derive the pixel clock for the given refclk and divisors for 9xx chips. */
+
+static void i9xx_clock(int refclk, intel_clock_t *clock)
+{
+ clock->m = 5 * (clock->m1 + 2) + (clock->m2 + 2);
+ clock->p = clock->p1 * clock->p2;
+ clock->vco = refclk * clock->m / (clock->n + 2);
+ clock->dot = clock->vco / clock->p;
+}
+
+static void intel_clock(struct drm_device *dev, int refclk,
+ intel_clock_t *clock)
+{
+ if (IS_I9XX(dev))
+ i9xx_clock (refclk, clock);
+ else
+ i8xx_clock (refclk, clock);
+}
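
A worked example of the divisor equation implemented by i8xx_clock()/i9xx_clock() above (values are illustrative, chosen to land inside the I9XX_* limits defined earlier in this file):

/*
 *   refclk = 96000 kHz, n = 4, m1 = 16, m2 = 8, p1 = 2, p2 = 10
 *
 *   m   = 5 * (16 + 2) + (8 + 2)  = 100
 *   vco = 96000 * 100 / (4 + 2)   = 1600000 kHz
 *   p   = 2 * 10                  = 20
 *   dot = 1600000 / 20            = 80000 kHz (80 MHz)
 */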
+
+/**
+ * Returns whether any output on the specified pipe is of the specified type
+ */
+bool intel_pipe_has_type (struct drm_crtc *crtc, int type)
+{
+ struct drm_device *dev = crtc->dev;
+ struct drm_mode_config *mode_config = &dev->mode_config;
+ struct drm_connector *l_entry;
+
+ list_for_each_entry(l_entry, &mode_config->connector_list, head) {
+ if (l_entry->encoder &&
+ l_entry->encoder->crtc == crtc) {
+ struct intel_output *intel_output = to_intel_output(l_entry);
+ if (intel_output->type == type)
+ return true;
+ }
+ }
+ return false;
+}
+
+#define INTELPllInvalid(s) { /* ErrorF (s) */; return false; }
+/**
+ * Returns whether the given set of divisors are valid for a given refclk with
+ * the given connectors.
+ */
+
+static bool intel_PLL_is_valid(struct drm_crtc *crtc, intel_clock_t *clock)
+{
+ const intel_limit_t *limit = intel_limit (crtc);
+
+ if (clock->p1 < limit->p1.min || limit->p1.max < clock->p1)
+ INTELPllInvalid ("p1 out of range\n");
+ if (clock->p < limit->p.min || limit->p.max < clock->p)
+ INTELPllInvalid ("p out of range\n");
+ if (clock->m2 < limit->m2.min || limit->m2.max < clock->m2)
+ INTELPllInvalid ("m2 out of range\n");
+ if (clock->m1 < limit->m1.min || limit->m1.max < clock->m1)
+ INTELPllInvalid ("m1 out of range\n");
+ if (clock->m1 <= clock->m2)
+ INTELPllInvalid ("m1 <= m2\n");
+ if (clock->m < limit->m.min || limit->m.max < clock->m)
+ INTELPllInvalid ("m out of range\n");
+ if (clock->n < limit->n.min || limit->n.max < clock->n)
+ INTELPllInvalid ("n out of range\n");
+ if (clock->vco < limit->vco.min || limit->vco.max < clock->vco)
+ INTELPllInvalid ("vco out of range\n");
+ /* XXX: We may need to be checking "Dot clock" depending on the multiplier,
+ * connector, etc., rather than just a single range.
+ */
+ if (clock->dot < limit->dot.min || limit->dot.max < clock->dot)
+ INTELPllInvalid ("dot out of range\n");
+
+ return true;
+}
+
+/**
+ * Returns a set of divisors for the desired target clock with the given
+ * refclk, or FALSE. The returned values represent the clock equation:
+ * refclk * (5 * (m1 + 2) + (m2 + 2)) / (n + 2) / p1 / p2.
+ */
+static bool intel_find_best_PLL(struct drm_crtc *crtc, int target,
+ int refclk, intel_clock_t *best_clock)
+{
+ struct drm_device *dev = crtc->dev;
+ struct drm_i915_private *dev_priv = dev->dev_private;
+ intel_clock_t clock;
+ const intel_limit_t *limit = intel_limit(crtc);
+ int err = target;
+
+ if (IS_I9XX(dev) && intel_pipe_has_type(crtc, INTEL_OUTPUT_LVDS) &&
+ (I915_READ(LVDS) & LVDS_PORT_EN) != 0) {
+ /*
+ * For LVDS, if the panel is on, just rely on its current
+ * settings for dual-channel. We haven't figured out how to
+ * reliably set up different single/dual channel state, if we
+ * even can.
+ */
+ if ((I915_READ(LVDS) & LVDS_CLKB_POWER_MASK) ==
+ LVDS_CLKB_POWER_UP)
+ clock.p2 = limit->p2.p2_fast;
+ else
+ clock.p2 = limit->p2.p2_slow;
+ } else {
+ if (target < limit->p2.dot_limit)
+ clock.p2 = limit->p2.p2_slow;
+ else
+ clock.p2 = limit->p2.p2_fast;
+ }
+
+ memset (best_clock, 0, sizeof (*best_clock));
+
+ for (clock.m1 = limit->m1.min; clock.m1 <= limit->m1.max; clock.m1++) {
+ for (clock.m2 = limit->m2.min; clock.m2 < clock.m1 &&
+ clock.m2 <= limit->m2.max; clock.m2++) {
+ for (clock.n = limit->n.min; clock.n <= limit->n.max;
+ clock.n++) {
+ for (clock.p1 = limit->p1.min;
+ clock.p1 <= limit->p1.max; clock.p1++) {
+ int this_err;
+
+ intel_clock(dev, refclk, &clock);
+
+ if (!intel_PLL_is_valid(crtc, &clock))
+ continue;
+
+ this_err = abs(clock.dot - target);
+ if (this_err < err) {
+ *best_clock = clock;
+ err = this_err;
+ }
+ }
+ }
+ }
+ }
+
+ return (err != target);
+}
+
+void
+intel_wait_for_vblank(struct drm_device *dev)
+{
+ /* Wait for 20ms, i.e. one cycle at 50Hz. */
+ udelay(20000);
+}
+
+static void
+intel_pipe_set_base(struct drm_crtc *crtc, int x, int y,
+ struct drm_framebuffer *old_fb)
+{
+ struct drm_device *dev = crtc->dev;
+ struct drm_i915_private *dev_priv = dev->dev_private;
+ struct drm_i915_master_private *master_priv;
+ struct intel_crtc *intel_crtc = to_intel_crtc(crtc);
+ struct intel_framebuffer *intel_fb;
+ struct drm_i915_gem_object *obj_priv;
+ struct drm_gem_object *obj;
+ int pipe = intel_crtc->pipe;
+ unsigned long Start, Offset;
+ int dspbase = (pipe == 0 ? DSPAADDR : DSPBADDR);
+ int dspsurf = (pipe == 0 ? DSPASURF : DSPBSURF);
+ int dspstride = (pipe == 0) ? DSPASTRIDE : DSPBSTRIDE;
+ int dspcntr_reg = (pipe == 0) ? DSPACNTR : DSPBCNTR;
+ u32 dspcntr, alignment;
+
+ /* no fb bound */
+ if (!crtc->fb) {
+ DRM_DEBUG("No FB bound\n");
+ return;
+ }
+
+ intel_fb = to_intel_framebuffer(crtc->fb);
+ obj = intel_fb->obj;
+ obj_priv = obj->driver_private;
+
+ switch (obj_priv->tiling_mode) {
+ case I915_TILING_NONE:
+ alignment = 64 * 1024;
+ break;
+ case I915_TILING_X:
+ if (IS_I9XX(dev))
+ alignment = 1024 * 1024;
+ else
+ alignment = 512 * 1024;
+ break;
+ case I915_TILING_Y:
+ /* FIXME: Is this true? */
+ DRM_ERROR("Y tiled not allowed for scan out buffers\n");
+ return;
+ default:
+ BUG();
+ }
+
+ if (i915_gem_object_pin(intel_fb->obj, alignment))
+ return;
+
+ i915_gem_object_set_to_gtt_domain(intel_fb->obj, 1);
+
+ Start = obj_priv->gtt_offset;
+ Offset = y * crtc->fb->pitch + x * (crtc->fb->bits_per_pixel / 8);
+
+ I915_WRITE(dspstride, crtc->fb->pitch);
+
+ dspcntr = I915_READ(dspcntr_reg);
+ switch (crtc->fb->bits_per_pixel) {
+ case 8:
+ dspcntr |= DISPPLANE_8BPP;
+ break;
+ case 16:
+ if (crtc->fb->depth == 15)
+ dspcntr |= DISPPLANE_15_16BPP;
+ else
+ dspcntr |= DISPPLANE_16BPP;
+ break;
+ case 24:
+ case 32:
+ dspcntr |= DISPPLANE_32BPP_NO_ALPHA;
+ break;
+ default:
+ DRM_ERROR("Unknown color depth\n");
+ return;
+ }
+ I915_WRITE(dspcntr_reg, dspcntr);
+
+ DRM_DEBUG("Writing base %08lX %08lX %d %d\n", Start, Offset, x, y);
+ if (IS_I965G(dev)) {
+ I915_WRITE(dspbase, Offset);
+ I915_READ(dspbase);
+ I915_WRITE(dspsurf, Start);
+ I915_READ(dspsurf);
+ } else {
+ I915_WRITE(dspbase, Start + Offset);
+ I915_READ(dspbase);
+ }
+
+ intel_wait_for_vblank(dev);
+
+ if (old_fb) {
+ intel_fb = to_intel_framebuffer(old_fb);
+ i915_gem_object_unpin(intel_fb->obj);
+ }
+
+ if (!dev->primary->master)
+ return;
+
+ master_priv = dev->primary->master->driver_priv;
+ if (!master_priv->sarea_priv)
+ return;
+
+ switch (pipe) {
+ case 0:
+ master_priv->sarea_priv->pipeA_x = x;
+ master_priv->sarea_priv->pipeA_y = y;
+ break;
+ case 1:
+ master_priv->sarea_priv->pipeB_x = x;
+ master_priv->sarea_priv->pipeB_y = y;
+ break;
+ default:
+ DRM_ERROR("Can't update pipe %d in SAREA\n", pipe);
+ break;
+ }
+}
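
A worked example of the Start/Offset computation above, with illustrative numbers not taken from the patch:

/*
 *   32 bpp framebuffer, pitch = 4096 bytes, panned to (x, y) = (10, 20):
 *
 *   Offset = 20 * 4096 + 10 * (32 / 8) = 81960 bytes
 *
 * Pre-965 parts write Start + Offset to DSPxADDR; 965 writes the Offset
 * to DSPxADDR and the GTT Start address to DSPxSURF, matching the
 * register writes above.
 */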
+
+
+
+/**
+ * Sets the power management mode of the pipe and plane.
+ *
+ * This code should probably grow support for turning the cursor off and back
+ * on appropriately at the same time as we're turning the pipe off/on.
+ */
+static void intel_crtc_dpms(struct drm_crtc *crtc, int mode)
+{
+ struct drm_device *dev = crtc->dev;
+ struct drm_i915_master_private *master_priv;
+ struct drm_i915_private *dev_priv = dev->dev_private;
+ struct intel_crtc *intel_crtc = to_intel_crtc(crtc);
+ int pipe = intel_crtc->pipe;
+ int dpll_reg = (pipe == 0) ? DPLL_A : DPLL_B;
+ int dspcntr_reg = (pipe == 0) ? DSPACNTR : DSPBCNTR;
+ int dspbase_reg = (pipe == 0) ? DSPAADDR : DSPBADDR;
+ int pipeconf_reg = (pipe == 0) ? PIPEACONF : PIPEBCONF;
+ u32 temp;
+ bool enabled;
+
+ /* XXX: When our outputs are all unaware of DPMS modes other than off
+ * and on, we should map those modes to DRM_MODE_DPMS_OFF in the CRTC.
+ */
+ switch (mode) {
+ case DRM_MODE_DPMS_ON:
+ case DRM_MODE_DPMS_STANDBY:
+ case DRM_MODE_DPMS_SUSPEND:
+ /* Enable the DPLL */
+ temp = I915_READ(dpll_reg);
+ if ((temp & DPLL_VCO_ENABLE) == 0) {
+ I915_WRITE(dpll_reg, temp);
+ I915_READ(dpll_reg);
+ /* Wait for the clocks to stabilize. */
+ udelay(150);
+ I915_WRITE(dpll_reg, temp | DPLL_VCO_ENABLE);
+ I915_READ(dpll_reg);
+ /* Wait for the clocks to stabilize. */
+ udelay(150);
+ I915_WRITE(dpll_reg, temp | DPLL_VCO_ENABLE);
+ I915_READ(dpll_reg);
+ /* Wait for the clocks to stabilize. */
+ udelay(150);
+ }
+
+ /* Enable the pipe */
+ temp = I915_READ(pipeconf_reg);
+ if ((temp & PIPEACONF_ENABLE) == 0)
+ I915_WRITE(pipeconf_reg, temp | PIPEACONF_ENABLE);
+
+ /* Enable the plane */
+ temp = I915_READ(dspcntr_reg);
+ if ((temp & DISPLAY_PLANE_ENABLE) == 0) {
+ I915_WRITE(dspcntr_reg, temp | DISPLAY_PLANE_ENABLE);
+ /* Flush the plane changes */
+ I915_WRITE(dspbase_reg, I915_READ(dspbase_reg));
+ }
+
+ intel_crtc_load_lut(crtc);
+
+ /* Give the overlay scaler a chance to enable if it's on this pipe */
+ //intel_crtc_dpms_video(crtc, true); TODO
+ break;
+ case DRM_MODE_DPMS_OFF:
+ /* Give the overlay scaler a chance to disable if it's on this pipe */
+ //intel_crtc_dpms_video(crtc, FALSE); TODO
+
+ /* Disable the VGA plane that we never use */
+ I915_WRITE(VGACNTRL, VGA_DISP_DISABLE);
+
+ /* Disable display plane */
+ temp = I915_READ(dspcntr_reg);
+ if ((temp & DISPLAY_PLANE_ENABLE) != 0) {
+ I915_WRITE(dspcntr_reg, temp & ~DISPLAY_PLANE_ENABLE);
+ /* Flush the plane changes */
+ I915_WRITE(dspbase_reg, I915_READ(dspbase_reg));
+ I915_READ(dspbase_reg);
+ }
+
+ if (!IS_I9XX(dev)) {
+ /* Wait for vblank for the disable to take effect */
+ intel_wait_for_vblank(dev);
+ }
+
+ /* Next, disable display pipes */
+ temp = I915_READ(pipeconf_reg);
+ if ((temp & PIPEACONF_ENABLE) != 0) {
+ I915_WRITE(pipeconf_reg, temp & ~PIPEACONF_ENABLE);
+ I915_READ(pipeconf_reg);
+ }
+
+ /* Wait for vblank for the disable to take effect. */
+ intel_wait_for_vblank(dev);
+
+ temp = I915_READ(dpll_reg);
+ if ((temp & DPLL_VCO_ENABLE) != 0) {
+ I915_WRITE(dpll_reg, temp & ~DPLL_VCO_ENABLE);
+ I915_READ(dpll_reg);
+ }
+
+ /* Wait for the clocks to turn off. */
+ udelay(150);
+ break;
+ }
+
+ if (!dev->primary->master)
+ return;
+
+ master_priv = dev->primary->master->driver_priv;
+ if (!master_priv->sarea_priv)
+ return;
+
+ enabled = crtc->enabled && mode != DRM_MODE_DPMS_OFF;
+
+ switch (pipe) {
+ case 0:
+ master_priv->sarea_priv->pipeA_w = enabled ? crtc->mode.hdisplay : 0;
+ master_priv->sarea_priv->pipeA_h = enabled ? crtc->mode.vdisplay : 0;
+ break;
+ case 1:
+ master_priv->sarea_priv->pipeB_w = enabled ? crtc->mode.hdisplay : 0;
+ master_priv->sarea_priv->pipeB_h = enabled ? crtc->mode.vdisplay : 0;
+ break;
+ default:
+ DRM_ERROR("Can't update pipe %d in SAREA\n", pipe);
+ break;
+ }
+
+ intel_crtc->dpms_mode = mode;
+}
+
+static void intel_crtc_prepare (struct drm_crtc *crtc)
+{
+ struct drm_crtc_helper_funcs *crtc_funcs = crtc->helper_private;
+ crtc_funcs->dpms(crtc, DRM_MODE_DPMS_OFF);
+}
+
+static void intel_crtc_commit (struct drm_crtc *crtc)
+{
+ struct drm_crtc_helper_funcs *crtc_funcs = crtc->helper_private;
+ crtc_funcs->dpms(crtc, DRM_MODE_DPMS_ON);
+}
+
+void intel_encoder_prepare (struct drm_encoder *encoder)
+{
+ struct drm_encoder_helper_funcs *encoder_funcs = encoder->helper_private;
+ /* lvds has its own version of prepare see intel_lvds_prepare */
+ encoder_funcs->dpms(encoder, DRM_MODE_DPMS_OFF);
+}
+
+void intel_encoder_commit (struct drm_encoder *encoder)
+{
+ struct drm_encoder_helper_funcs *encoder_funcs = encoder->helper_private;
+ /* lvds has its own version of commit see intel_lvds_commit */
+ encoder_funcs->dpms(encoder, DRM_MODE_DPMS_ON);
+}
+
+static bool intel_crtc_mode_fixup(struct drm_crtc *crtc,
+ struct drm_display_mode *mode,
+ struct drm_display_mode *adjusted_mode)
+{
+ return true;
+}
+
+
+/** Returns the core display clock speed for i830 - i945 */
+static int intel_get_core_clock_speed(struct drm_device *dev)
+{
+ /* Core clock values taken from the published datasheets.
+	 * The 830 may go up to 166 MHz, which we should check.
+ */
+ if (IS_I945G(dev))
+ return 400000;
+ else if (IS_I915G(dev))
+ return 333000;
+ else if (IS_I945GM(dev) || IS_845G(dev))
+ return 200000;
+ else if (IS_I915GM(dev)) {
+ u16 gcfgc = 0;
+
+ pci_read_config_word(dev->pdev, GCFGC, &gcfgc);
+
+ if (gcfgc & GC_LOW_FREQUENCY_ENABLE)
+ return 133000;
+ else {
+ switch (gcfgc & GC_DISPLAY_CLOCK_MASK) {
+ case GC_DISPLAY_CLOCK_333_MHZ:
+ return 333000;
+ default:
+ case GC_DISPLAY_CLOCK_190_200_MHZ:
+ return 190000;
+ }
+ }
+ } else if (IS_I865G(dev))
+ return 266000;
+ else if (IS_I855(dev)) {
+ u16 hpllcc = 0;
+		/* Assume that the hardware is in the high speed state.  This
+		 * should be the default.
+		 */
+ switch (hpllcc & GC_CLOCK_CONTROL_MASK) {
+ case GC_CLOCK_133_200:
+ case GC_CLOCK_100_200:
+ return 200000;
+ case GC_CLOCK_166_250:
+ return 250000;
+ case GC_CLOCK_100_133:
+ return 133000;
+ }
+ } else /* 852, 830 */
+ return 133000;
+
+ return 0; /* Silence gcc warning */
+}
+
+
+/**
+ * Return the pipe currently connected to the panel fitter,
+ * or -1 if the panel fitter is not present or not in use
+ */
+static int intel_panel_fitter_pipe (struct drm_device *dev)
+{
+ struct drm_i915_private *dev_priv = dev->dev_private;
+ u32 pfit_control;
+
+ /* i830 doesn't have a panel fitter */
+ if (IS_I830(dev))
+ return -1;
+
+ pfit_control = I915_READ(PFIT_CONTROL);
+
+ /* See if the panel fitter is in use */
+ if ((pfit_control & PFIT_ENABLE) == 0)
+ return -1;
+
+ /* 965 can place panel fitter on either pipe */
+ if (IS_I965G(dev))
+ return (pfit_control >> 29) & 0x3;
+
+ /* older chips can only use pipe 1 */
+ return 1;
+}
+
+static void intel_crtc_mode_set(struct drm_crtc *crtc,
+ struct drm_display_mode *mode,
+ struct drm_display_mode *adjusted_mode,
+ int x, int y,
+ struct drm_framebuffer *old_fb)
+{
+ struct drm_device *dev = crtc->dev;
+ struct drm_i915_private *dev_priv = dev->dev_private;
+ struct intel_crtc *intel_crtc = to_intel_crtc(crtc);
+ int pipe = intel_crtc->pipe;
+ int fp_reg = (pipe == 0) ? FPA0 : FPB0;
+ int dpll_reg = (pipe == 0) ? DPLL_A : DPLL_B;
+ int dpll_md_reg = (intel_crtc->pipe == 0) ? DPLL_A_MD : DPLL_B_MD;
+ int dspcntr_reg = (pipe == 0) ? DSPACNTR : DSPBCNTR;
+ int pipeconf_reg = (pipe == 0) ? PIPEACONF : PIPEBCONF;
+ int htot_reg = (pipe == 0) ? HTOTAL_A : HTOTAL_B;
+ int hblank_reg = (pipe == 0) ? HBLANK_A : HBLANK_B;
+ int hsync_reg = (pipe == 0) ? HSYNC_A : HSYNC_B;
+ int vtot_reg = (pipe == 0) ? VTOTAL_A : VTOTAL_B;
+ int vblank_reg = (pipe == 0) ? VBLANK_A : VBLANK_B;
+ int vsync_reg = (pipe == 0) ? VSYNC_A : VSYNC_B;
+ int dspsize_reg = (pipe == 0) ? DSPASIZE : DSPBSIZE;
+ int dsppos_reg = (pipe == 0) ? DSPAPOS : DSPBPOS;
+ int pipesrc_reg = (pipe == 0) ? PIPEASRC : PIPEBSRC;
+ int refclk;
+ intel_clock_t clock;
+ u32 dpll = 0, fp = 0, dspcntr, pipeconf;
+ bool ok, is_sdvo = false, is_dvo = false;
+ bool is_crt = false, is_lvds = false, is_tv = false;
+ struct drm_mode_config *mode_config = &dev->mode_config;
+ struct drm_connector *connector;
+
+ drm_vblank_pre_modeset(dev, pipe);
+
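+	/* Work out which output types are driven by this CRTC; the DPLL
+	 * and LVDS programming below depends on it.
+	 */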
+ list_for_each_entry(connector, &mode_config->connector_list, head) {
+ struct intel_output *intel_output = to_intel_output(connector);
+
+ if (!connector->encoder || connector->encoder->crtc != crtc)
+ continue;
+
+ switch (intel_output->type) {
+ case INTEL_OUTPUT_LVDS:
+ is_lvds = true;
+ break;
+ case INTEL_OUTPUT_SDVO:
+ is_sdvo = true;
+ break;
+ case INTEL_OUTPUT_DVO:
+ is_dvo = true;
+ break;
+ case INTEL_OUTPUT_TVOUT:
+ is_tv = true;
+ break;
+ case INTEL_OUTPUT_ANALOG:
+ is_crt = true;
+ break;
+ }
+ }
+
+ if (IS_I9XX(dev)) {
+ refclk = 96000;
+ } else {
+ refclk = 48000;
+ }
+
+ ok = intel_find_best_PLL(crtc, adjusted_mode->clock, refclk, &clock);
+ if (!ok) {
+ DRM_ERROR("Couldn't find PLL settings for mode!\n");
+ return;
+ }
+
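+	/* Pack the divisors into the FP register layout: N shifted to
+	 * bit 16, M1 to bit 8, M2 in the low bits.
+	 */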
+ fp = clock.n << 16 | clock.m1 << 8 | clock.m2;
+
+ dpll = DPLL_VGA_MODE_DIS;
+ if (IS_I9XX(dev)) {
+ if (is_lvds)
+ dpll |= DPLLB_MODE_LVDS;
+ else
+ dpll |= DPLLB_MODE_DAC_SERIAL;
+ if (is_sdvo) {
+ dpll |= DPLL_DVO_HIGH_SPEED;
+ if (IS_I945G(dev) || IS_I945GM(dev)) {
+ int sdvo_pixel_multiply = adjusted_mode->clock / mode->clock;
+ dpll |= (sdvo_pixel_multiply - 1) << SDVO_MULTIPLIER_SHIFT_HIRES;
+ }
+ }
+
+ /* compute bitmask from p1 value */
+ dpll |= (1 << (clock.p1 - 1)) << 16;
+ switch (clock.p2) {
+ case 5:
+ dpll |= DPLL_DAC_SERIAL_P2_CLOCK_DIV_5;
+ break;
+ case 7:
+ dpll |= DPLLB_LVDS_P2_CLOCK_DIV_7;
+ break;
+ case 10:
+ dpll |= DPLL_DAC_SERIAL_P2_CLOCK_DIV_10;
+ break;
+ case 14:
+ dpll |= DPLLB_LVDS_P2_CLOCK_DIV_14;
+ break;
+ }
+ if (IS_I965G(dev))
+ dpll |= (6 << PLL_LOAD_PULSE_PHASE_SHIFT);
+ } else {
+ if (is_lvds) {
+ dpll |= (1 << (clock.p1 - 1)) << DPLL_FPA01_P1_POST_DIV_SHIFT;
+ } else {
+ if (clock.p1 == 2)
+ dpll |= PLL_P1_DIVIDE_BY_TWO;
+ else
+ dpll |= (clock.p1 - 2) << DPLL_FPA01_P1_POST_DIV_SHIFT;
+ if (clock.p2 == 4)
+ dpll |= PLL_P2_DIVIDE_BY_4;
+ }
+ }
+
+ if (is_tv) {
+ /* XXX: just matching BIOS for now */
+/* dpll |= PLL_REF_INPUT_TVCLKINBC; */
+ dpll |= 3;
+ }
+ else
+ dpll |= PLL_REF_INPUT_DREFCLK;
+
+ /* setup pipeconf */
+ pipeconf = I915_READ(pipeconf_reg);
+
+ /* Set up the display plane register */
+ dspcntr = DISPPLANE_GAMMA_ENABLE;
+
+ if (pipe == 0)
+ dspcntr |= DISPPLANE_SEL_PIPE_A;
+ else
+ dspcntr |= DISPPLANE_SEL_PIPE_B;
+
+ if (pipe == 0 && !IS_I965G(dev)) {
+ /* Enable pixel doubling when the dot clock is > 90% of the (display)
+ * core speed.
+ *
+ * XXX: No double-wide on 915GM pipe B. Is that the only reason for the
+ * pipe == 0 check?
+ */
+ if (mode->clock > intel_get_core_clock_speed(dev) * 9 / 10)
+ pipeconf |= PIPEACONF_DOUBLE_WIDE;
+ else
+ pipeconf &= ~PIPEACONF_DOUBLE_WIDE;
+ }
+
+ dspcntr |= DISPLAY_PLANE_ENABLE;
+ pipeconf |= PIPEACONF_ENABLE;
+ dpll |= DPLL_VCO_ENABLE;
+
+ /* Disable the panel fitter if it was on our pipe */
+ if (intel_panel_fitter_pipe(dev) == pipe)
+ I915_WRITE(PFIT_CONTROL, 0);
+
+ DRM_DEBUG("Mode for pipe %c:\n", pipe == 0 ? 'A' : 'B');
+ drm_mode_debug_printmodeline(mode);
+
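+	/* Program FP and the DPLL with the VCO still disabled; the PLL is
+	 * only enabled further down, after the LVDS port (if any) has been
+	 * powered up.
+	 */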
+ if (dpll & DPLL_VCO_ENABLE) {
+ I915_WRITE(fp_reg, fp);
+ I915_WRITE(dpll_reg, dpll & ~DPLL_VCO_ENABLE);
+ I915_READ(dpll_reg);
+ udelay(150);
+ }
+
+ /* The LVDS pin pair needs to be on before the DPLLs are enabled.
+ * This is an exception to the general rule that mode_set doesn't turn
+ * things on.
+ */
+ if (is_lvds) {
+ u32 lvds = I915_READ(LVDS);
+
+ lvds |= LVDS_PORT_EN | LVDS_A0A2_CLKA_POWER_UP | LVDS_PIPEB_SELECT;
+ /* Set the B0-B3 data pairs corresponding to whether we're going to
+ * set the DPLLs for dual-channel mode or not.
+ */
+ if (clock.p2 == 7)
+ lvds |= LVDS_B0B3_POWER_UP | LVDS_CLKB_POWER_UP;
+ else
+ lvds &= ~(LVDS_B0B3_POWER_UP | LVDS_CLKB_POWER_UP);
+
+ /* It would be nice to set 24 vs 18-bit mode (LVDS_A3_POWER_UP)
+ * appropriately here, but we need to look more thoroughly into how
+ * panels behave in the two modes.
+ */
+
+ I915_WRITE(LVDS, lvds);
+ I915_READ(LVDS);
+ }
+
+ I915_WRITE(fp_reg, fp);
+ I915_WRITE(dpll_reg, dpll);
+ I915_READ(dpll_reg);
+ /* Wait for the clocks to stabilize. */
+ udelay(150);
+
+ if (IS_I965G(dev)) {
+ int sdvo_pixel_multiply = adjusted_mode->clock / mode->clock;
+ I915_WRITE(dpll_md_reg, (0 << DPLL_MD_UDI_DIVIDER_SHIFT) |
+ ((sdvo_pixel_multiply - 1) << DPLL_MD_UDI_MULTIPLIER_SHIFT));
+ } else {
+ /* write it again -- the BIOS does, after all */
+ I915_WRITE(dpll_reg, dpll);
+ }
+ I915_READ(dpll_reg);
+ /* Wait for the clocks to stabilize. */
+ udelay(150);
+
+ I915_WRITE(htot_reg, (adjusted_mode->crtc_hdisplay - 1) |
+ ((adjusted_mode->crtc_htotal - 1) << 16));
+ I915_WRITE(hblank_reg, (adjusted_mode->crtc_hblank_start - 1) |
+ ((adjusted_mode->crtc_hblank_end - 1) << 16));
+ I915_WRITE(hsync_reg, (adjusted_mode->crtc_hsync_start - 1) |
+ ((adjusted_mode->crtc_hsync_end - 1) << 16));
+ I915_WRITE(vtot_reg, (adjusted_mode->crtc_vdisplay - 1) |
+ ((adjusted_mode->crtc_vtotal - 1) << 16));
+ I915_WRITE(vblank_reg, (adjusted_mode->crtc_vblank_start - 1) |
+ ((adjusted_mode->crtc_vblank_end - 1) << 16));
+ I915_WRITE(vsync_reg, (adjusted_mode->crtc_vsync_start - 1) |
+ ((adjusted_mode->crtc_vsync_end - 1) << 16));
+ /* pipesrc and dspsize control the size that is scaled from, which should
+ * always be the user's requested size.
+ */
+ I915_WRITE(dspsize_reg, ((mode->vdisplay - 1) << 16) | (mode->hdisplay - 1));
+ I915_WRITE(dsppos_reg, 0);
+ I915_WRITE(pipesrc_reg, ((mode->hdisplay - 1) << 16) | (mode->vdisplay - 1));
+ I915_WRITE(pipeconf_reg, pipeconf);
+ I915_READ(pipeconf_reg);
+
+ intel_wait_for_vblank(dev);
+
+ I915_WRITE(dspcntr_reg, dspcntr);
+
+ /* Flush the plane changes */
+ intel_pipe_set_base(crtc, x, y, old_fb);
+
+ drm_vblank_post_modeset(dev, pipe);
+}
+
+/** Loads the palette/gamma unit for the CRTC with the prepared values */
+void intel_crtc_load_lut(struct drm_crtc *crtc)
+{
+ struct drm_device *dev = crtc->dev;
+ struct drm_i915_private *dev_priv = dev->dev_private;
+ struct intel_crtc *intel_crtc = to_intel_crtc(crtc);
+ int palreg = (intel_crtc->pipe == 0) ? PALETTE_A : PALETTE_B;
+ int i;
+
+ /* The clocks have to be on to load the palette. */
+ if (!crtc->enabled)
+ return;
+
+ for (i = 0; i < 256; i++) {
+ I915_WRITE(palreg + 4 * i,
+ (intel_crtc->lut_r[i] << 16) |
+ (intel_crtc->lut_g[i] << 8) |
+ intel_crtc->lut_b[i]);
+ }
+}
+
+static int intel_crtc_cursor_set(struct drm_crtc *crtc,
+ struct drm_file *file_priv,
+ uint32_t handle,
+ uint32_t width, uint32_t height)
+{
+ struct drm_device *dev = crtc->dev;
+ struct drm_i915_private *dev_priv = dev->dev_private;
+ struct intel_crtc *intel_crtc = to_intel_crtc(crtc);
+ struct drm_gem_object *bo;
+ struct drm_i915_gem_object *obj_priv;
+ int pipe = intel_crtc->pipe;
+ uint32_t control = (pipe == 0) ? CURACNTR : CURBCNTR;
+ uint32_t base = (pipe == 0) ? CURABASE : CURBBASE;
+ uint32_t temp;
+ size_t addr;
+
+ DRM_DEBUG("\n");
+
+ /* if we want to turn off the cursor ignore width and height */
+ if (!handle) {
+ DRM_DEBUG("cursor off\n");
+		/* turn off the cursor */
+ temp = 0;
+ temp |= CURSOR_MODE_DISABLE;
+
+ I915_WRITE(control, temp);
+ I915_WRITE(base, 0);
+ return 0;
+ }
+
+ /* Currently we only support 64x64 cursors */
+ if (width != 64 || height != 64) {
+ DRM_ERROR("we currently only support 64x64 cursors\n");
+ return -EINVAL;
+ }
+
+ bo = drm_gem_object_lookup(dev, file_priv, handle);
+ if (!bo)
+ return -ENOENT;
+
+ obj_priv = bo->driver_private;
+
+ if (bo->size < width * height * 4) {
+		DRM_ERROR("buffer is too small\n");
+ drm_gem_object_unreference(bo);
+ return -ENOMEM;
+ }
+
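+	/* Some chipsets require a physical address for the cursor base, in
+	 * which case we add the AGP aperture base; otherwise the GTT offset
+	 * is programmed directly.
+	 */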
+ if (dev_priv->cursor_needs_physical) {
+ addr = dev->agp->base + obj_priv->gtt_offset;
+ } else {
+ addr = obj_priv->gtt_offset;
+ }
+
+ intel_crtc->cursor_addr = addr;
+ temp = 0;
+ /* set the pipe for the cursor */
+ temp |= (pipe << 28);
+ temp |= CURSOR_MODE_64_ARGB_AX | MCURSOR_GAMMA_ENABLE;
+
+ I915_WRITE(control, temp);
+ I915_WRITE(base, addr);
+
+ return 0;
+}
+
+static int intel_crtc_cursor_move(struct drm_crtc *crtc, int x, int y)
+{
+ struct drm_device *dev = crtc->dev;
+ struct drm_i915_private *dev_priv = dev->dev_private;
+ struct intel_crtc *intel_crtc = to_intel_crtc(crtc);
+ int pipe = intel_crtc->pipe;
+ uint32_t temp = 0;
+ uint32_t adder;
+
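+	/* The cursor position registers take sign-magnitude coordinates:
+	 * a sign bit plus the absolute value for each axis.
+	 */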
+ if (x < 0) {
+ temp |= (CURSOR_POS_SIGN << CURSOR_X_SHIFT);
+ x = -x;
+ }
+ if (y < 0) {
+ temp |= (CURSOR_POS_SIGN << CURSOR_Y_SHIFT);
+ y = -y;
+ }
+
+ temp |= ((x & CURSOR_POS_MASK) << CURSOR_X_SHIFT);
+ temp |= ((y & CURSOR_POS_MASK) << CURSOR_Y_SHIFT);
+
+ adder = intel_crtc->cursor_addr;
+ I915_WRITE((pipe == 0) ? CURAPOS : CURBPOS, temp);
+ I915_WRITE((pipe == 0) ? CURABASE : CURBBASE, adder);
+
+ return 0;
+}
+
+/** Sets the color ramps on behalf of RandR */
+void intel_crtc_fb_gamma_set(struct drm_crtc *crtc, u16 red, u16 green,
+ u16 blue, int regno)
+{
+ struct intel_crtc *intel_crtc = to_intel_crtc(crtc);
+
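+	/* RandR hands us 16-bit color components; the hardware palette is
+	 * 8 bits per channel, so keep only the high byte.
+	 */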
+ intel_crtc->lut_r[regno] = red >> 8;
+ intel_crtc->lut_g[regno] = green >> 8;
+ intel_crtc->lut_b[regno] = blue >> 8;
+}
+
+static void intel_crtc_gamma_set(struct drm_crtc *crtc, u16 *red, u16 *green,
+ u16 *blue, uint32_t size)
+{
+ struct intel_crtc *intel_crtc = to_intel_crtc(crtc);
+ int i;
+
+ if (size != 256)
+ return;
+
+ for (i = 0; i < 256; i++) {
+ intel_crtc->lut_r[i] = red[i] >> 8;
+ intel_crtc->lut_g[i] = green[i] >> 8;
+ intel_crtc->lut_b[i] = blue[i] >> 8;
+ }
+
+ intel_crtc_load_lut(crtc);
+}
+
+/**
+ * Get a pipe with a simple mode set on it for doing load-based monitor
+ * detection.
+ *
+ * It will be up to the load-detect code to adjust the pipe as appropriate for
+ * its requirements. The pipe will be connected to no other outputs.
+ *
+ * Currently this code will only succeed if there is a pipe with no outputs
+ * configured for it. In the future, it could choose to temporarily disable
+ * some outputs to free up a pipe for its use.
+ *
+ * \return crtc, or NULL if no pipes are available.
+ */
+
+/* VESA 640x480x72Hz mode to set on the pipe */
+static struct drm_display_mode load_detect_mode = {
+ DRM_MODE("640x480", DRM_MODE_TYPE_DEFAULT, 31500, 640, 664,
+ 704, 832, 0, 480, 489, 491, 520, 0, DRM_MODE_FLAG_NHSYNC | DRM_MODE_FLAG_NVSYNC),
+};
+
+struct drm_crtc *intel_get_load_detect_pipe(struct intel_output *intel_output,
+ struct drm_display_mode *mode,
+ int *dpms_mode)
+{
+ struct intel_crtc *intel_crtc;
+ struct drm_crtc *possible_crtc;
+	struct drm_crtc *supported_crtc = NULL;
+ struct drm_encoder *encoder = &intel_output->enc;
+ struct drm_crtc *crtc = NULL;
+ struct drm_device *dev = encoder->dev;
+ struct drm_encoder_helper_funcs *encoder_funcs = encoder->helper_private;
+ struct drm_crtc_helper_funcs *crtc_funcs;
+ int i = -1;
+
+ /*
+ * Algorithm gets a little messy:
+ * - if the connector already has an assigned crtc, use it (but make
+ * sure it's on first)
+ * - try to find the first unused crtc that can drive this connector,
+ * and use that if we find one
+ * - if there are no unused crtcs available, try to use the first
+ * one we found that supports the connector
+ */
+
+ /* See if we already have a CRTC for this connector */
+ if (encoder->crtc) {
+ crtc = encoder->crtc;
+ /* Make sure the crtc and connector are running */
+ intel_crtc = to_intel_crtc(crtc);
+ *dpms_mode = intel_crtc->dpms_mode;
+ if (intel_crtc->dpms_mode != DRM_MODE_DPMS_ON) {
+ crtc_funcs = crtc->helper_private;
+ crtc_funcs->dpms(crtc, DRM_MODE_DPMS_ON);
+ encoder_funcs->dpms(encoder, DRM_MODE_DPMS_ON);
+ }
+ return crtc;
+ }
+
+ /* Find an unused one (if possible) */
+ list_for_each_entry(possible_crtc, &dev->mode_config.crtc_list, head) {
+ i++;
+ if (!(encoder->possible_crtcs & (1 << i)))
+ continue;
+ if (!possible_crtc->enabled) {
+ crtc = possible_crtc;
+ break;
+ }
+ if (!supported_crtc)
+ supported_crtc = possible_crtc;
+ }
+
+ /*
+ * If we didn't find an unused CRTC, don't use any.
+ */
+ if (!crtc) {
+ return NULL;
+ }
+
+ encoder->crtc = crtc;
+ intel_output->load_detect_temp = true;
+
+ intel_crtc = to_intel_crtc(crtc);
+ *dpms_mode = intel_crtc->dpms_mode;
+
+ if (!crtc->enabled) {
+ if (!mode)
+ mode = &load_detect_mode;
+ drm_crtc_helper_set_mode(crtc, mode, 0, 0, crtc->fb);
+ } else {
+ if (intel_crtc->dpms_mode != DRM_MODE_DPMS_ON) {
+ crtc_funcs = crtc->helper_private;
+ crtc_funcs->dpms(crtc, DRM_MODE_DPMS_ON);
+ }
+
+ /* Add this connector to the crtc */
+ encoder_funcs->mode_set(encoder, &crtc->mode, &crtc->mode);
+ encoder_funcs->commit(encoder);
+ }
+ /* let the connector get through one full cycle before testing */
+ intel_wait_for_vblank(dev);
+
+ return crtc;
+}
+
+void intel_release_load_detect_pipe(struct intel_output *intel_output, int dpms_mode)
+{
+ struct drm_encoder *encoder = &intel_output->enc;
+ struct drm_device *dev = encoder->dev;
+ struct drm_crtc *crtc = encoder->crtc;
+ struct drm_encoder_helper_funcs *encoder_funcs = encoder->helper_private;
+ struct drm_crtc_helper_funcs *crtc_funcs = crtc->helper_private;
+
+ if (intel_output->load_detect_temp) {
+ encoder->crtc = NULL;
+ intel_output->load_detect_temp = false;
+ crtc->enabled = drm_helper_crtc_in_use(crtc);
+ drm_helper_disable_unused_functions(dev);
+ }
+
+ /* Switch crtc and output back off if necessary */
+ if (crtc->enabled && dpms_mode != DRM_MODE_DPMS_ON) {
+ if (encoder->crtc == crtc)
+ encoder_funcs->dpms(encoder, dpms_mode);
+ crtc_funcs->dpms(crtc, dpms_mode);
+ }
+}
+
+/* Returns the clock of the currently programmed mode of the given pipe. */
+static int intel_crtc_clock_get(struct drm_device *dev, struct drm_crtc *crtc)
+{
+ struct drm_i915_private *dev_priv = dev->dev_private;
+ struct intel_crtc *intel_crtc = to_intel_crtc(crtc);
+ int pipe = intel_crtc->pipe;
+ u32 dpll = I915_READ((pipe == 0) ? DPLL_A : DPLL_B);
+ u32 fp;
+ intel_clock_t clock;
+
+ if ((dpll & DISPLAY_RATE_SELECT_FPA1) == 0)
+ fp = I915_READ((pipe == 0) ? FPA0 : FPB0);
+ else
+ fp = I915_READ((pipe == 0) ? FPA1 : FPB1);
+
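+	/* Unpack the M1/M2/N divisors from the FP register; together with
+	 * the P dividers decoded below they let us recompute the dot clock.
+	 */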
+ clock.m1 = (fp & FP_M1_DIV_MASK) >> FP_M1_DIV_SHIFT;
+ clock.m2 = (fp & FP_M2_DIV_MASK) >> FP_M2_DIV_SHIFT;
+ clock.n = (fp & FP_N_DIV_MASK) >> FP_N_DIV_SHIFT;
+ if (IS_I9XX(dev)) {
+ clock.p1 = ffs((dpll & DPLL_FPA01_P1_POST_DIV_MASK) >>
+ DPLL_FPA01_P1_POST_DIV_SHIFT);
+
+ switch (dpll & DPLL_MODE_MASK) {
+ case DPLLB_MODE_DAC_SERIAL:
+ clock.p2 = dpll & DPLL_DAC_SERIAL_P2_CLOCK_DIV_5 ?
+ 5 : 10;
+ break;
+ case DPLLB_MODE_LVDS:
+ clock.p2 = dpll & DPLLB_LVDS_P2_CLOCK_DIV_7 ?
+ 7 : 14;
+ break;
+ default:
+ DRM_DEBUG("Unknown DPLL mode %08x in programmed "
+ "mode\n", (int)(dpll & DPLL_MODE_MASK));
+ return 0;
+ }
+
+ /* XXX: Handle the 100Mhz refclk */
+ i9xx_clock(96000, &clock);
+ } else {
+ bool is_lvds = (pipe == 1) && (I915_READ(LVDS) & LVDS_PORT_EN);
+
+ if (is_lvds) {
+ clock.p1 = ffs((dpll & DPLL_FPA01_P1_POST_DIV_MASK_I830_LVDS) >>
+ DPLL_FPA01_P1_POST_DIV_SHIFT);
+ clock.p2 = 14;
+
+ if ((dpll & PLL_REF_INPUT_MASK) ==
+ PLLB_REF_INPUT_SPREADSPECTRUMIN) {
+ /* XXX: might not be 66MHz */
+ i8xx_clock(66000, &clock);
+ } else
+ i8xx_clock(48000, &clock);
+ } else {
+ if (dpll & PLL_P1_DIVIDE_BY_TWO)
+ clock.p1 = 2;
+ else {
+ clock.p1 = ((dpll & DPLL_FPA01_P1_POST_DIV_MASK_I830) >>
+ DPLL_FPA01_P1_POST_DIV_SHIFT) + 2;
+ }
+ if (dpll & PLL_P2_DIVIDE_BY_4)
+ clock.p2 = 4;
+ else
+ clock.p2 = 2;
+
+ i8xx_clock(48000, &clock);
+ }
+ }
+
+ /* XXX: It would be nice to validate the clocks, but we can't reuse
+ * i830PllIsValid() because it relies on the xf86_config connector
+ * configuration being accurate, which it isn't necessarily.
+ */
+
+ return clock.dot;
+}
+
+/** Returns the currently programmed mode of the given pipe. */
+struct drm_display_mode *intel_crtc_mode_get(struct drm_device *dev,
+ struct drm_crtc *crtc)
+{
+ struct drm_i915_private *dev_priv = dev->dev_private;
+ struct intel_crtc *intel_crtc = to_intel_crtc(crtc);
+ int pipe = intel_crtc->pipe;
+ struct drm_display_mode *mode;
+ int htot = I915_READ((pipe == 0) ? HTOTAL_A : HTOTAL_B);
+ int hsync = I915_READ((pipe == 0) ? HSYNC_A : HSYNC_B);
+ int vtot = I915_READ((pipe == 0) ? VTOTAL_A : VTOTAL_B);
+ int vsync = I915_READ((pipe == 0) ? VSYNC_A : VSYNC_B);
+
+ mode = kzalloc(sizeof(*mode), GFP_KERNEL);
+ if (!mode)
+ return NULL;
+
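+	/* The timing registers store each value minus one, with the
+	 * end/total half packed into the upper 16 bits.
+	 */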
+ mode->clock = intel_crtc_clock_get(dev, crtc);
+ mode->hdisplay = (htot & 0xffff) + 1;
+ mode->htotal = ((htot & 0xffff0000) >> 16) + 1;
+ mode->hsync_start = (hsync & 0xffff) + 1;
+ mode->hsync_end = ((hsync & 0xffff0000) >> 16) + 1;
+ mode->vdisplay = (vtot & 0xffff) + 1;
+ mode->vtotal = ((vtot & 0xffff0000) >> 16) + 1;
+ mode->vsync_start = (vsync & 0xffff) + 1;
+ mode->vsync_end = ((vsync & 0xffff0000) >> 16) + 1;
+
+ drm_mode_set_name(mode);
+ drm_mode_set_crtcinfo(mode, 0);
+
+ return mode;
+}
+
+static void intel_crtc_destroy(struct drm_crtc *crtc)
+{
+ struct intel_crtc *intel_crtc = to_intel_crtc(crtc);
+
+ drm_crtc_cleanup(crtc);
+ kfree(intel_crtc);
+}
+
+static const struct drm_crtc_helper_funcs intel_helper_funcs = {
+ .dpms = intel_crtc_dpms,
+ .mode_fixup = intel_crtc_mode_fixup,
+ .mode_set = intel_crtc_mode_set,
+ .mode_set_base = intel_pipe_set_base,
+ .prepare = intel_crtc_prepare,
+ .commit = intel_crtc_commit,
+};
+
+static const struct drm_crtc_funcs intel_crtc_funcs = {
+ .cursor_set = intel_crtc_cursor_set,
+ .cursor_move = intel_crtc_cursor_move,
+ .gamma_set = intel_crtc_gamma_set,
+ .set_config = drm_crtc_helper_set_config,
+ .destroy = intel_crtc_destroy,
+};
+
+
+static void intel_crtc_init(struct drm_device *dev, int pipe)
+{
+ struct intel_crtc *intel_crtc;
+ int i;
+
+ intel_crtc = kzalloc(sizeof(struct intel_crtc) + (INTELFB_CONN_LIMIT * sizeof(struct drm_connector *)), GFP_KERNEL);
+ if (intel_crtc == NULL)
+ return;
+
+ drm_crtc_init(dev, &intel_crtc->base, &intel_crtc_funcs);
+
+ drm_mode_crtc_set_gamma_size(&intel_crtc->base, 256);
+ intel_crtc->pipe = pipe;
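+	/* Start with an identity (linear) gamma ramp. */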
+ for (i = 0; i < 256; i++) {
+ intel_crtc->lut_r[i] = i;
+ intel_crtc->lut_g[i] = i;
+ intel_crtc->lut_b[i] = i;
+ }
+
+ intel_crtc->cursor_addr = 0;
+ intel_crtc->dpms_mode = DRM_MODE_DPMS_OFF;
+ drm_crtc_helper_add(&intel_crtc->base, &intel_helper_funcs);
+
+ intel_crtc->mode_set.crtc = &intel_crtc->base;
+ intel_crtc->mode_set.connectors = (struct drm_connector **)(intel_crtc + 1);
+ intel_crtc->mode_set.num_connectors = 0;
+
+ if (i915_fbpercrtc) {
+
+
+
+ }
+}
+
+struct drm_crtc *intel_get_crtc_from_pipe(struct drm_device *dev, int pipe)
+{
+ struct drm_crtc *crtc = NULL;
+
+ list_for_each_entry(crtc, &dev->mode_config.crtc_list, head) {
+ struct intel_crtc *intel_crtc = to_intel_crtc(crtc);
+ if (intel_crtc->pipe == pipe)
+ break;
+ }
+ return crtc;
+}
+
+static int intel_connector_clones(struct drm_device *dev, int type_mask)
+{
+ int index_mask = 0;
+ struct drm_connector *connector;
+ int entry = 0;
+
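+	/* Build a bitmask of connector-list positions whose output type is
+	 * in type_mask; used to fill each encoder's possible_clones.
+	 */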
+ list_for_each_entry(connector, &dev->mode_config.connector_list, head) {
+ struct intel_output *intel_output = to_intel_output(connector);
+ if (type_mask & (1 << intel_output->type))
+ index_mask |= (1 << entry);
+ entry++;
+ }
+ return index_mask;
+}
+
+
+static void intel_setup_outputs(struct drm_device *dev)
+{
+ struct drm_connector *connector;
+
+ intel_crt_init(dev);
+
+ /* Set up integrated LVDS */
+ if (IS_MOBILE(dev) && !IS_I830(dev))
+ intel_lvds_init(dev);
+
+ if (IS_I9XX(dev)) {
+ intel_sdvo_init(dev, SDVOB);
+ intel_sdvo_init(dev, SDVOC);
+ } else
+ intel_dvo_init(dev);
+
+ if (IS_I9XX(dev) && !IS_I915G(dev))
+ intel_tv_init(dev);
+
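+	/* For each output, record which CRTCs can drive it and which other
+	 * output types may share (clone) a CRTC with it.
+	 */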
+ list_for_each_entry(connector, &dev->mode_config.connector_list, head) {
+ struct intel_output *intel_output = to_intel_output(connector);
+ struct drm_encoder *encoder = &intel_output->enc;
+ int crtc_mask = 0, clone_mask = 0;
+
+ /* valid crtcs */
+ switch(intel_output->type) {
+ case INTEL_OUTPUT_DVO:
+ case INTEL_OUTPUT_SDVO:
+ crtc_mask = ((1 << 0)|
+ (1 << 1));
+ clone_mask = ((1 << INTEL_OUTPUT_ANALOG) |
+ (1 << INTEL_OUTPUT_DVO) |
+ (1 << INTEL_OUTPUT_SDVO));
+ break;
+ case INTEL_OUTPUT_ANALOG:
+ crtc_mask = ((1 << 0)|
+ (1 << 1));
+ clone_mask = ((1 << INTEL_OUTPUT_ANALOG) |
+ (1 << INTEL_OUTPUT_DVO) |
+ (1 << INTEL_OUTPUT_SDVO));
+ break;
+ case INTEL_OUTPUT_LVDS:
+ crtc_mask = (1 << 1);
+ clone_mask = (1 << INTEL_OUTPUT_LVDS);
+ break;
+ case INTEL_OUTPUT_TVOUT:
+ crtc_mask = ((1 << 0) |
+ (1 << 1));
+ clone_mask = (1 << INTEL_OUTPUT_TVOUT);
+ break;
+ }
+ encoder->possible_crtcs = crtc_mask;
+ encoder->possible_clones = intel_connector_clones(dev, clone_mask);
+ }
+}
+
+static void intel_user_framebuffer_destroy(struct drm_framebuffer *fb)
+{
+ struct intel_framebuffer *intel_fb = to_intel_framebuffer(fb);
+ struct drm_device *dev = fb->dev;
+
+ if (fb->fbdev)
+ intelfb_remove(dev, fb);
+
+ drm_framebuffer_cleanup(fb);
+ mutex_lock(&dev->struct_mutex);
+ drm_gem_object_unreference(intel_fb->obj);
+ mutex_unlock(&dev->struct_mutex);
+
+ kfree(intel_fb);
+}
+
+static int intel_user_framebuffer_create_handle(struct drm_framebuffer *fb,
+ struct drm_file *file_priv,
+ unsigned int *handle)
+{
+ struct intel_framebuffer *intel_fb = to_intel_framebuffer(fb);
+ struct drm_gem_object *object = intel_fb->obj;
+
+ return drm_gem_handle_create(file_priv, object, handle);
+}
+
+static const struct drm_framebuffer_funcs intel_fb_funcs = {
+ .destroy = intel_user_framebuffer_destroy,
+ .create_handle = intel_user_framebuffer_create_handle,
+};
+
+int intel_framebuffer_create(struct drm_device *dev,
+ struct drm_mode_fb_cmd *mode_cmd,
+ struct drm_framebuffer **fb,
+ struct drm_gem_object *obj)
+{
+ struct intel_framebuffer *intel_fb;
+ int ret;
+
+ intel_fb = kzalloc(sizeof(*intel_fb), GFP_KERNEL);
+ if (!intel_fb)
+ return -ENOMEM;
+
+ ret = drm_framebuffer_init(dev, &intel_fb->base, &intel_fb_funcs);
+ if (ret) {
+ DRM_ERROR("framebuffer init failed %d\n", ret);
+ return ret;
+ }
+
+ drm_helper_mode_fill_fb_struct(&intel_fb->base, mode_cmd);
+
+ intel_fb->obj = obj;
+
+ *fb = &intel_fb->base;
+
+ return 0;
+}
+
+
+static struct drm_framebuffer *
+intel_user_framebuffer_create(struct drm_device *dev,
+ struct drm_file *filp,
+ struct drm_mode_fb_cmd *mode_cmd)
+{
+ struct drm_gem_object *obj;
+ struct drm_framebuffer *fb;
+ int ret;
+
+ obj = drm_gem_object_lookup(dev, filp, mode_cmd->handle);
+ if (!obj)
+ return NULL;
+
+ ret = intel_framebuffer_create(dev, mode_cmd, &fb, obj);
+ if (ret) {
+ drm_gem_object_unreference(obj);
+ return NULL;
+ }
+
+ return fb;
+}
+
+static const struct drm_mode_config_funcs intel_mode_funcs = {
+ .fb_create = intel_user_framebuffer_create,
+ .fb_changed = intelfb_probe,
+};
+
+void intel_modeset_init(struct drm_device *dev)
+{
+ int num_pipe;
+ int i;
+
+ drm_mode_config_init(dev);
+
+ dev->mode_config.min_width = 0;
+ dev->mode_config.min_height = 0;
+
+ dev->mode_config.funcs = (void *)&intel_mode_funcs;
+
+ if (IS_I965G(dev)) {
+ dev->mode_config.max_width = 8192;
+ dev->mode_config.max_height = 8192;
+ } else {
+ dev->mode_config.max_width = 2048;
+ dev->mode_config.max_height = 2048;
+ }
+
+ /* set memory base */
+ if (IS_I9XX(dev))
+ dev->mode_config.fb_base = pci_resource_start(dev->pdev, 2);
+ else
+ dev->mode_config.fb_base = pci_resource_start(dev->pdev, 0);
+
+ if (IS_MOBILE(dev) || IS_I9XX(dev))
+ num_pipe = 2;
+ else
+ num_pipe = 1;
+ DRM_DEBUG("%d display pipe%s available.\n",
+ num_pipe, num_pipe > 1 ? "s" : "");
+
+ for (i = 0; i < num_pipe; i++) {
+ intel_crtc_init(dev, i);
+ }
+
+ intel_setup_outputs(dev);
+}
+
+void intel_modeset_cleanup(struct drm_device *dev)
+{
+ drm_mode_config_cleanup(dev);
+}
+
+
+/* The current intel driver doesn't take full advantage of encoders;
+ * always give back the encoder for the connector.
+ */
+struct drm_encoder *intel_best_encoder(struct drm_connector *connector)
+{
+ struct intel_output *intel_output = to_intel_output(connector);
+
+ return &intel_output->enc;
+}
diff --git a/drivers/gpu/drm/i915/intel_drv.h b/drivers/gpu/drm/i915/intel_drv.h
new file mode 100644
index 00000000000..407edd5bf58
--- /dev/null
+++ b/drivers/gpu/drm/i915/intel_drv.h
@@ -0,0 +1,146 @@
+/*
+ * Copyright (c) 2006 Dave Airlie <airlied@linux.ie>
+ * Copyright (c) 2007-2008 Intel Corporation
+ * Jesse Barnes <jesse.barnes@intel.com>
+ *
+ * Permission is hereby granted, free of charge, to any person obtaining a
+ * copy of this software and associated documentation files (the "Software"),
+ * to deal in the Software without restriction, including without limitation
+ * the rights to use, copy, modify, merge, publish, distribute, sublicense,
+ * and/or sell copies of the Software, and to permit persons to whom the
+ * Software is furnished to do so, subject to the following conditions:
+ *
+ * The above copyright notice and this permission notice (including the next
+ * paragraph) shall be included in all copies or substantial portions of the
+ * Software.
+ *
+ * THE SOFTWARE IS PROVIDED "AS IS", WITHOUT WARRANTY OF ANY KIND, EXPRESS OR
+ * IMPLIED, INCLUDING BUT NOT LIMITED TO THE WARRANTIES OF MERCHANTABILITY,
+ * FITNESS FOR A PARTICULAR PURPOSE AND NONINFRINGEMENT. IN NO EVENT SHALL
+ * THE AUTHORS OR COPYRIGHT HOLDERS BE LIABLE FOR ANY CLAIM, DAMAGES OR OTHER
+ * LIABILITY, WHETHER IN AN ACTION OF CONTRACT, TORT OR OTHERWISE, ARISING
+ * FROM, OUT OF OR IN CONNECTION WITH THE SOFTWARE OR THE USE OR OTHER DEALINGS
+ * IN THE SOFTWARE.
+ */
+#ifndef __INTEL_DRV_H__
+#define __INTEL_DRV_H__
+
+#include <linux/i2c.h>
+#include <linux/i2c-id.h>
+#include <linux/i2c-algo-bit.h>
+#include "drm_crtc.h"
+
+#include "drm_crtc_helper.h"
+/*
+ * Display related stuff
+ */
+
+/* store information about an Ixxx DVO */
+/* The i830->i865 use multiple DVOs with multiple i2cs */
+/* the i915, i945 have a single sDVO i2c bus - which is different */
+#define MAX_OUTPUTS 6
+/* maximum connectors per crtcs in the mode set */
+#define INTELFB_CONN_LIMIT 4
+
+#define INTEL_I2C_BUS_DVO 1
+#define INTEL_I2C_BUS_SDVO 2
+
+/* these are outputs from the chip - integrated only
+ external chips are via DVO or SDVO output */
+#define INTEL_OUTPUT_UNUSED 0
+#define INTEL_OUTPUT_ANALOG 1
+#define INTEL_OUTPUT_DVO 2
+#define INTEL_OUTPUT_SDVO 3
+#define INTEL_OUTPUT_LVDS 4
+#define INTEL_OUTPUT_TVOUT 5
+
+#define INTEL_DVO_CHIP_NONE 0
+#define INTEL_DVO_CHIP_LVDS 1
+#define INTEL_DVO_CHIP_TMDS 2
+#define INTEL_DVO_CHIP_TVOUT 4
+
+struct intel_i2c_chan {
+ struct drm_device *drm_dev; /* for getting at dev. private (mmio etc.) */
+ u32 reg; /* GPIO reg */
+ struct i2c_adapter adapter;
+ struct i2c_algo_bit_data algo;
+ u8 slave_addr;
+};
+
+struct intel_framebuffer {
+ struct drm_framebuffer base;
+ struct drm_gem_object *obj;
+};
+
+
+struct intel_output {
+ struct drm_connector base;
+
+ struct drm_encoder enc;
+ int type;
+ struct intel_i2c_chan *i2c_bus; /* for control functions */
+ struct intel_i2c_chan *ddc_bus; /* for DDC only stuff */
+ bool load_detect_temp;
+ void *dev_priv;
+};
+
+struct intel_crtc {
+ struct drm_crtc base;
+ int pipe;
+ int plane;
+ uint32_t cursor_addr;
+ u8 lut_r[256], lut_g[256], lut_b[256];
+ int dpms_mode;
+ struct intel_framebuffer *fbdev_fb;
+ /* a mode_set for fbdev users on this crtc */
+ struct drm_mode_set mode_set;
+};
+
+#define to_intel_crtc(x) container_of(x, struct intel_crtc, base)
+#define to_intel_output(x) container_of(x, struct intel_output, base)
+#define enc_to_intel_output(x) container_of(x, struct intel_output, enc)
+#define to_intel_framebuffer(x) container_of(x, struct intel_framebuffer, base)
+
+struct intel_i2c_chan *intel_i2c_create(struct drm_device *dev, const u32 reg,
+ const char *name);
+void intel_i2c_destroy(struct intel_i2c_chan *chan);
+int intel_ddc_get_modes(struct intel_output *intel_output);
+extern bool intel_ddc_probe(struct intel_output *intel_output);
+
+extern void intel_crt_init(struct drm_device *dev);
+extern void intel_sdvo_init(struct drm_device *dev, int output_device);
+extern void intel_dvo_init(struct drm_device *dev);
+extern void intel_tv_init(struct drm_device *dev);
+extern void intel_lvds_init(struct drm_device *dev);
+
+extern void intel_crtc_load_lut(struct drm_crtc *crtc);
+extern void intel_encoder_prepare (struct drm_encoder *encoder);
+extern void intel_encoder_commit (struct drm_encoder *encoder);
+
+extern struct drm_encoder *intel_best_encoder(struct drm_connector *connector);
+
+extern struct drm_display_mode *intel_crtc_mode_get(struct drm_device *dev,
+ struct drm_crtc *crtc);
+extern void intel_wait_for_vblank(struct drm_device *dev);
+extern struct drm_crtc *intel_get_crtc_from_pipe(struct drm_device *dev, int pipe);
+extern struct drm_crtc *intel_get_load_detect_pipe(struct intel_output *intel_output,
+ struct drm_display_mode *mode,
+ int *dpms_mode);
+extern void intel_release_load_detect_pipe(struct intel_output *intel_output,
+ int dpms_mode);
+
+extern struct drm_connector* intel_sdvo_find(struct drm_device *dev, int sdvoB);
+extern int intel_sdvo_supports_hotplug(struct drm_connector *connector);
+extern void intel_sdvo_set_hotplug(struct drm_connector *connector, int enable);
+extern int intelfb_probe(struct drm_device *dev);
+extern int intelfb_remove(struct drm_device *dev, struct drm_framebuffer *fb);
+extern int intelfb_resize(struct drm_device *dev, struct drm_crtc *crtc);
+extern void intelfb_restore(void);
+extern void intel_crtc_fb_gamma_set(struct drm_crtc *crtc, u16 red, u16 green,
+ u16 blue, int regno);
+
+extern int intel_framebuffer_create(struct drm_device *dev,
+ struct drm_mode_fb_cmd *mode_cmd,
+ struct drm_framebuffer **fb,
+ struct drm_gem_object *obj);
+#endif /* __INTEL_DRV_H__ */
diff --git a/drivers/gpu/drm/i915/intel_dvo.c b/drivers/gpu/drm/i915/intel_dvo.c
new file mode 100644
index 00000000000..8b8d6e65cd3
--- /dev/null
+++ b/drivers/gpu/drm/i915/intel_dvo.c
@@ -0,0 +1,495 @@
+/*
+ * Copyright 2006 Dave Airlie <airlied@linux.ie>
+ * Copyright © 2006-2007 Intel Corporation
+ *
+ * Permission is hereby granted, free of charge, to any person obtaining a
+ * copy of this software and associated documentation files (the "Software"),
+ * to deal in the Software without restriction, including without limitation
+ * the rights to use, copy, modify, merge, publish, distribute, sublicense,
+ * and/or sell copies of the Software, and to permit persons to whom the
+ * Software is furnished to do so, subject to the following conditions:
+ *
+ * The above copyright notice and this permission notice (including the next
+ * paragraph) shall be included in all copies or substantial portions of the
+ * Software.
+ *
+ * THE SOFTWARE IS PROVIDED "AS IS", WITHOUT WARRANTY OF ANY KIND, EXPRESS OR
+ * IMPLIED, INCLUDING BUT NOT LIMITED TO THE WARRANTIES OF MERCHANTABILITY,
+ * FITNESS FOR A PARTICULAR PURPOSE AND NONINFRINGEMENT. IN NO EVENT SHALL
+ * THE AUTHORS OR COPYRIGHT HOLDERS BE LIABLE FOR ANY CLAIM, DAMAGES OR OTHER
+ * LIABILITY, WHETHER IN AN ACTION OF CONTRACT, TORT OR OTHERWISE, ARISING
+ * FROM, OUT OF OR IN CONNECTION WITH THE SOFTWARE OR THE USE OR OTHER
+ * DEALINGS IN THE SOFTWARE.
+ *
+ * Authors:
+ * Eric Anholt <eric@anholt.net>
+ */
+#include <linux/i2c.h>
+#include "drmP.h"
+#include "drm.h"
+#include "drm_crtc.h"
+#include "intel_drv.h"
+#include "i915_drm.h"
+#include "i915_drv.h"
+#include "dvo.h"
+
+#define SIL164_ADDR 0x38
+#define CH7xxx_ADDR 0x76
+#define TFP410_ADDR 0x38
+
+static struct intel_dvo_device intel_dvo_devices[] = {
+ {
+ .type = INTEL_DVO_CHIP_TMDS,
+ .name = "sil164",
+ .dvo_reg = DVOC,
+ .slave_addr = SIL164_ADDR,
+ .dev_ops = &sil164_ops,
+ },
+ {
+ .type = INTEL_DVO_CHIP_TMDS,
+ .name = "ch7xxx",
+ .dvo_reg = DVOC,
+ .slave_addr = CH7xxx_ADDR,
+ .dev_ops = &ch7xxx_ops,
+ },
+ {
+ .type = INTEL_DVO_CHIP_LVDS,
+ .name = "ivch",
+ .dvo_reg = DVOA,
+ .slave_addr = 0x02, /* Might also be 0x44, 0x84, 0xc4 */
+ .dev_ops = &ivch_ops,
+ },
+ {
+ .type = INTEL_DVO_CHIP_TMDS,
+ .name = "tfp410",
+ .dvo_reg = DVOC,
+ .slave_addr = TFP410_ADDR,
+ .dev_ops = &tfp410_ops,
+ },
+ {
+ .type = INTEL_DVO_CHIP_LVDS,
+ .name = "ch7017",
+ .dvo_reg = DVOC,
+ .slave_addr = 0x75,
+ .gpio = GPIOE,
+ .dev_ops = &ch7017_ops,
+ }
+};
+
+static void intel_dvo_dpms(struct drm_encoder *encoder, int mode)
+{
+ struct drm_i915_private *dev_priv = encoder->dev->dev_private;
+ struct intel_output *intel_output = enc_to_intel_output(encoder);
+ struct intel_dvo_device *dvo = intel_output->dev_priv;
+ u32 dvo_reg = dvo->dvo_reg;
+ u32 temp = I915_READ(dvo_reg);
+
+ if (mode == DRM_MODE_DPMS_ON) {
+ I915_WRITE(dvo_reg, temp | DVO_ENABLE);
+ I915_READ(dvo_reg);
+ dvo->dev_ops->dpms(dvo, mode);
+ } else {
+ dvo->dev_ops->dpms(dvo, mode);
+ I915_WRITE(dvo_reg, temp & ~DVO_ENABLE);
+ I915_READ(dvo_reg);
+ }
+}
+
+static void intel_dvo_save(struct drm_connector *connector)
+{
+ struct drm_i915_private *dev_priv = connector->dev->dev_private;
+ struct intel_output *intel_output = to_intel_output(connector);
+ struct intel_dvo_device *dvo = intel_output->dev_priv;
+
+ /* Each output should probably just save the registers it touches,
+	 * but for now, save them all; it's overkill but simple.
+ */
+ dev_priv->saveDVOA = I915_READ(DVOA);
+ dev_priv->saveDVOB = I915_READ(DVOB);
+ dev_priv->saveDVOC = I915_READ(DVOC);
+
+ dvo->dev_ops->save(dvo);
+}
+
+static void intel_dvo_restore(struct drm_connector *connector)
+{
+ struct drm_i915_private *dev_priv = connector->dev->dev_private;
+ struct intel_output *intel_output = to_intel_output(connector);
+ struct intel_dvo_device *dvo = intel_output->dev_priv;
+
+ dvo->dev_ops->restore(dvo);
+
+ I915_WRITE(DVOA, dev_priv->saveDVOA);
+ I915_WRITE(DVOB, dev_priv->saveDVOB);
+ I915_WRITE(DVOC, dev_priv->saveDVOC);
+}
+
+static int intel_dvo_mode_valid(struct drm_connector *connector,
+ struct drm_display_mode *mode)
+{
+ struct intel_output *intel_output = to_intel_output(connector);
+ struct intel_dvo_device *dvo = intel_output->dev_priv;
+
+ if (mode->flags & DRM_MODE_FLAG_DBLSCAN)
+ return MODE_NO_DBLESCAN;
+
+ /* XXX: Validate clock range */
+
+ if (dvo->panel_fixed_mode) {
+ if (mode->hdisplay > dvo->panel_fixed_mode->hdisplay)
+ return MODE_PANEL;
+ if (mode->vdisplay > dvo->panel_fixed_mode->vdisplay)
+ return MODE_PANEL;
+ }
+
+ return dvo->dev_ops->mode_valid(dvo, mode);
+}
+
+static bool intel_dvo_mode_fixup(struct drm_encoder *encoder,
+ struct drm_display_mode *mode,
+ struct drm_display_mode *adjusted_mode)
+{
+ struct intel_output *intel_output = enc_to_intel_output(encoder);
+ struct intel_dvo_device *dvo = intel_output->dev_priv;
+
+ /* If we have timings from the BIOS for the panel, put them in
+ * to the adjusted mode. The CRTC will be set up for this mode,
+ * with the panel scaling set up to source from the H/VDisplay
+ * of the original mode.
+ */
+ if (dvo->panel_fixed_mode != NULL) {
+#define C(x) adjusted_mode->x = dvo->panel_fixed_mode->x
+ C(hdisplay);
+ C(hsync_start);
+ C(hsync_end);
+ C(htotal);
+ C(vdisplay);
+ C(vsync_start);
+ C(vsync_end);
+ C(vtotal);
+ C(clock);
+ drm_mode_set_crtcinfo(adjusted_mode, CRTC_INTERLACE_HALVE_V);
+#undef C
+ }
+
+ if (dvo->dev_ops->mode_fixup)
+ return dvo->dev_ops->mode_fixup(dvo, mode, adjusted_mode);
+
+ return true;
+}
+
+static void intel_dvo_mode_set(struct drm_encoder *encoder,
+ struct drm_display_mode *mode,
+ struct drm_display_mode *adjusted_mode)
+{
+ struct drm_device *dev = encoder->dev;
+ struct drm_i915_private *dev_priv = dev->dev_private;
+ struct intel_crtc *intel_crtc = to_intel_crtc(encoder->crtc);
+ struct intel_output *intel_output = enc_to_intel_output(encoder);
+ struct intel_dvo_device *dvo = intel_output->dev_priv;
+ int pipe = intel_crtc->pipe;
+ u32 dvo_val;
+ u32 dvo_reg = dvo->dvo_reg, dvo_srcdim_reg;
+ int dpll_reg = (pipe == 0) ? DPLL_A : DPLL_B;
+
+ switch (dvo_reg) {
+ case DVOA:
+ default:
+ dvo_srcdim_reg = DVOA_SRCDIM;
+ break;
+ case DVOB:
+ dvo_srcdim_reg = DVOB_SRCDIM;
+ break;
+ case DVOC:
+ dvo_srcdim_reg = DVOC_SRCDIM;
+ break;
+ }
+
+ dvo->dev_ops->mode_set(dvo, mode, adjusted_mode);
+
+ /* Save the data order, since I don't know what it should be set to. */
+ dvo_val = I915_READ(dvo_reg) &
+ (DVO_PRESERVE_MASK | DVO_DATA_ORDER_GBRG);
+ dvo_val |= DVO_DATA_ORDER_FP | DVO_BORDER_ENABLE |
+ DVO_BLANK_ACTIVE_HIGH;
+
+ if (pipe == 1)
+ dvo_val |= DVO_PIPE_B_SELECT;
+ dvo_val |= DVO_PIPE_STALL;
+ if (adjusted_mode->flags & DRM_MODE_FLAG_PHSYNC)
+ dvo_val |= DVO_HSYNC_ACTIVE_HIGH;
+ if (adjusted_mode->flags & DRM_MODE_FLAG_PVSYNC)
+ dvo_val |= DVO_VSYNC_ACTIVE_HIGH;
+
+ I915_WRITE(dpll_reg, I915_READ(dpll_reg) | DPLL_DVO_HIGH_SPEED);
+
+ /*I915_WRITE(DVOB_SRCDIM,
+ (adjusted_mode->hdisplay << DVO_SRCDIM_HORIZONTAL_SHIFT) |
+ (adjusted_mode->VDisplay << DVO_SRCDIM_VERTICAL_SHIFT));*/
+ I915_WRITE(dvo_srcdim_reg,
+ (adjusted_mode->hdisplay << DVO_SRCDIM_HORIZONTAL_SHIFT) |
+ (adjusted_mode->vdisplay << DVO_SRCDIM_VERTICAL_SHIFT));
+ /*I915_WRITE(DVOB, dvo_val);*/
+ I915_WRITE(dvo_reg, dvo_val);
+}
+
+/**
+ * Detect the output connection on our DVO device.
+ *
+ * Unimplemented.
+ */
+static enum drm_connector_status intel_dvo_detect(struct drm_connector *connector)
+{
+ struct intel_output *intel_output = to_intel_output(connector);
+ struct intel_dvo_device *dvo = intel_output->dev_priv;
+
+ return dvo->dev_ops->detect(dvo);
+}
+
+static int intel_dvo_get_modes(struct drm_connector *connector)
+{
+ struct intel_output *intel_output = to_intel_output(connector);
+ struct intel_dvo_device *dvo = intel_output->dev_priv;
+
+ /* We should probably have an i2c driver get_modes function for those
+ * devices which will have a fixed set of modes determined by the chip
+ * (TV-out, for example), but for now with just TMDS and LVDS,
+ * that's not the case.
+ */
+ intel_ddc_get_modes(intel_output);
+ if (!list_empty(&connector->probed_modes))
+ return 1;
+
+ if (dvo->panel_fixed_mode != NULL) {
+ struct drm_display_mode *mode;
+ mode = drm_mode_duplicate(connector->dev, dvo->panel_fixed_mode);
+ if (mode) {
+ drm_mode_probed_add(connector, mode);
+ return 1;
+ }
+ }
+ return 0;
+}
+
+static void intel_dvo_destroy (struct drm_connector *connector)
+{
+ struct intel_output *intel_output = to_intel_output(connector);
+ struct intel_dvo_device *dvo = intel_output->dev_priv;
+
+ if (dvo) {
+ if (dvo->dev_ops->destroy)
+ dvo->dev_ops->destroy(dvo);
+ if (dvo->panel_fixed_mode)
+ kfree(dvo->panel_fixed_mode);
+		/* no need to free; dvo points into the static intel_dvo_devices[] table */
+ //kfree(dvo);
+ }
+ if (intel_output->i2c_bus)
+ intel_i2c_destroy(intel_output->i2c_bus);
+ if (intel_output->ddc_bus)
+ intel_i2c_destroy(intel_output->ddc_bus);
+ drm_sysfs_connector_remove(connector);
+ drm_connector_cleanup(connector);
+ kfree(intel_output);
+}
+
+#ifdef RANDR_GET_CRTC_INTERFACE
+static struct drm_crtc *intel_dvo_get_crtc(struct drm_connector *connector)
+{
+ struct drm_device *dev = connector->dev;
+ struct drm_i915_private *dev_priv = dev->dev_private;
+ struct intel_output *intel_output = to_intel_output(connector);
+ struct intel_dvo_device *dvo = intel_output->dev_priv;
+ int pipe = !!(I915_READ(dvo->dvo_reg) & SDVO_PIPE_B_SELECT);
+
+ return intel_pipe_to_crtc(pScrn, pipe);
+}
+#endif
+
+static const struct drm_encoder_helper_funcs intel_dvo_helper_funcs = {
+ .dpms = intel_dvo_dpms,
+ .mode_fixup = intel_dvo_mode_fixup,
+ .prepare = intel_encoder_prepare,
+ .mode_set = intel_dvo_mode_set,
+ .commit = intel_encoder_commit,
+};
+
+static const struct drm_connector_funcs intel_dvo_connector_funcs = {
+ .save = intel_dvo_save,
+ .restore = intel_dvo_restore,
+ .detect = intel_dvo_detect,
+ .destroy = intel_dvo_destroy,
+ .fill_modes = drm_helper_probe_single_connector_modes,
+};
+
+static const struct drm_connector_helper_funcs intel_dvo_connector_helper_funcs = {
+ .mode_valid = intel_dvo_mode_valid,
+ .get_modes = intel_dvo_get_modes,
+ .best_encoder = intel_best_encoder,
+};
+
+static void intel_dvo_enc_destroy(struct drm_encoder *encoder)
+{
+ drm_encoder_cleanup(encoder);
+}
+
+static const struct drm_encoder_funcs intel_dvo_enc_funcs = {
+ .destroy = intel_dvo_enc_destroy,
+};
+
+
+/**
+ * Attempts to get a fixed panel timing for LVDS (currently only the i830).
+ *
+ * Other chips with DVO LVDS will need to extend this to deal with the LVDS
+ * chip being on DVOB/C and having multiple pipes.
+ */
+static struct drm_display_mode *
+intel_dvo_get_current_mode (struct drm_connector *connector)
+{
+ struct drm_device *dev = connector->dev;
+ struct drm_i915_private *dev_priv = dev->dev_private;
+ struct intel_output *intel_output = to_intel_output(connector);
+ struct intel_dvo_device *dvo = intel_output->dev_priv;
+ uint32_t dvo_reg = dvo->dvo_reg;
+ uint32_t dvo_val = I915_READ(dvo_reg);
+ struct drm_display_mode *mode = NULL;
+
+ /* If the DVO port is active, that'll be the LVDS, so we can pull out
+ * its timings to get how the BIOS set up the panel.
+ */
+ if (dvo_val & DVO_ENABLE) {
+ struct drm_crtc *crtc;
+ int pipe = (dvo_val & DVO_PIPE_B_SELECT) ? 1 : 0;
+
+ crtc = intel_get_crtc_from_pipe(dev, pipe);
+ if (crtc) {
+ mode = intel_crtc_mode_get(dev, crtc);
+
+ if (mode) {
+ mode->type |= DRM_MODE_TYPE_PREFERRED;
+ if (dvo_val & DVO_HSYNC_ACTIVE_HIGH)
+ mode->flags |= DRM_MODE_FLAG_PHSYNC;
+ if (dvo_val & DVO_VSYNC_ACTIVE_HIGH)
+ mode->flags |= DRM_MODE_FLAG_PVSYNC;
+ }
+ }
+ }
+ return mode;
+}
+
+void intel_dvo_init(struct drm_device *dev)
+{
+ struct intel_output *intel_output;
+ struct intel_dvo_device *dvo;
+ struct intel_i2c_chan *i2cbus = NULL;
+ int ret = 0;
+ int i;
+ int gpio_inited = 0;
+ int encoder_type = DRM_MODE_ENCODER_NONE;
+ intel_output = kzalloc (sizeof(struct intel_output), GFP_KERNEL);
+ if (!intel_output)
+ return;
+
+ /* Set up the DDC bus */
+ intel_output->ddc_bus = intel_i2c_create(dev, GPIOD, "DVODDC_D");
+ if (!intel_output->ddc_bus)
+ goto free_intel;
+
+ /* Now, try to find a controller */
+ for (i = 0; i < ARRAY_SIZE(intel_dvo_devices); i++) {
+ struct drm_connector *connector = &intel_output->base;
+ int gpio;
+
+ dvo = &intel_dvo_devices[i];
+
+ /* Allow the I2C driver info to specify the GPIO to be used in
+ * special cases, but otherwise default to what's defined
+ * in the spec.
+ */
+ if (dvo->gpio != 0)
+ gpio = dvo->gpio;
+ else if (dvo->type == INTEL_DVO_CHIP_LVDS)
+ gpio = GPIOB;
+ else
+ gpio = GPIOE;
+
+ /* Set up the I2C bus necessary for the chip we're probing.
+ * It appears that everything is on GPIOE except for panels
+ * on i830 laptops, which are on GPIOB (DVOA).
+ */
+ if (gpio_inited != gpio) {
+ if (i2cbus != NULL)
+ intel_i2c_destroy(i2cbus);
+ if (!(i2cbus = intel_i2c_create(dev, gpio,
+ gpio == GPIOB ? "DVOI2C_B" : "DVOI2C_E"))) {
+ continue;
+ }
+ gpio_inited = gpio;
+ }
+
+		if (dvo->dev_ops != NULL)
+ ret = dvo->dev_ops->init(dvo, i2cbus);
+ else
+ ret = false;
+
+ if (!ret)
+ continue;
+
+ intel_output->type = INTEL_OUTPUT_DVO;
+ switch (dvo->type) {
+ case INTEL_DVO_CHIP_TMDS:
+ drm_connector_init(dev, connector,
+ &intel_dvo_connector_funcs,
+ DRM_MODE_CONNECTOR_DVII);
+ encoder_type = DRM_MODE_ENCODER_TMDS;
+ break;
+ case INTEL_DVO_CHIP_LVDS:
+ drm_connector_init(dev, connector,
+ &intel_dvo_connector_funcs,
+ DRM_MODE_CONNECTOR_LVDS);
+ encoder_type = DRM_MODE_ENCODER_LVDS;
+ break;
+ }
+
+ drm_connector_helper_add(connector,
+ &intel_dvo_connector_helper_funcs);
+ connector->display_info.subpixel_order = SubPixelHorizontalRGB;
+ connector->interlace_allowed = false;
+ connector->doublescan_allowed = false;
+
+ intel_output->dev_priv = dvo;
+ intel_output->i2c_bus = i2cbus;
+
+ drm_encoder_init(dev, &intel_output->enc,
+ &intel_dvo_enc_funcs, encoder_type);
+ drm_encoder_helper_add(&intel_output->enc,
+ &intel_dvo_helper_funcs);
+
+ drm_mode_connector_attach_encoder(&intel_output->base,
+ &intel_output->enc);
+ if (dvo->type == INTEL_DVO_CHIP_LVDS) {
+ /* For our LVDS chipsets, we should hopefully be able
+ * to dig the fixed panel mode out of the BIOS data.
+ * However, it's in a different format from the BIOS
+ * data on chipsets with integrated LVDS (stored in AIM
+ * headers, likely), so for now, just get the current
+ * mode being output through DVO.
+ */
+ dvo->panel_fixed_mode =
+ intel_dvo_get_current_mode(connector);
+ dvo->panel_wants_dither = true;
+ }
+
+ drm_sysfs_connector_add(connector);
+ return;
+ }
+
+ intel_i2c_destroy(intel_output->ddc_bus);
+ /* Didn't find a chip, so tear down. */
+ if (i2cbus != NULL)
+ intel_i2c_destroy(i2cbus);
+free_intel:
+ kfree(intel_output);
+}
diff --git a/drivers/gpu/drm/i915/intel_fb.c b/drivers/gpu/drm/i915/intel_fb.c
new file mode 100644
index 00000000000..afd1217b8a0
--- /dev/null
+++ b/drivers/gpu/drm/i915/intel_fb.c
@@ -0,0 +1,925 @@
+/*
+ * Copyright © 2007 David Airlie
+ *
+ * Permission is hereby granted, free of charge, to any person obtaining a
+ * copy of this software and associated documentation files (the "Software"),
+ * to deal in the Software without restriction, including without limitation
+ * the rights to use, copy, modify, merge, publish, distribute, sublicense,
+ * and/or sell copies of the Software, and to permit persons to whom the
+ * Software is furnished to do so, subject to the following conditions:
+ *
+ * The above copyright notice and this permission notice (including the next
+ * paragraph) shall be included in all copies or substantial portions of the
+ * Software.
+ *
+ * THE SOFTWARE IS PROVIDED "AS IS", WITHOUT WARRANTY OF ANY KIND, EXPRESS OR
+ * IMPLIED, INCLUDING BUT NOT LIMITED TO THE WARRANTIES OF MERCHANTABILITY,
+ * FITNESS FOR A PARTICULAR PURPOSE AND NONINFRINGEMENT. IN NO EVENT SHALL
+ * THE AUTHORS OR COPYRIGHT HOLDERS BE LIABLE FOR ANY CLAIM, DAMAGES OR OTHER
+ * LIABILITY, WHETHER IN AN ACTION OF CONTRACT, TORT OR OTHERWISE, ARISING
+ * FROM, OUT OF OR IN CONNECTION WITH THE SOFTWARE OR THE USE OR OTHER
+ * DEALINGS IN THE SOFTWARE.
+ *
+ * Authors:
+ * David Airlie
+ */
+
+#include <linux/module.h>
+#include <linux/kernel.h>
+#include <linux/errno.h>
+#include <linux/string.h>
+#include <linux/mm.h>
+#include <linux/tty.h>
+#include <linux/slab.h>
+#include <linux/sysrq.h>
+#include <linux/delay.h>
+#include <linux/fb.h>
+#include <linux/init.h>
+
+#include "drmP.h"
+#include "drm.h"
+#include "drm_crtc.h"
+#include "intel_drv.h"
+#include "i915_drm.h"
+#include "i915_drv.h"
+
+struct intelfb_par {
+ struct drm_device *dev;
+ struct drm_display_mode *our_mode;
+ struct intel_framebuffer *intel_fb;
+ int crtc_count;
+ /* crtc currently bound to this */
+ uint32_t crtc_ids[2];
+};
+
+static int intelfb_setcolreg(unsigned regno, unsigned red, unsigned green,
+ unsigned blue, unsigned transp,
+ struct fb_info *info)
+{
+ struct intelfb_par *par = info->par;
+ struct drm_device *dev = par->dev;
+ struct drm_crtc *crtc;
+ int i;
+
+ list_for_each_entry(crtc, &dev->mode_config.crtc_list, head) {
+ struct intel_crtc *intel_crtc = to_intel_crtc(crtc);
+ struct drm_mode_set *modeset = &intel_crtc->mode_set;
+ struct drm_framebuffer *fb = modeset->fb;
+
+ for (i = 0; i < par->crtc_count; i++)
+ if (crtc->base.id == par->crtc_ids[i])
+ break;
+
+ if (i == par->crtc_count)
+ continue;
+
+
+ if (regno > 255)
+ return 1;
+
+ if (fb->depth == 8) {
+ intel_crtc_fb_gamma_set(crtc, red, green, blue, regno);
+ return 0;
+ }
+
+ if (regno < 16) {
+ switch (fb->depth) {
+ case 15:
+ fb->pseudo_palette[regno] = ((red & 0xf800) >> 1) |
+ ((green & 0xf800) >> 6) |
+ ((blue & 0xf800) >> 11);
+ break;
+ case 16:
+ fb->pseudo_palette[regno] = (red & 0xf800) |
+ ((green & 0xfc00) >> 5) |
+ ((blue & 0xf800) >> 11);
+ break;
+ case 24:
+ case 32:
+ fb->pseudo_palette[regno] = ((red & 0xff00) << 8) |
+ (green & 0xff00) |
+ ((blue & 0xff00) >> 8);
+ break;
+ }
+ }
+ }
+ return 0;
+}
+
+static int intelfb_check_var(struct fb_var_screeninfo *var,
+ struct fb_info *info)
+{
+ struct intelfb_par *par = info->par;
+ struct intel_framebuffer *intel_fb = par->intel_fb;
+ struct drm_framebuffer *fb = &intel_fb->base;
+ int depth;
+
+ if (var->pixclock == -1 || !var->pixclock)
+ return -EINVAL;
+
+ /* Need to resize the fb object !!! */
+ if (var->xres > fb->width || var->yres > fb->height) {
+		DRM_ERROR("Requested width/height is greater than current fb "
+			  "object %dx%d > %dx%d\n", var->xres, var->yres,
+			  fb->width, fb->height);
+ DRM_ERROR("Need resizing code.\n");
+ return -EINVAL;
+ }
+
+ switch (var->bits_per_pixel) {
+ case 16:
+ depth = (var->green.length == 6) ? 16 : 15;
+ break;
+ case 32:
+ depth = (var->transp.length > 0) ? 32 : 24;
+ break;
+ default:
+ depth = var->bits_per_pixel;
+ break;
+ }
+
+ switch (depth) {
+ case 8:
+ var->red.offset = 0;
+ var->green.offset = 0;
+ var->blue.offset = 0;
+ var->red.length = 8;
+ var->green.length = 8;
+ var->blue.length = 8;
+ var->transp.length = 0;
+ var->transp.offset = 0;
+ break;
+ case 15:
+ var->red.offset = 10;
+ var->green.offset = 5;
+ var->blue.offset = 0;
+ var->red.length = 5;
+ var->green.length = 5;
+ var->blue.length = 5;
+ var->transp.length = 1;
+ var->transp.offset = 15;
+ break;
+ case 16:
+ var->red.offset = 11;
+ var->green.offset = 5;
+ var->blue.offset = 0;
+ var->red.length = 5;
+ var->green.length = 6;
+ var->blue.length = 5;
+ var->transp.length = 0;
+ var->transp.offset = 0;
+ break;
+ case 24:
+ var->red.offset = 16;
+ var->green.offset = 8;
+ var->blue.offset = 0;
+ var->red.length = 8;
+ var->green.length = 8;
+ var->blue.length = 8;
+ var->transp.length = 0;
+ var->transp.offset = 0;
+ break;
+ case 32:
+ var->red.offset = 16;
+ var->green.offset = 8;
+ var->blue.offset = 0;
+ var->red.length = 8;
+ var->green.length = 8;
+ var->blue.length = 8;
+ var->transp.length = 8;
+ var->transp.offset = 24;
+ break;
+ default:
+ return -EINVAL;
+ }
+
+ return 0;
+}
+
+/* this will let fbcon do the mode init */
+/* FIXME: take mode config lock? */
+static int intelfb_set_par(struct fb_info *info)
+{
+ struct intelfb_par *par = info->par;
+ struct drm_device *dev = par->dev;
+ struct fb_var_screeninfo *var = &info->var;
+ int i;
+
+ DRM_DEBUG("%d %d\n", var->xres, var->pixclock);
+
+	if (var->pixclock != -1) {
+		DRM_ERROR("PIXEL CLOCK SET\n");
+ return -EINVAL;
+ } else {
+ struct drm_crtc *crtc;
+ int ret;
+
+ list_for_each_entry(crtc, &dev->mode_config.crtc_list, head) {
+ struct intel_crtc *intel_crtc = to_intel_crtc(crtc);
+
+ for (i = 0; i < par->crtc_count; i++)
+ if (crtc->base.id == par->crtc_ids[i])
+ break;
+
+ if (i == par->crtc_count)
+ continue;
+
+ if (crtc->fb == intel_crtc->mode_set.fb) {
+ mutex_lock(&dev->mode_config.mutex);
+ ret = crtc->funcs->set_config(&intel_crtc->mode_set);
+ mutex_unlock(&dev->mode_config.mutex);
+ if (ret)
+ return ret;
+ }
+ }
+ return 0;
+ }
+}
+
+static int intelfb_pan_display(struct fb_var_screeninfo *var,
+ struct fb_info *info)
+{
+ struct intelfb_par *par = info->par;
+ struct drm_device *dev = par->dev;
+ struct drm_mode_set *modeset;
+ struct drm_crtc *crtc;
+ struct intel_crtc *intel_crtc;
+ int ret = 0;
+ int i;
+
+ list_for_each_entry(crtc, &dev->mode_config.crtc_list, head) {
+ for (i = 0; i < par->crtc_count; i++)
+ if (crtc->base.id == par->crtc_ids[i])
+ break;
+
+ if (i == par->crtc_count)
+ continue;
+
+ intel_crtc = to_intel_crtc(crtc);
+ modeset = &intel_crtc->mode_set;
+
+ modeset->x = var->xoffset;
+ modeset->y = var->yoffset;
+
+ if (modeset->num_connectors) {
+ mutex_lock(&dev->mode_config.mutex);
+ ret = crtc->funcs->set_config(modeset);
+ mutex_unlock(&dev->mode_config.mutex);
+ if (!ret) {
+ info->var.xoffset = var->xoffset;
+ info->var.yoffset = var->yoffset;
+ }
+ }
+ }
+
+ return ret;
+}
+
+static void intelfb_on(struct fb_info *info)
+{
+ struct intelfb_par *par = info->par;
+ struct drm_device *dev = par->dev;
+ struct drm_crtc *crtc;
+ struct drm_encoder *encoder;
+ int i;
+
+	/*
+	 * For each CRTC in this fb, turn the CRTC back on, then find
+	 * any encoders attached to it and turn those on as well.
+	 */
+ list_for_each_entry(crtc, &dev->mode_config.crtc_list, head) {
+ struct drm_crtc_helper_funcs *crtc_funcs = crtc->helper_private;
+
+ for (i = 0; i < par->crtc_count; i++)
+ if (crtc->base.id == par->crtc_ids[i])
+ break;
+
+ crtc_funcs->dpms(crtc, DRM_MODE_DPMS_ON);
+
+ /* Found a CRTC on this fb, now find encoders */
+ list_for_each_entry(encoder, &dev->mode_config.encoder_list, head) {
+ if (encoder->crtc == crtc) {
+ struct drm_encoder_helper_funcs *encoder_funcs;
+ encoder_funcs = encoder->helper_private;
+ encoder_funcs->dpms(encoder, DRM_MODE_DPMS_ON);
+ }
+ }
+ }
+}
+
+static void intelfb_off(struct fb_info *info, int dpms_mode)
+{
+ struct intelfb_par *par = info->par;
+ struct drm_device *dev = par->dev;
+ struct drm_crtc *crtc;
+ struct drm_encoder *encoder;
+ int i;
+
+ /*
+ * For each CRTC in this fb, find all associated encoders
+ * and turn them off, then turn off the CRTC.
+ */
+ list_for_each_entry(crtc, &dev->mode_config.crtc_list, head) {
+ struct drm_crtc_helper_funcs *crtc_funcs = crtc->helper_private;
+
+ for (i = 0; i < par->crtc_count; i++)
+ if (crtc->base.id == par->crtc_ids[i])
+ break;
+
+ /* Found a CRTC on this fb, now find encoders */
+ list_for_each_entry(encoder, &dev->mode_config.encoder_list, head) {
+ if (encoder->crtc == crtc) {
+ struct drm_encoder_helper_funcs *encoder_funcs;
+ encoder_funcs = encoder->helper_private;
+ encoder_funcs->dpms(encoder, dpms_mode);
+ }
+ }
+ if (dpms_mode == DRM_MODE_DPMS_OFF)
+ crtc_funcs->dpms(crtc, dpms_mode);
+ }
+}
+
+static int intelfb_blank(int blank, struct fb_info *info)
+{
+ switch (blank) {
+ case FB_BLANK_UNBLANK:
+ intelfb_on(info);
+ break;
+ case FB_BLANK_NORMAL:
+ intelfb_off(info, DRM_MODE_DPMS_STANDBY);
+ break;
+ case FB_BLANK_HSYNC_SUSPEND:
+ intelfb_off(info, DRM_MODE_DPMS_STANDBY);
+ break;
+ case FB_BLANK_VSYNC_SUSPEND:
+ intelfb_off(info, DRM_MODE_DPMS_SUSPEND);
+ break;
+ case FB_BLANK_POWERDOWN:
+ intelfb_off(info, DRM_MODE_DPMS_OFF);
+ break;
+ }
+ return 0;
+}
+
+static struct fb_ops intelfb_ops = {
+ .owner = THIS_MODULE,
+ .fb_check_var = intelfb_check_var,
+ .fb_set_par = intelfb_set_par,
+ .fb_setcolreg = intelfb_setcolreg,
+ .fb_fillrect = cfb_fillrect,
+ .fb_copyarea = cfb_copyarea,
+ .fb_imageblit = cfb_imageblit,
+ .fb_pan_display = intelfb_pan_display,
+ .fb_blank = intelfb_blank,
+};
+
+/**
+ * Currently it is assumed that the old framebuffer is reused.
+ *
+ * LOCKING
+ * The caller should hold the mode config lock.
+ */
+int intelfb_resize(struct drm_device *dev, struct drm_crtc *crtc)
+{
+ struct fb_info *info;
+ struct drm_framebuffer *fb;
+ struct drm_display_mode *mode = crtc->desired_mode;
+
+ fb = crtc->fb;
+ if (!fb)
+ return 1;
+
+ info = fb->fbdev;
+ if (!info)
+ return 1;
+
+ if (!mode)
+ return 1;
+
+ info->var.xres = mode->hdisplay;
+ info->var.right_margin = mode->hsync_start - mode->hdisplay;
+ info->var.hsync_len = mode->hsync_end - mode->hsync_start;
+ info->var.left_margin = mode->htotal - mode->hsync_end;
+ info->var.yres = mode->vdisplay;
+ info->var.lower_margin = mode->vsync_start - mode->vdisplay;
+ info->var.vsync_len = mode->vsync_end - mode->vsync_start;
+ info->var.upper_margin = mode->vtotal - mode->vsync_end;
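+	/*
+	 * pixclock is in picoseconds per pixel; derive it from the mode's
+	 * total timings and refresh rate in stages (see the overflow note
+	 * below) rather than in one expression.
+	 */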
+ info->var.pixclock = 10000000 / mode->htotal * 1000 / mode->vtotal * 100;
+ /* avoid overflow */
+ info->var.pixclock = info->var.pixclock * 1000 / mode->vrefresh;
+
+ return 0;
+}
+EXPORT_SYMBOL(intelfb_resize);
+
+static struct drm_mode_set kernelfb_mode;
+
+static int intelfb_panic(struct notifier_block *n, unsigned long unused,
+ void *panic_str)
+{
+ DRM_ERROR("panic occurred, switching back to text console\n");
+
+ intelfb_restore();
+ return 0;
+}
+
+static struct notifier_block paniced = {
+ .notifier_call = intelfb_panic,
+};
+
+static int intelfb_create(struct drm_device *dev, uint32_t fb_width,
+ uint32_t fb_height, uint32_t surface_width,
+ uint32_t surface_height,
+ struct intel_framebuffer **intel_fb_p)
+{
+ struct fb_info *info;
+ struct intelfb_par *par;
+ struct drm_framebuffer *fb;
+ struct intel_framebuffer *intel_fb;
+ struct drm_mode_fb_cmd mode_cmd;
+ struct drm_gem_object *fbo = NULL;
+ struct drm_i915_gem_object *obj_priv;
+ struct device *device = &dev->pdev->dev;
+ int size, ret, mmio_bar = IS_I9XX(dev) ? 0 : 1;
+
+ mode_cmd.width = surface_width;
+ mode_cmd.height = surface_height;
+
+ mode_cmd.bpp = 32;
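+	/* Bytes per scanline, aligned to 64 bytes to satisfy the display
+	 * engine's stride alignment requirement. */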
+ mode_cmd.pitch = ALIGN(mode_cmd.width * ((mode_cmd.bpp + 1) / 8), 64);
+ mode_cmd.depth = 24;
+
+ size = mode_cmd.pitch * mode_cmd.height;
+ size = ALIGN(size, PAGE_SIZE);
+ fbo = drm_gem_object_alloc(dev, size);
+ if (!fbo) {
+ printk(KERN_ERR "failed to allocate framebuffer\n");
+ ret = -ENOMEM;
+ goto out;
+ }
+ obj_priv = fbo->driver_private;
+
+ mutex_lock(&dev->struct_mutex);
+
+ ret = i915_gem_object_pin(fbo, PAGE_SIZE);
+ if (ret) {
+ DRM_ERROR("failed to pin fb: %d\n", ret);
+ goto out_unref;
+ }
+
+ /* Flush everything out, we'll be doing GTT only from now on */
+ i915_gem_object_set_to_gtt_domain(fbo, 1);
+
+ ret = intel_framebuffer_create(dev, &mode_cmd, &fb, fbo);
+ if (ret) {
+ DRM_ERROR("failed to allocate fb.\n");
+ goto out_unref;
+ }
+
+ list_add(&fb->filp_head, &dev->mode_config.fb_kernel_list);
+
+ intel_fb = to_intel_framebuffer(fb);
+ *intel_fb_p = intel_fb;
+
+ info = framebuffer_alloc(sizeof(struct intelfb_par), device);
+ if (!info) {
+ ret = -ENOMEM;
+ goto out_unref;
+ }
+
+ par = info->par;
+
+ strcpy(info->fix.id, "inteldrmfb");
+ info->fix.type = FB_TYPE_PACKED_PIXELS;
+ info->fix.visual = FB_VISUAL_TRUECOLOR;
+ info->fix.type_aux = 0;
+ info->fix.xpanstep = 1; /* doing it in hw */
+ info->fix.ypanstep = 1; /* doing it in hw */
+ info->fix.ywrapstep = 0;
+ info->fix.accel = FB_ACCEL_I830;
+
+ info->flags = FBINFO_DEFAULT;
+
+ info->fbops = &intelfb_ops;
+
+ info->fix.line_length = fb->pitch;
+ info->fix.smem_start = dev->mode_config.fb_base + obj_priv->gtt_offset;
+ info->fix.smem_len = size;
+
+
+ info->screen_base = ioremap_wc(dev->agp->base + obj_priv->gtt_offset,
+ size);
+ if (!info->screen_base) {
+ ret = -ENOSPC;
+ goto out_unref;
+ }
+ info->screen_size = size;
+
+// memset(info->screen_base, 0, size);
+
+ info->pseudo_palette = fb->pseudo_palette;
+ info->var.xres_virtual = fb->width;
+ info->var.yres_virtual = fb->height;
+ info->var.bits_per_pixel = fb->bits_per_pixel;
+ info->var.xoffset = 0;
+ info->var.yoffset = 0;
+ info->var.activate = FB_ACTIVATE_NOW;
+ info->var.height = -1;
+ info->var.width = -1;
+
+ info->var.xres = fb_width;
+ info->var.yres = fb_height;
+
+ /* FIXME: we really shouldn't expose mmio space at all */
+ info->fix.mmio_start = pci_resource_start(dev->pdev, mmio_bar);
+ info->fix.mmio_len = pci_resource_len(dev->pdev, mmio_bar);
+
+ info->pixmap.size = 64*1024;
+ info->pixmap.buf_align = 8;
+ info->pixmap.access_align = 32;
+ info->pixmap.flags = FB_PIXMAP_SYSTEM;
+ info->pixmap.scan_align = 1;
+
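+	/* Describe the channel layout of each supported depth to fbdev
+	 * userspace. */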
+	switch (fb->depth) {
+ case 8:
+ info->var.red.offset = 0;
+ info->var.green.offset = 0;
+ info->var.blue.offset = 0;
+ info->var.red.length = 8; /* 8bit DAC */
+ info->var.green.length = 8;
+ info->var.blue.length = 8;
+ info->var.transp.offset = 0;
+ info->var.transp.length = 0;
+ break;
+ case 15:
+ info->var.red.offset = 10;
+ info->var.green.offset = 5;
+ info->var.blue.offset = 0;
+ info->var.red.length = 5;
+ info->var.green.length = 5;
+ info->var.blue.length = 5;
+ info->var.transp.offset = 15;
+ info->var.transp.length = 1;
+ break;
+ case 16:
+ info->var.red.offset = 11;
+ info->var.green.offset = 5;
+ info->var.blue.offset = 0;
+ info->var.red.length = 5;
+ info->var.green.length = 6;
+ info->var.blue.length = 5;
+ info->var.transp.offset = 0;
+ break;
+ case 24:
+ info->var.red.offset = 16;
+ info->var.green.offset = 8;
+ info->var.blue.offset = 0;
+ info->var.red.length = 8;
+ info->var.green.length = 8;
+ info->var.blue.length = 8;
+ info->var.transp.offset = 0;
+ info->var.transp.length = 0;
+ break;
+ case 32:
+ info->var.red.offset = 16;
+ info->var.green.offset = 8;
+ info->var.blue.offset = 0;
+ info->var.red.length = 8;
+ info->var.green.length = 8;
+ info->var.blue.length = 8;
+ info->var.transp.offset = 24;
+ info->var.transp.length = 8;
+ break;
+ default:
+ break;
+ }
+
+ fb->fbdev = info;
+
+ par->intel_fb = intel_fb;
+ par->dev = dev;
+
+	/* To allow resizing without swapping buffers */
+	printk(KERN_INFO "allocated %dx%d fb: 0x%08x, bo %p\n",
+	       intel_fb->base.width, intel_fb->base.height,
+	       obj_priv->gtt_offset, fbo);
+
+ mutex_unlock(&dev->struct_mutex);
+ return 0;
+
+out_unref:
+ drm_gem_object_unreference(fbo);
+ mutex_unlock(&dev->struct_mutex);
+out:
+ return ret;
+}
+
+static int intelfb_multi_fb_probe_crtc(struct drm_device *dev, struct drm_crtc *crtc)
+{
+ struct intel_crtc *intel_crtc = to_intel_crtc(crtc);
+ struct intel_framebuffer *intel_fb;
+ struct drm_framebuffer *fb;
+ struct drm_connector *connector;
+ struct fb_info *info;
+ struct intelfb_par *par;
+ struct drm_mode_set *modeset;
+ unsigned int width, height;
+ int new_fb = 0;
+ int ret, i, conn_count;
+
+ if (!drm_helper_crtc_in_use(crtc))
+ return 0;
+
+ if (!crtc->desired_mode)
+ return 0;
+
+ width = crtc->desired_mode->hdisplay;
+ height = crtc->desired_mode->vdisplay;
+
+ /* is there an fb bound to this crtc already */
+ if (!intel_crtc->mode_set.fb) {
+ ret = intelfb_create(dev, width, height, width, height, &intel_fb);
+ if (ret)
+ return -EINVAL;
+ new_fb = 1;
+ } else {
+ fb = intel_crtc->mode_set.fb;
+ intel_fb = to_intel_framebuffer(fb);
+ if ((intel_fb->base.width < width) || (intel_fb->base.height < height))
+ return -EINVAL;
+ }
+
+ info = intel_fb->base.fbdev;
+ par = info->par;
+
+ modeset = &intel_crtc->mode_set;
+ modeset->fb = &intel_fb->base;
+ conn_count = 0;
+ list_for_each_entry(connector, &dev->mode_config.connector_list, head) {
+ if (connector->encoder)
+ if (connector->encoder->crtc == modeset->crtc) {
+ modeset->connectors[conn_count] = connector;
+ conn_count++;
+ if (conn_count > INTELFB_CONN_LIMIT)
+ BUG();
+ }
+ }
+
+ for (i = conn_count; i < INTELFB_CONN_LIMIT; i++)
+ modeset->connectors[i] = NULL;
+
+ par->crtc_ids[0] = crtc->base.id;
+
+ modeset->num_connectors = conn_count;
+ if (modeset->mode != modeset->crtc->desired_mode)
+ modeset->mode = modeset->crtc->desired_mode;
+
+ par->crtc_count = 1;
+
+ if (new_fb) {
+ info->var.pixclock = -1;
+ if (register_framebuffer(info) < 0)
+ return -EINVAL;
+ } else
+ intelfb_set_par(info);
+
+ printk(KERN_INFO "fb%d: %s frame buffer device\n", info->node,
+ info->fix.id);
+
+ /* Switch back to kernel console on panic */
+ kernelfb_mode = *modeset;
+ atomic_notifier_chain_register(&panic_notifier_list, &paniced);
+ printk(KERN_INFO "registered panic notifier\n");
+
+ return 0;
+}
+
+static int intelfb_multi_fb_probe(struct drm_device *dev)
+{
+
+ struct drm_crtc *crtc;
+ int ret = 0;
+
+ list_for_each_entry(crtc, &dev->mode_config.crtc_list, head) {
+ ret = intelfb_multi_fb_probe_crtc(dev, crtc);
+ if (ret)
+ return ret;
+ }
+ return ret;
+}
+
+static int intelfb_single_fb_probe(struct drm_device *dev)
+{
+ struct drm_crtc *crtc;
+ struct drm_connector *connector;
+ unsigned int fb_width = (unsigned)-1, fb_height = (unsigned)-1;
+ unsigned int surface_width = 0, surface_height = 0;
+ int new_fb = 0;
+ int crtc_count = 0;
+ int ret, i, conn_count = 0;
+ struct intel_framebuffer *intel_fb;
+ struct fb_info *info;
+ struct intelfb_par *par;
+ struct drm_mode_set *modeset = NULL;
+
+ DRM_DEBUG("\n");
+
+ /* Get a count of crtcs now in use and new min/maxes width/heights */
+ list_for_each_entry(crtc, &dev->mode_config.crtc_list, head) {
+ if (!drm_helper_crtc_in_use(crtc))
+ continue;
+
+ crtc_count++;
+ if (!crtc->desired_mode)
+ continue;
+
+ /* Smallest mode determines console size... */
+ if (crtc->desired_mode->hdisplay < fb_width)
+ fb_width = crtc->desired_mode->hdisplay;
+
+ if (crtc->desired_mode->vdisplay < fb_height)
+ fb_height = crtc->desired_mode->vdisplay;
+
+ /* ... but largest for memory allocation dimensions */
+ if (crtc->desired_mode->hdisplay > surface_width)
+ surface_width = crtc->desired_mode->hdisplay;
+
+ if (crtc->desired_mode->vdisplay > surface_height)
+ surface_height = crtc->desired_mode->vdisplay;
+ }
+
+ if (crtc_count == 0 || fb_width == -1 || fb_height == -1) {
+ /* hmm everyone went away - assume VGA cable just fell out
+ and will come back later. */
+ DRM_DEBUG("no CRTCs available?\n");
+ return 0;
+ }
+
+ /* Find the fb for our new config */
+ if (list_empty(&dev->mode_config.fb_kernel_list)) {
+ DRM_DEBUG("creating new fb (console size %dx%d, "
+ "buffer size %dx%d)\n", fb_width, fb_height,
+ surface_width, surface_height);
+ ret = intelfb_create(dev, fb_width, fb_height, surface_width,
+ surface_height, &intel_fb);
+ if (ret)
+ return -EINVAL;
+ new_fb = 1;
+ } else {
+ struct drm_framebuffer *fb;
+
+ fb = list_first_entry(&dev->mode_config.fb_kernel_list,
+ struct drm_framebuffer, filp_head);
+ intel_fb = to_intel_framebuffer(fb);
+
+ /* if someone hotplugs something bigger than we have already
+ * allocated, we are pwned. As really we can't resize an
+ * fbdev that is in the wild currently due to fbdev not really
+ * being designed for the lower layers moving stuff around
+ * under it.
+ * - so in the grand style of things - punt.
+ */
+ if ((fb->width < surface_width) ||
+ (fb->height < surface_height)) {
+ DRM_ERROR("fb not large enough for console\n");
+ return -EINVAL;
+ }
+ }
+
+ info = intel_fb->base.fbdev;
+ par = info->par;
+
+ crtc_count = 0;
+ /*
+ * For each CRTC, set up the connector list for the CRTC's mode
+ * set configuration.
+ */
+ list_for_each_entry(crtc, &dev->mode_config.crtc_list, head) {
+ struct intel_crtc *intel_crtc = to_intel_crtc(crtc);
+
+ modeset = &intel_crtc->mode_set;
+ modeset->fb = &intel_fb->base;
+ conn_count = 0;
+ list_for_each_entry(connector, &dev->mode_config.connector_list,
+ head) {
+ if (!connector->encoder)
+ continue;
+
+			if (connector->encoder->crtc == modeset->crtc) {
+ modeset->connectors[conn_count++] = connector;
+ if (conn_count > INTELFB_CONN_LIMIT)
+ BUG();
+ }
+ }
+
+ /* Zero out remaining connector pointers */
+ for (i = conn_count; i < INTELFB_CONN_LIMIT; i++)
+ modeset->connectors[i] = NULL;
+
+ par->crtc_ids[crtc_count++] = crtc->base.id;
+
+ modeset->num_connectors = conn_count;
+ if (modeset->mode != modeset->crtc->desired_mode)
+ modeset->mode = modeset->crtc->desired_mode;
+ }
+ par->crtc_count = crtc_count;
+
+ if (new_fb) {
+ info->var.pixclock = -1;
+ if (register_framebuffer(info) < 0)
+ return -EINVAL;
+ } else
+ intelfb_set_par(info);
+
+ printk(KERN_INFO "fb%d: %s frame buffer device\n", info->node,
+ info->fix.id);
+
+ /* Switch back to kernel console on panic */
+ kernelfb_mode = *modeset;
+ atomic_notifier_chain_register(&panic_notifier_list, &paniced);
+ printk(KERN_INFO "registered panic notifier\n");
+
+ return 0;
+}
+
+/**
+ * intelfb_restore - restore the framebuffer console (kernel) config
+ *
+ * Restores the kernel's fbcon mode; used for the lastclose and panic paths.
+ */
+void intelfb_restore(void)
+{
+ drm_crtc_helper_set_config(&kernelfb_mode);
+}
+
+static void intelfb_sysrq(int dummy1, struct tty_struct *dummy3)
+{
+ intelfb_restore();
+}
+
+static struct sysrq_key_op sysrq_intelfb_restore_op = {
+ .handler = intelfb_sysrq,
+ .help_msg = "force fb",
+ .action_msg = "force restore of fb console",
+};
+
+int intelfb_probe(struct drm_device *dev)
+{
+ int ret;
+
+ DRM_DEBUG("\n");
+
+ /* something has changed in the lower levels of hell - deal with it
+ here */
+
+ /* two modes : a) 1 fb to rule all crtcs.
+ b) one fb per crtc.
+ two actions 1) new connected device
+ 2) device removed.
+ case a/1 : if the fb surface isn't big enough - resize the surface fb.
+ if the fb size isn't big enough - resize fb into surface.
+ if everything big enough configure the new crtc/etc.
+ case a/2 : undo the configuration
+ possibly resize down the fb to fit the new configuration.
+ case b/1 : see if it is on a new crtc - setup a new fb and add it.
+ case b/2 : teardown the new fb.
+ */
+
+ /* mode a first */
+ /* search for an fb */
+ if (i915_fbpercrtc == 1) {
+ ret = intelfb_multi_fb_probe(dev);
+ } else {
+ ret = intelfb_single_fb_probe(dev);
+ }
+
+ register_sysrq_key('g', &sysrq_intelfb_restore_op);
+
+ return ret;
+}
+EXPORT_SYMBOL(intelfb_probe);
+
+int intelfb_remove(struct drm_device *dev, struct drm_framebuffer *fb)
+{
+ struct fb_info *info;
+
+ if (!fb)
+ return -EINVAL;
+
+ info = fb->fbdev;
+
+ if (info) {
+ unregister_framebuffer(info);
+ iounmap(info->screen_base);
+ framebuffer_release(info);
+ }
+
+ atomic_notifier_chain_unregister(&panic_notifier_list, &paniced);
+ memset(&kernelfb_mode, 0, sizeof(struct drm_mode_set));
+ return 0;
+}
+EXPORT_SYMBOL(intelfb_remove);
+MODULE_LICENSE("GPL and additional rights");
diff --git a/drivers/gpu/drm/i915/intel_i2c.c b/drivers/gpu/drm/i915/intel_i2c.c
new file mode 100644
index 00000000000..a5a2f5339e9
--- /dev/null
+++ b/drivers/gpu/drm/i915/intel_i2c.c
@@ -0,0 +1,184 @@
+/*
+ * Copyright (c) 2006 Dave Airlie <airlied@linux.ie>
+ * Copyright © 2006-2008 Intel Corporation
+ * Jesse Barnes <jesse.barnes@intel.com>
+ *
+ * Permission is hereby granted, free of charge, to any person obtaining a
+ * copy of this software and associated documentation files (the "Software"),
+ * to deal in the Software without restriction, including without limitation
+ * the rights to use, copy, modify, merge, publish, distribute, sublicense,
+ * and/or sell copies of the Software, and to permit persons to whom the
+ * Software is furnished to do so, subject to the following conditions:
+ *
+ * The above copyright notice and this permission notice (including the next
+ * paragraph) shall be included in all copies or substantial portions of the
+ * Software.
+ *
+ * THE SOFTWARE IS PROVIDED "AS IS", WITHOUT WARRANTY OF ANY KIND, EXPRESS OR
+ * IMPLIED, INCLUDING BUT NOT LIMITED TO THE WARRANTIES OF MERCHANTABILITY,
+ * FITNESS FOR A PARTICULAR PURPOSE AND NONINFRINGEMENT. IN NO EVENT SHALL
+ * THE AUTHORS OR COPYRIGHT HOLDERS BE LIABLE FOR ANY CLAIM, DAMAGES OR OTHER
+ * LIABILITY, WHETHER IN AN ACTION OF CONTRACT, TORT OR OTHERWISE, ARISING
+ * FROM, OUT OF OR IN CONNECTION WITH THE SOFTWARE OR THE USE OR OTHER
+ * DEALINGS IN THE SOFTWARE.
+ *
+ * Authors:
+ * Eric Anholt <eric@anholt.net>
+ */
+#include <linux/i2c.h>
+#include <linux/i2c-id.h>
+#include <linux/i2c-algo-bit.h>
+#include "drmP.h"
+#include "drm.h"
+#include "intel_drv.h"
+#include "i915_drm.h"
+#include "i915_drv.h"
+
+/*
+ * Intel GPIO access functions
+ */
+
+#define I2C_RISEFALL_TIME 20
+
+static int get_clock(void *data)
+{
+ struct intel_i2c_chan *chan = data;
+ struct drm_i915_private *dev_priv = chan->drm_dev->dev_private;
+ u32 val;
+
+ val = I915_READ(chan->reg);
+ return ((val & GPIO_CLOCK_VAL_IN) != 0);
+}
+
+static int get_data(void *data)
+{
+ struct intel_i2c_chan *chan = data;
+ struct drm_i915_private *dev_priv = chan->drm_dev->dev_private;
+ u32 val;
+
+ val = I915_READ(chan->reg);
+ return ((val & GPIO_DATA_VAL_IN) != 0);
+}
+
+static void set_clock(void *data, int state_high)
+{
+ struct intel_i2c_chan *chan = data;
+ struct drm_device *dev = chan->drm_dev;
+ struct drm_i915_private *dev_priv = chan->drm_dev->dev_private;
+ u32 reserved = 0, clock_bits;
+
+ /* On most chips, these bits must be preserved in software. */
+ if (!IS_I830(dev) && !IS_845G(dev))
+ reserved = I915_READ(chan->reg) & (GPIO_DATA_PULLUP_DISABLE |
+ GPIO_CLOCK_PULLUP_DISABLE);
+
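+	/*
+	 * Emulate an open-drain bus: release the line (logic high) by
+	 * switching the pin to input and letting the pull-up raise it;
+	 * drive it low by switching to output with the value bit clear.
+	 */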
+ if (state_high)
+ clock_bits = GPIO_CLOCK_DIR_IN | GPIO_CLOCK_DIR_MASK;
+ else
+ clock_bits = GPIO_CLOCK_DIR_OUT | GPIO_CLOCK_DIR_MASK |
+ GPIO_CLOCK_VAL_MASK;
+ I915_WRITE(chan->reg, reserved | clock_bits);
+ udelay(I2C_RISEFALL_TIME); /* wait for the line to change state */
+}
+
+static void set_data(void *data, int state_high)
+{
+ struct intel_i2c_chan *chan = data;
+ struct drm_device *dev = chan->drm_dev;
+ struct drm_i915_private *dev_priv = chan->drm_dev->dev_private;
+ u32 reserved = 0, data_bits;
+
+ /* On most chips, these bits must be preserved in software. */
+ if (!IS_I830(dev) && !IS_845G(dev))
+ reserved = I915_READ(chan->reg) & (GPIO_DATA_PULLUP_DISABLE |
+ GPIO_CLOCK_PULLUP_DISABLE);
+
+ if (state_high)
+ data_bits = GPIO_DATA_DIR_IN | GPIO_DATA_DIR_MASK;
+ else
+ data_bits = GPIO_DATA_DIR_OUT | GPIO_DATA_DIR_MASK |
+ GPIO_DATA_VAL_MASK;
+
+ I915_WRITE(chan->reg, reserved | data_bits);
+ udelay(I2C_RISEFALL_TIME); /* wait for the line to change state */
+}
+
+/**
+ * intel_i2c_create - instantiate an Intel i2c bus using the specified GPIO reg
+ * @dev: DRM device
+ * @reg: GPIO reg to use
+ * @name: name for this bus
+ *
+ * Creates and registers a new i2c bus with the Linux i2c layer, for use
+ * in output probing and control (e.g. DDC or SDVO control functions).
+ *
+ * Possible values for @reg include:
+ * %GPIOA
+ * %GPIOB
+ * %GPIOC
+ * %GPIOD
+ * %GPIOE
+ * %GPIOF
+ * %GPIOG
+ * %GPIOH
+ * see PRM for details on how these different busses are used.
+ */
+struct intel_i2c_chan *intel_i2c_create(struct drm_device *dev, const u32 reg,
+ const char *name)
+{
+ struct intel_i2c_chan *chan;
+
+ chan = kzalloc(sizeof(struct intel_i2c_chan), GFP_KERNEL);
+ if (!chan)
+ goto out_free;
+
+ chan->drm_dev = dev;
+ chan->reg = reg;
+ snprintf(chan->adapter.name, I2C_NAME_SIZE, "intel drm %s", name);
+ chan->adapter.owner = THIS_MODULE;
+#ifndef I2C_HW_B_INTELFB
+#define I2C_HW_B_INTELFB I2C_HW_B_I810
+#endif
+ chan->adapter.id = I2C_HW_B_INTELFB;
+ chan->adapter.algo_data = &chan->algo;
+ chan->adapter.dev.parent = &dev->pdev->dev;
+ chan->algo.setsda = set_data;
+ chan->algo.setscl = set_clock;
+ chan->algo.getsda = get_data;
+ chan->algo.getscl = get_clock;
+ chan->algo.udelay = 20;
+ chan->algo.timeout = usecs_to_jiffies(2200);
+ chan->algo.data = chan;
+
+ i2c_set_adapdata(&chan->adapter, chan);
+
+	if (i2c_bit_add_bus(&chan->adapter))
+ goto out_free;
+
+ /* JJJ: raise SCL and SDA? */
+ set_data(chan, 1);
+ set_clock(chan, 1);
+ udelay(20);
+
+ return chan;
+
+out_free:
+ kfree(chan);
+ return NULL;
+}
+
+/**
+ * intel_i2c_destroy - unregister and free i2c bus resources
+ * @chan: channel to free
+ *
+ * Unregister the adapter from the i2c layer, then free the structure.
+ */
+void intel_i2c_destroy(struct intel_i2c_chan *chan)
+{
+ if (!chan)
+ return;
+
+ i2c_del_adapter(&chan->adapter);
+ kfree(chan);
+}
diff --git a/drivers/gpu/drm/i915/intel_lvds.c b/drivers/gpu/drm/i915/intel_lvds.c
new file mode 100644
index 00000000000..ccecfaf6307
--- /dev/null
+++ b/drivers/gpu/drm/i915/intel_lvds.c
@@ -0,0 +1,525 @@
+/*
+ * Copyright © 2006-2007 Intel Corporation
+ * Copyright (c) 2006 Dave Airlie <airlied@linux.ie>
+ *
+ * Permission is hereby granted, free of charge, to any person obtaining a
+ * copy of this software and associated documentation files (the "Software"),
+ * to deal in the Software without restriction, including without limitation
+ * the rights to use, copy, modify, merge, publish, distribute, sublicense,
+ * and/or sell copies of the Software, and to permit persons to whom the
+ * Software is furnished to do so, subject to the following conditions:
+ *
+ * The above copyright notice and this permission notice (including the next
+ * paragraph) shall be included in all copies or substantial portions of the
+ * Software.
+ *
+ * THE SOFTWARE IS PROVIDED "AS IS", WITHOUT WARRANTY OF ANY KIND, EXPRESS OR
+ * IMPLIED, INCLUDING BUT NOT LIMITED TO THE WARRANTIES OF MERCHANTABILITY,
+ * FITNESS FOR A PARTICULAR PURPOSE AND NONINFRINGEMENT. IN NO EVENT SHALL
+ * THE AUTHORS OR COPYRIGHT HOLDERS BE LIABLE FOR ANY CLAIM, DAMAGES OR OTHER
+ * LIABILITY, WHETHER IN AN ACTION OF CONTRACT, TORT OR OTHERWISE, ARISING
+ * FROM, OUT OF OR IN CONNECTION WITH THE SOFTWARE OR THE USE OR OTHER
+ * DEALINGS IN THE SOFTWARE.
+ *
+ * Authors:
+ * Eric Anholt <eric@anholt.net>
+ * Dave Airlie <airlied@linux.ie>
+ * Jesse Barnes <jesse.barnes@intel.com>
+ */
+
+#include <linux/i2c.h>
+#include "drmP.h"
+#include "drm.h"
+#include "drm_crtc.h"
+#include "drm_edid.h"
+#include "intel_drv.h"
+#include "i915_drm.h"
+#include "i915_drv.h"
+
+/**
+ * Sets the backlight level.
+ *
+ * @level: backlight level, from 0 to intel_lvds_get_max_backlight().
+ */
+static void intel_lvds_set_backlight(struct drm_device *dev, int level)
+{
+ struct drm_i915_private *dev_priv = dev->dev_private;
+ u32 blc_pwm_ctl;
+
+ blc_pwm_ctl = I915_READ(BLC_PWM_CTL) & ~BACKLIGHT_DUTY_CYCLE_MASK;
+ I915_WRITE(BLC_PWM_CTL, (blc_pwm_ctl |
+ (level << BACKLIGHT_DUTY_CYCLE_SHIFT)));
+}
+
+/**
+ * Returns the maximum level of the backlight duty cycle field.
+ */
+static u32 intel_lvds_get_max_backlight(struct drm_device *dev)
+{
+ struct drm_i915_private *dev_priv = dev->dev_private;
+
+ return ((I915_READ(BLC_PWM_CTL) & BACKLIGHT_MODULATION_FREQ_MASK) >>
+ BACKLIGHT_MODULATION_FREQ_SHIFT) * 2;
+}
+
+/**
+ * Sets the power state for the panel.
+ */
+static void intel_lvds_set_power(struct drm_device *dev, bool on)
+{
+ struct drm_i915_private *dev_priv = dev->dev_private;
+ u32 pp_status;
+
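+	/*
+	 * Toggle the panel power target and busy-wait for the PP_ON status
+	 * bit to reflect the new state before touching the backlight.
+	 */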
+ if (on) {
+ I915_WRITE(PP_CONTROL, I915_READ(PP_CONTROL) |
+ POWER_TARGET_ON);
+ do {
+ pp_status = I915_READ(PP_STATUS);
+ } while ((pp_status & PP_ON) == 0);
+
+ intel_lvds_set_backlight(dev, dev_priv->backlight_duty_cycle);
+ } else {
+ intel_lvds_set_backlight(dev, 0);
+
+ I915_WRITE(PP_CONTROL, I915_READ(PP_CONTROL) &
+ ~POWER_TARGET_ON);
+ do {
+ pp_status = I915_READ(PP_STATUS);
+ } while (pp_status & PP_ON);
+ }
+}
+
+static void intel_lvds_dpms(struct drm_encoder *encoder, int mode)
+{
+ struct drm_device *dev = encoder->dev;
+
+ if (mode == DRM_MODE_DPMS_ON)
+ intel_lvds_set_power(dev, true);
+ else
+ intel_lvds_set_power(dev, false);
+
+ /* XXX: We never power down the LVDS pairs. */
+}
+
+static void intel_lvds_save(struct drm_connector *connector)
+{
+ struct drm_device *dev = connector->dev;
+ struct drm_i915_private *dev_priv = dev->dev_private;
+
+ dev_priv->savePP_ON = I915_READ(PP_ON_DELAYS);
+ dev_priv->savePP_OFF = I915_READ(PP_OFF_DELAYS);
+ dev_priv->savePP_CONTROL = I915_READ(PP_CONTROL);
+ dev_priv->savePP_DIVISOR = I915_READ(PP_DIVISOR);
+ dev_priv->saveBLC_PWM_CTL = I915_READ(BLC_PWM_CTL);
+ dev_priv->backlight_duty_cycle = (dev_priv->saveBLC_PWM_CTL &
+ BACKLIGHT_DUTY_CYCLE_MASK);
+
+ /*
+ * If the light is off at server startup, just make it full brightness
+ */
+ if (dev_priv->backlight_duty_cycle == 0)
+ dev_priv->backlight_duty_cycle =
+ intel_lvds_get_max_backlight(dev);
+}
+
+static void intel_lvds_restore(struct drm_connector *connector)
+{
+ struct drm_device *dev = connector->dev;
+ struct drm_i915_private *dev_priv = dev->dev_private;
+
+ I915_WRITE(BLC_PWM_CTL, dev_priv->saveBLC_PWM_CTL);
+ I915_WRITE(PP_ON_DELAYS, dev_priv->savePP_ON);
+ I915_WRITE(PP_OFF_DELAYS, dev_priv->savePP_OFF);
+ I915_WRITE(PP_DIVISOR, dev_priv->savePP_DIVISOR);
+ I915_WRITE(PP_CONTROL, dev_priv->savePP_CONTROL);
+ if (dev_priv->savePP_CONTROL & POWER_TARGET_ON)
+ intel_lvds_set_power(dev, true);
+ else
+ intel_lvds_set_power(dev, false);
+}
+
+static int intel_lvds_mode_valid(struct drm_connector *connector,
+ struct drm_display_mode *mode)
+{
+ struct drm_device *dev = connector->dev;
+ struct drm_i915_private *dev_priv = dev->dev_private;
+ struct drm_display_mode *fixed_mode = dev_priv->panel_fixed_mode;
+
+ if (fixed_mode) {
+ if (mode->hdisplay > fixed_mode->hdisplay)
+ return MODE_PANEL;
+ if (mode->vdisplay > fixed_mode->vdisplay)
+ return MODE_PANEL;
+ }
+
+ return MODE_OK;
+}
+
+static bool intel_lvds_mode_fixup(struct drm_encoder *encoder,
+ struct drm_display_mode *mode,
+ struct drm_display_mode *adjusted_mode)
+{
+ struct drm_device *dev = encoder->dev;
+ struct drm_i915_private *dev_priv = dev->dev_private;
+ struct intel_crtc *intel_crtc = to_intel_crtc(encoder->crtc);
+ struct drm_encoder *tmp_encoder;
+
+ /* Should never happen!! */
+ if (!IS_I965G(dev) && intel_crtc->pipe == 0) {
+ printk(KERN_ERR "Can't support LVDS on pipe A\n");
+ return false;
+ }
+
+ /* Should never happen!! */
+ list_for_each_entry(tmp_encoder, &dev->mode_config.encoder_list, head) {
+ if (tmp_encoder != encoder && tmp_encoder->crtc == encoder->crtc) {
+ printk(KERN_ERR "Can't enable LVDS and another "
+ "encoder on the same pipe\n");
+ return false;
+ }
+ }
+
+ /*
+ * If we have timings from the BIOS for the panel, put them in
+ * to the adjusted mode. The CRTC will be set up for this mode,
+ * with the panel scaling set up to source from the H/VDisplay
+ * of the original mode.
+ */
+ if (dev_priv->panel_fixed_mode != NULL) {
+ adjusted_mode->hdisplay = dev_priv->panel_fixed_mode->hdisplay;
+ adjusted_mode->hsync_start =
+ dev_priv->panel_fixed_mode->hsync_start;
+ adjusted_mode->hsync_end =
+ dev_priv->panel_fixed_mode->hsync_end;
+ adjusted_mode->htotal = dev_priv->panel_fixed_mode->htotal;
+ adjusted_mode->vdisplay = dev_priv->panel_fixed_mode->vdisplay;
+ adjusted_mode->vsync_start =
+ dev_priv->panel_fixed_mode->vsync_start;
+ adjusted_mode->vsync_end =
+ dev_priv->panel_fixed_mode->vsync_end;
+ adjusted_mode->vtotal = dev_priv->panel_fixed_mode->vtotal;
+ adjusted_mode->clock = dev_priv->panel_fixed_mode->clock;
+ drm_mode_set_crtcinfo(adjusted_mode, CRTC_INTERLACE_HALVE_V);
+ }
+
+ /*
+ * XXX: It would be nice to support lower refresh rates on the
+ * panels to reduce power consumption, and perhaps match the
+ * user's requested refresh rate.
+ */
+
+ return true;
+}
+
+static void intel_lvds_prepare(struct drm_encoder *encoder)
+{
+ struct drm_device *dev = encoder->dev;
+ struct drm_i915_private *dev_priv = dev->dev_private;
+
+ dev_priv->saveBLC_PWM_CTL = I915_READ(BLC_PWM_CTL);
+ dev_priv->backlight_duty_cycle = (dev_priv->saveBLC_PWM_CTL &
+ BACKLIGHT_DUTY_CYCLE_MASK);
+
+ intel_lvds_set_power(dev, false);
+}
+
+static void intel_lvds_commit(struct drm_encoder *encoder)
+{
+ struct drm_device *dev = encoder->dev;
+ struct drm_i915_private *dev_priv = dev->dev_private;
+
+ if (dev_priv->backlight_duty_cycle == 0)
+ dev_priv->backlight_duty_cycle =
+ intel_lvds_get_max_backlight(dev);
+
+ intel_lvds_set_power(dev, true);
+}
+
+static void intel_lvds_mode_set(struct drm_encoder *encoder,
+ struct drm_display_mode *mode,
+ struct drm_display_mode *adjusted_mode)
+{
+ struct drm_device *dev = encoder->dev;
+ struct drm_i915_private *dev_priv = dev->dev_private;
+ struct intel_crtc *intel_crtc = to_intel_crtc(encoder->crtc);
+ u32 pfit_control;
+
+ /*
+ * The LVDS pin pair will already have been turned on in the
+ * intel_crtc_mode_set since it has a large impact on the DPLL
+ * settings.
+ */
+
+ /*
+ * Enable automatic panel scaling so that non-native modes fill the
+ * screen. Should be enabled before the pipe is enabled, according to
+ * register description and PRM.
+ */
+ if (mode->hdisplay != adjusted_mode->hdisplay ||
+ mode->vdisplay != adjusted_mode->vdisplay)
+ pfit_control = (PFIT_ENABLE | VERT_AUTO_SCALE |
+ HORIZ_AUTO_SCALE | VERT_INTERP_BILINEAR |
+ HORIZ_INTERP_BILINEAR);
+ else
+ pfit_control = 0;
+
+ if (!IS_I965G(dev)) {
+ if (dev_priv->panel_wants_dither)
+ pfit_control |= PANEL_8TO6_DITHER_ENABLE;
+	} else
+		pfit_control |= intel_crtc->pipe << PFIT_PIPE_SHIFT;
+
+ I915_WRITE(PFIT_CONTROL, pfit_control);
+}
+
+/**
+ * Detect the LVDS connection.
+ *
+ * Always returns connector_status_connected; the connector is only set up
+ * in the first place if an LVDS panel was actually detected.
+ */
+static enum drm_connector_status intel_lvds_detect(struct drm_connector *connector)
+{
+ return connector_status_connected;
+}
+
+/**
+ * Return the list of DDC modes if available, or the BIOS fixed mode otherwise.
+ */
+static int intel_lvds_get_modes(struct drm_connector *connector)
+{
+ struct drm_device *dev = connector->dev;
+ struct intel_output *intel_output = to_intel_output(connector);
+ struct drm_i915_private *dev_priv = dev->dev_private;
+ int ret = 0;
+
+ ret = intel_ddc_get_modes(intel_output);
+
+ if (ret)
+ return ret;
+
+	/*
+	 * Didn't get an EDID, so set wide sync ranges so that every mode
+	 * gets handed to mode_valid for checking.
+	 */
+ connector->display_info.min_vfreq = 0;
+ connector->display_info.max_vfreq = 200;
+ connector->display_info.min_hfreq = 0;
+ connector->display_info.max_hfreq = 200;
+
+ if (dev_priv->panel_fixed_mode != NULL) {
+ struct drm_display_mode *mode;
+
+		mutex_lock(&dev->mode_config.mutex);
+ mode = drm_mode_duplicate(dev, dev_priv->panel_fixed_mode);
+ drm_mode_probed_add(connector, mode);
+ mutex_unlock(&dev->mode_config.mutex);
+
+ return 1;
+ }
+
+ return 0;
+}
+
+/**
+ * intel_lvds_destroy - unregister and free LVDS structures
+ * @connector: connector to free
+ *
+ * Unregister the DDC bus for this connector then free the driver private
+ * structure.
+ */
+static void intel_lvds_destroy(struct drm_connector *connector)
+{
+ struct intel_output *intel_output = to_intel_output(connector);
+
+ if (intel_output->ddc_bus)
+ intel_i2c_destroy(intel_output->ddc_bus);
+ drm_sysfs_connector_remove(connector);
+ drm_connector_cleanup(connector);
+ kfree(connector);
+}
+
+static const struct drm_encoder_helper_funcs intel_lvds_helper_funcs = {
+ .dpms = intel_lvds_dpms,
+ .mode_fixup = intel_lvds_mode_fixup,
+ .prepare = intel_lvds_prepare,
+ .mode_set = intel_lvds_mode_set,
+ .commit = intel_lvds_commit,
+};
+
+static const struct drm_connector_helper_funcs intel_lvds_connector_helper_funcs = {
+ .get_modes = intel_lvds_get_modes,
+ .mode_valid = intel_lvds_mode_valid,
+ .best_encoder = intel_best_encoder,
+};
+
+static const struct drm_connector_funcs intel_lvds_connector_funcs = {
+ .save = intel_lvds_save,
+ .restore = intel_lvds_restore,
+ .detect = intel_lvds_detect,
+ .fill_modes = drm_helper_probe_single_connector_modes,
+ .destroy = intel_lvds_destroy,
+};
+
+
+static void intel_lvds_enc_destroy(struct drm_encoder *encoder)
+{
+ drm_encoder_cleanup(encoder);
+}
+
+static const struct drm_encoder_funcs intel_lvds_enc_funcs = {
+ .destroy = intel_lvds_enc_destroy,
+};
+
+
+
+/**
+ * intel_lvds_init - setup LVDS connectors on this device
+ * @dev: drm device
+ *
+ * Create the connector, register the LVDS DDC bus, and try to figure out what
+ * modes we can display on the LVDS panel (if present).
+ */
+void intel_lvds_init(struct drm_device *dev)
+{
+ struct drm_i915_private *dev_priv = dev->dev_private;
+ struct intel_output *intel_output;
+ struct drm_connector *connector;
+ struct drm_encoder *encoder;
+ struct drm_display_mode *scan; /* *modes, *bios_mode; */
+ struct drm_crtc *crtc;
+ u32 lvds;
+ int pipe;
+
+ intel_output = kzalloc(sizeof(struct intel_output), GFP_KERNEL);
+ if (!intel_output) {
+ return;
+ }
+
+ connector = &intel_output->base;
+ encoder = &intel_output->enc;
+ drm_connector_init(dev, &intel_output->base, &intel_lvds_connector_funcs,
+ DRM_MODE_CONNECTOR_LVDS);
+
+ drm_encoder_init(dev, &intel_output->enc, &intel_lvds_enc_funcs,
+ DRM_MODE_ENCODER_LVDS);
+
+ drm_mode_connector_attach_encoder(&intel_output->base, &intel_output->enc);
+ intel_output->type = INTEL_OUTPUT_LVDS;
+
+ drm_encoder_helper_add(encoder, &intel_lvds_helper_funcs);
+ drm_connector_helper_add(connector, &intel_lvds_connector_helper_funcs);
+ connector->display_info.subpixel_order = SubPixelHorizontalRGB;
+ connector->interlace_allowed = false;
+ connector->doublescan_allowed = false;
+
+
+ /*
+ * LVDS discovery:
+ * 1) check for EDID on DDC
+ * 2) check for VBT data
+ * 3) check to see if LVDS is already on
+ * if none of the above, no panel
+ * 4) make sure lid is open
+ * if closed, act like it's not there for now
+ */
+
+ /* Set up the DDC bus. */
+ intel_output->ddc_bus = intel_i2c_create(dev, GPIOC, "LVDSDDC_C");
+ if (!intel_output->ddc_bus) {
+ dev_printk(KERN_ERR, &dev->pdev->dev, "DDC bus registration "
+ "failed.\n");
+ goto failed;
+ }
+
+ /*
+ * Attempt to get the fixed panel mode from DDC. Assume that the
+ * preferred mode is the right one.
+ */
+ intel_ddc_get_modes(intel_output);
+
+ list_for_each_entry(scan, &connector->probed_modes, head) {
+ mutex_lock(&dev->mode_config.mutex);
+ if (scan->type & DRM_MODE_TYPE_PREFERRED) {
+ dev_priv->panel_fixed_mode =
+ drm_mode_duplicate(dev, scan);
+ mutex_unlock(&dev->mode_config.mutex);
+ goto out; /* FIXME: check for quirks */
+ }
+ mutex_unlock(&dev->mode_config.mutex);
+ }
+
+ /* Failed to get EDID, what about VBT? */
+ if (dev_priv->vbt_mode) {
+ mutex_lock(&dev->mode_config.mutex);
+ dev_priv->panel_fixed_mode =
+ drm_mode_duplicate(dev, dev_priv->vbt_mode);
+ mutex_unlock(&dev->mode_config.mutex);
+ }
+
+ /*
+ * If we didn't get EDID, try checking if the panel is already turned
+ * on. If so, assume that whatever is currently programmed is the
+ * correct mode.
+ */
+ lvds = I915_READ(LVDS);
+ pipe = (lvds & LVDS_PIPEB_SELECT) ? 1 : 0;
+ crtc = intel_get_crtc_from_pipe(dev, pipe);
+
+ if (crtc && (lvds & LVDS_PORT_EN)) {
+ dev_priv->panel_fixed_mode = intel_crtc_mode_get(dev, crtc);
+ if (dev_priv->panel_fixed_mode) {
+ dev_priv->panel_fixed_mode->type |=
+ DRM_MODE_TYPE_PREFERRED;
+ goto out; /* FIXME: check for quirks */
+ }
+ }
+
+ /* If we still don't have a mode after all that, give up. */
+ if (!dev_priv->panel_fixed_mode)
+ goto failed;
+
+ /* FIXME: detect aopen & mac mini type stuff automatically? */
+ /*
+ * Blacklist machines with BIOSes that list an LVDS panel without
+ * actually having one.
+ */
+ if (IS_I945GM(dev)) {
+ /* aopen mini pc */
+ if (dev->pdev->subsystem_vendor == 0xa0a0)
+ goto failed;
+
+ if ((dev->pdev->subsystem_vendor == 0x8086) &&
+ (dev->pdev->subsystem_device == 0x7270)) {
+ /* It's a Mac Mini or Macbook Pro.
+ *
+ * Apple hardware is out to get us. The macbook pro
+ * has a real LVDS panel, but the mac mini does not,
+ * and they have the same device IDs. We'll
+ * distinguish by panel size, on the assumption
+ * that Apple isn't about to make any machines with an
+ * 800x600 display.
+ */
+
+ if (dev_priv->panel_fixed_mode != NULL &&
+ dev_priv->panel_fixed_mode->hdisplay == 800 &&
+ dev_priv->panel_fixed_mode->vdisplay == 600) {
+ DRM_DEBUG("Suspected Mac Mini, ignoring the LVDS\n");
+ goto failed;
+ }
+ }
+ }
+
+
+out:
+ drm_sysfs_connector_add(connector);
+ return;
+
+failed:
+ DRM_DEBUG("No LVDS modes found, disabling.\n");
+ if (intel_output->ddc_bus)
+ intel_i2c_destroy(intel_output->ddc_bus);
+ drm_connector_cleanup(connector);
+ kfree(connector);
+}
diff --git a/drivers/gpu/drm/i915/intel_modes.c b/drivers/gpu/drm/i915/intel_modes.c
new file mode 100644
index 00000000000..e42019e5d66
--- /dev/null
+++ b/drivers/gpu/drm/i915/intel_modes.c
@@ -0,0 +1,83 @@
+/*
+ * Copyright (c) 2007 Dave Airlie <airlied@linux.ie>
+ * Copyright (c) 2007 Intel Corporation
+ * Jesse Barnes <jesse.barnes@intel.com>
+ *
+ * Permission is hereby granted, free of charge, to any person obtaining a
+ * copy of this software and associated documentation files (the "Software"),
+		if (connector->encoder &&
+		    connector->encoder->crtc == modeset->crtc) {
+			modeset->connectors[conn_count] = connector;
+			conn_count++;
+			if (conn_count > INTELFB_CONN_LIMIT)
+				BUG();
+		}
+ * Software.
+ *
+ * THE SOFTWARE IS PROVIDED "AS IS", WITHOUT WARRANTY OF ANY KIND, EXPRESS OR
+ * IMPLIED, INCLUDING BUT NOT LIMITED TO THE WARRANTIES OF MERCHANTABILITY,
+ * FITNESS FOR A PARTICULAR PURPOSE AND NONINFRINGEMENT. IN NO EVENT SHALL
+ * THE AUTHORS OR COPYRIGHT HOLDERS BE LIABLE FOR ANY CLAIM, DAMAGES OR OTHER
+ * LIABILITY, WHETHER IN AN ACTION OF CONTRACT, TORT OR OTHERWISE, ARISING
+ * FROM, OUT OF OR IN CONNECTION WITH THE SOFTWARE OR THE USE OR OTHER
+ * DEALINGS IN THE SOFTWARE.
+ */
+
+#include <linux/i2c.h>
+#include <linux/fb.h>
+#include "drmP.h"
+#include "intel_drv.h"
+
+/**
+ * intel_ddc_probe - check for a monitor on this output's DDC bus
+ * @intel_output: intel output device whose DDC bus to probe
+ *
+ * Perform a minimal read at the standard EDID address (0x50) and report
+ * whether a device acknowledged the transfer.
+ */
+bool intel_ddc_probe(struct intel_output *intel_output)
+{
+ u8 out_buf[] = { 0x0, 0x0};
+ u8 buf[2];
+ int ret;
+ struct i2c_msg msgs[] = {
+ {
+ .addr = 0x50,
+ .flags = 0,
+ .len = 1,
+ .buf = out_buf,
+ },
+ {
+ .addr = 0x50,
+ .flags = I2C_M_RD,
+ .len = 1,
+ .buf = buf,
+ }
+ };
+
+ ret = i2c_transfer(&intel_output->ddc_bus->adapter, msgs, 2);
+ if (ret == 2)
+ return true;
+
+ return false;
+}
+
+/**
+ * intel_ddc_get_modes - get modelist from monitor
+ * @intel_output: intel output device whose DDC bus to use
+ *
+ * Fetch the EDID information from @connector using the DDC bus.
+ */
+int intel_ddc_get_modes(struct intel_output *intel_output)
+{
+ struct edid *edid;
+ int ret = 0;
+
+ edid = drm_get_edid(&intel_output->base,
+ &intel_output->ddc_bus->adapter);
+ if (edid) {
+ drm_mode_connector_update_edid_property(&intel_output->base,
+ edid);
+ ret = drm_add_edid_modes(&intel_output->base, edid);
+ kfree(edid);
+ }
+
+ return ret;
+}
diff --git a/drivers/gpu/drm/i915/intel_sdvo.c b/drivers/gpu/drm/i915/intel_sdvo.c
new file mode 100644
index 00000000000..fbbaa4f414a
--- /dev/null
+++ b/drivers/gpu/drm/i915/intel_sdvo.c
@@ -0,0 +1,1128 @@
+/*
+ * Copyright 2006 Dave Airlie <airlied@linux.ie>
+ * Copyright © 2006-2007 Intel Corporation
+ * Jesse Barnes <jesse.barnes@intel.com>
+ *
+ * Permission is hereby granted, free of charge, to any person obtaining a
+ * copy of this software and associated documentation files (the "Software"),
+ * to deal in the Software without restriction, including without limitation
+ * the rights to use, copy, modify, merge, publish, distribute, sublicense,
+ * and/or sell copies of the Software, and to permit persons to whom the
+ * Software is furnished to do so, subject to the following conditions:
+ *
+ * The above copyright notice and this permission notice (including the next
+ * paragraph) shall be included in all copies or substantial portions of the
+ * Software.
+ *
+ * THE SOFTWARE IS PROVIDED "AS IS", WITHOUT WARRANTY OF ANY KIND, EXPRESS OR
+ * IMPLIED, INCLUDING BUT NOT LIMITED TO THE WARRANTIES OF MERCHANTABILITY,
+ * FITNESS FOR A PARTICULAR PURPOSE AND NONINFRINGEMENT. IN NO EVENT SHALL
+ * THE AUTHORS OR COPYRIGHT HOLDERS BE LIABLE FOR ANY CLAIM, DAMAGES OR OTHER
+ * LIABILITY, WHETHER IN AN ACTION OF CONTRACT, TORT OR OTHERWISE, ARISING
+ * FROM, OUT OF OR IN CONNECTION WITH THE SOFTWARE OR THE USE OR OTHER
+ * DEALINGS IN THE SOFTWARE.
+ *
+ * Authors:
+ * Eric Anholt <eric@anholt.net>
+ */
+#include <linux/i2c.h>
+#include <linux/delay.h>
+#include "drmP.h"
+#include "drm.h"
+#include "drm_crtc.h"
+#include "intel_drv.h"
+#include "i915_drm.h"
+#include "i915_drv.h"
+#include "intel_sdvo_regs.h"
+
+#undef SDVO_DEBUG
+
+struct intel_sdvo_priv {
+ struct intel_i2c_chan *i2c_bus;
+ int slaveaddr;
+ int output_device;
+
+ u16 active_outputs;
+
+ struct intel_sdvo_caps caps;
+ int pixel_clock_min, pixel_clock_max;
+
+ int save_sdvo_mult;
+ u16 save_active_outputs;
+ struct intel_sdvo_dtd save_input_dtd_1, save_input_dtd_2;
+ struct intel_sdvo_dtd save_output_dtd[16];
+ u32 save_SDVOX;
+};
+
+/**
+ * Writes the SDVOB or SDVOC with the given value, but always writes both
+ * SDVOB and SDVOC to work around apparent hardware issues (according to
+ * comments in the BIOS).
+ */
+static void intel_sdvo_write_sdvox(struct intel_output *intel_output, u32 val)
+{
+ struct drm_device *dev = intel_output->base.dev;
+ struct drm_i915_private *dev_priv = dev->dev_private;
+ struct intel_sdvo_priv *sdvo_priv = intel_output->dev_priv;
+ u32 bval = val, cval = val;
+ int i;
+
+ if (sdvo_priv->output_device == SDVOB) {
+ cval = I915_READ(SDVOC);
+ } else {
+ bval = I915_READ(SDVOB);
+ }
+ /*
+ * Write the registers twice for luck. Sometimes,
+ * writing them only once doesn't appear to 'stick'.
+ * The BIOS does this too. Yay, magic
+ */
+ for (i = 0; i < 2; i++)
+ {
+ I915_WRITE(SDVOB, bval);
+ I915_READ(SDVOB);
+ I915_WRITE(SDVOC, cval);
+ I915_READ(SDVOC);
+ }
+}
+
+static bool intel_sdvo_read_byte(struct intel_output *intel_output, u8 addr,
+ u8 *ch)
+{
+ struct intel_sdvo_priv *sdvo_priv = intel_output->dev_priv;
+ u8 out_buf[2];
+ u8 buf[2];
+ int ret;
+
+ struct i2c_msg msgs[] = {
+ {
+ .addr = sdvo_priv->i2c_bus->slave_addr,
+ .flags = 0,
+ .len = 1,
+ .buf = out_buf,
+ },
+ {
+ .addr = sdvo_priv->i2c_bus->slave_addr,
+ .flags = I2C_M_RD,
+ .len = 1,
+ .buf = buf,
+ }
+ };
+
+ out_buf[0] = addr;
+ out_buf[1] = 0;
+
+ if ((ret = i2c_transfer(&sdvo_priv->i2c_bus->adapter, msgs, 2)) == 2)
+ {
+ *ch = buf[0];
+ return true;
+ }
+
+ DRM_DEBUG("i2c transfer returned %d\n", ret);
+ return false;
+}
+
+static bool intel_sdvo_write_byte(struct intel_output *intel_output, int addr,
+ u8 ch)
+{
+ u8 out_buf[2];
+ struct i2c_msg msgs[] = {
+ {
+ .addr = intel_output->i2c_bus->slave_addr,
+ .flags = 0,
+ .len = 2,
+ .buf = out_buf,
+ }
+ };
+
+ out_buf[0] = addr;
+ out_buf[1] = ch;
+
+ if (i2c_transfer(&intel_output->i2c_bus->adapter, msgs, 1) == 1)
+ {
+ return true;
+ }
+ return false;
+}
+
+#define SDVO_CMD_NAME_ENTRY(cmd) {cmd, #cmd}
+/** Mapping of command numbers to names, for debug output */
+static const struct _sdvo_cmd_name {
+ u8 cmd;
+ char *name;
+} sdvo_cmd_names[] = {
+ SDVO_CMD_NAME_ENTRY(SDVO_CMD_RESET),
+ SDVO_CMD_NAME_ENTRY(SDVO_CMD_GET_DEVICE_CAPS),
+ SDVO_CMD_NAME_ENTRY(SDVO_CMD_GET_FIRMWARE_REV),
+ SDVO_CMD_NAME_ENTRY(SDVO_CMD_GET_TRAINED_INPUTS),
+ SDVO_CMD_NAME_ENTRY(SDVO_CMD_GET_ACTIVE_OUTPUTS),
+ SDVO_CMD_NAME_ENTRY(SDVO_CMD_SET_ACTIVE_OUTPUTS),
+ SDVO_CMD_NAME_ENTRY(SDVO_CMD_GET_IN_OUT_MAP),
+ SDVO_CMD_NAME_ENTRY(SDVO_CMD_SET_IN_OUT_MAP),
+ SDVO_CMD_NAME_ENTRY(SDVO_CMD_GET_ATTACHED_DISPLAYS),
+ SDVO_CMD_NAME_ENTRY(SDVO_CMD_GET_HOT_PLUG_SUPPORT),
+ SDVO_CMD_NAME_ENTRY(SDVO_CMD_SET_ACTIVE_HOT_PLUG),
+ SDVO_CMD_NAME_ENTRY(SDVO_CMD_GET_ACTIVE_HOT_PLUG),
+ SDVO_CMD_NAME_ENTRY(SDVO_CMD_GET_INTERRUPT_EVENT_SOURCE),
+ SDVO_CMD_NAME_ENTRY(SDVO_CMD_SET_TARGET_INPUT),
+ SDVO_CMD_NAME_ENTRY(SDVO_CMD_SET_TARGET_OUTPUT),
+ SDVO_CMD_NAME_ENTRY(SDVO_CMD_GET_INPUT_TIMINGS_PART1),
+ SDVO_CMD_NAME_ENTRY(SDVO_CMD_GET_INPUT_TIMINGS_PART2),
+ SDVO_CMD_NAME_ENTRY(SDVO_CMD_SET_INPUT_TIMINGS_PART1),
+ SDVO_CMD_NAME_ENTRY(SDVO_CMD_SET_INPUT_TIMINGS_PART2),
+ SDVO_CMD_NAME_ENTRY(SDVO_CMD_SET_OUTPUT_TIMINGS_PART1),
+ SDVO_CMD_NAME_ENTRY(SDVO_CMD_SET_OUTPUT_TIMINGS_PART2),
+ SDVO_CMD_NAME_ENTRY(SDVO_CMD_GET_OUTPUT_TIMINGS_PART1),
+ SDVO_CMD_NAME_ENTRY(SDVO_CMD_GET_OUTPUT_TIMINGS_PART2),
+ SDVO_CMD_NAME_ENTRY(SDVO_CMD_CREATE_PREFERRED_INPUT_TIMING),
+ SDVO_CMD_NAME_ENTRY(SDVO_CMD_GET_PREFERRED_INPUT_TIMING_PART1),
+ SDVO_CMD_NAME_ENTRY(SDVO_CMD_GET_PREFERRED_INPUT_TIMING_PART2),
+ SDVO_CMD_NAME_ENTRY(SDVO_CMD_GET_INPUT_PIXEL_CLOCK_RANGE),
+ SDVO_CMD_NAME_ENTRY(SDVO_CMD_GET_OUTPUT_PIXEL_CLOCK_RANGE),
+ SDVO_CMD_NAME_ENTRY(SDVO_CMD_GET_SUPPORTED_CLOCK_RATE_MULTS),
+ SDVO_CMD_NAME_ENTRY(SDVO_CMD_GET_CLOCK_RATE_MULT),
+ SDVO_CMD_NAME_ENTRY(SDVO_CMD_SET_CLOCK_RATE_MULT),
+ SDVO_CMD_NAME_ENTRY(SDVO_CMD_GET_SUPPORTED_TV_FORMATS),
+ SDVO_CMD_NAME_ENTRY(SDVO_CMD_GET_TV_FORMAT),
+ SDVO_CMD_NAME_ENTRY(SDVO_CMD_SET_TV_FORMAT),
+ SDVO_CMD_NAME_ENTRY(SDVO_CMD_SET_TV_RESOLUTION_SUPPORT),
+ SDVO_CMD_NAME_ENTRY(SDVO_CMD_SET_CONTROL_BUS_SWITCH),
+};
+
+#define SDVO_NAME(dev_priv) ((dev_priv)->output_device == SDVOB ? "SDVOB" : "SDVOC")
+#define SDVO_PRIV(output) ((struct intel_sdvo_priv *) (output)->dev_priv)
+
+#ifdef SDVO_DEBUG
+static void intel_sdvo_debug_write(struct intel_output *intel_output, u8 cmd,
+ void *args, int args_len)
+{
+ struct intel_sdvo_priv *sdvo_priv = intel_output->dev_priv;
+ int i;
+
+ DRM_DEBUG("%s: W: %02X ", SDVO_NAME(sdvo_priv), cmd);
+ for (i = 0; i < args_len; i++)
+ printk("%02X ", ((u8 *)args)[i]);
+ for (; i < 8; i++)
+ printk(" ");
+	for (i = 0; i < ARRAY_SIZE(sdvo_cmd_names); i++) {
+		if (cmd == sdvo_cmd_names[i].cmd) {
+			printk("(%s)", sdvo_cmd_names[i].name);
+			break;
+		}
+	}
+	if (i == ARRAY_SIZE(sdvo_cmd_names))
+		printk("(%02X)", cmd);
+ printk("\n");
+}
+#else
+#define intel_sdvo_debug_write(o, c, a, l)
+#endif
+
+static void intel_sdvo_write_cmd(struct intel_output *intel_output, u8 cmd,
+ void *args, int args_len)
+{
+ int i;
+
+ intel_sdvo_debug_write(intel_output, cmd, args, args_len);
+
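+	/*
+	 * Load the argument bytes into the device's ARG registers (ARG_0
+	 * downwards), then write the opcode last, which triggers execution.
+	 */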
+ for (i = 0; i < args_len; i++) {
+ intel_sdvo_write_byte(intel_output, SDVO_I2C_ARG_0 - i,
+ ((u8*)args)[i]);
+ }
+
+ intel_sdvo_write_byte(intel_output, SDVO_I2C_OPCODE, cmd);
+}
+
+#ifdef SDVO_DEBUG
+static const char *cmd_status_names[] = {
+ "Power on",
+ "Success",
+ "Not supported",
+ "Invalid arg",
+ "Pending",
+ "Target not specified",
+ "Scaling not supported"
+};
+
+static void intel_sdvo_debug_response(struct intel_output *intel_output,
+ void *response, int response_len,
+ u8 status)
+{
+	struct intel_sdvo_priv *sdvo_priv = intel_output->dev_priv;
+	int i;
+
+ DRM_DEBUG("%s: R: ", SDVO_NAME(sdvo_priv));
+ for (i = 0; i < response_len; i++)
+ printk("%02X ", ((u8 *)response)[i]);
+ for (; i < 8; i++)
+ printk(" ");
+ if (status <= SDVO_CMD_STATUS_SCALING_NOT_SUPP)
+ printk("(%s)", cmd_status_names[status]);
+ else
+ printk("(??? %d)", status);
+ printk("\n");
+}
+#else
+#define intel_sdvo_debug_response(o, r, l, s)
+#endif
+
+static u8 intel_sdvo_read_response(struct intel_output *intel_output,
+ void *response, int response_len)
+{
+ int i;
+ u8 status;
+ u8 retry = 50;
+
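+	/* Poll the device, re-reading the full response each time, until it
+	 * stops reporting PENDING or we give up (about 50 * 50ms). */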
+ while (retry--) {
+ /* Read the command response */
+ for (i = 0; i < response_len; i++) {
+ intel_sdvo_read_byte(intel_output,
+ SDVO_I2C_RETURN_0 + i,
+ &((u8 *)response)[i]);
+ }
+
+ /* read the return status */
+ intel_sdvo_read_byte(intel_output, SDVO_I2C_CMD_STATUS,
+ &status);
+
+ intel_sdvo_debug_response(intel_output, response, response_len,
+ status);
+ if (status != SDVO_CMD_STATUS_PENDING)
+ return status;
+
+ mdelay(50);
+ }
+
+ return status;
+}
+
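+/*
+ * SDVO carries pixel data over a link whose clock must stay within the
+ * device's supported range (roughly 100-200 MHz); modes with slower pixel
+ * clocks are therefore sent with a 2x or 4x multiplier.
+ */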
+static int intel_sdvo_get_pixel_multiplier(struct drm_display_mode *mode)
+{
+ if (mode->clock >= 100000)
+ return 1;
+ else if (mode->clock >= 50000)
+ return 2;
+ else
+ return 4;
+}
+
+/**
+ * Don't check status code from this as it switches the bus back to the
+ * SDVO chips which defeats the purpose of doing a bus switch in the first
+ * place.
+ */
+static void intel_sdvo_set_control_bus_switch(struct intel_output *intel_output,
+ u8 target)
+{
+ intel_sdvo_write_cmd(intel_output, SDVO_CMD_SET_CONTROL_BUS_SWITCH, &target, 1);
+}
+
+static bool intel_sdvo_set_target_input(struct intel_output *intel_output, bool target_0, bool target_1)
+{
+ struct intel_sdvo_set_target_input_args targets = {0};
+ u8 status;
+
+ if (target_0 && target_1)
+ return SDVO_CMD_STATUS_NOTSUPP;
+
+ if (target_1)
+ targets.target_1 = 1;
+
+ intel_sdvo_write_cmd(intel_output, SDVO_CMD_SET_TARGET_INPUT, &targets,
+ sizeof(targets));
+
+ status = intel_sdvo_read_response(intel_output, NULL, 0);
+
+ return (status == SDVO_CMD_STATUS_SUCCESS);
+}
+
+/**
+ * Return whether each input is trained.
+ *
+ * This function is making an assumption about the layout of the response,
+ * which should be checked against the docs.
+ */
+static bool intel_sdvo_get_trained_inputs(struct intel_output *intel_output, bool *input_1, bool *input_2)
+{
+ struct intel_sdvo_get_trained_inputs_response response;
+ u8 status;
+
+ intel_sdvo_write_cmd(intel_output, SDVO_CMD_GET_TRAINED_INPUTS, NULL, 0);
+ status = intel_sdvo_read_response(intel_output, &response, sizeof(response));
+ if (status != SDVO_CMD_STATUS_SUCCESS)
+ return false;
+
+ *input_1 = response.input0_trained;
+ *input_2 = response.input1_trained;
+ return true;
+}
+
+static bool intel_sdvo_get_active_outputs(struct intel_output *intel_output,
+ u16 *outputs)
+{
+ u8 status;
+
+ intel_sdvo_write_cmd(intel_output, SDVO_CMD_GET_ACTIVE_OUTPUTS, NULL, 0);
+ status = intel_sdvo_read_response(intel_output, outputs, sizeof(*outputs));
+
+ return (status == SDVO_CMD_STATUS_SUCCESS);
+}
+
+static bool intel_sdvo_set_active_outputs(struct intel_output *intel_output,
+ u16 outputs)
+{
+ u8 status;
+
+ intel_sdvo_write_cmd(intel_output, SDVO_CMD_SET_ACTIVE_OUTPUTS, &outputs,
+ sizeof(outputs));
+ status = intel_sdvo_read_response(intel_output, NULL, 0);
+ return (status == SDVO_CMD_STATUS_SUCCESS);
+}
+
+static bool intel_sdvo_set_encoder_power_state(struct intel_output *intel_output,
+ int mode)
+{
+ u8 status, state = SDVO_ENCODER_STATE_ON;
+
+ switch (mode) {
+ case DRM_MODE_DPMS_ON:
+ state = SDVO_ENCODER_STATE_ON;
+ break;
+ case DRM_MODE_DPMS_STANDBY:
+ state = SDVO_ENCODER_STATE_STANDBY;
+ break;
+ case DRM_MODE_DPMS_SUSPEND:
+ state = SDVO_ENCODER_STATE_SUSPEND;
+ break;
+ case DRM_MODE_DPMS_OFF:
+ state = SDVO_ENCODER_STATE_OFF;
+ break;
+ }
+
+ intel_sdvo_write_cmd(intel_output, SDVO_CMD_SET_ENCODER_POWER_STATE, &state,
+ sizeof(state));
+ status = intel_sdvo_read_response(intel_output, NULL, 0);
+
+ return (status == SDVO_CMD_STATUS_SUCCESS);
+}
+
+static bool intel_sdvo_get_input_pixel_clock_range(struct intel_output *intel_output,
+ int *clock_min,
+ int *clock_max)
+{
+ struct intel_sdvo_pixel_clock_range clocks;
+ u8 status;
+
+ intel_sdvo_write_cmd(intel_output, SDVO_CMD_GET_INPUT_PIXEL_CLOCK_RANGE,
+ NULL, 0);
+
+ status = intel_sdvo_read_response(intel_output, &clocks, sizeof(clocks));
+
+ if (status != SDVO_CMD_STATUS_SUCCESS)
+ return false;
+
+ /* Convert the values from units of 10 kHz to kHz. */
+ *clock_min = clocks.min * 10;
+ *clock_max = clocks.max * 10;
+
+ return true;
+}
+
+static bool intel_sdvo_set_target_output(struct intel_output *intel_output,
+ u16 outputs)
+{
+ u8 status;
+
+ intel_sdvo_write_cmd(intel_output, SDVO_CMD_SET_TARGET_OUTPUT, &outputs,
+ sizeof(outputs));
+
+ status = intel_sdvo_read_response(intel_output, NULL, 0);
+ return (status == SDVO_CMD_STATUS_SUCCESS);
+}
+
+static bool intel_sdvo_get_timing(struct intel_output *intel_output, u8 cmd,
+ struct intel_sdvo_dtd *dtd)
+{
+ u8 status;
+
+ intel_sdvo_write_cmd(intel_output, cmd, NULL, 0);
+ status = intel_sdvo_read_response(intel_output, &dtd->part1,
+ sizeof(dtd->part1));
+ if (status != SDVO_CMD_STATUS_SUCCESS)
+ return false;
+
+ intel_sdvo_write_cmd(intel_output, cmd + 1, NULL, 0);
+ status = intel_sdvo_read_response(intel_output, &dtd->part2,
+ sizeof(dtd->part2));
+ if (status != SDVO_CMD_STATUS_SUCCESS)
+ return false;
+
+ return true;
+}
+
+static bool intel_sdvo_get_input_timing(struct intel_output *intel_output,
+ struct intel_sdvo_dtd *dtd)
+{
+ return intel_sdvo_get_timing(intel_output,
+ SDVO_CMD_GET_INPUT_TIMINGS_PART1, dtd);
+}
+
+static bool intel_sdvo_get_output_timing(struct intel_output *intel_output,
+ struct intel_sdvo_dtd *dtd)
+{
+ return intel_sdvo_get_timing(intel_output,
+ SDVO_CMD_GET_OUTPUT_TIMINGS_PART1, dtd);
+}
+
+static bool intel_sdvo_set_timing(struct intel_output *intel_output, u8 cmd,
+ struct intel_sdvo_dtd *dtd)
+{
+ u8 status;
+
+ intel_sdvo_write_cmd(intel_output, cmd, &dtd->part1, sizeof(dtd->part1));
+ status = intel_sdvo_read_response(intel_output, NULL, 0);
+ if (status != SDVO_CMD_STATUS_SUCCESS)
+ return false;
+
+ intel_sdvo_write_cmd(intel_output, cmd + 1, &dtd->part2, sizeof(dtd->part2));
+ status = intel_sdvo_read_response(intel_output, NULL, 0);
+ if (status != SDVO_CMD_STATUS_SUCCESS)
+ return false;
+
+ return true;
+}
+
+static bool intel_sdvo_set_input_timing(struct intel_output *intel_output,
+ struct intel_sdvo_dtd *dtd)
+{
+ return intel_sdvo_set_timing(intel_output,
+ SDVO_CMD_SET_INPUT_TIMINGS_PART1, dtd);
+}
+
+static bool intel_sdvo_set_output_timing(struct intel_output *intel_output,
+ struct intel_sdvo_dtd *dtd)
+{
+ return intel_sdvo_set_timing(intel_output,
+ SDVO_CMD_SET_OUTPUT_TIMINGS_PART1, dtd);
+}
+
+
+static int intel_sdvo_get_clock_rate_mult(struct intel_output *intel_output)
+{
+ u8 response, status;
+
+ intel_sdvo_write_cmd(intel_output, SDVO_CMD_GET_CLOCK_RATE_MULT, NULL, 0);
+ status = intel_sdvo_read_response(intel_output, &response, 1);
+
+ if (status != SDVO_CMD_STATUS_SUCCESS) {
+ DRM_DEBUG("Couldn't get SDVO clock rate multiplier\n");
+ return SDVO_CLOCK_RATE_MULT_1X;
+ } else {
+ DRM_DEBUG("Current clock rate multiplier: %d\n", response);
+ }
+
+ return response;
+}
+
+static bool intel_sdvo_set_clock_rate_mult(struct intel_output *intel_output, u8 val)
+{
+ u8 status;
+
+ intel_sdvo_write_cmd(intel_output, SDVO_CMD_SET_CLOCK_RATE_MULT, &val, 1);
+ status = intel_sdvo_read_response(intel_output, NULL, 0);
+ if (status != SDVO_CMD_STATUS_SUCCESS)
+ return false;
+
+ return true;
+}
+
+static bool intel_sdvo_mode_fixup(struct drm_encoder *encoder,
+ struct drm_display_mode *mode,
+ struct drm_display_mode *adjusted_mode)
+{
+ /* Make the CRTC code factor in the SDVO pixel multiplier. The SDVO
+ * device will be told of the multiplier during mode_set.
+ */
+ adjusted_mode->clock *= intel_sdvo_get_pixel_multiplier(mode);
+ return true;
+}
+
+static void intel_sdvo_mode_set(struct drm_encoder *encoder,
+ struct drm_display_mode *mode,
+ struct drm_display_mode *adjusted_mode)
+{
+ struct drm_device *dev = encoder->dev;
+ struct drm_i915_private *dev_priv = dev->dev_private;
+ struct drm_crtc *crtc = encoder->crtc;
+ struct intel_crtc *intel_crtc = to_intel_crtc(crtc);
+ struct intel_output *intel_output = enc_to_intel_output(encoder);
+ struct intel_sdvo_priv *sdvo_priv = intel_output->dev_priv;
+ u16 width, height;
+ u16 h_blank_len, h_sync_len, v_blank_len, v_sync_len;
+ u16 h_sync_offset, v_sync_offset;
+ u32 sdvox;
+ struct intel_sdvo_dtd output_dtd;
+ int sdvo_pixel_multiply;
+
+ if (!mode)
+ return;
+
+ width = mode->crtc_hdisplay;
+ height = mode->crtc_vdisplay;
+
+ /* do some mode translations */
+ h_blank_len = mode->crtc_hblank_end - mode->crtc_hblank_start;
+ h_sync_len = mode->crtc_hsync_end - mode->crtc_hsync_start;
+
+ v_blank_len = mode->crtc_vblank_end - mode->crtc_vblank_start;
+ v_sync_len = mode->crtc_vsync_end - mode->crtc_vsync_start;
+
+ h_sync_offset = mode->crtc_hsync_start - mode->crtc_hblank_start;
+ v_sync_offset = mode->crtc_vsync_start - mode->crtc_vblank_start;
+
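+	/*
+	 * Pack the timings into an SDVO DTD (an EDID-style detailed timing
+	 * descriptor): the clock is in 10 kHz units and the high bits of the
+	 * active/blank/sync fields are folded into the *_high bytes.
+	 */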
+ output_dtd.part1.clock = mode->clock / 10;
+ output_dtd.part1.h_active = width & 0xff;
+ output_dtd.part1.h_blank = h_blank_len & 0xff;
+ output_dtd.part1.h_high = (((width >> 8) & 0xf) << 4) |
+ ((h_blank_len >> 8) & 0xf);
+ output_dtd.part1.v_active = height & 0xff;
+ output_dtd.part1.v_blank = v_blank_len & 0xff;
+ output_dtd.part1.v_high = (((height >> 8) & 0xf) << 4) |
+ ((v_blank_len >> 8) & 0xf);
+
+ output_dtd.part2.h_sync_off = h_sync_offset;
+ output_dtd.part2.h_sync_width = h_sync_len & 0xff;
+ output_dtd.part2.v_sync_off_width = (v_sync_offset & 0xf) << 4 |
+ (v_sync_len & 0xf);
+ output_dtd.part2.sync_off_width_high = ((h_sync_offset & 0x300) >> 2) |
+ ((h_sync_len & 0x300) >> 4) | ((v_sync_offset & 0x30) >> 2) |
+ ((v_sync_len & 0x30) >> 4);
+
+ output_dtd.part2.dtd_flags = 0x18;
+ if (mode->flags & DRM_MODE_FLAG_PHSYNC)
+ output_dtd.part2.dtd_flags |= 0x2;
+ if (mode->flags & DRM_MODE_FLAG_PVSYNC)
+ output_dtd.part2.dtd_flags |= 0x4;
+
+ output_dtd.part2.sdvo_flags = 0;
+ output_dtd.part2.v_sync_off_high = v_sync_offset & 0xc0;
+ output_dtd.part2.reserved = 0;
+
+ /* Set the output timing to the screen */
+ intel_sdvo_set_target_output(intel_output, sdvo_priv->active_outputs);
+ intel_sdvo_set_output_timing(intel_output, &output_dtd);
+
+ /* Set the input timing to the screen. Assume always input 0. */
+ intel_sdvo_set_target_input(intel_output, true, false);
+
+ /* We would like to use i830_sdvo_create_preferred_input_timing() to
+ * provide the device with a timing it can support, if it supports that
+ * feature. However, presumably we would need to adjust the CRTC to
+ * output the preferred timing, and we don't support that currently.
+ */
+ intel_sdvo_set_input_timing(intel_output, &output_dtd);
+
+ switch (intel_sdvo_get_pixel_multiplier(mode)) {
+ case 1:
+ intel_sdvo_set_clock_rate_mult(intel_output,
+ SDVO_CLOCK_RATE_MULT_1X);
+ break;
+ case 2:
+ intel_sdvo_set_clock_rate_mult(intel_output,
+ SDVO_CLOCK_RATE_MULT_2X);
+ break;
+ case 4:
+ intel_sdvo_set_clock_rate_mult(intel_output,
+ SDVO_CLOCK_RATE_MULT_4X);
+ break;
+ }
+
+ /* Set the SDVO control regs. */
+ if (0/*IS_I965GM(dev)*/) {
+ sdvox = SDVO_BORDER_ENABLE;
+ } else {
+ sdvox = I915_READ(sdvo_priv->output_device);
+ switch (sdvo_priv->output_device) {
+ case SDVOB:
+ sdvox &= SDVOB_PRESERVE_MASK;
+ break;
+ case SDVOC:
+ sdvox &= SDVOC_PRESERVE_MASK;
+ break;
+ }
+ sdvox |= (9 << 19) | SDVO_BORDER_ENABLE;
+ }
+ if (intel_crtc->pipe == 1)
+ sdvox |= SDVO_PIPE_B_SELECT;
+
+ sdvo_pixel_multiply = intel_sdvo_get_pixel_multiplier(mode);
+ if (IS_I965G(dev)) {
+ /* done in crtc_mode_set as the dpll_md reg must be written
+ early */
+ } else if (IS_I945G(dev) || IS_I945GM(dev)) {
+ /* done in crtc_mode_set as it lives inside the
+ dpll register */
+ } else {
+ sdvox |= (sdvo_pixel_multiply - 1) << SDVO_PORT_MULTIPLY_SHIFT;
+ }
+
+ intel_sdvo_write_sdvox(intel_output, sdvox);
+}
+
+static void intel_sdvo_dpms(struct drm_encoder *encoder, int mode)
+{
+ struct drm_device *dev = encoder->dev;
+ struct drm_i915_private *dev_priv = dev->dev_private;
+ struct intel_output *intel_output = enc_to_intel_output(encoder);
+ struct intel_sdvo_priv *sdvo_priv = intel_output->dev_priv;
+ u32 temp;
+
+ if (mode != DRM_MODE_DPMS_ON) {
+ intel_sdvo_set_active_outputs(intel_output, 0);
+ if (0)
+ intel_sdvo_set_encoder_power_state(intel_output, mode);
+
+ if (mode == DRM_MODE_DPMS_OFF) {
+ temp = I915_READ(sdvo_priv->output_device);
+ if ((temp & SDVO_ENABLE) != 0) {
+ intel_sdvo_write_sdvox(intel_output, temp & ~SDVO_ENABLE);
+ }
+ }
+ } else {
+ bool input1, input2;
+ int i;
+ u8 status;
+
+ temp = I915_READ(sdvo_priv->output_device);
+ if ((temp & SDVO_ENABLE) == 0)
+ intel_sdvo_write_sdvox(intel_output, temp | SDVO_ENABLE);
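+		/* Per the note on SDVO_CMD_GET_TRAINED_INPUTS in
+		 * intel_sdvo_regs.h, devices must train within 2 vsyncs of a
+		 * mode change, so give the encoder two vblanks before
+		 * checking whether the inputs trained.
+		 */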
+ for (i = 0; i < 2; i++)
+ intel_wait_for_vblank(dev);
+
+ status = intel_sdvo_get_trained_inputs(intel_output, &input1,
+ &input2);
+
+
+		/* Warn if the device reported failure to sync.
+		 * A lot of SDVO devices fail to notify of sync, but given
+		 * that the status is a success, we assume the device
+		 * succeeded.
+		 */
+ if (status == SDVO_CMD_STATUS_SUCCESS && !input1) {
+ DRM_DEBUG("First %s output reported failure to sync\n",
+ SDVO_NAME(sdvo_priv));
+ }
+
+ if (0)
+ intel_sdvo_set_encoder_power_state(intel_output, mode);
+ intel_sdvo_set_active_outputs(intel_output, sdvo_priv->active_outputs);
+ }
+ return;
+}
+
+static void intel_sdvo_save(struct drm_connector *connector)
+{
+ struct drm_device *dev = connector->dev;
+ struct drm_i915_private *dev_priv = dev->dev_private;
+ struct intel_output *intel_output = to_intel_output(connector);
+ struct intel_sdvo_priv *sdvo_priv = intel_output->dev_priv;
+ int o;
+
+ sdvo_priv->save_sdvo_mult = intel_sdvo_get_clock_rate_mult(intel_output);
+ intel_sdvo_get_active_outputs(intel_output, &sdvo_priv->save_active_outputs);
+
+ if (sdvo_priv->caps.sdvo_inputs_mask & 0x1) {
+ intel_sdvo_set_target_input(intel_output, true, false);
+ intel_sdvo_get_input_timing(intel_output,
+ &sdvo_priv->save_input_dtd_1);
+ }
+
+ if (sdvo_priv->caps.sdvo_inputs_mask & 0x2) {
+ intel_sdvo_set_target_input(intel_output, false, true);
+ intel_sdvo_get_input_timing(intel_output,
+ &sdvo_priv->save_input_dtd_2);
+ }
+
+ for (o = SDVO_OUTPUT_FIRST; o <= SDVO_OUTPUT_LAST; o++)
+ {
+ u16 this_output = (1 << o);
+ if (sdvo_priv->caps.output_flags & this_output)
+ {
+ intel_sdvo_set_target_output(intel_output, this_output);
+ intel_sdvo_get_output_timing(intel_output,
+ &sdvo_priv->save_output_dtd[o]);
+ }
+ }
+
+ sdvo_priv->save_SDVOX = I915_READ(sdvo_priv->output_device);
+}
+
+static void intel_sdvo_restore(struct drm_connector *connector)
+{
+ struct drm_device *dev = connector->dev;
+ struct drm_i915_private *dev_priv = dev->dev_private;
+ struct intel_output *intel_output = to_intel_output(connector);
+ struct intel_sdvo_priv *sdvo_priv = intel_output->dev_priv;
+ int o;
+ int i;
+ bool input1, input2;
+ u8 status;
+
+ intel_sdvo_set_active_outputs(intel_output, 0);
+
+ for (o = SDVO_OUTPUT_FIRST; o <= SDVO_OUTPUT_LAST; o++)
+ {
+ u16 this_output = (1 << o);
+ if (sdvo_priv->caps.output_flags & this_output) {
+ intel_sdvo_set_target_output(intel_output, this_output);
+ intel_sdvo_set_output_timing(intel_output, &sdvo_priv->save_output_dtd[o]);
+ }
+ }
+
+ if (sdvo_priv->caps.sdvo_inputs_mask & 0x1) {
+ intel_sdvo_set_target_input(intel_output, true, false);
+ intel_sdvo_set_input_timing(intel_output, &sdvo_priv->save_input_dtd_1);
+ }
+
+ if (sdvo_priv->caps.sdvo_inputs_mask & 0x2) {
+ intel_sdvo_set_target_input(intel_output, false, true);
+ intel_sdvo_set_input_timing(intel_output, &sdvo_priv->save_input_dtd_2);
+ }
+
+ intel_sdvo_set_clock_rate_mult(intel_output, sdvo_priv->save_sdvo_mult);
+
+ I915_WRITE(sdvo_priv->output_device, sdvo_priv->save_SDVOX);
+
+ if (sdvo_priv->save_SDVOX & SDVO_ENABLE)
+ {
+ for (i = 0; i < 2; i++)
+ intel_wait_for_vblank(dev);
+ status = intel_sdvo_get_trained_inputs(intel_output, &input1, &input2);
+ if (status == SDVO_CMD_STATUS_SUCCESS && !input1)
+ DRM_DEBUG("First %s output reported failure to sync\n",
+ SDVO_NAME(sdvo_priv));
+ }
+
+ intel_sdvo_set_active_outputs(intel_output, sdvo_priv->save_active_outputs);
+}
+
+static int intel_sdvo_mode_valid(struct drm_connector *connector,
+ struct drm_display_mode *mode)
+{
+ struct intel_output *intel_output = to_intel_output(connector);
+ struct intel_sdvo_priv *sdvo_priv = intel_output->dev_priv;
+
+ if (mode->flags & DRM_MODE_FLAG_DBLSCAN)
+ return MODE_NO_DBLESCAN;
+
+ if (sdvo_priv->pixel_clock_min > mode->clock)
+ return MODE_CLOCK_LOW;
+
+ if (sdvo_priv->pixel_clock_max < mode->clock)
+ return MODE_CLOCK_HIGH;
+
+ return MODE_OK;
+}
+
+static bool intel_sdvo_get_capabilities(struct intel_output *intel_output, struct intel_sdvo_caps *caps)
+{
+ u8 status;
+
+ intel_sdvo_write_cmd(intel_output, SDVO_CMD_GET_DEVICE_CAPS, NULL, 0);
+ status = intel_sdvo_read_response(intel_output, caps, sizeof(*caps));
+ if (status != SDVO_CMD_STATUS_SUCCESS)
+ return false;
+
+ return true;
+}
+
+struct drm_connector* intel_sdvo_find(struct drm_device *dev, int sdvoB)
+{
+ struct drm_connector *connector = NULL;
+ struct intel_output *iout = NULL;
+ struct intel_sdvo_priv *sdvo;
+
+ /* find the sdvo connector */
+ list_for_each_entry(connector, &dev->mode_config.connector_list, head) {
+ iout = to_intel_output(connector);
+
+ if (iout->type != INTEL_OUTPUT_SDVO)
+ continue;
+
+ sdvo = iout->dev_priv;
+
+ if (sdvo->output_device == SDVOB && sdvoB)
+ return connector;
+
+ if (sdvo->output_device == SDVOC && !sdvoB)
+ return connector;
+
+ }
+
+ return NULL;
+}
+
+int intel_sdvo_supports_hotplug(struct drm_connector *connector)
+{
+ u8 response[2];
+ u8 status;
+ struct intel_output *intel_output;
+ DRM_DEBUG("\n");
+
+ if (!connector)
+ return 0;
+
+ intel_output = to_intel_output(connector);
+
+ intel_sdvo_write_cmd(intel_output, SDVO_CMD_GET_HOT_PLUG_SUPPORT, NULL, 0);
+ status = intel_sdvo_read_response(intel_output, &response, 2);
+
+	if (response[0] != 0)
+ return 1;
+
+ return 0;
+}
+
+void intel_sdvo_set_hotplug(struct drm_connector *connector, int on)
+{
+ u8 response[2];
+ u8 status;
+ struct intel_output *intel_output = to_intel_output(connector);
+
+ intel_sdvo_write_cmd(intel_output, SDVO_CMD_GET_ACTIVE_HOT_PLUG, NULL, 0);
+ intel_sdvo_read_response(intel_output, &response, 2);
+
+ if (on) {
+ intel_sdvo_write_cmd(intel_output, SDVO_CMD_GET_HOT_PLUG_SUPPORT, NULL, 0);
+ status = intel_sdvo_read_response(intel_output, &response, 2);
+
+ intel_sdvo_write_cmd(intel_output, SDVO_CMD_SET_ACTIVE_HOT_PLUG, &response, 2);
+ } else {
+ response[0] = 0;
+ response[1] = 0;
+ intel_sdvo_write_cmd(intel_output, SDVO_CMD_SET_ACTIVE_HOT_PLUG, &response, 2);
+ }
+
+ intel_sdvo_write_cmd(intel_output, SDVO_CMD_GET_ACTIVE_HOT_PLUG, NULL, 0);
+ intel_sdvo_read_response(intel_output, &response, 2);
+}
+
+static enum drm_connector_status intel_sdvo_detect(struct drm_connector *connector)
+{
+ u8 response[2];
+ u8 status;
+ struct intel_output *intel_output = to_intel_output(connector);
+
+ intel_sdvo_write_cmd(intel_output, SDVO_CMD_GET_ATTACHED_DISPLAYS, NULL, 0);
+ status = intel_sdvo_read_response(intel_output, &response, 2);
+
+ DRM_DEBUG("SDVO response %d %d\n", response[0], response[1]);
+ if ((response[0] != 0) || (response[1] != 0))
+ return connector_status_connected;
+ else
+ return connector_status_disconnected;
+}
+
+static int intel_sdvo_get_modes(struct drm_connector *connector)
+{
+ struct intel_output *intel_output = to_intel_output(connector);
+
+ /* set the bus switch and get the modes */
+ intel_sdvo_set_control_bus_switch(intel_output, SDVO_CONTROL_BUS_DDC2);
+ intel_ddc_get_modes(intel_output);
+
+ if (list_empty(&connector->probed_modes))
+ return 0;
+ return 1;
+}
+
+static void intel_sdvo_destroy(struct drm_connector *connector)
+{
+ struct intel_output *intel_output = to_intel_output(connector);
+
+ if (intel_output->i2c_bus)
+ intel_i2c_destroy(intel_output->i2c_bus);
+ drm_sysfs_connector_remove(connector);
+ drm_connector_cleanup(connector);
+ kfree(intel_output);
+}
+
+static const struct drm_encoder_helper_funcs intel_sdvo_helper_funcs = {
+ .dpms = intel_sdvo_dpms,
+ .mode_fixup = intel_sdvo_mode_fixup,
+ .prepare = intel_encoder_prepare,
+ .mode_set = intel_sdvo_mode_set,
+ .commit = intel_encoder_commit,
+};
+
+static const struct drm_connector_funcs intel_sdvo_connector_funcs = {
+ .save = intel_sdvo_save,
+ .restore = intel_sdvo_restore,
+ .detect = intel_sdvo_detect,
+ .fill_modes = drm_helper_probe_single_connector_modes,
+ .destroy = intel_sdvo_destroy,
+};
+
+static const struct drm_connector_helper_funcs intel_sdvo_connector_helper_funcs = {
+ .get_modes = intel_sdvo_get_modes,
+ .mode_valid = intel_sdvo_mode_valid,
+ .best_encoder = intel_best_encoder,
+};
+
+static void intel_sdvo_enc_destroy(struct drm_encoder *encoder)
+{
+ drm_encoder_cleanup(encoder);
+}
+
+static const struct drm_encoder_funcs intel_sdvo_enc_funcs = {
+ .destroy = intel_sdvo_enc_destroy,
+};
+
+
+void intel_sdvo_init(struct drm_device *dev, int output_device)
+{
+ struct drm_connector *connector;
+ struct intel_output *intel_output;
+ struct intel_sdvo_priv *sdvo_priv;
+ struct intel_i2c_chan *i2cbus = NULL;
+ int connector_type;
+ u8 ch[0x40];
+ int i;
+ int encoder_type, output_id;
+
+ intel_output = kcalloc(sizeof(struct intel_output)+sizeof(struct intel_sdvo_priv), 1, GFP_KERNEL);
+ if (!intel_output) {
+ return;
+ }
+
+ connector = &intel_output->base;
+
+ drm_connector_init(dev, connector, &intel_sdvo_connector_funcs,
+ DRM_MODE_CONNECTOR_Unknown);
+ drm_connector_helper_add(connector, &intel_sdvo_connector_helper_funcs);
+ sdvo_priv = (struct intel_sdvo_priv *)(intel_output + 1);
+ intel_output->type = INTEL_OUTPUT_SDVO;
+
+ connector->interlace_allowed = 0;
+ connector->doublescan_allowed = 0;
+
+ /* setup the DDC bus. */
+ if (output_device == SDVOB)
+ i2cbus = intel_i2c_create(dev, GPIOE, "SDVOCTRL_E for SDVOB");
+ else
+ i2cbus = intel_i2c_create(dev, GPIOE, "SDVOCTRL_E for SDVOC");
+
+ if (!i2cbus)
+ goto err_connector;
+
+ sdvo_priv->i2c_bus = i2cbus;
+
+ if (output_device == SDVOB) {
+ output_id = 1;
+ sdvo_priv->i2c_bus->slave_addr = 0x38;
+ } else {
+ output_id = 2;
+ sdvo_priv->i2c_bus->slave_addr = 0x39;
+ }
+
+ sdvo_priv->output_device = output_device;
+ intel_output->i2c_bus = i2cbus;
+ intel_output->dev_priv = sdvo_priv;
+
+
+ /* Read the regs to test if we can talk to the device */
+ for (i = 0; i < 0x40; i++) {
+ if (!intel_sdvo_read_byte(intel_output, i, &ch[i])) {
+ DRM_DEBUG("No SDVO device found on SDVO%c\n",
+ output_device == SDVOB ? 'B' : 'C');
+ goto err_i2c;
+ }
+ }
+
+ intel_sdvo_get_capabilities(intel_output, &sdvo_priv->caps);
+
+ memset(&sdvo_priv->active_outputs, 0, sizeof(sdvo_priv->active_outputs));
+
+ /* TODO, CVBS, SVID, YPRPB & SCART outputs. */
+ if (sdvo_priv->caps.output_flags & SDVO_OUTPUT_RGB0)
+ {
+ sdvo_priv->active_outputs = SDVO_OUTPUT_RGB0;
+ connector->display_info.subpixel_order = SubPixelHorizontalRGB;
+ encoder_type = DRM_MODE_ENCODER_DAC;
+ connector_type = DRM_MODE_CONNECTOR_VGA;
+ }
+ else if (sdvo_priv->caps.output_flags & SDVO_OUTPUT_RGB1)
+ {
+ sdvo_priv->active_outputs = SDVO_OUTPUT_RGB1;
+ connector->display_info.subpixel_order = SubPixelHorizontalRGB;
+ encoder_type = DRM_MODE_ENCODER_DAC;
+ connector_type = DRM_MODE_CONNECTOR_VGA;
+ }
+ else if (sdvo_priv->caps.output_flags & SDVO_OUTPUT_TMDS0)
+ {
+ sdvo_priv->active_outputs = SDVO_OUTPUT_TMDS0;
+ connector->display_info.subpixel_order = SubPixelHorizontalRGB;
+ encoder_type = DRM_MODE_ENCODER_TMDS;
+ connector_type = DRM_MODE_CONNECTOR_DVID;
+ }
+ else if (sdvo_priv->caps.output_flags & SDVO_OUTPUT_TMDS1)
+ {
+ sdvo_priv->active_outputs = SDVO_OUTPUT_TMDS1;
+ connector->display_info.subpixel_order = SubPixelHorizontalRGB;
+ encoder_type = DRM_MODE_ENCODER_TMDS;
+ connector_type = DRM_MODE_CONNECTOR_DVID;
+ }
+ else
+ {
+ unsigned char bytes[2];
+
+ memcpy (bytes, &sdvo_priv->caps.output_flags, 2);
+ DRM_DEBUG("%s: No active RGB or TMDS outputs (0x%02x%02x)\n",
+ SDVO_NAME(sdvo_priv),
+ bytes[0], bytes[1]);
+ goto err_i2c;
+ }
+
+ drm_encoder_init(dev, &intel_output->enc, &intel_sdvo_enc_funcs, encoder_type);
+ drm_encoder_helper_add(&intel_output->enc, &intel_sdvo_helper_funcs);
+ connector->connector_type = connector_type;
+
+ drm_mode_connector_attach_encoder(&intel_output->base, &intel_output->enc);
+ drm_sysfs_connector_add(connector);
+
+ /* Set the input timing to the screen. Assume always input 0. */
+ intel_sdvo_set_target_input(intel_output, true, false);
+
+ intel_sdvo_get_input_pixel_clock_range(intel_output,
+ &sdvo_priv->pixel_clock_min,
+ &sdvo_priv->pixel_clock_max);
+
+
+ DRM_DEBUG("%s device VID/DID: %02X:%02X.%02X, "
+ "clock range %dMHz - %dMHz, "
+ "input 1: %c, input 2: %c, "
+ "output 1: %c, output 2: %c\n",
+ SDVO_NAME(sdvo_priv),
+ sdvo_priv->caps.vendor_id, sdvo_priv->caps.device_id,
+ sdvo_priv->caps.device_rev_id,
+ sdvo_priv->pixel_clock_min / 1000,
+ sdvo_priv->pixel_clock_max / 1000,
+ (sdvo_priv->caps.sdvo_inputs_mask & 0x1) ? 'Y' : 'N',
+ (sdvo_priv->caps.sdvo_inputs_mask & 0x2) ? 'Y' : 'N',
+ /* check currently supported outputs */
+ sdvo_priv->caps.output_flags &
+ (SDVO_OUTPUT_TMDS0 | SDVO_OUTPUT_RGB0) ? 'Y' : 'N',
+ sdvo_priv->caps.output_flags &
+ (SDVO_OUTPUT_TMDS1 | SDVO_OUTPUT_RGB1) ? 'Y' : 'N');
+
+ intel_output->ddc_bus = i2cbus;
+
+ return;
+
+err_i2c:
+ intel_i2c_destroy(intel_output->i2c_bus);
+err_connector:
+ drm_connector_cleanup(connector);
+ kfree(intel_output);
+
+ return;
+}
diff --git a/drivers/gpu/drm/i915/intel_sdvo_regs.h b/drivers/gpu/drm/i915/intel_sdvo_regs.h
new file mode 100644
index 00000000000..861a43f8693
--- /dev/null
+++ b/drivers/gpu/drm/i915/intel_sdvo_regs.h
@@ -0,0 +1,327 @@
+/*
+ * Copyright © 2006-2007 Intel Corporation
+ *
+ * Permission is hereby granted, free of charge, to any person obtaining a
+ * copy of this software and associated documentation files (the "Software"),
+ * to deal in the Software without restriction, including without limitation
+ * the rights to use, copy, modify, merge, publish, distribute, sublicense,
+ * and/or sell copies of the Software, and to permit persons to whom the
+ * Software is furnished to do so, subject to the following conditions:
+ *
+ * The above copyright notice and this permission notice (including the next
+ * paragraph) shall be included in all copies or substantial portions of the
+ * Software.
+ *
+ * THE SOFTWARE IS PROVIDED "AS IS", WITHOUT WARRANTY OF ANY KIND, EXPRESS OR
+ * IMPLIED, INCLUDING BUT NOT LIMITED TO THE WARRANTIES OF MERCHANTABILITY,
+ * FITNESS FOR A PARTICULAR PURPOSE AND NONINFRINGEMENT. IN NO EVENT SHALL
+ * THE AUTHORS OR COPYRIGHT HOLDERS BE LIABLE FOR ANY CLAIM, DAMAGES OR OTHER
+ * LIABILITY, WHETHER IN AN ACTION OF CONTRACT, TORT OR OTHERWISE, ARISING
+ * FROM, OUT OF OR IN CONNECTION WITH THE SOFTWARE OR THE USE OR OTHER
+ * DEALINGS IN THE SOFTWARE.
+ *
+ * Authors:
+ * Eric Anholt <eric@anholt.net>
+ */
+
+/**
+ * @file SDVO command definitions and structures.
+ */
+
+#define SDVO_OUTPUT_FIRST (0)
+#define SDVO_OUTPUT_TMDS0 (1 << 0)
+#define SDVO_OUTPUT_RGB0 (1 << 1)
+#define SDVO_OUTPUT_CVBS0 (1 << 2)
+#define SDVO_OUTPUT_SVID0 (1 << 3)
+#define SDVO_OUTPUT_YPRPB0 (1 << 4)
+#define SDVO_OUTPUT_SCART0 (1 << 5)
+#define SDVO_OUTPUT_LVDS0 (1 << 6)
+#define SDVO_OUTPUT_TMDS1 (1 << 8)
+#define SDVO_OUTPUT_RGB1 (1 << 9)
+#define SDVO_OUTPUT_CVBS1 (1 << 10)
+#define SDVO_OUTPUT_SVID1 (1 << 11)
+#define SDVO_OUTPUT_YPRPB1 (1 << 12)
+#define SDVO_OUTPUT_SCART1 (1 << 13)
+#define SDVO_OUTPUT_LVDS1 (1 << 14)
+#define SDVO_OUTPUT_LAST (14)
+
+struct intel_sdvo_caps {
+ u8 vendor_id;
+ u8 device_id;
+ u8 device_rev_id;
+ u8 sdvo_version_major;
+ u8 sdvo_version_minor;
+ unsigned int sdvo_inputs_mask:2;
+ unsigned int smooth_scaling:1;
+ unsigned int sharp_scaling:1;
+ unsigned int up_scaling:1;
+ unsigned int down_scaling:1;
+ unsigned int stall_support:1;
+ unsigned int pad:1;
+ u16 output_flags;
+} __attribute__((packed));
+
+/** This matches the EDID DTD structure, more or less */
+struct intel_sdvo_dtd {
+ struct {
+ u16 clock; /**< pixel clock, in 10kHz units */
+ u8 h_active; /**< lower 8 bits (pixels) */
+ u8 h_blank; /**< lower 8 bits (pixels) */
+ u8 h_high; /**< upper 4 bits each h_active, h_blank */
+ u8 v_active; /**< lower 8 bits (lines) */
+ u8 v_blank; /**< lower 8 bits (lines) */
+ u8 v_high; /**< upper 4 bits each v_active, v_blank */
+ } part1;
+
+ struct {
+ u8 h_sync_off; /**< lower 8 bits, from hblank start */
+ u8 h_sync_width; /**< lower 8 bits (pixels) */
+ /** lower 4 bits each vsync offset, vsync width */
+ u8 v_sync_off_width;
+ /**
+ * 2 high bits of hsync offset, 2 high bits of hsync width,
+ * bits 4-5 of vsync offset, and 2 high bits of vsync width.
+ */
+ u8 sync_off_width_high;
+ u8 dtd_flags;
+ u8 sdvo_flags;
+	/** bits 6-7 of the vsync offset, stored at bits 6-7 of this byte */
+ u8 v_sync_off_high;
+ u8 reserved;
+ } part2;
+} __attribute__((packed));
+
+struct intel_sdvo_pixel_clock_range {
+ u16 min; /**< pixel clock, in 10kHz units */
+ u16 max; /**< pixel clock, in 10kHz units */
+} __attribute__((packed));
+
+struct intel_sdvo_preferred_input_timing_args {
+ u16 clock;
+ u16 width;
+ u16 height;
+} __attribute__((packed));
+
+/* I2C registers for SDVO */
+#define SDVO_I2C_ARG_0 0x07
+#define SDVO_I2C_ARG_1 0x06
+#define SDVO_I2C_ARG_2 0x05
+#define SDVO_I2C_ARG_3 0x04
+#define SDVO_I2C_ARG_4 0x03
+#define SDVO_I2C_ARG_5 0x02
+#define SDVO_I2C_ARG_6 0x01
+#define SDVO_I2C_ARG_7 0x00
+#define SDVO_I2C_OPCODE 0x08
+#define SDVO_I2C_CMD_STATUS 0x09
+#define SDVO_I2C_RETURN_0 0x0a
+#define SDVO_I2C_RETURN_1 0x0b
+#define SDVO_I2C_RETURN_2 0x0c
+#define SDVO_I2C_RETURN_3 0x0d
+#define SDVO_I2C_RETURN_4 0x0e
+#define SDVO_I2C_RETURN_5 0x0f
+#define SDVO_I2C_RETURN_6 0x10
+#define SDVO_I2C_RETURN_7 0x11
+#define SDVO_I2C_VENDOR_BEGIN 0x20
+
+/* Status results */
+#define SDVO_CMD_STATUS_POWER_ON 0x0
+#define SDVO_CMD_STATUS_SUCCESS 0x1
+#define SDVO_CMD_STATUS_NOTSUPP 0x2
+#define SDVO_CMD_STATUS_INVALID_ARG 0x3
+#define SDVO_CMD_STATUS_PENDING 0x4
+#define SDVO_CMD_STATUS_TARGET_NOT_SPECIFIED 0x5
+#define SDVO_CMD_STATUS_SCALING_NOT_SUPP 0x6
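+
+/*
+ * Rough command flow (a sketch, not a normative description): the host
+ * writes any arguments into the SDVO_I2C_ARG_* registers, writes the opcode
+ * to SDVO_I2C_OPCODE, then reads SDVO_I2C_CMD_STATUS (re-reading while it is
+ * SDVO_CMD_STATUS_PENDING) and, on SDVO_CMD_STATUS_SUCCESS, fetches any
+ * result bytes starting at SDVO_I2C_RETURN_0.
+ */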
+
+/* SDVO commands, argument/result registers */
+
+#define SDVO_CMD_RESET 0x01
+
+/** Returns a struct intel_sdvo_caps */
+#define SDVO_CMD_GET_DEVICE_CAPS 0x02
+
+#define SDVO_CMD_GET_FIRMWARE_REV 0x86
+# define SDVO_DEVICE_FIRMWARE_MINOR SDVO_I2C_RETURN_0
+# define SDVO_DEVICE_FIRMWARE_MAJOR SDVO_I2C_RETURN_1
+# define SDVO_DEVICE_FIRMWARE_PATCH SDVO_I2C_RETURN_2
+
+/**
+ * Reports which inputs are trained (managed to sync).
+ *
+ * Devices must have trained within 2 vsyncs of a mode change.
+ */
+#define SDVO_CMD_GET_TRAINED_INPUTS 0x03
+struct intel_sdvo_get_trained_inputs_response {
+ unsigned int input0_trained:1;
+ unsigned int input1_trained:1;
+ unsigned int pad:6;
+} __attribute__((packed));
+
+/** Returns a struct intel_sdvo_output_flags of active outputs. */
+#define SDVO_CMD_GET_ACTIVE_OUTPUTS 0x04
+
+/**
+ * Sets the current set of active outputs.
+ *
+ * Takes a struct intel_sdvo_output_flags. Must be preceded by a SET_IN_OUT_MAP
+ * on multi-output devices.
+ */
+#define SDVO_CMD_SET_ACTIVE_OUTPUTS 0x05
+
+/**
+ * Returns the current mapping of SDVO inputs to outputs on the device.
+ *
+ * Returns two struct intel_sdvo_output_flags structures.
+ */
+#define SDVO_CMD_GET_IN_OUT_MAP 0x06
+
+/**
+ * Sets the current mapping of SDVO inputs to outputs on the device.
+ *
+ * Takes two struct intel_sdvo_output_flags structures.
+ */
+#define SDVO_CMD_SET_IN_OUT_MAP 0x07
+
+/**
+ * Returns a struct intel_sdvo_output_flags of attached displays.
+ */
+#define SDVO_CMD_GET_ATTACHED_DISPLAYS 0x0b
+
+/**
+ * Returns a struct intel_sdvo_output_flags of displays supporting hot plugging.
+ */
+#define SDVO_CMD_GET_HOT_PLUG_SUPPORT 0x0c
+
+/**
+ * Takes a struct intel_sdvo_output_flags.
+ */
+#define SDVO_CMD_SET_ACTIVE_HOT_PLUG 0x0d
+
+/**
+ * Returns a struct intel_sdvo_output_flags of displays with hot plug
+ * interrupts enabled.
+ */
+#define SDVO_CMD_GET_ACTIVE_HOT_PLUG 0x0e
+
+#define SDVO_CMD_GET_INTERRUPT_EVENT_SOURCE 0x0f
+struct intel_sdvo_get_interrupt_event_source_response {
+ u16 interrupt_status;
+ unsigned int ambient_light_interrupt:1;
+ unsigned int pad:7;
+} __attribute__((packed));
+
+/**
+ * Selects which input is affected by future input commands.
+ *
+ * Commands affected include SET_INPUT_TIMINGS_PART[12],
+ * GET_INPUT_TIMINGS_PART[12], GET_PREFERRED_INPUT_TIMINGS_PART[12],
+ * GET_INPUT_PIXEL_CLOCK_RANGE, and CREATE_PREFERRED_INPUT_TIMINGS.
+ */
+#define SDVO_CMD_SET_TARGET_INPUT 0x10
+struct intel_sdvo_set_target_input_args {
+ unsigned int target_1:1;
+ unsigned int pad:7;
+} __attribute__((packed));
+
+/**
+ * Takes a struct intel_sdvo_output_flags of which outputs are targeted by
+ * future output commands.
+ *
+ * Affected commands include SET_OUTPUT_TIMINGS_PART[12],
+ * GET_OUTPUT_TIMINGS_PART[12], and GET_OUTPUT_PIXEL_CLOCK_RANGE.
+ */
+#define SDVO_CMD_SET_TARGET_OUTPUT 0x11
+
+#define SDVO_CMD_GET_INPUT_TIMINGS_PART1 0x12
+#define SDVO_CMD_GET_INPUT_TIMINGS_PART2 0x13
+#define SDVO_CMD_SET_INPUT_TIMINGS_PART1 0x14
+#define SDVO_CMD_SET_INPUT_TIMINGS_PART2 0x15
+#define SDVO_CMD_SET_OUTPUT_TIMINGS_PART1 0x16
+#define SDVO_CMD_SET_OUTPUT_TIMINGS_PART2 0x17
+#define SDVO_CMD_GET_OUTPUT_TIMINGS_PART1 0x18
+#define SDVO_CMD_GET_OUTPUT_TIMINGS_PART2 0x19
+/* Part 1 */
+# define SDVO_DTD_CLOCK_LOW SDVO_I2C_ARG_0
+# define SDVO_DTD_CLOCK_HIGH SDVO_I2C_ARG_1
+# define SDVO_DTD_H_ACTIVE SDVO_I2C_ARG_2
+# define SDVO_DTD_H_BLANK SDVO_I2C_ARG_3
+# define SDVO_DTD_H_HIGH SDVO_I2C_ARG_4
+# define SDVO_DTD_V_ACTIVE SDVO_I2C_ARG_5
+# define SDVO_DTD_V_BLANK SDVO_I2C_ARG_6
+# define SDVO_DTD_V_HIGH SDVO_I2C_ARG_7
+/* Part 2 */
+# define SDVO_DTD_HSYNC_OFF SDVO_I2C_ARG_0
+# define SDVO_DTD_HSYNC_WIDTH SDVO_I2C_ARG_1
+# define SDVO_DTD_VSYNC_OFF_WIDTH SDVO_I2C_ARG_2
+# define SDVO_DTD_SYNC_OFF_WIDTH_HIGH SDVO_I2C_ARG_3
+# define SDVO_DTD_DTD_FLAGS SDVO_I2C_ARG_4
+# define SDVO_DTD_DTD_FLAG_INTERLACED (1 << 7)
+# define SDVO_DTD_DTD_FLAG_STEREO_MASK (3 << 5)
+# define SDVO_DTD_DTD_FLAG_INPUT_MASK (3 << 3)
+# define SDVO_DTD_DTD_FLAG_SYNC_MASK (3 << 1)
+# define SDVO_DTD_SDVO_FLAS SDVO_I2C_ARG_5
+# define SDVO_DTD_SDVO_FLAG_STALL (1 << 7)
+# define SDVO_DTD_SDVO_FLAG_CENTERED (0 << 6)
+# define SDVO_DTD_SDVO_FLAG_UPPER_LEFT (1 << 6)
+# define SDVO_DTD_SDVO_FLAG_SCALING_MASK (3 << 4)
+# define SDVO_DTD_SDVO_FLAG_SCALING_NONE (0 << 4)
+# define SDVO_DTD_SDVO_FLAG_SCALING_SHARP (1 << 4)
+# define SDVO_DTD_SDVO_FLAG_SCALING_SMOOTH (2 << 4)
+# define SDVO_DTD_VSYNC_OFF_HIGH SDVO_I2C_ARG_6
+
+/**
+ * Generates a DTD based on the given width, height, and flags.
+ *
+ * This will be supported by any device supporting scaling or interlaced
+ * modes.
+ */
+#define SDVO_CMD_CREATE_PREFERRED_INPUT_TIMING 0x1a
+# define SDVO_PREFERRED_INPUT_TIMING_CLOCK_LOW SDVO_I2C_ARG_0
+# define SDVO_PREFERRED_INPUT_TIMING_CLOCK_HIGH SDVO_I2C_ARG_1
+# define SDVO_PREFERRED_INPUT_TIMING_WIDTH_LOW SDVO_I2C_ARG_2
+# define SDVO_PREFERRED_INPUT_TIMING_WIDTH_HIGH SDVO_I2C_ARG_3
+# define SDVO_PREFERRED_INPUT_TIMING_HEIGHT_LOW SDVO_I2C_ARG_4
+# define SDVO_PREFERRED_INPUT_TIMING_HEIGHT_HIGH SDVO_I2C_ARG_5
+# define SDVO_PREFERRED_INPUT_TIMING_FLAGS SDVO_I2C_ARG_6
+# define SDVO_PREFERRED_INPUT_TIMING_FLAGS_INTERLACED (1 << 0)
+# define SDVO_PREFERRED_INPUT_TIMING_FLAGS_SCALED (1 << 1)
+
+#define SDVO_CMD_GET_PREFERRED_INPUT_TIMING_PART1 0x1b
+#define SDVO_CMD_GET_PREFERRED_INPUT_TIMING_PART2 0x1c
+
+/** Returns a struct intel_sdvo_pixel_clock_range */
+#define SDVO_CMD_GET_INPUT_PIXEL_CLOCK_RANGE 0x1d
+/** Returns a struct intel_sdvo_pixel_clock_range */
+#define SDVO_CMD_GET_OUTPUT_PIXEL_CLOCK_RANGE 0x1e
+
+/** Returns a byte bitfield containing SDVO_CLOCK_RATE_MULT_* flags */
+#define SDVO_CMD_GET_SUPPORTED_CLOCK_RATE_MULTS 0x1f
+
+/** Returns a byte containing a SDVO_CLOCK_RATE_MULT_* flag */
+#define SDVO_CMD_GET_CLOCK_RATE_MULT 0x20
+/** Takes a byte containing a SDVO_CLOCK_RATE_MULT_* flag */
+#define SDVO_CMD_SET_CLOCK_RATE_MULT 0x21
+# define SDVO_CLOCK_RATE_MULT_1X (1 << 0)
+# define SDVO_CLOCK_RATE_MULT_2X (1 << 1)
+# define SDVO_CLOCK_RATE_MULT_4X (1 << 3)
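+/*
+ * Note that these multiplier values are one-hot flags (4X is (1 << 3), i.e.
+ * 0x08, not 4), which is why intel_sdvo_mode_set() maps the integer pixel
+ * multiplier onto them with a switch statement.
+ */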
+
+#define SDVO_CMD_GET_SUPPORTED_TV_FORMATS 0x27
+
+#define SDVO_CMD_GET_TV_FORMAT 0x28
+
+#define SDVO_CMD_SET_TV_FORMAT 0x29
+
+#define SDVO_CMD_GET_SUPPORTED_POWER_STATES 0x2a
+#define SDVO_CMD_GET_ENCODER_POWER_STATE 0x2b
+#define SDVO_CMD_SET_ENCODER_POWER_STATE 0x2c
+# define SDVO_ENCODER_STATE_ON (1 << 0)
+# define SDVO_ENCODER_STATE_STANDBY (1 << 1)
+# define SDVO_ENCODER_STATE_SUSPEND (1 << 2)
+# define SDVO_ENCODER_STATE_OFF (1 << 3)
+
+#define SDVO_CMD_SET_TV_RESOLUTION_SUPPORT 0x93
+
+#define SDVO_CMD_SET_CONTROL_BUS_SWITCH 0x7a
+# define SDVO_CONTROL_BUS_PROM 0x0
+# define SDVO_CONTROL_BUS_DDC1 0x1
+# define SDVO_CONTROL_BUS_DDC2 0x2
+# define SDVO_CONTROL_BUS_DDC3 0x3
diff --git a/drivers/gpu/drm/i915/intel_tv.c b/drivers/gpu/drm/i915/intel_tv.c
new file mode 100644
index 00000000000..fbb35dc56f5
--- /dev/null
+++ b/drivers/gpu/drm/i915/intel_tv.c
@@ -0,0 +1,1725 @@
+/*
+ * Copyright © 2006-2008 Intel Corporation
+ * Jesse Barnes <jesse.barnes@intel.com>
+ *
+ * Permission is hereby granted, free of charge, to any person obtaining a
+ * copy of this software and associated documentation files (the "Software"),
+ * to deal in the Software without restriction, including without limitation
+ * the rights to use, copy, modify, merge, publish, distribute, sublicense,
+ * and/or sell copies of the Software, and to permit persons to whom the
+ * Software is furnished to do so, subject to the following conditions:
+ *
+ * The above copyright notice and this permission notice (including the next
+ * paragraph) shall be included in all copies or substantial portions of the
+ * Software.
+ *
+ * THE SOFTWARE IS PROVIDED "AS IS", WITHOUT WARRANTY OF ANY KIND, EXPRESS OR
+ * IMPLIED, INCLUDING BUT NOT LIMITED TO THE WARRANTIES OF MERCHANTABILITY,
+ * FITNESS FOR A PARTICULAR PURPOSE AND NONINFRINGEMENT. IN NO EVENT SHALL
+ * THE AUTHORS OR COPYRIGHT HOLDERS BE LIABLE FOR ANY CLAIM, DAMAGES OR OTHER
+ * LIABILITY, WHETHER IN AN ACTION OF CONTRACT, TORT OR OTHERWISE, ARISING
+ * FROM, OUT OF OR IN CONNECTION WITH THE SOFTWARE OR THE USE OR OTHER
+ * DEALINGS IN THE SOFTWARE.
+ *
+ * Authors:
+ * Eric Anholt <eric@anholt.net>
+ *
+ */
+
+/** @file
+ * Integrated TV-out support for the 915GM and 945GM.
+ */
+
+#include "drmP.h"
+#include "drm.h"
+#include "drm_crtc.h"
+#include "drm_edid.h"
+#include "intel_drv.h"
+#include "i915_drm.h"
+#include "i915_drv.h"
+
+enum tv_margin {
+ TV_MARGIN_LEFT, TV_MARGIN_TOP,
+ TV_MARGIN_RIGHT, TV_MARGIN_BOTTOM
+};
+
+/** Private structure for the integrated TV support */
+struct intel_tv_priv {
+ int type;
+ char *tv_format;
+ int margin[4];
+ u32 save_TV_H_CTL_1;
+ u32 save_TV_H_CTL_2;
+ u32 save_TV_H_CTL_3;
+ u32 save_TV_V_CTL_1;
+ u32 save_TV_V_CTL_2;
+ u32 save_TV_V_CTL_3;
+ u32 save_TV_V_CTL_4;
+ u32 save_TV_V_CTL_5;
+ u32 save_TV_V_CTL_6;
+ u32 save_TV_V_CTL_7;
+ u32 save_TV_SC_CTL_1, save_TV_SC_CTL_2, save_TV_SC_CTL_3;
+
+ u32 save_TV_CSC_Y;
+ u32 save_TV_CSC_Y2;
+ u32 save_TV_CSC_U;
+ u32 save_TV_CSC_U2;
+ u32 save_TV_CSC_V;
+ u32 save_TV_CSC_V2;
+ u32 save_TV_CLR_KNOBS;
+ u32 save_TV_CLR_LEVEL;
+ u32 save_TV_WIN_POS;
+ u32 save_TV_WIN_SIZE;
+ u32 save_TV_FILTER_CTL_1;
+ u32 save_TV_FILTER_CTL_2;
+ u32 save_TV_FILTER_CTL_3;
+
+ u32 save_TV_H_LUMA[60];
+ u32 save_TV_H_CHROMA[60];
+ u32 save_TV_V_LUMA[43];
+ u32 save_TV_V_CHROMA[43];
+
+ u32 save_TV_DAC;
+ u32 save_TV_CTL;
+};
+
+struct video_levels {
+ int blank, black, burst;
+};
+
+struct color_conversion {
+ u16 ry, gy, by, ay;
+ u16 ru, gu, bu, au;
+ u16 rv, gv, bv, av;
+};
+
+static const u32 filter_table[] = {
+ 0xB1403000, 0x2E203500, 0x35002E20, 0x3000B140,
+ 0x35A0B160, 0x2DC02E80, 0xB1403480, 0xB1603000,
+ 0x2EA03640, 0x34002D80, 0x3000B120, 0x36E0B160,
+ 0x2D202EF0, 0xB1203380, 0xB1603000, 0x2F303780,
+ 0x33002CC0, 0x3000B100, 0x3820B160, 0x2C802F50,
+ 0xB10032A0, 0xB1603000, 0x2F9038C0, 0x32202C20,
+ 0x3000B0E0, 0x3980B160, 0x2BC02FC0, 0xB0E031C0,
+ 0xB1603000, 0x2FF03A20, 0x31602B60, 0xB020B0C0,
+ 0x3AE0B160, 0x2B001810, 0xB0C03120, 0xB140B020,
+ 0x18283BA0, 0x30C02A80, 0xB020B0A0, 0x3C60B140,
+ 0x2A201838, 0xB0A03080, 0xB120B020, 0x18383D20,
+ 0x304029C0, 0xB040B080, 0x3DE0B100, 0x29601848,
+ 0xB0803000, 0xB100B040, 0x18483EC0, 0xB0402900,
+ 0xB040B060, 0x3F80B0C0, 0x28801858, 0xB060B080,
+ 0xB0A0B060, 0x18602820, 0xB0A02820, 0x0000B060,
+ 0xB1403000, 0x2E203500, 0x35002E20, 0x3000B140,
+ 0x35A0B160, 0x2DC02E80, 0xB1403480, 0xB1603000,
+ 0x2EA03640, 0x34002D80, 0x3000B120, 0x36E0B160,
+ 0x2D202EF0, 0xB1203380, 0xB1603000, 0x2F303780,
+ 0x33002CC0, 0x3000B100, 0x3820B160, 0x2C802F50,
+ 0xB10032A0, 0xB1603000, 0x2F9038C0, 0x32202C20,
+ 0x3000B0E0, 0x3980B160, 0x2BC02FC0, 0xB0E031C0,
+ 0xB1603000, 0x2FF03A20, 0x31602B60, 0xB020B0C0,
+ 0x3AE0B160, 0x2B001810, 0xB0C03120, 0xB140B020,
+ 0x18283BA0, 0x30C02A80, 0xB020B0A0, 0x3C60B140,
+ 0x2A201838, 0xB0A03080, 0xB120B020, 0x18383D20,
+ 0x304029C0, 0xB040B080, 0x3DE0B100, 0x29601848,
+ 0xB0803000, 0xB100B040, 0x18483EC0, 0xB0402900,
+ 0xB040B060, 0x3F80B0C0, 0x28801858, 0xB060B080,
+ 0xB0A0B060, 0x18602820, 0xB0A02820, 0x0000B060,
+ 0x36403000, 0x2D002CC0, 0x30003640, 0x2D0036C0,
+ 0x35C02CC0, 0x37403000, 0x2C802D40, 0x30003540,
+ 0x2D8037C0, 0x34C02C40, 0x38403000, 0x2BC02E00,
+ 0x30003440, 0x2E2038C0, 0x34002B80, 0x39803000,
+ 0x2B402E40, 0x30003380, 0x2E603A00, 0x33402B00,
+ 0x3A803040, 0x2A802EA0, 0x30403300, 0x2EC03B40,
+ 0x32802A40, 0x3C003040, 0x2A002EC0, 0x30803240,
+ 0x2EC03C80, 0x320029C0, 0x3D403080, 0x29402F00,
+ 0x308031C0, 0x2F203DC0, 0x31802900, 0x3E8030C0,
+ 0x28802F40, 0x30C03140, 0x2F203F40, 0x31402840,
+ 0x28003100, 0x28002F00, 0x00003100, 0x36403000,
+ 0x2D002CC0, 0x30003640, 0x2D0036C0,
+ 0x35C02CC0, 0x37403000, 0x2C802D40, 0x30003540,
+ 0x2D8037C0, 0x34C02C40, 0x38403000, 0x2BC02E00,
+ 0x30003440, 0x2E2038C0, 0x34002B80, 0x39803000,
+ 0x2B402E40, 0x30003380, 0x2E603A00, 0x33402B00,
+ 0x3A803040, 0x2A802EA0, 0x30403300, 0x2EC03B40,
+ 0x32802A40, 0x3C003040, 0x2A002EC0, 0x30803240,
+ 0x2EC03C80, 0x320029C0, 0x3D403080, 0x29402F00,
+ 0x308031C0, 0x2F203DC0, 0x31802900, 0x3E8030C0,
+ 0x28802F40, 0x30C03140, 0x2F203F40, 0x31402840,
+ 0x28003100, 0x28002F00, 0x00003100,
+};
+
+/*
+ * Color conversion values have 3 separate fixed point formats:
+ *
+ * 10 bit fields (ay, au)
+ * 1.9 fixed point (b.bbbbbbbbb)
+ * 11 bit fields (ry, by, ru, gu, gv)
+ * exp.mantissa (ee.mmmmmmmmm)
+ * ee = 00 = 10^-1 (0.mmmmmmmmm)
+ * ee = 01 = 10^-2 (0.0mmmmmmmmm)
+ * ee = 10 = 10^-3 (0.00mmmmmmmmm)
+ * ee = 11 = 10^-4 (0.000mmmmmmmmm)
+ * 12 bit fields (gy, rv, bu)
+ * exp.mantissa (eee.mmmmmmmmm)
+ * eee = 000 = 10^-1 (0.mmmmmmmmm)
+ * eee = 001 = 10^-2 (0.0mmmmmmmmm)
+ * eee = 010 = 10^-3 (0.00mmmmmmmmm)
+ * eee = 011 = 10^-4 (0.000mmmmmmmmm)
+ * eee = 100 = reserved
+ * eee = 101 = reserved
+ * eee = 110 = reserved
+ * eee = 111 = 10^0 (m.mmmmmmmm) (only usable for 1.0 representation)
+ *
+ * Saturation and contrast are 8 bits, with their own representation:
+ * 8 bit field (saturation, contrast)
+ * exp.mantissa (ee.mmmmmm)
+ * ee = 00 = 10^-1 (0.mmmmmm)
+ * ee = 01 = 10^0 (m.mmmmm)
+ * ee = 10 = 10^1 (mm.mmmm)
+ * ee = 11 = 10^2 (mmm.mmm)
+ *
+ * Simple conversion function:
+ *
+ * static u32
+ * float_to_csc_11(float f)
+ * {
+ * u32 exp;
+ * u32 mant;
+ * u32 ret;
+ *
+ * if (f < 0)
+ * f = -f;
+ *
+ * if (f >= 1) {
+ * exp = 0x7;
+ * mant = 1 << 8;
+ * } else {
+ * for (exp = 0; exp < 3 && f < 0.5; exp++)
+ * f *= 2.0;
+ * mant = (f * (1 << 9) + 0.5);
+ * if (mant >= (1 << 9))
+ * mant = (1 << 9) - 1;
+ * }
+ * ret = (exp << 9) | mant;
+ * return ret;
+ * }
+ */
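+
+/*
+ * Worked decode (illustrative): ntsc_m_csc_composite below uses
+ * .ry = 0x0332, .gy = 0x012d, .by = 0x07d3.  With the formats above,
+ * 0x0332 has ee = 01 and mant = 0x132 (306), giving 306/1024 ~= 0.299;
+ * 0x012d has eee = 000 and mant = 0x12d (301), giving 301/512 ~= 0.588;
+ * 0x07d3 has ee = 11 and mant = 0x1d3 (467), giving 467/4096 ~= 0.114 --
+ * the usual 0.299/0.587/0.114 luma coefficients to within the encoding's
+ * precision.
+ */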
+
+/*
+ * Behold, magic numbers! If we plant them they might grow a big
+ * s-video cable to the sky... or something.
+ *
+ * Pre-converted to appropriate hex value.
+ */
+
+/*
+ * PAL & NTSC values for composite & s-video connections
+ */
+static const struct color_conversion ntsc_m_csc_composite = {
+ .ry = 0x0332, .gy = 0x012d, .by = 0x07d3, .ay = 0x0104,
+ .ru = 0x0733, .gu = 0x052d, .bu = 0x05c7, .au = 0x0f00,
+ .rv = 0x0340, .gv = 0x030c, .bv = 0x06d0, .av = 0x0f00,
+};
+
+static const struct video_levels ntsc_m_levels_composite = {
+ .blank = 225, .black = 267, .burst = 113,
+};
+
+static const struct color_conversion ntsc_m_csc_svideo = {
+ .ry = 0x0332, .gy = 0x012d, .by = 0x07d3, .ay = 0x0134,
+ .ru = 0x076a, .gu = 0x0564, .bu = 0x030d, .au = 0x0f00,
+ .rv = 0x037a, .gv = 0x033d, .bv = 0x06f6, .av = 0x0f00,
+};
+
+static const struct video_levels ntsc_m_levels_svideo = {
+ .blank = 266, .black = 316, .burst = 133,
+};
+
+static const struct color_conversion ntsc_j_csc_composite = {
+ .ry = 0x0332, .gy = 0x012d, .by = 0x07d3, .ay = 0x0119,
+ .ru = 0x074c, .gu = 0x0546, .bu = 0x05ec, .au = 0x0f00,
+ .rv = 0x035a, .gv = 0x0322, .bv = 0x06e1, .av = 0x0f00,
+};
+
+static const struct video_levels ntsc_j_levels_composite = {
+ .blank = 225, .black = 225, .burst = 113,
+};
+
+static const struct color_conversion ntsc_j_csc_svideo = {
+ .ry = 0x0332, .gy = 0x012d, .by = 0x07d3, .ay = 0x014c,
+ .ru = 0x0788, .gu = 0x0581, .bu = 0x0322, .au = 0x0f00,
+ .rv = 0x0399, .gv = 0x0356, .bv = 0x070a, .av = 0x0f00,
+};
+
+static const struct video_levels ntsc_j_levels_svideo = {
+ .blank = 266, .black = 266, .burst = 133,
+};
+
+static const struct color_conversion pal_csc_composite = {
+ .ry = 0x0332, .gy = 0x012d, .by = 0x07d3, .ay = 0x0113,
+ .ru = 0x0745, .gu = 0x053f, .bu = 0x05e1, .au = 0x0f00,
+ .rv = 0x0353, .gv = 0x031c, .bv = 0x06dc, .av = 0x0f00,
+};
+
+static const struct video_levels pal_levels_composite = {
+ .blank = 237, .black = 237, .burst = 118,
+};
+
+static const struct color_conversion pal_csc_svideo = {
+ .ry = 0x0332, .gy = 0x012d, .by = 0x07d3, .ay = 0x0145,
+ .ru = 0x0780, .gu = 0x0579, .bu = 0x031c, .au = 0x0f00,
+ .rv = 0x0390, .gv = 0x034f, .bv = 0x0705, .av = 0x0f00,
+};
+
+static const struct video_levels pal_levels_svideo = {
+ .blank = 280, .black = 280, .burst = 139,
+};
+
+static const struct color_conversion pal_m_csc_composite = {
+ .ry = 0x0332, .gy = 0x012d, .by = 0x07d3, .ay = 0x0104,
+ .ru = 0x0733, .gu = 0x052d, .bu = 0x05c7, .au = 0x0f00,
+ .rv = 0x0340, .gv = 0x030c, .bv = 0x06d0, .av = 0x0f00,
+};
+
+static const struct video_levels pal_m_levels_composite = {
+ .blank = 225, .black = 267, .burst = 113,
+};
+
+static const struct color_conversion pal_m_csc_svideo = {
+ .ry = 0x0332, .gy = 0x012d, .by = 0x07d3, .ay = 0x0134,
+ .ru = 0x076a, .gu = 0x0564, .bu = 0x030d, .au = 0x0f00,
+ .rv = 0x037a, .gv = 0x033d, .bv = 0x06f6, .av = 0x0f00,
+};
+
+static const struct video_levels pal_m_levels_svideo = {
+ .blank = 266, .black = 316, .burst = 133,
+};
+
+static const struct color_conversion pal_n_csc_composite = {
+ .ry = 0x0332, .gy = 0x012d, .by = 0x07d3, .ay = 0x0104,
+ .ru = 0x0733, .gu = 0x052d, .bu = 0x05c7, .au = 0x0f00,
+ .rv = 0x0340, .gv = 0x030c, .bv = 0x06d0, .av = 0x0f00,
+};
+
+static const struct video_levels pal_n_levels_composite = {
+ .blank = 225, .black = 267, .burst = 118,
+};
+
+static const struct color_conversion pal_n_csc_svideo = {
+ .ry = 0x0332, .gy = 0x012d, .by = 0x07d3, .ay = 0x0134,
+ .ru = 0x076a, .gu = 0x0564, .bu = 0x030d, .au = 0x0f00,
+ .rv = 0x037a, .gv = 0x033d, .bv = 0x06f6, .av = 0x0f00,
+};
+
+static const struct video_levels pal_n_levels_svideo = {
+ .blank = 266, .black = 316, .burst = 139,
+};
+
+/*
+ * Component connections
+ */
+static const struct color_conversion sdtv_csc_yprpb = {
+ .ry = 0x0332, .gy = 0x012d, .by = 0x07d3, .ay = 0x0146,
+ .ru = 0x0559, .gu = 0x0353, .bu = 0x0100, .au = 0x0f00,
+ .rv = 0x0100, .gv = 0x03ad, .bv = 0x074d, .av = 0x0f00,
+};
+
+static const struct color_conversion sdtv_csc_rgb = {
+ .ry = 0x0000, .gy = 0x0f00, .by = 0x0000, .ay = 0x0166,
+ .ru = 0x0000, .gu = 0x0000, .bu = 0x0f00, .au = 0x0166,
+ .rv = 0x0f00, .gv = 0x0000, .bv = 0x0000, .av = 0x0166,
+};
+
+static const struct color_conversion hdtv_csc_yprpb = {
+ .ry = 0x05b3, .gy = 0x016e, .by = 0x0728, .ay = 0x0146,
+ .ru = 0x07d5, .gu = 0x038b, .bu = 0x0100, .au = 0x0f00,
+ .rv = 0x0100, .gv = 0x03d1, .bv = 0x06bc, .av = 0x0f00,
+};
+
+static const struct color_conversion hdtv_csc_rgb = {
+ .ry = 0x0000, .gy = 0x0f00, .by = 0x0000, .ay = 0x0166,
+ .ru = 0x0000, .gu = 0x0000, .bu = 0x0f00, .au = 0x0166,
+ .rv = 0x0f00, .gv = 0x0000, .bv = 0x0000, .av = 0x0166,
+};
+
+static const struct video_levels component_levels = {
+ .blank = 279, .black = 279, .burst = 0,
+};
+
+
+struct tv_mode {
+ char *name;
+ int clock;
+ int refresh; /* in millihertz (for precision) */
+ u32 oversample;
+ int hsync_end, hblank_start, hblank_end, htotal;
+ bool progressive, trilevel_sync, component_only;
+ int vsync_start_f1, vsync_start_f2, vsync_len;
+ bool veq_ena;
+ int veq_start_f1, veq_start_f2, veq_len;
+ int vi_end_f1, vi_end_f2, nbr_end;
+ bool burst_ena;
+ int hburst_start, hburst_len;
+ int vburst_start_f1, vburst_end_f1;
+ int vburst_start_f2, vburst_end_f2;
+ int vburst_start_f3, vburst_end_f3;
+ int vburst_start_f4, vburst_end_f4;
+ /*
+ * subcarrier programming
+ */
+ int dda2_size, dda3_size, dda1_inc, dda2_inc, dda3_inc;
+ u32 sc_reset;
+ bool pal_burst;
+ /*
+ * blank/black levels
+ */
+ const struct video_levels *composite_levels, *svideo_levels;
+ const struct color_conversion *composite_color, *svideo_color;
+ const u32 *filter_table;
+ int max_srcw;
+};
+
+
+/*
+ * Sub carrier DDA
+ *
+ * I think this works as follows:
+ *
+ * subcarrier freq = pixel_clock * (dda1_inc + dda2_inc / dda2_size) / 4096
+ *
+ * Presumably, when dda3 is added in, it gets to adjust the dda2_inc value
+ *
+ * So,
+ * dda1_ideal = subcarrier/pixel * 4096
+ * dda1_inc = floor (dda1_ideal)
+ * dda2 = dda1_ideal - dda1_inc
+ *
+ * then pick a ratio for dda2 that gives the closest approximation. If
+ * you can't get close enough, you can play with dda3 as well. This
+ * seems likely to happen when dda2 is small as the jumps would be larger
+ *
+ * To invert this,
+ *
+ * pixel_clock = subcarrier * 4096 / (dda1_inc + dda2_inc / dda2_size)
+ *
+ * The constants below were all computed using a 107.520MHz clock
+ */
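+
+/*
+ * Worked example (using the NTSC-M entry below): a 3.58 MHz subcarrier with
+ * the 107.52 MHz clock gives dda1_ideal = 3.58 / 107.52 * 4096 ~= 136.381,
+ * so dda1_inc = 136 and the remaining 0.381 is approximated by
+ * dda2_inc / dda2_size = 7624 / 20013 ~= 0.38095; plugging back in,
+ * (136 + 7624/20013) * 107.52 / 4096 comes out to 3.5800000 MHz.
+ */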
+
+/**
+ * Register programming values for TV modes.
+ *
+ * These values account for -1s required.
+ */
+
+static const struct tv_mode tv_modes[] = {
+ {
+ .name = "NTSC-M",
+ .clock = 107520,
+ .refresh = 29970,
+ .oversample = TV_OVERSAMPLE_8X,
+ .component_only = 0,
+ /* 525 Lines, 60 Fields, 15.734KHz line, Sub-Carrier 3.580MHz */
+
+ .hsync_end = 64, .hblank_end = 124,
+ .hblank_start = 836, .htotal = 857,
+
+ .progressive = false, .trilevel_sync = false,
+
+ .vsync_start_f1 = 6, .vsync_start_f2 = 7,
+ .vsync_len = 6,
+
+ .veq_ena = true, .veq_start_f1 = 0,
+ .veq_start_f2 = 1, .veq_len = 18,
+
+ .vi_end_f1 = 20, .vi_end_f2 = 21,
+ .nbr_end = 240,
+
+ .burst_ena = true,
+ .hburst_start = 72, .hburst_len = 34,
+ .vburst_start_f1 = 9, .vburst_end_f1 = 240,
+ .vburst_start_f2 = 10, .vburst_end_f2 = 240,
+ .vburst_start_f3 = 9, .vburst_end_f3 = 240,
+ .vburst_start_f4 = 10, .vburst_end_f4 = 240,
+
+ /* desired 3.5800000 actual 3.5800000 clock 107.52 */
+ .dda1_inc = 136,
+ .dda2_inc = 7624, .dda2_size = 20013,
+ .dda3_inc = 0, .dda3_size = 0,
+ .sc_reset = TV_SC_RESET_EVERY_4,
+ .pal_burst = false,
+
+ .composite_levels = &ntsc_m_levels_composite,
+ .composite_color = &ntsc_m_csc_composite,
+ .svideo_levels = &ntsc_m_levels_svideo,
+ .svideo_color = &ntsc_m_csc_svideo,
+
+ .filter_table = filter_table,
+ },
+ {
+ .name = "NTSC-443",
+ .clock = 107520,
+ .refresh = 29970,
+ .oversample = TV_OVERSAMPLE_8X,
+ .component_only = 0,
+ /* 525 Lines, 60 Fields, 15.734KHz line, Sub-Carrier 4.43MHz */
+ .hsync_end = 64, .hblank_end = 124,
+ .hblank_start = 836, .htotal = 857,
+
+ .progressive = false, .trilevel_sync = false,
+
+ .vsync_start_f1 = 6, .vsync_start_f2 = 7,
+ .vsync_len = 6,
+
+ .veq_ena = true, .veq_start_f1 = 0,
+ .veq_start_f2 = 1, .veq_len = 18,
+
+ .vi_end_f1 = 20, .vi_end_f2 = 21,
+ .nbr_end = 240,
+
+		.burst_ena = true,
+ .hburst_start = 72, .hburst_len = 34,
+ .vburst_start_f1 = 9, .vburst_end_f1 = 240,
+ .vburst_start_f2 = 10, .vburst_end_f2 = 240,
+ .vburst_start_f3 = 9, .vburst_end_f3 = 240,
+ .vburst_start_f4 = 10, .vburst_end_f4 = 240,
+
+ /* desired 4.4336180 actual 4.4336180 clock 107.52 */
+ .dda1_inc = 168,
+ .dda2_inc = 18557, .dda2_size = 20625,
+ .dda3_inc = 0, .dda3_size = 0,
+ .sc_reset = TV_SC_RESET_EVERY_8,
+ .pal_burst = true,
+
+ .composite_levels = &ntsc_m_levels_composite,
+ .composite_color = &ntsc_m_csc_composite,
+ .svideo_levels = &ntsc_m_levels_svideo,
+ .svideo_color = &ntsc_m_csc_svideo,
+
+ .filter_table = filter_table,
+ },
+ {
+ .name = "NTSC-J",
+ .clock = 107520,
+ .refresh = 29970,
+ .oversample = TV_OVERSAMPLE_8X,
+ .component_only = 0,
+
+ /* 525 Lines, 60 Fields, 15.734KHz line, Sub-Carrier 3.580MHz */
+ .hsync_end = 64, .hblank_end = 124,
+ .hblank_start = 836, .htotal = 857,
+
+ .progressive = false, .trilevel_sync = false,
+
+ .vsync_start_f1 = 6, .vsync_start_f2 = 7,
+ .vsync_len = 6,
+
+ .veq_ena = true, .veq_start_f1 = 0,
+ .veq_start_f2 = 1, .veq_len = 18,
+
+ .vi_end_f1 = 20, .vi_end_f2 = 21,
+ .nbr_end = 240,
+
+ .burst_ena = true,
+ .hburst_start = 72, .hburst_len = 34,
+ .vburst_start_f1 = 9, .vburst_end_f1 = 240,
+ .vburst_start_f2 = 10, .vburst_end_f2 = 240,
+ .vburst_start_f3 = 9, .vburst_end_f3 = 240,
+ .vburst_start_f4 = 10, .vburst_end_f4 = 240,
+
+ /* desired 3.5800000 actual 3.5800000 clock 107.52 */
+ .dda1_inc = 136,
+ .dda2_inc = 7624, .dda2_size = 20013,
+ .dda3_inc = 0, .dda3_size = 0,
+ .sc_reset = TV_SC_RESET_EVERY_4,
+ .pal_burst = false,
+
+ .composite_levels = &ntsc_j_levels_composite,
+ .composite_color = &ntsc_j_csc_composite,
+ .svideo_levels = &ntsc_j_levels_svideo,
+ .svideo_color = &ntsc_j_csc_svideo,
+
+ .filter_table = filter_table,
+ },
+ {
+ .name = "PAL-M",
+ .clock = 107520,
+ .refresh = 29970,
+ .oversample = TV_OVERSAMPLE_8X,
+ .component_only = 0,
+
+ /* 525 Lines, 60 Fields, 15.734KHz line, Sub-Carrier 3.580MHz */
+ .hsync_end = 64, .hblank_end = 124,
+ .hblank_start = 836, .htotal = 857,
+
+ .progressive = false, .trilevel_sync = false,
+
+ .vsync_start_f1 = 6, .vsync_start_f2 = 7,
+ .vsync_len = 6,
+
+ .veq_ena = true, .veq_start_f1 = 0,
+ .veq_start_f2 = 1, .veq_len = 18,
+
+ .vi_end_f1 = 20, .vi_end_f2 = 21,
+ .nbr_end = 240,
+
+ .burst_ena = true,
+ .hburst_start = 72, .hburst_len = 34,
+ .vburst_start_f1 = 9, .vburst_end_f1 = 240,
+ .vburst_start_f2 = 10, .vburst_end_f2 = 240,
+ .vburst_start_f3 = 9, .vburst_end_f3 = 240,
+ .vburst_start_f4 = 10, .vburst_end_f4 = 240,
+
+ /* desired 3.5800000 actual 3.5800000 clock 107.52 */
+ .dda1_inc = 136,
+ .dda2_inc = 7624, .dda2_size = 20013,
+ .dda3_inc = 0, .dda3_size = 0,
+ .sc_reset = TV_SC_RESET_EVERY_4,
+ .pal_burst = false,
+
+ .composite_levels = &pal_m_levels_composite,
+ .composite_color = &pal_m_csc_composite,
+ .svideo_levels = &pal_m_levels_svideo,
+ .svideo_color = &pal_m_csc_svideo,
+
+ .filter_table = filter_table,
+ },
+ {
+ /* 625 Lines, 50 Fields, 15.625KHz line, Sub-Carrier 4.434MHz */
+ .name = "PAL-N",
+ .clock = 107520,
+ .refresh = 25000,
+ .oversample = TV_OVERSAMPLE_8X,
+ .component_only = 0,
+
+ .hsync_end = 64, .hblank_end = 128,
+ .hblank_start = 844, .htotal = 863,
+
+ .progressive = false, .trilevel_sync = false,
+
+
+ .vsync_start_f1 = 6, .vsync_start_f2 = 7,
+ .vsync_len = 6,
+
+ .veq_ena = true, .veq_start_f1 = 0,
+ .veq_start_f2 = 1, .veq_len = 18,
+
+ .vi_end_f1 = 24, .vi_end_f2 = 25,
+ .nbr_end = 286,
+
+ .burst_ena = true,
+ .hburst_start = 73, .hburst_len = 34,
+ .vburst_start_f1 = 8, .vburst_end_f1 = 285,
+ .vburst_start_f2 = 8, .vburst_end_f2 = 286,
+ .vburst_start_f3 = 9, .vburst_end_f3 = 286,
+ .vburst_start_f4 = 9, .vburst_end_f4 = 285,
+
+
+ /* desired 4.4336180 actual 4.4336180 clock 107.52 */
+ .dda1_inc = 168,
+ .dda2_inc = 18557, .dda2_size = 20625,
+ .dda3_inc = 0, .dda3_size = 0,
+ .sc_reset = TV_SC_RESET_EVERY_8,
+ .pal_burst = true,
+
+ .composite_levels = &pal_n_levels_composite,
+ .composite_color = &pal_n_csc_composite,
+ .svideo_levels = &pal_n_levels_svideo,
+ .svideo_color = &pal_n_csc_svideo,
+
+ .filter_table = filter_table,
+ },
+ {
+ /* 625 Lines, 50 Fields, 15.625KHz line, Sub-Carrier 4.434MHz */
+ .name = "PAL",
+ .clock = 107520,
+ .refresh = 25000,
+ .oversample = TV_OVERSAMPLE_8X,
+ .component_only = 0,
+
+ .hsync_end = 64, .hblank_end = 128,
+ .hblank_start = 844, .htotal = 863,
+
+ .progressive = false, .trilevel_sync = false,
+
+ .vsync_start_f1 = 5, .vsync_start_f2 = 6,
+ .vsync_len = 5,
+
+ .veq_ena = true, .veq_start_f1 = 0,
+ .veq_start_f2 = 1, .veq_len = 15,
+
+ .vi_end_f1 = 24, .vi_end_f2 = 25,
+ .nbr_end = 286,
+
+ .burst_ena = true,
+ .hburst_start = 73, .hburst_len = 32,
+ .vburst_start_f1 = 8, .vburst_end_f1 = 285,
+ .vburst_start_f2 = 8, .vburst_end_f2 = 286,
+ .vburst_start_f3 = 9, .vburst_end_f3 = 286,
+ .vburst_start_f4 = 9, .vburst_end_f4 = 285,
+
+ /* desired 4.4336180 actual 4.4336180 clock 107.52 */
+ .dda1_inc = 168,
+ .dda2_inc = 18557, .dda2_size = 20625,
+ .dda3_inc = 0, .dda3_size = 0,
+ .sc_reset = TV_SC_RESET_EVERY_8,
+ .pal_burst = true,
+
+ .composite_levels = &pal_levels_composite,
+ .composite_color = &pal_csc_composite,
+ .svideo_levels = &pal_levels_svideo,
+ .svideo_color = &pal_csc_svideo,
+
+ .filter_table = filter_table,
+ },
+ {
+ .name = "480p@59.94Hz",
+ .clock = 107520,
+ .refresh = 59940,
+ .oversample = TV_OVERSAMPLE_4X,
+ .component_only = 1,
+
+ .hsync_end = 64, .hblank_end = 122,
+ .hblank_start = 842, .htotal = 857,
+
+		.progressive = true, .trilevel_sync = false,
+
+ .vsync_start_f1 = 12, .vsync_start_f2 = 12,
+ .vsync_len = 12,
+
+ .veq_ena = false,
+
+ .vi_end_f1 = 44, .vi_end_f2 = 44,
+ .nbr_end = 496,
+
+ .burst_ena = false,
+
+ .filter_table = filter_table,
+ },
+ {
+ .name = "480p@60Hz",
+ .clock = 107520,
+ .refresh = 60000,
+ .oversample = TV_OVERSAMPLE_4X,
+ .component_only = 1,
+
+ .hsync_end = 64, .hblank_end = 122,
+ .hblank_start = 842, .htotal = 856,
+
+		.progressive = true, .trilevel_sync = false,
+
+ .vsync_start_f1 = 12, .vsync_start_f2 = 12,
+ .vsync_len = 12,
+
+ .veq_ena = false,
+
+ .vi_end_f1 = 44, .vi_end_f2 = 44,
+ .nbr_end = 496,
+
+ .burst_ena = false,
+
+ .filter_table = filter_table,
+ },
+ {
+ .name = "576p",
+ .clock = 107520,
+ .refresh = 50000,
+ .oversample = TV_OVERSAMPLE_4X,
+ .component_only = 1,
+
+ .hsync_end = 64, .hblank_end = 139,
+ .hblank_start = 859, .htotal = 863,
+
+ .progressive = true, .trilevel_sync = false,
+
+ .vsync_start_f1 = 10, .vsync_start_f2 = 10,
+ .vsync_len = 10,
+
+ .veq_ena = false,
+
+ .vi_end_f1 = 48, .vi_end_f2 = 48,
+ .nbr_end = 575,
+
+ .burst_ena = false,
+
+ .filter_table = filter_table,
+ },
+ {
+ .name = "720p@60Hz",
+ .clock = 148800,
+ .refresh = 60000,
+ .oversample = TV_OVERSAMPLE_2X,
+ .component_only = 1,
+
+ .hsync_end = 80, .hblank_end = 300,
+ .hblank_start = 1580, .htotal = 1649,
+
+ .progressive = true, .trilevel_sync = true,
+
+ .vsync_start_f1 = 10, .vsync_start_f2 = 10,
+ .vsync_len = 10,
+
+ .veq_ena = false,
+
+ .vi_end_f1 = 29, .vi_end_f2 = 29,
+ .nbr_end = 719,
+
+ .burst_ena = false,
+
+ .filter_table = filter_table,
+ },
+ {
+ .name = "720p@59.94Hz",
+ .clock = 148800,
+ .refresh = 59940,
+ .oversample = TV_OVERSAMPLE_2X,
+ .component_only = 1,
+
+ .hsync_end = 80, .hblank_end = 300,
+ .hblank_start = 1580, .htotal = 1651,
+
+ .progressive = true, .trilevel_sync = true,
+
+ .vsync_start_f1 = 10, .vsync_start_f2 = 10,
+ .vsync_len = 10,
+
+ .veq_ena = false,
+
+ .vi_end_f1 = 29, .vi_end_f2 = 29,
+ .nbr_end = 719,
+
+ .burst_ena = false,
+
+ .filter_table = filter_table,
+ },
+ {
+ .name = "720p@50Hz",
+ .clock = 148800,
+ .refresh = 50000,
+ .oversample = TV_OVERSAMPLE_2X,
+ .component_only = 1,
+
+ .hsync_end = 80, .hblank_end = 300,
+ .hblank_start = 1580, .htotal = 1979,
+
+ .progressive = true, .trilevel_sync = true,
+
+ .vsync_start_f1 = 10, .vsync_start_f2 = 10,
+ .vsync_len = 10,
+
+ .veq_ena = false,
+
+ .vi_end_f1 = 29, .vi_end_f2 = 29,
+ .nbr_end = 719,
+
+ .burst_ena = false,
+
+ .filter_table = filter_table,
+ .max_srcw = 800
+ },
+ {
+ .name = "1080i@50Hz",
+ .clock = 148800,
+ .refresh = 25000,
+ .oversample = TV_OVERSAMPLE_2X,
+ .component_only = 1,
+
+ .hsync_end = 88, .hblank_end = 235,
+ .hblank_start = 2155, .htotal = 2639,
+
+ .progressive = false, .trilevel_sync = true,
+
+ .vsync_start_f1 = 4, .vsync_start_f2 = 5,
+ .vsync_len = 10,
+
+ .veq_ena = true, .veq_start_f1 = 4,
+ .veq_start_f2 = 4, .veq_len = 10,
+
+
+ .vi_end_f1 = 21, .vi_end_f2 = 22,
+ .nbr_end = 539,
+
+ .burst_ena = false,
+
+ .filter_table = filter_table,
+ },
+ {
+ .name = "1080i@60Hz",
+ .clock = 148800,
+ .refresh = 30000,
+ .oversample = TV_OVERSAMPLE_2X,
+ .component_only = 1,
+
+ .hsync_end = 88, .hblank_end = 235,
+ .hblank_start = 2155, .htotal = 2199,
+
+ .progressive = false, .trilevel_sync = true,
+
+ .vsync_start_f1 = 4, .vsync_start_f2 = 5,
+ .vsync_len = 10,
+
+ .veq_ena = true, .veq_start_f1 = 4,
+ .veq_start_f2 = 4, .veq_len = 10,
+
+
+ .vi_end_f1 = 21, .vi_end_f2 = 22,
+ .nbr_end = 539,
+
+ .burst_ena = false,
+
+ .filter_table = filter_table,
+ },
+ {
+ .name = "1080i@59.94Hz",
+ .clock = 148800,
+ .refresh = 29970,
+ .oversample = TV_OVERSAMPLE_2X,
+ .component_only = 1,
+
+ .hsync_end = 88, .hblank_end = 235,
+ .hblank_start = 2155, .htotal = 2200,
+
+ .progressive = false, .trilevel_sync = true,
+
+ .vsync_start_f1 = 4, .vsync_start_f2 = 5,
+ .vsync_len = 10,
+
+ .veq_ena = true, .veq_start_f1 = 4,
+ .veq_start_f2 = 4, .veq_len = 10,
+
+
+ .vi_end_f1 = 21, .vi_end_f2 = 22,
+ .nbr_end = 539,
+
+ .burst_ena = false,
+
+ .filter_table = filter_table,
+ },
+};
+
+#define NUM_TV_MODES (sizeof(tv_modes) / sizeof(tv_modes[0]))
+
+static void
+intel_tv_dpms(struct drm_encoder *encoder, int mode)
+{
+ struct drm_device *dev = encoder->dev;
+ struct drm_i915_private *dev_priv = dev->dev_private;
+
+ switch(mode) {
+ case DRM_MODE_DPMS_ON:
+ I915_WRITE(TV_CTL, I915_READ(TV_CTL) | TV_ENC_ENABLE);
+ break;
+ case DRM_MODE_DPMS_STANDBY:
+ case DRM_MODE_DPMS_SUSPEND:
+ case DRM_MODE_DPMS_OFF:
+ I915_WRITE(TV_CTL, I915_READ(TV_CTL) & ~TV_ENC_ENABLE);
+ break;
+ }
+}
+
+static void
+intel_tv_save(struct drm_connector *connector)
+{
+ struct drm_device *dev = connector->dev;
+ struct drm_i915_private *dev_priv = dev->dev_private;
+ struct intel_output *intel_output = to_intel_output(connector);
+ struct intel_tv_priv *tv_priv = intel_output->dev_priv;
+ int i;
+
+ tv_priv->save_TV_H_CTL_1 = I915_READ(TV_H_CTL_1);
+ tv_priv->save_TV_H_CTL_2 = I915_READ(TV_H_CTL_2);
+ tv_priv->save_TV_H_CTL_3 = I915_READ(TV_H_CTL_3);
+ tv_priv->save_TV_V_CTL_1 = I915_READ(TV_V_CTL_1);
+ tv_priv->save_TV_V_CTL_2 = I915_READ(TV_V_CTL_2);
+ tv_priv->save_TV_V_CTL_3 = I915_READ(TV_V_CTL_3);
+ tv_priv->save_TV_V_CTL_4 = I915_READ(TV_V_CTL_4);
+ tv_priv->save_TV_V_CTL_5 = I915_READ(TV_V_CTL_5);
+ tv_priv->save_TV_V_CTL_6 = I915_READ(TV_V_CTL_6);
+ tv_priv->save_TV_V_CTL_7 = I915_READ(TV_V_CTL_7);
+ tv_priv->save_TV_SC_CTL_1 = I915_READ(TV_SC_CTL_1);
+ tv_priv->save_TV_SC_CTL_2 = I915_READ(TV_SC_CTL_2);
+ tv_priv->save_TV_SC_CTL_3 = I915_READ(TV_SC_CTL_3);
+
+ tv_priv->save_TV_CSC_Y = I915_READ(TV_CSC_Y);
+ tv_priv->save_TV_CSC_Y2 = I915_READ(TV_CSC_Y2);
+ tv_priv->save_TV_CSC_U = I915_READ(TV_CSC_U);
+ tv_priv->save_TV_CSC_U2 = I915_READ(TV_CSC_U2);
+ tv_priv->save_TV_CSC_V = I915_READ(TV_CSC_V);
+ tv_priv->save_TV_CSC_V2 = I915_READ(TV_CSC_V2);
+ tv_priv->save_TV_CLR_KNOBS = I915_READ(TV_CLR_KNOBS);
+ tv_priv->save_TV_CLR_LEVEL = I915_READ(TV_CLR_LEVEL);
+ tv_priv->save_TV_WIN_POS = I915_READ(TV_WIN_POS);
+ tv_priv->save_TV_WIN_SIZE = I915_READ(TV_WIN_SIZE);
+ tv_priv->save_TV_FILTER_CTL_1 = I915_READ(TV_FILTER_CTL_1);
+ tv_priv->save_TV_FILTER_CTL_2 = I915_READ(TV_FILTER_CTL_2);
+ tv_priv->save_TV_FILTER_CTL_3 = I915_READ(TV_FILTER_CTL_3);
+
+	for (i = 0; i < 60; i++)
+		tv_priv->save_TV_H_LUMA[i] = I915_READ(TV_H_LUMA_0 + (i << 2));
+	for (i = 0; i < 60; i++)
+		tv_priv->save_TV_H_CHROMA[i] = I915_READ(TV_H_CHROMA_0 + (i << 2));
+	for (i = 0; i < 43; i++)
+		tv_priv->save_TV_V_LUMA[i] = I915_READ(TV_V_LUMA_0 + (i << 2));
+	for (i = 0; i < 43; i++)
+		tv_priv->save_TV_V_CHROMA[i] = I915_READ(TV_V_CHROMA_0 + (i << 2));
+
+ tv_priv->save_TV_DAC = I915_READ(TV_DAC);
+ tv_priv->save_TV_CTL = I915_READ(TV_CTL);
+}
+
+static void
+intel_tv_restore(struct drm_connector *connector)
+{
+ struct drm_device *dev = connector->dev;
+ struct drm_i915_private *dev_priv = dev->dev_private;
+ struct intel_output *intel_output = to_intel_output(connector);
+ struct intel_tv_priv *tv_priv = intel_output->dev_priv;
+ struct drm_crtc *crtc = connector->encoder->crtc;
+ struct intel_crtc *intel_crtc;
+ int i;
+
+ /* FIXME: No CRTC? */
+ if (!crtc)
+ return;
+
+ intel_crtc = to_intel_crtc(crtc);
+ I915_WRITE(TV_H_CTL_1, tv_priv->save_TV_H_CTL_1);
+ I915_WRITE(TV_H_CTL_2, tv_priv->save_TV_H_CTL_2);
+ I915_WRITE(TV_H_CTL_3, tv_priv->save_TV_H_CTL_3);
+ I915_WRITE(TV_V_CTL_1, tv_priv->save_TV_V_CTL_1);
+ I915_WRITE(TV_V_CTL_2, tv_priv->save_TV_V_CTL_2);
+ I915_WRITE(TV_V_CTL_3, tv_priv->save_TV_V_CTL_3);
+ I915_WRITE(TV_V_CTL_4, tv_priv->save_TV_V_CTL_4);
+ I915_WRITE(TV_V_CTL_5, tv_priv->save_TV_V_CTL_5);
+ I915_WRITE(TV_V_CTL_6, tv_priv->save_TV_V_CTL_6);
+ I915_WRITE(TV_V_CTL_7, tv_priv->save_TV_V_CTL_7);
+ I915_WRITE(TV_SC_CTL_1, tv_priv->save_TV_SC_CTL_1);
+ I915_WRITE(TV_SC_CTL_2, tv_priv->save_TV_SC_CTL_2);
+ I915_WRITE(TV_SC_CTL_3, tv_priv->save_TV_SC_CTL_3);
+
+ I915_WRITE(TV_CSC_Y, tv_priv->save_TV_CSC_Y);
+ I915_WRITE(TV_CSC_Y2, tv_priv->save_TV_CSC_Y2);
+ I915_WRITE(TV_CSC_U, tv_priv->save_TV_CSC_U);
+ I915_WRITE(TV_CSC_U2, tv_priv->save_TV_CSC_U2);
+ I915_WRITE(TV_CSC_V, tv_priv->save_TV_CSC_V);
+ I915_WRITE(TV_CSC_V2, tv_priv->save_TV_CSC_V2);
+ I915_WRITE(TV_CLR_KNOBS, tv_priv->save_TV_CLR_KNOBS);
+ I915_WRITE(TV_CLR_LEVEL, tv_priv->save_TV_CLR_LEVEL);
+
+ {
+ int pipeconf_reg = (intel_crtc->pipe == 0) ?
+ PIPEACONF : PIPEBCONF;
+ int dspcntr_reg = (intel_crtc->plane == 0) ?
+ DSPACNTR : DSPBCNTR;
+ int pipeconf = I915_READ(pipeconf_reg);
+ int dspcntr = I915_READ(dspcntr_reg);
+ int dspbase_reg = (intel_crtc->plane == 0) ?
+ DSPAADDR : DSPBADDR;
+ /* Pipe must be off here */
+ I915_WRITE(dspcntr_reg, dspcntr & ~DISPLAY_PLANE_ENABLE);
+ /* Flush the plane changes */
+ I915_WRITE(dspbase_reg, I915_READ(dspbase_reg));
+
+ if (!IS_I9XX(dev)) {
+ /* Wait for vblank for the disable to take effect */
+ intel_wait_for_vblank(dev);
+ }
+
+ I915_WRITE(pipeconf_reg, pipeconf & ~PIPEACONF_ENABLE);
+ /* Wait for vblank for the disable to take effect. */
+ intel_wait_for_vblank(dev);
+
+ /* Filter ctl must be set before TV_WIN_SIZE */
+ I915_WRITE(TV_FILTER_CTL_1, tv_priv->save_TV_FILTER_CTL_1);
+ I915_WRITE(TV_FILTER_CTL_2, tv_priv->save_TV_FILTER_CTL_2);
+ I915_WRITE(TV_FILTER_CTL_3, tv_priv->save_TV_FILTER_CTL_3);
+ I915_WRITE(TV_WIN_POS, tv_priv->save_TV_WIN_POS);
+ I915_WRITE(TV_WIN_SIZE, tv_priv->save_TV_WIN_SIZE);
+ I915_WRITE(pipeconf_reg, pipeconf);
+ I915_WRITE(dspcntr_reg, dspcntr);
+ /* Flush the plane changes */
+ I915_WRITE(dspbase_reg, I915_READ(dspbase_reg));
+ }
+
+ for (i = 0; i < 60; i++)
+ I915_WRITE(TV_H_LUMA_0 + (i << 2), tv_priv->save_TV_H_LUMA[i]);
+ for (i = 0; i < 60; i++)
+ I915_WRITE(TV_H_CHROMA_0 + (i << 2), tv_priv->save_TV_H_CHROMA[i]);
+ for (i = 0; i < 43; i++)
+ I915_WRITE(TV_V_LUMA_0 + (i << 2), tv_priv->save_TV_V_LUMA[i]);
+ for (i = 0; i < 43; i++)
+ I915_WRITE(TV_V_CHROMA_0 + (i << 2), tv_priv->save_TV_V_CHROMA[i]);
+
+ I915_WRITE(TV_DAC, tv_priv->save_TV_DAC);
+ I915_WRITE(TV_CTL, tv_priv->save_TV_CTL);
+}
+
+static const struct tv_mode *
+intel_tv_mode_lookup(char *tv_format)
+{
+ int i;
+
+ for (i = 0; i < ARRAY_SIZE(tv_modes); i++) {
+ const struct tv_mode *tv_mode = &tv_modes[i];
+
+ if (!strcmp(tv_format, tv_mode->name))
+ return tv_mode;
+ }
+ return NULL;
+}
+
+static const struct tv_mode *
+intel_tv_mode_find(struct intel_output *intel_output)
+{
+ struct intel_tv_priv *tv_priv = intel_output->dev_priv;
+
+ return intel_tv_mode_lookup(tv_priv->tv_format);
+}
+
+static enum drm_mode_status
+intel_tv_mode_valid(struct drm_connector *connector, struct drm_display_mode *mode)
+{
+ struct intel_output *intel_output = to_intel_output(connector);
+ const struct tv_mode *tv_mode = intel_tv_mode_find(intel_output);
+
+ /* Ensure TV refresh is close to desired refresh */
+ if (tv_mode && abs(tv_mode->refresh - drm_mode_vrefresh(mode)) < 1)
+ return MODE_OK;
+ return MODE_CLOCK_RANGE;
+}
+
+
+static bool
+intel_tv_mode_fixup(struct drm_encoder *encoder, struct drm_display_mode *mode,
+ struct drm_display_mode *adjusted_mode)
+{
+ struct drm_device *dev = encoder->dev;
+ struct drm_mode_config *drm_config = &dev->mode_config;
+ struct intel_output *intel_output = enc_to_intel_output(encoder);
+ const struct tv_mode *tv_mode = intel_tv_mode_find(intel_output);
+ struct drm_encoder *other_encoder;
+
+ if (!tv_mode)
+ return false;
+
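+ /*
+ * The TV encoder cannot share its pipe with another encoder, so
+ * reject the mode if the CRTC is already claimed by someone else.
+ */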
+ /* FIXME: lock encoder list */
+ list_for_each_entry(other_encoder, &drm_config->encoder_list, head) {
+ if (other_encoder != encoder &&
+ other_encoder->crtc == encoder->crtc)
+ return false;
+ }
+
+ adjusted_mode->clock = tv_mode->clock;
+ return true;
+}
+
+static void
+intel_tv_mode_set(struct drm_encoder *encoder, struct drm_display_mode *mode,
+ struct drm_display_mode *adjusted_mode)
+{
+ struct drm_device *dev = encoder->dev;
+ struct drm_i915_private *dev_priv = dev->dev_private;
+ struct drm_crtc *crtc = encoder->crtc;
+ struct intel_crtc *intel_crtc = to_intel_crtc(crtc);
+ struct intel_output *intel_output = enc_to_intel_output(encoder);
+ struct intel_tv_priv *tv_priv = intel_output->dev_priv;
+ const struct tv_mode *tv_mode = intel_tv_mode_find(intel_output);
+ u32 tv_ctl;
+ u32 hctl1, hctl2, hctl3;
+ u32 vctl1, vctl2, vctl3, vctl4, vctl5, vctl6, vctl7;
+ u32 scctl1, scctl2, scctl3;
+ int i, j;
+ const struct video_levels *video_levels;
+ const struct color_conversion *color_conversion;
+ bool burst_ena;
+
+ if (!tv_mode)
+ return; /* can't happen (mode_prepare prevents this) */
+
+ tv_ctl = 0;
+
+ switch (tv_priv->type) {
+ default:
+ case DRM_MODE_CONNECTOR_Unknown:
+ case DRM_MODE_CONNECTOR_Composite:
+ tv_ctl |= TV_ENC_OUTPUT_COMPOSITE;
+ video_levels = tv_mode->composite_levels;
+ color_conversion = tv_mode->composite_color;
+ burst_ena = tv_mode->burst_ena;
+ break;
+ case DRM_MODE_CONNECTOR_Component:
+ tv_ctl |= TV_ENC_OUTPUT_COMPONENT;
+ video_levels = &component_levels;
+ if (tv_mode->burst_ena)
+ color_conversion = &sdtv_csc_yprpb;
+ else
+ color_conversion = &hdtv_csc_yprpb;
+ burst_ena = false;
+ break;
+ case DRM_MODE_CONNECTOR_SVIDEO:
+ tv_ctl |= TV_ENC_OUTPUT_SVIDEO;
+ video_levels = tv_mode->svideo_levels;
+ color_conversion = tv_mode->svideo_color;
+ burst_ena = tv_mode->burst_ena;
+ break;
+ }
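+
+ /* Pack the horizontal and vertical sync, blank and burst timings */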
+ hctl1 = (tv_mode->hsync_end << TV_HSYNC_END_SHIFT) |
+ (tv_mode->htotal << TV_HTOTAL_SHIFT);
+
+ hctl2 = (tv_mode->hburst_start << 16) |
+ (tv_mode->hburst_len << TV_HBURST_LEN_SHIFT);
+
+ if (burst_ena)
+ hctl2 |= TV_BURST_ENA;
+
+ hctl3 = (tv_mode->hblank_start << TV_HBLANK_START_SHIFT) |
+ (tv_mode->hblank_end << TV_HBLANK_END_SHIFT);
+
+ vctl1 = (tv_mode->nbr_end << TV_NBR_END_SHIFT) |
+ (tv_mode->vi_end_f1 << TV_VI_END_F1_SHIFT) |
+ (tv_mode->vi_end_f2 << TV_VI_END_F2_SHIFT);
+
+ vctl2 = (tv_mode->vsync_len << TV_VSYNC_LEN_SHIFT) |
+ (tv_mode->vsync_start_f1 << TV_VSYNC_START_F1_SHIFT) |
+ (tv_mode->vsync_start_f2 << TV_VSYNC_START_F2_SHIFT);
+
+ vctl3 = (tv_mode->veq_len << TV_VEQ_LEN_SHIFT) |
+ (tv_mode->veq_start_f1 << TV_VEQ_START_F1_SHIFT) |
+ (tv_mode->veq_start_f2 << TV_VEQ_START_F2_SHIFT);
+
+ if (tv_mode->veq_ena)
+ vctl3 |= TV_EQUAL_ENA;
+
+ vctl4 = (tv_mode->vburst_start_f1 << TV_VBURST_START_F1_SHIFT) |
+ (tv_mode->vburst_end_f1 << TV_VBURST_END_F1_SHIFT);
+
+ vctl5 = (tv_mode->vburst_start_f2 << TV_VBURST_START_F2_SHIFT) |
+ (tv_mode->vburst_end_f2 << TV_VBURST_END_F2_SHIFT);
+
+ vctl6 = (tv_mode->vburst_start_f3 << TV_VBURST_START_F3_SHIFT) |
+ (tv_mode->vburst_end_f3 << TV_VBURST_END_F3_SHIFT);
+
+ vctl7 = (tv_mode->vburst_start_f4 << TV_VBURST_START_F4_SHIFT) |
+ (tv_mode->vburst_end_f4 << TV_VBURST_END_F4_SHIFT);
+
+ if (intel_crtc->pipe == 1)
+ tv_ctl |= TV_ENC_PIPEB_SELECT;
+ tv_ctl |= tv_mode->oversample;
+
+ if (tv_mode->progressive)
+ tv_ctl |= TV_PROGRESSIVE;
+ if (tv_mode->trilevel_sync)
+ tv_ctl |= TV_TRILEVEL_SYNC;
+ if (tv_mode->pal_burst)
+ tv_ctl |= TV_PAL_BURST;
+ scctl1 = 0;
+ /* dda1 implies valid video levels */
+ if (tv_mode->dda1_inc) {
+ scctl1 |= TV_SC_DDA1_EN;
+ scctl1 |= video_levels->burst << TV_BURST_LEVEL_SHIFT;
+ }
+
+ if (tv_mode->dda2_inc)
+ scctl1 |= TV_SC_DDA2_EN;
+
+ if (tv_mode->dda3_inc)
+ scctl1 |= TV_SC_DDA3_EN;
+
+ scctl1 |= tv_mode->sc_reset;
+ scctl1 |= tv_mode->dda1_inc << TV_SCDDA1_INC_SHIFT;
+
+ scctl2 = tv_mode->dda2_size << TV_SCDDA2_SIZE_SHIFT |
+ tv_mode->dda2_inc << TV_SCDDA2_INC_SHIFT;
+
+ scctl3 = tv_mode->dda3_size << TV_SCDDA3_SIZE_SHIFT |
+ tv_mode->dda3_inc << TV_SCDDA3_INC_SHIFT;
+
+ /* Enable two fixes for the chips that need them. */
+ if (dev->pci_device < 0x2772)
+ tv_ctl |= TV_ENC_C0_FIX | TV_ENC_SDP_FIX;
+
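+ /* Program the timing and subcarrier DDA registers for this mode */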
+ I915_WRITE(TV_H_CTL_1, hctl1);
+ I915_WRITE(TV_H_CTL_2, hctl2);
+ I915_WRITE(TV_H_CTL_3, hctl3);
+ I915_WRITE(TV_V_CTL_1, vctl1);
+ I915_WRITE(TV_V_CTL_2, vctl2);
+ I915_WRITE(TV_V_CTL_3, vctl3);
+ I915_WRITE(TV_V_CTL_4, vctl4);
+ I915_WRITE(TV_V_CTL_5, vctl5);
+ I915_WRITE(TV_V_CTL_6, vctl6);
+ I915_WRITE(TV_V_CTL_7, vctl7);
+ I915_WRITE(TV_SC_CTL_1, scctl1);
+ I915_WRITE(TV_SC_CTL_2, scctl2);
+ I915_WRITE(TV_SC_CTL_3, scctl3);
+
+ if (color_conversion) {
+ I915_WRITE(TV_CSC_Y, (color_conversion->ry << 16) |
+ color_conversion->gy);
+ I915_WRITE(TV_CSC_Y2, (color_conversion->by << 16) |
+ color_conversion->ay);
+ I915_WRITE(TV_CSC_U, (color_conversion->ru << 16) |
+ color_conversion->gu);
+ I915_WRITE(TV_CSC_U2, (color_conversion->bu << 16) |
+ color_conversion->au);
+ I915_WRITE(TV_CSC_V, (color_conversion->rv << 16) |
+ color_conversion->gv);
+ I915_WRITE(TV_CSC_V2, (color_conversion->bv << 16) |
+ color_conversion->av);
+ }
+
+ I915_WRITE(TV_CLR_KNOBS, 0x00606000);
+ if (video_levels)
+ I915_WRITE(TV_CLR_LEVEL,
+ ((video_levels->black << TV_BLACK_LEVEL_SHIFT) |
+ (video_levels->blank << TV_BLANK_LEVEL_SHIFT)));
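+
+ /*
+ * Reprogram the TV window and scaler filter control with the pipe
+ * temporarily disabled, as the hardware requires.
+ */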
+ {
+ int pipeconf_reg = (intel_crtc->pipe == 0) ?
+ PIPEACONF : PIPEBCONF;
+ int dspcntr_reg = (intel_crtc->plane == 0) ?
+ DSPACNTR : DSPBCNTR;
+ int pipeconf = I915_READ(pipeconf_reg);
+ int dspcntr = I915_READ(dspcntr_reg);
+ int dspbase_reg = (intel_crtc->plane == 0) ?
+ DSPAADDR : DSPBADDR;
+ int xpos = 0x0, ypos = 0x0;
+ unsigned int xsize, ysize;
+ /* Pipe must be off here */
+ I915_WRITE(dspcntr_reg, dspcntr & ~DISPLAY_PLANE_ENABLE);
+ /* Flush the plane changes */
+ I915_WRITE(dspbase_reg, I915_READ(dspbase_reg));
+
+ /* Wait for vblank for the disable to take effect */
+ if (!IS_I9XX(dev))
+ intel_wait_for_vblank(dev);
+
+ I915_WRITE(pipeconf_reg, pipeconf & ~PIPEACONF_ENABLE);
+ /* Wait for vblank for the disable to take effect. */
+ intel_wait_for_vblank(dev);
+
+ /* Filter ctl must be set before TV_WIN_SIZE */
+ I915_WRITE(TV_FILTER_CTL_1, TV_AUTO_SCALE);
+ xsize = tv_mode->hblank_start - tv_mode->hblank_end;
+ if (tv_mode->progressive)
+ ysize = tv_mode->nbr_end + 1;
+ else
+ ysize = 2*tv_mode->nbr_end + 1;
+
+ xpos += tv_priv->margin[TV_MARGIN_LEFT];
+ ypos += tv_priv->margin[TV_MARGIN_TOP];
+ xsize -= (tv_priv->margin[TV_MARGIN_LEFT] +
+ tv_priv->margin[TV_MARGIN_RIGHT]);
+ ysize -= (tv_priv->margin[TV_MARGIN_TOP] +
+ tv_priv->margin[TV_MARGIN_BOTTOM]);
+ I915_WRITE(TV_WIN_POS, (xpos<<16)|ypos);
+ I915_WRITE(TV_WIN_SIZE, (xsize<<16)|ysize);
+
+ I915_WRITE(pipeconf_reg, pipeconf);
+ I915_WRITE(dspcntr_reg, dspcntr);
+ /* Flush the plane changes */
+ I915_WRITE(dspbase_reg, I915_READ(dspbase_reg));
+ }
+
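+ /* Upload the horizontal and vertical luma/chroma filter coefficients */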
+ j = 0;
+ for (i = 0; i < 60; i++)
+ I915_WRITE(TV_H_LUMA_0 + (i << 2), tv_mode->filter_table[j++]);
+ for (i = 0; i < 60; i++)
+ I915_WRITE(TV_H_CHROMA_0 + (i << 2), tv_mode->filter_table[j++]);
+ for (i = 0; i < 43; i++)
+ I915_WRITE(TV_V_LUMA_0 + (i << 2), tv_mode->filter_table[j++]);
+ for (i = 0; i < 43; i++)
+ I915_WRITE(TV_V_CHROMA_0 + (i << 2), tv_mode->filter_table[j++]);
+ I915_WRITE(TV_DAC, 0);
+ I915_WRITE(TV_CTL, tv_ctl);
+}
+
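+/*
+ * Fallback mode used when a pipe has to be borrowed for load detection
+ * (see intel_tv_detect()).
+ */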
+static const struct drm_display_mode reported_modes[] = {
+ {
+ .name = "NTSC 480i",
+ .clock = 107520,
+ .hdisplay = 1280,
+ .hsync_start = 1368,
+ .hsync_end = 1496,
+ .htotal = 1712,
+
+ .vdisplay = 1024,
+ .vsync_start = 1027,
+ .vsync_end = 1034,
+ .vtotal = 1104,
+ .type = DRM_MODE_TYPE_DRIVER,
+ },
+};
+
+/**
+ * Detect TV presence by performing DAC load detection.
+ *
+ * Requires that the current pipe's DPLL is active.
+ *
+ * \return the detected connector type (DRM_MODE_CONNECTOR_*), or -1 if
+ * no TV connection is sensed.
+ */
+static int
+intel_tv_detect_type(struct drm_crtc *crtc, struct intel_output *intel_output)
+{
+ struct drm_encoder *encoder = &intel_output->enc;
+ struct drm_device *dev = encoder->dev;
+ struct drm_i915_private *dev_priv = dev->dev_private;
+ unsigned long irqflags;
+ u32 tv_ctl, save_tv_ctl;
+ u32 tv_dac, save_tv_dac;
+ int type = DRM_MODE_CONNECTOR_Unknown;
+
+ tv_dac = I915_READ(TV_DAC);
+
+ /* Disable TV interrupts around load detect or we'll recurse */
+ spin_lock_irqsave(&dev_priv->user_irq_lock, irqflags);
+ i915_disable_pipestat(dev_priv, 0, PIPE_HOTPLUG_INTERRUPT_ENABLE |
+ PIPE_HOTPLUG_TV_INTERRUPT_ENABLE);
+ spin_unlock_irqrestore(&dev_priv->user_irq_lock, irqflags);
+
+ /*
+ * Detect TV by polling the DAC sense bits.
+ */
+ if (intel_output->load_detect_temp) {
+ /* TV not currently running, prod it with destructive detect */
+ save_tv_dac = tv_dac;
+ tv_ctl = I915_READ(TV_CTL);
+ save_tv_ctl = tv_ctl;
+ tv_ctl &= ~TV_ENC_ENABLE;
+ tv_ctl &= ~TV_TEST_MODE_MASK;
+ tv_ctl |= TV_TEST_MODE_MONITOR_DETECT;
+ tv_dac &= ~TVDAC_SENSE_MASK;
+ tv_dac |= (TVDAC_STATE_CHG_EN |
+ TVDAC_A_SENSE_CTL |
+ TVDAC_B_SENSE_CTL |
+ TVDAC_C_SENSE_CTL |
+ DAC_CTL_OVERRIDE |
+ DAC_A_0_7_V |
+ DAC_B_0_7_V |
+ DAC_C_0_7_V);
+ I915_WRITE(TV_CTL, tv_ctl);
+ I915_WRITE(TV_DAC, tv_dac);
+ intel_wait_for_vblank(dev);
+ tv_dac = I915_READ(TV_DAC);
+ I915_WRITE(TV_DAC, save_tv_dac);
+ I915_WRITE(TV_CTL, save_tv_ctl);
+ }
+ /*
+ * A B C
+ * 0 1 1 Composite
+ * 1 0 X svideo
+ * 0 0 0 Component
+ */
+ if ((tv_dac & TVDAC_SENSE_MASK) == (TVDAC_B_SENSE | TVDAC_C_SENSE)) {
+ DRM_DEBUG("Detected Composite TV connection\n");
+ type = DRM_MODE_CONNECTOR_Composite;
+ } else if ((tv_dac & (TVDAC_A_SENSE|TVDAC_B_SENSE)) == TVDAC_A_SENSE) {
+ DRM_DEBUG("Detected S-Video TV connection\n");
+ type = DRM_MODE_CONNECTOR_SVIDEO;
+ } else if ((tv_dac & TVDAC_SENSE_MASK) == 0) {
+ DRM_DEBUG("Detected Component TV connection\n");
+ type = DRM_MODE_CONNECTOR_Component;
+ } else {
+ DRM_DEBUG("No TV connection detected\n");
+ type = -1;
+ }
+
+ /* Restore interrupt config */
+ spin_lock_irqsave(&dev_priv->user_irq_lock, irqflags);
+ i915_enable_pipestat(dev_priv, 0, PIPE_HOTPLUG_INTERRUPT_ENABLE |
+ PIPE_HOTPLUG_TV_INTERRUPT_ENABLE);
+ spin_unlock_irqrestore(&dev_priv->user_irq_lock, irqflags);
+
+ return type;
+}
+
+/**
+ * Detect the TV connection.
+ *
+ * Load detection needs a programmed pipe, so this uses the encoder's current
+ * CRTC when one is attached, or temporarily grabs a load-detect pipe.
+ */
+static enum drm_connector_status
+intel_tv_detect(struct drm_connector *connector)
+{
+ struct drm_crtc *crtc;
+ struct drm_display_mode mode;
+ struct intel_output *intel_output = to_intel_output(connector);
+ struct intel_tv_priv *tv_priv = intel_output->dev_priv;
+ struct drm_encoder *encoder = &intel_output->enc;
+ int dpms_mode;
+ int type = tv_priv->type;
+
+ mode = reported_modes[0];
+ drm_mode_set_crtcinfo(&mode, CRTC_INTERLACE_HALVE_V);
+
+ if (encoder->crtc) {
+ type = intel_tv_detect_type(encoder->crtc, intel_output);
+ } else {
+ crtc = intel_get_load_detect_pipe(intel_output, &mode, &dpms_mode);
+ if (crtc) {
+ type = intel_tv_detect_type(crtc, intel_output);
+ intel_release_load_detect_pipe(intel_output, dpms_mode);
+ } else
+ type = -1;
+ }
+
+ if (type < 0)
+ return connector_status_disconnected;
+
+ return connector_status_connected;
+}
+
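+/*
+ * Candidate source resolutions reported to userspace; each entry is checked
+ * against the limits of the selected TV mode in intel_tv_get_modes().
+ */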
+static struct input_res {
+ char *name;
+ int w, h;
+} input_res_table[] =
+{
+ {"640x480", 640, 480},
+ {"800x600", 800, 600},
+ {"1024x768", 1024, 768},
+ {"1280x1024", 1280, 1024},
+ {"848x480", 848, 480},
+ {"1280x720", 1280, 720},
+ {"1920x1080", 1920, 1080},
+};
+
+/**
+ * Build the list of modes reported for the TV connector.
+ *
+ * Modes cannot be probed over a TV connection, so a fixed table of candidate
+ * resolutions is filtered against the limits of the selected TV mode.
+ */
+static int
+intel_tv_get_modes(struct drm_connector *connector)
+{
+ struct drm_display_mode *mode_ptr;
+ struct intel_output *intel_output = to_intel_output(connector);
+ const struct tv_mode *tv_mode = intel_tv_mode_find(intel_output);
+ int j, count = 0;
+
+ if (!tv_mode)
+ return 0;
+
+ for (j = 0; j < ARRAY_SIZE(input_res_table); j++) {
+ struct input_res *input = &input_res_table[j];
+ unsigned int hactive_s = input->w;
+ unsigned int vactive_s = input->h;
+
+ if (tv_mode->max_srcw && input->w > tv_mode->max_srcw)
+ continue;
+
+ if (input->w > 1024 && (!tv_mode->progressive
+ && !tv_mode->component_only))
+ continue;
+
+ mode_ptr = drm_calloc(1, sizeof(struct drm_display_mode),
+ DRM_MEM_DRIVER);
+ if (!mode_ptr)
+ continue;
+ strncpy(mode_ptr->name, input->name, DRM_DISPLAY_MODE_LEN);
+
+ mode_ptr->hdisplay = hactive_s;
+ mode_ptr->hsync_start = hactive_s + 1;
+ mode_ptr->hsync_end = hactive_s + 64;
+ if (mode_ptr->hsync_end <= mode_ptr->hsync_start)
+ mode_ptr->hsync_end = mode_ptr->hsync_start + 1;
+ mode_ptr->htotal = hactive_s + 96;
+
+ mode_ptr->vdisplay = vactive_s;
+ mode_ptr->vsync_start = vactive_s + 1;
+ mode_ptr->vsync_end = vactive_s + 32;
+ if (mode_ptr->vsync_end <= mode_ptr->vsync_start)
+ mode_ptr->vsync_end = mode_ptr->vsync_start + 1;
+ mode_ptr->vtotal = vactive_s + 33;
+
+ mode_ptr->clock = (int) (tv_mode->refresh *
+ mode_ptr->vtotal *
+ mode_ptr->htotal / 1000) / 1000;
+
+ mode_ptr->type = DRM_MODE_TYPE_DRIVER;
+ drm_mode_probed_add(connector, mode_ptr);
+ count++;
+ }
+
+ return count;
+}
+
+static void
+intel_tv_destroy(struct drm_connector *connector)
+{
+ struct intel_output *intel_output = to_intel_output(connector);
+
+ drm_sysfs_connector_remove(connector);
+ drm_connector_cleanup(connector);
+ drm_free(intel_output, sizeof(struct intel_output) + sizeof(struct intel_tv_priv),
+ DRM_MEM_DRIVER);
+}
+
+
+static int
+intel_tv_set_property(struct drm_connector *connector, struct drm_property *property,
+ uint64_t val)
+{
+ struct drm_device *dev = connector->dev;
+ struct intel_output *intel_output = to_intel_output(connector);
+ struct intel_tv_priv *tv_priv = intel_output->dev_priv;
+ int ret = 0;
+
+ ret = drm_connector_property_set_value(connector, property, val);
+ if (ret < 0)
+ goto out;
+
+ if (property == dev->mode_config.tv_left_margin_property)
+ tv_priv->margin[TV_MARGIN_LEFT] = val;
+ else if (property == dev->mode_config.tv_right_margin_property)
+ tv_priv->margin[TV_MARGIN_RIGHT] = val;
+ else if (property == dev->mode_config.tv_top_margin_property)
+ tv_priv->margin[TV_MARGIN_TOP] = val;
+ else if (property == dev->mode_config.tv_bottom_margin_property)
+ tv_priv->margin[TV_MARGIN_BOTTOM] = val;
+ else if (property == dev->mode_config.tv_mode_property) {
+ if (val >= NUM_TV_MODES) {
+ ret = -EINVAL;
+ goto out;
+ }
+ tv_priv->tv_format = tv_modes[val].name;
+ } else {
+ ret = -EINVAL;
+ goto out;
+ }
+
+ intel_tv_mode_set(&intel_output->enc, NULL, NULL);
+out:
+ return ret;
+}
+
+static const struct drm_encoder_helper_funcs intel_tv_helper_funcs = {
+ .dpms = intel_tv_dpms,
+ .mode_fixup = intel_tv_mode_fixup,
+ .prepare = intel_encoder_prepare,
+ .mode_set = intel_tv_mode_set,
+ .commit = intel_encoder_commit,
+};
+
+static const struct drm_connector_funcs intel_tv_connector_funcs = {
+ .save = intel_tv_save,
+ .restore = intel_tv_restore,
+ .detect = intel_tv_detect,
+ .destroy = intel_tv_destroy,
+ .set_property = intel_tv_set_property,
+ .fill_modes = drm_helper_probe_single_connector_modes,
+};
+
+static const struct drm_connector_helper_funcs intel_tv_connector_helper_funcs = {
+ .mode_valid = intel_tv_mode_valid,
+ .get_modes = intel_tv_get_modes,
+ .best_encoder = intel_best_encoder,
+};
+
+static void intel_tv_enc_destroy(struct drm_encoder *encoder)
+{
+ drm_encoder_cleanup(encoder);
+}
+
+static const struct drm_encoder_funcs intel_tv_enc_funcs = {
+ .destroy = intel_tv_enc_destroy,
+};
+
+
+void
+intel_tv_init(struct drm_device *dev)
+{
+ struct drm_i915_private *dev_priv = dev->dev_private;
+ struct drm_connector *connector;
+ struct intel_output *intel_output;
+ struct intel_tv_priv *tv_priv;
+ u32 tv_dac_on, tv_dac_off, save_tv_dac;
+ char **tv_format_names;
+ int i, initial_mode = 0;
+
+ if ((I915_READ(TV_CTL) & TV_FUSE_STATE_MASK) == TV_FUSE_STATE_DISABLED)
+ return;
+
+ /* Even if we have an encoder we may not have a connector */
+ if (!dev_priv->int_tv_support)
+ return;
+
+ /*
+ * Sanity check the TV output by checking to see if the
+ * DAC register holds a value
+ */
+ save_tv_dac = I915_READ(TV_DAC);
+
+ I915_WRITE(TV_DAC, save_tv_dac | TVDAC_STATE_CHG_EN);
+ tv_dac_on = I915_READ(TV_DAC);
+
+ I915_WRITE(TV_DAC, save_tv_dac & ~TVDAC_STATE_CHG_EN);
+ tv_dac_off = I915_READ(TV_DAC);
+
+ I915_WRITE(TV_DAC, save_tv_dac);
+
+ /*
+ * If the register does not hold the state change enable
+ * bit, (either as a 0 or a 1), assume it doesn't really
+ * exist
+ */
+ if ((tv_dac_on & TVDAC_STATE_CHG_EN) == 0 ||
+ (tv_dac_off & TVDAC_STATE_CHG_EN) != 0)
+ return;
+
+ intel_output = drm_calloc(1, sizeof(struct intel_output) +
+ sizeof(struct intel_tv_priv), DRM_MEM_DRIVER);
+ if (!intel_output)
+ return;
+ connector = &intel_output->base;
+
+ drm_connector_init(dev, connector, &intel_tv_connector_funcs,
+ DRM_MODE_CONNECTOR_SVIDEO);
+
+ drm_encoder_init(dev, &intel_output->enc, &intel_tv_enc_funcs,
+ DRM_MODE_ENCODER_TVDAC);
+
+ drm_mode_connector_attach_encoder(&intel_output->base, &intel_output->enc);
+ tv_priv = (struct intel_tv_priv *)(intel_output + 1);
+ intel_output->type = INTEL_OUTPUT_TVOUT;
+ intel_output->enc.possible_crtcs = ((1 << 0) | (1 << 1));
+ intel_output->enc.possible_clones = (1 << INTEL_OUTPUT_TVOUT);
+ intel_output->dev_priv = tv_priv;
+ tv_priv->type = DRM_MODE_CONNECTOR_Unknown;
+
+ /* BIOS margin values */
+ tv_priv->margin[TV_MARGIN_LEFT] = 54;
+ tv_priv->margin[TV_MARGIN_TOP] = 36;
+ tv_priv->margin[TV_MARGIN_RIGHT] = 46;
+ tv_priv->margin[TV_MARGIN_BOTTOM] = 37;
+
+ tv_priv->tv_format = kstrdup(tv_modes[initial_mode].name, GFP_KERNEL);
+
+ drm_encoder_helper_add(&intel_output->enc, &intel_tv_helper_funcs);
+ drm_connector_helper_add(connector, &intel_tv_connector_helper_funcs);
+ connector->interlace_allowed = false;
+ connector->doublescan_allowed = false;
+
+ /* Create TV properties then attach current values */
+ tv_format_names = drm_alloc(sizeof(char *) * NUM_TV_MODES,
+ DRM_MEM_DRIVER);
+ if (!tv_format_names)
+ goto out;
+ for (i = 0; i < NUM_TV_MODES; i++)
+ tv_format_names[i] = tv_modes[i].name;
+ drm_mode_create_tv_properties(dev, NUM_TV_MODES, tv_format_names);
+
+ drm_connector_attach_property(connector, dev->mode_config.tv_mode_property,
+ initial_mode);
+ drm_connector_attach_property(connector,
+ dev->mode_config.tv_left_margin_property,
+ tv_priv->margin[TV_MARGIN_LEFT]);
+ drm_connector_attach_property(connector,
+ dev->mode_config.tv_top_margin_property,
+ tv_priv->margin[TV_MARGIN_TOP]);
+ drm_connector_attach_property(connector,
+ dev->mode_config.tv_right_margin_property,
+ tv_priv->margin[TV_MARGIN_RIGHT]);
+ drm_connector_attach_property(connector,
+ dev->mode_config.tv_bottom_margin_property,
+ tv_priv->margin[TV_MARGIN_BOTTOM]);
+out:
+ drm_sysfs_connector_add(connector);
+}
diff --git a/drivers/gpu/drm/mga/mga_dma.c b/drivers/gpu/drm/mga/mga_dma.c
index c1d12dbfa8d..b49c5ff2958 100644
--- a/drivers/gpu/drm/mga/mga_dma.c
+++ b/drivers/gpu/drm/mga/mga_dma.c
@@ -396,6 +396,7 @@ int mga_freelist_put(struct drm_device * dev, struct drm_buf * buf)
int mga_driver_load(struct drm_device * dev, unsigned long flags)
{
drm_mga_private_t *dev_priv;
+ int ret;
dev_priv = drm_alloc(sizeof(drm_mga_private_t), DRM_MEM_DRIVER);
if (!dev_priv)
@@ -415,6 +416,13 @@ int mga_driver_load(struct drm_device * dev, unsigned long flags)
dev->types[7] = _DRM_STAT_PRIMARY;
dev->types[8] = _DRM_STAT_SECONDARY;
+ ret = drm_vblank_init(dev, 1);
+
+ if (ret) {
+ (void) mga_driver_unload(dev);
+ return ret;
+ }
+
return 0;
}
diff --git a/drivers/gpu/drm/mga/mga_irq.c b/drivers/gpu/drm/mga/mga_irq.c
index bab42f41188..daa6041a483 100644
--- a/drivers/gpu/drm/mga/mga_irq.c
+++ b/drivers/gpu/drm/mga/mga_irq.c
@@ -152,11 +152,6 @@ void mga_driver_irq_preinstall(struct drm_device * dev)
int mga_driver_irq_postinstall(struct drm_device *dev)
{
drm_mga_private_t *dev_priv = (drm_mga_private_t *) dev->dev_private;
- int ret;
-
- ret = drm_vblank_init(dev, 1);
- if (ret)
- return ret;
DRM_INIT_WAITQUEUE(&dev_priv->fence_queue);
diff --git a/drivers/gpu/drm/r128/r128_drv.c b/drivers/gpu/drm/r128/r128_drv.c
index 3265d53ba91..601f4c0e5da 100644
--- a/drivers/gpu/drm/r128/r128_drv.c
+++ b/drivers/gpu/drm/r128/r128_drv.c
@@ -45,6 +45,7 @@ static struct drm_driver driver = {
DRIVER_USE_AGP | DRIVER_USE_MTRR | DRIVER_PCI_DMA | DRIVER_SG |
DRIVER_HAVE_DMA | DRIVER_HAVE_IRQ | DRIVER_IRQ_SHARED,
.dev_priv_size = sizeof(drm_r128_buf_priv_t),
+ .load = r128_driver_load,
.preclose = r128_driver_preclose,
.lastclose = r128_driver_lastclose,
.get_vblank_counter = r128_get_vblank_counter,
@@ -84,6 +85,11 @@ static struct drm_driver driver = {
.patchlevel = DRIVER_PATCHLEVEL,
};
+int r128_driver_load(struct drm_device * dev, unsigned long flags)
+{
+ return drm_vblank_init(dev, 1);
+}
+
static int __init r128_init(void)
{
driver.num_ioctls = r128_max_ioctl;
diff --git a/drivers/gpu/drm/r128/r128_drv.h b/drivers/gpu/drm/r128/r128_drv.h
index 5898b274279..797a26c42da 100644
--- a/drivers/gpu/drm/r128/r128_drv.h
+++ b/drivers/gpu/drm/r128/r128_drv.h
@@ -159,6 +159,7 @@ extern void r128_driver_irq_preinstall(struct drm_device * dev);
extern int r128_driver_irq_postinstall(struct drm_device *dev);
extern void r128_driver_irq_uninstall(struct drm_device * dev);
extern void r128_driver_lastclose(struct drm_device * dev);
+extern int r128_driver_load(struct drm_device * dev, unsigned long flags);
extern void r128_driver_preclose(struct drm_device * dev,
struct drm_file *file_priv);
diff --git a/drivers/gpu/drm/r128/r128_irq.c b/drivers/gpu/drm/r128/r128_irq.c
index d7349012a68..69810fb8ac4 100644
--- a/drivers/gpu/drm/r128/r128_irq.c
+++ b/drivers/gpu/drm/r128/r128_irq.c
@@ -102,7 +102,7 @@ void r128_driver_irq_preinstall(struct drm_device * dev)
int r128_driver_irq_postinstall(struct drm_device *dev)
{
- return drm_vblank_init(dev, 1);
+ return 0;
}
void r128_driver_irq_uninstall(struct drm_device * dev)
diff --git a/drivers/gpu/drm/radeon/r300_cmdbuf.c b/drivers/gpu/drm/radeon/r300_cmdbuf.c
index 4b27d9abb7b..cace3964fee 100644
--- a/drivers/gpu/drm/radeon/r300_cmdbuf.c
+++ b/drivers/gpu/drm/radeon/r300_cmdbuf.c
@@ -860,12 +860,12 @@ static __inline__ void r300_pacify(drm_radeon_private_t *dev_priv)
* The actual age emit is done by r300_do_cp_cmdbuf, which is why you must
* be careful about how this function is called.
*/
-static void r300_discard_buffer(struct drm_device * dev, struct drm_buf * buf)
+static void r300_discard_buffer(struct drm_device *dev, struct drm_master *master, struct drm_buf *buf)
{
- drm_radeon_private_t *dev_priv = dev->dev_private;
drm_radeon_buf_priv_t *buf_priv = buf->dev_private;
+ struct drm_radeon_master_private *master_priv = master->driver_priv;
- buf_priv->age = ++dev_priv->sarea_priv->last_dispatch;
+ buf_priv->age = ++master_priv->sarea_priv->last_dispatch;
buf->pending = 1;
buf->used = 0;
}
@@ -1027,6 +1027,7 @@ int r300_do_cp_cmdbuf(struct drm_device *dev,
drm_radeon_kcmd_buffer_t *cmdbuf)
{
drm_radeon_private_t *dev_priv = dev->dev_private;
+ struct drm_radeon_master_private *master_priv = file_priv->master->driver_priv;
struct drm_device_dma *dma = dev->dma;
struct drm_buf *buf = NULL;
int emit_dispatch_age = 0;
@@ -1134,7 +1135,7 @@ int r300_do_cp_cmdbuf(struct drm_device *dev,
}
emit_dispatch_age = 1;
- r300_discard_buffer(dev, buf);
+ r300_discard_buffer(dev, file_priv->master, buf);
break;
case R300_CMD_WAIT:
@@ -1189,7 +1190,7 @@ int r300_do_cp_cmdbuf(struct drm_device *dev,
/* Emit the vertex buffer age */
BEGIN_RING(2);
- RADEON_DISPATCH_AGE(dev_priv->sarea_priv->last_dispatch);
+ RADEON_DISPATCH_AGE(master_priv->sarea_priv->last_dispatch);
ADVANCE_RING();
}
diff --git a/drivers/gpu/drm/radeon/radeon_cp.c b/drivers/gpu/drm/radeon/radeon_cp.c
index abdc1ae3846..63212d7bbc2 100644
--- a/drivers/gpu/drm/radeon/radeon_cp.c
+++ b/drivers/gpu/drm/radeon/radeon_cp.c
@@ -31,6 +31,7 @@
#include "drmP.h"
#include "drm.h"
+#include "drm_sarea.h"
#include "radeon_drm.h"
#include "radeon_drv.h"
#include "r300_reg.h"
@@ -667,15 +668,14 @@ static void radeon_cp_init_ring_buffer(struct drm_device * dev,
RADEON_WRITE(RADEON_BUS_CNTL, tmp);
} /* PCIE cards appear to not need this */
- dev_priv->sarea_priv->last_frame = dev_priv->scratch[0] = 0;
- RADEON_WRITE(RADEON_LAST_FRAME_REG, dev_priv->sarea_priv->last_frame);
+ dev_priv->scratch[0] = 0;
+ RADEON_WRITE(RADEON_LAST_FRAME_REG, 0);
- dev_priv->sarea_priv->last_dispatch = dev_priv->scratch[1] = 0;
- RADEON_WRITE(RADEON_LAST_DISPATCH_REG,
- dev_priv->sarea_priv->last_dispatch);
+ dev_priv->scratch[1] = 0;
+ RADEON_WRITE(RADEON_LAST_DISPATCH_REG, 0);
- dev_priv->sarea_priv->last_clear = dev_priv->scratch[2] = 0;
- RADEON_WRITE(RADEON_LAST_CLEAR_REG, dev_priv->sarea_priv->last_clear);
+ dev_priv->scratch[2] = 0;
+ RADEON_WRITE(RADEON_LAST_CLEAR_REG, 0);
radeon_do_wait_for_idle(dev_priv);
@@ -871,9 +871,11 @@ static void radeon_set_pcigart(drm_radeon_private_t * dev_priv, int on)
}
}
-static int radeon_do_init_cp(struct drm_device * dev, drm_radeon_init_t * init)
+static int radeon_do_init_cp(struct drm_device *dev, drm_radeon_init_t *init,
+ struct drm_file *file_priv)
{
drm_radeon_private_t *dev_priv = dev->dev_private;
+ struct drm_radeon_master_private *master_priv = file_priv->master->driver_priv;
DRM_DEBUG("\n");
@@ -998,8 +1000,8 @@ static int radeon_do_init_cp(struct drm_device * dev, drm_radeon_init_t * init)
dev_priv->buffers_offset = init->buffers_offset;
dev_priv->gart_textures_offset = init->gart_textures_offset;
- dev_priv->sarea = drm_getsarea(dev);
- if (!dev_priv->sarea) {
+ master_priv->sarea = drm_getsarea(dev);
+ if (!master_priv->sarea) {
DRM_ERROR("could not find sarea!\n");
radeon_do_cleanup_cp(dev);
return -EINVAL;
@@ -1035,10 +1037,6 @@ static int radeon_do_init_cp(struct drm_device * dev, drm_radeon_init_t * init)
}
}
- dev_priv->sarea_priv =
- (drm_radeon_sarea_t *) ((u8 *) dev_priv->sarea->handle +
- init->sarea_priv_offset);
-
#if __OS_HAS_AGP
if (dev_priv->flags & RADEON_IS_AGP) {
drm_core_ioremap(dev_priv->cp_ring, dev);
@@ -1329,7 +1327,7 @@ int radeon_cp_init(struct drm_device *dev, void *data, struct drm_file *file_pri
case RADEON_INIT_CP:
case RADEON_INIT_R200_CP:
case RADEON_INIT_R300_CP:
- return radeon_do_init_cp(dev, init);
+ return radeon_do_init_cp(dev, init, file_priv);
case RADEON_CLEANUP_CP:
return radeon_do_cleanup_cp(dev);
}
@@ -1757,11 +1755,62 @@ int radeon_driver_load(struct drm_device *dev, unsigned long flags)
if (ret != 0)
return ret;
+ ret = drm_vblank_init(dev, 2);
+ if (ret) {
+ radeon_driver_unload(dev);
+ return ret;
+ }
+
DRM_DEBUG("%s card detected\n",
((dev_priv->flags & RADEON_IS_AGP) ? "AGP" : (((dev_priv->flags & RADEON_IS_PCIE) ? "PCIE" : "PCI"))));
return ret;
}
+int radeon_master_create(struct drm_device *dev, struct drm_master *master)
+{
+ struct drm_radeon_master_private *master_priv;
+ unsigned long sareapage;
+ int ret;
+
+ master_priv = drm_calloc(1, sizeof(*master_priv), DRM_MEM_DRIVER);
+ if (!master_priv)
+ return -ENOMEM;
+
+ /* prebuild the SAREA */
+ sareapage = max_t(unsigned long, SAREA_MAX, PAGE_SIZE);
+ ret = drm_addmap(dev, 0, sareapage, _DRM_SHM, _DRM_CONTAINS_LOCK|_DRM_DRIVER,
+ &master_priv->sarea);
+ if (ret) {
+ DRM_ERROR("SAREA setup failed\n");
+ return ret;
+ }
+ master_priv->sarea_priv = master_priv->sarea->handle + sizeof(struct drm_sarea);
+ master_priv->sarea_priv->pfCurrentPage = 0;
+
+ master->driver_priv = master_priv;
+ return 0;
+}
+
+void radeon_master_destroy(struct drm_device *dev, struct drm_master *master)
+{
+ struct drm_radeon_master_private *master_priv = master->driver_priv;
+
+ if (!master_priv)
+ return;
+
+ if (master_priv->sarea_priv &&
+ master_priv->sarea_priv->pfCurrentPage != 0)
+ radeon_cp_dispatch_flip(dev, master);
+
+ master_priv->sarea_priv = NULL;
+ if (master_priv->sarea)
+ drm_rmmap_locked(dev, master_priv->sarea);
+
+ drm_free(master_priv, sizeof(*master_priv), DRM_MEM_DRIVER);
+
+ master->driver_priv = NULL;
+}
+
/* Create mappings for registers and framebuffer so userland doesn't necessarily
* have to find them.
*/
diff --git a/drivers/gpu/drm/radeon/radeon_drv.c b/drivers/gpu/drm/radeon/radeon_drv.c
index 71af746a4e4..fef207881f4 100644
--- a/drivers/gpu/drm/radeon/radeon_drv.c
+++ b/drivers/gpu/drm/radeon/radeon_drv.c
@@ -96,6 +96,8 @@ static struct drm_driver driver = {
.enable_vblank = radeon_enable_vblank,
.disable_vblank = radeon_disable_vblank,
.dri_library_name = dri_library_name,
+ .master_create = radeon_master_create,
+ .master_destroy = radeon_master_destroy,
.irq_preinstall = radeon_driver_irq_preinstall,
.irq_postinstall = radeon_driver_irq_postinstall,
.irq_uninstall = radeon_driver_irq_uninstall,
diff --git a/drivers/gpu/drm/radeon/radeon_drv.h b/drivers/gpu/drm/radeon/radeon_drv.h
index 7a183789be9..490bc7ceef6 100644
--- a/drivers/gpu/drm/radeon/radeon_drv.h
+++ b/drivers/gpu/drm/radeon/radeon_drv.h
@@ -226,9 +226,13 @@ struct radeon_virt_surface {
#define RADEON_FLUSH_EMITED (1 < 0)
#define RADEON_PURGE_EMITED (1 < 1)
+struct drm_radeon_master_private {
+ drm_local_map_t *sarea;
+ drm_radeon_sarea_t *sarea_priv;
+};
+
typedef struct drm_radeon_private {
drm_radeon_ring_buffer_t ring;
- drm_radeon_sarea_t *sarea_priv;
u32 fb_location;
u32 fb_size;
@@ -299,7 +303,6 @@ typedef struct drm_radeon_private {
atomic_t swi_emitted;
int vblank_crtc;
uint32_t irq_enable_reg;
- int irq_enabled;
uint32_t r500_disp_irq_reg;
struct radeon_surface surfaces[RADEON_MAX_SURFACES];
@@ -410,6 +413,9 @@ extern int radeon_driver_open(struct drm_device *dev,
extern long radeon_compat_ioctl(struct file *filp, unsigned int cmd,
unsigned long arg);
+extern int radeon_master_create(struct drm_device *dev, struct drm_master *master);
+extern void radeon_master_destroy(struct drm_device *dev, struct drm_master *master);
+extern void radeon_cp_dispatch_flip(struct drm_device *dev, struct drm_master *master);
/* r300_cmdbuf.c */
extern void r300_init_reg_flags(struct drm_device *dev);
@@ -1336,8 +1342,9 @@ do { \
} while (0)
#define VB_AGE_TEST_WITH_RETURN( dev_priv ) \
-do { \
- drm_radeon_sarea_t *sarea_priv = dev_priv->sarea_priv; \
+do { \
+ struct drm_radeon_master_private *master_priv = file_priv->master->driver_priv; \
+ drm_radeon_sarea_t *sarea_priv = master_priv->sarea_priv; \
if ( sarea_priv->last_dispatch >= RADEON_MAX_VB_AGE ) { \
int __ret = radeon_do_cp_idle( dev_priv ); \
if ( __ret ) return __ret; \
diff --git a/drivers/gpu/drm/radeon/radeon_irq.c b/drivers/gpu/drm/radeon/radeon_irq.c
index 5079f7054a2..8289e16419a 100644
--- a/drivers/gpu/drm/radeon/radeon_irq.c
+++ b/drivers/gpu/drm/radeon/radeon_irq.c
@@ -44,7 +44,8 @@ void radeon_irq_set_state(struct drm_device *dev, u32 mask, int state)
else
dev_priv->irq_enable_reg &= ~mask;
- RADEON_WRITE(RADEON_GEN_INT_CNTL, dev_priv->irq_enable_reg);
+ if (dev->irq_enabled)
+ RADEON_WRITE(RADEON_GEN_INT_CNTL, dev_priv->irq_enable_reg);
}
static void r500_vbl_irq_set_state(struct drm_device *dev, u32 mask, int state)
@@ -56,7 +57,8 @@ static void r500_vbl_irq_set_state(struct drm_device *dev, u32 mask, int state)
else
dev_priv->r500_disp_irq_reg &= ~mask;
- RADEON_WRITE(R500_DxMODE_INT_MASK, dev_priv->r500_disp_irq_reg);
+ if (dev->irq_enabled)
+ RADEON_WRITE(R500_DxMODE_INT_MASK, dev_priv->r500_disp_irq_reg);
}
int radeon_enable_vblank(struct drm_device *dev, int crtc)
@@ -337,15 +339,10 @@ int radeon_driver_irq_postinstall(struct drm_device *dev)
{
drm_radeon_private_t *dev_priv =
(drm_radeon_private_t *) dev->dev_private;
- int ret;
atomic_set(&dev_priv->swi_emitted, 0);
DRM_INIT_WAITQUEUE(&dev_priv->swi_queue);
- ret = drm_vblank_init(dev, 2);
- if (ret)
- return ret;
-
dev->max_vblank_count = 0x001fffff;
radeon_irq_set_state(dev, RADEON_SW_INT_ENABLE, 1);
@@ -360,8 +357,6 @@ void radeon_driver_irq_uninstall(struct drm_device * dev)
if (!dev_priv)
return;
- dev_priv->irq_enabled = 0;
-
if ((dev_priv->flags & RADEON_FAMILY_MASK) >= CHIP_RS690)
RADEON_WRITE(R500_DxMODE_INT_MASK, 0);
/* Disable *all* interrupts */
diff --git a/drivers/gpu/drm/radeon/radeon_state.c b/drivers/gpu/drm/radeon/radeon_state.c
index 5d7153fcc7b..ef940a079dc 100644
--- a/drivers/gpu/drm/radeon/radeon_state.c
+++ b/drivers/gpu/drm/radeon/radeon_state.c
@@ -742,13 +742,14 @@ static struct {
*/
static void radeon_clear_box(drm_radeon_private_t * dev_priv,
+ struct drm_radeon_master_private *master_priv,
int x, int y, int w, int h, int r, int g, int b)
{
u32 color;
RING_LOCALS;
- x += dev_priv->sarea_priv->boxes[0].x1;
- y += dev_priv->sarea_priv->boxes[0].y1;
+ x += master_priv->sarea_priv->boxes[0].x1;
+ y += master_priv->sarea_priv->boxes[0].y1;
switch (dev_priv->color_fmt) {
case RADEON_COLOR_FORMAT_RGB565:
@@ -776,7 +777,7 @@ static void radeon_clear_box(drm_radeon_private_t * dev_priv,
RADEON_GMC_SRC_DATATYPE_COLOR |
RADEON_ROP3_P | RADEON_GMC_CLR_CMP_CNTL_DIS);
- if (dev_priv->sarea_priv->pfCurrentPage == 1) {
+ if (master_priv->sarea_priv->pfCurrentPage == 1) {
OUT_RING(dev_priv->front_pitch_offset);
} else {
OUT_RING(dev_priv->back_pitch_offset);
@@ -790,7 +791,7 @@ static void radeon_clear_box(drm_radeon_private_t * dev_priv,
ADVANCE_RING();
}
-static void radeon_cp_performance_boxes(drm_radeon_private_t * dev_priv)
+static void radeon_cp_performance_boxes(drm_radeon_private_t *dev_priv, struct drm_radeon_master_private *master_priv)
{
/* Collapse various things into a wait flag -- trying to
* guess if userspace slept -- better just to have them tell us.
@@ -807,12 +808,12 @@ static void radeon_cp_performance_boxes(drm_radeon_private_t * dev_priv)
/* Purple box for page flipping
*/
if (dev_priv->stats.boxes & RADEON_BOX_FLIP)
- radeon_clear_box(dev_priv, 4, 4, 8, 8, 255, 0, 255);
+ radeon_clear_box(dev_priv, master_priv, 4, 4, 8, 8, 255, 0, 255);
/* Red box if we have to wait for idle at any point
*/
if (dev_priv->stats.boxes & RADEON_BOX_WAIT_IDLE)
- radeon_clear_box(dev_priv, 16, 4, 8, 8, 255, 0, 0);
+ radeon_clear_box(dev_priv, master_priv, 16, 4, 8, 8, 255, 0, 0);
/* Blue box: lost context?
*/
@@ -820,12 +821,12 @@ static void radeon_cp_performance_boxes(drm_radeon_private_t * dev_priv)
/* Yellow box for texture swaps
*/
if (dev_priv->stats.boxes & RADEON_BOX_TEXTURE_LOAD)
- radeon_clear_box(dev_priv, 40, 4, 8, 8, 255, 255, 0);
+ radeon_clear_box(dev_priv, master_priv, 40, 4, 8, 8, 255, 255, 0);
/* Green box if hardware never idles (as far as we can tell)
*/
if (!(dev_priv->stats.boxes & RADEON_BOX_DMA_IDLE))
- radeon_clear_box(dev_priv, 64, 4, 8, 8, 0, 255, 0);
+ radeon_clear_box(dev_priv, master_priv, 64, 4, 8, 8, 0, 255, 0);
/* Draw bars indicating number of buffers allocated
* (not a great measure, easily confused)
@@ -834,7 +835,7 @@ static void radeon_cp_performance_boxes(drm_radeon_private_t * dev_priv)
if (dev_priv->stats.requested_bufs > 100)
dev_priv->stats.requested_bufs = 100;
- radeon_clear_box(dev_priv, 4, 16,
+ radeon_clear_box(dev_priv, master_priv, 4, 16,
dev_priv->stats.requested_bufs, 4,
196, 128, 128);
}
@@ -848,11 +849,13 @@ static void radeon_cp_performance_boxes(drm_radeon_private_t * dev_priv)
*/
static void radeon_cp_dispatch_clear(struct drm_device * dev,
+ struct drm_master *master,
drm_radeon_clear_t * clear,
drm_radeon_clear_rect_t * depth_boxes)
{
drm_radeon_private_t *dev_priv = dev->dev_private;
- drm_radeon_sarea_t *sarea_priv = dev_priv->sarea_priv;
+ struct drm_radeon_master_private *master_priv = master->driver_priv;
+ drm_radeon_sarea_t *sarea_priv = master_priv->sarea_priv;
drm_radeon_depth_clear_t *depth_clear = &dev_priv->depth_clear;
int nbox = sarea_priv->nbox;
struct drm_clip_rect *pbox = sarea_priv->boxes;
@@ -864,7 +867,7 @@ static void radeon_cp_dispatch_clear(struct drm_device * dev,
dev_priv->stats.clears++;
- if (dev_priv->sarea_priv->pfCurrentPage == 1) {
+ if (sarea_priv->pfCurrentPage == 1) {
unsigned int tmp = flags;
flags &= ~(RADEON_FRONT | RADEON_BACK);
@@ -890,7 +893,7 @@ static void radeon_cp_dispatch_clear(struct drm_device * dev,
/* Make sure we restore the 3D state next time.
*/
- dev_priv->sarea_priv->ctx_owner = 0;
+ sarea_priv->ctx_owner = 0;
for (i = 0; i < nbox; i++) {
int x = pbox[i].x1;
@@ -967,7 +970,7 @@ static void radeon_cp_dispatch_clear(struct drm_device * dev,
/* Make sure we restore the 3D state next time.
* we haven't touched any "normal" state - still need this?
*/
- dev_priv->sarea_priv->ctx_owner = 0;
+ sarea_priv->ctx_owner = 0;
if ((dev_priv->flags & RADEON_HAS_HIERZ)
&& (flags & RADEON_USE_HIERZ)) {
@@ -1214,7 +1217,7 @@ static void radeon_cp_dispatch_clear(struct drm_device * dev,
/* Make sure we restore the 3D state next time.
*/
- dev_priv->sarea_priv->ctx_owner = 0;
+ sarea_priv->ctx_owner = 0;
for (i = 0; i < nbox; i++) {
@@ -1285,7 +1288,7 @@ static void radeon_cp_dispatch_clear(struct drm_device * dev,
/* Make sure we restore the 3D state next time.
*/
- dev_priv->sarea_priv->ctx_owner = 0;
+ sarea_priv->ctx_owner = 0;
for (i = 0; i < nbox; i++) {
@@ -1328,20 +1331,21 @@ static void radeon_cp_dispatch_clear(struct drm_device * dev,
* wait on this value before performing the clear ioctl. We
* need this because the card's so damned fast...
*/
- dev_priv->sarea_priv->last_clear++;
+ sarea_priv->last_clear++;
BEGIN_RING(4);
- RADEON_CLEAR_AGE(dev_priv->sarea_priv->last_clear);
+ RADEON_CLEAR_AGE(sarea_priv->last_clear);
RADEON_WAIT_UNTIL_IDLE();
ADVANCE_RING();
}
-static void radeon_cp_dispatch_swap(struct drm_device * dev)
+static void radeon_cp_dispatch_swap(struct drm_device *dev, struct drm_master *master)
{
drm_radeon_private_t *dev_priv = dev->dev_private;
- drm_radeon_sarea_t *sarea_priv = dev_priv->sarea_priv;
+ struct drm_radeon_master_private *master_priv = master->driver_priv;
+ drm_radeon_sarea_t *sarea_priv = master_priv->sarea_priv;
int nbox = sarea_priv->nbox;
struct drm_clip_rect *pbox = sarea_priv->boxes;
int i;
@@ -1351,7 +1355,7 @@ static void radeon_cp_dispatch_swap(struct drm_device * dev)
/* Do some trivial performance monitoring...
*/
if (dev_priv->do_boxes)
- radeon_cp_performance_boxes(dev_priv);
+ radeon_cp_performance_boxes(dev_priv, master_priv);
/* Wait for the 3D stream to idle before dispatching the bitblt.
* This will prevent data corruption between the two streams.
@@ -1385,7 +1389,7 @@ static void radeon_cp_dispatch_swap(struct drm_device * dev)
/* Make this work even if front & back are flipped:
*/
OUT_RING(CP_PACKET0(RADEON_SRC_PITCH_OFFSET, 1));
- if (dev_priv->sarea_priv->pfCurrentPage == 0) {
+ if (sarea_priv->pfCurrentPage == 0) {
OUT_RING(dev_priv->back_pitch_offset);
OUT_RING(dev_priv->front_pitch_offset);
} else {
@@ -1405,31 +1409,32 @@ static void radeon_cp_dispatch_swap(struct drm_device * dev)
* throttle the framerate by waiting for this value before
* performing the swapbuffer ioctl.
*/
- dev_priv->sarea_priv->last_frame++;
+ sarea_priv->last_frame++;
BEGIN_RING(4);
- RADEON_FRAME_AGE(dev_priv->sarea_priv->last_frame);
+ RADEON_FRAME_AGE(sarea_priv->last_frame);
RADEON_WAIT_UNTIL_2D_IDLE();
ADVANCE_RING();
}
-static void radeon_cp_dispatch_flip(struct drm_device * dev)
+void radeon_cp_dispatch_flip(struct drm_device *dev, struct drm_master *master)
{
drm_radeon_private_t *dev_priv = dev->dev_private;
- struct drm_sarea *sarea = (struct drm_sarea *) dev_priv->sarea->handle;
- int offset = (dev_priv->sarea_priv->pfCurrentPage == 1)
+ struct drm_radeon_master_private *master_priv = master->driver_priv;
+ struct drm_sarea *sarea = (struct drm_sarea *)master_priv->sarea->handle;
+ int offset = (master_priv->sarea_priv->pfCurrentPage == 1)
? dev_priv->front_offset : dev_priv->back_offset;
RING_LOCALS;
DRM_DEBUG("pfCurrentPage=%d\n",
- dev_priv->sarea_priv->pfCurrentPage);
+ master_priv->sarea_priv->pfCurrentPage);
/* Do some trivial performance monitoring...
*/
if (dev_priv->do_boxes) {
dev_priv->stats.boxes |= RADEON_BOX_FLIP;
- radeon_cp_performance_boxes(dev_priv);
+ radeon_cp_performance_boxes(dev_priv, master_priv);
}
/* Update the frame offsets for both CRTCs
@@ -1441,7 +1446,7 @@ static void radeon_cp_dispatch_flip(struct drm_device * dev)
((sarea->frame.y * dev_priv->front_pitch +
sarea->frame.x * (dev_priv->color_fmt - 2)) & ~7)
+ offset);
- OUT_RING_REG(RADEON_CRTC2_OFFSET, dev_priv->sarea_priv->crtc2_base
+ OUT_RING_REG(RADEON_CRTC2_OFFSET, master_priv->sarea_priv->crtc2_base
+ offset);
ADVANCE_RING();
@@ -1450,13 +1455,13 @@ static void radeon_cp_dispatch_flip(struct drm_device * dev)
* throttle the framerate by waiting for this value before
* performing the swapbuffer ioctl.
*/
- dev_priv->sarea_priv->last_frame++;
- dev_priv->sarea_priv->pfCurrentPage =
- 1 - dev_priv->sarea_priv->pfCurrentPage;
+ master_priv->sarea_priv->last_frame++;
+ master_priv->sarea_priv->pfCurrentPage =
+ 1 - master_priv->sarea_priv->pfCurrentPage;
BEGIN_RING(2);
- RADEON_FRAME_AGE(dev_priv->sarea_priv->last_frame);
+ RADEON_FRAME_AGE(master_priv->sarea_priv->last_frame);
ADVANCE_RING();
}
@@ -1494,11 +1499,13 @@ typedef struct {
} drm_radeon_tcl_prim_t;
static void radeon_cp_dispatch_vertex(struct drm_device * dev,
+ struct drm_file *file_priv,
struct drm_buf * buf,
drm_radeon_tcl_prim_t * prim)
{
drm_radeon_private_t *dev_priv = dev->dev_private;
- drm_radeon_sarea_t *sarea_priv = dev_priv->sarea_priv;
+ struct drm_radeon_master_private *master_priv = file_priv->master->driver_priv;
+ drm_radeon_sarea_t *sarea_priv = master_priv->sarea_priv;
int offset = dev_priv->gart_buffers_offset + buf->offset + prim->start;
int numverts = (int)prim->numverts;
int nbox = sarea_priv->nbox;
@@ -1539,13 +1546,14 @@ static void radeon_cp_dispatch_vertex(struct drm_device * dev,
} while (i < nbox);
}
-static void radeon_cp_discard_buffer(struct drm_device * dev, struct drm_buf * buf)
+static void radeon_cp_discard_buffer(struct drm_device *dev, struct drm_master *master, struct drm_buf *buf)
{
drm_radeon_private_t *dev_priv = dev->dev_private;
+ struct drm_radeon_master_private *master_priv = master->driver_priv;
drm_radeon_buf_priv_t *buf_priv = buf->dev_private;
RING_LOCALS;
- buf_priv->age = ++dev_priv->sarea_priv->last_dispatch;
+ buf_priv->age = ++master_priv->sarea_priv->last_dispatch;
/* Emit the vertex buffer age */
BEGIN_RING(2);
@@ -1590,12 +1598,14 @@ static void radeon_cp_dispatch_indirect(struct drm_device * dev,
}
}
-static void radeon_cp_dispatch_indices(struct drm_device * dev,
+static void radeon_cp_dispatch_indices(struct drm_device *dev,
+ struct drm_master *master,
struct drm_buf * elt_buf,
drm_radeon_tcl_prim_t * prim)
{
drm_radeon_private_t *dev_priv = dev->dev_private;
- drm_radeon_sarea_t *sarea_priv = dev_priv->sarea_priv;
+ struct drm_radeon_master_private *master_priv = master->driver_priv;
+ drm_radeon_sarea_t *sarea_priv = master_priv->sarea_priv;
int offset = dev_priv->gart_buffers_offset + prim->offset;
u32 *data;
int dwords;
@@ -1870,7 +1880,7 @@ static int radeon_cp_dispatch_texture(struct drm_device * dev,
ADVANCE_RING();
COMMIT_RING();
- radeon_cp_discard_buffer(dev, buf);
+ radeon_cp_discard_buffer(dev, file_priv->master, buf);
/* Update the input parameters for next time */
image->y += height;
@@ -2110,7 +2120,8 @@ static int radeon_surface_free(struct drm_device *dev, void *data, struct drm_fi
static int radeon_cp_clear(struct drm_device *dev, void *data, struct drm_file *file_priv)
{
drm_radeon_private_t *dev_priv = dev->dev_private;
- drm_radeon_sarea_t *sarea_priv = dev_priv->sarea_priv;
+ struct drm_radeon_master_private *master_priv = file_priv->master->driver_priv;
+ drm_radeon_sarea_t *sarea_priv = master_priv->sarea_priv;
drm_radeon_clear_t *clear = data;
drm_radeon_clear_rect_t depth_boxes[RADEON_NR_SAREA_CLIPRECTS];
DRM_DEBUG("\n");
@@ -2126,7 +2137,7 @@ static int radeon_cp_clear(struct drm_device *dev, void *data, struct drm_file *
sarea_priv->nbox * sizeof(depth_boxes[0])))
return -EFAULT;
- radeon_cp_dispatch_clear(dev, clear, depth_boxes);
+ radeon_cp_dispatch_clear(dev, file_priv->master, clear, depth_boxes);
COMMIT_RING();
return 0;
@@ -2134,9 +2145,10 @@ static int radeon_cp_clear(struct drm_device *dev, void *data, struct drm_file *
/* Not sure why this isn't set all the time:
*/
-static int radeon_do_init_pageflip(struct drm_device * dev)
+static int radeon_do_init_pageflip(struct drm_device *dev, struct drm_master *master)
{
drm_radeon_private_t *dev_priv = dev->dev_private;
+ struct drm_radeon_master_private *master_priv = master->driver_priv;
RING_LOCALS;
DRM_DEBUG("\n");
@@ -2153,8 +2165,8 @@ static int radeon_do_init_pageflip(struct drm_device * dev)
dev_priv->page_flipping = 1;
- if (dev_priv->sarea_priv->pfCurrentPage != 1)
- dev_priv->sarea_priv->pfCurrentPage = 0;
+ if (master_priv->sarea_priv->pfCurrentPage != 1)
+ master_priv->sarea_priv->pfCurrentPage = 0;
return 0;
}
@@ -2172,9 +2184,9 @@ static int radeon_cp_flip(struct drm_device *dev, void *data, struct drm_file *f
RING_SPACE_TEST_WITH_RETURN(dev_priv);
if (!dev_priv->page_flipping)
- radeon_do_init_pageflip(dev);
+ radeon_do_init_pageflip(dev, file_priv->master);
- radeon_cp_dispatch_flip(dev);
+ radeon_cp_dispatch_flip(dev, file_priv->master);
COMMIT_RING();
return 0;
@@ -2183,7 +2195,9 @@ static int radeon_cp_flip(struct drm_device *dev, void *data, struct drm_file *f
static int radeon_cp_swap(struct drm_device *dev, void *data, struct drm_file *file_priv)
{
drm_radeon_private_t *dev_priv = dev->dev_private;
- drm_radeon_sarea_t *sarea_priv = dev_priv->sarea_priv;
+ struct drm_radeon_master_private *master_priv = file_priv->master->driver_priv;
+ drm_radeon_sarea_t *sarea_priv = master_priv->sarea_priv;
+
DRM_DEBUG("\n");
LOCK_TEST_WITH_RETURN(dev, file_priv);
@@ -2193,8 +2207,8 @@ static int radeon_cp_swap(struct drm_device *dev, void *data, struct drm_file *f
if (sarea_priv->nbox > RADEON_NR_SAREA_CLIPRECTS)
sarea_priv->nbox = RADEON_NR_SAREA_CLIPRECTS;
- radeon_cp_dispatch_swap(dev);
- dev_priv->sarea_priv->ctx_owner = 0;
+ radeon_cp_dispatch_swap(dev, file_priv->master);
+ sarea_priv->ctx_owner = 0;
COMMIT_RING();
return 0;
@@ -2203,7 +2217,8 @@ static int radeon_cp_swap(struct drm_device *dev, void *data, struct drm_file *f
static int radeon_cp_vertex(struct drm_device *dev, void *data, struct drm_file *file_priv)
{
drm_radeon_private_t *dev_priv = dev->dev_private;
- drm_radeon_sarea_t *sarea_priv = dev_priv->sarea_priv;
+ struct drm_radeon_master_private *master_priv = file_priv->master->driver_priv;
+ drm_radeon_sarea_t *sarea_priv;
struct drm_device_dma *dma = dev->dma;
struct drm_buf *buf;
drm_radeon_vertex_t *vertex = data;
@@ -2211,6 +2226,8 @@ static int radeon_cp_vertex(struct drm_device *dev, void *data, struct drm_file
LOCK_TEST_WITH_RETURN(dev, file_priv);
+ sarea_priv = master_priv->sarea_priv;
+
DRM_DEBUG("pid=%d index=%d count=%d discard=%d\n",
DRM_CURRENTPID, vertex->idx, vertex->count, vertex->discard);
@@ -2263,13 +2280,13 @@ static int radeon_cp_vertex(struct drm_device *dev, void *data, struct drm_file
prim.finish = vertex->count; /* unused */
prim.prim = vertex->prim;
prim.numverts = vertex->count;
- prim.vc_format = dev_priv->sarea_priv->vc_format;
+ prim.vc_format = sarea_priv->vc_format;
- radeon_cp_dispatch_vertex(dev, buf, &prim);
+ radeon_cp_dispatch_vertex(dev, file_priv, buf, &prim);
}
if (vertex->discard) {
- radeon_cp_discard_buffer(dev, buf);
+ radeon_cp_discard_buffer(dev, file_priv->master, buf);
}
COMMIT_RING();
@@ -2279,7 +2296,8 @@ static int radeon_cp_vertex(struct drm_device *dev, void *data, struct drm_file
static int radeon_cp_indices(struct drm_device *dev, void *data, struct drm_file *file_priv)
{
drm_radeon_private_t *dev_priv = dev->dev_private;
- drm_radeon_sarea_t *sarea_priv = dev_priv->sarea_priv;
+ struct drm_radeon_master_private *master_priv = file_priv->master->driver_priv;
+ drm_radeon_sarea_t *sarea_priv;
struct drm_device_dma *dma = dev->dma;
struct drm_buf *buf;
drm_radeon_indices_t *elts = data;
@@ -2288,6 +2306,8 @@ static int radeon_cp_indices(struct drm_device *dev, void *data, struct drm_file
LOCK_TEST_WITH_RETURN(dev, file_priv);
+ sarea_priv = master_priv->sarea_priv;
+
DRM_DEBUG("pid=%d index=%d start=%d end=%d discard=%d\n",
DRM_CURRENTPID, elts->idx, elts->start, elts->end,
elts->discard);
@@ -2353,11 +2373,11 @@ static int radeon_cp_indices(struct drm_device *dev, void *data, struct drm_file
prim.prim = elts->prim;
prim.offset = 0; /* offset from start of dma buffers */
prim.numverts = RADEON_MAX_VB_VERTS; /* duh */
- prim.vc_format = dev_priv->sarea_priv->vc_format;
+ prim.vc_format = sarea_priv->vc_format;
- radeon_cp_dispatch_indices(dev, buf, &prim);
+ radeon_cp_dispatch_indices(dev, file_priv->master, buf, &prim);
if (elts->discard) {
- radeon_cp_discard_buffer(dev, buf);
+ radeon_cp_discard_buffer(dev, file_priv->master, buf);
}
COMMIT_RING();
@@ -2468,7 +2488,7 @@ static int radeon_cp_indirect(struct drm_device *dev, void *data, struct drm_fil
*/
radeon_cp_dispatch_indirect(dev, buf, indirect->start, indirect->end);
if (indirect->discard) {
- radeon_cp_discard_buffer(dev, buf);
+ radeon_cp_discard_buffer(dev, file_priv->master, buf);
}
COMMIT_RING();
@@ -2478,7 +2498,8 @@ static int radeon_cp_indirect(struct drm_device *dev, void *data, struct drm_fil
static int radeon_cp_vertex2(struct drm_device *dev, void *data, struct drm_file *file_priv)
{
drm_radeon_private_t *dev_priv = dev->dev_private;
- drm_radeon_sarea_t *sarea_priv = dev_priv->sarea_priv;
+ struct drm_radeon_master_private *master_priv = file_priv->master->driver_priv;
+ drm_radeon_sarea_t *sarea_priv;
struct drm_device_dma *dma = dev->dma;
struct drm_buf *buf;
drm_radeon_vertex2_t *vertex = data;
@@ -2487,6 +2508,8 @@ static int radeon_cp_vertex2(struct drm_device *dev, void *data, struct drm_file
LOCK_TEST_WITH_RETURN(dev, file_priv);
+ sarea_priv = master_priv->sarea_priv;
+
DRM_DEBUG("pid=%d index=%d discard=%d\n",
DRM_CURRENTPID, vertex->idx, vertex->discard);
@@ -2547,12 +2570,12 @@ static int radeon_cp_vertex2(struct drm_device *dev, void *data, struct drm_file
tclprim.offset = prim.numverts * 64;
tclprim.numverts = RADEON_MAX_VB_VERTS; /* duh */
- radeon_cp_dispatch_indices(dev, buf, &tclprim);
+ radeon_cp_dispatch_indices(dev, file_priv->master, buf, &tclprim);
} else {
tclprim.numverts = prim.numverts;
tclprim.offset = 0; /* not used */
- radeon_cp_dispatch_vertex(dev, buf, &tclprim);
+ radeon_cp_dispatch_vertex(dev, file_priv, buf, &tclprim);
}
if (sarea_priv->nbox == 1)
@@ -2560,7 +2583,7 @@ static int radeon_cp_vertex2(struct drm_device *dev, void *data, struct drm_file
}
if (vertex->discard) {
- radeon_cp_discard_buffer(dev, buf);
+ radeon_cp_discard_buffer(dev, file_priv->master, buf);
}
COMMIT_RING();
@@ -2909,7 +2932,7 @@ static int radeon_cp_cmdbuf(struct drm_device *dev, void *data, struct drm_file
goto err;
}
- radeon_cp_discard_buffer(dev, buf);
+ radeon_cp_discard_buffer(dev, file_priv->master, buf);
break;
case RADEON_CMD_PACKET3:
@@ -3020,7 +3043,7 @@ static int radeon_cp_getparam(struct drm_device *dev, void *data, struct drm_fil
*/
case RADEON_PARAM_SAREA_HANDLE:
/* The lock is the first dword in the sarea. */
- value = (long)dev->lock.hw_lock;
+ /* no users of this parameter */
break;
#endif
case RADEON_PARAM_GART_TEX_HANDLE:
@@ -3064,6 +3087,7 @@ static int radeon_cp_getparam(struct drm_device *dev, void *data, struct drm_fil
static int radeon_cp_setparam(struct drm_device *dev, void *data, struct drm_file *file_priv)
{
drm_radeon_private_t *dev_priv = dev->dev_private;
+ struct drm_radeon_master_private *master_priv = file_priv->master->driver_priv;
drm_radeon_setparam_t *sp = data;
struct drm_radeon_driver_file_fields *radeon_priv;
@@ -3078,12 +3102,14 @@ static int radeon_cp_setparam(struct drm_device *dev, void *data, struct drm_fil
DRM_DEBUG("color tiling disabled\n");
dev_priv->front_pitch_offset &= ~RADEON_DST_TILE_MACRO;
dev_priv->back_pitch_offset &= ~RADEON_DST_TILE_MACRO;
- dev_priv->sarea_priv->tiling_enabled = 0;
+ if (master_priv->sarea_priv)
+ master_priv->sarea_priv->tiling_enabled = 0;
} else if (sp->value == 1) {
DRM_DEBUG("color tiling enabled\n");
dev_priv->front_pitch_offset |= RADEON_DST_TILE_MACRO;
dev_priv->back_pitch_offset |= RADEON_DST_TILE_MACRO;
- dev_priv->sarea_priv->tiling_enabled = 1;
+ if (master_priv->sarea_priv)
+ master_priv->sarea_priv->tiling_enabled = 1;
}
break;
case RADEON_SETPARAM_PCIGART_LOCATION:
@@ -3129,14 +3155,6 @@ void radeon_driver_preclose(struct drm_device *dev, struct drm_file *file_priv)
void radeon_driver_lastclose(struct drm_device *dev)
{
- if (dev->dev_private) {
- drm_radeon_private_t *dev_priv = dev->dev_private;
-
- if (dev_priv->sarea_priv &&
- dev_priv->sarea_priv->pfCurrentPage != 0)
- radeon_cp_dispatch_flip(dev);
- }
-
radeon_do_release(dev);
}
diff --git a/drivers/gpu/drm/via/via_irq.c b/drivers/gpu/drm/via/via_irq.c
index 665d319b927..c248c1d3726 100644
--- a/drivers/gpu/drm/via/via_irq.c
+++ b/drivers/gpu/drm/via/via_irq.c
@@ -314,7 +314,6 @@ int via_driver_irq_postinstall(struct drm_device *dev)
if (!dev_priv)
return -EINVAL;
- drm_vblank_init(dev, 1);
status = VIA_READ(VIA_REG_INTERRUPT);
VIA_WRITE(VIA_REG_INTERRUPT, status | VIA_IRQ_GLOBAL
| dev_priv->irq_enable_mask);
diff --git a/drivers/gpu/drm/via/via_map.c b/drivers/gpu/drm/via/via_map.c
index a967556be01..2c4f0b48579 100644
--- a/drivers/gpu/drm/via/via_map.c
+++ b/drivers/gpu/drm/via/via_map.c
@@ -107,8 +107,17 @@ int via_driver_load(struct drm_device *dev, unsigned long chipset)
ret = drm_sman_init(&dev_priv->sman, 2, 12, 8);
if (ret) {
drm_free(dev_priv, sizeof(*dev_priv), DRM_MEM_DRIVER);
+ return ret;
}
- return ret;
+
+ ret = drm_vblank_init(dev, 1);
+ if (ret) {
+ drm_sman_takedown(&dev_priv->sman);
+ drm_free(dev_priv, sizeof(drm_via_private_t), DRM_MEM_DRIVER);
+ return ret;
+ }
+
+ return 0;
}
int via_driver_unload(struct drm_device *dev)