Diffstat (limited to 'drivers/gpu/drm/i915')
-rw-r--r--  drivers/gpu/drm/i915/Makefile                                         |    3
-rw-r--r--  drivers/gpu/drm/i915/i915_debugfs.c (renamed from drivers/gpu/drm/i915/i915_gem_debugfs.c) |   97
-rw-r--r--  drivers/gpu/drm/i915/i915_dma.c                                       |  315
-rw-r--r--  drivers/gpu/drm/i915/i915_drv.c                                       |  142
-rw-r--r--  drivers/gpu/drm/i915/i915_drv.h                                       |  142
-rw-r--r--  drivers/gpu/drm/i915/i915_gem.c                                       |  898
-rw-r--r--  drivers/gpu/drm/i915/i915_gem_tiling.c                                |   80
-rw-r--r--  drivers/gpu/drm/i915/i915_irq.c                                       |  123
-rw-r--r--  drivers/gpu/drm/i915/i915_opregion.c                                  |   22
-rw-r--r--  drivers/gpu/drm/i915/i915_reg.h                                       |  178
-rw-r--r--  drivers/gpu/drm/i915/i915_suspend.c                                   |  172
-rw-r--r--  drivers/gpu/drm/i915/i915_trace.h                                     |  316
-rw-r--r--  drivers/gpu/drm/i915/i915_trace_points.c                              |   11
-rw-r--r--  drivers/gpu/drm/i915/intel_bios.c                                     |   11
-rw-r--r--  drivers/gpu/drm/i915/intel_crt.c                                      |   37
-rw-r--r--  drivers/gpu/drm/i915/intel_display.c                                  | 1239
-rw-r--r--  drivers/gpu/drm/i915/intel_dp.c                                       |    2
-rw-r--r--  drivers/gpu/drm/i915/intel_drv.h                                      |   15
-rw-r--r--  drivers/gpu/drm/i915/intel_fb.c                                       |  748
-rw-r--r--  drivers/gpu/drm/i915/intel_hdmi.c                                     |    2
-rw-r--r--  drivers/gpu/drm/i915/intel_i2c.c                                      |    8
-rw-r--r--  drivers/gpu/drm/i915/intel_lvds.c                                     |   85
-rw-r--r--  drivers/gpu/drm/i915/intel_sdvo.c                                     |  825
-rw-r--r--  drivers/gpu/drm/i915/intel_tv.c                                       |   33
24 files changed, 4034 insertions, 1470 deletions
diff --git a/drivers/gpu/drm/i915/Makefile b/drivers/gpu/drm/i915/Makefile
index 30d6b99fb30..fa7b9be096b 100644
--- a/drivers/gpu/drm/i915/Makefile
+++ b/drivers/gpu/drm/i915/Makefile
@@ -4,11 +4,12 @@
ccflags-y := -Iinclude/drm
i915-y := i915_drv.o i915_dma.o i915_irq.o i915_mem.o \
+ i915_debugfs.o \
i915_suspend.o \
i915_gem.o \
i915_gem_debug.o \
- i915_gem_debugfs.o \
i915_gem_tiling.o \
+ i915_trace_points.o \
intel_display.o \
intel_crt.o \
intel_lvds.o \
diff --git a/drivers/gpu/drm/i915/i915_gem_debugfs.c b/drivers/gpu/drm/i915/i915_debugfs.c
index cb3b97405fb..f8ce9a3a420 100644
--- a/drivers/gpu/drm/i915/i915_gem_debugfs.c
+++ b/drivers/gpu/drm/i915/i915_debugfs.c
@@ -96,11 +96,13 @@ static int i915_gem_object_list_info(struct seq_file *m, void *data)
{
struct drm_gem_object *obj = obj_priv->obj;
- seq_printf(m, " %p: %s %08x %08x %d",
+ seq_printf(m, " %p: %s %8zd %08x %08x %d %s",
obj,
get_pin_flag(obj_priv),
+ obj->size,
obj->read_domains, obj->write_domain,
- obj_priv->last_rendering_seqno);
+ obj_priv->last_rendering_seqno,
+ obj_priv->dirty ? "dirty" : "");
if (obj->name)
seq_printf(m, " (name: %d)", obj->name);
@@ -158,16 +160,37 @@ static int i915_interrupt_info(struct seq_file *m, void *data)
struct drm_device *dev = node->minor->dev;
drm_i915_private_t *dev_priv = dev->dev_private;
- seq_printf(m, "Interrupt enable: %08x\n",
- I915_READ(IER));
- seq_printf(m, "Interrupt identity: %08x\n",
- I915_READ(IIR));
- seq_printf(m, "Interrupt mask: %08x\n",
- I915_READ(IMR));
- seq_printf(m, "Pipe A stat: %08x\n",
- I915_READ(PIPEASTAT));
- seq_printf(m, "Pipe B stat: %08x\n",
- I915_READ(PIPEBSTAT));
+ if (!IS_IGDNG(dev)) {
+ seq_printf(m, "Interrupt enable: %08x\n",
+ I915_READ(IER));
+ seq_printf(m, "Interrupt identity: %08x\n",
+ I915_READ(IIR));
+ seq_printf(m, "Interrupt mask: %08x\n",
+ I915_READ(IMR));
+ seq_printf(m, "Pipe A stat: %08x\n",
+ I915_READ(PIPEASTAT));
+ seq_printf(m, "Pipe B stat: %08x\n",
+ I915_READ(PIPEBSTAT));
+ } else {
+ seq_printf(m, "North Display Interrupt enable: %08x\n",
+ I915_READ(DEIER));
+ seq_printf(m, "North Display Interrupt identity: %08x\n",
+ I915_READ(DEIIR));
+ seq_printf(m, "North Display Interrupt mask: %08x\n",
+ I915_READ(DEIMR));
+ seq_printf(m, "South Display Interrupt enable: %08x\n",
+ I915_READ(SDEIER));
+ seq_printf(m, "South Display Interrupt identity: %08x\n",
+ I915_READ(SDEIIR));
+ seq_printf(m, "South Display Interrupt mask: %08x\n",
+ I915_READ(SDEIMR));
+ seq_printf(m, "Graphics Interrupt enable: %08x\n",
+ I915_READ(GTIER));
+ seq_printf(m, "Graphics Interrupt identity: %08x\n",
+ I915_READ(GTIIR));
+ seq_printf(m, "Graphics Interrupt mask: %08x\n",
+ I915_READ(GTIMR));
+ }
seq_printf(m, "Interrupts received: %d\n",
atomic_read(&dev_priv->irq_received));
if (dev_priv->hw_status_page != NULL) {
@@ -312,15 +335,13 @@ static int i915_ringbuffer_info(struct seq_file *m, void *data)
struct drm_info_node *node = (struct drm_info_node *) m->private;
struct drm_device *dev = node->minor->dev;
drm_i915_private_t *dev_priv = dev->dev_private;
- unsigned int head, tail, mask;
+ unsigned int head, tail;
head = I915_READ(PRB0_HEAD) & HEAD_ADDR;
tail = I915_READ(PRB0_TAIL) & TAIL_ADDR;
- mask = dev_priv->ring.tail_mask;
seq_printf(m, "RingHead : %08x\n", head);
seq_printf(m, "RingTail : %08x\n", tail);
- seq_printf(m, "RingMask : %08x\n", mask);
seq_printf(m, "RingSize : %08lx\n", dev_priv->ring.Size);
seq_printf(m, "Acthd : %08x\n", I915_READ(IS_I965G(dev) ? ACTHD_I965 : ACTHD));
@@ -363,7 +384,37 @@ out:
return 0;
}
-static struct drm_info_list i915_gem_debugfs_list[] = {
+static int i915_registers_info(struct seq_file *m, void *data) {
+ struct drm_info_node *node = (struct drm_info_node *) m->private;
+ struct drm_device *dev = node->minor->dev;
+ drm_i915_private_t *dev_priv = dev->dev_private;
+ uint32_t reg;
+
+#define DUMP_RANGE(start, end) \
+ for (reg=start; reg < end; reg += 4) \
+ seq_printf(m, "%08x\t%08x\n", reg, I915_READ(reg));
+
+ DUMP_RANGE(0x00000, 0x00fff); /* VGA registers */
+ DUMP_RANGE(0x02000, 0x02fff); /* instruction, memory, interrupt control registers */
+ DUMP_RANGE(0x03000, 0x031ff); /* FENCE and PPGTT control registers */
+ DUMP_RANGE(0x03200, 0x03fff); /* frame buffer compression registers */
+ DUMP_RANGE(0x05000, 0x05fff); /* I/O control registers */
+ DUMP_RANGE(0x06000, 0x06fff); /* clock control registers */
+ DUMP_RANGE(0x07000, 0x07fff); /* 3D internal debug registers */
+ DUMP_RANGE(0x07400, 0x088ff); /* GPE debug registers */
+ DUMP_RANGE(0x0a000, 0x0afff); /* display palette registers */
+ DUMP_RANGE(0x10000, 0x13fff); /* MMIO MCHBAR */
+ DUMP_RANGE(0x30000, 0x3ffff); /* overlay registers */
+ DUMP_RANGE(0x60000, 0x6ffff); /* display engine pipeline registers */
+ DUMP_RANGE(0x70000, 0x72fff); /* display and cursor registers */
+ DUMP_RANGE(0x73000, 0x73fff); /* performance counters */
+
+ return 0;
+}
+
+
+static struct drm_info_list i915_debugfs_list[] = {
+ {"i915_regs", i915_registers_info, 0},
{"i915_gem_active", i915_gem_object_list_info, 0, (void *) ACTIVE_LIST},
{"i915_gem_flushing", i915_gem_object_list_info, 0, (void *) FLUSHING_LIST},
{"i915_gem_inactive", i915_gem_object_list_info, 0, (void *) INACTIVE_LIST},
@@ -377,19 +428,19 @@ static struct drm_info_list i915_gem_debugfs_list[] = {
{"i915_batchbuffers", i915_batchbuffer_info, 0},
{"i915_error_state", i915_error_state, 0},
};
-#define I915_GEM_DEBUGFS_ENTRIES ARRAY_SIZE(i915_gem_debugfs_list)
+#define I915_DEBUGFS_ENTRIES ARRAY_SIZE(i915_debugfs_list)
-int i915_gem_debugfs_init(struct drm_minor *minor)
+int i915_debugfs_init(struct drm_minor *minor)
{
- return drm_debugfs_create_files(i915_gem_debugfs_list,
- I915_GEM_DEBUGFS_ENTRIES,
+ return drm_debugfs_create_files(i915_debugfs_list,
+ I915_DEBUGFS_ENTRIES,
minor->debugfs_root, minor);
}
-void i915_gem_debugfs_cleanup(struct drm_minor *minor)
+void i915_debugfs_cleanup(struct drm_minor *minor)
{
- drm_debugfs_remove_files(i915_gem_debugfs_list,
- I915_GEM_DEBUGFS_ENTRIES, minor);
+ drm_debugfs_remove_files(i915_debugfs_list,
+ I915_DEBUGFS_ENTRIES, minor);
}
#endif /* CONFIG_DEBUG_FS */
diff --git a/drivers/gpu/drm/i915/i915_dma.c b/drivers/gpu/drm/i915/i915_dma.c
index 50d1f782768..92aeb918e0c 100644
--- a/drivers/gpu/drm/i915/i915_dma.c
+++ b/drivers/gpu/drm/i915/i915_dma.c
@@ -29,11 +29,12 @@
#include "drmP.h"
#include "drm.h"
#include "drm_crtc_helper.h"
+#include "drm_fb_helper.h"
#include "intel_drv.h"
#include "i915_drm.h"
#include "i915_drv.h"
-
-#define I915_DRV "i915_drv"
+#include "i915_trace.h"
+#include <linux/vgaarb.h>
/* Really want an OS-independent resettable timer. Would like to have
* this loop run for (eg) 3 sec, but have the timer reset every time
@@ -50,14 +51,18 @@ int i915_wait_ring(struct drm_device * dev, int n, const char *caller)
u32 last_head = I915_READ(PRB0_HEAD) & HEAD_ADDR;
int i;
+ trace_i915_ring_wait_begin (dev);
+
for (i = 0; i < 100000; i++) {
ring->head = I915_READ(PRB0_HEAD) & HEAD_ADDR;
acthd = I915_READ(acthd_reg);
ring->space = ring->head - (ring->tail + 8);
if (ring->space < 0)
ring->space += ring->Size;
- if (ring->space >= n)
+ if (ring->space >= n) {
+ trace_i915_ring_wait_end (dev);
return 0;
+ }
if (dev->primary->master) {
struct drm_i915_master_private *master_priv = dev->primary->master->driver_priv;
@@ -77,9 +82,38 @@ int i915_wait_ring(struct drm_device * dev, int n, const char *caller)
}
+ trace_i915_ring_wait_end (dev);
return -EBUSY;
}
+/* As a ringbuffer is only allowed to wrap between instructions, fill
+ * the tail with NOOPs.
+ */
+int i915_wrap_ring(struct drm_device *dev)
+{
+ drm_i915_private_t *dev_priv = dev->dev_private;
+ volatile unsigned int *virt;
+ int rem;
+
+ rem = dev_priv->ring.Size - dev_priv->ring.tail;
+ if (dev_priv->ring.space < rem) {
+ int ret = i915_wait_ring(dev, rem, __func__);
+ if (ret)
+ return ret;
+ }
+ dev_priv->ring.space -= rem;
+
+ virt = (unsigned int *)
+ (dev_priv->ring.virtual_start + dev_priv->ring.tail);
+ rem /= 4;
+ while (rem--)
+ *virt++ = MI_NOOP;
+
+ dev_priv->ring.tail = 0;
+
+ return 0;
+}
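
A worked trace of the wrap with hypothetical numbers (illustrative only, not part of the patch), assuming the default 128 KiB primary ring:

    /* Illustrative sketch only:
     *   dev_priv->ring.Size = 128 * 1024 = 131072
     *   dev_priv->ring.tail = 131064   -> rem = 131072 - 131064 = 8
     *   rem / 4 = 2                    -> two MI_NOOPs pad out the tail
     *   dev_priv->ring.tail = 0        -> the next command starts at the top
     */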
+
/**
* Sets up the hardware status page for devices that need a physical address
* in the register.
@@ -101,7 +135,7 @@ static int i915_init_phys_hws(struct drm_device *dev)
memset(dev_priv->hw_status_page, 0, PAGE_SIZE);
I915_WRITE(HWS_PGA, dev_priv->dma_status_page);
- DRM_DEBUG_DRIVER(I915_DRV, "Enabled hardware status page\n");
+ DRM_DEBUG_DRIVER("Enabled hardware status page\n");
return 0;
}
@@ -187,8 +221,7 @@ static int i915_initialize(struct drm_device * dev, drm_i915_init_t * init)
master_priv->sarea_priv = (drm_i915_sarea_t *)
((u8 *)master_priv->sarea->handle + init->sarea_priv_offset);
} else {
- DRM_DEBUG_DRIVER(I915_DRV,
- "sarea not found assuming DRI2 userspace\n");
+ DRM_DEBUG_DRIVER("sarea not found assuming DRI2 userspace\n");
}
if (init->ring_size != 0) {
@@ -200,7 +233,6 @@ static int i915_initialize(struct drm_device * dev, drm_i915_init_t * init)
}
dev_priv->ring.Size = init->ring_size;
- dev_priv->ring.tail_mask = dev_priv->ring.Size - 1;
dev_priv->ring.map.offset = init->ring_start;
dev_priv->ring.map.size = init->ring_size;
@@ -238,7 +270,7 @@ static int i915_dma_resume(struct drm_device * dev)
{
drm_i915_private_t *dev_priv = (drm_i915_private_t *) dev->dev_private;
- DRM_DEBUG_DRIVER(I915_DRV, "%s\n", __func__);
+ DRM_DEBUG_DRIVER("%s\n", __func__);
if (dev_priv->ring.map.handle == NULL) {
DRM_ERROR("can not ioremap virtual address for"
@@ -251,14 +283,14 @@ static int i915_dma_resume(struct drm_device * dev)
DRM_ERROR("Can not find hardware status page\n");
return -EINVAL;
}
- DRM_DEBUG_DRIVER(I915_DRV, "hw status page @ %p\n",
+ DRM_DEBUG_DRIVER("hw status page @ %p\n",
dev_priv->hw_status_page);
if (dev_priv->status_gfx_addr != 0)
I915_WRITE(HWS_PGA, dev_priv->status_gfx_addr);
else
I915_WRITE(HWS_PGA, dev_priv->dma_status_page);
- DRM_DEBUG_DRIVER(I915_DRV, "Enabled hardware status page\n");
+ DRM_DEBUG_DRIVER("Enabled hardware status page\n");
return 0;
}
@@ -552,7 +584,7 @@ static int i915_dispatch_flip(struct drm_device * dev)
if (!master_priv->sarea_priv)
return -EINVAL;
- DRM_DEBUG_DRIVER(I915_DRV, "%s: page=%d pfCurrentPage=%d\n",
+ DRM_DEBUG_DRIVER("%s: page=%d pfCurrentPage=%d\n",
__func__,
dev_priv->current_page,
master_priv->sarea_priv->pf_current_page);
@@ -633,8 +665,7 @@ static int i915_batchbuffer(struct drm_device *dev, void *data,
return -EINVAL;
}
- DRM_DEBUG_DRIVER(I915_DRV,
- "i915 batchbuffer, start %x used %d cliprects %d\n",
+ DRM_DEBUG_DRIVER("i915 batchbuffer, start %x used %d cliprects %d\n",
batch->start, batch->used, batch->num_cliprects);
RING_LOCK_TEST_WITH_RETURN(dev, file_priv);
@@ -681,8 +712,7 @@ static int i915_cmdbuffer(struct drm_device *dev, void *data,
void *batch_data;
int ret;
- DRM_DEBUG_DRIVER(I915_DRV,
- "i915 cmdbuffer, buf %p sz %d cliprects %d\n",
+ DRM_DEBUG_DRIVER("i915 cmdbuffer, buf %p sz %d cliprects %d\n",
cmdbuf->buf, cmdbuf->sz, cmdbuf->num_cliprects);
RING_LOCK_TEST_WITH_RETURN(dev, file_priv);
@@ -735,7 +765,7 @@ static int i915_flip_bufs(struct drm_device *dev, void *data,
{
int ret;
- DRM_DEBUG_DRIVER(I915_DRV, "%s\n", __func__);
+ DRM_DEBUG_DRIVER("%s\n", __func__);
RING_LOCK_TEST_WITH_RETURN(dev, file_priv);
@@ -778,7 +808,7 @@ static int i915_getparam(struct drm_device *dev, void *data,
value = dev_priv->num_fence_regs - dev_priv->fence_reg_start;
break;
default:
- DRM_DEBUG_DRIVER(I915_DRV, "Unknown parameter %d\n",
+ DRM_DEBUG_DRIVER("Unknown parameter %d\n",
param->param);
return -EINVAL;
}
@@ -819,7 +849,7 @@ static int i915_setparam(struct drm_device *dev, void *data,
dev_priv->fence_reg_start = param->value;
break;
default:
- DRM_DEBUG_DRIVER(I915_DRV, "unknown parameter %d\n",
+ DRM_DEBUG_DRIVER("unknown parameter %d\n",
param->param);
return -EINVAL;
}
@@ -846,7 +876,7 @@ static int i915_set_status_page(struct drm_device *dev, void *data,
return 0;
}
- DRM_DEBUG("set status page addr 0x%08x\n", (u32)hws->addr);
+ DRM_DEBUG_DRIVER("set status page addr 0x%08x\n", (u32)hws->addr);
dev_priv->status_gfx_addr = hws->addr & (0x1ffff<<12);
@@ -868,13 +898,25 @@ static int i915_set_status_page(struct drm_device *dev, void *data,
memset(dev_priv->hw_status_page, 0, PAGE_SIZE);
I915_WRITE(HWS_PGA, dev_priv->status_gfx_addr);
- DRM_DEBUG_DRIVER(I915_DRV, "load hws HWS_PGA with gfx mem 0x%x\n",
+ DRM_DEBUG_DRIVER("load hws HWS_PGA with gfx mem 0x%x\n",
dev_priv->status_gfx_addr);
- DRM_DEBUG_DRIVER(I915_DRV, "load hws at %p\n",
+ DRM_DEBUG_DRIVER("load hws at %p\n",
dev_priv->hw_status_page);
return 0;
}
+static int i915_get_bridge_dev(struct drm_device *dev)
+{
+ struct drm_i915_private *dev_priv = dev->dev_private;
+
+ dev_priv->bridge_dev = pci_get_bus_and_slot(0, PCI_DEVFN(0,0));
+ if (!dev_priv->bridge_dev) {
+ DRM_ERROR("bridge device not found\n");
+ return -1;
+ }
+ return 0;
+}
+
/**
* i915_probe_agp - get AGP bootup configuration
* @pdev: PCI device
@@ -886,22 +928,16 @@ static int i915_set_status_page(struct drm_device *dev, void *data,
* how much was set aside so we can use it for our own purposes.
*/
static int i915_probe_agp(struct drm_device *dev, uint32_t *aperture_size,
- uint32_t *preallocated_size)
+ uint32_t *preallocated_size,
+ uint32_t *start)
{
- struct pci_dev *bridge_dev;
+ struct drm_i915_private *dev_priv = dev->dev_private;
u16 tmp = 0;
unsigned long overhead;
unsigned long stolen;
- bridge_dev = pci_get_bus_and_slot(0, PCI_DEVFN(0,0));
- if (!bridge_dev) {
- DRM_ERROR("bridge device not found\n");
- return -1;
- }
-
/* Get the fb aperture size and "stolen" memory amount. */
- pci_read_config_word(bridge_dev, INTEL_GMCH_CTRL, &tmp);
- pci_dev_put(bridge_dev);
+ pci_read_config_word(dev_priv->bridge_dev, INTEL_GMCH_CTRL, &tmp);
*aperture_size = 1024 * 1024;
*preallocated_size = 1024 * 1024;
@@ -980,11 +1016,174 @@ static int i915_probe_agp(struct drm_device *dev, uint32_t *aperture_size,
return -1;
}
*preallocated_size = stolen - overhead;
+ *start = overhead;
return 0;
}
+#define PTE_ADDRESS_MASK 0xfffff000
+#define PTE_ADDRESS_MASK_HIGH 0x000000f0 /* i915+ */
+#define PTE_MAPPING_TYPE_UNCACHED (0 << 1)
+#define PTE_MAPPING_TYPE_DCACHE (1 << 1) /* i830 only */
+#define PTE_MAPPING_TYPE_CACHED (3 << 1)
+#define PTE_MAPPING_TYPE_MASK (3 << 1)
+#define PTE_VALID (1 << 0)
+
+/**
+ * i915_gtt_to_phys - take a GTT address and turn it into a physical one
+ * @dev: drm device
+ * @gtt_addr: address to translate
+ *
+ * Some chip functions require allocations from stolen space but need the
+ * physical address of the memory in question. We use this routine
+ * to get a physical address suitable for register programming from a given
+ * GTT address.
+ */
+static unsigned long i915_gtt_to_phys(struct drm_device *dev,
+ unsigned long gtt_addr)
+{
+ unsigned long *gtt;
+ unsigned long entry, phys;
+ int gtt_bar = IS_I9XX(dev) ? 0 : 1;
+ int gtt_offset, gtt_size;
+
+ if (IS_I965G(dev)) {
+ if (IS_G4X(dev) || IS_IGDNG(dev)) {
+ gtt_offset = 2*1024*1024;
+ gtt_size = 2*1024*1024;
+ } else {
+ gtt_offset = 512*1024;
+ gtt_size = 512*1024;
+ }
+ } else {
+ gtt_bar = 3;
+ gtt_offset = 0;
+ gtt_size = pci_resource_len(dev->pdev, gtt_bar);
+ }
+
+ gtt = ioremap_wc(pci_resource_start(dev->pdev, gtt_bar) + gtt_offset,
+ gtt_size);
+ if (!gtt) {
+ DRM_ERROR("ioremap of GTT failed\n");
+ return 0;
+ }
+
+ entry = *(volatile u32 *)(gtt + (gtt_addr / 1024));
+
+ DRM_DEBUG("GTT addr: 0x%08lx, PTE: 0x%08lx\n", gtt_addr, entry);
+
+ /* Mask out these reserved bits on this hardware. */
+ if (!IS_I9XX(dev) || IS_I915G(dev) || IS_I915GM(dev) ||
+ IS_I945G(dev) || IS_I945GM(dev)) {
+ entry &= ~PTE_ADDRESS_MASK_HIGH;
+ }
+
+ /* If it's not a mapping type we know, then bail. */
+ if ((entry & PTE_MAPPING_TYPE_MASK) != PTE_MAPPING_TYPE_UNCACHED &&
+ (entry & PTE_MAPPING_TYPE_MASK) != PTE_MAPPING_TYPE_CACHED) {
+ iounmap(gtt);
+ return 0;
+ }
+
+ if (!(entry & PTE_VALID)) {
+ DRM_ERROR("bad GTT entry in stolen space\n");
+ iounmap(gtt);
+ return 0;
+ }
+
+ iounmap(gtt);
+
+ phys =(entry & PTE_ADDRESS_MASK) |
+ ((uint64_t)(entry & PTE_ADDRESS_MASK_HIGH) << (32 - 4));
+
+ DRM_DEBUG("GTT addr: 0x%08lx, phys addr: 0x%08lx\n", gtt_addr, phys);
+
+ return phys;
+}
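
A worked decode of a hypothetical PTE value against the masks above (illustrative only, not part of the patch; on parts where the high bits are reserved, the function masks them off before this calculation):

    /* Illustrative sketch only:
     *   entry = 0x1abcd031
     *     entry & PTE_VALID             = 1           -> entry is valid
     *     entry & PTE_MAPPING_TYPE_MASK = 0           -> uncached mapping
     *     entry & PTE_ADDRESS_MASK      = 0x1abcd000  -> phys bits 12..31
     *     entry & PTE_ADDRESS_MASK_HIGH = 0x30, << 28 = 0x300000000 (bits 32..35)
     *   phys = 0x1abcd000 | 0x300000000 = 0x31abcd000
     */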
+
+static void i915_warn_stolen(struct drm_device *dev)
+{
+ DRM_ERROR("not enough stolen space for compressed buffer, disabling\n");
+ DRM_ERROR("hint: you may be able to increase stolen memory size in the BIOS to avoid this\n");
+}
+
+static void i915_setup_compression(struct drm_device *dev, int size)
+{
+ struct drm_i915_private *dev_priv = dev->dev_private;
+ struct drm_mm_node *compressed_fb, *compressed_llb;
+ unsigned long cfb_base, ll_base;
+
+ /* Leave 1M for line length buffer & misc. */
+ compressed_fb = drm_mm_search_free(&dev_priv->vram, size, 4096, 0);
+ if (!compressed_fb) {
+ i915_warn_stolen(dev);
+ return;
+ }
+
+ compressed_fb = drm_mm_get_block(compressed_fb, size, 4096);
+ if (!compressed_fb) {
+ i915_warn_stolen(dev);
+ return;
+ }
+
+ cfb_base = i915_gtt_to_phys(dev, compressed_fb->start);
+ if (!cfb_base) {
+ DRM_ERROR("failed to get stolen phys addr, disabling FBC\n");
+ drm_mm_put_block(compressed_fb);
+ }
+
+ if (!IS_GM45(dev)) {
+ compressed_llb = drm_mm_search_free(&dev_priv->vram, 4096,
+ 4096, 0);
+ if (!compressed_llb) {
+ i915_warn_stolen(dev);
+ return;
+ }
+
+ compressed_llb = drm_mm_get_block(compressed_llb, 4096, 4096);
+ if (!compressed_llb) {
+ i915_warn_stolen(dev);
+ return;
+ }
+
+ ll_base = i915_gtt_to_phys(dev, compressed_llb->start);
+ if (!ll_base) {
+ DRM_ERROR("failed to get stolen phys addr, disabling FBC\n");
+ drm_mm_put_block(compressed_fb);
+ drm_mm_put_block(compressed_llb);
+ }
+ }
+
+ dev_priv->cfb_size = size;
+
+ if (IS_GM45(dev)) {
+ g4x_disable_fbc(dev);
+ I915_WRITE(DPFC_CB_BASE, compressed_fb->start);
+ } else {
+ i8xx_disable_fbc(dev);
+ I915_WRITE(FBC_CFB_BASE, cfb_base);
+ I915_WRITE(FBC_LL_BASE, ll_base);
+ }
+
+ DRM_DEBUG("FBC base 0x%08lx, ll base 0x%08lx, size %dM\n", cfb_base,
+ ll_base, size >> 20);
+}
+
+/* true = enable decode, false = disable decoder */
+static unsigned int i915_vga_set_decode(void *cookie, bool state)
+{
+ struct drm_device *dev = cookie;
+
+ intel_modeset_vga_set_state(dev, state);
+ if (state)
+ return VGA_RSRC_LEGACY_IO | VGA_RSRC_LEGACY_MEM |
+ VGA_RSRC_NORMAL_IO | VGA_RSRC_NORMAL_MEM;
+ else
+ return VGA_RSRC_NORMAL_IO | VGA_RSRC_NORMAL_MEM;
+}
+
static int i915_load_modeset_init(struct drm_device *dev,
+ unsigned long prealloc_start,
unsigned long prealloc_size,
unsigned long agp_size)
{
@@ -1005,6 +1204,10 @@ static int i915_load_modeset_init(struct drm_device *dev,
/* Basic memrange allocator for stolen space (aka vram) */
drm_mm_init(&dev_priv->vram, 0, prealloc_size);
+ DRM_INFO("set up %ldM of stolen space\n", prealloc_size / (1024*1024));
+
+ /* We're off and running w/KMS */
+ dev_priv->mm.suspended = 0;
/* Let GEM Manage from end of prealloc space to end of aperture.
*
@@ -1017,10 +1220,25 @@ static int i915_load_modeset_init(struct drm_device *dev,
*/
i915_gem_do_init(dev, prealloc_size, agp_size - 4096);
+ mutex_lock(&dev->struct_mutex);
ret = i915_gem_init_ringbuffer(dev);
+ mutex_unlock(&dev->struct_mutex);
if (ret)
goto out;
+ /* Try to set up FBC with a reasonable compressed buffer size */
+ if (IS_MOBILE(dev) && (IS_I9XX(dev) || IS_I965G(dev) || IS_GM45(dev)) &&
+ i915_powersave) {
+ int cfb_size;
+
+ /* Try to get an 8M buffer... */
+ if (prealloc_size > (9*1024*1024))
+ cfb_size = 8*1024*1024;
+ else /* fall back to 7/8 of the stolen space */
+ cfb_size = prealloc_size * 7 / 8;
+ i915_setup_compression(dev, cfb_size);
+ }
+
/* Allow hardware batchbuffers unless told otherwise.
*/
dev_priv->allow_batchbuffer = 1;
@@ -1029,6 +1247,11 @@ static int i915_load_modeset_init(struct drm_device *dev,
if (ret)
DRM_INFO("failed to find VBIOS tables\n");
+ /* if we have > 1 VGA cards, then disable the radeon VGA resources */
+ ret = vga_client_register(dev->pdev, dev, NULL, i915_vga_set_decode);
+ if (ret)
+ goto destroy_ringbuffer;
+
ret = drm_irq_install(dev);
if (ret)
goto destroy_ringbuffer;
@@ -1133,7 +1356,7 @@ int i915_driver_load(struct drm_device *dev, unsigned long flags)
struct drm_i915_private *dev_priv = dev->dev_private;
resource_size_t base, size;
int ret = 0, mmio_bar = IS_I9XX(dev) ? 0 : 1;
- uint32_t agp_size, prealloc_size;
+ uint32_t agp_size, prealloc_size, prealloc_start;
/* i915 has 4 more counters */
dev->counters += 4;
@@ -1153,11 +1376,16 @@ int i915_driver_load(struct drm_device *dev, unsigned long flags)
base = drm_get_resource_start(dev, mmio_bar);
size = drm_get_resource_len(dev, mmio_bar);
+ if (i915_get_bridge_dev(dev)) {
+ ret = -EIO;
+ goto free_priv;
+ }
+
dev_priv->regs = ioremap(base, size);
if (!dev_priv->regs) {
DRM_ERROR("failed to map registers\n");
ret = -EIO;
- goto free_priv;
+ goto put_bridge;
}
dev_priv->mm.gtt_mapping =
@@ -1182,7 +1410,7 @@ int i915_driver_load(struct drm_device *dev, unsigned long flags)
"performance may suffer.\n");
}
- ret = i915_probe_agp(dev, &agp_size, &prealloc_size);
+ ret = i915_probe_agp(dev, &agp_size, &prealloc_size, &prealloc_start);
if (ret)
goto out_iomapfree;
@@ -1240,6 +1468,7 @@ int i915_driver_load(struct drm_device *dev, unsigned long flags)
spin_lock_init(&dev_priv->user_irq_lock);
spin_lock_init(&dev_priv->error_lock);
dev_priv->user_irq_refcount = 0;
+ dev_priv->trace_irq_seqno = 0;
ret = drm_vblank_init(dev, I915_NUM_PIPE);
@@ -1248,8 +1477,12 @@ int i915_driver_load(struct drm_device *dev, unsigned long flags)
return ret;
}
+ /* Start out suspended */
+ dev_priv->mm.suspended = 1;
+
if (drm_core_check_feature(dev, DRIVER_MODESET)) {
- ret = i915_load_modeset_init(dev, prealloc_size, agp_size);
+ ret = i915_load_modeset_init(dev, prealloc_start,
+ prealloc_size, agp_size);
if (ret < 0) {
DRM_ERROR("failed to init modeset\n");
goto out_workqueue_free;
@@ -1261,6 +1494,8 @@ int i915_driver_load(struct drm_device *dev, unsigned long flags)
if (!IS_IGDNG(dev))
intel_opregion_init(dev, 0);
+ setup_timer(&dev_priv->hangcheck_timer, i915_hangcheck_elapsed,
+ (unsigned long) dev);
return 0;
out_workqueue_free:
@@ -1269,6 +1504,8 @@ out_iomapfree:
io_mapping_free(dev_priv->mm.gtt_mapping);
out_rmmap:
iounmap(dev_priv->regs);
+put_bridge:
+ pci_dev_put(dev_priv->bridge_dev);
free_priv:
kfree(dev_priv);
return ret;
@@ -1279,6 +1516,7 @@ int i915_driver_unload(struct drm_device *dev)
struct drm_i915_private *dev_priv = dev->dev_private;
destroy_workqueue(dev_priv->wq);
+ del_timer_sync(&dev_priv->hangcheck_timer);
io_mapping_free(dev_priv->mm.gtt_mapping);
if (dev_priv->mm.gtt_mtrr >= 0) {
@@ -1289,6 +1527,7 @@ int i915_driver_unload(struct drm_device *dev)
if (drm_core_check_feature(dev, DRIVER_MODESET)) {
drm_irq_uninstall(dev);
+ vga_client_register(dev->pdev, NULL, NULL, NULL);
}
if (dev->pdev->msi_enabled)
@@ -1312,6 +1551,7 @@ int i915_driver_unload(struct drm_device *dev)
i915_gem_lastclose(dev);
}
+ pci_dev_put(dev_priv->bridge_dev);
kfree(dev->dev_private);
return 0;
@@ -1321,7 +1561,7 @@ int i915_driver_open(struct drm_device *dev, struct drm_file *file_priv)
{
struct drm_i915_file_private *i915_file_priv;
- DRM_DEBUG_DRIVER(I915_DRV, "\n");
+ DRM_DEBUG_DRIVER("\n");
i915_file_priv = (struct drm_i915_file_private *)
kmalloc(sizeof(*i915_file_priv), GFP_KERNEL);
@@ -1352,7 +1592,7 @@ void i915_driver_lastclose(struct drm_device * dev)
drm_i915_private_t *dev_priv = dev->dev_private;
if (!dev_priv || drm_core_check_feature(dev, DRIVER_MODESET)) {
- intelfb_restore();
+ drm_fb_helper_restore();
return;
}
@@ -1416,6 +1656,7 @@ struct drm_ioctl_desc i915_ioctls[] = {
DRM_IOCTL_DEF(DRM_I915_GEM_GET_TILING, i915_gem_get_tiling, 0),
DRM_IOCTL_DEF(DRM_I915_GEM_GET_APERTURE, i915_gem_get_aperture_ioctl, 0),
DRM_IOCTL_DEF(DRM_I915_GET_PIPE_FROM_CRTC_ID, intel_get_pipe_from_crtc_id, 0),
+ DRM_IOCTL_DEF(DRM_I915_GEM_MADVISE, i915_gem_madvise_ioctl, 0),
};
int i915_max_ioctl = DRM_ARRAY_SIZE(i915_ioctls);
diff --git a/drivers/gpu/drm/i915/i915_drv.c b/drivers/gpu/drm/i915/i915_drv.c
index fc4b68aa2d0..b93814c0d3e 100644
--- a/drivers/gpu/drm/i915/i915_drv.c
+++ b/drivers/gpu/drm/i915/i915_drv.c
@@ -37,12 +37,15 @@
#include <linux/console.h>
#include "drm_crtc_helper.h"
-static unsigned int i915_modeset = -1;
+static int i915_modeset = -1;
module_param_named(modeset, i915_modeset, int, 0400);
unsigned int i915_fbpercrtc = 0;
module_param_named(fbpercrtc, i915_fbpercrtc, int, 0400);
+unsigned int i915_powersave = 1;
+module_param_named(powersave, i915_powersave, int, 0400);
+
static struct drm_driver driver;
static struct pci_device_id pciidlist[] = {
@@ -86,6 +89,8 @@ static int i915_suspend(struct drm_device *dev, pm_message_t state)
pci_set_power_state(dev->pdev, PCI_D3hot);
}
+ dev_priv->suspended = 1;
+
return 0;
}
@@ -94,8 +99,6 @@ static int i915_resume(struct drm_device *dev)
struct drm_i915_private *dev_priv = dev->dev_private;
int ret = 0;
- pci_set_power_state(dev->pdev, PCI_D0);
- pci_restore_state(dev->pdev);
if (pci_enable_device(dev->pdev))
return -1;
pci_set_master(dev->pdev);
@@ -121,9 +124,135 @@ static int i915_resume(struct drm_device *dev)
drm_helper_resume_force_mode(dev);
}
+ dev_priv->suspended = 0;
+
return ret;
}
+/**
+ * i965_reset - reset chip after a hang
+ * @dev: drm device to reset
+ * @flags: reset domains
+ *
+ * Reset the chip. Useful if a hang is detected. Returns zero on successful
+ * reset or otherwise an error code.
+ *
+ * Procedure is fairly simple:
+ * - reset the chip using the reset reg
+ * - re-init context state
+ * - re-init hardware status page
+ * - re-init ring buffer
+ * - re-init interrupt state
+ * - re-init display
+ */
+int i965_reset(struct drm_device *dev, u8 flags)
+{
+ drm_i915_private_t *dev_priv = dev->dev_private;
+ unsigned long timeout;
+ u8 gdrst;
+ /*
+ * We really should only reset the display subsystem if we actually
+ * need to
+ */
+ bool need_display = true;
+
+ mutex_lock(&dev->struct_mutex);
+
+ /*
+ * Clear request list
+ */
+ i915_gem_retire_requests(dev);
+
+ if (need_display)
+ i915_save_display(dev);
+
+ if (IS_I965G(dev) || IS_G4X(dev)) {
+ /*
+ * Set the domains we want to reset, then the reset bit (bit 0).
+ * Clear the reset bit after a while and wait for hardware status
+ * bit (bit 1) to be set
+ */
+ pci_read_config_byte(dev->pdev, GDRST, &gdrst);
+ pci_write_config_byte(dev->pdev, GDRST, gdrst | flags | ((flags == GDRST_FULL) ? 0x1 : 0x0));
+ udelay(50);
+ pci_write_config_byte(dev->pdev, GDRST, gdrst & 0xfe);
+
+ /* ...we don't want to loop forever though, 500ms should be plenty */
+ timeout = jiffies + msecs_to_jiffies(500);
+ do {
+ udelay(100);
+ pci_read_config_byte(dev->pdev, GDRST, &gdrst);
+ } while ((gdrst & 0x1) && time_after(timeout, jiffies));
+
+ if (gdrst & 0x1) {
+ WARN(true, "i915: Failed to reset chip\n");
+ mutex_unlock(&dev->struct_mutex);
+ return -EIO;
+ }
+ } else {
+ DRM_ERROR("Error occurred. Don't know how to reset this chip.\n");
+ return -ENODEV;
+ }
+
+ /* Ok, now get things going again... */
+
+ /*
+ * Everything depends on having the GTT running, so we need to start
+ * there. Fortunately we don't need to do this unless we reset the
+ * chip at a PCI level.
+ *
+ * Next we need to restore the context, but we don't use those
+ * yet either...
+ *
+ * Ring buffer needs to be re-initialized in the KMS case, or if X
+ * was running at the time of the reset (i.e. we weren't VT
+ * switched away).
+ */
+ if (drm_core_check_feature(dev, DRIVER_MODESET) ||
+ !dev_priv->mm.suspended) {
+ drm_i915_ring_buffer_t *ring = &dev_priv->ring;
+ struct drm_gem_object *obj = ring->ring_obj;
+ struct drm_i915_gem_object *obj_priv = obj->driver_private;
+ dev_priv->mm.suspended = 0;
+
+ /* Stop the ring if it's running. */
+ I915_WRITE(PRB0_CTL, 0);
+ I915_WRITE(PRB0_TAIL, 0);
+ I915_WRITE(PRB0_HEAD, 0);
+
+ /* Initialize the ring. */
+ I915_WRITE(PRB0_START, obj_priv->gtt_offset);
+ I915_WRITE(PRB0_CTL,
+ ((obj->size - 4096) & RING_NR_PAGES) |
+ RING_NO_REPORT |
+ RING_VALID);
+ if (!drm_core_check_feature(dev, DRIVER_MODESET))
+ i915_kernel_lost_context(dev);
+ else {
+ ring->head = I915_READ(PRB0_HEAD) & HEAD_ADDR;
+ ring->tail = I915_READ(PRB0_TAIL) & TAIL_ADDR;
+ ring->space = ring->head - (ring->tail + 8);
+ if (ring->space < 0)
+ ring->space += ring->Size;
+ }
+
+ mutex_unlock(&dev->struct_mutex);
+ drm_irq_uninstall(dev);
+ drm_irq_install(dev);
+ mutex_lock(&dev->struct_mutex);
+ }
+
+ /*
+ * Display needs restore too...
+ */
+ if (need_display)
+ i915_restore_display(dev);
+
+ mutex_unlock(&dev->struct_mutex);
+ return 0;
+}
+
+
static int __devinit
i915_pci_probe(struct pci_dev *pdev, const struct pci_device_id *ent)
{
@@ -188,8 +317,8 @@ static struct drm_driver driver = {
.master_create = i915_master_create,
.master_destroy = i915_master_destroy,
#if defined(CONFIG_DEBUG_FS)
- .debugfs_init = i915_gem_debugfs_init,
- .debugfs_cleanup = i915_gem_debugfs_cleanup,
+ .debugfs_init = i915_debugfs_init,
+ .debugfs_cleanup = i915_debugfs_cleanup,
#endif
.gem_init_object = i915_gem_init_object,
.gem_free_object = i915_gem_free_object,
@@ -231,6 +360,8 @@ static int __init i915_init(void)
{
driver.num_ioctls = i915_max_ioctl;
+ i915_gem_shrinker_init();
+
/*
* If CONFIG_DRM_I915_KMS is set, default to KMS unless
* explicitly disabled with the module pararmeter.
@@ -257,6 +388,7 @@ static int __init i915_init(void)
static void __exit i915_exit(void)
{
+ i915_gem_shrinker_exit();
drm_exit(&driver);
}
diff --git a/drivers/gpu/drm/i915/i915_drv.h b/drivers/gpu/drm/i915/i915_drv.h
index 5b4f87e5562..6035d3dae85 100644
--- a/drivers/gpu/drm/i915/i915_drv.h
+++ b/drivers/gpu/drm/i915/i915_drv.h
@@ -48,6 +48,11 @@ enum pipe {
PIPE_B,
};
+enum plane {
+ PLANE_A = 0,
+ PLANE_B,
+};
+
#define I915_NUM_PIPE 2
/* Interface history:
@@ -85,7 +90,6 @@ struct drm_i915_gem_phys_object {
};
typedef struct _drm_i915_ring_buffer {
- int tail_mask;
unsigned long Size;
u8 *virtual_start;
int head;
@@ -149,6 +153,23 @@ struct drm_i915_error_state {
struct timeval time;
};
+struct drm_i915_display_funcs {
+ void (*dpms)(struct drm_crtc *crtc, int mode);
+ bool (*fbc_enabled)(struct drm_crtc *crtc);
+ void (*enable_fbc)(struct drm_crtc *crtc, unsigned long interval);
+ void (*disable_fbc)(struct drm_device *dev);
+ int (*get_display_clock_speed)(struct drm_device *dev);
+ int (*get_fifo_size)(struct drm_device *dev, int plane);
+ void (*update_wm)(struct drm_device *dev, int planea_clock,
+ int planeb_clock, int sr_hdisplay, int pixel_size);
+ /* clock updates for mode set */
+ /* cursor updates */
+ /* render clock increase/decrease */
+ /* display clock increase/decrease */
+ /* pll clock increase/decrease */
+ /* clock gating init */
+};
+
typedef struct drm_i915_private {
struct drm_device *dev;
@@ -156,6 +177,7 @@ typedef struct drm_i915_private {
void __iomem *regs;
+ struct pci_dev *bridge_dev;
drm_i915_ring_buffer_t ring;
drm_dma_handle_t *status_page_dmah;
@@ -180,6 +202,7 @@ typedef struct drm_i915_private {
spinlock_t user_irq_lock;
/** Refcount for i915_user_irq_get() versus i915_user_irq_put(). */
int user_irq_refcount;
+ u32 trace_irq_seqno;
/** Cached value of IMR to avoid reads in updating the bitfield */
u32 irq_mask_reg;
u32 pipestat[2];
@@ -198,10 +221,21 @@ typedef struct drm_i915_private {
unsigned int sr01, adpa, ppcr, dvob, dvoc, lvds;
int vblank_pipe;
+ /* For hangcheck timer */
+#define DRM_I915_HANGCHECK_PERIOD 75 /* in jiffies */
+ struct timer_list hangcheck_timer;
+ int hangcheck_count;
+ uint32_t last_acthd;
+
bool cursor_needs_physical;
struct drm_mm vram;
+ unsigned long cfb_size;
+ unsigned long cfb_pitch;
+ int cfb_fence;
+ int cfb_plane;
+
int irq_enabled;
struct intel_opregion opregion;
@@ -222,6 +256,8 @@ typedef struct drm_i915_private {
unsigned int edp_support:1;
int lvds_ssc_freq;
+ struct notifier_block lid_notifier;
+
int crt_ddc_bus; /* -1 = unknown, else GPIO to use for CRT DDC */
struct drm_i915_fence_reg fence_regs[16]; /* assume 965 */
int fence_reg_start; /* 4 if userland hasn't ioctl'd us yet */
@@ -234,7 +270,11 @@ typedef struct drm_i915_private {
struct work_struct error_work;
struct workqueue_struct *wq;
+ /* Display functions */
+ struct drm_i915_display_funcs display;
+
/* Register state */
+ bool suspended;
u8 saveLBB;
u32 saveDSPACNTR;
u32 saveDSPBCNTR;
@@ -311,7 +351,7 @@ typedef struct drm_i915_private {
u32 saveIMR;
u32 saveCACHE_MODE_0;
u32 saveD_STATE;
- u32 saveCG_2D_DIS;
+ u32 saveDSPCLK_GATE_D;
u32 saveMI_ARB_STATE;
u32 saveSWF0[16];
u32 saveSWF1[16];
@@ -350,6 +390,15 @@ typedef struct drm_i915_private {
int gtt_mtrr;
/**
+ * Membership on list of all loaded devices, used to evict
+ * inactive buffers under memory pressure.
+ *
+ * Modifications should only be done whilst holding the
+ * shrink_list_lock spinlock.
+ */
+ struct list_head shrink_list;
+
+ /**
* List of objects currently involved in rendering from the
* ringbuffer.
*
@@ -432,7 +481,7 @@ typedef struct drm_i915_private {
* It prevents command submission from occuring and makes
* every pending request fail
*/
- int wedged;
+ atomic_t wedged;
/** Bit 6 swizzling required for X tiling */
uint32_t bit_6_swizzle_x;
@@ -443,6 +492,14 @@ typedef struct drm_i915_private {
struct drm_i915_gem_phys_object *phys_objs[I915_MAX_PHYS_OBJECT];
} mm;
struct sdvo_device_mapping sdvo_mappings[2];
+
+ /* Reclocking support */
+ bool render_reclock_avail;
+ bool lvds_downclock_avail;
+ struct work_struct idle_work;
+ struct timer_list idle_timer;
+ bool busy;
+ u16 orig_clock;
} drm_i915_private_t;
/** driver private structure attached to each drm_gem_object */
@@ -483,10 +540,7 @@ struct drm_i915_gem_object {
* This is the same as gtt_space->start
*/
uint32_t gtt_offset;
- /**
- * Required alignment for the object
- */
- uint32_t gtt_alignment;
+
/**
* Fake offset for use by mmap(2)
*/
@@ -533,6 +587,11 @@ struct drm_i915_gem_object {
* in an execbuffer object list.
*/
int in_execbuffer;
+
+ /**
+ * Advice: are the backing pages purgeable?
+ */
+ int madv;
};
/**
@@ -575,7 +634,10 @@ enum intel_chip_family {
extern struct drm_ioctl_desc i915_ioctls[];
extern int i915_max_ioctl;
extern unsigned int i915_fbpercrtc;
+extern unsigned int i915_powersave;
+extern void i915_save_display(struct drm_device *dev);
+extern void i915_restore_display(struct drm_device *dev);
extern int i915_master_create(struct drm_device *dev, struct drm_master *master);
extern void i915_master_destroy(struct drm_device *dev, struct drm_master *master);
@@ -595,13 +657,16 @@ extern long i915_compat_ioctl(struct file *filp, unsigned int cmd,
extern int i915_emit_box(struct drm_device *dev,
struct drm_clip_rect *boxes,
int i, int DR1, int DR4);
+extern int i965_reset(struct drm_device *dev, u8 flags);
/* i915_irq.c */
+void i915_hangcheck_elapsed(unsigned long data);
extern int i915_irq_emit(struct drm_device *dev, void *data,
struct drm_file *file_priv);
extern int i915_irq_wait(struct drm_device *dev, void *data,
struct drm_file *file_priv);
void i915_user_irq_get(struct drm_device *dev);
+void i915_trace_irq_get(struct drm_device *dev, u32 seqno);
void i915_user_irq_put(struct drm_device *dev);
extern void i915_enable_interrupt (struct drm_device *dev);
@@ -667,6 +732,8 @@ int i915_gem_busy_ioctl(struct drm_device *dev, void *data,
struct drm_file *file_priv);
int i915_gem_throttle_ioctl(struct drm_device *dev, void *data,
struct drm_file *file_priv);
+int i915_gem_madvise_ioctl(struct drm_device *dev, void *data,
+ struct drm_file *file_priv);
int i915_gem_entervt_ioctl(struct drm_device *dev, void *data,
struct drm_file *file_priv);
int i915_gem_leavevt_ioctl(struct drm_device *dev, void *data,
@@ -686,6 +753,7 @@ int i915_gem_object_unbind(struct drm_gem_object *obj);
void i915_gem_release_mmap(struct drm_gem_object *obj);
void i915_gem_lastclose(struct drm_device *dev);
uint32_t i915_get_gem_seqno(struct drm_device *dev);
+bool i915_seqno_passed(uint32_t seq1, uint32_t seq2);
int i915_gem_object_get_fence_reg(struct drm_gem_object *obj);
int i915_gem_object_put_fence_reg(struct drm_gem_object *obj);
void i915_gem_retire_requests(struct drm_device *dev);
@@ -711,6 +779,9 @@ int i915_gem_object_get_pages(struct drm_gem_object *obj);
void i915_gem_object_put_pages(struct drm_gem_object *obj);
void i915_gem_release(struct drm_device * dev, struct drm_file *file_priv);
+void i915_gem_shrinker_init(void);
+void i915_gem_shrinker_exit(void);
+
/* i915_gem_tiling.c */
void i915_gem_detect_bit_6_swizzle(struct drm_device *dev);
void i915_gem_object_do_bit_17_swizzle(struct drm_gem_object *obj);
@@ -730,8 +801,8 @@ void i915_gem_dump_object(struct drm_gem_object *obj, int len,
void i915_dump_lru(struct drm_device *dev, const char *where);
/* i915_debugfs.c */
-int i915_gem_debugfs_init(struct drm_minor *minor);
-void i915_gem_debugfs_cleanup(struct drm_minor *minor);
+int i915_debugfs_init(struct drm_minor *minor);
+void i915_debugfs_cleanup(struct drm_minor *minor);
/* i915_suspend.c */
extern int i915_save_state(struct drm_device *dev);
@@ -757,6 +828,9 @@ static inline void opregion_enable_asle(struct drm_device *dev) { return; }
/* modesetting */
extern void intel_modeset_init(struct drm_device *dev);
extern void intel_modeset_cleanup(struct drm_device *dev);
+extern int intel_modeset_vga_set_state(struct drm_device *dev, bool state);
+extern void i8xx_disable_fbc(struct drm_device *dev);
+extern void g4x_disable_fbc(struct drm_device *dev);
/**
* Lock test for when it's just for synchronization of ring access.
@@ -781,33 +855,32 @@ extern void intel_modeset_cleanup(struct drm_device *dev);
#define I915_VERBOSE 0
-#define RING_LOCALS unsigned int outring, ringmask, outcount; \
- volatile char *virt;
-
-#define BEGIN_LP_RING(n) do { \
- if (I915_VERBOSE) \
- DRM_DEBUG("BEGIN_LP_RING(%d)\n", (n)); \
- if (dev_priv->ring.space < (n)*4) \
- i915_wait_ring(dev, (n)*4, __func__); \
- outcount = 0; \
- outring = dev_priv->ring.tail; \
- ringmask = dev_priv->ring.tail_mask; \
- virt = dev_priv->ring.virtual_start; \
+#define RING_LOCALS volatile unsigned int *ring_virt__;
+
+#define BEGIN_LP_RING(n) do { \
+ int bytes__ = 4*(n); \
+ if (I915_VERBOSE) DRM_DEBUG("BEGIN_LP_RING(%d)\n", (n)); \
+ /* a wrap must occur between instructions so pad beforehand */ \
+ if (unlikely (dev_priv->ring.tail + bytes__ > dev_priv->ring.Size)) \
+ i915_wrap_ring(dev); \
+ if (unlikely (dev_priv->ring.space < bytes__)) \
+ i915_wait_ring(dev, bytes__, __func__); \
+ ring_virt__ = (unsigned int *) \
+ (dev_priv->ring.virtual_start + dev_priv->ring.tail); \
+ dev_priv->ring.tail += bytes__; \
+ dev_priv->ring.tail &= dev_priv->ring.Size - 1; \
+ dev_priv->ring.space -= bytes__; \
} while (0)
-#define OUT_RING(n) do { \
+#define OUT_RING(n) do { \
if (I915_VERBOSE) DRM_DEBUG(" OUT_RING %x\n", (int)(n)); \
- *(volatile unsigned int *)(virt + outring) = (n); \
- outcount++; \
- outring += 4; \
- outring &= ringmask; \
+ *ring_virt__++ = (n); \
} while (0)
#define ADVANCE_LP_RING() do { \
- if (I915_VERBOSE) DRM_DEBUG("ADVANCE_LP_RING %x\n", outring); \
- dev_priv->ring.tail = outring; \
- dev_priv->ring.space -= outcount * 4; \
- I915_WRITE(PRB0_TAIL, outring); \
+ if (I915_VERBOSE) \
+ DRM_DEBUG("ADVANCE_LP_RING %x\n", dev_priv->ring.tail); \
+ I915_WRITE(PRB0_TAIL, dev_priv->ring.tail); \
} while(0)
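
A minimal sketch of how callers typically combine these macros (illustrative only, not part of the patch; the function name is made up, while MI_FLUSH and MI_NOOP are the usual opcodes from i915_reg.h):

    static void example_emit_mi_flush(struct drm_device *dev)
    {
            drm_i915_private_t *dev_priv = dev->dev_private;
            RING_LOCALS;

            /* reserve 8 bytes, wrapping or waiting first if the ring is short on space */
            BEGIN_LP_RING(2);
            OUT_RING(MI_FLUSH);
            OUT_RING(MI_NOOP);
            /* write the new tail to PRB0_TAIL so the GPU picks up the commands */
            ADVANCE_LP_RING();
    }

The trailing MI_NOOP keeps the number of emitted dwords even, the usual convention in this driver so the tail stays qword aligned.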
/**
@@ -830,6 +903,7 @@ extern void intel_modeset_cleanup(struct drm_device *dev);
#define I915_GEM_HWS_INDEX 0x20
#define I915_BREADCRUMB_INDEX 0x21
+extern int i915_wrap_ring(struct drm_device * dev);
extern int i915_wait_ring(struct drm_device * dev, int n, const char *caller);
#define IS_I830(dev) ((dev)->pci_device == 0x3577)
@@ -854,6 +928,7 @@ extern int i915_wait_ring(struct drm_device * dev, int n, const char *caller);
(dev)->pci_device == 0x2E12 || \
(dev)->pci_device == 0x2E22 || \
(dev)->pci_device == 0x2E32 || \
+ (dev)->pci_device == 0x2E42 || \
(dev)->pci_device == 0x0042 || \
(dev)->pci_device == 0x0046)
@@ -866,6 +941,7 @@ extern int i915_wait_ring(struct drm_device * dev, int n, const char *caller);
(dev)->pci_device == 0x2E12 || \
(dev)->pci_device == 0x2E22 || \
(dev)->pci_device == 0x2E32 || \
+ (dev)->pci_device == 0x2E42 || \
IS_GM45(dev))
#define IS_IGDG(dev) ((dev)->pci_device == 0xa001)
@@ -899,10 +975,14 @@ extern int i915_wait_ring(struct drm_device * dev, int n, const char *caller);
#define SUPPORTS_INTEGRATED_HDMI(dev) (IS_G4X(dev) || IS_IGDNG(dev))
#define SUPPORTS_INTEGRATED_DP(dev) (IS_G4X(dev) || IS_IGDNG(dev))
#define SUPPORTS_EDP(dev) (IS_IGDNG_M(dev))
-#define I915_HAS_HOTPLUG(dev) (IS_I945G(dev) || IS_I945GM(dev) || IS_I965G(dev))
+#define I915_HAS_HOTPLUG(dev) (IS_I945G(dev) || IS_I945GM(dev) || IS_G33(dev) || IS_I965G(dev))
/* dsparb controlled by hw only */
#define DSPARB_HWCONTROL(dev) (IS_G4X(dev) || IS_IGDNG(dev))
+#define HAS_FW_BLC(dev) (IS_I9XX(dev) || IS_G4X(dev) || IS_IGDNG(dev))
+#define HAS_PIPE_CXSR(dev) (IS_G4X(dev) || IS_IGDNG(dev))
+#define I915_HAS_FBC(dev) (IS_MOBILE(dev) && (IS_I9XX(dev) || IS_I965G(dev)))
+
#define PRIMARY_RINGBUFFER_SIZE (128*1024)
#endif
diff --git a/drivers/gpu/drm/i915/i915_gem.c b/drivers/gpu/drm/i915/i915_gem.c
index 80e5ba490dc..abfc27b0c2e 100644
--- a/drivers/gpu/drm/i915/i915_gem.c
+++ b/drivers/gpu/drm/i915/i915_gem.c
@@ -29,6 +29,8 @@
#include "drm.h"
#include "i915_drm.h"
#include "i915_drv.h"
+#include "i915_trace.h"
+#include "intel_drv.h"
#include <linux/swap.h>
#include <linux/pci.h>
@@ -47,11 +49,15 @@ static int i915_gem_object_wait_rendering(struct drm_gem_object *obj);
static int i915_gem_object_bind_to_gtt(struct drm_gem_object *obj,
unsigned alignment);
static void i915_gem_clear_fence_reg(struct drm_gem_object *obj);
-static int i915_gem_evict_something(struct drm_device *dev);
+static int i915_gem_evict_something(struct drm_device *dev, int min_size);
+static int i915_gem_evict_from_inactive_list(struct drm_device *dev);
static int i915_gem_phys_pwrite(struct drm_device *dev, struct drm_gem_object *obj,
struct drm_i915_gem_pwrite *args,
struct drm_file *file_priv);
+static LIST_HEAD(shrink_list);
+static DEFINE_SPINLOCK(shrink_list_lock);
+
int i915_gem_do_init(struct drm_device *dev, unsigned long start,
unsigned long end)
{
@@ -111,7 +117,8 @@ i915_gem_create_ioctl(struct drm_device *dev, void *data,
{
struct drm_i915_gem_create *args = data;
struct drm_gem_object *obj;
- int handle, ret;
+ int ret;
+ u32 handle;
args->size = roundup(args->size, PAGE_SIZE);
@@ -314,6 +321,45 @@ fail_unlock:
return ret;
}
+static inline gfp_t
+i915_gem_object_get_page_gfp_mask (struct drm_gem_object *obj)
+{
+ return mapping_gfp_mask(obj->filp->f_path.dentry->d_inode->i_mapping);
+}
+
+static inline void
+i915_gem_object_set_page_gfp_mask (struct drm_gem_object *obj, gfp_t gfp)
+{
+ mapping_set_gfp_mask(obj->filp->f_path.dentry->d_inode->i_mapping, gfp);
+}
+
+static int
+i915_gem_object_get_pages_or_evict(struct drm_gem_object *obj)
+{
+ int ret;
+
+ ret = i915_gem_object_get_pages(obj);
+
+ /* If we've insufficient memory to map in the pages, attempt
+ * to make some space by throwing out some old buffers.
+ */
+ if (ret == -ENOMEM) {
+ struct drm_device *dev = obj->dev;
+ gfp_t gfp;
+
+ ret = i915_gem_evict_something(dev, obj->size);
+ if (ret)
+ return ret;
+
+ gfp = i915_gem_object_get_page_gfp_mask(obj);
+ i915_gem_object_set_page_gfp_mask(obj, gfp & ~__GFP_NORETRY);
+ ret = i915_gem_object_get_pages(obj);
+ i915_gem_object_set_page_gfp_mask (obj, gfp);
+ }
+
+ return ret;
+}
+
/**
* This is the fallback shmem pread path, which allocates temporary storage
* in kernel space to copy_to_user into outside of the struct_mutex, so we
@@ -365,8 +411,8 @@ i915_gem_shmem_pread_slow(struct drm_device *dev, struct drm_gem_object *obj,
mutex_lock(&dev->struct_mutex);
- ret = i915_gem_object_get_pages(obj);
- if (ret != 0)
+ ret = i915_gem_object_get_pages_or_evict(obj);
+ if (ret)
goto fail_unlock;
ret = i915_gem_object_set_cpu_read_domain_range(obj, args->offset,
@@ -840,8 +886,8 @@ i915_gem_shmem_pwrite_slow(struct drm_device *dev, struct drm_gem_object *obj,
mutex_lock(&dev->struct_mutex);
- ret = i915_gem_object_get_pages(obj);
- if (ret != 0)
+ ret = i915_gem_object_get_pages_or_evict(obj);
+ if (ret)
goto fail_unlock;
ret = i915_gem_object_set_to_cpu_domain(obj, 1);
@@ -981,6 +1027,7 @@ i915_gem_set_domain_ioctl(struct drm_device *dev, void *data,
struct drm_i915_private *dev_priv = dev->dev_private;
struct drm_i915_gem_set_domain *args = data;
struct drm_gem_object *obj;
+ struct drm_i915_gem_object *obj_priv;
uint32_t read_domains = args->read_domains;
uint32_t write_domain = args->write_domain;
int ret;
@@ -1004,15 +1051,17 @@ i915_gem_set_domain_ioctl(struct drm_device *dev, void *data,
obj = drm_gem_object_lookup(dev, file_priv, args->handle);
if (obj == NULL)
return -EBADF;
+ obj_priv = obj->driver_private;
mutex_lock(&dev->struct_mutex);
+
+ intel_mark_busy(dev, obj);
+
#if WATCH_BUF
DRM_INFO("set_domain_ioctl %p(%zd), %08x %08x\n",
obj, obj->size, read_domains, write_domain);
#endif
if (read_domains & I915_GEM_DOMAIN_GTT) {
- struct drm_i915_gem_object *obj_priv = obj->driver_private;
-
ret = i915_gem_object_set_to_gtt_domain(obj, write_domain != 0);
/* Update the LRU on the fence for the CPU access that's
@@ -1150,28 +1199,22 @@ int i915_gem_fault(struct vm_area_struct *vma, struct vm_fault *vmf)
/* Now bind it into the GTT if needed */
mutex_lock(&dev->struct_mutex);
if (!obj_priv->gtt_space) {
- ret = i915_gem_object_bind_to_gtt(obj, obj_priv->gtt_alignment);
- if (ret) {
- mutex_unlock(&dev->struct_mutex);
- return VM_FAULT_SIGBUS;
- }
-
- ret = i915_gem_object_set_to_gtt_domain(obj, write);
- if (ret) {
- mutex_unlock(&dev->struct_mutex);
- return VM_FAULT_SIGBUS;
- }
+ ret = i915_gem_object_bind_to_gtt(obj, 0);
+ if (ret)
+ goto unlock;
list_add_tail(&obj_priv->list, &dev_priv->mm.inactive_list);
+
+ ret = i915_gem_object_set_to_gtt_domain(obj, write);
+ if (ret)
+ goto unlock;
}
/* Need a new fence register? */
if (obj_priv->tiling_mode != I915_TILING_NONE) {
ret = i915_gem_object_get_fence_reg(obj);
- if (ret) {
- mutex_unlock(&dev->struct_mutex);
- return VM_FAULT_SIGBUS;
- }
+ if (ret)
+ goto unlock;
}
pfn = ((dev->agp->base + obj_priv->gtt_offset) >> PAGE_SHIFT) +
@@ -1179,18 +1222,18 @@ int i915_gem_fault(struct vm_area_struct *vma, struct vm_fault *vmf)
/* Finally, remap it using the new GTT offset */
ret = vm_insert_pfn(vma, (unsigned long)vmf->virtual_address, pfn);
-
+unlock:
mutex_unlock(&dev->struct_mutex);
switch (ret) {
+ case 0:
+ case -ERESTARTSYS:
+ return VM_FAULT_NOPAGE;
case -ENOMEM:
case -EAGAIN:
return VM_FAULT_OOM;
- case -EFAULT:
- case -EINVAL:
- return VM_FAULT_SIGBUS;
default:
- return VM_FAULT_NOPAGE;
+ return VM_FAULT_SIGBUS;
}
}
@@ -1383,6 +1426,14 @@ i915_gem_mmap_gtt_ioctl(struct drm_device *dev, void *data,
obj_priv = obj->driver_private;
+ if (obj_priv->madv != I915_MADV_WILLNEED) {
+ DRM_ERROR("Attempting to mmap a purgeable buffer\n");
+ drm_gem_object_unreference(obj);
+ mutex_unlock(&dev->struct_mutex);
+ return -EINVAL;
+ }
+
+
if (!obj_priv->mmap_offset) {
ret = i915_gem_create_mmap_offset(obj);
if (ret) {
@@ -1394,22 +1445,12 @@ i915_gem_mmap_gtt_ioctl(struct drm_device *dev, void *data,
args->offset = obj_priv->mmap_offset;
- obj_priv->gtt_alignment = i915_gem_get_gtt_alignment(obj);
-
- /* Make sure the alignment is correct for fence regs etc */
- if (obj_priv->agp_mem &&
- (obj_priv->gtt_offset & (obj_priv->gtt_alignment - 1))) {
- drm_gem_object_unreference(obj);
- mutex_unlock(&dev->struct_mutex);
- return -EINVAL;
- }
-
/*
* Pull it into the GTT so that we have a page list (makes the
* initial fault faster and any subsequent flushing possible).
*/
if (!obj_priv->agp_mem) {
- ret = i915_gem_object_bind_to_gtt(obj, obj_priv->gtt_alignment);
+ ret = i915_gem_object_bind_to_gtt(obj, 0);
if (ret) {
drm_gem_object_unreference(obj);
mutex_unlock(&dev->struct_mutex);
@@ -1432,6 +1473,7 @@ i915_gem_object_put_pages(struct drm_gem_object *obj)
int i;
BUG_ON(obj_priv->pages_refcount == 0);
+ BUG_ON(obj_priv->madv == __I915_MADV_PURGED);
if (--obj_priv->pages_refcount != 0)
return;
@@ -1439,13 +1481,21 @@ i915_gem_object_put_pages(struct drm_gem_object *obj)
if (obj_priv->tiling_mode != I915_TILING_NONE)
i915_gem_object_save_bit_17_swizzle(obj);
- for (i = 0; i < page_count; i++)
- if (obj_priv->pages[i] != NULL) {
- if (obj_priv->dirty)
- set_page_dirty(obj_priv->pages[i]);
+ if (obj_priv->madv == I915_MADV_DONTNEED)
+ obj_priv->dirty = 0;
+
+ for (i = 0; i < page_count; i++) {
+ if (obj_priv->pages[i] == NULL)
+ break;
+
+ if (obj_priv->dirty)
+ set_page_dirty(obj_priv->pages[i]);
+
+ if (obj_priv->madv == I915_MADV_WILLNEED)
mark_page_accessed(obj_priv->pages[i]);
- page_cache_release(obj_priv->pages[i]);
- }
+
+ page_cache_release(obj_priv->pages[i]);
+ }
obj_priv->dirty = 0;
drm_free_large(obj_priv->pages);
@@ -1484,6 +1534,26 @@ i915_gem_object_move_to_flushing(struct drm_gem_object *obj)
obj_priv->last_rendering_seqno = 0;
}
+/* Immediately discard the backing storage */
+static void
+i915_gem_object_truncate(struct drm_gem_object *obj)
+{
+ struct drm_i915_gem_object *obj_priv = obj->driver_private;
+ struct inode *inode;
+
+ inode = obj->filp->f_path.dentry->d_inode;
+ if (inode->i_op->truncate)
+ inode->i_op->truncate (inode);
+
+ obj_priv->madv = __I915_MADV_PURGED;
+}
+
+static inline int
+i915_gem_object_is_purgeable(struct drm_i915_gem_object *obj_priv)
+{
+ return obj_priv->madv == I915_MADV_DONTNEED;
+}
+
static void
i915_gem_object_move_to_inactive(struct drm_gem_object *obj)
{
@@ -1572,15 +1642,24 @@ i915_add_request(struct drm_device *dev, struct drm_file *file_priv,
if ((obj->write_domain & flush_domains) ==
obj->write_domain) {
+ uint32_t old_write_domain = obj->write_domain;
+
obj->write_domain = 0;
i915_gem_object_move_to_active(obj, seqno);
+
+ trace_i915_gem_object_change_domain(obj,
+ obj->read_domains,
+ old_write_domain);
}
}
}
- if (was_empty && !dev_priv->mm.suspended)
- queue_delayed_work(dev_priv->wq, &dev_priv->mm.retire_work, HZ);
+ if (!dev_priv->mm.suspended) {
+ mod_timer(&dev_priv->hangcheck_timer, jiffies + DRM_I915_HANGCHECK_PERIOD);
+ if (was_empty)
+ queue_delayed_work(dev_priv->wq, &dev_priv->mm.retire_work, HZ);
+ }
return seqno;
}
@@ -1618,6 +1697,8 @@ i915_gem_retire_request(struct drm_device *dev,
{
drm_i915_private_t *dev_priv = dev->dev_private;
+ trace_i915_gem_request_retire(dev, request->seqno);
+
/* Move any buffers on the active list that are no longer referenced
* by the ringbuffer to the flushing/inactive lists as appropriate.
*/
@@ -1666,7 +1747,7 @@ out:
/**
* Returns true if seq1 is later than seq2.
*/
-static int
+bool
i915_seqno_passed(uint32_t seq1, uint32_t seq2)
{
return (int32_t)(seq1 - seq2) >= 0;
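
The signed cast is what keeps this comparison correct across 32-bit seqno wraparound; a quick worked example (not part of the patch):

    /* i915_seqno_passed(0x00000002, 0xfffffffe):
     *   seq1 - seq2 = 0x00000002 - 0xfffffffe = 0x00000004  (mod 2^32)
     *   (int32_t)0x00000004 = 4 >= 0  -> true: seq1 is "later" even though it
     *   is numerically smaller, because the counter has wrapped.
     */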
@@ -1689,7 +1770,7 @@ i915_gem_retire_requests(struct drm_device *dev)
drm_i915_private_t *dev_priv = dev->dev_private;
uint32_t seqno;
- if (!dev_priv->hw_status_page)
+ if (!dev_priv->hw_status_page || list_empty(&dev_priv->mm.request_list))
return;
seqno = i915_get_gem_seqno(dev);
@@ -1704,7 +1785,7 @@ i915_gem_retire_requests(struct drm_device *dev)
retiring_seqno = request->seqno;
if (i915_seqno_passed(seqno, retiring_seqno) ||
- dev_priv->mm.wedged) {
+ atomic_read(&dev_priv->mm.wedged)) {
i915_gem_retire_request(dev, request);
list_del(&request->list);
@@ -1713,6 +1794,12 @@ i915_gem_retire_requests(struct drm_device *dev)
} else
break;
}
+
+ if (unlikely (dev_priv->trace_irq_seqno &&
+ i915_seqno_passed(dev_priv->trace_irq_seqno, seqno))) {
+ i915_user_irq_put(dev);
+ dev_priv->trace_irq_seqno = 0;
+ }
}
void
@@ -1746,6 +1833,9 @@ i915_wait_request(struct drm_device *dev, uint32_t seqno)
BUG_ON(seqno == 0);
+ if (atomic_read(&dev_priv->mm.wedged))
+ return -EIO;
+
if (!i915_seqno_passed(i915_get_gem_seqno(dev), seqno)) {
if (IS_IGDNG(dev))
ier = I915_READ(DEIER) | I915_READ(GTIER);
@@ -1758,16 +1848,20 @@ i915_wait_request(struct drm_device *dev, uint32_t seqno)
i915_driver_irq_postinstall(dev);
}
+ trace_i915_gem_request_wait_begin(dev, seqno);
+
dev_priv->mm.waiting_gem_seqno = seqno;
i915_user_irq_get(dev);
ret = wait_event_interruptible(dev_priv->irq_queue,
i915_seqno_passed(i915_get_gem_seqno(dev),
seqno) ||
- dev_priv->mm.wedged);
+ atomic_read(&dev_priv->mm.wedged));
i915_user_irq_put(dev);
dev_priv->mm.waiting_gem_seqno = 0;
+
+ trace_i915_gem_request_wait_end(dev, seqno);
}
- if (dev_priv->mm.wedged)
+ if (atomic_read(&dev_priv->mm.wedged))
ret = -EIO;
if (ret && ret != -ERESTARTSYS)
@@ -1798,6 +1892,8 @@ i915_gem_flush(struct drm_device *dev,
DRM_INFO("%s: invalidate %08x flush %08x\n", __func__,
invalidate_domains, flush_domains);
#endif
+ trace_i915_gem_request_flush(dev, dev_priv->mm.next_gem_seqno,
+ invalidate_domains, flush_domains);
if (flush_domains & I915_GEM_DOMAIN_CPU)
drm_agp_chipset_flush(dev);
@@ -1910,6 +2006,12 @@ i915_gem_object_unbind(struct drm_gem_object *obj)
return -EINVAL;
}
+ /* blow away mappings if mapped through GTT */
+ i915_gem_release_mmap(obj);
+
+ if (obj_priv->fence_reg != I915_FENCE_REG_NONE)
+ i915_gem_clear_fence_reg(obj);
+
/* Move the object to the CPU domain to ensure that
* any possible CPU writes while it's not in the GTT
* are flushed when we go to remap it. This will
@@ -1923,21 +2025,16 @@ i915_gem_object_unbind(struct drm_gem_object *obj)
return ret;
}
+ BUG_ON(obj_priv->active);
+
if (obj_priv->agp_mem != NULL) {
drm_unbind_agp(obj_priv->agp_mem);
drm_free_agp(obj_priv->agp_mem, obj->size / PAGE_SIZE);
obj_priv->agp_mem = NULL;
}
- BUG_ON(obj_priv->active);
-
- /* blow away mappings if mapped through GTT */
- i915_gem_release_mmap(obj);
-
- if (obj_priv->fence_reg != I915_FENCE_REG_NONE)
- i915_gem_clear_fence_reg(obj);
-
i915_gem_object_put_pages(obj);
+ BUG_ON(obj_priv->pages_refcount);
if (obj_priv->gtt_space) {
atomic_dec(&dev->gtt_count);
@@ -1951,40 +2048,113 @@ i915_gem_object_unbind(struct drm_gem_object *obj)
if (!list_empty(&obj_priv->list))
list_del_init(&obj_priv->list);
+ if (i915_gem_object_is_purgeable(obj_priv))
+ i915_gem_object_truncate(obj);
+
+ trace_i915_gem_object_unbind(obj);
+
return 0;
}
+static struct drm_gem_object *
+i915_gem_find_inactive_object(struct drm_device *dev, int min_size)
+{
+ drm_i915_private_t *dev_priv = dev->dev_private;
+ struct drm_i915_gem_object *obj_priv;
+ struct drm_gem_object *best = NULL;
+ struct drm_gem_object *first = NULL;
+
+ /* Try to find the smallest clean object */
+ list_for_each_entry(obj_priv, &dev_priv->mm.inactive_list, list) {
+ struct drm_gem_object *obj = obj_priv->obj;
+ if (obj->size >= min_size) {
+ if ((!obj_priv->dirty ||
+ i915_gem_object_is_purgeable(obj_priv)) &&
+ (!best || obj->size < best->size)) {
+ best = obj;
+ if (best->size == min_size)
+ return best;
+ }
+ if (!first)
+ first = obj;
+ }
+ }
+
+ return best ? best : first;
+}
+
static int
-i915_gem_evict_something(struct drm_device *dev)
+i915_gem_evict_everything(struct drm_device *dev)
+{
+ drm_i915_private_t *dev_priv = dev->dev_private;
+ uint32_t seqno;
+ int ret;
+ bool lists_empty;
+
+ spin_lock(&dev_priv->mm.active_list_lock);
+ lists_empty = (list_empty(&dev_priv->mm.inactive_list) &&
+ list_empty(&dev_priv->mm.flushing_list) &&
+ list_empty(&dev_priv->mm.active_list));
+ spin_unlock(&dev_priv->mm.active_list_lock);
+
+ if (lists_empty)
+ return -ENOSPC;
+
+ /* Flush everything (on to the inactive lists) and evict */
+ i915_gem_flush(dev, I915_GEM_GPU_DOMAINS, I915_GEM_GPU_DOMAINS);
+ seqno = i915_add_request(dev, NULL, I915_GEM_GPU_DOMAINS);
+ if (seqno == 0)
+ return -ENOMEM;
+
+ ret = i915_wait_request(dev, seqno);
+ if (ret)
+ return ret;
+
+ ret = i915_gem_evict_from_inactive_list(dev);
+ if (ret)
+ return ret;
+
+ spin_lock(&dev_priv->mm.active_list_lock);
+ lists_empty = (list_empty(&dev_priv->mm.inactive_list) &&
+ list_empty(&dev_priv->mm.flushing_list) &&
+ list_empty(&dev_priv->mm.active_list));
+ spin_unlock(&dev_priv->mm.active_list_lock);
+ BUG_ON(!lists_empty);
+
+ return 0;
+}
+
+static int
+i915_gem_evict_something(struct drm_device *dev, int min_size)
{
drm_i915_private_t *dev_priv = dev->dev_private;
struct drm_gem_object *obj;
- struct drm_i915_gem_object *obj_priv;
- int ret = 0;
+ int ret;
for (;;) {
+ i915_gem_retire_requests(dev);
+
/* If there's an inactive buffer available now, grab it
* and be done.
*/
- if (!list_empty(&dev_priv->mm.inactive_list)) {
- obj_priv = list_first_entry(&dev_priv->mm.inactive_list,
- struct drm_i915_gem_object,
- list);
- obj = obj_priv->obj;
- BUG_ON(obj_priv->pin_count != 0);
+ obj = i915_gem_find_inactive_object(dev, min_size);
+ if (obj) {
+ struct drm_i915_gem_object *obj_priv;
+
#if WATCH_LRU
DRM_INFO("%s: evicting %p\n", __func__, obj);
#endif
+ obj_priv = obj->driver_private;
+ BUG_ON(obj_priv->pin_count != 0);
BUG_ON(obj_priv->active);
/* Wait on the rendering and unbind the buffer. */
- ret = i915_gem_object_unbind(obj);
- break;
+ return i915_gem_object_unbind(obj);
}
/* If we didn't get anything, but the ring is still processing
- * things, wait for one of those things to finish and hopefully
- * leave us a buffer to evict.
+ * things, wait for the next to finish and hopefully leave us
+ * a buffer to evict.
*/
if (!list_empty(&dev_priv->mm.request_list)) {
struct drm_i915_gem_request *request;
@@ -1995,16 +2165,9 @@ i915_gem_evict_something(struct drm_device *dev)
ret = i915_wait_request(dev, request->seqno);
if (ret)
- break;
+ return ret;
- /* if waiting caused an object to become inactive,
- * then loop around and wait for it. Otherwise, we
- * assume that waiting freed and unbound something,
- * so there should now be some space in the GTT
- */
- if (!list_empty(&dev_priv->mm.inactive_list))
- continue;
- break;
+ continue;
}
/* If we didn't have anything on the request list but there
@@ -2013,46 +2176,44 @@ i915_gem_evict_something(struct drm_device *dev)
* will get moved to inactive.
*/
if (!list_empty(&dev_priv->mm.flushing_list)) {
- obj_priv = list_first_entry(&dev_priv->mm.flushing_list,
- struct drm_i915_gem_object,
- list);
- obj = obj_priv->obj;
+ struct drm_i915_gem_object *obj_priv;
- i915_gem_flush(dev,
- obj->write_domain,
- obj->write_domain);
- i915_add_request(dev, NULL, obj->write_domain);
+ /* Find an object that we can immediately reuse */
+ list_for_each_entry(obj_priv, &dev_priv->mm.flushing_list, list) {
+ obj = obj_priv->obj;
+ if (obj->size >= min_size)
+ break;
- obj = NULL;
- continue;
- }
+ obj = NULL;
+ }
- DRM_ERROR("inactive empty %d request empty %d "
- "flushing empty %d\n",
- list_empty(&dev_priv->mm.inactive_list),
- list_empty(&dev_priv->mm.request_list),
- list_empty(&dev_priv->mm.flushing_list));
- /* If we didn't do any of the above, there's nothing to be done
- * and we just can't fit it in.
- */
- return -ENOSPC;
- }
- return ret;
-}
+ if (obj != NULL) {
+ uint32_t seqno;
-static int
-i915_gem_evict_everything(struct drm_device *dev)
-{
- int ret;
+ i915_gem_flush(dev,
+ obj->write_domain,
+ obj->write_domain);
+ seqno = i915_add_request(dev, NULL, obj->write_domain);
+ if (seqno == 0)
+ return -ENOMEM;
- for (;;) {
- ret = i915_gem_evict_something(dev);
- if (ret != 0)
- break;
+ ret = i915_wait_request(dev, seqno);
+ if (ret)
+ return ret;
+
+ continue;
+ }
+ }
+
+ /* If we didn't do any of the above, there's no single buffer
+ * large enough to swap out for the new one, so just evict
+ * everything and start again. (This should be rare.)
+ */
+ if (!list_empty(&dev_priv->mm.inactive_list))
+ return i915_gem_evict_from_inactive_list(dev);
+ else
+ return i915_gem_evict_everything(dev);
}
- if (ret == -ENOSPC)
- return 0;
- return ret;
}
int
@@ -2075,7 +2236,6 @@ i915_gem_object_get_pages(struct drm_gem_object *obj)
BUG_ON(obj_priv->pages != NULL);
obj_priv->pages = drm_calloc_large(page_count, sizeof(struct page *));
if (obj_priv->pages == NULL) {
- DRM_ERROR("Faled to allocate page list\n");
obj_priv->pages_refcount--;
return -ENOMEM;
}
@@ -2086,7 +2246,6 @@ i915_gem_object_get_pages(struct drm_gem_object *obj)
page = read_mapping_page(mapping, i, NULL);
if (IS_ERR(page)) {
ret = PTR_ERR(page);
- DRM_ERROR("read_mapping_page failed: %d\n", ret);
i915_gem_object_put_pages(obj);
return ret;
}
@@ -2323,6 +2482,8 @@ i915_gem_object_get_fence_reg(struct drm_gem_object *obj)
else
i830_write_fence_reg(reg);
+ trace_i915_gem_object_get_fence(obj, i, obj_priv->tiling_mode);
+
return 0;
}
@@ -2405,10 +2566,17 @@ i915_gem_object_bind_to_gtt(struct drm_gem_object *obj, unsigned alignment)
drm_i915_private_t *dev_priv = dev->dev_private;
struct drm_i915_gem_object *obj_priv = obj->driver_private;
struct drm_mm_node *free_space;
- int page_count, ret;
+ bool retry_alloc = false;
+ int ret;
if (dev_priv->mm.suspended)
return -EBUSY;
+
+ if (obj_priv->madv != I915_MADV_WILLNEED) {
+ DRM_ERROR("Attempting to bind a purgeable object\n");
+ return -EINVAL;
+ }
+
if (alignment == 0)
alignment = i915_gem_get_gtt_alignment(obj);
if (alignment & (i915_gem_get_gtt_alignment(obj) - 1)) {
@@ -2428,30 +2596,16 @@ i915_gem_object_bind_to_gtt(struct drm_gem_object *obj, unsigned alignment)
}
}
if (obj_priv->gtt_space == NULL) {
- bool lists_empty;
-
/* If the gtt is empty and we're still having trouble
* fitting our object in, we're out of memory.
*/
#if WATCH_LRU
DRM_INFO("%s: GTT full, evicting something\n", __func__);
#endif
- spin_lock(&dev_priv->mm.active_list_lock);
- lists_empty = (list_empty(&dev_priv->mm.inactive_list) &&
- list_empty(&dev_priv->mm.flushing_list) &&
- list_empty(&dev_priv->mm.active_list));
- spin_unlock(&dev_priv->mm.active_list_lock);
- if (lists_empty) {
- DRM_ERROR("GTT full, but LRU list empty\n");
- return -ENOSPC;
- }
-
- ret = i915_gem_evict_something(dev);
- if (ret != 0) {
- if (ret != -ERESTARTSYS)
- DRM_ERROR("Failed to evict a buffer %d\n", ret);
+ ret = i915_gem_evict_something(dev, obj->size);
+ if (ret)
return ret;
- }
+
goto search_free;
}
@@ -2459,27 +2613,56 @@ i915_gem_object_bind_to_gtt(struct drm_gem_object *obj, unsigned alignment)
DRM_INFO("Binding object of size %zd at 0x%08x\n",
obj->size, obj_priv->gtt_offset);
#endif
+ if (retry_alloc) {
+ i915_gem_object_set_page_gfp_mask(obj,
+ i915_gem_object_get_page_gfp_mask(obj) & ~__GFP_NORETRY);
+ }
ret = i915_gem_object_get_pages(obj);
+ if (retry_alloc) {
+ i915_gem_object_set_page_gfp_mask(obj,
+ i915_gem_object_get_page_gfp_mask(obj) | __GFP_NORETRY);
+ }
if (ret) {
drm_mm_put_block(obj_priv->gtt_space);
obj_priv->gtt_space = NULL;
+
+ if (ret == -ENOMEM) {
+ /* first try to clear up some space from the GTT */
+ ret = i915_gem_evict_something(dev, obj->size);
+ if (ret) {
+ /* now try to shrink everyone else */
+ if (!retry_alloc) {
+ retry_alloc = true;
+ goto search_free;
+ }
+
+ return ret;
+ }
+
+ goto search_free;
+ }
+
return ret;
}
- page_count = obj->size / PAGE_SIZE;
/* Create an AGP memory structure pointing at our pages, and bind it
* into the GTT.
*/
obj_priv->agp_mem = drm_agp_bind_pages(dev,
obj_priv->pages,
- page_count,
+ obj->size >> PAGE_SHIFT,
obj_priv->gtt_offset,
obj_priv->agp_type);
if (obj_priv->agp_mem == NULL) {
i915_gem_object_put_pages(obj);
drm_mm_put_block(obj_priv->gtt_space);
obj_priv->gtt_space = NULL;
- return -ENOMEM;
+
+ ret = i915_gem_evict_something(dev, obj->size);
+ if (ret)
+ return ret;
+
+ goto search_free;
}
atomic_inc(&dev->gtt_count);
atomic_add(obj->size, &dev->gtt_memory);
@@ -2491,6 +2674,8 @@ i915_gem_object_bind_to_gtt(struct drm_gem_object *obj, unsigned alignment)
BUG_ON(obj->read_domains & I915_GEM_GPU_DOMAINS);
BUG_ON(obj->write_domain & I915_GEM_GPU_DOMAINS);
+ trace_i915_gem_object_bind(obj, obj_priv->gtt_offset);
+
return 0;
}
@@ -2506,15 +2691,7 @@ i915_gem_clflush_object(struct drm_gem_object *obj)
if (obj_priv->pages == NULL)
return;
- /* XXX: The 865 in particular appears to be weird in how it handles
- * cache flushing. We haven't figured it out, but the
- * clflush+agp_chipset_flush doesn't appear to successfully get the
- * data visible to the PGU, while wbinvd + agp_chipset_flush does.
- */
- if (IS_I865G(obj->dev)) {
- wbinvd();
- return;
- }
+ trace_i915_gem_object_clflush(obj);
drm_clflush_pages(obj_priv->pages, obj->size / PAGE_SIZE);
}
@@ -2525,21 +2702,29 @@ i915_gem_object_flush_gpu_write_domain(struct drm_gem_object *obj)
{
struct drm_device *dev = obj->dev;
uint32_t seqno;
+ uint32_t old_write_domain;
if ((obj->write_domain & I915_GEM_GPU_DOMAINS) == 0)
return;
/* Queue the GPU write cache flushing we need. */
+ old_write_domain = obj->write_domain;
i915_gem_flush(dev, 0, obj->write_domain);
seqno = i915_add_request(dev, NULL, obj->write_domain);
obj->write_domain = 0;
i915_gem_object_move_to_active(obj, seqno);
+
+ trace_i915_gem_object_change_domain(obj,
+ obj->read_domains,
+ old_write_domain);
}
/** Flushes the GTT write domain for the object if it's dirty. */
static void
i915_gem_object_flush_gtt_write_domain(struct drm_gem_object *obj)
{
+ uint32_t old_write_domain;
+
if (obj->write_domain != I915_GEM_DOMAIN_GTT)
return;
@@ -2547,7 +2732,12 @@ i915_gem_object_flush_gtt_write_domain(struct drm_gem_object *obj)
* to it immediately go to main memory as far as we know, so there's
* no chipset flush. It also doesn't land in render cache.
*/
+ old_write_domain = obj->write_domain;
obj->write_domain = 0;
+
+ trace_i915_gem_object_change_domain(obj,
+ obj->read_domains,
+ old_write_domain);
}
/** Flushes the CPU write domain for the object if it's dirty. */
@@ -2555,13 +2745,19 @@ static void
i915_gem_object_flush_cpu_write_domain(struct drm_gem_object *obj)
{
struct drm_device *dev = obj->dev;
+ uint32_t old_write_domain;
if (obj->write_domain != I915_GEM_DOMAIN_CPU)
return;
i915_gem_clflush_object(obj);
drm_agp_chipset_flush(dev);
+ old_write_domain = obj->write_domain;
obj->write_domain = 0;
+
+ trace_i915_gem_object_change_domain(obj,
+ obj->read_domains,
+ old_write_domain);
}
/**
@@ -2574,6 +2770,7 @@ int
i915_gem_object_set_to_gtt_domain(struct drm_gem_object *obj, int write)
{
struct drm_i915_gem_object *obj_priv = obj->driver_private;
+ uint32_t old_write_domain, old_read_domains;
int ret;
/* Not valid to be called on unbound objects. */
@@ -2586,6 +2783,9 @@ i915_gem_object_set_to_gtt_domain(struct drm_gem_object *obj, int write)
if (ret != 0)
return ret;
+ old_write_domain = obj->write_domain;
+ old_read_domains = obj->read_domains;
+
/* If we're writing through the GTT domain, then CPU and GPU caches
* will need to be invalidated at next use.
*/
@@ -2604,6 +2804,10 @@ i915_gem_object_set_to_gtt_domain(struct drm_gem_object *obj, int write)
obj_priv->dirty = 1;
}
+ trace_i915_gem_object_change_domain(obj,
+ old_read_domains,
+ old_write_domain);
+
return 0;
}
@@ -2616,6 +2820,7 @@ i915_gem_object_set_to_gtt_domain(struct drm_gem_object *obj, int write)
static int
i915_gem_object_set_to_cpu_domain(struct drm_gem_object *obj, int write)
{
+ uint32_t old_write_domain, old_read_domains;
int ret;
i915_gem_object_flush_gpu_write_domain(obj);
@@ -2631,6 +2836,9 @@ i915_gem_object_set_to_cpu_domain(struct drm_gem_object *obj, int write)
*/
i915_gem_object_set_to_full_cpu_read_domain(obj);
+ old_write_domain = obj->write_domain;
+ old_read_domains = obj->read_domains;
+
/* Flush the CPU cache if it's still invalid. */
if ((obj->read_domains & I915_GEM_DOMAIN_CPU) == 0) {
i915_gem_clflush_object(obj);
@@ -2651,6 +2859,10 @@ i915_gem_object_set_to_cpu_domain(struct drm_gem_object *obj, int write)
obj->write_domain = I915_GEM_DOMAIN_CPU;
}
+ trace_i915_gem_object_change_domain(obj,
+ old_read_domains,
+ old_write_domain);
+
return 0;
}
@@ -2772,10 +2984,13 @@ i915_gem_object_set_to_gpu_domain(struct drm_gem_object *obj)
struct drm_i915_gem_object *obj_priv = obj->driver_private;
uint32_t invalidate_domains = 0;
uint32_t flush_domains = 0;
+ uint32_t old_read_domains;
BUG_ON(obj->pending_read_domains & I915_GEM_DOMAIN_CPU);
BUG_ON(obj->pending_write_domain == I915_GEM_DOMAIN_CPU);
+ intel_mark_busy(dev, obj);
+
#if WATCH_BUF
DRM_INFO("%s: object %p read %08x -> %08x write %08x -> %08x\n",
__func__, obj,
@@ -2816,6 +3031,8 @@ i915_gem_object_set_to_gpu_domain(struct drm_gem_object *obj)
i915_gem_clflush_object(obj);
}
+ old_read_domains = obj->read_domains;
+
/* The actual obj->write_domain will be updated with
* pending_write_domain after we emit the accumulated flush for all
* of our domain changes in execbuffers (which clears objects'
@@ -2834,6 +3051,10 @@ i915_gem_object_set_to_gpu_domain(struct drm_gem_object *obj)
obj->read_domains, obj->write_domain,
dev->invalidate_domains, dev->flush_domains);
#endif
+
+ trace_i915_gem_object_change_domain(obj,
+ old_read_domains,
+ obj->write_domain);
}
/**
@@ -2886,6 +3107,7 @@ i915_gem_object_set_cpu_read_domain_range(struct drm_gem_object *obj,
uint64_t offset, uint64_t size)
{
struct drm_i915_gem_object *obj_priv = obj->driver_private;
+ uint32_t old_read_domains;
int i, ret;
if (offset == 0 && size == obj->size)
@@ -2932,8 +3154,13 @@ i915_gem_object_set_cpu_read_domain_range(struct drm_gem_object *obj,
*/
BUG_ON((obj->write_domain & ~I915_GEM_DOMAIN_CPU) != 0);
+ old_read_domains = obj->read_domains;
obj->read_domains |= I915_GEM_DOMAIN_CPU;
+ trace_i915_gem_object_change_domain(obj,
+ old_read_domains,
+ obj->write_domain);
+
return 0;
}
@@ -2977,6 +3204,21 @@ i915_gem_object_pin_and_relocate(struct drm_gem_object *obj,
}
target_obj_priv = target_obj->driver_private;
+#if WATCH_RELOC
+ DRM_INFO("%s: obj %p offset %08x target %d "
+ "read %08x write %08x gtt %08x "
+ "presumed %08x delta %08x\n",
+ __func__,
+ obj,
+ (int) reloc->offset,
+ (int) reloc->target_handle,
+ (int) reloc->read_domains,
+ (int) reloc->write_domain,
+ (int) target_obj_priv->gtt_offset,
+ (int) reloc->presumed_offset,
+ reloc->delta);
+#endif
+
/* The target buffer should have appeared before us in the
* exec_object list, so it should have a GTT space bound by now.
*/
@@ -2988,25 +3230,7 @@ i915_gem_object_pin_and_relocate(struct drm_gem_object *obj,
return -EINVAL;
}
- if (reloc->offset > obj->size - 4) {
- DRM_ERROR("Relocation beyond object bounds: "
- "obj %p target %d offset %d size %d.\n",
- obj, reloc->target_handle,
- (int) reloc->offset, (int) obj->size);
- drm_gem_object_unreference(target_obj);
- i915_gem_object_unpin(obj);
- return -EINVAL;
- }
- if (reloc->offset & 3) {
- DRM_ERROR("Relocation not 4-byte aligned: "
- "obj %p target %d offset %d.\n",
- obj, reloc->target_handle,
- (int) reloc->offset);
- drm_gem_object_unreference(target_obj);
- i915_gem_object_unpin(obj);
- return -EINVAL;
- }
-
+ /* Validate that the target is in a valid r/w GPU domain */
if (reloc->write_domain & I915_GEM_DOMAIN_CPU ||
reloc->read_domains & I915_GEM_DOMAIN_CPU) {
DRM_ERROR("reloc with read/write CPU domains: "
@@ -3020,7 +3244,6 @@ i915_gem_object_pin_and_relocate(struct drm_gem_object *obj,
i915_gem_object_unpin(obj);
return -EINVAL;
}
-
if (reloc->write_domain && target_obj->pending_write_domain &&
reloc->write_domain != target_obj->pending_write_domain) {
DRM_ERROR("Write domain conflict: "
@@ -3035,21 +3258,6 @@ i915_gem_object_pin_and_relocate(struct drm_gem_object *obj,
return -EINVAL;
}
-#if WATCH_RELOC
- DRM_INFO("%s: obj %p offset %08x target %d "
- "read %08x write %08x gtt %08x "
- "presumed %08x delta %08x\n",
- __func__,
- obj,
- (int) reloc->offset,
- (int) reloc->target_handle,
- (int) reloc->read_domains,
- (int) reloc->write_domain,
- (int) target_obj_priv->gtt_offset,
- (int) reloc->presumed_offset,
- reloc->delta);
-#endif
-
target_obj->pending_read_domains |= reloc->read_domains;
target_obj->pending_write_domain |= reloc->write_domain;
@@ -3061,6 +3269,37 @@ i915_gem_object_pin_and_relocate(struct drm_gem_object *obj,
continue;
}
+ /* Check that the relocation address is valid... */
+ if (reloc->offset > obj->size - 4) {
+ DRM_ERROR("Relocation beyond object bounds: "
+ "obj %p target %d offset %d size %d.\n",
+ obj, reloc->target_handle,
+ (int) reloc->offset, (int) obj->size);
+ drm_gem_object_unreference(target_obj);
+ i915_gem_object_unpin(obj);
+ return -EINVAL;
+ }
+ if (reloc->offset & 3) {
+ DRM_ERROR("Relocation not 4-byte aligned: "
+ "obj %p target %d offset %d.\n",
+ obj, reloc->target_handle,
+ (int) reloc->offset);
+ drm_gem_object_unreference(target_obj);
+ i915_gem_object_unpin(obj);
+ return -EINVAL;
+ }
+
+ /* and points to somewhere within the target object. */
+ if (reloc->delta >= target_obj->size) {
+ DRM_ERROR("Relocation beyond target object bounds: "
+ "obj %p target %d delta %d size %d.\n",
+ obj, reloc->target_handle,
+ (int) reloc->delta, (int) target_obj->size);
+ drm_gem_object_unreference(target_obj);
+ i915_gem_object_unpin(obj);
+ return -EINVAL;
+ }
+
ret = i915_gem_object_set_to_gtt_domain(obj, 1);
if (ret != 0) {
drm_gem_object_unreference(target_obj);
@@ -3119,6 +3358,8 @@ i915_dispatch_gem_execbuffer(struct drm_device *dev,
exec_start = (uint32_t) exec_offset + exec->batch_start_offset;
exec_len = (uint32_t) exec->batch_len;
+ trace_i915_gem_request_submit(dev, dev_priv->mm.next_gem_seqno + 1);
+
count = nbox ? nbox : 1;
for (i = 0; i < count; i++) {
@@ -3356,7 +3597,7 @@ i915_gem_execbuffer(struct drm_device *dev, void *data,
i915_verify_inactive(dev, __FILE__, __LINE__);
- if (dev_priv->mm.wedged) {
+ if (atomic_read(&dev_priv->mm.wedged)) {
DRM_ERROR("Execbuf while wedged\n");
mutex_unlock(&dev->struct_mutex);
ret = -EIO;
@@ -3414,8 +3655,23 @@ i915_gem_execbuffer(struct drm_device *dev, void *data,
/* error other than GTT full, or we've already tried again */
if (ret != -ENOSPC || pin_tries >= 1) {
- if (ret != -ERESTARTSYS)
- DRM_ERROR("Failed to pin buffers %d\n", ret);
+ if (ret != -ERESTARTSYS) {
+ unsigned long long total_size = 0;
+ for (i = 0; i < args->buffer_count; i++)
+ total_size += object_list[i]->size;
+ DRM_ERROR("Failed to pin buffer %d of %d, total %llu bytes: %d\n",
+ pinned+1, args->buffer_count,
+ total_size, ret);
+ DRM_ERROR("%d objects [%d pinned], "
+ "%d object bytes [%d pinned], "
+ "%d/%d gtt bytes\n",
+ atomic_read(&dev->object_count),
+ atomic_read(&dev->pin_count),
+ atomic_read(&dev->object_memory),
+ atomic_read(&dev->pin_memory),
+ atomic_read(&dev->gtt_memory),
+ dev->gtt_total);
+ }
goto err;
}
@@ -3426,7 +3682,7 @@ i915_gem_execbuffer(struct drm_device *dev, void *data,
/* evict everyone we can from the aperture */
ret = i915_gem_evict_everything(dev);
- if (ret)
+ if (ret && ret != -ENOSPC)
goto err;
}
@@ -3482,8 +3738,12 @@ i915_gem_execbuffer(struct drm_device *dev, void *data,
for (i = 0; i < args->buffer_count; i++) {
struct drm_gem_object *obj = object_list[i];
+ uint32_t old_write_domain = obj->write_domain;
obj->write_domain = obj->pending_write_domain;
+ trace_i915_gem_object_change_domain(obj,
+ obj->read_domains,
+ old_write_domain);
}
i915_verify_inactive(dev, __FILE__, __LINE__);
@@ -3600,11 +3860,8 @@ i915_gem_object_pin(struct drm_gem_object *obj, uint32_t alignment)
i915_verify_inactive(dev, __FILE__, __LINE__);
if (obj_priv->gtt_space == NULL) {
ret = i915_gem_object_bind_to_gtt(obj, alignment);
- if (ret != 0) {
- if (ret != -EBUSY && ret != -ERESTARTSYS)
- DRM_ERROR("Failure to bind: %d\n", ret);
+ if (ret)
return ret;
- }
}
/*
* Pre-965 chips need a fence register set up in order to
@@ -3684,6 +3941,13 @@ i915_gem_pin_ioctl(struct drm_device *dev, void *data,
}
obj_priv = obj->driver_private;
+ if (obj_priv->madv != I915_MADV_WILLNEED) {
+ DRM_ERROR("Attempting to pin a purgeable buffer\n");
+ drm_gem_object_unreference(obj);
+ mutex_unlock(&dev->struct_mutex);
+ return -EINVAL;
+ }
+
if (obj_priv->pin_filp != NULL && obj_priv->pin_filp != file_priv) {
DRM_ERROR("Already pinned in i915_gem_pin_ioctl(): %d\n",
args->handle);
@@ -3796,6 +4060,56 @@ i915_gem_throttle_ioctl(struct drm_device *dev, void *data,
return i915_gem_ring_throttle(dev, file_priv);
}
+int
+i915_gem_madvise_ioctl(struct drm_device *dev, void *data,
+ struct drm_file *file_priv)
+{
+ struct drm_i915_gem_madvise *args = data;
+ struct drm_gem_object *obj;
+ struct drm_i915_gem_object *obj_priv;
+
+ switch (args->madv) {
+ case I915_MADV_DONTNEED:
+ case I915_MADV_WILLNEED:
+ break;
+ default:
+ return -EINVAL;
+ }
+
+ obj = drm_gem_object_lookup(dev, file_priv, args->handle);
+ if (obj == NULL) {
+ DRM_ERROR("Bad handle in i915_gem_madvise_ioctl(): %d\n",
+ args->handle);
+ return -EBADF;
+ }
+
+ mutex_lock(&dev->struct_mutex);
+ obj_priv = obj->driver_private;
+
+ if (obj_priv->pin_count) {
+ drm_gem_object_unreference(obj);
+ mutex_unlock(&dev->struct_mutex);
+
+ DRM_ERROR("Attempted i915_gem_madvise_ioctl() on a pinned object\n");
+ return -EINVAL;
+ }
+
+ if (obj_priv->madv != __I915_MADV_PURGED)
+ obj_priv->madv = args->madv;
+
+ /* if the object is no longer bound, discard its backing storage */
+ if (i915_gem_object_is_purgeable(obj_priv) &&
+ obj_priv->gtt_space == NULL)
+ i915_gem_object_truncate(obj);
+
+ args->retained = obj_priv->madv != __I915_MADV_PURGED;
+
+ drm_gem_object_unreference(obj);
+ mutex_unlock(&dev->struct_mutex);
+
+ return 0;
+}
+
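The new madvise ioctl lets a userspace buffer cache mark idle objects purgeable and then claim them back before reuse. A minimal userspace sketch of that round trip, assuming libdrm's drmIoctl() wrapper and the DRM_IOCTL_I915_GEM_MADVISE / struct drm_i915_gem_madvise definitions added to i915_drm.h by the companion UAPI change (not shown in this diff):

#include <stdint.h>
#include <string.h>
#include <xf86drm.h>   /* drmIoctl() */
#include <i915_drm.h>  /* struct drm_i915_gem_madvise, I915_MADV_* */

/* Mark a cached, idle buffer purgeable: the kernel may now drop its
 * backing pages under memory pressure instead of keeping them around. */
static void bo_mark_purgeable(int fd, uint32_t handle)
{
	struct drm_i915_gem_madvise madv;

	memset(&madv, 0, sizeof(madv));
	madv.handle = handle;
	madv.madv = I915_MADV_DONTNEED;
	drmIoctl(fd, DRM_IOCTL_I915_GEM_MADVISE, &madv);
}

/* Claim a cached buffer back before reuse.  Returns 1 if the pages were
 * retained, 0 if they were purged (contents are then undefined), and -1
 * on error. */
static int bo_reclaim(int fd, uint32_t handle)
{
	struct drm_i915_gem_madvise madv;

	memset(&madv, 0, sizeof(madv));
	madv.handle = handle;
	madv.madv = I915_MADV_WILLNEED;
	if (drmIoctl(fd, DRM_IOCTL_I915_GEM_MADVISE, &madv))
		return -1;
	return madv.retained ? 1 : 0;
}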
int i915_gem_init_object(struct drm_gem_object *obj)
{
struct drm_i915_gem_object *obj_priv;
@@ -3820,6 +4134,9 @@ int i915_gem_init_object(struct drm_gem_object *obj)
obj_priv->fence_reg = I915_FENCE_REG_NONE;
INIT_LIST_HEAD(&obj_priv->list);
INIT_LIST_HEAD(&obj_priv->fence_list);
+ obj_priv->madv = I915_MADV_WILLNEED;
+
+ trace_i915_gem_object_create(obj);
return 0;
}
@@ -3829,6 +4146,8 @@ void i915_gem_free_object(struct drm_gem_object *obj)
struct drm_device *dev = obj->dev;
struct drm_i915_gem_object *obj_priv = obj->driver_private;
+ trace_i915_gem_object_destroy(obj);
+
while (obj_priv->pin_count > 0)
i915_gem_object_unpin(obj);
@@ -3837,43 +4156,35 @@ void i915_gem_free_object(struct drm_gem_object *obj)
i915_gem_object_unbind(obj);
- i915_gem_free_mmap_offset(obj);
+ if (obj_priv->mmap_offset)
+ i915_gem_free_mmap_offset(obj);
kfree(obj_priv->page_cpu_valid);
kfree(obj_priv->bit_17);
kfree(obj->driver_private);
}
-/** Unbinds all objects that are on the given buffer list. */
+/** Unbinds all inactive objects. */
static int
-i915_gem_evict_from_list(struct drm_device *dev, struct list_head *head)
+i915_gem_evict_from_inactive_list(struct drm_device *dev)
{
- struct drm_gem_object *obj;
- struct drm_i915_gem_object *obj_priv;
- int ret;
+ drm_i915_private_t *dev_priv = dev->dev_private;
- while (!list_empty(head)) {
- obj_priv = list_first_entry(head,
- struct drm_i915_gem_object,
- list);
- obj = obj_priv->obj;
+ while (!list_empty(&dev_priv->mm.inactive_list)) {
+ struct drm_gem_object *obj;
+ int ret;
- if (obj_priv->pin_count != 0) {
- DRM_ERROR("Pinned object in unbind list\n");
- mutex_unlock(&dev->struct_mutex);
- return -EINVAL;
- }
+ obj = list_first_entry(&dev_priv->mm.inactive_list,
+ struct drm_i915_gem_object,
+ list)->obj;
ret = i915_gem_object_unbind(obj);
if (ret != 0) {
- DRM_ERROR("Error unbinding object in LeaveVT: %d\n",
- ret);
- mutex_unlock(&dev->struct_mutex);
+ DRM_ERROR("Error unbinding object: %d\n", ret);
return ret;
}
}
-
return 0;
}
@@ -3895,6 +4206,7 @@ i915_gem_idle(struct drm_device *dev)
* We need to replace this with a semaphore, or something.
*/
dev_priv->mm.suspended = 1;
+ del_timer(&dev_priv->hangcheck_timer);
/* Cancel the retire work handler, wait for it to finish if running
*/
@@ -3924,7 +4236,7 @@ i915_gem_idle(struct drm_device *dev)
if (last_seqno == cur_seqno) {
if (stuck++ > 100) {
DRM_ERROR("hardware wedged\n");
- dev_priv->mm.wedged = 1;
+ atomic_set(&dev_priv->mm.wedged, 1);
DRM_WAKEUP(&dev_priv->irq_queue);
break;
}
@@ -3937,7 +4249,7 @@ i915_gem_idle(struct drm_device *dev)
i915_gem_retire_requests(dev);
spin_lock(&dev_priv->mm.active_list_lock);
- if (!dev_priv->mm.wedged) {
+ if (!atomic_read(&dev_priv->mm.wedged)) {
/* Active and flushing should now be empty as we've
* waited for a sequence higher than any pending execbuffer
*/
@@ -3955,29 +4267,41 @@ i915_gem_idle(struct drm_device *dev)
* the GPU domains and just stuff them onto inactive.
*/
while (!list_empty(&dev_priv->mm.active_list)) {
- struct drm_i915_gem_object *obj_priv;
+ struct drm_gem_object *obj;
+ uint32_t old_write_domain;
- obj_priv = list_first_entry(&dev_priv->mm.active_list,
- struct drm_i915_gem_object,
- list);
- obj_priv->obj->write_domain &= ~I915_GEM_GPU_DOMAINS;
- i915_gem_object_move_to_inactive(obj_priv->obj);
+ obj = list_first_entry(&dev_priv->mm.active_list,
+ struct drm_i915_gem_object,
+ list)->obj;
+ old_write_domain = obj->write_domain;
+ obj->write_domain &= ~I915_GEM_GPU_DOMAINS;
+ i915_gem_object_move_to_inactive(obj);
+
+ trace_i915_gem_object_change_domain(obj,
+ obj->read_domains,
+ old_write_domain);
}
spin_unlock(&dev_priv->mm.active_list_lock);
while (!list_empty(&dev_priv->mm.flushing_list)) {
- struct drm_i915_gem_object *obj_priv;
+ struct drm_gem_object *obj;
+ uint32_t old_write_domain;
- obj_priv = list_first_entry(&dev_priv->mm.flushing_list,
- struct drm_i915_gem_object,
- list);
- obj_priv->obj->write_domain &= ~I915_GEM_GPU_DOMAINS;
- i915_gem_object_move_to_inactive(obj_priv->obj);
+ obj = list_first_entry(&dev_priv->mm.flushing_list,
+ struct drm_i915_gem_object,
+ list)->obj;
+ old_write_domain = obj->write_domain;
+ obj->write_domain &= ~I915_GEM_GPU_DOMAINS;
+ i915_gem_object_move_to_inactive(obj);
+
+ trace_i915_gem_object_change_domain(obj,
+ obj->read_domains,
+ old_write_domain);
}
/* Move all inactive buffers out of the GTT. */
- ret = i915_gem_evict_from_list(dev, &dev_priv->mm.inactive_list);
+ ret = i915_gem_evict_from_inactive_list(dev);
WARN_ON(!list_empty(&dev_priv->mm.inactive_list));
if (ret) {
mutex_unlock(&dev->struct_mutex);
@@ -4093,7 +4417,6 @@ i915_gem_init_ringbuffer(struct drm_device *dev)
/* Set up the kernel mapping for the ring. */
ring->Size = obj->size;
- ring->tail_mask = obj->size - 1;
ring->map.offset = dev->agp->base + obj_priv->gtt_offset;
ring->map.size = obj->size;
@@ -4200,9 +4523,9 @@ i915_gem_entervt_ioctl(struct drm_device *dev, void *data,
if (drm_core_check_feature(dev, DRIVER_MODESET))
return 0;
- if (dev_priv->mm.wedged) {
+ if (atomic_read(&dev_priv->mm.wedged)) {
DRM_ERROR("Reenabling wedged hardware, good luck\n");
- dev_priv->mm.wedged = 0;
+ atomic_set(&dev_priv->mm.wedged, 0);
}
mutex_lock(&dev->struct_mutex);
@@ -4268,6 +4591,10 @@ i915_gem_load(struct drm_device *dev)
i915_gem_retire_work_handler);
dev_priv->mm.next_gem_seqno = 1;
+ spin_lock(&shrink_list_lock);
+ list_add(&dev_priv->mm.shrink_list, &shrink_list);
+ spin_unlock(&shrink_list_lock);
+
/* Old X drivers will take 0-2 for front, back, depth buffers */
dev_priv->fence_reg_start = 3;
@@ -4485,3 +4812,116 @@ void i915_gem_release(struct drm_device * dev, struct drm_file *file_priv)
list_del_init(i915_file_priv->mm.request_list.next);
mutex_unlock(&dev->struct_mutex);
}
+
+static int
+i915_gem_shrink(int nr_to_scan, gfp_t gfp_mask)
+{
+ drm_i915_private_t *dev_priv, *next_dev;
+ struct drm_i915_gem_object *obj_priv, *next_obj;
+ int cnt = 0;
+ int would_deadlock = 1;
+
+ /* "fast-path" to count number of available objects */
+ if (nr_to_scan == 0) {
+ spin_lock(&shrink_list_lock);
+ list_for_each_entry(dev_priv, &shrink_list, mm.shrink_list) {
+ struct drm_device *dev = dev_priv->dev;
+
+ if (mutex_trylock(&dev->struct_mutex)) {
+ list_for_each_entry(obj_priv,
+ &dev_priv->mm.inactive_list,
+ list)
+ cnt++;
+ mutex_unlock(&dev->struct_mutex);
+ }
+ }
+ spin_unlock(&shrink_list_lock);
+
+ return (cnt / 100) * sysctl_vfs_cache_pressure;
+ }
+
+ spin_lock(&shrink_list_lock);
+
+ /* first scan for clean buffers */
+ list_for_each_entry_safe(dev_priv, next_dev,
+ &shrink_list, mm.shrink_list) {
+ struct drm_device *dev = dev_priv->dev;
+
+ if (!mutex_trylock(&dev->struct_mutex))
+ continue;
+
+ spin_unlock(&shrink_list_lock);
+
+ i915_gem_retire_requests(dev);
+
+ list_for_each_entry_safe(obj_priv, next_obj,
+ &dev_priv->mm.inactive_list,
+ list) {
+ if (i915_gem_object_is_purgeable(obj_priv)) {
+ i915_gem_object_unbind(obj_priv->obj);
+ if (--nr_to_scan <= 0)
+ break;
+ }
+ }
+
+ spin_lock(&shrink_list_lock);
+ mutex_unlock(&dev->struct_mutex);
+
+ would_deadlock = 0;
+
+ if (nr_to_scan <= 0)
+ break;
+ }
+
+ /* second pass, evict/count anything still on the inactive list */
+ list_for_each_entry_safe(dev_priv, next_dev,
+ &shrink_list, mm.shrink_list) {
+ struct drm_device *dev = dev_priv->dev;
+
+ if (!mutex_trylock(&dev->struct_mutex))
+ continue;
+
+ spin_unlock(&shrink_list_lock);
+
+ list_for_each_entry_safe(obj_priv, next_obj,
+ &dev_priv->mm.inactive_list,
+ list) {
+ if (nr_to_scan > 0) {
+ i915_gem_object_unbind(obj_priv->obj);
+ nr_to_scan--;
+ } else
+ cnt++;
+ }
+
+ spin_lock(&shrink_list_lock);
+ mutex_unlock(&dev->struct_mutex);
+
+ would_deadlock = 0;
+ }
+
+ spin_unlock(&shrink_list_lock);
+
+ if (would_deadlock)
+ return -1;
+ else if (cnt > 0)
+ return (cnt / 100) * sysctl_vfs_cache_pressure;
+ else
+ return 0;
+}
+
+static struct shrinker shrinker = {
+ .shrink = i915_gem_shrink,
+ .seeks = DEFAULT_SEEKS,
+};
+
+__init void
+i915_gem_shrinker_init(void)
+{
+ register_shrinker(&shrinker);
+}
+
+__exit void
+i915_gem_shrinker_exit(void)
+{
+ unregister_shrinker(&shrinker);
+}
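These callbacks follow the legacy struct shrinker contract: a call with nr_to_scan == 0 only reports how many inactive objects could be freed, and returning -1 tells the VM that no progress was possible without risking deadlock (struct_mutex could not be taken). The init/exit helpers still have to be wired into the module load path; a sketch of that hookup, assuming the i915_init()/i915_exit() entry points and the drm_init()/drm_exit() calls already present in i915_drv.c (the real hookup is part of the companion change, not this hunk):

static int __init i915_init(void)
{
	/* Register the GEM shrinker before the driver starts binding
	 * objects, so memory pressure can always reach inactive buffers. */
	i915_gem_shrinker_init();
	return drm_init(&driver);
}

static void __exit i915_exit(void)
{
	drm_exit(&driver);
	i915_gem_shrinker_exit();
}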
diff --git a/drivers/gpu/drm/i915/i915_gem_tiling.c b/drivers/gpu/drm/i915/i915_gem_tiling.c
index a2d527b22ec..200e398453c 100644
--- a/drivers/gpu/drm/i915/i915_gem_tiling.c
+++ b/drivers/gpu/drm/i915/i915_gem_tiling.c
@@ -94,23 +94,15 @@
static int
intel_alloc_mchbar_resource(struct drm_device *dev)
{
- struct pci_dev *bridge_dev;
drm_i915_private_t *dev_priv = dev->dev_private;
int reg = IS_I965G(dev) ? MCHBAR_I965 : MCHBAR_I915;
u32 temp_lo, temp_hi = 0;
u64 mchbar_addr;
int ret = 0;
- bridge_dev = pci_get_bus_and_slot(0, PCI_DEVFN(0,0));
- if (!bridge_dev) {
- DRM_DEBUG("no bridge dev?!\n");
- ret = -ENODEV;
- goto out;
- }
-
if (IS_I965G(dev))
- pci_read_config_dword(bridge_dev, reg + 4, &temp_hi);
- pci_read_config_dword(bridge_dev, reg, &temp_lo);
+ pci_read_config_dword(dev_priv->bridge_dev, reg + 4, &temp_hi);
+ pci_read_config_dword(dev_priv->bridge_dev, reg, &temp_lo);
mchbar_addr = ((u64)temp_hi << 32) | temp_lo;
/* If ACPI doesn't have it, assume we need to allocate it ourselves */
@@ -118,30 +110,28 @@ intel_alloc_mchbar_resource(struct drm_device *dev)
if (mchbar_addr &&
pnp_range_reserved(mchbar_addr, mchbar_addr + MCHBAR_SIZE)) {
ret = 0;
- goto out_put;
+ goto out;
}
#endif
/* Get some space for it */
- ret = pci_bus_alloc_resource(bridge_dev->bus, &dev_priv->mch_res,
+ ret = pci_bus_alloc_resource(dev_priv->bridge_dev->bus, &dev_priv->mch_res,
MCHBAR_SIZE, MCHBAR_SIZE,
PCIBIOS_MIN_MEM,
0, pcibios_align_resource,
- bridge_dev);
+ dev_priv->bridge_dev);
if (ret) {
DRM_DEBUG("failed bus alloc: %d\n", ret);
dev_priv->mch_res.start = 0;
- goto out_put;
+ goto out;
}
if (IS_I965G(dev))
- pci_write_config_dword(bridge_dev, reg + 4,
+ pci_write_config_dword(dev_priv->bridge_dev, reg + 4,
upper_32_bits(dev_priv->mch_res.start));
- pci_write_config_dword(bridge_dev, reg,
+ pci_write_config_dword(dev_priv->bridge_dev, reg,
lower_32_bits(dev_priv->mch_res.start));
-out_put:
- pci_dev_put(bridge_dev);
out:
return ret;
}
@@ -150,44 +140,36 @@ out:
static bool
intel_setup_mchbar(struct drm_device *dev)
{
- struct pci_dev *bridge_dev;
+ drm_i915_private_t *dev_priv = dev->dev_private;
int mchbar_reg = IS_I965G(dev) ? MCHBAR_I965 : MCHBAR_I915;
u32 temp;
bool need_disable = false, enabled;
- bridge_dev = pci_get_bus_and_slot(0, PCI_DEVFN(0,0));
- if (!bridge_dev) {
- DRM_DEBUG("no bridge dev?!\n");
- goto out;
- }
-
if (IS_I915G(dev) || IS_I915GM(dev)) {
- pci_read_config_dword(bridge_dev, DEVEN_REG, &temp);
+ pci_read_config_dword(dev_priv->bridge_dev, DEVEN_REG, &temp);
enabled = !!(temp & DEVEN_MCHBAR_EN);
} else {
- pci_read_config_dword(bridge_dev, mchbar_reg, &temp);
+ pci_read_config_dword(dev_priv->bridge_dev, mchbar_reg, &temp);
enabled = temp & 1;
}
/* If it's already enabled, don't have to do anything */
if (enabled)
- goto out_put;
+ goto out;
if (intel_alloc_mchbar_resource(dev))
- goto out_put;
+ goto out;
need_disable = true;
/* Space is allocated or reserved, so enable it. */
if (IS_I915G(dev) || IS_I915GM(dev)) {
- pci_write_config_dword(bridge_dev, DEVEN_REG,
+ pci_write_config_dword(dev_priv->bridge_dev, DEVEN_REG,
temp | DEVEN_MCHBAR_EN);
} else {
- pci_read_config_dword(bridge_dev, mchbar_reg, &temp);
- pci_write_config_dword(bridge_dev, mchbar_reg, temp | 1);
+ pci_read_config_dword(dev_priv->bridge_dev, mchbar_reg, &temp);
+ pci_write_config_dword(dev_priv->bridge_dev, mchbar_reg, temp | 1);
}
-out_put:
- pci_dev_put(bridge_dev);
out:
return need_disable;
}
@@ -196,25 +178,18 @@ static void
intel_teardown_mchbar(struct drm_device *dev, bool disable)
{
drm_i915_private_t *dev_priv = dev->dev_private;
- struct pci_dev *bridge_dev;
int mchbar_reg = IS_I965G(dev) ? MCHBAR_I965 : MCHBAR_I915;
u32 temp;
- bridge_dev = pci_get_bus_and_slot(0, PCI_DEVFN(0,0));
- if (!bridge_dev) {
- DRM_DEBUG("no bridge dev?!\n");
- return;
- }
-
if (disable) {
if (IS_I915G(dev) || IS_I915GM(dev)) {
- pci_read_config_dword(bridge_dev, DEVEN_REG, &temp);
+ pci_read_config_dword(dev_priv->bridge_dev, DEVEN_REG, &temp);
temp &= ~DEVEN_MCHBAR_EN;
- pci_write_config_dword(bridge_dev, DEVEN_REG, temp);
+ pci_write_config_dword(dev_priv->bridge_dev, DEVEN_REG, temp);
} else {
- pci_read_config_dword(bridge_dev, mchbar_reg, &temp);
+ pci_read_config_dword(dev_priv->bridge_dev, mchbar_reg, &temp);
temp &= ~1;
- pci_write_config_dword(bridge_dev, mchbar_reg, temp);
+ pci_write_config_dword(dev_priv->bridge_dev, mchbar_reg, temp);
}
}
@@ -234,7 +209,13 @@ i915_gem_detect_bit_6_swizzle(struct drm_device *dev)
uint32_t swizzle_y = I915_BIT_6_SWIZZLE_UNKNOWN;
bool need_disable;
- if (!IS_I9XX(dev)) {
+ if (IS_IGDNG(dev)) {
+ /* On IGDNG, whatever the DRAM config, the GPU always uses
+ * the same swizzling setup.
+ */
+ swizzle_x = I915_BIT_6_SWIZZLE_9_10;
+ swizzle_y = I915_BIT_6_SWIZZLE_9;
+ } else if (!IS_I9XX(dev)) {
/* As far as we know, the 865 doesn't have these bit 6
* swizzling issues.
*/
@@ -317,13 +298,6 @@ i915_gem_detect_bit_6_swizzle(struct drm_device *dev)
}
}
- /* FIXME: check with memory config on IGDNG */
- if (IS_IGDNG(dev)) {
- DRM_ERROR("disable tiling on IGDNG...\n");
- swizzle_x = I915_BIT_6_SWIZZLE_UNKNOWN;
- swizzle_y = I915_BIT_6_SWIZZLE_UNKNOWN;
- }
-
dev_priv->mm.bit_6_swizzle_x = swizzle_x;
dev_priv->mm.bit_6_swizzle_y = swizzle_y;
}
diff --git a/drivers/gpu/drm/i915/i915_irq.c b/drivers/gpu/drm/i915/i915_irq.c
index 7ebc84c2881..c3ceffa46ea 100644
--- a/drivers/gpu/drm/i915/i915_irq.c
+++ b/drivers/gpu/drm/i915/i915_irq.c
@@ -31,6 +31,7 @@
#include "drm.h"
#include "i915_drm.h"
#include "i915_drv.h"
+#include "i915_trace.h"
#include "intel_drv.h"
#define MAX_NOPID ((u32)~0)
@@ -279,7 +280,9 @@ irqreturn_t igdng_irq_handler(struct drm_device *dev)
}
if (gt_iir & GT_USER_INTERRUPT) {
- dev_priv->mm.irq_gem_seqno = i915_get_gem_seqno(dev);
+ u32 seqno = i915_get_gem_seqno(dev);
+ dev_priv->mm.irq_gem_seqno = seqno;
+ trace_i915_gem_request_complete(dev, seqno);
DRM_WAKEUP(&dev_priv->irq_queue);
}
@@ -302,12 +305,25 @@ static void i915_error_work_func(struct work_struct *work)
drm_i915_private_t *dev_priv = container_of(work, drm_i915_private_t,
error_work);
struct drm_device *dev = dev_priv->dev;
- char *event_string = "ERROR=1";
- char *envp[] = { event_string, NULL };
+ char *error_event[] = { "ERROR=1", NULL };
+ char *reset_event[] = { "RESET=1", NULL };
+ char *reset_done_event[] = { "ERROR=0", NULL };
DRM_DEBUG("generating error event\n");
-
- kobject_uevent_env(&dev->primary->kdev.kobj, KOBJ_CHANGE, envp);
+ kobject_uevent_env(&dev->primary->kdev.kobj, KOBJ_CHANGE, error_event);
+
+ if (atomic_read(&dev_priv->mm.wedged)) {
+ if (IS_I965G(dev)) {
+ DRM_DEBUG("resetting chip\n");
+ kobject_uevent_env(&dev->primary->kdev.kobj, KOBJ_CHANGE, reset_event);
+ if (!i965_reset(dev, GDRST_RENDER)) {
+ atomic_set(&dev_priv->mm.wedged, 0);
+ kobject_uevent_env(&dev->primary->kdev.kobj, KOBJ_CHANGE, reset_done_event);
+ }
+ } else {
+ printk(KERN_ERR "i915: GPU hung, reboot required\n");
+ }
+ }
}
/**
@@ -372,7 +388,7 @@ out:
* so userspace knows something bad happened (should trigger collection
* of a ring dump etc.).
*/
-static void i915_handle_error(struct drm_device *dev)
+static void i915_handle_error(struct drm_device *dev, bool wedged)
{
struct drm_i915_private *dev_priv = dev->dev_private;
u32 eir = I915_READ(EIR);
@@ -482,6 +498,16 @@ static void i915_handle_error(struct drm_device *dev)
I915_WRITE(IIR, I915_RENDER_COMMAND_PARSER_ERROR_INTERRUPT);
}
+ if (wedged) {
+ atomic_set(&dev_priv->mm.wedged, 1);
+
+ /*
+ * Wakeup waiting processes so they don't hang
+ */
+ printk("i915: Waking up sleeping processes\n");
+ DRM_WAKEUP(&dev_priv->irq_queue);
+ }
+
queue_work(dev_priv->wq, &dev_priv->error_work);
}
@@ -527,7 +553,7 @@ irqreturn_t i915_driver_irq_handler(DRM_IRQ_ARGS)
pipeb_stats = I915_READ(PIPEBSTAT);
if (iir & I915_RENDER_COMMAND_PARSER_ERROR_INTERRUPT)
- i915_handle_error(dev);
+ i915_handle_error(dev, false);
/*
* Clear the PIPE(A|B)STAT regs before the IIR
@@ -565,6 +591,27 @@ irqreturn_t i915_driver_irq_handler(DRM_IRQ_ARGS)
I915_WRITE(PORT_HOTPLUG_STAT, hotplug_status);
I915_READ(PORT_HOTPLUG_STAT);
+
+ /* CRT EOS interrupt occurred */
+ if (IS_IGD(dev) &&
+ (hotplug_status & CRT_EOS_INT_STATUS)) {
+ u32 temp;
+
+ DRM_DEBUG("EOS interrupt occurred\n");
+ /* status is already cleared */
+ temp = I915_READ(ADPA);
+ temp &= ~ADPA_DAC_ENABLE;
+ I915_WRITE(ADPA, temp);
+
+ temp = I915_READ(PORT_HOTPLUG_EN);
+ temp &= ~CRT_EOS_INT_EN;
+ I915_WRITE(PORT_HOTPLUG_EN, temp);
+
+ temp = I915_READ(PORT_HOTPLUG_STAT);
+ if (temp & CRT_EOS_INT_STATUS)
+ I915_WRITE(PORT_HOTPLUG_STAT,
+ CRT_EOS_INT_STATUS);
+ }
}
I915_WRITE(IIR, iir);
@@ -578,8 +625,12 @@ irqreturn_t i915_driver_irq_handler(DRM_IRQ_ARGS)
}
if (iir & I915_USER_INTERRUPT) {
- dev_priv->mm.irq_gem_seqno = i915_get_gem_seqno(dev);
+ u32 seqno = i915_get_gem_seqno(dev);
+ dev_priv->mm.irq_gem_seqno = seqno;
+ trace_i915_gem_request_complete(dev, seqno);
DRM_WAKEUP(&dev_priv->irq_queue);
+ dev_priv->hangcheck_count = 0;
+ mod_timer(&dev_priv->hangcheck_timer, jiffies + DRM_I915_HANGCHECK_PERIOD);
}
if (pipea_stats & vblank_status) {
@@ -674,6 +725,16 @@ void i915_user_irq_put(struct drm_device *dev)
spin_unlock_irqrestore(&dev_priv->user_irq_lock, irqflags);
}
+void i915_trace_irq_get(struct drm_device *dev, u32 seqno)
+{
+ drm_i915_private_t *dev_priv = (drm_i915_private_t *) dev->dev_private;
+
+ if (dev_priv->trace_irq_seqno == 0)
+ i915_user_irq_get(dev);
+
+ dev_priv->trace_irq_seqno = seqno;
+}
+
static int i915_wait_irq(struct drm_device * dev, int irq_nr)
{
drm_i915_private_t *dev_priv = (drm_i915_private_t *) dev->dev_private;
@@ -859,6 +920,52 @@ int i915_vblank_swap(struct drm_device *dev, void *data,
return -EINVAL;
}
+struct drm_i915_gem_request *i915_get_tail_request(struct drm_device *dev)
+{
+ drm_i915_private_t *dev_priv = dev->dev_private;
+ return list_entry(dev_priv->mm.request_list.prev,
+ struct drm_i915_gem_request, list);
+}
+
+/**
+ * This is called when the chip hasn't reported back with completed
+ * batchbuffers in a long time. The first time this is called we simply record
+ * ACTHD. If ACTHD hasn't changed by the time the hangcheck timer elapses
+ * again, we assume the chip is wedged and try to fix it.
+ */
+void i915_hangcheck_elapsed(unsigned long data)
+{
+ struct drm_device *dev = (struct drm_device *)data;
+ drm_i915_private_t *dev_priv = dev->dev_private;
+ uint32_t acthd;
+
+ if (!IS_I965G(dev))
+ acthd = I915_READ(ACTHD);
+ else
+ acthd = I915_READ(ACTHD_I965);
+
+ /* If all work is done then ACTHD clearly hasn't advanced. */
+ if (list_empty(&dev_priv->mm.request_list) ||
+ i915_seqno_passed(i915_get_gem_seqno(dev), i915_get_tail_request(dev)->seqno)) {
+ dev_priv->hangcheck_count = 0;
+ return;
+ }
+
+ if (dev_priv->last_acthd == acthd && dev_priv->hangcheck_count > 0) {
+ DRM_ERROR("Hangcheck timer elapsed... GPU hung\n");
+ i915_handle_error(dev, true);
+ return;
+ }
+
+ /* Reset timer in case chip hangs without another request being added */
+ mod_timer(&dev_priv->hangcheck_timer, jiffies + DRM_I915_HANGCHECK_PERIOD);
+
+ if (acthd != dev_priv->last_acthd)
+ dev_priv->hangcheck_count = 0;
+ else
+ dev_priv->hangcheck_count++;
+
+ dev_priv->last_acthd = acthd;
+}
+
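i915_hangcheck_elapsed() only fires if the timer is armed; the timer is set up during driver load and re-armed whenever new work is submitted, which happens in the companion i915_dma.c/i915_gem.c changes rather than in this hunk. A sketch of that wiring, assuming the DRM_I915_HANGCHECK_PERIOD interval defined alongside it in i915_drv.h:

/* During i915_driver_load(): associate the timer with this device. */
setup_timer(&dev_priv->hangcheck_timer, i915_hangcheck_elapsed,
	    (unsigned long) dev);

/* Whenever a request is queued (e.g. from i915_add_request()), push the
 * deadline out so an active GPU is never flagged as hung. */
mod_timer(&dev_priv->hangcheck_timer,
	  jiffies + DRM_I915_HANGCHECK_PERIOD);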
/* drm_dma.h hooks
*/
static void igdng_irq_preinstall(struct drm_device *dev)
diff --git a/drivers/gpu/drm/i915/i915_opregion.c b/drivers/gpu/drm/i915/i915_opregion.c
index e4b4e8898e3..2d5193556d3 100644
--- a/drivers/gpu/drm/i915/i915_opregion.c
+++ b/drivers/gpu/drm/i915/i915_opregion.c
@@ -148,6 +148,7 @@ static u32 asle_set_backlight(struct drm_device *dev, u32 bclp)
struct drm_i915_private *dev_priv = dev->dev_private;
struct opregion_asle *asle = dev_priv->opregion.asle;
u32 blc_pwm_ctl, blc_pwm_ctl2;
+ u32 max_backlight, level, shift;
if (!(bclp & ASLE_BCLP_VALID))
return ASLE_BACKLIGHT_FAIL;
@@ -157,14 +158,25 @@ static u32 asle_set_backlight(struct drm_device *dev, u32 bclp)
return ASLE_BACKLIGHT_FAIL;
blc_pwm_ctl = I915_READ(BLC_PWM_CTL);
- blc_pwm_ctl &= ~BACKLIGHT_DUTY_CYCLE_MASK;
blc_pwm_ctl2 = I915_READ(BLC_PWM_CTL2);
- if (blc_pwm_ctl2 & BLM_COMBINATION_MODE)
+ if (IS_I965G(dev) && (blc_pwm_ctl2 & BLM_COMBINATION_MODE))
pci_write_config_dword(dev->pdev, PCI_LBPC, bclp);
- else
- I915_WRITE(BLC_PWM_CTL, blc_pwm_ctl | ((bclp * 0x101)-1));
-
+ else {
+ if (IS_IGD(dev)) {
+ blc_pwm_ctl &= ~(BACKLIGHT_DUTY_CYCLE_MASK - 1);
+ max_backlight = (blc_pwm_ctl & BACKLIGHT_MODULATION_FREQ_MASK) >>
+ BACKLIGHT_MODULATION_FREQ_SHIFT;
+ shift = BACKLIGHT_DUTY_CYCLE_SHIFT + 1;
+ } else {
+ blc_pwm_ctl &= ~BACKLIGHT_DUTY_CYCLE_MASK;
+ max_backlight = ((blc_pwm_ctl & BACKLIGHT_MODULATION_FREQ_MASK) >>
+ BACKLIGHT_MODULATION_FREQ_SHIFT) * 2;
+ shift = BACKLIGHT_DUTY_CYCLE_SHIFT;
+ }
+ level = (bclp * max_backlight) / 255;
+ I915_WRITE(BLC_PWM_CTL, blc_pwm_ctl | (level << shift));
+ }
asle->cblv = (bclp*0x64)/0xff | ASLE_CBLV_VALID;
return 0;
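The rewritten backlight path maps the 0-255 ASLE request onto the hardware duty-cycle range rather than assuming a fixed 16-bit field. As an illustration with made-up register contents: on a non-IGD part whose modulation-frequency field reads 0x1000, max_backlight becomes 0x1000 * 2 = 0x2000, so a request of bclp = 128 programs level = (128 * 0x2000) / 255 ~= 0x1010 into the duty-cycle bits; on IGD the low bit of the duty-cycle field is preserved and the level is written one bit higher.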
diff --git a/drivers/gpu/drm/i915/i915_reg.h b/drivers/gpu/drm/i915/i915_reg.h
index 2955083aa47..0466ddbeba3 100644
--- a/drivers/gpu/drm/i915/i915_reg.h
+++ b/drivers/gpu/drm/i915/i915_reg.h
@@ -30,6 +30,7 @@
* fb aperture size and the amount of pre-reserved memory.
*/
#define INTEL_GMCH_CTRL 0x52
+#define INTEL_GMCH_VGA_DISABLE (1 << 1)
#define INTEL_GMCH_ENABLED 0x4
#define INTEL_GMCH_MEM_MASK 0x1
#define INTEL_GMCH_MEM_64M 0x1
@@ -55,7 +56,7 @@
/* PCI config space */
#define HPLLCC 0xc0 /* 855 only */
-#define GC_CLOCK_CONTROL_MASK (3 << 0)
+#define GC_CLOCK_CONTROL_MASK (0xf << 0)
#define GC_CLOCK_133_200 (0 << 0)
#define GC_CLOCK_100_200 (1 << 0)
#define GC_CLOCK_100_133 (2 << 0)
@@ -65,7 +66,30 @@
#define GC_DISPLAY_CLOCK_190_200_MHZ (0 << 4)
#define GC_DISPLAY_CLOCK_333_MHZ (4 << 4)
#define GC_DISPLAY_CLOCK_MASK (7 << 4)
+#define GM45_GC_RENDER_CLOCK_MASK (0xf << 0)
+#define GM45_GC_RENDER_CLOCK_266_MHZ (8 << 0)
+#define GM45_GC_RENDER_CLOCK_320_MHZ (9 << 0)
+#define GM45_GC_RENDER_CLOCK_400_MHZ (0xb << 0)
+#define GM45_GC_RENDER_CLOCK_533_MHZ (0xc << 0)
+#define I965_GC_RENDER_CLOCK_MASK (0xf << 0)
+#define I965_GC_RENDER_CLOCK_267_MHZ (2 << 0)
+#define I965_GC_RENDER_CLOCK_333_MHZ (3 << 0)
+#define I965_GC_RENDER_CLOCK_444_MHZ (4 << 0)
+#define I965_GC_RENDER_CLOCK_533_MHZ (5 << 0)
+#define I945_GC_RENDER_CLOCK_MASK (7 << 0)
+#define I945_GC_RENDER_CLOCK_166_MHZ (0 << 0)
+#define I945_GC_RENDER_CLOCK_200_MHZ (1 << 0)
+#define I945_GC_RENDER_CLOCK_250_MHZ (3 << 0)
+#define I945_GC_RENDER_CLOCK_400_MHZ (5 << 0)
+#define I915_GC_RENDER_CLOCK_MASK (7 << 0)
+#define I915_GC_RENDER_CLOCK_166_MHZ (0 << 0)
+#define I915_GC_RENDER_CLOCK_200_MHZ (1 << 0)
+#define I915_GC_RENDER_CLOCK_333_MHZ (4 << 0)
#define LBB 0xf4
+#define GDRST 0xc0
+#define GDRST_FULL (0<<2)
+#define GDRST_RENDER (1<<2)
+#define GDRST_MEDIA (3<<2)
/* VGA stuff */
@@ -324,9 +348,37 @@
#define FBC_CTL_PLANEA (0<<0)
#define FBC_CTL_PLANEB (1<<0)
#define FBC_FENCE_OFF 0x0321b
+#define FBC_TAG 0x03300
#define FBC_LL_SIZE (1536)
+/* Framebuffer compression for GM45+ */
+#define DPFC_CB_BASE 0x3200
+#define DPFC_CONTROL 0x3208
+#define DPFC_CTL_EN (1<<31)
+#define DPFC_CTL_PLANEA (0<<30)
+#define DPFC_CTL_PLANEB (1<<30)
+#define DPFC_CTL_FENCE_EN (1<<29)
+#define DPFC_SR_EN (1<<10)
+#define DPFC_CTL_LIMIT_1X (0<<6)
+#define DPFC_CTL_LIMIT_2X (1<<6)
+#define DPFC_CTL_LIMIT_4X (2<<6)
+#define DPFC_RECOMP_CTL 0x320c
+#define DPFC_RECOMP_STALL_EN (1<<27)
+#define DPFC_RECOMP_STALL_WM_SHIFT (16)
+#define DPFC_RECOMP_STALL_WM_MASK (0x07ff0000)
+#define DPFC_RECOMP_TIMER_COUNT_SHIFT (0)
+#define DPFC_RECOMP_TIMER_COUNT_MASK (0x0000003f)
+#define DPFC_STATUS 0x3210
+#define DPFC_INVAL_SEG_SHIFT (16)
+#define DPFC_INVAL_SEG_MASK (0x07ff0000)
+#define DPFC_COMP_SEG_SHIFT (0)
+#define DPFC_COMP_SEG_MASK (0x000003ff)
+#define DPFC_STATUS2 0x3214
+#define DPFC_FENCE_YOFF 0x3218
+#define DPFC_CHICKEN 0x3224
+#define DPFC_HT_MODIFY (1<<31)
+
/*
* GPIO regs
*/
@@ -553,9 +605,118 @@
#define DPLLA_TEST_M_BYPASS (1 << 2)
#define DPLLA_INPUT_BUFFER_ENABLE (1 << 0)
#define D_STATE 0x6104
-#define CG_2D_DIS 0x6200
-#define DPCUNIT_CLOCK_GATE_DISABLE (1 << 24)
-#define CG_3D_DIS 0x6204
+#define DSTATE_PLL_D3_OFF (1<<3)
+#define DSTATE_GFX_CLOCK_GATING (1<<1)
+#define DSTATE_DOT_CLOCK_GATING (1<<0)
+#define DSPCLK_GATE_D 0x6200
+# define DPUNIT_B_CLOCK_GATE_DISABLE (1 << 30) /* 965 */
+# define VSUNIT_CLOCK_GATE_DISABLE (1 << 29) /* 965 */
+# define VRHUNIT_CLOCK_GATE_DISABLE (1 << 28) /* 965 */
+# define VRDUNIT_CLOCK_GATE_DISABLE (1 << 27) /* 965 */
+# define AUDUNIT_CLOCK_GATE_DISABLE (1 << 26) /* 965 */
+# define DPUNIT_A_CLOCK_GATE_DISABLE (1 << 25) /* 965 */
+# define DPCUNIT_CLOCK_GATE_DISABLE (1 << 24) /* 965 */
+# define TVRUNIT_CLOCK_GATE_DISABLE (1 << 23) /* 915-945 */
+# define TVCUNIT_CLOCK_GATE_DISABLE (1 << 22) /* 915-945 */
+# define TVFUNIT_CLOCK_GATE_DISABLE (1 << 21) /* 915-945 */
+# define TVEUNIT_CLOCK_GATE_DISABLE (1 << 20) /* 915-945 */
+# define DVSUNIT_CLOCK_GATE_DISABLE (1 << 19) /* 915-945 */
+# define DSSUNIT_CLOCK_GATE_DISABLE (1 << 18) /* 915-945 */
+# define DDBUNIT_CLOCK_GATE_DISABLE (1 << 17) /* 915-945 */
+# define DPRUNIT_CLOCK_GATE_DISABLE (1 << 16) /* 915-945 */
+# define DPFUNIT_CLOCK_GATE_DISABLE (1 << 15) /* 915-945 */
+# define DPBMUNIT_CLOCK_GATE_DISABLE (1 << 14) /* 915-945 */
+# define DPLSUNIT_CLOCK_GATE_DISABLE (1 << 13) /* 915-945 */
+# define DPLUNIT_CLOCK_GATE_DISABLE (1 << 12) /* 915-945 */
+# define DPOUNIT_CLOCK_GATE_DISABLE (1 << 11)
+# define DPBUNIT_CLOCK_GATE_DISABLE (1 << 10)
+# define DCUNIT_CLOCK_GATE_DISABLE (1 << 9)
+# define DPUNIT_CLOCK_GATE_DISABLE (1 << 8)
+# define VRUNIT_CLOCK_GATE_DISABLE (1 << 7) /* 915+: reserved */
+# define OVHUNIT_CLOCK_GATE_DISABLE (1 << 6) /* 830-865 */
+# define DPIOUNIT_CLOCK_GATE_DISABLE (1 << 6) /* 915-945 */
+# define OVFUNIT_CLOCK_GATE_DISABLE (1 << 5)
+# define OVBUNIT_CLOCK_GATE_DISABLE (1 << 4)
+/**
+ * This bit must be set on the 830 to prevent hangs when turning off the
+ * overlay scaler.
+ */
+# define OVRUNIT_CLOCK_GATE_DISABLE (1 << 3)
+# define OVCUNIT_CLOCK_GATE_DISABLE (1 << 2)
+# define OVUUNIT_CLOCK_GATE_DISABLE (1 << 1)
+# define ZVUNIT_CLOCK_GATE_DISABLE (1 << 0) /* 830 */
+# define OVLUNIT_CLOCK_GATE_DISABLE (1 << 0) /* 845,865 */
+
+#define RENCLK_GATE_D1 0x6204
+# define BLITTER_CLOCK_GATE_DISABLE (1 << 13) /* 945GM only */
+# define MPEG_CLOCK_GATE_DISABLE (1 << 12) /* 945GM only */
+# define PC_FE_CLOCK_GATE_DISABLE (1 << 11)
+# define PC_BE_CLOCK_GATE_DISABLE (1 << 10)
+# define WINDOWER_CLOCK_GATE_DISABLE (1 << 9)
+# define INTERPOLATOR_CLOCK_GATE_DISABLE (1 << 8)
+# define COLOR_CALCULATOR_CLOCK_GATE_DISABLE (1 << 7)
+# define MOTION_COMP_CLOCK_GATE_DISABLE (1 << 6)
+# define MAG_CLOCK_GATE_DISABLE (1 << 5)
+/** This bit must be unset on 855,865 */
+# define MECI_CLOCK_GATE_DISABLE (1 << 4)
+# define DCMP_CLOCK_GATE_DISABLE (1 << 3)
+# define MEC_CLOCK_GATE_DISABLE (1 << 2)
+# define MECO_CLOCK_GATE_DISABLE (1 << 1)
+/** This bit must be set on 855,865. */
+# define SV_CLOCK_GATE_DISABLE (1 << 0)
+# define I915_MPEG_CLOCK_GATE_DISABLE (1 << 16)
+# define I915_VLD_IP_PR_CLOCK_GATE_DISABLE (1 << 15)
+# define I915_MOTION_COMP_CLOCK_GATE_DISABLE (1 << 14)
+# define I915_BD_BF_CLOCK_GATE_DISABLE (1 << 13)
+# define I915_SF_SE_CLOCK_GATE_DISABLE (1 << 12)
+# define I915_WM_CLOCK_GATE_DISABLE (1 << 11)
+# define I915_IZ_CLOCK_GATE_DISABLE (1 << 10)
+# define I915_PI_CLOCK_GATE_DISABLE (1 << 9)
+# define I915_DI_CLOCK_GATE_DISABLE (1 << 8)
+# define I915_SH_SV_CLOCK_GATE_DISABLE (1 << 7)
+# define I915_PL_DG_QC_FT_CLOCK_GATE_DISABLE (1 << 6)
+# define I915_SC_CLOCK_GATE_DISABLE (1 << 5)
+# define I915_FL_CLOCK_GATE_DISABLE (1 << 4)
+# define I915_DM_CLOCK_GATE_DISABLE (1 << 3)
+# define I915_PS_CLOCK_GATE_DISABLE (1 << 2)
+# define I915_CC_CLOCK_GATE_DISABLE (1 << 1)
+# define I915_BY_CLOCK_GATE_DISABLE (1 << 0)
+
+# define I965_RCZ_CLOCK_GATE_DISABLE (1 << 30)
+/** This bit must always be set on 965G/965GM */
+# define I965_RCC_CLOCK_GATE_DISABLE (1 << 29)
+# define I965_RCPB_CLOCK_GATE_DISABLE (1 << 28)
+# define I965_DAP_CLOCK_GATE_DISABLE (1 << 27)
+# define I965_ROC_CLOCK_GATE_DISABLE (1 << 26)
+# define I965_GW_CLOCK_GATE_DISABLE (1 << 25)
+# define I965_TD_CLOCK_GATE_DISABLE (1 << 24)
+/** This bit must always be set on 965G */
+# define I965_ISC_CLOCK_GATE_DISABLE (1 << 23)
+# define I965_IC_CLOCK_GATE_DISABLE (1 << 22)
+# define I965_EU_CLOCK_GATE_DISABLE (1 << 21)
+# define I965_IF_CLOCK_GATE_DISABLE (1 << 20)
+# define I965_TC_CLOCK_GATE_DISABLE (1 << 19)
+# define I965_SO_CLOCK_GATE_DISABLE (1 << 17)
+# define I965_FBC_CLOCK_GATE_DISABLE (1 << 16)
+# define I965_MARI_CLOCK_GATE_DISABLE (1 << 15)
+# define I965_MASF_CLOCK_GATE_DISABLE (1 << 14)
+# define I965_MAWB_CLOCK_GATE_DISABLE (1 << 13)
+# define I965_EM_CLOCK_GATE_DISABLE (1 << 12)
+# define I965_UC_CLOCK_GATE_DISABLE (1 << 11)
+# define I965_SI_CLOCK_GATE_DISABLE (1 << 6)
+# define I965_MT_CLOCK_GATE_DISABLE (1 << 5)
+# define I965_PL_CLOCK_GATE_DISABLE (1 << 4)
+# define I965_DG_CLOCK_GATE_DISABLE (1 << 3)
+# define I965_QC_CLOCK_GATE_DISABLE (1 << 2)
+# define I965_FT_CLOCK_GATE_DISABLE (1 << 1)
+# define I965_DM_CLOCK_GATE_DISABLE (1 << 0)
+
+#define RENCLK_GATE_D2 0x6208
+#define VF_UNIT_CLOCK_GATE_DISABLE (1 << 9)
+#define GS_UNIT_CLOCK_GATE_DISABLE (1 << 7)
+#define CL_UNIT_CLOCK_GATE_DISABLE (1 << 6)
+#define RAMCLK_GATE_D 0x6210 /* CRL only */
+#define DEUC 0x6214 /* CRL only */
/*
* Palette regs
@@ -683,6 +844,7 @@
#define SDVOB_HOTPLUG_INT_EN (1 << 26)
#define SDVOC_HOTPLUG_INT_EN (1 << 25)
#define TV_HOTPLUG_INT_EN (1 << 18)
+#define CRT_EOS_INT_EN (1 << 10)
#define CRT_HOTPLUG_INT_EN (1 << 9)
#define CRT_HOTPLUG_FORCE_DETECT (1 << 3)
#define CRT_HOTPLUG_ACTIVATION_PERIOD_32 (0 << 8)
@@ -717,6 +879,7 @@
#define DPC_HOTPLUG_INT_STATUS (1 << 28)
#define HDMID_HOTPLUG_INT_STATUS (1 << 27)
#define DPD_HOTPLUG_INT_STATUS (1 << 27)
+#define CRT_EOS_INT_STATUS (1 << 12)
#define CRT_HOTPLUG_INT_STATUS (1 << 11)
#define TV_HOTPLUG_INT_STATUS (1 << 10)
#define CRT_HOTPLUG_MONITOR_MASK (3 << 8)
@@ -1586,6 +1749,7 @@
#define PIPECONF_PROGRESSIVE (0 << 21)
#define PIPECONF_INTERLACE_W_FIELD_INDICATION (6 << 21)
#define PIPECONF_INTERLACE_FIELD_0_ONLY (7 << 21)
+#define PIPECONF_CXSR_DOWNCLOCK (1<<16)
#define PIPEASTAT 0x70024
#define PIPE_FIFO_UNDERRUN_STATUS (1UL<<31)
#define PIPE_CRC_ERROR_ENABLE (1UL<<29)
@@ -1733,6 +1897,7 @@
#define DISPPLANE_NO_LINE_DOUBLE 0
#define DISPPLANE_STEREO_POLARITY_FIRST 0
#define DISPPLANE_STEREO_POLARITY_SECOND (1<<18)
+#define DISPPLANE_TRICKLE_FEED_DISABLE (1<<14) /* IGDNG */
#define DISPPLANE_TILED (1<<10)
#define DSPAADDR 0x70184
#define DSPASTRIDE 0x70188
@@ -1867,6 +2032,8 @@
#define PF_ENABLE (1<<31)
#define PFA_WIN_SZ 0x68074
#define PFB_WIN_SZ 0x68874
+#define PFA_WIN_POS 0x68070
+#define PFB_WIN_POS 0x68870
/* legacy palette */
#define LGC_PALETTE_A 0x4a000
@@ -1913,6 +2080,9 @@
#define GTIIR 0x44018
#define GTIER 0x4401c
+#define DISP_ARB_CTL 0x45000
+#define DISP_TILE_SURFACE_SWIZZLING (1<<13)
+
/* PCH */
/* south display engine interrupt */
diff --git a/drivers/gpu/drm/i915/i915_suspend.c b/drivers/gpu/drm/i915/i915_suspend.c
index 1d04e1904ac..bd6d8d91ca9 100644
--- a/drivers/gpu/drm/i915/i915_suspend.c
+++ b/drivers/gpu/drm/i915/i915_suspend.c
@@ -228,6 +228,7 @@ static void i915_save_modeset_reg(struct drm_device *dev)
if (drm_core_check_feature(dev, DRIVER_MODESET))
return;
+
/* Pipe & plane A info */
dev_priv->savePIPEACONF = I915_READ(PIPEACONF);
dev_priv->savePIPEASRC = I915_READ(PIPEASRC);
@@ -285,6 +286,7 @@ static void i915_save_modeset_reg(struct drm_device *dev)
dev_priv->savePIPEBSTAT = I915_READ(PIPEBSTAT);
return;
}
+
static void i915_restore_modeset_reg(struct drm_device *dev)
{
struct drm_i915_private *dev_priv = dev->dev_private;
@@ -379,19 +381,10 @@ static void i915_restore_modeset_reg(struct drm_device *dev)
return;
}
-int i915_save_state(struct drm_device *dev)
+
+void i915_save_display(struct drm_device *dev)
{
struct drm_i915_private *dev_priv = dev->dev_private;
- int i;
-
- pci_read_config_byte(dev->pdev, LBB, &dev_priv->saveLBB);
-
- /* Render Standby */
- if (IS_I965G(dev) && IS_MOBILE(dev))
- dev_priv->saveRENDERSTANDBY = I915_READ(MCHBAR_RENDER_STANDBY);
-
- /* Hardware status page */
- dev_priv->saveHWS = I915_READ(HWS_PGA);
/* Display arbitration control */
dev_priv->saveDSPARB = I915_READ(DSPARB);
@@ -399,6 +392,7 @@ int i915_save_state(struct drm_device *dev)
/* This is only meaningful in non-KMS mode */
/* Don't save them in KMS mode */
i915_save_modeset_reg(dev);
+
/* Cursor state */
dev_priv->saveCURACNTR = I915_READ(CURACNTR);
dev_priv->saveCURAPOS = I915_READ(CURAPOS);
@@ -448,81 +442,22 @@ int i915_save_state(struct drm_device *dev)
dev_priv->saveFBC_CONTROL2 = I915_READ(FBC_CONTROL2);
dev_priv->saveFBC_CONTROL = I915_READ(FBC_CONTROL);
- /* Interrupt state */
- dev_priv->saveIIR = I915_READ(IIR);
- dev_priv->saveIER = I915_READ(IER);
- dev_priv->saveIMR = I915_READ(IMR);
-
/* VGA state */
dev_priv->saveVGA0 = I915_READ(VGA0);
dev_priv->saveVGA1 = I915_READ(VGA1);
dev_priv->saveVGA_PD = I915_READ(VGA_PD);
dev_priv->saveVGACNTRL = I915_READ(VGACNTRL);
- /* Clock gating state */
- dev_priv->saveD_STATE = I915_READ(D_STATE);
- dev_priv->saveCG_2D_DIS = I915_READ(CG_2D_DIS);
-
- /* Cache mode state */
- dev_priv->saveCACHE_MODE_0 = I915_READ(CACHE_MODE_0);
-
- /* Memory Arbitration state */
- dev_priv->saveMI_ARB_STATE = I915_READ(MI_ARB_STATE);
-
- /* Scratch space */
- for (i = 0; i < 16; i++) {
- dev_priv->saveSWF0[i] = I915_READ(SWF00 + (i << 2));
- dev_priv->saveSWF1[i] = I915_READ(SWF10 + (i << 2));
- }
- for (i = 0; i < 3; i++)
- dev_priv->saveSWF2[i] = I915_READ(SWF30 + (i << 2));
-
- /* Fences */
- if (IS_I965G(dev)) {
- for (i = 0; i < 16; i++)
- dev_priv->saveFENCE[i] = I915_READ64(FENCE_REG_965_0 + (i * 8));
- } else {
- for (i = 0; i < 8; i++)
- dev_priv->saveFENCE[i] = I915_READ(FENCE_REG_830_0 + (i * 4));
-
- if (IS_I945G(dev) || IS_I945GM(dev) || IS_G33(dev))
- for (i = 0; i < 8; i++)
- dev_priv->saveFENCE[i+8] = I915_READ(FENCE_REG_945_8 + (i * 4));
- }
i915_save_vga(dev);
-
- return 0;
}
-int i915_restore_state(struct drm_device *dev)
+void i915_restore_display(struct drm_device *dev)
{
struct drm_i915_private *dev_priv = dev->dev_private;
- int i;
-
- pci_write_config_byte(dev->pdev, LBB, dev_priv->saveLBB);
-
- /* Render Standby */
- if (IS_I965G(dev) && IS_MOBILE(dev))
- I915_WRITE(MCHBAR_RENDER_STANDBY, dev_priv->saveRENDERSTANDBY);
-
- /* Hardware status page */
- I915_WRITE(HWS_PGA, dev_priv->saveHWS);
/* Display arbitration */
I915_WRITE(DSPARB, dev_priv->saveDSPARB);
- /* Fences */
- if (IS_I965G(dev)) {
- for (i = 0; i < 16; i++)
- I915_WRITE64(FENCE_REG_965_0 + (i * 8), dev_priv->saveFENCE[i]);
- } else {
- for (i = 0; i < 8; i++)
- I915_WRITE(FENCE_REG_830_0 + (i * 4), dev_priv->saveFENCE[i]);
- if (IS_I945G(dev) || IS_I945GM(dev) || IS_G33(dev))
- for (i = 0; i < 8; i++)
- I915_WRITE(FENCE_REG_945_8 + (i * 4), dev_priv->saveFENCE[i+8]);
- }
-
/* Display port ratios (must be done before clock is set) */
if (SUPPORTS_INTEGRATED_DP(dev)) {
I915_WRITE(PIPEA_GMCH_DATA_M, dev_priv->savePIPEA_GMCH_DATA_M);
@@ -534,9 +469,11 @@ int i915_restore_state(struct drm_device *dev)
I915_WRITE(PIPEA_DP_LINK_N, dev_priv->savePIPEA_DP_LINK_N);
I915_WRITE(PIPEB_DP_LINK_N, dev_priv->savePIPEB_DP_LINK_N);
}
+
/* This is only meaningful in non-KMS mode */
/* Don't restore them in KMS mode */
i915_restore_modeset_reg(dev);
+
/* Cursor state */
I915_WRITE(CURAPOS, dev_priv->saveCURAPOS);
I915_WRITE(CURACNTR, dev_priv->saveCURACNTR);
@@ -586,9 +523,98 @@ int i915_restore_state(struct drm_device *dev)
I915_WRITE(VGA_PD, dev_priv->saveVGA_PD);
DRM_UDELAY(150);
+ i915_restore_vga(dev);
+}
+
+int i915_save_state(struct drm_device *dev)
+{
+ struct drm_i915_private *dev_priv = dev->dev_private;
+ int i;
+
+ pci_read_config_byte(dev->pdev, LBB, &dev_priv->saveLBB);
+
+ /* Render Standby */
+ if (IS_I965G(dev) && IS_MOBILE(dev))
+ dev_priv->saveRENDERSTANDBY = I915_READ(MCHBAR_RENDER_STANDBY);
+
+ /* Hardware status page */
+ dev_priv->saveHWS = I915_READ(HWS_PGA);
+
+ i915_save_display(dev);
+
+ /* Interrupt state */
+ dev_priv->saveIER = I915_READ(IER);
+ dev_priv->saveIMR = I915_READ(IMR);
+
+ /* Clock gating state */
+ dev_priv->saveD_STATE = I915_READ(D_STATE);
+ dev_priv->saveDSPCLK_GATE_D = I915_READ(DSPCLK_GATE_D); /* Not sure about this */
+
+ /* Cache mode state */
+ dev_priv->saveCACHE_MODE_0 = I915_READ(CACHE_MODE_0);
+
+ /* Memory Arbitration state */
+ dev_priv->saveMI_ARB_STATE = I915_READ(MI_ARB_STATE);
+
+ /* Scratch space */
+ for (i = 0; i < 16; i++) {
+ dev_priv->saveSWF0[i] = I915_READ(SWF00 + (i << 2));
+ dev_priv->saveSWF1[i] = I915_READ(SWF10 + (i << 2));
+ }
+ for (i = 0; i < 3; i++)
+ dev_priv->saveSWF2[i] = I915_READ(SWF30 + (i << 2));
+
+ /* Fences */
+ if (IS_I965G(dev)) {
+ for (i = 0; i < 16; i++)
+ dev_priv->saveFENCE[i] = I915_READ64(FENCE_REG_965_0 + (i * 8));
+ } else {
+ for (i = 0; i < 8; i++)
+ dev_priv->saveFENCE[i] = I915_READ(FENCE_REG_830_0 + (i * 4));
+
+ if (IS_I945G(dev) || IS_I945GM(dev) || IS_G33(dev))
+ for (i = 0; i < 8; i++)
+ dev_priv->saveFENCE[i+8] = I915_READ(FENCE_REG_945_8 + (i * 4));
+ }
+
+ return 0;
+}
+
+int i915_restore_state(struct drm_device *dev)
+{
+ struct drm_i915_private *dev_priv = dev->dev_private;
+ int i;
+
+ pci_write_config_byte(dev->pdev, LBB, dev_priv->saveLBB);
+
+ /* Render Standby */
+ if (IS_I965G(dev) && IS_MOBILE(dev))
+ I915_WRITE(MCHBAR_RENDER_STANDBY, dev_priv->saveRENDERSTANDBY);
+
+ /* Hardware status page */
+ I915_WRITE(HWS_PGA, dev_priv->saveHWS);
+
+ /* Fences */
+ if (IS_I965G(dev)) {
+ for (i = 0; i < 16; i++)
+ I915_WRITE64(FENCE_REG_965_0 + (i * 8), dev_priv->saveFENCE[i]);
+ } else {
+ for (i = 0; i < 8; i++)
+ I915_WRITE(FENCE_REG_830_0 + (i * 4), dev_priv->saveFENCE[i]);
+ if (IS_I945G(dev) || IS_I945GM(dev) || IS_G33(dev))
+ for (i = 0; i < 8; i++)
+ I915_WRITE(FENCE_REG_945_8 + (i * 4), dev_priv->saveFENCE[i+8]);
+ }
+
+ i915_restore_display(dev);
+
+ /* Interrupt state */
+ I915_WRITE (IER, dev_priv->saveIER);
+ I915_WRITE (IMR, dev_priv->saveIMR);
+
/* Clock gating state */
I915_WRITE (D_STATE, dev_priv->saveD_STATE);
- I915_WRITE (CG_2D_DIS, dev_priv->saveCG_2D_DIS);
+ I915_WRITE (DSPCLK_GATE_D, dev_priv->saveDSPCLK_GATE_D);
/* Cache mode state */
I915_WRITE (CACHE_MODE_0, dev_priv->saveCACHE_MODE_0 | 0xffff0000);
@@ -603,8 +629,6 @@ int i915_restore_state(struct drm_device *dev)
for (i = 0; i < 3; i++)
I915_WRITE(SWF30 + (i << 2), dev_priv->saveSWF2[i]);
- i915_restore_vga(dev);
-
return 0;
}
diff --git a/drivers/gpu/drm/i915/i915_trace.h b/drivers/gpu/drm/i915/i915_trace.h
new file mode 100644
index 00000000000..01840d9bc38
--- /dev/null
+++ b/drivers/gpu/drm/i915/i915_trace.h
@@ -0,0 +1,316 @@
+#if !defined(_I915_TRACE_H_) || defined(TRACE_HEADER_MULTI_READ)
+#define _I915_TRACE_H_
+
+#include <linux/stringify.h>
+#include <linux/types.h>
+#include <linux/tracepoint.h>
+
+#include <drm/drmP.h>
+
+#undef TRACE_SYSTEM
+#define TRACE_SYSTEM i915
+#define TRACE_SYSTEM_STRING __stringify(TRACE_SYSTEM)
+#define TRACE_INCLUDE_FILE i915_trace
+
+/* object tracking */
+
+TRACE_EVENT(i915_gem_object_create,
+
+ TP_PROTO(struct drm_gem_object *obj),
+
+ TP_ARGS(obj),
+
+ TP_STRUCT__entry(
+ __field(struct drm_gem_object *, obj)
+ __field(u32, size)
+ ),
+
+ TP_fast_assign(
+ __entry->obj = obj;
+ __entry->size = obj->size;
+ ),
+
+ TP_printk("obj=%p, size=%u", __entry->obj, __entry->size)
+);
+
+TRACE_EVENT(i915_gem_object_bind,
+
+ TP_PROTO(struct drm_gem_object *obj, u32 gtt_offset),
+
+ TP_ARGS(obj, gtt_offset),
+
+ TP_STRUCT__entry(
+ __field(struct drm_gem_object *, obj)
+ __field(u32, gtt_offset)
+ ),
+
+ TP_fast_assign(
+ __entry->obj = obj;
+ __entry->gtt_offset = gtt_offset;
+ ),
+
+ TP_printk("obj=%p, gtt_offset=%08x",
+ __entry->obj, __entry->gtt_offset)
+);
+
+TRACE_EVENT(i915_gem_object_clflush,
+
+ TP_PROTO(struct drm_gem_object *obj),
+
+ TP_ARGS(obj),
+
+ TP_STRUCT__entry(
+ __field(struct drm_gem_object *, obj)
+ ),
+
+ TP_fast_assign(
+ __entry->obj = obj;
+ ),
+
+ TP_printk("obj=%p", __entry->obj)
+);
+
+TRACE_EVENT(i915_gem_object_change_domain,
+
+ TP_PROTO(struct drm_gem_object *obj, uint32_t old_read_domains, uint32_t old_write_domain),
+
+ TP_ARGS(obj, old_read_domains, old_write_domain),
+
+ TP_STRUCT__entry(
+ __field(struct drm_gem_object *, obj)
+ __field(u32, read_domains)
+ __field(u32, write_domain)
+ ),
+
+ TP_fast_assign(
+ __entry->obj = obj;
+ __entry->read_domains = obj->read_domains | (old_read_domains << 16);
+ __entry->write_domain = obj->write_domain | (old_write_domain << 16);
+ ),
+
+ TP_printk("obj=%p, read=%04x, write=%04x",
+ __entry->obj,
+ __entry->read_domains, __entry->write_domain)
+);
+
+TRACE_EVENT(i915_gem_object_get_fence,
+
+ TP_PROTO(struct drm_gem_object *obj, int fence, int tiling_mode),
+
+ TP_ARGS(obj, fence, tiling_mode),
+
+ TP_STRUCT__entry(
+ __field(struct drm_gem_object *, obj)
+ __field(int, fence)
+ __field(int, tiling_mode)
+ ),
+
+ TP_fast_assign(
+ __entry->obj = obj;
+ __entry->fence = fence;
+ __entry->tiling_mode = tiling_mode;
+ ),
+
+ TP_printk("obj=%p, fence=%d, tiling=%d",
+ __entry->obj, __entry->fence, __entry->tiling_mode)
+);
+
+TRACE_EVENT(i915_gem_object_unbind,
+
+ TP_PROTO(struct drm_gem_object *obj),
+
+ TP_ARGS(obj),
+
+ TP_STRUCT__entry(
+ __field(struct drm_gem_object *, obj)
+ ),
+
+ TP_fast_assign(
+ __entry->obj = obj;
+ ),
+
+ TP_printk("obj=%p", __entry->obj)
+);
+
+TRACE_EVENT(i915_gem_object_destroy,
+
+ TP_PROTO(struct drm_gem_object *obj),
+
+ TP_ARGS(obj),
+
+ TP_STRUCT__entry(
+ __field(struct drm_gem_object *, obj)
+ ),
+
+ TP_fast_assign(
+ __entry->obj = obj;
+ ),
+
+ TP_printk("obj=%p", __entry->obj)
+);
+
+/* batch tracing */
+
+TRACE_EVENT(i915_gem_request_submit,
+
+ TP_PROTO(struct drm_device *dev, u32 seqno),
+
+ TP_ARGS(dev, seqno),
+
+ TP_STRUCT__entry(
+ __field(u32, dev)
+ __field(u32, seqno)
+ ),
+
+ TP_fast_assign(
+ __entry->dev = dev->primary->index;
+ __entry->seqno = seqno;
+ i915_trace_irq_get(dev, seqno);
+ ),
+
+ TP_printk("dev=%u, seqno=%u", __entry->dev, __entry->seqno)
+);
+
+TRACE_EVENT(i915_gem_request_flush,
+
+ TP_PROTO(struct drm_device *dev, u32 seqno,
+ u32 flush_domains, u32 invalidate_domains),
+
+ TP_ARGS(dev, seqno, flush_domains, invalidate_domains),
+
+ TP_STRUCT__entry(
+ __field(u32, dev)
+ __field(u32, seqno)
+ __field(u32, flush_domains)
+ __field(u32, invalidate_domains)
+ ),
+
+ TP_fast_assign(
+ __entry->dev = dev->primary->index;
+ __entry->seqno = seqno;
+ __entry->flush_domains = flush_domains;
+ __entry->invalidate_domains = invalidate_domains;
+ ),
+
+ TP_printk("dev=%u, seqno=%u, flush=%04x, invalidate=%04x",
+ __entry->dev, __entry->seqno,
+ __entry->flush_domains, __entry->invalidate_domains)
+);
+
+
+TRACE_EVENT(i915_gem_request_complete,
+
+ TP_PROTO(struct drm_device *dev, u32 seqno),
+
+ TP_ARGS(dev, seqno),
+
+ TP_STRUCT__entry(
+ __field(u32, dev)
+ __field(u32, seqno)
+ ),
+
+ TP_fast_assign(
+ __entry->dev = dev->primary->index;
+ __entry->seqno = seqno;
+ ),
+
+ TP_printk("dev=%u, seqno=%u", __entry->dev, __entry->seqno)
+);
+
+TRACE_EVENT(i915_gem_request_retire,
+
+ TP_PROTO(struct drm_device *dev, u32 seqno),
+
+ TP_ARGS(dev, seqno),
+
+ TP_STRUCT__entry(
+ __field(u32, dev)
+ __field(u32, seqno)
+ ),
+
+ TP_fast_assign(
+ __entry->dev = dev->primary->index;
+ __entry->seqno = seqno;
+ ),
+
+ TP_printk("dev=%u, seqno=%u", __entry->dev, __entry->seqno)
+);
+
+TRACE_EVENT(i915_gem_request_wait_begin,
+
+ TP_PROTO(struct drm_device *dev, u32 seqno),
+
+ TP_ARGS(dev, seqno),
+
+ TP_STRUCT__entry(
+ __field(u32, dev)
+ __field(u32, seqno)
+ ),
+
+ TP_fast_assign(
+ __entry->dev = dev->primary->index;
+ __entry->seqno = seqno;
+ ),
+
+ TP_printk("dev=%u, seqno=%u", __entry->dev, __entry->seqno)
+);
+
+TRACE_EVENT(i915_gem_request_wait_end,
+
+ TP_PROTO(struct drm_device *dev, u32 seqno),
+
+ TP_ARGS(dev, seqno),
+
+ TP_STRUCT__entry(
+ __field(u32, dev)
+ __field(u32, seqno)
+ ),
+
+ TP_fast_assign(
+ __entry->dev = dev->primary->index;
+ __entry->seqno = seqno;
+ ),
+
+ TP_printk("dev=%u, seqno=%u", __entry->dev, __entry->seqno)
+);
+
+TRACE_EVENT(i915_ring_wait_begin,
+
+ TP_PROTO(struct drm_device *dev),
+
+ TP_ARGS(dev),
+
+ TP_STRUCT__entry(
+ __field(u32, dev)
+ ),
+
+ TP_fast_assign(
+ __entry->dev = dev->primary->index;
+ ),
+
+ TP_printk("dev=%u", __entry->dev)
+);
+
+TRACE_EVENT(i915_ring_wait_end,
+
+ TP_PROTO(struct drm_device *dev),
+
+ TP_ARGS(dev),
+
+ TP_STRUCT__entry(
+ __field(u32, dev)
+ ),
+
+ TP_fast_assign(
+ __entry->dev = dev->primary->index;
+ ),
+
+ TP_printk("dev=%u", __entry->dev)
+);
+
+#endif /* _I915_TRACE_H_ */
+
+/* This part must be outside protection */
+#undef TRACE_INCLUDE_PATH
+#define TRACE_INCLUDE_PATH ../../drivers/gpu/drm/i915
+#include <trace/define_trace.h>
diff --git a/drivers/gpu/drm/i915/i915_trace_points.c b/drivers/gpu/drm/i915/i915_trace_points.c
new file mode 100644
index 00000000000..ead876eb6ea
--- /dev/null
+++ b/drivers/gpu/drm/i915/i915_trace_points.c
@@ -0,0 +1,11 @@
+/*
+ * Copyright © 2009 Intel Corporation
+ *
+ * Authors:
+ * Chris Wilson <chris@chris-wilson.co.uk>
+ */
+
+#include "i915_drv.h"
+
+#define CREATE_TRACE_POINTS
+#include "i915_trace.h"
diff --git a/drivers/gpu/drm/i915/intel_bios.c b/drivers/gpu/drm/i915/intel_bios.c
index f806fcc54e0..4337414846b 100644
--- a/drivers/gpu/drm/i915/intel_bios.c
+++ b/drivers/gpu/drm/i915/intel_bios.c
@@ -217,6 +217,9 @@ parse_general_features(struct drm_i915_private *dev_priv,
if (IS_I85X(dev_priv->dev))
dev_priv->lvds_ssc_freq =
general->ssc_freq ? 66 : 48;
+ else if (IS_IGDNG(dev_priv->dev))
+ dev_priv->lvds_ssc_freq =
+ general->ssc_freq ? 100 : 120;
else
dev_priv->lvds_ssc_freq =
general->ssc_freq ? 100 : 96;
@@ -355,8 +358,14 @@ parse_driver_features(struct drm_i915_private *dev_priv,
}
driver = find_section(bdb, BDB_DRIVER_FEATURES);
- if (driver && driver->lvds_config == BDB_DRIVER_FEATURE_EDP)
+ if (!driver)
+ return;
+
+ if (driver->lvds_config == BDB_DRIVER_FEATURE_EDP)
dev_priv->edp_support = 1;
+
+ if (driver->dual_frequency)
+ dev_priv->render_reclock_avail = true;
}
/**
diff --git a/drivers/gpu/drm/i915/intel_crt.c b/drivers/gpu/drm/i915/intel_crt.c
index 590f81c8f59..212e22740fc 100644
--- a/drivers/gpu/drm/i915/intel_crt.c
+++ b/drivers/gpu/drm/i915/intel_crt.c
@@ -64,6 +64,34 @@ static void intel_crt_dpms(struct drm_encoder *encoder, int mode)
}
I915_WRITE(reg, temp);
+
+ if (IS_IGD(dev)) {
+ if (mode == DRM_MODE_DPMS_OFF) {
+ /* turn off DAC */
+ temp = I915_READ(PORT_HOTPLUG_EN);
+ temp &= ~CRT_EOS_INT_EN;
+ I915_WRITE(PORT_HOTPLUG_EN, temp);
+
+ temp = I915_READ(PORT_HOTPLUG_STAT);
+ if (temp & CRT_EOS_INT_STATUS)
+ I915_WRITE(PORT_HOTPLUG_STAT,
+ CRT_EOS_INT_STATUS);
+ } else {
+ /* turn on DAC. The EOS interrupt must be enabled after the DAC
+ * is enabled, so it is not appropriate to enable it in
+ * i915_driver_irq_postinstall().
+ * Wait 12.5 ms after the DAC is enabled.
+ */
+ msleep(13);
+ temp = I915_READ(PORT_HOTPLUG_STAT);
+ if (temp & CRT_EOS_INT_STATUS)
+ I915_WRITE(PORT_HOTPLUG_STAT,
+ CRT_EOS_INT_STATUS);
+ temp = I915_READ(PORT_HOTPLUG_EN);
+ temp |= CRT_EOS_INT_EN;
+ I915_WRITE(PORT_HOTPLUG_EN, temp);
+ }
+ }
}
static int intel_crt_mode_valid(struct drm_connector *connector,
@@ -151,13 +179,10 @@ static bool intel_igdng_crt_detect_hotplug(struct drm_connector *connector)
{
struct drm_device *dev = connector->dev;
struct drm_i915_private *dev_priv = dev->dev_private;
- u32 adpa, temp;
+ u32 adpa;
bool ret;
- temp = adpa = I915_READ(PCH_ADPA);
-
- adpa &= ~ADPA_DAC_ENABLE;
- I915_WRITE(PCH_ADPA, adpa);
+ adpa = I915_READ(PCH_ADPA);
adpa &= ~ADPA_CRT_HOTPLUG_MASK;
@@ -184,8 +209,6 @@ static bool intel_igdng_crt_detect_hotplug(struct drm_connector *connector)
else
ret = false;
- /* restore origin register */
- I915_WRITE(PCH_ADPA, temp);
return ret;
}
diff --git a/drivers/gpu/drm/i915/intel_display.c b/drivers/gpu/drm/i915/intel_display.c
index 748ed50c55c..3c14240cc00 100644
--- a/drivers/gpu/drm/i915/intel_display.c
+++ b/drivers/gpu/drm/i915/intel_display.c
@@ -24,6 +24,8 @@
* Eric Anholt <eric@anholt.net>
*/
+#include <linux/module.h>
+#include <linux/input.h>
#include <linux/i2c.h>
#include <linux/kernel.h>
#include "drmP.h"
@@ -38,6 +40,7 @@
bool intel_pipe_has_type (struct drm_crtc *crtc, int type);
static void intel_update_watermarks(struct drm_device *dev);
+static void intel_increase_pllclock(struct drm_crtc *crtc, bool schedule);
typedef struct {
/* given values */
@@ -67,6 +70,8 @@ struct intel_limit {
intel_p2_t p2;
bool (* find_pll)(const intel_limit_t *, struct drm_crtc *,
int, int, intel_clock_t *);
+ bool (* find_reduced_pll)(const intel_limit_t *, struct drm_crtc *,
+ int, int, intel_clock_t *);
};
#define I8XX_DOT_MIN 25000
@@ -261,6 +266,9 @@ static bool
intel_find_best_PLL(const intel_limit_t *limit, struct drm_crtc *crtc,
int target, int refclk, intel_clock_t *best_clock);
static bool
+intel_find_best_reduced_PLL(const intel_limit_t *limit, struct drm_crtc *crtc,
+ int target, int refclk, intel_clock_t *best_clock);
+static bool
intel_g4x_find_best_PLL(const intel_limit_t *limit, struct drm_crtc *crtc,
int target, int refclk, intel_clock_t *best_clock);
static bool
@@ -286,6 +294,7 @@ static const intel_limit_t intel_limits_i8xx_dvo = {
.p2 = { .dot_limit = I8XX_P2_SLOW_LIMIT,
.p2_slow = I8XX_P2_SLOW, .p2_fast = I8XX_P2_FAST },
.find_pll = intel_find_best_PLL,
+ .find_reduced_pll = intel_find_best_reduced_PLL,
};
static const intel_limit_t intel_limits_i8xx_lvds = {
@@ -300,6 +309,7 @@ static const intel_limit_t intel_limits_i8xx_lvds = {
.p2 = { .dot_limit = I8XX_P2_SLOW_LIMIT,
.p2_slow = I8XX_P2_LVDS_SLOW, .p2_fast = I8XX_P2_LVDS_FAST },
.find_pll = intel_find_best_PLL,
+ .find_reduced_pll = intel_find_best_reduced_PLL,
};
static const intel_limit_t intel_limits_i9xx_sdvo = {
@@ -314,6 +324,7 @@ static const intel_limit_t intel_limits_i9xx_sdvo = {
.p2 = { .dot_limit = I9XX_P2_SDVO_DAC_SLOW_LIMIT,
.p2_slow = I9XX_P2_SDVO_DAC_SLOW, .p2_fast = I9XX_P2_SDVO_DAC_FAST },
.find_pll = intel_find_best_PLL,
+ .find_reduced_pll = intel_find_best_reduced_PLL,
};
static const intel_limit_t intel_limits_i9xx_lvds = {
@@ -331,6 +342,7 @@ static const intel_limit_t intel_limits_i9xx_lvds = {
.p2 = { .dot_limit = I9XX_P2_LVDS_SLOW_LIMIT,
.p2_slow = I9XX_P2_LVDS_SLOW, .p2_fast = I9XX_P2_LVDS_FAST },
.find_pll = intel_find_best_PLL,
+ .find_reduced_pll = intel_find_best_reduced_PLL,
};
/* below parameter and function is for G4X Chipset Family*/
@@ -348,6 +360,7 @@ static const intel_limit_t intel_limits_g4x_sdvo = {
.p2_fast = G4X_P2_SDVO_FAST
},
.find_pll = intel_g4x_find_best_PLL,
+ .find_reduced_pll = intel_g4x_find_best_PLL,
};
static const intel_limit_t intel_limits_g4x_hdmi = {
@@ -364,6 +377,7 @@ static const intel_limit_t intel_limits_g4x_hdmi = {
.p2_fast = G4X_P2_HDMI_DAC_FAST
},
.find_pll = intel_g4x_find_best_PLL,
+ .find_reduced_pll = intel_g4x_find_best_PLL,
};
static const intel_limit_t intel_limits_g4x_single_channel_lvds = {
@@ -388,6 +402,7 @@ static const intel_limit_t intel_limits_g4x_single_channel_lvds = {
.p2_fast = G4X_P2_SINGLE_CHANNEL_LVDS_FAST
},
.find_pll = intel_g4x_find_best_PLL,
+ .find_reduced_pll = intel_g4x_find_best_PLL,
};
static const intel_limit_t intel_limits_g4x_dual_channel_lvds = {
@@ -412,6 +427,7 @@ static const intel_limit_t intel_limits_g4x_dual_channel_lvds = {
.p2_fast = G4X_P2_DUAL_CHANNEL_LVDS_FAST
},
.find_pll = intel_g4x_find_best_PLL,
+ .find_reduced_pll = intel_g4x_find_best_PLL,
};
static const intel_limit_t intel_limits_g4x_display_port = {
@@ -449,6 +465,7 @@ static const intel_limit_t intel_limits_igd_sdvo = {
.p2 = { .dot_limit = I9XX_P2_SDVO_DAC_SLOW_LIMIT,
.p2_slow = I9XX_P2_SDVO_DAC_SLOW, .p2_fast = I9XX_P2_SDVO_DAC_FAST },
.find_pll = intel_find_best_PLL,
+ .find_reduced_pll = intel_find_best_reduced_PLL,
};
static const intel_limit_t intel_limits_igd_lvds = {
@@ -464,6 +481,7 @@ static const intel_limit_t intel_limits_igd_lvds = {
.p2 = { .dot_limit = I9XX_P2_LVDS_SLOW_LIMIT,
.p2_slow = I9XX_P2_LVDS_SLOW, .p2_fast = I9XX_P2_LVDS_SLOW },
.find_pll = intel_find_best_PLL,
+ .find_reduced_pll = intel_find_best_reduced_PLL,
};
static const intel_limit_t intel_limits_igdng_sdvo = {
@@ -688,15 +706,16 @@ intel_find_best_PLL(const intel_limit_t *limit, struct drm_crtc *crtc,
memset (best_clock, 0, sizeof (*best_clock));
- for (clock.m1 = limit->m1.min; clock.m1 <= limit->m1.max; clock.m1++) {
- for (clock.m2 = limit->m2.min; clock.m2 <= limit->m2.max; clock.m2++) {
- /* m1 is always 0 in IGD */
- if (clock.m2 >= clock.m1 && !IS_IGD(dev))
- break;
- for (clock.n = limit->n.min; clock.n <= limit->n.max;
- clock.n++) {
- for (clock.p1 = limit->p1.min;
- clock.p1 <= limit->p1.max; clock.p1++) {
+ for (clock.p1 = limit->p1.max; clock.p1 >= limit->p1.min; clock.p1--) {
+ for (clock.m1 = limit->m1.min; clock.m1 <= limit->m1.max;
+ clock.m1++) {
+ for (clock.m2 = limit->m2.min;
+ clock.m2 <= limit->m2.max; clock.m2++) {
+ /* m1 is always 0 in IGD */
+ if (clock.m2 >= clock.m1 && !IS_IGD(dev))
+ break;
+ for (clock.n = limit->n.min;
+ clock.n <= limit->n.max; clock.n++) {
int this_err;
intel_clock(dev, refclk, &clock);
@@ -717,6 +736,46 @@ intel_find_best_PLL(const intel_limit_t *limit, struct drm_crtc *crtc,
return (err != target);
}
+
+static bool
+intel_find_best_reduced_PLL(const intel_limit_t *limit, struct drm_crtc *crtc,
+ int target, int refclk, intel_clock_t *best_clock)
+
+{
+ struct drm_device *dev = crtc->dev;
+ intel_clock_t clock;
+ int err = target;
+ bool found = false;
+
+ memcpy(&clock, best_clock, sizeof(intel_clock_t));
+
+ for (clock.m1 = limit->m1.min; clock.m1 <= limit->m1.max; clock.m1++) {
+ for (clock.m2 = limit->m2.min; clock.m2 <= limit->m2.max; clock.m2++) {
+ /* m1 is always 0 in IGD */
+ if (clock.m2 >= clock.m1 && !IS_IGD(dev))
+ break;
+ for (clock.n = limit->n.min; clock.n <= limit->n.max;
+ clock.n++) {
+ int this_err;
+
+ intel_clock(dev, refclk, &clock);
+
+ if (!intel_PLL_is_valid(crtc, &clock))
+ continue;
+
+ this_err = abs(clock.dot - target);
+ if (this_err < err) {
+ *best_clock = clock;
+ err = this_err;
+ found = true;
+ }
+ }
+ }
+ }
+
+ return found;
+}
+
static bool
intel_g4x_find_best_PLL(const intel_limit_t *limit, struct drm_crtc *crtc,
int target, int refclk, intel_clock_t *best_clock)
@@ -747,7 +806,7 @@ intel_g4x_find_best_PLL(const intel_limit_t *limit, struct drm_crtc *crtc,
max_n = limit->n.max;
/* based on hardware requriment prefer smaller n to precision */
for (clock.n = limit->n.min; clock.n <= max_n; clock.n++) {
- /* based on hardware requirment prefere larger m1,m2, p1 */
+ /* based on hardware requirement prefer larger m1,m2 */
for (clock.m1 = limit->m1.max;
clock.m1 >= limit->m1.min; clock.m1--) {
for (clock.m2 = limit->m2.max;
@@ -818,7 +877,7 @@ intel_igdng_find_best_PLL(const intel_limit_t *limit, struct drm_crtc *crtc,
refclk, best_clock);
if (intel_pipe_has_type(crtc, INTEL_OUTPUT_LVDS)) {
- if ((I915_READ(LVDS) & LVDS_CLKB_POWER_MASK) ==
+ if ((I915_READ(PCH_LVDS) & LVDS_CLKB_POWER_MASK) ==
LVDS_CLKB_POWER_UP)
clock.p2 = limit->p2.p2_fast;
else
@@ -832,15 +891,14 @@ intel_igdng_find_best_PLL(const intel_limit_t *limit, struct drm_crtc *crtc,
memset(best_clock, 0, sizeof(*best_clock));
max_n = limit->n.max;
- /* based on hardware requriment prefer smaller n to precision */
- for (clock.n = limit->n.min; clock.n <= max_n; clock.n++) {
- /* based on hardware requirment prefere larger m1,m2, p1 */
- for (clock.m1 = limit->m1.max;
- clock.m1 >= limit->m1.min; clock.m1--) {
- for (clock.m2 = limit->m2.max;
- clock.m2 >= limit->m2.min; clock.m2--) {
- for (clock.p1 = limit->p1.max;
- clock.p1 >= limit->p1.min; clock.p1--) {
+ for (clock.p1 = limit->p1.max; clock.p1 >= limit->p1.min; clock.p1--) {
+ /* based on hardware requirement prefer smaller n to precision */
+ for (clock.n = limit->n.min; clock.n <= max_n; clock.n++) {
+ /* based on hardware requirement prefer larger m1,m2 */
+ for (clock.m1 = limit->m1.max;
+ clock.m1 >= limit->m1.min; clock.m1--) {
+ for (clock.m2 = limit->m2.max;
+ clock.m2 >= limit->m2.min; clock.m2--) {
int this_err;
intel_clock(dev, refclk, &clock);
@@ -896,6 +954,241 @@ intel_wait_for_vblank(struct drm_device *dev)
mdelay(20);
}
+/* Parameters have changed, update FBC info */
+static void i8xx_enable_fbc(struct drm_crtc *crtc, unsigned long interval)
+{
+ struct drm_device *dev = crtc->dev;
+ struct drm_i915_private *dev_priv = dev->dev_private;
+ struct drm_framebuffer *fb = crtc->fb;
+ struct intel_framebuffer *intel_fb = to_intel_framebuffer(fb);
+ struct drm_i915_gem_object *obj_priv = intel_fb->obj->driver_private;
+ struct intel_crtc *intel_crtc = to_intel_crtc(crtc);
+ int plane, i;
+ u32 fbc_ctl, fbc_ctl2;
+
+ dev_priv->cfb_pitch = dev_priv->cfb_size / FBC_LL_SIZE;
+
+ if (fb->pitch < dev_priv->cfb_pitch)
+ dev_priv->cfb_pitch = fb->pitch;
+
+ /* FBC_CTL wants 64B units */
+ dev_priv->cfb_pitch = (dev_priv->cfb_pitch / 64) - 1;
+ dev_priv->cfb_fence = obj_priv->fence_reg;
+ dev_priv->cfb_plane = intel_crtc->plane;
+ plane = dev_priv->cfb_plane == 0 ? FBC_CTL_PLANEA : FBC_CTL_PLANEB;
+
+ /* Clear old tags */
+ for (i = 0; i < (FBC_LL_SIZE / 32) + 1; i++)
+ I915_WRITE(FBC_TAG + (i * 4), 0);
+
+ /* Set it up... */
+ fbc_ctl2 = FBC_CTL_FENCE_DBL | FBC_CTL_IDLE_IMM | plane;
+ if (obj_priv->tiling_mode != I915_TILING_NONE)
+ fbc_ctl2 |= FBC_CTL_CPU_FENCE;
+ I915_WRITE(FBC_CONTROL2, fbc_ctl2);
+ I915_WRITE(FBC_FENCE_OFF, crtc->y);
+
+ /* enable it... */
+ fbc_ctl = FBC_CTL_EN | FBC_CTL_PERIODIC;
+ fbc_ctl |= (dev_priv->cfb_pitch & 0xff) << FBC_CTL_STRIDE_SHIFT;
+ fbc_ctl |= (interval & 0x2fff) << FBC_CTL_INTERVAL_SHIFT;
+ if (obj_priv->tiling_mode != I915_TILING_NONE)
+ fbc_ctl |= dev_priv->cfb_fence;
+ I915_WRITE(FBC_CONTROL, fbc_ctl);
+
+ DRM_DEBUG("enabled FBC, pitch %ld, yoff %d, plane %d, ",
+ dev_priv->cfb_pitch, crtc->y, dev_priv->cfb_plane);
+}
+
+void i8xx_disable_fbc(struct drm_device *dev)
+{
+ struct drm_i915_private *dev_priv = dev->dev_private;
+ u32 fbc_ctl;
+
+ if (!I915_HAS_FBC(dev))
+ return;
+
+ /* Disable compression */
+ fbc_ctl = I915_READ(FBC_CONTROL);
+ fbc_ctl &= ~FBC_CTL_EN;
+ I915_WRITE(FBC_CONTROL, fbc_ctl);
+
+ /* Wait for compressing bit to clear */
+ while (I915_READ(FBC_STATUS) & FBC_STAT_COMPRESSING)
+ ; /* nothing */
+
+ intel_wait_for_vblank(dev);
+
+ DRM_DEBUG("disabled FBC\n");
+}
+
+static bool i8xx_fbc_enabled(struct drm_crtc *crtc)
+{
+ struct drm_device *dev = crtc->dev;
+ struct drm_i915_private *dev_priv = dev->dev_private;
+
+ return I915_READ(FBC_CONTROL) & FBC_CTL_EN;
+}
+
+static void g4x_enable_fbc(struct drm_crtc *crtc, unsigned long interval)
+{
+ struct drm_device *dev = crtc->dev;
+ struct drm_i915_private *dev_priv = dev->dev_private;
+ struct drm_framebuffer *fb = crtc->fb;
+ struct intel_framebuffer *intel_fb = to_intel_framebuffer(fb);
+ struct drm_i915_gem_object *obj_priv = intel_fb->obj->driver_private;
+ struct intel_crtc *intel_crtc = to_intel_crtc(crtc);
+ int plane = (intel_crtc->plane == 0 ? DPFC_CTL_PLANEA :
+ DPFC_CTL_PLANEB);
+ unsigned long stall_watermark = 200;
+ u32 dpfc_ctl;
+
+ dev_priv->cfb_pitch = (dev_priv->cfb_pitch / 64) - 1;
+ dev_priv->cfb_fence = obj_priv->fence_reg;
+ dev_priv->cfb_plane = intel_crtc->plane;
+
+ dpfc_ctl = plane | DPFC_SR_EN | DPFC_CTL_LIMIT_1X;
+ if (obj_priv->tiling_mode != I915_TILING_NONE) {
+ dpfc_ctl |= DPFC_CTL_FENCE_EN | dev_priv->cfb_fence;
+ I915_WRITE(DPFC_CHICKEN, DPFC_HT_MODIFY);
+ } else {
+ I915_WRITE(DPFC_CHICKEN, ~DPFC_HT_MODIFY);
+ }
+
+ I915_WRITE(DPFC_CONTROL, dpfc_ctl);
+ I915_WRITE(DPFC_RECOMP_CTL, DPFC_RECOMP_STALL_EN |
+ (stall_watermark << DPFC_RECOMP_STALL_WM_SHIFT) |
+ (interval << DPFC_RECOMP_TIMER_COUNT_SHIFT));
+ I915_WRITE(DPFC_FENCE_YOFF, crtc->y);
+
+ /* enable it... */
+ I915_WRITE(DPFC_CONTROL, I915_READ(DPFC_CONTROL) | DPFC_CTL_EN);
+
+ DRM_DEBUG("enabled fbc on plane %d\n", intel_crtc->plane);
+}
+
+void g4x_disable_fbc(struct drm_device *dev)
+{
+ struct drm_i915_private *dev_priv = dev->dev_private;
+ u32 dpfc_ctl;
+
+ /* Disable compression */
+ dpfc_ctl = I915_READ(DPFC_CONTROL);
+ dpfc_ctl &= ~DPFC_CTL_EN;
+ I915_WRITE(DPFC_CONTROL, dpfc_ctl);
+ intel_wait_for_vblank(dev);
+
+ DRM_DEBUG("disabled FBC\n");
+}
+
+static bool g4x_fbc_enabled(struct drm_crtc *crtc)
+{
+ struct drm_device *dev = crtc->dev;
+ struct drm_i915_private *dev_priv = dev->dev_private;
+
+ return I915_READ(DPFC_CONTROL) & DPFC_CTL_EN;
+}
+
+/**
+ * intel_update_fbc - enable/disable FBC as needed
+ * @crtc: CRTC to point the compressor at
+ * @mode: mode in use
+ *
+ * Set up the framebuffer compression hardware at mode set time. We
+ * enable it if possible:
+ * - plane A only (on pre-965)
+ * - no pixel multiply/line duplication
+ * - no alpha buffer discard
+ * - no dual wide
+ * - framebuffer <= 2048 in width, 1536 in height
+ *
+ * We can't assume that any compression will take place (worst case),
+ * so the compressed buffer has to be the same size as the uncompressed
+ * one. It also must reside (along with the line length buffer) in
+ * stolen memory.
+ *
+ * We need to enable/disable FBC on a global basis.
+ */
+static void intel_update_fbc(struct drm_crtc *crtc,
+ struct drm_display_mode *mode)
+{
+ struct drm_device *dev = crtc->dev;
+ struct drm_i915_private *dev_priv = dev->dev_private;
+ struct drm_framebuffer *fb = crtc->fb;
+ struct intel_framebuffer *intel_fb;
+ struct drm_i915_gem_object *obj_priv;
+ struct intel_crtc *intel_crtc = to_intel_crtc(crtc);
+ int plane = intel_crtc->plane;
+
+ if (!i915_powersave)
+ return;
+
+ if (!dev_priv->display.fbc_enabled ||
+ !dev_priv->display.enable_fbc ||
+ !dev_priv->display.disable_fbc)
+ return;
+
+ if (!crtc->fb)
+ return;
+
+ intel_fb = to_intel_framebuffer(fb);
+ obj_priv = intel_fb->obj->driver_private;
+
+ /*
+ * If FBC is already on, we just have to verify that we can
+ * keep it that way...
+ * Need to disable if:
+ * - changing FBC params (stride, fence, mode)
+ * - new fb is too large to fit in compressed buffer
+ * - going to an unsupported config (interlace, pixel multiply, etc.)
+ */
+ if (intel_fb->obj->size > dev_priv->cfb_size) {
+ DRM_DEBUG("framebuffer too large, disabling compression\n");
+ goto out_disable;
+ }
+ if ((mode->flags & DRM_MODE_FLAG_INTERLACE) ||
+ (mode->flags & DRM_MODE_FLAG_DBLSCAN)) {
+ DRM_DEBUG("mode incompatible with compression, disabling\n");
+ goto out_disable;
+ }
+ if ((mode->hdisplay > 2048) ||
+ (mode->vdisplay > 1536)) {
+ DRM_DEBUG("mode too large for compression, disabling\n");
+ goto out_disable;
+ }
+ if ((IS_I915GM(dev) || IS_I945GM(dev)) && plane != 0) {
+ DRM_DEBUG("plane not 0, disabling compression\n");
+ goto out_disable;
+ }
+ if (obj_priv->tiling_mode != I915_TILING_X) {
+ DRM_DEBUG("framebuffer not tiled, disabling compression\n");
+ goto out_disable;
+ }
+
+ if (dev_priv->display.fbc_enabled(crtc)) {
+ /* We can re-enable it in this case, but need to update pitch */
+ if (fb->pitch > dev_priv->cfb_pitch)
+ dev_priv->display.disable_fbc(dev);
+ if (obj_priv->fence_reg != dev_priv->cfb_fence)
+ dev_priv->display.disable_fbc(dev);
+ if (plane != dev_priv->cfb_plane)
+ dev_priv->display.disable_fbc(dev);
+ }
+
+ if (!dev_priv->display.fbc_enabled(crtc)) {
+ /* Now try to turn it back on if possible */
+ dev_priv->display.enable_fbc(crtc, 500);
+ }
+
+ return;
+
+out_disable:
+ DRM_DEBUG("unsupported config, disabling FBC\n");
+ /* Multiple disables should be harmless */
+ if (dev_priv->display.fbc_enabled(crtc))
+ dev_priv->display.disable_fbc(dev);
+}
+
static int
intel_pipe_set_base(struct drm_crtc *crtc, int x, int y,
struct drm_framebuffer *old_fb)
@@ -908,12 +1201,13 @@ intel_pipe_set_base(struct drm_crtc *crtc, int x, int y,
struct drm_i915_gem_object *obj_priv;
struct drm_gem_object *obj;
int pipe = intel_crtc->pipe;
+ int plane = intel_crtc->plane;
unsigned long Start, Offset;
- int dspbase = (pipe == 0 ? DSPAADDR : DSPBADDR);
- int dspsurf = (pipe == 0 ? DSPASURF : DSPBSURF);
- int dspstride = (pipe == 0) ? DSPASTRIDE : DSPBSTRIDE;
- int dsptileoff = (pipe == 0 ? DSPATILEOFF : DSPBTILEOFF);
- int dspcntr_reg = (pipe == 0) ? DSPACNTR : DSPBCNTR;
+ int dspbase = (plane == 0 ? DSPAADDR : DSPBADDR);
+ int dspsurf = (plane == 0 ? DSPASURF : DSPBSURF);
+ int dspstride = (plane == 0) ? DSPASTRIDE : DSPBSTRIDE;
+ int dsptileoff = (plane == 0 ? DSPATILEOFF : DSPBTILEOFF);
+ int dspcntr_reg = (plane == 0) ? DSPACNTR : DSPBCNTR;
u32 dspcntr, alignment;
int ret;
@@ -923,12 +1217,12 @@ intel_pipe_set_base(struct drm_crtc *crtc, int x, int y,
return 0;
}
- switch (pipe) {
+ switch (plane) {
case 0:
case 1:
break;
default:
- DRM_ERROR("Can't update pipe %d in SAREA\n", pipe);
+ DRM_ERROR("Can't update plane %d in SAREA\n", plane);
return -EINVAL;
}
@@ -1008,6 +1302,10 @@ intel_pipe_set_base(struct drm_crtc *crtc, int x, int y,
dspcntr &= ~DISPPLANE_TILED;
}
+ if (IS_IGDNG(dev))
+ /* must disable */
+ dspcntr |= DISPPLANE_TRICKLE_FEED_DISABLE;
+
I915_WRITE(dspcntr_reg, dspcntr);
Start = obj_priv->gtt_offset;
@@ -1026,12 +1324,18 @@ intel_pipe_set_base(struct drm_crtc *crtc, int x, int y,
I915_READ(dspbase);
}
+ if ((IS_I965G(dev) || plane == 0))
+ intel_update_fbc(crtc, &crtc->mode);
+
intel_wait_for_vblank(dev);
if (old_fb) {
intel_fb = to_intel_framebuffer(old_fb);
+ obj_priv = intel_fb->obj->driver_private;
i915_gem_object_unpin(intel_fb->obj);
}
+ intel_increase_pllclock(crtc, true);
+
mutex_unlock(&dev->struct_mutex);
if (!dev->primary->master)
@@ -1154,6 +1458,7 @@ static void igdng_crtc_dpms(struct drm_crtc *crtc, int mode)
int transconf_reg = (pipe == 0) ? TRANSACONF : TRANSBCONF;
int pf_ctl_reg = (pipe == 0) ? PFA_CTL_1 : PFB_CTL_1;
int pf_win_size = (pipe == 0) ? PFA_WIN_SZ : PFB_WIN_SZ;
+ int pf_win_pos = (pipe == 0) ? PFA_WIN_POS : PFB_WIN_POS;
int cpu_htot_reg = (pipe == 0) ? HTOTAL_A : HTOTAL_B;
int cpu_hblank_reg = (pipe == 0) ? HBLANK_A : HBLANK_B;
int cpu_hsync_reg = (pipe == 0) ? HSYNC_A : HSYNC_B;
@@ -1205,6 +1510,19 @@ static void igdng_crtc_dpms(struct drm_crtc *crtc, int mode)
}
}
+ /* Enable panel fitting for LVDS */
+ if (intel_pipe_has_type(crtc, INTEL_OUTPUT_LVDS)) {
+ temp = I915_READ(pf_ctl_reg);
+ I915_WRITE(pf_ctl_reg, temp | PF_ENABLE);
+
+ /* currently full aspect */
+ I915_WRITE(pf_win_pos, 0);
+
+ I915_WRITE(pf_win_size,
+ (dev_priv->panel_fixed_mode->hdisplay << 16) |
+ (dev_priv->panel_fixed_mode->vdisplay));
+ }
+
/* Enable CPU pipe */
temp = I915_READ(pipeconf_reg);
if ((temp & PIPEACONF_ENABLE) == 0) {
@@ -1469,9 +1787,10 @@ static void i9xx_crtc_dpms(struct drm_crtc *crtc, int mode)
struct drm_i915_private *dev_priv = dev->dev_private;
struct intel_crtc *intel_crtc = to_intel_crtc(crtc);
int pipe = intel_crtc->pipe;
+ int plane = intel_crtc->plane;
int dpll_reg = (pipe == 0) ? DPLL_A : DPLL_B;
- int dspcntr_reg = (pipe == 0) ? DSPACNTR : DSPBCNTR;
- int dspbase_reg = (pipe == 0) ? DSPAADDR : DSPBADDR;
+ int dspcntr_reg = (plane == 0) ? DSPACNTR : DSPBCNTR;
+ int dspbase_reg = (plane == 0) ? DSPAADDR : DSPBADDR;
int pipeconf_reg = (pipe == 0) ? PIPEACONF : PIPEBCONF;
u32 temp;
@@ -1514,6 +1833,9 @@ static void i9xx_crtc_dpms(struct drm_crtc *crtc, int mode)
intel_crtc_load_lut(crtc);
+ if ((IS_I965G(dev) || plane == 0))
+ intel_update_fbc(crtc, &crtc->mode);
+
/* Give the overlay scaler a chance to enable if it's on this pipe */
//intel_crtc_dpms_video(crtc, true); TODO
intel_update_watermarks(dev);
@@ -1523,6 +1845,10 @@ static void i9xx_crtc_dpms(struct drm_crtc *crtc, int mode)
/* Give the overlay scaler a chance to disable if it's on this pipe */
//intel_crtc_dpms_video(crtc, FALSE); TODO
+ if (dev_priv->cfb_plane == plane &&
+ dev_priv->display.disable_fbc)
+ dev_priv->display.disable_fbc(dev);
+
/* Disable the VGA plane that we never use */
i915_disable_vga(dev);
@@ -1571,15 +1897,15 @@ static void i9xx_crtc_dpms(struct drm_crtc *crtc, int mode)
static void intel_crtc_dpms(struct drm_crtc *crtc, int mode)
{
struct drm_device *dev = crtc->dev;
+ struct drm_i915_private *dev_priv = dev->dev_private;
struct drm_i915_master_private *master_priv;
struct intel_crtc *intel_crtc = to_intel_crtc(crtc);
int pipe = intel_crtc->pipe;
bool enabled;
- if (IS_IGDNG(dev))
- igdng_crtc_dpms(crtc, mode);
- else
- i9xx_crtc_dpms(crtc, mode);
+ dev_priv->display.dpms(crtc, mode);
+
+ intel_crtc->dpms_mode = mode;
if (!dev->primary->master)
return;
@@ -1603,8 +1929,6 @@ static void intel_crtc_dpms(struct drm_crtc *crtc, int mode)
DRM_ERROR("Can't update pipe %d in SAREA\n", pipe);
break;
}
-
- intel_crtc->dpms_mode = mode;
}
static void intel_crtc_prepare (struct drm_crtc *crtc)
@@ -1646,56 +1970,68 @@ static bool intel_crtc_mode_fixup(struct drm_crtc *crtc,
return true;
}
+static int i945_get_display_clock_speed(struct drm_device *dev)
+{
+ return 400000;
+}
-/** Returns the core display clock speed for i830 - i945 */
-static int intel_get_core_clock_speed(struct drm_device *dev)
+static int i915_get_display_clock_speed(struct drm_device *dev)
{
+ return 333000;
+}
- /* Core clock values taken from the published datasheets.
- * The 830 may go up to 166 Mhz, which we should check.
- */
- if (IS_I945G(dev))
- return 400000;
- else if (IS_I915G(dev))
- return 333000;
- else if (IS_I945GM(dev) || IS_845G(dev) || IS_IGDGM(dev))
- return 200000;
- else if (IS_I915GM(dev)) {
- u16 gcfgc = 0;
+static int i9xx_misc_get_display_clock_speed(struct drm_device *dev)
+{
+ return 200000;
+}
- pci_read_config_word(dev->pdev, GCFGC, &gcfgc);
+static int i915gm_get_display_clock_speed(struct drm_device *dev)
+{
+ u16 gcfgc = 0;
- if (gcfgc & GC_LOW_FREQUENCY_ENABLE)
- return 133000;
- else {
- switch (gcfgc & GC_DISPLAY_CLOCK_MASK) {
- case GC_DISPLAY_CLOCK_333_MHZ:
- return 333000;
- default:
- case GC_DISPLAY_CLOCK_190_200_MHZ:
- return 190000;
- }
- }
- } else if (IS_I865G(dev))
- return 266000;
- else if (IS_I855(dev)) {
- u16 hpllcc = 0;
- /* Assume that the hardware is in the high speed state. This
- * should be the default.
- */
- switch (hpllcc & GC_CLOCK_CONTROL_MASK) {
- case GC_CLOCK_133_200:
- case GC_CLOCK_100_200:
- return 200000;
- case GC_CLOCK_166_250:
- return 250000;
- case GC_CLOCK_100_133:
- return 133000;
+ pci_read_config_word(dev->pdev, GCFGC, &gcfgc);
+
+ if (gcfgc & GC_LOW_FREQUENCY_ENABLE)
+ return 133000;
+ else {
+ switch (gcfgc & GC_DISPLAY_CLOCK_MASK) {
+ case GC_DISPLAY_CLOCK_333_MHZ:
+ return 333000;
+ default:
+ case GC_DISPLAY_CLOCK_190_200_MHZ:
+ return 190000;
}
- } else /* 852, 830 */
+ }
+}
+
+static int i865_get_display_clock_speed(struct drm_device *dev)
+{
+ return 266000;
+}
+
+static int i855_get_display_clock_speed(struct drm_device *dev)
+{
+ u16 hpllcc = 0;
+ /* Assume that the hardware is in the high speed state. This
+ * should be the default.
+ */
+ switch (hpllcc & GC_CLOCK_CONTROL_MASK) {
+ case GC_CLOCK_133_200:
+ case GC_CLOCK_100_200:
+ return 200000;
+ case GC_CLOCK_166_250:
+ return 250000;
+ case GC_CLOCK_100_133:
return 133000;
+ }
- return 0; /* Silence gcc warning */
+ /* Shouldn't happen */
+ return 0;
+}
+
+static int i830_get_display_clock_speed(struct drm_device *dev)
+{
+ return 133000;
}
/**
@@ -1858,7 +2194,14 @@ static unsigned long intel_calculate_wm(unsigned long clock_in_khz,
{
long entries_required, wm_size;
- entries_required = (clock_in_khz * pixel_size * latency_ns) / 1000000;
+ /*
+ * Note: we need to make sure we don't overflow for various clock &
+ * latency values.
+ * Clocks go from a few thousand to several hundred thousand kHz;
+ * latency is usually a few thousand ns.
+ */
+ entries_required = ((clock_in_khz / 1000) * pixel_size * latency_ns) /
+ 1000;
entries_required /= wm->cacheline_size;
DRM_DEBUG("FIFO entries required for mode: %d\n", entries_required);
@@ -1923,14 +2266,13 @@ static struct cxsr_latency *intel_get_cxsr_latency(int is_desktop, int fsb,
for (i = 0; i < ARRAY_SIZE(cxsr_latency_table); i++) {
latency = &cxsr_latency_table[i];
if (is_desktop == latency->is_desktop &&
- fsb == latency->fsb_freq && mem == latency->mem_freq)
- break;
+ fsb == latency->fsb_freq && mem == latency->mem_freq)
+ return latency;
}
- if (i >= ARRAY_SIZE(cxsr_latency_table)) {
- DRM_DEBUG("Unknown FSB/MEM found, disable CxSR\n");
- return NULL;
- }
- return latency;
+
+ DRM_DEBUG("Unknown FSB/MEM found, disable CxSR\n");
+
+ return NULL;
}
static void igd_disable_cxsr(struct drm_device *dev)
@@ -2021,32 +2363,36 @@ static void igd_enable_cxsr(struct drm_device *dev, unsigned long clock,
*/
const static int latency_ns = 5000;
-static int intel_get_fifo_size(struct drm_device *dev, int plane)
+static int i9xx_get_fifo_size(struct drm_device *dev, int plane)
{
struct drm_i915_private *dev_priv = dev->dev_private;
uint32_t dsparb = I915_READ(DSPARB);
int size;
- if (IS_I9XX(dev)) {
- if (plane == 0)
- size = dsparb & 0x7f;
- else
- size = ((dsparb >> DSPARB_CSTART_SHIFT) & 0x7f) -
- (dsparb & 0x7f);
- } else if (IS_I85X(dev)) {
- if (plane == 0)
- size = dsparb & 0x1ff;
- else
- size = ((dsparb >> DSPARB_BEND_SHIFT) & 0x1ff) -
- (dsparb & 0x1ff);
- size >>= 1; /* Convert to cachelines */
- } else if (IS_845G(dev)) {
+ if (plane == 0)
size = dsparb & 0x7f;
- size >>= 2; /* Convert to cachelines */
- } else {
- size = dsparb & 0x7f;
- size >>= 1; /* Convert to cachelines */
- }
+ else
+ size = ((dsparb >> DSPARB_CSTART_SHIFT) & 0x7f) -
+ (dsparb & 0x7f);
+
+ DRM_DEBUG("FIFO size - (0x%08x) %s: %d\n", dsparb, plane ? "B" : "A",
+ size);
+
+ return size;
+}
+
+static int i85x_get_fifo_size(struct drm_device *dev, int plane)
+{
+ struct drm_i915_private *dev_priv = dev->dev_private;
+ uint32_t dsparb = I915_READ(DSPARB);
+ int size;
+
+ if (plane == 0)
+ size = dsparb & 0x1ff;
+ else
+ size = ((dsparb >> DSPARB_BEND_SHIFT) & 0x1ff) -
+ (dsparb & 0x1ff);
+ size >>= 1; /* Convert to cachelines */
DRM_DEBUG("FIFO size - (0x%08x) %s: %d\n", dsparb, plane ? "B" : "A",
size);
@@ -2054,7 +2400,51 @@ static int intel_get_fifo_size(struct drm_device *dev, int plane)
return size;
}
-static void i965_update_wm(struct drm_device *dev)
+static int i845_get_fifo_size(struct drm_device *dev, int plane)
+{
+ struct drm_i915_private *dev_priv = dev->dev_private;
+ uint32_t dsparb = I915_READ(DSPARB);
+ int size;
+
+ size = dsparb & 0x7f;
+ size >>= 2; /* Convert to cachelines */
+
+ DRM_DEBUG("FIFO size - (0x%08x) %s: %d\n", dsparb, plane ? "B" : "A",
+ size);
+
+ return size;
+}
+
+static int i830_get_fifo_size(struct drm_device *dev, int plane)
+{
+ struct drm_i915_private *dev_priv = dev->dev_private;
+ uint32_t dsparb = I915_READ(DSPARB);
+ int size;
+
+ size = dsparb & 0x7f;
+ size >>= 1; /* Convert to cachelines */
+
+ DRM_DEBUG("FIFO size - (0x%08x) %s: %d\n", dsparb, plane ? "B" : "A",
+ size);
+
+ return size;
+}
+
+static void g4x_update_wm(struct drm_device *dev, int unused, int unused2,
+ int unused3, int unused4)
+{
+ struct drm_i915_private *dev_priv = dev->dev_private;
+ u32 fw_blc_self = I915_READ(FW_BLC_SELF);
+
+ if (i915_powersave)
+ fw_blc_self |= FW_BLC_SELF_EN;
+ else
+ fw_blc_self &= ~FW_BLC_SELF_EN;
+ I915_WRITE(FW_BLC_SELF, fw_blc_self);
+}
+
+static void i965_update_wm(struct drm_device *dev, int unused, int unused2,
+ int unused3, int unused4)
{
struct drm_i915_private *dev_priv = dev->dev_private;
@@ -2090,8 +2480,8 @@ static void i9xx_update_wm(struct drm_device *dev, int planea_clock,
cacheline_size = planea_params.cacheline_size;
/* Update per-plane FIFO sizes */
- planea_params.fifo_size = intel_get_fifo_size(dev, 0);
- planeb_params.fifo_size = intel_get_fifo_size(dev, 1);
+ planea_params.fifo_size = dev_priv->display.get_fifo_size(dev, 0);
+ planeb_params.fifo_size = dev_priv->display.get_fifo_size(dev, 1);
planea_wm = intel_calculate_wm(planea_clock, &planea_params,
pixel_size, latency_ns);
@@ -2105,7 +2495,8 @@ static void i9xx_update_wm(struct drm_device *dev, int planea_clock,
cwm = 2;
/* Calc sr entries for one plane configs */
- if (sr_hdisplay && (!planea_clock || !planeb_clock)) {
+ if (HAS_FW_BLC(dev) && sr_hdisplay &&
+ (!planea_clock || !planeb_clock)) {
/* self-refresh has much higher latency */
const static int sr_latency_ns = 6000;
@@ -2120,8 +2511,7 @@ static void i9xx_update_wm(struct drm_device *dev, int planea_clock,
srwm = total_size - sr_entries;
if (srwm < 0)
srwm = 1;
- if (IS_I9XX(dev))
- I915_WRITE(FW_BLC_SELF, (srwm & 0x3f));
+ I915_WRITE(FW_BLC_SELF, FW_BLC_SELF_EN | (srwm & 0x3f));
}
DRM_DEBUG("Setting FIFO watermarks - A: %d, B: %d, C: %d, SR %d\n",
@@ -2138,14 +2528,14 @@ static void i9xx_update_wm(struct drm_device *dev, int planea_clock,
I915_WRITE(FW_BLC2, fwater_hi);
}
-static void i830_update_wm(struct drm_device *dev, int planea_clock,
- int pixel_size)
+static void i830_update_wm(struct drm_device *dev, int planea_clock, int unused,
+ int unused2, int pixel_size)
{
struct drm_i915_private *dev_priv = dev->dev_private;
uint32_t fwater_lo = I915_READ(FW_BLC) & ~0xfff;
int planea_wm;
- i830_wm_info.fifo_size = intel_get_fifo_size(dev, 0);
+ i830_wm_info.fifo_size = dev_priv->display.get_fifo_size(dev, 0);
planea_wm = intel_calculate_wm(planea_clock, &i830_wm_info,
pixel_size, latency_ns);
@@ -2189,15 +2579,13 @@ static void i830_update_wm(struct drm_device *dev, int planea_clock,
*/
static void intel_update_watermarks(struct drm_device *dev)
{
+ struct drm_i915_private *dev_priv = dev->dev_private;
struct drm_crtc *crtc;
struct intel_crtc *intel_crtc;
int sr_hdisplay = 0;
unsigned long planea_clock = 0, planeb_clock = 0, sr_clock = 0;
int enabled = 0, pixel_size = 0;
- if (DSPARB_HWCONTROL(dev))
- return;
-
/* Get the clock config from both planes */
list_for_each_entry(crtc, &dev->mode_config.crtc_list, head) {
intel_crtc = to_intel_crtc(crtc);
@@ -2230,13 +2618,8 @@ static void intel_update_watermarks(struct drm_device *dev)
else if (IS_IGD(dev))
igd_disable_cxsr(dev);
- if (IS_I965G(dev))
- i965_update_wm(dev);
- else if (IS_I9XX(dev) || IS_MOBILE(dev))
- i9xx_update_wm(dev, planea_clock, planeb_clock, sr_hdisplay,
- pixel_size);
- else
- i830_update_wm(dev, planea_clock, pixel_size);
+ dev_priv->display.update_wm(dev, planea_clock, planeb_clock,
+ sr_hdisplay, pixel_size);
}
static int intel_crtc_mode_set(struct drm_crtc *crtc,
@@ -2249,10 +2632,11 @@ static int intel_crtc_mode_set(struct drm_crtc *crtc,
struct drm_i915_private *dev_priv = dev->dev_private;
struct intel_crtc *intel_crtc = to_intel_crtc(crtc);
int pipe = intel_crtc->pipe;
+ int plane = intel_crtc->plane;
int fp_reg = (pipe == 0) ? FPA0 : FPB0;
int dpll_reg = (pipe == 0) ? DPLL_A : DPLL_B;
int dpll_md_reg = (intel_crtc->pipe == 0) ? DPLL_A_MD : DPLL_B_MD;
- int dspcntr_reg = (pipe == 0) ? DSPACNTR : DSPBCNTR;
+ int dspcntr_reg = (plane == 0) ? DSPACNTR : DSPBCNTR;
int pipeconf_reg = (pipe == 0) ? PIPEACONF : PIPEBCONF;
int htot_reg = (pipe == 0) ? HTOTAL_A : HTOTAL_B;
int hblank_reg = (pipe == 0) ? HBLANK_A : HBLANK_B;
@@ -2260,13 +2644,13 @@ static int intel_crtc_mode_set(struct drm_crtc *crtc,
int vtot_reg = (pipe == 0) ? VTOTAL_A : VTOTAL_B;
int vblank_reg = (pipe == 0) ? VBLANK_A : VBLANK_B;
int vsync_reg = (pipe == 0) ? VSYNC_A : VSYNC_B;
- int dspsize_reg = (pipe == 0) ? DSPASIZE : DSPBSIZE;
- int dsppos_reg = (pipe == 0) ? DSPAPOS : DSPBPOS;
+ int dspsize_reg = (plane == 0) ? DSPASIZE : DSPBSIZE;
+ int dsppos_reg = (plane == 0) ? DSPAPOS : DSPBPOS;
int pipesrc_reg = (pipe == 0) ? PIPEASRC : PIPEBSRC;
int refclk, num_outputs = 0;
- intel_clock_t clock;
- u32 dpll = 0, fp = 0, dspcntr, pipeconf;
- bool ok, is_sdvo = false, is_dvo = false;
+ intel_clock_t clock, reduced_clock;
+ u32 dpll = 0, fp = 0, fp2 = 0, dspcntr, pipeconf;
+ bool ok, has_reduced_clock = false, is_sdvo = false, is_dvo = false;
bool is_crt = false, is_lvds = false, is_tv = false, is_dp = false;
bool is_edp = false;
struct drm_mode_config *mode_config = &dev->mode_config;
@@ -2349,6 +2733,14 @@ static int intel_crtc_mode_set(struct drm_crtc *crtc,
return -EINVAL;
}
+ if (limit->find_reduced_pll && dev_priv->lvds_downclock_avail) {
+ memcpy(&reduced_clock, &clock, sizeof(intel_clock_t));
+ has_reduced_clock = limit->find_reduced_pll(limit, crtc,
+ (adjusted_mode->clock*3/4),
+ refclk,
+ &reduced_clock);
+ }
+
/* SDVO TV has fixed PLL values depend on its clock range,
this mirrors vbios setting. */
if (is_sdvo && is_tv) {
@@ -2394,10 +2786,17 @@ static int intel_crtc_mode_set(struct drm_crtc *crtc,
link_bw, &m_n);
}
- if (IS_IGD(dev))
+ if (IS_IGD(dev)) {
fp = (1 << clock.n) << 16 | clock.m1 << 8 | clock.m2;
- else
+ if (has_reduced_clock)
+ fp2 = (1 << reduced_clock.n) << 16 |
+ reduced_clock.m1 << 8 | reduced_clock.m2;
+ } else {
fp = clock.n << 16 | clock.m1 << 8 | clock.m2;
+ if (has_reduced_clock)
+ fp2 = reduced_clock.n << 16 | reduced_clock.m1 << 8 |
+ reduced_clock.m2;
+ }
if (!IS_IGDNG(dev))
dpll = DPLL_VGA_MODE_DIS;
@@ -2426,6 +2825,8 @@ static int intel_crtc_mode_set(struct drm_crtc *crtc,
/* also FPA1 */
if (IS_IGDNG(dev))
dpll |= (1 << (clock.p1 - 1)) << DPLL_FPA1_P1_POST_DIV_SHIFT;
+ if (IS_G4X(dev) && has_reduced_clock)
+ dpll |= (1 << (reduced_clock.p1 - 1)) << DPLL_FPA1_P1_POST_DIV_SHIFT;
}
switch (clock.p2) {
case 5:
@@ -2477,7 +2878,7 @@ static int intel_crtc_mode_set(struct drm_crtc *crtc,
enable color space conversion */
if (!IS_IGDNG(dev)) {
if (pipe == 0)
- dspcntr |= DISPPLANE_SEL_PIPE_A;
+ dspcntr &= ~DISPPLANE_SEL_PIPE_MASK;
else
dspcntr |= DISPPLANE_SEL_PIPE_B;
}
@@ -2489,7 +2890,8 @@ static int intel_crtc_mode_set(struct drm_crtc *crtc,
* XXX: No double-wide on 915GM pipe B. Is that the only reason for the
* pipe == 0 check?
*/
- if (mode->clock > intel_get_core_clock_speed(dev) * 9 / 10)
+ if (mode->clock >
+ dev_priv->display.get_display_clock_speed(dev) * 9 / 10)
pipeconf |= PIPEACONF_DOUBLE_WIDE;
else
pipeconf &= ~PIPEACONF_DOUBLE_WIDE;
@@ -2561,9 +2963,12 @@ static int intel_crtc_mode_set(struct drm_crtc *crtc,
udelay(150);
if (IS_I965G(dev) && !IS_IGDNG(dev)) {
- sdvo_pixel_multiply = adjusted_mode->clock / mode->clock;
- I915_WRITE(dpll_md_reg, (0 << DPLL_MD_UDI_DIVIDER_SHIFT) |
+ if (is_sdvo) {
+ sdvo_pixel_multiply = adjusted_mode->clock / mode->clock;
+ I915_WRITE(dpll_md_reg, (0 << DPLL_MD_UDI_DIVIDER_SHIFT) |
((sdvo_pixel_multiply - 1) << DPLL_MD_UDI_MULTIPLIER_SHIFT));
+ } else
+ I915_WRITE(dpll_md_reg, 0);
} else {
/* write it again -- the BIOS does, after all */
I915_WRITE(dpll_reg, dpll);
@@ -2573,6 +2978,22 @@ static int intel_crtc_mode_set(struct drm_crtc *crtc,
udelay(150);
}
+ if (is_lvds && has_reduced_clock && i915_powersave) {
+ I915_WRITE(fp_reg + 4, fp2);
+ intel_crtc->lowfreq_avail = true;
+ if (HAS_PIPE_CXSR(dev)) {
+ DRM_DEBUG("enabling CxSR downclocking\n");
+ pipeconf |= PIPECONF_CXSR_DOWNCLOCK;
+ }
+ } else {
+ I915_WRITE(fp_reg + 4, fp);
+ intel_crtc->lowfreq_avail = false;
+ if (HAS_PIPE_CXSR(dev)) {
+ DRM_DEBUG("disabling CxSR downclocking\n");
+ pipeconf &= ~PIPECONF_CXSR_DOWNCLOCK;
+ }
+ }
+
I915_WRITE(htot_reg, (adjusted_mode->crtc_hdisplay - 1) |
((adjusted_mode->crtc_htotal - 1) << 16));
I915_WRITE(hblank_reg, (adjusted_mode->crtc_hblank_start - 1) |
@@ -2616,11 +3037,20 @@ static int intel_crtc_mode_set(struct drm_crtc *crtc,
intel_wait_for_vblank(dev);
+ if (IS_IGDNG(dev)) {
+ /* enable address swizzle for tiling buffer */
+ temp = I915_READ(DISP_ARB_CTL);
+ I915_WRITE(DISP_ARB_CTL, temp | DISP_TILE_SURFACE_SWIZZLING);
+ }
+
I915_WRITE(dspcntr_reg, dspcntr);
/* Flush the plane changes */
ret = intel_pipe_set_base(crtc, x, y, old_fb);
+ if ((IS_I965G(dev) || plane == 0))
+ intel_update_fbc(crtc, &crtc->mode);
+
intel_update_watermarks(dev);
drm_vblank_post_modeset(dev, pipe);
@@ -2750,6 +3180,7 @@ static int intel_crtc_cursor_set(struct drm_crtc *crtc,
i915_gem_object_unpin(intel_crtc->cursor_bo);
drm_gem_object_unreference(intel_crtc->cursor_bo);
}
+
mutex_unlock(&dev->struct_mutex);
intel_crtc->cursor_addr = addr;
@@ -2769,10 +3200,16 @@ static int intel_crtc_cursor_move(struct drm_crtc *crtc, int x, int y)
struct drm_device *dev = crtc->dev;
struct drm_i915_private *dev_priv = dev->dev_private;
struct intel_crtc *intel_crtc = to_intel_crtc(crtc);
+ struct intel_framebuffer *intel_fb;
int pipe = intel_crtc->pipe;
uint32_t temp = 0;
uint32_t adder;
+ if (crtc->fb) {
+ intel_fb = to_intel_framebuffer(crtc->fb);
+ intel_mark_busy(dev, intel_fb->obj);
+ }
+
if (x < 0) {
temp |= CURSOR_POS_SIGN << CURSOR_X_SHIFT;
x = -x;
@@ -2803,6 +3240,16 @@ void intel_crtc_fb_gamma_set(struct drm_crtc *crtc, u16 red, u16 green,
intel_crtc->lut_b[regno] = blue >> 8;
}
+void intel_crtc_fb_gamma_get(struct drm_crtc *crtc, u16 *red, u16 *green,
+ u16 *blue, int regno)
+{
+ struct intel_crtc *intel_crtc = to_intel_crtc(crtc);
+
+ *red = intel_crtc->lut_r[regno] << 8;
+ *green = intel_crtc->lut_g[regno] << 8;
+ *blue = intel_crtc->lut_b[regno] << 8;
+}
+
static void intel_crtc_gamma_set(struct drm_crtc *crtc, u16 *red, u16 *green,
u16 *blue, uint32_t size)
{
@@ -3070,12 +3517,319 @@ struct drm_display_mode *intel_crtc_mode_get(struct drm_device *dev,
return mode;
}
+#define GPU_IDLE_TIMEOUT 500 /* ms */
+
+/* When this timer fires, we've been idle for a while */
+static void intel_gpu_idle_timer(unsigned long arg)
+{
+ struct drm_device *dev = (struct drm_device *)arg;
+ drm_i915_private_t *dev_priv = dev->dev_private;
+
+ DRM_DEBUG("idle timer fired, downclocking\n");
+
+ dev_priv->busy = false;
+
+ queue_work(dev_priv->wq, &dev_priv->idle_work);
+}
+
+void intel_increase_renderclock(struct drm_device *dev, bool schedule)
+{
+ drm_i915_private_t *dev_priv = dev->dev_private;
+
+ if (IS_IGDNG(dev))
+ return;
+
+ if (!dev_priv->render_reclock_avail) {
+ DRM_DEBUG("not reclocking render clock\n");
+ return;
+ }
+
+ /* Restore render clock frequency to original value */
+ if (IS_G4X(dev) || IS_I9XX(dev))
+ pci_write_config_word(dev->pdev, GCFGC, dev_priv->orig_clock);
+ else if (IS_I85X(dev))
+ pci_write_config_word(dev->pdev, HPLLCC, dev_priv->orig_clock);
+ DRM_DEBUG("increasing render clock frequency\n");
+
+ /* Schedule downclock */
+ if (schedule)
+ mod_timer(&dev_priv->idle_timer, jiffies +
+ msecs_to_jiffies(GPU_IDLE_TIMEOUT));
+}
+
+void intel_decrease_renderclock(struct drm_device *dev)
+{
+ drm_i915_private_t *dev_priv = dev->dev_private;
+
+ if (IS_IGDNG(dev))
+ return;
+
+ if (!dev_priv->render_reclock_avail) {
+ DRM_DEBUG("not reclocking render clock\n");
+ return;
+ }
+
+ if (IS_G4X(dev)) {
+ u16 gcfgc;
+
+ /* Adjust render clock... */
+ pci_read_config_word(dev->pdev, GCFGC, &gcfgc);
+
+ /* Down to minimum... */
+ gcfgc &= ~GM45_GC_RENDER_CLOCK_MASK;
+ gcfgc |= GM45_GC_RENDER_CLOCK_266_MHZ;
+
+ pci_write_config_word(dev->pdev, GCFGC, gcfgc);
+ } else if (IS_I965G(dev)) {
+ u16 gcfgc;
+
+ /* Adjust render clock... */
+ pci_read_config_word(dev->pdev, GCFGC, &gcfgc);
+
+ /* Down to minimum... */
+ gcfgc &= ~I965_GC_RENDER_CLOCK_MASK;
+ gcfgc |= I965_GC_RENDER_CLOCK_267_MHZ;
+
+ pci_write_config_word(dev->pdev, GCFGC, gcfgc);
+ } else if (IS_I945G(dev) || IS_I945GM(dev)) {
+ u16 gcfgc;
+
+ /* Adjust render clock... */
+ pci_read_config_word(dev->pdev, GCFGC, &gcfgc);
+
+ /* Down to minimum... */
+ gcfgc &= ~I945_GC_RENDER_CLOCK_MASK;
+ gcfgc |= I945_GC_RENDER_CLOCK_166_MHZ;
+
+ pci_write_config_word(dev->pdev, GCFGC, gcfgc);
+ } else if (IS_I915G(dev)) {
+ u16 gcfgc;
+
+ /* Adjust render clock... */
+ pci_read_config_word(dev->pdev, GCFGC, &gcfgc);
+
+ /* Down to minimum... */
+ gcfgc &= ~I915_GC_RENDER_CLOCK_MASK;
+ gcfgc |= I915_GC_RENDER_CLOCK_166_MHZ;
+
+ pci_write_config_word(dev->pdev, GCFGC, gcfgc);
+ } else if (IS_I85X(dev)) {
+ u16 hpllcc;
+
+ /* Adjust render clock... */
+ pci_read_config_word(dev->pdev, HPLLCC, &hpllcc);
+
+ /* Up to maximum... */
+ hpllcc &= ~GC_CLOCK_CONTROL_MASK;
+ hpllcc |= GC_CLOCK_133_200;
+
+ pci_write_config_word(dev->pdev, HPLLCC, hpllcc);
+ }
+ DRM_DEBUG("decreasing render clock frequency\n");
+}
+
+/* Note that no increase function is needed for this - increase_renderclock()
+ * will also rewrite these bits
+ */
+void intel_decrease_displayclock(struct drm_device *dev)
+{
+ if (IS_IGDNG(dev))
+ return;
+
+ if (IS_I945G(dev) || IS_I945GM(dev) || IS_I915G(dev) ||
+ IS_I915GM(dev)) {
+ u16 gcfgc;
+
+ /* Adjust render clock... */
+ pci_read_config_word(dev->pdev, GCFGC, &gcfgc);
+
+ /* Down to minimum... */
+ gcfgc &= ~0xf0;
+ gcfgc |= 0x80;
+
+ pci_write_config_word(dev->pdev, GCFGC, gcfgc);
+ }
+}
+
+#define CRTC_IDLE_TIMEOUT 1000 /* ms */
+
+static void intel_crtc_idle_timer(unsigned long arg)
+{
+ struct intel_crtc *intel_crtc = (struct intel_crtc *)arg;
+ struct drm_crtc *crtc = &intel_crtc->base;
+ drm_i915_private_t *dev_priv = crtc->dev->dev_private;
+
+ DRM_DEBUG("idle timer fired, downclocking\n");
+
+ intel_crtc->busy = false;
+
+ queue_work(dev_priv->wq, &dev_priv->idle_work);
+}
+
+static void intel_increase_pllclock(struct drm_crtc *crtc, bool schedule)
+{
+ struct drm_device *dev = crtc->dev;
+ drm_i915_private_t *dev_priv = dev->dev_private;
+ struct intel_crtc *intel_crtc = to_intel_crtc(crtc);
+ int pipe = intel_crtc->pipe;
+ int dpll_reg = (pipe == 0) ? DPLL_A : DPLL_B;
+ int dpll = I915_READ(dpll_reg);
+
+ if (IS_IGDNG(dev))
+ return;
+
+ if (!dev_priv->lvds_downclock_avail)
+ return;
+
+ if (!HAS_PIPE_CXSR(dev) && (dpll & DISPLAY_RATE_SELECT_FPA1)) {
+ DRM_DEBUG("upclocking LVDS\n");
+
+ /* Unlock panel regs */
+ I915_WRITE(PP_CONTROL, I915_READ(PP_CONTROL) | (0xabcd << 16));
+
+ dpll &= ~DISPLAY_RATE_SELECT_FPA1;
+ I915_WRITE(dpll_reg, dpll);
+ dpll = I915_READ(dpll_reg);
+ intel_wait_for_vblank(dev);
+ dpll = I915_READ(dpll_reg);
+ if (dpll & DISPLAY_RATE_SELECT_FPA1)
+ DRM_DEBUG("failed to upclock LVDS!\n");
+
+ /* ...and lock them again */
+ I915_WRITE(PP_CONTROL, I915_READ(PP_CONTROL) & 0x3);
+ }
+
+ /* Schedule downclock */
+ if (schedule)
+ mod_timer(&intel_crtc->idle_timer, jiffies +
+ msecs_to_jiffies(CRTC_IDLE_TIMEOUT));
+}
+
+static void intel_decrease_pllclock(struct drm_crtc *crtc)
+{
+ struct drm_device *dev = crtc->dev;
+ drm_i915_private_t *dev_priv = dev->dev_private;
+ struct intel_crtc *intel_crtc = to_intel_crtc(crtc);
+ int pipe = intel_crtc->pipe;
+ int dpll_reg = (pipe == 0) ? DPLL_A : DPLL_B;
+ int dpll = I915_READ(dpll_reg);
+
+ if (IS_IGDNG(dev))
+ return;
+
+ if (!dev_priv->lvds_downclock_avail)
+ return;
+
+ /*
+ * Since this is called by a timer, we should never get here in
+ * the manual case.
+ */
+ if (!HAS_PIPE_CXSR(dev) && intel_crtc->lowfreq_avail) {
+ DRM_DEBUG("downclocking LVDS\n");
+
+ /* Unlock panel regs */
+ I915_WRITE(PP_CONTROL, I915_READ(PP_CONTROL) | (0xabcd << 16));
+
+ dpll |= DISPLAY_RATE_SELECT_FPA1;
+ I915_WRITE(dpll_reg, dpll);
+ dpll = I915_READ(dpll_reg);
+ intel_wait_for_vblank(dev);
+ dpll = I915_READ(dpll_reg);
+ if (!(dpll & DISPLAY_RATE_SELECT_FPA1))
+ DRM_DEBUG("failed to downclock LVDS!\n");
+
+ /* ...and lock them again */
+ I915_WRITE(PP_CONTROL, I915_READ(PP_CONTROL) & 0x3);
+ }
+
+}
+
+/**
+ * intel_idle_update - adjust clocks for idleness
+ * @work: work struct
+ *
+ * Either the GPU or display (or both) went idle. Check the busy status
+ * here and adjust the CRTC and GPU clocks as necessary.
+ */
+static void intel_idle_update(struct work_struct *work)
+{
+ drm_i915_private_t *dev_priv = container_of(work, drm_i915_private_t,
+ idle_work);
+ struct drm_device *dev = dev_priv->dev;
+ struct drm_crtc *crtc;
+ struct intel_crtc *intel_crtc;
+
+ if (!i915_powersave)
+ return;
+
+ mutex_lock(&dev->struct_mutex);
+
+ /* GPU isn't processing, downclock it. */
+ if (!dev_priv->busy) {
+ intel_decrease_renderclock(dev);
+ intel_decrease_displayclock(dev);
+ }
+
+ list_for_each_entry(crtc, &dev->mode_config.crtc_list, head) {
+ /* Skip inactive CRTCs */
+ if (!crtc->fb)
+ continue;
+
+ intel_crtc = to_intel_crtc(crtc);
+ if (!intel_crtc->busy)
+ intel_decrease_pllclock(crtc);
+ }
+
+ mutex_unlock(&dev->struct_mutex);
+}
+
+/**
+ * intel_mark_busy - mark the GPU and possibly the display busy
+ * @dev: drm device
+ * @obj: object we're operating on
+ *
+ * Callers can use this function to indicate that the GPU is busy processing
+ * commands. If @obj matches one of the CRTC objects (i.e. it's a scanout
+ * buffer), we'll also mark the display as busy, so we know to increase its
+ * clock frequency.
+ */
+void intel_mark_busy(struct drm_device *dev, struct drm_gem_object *obj)
+{
+ drm_i915_private_t *dev_priv = dev->dev_private;
+ struct drm_crtc *crtc = NULL;
+ struct intel_framebuffer *intel_fb;
+ struct intel_crtc *intel_crtc;
+
+ if (!drm_core_check_feature(dev, DRIVER_MODESET))
+ return;
+
+ dev_priv->busy = true;
+ intel_increase_renderclock(dev, true);
+
+ list_for_each_entry(crtc, &dev->mode_config.crtc_list, head) {
+ if (!crtc->fb)
+ continue;
+
+ intel_crtc = to_intel_crtc(crtc);
+ intel_fb = to_intel_framebuffer(crtc->fb);
+ if (intel_fb->obj == obj) {
+ if (!intel_crtc->busy) {
+ /* Non-busy -> busy, upclock */
+ intel_increase_pllclock(crtc, true);
+ intel_crtc->busy = true;
+ } else {
+ /* Busy -> busy, put off timer */
+ mod_timer(&intel_crtc->idle_timer, jiffies +
+ msecs_to_jiffies(CRTC_IDLE_TIMEOUT));
+ }
+ }
+ }
+}
+
static void intel_crtc_destroy(struct drm_crtc *crtc)
{
struct intel_crtc *intel_crtc = to_intel_crtc(crtc);
- if (intel_crtc->mode_set.mode)
- drm_mode_destroy(crtc->dev, intel_crtc->mode_set.mode);
drm_crtc_cleanup(crtc);
kfree(intel_crtc);
}
@@ -3087,6 +3841,7 @@ static const struct drm_crtc_helper_funcs intel_helper_funcs = {
.mode_set_base = intel_pipe_set_base,
.prepare = intel_crtc_prepare,
.commit = intel_crtc_commit,
+ .load_lut = intel_crtc_load_lut,
};
static const struct drm_crtc_funcs intel_crtc_funcs = {
@@ -3118,19 +3873,22 @@ static void intel_crtc_init(struct drm_device *dev, int pipe)
intel_crtc->lut_b[i] = i;
}
+ /* Swap pipes & planes for FBC on pre-965 */
+ intel_crtc->pipe = pipe;
+ intel_crtc->plane = pipe;
+ if (IS_MOBILE(dev) && (IS_I9XX(dev) && !IS_I965G(dev))) {
+ DRM_DEBUG("swapping pipes & planes for FBC\n");
+ intel_crtc->plane = ((pipe == 0) ? 1 : 0);
+ }
+
intel_crtc->cursor_addr = 0;
intel_crtc->dpms_mode = DRM_MODE_DPMS_OFF;
drm_crtc_helper_add(&intel_crtc->base, &intel_helper_funcs);
- intel_crtc->mode_set.crtc = &intel_crtc->base;
- intel_crtc->mode_set.connectors = (struct drm_connector **)(intel_crtc + 1);
- intel_crtc->mode_set.num_connectors = 0;
-
- if (i915_fbpercrtc) {
-
-
+ intel_crtc->busy = false;
- }
+ setup_timer(&intel_crtc->idle_timer, intel_crtc_idle_timer,
+ (unsigned long)intel_crtc);
}
int intel_get_pipe_from_crtc_id(struct drm_device *dev, void *data,
@@ -3138,30 +3896,26 @@ int intel_get_pipe_from_crtc_id(struct drm_device *dev, void *data,
{
drm_i915_private_t *dev_priv = dev->dev_private;
struct drm_i915_get_pipe_from_crtc_id *pipe_from_crtc_id = data;
- struct drm_crtc *crtc = NULL;
- int pipe = -1;
+ struct drm_mode_object *drmmode_obj;
+ struct intel_crtc *crtc;
if (!dev_priv) {
DRM_ERROR("called with no initialization\n");
return -EINVAL;
}
- list_for_each_entry(crtc, &dev->mode_config.crtc_list, head) {
- struct intel_crtc *intel_crtc = to_intel_crtc(crtc);
- if (crtc->base.id == pipe_from_crtc_id->crtc_id) {
- pipe = intel_crtc->pipe;
- break;
- }
- }
+ drmmode_obj = drm_mode_object_find(dev, pipe_from_crtc_id->crtc_id,
+ DRM_MODE_OBJECT_CRTC);
- if (pipe == -1) {
+ if (!drmmode_obj) {
DRM_ERROR("no such CRTC id\n");
return -EINVAL;
}
- pipe_from_crtc_id->pipe = pipe;
+ crtc = to_intel_crtc(obj_to_crtc(drmmode_obj));
+ pipe_from_crtc_id->pipe = crtc->pipe;
- return 0;
+ return 0;
}
struct drm_crtc *intel_get_crtc_from_pipe(struct drm_device *dev, int pipe)
@@ -3362,8 +4116,123 @@ static const struct drm_mode_config_funcs intel_mode_funcs = {
.fb_changed = intelfb_probe,
};
+void intel_init_clock_gating(struct drm_device *dev)
+{
+ struct drm_i915_private *dev_priv = dev->dev_private;
+
+ /*
+ * Disable clock gating reported to work incorrectly according to the
+ * specs, but enable as much else as we can.
+ */
+ if (IS_G4X(dev)) {
+ uint32_t dspclk_gate;
+ I915_WRITE(RENCLK_GATE_D1, 0);
+ I915_WRITE(RENCLK_GATE_D2, VF_UNIT_CLOCK_GATE_DISABLE |
+ GS_UNIT_CLOCK_GATE_DISABLE |
+ CL_UNIT_CLOCK_GATE_DISABLE);
+ I915_WRITE(RAMCLK_GATE_D, 0);
+ dspclk_gate = VRHUNIT_CLOCK_GATE_DISABLE |
+ OVRUNIT_CLOCK_GATE_DISABLE |
+ OVCUNIT_CLOCK_GATE_DISABLE;
+ if (IS_GM45(dev))
+ dspclk_gate |= DSSUNIT_CLOCK_GATE_DISABLE;
+ I915_WRITE(DSPCLK_GATE_D, dspclk_gate);
+ } else if (IS_I965GM(dev)) {
+ I915_WRITE(RENCLK_GATE_D1, I965_RCC_CLOCK_GATE_DISABLE);
+ I915_WRITE(RENCLK_GATE_D2, 0);
+ I915_WRITE(DSPCLK_GATE_D, 0);
+ I915_WRITE(RAMCLK_GATE_D, 0);
+ I915_WRITE16(DEUC, 0);
+ } else if (IS_I965G(dev)) {
+ I915_WRITE(RENCLK_GATE_D1, I965_RCZ_CLOCK_GATE_DISABLE |
+ I965_RCC_CLOCK_GATE_DISABLE |
+ I965_RCPB_CLOCK_GATE_DISABLE |
+ I965_ISC_CLOCK_GATE_DISABLE |
+ I965_FBC_CLOCK_GATE_DISABLE);
+ I915_WRITE(RENCLK_GATE_D2, 0);
+ } else if (IS_I9XX(dev)) {
+ u32 dstate = I915_READ(D_STATE);
+
+ dstate |= DSTATE_PLL_D3_OFF | DSTATE_GFX_CLOCK_GATING |
+ DSTATE_DOT_CLOCK_GATING;
+ I915_WRITE(D_STATE, dstate);
+ } else if (IS_I855(dev) || IS_I865G(dev)) {
+ I915_WRITE(RENCLK_GATE_D1, SV_CLOCK_GATE_DISABLE);
+ } else if (IS_I830(dev)) {
+ I915_WRITE(DSPCLK_GATE_D, OVRUNIT_CLOCK_GATE_DISABLE);
+ }
+}
+
+/* Set up chip specific display functions */
+static void intel_init_display(struct drm_device *dev)
+{
+ struct drm_i915_private *dev_priv = dev->dev_private;
+
+ /* We always want a DPMS function */
+ if (IS_IGDNG(dev))
+ dev_priv->display.dpms = igdng_crtc_dpms;
+ else
+ dev_priv->display.dpms = i9xx_crtc_dpms;
+
+ /* Only mobile has FBC, leave pointers NULL for other chips */
+ if (IS_MOBILE(dev)) {
+ if (IS_GM45(dev)) {
+ dev_priv->display.fbc_enabled = g4x_fbc_enabled;
+ dev_priv->display.enable_fbc = g4x_enable_fbc;
+ dev_priv->display.disable_fbc = g4x_disable_fbc;
+ } else if (IS_I965GM(dev) || IS_I945GM(dev) || IS_I915GM(dev)) {
+ dev_priv->display.fbc_enabled = i8xx_fbc_enabled;
+ dev_priv->display.enable_fbc = i8xx_enable_fbc;
+ dev_priv->display.disable_fbc = i8xx_disable_fbc;
+ }
+ /* 855GM needs testing */
+ }
+
+ /* Returns the core display clock speed */
+ if (IS_I945G(dev))
+ dev_priv->display.get_display_clock_speed =
+ i945_get_display_clock_speed;
+ else if (IS_I915G(dev))
+ dev_priv->display.get_display_clock_speed =
+ i915_get_display_clock_speed;
+ else if (IS_I945GM(dev) || IS_845G(dev) || IS_IGDGM(dev))
+ dev_priv->display.get_display_clock_speed =
+ i9xx_misc_get_display_clock_speed;
+ else if (IS_I915GM(dev))
+ dev_priv->display.get_display_clock_speed =
+ i915gm_get_display_clock_speed;
+ else if (IS_I865G(dev))
+ dev_priv->display.get_display_clock_speed =
+ i865_get_display_clock_speed;
+ else if (IS_I855(dev))
+ dev_priv->display.get_display_clock_speed =
+ i855_get_display_clock_speed;
+ else /* 852, 830 */
+ dev_priv->display.get_display_clock_speed =
+ i830_get_display_clock_speed;
+
+ /* For FIFO watermark updates */
+ if (IS_G4X(dev))
+ dev_priv->display.update_wm = g4x_update_wm;
+ else if (IS_I965G(dev))
+ dev_priv->display.update_wm = i965_update_wm;
+ else if (IS_I9XX(dev) || IS_MOBILE(dev)) {
+ dev_priv->display.update_wm = i9xx_update_wm;
+ dev_priv->display.get_fifo_size = i9xx_get_fifo_size;
+ } else {
+ if (IS_I85X(dev))
+ dev_priv->display.get_fifo_size = i85x_get_fifo_size;
+ else if (IS_845G(dev))
+ dev_priv->display.get_fifo_size = i845_get_fifo_size;
+ else
+ dev_priv->display.get_fifo_size = i830_get_fifo_size;
+ dev_priv->display.update_wm = i830_update_wm;
+ }
+}
+
void intel_modeset_init(struct drm_device *dev)
{
+ struct drm_i915_private *dev_priv = dev->dev_private;
int num_pipe;
int i;
@@ -3374,6 +4243,8 @@ void intel_modeset_init(struct drm_device *dev)
dev->mode_config.funcs = (void *)&intel_mode_funcs;
+ intel_init_display(dev);
+
if (IS_I965G(dev)) {
dev->mode_config.max_width = 8192;
dev->mode_config.max_height = 8192;
@@ -3398,15 +4269,50 @@ void intel_modeset_init(struct drm_device *dev)
DRM_DEBUG("%d display pipe%s available.\n",
num_pipe, num_pipe > 1 ? "s" : "");
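+	/* Remember the original clock config register so later render clock changes can be undone */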
+ if (IS_I85X(dev))
+ pci_read_config_word(dev->pdev, HPLLCC, &dev_priv->orig_clock);
+ else if (IS_I9XX(dev) || IS_G4X(dev))
+ pci_read_config_word(dev->pdev, GCFGC, &dev_priv->orig_clock);
+
for (i = 0; i < num_pipe; i++) {
intel_crtc_init(dev, i);
}
intel_setup_outputs(dev);
+
+ intel_init_clock_gating(dev);
+
+ INIT_WORK(&dev_priv->idle_work, intel_idle_update);
+ setup_timer(&dev_priv->idle_timer, intel_gpu_idle_timer,
+ (unsigned long)dev);
}
void intel_modeset_cleanup(struct drm_device *dev)
{
+ struct drm_i915_private *dev_priv = dev->dev_private;
+ struct drm_crtc *crtc;
+ struct intel_crtc *intel_crtc;
+
+ mutex_lock(&dev->struct_mutex);
+
+ list_for_each_entry(crtc, &dev->mode_config.crtc_list, head) {
+ /* Skip inactive CRTCs */
+ if (!crtc->fb)
+ continue;
+
+ intel_crtc = to_intel_crtc(crtc);
+ intel_increase_pllclock(crtc, false);
+ del_timer_sync(&intel_crtc->idle_timer);
+ }
+
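+	/* Bring the render clock back up and stop the GPU idle timer before teardown */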
+ intel_increase_renderclock(dev, false);
+ del_timer_sync(&dev_priv->idle_timer);
+
+ mutex_unlock(&dev->struct_mutex);
+
+ if (dev_priv->display.disable_fbc)
+ dev_priv->display.disable_fbc(dev);
+
drm_mode_config_cleanup(dev);
}
@@ -3420,3 +4326,20 @@ struct drm_encoder *intel_best_encoder(struct drm_connector *connector)
return &intel_output->enc;
}
+
+/*
+ * set vga decode state - true == enable VGA decode
+ */
+int intel_modeset_vga_set_state(struct drm_device *dev, bool state)
+{
+ struct drm_i915_private *dev_priv = dev->dev_private;
+ u16 gmch_ctrl;
+
+ pci_read_config_word(dev_priv->bridge_dev, INTEL_GMCH_CTRL, &gmch_ctrl);
+ if (state)
+ gmch_ctrl &= ~INTEL_GMCH_VGA_DISABLE;
+ else
+ gmch_ctrl |= INTEL_GMCH_VGA_DISABLE;
+ pci_write_config_word(dev_priv->bridge_dev, INTEL_GMCH_CTRL, gmch_ctrl);
+ return 0;
+}
diff --git a/drivers/gpu/drm/i915/intel_dp.c b/drivers/gpu/drm/i915/intel_dp.c
index 2b914d73207..f4856a51047 100644
--- a/drivers/gpu/drm/i915/intel_dp.c
+++ b/drivers/gpu/drm/i915/intel_dp.c
@@ -232,7 +232,7 @@ intel_dp_aux_ch(struct intel_output *intel_output,
for (try = 0; try < 5; try++) {
/* Load the send data into the aux channel data registers */
for (i = 0; i < send_bytes; i += 4) {
- uint32_t d = pack_aux(send + i, send_bytes - i);;
+ uint32_t d = pack_aux(send + i, send_bytes - i);
I915_WRITE(ch_data + i, d);
}
diff --git a/drivers/gpu/drm/i915/intel_drv.h b/drivers/gpu/drm/i915/intel_drv.h
index 26a6227c15f..ef61fe9507e 100644
--- a/drivers/gpu/drm/i915/intel_drv.h
+++ b/drivers/gpu/drm/i915/intel_drv.h
@@ -28,6 +28,7 @@
#include <linux/i2c.h>
#include <linux/i2c-id.h>
#include <linux/i2c-algo-bit.h>
+#include "i915_drv.h"
#include "drm_crtc.h"
#include "drm_crtc_helper.h"
@@ -111,15 +112,15 @@ struct intel_output {
struct intel_crtc {
struct drm_crtc base;
- int pipe;
- int plane;
+ enum pipe pipe;
+ enum plane plane;
struct drm_gem_object *cursor_bo;
uint32_t cursor_addr;
u8 lut_r[256], lut_g[256], lut_b[256];
int dpms_mode;
- struct intel_framebuffer *fbdev_fb;
- /* a mode_set for fbdev users on this crtc */
- struct drm_mode_set mode_set;
+ bool busy; /* is scanout buffer being updated frequently? */
+ struct timer_list idle_timer;
+ bool lowfreq_avail;
};
#define to_intel_crtc(x) container_of(x, struct intel_crtc, base)
@@ -138,6 +139,7 @@ extern void intel_hdmi_init(struct drm_device *dev, int sdvox_reg);
extern bool intel_sdvo_init(struct drm_device *dev, int output_device);
extern void intel_dvo_init(struct drm_device *dev);
extern void intel_tv_init(struct drm_device *dev);
+extern void intel_mark_busy(struct drm_device *dev, struct drm_gem_object *obj);
extern void intel_lvds_init(struct drm_device *dev);
extern void intel_dp_init(struct drm_device *dev, int dp_reg);
void
@@ -173,9 +175,12 @@ extern int intelfb_resize(struct drm_device *dev, struct drm_crtc *crtc);
extern void intelfb_restore(void);
extern void intel_crtc_fb_gamma_set(struct drm_crtc *crtc, u16 red, u16 green,
u16 blue, int regno);
+extern void intel_crtc_fb_gamma_get(struct drm_crtc *crtc, u16 *red, u16 *green,
+ u16 *blue, int regno);
extern int intel_framebuffer_create(struct drm_device *dev,
struct drm_mode_fb_cmd *mode_cmd,
struct drm_framebuffer **fb,
struct drm_gem_object *obj);
+
#endif /* __INTEL_DRV_H__ */
diff --git a/drivers/gpu/drm/i915/intel_fb.c b/drivers/gpu/drm/i915/intel_fb.c
index 1d30802e773..2b0fe54cd92 100644
--- a/drivers/gpu/drm/i915/intel_fb.c
+++ b/drivers/gpu/drm/i915/intel_fb.c
@@ -39,339 +39,36 @@
#include "drmP.h"
#include "drm.h"
#include "drm_crtc.h"
+#include "drm_fb_helper.h"
#include "intel_drv.h"
#include "i915_drm.h"
#include "i915_drv.h"
struct intelfb_par {
- struct drm_device *dev;
- struct drm_display_mode *our_mode;
+ struct drm_fb_helper helper;
struct intel_framebuffer *intel_fb;
- int crtc_count;
- /* crtc currently bound to this */
- uint32_t crtc_ids[2];
+ struct drm_display_mode *our_mode;
};
-static int intelfb_setcolreg(unsigned regno, unsigned red, unsigned green,
- unsigned blue, unsigned transp,
- struct fb_info *info)
-{
- struct intelfb_par *par = info->par;
- struct drm_device *dev = par->dev;
- struct drm_crtc *crtc;
- int i;
-
- list_for_each_entry(crtc, &dev->mode_config.crtc_list, head) {
- struct intel_crtc *intel_crtc = to_intel_crtc(crtc);
- struct drm_mode_set *modeset = &intel_crtc->mode_set;
- struct drm_framebuffer *fb = modeset->fb;
-
- for (i = 0; i < par->crtc_count; i++)
- if (crtc->base.id == par->crtc_ids[i])
- break;
-
- if (i == par->crtc_count)
- continue;
-
-
- if (regno > 255)
- return 1;
-
- if (fb->depth == 8) {
- intel_crtc_fb_gamma_set(crtc, red, green, blue, regno);
- return 0;
- }
-
- if (regno < 16) {
- switch (fb->depth) {
- case 15:
- fb->pseudo_palette[regno] = ((red & 0xf800) >> 1) |
- ((green & 0xf800) >> 6) |
- ((blue & 0xf800) >> 11);
- break;
- case 16:
- fb->pseudo_palette[regno] = (red & 0xf800) |
- ((green & 0xfc00) >> 5) |
- ((blue & 0xf800) >> 11);
- break;
- case 24:
- case 32:
- fb->pseudo_palette[regno] = ((red & 0xff00) << 8) |
- (green & 0xff00) |
- ((blue & 0xff00) >> 8);
- break;
- }
- }
- }
- return 0;
-}
-
-static int intelfb_check_var(struct fb_var_screeninfo *var,
- struct fb_info *info)
-{
- struct intelfb_par *par = info->par;
- struct intel_framebuffer *intel_fb = par->intel_fb;
- struct drm_framebuffer *fb = &intel_fb->base;
- int depth;
-
- if (var->pixclock == -1 || !var->pixclock)
- return -EINVAL;
-
- /* Need to resize the fb object !!! */
- if (var->xres > fb->width || var->yres > fb->height) {
- DRM_ERROR("Requested width/height is greater than current fb object %dx%d > %dx%d\n",var->xres,var->yres,fb->width,fb->height);
- DRM_ERROR("Need resizing code.\n");
- return -EINVAL;
- }
-
- switch (var->bits_per_pixel) {
- case 16:
- depth = (var->green.length == 6) ? 16 : 15;
- break;
- case 32:
- depth = (var->transp.length > 0) ? 32 : 24;
- break;
- default:
- depth = var->bits_per_pixel;
- break;
- }
-
- switch (depth) {
- case 8:
- var->red.offset = 0;
- var->green.offset = 0;
- var->blue.offset = 0;
- var->red.length = 8;
- var->green.length = 8;
- var->blue.length = 8;
- var->transp.length = 0;
- var->transp.offset = 0;
- break;
- case 15:
- var->red.offset = 10;
- var->green.offset = 5;
- var->blue.offset = 0;
- var->red.length = 5;
- var->green.length = 5;
- var->blue.length = 5;
- var->transp.length = 1;
- var->transp.offset = 15;
- break;
- case 16:
- var->red.offset = 11;
- var->green.offset = 5;
- var->blue.offset = 0;
- var->red.length = 5;
- var->green.length = 6;
- var->blue.length = 5;
- var->transp.length = 0;
- var->transp.offset = 0;
- break;
- case 24:
- var->red.offset = 16;
- var->green.offset = 8;
- var->blue.offset = 0;
- var->red.length = 8;
- var->green.length = 8;
- var->blue.length = 8;
- var->transp.length = 0;
- var->transp.offset = 0;
- break;
- case 32:
- var->red.offset = 16;
- var->green.offset = 8;
- var->blue.offset = 0;
- var->red.length = 8;
- var->green.length = 8;
- var->blue.length = 8;
- var->transp.length = 8;
- var->transp.offset = 24;
- break;
- default:
- return -EINVAL;
- }
-
- return 0;
-}
-
-/* this will let fbcon do the mode init */
-/* FIXME: take mode config lock? */
-static int intelfb_set_par(struct fb_info *info)
-{
- struct intelfb_par *par = info->par;
- struct drm_device *dev = par->dev;
- struct fb_var_screeninfo *var = &info->var;
- int i;
-
- DRM_DEBUG("%d %d\n", var->xres, var->pixclock);
-
- if (var->pixclock != -1) {
-
- DRM_ERROR("PIXEL CLOCK SET\n");
- return -EINVAL;
- } else {
- struct drm_crtc *crtc;
- int ret;
-
- list_for_each_entry(crtc, &dev->mode_config.crtc_list, head) {
- struct intel_crtc *intel_crtc = to_intel_crtc(crtc);
-
- for (i = 0; i < par->crtc_count; i++)
- if (crtc->base.id == par->crtc_ids[i])
- break;
-
- if (i == par->crtc_count)
- continue;
-
- if (crtc->fb == intel_crtc->mode_set.fb) {
- mutex_lock(&dev->mode_config.mutex);
- ret = crtc->funcs->set_config(&intel_crtc->mode_set);
- mutex_unlock(&dev->mode_config.mutex);
- if (ret)
- return ret;
- }
- }
- return 0;
- }
-}
-
-static int intelfb_pan_display(struct fb_var_screeninfo *var,
- struct fb_info *info)
-{
- struct intelfb_par *par = info->par;
- struct drm_device *dev = par->dev;
- struct drm_mode_set *modeset;
- struct drm_crtc *crtc;
- struct intel_crtc *intel_crtc;
- int ret = 0;
- int i;
-
- list_for_each_entry(crtc, &dev->mode_config.crtc_list, head) {
- for (i = 0; i < par->crtc_count; i++)
- if (crtc->base.id == par->crtc_ids[i])
- break;
-
- if (i == par->crtc_count)
- continue;
-
- intel_crtc = to_intel_crtc(crtc);
- modeset = &intel_crtc->mode_set;
-
- modeset->x = var->xoffset;
- modeset->y = var->yoffset;
-
- if (modeset->num_connectors) {
- mutex_lock(&dev->mode_config.mutex);
- ret = crtc->funcs->set_config(modeset);
- mutex_unlock(&dev->mode_config.mutex);
- if (!ret) {
- info->var.xoffset = var->xoffset;
- info->var.yoffset = var->yoffset;
- }
- }
- }
-
- return ret;
-}
-
-static void intelfb_on(struct fb_info *info)
-{
- struct intelfb_par *par = info->par;
- struct drm_device *dev = par->dev;
- struct drm_crtc *crtc;
- struct drm_encoder *encoder;
- int i;
-
- /*
- * For each CRTC in this fb, find all associated encoders
- * and turn them off, then turn off the CRTC.
- */
- list_for_each_entry(crtc, &dev->mode_config.crtc_list, head) {
- struct drm_crtc_helper_funcs *crtc_funcs = crtc->helper_private;
-
- for (i = 0; i < par->crtc_count; i++)
- if (crtc->base.id == par->crtc_ids[i])
- break;
-
- crtc_funcs->dpms(crtc, DRM_MODE_DPMS_ON);
-
- /* Found a CRTC on this fb, now find encoders */
- list_for_each_entry(encoder, &dev->mode_config.encoder_list, head) {
- if (encoder->crtc == crtc) {
- struct drm_encoder_helper_funcs *encoder_funcs;
- encoder_funcs = encoder->helper_private;
- encoder_funcs->dpms(encoder, DRM_MODE_DPMS_ON);
- }
- }
- }
-}
-
-static void intelfb_off(struct fb_info *info, int dpms_mode)
-{
- struct intelfb_par *par = info->par;
- struct drm_device *dev = par->dev;
- struct drm_crtc *crtc;
- struct drm_encoder *encoder;
- int i;
-
- /*
- * For each CRTC in this fb, find all associated encoders
- * and turn them off, then turn off the CRTC.
- */
- list_for_each_entry(crtc, &dev->mode_config.crtc_list, head) {
- struct drm_crtc_helper_funcs *crtc_funcs = crtc->helper_private;
-
- for (i = 0; i < par->crtc_count; i++)
- if (crtc->base.id == par->crtc_ids[i])
- break;
-
- /* Found a CRTC on this fb, now find encoders */
- list_for_each_entry(encoder, &dev->mode_config.encoder_list, head) {
- if (encoder->crtc == crtc) {
- struct drm_encoder_helper_funcs *encoder_funcs;
- encoder_funcs = encoder->helper_private;
- encoder_funcs->dpms(encoder, dpms_mode);
- }
- }
- if (dpms_mode == DRM_MODE_DPMS_OFF)
- crtc_funcs->dpms(crtc, dpms_mode);
- }
-}
-
-static int intelfb_blank(int blank, struct fb_info *info)
-{
- switch (blank) {
- case FB_BLANK_UNBLANK:
- intelfb_on(info);
- break;
- case FB_BLANK_NORMAL:
- intelfb_off(info, DRM_MODE_DPMS_STANDBY);
- break;
- case FB_BLANK_HSYNC_SUSPEND:
- intelfb_off(info, DRM_MODE_DPMS_STANDBY);
- break;
- case FB_BLANK_VSYNC_SUSPEND:
- intelfb_off(info, DRM_MODE_DPMS_SUSPEND);
- break;
- case FB_BLANK_POWERDOWN:
- intelfb_off(info, DRM_MODE_DPMS_OFF);
- break;
- }
- return 0;
-}
-
static struct fb_ops intelfb_ops = {
.owner = THIS_MODULE,
- .fb_check_var = intelfb_check_var,
- .fb_set_par = intelfb_set_par,
- .fb_setcolreg = intelfb_setcolreg,
+ .fb_check_var = drm_fb_helper_check_var,
+ .fb_set_par = drm_fb_helper_set_par,
+ .fb_setcolreg = drm_fb_helper_setcolreg,
.fb_fillrect = cfb_fillrect,
.fb_copyarea = cfb_copyarea,
.fb_imageblit = cfb_imageblit,
- .fb_pan_display = intelfb_pan_display,
- .fb_blank = intelfb_blank,
+ .fb_pan_display = drm_fb_helper_pan_display,
+ .fb_blank = drm_fb_helper_blank,
+ .fb_setcmap = drm_fb_helper_setcmap,
};
+static struct drm_fb_helper_funcs intel_fb_helper_funcs = {
+ .gamma_set = intel_crtc_fb_gamma_set,
+ .gamma_get = intel_crtc_fb_gamma_get,
+};
+
+
/**
 * Currently it is assumed that the old framebuffer is reused.
*
@@ -412,25 +109,11 @@ int intelfb_resize(struct drm_device *dev, struct drm_crtc *crtc)
}
EXPORT_SYMBOL(intelfb_resize);
-static struct drm_mode_set kernelfb_mode;
-
-static int intelfb_panic(struct notifier_block *n, unsigned long ununsed,
- void *panic_str)
-{
- DRM_ERROR("panic occurred, switching back to text console\n");
-
- intelfb_restore();
- return 0;
-}
-
-static struct notifier_block paniced = {
- .notifier_call = intelfb_panic,
-};
-
static int intelfb_create(struct drm_device *dev, uint32_t fb_width,
uint32_t fb_height, uint32_t surface_width,
uint32_t surface_height,
- struct intel_framebuffer **intel_fb_p)
+ uint32_t surface_depth, uint32_t surface_bpp,
+ struct drm_framebuffer **fb_p)
{
struct fb_info *info;
struct intelfb_par *par;
@@ -442,12 +125,16 @@ static int intelfb_create(struct drm_device *dev, uint32_t fb_width,
struct device *device = &dev->pdev->dev;
int size, ret, mmio_bar = IS_I9XX(dev) ? 0 : 1;
+ /* we don't do packed 24bpp */
+ if (surface_bpp == 24)
+ surface_bpp = 32;
+
mode_cmd.width = surface_width;
mode_cmd.height = surface_height;
- mode_cmd.bpp = 32;
+ mode_cmd.bpp = surface_bpp;
mode_cmd.pitch = ALIGN(mode_cmd.width * ((mode_cmd.bpp + 1) / 8), 64);
- mode_cmd.depth = 24;
+ mode_cmd.depth = surface_depth;
size = mode_cmd.pitch * mode_cmd.height;
size = ALIGN(size, PAGE_SIZE);
@@ -479,7 +166,7 @@ static int intelfb_create(struct drm_device *dev, uint32_t fb_width,
list_add(&fb->filp_head, &dev->mode_config.fb_kernel_list);
intel_fb = to_intel_framebuffer(fb);
- *intel_fb_p = intel_fb;
+ *fb_p = fb;
info = framebuffer_alloc(sizeof(struct intelfb_par), device);
if (!info) {
@@ -489,21 +176,19 @@ static int intelfb_create(struct drm_device *dev, uint32_t fb_width,
par = info->par;
+ par->helper.funcs = &intel_fb_helper_funcs;
+ par->helper.dev = dev;
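+	/* The fb helper tracks up to two CRTCs, each with at most INTELFB_CONN_LIMIT connectors */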
+ ret = drm_fb_helper_init_crtc_count(&par->helper, 2,
+ INTELFB_CONN_LIMIT);
+ if (ret)
+ goto out_unref;
+
strcpy(info->fix.id, "inteldrmfb");
- info->fix.type = FB_TYPE_PACKED_PIXELS;
- info->fix.visual = FB_VISUAL_TRUECOLOR;
- info->fix.type_aux = 0;
- info->fix.xpanstep = 1; /* doing it in hw */
- info->fix.ypanstep = 1; /* doing it in hw */
- info->fix.ywrapstep = 0;
- info->fix.accel = FB_ACCEL_I830;
- info->fix.type_aux = 0;
info->flags = FBINFO_DEFAULT;
info->fbops = &intelfb_ops;
- info->fix.line_length = fb->pitch;
/* setup aperture base/size for vesafb takeover */
info->aperture_base = dev->mode_config.fb_base;
@@ -527,18 +212,8 @@ static int intelfb_create(struct drm_device *dev, uint32_t fb_width,
// memset(info->screen_base, 0, size);
- info->pseudo_palette = fb->pseudo_palette;
- info->var.xres_virtual = fb->width;
- info->var.yres_virtual = fb->height;
- info->var.bits_per_pixel = fb->bits_per_pixel;
- info->var.xoffset = 0;
- info->var.yoffset = 0;
- info->var.activate = FB_ACTIVATE_NOW;
- info->var.height = -1;
- info->var.width = -1;
-
- info->var.xres = fb_width;
- info->var.yres = fb_height;
+ drm_fb_helper_fill_fix(info, fb->pitch, fb->depth);
+ drm_fb_helper_fill_var(info, fb, fb_width, fb_height);
/* FIXME: we really shouldn't expose mmio space at all */
info->fix.mmio_start = pci_resource_start(dev->pdev, mmio_bar);
@@ -550,64 +225,9 @@ static int intelfb_create(struct drm_device *dev, uint32_t fb_width,
info->pixmap.flags = FB_PIXMAP_SYSTEM;
info->pixmap.scan_align = 1;
- switch(fb->depth) {
- case 8:
- info->var.red.offset = 0;
- info->var.green.offset = 0;
- info->var.blue.offset = 0;
- info->var.red.length = 8; /* 8bit DAC */
- info->var.green.length = 8;
- info->var.blue.length = 8;
- info->var.transp.offset = 0;
- info->var.transp.length = 0;
- break;
- case 15:
- info->var.red.offset = 10;
- info->var.green.offset = 5;
- info->var.blue.offset = 0;
- info->var.red.length = 5;
- info->var.green.length = 5;
- info->var.blue.length = 5;
- info->var.transp.offset = 15;
- info->var.transp.length = 1;
- break;
- case 16:
- info->var.red.offset = 11;
- info->var.green.offset = 5;
- info->var.blue.offset = 0;
- info->var.red.length = 5;
- info->var.green.length = 6;
- info->var.blue.length = 5;
- info->var.transp.offset = 0;
- break;
- case 24:
- info->var.red.offset = 16;
- info->var.green.offset = 8;
- info->var.blue.offset = 0;
- info->var.red.length = 8;
- info->var.green.length = 8;
- info->var.blue.length = 8;
- info->var.transp.offset = 0;
- info->var.transp.length = 0;
- break;
- case 32:
- info->var.red.offset = 16;
- info->var.green.offset = 8;
- info->var.blue.offset = 0;
- info->var.red.length = 8;
- info->var.green.length = 8;
- info->var.blue.length = 8;
- info->var.transp.offset = 24;
- info->var.transp.length = 8;
- break;
- default:
- break;
- }
-
fb->fbdev = info;
par->intel_fb = intel_fb;
- par->dev = dev;
/* To allow resizeing without swapping buffers */
DRM_DEBUG("allocated %dx%d fb: 0x%08x, bo %p\n", intel_fb->base.width,
@@ -625,307 +245,12 @@ out:
return ret;
}
-static int intelfb_multi_fb_probe_crtc(struct drm_device *dev, struct drm_crtc *crtc)
-{
- struct intel_crtc *intel_crtc = to_intel_crtc(crtc);
- struct intel_framebuffer *intel_fb;
- struct drm_framebuffer *fb;
- struct drm_connector *connector;
- struct fb_info *info;
- struct intelfb_par *par;
- struct drm_mode_set *modeset;
- unsigned int width, height;
- int new_fb = 0;
- int ret, i, conn_count;
-
- if (!drm_helper_crtc_in_use(crtc))
- return 0;
-
- if (!crtc->desired_mode)
- return 0;
-
- width = crtc->desired_mode->hdisplay;
- height = crtc->desired_mode->vdisplay;
-
- /* is there an fb bound to this crtc already */
- if (!intel_crtc->mode_set.fb) {
- ret = intelfb_create(dev, width, height, width, height, &intel_fb);
- if (ret)
- return -EINVAL;
- new_fb = 1;
- } else {
- fb = intel_crtc->mode_set.fb;
- intel_fb = to_intel_framebuffer(fb);
- if ((intel_fb->base.width < width) || (intel_fb->base.height < height))
- return -EINVAL;
- }
-
- info = intel_fb->base.fbdev;
- par = info->par;
-
- modeset = &intel_crtc->mode_set;
- modeset->fb = &intel_fb->base;
- conn_count = 0;
- list_for_each_entry(connector, &dev->mode_config.connector_list, head) {
- if (connector->encoder)
- if (connector->encoder->crtc == modeset->crtc) {
- modeset->connectors[conn_count] = connector;
- conn_count++;
- if (conn_count > INTELFB_CONN_LIMIT)
- BUG();
- }
- }
-
- for (i = conn_count; i < INTELFB_CONN_LIMIT; i++)
- modeset->connectors[i] = NULL;
-
- par->crtc_ids[0] = crtc->base.id;
-
- modeset->num_connectors = conn_count;
- if (modeset->crtc->desired_mode) {
- if (modeset->mode)
- drm_mode_destroy(dev, modeset->mode);
- modeset->mode = drm_mode_duplicate(dev,
- modeset->crtc->desired_mode);
- }
-
- par->crtc_count = 1;
-
- if (new_fb) {
- info->var.pixclock = -1;
- if (register_framebuffer(info) < 0)
- return -EINVAL;
- } else
- intelfb_set_par(info);
-
- DRM_INFO("fb%d: %s frame buffer device\n", info->node,
- info->fix.id);
-
- /* Switch back to kernel console on panic */
- kernelfb_mode = *modeset;
- atomic_notifier_chain_register(&panic_notifier_list, &paniced);
- DRM_DEBUG("registered panic notifier\n");
-
- return 0;
-}
-
-static int intelfb_multi_fb_probe(struct drm_device *dev)
-{
-
- struct drm_crtc *crtc;
- int ret = 0;
-
- list_for_each_entry(crtc, &dev->mode_config.crtc_list, head) {
- ret = intelfb_multi_fb_probe_crtc(dev, crtc);
- if (ret)
- return ret;
- }
- return ret;
-}
-
-static int intelfb_single_fb_probe(struct drm_device *dev)
-{
- struct drm_crtc *crtc;
- struct drm_connector *connector;
- unsigned int fb_width = (unsigned)-1, fb_height = (unsigned)-1;
- unsigned int surface_width = 0, surface_height = 0;
- int new_fb = 0;
- int crtc_count = 0;
- int ret, i, conn_count = 0;
- struct intel_framebuffer *intel_fb;
- struct fb_info *info;
- struct intelfb_par *par;
- struct drm_mode_set *modeset = NULL;
-
- DRM_DEBUG("\n");
-
- /* Get a count of crtcs now in use and new min/maxes width/heights */
- list_for_each_entry(crtc, &dev->mode_config.crtc_list, head) {
- if (!drm_helper_crtc_in_use(crtc))
- continue;
-
- crtc_count++;
- if (!crtc->desired_mode)
- continue;
-
- /* Smallest mode determines console size... */
- if (crtc->desired_mode->hdisplay < fb_width)
- fb_width = crtc->desired_mode->hdisplay;
-
- if (crtc->desired_mode->vdisplay < fb_height)
- fb_height = crtc->desired_mode->vdisplay;
-
- /* ... but largest for memory allocation dimensions */
- if (crtc->desired_mode->hdisplay > surface_width)
- surface_width = crtc->desired_mode->hdisplay;
-
- if (crtc->desired_mode->vdisplay > surface_height)
- surface_height = crtc->desired_mode->vdisplay;
- }
-
- if (crtc_count == 0 || fb_width == -1 || fb_height == -1) {
- /* hmm everyone went away - assume VGA cable just fell out
- and will come back later. */
- DRM_DEBUG("no CRTCs available?\n");
- return 0;
- }
-
-//fail
- /* Find the fb for our new config */
- if (list_empty(&dev->mode_config.fb_kernel_list)) {
- DRM_DEBUG("creating new fb (console size %dx%d, "
- "buffer size %dx%d)\n", fb_width, fb_height,
- surface_width, surface_height);
- ret = intelfb_create(dev, fb_width, fb_height, surface_width,
- surface_height, &intel_fb);
- if (ret)
- return -EINVAL;
- new_fb = 1;
- } else {
- struct drm_framebuffer *fb;
-
- fb = list_first_entry(&dev->mode_config.fb_kernel_list,
- struct drm_framebuffer, filp_head);
- intel_fb = to_intel_framebuffer(fb);
-
- /* if someone hotplugs something bigger than we have already
- * allocated, we are pwned. As really we can't resize an
- * fbdev that is in the wild currently due to fbdev not really
- * being designed for the lower layers moving stuff around
- * under it.
- * - so in the grand style of things - punt.
- */
- if ((fb->width < surface_width) ||
- (fb->height < surface_height)) {
- DRM_ERROR("fb not large enough for console\n");
- return -EINVAL;
- }
- }
-// fail
-
- info = intel_fb->base.fbdev;
- par = info->par;
-
- crtc_count = 0;
- /*
- * For each CRTC, set up the connector list for the CRTC's mode
- * set configuration.
- */
- list_for_each_entry(crtc, &dev->mode_config.crtc_list, head) {
- struct intel_crtc *intel_crtc = to_intel_crtc(crtc);
-
- modeset = &intel_crtc->mode_set;
- modeset->fb = &intel_fb->base;
- conn_count = 0;
- list_for_each_entry(connector, &dev->mode_config.connector_list,
- head) {
- if (!connector->encoder)
- continue;
-
- if(connector->encoder->crtc == modeset->crtc) {
- modeset->connectors[conn_count++] = connector;
- if (conn_count > INTELFB_CONN_LIMIT)
- BUG();
- }
- }
-
- /* Zero out remaining connector pointers */
- for (i = conn_count; i < INTELFB_CONN_LIMIT; i++)
- modeset->connectors[i] = NULL;
-
- par->crtc_ids[crtc_count++] = crtc->base.id;
-
- modeset->num_connectors = conn_count;
- if (modeset->crtc->desired_mode) {
- if (modeset->mode)
- drm_mode_destroy(dev, modeset->mode);
- modeset->mode = drm_mode_duplicate(dev,
- modeset->crtc->desired_mode);
- }
- }
- par->crtc_count = crtc_count;
-
- if (new_fb) {
- info->var.pixclock = -1;
- if (register_framebuffer(info) < 0)
- return -EINVAL;
- } else
- intelfb_set_par(info);
-
- DRM_INFO("fb%d: %s frame buffer device\n", info->node,
- info->fix.id);
-
- /* Switch back to kernel console on panic */
- kernelfb_mode = *modeset;
- atomic_notifier_chain_register(&panic_notifier_list, &paniced);
- DRM_DEBUG("registered panic notifier\n");
-
- return 0;
-}
-
-/**
- * intelfb_restore - restore the framebuffer console (kernel) config
- *
- * Restore's the kernel's fbcon mode, used for lastclose & panic paths.
- */
-void intelfb_restore(void)
-{
- int ret;
- if ((ret = drm_crtc_helper_set_config(&kernelfb_mode)) != 0) {
- DRM_ERROR("Failed to restore crtc configuration: %d\n",
- ret);
- }
-}
-
-static void intelfb_restore_work_fn(struct work_struct *ignored)
-{
- intelfb_restore();
-}
-static DECLARE_WORK(intelfb_restore_work, intelfb_restore_work_fn);
-
-static void intelfb_sysrq(int dummy1, struct tty_struct *dummy3)
-{
- schedule_work(&intelfb_restore_work);
-}
-
-static struct sysrq_key_op sysrq_intelfb_restore_op = {
- .handler = intelfb_sysrq,
- .help_msg = "force-fb(V)",
- .action_msg = "Restore framebuffer console",
-};
-
int intelfb_probe(struct drm_device *dev)
{
int ret;
DRM_DEBUG("\n");
-
- /* something has changed in the lower levels of hell - deal with it
- here */
-
- /* two modes : a) 1 fb to rule all crtcs.
- b) one fb per crtc.
- two actions 1) new connected device
- 2) device removed.
- case a/1 : if the fb surface isn't big enough - resize the surface fb.
- if the fb size isn't big enough - resize fb into surface.
- if everything big enough configure the new crtc/etc.
- case a/2 : undo the configuration
- possibly resize down the fb to fit the new configuration.
- case b/1 : see if it is on a new crtc - setup a new fb and add it.
- case b/2 : teardown the new fb.
- */
-
- /* mode a first */
- /* search for an fb */
- if (i915_fbpercrtc == 1) {
- ret = intelfb_multi_fb_probe(dev);
- } else {
- ret = intelfb_single_fb_probe(dev);
- }
-
- register_sysrq_key('v', &sysrq_intelfb_restore_op);
-
+ ret = drm_fb_helper_single_fb_probe(dev, 32, intelfb_create);
return ret;
}
EXPORT_SYMBOL(intelfb_probe);
@@ -940,13 +265,14 @@ int intelfb_remove(struct drm_device *dev, struct drm_framebuffer *fb)
info = fb->fbdev;
if (info) {
+ struct intelfb_par *par = info->par;
unregister_framebuffer(info);
iounmap(info->screen_base);
+ if (info->par)
+ drm_fb_helper_free(&par->helper);
framebuffer_release(info);
}
- atomic_notifier_chain_unregister(&panic_notifier_list, &paniced);
- memset(&kernelfb_mode, 0, sizeof(struct drm_mode_set));
return 0;
}
EXPORT_SYMBOL(intelfb_remove);
diff --git a/drivers/gpu/drm/i915/intel_hdmi.c b/drivers/gpu/drm/i915/intel_hdmi.c
index fa304e13601..663ab6de0b5 100644
--- a/drivers/gpu/drm/i915/intel_hdmi.c
+++ b/drivers/gpu/drm/i915/intel_hdmi.c
@@ -223,7 +223,7 @@ void intel_hdmi_init(struct drm_device *dev, int sdvox_reg)
connector = &intel_output->base;
drm_connector_init(dev, connector, &intel_hdmi_connector_funcs,
- DRM_MODE_CONNECTOR_DVID);
+ DRM_MODE_CONNECTOR_HDMIA);
drm_connector_helper_add(connector, &intel_hdmi_connector_helper_funcs);
intel_output->type = INTEL_OUTPUT_HDMI;
diff --git a/drivers/gpu/drm/i915/intel_i2c.c b/drivers/gpu/drm/i915/intel_i2c.c
index 62b8bead765..c7eab724c41 100644
--- a/drivers/gpu/drm/i915/intel_i2c.c
+++ b/drivers/gpu/drm/i915/intel_i2c.c
@@ -42,11 +42,11 @@ void intel_i2c_quirk_set(struct drm_device *dev, bool enable)
if (!IS_IGD(dev))
return;
if (enable)
- I915_WRITE(CG_2D_DIS,
- I915_READ(CG_2D_DIS) | DPCUNIT_CLOCK_GATE_DISABLE);
+ I915_WRITE(DSPCLK_GATE_D,
+ I915_READ(DSPCLK_GATE_D) | DPCUNIT_CLOCK_GATE_DISABLE);
else
- I915_WRITE(CG_2D_DIS,
- I915_READ(CG_2D_DIS) & (~DPCUNIT_CLOCK_GATE_DISABLE));
+ I915_WRITE(DSPCLK_GATE_D,
+ I915_READ(DSPCLK_GATE_D) & (~DPCUNIT_CLOCK_GATE_DISABLE));
}
/*
diff --git a/drivers/gpu/drm/i915/intel_lvds.c b/drivers/gpu/drm/i915/intel_lvds.c
index 8df02ef8926..98ae3d73577 100644
--- a/drivers/gpu/drm/i915/intel_lvds.c
+++ b/drivers/gpu/drm/i915/intel_lvds.c
@@ -27,6 +27,7 @@
* Jesse Barnes <jesse.barnes@intel.com>
*/
+#include <acpi/button.h>
#include <linux/dmi.h>
#include <linux/i2c.h>
#include "drmP.h"
@@ -38,16 +39,6 @@
#include "i915_drv.h"
#include <linux/acpi.h>
-#define I915_LVDS "i915_lvds"
-
-/*
- * the following four scaling options are defined.
- * #define DRM_MODE_SCALE_NON_GPU 0
- * #define DRM_MODE_SCALE_FULLSCREEN 1
- * #define DRM_MODE_SCALE_NO_SCALE 2
- * #define DRM_MODE_SCALE_ASPECT 3
- */
-
/* Private structure for the integrated LVDS support */
struct intel_lvds_priv {
int fitting_mode;
@@ -305,6 +296,10 @@ static bool intel_lvds_mode_fixup(struct drm_encoder *encoder,
goto out;
}
+ /* full screen scale for now */
+ if (IS_IGDNG(dev))
+ goto out;
+
/* 965+ wants fuzzy fitting */
if (IS_I965G(dev))
pfit_control |= (intel_crtc->pipe << PFIT_PIPE_SHIFT) |
@@ -332,11 +327,13 @@ static bool intel_lvds_mode_fixup(struct drm_encoder *encoder,
* to register description and PRM.
* Change the value here to see the borders for debugging
*/
- I915_WRITE(BCLRPAT_A, 0);
- I915_WRITE(BCLRPAT_B, 0);
+ if (!IS_IGDNG(dev)) {
+ I915_WRITE(BCLRPAT_A, 0);
+ I915_WRITE(BCLRPAT_B, 0);
+ }
switch (lvds_priv->fitting_mode) {
- case DRM_MODE_SCALE_NO_SCALE:
+ case DRM_MODE_SCALE_CENTER:
/*
* For centered modes, we have to calculate border widths &
* heights and modify the values programmed into the CRTC.
@@ -582,7 +579,6 @@ static void intel_lvds_mode_set(struct drm_encoder *encoder,
* settings.
*/
- /* No panel fitting yet, fixme */
if (IS_IGDNG(dev))
return;
@@ -595,15 +591,33 @@ static void intel_lvds_mode_set(struct drm_encoder *encoder,
I915_WRITE(PFIT_CONTROL, lvds_priv->pfit_control);
}
+/* Some lid devices report incorrect lid status, assume they're connected */
+static const struct dmi_system_id bad_lid_status[] = {
+ {
+ .ident = "Aspire One",
+ .matches = {
+ DMI_MATCH(DMI_SYS_VENDOR, "Acer"),
+ DMI_MATCH(DMI_PRODUCT_NAME, "Aspire one"),
+ },
+ },
+ { }
+};
+
/**
* Detect the LVDS connection.
*
- * This always returns CONNECTOR_STATUS_CONNECTED. This connector should only have
- * been set up if the LVDS was actually connected anyway.
+ * Since LVDS doesn't have hotplug, we use the lid as a proxy. Open means
+ * connected and closed means disconnected. We also send hotplug events as
+ * needed, using lid status notification from the input layer.
*/
static enum drm_connector_status intel_lvds_detect(struct drm_connector *connector)
{
- return connector_status_connected;
+ enum drm_connector_status status = connector_status_connected;
+
+ if (!acpi_lid_open() && !dmi_check_system(bad_lid_status))
+ status = connector_status_disconnected;
+
+ return status;
}
/**
@@ -642,6 +656,24 @@ static int intel_lvds_get_modes(struct drm_connector *connector)
return 0;
}
+static int intel_lid_notify(struct notifier_block *nb, unsigned long val,
+ void *unused)
+{
+ struct drm_i915_private *dev_priv =
+ container_of(nb, struct drm_i915_private, lid_notifier);
+ struct drm_device *dev = dev_priv->dev;
+
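+	/* On lid open (and not during suspend), force a modeset so the panel lights back up */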
+ if (acpi_lid_open() && !dev_priv->suspended) {
+ mutex_lock(&dev->mode_config.mutex);
+ drm_helper_resume_force_mode(dev);
+ mutex_unlock(&dev->mode_config.mutex);
+ }
+
+ drm_sysfs_hotplug_event(dev_priv->dev);
+
+ return NOTIFY_OK;
+}
+
/**
* intel_lvds_destroy - unregister and free LVDS structures
* @connector: connector to free
@@ -651,10 +683,14 @@ static int intel_lvds_get_modes(struct drm_connector *connector)
*/
static void intel_lvds_destroy(struct drm_connector *connector)
{
+ struct drm_device *dev = connector->dev;
struct intel_output *intel_output = to_intel_output(connector);
+ struct drm_i915_private *dev_priv = dev->dev_private;
if (intel_output->ddc_bus)
intel_i2c_destroy(intel_output->ddc_bus);
+ if (dev_priv->lid_notifier.notifier_call)
+ acpi_lid_notifier_unregister(&dev_priv->lid_notifier);
drm_sysfs_connector_remove(connector);
drm_connector_cleanup(connector);
kfree(connector);
@@ -672,9 +708,8 @@ static int intel_lvds_set_property(struct drm_connector *connector,
connector->encoder) {
struct drm_crtc *crtc = connector->encoder->crtc;
struct intel_lvds_priv *lvds_priv = intel_output->dev_priv;
- if (value == DRM_MODE_SCALE_NON_GPU) {
- DRM_DEBUG_KMS(I915_LVDS,
- "non_GPU property is unsupported\n");
+ if (value == DRM_MODE_SCALE_NONE) {
+ DRM_DEBUG_KMS("no scaling not supported\n");
return 0;
}
if (lvds_priv->fitting_mode == value) {
@@ -731,8 +766,7 @@ static const struct drm_encoder_funcs intel_lvds_enc_funcs = {
static int __init intel_no_lvds_dmi_callback(const struct dmi_system_id *id)
{
- DRM_DEBUG_KMS(I915_LVDS,
- "Skipping LVDS initialization for %s\n", id->ident);
+ DRM_DEBUG_KMS("Skipping LVDS initialization for %s\n", id->ident);
return 1;
}
@@ -1023,11 +1057,16 @@ out:
pwm |= PWM_PCH_ENABLE;
I915_WRITE(BLC_PWM_PCH_CTL1, pwm);
}
+ dev_priv->lid_notifier.notifier_call = intel_lid_notify;
+ if (acpi_lid_notifier_register(&dev_priv->lid_notifier)) {
+ DRM_DEBUG("lid notifier registration failed\n");
+ dev_priv->lid_notifier.notifier_call = NULL;
+ }
drm_sysfs_connector_add(connector);
return;
failed:
- DRM_DEBUG_KMS(I915_LVDS, "No LVDS modes found, disabling.\n");
+ DRM_DEBUG_KMS("No LVDS modes found, disabling.\n");
if (intel_output->ddc_bus)
intel_i2c_destroy(intel_output->ddc_bus);
drm_connector_cleanup(connector);
diff --git a/drivers/gpu/drm/i915/intel_sdvo.c b/drivers/gpu/drm/i915/intel_sdvo.c
index d3b74ba62b4..083bec2e50f 100644
--- a/drivers/gpu/drm/i915/intel_sdvo.c
+++ b/drivers/gpu/drm/i915/intel_sdvo.c
@@ -37,7 +37,19 @@
#include "intel_sdvo_regs.h"
#undef SDVO_DEBUG
-#define I915_SDVO "i915_sdvo"
+
+static char *tv_format_names[] = {
+ "NTSC_M" , "NTSC_J" , "NTSC_443",
+ "PAL_B" , "PAL_D" , "PAL_G" ,
+ "PAL_H" , "PAL_I" , "PAL_M" ,
+ "PAL_N" , "PAL_NC" , "PAL_60" ,
+ "SECAM_B" , "SECAM_D" , "SECAM_G" ,
+ "SECAM_K" , "SECAM_K1", "SECAM_L" ,
+ "SECAM_60"
+};
+
+#define TV_FORMAT_NUM (sizeof(tv_format_names) / sizeof(*tv_format_names))
+
struct intel_sdvo_priv {
u8 slave_addr;
@@ -71,6 +83,15 @@ struct intel_sdvo_priv {
*/
bool is_tv;
+	/* Name of the currently selected TV format */
+ char *tv_format_name;
+
+	/* This contains all currently supported TV formats */
+ char *tv_format_supported[TV_FORMAT_NUM];
+ int format_supported_num;
+ struct drm_property *tv_format_property;
+ struct drm_property *tv_format_name_property[TV_FORMAT_NUM];
+
/**
* This is set if we treat the device as HDMI, instead of DVI.
*/
@@ -97,14 +118,6 @@ struct intel_sdvo_priv {
*/
struct intel_sdvo_sdtv_resolution_reply sdtv_resolutions;
- /**
- * Current selected TV format.
- *
- * This is stored in the same structure that's passed to the device, for
- * convenience.
- */
- struct intel_sdvo_tv_format tv_format;
-
/*
* supported encoding mode, used to determine whether HDMI is
* supported
@@ -114,11 +127,38 @@ struct intel_sdvo_priv {
/* DDC bus used by this SDVO output */
uint8_t ddc_bus;
+ /* Mac mini hack -- use the same DDC as the analog connector */
+ struct i2c_adapter *analog_ddc_bus;
+
int save_sdvo_mult;
u16 save_active_outputs;
struct intel_sdvo_dtd save_input_dtd_1, save_input_dtd_2;
struct intel_sdvo_dtd save_output_dtd[16];
u32 save_SDVOX;
+ /* add the property for the SDVO-TV */
+ struct drm_property *left_property;
+ struct drm_property *right_property;
+ struct drm_property *top_property;
+ struct drm_property *bottom_property;
+ struct drm_property *hpos_property;
+ struct drm_property *vpos_property;
+
+ /* add the property for the SDVO-TV/LVDS */
+ struct drm_property *brightness_property;
+ struct drm_property *contrast_property;
+ struct drm_property *saturation_property;
+ struct drm_property *hue_property;
+
+ /* Add variable to record current setting for the above property */
+ u32 left_margin, right_margin, top_margin, bottom_margin;
+ /* this is to get the range of margin.*/
+ u32 max_hscan, max_vscan;
+ u32 max_hpos, cur_hpos;
+ u32 max_vpos, cur_vpos;
+ u32 cur_brightness, max_brightness;
+ u32 cur_contrast, max_contrast;
+ u32 cur_saturation, max_saturation;
+ u32 cur_hue, max_hue;
};
static bool
@@ -188,7 +228,7 @@ static bool intel_sdvo_read_byte(struct intel_output *intel_output, u8 addr,
return true;
}
- DRM_DEBUG("i2c transfer returned %d\n", ret);
+ DRM_DEBUG_KMS("i2c transfer returned %d\n", ret);
return false;
}
@@ -265,6 +305,31 @@ static const struct _sdvo_cmd_name {
SDVO_CMD_NAME_ENTRY(SDVO_CMD_GET_SDTV_RESOLUTION_SUPPORT),
SDVO_CMD_NAME_ENTRY(SDVO_CMD_GET_SCALED_HDTV_RESOLUTION_SUPPORT),
SDVO_CMD_NAME_ENTRY(SDVO_CMD_GET_SUPPORTED_ENHANCEMENTS),
+ /* Add the op code for SDVO enhancements */
+ SDVO_CMD_NAME_ENTRY(SDVO_CMD_GET_MAX_POSITION_H),
+ SDVO_CMD_NAME_ENTRY(SDVO_CMD_GET_POSITION_H),
+ SDVO_CMD_NAME_ENTRY(SDVO_CMD_SET_POSITION_H),
+ SDVO_CMD_NAME_ENTRY(SDVO_CMD_GET_MAX_POSITION_V),
+ SDVO_CMD_NAME_ENTRY(SDVO_CMD_GET_POSITION_V),
+ SDVO_CMD_NAME_ENTRY(SDVO_CMD_SET_POSITION_V),
+ SDVO_CMD_NAME_ENTRY(SDVO_CMD_GET_MAX_SATURATION),
+ SDVO_CMD_NAME_ENTRY(SDVO_CMD_GET_SATURATION),
+ SDVO_CMD_NAME_ENTRY(SDVO_CMD_SET_SATURATION),
+ SDVO_CMD_NAME_ENTRY(SDVO_CMD_GET_MAX_HUE),
+ SDVO_CMD_NAME_ENTRY(SDVO_CMD_GET_HUE),
+ SDVO_CMD_NAME_ENTRY(SDVO_CMD_SET_HUE),
+ SDVO_CMD_NAME_ENTRY(SDVO_CMD_GET_MAX_CONTRAST),
+ SDVO_CMD_NAME_ENTRY(SDVO_CMD_GET_CONTRAST),
+ SDVO_CMD_NAME_ENTRY(SDVO_CMD_SET_CONTRAST),
+ SDVO_CMD_NAME_ENTRY(SDVO_CMD_GET_MAX_BRIGHTNESS),
+ SDVO_CMD_NAME_ENTRY(SDVO_CMD_GET_BRIGHTNESS),
+ SDVO_CMD_NAME_ENTRY(SDVO_CMD_SET_BRIGHTNESS),
+ SDVO_CMD_NAME_ENTRY(SDVO_CMD_GET_MAX_OVERSCAN_H),
+ SDVO_CMD_NAME_ENTRY(SDVO_CMD_GET_OVERSCAN_H),
+ SDVO_CMD_NAME_ENTRY(SDVO_CMD_SET_OVERSCAN_H),
+ SDVO_CMD_NAME_ENTRY(SDVO_CMD_GET_MAX_OVERSCAN_V),
+ SDVO_CMD_NAME_ENTRY(SDVO_CMD_GET_OVERSCAN_V),
+ SDVO_CMD_NAME_ENTRY(SDVO_CMD_SET_OVERSCAN_V),
/* HDMI op code */
SDVO_CMD_NAME_ENTRY(SDVO_CMD_GET_SUPP_ENCODE),
SDVO_CMD_NAME_ENTRY(SDVO_CMD_GET_ENCODE),
@@ -298,7 +363,7 @@ static void intel_sdvo_debug_write(struct intel_output *intel_output, u8 cmd,
struct intel_sdvo_priv *sdvo_priv = intel_output->dev_priv;
int i;
- DRM_DEBUG_KMS(I915_SDVO, "%s: W: %02X ",
+ DRM_DEBUG_KMS("%s: W: %02X ",
SDVO_NAME(sdvo_priv), cmd);
for (i = 0; i < args_len; i++)
DRM_LOG_KMS("%02X ", ((u8 *)args)[i]);
@@ -351,7 +416,7 @@ static void intel_sdvo_debug_response(struct intel_output *intel_output,
struct intel_sdvo_priv *sdvo_priv = intel_output->dev_priv;
int i;
- DRM_DEBUG_KMS(I915_SDVO, "%s: R: ", SDVO_NAME(sdvo_priv));
+ DRM_DEBUG_KMS("%s: R: ", SDVO_NAME(sdvo_priv));
for (i = 0; i < response_len; i++)
DRM_LOG_KMS("%02X ", ((u8 *)response)[i]);
for (; i < 8; i++)
@@ -668,10 +733,10 @@ static int intel_sdvo_get_clock_rate_mult(struct intel_output *intel_output)
status = intel_sdvo_read_response(intel_output, &response, 1);
if (status != SDVO_CMD_STATUS_SUCCESS) {
- DRM_DEBUG("Couldn't get SDVO clock rate multiplier\n");
+ DRM_DEBUG_KMS("Couldn't get SDVO clock rate multiplier\n");
return SDVO_CLOCK_RATE_MULT_1X;
} else {
- DRM_DEBUG("Current clock rate multiplier: %d\n", response);
+ DRM_DEBUG_KMS("Current clock rate multiplier: %d\n", response);
}
return response;
@@ -945,23 +1010,28 @@ static void intel_sdvo_set_avi_infoframe(struct intel_output *output,
static void intel_sdvo_set_tv_format(struct intel_output *output)
{
+
+ struct intel_sdvo_tv_format format;
struct intel_sdvo_priv *sdvo_priv = output->dev_priv;
- struct intel_sdvo_tv_format *format, unset;
- u8 status;
+ uint32_t format_map, i;
+ uint8_t status;
- format = &sdvo_priv->tv_format;
- memset(&unset, 0, sizeof(unset));
- if (memcmp(format, &unset, sizeof(*format))) {
- DRM_DEBUG("%s: Choosing default TV format of NTSC-M\n",
- SDVO_NAME(sdvo_priv));
- format->ntsc_m = 1;
- intel_sdvo_write_cmd(output, SDVO_CMD_SET_TV_FORMAT, format,
- sizeof(*format));
- status = intel_sdvo_read_response(output, NULL, 0);
- if (status != SDVO_CMD_STATUS_SUCCESS)
- DRM_DEBUG("%s: Failed to set TV format\n",
- SDVO_NAME(sdvo_priv));
- }
+ for (i = 0; i < TV_FORMAT_NUM; i++)
+ if (tv_format_names[i] == sdvo_priv->tv_format_name)
+ break;
+
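+	/* The device takes the TV format as a one-hot bitmask indexed like tv_format_names */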
+ format_map = 1 << i;
+ memset(&format, 0, sizeof(format));
+ memcpy(&format, &format_map, sizeof(format_map) > sizeof(format) ?
+ sizeof(format) : sizeof(format_map));
+
+ intel_sdvo_write_cmd(output, SDVO_CMD_SET_TV_FORMAT, &format_map,
+ sizeof(format));
+
+ status = intel_sdvo_read_response(output, NULL, 0);
+ if (status != SDVO_CMD_STATUS_SUCCESS)
+ DRM_DEBUG_KMS("%s: Failed to set TV format\n",
+ SDVO_NAME(sdvo_priv));
}
static bool intel_sdvo_mode_fixup(struct drm_encoder *encoder,
@@ -1230,8 +1300,8 @@ static void intel_sdvo_dpms(struct drm_encoder *encoder, int mode)
* a given it the status is a success, we succeeded.
*/
if (status == SDVO_CMD_STATUS_SUCCESS && !input1) {
- DRM_DEBUG("First %s output reported failure to sync\n",
- SDVO_NAME(sdvo_priv));
+ DRM_DEBUG_KMS("First %s output reported failure to "
+ "sync\n", SDVO_NAME(sdvo_priv));
}
if (0)
@@ -1326,8 +1396,8 @@ static void intel_sdvo_restore(struct drm_connector *connector)
intel_wait_for_vblank(dev);
status = intel_sdvo_get_trained_inputs(intel_output, &input1, &input2);
if (status == SDVO_CMD_STATUS_SUCCESS && !input1)
- DRM_DEBUG("First %s output reported failure to sync\n",
- SDVO_NAME(sdvo_priv));
+ DRM_DEBUG_KMS("First %s output reported failure to "
+ "sync\n", SDVO_NAME(sdvo_priv));
}
intel_sdvo_set_active_outputs(intel_output, sdvo_priv->save_active_outputs);
@@ -1405,7 +1475,7 @@ int intel_sdvo_supports_hotplug(struct drm_connector *connector)
u8 response[2];
u8 status;
struct intel_output *intel_output;
- DRM_DEBUG("\n");
+ DRM_DEBUG_KMS("\n");
if (!connector)
return 0;
@@ -1478,6 +1548,36 @@ intel_sdvo_multifunc_encoder(struct intel_output *intel_output)
return (caps > 1);
}
+static struct drm_connector *
+intel_find_analog_connector(struct drm_device *dev)
+{
+ struct drm_connector *connector;
+ struct intel_output *intel_output;
+
+ list_for_each_entry(connector, &dev->mode_config.connector_list, head) {
+ intel_output = to_intel_output(connector);
+ if (intel_output->type == INTEL_OUTPUT_ANALOG)
+ return connector;
+ }
+ return NULL;
+}
+
+static int
+intel_analog_is_connected(struct drm_device *dev)
+{
+ struct drm_connector *analog_connector;
+ analog_connector = intel_find_analog_connector(dev);
+
+ if (!analog_connector)
+ return false;
+
+ if (analog_connector->funcs->detect(analog_connector) ==
+ connector_status_disconnected)
+ return false;
+
+ return true;
+}
+
enum drm_connector_status
intel_sdvo_hdmi_sink_detect(struct drm_connector *connector, u16 response)
{
@@ -1488,6 +1588,15 @@ intel_sdvo_hdmi_sink_detect(struct drm_connector *connector, u16 response)
edid = drm_get_edid(&intel_output->base,
intel_output->ddc_bus);
+
+	/* When there is no EDID and no monitor is connected to the VGA
+	 * port, try the CRT DDC to read the EDID for the DVI connector.
+	 */
+ if (edid == NULL &&
+ sdvo_priv->analog_ddc_bus &&
+ !intel_analog_is_connected(intel_output->base.dev))
+ edid = drm_get_edid(&intel_output->base,
+ sdvo_priv->analog_ddc_bus);
if (edid != NULL) {
/* Don't report the output as connected if it's a DVI-I
* connector with a non-digital EDID coming out.
@@ -1516,10 +1625,11 @@ static enum drm_connector_status intel_sdvo_detect(struct drm_connector *connect
struct intel_output *intel_output = to_intel_output(connector);
struct intel_sdvo_priv *sdvo_priv = intel_output->dev_priv;
- intel_sdvo_write_cmd(intel_output, SDVO_CMD_GET_ATTACHED_DISPLAYS, NULL, 0);
+ intel_sdvo_write_cmd(intel_output,
+ SDVO_CMD_GET_ATTACHED_DISPLAYS, NULL, 0);
status = intel_sdvo_read_response(intel_output, &response, 2);
- DRM_DEBUG("SDVO response %d %d\n", response & 0xff, response >> 8);
+ DRM_DEBUG_KMS("SDVO response %d %d\n", response & 0xff, response >> 8);
if (status != SDVO_CMD_STATUS_SUCCESS)
return connector_status_unknown;
@@ -1540,50 +1650,32 @@ static enum drm_connector_status intel_sdvo_detect(struct drm_connector *connect
static void intel_sdvo_get_ddc_modes(struct drm_connector *connector)
{
struct intel_output *intel_output = to_intel_output(connector);
+ struct intel_sdvo_priv *sdvo_priv = intel_output->dev_priv;
+ int num_modes;
/* set the bus switch and get the modes */
- intel_ddc_get_modes(intel_output);
+ num_modes = intel_ddc_get_modes(intel_output);
-#if 0
- struct drm_device *dev = encoder->dev;
- struct drm_i915_private *dev_priv = dev->dev_private;
- /* Mac mini hack. On this device, I get DDC through the analog, which
- * load-detects as disconnected. I fail to DDC through the SDVO DDC,
- * but it does load-detect as connected. So, just steal the DDC bits
- * from analog when we fail at finding it the right way.
+ /*
+ * Mac mini hack. On this device, the DVI-I connector shares one DDC
+ * link between analog and digital outputs. So, if the regular SDVO
+ * DDC fails, check to see if the analog output is disconnected, in
+ * which case we'll look there for the digital DDC data.
*/
- crt = xf86_config->output[0];
- intel_output = crt->driver_private;
- if (intel_output->type == I830_OUTPUT_ANALOG &&
- crt->funcs->detect(crt) == XF86OutputStatusDisconnected) {
- I830I2CInit(pScrn, &intel_output->pDDCBus, GPIOA, "CRTDDC_A");
- edid_mon = xf86OutputGetEDID(crt, intel_output->pDDCBus);
- xf86DestroyI2CBusRec(intel_output->pDDCBus, true, true);
- }
- if (edid_mon) {
- xf86OutputSetEDID(output, edid_mon);
- modes = xf86OutputGetEDIDModes(output);
- }
-#endif
-}
+ if (num_modes == 0 &&
+ sdvo_priv->analog_ddc_bus &&
+ !intel_analog_is_connected(intel_output->base.dev)) {
+ struct i2c_adapter *digital_ddc_bus;
-/**
- * This function checks the current TV format, and chooses a default if
- * it hasn't been set.
- */
-static void
-intel_sdvo_check_tv_format(struct intel_output *output)
-{
- struct intel_sdvo_priv *dev_priv = output->dev_priv;
- struct intel_sdvo_tv_format format;
- uint8_t status;
+		/* Switch to the analog DDC bus and try that */
+ digital_ddc_bus = intel_output->ddc_bus;
+ intel_output->ddc_bus = sdvo_priv->analog_ddc_bus;
- intel_sdvo_write_cmd(output, SDVO_CMD_GET_TV_FORMAT, NULL, 0);
- status = intel_sdvo_read_response(output, &format, sizeof(format));
- if (status != SDVO_CMD_STATUS_SUCCESS)
- return;
+ (void) intel_ddc_get_modes(intel_output);
- memcpy(&dev_priv->tv_format, &format, sizeof(format));
+ intel_output->ddc_bus = digital_ddc_bus;
+ }
}
/*
@@ -1656,17 +1748,26 @@ static void intel_sdvo_get_tv_modes(struct drm_connector *connector)
struct intel_output *output = to_intel_output(connector);
struct intel_sdvo_priv *sdvo_priv = output->dev_priv;
struct intel_sdvo_sdtv_resolution_request tv_res;
- uint32_t reply = 0;
+ uint32_t reply = 0, format_map = 0;
+ int i;
uint8_t status;
- int i = 0;
- intel_sdvo_check_tv_format(output);
/* Read the list of supported input resolutions for the selected TV
* format.
*/
- memset(&tv_res, 0, sizeof(tv_res));
- memcpy(&tv_res, &sdvo_priv->tv_format, sizeof(tv_res));
+ for (i = 0; i < TV_FORMAT_NUM; i++)
+ if (tv_format_names[i] == sdvo_priv->tv_format_name)
+ break;
+
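+	/* Convert the selected format name into the one-hot bitmask the request expects */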
+ format_map = (1 << i);
+ memcpy(&tv_res, &format_map,
+ sizeof(struct intel_sdvo_sdtv_resolution_request) >
+ sizeof(format_map) ? sizeof(format_map) :
+ sizeof(struct intel_sdvo_sdtv_resolution_request));
+
+ intel_sdvo_set_target_output(output, sdvo_priv->controlled_output);
+
intel_sdvo_write_cmd(output, SDVO_CMD_GET_SDTV_RESOLUTION_SUPPORT,
&tv_res, sizeof(tv_res));
status = intel_sdvo_read_response(output, &reply, 3);
@@ -1681,6 +1782,7 @@ static void intel_sdvo_get_tv_modes(struct drm_connector *connector)
if (nmode)
drm_mode_probed_add(connector, nmode);
}
+
}
static void intel_sdvo_get_lvds_modes(struct drm_connector *connector)
@@ -1739,6 +1841,45 @@ static int intel_sdvo_get_modes(struct drm_connector *connector)
return 1;
}
+static
+void intel_sdvo_destroy_enhance_property(struct drm_connector *connector)
+{
+ struct intel_output *intel_output = to_intel_output(connector);
+ struct intel_sdvo_priv *sdvo_priv = intel_output->dev_priv;
+ struct drm_device *dev = connector->dev;
+
+ if (sdvo_priv->is_tv) {
+ if (sdvo_priv->left_property)
+ drm_property_destroy(dev, sdvo_priv->left_property);
+ if (sdvo_priv->right_property)
+ drm_property_destroy(dev, sdvo_priv->right_property);
+ if (sdvo_priv->top_property)
+ drm_property_destroy(dev, sdvo_priv->top_property);
+ if (sdvo_priv->bottom_property)
+ drm_property_destroy(dev, sdvo_priv->bottom_property);
+ if (sdvo_priv->hpos_property)
+ drm_property_destroy(dev, sdvo_priv->hpos_property);
+ if (sdvo_priv->vpos_property)
+ drm_property_destroy(dev, sdvo_priv->vpos_property);
+ }
+ if (sdvo_priv->is_tv) {
+ if (sdvo_priv->saturation_property)
+ drm_property_destroy(dev,
+ sdvo_priv->saturation_property);
+ if (sdvo_priv->contrast_property)
+ drm_property_destroy(dev,
+ sdvo_priv->contrast_property);
+ if (sdvo_priv->hue_property)
+ drm_property_destroy(dev, sdvo_priv->hue_property);
+ }
+ if (sdvo_priv->is_tv || sdvo_priv->is_lvds) {
+ if (sdvo_priv->brightness_property)
+ drm_property_destroy(dev,
+ sdvo_priv->brightness_property);
+ }
+ return;
+}
+
static void intel_sdvo_destroy(struct drm_connector *connector)
{
struct intel_output *intel_output = to_intel_output(connector);
@@ -1748,17 +1889,158 @@ static void intel_sdvo_destroy(struct drm_connector *connector)
intel_i2c_destroy(intel_output->i2c_bus);
if (intel_output->ddc_bus)
intel_i2c_destroy(intel_output->ddc_bus);
+ if (sdvo_priv->analog_ddc_bus)
+ intel_i2c_destroy(sdvo_priv->analog_ddc_bus);
if (sdvo_priv->sdvo_lvds_fixed_mode != NULL)
drm_mode_destroy(connector->dev,
sdvo_priv->sdvo_lvds_fixed_mode);
+ if (sdvo_priv->tv_format_property)
+ drm_property_destroy(connector->dev,
+ sdvo_priv->tv_format_property);
+
+ if (sdvo_priv->is_tv || sdvo_priv->is_lvds)
+ intel_sdvo_destroy_enhance_property(connector);
+
drm_sysfs_connector_remove(connector);
drm_connector_cleanup(connector);
kfree(intel_output);
}
+static int
+intel_sdvo_set_property(struct drm_connector *connector,
+ struct drm_property *property,
+ uint64_t val)
+{
+ struct intel_output *intel_output = to_intel_output(connector);
+ struct intel_sdvo_priv *sdvo_priv = intel_output->dev_priv;
+ struct drm_encoder *encoder = &intel_output->enc;
+ struct drm_crtc *crtc = encoder->crtc;
+ int ret = 0;
+ bool changed = false;
+ uint8_t cmd, status;
+ uint16_t temp_value;
+
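+	/* Record the new value on the DRM property first, then translate it
+	 * into the matching SDVO command below. */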
+ ret = drm_connector_property_set_value(connector, property, val);
+ if (ret < 0)
+ goto out;
+
+ if (property == sdvo_priv->tv_format_property) {
+ if (val >= TV_FORMAT_NUM) {
+ ret = -EINVAL;
+ goto out;
+ }
+ if (sdvo_priv->tv_format_name ==
+ sdvo_priv->tv_format_supported[val])
+ goto out;
+
+ sdvo_priv->tv_format_name = sdvo_priv->tv_format_supported[val];
+ changed = true;
+ }
+
+ if (sdvo_priv->is_tv || sdvo_priv->is_lvds) {
+ cmd = 0;
+ temp_value = val;
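+		/* Margins are kept symmetric: setting one side mirrors the
+		 * other, and the device is programmed with the resulting
+		 * overscan (max scan - margin). */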
+ if (sdvo_priv->left_property == property) {
+ drm_connector_property_set_value(connector,
+ sdvo_priv->right_property, val);
+ if (sdvo_priv->left_margin == temp_value)
+ goto out;
+
+ sdvo_priv->left_margin = temp_value;
+ sdvo_priv->right_margin = temp_value;
+ temp_value = sdvo_priv->max_hscan -
+ sdvo_priv->left_margin;
+ cmd = SDVO_CMD_SET_OVERSCAN_H;
+ } else if (sdvo_priv->right_property == property) {
+ drm_connector_property_set_value(connector,
+ sdvo_priv->left_property, val);
+ if (sdvo_priv->right_margin == temp_value)
+ goto out;
+
+ sdvo_priv->left_margin = temp_value;
+ sdvo_priv->right_margin = temp_value;
+ temp_value = sdvo_priv->max_hscan -
+ sdvo_priv->left_margin;
+ cmd = SDVO_CMD_SET_OVERSCAN_H;
+ } else if (sdvo_priv->top_property == property) {
+ drm_connector_property_set_value(connector,
+ sdvo_priv->bottom_property, val);
+ if (sdvo_priv->top_margin == temp_value)
+ goto out;
+
+ sdvo_priv->top_margin = temp_value;
+ sdvo_priv->bottom_margin = temp_value;
+ temp_value = sdvo_priv->max_vscan -
+ sdvo_priv->top_margin;
+ cmd = SDVO_CMD_SET_OVERSCAN_V;
+ } else if (sdvo_priv->bottom_property == property) {
+ drm_connector_property_set_value(connector,
+ sdvo_priv->top_property, val);
+ if (sdvo_priv->bottom_margin == temp_value)
+ goto out;
+ sdvo_priv->top_margin = temp_value;
+ sdvo_priv->bottom_margin = temp_value;
+ temp_value = sdvo_priv->max_vscan -
+ sdvo_priv->top_margin;
+ cmd = SDVO_CMD_SET_OVERSCAN_V;
+ } else if (sdvo_priv->hpos_property == property) {
+ if (sdvo_priv->cur_hpos == temp_value)
+ goto out;
+
+ cmd = SDVO_CMD_SET_POSITION_H;
+ sdvo_priv->cur_hpos = temp_value;
+ } else if (sdvo_priv->vpos_property == property) {
+ if (sdvo_priv->cur_vpos == temp_value)
+ goto out;
+
+ cmd = SDVO_CMD_SET_POSITION_V;
+ sdvo_priv->cur_vpos = temp_value;
+ } else if (sdvo_priv->saturation_property == property) {
+ if (sdvo_priv->cur_saturation == temp_value)
+ goto out;
+
+ cmd = SDVO_CMD_SET_SATURATION;
+ sdvo_priv->cur_saturation = temp_value;
+ } else if (sdvo_priv->contrast_property == property) {
+ if (sdvo_priv->cur_contrast == temp_value)
+ goto out;
+
+ cmd = SDVO_CMD_SET_CONTRAST;
+ sdvo_priv->cur_contrast = temp_value;
+ } else if (sdvo_priv->hue_property == property) {
+ if (sdvo_priv->cur_hue == temp_value)
+ goto out;
+
+ cmd = SDVO_CMD_SET_HUE;
+ sdvo_priv->cur_hue = temp_value;
+ } else if (sdvo_priv->brightness_property == property) {
+ if (sdvo_priv->cur_brightness == temp_value)
+ goto out;
+
+ cmd = SDVO_CMD_SET_BRIGHTNESS;
+ sdvo_priv->cur_brightness = temp_value;
+ }
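+		/* A control matched; push the new value to the encoder. */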
+ if (cmd) {
+ intel_sdvo_write_cmd(intel_output, cmd, &temp_value, 2);
+ status = intel_sdvo_read_response(intel_output,
+ NULL, 0);
+ if (status != SDVO_CMD_STATUS_SUCCESS) {
+				DRM_DEBUG_KMS("Incorrect SDVO command\n");
+ return -EINVAL;
+ }
+ changed = true;
+ }
+ }
+ if (changed && crtc)
+ drm_crtc_helper_set_mode(crtc, &crtc->mode, crtc->x,
+ crtc->y, crtc->fb);
+out:
+ return ret;
+}
+
static const struct drm_encoder_helper_funcs intel_sdvo_helper_funcs = {
.dpms = intel_sdvo_dpms,
.mode_fixup = intel_sdvo_mode_fixup,
@@ -1773,6 +2055,7 @@ static const struct drm_connector_funcs intel_sdvo_connector_funcs = {
.restore = intel_sdvo_restore,
.detect = intel_sdvo_detect,
.fill_modes = drm_helper_probe_single_connector_modes,
+ .set_property = intel_sdvo_set_property,
.destroy = intel_sdvo_destroy,
};
@@ -1991,6 +2274,8 @@ intel_sdvo_output_setup(struct intel_output *intel_output, uint16_t flags)
sdvo_priv->controlled_output = SDVO_OUTPUT_RGB1;
encoder->encoder_type = DRM_MODE_ENCODER_DAC;
connector->connector_type = DRM_MODE_CONNECTOR_VGA;
+ intel_output->clone_mask = (1 << INTEL_SDVO_NON_TV_CLONE_BIT) |
+ (1 << INTEL_ANALOG_CLONE_BIT);
} else if (flags & SDVO_OUTPUT_LVDS0) {
sdvo_priv->controlled_output = SDVO_OUTPUT_LVDS0;
@@ -2013,10 +2298,9 @@ intel_sdvo_output_setup(struct intel_output *intel_output, uint16_t flags)
sdvo_priv->controlled_output = 0;
memcpy(bytes, &sdvo_priv->caps.output_flags, 2);
- DRM_DEBUG_KMS(I915_SDVO,
- "%s: Unknown SDVO output type (0x%02x%02x)\n",
- SDVO_NAME(sdvo_priv),
- bytes[0], bytes[1]);
+ DRM_DEBUG_KMS("%s: Unknown SDVO output type (0x%02x%02x)\n",
+ SDVO_NAME(sdvo_priv),
+ bytes[0], bytes[1]);
ret = false;
}
intel_output->crtc_mask = (1 << 0) | (1 << 1);
@@ -2029,6 +2313,359 @@ intel_sdvo_output_setup(struct intel_output *intel_output, uint16_t flags)
}
+static void intel_sdvo_tv_create_property(struct drm_connector *connector)
+{
+ struct intel_output *intel_output = to_intel_output(connector);
+ struct intel_sdvo_priv *sdvo_priv = intel_output->dev_priv;
+ struct intel_sdvo_tv_format format;
+ uint32_t format_map, i;
+ uint8_t status;
+
+ intel_sdvo_set_target_output(intel_output,
+ sdvo_priv->controlled_output);
+
+ intel_sdvo_write_cmd(intel_output,
+ SDVO_CMD_GET_SUPPORTED_TV_FORMATS, NULL, 0);
+ status = intel_sdvo_read_response(intel_output,
+ &format, sizeof(format));
+ if (status != SDVO_CMD_STATUS_SUCCESS)
+ return;
+
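+	/* Fold the reply into a 32-bit format bitmask, copying no more bytes
+	 * than either side provides. */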
+ memcpy(&format_map, &format, sizeof(format) > sizeof(format_map) ?
+ sizeof(format_map) : sizeof(format));
+
+ if (format_map == 0)
+ return;
+
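+	/* Collect the names of the supported formats and expose them as an
+	 * enum property. */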
+ sdvo_priv->format_supported_num = 0;
+ for (i = 0 ; i < TV_FORMAT_NUM; i++)
+ if (format_map & (1 << i)) {
+ sdvo_priv->tv_format_supported
+ [sdvo_priv->format_supported_num++] =
+ tv_format_names[i];
+ }
+
+ sdvo_priv->tv_format_property =
+ drm_property_create(
+ connector->dev, DRM_MODE_PROP_ENUM,
+ "mode", sdvo_priv->format_supported_num);
+
+ for (i = 0; i < sdvo_priv->format_supported_num; i++)
+ drm_property_add_enum(
+ sdvo_priv->tv_format_property, i,
+ i, sdvo_priv->tv_format_supported[i]);
+
+ sdvo_priv->tv_format_name = sdvo_priv->tv_format_supported[0];
+ drm_connector_attach_property(
+ connector, sdvo_priv->tv_format_property, 0);
+}
+
+static void intel_sdvo_create_enhance_property(struct drm_connector *connector)
+{
+ struct intel_output *intel_output = to_intel_output(connector);
+ struct intel_sdvo_priv *sdvo_priv = intel_output->dev_priv;
+ struct intel_sdvo_enhancements_reply sdvo_data;
+ struct drm_device *dev = connector->dev;
+ uint8_t status;
+ uint16_t response, data_value[2];
+
+ intel_sdvo_write_cmd(intel_output, SDVO_CMD_GET_SUPPORTED_ENHANCEMENTS,
+ NULL, 0);
+ status = intel_sdvo_read_response(intel_output, &sdvo_data,
+ sizeof(sdvo_data));
+ if (status != SDVO_CMD_STATUS_SUCCESS) {
+		DRM_DEBUG_KMS("Incorrect response returned\n");
+ return;
+ }
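+	/* The reply's first two bytes form a bitmask of the supported
+	 * enhancement controls. */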
+ response = *((uint16_t *)&sdvo_data);
+ if (!response) {
+ DRM_DEBUG_KMS("No enhancement is supported\n");
+ return;
+ }
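+	/* For each supported control, read its maximum and current value and
+	 * attach a 0..max DRM range property initialised to the current
+	 * setting. */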
+ if (sdvo_priv->is_tv) {
+		/* When horizontal overscan is supported, add the left/right
+		 * margin properties.
+		 */
+ if (sdvo_data.overscan_h) {
+ intel_sdvo_write_cmd(intel_output,
+ SDVO_CMD_GET_MAX_OVERSCAN_H, NULL, 0);
+ status = intel_sdvo_read_response(intel_output,
+ &data_value, 4);
+ if (status != SDVO_CMD_STATUS_SUCCESS) {
+ DRM_DEBUG_KMS("Incorrect SDVO max "
+ "h_overscan\n");
+ return;
+ }
+ intel_sdvo_write_cmd(intel_output,
+ SDVO_CMD_GET_OVERSCAN_H, NULL, 0);
+ status = intel_sdvo_read_response(intel_output,
+ &response, 2);
+ if (status != SDVO_CMD_STATUS_SUCCESS) {
+ DRM_DEBUG_KMS("Incorrect SDVO h_overscan\n");
+ return;
+ }
+ sdvo_priv->max_hscan = data_value[0];
+ sdvo_priv->left_margin = data_value[0] - response;
+ sdvo_priv->right_margin = sdvo_priv->left_margin;
+ sdvo_priv->left_property =
+ drm_property_create(dev, DRM_MODE_PROP_RANGE,
+ "left_margin", 2);
+ sdvo_priv->left_property->values[0] = 0;
+ sdvo_priv->left_property->values[1] = data_value[0];
+ drm_connector_attach_property(connector,
+ sdvo_priv->left_property,
+ sdvo_priv->left_margin);
+ sdvo_priv->right_property =
+ drm_property_create(dev, DRM_MODE_PROP_RANGE,
+ "right_margin", 2);
+ sdvo_priv->right_property->values[0] = 0;
+ sdvo_priv->right_property->values[1] = data_value[0];
+ drm_connector_attach_property(connector,
+ sdvo_priv->right_property,
+ sdvo_priv->right_margin);
+ DRM_DEBUG_KMS("h_overscan: max %d, "
+ "default %d, current %d\n",
+ data_value[0], data_value[1], response);
+ }
+ if (sdvo_data.overscan_v) {
+ intel_sdvo_write_cmd(intel_output,
+ SDVO_CMD_GET_MAX_OVERSCAN_V, NULL, 0);
+ status = intel_sdvo_read_response(intel_output,
+ &data_value, 4);
+ if (status != SDVO_CMD_STATUS_SUCCESS) {
+ DRM_DEBUG_KMS("Incorrect SDVO max "
+ "v_overscan\n");
+ return;
+ }
+ intel_sdvo_write_cmd(intel_output,
+ SDVO_CMD_GET_OVERSCAN_V, NULL, 0);
+ status = intel_sdvo_read_response(intel_output,
+ &response, 2);
+ if (status != SDVO_CMD_STATUS_SUCCESS) {
+ DRM_DEBUG_KMS("Incorrect SDVO v_overscan\n");
+ return;
+ }
+ sdvo_priv->max_vscan = data_value[0];
+ sdvo_priv->top_margin = data_value[0] - response;
+ sdvo_priv->bottom_margin = sdvo_priv->top_margin;
+ sdvo_priv->top_property =
+ drm_property_create(dev, DRM_MODE_PROP_RANGE,
+ "top_margin", 2);
+ sdvo_priv->top_property->values[0] = 0;
+ sdvo_priv->top_property->values[1] = data_value[0];
+ drm_connector_attach_property(connector,
+ sdvo_priv->top_property,
+ sdvo_priv->top_margin);
+ sdvo_priv->bottom_property =
+ drm_property_create(dev, DRM_MODE_PROP_RANGE,
+ "bottom_margin", 2);
+ sdvo_priv->bottom_property->values[0] = 0;
+ sdvo_priv->bottom_property->values[1] = data_value[0];
+ drm_connector_attach_property(connector,
+ sdvo_priv->bottom_property,
+ sdvo_priv->bottom_margin);
+ DRM_DEBUG_KMS("v_overscan: max %d, "
+ "default %d, current %d\n",
+ data_value[0], data_value[1], response);
+ }
+ if (sdvo_data.position_h) {
+ intel_sdvo_write_cmd(intel_output,
+ SDVO_CMD_GET_MAX_POSITION_H, NULL, 0);
+ status = intel_sdvo_read_response(intel_output,
+ &data_value, 4);
+ if (status != SDVO_CMD_STATUS_SUCCESS) {
+ DRM_DEBUG_KMS("Incorrect SDVO Max h_pos\n");
+ return;
+ }
+ intel_sdvo_write_cmd(intel_output,
+ SDVO_CMD_GET_POSITION_H, NULL, 0);
+ status = intel_sdvo_read_response(intel_output,
+ &response, 2);
+ if (status != SDVO_CMD_STATUS_SUCCESS) {
+				DRM_DEBUG_KMS("Incorrect SDVO get h_position\n");
+ return;
+ }
+ sdvo_priv->max_hpos = data_value[0];
+ sdvo_priv->cur_hpos = response;
+ sdvo_priv->hpos_property =
+ drm_property_create(dev, DRM_MODE_PROP_RANGE,
+ "hpos", 2);
+ sdvo_priv->hpos_property->values[0] = 0;
+ sdvo_priv->hpos_property->values[1] = data_value[0];
+ drm_connector_attach_property(connector,
+ sdvo_priv->hpos_property,
+ sdvo_priv->cur_hpos);
+ DRM_DEBUG_KMS("h_position: max %d, "
+ "default %d, current %d\n",
+ data_value[0], data_value[1], response);
+ }
+ if (sdvo_data.position_v) {
+ intel_sdvo_write_cmd(intel_output,
+ SDVO_CMD_GET_MAX_POSITION_V, NULL, 0);
+ status = intel_sdvo_read_response(intel_output,
+ &data_value, 4);
+ if (status != SDVO_CMD_STATUS_SUCCESS) {
+ DRM_DEBUG_KMS("Incorrect SDVO Max v_pos\n");
+ return;
+ }
+ intel_sdvo_write_cmd(intel_output,
+ SDVO_CMD_GET_POSITION_V, NULL, 0);
+ status = intel_sdvo_read_response(intel_output,
+ &response, 2);
+ if (status != SDVO_CMD_STATUS_SUCCESS) {
+				DRM_DEBUG_KMS("Incorrect SDVO get v_position\n");
+ return;
+ }
+ sdvo_priv->max_vpos = data_value[0];
+ sdvo_priv->cur_vpos = response;
+ sdvo_priv->vpos_property =
+ drm_property_create(dev, DRM_MODE_PROP_RANGE,
+ "vpos", 2);
+ sdvo_priv->vpos_property->values[0] = 0;
+ sdvo_priv->vpos_property->values[1] = data_value[0];
+ drm_connector_attach_property(connector,
+ sdvo_priv->vpos_property,
+ sdvo_priv->cur_vpos);
+ DRM_DEBUG_KMS("v_position: max %d, "
+ "default %d, current %d\n",
+ data_value[0], data_value[1], response);
+ }
+ }
+ if (sdvo_priv->is_tv) {
+ if (sdvo_data.saturation) {
+ intel_sdvo_write_cmd(intel_output,
+ SDVO_CMD_GET_MAX_SATURATION, NULL, 0);
+ status = intel_sdvo_read_response(intel_output,
+ &data_value, 4);
+ if (status != SDVO_CMD_STATUS_SUCCESS) {
+ DRM_DEBUG_KMS("Incorrect SDVO Max sat\n");
+ return;
+ }
+ intel_sdvo_write_cmd(intel_output,
+ SDVO_CMD_GET_SATURATION, NULL, 0);
+ status = intel_sdvo_read_response(intel_output,
+ &response, 2);
+ if (status != SDVO_CMD_STATUS_SUCCESS) {
+ DRM_DEBUG_KMS("Incorrect SDVO get sat\n");
+ return;
+ }
+ sdvo_priv->max_saturation = data_value[0];
+ sdvo_priv->cur_saturation = response;
+ sdvo_priv->saturation_property =
+ drm_property_create(dev, DRM_MODE_PROP_RANGE,
+ "saturation", 2);
+ sdvo_priv->saturation_property->values[0] = 0;
+ sdvo_priv->saturation_property->values[1] =
+ data_value[0];
+ drm_connector_attach_property(connector,
+ sdvo_priv->saturation_property,
+ sdvo_priv->cur_saturation);
+ DRM_DEBUG_KMS("saturation: max %d, "
+ "default %d, current %d\n",
+ data_value[0], data_value[1], response);
+ }
+ if (sdvo_data.contrast) {
+ intel_sdvo_write_cmd(intel_output,
+ SDVO_CMD_GET_MAX_CONTRAST, NULL, 0);
+ status = intel_sdvo_read_response(intel_output,
+ &data_value, 4);
+ if (status != SDVO_CMD_STATUS_SUCCESS) {
+ DRM_DEBUG_KMS("Incorrect SDVO Max contrast\n");
+ return;
+ }
+ intel_sdvo_write_cmd(intel_output,
+ SDVO_CMD_GET_CONTRAST, NULL, 0);
+ status = intel_sdvo_read_response(intel_output,
+ &response, 2);
+ if (status != SDVO_CMD_STATUS_SUCCESS) {
+ DRM_DEBUG_KMS("Incorrect SDVO get contrast\n");
+ return;
+ }
+ sdvo_priv->max_contrast = data_value[0];
+ sdvo_priv->cur_contrast = response;
+ sdvo_priv->contrast_property =
+ drm_property_create(dev, DRM_MODE_PROP_RANGE,
+ "contrast", 2);
+ sdvo_priv->contrast_property->values[0] = 0;
+ sdvo_priv->contrast_property->values[1] = data_value[0];
+ drm_connector_attach_property(connector,
+ sdvo_priv->contrast_property,
+ sdvo_priv->cur_contrast);
+ DRM_DEBUG_KMS("contrast: max %d, "
+ "default %d, current %d\n",
+ data_value[0], data_value[1], response);
+ }
+ if (sdvo_data.hue) {
+ intel_sdvo_write_cmd(intel_output,
+ SDVO_CMD_GET_MAX_HUE, NULL, 0);
+ status = intel_sdvo_read_response(intel_output,
+ &data_value, 4);
+ if (status != SDVO_CMD_STATUS_SUCCESS) {
+ DRM_DEBUG_KMS("Incorrect SDVO Max hue\n");
+ return;
+ }
+ intel_sdvo_write_cmd(intel_output,
+ SDVO_CMD_GET_HUE, NULL, 0);
+ status = intel_sdvo_read_response(intel_output,
+ &response, 2);
+ if (status != SDVO_CMD_STATUS_SUCCESS) {
+ DRM_DEBUG_KMS("Incorrect SDVO get hue\n");
+ return;
+ }
+ sdvo_priv->max_hue = data_value[0];
+ sdvo_priv->cur_hue = response;
+ sdvo_priv->hue_property =
+ drm_property_create(dev, DRM_MODE_PROP_RANGE,
+ "hue", 2);
+ sdvo_priv->hue_property->values[0] = 0;
+ sdvo_priv->hue_property->values[1] =
+ data_value[0];
+ drm_connector_attach_property(connector,
+ sdvo_priv->hue_property,
+ sdvo_priv->cur_hue);
+ DRM_DEBUG_KMS("hue: max %d, default %d, current %d\n",
+ data_value[0], data_value[1], response);
+ }
+ }
+ if (sdvo_priv->is_tv || sdvo_priv->is_lvds) {
+ if (sdvo_data.brightness) {
+ intel_sdvo_write_cmd(intel_output,
+ SDVO_CMD_GET_MAX_BRIGHTNESS, NULL, 0);
+ status = intel_sdvo_read_response(intel_output,
+ &data_value, 4);
+ if (status != SDVO_CMD_STATUS_SUCCESS) {
+ DRM_DEBUG_KMS("Incorrect SDVO Max bright\n");
+ return;
+ }
+ intel_sdvo_write_cmd(intel_output,
+ SDVO_CMD_GET_BRIGHTNESS, NULL, 0);
+ status = intel_sdvo_read_response(intel_output,
+ &response, 2);
+ if (status != SDVO_CMD_STATUS_SUCCESS) {
+				DRM_DEBUG_KMS("Incorrect SDVO get brightness\n");
+ return;
+ }
+ sdvo_priv->max_brightness = data_value[0];
+ sdvo_priv->cur_brightness = response;
+ sdvo_priv->brightness_property =
+ drm_property_create(dev, DRM_MODE_PROP_RANGE,
+ "brightness", 2);
+ sdvo_priv->brightness_property->values[0] = 0;
+ sdvo_priv->brightness_property->values[1] =
+ data_value[0];
+ drm_connector_attach_property(connector,
+ sdvo_priv->brightness_property,
+ sdvo_priv->cur_brightness);
+ DRM_DEBUG_KMS("brightness: max %d, "
+ "default %d, current %d\n",
+ data_value[0], data_value[1], response);
+ }
+ }
+ return;
+}
+
bool intel_sdvo_init(struct drm_device *dev, int output_device)
{
struct drm_connector *connector;
@@ -2066,18 +2703,22 @@ bool intel_sdvo_init(struct drm_device *dev, int output_device)
/* Read the regs to test if we can talk to the device */
for (i = 0; i < 0x40; i++) {
if (!intel_sdvo_read_byte(intel_output, i, &ch[i])) {
- DRM_DEBUG_KMS(I915_SDVO,
- "No SDVO device found on SDVO%c\n",
+ DRM_DEBUG_KMS("No SDVO device found on SDVO%c\n",
output_device == SDVOB ? 'B' : 'C');
goto err_i2c;
}
}
/* setup the DDC bus. */
- if (output_device == SDVOB)
+ if (output_device == SDVOB) {
intel_output->ddc_bus = intel_i2c_create(dev, GPIOE, "SDVOB DDC BUS");
- else
+ sdvo_priv->analog_ddc_bus = intel_i2c_create(dev, GPIOA,
+ "SDVOB/VGA DDC BUS");
+ } else {
intel_output->ddc_bus = intel_i2c_create(dev, GPIOE, "SDVOC DDC BUS");
+ sdvo_priv->analog_ddc_bus = intel_i2c_create(dev, GPIOA,
+ "SDVOC/VGA DDC BUS");
+ }
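+	/* The extra GPIOA bus is named as the VGA DDC bus: it lets a VGA
+	 * monitor sharing this SDVO port be probed as well. */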
if (intel_output->ddc_bus == NULL)
goto err_i2c;
@@ -2090,7 +2731,7 @@ bool intel_sdvo_init(struct drm_device *dev, int output_device)
if (intel_sdvo_output_setup(intel_output,
sdvo_priv->caps.output_flags) != true) {
- DRM_DEBUG("SDVO output failed to setup on SDVO%c\n",
+ DRM_DEBUG_KMS("SDVO output failed to setup on SDVO%c\n",
output_device == SDVOB ? 'B' : 'C');
goto err_i2c;
}
@@ -2111,6 +2752,12 @@ bool intel_sdvo_init(struct drm_device *dev, int output_device)
drm_encoder_helper_add(&intel_output->enc, &intel_sdvo_helper_funcs);
drm_mode_connector_attach_encoder(&intel_output->base, &intel_output->enc);
+ if (sdvo_priv->is_tv)
+ intel_sdvo_tv_create_property(connector);
+
+ if (sdvo_priv->is_tv || sdvo_priv->is_lvds)
+ intel_sdvo_create_enhance_property(connector);
+
drm_sysfs_connector_add(connector);
intel_sdvo_select_ddc_bus(sdvo_priv);
@@ -2123,7 +2770,7 @@ bool intel_sdvo_init(struct drm_device *dev, int output_device)
&sdvo_priv->pixel_clock_max);
- DRM_DEBUG_KMS(I915_SDVO, "%s device VID/DID: %02X:%02X.%02X, "
+ DRM_DEBUG_KMS("%s device VID/DID: %02X:%02X.%02X, "
"clock range %dMHz - %dMHz, "
"input 1: %c, input 2: %c, "
"output 1: %c, output 2: %c\n",
@@ -2143,6 +2790,8 @@ bool intel_sdvo_init(struct drm_device *dev, int output_device)
return true;
err_i2c:
+ if (sdvo_priv->analog_ddc_bus != NULL)
+ intel_i2c_destroy(sdvo_priv->analog_ddc_bus);
if (intel_output->ddc_bus != NULL)
intel_i2c_destroy(intel_output->ddc_bus);
if (intel_output->i2c_bus != NULL)
diff --git a/drivers/gpu/drm/i915/intel_tv.c b/drivers/gpu/drm/i915/intel_tv.c
index 5b1c9e9fdba..9ca917931af 100644
--- a/drivers/gpu/drm/i915/intel_tv.c
+++ b/drivers/gpu/drm/i915/intel_tv.c
@@ -1082,7 +1082,8 @@ intel_tv_mode_valid(struct drm_connector *connector, struct drm_display_mode *mo
const struct tv_mode *tv_mode = intel_tv_mode_find(intel_output);
/* Ensure TV refresh is close to desired refresh */
- if (tv_mode && abs(tv_mode->refresh - drm_mode_vrefresh(mode)) < 10)
+ if (tv_mode && abs(tv_mode->refresh - drm_mode_vrefresh(mode) * 1000)
+ < 1000)
return MODE_OK;
return MODE_CLOCK_RANGE;
}
@@ -1437,6 +1438,35 @@ intel_tv_detect_type (struct drm_crtc *crtc, struct intel_output *intel_output)
return type;
}
+/*
+ * Set the TV format according to the connector type: a Component
+ * connector, for example, should not be assigned an NTSC or PAL format.
+ */
+static void intel_tv_find_better_format(struct drm_connector *connector)
+{
+ struct intel_output *intel_output = to_intel_output(connector);
+ struct intel_tv_priv *tv_priv = intel_output->dev_priv;
+ const struct tv_mode *tv_mode = intel_tv_mode_find(intel_output);
+ int i;
+
+ if ((tv_priv->type == DRM_MODE_CONNECTOR_Component) ==
+ tv_mode->component_only)
+ return;
+
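+	/* Pick the first TV mode whose component_only flag matches the
+	 * connector type. */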
+	for (i = 0; i < ARRAY_SIZE(tv_modes); i++) {
+ tv_mode = tv_modes + i;
+
+ if ((tv_priv->type == DRM_MODE_CONNECTOR_Component) ==
+ tv_mode->component_only)
+ break;
+ }
+
+ tv_priv->tv_format = tv_mode->name;
+ drm_connector_property_set_value(connector,
+ connector->dev->mode_config.tv_mode_property, i);
+}
+
/**
* Detect the TV connection.
*
@@ -1473,6 +1503,7 @@ intel_tv_detect(struct drm_connector *connector)
if (type < 0)
return connector_status_disconnected;
+ intel_tv_find_better_format(connector);
return connector_status_connected;
}