Diffstat (limited to 'drivers/gpu/drm/i915')
-rw-r--r--  drivers/gpu/drm/i915/i915_debugfs.c        |  22
-rw-r--r--  drivers/gpu/drm/i915/i915_dma.c            |  21
-rw-r--r--  drivers/gpu/drm/i915/i915_drv.h            |  66
-rw-r--r--  drivers/gpu/drm/i915/i915_gem.c            | 264
-rw-r--r--  drivers/gpu/drm/i915/i915_gem_dmabuf.c     | 151
-rw-r--r--  drivers/gpu/drm/i915/i915_gem_execbuffer.c |  13
-rw-r--r--  drivers/gpu/drm/i915/i915_gem_gtt.c        | 125
-rw-r--r--  drivers/gpu/drm/i915/i915_gem_tiling.c     |  16
-rw-r--r--  drivers/gpu/drm/i915/i915_irq.c            |  36
-rw-r--r--  drivers/gpu/drm/i915/i915_reg.h            |   4
-rw-r--r--  drivers/gpu/drm/i915/i915_sysfs.c          | 203
-rw-r--r--  drivers/gpu/drm/i915/intel_display.c       | 300
-rw-r--r--  drivers/gpu/drm/i915/intel_dp.c            | 157
-rw-r--r--  drivers/gpu/drm/i915/intel_drv.h           |   2
-rw-r--r--  drivers/gpu/drm/i915/intel_hdmi.c          |  22
-rw-r--r--  drivers/gpu/drm/i915/intel_lvds.c          |   8
-rw-r--r--  drivers/gpu/drm/i915/intel_opregion.c      |  23
-rw-r--r--  drivers/gpu/drm/i915/intel_panel.c         |  31
-rw-r--r--  drivers/gpu/drm/i915/intel_pm.c            |  47
-rw-r--r--  drivers/gpu/drm/i915/intel_ringbuffer.c    |   9
-rw-r--r--  drivers/gpu/drm/i915/intel_sprite.c        |   4
21 files changed, 939 insertions(+), 585 deletions(-)
diff --git a/drivers/gpu/drm/i915/i915_debugfs.c b/drivers/gpu/drm/i915/i915_debugfs.c
index 274a3280cdc..7a4b3f3aad3 100644
--- a/drivers/gpu/drm/i915/i915_debugfs.c
+++ b/drivers/gpu/drm/i915/i915_debugfs.c
@@ -914,7 +914,7 @@ static int i915_cur_delayinfo(struct seq_file *m, void *unused)
seq_printf(m, "Render p-state limit: %d\n",
rp_state_limits & 0xff);
seq_printf(m, "CAGF: %dMHz\n", ((rpstat & GEN6_CAGF_MASK) >>
- GEN6_CAGF_SHIFT) * 50);
+ GEN6_CAGF_SHIFT) * GT_FREQUENCY_MULTIPLIER);
seq_printf(m, "RP CUR UP EI: %dus\n", rpupei &
GEN6_CURICONT_MASK);
seq_printf(m, "RP CUR UP: %dus\n", rpcurup &
@@ -930,15 +930,15 @@ static int i915_cur_delayinfo(struct seq_file *m, void *unused)
max_freq = (rp_state_cap & 0xff0000) >> 16;
seq_printf(m, "Lowest (RPN) frequency: %dMHz\n",
- max_freq * 50);
+ max_freq * GT_FREQUENCY_MULTIPLIER);
max_freq = (rp_state_cap & 0xff00) >> 8;
seq_printf(m, "Nominal (RP1) frequency: %dMHz\n",
- max_freq * 50);
+ max_freq * GT_FREQUENCY_MULTIPLIER);
max_freq = rp_state_cap & 0xff;
seq_printf(m, "Max non-overclocked (RP0) frequency: %dMHz\n",
- max_freq * 50);
+ max_freq * GT_FREQUENCY_MULTIPLIER);
} else {
seq_printf(m, "no P-state info available\n");
}
@@ -1292,7 +1292,7 @@ static int i915_ring_freq_table(struct seq_file *m, void *unused)
continue;
}
ia_freq = I915_READ(GEN6_PCODE_DATA);
- seq_printf(m, "%d\t\t%d\n", gpu_freq * 50, ia_freq * 100);
+ seq_printf(m, "%d\t\t%d\n", gpu_freq * GT_FREQUENCY_MULTIPLIER, ia_freq * 100);
}
mutex_unlock(&dev->struct_mutex);
@@ -1717,7 +1717,7 @@ i915_max_freq_read(struct file *filp,
return ret;
len = snprintf(buf, sizeof(buf),
- "max freq: %d\n", dev_priv->rps.max_delay * 50);
+ "max freq: %d\n", dev_priv->rps.max_delay * GT_FREQUENCY_MULTIPLIER);
mutex_unlock(&dev->struct_mutex);
if (len > sizeof(buf))
@@ -1760,9 +1760,9 @@ i915_max_freq_write(struct file *filp,
/*
* Turbo will still be enabled, but won't go above the set value.
*/
- dev_priv->rps.max_delay = val / 50;
+ dev_priv->rps.max_delay = val / GT_FREQUENCY_MULTIPLIER;
- gen6_set_rps(dev, val / 50);
+ gen6_set_rps(dev, val / GT_FREQUENCY_MULTIPLIER);
mutex_unlock(&dev->struct_mutex);
return cnt;
@@ -1793,7 +1793,7 @@ i915_min_freq_read(struct file *filp, char __user *ubuf, size_t max,
return ret;
len = snprintf(buf, sizeof(buf),
- "min freq: %d\n", dev_priv->rps.min_delay * 50);
+ "min freq: %d\n", dev_priv->rps.min_delay * GT_FREQUENCY_MULTIPLIER);
mutex_unlock(&dev->struct_mutex);
if (len > sizeof(buf))
@@ -1834,9 +1834,9 @@ i915_min_freq_write(struct file *filp, const char __user *ubuf, size_t cnt,
/*
* Turbo will still be enabled, but won't go below the set value.
*/
- dev_priv->rps.min_delay = val / 50;
+ dev_priv->rps.min_delay = val / GT_FREQUENCY_MULTIPLIER;
- gen6_set_rps(dev, val / 50);
+ gen6_set_rps(dev, val / GT_FREQUENCY_MULTIPLIER);
mutex_unlock(&dev->struct_mutex);
return cnt;
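
[Reviewer note] Every frequency shown or parsed in this file is an RP "delay"
value quantized in 50 MHz steps on gen6+; the patch names that step
GT_FREQUENCY_MULTIPLIER instead of repeating a bare 50. A minimal sketch of
the two conversions (the helpers below are illustrative, not part of the
patch):

static inline int gt_delay_to_mhz(u8 delay)
{
	/* RP delay units are 50 MHz steps on gen6+ */
	return delay * GT_FREQUENCY_MULTIPLIER;
}

static inline u8 gt_mhz_to_delay(int mhz)
{
	return mhz / GT_FREQUENCY_MULTIPLIER;
}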
diff --git a/drivers/gpu/drm/i915/i915_dma.c b/drivers/gpu/drm/i915/i915_dma.c
index 2c09900e326..ffbc9156c79 100644
--- a/drivers/gpu/drm/i915/i915_dma.c
+++ b/drivers/gpu/drm/i915/i915_dma.c
@@ -1461,7 +1461,7 @@ int i915_driver_load(struct drm_device *dev, unsigned long flags)
{
struct drm_i915_private *dev_priv;
struct intel_device_info *info;
- int ret = 0, mmio_bar;
+ int ret = 0, mmio_bar, mmio_size;
uint32_t aperture_size;
info = (struct intel_device_info *) flags;
@@ -1526,7 +1526,19 @@ int i915_driver_load(struct drm_device *dev, unsigned long flags)
dma_set_coherent_mask(&dev->pdev->dev, DMA_BIT_MASK(32));
mmio_bar = IS_GEN2(dev) ? 1 : 0;
- dev_priv->regs = pci_iomap(dev->pdev, mmio_bar, 0);
+ /* Before gen4, the registers and the GTT are behind different BARs.
+ * However, from gen4 onwards, the registers and the GTT are shared
+ * in the same BAR, so we want to restrict this ioremap from
+ * clobbering the GTT, which we want to ioremap_wc instead. Fortunately,
+ * the register BAR remains the same size for all the earlier
+ * generations up to Ironlake.
+ */
+ if (info->gen < 5)
+ mmio_size = 512*1024;
+ else
+ mmio_size = 2*1024*1024;
+
+ dev_priv->regs = pci_iomap(dev->pdev, mmio_bar, mmio_size);
if (!dev_priv->regs) {
DRM_ERROR("failed to map registers\n");
ret = -EIO;
@@ -1607,6 +1619,7 @@ int i915_driver_load(struct drm_device *dev, unsigned long flags)
spin_lock_init(&dev_priv->irq_lock);
spin_lock_init(&dev_priv->error_lock);
spin_lock_init(&dev_priv->rps.lock);
+ spin_lock_init(&dev_priv->dpio_lock);
if (IS_IVYBRIDGE(dev) || IS_HASWELL(dev))
dev_priv->num_pipe = 3;
@@ -1855,8 +1868,8 @@ struct drm_ioctl_desc i915_ioctls[] = {
DRM_IOCTL_DEF_DRV(I915_GEM_PIN, i915_gem_pin_ioctl, DRM_AUTH|DRM_ROOT_ONLY|DRM_UNLOCKED),
DRM_IOCTL_DEF_DRV(I915_GEM_UNPIN, i915_gem_unpin_ioctl, DRM_AUTH|DRM_ROOT_ONLY|DRM_UNLOCKED),
DRM_IOCTL_DEF_DRV(I915_GEM_BUSY, i915_gem_busy_ioctl, DRM_AUTH|DRM_UNLOCKED),
- DRM_IOCTL_DEF_DRV(I915_GEM_SET_CACHEING, i915_gem_set_cacheing_ioctl, DRM_UNLOCKED),
- DRM_IOCTL_DEF_DRV(I915_GEM_GET_CACHEING, i915_gem_get_cacheing_ioctl, DRM_UNLOCKED),
+ DRM_IOCTL_DEF_DRV(I915_GEM_SET_CACHING, i915_gem_set_caching_ioctl, DRM_UNLOCKED),
+ DRM_IOCTL_DEF_DRV(I915_GEM_GET_CACHING, i915_gem_get_caching_ioctl, DRM_UNLOCKED),
DRM_IOCTL_DEF_DRV(I915_GEM_THROTTLE, i915_gem_throttle_ioctl, DRM_AUTH|DRM_UNLOCKED),
DRM_IOCTL_DEF_DRV(I915_GEM_ENTERVT, i915_gem_entervt_ioctl, DRM_AUTH|DRM_MASTER|DRM_ROOT_ONLY|DRM_UNLOCKED),
DRM_IOCTL_DEF_DRV(I915_GEM_LEAVEVT, i915_gem_leavevt_ioctl, DRM_AUTH|DRM_MASTER|DRM_ROOT_ONLY|DRM_UNLOCKED),
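
[Reviewer note] pci_iomap()'s third argument is a maximum length: 0 maps the
whole BAR, a non-zero value caps the mapping. Bounding it here keeps the
register ioremap off the GTT half of the shared gen4+ BAR. Condensed sketch
of the resulting call (equivalent to the hunk above):

	mmio_size = info->gen < 5 ? 512 * 1024 : 2 * 1024 * 1024;
	dev_priv->regs = pci_iomap(dev->pdev, mmio_bar, mmio_size);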
diff --git a/drivers/gpu/drm/i915/i915_drv.h b/drivers/gpu/drm/i915/i915_drv.h
index 26c6959a524..4f2831aa5fe 100644
--- a/drivers/gpu/drm/i915/i915_drv.h
+++ b/drivers/gpu/drm/i915/i915_drv.h
@@ -899,9 +899,29 @@ enum i915_cache_level {
I915_CACHE_LLC_MLC, /* gen6+, in docs at least! */
};
+struct drm_i915_gem_object_ops {
+ /* Interface between the GEM object and its backing storage.
+ * get_pages() is called once, prior to binding the associated set of
+ * pages into the GTT, and put_pages() is called after we no longer
+ * need them. As we expect there to be an associated cost with
+ * migrating pages between the backing storage
+ * and making them available for the GPU (e.g. clflush), we may hold
+ * onto the pages after they are no longer referenced by the GPU
+ * in case they may be used again shortly (for example migrating the
+ * pages to a different memory domain within the GTT). put_pages()
+ * will therefore most likely be called when the object itself is
+ * being released or under memory pressure (where we attempt to
+ * reap pages for the shrinker).
+ */
+ int (*get_pages)(struct drm_i915_gem_object *);
+ void (*put_pages)(struct drm_i915_gem_object *);
+};
+
struct drm_i915_gem_object {
struct drm_gem_object base;
+ const struct drm_i915_gem_object_ops *ops;
+
/** Current space allocated to this object in the GTT, if any. */
struct drm_mm_node *gtt_space;
struct list_head gtt_list;
@@ -986,17 +1006,12 @@ struct drm_i915_gem_object {
unsigned int has_aliasing_ppgtt_mapping:1;
unsigned int has_global_gtt_mapping:1;
+ unsigned int has_dma_mapping:1;
- struct page **pages;
-
- /**
- * DMAR support
- */
- struct scatterlist *sg_list;
- int num_sg;
+ struct sg_table *pages;
+ int pages_pin_count;
/* prime dma-buf support */
- struct sg_table *sg_table;
void *dma_buf_vmapping;
int vmapping_count;
@@ -1163,6 +1178,8 @@ struct drm_i915_file_private {
#define HAS_L3_GPU_CACHE(dev) (IS_IVYBRIDGE(dev) || IS_HASWELL(dev))
+#define GT_FREQUENCY_MULTIPLIER 50
+
#include "i915_trace.h"
/**
@@ -1284,10 +1301,10 @@ int i915_gem_unpin_ioctl(struct drm_device *dev, void *data,
struct drm_file *file_priv);
int i915_gem_busy_ioctl(struct drm_device *dev, void *data,
struct drm_file *file_priv);
-int i915_gem_get_cacheing_ioctl(struct drm_device *dev, void *data,
- struct drm_file *file);
-int i915_gem_set_cacheing_ioctl(struct drm_device *dev, void *data,
- struct drm_file *file);
+int i915_gem_get_caching_ioctl(struct drm_device *dev, void *data,
+ struct drm_file *file);
+int i915_gem_set_caching_ioctl(struct drm_device *dev, void *data,
+ struct drm_file *file);
int i915_gem_throttle_ioctl(struct drm_device *dev, void *data,
struct drm_file *file_priv);
int i915_gem_madvise_ioctl(struct drm_device *dev, void *data,
@@ -1306,7 +1323,8 @@ int i915_gem_wait_ioctl(struct drm_device *dev, void *data,
struct drm_file *file_priv);
void i915_gem_load(struct drm_device *dev);
int i915_gem_init_object(struct drm_gem_object *obj);
-void i915_gem_object_init(struct drm_i915_gem_object *obj);
+void i915_gem_object_init(struct drm_i915_gem_object *obj,
+ const struct drm_i915_gem_object_ops *ops);
struct drm_i915_gem_object *i915_gem_alloc_object(struct drm_device *dev,
size_t size);
void i915_gem_free_object(struct drm_gem_object *obj);
@@ -1319,7 +1337,27 @@ int __must_check i915_gem_object_unbind(struct drm_i915_gem_object *obj);
void i915_gem_release_mmap(struct drm_i915_gem_object *obj);
void i915_gem_lastclose(struct drm_device *dev);
-int __must_check i915_gem_object_get_pages_gtt(struct drm_i915_gem_object *obj);
+int __must_check i915_gem_object_get_pages(struct drm_i915_gem_object *obj);
+static inline struct page *i915_gem_object_get_page(struct drm_i915_gem_object *obj, int n)
+{
+ struct scatterlist *sg = obj->pages->sgl;
+ while (n >= SG_MAX_SINGLE_ALLOC) {
+ sg = sg_chain_ptr(sg + SG_MAX_SINGLE_ALLOC - 1);
+ n -= SG_MAX_SINGLE_ALLOC - 1;
+ }
+ return sg_page(sg+n);
+}
+static inline void i915_gem_object_pin_pages(struct drm_i915_gem_object *obj)
+{
+ BUG_ON(obj->pages == NULL);
+ obj->pages_pin_count++;
+}
+static inline void i915_gem_object_unpin_pages(struct drm_i915_gem_object *obj)
+{
+ BUG_ON(obj->pages_pin_count == 0);
+ obj->pages_pin_count--;
+}
+
int __must_check i915_mutex_lock_interruptible(struct drm_device *dev);
int i915_gem_object_sync(struct drm_i915_gem_object *obj,
struct intel_ring_buffer *to);
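
[Reviewer note] The new ops vtable decouples "where do the backing pages come
from" from the rest of GEM, and pages_pin_count keeps the shrinker from
reaping pages that are actively in use. The expected calling pattern, as a
sketch (error handling trimmed):

	ret = i915_gem_object_get_pages(obj);	/* invokes obj->ops->get_pages() once */
	if (ret)
		return ret;

	i915_gem_object_pin_pages(obj);		/* pages cannot be reaped while pinned */
	/* ... access obj->pages, now a struct sg_table ... */
	i915_gem_object_unpin_pages(obj);	/* shrinker may call put_pages() again */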
diff --git a/drivers/gpu/drm/i915/i915_gem.c b/drivers/gpu/drm/i915/i915_gem.c
index 87a64e5f28f..365a7dc8a4a 100644
--- a/drivers/gpu/drm/i915/i915_gem.c
+++ b/drivers/gpu/drm/i915/i915_gem.c
@@ -343,7 +343,7 @@ shmem_pread_fast(struct page *page, int shmem_page_offset, int page_length,
page_length);
kunmap_atomic(vaddr);
- return ret;
+ return ret ? -EFAULT : 0;
}
static void
@@ -394,7 +394,7 @@ shmem_pread_slow(struct page *page, int shmem_page_offset, int page_length,
page_length);
kunmap(page);
- return ret;
+ return ret ? -EFAULT : 0;
}
static int
@@ -403,7 +403,6 @@ i915_gem_shmem_pread(struct drm_device *dev,
struct drm_i915_gem_pread *args,
struct drm_file *file)
{
- struct address_space *mapping = obj->base.filp->f_path.dentry->d_inode->i_mapping;
char __user *user_data;
ssize_t remain;
loff_t offset;
@@ -412,7 +411,8 @@ i915_gem_shmem_pread(struct drm_device *dev,
int hit_slowpath = 0;
int prefaulted = 0;
int needs_clflush = 0;
- int release_page;
+ struct scatterlist *sg;
+ int i;
user_data = (char __user *) (uintptr_t) args->data_ptr;
remain = args->size;
@@ -433,11 +433,23 @@ i915_gem_shmem_pread(struct drm_device *dev,
}
}
+ ret = i915_gem_object_get_pages(obj);
+ if (ret)
+ return ret;
+
+ i915_gem_object_pin_pages(obj);
+
offset = args->offset;
- while (remain > 0) {
+ for_each_sg(obj->pages->sgl, sg, obj->pages->nents, i) {
struct page *page;
+ if (i < offset >> PAGE_SHIFT)
+ continue;
+
+ if (remain <= 0)
+ break;
+
/* Operation in this page
*
* shmem_page_offset = offset within page in shmem file
@@ -448,18 +460,7 @@ i915_gem_shmem_pread(struct drm_device *dev,
if ((shmem_page_offset + page_length) > PAGE_SIZE)
page_length = PAGE_SIZE - shmem_page_offset;
- if (obj->pages) {
- page = obj->pages[offset >> PAGE_SHIFT];
- release_page = 0;
- } else {
- page = shmem_read_mapping_page(mapping, offset >> PAGE_SHIFT);
- if (IS_ERR(page)) {
- ret = PTR_ERR(page);
- goto out;
- }
- release_page = 1;
- }
-
+ page = sg_page(sg);
page_do_bit17_swizzling = obj_do_bit17_swizzling &&
(page_to_phys(page) & (1 << 17)) != 0;
@@ -470,7 +471,6 @@ i915_gem_shmem_pread(struct drm_device *dev,
goto next_page;
hit_slowpath = 1;
- page_cache_get(page);
mutex_unlock(&dev->struct_mutex);
if (!prefaulted) {
@@ -488,16 +488,12 @@ i915_gem_shmem_pread(struct drm_device *dev,
needs_clflush);
mutex_lock(&dev->struct_mutex);
- page_cache_release(page);
+
next_page:
mark_page_accessed(page);
- if (release_page)
- page_cache_release(page);
- if (ret) {
- ret = -EFAULT;
+ if (ret)
goto out;
- }
remain -= page_length;
user_data += page_length;
@@ -505,6 +501,8 @@ next_page:
}
out:
+ i915_gem_object_unpin_pages(obj);
+
if (hit_slowpath) {
/* Fixup: Kill any reinstated backing storage pages */
if (obj->madv == __I915_MADV_PURGED)
@@ -690,7 +688,7 @@ shmem_pwrite_fast(struct page *page, int shmem_page_offset, int page_length,
page_length);
kunmap_atomic(vaddr);
- return ret;
+ return ret ? -EFAULT : 0;
}
/* Only difference to the fast-path function is that this can handle bit17
@@ -724,7 +722,7 @@ shmem_pwrite_slow(struct page *page, int shmem_page_offset, int page_length,
page_do_bit17_swizzling);
kunmap(page);
- return ret;
+ return ret ? -EFAULT : 0;
}
static int
@@ -733,7 +731,6 @@ i915_gem_shmem_pwrite(struct drm_device *dev,
struct drm_i915_gem_pwrite *args,
struct drm_file *file)
{
- struct address_space *mapping = obj->base.filp->f_path.dentry->d_inode->i_mapping;
ssize_t remain;
loff_t offset;
char __user *user_data;
@@ -742,7 +739,8 @@ i915_gem_shmem_pwrite(struct drm_device *dev,
int hit_slowpath = 0;
int needs_clflush_after = 0;
int needs_clflush_before = 0;
- int release_page;
+ int i;
+ struct scatterlist *sg;
user_data = (char __user *) (uintptr_t) args->data_ptr;
remain = args->size;
@@ -768,13 +766,25 @@ i915_gem_shmem_pwrite(struct drm_device *dev,
&& obj->cache_level == I915_CACHE_NONE)
needs_clflush_before = 1;
+ ret = i915_gem_object_get_pages(obj);
+ if (ret)
+ return ret;
+
+ i915_gem_object_pin_pages(obj);
+
offset = args->offset;
obj->dirty = 1;
- while (remain > 0) {
+ for_each_sg(obj->pages->sgl, sg, obj->pages->nents, i) {
struct page *page;
int partial_cacheline_write;
+ if (i < offset >> PAGE_SHIFT)
+ continue;
+
+ if (remain <= 0)
+ break;
+
/* Operation in this page
*
* shmem_page_offset = offset within page in shmem file
@@ -793,18 +803,7 @@ i915_gem_shmem_pwrite(struct drm_device *dev,
((shmem_page_offset | page_length)
& (boot_cpu_data.x86_clflush_size - 1));
- if (obj->pages) {
- page = obj->pages[offset >> PAGE_SHIFT];
- release_page = 0;
- } else {
- page = shmem_read_mapping_page(mapping, offset >> PAGE_SHIFT);
- if (IS_ERR(page)) {
- ret = PTR_ERR(page);
- goto out;
- }
- release_page = 1;
- }
-
+ page = sg_page(sg);
page_do_bit17_swizzling = obj_do_bit17_swizzling &&
(page_to_phys(page) & (1 << 17)) != 0;
@@ -816,26 +815,20 @@ i915_gem_shmem_pwrite(struct drm_device *dev,
goto next_page;
hit_slowpath = 1;
- page_cache_get(page);
mutex_unlock(&dev->struct_mutex);
-
ret = shmem_pwrite_slow(page, shmem_page_offset, page_length,
user_data, page_do_bit17_swizzling,
partial_cacheline_write,
needs_clflush_after);
mutex_lock(&dev->struct_mutex);
- page_cache_release(page);
+
next_page:
set_page_dirty(page);
mark_page_accessed(page);
- if (release_page)
- page_cache_release(page);
- if (ret) {
- ret = -EFAULT;
+ if (ret)
goto out;
- }
remain -= page_length;
user_data += page_length;
@@ -843,6 +836,8 @@ next_page:
}
out:
+ i915_gem_object_unpin_pages(obj);
+
if (hit_slowpath) {
/* Fixup: Kill any reinstated backing storage pages */
if (obj->madv == __I915_MADV_PURGED)
@@ -1650,18 +1645,13 @@ i915_gem_object_is_purgeable(struct drm_i915_gem_object *obj)
return obj->madv == I915_MADV_DONTNEED;
}
-static int
+static void
i915_gem_object_put_pages_gtt(struct drm_i915_gem_object *obj)
{
int page_count = obj->base.size / PAGE_SIZE;
+ struct scatterlist *sg;
int ret, i;
- BUG_ON(obj->gtt_space);
-
- if (obj->pages == NULL)
- return 0;
-
- BUG_ON(obj->gtt_space);
BUG_ON(obj->madv == __I915_MADV_PURGED);
ret = i915_gem_object_set_to_cpu_domain(obj, true);
@@ -1680,22 +1670,40 @@ i915_gem_object_put_pages_gtt(struct drm_i915_gem_object *obj)
if (obj->madv == I915_MADV_DONTNEED)
obj->dirty = 0;
- for (i = 0; i < page_count; i++) {
+ for_each_sg(obj->pages->sgl, sg, page_count, i) {
+ struct page *page = sg_page(sg);
+
if (obj->dirty)
- set_page_dirty(obj->pages[i]);
+ set_page_dirty(page);
if (obj->madv == I915_MADV_WILLNEED)
- mark_page_accessed(obj->pages[i]);
+ mark_page_accessed(page);
- page_cache_release(obj->pages[i]);
+ page_cache_release(page);
}
obj->dirty = 0;
- drm_free_large(obj->pages);
+ sg_free_table(obj->pages);
+ kfree(obj->pages);
+}
+
+static int
+i915_gem_object_put_pages(struct drm_i915_gem_object *obj)
+{
+ const struct drm_i915_gem_object_ops *ops = obj->ops;
+
+ if (obj->pages == NULL)
+ return 0;
+
+ BUG_ON(obj->gtt_space);
+
+ if (obj->pages_pin_count)
+ return -EBUSY;
+
+ ops->put_pages(obj);
obj->pages = NULL;
list_del(&obj->gtt_list);
-
if (i915_gem_object_is_purgeable(obj))
i915_gem_object_truncate(obj);
@@ -1712,7 +1720,7 @@ i915_gem_purge(struct drm_i915_private *dev_priv, long target)
&dev_priv->mm.unbound_list,
gtt_list) {
if (i915_gem_object_is_purgeable(obj) &&
- i915_gem_object_put_pages_gtt(obj) == 0) {
+ i915_gem_object_put_pages(obj) == 0) {
count += obj->base.size >> PAGE_SHIFT;
if (count >= target)
return count;
@@ -1724,7 +1732,7 @@ i915_gem_purge(struct drm_i915_private *dev_priv, long target)
mm_list) {
if (i915_gem_object_is_purgeable(obj) &&
i915_gem_object_unbind(obj) == 0 &&
- i915_gem_object_put_pages_gtt(obj) == 0) {
+ i915_gem_object_put_pages(obj) == 0) {
count += obj->base.size >> PAGE_SHIFT;
if (count >= target)
return count;
@@ -1742,21 +1750,20 @@ i915_gem_shrink_all(struct drm_i915_private *dev_priv)
i915_gem_evict_everything(dev_priv->dev);
list_for_each_entry_safe(obj, next, &dev_priv->mm.unbound_list, gtt_list)
- i915_gem_object_put_pages_gtt(obj);
+ i915_gem_object_put_pages(obj);
}
-int
+static int
i915_gem_object_get_pages_gtt(struct drm_i915_gem_object *obj)
{
struct drm_i915_private *dev_priv = obj->base.dev->dev_private;
int page_count, i;
struct address_space *mapping;
+ struct sg_table *st;
+ struct scatterlist *sg;
struct page *page;
gfp_t gfp;
- if (obj->pages || obj->sg_table)
- return 0;
-
/* Assert that the object is not currently in any GPU domain. As it
* wasn't in the GTT, there shouldn't be any way it could have been in
* a GPU cache
@@ -1764,20 +1771,27 @@ i915_gem_object_get_pages_gtt(struct drm_i915_gem_object *obj)
BUG_ON(obj->base.read_domains & I915_GEM_GPU_DOMAINS);
BUG_ON(obj->base.write_domain & I915_GEM_GPU_DOMAINS);
- /* Get the list of pages out of our struct file. They'll be pinned
- * at this point until we release them.
- */
+ st = kmalloc(sizeof(*st), GFP_KERNEL);
+ if (st == NULL)
+ return -ENOMEM;
+
page_count = obj->base.size / PAGE_SIZE;
- obj->pages = drm_malloc_ab(page_count, sizeof(struct page *));
- if (obj->pages == NULL)
+ if (sg_alloc_table(st, page_count, GFP_KERNEL)) {
+ sg_free_table(st);
+ kfree(st);
return -ENOMEM;
+ }
- /* Fail silently without starting the shrinker */
+ /* Get the list of pages out of our struct file. They'll be pinned
+ * at this point until we release them.
+ *
+ * Fail silently without starting the shrinker
+ */
mapping = obj->base.filp->f_path.dentry->d_inode->i_mapping;
gfp = mapping_gfp_mask(mapping);
gfp |= __GFP_NORETRY | __GFP_NOWARN;
gfp &= ~(__GFP_IO | __GFP_WAIT);
- for (i = 0; i < page_count; i++) {
+ for_each_sg(st->sgl, sg, page_count, i) {
page = shmem_read_mapping_page_gfp(mapping, i, gfp);
if (IS_ERR(page)) {
i915_gem_purge(dev_priv, page_count);
@@ -1800,24 +1814,50 @@ i915_gem_object_get_pages_gtt(struct drm_i915_gem_object *obj)
gfp &= ~(__GFP_IO | __GFP_WAIT);
}
- obj->pages[i] = page;
+ sg_set_page(sg, page, PAGE_SIZE, 0);
}
if (i915_gem_object_needs_bit17_swizzle(obj))
i915_gem_object_do_bit_17_swizzle(obj);
- list_add_tail(&obj->gtt_list, &dev_priv->mm.unbound_list);
+ obj->pages = st;
return 0;
err_pages:
- while (i--)
- page_cache_release(obj->pages[i]);
-
- drm_free_large(obj->pages);
- obj->pages = NULL;
+ for_each_sg(st->sgl, sg, i, page_count)
+ page_cache_release(sg_page(sg));
+ sg_free_table(st);
+ kfree(st);
return PTR_ERR(page);
}
+/* Ensure that the associated pages are gathered from the backing storage
+ * and pinned into our object. i915_gem_object_get_pages() may be called
+ * multiple times before they are released by a single call to
+ * i915_gem_object_put_pages() - once the pages are no longer referenced
+ * either as a result of memory pressure (reaping pages under the shrinker)
+ * or as the object is itself released.
+ */
+int
+i915_gem_object_get_pages(struct drm_i915_gem_object *obj)
+{
+ struct drm_i915_private *dev_priv = obj->base.dev->dev_private;
+ const struct drm_i915_gem_object_ops *ops = obj->ops;
+ int ret;
+
+ if (obj->pages)
+ return 0;
+
+ BUG_ON(obj->pages_pin_count);
+
+ ret = ops->get_pages(obj);
+ if (ret)
+ return ret;
+
+ list_add_tail(&obj->gtt_list, &dev_priv->mm.unbound_list);
+ return 0;
+}
+
void
i915_gem_object_move_to_active(struct drm_i915_gem_object *obj,
struct intel_ring_buffer *ring,
@@ -2071,7 +2111,6 @@ void i915_gem_reset(struct drm_device *dev)
obj->base.read_domains &= ~I915_GEM_GPU_DOMAINS;
}
-
/* The fence registers are invalidated so clear them out */
i915_gem_reset_fences(dev);
}
@@ -2871,7 +2910,7 @@ i915_gem_object_bind_to_gtt(struct drm_i915_gem_object *obj,
return -E2BIG;
}
- ret = i915_gem_object_get_pages_gtt(obj);
+ ret = i915_gem_object_get_pages(obj);
if (ret)
return ret;
@@ -2971,7 +3010,7 @@ i915_gem_clflush_object(struct drm_i915_gem_object *obj)
trace_i915_gem_object_clflush(obj);
- drm_clflush_pages(obj->pages, obj->base.size / PAGE_SIZE);
+ drm_clflush_sg(obj->pages);
}
/** Flushes the GTT write domain for the object if it's dirty. */
@@ -3146,10 +3185,10 @@ int i915_gem_object_set_cache_level(struct drm_i915_gem_object *obj,
return 0;
}
-int i915_gem_get_cacheing_ioctl(struct drm_device *dev, void *data,
- struct drm_file *file)
+int i915_gem_get_caching_ioctl(struct drm_device *dev, void *data,
+ struct drm_file *file)
{
- struct drm_i915_gem_cacheing *args = data;
+ struct drm_i915_gem_caching *args = data;
struct drm_i915_gem_object *obj;
int ret;
@@ -3163,7 +3202,7 @@ int i915_gem_get_cacheing_ioctl(struct drm_device *dev, void *data,
goto unlock;
}
- args->cacheing = obj->cache_level != I915_CACHE_NONE;
+ args->caching = obj->cache_level != I915_CACHE_NONE;
drm_gem_object_unreference(&obj->base);
unlock:
@@ -3171,10 +3210,10 @@ unlock:
return ret;
}
-int i915_gem_set_cacheing_ioctl(struct drm_device *dev, void *data,
- struct drm_file *file)
+int i915_gem_set_caching_ioctl(struct drm_device *dev, void *data,
+ struct drm_file *file)
{
- struct drm_i915_gem_cacheing *args = data;
+ struct drm_i915_gem_caching *args = data;
struct drm_i915_gem_object *obj;
enum i915_cache_level level;
int ret;
@@ -3183,11 +3222,11 @@ int i915_gem_set_cacheing_ioctl(struct drm_device *dev, void *data,
if (ret)
return ret;
- switch (args->cacheing) {
- case I915_CACHEING_NONE:
+ switch (args->caching) {
+ case I915_CACHING_NONE:
level = I915_CACHE_NONE;
break;
- case I915_CACHEING_CACHED:
+ case I915_CACHING_CACHED:
level = I915_CACHE_LLC;
break;
default:
@@ -3386,7 +3425,8 @@ i915_gem_object_pin(struct drm_i915_gem_object *obj,
{
int ret;
- BUG_ON(obj->pin_count == DRM_I915_GEM_OBJECT_MAX_PIN_COUNT);
+ if (WARN_ON(obj->pin_count == DRM_I915_GEM_OBJECT_MAX_PIN_COUNT))
+ return -EBUSY;
if (obj->gtt_space != NULL) {
if ((alignment && obj->gtt_offset & (alignment - 1)) ||
@@ -3610,15 +3650,16 @@ unlock:
return ret;
}
-void i915_gem_object_init(struct drm_i915_gem_object *obj)
+void i915_gem_object_init(struct drm_i915_gem_object *obj,
+ const struct drm_i915_gem_object_ops *ops)
{
- obj->base.driver_private = NULL;
-
INIT_LIST_HEAD(&obj->mm_list);
INIT_LIST_HEAD(&obj->gtt_list);
INIT_LIST_HEAD(&obj->ring_list);
INIT_LIST_HEAD(&obj->exec_list);
+ obj->ops = ops;
+
obj->fence_reg = I915_FENCE_REG_NONE;
obj->madv = I915_MADV_WILLNEED;
/* Avoid an unnecessary call to unbind on the first bind. */
@@ -3627,6 +3668,11 @@ void i915_gem_object_init(struct drm_i915_gem_object *obj)
i915_gem_info_add_obj(obj->base.dev->dev_private, obj->base.size);
}
+static const struct drm_i915_gem_object_ops i915_gem_object_ops = {
+ .get_pages = i915_gem_object_get_pages_gtt,
+ .put_pages = i915_gem_object_put_pages_gtt,
+};
+
struct drm_i915_gem_object *i915_gem_alloc_object(struct drm_device *dev,
size_t size)
{
@@ -3653,7 +3699,7 @@ struct drm_i915_gem_object *i915_gem_alloc_object(struct drm_device *dev,
mapping = obj->base.filp->f_path.dentry->d_inode->i_mapping;
mapping_set_gfp_mask(mapping, mask);
- i915_gem_object_init(obj);
+ i915_gem_object_init(obj, &i915_gem_object_ops);
obj->base.write_domain = I915_GEM_DOMAIN_CPU;
obj->base.read_domains = I915_GEM_DOMAIN_CPU;
@@ -3693,9 +3739,6 @@ void i915_gem_free_object(struct drm_gem_object *gem_obj)
trace_i915_gem_object_destroy(obj);
- if (gem_obj->import_attach)
- drm_prime_gem_destroy(gem_obj, obj->sg_table);
-
if (obj->phys_obj)
i915_gem_detach_phys_object(dev, obj);
@@ -3711,9 +3754,15 @@ void i915_gem_free_object(struct drm_gem_object *gem_obj)
dev_priv->mm.interruptible = was_interruptible;
}
- i915_gem_object_put_pages_gtt(obj);
+ obj->pages_pin_count = 0;
+ i915_gem_object_put_pages(obj);
i915_gem_object_free_mmap_offset(obj);
+ BUG_ON(obj->pages);
+
+ if (obj->base.import_attach)
+ drm_prime_gem_destroy(&obj->base, NULL);
+
drm_gem_object_release(&obj->base);
i915_gem_info_remove_obj(dev_priv, obj->base.size);
@@ -4370,9 +4419,10 @@ i915_gem_inactive_shrink(struct shrinker *shrinker, struct shrink_control *sc)
cnt = 0;
list_for_each_entry(obj, &dev_priv->mm.unbound_list, gtt_list)
- cnt += obj->base.size >> PAGE_SHIFT;
+ if (obj->pages_pin_count == 0)
+ cnt += obj->base.size >> PAGE_SHIFT;
list_for_each_entry(obj, &dev_priv->mm.bound_list, gtt_list)
- if (obj->pin_count == 0)
+ if (obj->pin_count == 0 && obj->pages_pin_count == 0)
cnt += obj->base.size >> PAGE_SHIFT;
mutex_unlock(&dev->struct_mutex);
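
[Reviewer note] With obj->pages now a scatterlist table, every former
"for (i = 0; i < page_count; i++)" walk over a struct page array becomes the
sg iteration used throughout this file:

	struct scatterlist *sg;
	int i;

	for_each_sg(obj->pages->sgl, sg, obj->pages->nents, i) {
		struct page *page = sg_page(sg);
		/* operate on one backing page at a time */
	}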
diff --git a/drivers/gpu/drm/i915/i915_gem_dmabuf.c b/drivers/gpu/drm/i915/i915_gem_dmabuf.c
index 43c95307f99..ca3497e1108 100644
--- a/drivers/gpu/drm/i915/i915_gem_dmabuf.c
+++ b/drivers/gpu/drm/i915/i915_gem_dmabuf.c
@@ -28,35 +28,62 @@
#include <linux/dma-buf.h>
static struct sg_table *i915_gem_map_dma_buf(struct dma_buf_attachment *attachment,
- enum dma_data_direction dir)
+ enum dma_data_direction dir)
{
struct drm_i915_gem_object *obj = attachment->dmabuf->priv;
- struct drm_device *dev = obj->base.dev;
- int npages = obj->base.size / PAGE_SIZE;
- struct sg_table *sg;
- int ret;
- int nents;
+ struct sg_table *st;
+ struct scatterlist *src, *dst;
+ int ret, i;
- ret = i915_mutex_lock_interruptible(dev);
+ ret = i915_mutex_lock_interruptible(obj->base.dev);
if (ret)
return ERR_PTR(ret);
- ret = i915_gem_object_get_pages_gtt(obj);
+ ret = i915_gem_object_get_pages(obj);
if (ret) {
- sg = ERR_PTR(ret);
+ st = ERR_PTR(ret);
+ goto out;
+ }
+
+ /* Copy sg so that we make an independent mapping */
+ st = kmalloc(sizeof(struct sg_table), GFP_KERNEL);
+ if (st == NULL) {
+ st = ERR_PTR(-ENOMEM);
goto out;
}
- /* link the pages into an SG then map the sg */
- sg = drm_prime_pages_to_sg(obj->pages, npages);
- nents = dma_map_sg(attachment->dev, sg->sgl, sg->nents, dir);
+ ret = sg_alloc_table(st, obj->pages->nents, GFP_KERNEL);
+ if (ret) {
+ kfree(st);
+ st = ERR_PTR(ret);
+ goto out;
+ }
+
+ src = obj->pages->sgl;
+ dst = st->sgl;
+ for (i = 0; i < obj->pages->nents; i++) {
+ sg_set_page(dst, sg_page(src), PAGE_SIZE, 0);
+ dst = sg_next(dst);
+ src = sg_next(src);
+ }
+
+ if (!dma_map_sg(attachment->dev, st->sgl, st->nents, dir)) {
+ sg_free_table(st);
+ kfree(st);
+ st = ERR_PTR(-ENOMEM);
+ goto out;
+ }
+
+ i915_gem_object_pin_pages(obj);
+
out:
- mutex_unlock(&dev->struct_mutex);
- return sg;
+ mutex_unlock(&obj->base.dev->struct_mutex);
+ return st;
}
static void i915_gem_unmap_dma_buf(struct dma_buf_attachment *attachment,
- struct sg_table *sg, enum dma_data_direction dir)
+ struct sg_table *sg,
+ enum dma_data_direction dir)
{
dma_unmap_sg(attachment->dev, sg->sgl, sg->nents, dir);
sg_free_table(sg);
@@ -78,7 +105,9 @@ static void *i915_gem_dmabuf_vmap(struct dma_buf *dma_buf)
{
struct drm_i915_gem_object *obj = dma_buf->priv;
struct drm_device *dev = obj->base.dev;
- int ret;
+ struct scatterlist *sg;
+ struct page **pages;
+ int ret, i;
ret = i915_mutex_lock_interruptible(dev);
if (ret)
@@ -89,22 +118,34 @@ static void *i915_gem_dmabuf_vmap(struct dma_buf *dma_buf)
goto out_unlock;
}
- ret = i915_gem_object_get_pages_gtt(obj);
- if (ret) {
- mutex_unlock(&dev->struct_mutex);
- return ERR_PTR(ret);
- }
+ ret = i915_gem_object_get_pages(obj);
+ if (ret)
+ goto error;
- obj->dma_buf_vmapping = vmap(obj->pages, obj->base.size / PAGE_SIZE, 0, PAGE_KERNEL);
- if (!obj->dma_buf_vmapping) {
- DRM_ERROR("failed to vmap object\n");
- goto out_unlock;
- }
+ ret = -ENOMEM;
+
+ pages = drm_malloc_ab(obj->pages->nents, sizeof(struct page *));
+ if (pages == NULL)
+ goto error;
+
+ for_each_sg(obj->pages->sgl, sg, obj->pages->nents, i)
+ pages[i] = sg_page(sg);
+
+ obj->dma_buf_vmapping = vmap(pages, obj->pages->nents, 0, PAGE_KERNEL);
+ drm_free_large(pages);
+
+ if (!obj->dma_buf_vmapping)
+ goto error;
obj->vmapping_count = 1;
+ i915_gem_object_pin_pages(obj);
out_unlock:
mutex_unlock(&dev->struct_mutex);
return obj->dma_buf_vmapping;
+
+error:
+ mutex_unlock(&dev->struct_mutex);
+ return ERR_PTR(ret);
}
static void i915_gem_dmabuf_vunmap(struct dma_buf *dma_buf, void *vaddr)
@@ -117,10 +158,11 @@ static void i915_gem_dmabuf_vunmap(struct dma_buf *dma_buf, void *vaddr)
if (ret)
return;
- --obj->vmapping_count;
- if (obj->vmapping_count == 0) {
+ if (--obj->vmapping_count == 0) {
vunmap(obj->dma_buf_vmapping);
obj->dma_buf_vmapping = NULL;
+
+ i915_gem_object_unpin_pages(obj);
}
mutex_unlock(&dev->struct_mutex);
}
@@ -180,22 +222,43 @@ static const struct dma_buf_ops i915_dmabuf_ops = {
};
struct dma_buf *i915_gem_prime_export(struct drm_device *dev,
- struct drm_gem_object *gem_obj, int flags)
+ struct drm_gem_object *gem_obj, int flags)
{
struct drm_i915_gem_object *obj = to_intel_bo(gem_obj);
- return dma_buf_export(obj, &i915_dmabuf_ops,
- obj->base.size, 0600);
+ return dma_buf_export(obj, &i915_dmabuf_ops, obj->base.size, 0600);
}
+static int i915_gem_object_get_pages_dmabuf(struct drm_i915_gem_object *obj)
+{
+ struct sg_table *sg;
+
+ sg = dma_buf_map_attachment(obj->base.import_attach, DMA_BIDIRECTIONAL);
+ if (IS_ERR(sg))
+ return PTR_ERR(sg);
+
+ obj->pages = sg;
+ obj->has_dma_mapping = true;
+ return 0;
+}
+
+static void i915_gem_object_put_pages_dmabuf(struct drm_i915_gem_object *obj)
+{
+ dma_buf_unmap_attachment(obj->base.import_attach,
+ obj->pages, DMA_BIDIRECTIONAL);
+ obj->has_dma_mapping = false;
+}
+
+static const struct drm_i915_gem_object_ops i915_gem_object_dmabuf_ops = {
+ .get_pages = i915_gem_object_get_pages_dmabuf,
+ .put_pages = i915_gem_object_put_pages_dmabuf,
+};
+
struct drm_gem_object *i915_gem_prime_import(struct drm_device *dev,
- struct dma_buf *dma_buf)
+ struct dma_buf *dma_buf)
{
struct dma_buf_attachment *attach;
- struct sg_table *sg;
struct drm_i915_gem_object *obj;
- int npages;
- int size;
int ret;
/* is this one of own objects? */
@@ -213,34 +276,24 @@ struct drm_gem_object *i915_gem_prime_import(struct drm_device *dev,
if (IS_ERR(attach))
return ERR_CAST(attach);
- sg = dma_buf_map_attachment(attach, DMA_BIDIRECTIONAL);
- if (IS_ERR(sg)) {
- ret = PTR_ERR(sg);
- goto fail_detach;
- }
-
- size = dma_buf->size;
- npages = size / PAGE_SIZE;
obj = kzalloc(sizeof(*obj), GFP_KERNEL);
if (obj == NULL) {
ret = -ENOMEM;
- goto fail_unmap;
+ goto fail_detach;
}
- ret = drm_gem_private_object_init(dev, &obj->base, size);
+ ret = drm_gem_private_object_init(dev, &obj->base, dma_buf->size);
if (ret) {
kfree(obj);
- goto fail_unmap;
+ goto fail_detach;
}
- obj->sg_table = sg;
+ i915_gem_object_init(obj, &i915_gem_object_dmabuf_ops);
obj->base.import_attach = attach;
return &obj->base;
-fail_unmap:
- dma_buf_unmap_attachment(attach, sg, DMA_BIDIRECTIONAL);
fail_detach:
dma_buf_detach(dma_buf, attach);
return ERR_PTR(ret);
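
[Reviewer note] Import no longer maps the attachment eagerly; the sg_table is
produced on first use, when i915_gem_object_get_pages() dispatches to the new
dmabuf ops. Rough lifecycle sketch (assuming the usual dma_buf_attach() call
at the top of the import path):

	attach = dma_buf_attach(dma_buf, dev->dev);
	i915_gem_object_init(obj, &i915_gem_object_dmabuf_ops);
	obj->base.import_attach = attach;

	/* later, on first GPU use: */
	i915_gem_object_get_pages(obj);		/* -> dma_buf_map_attachment() */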
diff --git a/drivers/gpu/drm/i915/i915_gem_execbuffer.c b/drivers/gpu/drm/i915/i915_gem_execbuffer.c
index e6b2205ecf6..6a2f3e50c71 100644
--- a/drivers/gpu/drm/i915/i915_gem_execbuffer.c
+++ b/drivers/gpu/drm/i915/i915_gem_execbuffer.c
@@ -44,6 +44,7 @@ eb_create(int size)
{
struct eb_objects *eb;
int count = PAGE_SIZE / sizeof(struct hlist_head) / 2;
+ BUILD_BUG_ON(!is_power_of_2(PAGE_SIZE / sizeof(struct hlist_head)));
while (count > size)
count >>= 1;
eb = kzalloc(count*sizeof(struct hlist_head) +
@@ -210,7 +211,8 @@ i915_gem_execbuffer_relocate_entry(struct drm_i915_gem_object *obj,
if (ret)
return ret;
- vaddr = kmap_atomic(obj->pages[reloc->offset >> PAGE_SHIFT]);
+ vaddr = kmap_atomic(i915_gem_object_get_page(obj,
+ reloc->offset >> PAGE_SHIFT));
*(uint32_t *)(vaddr + page_offset) = reloc->delta;
kunmap_atomic(vaddr);
} else {
@@ -1101,8 +1103,7 @@ i915_gem_execbuffer(struct drm_device *dev, void *data,
return -ENOMEM;
}
ret = copy_from_user(exec_list,
- (struct drm_i915_relocation_entry __user *)
- (uintptr_t) args->buffers_ptr,
+ (void __user *)(uintptr_t)args->buffers_ptr,
sizeof(*exec_list) * args->buffer_count);
if (ret != 0) {
DRM_DEBUG("copy %d exec entries failed %d\n",
@@ -1141,8 +1142,7 @@ i915_gem_execbuffer(struct drm_device *dev, void *data,
for (i = 0; i < args->buffer_count; i++)
exec_list[i].offset = exec2_list[i].offset;
/* ... and back out to userspace */
- ret = copy_to_user((struct drm_i915_relocation_entry __user *)
- (uintptr_t) args->buffers_ptr,
+ ret = copy_to_user((void __user *)(uintptr_t)args->buffers_ptr,
exec_list,
sizeof(*exec_list) * args->buffer_count);
if (ret) {
@@ -1196,8 +1196,7 @@ i915_gem_execbuffer2(struct drm_device *dev, void *data,
ret = i915_gem_do_execbuffer(dev, data, file, args, exec2_list);
if (!ret) {
/* Copy the new buffer offsets back to the user's exec list. */
- ret = copy_to_user((struct drm_i915_relocation_entry __user *)
- (uintptr_t) args->buffers_ptr,
+ ret = copy_to_user((void __user *)(uintptr_t)args->buffers_ptr,
exec2_list,
sizeof(*exec2_list) * args->buffer_count);
if (ret) {
diff --git a/drivers/gpu/drm/i915/i915_gem_gtt.c b/drivers/gpu/drm/i915/i915_gem_gtt.c
index 18477314d85..47e427e4bc6 100644
--- a/drivers/gpu/drm/i915/i915_gem_gtt.c
+++ b/drivers/gpu/drm/i915/i915_gem_gtt.c
@@ -72,7 +72,7 @@ int i915_gem_init_aliasing_ppgtt(struct drm_device *dev)
/* ppgtt PDEs reside in the global gtt pagetable, which has 512*1024
* entries. For aliasing ppgtt support we just steal them at the end for
* now. */
- first_pd_entry_in_global_pt = 512*1024 - I915_PPGTT_PD_ENTRIES;
+ first_pd_entry_in_global_pt = dev_priv->mm.gtt->gtt_total_entries - I915_PPGTT_PD_ENTRIES;
ppgtt = kzalloc(sizeof(*ppgtt), GFP_KERNEL);
if (!ppgtt)
@@ -167,8 +167,7 @@ void i915_gem_cleanup_aliasing_ppgtt(struct drm_device *dev)
}
static void i915_ppgtt_insert_sg_entries(struct i915_hw_ppgtt *ppgtt,
- struct scatterlist *sg_list,
- unsigned sg_len,
+ const struct sg_table *pages,
unsigned first_entry,
uint32_t pte_flags)
{
@@ -180,12 +179,12 @@ static void i915_ppgtt_insert_sg_entries(struct i915_hw_ppgtt *ppgtt,
struct scatterlist *sg;
/* init sg walking */
- sg = sg_list;
+ sg = pages->sgl;
i = 0;
segment_len = sg_dma_len(sg) >> PAGE_SHIFT;
m = 0;
- while (i < sg_len) {
+ while (i < pages->nents) {
pt_vaddr = kmap_atomic(ppgtt->pt_pages[act_pd]);
for (j = first_pte; j < I915_PPGTT_PT_ENTRIES; j++) {
@@ -194,13 +193,11 @@ static void i915_ppgtt_insert_sg_entries(struct i915_hw_ppgtt *ppgtt,
pt_vaddr[j] = pte | pte_flags;
/* grab the next page */
- m++;
- if (m == segment_len) {
- sg = sg_next(sg);
- i++;
- if (i == sg_len)
+ if (++m == segment_len) {
+ if (++i == pages->nents)
break;
+ sg = sg_next(sg);
segment_len = sg_dma_len(sg) >> PAGE_SHIFT;
m = 0;
}
@@ -213,44 +210,10 @@ static void i915_ppgtt_insert_sg_entries(struct i915_hw_ppgtt *ppgtt,
}
}
-static void i915_ppgtt_insert_pages(struct i915_hw_ppgtt *ppgtt,
- unsigned first_entry, unsigned num_entries,
- struct page **pages, uint32_t pte_flags)
-{
- uint32_t *pt_vaddr, pte;
- unsigned act_pd = first_entry / I915_PPGTT_PT_ENTRIES;
- unsigned first_pte = first_entry % I915_PPGTT_PT_ENTRIES;
- unsigned last_pte, i;
- dma_addr_t page_addr;
-
- while (num_entries) {
- last_pte = first_pte + num_entries;
- last_pte = min_t(unsigned, last_pte, I915_PPGTT_PT_ENTRIES);
-
- pt_vaddr = kmap_atomic(ppgtt->pt_pages[act_pd]);
-
- for (i = first_pte; i < last_pte; i++) {
- page_addr = page_to_phys(*pages);
- pte = GEN6_PTE_ADDR_ENCODE(page_addr);
- pt_vaddr[i] = pte | pte_flags;
-
- pages++;
- }
-
- kunmap_atomic(pt_vaddr);
-
- num_entries -= last_pte - first_pte;
- first_pte = 0;
- act_pd++;
- }
-}
-
void i915_ppgtt_bind_object(struct i915_hw_ppgtt *ppgtt,
struct drm_i915_gem_object *obj,
enum i915_cache_level cache_level)
{
- struct drm_device *dev = obj->base.dev;
- struct drm_i915_private *dev_priv = dev->dev_private;
uint32_t pte_flags = GEN6_PTE_VALID;
switch (cache_level) {
@@ -261,7 +224,7 @@ void i915_ppgtt_bind_object(struct i915_hw_ppgtt *ppgtt,
pte_flags |= GEN6_PTE_CACHE_LLC;
break;
case I915_CACHE_NONE:
- if (IS_HASWELL(dev))
+ if (IS_HASWELL(obj->base.dev))
pte_flags |= HSW_PTE_UNCACHED;
else
pte_flags |= GEN6_PTE_UNCACHED;
@@ -270,26 +233,10 @@ void i915_ppgtt_bind_object(struct i915_hw_ppgtt *ppgtt,
BUG();
}
- if (obj->sg_table) {
- i915_ppgtt_insert_sg_entries(ppgtt,
- obj->sg_table->sgl,
- obj->sg_table->nents,
- obj->gtt_space->start >> PAGE_SHIFT,
- pte_flags);
- } else if (dev_priv->mm.gtt->needs_dmar) {
- BUG_ON(!obj->sg_list);
-
- i915_ppgtt_insert_sg_entries(ppgtt,
- obj->sg_list,
- obj->num_sg,
- obj->gtt_space->start >> PAGE_SHIFT,
- pte_flags);
- } else
- i915_ppgtt_insert_pages(ppgtt,
- obj->gtt_space->start >> PAGE_SHIFT,
- obj->base.size >> PAGE_SHIFT,
- obj->pages,
- pte_flags);
+ i915_ppgtt_insert_sg_entries(ppgtt,
+ obj->pages,
+ obj->gtt_space->start >> PAGE_SHIFT,
+ pte_flags);
}
void i915_ppgtt_unbind_object(struct i915_hw_ppgtt *ppgtt,
@@ -361,44 +308,26 @@ void i915_gem_restore_gtt_mappings(struct drm_device *dev)
int i915_gem_gtt_prepare_object(struct drm_i915_gem_object *obj)
{
- struct drm_device *dev = obj->base.dev;
- struct drm_i915_private *dev_priv = dev->dev_private;
-
- /* don't map imported dma buf objects */
- if (dev_priv->mm.gtt->needs_dmar && !obj->sg_table)
- return intel_gtt_map_memory(obj->pages,
- obj->base.size >> PAGE_SHIFT,
- &obj->sg_list,
- &obj->num_sg);
- else
+ if (obj->has_dma_mapping)
return 0;
+
+ if (!dma_map_sg(&obj->base.dev->pdev->dev,
+ obj->pages->sgl, obj->pages->nents,
+ PCI_DMA_BIDIRECTIONAL))
+ return -ENOSPC;
+
+ return 0;
}
void i915_gem_gtt_bind_object(struct drm_i915_gem_object *obj,
enum i915_cache_level cache_level)
{
struct drm_device *dev = obj->base.dev;
- struct drm_i915_private *dev_priv = dev->dev_private;
unsigned int agp_type = cache_level_to_agp_type(dev, cache_level);
- if (obj->sg_table) {
- intel_gtt_insert_sg_entries(obj->sg_table->sgl,
- obj->sg_table->nents,
- obj->gtt_space->start >> PAGE_SHIFT,
- agp_type);
- } else if (dev_priv->mm.gtt->needs_dmar) {
- BUG_ON(!obj->sg_list);
-
- intel_gtt_insert_sg_entries(obj->sg_list,
- obj->num_sg,
- obj->gtt_space->start >> PAGE_SHIFT,
- agp_type);
- } else
- intel_gtt_insert_pages(obj->gtt_space->start >> PAGE_SHIFT,
- obj->base.size >> PAGE_SHIFT,
- obj->pages,
- agp_type);
-
+ intel_gtt_insert_sg_entries(obj->pages,
+ obj->gtt_space->start >> PAGE_SHIFT,
+ agp_type);
obj->has_global_gtt_mapping = 1;
}
@@ -418,10 +347,10 @@ void i915_gem_gtt_finish_object(struct drm_i915_gem_object *obj)
interruptible = do_idling(dev_priv);
- if (obj->sg_list) {
- intel_gtt_unmap_memory(obj->sg_list, obj->num_sg);
- obj->sg_list = NULL;
- }
+ if (!obj->has_dma_mapping)
+ dma_unmap_sg(&dev->pdev->dev,
+ obj->pages->sgl, obj->pages->nents,
+ PCI_DMA_BIDIRECTIONAL);
undo_idling(dev_priv, interruptible);
}
diff --git a/drivers/gpu/drm/i915/i915_gem_tiling.c b/drivers/gpu/drm/i915/i915_gem_tiling.c
index b964df51cec..8093ecd2ea3 100644
--- a/drivers/gpu/drm/i915/i915_gem_tiling.c
+++ b/drivers/gpu/drm/i915/i915_gem_tiling.c
@@ -470,18 +470,20 @@ i915_gem_swizzle_page(struct page *page)
void
i915_gem_object_do_bit_17_swizzle(struct drm_i915_gem_object *obj)
{
+ struct scatterlist *sg;
int page_count = obj->base.size >> PAGE_SHIFT;
int i;
if (obj->bit_17 == NULL)
return;
- for (i = 0; i < page_count; i++) {
- char new_bit_17 = page_to_phys(obj->pages[i]) >> 17;
+ for_each_sg(obj->pages->sgl, sg, page_count, i) {
+ struct page *page = sg_page(sg);
+ char new_bit_17 = page_to_phys(page) >> 17;
if ((new_bit_17 & 0x1) !=
(test_bit(i, obj->bit_17) != 0)) {
- i915_gem_swizzle_page(obj->pages[i]);
- set_page_dirty(obj->pages[i]);
+ i915_gem_swizzle_page(page);
+ set_page_dirty(page);
}
}
}
@@ -489,6 +491,7 @@ i915_gem_object_do_bit_17_swizzle(struct drm_i915_gem_object *obj)
void
i915_gem_object_save_bit_17_swizzle(struct drm_i915_gem_object *obj)
{
+ struct scatterlist *sg;
int page_count = obj->base.size >> PAGE_SHIFT;
int i;
@@ -502,8 +505,9 @@ i915_gem_object_save_bit_17_swizzle(struct drm_i915_gem_object *obj)
}
}
- for (i = 0; i < page_count; i++) {
- if (page_to_phys(obj->pages[i]) & (1 << 17))
+ for_each_sg(obj->pages->sgl, sg, page_count, i) {
+ struct page *page = sg_page(sg);
+ if (page_to_phys(page) & (1 << 17))
__set_bit(i, obj->bit_17);
else
__clear_bit(i, obj->bit_17);
diff --git a/drivers/gpu/drm/i915/i915_irq.c b/drivers/gpu/drm/i915/i915_irq.c
index d6010135e40..d7f0066538a 100644
--- a/drivers/gpu/drm/i915/i915_irq.c
+++ b/drivers/gpu/drm/i915/i915_irq.c
@@ -382,7 +382,13 @@ static void gen6_pm_rps_work(struct work_struct *work)
else
new_delay = dev_priv->rps.cur_delay - 1;
- gen6_set_rps(dev_priv->dev, new_delay);
+ /* sysfs frequency interfaces may have snuck in while servicing the
+ * interrupt
+ */
+ if (!(new_delay > dev_priv->rps.max_delay ||
+ new_delay < dev_priv->rps.min_delay)) {
+ gen6_set_rps(dev_priv->dev, new_delay);
+ }
mutex_unlock(&dev_priv->dev->struct_mutex);
}
@@ -888,20 +894,20 @@ i915_error_object_create(struct drm_i915_private *dev_priv,
struct drm_i915_gem_object *src)
{
struct drm_i915_error_object *dst;
- int page, page_count;
+ int i, count;
u32 reloc_offset;
if (src == NULL || src->pages == NULL)
return NULL;
- page_count = src->base.size / PAGE_SIZE;
+ count = src->base.size / PAGE_SIZE;
- dst = kmalloc(sizeof(*dst) + page_count * sizeof(u32 *), GFP_ATOMIC);
+ dst = kmalloc(sizeof(*dst) + count * sizeof(u32 *), GFP_ATOMIC);
if (dst == NULL)
return NULL;
reloc_offset = src->gtt_offset;
- for (page = 0; page < page_count; page++) {
+ for (i = 0; i < count; i++) {
unsigned long flags;
void *d;
@@ -924,30 +930,33 @@ i915_error_object_create(struct drm_i915_private *dev_priv,
memcpy_fromio(d, s, PAGE_SIZE);
io_mapping_unmap_atomic(s);
} else {
+ struct page *page;
void *s;
- drm_clflush_pages(&src->pages[page], 1);
+ page = i915_gem_object_get_page(src, i);
- s = kmap_atomic(src->pages[page]);
+ drm_clflush_pages(&page, 1);
+
+ s = kmap_atomic(page);
memcpy(d, s, PAGE_SIZE);
kunmap_atomic(s);
- drm_clflush_pages(&src->pages[page], 1);
+ drm_clflush_pages(&page, 1);
}
local_irq_restore(flags);
- dst->pages[page] = d;
+ dst->pages[i] = d;
reloc_offset += PAGE_SIZE;
}
- dst->page_count = page_count;
+ dst->page_count = count;
dst->gtt_offset = src->gtt_offset;
return dst;
unwind:
- while (page--)
- kfree(dst->pages[page]);
+ while (i--)
+ kfree(dst->pages[i]);
kfree(dst);
return NULL;
}
@@ -2735,9 +2744,6 @@ void intel_irq_init(struct drm_device *dev)
dev->driver->irq_handler = i8xx_irq_handler;
dev->driver->irq_uninstall = i8xx_irq_uninstall;
} else if (INTEL_INFO(dev)->gen == 3) {
- /* IIR "flip pending" means done if this bit is set */
- I915_WRITE(ECOSKPD, _MASKED_BIT_DISABLE(ECO_FLIP_DONE));
-
dev->driver->irq_preinstall = i915_irq_preinstall;
dev->driver->irq_postinstall = i915_irq_postinstall;
dev->driver->irq_uninstall = i915_irq_uninstall;
diff --git a/drivers/gpu/drm/i915/i915_reg.h b/drivers/gpu/drm/i915/i915_reg.h
index a828e90602b..7637824c6a7 100644
--- a/drivers/gpu/drm/i915/i915_reg.h
+++ b/drivers/gpu/drm/i915/i915_reg.h
@@ -1794,6 +1794,10 @@
/* Video Data Island Packet control */
#define VIDEO_DIP_DATA 0x61178
+/* Read the description of VIDEO_DIP_DATA (before Haswell) or VIDEO_DIP_ECC
+ * (Haswell and newer) to see which VIDEO_DIP_DATA byte corresponds to each byte
+ * of the infoframe structure specified by CEA-861. */
+#define VIDEO_DIP_DATA_SIZE 32
#define VIDEO_DIP_CTL 0x61170
/* Pre HSW: */
#define VIDEO_DIP_ENABLE (1 << 31)
diff --git a/drivers/gpu/drm/i915/i915_sysfs.c b/drivers/gpu/drm/i915/i915_sysfs.c
index da733a3fe1e..903eebd2117 100644
--- a/drivers/gpu/drm/i915/i915_sysfs.c
+++ b/drivers/gpu/drm/i915/i915_sysfs.c
@@ -46,32 +46,32 @@ static u32 calc_residency(struct drm_device *dev, const u32 reg)
}
static ssize_t
-show_rc6_mask(struct device *dev, struct device_attribute *attr, char *buf)
+show_rc6_mask(struct device *kdev, struct device_attribute *attr, char *buf)
{
- struct drm_minor *dminor = container_of(dev, struct drm_minor, kdev);
+ struct drm_minor *dminor = container_of(kdev, struct drm_minor, kdev);
return snprintf(buf, PAGE_SIZE, "%x", intel_enable_rc6(dminor->dev));
}
static ssize_t
-show_rc6_ms(struct device *dev, struct device_attribute *attr, char *buf)
+show_rc6_ms(struct device *kdev, struct device_attribute *attr, char *buf)
{
- struct drm_minor *dminor = container_of(dev, struct drm_minor, kdev);
+ struct drm_minor *dminor = container_of(kdev, struct drm_minor, kdev);
u32 rc6_residency = calc_residency(dminor->dev, GEN6_GT_GFX_RC6);
return snprintf(buf, PAGE_SIZE, "%u", rc6_residency);
}
static ssize_t
-show_rc6p_ms(struct device *dev, struct device_attribute *attr, char *buf)
+show_rc6p_ms(struct device *kdev, struct device_attribute *attr, char *buf)
{
- struct drm_minor *dminor = container_of(dev, struct drm_minor, kdev);
+ struct drm_minor *dminor = container_of(kdev, struct drm_minor, kdev);
u32 rc6p_residency = calc_residency(dminor->dev, GEN6_GT_GFX_RC6p);
return snprintf(buf, PAGE_SIZE, "%u", rc6p_residency);
}
static ssize_t
-show_rc6pp_ms(struct device *dev, struct device_attribute *attr, char *buf)
+show_rc6pp_ms(struct device *kdev, struct device_attribute *attr, char *buf)
{
- struct drm_minor *dminor = container_of(dev, struct drm_minor, kdev);
+ struct drm_minor *dminor = container_of(kdev, struct drm_minor, kdev);
u32 rc6pp_residency = calc_residency(dminor->dev, GEN6_GT_GFX_RC6pp);
return snprintf(buf, PAGE_SIZE, "%u", rc6pp_residency);
}
@@ -203,6 +203,184 @@ static struct bin_attribute dpf_attrs = {
.mmap = NULL
};
+static ssize_t gt_cur_freq_mhz_show(struct device *kdev,
+ struct device_attribute *attr, char *buf)
+{
+ struct drm_minor *minor = container_of(kdev, struct drm_minor, kdev);
+ struct drm_device *dev = minor->dev;
+ struct drm_i915_private *dev_priv = dev->dev_private;
+ int ret;
+
+ ret = i915_mutex_lock_interruptible(dev);
+ if (ret)
+ return ret;
+
+ ret = dev_priv->rps.cur_delay * GT_FREQUENCY_MULTIPLIER;
+ mutex_unlock(&dev->struct_mutex);
+
+ return snprintf(buf, PAGE_SIZE, "%d", ret);
+}
+
+static ssize_t gt_max_freq_mhz_show(struct device *kdev, struct device_attribute *attr, char *buf)
+{
+ struct drm_minor *minor = container_of(kdev, struct drm_minor, kdev);
+ struct drm_device *dev = minor->dev;
+ struct drm_i915_private *dev_priv = dev->dev_private;
+ int ret;
+
+ ret = i915_mutex_lock_interruptible(dev);
+ if (ret)
+ return ret;
+
+ ret = dev_priv->rps.max_delay * GT_FREQUENCY_MULTIPLIER;
+ mutex_unlock(&dev->struct_mutex);
+
+ return snprintf(buf, PAGE_SIZE, "%d", ret);
+}
+
+static ssize_t gt_max_freq_mhz_store(struct device *kdev,
+ struct device_attribute *attr,
+ const char *buf, size_t count)
+{
+ struct drm_minor *minor = container_of(kdev, struct drm_minor, kdev);
+ struct drm_device *dev = minor->dev;
+ struct drm_i915_private *dev_priv = dev->dev_private;
+ u32 val, rp_state_cap, hw_max, hw_min;
+ ssize_t ret;
+
+ ret = kstrtou32(buf, 0, &val);
+ if (ret)
+ return ret;
+
+ val /= GT_FREQUENCY_MULTIPLIER;
+
+ ret = mutex_lock_interruptible(&dev->struct_mutex);
+ if (ret)
+ return ret;
+
+ rp_state_cap = I915_READ(GEN6_RP_STATE_CAP);
+ hw_max = (rp_state_cap & 0xff);
+ hw_min = ((rp_state_cap & 0xff0000) >> 16);
+
+ if (val < hw_min || val > hw_max || val < dev_priv->rps.min_delay) {
+ mutex_unlock(&dev->struct_mutex);
+ return -EINVAL;
+ }
+
+ if (dev_priv->rps.cur_delay > val)
+ gen6_set_rps(dev_priv->dev, val);
+
+ dev_priv->rps.max_delay = val;
+
+ mutex_unlock(&dev->struct_mutex);
+
+ return count;
+}
+
+static ssize_t gt_min_freq_mhz_show(struct device *kdev, struct device_attribute *attr, char *buf)
+{
+ struct drm_minor *minor = container_of(kdev, struct drm_minor, kdev);
+ struct drm_device *dev = minor->dev;
+ struct drm_i915_private *dev_priv = dev->dev_private;
+ int ret;
+
+ ret = i915_mutex_lock_interruptible(dev);
+ if (ret)
+ return ret;
+
+ ret = dev_priv->rps.min_delay * GT_FREQUENCY_MULTIPLIER;
+ mutex_unlock(&dev->struct_mutex);
+
+ return snprintf(buf, PAGE_SIZE, "%d", ret);
+}
+
+static ssize_t gt_min_freq_mhz_store(struct device *kdev,
+ struct device_attribute *attr,
+ const char *buf, size_t count)
+{
+ struct drm_minor *minor = container_of(kdev, struct drm_minor, kdev);
+ struct drm_device *dev = minor->dev;
+ struct drm_i915_private *dev_priv = dev->dev_private;
+ u32 val, rp_state_cap, hw_max, hw_min;
+ ssize_t ret;
+
+ ret = kstrtou32(buf, 0, &val);
+ if (ret)
+ return ret;
+
+ val /= GT_FREQUENCY_MULTIPLIER;
+
+ ret = mutex_lock_interruptible(&dev->struct_mutex);
+ if (ret)
+ return ret;
+
+ rp_state_cap = I915_READ(GEN6_RP_STATE_CAP);
+ hw_max = (rp_state_cap & 0xff);
+ hw_min = ((rp_state_cap & 0xff0000) >> 16);
+
+ if (val < hw_min || val > hw_max || val > dev_priv->rps.max_delay) {
+ mutex_unlock(&dev->struct_mutex);
+ return -EINVAL;
+ }
+
+ if (dev_priv->rps.cur_delay < val)
+ gen6_set_rps(dev_priv->dev, val);
+
+ dev_priv->rps.min_delay = val;
+
+ mutex_unlock(&dev->struct_mutex);
+
+ return count;
+}
+
+static DEVICE_ATTR(gt_cur_freq_mhz, S_IRUGO, gt_cur_freq_mhz_show, NULL);
+static DEVICE_ATTR(gt_max_freq_mhz, S_IRUGO | S_IWUSR, gt_max_freq_mhz_show, gt_max_freq_mhz_store);
+static DEVICE_ATTR(gt_min_freq_mhz, S_IRUGO | S_IWUSR, gt_min_freq_mhz_show, gt_min_freq_mhz_store);
+
+
+static ssize_t gt_rp_mhz_show(struct device *kdev, struct device_attribute *attr, char *buf);
+static DEVICE_ATTR(gt_RP0_freq_mhz, S_IRUGO, gt_rp_mhz_show, NULL);
+static DEVICE_ATTR(gt_RP1_freq_mhz, S_IRUGO, gt_rp_mhz_show, NULL);
+static DEVICE_ATTR(gt_RPn_freq_mhz, S_IRUGO, gt_rp_mhz_show, NULL);
+
+/* For now we have a static number of RP states */
+static ssize_t gt_rp_mhz_show(struct device *kdev, struct device_attribute *attr, char *buf)
+{
+ struct drm_minor *minor = container_of(kdev, struct drm_minor, kdev);
+ struct drm_device *dev = minor->dev;
+ struct drm_i915_private *dev_priv = dev->dev_private;
+ u32 val, rp_state_cap;
+ ssize_t ret;
+
+ ret = mutex_lock_interruptible(&dev->struct_mutex);
+ if (ret)
+ return ret;
+ rp_state_cap = I915_READ(GEN6_RP_STATE_CAP);
+ mutex_unlock(&dev->struct_mutex);
+
+ if (attr == &dev_attr_gt_RP0_freq_mhz) {
+ val = ((rp_state_cap & 0x0000ff) >> 0) * GT_FREQUENCY_MULTIPLIER;
+ } else if (attr == &dev_attr_gt_RP1_freq_mhz) {
+ val = ((rp_state_cap & 0x00ff00) >> 8) * GT_FREQUENCY_MULTIPLIER;
+ } else if (attr == &dev_attr_gt_RPn_freq_mhz) {
+ val = ((rp_state_cap & 0xff0000) >> 16) * GT_FREQUENCY_MULTIPLIER;
+ } else {
+ BUG();
+ }
+ return snprintf(buf, PAGE_SIZE, "%d", val);
+}
+
+static const struct attribute *gen6_attrs[] = {
+ &dev_attr_gt_cur_freq_mhz.attr,
+ &dev_attr_gt_max_freq_mhz.attr,
+ &dev_attr_gt_min_freq_mhz.attr,
+ &dev_attr_gt_RP0_freq_mhz.attr,
+ &dev_attr_gt_RP1_freq_mhz.attr,
+ &dev_attr_gt_RPn_freq_mhz.attr,
+ NULL,
+};
+
void i915_setup_sysfs(struct drm_device *dev)
{
int ret;
@@ -220,10 +398,19 @@ void i915_setup_sysfs(struct drm_device *dev)
if (ret)
DRM_ERROR("l3 parity sysfs setup failed\n");
}
+
+ if (INTEL_INFO(dev)->gen >= 6) {
+ ret = sysfs_create_files(&dev->primary->kdev.kobj, gen6_attrs);
+ if (ret)
+ DRM_ERROR("gen6 sysfs setup failed\n");
+ }
}
void i915_teardown_sysfs(struct drm_device *dev)
{
+ sysfs_remove_files(&dev->primary->kdev.kobj, gen6_attrs);
device_remove_bin_file(&dev->primary->kdev, &dpf_attrs);
+#ifdef CONFIG_PM
sysfs_unmerge_group(&dev->primary->kdev.kobj, &rc6_attr_group);
+#endif
}
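
[Reviewer note] The new attributes hang off the primary node's device, so with
the usual sysfs layout they should appear as /sys/class/drm/card0/gt_*_freq_mhz
(path assumed, not stated in the patch). The values carry no trailing newline.
A hypothetical userspace read:

#include <fcntl.h>
#include <stdio.h>
#include <unistd.h>

int main(void)
{
	char buf[16];
	int fd = open("/sys/class/drm/card0/gt_cur_freq_mhz", O_RDONLY);
	ssize_t n = fd < 0 ? -1 : read(fd, buf, sizeof(buf) - 1);

	if (n > 0) {
		buf[n] = '\0';
		printf("GT clock: %s MHz\n", buf);
	}
	if (fd >= 0)
		close(fd);
	return 0;
}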
diff --git a/drivers/gpu/drm/i915/intel_display.c b/drivers/gpu/drm/i915/intel_display.c
index 5576b7c695e..6f5aafa1b63 100644
--- a/drivers/gpu/drm/i915/intel_display.c
+++ b/drivers/gpu/drm/i915/intel_display.c
@@ -1376,7 +1376,8 @@ static void assert_pch_dp_disabled(struct drm_i915_private *dev_priv,
"PCH DP (0x%08x) enabled on transcoder %c, should be disabled\n",
reg, pipe_name(pipe));
- WARN(HAS_PCH_IBX(dev_priv->dev) && (val & SDVO_PIPE_B_SELECT),
+ WARN(HAS_PCH_IBX(dev_priv->dev) && (val & DP_PORT_EN) == 0
+ && (val & DP_PIPEB_SELECT),
"IBX PCH dp port still using transcoder B\n");
}
@@ -1388,7 +1389,8 @@ static void assert_pch_hdmi_disabled(struct drm_i915_private *dev_priv,
"PCH HDMI (0x%08x) enabled on transcoder %c, should be disabled\n",
reg, pipe_name(pipe));
- WARN(HAS_PCH_IBX(dev_priv->dev) && (val & SDVO_PIPE_B_SELECT),
+ WARN(HAS_PCH_IBX(dev_priv->dev) && (val & PORT_ENABLE) == 0
+ && (val & SDVO_PIPE_B_SELECT),
"IBX PCH hdmi port still using transcoder B\n");
}
@@ -1860,59 +1862,6 @@ static void intel_disable_plane(struct drm_i915_private *dev_priv,
intel_wait_for_vblank(dev_priv->dev, pipe);
}
-static void disable_pch_dp(struct drm_i915_private *dev_priv,
- enum pipe pipe, int reg, u32 port_sel)
-{
- u32 val = I915_READ(reg);
- if (dp_pipe_enabled(dev_priv, pipe, port_sel, val)) {
- DRM_DEBUG_KMS("Disabling pch dp %x on pipe %d\n", reg, pipe);
- I915_WRITE(reg, val & ~DP_PORT_EN);
- }
-}
-
-static void disable_pch_hdmi(struct drm_i915_private *dev_priv,
- enum pipe pipe, int reg)
-{
- u32 val = I915_READ(reg);
- if (hdmi_pipe_enabled(dev_priv, pipe, val)) {
- DRM_DEBUG_KMS("Disabling pch HDMI %x on pipe %d\n",
- reg, pipe);
- I915_WRITE(reg, val & ~PORT_ENABLE);
- }
-}
-
-/* Disable any ports connected to this transcoder */
-static void intel_disable_pch_ports(struct drm_i915_private *dev_priv,
- enum pipe pipe)
-{
- u32 reg, val;
-
- val = I915_READ(PCH_PP_CONTROL);
- I915_WRITE(PCH_PP_CONTROL, val | PANEL_UNLOCK_REGS);
-
- disable_pch_dp(dev_priv, pipe, PCH_DP_B, TRANS_DP_PORT_SEL_B);
- disable_pch_dp(dev_priv, pipe, PCH_DP_C, TRANS_DP_PORT_SEL_C);
- disable_pch_dp(dev_priv, pipe, PCH_DP_D, TRANS_DP_PORT_SEL_D);
-
- reg = PCH_ADPA;
- val = I915_READ(reg);
- if (adpa_pipe_enabled(dev_priv, pipe, val))
- I915_WRITE(reg, val & ~ADPA_DAC_ENABLE);
-
- reg = PCH_LVDS;
- val = I915_READ(reg);
- if (lvds_pipe_enabled(dev_priv, pipe, val)) {
- DRM_DEBUG_KMS("disable lvds on pipe %d val 0x%08x\n", pipe, val);
- I915_WRITE(reg, val & ~LVDS_PORT_EN);
- POSTING_READ(reg);
- udelay(100);
- }
-
- disable_pch_hdmi(dev_priv, pipe, HDMIB);
- disable_pch_hdmi(dev_priv, pipe, HDMIC);
- disable_pch_hdmi(dev_priv, pipe, HDMID);
-}
-
int
intel_pin_and_fence_fb_obj(struct drm_device *dev,
struct drm_i915_gem_object *obj,
@@ -3237,10 +3186,16 @@ static void ironlake_crtc_enable(struct drm_crtc *crtc)
is_pch_port = intel_crtc_driving_pch(crtc);
- if (is_pch_port)
+ if (is_pch_port) {
ironlake_fdi_pll_enable(intel_crtc);
- else
- ironlake_fdi_disable(crtc);
+ } else {
+ assert_fdi_tx_disabled(dev_priv, pipe);
+ assert_fdi_rx_disabled(dev_priv, pipe);
+ }
+
+ for_each_encoder_on_crtc(dev, crtc, encoder)
+ if (encoder->pre_enable)
+ encoder->pre_enable(encoder);
/* Enable panel fitting for LVDS */
if (dev_priv->pch_pf_size &&
@@ -3311,14 +3266,11 @@ static void ironlake_crtc_disable(struct drm_crtc *crtc)
I915_WRITE(PF_CTL(pipe), 0);
I915_WRITE(PF_WIN_SZ(pipe), 0);
- ironlake_fdi_disable(crtc);
+ for_each_encoder_on_crtc(dev, crtc, encoder)
+ if (encoder->post_disable)
+ encoder->post_disable(encoder);
- /* This is a horrible layering violation; we should be doing this in
- * the connector/encoder ->prepare instead, but we don't always have
- * enough information there about the config to know whether it will
- * actually be necessary or just cause undesired flicker.
- */
- intel_disable_pch_ports(dev_priv, pipe);
+ ironlake_fdi_disable(crtc);
intel_disable_transcoder(dev_priv, pipe);
@@ -4279,12 +4231,6 @@ static void i8xx_update_pll(struct drm_crtc *crtc,
POSTING_READ(DPLL(pipe));
udelay(150);
- I915_WRITE(DPLL(pipe), dpll);
-
- /* Wait for the clocks to stabilize. */
- POSTING_READ(DPLL(pipe));
- udelay(150);
-
/* The LVDS pin pair needs to be on before the DPLLs are enabled.
* This is an exception to the general rule that mode_set doesn't turn
* things on.
@@ -4292,6 +4238,12 @@ static void i8xx_update_pll(struct drm_crtc *crtc,
if (intel_pipe_has_type(crtc, INTEL_OUTPUT_LVDS))
intel_update_lvds(crtc, clock, adjusted_mode);
+ I915_WRITE(DPLL(pipe), dpll);
+
+ /* Wait for the clocks to stabilize. */
+ POSTING_READ(DPLL(pipe));
+ udelay(150);
+
/* The pixel multiplier can only be updated once the
* DPLL is enabled and the clocks are stable.
*
@@ -4648,6 +4600,113 @@ static int ironlake_get_refclk(struct drm_crtc *crtc)
return 120000;
}
+static void ironlake_set_pipeconf(struct drm_crtc *crtc,
+ struct drm_display_mode *adjusted_mode,
+ bool dither)
+{
+ struct drm_i915_private *dev_priv = crtc->dev->dev_private;
+ struct intel_crtc *intel_crtc = to_intel_crtc(crtc);
+ int pipe = intel_crtc->pipe;
+ uint32_t val;
+
+ val = I915_READ(PIPECONF(pipe));
+
+ val &= ~PIPE_BPC_MASK;
+ switch (intel_crtc->bpp) {
+ case 18:
+ val |= PIPE_6BPC;
+ break;
+ case 24:
+ val |= PIPE_8BPC;
+ break;
+ case 30:
+ val |= PIPE_10BPC;
+ break;
+ case 36:
+ val |= PIPE_12BPC;
+ break;
+ default:
+ val |= PIPE_8BPC;
+ break;
+ }
+
+ val &= ~(PIPECONF_DITHER_EN | PIPECONF_DITHER_TYPE_MASK);
+ if (dither)
+ val |= (PIPECONF_DITHER_EN | PIPECONF_DITHER_TYPE_SP);
+
+ val &= ~PIPECONF_INTERLACE_MASK;
+ if (adjusted_mode->flags & DRM_MODE_FLAG_INTERLACE)
+ val |= PIPECONF_INTERLACED_ILK;
+ else
+ val |= PIPECONF_PROGRESSIVE;
+
+ I915_WRITE(PIPECONF(pipe), val);
+ POSTING_READ(PIPECONF(pipe));
+}
+
+static bool ironlake_compute_clocks(struct drm_crtc *crtc,
+ struct drm_display_mode *adjusted_mode,
+ intel_clock_t *clock,
+ bool *has_reduced_clock,
+ intel_clock_t *reduced_clock)
+{
+ struct drm_device *dev = crtc->dev;
+ struct drm_i915_private *dev_priv = dev->dev_private;
+ struct intel_encoder *intel_encoder;
+ int refclk;
+ const intel_limit_t *limit;
+ bool ret, is_sdvo = false, is_tv = false, is_lvds = false;
+
+ for_each_encoder_on_crtc(dev, crtc, intel_encoder) {
+ switch (intel_encoder->type) {
+ case INTEL_OUTPUT_LVDS:
+ is_lvds = true;
+ break;
+ case INTEL_OUTPUT_SDVO:
+ case INTEL_OUTPUT_HDMI:
+ is_sdvo = true;
+ if (intel_encoder->needs_tv_clock)
+ is_tv = true;
+ break;
+ case INTEL_OUTPUT_TVOUT:
+ is_tv = true;
+ break;
+ }
+ }
+
+ refclk = ironlake_get_refclk(crtc);
+
+ /*
+ * Returns a set of divisors for the desired target clock with the given
+ * refclk, or FALSE. The returned values represent the clock equation:
+ * refclk * (5 * (m1 + 2) + (m2 + 2)) / (n + 2) / p1 / p2.
+ */
+ limit = intel_limit(crtc, refclk);
+ ret = limit->find_pll(limit, crtc, adjusted_mode->clock, refclk, NULL,
+ clock);
+ if (!ret)
+ return false;
+
+ if (is_lvds && dev_priv->lvds_downclock_avail) {
+ /*
+ * Ensure we match the reduced clock's P to the target clock.
+ * If the clocks don't match, we can't switch the display clock
+ * by using the FP0/FP1. In that case we will disable the LVDS
+ * downclock feature.
+ */
+ *has_reduced_clock = limit->find_pll(limit, crtc,
+ dev_priv->lvds_downclock,
+ refclk,
+ clock,
+ reduced_clock);
+ }
+
+ if (is_sdvo && is_tv)
+ i9xx_adjust_sdvo_tv_clock(adjusted_mode, clock);
+
+ return true;
+}
+
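
To make the divisor comment in ironlake_compute_clocks() concrete, here is the equation evaluated once with hypothetical divisors (illustration only, not values from any real mode; refclk = 120000 kHz matches the ironlake_get_refclk() fallback above):

    /* Worked example of:
     * refclk * (5 * (m1 + 2) + (m2 + 2)) / (n + 2) / p1 / p2 */
    static int ironlake_dot_clock_example(void)
    {
            int refclk = 120000, m1 = 12, m2 = 9, n = 3, p1 = 2, p2 = 5;

            /* 120000 * (5 * 14 + 11) / 5 / 2 / 5 = 194400 kHz */
            return refclk * (5 * (m1 + 2) + (m2 + 2)) / (n + 2) / p1 / p2;
    }
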
static int ironlake_crtc_mode_set(struct drm_crtc *crtc,
struct drm_display_mode *mode,
struct drm_display_mode *adjusted_mode,
@@ -4659,13 +4718,12 @@ static int ironlake_crtc_mode_set(struct drm_crtc *crtc,
struct intel_crtc *intel_crtc = to_intel_crtc(crtc);
int pipe = intel_crtc->pipe;
int plane = intel_crtc->plane;
- int refclk, num_connectors = 0;
+ int num_connectors = 0;
intel_clock_t clock, reduced_clock;
- u32 dpll, fp = 0, fp2 = 0, dspcntr, pipeconf;
+ u32 dpll, fp = 0, fp2 = 0;
bool ok, has_reduced_clock = false, is_sdvo = false;
bool is_crt = false, is_lvds = false, is_tv = false, is_dp = false;
struct intel_encoder *encoder, *edp_encoder = NULL;
- const intel_limit_t *limit;
int ret;
struct fdi_m_n m_n = {0};
u32 temp;
@@ -4707,16 +4765,8 @@ static int ironlake_crtc_mode_set(struct drm_crtc *crtc,
num_connectors++;
}
- refclk = ironlake_get_refclk(crtc);
-
- /*
- * Returns a set of divisors for the desired target clock with the given
- * refclk, or FALSE. The returned values represent the clock equation:
- * reflck * (5 * (m1 + 2) + (m2 + 2)) / (n + 2) / p1 / p2.
- */
- limit = intel_limit(crtc, refclk);
- ok = limit->find_pll(limit, crtc, adjusted_mode->clock, refclk, NULL,
- &clock);
+ ok = ironlake_compute_clocks(crtc, adjusted_mode, &clock,
+ &has_reduced_clock, &reduced_clock);
if (!ok) {
DRM_ERROR("Couldn't find PLL settings for mode!\n");
return -EINVAL;
@@ -4725,24 +4775,6 @@ static int ironlake_crtc_mode_set(struct drm_crtc *crtc,
/* Ensure that the cursor is valid for the new mode before changing... */
intel_crtc_update_cursor(crtc, true);
- if (is_lvds && dev_priv->lvds_downclock_avail) {
- /*
- * Ensure we match the reduced clock's P to the target clock.
- * If the clocks don't match, we can't switch the display clock
- * by using the FP0/FP1. In such case we will disable the LVDS
- * downclock feature.
- */
- has_reduced_clock = limit->find_pll(limit, crtc,
- dev_priv->lvds_downclock,
- refclk,
- &clock,
- &reduced_clock);
- }
-
- if (is_sdvo && is_tv)
- i9xx_adjust_sdvo_tv_clock(adjusted_mode, &clock);
-
-
/* FDI link */
pixel_multiplier = intel_mode_get_pixel_multiplier(adjusted_mode);
lane = 0;
@@ -4770,32 +4802,17 @@ static int ironlake_crtc_mode_set(struct drm_crtc *crtc,
target_clock = adjusted_mode->clock;
/* determine panel color depth */
- temp = I915_READ(PIPECONF(pipe));
- temp &= ~PIPE_BPC_MASK;
dither = intel_choose_pipe_bpp_dither(crtc, fb, &pipe_bpp, mode);
- switch (pipe_bpp) {
- case 18:
- temp |= PIPE_6BPC;
- break;
- case 24:
- temp |= PIPE_8BPC;
- break;
- case 30:
- temp |= PIPE_10BPC;
- break;
- case 36:
- temp |= PIPE_12BPC;
- break;
- default:
+ if (is_lvds && dev_priv->lvds_dither)
+ dither = true;
+
+ if (pipe_bpp != 18 && pipe_bpp != 24 && pipe_bpp != 30 &&
+ pipe_bpp != 36) {
WARN(1, "intel_choose_pipe_bpp returned invalid value %d\n",
- pipe_bpp);
- temp |= PIPE_8BPC;
+ pipe_bpp);
pipe_bpp = 24;
- break;
}
-
intel_crtc->bpp = pipe_bpp;
- I915_WRITE(PIPECONF(pipe), temp);
if (!lane) {
/*
@@ -4879,12 +4896,6 @@ static int ironlake_crtc_mode_set(struct drm_crtc *crtc,
else
dpll |= PLL_REF_INPUT_DREFCLK;
- /* setup pipeconf */
- pipeconf = I915_READ(PIPECONF(pipe));
-
- /* Set up the display plane register */
- dspcntr = DISPPLANE_GAMMA_ENABLE;
-
DRM_DEBUG_KMS("Mode for pipe %d:\n", pipe);
drm_mode_debug_printmodeline(mode);
@@ -4944,12 +4955,6 @@ static int ironlake_crtc_mode_set(struct drm_crtc *crtc,
I915_WRITE(PCH_LVDS, temp);
}
- pipeconf &= ~PIPECONF_DITHER_EN;
- pipeconf &= ~PIPECONF_DITHER_TYPE_MASK;
- if ((is_lvds && dev_priv->lvds_dither) || dither) {
- pipeconf |= PIPECONF_DITHER_EN;
- pipeconf |= PIPECONF_DITHER_TYPE_SP;
- }
if (is_dp && !is_cpu_edp) {
intel_dp_set_m_n(crtc, mode, adjusted_mode);
} else {
@@ -4985,9 +4990,7 @@ static int ironlake_crtc_mode_set(struct drm_crtc *crtc,
}
}
- pipeconf &= ~PIPECONF_INTERLACE_MASK;
if (adjusted_mode->flags & DRM_MODE_FLAG_INTERLACE) {
- pipeconf |= PIPECONF_INTERLACED_ILK;
/* the chip adds 2 halflines automatically */
adjusted_mode->crtc_vtotal -= 1;
adjusted_mode->crtc_vblank_end -= 1;
@@ -4995,7 +4998,6 @@ static int ironlake_crtc_mode_set(struct drm_crtc *crtc,
adjusted_mode->crtc_hsync_start
- adjusted_mode->crtc_htotal/2);
} else {
- pipeconf |= PIPECONF_PROGRESSIVE;
I915_WRITE(VSYNCSHIFT(pipe), 0);
}
@@ -5033,12 +5035,12 @@ static int ironlake_crtc_mode_set(struct drm_crtc *crtc,
if (is_cpu_edp)
ironlake_set_pll_edp(crtc, adjusted_mode->clock);
- I915_WRITE(PIPECONF(pipe), pipeconf);
- POSTING_READ(PIPECONF(pipe));
+ ironlake_set_pipeconf(crtc, adjusted_mode, dither);
intel_wait_for_vblank(dev, pipe);
- I915_WRITE(DSPCNTR(plane), dspcntr);
+ /* Set up the display plane register */
+ I915_WRITE(DSPCNTR(plane), DISPPLANE_GAMMA_ENABLE);
POSTING_READ(DSPCNTR(plane));
ret = intel_pipe_set_base(crtc, x, y, fb);
@@ -6866,7 +6868,13 @@ intel_modeset_update_state(struct drm_device *dev, unsigned prepare_pipes)
intel_crtc = to_intel_crtc(connector->encoder->crtc);
if (prepare_pipes & (1 << intel_crtc->pipe)) {
+ struct drm_property *dpms_property =
+ dev->mode_config.dpms_property;
+
connector->dpms = DRM_MODE_DPMS_ON;
+ drm_connector_property_set_value(connector,
+ dpms_property,
+ DRM_MODE_DPMS_ON);
intel_encoder = to_intel_encoder(connector->encoder);
intel_encoder->connectors_active = true;
@@ -7296,7 +7304,6 @@ static int intel_crtc_set_config(struct drm_mode_set *set)
struct drm_mode_set save_set;
struct intel_set_config *config;
int ret;
- int i;
BUG_ON(!set);
BUG_ON(!set->crtc);
@@ -7360,15 +7367,6 @@ static int intel_crtc_set_config(struct drm_mode_set *set)
ret = -EINVAL;
goto fail;
}
-
- if (set->crtc->enabled) {
- DRM_DEBUG_KMS("Setting connector DPMS state to on\n");
- for (i = 0; i < set->num_connectors; i++) {
- DRM_DEBUG_KMS("\t[CONNECTOR:%d:%s] set DPMS on\n", set->connectors[i]->base.id,
- drm_get_connector_name(set->connectors[i]));
- set->connectors[i]->funcs->dpms(set->connectors[i], DRM_MODE_DPMS_ON);
- }
- }
} else if (config->fb_changed) {
ret = intel_pipe_set_base(set->crtc,
set->x, set->y, set->fb);
diff --git a/drivers/gpu/drm/i915/intel_dp.c b/drivers/gpu/drm/i915/intel_dp.c
index c59710db653..1474f84fdbd 100644
--- a/drivers/gpu/drm/i915/intel_dp.c
+++ b/drivers/gpu/drm/i915/intel_dp.c
@@ -808,9 +808,6 @@ intel_dp_set_m_n(struct drm_crtc *crtc, struct drm_display_mode *mode,
}
}
-static void ironlake_edp_pll_on(struct drm_encoder *encoder);
-static void ironlake_edp_pll_off(struct drm_encoder *encoder);
-
static void
intel_dp_mode_set(struct drm_encoder *encoder, struct drm_display_mode *mode,
struct drm_display_mode *adjusted_mode)
@@ -821,14 +818,6 @@ intel_dp_mode_set(struct drm_encoder *encoder, struct drm_display_mode *mode,
struct drm_crtc *crtc = intel_dp->base.base.crtc;
struct intel_crtc *intel_crtc = to_intel_crtc(crtc);
- /* Turn on the eDP PLL if needed */
- if (is_edp(intel_dp)) {
- if (!is_pch_edp(intel_dp))
- ironlake_edp_pll_on(encoder);
- else
- ironlake_edp_pll_off(encoder);
- }
-
/*
* There are four kinds of DP registers:
*
@@ -898,7 +887,6 @@ intel_dp_mode_set(struct drm_encoder *encoder, struct drm_display_mode *mode,
intel_dp->DP |= intel_crtc->pipe << 29;
/* don't miss out required setting for eDP */
- intel_dp->DP |= DP_PLL_ENABLE;
if (adjusted_mode->clock < 200000)
intel_dp->DP |= DP_PLL_FREQ_160MHZ;
else
@@ -920,7 +908,6 @@ intel_dp_mode_set(struct drm_encoder *encoder, struct drm_display_mode *mode,
if (is_cpu_edp(intel_dp)) {
/* don't miss out required setting for eDP */
- intel_dp->DP |= DP_PLL_ENABLE;
if (adjusted_mode->clock < 200000)
intel_dp->DP |= DP_PLL_FREQ_160MHZ;
else
@@ -1191,27 +1178,49 @@ static void ironlake_edp_backlight_off(struct intel_dp *intel_dp)
msleep(intel_dp->backlight_off_delay);
}
-static void ironlake_edp_pll_on(struct drm_encoder *encoder)
+static void ironlake_edp_pll_on(struct intel_dp *intel_dp)
{
- struct drm_device *dev = encoder->dev;
+ struct drm_device *dev = intel_dp->base.base.dev;
+ struct drm_crtc *crtc = intel_dp->base.base.crtc;
struct drm_i915_private *dev_priv = dev->dev_private;
u32 dpa_ctl;
+ assert_pipe_disabled(dev_priv,
+ to_intel_crtc(crtc)->pipe);
+
DRM_DEBUG_KMS("\n");
dpa_ctl = I915_READ(DP_A);
- dpa_ctl |= DP_PLL_ENABLE;
- I915_WRITE(DP_A, dpa_ctl);
+ WARN(dpa_ctl & DP_PLL_ENABLE, "dp pll on, should be off\n");
+ WARN(dpa_ctl & DP_PORT_EN, "dp port still on, should be off\n");
+
+ /* We don't adjust intel_dp->DP while tearing down the link, to
+ * facilitate link retraining (e.g. after hotplug). Hence clear all
+ * enable bits here to ensure that we don't enable too much. */
+ intel_dp->DP &= ~(DP_PORT_EN | DP_AUDIO_OUTPUT_ENABLE);
+ intel_dp->DP |= DP_PLL_ENABLE;
+ I915_WRITE(DP_A, intel_dp->DP);
POSTING_READ(DP_A);
udelay(200);
}
-static void ironlake_edp_pll_off(struct drm_encoder *encoder)
+static void ironlake_edp_pll_off(struct intel_dp *intel_dp)
{
- struct drm_device *dev = encoder->dev;
+ struct drm_device *dev = intel_dp->base.base.dev;
+ struct drm_crtc *crtc = intel_dp->base.base.crtc;
struct drm_i915_private *dev_priv = dev->dev_private;
u32 dpa_ctl;
+ assert_pipe_disabled(dev_priv,
+ to_intel_crtc(crtc)->pipe);
+
dpa_ctl = I915_READ(DP_A);
+ WARN((dpa_ctl & DP_PLL_ENABLE) == 0,
+ "dp pll off, should be on\n");
+ WARN(dpa_ctl & DP_PORT_EN, "dp port still on, should be off\n");
+
+ /* We can't rely on the value tracked for the DP register in
+ * intel_dp->DP because link_down must not change that (otherwise link
+ * re-training will fail). */
dpa_ctl &= ~DP_PLL_ENABLE;
I915_WRITE(DP_A, dpa_ctl);
POSTING_READ(DP_A);
@@ -1306,7 +1315,20 @@ static void intel_disable_dp(struct intel_encoder *encoder)
ironlake_edp_backlight_off(intel_dp);
intel_dp_sink_dpms(intel_dp, DRM_MODE_DPMS_ON);
ironlake_edp_panel_off(intel_dp);
- intel_dp_link_down(intel_dp);
+
+ /* cpu edp may only be disabled _after_ the cpu pipe/plane is disabled. */
+ if (!is_cpu_edp(intel_dp))
+ intel_dp_link_down(intel_dp);
+}
+
+static void intel_post_disable_dp(struct intel_encoder *encoder)
+{
+ struct intel_dp *intel_dp = enc_to_intel_dp(&encoder->base);
+
+ if (is_cpu_edp(intel_dp)) {
+ intel_dp_link_down(intel_dp);
+ ironlake_edp_pll_off(intel_dp);
+ }
}
static void intel_enable_dp(struct intel_encoder *encoder)
@@ -1316,51 +1338,24 @@ static void intel_enable_dp(struct intel_encoder *encoder)
struct drm_i915_private *dev_priv = dev->dev_private;
uint32_t dp_reg = I915_READ(intel_dp->output_reg);
+ if (WARN_ON(dp_reg & DP_PORT_EN))
+ return;
+
ironlake_edp_panel_vdd_on(intel_dp);
intel_dp_sink_dpms(intel_dp, DRM_MODE_DPMS_ON);
- if (!(dp_reg & DP_PORT_EN)) {
- intel_dp_start_link_train(intel_dp);
- ironlake_edp_panel_on(intel_dp);
- ironlake_edp_panel_vdd_off(intel_dp, true);
- intel_dp_complete_link_train(intel_dp);
- } else
- ironlake_edp_panel_vdd_off(intel_dp, false);
+ intel_dp_start_link_train(intel_dp);
+ ironlake_edp_panel_on(intel_dp);
+ ironlake_edp_panel_vdd_off(intel_dp, true);
+ intel_dp_complete_link_train(intel_dp);
ironlake_edp_backlight_on(intel_dp);
}
-static void
-intel_dp_dpms(struct drm_connector *connector, int mode)
+static void intel_pre_enable_dp(struct intel_encoder *encoder)
{
- struct intel_dp *intel_dp = intel_attached_dp(connector);
-
- /* DP supports only 2 dpms states. */
- if (mode != DRM_MODE_DPMS_ON)
- mode = DRM_MODE_DPMS_OFF;
-
- if (mode == connector->dpms)
- return;
-
- connector->dpms = mode;
-
- /* Only need to change hw state when actually enabled */
- if (!intel_dp->base.base.crtc) {
- intel_dp->base.connectors_active = false;
- return;
- }
-
- if (mode != DRM_MODE_DPMS_ON) {
- intel_encoder_dpms(&intel_dp->base, mode);
-
- if (is_cpu_edp(intel_dp))
- ironlake_edp_pll_off(&intel_dp->base.base);
- } else {
- if (is_cpu_edp(intel_dp))
- ironlake_edp_pll_on(&intel_dp->base.base);
-
- intel_encoder_dpms(&intel_dp->base, mode);
- }
+ struct intel_dp *intel_dp = enc_to_intel_dp(&encoder->base);
- intel_modeset_check_state(connector->dev);
+ if (is_cpu_edp(intel_dp))
+ ironlake_edp_pll_on(intel_dp);
}
/*
@@ -1743,25 +1738,12 @@ static void
intel_dp_start_link_train(struct intel_dp *intel_dp)
{
struct drm_device *dev = intel_dp->base.base.dev;
- struct drm_i915_private *dev_priv = dev->dev_private;
- struct intel_crtc *intel_crtc = to_intel_crtc(intel_dp->base.base.crtc);
int i;
uint8_t voltage;
bool clock_recovery = false;
int voltage_tries, loop_tries;
uint32_t DP = intel_dp->DP;
- /*
- * On CPT we have to enable the port in training pattern 1, which
- * will happen below in intel_dp_set_link_train. Otherwise, enable
- * the port and wait for it to become active.
- */
- if (!HAS_PCH_CPT(dev)) {
- I915_WRITE(intel_dp->output_reg, intel_dp->DP);
- POSTING_READ(intel_dp->output_reg);
- intel_wait_for_vblank(dev, intel_crtc->pipe);
- }
-
/* Write the link configuration data */
intel_dp_aux_native_write(intel_dp, DP_LINK_BW_SET,
intel_dp->link_configuration,
@@ -1923,18 +1905,11 @@ intel_dp_link_down(struct intel_dp *intel_dp)
struct drm_i915_private *dev_priv = dev->dev_private;
uint32_t DP = intel_dp->DP;
- if ((I915_READ(intel_dp->output_reg) & DP_PORT_EN) == 0)
+ if (WARN_ON((I915_READ(intel_dp->output_reg) & DP_PORT_EN) == 0))
return;
DRM_DEBUG_KMS("\n");
- if (is_edp(intel_dp)) {
- DP &= ~DP_PLL_ENABLE;
- I915_WRITE(intel_dp->output_reg, DP);
- POSTING_READ(intel_dp->output_reg);
- udelay(100);
- }
-
if (HAS_PCH_CPT(dev) && (IS_GEN7(dev) || !is_cpu_edp(intel_dp))) {
DP &= ~DP_LINK_TRAIN_MASK_CPT;
I915_WRITE(intel_dp->output_reg, DP | DP_LINK_TRAIN_PAT_IDLE_CPT);
@@ -1946,13 +1921,6 @@ intel_dp_link_down(struct intel_dp *intel_dp)
msleep(17);
- if (is_edp(intel_dp)) {
- if (HAS_PCH_CPT(dev) && (IS_GEN7(dev) || !is_cpu_edp(intel_dp)))
- DP |= DP_LINK_TRAIN_OFF_CPT;
- else
- DP |= DP_LINK_TRAIN_OFF;
- }
-
if (HAS_PCH_IBX(dev) &&
I915_READ(intel_dp->output_reg) & DP_PIPEB_SELECT) {
struct drm_crtc *crtc = intel_dp->base.base.crtc;
@@ -2391,7 +2359,7 @@ static const struct drm_encoder_helper_funcs intel_dp_helper_funcs = {
};
static const struct drm_connector_funcs intel_dp_connector_funcs = {
- .dpms = intel_dp_dpms,
+ .dpms = intel_connector_dpms,
.detect = intel_dp_detect,
.fill_modes = drm_helper_probe_single_connector_modes,
.set_property = intel_dp_set_property,
@@ -2478,6 +2446,8 @@ intel_dp_init(struct drm_device *dev, int output_reg, enum port port)
intel_dp->output_reg = output_reg;
intel_dp->port = port;
+ /* Preserve the current hw state. */
+ intel_dp->DP = I915_READ(intel_dp->output_reg);
intel_connector = kzalloc(sizeof(struct intel_connector), GFP_KERNEL);
if (!intel_connector) {
@@ -2522,7 +2492,9 @@ intel_dp_init(struct drm_device *dev, int output_reg, enum port port)
drm_sysfs_connector_add(connector);
intel_encoder->enable = intel_enable_dp;
+ intel_encoder->pre_enable = intel_pre_enable_dp;
intel_encoder->disable = intel_disable_dp;
+ intel_encoder->post_disable = intel_post_disable_dp;
intel_encoder->get_hw_state = intel_dp_get_hw_state;
intel_connector->get_hw_state = intel_connector_get_hw_state;
@@ -2548,14 +2520,10 @@ intel_dp_init(struct drm_device *dev, int output_reg, enum port port)
break;
}
- intel_dp_i2c_init(intel_dp, intel_connector, name);
-
/* Cache some DPCD data in the eDP case */
if (is_edp(intel_dp)) {
- bool ret;
struct edp_power_seq cur, vbt;
u32 pp_on, pp_off, pp_div;
- struct edid *edid;
pp_on = I915_READ(PCH_PP_ON_DELAYS);
pp_off = I915_READ(PCH_PP_OFF_DELAYS);
@@ -2606,6 +2574,13 @@ intel_dp_init(struct drm_device *dev, int output_reg, enum port port)
DRM_DEBUG_KMS("backlight on delay %d, off delay %d\n",
intel_dp->backlight_on_delay, intel_dp->backlight_off_delay);
+ }
+
+ intel_dp_i2c_init(intel_dp, intel_connector, name);
+
+ if (is_edp(intel_dp)) {
+ bool ret;
+ struct edid *edid;
ironlake_edp_panel_vdd_on(intel_dp);
ret = intel_dp_get_dpcd(intel_dp);
diff --git a/drivers/gpu/drm/i915/intel_drv.h b/drivers/gpu/drm/i915/intel_drv.h
index 4f2b2d6a248..351fd7179cd 100644
--- a/drivers/gpu/drm/i915/intel_drv.h
+++ b/drivers/gpu/drm/i915/intel_drv.h
@@ -152,8 +152,10 @@ struct intel_encoder {
bool cloneable;
bool connectors_active;
void (*hot_plug)(struct intel_encoder *);
+ void (*pre_enable)(struct intel_encoder *);
void (*enable)(struct intel_encoder *);
void (*disable)(struct intel_encoder *);
+ void (*post_disable)(struct intel_encoder *);
/* Read out the current hw state of this connector, returning true if
* the encoder is active. If the encoder is enabled it also set the pipe
* it is connected to in the pipe parameter. */
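
Read together with the ironlake_crtc_enable()/ironlake_crtc_disable() hunks above, the new hooks bracket the pipe: pre_enable runs while the pipe is still off (CPU eDP turns its PLL on there), and post_disable runs once the pipe is down (CPU eDP takes the link and PLL down there). A simplified sketch of the disable side, using the hooks declared above:

    /* Sketch only; condenses the ironlake_crtc_disable() hunk. */
    static void crtc_disable_sketch(struct intel_encoder *encoder)
    {
            encoder->disable(encoder);              /* port off */
            /* ... plane, pipe and panel fitter are disabled here ... */
            if (encoder->post_disable)
                    encoder->post_disable(encoder); /* safe now: link down, eDP PLL off */
            /* ... FDI and PCH transcoder teardown follow ... */
    }
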
diff --git a/drivers/gpu/drm/i915/intel_hdmi.c b/drivers/gpu/drm/i915/intel_hdmi.c
index 5d02aad0de8..08f2b63d740 100644
--- a/drivers/gpu/drm/i915/intel_hdmi.c
+++ b/drivers/gpu/drm/i915/intel_hdmi.c
@@ -151,6 +151,9 @@ static void g4x_write_infoframe(struct drm_encoder *encoder,
I915_WRITE(VIDEO_DIP_DATA, *data);
data++;
}
+ /* Write every possible data byte to force correct ECC calculation. */
+ for (; i < VIDEO_DIP_DATA_SIZE; i += 4)
+ I915_WRITE(VIDEO_DIP_DATA, 0);
mmiowb();
val |= g4x_infoframe_enable(frame);
@@ -186,6 +189,9 @@ static void ibx_write_infoframe(struct drm_encoder *encoder,
I915_WRITE(TVIDEO_DIP_DATA(intel_crtc->pipe), *data);
data++;
}
+ /* Write every possible data byte to force correct ECC calculation. */
+ for (; i < VIDEO_DIP_DATA_SIZE; i += 4)
+ I915_WRITE(TVIDEO_DIP_DATA(intel_crtc->pipe), 0);
mmiowb();
val |= g4x_infoframe_enable(frame);
@@ -224,6 +230,9 @@ static void cpt_write_infoframe(struct drm_encoder *encoder,
I915_WRITE(TVIDEO_DIP_DATA(intel_crtc->pipe), *data);
data++;
}
+ /* Write every possible data byte to force correct ECC calculation. */
+ for (; i < VIDEO_DIP_DATA_SIZE; i += 4)
+ I915_WRITE(TVIDEO_DIP_DATA(intel_crtc->pipe), 0);
mmiowb();
val |= g4x_infoframe_enable(frame);
@@ -259,6 +268,9 @@ static void vlv_write_infoframe(struct drm_encoder *encoder,
I915_WRITE(VLV_TVIDEO_DIP_DATA(intel_crtc->pipe), *data);
data++;
}
+ /* Write every possible data byte to force correct ECC calculation. */
+ for (; i < VIDEO_DIP_DATA_SIZE; i += 4)
+ I915_WRITE(VLV_TVIDEO_DIP_DATA(intel_crtc->pipe), 0);
mmiowb();
val |= g4x_infoframe_enable(frame);
@@ -292,6 +304,9 @@ static void hsw_write_infoframe(struct drm_encoder *encoder,
I915_WRITE(data_reg + i, *data);
data++;
}
+ /* Write every possible data byte to force correct ECC calculation. */
+ for (; i < VIDEO_DIP_DATA_SIZE; i += 4)
+ I915_WRITE(data_reg + i, 0);
mmiowb();
val |= hsw_infoframe_enable(frame);
@@ -377,6 +392,7 @@ static void g4x_set_infoframes(struct drm_encoder *encoder,
port = VIDEO_DIP_PORT_C;
break;
default:
+ BUG();
return;
}
@@ -435,6 +451,7 @@ static void ibx_set_infoframes(struct drm_encoder *encoder,
port = VIDEO_DIP_PORT_D;
break;
default:
+ BUG();
return;
}
@@ -674,10 +691,7 @@ static void intel_disable_hdmi(struct intel_encoder *encoder)
struct drm_i915_private *dev_priv = dev->dev_private;
struct intel_hdmi *intel_hdmi = enc_to_intel_hdmi(&encoder->base);
u32 temp;
- u32 enable_bits = SDVO_ENABLE;
-
- if (intel_hdmi->has_audio)
- enable_bits |= SDVO_AUDIO_ENABLE;
+ u32 enable_bits = SDVO_ENABLE | SDVO_AUDIO_ENABLE;
temp = I915_READ(intel_hdmi->sdvox_reg);
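
All five write_infoframe variants above gain the same padding loop. The shared idiom, as a sketch with a hypothetical write_dword() callback standing in for each platform's DIP data-register write:

    /* Pad the DIP buffer out to VIDEO_DIP_DATA_SIZE so the hardware ECC
     * is computed over known bytes rather than stale data left over from
     * a previously written, longer frame. Sketch only. */
    static void dip_write_padded(const u32 *data, unsigned int len,
                                 void (*write_dword)(u32 val))
    {
            unsigned int i;

            for (i = 0; i < len; i += 4)
                    write_dword(*data++);
            for (; i < VIDEO_DIP_DATA_SIZE; i += 4)
                    write_dword(0);
    }
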
diff --git a/drivers/gpu/drm/i915/intel_lvds.c b/drivers/gpu/drm/i915/intel_lvds.c
index dc9f9ff34d8..40d72bd64e1 100644
--- a/drivers/gpu/drm/i915/intel_lvds.c
+++ b/drivers/gpu/drm/i915/intel_lvds.c
@@ -770,6 +770,14 @@ static const struct dmi_system_id intel_no_lvds[] = {
DMI_MATCH(DMI_BOARD_NAME, "ZBOXSD-ID12/ID13"),
},
},
+ {
+ .callback = intel_no_lvds_dmi_callback,
+ .ident = "Gigabyte GA-D525TUD",
+ .matches = {
+ DMI_MATCH(DMI_BOARD_VENDOR, "Gigabyte Technology Co., Ltd."),
+ DMI_MATCH(DMI_BOARD_NAME, "D525TUD"),
+ },
+ },
{ } /* terminating entry */
};
diff --git a/drivers/gpu/drm/i915/intel_opregion.c b/drivers/gpu/drm/i915/intel_opregion.c
index 18bd0af855d..e27c1701262 100644
--- a/drivers/gpu/drm/i915/intel_opregion.c
+++ b/drivers/gpu/drm/i915/intel_opregion.c
@@ -427,6 +427,25 @@ blind_set:
goto end;
}
+static void intel_setup_cadls(struct drm_device *dev)
+{
+ struct drm_i915_private *dev_priv = dev->dev_private;
+ struct intel_opregion *opregion = &dev_priv->opregion;
+ int i = 0;
+ u32 disp_id;
+
+ /* Initialize the CADL field by duplicating the DIDL values.
+ * Technically, this is not always correct as display outputs may exist
+ * but not be active. This initialization is necessary for some Clevo
+ * laptops that check this field before processing the brightness and
+ * display switching hotkeys. Just like DIDL, CADL is NULL-terminated if
+ * there are fewer than eight devices. */
+ do {
+ disp_id = ioread32(&opregion->acpi->didl[i]);
+ iowrite32(disp_id, &opregion->acpi->cadl[i]);
+ } while (++i < 8 && disp_id != 0);
+}
+
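
Note that the do/while above copies the terminating zero itself before stopping, so CADL ends up NULL-terminated exactly like DIDL. With hypothetical display IDs (sketch, not driver code):

    /* didl[] holds two outputs plus the NULL terminator; after the loop,
     * cadl[] is { 0x0400, 0x0301, 0, ... } and i == 3. */
    static void cadl_copy_example(void)
    {
            u32 didl[8] = { 0x0400, 0x0301, 0 };    /* hypothetical IDs */
            u32 cadl[8] = { 0 };
            u32 disp_id;
            int i = 0;

            do {
                    disp_id = didl[i];
                    cadl[i] = disp_id;
            } while (++i < 8 && disp_id != 0);
    }
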
void intel_opregion_init(struct drm_device *dev)
{
struct drm_i915_private *dev_priv = dev->dev_private;
@@ -436,8 +455,10 @@ void intel_opregion_init(struct drm_device *dev)
return;
if (opregion->acpi) {
- if (drm_core_check_feature(dev, DRIVER_MODESET))
+ if (drm_core_check_feature(dev, DRIVER_MODESET)) {
intel_didl_outputs(dev);
+ intel_setup_cadls(dev);
+ }
/* Notify BIOS we are ready to handle ACPI video ext notifs.
* Right now, all the events are handled by the ACPI video module.
diff --git a/drivers/gpu/drm/i915/intel_panel.c b/drivers/gpu/drm/i915/intel_panel.c
index 3df4f5fa892..e019b236986 100644
--- a/drivers/gpu/drm/i915/intel_panel.c
+++ b/drivers/gpu/drm/i915/intel_panel.c
@@ -162,19 +162,12 @@ static u32 i915_read_blc_pwm_ctl(struct drm_i915_private *dev_priv)
return val;
}
-u32 intel_panel_get_max_backlight(struct drm_device *dev)
+static u32 _intel_panel_get_max_backlight(struct drm_device *dev)
{
struct drm_i915_private *dev_priv = dev->dev_private;
u32 max;
max = i915_read_blc_pwm_ctl(dev_priv);
- if (max == 0) {
- /* XXX add code here to query mode clock or hardware clock
- * and program max PWM appropriately.
- */
- pr_warn_once("fixme: max PWM is zero\n");
- return 1;
- }
if (HAS_PCH_SPLIT(dev)) {
max >>= 16;
@@ -188,6 +181,22 @@ u32 intel_panel_get_max_backlight(struct drm_device *dev)
max *= 0xff;
}
+ return max;
+}
+
+u32 intel_panel_get_max_backlight(struct drm_device *dev)
+{
+ u32 max;
+
+ max = _intel_panel_get_max_backlight(dev);
+ if (max == 0) {
+ /* XXX add code here to query mode clock or hardware clock
+ * and program max PWM appropriately.
+ */
+ pr_warn_once("fixme: max PWM is zero\n");
+ return 1;
+ }
+
DRM_DEBUG_DRIVER("max backlight PWM = %d\n", max);
return max;
}
@@ -424,7 +433,11 @@ int intel_panel_setup_backlight(struct drm_device *dev)
memset(&props, 0, sizeof(props));
props.type = BACKLIGHT_RAW;
- props.max_brightness = intel_panel_get_max_backlight(dev);
+ props.max_brightness = _intel_panel_get_max_backlight(dev);
+ if (props.max_brightness == 0) {
+ DRM_ERROR("Failed to get maximum backlight value\n");
+ return -ENODEV;
+ }
dev_priv->backlight =
backlight_device_register("intel_backlight",
&connector->kdev, dev,
diff --git a/drivers/gpu/drm/i915/intel_pm.c b/drivers/gpu/drm/i915/intel_pm.c
index 36c64091bc9..d69f8f49beb 100644
--- a/drivers/gpu/drm/i915/intel_pm.c
+++ b/drivers/gpu/drm/i915/intel_pm.c
@@ -2324,6 +2324,8 @@ void gen6_set_rps(struct drm_device *dev, u8 val)
u32 limits = gen6_rps_limits(dev_priv, &val);
WARN_ON(!mutex_is_locked(&dev->struct_mutex));
+ WARN_ON(val > dev_priv->rps.max_delay);
+ WARN_ON(val < dev_priv->rps.min_delay);
if (val == dev_priv->rps.cur_delay)
return;
@@ -2338,6 +2340,8 @@ void gen6_set_rps(struct drm_device *dev, u8 val)
*/
I915_WRITE(GEN6_RP_INTERRUPT_LIMITS, limits);
+ POSTING_READ(GEN6_RPNSWREQ);
+
dev_priv->rps.cur_delay = val;
trace_intel_gpu_freq_change(val * 50);
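
The new POSTING_READ forces the preceding MMIO writes out to the hardware before cur_delay is updated; a read of any register in the same block serves, and its value is discarded. The idiom in isolation (sketch, reusing the registers from this hunk):

    static void rps_write_limits_flushed(struct drm_i915_private *dev_priv,
                                         u32 limits)
    {
            I915_WRITE(GEN6_RP_INTERRUPT_LIMITS, limits);
            POSTING_READ(GEN6_RPNSWREQ);    /* flush queued writes */
    }
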
@@ -2730,7 +2734,7 @@ static const struct cparams {
{ 0, 800, 231, 23784 },
};
-unsigned long i915_chipset_val(struct drm_i915_private *dev_priv)
+static unsigned long __i915_chipset_val(struct drm_i915_private *dev_priv)
{
u64 total_count, diff, ret;
u32 count1, count2, count3, m = 0, c = 0;
@@ -2784,6 +2788,22 @@ unsigned long i915_chipset_val(struct drm_i915_private *dev_priv)
return ret;
}
+unsigned long i915_chipset_val(struct drm_i915_private *dev_priv)
+{
+ unsigned long val;
+
+ if (dev_priv->info->gen != 5)
+ return 0;
+
+ spin_lock_irq(&mchdev_lock);
+
+ val = __i915_chipset_val(dev_priv);
+
+ spin_unlock_irq(&mchdev_lock);
+
+ return val;
+}
+
unsigned long i915_mch_val(struct drm_i915_private *dev_priv)
{
unsigned long m, x, b;
@@ -2987,7 +3007,7 @@ void i915_update_gfx_val(struct drm_i915_private *dev_priv)
spin_unlock_irq(&mchdev_lock);
}
-unsigned long i915_gfx_val(struct drm_i915_private *dev_priv)
+static unsigned long __i915_gfx_val(struct drm_i915_private *dev_priv)
{
unsigned long t, corr, state1, corr2, state2;
u32 pxvid, ext_v;
@@ -3024,6 +3044,22 @@ unsigned long i915_gfx_val(struct drm_i915_private *dev_priv)
return dev_priv->ips.gfx_power + state2;
}
+unsigned long i915_gfx_val(struct drm_i915_private *dev_priv)
+{
+ unsigned long val;
+
+ if (dev_priv->info->gen != 5)
+ return 0;
+
+ spin_lock_irq(&mchdev_lock);
+
+ val = __i915_gfx_val(dev_priv);
+
+ spin_unlock_irq(&mchdev_lock);
+
+ return val;
+}
+
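
This is the usual lock-wrapper split: the double-underscore helpers assume mchdev_lock is already held (i915_read_mch_val() below calls them that way), while the public entry points take the lock themselves. In outline (sketch):

    /* __example_val() requires mchdev_lock; example_val() takes it. */
    static unsigned long __example_val(struct drm_i915_private *dev_priv)
    {
            assert_spin_locked(&mchdev_lock);
            return 0;       /* would read state protected by the lock */
    }

    static unsigned long example_val(struct drm_i915_private *dev_priv)
    {
            unsigned long val;

            spin_lock_irq(&mchdev_lock);
            val = __example_val(dev_priv);
            spin_unlock_irq(&mchdev_lock);

            return val;
    }
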
/**
* i915_read_mch_val - return value for IPS use
*
@@ -3040,8 +3076,8 @@ unsigned long i915_read_mch_val(void)
goto out_unlock;
dev_priv = i915_mch_dev;
- chipset_val = i915_chipset_val(dev_priv);
- graphics_val = i915_gfx_val(dev_priv);
+ chipset_val = __i915_chipset_val(dev_priv);
+ graphics_val = __i915_gfx_val(dev_priv);
ret = chipset_val + graphics_val;
@@ -3723,6 +3759,9 @@ static void gen3_init_clock_gating(struct drm_device *dev)
if (IS_PINEVIEW(dev))
I915_WRITE(ECOSKPD, _MASKED_BIT_ENABLE(ECO_GATING_CX_ONLY));
+
+ /* IIR "flip pending" means the flip is done if this bit is set */
+ I915_WRITE(ECOSKPD, _MASKED_BIT_DISABLE(ECO_FLIP_DONE));
}
static void i85x_init_clock_gating(struct drm_device *dev)
diff --git a/drivers/gpu/drm/i915/intel_ringbuffer.c b/drivers/gpu/drm/i915/intel_ringbuffer.c
index 55cdb4d30a1..984a0c5fbf5 100644
--- a/drivers/gpu/drm/i915/intel_ringbuffer.c
+++ b/drivers/gpu/drm/i915/intel_ringbuffer.c
@@ -464,7 +464,7 @@ init_pipe_control(struct intel_ring_buffer *ring)
goto err_unref;
pc->gtt_offset = obj->gtt_offset;
- pc->cpu_page = kmap(obj->pages[0]);
+ pc->cpu_page = kmap(sg_page(obj->pages->sgl));
if (pc->cpu_page == NULL)
goto err_unpin;
@@ -491,7 +491,8 @@ cleanup_pipe_control(struct intel_ring_buffer *ring)
return;
obj = pc->obj;
- kunmap(obj->pages[0]);
+
+ kunmap(sg_page(obj->pages->sgl));
i915_gem_object_unpin(obj);
drm_gem_object_unreference(&obj->base);
@@ -1026,7 +1027,7 @@ static void cleanup_status_page(struct intel_ring_buffer *ring)
if (obj == NULL)
return;
- kunmap(obj->pages[0]);
+ kunmap(sg_page(obj->pages->sgl));
i915_gem_object_unpin(obj);
drm_gem_object_unreference(&obj->base);
ring->status_page.obj = NULL;
@@ -1053,7 +1054,7 @@ static int init_status_page(struct intel_ring_buffer *ring)
}
ring->status_page.gfx_addr = obj->gtt_offset;
- ring->status_page.page_addr = kmap(obj->pages[0]);
+ ring->status_page.page_addr = kmap(sg_page(obj->pages->sgl));
if (ring->status_page.page_addr == NULL) {
ret = -ENOMEM;
goto err_unpin;
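
The kmap(sg_page(obj->pages->sgl)) changes track this series' conversion of obj->pages from a bare page array to a scatter/gather table: the first backing page is now reached through the first sg entry. A sketch under that assumption:

    #include <linux/scatterlist.h>
    #include <linux/highmem.h>

    /* Map the first backing page of an object whose pages live in a
     * struct sg_table; fine for the single-page status page and
     * pipe-control page mapped above. */
    static void *map_first_page(struct sg_table *pages)
    {
            return kmap(sg_page(pages->sgl));
    }
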
diff --git a/drivers/gpu/drm/i915/intel_sprite.c b/drivers/gpu/drm/i915/intel_sprite.c
index cc8df4de2d9..7644f31a377 100644
--- a/drivers/gpu/drm/i915/intel_sprite.c
+++ b/drivers/gpu/drm/i915/intel_sprite.c
@@ -60,11 +60,11 @@ ivb_update_plane(struct drm_plane *plane, struct drm_framebuffer *fb,
switch (fb->pixel_format) {
case DRM_FORMAT_XBGR8888:
- sprctl |= SPRITE_FORMAT_RGBX888;
+ sprctl |= SPRITE_FORMAT_RGBX888 | SPRITE_RGB_ORDER_RGBX;
pixel_size = 4;
break;
case DRM_FORMAT_XRGB8888:
- sprctl |= SPRITE_FORMAT_RGBX888 | SPRITE_RGB_ORDER_RGBX;
+ sprctl |= SPRITE_FORMAT_RGBX888;
pixel_size = 4;
break;
case DRM_FORMAT_YUYV: