author     Dave Airlie <airlied@redhat.com>  2012-10-02 09:19:32 +1000
committer  Dave Airlie <airlied@redhat.com>  2012-10-02 09:19:32 +1000
commit     8c98449ad316ba95a8f0a3ee3eaeb03dcd7f9ccc (patch)
tree       575c51d772059012eeffc301597589ed7c391321 /drivers/gpu/drm
parent     7facf16690dc4160e5ff605271704183ff56b2d9 (diff)
parent     f531dcb23f9a5c6ad77e451459df965dc9a0c0c8 (diff)
Merge branch 'for-airlied' of git://people.freedesktop.org/~danvet/drm-intel into drm-next
Last pile of stuff for 3.7, essentially just a bunch of bigger fixes and a
few less intrusive features:
- cpu freq interface in sysfs from Ben
- cpu edp fixes and some related cleanups
- write-combining ptes for pre-gen6 (Chris)
- basic CADL support (Peter Wu), this fixes quite a few issues with
  backlights ...
- rework of the gem backing pages handling (preps for stolen mem handling)
  from Chris
- some more cleanup-fallout from the modeset-rework

On top of that I've done a backmerge of -rc7, since the conflicts got too
messy and I've pushed out broken merged trees too often.

I've also included 3 fixes on top of what QA beat on:
- Fix for an infoframe handling regression in 3.5 - infoframes blow up too
  often, and 3.6 is pretty much done, so I'd like to merge that through
  -next and the stable process and give it more exposure before it lands
  in a stable tree.
- ioctl cosmetics^Wspelling fix in the structs (userspace won't be
  affected, since all existing userspace uses private copies of the ioctl
  struct definitions, and the struct layout itself is ABI compatible).
- Bugfix for a regression introduced in this pull's testing cycle.

* 'for-airlied' of git://people.freedesktop.org/~danvet/drm-intel: (695 commits)
  drm/i915: Wrap external callers to IPS state with appropriate locks
  drm/i915: s/cacheing/caching/
  drm/i915: make sure we write all the DIP data bytes
  drm/i915: BUG() on unexpected HDMI register
  ...
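For reference on the spelling fix above: renaming struct members changes no offsets or sizes, so the binary interface is untouched. A minimal illustration with hypothetical struct names (the real definitions live in include/drm/i915_drm.h):

#include <linux/types.h>

/* Old and new spellings: the identifiers differ, the layout does not. */
struct example_gem_cacheing {        /* as shipped, with the typo */
        __u32 handle;
        __u32 cacheing;              /* offset 4, size 4 */
};

struct example_gem_caching {         /* after s/cacheing/caching/ */
        __u32 handle;
        __u32 caching;               /* offset 4, size 4: same ABI */
};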
Diffstat (limited to 'drivers/gpu/drm')
-rw-r--r--  drivers/gpu/drm/ast/ast_drv.c | 3
-rw-r--r--  drivers/gpu/drm/ast/ast_mode.c | 2
-rw-r--r--  drivers/gpu/drm/cirrus/cirrus_drv.c | 3
-rw-r--r--  drivers/gpu/drm/drm_cache.c | 25
-rw-r--r--  drivers/gpu/drm/drm_crtc.c | 2
-rw-r--r--  drivers/gpu/drm/drm_edid.c | 3
-rw-r--r--  drivers/gpu/drm/exynos/Kconfig | 2
-rw-r--r--  drivers/gpu/drm/exynos/exynos_drm_dmabuf.c | 7
-rw-r--r--  drivers/gpu/drm/exynos/exynos_drm_drv.c | 5
-rw-r--r--  drivers/gpu/drm/exynos/exynos_drm_fimd.c | 5
-rw-r--r--  drivers/gpu/drm/exynos/exynos_drm_g2d.c | 52
-rw-r--r--  drivers/gpu/drm/exynos/exynos_drm_gem.c | 4
-rw-r--r--  drivers/gpu/drm/exynos/exynos_drm_hdmi.c | 3
-rw-r--r--  drivers/gpu/drm/exynos/exynos_drm_plane.c | 1
-rw-r--r--  drivers/gpu/drm/exynos/exynos_drm_vidi.c | 4
-rw-r--r--  drivers/gpu/drm/exynos/exynos_hdmi.c | 11
-rw-r--r--  drivers/gpu/drm/exynos/exynos_mixer.c | 6
-rw-r--r--  drivers/gpu/drm/gma500/oaktrail_device.c | 2
-rw-r--r--  drivers/gpu/drm/gma500/psb_intel_display.c | 3
-rw-r--r--  drivers/gpu/drm/i810/i810_dma.c | 3
-rw-r--r--  drivers/gpu/drm/i810/i810_drv.c | 3
-rw-r--r--  drivers/gpu/drm/i915/i915_debugfs.c | 22
-rw-r--r--  drivers/gpu/drm/i915/i915_dma.c | 21
-rw-r--r--  drivers/gpu/drm/i915/i915_drv.h | 66
-rw-r--r--  drivers/gpu/drm/i915/i915_gem.c | 264
-rw-r--r--  drivers/gpu/drm/i915/i915_gem_dmabuf.c | 151
-rw-r--r--  drivers/gpu/drm/i915/i915_gem_execbuffer.c | 13
-rw-r--r--  drivers/gpu/drm/i915/i915_gem_gtt.c | 125
-rw-r--r--  drivers/gpu/drm/i915/i915_gem_tiling.c | 16
-rw-r--r--  drivers/gpu/drm/i915/i915_irq.c | 36
-rw-r--r--  drivers/gpu/drm/i915/i915_reg.h | 4
-rw-r--r--  drivers/gpu/drm/i915/i915_sysfs.c | 203
-rw-r--r--  drivers/gpu/drm/i915/intel_display.c | 300
-rw-r--r--  drivers/gpu/drm/i915/intel_dp.c | 157
-rw-r--r--  drivers/gpu/drm/i915/intel_drv.h | 2
-rw-r--r--  drivers/gpu/drm/i915/intel_hdmi.c | 22
-rw-r--r--  drivers/gpu/drm/i915/intel_lvds.c | 8
-rw-r--r--  drivers/gpu/drm/i915/intel_opregion.c | 23
-rw-r--r--  drivers/gpu/drm/i915/intel_panel.c | 31
-rw-r--r--  drivers/gpu/drm/i915/intel_pm.c | 47
-rw-r--r--  drivers/gpu/drm/i915/intel_ringbuffer.c | 9
-rw-r--r--  drivers/gpu/drm/i915/intel_sprite.c | 4
-rw-r--r--  drivers/gpu/drm/mgag200/mgag200_drv.c | 3
-rw-r--r--  drivers/gpu/drm/nouveau/nouveau_display.c | 2
-rw-r--r--  drivers/gpu/drm/nouveau/nouveau_state.c | 6
-rw-r--r--  drivers/gpu/drm/nouveau/nv50_gpio.c | 16
-rw-r--r--  drivers/gpu/drm/nouveau/nvd0_display.c | 4
-rw-r--r--  drivers/gpu/drm/radeon/atombios_crtc.c | 36
-rw-r--r--  drivers/gpu/drm/radeon/atombios_dp.c | 29
-rw-r--r--  drivers/gpu/drm/radeon/atombios_encoders.c | 140
-rw-r--r--  drivers/gpu/drm/radeon/r100.c | 3
-rw-r--r--  drivers/gpu/drm/radeon/r600_cs.c | 30
-rw-r--r--  drivers/gpu/drm/radeon/r600d.h | 8
-rw-r--r--  drivers/gpu/drm/radeon/radeon_device.c | 5
-rw-r--r--  drivers/gpu/drm/radeon/radeon_drv.c | 3
-rw-r--r--  drivers/gpu/drm/radeon/radeon_fence.c | 8
-rw-r--r--  drivers/gpu/drm/radeon/reg_srcs/r600 | 1
-rw-r--r--  drivers/gpu/drm/savage/savage_drv.c | 3
-rw-r--r--  drivers/gpu/drm/sis/sis_drv.c | 3
-rw-r--r--  drivers/gpu/drm/tdfx/tdfx_drv.c | 3
-rw-r--r--  drivers/gpu/drm/udl/udl_drv.c | 3
-rw-r--r--  drivers/gpu/drm/via/via_drv.c | 3
-rw-r--r--  drivers/gpu/drm/vmwgfx/Kconfig | 8
-rw-r--r--  drivers/gpu/drm/vmwgfx/vmwgfx_drv.c | 8
-rw-r--r--  drivers/gpu/drm/vmwgfx/vmwgfx_drv.h | 10
-rw-r--r--  drivers/gpu/drm/vmwgfx/vmwgfx_resource.c | 73
66 files changed, 1305 insertions(+), 776 deletions(-)
diff --git a/drivers/gpu/drm/ast/ast_drv.c b/drivers/gpu/drm/ast/ast_drv.c
index d0c4574ef49..36164806b9d 100644
--- a/drivers/gpu/drm/ast/ast_drv.c
+++ b/drivers/gpu/drm/ast/ast_drv.c
@@ -193,6 +193,9 @@ static const struct file_operations ast_fops = {
.mmap = ast_mmap,
.poll = drm_poll,
.fasync = drm_fasync,
+#ifdef CONFIG_COMPAT
+ .compat_ioctl = drm_compat_ioctl,
+#endif
.read = drm_read,
};
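The same compat_ioctl hook is added to several drivers in this pull; the shared pattern, as a standalone sketch (example_fops is a placeholder, not a real driver symbol):

#include <linux/fs.h>
#include <linux/module.h>
#include <drm/drmP.h>

static const struct file_operations example_fops = {
        .owner          = THIS_MODULE,
        .unlocked_ioctl = drm_ioctl,
#ifdef CONFIG_COMPAT
        /* On 64-bit kernels, translate ioctls from 32-bit userspace;
         * compiled out when CONFIG_COMPAT is not set. */
        .compat_ioctl   = drm_compat_ioctl,
#endif
};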
diff --git a/drivers/gpu/drm/ast/ast_mode.c b/drivers/gpu/drm/ast/ast_mode.c
index 866e9d48b2d..f3b2a7cce74 100644
--- a/drivers/gpu/drm/ast/ast_mode.c
+++ b/drivers/gpu/drm/ast/ast_mode.c
@@ -842,7 +842,7 @@ int ast_cursor_init(struct drm_device *dev)
ast->cursor_cache = obj;
ast->cursor_cache_gpu_addr = gpu_addr;
- DRM_ERROR("pinned cursor cache at %llx\n", ast->cursor_cache_gpu_addr);
+ DRM_DEBUG_KMS("pinned cursor cache at %llx\n", ast->cursor_cache_gpu_addr);
return 0;
fail:
return ret;
diff --git a/drivers/gpu/drm/cirrus/cirrus_drv.c b/drivers/gpu/drm/cirrus/cirrus_drv.c
index 7053140c659..b83a2d7ddd1 100644
--- a/drivers/gpu/drm/cirrus/cirrus_drv.c
+++ b/drivers/gpu/drm/cirrus/cirrus_drv.c
@@ -74,6 +74,9 @@ static const struct file_operations cirrus_driver_fops = {
.unlocked_ioctl = drm_ioctl,
.mmap = cirrus_mmap,
.poll = drm_poll,
+#ifdef CONFIG_COMPAT
+ .compat_ioctl = drm_compat_ioctl,
+#endif
.fasync = drm_fasync,
};
static struct drm_driver driver = {
diff --git a/drivers/gpu/drm/drm_cache.c b/drivers/gpu/drm/drm_cache.c
index 3dbc7f17eb1..4a4274b348b 100644
--- a/drivers/gpu/drm/drm_cache.c
+++ b/drivers/gpu/drm/drm_cache.c
@@ -101,6 +101,31 @@ drm_clflush_pages(struct page *pages[], unsigned long num_pages)
EXPORT_SYMBOL(drm_clflush_pages);
void
+drm_clflush_sg(struct sg_table *st)
+{
+#if defined(CONFIG_X86)
+ if (cpu_has_clflush) {
+ struct scatterlist *sg;
+ int i;
+
+ mb();
+ for_each_sg(st->sgl, sg, st->nents, i)
+ drm_clflush_page(sg_page(sg));
+ mb();
+
+ return;
+ }
+
+ if (on_each_cpu(drm_clflush_ipi_handler, NULL, 1) != 0)
+ printk(KERN_ERR "Timed out waiting for cache flush.\n");
+#else
+ printk(KERN_ERR "Architecture has no drm_cache.c support\n");
+ WARN_ON_ONCE(1);
+#endif
+}
+EXPORT_SYMBOL(drm_clflush_sg);
+
+void
drm_clflush_virt_range(char *addr, unsigned long length)
{
#if defined(CONFIG_X86)
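A hedged usage sketch for the new export: build an sg_table over a page array and flush it before GPU access (helper name hypothetical):

#include <linux/scatterlist.h>
#include <drm/drmP.h>

static int example_flush_pages(struct page **pages, int npages)
{
        struct sg_table st;
        struct scatterlist *sg;
        int i, ret;

        ret = sg_alloc_table(&st, npages, GFP_KERNEL);
        if (ret)
                return ret;

        for_each_sg(st.sgl, sg, npages, i)
                sg_set_page(sg, pages[i], PAGE_SIZE, 0);

        /* clflush per page when available; otherwise an IPI-based
         * whole-cache flush, as in the implementation above. */
        drm_clflush_sg(&st);

        sg_free_table(&st);
        return 0;
}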
diff --git a/drivers/gpu/drm/drm_crtc.c b/drivers/gpu/drm/drm_crtc.c
index 28d0900d7f8..c317f721a8d 100644
--- a/drivers/gpu/drm/drm_crtc.c
+++ b/drivers/gpu/drm/drm_crtc.c
@@ -1981,7 +1981,7 @@ int drm_mode_cursor_ioctl(struct drm_device *dev,
if (!drm_core_check_feature(dev, DRIVER_MODESET))
return -EINVAL;
- if (!req->flags)
+ if (!req->flags || (~DRM_MODE_CURSOR_FLAGS & req->flags))
return -EINVAL;
mutex_lock(&dev->mode_config.mutex);
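The tightened check rejects both an empty flags word and any bit outside the defined mask; the idiom in isolation (EXAMPLE_* names are stand-ins):

#include <linux/errno.h>

#define EXAMPLE_FLAG_BO         (1 << 0)
#define EXAMPLE_FLAG_MOVE       (1 << 1)
#define EXAMPLE_VALID_FLAGS     (EXAMPLE_FLAG_BO | EXAMPLE_FLAG_MOVE)

static int example_check_flags(unsigned int flags)
{
        /* No flags at all, or any unknown bit set: reject. */
        if (!flags || (flags & ~EXAMPLE_VALID_FLAGS))
                return -EINVAL;
        return 0;
}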
diff --git a/drivers/gpu/drm/drm_edid.c b/drivers/gpu/drm/drm_edid.c
index de2a0ed8e94..289c9b18c0d 100644
--- a/drivers/gpu/drm/drm_edid.c
+++ b/drivers/gpu/drm/drm_edid.c
@@ -87,6 +87,9 @@ static struct edid_quirk {
int product_id;
u32 quirks;
} edid_quirk_list[] = {
+ /* ASUS VW222S */
+ { "ACI", 0x22a2, EDID_QUIRK_FORCE_REDUCED_BLANKING },
+
/* Acer AL1706 */
{ "ACR", 44358, EDID_QUIRK_PREFER_LARGE_60 },
/* Acer F51 */
diff --git a/drivers/gpu/drm/exynos/Kconfig b/drivers/gpu/drm/exynos/Kconfig
index 7f5096763b7..59a26e577b5 100644
--- a/drivers/gpu/drm/exynos/Kconfig
+++ b/drivers/gpu/drm/exynos/Kconfig
@@ -36,6 +36,6 @@ config DRM_EXYNOS_VIDI
config DRM_EXYNOS_G2D
bool "Exynos DRM G2D"
- depends on DRM_EXYNOS
+ depends on DRM_EXYNOS && !VIDEO_SAMSUNG_S5P_G2D
help
Choose this option if you want to use Exynos G2D for DRM.
diff --git a/drivers/gpu/drm/exynos/exynos_drm_dmabuf.c b/drivers/gpu/drm/exynos/exynos_drm_dmabuf.c
index 613bf8a5d9b..ae13febe0ea 100644
--- a/drivers/gpu/drm/exynos/exynos_drm_dmabuf.c
+++ b/drivers/gpu/drm/exynos/exynos_drm_dmabuf.c
@@ -163,6 +163,12 @@ static void exynos_gem_dmabuf_kunmap(struct dma_buf *dma_buf,
/* TODO */
}
+static int exynos_gem_dmabuf_mmap(struct dma_buf *dma_buf,
+ struct vm_area_struct *vma)
+{
+ return -ENOTTY;
+}
+
static struct dma_buf_ops exynos_dmabuf_ops = {
.map_dma_buf = exynos_gem_map_dma_buf,
.unmap_dma_buf = exynos_gem_unmap_dma_buf,
@@ -170,6 +176,7 @@ static struct dma_buf_ops exynos_dmabuf_ops = {
.kmap_atomic = exynos_gem_dmabuf_kmap_atomic,
.kunmap = exynos_gem_dmabuf_kunmap,
.kunmap_atomic = exynos_gem_dmabuf_kunmap_atomic,
+ .mmap = exynos_gem_dmabuf_mmap,
.release = exynos_dmabuf_release,
};
diff --git a/drivers/gpu/drm/exynos/exynos_drm_drv.c b/drivers/gpu/drm/exynos/exynos_drm_drv.c
index ebacec6f1e4..d0707193745 100644
--- a/drivers/gpu/drm/exynos/exynos_drm_drv.c
+++ b/drivers/gpu/drm/exynos/exynos_drm_drv.c
@@ -160,7 +160,6 @@ static int exynos_drm_open(struct drm_device *dev, struct drm_file *file)
if (!file_priv)
return -ENOMEM;
- drm_prime_init_file_private(&file->prime);
file->driver_priv = file_priv;
return exynos_drm_subdrv_open(dev, file);
@@ -184,7 +183,6 @@ static void exynos_drm_preclose(struct drm_device *dev,
e->base.destroy(&e->base);
}
}
- drm_prime_destroy_file_private(&file->prime);
spin_unlock_irqrestore(&dev->event_lock, flags);
exynos_drm_subdrv_close(dev, file);
@@ -241,6 +239,9 @@ static const struct file_operations exynos_drm_driver_fops = {
.poll = drm_poll,
.read = drm_read,
.unlocked_ioctl = drm_ioctl,
+#ifdef CONFIG_COMPAT
+ .compat_ioctl = drm_compat_ioctl,
+#endif
.release = drm_release,
};
diff --git a/drivers/gpu/drm/exynos/exynos_drm_fimd.c b/drivers/gpu/drm/exynos/exynos_drm_fimd.c
index a68d2b313f0..b19cd93e704 100644
--- a/drivers/gpu/drm/exynos/exynos_drm_fimd.c
+++ b/drivers/gpu/drm/exynos/exynos_drm_fimd.c
@@ -831,11 +831,6 @@ static int __devinit fimd_probe(struct platform_device *pdev)
}
res = platform_get_resource(pdev, IORESOURCE_MEM, 0);
- if (!res) {
- dev_err(dev, "failed to find registers\n");
- ret = -ENOENT;
- goto err_clk;
- }
ctx->regs = devm_request_and_ioremap(&pdev->dev, res);
if (!ctx->regs) {
diff --git a/drivers/gpu/drm/exynos/exynos_drm_g2d.c b/drivers/gpu/drm/exynos/exynos_drm_g2d.c
index d2d88f22a03..1065e90d091 100644
--- a/drivers/gpu/drm/exynos/exynos_drm_g2d.c
+++ b/drivers/gpu/drm/exynos/exynos_drm_g2d.c
@@ -129,7 +129,6 @@ struct g2d_runqueue_node {
struct g2d_data {
struct device *dev;
struct clk *gate_clk;
- struct resource *regs_res;
void __iomem *regs;
int irq;
struct workqueue_struct *g2d_workq;
@@ -751,7 +750,7 @@ static int __devinit g2d_probe(struct platform_device *pdev)
struct exynos_drm_subdrv *subdrv;
int ret;
- g2d = kzalloc(sizeof(*g2d), GFP_KERNEL);
+ g2d = devm_kzalloc(&pdev->dev, sizeof(*g2d), GFP_KERNEL);
if (!g2d) {
dev_err(dev, "failed to allocate driver data\n");
return -ENOMEM;
@@ -759,10 +758,8 @@ static int __devinit g2d_probe(struct platform_device *pdev)
g2d->runqueue_slab = kmem_cache_create("g2d_runqueue_slab",
sizeof(struct g2d_runqueue_node), 0, 0, NULL);
- if (!g2d->runqueue_slab) {
- ret = -ENOMEM;
- goto err_free_mem;
- }
+ if (!g2d->runqueue_slab)
+ return -ENOMEM;
g2d->dev = dev;
@@ -794,38 +791,26 @@ static int __devinit g2d_probe(struct platform_device *pdev)
pm_runtime_enable(dev);
res = platform_get_resource(pdev, IORESOURCE_MEM, 0);
- if (!res) {
- dev_err(dev, "failed to get I/O memory\n");
- ret = -ENOENT;
- goto err_put_clk;
- }
- g2d->regs_res = request_mem_region(res->start, resource_size(res),
- dev_name(dev));
- if (!g2d->regs_res) {
- dev_err(dev, "failed to request I/O memory\n");
- ret = -ENOENT;
- goto err_put_clk;
- }
-
- g2d->regs = ioremap(res->start, resource_size(res));
+ g2d->regs = devm_request_and_ioremap(&pdev->dev, res);
if (!g2d->regs) {
dev_err(dev, "failed to remap I/O memory\n");
ret = -ENXIO;
- goto err_release_res;
+ goto err_put_clk;
}
g2d->irq = platform_get_irq(pdev, 0);
if (g2d->irq < 0) {
dev_err(dev, "failed to get irq\n");
ret = g2d->irq;
- goto err_unmap_base;
+ goto err_put_clk;
}
- ret = request_irq(g2d->irq, g2d_irq_handler, 0, "drm_g2d", g2d);
+ ret = devm_request_irq(&pdev->dev, g2d->irq, g2d_irq_handler, 0,
+ "drm_g2d", g2d);
if (ret < 0) {
dev_err(dev, "irq request failed\n");
- goto err_unmap_base;
+ goto err_put_clk;
}
platform_set_drvdata(pdev, g2d);
@@ -838,7 +823,7 @@ static int __devinit g2d_probe(struct platform_device *pdev)
ret = exynos_drm_subdrv_register(subdrv);
if (ret < 0) {
dev_err(dev, "failed to register drm g2d device\n");
- goto err_free_irq;
+ goto err_put_clk;
}
dev_info(dev, "The exynos g2d(ver %d.%d) successfully probed\n",
@@ -846,13 +831,6 @@ static int __devinit g2d_probe(struct platform_device *pdev)
return 0;
-err_free_irq:
- free_irq(g2d->irq, g2d);
-err_unmap_base:
- iounmap(g2d->regs);
-err_release_res:
- release_resource(g2d->regs_res);
- kfree(g2d->regs_res);
err_put_clk:
pm_runtime_disable(dev);
clk_put(g2d->gate_clk);
@@ -862,8 +840,6 @@ err_destroy_workqueue:
destroy_workqueue(g2d->g2d_workq);
err_destroy_slab:
kmem_cache_destroy(g2d->runqueue_slab);
-err_free_mem:
- kfree(g2d);
return ret;
}
@@ -873,24 +849,18 @@ static int __devexit g2d_remove(struct platform_device *pdev)
cancel_work_sync(&g2d->runqueue_work);
exynos_drm_subdrv_unregister(&g2d->subdrv);
- free_irq(g2d->irq, g2d);
while (g2d->runqueue_node) {
g2d_free_runqueue_node(g2d, g2d->runqueue_node);
g2d->runqueue_node = g2d_get_runqueue_node(g2d);
}
- iounmap(g2d->regs);
- release_resource(g2d->regs_res);
- kfree(g2d->regs_res);
-
pm_runtime_disable(&pdev->dev);
clk_put(g2d->gate_clk);
g2d_fini_cmdlist(g2d);
destroy_workqueue(g2d->g2d_workq);
kmem_cache_destroy(g2d->runqueue_slab);
- kfree(g2d);
return 0;
}
@@ -924,7 +894,7 @@ static int g2d_resume(struct device *dev)
}
#endif
-SIMPLE_DEV_PM_OPS(g2d_pm_ops, g2d_suspend, g2d_resume);
+static SIMPLE_DEV_PM_OPS(g2d_pm_ops, g2d_suspend, g2d_resume);
struct platform_driver g2d_driver = {
.probe = g2d_probe,
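The g2d conversion above (and the hdmi/vidi ones below) follow the managed-resource devm_* pattern, which is what lets most error labels and the matching frees in remove() disappear. A condensed probe sketch under hypothetical names:

#include <linux/interrupt.h>
#include <linux/io.h>
#include <linux/platform_device.h>
#include <linux/slab.h>

struct example_data {
        void __iomem *regs;
        int irq;
};

static irqreturn_t example_irq(int irq, void *arg)
{
        return IRQ_HANDLED;
}

static int example_probe(struct platform_device *pdev)
{
        struct example_data *d;
        struct resource *res;

        /* Freed automatically when the device is unbound. */
        d = devm_kzalloc(&pdev->dev, sizeof(*d), GFP_KERNEL);
        if (!d)
                return -ENOMEM;

        res = platform_get_resource(pdev, IORESOURCE_MEM, 0);
        /* devm_request_and_ioremap() validates res itself, which is
         * why the explicit !res checks could be dropped above. */
        d->regs = devm_request_and_ioremap(&pdev->dev, res);
        if (!d->regs)
                return -ENXIO;

        d->irq = platform_get_irq(pdev, 0);
        if (d->irq < 0)
                return d->irq;

        /* Also released on unbind; no free_irq() in remove(). */
        return devm_request_irq(&pdev->dev, d->irq, example_irq, 0,
                                "example", d);
}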
diff --git a/drivers/gpu/drm/exynos/exynos_drm_gem.c b/drivers/gpu/drm/exynos/exynos_drm_gem.c
index f9efde40c09..a38051c95ec 100644
--- a/drivers/gpu/drm/exynos/exynos_drm_gem.c
+++ b/drivers/gpu/drm/exynos/exynos_drm_gem.c
@@ -122,7 +122,7 @@ fail:
__free_page(pages[i]);
drm_free_large(pages);
- return ERR_PTR(PTR_ERR(p));
+ return ERR_CAST(p);
}
static void exynos_gem_put_pages(struct drm_gem_object *obj,
@@ -662,7 +662,7 @@ int exynos_drm_gem_dumb_create(struct drm_file *file_priv,
*/
args->pitch = args->width * ((args->bpp + 7) / 8);
- args->size = PAGE_ALIGN(args->pitch * args->height);
+ args->size = args->pitch * args->height;
exynos_gem_obj = exynos_drm_gem_create(dev, args->flags, args->size);
if (IS_ERR(exynos_gem_obj))
diff --git a/drivers/gpu/drm/exynos/exynos_drm_hdmi.c b/drivers/gpu/drm/exynos/exynos_drm_hdmi.c
index 8ffcdf8b9e2..3fdf0b65f47 100644
--- a/drivers/gpu/drm/exynos/exynos_drm_hdmi.c
+++ b/drivers/gpu/drm/exynos/exynos_drm_hdmi.c
@@ -345,7 +345,7 @@ static int __devinit exynos_drm_hdmi_probe(struct platform_device *pdev)
DRM_DEBUG_KMS("%s\n", __FILE__);
- ctx = kzalloc(sizeof(*ctx), GFP_KERNEL);
+ ctx = devm_kzalloc(&pdev->dev, sizeof(*ctx), GFP_KERNEL);
if (!ctx) {
DRM_LOG_KMS("failed to alloc common hdmi context.\n");
return -ENOMEM;
@@ -371,7 +371,6 @@ static int __devexit exynos_drm_hdmi_remove(struct platform_device *pdev)
DRM_DEBUG_KMS("%s\n", __FILE__);
exynos_drm_subdrv_unregister(&ctx->subdrv);
- kfree(ctx);
return 0;
}
diff --git a/drivers/gpu/drm/exynos/exynos_drm_plane.c b/drivers/gpu/drm/exynos/exynos_drm_plane.c
index b89829e5043..e1f94b746bd 100644
--- a/drivers/gpu/drm/exynos/exynos_drm_plane.c
+++ b/drivers/gpu/drm/exynos/exynos_drm_plane.c
@@ -29,7 +29,6 @@ static const uint32_t formats[] = {
DRM_FORMAT_XRGB8888,
DRM_FORMAT_ARGB8888,
DRM_FORMAT_NV12,
- DRM_FORMAT_NV12M,
DRM_FORMAT_NV12MT,
};
diff --git a/drivers/gpu/drm/exynos/exynos_drm_vidi.c b/drivers/gpu/drm/exynos/exynos_drm_vidi.c
index 92395258d64..e364165f1a2 100644
--- a/drivers/gpu/drm/exynos/exynos_drm_vidi.c
+++ b/drivers/gpu/drm/exynos/exynos_drm_vidi.c
@@ -620,7 +620,7 @@ static int __devinit vidi_probe(struct platform_device *pdev)
DRM_DEBUG_KMS("%s\n", __FILE__);
- ctx = kzalloc(sizeof(*ctx), GFP_KERNEL);
+ ctx = devm_kzalloc(&pdev->dev, sizeof(*ctx), GFP_KERNEL);
if (!ctx)
return -ENOMEM;
@@ -660,8 +660,6 @@ static int __devexit vidi_remove(struct platform_device *pdev)
ctx->raw_edid = NULL;
}
- kfree(ctx);
-
return 0;
}
diff --git a/drivers/gpu/drm/exynos/exynos_hdmi.c b/drivers/gpu/drm/exynos/exynos_hdmi.c
index 409e2ec1207..a6aea6f3ea1 100644
--- a/drivers/gpu/drm/exynos/exynos_hdmi.c
+++ b/drivers/gpu/drm/exynos/exynos_hdmi.c
@@ -2172,7 +2172,7 @@ static int __devinit hdmi_resources_init(struct hdmi_context *hdata)
DRM_DEBUG_KMS("HDMI resource init\n");
- memset(res, 0, sizeof *res);
+ memset(res, 0, sizeof(*res));
/* get clocks, power */
res->hdmi = clk_get(dev, "hdmi");
@@ -2204,7 +2204,7 @@ static int __devinit hdmi_resources_init(struct hdmi_context *hdata)
clk_set_parent(res->sclk_hdmi, res->sclk_pixel);
res->regul_bulk = kzalloc(ARRAY_SIZE(supply) *
- sizeof res->regul_bulk[0], GFP_KERNEL);
+ sizeof(res->regul_bulk[0]), GFP_KERNEL);
if (!res->regul_bulk) {
DRM_ERROR("failed to get memory for regulators\n");
goto fail;
@@ -2243,7 +2243,7 @@ static int hdmi_resources_cleanup(struct hdmi_context *hdata)
clk_put(res->sclk_hdmi);
if (!IS_ERR_OR_NULL(res->hdmi))
clk_put(res->hdmi);
- memset(res, 0, sizeof *res);
+ memset(res, 0, sizeof(*res));
return 0;
}
@@ -2312,11 +2312,6 @@ static int __devinit hdmi_probe(struct platform_device *pdev)
}
res = platform_get_resource(pdev, IORESOURCE_MEM, 0);
- if (!res) {
- DRM_ERROR("failed to find registers\n");
- ret = -ENOENT;
- goto err_resource;
- }
hdata->regs = devm_request_and_ioremap(&pdev->dev, res);
if (!hdata->regs) {
diff --git a/drivers/gpu/drm/exynos/exynos_mixer.c b/drivers/gpu/drm/exynos/exynos_mixer.c
index 30fcc12f81d..25b97d5e5fc 100644
--- a/drivers/gpu/drm/exynos/exynos_mixer.c
+++ b/drivers/gpu/drm/exynos/exynos_mixer.c
@@ -236,11 +236,11 @@ static inline void vp_filter_set(struct mixer_resources *res,
static void vp_default_filter(struct mixer_resources *res)
{
vp_filter_set(res, VP_POLY8_Y0_LL,
- filter_y_horiz_tap8, sizeof filter_y_horiz_tap8);
+ filter_y_horiz_tap8, sizeof(filter_y_horiz_tap8));
vp_filter_set(res, VP_POLY4_Y0_LL,
- filter_y_vert_tap4, sizeof filter_y_vert_tap4);
+ filter_y_vert_tap4, sizeof(filter_y_vert_tap4));
vp_filter_set(res, VP_POLY4_C0_LL,
- filter_cr_horiz_tap4, sizeof filter_cr_horiz_tap4);
+ filter_cr_horiz_tap4, sizeof(filter_cr_horiz_tap4));
}
static void mixer_vsync_set_update(struct mixer_context *ctx, bool enable)
diff --git a/drivers/gpu/drm/gma500/oaktrail_device.c b/drivers/gpu/drm/gma500/oaktrail_device.c
index 0f9b7db80f6..cf49ba5a54b 100644
--- a/drivers/gpu/drm/gma500/oaktrail_device.c
+++ b/drivers/gpu/drm/gma500/oaktrail_device.c
@@ -476,6 +476,7 @@ static const struct psb_offset oaktrail_regmap[2] = {
.pos = DSPAPOS,
.surf = DSPASURF,
.addr = MRST_DSPABASE,
+ .base = MRST_DSPABASE,
.status = PIPEASTAT,
.linoff = DSPALINOFF,
.tileoff = DSPATILEOFF,
@@ -499,6 +500,7 @@ static const struct psb_offset oaktrail_regmap[2] = {
.pos = DSPBPOS,
.surf = DSPBSURF,
.addr = DSPBBASE,
+ .base = DSPBBASE,
.status = PIPEBSTAT,
.linoff = DSPBLINOFF,
.tileoff = DSPBTILEOFF,
diff --git a/drivers/gpu/drm/gma500/psb_intel_display.c b/drivers/gpu/drm/gma500/psb_intel_display.c
index 30dc22a7156..8033526bb53 100644
--- a/drivers/gpu/drm/gma500/psb_intel_display.c
+++ b/drivers/gpu/drm/gma500/psb_intel_display.c
@@ -1362,6 +1362,9 @@ void psb_intel_crtc_init(struct drm_device *dev, int pipe,
(struct drm_connector **) (psb_intel_crtc + 1);
psb_intel_crtc->mode_set.num_connectors = 0;
psb_intel_cursor_init(dev, psb_intel_crtc);
+
+ /* Set to true so that the pipe is forced off on initial config. */
+ psb_intel_crtc->active = true;
}
int psb_intel_get_pipe_from_crtc_id(struct drm_device *dev, void *data,
diff --git a/drivers/gpu/drm/i810/i810_dma.c b/drivers/gpu/drm/i810/i810_dma.c
index 57d892eaaa6..463ec6871fe 100644
--- a/drivers/gpu/drm/i810/i810_dma.c
+++ b/drivers/gpu/drm/i810/i810_dma.c
@@ -115,6 +115,9 @@ static const struct file_operations i810_buffer_fops = {
.unlocked_ioctl = drm_ioctl,
.mmap = i810_mmap_buffers,
.fasync = drm_fasync,
+#ifdef CONFIG_COMPAT
+ .compat_ioctl = drm_compat_ioctl,
+#endif
.llseek = noop_llseek,
};
diff --git a/drivers/gpu/drm/i810/i810_drv.c b/drivers/gpu/drm/i810/i810_drv.c
index f9924ad04d0..48cfcca2b35 100644
--- a/drivers/gpu/drm/i810/i810_drv.c
+++ b/drivers/gpu/drm/i810/i810_drv.c
@@ -51,6 +51,9 @@ static const struct file_operations i810_driver_fops = {
.mmap = drm_mmap,
.poll = drm_poll,
.fasync = drm_fasync,
+#ifdef CONFIG_COMPAT
+ .compat_ioctl = drm_compat_ioctl,
+#endif
.llseek = noop_llseek,
};
diff --git a/drivers/gpu/drm/i915/i915_debugfs.c b/drivers/gpu/drm/i915/i915_debugfs.c
index 274a3280cdc..7a4b3f3aad3 100644
--- a/drivers/gpu/drm/i915/i915_debugfs.c
+++ b/drivers/gpu/drm/i915/i915_debugfs.c
@@ -914,7 +914,7 @@ static int i915_cur_delayinfo(struct seq_file *m, void *unused)
seq_printf(m, "Render p-state limit: %d\n",
rp_state_limits & 0xff);
seq_printf(m, "CAGF: %dMHz\n", ((rpstat & GEN6_CAGF_MASK) >>
- GEN6_CAGF_SHIFT) * 50);
+ GEN6_CAGF_SHIFT) * GT_FREQUENCY_MULTIPLIER);
seq_printf(m, "RP CUR UP EI: %dus\n", rpupei &
GEN6_CURICONT_MASK);
seq_printf(m, "RP CUR UP: %dus\n", rpcurup &
@@ -930,15 +930,15 @@ static int i915_cur_delayinfo(struct seq_file *m, void *unused)
max_freq = (rp_state_cap & 0xff0000) >> 16;
seq_printf(m, "Lowest (RPN) frequency: %dMHz\n",
- max_freq * 50);
+ max_freq * GT_FREQUENCY_MULTIPLIER);
max_freq = (rp_state_cap & 0xff00) >> 8;
seq_printf(m, "Nominal (RP1) frequency: %dMHz\n",
- max_freq * 50);
+ max_freq * GT_FREQUENCY_MULTIPLIER);
max_freq = rp_state_cap & 0xff;
seq_printf(m, "Max non-overclocked (RP0) frequency: %dMHz\n",
- max_freq * 50);
+ max_freq * GT_FREQUENCY_MULTIPLIER);
} else {
seq_printf(m, "no P-state info available\n");
}
@@ -1292,7 +1292,7 @@ static int i915_ring_freq_table(struct seq_file *m, void *unused)
continue;
}
ia_freq = I915_READ(GEN6_PCODE_DATA);
- seq_printf(m, "%d\t\t%d\n", gpu_freq * 50, ia_freq * 100);
+ seq_printf(m, "%d\t\t%d\n", gpu_freq * GT_FREQUENCY_MULTIPLIER, ia_freq * 100);
}
mutex_unlock(&dev->struct_mutex);
@@ -1717,7 +1717,7 @@ i915_max_freq_read(struct file *filp,
return ret;
len = snprintf(buf, sizeof(buf),
- "max freq: %d\n", dev_priv->rps.max_delay * 50);
+ "max freq: %d\n", dev_priv->rps.max_delay * GT_FREQUENCY_MULTIPLIER);
mutex_unlock(&dev->struct_mutex);
if (len > sizeof(buf))
@@ -1760,9 +1760,9 @@ i915_max_freq_write(struct file *filp,
/*
* Turbo will still be enabled, but won't go above the set value.
*/
- dev_priv->rps.max_delay = val / 50;
+ dev_priv->rps.max_delay = val / GT_FREQUENCY_MULTIPLIER;
- gen6_set_rps(dev, val / 50);
+ gen6_set_rps(dev, val / GT_FREQUENCY_MULTIPLIER);
mutex_unlock(&dev->struct_mutex);
return cnt;
@@ -1793,7 +1793,7 @@ i915_min_freq_read(struct file *filp, char __user *ubuf, size_t max,
return ret;
len = snprintf(buf, sizeof(buf),
- "min freq: %d\n", dev_priv->rps.min_delay * 50);
+ "min freq: %d\n", dev_priv->rps.min_delay * GT_FREQUENCY_MULTIPLIER);
mutex_unlock(&dev->struct_mutex);
if (len > sizeof(buf))
@@ -1834,9 +1834,9 @@ i915_min_freq_write(struct file *filp, const char __user *ubuf, size_t cnt,
/*
* Turbo will still be enabled, but won't go below the set value.
*/
- dev_priv->rps.min_delay = val / 50;
+ dev_priv->rps.min_delay = val / GT_FREQUENCY_MULTIPLIER;
- gen6_set_rps(dev, val / 50);
+ gen6_set_rps(dev, val / GT_FREQUENCY_MULTIPLIER);
mutex_unlock(&dev->struct_mutex);
return cnt;
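The RP/RPS hardware fields encode GT frequency in 50 MHz units; GT_FREQUENCY_MULTIPLIER (defined in i915_drv.h later in this diff) just names that unit in place of the magic 50. The two directions of the conversion, isolated (helper names hypothetical):

#define GT_FREQUENCY_MULTIPLIER 50      /* RP fields are 50 MHz units */

static unsigned int gt_field_to_mhz(unsigned int field)
{
        return field * GT_FREQUENCY_MULTIPLIER; /* e.g. 22 -> 1100 MHz */
}

static unsigned int mhz_to_gt_field(unsigned int mhz)
{
        return mhz / GT_FREQUENCY_MULTIPLIER;   /* e.g. 1100 MHz -> 22 */
}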
diff --git a/drivers/gpu/drm/i915/i915_dma.c b/drivers/gpu/drm/i915/i915_dma.c
index 2c09900e326..ffbc9156c79 100644
--- a/drivers/gpu/drm/i915/i915_dma.c
+++ b/drivers/gpu/drm/i915/i915_dma.c
@@ -1461,7 +1461,7 @@ int i915_driver_load(struct drm_device *dev, unsigned long flags)
{
struct drm_i915_private *dev_priv;
struct intel_device_info *info;
- int ret = 0, mmio_bar;
+ int ret = 0, mmio_bar, mmio_size;
uint32_t aperture_size;
info = (struct intel_device_info *) flags;
@@ -1526,7 +1526,19 @@ int i915_driver_load(struct drm_device *dev, unsigned long flags)
dma_set_coherent_mask(&dev->pdev->dev, DMA_BIT_MASK(32));
mmio_bar = IS_GEN2(dev) ? 1 : 0;
- dev_priv->regs = pci_iomap(dev->pdev, mmio_bar, 0);
+ /* Before gen4, the registers and the GTT are behind different BARs.
+ * However, from gen4 onwards, the registers and the GTT are shared
+ * in the same BAR, so we want to restrict this ioremap from
+ * clobbering the GTT, which we want to map with ioremap_wc instead.
+ * Fortunately, the register BAR remains the same size for all the
+ * earlier generations up to Ironlake.
+ */
+ if (info->gen < 5)
+ mmio_size = 512*1024;
+ else
+ mmio_size = 2*1024*1024;
+
+ dev_priv->regs = pci_iomap(dev->pdev, mmio_bar, mmio_size);
if (!dev_priv->regs) {
DRM_ERROR("failed to map registers\n");
ret = -EIO;
@@ -1607,6 +1619,7 @@ int i915_driver_load(struct drm_device *dev, unsigned long flags)
spin_lock_init(&dev_priv->irq_lock);
spin_lock_init(&dev_priv->error_lock);
spin_lock_init(&dev_priv->rps.lock);
+ spin_lock_init(&dev_priv->dpio_lock);
if (IS_IVYBRIDGE(dev) || IS_HASWELL(dev))
dev_priv->num_pipe = 3;
@@ -1855,8 +1868,8 @@ struct drm_ioctl_desc i915_ioctls[] = {
DRM_IOCTL_DEF_DRV(I915_GEM_PIN, i915_gem_pin_ioctl, DRM_AUTH|DRM_ROOT_ONLY|DRM_UNLOCKED),
DRM_IOCTL_DEF_DRV(I915_GEM_UNPIN, i915_gem_unpin_ioctl, DRM_AUTH|DRM_ROOT_ONLY|DRM_UNLOCKED),
DRM_IOCTL_DEF_DRV(I915_GEM_BUSY, i915_gem_busy_ioctl, DRM_AUTH|DRM_UNLOCKED),
- DRM_IOCTL_DEF_DRV(I915_GEM_SET_CACHEING, i915_gem_set_cacheing_ioctl, DRM_UNLOCKED),
- DRM_IOCTL_DEF_DRV(I915_GEM_GET_CACHEING, i915_gem_get_cacheing_ioctl, DRM_UNLOCKED),
+ DRM_IOCTL_DEF_DRV(I915_GEM_SET_CACHING, i915_gem_set_caching_ioctl, DRM_UNLOCKED),
+ DRM_IOCTL_DEF_DRV(I915_GEM_GET_CACHING, i915_gem_get_caching_ioctl, DRM_UNLOCKED),
DRM_IOCTL_DEF_DRV(I915_GEM_THROTTLE, i915_gem_throttle_ioctl, DRM_AUTH|DRM_UNLOCKED),
DRM_IOCTL_DEF_DRV(I915_GEM_ENTERVT, i915_gem_entervt_ioctl, DRM_AUTH|DRM_MASTER|DRM_ROOT_ONLY|DRM_UNLOCKED),
DRM_IOCTL_DEF_DRV(I915_GEM_LEAVEVT, i915_gem_leavevt_ioctl, DRM_AUTH|DRM_MASTER|DRM_ROOT_ONLY|DRM_UNLOCKED),
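The third argument to pci_iomap() caps how much of the BAR gets mapped; the size split above, sketched on its own (function and variable names local to the sketch):

#include <linux/pci.h>

static void __iomem *example_map_regs(struct pci_dev *pdev, int gen)
{
        int mmio_bar = gen == 2 ? 1 : 0;
        /* gen2/3: dedicated 512 KiB register BAR; gen4+: registers
         * share a BAR with the GTT, so map only the first 2 MiB and
         * leave the GTT aperture free for ioremap_wc. */
        unsigned long mmio_size = gen < 5 ? 512 * 1024 : 2 * 1024 * 1024;

        return pci_iomap(pdev, mmio_bar, mmio_size);
}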
diff --git a/drivers/gpu/drm/i915/i915_drv.h b/drivers/gpu/drm/i915/i915_drv.h
index 26c6959a524..4f2831aa5fe 100644
--- a/drivers/gpu/drm/i915/i915_drv.h
+++ b/drivers/gpu/drm/i915/i915_drv.h
@@ -899,9 +899,29 @@ enum i915_cache_level {
I915_CACHE_LLC_MLC, /* gen6+, in docs at least! */
};
+struct drm_i915_gem_object_ops {
+ /* Interface between the GEM object and its backing storage.
+ * get_pages() is called once prior to the use of the associated set
+ * of pages before to binding them into the GTT, and put_pages() is
+ * called after we no longer need them. As we expect there to be
+ * associated cost with migrating pages between the backing storage
+ * and making them available for the GPU (e.g. clflush), we may hold
+ * onto the pages after they are no longer referenced by the GPU
+ * in case they may be used again shortly (for example migrating the
+ * pages to a different memory domain within the GTT). put_pages()
+ * will therefore most likely be called when the object itself is
+ * being released or under memory pressure (where we attempt to
+ * reap pages for the shrinker).
+ */
+ int (*get_pages)(struct drm_i915_gem_object *);
+ void (*put_pages)(struct drm_i915_gem_object *);
+};
+
struct drm_i915_gem_object {
struct drm_gem_object base;
+ const struct drm_i915_gem_object_ops *ops;
+
/** Current space allocated to this object in the GTT, if any. */
struct drm_mm_node *gtt_space;
struct list_head gtt_list;
@@ -986,17 +1006,12 @@ struct drm_i915_gem_object {
unsigned int has_aliasing_ppgtt_mapping:1;
unsigned int has_global_gtt_mapping:1;
+ unsigned int has_dma_mapping:1;
- struct page **pages;
-
- /**
- * DMAR support
- */
- struct scatterlist *sg_list;
- int num_sg;
+ struct sg_table *pages;
+ int pages_pin_count;
/* prime dma-buf support */
- struct sg_table *sg_table;
void *dma_buf_vmapping;
int vmapping_count;
@@ -1163,6 +1178,8 @@ struct drm_i915_file_private {
#define HAS_L3_GPU_CACHE(dev) (IS_IVYBRIDGE(dev) || IS_HASWELL(dev))
+#define GT_FREQUENCY_MULTIPLIER 50
+
#include "i915_trace.h"
/**
@@ -1284,10 +1301,10 @@ int i915_gem_unpin_ioctl(struct drm_device *dev, void *data,
struct drm_file *file_priv);
int i915_gem_busy_ioctl(struct drm_device *dev, void *data,
struct drm_file *file_priv);
-int i915_gem_get_cacheing_ioctl(struct drm_device *dev, void *data,
- struct drm_file *file);
-int i915_gem_set_cacheing_ioctl(struct drm_device *dev, void *data,
- struct drm_file *file);
+int i915_gem_get_caching_ioctl(struct drm_device *dev, void *data,
+ struct drm_file *file);
+int i915_gem_set_caching_ioctl(struct drm_device *dev, void *data,
+ struct drm_file *file);
int i915_gem_throttle_ioctl(struct drm_device *dev, void *data,
struct drm_file *file_priv);
int i915_gem_madvise_ioctl(struct drm_device *dev, void *data,
@@ -1306,7 +1323,8 @@ int i915_gem_wait_ioctl(struct drm_device *dev, void *data,
struct drm_file *file_priv);
void i915_gem_load(struct drm_device *dev);
int i915_gem_init_object(struct drm_gem_object *obj);
-void i915_gem_object_init(struct drm_i915_gem_object *obj);
+void i915_gem_object_init(struct drm_i915_gem_object *obj,
+ const struct drm_i915_gem_object_ops *ops);
struct drm_i915_gem_object *i915_gem_alloc_object(struct drm_device *dev,
size_t size);
void i915_gem_free_object(struct drm_gem_object *obj);
@@ -1319,7 +1337,27 @@ int __must_check i915_gem_object_unbind(struct drm_i915_gem_object *obj);
void i915_gem_release_mmap(struct drm_i915_gem_object *obj);
void i915_gem_lastclose(struct drm_device *dev);
-int __must_check i915_gem_object_get_pages_gtt(struct drm_i915_gem_object *obj);
+int __must_check i915_gem_object_get_pages(struct drm_i915_gem_object *obj);
+static inline struct page *i915_gem_object_get_page(struct drm_i915_gem_object *obj, int n)
+{
+ struct scatterlist *sg = obj->pages->sgl;
+ while (n >= SG_MAX_SINGLE_ALLOC) {
+ sg = sg_chain_ptr(sg + SG_MAX_SINGLE_ALLOC - 1);
+ n -= SG_MAX_SINGLE_ALLOC - 1;
+ }
+ return sg_page(sg+n);
+}
+static inline void i915_gem_object_pin_pages(struct drm_i915_gem_object *obj)
+{
+ BUG_ON(obj->pages == NULL);
+ obj->pages_pin_count++;
+}
+static inline void i915_gem_object_unpin_pages(struct drm_i915_gem_object *obj)
+{
+ BUG_ON(obj->pages_pin_count == 0);
+ obj->pages_pin_count--;
+}
+
int __must_check i915_mutex_lock_interruptible(struct drm_device *dev);
int i915_gem_object_sync(struct drm_i915_gem_object *obj,
struct intel_ring_buffer *to);
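Putting the new helpers together, a hedged sketch of the calling convention for code that needs CPU access to an object's backing pages (assumes i915_drv.h for the object helpers; the peek helper itself is illustrative only):

#include <linux/highmem.h>
#include <linux/types.h>

static int example_peek_byte(struct drm_i915_gem_object *obj,
                             int page_nr, int offset, u8 *out)
{
        struct page *page;
        u8 *vaddr;
        int ret;

        ret = i915_gem_object_get_pages(obj);   /* populate obj->pages */
        if (ret)
                return ret;

        /* Keep the shrinker from reaping the pages while in use. */
        i915_gem_object_pin_pages(obj);

        page = i915_gem_object_get_page(obj, page_nr);
        vaddr = kmap_atomic(page);
        *out = vaddr[offset];
        kunmap_atomic(vaddr);

        i915_gem_object_unpin_pages(obj);
        return 0;
}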
diff --git a/drivers/gpu/drm/i915/i915_gem.c b/drivers/gpu/drm/i915/i915_gem.c
index 87a64e5f28f..365a7dc8a4a 100644
--- a/drivers/gpu/drm/i915/i915_gem.c
+++ b/drivers/gpu/drm/i915/i915_gem.c
@@ -343,7 +343,7 @@ shmem_pread_fast(struct page *page, int shmem_page_offset, int page_length,
page_length);
kunmap_atomic(vaddr);
- return ret;
+ return ret ? -EFAULT : 0;
}
static void
@@ -394,7 +394,7 @@ shmem_pread_slow(struct page *page, int shmem_page_offset, int page_length,
page_length);
kunmap(page);
- return ret;
+ return ret ? -EFAULT : 0;
}
static int
@@ -403,7 +403,6 @@ i915_gem_shmem_pread(struct drm_device *dev,
struct drm_i915_gem_pread *args,
struct drm_file *file)
{
- struct address_space *mapping = obj->base.filp->f_path.dentry->d_inode->i_mapping;
char __user *user_data;
ssize_t remain;
loff_t offset;
@@ -412,7 +411,8 @@ i915_gem_shmem_pread(struct drm_device *dev,
int hit_slowpath = 0;
int prefaulted = 0;
int needs_clflush = 0;
- int release_page;
+ struct scatterlist *sg;
+ int i;
user_data = (char __user *) (uintptr_t) args->data_ptr;
remain = args->size;
@@ -433,11 +433,23 @@ i915_gem_shmem_pread(struct drm_device *dev,
}
}
+ ret = i915_gem_object_get_pages(obj);
+ if (ret)
+ return ret;
+
+ i915_gem_object_pin_pages(obj);
+
offset = args->offset;
- while (remain > 0) {
+ for_each_sg(obj->pages->sgl, sg, obj->pages->nents, i) {
struct page *page;
+ if (i < offset >> PAGE_SHIFT)
+ continue;
+
+ if (remain <= 0)
+ break;
+
/* Operation in this page
*
* shmem_page_offset = offset within page in shmem file
@@ -448,18 +460,7 @@ i915_gem_shmem_pread(struct drm_device *dev,
if ((shmem_page_offset + page_length) > PAGE_SIZE)
page_length = PAGE_SIZE - shmem_page_offset;
- if (obj->pages) {
- page = obj->pages[offset >> PAGE_SHIFT];
- release_page = 0;
- } else {
- page = shmem_read_mapping_page(mapping, offset >> PAGE_SHIFT);
- if (IS_ERR(page)) {
- ret = PTR_ERR(page);
- goto out;
- }
- release_page = 1;
- }
-
+ page = sg_page(sg);
page_do_bit17_swizzling = obj_do_bit17_swizzling &&
(page_to_phys(page) & (1 << 17)) != 0;
@@ -470,7 +471,6 @@ i915_gem_shmem_pread(struct drm_device *dev,
goto next_page;
hit_slowpath = 1;
- page_cache_get(page);
mutex_unlock(&dev->struct_mutex);
if (!prefaulted) {
@@ -488,16 +488,12 @@ i915_gem_shmem_pread(struct drm_device *dev,
needs_clflush);
mutex_lock(&dev->struct_mutex);
- page_cache_release(page);
+
next_page:
mark_page_accessed(page);
- if (release_page)
- page_cache_release(page);
- if (ret) {
- ret = -EFAULT;
+ if (ret)
goto out;
- }
remain -= page_length;
user_data += page_length;
@@ -505,6 +501,8 @@ next_page:
}
out:
+ i915_gem_object_unpin_pages(obj);
+
if (hit_slowpath) {
/* Fixup: Kill any reinstated backing storage pages */
if (obj->madv == __I915_MADV_PURGED)
@@ -690,7 +688,7 @@ shmem_pwrite_fast(struct page *page, int shmem_page_offset, int page_length,
page_length);
kunmap_atomic(vaddr);
- return ret;
+ return ret ? -EFAULT : 0;
}
/* Only difference to the fast-path function is that this can handle bit17
@@ -724,7 +722,7 @@ shmem_pwrite_slow(struct page *page, int shmem_page_offset, int page_length,
page_do_bit17_swizzling);
kunmap(page);
- return ret;
+ return ret ? -EFAULT : 0;
}
static int
@@ -733,7 +731,6 @@ i915_gem_shmem_pwrite(struct drm_device *dev,
struct drm_i915_gem_pwrite *args,
struct drm_file *file)
{
- struct address_space *mapping = obj->base.filp->f_path.dentry->d_inode->i_mapping;
ssize_t remain;
loff_t offset;
char __user *user_data;
@@ -742,7 +739,8 @@ i915_gem_shmem_pwrite(struct drm_device *dev,
int hit_slowpath = 0;
int needs_clflush_after = 0;
int needs_clflush_before = 0;
- int release_page;
+ int i;
+ struct scatterlist *sg;
user_data = (char __user *) (uintptr_t) args->data_ptr;
remain = args->size;
@@ -768,13 +766,25 @@ i915_gem_shmem_pwrite(struct drm_device *dev,
&& obj->cache_level == I915_CACHE_NONE)
needs_clflush_before = 1;
+ ret = i915_gem_object_get_pages(obj);
+ if (ret)
+ return ret;
+
+ i915_gem_object_pin_pages(obj);
+
offset = args->offset;
obj->dirty = 1;
- while (remain > 0) {
+ for_each_sg(obj->pages->sgl, sg, obj->pages->nents, i) {
struct page *page;
int partial_cacheline_write;
+ if (i < offset >> PAGE_SHIFT)
+ continue;
+
+ if (remain <= 0)
+ break;
+
/* Operation in this page
*
* shmem_page_offset = offset within page in shmem file
@@ -793,18 +803,7 @@ i915_gem_shmem_pwrite(struct drm_device *dev,
((shmem_page_offset | page_length)
& (boot_cpu_data.x86_clflush_size - 1));
- if (obj->pages) {
- page = obj->pages[offset >> PAGE_SHIFT];
- release_page = 0;
- } else {
- page = shmem_read_mapping_page(mapping, offset >> PAGE_SHIFT);
- if (IS_ERR(page)) {
- ret = PTR_ERR(page);
- goto out;
- }
- release_page = 1;
- }
-
+ page = sg_page(sg);
page_do_bit17_swizzling = obj_do_bit17_swizzling &&
(page_to_phys(page) & (1 << 17)) != 0;
@@ -816,26 +815,20 @@ i915_gem_shmem_pwrite(struct drm_device *dev,
goto next_page;
hit_slowpath = 1;
- page_cache_get(page);
mutex_unlock(&dev->struct_mutex);
-
ret = shmem_pwrite_slow(page, shmem_page_offset, page_length,
user_data, page_do_bit17_swizzling,
partial_cacheline_write,
needs_clflush_after);
mutex_lock(&dev->struct_mutex);
- page_cache_release(page);
+
next_page:
set_page_dirty(page);
mark_page_accessed(page);
- if (release_page)
- page_cache_release(page);
- if (ret) {
- ret = -EFAULT;
+ if (ret)
goto out;
- }
remain -= page_length;
user_data += page_length;
@@ -843,6 +836,8 @@ next_page:
}
out:
+ i915_gem_object_unpin_pages(obj);
+
if (hit_slowpath) {
/* Fixup: Kill any reinstated backing storage pages */
if (obj->madv == __I915_MADV_PURGED)
@@ -1650,18 +1645,13 @@ i915_gem_object_is_purgeable(struct drm_i915_gem_object *obj)
return obj->madv == I915_MADV_DONTNEED;
}
-static int
+static void
i915_gem_object_put_pages_gtt(struct drm_i915_gem_object *obj)
{
int page_count = obj->base.size / PAGE_SIZE;
+ struct scatterlist *sg;
int ret, i;
- BUG_ON(obj->gtt_space);
-
- if (obj->pages == NULL)
- return 0;
-
- BUG_ON(obj->gtt_space);
BUG_ON(obj->madv == __I915_MADV_PURGED);
ret = i915_gem_object_set_to_cpu_domain(obj, true);
@@ -1680,22 +1670,40 @@ i915_gem_object_put_pages_gtt(struct drm_i915_gem_object *obj)
if (obj->madv == I915_MADV_DONTNEED)
obj->dirty = 0;
- for (i = 0; i < page_count; i++) {
+ for_each_sg(obj->pages->sgl, sg, page_count, i) {
+ struct page *page = sg_page(sg);
+
if (obj->dirty)
- set_page_dirty(obj->pages[i]);
+ set_page_dirty(page);
if (obj->madv == I915_MADV_WILLNEED)
- mark_page_accessed(obj->pages[i]);
+ mark_page_accessed(page);
- page_cache_release(obj->pages[i]);
+ page_cache_release(page);
}
obj->dirty = 0;
- drm_free_large(obj->pages);
+ sg_free_table(obj->pages);
+ kfree(obj->pages);
+}
+
+static int
+i915_gem_object_put_pages(struct drm_i915_gem_object *obj)
+{
+ const struct drm_i915_gem_object_ops *ops = obj->ops;
+
+ if (obj->pages == NULL)
+ return 0;
+
+ BUG_ON(obj->gtt_space);
+
+ if (obj->pages_pin_count)
+ return -EBUSY;
+
+ ops->put_pages(obj);
obj->pages = NULL;
list_del(&obj->gtt_list);
-
if (i915_gem_object_is_purgeable(obj))
i915_gem_object_truncate(obj);
@@ -1712,7 +1720,7 @@ i915_gem_purge(struct drm_i915_private *dev_priv, long target)
&dev_priv->mm.unbound_list,
gtt_list) {
if (i915_gem_object_is_purgeable(obj) &&
- i915_gem_object_put_pages_gtt(obj) == 0) {
+ i915_gem_object_put_pages(obj) == 0) {
count += obj->base.size >> PAGE_SHIFT;
if (count >= target)
return count;
@@ -1724,7 +1732,7 @@ i915_gem_purge(struct drm_i915_private *dev_priv, long target)
mm_list) {
if (i915_gem_object_is_purgeable(obj) &&
i915_gem_object_unbind(obj) == 0 &&
- i915_gem_object_put_pages_gtt(obj) == 0) {
+ i915_gem_object_put_pages(obj) == 0) {
count += obj->base.size >> PAGE_SHIFT;
if (count >= target)
return count;
@@ -1742,21 +1750,20 @@ i915_gem_shrink_all(struct drm_i915_private *dev_priv)
i915_gem_evict_everything(dev_priv->dev);
list_for_each_entry_safe(obj, next, &dev_priv->mm.unbound_list, gtt_list)
- i915_gem_object_put_pages_gtt(obj);
+ i915_gem_object_put_pages(obj);
}
-int
+static int
i915_gem_object_get_pages_gtt(struct drm_i915_gem_object *obj)
{
struct drm_i915_private *dev_priv = obj->base.dev->dev_private;
int page_count, i;
struct address_space *mapping;
+ struct sg_table *st;
+ struct scatterlist *sg;
struct page *page;
gfp_t gfp;
- if (obj->pages || obj->sg_table)
- return 0;
-
/* Assert that the object is not currently in any GPU domain. As it
* wasn't in the GTT, there shouldn't be any way it could have been in
* a GPU cache
@@ -1764,20 +1771,27 @@ i915_gem_object_get_pages_gtt(struct drm_i915_gem_object *obj)
BUG_ON(obj->base.read_domains & I915_GEM_GPU_DOMAINS);
BUG_ON(obj->base.write_domain & I915_GEM_GPU_DOMAINS);
- /* Get the list of pages out of our struct file. They'll be pinned
- * at this point until we release them.
- */
+ st = kmalloc(sizeof(*st), GFP_KERNEL);
+ if (st == NULL)
+ return -ENOMEM;
+
page_count = obj->base.size / PAGE_SIZE;
- obj->pages = drm_malloc_ab(page_count, sizeof(struct page *));
- if (obj->pages == NULL)
+ if (sg_alloc_table(st, page_count, GFP_KERNEL)) {
+ sg_free_table(st);
+ kfree(st);
return -ENOMEM;
+ }
- /* Fail silently without starting the shrinker */
+ /* Get the list of pages out of our struct file. They'll be pinned
+ * at this point until we release them.
+ *
+ * Fail silently without starting the shrinker
+ */
mapping = obj->base.filp->f_path.dentry->d_inode->i_mapping;
gfp = mapping_gfp_mask(mapping);
gfp |= __GFP_NORETRY | __GFP_NOWARN;
gfp &= ~(__GFP_IO | __GFP_WAIT);
- for (i = 0; i < page_count; i++) {
+ for_each_sg(st->sgl, sg, page_count, i) {
page = shmem_read_mapping_page_gfp(mapping, i, gfp);
if (IS_ERR(page)) {
i915_gem_purge(dev_priv, page_count);
@@ -1800,24 +1814,50 @@ i915_gem_object_get_pages_gtt(struct drm_i915_gem_object *obj)
gfp &= ~(__GFP_IO | __GFP_WAIT);
}
- obj->pages[i] = page;
+ sg_set_page(sg, page, PAGE_SIZE, 0);
}
if (i915_gem_object_needs_bit17_swizzle(obj))
i915_gem_object_do_bit_17_swizzle(obj);
- list_add_tail(&obj->gtt_list, &dev_priv->mm.unbound_list);
+ obj->pages = st;
return 0;
err_pages:
- while (i--)
- page_cache_release(obj->pages[i]);
-
- drm_free_large(obj->pages);
- obj->pages = NULL;
+ for_each_sg(st->sgl, sg, i, page_count)
+ page_cache_release(sg_page(sg));
+ sg_free_table(st);
+ kfree(st);
return PTR_ERR(page);
}
+/* Ensure that the associated pages are gathered from the backing storage
+ * and pinned into our object. i915_gem_object_get_pages() may be called
+ * multiple times before they are released by a single call to
+ * i915_gem_object_put_pages() - once the pages are no longer referenced
+ * either as a result of memory pressure (reaping pages under the shrinker)
+ * or as the object is itself released.
+ */
+int
+i915_gem_object_get_pages(struct drm_i915_gem_object *obj)
+{
+ struct drm_i915_private *dev_priv = obj->base.dev->dev_private;
+ const struct drm_i915_gem_object_ops *ops = obj->ops;
+ int ret;
+
+ if (obj->pages)
+ return 0;
+
+ BUG_ON(obj->pages_pin_count);
+
+ ret = ops->get_pages(obj);
+ if (ret)
+ return ret;
+
+ list_add_tail(&obj->gtt_list, &dev_priv->mm.unbound_list);
+ return 0;
+}
+
void
i915_gem_object_move_to_active(struct drm_i915_gem_object *obj,
struct intel_ring_buffer *ring,
@@ -2071,7 +2111,6 @@ void i915_gem_reset(struct drm_device *dev)
obj->base.read_domains &= ~I915_GEM_GPU_DOMAINS;
}
-
/* The fence registers are invalidated so clear them out */
i915_gem_reset_fences(dev);
}
@@ -2871,7 +2910,7 @@ i915_gem_object_bind_to_gtt(struct drm_i915_gem_object *obj,
return -E2BIG;
}
- ret = i915_gem_object_get_pages_gtt(obj);
+ ret = i915_gem_object_get_pages(obj);
if (ret)
return ret;
@@ -2971,7 +3010,7 @@ i915_gem_clflush_object(struct drm_i915_gem_object *obj)
trace_i915_gem_object_clflush(obj);
- drm_clflush_pages(obj->pages, obj->base.size / PAGE_SIZE);
+ drm_clflush_sg(obj->pages);
}
/** Flushes the GTT write domain for the object if it's dirty. */
@@ -3146,10 +3185,10 @@ int i915_gem_object_set_cache_level(struct drm_i915_gem_object *obj,
return 0;
}
-int i915_gem_get_cacheing_ioctl(struct drm_device *dev, void *data,
- struct drm_file *file)
+int i915_gem_get_caching_ioctl(struct drm_device *dev, void *data,
+ struct drm_file *file)
{
- struct drm_i915_gem_cacheing *args = data;
+ struct drm_i915_gem_caching *args = data;
struct drm_i915_gem_object *obj;
int ret;
@@ -3163,7 +3202,7 @@ int i915_gem_get_cacheing_ioctl(struct drm_device *dev, void *data,
goto unlock;
}
- args->cacheing = obj->cache_level != I915_CACHE_NONE;
+ args->caching = obj->cache_level != I915_CACHE_NONE;
drm_gem_object_unreference(&obj->base);
unlock:
@@ -3171,10 +3210,10 @@ unlock:
return ret;
}
-int i915_gem_set_cacheing_ioctl(struct drm_device *dev, void *data,
- struct drm_file *file)
+int i915_gem_set_caching_ioctl(struct drm_device *dev, void *data,
+ struct drm_file *file)
{
- struct drm_i915_gem_cacheing *args = data;
+ struct drm_i915_gem_caching *args = data;
struct drm_i915_gem_object *obj;
enum i915_cache_level level;
int ret;
@@ -3183,11 +3222,11 @@ int i915_gem_set_cacheing_ioctl(struct drm_device *dev, void *data,
if (ret)
return ret;
- switch (args->cacheing) {
- case I915_CACHEING_NONE:
+ switch (args->caching) {
+ case I915_CACHING_NONE:
level = I915_CACHE_NONE;
break;
- case I915_CACHEING_CACHED:
+ case I915_CACHING_CACHED:
level = I915_CACHE_LLC;
break;
default:
@@ -3386,7 +3425,8 @@ i915_gem_object_pin(struct drm_i915_gem_object *obj,
{
int ret;
- BUG_ON(obj->pin_count == DRM_I915_GEM_OBJECT_MAX_PIN_COUNT);
+ if (WARN_ON(obj->pin_count == DRM_I915_GEM_OBJECT_MAX_PIN_COUNT))
+ return -EBUSY;
if (obj->gtt_space != NULL) {
if ((alignment && obj->gtt_offset & (alignment - 1)) ||
@@ -3610,15 +3650,16 @@ unlock:
return ret;
}
-void i915_gem_object_init(struct drm_i915_gem_object *obj)
+void i915_gem_object_init(struct drm_i915_gem_object *obj,
+ const struct drm_i915_gem_object_ops *ops)
{
- obj->base.driver_private = NULL;
-
INIT_LIST_HEAD(&obj->mm_list);
INIT_LIST_HEAD(&obj->gtt_list);
INIT_LIST_HEAD(&obj->ring_list);
INIT_LIST_HEAD(&obj->exec_list);
+ obj->ops = ops;
+
obj->fence_reg = I915_FENCE_REG_NONE;
obj->madv = I915_MADV_WILLNEED;
/* Avoid an unnecessary call to unbind on the first bind. */
@@ -3627,6 +3668,11 @@ void i915_gem_object_init(struct drm_i915_gem_object *obj)
i915_gem_info_add_obj(obj->base.dev->dev_private, obj->base.size);
}
+static const struct drm_i915_gem_object_ops i915_gem_object_ops = {
+ .get_pages = i915_gem_object_get_pages_gtt,
+ .put_pages = i915_gem_object_put_pages_gtt,
+};
+
struct drm_i915_gem_object *i915_gem_alloc_object(struct drm_device *dev,
size_t size)
{
@@ -3653,7 +3699,7 @@ struct drm_i915_gem_object *i915_gem_alloc_object(struct drm_device *dev,
mapping = obj->base.filp->f_path.dentry->d_inode->i_mapping;
mapping_set_gfp_mask(mapping, mask);
- i915_gem_object_init(obj);
+ i915_gem_object_init(obj, &i915_gem_object_ops);
obj->base.write_domain = I915_GEM_DOMAIN_CPU;
obj->base.read_domains = I915_GEM_DOMAIN_CPU;
@@ -3693,9 +3739,6 @@ void i915_gem_free_object(struct drm_gem_object *gem_obj)
trace_i915_gem_object_destroy(obj);
- if (gem_obj->import_attach)
- drm_prime_gem_destroy(gem_obj, obj->sg_table);
-
if (obj->phys_obj)
i915_gem_detach_phys_object(dev, obj);
@@ -3711,9 +3754,15 @@ void i915_gem_free_object(struct drm_gem_object *gem_obj)
dev_priv->mm.interruptible = was_interruptible;
}
- i915_gem_object_put_pages_gtt(obj);
+ obj->pages_pin_count = 0;
+ i915_gem_object_put_pages(obj);
i915_gem_object_free_mmap_offset(obj);
+ BUG_ON(obj->pages);
+
+ if (obj->base.import_attach)
+ drm_prime_gem_destroy(&obj->base, NULL);
+
drm_gem_object_release(&obj->base);
i915_gem_info_remove_obj(dev_priv, obj->base.size);
@@ -4370,9 +4419,10 @@ i915_gem_inactive_shrink(struct shrinker *shrinker, struct shrink_control *sc)
cnt = 0;
list_for_each_entry(obj, &dev_priv->mm.unbound_list, gtt_list)
- cnt += obj->base.size >> PAGE_SHIFT;
+ if (obj->pages_pin_count == 0)
+ cnt += obj->base.size >> PAGE_SHIFT;
list_for_each_entry(obj, &dev_priv->mm.bound_list, gtt_list)
- if (obj->pin_count == 0)
+ if (obj->pin_count == 0 && obj->pages_pin_count == 0)
cnt += obj->base.size >> PAGE_SHIFT;
mutex_unlock(&dev->struct_mutex);
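The pread/pwrite loops above replace while (remain > 0) with a walk over the scatterlist, skipping entries before the start offset and stopping once the range is consumed. The shape of that walk, extracted into a hedged counting example (names hypothetical):

#include <linux/kernel.h>
#include <linux/mm.h>
#include <linux/scatterlist.h>

/* Count how many sg pages a [offset, offset + remain) range touches. */
static int example_pages_in_range(struct sg_table *st,
                                  loff_t offset, size_t remain)
{
        struct scatterlist *sg;
        size_t page_length;
        int i, touched = 0;

        for_each_sg(st->sgl, sg, st->nents, i) {
                if (i < offset >> PAGE_SHIFT)
                        continue;       /* before the range starts */
                if (remain == 0)
                        break;          /* range exhausted */

                page_length = min_t(size_t, remain,
                                    PAGE_SIZE - offset_in_page(offset));
                touched++;
                remain -= page_length;
                offset += page_length;
        }
        return touched;
}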
diff --git a/drivers/gpu/drm/i915/i915_gem_dmabuf.c b/drivers/gpu/drm/i915/i915_gem_dmabuf.c
index 43c95307f99..ca3497e1108 100644
--- a/drivers/gpu/drm/i915/i915_gem_dmabuf.c
+++ b/drivers/gpu/drm/i915/i915_gem_dmabuf.c
@@ -28,35 +28,62 @@
#include <linux/dma-buf.h>
static struct sg_table *i915_gem_map_dma_buf(struct dma_buf_attachment *attachment,
- enum dma_data_direction dir)
+ enum dma_data_direction dir)
{
struct drm_i915_gem_object *obj = attachment->dmabuf->priv;
- struct drm_device *dev = obj->base.dev;
- int npages = obj->base.size / PAGE_SIZE;
- struct sg_table *sg;
- int ret;
- int nents;
+ struct sg_table *st;
+ struct scatterlist *src, *dst;
+ int ret, i;
- ret = i915_mutex_lock_interruptible(dev);
+ ret = i915_mutex_lock_interruptible(obj->base.dev);
if (ret)
return ERR_PTR(ret);
- ret = i915_gem_object_get_pages_gtt(obj);
+ ret = i915_gem_object_get_pages(obj);
if (ret) {
- sg = ERR_PTR(ret);
+ st = ERR_PTR(ret);
+ goto out;
+ }
+
+ /* Copy sg so that we make an independent mapping */
+ st = kmalloc(sizeof(struct sg_table), GFP_KERNEL);
+ if (st == NULL) {
+ st = ERR_PTR(-ENOMEM);
goto out;
}
- /* link the pages into an SG then map the sg */
- sg = drm_prime_pages_to_sg(obj->pages, npages);
- nents = dma_map_sg(attachment->dev, sg->sgl, sg->nents, dir);
+ ret = sg_alloc_table(st, obj->pages->nents, GFP_KERNEL);
+ if (ret) {
+ kfree(st);
+ st = ERR_PTR(ret);
+ goto out;
+ }
+
+ src = obj->pages->sgl;
+ dst = st->sgl;
+ for (i = 0; i < obj->pages->nents; i++) {
+ sg_set_page(dst, sg_page(src), PAGE_SIZE, 0);
+ dst = sg_next(dst);
+ src = sg_next(src);
+ }
+
+ if (!dma_map_sg(attachment->dev, st->sgl, st->nents, dir)) {
+ sg_free_table(st);
+ kfree(st);
+ st = ERR_PTR(-ENOMEM);
+ goto out;
+ }
+
+ i915_gem_object_pin_pages(obj);
+
out:
- mutex_unlock(&dev->struct_mutex);
- return sg;
+ mutex_unlock(&obj->base.dev->struct_mutex);
+ return st;
}
static void i915_gem_unmap_dma_buf(struct dma_buf_attachment *attachment,
- struct sg_table *sg, enum dma_data_direction dir)
+ struct sg_table *sg,
+ enum dma_data_direction dir)
{
dma_unmap_sg(attachment->dev, sg->sgl, sg->nents, dir);
sg_free_table(sg);
@@ -78,7 +105,9 @@ static void *i915_gem_dmabuf_vmap(struct dma_buf *dma_buf)
{
struct drm_i915_gem_object *obj = dma_buf->priv;
struct drm_device *dev = obj->base.dev;
- int ret;
+ struct scatterlist *sg;
+ struct page **pages;
+ int ret, i;
ret = i915_mutex_lock_interruptible(dev);
if (ret)
@@ -89,22 +118,34 @@ static void *i915_gem_dmabuf_vmap(struct dma_buf *dma_buf)
goto out_unlock;
}
- ret = i915_gem_object_get_pages_gtt(obj);
- if (ret) {
- mutex_unlock(&dev->struct_mutex);
- return ERR_PTR(ret);
- }
+ ret = i915_gem_object_get_pages(obj);
+ if (ret)
+ goto error;
- obj->dma_buf_vmapping = vmap(obj->pages, obj->base.size / PAGE_SIZE, 0, PAGE_KERNEL);
- if (!obj->dma_buf_vmapping) {
- DRM_ERROR("failed to vmap object\n");
- goto out_unlock;
- }
+ ret = -ENOMEM;
+
+ pages = drm_malloc_ab(obj->pages->nents, sizeof(struct page *));
+ if (pages == NULL)
+ goto error;
+
+ for_each_sg(obj->pages->sgl, sg, obj->pages->nents, i)
+ pages[i] = sg_page(sg);
+
+ obj->dma_buf_vmapping = vmap(pages, obj->pages->nents, 0, PAGE_KERNEL);
+ drm_free_large(pages);
+
+ if (!obj->dma_buf_vmapping)
+ goto error;
obj->vmapping_count = 1;
+ i915_gem_object_pin_pages(obj);
out_unlock:
mutex_unlock(&dev->struct_mutex);
return obj->dma_buf_vmapping;
+
+error:
+ mutex_unlock(&dev->struct_mutex);
+ return ERR_PTR(ret);
}
static void i915_gem_dmabuf_vunmap(struct dma_buf *dma_buf, void *vaddr)
@@ -117,10 +158,11 @@ static void i915_gem_dmabuf_vunmap(struct dma_buf *dma_buf, void *vaddr)
if (ret)
return;
- --obj->vmapping_count;
- if (obj->vmapping_count == 0) {
+ if (--obj->vmapping_count == 0) {
vunmap(obj->dma_buf_vmapping);
obj->dma_buf_vmapping = NULL;
+
+ i915_gem_object_unpin_pages(obj);
}
mutex_unlock(&dev->struct_mutex);
}
@@ -180,22 +222,43 @@ static const struct dma_buf_ops i915_dmabuf_ops = {
};
struct dma_buf *i915_gem_prime_export(struct drm_device *dev,
- struct drm_gem_object *gem_obj, int flags)
+ struct drm_gem_object *gem_obj, int flags)
{
struct drm_i915_gem_object *obj = to_intel_bo(gem_obj);
- return dma_buf_export(obj, &i915_dmabuf_ops,
- obj->base.size, 0600);
+ return dma_buf_export(obj, &i915_dmabuf_ops, obj->base.size, 0600);
}
+static int i915_gem_object_get_pages_dmabuf(struct drm_i915_gem_object *obj)
+{
+ struct sg_table *sg;
+
+ sg = dma_buf_map_attachment(obj->base.import_attach, DMA_BIDIRECTIONAL);
+ if (IS_ERR(sg))
+ return PTR_ERR(sg);
+
+ obj->pages = sg;
+ obj->has_dma_mapping = true;
+ return 0;
+}
+
+static void i915_gem_object_put_pages_dmabuf(struct drm_i915_gem_object *obj)
+{
+ dma_buf_unmap_attachment(obj->base.import_attach,
+ obj->pages, DMA_BIDIRECTIONAL);
+ obj->has_dma_mapping = false;
+}
+
+static const struct drm_i915_gem_object_ops i915_gem_object_dmabuf_ops = {
+ .get_pages = i915_gem_object_get_pages_dmabuf,
+ .put_pages = i915_gem_object_put_pages_dmabuf,
+};
+
struct drm_gem_object *i915_gem_prime_import(struct drm_device *dev,
- struct dma_buf *dma_buf)
+ struct dma_buf *dma_buf)
{
struct dma_buf_attachment *attach;
- struct sg_table *sg;
struct drm_i915_gem_object *obj;
- int npages;
- int size;
int ret;
/* is this one of own objects? */
@@ -213,34 +276,24 @@ struct drm_gem_object *i915_gem_prime_import(struct drm_device *dev,
if (IS_ERR(attach))
return ERR_CAST(attach);
- sg = dma_buf_map_attachment(attach, DMA_BIDIRECTIONAL);
- if (IS_ERR(sg)) {
- ret = PTR_ERR(sg);
- goto fail_detach;
- }
-
- size = dma_buf->size;
- npages = size / PAGE_SIZE;
obj = kzalloc(sizeof(*obj), GFP_KERNEL);
if (obj == NULL) {
ret = -ENOMEM;
- goto fail_unmap;
+ goto fail_detach;
}
- ret = drm_gem_private_object_init(dev, &obj->base, size);
+ ret = drm_gem_private_object_init(dev, &obj->base, dma_buf->size);
if (ret) {
kfree(obj);
- goto fail_unmap;
+ goto fail_detach;
}
- obj->sg_table = sg;
+ i915_gem_object_init(obj, &i915_gem_object_dmabuf_ops);
obj->base.import_attach = attach;
return &obj->base;
-fail_unmap:
- dma_buf_unmap_attachment(attach, sg, DMA_BIDIRECTIONAL);
fail_detach:
dma_buf_detach(dma_buf, attach);
return ERR_PTR(ret);
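The exporter now hands each attachment an independent copy of the object's scatterlist, so dma_map_sg() can assign per-device DMA addresses without the attachments interfering. The copy step, condensed from the map_dma_buf hunk above (helper name hypothetical):

#include <linux/err.h>
#include <linux/scatterlist.h>
#include <linux/slab.h>

static struct sg_table *example_clone_sg(struct sg_table *src_st)
{
        struct sg_table *st;
        struct scatterlist *src, *dst;
        int i, ret;

        st = kmalloc(sizeof(*st), GFP_KERNEL);
        if (!st)
                return ERR_PTR(-ENOMEM);

        ret = sg_alloc_table(st, src_st->nents, GFP_KERNEL);
        if (ret) {
                kfree(st);
                return ERR_PTR(ret);
        }

        /* Same pages, fresh entries: DMA addresses stay per-mapping. */
        dst = st->sgl;
        for_each_sg(src_st->sgl, src, src_st->nents, i) {
                sg_set_page(dst, sg_page(src), src->length, src->offset);
                dst = sg_next(dst);
        }

        return st;
}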
diff --git a/drivers/gpu/drm/i915/i915_gem_execbuffer.c b/drivers/gpu/drm/i915/i915_gem_execbuffer.c
index e6b2205ecf6..6a2f3e50c71 100644
--- a/drivers/gpu/drm/i915/i915_gem_execbuffer.c
+++ b/drivers/gpu/drm/i915/i915_gem_execbuffer.c
@@ -44,6 +44,7 @@ eb_create(int size)
{
struct eb_objects *eb;
int count = PAGE_SIZE / sizeof(struct hlist_head) / 2;
+ BUILD_BUG_ON(!is_power_of_2(PAGE_SIZE / sizeof(struct hlist_head)));
while (count > size)
count >>= 1;
eb = kzalloc(count*sizeof(struct hlist_head) +
@@ -210,7 +211,8 @@ i915_gem_execbuffer_relocate_entry(struct drm_i915_gem_object *obj,
if (ret)
return ret;
- vaddr = kmap_atomic(obj->pages[reloc->offset >> PAGE_SHIFT]);
+ vaddr = kmap_atomic(i915_gem_object_get_page(obj,
+ reloc->offset >> PAGE_SHIFT));
*(uint32_t *)(vaddr + page_offset) = reloc->delta;
kunmap_atomic(vaddr);
} else {
@@ -1101,8 +1103,7 @@ i915_gem_execbuffer(struct drm_device *dev, void *data,
return -ENOMEM;
}
ret = copy_from_user(exec_list,
- (struct drm_i915_relocation_entry __user *)
- (uintptr_t) args->buffers_ptr,
+ (void __user *)(uintptr_t)args->buffers_ptr,
sizeof(*exec_list) * args->buffer_count);
if (ret != 0) {
DRM_DEBUG("copy %d exec entries failed %d\n",
@@ -1141,8 +1142,7 @@ i915_gem_execbuffer(struct drm_device *dev, void *data,
for (i = 0; i < args->buffer_count; i++)
exec_list[i].offset = exec2_list[i].offset;
/* ... and back out to userspace */
- ret = copy_to_user((struct drm_i915_relocation_entry __user *)
- (uintptr_t) args->buffers_ptr,
+ ret = copy_to_user((void __user *)(uintptr_t)args->buffers_ptr,
exec_list,
sizeof(*exec_list) * args->buffer_count);
if (ret) {
@@ -1196,8 +1196,7 @@ i915_gem_execbuffer2(struct drm_device *dev, void *data,
ret = i915_gem_do_execbuffer(dev, data, file, args, exec2_list);
if (!ret) {
/* Copy the new buffer offsets back to the user's exec list. */
- ret = copy_to_user((struct drm_i915_relocation_entry __user *)
- (uintptr_t) args->buffers_ptr,
+ ret = copy_to_user((void __user *)(uintptr_t)args->buffers_ptr,
exec2_list,
sizeof(*exec2_list) * args->buffer_count);
if (ret) {
diff --git a/drivers/gpu/drm/i915/i915_gem_gtt.c b/drivers/gpu/drm/i915/i915_gem_gtt.c
index 18477314d85..47e427e4bc6 100644
--- a/drivers/gpu/drm/i915/i915_gem_gtt.c
+++ b/drivers/gpu/drm/i915/i915_gem_gtt.c
@@ -72,7 +72,7 @@ int i915_gem_init_aliasing_ppgtt(struct drm_device *dev)
/* ppgtt PDEs reside in the global gtt pagetable, which has 512*1024
* entries. For aliasing ppgtt support we just steal them at the end for
* now. */
- first_pd_entry_in_global_pt = 512*1024 - I915_PPGTT_PD_ENTRIES;
+ first_pd_entry_in_global_pt = dev_priv->mm.gtt->gtt_total_entries - I915_PPGTT_PD_ENTRIES;
ppgtt = kzalloc(sizeof(*ppgtt), GFP_KERNEL);
if (!ppgtt)
@@ -167,8 +167,7 @@ void i915_gem_cleanup_aliasing_ppgtt(struct drm_device *dev)
}
static void i915_ppgtt_insert_sg_entries(struct i915_hw_ppgtt *ppgtt,
- struct scatterlist *sg_list,
- unsigned sg_len,
+ const struct sg_table *pages,
unsigned first_entry,
uint32_t pte_flags)
{
@@ -180,12 +179,12 @@ static void i915_ppgtt_insert_sg_entries(struct i915_hw_ppgtt *ppgtt,
struct scatterlist *sg;
/* init sg walking */
- sg = sg_list;
+ sg = pages->sgl;
i = 0;
segment_len = sg_dma_len(sg) >> PAGE_SHIFT;
m = 0;
- while (i < sg_len) {
+ while (i < pages->nents) {
pt_vaddr = kmap_atomic(ppgtt->pt_pages[act_pd]);
for (j = first_pte; j < I915_PPGTT_PT_ENTRIES; j++) {
@@ -194,13 +193,11 @@ static void i915_ppgtt_insert_sg_entries(struct i915_hw_ppgtt *ppgtt,
pt_vaddr[j] = pte | pte_flags;
/* grab the next page */
- m++;
- if (m == segment_len) {
- sg = sg_next(sg);
- i++;
- if (i == sg_len)
+ if (++m == segment_len) {
+ if (++i == pages->nents)
break;
+ sg = sg_next(sg);
segment_len = sg_dma_len(sg) >> PAGE_SHIFT;
m = 0;
}
@@ -213,44 +210,10 @@ static void i915_ppgtt_insert_sg_entries(struct i915_hw_ppgtt *ppgtt,
}
}
-static void i915_ppgtt_insert_pages(struct i915_hw_ppgtt *ppgtt,
- unsigned first_entry, unsigned num_entries,
- struct page **pages, uint32_t pte_flags)
-{
- uint32_t *pt_vaddr, pte;
- unsigned act_pd = first_entry / I915_PPGTT_PT_ENTRIES;
- unsigned first_pte = first_entry % I915_PPGTT_PT_ENTRIES;
- unsigned last_pte, i;
- dma_addr_t page_addr;
-
- while (num_entries) {
- last_pte = first_pte + num_entries;
- last_pte = min_t(unsigned, last_pte, I915_PPGTT_PT_ENTRIES);
-
- pt_vaddr = kmap_atomic(ppgtt->pt_pages[act_pd]);
-
- for (i = first_pte; i < last_pte; i++) {
- page_addr = page_to_phys(*pages);
- pte = GEN6_PTE_ADDR_ENCODE(page_addr);
- pt_vaddr[i] = pte | pte_flags;
-
- pages++;
- }
-
- kunmap_atomic(pt_vaddr);
-
- num_entries -= last_pte - first_pte;
- first_pte = 0;
- act_pd++;
- }
-}
-
void i915_ppgtt_bind_object(struct i915_hw_ppgtt *ppgtt,
struct drm_i915_gem_object *obj,
enum i915_cache_level cache_level)
{
- struct drm_device *dev = obj->base.dev;
- struct drm_i915_private *dev_priv = dev->dev_private;
uint32_t pte_flags = GEN6_PTE_VALID;
switch (cache_level) {
@@ -261,7 +224,7 @@ void i915_ppgtt_bind_object(struct i915_hw_ppgtt *ppgtt,
pte_flags |= GEN6_PTE_CACHE_LLC;
break;
case I915_CACHE_NONE:
- if (IS_HASWELL(dev))
+ if (IS_HASWELL(obj->base.dev))
pte_flags |= HSW_PTE_UNCACHED;
else
pte_flags |= GEN6_PTE_UNCACHED;
@@ -270,26 +233,10 @@ void i915_ppgtt_bind_object(struct i915_hw_ppgtt *ppgtt,
BUG();
}
- if (obj->sg_table) {
- i915_ppgtt_insert_sg_entries(ppgtt,
- obj->sg_table->sgl,
- obj->sg_table->nents,
- obj->gtt_space->start >> PAGE_SHIFT,
- pte_flags);
- } else if (dev_priv->mm.gtt->needs_dmar) {
- BUG_ON(!obj->sg_list);
-
- i915_ppgtt_insert_sg_entries(ppgtt,
- obj->sg_list,
- obj->num_sg,
- obj->gtt_space->start >> PAGE_SHIFT,
- pte_flags);
- } else
- i915_ppgtt_insert_pages(ppgtt,
- obj->gtt_space->start >> PAGE_SHIFT,
- obj->base.size >> PAGE_SHIFT,
- obj->pages,
- pte_flags);
+ i915_ppgtt_insert_sg_entries(ppgtt,
+ obj->pages,
+ obj->gtt_space->start >> PAGE_SHIFT,
+ pte_flags);
}
void i915_ppgtt_unbind_object(struct i915_hw_ppgtt *ppgtt,
@@ -361,44 +308,26 @@ void i915_gem_restore_gtt_mappings(struct drm_device *dev)
int i915_gem_gtt_prepare_object(struct drm_i915_gem_object *obj)
{
- struct drm_device *dev = obj->base.dev;
- struct drm_i915_private *dev_priv = dev->dev_private;
-
- /* don't map imported dma buf objects */
- if (dev_priv->mm.gtt->needs_dmar && !obj->sg_table)
- return intel_gtt_map_memory(obj->pages,
- obj->base.size >> PAGE_SHIFT,
- &obj->sg_list,
- &obj->num_sg);
- else
+ if (obj->has_dma_mapping)
return 0;
+
+ if (!dma_map_sg(&obj->base.dev->pdev->dev,
+ obj->pages->sgl, obj->pages->nents,
+ PCI_DMA_BIDIRECTIONAL))
+ return -ENOSPC;
+
+ return 0;
}
void i915_gem_gtt_bind_object(struct drm_i915_gem_object *obj,
enum i915_cache_level cache_level)
{
struct drm_device *dev = obj->base.dev;
- struct drm_i915_private *dev_priv = dev->dev_private;
unsigned int agp_type = cache_level_to_agp_type(dev, cache_level);
- if (obj->sg_table) {
- intel_gtt_insert_sg_entries(obj->sg_table->sgl,
- obj->sg_table->nents,
- obj->gtt_space->start >> PAGE_SHIFT,
- agp_type);
- } else if (dev_priv->mm.gtt->needs_dmar) {
- BUG_ON(!obj->sg_list);
-
- intel_gtt_insert_sg_entries(obj->sg_list,
- obj->num_sg,
- obj->gtt_space->start >> PAGE_SHIFT,
- agp_type);
- } else
- intel_gtt_insert_pages(obj->gtt_space->start >> PAGE_SHIFT,
- obj->base.size >> PAGE_SHIFT,
- obj->pages,
- agp_type);
-
+ intel_gtt_insert_sg_entries(obj->pages,
+ obj->gtt_space->start >> PAGE_SHIFT,
+ agp_type);
obj->has_global_gtt_mapping = 1;
}
@@ -418,10 +347,10 @@ void i915_gem_gtt_finish_object(struct drm_i915_gem_object *obj)
interruptible = do_idling(dev_priv);
- if (obj->sg_list) {
- intel_gtt_unmap_memory(obj->sg_list, obj->num_sg);
- obj->sg_list = NULL;
- }
+ if (!obj->has_dma_mapping)
+ dma_unmap_sg(&dev->pdev->dev,
+ obj->pages->sgl, obj->pages->nents,
+ PCI_DMA_BIDIRECTIONAL);
undo_idling(dev_priv, interruptible);
}
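
With the rework, i915_gem_gtt_prepare_object()/i915_gem_gtt_finish_object() reduce to a plain dma_map_sg()/dma_unmap_sg() pair on the object's sg_table, and has_dma_mapping short-circuits objects (such as dma-buf imports) whose mapping is owned elsewhere. The bare pattern, sketched with the generic DMA direction constant:

    static int example_prepare(struct device *dev, struct sg_table *st)
    {
            /* dma_map_sg() returns the number of mapped entries, 0 on failure */
            if (!dma_map_sg(dev, st->sgl, st->nents, DMA_BIDIRECTIONAL))
                    return -ENOSPC;
            return 0;
    }

    static void example_finish(struct device *dev, struct sg_table *st)
    {
            /* unmap with the same nents that were passed to map */
            dma_unmap_sg(dev, st->sgl, st->nents, DMA_BIDIRECTIONAL);
    }
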
diff --git a/drivers/gpu/drm/i915/i915_gem_tiling.c b/drivers/gpu/drm/i915/i915_gem_tiling.c
index b964df51cec..8093ecd2ea3 100644
--- a/drivers/gpu/drm/i915/i915_gem_tiling.c
+++ b/drivers/gpu/drm/i915/i915_gem_tiling.c
@@ -470,18 +470,20 @@ i915_gem_swizzle_page(struct page *page)
void
i915_gem_object_do_bit_17_swizzle(struct drm_i915_gem_object *obj)
{
+ struct scatterlist *sg;
int page_count = obj->base.size >> PAGE_SHIFT;
int i;
if (obj->bit_17 == NULL)
return;
- for (i = 0; i < page_count; i++) {
- char new_bit_17 = page_to_phys(obj->pages[i]) >> 17;
+ for_each_sg(obj->pages->sgl, sg, page_count, i) {
+ struct page *page = sg_page(sg);
+ char new_bit_17 = page_to_phys(page) >> 17;
if ((new_bit_17 & 0x1) !=
(test_bit(i, obj->bit_17) != 0)) {
- i915_gem_swizzle_page(obj->pages[i]);
- set_page_dirty(obj->pages[i]);
+ i915_gem_swizzle_page(page);
+ set_page_dirty(page);
}
}
}
@@ -489,6 +491,7 @@ i915_gem_object_do_bit_17_swizzle(struct drm_i915_gem_object *obj)
void
i915_gem_object_save_bit_17_swizzle(struct drm_i915_gem_object *obj)
{
+ struct scatterlist *sg;
int page_count = obj->base.size >> PAGE_SHIFT;
int i;
@@ -502,8 +505,9 @@ i915_gem_object_save_bit_17_swizzle(struct drm_i915_gem_object *obj)
}
}
- for (i = 0; i < page_count; i++) {
- if (page_to_phys(obj->pages[i]) & (1 << 17))
+ for_each_sg(obj->pages->sgl, sg, page_count, i) {
+ struct page *page = sg_page(sg);
+ if (page_to_phys(page) & (1 << 17))
__set_bit(i, obj->bit_17);
else
__clear_bit(i, obj->bit_17);
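
With obj->pages now an sg_table, per-page loops like the swizzle fixups above become for_each_sg() walks; after the rework each scatterlist entry backs exactly one page. A sketch of the traversal (the pr_debug() body is just a stand-in):

    static void example_walk_pages(struct sg_table *st)
    {
            struct scatterlist *sg;
            struct page *page;
            int i;

            for_each_sg(st->sgl, sg, st->nents, i) {
                    page = sg_page(sg);
                    /* e.g. the swizzle test: bit 17 of the physical address */
                    if (page_to_phys(page) & (1 << 17))
                            pr_debug("page %d has bit 17 set\n", i);
            }
    }
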
diff --git a/drivers/gpu/drm/i915/i915_irq.c b/drivers/gpu/drm/i915/i915_irq.c
index d6010135e40..d7f0066538a 100644
--- a/drivers/gpu/drm/i915/i915_irq.c
+++ b/drivers/gpu/drm/i915/i915_irq.c
@@ -382,7 +382,13 @@ static void gen6_pm_rps_work(struct work_struct *work)
else
new_delay = dev_priv->rps.cur_delay - 1;
- gen6_set_rps(dev_priv->dev, new_delay);
+ /* A sysfs frequency change may have snuck in while we were servicing
+ * the interrupt, so check the new delay against the current limits.
+ */
+ if (!(new_delay > dev_priv->rps.max_delay ||
+ new_delay < dev_priv->rps.min_delay)) {
+ gen6_set_rps(dev_priv->dev, new_delay);
+ }
mutex_unlock(&dev_priv->dev->struct_mutex);
}
@@ -888,20 +894,20 @@ i915_error_object_create(struct drm_i915_private *dev_priv,
struct drm_i915_gem_object *src)
{
struct drm_i915_error_object *dst;
- int page, page_count;
+ int i, count;
u32 reloc_offset;
if (src == NULL || src->pages == NULL)
return NULL;
- page_count = src->base.size / PAGE_SIZE;
+ count = src->base.size / PAGE_SIZE;
- dst = kmalloc(sizeof(*dst) + page_count * sizeof(u32 *), GFP_ATOMIC);
+ dst = kmalloc(sizeof(*dst) + count * sizeof(u32 *), GFP_ATOMIC);
if (dst == NULL)
return NULL;
reloc_offset = src->gtt_offset;
- for (page = 0; page < page_count; page++) {
+ for (i = 0; i < count; i++) {
unsigned long flags;
void *d;
@@ -924,30 +930,33 @@ i915_error_object_create(struct drm_i915_private *dev_priv,
memcpy_fromio(d, s, PAGE_SIZE);
io_mapping_unmap_atomic(s);
} else {
+ struct page *page;
void *s;
- drm_clflush_pages(&src->pages[page], 1);
+ page = i915_gem_object_get_page(src, i);
- s = kmap_atomic(src->pages[page]);
+ drm_clflush_pages(&page, 1);
+
+ s = kmap_atomic(page);
memcpy(d, s, PAGE_SIZE);
kunmap_atomic(s);
- drm_clflush_pages(&src->pages[page], 1);
+ drm_clflush_pages(&page, 1);
}
local_irq_restore(flags);
- dst->pages[page] = d;
+ dst->pages[i] = d;
reloc_offset += PAGE_SIZE;
}
- dst->page_count = page_count;
+ dst->page_count = count;
dst->gtt_offset = src->gtt_offset;
return dst;
unwind:
- while (page--)
- kfree(dst->pages[page]);
+ while (i--)
+ kfree(dst->pages[i]);
kfree(dst);
return NULL;
}
@@ -2735,9 +2744,6 @@ void intel_irq_init(struct drm_device *dev)
dev->driver->irq_handler = i8xx_irq_handler;
dev->driver->irq_uninstall = i8xx_irq_uninstall;
} else if (INTEL_INFO(dev)->gen == 3) {
- /* IIR "flip pending" means done if this bit is set */
- I915_WRITE(ECOSKPD, _MASKED_BIT_DISABLE(ECO_FLIP_DONE));
-
dev->driver->irq_preinstall = i915_irq_preinstall;
dev->driver->irq_postinstall = i915_irq_postinstall;
dev->driver->irq_uninstall = i915_irq_uninstall;
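
The gen6_pm_rps_work() change closes a race against the new sysfs limits: a write to gt_max/min_freq_mhz can land between the interrupt firing and the worker running, so the computed delay is forwarded only while it still sits inside the limits. Reduced to its essence:

    /* drop the adjustment on the floor if sysfs narrowed the limits */
    if (new_delay <= dev_priv->rps.max_delay &&
        new_delay >= dev_priv->rps.min_delay)
            gen6_set_rps(dev_priv->dev, new_delay);
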
diff --git a/drivers/gpu/drm/i915/i915_reg.h b/drivers/gpu/drm/i915/i915_reg.h
index a828e90602b..7637824c6a7 100644
--- a/drivers/gpu/drm/i915/i915_reg.h
+++ b/drivers/gpu/drm/i915/i915_reg.h
@@ -1794,6 +1794,10 @@
/* Video Data Island Packet control */
#define VIDEO_DIP_DATA 0x61178
+/* Read the description of VIDEO_DIP_DATA (before Haswell) or VIDEO_DIP_ECC
+ * (Haswell and newer) to see which VIDEO_DIP_DATA byte corresponds to each byte
+ * of the infoframe structure specified by CEA-861. */
+#define VIDEO_DIP_DATA_SIZE 32
#define VIDEO_DIP_CTL 0x61170
/* Pre HSW: */
#define VIDEO_DIP_ENABLE (1 << 31)
diff --git a/drivers/gpu/drm/i915/i915_sysfs.c b/drivers/gpu/drm/i915/i915_sysfs.c
index da733a3fe1e..903eebd2117 100644
--- a/drivers/gpu/drm/i915/i915_sysfs.c
+++ b/drivers/gpu/drm/i915/i915_sysfs.c
@@ -46,32 +46,32 @@ static u32 calc_residency(struct drm_device *dev, const u32 reg)
}
static ssize_t
-show_rc6_mask(struct device *dev, struct device_attribute *attr, char *buf)
+show_rc6_mask(struct device *kdev, struct device_attribute *attr, char *buf)
{
- struct drm_minor *dminor = container_of(dev, struct drm_minor, kdev);
+ struct drm_minor *dminor = container_of(kdev, struct drm_minor, kdev);
return snprintf(buf, PAGE_SIZE, "%x", intel_enable_rc6(dminor->dev));
}
static ssize_t
-show_rc6_ms(struct device *dev, struct device_attribute *attr, char *buf)
+show_rc6_ms(struct device *kdev, struct device_attribute *attr, char *buf)
{
- struct drm_minor *dminor = container_of(dev, struct drm_minor, kdev);
+ struct drm_minor *dminor = container_of(kdev, struct drm_minor, kdev);
u32 rc6_residency = calc_residency(dminor->dev, GEN6_GT_GFX_RC6);
return snprintf(buf, PAGE_SIZE, "%u", rc6_residency);
}
static ssize_t
-show_rc6p_ms(struct device *dev, struct device_attribute *attr, char *buf)
+show_rc6p_ms(struct device *kdev, struct device_attribute *attr, char *buf)
{
- struct drm_minor *dminor = container_of(dev, struct drm_minor, kdev);
+ struct drm_minor *dminor = container_of(kdev, struct drm_minor, kdev);
u32 rc6p_residency = calc_residency(dminor->dev, GEN6_GT_GFX_RC6p);
return snprintf(buf, PAGE_SIZE, "%u", rc6p_residency);
}
static ssize_t
-show_rc6pp_ms(struct device *dev, struct device_attribute *attr, char *buf)
+show_rc6pp_ms(struct device *kdev, struct device_attribute *attr, char *buf)
{
- struct drm_minor *dminor = container_of(dev, struct drm_minor, kdev);
+ struct drm_minor *dminor = container_of(kdev, struct drm_minor, kdev);
u32 rc6pp_residency = calc_residency(dminor->dev, GEN6_GT_GFX_RC6pp);
return snprintf(buf, PAGE_SIZE, "%u", rc6pp_residency);
}
@@ -203,6 +203,184 @@ static struct bin_attribute dpf_attrs = {
.mmap = NULL
};
+static ssize_t gt_cur_freq_mhz_show(struct device *kdev,
+ struct device_attribute *attr, char *buf)
+{
+ struct drm_minor *minor = container_of(kdev, struct drm_minor, kdev);
+ struct drm_device *dev = minor->dev;
+ struct drm_i915_private *dev_priv = dev->dev_private;
+ int ret;
+
+ ret = i915_mutex_lock_interruptible(dev);
+ if (ret)
+ return ret;
+
+ ret = dev_priv->rps.cur_delay * GT_FREQUENCY_MULTIPLIER;
+ mutex_unlock(&dev->struct_mutex);
+
+ return snprintf(buf, PAGE_SIZE, "%d", ret);
+}
+
+static ssize_t gt_max_freq_mhz_show(struct device *kdev, struct device_attribute *attr, char *buf)
+{
+ struct drm_minor *minor = container_of(kdev, struct drm_minor, kdev);
+ struct drm_device *dev = minor->dev;
+ struct drm_i915_private *dev_priv = dev->dev_private;
+ int ret;
+
+ ret = i915_mutex_lock_interruptible(dev);
+ if (ret)
+ return ret;
+
+ ret = dev_priv->rps.max_delay * GT_FREQUENCY_MULTIPLIER;
+ mutex_unlock(&dev->struct_mutex);
+
+ return snprintf(buf, PAGE_SIZE, "%d", ret);
+}
+
+static ssize_t gt_max_freq_mhz_store(struct device *kdev,
+ struct device_attribute *attr,
+ const char *buf, size_t count)
+{
+ struct drm_minor *minor = container_of(kdev, struct drm_minor, kdev);
+ struct drm_device *dev = minor->dev;
+ struct drm_i915_private *dev_priv = dev->dev_private;
+ u32 val, rp_state_cap, hw_max, hw_min;
+ ssize_t ret;
+
+ ret = kstrtou32(buf, 0, &val);
+ if (ret)
+ return ret;
+
+ val /= GT_FREQUENCY_MULTIPLIER;
+
+ ret = mutex_lock_interruptible(&dev->struct_mutex);
+ if (ret)
+ return ret;
+
+ rp_state_cap = I915_READ(GEN6_RP_STATE_CAP);
+ hw_max = (rp_state_cap & 0xff);
+ hw_min = ((rp_state_cap & 0xff0000) >> 16);
+
+ if (val < hw_min || val > hw_max || val < dev_priv->rps.min_delay) {
+ mutex_unlock(&dev->struct_mutex);
+ return -EINVAL;
+ }
+
+ if (dev_priv->rps.cur_delay > val)
+ gen6_set_rps(dev_priv->dev, val);
+
+ dev_priv->rps.max_delay = val;
+
+ mutex_unlock(&dev->struct_mutex);
+
+ return count;
+}
+
+static ssize_t gt_min_freq_mhz_show(struct device *kdev, struct device_attribute *attr, char *buf)
+{
+ struct drm_minor *minor = container_of(kdev, struct drm_minor, kdev);
+ struct drm_device *dev = minor->dev;
+ struct drm_i915_private *dev_priv = dev->dev_private;
+ int ret;
+
+ ret = i915_mutex_lock_interruptible(dev);
+ if (ret)
+ return ret;
+
+ ret = dev_priv->rps.min_delay * GT_FREQUENCY_MULTIPLIER;
+ mutex_unlock(&dev->struct_mutex);
+
+ return snprintf(buf, PAGE_SIZE, "%d", ret);
+}
+
+static ssize_t gt_min_freq_mhz_store(struct device *kdev,
+ struct device_attribute *attr,
+ const char *buf, size_t count)
+{
+ struct drm_minor *minor = container_of(kdev, struct drm_minor, kdev);
+ struct drm_device *dev = minor->dev;
+ struct drm_i915_private *dev_priv = dev->dev_private;
+ u32 val, rp_state_cap, hw_max, hw_min;
+ ssize_t ret;
+
+ ret = kstrtou32(buf, 0, &val);
+ if (ret)
+ return ret;
+
+ val /= GT_FREQUENCY_MULTIPLIER;
+
+ ret = mutex_lock_interruptible(&dev->struct_mutex);
+ if (ret)
+ return ret;
+
+ rp_state_cap = I915_READ(GEN6_RP_STATE_CAP);
+ hw_max = (rp_state_cap & 0xff);
+ hw_min = ((rp_state_cap & 0xff0000) >> 16);
+
+ if (val < hw_min || val > hw_max || val > dev_priv->rps.max_delay) {
+ mutex_unlock(&dev->struct_mutex);
+ return -EINVAL;
+ }
+
+ if (dev_priv->rps.cur_delay < val)
+ gen6_set_rps(dev_priv->dev, val);
+
+ dev_priv->rps.min_delay = val;
+
+ mutex_unlock(&dev->struct_mutex);
+
+ return count;
+
+}
+
+static DEVICE_ATTR(gt_cur_freq_mhz, S_IRUGO, gt_cur_freq_mhz_show, NULL);
+static DEVICE_ATTR(gt_max_freq_mhz, S_IRUGO | S_IWUSR, gt_max_freq_mhz_show, gt_max_freq_mhz_store);
+static DEVICE_ATTR(gt_min_freq_mhz, S_IRUGO | S_IWUSR, gt_min_freq_mhz_show, gt_min_freq_mhz_store);
+
+
+static ssize_t gt_rp_mhz_show(struct device *kdev, struct device_attribute *attr, char *buf);
+static DEVICE_ATTR(gt_RP0_freq_mhz, S_IRUGO, gt_rp_mhz_show, NULL);
+static DEVICE_ATTR(gt_RP1_freq_mhz, S_IRUGO, gt_rp_mhz_show, NULL);
+static DEVICE_ATTR(gt_RPn_freq_mhz, S_IRUGO, gt_rp_mhz_show, NULL);
+
+/* For now we have a static number of RP states */
+static ssize_t gt_rp_mhz_show(struct device *kdev, struct device_attribute *attr, char *buf)
+{
+ struct drm_minor *minor = container_of(kdev, struct drm_minor, kdev);
+ struct drm_device *dev = minor->dev;
+ struct drm_i915_private *dev_priv = dev->dev_private;
+ u32 val, rp_state_cap;
+ ssize_t ret;
+
+ ret = mutex_lock_interruptible(&dev->struct_mutex);
+ if (ret)
+ return ret;
+ rp_state_cap = I915_READ(GEN6_RP_STATE_CAP);
+ mutex_unlock(&dev->struct_mutex);
+
+ if (attr == &dev_attr_gt_RP0_freq_mhz) {
+ val = ((rp_state_cap & 0x0000ff) >> 0) * GT_FREQUENCY_MULTIPLIER;
+ } else if (attr == &dev_attr_gt_RP1_freq_mhz) {
+ val = ((rp_state_cap & 0x00ff00) >> 8) * GT_FREQUENCY_MULTIPLIER;
+ } else if (attr == &dev_attr_gt_RPn_freq_mhz) {
+ val = ((rp_state_cap & 0xff0000) >> 16) * GT_FREQUENCY_MULTIPLIER;
+ } else {
+ BUG();
+ }
+ return snprintf(buf, PAGE_SIZE, "%d", val);
+}
+
+static const struct attribute *gen6_attrs[] = {
+ &dev_attr_gt_cur_freq_mhz.attr,
+ &dev_attr_gt_max_freq_mhz.attr,
+ &dev_attr_gt_min_freq_mhz.attr,
+ &dev_attr_gt_RP0_freq_mhz.attr,
+ &dev_attr_gt_RP1_freq_mhz.attr,
+ &dev_attr_gt_RPn_freq_mhz.attr,
+ NULL,
+};
+
void i915_setup_sysfs(struct drm_device *dev)
{
int ret;
@@ -220,10 +398,19 @@ void i915_setup_sysfs(struct drm_device *dev)
if (ret)
DRM_ERROR("l3 parity sysfs setup failed\n");
}
+
+ if (INTEL_INFO(dev)->gen >= 6) {
+ ret = sysfs_create_files(&dev->primary->kdev.kobj, gen6_attrs);
+ if (ret)
+ DRM_ERROR("gen6 sysfs setup failed\n");
+ }
}
void i915_teardown_sysfs(struct drm_device *dev)
{
+ sysfs_remove_files(&dev->primary->kdev.kobj, gen6_attrs);
device_remove_bin_file(&dev->primary->kdev, &dpf_attrs);
+#ifdef CONFIG_PM
sysfs_unmerge_group(&dev->primary->kdev.kobj, &rc6_attr_group);
+#endif
}
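
The frequency files above all follow the stock device-attribute pattern: a show/store pair that resolves the struct device back to the drm minor, DEVICE_ATTR() to declare the attribute, and sysfs_create_files() at setup time. A stripped-down sketch, where read_value()/write_value() are hypothetical stand-ins:

    static ssize_t example_show(struct device *kdev,
                                struct device_attribute *attr, char *buf)
    {
            struct drm_minor *minor = container_of(kdev, struct drm_minor, kdev);

            return snprintf(buf, PAGE_SIZE, "%d", read_value(minor->dev));
    }

    static ssize_t example_store(struct device *kdev,
                                 struct device_attribute *attr,
                                 const char *buf, size_t count)
    {
            struct drm_minor *minor = container_of(kdev, struct drm_minor, kdev);
            u32 val;
            int ret;

            ret = kstrtou32(buf, 0, &val);
            if (ret)
                    return ret;

            write_value(minor->dev, val);
            return count;
    }

    static DEVICE_ATTR(example, S_IRUGO | S_IWUSR, example_show, example_store);

From userspace these end up as plain files, e.g. reading /sys/class/drm/card0/gt_cur_freq_mhz or echoing a MHz value into gt_max_freq_mhz.
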
diff --git a/drivers/gpu/drm/i915/intel_display.c b/drivers/gpu/drm/i915/intel_display.c
index 5576b7c695e..6f5aafa1b63 100644
--- a/drivers/gpu/drm/i915/intel_display.c
+++ b/drivers/gpu/drm/i915/intel_display.c
@@ -1376,7 +1376,8 @@ static void assert_pch_dp_disabled(struct drm_i915_private *dev_priv,
"PCH DP (0x%08x) enabled on transcoder %c, should be disabled\n",
reg, pipe_name(pipe));
- WARN(HAS_PCH_IBX(dev_priv->dev) && (val & SDVO_PIPE_B_SELECT),
+ WARN(HAS_PCH_IBX(dev_priv->dev) && (val & DP_PORT_EN) == 0
+ && (val & DP_PIPEB_SELECT),
"IBX PCH dp port still using transcoder B\n");
}
@@ -1388,7 +1389,8 @@ static void assert_pch_hdmi_disabled(struct drm_i915_private *dev_priv,
"PCH HDMI (0x%08x) enabled on transcoder %c, should be disabled\n",
reg, pipe_name(pipe));
- WARN(HAS_PCH_IBX(dev_priv->dev) && (val & SDVO_PIPE_B_SELECT),
+ WARN(HAS_PCH_IBX(dev_priv->dev) && (val & PORT_ENABLE) == 0
+ && (val & SDVO_PIPE_B_SELECT),
"IBX PCH hdmi port still using transcoder B\n");
}
@@ -1860,59 +1862,6 @@ static void intel_disable_plane(struct drm_i915_private *dev_priv,
intel_wait_for_vblank(dev_priv->dev, pipe);
}
-static void disable_pch_dp(struct drm_i915_private *dev_priv,
- enum pipe pipe, int reg, u32 port_sel)
-{
- u32 val = I915_READ(reg);
- if (dp_pipe_enabled(dev_priv, pipe, port_sel, val)) {
- DRM_DEBUG_KMS("Disabling pch dp %x on pipe %d\n", reg, pipe);
- I915_WRITE(reg, val & ~DP_PORT_EN);
- }
-}
-
-static void disable_pch_hdmi(struct drm_i915_private *dev_priv,
- enum pipe pipe, int reg)
-{
- u32 val = I915_READ(reg);
- if (hdmi_pipe_enabled(dev_priv, pipe, val)) {
- DRM_DEBUG_KMS("Disabling pch HDMI %x on pipe %d\n",
- reg, pipe);
- I915_WRITE(reg, val & ~PORT_ENABLE);
- }
-}
-
-/* Disable any ports connected to this transcoder */
-static void intel_disable_pch_ports(struct drm_i915_private *dev_priv,
- enum pipe pipe)
-{
- u32 reg, val;
-
- val = I915_READ(PCH_PP_CONTROL);
- I915_WRITE(PCH_PP_CONTROL, val | PANEL_UNLOCK_REGS);
-
- disable_pch_dp(dev_priv, pipe, PCH_DP_B, TRANS_DP_PORT_SEL_B);
- disable_pch_dp(dev_priv, pipe, PCH_DP_C, TRANS_DP_PORT_SEL_C);
- disable_pch_dp(dev_priv, pipe, PCH_DP_D, TRANS_DP_PORT_SEL_D);
-
- reg = PCH_ADPA;
- val = I915_READ(reg);
- if (adpa_pipe_enabled(dev_priv, pipe, val))
- I915_WRITE(reg, val & ~ADPA_DAC_ENABLE);
-
- reg = PCH_LVDS;
- val = I915_READ(reg);
- if (lvds_pipe_enabled(dev_priv, pipe, val)) {
- DRM_DEBUG_KMS("disable lvds on pipe %d val 0x%08x\n", pipe, val);
- I915_WRITE(reg, val & ~LVDS_PORT_EN);
- POSTING_READ(reg);
- udelay(100);
- }
-
- disable_pch_hdmi(dev_priv, pipe, HDMIB);
- disable_pch_hdmi(dev_priv, pipe, HDMIC);
- disable_pch_hdmi(dev_priv, pipe, HDMID);
-}
-
int
intel_pin_and_fence_fb_obj(struct drm_device *dev,
struct drm_i915_gem_object *obj,
@@ -3237,10 +3186,16 @@ static void ironlake_crtc_enable(struct drm_crtc *crtc)
is_pch_port = intel_crtc_driving_pch(crtc);
- if (is_pch_port)
+ if (is_pch_port) {
ironlake_fdi_pll_enable(intel_crtc);
- else
- ironlake_fdi_disable(crtc);
+ } else {
+ assert_fdi_tx_disabled(dev_priv, pipe);
+ assert_fdi_rx_disabled(dev_priv, pipe);
+ }
+
+ for_each_encoder_on_crtc(dev, crtc, encoder)
+ if (encoder->pre_enable)
+ encoder->pre_enable(encoder);
/* Enable panel fitting for LVDS */
if (dev_priv->pch_pf_size &&
@@ -3311,14 +3266,11 @@ static void ironlake_crtc_disable(struct drm_crtc *crtc)
I915_WRITE(PF_CTL(pipe), 0);
I915_WRITE(PF_WIN_SZ(pipe), 0);
- ironlake_fdi_disable(crtc);
+ for_each_encoder_on_crtc(dev, crtc, encoder)
+ if (encoder->post_disable)
+ encoder->post_disable(encoder);
- /* This is a horrible layering violation; we should be doing this in
- * the connector/encoder ->prepare instead, but we don't always have
- * enough information there about the config to know whether it will
- * actually be necessary or just cause undesired flicker.
- */
- intel_disable_pch_ports(dev_priv, pipe);
+ ironlake_fdi_disable(crtc);
intel_disable_transcoder(dev_priv, pipe);
@@ -4279,12 +4231,6 @@ static void i8xx_update_pll(struct drm_crtc *crtc,
POSTING_READ(DPLL(pipe));
udelay(150);
- I915_WRITE(DPLL(pipe), dpll);
-
- /* Wait for the clocks to stabilize. */
- POSTING_READ(DPLL(pipe));
- udelay(150);
-
/* The LVDS pin pair needs to be on before the DPLLs are enabled.
* This is an exception to the general rule that mode_set doesn't turn
* things on.
@@ -4292,6 +4238,12 @@ static void i8xx_update_pll(struct drm_crtc *crtc,
if (intel_pipe_has_type(crtc, INTEL_OUTPUT_LVDS))
intel_update_lvds(crtc, clock, adjusted_mode);
+ I915_WRITE(DPLL(pipe), dpll);
+
+ /* Wait for the clocks to stabilize. */
+ POSTING_READ(DPLL(pipe));
+ udelay(150);
+
/* The pixel multiplier can only be updated once the
* DPLL is enabled and the clocks are stable.
*
@@ -4648,6 +4600,113 @@ static int ironlake_get_refclk(struct drm_crtc *crtc)
return 120000;
}
+static void ironlake_set_pipeconf(struct drm_crtc *crtc,
+ struct drm_display_mode *adjusted_mode,
+ bool dither)
+{
+ struct drm_i915_private *dev_priv = crtc->dev->dev_private;
+ struct intel_crtc *intel_crtc = to_intel_crtc(crtc);
+ int pipe = intel_crtc->pipe;
+ uint32_t val;
+
+ val = I915_READ(PIPECONF(pipe));
+
+ val &= ~PIPE_BPC_MASK;
+ switch (intel_crtc->bpp) {
+ case 18:
+ val |= PIPE_6BPC;
+ break;
+ case 24:
+ val |= PIPE_8BPC;
+ break;
+ case 30:
+ val |= PIPE_10BPC;
+ break;
+ case 36:
+ val |= PIPE_12BPC;
+ break;
+ default:
+ val |= PIPE_8BPC;
+ break;
+ }
+
+ val &= ~(PIPECONF_DITHER_EN | PIPECONF_DITHER_TYPE_MASK);
+ if (dither)
+ val |= (PIPECONF_DITHER_EN | PIPECONF_DITHER_TYPE_SP);
+
+ val &= ~PIPECONF_INTERLACE_MASK;
+ if (adjusted_mode->flags & DRM_MODE_FLAG_INTERLACE)
+ val |= PIPECONF_INTERLACED_ILK;
+ else
+ val |= PIPECONF_PROGRESSIVE;
+
+ I915_WRITE(PIPECONF(pipe), val);
+ POSTING_READ(PIPECONF(pipe));
+}
+
+static bool ironlake_compute_clocks(struct drm_crtc *crtc,
+ struct drm_display_mode *adjusted_mode,
+ intel_clock_t *clock,
+ bool *has_reduced_clock,
+ intel_clock_t *reduced_clock)
+{
+ struct drm_device *dev = crtc->dev;
+ struct drm_i915_private *dev_priv = dev->dev_private;
+ struct intel_encoder *intel_encoder;
+ int refclk;
+ const intel_limit_t *limit;
+ bool ret, is_sdvo = false, is_tv = false, is_lvds = false;
+
+ for_each_encoder_on_crtc(dev, crtc, intel_encoder) {
+ switch (intel_encoder->type) {
+ case INTEL_OUTPUT_LVDS:
+ is_lvds = true;
+ break;
+ case INTEL_OUTPUT_SDVO:
+ case INTEL_OUTPUT_HDMI:
+ is_sdvo = true;
+ if (intel_encoder->needs_tv_clock)
+ is_tv = true;
+ break;
+ case INTEL_OUTPUT_TVOUT:
+ is_tv = true;
+ break;
+ }
+ }
+
+ refclk = ironlake_get_refclk(crtc);
+
+ /*
+ * Returns a set of divisors for the desired target clock with the given
+ * refclk, or FALSE. The returned values represent the clock equation:
+ * refclk * (5 * (m1 + 2) + (m2 + 2)) / (n + 2) / p1 / p2.
+ */
+ limit = intel_limit(crtc, refclk);
+ ret = limit->find_pll(limit, crtc, adjusted_mode->clock, refclk, NULL,
+ clock);
+ if (!ret)
+ return false;
+
+ if (is_lvds && dev_priv->lvds_downclock_avail) {
+ /*
+ * Ensure we match the reduced clock's P to the target clock.
+ * If the clocks don't match, we can't switch the display clock
+ * by using the FP0/FP1. In such case we will disable the LVDS
+ * downclock feature.
+ */
+ *has_reduced_clock = limit->find_pll(limit, crtc,
+ dev_priv->lvds_downclock,
+ refclk,
+ clock,
+ reduced_clock);
+ }
+
+ if (is_sdvo && is_tv)
+ i9xx_adjust_sdvo_tv_clock(adjusted_mode, clock);
+
+ return true;
+}
+
static int ironlake_crtc_mode_set(struct drm_crtc *crtc,
struct drm_display_mode *mode,
struct drm_display_mode *adjusted_mode,
@@ -4659,13 +4718,12 @@ static int ironlake_crtc_mode_set(struct drm_crtc *crtc,
struct intel_crtc *intel_crtc = to_intel_crtc(crtc);
int pipe = intel_crtc->pipe;
int plane = intel_crtc->plane;
- int refclk, num_connectors = 0;
+ int num_connectors = 0;
intel_clock_t clock, reduced_clock;
- u32 dpll, fp = 0, fp2 = 0, dspcntr, pipeconf;
+ u32 dpll, fp = 0, fp2 = 0;
bool ok, has_reduced_clock = false, is_sdvo = false;
bool is_crt = false, is_lvds = false, is_tv = false, is_dp = false;
struct intel_encoder *encoder, *edp_encoder = NULL;
- const intel_limit_t *limit;
int ret;
struct fdi_m_n m_n = {0};
u32 temp;
@@ -4707,16 +4765,8 @@ static int ironlake_crtc_mode_set(struct drm_crtc *crtc,
num_connectors++;
}
- refclk = ironlake_get_refclk(crtc);
-
- /*
- * Returns a set of divisors for the desired target clock with the given
- * refclk, or FALSE. The returned values represent the clock equation:
- * reflck * (5 * (m1 + 2) + (m2 + 2)) / (n + 2) / p1 / p2.
- */
- limit = intel_limit(crtc, refclk);
- ok = limit->find_pll(limit, crtc, adjusted_mode->clock, refclk, NULL,
- &clock);
+ ok = ironlake_compute_clocks(crtc, adjusted_mode, &clock,
+ &has_reduced_clock, &reduced_clock);
if (!ok) {
DRM_ERROR("Couldn't find PLL settings for mode!\n");
return -EINVAL;
@@ -4725,24 +4775,6 @@ static int ironlake_crtc_mode_set(struct drm_crtc *crtc,
/* Ensure that the cursor is valid for the new mode before changing... */
intel_crtc_update_cursor(crtc, true);
- if (is_lvds && dev_priv->lvds_downclock_avail) {
- /*
- * Ensure we match the reduced clock's P to the target clock.
- * If the clocks don't match, we can't switch the display clock
- * by using the FP0/FP1. In such case we will disable the LVDS
- * downclock feature.
- */
- has_reduced_clock = limit->find_pll(limit, crtc,
- dev_priv->lvds_downclock,
- refclk,
- &clock,
- &reduced_clock);
- }
-
- if (is_sdvo && is_tv)
- i9xx_adjust_sdvo_tv_clock(adjusted_mode, &clock);
-
-
/* FDI link */
pixel_multiplier = intel_mode_get_pixel_multiplier(adjusted_mode);
lane = 0;
@@ -4770,32 +4802,17 @@ static int ironlake_crtc_mode_set(struct drm_crtc *crtc,
target_clock = adjusted_mode->clock;
/* determine panel color depth */
- temp = I915_READ(PIPECONF(pipe));
- temp &= ~PIPE_BPC_MASK;
dither = intel_choose_pipe_bpp_dither(crtc, fb, &pipe_bpp, mode);
- switch (pipe_bpp) {
- case 18:
- temp |= PIPE_6BPC;
- break;
- case 24:
- temp |= PIPE_8BPC;
- break;
- case 30:
- temp |= PIPE_10BPC;
- break;
- case 36:
- temp |= PIPE_12BPC;
- break;
- default:
+ if (is_lvds && dev_priv->lvds_dither)
+ dither = true;
+
+ if (pipe_bpp != 18 && pipe_bpp != 24 && pipe_bpp != 30 &&
+ pipe_bpp != 36) {
WARN(1, "intel_choose_pipe_bpp returned invalid value %d\n",
- pipe_bpp);
- temp |= PIPE_8BPC;
+ pipe_bpp);
pipe_bpp = 24;
- break;
}
-
intel_crtc->bpp = pipe_bpp;
- I915_WRITE(PIPECONF(pipe), temp);
if (!lane) {
/*
@@ -4879,12 +4896,6 @@ static int ironlake_crtc_mode_set(struct drm_crtc *crtc,
else
dpll |= PLL_REF_INPUT_DREFCLK;
- /* setup pipeconf */
- pipeconf = I915_READ(PIPECONF(pipe));
-
- /* Set up the display plane register */
- dspcntr = DISPPLANE_GAMMA_ENABLE;
-
DRM_DEBUG_KMS("Mode for pipe %d:\n", pipe);
drm_mode_debug_printmodeline(mode);
@@ -4944,12 +4955,6 @@ static int ironlake_crtc_mode_set(struct drm_crtc *crtc,
I915_WRITE(PCH_LVDS, temp);
}
- pipeconf &= ~PIPECONF_DITHER_EN;
- pipeconf &= ~PIPECONF_DITHER_TYPE_MASK;
- if ((is_lvds && dev_priv->lvds_dither) || dither) {
- pipeconf |= PIPECONF_DITHER_EN;
- pipeconf |= PIPECONF_DITHER_TYPE_SP;
- }
if (is_dp && !is_cpu_edp) {
intel_dp_set_m_n(crtc, mode, adjusted_mode);
} else {
@@ -4985,9 +4990,7 @@ static int ironlake_crtc_mode_set(struct drm_crtc *crtc,
}
}
- pipeconf &= ~PIPECONF_INTERLACE_MASK;
if (adjusted_mode->flags & DRM_MODE_FLAG_INTERLACE) {
- pipeconf |= PIPECONF_INTERLACED_ILK;
/* the chip adds 2 halflines automatically */
adjusted_mode->crtc_vtotal -= 1;
adjusted_mode->crtc_vblank_end -= 1;
@@ -4995,7 +4998,6 @@ static int ironlake_crtc_mode_set(struct drm_crtc *crtc,
adjusted_mode->crtc_hsync_start
- adjusted_mode->crtc_htotal/2);
} else {
- pipeconf |= PIPECONF_PROGRESSIVE;
I915_WRITE(VSYNCSHIFT(pipe), 0);
}
@@ -5033,12 +5035,12 @@ static int ironlake_crtc_mode_set(struct drm_crtc *crtc,
if (is_cpu_edp)
ironlake_set_pll_edp(crtc, adjusted_mode->clock);
- I915_WRITE(PIPECONF(pipe), pipeconf);
- POSTING_READ(PIPECONF(pipe));
+ ironlake_set_pipeconf(crtc, adjusted_mode, dither);
intel_wait_for_vblank(dev, pipe);
- I915_WRITE(DSPCNTR(plane), dspcntr);
+ /* Set up the display plane register */
+ I915_WRITE(DSPCNTR(plane), DISPPLANE_GAMMA_ENABLE);
POSTING_READ(DSPCNTR(plane));
ret = intel_pipe_set_base(crtc, x, y, fb);
@@ -6866,7 +6868,13 @@ intel_modeset_update_state(struct drm_device *dev, unsigned prepare_pipes)
intel_crtc = to_intel_crtc(connector->encoder->crtc);
if (prepare_pipes & (1 << intel_crtc->pipe)) {
+ struct drm_property *dpms_property =
+ dev->mode_config.dpms_property;
+
connector->dpms = DRM_MODE_DPMS_ON;
+ drm_connector_property_set_value(connector,
+ dpms_property,
+ DRM_MODE_DPMS_ON);
intel_encoder = to_intel_encoder(connector->encoder);
intel_encoder->connectors_active = true;
@@ -7296,7 +7304,6 @@ static int intel_crtc_set_config(struct drm_mode_set *set)
struct drm_mode_set save_set;
struct intel_set_config *config;
int ret;
- int i;
BUG_ON(!set);
BUG_ON(!set->crtc);
@@ -7360,15 +7367,6 @@ static int intel_crtc_set_config(struct drm_mode_set *set)
ret = -EINVAL;
goto fail;
}
-
- if (set->crtc->enabled) {
- DRM_DEBUG_KMS("Setting connector DPMS state to on\n");
- for (i = 0; i < set->num_connectors; i++) {
- DRM_DEBUG_KMS("\t[CONNECTOR:%d:%s] set DPMS on\n", set->connectors[i]->base.id,
- drm_get_connector_name(set->connectors[i]));
- set->connectors[i]->funcs->dpms(set->connectors[i], DRM_MODE_DPMS_ON);
- }
- }
} else if (config->fb_changed) {
ret = intel_pipe_set_base(set->crtc,
set->x, set->y, set->fb);
diff --git a/drivers/gpu/drm/i915/intel_dp.c b/drivers/gpu/drm/i915/intel_dp.c
index c59710db653..1474f84fdbd 100644
--- a/drivers/gpu/drm/i915/intel_dp.c
+++ b/drivers/gpu/drm/i915/intel_dp.c
@@ -808,9 +808,6 @@ intel_dp_set_m_n(struct drm_crtc *crtc, struct drm_display_mode *mode,
}
}
-static void ironlake_edp_pll_on(struct drm_encoder *encoder);
-static void ironlake_edp_pll_off(struct drm_encoder *encoder);
-
static void
intel_dp_mode_set(struct drm_encoder *encoder, struct drm_display_mode *mode,
struct drm_display_mode *adjusted_mode)
@@ -821,14 +818,6 @@ intel_dp_mode_set(struct drm_encoder *encoder, struct drm_display_mode *mode,
struct drm_crtc *crtc = intel_dp->base.base.crtc;
struct intel_crtc *intel_crtc = to_intel_crtc(crtc);
- /* Turn on the eDP PLL if needed */
- if (is_edp(intel_dp)) {
- if (!is_pch_edp(intel_dp))
- ironlake_edp_pll_on(encoder);
- else
- ironlake_edp_pll_off(encoder);
- }
-
/*
* There are four kinds of DP registers:
*
@@ -898,7 +887,6 @@ intel_dp_mode_set(struct drm_encoder *encoder, struct drm_display_mode *mode,
intel_dp->DP |= intel_crtc->pipe << 29;
/* don't miss out required setting for eDP */
- intel_dp->DP |= DP_PLL_ENABLE;
if (adjusted_mode->clock < 200000)
intel_dp->DP |= DP_PLL_FREQ_160MHZ;
else
@@ -920,7 +908,6 @@ intel_dp_mode_set(struct drm_encoder *encoder, struct drm_display_mode *mode,
if (is_cpu_edp(intel_dp)) {
/* don't miss out required setting for eDP */
- intel_dp->DP |= DP_PLL_ENABLE;
if (adjusted_mode->clock < 200000)
intel_dp->DP |= DP_PLL_FREQ_160MHZ;
else
@@ -1191,27 +1178,49 @@ static void ironlake_edp_backlight_off(struct intel_dp *intel_dp)
msleep(intel_dp->backlight_off_delay);
}
-static void ironlake_edp_pll_on(struct drm_encoder *encoder)
+static void ironlake_edp_pll_on(struct intel_dp *intel_dp)
{
- struct drm_device *dev = encoder->dev;
+ struct drm_device *dev = intel_dp->base.base.dev;
+ struct drm_crtc *crtc = intel_dp->base.base.crtc;
struct drm_i915_private *dev_priv = dev->dev_private;
u32 dpa_ctl;
+ assert_pipe_disabled(dev_priv,
+ to_intel_crtc(crtc)->pipe);
+
DRM_DEBUG_KMS("\n");
dpa_ctl = I915_READ(DP_A);
- dpa_ctl |= DP_PLL_ENABLE;
- I915_WRITE(DP_A, dpa_ctl);
+ WARN(dpa_ctl & DP_PLL_ENABLE, "dp pll on, should be off\n");
+ WARN(dpa_ctl & DP_PORT_EN, "dp port still on, should be off\n");
+
+ /* We don't adjust intel_dp->DP while tearing down the link, to
+ * facilitate link retraining (e.g. after hotplug). Hence clear all
+ * enable bits here to ensure that we don't enable too much. */
+ intel_dp->DP &= ~(DP_PORT_EN | DP_AUDIO_OUTPUT_ENABLE);
+ intel_dp->DP |= DP_PLL_ENABLE;
+ I915_WRITE(DP_A, intel_dp->DP);
POSTING_READ(DP_A);
udelay(200);
}
-static void ironlake_edp_pll_off(struct drm_encoder *encoder)
+static void ironlake_edp_pll_off(struct intel_dp *intel_dp)
{
- struct drm_device *dev = encoder->dev;
+ struct drm_device *dev = intel_dp->base.base.dev;
+ struct drm_crtc *crtc = intel_dp->base.base.crtc;
struct drm_i915_private *dev_priv = dev->dev_private;
u32 dpa_ctl;
+ assert_pipe_disabled(dev_priv,
+ to_intel_crtc(crtc)->pipe);
+
dpa_ctl = I915_READ(DP_A);
+ WARN((dpa_ctl & DP_PLL_ENABLE) == 0,
+ "dp pll off, should be on\n");
+ WARN(dpa_ctl & DP_PORT_EN, "dp port still on, should be off\n");
+
+ /* We can't rely on the value tracked for the DP register in
+ * intel_dp->DP because link_down must not change that (otherwise link
+ * re-training will fail). */
dpa_ctl &= ~DP_PLL_ENABLE;
I915_WRITE(DP_A, dpa_ctl);
POSTING_READ(DP_A);
@@ -1306,7 +1315,20 @@ static void intel_disable_dp(struct intel_encoder *encoder)
ironlake_edp_backlight_off(intel_dp);
intel_dp_sink_dpms(intel_dp, DRM_MODE_DPMS_ON);
ironlake_edp_panel_off(intel_dp);
- intel_dp_link_down(intel_dp);
+
+ /* CPU eDP may only be disabled _after_ the CPU pipe/plane is disabled. */
+ if (!is_cpu_edp(intel_dp))
+ intel_dp_link_down(intel_dp);
+}
+
+static void intel_post_disable_dp(struct intel_encoder *encoder)
+{
+ struct intel_dp *intel_dp = enc_to_intel_dp(&encoder->base);
+
+ if (is_cpu_edp(intel_dp)) {
+ intel_dp_link_down(intel_dp);
+ ironlake_edp_pll_off(intel_dp);
+ }
}
static void intel_enable_dp(struct intel_encoder *encoder)
@@ -1316,51 +1338,24 @@ static void intel_enable_dp(struct intel_encoder *encoder)
struct drm_i915_private *dev_priv = dev->dev_private;
uint32_t dp_reg = I915_READ(intel_dp->output_reg);
+ if (WARN_ON(dp_reg & DP_PORT_EN))
+ return;
+
ironlake_edp_panel_vdd_on(intel_dp);
intel_dp_sink_dpms(intel_dp, DRM_MODE_DPMS_ON);
- if (!(dp_reg & DP_PORT_EN)) {
- intel_dp_start_link_train(intel_dp);
- ironlake_edp_panel_on(intel_dp);
- ironlake_edp_panel_vdd_off(intel_dp, true);
- intel_dp_complete_link_train(intel_dp);
- } else
- ironlake_edp_panel_vdd_off(intel_dp, false);
+ intel_dp_start_link_train(intel_dp);
+ ironlake_edp_panel_on(intel_dp);
+ ironlake_edp_panel_vdd_off(intel_dp, true);
+ intel_dp_complete_link_train(intel_dp);
ironlake_edp_backlight_on(intel_dp);
}
-static void
-intel_dp_dpms(struct drm_connector *connector, int mode)
+static void intel_pre_enable_dp(struct intel_encoder *encoder)
{
- struct intel_dp *intel_dp = intel_attached_dp(connector);
-
- /* DP supports only 2 dpms states. */
- if (mode != DRM_MODE_DPMS_ON)
- mode = DRM_MODE_DPMS_OFF;
-
- if (mode == connector->dpms)
- return;
-
- connector->dpms = mode;
-
- /* Only need to change hw state when actually enabled */
- if (!intel_dp->base.base.crtc) {
- intel_dp->base.connectors_active = false;
- return;
- }
-
- if (mode != DRM_MODE_DPMS_ON) {
- intel_encoder_dpms(&intel_dp->base, mode);
-
- if (is_cpu_edp(intel_dp))
- ironlake_edp_pll_off(&intel_dp->base.base);
- } else {
- if (is_cpu_edp(intel_dp))
- ironlake_edp_pll_on(&intel_dp->base.base);
-
- intel_encoder_dpms(&intel_dp->base, mode);
- }
+ struct intel_dp *intel_dp = enc_to_intel_dp(&encoder->base);
- intel_modeset_check_state(connector->dev);
+ if (is_cpu_edp(intel_dp))
+ ironlake_edp_pll_on(intel_dp);
}
/*
@@ -1743,25 +1738,12 @@ static void
intel_dp_start_link_train(struct intel_dp *intel_dp)
{
struct drm_device *dev = intel_dp->base.base.dev;
- struct drm_i915_private *dev_priv = dev->dev_private;
- struct intel_crtc *intel_crtc = to_intel_crtc(intel_dp->base.base.crtc);
int i;
uint8_t voltage;
bool clock_recovery = false;
int voltage_tries, loop_tries;
uint32_t DP = intel_dp->DP;
- /*
- * On CPT we have to enable the port in training pattern 1, which
- * will happen below in intel_dp_set_link_train. Otherwise, enable
- * the port and wait for it to become active.
- */
- if (!HAS_PCH_CPT(dev)) {
- I915_WRITE(intel_dp->output_reg, intel_dp->DP);
- POSTING_READ(intel_dp->output_reg);
- intel_wait_for_vblank(dev, intel_crtc->pipe);
- }
-
/* Write the link configuration data */
intel_dp_aux_native_write(intel_dp, DP_LINK_BW_SET,
intel_dp->link_configuration,
@@ -1923,18 +1905,11 @@ intel_dp_link_down(struct intel_dp *intel_dp)
struct drm_i915_private *dev_priv = dev->dev_private;
uint32_t DP = intel_dp->DP;
- if ((I915_READ(intel_dp->output_reg) & DP_PORT_EN) == 0)
+ if (WARN_ON((I915_READ(intel_dp->output_reg) & DP_PORT_EN) == 0))
return;
DRM_DEBUG_KMS("\n");
- if (is_edp(intel_dp)) {
- DP &= ~DP_PLL_ENABLE;
- I915_WRITE(intel_dp->output_reg, DP);
- POSTING_READ(intel_dp->output_reg);
- udelay(100);
- }
-
if (HAS_PCH_CPT(dev) && (IS_GEN7(dev) || !is_cpu_edp(intel_dp))) {
DP &= ~DP_LINK_TRAIN_MASK_CPT;
I915_WRITE(intel_dp->output_reg, DP | DP_LINK_TRAIN_PAT_IDLE_CPT);
@@ -1946,13 +1921,6 @@ intel_dp_link_down(struct intel_dp *intel_dp)
msleep(17);
- if (is_edp(intel_dp)) {
- if (HAS_PCH_CPT(dev) && (IS_GEN7(dev) || !is_cpu_edp(intel_dp)))
- DP |= DP_LINK_TRAIN_OFF_CPT;
- else
- DP |= DP_LINK_TRAIN_OFF;
- }
-
if (HAS_PCH_IBX(dev) &&
I915_READ(intel_dp->output_reg) & DP_PIPEB_SELECT) {
struct drm_crtc *crtc = intel_dp->base.base.crtc;
@@ -2391,7 +2359,7 @@ static const struct drm_encoder_helper_funcs intel_dp_helper_funcs = {
};
static const struct drm_connector_funcs intel_dp_connector_funcs = {
- .dpms = intel_dp_dpms,
+ .dpms = intel_connector_dpms,
.detect = intel_dp_detect,
.fill_modes = drm_helper_probe_single_connector_modes,
.set_property = intel_dp_set_property,
@@ -2478,6 +2446,8 @@ intel_dp_init(struct drm_device *dev, int output_reg, enum port port)
intel_dp->output_reg = output_reg;
intel_dp->port = port;
+ /* Preserve the current hw state. */
+ intel_dp->DP = I915_READ(intel_dp->output_reg);
intel_connector = kzalloc(sizeof(struct intel_connector), GFP_KERNEL);
if (!intel_connector) {
@@ -2522,7 +2492,9 @@ intel_dp_init(struct drm_device *dev, int output_reg, enum port port)
drm_sysfs_connector_add(connector);
intel_encoder->enable = intel_enable_dp;
+ intel_encoder->pre_enable = intel_pre_enable_dp;
intel_encoder->disable = intel_disable_dp;
+ intel_encoder->post_disable = intel_post_disable_dp;
intel_encoder->get_hw_state = intel_dp_get_hw_state;
intel_connector->get_hw_state = intel_connector_get_hw_state;
@@ -2548,14 +2520,10 @@ intel_dp_init(struct drm_device *dev, int output_reg, enum port port)
break;
}
- intel_dp_i2c_init(intel_dp, intel_connector, name);
-
/* Cache some DPCD data in the eDP case */
if (is_edp(intel_dp)) {
- bool ret;
struct edp_power_seq cur, vbt;
u32 pp_on, pp_off, pp_div;
- struct edid *edid;
pp_on = I915_READ(PCH_PP_ON_DELAYS);
pp_off = I915_READ(PCH_PP_OFF_DELAYS);
@@ -2606,6 +2574,13 @@ intel_dp_init(struct drm_device *dev, int output_reg, enum port port)
DRM_DEBUG_KMS("backlight on delay %d, off delay %d\n",
intel_dp->backlight_on_delay, intel_dp->backlight_off_delay);
+ }
+
+ intel_dp_i2c_init(intel_dp, intel_connector, name);
+
+ if (is_edp(intel_dp)) {
+ bool ret;
+ struct edid *edid;
ironlake_edp_panel_vdd_on(intel_dp);
ret = intel_dp_get_dpcd(intel_dp);
diff --git a/drivers/gpu/drm/i915/intel_drv.h b/drivers/gpu/drm/i915/intel_drv.h
index 4f2b2d6a248..351fd7179cd 100644
--- a/drivers/gpu/drm/i915/intel_drv.h
+++ b/drivers/gpu/drm/i915/intel_drv.h
@@ -152,8 +152,10 @@ struct intel_encoder {
bool cloneable;
bool connectors_active;
void (*hot_plug)(struct intel_encoder *);
+ void (*pre_enable)(struct intel_encoder *);
void (*enable)(struct intel_encoder *);
void (*disable)(struct intel_encoder *);
+ void (*post_disable)(struct intel_encoder *);
/* Read out the current hw state of this connector, returning true if
* the encoder is active. If the encoder is enabled it also set the pipe
* it is connected to in the pipe parameter. */
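
The two new hooks bracket the pipe in the modeset sequence: pre_enable runs while the pipe is still off (which is why the eDP PLL code can assert_pipe_disabled()), and post_disable runs once the pipe is down (link teardown, PLL off). A simplified sketch of the ordering the crtc paths now follow, not the exact ironlake code:

    /* enable: pre_enable -> pipe/plane up -> enable */
    for_each_encoder_on_crtc(dev, crtc, encoder)
            if (encoder->pre_enable)
                    encoder->pre_enable(encoder);   /* e.g. eDP PLL on */
    /* ... pipe, plane and panel fitter come up here ... */
    for_each_encoder_on_crtc(dev, crtc, encoder)
            encoder->enable(encoder);

    /* disable: disable -> pipe/plane down -> post_disable */
    for_each_encoder_on_crtc(dev, crtc, encoder)
            encoder->disable(encoder);
    /* ... pipe and plane go down here ... */
    for_each_encoder_on_crtc(dev, crtc, encoder)
            if (encoder->post_disable)
                    encoder->post_disable(encoder); /* e.g. link down, PLL off */
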
diff --git a/drivers/gpu/drm/i915/intel_hdmi.c b/drivers/gpu/drm/i915/intel_hdmi.c
index 5d02aad0de8..08f2b63d740 100644
--- a/drivers/gpu/drm/i915/intel_hdmi.c
+++ b/drivers/gpu/drm/i915/intel_hdmi.c
@@ -151,6 +151,9 @@ static void g4x_write_infoframe(struct drm_encoder *encoder,
I915_WRITE(VIDEO_DIP_DATA, *data);
data++;
}
+ /* Write every possible data byte to force correct ECC calculation. */
+ for (; i < VIDEO_DIP_DATA_SIZE; i += 4)
+ I915_WRITE(VIDEO_DIP_DATA, 0);
mmiowb();
val |= g4x_infoframe_enable(frame);
@@ -186,6 +189,9 @@ static void ibx_write_infoframe(struct drm_encoder *encoder,
I915_WRITE(TVIDEO_DIP_DATA(intel_crtc->pipe), *data);
data++;
}
+ /* Write every possible data byte to force correct ECC calculation. */
+ for (; i < VIDEO_DIP_DATA_SIZE; i += 4)
+ I915_WRITE(TVIDEO_DIP_DATA(intel_crtc->pipe), 0);
mmiowb();
val |= g4x_infoframe_enable(frame);
@@ -224,6 +230,9 @@ static void cpt_write_infoframe(struct drm_encoder *encoder,
I915_WRITE(TVIDEO_DIP_DATA(intel_crtc->pipe), *data);
data++;
}
+ /* Write every possible data byte to force correct ECC calculation. */
+ for (; i < VIDEO_DIP_DATA_SIZE; i += 4)
+ I915_WRITE(TVIDEO_DIP_DATA(intel_crtc->pipe), 0);
mmiowb();
val |= g4x_infoframe_enable(frame);
@@ -259,6 +268,9 @@ static void vlv_write_infoframe(struct drm_encoder *encoder,
I915_WRITE(VLV_TVIDEO_DIP_DATA(intel_crtc->pipe), *data);
data++;
}
+ /* Write every possible data byte to force correct ECC calculation. */
+ for (; i < VIDEO_DIP_DATA_SIZE; i += 4)
+ I915_WRITE(VLV_TVIDEO_DIP_DATA(intel_crtc->pipe), 0);
mmiowb();
val |= g4x_infoframe_enable(frame);
@@ -292,6 +304,9 @@ static void hsw_write_infoframe(struct drm_encoder *encoder,
I915_WRITE(data_reg + i, *data);
data++;
}
+ /* Write every possible data byte to force correct ECC calculation. */
+ for (; i < VIDEO_DIP_DATA_SIZE; i += 4)
+ I915_WRITE(data_reg + i, 0);
mmiowb();
val |= hsw_infoframe_enable(frame);
@@ -377,6 +392,7 @@ static void g4x_set_infoframes(struct drm_encoder *encoder,
port = VIDEO_DIP_PORT_C;
break;
default:
+ BUG();
return;
}
@@ -435,6 +451,7 @@ static void ibx_set_infoframes(struct drm_encoder *encoder,
port = VIDEO_DIP_PORT_D;
break;
default:
+ BUG();
return;
}
@@ -674,10 +691,7 @@ static void intel_disable_hdmi(struct intel_encoder *encoder)
struct drm_i915_private *dev_priv = dev->dev_private;
struct intel_hdmi *intel_hdmi = enc_to_intel_hdmi(&encoder->base);
u32 temp;
- u32 enable_bits = SDVO_ENABLE;
-
- if (intel_hdmi->has_audio)
- enable_bits |= SDVO_AUDIO_ENABLE;
+ u32 enable_bits = SDVO_ENABLE | SDVO_AUDIO_ENABLE;
temp = I915_READ(intel_hdmi->sdvox_reg);
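
Each *_write_infoframe() above gains the same tail loop: after the payload, keep writing zeroes until the full 32-byte DIP data window (VIDEO_DIP_DATA_SIZE) is covered, so the hardware computes its ECC over fully defined contents. The common shape, sketched for the g4x register:

    /* data points at the packed infoframe; len is its byte length.
     * The DIP data register auto-increments on each 32-bit write. */
    for (i = 0; i < len; i += 4)
            I915_WRITE(VIDEO_DIP_DATA, *data++);
    /* pad the remainder of the window so the ECC is stable */
    for (; i < VIDEO_DIP_DATA_SIZE; i += 4)
            I915_WRITE(VIDEO_DIP_DATA, 0);
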
diff --git a/drivers/gpu/drm/i915/intel_lvds.c b/drivers/gpu/drm/i915/intel_lvds.c
index dc9f9ff34d8..40d72bd64e1 100644
--- a/drivers/gpu/drm/i915/intel_lvds.c
+++ b/drivers/gpu/drm/i915/intel_lvds.c
@@ -770,6 +770,14 @@ static const struct dmi_system_id intel_no_lvds[] = {
DMI_MATCH(DMI_BOARD_NAME, "ZBOXSD-ID12/ID13"),
},
},
+ {
+ .callback = intel_no_lvds_dmi_callback,
+ .ident = "Gigabyte GA-D525TUD",
+ .matches = {
+ DMI_MATCH(DMI_BOARD_VENDOR, "Gigabyte Technology Co., Ltd."),
+ DMI_MATCH(DMI_BOARD_NAME, "D525TUD"),
+ },
+ },
{ } /* terminating entry */
};
diff --git a/drivers/gpu/drm/i915/intel_opregion.c b/drivers/gpu/drm/i915/intel_opregion.c
index 18bd0af855d..e27c1701262 100644
--- a/drivers/gpu/drm/i915/intel_opregion.c
+++ b/drivers/gpu/drm/i915/intel_opregion.c
@@ -427,6 +427,25 @@ blind_set:
goto end;
}
+static void intel_setup_cadls(struct drm_device *dev)
+{
+ struct drm_i915_private *dev_priv = dev->dev_private;
+ struct intel_opregion *opregion = &dev_priv->opregion;
+ int i = 0;
+ u32 disp_id;
+
+ /* Initialize the CADL field by duplicating the DIDL values.
+ * Technically, this is not always correct as display outputs may exist
+ * but not be active. This initialization is necessary for some Clevo
+ * laptops that check this field before processing the brightness and
+ * display-switching hotkeys. Just like DIDL, CADL is NULL-terminated if
+ * there are fewer than eight devices. */
+ do {
+ disp_id = ioread32(&opregion->acpi->didl[i]);
+ iowrite32(disp_id, &opregion->acpi->cadl[i]);
+ } while (++i < 8 && disp_id != 0);
+}
+
void intel_opregion_init(struct drm_device *dev)
{
struct drm_i915_private *dev_priv = dev->dev_private;
@@ -436,8 +455,10 @@ void intel_opregion_init(struct drm_device *dev)
return;
if (opregion->acpi) {
- if (drm_core_check_feature(dev, DRIVER_MODESET))
+ if (drm_core_check_feature(dev, DRIVER_MODESET)) {
intel_didl_outputs(dev);
+ intel_setup_cadls(dev);
+ }
/* Notify BIOS we are ready to handle ACPI video ext notifs.
* Right now, all the events are handled by the ACPI video module.
diff --git a/drivers/gpu/drm/i915/intel_panel.c b/drivers/gpu/drm/i915/intel_panel.c
index 3df4f5fa892..e019b236986 100644
--- a/drivers/gpu/drm/i915/intel_panel.c
+++ b/drivers/gpu/drm/i915/intel_panel.c
@@ -162,19 +162,12 @@ static u32 i915_read_blc_pwm_ctl(struct drm_i915_private *dev_priv)
return val;
}
-u32 intel_panel_get_max_backlight(struct drm_device *dev)
+static u32 _intel_panel_get_max_backlight(struct drm_device *dev)
{
struct drm_i915_private *dev_priv = dev->dev_private;
u32 max;
max = i915_read_blc_pwm_ctl(dev_priv);
- if (max == 0) {
- /* XXX add code here to query mode clock or hardware clock
- * and program max PWM appropriately.
- */
- pr_warn_once("fixme: max PWM is zero\n");
- return 1;
- }
if (HAS_PCH_SPLIT(dev)) {
max >>= 16;
@@ -188,6 +181,22 @@ u32 intel_panel_get_max_backlight(struct drm_device *dev)
max *= 0xff;
}
+ return max;
+}
+
+u32 intel_panel_get_max_backlight(struct drm_device *dev)
+{
+ u32 max;
+
+ max = _intel_panel_get_max_backlight(dev);
+ if (max == 0) {
+ /* XXX add code here to query mode clock or hardware clock
+ * and program max PWM appropriately.
+ */
+ pr_warn_once("fixme: max PWM is zero\n");
+ return 1;
+ }
+
DRM_DEBUG_DRIVER("max backlight PWM = %d\n", max);
return max;
}
@@ -424,7 +433,11 @@ int intel_panel_setup_backlight(struct drm_device *dev)
memset(&props, 0, sizeof(props));
props.type = BACKLIGHT_RAW;
- props.max_brightness = intel_panel_get_max_backlight(dev);
+ props.max_brightness = _intel_panel_get_max_backlight(dev);
+ if (props.max_brightness == 0) {
+ DRM_ERROR("Failed to get maximum backlight value\n");
+ return -ENODEV;
+ }
dev_priv->backlight =
backlight_device_register("intel_backlight",
&connector->kdev, dev,
diff --git a/drivers/gpu/drm/i915/intel_pm.c b/drivers/gpu/drm/i915/intel_pm.c
index 36c64091bc9..d69f8f49beb 100644
--- a/drivers/gpu/drm/i915/intel_pm.c
+++ b/drivers/gpu/drm/i915/intel_pm.c
@@ -2324,6 +2324,8 @@ void gen6_set_rps(struct drm_device *dev, u8 val)
u32 limits = gen6_rps_limits(dev_priv, &val);
WARN_ON(!mutex_is_locked(&dev->struct_mutex));
+ WARN_ON(val > dev_priv->rps.max_delay);
+ WARN_ON(val < dev_priv->rps.min_delay);
if (val == dev_priv->rps.cur_delay)
return;
@@ -2338,6 +2340,8 @@ void gen6_set_rps(struct drm_device *dev, u8 val)
*/
I915_WRITE(GEN6_RP_INTERRUPT_LIMITS, limits);
+ POSTING_READ(GEN6_RPNSWREQ);
+
dev_priv->rps.cur_delay = val;
trace_intel_gpu_freq_change(val * 50);
@@ -2730,7 +2734,7 @@ static const struct cparams {
{ 0, 800, 231, 23784 },
};
-unsigned long i915_chipset_val(struct drm_i915_private *dev_priv)
+static unsigned long __i915_chipset_val(struct drm_i915_private *dev_priv)
{
u64 total_count, diff, ret;
u32 count1, count2, count3, m = 0, c = 0;
@@ -2784,6 +2788,22 @@ unsigned long i915_chipset_val(struct drm_i915_private *dev_priv)
return ret;
}
+unsigned long i915_chipset_val(struct drm_i915_private *dev_priv)
+{
+ unsigned long val;
+
+ if (dev_priv->info->gen != 5)
+ return 0;
+
+ spin_lock_irq(&mchdev_lock);
+
+ val = __i915_chipset_val(dev_priv);
+
+ spin_unlock_irq(&mchdev_lock);
+
+ return val;
+}
+
unsigned long i915_mch_val(struct drm_i915_private *dev_priv)
{
unsigned long m, x, b;
@@ -2987,7 +3007,7 @@ void i915_update_gfx_val(struct drm_i915_private *dev_priv)
spin_unlock_irq(&mchdev_lock);
}
-unsigned long i915_gfx_val(struct drm_i915_private *dev_priv)
+static unsigned long __i915_gfx_val(struct drm_i915_private *dev_priv)
{
unsigned long t, corr, state1, corr2, state2;
u32 pxvid, ext_v;
@@ -3024,6 +3044,22 @@ unsigned long i915_gfx_val(struct drm_i915_private *dev_priv)
return dev_priv->ips.gfx_power + state2;
}
+unsigned long i915_gfx_val(struct drm_i915_private *dev_priv)
+{
+ unsigned long val;
+
+ if (dev_priv->info->gen != 5)
+ return 0;
+
+ spin_lock_irq(&mchdev_lock);
+
+ val = __i915_gfx_val(dev_priv);
+
+ spin_unlock_irq(&mchdev_lock);
+
+ return val;
+}
+
/**
* i915_read_mch_val - return value for IPS use
*
@@ -3040,8 +3076,8 @@ unsigned long i915_read_mch_val(void)
goto out_unlock;
dev_priv = i915_mch_dev;
- chipset_val = i915_chipset_val(dev_priv);
- graphics_val = i915_gfx_val(dev_priv);
+ chipset_val = __i915_chipset_val(dev_priv);
+ graphics_val = __i915_gfx_val(dev_priv);
ret = chipset_val + graphics_val;
@@ -3723,6 +3759,9 @@ static void gen3_init_clock_gating(struct drm_device *dev)
if (IS_PINEVIEW(dev))
I915_WRITE(ECOSKPD, _MASKED_BIT_ENABLE(ECO_GATING_CX_ONLY));
+
+ /* IIR "flip pending" means done if this bit is set */
+ I915_WRITE(ECOSKPD, _MASKED_BIT_DISABLE(ECO_FLIP_DONE));
}
static void i85x_init_clock_gating(struct drm_device *dev)
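
i915_chipset_val() and i915_gfx_val() are split into a lock-taking public wrapper and a __-prefixed core that assumes mchdev_lock is already held, so i915_read_mch_val() (which takes the lock itself) can call the cores without deadlocking. The pattern in miniature, with compute_val() as a stand-in:

    static unsigned long __example_val(struct drm_i915_private *dev_priv)
    {
            /* caller must hold mchdev_lock */
            return compute_val(dev_priv);
    }

    unsigned long example_val(struct drm_i915_private *dev_priv)
    {
            unsigned long val;

            spin_lock_irq(&mchdev_lock);
            val = __example_val(dev_priv);
            spin_unlock_irq(&mchdev_lock);

            return val;
    }
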
diff --git a/drivers/gpu/drm/i915/intel_ringbuffer.c b/drivers/gpu/drm/i915/intel_ringbuffer.c
index 55cdb4d30a1..984a0c5fbf5 100644
--- a/drivers/gpu/drm/i915/intel_ringbuffer.c
+++ b/drivers/gpu/drm/i915/intel_ringbuffer.c
@@ -464,7 +464,7 @@ init_pipe_control(struct intel_ring_buffer *ring)
goto err_unref;
pc->gtt_offset = obj->gtt_offset;
- pc->cpu_page = kmap(obj->pages[0]);
+ pc->cpu_page = kmap(sg_page(obj->pages->sgl));
if (pc->cpu_page == NULL)
goto err_unpin;
@@ -491,7 +491,8 @@ cleanup_pipe_control(struct intel_ring_buffer *ring)
return;
obj = pc->obj;
- kunmap(obj->pages[0]);
+
+ kunmap(sg_page(obj->pages->sgl));
i915_gem_object_unpin(obj);
drm_gem_object_unreference(&obj->base);
@@ -1026,7 +1027,7 @@ static void cleanup_status_page(struct intel_ring_buffer *ring)
if (obj == NULL)
return;
- kunmap(obj->pages[0]);
+ kunmap(sg_page(obj->pages->sgl));
i915_gem_object_unpin(obj);
drm_gem_object_unreference(&obj->base);
ring->status_page.obj = NULL;
@@ -1053,7 +1054,7 @@ static int init_status_page(struct intel_ring_buffer *ring)
}
ring->status_page.gfx_addr = obj->gtt_offset;
- ring->status_page.page_addr = kmap(obj->pages[0]);
+ ring->status_page.page_addr = kmap(sg_page(obj->pages->sgl));
if (ring->status_page.page_addr == NULL) {
ret = -ENOMEM;
goto err_unpin;
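
The ringbuffer code still kmap()s a single status or pipe-control page; only the lookup changes, from an index into the old struct page ** array to the head entry of the sg_table. In short, a sketch:

    /* obj->pages[0] becomes the page behind the first scatterlist entry */
    void *vaddr = kmap(sg_page(obj->pages->sgl));
    /* ... use vaddr ... */
    kunmap(sg_page(obj->pages->sgl));
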
diff --git a/drivers/gpu/drm/i915/intel_sprite.c b/drivers/gpu/drm/i915/intel_sprite.c
index cc8df4de2d9..7644f31a377 100644
--- a/drivers/gpu/drm/i915/intel_sprite.c
+++ b/drivers/gpu/drm/i915/intel_sprite.c
@@ -60,11 +60,11 @@ ivb_update_plane(struct drm_plane *plane, struct drm_framebuffer *fb,
switch (fb->pixel_format) {
case DRM_FORMAT_XBGR8888:
- sprctl |= SPRITE_FORMAT_RGBX888;
+ sprctl |= SPRITE_FORMAT_RGBX888 | SPRITE_RGB_ORDER_RGBX;
pixel_size = 4;
break;
case DRM_FORMAT_XRGB8888:
- sprctl |= SPRITE_FORMAT_RGBX888 | SPRITE_RGB_ORDER_RGBX;
+ sprctl |= SPRITE_FORMAT_RGBX888;
pixel_size = 4;
break;
case DRM_FORMAT_YUYV:
diff --git a/drivers/gpu/drm/mgag200/mgag200_drv.c b/drivers/gpu/drm/mgag200/mgag200_drv.c
index ea1024d7997..e5f145d2cb3 100644
--- a/drivers/gpu/drm/mgag200/mgag200_drv.c
+++ b/drivers/gpu/drm/mgag200/mgag200_drv.c
@@ -84,6 +84,9 @@ static const struct file_operations mgag200_driver_fops = {
.mmap = mgag200_mmap,
.poll = drm_poll,
.fasync = drm_fasync,
+#ifdef CONFIG_COMPAT
+ .compat_ioctl = drm_compat_ioctl,
+#endif
.read = drm_read,
};
diff --git a/drivers/gpu/drm/nouveau/nouveau_display.c b/drivers/gpu/drm/nouveau/nouveau_display.c
index 69688ef5cf4..7e16dc5e646 100644
--- a/drivers/gpu/drm/nouveau/nouveau_display.c
+++ b/drivers/gpu/drm/nouveau/nouveau_display.c
@@ -598,7 +598,7 @@ nouveau_display_dumb_create(struct drm_file *file_priv, struct drm_device *dev,
args->size = args->pitch * args->height;
args->size = roundup(args->size, PAGE_SIZE);
- ret = nouveau_gem_new(dev, args->size, 0, TTM_PL_FLAG_VRAM, 0, 0, &bo);
+ ret = nouveau_gem_new(dev, args->size, 0, NOUVEAU_GEM_DOMAIN_VRAM, 0, 0, &bo);
if (ret)
return ret;
diff --git a/drivers/gpu/drm/nouveau/nouveau_state.c b/drivers/gpu/drm/nouveau/nouveau_state.c
index 1866dbb4997..c61014442aa 100644
--- a/drivers/gpu/drm/nouveau/nouveau_state.c
+++ b/drivers/gpu/drm/nouveau/nouveau_state.c
@@ -736,9 +736,11 @@ nouveau_card_init(struct drm_device *dev)
}
break;
case NV_C0:
- nvc0_copy_create(dev, 1);
+ if (!(nv_rd32(dev, 0x022500) & 0x00000200))
+ nvc0_copy_create(dev, 1);
case NV_D0:
- nvc0_copy_create(dev, 0);
+ if (!(nv_rd32(dev, 0x022500) & 0x00000100))
+ nvc0_copy_create(dev, 0);
break;
default:
break;
diff --git a/drivers/gpu/drm/nouveau/nv50_gpio.c b/drivers/gpu/drm/nouveau/nv50_gpio.c
index f429e6a8ca7..c399d510b27 100644
--- a/drivers/gpu/drm/nouveau/nv50_gpio.c
+++ b/drivers/gpu/drm/nouveau/nv50_gpio.c
@@ -22,6 +22,7 @@
* Authors: Ben Skeggs
*/
+#include <linux/dmi.h>
#include "drmP.h"
#include "nouveau_drv.h"
#include "nouveau_hw.h"
@@ -110,11 +111,26 @@ nv50_gpio_isr(struct drm_device *dev)
nv_wr32(dev, 0xe074, intr1);
}
+static struct dmi_system_id gpio_reset_ids[] = {
+ {
+ .ident = "Apple Macbook 10,1",
+ .matches = {
+ DMI_MATCH(DMI_SYS_VENDOR, "Apple Inc."),
+ DMI_MATCH(DMI_PRODUCT_NAME, "MacBookPro10,1"),
+ }
+ },
+ { }
+};
+
int
nv50_gpio_init(struct drm_device *dev)
{
struct drm_nouveau_private *dev_priv = dev->dev_private;
+ /* initialise gpios and routing to vbios defaults */
+ if (dmi_check_system(gpio_reset_ids))
+ nouveau_gpio_reset(dev);
+
/* disable, and ack any pending gpio interrupts */
nv_wr32(dev, 0xe050, 0x00000000);
nv_wr32(dev, 0xe054, 0xffffffff);
diff --git a/drivers/gpu/drm/nouveau/nvd0_display.c b/drivers/gpu/drm/nouveau/nvd0_display.c
index dac525b2994..8a2fc89b776 100644
--- a/drivers/gpu/drm/nouveau/nvd0_display.c
+++ b/drivers/gpu/drm/nouveau/nvd0_display.c
@@ -1510,10 +1510,10 @@ nvd0_sor_mode_set(struct drm_encoder *encoder, struct drm_display_mode *umode,
case OUTPUT_DP:
if (nv_connector->base.display_info.bpc == 6) {
nv_encoder->dp.datarate = mode->clock * 18 / 8;
- syncs |= 0x00000140;
+ syncs |= 0x00000002 << 6;
} else {
nv_encoder->dp.datarate = mode->clock * 24 / 8;
- syncs |= 0x00000180;
+ syncs |= 0x00000005 << 6;
}
if (nv_encoder->dcb->sorconf.link & 1)
diff --git a/drivers/gpu/drm/radeon/atombios_crtc.c b/drivers/gpu/drm/radeon/atombios_crtc.c
index f4d4505fe83..2817101fb16 100644
--- a/drivers/gpu/drm/radeon/atombios_crtc.c
+++ b/drivers/gpu/drm/radeon/atombios_crtc.c
@@ -258,7 +258,6 @@ void atombios_crtc_dpms(struct drm_crtc *crtc, int mode)
radeon_crtc->enabled = true;
/* adjust pm to dpms changes BEFORE enabling crtcs */
radeon_pm_compute_clocks(rdev);
- /* disable crtc pair power gating before programming */
if (ASIC_IS_DCE6(rdev) && !radeon_crtc->in_mode_set)
atombios_powergate_crtc(crtc, ATOM_DISABLE);
atombios_enable_crtc(crtc, ATOM_ENABLE);
@@ -278,25 +277,8 @@ void atombios_crtc_dpms(struct drm_crtc *crtc, int mode)
atombios_enable_crtc_memreq(crtc, ATOM_DISABLE);
atombios_enable_crtc(crtc, ATOM_DISABLE);
radeon_crtc->enabled = false;
- /* power gating is per-pair */
- if (ASIC_IS_DCE6(rdev) && !radeon_crtc->in_mode_set) {
- struct drm_crtc *other_crtc;
- struct radeon_crtc *other_radeon_crtc;
- list_for_each_entry(other_crtc, &rdev->ddev->mode_config.crtc_list, head) {
- other_radeon_crtc = to_radeon_crtc(other_crtc);
- if (((radeon_crtc->crtc_id == 0) && (other_radeon_crtc->crtc_id == 1)) ||
- ((radeon_crtc->crtc_id == 1) && (other_radeon_crtc->crtc_id == 0)) ||
- ((radeon_crtc->crtc_id == 2) && (other_radeon_crtc->crtc_id == 3)) ||
- ((radeon_crtc->crtc_id == 3) && (other_radeon_crtc->crtc_id == 2)) ||
- ((radeon_crtc->crtc_id == 4) && (other_radeon_crtc->crtc_id == 5)) ||
- ((radeon_crtc->crtc_id == 5) && (other_radeon_crtc->crtc_id == 4))) {
- /* if both crtcs in the pair are off, enable power gating */
- if (other_radeon_crtc->enabled == false)
- atombios_powergate_crtc(crtc, ATOM_ENABLE);
- break;
- }
- }
- }
+ if (ASIC_IS_DCE6(rdev) && !radeon_crtc->in_mode_set)
+ atombios_powergate_crtc(crtc, ATOM_ENABLE);
/* adjust pm to dpms changes AFTER disabling crtcs */
radeon_pm_compute_clocks(rdev);
break;
@@ -1682,9 +1664,22 @@ static void atombios_crtc_disable(struct drm_crtc *crtc)
struct drm_device *dev = crtc->dev;
struct radeon_device *rdev = dev->dev_private;
struct radeon_atom_ss ss;
+ int i;
atombios_crtc_dpms(crtc, DRM_MODE_DPMS_OFF);
+ for (i = 0; i < rdev->num_crtc; i++) {
+ if (rdev->mode_info.crtcs[i] &&
+ rdev->mode_info.crtcs[i]->enabled &&
+ i != radeon_crtc->crtc_id &&
+ radeon_crtc->pll_id == rdev->mode_info.crtcs[i]->pll_id) {
+ /* another enabled crtc is still using this pll;
+ * don't turn the pll off
+ */
+ goto done;
+ }
+ }
+
switch (radeon_crtc->pll_id) {
case ATOM_PPLL1:
case ATOM_PPLL2:
@@ -1701,6 +1696,7 @@ static void atombios_crtc_disable(struct drm_crtc *crtc)
default:
break;
}
+done:
radeon_crtc->pll_id = -1;
}
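
The loop added to atombios_crtc_disable() is a poor man's reference count: DCE
hardware shares PLLs between CRTCs, so a PLL is only powered down once no other
enabled CRTC holds the same pll_id. The check, pulled out as a standalone
predicate (hypothetical helper, same logic as the hunk):

    static bool radeon_pll_shared_with_enabled_crtc(struct radeon_device *rdev,
                                                    struct radeon_crtc *radeon_crtc)
    {
            int i;

            for (i = 0; i < rdev->num_crtc; i++) {
                    struct radeon_crtc *other = rdev->mode_info.crtcs[i];

                    /* another enabled crtc drives the same pll */
                    if (other && other->enabled &&
                        i != radeon_crtc->crtc_id &&
                        other->pll_id == radeon_crtc->pll_id)
                            return true;
            }
            return false;
    }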
diff --git a/drivers/gpu/drm/radeon/atombios_dp.c b/drivers/gpu/drm/radeon/atombios_dp.c
index 8a74e1bf045..d48224b0d6b 100644
--- a/drivers/gpu/drm/radeon/atombios_dp.c
+++ b/drivers/gpu/drm/radeon/atombios_dp.c
@@ -577,30 +577,25 @@ int radeon_dp_get_panel_mode(struct drm_encoder *encoder,
struct radeon_device *rdev = dev->dev_private;
struct radeon_connector *radeon_connector = to_radeon_connector(connector);
int panel_mode = DP_PANEL_MODE_EXTERNAL_DP_MODE;
+ u16 dp_bridge = radeon_connector_encoder_get_dp_bridge_encoder_id(connector);
+ u8 tmp;
if (!ASIC_IS_DCE4(rdev))
return panel_mode;
- if (radeon_connector_encoder_get_dp_bridge_encoder_id(connector) ==
- ENCODER_OBJECT_ID_NUTMEG)
- panel_mode = DP_PANEL_MODE_INTERNAL_DP1_MODE;
- else if (radeon_connector_encoder_get_dp_bridge_encoder_id(connector) ==
- ENCODER_OBJECT_ID_TRAVIS) {
- u8 id[6];
- int i;
- for (i = 0; i < 6; i++)
- id[i] = radeon_read_dpcd_reg(radeon_connector, 0x503 + i);
- if (id[0] == 0x73 &&
- id[1] == 0x69 &&
- id[2] == 0x76 &&
- id[3] == 0x61 &&
- id[4] == 0x72 &&
- id[5] == 0x54)
+ if (dp_bridge != ENCODER_OBJECT_ID_NONE) {
+ /* DP bridge chips */
+ tmp = radeon_read_dpcd_reg(radeon_connector, DP_EDP_CONFIGURATION_CAP);
+ if (tmp & 1)
+ panel_mode = DP_PANEL_MODE_INTERNAL_DP2_MODE;
+ else if ((dp_bridge == ENCODER_OBJECT_ID_NUTMEG) ||
+ (dp_bridge == ENCODER_OBJECT_ID_TRAVIS))
panel_mode = DP_PANEL_MODE_INTERNAL_DP1_MODE;
else
- panel_mode = DP_PANEL_MODE_INTERNAL_DP2_MODE;
+ panel_mode = DP_PANEL_MODE_EXTERNAL_DP_MODE;
} else if (connector->connector_type == DRM_MODE_CONNECTOR_eDP) {
- u8 tmp = radeon_read_dpcd_reg(radeon_connector, DP_EDP_CONFIGURATION_CAP);
+ /* eDP */
+ tmp = radeon_read_dpcd_reg(radeon_connector, DP_EDP_CONFIGURATION_CAP);
if (tmp & 1)
panel_mode = DP_PANEL_MODE_INTERNAL_DP2_MODE;
}
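
The rewritten radeon_dp_get_panel_mode() now keys everything off two inputs: the
bridge encoder id and bit 0 of DP_EDP_CONFIGURATION_CAP (DPCD 0x0d, the
alternate-scrambler-reset capability). Factored into a pure function, the decision
reads as follows (hypothetical refactor of the logic above):

    static int dp_panel_mode(u16 dp_bridge, bool is_edp, u8 edp_cfg_cap)
    {
            if (dp_bridge != ENCODER_OBJECT_ID_NONE) {
                    if (edp_cfg_cap & 1)    /* bridge wants alt scrambler reset */
                            return DP_PANEL_MODE_INTERNAL_DP2_MODE;
                    if (dp_bridge == ENCODER_OBJECT_ID_NUTMEG ||
                        dp_bridge == ENCODER_OBJECT_ID_TRAVIS)
                            return DP_PANEL_MODE_INTERNAL_DP1_MODE;
                    return DP_PANEL_MODE_EXTERNAL_DP_MODE;
            }
            if (is_edp && (edp_cfg_cap & 1))        /* eDP panel, same cap bit */
                    return DP_PANEL_MODE_INTERNAL_DP2_MODE;
            return DP_PANEL_MODE_EXTERNAL_DP_MODE;
    }

Notably this drops the old TRAVIS heuristic of sniffing DPCD 0x503..0x508 for the
"sivarT" ID string and relies on the capability bit instead.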
diff --git a/drivers/gpu/drm/radeon/atombios_encoders.c b/drivers/gpu/drm/radeon/atombios_encoders.c
index f9bc27fe269..6e8803a1170 100644
--- a/drivers/gpu/drm/radeon/atombios_encoders.c
+++ b/drivers/gpu/drm/radeon/atombios_encoders.c
@@ -1379,6 +1379,8 @@ radeon_atom_encoder_dpms_dig(struct drm_encoder *encoder, int mode)
struct drm_device *dev = encoder->dev;
struct radeon_device *rdev = dev->dev_private;
struct radeon_encoder *radeon_encoder = to_radeon_encoder(encoder);
+ struct drm_encoder *ext_encoder = radeon_get_external_encoder(encoder);
+ struct radeon_encoder_atom_dig *dig = radeon_encoder->enc_priv;
struct drm_connector *connector = radeon_get_connector_for_encoder(encoder);
struct radeon_connector *radeon_connector = NULL;
struct radeon_connector_atom_dig *radeon_dig_connector = NULL;
@@ -1390,19 +1392,37 @@ radeon_atom_encoder_dpms_dig(struct drm_encoder *encoder, int mode)
switch (mode) {
case DRM_MODE_DPMS_ON:
- /* some early dce3.2 boards have a bug in their transmitter control table */
- if ((rdev->family == CHIP_RV710) || (rdev->family == CHIP_RV730) ||
- ASIC_IS_DCE41(rdev) || ASIC_IS_DCE5(rdev)) {
- if (ASIC_IS_DCE6(rdev)) {
- /* It seems we need to call ATOM_ENCODER_CMD_SETUP again
- * before reenabling encoder on DPMS ON, otherwise we never
- * get picture
- */
- atombios_dig_encoder_setup(encoder, ATOM_ENCODER_CMD_SETUP, 0);
+ if (ASIC_IS_DCE41(rdev) || ASIC_IS_DCE5(rdev)) {
+ if (!connector)
+ dig->panel_mode = DP_PANEL_MODE_EXTERNAL_DP_MODE;
+ else
+ dig->panel_mode = radeon_dp_get_panel_mode(encoder, connector);
+
+ /* setup and enable the encoder */
+ atombios_dig_encoder_setup(encoder, ATOM_ENCODER_CMD_SETUP, 0);
+ atombios_dig_encoder_setup(encoder,
+ ATOM_ENCODER_CMD_SETUP_PANEL_MODE,
+ dig->panel_mode);
+ if (ext_encoder) {
+ if (ASIC_IS_DCE41(rdev) || ASIC_IS_DCE61(rdev))
+ atombios_external_encoder_setup(encoder, ext_encoder,
+ EXTERNAL_ENCODER_ACTION_V3_ENCODER_SETUP);
}
atombios_dig_transmitter_setup(encoder, ATOM_TRANSMITTER_ACTION_ENABLE, 0, 0);
- } else {
+ } else if (ASIC_IS_DCE4(rdev)) {
+ /* setup and enable the encoder */
+ atombios_dig_encoder_setup(encoder, ATOM_ENCODER_CMD_SETUP, 0);
+ /* enable the transmitter */
+ atombios_dig_transmitter_setup(encoder, ATOM_TRANSMITTER_ACTION_ENABLE, 0, 0);
atombios_dig_transmitter_setup(encoder, ATOM_TRANSMITTER_ACTION_ENABLE_OUTPUT, 0, 0);
+ } else {
+ /* setup and enable the encoder and transmitter */
+ atombios_dig_encoder_setup(encoder, ATOM_ENABLE, 0);
+ atombios_dig_transmitter_setup(encoder, ATOM_TRANSMITTER_ACTION_SETUP, 0, 0);
+ atombios_dig_transmitter_setup(encoder, ATOM_TRANSMITTER_ACTION_ENABLE, 0, 0);
+ /* some early dce3.2 boards have a bug in their transmitter control table */
+ if ((rdev->family != CHIP_RV710) && (rdev->family != CHIP_RV730))
+ atombios_dig_transmitter_setup(encoder, ATOM_TRANSMITTER_ACTION_ENABLE_OUTPUT, 0, 0);
}
if (ENCODER_MODE_IS_DP(atombios_get_encoder_mode(encoder)) && connector) {
if (connector->connector_type == DRM_MODE_CONNECTOR_eDP) {
@@ -1420,10 +1440,19 @@ radeon_atom_encoder_dpms_dig(struct drm_encoder *encoder, int mode)
case DRM_MODE_DPMS_STANDBY:
case DRM_MODE_DPMS_SUSPEND:
case DRM_MODE_DPMS_OFF:
- if (ASIC_IS_DCE41(rdev) || ASIC_IS_DCE5(rdev))
+ if (ASIC_IS_DCE41(rdev) || ASIC_IS_DCE5(rdev)) {
+ /* disable the transmitter */
atombios_dig_transmitter_setup(encoder, ATOM_TRANSMITTER_ACTION_DISABLE, 0, 0);
- else
+ } else if (ASIC_IS_DCE4(rdev)) {
+ /* disable the transmitter */
+ atombios_dig_transmitter_setup(encoder, ATOM_TRANSMITTER_ACTION_DISABLE_OUTPUT, 0, 0);
+ atombios_dig_transmitter_setup(encoder, ATOM_TRANSMITTER_ACTION_DISABLE, 0, 0);
+ } else {
+ /* disable the encoder and transmitter */
atombios_dig_transmitter_setup(encoder, ATOM_TRANSMITTER_ACTION_DISABLE_OUTPUT, 0, 0);
+ atombios_dig_transmitter_setup(encoder, ATOM_TRANSMITTER_ACTION_DISABLE, 0, 0);
+ atombios_dig_encoder_setup(encoder, ATOM_DISABLE, 0);
+ }
if (ENCODER_MODE_IS_DP(atombios_get_encoder_mode(encoder)) && connector) {
if (ASIC_IS_DCE4(rdev))
atombios_dig_encoder_setup(encoder, ATOM_ENCODER_CMD_DP_VIDEO_OFF, 0);
@@ -1740,13 +1769,34 @@ static int radeon_atom_pick_dig_encoder(struct drm_encoder *encoder)
struct radeon_crtc *radeon_crtc = to_radeon_crtc(encoder->crtc);
struct radeon_encoder *radeon_encoder = to_radeon_encoder(encoder);
struct drm_encoder *test_encoder;
- struct radeon_encoder_atom_dig *dig;
+ struct radeon_encoder_atom_dig *dig = radeon_encoder->enc_priv;
uint32_t dig_enc_in_use = 0;
- /* DCE4/5 */
- if (ASIC_IS_DCE4(rdev)) {
- dig = radeon_encoder->enc_priv;
- if (ASIC_IS_DCE41(rdev)) {
+ if (ASIC_IS_DCE6(rdev)) {
+ /* DCE6 */
+ switch (radeon_encoder->encoder_id) {
+ case ENCODER_OBJECT_ID_INTERNAL_UNIPHY:
+ if (dig->linkb)
+ return 1;
+ else
+ return 0;
+ break;
+ case ENCODER_OBJECT_ID_INTERNAL_UNIPHY1:
+ if (dig->linkb)
+ return 3;
+ else
+ return 2;
+ break;
+ case ENCODER_OBJECT_ID_INTERNAL_UNIPHY2:
+ if (dig->linkb)
+ return 5;
+ else
+ return 4;
+ break;
+ }
+ } else if (ASIC_IS_DCE4(rdev)) {
+ /* DCE4/5 */
+ if (ASIC_IS_DCE41(rdev) && !ASIC_IS_DCE61(rdev)) {
/* ontario follows DCE4 */
if (rdev->family == CHIP_PALM) {
if (dig->linkb)
@@ -1848,10 +1898,12 @@ radeon_atom_encoder_mode_set(struct drm_encoder *encoder,
struct drm_device *dev = encoder->dev;
struct radeon_device *rdev = dev->dev_private;
struct radeon_encoder *radeon_encoder = to_radeon_encoder(encoder);
- struct drm_encoder *ext_encoder = radeon_get_external_encoder(encoder);
radeon_encoder->pixel_clock = adjusted_mode->clock;
+ /* need to call this here rather than in prepare() since we need some crtc info */
+ radeon_atom_encoder_dpms(encoder, DRM_MODE_DPMS_OFF);
+
if (ASIC_IS_AVIVO(rdev) && !ASIC_IS_DCE4(rdev)) {
if (radeon_encoder->active_device & (ATOM_DEVICE_CV_SUPPORT | ATOM_DEVICE_TV_SUPPORT))
atombios_yuv_setup(encoder, true);
@@ -1870,38 +1922,7 @@ radeon_atom_encoder_mode_set(struct drm_encoder *encoder,
case ENCODER_OBJECT_ID_INTERNAL_UNIPHY1:
case ENCODER_OBJECT_ID_INTERNAL_UNIPHY2:
case ENCODER_OBJECT_ID_INTERNAL_KLDSCP_LVTMA:
- if (ASIC_IS_DCE41(rdev) || ASIC_IS_DCE5(rdev)) {
- struct drm_connector *connector = radeon_get_connector_for_encoder(encoder);
- struct radeon_encoder_atom_dig *dig = radeon_encoder->enc_priv;
-
- if (!connector)
- dig->panel_mode = DP_PANEL_MODE_EXTERNAL_DP_MODE;
- else
- dig->panel_mode = radeon_dp_get_panel_mode(encoder, connector);
-
- /* setup and enable the encoder */
- atombios_dig_encoder_setup(encoder, ATOM_ENCODER_CMD_SETUP, 0);
- atombios_dig_encoder_setup(encoder,
- ATOM_ENCODER_CMD_SETUP_PANEL_MODE,
- dig->panel_mode);
- } else if (ASIC_IS_DCE4(rdev)) {
- /* disable the transmitter */
- atombios_dig_transmitter_setup(encoder, ATOM_TRANSMITTER_ACTION_DISABLE, 0, 0);
- /* setup and enable the encoder */
- atombios_dig_encoder_setup(encoder, ATOM_ENCODER_CMD_SETUP, 0);
-
- /* enable the transmitter */
- atombios_dig_transmitter_setup(encoder, ATOM_TRANSMITTER_ACTION_ENABLE, 0, 0);
- } else {
- /* disable the encoder and transmitter */
- atombios_dig_transmitter_setup(encoder, ATOM_TRANSMITTER_ACTION_DISABLE, 0, 0);
- atombios_dig_encoder_setup(encoder, ATOM_DISABLE, 0);
-
- /* setup and enable the encoder and transmitter */
- atombios_dig_encoder_setup(encoder, ATOM_ENABLE, 0);
- atombios_dig_transmitter_setup(encoder, ATOM_TRANSMITTER_ACTION_SETUP, 0, 0);
- atombios_dig_transmitter_setup(encoder, ATOM_TRANSMITTER_ACTION_ENABLE, 0, 0);
- }
+ /* handled in dpms */
break;
case ENCODER_OBJECT_ID_INTERNAL_DDI:
case ENCODER_OBJECT_ID_INTERNAL_DVO1:
@@ -1922,14 +1943,6 @@ radeon_atom_encoder_mode_set(struct drm_encoder *encoder,
break;
}
- if (ext_encoder) {
- if (ASIC_IS_DCE41(rdev) || ASIC_IS_DCE61(rdev))
- atombios_external_encoder_setup(encoder, ext_encoder,
- EXTERNAL_ENCODER_ACTION_V3_ENCODER_SETUP);
- else
- atombios_external_encoder_setup(encoder, ext_encoder, ATOM_ENABLE);
- }
-
atombios_apply_encoder_quirks(encoder, adjusted_mode);
if (atombios_get_encoder_mode(encoder) == ATOM_ENCODER_MODE_HDMI) {
@@ -2116,7 +2129,6 @@ static void radeon_atom_encoder_prepare(struct drm_encoder *encoder)
}
radeon_atom_output_lock(encoder, true);
- radeon_atom_encoder_dpms(encoder, DRM_MODE_DPMS_OFF);
if (connector) {
struct radeon_connector *radeon_connector = to_radeon_connector(connector);
@@ -2137,6 +2149,7 @@ static void radeon_atom_encoder_prepare(struct drm_encoder *encoder)
static void radeon_atom_encoder_commit(struct drm_encoder *encoder)
{
+ /* need to call this here as we need the crtc set up */
radeon_atom_encoder_dpms(encoder, DRM_MODE_DPMS_ON);
radeon_atom_output_lock(encoder, false);
}
@@ -2177,14 +2190,7 @@ static void radeon_atom_encoder_disable(struct drm_encoder *encoder)
case ENCODER_OBJECT_ID_INTERNAL_UNIPHY1:
case ENCODER_OBJECT_ID_INTERNAL_UNIPHY2:
case ENCODER_OBJECT_ID_INTERNAL_KLDSCP_LVTMA:
- if (ASIC_IS_DCE4(rdev))
- /* disable the transmitter */
- atombios_dig_transmitter_setup(encoder, ATOM_TRANSMITTER_ACTION_DISABLE, 0, 0);
- else {
- /* disable the encoder and transmitter */
- atombios_dig_transmitter_setup(encoder, ATOM_TRANSMITTER_ACTION_DISABLE, 0, 0);
- atombios_dig_encoder_setup(encoder, ATOM_DISABLE, 0);
- }
+ /* handled in dpms */
break;
case ENCODER_OBJECT_ID_INTERNAL_DDI:
case ENCODER_OBJECT_ID_INTERNAL_DVO1:
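
The new DCE6 branch in radeon_atom_pick_dig_encoder() encodes a regular mapping:
each UNIPHY block owns a pair of DIG encoders, link A taking the even one and
link B the odd one. Written as a formula instead of a switch (hypothetical
helper):

    static int dce6_dig_from_uniphy(int uniphy_index, bool linkb)
    {
            /* UNIPHY0 -> DIG0/1, UNIPHY1 -> DIG2/3, UNIPHY2 -> DIG4/5 */
            return uniphy_index * 2 + (linkb ? 1 : 0);
    }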
diff --git a/drivers/gpu/drm/radeon/r100.c b/drivers/gpu/drm/radeon/r100.c
index 8acb34fd3fd..8d7e33a0b24 100644
--- a/drivers/gpu/drm/radeon/r100.c
+++ b/drivers/gpu/drm/radeon/r100.c
@@ -1182,7 +1182,8 @@ int r100_cp_init(struct radeon_device *rdev, unsigned ring_size)
ring->ready = true;
radeon_ttm_set_active_vram_size(rdev, rdev->mc.real_vram_size);
- if (radeon_ring_supports_scratch_reg(rdev, ring)) {
+ if (!ring->rptr_save_reg /* not resuming from suspend */
+ && radeon_ring_supports_scratch_reg(rdev, ring)) {
r = radeon_scratch_get(rdev, &ring->rptr_save_reg);
if (r) {
DRM_ERROR("failed to get scratch reg for rptr save (%d).\n", r);
diff --git a/drivers/gpu/drm/radeon/r600_cs.c b/drivers/gpu/drm/radeon/r600_cs.c
index ab74e6b149e..f37676d7f21 100644
--- a/drivers/gpu/drm/radeon/r600_cs.c
+++ b/drivers/gpu/drm/radeon/r600_cs.c
@@ -63,6 +63,7 @@ struct r600_cs_track {
u32 cb_color_size_idx[8]; /* unused */
u32 cb_target_mask;
u32 cb_shader_mask; /* unused */
+ bool is_resolve;
u32 cb_color_size[8];
u32 vgt_strmout_en;
u32 vgt_strmout_buffer_en;
@@ -315,7 +316,15 @@ static void r600_cs_track_init(struct r600_cs_track *track)
track->cb_color_bo[i] = NULL;
track->cb_color_bo_offset[i] = 0xFFFFFFFF;
track->cb_color_bo_mc[i] = 0xFFFFFFFF;
- }
+ track->cb_color_frag_bo[i] = NULL;
+ track->cb_color_frag_offset[i] = 0xFFFFFFFF;
+ track->cb_color_tile_bo[i] = NULL;
+ track->cb_color_tile_offset[i] = 0xFFFFFFFF;
+ track->cb_color_mask[i] = 0xFFFFFFFF;
+ }
+ track->is_resolve = false;
+ track->nsamples = 16;
+ track->log_nsamples = 4;
track->cb_target_mask = 0xFFFFFFFF;
track->cb_shader_mask = 0xFFFFFFFF;
track->cb_dirty = true;
@@ -352,6 +361,8 @@ static int r600_cs_track_validate_cb(struct radeon_cs_parser *p, int i)
volatile u32 *ib = p->ib.ptr;
unsigned array_mode;
u32 format;
+ /* When resolve is used, the second colorbuffer always has 1 sample. */
+ unsigned nsamples = track->is_resolve && i == 1 ? 1 : track->nsamples;
size = radeon_bo_size(track->cb_color_bo[i]) - track->cb_color_bo_offset[i];
format = G_0280A0_FORMAT(track->cb_color_info[i]);
@@ -375,7 +386,7 @@ static int r600_cs_track_validate_cb(struct radeon_cs_parser *p, int i)
array_check.group_size = track->group_size;
array_check.nbanks = track->nbanks;
array_check.npipes = track->npipes;
- array_check.nsamples = track->nsamples;
+ array_check.nsamples = nsamples;
array_check.blocksize = r600_fmt_get_blocksize(format);
if (r600_get_array_mode_alignment(&array_check,
&pitch_align, &height_align, &depth_align, &base_align)) {
@@ -421,7 +432,7 @@ static int r600_cs_track_validate_cb(struct radeon_cs_parser *p, int i)
/* check offset */
tmp = r600_fmt_get_nblocksy(format, height) * r600_fmt_get_nblocksx(format, pitch) *
- r600_fmt_get_blocksize(format) * track->nsamples;
+ r600_fmt_get_blocksize(format) * nsamples;
switch (array_mode) {
default:
case V_0280A0_ARRAY_LINEAR_GENERAL:
@@ -792,6 +803,12 @@ static int r600_cs_track_check(struct radeon_cs_parser *p)
*/
if (track->cb_dirty) {
tmp = track->cb_target_mask;
+
+ /* We must check both colorbuffers for RESOLVE. */
+ if (track->is_resolve) {
+ tmp |= 0xff;
+ }
+
for (i = 0; i < 8; i++) {
if ((tmp >> (i * 4)) & 0xF) {
/* at least one component is enabled */
@@ -1281,6 +1298,11 @@ static int r600_cs_check_reg(struct radeon_cs_parser *p, u32 reg, u32 idx)
track->nsamples = 1 << tmp;
track->cb_dirty = true;
break;
+ case R_028808_CB_COLOR_CONTROL:
+ tmp = G_028808_SPECIAL_OP(radeon_get_ib_value(p, idx));
+ track->is_resolve = tmp == V_028808_SPECIAL_RESOLVE_BOX;
+ track->cb_dirty = true;
+ break;
case R_0280A0_CB_COLOR0_INFO:
case R_0280A4_CB_COLOR1_INFO:
case R_0280A8_CB_COLOR2_INFO:
@@ -1416,7 +1438,7 @@ static int r600_cs_check_reg(struct radeon_cs_parser *p, u32 reg, u32 idx)
case R_028118_CB_COLOR6_MASK:
case R_02811C_CB_COLOR7_MASK:
tmp = (reg - R_028100_CB_COLOR0_MASK) / 4;
- track->cb_color_mask[tmp] = ib[idx];
+ track->cb_color_mask[tmp] = radeon_get_ib_value(p, idx);
if (G_0280A0_TILE_MODE(track->cb_color_info[tmp])) {
track->cb_dirty = true;
}
diff --git a/drivers/gpu/drm/radeon/r600d.h b/drivers/gpu/drm/radeon/r600d.h
index bdb69a63062..fa6f37099ba 100644
--- a/drivers/gpu/drm/radeon/r600d.h
+++ b/drivers/gpu/drm/radeon/r600d.h
@@ -66,6 +66,14 @@
#define CC_RB_BACKEND_DISABLE 0x98F4
#define BACKEND_DISABLE(x) ((x) << 16)
+#define R_028808_CB_COLOR_CONTROL 0x28808
+#define S_028808_SPECIAL_OP(x) (((x) & 0x7) << 4)
+#define G_028808_SPECIAL_OP(x) (((x) >> 4) & 0x7)
+#define C_028808_SPECIAL_OP 0xFFFFFF8F
+#define V_028808_SPECIAL_NORMAL 0x00
+#define V_028808_SPECIAL_DISABLE 0x01
+#define V_028808_SPECIAL_RESOLVE_BOX 0x07
+
#define CB_COLOR0_BASE 0x28040
#define CB_COLOR1_BASE 0x28044
#define CB_COLOR2_BASE 0x28048
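
These additions follow the r600d.h field-accessor convention: for a field at a
given bit position, S_xxx(v) shifts a value into place, G_xxx(r) extracts it from
a register word, and C_xxx is the AND-mask that clears it. For the 3-bit
SPECIAL_OP field at bits 6:4, a read-modify-write looks like this sketch:

    u32 cb_color_control = radeon_get_ib_value(p, idx);    /* as in the CS checker */

    /* clear the field, then program RESOLVE_BOX */
    cb_color_control &= C_028808_SPECIAL_OP;
    cb_color_control |= S_028808_SPECIAL_OP(V_028808_SPECIAL_RESOLVE_BOX);

    /* the checker's test above is the G_ accessor in action */
    if (G_028808_SPECIAL_OP(cb_color_control) == V_028808_SPECIAL_RESOLVE_BOX)
            track->is_resolve = true;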
diff --git a/drivers/gpu/drm/radeon/radeon_device.c b/drivers/gpu/drm/radeon/radeon_device.c
index d2e243867ac..7a3daebd732 100644
--- a/drivers/gpu/drm/radeon/radeon_device.c
+++ b/drivers/gpu/drm/radeon/radeon_device.c
@@ -1051,7 +1051,7 @@ int radeon_device_init(struct radeon_device *rdev,
if (rdev->flags & RADEON_IS_AGP)
rdev->need_dma32 = true;
if ((rdev->flags & RADEON_IS_PCI) &&
- (rdev->family < CHIP_RS400))
+ (rdev->family <= CHIP_RS740))
rdev->need_dma32 = true;
dma_bits = rdev->need_dma32 ? 32 : 40;
@@ -1346,12 +1346,15 @@ retry:
for (i = 0; i < RADEON_NUM_RINGS; ++i) {
radeon_ring_restore(rdev, &rdev->ring[i],
ring_sizes[i], ring_data[i]);
+ ring_sizes[i] = 0;
+ ring_data[i] = NULL;
}
r = radeon_ib_ring_tests(rdev);
if (r) {
dev_err(rdev->dev, "ib ring test failed (%d).\n", r);
if (saved) {
+ saved = false;
radeon_suspend(rdev);
goto retry;
}
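
Together these two fixes make the GPU-reset retry path single-shot and safe:
radeon_ring_restore() consumes (frees) the saved ring contents, so the pointers
and sizes are cleared as each ring is restored, and saved is dropped so a second
failed IB test reinitialises without touching already-freed data. The resulting
shape of the loop, as a condensed sketch of the code above:

    retry:
            /* ... asic reinit ... */
            for (i = 0; i < RADEON_NUM_RINGS; ++i) {
                    radeon_ring_restore(rdev, &rdev->ring[i],
                                        ring_sizes[i], ring_data[i]);
                    ring_sizes[i] = 0;      /* restore consumed (freed) the data */
                    ring_data[i] = NULL;    /* a second pass restores nothing */
            }
            if (radeon_ib_ring_tests(rdev) && saved) {
                    saved = false;          /* retry the restore at most once */
                    radeon_suspend(rdev);
                    goto retry;
            }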
diff --git a/drivers/gpu/drm/radeon/radeon_drv.c b/drivers/gpu/drm/radeon/radeon_drv.c
index 27d22d709c9..8c593ea82c4 100644
--- a/drivers/gpu/drm/radeon/radeon_drv.c
+++ b/drivers/gpu/drm/radeon/radeon_drv.c
@@ -63,9 +63,10 @@
* 2.19.0 - r600-eg: MSAA textures
* 2.20.0 - r600-si: RADEON_INFO_TIMESTAMP query
* 2.21.0 - r600-r700: FMASK and CMASK
+ * 2.22.0 - r600 only: RESOLVE_BOX allowed
*/
#define KMS_DRIVER_MAJOR 2
-#define KMS_DRIVER_MINOR 21
+#define KMS_DRIVER_MINOR 22
#define KMS_DRIVER_PATCHLEVEL 0
int radeon_driver_load_kms(struct drm_device *dev, unsigned long flags);
int radeon_driver_unload_kms(struct drm_device *dev);
diff --git a/drivers/gpu/drm/radeon/radeon_fence.c b/drivers/gpu/drm/radeon/radeon_fence.c
index 7b737b9339a..2a59375dbe5 100644
--- a/drivers/gpu/drm/radeon/radeon_fence.c
+++ b/drivers/gpu/drm/radeon/radeon_fence.c
@@ -131,7 +131,7 @@ int radeon_fence_emit(struct radeon_device *rdev,
*/
void radeon_fence_process(struct radeon_device *rdev, int ring)
{
- uint64_t seq, last_seq;
+ uint64_t seq, last_seq, last_emitted;
unsigned count_loop = 0;
bool wake = false;
@@ -158,13 +158,15 @@ void radeon_fence_process(struct radeon_device *rdev, int ring)
*/
last_seq = atomic64_read(&rdev->fence_drv[ring].last_seq);
do {
+ last_emitted = rdev->fence_drv[ring].sync_seq[ring];
seq = radeon_fence_read(rdev, ring);
seq |= last_seq & 0xffffffff00000000LL;
if (seq < last_seq) {
- seq += 0x100000000LL;
+ seq &= 0xffffffff;
+ seq |= last_emitted & 0xffffffff00000000LL;
}
- if (seq == last_seq) {
+ if (seq <= last_seq || seq > last_emitted) {
break;
}
/* If we loop over we don't want to return without
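
The fence readback fix changes how the 32-bit hardware sequence number is widened
to the driver's 64-bit space. Previously an apparent decrease was treated as
exactly one 32-bit wrap (+0x100000000), which a single garbage readback could turn
into a huge forward jump; now the upper 32 bits are re-based from the newest
emitted fence, and any result outside the window (last_seq, last_emitted] is
discarded. The rule as a helper (hypothetical, same arithmetic as the hunk):

    static bool fence_seq_extend(u32 hw_seq, u64 last_seq, u64 last_emitted,
                                 u64 *out)
    {
            u64 seq = (u64)hw_seq | (last_seq & 0xffffffff00000000ULL);

            if (seq < last_seq)     /* low word wrapped, or bogus readback */
                    seq = (u64)hw_seq | (last_emitted & 0xffffffff00000000ULL);

            if (seq <= last_seq || seq > last_emitted)
                    return false;   /* stale or impossible value: ignore it */

            *out = seq;
            return true;
    }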
diff --git a/drivers/gpu/drm/radeon/reg_srcs/r600 b/drivers/gpu/drm/radeon/reg_srcs/r600
index f93e45d869f..20bfbda7b3f 100644
--- a/drivers/gpu/drm/radeon/reg_srcs/r600
+++ b/drivers/gpu/drm/radeon/reg_srcs/r600
@@ -744,7 +744,6 @@ r600 0x9400
0x00028C38 CB_CLRCMP_DST
0x00028C3C CB_CLRCMP_MSK
0x00028C34 CB_CLRCMP_SRC
-0x00028808 CB_COLOR_CONTROL
0x0002842C CB_FOG_BLUE
0x00028428 CB_FOG_GREEN
0x00028424 CB_FOG_RED
diff --git a/drivers/gpu/drm/savage/savage_drv.c b/drivers/gpu/drm/savage/savage_drv.c
index d31d4cca9a4..c5a164337bd 100644
--- a/drivers/gpu/drm/savage/savage_drv.c
+++ b/drivers/gpu/drm/savage/savage_drv.c
@@ -43,6 +43,9 @@ static const struct file_operations savage_driver_fops = {
.mmap = drm_mmap,
.poll = drm_poll,
.fasync = drm_fasync,
+#ifdef CONFIG_COMPAT
+ .compat_ioctl = drm_compat_ioctl,
+#endif
.llseek = noop_llseek,
};
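
Without a compat handler, a 32-bit process on a 64-bit kernel gets -ENOTTY for
every ioctl on these legacy nodes; drm_compat_ioctl() translates the 32-bit ioctl
numbers and forwards to the native path. The same three lines recur for sis, tdfx,
udl and via below; the pattern, on a sketched fops (driver name is a placeholder):

    static const struct file_operations example_driver_fops = {
            .owner          = THIS_MODULE,
            .open           = drm_open,
            .release        = drm_release,
            .unlocked_ioctl = drm_ioctl,
            .mmap           = drm_mmap,
            .poll           = drm_poll,
            .fasync         = drm_fasync,
    #ifdef CONFIG_COMPAT
            .compat_ioctl   = drm_compat_ioctl,  /* 32-bit userspace, 64-bit kernel */
    #endif
            .llseek         = noop_llseek,
    };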
diff --git a/drivers/gpu/drm/sis/sis_drv.c b/drivers/gpu/drm/sis/sis_drv.c
index 7f119870147..867dc03000e 100644
--- a/drivers/gpu/drm/sis/sis_drv.c
+++ b/drivers/gpu/drm/sis/sis_drv.c
@@ -74,6 +74,9 @@ static const struct file_operations sis_driver_fops = {
.mmap = drm_mmap,
.poll = drm_poll,
.fasync = drm_fasync,
+#ifdef CONFIG_COMPAT
+ .compat_ioctl = drm_compat_ioctl,
+#endif
.llseek = noop_llseek,
};
diff --git a/drivers/gpu/drm/tdfx/tdfx_drv.c b/drivers/gpu/drm/tdfx/tdfx_drv.c
index 90f6b13acfa..a7f4d6bd133 100644
--- a/drivers/gpu/drm/tdfx/tdfx_drv.c
+++ b/drivers/gpu/drm/tdfx/tdfx_drv.c
@@ -49,6 +49,9 @@ static const struct file_operations tdfx_driver_fops = {
.mmap = drm_mmap,
.poll = drm_poll,
.fasync = drm_fasync,
+#ifdef CONFIG_COMPAT
+ .compat_ioctl = drm_compat_ioctl,
+#endif
.llseek = noop_llseek,
};
diff --git a/drivers/gpu/drm/udl/udl_drv.c b/drivers/gpu/drm/udl/udl_drv.c
index 6e52069894b..9f84128505b 100644
--- a/drivers/gpu/drm/udl/udl_drv.c
+++ b/drivers/gpu/drm/udl/udl_drv.c
@@ -66,6 +66,9 @@ static const struct file_operations udl_driver_fops = {
.unlocked_ioctl = drm_ioctl,
.release = drm_release,
.fasync = drm_fasync,
+#ifdef CONFIG_COMPAT
+ .compat_ioctl = drm_compat_ioctl,
+#endif
.llseek = noop_llseek,
};
diff --git a/drivers/gpu/drm/via/via_drv.c b/drivers/gpu/drm/via/via_drv.c
index e927b4c052f..af1b914b17e 100644
--- a/drivers/gpu/drm/via/via_drv.c
+++ b/drivers/gpu/drm/via/via_drv.c
@@ -65,6 +65,9 @@ static const struct file_operations via_driver_fops = {
.mmap = drm_mmap,
.poll = drm_poll,
.fasync = drm_fasync,
+#ifdef CONFIG_COMPAT
+ .compat_ioctl = drm_compat_ioctl,
+#endif
.llseek = noop_llseek,
};
diff --git a/drivers/gpu/drm/vmwgfx/Kconfig b/drivers/gpu/drm/vmwgfx/Kconfig
index 794ff67c570..b71bcd0bfbb 100644
--- a/drivers/gpu/drm/vmwgfx/Kconfig
+++ b/drivers/gpu/drm/vmwgfx/Kconfig
@@ -12,3 +12,11 @@ config DRM_VMWGFX
This is a KMS enabled DRM driver for the VMware SVGA2
virtual hardware.
The compiled module will be called "vmwgfx.ko".
+
+config DRM_VMWGFX_FBCON
+ depends on DRM_VMWGFX
+ bool "Enable framebuffer console under vmwgfx by default"
+ help
+ Choose this option if you are shipping a new vmwgfx
+ userspace driver that supports using the kernel driver.
+
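
The new bool exists purely to seed a build-time default: as the vmwgfx_drv.c hunk
below shows, the fbdev switch becomes

    static int enable_fbdev = IS_ENABLED(CONFIG_DRM_VMWGFX_FBCON);

IS_ENABLED(option) evaluates to 1 when the option is built in or modular and 0
otherwise, so a distribution shipping a KMS-capable vmwgfx userspace can turn the
framebuffer console on by default while the driver's enable_fbdev module
parameter still overrides it at load time.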
diff --git a/drivers/gpu/drm/vmwgfx/vmwgfx_drv.c b/drivers/gpu/drm/vmwgfx/vmwgfx_drv.c
index 3e47f757086..09f44c5dd49 100644
--- a/drivers/gpu/drm/vmwgfx/vmwgfx_drv.c
+++ b/drivers/gpu/drm/vmwgfx/vmwgfx_drv.c
@@ -182,8 +182,9 @@ static struct pci_device_id vmw_pci_id_list[] = {
{0x15ad, 0x0405, PCI_ANY_ID, PCI_ANY_ID, 0, 0, VMWGFX_CHIP_SVGAII},
{0, 0, 0}
};
+MODULE_DEVICE_TABLE(pci, vmw_pci_id_list);
-static int enable_fbdev;
+static int enable_fbdev = IS_ENABLED(CONFIG_DRM_VMWGFX_FBCON);
static int vmw_probe(struct pci_dev *, const struct pci_device_id *);
static void vmw_master_init(struct vmw_master *);
@@ -1153,6 +1154,11 @@ static struct drm_driver driver = {
.open = vmw_driver_open,
.preclose = vmw_preclose,
.postclose = vmw_postclose,
+
+ .dumb_create = vmw_dumb_create,
+ .dumb_map_offset = vmw_dumb_map_offset,
+ .dumb_destroy = vmw_dumb_destroy,
+
.fops = &vmwgfx_driver_fops,
.name = VMWGFX_DRIVER_NAME,
.desc = VMWGFX_DRIVER_DESC,
diff --git a/drivers/gpu/drm/vmwgfx/vmwgfx_drv.h b/drivers/gpu/drm/vmwgfx/vmwgfx_drv.h
index d0f2c079ee2..29c984ff7f2 100644
--- a/drivers/gpu/drm/vmwgfx/vmwgfx_drv.h
+++ b/drivers/gpu/drm/vmwgfx/vmwgfx_drv.h
@@ -645,6 +645,16 @@ int vmw_kms_readback(struct vmw_private *dev_priv,
int vmw_kms_update_layout_ioctl(struct drm_device *dev, void *data,
struct drm_file *file_priv);
+int vmw_dumb_create(struct drm_file *file_priv,
+ struct drm_device *dev,
+ struct drm_mode_create_dumb *args);
+
+int vmw_dumb_map_offset(struct drm_file *file_priv,
+ struct drm_device *dev, uint32_t handle,
+ uint64_t *offset);
+int vmw_dumb_destroy(struct drm_file *file_priv,
+ struct drm_device *dev,
+ uint32_t handle);
/**
* Overlay control - vmwgfx_overlay.c
*/
diff --git a/drivers/gpu/drm/vmwgfx/vmwgfx_resource.c b/drivers/gpu/drm/vmwgfx/vmwgfx_resource.c
index 22bf9a21ec7..2c6ffe0e2c0 100644
--- a/drivers/gpu/drm/vmwgfx/vmwgfx_resource.c
+++ b/drivers/gpu/drm/vmwgfx/vmwgfx_resource.c
@@ -1917,3 +1917,76 @@ err_ref:
vmw_resource_unreference(&res);
return ret;
}
+
+
+int vmw_dumb_create(struct drm_file *file_priv,
+ struct drm_device *dev,
+ struct drm_mode_create_dumb *args)
+{
+ struct vmw_private *dev_priv = vmw_priv(dev);
+ struct vmw_master *vmaster = vmw_master(file_priv->master);
+ struct vmw_user_dma_buffer *vmw_user_bo;
+ struct ttm_buffer_object *tmp;
+ int ret;
+
+ args->pitch = args->width * ((args->bpp + 7) / 8);
+ args->size = args->pitch * args->height;
+
+ vmw_user_bo = kzalloc(sizeof(*vmw_user_bo), GFP_KERNEL);
+ if (vmw_user_bo == NULL)
+ return -ENOMEM;
+
+ ret = ttm_read_lock(&vmaster->lock, true);
+ if (ret != 0) {
+ kfree(vmw_user_bo);
+ return ret;
+ }
+
+ ret = vmw_dmabuf_init(dev_priv, &vmw_user_bo->dma, args->size,
+ &vmw_vram_sys_placement, true,
+ &vmw_user_dmabuf_destroy);
+ if (ret != 0)
+ goto out_no_dmabuf;
+
+ tmp = ttm_bo_reference(&vmw_user_bo->dma.base);
+ ret = ttm_base_object_init(vmw_fpriv(file_priv)->tfile,
+ &vmw_user_bo->base,
+ false,
+ ttm_buffer_type,
+ &vmw_user_dmabuf_release, NULL);
+ if (unlikely(ret != 0))
+ goto out_no_base_object;
+
+ args->handle = vmw_user_bo->base.hash.key;
+
+out_no_base_object:
+ ttm_bo_unref(&tmp);
+out_no_dmabuf:
+ ttm_read_unlock(&vmaster->lock);
+ return ret;
+}
+
+int vmw_dumb_map_offset(struct drm_file *file_priv,
+ struct drm_device *dev, uint32_t handle,
+ uint64_t *offset)
+{
+ struct ttm_object_file *tfile = vmw_fpriv(file_priv)->tfile;
+ struct vmw_dma_buffer *out_buf;
+ int ret;
+
+ ret = vmw_user_dmabuf_lookup(tfile, handle, &out_buf);
+ if (ret != 0)
+ return -EINVAL;
+
+ *offset = out_buf->base.addr_space_offset;
+ vmw_dmabuf_unreference(&out_buf);
+ return 0;
+}
+
+int vmw_dumb_destroy(struct drm_file *file_priv,
+ struct drm_device *dev,
+ uint32_t handle)
+{
+ return ttm_ref_object_base_unref(vmw_fpriv(file_priv)->tfile,
+ handle, TTM_REF_USAGE);
+}