author     John W. Linville <linville@tuxdriver.com>   2012-04-12 13:49:28 -0400
committer  John W. Linville <linville@tuxdriver.com>   2012-04-12 13:49:28 -0400
commit     8065248069097dddf9945acfb2081025e9618c16 (patch)
tree       eddf3fb0372ba0f65c01382d386942ea8d18932d /drivers/gpu/drm
parent     e66a8ddff72e85605f2212a0ebc666c7e9116641 (diff)
parent     b4838d12e1f3cb48c2489a0b08733b5dbf848297 (diff)
Merge branch 'master' of git://git.kernel.org/pub/scm/linux/kernel/git/linville/wireless
Diffstat (limited to 'drivers/gpu/drm')
-rw-r--r--  drivers/gpu/drm/Kconfig                      |   1
-rw-r--r--  drivers/gpu/drm/Makefile                     |   2
-rw-r--r--  drivers/gpu/drm/drm_drv.c                    |   4
-rw-r--r--  drivers/gpu/drm/drm_fb_helper.c              |   8
-rw-r--r--  drivers/gpu/drm/drm_fops.c                   |   7
-rw-r--r--  drivers/gpu/drm/drm_gem.c                    |   9
-rw-r--r--  drivers/gpu/drm/drm_prime.c                  | 304
-rw-r--r--  drivers/gpu/drm/gma500/mdfld_dsi_output.c    |   5
-rw-r--r--  drivers/gpu/drm/i915/i915_debugfs.c          |  14
-rw-r--r--  drivers/gpu/drm/i915/i915_dma.c              |  21
-rw-r--r--  drivers/gpu/drm/i915/i915_drv.c              |  20
-rw-r--r--  drivers/gpu/drm/i915/i915_drv.h              |  23
-rw-r--r--  drivers/gpu/drm/i915/i915_gem.c              |  37
-rw-r--r--  drivers/gpu/drm/i915/i915_gem_execbuffer.c   |   2
-rw-r--r--  drivers/gpu/drm/i915/i915_gem_gtt.c          |   9
-rw-r--r--  drivers/gpu/drm/i915/i915_reg.h              |   1
-rw-r--r--  drivers/gpu/drm/i915/intel_bios.c            |  23
-rw-r--r--  drivers/gpu/drm/i915/intel_display.c         |  37
-rw-r--r--  drivers/gpu/drm/i915/intel_drv.h             |   2
-rw-r--r--  drivers/gpu/drm/i915/intel_fb.c              |  10
-rw-r--r--  drivers/gpu/drm/i915/intel_lvds.c            |   8
-rw-r--r--  drivers/gpu/drm/i915/intel_ringbuffer.c      |   2
-rw-r--r--  drivers/gpu/drm/i915/intel_sprite.c          |   3
-rw-r--r--  drivers/gpu/drm/nouveau/Kconfig              |   1
-rw-r--r--  drivers/gpu/drm/nouveau/nouveau_bios.c       |   5
-rw-r--r--  drivers/gpu/drm/nouveau/nouveau_channel.c    |  10
-rw-r--r--  drivers/gpu/drm/nouveau/nouveau_connector.c  |   9
-rw-r--r--  drivers/gpu/drm/nouveau/nouveau_dma.h        |   4
-rw-r--r--  drivers/gpu/drm/nouveau/nouveau_i2c.c        |   6
-rw-r--r--  drivers/gpu/drm/nouveau/nouveau_state.c      |   4
-rw-r--r--  drivers/gpu/drm/radeon/atom.c                |  15
-rw-r--r--  drivers/gpu/drm/radeon/atom.h                |   1
-rw-r--r--  drivers/gpu/drm/radeon/atombios_crtc.c       |   8
-rw-r--r--  drivers/gpu/drm/radeon/atombios_dp.c         |   3
-rw-r--r--  drivers/gpu/drm/radeon/atombios_encoders.c   |   4
-rw-r--r--  drivers/gpu/drm/radeon/evergreen_cs.c        |  98
-rw-r--r--  drivers/gpu/drm/radeon/evergreend.h          |   8
-rw-r--r--  drivers/gpu/drm/radeon/r600_cs.c             | 391
-rw-r--r--  drivers/gpu/drm/radeon/r600d.h               |   8
-rw-r--r--  drivers/gpu/drm/radeon/radeon_object.c       |  13
-rw-r--r--  drivers/gpu/drm/radeon/reg_srcs/cayman       |   1
-rw-r--r--  drivers/gpu/drm/radeon/reg_srcs/evergreen    |   1
-rw-r--r--  drivers/gpu/drm/radeon/reg_srcs/r600         |   1
-rw-r--r--  drivers/gpu/drm/udl/udl_drv.c                |   2
-rw-r--r--  drivers/gpu/drm/udl/udl_drv.h                |   1
-rw-r--r--  drivers/gpu/drm/udl/udl_gem.c                |  14
46 files changed, 962 insertions, 198 deletions
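
Editor's note: the headline change in this merge is the new DRM PRIME buffer-sharing layer (drm_prime.c plus the two ioctls wired into drm_drv.c below). As an illustration only — not part of the patch — here is a minimal userspace sketch of the two new ioctls. struct drm_prime_handle, DRM_CLOEXEC and the DRM_IOCTL_PRIME_* macros live in the UAPI header drm.h, which falls outside this drivers/gpu/drm diffstat, and the GEM handle is assumed to come from a driver-specific allocation ioctl.

#include <sys/ioctl.h>
#include <drm/drm.h>

/* Export: GEM handle on one device -> dma-buf fd that can cross processes/devices. */
static int prime_export(int drm_fd, unsigned int gem_handle)
{
	struct drm_prime_handle args = {
		.handle = gem_handle,
		.flags  = DRM_CLOEXEC,	/* the only flag the ioctl accepts */
	};

	if (ioctl(drm_fd, DRM_IOCTL_PRIME_HANDLE_TO_FD, &args) < 0)
		return -1;
	return args.fd;		/* pass this fd to another process or device */
}

/* Import: dma-buf fd -> GEM handle usable on the importing device. */
static int prime_import(int drm_fd, int dmabuf_fd, unsigned int *gem_handle)
{
	struct drm_prime_handle args = { .fd = dmabuf_fd };

	if (ioctl(drm_fd, DRM_IOCTL_PRIME_FD_TO_HANDLE, &args) < 0)
		return -1;
	*gem_handle = args.handle;
	return 0;
}
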
diff --git a/drivers/gpu/drm/Kconfig b/drivers/gpu/drm/Kconfig
index cc1148837e2..e354bc0b052 100644
--- a/drivers/gpu/drm/Kconfig
+++ b/drivers/gpu/drm/Kconfig
@@ -9,6 +9,7 @@ menuconfig DRM
depends on (AGP || AGP=n) && !EMULATED_CMPXCHG && MMU
select I2C
select I2C_ALGOBIT
+ select DMA_SHARED_BUFFER
help
Kernel-level support for the Direct Rendering Infrastructure (DRI)
introduced in XFree86 4.0. If you say Y here, you need to select
diff --git a/drivers/gpu/drm/Makefile b/drivers/gpu/drm/Makefile
index a858532806a..c20da5bda35 100644
--- a/drivers/gpu/drm/Makefile
+++ b/drivers/gpu/drm/Makefile
@@ -12,7 +12,7 @@ drm-y := drm_auth.o drm_buffer.o drm_bufs.o drm_cache.o \
drm_platform.o drm_sysfs.o drm_hashtab.o drm_mm.o \
drm_crtc.o drm_modes.o drm_edid.o \
drm_info.o drm_debugfs.o drm_encoder_slave.o \
- drm_trace_points.o drm_global.o
+ drm_trace_points.o drm_global.o drm_prime.o
drm-$(CONFIG_COMPAT) += drm_ioc32.o
diff --git a/drivers/gpu/drm/drm_drv.c b/drivers/gpu/drm/drm_drv.c
index 0b65fbc8a63..6116e3b7539 100644
--- a/drivers/gpu/drm/drm_drv.c
+++ b/drivers/gpu/drm/drm_drv.c
@@ -136,6 +136,10 @@ static struct drm_ioctl_desc drm_ioctls[] = {
DRM_IOCTL_DEF(DRM_IOCTL_GEM_OPEN, drm_gem_open_ioctl, DRM_AUTH|DRM_UNLOCKED),
DRM_IOCTL_DEF(DRM_IOCTL_MODE_GETRESOURCES, drm_mode_getresources, DRM_CONTROL_ALLOW|DRM_UNLOCKED),
+
+ DRM_IOCTL_DEF(DRM_IOCTL_PRIME_HANDLE_TO_FD, drm_prime_handle_to_fd_ioctl, DRM_AUTH|DRM_UNLOCKED),
+ DRM_IOCTL_DEF(DRM_IOCTL_PRIME_FD_TO_HANDLE, drm_prime_fd_to_handle_ioctl, DRM_AUTH|DRM_UNLOCKED),
+
DRM_IOCTL_DEF(DRM_IOCTL_MODE_GETPLANERESOURCES, drm_mode_getplane_res, DRM_MASTER|DRM_CONTROL_ALLOW|DRM_UNLOCKED),
DRM_IOCTL_DEF(DRM_IOCTL_MODE_GETCRTC, drm_mode_getcrtc, DRM_CONTROL_ALLOW|DRM_UNLOCKED),
DRM_IOCTL_DEF(DRM_IOCTL_MODE_SETCRTC, drm_mode_setcrtc, DRM_MASTER|DRM_CONTROL_ALLOW|DRM_UNLOCKED),
diff --git a/drivers/gpu/drm/drm_fb_helper.c b/drivers/gpu/drm/drm_fb_helper.c
index 7740dd26f00..a0d6e894d97 100644
--- a/drivers/gpu/drm/drm_fb_helper.c
+++ b/drivers/gpu/drm/drm_fb_helper.c
@@ -559,9 +559,13 @@ int drm_fb_helper_check_var(struct fb_var_screeninfo *var,
return -EINVAL;
/* Need to resize the fb object !!! */
- if (var->bits_per_pixel > fb->bits_per_pixel || var->xres > fb->width || var->yres > fb->height) {
+ if (var->bits_per_pixel > fb->bits_per_pixel ||
+ var->xres > fb->width || var->yres > fb->height ||
+ var->xres_virtual > fb->width || var->yres_virtual > fb->height) {
DRM_DEBUG("fb userspace requested width/height/bpp is greater than current fb "
- "object %dx%d-%d > %dx%d-%d\n", var->xres, var->yres, var->bits_per_pixel,
+ "request %dx%d-%d (virtual %dx%d) > %dx%d-%d\n",
+ var->xres, var->yres, var->bits_per_pixel,
+ var->xres_virtual, var->yres_virtual,
fb->width, fb->height, fb->bits_per_pixel);
return -EINVAL;
}
diff --git a/drivers/gpu/drm/drm_fops.c b/drivers/gpu/drm/drm_fops.c
index 7348a3dab25..cdfbf27b2b3 100644
--- a/drivers/gpu/drm/drm_fops.c
+++ b/drivers/gpu/drm/drm_fops.c
@@ -271,6 +271,9 @@ static int drm_open_helper(struct inode *inode, struct file *filp,
if (dev->driver->driver_features & DRIVER_GEM)
drm_gem_open(dev, priv);
+ if (drm_core_check_feature(dev, DRIVER_PRIME))
+ drm_prime_init_file_private(&priv->prime);
+
if (dev->driver->open) {
ret = dev->driver->open(dev, priv);
if (ret < 0)
@@ -571,6 +574,10 @@ int drm_release(struct inode *inode, struct file *filp)
if (dev->driver->postclose)
dev->driver->postclose(dev, file_priv);
+
+ if (drm_core_check_feature(dev, DRIVER_PRIME))
+ drm_prime_destroy_file_private(&file_priv->prime);
+
kfree(file_priv);
/* ========================================================
diff --git a/drivers/gpu/drm/drm_gem.c b/drivers/gpu/drm/drm_gem.c
index 0ef358e5324..83114b5e3ce 100644
--- a/drivers/gpu/drm/drm_gem.c
+++ b/drivers/gpu/drm/drm_gem.c
@@ -35,6 +35,7 @@
#include <linux/mman.h>
#include <linux/pagemap.h>
#include <linux/shmem_fs.h>
+#include <linux/dma-buf.h>
#include "drmP.h"
/** @file drm_gem.c
@@ -232,6 +233,10 @@ drm_gem_handle_delete(struct drm_file *filp, u32 handle)
idr_remove(&filp->object_idr, handle);
spin_unlock(&filp->table_lock);
+ if (obj->import_attach)
+ drm_prime_remove_imported_buf_handle(&filp->prime,
+ obj->import_attach->dmabuf);
+
if (dev->driver->gem_close_object)
dev->driver->gem_close_object(obj, filp);
drm_gem_object_handle_unreference_unlocked(obj);
@@ -527,6 +532,10 @@ drm_gem_object_release_handle(int id, void *ptr, void *data)
struct drm_gem_object *obj = ptr;
struct drm_device *dev = obj->dev;
+ if (obj->import_attach)
+ drm_prime_remove_imported_buf_handle(&file_priv->prime,
+ obj->import_attach->dmabuf);
+
if (dev->driver->gem_close_object)
dev->driver->gem_close_object(obj, file_priv);
diff --git a/drivers/gpu/drm/drm_prime.c b/drivers/gpu/drm/drm_prime.c
new file mode 100644
index 00000000000..1bdf2b54eaf
--- /dev/null
+++ b/drivers/gpu/drm/drm_prime.c
@@ -0,0 +1,304 @@
+/*
+ * Copyright © 2012 Red Hat
+ *
+ * Permission is hereby granted, free of charge, to any person obtaining a
+ * copy of this software and associated documentation files (the "Software"),
+ * to deal in the Software without restriction, including without limitation
+ * the rights to use, copy, modify, merge, publish, distribute, sublicense,
+ * and/or sell copies of the Software, and to permit persons to whom the
+ * Software is furnished to do so, subject to the following conditions:
+ *
+ * The above copyright notice and this permission notice (including the next
+ * paragraph) shall be included in all copies or substantial portions of the
+ * Software.
+ *
+ * THE SOFTWARE IS PROVIDED "AS IS", WITHOUT WARRANTY OF ANY KIND, EXPRESS OR
+ * IMPLIED, INCLUDING BUT NOT LIMITED TO THE WARRANTIES OF MERCHANTABILITY,
+ * FITNESS FOR A PARTICULAR PURPOSE AND NONINFRINGEMENT. IN NO EVENT SHALL
+ * THE AUTHORS OR COPYRIGHT HOLDERS BE LIABLE FOR ANY CLAIM, DAMAGES OR OTHER
+ * LIABILITY, WHETHER IN AN ACTION OF CONTRACT, TORT OR OTHERWISE, ARISING
+ * FROM, OUT OF OR IN CONNECTION WITH THE SOFTWARE OR THE USE OR OTHER DEALINGS
+ * IN THE SOFTWARE.
+ *
+ * Authors:
+ * Dave Airlie <airlied@redhat.com>
+ * Rob Clark <rob.clark@linaro.org>
+ *
+ */
+
+#include <linux/export.h>
+#include <linux/dma-buf.h>
+#include "drmP.h"
+
+/*
+ * DMA-BUF/GEM Object references and lifetime overview:
+ *
+ * On the export the dma_buf holds a reference to the exporting GEM
+ * object. It takes this reference in handle_to_fd_ioctl, when it
+ * first calls .prime_export and stores the exporting GEM object in
+ * the dma_buf priv. This reference is released when the dma_buf
+ * object goes away in the driver .release function.
+ *
+ * On the import the importing GEM object holds a reference to the
+ * dma_buf (which in turn holds a ref to the exporting GEM object).
+ * It takes that reference in the fd_to_handle ioctl.
+ * It calls dma_buf_get, creates an attachment to it and stores the
+ * attachment in the GEM object. When this attachment is destroyed
+ * when the imported object is destroyed, we remove the attachment
+ * and drop the reference to the dma_buf.
+ *
+ * Thus the chain of references always flows in one direction
+ * (avoiding loops): importing_gem -> dmabuf -> exporting_gem
+ *
+ * Self-importing: if userspace is using PRIME as a replacement for flink
+ * then it will get a fd->handle request for a GEM object that it created.
+ * Drivers should detect this situation and return back the gem object
+ * from the dma-buf private.
+ */
+
+struct drm_prime_member {
+ struct list_head entry;
+ struct dma_buf *dma_buf;
+ uint32_t handle;
+};
+
+int drm_gem_prime_handle_to_fd(struct drm_device *dev,
+ struct drm_file *file_priv, uint32_t handle, uint32_t flags,
+ int *prime_fd)
+{
+ struct drm_gem_object *obj;
+ void *buf;
+
+ obj = drm_gem_object_lookup(dev, file_priv, handle);
+ if (!obj)
+ return -ENOENT;
+
+ mutex_lock(&file_priv->prime.lock);
+ /* re-export the original imported object */
+ if (obj->import_attach) {
+ get_dma_buf(obj->import_attach->dmabuf);
+ *prime_fd = dma_buf_fd(obj->import_attach->dmabuf, flags);
+ drm_gem_object_unreference_unlocked(obj);
+ mutex_unlock(&file_priv->prime.lock);
+ return 0;
+ }
+
+ if (obj->export_dma_buf) {
+ get_dma_buf(obj->export_dma_buf);
+ *prime_fd = dma_buf_fd(obj->export_dma_buf, flags);
+ drm_gem_object_unreference_unlocked(obj);
+ } else {
+ buf = dev->driver->gem_prime_export(dev, obj, flags);
+ if (IS_ERR(buf)) {
+ /* normally the created dma-buf takes ownership of the ref,
+ * but if that fails then drop the ref
+ */
+ drm_gem_object_unreference_unlocked(obj);
+ mutex_unlock(&file_priv->prime.lock);
+ return PTR_ERR(buf);
+ }
+ obj->export_dma_buf = buf;
+ *prime_fd = dma_buf_fd(buf, flags);
+ }
+ mutex_unlock(&file_priv->prime.lock);
+ return 0;
+}
+EXPORT_SYMBOL(drm_gem_prime_handle_to_fd);
+
+int drm_gem_prime_fd_to_handle(struct drm_device *dev,
+ struct drm_file *file_priv, int prime_fd, uint32_t *handle)
+{
+ struct dma_buf *dma_buf;
+ struct drm_gem_object *obj;
+ int ret;
+
+ dma_buf = dma_buf_get(prime_fd);
+ if (IS_ERR(dma_buf))
+ return PTR_ERR(dma_buf);
+
+ mutex_lock(&file_priv->prime.lock);
+
+ ret = drm_prime_lookup_imported_buf_handle(&file_priv->prime,
+ dma_buf, handle);
+ if (!ret) {
+ ret = 0;
+ goto out_put;
+ }
+
+ /* never seen this one, need to import */
+ obj = dev->driver->gem_prime_import(dev, dma_buf);
+ if (IS_ERR(obj)) {
+ ret = PTR_ERR(obj);
+ goto out_put;
+ }
+
+ ret = drm_gem_handle_create(file_priv, obj, handle);
+ drm_gem_object_unreference_unlocked(obj);
+ if (ret)
+ goto out_put;
+
+ ret = drm_prime_add_imported_buf_handle(&file_priv->prime,
+ dma_buf, *handle);
+ if (ret)
+ goto fail;
+
+ mutex_unlock(&file_priv->prime.lock);
+ return 0;
+
+fail:
+ /* hmm, if driver attached, we are relying on the free-object path
+ * to detach.. which seems ok..
+ */
+ drm_gem_object_handle_unreference_unlocked(obj);
+out_put:
+ dma_buf_put(dma_buf);
+ mutex_unlock(&file_priv->prime.lock);
+ return ret;
+}
+EXPORT_SYMBOL(drm_gem_prime_fd_to_handle);
+
+int drm_prime_handle_to_fd_ioctl(struct drm_device *dev, void *data,
+ struct drm_file *file_priv)
+{
+ struct drm_prime_handle *args = data;
+ uint32_t flags;
+
+ if (!drm_core_check_feature(dev, DRIVER_PRIME))
+ return -EINVAL;
+
+ if (!dev->driver->prime_handle_to_fd)
+ return -ENOSYS;
+
+ /* check flags are valid */
+ if (args->flags & ~DRM_CLOEXEC)
+ return -EINVAL;
+
+ /* we only want to pass DRM_CLOEXEC which is == O_CLOEXEC */
+ flags = args->flags & DRM_CLOEXEC;
+
+ return dev->driver->prime_handle_to_fd(dev, file_priv,
+ args->handle, flags, &args->fd);
+}
+
+int drm_prime_fd_to_handle_ioctl(struct drm_device *dev, void *data,
+ struct drm_file *file_priv)
+{
+ struct drm_prime_handle *args = data;
+
+ if (!drm_core_check_feature(dev, DRIVER_PRIME))
+ return -EINVAL;
+
+ if (!dev->driver->prime_fd_to_handle)
+ return -ENOSYS;
+
+ return dev->driver->prime_fd_to_handle(dev, file_priv,
+ args->fd, &args->handle);
+}
+
+/*
+ * drm_prime_pages_to_sg
+ *
+ * this helper creates an sg table object from a set of pages
+ * the driver is responsible for mapping the pages into the
+ * importers address space
+ */
+struct sg_table *drm_prime_pages_to_sg(struct page **pages, int nr_pages)
+{
+ struct sg_table *sg = NULL;
+ struct scatterlist *iter;
+ int i;
+ int ret;
+
+ sg = kmalloc(sizeof(struct sg_table), GFP_KERNEL);
+ if (!sg)
+ goto out;
+
+ ret = sg_alloc_table(sg, nr_pages, GFP_KERNEL);
+ if (ret)
+ goto out;
+
+ for_each_sg(sg->sgl, iter, nr_pages, i)
+ sg_set_page(iter, pages[i], PAGE_SIZE, 0);
+
+ return sg;
+out:
+ kfree(sg);
+ return NULL;
+}
+EXPORT_SYMBOL(drm_prime_pages_to_sg);
+
+/* helper function to cleanup a GEM/prime object */
+void drm_prime_gem_destroy(struct drm_gem_object *obj, struct sg_table *sg)
+{
+ struct dma_buf_attachment *attach;
+ struct dma_buf *dma_buf;
+ attach = obj->import_attach;
+ if (sg)
+ dma_buf_unmap_attachment(attach, sg, DMA_BIDIRECTIONAL);
+ dma_buf = attach->dmabuf;
+ dma_buf_detach(attach->dmabuf, attach);
+ /* remove the reference */
+ dma_buf_put(dma_buf);
+}
+EXPORT_SYMBOL(drm_prime_gem_destroy);
+
+void drm_prime_init_file_private(struct drm_prime_file_private *prime_fpriv)
+{
+ INIT_LIST_HEAD(&prime_fpriv->head);
+ mutex_init(&prime_fpriv->lock);
+}
+EXPORT_SYMBOL(drm_prime_init_file_private);
+
+void drm_prime_destroy_file_private(struct drm_prime_file_private *prime_fpriv)
+{
+ struct drm_prime_member *member, *safe;
+ list_for_each_entry_safe(member, safe, &prime_fpriv->head, entry) {
+ list_del(&member->entry);
+ kfree(member);
+ }
+}
+EXPORT_SYMBOL(drm_prime_destroy_file_private);
+
+int drm_prime_add_imported_buf_handle(struct drm_prime_file_private *prime_fpriv, struct dma_buf *dma_buf, uint32_t handle)
+{
+ struct drm_prime_member *member;
+
+ member = kmalloc(sizeof(*member), GFP_KERNEL);
+ if (!member)
+ return -ENOMEM;
+
+ member->dma_buf = dma_buf;
+ member->handle = handle;
+ list_add(&member->entry, &prime_fpriv->head);
+ return 0;
+}
+EXPORT_SYMBOL(drm_prime_add_imported_buf_handle);
+
+int drm_prime_lookup_imported_buf_handle(struct drm_prime_file_private *prime_fpriv, struct dma_buf *dma_buf, uint32_t *handle)
+{
+ struct drm_prime_member *member;
+
+ list_for_each_entry(member, &prime_fpriv->head, entry) {
+ if (member->dma_buf == dma_buf) {
+ *handle = member->handle;
+ return 0;
+ }
+ }
+ return -ENOENT;
+}
+EXPORT_SYMBOL(drm_prime_lookup_imported_buf_handle);
+
+void drm_prime_remove_imported_buf_handle(struct drm_prime_file_private *prime_fpriv, struct dma_buf *dma_buf)
+{
+ struct drm_prime_member *member, *safe;
+
+ mutex_lock(&prime_fpriv->lock);
+ list_for_each_entry_safe(member, safe, &prime_fpriv->head, entry) {
+ if (member->dma_buf == dma_buf) {
+ list_del(&member->entry);
+ kfree(member);
+ }
+ }
+ mutex_unlock(&prime_fpriv->lock);
+}
+EXPORT_SYMBOL(drm_prime_remove_imported_buf_handle);
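
Editor's note: the helpers above expect each driver to advertise DRIVER_PRIME and to fill in four struct drm_driver hooks declared in drmP.h (outside this diffstat). Below is a hedged sketch of that wiring, with callback signatures inferred from the calls made in drm_gem_prime_handle_to_fd()/drm_gem_prime_fd_to_handle(); the foo_* helpers are hypothetical placeholders for driver-specific dma-buf export/import code, not part of this patch.

static struct dma_buf *foo_gem_prime_export(struct drm_device *dev,
					    struct drm_gem_object *obj, int flags)
{
	/* Wrap the object's backing storage in a dma-buf; on success the
	 * dma-buf takes over the GEM reference held by the caller. */
	return foo_dmabuf_from_gem(obj, flags);		/* hypothetical */
}

static struct drm_gem_object *foo_gem_prime_import(struct drm_device *dev,
						   struct dma_buf *dma_buf)
{
	/* Detect self-import (the dma-buf's priv is one of our own GEM
	 * objects), otherwise attach to the dma-buf, store the attachment in
	 * obj->import_attach and return the new GEM object. */
	return foo_gem_from_dmabuf(dev, dma_buf);	/* hypothetical */
}

static struct drm_driver foo_driver = {
	.driver_features    = DRIVER_GEM | DRIVER_PRIME,
	.prime_handle_to_fd = drm_gem_prime_handle_to_fd,
	.prime_fd_to_handle = drm_gem_prime_fd_to_handle,
	.gem_prime_export   = foo_gem_prime_export,
	.gem_prime_import   = foo_gem_prime_import,
	/* ... remaining driver callbacks ... */
};
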
diff --git a/drivers/gpu/drm/gma500/mdfld_dsi_output.c b/drivers/gpu/drm/gma500/mdfld_dsi_output.c
index 4c2cb4a8ad9..5675d93b420 100644
--- a/drivers/gpu/drm/gma500/mdfld_dsi_output.c
+++ b/drivers/gpu/drm/gma500/mdfld_dsi_output.c
@@ -244,7 +244,6 @@ static int mdfld_dsi_connector_set_property(struct drm_connector *connector,
uint64_t value)
{
struct drm_encoder *encoder = connector->encoder;
- struct backlight_device *psb_bd;
if (!strcmp(property->name, "scaling mode") && encoder) {
struct psb_intel_crtc *psb_crtc =
@@ -301,11 +300,15 @@ static int mdfld_dsi_connector_set_property(struct drm_connector *connector,
value))
goto set_prop_error;
else {
+#ifdef CONFIG_BACKLIGHT_CLASS_DEVICE
+ struct backlight_device *psb_bd;
+
psb_bd = mdfld_get_backlight_device();
if (psb_bd) {
psb_bd->props.brightness = value;
mdfld_set_brightness(psb_bd);
}
+#endif
}
}
set_prop_done:
diff --git a/drivers/gpu/drm/i915/i915_debugfs.c b/drivers/gpu/drm/i915/i915_debugfs.c
index fdb7ccefffb..b505b70dba0 100644
--- a/drivers/gpu/drm/i915/i915_debugfs.c
+++ b/drivers/gpu/drm/i915/i915_debugfs.c
@@ -1502,14 +1502,6 @@ static int i915_ppgtt_info(struct seq_file *m, void *data)
return 0;
}
-static int
-i915_debugfs_common_open(struct inode *inode,
- struct file *filp)
-{
- filp->private_data = inode->i_private;
- return 0;
-}
-
static ssize_t
i915_wedged_read(struct file *filp,
char __user *ubuf,
@@ -1560,7 +1552,7 @@ i915_wedged_write(struct file *filp,
static const struct file_operations i915_wedged_fops = {
.owner = THIS_MODULE,
- .open = i915_debugfs_common_open,
+ .open = simple_open,
.read = i915_wedged_read,
.write = i915_wedged_write,
.llseek = default_llseek,
@@ -1622,7 +1614,7 @@ i915_max_freq_write(struct file *filp,
static const struct file_operations i915_max_freq_fops = {
.owner = THIS_MODULE,
- .open = i915_debugfs_common_open,
+ .open = simple_open,
.read = i915_max_freq_read,
.write = i915_max_freq_write,
.llseek = default_llseek,
@@ -1693,7 +1685,7 @@ i915_cache_sharing_write(struct file *filp,
static const struct file_operations i915_cache_sharing_fops = {
.owner = THIS_MODULE,
- .open = i915_debugfs_common_open,
+ .open = simple_open,
.read = i915_cache_sharing_read,
.write = i915_cache_sharing_write,
.llseek = default_llseek,
diff --git a/drivers/gpu/drm/i915/i915_dma.c b/drivers/gpu/drm/i915/i915_dma.c
index 9341eb8ce93..785f67f963e 100644
--- a/drivers/gpu/drm/i915/i915_dma.c
+++ b/drivers/gpu/drm/i915/i915_dma.c
@@ -1183,6 +1183,21 @@ static bool i915_switcheroo_can_switch(struct pci_dev *pdev)
return can_switch;
}
+static bool
+intel_enable_ppgtt(struct drm_device *dev)
+{
+ if (i915_enable_ppgtt >= 0)
+ return i915_enable_ppgtt;
+
+#ifdef CONFIG_INTEL_IOMMU
+ /* Disable ppgtt on SNB if VT-d is on. */
+ if (INTEL_INFO(dev)->gen == 6 && intel_iommu_gfx_mapped)
+ return false;
+#endif
+
+ return true;
+}
+
static int i915_load_gem_init(struct drm_device *dev)
{
struct drm_i915_private *dev_priv = dev->dev_private;
@@ -1197,7 +1212,7 @@ static int i915_load_gem_init(struct drm_device *dev)
drm_mm_init(&dev_priv->mm.stolen, 0, prealloc_size);
mutex_lock(&dev->struct_mutex);
- if (i915_enable_ppgtt && HAS_ALIASING_PPGTT(dev)) {
+ if (intel_enable_ppgtt(dev) && HAS_ALIASING_PPGTT(dev)) {
/* PPGTT pdes are stolen from global gtt ptes, so shrink the
* aperture accordingly when using aliasing ppgtt. */
gtt_size -= I915_PPGTT_PD_ENTRIES*PAGE_SIZE;
@@ -1207,8 +1222,10 @@ static int i915_load_gem_init(struct drm_device *dev)
i915_gem_do_init(dev, 0, mappable_size, gtt_size);
ret = i915_gem_init_aliasing_ppgtt(dev);
- if (ret)
+ if (ret) {
+ mutex_unlock(&dev->struct_mutex);
return ret;
+ }
} else {
/* Let GEM Manage all of the aperture.
*
diff --git a/drivers/gpu/drm/i915/i915_drv.c b/drivers/gpu/drm/i915/i915_drv.c
index 0694e170a33..dfa55e7478f 100644
--- a/drivers/gpu/drm/i915/i915_drv.c
+++ b/drivers/gpu/drm/i915/i915_drv.c
@@ -66,7 +66,11 @@ MODULE_PARM_DESC(semaphores,
int i915_enable_rc6 __read_mostly = -1;
module_param_named(i915_enable_rc6, i915_enable_rc6, int, 0600);
MODULE_PARM_DESC(i915_enable_rc6,
- "Enable power-saving render C-state 6 (default: -1 (use per-chip default)");
+ "Enable power-saving render C-state 6. "
+ "Different stages can be selected via bitmask values "
+ "(0 = disable; 1 = enable rc6; 2 = enable deep rc6; 4 = enable deepest rc6). "
+ "For example, 3 would enable rc6 and deep rc6, and 7 would enable everything. "
+ "default: -1 (use per-chip default)");
int i915_enable_fbc __read_mostly = -1;
module_param_named(i915_enable_fbc, i915_enable_fbc, int, 0600);
@@ -103,8 +107,8 @@ MODULE_PARM_DESC(enable_hangcheck,
"WARNING: Disabling this can cause system wide hangs. "
"(default: true)");
-bool i915_enable_ppgtt __read_mostly = 1;
-module_param_named(i915_enable_ppgtt, i915_enable_ppgtt, bool, 0600);
+int i915_enable_ppgtt __read_mostly = -1;
+module_param_named(i915_enable_ppgtt, i915_enable_ppgtt, int, 0600);
MODULE_PARM_DESC(i915_enable_ppgtt,
"Enable PPGTT (default: true)");
@@ -292,6 +296,7 @@ static const struct pci_device_id pciidlist[] = { /* aka */
INTEL_VGA_DEVICE(0x0152, &intel_ivybridge_d_info), /* GT1 desktop */
INTEL_VGA_DEVICE(0x0162, &intel_ivybridge_d_info), /* GT2 desktop */
INTEL_VGA_DEVICE(0x015a, &intel_ivybridge_d_info), /* GT1 server */
+ INTEL_VGA_DEVICE(0x016a, &intel_ivybridge_d_info), /* GT2 server */
{0, 0, 0}
};
@@ -467,6 +472,10 @@ static int i915_drm_freeze(struct drm_device *dev)
/* Modeset on resume, not lid events */
dev_priv->modeset_on_lid = 0;
+ console_lock();
+ intel_fbdev_set_suspend(dev, 1);
+ console_unlock();
+
return 0;
}
@@ -529,7 +538,9 @@ static int i915_drm_thaw(struct drm_device *dev)
drm_irq_install(dev);
/* Resume the modeset for every activated CRTC */
+ mutex_lock(&dev->mode_config.mutex);
drm_helper_resume_force_mode(dev);
+ mutex_unlock(&dev->mode_config.mutex);
if (IS_IRONLAKE_M(dev))
ironlake_enable_rc6(dev);
@@ -539,6 +550,9 @@ static int i915_drm_thaw(struct drm_device *dev)
dev_priv->modeset_on_lid = 0;
+ console_lock();
+ intel_fbdev_set_suspend(dev, 0);
+ console_unlock();
return error;
}
diff --git a/drivers/gpu/drm/i915/i915_drv.h b/drivers/gpu/drm/i915/i915_drv.h
index c0f19f57200..5fabc6c31fe 100644
--- a/drivers/gpu/drm/i915/i915_drv.h
+++ b/drivers/gpu/drm/i915/i915_drv.h
@@ -1053,6 +1053,27 @@ struct drm_i915_file_private {
#include "i915_trace.h"
+/**
+ * RC6 is a special power stage which allows the GPU to enter an very
+ * low-voltage mode when idle, using down to 0V while at this stage. This
+ * stage is entered automatically when the GPU is idle when RC6 support is
+ * enabled, and as soon as new workload arises GPU wakes up automatically as well.
+ *
+ * There are different RC6 modes available in Intel GPU, which differentiate
+ * among each other with the latency required to enter and leave RC6 and
+ * voltage consumed by the GPU in different states.
+ *
+ * The combination of the following flags define which states GPU is allowed
+ * to enter, while RC6 is the normal RC6 state, RC6p is the deep RC6, and
+ * RC6pp is deepest RC6. Their support by hardware varies according to the
+ * GPU, BIOS, chipset and platform. RC6 is usually the safest one and the one
+ * which brings the most power savings; deeper states save more power, but
+ * require higher latency to switch to and wake up.
+ */
+#define INTEL_RC6_ENABLE (1<<0)
+#define INTEL_RC6p_ENABLE (1<<1)
+#define INTEL_RC6pp_ENABLE (1<<2)
+
extern struct drm_ioctl_desc i915_ioctls[];
extern int i915_max_ioctl;
extern unsigned int i915_fbpercrtc __always_unused;
@@ -1065,7 +1086,7 @@ extern int i915_vbt_sdvo_panel_type __read_mostly;
extern int i915_enable_rc6 __read_mostly;
extern int i915_enable_fbc __read_mostly;
extern bool i915_enable_hangcheck __read_mostly;
-extern bool i915_enable_ppgtt __read_mostly;
+extern int i915_enable_ppgtt __read_mostly;
extern int i915_suspend(struct drm_device *dev, pm_message_t state);
extern int i915_resume(struct drm_device *dev);
diff --git a/drivers/gpu/drm/i915/i915_gem.c b/drivers/gpu/drm/i915/i915_gem.c
index 1f441f5c240..4c65c639f77 100644
--- a/drivers/gpu/drm/i915/i915_gem.c
+++ b/drivers/gpu/drm/i915/i915_gem.c
@@ -1472,16 +1472,19 @@ i915_gem_object_move_to_active(struct drm_i915_gem_object *obj,
list_move_tail(&obj->ring_list, &ring->active_list);
obj->last_rendering_seqno = seqno;
- if (obj->fenced_gpu_access) {
- struct drm_i915_fence_reg *reg;
-
- BUG_ON(obj->fence_reg == I915_FENCE_REG_NONE);
+ if (obj->fenced_gpu_access) {
obj->last_fenced_seqno = seqno;
obj->last_fenced_ring = ring;
- reg = &dev_priv->fence_regs[obj->fence_reg];
- list_move_tail(&reg->lru_list, &dev_priv->mm.fence_list);
+ /* Bump MRU to take account of the delayed flush */
+ if (obj->fence_reg != I915_FENCE_REG_NONE) {
+ struct drm_i915_fence_reg *reg;
+
+ reg = &dev_priv->fence_regs[obj->fence_reg];
+ list_move_tail(&reg->lru_list,
+ &dev_priv->mm.fence_list);
+ }
}
}
@@ -3754,12 +3757,32 @@ void i915_gem_init_ppgtt(struct drm_device *dev)
drm_i915_private_t *dev_priv = dev->dev_private;
uint32_t pd_offset;
struct intel_ring_buffer *ring;
+ struct i915_hw_ppgtt *ppgtt = dev_priv->mm.aliasing_ppgtt;
+ uint32_t __iomem *pd_addr;
+ uint32_t pd_entry;
int i;
if (!dev_priv->mm.aliasing_ppgtt)
return;
- pd_offset = dev_priv->mm.aliasing_ppgtt->pd_offset;
+
+ pd_addr = dev_priv->mm.gtt->gtt + ppgtt->pd_offset/sizeof(uint32_t);
+ for (i = 0; i < ppgtt->num_pd_entries; i++) {
+ dma_addr_t pt_addr;
+
+ if (dev_priv->mm.gtt->needs_dmar)
+ pt_addr = ppgtt->pt_dma_addr[i];
+ else
+ pt_addr = page_to_phys(ppgtt->pt_pages[i]);
+
+ pd_entry = GEN6_PDE_ADDR_ENCODE(pt_addr);
+ pd_entry |= GEN6_PDE_VALID;
+
+ writel(pd_entry, pd_addr + i);
+ }
+ readl(pd_addr);
+
+ pd_offset = ppgtt->pd_offset;
pd_offset /= 64; /* in cachelines, */
pd_offset <<= 16;
diff --git a/drivers/gpu/drm/i915/i915_gem_execbuffer.c b/drivers/gpu/drm/i915/i915_gem_execbuffer.c
index 81687af0089..f51a696486c 100644
--- a/drivers/gpu/drm/i915/i915_gem_execbuffer.c
+++ b/drivers/gpu/drm/i915/i915_gem_execbuffer.c
@@ -498,8 +498,8 @@ pin_and_fence_object(struct drm_i915_gem_object *obj,
if (ret)
goto err_unpin;
}
+ obj->pending_fenced_gpu_access = true;
}
- obj->pending_fenced_gpu_access = need_fence;
}
entry->offset = obj->gtt_offset;
diff --git a/drivers/gpu/drm/i915/i915_gem_gtt.c b/drivers/gpu/drm/i915/i915_gem_gtt.c
index 2eacd78bb93..a135c61f411 100644
--- a/drivers/gpu/drm/i915/i915_gem_gtt.c
+++ b/drivers/gpu/drm/i915/i915_gem_gtt.c
@@ -65,9 +65,7 @@ int i915_gem_init_aliasing_ppgtt(struct drm_device *dev)
{
struct drm_i915_private *dev_priv = dev->dev_private;
struct i915_hw_ppgtt *ppgtt;
- uint32_t pd_entry;
unsigned first_pd_entry_in_global_pt;
- uint32_t __iomem *pd_addr;
int i;
int ret = -ENOMEM;
@@ -100,7 +98,6 @@ int i915_gem_init_aliasing_ppgtt(struct drm_device *dev)
goto err_pt_alloc;
}
- pd_addr = dev_priv->mm.gtt->gtt + first_pd_entry_in_global_pt;
for (i = 0; i < ppgtt->num_pd_entries; i++) {
dma_addr_t pt_addr;
if (dev_priv->mm.gtt->needs_dmar) {
@@ -117,13 +114,7 @@ int i915_gem_init_aliasing_ppgtt(struct drm_device *dev)
ppgtt->pt_dma_addr[i] = pt_addr;
} else
pt_addr = page_to_phys(ppgtt->pt_pages[i]);
-
- pd_entry = GEN6_PDE_ADDR_ENCODE(pt_addr);
- pd_entry |= GEN6_PDE_VALID;
-
- writel(pd_entry, pd_addr + i);
}
- readl(pd_addr);
ppgtt->scratch_page_dma_addr = dev_priv->mm.gtt->scratch_page_dma;
diff --git a/drivers/gpu/drm/i915/i915_reg.h b/drivers/gpu/drm/i915/i915_reg.h
index 3886cf051ba..2abf4eb9403 100644
--- a/drivers/gpu/drm/i915/i915_reg.h
+++ b/drivers/gpu/drm/i915/i915_reg.h
@@ -2385,6 +2385,7 @@
#define PIPECONF_DISABLE 0
#define PIPECONF_DOUBLE_WIDE (1<<30)
#define I965_PIPECONF_ACTIVE (1<<30)
+#define PIPECONF_FRAME_START_DELAY_MASK (3<<27)
#define PIPECONF_SINGLE_WIDE 0
#define PIPECONF_PIPE_UNLOCKED 0
#define PIPECONF_PIPE_LOCKED (1<<25)
diff --git a/drivers/gpu/drm/i915/intel_bios.c b/drivers/gpu/drm/i915/intel_bios.c
index 8168d8f8a63..b48fc2a8410 100644
--- a/drivers/gpu/drm/i915/intel_bios.c
+++ b/drivers/gpu/drm/i915/intel_bios.c
@@ -24,6 +24,7 @@
* Eric Anholt <eric@anholt.net>
*
*/
+#include <linux/dmi.h>
#include <drm/drm_dp_helper.h>
#include "drmP.h"
#include "drm.h"
@@ -621,6 +622,26 @@ init_vbt_defaults(struct drm_i915_private *dev_priv)
dev_priv->edp.bpp = 18;
}
+static int __init intel_no_opregion_vbt_callback(const struct dmi_system_id *id)
+{
+ DRM_DEBUG_KMS("Falling back to manually reading VBT from "
+ "VBIOS ROM for %s\n",
+ id->ident);
+ return 1;
+}
+
+static const struct dmi_system_id intel_no_opregion_vbt[] = {
+ {
+ .callback = intel_no_opregion_vbt_callback,
+ .ident = "ThinkCentre A57",
+ .matches = {
+ DMI_MATCH(DMI_SYS_VENDOR, "LENOVO"),
+ DMI_MATCH(DMI_PRODUCT_NAME, "97027RG"),
+ },
+ },
+ { }
+};
+
/**
* intel_parse_bios - find VBT and initialize settings from the BIOS
* @dev: DRM device
@@ -641,7 +662,7 @@ intel_parse_bios(struct drm_device *dev)
init_vbt_defaults(dev_priv);
/* XXX Should this validation be moved to intel_opregion.c? */
- if (dev_priv->opregion.vbt) {
+ if (!dmi_check_system(intel_no_opregion_vbt) && dev_priv->opregion.vbt) {
struct vbt_header *vbt = dev_priv->opregion.vbt;
if (memcmp(vbt->signature, "$VBT", 4) == 0) {
DRM_DEBUG_KMS("Using VBT from OpRegion: %20s\n",
diff --git a/drivers/gpu/drm/i915/intel_display.c b/drivers/gpu/drm/i915/intel_display.c
index d514719f65e..91b35fd1db8 100644
--- a/drivers/gpu/drm/i915/intel_display.c
+++ b/drivers/gpu/drm/i915/intel_display.c
@@ -5539,7 +5539,8 @@ void ironlake_init_pch_refclk(struct drm_device *dev)
if (intel_panel_use_ssc(dev_priv) && can_ssc) {
DRM_DEBUG_KMS("Using SSC on panel\n");
temp |= DREF_SSC1_ENABLE;
- }
+ } else
+ temp &= ~DREF_SSC1_ENABLE;
/* Get SSC going before enabling the outputs */
I915_WRITE(PCH_DREF_CONTROL, temp);
@@ -7580,6 +7581,12 @@ static void intel_sanitize_modesetting(struct drm_device *dev,
struct drm_i915_private *dev_priv = dev->dev_private;
u32 reg, val;
+ /* Clear any frame start delays used for debugging left by the BIOS */
+ for_each_pipe(pipe) {
+ reg = PIPECONF(pipe);
+ I915_WRITE(reg, I915_READ(reg) & ~PIPECONF_FRAME_START_DELAY_MASK);
+ }
+
if (HAS_PCH_SPLIT(dev))
return;
@@ -8215,7 +8222,7 @@ void intel_init_emon(struct drm_device *dev)
dev_priv->corr = (lcfuse & LCFUSE_HIV_MASK);
}
-static bool intel_enable_rc6(struct drm_device *dev)
+static int intel_enable_rc6(struct drm_device *dev)
{
/*
* Respect the kernel parameter if it is set
@@ -8233,11 +8240,11 @@ static bool intel_enable_rc6(struct drm_device *dev)
* Disable rc6 on Sandybridge
*/
if (INTEL_INFO(dev)->gen == 6) {
- DRM_DEBUG_DRIVER("Sandybridge: RC6 disabled\n");
- return 0;
+ DRM_DEBUG_DRIVER("Sandybridge: deep RC6 disabled\n");
+ return INTEL_RC6_ENABLE;
}
- DRM_DEBUG_DRIVER("RC6 enabled\n");
- return 1;
+ DRM_DEBUG_DRIVER("RC6 and deep RC6 enabled\n");
+ return (INTEL_RC6_ENABLE | INTEL_RC6p_ENABLE);
}
void gen6_enable_rps(struct drm_i915_private *dev_priv)
@@ -8247,6 +8254,7 @@ void gen6_enable_rps(struct drm_i915_private *dev_priv)
u32 pcu_mbox, rc6_mask = 0;
u32 gtfifodbg;
int cur_freq, min_freq, max_freq;
+ int rc6_mode;
int i;
/* Here begins a magic sequence of register writes to enable
@@ -8284,9 +8292,20 @@ void gen6_enable_rps(struct drm_i915_private *dev_priv)
I915_WRITE(GEN6_RC6p_THRESHOLD, 100000);
I915_WRITE(GEN6_RC6pp_THRESHOLD, 64000); /* unused */
- if (intel_enable_rc6(dev_priv->dev))
- rc6_mask = GEN6_RC_CTL_RC6_ENABLE |
- ((IS_GEN7(dev_priv->dev)) ? GEN6_RC_CTL_RC6p_ENABLE : 0);
+ rc6_mode = intel_enable_rc6(dev_priv->dev);
+ if (rc6_mode & INTEL_RC6_ENABLE)
+ rc6_mask |= GEN6_RC_CTL_RC6_ENABLE;
+
+ if (rc6_mode & INTEL_RC6p_ENABLE)
+ rc6_mask |= GEN6_RC_CTL_RC6p_ENABLE;
+
+ if (rc6_mode & INTEL_RC6pp_ENABLE)
+ rc6_mask |= GEN6_RC_CTL_RC6pp_ENABLE;
+
+ DRM_INFO("Enabling RC6 states: RC6 %s, RC6p %s, RC6pp %s\n",
+ (rc6_mode & INTEL_RC6_ENABLE) ? "on" : "off",
+ (rc6_mode & INTEL_RC6p_ENABLE) ? "on" : "off",
+ (rc6_mode & INTEL_RC6pp_ENABLE) ? "on" : "off");
I915_WRITE(GEN6_RC_CONTROL,
rc6_mask |
diff --git a/drivers/gpu/drm/i915/intel_drv.h b/drivers/gpu/drm/i915/intel_drv.h
index 9cec6c3937f..5a14149b379 100644
--- a/drivers/gpu/drm/i915/intel_drv.h
+++ b/drivers/gpu/drm/i915/intel_drv.h
@@ -382,7 +382,7 @@ extern int intel_framebuffer_init(struct drm_device *dev,
struct drm_i915_gem_object *obj);
extern int intel_fbdev_init(struct drm_device *dev);
extern void intel_fbdev_fini(struct drm_device *dev);
-
+extern void intel_fbdev_set_suspend(struct drm_device *dev, int state);
extern void intel_prepare_page_flip(struct drm_device *dev, int plane);
extern void intel_finish_page_flip(struct drm_device *dev, int pipe);
extern void intel_finish_page_flip_plane(struct drm_device *dev, int plane);
diff --git a/drivers/gpu/drm/i915/intel_fb.c b/drivers/gpu/drm/i915/intel_fb.c
index 2d876697838..19ecd78b8a2 100644
--- a/drivers/gpu/drm/i915/intel_fb.c
+++ b/drivers/gpu/drm/i915/intel_fb.c
@@ -254,6 +254,16 @@ void intel_fbdev_fini(struct drm_device *dev)
kfree(dev_priv->fbdev);
dev_priv->fbdev = NULL;
}
+
+void intel_fbdev_set_suspend(struct drm_device *dev, int state)
+{
+ drm_i915_private_t *dev_priv = dev->dev_private;
+ if (!dev_priv->fbdev)
+ return;
+
+ fb_set_suspend(dev_priv->fbdev->helper.fbdev, state);
+}
+
MODULE_LICENSE("GPL and additional rights");
void intel_fb_output_poll_changed(struct drm_device *dev)
diff --git a/drivers/gpu/drm/i915/intel_lvds.c b/drivers/gpu/drm/i915/intel_lvds.c
index c5c0973af8a..95db2e98822 100644
--- a/drivers/gpu/drm/i915/intel_lvds.c
+++ b/drivers/gpu/drm/i915/intel_lvds.c
@@ -755,6 +755,14 @@ static const struct dmi_system_id intel_no_lvds[] = {
DMI_MATCH(DMI_BOARD_NAME, "hp st5747"),
},
},
+ {
+ .callback = intel_no_lvds_dmi_callback,
+ .ident = "MSI Wind Box DC500",
+ .matches = {
+ DMI_MATCH(DMI_BOARD_VENDOR, "MICRO-STAR INTERNATIONAL CO., LTD"),
+ DMI_MATCH(DMI_BOARD_NAME, "MS-7469"),
+ },
+ },
{ } /* terminating entry */
};
diff --git a/drivers/gpu/drm/i915/intel_ringbuffer.c b/drivers/gpu/drm/i915/intel_ringbuffer.c
index fc66af6a944..e25581a9f60 100644
--- a/drivers/gpu/drm/i915/intel_ringbuffer.c
+++ b/drivers/gpu/drm/i915/intel_ringbuffer.c
@@ -626,7 +626,7 @@ gen6_ring_get_seqno(struct intel_ring_buffer *ring)
/* Workaround to force correct ordering between irq and seqno writes on
* ivb (and maybe also on snb) by reading from a CS register (like
* ACTHD) before reading the status page. */
- if (IS_GEN7(dev))
+ if (IS_GEN6(dev) || IS_GEN7(dev))
intel_ring_get_active_head(ring);
return intel_read_status_page(ring, I915_GEM_HWS_INDEX);
}
diff --git a/drivers/gpu/drm/i915/intel_sprite.c b/drivers/gpu/drm/i915/intel_sprite.c
index 7aa0450399a..a464771a724 100644
--- a/drivers/gpu/drm/i915/intel_sprite.c
+++ b/drivers/gpu/drm/i915/intel_sprite.c
@@ -411,6 +411,9 @@ intel_update_plane(struct drm_plane *plane, struct drm_crtc *crtc,
old_obj = intel_plane->obj;
+ src_w = src_w >> 16;
+ src_h = src_h >> 16;
+
/* Pipe must be running... */
if (!(I915_READ(PIPECONF(pipe)) & PIPECONF_ENABLE))
return -EINVAL;
diff --git a/drivers/gpu/drm/nouveau/Kconfig b/drivers/gpu/drm/nouveau/Kconfig
index ca1639918f5..97a81260485 100644
--- a/drivers/gpu/drm/nouveau/Kconfig
+++ b/drivers/gpu/drm/nouveau/Kconfig
@@ -13,6 +13,7 @@ config DRM_NOUVEAU
select ACPI_VIDEO if ACPI && X86 && BACKLIGHT_CLASS_DEVICE && VIDEO_OUTPUT_CONTROL && INPUT
select ACPI_WMI if ACPI
select MXM_WMI if ACPI
+ select POWER_SUPPLY
help
Choose this option for open-source nVidia support.
diff --git a/drivers/gpu/drm/nouveau/nouveau_bios.c b/drivers/gpu/drm/nouveau/nouveau_bios.c
index 637afe71de5..80963d05b54 100644
--- a/drivers/gpu/drm/nouveau/nouveau_bios.c
+++ b/drivers/gpu/drm/nouveau/nouveau_bios.c
@@ -177,14 +177,15 @@ bios_shadow_pci(struct nvbios *bios)
if (!pci_enable_rom(pdev)) {
void __iomem *rom = pci_map_rom(pdev, &length);
- if (rom) {
+ if (rom && length) {
bios->data = kmalloc(length, GFP_KERNEL);
if (bios->data) {
memcpy_fromio(bios->data, rom, length);
bios->length = length;
}
- pci_unmap_rom(pdev, rom);
}
+ if (rom)
+ pci_unmap_rom(pdev, rom);
pci_disable_rom(pdev);
}
diff --git a/drivers/gpu/drm/nouveau/nouveau_channel.c b/drivers/gpu/drm/nouveau/nouveau_channel.c
index 44e6416d4a3..846afb0bfef 100644
--- a/drivers/gpu/drm/nouveau/nouveau_channel.c
+++ b/drivers/gpu/drm/nouveau/nouveau_channel.c
@@ -436,11 +436,11 @@ nouveau_ioctl_fifo_alloc(struct drm_device *dev, void *data,
}
if (dev_priv->card_type < NV_C0) {
- init->subchan[0].handle = NvSw;
- init->subchan[0].grclass = NV_SW;
- init->nr_subchan = 1;
- } else {
- init->nr_subchan = 0;
+ init->subchan[0].handle = 0x00000000;
+ init->subchan[0].grclass = 0x0000;
+ init->subchan[1].handle = NvSw;
+ init->subchan[1].grclass = NV_SW;
+ init->nr_subchan = 2;
}
/* Named memory object area */
diff --git a/drivers/gpu/drm/nouveau/nouveau_connector.c b/drivers/gpu/drm/nouveau/nouveau_connector.c
index 8f510fd956b..fa860358add 100644
--- a/drivers/gpu/drm/nouveau/nouveau_connector.c
+++ b/drivers/gpu/drm/nouveau/nouveau_connector.c
@@ -654,10 +654,13 @@ nouveau_connector_detect_depth(struct drm_connector *connector)
if (nv_connector->edid && connector->display_info.bpc)
return;
- /* if not, we're out of options unless we're LVDS, default to 6bpc */
- connector->display_info.bpc = 6;
- if (nv_encoder->dcb->type != OUTPUT_LVDS)
+ /* if not, we're out of options unless we're LVDS, default to 8bpc */
+ if (nv_encoder->dcb->type != OUTPUT_LVDS) {
+ connector->display_info.bpc = 8;
return;
+ }
+
+ connector->display_info.bpc = 6;
/* LVDS: panel straps */
if (bios->fp_no_ddc) {
diff --git a/drivers/gpu/drm/nouveau/nouveau_dma.h b/drivers/gpu/drm/nouveau/nouveau_dma.h
index bcf0fd9e313..23d4edf992b 100644
--- a/drivers/gpu/drm/nouveau/nouveau_dma.h
+++ b/drivers/gpu/drm/nouveau/nouveau_dma.h
@@ -48,8 +48,8 @@ void nv50_dma_push(struct nouveau_channel *, struct nouveau_bo *,
/* Hardcoded object assignments to subchannels (subchannel id). */
enum {
- NvSubSw = 0,
- NvSubM2MF = 1,
+ NvSubM2MF = 0,
+ NvSubSw = 1,
NvSub2D = 2,
NvSubCtxSurf2D = 2,
NvSubGdiRect = 3,
diff --git a/drivers/gpu/drm/nouveau/nouveau_i2c.c b/drivers/gpu/drm/nouveau/nouveau_i2c.c
index 8f4f914d9ea..e2be95af2e5 100644
--- a/drivers/gpu/drm/nouveau/nouveau_i2c.c
+++ b/drivers/gpu/drm/nouveau/nouveau_i2c.c
@@ -315,8 +315,8 @@ nouveau_i2c_init(struct drm_device *dev)
struct drm_nouveau_private *dev_priv = dev->dev_private;
struct nvbios *bios = &dev_priv->vbios;
struct nouveau_i2c_chan *port;
+ u8 version = 0x00, entries, recordlen;
u8 *i2c, *entry, legacy[2][4] = {};
- u8 version, entries, recordlen;
int ret, i;
INIT_LIST_HEAD(&dev_priv->i2c_ports);
@@ -346,12 +346,12 @@ nouveau_i2c_init(struct drm_device *dev)
if (i2c[7]) legacy[1][1] = i2c[7];
}
- if (i2c && version >= 0x30) {
+ if (version >= 0x30) {
entry = i2c[1] + i2c;
entries = i2c[2];
recordlen = i2c[3];
} else
- if (i2c) {
+ if (version) {
entry = i2c;
entries = 16;
recordlen = 4;
diff --git a/drivers/gpu/drm/nouveau/nouveau_state.c b/drivers/gpu/drm/nouveau/nouveau_state.c
index a3ae91fa814..c2a8511e855 100644
--- a/drivers/gpu/drm/nouveau/nouveau_state.c
+++ b/drivers/gpu/drm/nouveau/nouveau_state.c
@@ -642,7 +642,7 @@ nouveau_card_channel_init(struct drm_device *dev)
OUT_RING (chan, chan->vram_handle);
OUT_RING (chan, chan->gart_handle);
} else
- if (dev_priv->card_type <= NV_C0) {
+ if (dev_priv->card_type <= NV_D0) {
ret = nouveau_gpuobj_gr_new(chan, 0x9039, 0x9039);
if (ret)
goto error;
@@ -852,7 +852,7 @@ nouveau_card_init(struct drm_device *dev)
if (ret)
goto out_pm;
- if (!dev_priv->noaccel) {
+ if (dev_priv->eng[NVOBJ_ENGINE_GR]) {
ret = nouveau_card_channel_init(dev);
if (ret)
goto out_fence;
diff --git a/drivers/gpu/drm/radeon/atom.c b/drivers/gpu/drm/radeon/atom.c
index d1bd239cd9e..5ce9bf51a8d 100644
--- a/drivers/gpu/drm/radeon/atom.c
+++ b/drivers/gpu/drm/radeon/atom.c
@@ -1306,8 +1306,11 @@ struct atom_context *atom_parse(struct card_info *card, void *bios)
int atom_asic_init(struct atom_context *ctx)
{
+ struct radeon_device *rdev = ctx->card->dev->dev_private;
int hwi = CU16(ctx->data_table + ATOM_DATA_FWI_PTR);
uint32_t ps[16];
+ int ret;
+
memset(ps, 0, 64);
ps[0] = cpu_to_le32(CU32(hwi + ATOM_FWI_DEFSCLK_PTR));
@@ -1317,7 +1320,17 @@ int atom_asic_init(struct atom_context *ctx)
if (!CU16(ctx->cmd_table + 4 + 2 * ATOM_CMD_INIT))
return 1;
- return atom_execute_table(ctx, ATOM_CMD_INIT, ps);
+ ret = atom_execute_table(ctx, ATOM_CMD_INIT, ps);
+ if (ret)
+ return ret;
+
+ memset(ps, 0, 64);
+
+ if (rdev->family < CHIP_R600) {
+ if (CU16(ctx->cmd_table + 4 + 2 * ATOM_CMD_SPDFANCNTL))
+ atom_execute_table(ctx, ATOM_CMD_SPDFANCNTL, ps);
+ }
+ return ret;
}
void atom_destroy(struct atom_context *ctx)
diff --git a/drivers/gpu/drm/radeon/atom.h b/drivers/gpu/drm/radeon/atom.h
index 93cfe2086ba..25fea631dad 100644
--- a/drivers/gpu/drm/radeon/atom.h
+++ b/drivers/gpu/drm/radeon/atom.h
@@ -44,6 +44,7 @@
#define ATOM_CMD_SETSCLK 0x0A
#define ATOM_CMD_SETMCLK 0x0B
#define ATOM_CMD_SETPCLK 0x0C
+#define ATOM_CMD_SPDFANCNTL 0x39
#define ATOM_DATA_FWI_PTR 0xC
#define ATOM_DATA_IIO_PTR 0x32
diff --git a/drivers/gpu/drm/radeon/atombios_crtc.c b/drivers/gpu/drm/radeon/atombios_crtc.c
index 083b3eada00..b5ff1f7b6f7 100644
--- a/drivers/gpu/drm/radeon/atombios_crtc.c
+++ b/drivers/gpu/drm/radeon/atombios_crtc.c
@@ -588,8 +588,8 @@ static u32 atombios_adjust_pll(struct drm_crtc *crtc,
if (encoder->crtc == crtc) {
radeon_encoder = to_radeon_encoder(encoder);
connector = radeon_get_connector_for_encoder(encoder);
- if (connector && connector->display_info.bpc)
- bpc = connector->display_info.bpc;
+ /* if (connector && connector->display_info.bpc)
+ bpc = connector->display_info.bpc; */
encoder_mode = atombios_get_encoder_mode(encoder);
is_duallink = radeon_dig_monitor_is_duallink(encoder, mode->clock);
if ((radeon_encoder->devices & (ATOM_DEVICE_LCD_SUPPORT | ATOM_DEVICE_DFP_SUPPORT)) ||
@@ -965,7 +965,9 @@ static void atombios_crtc_set_pll(struct drm_crtc *crtc, struct drm_display_mode
struct radeon_connector_atom_dig *dig_connector =
radeon_connector->con_priv;
int dp_clock;
- bpc = connector->display_info.bpc;
+
+ /* if (connector->display_info.bpc)
+ bpc = connector->display_info.bpc; */
switch (encoder_mode) {
case ATOM_ENCODER_MODE_DP_MST:
diff --git a/drivers/gpu/drm/radeon/atombios_dp.c b/drivers/gpu/drm/radeon/atombios_dp.c
index 6c62be22680..c57d85664e7 100644
--- a/drivers/gpu/drm/radeon/atombios_dp.c
+++ b/drivers/gpu/drm/radeon/atombios_dp.c
@@ -405,10 +405,13 @@ static void dp_get_adjust_train(u8 link_status[DP_LINK_STATUS_SIZE],
/* get bpc from the EDID */
static int convert_bpc_to_bpp(int bpc)
{
+#if 0
if (bpc == 0)
return 24;
else
return bpc * 3;
+#endif
+ return 24;
}
/* get the max pix clock supported by the link rate and lane num */
diff --git a/drivers/gpu/drm/radeon/atombios_encoders.c b/drivers/gpu/drm/radeon/atombios_encoders.c
index 468b874336f..e607c4d7dd9 100644
--- a/drivers/gpu/drm/radeon/atombios_encoders.c
+++ b/drivers/gpu/drm/radeon/atombios_encoders.c
@@ -541,7 +541,7 @@ atombios_dig_encoder_setup(struct drm_encoder *encoder, int action, int panel_mo
dp_clock = dig_connector->dp_clock;
dp_lane_count = dig_connector->dp_lane_count;
hpd_id = radeon_connector->hpd.hpd;
- bpc = connector->display_info.bpc;
+ /* bpc = connector->display_info.bpc; */
}
/* no dig encoder assigned */
@@ -1159,7 +1159,7 @@ atombios_external_encoder_setup(struct drm_encoder *encoder,
dp_lane_count = dig_connector->dp_lane_count;
connector_object_id =
(radeon_connector->connector_object_id & OBJECT_ID_MASK) >> OBJECT_ID_SHIFT;
- bpc = connector->display_info.bpc;
+ /* bpc = connector->display_info.bpc; */
}
memset(&args, 0, sizeof(args));
diff --git a/drivers/gpu/drm/radeon/evergreen_cs.c b/drivers/gpu/drm/radeon/evergreen_cs.c
index a58b37a2e65..70089d32b80 100644
--- a/drivers/gpu/drm/radeon/evergreen_cs.c
+++ b/drivers/gpu/drm/radeon/evergreen_cs.c
@@ -80,6 +80,9 @@ struct evergreen_cs_track {
bool cb_dirty;
bool db_dirty;
bool streamout_dirty;
+ u32 htile_offset;
+ u32 htile_surface;
+ struct radeon_bo *htile_bo;
};
static u32 evergreen_cs_get_aray_mode(u32 tiling_flags)
@@ -144,6 +147,9 @@ static void evergreen_cs_track_init(struct evergreen_cs_track *track)
track->db_s_read_bo = NULL;
track->db_s_write_bo = NULL;
track->db_dirty = true;
+ track->htile_bo = NULL;
+ track->htile_offset = 0xFFFFFFFF;
+ track->htile_surface = 0;
for (i = 0; i < 4; i++) {
track->vgt_strmout_size[i] = 0;
@@ -444,6 +450,62 @@ static int evergreen_cs_track_validate_cb(struct radeon_cs_parser *p, unsigned i
return 0;
}
+static int evergreen_cs_track_validate_htile(struct radeon_cs_parser *p,
+ unsigned nbx, unsigned nby)
+{
+ struct evergreen_cs_track *track = p->track;
+ unsigned long size;
+
+ if (track->htile_bo == NULL) {
+ dev_warn(p->dev, "%s:%d htile enabled without htile surface 0x%08x\n",
+ __func__, __LINE__, track->db_z_info);
+ return -EINVAL;
+ }
+
+ if (G_028ABC_LINEAR(track->htile_surface)) {
+ /* pitch must be 16 htiles aligned == 16 * 8 pixel aligned */
+ nbx = round_up(nbx, 16 * 8);
+ /* height is npipes htiles aligned == npipes * 8 pixel aligned */
+ nby = round_up(nby, track->npipes * 8);
+ } else {
+ switch (track->npipes) {
+ case 8:
+ nbx = round_up(nbx, 64 * 8);
+ nby = round_up(nby, 64 * 8);
+ break;
+ case 4:
+ nbx = round_up(nbx, 64 * 8);
+ nby = round_up(nby, 32 * 8);
+ break;
+ case 2:
+ nbx = round_up(nbx, 32 * 8);
+ nby = round_up(nby, 32 * 8);
+ break;
+ case 1:
+ nbx = round_up(nbx, 32 * 8);
+ nby = round_up(nby, 16 * 8);
+ break;
+ default:
+ dev_warn(p->dev, "%s:%d invalid num pipes %d\n",
+ __func__, __LINE__, track->npipes);
+ return -EINVAL;
+ }
+ }
+ /* compute number of htile */
+ nbx = nbx / 8;
+ nby = nby / 8;
+ size = nbx * nby * 4;
+ size += track->htile_offset;
+
+ if (size > radeon_bo_size(track->htile_bo)) {
+ dev_warn(p->dev, "%s:%d htile surface too small %ld for %ld (%d %d)\n",
+ __func__, __LINE__, radeon_bo_size(track->htile_bo),
+ size, nbx, nby);
+ return -EINVAL;
+ }
+ return 0;
+}
+
static int evergreen_cs_track_validate_stencil(struct radeon_cs_parser *p)
{
struct evergreen_cs_track *track = p->track;
@@ -530,6 +592,14 @@ static int evergreen_cs_track_validate_stencil(struct radeon_cs_parser *p)
return -EINVAL;
}
+ /* hyperz */
+ if (G_028040_TILE_SURFACE_ENABLE(track->db_z_info)) {
+ r = evergreen_cs_track_validate_htile(p, surf.nbx, surf.nby);
+ if (r) {
+ return r;
+ }
+ }
+
return 0;
}
@@ -617,6 +687,14 @@ static int evergreen_cs_track_validate_depth(struct radeon_cs_parser *p)
return -EINVAL;
}
+ /* hyperz */
+ if (G_028040_TILE_SURFACE_ENABLE(track->db_z_info)) {
+ r = evergreen_cs_track_validate_htile(p, surf.nbx, surf.nby);
+ if (r) {
+ return r;
+ }
+ }
+
return 0;
}
@@ -850,7 +928,7 @@ static int evergreen_cs_track_check(struct radeon_cs_parser *p)
return r;
}
/* Check depth buffer */
- if (G_028800_Z_WRITE_ENABLE(track->db_depth_control)) {
+ if (G_028800_Z_ENABLE(track->db_depth_control)) {
r = evergreen_cs_track_validate_depth(p);
if (r)
return r;
@@ -1616,6 +1694,23 @@ static int evergreen_cs_check_reg(struct radeon_cs_parser *p, u32 reg, u32 idx)
track->cb_color_bo[tmp] = reloc->robj;
track->cb_dirty = true;
break;
+ case DB_HTILE_DATA_BASE:
+ r = evergreen_cs_packet_next_reloc(p, &reloc);
+ if (r) {
+ dev_warn(p->dev, "bad SET_CONTEXT_REG "
+ "0x%04X\n", reg);
+ return -EINVAL;
+ }
+ track->htile_offset = radeon_get_ib_value(p, idx);
+ ib[idx] += (u32)((reloc->lobj.gpu_offset >> 8) & 0xffffffff);
+ track->htile_bo = reloc->robj;
+ track->db_dirty = true;
+ break;
+ case DB_HTILE_SURFACE:
+ /* 8x8 only */
+ track->htile_surface = radeon_get_ib_value(p, idx);
+ track->db_dirty = true;
+ break;
case CB_IMMED0_BASE:
case CB_IMMED1_BASE:
case CB_IMMED2_BASE:
@@ -1628,7 +1723,6 @@ static int evergreen_cs_check_reg(struct radeon_cs_parser *p, u32 reg, u32 idx)
case CB_IMMED9_BASE:
case CB_IMMED10_BASE:
case CB_IMMED11_BASE:
- case DB_HTILE_DATA_BASE:
case SQ_PGM_START_FS:
case SQ_PGM_START_ES:
case SQ_PGM_START_VS:
diff --git a/drivers/gpu/drm/radeon/evergreend.h b/drivers/gpu/drm/radeon/evergreend.h
index eb5708c7159..b4eefc355f1 100644
--- a/drivers/gpu/drm/radeon/evergreend.h
+++ b/drivers/gpu/drm/radeon/evergreend.h
@@ -991,6 +991,14 @@
#define G_028008_SLICE_MAX(x) (((x) >> 13) & 0x7FF)
#define C_028008_SLICE_MAX 0xFF001FFF
#define DB_HTILE_DATA_BASE 0x28014
+#define DB_HTILE_SURFACE 0x28abc
+#define S_028ABC_HTILE_WIDTH(x) (((x) & 0x1) << 0)
+#define G_028ABC_HTILE_WIDTH(x) (((x) >> 0) & 0x1)
+#define C_028ABC_HTILE_WIDTH 0xFFFFFFFE
+#define S_028ABC_HTILE_HEIGHT(x) (((x) & 0x1) << 1)
+#define G_028ABC_HTILE_HEIGHT(x) (((x) >> 1) & 0x1)
+#define C_028ABC_HTILE_HEIGHT 0xFFFFFFFD
+#define G_028ABC_LINEAR(x) (((x) >> 2) & 0x1)
#define DB_Z_INFO 0x28040
# define Z_ARRAY_MODE(x) ((x) << 4)
# define DB_TILE_SPLIT(x) (((x) & 0x7) << 8)
diff --git a/drivers/gpu/drm/radeon/r600_cs.c b/drivers/gpu/drm/radeon/r600_cs.c
index 0ec3f205f9c..b8e12af304a 100644
--- a/drivers/gpu/drm/radeon/r600_cs.c
+++ b/drivers/gpu/drm/radeon/r600_cs.c
@@ -78,6 +78,9 @@ struct r600_cs_track {
bool cb_dirty;
bool db_dirty;
bool streamout_dirty;
+ struct radeon_bo *htile_bo;
+ u64 htile_offset;
+ u32 htile_surface;
};
#define FMT_8_BIT(fmt, vc) [fmt] = { 1, 1, 1, vc, CHIP_R600 }
@@ -321,6 +324,9 @@ static void r600_cs_track_init(struct r600_cs_track *track)
track->db_depth_size_idx = 0;
track->db_depth_control = 0xFFFFFFFF;
track->db_dirty = true;
+ track->htile_bo = NULL;
+ track->htile_offset = 0xFFFFFFFF;
+ track->htile_surface = 0;
for (i = 0; i < 4; i++) {
track->vgt_strmout_size[i] = 0;
@@ -455,12 +461,256 @@ static int r600_cs_track_validate_cb(struct radeon_cs_parser *p, int i)
return 0;
}
+static int r600_cs_track_validate_db(struct radeon_cs_parser *p)
+{
+ struct r600_cs_track *track = p->track;
+ u32 nviews, bpe, ntiles, size, slice_tile_max, tmp;
+ u32 height_align, pitch_align, depth_align;
+ u32 pitch = 8192;
+ u32 height = 8192;
+ u64 base_offset, base_align;
+ struct array_mode_checker array_check;
+ int array_mode;
+ volatile u32 *ib = p->ib->ptr;
+
+
+ if (track->db_bo == NULL) {
+ dev_warn(p->dev, "z/stencil with no depth buffer\n");
+ return -EINVAL;
+ }
+ switch (G_028010_FORMAT(track->db_depth_info)) {
+ case V_028010_DEPTH_16:
+ bpe = 2;
+ break;
+ case V_028010_DEPTH_X8_24:
+ case V_028010_DEPTH_8_24:
+ case V_028010_DEPTH_X8_24_FLOAT:
+ case V_028010_DEPTH_8_24_FLOAT:
+ case V_028010_DEPTH_32_FLOAT:
+ bpe = 4;
+ break;
+ case V_028010_DEPTH_X24_8_32_FLOAT:
+ bpe = 8;
+ break;
+ default:
+ dev_warn(p->dev, "z/stencil with invalid format %d\n", G_028010_FORMAT(track->db_depth_info));
+ return -EINVAL;
+ }
+ if ((track->db_depth_size & 0xFFFFFC00) == 0xFFFFFC00) {
+ if (!track->db_depth_size_idx) {
+ dev_warn(p->dev, "z/stencil buffer size not set\n");
+ return -EINVAL;
+ }
+ tmp = radeon_bo_size(track->db_bo) - track->db_offset;
+ tmp = (tmp / bpe) >> 6;
+ if (!tmp) {
+ dev_warn(p->dev, "z/stencil buffer too small (0x%08X %d %d %ld)\n",
+ track->db_depth_size, bpe, track->db_offset,
+ radeon_bo_size(track->db_bo));
+ return -EINVAL;
+ }
+ ib[track->db_depth_size_idx] = S_028000_SLICE_TILE_MAX(tmp - 1) | (track->db_depth_size & 0x3FF);
+ } else {
+ size = radeon_bo_size(track->db_bo);
+ /* pitch in pixels */
+ pitch = (G_028000_PITCH_TILE_MAX(track->db_depth_size) + 1) * 8;
+ slice_tile_max = G_028000_SLICE_TILE_MAX(track->db_depth_size) + 1;
+ slice_tile_max *= 64;
+ height = slice_tile_max / pitch;
+ if (height > 8192)
+ height = 8192;
+ base_offset = track->db_bo_mc + track->db_offset;
+ array_mode = G_028010_ARRAY_MODE(track->db_depth_info);
+ array_check.array_mode = array_mode;
+ array_check.group_size = track->group_size;
+ array_check.nbanks = track->nbanks;
+ array_check.npipes = track->npipes;
+ array_check.nsamples = track->nsamples;
+ array_check.blocksize = bpe;
+ if (r600_get_array_mode_alignment(&array_check,
+ &pitch_align, &height_align, &depth_align, &base_align)) {
+ dev_warn(p->dev, "%s invalid tiling %d (0x%08X)\n", __func__,
+ G_028010_ARRAY_MODE(track->db_depth_info),
+ track->db_depth_info);
+ return -EINVAL;
+ }
+ switch (array_mode) {
+ case V_028010_ARRAY_1D_TILED_THIN1:
+ /* don't break userspace */
+ height &= ~0x7;
+ break;
+ case V_028010_ARRAY_2D_TILED_THIN1:
+ break;
+ default:
+ dev_warn(p->dev, "%s invalid tiling %d (0x%08X)\n", __func__,
+ G_028010_ARRAY_MODE(track->db_depth_info),
+ track->db_depth_info);
+ return -EINVAL;
+ }
+
+ if (!IS_ALIGNED(pitch, pitch_align)) {
+ dev_warn(p->dev, "%s:%d db pitch (%d, 0x%x, %d) invalid\n",
+ __func__, __LINE__, pitch, pitch_align, array_mode);
+ return -EINVAL;
+ }
+ if (!IS_ALIGNED(height, height_align)) {
+ dev_warn(p->dev, "%s:%d db height (%d, 0x%x, %d) invalid\n",
+ __func__, __LINE__, height, height_align, array_mode);
+ return -EINVAL;
+ }
+ if (!IS_ALIGNED(base_offset, base_align)) {
+ dev_warn(p->dev, "%s offset 0x%llx, 0x%llx, %d not aligned\n", __func__,
+ base_offset, base_align, array_mode);
+ return -EINVAL;
+ }
+
+ ntiles = G_028000_SLICE_TILE_MAX(track->db_depth_size) + 1;
+ nviews = G_028004_SLICE_MAX(track->db_depth_view) + 1;
+ tmp = ntiles * bpe * 64 * nviews;
+ if ((tmp + track->db_offset) > radeon_bo_size(track->db_bo)) {
+ dev_warn(p->dev, "z/stencil buffer (%d) too small (0x%08X %d %d %d -> %u have %lu)\n",
+ array_mode,
+ track->db_depth_size, ntiles, nviews, bpe, tmp + track->db_offset,
+ radeon_bo_size(track->db_bo));
+ return -EINVAL;
+ }
+ }
+
+ /* hyperz */
+ if (G_028010_TILE_SURFACE_ENABLE(track->db_depth_info)) {
+ unsigned long size;
+ unsigned nbx, nby;
+
+ if (track->htile_bo == NULL) {
+ dev_warn(p->dev, "%s:%d htile enabled without htile surface 0x%08x\n",
+ __func__, __LINE__, track->db_depth_info);
+ return -EINVAL;
+ }
+ if ((track->db_depth_size & 0xFFFFFC00) == 0xFFFFFC00) {
+ dev_warn(p->dev, "%s:%d htile can't be enabled with bogus db_depth_size 0x%08x\n",
+ __func__, __LINE__, track->db_depth_size);
+ return -EINVAL;
+ }
+
+ nbx = pitch;
+ nby = height;
+ if (G_028D24_LINEAR(track->htile_surface)) {
+ /* nbx must be 16 htiles aligned == 16 * 8 pixel aligned */
+ nbx = round_up(nbx, 16 * 8);
+ /* nby is npipes htiles aligned == npipes * 8 pixel aligned */
+ nby = round_up(nby, track->npipes * 8);
+ } else {
+ /* htile width & nby (8 or 4) make a 2-bit number */
+ tmp = track->htile_surface & 3;
+ /* align is htile align * 8, htile align vary according to
+ * number of pipe and tile width and nby
+ */
+ switch (track->npipes) {
+ case 8:
+ switch (tmp) {
+ case 3: /* HTILE_WIDTH = 8 & HTILE_HEIGHT = 8*/
+ nbx = round_up(nbx, 64 * 8);
+ nby = round_up(nby, 64 * 8);
+ break;
+ case 2: /* HTILE_WIDTH = 4 & HTILE_HEIGHT = 8*/
+ case 1: /* HTILE_WIDTH = 8 & HTILE_HEIGHT = 4*/
+ nbx = round_up(nbx, 64 * 8);
+ nby = round_up(nby, 32 * 8);
+ break;
+ case 0: /* HTILE_WIDTH = 4 & HTILE_HEIGHT = 4*/
+ nbx = round_up(nbx, 32 * 8);
+ nby = round_up(nby, 32 * 8);
+ break;
+ default:
+ return -EINVAL;
+ }
+ break;
+ case 4:
+ switch (tmp) {
+ case 3: /* HTILE_WIDTH = 8 & HTILE_HEIGHT = 8*/
+ nbx = round_up(nbx, 64 * 8);
+ nby = round_up(nby, 32 * 8);
+ break;
+ case 2: /* HTILE_WIDTH = 4 & HTILE_HEIGHT = 8*/
+ case 1: /* HTILE_WIDTH = 8 & HTILE_HEIGHT = 4*/
+ nbx = round_up(nbx, 32 * 8);
+ nby = round_up(nby, 32 * 8);
+ break;
+ case 0: /* HTILE_WIDTH = 4 & HTILE_HEIGHT = 4*/
+ nbx = round_up(nbx, 32 * 8);
+ nby = round_up(nby, 16 * 8);
+ break;
+ default:
+ return -EINVAL;
+ }
+ break;
+ case 2:
+ switch (tmp) {
+ case 3: /* HTILE_WIDTH = 8 & HTILE_HEIGHT = 8*/
+ nbx = round_up(nbx, 32 * 8);
+ nby = round_up(nby, 32 * 8);
+ break;
+ case 2: /* HTILE_WIDTH = 4 & HTILE_HEIGHT = 8*/
+ case 1: /* HTILE_WIDTH = 8 & HTILE_HEIGHT = 4*/
+ nbx = round_up(nbx, 32 * 8);
+ nby = round_up(nby, 16 * 8);
+ break;
+ case 0: /* HTILE_WIDTH = 4 & HTILE_HEIGHT = 4*/
+ nbx = round_up(nbx, 16 * 8);
+ nby = round_up(nby, 16 * 8);
+ break;
+ default:
+ return -EINVAL;
+ }
+ break;
+ case 1:
+ switch (tmp) {
+ case 3: /* HTILE_WIDTH = 8 & HTILE_HEIGHT = 8*/
+ nbx = round_up(nbx, 32 * 8);
+ nby = round_up(nby, 16 * 8);
+ break;
+ case 2: /* HTILE_WIDTH = 4 & HTILE_HEIGHT = 8*/
+ case 1: /* HTILE_WIDTH = 8 & HTILE_HEIGHT = 4*/
+ nbx = round_up(nbx, 16 * 8);
+ nby = round_up(nby, 16 * 8);
+ break;
+ case 0: /* HTILE_WIDTH = 4 & HTILE_HEIGHT = 4*/
+ nbx = round_up(nbx, 16 * 8);
+ nby = round_up(nby, 8 * 8);
+ break;
+ default:
+ return -EINVAL;
+ }
+ break;
+ default:
+ dev_warn(p->dev, "%s:%d invalid num pipes %d\n",
+ __func__, __LINE__, track->npipes);
+ return -EINVAL;
+ }
+ }
+		/* compute the number of htiles */
+ nbx = G_028D24_HTILE_WIDTH(track->htile_surface) ? nbx / 8 : nbx / 4;
+ nby = G_028D24_HTILE_HEIGHT(track->htile_surface) ? nby / 8 : nby / 4;
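+		/* each htile entry is one dword (4 bytes) */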
+ size = nbx * nby * 4;
+ size += track->htile_offset;
+
+ if (size > radeon_bo_size(track->htile_bo)) {
+ dev_warn(p->dev, "%s:%d htile surface too small %ld for %ld (%d %d)\n",
+ __func__, __LINE__, radeon_bo_size(track->htile_bo),
+ size, nbx, nby);
+ return -EINVAL;
+ }
+ }
+
+ track->db_dirty = false;
+ return 0;
+}
+
static int r600_cs_track_check(struct radeon_cs_parser *p)
{
struct r600_cs_track *track = p->track;
u32 tmp;
int r, i;
- volatile u32 *ib = p->ib->ptr;
/* on legacy kernel we don't perform advanced check */
if (p->rdev == NULL)
@@ -513,124 +763,14 @@ static int r600_cs_track_check(struct radeon_cs_parser *p)
track->cb_dirty = false;
}
- if (track->db_dirty) {
- /* Check depth buffer */
- if (G_028800_STENCIL_ENABLE(track->db_depth_control) ||
- G_028800_Z_ENABLE(track->db_depth_control)) {
- u32 nviews, bpe, ntiles, size, slice_tile_max;
- u32 height, height_align, pitch, pitch_align, depth_align;
- u64 base_offset, base_align;
- struct array_mode_checker array_check;
- int array_mode;
-
- if (track->db_bo == NULL) {
- dev_warn(p->dev, "z/stencil with no depth buffer\n");
- return -EINVAL;
- }
- if (G_028010_TILE_SURFACE_ENABLE(track->db_depth_info)) {
- dev_warn(p->dev, "this kernel doesn't support z/stencil htile\n");
- return -EINVAL;
- }
- switch (G_028010_FORMAT(track->db_depth_info)) {
- case V_028010_DEPTH_16:
- bpe = 2;
- break;
- case V_028010_DEPTH_X8_24:
- case V_028010_DEPTH_8_24:
- case V_028010_DEPTH_X8_24_FLOAT:
- case V_028010_DEPTH_8_24_FLOAT:
- case V_028010_DEPTH_32_FLOAT:
- bpe = 4;
- break;
- case V_028010_DEPTH_X24_8_32_FLOAT:
- bpe = 8;
- break;
- default:
- dev_warn(p->dev, "z/stencil with invalid format %d\n", G_028010_FORMAT(track->db_depth_info));
- return -EINVAL;
- }
- if ((track->db_depth_size & 0xFFFFFC00) == 0xFFFFFC00) {
- if (!track->db_depth_size_idx) {
- dev_warn(p->dev, "z/stencil buffer size not set\n");
- return -EINVAL;
- }
- tmp = radeon_bo_size(track->db_bo) - track->db_offset;
- tmp = (tmp / bpe) >> 6;
- if (!tmp) {
- dev_warn(p->dev, "z/stencil buffer too small (0x%08X %d %d %ld)\n",
- track->db_depth_size, bpe, track->db_offset,
- radeon_bo_size(track->db_bo));
- return -EINVAL;
- }
- ib[track->db_depth_size_idx] = S_028000_SLICE_TILE_MAX(tmp - 1) | (track->db_depth_size & 0x3FF);
- } else {
- size = radeon_bo_size(track->db_bo);
- /* pitch in pixels */
- pitch = (G_028000_PITCH_TILE_MAX(track->db_depth_size) + 1) * 8;
- slice_tile_max = G_028000_SLICE_TILE_MAX(track->db_depth_size) + 1;
- slice_tile_max *= 64;
- height = slice_tile_max / pitch;
- if (height > 8192)
- height = 8192;
- base_offset = track->db_bo_mc + track->db_offset;
- array_mode = G_028010_ARRAY_MODE(track->db_depth_info);
- array_check.array_mode = array_mode;
- array_check.group_size = track->group_size;
- array_check.nbanks = track->nbanks;
- array_check.npipes = track->npipes;
- array_check.nsamples = track->nsamples;
- array_check.blocksize = bpe;
- if (r600_get_array_mode_alignment(&array_check,
- &pitch_align, &height_align, &depth_align, &base_align)) {
- dev_warn(p->dev, "%s invalid tiling %d (0x%08X)\n", __func__,
- G_028010_ARRAY_MODE(track->db_depth_info),
- track->db_depth_info);
- return -EINVAL;
- }
- switch (array_mode) {
- case V_028010_ARRAY_1D_TILED_THIN1:
- /* don't break userspace */
- height &= ~0x7;
- break;
- case V_028010_ARRAY_2D_TILED_THIN1:
- break;
- default:
- dev_warn(p->dev, "%s invalid tiling %d (0x%08X)\n", __func__,
- G_028010_ARRAY_MODE(track->db_depth_info),
- track->db_depth_info);
- return -EINVAL;
- }
-
- if (!IS_ALIGNED(pitch, pitch_align)) {
- dev_warn(p->dev, "%s:%d db pitch (%d, 0x%x, %d) invalid\n",
- __func__, __LINE__, pitch, pitch_align, array_mode);
- return -EINVAL;
- }
- if (!IS_ALIGNED(height, height_align)) {
- dev_warn(p->dev, "%s:%d db height (%d, 0x%x, %d) invalid\n",
- __func__, __LINE__, height, height_align, array_mode);
- return -EINVAL;
- }
- if (!IS_ALIGNED(base_offset, base_align)) {
- dev_warn(p->dev, "%s offset[%d] 0x%llx, 0x%llx, %d not aligned\n", __func__, i,
- base_offset, base_align, array_mode);
- return -EINVAL;
- }
-
- ntiles = G_028000_SLICE_TILE_MAX(track->db_depth_size) + 1;
- nviews = G_028004_SLICE_MAX(track->db_depth_view) + 1;
- tmp = ntiles * bpe * 64 * nviews;
- if ((tmp + track->db_offset) > radeon_bo_size(track->db_bo)) {
- dev_warn(p->dev, "z/stencil buffer (%d) too small (0x%08X %d %d %d -> %u have %lu)\n",
- array_mode,
- track->db_depth_size, ntiles, nviews, bpe, tmp + track->db_offset,
- radeon_bo_size(track->db_bo));
- return -EINVAL;
- }
- }
- }
- track->db_dirty = false;
+ /* Check depth buffer */
+ if (track->db_dirty && (G_028800_STENCIL_ENABLE(track->db_depth_control) ||
+ G_028800_Z_ENABLE(track->db_depth_control))) {
+ r = r600_cs_track_validate_db(p);
+ if (r)
+ return r;
}
+
return 0;
}
@@ -1244,6 +1384,21 @@ static int r600_cs_check_reg(struct radeon_cs_parser *p, u32 reg, u32 idx)
track->db_dirty = true;
break;
case DB_HTILE_DATA_BASE:
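+		/* htile base is a GPU address and needs a relocation */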
+ r = r600_cs_packet_next_reloc(p, &reloc);
+ if (r) {
+ dev_warn(p->dev, "bad SET_CONTEXT_REG "
+ "0x%04X\n", reg);
+ return -EINVAL;
+ }
+ track->htile_offset = radeon_get_ib_value(p, idx) << 8;
+ ib[idx] += (u32)((reloc->lobj.gpu_offset >> 8) & 0xffffffff);
+ track->htile_bo = reloc->robj;
+ track->db_dirty = true;
+ break;
+ case DB_HTILE_SURFACE:
+ track->htile_surface = radeon_get_ib_value(p, idx);
+ track->db_dirty = true;
+ break;
case SQ_PGM_START_FS:
case SQ_PGM_START_ES:
case SQ_PGM_START_VS:
diff --git a/drivers/gpu/drm/radeon/r600d.h b/drivers/gpu/drm/radeon/r600d.h
index 3568a2e345f..59f9c993cc3 100644
--- a/drivers/gpu/drm/radeon/r600d.h
+++ b/drivers/gpu/drm/radeon/r600d.h
@@ -195,6 +195,14 @@
#define PREZ_MUST_WAIT_FOR_POSTZ_DONE (1 << 31)
#define DB_DEPTH_BASE 0x2800C
#define DB_HTILE_DATA_BASE 0x28014
+#define DB_HTILE_SURFACE 0x28D24
+#define S_028D24_HTILE_WIDTH(x) (((x) & 0x1) << 0)
+#define G_028D24_HTILE_WIDTH(x) (((x) >> 0) & 0x1)
+#define C_028D24_HTILE_WIDTH 0xFFFFFFFE
+#define S_028D24_HTILE_HEIGHT(x) (((x) & 0x1) << 1)
+#define G_028D24_HTILE_HEIGHT(x) (((x) >> 1) & 0x1)
+#define C_028D24_HTILE_HEIGHT 0xFFFFFFFD
+#define G_028D24_LINEAR(x) (((x) >> 2) & 0x1)
#define DB_WATERMARKS 0x9838
#define DEPTH_FREE(x) ((x) << 0)
#define DEPTH_FLUSH(x) ((x) << 5)
diff --git a/drivers/gpu/drm/radeon/radeon_object.c b/drivers/gpu/drm/radeon/radeon_object.c
index 91541e63d58..df6a4dbd93f 100644
--- a/drivers/gpu/drm/radeon/radeon_object.c
+++ b/drivers/gpu/drm/radeon/radeon_object.c
@@ -233,7 +233,18 @@ int radeon_bo_pin_restricted(struct radeon_bo *bo, u32 domain, u64 max_offset,
bo->pin_count++;
if (gpu_addr)
*gpu_addr = radeon_bo_gpu_offset(bo);
- WARN_ON_ONCE(max_offset != 0);
+
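+	/* warn if the buffer ended up pinned beyond the requested limit */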
+ if (max_offset != 0) {
+ u64 domain_start;
+
+ if (domain == RADEON_GEM_DOMAIN_VRAM)
+ domain_start = bo->rdev->mc.vram_start;
+ else
+ domain_start = bo->rdev->mc.gtt_start;
+ WARN_ON_ONCE(max_offset <
+ (radeon_bo_gpu_offset(bo) - domain_start));
+ }
+
return 0;
}
radeon_ttm_placement_from_domain(bo, domain);
diff --git a/drivers/gpu/drm/radeon/reg_srcs/cayman b/drivers/gpu/drm/radeon/reg_srcs/cayman
index aea63c41585..0f656b111c1 100644
--- a/drivers/gpu/drm/radeon/reg_srcs/cayman
+++ b/drivers/gpu/drm/radeon/reg_srcs/cayman
@@ -509,7 +509,6 @@ cayman 0x9400
0x00028AA8 IA_MULTI_VGT_PARAM
0x00028AB4 VGT_REUSE_OFF
0x00028AB8 VGT_VTX_CNT_EN
-0x00028ABC DB_HTILE_SURFACE
0x00028AC0 DB_SRESULTS_COMPARE_STATE0
0x00028AC4 DB_SRESULTS_COMPARE_STATE1
0x00028AC8 DB_PRELOAD_CONTROL
diff --git a/drivers/gpu/drm/radeon/reg_srcs/evergreen b/drivers/gpu/drm/radeon/reg_srcs/evergreen
index 77c37202376..b912a37689b 100644
--- a/drivers/gpu/drm/radeon/reg_srcs/evergreen
+++ b/drivers/gpu/drm/radeon/reg_srcs/evergreen
@@ -519,7 +519,6 @@ evergreen 0x9400
0x00028AA4 VGT_INSTANCE_STEP_RATE_1
0x00028AB4 VGT_REUSE_OFF
0x00028AB8 VGT_VTX_CNT_EN
-0x00028ABC DB_HTILE_SURFACE
0x00028AC0 DB_SRESULTS_COMPARE_STATE0
0x00028AC4 DB_SRESULTS_COMPARE_STATE1
0x00028AC8 DB_PRELOAD_CONTROL
diff --git a/drivers/gpu/drm/radeon/reg_srcs/r600 b/drivers/gpu/drm/radeon/reg_srcs/r600
index 626c24ea0b5..5e659b034d9 100644
--- a/drivers/gpu/drm/radeon/reg_srcs/r600
+++ b/drivers/gpu/drm/radeon/reg_srcs/r600
@@ -713,7 +713,6 @@ r600 0x9400
0x0000A710 TD_VS_SAMPLER17_BORDER_RED
0x00009508 TA_CNTL_AUX
0x0002802C DB_DEPTH_CLEAR
-0x00028D24 DB_HTILE_SURFACE
0x00028D34 DB_PREFETCH_LIMIT
0x00028D30 DB_PRELOAD_CONTROL
0x00028D0C DB_RENDER_CONTROL
diff --git a/drivers/gpu/drm/udl/udl_drv.c b/drivers/gpu/drm/udl/udl_drv.c
index 5340c5f3987..53673907a6a 100644
--- a/drivers/gpu/drm/udl/udl_drv.c
+++ b/drivers/gpu/drm/udl/udl_drv.c
@@ -47,7 +47,7 @@ static struct vm_operations_struct udl_gem_vm_ops = {
static const struct file_operations udl_driver_fops = {
.owner = THIS_MODULE,
.open = drm_open,
- .mmap = drm_gem_mmap,
+ .mmap = udl_drm_gem_mmap,
.poll = drm_poll,
.read = drm_read,
.unlocked_ioctl = drm_ioctl,
diff --git a/drivers/gpu/drm/udl/udl_drv.h b/drivers/gpu/drm/udl/udl_drv.h
index 1612954a5bc..96820d03a30 100644
--- a/drivers/gpu/drm/udl/udl_drv.h
+++ b/drivers/gpu/drm/udl/udl_drv.h
@@ -121,6 +121,7 @@ struct udl_gem_object *udl_gem_alloc_object(struct drm_device *dev,
int udl_gem_vmap(struct udl_gem_object *obj);
void udl_gem_vunmap(struct udl_gem_object *obj);
+int udl_drm_gem_mmap(struct file *filp, struct vm_area_struct *vma);
int udl_gem_fault(struct vm_area_struct *vma, struct vm_fault *vmf);
int udl_handle_damage(struct udl_framebuffer *fb, int x, int y,
diff --git a/drivers/gpu/drm/udl/udl_gem.c b/drivers/gpu/drm/udl/udl_gem.c
index 852642dc118..92f19ef329b 100644
--- a/drivers/gpu/drm/udl/udl_gem.c
+++ b/drivers/gpu/drm/udl/udl_gem.c
@@ -71,6 +71,20 @@ int udl_dumb_destroy(struct drm_file *file, struct drm_device *dev,
return drm_gem_handle_delete(file, handle);
}
+int udl_drm_gem_mmap(struct file *filp, struct vm_area_struct *vma)
+{
+ int ret;
+
+ ret = drm_gem_mmap(filp, vma);
+ if (ret)
+ return ret;
+
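+	/* drm_gem_mmap() sets up a PFN map; switch to a mixed map so the
+	 * GEM fault handler can insert shmem-backed pages.
+	 */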
+ vma->vm_flags &= ~VM_PFNMAP;
+ vma->vm_flags |= VM_MIXEDMAP;
+
+ return ret;
+}
+
int udl_gem_fault(struct vm_area_struct *vma, struct vm_fault *vmf)
{
struct udl_gem_object *obj = to_udl_bo(vma->vm_private_data);