| field | value | date |
|---|---|---|
| author | Jens Axboe <axboe@kernel.dk> | 2012-05-01 14:29:55 +0200 |
| committer | Jens Axboe <axboe@kernel.dk> | 2012-05-01 14:29:55 +0200 |
| commit | 0b7877d4eea3f93e3dd941999522bbd8c538cb53 (patch) | |
| tree | ade6d4e411b9b9b569c802e3b2179826162c934c /drivers/gpu/drm | |
| parent | bd1a68b59c8e3bce45fb76632c64e1e063c3962d (diff) | |
| parent | 69964ea4c7b68c9399f7977aa5b9aa6539a6a98a (diff) | |
Merge tag 'v3.4-rc5' into for-3.5/core
The core branch is behind driver commits that we want to build
on for 3.5, hence I'm pulling in a later -rc.
Linux 3.4-rc5
Conflicts:
Documentation/feature-removal-schedule.txt
Signed-off-by: Jens Axboe <axboe@kernel.dk>
Diffstat (limited to 'drivers/gpu/drm')
72 files changed, 966 insertions, 418 deletions
diff --git a/drivers/gpu/drm/Kconfig b/drivers/gpu/drm/Kconfig index cc1148837e2..e354bc0b052 100644 --- a/drivers/gpu/drm/Kconfig +++ b/drivers/gpu/drm/Kconfig @@ -9,6 +9,7 @@ menuconfig DRM depends on (AGP || AGP=n) && !EMULATED_CMPXCHG && MMU select I2C select I2C_ALGOBIT + select DMA_SHARED_BUFFER help Kernel-level support for the Direct Rendering Infrastructure (DRI) introduced in XFree86 4.0. If you say Y here, you need to select diff --git a/drivers/gpu/drm/Makefile b/drivers/gpu/drm/Makefile index a858532806a..c20da5bda35 100644 --- a/drivers/gpu/drm/Makefile +++ b/drivers/gpu/drm/Makefile @@ -12,7 +12,7 @@ drm-y := drm_auth.o drm_buffer.o drm_bufs.o drm_cache.o \ drm_platform.o drm_sysfs.o drm_hashtab.o drm_mm.o \ drm_crtc.o drm_modes.o drm_edid.o \ drm_info.o drm_debugfs.o drm_encoder_slave.o \ - drm_trace_points.o drm_global.o + drm_trace_points.o drm_global.o drm_prime.o drm-$(CONFIG_COMPAT) += drm_ioc32.o diff --git a/drivers/gpu/drm/drm_bufs.c b/drivers/gpu/drm/drm_bufs.c index 30372f7b2d4..348b367debe 100644 --- a/drivers/gpu/drm/drm_bufs.c +++ b/drivers/gpu/drm/drm_bufs.c @@ -1510,8 +1510,8 @@ int drm_freebufs(struct drm_device *dev, void *data, * \param arg pointer to a drm_buf_map structure. * \return zero on success or a negative number on failure. * - * Maps the AGP, SG or PCI buffer region with do_mmap(), and copies information - * about each buffer into user space. For PCI buffers, it calls do_mmap() with + * Maps the AGP, SG or PCI buffer region with vm_mmap(), and copies information + * about each buffer into user space. For PCI buffers, it calls vm_mmap() with * offset equal to 0, which drm_mmap() interpretes as PCI buffers and calls * drm_mmap_dma(). */ @@ -1553,18 +1553,14 @@ int drm_mapbufs(struct drm_device *dev, void *data, retcode = -EINVAL; goto done; } - down_write(¤t->mm->mmap_sem); - virtual = do_mmap(file_priv->filp, 0, map->size, + virtual = vm_mmap(file_priv->filp, 0, map->size, PROT_READ | PROT_WRITE, MAP_SHARED, token); - up_write(¤t->mm->mmap_sem); } else { - down_write(¤t->mm->mmap_sem); - virtual = do_mmap(file_priv->filp, 0, dma->byte_count, + virtual = vm_mmap(file_priv->filp, 0, dma->byte_count, PROT_READ | PROT_WRITE, MAP_SHARED, 0); - up_write(¤t->mm->mmap_sem); } if (virtual > -1024UL) { /* Real error */ diff --git a/drivers/gpu/drm/drm_crtc.c b/drivers/gpu/drm/drm_crtc.c index d3aaeb6ae23..c79870a75c2 100644 --- a/drivers/gpu/drm/drm_crtc.c +++ b/drivers/gpu/drm/drm_crtc.c @@ -3335,10 +3335,12 @@ int drm_mode_page_flip_ioctl(struct drm_device *dev, ret = crtc->funcs->page_flip(crtc, fb, e); if (ret) { - spin_lock_irqsave(&dev->event_lock, flags); - file_priv->event_space += sizeof e->event; - spin_unlock_irqrestore(&dev->event_lock, flags); - kfree(e); + if (page_flip->flags & DRM_MODE_PAGE_FLIP_EVENT) { + spin_lock_irqsave(&dev->event_lock, flags); + file_priv->event_space += sizeof e->event; + spin_unlock_irqrestore(&dev->event_lock, flags); + kfree(e); + } } out: diff --git a/drivers/gpu/drm/drm_drv.c b/drivers/gpu/drm/drm_drv.c index 0b65fbc8a63..6116e3b7539 100644 --- a/drivers/gpu/drm/drm_drv.c +++ b/drivers/gpu/drm/drm_drv.c @@ -136,6 +136,10 @@ static struct drm_ioctl_desc drm_ioctls[] = { DRM_IOCTL_DEF(DRM_IOCTL_GEM_OPEN, drm_gem_open_ioctl, DRM_AUTH|DRM_UNLOCKED), DRM_IOCTL_DEF(DRM_IOCTL_MODE_GETRESOURCES, drm_mode_getresources, DRM_CONTROL_ALLOW|DRM_UNLOCKED), + + DRM_IOCTL_DEF(DRM_IOCTL_PRIME_HANDLE_TO_FD, drm_prime_handle_to_fd_ioctl, DRM_AUTH|DRM_UNLOCKED), + DRM_IOCTL_DEF(DRM_IOCTL_PRIME_FD_TO_HANDLE, 
drm_prime_fd_to_handle_ioctl, DRM_AUTH|DRM_UNLOCKED), + DRM_IOCTL_DEF(DRM_IOCTL_MODE_GETPLANERESOURCES, drm_mode_getplane_res, DRM_MASTER|DRM_CONTROL_ALLOW|DRM_UNLOCKED), DRM_IOCTL_DEF(DRM_IOCTL_MODE_GETCRTC, drm_mode_getcrtc, DRM_CONTROL_ALLOW|DRM_UNLOCKED), DRM_IOCTL_DEF(DRM_IOCTL_MODE_SETCRTC, drm_mode_setcrtc, DRM_MASTER|DRM_CONTROL_ALLOW|DRM_UNLOCKED), diff --git a/drivers/gpu/drm/drm_fb_helper.c b/drivers/gpu/drm/drm_fb_helper.c index 7740dd26f00..a0d6e894d97 100644 --- a/drivers/gpu/drm/drm_fb_helper.c +++ b/drivers/gpu/drm/drm_fb_helper.c @@ -559,9 +559,13 @@ int drm_fb_helper_check_var(struct fb_var_screeninfo *var, return -EINVAL; /* Need to resize the fb object !!! */ - if (var->bits_per_pixel > fb->bits_per_pixel || var->xres > fb->width || var->yres > fb->height) { + if (var->bits_per_pixel > fb->bits_per_pixel || + var->xres > fb->width || var->yres > fb->height || + var->xres_virtual > fb->width || var->yres_virtual > fb->height) { DRM_DEBUG("fb userspace requested width/height/bpp is greater than current fb " - "object %dx%d-%d > %dx%d-%d\n", var->xres, var->yres, var->bits_per_pixel, + "request %dx%d-%d (virtual %dx%d) > %dx%d-%d\n", + var->xres, var->yres, var->bits_per_pixel, + var->xres_virtual, var->yres_virtual, fb->width, fb->height, fb->bits_per_pixel); return -EINVAL; } diff --git a/drivers/gpu/drm/drm_fops.c b/drivers/gpu/drm/drm_fops.c index 7348a3dab25..123de28f94e 100644 --- a/drivers/gpu/drm/drm_fops.c +++ b/drivers/gpu/drm/drm_fops.c @@ -271,6 +271,9 @@ static int drm_open_helper(struct inode *inode, struct file *filp, if (dev->driver->driver_features & DRIVER_GEM) drm_gem_open(dev, priv); + if (drm_core_check_feature(dev, DRIVER_PRIME)) + drm_prime_init_file_private(&priv->prime); + if (dev->driver->open) { ret = dev->driver->open(dev, priv); if (ret < 0) @@ -504,12 +507,12 @@ int drm_release(struct inode *inode, struct file *filp) drm_events_release(file_priv); - if (dev->driver->driver_features & DRIVER_GEM) - drm_gem_release(dev, file_priv); - if (dev->driver->driver_features & DRIVER_MODESET) drm_fb_release(file_priv); + if (dev->driver->driver_features & DRIVER_GEM) + drm_gem_release(dev, file_priv); + mutex_lock(&dev->ctxlist_mutex); if (!list_empty(&dev->ctxlist)) { struct drm_ctx_list *pos, *n; @@ -571,6 +574,10 @@ int drm_release(struct inode *inode, struct file *filp) if (dev->driver->postclose) dev->driver->postclose(dev, file_priv); + + if (drm_core_check_feature(dev, DRIVER_PRIME)) + drm_prime_destroy_file_private(&file_priv->prime); + kfree(file_priv); /* ======================================================== diff --git a/drivers/gpu/drm/drm_gem.c b/drivers/gpu/drm/drm_gem.c index 0ef358e5324..83114b5e3ce 100644 --- a/drivers/gpu/drm/drm_gem.c +++ b/drivers/gpu/drm/drm_gem.c @@ -35,6 +35,7 @@ #include <linux/mman.h> #include <linux/pagemap.h> #include <linux/shmem_fs.h> +#include <linux/dma-buf.h> #include "drmP.h" /** @file drm_gem.c @@ -232,6 +233,10 @@ drm_gem_handle_delete(struct drm_file *filp, u32 handle) idr_remove(&filp->object_idr, handle); spin_unlock(&filp->table_lock); + if (obj->import_attach) + drm_prime_remove_imported_buf_handle(&filp->prime, + obj->import_attach->dmabuf); + if (dev->driver->gem_close_object) dev->driver->gem_close_object(obj, filp); drm_gem_object_handle_unreference_unlocked(obj); @@ -527,6 +532,10 @@ drm_gem_object_release_handle(int id, void *ptr, void *data) struct drm_gem_object *obj = ptr; struct drm_device *dev = obj->dev; + if (obj->import_attach) + 
drm_prime_remove_imported_buf_handle(&file_priv->prime, + obj->import_attach->dmabuf); + if (dev->driver->gem_close_object) dev->driver->gem_close_object(obj, file_priv); diff --git a/drivers/gpu/drm/drm_prime.c b/drivers/gpu/drm/drm_prime.c new file mode 100644 index 00000000000..1bdf2b54eaf --- /dev/null +++ b/drivers/gpu/drm/drm_prime.c @@ -0,0 +1,304 @@ +/* + * Copyright © 2012 Red Hat + * + * Permission is hereby granted, free of charge, to any person obtaining a + * copy of this software and associated documentation files (the "Software"), + * to deal in the Software without restriction, including without limitation + * the rights to use, copy, modify, merge, publish, distribute, sublicense, + * and/or sell copies of the Software, and to permit persons to whom the + * Software is furnished to do so, subject to the following conditions: + * + * The above copyright notice and this permission notice (including the next + * paragraph) shall be included in all copies or substantial portions of the + * Software. + * + * THE SOFTWARE IS PROVIDED "AS IS", WITHOUT WARRANTY OF ANY KIND, EXPRESS OR + * IMPLIED, INCLUDING BUT NOT LIMITED TO THE WARRANTIES OF MERCHANTABILITY, + * FITNESS FOR A PARTICULAR PURPOSE AND NONINFRINGEMENT. IN NO EVENT SHALL + * THE AUTHORS OR COPYRIGHT HOLDERS BE LIABLE FOR ANY CLAIM, DAMAGES OR OTHER + * LIABILITY, WHETHER IN AN ACTION OF CONTRACT, TORT OR OTHERWISE, ARISING + * FROM, OUT OF OR IN CONNECTION WITH THE SOFTWARE OR THE USE OR OTHER DEALINGS + * IN THE SOFTWARE. + * + * Authors: + * Dave Airlie <airlied@redhat.com> + * Rob Clark <rob.clark@linaro.org> + * + */ + +#include <linux/export.h> +#include <linux/dma-buf.h> +#include "drmP.h" + +/* + * DMA-BUF/GEM Object references and lifetime overview: + * + * On the export the dma_buf holds a reference to the exporting GEM + * object. It takes this reference in handle_to_fd_ioctl, when it + * first calls .prime_export and stores the exporting GEM object in + * the dma_buf priv. This reference is released when the dma_buf + * object goes away in the driver .release function. + * + * On the import the importing GEM object holds a reference to the + * dma_buf (which in turn holds a ref to the exporting GEM object). + * It takes that reference in the fd_to_handle ioctl. + * It calls dma_buf_get, creates an attachment to it and stores the + * attachment in the GEM object. When this attachment is destroyed + * when the imported object is destroyed, we remove the attachment + * and drop the reference to the dma_buf. + * + * Thus the chain of references always flows in one direction + * (avoiding loops): importing_gem -> dmabuf -> exporting_gem + * + * Self-importing: if userspace is using PRIME as a replacement for flink + * then it will get a fd->handle request for a GEM object that it created. + * Drivers should detect this situation and return back the gem object + * from the dma-buf private. 
+ */ + +struct drm_prime_member { + struct list_head entry; + struct dma_buf *dma_buf; + uint32_t handle; +}; + +int drm_gem_prime_handle_to_fd(struct drm_device *dev, + struct drm_file *file_priv, uint32_t handle, uint32_t flags, + int *prime_fd) +{ + struct drm_gem_object *obj; + void *buf; + + obj = drm_gem_object_lookup(dev, file_priv, handle); + if (!obj) + return -ENOENT; + + mutex_lock(&file_priv->prime.lock); + /* re-export the original imported object */ + if (obj->import_attach) { + get_dma_buf(obj->import_attach->dmabuf); + *prime_fd = dma_buf_fd(obj->import_attach->dmabuf, flags); + drm_gem_object_unreference_unlocked(obj); + mutex_unlock(&file_priv->prime.lock); + return 0; + } + + if (obj->export_dma_buf) { + get_dma_buf(obj->export_dma_buf); + *prime_fd = dma_buf_fd(obj->export_dma_buf, flags); + drm_gem_object_unreference_unlocked(obj); + } else { + buf = dev->driver->gem_prime_export(dev, obj, flags); + if (IS_ERR(buf)) { + /* normally the created dma-buf takes ownership of the ref, + * but if that fails then drop the ref + */ + drm_gem_object_unreference_unlocked(obj); + mutex_unlock(&file_priv->prime.lock); + return PTR_ERR(buf); + } + obj->export_dma_buf = buf; + *prime_fd = dma_buf_fd(buf, flags); + } + mutex_unlock(&file_priv->prime.lock); + return 0; +} +EXPORT_SYMBOL(drm_gem_prime_handle_to_fd); + +int drm_gem_prime_fd_to_handle(struct drm_device *dev, + struct drm_file *file_priv, int prime_fd, uint32_t *handle) +{ + struct dma_buf *dma_buf; + struct drm_gem_object *obj; + int ret; + + dma_buf = dma_buf_get(prime_fd); + if (IS_ERR(dma_buf)) + return PTR_ERR(dma_buf); + + mutex_lock(&file_priv->prime.lock); + + ret = drm_prime_lookup_imported_buf_handle(&file_priv->prime, + dma_buf, handle); + if (!ret) { + ret = 0; + goto out_put; + } + + /* never seen this one, need to import */ + obj = dev->driver->gem_prime_import(dev, dma_buf); + if (IS_ERR(obj)) { + ret = PTR_ERR(obj); + goto out_put; + } + + ret = drm_gem_handle_create(file_priv, obj, handle); + drm_gem_object_unreference_unlocked(obj); + if (ret) + goto out_put; + + ret = drm_prime_add_imported_buf_handle(&file_priv->prime, + dma_buf, *handle); + if (ret) + goto fail; + + mutex_unlock(&file_priv->prime.lock); + return 0; + +fail: + /* hmm, if driver attached, we are relying on the free-object path + * to detach.. which seems ok.. 
+ */ + drm_gem_object_handle_unreference_unlocked(obj); +out_put: + dma_buf_put(dma_buf); + mutex_unlock(&file_priv->prime.lock); + return ret; +} +EXPORT_SYMBOL(drm_gem_prime_fd_to_handle); + +int drm_prime_handle_to_fd_ioctl(struct drm_device *dev, void *data, + struct drm_file *file_priv) +{ + struct drm_prime_handle *args = data; + uint32_t flags; + + if (!drm_core_check_feature(dev, DRIVER_PRIME)) + return -EINVAL; + + if (!dev->driver->prime_handle_to_fd) + return -ENOSYS; + + /* check flags are valid */ + if (args->flags & ~DRM_CLOEXEC) + return -EINVAL; + + /* we only want to pass DRM_CLOEXEC which is == O_CLOEXEC */ + flags = args->flags & DRM_CLOEXEC; + + return dev->driver->prime_handle_to_fd(dev, file_priv, + args->handle, flags, &args->fd); +} + +int drm_prime_fd_to_handle_ioctl(struct drm_device *dev, void *data, + struct drm_file *file_priv) +{ + struct drm_prime_handle *args = data; + + if (!drm_core_check_feature(dev, DRIVER_PRIME)) + return -EINVAL; + + if (!dev->driver->prime_fd_to_handle) + return -ENOSYS; + + return dev->driver->prime_fd_to_handle(dev, file_priv, + args->fd, &args->handle); +} + +/* + * drm_prime_pages_to_sg + * + * this helper creates an sg table object from a set of pages + * the driver is responsible for mapping the pages into the + * importers address space + */ +struct sg_table *drm_prime_pages_to_sg(struct page **pages, int nr_pages) +{ + struct sg_table *sg = NULL; + struct scatterlist *iter; + int i; + int ret; + + sg = kmalloc(sizeof(struct sg_table), GFP_KERNEL); + if (!sg) + goto out; + + ret = sg_alloc_table(sg, nr_pages, GFP_KERNEL); + if (ret) + goto out; + + for_each_sg(sg->sgl, iter, nr_pages, i) + sg_set_page(iter, pages[i], PAGE_SIZE, 0); + + return sg; +out: + kfree(sg); + return NULL; +} +EXPORT_SYMBOL(drm_prime_pages_to_sg); + +/* helper function to cleanup a GEM/prime object */ +void drm_prime_gem_destroy(struct drm_gem_object *obj, struct sg_table *sg) +{ + struct dma_buf_attachment *attach; + struct dma_buf *dma_buf; + attach = obj->import_attach; + if (sg) + dma_buf_unmap_attachment(attach, sg, DMA_BIDIRECTIONAL); + dma_buf = attach->dmabuf; + dma_buf_detach(attach->dmabuf, attach); + /* remove the reference */ + dma_buf_put(dma_buf); +} +EXPORT_SYMBOL(drm_prime_gem_destroy); + +void drm_prime_init_file_private(struct drm_prime_file_private *prime_fpriv) +{ + INIT_LIST_HEAD(&prime_fpriv->head); + mutex_init(&prime_fpriv->lock); +} +EXPORT_SYMBOL(drm_prime_init_file_private); + +void drm_prime_destroy_file_private(struct drm_prime_file_private *prime_fpriv) +{ + struct drm_prime_member *member, *safe; + list_for_each_entry_safe(member, safe, &prime_fpriv->head, entry) { + list_del(&member->entry); + kfree(member); + } +} +EXPORT_SYMBOL(drm_prime_destroy_file_private); + +int drm_prime_add_imported_buf_handle(struct drm_prime_file_private *prime_fpriv, struct dma_buf *dma_buf, uint32_t handle) +{ + struct drm_prime_member *member; + + member = kmalloc(sizeof(*member), GFP_KERNEL); + if (!member) + return -ENOMEM; + + member->dma_buf = dma_buf; + member->handle = handle; + list_add(&member->entry, &prime_fpriv->head); + return 0; +} +EXPORT_SYMBOL(drm_prime_add_imported_buf_handle); + +int drm_prime_lookup_imported_buf_handle(struct drm_prime_file_private *prime_fpriv, struct dma_buf *dma_buf, uint32_t *handle) +{ + struct drm_prime_member *member; + + list_for_each_entry(member, &prime_fpriv->head, entry) { + if (member->dma_buf == dma_buf) { + *handle = member->handle; + return 0; + } + } + return -ENOENT; +} 
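The lifetime overview and the two ioctl handlers above describe the kernel side of PRIME; from userspace the flow is just the two new ioctls. Below is a minimal sketch, assuming a libdrm-style install where `struct drm_prime_handle`, `DRM_IOCTL_PRIME_HANDLE_TO_FD`, `DRM_IOCTL_PRIME_FD_TO_HANDLE` and `DRM_CLOEXEC` are available from `<drm/drm.h>`; the helper names, header path and error handling are illustrative, not part of this patch, and `gem_handle` is assumed to come from a prior driver-specific GEM allocation ioctl.

```c
#include <stdint.h>
#include <string.h>
#include <fcntl.h>       /* O_CLOEXEC, which DRM_CLOEXEC aliases */
#include <sys/ioctl.h>
#include <drm/drm.h>     /* struct drm_prime_handle, DRM_IOCTL_PRIME_*, DRM_CLOEXEC */

/* Export a GEM handle owned by drm_fd as a dma-buf file descriptor. */
static int gem_handle_to_prime_fd(int drm_fd, uint32_t gem_handle, int *prime_fd)
{
	struct drm_prime_handle args;

	memset(&args, 0, sizeof(args));
	args.handle = gem_handle;
	args.flags = DRM_CLOEXEC;	/* the only flag the ioctl accepts */

	if (ioctl(drm_fd, DRM_IOCTL_PRIME_HANDLE_TO_FD, &args) < 0)
		return -1;
	*prime_fd = args.fd;
	return 0;
}

/* Import a dma-buf fd into drm_fd; self-imports give back the original handle. */
static int prime_fd_to_gem_handle(int drm_fd, int prime_fd, uint32_t *gem_handle)
{
	struct drm_prime_handle args;

	memset(&args, 0, sizeof(args));
	args.fd = prime_fd;

	if (ioctl(drm_fd, DRM_IOCTL_PRIME_FD_TO_HANDLE, &args) < 0)
		return -1;
	*gem_handle = args.handle;
	return 0;
}
```

A second process (or a second open of the same device node) would receive the exported fd over a unix socket and call the import helper; the per-file `drm_prime_file_private` list maintained above is what makes a repeated import of the same dma-buf return the already-created handle instead of a new object.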
+EXPORT_SYMBOL(drm_prime_lookup_imported_buf_handle); + +void drm_prime_remove_imported_buf_handle(struct drm_prime_file_private *prime_fpriv, struct dma_buf *dma_buf) +{ + struct drm_prime_member *member, *safe; + + mutex_lock(&prime_fpriv->lock); + list_for_each_entry_safe(member, safe, &prime_fpriv->head, entry) { + if (member->dma_buf == dma_buf) { + list_del(&member->entry); + kfree(member); + } + } + mutex_unlock(&prime_fpriv->lock); +} +EXPORT_SYMBOL(drm_prime_remove_imported_buf_handle); diff --git a/drivers/gpu/drm/drm_usb.c b/drivers/gpu/drm/drm_usb.c index c8c83dad2ce..37c9a523dd1 100644 --- a/drivers/gpu/drm/drm_usb.c +++ b/drivers/gpu/drm/drm_usb.c @@ -1,6 +1,6 @@ #include "drmP.h" #include <linux/usb.h> -#include <linux/export.h> +#include <linux/module.h> int drm_get_usb_dev(struct usb_interface *interface, const struct usb_device_id *id, @@ -114,3 +114,7 @@ void drm_usb_exit(struct drm_driver *driver, usb_deregister(udriver); } EXPORT_SYMBOL(drm_usb_exit); + +MODULE_AUTHOR("David Airlie"); +MODULE_DESCRIPTION("USB DRM support"); +MODULE_LICENSE("GPL and additional rights"); diff --git a/drivers/gpu/drm/exynos/exynos_drm_buf.c b/drivers/gpu/drm/exynos/exynos_drm_buf.c index 4a3a5f72ed4..de8d2090bce 100644 --- a/drivers/gpu/drm/exynos/exynos_drm_buf.c +++ b/drivers/gpu/drm/exynos/exynos_drm_buf.c @@ -34,14 +34,14 @@ static int lowlevel_buffer_allocate(struct drm_device *dev, unsigned int flags, struct exynos_drm_gem_buf *buf) { - dma_addr_t start_addr, end_addr; + dma_addr_t start_addr; unsigned int npages, page_size, i = 0; struct scatterlist *sgl; int ret = 0; DRM_DEBUG_KMS("%s\n", __FILE__); - if (flags & EXYNOS_BO_NONCONTIG) { + if (IS_NONCONTIG_BUFFER(flags)) { DRM_DEBUG_KMS("not support allocation type.\n"); return -EINVAL; } @@ -52,13 +52,13 @@ static int lowlevel_buffer_allocate(struct drm_device *dev, } if (buf->size >= SZ_1M) { - npages = (buf->size >> SECTION_SHIFT) + 1; + npages = buf->size >> SECTION_SHIFT; page_size = SECTION_SIZE; } else if (buf->size >= SZ_64K) { - npages = (buf->size >> 16) + 1; + npages = buf->size >> 16; page_size = SZ_64K; } else { - npages = (buf->size >> PAGE_SHIFT) + 1; + npages = buf->size >> PAGE_SHIFT; page_size = PAGE_SIZE; } @@ -76,26 +76,13 @@ static int lowlevel_buffer_allocate(struct drm_device *dev, return -ENOMEM; } - buf->kvaddr = dma_alloc_writecombine(dev->dev, buf->size, - &buf->dma_addr, GFP_KERNEL); - if (!buf->kvaddr) { - DRM_ERROR("failed to allocate buffer.\n"); - ret = -ENOMEM; - goto err1; - } - - start_addr = buf->dma_addr; - end_addr = buf->dma_addr + buf->size; - - buf->pages = kzalloc(sizeof(struct page) * npages, GFP_KERNEL); - if (!buf->pages) { - DRM_ERROR("failed to allocate pages.\n"); - ret = -ENOMEM; - goto err2; - } - - start_addr = buf->dma_addr; - end_addr = buf->dma_addr + buf->size; + buf->kvaddr = dma_alloc_writecombine(dev->dev, buf->size, + &buf->dma_addr, GFP_KERNEL); + if (!buf->kvaddr) { + DRM_ERROR("failed to allocate buffer.\n"); + ret = -ENOMEM; + goto err1; + } buf->pages = kzalloc(sizeof(struct page) * npages, GFP_KERNEL); if (!buf->pages) { @@ -105,23 +92,17 @@ static int lowlevel_buffer_allocate(struct drm_device *dev, } sgl = buf->sgt->sgl; + start_addr = buf->dma_addr; while (i < npages) { buf->pages[i] = phys_to_page(start_addr); sg_set_page(sgl, buf->pages[i], page_size, 0); sg_dma_address(sgl) = start_addr; start_addr += page_size; - if (end_addr - start_addr < page_size) - break; sgl = sg_next(sgl); i++; } - buf->pages[i] = phys_to_page(start_addr); - - sgl = sg_next(sgl); - 
sg_set_page(sgl, buf->pages[i+1], end_addr - start_addr, 0); - DRM_DEBUG_KMS("vaddr(0x%lx), dma_addr(0x%lx), size(0x%lx)\n", (unsigned long)buf->kvaddr, (unsigned long)buf->dma_addr, @@ -150,7 +131,7 @@ static void lowlevel_buffer_deallocate(struct drm_device *dev, * non-continuous memory would be released by exynos * gem framework. */ - if (flags & EXYNOS_BO_NONCONTIG) { + if (IS_NONCONTIG_BUFFER(flags)) { DRM_DEBUG_KMS("not support allocation type.\n"); return; } diff --git a/drivers/gpu/drm/exynos/exynos_drm_core.c b/drivers/gpu/drm/exynos/exynos_drm_core.c index 411832e8e17..eaf630dc5db 100644 --- a/drivers/gpu/drm/exynos/exynos_drm_core.c +++ b/drivers/gpu/drm/exynos/exynos_drm_core.c @@ -54,16 +54,18 @@ static int exynos_drm_subdrv_probe(struct drm_device *dev, * * P.S. note that this driver is considered for modularization. */ - ret = subdrv->probe(dev, subdrv->manager.dev); + ret = subdrv->probe(dev, subdrv->dev); if (ret) return ret; } - if (subdrv->is_local) + if (!subdrv->manager) return 0; + subdrv->manager->dev = subdrv->dev; + /* create and initialize a encoder for this sub driver. */ - encoder = exynos_drm_encoder_create(dev, &subdrv->manager, + encoder = exynos_drm_encoder_create(dev, subdrv->manager, (1 << MAX_CRTC) - 1); if (!encoder) { DRM_ERROR("failed to create encoder\n"); @@ -186,7 +188,7 @@ int exynos_drm_subdrv_open(struct drm_device *dev, struct drm_file *file) list_for_each_entry(subdrv, &exynos_drm_subdrv_list, list) { if (subdrv->open) { - ret = subdrv->open(dev, subdrv->manager.dev, file); + ret = subdrv->open(dev, subdrv->dev, file); if (ret) goto err; } @@ -197,7 +199,7 @@ int exynos_drm_subdrv_open(struct drm_device *dev, struct drm_file *file) err: list_for_each_entry_reverse(subdrv, &subdrv->list, list) { if (subdrv->close) - subdrv->close(dev, subdrv->manager.dev, file); + subdrv->close(dev, subdrv->dev, file); } return ret; } @@ -209,7 +211,7 @@ void exynos_drm_subdrv_close(struct drm_device *dev, struct drm_file *file) list_for_each_entry(subdrv, &exynos_drm_subdrv_list, list) { if (subdrv->close) - subdrv->close(dev, subdrv->manager.dev, file); + subdrv->close(dev, subdrv->dev, file); } } EXPORT_SYMBOL_GPL(exynos_drm_subdrv_close); diff --git a/drivers/gpu/drm/exynos/exynos_drm_drv.h b/drivers/gpu/drm/exynos/exynos_drm_drv.h index fbd0a232c93..1d814175cd4 100644 --- a/drivers/gpu/drm/exynos/exynos_drm_drv.h +++ b/drivers/gpu/drm/exynos/exynos_drm_drv.h @@ -225,24 +225,25 @@ struct exynos_drm_private { * Exynos drm sub driver structure. * * @list: sub driver has its own list object to register to exynos drm driver. + * @dev: pointer to device object for subdrv device driver. * @drm_dev: pointer to drm_device and this pointer would be set * when sub driver calls exynos_drm_subdrv_register(). - * @is_local: appear encoder and connector disrelated device. + * @manager: subdrv has its own manager to control a hardware appropriately + * and we can access a hardware drawing on this manager. * @probe: this callback would be called by exynos drm driver after * subdrv is registered to it. * @remove: this callback is used to release resources created * by probe callback. * @open: this would be called with drm device file open. * @close: this would be called with drm device file close. - * @manager: subdrv has its own manager to control a hardware appropriately - * and we can access a hardware drawing on this manager. * @encoder: encoder object owned by this sub driver. * @connector: connector object owned by this sub driver. 
*/ struct exynos_drm_subdrv { struct list_head list; + struct device *dev; struct drm_device *drm_dev; - bool is_local; + struct exynos_drm_manager *manager; int (*probe)(struct drm_device *drm_dev, struct device *dev); void (*remove)(struct drm_device *dev); @@ -251,7 +252,6 @@ struct exynos_drm_subdrv { void (*close)(struct drm_device *drm_dev, struct device *dev, struct drm_file *file); - struct exynos_drm_manager manager; struct drm_encoder *encoder; struct drm_connector *connector; }; diff --git a/drivers/gpu/drm/exynos/exynos_drm_fimd.c b/drivers/gpu/drm/exynos/exynos_drm_fimd.c index ecb6db22970..29fdbfeb43c 100644 --- a/drivers/gpu/drm/exynos/exynos_drm_fimd.c +++ b/drivers/gpu/drm/exynos/exynos_drm_fimd.c @@ -172,7 +172,7 @@ static void fimd_dpms(struct device *subdrv_dev, int mode) static void fimd_apply(struct device *subdrv_dev) { struct fimd_context *ctx = get_fimd_context(subdrv_dev); - struct exynos_drm_manager *mgr = &ctx->subdrv.manager; + struct exynos_drm_manager *mgr = ctx->subdrv.manager; struct exynos_drm_manager_ops *mgr_ops = mgr->ops; struct exynos_drm_overlay_ops *ovl_ops = mgr->overlay_ops; struct fimd_win_data *win_data; @@ -577,6 +577,13 @@ static struct exynos_drm_overlay_ops fimd_overlay_ops = { .disable = fimd_win_disable, }; +static struct exynos_drm_manager fimd_manager = { + .pipe = -1, + .ops = &fimd_manager_ops, + .overlay_ops = &fimd_overlay_ops, + .display_ops = &fimd_display_ops, +}; + static void fimd_finish_pageflip(struct drm_device *drm_dev, int crtc) { struct exynos_drm_private *dev_priv = drm_dev->dev_private; @@ -628,7 +635,7 @@ static irqreturn_t fimd_irq_handler(int irq, void *dev_id) struct fimd_context *ctx = (struct fimd_context *)dev_id; struct exynos_drm_subdrv *subdrv = &ctx->subdrv; struct drm_device *drm_dev = subdrv->drm_dev; - struct exynos_drm_manager *manager = &subdrv->manager; + struct exynos_drm_manager *manager = subdrv->manager; u32 val; val = readl(ctx->regs + VIDINTCON1); @@ -744,7 +751,7 @@ static void fimd_clear_win(struct fimd_context *ctx, int win) static int fimd_power_on(struct fimd_context *ctx, bool enable) { struct exynos_drm_subdrv *subdrv = &ctx->subdrv; - struct device *dev = subdrv->manager.dev; + struct device *dev = subdrv->dev; DRM_DEBUG_KMS("%s\n", __FILE__); @@ -867,13 +874,10 @@ static int __devinit fimd_probe(struct platform_device *pdev) subdrv = &ctx->subdrv; + subdrv->dev = dev; + subdrv->manager = &fimd_manager; subdrv->probe = fimd_subdrv_probe; subdrv->remove = fimd_subdrv_remove; - subdrv->manager.pipe = -1; - subdrv->manager.ops = &fimd_manager_ops; - subdrv->manager.overlay_ops = &fimd_overlay_ops; - subdrv->manager.display_ops = &fimd_display_ops; - subdrv->manager.dev = dev; mutex_init(&ctx->lock); diff --git a/drivers/gpu/drm/exynos/exynos_drm_gem.c b/drivers/gpu/drm/exynos/exynos_drm_gem.c index fa1aa94a3d8..1dffa8359f8 100644 --- a/drivers/gpu/drm/exynos/exynos_drm_gem.c +++ b/drivers/gpu/drm/exynos/exynos_drm_gem.c @@ -56,9 +56,28 @@ static unsigned int convert_to_vm_err_msg(int msg) return out_msg; } -static unsigned int mask_gem_flags(unsigned int flags) +static int check_gem_flags(unsigned int flags) { - return flags &= EXYNOS_BO_NONCONTIG; + if (flags & ~(EXYNOS_BO_MASK)) { + DRM_ERROR("invalid flags.\n"); + return -EINVAL; + } + + return 0; +} + +static unsigned long roundup_gem_size(unsigned long size, unsigned int flags) +{ + if (!IS_NONCONTIG_BUFFER(flags)) { + if (size >= SZ_1M) + return roundup(size, SECTION_SIZE); + else if (size >= SZ_64K) + return roundup(size, SZ_64K); + 
else + goto out; + } +out: + return roundup(size, PAGE_SIZE); } static struct page **exynos_gem_get_pages(struct drm_gem_object *obj, @@ -130,22 +149,12 @@ static int exynos_drm_gem_map_pages(struct drm_gem_object *obj, unsigned long pfn; if (exynos_gem_obj->flags & EXYNOS_BO_NONCONTIG) { - unsigned long usize = buf->size; - if (!buf->pages) return -EINTR; - while (usize > 0) { - pfn = page_to_pfn(buf->pages[page_offset++]); - vm_insert_mixed(vma, f_vaddr, pfn); - f_vaddr += PAGE_SIZE; - usize -= PAGE_SIZE; - } - - return 0; - } - - pfn = (buf->dma_addr >> PAGE_SHIFT) + page_offset; + pfn = page_to_pfn(buf->pages[page_offset++]); + } else + pfn = (buf->dma_addr >> PAGE_SHIFT) + page_offset; return vm_insert_mixed(vma, f_vaddr, pfn); } @@ -319,10 +328,17 @@ struct exynos_drm_gem_obj *exynos_drm_gem_create(struct drm_device *dev, struct exynos_drm_gem_buf *buf; int ret; - size = roundup(size, PAGE_SIZE); - DRM_DEBUG_KMS("%s: size = 0x%lx\n", __FILE__, size); + if (!size) { + DRM_ERROR("invalid size.\n"); + return ERR_PTR(-EINVAL); + } + + size = roundup_gem_size(size, flags); + DRM_DEBUG_KMS("%s\n", __FILE__); - flags = mask_gem_flags(flags); + ret = check_gem_flags(flags); + if (ret) + return ERR_PTR(ret); buf = exynos_drm_init_buf(dev, size); if (!buf) @@ -331,7 +347,7 @@ struct exynos_drm_gem_obj *exynos_drm_gem_create(struct drm_device *dev, exynos_gem_obj = exynos_drm_gem_init(dev, size); if (!exynos_gem_obj) { ret = -ENOMEM; - goto err; + goto err_fini_buf; } exynos_gem_obj->buffer = buf; @@ -347,18 +363,19 @@ struct exynos_drm_gem_obj *exynos_drm_gem_create(struct drm_device *dev, ret = exynos_drm_gem_get_pages(&exynos_gem_obj->base); if (ret < 0) { drm_gem_object_release(&exynos_gem_obj->base); - goto err; + goto err_fini_buf; } } else { ret = exynos_drm_alloc_buf(dev, buf, flags); if (ret < 0) { drm_gem_object_release(&exynos_gem_obj->base); - goto err; + goto err_fini_buf; } } return exynos_gem_obj; -err: + +err_fini_buf: exynos_drm_fini_buf(dev, buf); return ERR_PTR(ret); } @@ -497,6 +514,8 @@ static int exynos_drm_gem_mmap_buffer(struct file *filp, if (!buffer->pages) return -EINVAL; + vma->vm_flags |= VM_MIXEDMAP; + do { ret = vm_insert_page(vma, uaddr, buffer->pages[i++]); if (ret) { @@ -554,10 +573,8 @@ int exynos_drm_gem_mmap_ioctl(struct drm_device *dev, void *data, obj->filp->f_op = &exynos_drm_gem_fops; obj->filp->private_data = obj; - down_write(¤t->mm->mmap_sem); - addr = do_mmap(obj->filp, 0, args->size, + addr = vm_mmap(obj->filp, 0, args->size, PROT_READ | PROT_WRITE, MAP_SHARED, 0); - up_write(¤t->mm->mmap_sem); drm_gem_object_unreference_unlocked(obj); @@ -685,7 +702,6 @@ int exynos_drm_gem_dumb_destroy(struct drm_file *file_priv, int exynos_drm_gem_fault(struct vm_area_struct *vma, struct vm_fault *vmf) { struct drm_gem_object *obj = vma->vm_private_data; - struct exynos_drm_gem_obj *exynos_gem_obj = to_exynos_gem_obj(obj); struct drm_device *dev = obj->dev; unsigned long f_vaddr; pgoff_t page_offset; @@ -697,21 +713,10 @@ int exynos_drm_gem_fault(struct vm_area_struct *vma, struct vm_fault *vmf) mutex_lock(&dev->struct_mutex); - /* - * allocate all pages as desired size if user wants to allocate - * physically non-continuous memory. 
- */ - if (exynos_gem_obj->flags & EXYNOS_BO_NONCONTIG) { - ret = exynos_drm_gem_get_pages(obj); - if (ret < 0) - goto err; - } - ret = exynos_drm_gem_map_pages(obj, vma, f_vaddr, page_offset); if (ret < 0) DRM_ERROR("failed to map pages.\n"); -err: mutex_unlock(&dev->struct_mutex); return convert_to_vm_err_msg(ret); diff --git a/drivers/gpu/drm/exynos/exynos_drm_gem.h b/drivers/gpu/drm/exynos/exynos_drm_gem.h index e40fbad8b70..4ed84203950 100644 --- a/drivers/gpu/drm/exynos/exynos_drm_gem.h +++ b/drivers/gpu/drm/exynos/exynos_drm_gem.h @@ -29,6 +29,8 @@ #define to_exynos_gem_obj(x) container_of(x,\ struct exynos_drm_gem_obj, base) +#define IS_NONCONTIG_BUFFER(f) (f & EXYNOS_BO_NONCONTIG) + /* * exynos drm gem buffer structure. * diff --git a/drivers/gpu/drm/exynos/exynos_drm_hdmi.c b/drivers/gpu/drm/exynos/exynos_drm_hdmi.c index 14eb26b0ba1..3424463676e 100644 --- a/drivers/gpu/drm/exynos/exynos_drm_hdmi.c +++ b/drivers/gpu/drm/exynos/exynos_drm_hdmi.c @@ -30,9 +30,8 @@ struct drm_hdmi_context, subdrv); /* these callback points shoud be set by specific drivers. */ -static struct exynos_hdmi_display_ops *hdmi_display_ops; -static struct exynos_hdmi_manager_ops *hdmi_manager_ops; -static struct exynos_hdmi_overlay_ops *hdmi_overlay_ops; +static struct exynos_hdmi_ops *hdmi_ops; +static struct exynos_mixer_ops *mixer_ops; struct drm_hdmi_context { struct exynos_drm_subdrv subdrv; @@ -40,31 +39,20 @@ struct drm_hdmi_context { struct exynos_drm_hdmi_context *mixer_ctx; }; -void exynos_drm_display_ops_register(struct exynos_hdmi_display_ops - *display_ops) +void exynos_hdmi_ops_register(struct exynos_hdmi_ops *ops) { DRM_DEBUG_KMS("%s\n", __FILE__); - if (display_ops) - hdmi_display_ops = display_ops; + if (ops) + hdmi_ops = ops; } -void exynos_drm_manager_ops_register(struct exynos_hdmi_manager_ops - *manager_ops) +void exynos_mixer_ops_register(struct exynos_mixer_ops *ops) { DRM_DEBUG_KMS("%s\n", __FILE__); - if (manager_ops) - hdmi_manager_ops = manager_ops; -} - -void exynos_drm_overlay_ops_register(struct exynos_hdmi_overlay_ops - *overlay_ops) -{ - DRM_DEBUG_KMS("%s\n", __FILE__); - - if (overlay_ops) - hdmi_overlay_ops = overlay_ops; + if (ops) + mixer_ops = ops; } static bool drm_hdmi_is_connected(struct device *dev) @@ -73,8 +61,8 @@ static bool drm_hdmi_is_connected(struct device *dev) DRM_DEBUG_KMS("%s\n", __FILE__); - if (hdmi_display_ops && hdmi_display_ops->is_connected) - return hdmi_display_ops->is_connected(ctx->hdmi_ctx->ctx); + if (hdmi_ops && hdmi_ops->is_connected) + return hdmi_ops->is_connected(ctx->hdmi_ctx->ctx); return false; } @@ -86,9 +74,9 @@ static int drm_hdmi_get_edid(struct device *dev, DRM_DEBUG_KMS("%s\n", __FILE__); - if (hdmi_display_ops && hdmi_display_ops->get_edid) - return hdmi_display_ops->get_edid(ctx->hdmi_ctx->ctx, - connector, edid, len); + if (hdmi_ops && hdmi_ops->get_edid) + return hdmi_ops->get_edid(ctx->hdmi_ctx->ctx, connector, edid, + len); return 0; } @@ -99,9 +87,8 @@ static int drm_hdmi_check_timing(struct device *dev, void *timing) DRM_DEBUG_KMS("%s\n", __FILE__); - if (hdmi_display_ops && hdmi_display_ops->check_timing) - return hdmi_display_ops->check_timing(ctx->hdmi_ctx->ctx, - timing); + if (hdmi_ops && hdmi_ops->check_timing) + return hdmi_ops->check_timing(ctx->hdmi_ctx->ctx, timing); return 0; } @@ -112,8 +99,8 @@ static int drm_hdmi_power_on(struct device *dev, int mode) DRM_DEBUG_KMS("%s\n", __FILE__); - if (hdmi_display_ops && hdmi_display_ops->power_on) - return hdmi_display_ops->power_on(ctx->hdmi_ctx->ctx, mode); + if 
(hdmi_ops && hdmi_ops->power_on) + return hdmi_ops->power_on(ctx->hdmi_ctx->ctx, mode); return 0; } @@ -130,13 +117,13 @@ static int drm_hdmi_enable_vblank(struct device *subdrv_dev) { struct drm_hdmi_context *ctx = to_context(subdrv_dev); struct exynos_drm_subdrv *subdrv = &ctx->subdrv; - struct exynos_drm_manager *manager = &subdrv->manager; + struct exynos_drm_manager *manager = subdrv->manager; DRM_DEBUG_KMS("%s\n", __FILE__); - if (hdmi_overlay_ops && hdmi_overlay_ops->enable_vblank) - return hdmi_overlay_ops->enable_vblank(ctx->mixer_ctx->ctx, - manager->pipe); + if (mixer_ops && mixer_ops->enable_vblank) + return mixer_ops->enable_vblank(ctx->mixer_ctx->ctx, + manager->pipe); return 0; } @@ -147,8 +134,8 @@ static void drm_hdmi_disable_vblank(struct device *subdrv_dev) DRM_DEBUG_KMS("%s\n", __FILE__); - if (hdmi_overlay_ops && hdmi_overlay_ops->disable_vblank) - return hdmi_overlay_ops->disable_vblank(ctx->mixer_ctx->ctx); + if (mixer_ops && mixer_ops->disable_vblank) + return mixer_ops->disable_vblank(ctx->mixer_ctx->ctx); } static void drm_hdmi_mode_fixup(struct device *subdrv_dev, @@ -160,9 +147,9 @@ static void drm_hdmi_mode_fixup(struct device *subdrv_dev, DRM_DEBUG_KMS("%s\n", __FILE__); - if (hdmi_manager_ops && hdmi_manager_ops->mode_fixup) - hdmi_manager_ops->mode_fixup(ctx->hdmi_ctx->ctx, connector, - mode, adjusted_mode); + if (hdmi_ops && hdmi_ops->mode_fixup) + hdmi_ops->mode_fixup(ctx->hdmi_ctx->ctx, connector, mode, + adjusted_mode); } static void drm_hdmi_mode_set(struct device *subdrv_dev, void *mode) @@ -171,8 +158,8 @@ static void drm_hdmi_mode_set(struct device *subdrv_dev, void *mode) DRM_DEBUG_KMS("%s\n", __FILE__); - if (hdmi_manager_ops && hdmi_manager_ops->mode_set) - hdmi_manager_ops->mode_set(ctx->hdmi_ctx->ctx, mode); + if (hdmi_ops && hdmi_ops->mode_set) + hdmi_ops->mode_set(ctx->hdmi_ctx->ctx, mode); } static void drm_hdmi_get_max_resol(struct device *subdrv_dev, @@ -182,9 +169,8 @@ static void drm_hdmi_get_max_resol(struct device *subdrv_dev, DRM_DEBUG_KMS("%s\n", __FILE__); - if (hdmi_manager_ops && hdmi_manager_ops->get_max_resol) - hdmi_manager_ops->get_max_resol(ctx->hdmi_ctx->ctx, width, - height); + if (hdmi_ops && hdmi_ops->get_max_resol) + hdmi_ops->get_max_resol(ctx->hdmi_ctx->ctx, width, height); } static void drm_hdmi_commit(struct device *subdrv_dev) @@ -193,8 +179,8 @@ static void drm_hdmi_commit(struct device *subdrv_dev) DRM_DEBUG_KMS("%s\n", __FILE__); - if (hdmi_manager_ops && hdmi_manager_ops->commit) - hdmi_manager_ops->commit(ctx->hdmi_ctx->ctx); + if (hdmi_ops && hdmi_ops->commit) + hdmi_ops->commit(ctx->hdmi_ctx->ctx); } static void drm_hdmi_dpms(struct device *subdrv_dev, int mode) @@ -209,8 +195,8 @@ static void drm_hdmi_dpms(struct device *subdrv_dev, int mode) case DRM_MODE_DPMS_STANDBY: case DRM_MODE_DPMS_SUSPEND: case DRM_MODE_DPMS_OFF: - if (hdmi_manager_ops && hdmi_manager_ops->disable) - hdmi_manager_ops->disable(ctx->hdmi_ctx->ctx); + if (hdmi_ops && hdmi_ops->disable) + hdmi_ops->disable(ctx->hdmi_ctx->ctx); break; default: DRM_DEBUG_KMS("unkown dps mode: %d\n", mode); @@ -235,8 +221,8 @@ static void drm_mixer_mode_set(struct device *subdrv_dev, DRM_DEBUG_KMS("%s\n", __FILE__); - if (hdmi_overlay_ops && hdmi_overlay_ops->win_mode_set) - hdmi_overlay_ops->win_mode_set(ctx->mixer_ctx->ctx, overlay); + if (mixer_ops && mixer_ops->win_mode_set) + mixer_ops->win_mode_set(ctx->mixer_ctx->ctx, overlay); } static void drm_mixer_commit(struct device *subdrv_dev, int zpos) @@ -245,8 +231,8 @@ static void drm_mixer_commit(struct 
device *subdrv_dev, int zpos) DRM_DEBUG_KMS("%s\n", __FILE__); - if (hdmi_overlay_ops && hdmi_overlay_ops->win_commit) - hdmi_overlay_ops->win_commit(ctx->mixer_ctx->ctx, zpos); + if (mixer_ops && mixer_ops->win_commit) + mixer_ops->win_commit(ctx->mixer_ctx->ctx, zpos); } static void drm_mixer_disable(struct device *subdrv_dev, int zpos) @@ -255,8 +241,8 @@ static void drm_mixer_disable(struct device *subdrv_dev, int zpos) DRM_DEBUG_KMS("%s\n", __FILE__); - if (hdmi_overlay_ops && hdmi_overlay_ops->win_disable) - hdmi_overlay_ops->win_disable(ctx->mixer_ctx->ctx, zpos); + if (mixer_ops && mixer_ops->win_disable) + mixer_ops->win_disable(ctx->mixer_ctx->ctx, zpos); } static struct exynos_drm_overlay_ops drm_hdmi_overlay_ops = { @@ -265,6 +251,12 @@ static struct exynos_drm_overlay_ops drm_hdmi_overlay_ops = { .disable = drm_mixer_disable, }; +static struct exynos_drm_manager hdmi_manager = { + .pipe = -1, + .ops = &drm_hdmi_manager_ops, + .overlay_ops = &drm_hdmi_overlay_ops, + .display_ops = &drm_hdmi_display_ops, +}; static int hdmi_subdrv_probe(struct drm_device *drm_dev, struct device *dev) @@ -332,12 +324,9 @@ static int __devinit exynos_drm_hdmi_probe(struct platform_device *pdev) subdrv = &ctx->subdrv; + subdrv->dev = dev; + subdrv->manager = &hdmi_manager; subdrv->probe = hdmi_subdrv_probe; - subdrv->manager.pipe = -1; - subdrv->manager.ops = &drm_hdmi_manager_ops; - subdrv->manager.overlay_ops = &drm_hdmi_overlay_ops; - subdrv->manager.display_ops = &drm_hdmi_display_ops; - subdrv->manager.dev = dev; platform_set_drvdata(pdev, subdrv); diff --git a/drivers/gpu/drm/exynos/exynos_drm_hdmi.h b/drivers/gpu/drm/exynos/exynos_drm_hdmi.h index 44497cfb6c7..f3ae192c8dc 100644 --- a/drivers/gpu/drm/exynos/exynos_drm_hdmi.h +++ b/drivers/gpu/drm/exynos/exynos_drm_hdmi.h @@ -38,15 +38,15 @@ struct exynos_drm_hdmi_context { void *ctx; }; -struct exynos_hdmi_display_ops { +struct exynos_hdmi_ops { + /* display */ bool (*is_connected)(void *ctx); int (*get_edid)(void *ctx, struct drm_connector *connector, u8 *edid, int len); int (*check_timing)(void *ctx, void *timing); int (*power_on)(void *ctx, int mode); -}; -struct exynos_hdmi_manager_ops { + /* manager */ void (*mode_fixup)(void *ctx, struct drm_connector *connector, struct drm_display_mode *mode, struct drm_display_mode *adjusted_mode); @@ -57,22 +57,17 @@ struct exynos_hdmi_manager_ops { void (*disable)(void *ctx); }; -struct exynos_hdmi_overlay_ops { +struct exynos_mixer_ops { + /* manager */ int (*enable_vblank)(void *ctx, int pipe); void (*disable_vblank)(void *ctx); + + /* overlay */ void (*win_mode_set)(void *ctx, struct exynos_drm_overlay *overlay); void (*win_commit)(void *ctx, int zpos); void (*win_disable)(void *ctx, int zpos); }; -extern struct platform_driver hdmi_driver; -extern struct platform_driver mixer_driver; - -void exynos_drm_display_ops_register(struct exynos_hdmi_display_ops - *display_ops); -void exynos_drm_manager_ops_register(struct exynos_hdmi_manager_ops - *manager_ops); -void exynos_drm_overlay_ops_register(struct exynos_hdmi_overlay_ops - *overlay_ops); - +void exynos_hdmi_ops_register(struct exynos_hdmi_ops *ops); +void exynos_mixer_ops_register(struct exynos_mixer_ops *ops); #endif diff --git a/drivers/gpu/drm/exynos/exynos_drm_plane.c b/drivers/gpu/drm/exynos/exynos_drm_plane.c index c277a3a445f..f92fe4c6174 100644 --- a/drivers/gpu/drm/exynos/exynos_drm_plane.c +++ b/drivers/gpu/drm/exynos/exynos_drm_plane.c @@ -24,6 +24,10 @@ struct exynos_plane { static const uint32_t formats[] = { DRM_FORMAT_XRGB8888, 
+ DRM_FORMAT_ARGB8888, + DRM_FORMAT_NV12, + DRM_FORMAT_NV12M, + DRM_FORMAT_NV12MT, }; static int diff --git a/drivers/gpu/drm/exynos/exynos_drm_vidi.c b/drivers/gpu/drm/exynos/exynos_drm_vidi.c index 8e1339f9fe1..7b9c153dceb 100644 --- a/drivers/gpu/drm/exynos/exynos_drm_vidi.c +++ b/drivers/gpu/drm/exynos/exynos_drm_vidi.c @@ -199,7 +199,7 @@ static void vidi_dpms(struct device *subdrv_dev, int mode) static void vidi_apply(struct device *subdrv_dev) { struct vidi_context *ctx = get_vidi_context(subdrv_dev); - struct exynos_drm_manager *mgr = &ctx->subdrv.manager; + struct exynos_drm_manager *mgr = ctx->subdrv.manager; struct exynos_drm_manager_ops *mgr_ops = mgr->ops; struct exynos_drm_overlay_ops *ovl_ops = mgr->overlay_ops; struct vidi_win_data *win_data; @@ -374,6 +374,13 @@ static struct exynos_drm_overlay_ops vidi_overlay_ops = { .disable = vidi_win_disable, }; +static struct exynos_drm_manager vidi_manager = { + .pipe = -1, + .ops = &vidi_manager_ops, + .overlay_ops = &vidi_overlay_ops, + .display_ops = &vidi_display_ops, +}; + static void vidi_finish_pageflip(struct drm_device *drm_dev, int crtc) { struct exynos_drm_private *dev_priv = drm_dev->dev_private; @@ -425,7 +432,7 @@ static void vidi_fake_vblank_handler(struct work_struct *work) struct vidi_context *ctx = container_of(work, struct vidi_context, work); struct exynos_drm_subdrv *subdrv = &ctx->subdrv; - struct exynos_drm_manager *manager = &subdrv->manager; + struct exynos_drm_manager *manager = subdrv->manager; if (manager->pipe < 0) return; @@ -471,7 +478,7 @@ static void vidi_subdrv_remove(struct drm_device *drm_dev) static int vidi_power_on(struct vidi_context *ctx, bool enable) { struct exynos_drm_subdrv *subdrv = &ctx->subdrv; - struct device *dev = subdrv->manager.dev; + struct device *dev = subdrv->dev; DRM_DEBUG_KMS("%s\n", __FILE__); @@ -611,13 +618,10 @@ static int __devinit vidi_probe(struct platform_device *pdev) ctx->raw_edid = (struct edid *)fake_edid_info; subdrv = &ctx->subdrv; + subdrv->dev = dev; + subdrv->manager = &vidi_manager; subdrv->probe = vidi_subdrv_probe; subdrv->remove = vidi_subdrv_remove; - subdrv->manager.pipe = -1; - subdrv->manager.ops = &vidi_manager_ops; - subdrv->manager.overlay_ops = &vidi_overlay_ops; - subdrv->manager.display_ops = &vidi_display_ops; - subdrv->manager.dev = dev; mutex_init(&ctx->lock); diff --git a/drivers/gpu/drm/exynos/exynos_hdmi.c b/drivers/gpu/drm/exynos/exynos_hdmi.c index 575a8cbd353..b0035387645 100644 --- a/drivers/gpu/drm/exynos/exynos_hdmi.c +++ b/drivers/gpu/drm/exynos/exynos_hdmi.c @@ -40,7 +40,6 @@ #include "exynos_hdmi.h" -#define HDMI_OVERLAY_NUMBER 3 #define MAX_WIDTH 1920 #define MAX_HEIGHT 1080 #define get_hdmi_context(dev) platform_get_drvdata(to_platform_device(dev)) @@ -1194,7 +1193,7 @@ static int hdmi_conf_index(struct hdmi_context *hdata, static bool hdmi_is_connected(void *ctx) { - struct hdmi_context *hdata = (struct hdmi_context *)ctx; + struct hdmi_context *hdata = ctx; u32 val = hdmi_reg_read(hdata, HDMI_HPD_STATUS); if (val) @@ -1207,7 +1206,7 @@ static int hdmi_get_edid(void *ctx, struct drm_connector *connector, u8 *edid, int len) { struct edid *raw_edid; - struct hdmi_context *hdata = (struct hdmi_context *)ctx; + struct hdmi_context *hdata = ctx; DRM_DEBUG_KMS("[%d] %s\n", __LINE__, __func__); @@ -1275,7 +1274,7 @@ static int hdmi_v14_check_timing(struct fb_videomode *check_timing) static int hdmi_check_timing(void *ctx, void *timing) { - struct hdmi_context *hdata = (struct hdmi_context *)ctx; + struct hdmi_context *hdata = ctx; 
struct fb_videomode *check_timing = timing; DRM_DEBUG_KMS("[%d] %s\n", __LINE__, __func__); @@ -1312,13 +1311,6 @@ static int hdmi_display_power_on(void *ctx, int mode) return 0; } -static struct exynos_hdmi_display_ops display_ops = { - .is_connected = hdmi_is_connected, - .get_edid = hdmi_get_edid, - .check_timing = hdmi_check_timing, - .power_on = hdmi_display_power_on, -}; - static void hdmi_set_acr(u32 freq, u8 *acr) { u32 n, cts; @@ -1914,7 +1906,7 @@ static void hdmi_mode_fixup(void *ctx, struct drm_connector *connector, struct drm_display_mode *adjusted_mode) { struct drm_display_mode *m; - struct hdmi_context *hdata = (struct hdmi_context *)ctx; + struct hdmi_context *hdata = ctx; int index; DRM_DEBUG_KMS("[%d] %s\n", __LINE__, __func__); @@ -1951,7 +1943,7 @@ static void hdmi_mode_fixup(void *ctx, struct drm_connector *connector, static void hdmi_mode_set(void *ctx, void *mode) { - struct hdmi_context *hdata = (struct hdmi_context *)ctx; + struct hdmi_context *hdata = ctx; int conf_idx; DRM_DEBUG_KMS("[%d] %s\n", __LINE__, __func__); @@ -1974,7 +1966,7 @@ static void hdmi_get_max_resol(void *ctx, unsigned int *width, static void hdmi_commit(void *ctx) { - struct hdmi_context *hdata = (struct hdmi_context *)ctx; + struct hdmi_context *hdata = ctx; DRM_DEBUG_KMS("[%d] %s\n", __LINE__, __func__); @@ -1985,7 +1977,7 @@ static void hdmi_commit(void *ctx) static void hdmi_disable(void *ctx) { - struct hdmi_context *hdata = (struct hdmi_context *)ctx; + struct hdmi_context *hdata = ctx; DRM_DEBUG_KMS("[%d] %s\n", __LINE__, __func__); @@ -1996,7 +1988,14 @@ static void hdmi_disable(void *ctx) } } -static struct exynos_hdmi_manager_ops manager_ops = { +static struct exynos_hdmi_ops hdmi_ops = { + /* display */ + .is_connected = hdmi_is_connected, + .get_edid = hdmi_get_edid, + .check_timing = hdmi_check_timing, + .power_on = hdmi_display_power_on, + + /* manager */ .mode_fixup = hdmi_mode_fixup, .mode_set = hdmi_mode_set, .get_max_resol = hdmi_get_max_resol, @@ -2020,7 +2019,7 @@ static void hdmi_hotplug_func(struct work_struct *work) static irqreturn_t hdmi_irq_handler(int irq, void *arg) { struct exynos_drm_hdmi_context *ctx = arg; - struct hdmi_context *hdata = (struct hdmi_context *)ctx->ctx; + struct hdmi_context *hdata = ctx->ctx; u32 intc_flag; intc_flag = hdmi_reg_read(hdata, HDMI_INTC_FLAG); @@ -2173,7 +2172,7 @@ static int hdmi_runtime_suspend(struct device *dev) DRM_DEBUG_KMS("%s\n", __func__); - hdmi_resource_poweroff((struct hdmi_context *)ctx->ctx); + hdmi_resource_poweroff(ctx->ctx); return 0; } @@ -2184,7 +2183,7 @@ static int hdmi_runtime_resume(struct device *dev) DRM_DEBUG_KMS("%s\n", __func__); - hdmi_resource_poweron((struct hdmi_context *)ctx->ctx); + hdmi_resource_poweron(ctx->ctx); return 0; } @@ -2322,8 +2321,7 @@ static int __devinit hdmi_probe(struct platform_device *pdev) hdata->irq = res->start; /* register specific callbacks to common hdmi. 
*/ - exynos_drm_display_ops_register(&display_ops); - exynos_drm_manager_ops_register(&manager_ops); + exynos_hdmi_ops_register(&hdmi_ops); hdmi_resource_poweron(hdata); @@ -2351,7 +2349,7 @@ err_data: static int __devexit hdmi_remove(struct platform_device *pdev) { struct exynos_drm_hdmi_context *ctx = platform_get_drvdata(pdev); - struct hdmi_context *hdata = (struct hdmi_context *)ctx->ctx; + struct hdmi_context *hdata = ctx->ctx; DRM_DEBUG_KMS("[%d] %s\n", __LINE__, __func__); diff --git a/drivers/gpu/drm/exynos/exynos_mixer.c b/drivers/gpu/drm/exynos/exynos_mixer.c index 4d5f41e1952..e15438c0112 100644 --- a/drivers/gpu/drm/exynos/exynos_mixer.c +++ b/drivers/gpu/drm/exynos/exynos_mixer.c @@ -37,7 +37,8 @@ #include "exynos_drm_drv.h" #include "exynos_drm_hdmi.h" -#define HDMI_OVERLAY_NUMBER 3 +#define MIXER_WIN_NR 3 +#define MIXER_DEFAULT_WIN 0 #define get_mixer_context(dev) platform_get_drvdata(to_platform_device(dev)) @@ -75,16 +76,12 @@ struct mixer_resources { }; struct mixer_context { - struct fb_videomode *default_timing; - unsigned int default_win; - unsigned int default_bpp; unsigned int irq; int pipe; bool interlace; - bool vp_enabled; struct mixer_resources mixer_res; - struct hdmi_win_data win_data[HDMI_OVERLAY_NUMBER]; + struct hdmi_win_data win_data[MIXER_WIN_NR]; }; static const u8 filter_y_horiz_tap8[] = { @@ -643,9 +640,9 @@ static void mixer_win_mode_set(void *ctx, win = overlay->zpos; if (win == DEFAULT_ZPOS) - win = mixer_ctx->default_win; + win = MIXER_DEFAULT_WIN; - if (win < 0 || win > HDMI_OVERLAY_NUMBER) { + if (win < 0 || win > MIXER_WIN_NR) { DRM_ERROR("overlay plane[%d] is wrong\n", win); return; } @@ -683,9 +680,9 @@ static void mixer_win_commit(void *ctx, int zpos) DRM_DEBUG_KMS("[%d] %s, win: %d\n", __LINE__, __func__, win); if (win == DEFAULT_ZPOS) - win = mixer_ctx->default_win; + win = MIXER_DEFAULT_WIN; - if (win < 0 || win > HDMI_OVERLAY_NUMBER) { + if (win < 0 || win > MIXER_WIN_NR) { DRM_ERROR("overlay plane[%d] is wrong\n", win); return; } @@ -706,9 +703,9 @@ static void mixer_win_disable(void *ctx, int zpos) DRM_DEBUG_KMS("[%d] %s, win: %d\n", __LINE__, __func__, win); if (win == DEFAULT_ZPOS) - win = mixer_ctx->default_win; + win = MIXER_DEFAULT_WIN; - if (win < 0 || win > HDMI_OVERLAY_NUMBER) { + if (win < 0 || win > MIXER_WIN_NR) { DRM_ERROR("overlay plane[%d] is wrong\n", win); return; } @@ -722,9 +719,12 @@ static void mixer_win_disable(void *ctx, int zpos) spin_unlock_irqrestore(&res->reg_slock, flags); } -static struct exynos_hdmi_overlay_ops overlay_ops = { +static struct exynos_mixer_ops mixer_ops = { + /* manager */ .enable_vblank = mixer_enable_vblank, .disable_vblank = mixer_disable_vblank, + + /* overlay */ .win_mode_set = mixer_win_mode_set, .win_commit = mixer_win_commit, .win_disable = mixer_win_disable, @@ -771,8 +771,7 @@ static void mixer_finish_pageflip(struct drm_device *drm_dev, int crtc) static irqreturn_t mixer_irq_handler(int irq, void *arg) { struct exynos_drm_hdmi_context *drm_hdmi_ctx = arg; - struct mixer_context *ctx = - (struct mixer_context *)drm_hdmi_ctx->ctx; + struct mixer_context *ctx = drm_hdmi_ctx->ctx; struct mixer_resources *res = &ctx->mixer_res; u32 val, val_base; @@ -902,7 +901,7 @@ static int mixer_runtime_resume(struct device *dev) DRM_DEBUG_KMS("resume - start\n"); - mixer_resource_poweron((struct mixer_context *)ctx->ctx); + mixer_resource_poweron(ctx->ctx); return 0; } @@ -913,7 +912,7 @@ static int mixer_runtime_suspend(struct device *dev) DRM_DEBUG_KMS("suspend - start\n"); - 
mixer_resource_poweroff((struct mixer_context *)ctx->ctx); + mixer_resource_poweroff(ctx->ctx); return 0; } @@ -926,8 +925,7 @@ static const struct dev_pm_ops mixer_pm_ops = { static int __devinit mixer_resources_init(struct exynos_drm_hdmi_context *ctx, struct platform_device *pdev) { - struct mixer_context *mixer_ctx = - (struct mixer_context *)ctx->ctx; + struct mixer_context *mixer_ctx = ctx->ctx; struct device *dev = &pdev->dev; struct mixer_resources *mixer_res = &mixer_ctx->mixer_res; struct resource *res; @@ -1076,7 +1074,7 @@ static int __devinit mixer_probe(struct platform_device *pdev) goto fail; /* register specific callback point to common hdmi. */ - exynos_drm_overlay_ops_register(&overlay_ops); + exynos_mixer_ops_register(&mixer_ops); mixer_resource_poweron(ctx); @@ -1093,7 +1091,7 @@ static int mixer_remove(struct platform_device *pdev) struct device *dev = &pdev->dev; struct exynos_drm_hdmi_context *drm_hdmi_ctx = platform_get_drvdata(pdev); - struct mixer_context *ctx = (struct mixer_context *)drm_hdmi_ctx->ctx; + struct mixer_context *ctx = drm_hdmi_ctx->ctx; dev_info(dev, "remove successful\n"); diff --git a/drivers/gpu/drm/gma500/mdfld_dsi_output.h b/drivers/gpu/drm/gma500/mdfld_dsi_output.h index 21071cef92a..36eb0744841 100644 --- a/drivers/gpu/drm/gma500/mdfld_dsi_output.h +++ b/drivers/gpu/drm/gma500/mdfld_dsi_output.h @@ -29,7 +29,6 @@ #define __MDFLD_DSI_OUTPUT_H__ #include <linux/backlight.h> -#include <linux/version.h> #include <drm/drmP.h> #include <drm/drm.h> #include <drm/drm_crtc.h> diff --git a/drivers/gpu/drm/i810/i810_dma.c b/drivers/gpu/drm/i810/i810_dma.c index 2c8a60c3b98..f920fb5e42b 100644 --- a/drivers/gpu/drm/i810/i810_dma.c +++ b/drivers/gpu/drm/i810/i810_dma.c @@ -129,6 +129,7 @@ static int i810_map_buffer(struct drm_buf *buf, struct drm_file *file_priv) if (buf_priv->currently_mapped == I810_BUF_MAPPED) return -EINVAL; + /* This is all entirely broken */ down_write(¤t->mm->mmap_sem); old_fops = file_priv->filp->f_op; file_priv->filp->f_op = &i810_buffer_fops; @@ -157,11 +158,8 @@ static int i810_unmap_buffer(struct drm_buf *buf) if (buf_priv->currently_mapped != I810_BUF_MAPPED) return -EINVAL; - down_write(¤t->mm->mmap_sem); - retcode = do_munmap(current->mm, - (unsigned long)buf_priv->virtual, + retcode = vm_munmap((unsigned long)buf_priv->virtual, (size_t) buf->total); - up_write(¤t->mm->mmap_sem); buf_priv->currently_mapped = I810_BUF_UNMAPPED; buf_priv->virtual = NULL; diff --git a/drivers/gpu/drm/i915/i915_debugfs.c b/drivers/gpu/drm/i915/i915_debugfs.c index fdb7ccefffb..b505b70dba0 100644 --- a/drivers/gpu/drm/i915/i915_debugfs.c +++ b/drivers/gpu/drm/i915/i915_debugfs.c @@ -1502,14 +1502,6 @@ static int i915_ppgtt_info(struct seq_file *m, void *data) return 0; } -static int -i915_debugfs_common_open(struct inode *inode, - struct file *filp) -{ - filp->private_data = inode->i_private; - return 0; -} - static ssize_t i915_wedged_read(struct file *filp, char __user *ubuf, @@ -1560,7 +1552,7 @@ i915_wedged_write(struct file *filp, static const struct file_operations i915_wedged_fops = { .owner = THIS_MODULE, - .open = i915_debugfs_common_open, + .open = simple_open, .read = i915_wedged_read, .write = i915_wedged_write, .llseek = default_llseek, @@ -1622,7 +1614,7 @@ i915_max_freq_write(struct file *filp, static const struct file_operations i915_max_freq_fops = { .owner = THIS_MODULE, - .open = i915_debugfs_common_open, + .open = simple_open, .read = i915_max_freq_read, .write = i915_max_freq_write, .llseek = default_llseek, @@ -1693,7 
+1685,7 @@ i915_cache_sharing_write(struct file *filp, static const struct file_operations i915_cache_sharing_fops = { .owner = THIS_MODULE, - .open = i915_debugfs_common_open, + .open = simple_open, .read = i915_cache_sharing_read, .write = i915_cache_sharing_write, .llseek = default_llseek, diff --git a/drivers/gpu/drm/i915/i915_dma.c b/drivers/gpu/drm/i915/i915_dma.c index 9341eb8ce93..785f67f963e 100644 --- a/drivers/gpu/drm/i915/i915_dma.c +++ b/drivers/gpu/drm/i915/i915_dma.c @@ -1183,6 +1183,21 @@ static bool i915_switcheroo_can_switch(struct pci_dev *pdev) return can_switch; } +static bool +intel_enable_ppgtt(struct drm_device *dev) +{ + if (i915_enable_ppgtt >= 0) + return i915_enable_ppgtt; + +#ifdef CONFIG_INTEL_IOMMU + /* Disable ppgtt on SNB if VT-d is on. */ + if (INTEL_INFO(dev)->gen == 6 && intel_iommu_gfx_mapped) + return false; +#endif + + return true; +} + static int i915_load_gem_init(struct drm_device *dev) { struct drm_i915_private *dev_priv = dev->dev_private; @@ -1197,7 +1212,7 @@ static int i915_load_gem_init(struct drm_device *dev) drm_mm_init(&dev_priv->mm.stolen, 0, prealloc_size); mutex_lock(&dev->struct_mutex); - if (i915_enable_ppgtt && HAS_ALIASING_PPGTT(dev)) { + if (intel_enable_ppgtt(dev) && HAS_ALIASING_PPGTT(dev)) { /* PPGTT pdes are stolen from global gtt ptes, so shrink the * aperture accordingly when using aliasing ppgtt. */ gtt_size -= I915_PPGTT_PD_ENTRIES*PAGE_SIZE; @@ -1207,8 +1222,10 @@ static int i915_load_gem_init(struct drm_device *dev) i915_gem_do_init(dev, 0, mappable_size, gtt_size); ret = i915_gem_init_aliasing_ppgtt(dev); - if (ret) + if (ret) { + mutex_unlock(&dev->struct_mutex); return ret; + } } else { /* Let GEM Manage all of the aperture. * diff --git a/drivers/gpu/drm/i915/i915_drv.c b/drivers/gpu/drm/i915/i915_drv.c index 1a7559b5999..ae8a64f9f84 100644 --- a/drivers/gpu/drm/i915/i915_drv.c +++ b/drivers/gpu/drm/i915/i915_drv.c @@ -64,9 +64,13 @@ MODULE_PARM_DESC(semaphores, "Use semaphores for inter-ring sync (default: -1 (use per-chip defaults))"); int i915_enable_rc6 __read_mostly = -1; -module_param_named(i915_enable_rc6, i915_enable_rc6, int, 0600); +module_param_named(i915_enable_rc6, i915_enable_rc6, int, 0400); MODULE_PARM_DESC(i915_enable_rc6, - "Enable power-saving render C-state 6 (default: -1 (use per-chip default)"); + "Enable power-saving render C-state 6. " + "Different stages can be selected via bitmask values " + "(0 = disable; 1 = enable rc6; 2 = enable deep rc6; 4 = enable deepest rc6). " + "For example, 3 would enable rc6 and deep rc6, and 7 would enable everything. " + "default: -1 (use per-chip default)"); int i915_enable_fbc __read_mostly = -1; module_param_named(i915_enable_fbc, i915_enable_fbc, int, 0600); @@ -103,8 +107,8 @@ MODULE_PARM_DESC(enable_hangcheck, "WARNING: Disabling this can cause system wide hangs. 
" "(default: true)"); -bool i915_enable_ppgtt __read_mostly = 1; -module_param_named(i915_enable_ppgtt, i915_enable_ppgtt, bool, 0600); +int i915_enable_ppgtt __read_mostly = -1; +module_param_named(i915_enable_ppgtt, i915_enable_ppgtt, int, 0600); MODULE_PARM_DESC(i915_enable_ppgtt, "Enable PPGTT (default: true)"); @@ -292,6 +296,7 @@ static const struct pci_device_id pciidlist[] = { /* aka */ INTEL_VGA_DEVICE(0x0152, &intel_ivybridge_d_info), /* GT1 desktop */ INTEL_VGA_DEVICE(0x0162, &intel_ivybridge_d_info), /* GT2 desktop */ INTEL_VGA_DEVICE(0x015a, &intel_ivybridge_d_info), /* GT1 server */ + INTEL_VGA_DEVICE(0x016a, &intel_ivybridge_d_info), /* GT2 server */ {0, 0, 0} }; @@ -533,7 +538,9 @@ static int i915_drm_thaw(struct drm_device *dev) drm_irq_install(dev); /* Resume the modeset for every activated CRTC */ + mutex_lock(&dev->mode_config.mutex); drm_helper_resume_force_mode(dev); + mutex_unlock(&dev->mode_config.mutex); if (IS_IRONLAKE_M(dev)) ironlake_enable_rc6(dev); diff --git a/drivers/gpu/drm/i915/i915_drv.h b/drivers/gpu/drm/i915/i915_drv.h index c0f19f57200..5fabc6c31fe 100644 --- a/drivers/gpu/drm/i915/i915_drv.h +++ b/drivers/gpu/drm/i915/i915_drv.h @@ -1053,6 +1053,27 @@ struct drm_i915_file_private { #include "i915_trace.h" +/** + * RC6 is a special power stage which allows the GPU to enter an very + * low-voltage mode when idle, using down to 0V while at this stage. This + * stage is entered automatically when the GPU is idle when RC6 support is + * enabled, and as soon as new workload arises GPU wakes up automatically as well. + * + * There are different RC6 modes available in Intel GPU, which differentiate + * among each other with the latency required to enter and leave RC6 and + * voltage consumed by the GPU in different states. + * + * The combination of the following flags define which states GPU is allowed + * to enter, while RC6 is the normal RC6 state, RC6p is the deep RC6, and + * RC6pp is deepest RC6. Their support by hardware varies according to the + * GPU, BIOS, chipset and platform. RC6 is usually the safest one and the one + * which brings the most power savings; deeper states save more power, but + * require higher latency to switch to and wake up. 
+ */ +#define INTEL_RC6_ENABLE (1<<0) +#define INTEL_RC6p_ENABLE (1<<1) +#define INTEL_RC6pp_ENABLE (1<<2) + extern struct drm_ioctl_desc i915_ioctls[]; extern int i915_max_ioctl; extern unsigned int i915_fbpercrtc __always_unused; @@ -1065,7 +1086,7 @@ extern int i915_vbt_sdvo_panel_type __read_mostly; extern int i915_enable_rc6 __read_mostly; extern int i915_enable_fbc __read_mostly; extern bool i915_enable_hangcheck __read_mostly; -extern bool i915_enable_ppgtt __read_mostly; +extern int i915_enable_ppgtt __read_mostly; extern int i915_suspend(struct drm_device *dev, pm_message_t state); extern int i915_resume(struct drm_device *dev); diff --git a/drivers/gpu/drm/i915/i915_gem.c b/drivers/gpu/drm/i915/i915_gem.c index 1f441f5c240..0d1e4b7b4b9 100644 --- a/drivers/gpu/drm/i915/i915_gem.c +++ b/drivers/gpu/drm/i915/i915_gem.c @@ -1087,11 +1087,9 @@ i915_gem_mmap_ioctl(struct drm_device *dev, void *data, if (obj == NULL) return -ENOENT; - down_write(¤t->mm->mmap_sem); - addr = do_mmap(obj->filp, 0, args->size, + addr = vm_mmap(obj->filp, 0, args->size, PROT_READ | PROT_WRITE, MAP_SHARED, args->offset); - up_write(¤t->mm->mmap_sem); drm_gem_object_unreference_unlocked(obj); if (IS_ERR((void *)addr)) return addr; @@ -1472,16 +1470,19 @@ i915_gem_object_move_to_active(struct drm_i915_gem_object *obj, list_move_tail(&obj->ring_list, &ring->active_list); obj->last_rendering_seqno = seqno; - if (obj->fenced_gpu_access) { - struct drm_i915_fence_reg *reg; - - BUG_ON(obj->fence_reg == I915_FENCE_REG_NONE); + if (obj->fenced_gpu_access) { obj->last_fenced_seqno = seqno; obj->last_fenced_ring = ring; - reg = &dev_priv->fence_regs[obj->fence_reg]; - list_move_tail(®->lru_list, &dev_priv->mm.fence_list); + /* Bump MRU to take account of the delayed flush */ + if (obj->fence_reg != I915_FENCE_REG_NONE) { + struct drm_i915_fence_reg *reg; + + reg = &dev_priv->fence_regs[obj->fence_reg]; + list_move_tail(®->lru_list, + &dev_priv->mm.fence_list); + } } } @@ -1490,6 +1491,7 @@ i915_gem_object_move_off_active(struct drm_i915_gem_object *obj) { list_del_init(&obj->ring_list); obj->last_rendering_seqno = 0; + obj->last_fenced_seqno = 0; } static void @@ -1518,6 +1520,7 @@ i915_gem_object_move_to_inactive(struct drm_i915_gem_object *obj) BUG_ON(!list_empty(&obj->gpu_write_list)); BUG_ON(!obj->active); obj->ring = NULL; + obj->last_fenced_ring = NULL; i915_gem_object_move_off_active(obj); obj->fenced_gpu_access = false; @@ -3754,12 +3757,32 @@ void i915_gem_init_ppgtt(struct drm_device *dev) drm_i915_private_t *dev_priv = dev->dev_private; uint32_t pd_offset; struct intel_ring_buffer *ring; + struct i915_hw_ppgtt *ppgtt = dev_priv->mm.aliasing_ppgtt; + uint32_t __iomem *pd_addr; + uint32_t pd_entry; int i; if (!dev_priv->mm.aliasing_ppgtt) return; - pd_offset = dev_priv->mm.aliasing_ppgtt->pd_offset; + + pd_addr = dev_priv->mm.gtt->gtt + ppgtt->pd_offset/sizeof(uint32_t); + for (i = 0; i < ppgtt->num_pd_entries; i++) { + dma_addr_t pt_addr; + + if (dev_priv->mm.gtt->needs_dmar) + pt_addr = ppgtt->pt_dma_addr[i]; + else + pt_addr = page_to_phys(ppgtt->pt_pages[i]); + + pd_entry = GEN6_PDE_ADDR_ENCODE(pt_addr); + pd_entry |= GEN6_PDE_VALID; + + writel(pd_entry, pd_addr + i); + } + readl(pd_addr); + + pd_offset = ppgtt->pd_offset; pd_offset /= 64; /* in cachelines, */ pd_offset <<= 16; diff --git a/drivers/gpu/drm/i915/i915_gem_execbuffer.c b/drivers/gpu/drm/i915/i915_gem_execbuffer.c index 81687af0089..de431942ded 100644 --- a/drivers/gpu/drm/i915/i915_gem_execbuffer.c +++ 
b/drivers/gpu/drm/i915/i915_gem_execbuffer.c @@ -498,8 +498,8 @@ pin_and_fence_object(struct drm_i915_gem_object *obj, if (ret) goto err_unpin; } + obj->pending_fenced_gpu_access = true; } - obj->pending_fenced_gpu_access = need_fence; } entry->offset = obj->gtt_offset; @@ -1133,6 +1133,11 @@ i915_gem_do_execbuffer(struct drm_device *dev, void *data, return -EINVAL; } + if (args->num_cliprects > UINT_MAX / sizeof(*cliprects)) { + DRM_DEBUG("execbuf with %u cliprects\n", + args->num_cliprects); + return -EINVAL; + } cliprects = kmalloc(args->num_cliprects * sizeof(*cliprects), GFP_KERNEL); if (cliprects == NULL) { @@ -1404,7 +1409,8 @@ i915_gem_execbuffer2(struct drm_device *dev, void *data, struct drm_i915_gem_exec_object2 *exec2_list = NULL; int ret; - if (args->buffer_count < 1) { + if (args->buffer_count < 1 || + args->buffer_count > UINT_MAX / sizeof(*exec2_list)) { DRM_DEBUG("execbuf2 with %d buffers\n", args->buffer_count); return -EINVAL; } diff --git a/drivers/gpu/drm/i915/i915_gem_gtt.c b/drivers/gpu/drm/i915/i915_gem_gtt.c index 2eacd78bb93..a135c61f411 100644 --- a/drivers/gpu/drm/i915/i915_gem_gtt.c +++ b/drivers/gpu/drm/i915/i915_gem_gtt.c @@ -65,9 +65,7 @@ int i915_gem_init_aliasing_ppgtt(struct drm_device *dev) { struct drm_i915_private *dev_priv = dev->dev_private; struct i915_hw_ppgtt *ppgtt; - uint32_t pd_entry; unsigned first_pd_entry_in_global_pt; - uint32_t __iomem *pd_addr; int i; int ret = -ENOMEM; @@ -100,7 +98,6 @@ int i915_gem_init_aliasing_ppgtt(struct drm_device *dev) goto err_pt_alloc; } - pd_addr = dev_priv->mm.gtt->gtt + first_pd_entry_in_global_pt; for (i = 0; i < ppgtt->num_pd_entries; i++) { dma_addr_t pt_addr; if (dev_priv->mm.gtt->needs_dmar) { @@ -117,13 +114,7 @@ int i915_gem_init_aliasing_ppgtt(struct drm_device *dev) ppgtt->pt_dma_addr[i] = pt_addr; } else pt_addr = page_to_phys(ppgtt->pt_pages[i]); - - pd_entry = GEN6_PDE_ADDR_ENCODE(pt_addr); - pd_entry |= GEN6_PDE_VALID; - - writel(pd_entry, pd_addr + i); } - readl(pd_addr); ppgtt->scratch_page_dma_addr = dev_priv->mm.gtt->scratch_page_dma; diff --git a/drivers/gpu/drm/i915/i915_reg.h b/drivers/gpu/drm/i915/i915_reg.h index 3886cf051ba..9d24d65f0c3 100644 --- a/drivers/gpu/drm/i915/i915_reg.h +++ b/drivers/gpu/drm/i915/i915_reg.h @@ -568,6 +568,7 @@ #define CM0_MASK_SHIFT 16 #define CM0_IZ_OPT_DISABLE (1<<6) #define CM0_ZR_OPT_DISABLE (1<<5) +#define CM0_STC_EVICT_DISABLE_LRA_SNB (1<<5) #define CM0_DEPTH_EVICT_DISABLE (1<<4) #define CM0_COLOR_EVICT_DISABLE (1<<3) #define CM0_DEPTH_WRITE_DISABLE (1<<1) @@ -2385,6 +2386,7 @@ #define PIPECONF_DISABLE 0 #define PIPECONF_DOUBLE_WIDE (1<<30) #define I965_PIPECONF_ACTIVE (1<<30) +#define PIPECONF_FRAME_START_DELAY_MASK (3<<27) #define PIPECONF_SINGLE_WIDE 0 #define PIPECONF_PIPE_UNLOCKED 0 #define PIPECONF_PIPE_LOCKED (1<<25) @@ -3727,6 +3729,9 @@ #define GT_FIFO_FREE_ENTRIES 0x120008 #define GT_FIFO_NUM_RESERVED_ENTRIES 20 +#define GEN6_UCGCTL1 0x9400 +# define GEN6_BLBUNIT_CLOCK_GATE_DISABLE (1 << 5) + #define GEN6_UCGCTL2 0x9404 # define GEN6_RCZUNIT_CLOCK_GATE_DISABLE (1 << 13) # define GEN6_RCPBUNIT_CLOCK_GATE_DISABLE (1 << 12) diff --git a/drivers/gpu/drm/i915/intel_bios.c b/drivers/gpu/drm/i915/intel_bios.c index 8168d8f8a63..b48fc2a8410 100644 --- a/drivers/gpu/drm/i915/intel_bios.c +++ b/drivers/gpu/drm/i915/intel_bios.c @@ -24,6 +24,7 @@ * Eric Anholt <eric@anholt.net> * */ +#include <linux/dmi.h> #include <drm/drm_dp_helper.h> #include "drmP.h" #include "drm.h" @@ -621,6 +622,26 @@ init_vbt_defaults(struct drm_i915_private *dev_priv) 
dev_priv->edp.bpp = 18; } +static int __init intel_no_opregion_vbt_callback(const struct dmi_system_id *id) +{ + DRM_DEBUG_KMS("Falling back to manually reading VBT from " + "VBIOS ROM for %s\n", + id->ident); + return 1; +} + +static const struct dmi_system_id intel_no_opregion_vbt[] = { + { + .callback = intel_no_opregion_vbt_callback, + .ident = "ThinkCentre A57", + .matches = { + DMI_MATCH(DMI_SYS_VENDOR, "LENOVO"), + DMI_MATCH(DMI_PRODUCT_NAME, "97027RG"), + }, + }, + { } +}; + /** * intel_parse_bios - find VBT and initialize settings from the BIOS * @dev: DRM device @@ -641,7 +662,7 @@ intel_parse_bios(struct drm_device *dev) init_vbt_defaults(dev_priv); /* XXX Should this validation be moved to intel_opregion.c? */ - if (dev_priv->opregion.vbt) { + if (!dmi_check_system(intel_no_opregion_vbt) && dev_priv->opregion.vbt) { struct vbt_header *vbt = dev_priv->opregion.vbt; if (memcmp(vbt->signature, "$VBT", 4) == 0) { DRM_DEBUG_KMS("Using VBT from OpRegion: %20s\n", diff --git a/drivers/gpu/drm/i915/intel_crt.c b/drivers/gpu/drm/i915/intel_crt.c index 4d3d736a4f5..90b9793fd5d 100644 --- a/drivers/gpu/drm/i915/intel_crt.c +++ b/drivers/gpu/drm/i915/intel_crt.c @@ -430,8 +430,8 @@ intel_crt_detect(struct drm_connector *connector, bool force) { struct drm_device *dev = connector->dev; struct intel_crt *crt = intel_attached_crt(connector); - struct drm_crtc *crtc; enum drm_connector_status status; + struct intel_load_detect_pipe tmp; if (I915_HAS_HOTPLUG(dev)) { if (intel_crt_detect_hotplug(connector)) { @@ -450,23 +450,16 @@ intel_crt_detect(struct drm_connector *connector, bool force) return connector->status; /* for pre-945g platforms use load detect */ - crtc = crt->base.base.crtc; - if (crtc && crtc->enabled) { - status = intel_crt_load_detect(crt); - } else { - struct intel_load_detect_pipe tmp; - - if (intel_get_load_detect_pipe(&crt->base, connector, NULL, - &tmp)) { - if (intel_crt_detect_ddc(connector)) - status = connector_status_connected; - else - status = intel_crt_load_detect(crt); - intel_release_load_detect_pipe(&crt->base, connector, - &tmp); - } else - status = connector_status_unknown; - } + if (intel_get_load_detect_pipe(&crt->base, connector, NULL, + &tmp)) { + if (intel_crt_detect_ddc(connector)) + status = connector_status_connected; + else + status = intel_crt_load_detect(crt); + intel_release_load_detect_pipe(&crt->base, connector, + &tmp); + } else + status = connector_status_unknown; return status; } diff --git a/drivers/gpu/drm/i915/intel_display.c b/drivers/gpu/drm/i915/intel_display.c index d514719f65e..5908cd56340 100644 --- a/drivers/gpu/drm/i915/intel_display.c +++ b/drivers/gpu/drm/i915/intel_display.c @@ -2245,6 +2245,33 @@ intel_pipe_set_base_atomic(struct drm_crtc *crtc, struct drm_framebuffer *fb, } static int +intel_finish_fb(struct drm_framebuffer *old_fb) +{ + struct drm_i915_gem_object *obj = to_intel_framebuffer(old_fb)->obj; + struct drm_i915_private *dev_priv = obj->base.dev->dev_private; + bool was_interruptible = dev_priv->mm.interruptible; + int ret; + + wait_event(dev_priv->pending_flip_queue, + atomic_read(&dev_priv->mm.wedged) || + atomic_read(&obj->pending_flip) == 0); + + /* Big Hammer, we also need to ensure that any pending + * MI_WAIT_FOR_EVENT inside a user batch buffer on the + * current scanout is retired before unpinning the old + * framebuffer. + * + * This should only fail upon a hung GPU, in which case we + * can safely continue. 
+ */ + dev_priv->mm.interruptible = false; + ret = i915_gem_object_finish_gpu(obj); + dev_priv->mm.interruptible = was_interruptible; + + return ret; +} + +static int intel_pipe_set_base(struct drm_crtc *crtc, int x, int y, struct drm_framebuffer *old_fb) { @@ -2282,25 +2309,8 @@ intel_pipe_set_base(struct drm_crtc *crtc, int x, int y, return ret; } - if (old_fb) { - struct drm_i915_private *dev_priv = dev->dev_private; - struct drm_i915_gem_object *obj = to_intel_framebuffer(old_fb)->obj; - - wait_event(dev_priv->pending_flip_queue, - atomic_read(&dev_priv->mm.wedged) || - atomic_read(&obj->pending_flip) == 0); - - /* Big Hammer, we also need to ensure that any pending - * MI_WAIT_FOR_EVENT inside a user batch buffer on the - * current scanout is retired before unpinning the old - * framebuffer. - * - * This should only fail upon a hung GPU, in which case we - * can safely continue. - */ - ret = i915_gem_object_finish_gpu(obj); - (void) ret; - } + if (old_fb) + intel_finish_fb(old_fb); ret = intel_pipe_set_base_atomic(crtc, crtc->fb, x, y, LEAVE_ATOMIC_MODE_SET); @@ -3371,6 +3381,23 @@ static void intel_crtc_disable(struct drm_crtc *crtc) struct drm_crtc_helper_funcs *crtc_funcs = crtc->helper_private; struct drm_device *dev = crtc->dev; + /* Flush any pending WAITs before we disable the pipe. Note that + * we need to drop the struct_mutex in order to acquire it again + * during the lowlevel dpms routines around a couple of the + * operations. It does not look trivial nor desirable to move + * that locking higher. So instead we leave a window for the + * submission of further commands on the fb before we can actually + * disable it. This race with userspace exists anyway, and we can + * only rely on the pipe being disabled by userspace after it + * receives the hotplug notification and has flushed any pending + * batches. + */ + if (crtc->fb) { + mutex_lock(&dev->struct_mutex); + intel_finish_fb(crtc->fb); + mutex_unlock(&dev->struct_mutex); + } + crtc_funcs->dpms(crtc, DRM_MODE_DPMS_OFF); assert_plane_disabled(dev->dev_private, to_intel_crtc(crtc)->plane); assert_pipe_disabled(dev->dev_private, to_intel_crtc(crtc)->pipe); @@ -3451,8 +3478,11 @@ static bool intel_crtc_mode_fixup(struct drm_crtc *crtc, return false; } - /* All interlaced capable intel hw wants timings in frames. */ - drm_mode_set_crtcinfo(adjusted_mode, 0); + /* All interlaced capable intel hw wants timings in frames. Note though + * that intel_lvds_mode_fixup does some funny tricks with the crtc + * timings, so we need to be careful not to clobber these.*/ + if (!(adjusted_mode->private_flags & INTEL_MODE_CRTC_TIMINGS_SET)) + drm_mode_set_crtcinfo(adjusted_mode, 0); return true; } @@ -5539,7 +5569,8 @@ void ironlake_init_pch_refclk(struct drm_device *dev) if (intel_panel_use_ssc(dev_priv) && can_ssc) { DRM_DEBUG_KMS("Using SSC on panel\n"); temp |= DREF_SSC1_ENABLE; - } + } else + temp &= ~DREF_SSC1_ENABLE; /* Get SSC going before enabling the outputs */ I915_WRITE(PCH_DREF_CONTROL, temp); @@ -7437,7 +7468,13 @@ static int intel_gen6_queue_flip(struct drm_device *dev, OUT_RING(fb->pitches[0] | obj->tiling_mode); OUT_RING(obj->gtt_offset); - pf = I915_READ(PF_CTL(intel_crtc->pipe)) & PF_ENABLE; + /* Contrary to the suggestions in the documentation, + * "Enable Panel Fitter" does not seem to be required when page + * flipping with a non-native mode, and worse causes a normal + * modeset to fail. 
+ * pf = I915_READ(PF_CTL(intel_crtc->pipe)) & PF_ENABLE; + */ + pf = 0; pipesrc = I915_READ(PIPESRC(intel_crtc->pipe)) & 0x0fff0fff; OUT_RING(pf | pipesrc); ADVANCE_LP_RING(); @@ -7580,6 +7617,12 @@ static void intel_sanitize_modesetting(struct drm_device *dev, struct drm_i915_private *dev_priv = dev->dev_private; u32 reg, val; + /* Clear any frame start delays used for debugging left by the BIOS */ + for_each_pipe(pipe) { + reg = PIPECONF(pipe); + I915_WRITE(reg, I915_READ(reg) & ~PIPECONF_FRAME_START_DELAY_MASK); + } + if (HAS_PCH_SPLIT(dev)) return; @@ -8215,7 +8258,7 @@ void intel_init_emon(struct drm_device *dev) dev_priv->corr = (lcfuse & LCFUSE_HIV_MASK); } -static bool intel_enable_rc6(struct drm_device *dev) +static int intel_enable_rc6(struct drm_device *dev) { /* * Respect the kernel parameter if it is set @@ -8233,11 +8276,11 @@ static bool intel_enable_rc6(struct drm_device *dev) * Disable rc6 on Sandybridge */ if (INTEL_INFO(dev)->gen == 6) { - DRM_DEBUG_DRIVER("Sandybridge: RC6 disabled\n"); - return 0; + DRM_DEBUG_DRIVER("Sandybridge: deep RC6 disabled\n"); + return INTEL_RC6_ENABLE; } - DRM_DEBUG_DRIVER("RC6 enabled\n"); - return 1; + DRM_DEBUG_DRIVER("RC6 and deep RC6 enabled\n"); + return (INTEL_RC6_ENABLE | INTEL_RC6p_ENABLE); } void gen6_enable_rps(struct drm_i915_private *dev_priv) @@ -8247,6 +8290,7 @@ void gen6_enable_rps(struct drm_i915_private *dev_priv) u32 pcu_mbox, rc6_mask = 0; u32 gtfifodbg; int cur_freq, min_freq, max_freq; + int rc6_mode; int i; /* Here begins a magic sequence of register writes to enable @@ -8284,9 +8328,20 @@ void gen6_enable_rps(struct drm_i915_private *dev_priv) I915_WRITE(GEN6_RC6p_THRESHOLD, 100000); I915_WRITE(GEN6_RC6pp_THRESHOLD, 64000); /* unused */ - if (intel_enable_rc6(dev_priv->dev)) - rc6_mask = GEN6_RC_CTL_RC6_ENABLE | - ((IS_GEN7(dev_priv->dev)) ? GEN6_RC_CTL_RC6p_ENABLE : 0); + rc6_mode = intel_enable_rc6(dev_priv->dev); + if (rc6_mode & INTEL_RC6_ENABLE) + rc6_mask |= GEN6_RC_CTL_RC6_ENABLE; + + if (rc6_mode & INTEL_RC6p_ENABLE) + rc6_mask |= GEN6_RC_CTL_RC6p_ENABLE; + + if (rc6_mode & INTEL_RC6pp_ENABLE) + rc6_mask |= GEN6_RC_CTL_RC6pp_ENABLE; + + DRM_INFO("Enabling RC6 states: RC6 %s, RC6p %s, RC6pp %s\n", + (rc6_mode & INTEL_RC6_ENABLE) ? "on" : "off", + (rc6_mode & INTEL_RC6p_ENABLE) ? "on" : "off", + (rc6_mode & INTEL_RC6pp_ENABLE) ? "on" : "off"); I915_WRITE(GEN6_RC_CONTROL, rc6_mask | @@ -8510,6 +8565,10 @@ static void gen6_init_clock_gating(struct drm_device *dev) I915_WRITE(WM2_LP_ILK, 0); I915_WRITE(WM1_LP_ILK, 0); + I915_WRITE(GEN6_UCGCTL1, + I915_READ(GEN6_UCGCTL1) | + GEN6_BLBUNIT_CLOCK_GATE_DISABLE); + /* According to the BSpec vol1g, bit 12 (RCPBUNIT) clock * gating disable must be set. 
Failure to set it results in * flickering pixels due to Z write ordering failures after diff --git a/drivers/gpu/drm/i915/intel_dp.c b/drivers/gpu/drm/i915/intel_dp.c index 110552ff302..4b637919f74 100644 --- a/drivers/gpu/drm/i915/intel_dp.c +++ b/drivers/gpu/drm/i915/intel_dp.c @@ -219,14 +219,38 @@ intel_dp_max_data_rate(int max_link_clock, int max_lanes) return (max_link_clock * max_lanes * 8) / 10; } +static bool +intel_dp_adjust_dithering(struct intel_dp *intel_dp, + struct drm_display_mode *mode, + struct drm_display_mode *adjusted_mode) +{ + int max_link_clock = intel_dp_link_clock(intel_dp_max_link_bw(intel_dp)); + int max_lanes = intel_dp_max_lane_count(intel_dp); + int max_rate, mode_rate; + + mode_rate = intel_dp_link_required(mode->clock, 24); + max_rate = intel_dp_max_data_rate(max_link_clock, max_lanes); + + if (mode_rate > max_rate) { + mode_rate = intel_dp_link_required(mode->clock, 18); + if (mode_rate > max_rate) + return false; + + if (adjusted_mode) + adjusted_mode->private_flags + |= INTEL_MODE_DP_FORCE_6BPC; + + return true; + } + + return true; +} + static int intel_dp_mode_valid(struct drm_connector *connector, struct drm_display_mode *mode) { struct intel_dp *intel_dp = intel_attached_dp(connector); - int max_link_clock = intel_dp_link_clock(intel_dp_max_link_bw(intel_dp)); - int max_lanes = intel_dp_max_lane_count(intel_dp); - int max_rate, mode_rate; if (is_edp(intel_dp) && intel_dp->panel_fixed_mode) { if (mode->hdisplay > intel_dp->panel_fixed_mode->hdisplay) @@ -236,16 +260,8 @@ intel_dp_mode_valid(struct drm_connector *connector, return MODE_PANEL; } - mode_rate = intel_dp_link_required(mode->clock, 24); - max_rate = intel_dp_max_data_rate(max_link_clock, max_lanes); - - if (mode_rate > max_rate) { - mode_rate = intel_dp_link_required(mode->clock, 18); - if (mode_rate > max_rate) - return MODE_CLOCK_HIGH; - else - mode->private_flags |= INTEL_MODE_DP_FORCE_6BPC; - } + if (!intel_dp_adjust_dithering(intel_dp, mode, NULL)) + return MODE_CLOCK_HIGH; if (mode->clock < 10000) return MODE_CLOCK_LOW; @@ -672,7 +688,7 @@ intel_dp_mode_fixup(struct drm_encoder *encoder, struct drm_display_mode *mode, int lane_count, clock; int max_lane_count = intel_dp_max_lane_count(intel_dp); int max_clock = intel_dp_max_link_bw(intel_dp) == DP_LINK_BW_2_7 ? 1 : 0; - int bpp = mode->private_flags & INTEL_MODE_DP_FORCE_6BPC ? 18 : 24; + int bpp; static int bws[2] = { DP_LINK_BW_1_62, DP_LINK_BW_2_7 }; if (is_edp(intel_dp) && intel_dp->panel_fixed_mode) { @@ -686,6 +702,11 @@ intel_dp_mode_fixup(struct drm_encoder *encoder, struct drm_display_mode *mode, mode->clock = intel_dp->panel_fixed_mode->clock; } + if (!intel_dp_adjust_dithering(intel_dp, mode, adjusted_mode)) + return false; + + bpp = adjusted_mode->private_flags & INTEL_MODE_DP_FORCE_6BPC ? 
18 : 24; + for (lane_count = 1; lane_count <= max_lane_count; lane_count <<= 1) { for (clock = 0; clock <= max_clock; clock++) { int link_avail = intel_dp_max_data_rate(intel_dp_link_clock(bws[clock]), lane_count); diff --git a/drivers/gpu/drm/i915/intel_drv.h b/drivers/gpu/drm/i915/intel_drv.h index 5a14149b379..715afa15302 100644 --- a/drivers/gpu/drm/i915/intel_drv.h +++ b/drivers/gpu/drm/i915/intel_drv.h @@ -105,6 +105,10 @@ #define INTEL_MODE_PIXEL_MULTIPLIER_SHIFT (0x0) #define INTEL_MODE_PIXEL_MULTIPLIER_MASK (0xf << INTEL_MODE_PIXEL_MULTIPLIER_SHIFT) #define INTEL_MODE_DP_FORCE_6BPC (0x10) +/* This flag must be set by the encoder's mode_fixup if it changes the crtc + * timings in the mode to prevent the crtc fixup from overwriting them. + * Currently only lvds needs that. */ +#define INTEL_MODE_CRTC_TIMINGS_SET (0x20) static inline void intel_mode_set_pixel_multiplier(struct drm_display_mode *mode, diff --git a/drivers/gpu/drm/i915/intel_fb.c b/drivers/gpu/drm/i915/intel_fb.c index 19ecd78b8a2..6e9ee33fd41 100644 --- a/drivers/gpu/drm/i915/intel_fb.c +++ b/drivers/gpu/drm/i915/intel_fb.c @@ -279,6 +279,8 @@ void intel_fb_restore_mode(struct drm_device *dev) struct drm_mode_config *config = &dev->mode_config; struct drm_plane *plane; + mutex_lock(&dev->mode_config.mutex); + ret = drm_fb_helper_restore_fbdev_mode(&dev_priv->fbdev->helper); if (ret) DRM_DEBUG("failed to restore crtc mode\n"); @@ -286,4 +288,6 @@ void intel_fb_restore_mode(struct drm_device *dev) /* Be sure to shut off any planes that may be active */ list_for_each_entry(plane, &config->plane_list, head) plane->funcs->disable_plane(plane); + + mutex_unlock(&dev->mode_config.mutex); } diff --git a/drivers/gpu/drm/i915/intel_i2c.c b/drivers/gpu/drm/i915/intel_i2c.c index 601c86e664a..8fdc9570021 100644 --- a/drivers/gpu/drm/i915/intel_i2c.c +++ b/drivers/gpu/drm/i915/intel_i2c.c @@ -390,7 +390,7 @@ int intel_setup_gmbus(struct drm_device *dev) bus->has_gpio = intel_gpio_setup(bus, i); /* XXX force bit banging until GMBUS is fully debugged */ - if (bus->has_gpio && IS_GEN2(dev)) + if (bus->has_gpio) bus->force_bit = true; } diff --git a/drivers/gpu/drm/i915/intel_lvds.c b/drivers/gpu/drm/i915/intel_lvds.c index c5c0973af8a..30e2c82101d 100644 --- a/drivers/gpu/drm/i915/intel_lvds.c +++ b/drivers/gpu/drm/i915/intel_lvds.c @@ -187,6 +187,8 @@ centre_horizontally(struct drm_display_mode *mode, mode->crtc_hsync_start = mode->crtc_hblank_start + sync_pos; mode->crtc_hsync_end = mode->crtc_hsync_start + sync_width; + + mode->private_flags |= INTEL_MODE_CRTC_TIMINGS_SET; } static void @@ -208,6 +210,8 @@ centre_vertically(struct drm_display_mode *mode, mode->crtc_vsync_start = mode->crtc_vblank_start + sync_pos; mode->crtc_vsync_end = mode->crtc_vsync_start + sync_width; + + mode->private_flags |= INTEL_MODE_CRTC_TIMINGS_SET; } static inline u32 panel_fitter_scaling(u32 source, u32 target) @@ -283,6 +287,8 @@ static bool intel_lvds_mode_fixup(struct drm_encoder *encoder, for_each_pipe(pipe) I915_WRITE(BCLRPAT(pipe), 0); + drm_mode_set_crtcinfo(adjusted_mode, 0); + switch (intel_lvds->fitting_mode) { case DRM_MODE_SCALE_CENTER: /* @@ -755,6 +761,14 @@ static const struct dmi_system_id intel_no_lvds[] = { DMI_MATCH(DMI_BOARD_NAME, "hp st5747"), }, }, + { + .callback = intel_no_lvds_dmi_callback, + .ident = "MSI Wind Box DC500", + .matches = { + DMI_MATCH(DMI_BOARD_VENDOR, "MICRO-STAR INTERNATIONAL CO., LTD"), + DMI_MATCH(DMI_BOARD_NAME, "MS-7469"), + }, + }, { } /* terminating entry */ }; diff --git 
a/drivers/gpu/drm/i915/intel_panel.c b/drivers/gpu/drm/i915/intel_panel.c index 230a141dbea..48177ec4720 100644 --- a/drivers/gpu/drm/i915/intel_panel.c +++ b/drivers/gpu/drm/i915/intel_panel.c @@ -47,8 +47,6 @@ intel_fixed_panel_mode(struct drm_display_mode *fixed_mode, adjusted_mode->vtotal = fixed_mode->vtotal; adjusted_mode->clock = fixed_mode->clock; - - drm_mode_set_crtcinfo(adjusted_mode, 0); } /* adjusted_mode has been preset to be the panel's fixed mode */ diff --git a/drivers/gpu/drm/i915/intel_ringbuffer.c b/drivers/gpu/drm/i915/intel_ringbuffer.c index fc66af6a944..80fce51e2f4 100644 --- a/drivers/gpu/drm/i915/intel_ringbuffer.c +++ b/drivers/gpu/drm/i915/intel_ringbuffer.c @@ -401,6 +401,14 @@ static int init_render_ring(struct intel_ring_buffer *ring) if (INTEL_INFO(dev)->gen >= 6) { I915_WRITE(INSTPM, INSTPM_FORCE_ORDERING << 16 | INSTPM_FORCE_ORDERING); + + /* From the Sandybridge PRM, volume 1 part 3, page 24: + * "If this bit is set, STCunit will have LRA as replacement + * policy. [...] This bit must be reset. LRA replacement + * policy is not supported." + */ + I915_WRITE(CACHE_MODE_0, + CM0_STC_EVICT_DISABLE_LRA_SNB << CM0_MASK_SHIFT); } return ret; @@ -626,7 +634,7 @@ gen6_ring_get_seqno(struct intel_ring_buffer *ring) /* Workaround to force correct ordering between irq and seqno writes on * ivb (and maybe also on snb) by reading from a CS register (like * ACTHD) before reading the status page. */ - if (IS_GEN7(dev)) + if (IS_GEN6(dev) || IS_GEN7(dev)) intel_ring_get_active_head(ring); return intel_read_status_page(ring, I915_GEM_HWS_INDEX); } @@ -1038,7 +1046,7 @@ int intel_init_ring_buffer(struct drm_device *dev, * of the buffer. */ ring->effective_size = ring->size; - if (IS_I830(ring->dev)) + if (IS_I830(ring->dev) || IS_845G(ring->dev)) ring->effective_size -= 128; return 0; diff --git a/drivers/gpu/drm/i915/intel_sdvo.c b/drivers/gpu/drm/i915/intel_sdvo.c index e36b171c1e7..232d77d07d8 100644 --- a/drivers/gpu/drm/i915/intel_sdvo.c +++ b/drivers/gpu/drm/i915/intel_sdvo.c @@ -731,6 +731,7 @@ static void intel_sdvo_get_dtd_from_mode(struct intel_sdvo_dtd *dtd, uint16_t width, height; uint16_t h_blank_len, h_sync_len, v_blank_len, v_sync_len; uint16_t h_sync_offset, v_sync_offset; + int mode_clock; width = mode->crtc_hdisplay; height = mode->crtc_vdisplay; @@ -745,7 +746,11 @@ static void intel_sdvo_get_dtd_from_mode(struct intel_sdvo_dtd *dtd, h_sync_offset = mode->crtc_hsync_start - mode->crtc_hblank_start; v_sync_offset = mode->crtc_vsync_start - mode->crtc_vblank_start; - dtd->part1.clock = mode->clock / 10; + mode_clock = mode->clock; + mode_clock /= intel_mode_get_pixel_multiplier(mode) ?: 1; + mode_clock /= 10; + dtd->part1.clock = mode_clock; + dtd->part1.h_active = width & 0xff; dtd->part1.h_blank = h_blank_len & 0xff; dtd->part1.h_high = (((width >> 8) & 0xf) << 4) | @@ -996,7 +1001,7 @@ static void intel_sdvo_mode_set(struct drm_encoder *encoder, struct intel_sdvo *intel_sdvo = to_intel_sdvo(encoder); u32 sdvox; struct intel_sdvo_in_out_map in_out; - struct intel_sdvo_dtd input_dtd; + struct intel_sdvo_dtd input_dtd, output_dtd; int pixel_multiplier = intel_mode_get_pixel_multiplier(adjusted_mode); int rate; @@ -1021,20 +1026,13 @@ static void intel_sdvo_mode_set(struct drm_encoder *encoder, intel_sdvo->attached_output)) return; - /* We have tried to get input timing in mode_fixup, and filled into - * adjusted_mode. 
- */ - if (intel_sdvo->is_tv || intel_sdvo->is_lvds) { - input_dtd = intel_sdvo->input_dtd; - } else { - /* Set the output timing to the screen */ - if (!intel_sdvo_set_target_output(intel_sdvo, - intel_sdvo->attached_output)) - return; - - intel_sdvo_get_dtd_from_mode(&input_dtd, adjusted_mode); - (void) intel_sdvo_set_output_timing(intel_sdvo, &input_dtd); - } + /* lvds has a special fixed output timing. */ + if (intel_sdvo->is_lvds) + intel_sdvo_get_dtd_from_mode(&output_dtd, + intel_sdvo->sdvo_lvds_fixed_mode); + else + intel_sdvo_get_dtd_from_mode(&output_dtd, mode); + (void) intel_sdvo_set_output_timing(intel_sdvo, &output_dtd); /* Set the input timing to the screen. Assume always input 0. */ if (!intel_sdvo_set_target_input(intel_sdvo)) @@ -1052,6 +1050,10 @@ static void intel_sdvo_mode_set(struct drm_encoder *encoder, !intel_sdvo_set_tv_format(intel_sdvo)) return; + /* We have tried to get input timing in mode_fixup, and filled into + * adjusted_mode. + */ + intel_sdvo_get_dtd_from_mode(&input_dtd, adjusted_mode); (void) intel_sdvo_set_input_timing(intel_sdvo, &input_dtd); switch (pixel_multiplier) { diff --git a/drivers/gpu/drm/i915/intel_sprite.c b/drivers/gpu/drm/i915/intel_sprite.c index 7aa0450399a..e90dfb625c4 100644 --- a/drivers/gpu/drm/i915/intel_sprite.c +++ b/drivers/gpu/drm/i915/intel_sprite.c @@ -95,7 +95,6 @@ ivb_update_plane(struct drm_plane *plane, struct drm_framebuffer *fb, /* must disable */ sprctl |= SPRITE_TRICKLE_FEED_DISABLE; sprctl |= SPRITE_ENABLE; - sprctl |= SPRITE_DEST_KEY; /* Sizes are 0 based */ src_w--; @@ -411,6 +410,9 @@ intel_update_plane(struct drm_plane *plane, struct drm_crtc *crtc, old_obj = intel_plane->obj; + src_w = src_w >> 16; + src_h = src_h >> 16; + /* Pipe must be running... */ if (!(I915_READ(PIPECONF(pipe)) & PIPECONF_ENABLE)) return -EINVAL; diff --git a/drivers/gpu/drm/nouveau/Kconfig b/drivers/gpu/drm/nouveau/Kconfig index ca1639918f5..97a81260485 100644 --- a/drivers/gpu/drm/nouveau/Kconfig +++ b/drivers/gpu/drm/nouveau/Kconfig @@ -13,6 +13,7 @@ config DRM_NOUVEAU select ACPI_VIDEO if ACPI && X86 && BACKLIGHT_CLASS_DEVICE && VIDEO_OUTPUT_CONTROL && INPUT select ACPI_WMI if ACPI select MXM_WMI if ACPI + select POWER_SUPPLY help Choose this option for open-source nVidia support. 
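The nouveau_bios.c hunk that follows guards the VBIOS shadow copy against pci_map_rom() returning a mapping with a zero length, and moves pci_unmap_rom() out of the copy branch so such a mapping is still released. A rough sketch of the resulting flow, using a hypothetical helper name and signature rather than the driver's bios_shadow_pci(), with the driver-specific bookkeeping left out:

#include <linux/pci.h>
#include <linux/slab.h>
#include <linux/io.h>

/* Illustrative only: shadow a PCI expansion ROM into a kernel buffer.
 * Returns the buffer (caller frees) and stores its size in *lenp. */
static u8 *shadow_vbios_from_pci_rom(struct pci_dev *pdev, size_t *lenp)
{
	void __iomem *rom;
	size_t length;
	u8 *data = NULL;

	if (pci_enable_rom(pdev))
		return NULL;

	rom = pci_map_rom(pdev, &length);
	if (rom && length) {		/* copy only when a ROM is actually exposed */
		data = kmalloc(length, GFP_KERNEL);
		if (data) {
			memcpy_fromio(data, rom, length);
			*lenp = length;
		}
	}
	if (rom)			/* unmap even when the length check failed */
		pci_unmap_rom(pdev, rom);
	pci_disable_rom(pdev);

	return data;
}

The same shape (map, check the reported size, copy, always unmap) applies to any expansion-ROM shadowing, which is why the unmap is keyed only on the mapping pointer rather than on the copy succeeding.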
diff --git a/drivers/gpu/drm/nouveau/nouveau_bios.c b/drivers/gpu/drm/nouveau/nouveau_bios.c index 637afe71de5..80963d05b54 100644 --- a/drivers/gpu/drm/nouveau/nouveau_bios.c +++ b/drivers/gpu/drm/nouveau/nouveau_bios.c @@ -177,14 +177,15 @@ bios_shadow_pci(struct nvbios *bios) if (!pci_enable_rom(pdev)) { void __iomem *rom = pci_map_rom(pdev, &length); - if (rom) { + if (rom && length) { bios->data = kmalloc(length, GFP_KERNEL); if (bios->data) { memcpy_fromio(bios->data, rom, length); bios->length = length; } - pci_unmap_rom(pdev, rom); } + if (rom) + pci_unmap_rom(pdev, rom); pci_disable_rom(pdev); } diff --git a/drivers/gpu/drm/nouveau/nouveau_channel.c b/drivers/gpu/drm/nouveau/nouveau_channel.c index 44e6416d4a3..846afb0bfef 100644 --- a/drivers/gpu/drm/nouveau/nouveau_channel.c +++ b/drivers/gpu/drm/nouveau/nouveau_channel.c @@ -436,11 +436,11 @@ nouveau_ioctl_fifo_alloc(struct drm_device *dev, void *data, } if (dev_priv->card_type < NV_C0) { - init->subchan[0].handle = NvSw; - init->subchan[0].grclass = NV_SW; - init->nr_subchan = 1; - } else { - init->nr_subchan = 0; + init->subchan[0].handle = 0x00000000; + init->subchan[0].grclass = 0x0000; + init->subchan[1].handle = NvSw; + init->subchan[1].grclass = NV_SW; + init->nr_subchan = 2; } /* Named memory object area */ diff --git a/drivers/gpu/drm/nouveau/nouveau_dma.h b/drivers/gpu/drm/nouveau/nouveau_dma.h index bcf0fd9e313..23d4edf992b 100644 --- a/drivers/gpu/drm/nouveau/nouveau_dma.h +++ b/drivers/gpu/drm/nouveau/nouveau_dma.h @@ -48,8 +48,8 @@ void nv50_dma_push(struct nouveau_channel *, struct nouveau_bo *, /* Hardcoded object assignments to subchannels (subchannel id). */ enum { - NvSubSw = 0, - NvSubM2MF = 1, + NvSubM2MF = 0, + NvSubSw = 1, NvSub2D = 2, NvSubCtxSurf2D = 2, NvSubGdiRect = 3, diff --git a/drivers/gpu/drm/nouveau/nouveau_pm.c b/drivers/gpu/drm/nouveau/nouveau_pm.c index 34d591b7d4e..da3e7c3abab 100644 --- a/drivers/gpu/drm/nouveau/nouveau_pm.c +++ b/drivers/gpu/drm/nouveau/nouveau_pm.c @@ -235,6 +235,7 @@ nouveau_pm_profile_set(struct drm_device *dev, const char *profile) return -EPERM; strncpy(string, profile, sizeof(string)); + string[sizeof(string) - 1] = 0; if ((ptr = strchr(string, '\n'))) *ptr = '\0'; diff --git a/drivers/gpu/drm/nouveau/nouveau_state.c b/drivers/gpu/drm/nouveau/nouveau_state.c index a4886b36d0f..c2a8511e855 100644 --- a/drivers/gpu/drm/nouveau/nouveau_state.c +++ b/drivers/gpu/drm/nouveau/nouveau_state.c @@ -642,7 +642,7 @@ nouveau_card_channel_init(struct drm_device *dev) OUT_RING (chan, chan->vram_handle); OUT_RING (chan, chan->gart_handle); } else - if (dev_priv->card_type <= NV_C0) { + if (dev_priv->card_type <= NV_D0) { ret = nouveau_gpuobj_gr_new(chan, 0x9039, 0x9039); if (ret) goto error; diff --git a/drivers/gpu/drm/nouveau/nv50_sor.c b/drivers/gpu/drm/nouveau/nv50_sor.c index a7844ab6a50..27464021247 100644 --- a/drivers/gpu/drm/nouveau/nv50_sor.c +++ b/drivers/gpu/drm/nouveau/nv50_sor.c @@ -42,7 +42,7 @@ nv50_sor_dp_lane_map(struct drm_device *dev, struct dcb_entry *dcb, u8 lane) struct drm_nouveau_private *dev_priv = dev->dev_private; static const u8 nvaf[] = { 24, 16, 8, 0 }; /* thanks, apple.. 
*/ static const u8 nv50[] = { 16, 8, 0, 24 }; - if (dev_priv->card_type == 0xaf) + if (dev_priv->chipset == 0xaf) return nvaf[lane]; return nv50[lane]; } diff --git a/drivers/gpu/drm/radeon/atom.c b/drivers/gpu/drm/radeon/atom.c index d1bd239cd9e..5ce9bf51a8d 100644 --- a/drivers/gpu/drm/radeon/atom.c +++ b/drivers/gpu/drm/radeon/atom.c @@ -1306,8 +1306,11 @@ struct atom_context *atom_parse(struct card_info *card, void *bios) int atom_asic_init(struct atom_context *ctx) { + struct radeon_device *rdev = ctx->card->dev->dev_private; int hwi = CU16(ctx->data_table + ATOM_DATA_FWI_PTR); uint32_t ps[16]; + int ret; + memset(ps, 0, 64); ps[0] = cpu_to_le32(CU32(hwi + ATOM_FWI_DEFSCLK_PTR)); @@ -1317,7 +1320,17 @@ int atom_asic_init(struct atom_context *ctx) if (!CU16(ctx->cmd_table + 4 + 2 * ATOM_CMD_INIT)) return 1; - return atom_execute_table(ctx, ATOM_CMD_INIT, ps); + ret = atom_execute_table(ctx, ATOM_CMD_INIT, ps); + if (ret) + return ret; + + memset(ps, 0, 64); + + if (rdev->family < CHIP_R600) { + if (CU16(ctx->cmd_table + 4 + 2 * ATOM_CMD_SPDFANCNTL)) + atom_execute_table(ctx, ATOM_CMD_SPDFANCNTL, ps); + } + return ret; } void atom_destroy(struct atom_context *ctx) diff --git a/drivers/gpu/drm/radeon/atom.h b/drivers/gpu/drm/radeon/atom.h index 93cfe2086ba..25fea631dad 100644 --- a/drivers/gpu/drm/radeon/atom.h +++ b/drivers/gpu/drm/radeon/atom.h @@ -44,6 +44,7 @@ #define ATOM_CMD_SETSCLK 0x0A #define ATOM_CMD_SETMCLK 0x0B #define ATOM_CMD_SETPCLK 0x0C +#define ATOM_CMD_SPDFANCNTL 0x39 #define ATOM_DATA_FWI_PTR 0xC #define ATOM_DATA_IIO_PTR 0x32 diff --git a/drivers/gpu/drm/radeon/atombios_crtc.c b/drivers/gpu/drm/radeon/atombios_crtc.c index b5ff1f7b6f7..af1054f8202 100644 --- a/drivers/gpu/drm/radeon/atombios_crtc.c +++ b/drivers/gpu/drm/radeon/atombios_crtc.c @@ -575,6 +575,9 @@ static u32 atombios_adjust_pll(struct drm_crtc *crtc, if (rdev->family < CHIP_RV770) pll->flags |= RADEON_PLL_PREFER_MINM_OVER_MAXP; + /* use frac fb div on APUs */ + if (ASIC_IS_DCE41(rdev) || ASIC_IS_DCE61(rdev)) + pll->flags |= RADEON_PLL_USE_FRAC_FB_DIV; } else { pll->flags |= RADEON_PLL_LEGACY; @@ -955,8 +958,8 @@ static void atombios_crtc_set_pll(struct drm_crtc *crtc, struct drm_display_mode break; } - if (radeon_encoder->active_device & - (ATOM_DEVICE_LCD_SUPPORT | ATOM_DEVICE_DFP_SUPPORT)) { + if ((radeon_encoder->active_device & (ATOM_DEVICE_LCD_SUPPORT | ATOM_DEVICE_DFP_SUPPORT)) || + (radeon_encoder_get_dp_bridge_encoder_id(encoder) != ENCODER_OBJECT_ID_NONE)) { struct radeon_encoder_atom_dig *dig = radeon_encoder->enc_priv; struct drm_connector *connector = radeon_get_connector_for_encoder(encoder); diff --git a/drivers/gpu/drm/radeon/atombios_encoders.c b/drivers/gpu/drm/radeon/atombios_encoders.c index e607c4d7dd9..2d39f9977e0 100644 --- a/drivers/gpu/drm/radeon/atombios_encoders.c +++ b/drivers/gpu/drm/radeon/atombios_encoders.c @@ -230,6 +230,10 @@ atombios_dvo_setup(struct drm_encoder *encoder, int action) if (!atom_parse_cmd_header(rdev->mode_info.atom_context, index, &frev, &crev)) return; + /* some R4xx chips have the wrong frev */ + if (rdev->family <= CHIP_RV410) + frev = 1; + switch (frev) { case 1: switch (crev) { diff --git a/drivers/gpu/drm/radeon/r100.c b/drivers/gpu/drm/radeon/r100.c index 81801c176aa..fe33d35dae8 100644 --- a/drivers/gpu/drm/radeon/r100.c +++ b/drivers/gpu/drm/radeon/r100.c @@ -2553,7 +2553,7 @@ static void r100_pll_errata_after_data(struct radeon_device *rdev) * or the chip could hang on a subsequent access */ if (rdev->pll_errata & CHIP_ERRATA_PLL_DELAY) { - 
udelay(5000); + mdelay(5); } /* This function is required to workaround a hardware bug in some (all?) diff --git a/drivers/gpu/drm/radeon/r600.c b/drivers/gpu/drm/radeon/r600.c index 391bd2636a8..c8187c4b6ae 100644 --- a/drivers/gpu/drm/radeon/r600.c +++ b/drivers/gpu/drm/radeon/r600.c @@ -1135,7 +1135,7 @@ static void r600_vram_gtt_location(struct radeon_device *rdev, struct radeon_mc } if (rdev->flags & RADEON_IS_AGP) { size_bf = mc->gtt_start; - size_af = 0xFFFFFFFF - mc->gtt_end + 1; + size_af = 0xFFFFFFFF - mc->gtt_end; if (size_bf > size_af) { if (mc->mc_vram_size > size_bf) { dev_warn(rdev->dev, "limiting VRAM\n"); @@ -1149,7 +1149,7 @@ static void r600_vram_gtt_location(struct radeon_device *rdev, struct radeon_mc mc->real_vram_size = size_af; mc->mc_vram_size = size_af; } - mc->vram_start = mc->gtt_end; + mc->vram_start = mc->gtt_end + 1; } mc->vram_end = mc->vram_start + mc->mc_vram_size - 1; dev_info(rdev->dev, "VRAM: %lluM 0x%08llX - 0x%08llX (%lluM used)\n", @@ -2839,7 +2839,7 @@ void r600_rlc_stop(struct radeon_device *rdev) /* r7xx asics need to soft reset RLC before halting */ WREG32(SRBM_SOFT_RESET, SOFT_RESET_RLC); RREG32(SRBM_SOFT_RESET); - udelay(15000); + mdelay(15); WREG32(SRBM_SOFT_RESET, 0); RREG32(SRBM_SOFT_RESET); } diff --git a/drivers/gpu/drm/radeon/r600_cp.c b/drivers/gpu/drm/radeon/r600_cp.c index 84c54625095..75ed17c9611 100644 --- a/drivers/gpu/drm/radeon/r600_cp.c +++ b/drivers/gpu/drm/radeon/r600_cp.c @@ -407,7 +407,7 @@ static void r600_cp_load_microcode(drm_radeon_private_t *dev_priv) RADEON_WRITE(R600_GRBM_SOFT_RESET, R600_SOFT_RESET_CP); RADEON_READ(R600_GRBM_SOFT_RESET); - DRM_UDELAY(15000); + mdelay(15); RADEON_WRITE(R600_GRBM_SOFT_RESET, 0); fw_data = (const __be32 *)dev_priv->me_fw->data; @@ -500,7 +500,7 @@ static void r700_cp_load_microcode(drm_radeon_private_t *dev_priv) RADEON_WRITE(R600_GRBM_SOFT_RESET, R600_SOFT_RESET_CP); RADEON_READ(R600_GRBM_SOFT_RESET); - DRM_UDELAY(15000); + mdelay(15); RADEON_WRITE(R600_GRBM_SOFT_RESET, 0); fw_data = (const __be32 *)dev_priv->pfp_fw->data; @@ -1797,7 +1797,7 @@ static void r600_cp_init_ring_buffer(struct drm_device *dev, RADEON_WRITE(R600_GRBM_SOFT_RESET, R600_SOFT_RESET_CP); RADEON_READ(R600_GRBM_SOFT_RESET); - DRM_UDELAY(15000); + mdelay(15); RADEON_WRITE(R600_GRBM_SOFT_RESET, 0); diff --git a/drivers/gpu/drm/radeon/radeon_clocks.c b/drivers/gpu/drm/radeon/radeon_clocks.c index 6ae0c75f016..9c6b29a4192 100644 --- a/drivers/gpu/drm/radeon/radeon_clocks.c +++ b/drivers/gpu/drm/radeon/radeon_clocks.c @@ -633,7 +633,7 @@ void radeon_legacy_set_clock_gating(struct radeon_device *rdev, int enable) tmp &= ~(R300_SCLK_FORCE_VAP); tmp |= RADEON_SCLK_FORCE_CP; WREG32_PLL(RADEON_SCLK_CNTL, tmp); - udelay(15000); + mdelay(15); tmp = RREG32_PLL(R300_SCLK_CNTL2); tmp &= ~(R300_SCLK_FORCE_TCL | @@ -651,12 +651,12 @@ void radeon_legacy_set_clock_gating(struct radeon_device *rdev, int enable) tmp |= (RADEON_ENGIN_DYNCLK_MODE | (0x01 << RADEON_ACTIVE_HILO_LAT_SHIFT)); WREG32_PLL(RADEON_CLK_PWRMGT_CNTL, tmp); - udelay(15000); + mdelay(15); tmp = RREG32_PLL(RADEON_CLK_PIN_CNTL); tmp |= RADEON_SCLK_DYN_START_CNTL; WREG32_PLL(RADEON_CLK_PIN_CNTL, tmp); - udelay(15000); + mdelay(15); /* When DRI is enabled, setting DYN_STOP_LAT to zero can cause some R200 to lockup randomly, leave them as set by BIOS. 
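The udelay()-to-mdelay() conversions running through the radeon hunks above and below are the conventional fix for millisecond-scale busy waits: udelay() is intended for microsecond delays, and some architectures reject or mishandle very large arguments, so waits of a millisecond or more are normally written as mdelay(), or msleep() where sleeping is allowed. A minimal illustration of the pattern (the values mirror these hunks; the helper name is made up):

#include <linux/types.h>
#include <linux/delay.h>

/* Wait ~15 ms for a PLL to settle.  Illustrative only. */
static void pll_settle_delay(bool can_sleep)
{
	if (can_sleep)
		msleep(15);	/* process context: let the CPU do other work */
	else
		mdelay(15);	/* busy-wait; replaces the oversized udelay(15000) */
}

The hunks keep the busy-wait semantics and only switch to the millisecond helper, which is the smallest change that avoids the oversized udelay() argument.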
@@ -696,7 +696,7 @@ void radeon_legacy_set_clock_gating(struct radeon_device *rdev, int enable) tmp |= RADEON_SCLK_MORE_FORCEON; } WREG32_PLL(RADEON_SCLK_MORE_CNTL, tmp); - udelay(15000); + mdelay(15); } /* RV200::A11 A12, RV250::A11 A12 */ @@ -709,7 +709,7 @@ void radeon_legacy_set_clock_gating(struct radeon_device *rdev, int enable) tmp |= RADEON_TCL_BYPASS_DISABLE; WREG32_PLL(RADEON_PLL_PWRMGT_CNTL, tmp); } - udelay(15000); + mdelay(15); /*enable dynamic mode for display clocks (PIXCLK and PIX2CLK) */ tmp = RREG32_PLL(RADEON_PIXCLKS_CNTL); @@ -722,14 +722,14 @@ void radeon_legacy_set_clock_gating(struct radeon_device *rdev, int enable) RADEON_PIXCLK_TMDS_ALWAYS_ONb); WREG32_PLL(RADEON_PIXCLKS_CNTL, tmp); - udelay(15000); + mdelay(15); tmp = RREG32_PLL(RADEON_VCLK_ECP_CNTL); tmp |= (RADEON_PIXCLK_ALWAYS_ONb | RADEON_PIXCLK_DAC_ALWAYS_ONb); WREG32_PLL(RADEON_VCLK_ECP_CNTL, tmp); - udelay(15000); + mdelay(15); } } else { /* Turn everything OFF (ForceON to everything) */ @@ -861,7 +861,7 @@ void radeon_legacy_set_clock_gating(struct radeon_device *rdev, int enable) } WREG32_PLL(RADEON_SCLK_CNTL, tmp); - udelay(16000); + mdelay(16); if ((rdev->family == CHIP_R300) || (rdev->family == CHIP_R350)) { @@ -870,7 +870,7 @@ void radeon_legacy_set_clock_gating(struct radeon_device *rdev, int enable) R300_SCLK_FORCE_GA | R300_SCLK_FORCE_CBA); WREG32_PLL(R300_SCLK_CNTL2, tmp); - udelay(16000); + mdelay(16); } if (rdev->flags & RADEON_IS_IGP) { @@ -878,7 +878,7 @@ void radeon_legacy_set_clock_gating(struct radeon_device *rdev, int enable) tmp &= ~(RADEON_FORCEON_MCLKA | RADEON_FORCEON_YCLKA); WREG32_PLL(RADEON_MCLK_CNTL, tmp); - udelay(16000); + mdelay(16); } if ((rdev->family == CHIP_RV200) || @@ -887,7 +887,7 @@ void radeon_legacy_set_clock_gating(struct radeon_device *rdev, int enable) tmp = RREG32_PLL(RADEON_SCLK_MORE_CNTL); tmp |= RADEON_SCLK_MORE_FORCEON; WREG32_PLL(RADEON_SCLK_MORE_CNTL, tmp); - udelay(16000); + mdelay(16); } tmp = RREG32_PLL(RADEON_PIXCLKS_CNTL); @@ -900,7 +900,7 @@ void radeon_legacy_set_clock_gating(struct radeon_device *rdev, int enable) RADEON_PIXCLK_TMDS_ALWAYS_ONb); WREG32_PLL(RADEON_PIXCLKS_CNTL, tmp); - udelay(16000); + mdelay(16); tmp = RREG32_PLL(RADEON_VCLK_ECP_CNTL); tmp &= ~(RADEON_PIXCLK_ALWAYS_ONb | diff --git a/drivers/gpu/drm/radeon/radeon_combios.c b/drivers/gpu/drm/radeon/radeon_combios.c index 81fc100be7e..2cad9fde92f 100644 --- a/drivers/gpu/drm/radeon/radeon_combios.c +++ b/drivers/gpu/drm/radeon/radeon_combios.c @@ -2845,7 +2845,7 @@ bool radeon_combios_external_tmds_setup(struct drm_encoder *encoder) case 4: val = RBIOS16(index); index += 2; - udelay(val * 1000); + mdelay(val); break; case 6: slave_addr = id & 0xff; @@ -3044,7 +3044,7 @@ static void combios_parse_pll_table(struct drm_device *dev, uint16_t offset) udelay(150); break; case 2: - udelay(1000); + mdelay(1); break; case 3: while (tmp--) { @@ -3075,13 +3075,13 @@ static void combios_parse_pll_table(struct drm_device *dev, uint16_t offset) /*mclk_cntl |= 0x00001111;*//* ??? 
*/ WREG32_PLL(RADEON_MCLK_CNTL, mclk_cntl); - udelay(10000); + mdelay(10); #endif WREG32_PLL (RADEON_CLK_PWRMGT_CNTL, tmp & ~RADEON_CG_NO1_DEBUG_0); - udelay(10000); + mdelay(10); } break; default: diff --git a/drivers/gpu/drm/radeon/radeon_connectors.c b/drivers/gpu/drm/radeon/radeon_connectors.c index bd05156edbd..3c2e7a000a2 100644 --- a/drivers/gpu/drm/radeon/radeon_connectors.c +++ b/drivers/gpu/drm/radeon/radeon_connectors.c @@ -970,7 +970,7 @@ radeon_dvi_detect(struct drm_connector *connector, bool force) encoder = obj_to_encoder(obj); - if (encoder->encoder_type != DRM_MODE_ENCODER_DAC || + if (encoder->encoder_type != DRM_MODE_ENCODER_DAC && encoder->encoder_type != DRM_MODE_ENCODER_TVDAC) continue; @@ -1000,6 +1000,7 @@ radeon_dvi_detect(struct drm_connector *connector, bool force) * cases the DVI port is actually a virtual KVM port connected to the service * processor. */ +out: if ((!rdev->is_atom_bios) && (ret == connector_status_disconnected) && rdev->mode_info.bios_hardcoded_edid_size) { @@ -1007,7 +1008,6 @@ radeon_dvi_detect(struct drm_connector *connector, bool force) ret = connector_status_connected; } -out: /* updated in get modes as well since we need to know if it's analog or digital */ radeon_connector_update_scratch_regs(connector, ret); return ret; diff --git a/drivers/gpu/drm/radeon/radeon_display.c b/drivers/gpu/drm/radeon/radeon_display.c index 8086c96e0b0..0a1d4bd65ed 100644 --- a/drivers/gpu/drm/radeon/radeon_display.c +++ b/drivers/gpu/drm/radeon/radeon_display.c @@ -533,7 +533,7 @@ static void radeon_crtc_init(struct drm_device *dev, int index) radeon_legacy_init_crtc(dev, radeon_crtc); } -static const char *encoder_names[36] = { +static const char *encoder_names[37] = { "NONE", "INTERNAL_LVDS", "INTERNAL_TMDS1", @@ -570,6 +570,7 @@ static const char *encoder_names[36] = { "INTERNAL_UNIPHY2", "NUTMEG", "TRAVIS", + "INTERNAL_VCE" }; static const char *connector_names[15] = { diff --git a/drivers/gpu/drm/radeon/radeon_i2c.c b/drivers/gpu/drm/radeon/radeon_i2c.c index 85bcfc8923a..3edec1c198e 100644 --- a/drivers/gpu/drm/radeon/radeon_i2c.c +++ b/drivers/gpu/drm/radeon/radeon_i2c.c @@ -900,6 +900,10 @@ struct radeon_i2c_chan *radeon_i2c_create(struct drm_device *dev, struct radeon_i2c_chan *i2c; int ret; + /* don't add the mm_i2c bus unless hw_i2c is enabled */ + if (rec->mm_i2c && (radeon_hw_i2c == 0)) + return NULL; + i2c = kzalloc(sizeof(struct radeon_i2c_chan), GFP_KERNEL); if (i2c == NULL) return NULL; diff --git a/drivers/gpu/drm/radeon/radeon_irq_kms.c b/drivers/gpu/drm/radeon/radeon_irq_kms.c index 66d5fe1c817..65060b77c80 100644 --- a/drivers/gpu/drm/radeon/radeon_irq_kms.c +++ b/drivers/gpu/drm/radeon/radeon_irq_kms.c @@ -147,6 +147,12 @@ static bool radeon_msi_ok(struct radeon_device *rdev) (rdev->pdev->subsystem_device == 0x01fd)) return true; + /* RV515 seems to have MSI issues where it loses + * MSI rearms occasionally. This leads to lockups and freezes. + * disable it by default. 
+ */ + if (rdev->family == CHIP_RV515) + return false; if (rdev->flags & RADEON_IS_IGP) { /* APUs work fine with MSIs */ if (rdev->family >= CHIP_PALM) diff --git a/drivers/gpu/drm/radeon/radeon_legacy_encoders.c b/drivers/gpu/drm/radeon/radeon_legacy_encoders.c index 2f46e0c8df5..42db254f6bb 100644 --- a/drivers/gpu/drm/radeon/radeon_legacy_encoders.c +++ b/drivers/gpu/drm/radeon/radeon_legacy_encoders.c @@ -88,7 +88,7 @@ static void radeon_legacy_lvds_update(struct drm_encoder *encoder, int mode) lvds_pll_cntl = RREG32(RADEON_LVDS_PLL_CNTL); lvds_pll_cntl |= RADEON_LVDS_PLL_EN; WREG32(RADEON_LVDS_PLL_CNTL, lvds_pll_cntl); - udelay(1000); + mdelay(1); lvds_pll_cntl = RREG32(RADEON_LVDS_PLL_CNTL); lvds_pll_cntl &= ~RADEON_LVDS_PLL_RESET; @@ -101,7 +101,7 @@ static void radeon_legacy_lvds_update(struct drm_encoder *encoder, int mode) (backlight_level << RADEON_LVDS_BL_MOD_LEVEL_SHIFT)); if (is_mac) lvds_gen_cntl |= RADEON_LVDS_BL_MOD_EN; - udelay(panel_pwr_delay * 1000); + mdelay(panel_pwr_delay); WREG32(RADEON_LVDS_GEN_CNTL, lvds_gen_cntl); break; case DRM_MODE_DPMS_STANDBY: @@ -118,10 +118,10 @@ static void radeon_legacy_lvds_update(struct drm_encoder *encoder, int mode) WREG32(RADEON_LVDS_GEN_CNTL, lvds_gen_cntl); lvds_gen_cntl &= ~(RADEON_LVDS_ON | RADEON_LVDS_BLON | RADEON_LVDS_EN | RADEON_LVDS_DIGON); } - udelay(panel_pwr_delay * 1000); + mdelay(panel_pwr_delay); WREG32(RADEON_LVDS_GEN_CNTL, lvds_gen_cntl); WREG32_PLL(RADEON_PIXCLKS_CNTL, pixclks_cntl); - udelay(panel_pwr_delay * 1000); + mdelay(panel_pwr_delay); break; } @@ -656,7 +656,7 @@ static enum drm_connector_status radeon_legacy_primary_dac_detect(struct drm_enc WREG32(RADEON_DAC_MACRO_CNTL, tmp); - udelay(2000); + mdelay(2); if (RREG32(RADEON_DAC_CNTL) & RADEON_DAC_CMP_OUTPUT) found = connector_status_connected; @@ -1499,7 +1499,7 @@ static enum drm_connector_status radeon_legacy_tv_dac_detect(struct drm_encoder tmp = dac_cntl2 | RADEON_DAC2_DAC2_CLK_SEL | RADEON_DAC2_CMP_EN; WREG32(RADEON_DAC_CNTL2, tmp); - udelay(10000); + mdelay(10); if (ASIC_IS_R300(rdev)) { if (RREG32(RADEON_DAC_CNTL2) & RADEON_DAC2_CMP_OUT_B) diff --git a/drivers/gpu/drm/radeon/radeon_object.c b/drivers/gpu/drm/radeon/radeon_object.c index 6f70158d34e..df6a4dbd93f 100644 --- a/drivers/gpu/drm/radeon/radeon_object.c +++ b/drivers/gpu/drm/radeon/radeon_object.c @@ -241,7 +241,8 @@ int radeon_bo_pin_restricted(struct radeon_bo *bo, u32 domain, u64 max_offset, domain_start = bo->rdev->mc.vram_start; else domain_start = bo->rdev->mc.gtt_start; - WARN_ON_ONCE((*gpu_addr - domain_start) > max_offset); + WARN_ON_ONCE(max_offset < + (radeon_bo_gpu_offset(bo) - domain_start)); } return 0; diff --git a/drivers/gpu/drm/radeon/rv770.c b/drivers/gpu/drm/radeon/rv770.c index c62ae4be384..cdab1aeaed6 100644 --- a/drivers/gpu/drm/radeon/rv770.c +++ b/drivers/gpu/drm/radeon/rv770.c @@ -969,7 +969,7 @@ void r700_vram_gtt_location(struct radeon_device *rdev, struct radeon_mc *mc) } if (rdev->flags & RADEON_IS_AGP) { size_bf = mc->gtt_start; - size_af = 0xFFFFFFFF - mc->gtt_end + 1; + size_af = 0xFFFFFFFF - mc->gtt_end; if (size_bf > size_af) { if (mc->mc_vram_size > size_bf) { dev_warn(rdev->dev, "limiting VRAM\n"); @@ -983,7 +983,7 @@ void r700_vram_gtt_location(struct radeon_device *rdev, struct radeon_mc *mc) mc->real_vram_size = size_af; mc->mc_vram_size = size_af; } - mc->vram_start = mc->gtt_end; + mc->vram_start = mc->gtt_end + 1; } mc->vram_end = mc->vram_start + mc->mc_vram_size - 1; dev_info(rdev->dev, "VRAM: %lluM 0x%08llX - 0x%08llX (%lluM used)\n", diff --git 
a/drivers/gpu/drm/radeon/si.c b/drivers/gpu/drm/radeon/si.c index ac7a199ffec..27bda986fc2 100644 --- a/drivers/gpu/drm/radeon/si.c +++ b/drivers/gpu/drm/radeon/si.c @@ -2999,8 +2999,8 @@ int si_rlc_init(struct radeon_device *rdev) } r = radeon_bo_pin(rdev->rlc.save_restore_obj, RADEON_GEM_DOMAIN_VRAM, &rdev->rlc.save_restore_gpu_addr); + radeon_bo_unreserve(rdev->rlc.save_restore_obj); if (r) { - radeon_bo_unreserve(rdev->rlc.save_restore_obj); dev_warn(rdev->dev, "(%d) pin RLC sr bo failed\n", r); si_rlc_fini(rdev); return r; @@ -3023,9 +3023,8 @@ int si_rlc_init(struct radeon_device *rdev) } r = radeon_bo_pin(rdev->rlc.clear_state_obj, RADEON_GEM_DOMAIN_VRAM, &rdev->rlc.clear_state_gpu_addr); + radeon_bo_unreserve(rdev->rlc.clear_state_obj); if (r) { - - radeon_bo_unreserve(rdev->rlc.clear_state_obj); dev_warn(rdev->dev, "(%d) pin RLC c bo failed\n", r); si_rlc_fini(rdev); return r; diff --git a/drivers/gpu/drm/savage/savage_state.c b/drivers/gpu/drm/savage/savage_state.c index 031aaaf79ac..b6d8608375c 100644 --- a/drivers/gpu/drm/savage/savage_state.c +++ b/drivers/gpu/drm/savage/savage_state.c @@ -988,7 +988,7 @@ int savage_bci_cmdbuf(struct drm_device *dev, void *data, struct drm_file *file_ * for locking on FreeBSD. */ if (cmdbuf->size) { - kcmd_addr = kmalloc(cmdbuf->size * 8, GFP_KERNEL); + kcmd_addr = kmalloc_array(cmdbuf->size, 8, GFP_KERNEL); if (kcmd_addr == NULL) return -ENOMEM; @@ -1015,8 +1015,8 @@ int savage_bci_cmdbuf(struct drm_device *dev, void *data, struct drm_file *file_ cmdbuf->vb_addr = kvb_addr; } if (cmdbuf->nbox) { - kbox_addr = kmalloc(cmdbuf->nbox * sizeof(struct drm_clip_rect), - GFP_KERNEL); + kbox_addr = kmalloc_array(cmdbuf->nbox, sizeof(struct drm_clip_rect), + GFP_KERNEL); if (kbox_addr == NULL) { ret = -ENOMEM; goto done; diff --git a/drivers/gpu/drm/udl/udl_drv.c b/drivers/gpu/drm/udl/udl_drv.c index 5340c5f3987..53673907a6a 100644 --- a/drivers/gpu/drm/udl/udl_drv.c +++ b/drivers/gpu/drm/udl/udl_drv.c @@ -47,7 +47,7 @@ static struct vm_operations_struct udl_gem_vm_ops = { static const struct file_operations udl_driver_fops = { .owner = THIS_MODULE, .open = drm_open, - .mmap = drm_gem_mmap, + .mmap = udl_drm_gem_mmap, .poll = drm_poll, .read = drm_read, .unlocked_ioctl = drm_ioctl, diff --git a/drivers/gpu/drm/udl/udl_drv.h b/drivers/gpu/drm/udl/udl_drv.h index 1612954a5bc..96820d03a30 100644 --- a/drivers/gpu/drm/udl/udl_drv.h +++ b/drivers/gpu/drm/udl/udl_drv.h @@ -121,6 +121,7 @@ struct udl_gem_object *udl_gem_alloc_object(struct drm_device *dev, int udl_gem_vmap(struct udl_gem_object *obj); void udl_gem_vunmap(struct udl_gem_object *obj); +int udl_drm_gem_mmap(struct file *filp, struct vm_area_struct *vma); int udl_gem_fault(struct vm_area_struct *vma, struct vm_fault *vmf); int udl_handle_damage(struct udl_framebuffer *fb, int x, int y, diff --git a/drivers/gpu/drm/udl/udl_gem.c b/drivers/gpu/drm/udl/udl_gem.c index 852642dc118..92f19ef329b 100644 --- a/drivers/gpu/drm/udl/udl_gem.c +++ b/drivers/gpu/drm/udl/udl_gem.c @@ -71,6 +71,20 @@ int udl_dumb_destroy(struct drm_file *file, struct drm_device *dev, return drm_gem_handle_delete(file, handle); } +int udl_drm_gem_mmap(struct file *filp, struct vm_area_struct *vma) +{ + int ret; + + ret = drm_gem_mmap(filp, vma); + if (ret) + return ret; + + vma->vm_flags &= ~VM_PFNMAP; + vma->vm_flags |= VM_MIXEDMAP; + + return ret; +} + int udl_gem_fault(struct vm_area_struct *vma, struct vm_fault *vmf) { struct udl_gem_object *obj = to_udl_bo(vma->vm_private_data); |