Diffstat (limited to 'drivers/gpu/drm/vmwgfx')
-rw-r--r--  drivers/gpu/drm/vmwgfx/Makefile          |   2
-rw-r--r--  drivers/gpu/drm/vmwgfx/vmwgfx_buffer.c   |   2
-rw-r--r--  drivers/gpu/drm/vmwgfx/vmwgfx_drv.c      |  33
-rw-r--r--  drivers/gpu/drm/vmwgfx/vmwgfx_drv.h      |  15
-rw-r--r--  drivers/gpu/drm/vmwgfx/vmwgfx_gmr.c      |   7
-rw-r--r--  drivers/gpu/drm/vmwgfx/vmwgfx_kms.c      |   1
-rw-r--r--  drivers/gpu/drm/vmwgfx/vmwgfx_ldu.c      |   4
-rw-r--r--  drivers/gpu/drm/vmwgfx/vmwgfx_prime.c    | 137
-rw-r--r--  drivers/gpu/drm/vmwgfx/vmwgfx_resource.c | 205
-rw-r--r--  drivers/gpu/drm/vmwgfx/vmwgfx_scrn.c     |   5
-rw-r--r--  drivers/gpu/drm/vmwgfx/vmwgfx_surface.c  |  30
11 files changed, 351 insertions, 90 deletions
diff --git a/drivers/gpu/drm/vmwgfx/Makefile b/drivers/gpu/drm/vmwgfx/Makefile
index 2cc6cd91ac1..9f8b690bcf5 100644
--- a/drivers/gpu/drm/vmwgfx/Makefile
+++ b/drivers/gpu/drm/vmwgfx/Makefile
@@ -6,6 +6,6 @@ vmwgfx-y := vmwgfx_execbuf.o vmwgfx_gmr.o vmwgfx_kms.o vmwgfx_drv.o \
 	    vmwgfx_fifo.o vmwgfx_irq.o vmwgfx_ldu.o vmwgfx_ttm_glue.o \
 	    vmwgfx_overlay.o vmwgfx_marker.o vmwgfx_gmrid_manager.o \
 	    vmwgfx_fence.o vmwgfx_dmabuf.o vmwgfx_scrn.o vmwgfx_context.o \
-	    vmwgfx_surface.o
+	    vmwgfx_surface.o vmwgfx_prime.o
 
 obj-$(CONFIG_DRM_VMWGFX) := vmwgfx.o
diff --git a/drivers/gpu/drm/vmwgfx/vmwgfx_buffer.c b/drivers/gpu/drm/vmwgfx/vmwgfx_buffer.c
index 7776e6f0aef..0489c615248 100644
--- a/drivers/gpu/drm/vmwgfx/vmwgfx_buffer.c
+++ b/drivers/gpu/drm/vmwgfx/vmwgfx_buffer.c
@@ -150,6 +150,8 @@ struct vmw_ttm_tt {
 	bool mapped;
 };
 
+const size_t vmw_tt_size = sizeof(struct vmw_ttm_tt);
+
 /**
  * Helper functions to advance a struct vmw_piter iterator.
  *
diff --git a/drivers/gpu/drm/vmwgfx/vmwgfx_drv.c b/drivers/gpu/drm/vmwgfx/vmwgfx_drv.c
index 0b5c7818ebf..c7a549694e5 100644
--- a/drivers/gpu/drm/vmwgfx/vmwgfx_drv.c
+++ b/drivers/gpu/drm/vmwgfx/vmwgfx_drv.c
@@ -453,12 +453,13 @@ static void vmw_get_initial_size(struct vmw_private *dev_priv)
  */
 static int vmw_dma_select_mode(struct vmw_private *dev_priv)
 {
-	const struct dma_map_ops *dma_ops = get_dma_ops(dev_priv->dev->dev);
 	static const char *names[vmw_dma_map_max] = {
 		[vmw_dma_phys] = "Using physical TTM page addresses.",
 		[vmw_dma_alloc_coherent] = "Using coherent TTM pages.",
 		[vmw_dma_map_populate] = "Keeping DMA mappings.",
 		[vmw_dma_map_bind] = "Giving up DMA mappings early."};
+#ifdef CONFIG_X86
+	const struct dma_map_ops *dma_ops = get_dma_ops(dev_priv->dev->dev);
 
 #ifdef CONFIG_INTEL_IOMMU
 	if (intel_iommu_enabled) {
@@ -482,7 +483,9 @@ static int vmw_dma_select_mode(struct vmw_private *dev_priv)
 	dev_priv->map_mode = vmw_dma_map_populate;
 #endif
 
+#ifdef CONFIG_INTEL_IOMMU
 out_fixup:
+#endif
 	if (dev_priv->map_mode == vmw_dma_map_populate &&
 	    vmw_restrict_iommu)
 		dev_priv->map_mode = vmw_dma_map_bind;
@@ -498,6 +501,10 @@ out_fixup:
 		return -EINVAL;
 #endif
 
+#else /* CONFIG_X86 */
+	dev_priv->map_mode = vmw_dma_map_populate;
+#endif /* CONFIG_X86 */
+
 	DRM_INFO("DMA map mode: %s\n", names[dev_priv->map_mode]);
 
 	return 0;
@@ -670,7 +677,7 @@ static int vmw_driver_load(struct drm_device *dev, unsigned long chipset)
 	}
 
 	dev_priv->tdev = ttm_object_device_init
-		(dev_priv->mem_global_ref.object, 12);
+		(dev_priv->mem_global_ref.object, 12, &vmw_prime_dmabuf_ops);
 
 	if (unlikely(dev_priv->tdev == NULL)) {
 		DRM_ERROR("Unable to initialize TTM object management.\n");
@@ -823,9 +830,17 @@ static void vmw_postclose(struct drm_device *dev,
 	struct vmw_fpriv *vmw_fp;
 
 	vmw_fp = vmw_fpriv(file_priv);
-	ttm_object_file_release(&vmw_fp->tfile);
-	if (vmw_fp->locked_master)
+
+	if (vmw_fp->locked_master) {
+		struct vmw_master *vmaster =
+			vmw_master(vmw_fp->locked_master);
+
+		ttm_lock_set_kill(&vmaster->lock, true, SIGTERM);
+		ttm_vt_unlock(&vmaster->lock);
 		drm_master_put(&vmw_fp->locked_master);
+	}
+
+	ttm_object_file_release(&vmw_fp->tfile);
 	kfree(vmw_fp);
 }
 
@@ -1008,14 +1023,13 @@ static void vmw_master_drop(struct drm_device *dev,
 	vmw_fp->locked_master = drm_master_get(file_priv->master);
 	ret = ttm_vt_lock(&vmaster->lock, false, vmw_fp->tfile);
-	vmw_execbuf_release_pinned_bo(dev_priv);
-
 	if (unlikely((ret != 0))) {
 		DRM_ERROR("Unable to lock TTM at VT switch.\n");
 		drm_master_put(&vmw_fp->locked_master);
 	}
 
-	ttm_lock_set_kill(&vmaster->lock, true, SIGTERM);
+	ttm_lock_set_kill(&vmaster->lock, false, SIGTERM);
+	vmw_execbuf_release_pinned_bo(dev_priv);
 
 	if (!dev_priv->enable_fb) {
 		ret = ttm_bo_evict_mm(&dev_priv->bdev, TTM_PL_VRAM);
@@ -1196,7 +1210,7 @@ static const struct file_operations vmwgfx_driver_fops = {
 
 static struct drm_driver driver = {
 	.driver_features = DRIVER_HAVE_IRQ | DRIVER_IRQ_SHARED |
-	DRIVER_MODESET,
+	DRIVER_MODESET | DRIVER_PRIME,
 	.load = vmw_driver_load,
 	.unload = vmw_driver_unload,
 	.lastclose = vmw_lastclose,
@@ -1221,6 +1235,9 @@ static struct drm_driver driver = {
 	.dumb_map_offset = vmw_dumb_map_offset,
 	.dumb_destroy = vmw_dumb_destroy,
 
+	.prime_fd_to_handle = vmw_prime_fd_to_handle,
+	.prime_handle_to_fd = vmw_prime_handle_to_fd,
+
 	.fops = &vmwgfx_driver_fops,
 	.name = VMWGFX_DRIVER_NAME,
 	.desc = VMWGFX_DRIVER_DESC,
diff --git a/drivers/gpu/drm/vmwgfx/vmwgfx_drv.h b/drivers/gpu/drm/vmwgfx/vmwgfx_drv.h
index e401d5dbcb9..20890ad8408 100644
--- a/drivers/gpu/drm/vmwgfx/vmwgfx_drv.h
+++ b/drivers/gpu/drm/vmwgfx/vmwgfx_drv.h
@@ -615,6 +615,7 @@ extern int vmw_mmap(struct file *filp, struct vm_area_struct *vma);
  * TTM buffer object driver - vmwgfx_buffer.c
  */
 
+extern const size_t vmw_tt_size;
 extern struct ttm_placement vmw_vram_placement;
 extern struct ttm_placement vmw_vram_ne_placement;
 extern struct ttm_placement vmw_vram_sys_placement;
@@ -819,6 +820,20 @@ int vmw_overlay_num_free_overlays(struct vmw_private *dev_priv);
 extern const struct ttm_mem_type_manager_func vmw_gmrid_manager_func;
 
 /**
+ * Prime - vmwgfx_prime.c
+ */
+
+extern const struct dma_buf_ops vmw_prime_dmabuf_ops;
+extern int vmw_prime_fd_to_handle(struct drm_device *dev,
+				  struct drm_file *file_priv,
+				  int fd, u32 *handle);
+extern int vmw_prime_handle_to_fd(struct drm_device *dev,
+				  struct drm_file *file_priv,
+				  uint32_t handle, uint32_t flags,
+				  int *prime_fd);
+
+
+/**
  * Inline helper functions
  */
 
diff --git a/drivers/gpu/drm/vmwgfx/vmwgfx_gmr.c b/drivers/gpu/drm/vmwgfx/vmwgfx_gmr.c
index 6d0952366f9..6ef0b035bec 100644
--- a/drivers/gpu/drm/vmwgfx/vmwgfx_gmr.c
+++ b/drivers/gpu/drm/vmwgfx/vmwgfx_gmr.c
@@ -145,7 +145,9 @@ static void vmw_gmr_free_descriptors(struct device *dev, dma_addr_t desc_dma,
 	}
 
 	page_virtual = kmap_atomic(page);
-	desc_dma = page_virtual[desc_per_page].ppn << PAGE_SHIFT;
+	desc_dma = (dma_addr_t)
+		le32_to_cpu(page_virtual[desc_per_page].ppn) <<
+		PAGE_SHIFT;
 	kunmap_atomic(page_virtual);
 	__free_page(page);
 
@@ -217,7 +219,8 @@ static int vmw_gmr_build_descriptors(struct device *dev,
 	desc_dma = 0;
 	list_for_each_entry_reverse(page, desc_pages, lru) {
 		page_virtual = kmap_atomic(page);
-		page_virtual[desc_per_page].ppn = desc_dma >> PAGE_SHIFT;
+		page_virtual[desc_per_page].ppn = cpu_to_le32
+			(desc_dma >> PAGE_SHIFT);
 		kunmap_atomic(page_virtual);
 		desc_dma = dma_map_page(dev, page, 0, PAGE_SIZE,
 					DMA_TO_DEVICE);
diff --git a/drivers/gpu/drm/vmwgfx/vmwgfx_kms.c b/drivers/gpu/drm/vmwgfx/vmwgfx_kms.c
index ecb3d867b42..03f1c203863 100644
--- a/drivers/gpu/drm/vmwgfx/vmwgfx_kms.c
+++ b/drivers/gpu/drm/vmwgfx/vmwgfx_kms.c
@@ -75,6 +75,7 @@ void vmw_display_unit_cleanup(struct vmw_display_unit *du)
 		vmw_surface_unreference(&du->cursor_surface);
 	if (du->cursor_dmabuf)
 		vmw_dmabuf_unreference(&du->cursor_dmabuf);
+	drm_sysfs_connector_remove(&du->connector);
 	drm_crtc_cleanup(&du->crtc);
 	drm_encoder_cleanup(&du->encoder);
 	drm_connector_cleanup(&du->connector);
diff --git a/drivers/gpu/drm/vmwgfx/vmwgfx_ldu.c b/drivers/gpu/drm/vmwgfx/vmwgfx_ldu.c
index 79f7e8e6052..a055a26819c 100644
--- a/drivers/gpu/drm/vmwgfx/vmwgfx_ldu.c
+++ b/drivers/gpu/drm/vmwgfx/vmwgfx_ldu.c
@@ -260,6 +260,7 @@ static int vmw_ldu_crtc_set_config(struct drm_mode_set *set)
 		connector->encoder = NULL;
 		encoder->crtc = NULL;
 		crtc->fb = NULL;
+		crtc->enabled = false;
 
 		vmw_ldu_del_active(dev_priv, ldu);
 
@@ -285,6 +286,7 @@ static int vmw_ldu_crtc_set_config(struct drm_mode_set *set)
 	crtc->x = set->x;
 	crtc->y = set->y;
 	crtc->mode = *mode;
+	crtc->enabled = true;
 
 	vmw_ldu_add_active(dev_priv, ldu, vfb);
 
@@ -369,6 +371,8 @@ static int vmw_ldu_init(struct vmw_private *dev_priv, unsigned unit)
 	encoder->possible_crtcs = (1 << unit);
 	encoder->possible_clones = 0;
 
+	(void) drm_sysfs_connector_add(connector);
+
 	drm_crtc_init(dev, crtc, &vmw_legacy_crtc_funcs);
 
 	drm_mode_crtc_set_gamma_size(crtc, 256);
diff --git a/drivers/gpu/drm/vmwgfx/vmwgfx_prime.c b/drivers/gpu/drm/vmwgfx/vmwgfx_prime.c
new file mode 100644
index 00000000000..31fe32d8d65
--- /dev/null
+++ b/drivers/gpu/drm/vmwgfx/vmwgfx_prime.c
@@ -0,0 +1,137 @@
+/**************************************************************************
+ *
+ * Copyright © 2013 VMware, Inc., Palo Alto, CA., USA
+ * All Rights Reserved.
+ *
+ * Permission is hereby granted, free of charge, to any person obtaining a
+ * copy of this software and associated documentation files (the
+ * "Software"), to deal in the Software without restriction, including
+ * without limitation the rights to use, copy, modify, merge, publish,
+ * distribute, sub license, and/or sell copies of the Software, and to
+ * permit persons to whom the Software is furnished to do so, subject to
+ * the following conditions:
+ *
+ * The above copyright notice and this permission notice (including the
+ * next paragraph) shall be included in all copies or substantial portions
+ * of the Software.
+ *
+ * THE SOFTWARE IS PROVIDED "AS IS", WITHOUT WARRANTY OF ANY KIND, EXPRESS OR
+ * IMPLIED, INCLUDING BUT NOT LIMITED TO THE WARRANTIES OF MERCHANTABILITY,
+ * FITNESS FOR A PARTICULAR PURPOSE AND NON-INFRINGEMENT. IN NO EVENT SHALL
+ * THE COPYRIGHT HOLDERS, AUTHORS AND/OR ITS SUPPLIERS BE LIABLE FOR ANY CLAIM,
+ * DAMAGES OR OTHER LIABILITY, WHETHER IN AN ACTION OF CONTRACT, TORT OR
+ * OTHERWISE, ARISING FROM, OUT OF OR IN CONNECTION WITH THE SOFTWARE OR THE
+ * USE OR OTHER DEALINGS IN THE SOFTWARE.
+ *
+ **************************************************************************/
+/*
+ * Authors:
+ *     Thomas Hellstrom <thellstrom@vmware.com>
+ *
+ */
+
+#include "vmwgfx_drv.h"
+#include <linux/dma-buf.h>
+#include <drm/ttm/ttm_object.h>
+
+/*
+ * DMA-BUF attach- and mapping methods. No need to implement
+ * these until we have other virtual devices use them.
+ */
+
+static int vmw_prime_map_attach(struct dma_buf *dma_buf,
+				struct device *target_dev,
+				struct dma_buf_attachment *attach)
+{
+	return -ENOSYS;
+}
+
+static void vmw_prime_map_detach(struct dma_buf *dma_buf,
+				 struct dma_buf_attachment *attach)
+{
+}
+
+static struct sg_table *vmw_prime_map_dma_buf(struct dma_buf_attachment *attach,
+					      enum dma_data_direction dir)
+{
+	return ERR_PTR(-ENOSYS);
+}
+
+static void vmw_prime_unmap_dma_buf(struct dma_buf_attachment *attach,
+				    struct sg_table *sgb,
+				    enum dma_data_direction dir)
+{
+}
+
+static void *vmw_prime_dmabuf_vmap(struct dma_buf *dma_buf)
+{
+	return NULL;
+}
+
+static void vmw_prime_dmabuf_vunmap(struct dma_buf *dma_buf, void *vaddr)
+{
+}
+
+static void *vmw_prime_dmabuf_kmap_atomic(struct dma_buf *dma_buf,
+		unsigned long page_num)
+{
+	return NULL;
+}
+
+static void vmw_prime_dmabuf_kunmap_atomic(struct dma_buf *dma_buf,
+		unsigned long page_num, void *addr)
+{
+
+}
+static void *vmw_prime_dmabuf_kmap(struct dma_buf *dma_buf,
+		unsigned long page_num)
+{
+	return NULL;
+}
+
+static void vmw_prime_dmabuf_kunmap(struct dma_buf *dma_buf,
+		unsigned long page_num, void *addr)
+{
+
+}
+
+static int vmw_prime_dmabuf_mmap(struct dma_buf *dma_buf,
+				 struct vm_area_struct *vma)
+{
+	WARN_ONCE(true, "Attempted use of dmabuf mmap. Bad.\n");
+	return -ENOSYS;
+}
+
+const struct dma_buf_ops vmw_prime_dmabuf_ops = {
+	.attach = vmw_prime_map_attach,
+	.detach = vmw_prime_map_detach,
+	.map_dma_buf = vmw_prime_map_dma_buf,
+	.unmap_dma_buf = vmw_prime_unmap_dma_buf,
+	.release = NULL,
+	.kmap = vmw_prime_dmabuf_kmap,
+	.kmap_atomic = vmw_prime_dmabuf_kmap_atomic,
+	.kunmap = vmw_prime_dmabuf_kunmap,
+	.kunmap_atomic = vmw_prime_dmabuf_kunmap_atomic,
+	.mmap = vmw_prime_dmabuf_mmap,
+	.vmap = vmw_prime_dmabuf_vmap,
+	.vunmap = vmw_prime_dmabuf_vunmap,
+};
+
+int vmw_prime_fd_to_handle(struct drm_device *dev,
+			   struct drm_file *file_priv,
+			   int fd, u32 *handle)
+{
+	struct ttm_object_file *tfile = vmw_fpriv(file_priv)->tfile;
+
+	return ttm_prime_fd_to_handle(tfile, fd, handle);
+}
+
+int vmw_prime_handle_to_fd(struct drm_device *dev,
+			   struct drm_file *file_priv,
+			   uint32_t handle, uint32_t flags,
+			   int *prime_fd)
+{
+	struct ttm_object_file *tfile = vmw_fpriv(file_priv)->tfile;
+
+	return ttm_prime_handle_to_fd(tfile, handle, flags, prime_fd);
+}
diff --git a/drivers/gpu/drm/vmwgfx/vmwgfx_resource.c b/drivers/gpu/drm/vmwgfx/vmwgfx_resource.c
index 0e67cf41065..9b5ea2ac7dd 100644
--- a/drivers/gpu/drm/vmwgfx/vmwgfx_resource.c
+++ b/drivers/gpu/drm/vmwgfx/vmwgfx_resource.c
@@ -32,8 +32,10 @@
 #include <drm/drmP.h>
 #include "vmwgfx_resource_priv.h"
 
+#define VMW_RES_EVICT_ERR_COUNT 10
+
 struct vmw_user_dma_buffer {
-	struct ttm_base_object base;
+	struct ttm_prime_object prime;
 	struct vmw_dma_buffer dma;
 };
 
@@ -295,7 +297,7 @@ int vmw_user_resource_lookup_handle(struct vmw_private *dev_priv,
 	if (unlikely(base == NULL))
 		return -EINVAL;
 
-	if (unlikely(base->object_type != converter->object_type))
+	if (unlikely(ttm_base_object_type(base) != converter->object_type))
 		goto out_bad_resource;
 
 	res = converter->base_obj_to_res(base);
@@ -350,6 +352,38 @@ int vmw_user_lookup_handle(struct vmw_private *dev_priv,
 /**
  * Buffer management.
  */
+
+/**
+ * vmw_dmabuf_acc_size - Calculate the pinned memory usage of buffers
+ *
+ * @dev_priv: Pointer to a struct vmw_private identifying the device.
+ * @size: The requested buffer size.
+ * @user: Whether this is an ordinary dma buffer or a user dma buffer.
+ */
+static size_t vmw_dmabuf_acc_size(struct vmw_private *dev_priv, size_t size,
+				  bool user)
+{
+	static size_t struct_size, user_struct_size;
+	size_t num_pages = PAGE_ALIGN(size) >> PAGE_SHIFT;
+	size_t page_array_size = ttm_round_pot(num_pages * sizeof(void *));
+
+	if (unlikely(struct_size == 0)) {
+		size_t backend_size = ttm_round_pot(vmw_tt_size);
+
+		struct_size = backend_size +
+			ttm_round_pot(sizeof(struct vmw_dma_buffer));
+		user_struct_size = backend_size +
+			ttm_round_pot(sizeof(struct vmw_user_dma_buffer));
+	}
+
+	if (dev_priv->map_mode == vmw_dma_alloc_coherent)
+		page_array_size +=
+			ttm_round_pot(num_pages * sizeof(dma_addr_t));
+
+	return ((user) ? user_struct_size : struct_size) +
+		page_array_size;
+}
+
 void vmw_dmabuf_bo_free(struct ttm_buffer_object *bo)
 {
 	struct vmw_dma_buffer *vmw_bo = vmw_dma_buffer(bo);
@@ -357,6 +391,13 @@ void vmw_dmabuf_bo_free(struct ttm_buffer_object *bo)
 	kfree(vmw_bo);
 }
 
+static void vmw_user_dmabuf_destroy(struct ttm_buffer_object *bo)
+{
+	struct vmw_user_dma_buffer *vmw_user_bo = vmw_user_dma_buffer(bo);
+
+	ttm_prime_object_kfree(vmw_user_bo, prime);
+}
+
 int vmw_dmabuf_init(struct vmw_private *dev_priv,
 		    struct vmw_dma_buffer *vmw_bo,
 		    size_t size, struct ttm_placement *placement,
@@ -366,28 +407,23 @@ int vmw_dmabuf_init(struct vmw_private *dev_priv,
 	struct ttm_bo_device *bdev = &dev_priv->bdev;
 	size_t acc_size;
 	int ret;
+	bool user = (bo_free == &vmw_user_dmabuf_destroy);
 
-	BUG_ON(!bo_free);
+	BUG_ON(!bo_free && (!user && (bo_free != vmw_dmabuf_bo_free)));
 
-	acc_size = ttm_bo_acc_size(bdev, size, sizeof(struct vmw_dma_buffer));
+	acc_size = vmw_dmabuf_acc_size(dev_priv, size, user);
 	memset(vmw_bo, 0, sizeof(*vmw_bo));
 
 	INIT_LIST_HEAD(&vmw_bo->res_list);
 
 	ret = ttm_bo_init(bdev, &vmw_bo->base, size,
-			  ttm_bo_type_device, placement,
+			  (user) ? ttm_bo_type_device :
+			  ttm_bo_type_kernel, placement,
 			  0, interruptible,
 			  NULL, acc_size, NULL, bo_free);
 	return ret;
 }
 
-static void vmw_user_dmabuf_destroy(struct ttm_buffer_object *bo)
-{
-	struct vmw_user_dma_buffer *vmw_user_bo = vmw_user_dma_buffer(bo);
-
-	ttm_base_object_kfree(vmw_user_bo, base);
-}
-
 static void vmw_user_dmabuf_release(struct ttm_base_object **p_base)
 {
 	struct vmw_user_dma_buffer *vmw_user_bo;
@@ -399,7 +435,8 @@ static void vmw_user_dmabuf_release(struct ttm_base_object **p_base)
 	if (unlikely(base == NULL))
 		return;
 
-	vmw_user_bo = container_of(base, struct vmw_user_dma_buffer, base);
+	vmw_user_bo = container_of(base, struct vmw_user_dma_buffer,
+				   prime.base);
 	bo = &vmw_user_bo->dma.base;
 	ttm_bo_unref(&bo);
 }
@@ -440,18 +477,19 @@ int vmw_user_dmabuf_alloc(struct vmw_private *dev_priv,
 		return ret;
 
 	tmp = ttm_bo_reference(&user_bo->dma.base);
-	ret = ttm_base_object_init(tfile,
-				   &user_bo->base,
-				   shareable,
-				   ttm_buffer_type,
-				   &vmw_user_dmabuf_release, NULL);
+	ret = ttm_prime_object_init(tfile,
+				    size,
+				    &user_bo->prime,
+				    shareable,
+				    ttm_buffer_type,
+				    &vmw_user_dmabuf_release, NULL);
 	if (unlikely(ret != 0)) {
 		ttm_bo_unref(&tmp);
 		goto out_no_base_object;
 	}
 
 	*p_dma_buf = &user_bo->dma;
-	*handle = user_bo->base.hash.key;
+	*handle = user_bo->prime.base.hash.key;
 
 out_no_base_object:
 	return ret;
@@ -473,8 +511,8 @@ int vmw_user_dmabuf_verify_access(struct ttm_buffer_object *bo,
 		return -EPERM;
 
 	vmw_user_bo = vmw_user_dma_buffer(bo);
-	return (vmw_user_bo->base.tfile == tfile ||
-	vmw_user_bo->base.shareable) ? 0 : -EPERM;
+	return (vmw_user_bo->prime.base.tfile == tfile ||
+		vmw_user_bo->prime.base.shareable) ? 0 : -EPERM;
 }
 
 int vmw_dmabuf_alloc_ioctl(struct drm_device *dev, void *data,
@@ -536,14 +574,15 @@ int vmw_user_dmabuf_lookup(struct ttm_object_file *tfile,
 		return -ESRCH;
 	}
 
-	if (unlikely(base->object_type != ttm_buffer_type)) {
+	if (unlikely(ttm_base_object_type(base) != ttm_buffer_type)) {
 		ttm_base_object_unref(&base);
 		printk(KERN_ERR "Invalid buffer object handle 0x%08lx.\n",
 		       (unsigned long)handle);
 		return -EINVAL;
 	}
 
-	vmw_user_bo = container_of(base, struct vmw_user_dma_buffer, base);
+	vmw_user_bo = container_of(base, struct vmw_user_dma_buffer,
+				   prime.base);
 	(void)ttm_bo_reference(&vmw_user_bo->dma.base);
 	ttm_base_object_unref(&base);
 	*out = &vmw_user_bo->dma;
@@ -560,7 +599,8 @@ int vmw_user_dmabuf_reference(struct ttm_object_file *tfile,
 		return -EINVAL;
 
 	user_bo = container_of(dma_buf, struct vmw_user_dma_buffer, dma);
-	return ttm_ref_object_add(tfile, &user_bo->base, TTM_REF_USAGE, NULL);
+	return ttm_ref_object_add(tfile, &user_bo->prime.base,
+				  TTM_REF_USAGE, NULL);
 }
 
 /*
@@ -775,53 +815,55 @@ err_ref:
 }
 
+/**
+ * vmw_dumb_create - Create a dumb kms buffer
+ *
+ * @file_priv: Pointer to a struct drm_file identifying the caller.
+ * @dev: Pointer to the drm device.
+ * @args: Pointer to a struct drm_mode_create_dumb structure
+ *
+ * This is a driver callback for the core drm create_dumb functionality.
+ * Note that this is very similar to the vmw_dmabuf_alloc ioctl, except
+ * that the arguments have a different format.
+ */
 int vmw_dumb_create(struct drm_file *file_priv,
 		    struct drm_device *dev,
 		    struct drm_mode_create_dumb *args)
 {
 	struct vmw_private *dev_priv = vmw_priv(dev);
 	struct vmw_master *vmaster = vmw_master(file_priv->master);
-	struct vmw_user_dma_buffer *vmw_user_bo;
-	struct ttm_buffer_object *tmp;
+	struct vmw_dma_buffer *dma_buf;
 	int ret;
 
 	args->pitch = args->width * ((args->bpp + 7) / 8);
 	args->size = args->pitch * args->height;
 
-	vmw_user_bo = kzalloc(sizeof(*vmw_user_bo), GFP_KERNEL);
-	if (vmw_user_bo == NULL)
-		return -ENOMEM;
-
 	ret = ttm_read_lock(&vmaster->lock, true);
-	if (ret != 0) {
-		kfree(vmw_user_bo);
+	if (unlikely(ret != 0))
 		return ret;
-	}
-
-	ret = vmw_dmabuf_init(dev_priv, &vmw_user_bo->dma, args->size,
-			      &vmw_vram_sys_placement, true,
-			      &vmw_user_dmabuf_destroy);
-	if (ret != 0)
-		goto out_no_dmabuf;
 
-	tmp = ttm_bo_reference(&vmw_user_bo->dma.base);
-	ret = ttm_base_object_init(vmw_fpriv(file_priv)->tfile,
-				   &vmw_user_bo->base,
-				   false,
-				   ttm_buffer_type,
-				   &vmw_user_dmabuf_release, NULL);
+	ret = vmw_user_dmabuf_alloc(dev_priv, vmw_fpriv(file_priv)->tfile,
+				    args->size, false, &args->handle,
+				    &dma_buf);
 	if (unlikely(ret != 0))
-		goto out_no_base_object;
-
-	args->handle = vmw_user_bo->base.hash.key;
+		goto out_no_dmabuf;
 
-out_no_base_object:
-	ttm_bo_unref(&tmp);
+	vmw_dmabuf_unreference(&dma_buf);
out_no_dmabuf:
 	ttm_read_unlock(&vmaster->lock);
 	return ret;
 }
 
+/**
+ * vmw_dumb_map_offset - Return the address space offset of a dumb buffer
+ *
+ * @file_priv: Pointer to a struct drm_file identifying the caller.
+ * @dev: Pointer to the drm device.
+ * @handle: Handle identifying the dumb buffer.
+ * @offset: The address space offset returned.
+ *
+ * This is a driver callback for the core drm dumb_map_offset functionality.
+ */
 int vmw_dumb_map_offset(struct drm_file *file_priv,
 			struct drm_device *dev, uint32_t handle,
 			uint64_t *offset)
@@ -839,6 +881,15 @@ int vmw_dumb_map_offset(struct drm_file *file_priv,
 	return 0;
 }
 
+/**
+ * vmw_dumb_destroy - Destroy a dumb buffer
+ *
+ * @file_priv: Pointer to a struct drm_file identifying the caller.
+ * @dev: Pointer to the drm device.
+ * @handle: Handle identifying the dumb buffer.
+ *
+ * This is a driver callback for the core drm dumb_destroy functionality.
+ */
 int vmw_dumb_destroy(struct drm_file *file_priv,
 		     struct drm_device *dev,
 		     uint32_t handle)
@@ -970,7 +1021,7 @@ void vmw_resource_unreserve(struct vmw_resource *res,
 	if (new_backup)
 		res->backup_offset = new_backup_offset;
 
-	if (!res->func->may_evict)
+	if (!res->func->may_evict || res->id == -1)
 		return;
 
 	write_lock(&dev_priv->resource_lock);
@@ -992,7 +1043,6 @@ void vmw_resource_unreserve(struct vmw_resource *res,
  */
 static int
 vmw_resource_check_buffer(struct vmw_resource *res,
-			  struct ww_acquire_ctx *ticket,
 			  bool interruptible,
 			  struct ttm_validate_buffer *val_buf)
 {
@@ -1009,7 +1059,7 @@ vmw_resource_check_buffer(struct vmw_resource *res,
 	INIT_LIST_HEAD(&val_list);
 	val_buf->bo = ttm_bo_reference(&res->backup->base);
 	list_add_tail(&val_buf->head, &val_list);
-	ret = ttm_eu_reserve_buffers(ticket, &val_list);
+	ret = ttm_eu_reserve_buffers(NULL, &val_list);
 	if (unlikely(ret != 0))
 		goto out_no_reserve;
 
@@ -1027,7 +1077,7 @@ vmw_resource_check_buffer(struct vmw_resource *res,
 	return 0;
 
 out_no_validate:
-	ttm_eu_backoff_reservation(ticket, &val_list);
+	ttm_eu_backoff_reservation(NULL, &val_list);
 out_no_reserve:
 	ttm_bo_unref(&val_buf->bo);
 	if (backup_dirty)
@@ -1072,8 +1122,7 @@ int vmw_resource_reserve(struct vmw_resource *res, bool no_backup)
  * @val_buf:        Backup buffer information.
  */
 static void
-vmw_resource_backoff_reservation(struct ww_acquire_ctx *ticket,
-				 struct ttm_validate_buffer *val_buf)
+vmw_resource_backoff_reservation(struct ttm_validate_buffer *val_buf)
 {
 	struct list_head val_list;
 
@@ -1082,7 +1131,7 @@ vmw_resource_backoff_reservation(struct ww_acquire_ctx *ticket,
 	INIT_LIST_HEAD(&val_list);
 	list_add_tail(&val_buf->head, &val_list);
 
-	ttm_eu_backoff_reservation(ticket, &val_list);
+	ttm_eu_backoff_reservation(NULL, &val_list);
 	ttm_bo_unref(&val_buf->bo);
 }
 
@@ -1091,18 +1140,18 @@ vmw_resource_backoff_reservation(struct ww_acquire_ctx *ticket,
 * to a backup buffer.
 *
 * @res:            The resource to evict.
+ * @interruptible:  Whether to wait interruptible.
  */
-int vmw_resource_do_evict(struct vmw_resource *res)
+int vmw_resource_do_evict(struct vmw_resource *res, bool interruptible)
 {
 	struct ttm_validate_buffer val_buf;
 	const struct vmw_res_func *func = res->func;
-	struct ww_acquire_ctx ticket;
 	int ret;
 
 	BUG_ON(!func->may_evict);
 
 	val_buf.bo = NULL;
-	ret = vmw_resource_check_buffer(res, &ticket, true, &val_buf);
+	ret = vmw_resource_check_buffer(res, interruptible, &val_buf);
 	if (unlikely(ret != 0))
 		return ret;
 
@@ -1117,7 +1166,7 @@ int vmw_resource_do_evict(struct vmw_resource *res)
 	res->backup_dirty = true;
 	res->res_dirty = false;
 out_no_unbind:
-	vmw_resource_backoff_reservation(&ticket, &val_buf);
+	vmw_resource_backoff_reservation(&val_buf);
 
 	return ret;
 }
 
@@ -1141,6 +1190,7 @@ int vmw_resource_validate(struct vmw_resource *res)
 	struct vmw_private *dev_priv = res->dev_priv;
 	struct list_head *lru_list = &dev_priv->res_lru[res->func->res_type];
 	struct ttm_validate_buffer val_buf;
+	unsigned err_count = 0;
 
 	if (likely(!res->func->may_evict))
 		return 0;
@@ -1155,7 +1205,7 @@ int vmw_resource_validate(struct vmw_resource *res)
 		write_lock(&dev_priv->resource_lock);
 		if (list_empty(lru_list) || !res->func->may_evict) {
-			DRM_ERROR("Out of device device id entries "
+			DRM_ERROR("Out of device device resources "
 				  "for %s.\n", res->func->type_name);
 			ret = -EBUSY;
 			write_unlock(&dev_priv->resource_lock);
@@ -1168,7 +1218,19 @@ int vmw_resource_validate(struct vmw_resource *res)
 		list_del_init(&evict_res->lru_head);
 
 		write_unlock(&dev_priv->resource_lock);
-		vmw_resource_do_evict(evict_res);
+
+		ret = vmw_resource_do_evict(evict_res, true);
+		if (unlikely(ret != 0)) {
+			write_lock(&dev_priv->resource_lock);
+			list_add_tail(&evict_res->lru_head, lru_list);
+			write_unlock(&dev_priv->resource_lock);
+			if (ret == -ERESTARTSYS ||
+			    ++err_count > VMW_RES_EVICT_ERR_COUNT) {
+				vmw_resource_unreference(&evict_res);
+				goto out_no_validate;
+			}
+		}
+
 		vmw_resource_unreference(&evict_res);
 	} while (1);
 
@@ -1253,13 +1315,15 @@ bool vmw_resource_needs_backup(const struct vmw_resource *res)
 * @type:           The resource type to evict
 *
 * To avoid thrashing starvation or as part of the hibernation sequence,
- * evict all evictable resources of a specific type.
+ * try to evict all evictable resources of a specific type.
  */
 static void vmw_resource_evict_type(struct vmw_private *dev_priv,
 				    enum vmw_res_type type)
 {
 	struct list_head *lru_list = &dev_priv->res_lru[type];
 	struct vmw_resource *evict_res;
+	unsigned err_count = 0;
+	int ret;
 
 	do {
 		write_lock(&dev_priv->resource_lock);
@@ -1272,7 +1336,18 @@ static void vmw_resource_evict_type(struct vmw_private *dev_priv,
 					 lru_head));
 		list_del_init(&evict_res->lru_head);
 		write_unlock(&dev_priv->resource_lock);
-		vmw_resource_do_evict(evict_res);
+
+		ret = vmw_resource_do_evict(evict_res, false);
+		if (unlikely(ret != 0)) {
+			write_lock(&dev_priv->resource_lock);
+			list_add_tail(&evict_res->lru_head, lru_list);
+			write_unlock(&dev_priv->resource_lock);
+			if (++err_count > VMW_RES_EVICT_ERR_COUNT) {
+				vmw_resource_unreference(&evict_res);
+				return;
+			}
+		}
+
 		vmw_resource_unreference(&evict_res);
 	} while (1);
 
diff --git a/drivers/gpu/drm/vmwgfx/vmwgfx_scrn.c b/drivers/gpu/drm/vmwgfx/vmwgfx_scrn.c
index 26387c3d5a2..22406c8651e 100644
--- a/drivers/gpu/drm/vmwgfx/vmwgfx_scrn.c
+++ b/drivers/gpu/drm/vmwgfx/vmwgfx_scrn.c
@@ -310,6 +310,7 @@ static int vmw_sou_crtc_set_config(struct drm_mode_set *set)
 		crtc->fb = NULL;
 		crtc->x = 0;
 		crtc->y = 0;
+		crtc->enabled = false;
 
 		vmw_sou_del_active(dev_priv, sou);
 
@@ -370,6 +371,7 @@ static int vmw_sou_crtc_set_config(struct drm_mode_set *set)
 		crtc->fb = NULL;
 		crtc->x = 0;
 		crtc->y = 0;
+		crtc->enabled = false;
 
 		return ret;
 	}
@@ -382,6 +384,7 @@ static int vmw_sou_crtc_set_config(struct drm_mode_set *set)
 	crtc->fb = fb;
 	crtc->x = set->x;
 	crtc->y = set->y;
+	crtc->enabled = true;
 
 	return 0;
 }
@@ -464,6 +467,8 @@ static int vmw_sou_init(struct vmw_private *dev_priv, unsigned unit)
 	encoder->possible_crtcs = (1 << unit);
 	encoder->possible_clones = 0;
 
+	(void) drm_sysfs_connector_add(connector);
+
 	drm_crtc_init(dev, crtc, &vmw_screen_object_crtc_funcs);
 
 	drm_mode_crtc_set_gamma_size(crtc, 256);
diff --git a/drivers/gpu/drm/vmwgfx/vmwgfx_surface.c b/drivers/gpu/drm/vmwgfx/vmwgfx_surface.c
index 58281433974..7de2ea8bd55 100644
--- a/drivers/gpu/drm/vmwgfx/vmwgfx_surface.c
+++ b/drivers/gpu/drm/vmwgfx/vmwgfx_surface.c
@@ -38,7 +38,7 @@
 * @size:           TTM accounting size for the surface.
  */
 struct vmw_user_surface {
-	struct ttm_base_object base;
+	struct ttm_prime_object prime;
 	struct vmw_surface srf;
 	uint32_t size;
 	uint32_t backup_handle;
@@ -580,7 +580,8 @@ static int vmw_surface_init(struct vmw_private *dev_priv,
 static struct vmw_resource *
 vmw_user_surface_base_to_res(struct ttm_base_object *base)
 {
-	return &(container_of(base, struct vmw_user_surface, base)->srf.res);
+	return &(container_of(base, struct vmw_user_surface,
+			      prime.base)->srf.res);
 }
 
 /**
@@ -599,7 +600,7 @@ static void vmw_user_surface_free(struct vmw_resource *res)
 	kfree(srf->offsets);
 	kfree(srf->sizes);
 	kfree(srf->snooper.image);
-	ttm_base_object_kfree(user_srf, base);
+	ttm_prime_object_kfree(user_srf, prime);
 	ttm_mem_global_free(vmw_mem_glob(dev_priv), size);
 }
 
@@ -616,7 +617,7 @@ static void vmw_user_surface_base_release(struct ttm_base_object **p_base)
 {
 	struct ttm_base_object *base = *p_base;
 	struct vmw_user_surface *user_srf =
-	    container_of(base, struct vmw_user_surface, base);
+	    container_of(base, struct vmw_user_surface, prime.base);
 	struct vmw_resource *res = &user_srf->srf.res;
 
 	*p_base = NULL;
@@ -790,8 +791,8 @@ int vmw_surface_define_ioctl(struct drm_device *dev, void *data,
 	}
 	srf->snooper.crtc = NULL;
 
-	user_srf->base.shareable = false;
-	user_srf->base.tfile = NULL;
+	user_srf->prime.base.shareable = false;
+	user_srf->prime.base.tfile = NULL;
 
 	/**
 	 * From this point, the generic resource management functions
@@ -803,9 +804,9 @@ int vmw_surface_define_ioctl(struct drm_device *dev, void *data,
 		goto out_unlock;
 
 	tmp = vmw_resource_reference(&srf->res);
-	ret = ttm_base_object_init(tfile, &user_srf->base,
-				   req->shareable, VMW_RES_SURFACE,
-				   &vmw_user_surface_base_release, NULL);
+	ret = ttm_prime_object_init(tfile, res->backup_size, &user_srf->prime,
+				    req->shareable, VMW_RES_SURFACE,
+				    &vmw_user_surface_base_release, NULL);
 
 	if (unlikely(ret != 0)) {
 		vmw_resource_unreference(&tmp);
@@ -813,7 +814,7 @@ int vmw_surface_define_ioctl(struct drm_device *dev, void *data,
 		goto out_unlock;
 	}
 
-	rep->sid = user_srf->base.hash.key;
+	rep->sid = user_srf->prime.base.hash.key;
 	vmw_resource_unreference(&res);
 
 	ttm_read_unlock(&vmaster->lock);
@@ -823,7 +824,7 @@ out_no_copy:
 out_no_offsets:
 	kfree(srf->sizes);
 out_no_sizes:
-	ttm_base_object_kfree(user_srf, base);
+	ttm_prime_object_kfree(user_srf, prime);
 out_no_user_srf:
 	ttm_mem_global_free(vmw_mem_glob(dev_priv), size);
 out_unlock:
@@ -859,13 +860,14 @@ int vmw_surface_reference_ioctl(struct drm_device *dev, void *data,
 		return -EINVAL;
 	}
 
-	if (unlikely(base->object_type != VMW_RES_SURFACE))
+	if (unlikely(ttm_base_object_type(base) != VMW_RES_SURFACE))
 		goto out_bad_resource;
 
-	user_srf = container_of(base, struct vmw_user_surface, base);
+	user_srf = container_of(base, struct vmw_user_surface, prime.base);
 	srf = &user_srf->srf;
 
-	ret = ttm_ref_object_add(tfile, &user_srf->base, TTM_REF_USAGE, NULL);
+	ret = ttm_ref_object_add(tfile, &user_srf->prime.base,
+				 TTM_REF_USAGE, NULL);
 	if (unlikely(ret != 0)) {
 		DRM_ERROR("Could not add a reference to a surface.\n");
 		goto out_no_reference;
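
Editor's note: the new vmw_prime_fd_to_handle()/vmw_prime_handle_to_fd() callbacks are reached through the generic DRM PRIME ioctls, so the patch can be exercised from userspace with no vmwgfx-specific API. Below is a minimal illustrative sketch, not part of the commit; it assumes a vmwgfx-backed node at /dev/dri/card0 and a libdrm that provides drmIoctl() and drmPrimeHandleToFD().

/*
 * Hypothetical userspace test: create a dumb buffer on the vmwgfx node
 * (exercising the reworked vmw_dumb_create() path) and export it as a
 * dma-buf file descriptor via the new PRIME entry points.
 * Build: gcc prime_test.c $(pkg-config --cflags --libs libdrm)
 */
#include <fcntl.h>
#include <stdio.h>
#include <string.h>
#include <unistd.h>
#include <xf86drm.h>	/* drmIoctl(), drmPrimeHandleToFD(), DRM_CLOEXEC */

int main(void)
{
	struct drm_mode_create_dumb create;
	int fd, prime_fd, ret;

	fd = open("/dev/dri/card0", O_RDWR | O_CLOEXEC); /* assumed node */
	if (fd < 0) {
		perror("open");
		return 1;
	}

	/* vmw_dumb_create() derives pitch and size from width/height/bpp. */
	memset(&create, 0, sizeof(create));
	create.width = 64;
	create.height = 64;
	create.bpp = 32;
	ret = drmIoctl(fd, DRM_IOCTL_MODE_CREATE_DUMB, &create);
	if (ret) {
		perror("DRM_IOCTL_MODE_CREATE_DUMB");
		close(fd);
		return 1;
	}

	/* Round-trips through vmw_prime_handle_to_fd() in the kernel. */
	ret = drmPrimeHandleToFD(fd, create.handle, DRM_CLOEXEC, &prime_fd);
	if (ret)
		fprintf(stderr, "PRIME export failed: %d\n", ret);
	else
		printf("handle %u exported as dma-buf fd %d\n",
		       create.handle, prime_fd);

	close(fd);
	return 0;
}

Note that because the dma_buf attach/map/mmap ops in vmwgfx_prime.c are left as -ENOSYS stubs ("No need to implement these until we have other virtual devices use them"), the exported fd is only useful for re-importing into the same device, e.g. via drmPrimeFDToHandle(), not for mapping or sharing with another driver.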