Diffstat (limited to 'drivers/gpu/drm/msm/msm_gem.c')
-rw-r--r-- | drivers/gpu/drm/msm/msm_gem.c | 325
1 file changed, 204 insertions, 121 deletions
diff --git a/drivers/gpu/drm/msm/msm_gem.c b/drivers/gpu/drm/msm/msm_gem.c
index 29eacfa29cf..3da8264d303 100644
--- a/drivers/gpu/drm/msm/msm_gem.c
+++ b/drivers/gpu/drm/msm/msm_gem.c
@@ -17,11 +17,50 @@
 #include <linux/spinlock.h>
 #include <linux/shmem_fs.h>
+#include <linux/dma-buf.h>
 
 #include "msm_drv.h"
 #include "msm_gem.h"
 #include "msm_gpu.h"
+#include "msm_mmu.h"
+
+static dma_addr_t physaddr(struct drm_gem_object *obj)
+{
+	struct msm_gem_object *msm_obj = to_msm_bo(obj);
+	struct msm_drm_private *priv = obj->dev->dev_private;
+	return (((dma_addr_t)msm_obj->vram_node->start) << PAGE_SHIFT) +
+			priv->vram.paddr;
+}
+
+/* allocate pages from VRAM carveout, used when no IOMMU: */
+static struct page **get_pages_vram(struct drm_gem_object *obj,
+		int npages)
+{
+	struct msm_gem_object *msm_obj = to_msm_bo(obj);
+	struct msm_drm_private *priv = obj->dev->dev_private;
+	dma_addr_t paddr;
+	struct page **p;
+	int ret, i;
+
+	p = drm_malloc_ab(npages, sizeof(struct page *));
+	if (!p)
+		return ERR_PTR(-ENOMEM);
+
+	ret = drm_mm_insert_node(&priv->vram.mm, msm_obj->vram_node,
+			npages, 0, DRM_MM_SEARCH_DEFAULT);
+	if (ret) {
+		drm_free_large(p);
+		return ERR_PTR(ret);
+	}
+
+	paddr = physaddr(obj);
+	for (i = 0; i < npages; i++) {
+		p[i] = phys_to_page(paddr);
+		paddr += PAGE_SIZE;
+	}
+
+	return p;
+}
 
 /* called with dev->struct_mutex held */
 static struct page **get_pages(struct drm_gem_object *obj)
@@ -30,9 +69,14 @@ static struct page **get_pages(struct drm_gem_object *obj)
 
 	if (!msm_obj->pages) {
 		struct drm_device *dev = obj->dev;
-		struct page **p = drm_gem_get_pages(obj, 0);
+		struct page **p;
 		int npages = obj->size >> PAGE_SHIFT;
 
+		if (iommu_present(&platform_bus_type))
+			p = drm_gem_get_pages(obj, 0);
+		else
+			p = get_pages_vram(obj, npages);
+
 		if (IS_ERR(p)) {
 			dev_err(dev->dev, "could not get pages: %ld\n",
 					PTR_ERR(p));
@@ -72,11 +116,30 @@ static void put_pages(struct drm_gem_object *obj)
 		sg_free_table(msm_obj->sgt);
 		kfree(msm_obj->sgt);
 
-		drm_gem_put_pages(obj, msm_obj->pages, true, false);
+		if (iommu_present(&platform_bus_type))
+			drm_gem_put_pages(obj, msm_obj->pages, true, false);
+		else
+			drm_mm_remove_node(msm_obj->vram_node);
+
 		msm_obj->pages = NULL;
 	}
 }
 
+struct page **msm_gem_get_pages(struct drm_gem_object *obj)
+{
+	struct drm_device *dev = obj->dev;
+	struct page **p;
+	mutex_lock(&dev->struct_mutex);
+	p = get_pages(obj);
+	mutex_unlock(&dev->struct_mutex);
+	return p;
+}
+
+void msm_gem_put_pages(struct drm_gem_object *obj)
+{
+	/* when we start tracking the pin count, then do something here */
+}
+
 int msm_gem_mmap_obj(struct drm_gem_object *obj,
 		struct vm_area_struct *vma)
 {
@@ -122,7 +185,6 @@ int msm_gem_mmap(struct file *filp, struct vm_area_struct *vma)
 int msm_gem_fault(struct vm_area_struct *vma, struct vm_fault *vmf)
 {
 	struct drm_gem_object *obj = vma->vm_private_data;
-	struct msm_gem_object *msm_obj = to_msm_bo(obj);
 	struct drm_device *dev = obj->dev;
 	struct page **pages;
 	unsigned long pfn;
@@ -147,7 +209,7 @@ int msm_gem_fault(struct vm_area_struct *vma, struct vm_fault *vmf)
 	pgoff = ((unsigned long)vmf->virtual_address -
 			vma->vm_start) >> PAGE_SHIFT;
 
-	pfn = page_to_pfn(msm_obj->pages[pgoff]);
+	pfn = page_to_pfn(pages[pgoff]);
 
 	VERB("Inserting %p pfn %lx, pa %lx", vmf->virtual_address,
 			pfn, pfn << PAGE_SHIFT);
@@ -162,6 +224,11 @@ out:
 	case 0:
 	case -ERESTARTSYS:
 	case -EINTR:
+	case -EBUSY:
+		/*
+		 * EBUSY is ok: this just means that another thread
+		 * already did the job.
+		 */
 		return VM_FAULT_NOPAGE;
 	case -ENOMEM:
 		return VM_FAULT_OOM;
@@ -198,67 +265,6 @@ uint64_t msm_gem_mmap_offset(struct drm_gem_object *obj)
 	return offset;
 }
 
-/* helpers for dealing w/ iommu: */
-static int map_range(struct iommu_domain *domain, unsigned int iova,
-		struct sg_table *sgt, unsigned int len, int prot)
-{
-	struct scatterlist *sg;
-	unsigned int da = iova;
-	unsigned int i, j;
-	int ret;
-
-	if (!domain || !sgt)
-		return -EINVAL;
-
-	for_each_sg(sgt->sgl, sg, sgt->nents, i) {
-		u32 pa = sg_phys(sg) - sg->offset;
-		size_t bytes = sg->length + sg->offset;
-
-		VERB("map[%d]: %08x %08x(%x)", i, iova, pa, bytes);
-
-		ret = iommu_map(domain, da, pa, bytes, prot);
-		if (ret)
-			goto fail;
-
-		da += bytes;
-	}
-
-	return 0;
-
-fail:
-	da = iova;
-
-	for_each_sg(sgt->sgl, sg, i, j) {
-		size_t bytes = sg->length + sg->offset;
-		iommu_unmap(domain, da, bytes);
-		da += bytes;
-	}
-	return ret;
-}
-
-static void unmap_range(struct iommu_domain *domain, unsigned int iova,
-		struct sg_table *sgt, unsigned int len)
-{
-	struct scatterlist *sg;
-	unsigned int da = iova;
-	int i;
-
-	for_each_sg(sgt->sgl, sg, sgt->nents, i) {
-		size_t bytes = sg->length + sg->offset;
-		size_t unmapped;
-
-		unmapped = iommu_unmap(domain, da, bytes);
-		if (unmapped < bytes)
-			break;
-
-		VERB("unmap[%d]: %08x(%x)", i, iova, bytes);
-
-		BUG_ON(!IS_ALIGNED(bytes, PAGE_SIZE));
-
-		da += bytes;
-	}
-}
-
 /* should be called under struct_mutex.. although it can be called
  * from atomic context without struct_mutex to acquire an extra
  * iova ref if you know one is already held.
@@ -274,15 +280,20 @@ int msm_gem_get_iova_locked(struct drm_gem_object *obj, int id,
 
 	if (!msm_obj->domain[id].iova) {
 		struct msm_drm_private *priv = obj->dev->dev_private;
-		uint32_t offset = (uint32_t)mmap_offset(obj);
-		struct page **pages;
-		pages = get_pages(obj);
+		struct msm_mmu *mmu = priv->mmus[id];
+		struct page **pages = get_pages(obj);
+
 		if (IS_ERR(pages))
 			return PTR_ERR(pages);
-		// XXX ideally we would not map buffers writable when not needed...
-		ret = map_range(priv->iommus[id], offset, msm_obj->sgt,
-				obj->size, IOMMU_READ | IOMMU_WRITE);
-		msm_obj->domain[id].iova = offset;
+
+		if (iommu_present(&platform_bus_type)) {
+			uint32_t offset = (uint32_t)mmap_offset(obj);
+			ret = mmu->funcs->map(mmu, offset, msm_obj->sgt,
+					obj->size, IOMMU_READ | IOMMU_WRITE);
+			msm_obj->domain[id].iova = offset;
+		} else {
+			msm_obj->domain[id].iova = physaddr(obj);
+		}
 	}
 
 	if (!ret)
@@ -293,7 +304,17 @@ int msm_gem_get_iova_locked(struct drm_gem_object *obj, int id,
 
 int msm_gem_get_iova(struct drm_gem_object *obj, int id, uint32_t *iova)
 {
+	struct msm_gem_object *msm_obj = to_msm_bo(obj);
 	int ret;
+
+	/* this is safe right now because we don't unmap until the
+	 * bo is deleted:
+	 */
+	if (msm_obj->domain[id].iova) {
+		*iova = msm_obj->domain[id].iova;
+		return 0;
+	}
+
 	mutex_lock(&obj->dev->struct_mutex);
 	ret = msm_gem_get_iova_locked(obj, id, iova);
 	mutex_unlock(&obj->dev->struct_mutex);
@@ -319,13 +340,6 @@ int msm_gem_dumb_create(struct drm_file *file, struct drm_device *dev,
 			MSM_BO_SCANOUT | MSM_BO_WC, &args->handle);
 }
 
-int msm_gem_dumb_destroy(struct drm_file *file, struct drm_device *dev,
-		uint32_t handle)
-{
-	/* No special work needed, drop the reference and see what falls out */
-	return drm_gem_handle_delete(file, handle);
-}
-
 int msm_gem_dumb_map_offset(struct drm_file *file, struct drm_device *dev,
 		uint32_t handle, uint64_t *offset)
 {
@@ -370,8 +384,11 @@ void *msm_gem_vaddr(struct drm_gem_object *obj)
 	return ret;
 }
 
-int msm_gem_queue_inactive_work(struct drm_gem_object *obj,
-		struct work_struct *work)
+/* setup callback for when bo is no longer busy..
+ * TODO probably want to differentiate read vs write..
+ */
+int msm_gem_queue_inactive_cb(struct drm_gem_object *obj,
+		struct msm_fence_cb *cb)
 {
 	struct drm_device *dev = obj->dev;
 	struct msm_drm_private *priv = dev->dev_private;
@@ -379,12 +396,13 @@ int msm_gem_queue_inactive_work(struct drm_gem_object *obj,
 	int ret = 0;
 
 	mutex_lock(&dev->struct_mutex);
-	if (!list_empty(&work->entry)) {
+	if (!list_empty(&cb->work.entry)) {
 		ret = -EINVAL;
 	} else if (is_active(msm_obj)) {
-		list_add_tail(&work->entry, &msm_obj->inactive_work);
+		cb->fence = max(msm_obj->read_fence, msm_obj->write_fence);
+		list_add_tail(&cb->work.entry, &priv->fence_cbs);
 	} else {
-		queue_work(priv->wq, work);
+		queue_work(priv->wq, &cb->work);
 	}
 	mutex_unlock(&dev->struct_mutex);
 
@@ -417,16 +435,6 @@ void msm_gem_move_to_inactive(struct drm_gem_object *obj)
 	msm_obj->write_fence = 0;
 	list_del_init(&msm_obj->mm_list);
 	list_add_tail(&msm_obj->mm_list, &priv->inactive_list);
-
-	while (!list_empty(&msm_obj->inactive_work)) {
-		struct work_struct *work;
-
-		work = list_first_entry(&msm_obj->inactive_work,
-				struct work_struct, entry);
-
-		list_del_init(&work->entry);
-		queue_work(priv->wq, work);
-	}
 }
 
 int msm_gem_cpu_prep(struct drm_gem_object *obj, uint32_t op,
@@ -496,6 +504,7 @@ void msm_gem_describe_objects(struct list_head *list, struct seq_file *m)
 void msm_gem_free_object(struct drm_gem_object *obj)
 {
 	struct drm_device *dev = obj->dev;
+	struct msm_drm_private *priv = obj->dev->dev_private;
 	struct msm_gem_object *msm_obj = to_msm_bo(obj);
 	int id;
 
@@ -507,20 +516,30 @@ void msm_gem_free_object(struct drm_gem_object *obj)
 	list_del(&msm_obj->mm_list);
 
 	for (id = 0; id < ARRAY_SIZE(msm_obj->domain); id++) {
-		if (msm_obj->domain[id].iova) {
-			struct msm_drm_private *priv = obj->dev->dev_private;
+		struct msm_mmu *mmu = priv->mmus[id];
+		if (mmu && msm_obj->domain[id].iova) {
 			uint32_t offset = (uint32_t)mmap_offset(obj);
-			unmap_range(priv->iommus[id], offset,
-					msm_obj->sgt, obj->size);
+			mmu->funcs->unmap(mmu, offset, msm_obj->sgt, obj->size);
 		}
 	}
 
 	drm_gem_free_mmap_offset(obj);
 
-	if (msm_obj->vaddr)
-		vunmap(msm_obj->vaddr);
+	if (obj->import_attach) {
+		if (msm_obj->vaddr)
+			dma_buf_vunmap(obj->import_attach->dmabuf, msm_obj->vaddr);
+
+		/* Don't drop the pages for imported dmabuf, as they are not
+		 * ours, just free the array we allocated:
+		 */
+		if (msm_obj->pages)
+			drm_free_large(msm_obj->pages);
 
-	put_pages(obj);
+	} else {
+		if (msm_obj->vaddr)
+			vunmap(msm_obj->vaddr);
+		put_pages(obj);
+	}
 
 	if (msm_obj->resv == &msm_obj->_resv)
 		reservation_object_fini(msm_obj->resv);
@@ -556,17 +575,13 @@ int msm_gem_new_handle(struct drm_device *dev, struct drm_file *file,
 	return ret;
 }
 
-struct drm_gem_object *msm_gem_new(struct drm_device *dev,
-		uint32_t size, uint32_t flags)
+static int msm_gem_new_impl(struct drm_device *dev,
+		uint32_t size, uint32_t flags,
+		struct drm_gem_object **obj)
 {
 	struct msm_drm_private *priv = dev->dev_private;
 	struct msm_gem_object *msm_obj;
-	struct drm_gem_object *obj = NULL;
-	int ret;
-
-	WARN_ON(!mutex_is_locked(&dev->struct_mutex));
-
-	size = PAGE_ALIGN(size);
+	unsigned sz;
 
 	switch (flags & MSM_BO_CACHE_MASK) {
 	case MSM_BO_UNCACHED:
@@ -576,21 +591,19 @@ struct drm_gem_object *msm_gem_new(struct drm_device *dev,
 	default:
 		dev_err(dev->dev, "invalid cache flag: %x\n",
 				(flags & MSM_BO_CACHE_MASK));
-		ret = -EINVAL;
-		goto fail;
+		return -EINVAL;
 	}
 
-	msm_obj = kzalloc(sizeof(*msm_obj), GFP_KERNEL);
-	if (!msm_obj) {
-		ret = -ENOMEM;
-		goto fail;
-	}
+	sz = sizeof(*msm_obj);
+	if (!iommu_present(&platform_bus_type))
+		sz += sizeof(struct drm_mm_node);
 
-	obj = &msm_obj->base;
+	msm_obj = kzalloc(sz, GFP_KERNEL);
+	if (!msm_obj)
+		return -ENOMEM;
 
-	ret = drm_gem_object_init(dev, obj, size);
-	if (ret)
-		goto fail;
+	if (!iommu_present(&platform_bus_type))
+		msm_obj->vram_node = (void *)&msm_obj[1];
 
 	msm_obj->flags = flags;
 
@@ -598,9 +611,79 @@ struct drm_gem_object *msm_gem_new(struct drm_device *dev,
 		reservation_object_init(msm_obj->resv);
 
 	INIT_LIST_HEAD(&msm_obj->submit_entry);
-	INIT_LIST_HEAD(&msm_obj->inactive_work);
 	list_add_tail(&msm_obj->mm_list, &priv->inactive_list);
 
+	*obj = &msm_obj->base;
+
+	return 0;
+}
+
+struct drm_gem_object *msm_gem_new(struct drm_device *dev,
+		uint32_t size, uint32_t flags)
+{
+	struct drm_gem_object *obj = NULL;
+	int ret;
+
+	WARN_ON(!mutex_is_locked(&dev->struct_mutex));
+
+	size = PAGE_ALIGN(size);
+
+	ret = msm_gem_new_impl(dev, size, flags, &obj);
+	if (ret)
+		goto fail;
+
+	if (iommu_present(&platform_bus_type)) {
+		ret = drm_gem_object_init(dev, obj, size);
+		if (ret)
+			goto fail;
+	} else {
+		drm_gem_private_object_init(dev, obj, size);
+	}
+
+	return obj;
+
+fail:
+	if (obj)
+		drm_gem_object_unreference(obj);
+
+	return ERR_PTR(ret);
+}
+
+struct drm_gem_object *msm_gem_import(struct drm_device *dev,
+		uint32_t size, struct sg_table *sgt)
+{
+	struct msm_gem_object *msm_obj;
+	struct drm_gem_object *obj;
+	int ret, npages;
+
+	/* if we don't have IOMMU, don't bother pretending we can import: */
+	if (!iommu_present(&platform_bus_type)) {
+		dev_err(dev->dev, "cannot import without IOMMU\n");
+		return ERR_PTR(-EINVAL);
+	}
+
+	size = PAGE_ALIGN(size);
+
+	ret = msm_gem_new_impl(dev, size, MSM_BO_WC, &obj);
+	if (ret)
+		goto fail;
+
+	drm_gem_private_object_init(dev, obj, size);
+
+	npages = size / PAGE_SIZE;
+
+	msm_obj = to_msm_bo(obj);
+	msm_obj->sgt = sgt;
+	msm_obj->pages = drm_malloc_ab(npages, sizeof(struct page *));
+	if (!msm_obj->pages) {
+		ret = -ENOMEM;
+		goto fail;
+	}
+
+	ret = drm_prime_sg_to_page_addr_arrays(sgt, msm_obj->pages, NULL, npages);
+	if (ret)
+		goto fail;
+
 	return obj;
 
 fail:
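A note on the no-IOMMU path introduced above: get_pages_vram() and physaddr() assume the driver has reserved a contiguous VRAM carveout and tracks it through priv->vram.mm (a drm_mm whose node units are pages) and priv->vram.paddr (the carveout's physical base), so a buffer's "iova" is simply priv->vram.paddr + (vram_node->start << PAGE_SHIFT). The sketch below shows one way such a carveout could be set up at driver load; only the priv->vram fields come from this diff, while the helper name, the size source, and the dma_alloc_attrs() call are assumptions about the rest of the series.

/* Sketch only: initializing the VRAM carveout used by get_pages_vram()
 * and physaddr(). Not code from this commit.
 */
static int msm_init_vram_sketch(struct drm_device *dev, size_t size)
{
	struct msm_drm_private *priv = dev->dev_private;
	DEFINE_DMA_ATTRS(attrs);
	void *p;

	/* drm_mm node units are pages here: get_pages_vram() inserts an
	 * npages-sized node and physaddr() shifts node->start by PAGE_SHIFT.
	 */
	drm_mm_init(&priv->vram.mm, 0, size >> PAGE_SHIFT);

	/* back the carveout with contiguous memory; no kernel mapping is
	 * needed since pages are handed out via phys_to_page():
	 */
	dma_set_attr(DMA_ATTR_NO_KERNEL_MAPPING, &attrs);
	p = dma_alloc_attrs(dev->dev, size, &priv->vram.paddr,
			GFP_KERNEL, &attrs);
	if (!p) {
		dev_err(dev->dev, "failed to allocate VRAM\n");
		return -ENOMEM;
	}

	return 0;
}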
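On the msm_gem_queue_inactive_work() -> msm_gem_queue_inactive_cb() change: callers no longer hand in a bare work_struct; they embed a struct msm_fence_cb, and while the bo is still active the callback is parked on priv->fence_cbs with cb->fence set to the bo's last read/write fence. A minimal caller is sketched below; it assumes the msm_fence_cb layout visible in this diff (cb->work, cb->fence) plus the INIT_FENCE_CB() helper from msm_drv.h in the same series, and the flip-state wrapper and callback are invented for illustration.

/* Hypothetical caller of the new fence-callback API; not part of this commit. */
struct flip_state {
	struct msm_fence_cb cb;		/* parked on priv->fence_cbs while bo is busy */
	struct drm_gem_object *bo;
};

static void flip_done(struct msm_fence_cb *cb)
{
	struct flip_state *state = container_of(cb, struct flip_state, cb);

	/* runs from priv->wq once the bo's read/write fence has retired */
	drm_gem_object_unreference_unlocked(state->bo);
	kfree(state);
}

static int queue_flip(struct drm_gem_object *bo, struct flip_state *state)
{
	state->bo = bo;
	INIT_FENCE_CB(&state->cb, flip_done);	/* assumed helper from msm_drv.h */

	/* queues the callback immediately if the bo is already idle, otherwise
	 * records cb->fence = max(read_fence, write_fence) and defers it:
	 */
	return msm_gem_queue_inactive_cb(bo, &state->cb);
}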
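msm_gem_import() above is the backend for dma-buf PRIME import: it page-aligns the size, creates a private (non-shmem-backed) GEM object, keeps the importer's sg_table, and fills msm_obj->pages from it with drm_prime_sg_to_page_addr_arrays(). The gem_prime_import_sg_table hook that reaches it would look roughly like the sketch below; the signature follows the drm_driver interface of this kernel generation and is not copied verbatim from msm_gem_prime.c.

/* Sketch of the PRIME import hook expected to sit in msm_gem_prime.c;
 * illustrative only, the exact signature is an assumption.
 */
struct drm_gem_object *msm_gem_prime_import_sg_table(struct drm_device *dev,
		size_t size, struct sg_table *sg)
{
	/* msm_gem_import() refuses to import without an IOMMU and otherwise
	 * wraps the caller's sg_table in a private GEM object:
	 */
	return msm_gem_import(dev, size, sg);
}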