Diffstat (limited to 'drivers/gpu/drm/i915/i915_gem.c')
 drivers/gpu/drm/i915/i915_gem.c | 58 ++++++++++++++++++++++++++++--------------------------------
 1 file changed, 26 insertions(+), 32 deletions(-)
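
Most of the hunks below are one mechanical conversion: shmem-backed object pages were previously fetched with read_cache_page_gfp(mapping, index, GFP_HIGHUSER | __GFP_RECLAIMABLE), and are now fetched through the shmem helpers from the new <linux/shmem_fs.h>, with the GFP policy stored once on the mapping in i915_gem_alloc_object(). The remaining hunks are independent changes to the GTT fault path, interrupt re-enabling in i915_wait_request(), the GTT write-domain flush, and object truncation.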
diff --git a/drivers/gpu/drm/i915/i915_gem.c b/drivers/gpu/drm/i915/i915_gem.c
index 12d32579b95..5c0d1247f45 100644
--- a/drivers/gpu/drm/i915/i915_gem.c
+++ b/drivers/gpu/drm/i915/i915_gem.c
@@ -31,6 +31,7 @@
#include "i915_drv.h"
#include "i915_trace.h"
#include "intel_drv.h"
+#include <linux/shmem_fs.h>
#include <linux/slab.h>
#include <linux/swap.h>
#include <linux/pci.h>
@@ -359,8 +360,7 @@ i915_gem_shmem_pread_fast(struct drm_device *dev,
if ((page_offset + remain) > PAGE_SIZE)
page_length = PAGE_SIZE - page_offset;
- page = read_cache_page_gfp(mapping, offset >> PAGE_SHIFT,
- GFP_HIGHUSER | __GFP_RECLAIMABLE);
+ page = shmem_read_mapping_page(mapping, offset >> PAGE_SHIFT);
if (IS_ERR(page))
return PTR_ERR(page);
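
This hunk is the template for the same conversion in the pread_slow, pwrite_fast and pwrite_slow hunks below: the explicit GFP flags disappear because shmem_read_mapping_page() takes them from the mapping itself. A sketch of the wrapper, condensed from <linux/shmem_fs.h> as introduced around this time (check the tree for the exact form):

	/* Read a shmem page using the gfp mask stored on the mapping,
	 * so call sites no longer pass GFP flags explicitly. */
	static inline struct page *
	shmem_read_mapping_page(struct address_space *mapping, pgoff_t index)
	{
		return shmem_read_mapping_page_gfp(mapping, index,
						   mapping_gfp_mask(mapping));
	}

For this to preserve behaviour, the mapping's mask must carry the old GFP_HIGHUSER | __GFP_RECLAIMABLE, which is exactly what the i915_gem_alloc_object() hunk further down arranges.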
@@ -463,10 +463,11 @@ i915_gem_shmem_pread_slow(struct drm_device *dev,
if ((data_page_offset + page_length) > PAGE_SIZE)
page_length = PAGE_SIZE - data_page_offset;
- page = read_cache_page_gfp(mapping, offset >> PAGE_SHIFT,
- GFP_HIGHUSER | __GFP_RECLAIMABLE);
- if (IS_ERR(page))
- return PTR_ERR(page);
+ page = shmem_read_mapping_page(mapping, offset >> PAGE_SHIFT);
+ if (IS_ERR(page)) {
+ ret = PTR_ERR(page);
+ goto out;
+ }
if (do_bit17_swizzling) {
slow_shmem_bit17_copy(page,
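
Besides the helper swap, the error path here changes from a direct return to a goto: by this point the slow path has the destination user pages pinned, and the function's out: label is where they are released, so returning directly would appear to leak them. Illustrative shape of the pattern (loop condensed and index simplified, not the driver's exact code):

	for (i = 0; i < page_count; i++) {
		page = shmem_read_mapping_page(mapping, i);
		if (IS_ERR(page)) {
			ret = PTR_ERR(page);
			goto out;	/* unwind, do not leak the pins */
		}
		/* ... copy, then page_cache_release(page) ... */
	}
out:
	/* release pinned user pages */
	return ret;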
@@ -795,8 +796,7 @@ i915_gem_shmem_pwrite_fast(struct drm_device *dev,
if ((page_offset + remain) > PAGE_SIZE)
page_length = PAGE_SIZE - page_offset;
- page = read_cache_page_gfp(mapping, offset >> PAGE_SHIFT,
- GFP_HIGHUSER | __GFP_RECLAIMABLE);
+ page = shmem_read_mapping_page(mapping, offset >> PAGE_SHIFT);
if (IS_ERR(page))
return PTR_ERR(page);
@@ -905,8 +905,7 @@ i915_gem_shmem_pwrite_slow(struct drm_device *dev,
if ((data_page_offset + page_length) > PAGE_SIZE)
page_length = PAGE_SIZE - data_page_offset;
- page = read_cache_page_gfp(mapping, offset >> PAGE_SHIFT,
- GFP_HIGHUSER | __GFP_RECLAIMABLE);
+ page = shmem_read_mapping_page(mapping, offset >> PAGE_SHIFT);
if (IS_ERR(page)) {
ret = PTR_ERR(page);
goto out;
@@ -1217,11 +1216,11 @@ int i915_gem_fault(struct vm_area_struct *vma, struct vm_fault *vmf)
ret = i915_gem_object_bind_to_gtt(obj, 0, true);
if (ret)
goto unlock;
- }
- ret = i915_gem_object_set_to_gtt_domain(obj, write);
- if (ret)
- goto unlock;
+ ret = i915_gem_object_set_to_gtt_domain(obj, write);
+ if (ret)
+ goto unlock;
+ }
if (obj->tiling_mode == I915_TILING_NONE)
ret = i915_gem_object_put_fence(obj);
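
i915_gem_object_set_to_gtt_domain() moves inside the preceding branch, which (from the enclosing function, not visible in the hunk) is the one that has just bound the object into the GTT, so the domain transition is only performed for a freshly bound object rather than on every fault. Condensed new flow, with the branch condition assumed rather than shown here:

	if (!obj->gtt_space) {	/* assumed from the enclosing function */
		ret = i915_gem_object_bind_to_gtt(obj, 0, true);
		if (ret)
			goto unlock;

		ret = i915_gem_object_set_to_gtt_domain(obj, write);
		if (ret)
			goto unlock;
	}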
@@ -1556,12 +1555,10 @@ i915_gem_object_get_pages_gtt(struct drm_i915_gem_object *obj,
inode = obj->base.filp->f_path.dentry->d_inode;
mapping = inode->i_mapping;
+ gfpmask |= mapping_gfp_mask(mapping);
+
for (i = 0; i < page_count; i++) {
- page = read_cache_page_gfp(mapping, i,
- GFP_HIGHUSER |
- __GFP_COLD |
- __GFP_RECLAIMABLE |
- gfpmask);
+ page = shmem_read_mapping_page_gfp(mapping, i, gfpmask);
if (IS_ERR(page))
goto err_pages;
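
Unlike the fixed-flag call sites, this path must honour extra GFP flags passed in by the caller, so the mapping's stored policy is OR'ed into gfpmask and the _gfp variant of the helper is used. The mapping-side accessor it leans on is a trivial bit mask over mapping->flags (condensed from <linux/pagemap.h> of the period):

	static inline gfp_t mapping_gfp_mask(struct address_space *mapping)
	{
		return (__force gfp_t)mapping->flags & __GFP_BITS_MASK;
	}

One visible change: the explicit __GFP_COLD hint from the old call is not carried over.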
@@ -1699,13 +1696,10 @@ i915_gem_object_truncate(struct drm_i915_gem_object *obj)
/* Our goal here is to return as much of the memory as
* is possible back to the system as we are called from OOM.
* To do this we must instruct the shmfs to drop all of its
- * backing pages, *now*. Here we mirror the actions taken
- * when by shmem_delete_inode() to release the backing store.
+ * backing pages, *now*.
*/
inode = obj->base.filp->f_path.dentry->d_inode;
- truncate_inode_pages(inode->i_mapping, 0);
- if (inode->i_op->truncate_range)
- inode->i_op->truncate_range(inode, 0, (loff_t)-1);
+ shmem_truncate_range(inode, 0, (loff_t)-1);
obj->madv = __I915_MADV_PURGED;
}
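
The open-coded pair is replaced by a single new export that performs both steps for shmem files, which also drops the driver's reliance on the inode_operations->truncate_range hook; the stale comment about mirroring shmem_delete_inode() goes with it. Its prototype, as added to <linux/shmem_fs.h>:

	extern void shmem_truncate_range(struct inode *inode,
					 loff_t start, loff_t end);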
@@ -2078,8 +2072,8 @@ i915_wait_request(struct intel_ring_buffer *ring,
if (!ier) {
DRM_ERROR("something (likely vbetool) disabled "
"interrupts, re-enabling\n");
- i915_driver_irq_preinstall(ring->dev);
- i915_driver_irq_postinstall(ring->dev);
+ ring->dev->driver->irq_preinstall(ring->dev);
+ ring->dev->driver->irq_postinstall(ring->dev);
}
trace_i915_gem_request_wait_begin(ring, seqno);
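
The recovery path now calls through the drm_driver vtable rather than naming the i915 install routines directly, so it picks up whichever interrupt handlers the driver actually registered; presumably this is what let the global i915_driver_irq_* entry points go away. The relevant ops in struct drm_driver:

	void (*irq_preinstall) (struct drm_device *dev);
	int (*irq_postinstall) (struct drm_device *dev);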
@@ -2924,8 +2918,6 @@ i915_gem_object_flush_gtt_write_domain(struct drm_i915_gem_object *obj)
*/
wmb();
- i915_gem_release_mmap(obj);
-
old_write_domain = obj->base.write_domain;
obj->base.write_domain = 0;
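
Dropping i915_gem_release_mmap() means flushing the GTT write domain no longer tears down userspace GTT mmaps. A plausible reading: GTT mmaps write through the aperture and remain coherent with the GTT domain, so zapping them on every flush only forced needless refaults, while the paths that genuinely invalidate the mapping (unbinding, fence register changes) release it separately.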
@@ -3565,6 +3557,7 @@ struct drm_i915_gem_object *i915_gem_alloc_object(struct drm_device *dev,
{
struct drm_i915_private *dev_priv = dev->dev_private;
struct drm_i915_gem_object *obj;
+ struct address_space *mapping;
obj = kzalloc(sizeof(*obj), GFP_KERNEL);
if (obj == NULL)
@@ -3575,6 +3568,9 @@ struct drm_i915_gem_object *i915_gem_alloc_object(struct drm_device *dev,
return NULL;
}
+ mapping = obj->base.filp->f_path.dentry->d_inode->i_mapping;
+ mapping_set_gfp_mask(mapping, GFP_HIGHUSER | __GFP_RECLAIMABLE);
+
i915_gem_info_add_obj(dev_priv, size);
obj->base.write_domain = I915_GEM_DOMAIN_CPU;
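
This is the other half of the conversion: the GFP policy that every read_cache_page_gfp() call used to pass explicitly is now stored once on the object's backing mapping at creation time, where shmem_read_mapping_page() will find it. The setter is the counterpart of mapping_gfp_mask() above (condensed from <linux/pagemap.h>):

	static inline void
	mapping_set_gfp_mask(struct address_space *m, gfp_t mask)
	{
		m->flags = (m->flags & ~(__force unsigned long)__GFP_BITS_MASK) |
			   (__force unsigned long)mask;
	}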
@@ -3950,8 +3946,7 @@ void i915_gem_detach_phys_object(struct drm_device *dev,
page_count = obj->base.size / PAGE_SIZE;
for (i = 0; i < page_count; i++) {
- struct page *page = read_cache_page_gfp(mapping, i,
- GFP_HIGHUSER | __GFP_RECLAIMABLE);
+ struct page *page = shmem_read_mapping_page(mapping, i);
if (!IS_ERR(page)) {
char *dst = kmap_atomic(page);
memcpy(dst, vaddr + i*PAGE_SIZE, PAGE_SIZE);
@@ -4012,8 +4007,7 @@ i915_gem_attach_phys_object(struct drm_device *dev,
struct page *page;
char *dst, *src;
- page = read_cache_page_gfp(mapping, i,
- GFP_HIGHUSER | __GFP_RECLAIMABLE);
+ page = shmem_read_mapping_page(mapping, i);
if (IS_ERR(page))
return PTR_ERR(page);
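
These last two phys-object hunks repeat the same mechanical conversion as the pread/pwrite paths: plain shmem_read_mapping_page(), with the GFP flags now supplied by the mapping mask set in i915_gem_alloc_object() above.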