Diffstat (limited to 'mm')
-rw-r--r--  mm/nommu.c        3
-rw-r--r--  mm/page_alloc.c   6
-rw-r--r--  mm/percpu.c      15
-rw-r--r--  mm/shmem.c        6
-rw-r--r--  mm/shmem_acl.c   11
-rw-r--r--  mm/slub.c         4
6 files changed, 25 insertions, 20 deletions
diff --git a/mm/nommu.c b/mm/nommu.c
index 4bde489ec43..66e81e7e9fe 100644
--- a/mm/nommu.c
+++ b/mm/nommu.c
@@ -1352,6 +1352,7 @@ unsigned long do_mmap_pgoff(struct file *file,
}
vma->vm_region = region;
+ add_nommu_region(region);
/* set up the mapping */
if (file && vma->vm_flags & VM_SHARED)
@@ -1361,8 +1362,6 @@ unsigned long do_mmap_pgoff(struct file *file,
if (ret < 0)
goto error_put_region;
- add_nommu_region(region);
-
/* okay... we have a mapping; now we have to register it */
result = vma->vm_start;
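The two nommu.c hunks reorder do_mmap_pgoff(): add_nommu_region() now runs before the mapping is set up rather than after, so that if setup fails and control jumps to error_put_region, the region is already in the nommu region tree and the cleanup path can find it there. A minimal sketch of the resulting flow, with the elided shared/private setup collapsed into a hypothetical set_up_mapping() helper:

    vma->vm_region = region;
    add_nommu_region(region);      /* registered before the mapping is set up */

    /* set up the mapping */
    ret = set_up_mapping(vma);     /* hypothetical stand-in for the elided calls */
    if (ret < 0)
        goto error_put_region;     /* cleanup now finds the region in the tree */

    result = vma->vm_start;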
diff --git a/mm/page_alloc.c b/mm/page_alloc.c
index 5cc986eb9f6..a0de15f4698 100644
--- a/mm/page_alloc.c
+++ b/mm/page_alloc.c
@@ -817,13 +817,15 @@ __rmqueue_fallback(struct zone *zone, int order, int start_migratetype)
* aggressive about taking ownership of free pages
*/
if (unlikely(current_order >= (pageblock_order >> 1)) ||
- start_migratetype == MIGRATE_RECLAIMABLE) {
+ start_migratetype == MIGRATE_RECLAIMABLE ||
+ page_group_by_mobility_disabled) {
unsigned long pages;
pages = move_freepages_block(zone, page,
start_migratetype);
/* Claim the whole block if over half of it is free */
- if (pages >= (1 << (pageblock_order-1)))
+ if (pages >= (1 << (pageblock_order-1)) ||
+ page_group_by_mobility_disabled)
set_pageblock_migratetype(page,
start_migratetype);
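The page_alloc.c hunk makes the fallback path claim whole pageblocks whenever page_group_by_mobility_disabled is set (grouping by mobility is typically disabled on low-memory systems), rather than stealing page by page. Reconstructed from the hunk, the post-patch condition reads:

    if (unlikely(current_order >= (pageblock_order >> 1)) ||
            start_migratetype == MIGRATE_RECLAIMABLE ||
            page_group_by_mobility_disabled) {
        unsigned long pages;

        pages = move_freepages_block(zone, page, start_migratetype);

        /* Claim the whole block if over half of it is free */
        if (pages >= (1 << (pageblock_order - 1)) ||
                page_group_by_mobility_disabled)
            set_pageblock_migratetype(page, start_migratetype);
    }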
diff --git a/mm/percpu.c b/mm/percpu.c
index 5fe37842e0e..3311c8919f3 100644
--- a/mm/percpu.c
+++ b/mm/percpu.c
@@ -197,7 +197,12 @@ static unsigned long pcpu_chunk_addr(struct pcpu_chunk *chunk,
static bool pcpu_chunk_page_occupied(struct pcpu_chunk *chunk,
int page_idx)
{
- return *pcpu_chunk_pagep(chunk, 0, page_idx) != NULL;
+ /*
+ * Any possible cpu id can be used here, so there's no need to
+ * worry about preemption or cpu hotplug.
+ */
+ return *pcpu_chunk_pagep(chunk, raw_smp_processor_id(),
+ page_idx) != NULL;
}
/* set the pointer to a chunk in a page struct */
@@ -297,6 +302,14 @@ static struct pcpu_chunk *pcpu_chunk_addr_search(void *addr)
return pcpu_first_chunk;
}
+ /*
+ * The address is relative to unit0 which might be unused and
+ * thus unmapped. Offset the address to the unit space of the
+ * current processor before looking it up in the vmalloc
+ * space. Note that any possible cpu id can be used here, so
+ * there's no need to worry about preemption or cpu hotplug.
+ */
+ addr += raw_smp_processor_id() * pcpu_unit_size;
return pcpu_get_page_chunk(vmalloc_to_page(addr));
}
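Both percpu hunks fix the same assumption: a chunk's unit 0 may be unused and thus unmapped, so neither occupancy checks nor vmalloc lookups may use it. Since any possible CPU's unit works equally well, raw_smp_processor_id() is used and preemption or hotplug need not be considered. As a worked example of the address translation in the second hunk (64KB is an arbitrary illustrative value for pcpu_unit_size): an address 0x100 bytes into unit 0, looked up while running on CPU 2, becomes 0x100 + 2 * 64KB, which lies inside the current CPU's mapped unit. The post-patch tail of pcpu_chunk_addr_search(), as the hunk gives it:

    /* offset the unit0-relative address into this cpu's unit
     * before asking vmalloc for the backing page */
    addr += raw_smp_processor_id() * pcpu_unit_size;
    return pcpu_get_page_chunk(vmalloc_to_page(addr));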
diff --git a/mm/shmem.c b/mm/shmem.c
index d713239ce2c..5a0b3d4055f 100644
--- a/mm/shmem.c
+++ b/mm/shmem.c
@@ -2446,7 +2446,7 @@ static const struct inode_operations shmem_inode_operations = {
.getxattr = generic_getxattr,
.listxattr = generic_listxattr,
.removexattr = generic_removexattr,
- .permission = shmem_permission,
+ .check_acl = shmem_check_acl,
#endif
};
@@ -2469,7 +2469,7 @@ static const struct inode_operations shmem_dir_inode_operations = {
.getxattr = generic_getxattr,
.listxattr = generic_listxattr,
.removexattr = generic_removexattr,
- .permission = shmem_permission,
+ .check_acl = shmem_check_acl,
#endif
};
@@ -2480,7 +2480,7 @@ static const struct inode_operations shmem_special_inode_operations = {
.getxattr = generic_getxattr,
.listxattr = generic_listxattr,
.removexattr = generic_removexattr,
- .permission = shmem_permission,
+ .check_acl = shmem_check_acl,
#endif
};
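All three shmem inode_operations tables switch from a private .permission hook to the generic .check_acl callback: instead of shmem wrapping generic_permission() itself, the VFS's generic permission code invokes ->check_acl when it needs a POSIX ACL decision. A hedged sketch of that dispatch (an approximation, not the actual VFS source):

    /* inside generic permission checking, approximately: */
    if (inode->i_op->check_acl) {
        int error = inode->i_op->check_acl(inode, mask);
        if (error != -EAGAIN)
            return error;   /* the ACL gave a definitive answer */
    }
    /* -EAGAIN: no ACL to consult, fall back to mode bits */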
diff --git a/mm/shmem_acl.c b/mm/shmem_acl.c
index 606a8e757a4..df2c87fdae5 100644
--- a/mm/shmem_acl.c
+++ b/mm/shmem_acl.c
@@ -157,7 +157,7 @@ shmem_acl_init(struct inode *inode, struct inode *dir)
/**
* shmem_check_acl - check_acl() callback for generic_permission()
*/
-static int
+int
shmem_check_acl(struct inode *inode, int mask)
{
struct posix_acl *acl = shmem_get_acl(inode, ACL_TYPE_ACCESS);
@@ -169,12 +169,3 @@ shmem_check_acl(struct inode *inode, int mask)
}
return -EAGAIN;
}
-
-/**
- * shmem_permission - permission() inode operation
- */
-int
-shmem_permission(struct inode *inode, int mask)
-{
- return generic_permission(inode, mask, shmem_check_acl);
-}
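With the VFS calling ->check_acl directly, the shmem_permission() wrapper around generic_permission() is dead code and is removed; shmem_check_acl() loses its static qualifier so shmem.c can wire it into the tables above. The matching declaration would move to a shared header (exact location assumed, not shown in this diff):

    /* hypothetical header declaration; actual placement not part of this diff */
    extern int shmem_check_acl(struct inode *inode, int mask);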
diff --git a/mm/slub.c b/mm/slub.c
index b9f1491a58a..b6276753626 100644
--- a/mm/slub.c
+++ b/mm/slub.c
@@ -2594,8 +2594,6 @@ static inline int kmem_cache_close(struct kmem_cache *s)
*/
void kmem_cache_destroy(struct kmem_cache *s)
{
- if (s->flags & SLAB_DESTROY_BY_RCU)
- rcu_barrier();
down_write(&slub_lock);
s->refcount--;
if (!s->refcount) {
@@ -2606,6 +2604,8 @@ void kmem_cache_destroy(struct kmem_cache *s)
"still has objects.\n", s->name, __func__);
dump_stack();
}
+ if (s->flags & SLAB_DESTROY_BY_RCU)
+ rcu_barrier();
sysfs_slab_remove(s);
} else
up_write(&slub_lock);
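The slub.c hunks move rcu_barrier() inside the !s->refcount branch: waiting out in-flight RCU callbacks is only needed when the cache is genuinely being torn down, not when kmem_cache_destroy() merely drops a reference on a still-shared (aliased) cache. A condensed sketch of the resulting flow, with the unchanged middle elided:

    void kmem_cache_destroy(struct kmem_cache *s)
    {
        down_write(&slub_lock);
        s->refcount--;
        if (!s->refcount) {
            /* ... elided: unlink the cache, drop the lock, close it,
             * warn if it still has objects (see the hunk context) ... */
            if (s->flags & SLAB_DESTROY_BY_RCU)
                rcu_barrier();  /* wait only on real destruction */
            sysfs_slab_remove(s);
        } else
            up_write(&slub_lock);
    }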