Diffstat (limited to 'mm')
-rw-r--r--  mm/bootmem.c      31
-rw-r--r--  mm/filemap.c       8
-rw-r--r--  mm/highmem.c      14
-rw-r--r--  mm/hugetlb.c      57
-rw-r--r--  mm/memory.c        4
-rw-r--r--  mm/mempolicy.c     6
-rw-r--r--  mm/mempool.c       2
-rw-r--r--  mm/page_alloc.c   35
-rw-r--r--  mm/shmem.c         4
-rw-r--r--  mm/slab.c          8
-rw-r--r--  mm/vmscan.c        8
11 files changed, 112 insertions, 65 deletions
diff --git a/mm/bootmem.c b/mm/bootmem.c
index c1330cc1978..a58699b6579 100644
--- a/mm/bootmem.c
+++ b/mm/bootmem.c
@@ -154,10 +154,10 @@ static void __init free_bootmem_core(bootmem_data_t *bdata, unsigned long addr,
*/
static void * __init
__alloc_bootmem_core(struct bootmem_data *bdata, unsigned long size,
- unsigned long align, unsigned long goal)
+ unsigned long align, unsigned long goal, unsigned long limit)
{
unsigned long offset, remaining_size, areasize, preferred;
- unsigned long i, start = 0, incr, eidx;
+ unsigned long i, start = 0, incr, eidx, end_pfn = bdata->node_low_pfn;
void *ret;
if(!size) {
@@ -166,7 +166,14 @@ __alloc_bootmem_core(struct bootmem_data *bdata, unsigned long size,
}
BUG_ON(align & (align-1));
- eidx = bdata->node_low_pfn - (bdata->node_boot_start >> PAGE_SHIFT);
+ if (limit && bdata->node_boot_start >= limit)
+ return NULL;
+
+ limit >>= PAGE_SHIFT;
+ if (limit && end_pfn > limit)
+ end_pfn = limit;
+
+ eidx = end_pfn - (bdata->node_boot_start >> PAGE_SHIFT);
offset = 0;
if (align &&
(bdata->node_boot_start & (align - 1UL)) != 0)
@@ -178,11 +185,12 @@ __alloc_bootmem_core(struct bootmem_data *bdata, unsigned long size,
* first, then we try to allocate lower pages.
*/
if (goal && (goal >= bdata->node_boot_start) &&
- ((goal >> PAGE_SHIFT) < bdata->node_low_pfn)) {
+ ((goal >> PAGE_SHIFT) < end_pfn)) {
preferred = goal - bdata->node_boot_start;
if (bdata->last_success >= preferred)
- preferred = bdata->last_success;
+ if (!limit || (limit && limit > bdata->last_success))
+ preferred = bdata->last_success;
} else
preferred = 0;
@@ -382,14 +390,15 @@ unsigned long __init free_all_bootmem (void)
return(free_all_bootmem_core(NODE_DATA(0)));
}
-void * __init __alloc_bootmem (unsigned long size, unsigned long align, unsigned long goal)
+void * __init __alloc_bootmem_limit (unsigned long size, unsigned long align, unsigned long goal,
+ unsigned long limit)
{
pg_data_t *pgdat = pgdat_list;
void *ptr;
for_each_pgdat(pgdat)
if ((ptr = __alloc_bootmem_core(pgdat->bdata, size,
- align, goal)))
+ align, goal, limit)))
return(ptr);
/*
@@ -400,14 +409,16 @@ void * __init __alloc_bootmem (unsigned long size, unsigned long align, unsigned
return NULL;
}
-void * __init __alloc_bootmem_node (pg_data_t *pgdat, unsigned long size, unsigned long align, unsigned long goal)
+
+void * __init __alloc_bootmem_node_limit (pg_data_t *pgdat, unsigned long size, unsigned long align,
+ unsigned long goal, unsigned long limit)
{
void *ptr;
- ptr = __alloc_bootmem_core(pgdat->bdata, size, align, goal);
+ ptr = __alloc_bootmem_core(pgdat->bdata, size, align, goal, limit);
if (ptr)
return (ptr);
- return __alloc_bootmem(size, align, goal);
+ return __alloc_bootmem_limit(size, align, goal, limit);
}
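
The hunks above rename __alloc_bootmem()/__alloc_bootmem_node() to *_limit variants that take a physical address ceiling; from the clamping code, limit == 0 means "no ceiling", and the old names presumably remain as thin wrappers (outside this file) that pass limit == 0. A rough sketch of a caller, assuming matching declarations in include/linux/bootmem.h accompany this patch; the function name, the 4 GiB ceiling and the panic message are illustrative only, not part of the patch:

	#define EXAMPLE_ADDR_LIMIT	(1UL << 32)	/* illustrative ceiling (64-bit) */

	void __init example_early_alloc(void)
	{
		void *buf;

		/* Try node 0 first; __alloc_bootmem_node_limit() falls back to
		 * __alloc_bootmem_limit(), i.e. to every other node, when node 0
		 * has nothing free below the ceiling. */
		buf = __alloc_bootmem_node_limit(NODE_DATA(0), PAGE_SIZE, PAGE_SIZE,
						 0, EXAMPLE_ADDR_LIMIT);
		if (!buf)
			panic("example: no bootmem below 4 GiB");
	}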
diff --git a/mm/filemap.c b/mm/filemap.c
index b5346576e58..1c31b2fd2ca 100644
--- a/mm/filemap.c
+++ b/mm/filemap.c
@@ -377,7 +377,7 @@ int filemap_write_and_wait_range(struct address_space *mapping,
* This function does not add the page to the LRU. The caller must do that.
*/
int add_to_page_cache(struct page *page, struct address_space *mapping,
- pgoff_t offset, int gfp_mask)
+ pgoff_t offset, gfp_t gfp_mask)
{
int error = radix_tree_preload(gfp_mask & ~__GFP_HIGHMEM);
@@ -401,7 +401,7 @@ int add_to_page_cache(struct page *page, struct address_space *mapping,
EXPORT_SYMBOL(add_to_page_cache);
int add_to_page_cache_lru(struct page *page, struct address_space *mapping,
- pgoff_t offset, int gfp_mask)
+ pgoff_t offset, gfp_t gfp_mask)
{
int ret = add_to_page_cache(page, mapping, offset, gfp_mask);
if (ret == 0)
@@ -591,7 +591,7 @@ EXPORT_SYMBOL(find_lock_page);
* memory exhaustion.
*/
struct page *find_or_create_page(struct address_space *mapping,
- unsigned long index, unsigned int gfp_mask)
+ unsigned long index, gfp_t gfp_mask)
{
struct page *page, *cached_page = NULL;
int err;
@@ -683,7 +683,7 @@ struct page *
grab_cache_page_nowait(struct address_space *mapping, unsigned long index)
{
struct page *page = find_get_page(mapping, index);
- unsigned int gfp_mask;
+ gfp_t gfp_mask;
if (page) {
if (!TestSetPageLocked(page))
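
These prototype changes, like the similar ones in mempool.c, page_alloc.c, shmem.c, slab.c and vmscan.c below, swap plain int/unsigned int GFP arguments for gfp_t so that sparse ("make C=1") can flag places where allocation flags and ordinary integers get mixed. A simplified sketch of the mechanism, paraphrasing the gfp.h of this era (the exact sparse annotation on gfp_t changed between releases, so treat the typedef as illustrative rather than verbatim):

	typedef unsigned int __bitwise gfp_t;		/* annotation varies by release */

	#define __GFP_DMA	((__force gfp_t)0x01u)
	#define __GFP_HIGHMEM	((__force gfp_t)0x02u)

	int add_to_cache_old(struct page *page, int gfp_mask);	/* passing a gfp_t here now warns */
	int add_to_cache_new(struct page *page, gfp_t gfp_mask);	/* type-checked under "make C=1" */

Under gcc the typedef still compiles to a plain unsigned int, so the conversion is purely a static-checking aid and changes no generated code.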
diff --git a/mm/highmem.c b/mm/highmem.c
index 90e1861e2da..ce2e7e8bbfa 100644
--- a/mm/highmem.c
+++ b/mm/highmem.c
@@ -30,11 +30,9 @@
static mempool_t *page_pool, *isa_page_pool;
-static void *page_pool_alloc(gfp_t gfp_mask, void *data)
+static void *page_pool_alloc_isa(gfp_t gfp_mask, void *data)
{
- unsigned int gfp = gfp_mask | (unsigned int) (long) data;
-
- return alloc_page(gfp);
+ return alloc_page(gfp_mask | GFP_DMA);
}
static void page_pool_free(void *page, void *data)
@@ -51,6 +49,12 @@ static void page_pool_free(void *page, void *data)
* n means that there are (n-1) current users of it.
*/
#ifdef CONFIG_HIGHMEM
+
+static void *page_pool_alloc(gfp_t gfp_mask, void *data)
+{
+ return alloc_page(gfp_mask);
+}
+
static int pkmap_count[LAST_PKMAP];
static unsigned int last_pkmap_nr;
static __cacheline_aligned_in_smp DEFINE_SPINLOCK(kmap_lock);
@@ -267,7 +271,7 @@ int init_emergency_isa_pool(void)
if (isa_page_pool)
return 0;
- isa_page_pool = mempool_create(ISA_POOL_SIZE, page_pool_alloc, page_pool_free, (void *) __GFP_DMA);
+ isa_page_pool = mempool_create(ISA_POOL_SIZE, page_pool_alloc_isa, page_pool_free, NULL);
if (!isa_page_pool)
BUG();
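
Instead of smuggling __GFP_DMA through the opaque pool_data pointer (the int/long casts removed above, which also defeats gfp_t checking), the ISA pool now gets its own allocation callback with the extra flag hard-coded. A minimal sketch of the same pattern for a hypothetical private page pool, following the 2.6-era mempool_create(min_nr, alloc_fn, free_fn, pool_data) interface; all names here are illustrative:

	static void *my_pool_alloc(gfp_t gfp_mask, void *pool_data)
	{
		/* any fixed GFP bits belong here, not in pool_data */
		return alloc_page(gfp_mask);
	}

	static void my_pool_free(void *element, void *pool_data)
	{
		__free_page(element);
	}

	static mempool_t *my_pool;

	static int __init my_pool_init(void)
	{
		my_pool = mempool_create(16, my_pool_alloc, my_pool_free, NULL);
		return my_pool ? 0 : -ENOMEM;
	}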
diff --git a/mm/hugetlb.c b/mm/hugetlb.c
index 901ac523a1c..61d38067803 100644
--- a/mm/hugetlb.c
+++ b/mm/hugetlb.c
@@ -274,21 +274,22 @@ int copy_hugetlb_page_range(struct mm_struct *dst, struct mm_struct *src,
{
pte_t *src_pte, *dst_pte, entry;
struct page *ptepage;
- unsigned long addr = vma->vm_start;
- unsigned long end = vma->vm_end;
+ unsigned long addr;
- while (addr < end) {
+ for (addr = vma->vm_start; addr < vma->vm_end; addr += HPAGE_SIZE) {
dst_pte = huge_pte_alloc(dst, addr);
if (!dst_pte)
goto nomem;
+ spin_lock(&src->page_table_lock);
src_pte = huge_pte_offset(src, addr);
- BUG_ON(!src_pte || pte_none(*src_pte)); /* prefaulted */
- entry = *src_pte;
- ptepage = pte_page(entry);
- get_page(ptepage);
- add_mm_counter(dst, rss, HPAGE_SIZE / PAGE_SIZE);
- set_huge_pte_at(dst, addr, dst_pte, entry);
- addr += HPAGE_SIZE;
+ if (src_pte && !pte_none(*src_pte)) {
+ entry = *src_pte;
+ ptepage = pte_page(entry);
+ get_page(ptepage);
+ add_mm_counter(dst, rss, HPAGE_SIZE / PAGE_SIZE);
+ set_huge_pte_at(dst, addr, dst_pte, entry);
+ }
+ spin_unlock(&src->page_table_lock);
}
return 0;
@@ -323,8 +324,8 @@ void unmap_hugepage_range(struct vm_area_struct *vma, unsigned long start,
page = pte_page(pte);
put_page(page);
+ add_mm_counter(mm, rss, - (HPAGE_SIZE / PAGE_SIZE));
}
- add_mm_counter(mm, rss, -((end - start) >> PAGE_SHIFT));
flush_tlb_range(vma, start, end);
}
@@ -393,6 +394,28 @@ out:
return ret;
}
+/*
+ * On ia64 at least, it is possible to receive a hugetlb fault from a
+ * stale zero entry left in the TLB from earlier hardware prefetching.
+ * Low-level arch code should already have flushed the stale entry as
+ * part of its fault handling, but we do need to accept this minor fault
+ * and return successfully. Whereas the "normal" case is that this is
+ * an access to a hugetlb page which has been truncated off since mmap.
+ */
+int hugetlb_fault(struct mm_struct *mm, struct vm_area_struct *vma,
+ unsigned long address, int write_access)
+{
+ int ret = VM_FAULT_SIGBUS;
+ pte_t *pte;
+
+ spin_lock(&mm->page_table_lock);
+ pte = huge_pte_offset(mm, address);
+ if (pte && !pte_none(*pte))
+ ret = VM_FAULT_MINOR;
+ spin_unlock(&mm->page_table_lock);
+ return ret;
+}
+
int follow_hugetlb_page(struct mm_struct *mm, struct vm_area_struct *vma,
struct page **pages, struct vm_area_struct **vmas,
unsigned long *position, int *length, int i)
@@ -403,6 +426,7 @@ int follow_hugetlb_page(struct mm_struct *mm, struct vm_area_struct *vma,
BUG_ON(!is_vm_hugetlb_page(vma));
vpfn = vaddr/PAGE_SIZE;
+ spin_lock(&mm->page_table_lock);
while (vaddr < vma->vm_end && remainder) {
if (pages) {
@@ -415,8 +439,13 @@ int follow_hugetlb_page(struct mm_struct *mm, struct vm_area_struct *vma,
* indexing below to work. */
pte = huge_pte_offset(mm, vaddr & HPAGE_MASK);
- /* hugetlb should be locked, and hence, prefaulted */
- WARN_ON(!pte || pte_none(*pte));
+ /* the hugetlb file might have been truncated */
+ if (!pte || pte_none(*pte)) {
+ remainder = 0;
+ if (!i)
+ i = -EFAULT;
+ break;
+ }
page = &pte_page(*pte)[vpfn % (HPAGE_SIZE/PAGE_SIZE)];
@@ -434,7 +463,7 @@ int follow_hugetlb_page(struct mm_struct *mm, struct vm_area_struct *vma,
--remainder;
++i;
}
-
+ spin_unlock(&mm->page_table_lock);
*length = remainder;
*position = vaddr;
diff --git a/mm/memory.c b/mm/memory.c
index ae8161f1f45..1db40e935e5 100644
--- a/mm/memory.c
+++ b/mm/memory.c
@@ -2045,8 +2045,8 @@ int __handle_mm_fault(struct mm_struct *mm, struct vm_area_struct * vma,
inc_page_state(pgfault);
- if (is_vm_hugetlb_page(vma))
- return VM_FAULT_SIGBUS; /* mapping truncation does this. */
+ if (unlikely(is_vm_hugetlb_page(vma)))
+ return hugetlb_fault(mm, vma, address, write_access);
/*
* We need the page table lock to synchronize with kswapd
diff --git a/mm/mempolicy.c b/mm/mempolicy.c
index 37af443eb09..1d5c64df165 100644
--- a/mm/mempolicy.c
+++ b/mm/mempolicy.c
@@ -700,7 +700,7 @@ static struct zonelist *zonelist_policy(gfp_t gfp, struct mempolicy *policy)
case MPOL_BIND:
/* Lower zones don't get a policy applied */
/* Careful: current->mems_allowed might have moved */
- if ((gfp & GFP_ZONEMASK) >= policy_zone)
+ if (gfp_zone(gfp) >= policy_zone)
if (cpuset_zonelist_valid_mems_allowed(policy->v.zonelist))
return policy->v.zonelist;
/*FALL THROUGH*/
@@ -712,7 +712,7 @@ static struct zonelist *zonelist_policy(gfp_t gfp, struct mempolicy *policy)
nd = 0;
BUG();
}
- return NODE_DATA(nd)->node_zonelists + (gfp & GFP_ZONEMASK);
+ return NODE_DATA(nd)->node_zonelists + gfp_zone(gfp);
}
/* Do dynamic interleaving for a process */
@@ -757,7 +757,7 @@ static struct page *alloc_page_interleave(gfp_t gfp, unsigned order, unsigned ni
struct page *page;
BUG_ON(!node_online(nid));
- zl = NODE_DATA(nid)->node_zonelists + (gfp & GFP_ZONEMASK);
+ zl = NODE_DATA(nid)->node_zonelists + gfp_zone(gfp);
page = __alloc_pages(gfp, order, zl);
if (page && page_zone(page) == zl->zones[0]) {
zone_pcp(zl->zones[0],get_cpu())->interleave_hit++;
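
Here and in page_alloc.c below, the open-coded "gfp & GFP_ZONEMASK" index arithmetic is replaced by gfp_zone(), which keeps the gfp_t-to-int conversion in one audited place. Roughly what that helper looks like in this era's include/linux/gfp.h (paraphrased, not a verbatim copy):

	static inline int gfp_zone(gfp_t gfp)
	{
		int zone = GFP_ZONEMASK & (__force int) gfp;
		BUG_ON(zone >= GFP_ZONETYPES);
		return zone;
	}

So NODE_DATA(nd)->node_zonelists + gfp_zone(gfp) still selects the per-node zonelist matching the zone-selector bits, but without doing bitwise arithmetic directly on a gfp_t.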
diff --git a/mm/mempool.c b/mm/mempool.c
index 9e377ea700b..1a99b80480d 100644
--- a/mm/mempool.c
+++ b/mm/mempool.c
@@ -205,7 +205,7 @@ void * mempool_alloc(mempool_t *pool, gfp_t gfp_mask)
void *element;
unsigned long flags;
wait_queue_t wait;
- unsigned int gfp_temp;
+ gfp_t gfp_temp;
might_sleep_if(gfp_mask & __GFP_WAIT);
diff --git a/mm/page_alloc.c b/mm/page_alloc.c
index cc1fe2672a3..94c864eac9c 100644
--- a/mm/page_alloc.c
+++ b/mm/page_alloc.c
@@ -734,7 +734,7 @@ buffered_rmqueue(struct zone *zone, int order, gfp_t gfp_flags)
* of the allocation.
*/
int zone_watermark_ok(struct zone *z, int order, unsigned long mark,
- int classzone_idx, int can_try_harder, int gfp_high)
+ int classzone_idx, int can_try_harder, gfp_t gfp_high)
{
/* free_pages may go negative - that's OK */
long min = mark, free_pages = z->free_pages - (1 << order) + 1;
@@ -777,7 +777,7 @@ struct page * fastcall
__alloc_pages(gfp_t gfp_mask, unsigned int order,
struct zonelist *zonelist)
{
- const int wait = gfp_mask & __GFP_WAIT;
+ const gfp_t wait = gfp_mask & __GFP_WAIT;
struct zone **zones, *z;
struct page *page;
struct reclaim_state reclaim_state;
@@ -996,7 +996,7 @@ fastcall unsigned long get_zeroed_page(gfp_t gfp_mask)
* get_zeroed_page() returns a 32-bit address, which cannot represent
* a highmem page
*/
- BUG_ON(gfp_mask & __GFP_HIGHMEM);
+ BUG_ON((gfp_mask & __GFP_HIGHMEM) != 0);
page = alloc_pages(gfp_mask | __GFP_ZERO, 0);
if (page)
@@ -1089,7 +1089,7 @@ static unsigned int nr_free_zone_pages(int offset)
*/
unsigned int nr_free_buffer_pages(void)
{
- return nr_free_zone_pages(GFP_USER & GFP_ZONEMASK);
+ return nr_free_zone_pages(gfp_zone(GFP_USER));
}
/*
@@ -1097,7 +1097,7 @@ unsigned int nr_free_buffer_pages(void)
*/
unsigned int nr_free_pagecache_pages(void)
{
- return nr_free_zone_pages(GFP_HIGHUSER & GFP_ZONEMASK);
+ return nr_free_zone_pages(gfp_zone(GFP_HIGHUSER));
}
#ifdef CONFIG_HIGHMEM
@@ -1428,6 +1428,16 @@ static int __init build_zonelists_node(pg_data_t *pgdat, struct zonelist *zoneli
return j;
}
+static inline int highest_zone(int zone_bits)
+{
+ int res = ZONE_NORMAL;
+ if (zone_bits & (__force int)__GFP_HIGHMEM)
+ res = ZONE_HIGHMEM;
+ if (zone_bits & (__force int)__GFP_DMA)
+ res = ZONE_DMA;
+ return res;
+}
+
#ifdef CONFIG_NUMA
#define MAX_NODE_LOAD (num_online_nodes())
static int __initdata node_load[MAX_NUMNODES];
@@ -1524,11 +1534,7 @@ static void __init build_zonelists(pg_data_t *pgdat)
zonelist = pgdat->node_zonelists + i;
for (j = 0; zonelist->zones[j] != NULL; j++);
- k = ZONE_NORMAL;
- if (i & __GFP_HIGHMEM)
- k = ZONE_HIGHMEM;
- if (i & __GFP_DMA)
- k = ZONE_DMA;
+ k = highest_zone(i);
j = build_zonelists_node(NODE_DATA(node), zonelist, j, k);
zonelist->zones[j] = NULL;
@@ -1549,12 +1555,7 @@ static void __init build_zonelists(pg_data_t *pgdat)
zonelist = pgdat->node_zonelists + i;
j = 0;
- k = ZONE_NORMAL;
- if (i & __GFP_HIGHMEM)
- k = ZONE_HIGHMEM;
- if (i & __GFP_DMA)
- k = ZONE_DMA;
-
+ k = highest_zone(i);
j = build_zonelists_node(pgdat, zonelist, j, k);
/*
* Now we build the zonelist so that it contains the zones
@@ -1750,6 +1751,8 @@ inline void setup_pageset(struct per_cpu_pageset *p, unsigned long batch)
{
struct per_cpu_pages *pcp;
+ memset(p, 0, sizeof(*p));
+
pcp = &p->pcp[0]; /* hot */
pcp->count = 0;
pcp->low = 2 * batch;
diff --git a/mm/shmem.c b/mm/shmem.c
index ea064d89cda..55e04a0734c 100644
--- a/mm/shmem.c
+++ b/mm/shmem.c
@@ -85,7 +85,7 @@ enum sgp_type {
static int shmem_getpage(struct inode *inode, unsigned long idx,
struct page **pagep, enum sgp_type sgp, int *type);
-static inline struct page *shmem_dir_alloc(unsigned int gfp_mask)
+static inline struct page *shmem_dir_alloc(gfp_t gfp_mask)
{
/*
* The above definition of ENTRIES_PER_PAGE, and the use of
@@ -898,7 +898,7 @@ struct page *shmem_swapin(struct shmem_inode_info *info, swp_entry_t entry,
}
static struct page *
-shmem_alloc_page(unsigned long gfp, struct shmem_inode_info *info,
+shmem_alloc_page(gfp_t gfp, struct shmem_inode_info *info,
unsigned long idx)
{
struct vm_area_struct pvma;
diff --git a/mm/slab.c b/mm/slab.c
index d05c678bceb..d30423f167a 100644
--- a/mm/slab.c
+++ b/mm/slab.c
@@ -386,7 +386,7 @@ struct kmem_cache_s {
unsigned int gfporder;
/* force GFP flags, e.g. GFP_DMA */
- unsigned int gfpflags;
+ gfp_t gfpflags;
size_t colour; /* cache colouring range */
unsigned int colour_off; /* colour offset */
@@ -2117,7 +2117,7 @@ static void cache_init_objs(kmem_cache_t *cachep,
slabp->free = 0;
}
-static void kmem_flagcheck(kmem_cache_t *cachep, unsigned int flags)
+static void kmem_flagcheck(kmem_cache_t *cachep, gfp_t flags)
{
if (flags & SLAB_DMA) {
if (!(cachep->gfpflags & GFP_DMA))
@@ -2152,7 +2152,7 @@ static int cache_grow(kmem_cache_t *cachep, gfp_t flags, int nodeid)
struct slab *slabp;
void *objp;
size_t offset;
- unsigned int local_flags;
+ gfp_t local_flags;
unsigned long ctor_flags;
struct kmem_list3 *l3;
@@ -2546,7 +2546,7 @@ static inline void *__cache_alloc(kmem_cache_t *cachep, gfp_t flags)
/*
* An interface to enable slab creation on nodeid
*/
-static void *__cache_alloc_node(kmem_cache_t *cachep, int flags, int nodeid)
+static void *__cache_alloc_node(kmem_cache_t *cachep, gfp_t flags, int nodeid)
{
struct list_head *entry;
struct slab *slabp;
diff --git a/mm/vmscan.c b/mm/vmscan.c
index 64f9570cff5..843c87d1e61 100644
--- a/mm/vmscan.c
+++ b/mm/vmscan.c
@@ -70,7 +70,7 @@ struct scan_control {
unsigned int priority;
/* This context's GFP mask */
- unsigned int gfp_mask;
+ gfp_t gfp_mask;
int may_writepage;
@@ -186,7 +186,7 @@ EXPORT_SYMBOL(remove_shrinker);
*
* Returns the number of slab objects which we shrunk.
*/
-static int shrink_slab(unsigned long scanned, unsigned int gfp_mask,
+static int shrink_slab(unsigned long scanned, gfp_t gfp_mask,
unsigned long lru_pages)
{
struct shrinker *shrinker;
@@ -926,7 +926,7 @@ shrink_caches(struct zone **zones, struct scan_control *sc)
* holds filesystem locks which prevent writeout this might not work, and the
* allocation attempt will fail.
*/
-int try_to_free_pages(struct zone **zones, unsigned int gfp_mask)
+int try_to_free_pages(struct zone **zones, gfp_t gfp_mask)
{
int priority;
int ret = 0;
@@ -1338,7 +1338,7 @@ module_init(kswapd_init)
/*
* Try to free up some pages from this zone through reclaim.
*/
-int zone_reclaim(struct zone *zone, unsigned int gfp_mask, unsigned int order)
+int zone_reclaim(struct zone *zone, gfp_t gfp_mask, unsigned int order)
{
struct scan_control sc;
int nr_pages = 1 << order;