Diffstat (limited to 'mm')
-rw-r--r--  mm/backing-dev.c    |  2 --
-rw-r--r--  mm/huge_memory.c    |  4 +++-
-rw-r--r--  mm/memcontrol.c     |  3 ++-
-rw-r--r--  mm/mempolicy.c      |  6 +++++-
-rw-r--r--  mm/mmap.c           |  2 +-
-rw-r--r--  mm/page-writeback.c |  4 ++--
-rw-r--r--  mm/shmem.c          |  3 ++-
-rw-r--r--  mm/slab.c           | 10 +++++-----
-rw-r--r--  mm/slub.c           |  7 ++-----
-rw-r--r--  mm/swap.c           | 29 ++++-------------------------
-rw-r--r--  mm/vmpressure.c     | 28 ++++++++++++++++++++++------
-rw-r--r--  mm/vmstat.c         |  6 +++---
-rw-r--r--  mm/zbud.c           |  2 +-
13 files changed, 52 insertions(+), 54 deletions(-)
diff --git a/mm/backing-dev.c b/mm/backing-dev.c
index d014ee5fcbb..e04454cdb33 100644
--- a/mm/backing-dev.c
+++ b/mm/backing-dev.c
@@ -232,8 +232,6 @@ static ssize_t stable_pages_required_show(struct device *dev,
 			bdi_cap_stable_pages_required(bdi) ? 1 : 0);
 }
 
-#define __ATTR_RW(attr) __ATTR(attr, 0644, attr##_show, attr##_store)
-
 static struct device_attribute bdi_dev_attrs[] = {
 	__ATTR_RW(read_ahead_kb),
 	__ATTR_RW(min_ratio),
diff --git a/mm/huge_memory.c b/mm/huge_memory.c
index 243e710c603..a92012a7170 100644
--- a/mm/huge_memory.c
+++ b/mm/huge_memory.c
@@ -1620,7 +1620,9 @@ static void __split_huge_page_refcount(struct page *page,
 			     ((1L << PG_referenced) |
 			      (1L << PG_swapbacked) |
 			      (1L << PG_mlocked) |
-			      (1L << PG_uptodate)));
+			      (1L << PG_uptodate) |
+			      (1L << PG_active) |
+			      (1L << PG_unevictable)));
 	page_tail->flags |= (1L << PG_dirty);
 
 	/* clear PageTail before overwriting first_page */
diff --git a/mm/memcontrol.c b/mm/memcontrol.c
index d12ca6f3c29..c290a1cf386 100644
--- a/mm/memcontrol.c
+++ b/mm/memcontrol.c
@@ -2522,7 +2522,7 @@ static void mem_cgroup_drain_pcp_counter(struct mem_cgroup *memcg, int cpu)
 	spin_unlock(&memcg->pcp_counter_lock);
 }
 
-static int __cpuinit memcg_cpu_hotplug_callback(struct notifier_block *nb,
+static int memcg_cpu_hotplug_callback(struct notifier_block *nb,
 					unsigned long action,
 					void *hcpu)
 {
@@ -6335,6 +6335,7 @@ static void mem_cgroup_css_offline(struct cgroup *cont)
 	mem_cgroup_invalidate_reclaim_iterators(memcg);
 	mem_cgroup_reparent_charges(memcg);
 	mem_cgroup_destroy_all_caches(memcg);
+	vmpressure_cleanup(&memcg->vmpressure);
 }
 
 static void mem_cgroup_css_free(struct cgroup *cont)
diff --git a/mm/mempolicy.c b/mm/mempolicy.c
index 74310017296..4baf12e534d 100644
--- a/mm/mempolicy.c
+++ b/mm/mempolicy.c
@@ -732,7 +732,10 @@ static int mbind_range(struct mm_struct *mm, unsigned long start,
 		if (prev) {
 			vma = prev;
 			next = vma->vm_next;
-			continue;
+			if (mpol_equal(vma_policy(vma), new_pol))
+				continue;
+			/* vma_merge() joined vma && vma->next, case 8 */
+			goto replace;
 		}
 		if (vma->vm_start != vmstart) {
 			err = split_vma(vma->vm_mm, vma, vmstart, 1);
@@ -744,6 +747,7 @@ static int mbind_range(struct mm_struct *mm, unsigned long start,
 			if (err)
 				goto out;
 		}
+ replace:
 		err = vma_replace_policy(vma, new_pol);
 		if (err)
 			goto out;
diff --git a/mm/mmap.c b/mm/mmap.c
index fbad7b09109..1edbaa3136c 100644
--- a/mm/mmap.c
+++ b/mm/mmap.c
@@ -865,7 +865,7 @@ again:			remove_next = 1 + (end > next->vm_end);
 		if (next->anon_vma)
 			anon_vma_merge(vma, next);
 		mm->map_count--;
-		vma_set_policy(vma, vma_policy(next));
+		mpol_put(vma_policy(next));
 		kmem_cache_free(vm_area_cachep, next);
 		/*
 		 * In mprotect's case 6 (see comments on vma_merge),
diff --git a/mm/page-writeback.c b/mm/page-writeback.c
index 4514ad7415c..3f0c895c71f 100644
--- a/mm/page-writeback.c
+++ b/mm/page-writeback.c
@@ -1619,7 +1619,7 @@ void writeback_set_ratelimit(void)
 		ratelimit_pages = 16;
 }
 
-static int __cpuinit
+static int
 ratelimit_handler(struct notifier_block *self, unsigned long action,
 		  void *hcpu)
 {
@@ -1634,7 +1634,7 @@ ratelimit_handler(struct notifier_block *self, unsigned long action,
 	}
 }
 
-static struct notifier_block __cpuinitdata ratelimit_nb = {
+static struct notifier_block ratelimit_nb = {
 	.notifier_call	= ratelimit_handler,
 	.next		= NULL,
 };
diff --git a/mm/shmem.c b/mm/shmem.c
index a87990cf9f9..8335dbd3fc3 100644
--- a/mm/shmem.c
+++ b/mm/shmem.c
@@ -1798,7 +1798,8 @@ static loff_t shmem_file_llseek(struct file *file, loff_t offset, int whence)
 		}
 	}
 
-	offset = vfs_setpos(file, offset, MAX_LFS_FILESIZE);
+	if (offset >= 0)
+		offset = vfs_setpos(file, offset, MAX_LFS_FILESIZE);
 	mutex_unlock(&inode->i_mutex);
 	return offset;
 }
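[Aside, not part of the commit] The shmem.c hunk guards vfs_setpos() because shmem_file_llseek() can reach that point with a negative offset (SEEK_DATA/SEEK_HOLE return -ENXIO past end of file); committing it unconditionally would replace that error with -EINVAL. A minimal sketch of the guarded pattern, with a hypothetical helper name:

	#include <linux/fs.h>

	/* example_llseek_finish() is illustrative only: commit a position
	 * only when the lookup succeeded, otherwise propagate the error. */
	static loff_t example_llseek_finish(struct file *file, loff_t offset)
	{
		if (offset >= 0)	/* only commit a valid position */
			offset = vfs_setpos(file, offset, MAX_LFS_FILESIZE);
		return offset;		/* else propagate e.g. -ENXIO */
	}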
diff --git a/mm/slab.c b/mm/slab.c
index 35cb0c86150..2580db062df 100644
--- a/mm/slab.c
+++ b/mm/slab.c
@@ -787,7 +787,7 @@ static void next_reap_node(void)
  * the CPUs getting into lockstep and contending for the global cache chain
  * lock.
  */
-static void __cpuinit start_cpu_timer(int cpu)
+static void start_cpu_timer(int cpu)
 {
 	struct delayed_work *reap_work = &per_cpu(slab_reap_work, cpu);
 
@@ -1186,7 +1186,7 @@ static inline int slabs_tofree(struct kmem_cache *cachep,
 	return (n->free_objects + cachep->num - 1) / cachep->num;
 }
 
-static void __cpuinit cpuup_canceled(long cpu)
+static void cpuup_canceled(long cpu)
 {
 	struct kmem_cache *cachep;
 	struct kmem_cache_node *n = NULL;
@@ -1251,7 +1251,7 @@ free_array_cache:
 	}
 }
 
-static int __cpuinit cpuup_prepare(long cpu)
+static int cpuup_prepare(long cpu)
 {
 	struct kmem_cache *cachep;
 	struct kmem_cache_node *n = NULL;
@@ -1334,7 +1334,7 @@ bad:
 	return -ENOMEM;
 }
 
-static int __cpuinit cpuup_callback(struct notifier_block *nfb,
+static int cpuup_callback(struct notifier_block *nfb,
 				    unsigned long action, void *hcpu)
 {
 	long cpu = (long)hcpu;
@@ -1390,7 +1390,7 @@ static int __cpuinit cpuup_callback(struct notifier_block *nfb,
 	return notifier_from_errno(err);
 }
 
-static struct notifier_block __cpuinitdata cpucache_notifier = {
+static struct notifier_block cpucache_notifier = {
 	&cpuup_callback, NULL, 0
 };
 
diff --git a/mm/slub.c b/mm/slub.c
index 3b482c86300..e3ba1f2cf60 100644
--- a/mm/slub.c
+++ b/mm/slub.c
@@ -1968,9 +1968,6 @@ static void put_cpu_partial(struct kmem_cache *s, struct page *page, int drain)
 	int pages;
 	int pobjects;
 
-	if (!s->cpu_partial)
-		return;
-
 	do {
 		pages = 0;
 		pobjects = 0;
@@ -3773,7 +3770,7 @@ int __kmem_cache_create(struct kmem_cache *s, unsigned long flags)
 * Use the cpu notifier to insure that the cpu slabs are flushed when
 * necessary.
 */
-static int __cpuinit slab_cpuup_callback(struct notifier_block *nfb,
+static int slab_cpuup_callback(struct notifier_block *nfb,
 				  unsigned long action, void *hcpu)
 {
 	long cpu = (long)hcpu;
@@ -3799,7 +3796,7 @@ static int __cpuinit slab_cpuup_callback(struct notifier_block *nfb,
 	return NOTIFY_OK;
 }
 
-static struct notifier_block __cpuinitdata slab_notifier = {
+static struct notifier_block slab_notifier = {
 	.notifier_call = slab_cpuup_callback
 };
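[Aside, not part of the commit] The slab.c and slub.c hunks only drop the __cpuinit/__cpuinitdata annotations; the CPU-hotplug notifier pattern itself is unchanged. A minimal sketch of that pattern, with illustrative names:

	#include <linux/cpu.h>
	#include <linux/notifier.h>
	#include <linux/printk.h>

	static int example_cpuup_callback(struct notifier_block *nfb,
					  unsigned long action, void *hcpu)
	{
		long cpu = (long)hcpu;

		switch (action) {
		case CPU_UP_PREPARE:
			/* allocate per-cpu state for @cpu here */
			pr_debug("preparing cpu %ld\n", cpu);
			break;
		case CPU_DEAD:
			/* and tear it back down here */
			pr_debug("cleaning up cpu %ld\n", cpu);
			break;
		}
		return NOTIFY_OK;
	}

	static struct notifier_block example_notifier = {
		.notifier_call = example_cpuup_callback,
	};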
diff --git a/mm/swap.c b/mm/swap.c
index 4a1d0d2c52f..62b78a6e224 100644
--- a/mm/swap.c
+++ b/mm/swap.c
@@ -512,12 +512,7 @@ EXPORT_SYMBOL(__lru_cache_add);
  */
 void lru_cache_add(struct page *page)
 {
-	if (PageActive(page)) {
-		VM_BUG_ON(PageUnevictable(page));
-	} else if (PageUnevictable(page)) {
-		VM_BUG_ON(PageActive(page));
-	}
-
+	VM_BUG_ON(PageActive(page) && PageUnevictable(page));
 	VM_BUG_ON(PageLRU(page));
 	__lru_cache_add(page);
 }
@@ -539,6 +534,7 @@ void add_page_to_unevictable_list(struct page *page)
 
 	spin_lock_irq(&zone->lru_lock);
 	lruvec = mem_cgroup_page_lruvec(page, zone);
+	ClearPageActive(page);
 	SetPageUnevictable(page);
 	SetPageLRU(page);
 	add_page_to_lru_list(page, lruvec, LRU_UNEVICTABLE);
@@ -774,8 +770,6 @@ EXPORT_SYMBOL(__pagevec_release);
 void lru_add_page_tail(struct page *page, struct page *page_tail,
 		       struct lruvec *lruvec, struct list_head *list)
 {
-	int uninitialized_var(active);
-	enum lru_list lru;
 	const int file = 0;
 
 	VM_BUG_ON(!PageHead(page));
@@ -787,20 +781,6 @@ void lru_add_page_tail(struct page *page, struct page *page_tail,
 	if (!list)
 		SetPageLRU(page_tail);
 
-	if (page_evictable(page_tail)) {
-		if (PageActive(page)) {
-			SetPageActive(page_tail);
-			active = 1;
-			lru = LRU_ACTIVE_ANON;
-		} else {
-			active = 0;
-			lru = LRU_INACTIVE_ANON;
-		}
-	} else {
-		SetPageUnevictable(page_tail);
-		lru = LRU_UNEVICTABLE;
-	}
-
 	if (likely(PageLRU(page)))
 		list_add_tail(&page_tail->lru, &page->lru);
 	else if (list) {
@@ -816,13 +796,13 @@ void lru_add_page_tail(struct page *page, struct page *page_tail,
 		 * Use the standard add function to put page_tail on the list,
 		 * but then correct its position so they all end up in order.
 		 */
-		add_page_to_lru_list(page_tail, lruvec, lru);
+		add_page_to_lru_list(page_tail, lruvec, page_lru(page_tail));
 		list_head = page_tail->lru.prev;
 		list_move_tail(&page_tail->lru, list_head);
 	}
 
 	if (!PageUnevictable(page))
-		update_page_reclaim_stat(lruvec, file, active);
+		update_page_reclaim_stat(lruvec, file, PageActive(page_tail));
 }
 #endif /* CONFIG_TRANSPARENT_HUGEPAGE */
 
@@ -833,7 +813,6 @@ static void __pagevec_lru_add_fn(struct page *page, struct lruvec *lruvec,
 	int active = PageActive(page);
 	enum lru_list lru = page_lru(page);
 
-	VM_BUG_ON(PageUnevictable(page));
 	VM_BUG_ON(PageLRU(page));
 
 	SetPageLRU(page);
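[Aside, not part of the commit] The swap.c hunks drop the hand-maintained active/unevictable bookkeeping in lru_add_page_tail() and instead derive the target list from the tail page's flags via page_lru(). A simplified model of that derivation (the real helper also distinguishes file-backed lists):

	#include <linux/mmzone.h>	/* enum lru_list */
	#include <linux/page-flags.h>

	/* example_page_lru() is a simplified, anon-only model */
	static enum lru_list example_page_lru(struct page *page)
	{
		if (PageUnevictable(page))
			return LRU_UNEVICTABLE;
		return PageActive(page) ? LRU_ACTIVE_ANON : LRU_INACTIVE_ANON;
	}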
diff --git a/mm/vmpressure.c b/mm/vmpressure.c
index 736a6011c2c..0c1e37d829f 100644
--- a/mm/vmpressure.c
+++ b/mm/vmpressure.c
@@ -180,12 +180,12 @@ static void vmpressure_work_fn(struct work_struct *work)
 	if (!vmpr->scanned)
 		return;
 
-	mutex_lock(&vmpr->sr_lock);
+	spin_lock(&vmpr->sr_lock);
 	scanned = vmpr->scanned;
 	reclaimed = vmpr->reclaimed;
 	vmpr->scanned = 0;
 	vmpr->reclaimed = 0;
-	mutex_unlock(&vmpr->sr_lock);
+	spin_unlock(&vmpr->sr_lock);
 
 	do {
 		if (vmpressure_event(vmpr, scanned, reclaimed))
@@ -240,13 +240,13 @@ void vmpressure(gfp_t gfp, struct mem_cgroup *memcg,
 	if (!scanned)
 		return;
 
-	mutex_lock(&vmpr->sr_lock);
+	spin_lock(&vmpr->sr_lock);
 	vmpr->scanned += scanned;
 	vmpr->reclaimed += reclaimed;
 	scanned = vmpr->scanned;
-	mutex_unlock(&vmpr->sr_lock);
+	spin_unlock(&vmpr->sr_lock);
 
-	if (scanned < vmpressure_win || work_pending(&vmpr->work))
+	if (scanned < vmpressure_win)
 		return;
 	schedule_work(&vmpr->work);
 }
@@ -367,8 +367,24 @@ void vmpressure_unregister_event(struct cgroup *cg, struct cftype *cft,
  */
 void vmpressure_init(struct vmpressure *vmpr)
 {
-	mutex_init(&vmpr->sr_lock);
+	spin_lock_init(&vmpr->sr_lock);
 	mutex_init(&vmpr->events_lock);
 	INIT_LIST_HEAD(&vmpr->events);
 	INIT_WORK(&vmpr->work, vmpressure_work_fn);
 }
+
+/**
+ * vmpressure_cleanup() - shuts down vmpressure control structure
+ * @vmpr: Structure to be cleaned up
+ *
+ * This function should be called before the structure in which it is
+ * embedded is cleaned up.
+ */
+void vmpressure_cleanup(struct vmpressure *vmpr)
+{
+	/*
+	 * Make sure there is no pending work before eventfd infrastructure
+	 * goes away.
+	 */
+	flush_work(&vmpr->work);
+}
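[Aside, not part of the commit] The new vmpressure_cleanup() exists so that anything embedding a struct vmpressure flushes pending work before freeing itself; otherwise the worker could run against freed memory. Sketch of the teardown ordering, using a hypothetical embedding object:

	#include <linux/slab.h>
	#include <linux/vmpressure.h>

	struct pressure_holder {		/* hypothetical embedder */
		struct vmpressure vmpr;
	};

	static void pressure_holder_destroy(struct pressure_holder *h)
	{
		vmpressure_cleanup(&h->vmpr);	/* flushes vmpr->work */
		kfree(h);			/* safe: no worker touches *h now */
	}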
diff --git a/mm/vmstat.c b/mm/vmstat.c
index f42745e6578..20c2ef4458f 100644
--- a/mm/vmstat.c
+++ b/mm/vmstat.c
@@ -1182,7 +1182,7 @@ static void vmstat_update(struct work_struct *w)
 		round_jiffies_relative(sysctl_stat_interval));
 }
 
-static void __cpuinit start_cpu_timer(int cpu)
+static void start_cpu_timer(int cpu)
 {
 	struct delayed_work *work = &per_cpu(vmstat_work, cpu);
 
@@ -1194,7 +1194,7 @@ static void __cpuinit start_cpu_timer(int cpu)
 * Use the cpu notifier to insure that the thresholds are recalculated
 * when necessary.
 */
-static int __cpuinit vmstat_cpuup_callback(struct notifier_block *nfb,
+static int vmstat_cpuup_callback(struct notifier_block *nfb,
 		unsigned long action,
 		void *hcpu)
 {
@@ -1226,7 +1226,7 @@ static int __cpuinit vmstat_cpuup_callback(struct notifier_block *nfb,
 	return NOTIFY_OK;
 }
 
-static struct notifier_block __cpuinitdata vmstat_notifier =
+static struct notifier_block vmstat_notifier =
 	{ &vmstat_cpuup_callback, NULL, 0 };
 #endif
 
diff --git a/mm/zbud.c b/mm/zbud.c
index 9bb4710e358..ad1e781284f 100644
--- a/mm/zbud.c
+++ b/mm/zbud.c
@@ -257,7 +257,7 @@ int zbud_alloc(struct zbud_pool *pool, int size, gfp_t gfp,
 	if (size <= 0 || gfp & __GFP_HIGHMEM)
 		return -EINVAL;
-	if (size > PAGE_SIZE - ZHDR_SIZE_ALIGNED)
+	if (size > PAGE_SIZE - ZHDR_SIZE_ALIGNED - CHUNK_SIZE)
 		return -ENOSPC;
 	chunks = size_to_chunks(size);
 
 	spin_lock(&pool->lock);
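[Aside, not part of the commit] On the zbud.c hunk: size_to_chunks() rounds a request up to whole CHUNK_SIZE units, so tightening the bound by one CHUNK_SIZE means a zbud page can never be completely consumed by the header plus a single allocation; at least one chunk always stays free. Sketch of the arithmetic, with illustrative constants (the real values live in mm/zbud.c):

	#define EX_CHUNK_SHIFT	6			/* illustrative only */
	#define EX_CHUNK_SIZE	(1 << EX_CHUNK_SHIFT)

	/* ex_size_to_chunks(): round a byte count up to whole chunks,
	 * mirroring size_to_chunks() in mm/zbud.c */
	static int ex_size_to_chunks(int size)
	{
		return (size + EX_CHUNK_SIZE - 1) >> EX_CHUNK_SHIFT;
	}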