-rw-r--r--  include/linux/mm.h       |  4
-rw-r--r--  include/linux/pagemap.h  | 12
-rw-r--r--  include/linux/swap.h     |  4
-rw-r--r--  ipc/shm.c                |  4
-rw-r--r--  mm/shmem.c               |  4
-rw-r--r--  mm/vmscan.c              | 89
6 files changed, 112 insertions, 5 deletions
diff --git a/include/linux/mm.h b/include/linux/mm.h
index c61ba10768e..40236290e2a 100644
--- a/include/linux/mm.h
+++ b/include/linux/mm.h
@@ -700,10 +700,10 @@ static inline int page_mapped(struct page *page)
extern void show_free_areas(void);
#ifdef CONFIG_SHMEM
-int shmem_lock(struct file *file, int lock, struct user_struct *user);
+extern int shmem_lock(struct file *file, int lock, struct user_struct *user);
#else
static inline int shmem_lock(struct file *file, int lock,
- struct user_struct *user)
+ struct user_struct *user)
{
return 0;
}
diff --git a/include/linux/pagemap.h b/include/linux/pagemap.h
index 09164d2c5c2..4b6c4d8d26b 100644
--- a/include/linux/pagemap.h
+++ b/include/linux/pagemap.h
@@ -40,14 +40,20 @@ static inline void mapping_set_unevictable(struct address_space *mapping)
set_bit(AS_UNEVICTABLE, &mapping->flags);
}
+static inline void mapping_clear_unevictable(struct address_space *mapping)
+{
+ clear_bit(AS_UNEVICTABLE, &mapping->flags);
+}
+
static inline int mapping_unevictable(struct address_space *mapping)
{
- if (mapping && (mapping->flags & AS_UNEVICTABLE))
- return 1;
- return 0;
+ if (likely(mapping))
+ return test_bit(AS_UNEVICTABLE, &mapping->flags);
+ return !!mapping;
}
#else
static inline void mapping_set_unevictable(struct address_space *mapping) { }
+static inline void mapping_clear_unevictable(struct address_space *mapping) { }
static inline int mapping_unevictable(struct address_space *mapping)
{
return 0;
}
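
For context, the reclaim side consumes this flag through page_evictable() in mm/vmscan.c, whose tail is visible as context at the bottom of this patch. A minimal sketch of that consumer, assuming the page_evictable() body introduced earlier in this series (only its closing "return 1;" appears in this diff):

/*
 * Sketch, not part of this patch: how page_evictable() is expected to
 * use the mapping flag.  page_mapping() returns NULL for anonymous
 * pages, which is why mapping_unevictable() tolerates a NULL argument.
 */
int page_evictable(struct page *page, struct vm_area_struct *vma)
{
	if (mapping_unevictable(page_mapping(page)))
		return 0;	/* mlock()ed or SHM_LOCKed mapping */

	/* ... further evictability tests ... */
	return 1;
}
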
diff --git a/include/linux/swap.h b/include/linux/swap.h
index a2113044d20..7edb4cbc29f 100644
--- a/include/linux/swap.h
+++ b/include/linux/swap.h
@@ -232,12 +232,16 @@ static inline int zone_reclaim(struct zone *z, gfp_t mask, unsigned int order)
#ifdef CONFIG_UNEVICTABLE_LRU
extern int page_evictable(struct page *page, struct vm_area_struct *vma);
+extern void scan_mapping_unevictable_pages(struct address_space *);
#else
static inline int page_evictable(struct page *page,
struct vm_area_struct *vma)
{
return 1;
}
+static inline void scan_mapping_unevictable_pages(struct address_space *mapping)
+{
+}
#endif
extern int kswapd_run(int nid);
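
The empty inline stub lets the new call site in mm/shmem.c (below) compile unchanged when CONFIG_UNEVICTABLE_LRU is not set; in that configuration SHM_LOCKed pages simply remain on the normal LRU lists.
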
diff --git a/ipc/shm.c b/ipc/shm.c
index e77ec698cf4..0add3fa5f54 100644
--- a/ipc/shm.c
+++ b/ipc/shm.c
@@ -737,6 +737,10 @@ asmlinkage long sys_shmctl(int shmid, int cmd, struct shmid_ds __user *buf)
case SHM_LOCK:
case SHM_UNLOCK:
{
+ struct file *uninitialized_var(shm_file);
+
+ lru_add_drain_all(); /* drain pagevecs to lru lists */
+
shp = shm_lock_check(ns, shmid);
if (IS_ERR(shp)) {
err = PTR_ERR(shp);
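
The lru_add_drain_all() is needed because freshly faulted pages may still sit in the per-CPU pagevecs with PageLRU not yet set; the rescue scan added to mm/vmscan.c below only moves pages it finds with PageLRU set, so the pagevecs are drained to the LRU lists first. A hypothetical userspace exercise of this path (illustrative only: SHM_LOCK needs CAP_IPC_LOCK or headroom under RLIMIT_MEMLOCK, and error handling is elided):

/* Hypothetical demo, not part of this patch. */
#include <string.h>
#include <sys/ipc.h>
#include <sys/shm.h>

int main(void)
{
	int id = shmget(IPC_PRIVATE, 1 << 20, IPC_CREAT | 0600);
	char *p = shmat(id, NULL, 0);

	memset(p, 0xaa, 1 << 20);	/* fault the pages in */
	shmctl(id, SHM_LOCK, NULL);	/* mapping flagged unevictable */
	shmctl(id, SHM_UNLOCK, NULL);	/* pages rescued to normal LRUs */

	shmdt(p);
	shmctl(id, IPC_RMID, NULL);
	return 0;
}
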
diff --git a/mm/shmem.c b/mm/shmem.c
index fc2ccf79a77..d38d7e61fcd 100644
--- a/mm/shmem.c
+++ b/mm/shmem.c
@@ -1477,12 +1477,16 @@ int shmem_lock(struct file *file, int lock, struct user_struct *user)
if (!user_shm_lock(inode->i_size, user))
goto out_nomem;
info->flags |= VM_LOCKED;
+ mapping_set_unevictable(file->f_mapping);
}
if (!lock && (info->flags & VM_LOCKED) && user) {
user_shm_unlock(inode->i_size, user);
info->flags &= ~VM_LOCKED;
+ mapping_clear_unevictable(file->f_mapping);
+ scan_mapping_unevictable_pages(file->f_mapping);
}
retval = 0;
+
out_nomem:
spin_unlock(&info->lock);
return retval;
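
Note the asymmetry in shmem_lock(): SHM_LOCK only sets the address_space flag and lets vmscan move the pages to the unevictable list lazily as it encounters them, while SHM_UNLOCK must actively rescue them with scan_mapping_unevictable_pages(), because nothing else will ever revisit a page parked on the unevictable list.
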
diff --git a/mm/vmscan.c b/mm/vmscan.c
index 9babfbc1ddc..dfb342e0db9 100644
--- a/mm/vmscan.c
+++ b/mm/vmscan.c
@@ -2346,4 +2346,93 @@ int page_evictable(struct page *page, struct vm_area_struct *vma)
return 1;
}
+
+/**
+ * check_move_unevictable_page - check page for evictability and move to appropriate zone lru list
+ * @page: page to check evictability and move to appropriate lru list
+ * @zone: zone page is in
+ *
+ * Checks a page for evictability and moves the page to the appropriate
+ * zone lru list.
+ *
+ * Restrictions: zone->lru_lock must be held, page must be on LRU and must
+ * have PageUnevictable set.
+ */
+static void check_move_unevictable_page(struct page *page, struct zone *zone)
+{
+ VM_BUG_ON(PageActive(page));
+
+retry:
+ ClearPageUnevictable(page);
+ if (page_evictable(page, NULL)) {
+ enum lru_list l = LRU_INACTIVE_ANON + page_is_file_cache(page);
+ __dec_zone_state(zone, NR_UNEVICTABLE);
+ list_move(&page->lru, &zone->lru[l].list);
+ __inc_zone_state(zone, NR_INACTIVE_ANON + l);
+ __count_vm_event(UNEVICTABLE_PGRESCUED);
+ } else {
+ /*
+ * rotate unevictable list
+ */
+ SetPageUnevictable(page);
+ list_move(&page->lru, &zone->lru[LRU_UNEVICTABLE].list);
+ if (page_evictable(page, NULL))
+ goto retry;
+ }
+}
+
+/**
+ * scan_mapping_unevictable_pages - scan an address space for evictable pages
+ * @mapping: struct address_space to scan for evictable pages
+ *
+ * Scan all pages in mapping. Check unevictable pages for
+ * evictability and move them to the appropriate zone lru list.
+ */
+void scan_mapping_unevictable_pages(struct address_space *mapping)
+{
+ pgoff_t next = 0;
+ pgoff_t end = (i_size_read(mapping->host) + PAGE_CACHE_SIZE - 1) >>
+ PAGE_CACHE_SHIFT;
+ struct zone *zone;
+ struct pagevec pvec;
+
+ if (mapping->nrpages == 0)
+ return;
+
+ pagevec_init(&pvec, 0);
+ while (next < end &&
+ pagevec_lookup(&pvec, mapping, next, PAGEVEC_SIZE)) {
+ int i;
+ int pg_scanned = 0;
+
+ zone = NULL;
+
+ for (i = 0; i < pagevec_count(&pvec); i++) {
+ struct page *page = pvec.pages[i];
+ pgoff_t page_index = page->index;
+ struct zone *pagezone = page_zone(page);
+
+ pg_scanned++;
+ if (page_index > next)
+ next = page_index;
+ next++;
+
+ if (pagezone != zone) {
+ if (zone)
+ spin_unlock_irq(&zone->lru_lock);
+ zone = pagezone;
+ spin_lock_irq(&zone->lru_lock);
+ }
+
+ if (PageLRU(page) && PageUnevictable(page))
+ check_move_unevictable_page(page, zone);
+ }
+ if (zone)
+ spin_unlock_irq(&zone->lru_lock);
+ pagevec_release(&pvec);
+
+ count_vm_events(UNEVICTABLE_PGSCANNED, pg_scanned);
+ }
+
+}
#endif
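
Two details in the new vmscan.c code are worth calling out. First, zone->lru_lock is kept across consecutive pages that lie in the same zone instead of being retaken per page. Second, the page_evictable() re-test after SetPageUnevictable() closes the race where the page becomes evictable again (for example, a racing SHM_UNLOCK clearing AS_UNEVICTABLE) between the first test and the list_move(); the retry keeps such a page from being stranded. The UNEVICTABLE_PGSCANNED and UNEVICTABLE_PGRESCUED events accounted here appear in /proc/vmstat (with CONFIG_UNEVICTABLE_LRU) as unevictable_pgs_scanned and unevictable_pgs_rescued; a throwaway observer to watch them around an SHM_UNLOCK (illustrative only):

/* Illustrative only: dump the unevictable_pgs_* counters. */
#include <stdio.h>
#include <string.h>

int main(void)
{
	char line[128];
	FILE *f = fopen("/proc/vmstat", "r");

	if (!f)
		return 1;
	while (fgets(line, sizeof(line), f))
		if (!strncmp(line, "unevictable_pgs_", 16))
			fputs(line, stdout);
	fclose(f);
	return 0;
}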