Diffstat (limited to 'mm/vmscan.c')
-rw-r--r--	mm/vmscan.c	62
1 file changed, 44 insertions(+), 18 deletions(-)
diff --git a/mm/vmscan.c b/mm/vmscan.c
index 0ea71e887bb..b0cd81c32de 100644
--- a/mm/vmscan.c
+++ b/mm/vmscan.c
@@ -70,7 +70,7 @@ struct scan_control {
unsigned int priority;
/* This context's GFP mask */
- unsigned int gfp_mask;
+ gfp_t gfp_mask;
int may_writepage;
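For context on the type change above: gfp_t exists so that sparse can catch allocation masks being mixed up with other integers; under sparse the typedef carries a __bitwise annotation that forbids silent conversion. A minimal userspace sketch of the mechanism (the macro fallbacks mirror include/linux/types.h of this era; the _SKETCH names and the 0xd0 value are illustrative):

/* Under sparse (__CHECKER__), __bitwise makes gfp_t a distinct type,
 * so passing a plain int where a GFP mask belongs becomes a warning.
 * With plain gcc the annotations compile away to nothing. */
#ifdef __CHECKER__
# define __bitwise __attribute__((bitwise))
# define __force   __attribute__((force))
#else
# define __bitwise
# define __force
#endif

typedef unsigned int __bitwise gfp_t;

#define GFP_KERNEL_SKETCH ((__force gfp_t)0xd0u)	/* illustrative value */

static gfp_t pick_mask(void)
{
	return GFP_KERNEL_SKETCH;	/* a bare 0xd0u here would warn under sparse */
}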
@@ -186,7 +186,7 @@ EXPORT_SYMBOL(remove_shrinker);
*
* Returns the number of slab objects which we shrunk.
*/
-static int shrink_slab(unsigned long scanned, unsigned int gfp_mask,
+static int shrink_slab(unsigned long scanned, gfp_t gfp_mask,
unsigned long lru_pages)
{
struct shrinker *shrinker;
@@ -201,13 +201,25 @@ static int shrink_slab(unsigned long scanned, unsigned int gfp_mask,
list_for_each_entry(shrinker, &shrinker_list, list) {
unsigned long long delta;
unsigned long total_scan;
+ unsigned long max_pass = (*shrinker->shrinker)(0, gfp_mask);
delta = (4 * scanned) / shrinker->seeks;
- delta *= (*shrinker->shrinker)(0, gfp_mask);
+ delta *= max_pass;
do_div(delta, lru_pages + 1);
shrinker->nr += delta;
- if (shrinker->nr < 0)
- shrinker->nr = LONG_MAX; /* It wrapped! */
+ if (shrinker->nr < 0) {
+ printk(KERN_ERR "%s: nr=%ld\n",
+ __FUNCTION__, shrinker->nr);
+ shrinker->nr = max_pass;
+ }
+
+ /*
+ * Avoid the risk of looping forever due to a too-large nr value:
+ * never try to free more than twice the estimated number of
+ * freeable entries.
+ */
+ if (shrinker->nr > max_pass * 2)
+ shrinker->nr = max_pass * 2;
total_scan = shrinker->nr;
shrinker->nr = 0;
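The rewritten bookkeeping above bounds shrinker->nr in both directions: a wrapped (negative) counter is reset to the shrinker's own size estimate rather than LONG_MAX, and the counter is capped at twice that estimate so a runaway value cannot keep the scan loop busy forever. A standalone sketch of the arithmetic (userspace C, with plain division standing in for do_div() and illustrative inputs):

#include <stdio.h>
#include <limits.h>

/* Userspace model of the clamped shrinker counter above. */
static long clamp_shrinker_nr(long nr, unsigned long scanned,
			      unsigned long seeks, unsigned long lru_pages,
			      unsigned long max_pass)
{
	unsigned long long delta;

	delta = (4ULL * scanned) / seeks;	/* work owed this pass */
	delta *= max_pass;			/* scale by object count */
	delta /= lru_pages + 1;			/* do_div() in the kernel */
	nr += (long)delta;

	if (nr < 0)				/* arithmetic wrapped */
		nr = (long)max_pass;
	if (nr > (long)(max_pass * 2))		/* cap runaway values */
		nr = (long)(max_pass * 2);
	return nr;
}

int main(void)
{
	/* A wrapped counter snaps back to the estimate, not LONG_MAX. */
	printf("%ld\n", clamp_shrinker_nr(LONG_MIN / 2, 1024, 2, 4096, 500));
	return 0;
}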
@@ -407,7 +419,7 @@ static int shrink_list(struct list_head *page_list, struct scan_control *sc)
if (PageWriteback(page))
goto keep_locked;
- referenced = page_referenced(page, 1, sc->priority <= 0);
+ referenced = page_referenced(page, 1);
/* In active use or really unfreeable? Activate it. */
if (referenced && page_mapping_inuse(page))
goto activate_locked;
@@ -417,7 +429,9 @@ static int shrink_list(struct list_head *page_list, struct scan_control *sc)
* Anonymous process memory has backing store?
* Try to allocate it some swap space here.
*/
- if (PageAnon(page) && !PageSwapCache(page) && sc->may_swap) {
+ if (PageAnon(page) && !PageSwapCache(page)) {
+ if (!sc->may_swap)
+ goto keep_locked;
if (!add_to_swap(page))
goto activate_locked;
}
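Note the behavioral shift in the hunk above: previously a clear sc->may_swap merely skipped the whole add_to_swap() block, so an anonymous page with no swap space still continued down the reclaim path; now such a page jumps straight to keep_locked. A condensed model of the two orderings (the boolean inputs and names are illustrative):

#include <stdbool.h>

enum step { CONTINUE_PATH, KEEP_LOCKED, ACTIVATE_LOCKED };

/* Old ordering: with may_swap clear the whole block was skipped, so an
 * anon page with no swap space still continued down the reclaim path. */
static enum step old_anon_check(bool anon, bool in_swapcache,
				bool may_swap, bool swap_added)
{
	if (anon && !in_swapcache && may_swap && !swap_added)
		return ACTIVATE_LOCKED;
	return CONTINUE_PATH;
}

/* New ordering: the may_swap test moves inside the anon check, so such
 * a page is kept locked instead of being scanned further. */
static enum step new_anon_check(bool anon, bool in_swapcache,
				bool may_swap, bool swap_added)
{
	if (anon && !in_swapcache) {
		if (!may_swap)
			return KEEP_LOCKED;
		if (!swap_added)
			return ACTIVATE_LOCKED;
	}
	return CONTINUE_PATH;
}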
@@ -511,14 +525,15 @@ static int shrink_list(struct list_head *page_list, struct scan_control *sc)
* PageDirty _after_ making sure that the page is freeable and
* not in use by anybody. (pagecache + us == 2)
*/
- if (page_count(page) != 2 || PageDirty(page)) {
- write_unlock_irq(&mapping->tree_lock);
- goto keep_locked;
- }
+ if (unlikely(page_count(page) != 2))
+ goto cannot_free;
+ smp_rmb();
+ if (unlikely(PageDirty(page)))
+ goto cannot_free;
#ifdef CONFIG_SWAP
if (PageSwapCache(page)) {
- swp_entry_t swap = { .val = page->private };
+ swp_entry_t swap = { .val = page_private(page) };
__delete_from_swap_cache(page);
write_unlock_irq(&mapping->tree_lock);
swap_free(swap);
@@ -538,6 +553,10 @@ free_it:
__pagevec_release_nonlru(&freed_pvec);
continue;
+cannot_free:
+ write_unlock_irq(&mapping->tree_lock);
+ goto keep_locked;
+
activate_locked:
SetPageActive(page);
pgactivate++;
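The split freeability test above separates the refcount check from the dirty check and orders them with smp_rmb(), so the dirty bit is read only after the page has been seen with no extra references; both failure paths now share the cannot_free label, which drops the tree_lock before keeping the page. A userspace analogue of the ordered double check, with a C11 acquire fence standing in for the kernel's smp_rmb() (the struct and names are illustrative):

#include <stdatomic.h>
#include <stdbool.h>

struct page_sketch {
	atomic_int  count;	/* pagecache + us == 2 when freeable */
	atomic_bool dirty;
};

/* Model of the reordered test: observe the refcount first, fence,
 * then observe the dirty bit, as smp_rmb() enforces above. */
static bool can_free_sketch(struct page_sketch *page)
{
	if (atomic_load_explicit(&page->count, memory_order_relaxed) != 2)
		return false;	/* someone else still holds a reference */

	atomic_thread_fence(memory_order_acquire);	/* kernel: smp_rmb() */

	if (atomic_load_explicit(&page->dirty, memory_order_relaxed))
		return false;	/* redirtied; needs writeback first */

	return true;
}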
@@ -749,7 +768,7 @@ refill_inactive_zone(struct zone *zone, struct scan_control *sc)
if (page_mapped(page)) {
if (!reclaim_mapped ||
(total_swap_pages == 0 && PageAnon(page)) ||
- page_referenced(page, 0, sc->priority <= 0)) {
+ page_referenced(page, 0)) {
list_add(&page->lru, &l_active);
continue;
}
@@ -921,7 +940,7 @@ shrink_caches(struct zone **zones, struct scan_control *sc)
* holds filesystem locks which prevent writeout, this might not work, and the
* allocation attempt will fail.
*/
-int try_to_free_pages(struct zone **zones, unsigned int gfp_mask)
+int try_to_free_pages(struct zone **zones, gfp_t gfp_mask)
{
int priority;
int ret = 0;
@@ -953,6 +972,8 @@ int try_to_free_pages(struct zone **zones, unsigned int gfp_mask)
sc.nr_reclaimed = 0;
sc.priority = priority;
sc.swap_cluster_max = SWAP_CLUSTER_MAX;
+ if (!priority)
+ disable_swap_token();
shrink_caches(zones, &sc);
shrink_slab(sc.nr_scanned, gfp_mask, lru_pages);
if (reclaim_state) {
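The new disable_swap_token() call fires only when reclaim has escalated to priority 0: at that point the swap-token holder loses its exemption from pageout, since otherwise its pages would keep looking referenced and the last-ditch scan could stall. A toy model of a token that stops being honored under maximum pressure (all names here are illustrative; the real token logic lives around mm/thrash.c):

#include <stdbool.h>
#include <stddef.h>

struct mm_sketch { int id; };

static struct mm_sketch *swap_token_mm_sketch;	/* current token holder */

static void disable_swap_token_sketch(void)
{
	swap_token_mm_sketch = NULL;		/* token protects nobody */
}

/* The token holder's pages are normally treated as recently used and
 * skipped; at priority 0 the token is cleared so nothing is exempt. */
static bool page_exempt(struct mm_sketch *owner, int priority)
{
	if (!priority)
		disable_swap_token_sketch();
	return owner != NULL && owner == swap_token_mm_sketch;
}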
@@ -1049,6 +1070,10 @@ loop_again:
int end_zone = 0; /* Inclusive. 0 = ZONE_DMA */
unsigned long lru_pages = 0;
+ /* The swap token gets in the way of swapout... */
+ if (!priority)
+ disable_swap_token();
+
all_zones_ok = 1;
if (nr_pages == 0) {
@@ -1067,7 +1092,7 @@ loop_again:
continue;
if (!zone_watermark_ok(zone, order,
- zone->pages_high, 0, 0, 0)) {
+ zone->pages_high, 0, 0)) {
end_zone = i;
goto scan;
}
@@ -1104,7 +1129,7 @@ scan:
if (nr_pages == 0) { /* Not software suspend */
if (!zone_watermark_ok(zone, order,
- zone->pages_high, end_zone, 0, 0))
+ zone->pages_high, end_zone, 0))
all_zones_ok = 0;
}
zone->temp_priority = priority;
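Both zone_watermark_ok() call sites in this loop (and the one in wakeup_kswapd() below) drop a trailing argument; this matches the consolidation of the old separate boolean flags into a single alloc_flags parameter, leaving (zone, order, mark, classzone_idx, alloc_flags). A condensed model of what the watermark test computes, ignoring lowmem reserves and the alloc_flags adjustments (field names loosely follow struct zone; everything here is a sketch):

struct zone_sketch {
	unsigned long free_pages;
	unsigned long nr_free[11];	/* free blocks per order */
};

static int zone_watermark_ok_sketch(const struct zone_sketch *z,
				    int order, unsigned long mark)
{
	long min = (long)mark;
	long free = (long)z->free_pages - (1L << order) + 1;
	int o;

	if (free <= min)
		return 0;
	for (o = 0; o < order; o++) {
		/* blocks of this order are too small for the request */
		free -= (long)(z->nr_free[o] << o);
		min >>= 1;		/* demand less headroom per order */
		if (free <= min)
			return 0;
	}
	return 1;
}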
@@ -1252,7 +1277,7 @@ void wakeup_kswapd(struct zone *zone, int order)
return;
pgdat = zone->zone_pgdat;
- if (zone_watermark_ok(zone, order, zone->pages_low, 0, 0, 0))
+ if (zone_watermark_ok(zone, order, zone->pages_low, 0, 0))
return;
if (pgdat->kswapd_max_order < order)
pgdat->kswapd_max_order = order;
@@ -1333,7 +1358,7 @@ module_init(kswapd_init)
/*
* Try to free up some pages from this zone through reclaim.
*/
-int zone_reclaim(struct zone *zone, unsigned int gfp_mask, unsigned int order)
+int zone_reclaim(struct zone *zone, gfp_t gfp_mask, unsigned int order)
{
struct scan_control sc;
int nr_pages = 1 << order;
@@ -1353,6 +1378,7 @@ int zone_reclaim(struct zone *zone, unsigned int gfp_mask, unsigned int order)
sc.nr_reclaimed = 0;
/* scan at the highest priority */
sc.priority = 0;
+ disable_swap_token();
if (nr_pages > SWAP_CLUSTER_MAX)
sc.swap_cluster_max = nr_pages;
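zone_reclaim() runs its single pass at priority 0, so it must also drop the swap token before scanning, mirroring the two call sites added earlier. Pulling the visible pieces together, a sketch of the scan_control setup pattern this hunk follows (only fields appearing in the diff are modeled; the _SKETCH default matches the era's SWAP_CLUSTER_MAX of 32 but is an assumption here):

struct scan_control_sketch {
	unsigned long nr_scanned;
	unsigned long nr_reclaimed;
	unsigned int priority;
	unsigned int swap_cluster_max;
};

#define SWAP_CLUSTER_MAX_SKETCH 32	/* assumed era default */

static void setup_zone_reclaim_sketch(struct scan_control_sketch *sc,
				      unsigned int order)
{
	unsigned int nr_pages = 1u << order;

	sc->nr_scanned = 0;
	sc->nr_reclaimed = 0;
	sc->priority = 0;		/* scan at the highest priority */
	/* disable_swap_token() would be called here, as in the hunk */
	sc->swap_cluster_max = nr_pages > SWAP_CLUSTER_MAX_SKETCH
				? nr_pages : SWAP_CLUSTER_MAX_SKETCH;
}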