author | James Bottomley <JBottomley@Parallels.com> | 2013-06-26 23:08:22 -0700
committer | James Bottomley <JBottomley@Parallels.com> | 2013-06-26 23:08:22 -0700
commit | a9e94ec3504ead4b87a929bc57f5aa99a6590437 (patch)
tree | b6e7aaec0e844dc066e93296a85e5769542ef525 /mm/memcontrol.c
parent | 36a279686bc02d340eb213a9f2a0b67e50de64b4 (diff)
parent | 2884d4230867c8a46cf701214051e923301e7429 (diff)
Merge tag 'fcoe1' into fixes
This patch fixes a critical bug, introduced in 3.9, related to VLAN tagging of FCoE frames.
Diffstat (limited to 'mm/memcontrol.c')
-rw-r--r-- | mm/memcontrol.c | 28
1 file changed, 17 insertions, 11 deletions
diff --git a/mm/memcontrol.c b/mm/memcontrol.c
index cb1c9dedf9b..194721839cf 100644
--- a/mm/memcontrol.c
+++ b/mm/memcontrol.c
@@ -1199,7 +1199,6 @@ struct mem_cgroup *mem_cgroup_iter(struct mem_cgroup *root,
 
 			mz = mem_cgroup_zoneinfo(root, nid, zid);
 			iter = &mz->reclaim_iter[reclaim->priority];
-			last_visited = iter->last_visited;
 			if (prev && reclaim->generation != iter->generation) {
 				iter->last_visited = NULL;
 				goto out_unlock;
@@ -1218,13 +1217,12 @@ struct mem_cgroup *mem_cgroup_iter(struct mem_cgroup *root,
 			 * is alive.
 			 */
 			dead_count = atomic_read(&root->dead_count);
-			smp_rmb();
-			last_visited = iter->last_visited;
-			if (last_visited) {
-				if ((dead_count != iter->last_dead_count) ||
-					!css_tryget(&last_visited->css)) {
+			if (dead_count == iter->last_dead_count) {
+				smp_rmb();
+				last_visited = iter->last_visited;
+				if (last_visited &&
+				    !css_tryget(&last_visited->css))
 					last_visited = NULL;
-				}
 			}
 		}
 
@@ -3141,8 +3139,6 @@ int memcg_update_cache_size(struct kmem_cache *s, int num_groups)
 			return -ENOMEM;
 		}
 
-		INIT_WORK(&s->memcg_params->destroy,
-				kmem_cache_destroy_work_func);
 		s->memcg_params->is_root_cache = true;
 
 		/*
@@ -4108,8 +4104,6 @@ __mem_cgroup_uncharge_common(struct page *page, enum charge_type ctype,
 	if (mem_cgroup_disabled())
 		return NULL;
 
-	VM_BUG_ON(PageSwapCache(page));
-
 	if (PageTransHuge(page)) {
 		nr_pages <<= compound_order(page);
 		VM_BUG_ON(!PageTransHuge(page));
@@ -4205,6 +4199,18 @@ void mem_cgroup_uncharge_page(struct page *page)
 	if (page_mapped(page))
 		return;
 	VM_BUG_ON(page->mapping && !PageAnon(page));
+	/*
+	 * If the page is in swap cache, uncharge should be deferred
+	 * to the swap path, which also properly accounts swap usage
+	 * and handles memcg lifetime.
+	 *
+	 * Note that this check is not stable and reclaim may add the
+	 * page to swap cache at any time after this.  However, if the
+	 * page is not in swap cache by the time page->mapcount hits
+	 * 0, there won't be any page table references to the swap
+	 * slot, and reclaim will free it and not actually write the
+	 * page to disk.
+	 */
 	if (PageSwapCache(page))
 		return;
 	__mem_cgroup_uncharge_common(page, MEM_CGROUP_CHARGE_TYPE_ANON, false);
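For readers outside the kernel tree, the shape of the mem_cgroup_iter() change above can be illustrated in isolation. The sketch below is a minimal userspace analogue, not the kernel implementation: obj, iter_cache, obj_tryget() and iter_load_cached() are hypothetical names standing in for the css/iterator machinery, and plain C11 atomics stand in for atomic_read(), smp_rmb() and css_tryget(). It demonstrates the ordering the patch switches to: validate the dead-count snapshot first, and only then load and try to pin the cached pointer.

```c
/*
 * Userspace sketch (not kernel code) of the cached-pointer validation
 * pattern in the mem_cgroup_iter() hunk above: trust iter->last_visited
 * only if the global dead count still matches the snapshot taken when
 * the pointer was cached, and even then require a successful reference
 * grab before using it.
 */
#include <stdatomic.h>
#include <stdbool.h>
#include <stddef.h>
#include <stdio.h>

struct obj {
	atomic_int refcount;		/* 0 means the object is being torn down */
};

struct iter_cache {
	struct obj *last_visited;	/* may be stale */
	int last_dead_count;		/* snapshot taken when last_visited was cached */
};

static atomic_int dead_count;		/* bumped whenever any object is destroyed */

/* Analogue of css_tryget(): only succeed while the object is still alive. */
static bool obj_tryget(struct obj *o)
{
	int ref = atomic_load(&o->refcount);

	while (ref > 0) {
		if (atomic_compare_exchange_weak(&o->refcount, &ref, ref + 1))
			return true;
	}
	return false;
}

/*
 * Mirror of the patched logic: check the dead count before touching the
 * cached pointer, then confirm the object is still alive.  The kernel
 * version additionally orders the two loads with smp_rmb().
 */
static struct obj *iter_load_cached(struct iter_cache *iter)
{
	struct obj *last_visited = NULL;

	if (atomic_load(&dead_count) == iter->last_dead_count) {
		last_visited = iter->last_visited;
		if (last_visited && !obj_tryget(last_visited))
			last_visited = NULL;
	}
	return last_visited;
}

int main(void)
{
	struct obj a;
	struct iter_cache iter;

	atomic_init(&a.refcount, 1);
	iter.last_visited = &a;
	iter.last_dead_count = atomic_load(&dead_count);

	/* Snapshot still matches: the cached pointer is returned pinned. */
	printf("cached pointer valid: %p\n", (void *)iter_load_cached(&iter));

	/* Simulate a destruction elsewhere in the hierarchy. */
	atomic_fetch_add(&dead_count, 1);

	/* Snapshot no longer matches: the stale pointer is never touched. */
	printf("after destruction:    %p\n", (void *)iter_load_cached(&iter));
	return 0;
}
```

The design point visible in the diff itself: the old code loaded last_visited unconditionally and only afterwards compared dead counts, while the new code checks the dead count first and only dereferences the cached pointer when the snapshot can still vouch for it.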