Diffstat (limited to 'mm/memcontrol.c')
-rw-r--r--	mm/memcontrol.c	338
1 file changed, 238 insertions(+), 100 deletions(-)
diff --git a/mm/memcontrol.c b/mm/memcontrol.c
index 2b552224f5c..0f1d92163f3 100644
--- a/mm/memcontrol.c
+++ b/mm/memcontrol.c
@@ -49,6 +49,7 @@
#include <linux/fs.h>
#include <linux/seq_file.h>
#include <linux/vmalloc.h>
+#include <linux/vmpressure.h>
#include <linux/mm_inline.h>
#include <linux/page_cgroup.h>
#include <linux/cpu.h>
@@ -152,8 +153,13 @@ struct mem_cgroup_stat_cpu {
};
struct mem_cgroup_reclaim_iter {
- /* css_id of the last scanned hierarchy member */
- int position;
+ /*
+ * last scanned hierarchy member. Valid only if last_dead_count
+ * matches memcg->dead_count of the hierarchy root group.
+ */
+ struct mem_cgroup *last_visited;
+ unsigned long last_dead_count;
+
/* scan generation, increased every round-trip */
unsigned int generation;
};
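The cached last_visited pointer is only trustworthy while last_dead_count still matches the dead_count of the hierarchy root. A minimal sketch of the check a reader has to perform before dereferencing the cache; the helper name is hypothetical, the real check is open-coded in mem_cgroup_iter() later in this diff:

/*
 * Sketch only: returns the cached position with an elevated css reference,
 * or NULL if the cache is stale or the group is being destroyed.
 */
static struct mem_cgroup *
reclaim_iter_load_position(struct mem_cgroup_reclaim_iter *iter,
			   struct mem_cgroup *root)
{
	unsigned long dead_count = atomic_read(&root->dead_count);
	struct mem_cgroup *last;

	smp_rmb();		/* pairs with smp_wmb() in mem_cgroup_iter() */
	last = iter->last_visited;
	if (!last)
		return NULL;
	if (dead_count != iter->last_dead_count || !css_tryget(&last->css))
		return NULL;	/* some group in the hierarchy died */
	return last;
}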
@@ -256,6 +262,9 @@ struct mem_cgroup {
*/
struct res_counter res;
+ /* vmpressure notifications */
+ struct vmpressure vmpressure;
+
union {
/*
* the counter to account for mem+swap usage.
@@ -335,6 +344,7 @@ struct mem_cgroup {
struct mem_cgroup_stat_cpu nocpu_base;
spinlock_t pcp_counter_lock;
+ atomic_t dead_count;
#if defined(CONFIG_MEMCG_KMEM) && defined(CONFIG_INET)
struct tcp_memcontrol tcp_mem;
#endif
@@ -353,6 +363,7 @@ struct mem_cgroup {
atomic_t numainfo_events;
atomic_t numainfo_updating;
#endif
+
/*
* Per cgroup active and inactive list, similar to the
* per zone LRU lists.
@@ -504,6 +515,24 @@ struct mem_cgroup *mem_cgroup_from_css(struct cgroup_subsys_state *s)
return container_of(s, struct mem_cgroup, css);
}
+/* Some nice accessors for the vmpressure. */
+struct vmpressure *memcg_to_vmpressure(struct mem_cgroup *memcg)
+{
+ if (!memcg)
+ memcg = root_mem_cgroup;
+ return &memcg->vmpressure;
+}
+
+struct cgroup_subsys_state *vmpressure_to_css(struct vmpressure *vmpr)
+{
+ return &container_of(vmpr, struct mem_cgroup, vmpressure)->css;
+}
+
+struct vmpressure *css_to_vmpressure(struct cgroup_subsys_state *css)
+{
+ return &mem_cgroup_from_css(css)->vmpressure;
+}
+
static inline bool mem_cgroup_is_root(struct mem_cgroup *memcg)
{
return (memcg == root_mem_cgroup);
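These three accessors form a round trip between a memcg and the vmpressure state embedded in it, with NULL standing in for the root group. A sketch of how a reclaim path might feed results through them; the vmpressure() entry point and its signature come from the companion mm/vmpressure.c patch and are assumptions here, as is the wrapper name:

/* Sketch: report scan/reclaim results for @memcg (NULL means root). */
static void memcg_report_pressure(struct mem_cgroup *memcg, gfp_t gfp,
				  unsigned long scanned,
				  unsigned long reclaimed)
{
	struct vmpressure *vmpr = memcg_to_vmpressure(memcg);

	/* identity round trip: css_to_vmpressure(vmpressure_to_css(v)) == v */
	VM_BUG_ON(css_to_vmpressure(vmpressure_to_css(vmpr)) != vmpr);

	vmpressure(gfp, memcg, scanned, reclaimed);	/* assumed entry point */
}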
@@ -1067,6 +1096,51 @@ struct mem_cgroup *try_get_mem_cgroup_from_mm(struct mm_struct *mm)
return memcg;
}
+/*
+ * Returns the next alive memcg in a pre-order walk (with an elevated css
+ * reference count), or NULL once the whole subtree under @root has been
+ * visited.
+ *
+ * Helper function to be used by mem_cgroup_iter().
+ */
+static struct mem_cgroup *__mem_cgroup_iter_next(struct mem_cgroup *root,
+ struct mem_cgroup *last_visited)
+{
+ struct cgroup *prev_cgroup, *next_cgroup;
+
+ /*
+ * Root is not visited by cgroup iterators so it needs an
+ * explicit visit.
+ */
+ if (!last_visited)
+ return root;
+
+ prev_cgroup = (last_visited == root) ? NULL
+ : last_visited->css.cgroup;
+skip_node:
+ next_cgroup = cgroup_next_descendant_pre(
+ prev_cgroup, root->css.cgroup);
+
+ /*
+ * Even if we found a group we have to make sure it is
+	 * alive. A group whose css_tryget() fails is being destroyed
+	 * and must be skipped so that the tree walk can continue.
+ * last_visited css is safe to use because it is
+ * protected by css_get and the tree walk is rcu safe.
+ */
+ if (next_cgroup) {
+ struct mem_cgroup *mem = mem_cgroup_from_cont(
+ next_cgroup);
+ if (css_tryget(&mem->css))
+ return mem;
+ else {
+ prev_cgroup = next_cgroup;
+ goto skip_node;
+ }
+ }
+
+ return NULL;
+}
+
/**
* mem_cgroup_iter - iterate over memory cgroup hierarchy
* @root: hierarchy root
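The new __mem_cgroup_iter_next() above is a thin wrapper around the generic pre-order descendant walk. A sketch of the underlying primitive, which skips the root itself and must run under rcu_read_lock(); visit() is a hypothetical callback:

/* Sketch: visit every descendant of @root (excluding @root) in pre-order. */
static void walk_memcg_subtree(struct mem_cgroup *root)
{
	struct cgroup *pos = NULL;

	rcu_read_lock();
	while ((pos = cgroup_next_descendant_pre(pos, root->css.cgroup)))
		visit(mem_cgroup_from_cont(pos));	/* hypothetical */
	rcu_read_unlock();
}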
@@ -1089,7 +1163,8 @@ struct mem_cgroup *mem_cgroup_iter(struct mem_cgroup *root,
struct mem_cgroup_reclaim_cookie *reclaim)
{
struct mem_cgroup *memcg = NULL;
- int id = 0;
+ struct mem_cgroup *last_visited = NULL;
+ unsigned long uninitialized_var(dead_count);
if (mem_cgroup_disabled())
return NULL;
@@ -1098,20 +1173,17 @@ struct mem_cgroup *mem_cgroup_iter(struct mem_cgroup *root,
root = root_mem_cgroup;
if (prev && !reclaim)
- id = css_id(&prev->css);
-
- if (prev && prev != root)
- css_put(&prev->css);
+ last_visited = prev;
if (!root->use_hierarchy && root != root_mem_cgroup) {
if (prev)
- return NULL;
+ goto out_css_put;
return root;
}
+ rcu_read_lock();
while (!memcg) {
struct mem_cgroup_reclaim_iter *uninitialized_var(iter);
- struct cgroup_subsys_state *css;
if (reclaim) {
int nid = zone_to_nid(reclaim->zone);
@@ -1120,31 +1192,60 @@ struct mem_cgroup *mem_cgroup_iter(struct mem_cgroup *root,
mz = mem_cgroup_zoneinfo(root, nid, zid);
iter = &mz->reclaim_iter[reclaim->priority];
- if (prev && reclaim->generation != iter->generation)
- return NULL;
- id = iter->position;
+ last_visited = iter->last_visited;
+ if (prev && reclaim->generation != iter->generation) {
+ iter->last_visited = NULL;
+ goto out_unlock;
+ }
+
+ /*
+ * If the dead_count mismatches, a destruction
+ * has happened or is happening concurrently.
+ * If the dead_count matches, a destruction
+ * might still happen concurrently, but since
+ * we checked under RCU, that destruction
+ * won't free the object until we release the
+ * RCU reader lock. Thus, the dead_count
+ * check verifies the pointer is still valid,
+ * css_tryget() verifies the cgroup pointed to
+ * is alive.
+ */
+ dead_count = atomic_read(&root->dead_count);
+ smp_rmb();
+ last_visited = iter->last_visited;
+ if (last_visited) {
+ if ((dead_count != iter->last_dead_count) ||
+ !css_tryget(&last_visited->css)) {
+ last_visited = NULL;
+ }
+ }
}
- rcu_read_lock();
- css = css_get_next(&mem_cgroup_subsys, id + 1, &root->css, &id);
- if (css) {
- if (css == &root->css || css_tryget(css))
- memcg = mem_cgroup_from_css(css);
- } else
- id = 0;
- rcu_read_unlock();
+ memcg = __mem_cgroup_iter_next(root, last_visited);
if (reclaim) {
- iter->position = id;
- if (!css)
+ if (last_visited)
+ css_put(&last_visited->css);
+
+ iter->last_visited = memcg;
+ smp_wmb();
+ iter->last_dead_count = dead_count;
+
+ if (!memcg)
iter->generation++;
else if (!prev && memcg)
reclaim->generation = iter->generation;
}
- if (prev && !css)
- return NULL;
+ if (prev && !memcg)
+ goto out_unlock;
}
+out_unlock:
+ rcu_read_unlock();
+out_css_put:
+ if (prev && prev != root)
+ css_put(&prev->css);
+
return memcg;
}
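For reference, the shape of a typical caller of the reworked iterator. Every memcg handed back carries a css reference that is released either by passing it back as @prev on the next call or through mem_cgroup_iter_break(); the function below is a sketch and its stop condition is hypothetical:

/* Sketch of the usual reclaim loop built on top of mem_cgroup_iter(). */
static void reclaim_hierarchy(struct mem_cgroup *root, struct zone *zone,
			      int priority)
{
	struct mem_cgroup_reclaim_cookie reclaim = {
		.zone = zone,
		.priority = priority,
	};
	struct mem_cgroup *memcg;

	memcg = mem_cgroup_iter(root, NULL, &reclaim);
	do {
		/* ... shrink the LRU lists of @memcg ... */
		if (enough_reclaimed()) {	/* hypothetical stop condition */
			mem_cgroup_iter_break(root, memcg);
			break;
		}
		memcg = mem_cgroup_iter(root, memcg, &reclaim);
	} while (memcg);
}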
@@ -1686,11 +1787,11 @@ static void mem_cgroup_out_of_memory(struct mem_cgroup *memcg, gfp_t gfp_mask,
struct task_struct *chosen = NULL;
/*
- * If current has a pending SIGKILL, then automatically select it. The
- * goal is to allow it to allocate so that it may quickly exit and free
- * its memory.
+ * If current has a pending SIGKILL or is exiting, then automatically
+ * select it. The goal is to allow it to allocate so that it may
+ * quickly exit and free its memory.
*/
- if (fatal_signal_pending(current)) {
+ if (fatal_signal_pending(current) || current->flags & PF_EXITING) {
set_thread_flag(TIF_MEMDIE);
return;
}
@@ -3114,12 +3215,12 @@ void memcg_release_cache(struct kmem_cache *s)
root = s->memcg_params->root_cache;
root->memcg_params->memcg_caches[id] = NULL;
- mem_cgroup_put(memcg);
mutex_lock(&memcg->slab_caches_mutex);
list_del(&s->memcg_params->list);
mutex_unlock(&memcg->slab_caches_mutex);
+ mem_cgroup_put(memcg);
out:
kfree(s->memcg_params);
}
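The hunk above moves the mem_cgroup_put() past the slab_caches_mutex critical section, presumably because the put may drop the final reference. A two-line sketch of the hazard in the old ordering:

mem_cgroup_put(memcg);			/* may free @memcg right here ...   */
mutex_lock(&memcg->slab_caches_mutex);	/* ... turning this lock access     */
					/* into a use-after-free            */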
@@ -3220,52 +3321,53 @@ void mem_cgroup_destroy_cache(struct kmem_cache *cachep)
schedule_work(&cachep->memcg_params->destroy);
}
-static char *memcg_cache_name(struct mem_cgroup *memcg, struct kmem_cache *s)
-{
- char *name;
- struct dentry *dentry;
-
- rcu_read_lock();
- dentry = rcu_dereference(memcg->css.cgroup->dentry);
- rcu_read_unlock();
-
- BUG_ON(dentry == NULL);
-
- name = kasprintf(GFP_KERNEL, "%s(%d:%s)", s->name,
- memcg_cache_id(memcg), dentry->d_name.name);
-
- return name;
-}
+/*
+ * This lock protects updaters, not readers. We want readers to be as fast as
+ * they can, and they will either see NULL or a valid cache value. Our model
+ * allows them to see NULL, in which case the root memcg will be selected.
+ *
+ * We need this lock because multiple allocations to the same cache can
+ * span more than one worker. Only one of them can create the cache.
+ */
+static DEFINE_MUTEX(memcg_cache_mutex);
+/*
+ * Called with memcg_cache_mutex held
+ */
static struct kmem_cache *kmem_cache_dup(struct mem_cgroup *memcg,
struct kmem_cache *s)
{
- char *name;
struct kmem_cache *new;
+ static char *tmp_name = NULL;
- name = memcg_cache_name(memcg, s);
- if (!name)
- return NULL;
+ lockdep_assert_held(&memcg_cache_mutex);
+
+ /*
+	 * kmem_cache_create_memcg duplicates the given name, and
+	 * cgroup_name() for this name requires RCU context.
+	 * This static temporary buffer is used to avoid a pointless
+	 * short-lived allocation.
+ */
+ if (!tmp_name) {
+ tmp_name = kmalloc(PATH_MAX, GFP_KERNEL);
+ if (!tmp_name)
+ return NULL;
+ }
+
+ rcu_read_lock();
+ snprintf(tmp_name, PATH_MAX, "%s(%d:%s)", s->name,
+ memcg_cache_id(memcg), cgroup_name(memcg->css.cgroup));
+ rcu_read_unlock();
- new = kmem_cache_create_memcg(memcg, name, s->object_size, s->align,
+ new = kmem_cache_create_memcg(memcg, tmp_name, s->object_size, s->align,
(s->flags & ~SLAB_PANIC), s->ctor, s);
if (new)
new->allocflags |= __GFP_KMEMCG;
- kfree(name);
return new;
}
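kmem_cache_dup() now shares a single static name buffer, so it relies on the caller holding memcg_cache_mutex (hence the lockdep assertion). A sketch of the expected call pattern; the wrapper name is hypothetical and the real caller is memcg_create_kmem_cache() below:

static struct kmem_cache *dup_cache_locked(struct mem_cgroup *memcg,
					   struct kmem_cache *cachep)
{
	struct kmem_cache *new_cachep;

	mutex_lock(&memcg_cache_mutex);		/* serializes tmp_name users */
	new_cachep = kmem_cache_dup(memcg, cachep);
	mutex_unlock(&memcg_cache_mutex);

	return new_cachep;			/* NULL on failure */
}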
-/*
- * This lock protects updaters, not readers. We want readers to be as fast as
- * they can, and they will either see NULL or a valid cache value. Our model
- * allow them to see NULL, in which case the root memcg will be selected.
- *
- * We need this lock because multiple allocations to the same cache from a non
- * will span more than one worker. Only one of them can create the cache.
- */
-static DEFINE_MUTEX(memcg_cache_mutex);
static struct kmem_cache *memcg_create_kmem_cache(struct mem_cgroup *memcg,
struct kmem_cache *cachep)
{
@@ -3382,7 +3484,6 @@ static void memcg_create_cache_work_func(struct work_struct *w)
/*
* Enqueue the creation of a per-memcg kmem_cache.
- * Called with rcu_read_lock.
*/
static void __memcg_create_cache_enqueue(struct mem_cgroup *memcg,
struct kmem_cache *cachep)
@@ -3390,12 +3491,8 @@ static void __memcg_create_cache_enqueue(struct mem_cgroup *memcg,
struct create_work *cw;
cw = kmalloc(sizeof(struct create_work), GFP_NOWAIT);
- if (cw == NULL)
- return;
-
- /* The corresponding put will be done in the workqueue. */
- if (!css_tryget(&memcg->css)) {
- kfree(cw);
+ if (cw == NULL) {
+ css_put(&memcg->css);
return;
}
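The css_put() on the allocation-failure path reflects a changed convention: the caller now pins the memcg before enqueueing (see the __memcg_kmem_get_cache() hunk further down), and the reference is released either here or by the worker once the cache has been created. A sketch of the caller side of that hand-off; the wrapper name is hypothetical:

static void try_enqueue_cache_creation(struct mem_cgroup *memcg,
				       struct kmem_cache *cachep)
{
	if (!css_tryget(&memcg->css))		/* memcg is on its way out */
		return;
	/*
	 * Ownership of the css reference passes to the enqueue path: it is
	 * dropped there if the work item cannot be allocated, otherwise by
	 * the workqueue callback after the cache has been created.
	 */
	memcg_create_cache_enqueue(memcg, cachep);
}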
@@ -3451,10 +3548,9 @@ struct kmem_cache *__memcg_kmem_get_cache(struct kmem_cache *cachep,
rcu_read_lock();
memcg = mem_cgroup_from_task(rcu_dereference(current->mm->owner));
- rcu_read_unlock();
if (!memcg_can_account_kmem(memcg))
- return cachep;
+ goto out;
idx = memcg_cache_id(memcg);
@@ -3463,29 +3559,38 @@ struct kmem_cache *__memcg_kmem_get_cache(struct kmem_cache *cachep,
* code updating memcg_caches will issue a write barrier to match this.
*/
read_barrier_depends();
- if (unlikely(cachep->memcg_params->memcg_caches[idx] == NULL)) {
- /*
- * If we are in a safe context (can wait, and not in interrupt
- * context), we could be be predictable and return right away.
- * This would guarantee that the allocation being performed
- * already belongs in the new cache.
- *
- * However, there are some clashes that can arrive from locking.
- * For instance, because we acquire the slab_mutex while doing
- * kmem_cache_dup, this means no further allocation could happen
- * with the slab_mutex held.
- *
- * Also, because cache creation issue get_online_cpus(), this
- * creates a lock chain: memcg_slab_mutex -> cpu_hotplug_mutex,
- * that ends up reversed during cpu hotplug. (cpuset allocates
- * a bunch of GFP_KERNEL memory during cpuup). Due to all that,
- * better to defer everything.
- */
- memcg_create_cache_enqueue(memcg, cachep);
- return cachep;
+ if (likely(cachep->memcg_params->memcg_caches[idx])) {
+ cachep = cachep->memcg_params->memcg_caches[idx];
+ goto out;
}
- return cachep->memcg_params->memcg_caches[idx];
+ /* The corresponding put will be done in the workqueue. */
+ if (!css_tryget(&memcg->css))
+ goto out;
+ rcu_read_unlock();
+
+ /*
+ * If we are in a safe context (can wait, and not in interrupt
+	 * context), we could be predictable and return right away.
+ * This would guarantee that the allocation being performed
+ * already belongs in the new cache.
+ *
+ * However, there are some clashes that can arrive from locking.
+ * For instance, because we acquire the slab_mutex while doing
+ * kmem_cache_dup, this means no further allocation could happen
+ * with the slab_mutex held.
+ *
+	 * Also, because cache creation issues get_online_cpus(), this
+ * creates a lock chain: memcg_slab_mutex -> cpu_hotplug_mutex,
+ * that ends up reversed during cpu hotplug. (cpuset allocates
+ * a bunch of GFP_KERNEL memory during cpuup). Due to all that,
+ * better to defer everything.
+ */
+ memcg_create_cache_enqueue(memcg, cachep);
+ return cachep;
+out:
+ rcu_read_unlock();
+ return cachep;
}
EXPORT_SYMBOL(__memcg_kmem_get_cache);
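The restructured lookup keeps the RCU read lock held until css_tryget() has succeeded (or the fast path bails out), so the memcg derived from current->mm->owner cannot be freed in between. A reduced sketch of that pattern; the helper name is hypothetical:

static struct mem_cgroup *get_current_memcg(void)
{
	struct mem_cgroup *memcg;

	rcu_read_lock();
	memcg = mem_cgroup_from_task(rcu_dereference(current->mm->owner));
	if (!css_tryget(&memcg->css))		/* group is being destroyed */
		memcg = NULL;
	rcu_read_unlock();

	return memcg;			/* caller drops it with css_put() */
}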
@@ -4947,9 +5052,6 @@ static ssize_t mem_cgroup_read(struct cgroup *cont, struct cftype *cft,
type = MEMFILE_TYPE(cft->private);
name = MEMFILE_ATTR(cft->private);
- if (!do_swap_account && type == _MEMSWAP)
- return -EOPNOTSUPP;
-
switch (type) {
case _MEM:
if (name == RES_USAGE)
@@ -5084,9 +5186,6 @@ static int mem_cgroup_write(struct cgroup *cont, struct cftype *cft,
type = MEMFILE_TYPE(cft->private);
name = MEMFILE_ATTR(cft->private);
- if (!do_swap_account && type == _MEMSWAP)
- return -EOPNOTSUPP;
-
switch (name) {
case RES_LIMIT:
if (mem_cgroup_is_root(memcg)) { /* Can't set limit on root */
@@ -5163,9 +5262,6 @@ static int mem_cgroup_reset(struct cgroup *cont, unsigned int event)
type = MEMFILE_TYPE(event);
name = MEMFILE_ATTR(event);
- if (!do_swap_account && type == _MEMSWAP)
- return -EOPNOTSUPP;
-
switch (name) {
case RES_MAX_USAGE:
if (type == _MEM)
@@ -5744,7 +5840,7 @@ static int memcg_init_kmem(struct mem_cgroup *memcg, struct cgroup_subsys *ss)
return ret;
return mem_cgroup_sockets_init(memcg, ss);
-};
+}
static void kmem_cgroup_destroy(struct mem_cgroup *memcg)
{
@@ -5817,6 +5913,7 @@ static struct cftype mem_cgroup_files[] = {
},
{
.name = "use_hierarchy",
+ .flags = CFTYPE_INSANE,
.write_u64 = mem_cgroup_hierarchy_write,
.read_u64 = mem_cgroup_hierarchy_read,
},
@@ -5838,6 +5935,11 @@ static struct cftype mem_cgroup_files[] = {
.unregister_event = mem_cgroup_oom_unregister_event,
.private = MEMFILE_PRIVATE(_OOM_TYPE, OOM_CONTROL),
},
+ {
+ .name = "pressure_level",
+ .register_event = vmpressure_register_event,
+ .unregister_event = vmpressure_unregister_event,
+ },
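The new memory.pressure_level file is consumed through the cgroup eventfd interface: userspace writes "<event_fd> <pressure_level_fd> <level>" to cgroup.event_control, where <level> is low, medium, or critical. A userspace sketch under those assumptions; the cgroup path is an example and error handling is omitted:

#include <stdio.h>
#include <string.h>
#include <unistd.h>
#include <fcntl.h>
#include <stdint.h>
#include <sys/eventfd.h>

int main(void)
{
	int efd = eventfd(0, 0);
	int lfd = open("/sys/fs/cgroup/memory/foo/memory.pressure_level",
		       O_RDONLY);
	int cfd = open("/sys/fs/cgroup/memory/foo/cgroup.event_control",
		       O_WRONLY);
	char line[64];
	uint64_t count;

	/* register: "<event_fd> <pressure_level_fd> <level>" */
	snprintf(line, sizeof(line), "%d %d low", efd, lfd);
	write(cfd, line, strlen(line) + 1);

	/* each read returns once one or more pressure events have fired */
	while (read(efd, &count, sizeof(count)) == sizeof(count))
		printf("memory pressure event (count=%llu)\n",
		       (unsigned long long)count);
	return 0;
}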
#ifdef CONFIG_NUMA
{
.name = "numa_stat",
@@ -6119,6 +6221,7 @@ mem_cgroup_css_alloc(struct cgroup *cont)
memcg->move_charge_at_immigrate = 0;
mutex_init(&memcg->thresholds_lock);
spin_lock_init(&memcg->move_lock);
+ vmpressure_init(&memcg->vmpressure);
return &memcg->css;
@@ -6184,10 +6287,29 @@ mem_cgroup_css_online(struct cgroup *cont)
return error;
}
+/*
+ * Announce to all parents that a group from their hierarchy is gone.
+ */
+static void mem_cgroup_invalidate_reclaim_iterators(struct mem_cgroup *memcg)
+{
+ struct mem_cgroup *parent = memcg;
+
+ while ((parent = parent_mem_cgroup(parent)))
+ atomic_inc(&parent->dead_count);
+
+ /*
+	 * If the root memcg is not hierarchical, we have to check it
+	 * explicitly.
+ */
+ if (!root_mem_cgroup->use_hierarchy)
+ atomic_inc(&root_mem_cgroup->dead_count);
+}
+
static void mem_cgroup_css_offline(struct cgroup *cont)
{
struct mem_cgroup *memcg = mem_cgroup_from_cont(cont);
+ mem_cgroup_invalidate_reclaim_iterators(memcg);
mem_cgroup_reparent_charges(memcg);
mem_cgroup_destroy_all_caches(memcg);
}
@@ -6787,6 +6909,21 @@ static void mem_cgroup_move_task(struct cgroup *cont,
}
#endif
+/*
+ * Cgroup retains root cgroups across [un]mount cycles, making it necessary
+ * to verify the sane_behavior flag on each mount attempt.
+ */
+static void mem_cgroup_bind(struct cgroup *root)
+{
+ /*
+ * use_hierarchy is forced with sane_behavior. cgroup core
+ * guarantees that @root doesn't have any children, so turning it
+ * on for the root memcg is enough.
+ */
+ if (cgroup_sane_behavior(root))
+ mem_cgroup_from_cont(root)->use_hierarchy = true;
+}
+
struct cgroup_subsys mem_cgroup_subsys = {
.name = "memory",
.subsys_id = mem_cgroup_subsys_id,
@@ -6797,6 +6934,7 @@ struct cgroup_subsys mem_cgroup_subsys = {
.can_attach = mem_cgroup_can_attach,
.cancel_attach = mem_cgroup_cancel_attach,
.attach = mem_cgroup_move_task,
+ .bind = mem_cgroup_bind,
.base_cftypes = mem_cgroup_files,
.early_init = 0,
.use_id = 1,