mm: fork: fix kernel_stack memcg stats for various stack implementations
Depending on CONFIG_VMAP_STACK and the THREAD_SIZE / PAGE_SIZE ratio,
the space for task stacks can be allocated using __vmalloc_node_range(),
alloc_pages_node() or kmem_cache_alloc_node().
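
For reference, the path selection in kernel/fork.c looks roughly like the
sketch below. This is a condensed illustration, not the exact upstream
code: the cache of vmalloc'ed stacks, the vm_struct bookkeeping and error
handling are left out.

/*
 * Condensed sketch of the task stack allocation paths in kernel/fork.c.
 */
static unsigned long *alloc_thread_stack_node(struct task_struct *tsk, int node)
{
#if defined(CONFIG_VMAP_STACK)
        /* Case 1: virtually mapped stack; page->mem_cgroup is set. */
        void *stack = __vmalloc_node_range(THREAD_SIZE, THREAD_ALIGN,
                                           VMALLOC_START, VMALLOC_END,
                                           THREADINFO_GFP, PAGE_KERNEL, 0, node,
                                           __builtin_return_address(0));
        return stack;
#elif THREAD_SIZE >= PAGE_SIZE
        /* Case 2: whole pages from the buddy allocator; page->mem_cgroup is set. */
        struct page *page = alloc_pages_node(node, THREADINFO_GFP,
                                             THREAD_SIZE_ORDER);
        return page ? page_address(page) : NULL;
#else
        /* Case 3: sub-page stacks from a slab cache; page->mem_cgroup stays NULL. */
        return kmem_cache_alloc_node(thread_stack_cache, THREADINFO_GFP, node);
#endif
}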

In the first and second cases the page->mem_cgroup pointer is set, but
in the third it is not: the memcg membership of a slab page has to be
determined using the memcg_from_slab_page() function, which looks at
page->slab_cache->memcg_params.memcg.  In this case, using
mod_memcg_page_state() (as account_kernel_stack() does) is incorrect:
the page->mem_cgroup pointer is NULL even for pages charged to a
non-root memory cgroup.
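
For context, memcg_from_slab_page() resolves the owning memcg from the
slab cache rather than from the page itself; it looks roughly like the
sketch below (quoted from memory for kernels of this era, not part of
this patch).

/* Sketch of the slab-page lookup referenced above (mm/slab.h). */
static inline struct mem_cgroup *memcg_from_slab_page(struct page *page)
{
        struct kmem_cache *s;

        s = READ_ONCE(page->slab_cache);
        if (s && !is_root_cache(s))
                return s->memcg_params.memcg;

        return NULL;
}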

This can lead to the per-memcg kernel_stack counters permanently
showing 0 on some architectures (depending on the configuration).

To fix it, introduce a mod_memcg_obj_state() helper, which takes a
pointer to a kernel object as its first argument, uses
mem_cgroup_from_obj() to get an RCU-protected memcg pointer, and calls
mod_memcg_state().  This makes it possible to handle all possible
configurations (CONFIG_VMAP_STACK and various THREAD_SIZE/PAGE_SIZE
values) without spilling any memcg/kmem specifics into fork.c.

Note: This is a special version of the patch created for stable
backports.  It contains code from the following two patches:
  - mm: memcg/slab: introduce mem_cgroup_from_obj()
  - mm: fork: fix kernel_stack memcg stats for various stack implementations

[[email protected]: introduce mem_cgroup_from_obj()]
  Link: http://lkml.kernel.org/r/[email protected]
Fixes: 4d96ba3 ("mm: memcg/slab: stop setting page->mem_cgroup pointer for slab pages")
Signed-off-by: Roman Gushchin <[email protected]>
Signed-off-by: Andrew Morton <[email protected]>
Reviewed-by: Shakeel Butt <[email protected]>
Acked-by: Johannes Weiner <[email protected]>
Cc: Michal Hocko <[email protected]>
Cc: Bharata B Rao <[email protected]>
Cc: Shakeel Butt <[email protected]>
Cc: <[email protected]>
Link: http://lkml.kernel.org/r/[email protected]
Signed-off-by: Linus Torvalds <[email protected]>
rgushchin authored and torvalds committed Mar 29, 2020
1 parent 726b7bb commit 8380ce4
Showing 3 changed files with 52 additions and 2 deletions.
12 changes: 12 additions & 0 deletions include/linux/memcontrol.h
@@ -695,6 +695,7 @@ static inline unsigned long lruvec_page_state_local(struct lruvec *lruvec,
 void __mod_lruvec_state(struct lruvec *lruvec, enum node_stat_item idx,
                         int val);
 void __mod_lruvec_slab_state(void *p, enum node_stat_item idx, int val);
+void mod_memcg_obj_state(void *p, int idx, int val);
 
 static inline void mod_lruvec_state(struct lruvec *lruvec,
                                     enum node_stat_item idx, int val)
@@ -1123,6 +1124,10 @@ static inline void __mod_lruvec_slab_state(void *p, enum node_stat_item idx,
         __mod_node_page_state(page_pgdat(page), idx, val);
 }
 
+static inline void mod_memcg_obj_state(void *p, int idx, int val)
+{
+}
+
 static inline
 unsigned long mem_cgroup_soft_limit_reclaim(pg_data_t *pgdat, int order,
                                             gfp_t gfp_mask,
@@ -1427,6 +1432,8 @@ static inline int memcg_cache_id(struct mem_cgroup *memcg)
         return memcg ? memcg->kmemcg_id : -1;
 }
 
+struct mem_cgroup *mem_cgroup_from_obj(void *p);
+
 #else
 
 static inline int memcg_kmem_charge(struct page *page, gfp_t gfp, int order)
@@ -1468,6 +1475,11 @@ static inline void memcg_put_cache_ids(void)
 {
 }
 
+static inline struct mem_cgroup *mem_cgroup_from_obj(void *p)
+{
+        return NULL;
+}
+
 #endif /* CONFIG_MEMCG_KMEM */
 
 #endif /* _LINUX_MEMCONTROL_H */
4 changes: 2 additions & 2 deletions kernel/fork.c
@@ -397,8 +397,8 @@ static void account_kernel_stack(struct task_struct *tsk, int account)
                 mod_zone_page_state(page_zone(first_page), NR_KERNEL_STACK_KB,
                                     THREAD_SIZE / 1024 * account);
 
-                mod_memcg_page_state(first_page, MEMCG_KERNEL_STACK_KB,
-                                     account * (THREAD_SIZE / 1024));
+                mod_memcg_obj_state(stack, MEMCG_KERNEL_STACK_KB,
+                                    account * (THREAD_SIZE / 1024));
         }
 }
 
38 changes: 38 additions & 0 deletions mm/memcontrol.c
@@ -777,6 +777,17 @@ void __mod_lruvec_slab_state(void *p, enum node_stat_item idx, int val)
         rcu_read_unlock();
 }
 
+void mod_memcg_obj_state(void *p, int idx, int val)
+{
+        struct mem_cgroup *memcg;
+
+        rcu_read_lock();
+        memcg = mem_cgroup_from_obj(p);
+        if (memcg)
+                mod_memcg_state(memcg, idx, val);
+        rcu_read_unlock();
+}
+
 /**
  * __count_memcg_events - account VM events in a cgroup
  * @memcg: the memory cgroup
@@ -2661,6 +2672,33 @@ static void commit_charge(struct page *page, struct mem_cgroup *memcg,
 }
 
 #ifdef CONFIG_MEMCG_KMEM
+/*
+ * Returns a pointer to the memory cgroup to which the kernel object is charged.
+ *
+ * The caller must ensure the memcg lifetime, e.g. by taking rcu_read_lock(),
+ * cgroup_mutex, etc.
+ */
+struct mem_cgroup *mem_cgroup_from_obj(void *p)
+{
+        struct page *page;
+
+        if (mem_cgroup_disabled())
+                return NULL;
+
+        page = virt_to_head_page(p);
+
+        /*
+         * Slab pages don't have page->mem_cgroup set because corresponding
+         * kmem caches can be reparented during the lifetime. That's why
+         * memcg_from_slab_page() should be used instead.
+         */
+        if (PageSlab(page))
+                return memcg_from_slab_page(page);
+
+        /* All other pages use page->mem_cgroup */
+        return page->mem_cgroup;
+}
+
 static int memcg_alloc_cache_id(void)
 {
         int id, size;
