memcg: cleanup racy sum avoidance code
We used to have per-cpu memcg and lruvec stats, and readers had to traverse and sum the stats from each CPU. This summing was racy and could expose transient negative values, so an explicit check was added to avoid such scenarios. Now these stats have been moved to the rstat infrastructure and are no longer per-cpu, so we can remove the fixup for transient negative values.

Link: https://lkml.kernel.org/r/20210728012243.3369123-1-shakeelb@google.com
Signed-off-by: Shakeel Butt <shakeelb@google.com>
Acked-by: Roman Gushchin <guro@fb.com>
Reviewed-by: David Hildenbrand <david@redhat.com>
Acked-by: Michal Hocko <mhocko@suse.com>
Cc: Johannes Weiner <hannes@cmpxchg.org>
Signed-off-by: Andrew Morton <akpm@linux-foundation.org>
Signed-off-by: Linus Torvalds <torvalds@linux-foundation.org>
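To make the race concrete, here is a minimal userspace sketch (not kernel code; the per-CPU array, NR_CPUS value, and function names are invented for illustration) of how summing per-CPU deltas can yield a transient negative total, which is what the removed "if (x < 0)" clamp papered over:

/*
 * Simplified illustration only: an item charged on one CPU may be
 * uncharged on another before a racing reader has visited the first
 * CPU, so the reader's partial sum can dip below zero.
 */
#include <stdio.h>

#define NR_CPUS 4

/* hypothetical per-CPU deltas for one stat item */
static long percpu_stat[NR_CPUS];

/* reader side: sum all CPUs and clamp, mirroring the removed fixup */
static long read_stat(void)
{
	long x = 0;
	int cpu;

	for (cpu = 0; cpu < NR_CPUS; cpu++)
		x += percpu_stat[cpu];

	if (x < 0)	/* transient negative sums were clamped to 0 */
		x = 0;
	return x;
}

int main(void)
{
	/* page charged on CPU 2, later uncharged on CPU 0 */
	percpu_stat[2] += 1;
	percpu_stat[0] -= 1;

	/*
	 * A racing reader that had already passed CPU 0 but not yet CPU 2
	 * would have seen -1 without the clamp; the true total is 0.
	 */
	printf("stat = %ld\n", read_stat());
	return 0;
}

With the stats aggregated by rstat, readers fetch a single pre-summed value, so no such clamp is needed.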
parent ec403e2ae0
commit 96e51ccf1a
1 changed file with 2 additions and 13 deletions
@@ -977,30 +977,19 @@ static inline void mod_memcg_state(struct mem_cgroup *memcg,
 
 static inline unsigned long memcg_page_state(struct mem_cgroup *memcg, int idx)
 {
-	long x = READ_ONCE(memcg->vmstats.state[idx]);
-#ifdef CONFIG_SMP
-	if (x < 0)
-		x = 0;
-#endif
-	return x;
+	return READ_ONCE(memcg->vmstats.state[idx]);
 }
 
 static inline unsigned long lruvec_page_state(struct lruvec *lruvec,
 					      enum node_stat_item idx)
 {
 	struct mem_cgroup_per_node *pn;
-	long x;
 
 	if (mem_cgroup_disabled())
 		return node_page_state(lruvec_pgdat(lruvec), idx);
 
 	pn = container_of(lruvec, struct mem_cgroup_per_node, lruvec);
-	x = READ_ONCE(pn->lruvec_stats.state[idx]);
-#ifdef CONFIG_SMP
-	if (x < 0)
-		x = 0;
-#endif
-	return x;
+	return READ_ONCE(pn->lruvec_stats.state[idx]);
 }
 
 static inline unsigned long lruvec_page_state_local(struct lruvec *lruvec,