Diffstat (limited to 'include/linux/memcontrol.h')
-rw-r--r-- | include/linux/memcontrol.h | 108
1 file changed, 52 insertions, 56 deletions
diff --git a/include/linux/memcontrol.h b/include/linux/memcontrol.h
index 30561a954ee0..bc74d6a4407c 100644
--- a/include/linux/memcontrol.h
+++ b/include/linux/memcontrol.h
@@ -94,8 +94,8 @@ enum mem_cgroup_events_target {
 	MEM_CGROUP_NTARGETS,
 };
 
-struct mem_cgroup_stat_cpu {
-	long count[MEMCG_NR_STAT];
+struct memcg_vmstats_percpu {
+	long stat[MEMCG_NR_STAT];
 	unsigned long events[NR_VM_EVENT_ITEMS];
 	unsigned long nr_page_events;
 	unsigned long targets[MEM_CGROUP_NTARGETS];
@@ -128,6 +128,7 @@ struct mem_cgroup_per_node {
 
 	struct lruvec_stat __percpu *lruvec_stat_cpu;
 	atomic_long_t		lruvec_stat[NR_VM_NODE_STAT_ITEMS];
+	atomic_long_t		lruvec_stat_local[NR_VM_NODE_STAT_ITEMS];
 
 	unsigned long		lru_zone_size[MAX_NR_ZONES][NR_LRU_LISTS];
 
@@ -274,13 +275,17 @@ struct mem_cgroup {
 	struct task_struct	*move_lock_task;
 
 	/* memory.stat */
-	struct mem_cgroup_stat_cpu __percpu *stat_cpu;
+	struct memcg_vmstats_percpu __percpu *vmstats_percpu;
 
 	MEMCG_PADDING(_pad2_);
 
-	atomic_long_t		stat[MEMCG_NR_STAT];
-	atomic_long_t		events[NR_VM_EVENT_ITEMS];
-	atomic_long_t		memory_events[MEMCG_NR_MEMORY_EVENTS];
+	atomic_long_t		vmstats[MEMCG_NR_STAT];
+	atomic_long_t		vmstats_local[MEMCG_NR_STAT];
+
+	atomic_long_t		vmevents[NR_VM_EVENT_ITEMS];
+	atomic_long_t		vmevents_local[NR_VM_EVENT_ITEMS];
+
+	atomic_long_t		memory_events[MEMCG_NR_MEMORY_EVENTS];
 
 	unsigned long		socket_pressure;
 
@@ -554,10 +559,9 @@ void unlock_page_memcg(struct page *page);
  * idx can be of type enum memcg_stat_item or node_stat_item.
  * Keep in sync with memcg_exact_page_state().
  */
-static inline unsigned long memcg_page_state(struct mem_cgroup *memcg,
-					     int idx)
+static inline unsigned long memcg_page_state(struct mem_cgroup *memcg, int idx)
 {
-	long x = atomic_long_read(&memcg->stat[idx]);
+	long x = atomic_long_read(&memcg->vmstats[idx]);
 #ifdef CONFIG_SMP
 	if (x < 0)
 		x = 0;
@@ -565,23 +569,23 @@ static inline unsigned long memcg_page_state(struct mem_cgroup *memcg,
 	return x;
 }
 
-/* idx can be of type enum memcg_stat_item or node_stat_item */
-static inline void __mod_memcg_state(struct mem_cgroup *memcg,
-				     int idx, int val)
+/*
+ * idx can be of type enum memcg_stat_item or node_stat_item.
+ * Keep in sync with memcg_exact_page_state().
+ */
+static inline unsigned long memcg_page_state_local(struct mem_cgroup *memcg,
+						   int idx)
 {
-	long x;
-
-	if (mem_cgroup_disabled())
-		return;
-
-	x = val + __this_cpu_read(memcg->stat_cpu->count[idx]);
-	if (unlikely(abs(x) > MEMCG_CHARGE_BATCH)) {
-		atomic_long_add(x, &memcg->stat[idx]);
+	long x = atomic_long_read(&memcg->vmstats_local[idx]);
+#ifdef CONFIG_SMP
+	if (x < 0)
 		x = 0;
-	}
-	__this_cpu_write(memcg->stat_cpu->count[idx], x);
+#endif
+	return x;
 }
 
+void __mod_memcg_state(struct mem_cgroup *memcg, int idx, int val);
+
 /* idx can be of type enum memcg_stat_item or node_stat_item */
 static inline void mod_memcg_state(struct mem_cgroup *memcg,
 				   int idx, int val)
@@ -642,32 +646,27 @@ static inline unsigned long lruvec_page_state(struct lruvec *lruvec,
 	return x;
 }
 
-static inline void __mod_lruvec_state(struct lruvec *lruvec,
-				      enum node_stat_item idx, int val)
+static inline unsigned long lruvec_page_state_local(struct lruvec *lruvec,
+						    enum node_stat_item idx)
 {
 	struct mem_cgroup_per_node *pn;
 	long x;
 
-	/* Update node */
-	__mod_node_page_state(lruvec_pgdat(lruvec), idx, val);
-
 	if (mem_cgroup_disabled())
-		return;
+		return node_page_state(lruvec_pgdat(lruvec), idx);
 
 	pn = container_of(lruvec, struct mem_cgroup_per_node, lruvec);
-
-	/* Update memcg */
-	__mod_memcg_state(pn->memcg, idx, val);
-
-	/* Update lruvec */
-	x = val + __this_cpu_read(pn->lruvec_stat_cpu->count[idx]);
-	if (unlikely(abs(x) > MEMCG_CHARGE_BATCH)) {
-		atomic_long_add(x, &pn->lruvec_stat[idx]);
+	x = atomic_long_read(&pn->lruvec_stat_local[idx]);
+#ifdef CONFIG_SMP
+	if (x < 0)
 		x = 0;
-	}
-	__this_cpu_write(pn->lruvec_stat_cpu->count[idx], x);
+#endif
+	return x;
 }
 
+void __mod_lruvec_state(struct lruvec *lruvec, enum node_stat_item idx,
+			int val);
+
 static inline void mod_lruvec_state(struct lruvec *lruvec,
 				    enum node_stat_item idx, int val)
 {
@@ -708,22 +707,8 @@ unsigned long mem_cgroup_soft_limit_reclaim(pg_data_t *pgdat, int order,
 						gfp_t gfp_mask,
 						unsigned long *total_scanned);
 
-static inline void __count_memcg_events(struct mem_cgroup *memcg,
-					enum vm_event_item idx,
-					unsigned long count)
-{
-	unsigned long x;
-
-	if (mem_cgroup_disabled())
-		return;
-
-	x = count + __this_cpu_read(memcg->stat_cpu->events[idx]);
-	if (unlikely(x > MEMCG_CHARGE_BATCH)) {
-		atomic_long_add(x, &memcg->events[idx]);
-		x = 0;
-	}
-	__this_cpu_write(memcg->stat_cpu->events[idx], x);
-}
+void __count_memcg_events(struct mem_cgroup *memcg, enum vm_event_item idx,
+			  unsigned long count);
 
 static inline void count_memcg_events(struct mem_cgroup *memcg,
 				      enum vm_event_item idx,
@@ -1011,8 +996,13 @@ static inline void mem_cgroup_print_oom_group(struct mem_cgroup *memcg)
 {
 }
 
-static inline unsigned long memcg_page_state(struct mem_cgroup *memcg,
-					     int idx)
+static inline unsigned long memcg_page_state(struct mem_cgroup *memcg, int idx)
+{
+	return 0;
+}
+
+static inline unsigned long memcg_page_state_local(struct mem_cgroup *memcg,
+						   int idx)
 {
 	return 0;
 }
@@ -1047,6 +1037,12 @@ static inline unsigned long lruvec_page_state(struct lruvec *lruvec,
 	return node_page_state(lruvec_pgdat(lruvec), idx);
 }
 
+static inline unsigned long lruvec_page_state_local(struct lruvec *lruvec,
+						    enum node_stat_item idx)
+{
+	return node_page_state(lruvec_pgdat(lruvec), idx);
+}
+
 static inline void __mod_lruvec_state(struct lruvec *lruvec,
 				      enum node_stat_item idx, int val)
 {
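Note: after this change the header only carries prototypes for __mod_memcg_state(), __mod_lruvec_state() and __count_memcg_events(); their out-of-line bodies live in mm/memcontrol.c and are not part of this diff. Purely as an illustrative sketch of how such a body could feed both the batched vmstats[] counters and the new vmstats_local[] counters, reusing the MEMCG_CHARGE_BATCH batching of the removed inline helper (the upward propagation via parent_mem_cgroup() is an assumption, not something shown in this file):

/* Sketch for the mm/memcontrol.c side; not the literal commit body. */
void __mod_memcg_state(struct mem_cgroup *memcg, int idx, int val)
{
	long x;

	if (mem_cgroup_disabled())
		return;

	/* Accumulate into the renamed per-cpu batch, as the old inline code did. */
	x = val + __this_cpu_read(memcg->vmstats_percpu->stat[idx]);
	if (unlikely(abs(x) > MEMCG_CHARGE_BATCH)) {
		struct mem_cgroup *mi;

		/* Local counter: this cgroup only, read by memcg_page_state_local(). */
		atomic_long_add(x, &memcg->vmstats_local[idx]);
		/* Hierarchical counters: assumed to be pushed up to all ancestors. */
		for (mi = memcg; mi; mi = parent_mem_cgroup(mi))
			atomic_long_add(x, &mi->vmstats[idx]);
		x = 0;
	}
	__this_cpu_write(memcg->vmstats_percpu->stat[idx], x);
}

Under that assumption, memcg_page_state() would return a value that includes descendant cgroups, while memcg_page_state_local() reads only the cgroup's own vmstats_local[]; __count_memcg_events() and __mod_lruvec_state() would follow the same pattern for vmevents[]/vmevents_local[] and lruvec_stat[]/lruvec_stat_local[].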