author     Linus Torvalds <torvalds@linux-foundation.org>   2013-05-07 20:49:51 -0700
committer  Linus Torvalds <torvalds@linux-foundation.org>   2013-05-07 20:49:51 -0700
commit     5af43c24ca59a448c9312dd4a4a51d27ec3b9a73 (patch)
tree       65288caabc91fc04242acace38789a6dd5b86ed4 /mm
parent     9affd6becbfb2c3f0d04e554bb87234761b37aba (diff)
parent     a27bb332c04cec8c4afd7912df0dc7890db27560 (diff)
Merge branch 'akpm' (incoming from Andrew)
Merge more incoming from Andrew Morton:
- Various fixes which were stalled or which I picked up recently
- A large rotorooting of the AIO code.  Allegedly to improve
  performance but I don't really have good performance numbers (I might
  have lost the email) and I can't raise Kent today.  I held this out
  of 3.9 and we could give it another cycle if it's all too late/scary.

I ended up taking only the first two thirds of the AIO rotorooting.  I
left the percpu parts and the batch completion for later.  - Linus
* emailed patches from Andrew Morton <akpm@linux-foundation.org>: (33 commits)
aio: don't include aio.h in sched.h
aio: kill ki_retry
aio: kill ki_key
aio: give shared kioctx fields their own cachelines
aio: kill struct aio_ring_info
aio: kill batch allocation
aio: change reqs_active to include unreaped completions
aio: use cancellation list lazily
aio: use flush_dcache_page()
aio: make aio_read_evt() more efficient, convert to hrtimers
wait: add wait_event_hrtimeout()
aio: refcounting cleanup
aio: make aio_put_req() lockless
aio: do fget() after aio_get_req()
aio: dprintk() -> pr_debug()
aio: move private stuff out of aio.h
aio: add kiocb_cancel()
aio: kill return value of aio_complete()
char: add aio_{read,write} to /dev/{null,zero}
aio: remove retry-based AIO
...
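One of the patches above, "wait: add wait_event_hrtimeout()", introduces a high-resolution counterpart to wait_event(). A minimal kernel-side sketch of how a caller might use it (the wait queue, flag, and 500 us timeout here are hypothetical, not taken from the series):

#include <linux/wait.h>
#include <linux/ktime.h>

/* Hypothetical example: wait up to 500 microseconds for demo_done. */
static DECLARE_WAIT_QUEUE_HEAD(demo_wq);
static bool demo_done;

static int demo_wait(void)
{
        /* Returns 0 once demo_done is true, -ETIME if the hrtimer expires first. */
        return wait_event_hrtimeout(demo_wq, demo_done,
                                    ktime_set(0, 500 * NSEC_PER_USEC));
}

The waking side would set demo_done and call wake_up(&demo_wq), just as with the existing jiffies-based wait_event helpers.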
Diffstat (limited to 'mm')
-rw-r--r-- | mm/memcontrol.c  | 36
-rw-r--r-- | mm/mmap.c        |  7
-rw-r--r-- | mm/mmu_context.c |  3
-rw-r--r-- | mm/page_io.c     |  1
-rw-r--r-- | mm/shmem.c       |  1
-rw-r--r-- | mm/swap.c        |  1
-rw-r--r-- | mm/vmalloc.c     |  2
7 files changed, 37 insertions, 14 deletions
diff --git a/mm/memcontrol.c b/mm/memcontrol.c
index 0f1d92163f30..cb1c9dedf9b6 100644
--- a/mm/memcontrol.c
+++ b/mm/memcontrol.c
@@ -92,16 +92,18 @@ enum mem_cgroup_stat_index {
 	/*
	 * For MEM_CONTAINER_TYPE_ALL, usage = pagecache + rss.
	 */
-	MEM_CGROUP_STAT_CACHE,	   /* # of pages charged as cache */
-	MEM_CGROUP_STAT_RSS,	   /* # of pages charged as anon rss */
-	MEM_CGROUP_STAT_FILE_MAPPED,  /* # of pages charged as file rss */
-	MEM_CGROUP_STAT_SWAP, /* # of pages, swapped out */
+	MEM_CGROUP_STAT_CACHE,		/* # of pages charged as cache */
+	MEM_CGROUP_STAT_RSS,		/* # of pages charged as anon rss */
+	MEM_CGROUP_STAT_RSS_HUGE,	/* # of pages charged as anon huge */
+	MEM_CGROUP_STAT_FILE_MAPPED,	/* # of pages charged as file rss */
+	MEM_CGROUP_STAT_SWAP,		/* # of pages, swapped out */
 	MEM_CGROUP_STAT_NSTATS,
 };

 static const char * const mem_cgroup_stat_names[] = {
 	"cache",
 	"rss",
+	"rss_huge",
 	"mapped_file",
 	"swap",
 };
@@ -917,6 +919,7 @@ static unsigned long mem_cgroup_read_events(struct mem_cgroup *memcg,
 }

 static void mem_cgroup_charge_statistics(struct mem_cgroup *memcg,
+					 struct page *page,
 					 bool anon, int nr_pages)
 {
 	preempt_disable();
@@ -932,6 +935,10 @@ static void mem_cgroup_charge_statistics(struct mem_cgroup *memcg,
 		__this_cpu_add(memcg->stat->count[MEM_CGROUP_STAT_CACHE],
				nr_pages);

+	if (PageTransHuge(page))
+		__this_cpu_add(memcg->stat->count[MEM_CGROUP_STAT_RSS_HUGE],
+				nr_pages);
+
 	/* pagein of a big page is an event. So, ignore page size */
 	if (nr_pages > 0)
 		__this_cpu_inc(memcg->stat->events[MEM_CGROUP_EVENTS_PGPGIN]);
@@ -2914,7 +2921,7 @@ static void __mem_cgroup_commit_charge(struct mem_cgroup *memcg,
 	else
 		anon = false;

-	mem_cgroup_charge_statistics(memcg, anon, nr_pages);
+	mem_cgroup_charge_statistics(memcg, page, anon, nr_pages);
 	unlock_page_cgroup(pc);

 	/*
@@ -3708,16 +3715,21 @@ void mem_cgroup_split_huge_fixup(struct page *head)
 {
 	struct page_cgroup *head_pc = lookup_page_cgroup(head);
 	struct page_cgroup *pc;
+	struct mem_cgroup *memcg;
 	int i;

 	if (mem_cgroup_disabled())
 		return;
+
+	memcg = head_pc->mem_cgroup;
 	for (i = 1; i < HPAGE_PMD_NR; i++) {
 		pc = head_pc + i;
-		pc->mem_cgroup = head_pc->mem_cgroup;
+		pc->mem_cgroup = memcg;
 		smp_wmb();/* see __commit_charge() */
 		pc->flags = head_pc->flags & ~PCGF_NOCOPY_AT_SPLIT;
 	}
+	__this_cpu_sub(memcg->stat->count[MEM_CGROUP_STAT_RSS_HUGE],
+		       HPAGE_PMD_NR);
 }

 #endif /* CONFIG_TRANSPARENT_HUGEPAGE */
@@ -3773,11 +3785,11 @@ static int mem_cgroup_move_account(struct page *page,
 		__this_cpu_inc(to->stat->count[MEM_CGROUP_STAT_FILE_MAPPED]);
 		preempt_enable();
 	}
-	mem_cgroup_charge_statistics(from, anon, -nr_pages);
+	mem_cgroup_charge_statistics(from, page, anon, -nr_pages);

 	/* caller should have done css_get */
 	pc->mem_cgroup = to;
-	mem_cgroup_charge_statistics(to, anon, nr_pages);
+	mem_cgroup_charge_statistics(to, page, anon, nr_pages);
 	move_unlock_mem_cgroup(from, &flags);
 	ret = 0;
 unlock:
@@ -4152,7 +4164,7 @@ __mem_cgroup_uncharge_common(struct page *page, enum charge_type ctype,
 		break;
 	}

-	mem_cgroup_charge_statistics(memcg, anon, -nr_pages);
+	mem_cgroup_charge_statistics(memcg, page, anon, -nr_pages);

 	ClearPageCgroupUsed(pc);
 	/*
@@ -4502,7 +4514,7 @@ void mem_cgroup_replace_page_cache(struct page *oldpage,
 	lock_page_cgroup(pc);
 	if (PageCgroupUsed(pc)) {
 		memcg = pc->mem_cgroup;
-		mem_cgroup_charge_statistics(memcg, false, -1);
+		mem_cgroup_charge_statistics(memcg, oldpage, false, -1);
 		ClearPageCgroupUsed(pc);
 	}
 	unlock_page_cgroup(pc);
@@ -5030,6 +5042,10 @@ static inline u64 mem_cgroup_usage(struct mem_cgroup *memcg, bool swap)
 			return res_counter_read_u64(&memcg->memsw, RES_USAGE);
 	}

+	/*
+	 * Transparent hugepages are still accounted for in MEM_CGROUP_STAT_RSS
+	 * as well as in MEM_CGROUP_STAT_RSS_HUGE.
+	 */
 	val = mem_cgroup_recursive_stat(memcg, MEM_CGROUP_STAT_CACHE);
 	val += mem_cgroup_recursive_stat(memcg, MEM_CGROUP_STAT_RSS);
diff --git a/mm/mmap.c b/mm/mmap.c
index da3e9c04bf37..1ae21d645c68 100644
--- a/mm/mmap.c
+++ b/mm/mmap.c
@@ -1363,15 +1363,20 @@ SYSCALL_DEFINE6(mmap_pgoff, unsigned long, addr, unsigned long, len,
 		file = fget(fd);
 		if (!file)
 			goto out;
+		if (is_file_hugepages(file))
+			len = ALIGN(len, huge_page_size(hstate_file(file)));
 	} else if (flags & MAP_HUGETLB) {
 		struct user_struct *user = NULL;
+
+		len = ALIGN(len, huge_page_size(hstate_sizelog(
+			(flags >> MAP_HUGE_SHIFT) & MAP_HUGE_MASK)));
 		/*
		 * VM_NORESERVE is used because the reservations will be
		 * taken when vm_ops->mmap() is called
		 * A dummy user value is used because we are not locking
		 * memory so no accounting is necessary
		 */
-		file = hugetlb_file_setup(HUGETLB_ANON_FILE, addr, len,
+		file = hugetlb_file_setup(HUGETLB_ANON_FILE, len,
				VM_NORESERVE,
				&user, HUGETLB_ANONHUGE_INODE,
				(flags >> MAP_HUGE_SHIFT) & MAP_HUGE_MASK);
diff --git a/mm/mmu_context.c b/mm/mmu_context.c
index 3dcfaf4ed355..8a8cd0265e52 100644
--- a/mm/mmu_context.c
+++ b/mm/mmu_context.c
@@ -14,9 +14,6 @@
  * use_mm
  *	Makes the calling kernel thread take on the specified
  *	mm context.
- *	Called by the retry thread execute retries within the
- *	iocb issuer's mm context, so that copy_from/to_user
- *	operations work seamlessly for aio.
  *	(Note: this routine is intended to be called only
  *	from a kernel thread context)
  */
diff --git a/mm/page_io.c b/mm/page_io.c
index bb5d75274686..06a8842a6ec6 100644
--- a/mm/page_io.c
+++ b/mm/page_io.c
@@ -20,6 +20,7 @@
 #include <linux/buffer_head.h>
 #include <linux/writeback.h>
 #include <linux/frontswap.h>
+#include <linux/aio.h>
 #include <asm/pgtable.h>

 static struct bio *get_swap_bio(gfp_t gfp_flags,
diff --git a/mm/shmem.c b/mm/shmem.c
index 39b2a0b86fe8..5e6a8422658b 100644
--- a/mm/shmem.c
+++ b/mm/shmem.c
@@ -31,6 +31,7 @@
 #include <linux/mm.h>
 #include <linux/export.h>
 #include <linux/swap.h>
+#include <linux/aio.h>

 static struct vfsmount *shm_mnt;

diff --git a/mm/swap.c b/mm/swap.c
index acd40bfffa82..dfd7d71d6841 100644
--- a/mm/swap.c
+++ b/mm/swap.c
@@ -30,6 +30,7 @@
 #include <linux/backing-dev.h>
 #include <linux/memcontrol.h>
 #include <linux/gfp.h>
+#include <linux/uio.h>

 #include "internal.h"

diff --git a/mm/vmalloc.c b/mm/vmalloc.c
index b12fd8612604..d365724feb05 100644
--- a/mm/vmalloc.c
+++ b/mm/vmalloc.c
@@ -1522,6 +1522,8 @@ static void __vunmap(const void *addr, int deallocate_pages)
 *	Must not be called in NMI context (strictly speaking, only if we don't
 *	have CONFIG_ARCH_HAVE_NMI_SAFE_CMPXCHG, but making the calling
 *	conventions for vfree() arch-depenedent would be a really bad idea)
+ *
+ *	NOTE: assumes that the object at *addr has a size >= sizeof(llist_node)
 */
 void vfree(const void *addr)
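The mm/mmap.c hunk above makes the kernel round a MAP_HUGETLB request's length up to the huge page size itself (and, for fd-backed hugetlbfs mappings, via hstate_file()), rather than leaving that to hugetlb_file_setup() and its callers. As a hedged userspace illustration (the 3 MB length is hypothetical and assumes 2 MB huge pages have been reserved), a request like this ends up mapping a region rounded up to 4 MB:

#define _GNU_SOURCE
#include <stdio.h>
#include <sys/mman.h>

int main(void)
{
        size_t len = 3 * 1024 * 1024;   /* deliberately not a multiple of the 2 MB huge page size */
        void *p = mmap(NULL, len, PROT_READ | PROT_WRITE,
                       MAP_PRIVATE | MAP_ANONYMOUS | MAP_HUGETLB, -1, 0);

        if (p == MAP_FAILED) {
                perror("mmap(MAP_HUGETLB)");    /* e.g. no huge pages reserved via vm.nr_hugepages */
                return 1;
        }
        /* Unmap with the hugepage-aligned size the kernel actually mapped. */
        munmap(p, 4 * 1024 * 1024);
        return 0;
}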