author		Alexander Shishkin <alexander.shishkin@linux.intel.com>	2015-01-14 14:18:12 +0200
committer	Ingo Molnar <mingo@kernel.org>					2015-04-02 17:14:08 +0200
commit		0a4e38e64f5e91ce131cc42ee5bb3925377ec840 (patch)
tree		726e06adcb05c8e44e93ad072fdcba6be38450e3 /kernel
parent		45bfb2e50471abbbfd83d40d28c986078b0d24ff (diff)
perf: Support high-order allocations for AUX space
Some PMUs (such as BTS, or Intel PT without the multiple-entry ToPA capability)
don't support scatter-gather and prefer larger contiguous areas for their
output regions.
This patch adds a new PMU capability, PERF_PMU_CAP_AUX_NO_SG, with which such drivers can request higher-order allocations for their AUX buffers.
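For illustration, a driver for such a PMU would opt in when registering its struct pmu. The following is a minimal sketch, not code from this patch: the bts_like_* names are hypothetical, while the capabilities field, the PERF_PMU_CAP_AUX_NO_SG flag, and the setup_aux/free_aux callbacks are the ones the allocator below relies on.

	/*
	 * Hypothetical registration sketch: a PMU that cannot scatter-gather
	 * advertises PERF_PMU_CAP_AUX_NO_SG, so rb_alloc_aux() will build its
	 * AUX area out of high-order contiguous chunks.
	 */
	static struct pmu bts_like_pmu = {
		.capabilities	= PERF_PMU_CAP_AUX_NO_SG,
		.setup_aux	= bts_like_setup_aux,	/* hypothetical callback */
		.free_aux	= bts_like_free_aux,	/* hypothetical callback */
		/* ... event_init/add/del/start/stop/read as usual ... */
	};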
Signed-off-by: Alexander Shishkin <alexander.shishkin@linux.intel.com>
Signed-off-by: Peter Zijlstra (Intel) <peterz@infradead.org>
Cc: Borislav Petkov <bp@alien8.de>
Cc: Frederic Weisbecker <fweisbec@gmail.com>
Cc: H. Peter Anvin <hpa@zytor.com>
Cc: Kaixu Xia <kaixu.xia@linaro.org>
Cc: Linus Torvalds <torvalds@linux-foundation.org>
Cc: Mike Galbraith <efault@gmx.de>
Cc: Paul Mackerras <paulus@samba.org>
Cc: Robert Richter <rric@kernel.org>
Cc: Stephane Eranian <eranian@google.com>
Cc: Thomas Gleixner <tglx@linutronix.de>
Cc: acme@infradead.org
Cc: adrian.hunter@intel.com
Cc: kan.liang@intel.com
Cc: markus.t.metzger@intel.com
Cc: mathieu.poirier@linaro.org
Link: http://lkml.kernel.org/r/1421237903-181015-4-git-send-email-alexander.shishkin@linux.intel.com
Signed-off-by: Ingo Molnar <mingo@kernel.org>
Diffstat (limited to 'kernel')
-rw-r--r--	kernel/events/ring_buffer.c	56
1 file changed, 50 insertions(+), 6 deletions(-)
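To make the allocation policy in the diff below concrete, here is a minimal user-space walk-through of the order-selection loop in rb_alloc_aux(). It assumes every allocation succeeds at the requested order (the kernel code additionally falls back to lower orders on failure), and ilog2_() is a stand-in for the kernel's ilog2():

	#include <stdio.h>

	/* User-space stand-in for the kernel's ilog2(): floor(log2(n)), n > 0. */
	static int ilog2_(unsigned int n)
	{
		int log = -1;

		while (n) {
			n >>= 1;
			log++;
		}
		return log;
	}

	int main(void)
	{
		int nr_pages = 48;			/* AUX buffer size in pages */
		int max_order = ilog2_(nr_pages);	/* PERF_PMU_CAP_AUX_NO_SG case */
		int done = 0;

		/* Mirrors rb_alloc_aux(): take the largest chunk that still fits. */
		while (done < nr_pages) {
			int fit = ilog2_(nr_pages - done);
			int order = max_order < fit ? max_order : fit;

			printf("allocate order %d (%d pages)\n", order, 1 << order);
			done += 1 << order;
		}
		return 0;
	}

For nr_pages = 48 this prints an order-5 (32-page) chunk followed by an order-4 (16-page) chunk: the 48-page AUX buffer is covered by two contiguous regions instead of 48 single pages.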
diff --git a/kernel/events/ring_buffer.c b/kernel/events/ring_buffer.c
index 3de9c4e9ea9f..ed0859e33b2f 100644
--- a/kernel/events/ring_buffer.c
+++ b/kernel/events/ring_buffer.c
@@ -243,30 +243,74 @@ ring_buffer_init(struct ring_buffer *rb, long watermark, int flags)
 	spin_lock_init(&rb->event_lock);
 }
 
+#define PERF_AUX_GFP	(GFP_KERNEL | __GFP_ZERO | __GFP_NOWARN | __GFP_NORETRY)
+
+static struct page *rb_alloc_aux_page(int node, int order)
+{
+	struct page *page;
+
+	if (order > MAX_ORDER)
+		order = MAX_ORDER;
+
+	do {
+		page = alloc_pages_node(node, PERF_AUX_GFP, order);
+	} while (!page && order--);
+
+	if (page && order) {
+		/*
+		 * Communicate the allocation size to the driver
+		 */
+		split_page(page, order);
+		SetPagePrivate(page);
+		set_page_private(page, order);
+	}
+
+	return page;
+}
+
+static void rb_free_aux_page(struct ring_buffer *rb, int idx)
+{
+	struct page *page = virt_to_page(rb->aux_pages[idx]);
+
+	ClearPagePrivate(page);
+	page->mapping = NULL;
+	__free_page(page);
+}
+
 int rb_alloc_aux(struct ring_buffer *rb, struct perf_event *event,
 		 pgoff_t pgoff, int nr_pages, int flags)
 {
 	bool overwrite = !(flags & RING_BUFFER_WRITABLE);
 	int node = (event->cpu == -1) ? -1 : cpu_to_node(event->cpu);
-	int ret = -ENOMEM;
+	int ret = -ENOMEM, max_order = 0;
 
 	if (!has_aux(event))
 		return -ENOTSUPP;
 
+	if (event->pmu->capabilities & PERF_PMU_CAP_AUX_NO_SG)
+		/*
+		 * We need to start with the max_order that fits in nr_pages,
+		 * not the other way around, hence ilog2() and not get_order.
+		 */
+		max_order = ilog2(nr_pages);
+
 	rb->aux_pages = kzalloc_node(nr_pages * sizeof(void *), GFP_KERNEL, node);
 	if (!rb->aux_pages)
 		return -ENOMEM;
 
 	rb->free_aux = event->pmu->free_aux;
-	for (rb->aux_nr_pages = 0; rb->aux_nr_pages < nr_pages;
-	     rb->aux_nr_pages++) {
+	for (rb->aux_nr_pages = 0; rb->aux_nr_pages < nr_pages;) {
 		struct page *page;
+		int last, order;
 
-		page = alloc_pages_node(node, GFP_KERNEL | __GFP_ZERO, 0);
+		order = min(max_order, ilog2(nr_pages - rb->aux_nr_pages));
+		page = rb_alloc_aux_page(node, order);
 		if (!page)
 			goto out;
 
-		rb->aux_pages[rb->aux_nr_pages] = page_address(page);
+		for (last = rb->aux_nr_pages + (1 << page_private(page));
+		     last > rb->aux_nr_pages; rb->aux_nr_pages++)
+			rb->aux_pages[rb->aux_nr_pages] = page_address(page++);
 	}
 
 	rb->aux_priv = event->pmu->setup_aux(event->cpu, rb->aux_pages, nr_pages,
@@ -304,7 +348,7 @@ static void __rb_free_aux(struct ring_buffer *rb)
 	}
 
 	for (pg = 0; pg < rb->aux_nr_pages; pg++)
-		free_page((unsigned long)rb->aux_pages[pg]);
+		rb_free_aux_page(rb, pg);
 
 	kfree(rb->aux_pages);
 	rb->aux_nr_pages = 0;
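A note on the driver side of the new rb_alloc_aux_page(): the comment "Communicate the allocation size to the driver" refers to the page_private() trick. After split_page(), only the first page of each chunk has PagePrivate() set, with the chunk's order stored in its private field (order-0 chunks skip that path entirely). A PMU's setup_aux() callback could therefore recover the chunk boundaries from the aux_pages array along these lines; this is an assumed consumer-side sketch, not code from this patch:

	/*
	 * Assumed driver-side helper (illustrative only): returns the
	 * allocation order of the contiguous chunk starting at this AUX
	 * page, or 0 if it is a single page. Relies on rb_alloc_aux_page()
	 * having stored the order in page_private() of each chunk's
	 * first page.
	 */
	static int aux_chunk_order(void *aux_page_addr)
	{
		struct page *page = virt_to_page(aux_page_addr);

		return PagePrivate(page) ? (int)page_private(page) : 0;
	}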