author		Adrian Hunter <adrian.hunter@intel.com>	2019-06-04 16:00:08 +0300
committer	Arnaldo Carvalho de Melo <acme@redhat.com>	2019-06-10 16:20:12 -0300
commit		da9000ae35027fb7305b8cad0b37df71937ad578 (patch)
tree		2e52e7ea942e9c7de19e204bd41aba1f2e9f975f
parent		e96f7df8807615b96af59f8f8bc6263a7adc27b7 (diff)
perf intel-pt: Add support for lookahead
Implement the lookahead callback to let the decoder access subsequent buffers. intel_pt_lookahead() manages the buffer lifetime and calls the decoder for each buffer until the decoder returns a non-zero value.

Signed-off-by: Adrian Hunter <adrian.hunter@intel.com>
Cc: Jin Yao <yao.jin@linux.intel.com>
Cc: Jiri Olsa <jolsa@redhat.com>
Link: http://lkml.kernel.org/r/20190604130017.31207-11-adrian.hunter@intel.com
Signed-off-by: Arnaldo Carvalho de Melo <acme@redhat.com>
-rw-r--r--	tools/perf/util/intel-pt.c	59
1 file changed, 58 insertions(+), 1 deletion(-)
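
The commit message refers to the callback type intel_pt_lookahead_cb_t, which tools/perf/util/intel-pt.c picks up from the Intel PT decoder headers. The sketch below is not part of the patch; it only illustrates the shape of a callback the decoder side could pass to the lookahead hook: it is handed each subsequent buffer in turn and returns non-zero to stop the walk. The state structure, the field names in it, and the stopping condition are hypothetical.

/* Illustration only -- not part of this patch. */
#include <stddef.h>

#include "intel-pt-decoder/intel-pt-decoder.h"	/* struct intel_pt_buffer */

struct example_lookahead_state {
	size_t bytes_ahead;	/* hypothetical: data seen in later buffers */
	size_t wanted;		/* hypothetical: how much we need to see */
};

/* Matches intel_pt_lookahead_cb_t: called once per non-empty buffer. */
static int example_lookahead_cb(struct intel_pt_buffer *buffer, void *data)
{
	struct example_lookahead_state *state = data;

	state->bytes_ahead += buffer->len;

	/* Returning non-zero tells the lookahead walk to stop. */
	if (state->bytes_ahead >= state->wanted)
		return 1;

	return 0;	/* keep going to the next buffer */
}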
diff --git a/tools/perf/util/intel-pt.c b/tools/perf/util/intel-pt.c
index 4a61c73c9711..3e3a01318b76 100644
--- a/tools/perf/util/intel-pt.c
+++ b/tools/perf/util/intel-pt.c
@@ -278,7 +278,63 @@ static int intel_pt_get_buffer(struct intel_pt_queue *ptq,
return 0;
}
-/* This function assumes data is processed sequentially only */
+/* Do not drop buffers with references - refer intel_pt_get_trace() */
+static void intel_pt_lookahead_drop_buffer(struct intel_pt_queue *ptq,
+ struct auxtrace_buffer *buffer)
+{
+ if (!buffer || buffer == ptq->buffer || buffer == ptq->old_buffer)
+ return;
+
+ auxtrace_buffer__drop_data(buffer);
+}
+
+/* Must be serialized with respect to intel_pt_get_trace() */
+static int intel_pt_lookahead(void *data, intel_pt_lookahead_cb_t cb,
+ void *cb_data)
+{
+ struct intel_pt_queue *ptq = data;
+ struct auxtrace_buffer *buffer = ptq->buffer;
+ struct auxtrace_buffer *old_buffer = ptq->old_buffer;
+ struct auxtrace_queue *queue;
+ int err = 0;
+
+ queue = &ptq->pt->queues.queue_array[ptq->queue_nr];
+
+ while (1) {
+ struct intel_pt_buffer b = { .len = 0 };
+
+ buffer = auxtrace_buffer__next(queue, buffer);
+ if (!buffer)
+ break;
+
+ err = intel_pt_get_buffer(ptq, buffer, old_buffer, &b);
+ if (err)
+ break;
+
+ if (b.len) {
+ intel_pt_lookahead_drop_buffer(ptq, old_buffer);
+ old_buffer = buffer;
+ } else {
+ intel_pt_lookahead_drop_buffer(ptq, buffer);
+ continue;
+ }
+
+ err = cb(&b, cb_data);
+ if (err)
+ break;
+ }
+
+ if (buffer != old_buffer)
+ intel_pt_lookahead_drop_buffer(ptq, buffer);
+ intel_pt_lookahead_drop_buffer(ptq, old_buffer);
+
+ return err;
+}
+
+/*
+ * This function assumes data is processed sequentially only.
+ * Must be serialized with respect to intel_pt_lookahead()
+ */
static int intel_pt_get_trace(struct intel_pt_buffer *b, void *data)
{
struct intel_pt_queue *ptq = data;
@@ -827,6 +883,7 @@ static struct intel_pt_queue *intel_pt_alloc_queue(struct intel_pt *pt,
params.get_trace = intel_pt_get_trace;
params.walk_insn = intel_pt_walk_next_insn;
+ params.lookahead = intel_pt_lookahead;
params.data = ptq;
params.return_compression = intel_pt_return_compression(pt);
params.branch_enable = intel_pt_branch_enable(pt);
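
The second hunk registers intel_pt_lookahead() as the lookahead hook in the decoder parameters. How the decoder invokes that hook is outside this patch; the snippet below is only a rough sketch of the call path, assuming the decoder keeps copies of params.lookahead and params.data in fields of the same names, as it does for its other callbacks. The helper name example_decoder_lookahead and the error handling are made up for illustration.

/*
 * Rough sketch only -- not code from this patch. Assumes decoder->lookahead
 * and decoder->data mirror params.lookahead and params.data.
 */
static int example_decoder_lookahead(struct intel_pt_decoder *decoder,
				     intel_pt_lookahead_cb_t cb, void *cb_data)
{
	/* The hook may not be provided, so tolerate its absence. */
	if (!decoder->lookahead)
		return -1;

	/*
	 * intel_pt_lookahead() (above) walks the subsequent auxtrace buffers,
	 * invoking cb for each non-empty one until cb returns non-zero, and
	 * drops every buffer reference it took before returning.
	 */
	return decoder->lookahead(decoder->data, cb, cb_data);
}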