path: root/tools/include/linux/ring_buffer.h
#ifndef _TOOLS_LINUX_RING_BUFFER_H_
#define _TOOLS_LINUX_RING_BUFFER_H_

#include <asm/barrier.h>
#include <linux/perf_event.h>

/*
 * The contract with the kernel for walking the perf ring buffer
 * from user space requires the following barrier pairing (quote
 * from kernel/events/ring_buffer.c):
 *
 *   Since the mmap() consumer (userspace) can run on a
 *   different CPU:
 *
 *   kernel                             user
 *
 *   if (LOAD ->data_tail) {            LOAD ->data_head
 *                      (A)             smp_rmb()       (C)
 *      STORE $data                     LOAD $data
 *      smp_wmb()       (B)             smp_mb()        (D)
 *      STORE ->data_head               STORE ->data_tail
 *   }
 *
 *   Where A pairs with D, and B pairs with C.
 *
 *   In our case A is a control dependency that separates the
 *   load of ->data_tail from the stores of $data: if ->data_tail
 *   indicates there is no room in the buffer to store $data, we
 *   do not store it.
 *
 *   D needs to be a full barrier since it separates the data
 *   READ from the tail WRITE.
 *
 *   For B a WMB is sufficient since it separates two WRITEs,
 *   and for C an RMB is sufficient since it separates two READs.
 *
 * Note that instead of the explicit barriers in B, C and D we could
 * also use smp_store_release() in B and D, and smp_load_acquire() in C.
 *
 * However, this optimization does not make sense for all
 * kernel-supported architectures: on a fair number of them,
 * smp_load_acquire() would resolve into a READ_ONCE() + smp_mb()
 * pair, and smp_store_release() into an smp_mb() + WRITE_ONCE() pair.
 *
 * Thus, for those architectures smp_wmb() in B and smp_rmb() in C
 * would still be less expensive. For D the cost is either the same
 * or lower; for example, thanks to TSO, x86 can avoid the CPU
 * barrier entirely.
 */
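
/*
 * The two helpers below implement the user-space side of this
 * contract: ring_buffer_read_head() provides C (or the equivalent
 * acquire), and ring_buffer_write_tail() provides D via
 * smp_store_release().
 */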

static inline u64 ring_buffer_read_head(struct perf_event_mmap_page *base)
{
/*
 * Architectures where smp_load_acquire() does not fall back to
 * a READ_ONCE() + smp_mb() pair.
 */
#if defined(__x86_64__) || defined(__aarch64__) || defined(__powerpc64__) || \
    defined(__ia64__) || (defined(__sparc__) && defined(__arch64__)) || defined(__riscv)
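	/* C: the acquire orders the ->data_head load before the $data loads. */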
	return smp_load_acquire(&base->data_head);
#else
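	/* Fallback: a plain load of ->data_head plus a read barrier provides C. */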
	u64 head = READ_ONCE(base->data_head);

	smp_rmb();
	return head;
#endif
}

static inline void ring_buffer_write_tail(struct perf_event_mmap_page *base,
					  u64 tail)
{
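	/* D: orders the preceding $data reads before the ->data_tail store. */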
	smp_store_release(&base->data_tail, tail);
}
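
/*
 * Example (an illustrative sketch, not part of the kernel sources): a
 * minimal user-space consumer loop built on the two helpers above. The
 * buffer layout (metadata page followed by the data pages), the
 * data_size parameter and the process_event() callback are assumptions
 * made for this sketch; it also ignores events that wrap around the
 * end of the buffer.
 */
static inline void ring_buffer_example_consume(struct perf_event_mmap_page *base,
					       void *data, u64 data_size,
					       void (*process_event)(struct perf_event_header *ev))
{
	u64 tail = base->data_tail;
	u64 head = ring_buffer_read_head(base);	/* LOAD ->data_head; C */

	while (tail != head) {
		/* data_size must be a power of two for this masking. */
		struct perf_event_header *ev = data + (tail & (data_size - 1));

		process_event(ev);		/* LOAD $data */
		tail += ev->size;
	}
	ring_buffer_write_tail(base, tail);	/* D; STORE ->data_tail */
}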

#endif /* _TOOLS_LINUX_RING_BUFFER_H_ */