// SPDX-License-Identifier: GPL-2.0
/* GPLv2, Copyright(c) 2017 Jesper Dangaard Brouer, Red Hat, Inc. */
#include "xdp_sample.bpf.h"

#include <bpf/bpf_tracing.h>
#include <bpf/bpf_core_read.h>
#include <bpf/bpf_helpers.h>

array_map rx_cnt SEC(".maps");
array_map redir_err_cnt SEC(".maps");

const volatile int nr_cpus = 0;
/* These can be set before loading so that redundant comparisons can be DCE'd
 * by the verifier, and only actual matches are tried after loading the tp_btf
 * program. This allows the sample to filter tracepoint stats based on
 * net_device.
 */
const volatile int from_match[32] = {};
const volatile int to_match[32] = {};
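
/* Note: const volatile globals like the three above land in the .rodata
 * section, which libbpf freezes at load time. The verifier then treats
 * their values as known constants, so comparisons against unset entries
 * can be dead-code-eliminated as described above.
 */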
/* Test whether b is a member of set a; if a is the empty set, evaluate to
 * true (i.e. an empty match set means "match everything").
 */
#define IN_SET(a, b)                                                 \
	({                                                           \
		bool __res = !(a)[0];                                \
		for (int i = 0; i < ARRAY_SIZE(a) && (a)[i]; i++) { \
			__res = (a)[i] == (b);                       \
			if (__res)                                   \
				break;                               \
		}                                                    \
		__res;                                               \
	})
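
/* Illustrative example (values assumed, not part of this file): with
 * from_match = { 3, 7 } set before load, IN_SET(from_match, 7) is true and
 * IN_SET(from_match, 2) is false; with from_match left all-zero,
 * IN_SET(from_match, b) is true for every ifindex b, so no filtering
 * takes place.
 */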
/* Map an XDP redirect return value to a stable counter slot: 0 for success,
 * 2..6 for the specific -errno values tracked individually, and 1 for any
 * other (unrecognized) error.
 */
static __always_inline __u32 xdp_get_err_key(int err)
{
	switch (err) {
	case 0:
		return 0;
	case -EINVAL:
		return 2;
	case -ENETDOWN:
		return 3;
	case -EMSGSIZE:
		return 4;
	case -EOPNOTSUPP:
		return 5;
	case -ENOSPC:
		return 6;
	default:
		return 1;
	}
}
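
/* A sketch of the resulting layout, assuming the enum in xdp_sample.bpf.h
 * starts at XDP_REDIRECT_SUCCESS == 0: key 0 counts successful redirects,
 * key 1 collects unrecognized errors, and keys 2-6 track the five specific
 * errnos above. redir_err_cnt is therefore expected to hold at least
 * 7 * nr_cpus entries, one struct datarec per (key, cpu) pair.
 */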
static __always_inline int xdp_redirect_collect_stat(int from, int err)
{
	u32 cpu = bpf_get_smp_processor_id();
	u32 key = XDP_REDIRECT_ERROR;
	struct datarec *rec;
	u32 idx;

	if (!IN_SET(from_match, from))
		return 0;

	key = xdp_get_err_key(err);
	/* Flattened [key][cpu] indexing into the array map */
	idx = key * nr_cpus + cpu;
	rec = bpf_map_lookup_elem(&redir_err_cnt, &idx);
	if (!rec)
		return 0;
	if (key)
		NO_TEAR_INC(rec->dropped);
	else
		NO_TEAR_INC(rec->processed);
	return 0; /* Indicate event was filtered (no further processing) */
	/*
	 * Returning 1 here would allow e.g. a perf-record tracepoint
	 * to see and record these events, but it doesn't work well in
	 * practice, as stopping perf-record also unloads this bpf_prog.
	 * Plus, there is the additional overhead of doing so.
	 */
}
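
/* The four tp_btf programs below attach to the BTF-typed raw tracepoints
 * fired from the kernel's XDP redirect paths. They share the collector
 * above: the _err variants report failed redirects (err != 0) and end up
 * in the dropped counters, while the plain variants normally fire with
 * err == 0 and land in the processed counter.
 */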
SEC("tp_btf/xdp_redirect_err")
int BPF_PROG(tp_xdp_redirect_err, const struct net_device *dev,
	     const struct bpf_prog *xdp, const void *tgt, int err,
	     const struct bpf_map *map, u32 index)
{
	return xdp_redirect_collect_stat(dev->ifindex, err);
}

SEC("tp_btf/xdp_redirect_map_err")
int BPF_PROG(tp_xdp_redirect_map_err, const struct net_device *dev,
	     const struct bpf_prog *xdp, const void *tgt, int err,
	     const struct bpf_map *map, u32 index)
{
	return xdp_redirect_collect_stat(dev->ifindex, err);
}

SEC("tp_btf/xdp_redirect")
int BPF_PROG(tp_xdp_redirect, const struct net_device *dev,
	     const struct bpf_prog *xdp, const void *tgt, int err,
	     const struct bpf_map *map, u32 index)
{
	return xdp_redirect_collect_stat(dev->ifindex, err);
}

SEC("tp_btf/xdp_redirect_map")
int BPF_PROG(tp_xdp_redirect_map, const struct net_device *dev,
	     const struct bpf_prog *xdp, const void *tgt, int err,
	     const struct bpf_map *map, u32 index)
{
	return xdp_redirect_collect_stat(dev->ifindex, err);
}
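
/* Userspace loading sketch (illustrative only; the skeleton names are
 * hypothetical and depend on how this object is built, e.g. via
 * bpftool gen skeleton). The rodata filters must be set between open
 * and load:
 *
 *	skel = xdp_sample_bpf__open();
 *	skel->rodata->nr_cpus = libbpf_num_possible_cpus();
 *	skel->rodata->from_match[0] = ifindex;     // filter on one device
 *	err = xdp_sample_bpf__load(skel);          // rodata frozen here
 *	err = err ?: xdp_sample_bpf__attach(skel); // attaches tp_btf progs
 */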