Diffstat (limited to 'tools/perf/util/bpf_skel/off_cpu.bpf.c')
-rw-r--r--  tools/perf/util/bpf_skel/off_cpu.bpf.c  38
1 file changed, 37 insertions(+), 1 deletion(-)
diff --git a/tools/perf/util/bpf_skel/off_cpu.bpf.c b/tools/perf/util/bpf_skel/off_cpu.bpf.c
index cc6d7fd55118..c4ba2bcf179f 100644
--- a/tools/perf/util/bpf_skel/off_cpu.bpf.c
+++ b/tools/perf/util/bpf_skel/off_cpu.bpf.c
@@ -12,6 +12,9 @@
#define TASK_INTERRUPTIBLE 0x0001
#define TASK_UNINTERRUPTIBLE 0x0002
+/* create a new thread */
+#define CLONE_THREAD 0x10000
+
#define MAX_STACKS 32
#define MAX_ENTRIES 102400
@@ -85,6 +88,7 @@ int enabled = 0;
int has_cpu = 0;
int has_task = 0;
int has_cgroup = 0;
+int uses_tgid = 0;
const volatile bool has_prev_state = false;
const volatile bool needs_cgroup = false;
@@ -144,7 +148,12 @@ static inline int can_record(struct task_struct *t, int state)
if (has_task) {
__u8 *ok;
- __u32 pid = t->pid;
+ __u32 pid;
+
+ if (uses_tgid)
+ pid = t->tgid;
+ else
+ pid = t->pid;
ok = bpf_map_lookup_elem(&task_filter, &pid);
if (!ok)
@@ -214,6 +223,33 @@ next:
return 0;
}
+SEC("tp_btf/task_newtask")
+int on_newtask(u64 *ctx)
+{
+ struct task_struct *task;
+ u64 clone_flags;
+ u32 pid;
+ u8 val = 1;
+
+ if (!uses_tgid)
+ return 0;
+
+ task = (struct task_struct *)bpf_get_current_task();
+
+ pid = BPF_CORE_READ(task, tgid);
+ if (!bpf_map_lookup_elem(&task_filter, &pid))
+ return 0;
+
+ task = (struct task_struct *)ctx[0];
+ clone_flags = ctx[1];
+
+ pid = task->tgid;
+ if (!(clone_flags & CLONE_THREAD))
+ bpf_map_update_elem(&task_filter, &pid, &val, BPF_NOEXIST);
+
+ return 0;
+}
+
SEC("tp_btf/sched_switch")
int on_switch(u64 *ctx)
{
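
For reference, on_newtask() only mirrors a parent's filter entry into newly
forked processes when user space has already seeded the task_filter map with a
TGID and set uses_tgid. A minimal, hypothetical loader-side sketch of that
setup follows (the real setup lives in tools/perf/util/bpf_off_cpu.c and may
differ; the skeleton type and accessor names are assumed to follow the usual
bpftool-generated naming for off_cpu.bpf.c):

/*
 * Hypothetical sketch, not the actual perf loader code: prime the BPF
 * globals and the task_filter map so can_record()/on_newtask() filter
 * by process (TGID) rather than by individual thread.
 */
#include <bpf/bpf.h>
#include <bpf/libbpf.h>
#include "off_cpu.skel.h"	/* assumed bpftool-generated skeleton */

static int filter_by_process(struct off_cpu_bpf *skel, __u32 tgid)
{
	__u8 val = 1;
	int fd = bpf_map__fd(skel->maps.task_filter);

	skel->bss->has_task = 1;
	skel->bss->uses_tgid = 1;	/* can_record() matches t->tgid */

	/* processes forked later by this TGID are added by on_newtask() */
	return bpf_map_update_elem(fd, &tgid, &val, BPF_ANY);
}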