Diffstat (limited to 'tools/testing/selftests/bpf/prog_tests/uprobe_multi_test.c')
-rw-r--r--  tools/testing/selftests/bpf/prog_tests/uprobe_multi_test.c  338
1 file changed, 299 insertions(+), 39 deletions(-)
diff --git a/tools/testing/selftests/bpf/prog_tests/uprobe_multi_test.c b/tools/testing/selftests/bpf/prog_tests/uprobe_multi_test.c
index 616441fdd7f2..2ee17ef1dae2 100644
--- a/tools/testing/selftests/bpf/prog_tests/uprobe_multi_test.c
+++ b/tools/testing/selftests/bpf/prog_tests/uprobe_multi_test.c
@@ -8,6 +8,11 @@
#include "uprobe_multi_usdt.skel.h"
#include "uprobe_multi_consumers.skel.h"
#include "uprobe_multi_pid_filter.skel.h"
+#include "uprobe_multi_session.skel.h"
+#include "uprobe_multi_session_single.skel.h"
+#include "uprobe_multi_session_cookie.skel.h"
+#include "uprobe_multi_session_recursive.skel.h"
+#include "uprobe_multi_verifier.skel.h"
#include "bpf/libbpf_internal.h"
#include "testing_helpers.h"
#include "../sdt.h"
@@ -34,6 +39,12 @@ noinline void usdt_trigger(void)
STAP_PROBE(test, pid_filter_usdt);
}
+noinline void uprobe_session_recursive(int i)
+{
+ if (i)
+ uprobe_session_recursive(i - 1);
+}
+
struct child {
int go[2];
int c2p[2]; /* child -> parent channel */
@@ -778,7 +789,7 @@ get_link(struct uprobe_multi_consumers *skel, int link)
}
}
-static int uprobe_attach(struct uprobe_multi_consumers *skel, int idx)
+static int uprobe_attach(struct uprobe_multi_consumers *skel, int idx, unsigned long offset)
{
struct bpf_program *prog = get_program(skel, idx);
struct bpf_link **link = get_link(skel, idx);
@@ -787,15 +798,19 @@ static int uprobe_attach(struct uprobe_multi_consumers *skel, int idx)
if (!prog || !link)
return -1;
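+ /* attach at a single, caller-resolved offset instead of looking up the symbol by name */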
+ opts.offsets = &offset;
+ opts.cnt = 1;
+
/*
- * bit/prog: 0,1 uprobe entry
- * bit/prog: 2,3 uprobe return
+ * bit/prog: 0 uprobe entry
+ * bit/prog: 1 uprobe return
+ * bit/prog: 2 uprobe session with return
+ * bit/prog: 3 uprobe session without return
*/
- opts.retprobe = idx == 2 || idx == 3;
+ opts.retprobe = idx == 1;
+ opts.session = idx == 2 || idx == 3;
- *link = bpf_program__attach_uprobe_multi(prog, 0, "/proc/self/exe",
- "uprobe_consumer_test",
- &opts);
+ *link = bpf_program__attach_uprobe_multi(prog, 0, "/proc/self/exe", NULL, &opts);
if (!ASSERT_OK_PTR(*link, "bpf_program__attach_uprobe_multi"))
return -1;
return 0;
@@ -816,7 +831,8 @@ static bool test_bit(int bit, unsigned long val)
noinline int
uprobe_consumer_test(struct uprobe_multi_consumers *skel,
- unsigned long before, unsigned long after)
+ unsigned long before, unsigned long after,
+ unsigned long offset)
{
int idx;
@@ -829,15 +845,43 @@ uprobe_consumer_test(struct uprobe_multi_consumers *skel,
/* ... and attach all new programs in 'after' state */
for (idx = 0; idx < 4; idx++) {
if (!test_bit(idx, before) && test_bit(idx, after)) {
- if (!ASSERT_OK(uprobe_attach(skel, idx), "uprobe_attach_after"))
+ if (!ASSERT_OK(uprobe_attach(skel, idx, offset), "uprobe_attach_after"))
return -1;
}
}
return 0;
}
+/*
+ * We generate 16 consumer_testX functions that will have a uprobe installed on
+ * them and will be called in separate threads. All function pointers are stored
+ * in the "consumers" section and each thread picks one function based on its index.
+ */
+
+extern const void *__start_consumers;
+
+#define __CONSUMER_TEST(func) \
+noinline int func(struct uprobe_multi_consumers *skel, unsigned long before, \
+ unsigned long after, unsigned long offset) \
+{ \
+ return uprobe_consumer_test(skel, before, after, offset); \
+} \
+void *__ ## func __used __attribute__((section("consumers"))) = (void *) func;
+
+#define CONSUMER_TEST(func) __CONSUMER_TEST(func)
+
+#define C1 CONSUMER_TEST(__PASTE(consumer_test, __COUNTER__))
+#define C4 C1 C1 C1 C1
+#define C16 C4 C4 C4 C4
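+/*
+ * C16 expands into 16 consumer_test<N> functions (with consecutive __COUNTER__
+ * values), each storing its address in the "consumers" section.
+ */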
+
+C16
+
+typedef int (*test_t)(struct uprobe_multi_consumers *, unsigned long,
+ unsigned long, unsigned long);
+
static int consumer_test(struct uprobe_multi_consumers *skel,
- unsigned long before, unsigned long after)
+ unsigned long before, unsigned long after,
+ test_t test, unsigned long offset)
{
int err, idx, ret = -1;
@@ -846,41 +890,65 @@ static int consumer_test(struct uprobe_multi_consumers *skel,
/* attach uprobe for every idx set in 'before' */
for (idx = 0; idx < 4; idx++) {
if (test_bit(idx, before)) {
- if (!ASSERT_OK(uprobe_attach(skel, idx), "uprobe_attach_before"))
+ if (!ASSERT_OK(uprobe_attach(skel, idx, offset), "uprobe_attach_before"))
goto cleanup;
}
}
- err = uprobe_consumer_test(skel, before, after);
+ err = test(skel, before, after, offset);
if (!ASSERT_EQ(err, 0, "uprobe_consumer_test"))
goto cleanup;
for (idx = 0; idx < 4; idx++) {
+ bool uret_stays, uret_survives;
const char *fmt = "BUG";
__u64 val = 0;
- if (idx < 2) {
+ switch (idx) {
+ case 0:
/*
* uprobe entry
* +1 if defined in 'before'
*/
if (test_bit(idx, before))
val++;
- fmt = "prog 0/1: uprobe";
- } else {
+ fmt = "prog 0: uprobe";
+ break;
+ case 1:
/*
* To trigger the uretprobe consumer, the uretprobe under test either stayed attached from
* before to after (uret_stays + test_bit) or the uretprobe instance survived and
* we have a uretprobe active in after (uret_survives + test_bit)
*/
-
- bool uret_stays = before & after & 0b1100;
- bool uret_survives = (before & 0b1100) && (after & 0b1100) && (before & 0b0011);
+ uret_stays = before & after & 0b0110;
+ uret_survives = ((before & 0b0110) && (after & 0b0110) && (before & 0b1001));
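+ /*
+ * 0b0110 covers the return-side consumers (prog 1 uretprobe, prog 2 session with return);
+ * 0b1001 covers the entry-only ones (prog 0 uprobe entry, prog 3 session without return).
+ */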
if ((uret_stays || uret_survives) && test_bit(idx, after))
val++;
-
- fmt = "idx 2/3: uretprobe";
+ fmt = "prog 1: uretprobe";
+ break;
+ case 2:
+ /*
+ * session with return
+ * +1 if defined in 'before'
+ * +1 if defined in 'after'
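+ * (the return part fires only if the consumer attached at entry is still attached in 'after')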
+ */
+ if (test_bit(idx, before)) {
+ val++;
+ if (test_bit(idx, after))
+ val++;
+ }
+ fmt = "prog 2: session with return";
+ break;
+ case 3:
+ /*
+ * session without return
+ * +1 if defined in 'before'
+ */
+ if (test_bit(idx, before))
+ val++;
+ fmt = "prog 3: session with NO return";
+ break;
}
if (!ASSERT_EQ(skel->bss->uprobe_result[idx], val, fmt))
@@ -896,21 +964,55 @@ cleanup:
return ret;
}
-static void test_consumers(void)
+#define CONSUMER_MAX 16
+
+/*
+ * Each thread runs 1/16 of the load by running the test for a single
+ * 'before' value (based on the thread index) and the full range of
+ * 'after' values.
+ */
+static void *consumer_thread(void *arg)
{
+ unsigned long idx = (unsigned long) arg;
struct uprobe_multi_consumers *skel;
- int before, after;
+ unsigned long offset;
+ const void *func;
+ int after;
skel = uprobe_multi_consumers__open_and_load();
if (!ASSERT_OK_PTR(skel, "uprobe_multi_consumers__open_and_load"))
- return;
+ return NULL;
+
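+ /* pick this thread's consumer_test<N> function from the "consumers" section */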
+ func = *((&__start_consumers) + idx);
+
+ offset = get_uprobe_offset(func);
+ if (!ASSERT_GE(offset, 0, "uprobe_offset"))
+ goto out;
+
+ for (after = 0; after < CONSUMER_MAX; after++)
+ if (consumer_test(skel, idx, after, func, offset))
+ goto out;
+
+out:
+ uprobe_multi_consumers__destroy(skel);
+ return NULL;
+}
+
+
+static void test_consumers(void)
+{
+ pthread_t pt[CONSUMER_MAX];
+ unsigned long idx;
+ int err;
/*
* The idea of this test is to try all possible combinations of
* uprobes consumers attached on single function.
*
- * - 2 uprobe entry consumer
- * - 2 uprobe exit consumers
+ * - 1 uprobe entry consumer
+ * - 1 uprobe exit consumer
+ * - 1 uprobe session with return
+ * - 1 uprobe session without return
*
* The test uses 4 uprobes attached on single function, but that
* translates into single uprobe with 4 consumers in kernel.
@@ -918,39 +1020,38 @@ static void test_consumers(void)
* The before/after values present the state of attached consumers
* before and after the probed function:
*
- * bit/prog 0,1 : uprobe entry
- * bit/prog 2,3 : uprobe return
+ * bit/prog 0 : uprobe entry
+ * bit/prog 1 : uprobe return
*
* For example for:
*
- * before = 0b0101
- * after = 0b0110
+ * before = 0b01
+ * after = 0b10
*
* it means that before we call 'uprobe_consumer_test' we attach
* uprobes defined in 'before' value:
*
- * - bit/prog 0: uprobe entry
- * - bit/prog 2: uprobe return
+ * - bit/prog 0: uprobe entry
*
* uprobe_consumer_test is called and inside it we attach and detach
* uprobes based on 'after' value:
*
- * - bit/prog 0: stays untouched
- * - bit/prog 2: uprobe return is detached
+ * - bit/prog 0: is detached
+ * - bit/prog 1: is attached
*
* uprobe_consumer_test returns and we check counters values increased
* by bpf programs on each uprobe to match the expected count based on
* before/after bits.
*/
- for (before = 0; before < 16; before++) {
- for (after = 0; after < 16; after++)
- if (consumer_test(skel, before, after))
- goto out;
+ for (idx = 0; idx < CONSUMER_MAX; idx++) {
+ err = pthread_create(&pt[idx], NULL, consumer_thread, (void *) idx);
+ if (!ASSERT_OK(err, "pthread_create"))
+ break;
}
-out:
- uprobe_multi_consumers__destroy(skel);
+ while (idx)
+ pthread_join(pt[--idx], NULL);
}
static struct bpf_program *uprobe_multi_program(struct uprobe_multi_pid_filter *skel, int idx)
@@ -1017,6 +1118,156 @@ static void test_pid_filter_process(bool clone_vm)
uprobe_multi_pid_filter__destroy(skel);
}
+static void test_session_skel_api(void)
+{
+ struct uprobe_multi_session *skel = NULL;
+ LIBBPF_OPTS(bpf_kprobe_multi_opts, opts);
+ struct bpf_link *link = NULL;
+ int err;
+
+ skel = uprobe_multi_session__open_and_load();
+ if (!ASSERT_OK_PTR(skel, "uprobe_multi_session__open_and_load"))
+ goto cleanup;
+
+ skel->bss->pid = getpid();
+ skel->bss->user_ptr = test_data;
+
+ err = uprobe_multi_session__attach(skel);
+ if (!ASSERT_OK(err, "uprobe_multi_session__attach"))
+ goto cleanup;
+
+ /* trigger all probes */
+ skel->bss->uprobe_multi_func_1_addr = (__u64) uprobe_multi_func_1;
+ skel->bss->uprobe_multi_func_2_addr = (__u64) uprobe_multi_func_2;
+ skel->bss->uprobe_multi_func_3_addr = (__u64) uprobe_multi_func_3;
+
+ uprobe_multi_func_1();
+ uprobe_multi_func_2();
+ uprobe_multi_func_3();
+
+ /*
+ * We expect 2 for uprobe_multi_func_2 because it runs both entry and return probes,
+ * while uprobe_multi_func_[13] run just the entry probe. All expected numbers are
+ * doubled, because we run an extra test for the sleepable session.
+ */
+ ASSERT_EQ(skel->bss->uprobe_session_result[0], 2, "uprobe_multi_func_1_result");
+ ASSERT_EQ(skel->bss->uprobe_session_result[1], 4, "uprobe_multi_func_2_result");
+ ASSERT_EQ(skel->bss->uprobe_session_result[2], 2, "uprobe_multi_func_3_result");
+
+ /* We expect an increase of 3 entry and 1 return session calls -> 4 in total */
+ ASSERT_EQ(skel->bss->uprobe_multi_sleep_result, 4, "uprobe_multi_sleep_result");
+
+cleanup:
+ bpf_link__destroy(link);
+ uprobe_multi_session__destroy(skel);
+}
+
+static void test_session_single_skel_api(void)
+{
+ struct uprobe_multi_session_single *skel = NULL;
+ LIBBPF_OPTS(bpf_kprobe_multi_opts, opts);
+ int err;
+
+ skel = uprobe_multi_session_single__open_and_load();
+ if (!ASSERT_OK_PTR(skel, "uprobe_multi_session_single__open_and_load"))
+ goto cleanup;
+
+ skel->bss->pid = getpid();
+
+ err = uprobe_multi_session_single__attach(skel);
+ if (!ASSERT_OK(err, "uprobe_multi_session_single__attach"))
+ goto cleanup;
+
+ uprobe_multi_func_1();
+
+ /*
+ * We expect consumers 0 and 2 to trigger just the entry handler (value 1)
+ * and consumer 1 to hit both the entry and return handlers (value 2).
+ */
+ ASSERT_EQ(skel->bss->uprobe_session_result[0], 1, "uprobe_session_result_0");
+ ASSERT_EQ(skel->bss->uprobe_session_result[1], 2, "uprobe_session_result_1");
+ ASSERT_EQ(skel->bss->uprobe_session_result[2], 1, "uprobe_session_result_2");
+
+cleanup:
+ uprobe_multi_session_single__destroy(skel);
+}
+
+static void test_session_cookie_skel_api(void)
+{
+ struct uprobe_multi_session_cookie *skel = NULL;
+ int err;
+
+ skel = uprobe_multi_session_cookie__open_and_load();
+ if (!ASSERT_OK_PTR(skel, "uprobe_multi_session_cookie__open_and_load"))
+ goto cleanup;
+
+ skel->bss->pid = getpid();
+
+ err = uprobe_multi_session_cookie__attach(skel);
+ if (!ASSERT_OK(err, "uprobe_multi_session_cookie__attach"))
+ goto cleanup;
+
+ /* trigger all probes */
+ uprobe_multi_func_1();
+ uprobe_multi_func_2();
+ uprobe_multi_func_3();
+
+ ASSERT_EQ(skel->bss->test_uprobe_1_result, 1, "test_uprobe_1_result");
+ ASSERT_EQ(skel->bss->test_uprobe_2_result, 2, "test_uprobe_2_result");
+ ASSERT_EQ(skel->bss->test_uprobe_3_result, 3, "test_uprobe_3_result");
+
+cleanup:
+ uprobe_multi_session_cookie__destroy(skel);
+}
+
+static void test_session_recursive_skel_api(void)
+{
+ struct uprobe_multi_session_recursive *skel = NULL;
+ int i, err;
+
+ skel = uprobe_multi_session_recursive__open_and_load();
+ if (!ASSERT_OK_PTR(skel, "uprobe_multi_session_recursive__open_and_load"))
+ goto cleanup;
+
+ skel->bss->pid = getpid();
+
+ err = uprobe_multi_session_recursive__attach(skel);
+ if (!ASSERT_OK(err, "uprobe_multi_session_recursive__attach"))
+ goto cleanup;
+
+ for (i = 0; i < ARRAY_SIZE(skel->bss->test_uprobe_cookie_entry); i++)
+ skel->bss->test_uprobe_cookie_entry[i] = i + 1;
+
+ uprobe_session_recursive(5);
+
+ /*
+ * entry uprobe:
+ * uprobe_session_recursive(5) { *cookie = 1, return 0
+ * uprobe_session_recursive(4) { *cookie = 2, return 1
+ * uprobe_session_recursive(3) { *cookie = 3, return 0
+ * uprobe_session_recursive(2) { *cookie = 4, return 1
+ * uprobe_session_recursive(1) { *cookie = 5, return 0
+ * uprobe_session_recursive(0) { *cookie = 6, return 1
+ * return uprobe:
+ * } i = 0 not executed
+ * } i = 1 test_uprobe_cookie_return[0] = 5
+ * } i = 2 not executed
+ * } i = 3 test_uprobe_cookie_return[1] = 3
+ * } i = 4 not executed
+ * } i = 5 test_uprobe_cookie_return[2] = 1
+ */
+
+ ASSERT_EQ(skel->bss->idx_entry, 6, "idx_entry");
+ ASSERT_EQ(skel->bss->idx_return, 3, "idx_return");
+
+ ASSERT_EQ(skel->bss->test_uprobe_cookie_return[0], 5, "test_uprobe_cookie_return[0]");
+ ASSERT_EQ(skel->bss->test_uprobe_cookie_return[1], 3, "test_uprobe_cookie_return[1]");
+ ASSERT_EQ(skel->bss->test_uprobe_cookie_return[2], 1, "test_uprobe_cookie_return[2]");
+
+cleanup:
+ uprobe_multi_session_recursive__destroy(skel);
+}
+
static void test_bench_attach_uprobe(void)
{
long attach_start_ns = 0, attach_end_ns = 0;
@@ -1113,4 +1364,13 @@ void test_uprobe_multi_test(void)
test_pid_filter_process(false);
if (test__start_subtest("filter_clone_vm"))
test_pid_filter_process(true);
+ if (test__start_subtest("session"))
+ test_session_skel_api();
+ if (test__start_subtest("session_single"))
+ test_session_single_skel_api();
+ if (test__start_subtest("session_cookie"))
+ test_session_cookie_skel_api();
+ if (test__start_subtest("session_cookie_recursive"))
+ test_session_recursive_skel_api();
+ RUN_TESTS(uprobe_multi_verifier);
}