author    | Linus Torvalds <torvalds@linux-foundation.org> | 2024-07-18 12:19:20 -0700
committer | Linus Torvalds <torvalds@linux-foundation.org> | 2024-07-18 12:19:20 -0700
commit    | 91bd008d4e2b4962ecb9a10e40c2fb666b0aeb92 (patch)
tree      | c63456ce31eef7c07c1313715a0e24242bf4c92e /tools/testing/selftests/bpf/bpf_testmod/bpf_testmod.c
parent    | cb273eb7c8390c70a484db6c79a797e377db09b5 (diff)
parent    | c26b1b89b8a9fd8665e79cd798bd970e233772b6 (diff)
Merge tag 'probes-v6.11' of git://git.kernel.org/pub/scm/linux/kernel/git/trace/linux-trace
Pull probes updates from Masami Hiramatsu:
"Uprobes:
- x86/shstk: Make return uprobe work with shadow stack
- Add a uretprobe syscall, which speeds up uretprobes by 10-30%.
This syscall is automatically used from the user-space trampolines
generated for uretprobes. If the syscall is invoked by a normal
user program, it causes SIGILL (see the sketch after the quoted
message below). Note that this is currently only implemented on
x86_64.
(This also has two fixes adjusting the syscall number to avoid a
conflict with the new *attrat syscalls.)
- uprobes/perf: fix user stack traces in the presence of pending
uretprobes. This replaces the uretprobe trampoline address in the
stack trace with the correct return address
- selftests/x86: Add a return uprobe with shadow stack test
- selftests/bpf: Add uretprobe syscall related tests.
- test case for register integrity check
- test case with register changing case
- test case for uretprobe syscall without uprobes (expected to fail)
- test case for uretprobe with shadow stack
- selftests/bpf: add test validating uprobe/uretprobe stack traces
- MAINTAINERS: Add uprobes entry. This does not specify a tree, but
clarifies who maintains and reviews uprobes
Kprobes:
- tracing/kprobes: Test case cleanups.
Replace the redundant WARN_ON_ONCE() + pr_warn() combination with
WARN_ONCE() and remove unnecessary code from the selftest
- tracing/kprobes: Add symbol counting check when module loads.
This checks the uniqueness of the probed symbol in modules; the
same check is already done for kernel symbols.
(This also has a fix for a build error with CONFIG_MODULES=n)
Cleanup:
- Add MODULE_DESCRIPTION() macros for fprobe and kprobe examples"
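For the uretprobe syscall item quoted above, the sketch below (not part of this merge) illustrates the documented failure mode: invoking the syscall from a normal user program, outside a uretprobe trampoline, is expected to end in SIGILL. It assumes an x86_64 system whose installed kernel headers define __NR_uretprobe; the handler and messages are illustrative only.

```c
#include <signal.h>
#include <stdio.h>
#include <sys/syscall.h>
#include <unistd.h>

static void handle_sigill(int sig)
{
	/* Reached when the kernel rejects the out-of-trampoline call. */
	static const char msg[] = "got SIGILL as expected\n";

	(void)sig;
	write(STDOUT_FILENO, msg, sizeof(msg) - 1);
	_exit(0);
}

int main(void)
{
#ifdef __NR_uretprobe
	signal(SIGILL, handle_sigill);
	/* Not issued from a uretprobe trampoline, so the kernel is
	 * expected to send SIGILL instead of returning normally.
	 */
	syscall(__NR_uretprobe);
	fprintf(stderr, "unexpected: uretprobe syscall returned\n");
	return 1;
#else
	fprintf(stderr, "__NR_uretprobe is not defined by these headers\n");
	return 0;
#endif
}
```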
* tag 'probes-v6.11' of git://git.kernel.org/pub/scm/linux/kernel/git/trace/linux-trace:
MAINTAINERS: Add uprobes entry
selftests/bpf: Change uretprobe syscall number in uprobe_syscall test
uprobe: Change uretprobe syscall scope and number
tracing/kprobes: Fix build error when find_module() is not available
tracing/kprobes: Add symbol counting check when module loads
selftests/bpf: add test validating uprobe/uretprobe stack traces
perf,uprobes: fix user stack traces in the presence of pending uretprobes
tracing/kprobe: Remove cleanup code unrelated to selftest
tracing/kprobe: Integrate test warnings into WARN_ONCE
selftests/bpf: Add uretprobe shadow stack test
selftests/bpf: Add uretprobe syscall call from user space test
selftests/bpf: Add uretprobe syscall test for regs changes
selftests/bpf: Add uretprobe syscall test for regs integrity
selftests/x86: Add return uprobe shadow stack test
uprobe: Add uretprobe syscall to speed up return probe
uprobe: Wire up uretprobe system call
x86/shstk: Make return uprobe work with shadow stack
samples: kprobes: add missing MODULE_DESCRIPTION() macros
fprobe: add missing MODULE_DESCRIPTION() macro
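The two MODULE_DESCRIPTION() commits at the end of the list follow a simple pattern. The fragment below is a minimal sketch of that pattern, not code from this merge; the module name and description string are placeholders.

```c
#include <linux/init.h>
#include <linux/module.h>

static int __init sample_init(void)
{
	return 0;	/* nothing to do; the module only illustrates the macro */
}

static void __exit sample_exit(void)
{
}

module_init(sample_init);
module_exit(sample_exit);

/* The cleanup commits add lines like this so `modinfo` can report
 * a human-readable description for the sample modules.
 */
MODULE_DESCRIPTION("Placeholder description for an example probe module");
MODULE_LICENSE("GPL");
```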
Diffstat (limited to 'tools/testing/selftests/bpf/bpf_testmod/bpf_testmod.c')
-rw-r--r-- | tools/testing/selftests/bpf/bpf_testmod/bpf_testmod.c | 123 |
1 file changed, 122 insertions, 1 deletion
diff --git a/tools/testing/selftests/bpf/bpf_testmod/bpf_testmod.c b/tools/testing/selftests/bpf/bpf_testmod/bpf_testmod.c
index f8962a1dd397..fd28c1157bd3 100644
--- a/tools/testing/selftests/bpf/bpf_testmod/bpf_testmod.c
+++ b/tools/testing/selftests/bpf/bpf_testmod/bpf_testmod.c
@@ -18,6 +18,7 @@
 #include <linux/in6.h>
 #include <linux/un.h>
 #include <net/sock.h>
+#include <linux/namei.h>
 #include "bpf_testmod.h"
 #include "bpf_testmod_kfunc.h"
 
@@ -413,6 +414,119 @@ static struct bin_attribute bin_attr_bpf_testmod_file __ro_after_init = {
 	.write = bpf_testmod_test_write,
 };
 
+/* bpf_testmod_uprobe sysfs attribute is so far enabled for x86_64 only,
+ * please see test_uretprobe_regs_change test
+ */
+#ifdef __x86_64__
+
+static int
+uprobe_ret_handler(struct uprobe_consumer *self, unsigned long func,
+		   struct pt_regs *regs)
+
+{
+	regs->ax  = 0x12345678deadbeef;
+	regs->cx  = 0x87654321feebdaed;
+	regs->r11 = (u64) -1;
+	return true;
+}
+
+struct testmod_uprobe {
+	struct path path;
+	loff_t offset;
+	struct uprobe_consumer consumer;
+};
+
+static DEFINE_MUTEX(testmod_uprobe_mutex);
+
+static struct testmod_uprobe uprobe = {
+	.consumer.ret_handler = uprobe_ret_handler,
+};
+
+static int testmod_register_uprobe(loff_t offset)
+{
+	int err = -EBUSY;
+
+	if (uprobe.offset)
+		return -EBUSY;
+
+	mutex_lock(&testmod_uprobe_mutex);
+
+	if (uprobe.offset)
+		goto out;
+
+	err = kern_path("/proc/self/exe", LOOKUP_FOLLOW, &uprobe.path);
+	if (err)
+		goto out;
+
+	err = uprobe_register_refctr(d_real_inode(uprobe.path.dentry),
+				     offset, 0, &uprobe.consumer);
+	if (err)
+		path_put(&uprobe.path);
+	else
+		uprobe.offset = offset;
+
+out:
+	mutex_unlock(&testmod_uprobe_mutex);
+	return err;
+}
+
+static void testmod_unregister_uprobe(void)
+{
+	mutex_lock(&testmod_uprobe_mutex);
+
+	if (uprobe.offset) {
+		uprobe_unregister(d_real_inode(uprobe.path.dentry),
+				  uprobe.offset, &uprobe.consumer);
+		uprobe.offset = 0;
+	}
+
+	mutex_unlock(&testmod_uprobe_mutex);
+}
+
+static ssize_t
+bpf_testmod_uprobe_write(struct file *file, struct kobject *kobj,
+			 struct bin_attribute *bin_attr,
+			 char *buf, loff_t off, size_t len)
+{
+	unsigned long offset = 0;
+	int err = 0;
+
+	if (kstrtoul(buf, 0, &offset))
+		return -EINVAL;
+
+	if (offset)
+		err = testmod_register_uprobe(offset);
+	else
+		testmod_unregister_uprobe();
+
+	return err ?: strlen(buf);
+}
+
+static struct bin_attribute bin_attr_bpf_testmod_uprobe_file __ro_after_init = {
+	.attr = { .name = "bpf_testmod_uprobe", .mode = 0666, },
+	.write = bpf_testmod_uprobe_write,
+};
+
+static int register_bpf_testmod_uprobe(void)
+{
+	return sysfs_create_bin_file(kernel_kobj, &bin_attr_bpf_testmod_uprobe_file);
+}
+
+static void unregister_bpf_testmod_uprobe(void)
+{
+	testmod_unregister_uprobe();
+	sysfs_remove_bin_file(kernel_kobj, &bin_attr_bpf_testmod_uprobe_file);
+}
+
+#else
+static int register_bpf_testmod_uprobe(void)
+{
+	return 0;
+}
+
+static void unregister_bpf_testmod_uprobe(void) { }
+#endif
+
 BTF_KFUNCS_START(bpf_testmod_common_kfunc_ids)
 BTF_ID_FLAGS(func, bpf_iter_testmod_seq_new, KF_ITER_NEW)
 BTF_ID_FLAGS(func, bpf_iter_testmod_seq_next, KF_ITER_NEXT | KF_RET_NULL)
@@ -983,7 +1097,13 @@ static int bpf_testmod_init(void)
 		return -EINVAL;
 	sock = NULL;
 	mutex_init(&sock_lock);
-	return sysfs_create_bin_file(kernel_kobj, &bin_attr_bpf_testmod_file);
+	ret = sysfs_create_bin_file(kernel_kobj, &bin_attr_bpf_testmod_file);
+	if (ret < 0)
+		return ret;
+	ret = register_bpf_testmod_uprobe();
+	if (ret < 0)
+		return ret;
+	return 0;
 }
 
 static void bpf_testmod_exit(void)
@@ -998,6 +1118,7 @@ static void bpf_testmod_exit(void)
 
 	bpf_kfunc_close_sock();
 	sysfs_remove_bin_file(kernel_kobj, &bin_attr_bpf_testmod_file);
+	unregister_bpf_testmod_uprobe();
 }
 
 module_init(bpf_testmod_init);
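As a usage note for the diff above: the new bin attribute is created on kernel_kobj, so it should appear as /sys/kernel/bpf_testmod_uprobe once bpf_testmod is loaded. The snippet below is a hypothetical user-space driver for it, not taken from the selftests: writing a non-zero offset registers a uretprobe on the writer's own /proc/self/exe at that offset, and writing 0 unregisters it. The 0x1234 offset is a placeholder; the real test computes the offset of a probed function inside its own binary.

```c
#include <fcntl.h>
#include <stdio.h>
#include <unistd.h>

/* Hypothetical path derived from the use of kernel_kobj above. */
#define UPROBE_SYSFS_PATH "/sys/kernel/bpf_testmod_uprobe"

static int write_uprobe_offset(unsigned long offset)
{
	char buf[32];
	int fd, len, err = 0;

	fd = open(UPROBE_SYSFS_PATH, O_WRONLY);
	if (fd < 0)
		return -1;

	/* Writing "0" unregisters, any other value registers at that offset. */
	len = snprintf(buf, sizeof(buf), "%lu", offset);
	if (write(fd, buf, len) != len)
		err = -1;

	close(fd);
	return err;
}

int main(void)
{
	/* 0x1234 is a placeholder offset used only for illustration. */
	if (write_uprobe_offset(0x1234))
		perror("register uretprobe");

	/* ... call the probed function here so the uretprobe can fire ... */

	if (write_uprobe_offset(0))
		perror("unregister uretprobe");

	return 0;
}
```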