diff --git a/include/linux/bpf.h b/include/linux/bpf.h
--- a/include/linux/bpf.h
+++ b/include/linux/bpf.h
@@ -653,7 +653,8 @@ static __always_inline unsigned int bpf_dispatcher_nop_func(
#ifdef CONFIG_BPF_JIT
int bpf_trampoline_link_prog(struct bpf_prog *prog, struct bpf_trampoline *tr,
struct bpf_trampoline_batch *batch);
-int bpf_trampoline_unlink_prog(struct bpf_prog *prog, struct bpf_trampoline *tr);
+int bpf_trampoline_unlink_prog(struct bpf_prog *prog, struct bpf_trampoline *tr,
+ struct bpf_trampoline_batch *batch);
struct bpf_trampoline *bpf_trampoline_get(u64 key,
struct bpf_attach_target_info *tgt_info);
void bpf_trampoline_put(struct bpf_trampoline *tr);
diff --git a/include/uapi/linux/bpf.h b/include/uapi/linux/bpf.h
--- a/include/uapi/linux/bpf.h
+++ b/include/uapi/linux/bpf.h
@@ -126,6 +126,7 @@ enum bpf_cmd {
BPF_LINK_DETACH,
BPF_PROG_BIND_MAP,
BPF_TRAMPOLINE_BATCH_ATTACH,
+ BPF_TRAMPOLINE_BATCH_DETACH,
};

enum bpf_map_type {
@@ -632,7 +633,7 @@ union bpf_attr {
__u32 prog_fd;
} raw_tracepoint;

-	struct { /* anonymous struct used by BPF_TRAMPOLINE_BATCH_ATTACH */
+ struct { /* anonymous struct used by BPF_TRAMPOLINE_BATCH_* */
__aligned_u64 in;
__aligned_u64 out;
__u32 count;
diff --git a/kernel/bpf/syscall.c b/kernel/bpf/syscall.c
--- a/kernel/bpf/syscall.c
+++ b/kernel/bpf/syscall.c
@@ -2505,7 +2505,7 @@ static void bpf_tracing_link_release(struct bpf_link *link)
struct bpf_tracing_link *tr_link =
	container_of(link, struct bpf_tracing_link, link);

WARN_ON_ONCE(bpf_trampoline_unlink_prog(link->prog,
- tr_link->trampoline));
+ tr_link->trampoline, NULL));
bpf_trampoline_put(tr_link->trampoline);
@@ -2940,10 +2940,33 @@ static int bpf_trampoline_batch(const union bpf_attr *attr, int cmd)
goto out_clean;
out[i] = fd;
+ } else {
+ struct bpf_tracing_link *tr_link;
+ struct bpf_link *link;
+
+ link = bpf_link_get_from_fd(in[i]);
+ if (IS_ERR(link)) {
+ ret = PTR_ERR(link);
+ goto out_clean;
+ }
+
+ if (link->type != BPF_LINK_TYPE_TRACING) {
+ ret = -EINVAL;
+ bpf_link_put(link);
+ goto out_clean;
+ }
+
+ tr_link = container_of(link, struct bpf_tracing_link, link);
+ out[i] = bpf_trampoline_unlink_prog(link->prog, tr_link->trampoline, batch);
+ bpf_link_put(link);
}
}
- ret = register_ftrace_direct_ips(batch->ips, batch->addrs, batch->idx);
+ if (cmd == BPF_TRAMPOLINE_BATCH_ATTACH)
+ ret = register_ftrace_direct_ips(batch->ips, batch->addrs, batch->idx);
+ else
+ ret = unregister_ftrace_direct_ips(batch->ips, batch->addrs, batch->idx);
+
if (!ret)
WARN_ON_ONCE(copy_to_user(uout, out, count * sizeof(u32)));
@@ -4515,6 +4538,7 @@ SYSCALL_DEFINE3(bpf, int, cmd, union bpf_attr __user *, uattr, unsigned int, siz
err = bpf_raw_tracepoint_open(&attr);
break;
case BPF_TRAMPOLINE_BATCH_ATTACH:
+ case BPF_TRAMPOLINE_BATCH_DETACH:
err = bpf_trampoline_batch(&attr, cmd);
break;
case BPF_BTF_LOAD:
diff --git a/kernel/bpf/trampoline.c b/kernel/bpf/trampoline.c
--- a/kernel/bpf/trampoline.c
+++ b/kernel/bpf/trampoline.c
@@ -164,14 +164,18 @@ static int is_ftrace_location(void *ip)
return 1;
}

-static int unregister_fentry(struct bpf_trampoline *tr, void *old_addr)
+static int unregister_fentry(struct bpf_trampoline *tr, void *old_addr,
+ struct bpf_trampoline_batch *batch)
{
void *ip = tr->func.addr;
int ret;

- if (tr->func.ftrace_managed)
- ret = unregister_ftrace_direct((long)ip, (long)old_addr);
- else
+ if (tr->func.ftrace_managed) {
+ if (batch)
+ ret = bpf_trampoline_batch_add(batch, (long)ip, (long)old_addr);
+ else
+ ret = unregister_ftrace_direct((long)ip, (long)old_addr);
+ } else
ret = bpf_arch_text_poke(ip, BPF_MOD_CALL, old_addr, NULL);
return ret;
}
@@ -248,7 +252,7 @@ static int bpf_trampoline_update(struct bpf_trampoline *tr,
return PTR_ERR(tprogs);
if (total == 0) {
- err = unregister_fentry(tr, old_image);
+ err = unregister_fentry(tr, old_image, batch);
tr->selector = 0;
goto out;
}
@@ -361,13 +365,16 @@ int bpf_trampoline_link_prog(struct bpf_prog *prog, struct bpf_trampoline *tr,
}

/* bpf_trampoline_unlink_prog() should never fail. */
-int bpf_trampoline_unlink_prog(struct bpf_prog *prog, struct bpf_trampoline *tr)
+int bpf_trampoline_unlink_prog(struct bpf_prog *prog, struct bpf_trampoline *tr,
+ struct bpf_trampoline_batch *batch)
{
enum bpf_tramp_prog_type kind;
- int err;
+ int err = 0;

kind = bpf_attach_type_to_tramp(prog);
mutex_lock(&tr->mutex);
+ if (hlist_unhashed(&prog->aux->tramp_hlist))
+ goto out;
if (kind == BPF_TRAMP_REPLACE) {
WARN_ON_ONCE(!tr->extension_prog);
err = bpf_arch_text_poke(tr->func.addr, BPF_MOD_JUMP,
@@ -375,9 +382,9 @@ int bpf_trampoline_unlink_prog(struct bpf_prog *prog, struct bpf_trampoline *tr)
tr->extension_prog = NULL;
goto out;
}
- hlist_del(&prog->aux->tramp_hlist);
+ hlist_del_init(&prog->aux->tramp_hlist);
tr->progs_cnt[kind]--;
- err = bpf_trampoline_update(tr, NULL);
+ err = bpf_trampoline_update(tr, batch);
out:
mutex_unlock(&tr->mutex);
return err;
Adding BPF_TRAMPOLINE_BATCH_DETACH support, which allows detaching
multiple tracing fentry/fexit programs from trampolines within one
syscall.

The new BPF_TRAMPOLINE_BATCH_DETACH syscall command expects the
following data in union bpf_attr:

  struct {
          __aligned_u64   in;
          __aligned_u64   out;
          __u32           count;
  } trampoline_batch;

  in    - pointer to user space array with link descriptors of
          attached bpf programs to detach
  out   - pointer to user space array for the resulting error codes
  count - number of entries in the 'in/out' arrays

Basically the new code gets programs from the 'in' link descriptors
and detaches them the same way the current code does, apart from the
last step that unregisters the probe ip from the trampoline. That is
done at the end with the new unregister_ftrace_direct_ips function.

The resulting error codes are written to the 'out' array and match
the order of the 'in' array link descriptors.

Signed-off-by: Jiri Olsa <jolsa@kernel.org>
---
 include/linux/bpf.h      |  3 ++-
 include/uapi/linux/bpf.h |  3 ++-
 kernel/bpf/syscall.c     | 28 ++++++++++++++++++++++++++--
 kernel/bpf/trampoline.c  | 25 ++++++++++++++++---------
 4 files changed, 46 insertions(+), 13 deletions(-)
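For illustration, here is a minimal userspace sketch of driving the new
command. It is not part of the patch: the helper name is hypothetical,
and it assumes a uapi linux/bpf.h built from this series, which provides
BPF_TRAMPOLINE_BATCH_DETACH and the trampoline_batch fields.

  /* Detach 'count' previously attached fentry/fexit links in one
   * syscall; per-link status lands in errs[] in the same order as
   * link_fds[]. Error handling trimmed for brevity.
   */
  #include <string.h>
  #include <unistd.h>
  #include <sys/syscall.h>
  #include <linux/bpf.h>

  static int trampoline_batch_detach(int *link_fds, __u32 *errs,
                                     __u32 count)
  {
          union bpf_attr attr;

          memset(&attr, 0, sizeof(attr));
          attr.trampoline_batch.in = (__u64)(unsigned long)link_fds;
          attr.trampoline_batch.out = (__u64)(unsigned long)errs;
          attr.trampoline_batch.count = count;

          return syscall(__NR_bpf, BPF_TRAMPOLINE_BATCH_DETACH,
                         &attr, sizeof(attr));
  }

The point of the batching is visible in bpf_trampoline_batch(): each
unlinked program only queues its ip in the batch, and a single
unregister_ftrace_direct_ips call then updates ftrace once for all of
them instead of once per program.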