--- a/include/linux/bpf-cgroup.h
+++ b/include/linux/bpf-cgroup.h
@@ -427,6 +427,8 @@ int cgroup_bpf_prog_query(const union bpf_attr *attr,
const struct bpf_func_proto *
cgroup_common_func_proto(enum bpf_func_id func_id, const struct bpf_prog *prog);
+const struct bpf_func_proto *
+cgroup_current_func_proto(enum bpf_func_id func_id, const struct bpf_prog *prog);
#else
static inline int cgroup_bpf_inherit(struct cgroup *cgrp) { return 0; }
@@ -463,6 +465,12 @@ cgroup_common_func_proto(enum bpf_func_id func_id, const struct bpf_prog *prog)
return NULL;
}
+static inline const struct bpf_func_proto *
+cgroup_current_func_proto(enum bpf_func_id func_id, const struct bpf_prog *prog)
+{
+	return NULL;
+}
+
static inline int bpf_cgroup_storage_assign(struct bpf_prog_aux *aux,
struct bpf_map *map) { return 0; }
static inline struct bpf_cgroup_storage *bpf_cgroup_storage_alloc(
--- a/kernel/bpf/cgroup.c
+++ b/kernel/bpf/cgroup.c
@@ -1653,6 +1653,10 @@ cgroup_dev_func_proto(enum bpf_func_id func_id, const struct bpf_prog *prog)
if (func_proto)
return func_proto;
+	func_proto = cgroup_current_func_proto(func_id, prog);
+	if (func_proto)
+		return func_proto;
+
switch (func_id) {
case BPF_FUNC_perf_event_output:
return &bpf_event_output_data_proto;
@@ -2200,6 +2204,10 @@ sysctl_func_proto(enum bpf_func_id func_id, const struct bpf_prog *prog)
if (func_proto)
return func_proto;
+	func_proto = cgroup_current_func_proto(func_id, prog);
+	if (func_proto)
+		return func_proto;
+
switch (func_id) {
case BPF_FUNC_sysctl_get_name:
return &bpf_sysctl_get_name_proto;
@@ -2343,6 +2351,10 @@ cg_sockopt_func_proto(enum bpf_func_id func_id, const struct bpf_prog *prog)
if (func_proto)
return func_proto;
+	func_proto = cgroup_current_func_proto(func_id, prog);
+	if (func_proto)
+		return func_proto;
+
switch (func_id) {
#ifdef CONFIG_NET
case BPF_FUNC_get_netns_cookie:
@@ -2589,3 +2601,16 @@ cgroup_common_func_proto(enum bpf_func_id func_id, const struct bpf_prog *prog)
return NULL;
}
}
+
+const struct bpf_func_proto *
+cgroup_current_func_proto(enum bpf_func_id func_id, const struct bpf_prog *prog)
+{
+	switch (func_id) {
+#ifdef CONFIG_CGROUP_NET_CLASSID
+	case BPF_FUNC_get_cgroup_classid:
+		return &bpf_get_cgroup_classid_curr_proto;
+#endif
+	default:
+		return NULL;
+	}
+}
--- a/kernel/bpf/helpers.c
+++ b/kernel/bpf/helpers.c
@@ -2024,10 +2024,6 @@ bpf_base_func_proto(enum bpf_func_id func_id, const struct bpf_prog *prog)
return &bpf_get_current_ancestor_cgroup_id_proto;
case BPF_FUNC_current_task_under_cgroup:
return &bpf_current_task_under_cgroup_proto;
-#endif
-#ifdef CONFIG_CGROUP_NET_CLASSID
-	case BPF_FUNC_get_cgroup_classid:
-		return &bpf_get_cgroup_classid_curr_proto;
#endif
case BPF_FUNC_task_storage_get:
if (bpf_prog_check_recur(prog))
A previous commit expanded the usage scope of bpf_get_cgroup_classid() to
all contexts (see the Fixes tag below), but this was inappropriate.

First, syzkaller reported a bug [1].

Second, the helper takes an skb as its argument, yet its implementation
varies across bpf prog types. For example, in sock_filter and sock_addr
programs it retrieves the classid from the current task's context
(bpf_get_cgroup_classid_curr_proto) instead of from the skb; in tc egress
and lwt it fetches the classid from skb->sk; and in tc ingress it simply
returns 0.

In summary, the semantics of bpf_get_cgroup_classid() are ambiguous and its
usage scenarios are limited, so it should not be treated as a
general-purpose helper. This patch reverts that part of the previous
commit.

[1] https://syzkaller.appspot.com/bug?extid=9767c7ed68b95cfa69e6

Fixes: ee971630f20f ("bpf: Allow some trace helpers for all prog types")
Reported-by: syzbot+9767c7ed68b95cfa69e6@syzkaller.appspotmail.com
Signed-off-by: Jiayuan Chen <jiayuan.chen@linux.dev>
---
 include/linux/bpf-cgroup.h |  8 ++++++++
 kernel/bpf/cgroup.c        | 25 +++++++++++++++++++++++++
 kernel/bpf/helpers.c       |  4 ----
 3 files changed, 33 insertions(+), 4 deletions(-)
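For context, a minimal sketch (not part of this patch; the section, program
name, and the bpf_printk() call are illustrative only, and it assumes
CONFIG_CGROUP_NET_CLASSID is enabled) of a program type that keeps access to
the helper after this change: a cgroup/getsockopt program still resolves
bpf_get_cgroup_classid() through cgroup_current_func_proto(), while
non-cgroup prog types no longer receive it from bpf_base_func_proto().

// SPDX-License-Identifier: GPL-2.0
/* Minimal sketch, not part of this patch: a cgroup/getsockopt program
 * calling bpf_get_cgroup_classid(). With this change the helper is still
 * resolved for cgroup dev/sysctl/sockopt programs via
 * cgroup_current_func_proto(), but other prog types no longer get it from
 * bpf_base_func_proto(). Names are illustrative; requires
 * CONFIG_CGROUP_NET_CLASSID.
 */
#include <linux/bpf.h>
#include <bpf/bpf_helpers.h>

SEC("cgroup/getsockopt")
int classid_sketch(struct bpf_sockopt *ctx)
{
	/* The _curr variant used here does not consume the skb argument of
	 * the UAPI prototype (it reads the current task's net_cls classid),
	 * so NULL only satisfies the C declaration.
	 */
	__u32 classid = bpf_get_cgroup_classid(NULL);

	bpf_printk("net_cls classid of current task: %u", classid);
	return 1;	/* let the getsockopt call proceed unchanged */
}

char _license[] SEC("license") = "GPL";

Routing the helper through cgroup_current_func_proto() keeps the
"current task" semantics confined to cgroup-attached program types,
mirroring how cgroup_common_func_proto() already factors out the helpers
shared by those prog types.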