Skip to content

Commit dc4bb0e

Browse files
iamkafaidavem330
authored and committed
bpf: Introduce bpf_prog ID
This patch generates a unique ID for each BPF_PROG_LOAD-ed prog. It is worth noting that each BPF_PROG_LOAD-ed prog will have a different ID even if they have the same bpf instructions. The ID is generated by the existing idr_alloc_cyclic(). The ID ranges over [1, INT_MAX). It is allocated in a cyclic manner, so an ID will only get reused after 2 billion BPF_PROG_LOAD calls. The bpf_prog_alloc_id() is done after bpf_prog_select_runtime() because the jit process may have allocated a new prog. Hence, we need to ensure the value of the pointer 'prog' will not change any more before storing the prog in the prog_idr. After bpf_prog_select_runtime(), the prog is read-only. Hence, the id is stored in 'struct bpf_prog_aux'. Signed-off-by: Martin KaFai Lau <[email protected]> Acked-by: Alexei Starovoitov <[email protected]> Acked-by: Daniel Borkmann <[email protected]> Signed-off-by: David S. Miller <[email protected]>
1 parent 8ea4fae commit dc4bb0e

File tree

2 files changed

+40
-1
lines changed

2 files changed

+40
-1
lines changed

include/linux/bpf.h

+1
Original file line numberDiff line numberDiff line change
@@ -172,6 +172,7 @@ struct bpf_prog_aux {
172172
u32 used_map_cnt;
173173
u32 max_ctx_offset;
174174
u32 stack_depth;
175+
u32 id;
175176
struct latch_tree_node ksym_tnode;
176177
struct list_head ksym_lnode;
177178
const struct bpf_verifier_ops *ops;

kernel/bpf/syscall.c

+39-1
Original file line numberDiff line numberDiff line change
@@ -22,8 +22,11 @@
2222
#include <linux/filter.h>
2323
#include <linux/version.h>
2424
#include <linux/kernel.h>
25+
#include <linux/idr.h>
2526

2627
DEFINE_PER_CPU(int, bpf_prog_active);
28+
static DEFINE_IDR(prog_idr);
29+
static DEFINE_SPINLOCK(prog_idr_lock);
2730

2831
int sysctl_unprivileged_bpf_disabled __read_mostly;
2932

@@ -650,6 +653,34 @@ static void bpf_prog_uncharge_memlock(struct bpf_prog *prog)
650653
free_uid(user);
651654
}
652655

656+
static int bpf_prog_alloc_id(struct bpf_prog *prog)
657+
{
658+
int id;
659+
660+
spin_lock_bh(&prog_idr_lock);
661+
id = idr_alloc_cyclic(&prog_idr, prog, 1, INT_MAX, GFP_ATOMIC);
662+
if (id > 0)
663+
prog->aux->id = id;
664+
spin_unlock_bh(&prog_idr_lock);
665+
666+
/* id is in [1, INT_MAX) */
667+
if (WARN_ON_ONCE(!id))
668+
return -ENOSPC;
669+
670+
return id > 0 ? 0 : id;
671+
}
672+
673+
static void bpf_prog_free_id(struct bpf_prog *prog)
674+
{
675+
/* cBPF to eBPF migrations are currently not in the idr store. */
676+
if (!prog->aux->id)
677+
return;
678+
679+
spin_lock_bh(&prog_idr_lock);
680+
idr_remove(&prog_idr, prog->aux->id);
681+
spin_unlock_bh(&prog_idr_lock);
682+
}
683+
653684
static void __bpf_prog_put_rcu(struct rcu_head *rcu)
654685
{
655686
struct bpf_prog_aux *aux = container_of(rcu, struct bpf_prog_aux, rcu);
@@ -663,6 +694,7 @@ void bpf_prog_put(struct bpf_prog *prog)
663694
{
664695
if (atomic_dec_and_test(&prog->aux->refcnt)) {
665696
trace_bpf_prog_put_rcu(prog);
697+
bpf_prog_free_id(prog);
666698
bpf_prog_kallsyms_del(prog);
667699
call_rcu(&prog->aux->rcu, __bpf_prog_put_rcu);
668700
}
@@ -857,15 +889,21 @@ static int bpf_prog_load(union bpf_attr *attr)
857889
if (err < 0)
858890
goto free_used_maps;
859891

892+
err = bpf_prog_alloc_id(prog);
893+
if (err)
894+
goto free_used_maps;
895+
860896
err = bpf_prog_new_fd(prog);
861897
if (err < 0)
862898
/* failed to allocate fd */
863-
goto free_used_maps;
899+
goto free_id;
864900

865901
bpf_prog_kallsyms_add(prog);
866902
trace_bpf_prog_load(prog, err);
867903
return err;
868904

905+
free_id:
906+
bpf_prog_free_id(prog);
869907
free_used_maps:
870908
free_used_maps(prog->aux);
871909
free_prog:

0 commit comments

Comments
 (0)