bpf: add verifier stats and log_level bit 2

commit 06ee711 upstream

ANBZ: #200

In order to understand verifier bottlenecks, add various stats
and extend log_level:
log_level values 1 and 2 are kept as-is:
bit 0 - level=1 - print every insn and verifier state at branch points
bit 1 - level=2 - print every insn and verifier state at every insn
bit 2 - level=4 - print verifier error and stats at the end of verification

When the verifier rejects a program, libbpf currently tries to load it twice:
once with log_level=0 (no messages, only the error code is reported to user space)
and a second time with log_level=1 to tell the user why the verifier rejected it.

With the introduction of bit 2 (level=4), libbpf can choose to always use that
level and load programs only once, since verification speed is not affected and,
in case of error, the verbose message will be available.
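
For illustration only (not part of this patch), here is a minimal sketch of such a
one-shot load using the raw bpf(2) syscall rather than libbpf itself; the sample
program, buffer size and error handling are assumptions, and a kernel carrying this
patch is required for log_level bit 2 to behave as described:

#include <stdio.h>
#include <string.h>
#include <unistd.h>
#include <sys/syscall.h>
#include <linux/bpf.h>

static char log_buf[65536];

int main(void)
{
	/* trivial illustrative program: r0 = 0; exit */
	struct bpf_insn insns[] = {
		{ .code = BPF_ALU64 | BPF_MOV | BPF_K, .dst_reg = BPF_REG_0, .imm = 0 },
		{ .code = BPF_JMP | BPF_EXIT },
	};
	union bpf_attr attr;
	int fd;

	memset(&attr, 0, sizeof(attr));
	attr.prog_type = BPF_PROG_TYPE_SOCKET_FILTER;
	attr.insns     = (__u64)(unsigned long)insns;
	attr.insn_cnt  = sizeof(insns) / sizeof(insns[0]);
	attr.license   = (__u64)(unsigned long)"GPL";
	attr.log_buf   = (__u64)(unsigned long)log_buf;
	attr.log_size  = sizeof(log_buf);
	attr.log_level = 4;	/* bit 2: stats on success, verbose log on rejection */

	/* single load attempt; no second pass is needed just to obtain the log */
	fd = syscall(__NR_bpf, BPF_PROG_LOAD, &attr, sizeof(attr));
	printf("load %s:\n%s", fd < 0 ? "failed" : "succeeded", log_buf);
	return 0;
}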

Note that the verifier stats are not part of the uapi, just like all other
verbose messages. They're expected to change in the future.

Signed-off-by: Alexei Starovoitov <[email protected]>
Signed-off-by: Daniel Borkmann <[email protected]>
Signed-off-by: Qiao Ma <[email protected]>
Acked-by: Tony Lu <[email protected]>
Alexei Starovoitov authored and shiloong committed Dec 31, 2021
1 parent c1a56c7 commit 0ac9ae4
Showing 2 changed files with 73 additions and 24 deletions.
21 changes: 21 additions & 0 deletions include/linux/bpf_verifier.h
@@ -237,6 +237,12 @@ static inline bool bpf_verifier_log_full(const struct bpf_verifier_log *log)
return log->len_used >= log->len_total - 1;
}

#define BPF_LOG_LEVEL1 1
#define BPF_LOG_LEVEL2 2
#define BPF_LOG_STATS 4
#define BPF_LOG_LEVEL (BPF_LOG_LEVEL1 | BPF_LOG_LEVEL2)
#define BPF_LOG_MASK (BPF_LOG_LEVEL | BPF_LOG_STATS)

static inline bool bpf_verifier_log_needed(const struct bpf_verifier_log *log)
{
return log->level && log->ubuf && !bpf_verifier_log_full(log);
@@ -273,6 +279,21 @@ struct bpf_verifier_env {
struct bpf_subprog_info subprog_info[BPF_MAX_SUBPROGS + 1];
struct bpf_id_pair idmap_scratch[BPF_ID_MAP_SIZE];
u32 subprog_cnt;
/* number of instructions analyzed by the verifier */
u32 insn_processed;
/* total verification time */
u64 verification_time;
/* maximum number of verifier states kept in 'branching' instructions */
u32 max_states_per_insn;
/* total number of allocated verifier states */
u32 total_states;
/* some states are freed during program analysis.
* this is peak number of states. this number dominates kernel
* memory consumption during verification
*/
u32 peak_states;
/* longest register parentage chain walked for liveness marking */
u32 longest_mark_read_walk;
};

__printf(2, 0) void bpf_verifier_vlog(struct bpf_verifier_log *log,
76 changes: 52 additions & 24 deletions kernel/bpf/verifier.c
@@ -1076,7 +1076,7 @@ static int check_subprogs(struct bpf_verifier_env *env)
*/
subprog[env->subprog_cnt].start = insn_cnt;

if (env->log.level > 1)
if (env->log.level & BPF_LOG_LEVEL2)
for (i = 0; i < env->subprog_cnt; i++)
verbose(env, "func#%d @%d\n", i, subprog[i].start);

@@ -1123,6 +1123,7 @@ static int mark_reg_read(struct bpf_verifier_env *env,
struct bpf_reg_state *parent)
{
bool writes = parent == state->parent; /* Observe write marks */
int cnt = 0;

while (parent) {
/* if read wasn't screened by an earlier write ... */
@@ -1139,7 +1140,11 @@ static int mark_reg_read(struct bpf_verifier_env,
state = parent;
parent = state->parent;
writes = true;
cnt++;
}

if (env->longest_mark_read_walk < cnt)
env->longest_mark_read_walk = cnt;
return 0;
}

@@ -1870,7 +1875,7 @@ static int check_mem_region_access(struct bpf_verifier_env *env, u32 regno,
* need to try adding each of min_value and max_value to off
* to make sure our theoretical access will be safe.
*/
if (env->log.level)
if (env->log.level & BPF_LOG_LEVEL)
print_verifier_state(env, state);

/* The minimum value is only important with signed
@@ -3220,7 +3225,7 @@ static int check_func_call(struct bpf_verifier_env *env, struct bpf_insn *insn,
/* and go analyze first insn of the callee */
*insn_idx = target_insn;

if (env->log.level) {
if (env->log.level & BPF_LOG_LEVEL) {
verbose(env, "caller:\n");
print_verifier_state(env, caller);
verbose(env, "callee:\n");
@@ -3260,7 +3265,7 @@ static int prepare_func_exit(struct bpf_verifier_env *env, int *insn_idx)
return err;

*insn_idx = callee->callsite + 1;
if (env->log.level) {
if (env->log.level & BPF_LOG_LEVEL) {
verbose(env, "returning from callee:\n");
print_verifier_state(env, callee);
verbose(env, "to caller at %d:\n", *insn_idx);
@@ -5253,7 +5258,7 @@ static int check_cond_jmp_op(struct bpf_verifier_env *env,
insn->dst_reg);
return -EACCES;
}
if (env->log.level)
if (env->log.level & BPF_LOG_LEVEL)
print_verifier_state(env, this_branch->frame[this_branch->curframe]);
return 0;
}
@@ -6137,6 +6142,9 @@ static int is_state_visited(struct bpf_verifier_env *env, int insn_idx)
states_cnt++;
}

if (env->max_states_per_insn < states_cnt)
env->max_states_per_insn = states_cnt;

if (!env->allow_ptr_leaks && states_cnt > BPF_COMPLEXITY_LIMIT_STATES)
return push_jmp_history(env, cur);

@@ -6150,6 +6158,8 @@ static int is_state_visited(struct bpf_verifier_env *env, int insn_idx)
new_sl = kzalloc(sizeof(struct bpf_verifier_state_list), GFP_KERNEL);
if (!new_sl)
return -ENOMEM;
env->total_states++;
env->peak_states++;

/* add new state to the head of linked list */
new = &new_sl->state;
@@ -6226,8 +6236,7 @@ static int do_check(struct bpf_verifier_env *env)
struct bpf_verifier_state *state;
struct bpf_insn *insns = env->prog->insnsi;
struct bpf_reg_state *regs;
int insn_cnt = env->prog->len, i;
int insn_processed = 0;
int insn_cnt = env->prog->len;
bool do_print_state = false;
int prev_insn_idx = -1;

@@ -6262,10 +6271,10 @@ static int do_check(struct bpf_verifier_env *env)
insn = &insns[env->insn_idx];
class = BPF_CLASS(insn->code);

if (++insn_processed > BPF_COMPLEXITY_LIMIT_INSNS) {
if (++env->insn_processed > BPF_COMPLEXITY_LIMIT_INSNS) {
verbose(env,
"BPF program is too large. Processed %d insn\n",
insn_processed);
env->insn_processed);
return -E2BIG;
}

@@ -6274,7 +6283,7 @@ static int do_check(struct bpf_verifier_env *env)
return err;
if (err == 1) {
/* found equivalent state, can prune the search */
if (env->log.level) {
if (env->log.level & BPF_LOG_LEVEL) {
if (do_print_state)
verbose(env, "\nfrom %d to %d%s: safe\n",
env->prev_insn_idx, env->insn_idx,
@@ -6292,8 +6301,9 @@ static int do_check(struct bpf_verifier_env *env)
if (need_resched())
cond_resched();

if (env->log.level > 1 || (env->log.level && do_print_state)) {
if (env->log.level > 1)
if (env->log.level & BPF_LOG_LEVEL2 ||
(env->log.level & BPF_LOG_LEVEL && do_print_state)) {
if (env->log.level & BPF_LOG_LEVEL2)
verbose(env, "%d:", env->insn_idx);
else
verbose(env, "\nfrom %d to %d%s:",
@@ -6304,7 +6314,7 @@ static int do_check(struct bpf_verifier_env *env)
do_print_state = false;
}

if (env->log.level) {
if (env->log.level & BPF_LOG_LEVEL) {
const struct bpf_insn_cbs cbs = {
.cb_print = verbose,
.private_data = env,
@@ -6553,16 +6563,6 @@ static int do_check(struct bpf_verifier_env *env)
env->insn_idx++;
}

verbose(env, "processed %d insns (limit %d), stack depth ",
insn_processed, BPF_COMPLEXITY_LIMIT_INSNS);
for (i = 0; i < env->subprog_cnt; i++) {
u32 depth = env->subprog_info[i].stack_depth;

verbose(env, "%d", depth);
if (i + 1 < env->subprog_cnt)
verbose(env, "+");
}
verbose(env, "\n");
env->prog->aux->stack_depth = env->subprog_info[0].stack_depth;
return 0;
}
@@ -7474,8 +7474,33 @@ static void free_states(struct bpf_verifier_env *env)
kfree(env->explored_states);
}

static void print_verification_stats(struct bpf_verifier_env *env)
{
int i;

if (env->log.level & BPF_LOG_STATS) {
verbose(env, "verification time %lld usec\n",
div_u64(env->verification_time, 1000));
verbose(env, "stack depth ");
for (i = 0; i < env->subprog_cnt; i++) {
u32 depth = env->subprog_info[i].stack_depth;

verbose(env, "%d", depth);
if (i + 1 < env->subprog_cnt)
verbose(env, "+");
}
verbose(env, "\n");
}
verbose(env, "processed %d insns (limit %d) max_states_per_insn %d "
"total_states %d peak_states %d mark_read %d\n",
env->insn_processed, BPF_COMPLEXITY_LIMIT_INSNS,
env->max_states_per_insn, env->total_states,
env->peak_states, env->longest_mark_read_walk);
}

int bpf_check(struct bpf_prog **prog, union bpf_attr *attr)
{
u64 start_time = ktime_get_ns();
struct bpf_verifier_env *env;
struct bpf_verifier_log *log;
int ret = -EINVAL;
@@ -7515,7 +7540,7 @@ int bpf_check(struct bpf_prog **prog, union bpf_attr *attr)
ret = -EINVAL;
/* log attributes have to be sane */
if (log->len_total < 128 || log->len_total > UINT_MAX >> 8 ||
!log->level || !log->ubuf)
!log->level || !log->ubuf || log->level & ~BPF_LOG_MASK)
goto err_unlock;
}

@@ -7572,6 +7597,9 @@ int bpf_check(struct bpf_prog **prog, union bpf_attr *attr)
if (ret == 0)
ret = fixup_call_args(env);

env->verification_time = ktime_get_ns() - start_time;
print_verification_stats(env);

if (log->level && bpf_verifier_log_full(log))
ret = -ENOSPC;
if (log->level && !log->ubuf) {