From dbb0634f038633395c4f783fc838179b647fe7ad Mon Sep 17 00:00:00 2001
From: Daniel Borkmann <daniel@iogearbox.net>
Date: Mon, 25 Jul 2022 15:56:56 +0200
Subject: [PATCH] bpf, selftests: Add various BPF tc link selftests

[ Commit msg tbd ]

todo:
- selftest with old/legacy tc api
- selftest with chaining & termination

  # ./vmtest.sh -- ./test_progs -t tc_link
  [...]
  [ 1.486605] clocksource: Switched to clocksource tsc
  ./test_progs -t tc_link
  [ 1.542394] bpf_testmod: loading out-of-tree module taints kernel.
  [ 1.543803] bpf_testmod: module verification failed: signature and/or required key missing - tainting kernel
  #179 tc_link_base:OK
  #180 tc_link_mix:OK
  #181 tc_link_opts:OK
  #182 tc_link_run:OK
  Summary: 4/0 PASSED, 0 SKIPPED, 0 FAILED
  [...]

Signed-off-by: Daniel Borkmann <daniel@iogearbox.net>
---
 .../selftests/bpf/prog_tests/tc_link.c | 600 ++++++++++++++++++
 .../selftests/bpf/progs/test_tc_link.c |  30 +
 2 files changed, 630 insertions(+)
 create mode 100644 tools/testing/selftests/bpf/prog_tests/tc_link.c
 create mode 100644 tools/testing/selftests/bpf/progs/test_tc_link.c

diff --git a/tools/testing/selftests/bpf/prog_tests/tc_link.c b/tools/testing/selftests/bpf/prog_tests/tc_link.c
new file mode 100644
index 00000000000000..428f0ed3cddcc0
--- /dev/null
+++ b/tools/testing/selftests/bpf/prog_tests/tc_link.c
@@ -0,0 +1,600 @@
+// SPDX-License-Identifier: GPL-2.0
+/* Copyright (c) 2022 Isovalent */
+
+#include <uapi/linux/if_link.h>
+#include <test_progs.h>
+
+#include "test_tc_link.skel.h"
+
+#define loopback 1
+#define ping_cmd "ping -q -c1 -w1 127.0.0.1 > /dev/null"
+
+void serial_test_tc_link_base(void)
+{
+ struct test_tc_link *skel1 = NULL, *skel2 = NULL;
+ __u32 prog_fd1, prog_fd2, prog_fd3, prog_fd4;
+ __u32 id0 = 0, id1, id2, id3, id4, id5, id6, id7;
+ struct bpf_prog_info prog_info;
+ struct bpf_link_info link_info;
+ __u32 link_info_len = sizeof(link_info);
+ __u32 prog_info_len = sizeof(prog_info);
+ __u32 prog_cnt, attach_flags = 0;
+ struct bpf_link *link;
+ int err;
+ struct {
+ u32 prog_id;
+ u32 link_id;
+ u32 prio;
+ } __packed progs[4];
+
+ skel1 = test_tc_link__open_and_load();
+ if (!ASSERT_OK_PTR(skel1, "skel_load"))
+ goto cleanup;
+ prog_fd1 = bpf_program__fd(skel1->progs.tc_handler_in);
+ prog_fd2 = bpf_program__fd(skel1->progs.tc_handler_eg);
+
+ skel2 = test_tc_link__open_and_load();
+ if (!ASSERT_OK_PTR(skel2, "skel_load"))
+ goto cleanup;
+ prog_fd3 = bpf_program__fd(skel2->progs.tc_handler_in);
+ prog_fd4 = bpf_program__fd(skel2->progs.tc_handler_eg);
+
+ memset(&prog_info, 0, sizeof(prog_info));
+ err = bpf_obj_get_info_by_fd(prog_fd1, &prog_info, &prog_info_len);
+ if (!ASSERT_OK(err, "fd_info1"))
+ goto cleanup;
+ id1 = prog_info.id;
+
+ memset(&prog_info, 0, sizeof(prog_info));
+ err = bpf_obj_get_info_by_fd(prog_fd2, &prog_info, &prog_info_len);
+ if (!ASSERT_OK(err, "fd_info2"))
+ goto cleanup;
+ id2 = prog_info.id;
+
+ memset(&prog_info, 0, sizeof(prog_info));
+ err = bpf_obj_get_info_by_fd(prog_fd3, &prog_info, &prog_info_len);
+ if (!ASSERT_OK(err, "fd_info3"))
+ goto cleanup;
+ id3 = prog_info.id;
+
+ memset(&prog_info, 0, sizeof(prog_info));
+ err = bpf_obj_get_info_by_fd(prog_fd4, &prog_info, &prog_info_len);
+ if (!ASSERT_OK(err, "fd_info4"))
+ goto cleanup;
+ id4 = prog_info.id;
+
+ /* Sanity check that we have distinct programs. 
*/ + ASSERT_NEQ(id1, id3, "prog_ids_1_3"); + ASSERT_NEQ(id2, id4, "prog_ids_2_4"); + ASSERT_NEQ(id1, id4, "prog_ids_1_4"); + + link = bpf_program__attach_tc(skel1->progs.tc_handler_in, loopback, 1); + if (!ASSERT_OK_PTR(link, "link_attach")) + goto cleanup; + skel1->links.tc_handler_in = link; + + memset(&link_info, 0, sizeof(link_info)); + err = bpf_obj_get_info_by_fd(bpf_link__fd(link), &link_info, &link_info_len); + if (!ASSERT_OK(err, "link_info")) + goto cleanup; + + /* Sanity check that attached ingress BPF link looks as expected. */ + ASSERT_EQ(link_info.type, BPF_LINK_TYPE_TC, "link_type"); + ASSERT_EQ(link_info.prog_id, id1, "link_prog_id"); + ASSERT_EQ(link_info.tc.ifindex, loopback, "link_ifindex"); + ASSERT_EQ(link_info.tc.attach_type, BPF_NET_INGRESS, "link_attach_type"); + ASSERT_EQ(link_info.tc.priority, 1, "link_priority"); + ASSERT_NEQ(link_info.id, id0, "link_id"); + id5 = link_info.id; + + /* Updating program under active ingress BPF link works as expected. */ + err = bpf_link__update_program(link, skel2->progs.tc_handler_in); + if (!ASSERT_OK(err, "link_upd_invalid")) + goto cleanup; + + memset(&link_info, 0, sizeof(link_info)); + err = bpf_obj_get_info_by_fd(bpf_link__fd(link), &link_info, &link_info_len); + if (!ASSERT_OK(err, "link_info")) + goto cleanup; + + ASSERT_EQ(link_info.id, id5, "link_id"); + ASSERT_EQ(link_info.prog_id, id3, "link_prog_id"); + + link = bpf_program__attach_tc(skel1->progs.tc_handler_eg, loopback, 1); + if (!ASSERT_OK_PTR(link, "link_attach")) + goto cleanup; + skel1->links.tc_handler_eg = link; + + memset(&link_info, 0, sizeof(link_info)); + err = bpf_obj_get_info_by_fd(bpf_link__fd(link), &link_info, &link_info_len); + if (!ASSERT_OK(err, "link_info")) + goto cleanup; + + /* Sanity check that attached egress BPF link looks as expected. */ + ASSERT_EQ(link_info.type, BPF_LINK_TYPE_TC, "link_type"); + ASSERT_EQ(link_info.prog_id, id2, "link_prog_id"); + ASSERT_EQ(link_info.tc.ifindex, loopback, "link_ifindex"); + ASSERT_EQ(link_info.tc.attach_type, BPF_NET_EGRESS, "link_attach_type"); + ASSERT_EQ(link_info.tc.priority, 1, "link_priority"); + ASSERT_NEQ(link_info.id, id0, "link_id"); + ASSERT_NEQ(link_info.id, id5, "link_id"); + id6 = link_info.id; + + /* Updating program under active egress BPF link works as expected. */ + err = bpf_link__update_program(link, skel2->progs.tc_handler_eg); + if (!ASSERT_OK(err, "link_upd_invalid")) + goto cleanup; + + memset(&link_info, 0, sizeof(link_info)); + err = bpf_obj_get_info_by_fd(bpf_link__fd(link), &link_info, &link_info_len); + if (!ASSERT_OK(err, "link_info")) + goto cleanup; + + ASSERT_EQ(link_info.id, id6, "link_id"); + ASSERT_EQ(link_info.prog_id, id4, "link_prog_id"); + + /* BPF link is not allowed to replace another BPF link. */ + link = bpf_program__attach_tc(skel2->progs.tc_handler_eg, loopback, 1); + if (!ASSERT_ERR_PTR(link, "link_attach_should_fail")) { + bpf_link__destroy(link); + goto cleanup; + } + + /* BPF link can be attached with different prio to available slot however. */ + link = bpf_program__attach_tc(skel2->progs.tc_handler_eg, loopback, 2); + if (!ASSERT_OK_PTR(link, "link_attach")) + goto cleanup; + + memset(&link_info, 0, sizeof(link_info)); + err = bpf_obj_get_info_by_fd(bpf_link__fd(link), &link_info, &link_info_len); + if (!ASSERT_OK(err, "link_info")) { + bpf_link__destroy(link); + goto cleanup; + } + + /* Sanity check that 2nd attached egress BPF link looks as expected. 
*/ + ASSERT_EQ(link_info.type, BPF_LINK_TYPE_TC, "link_type"); + ASSERT_EQ(link_info.prog_id, id4, "link_prog_id"); + ASSERT_EQ(link_info.tc.ifindex, loopback, "link_ifindex"); + ASSERT_EQ(link_info.tc.attach_type, BPF_NET_EGRESS, "link_attach_type"); + ASSERT_EQ(link_info.tc.priority, 2, "link_priority"); + ASSERT_NEQ(link_info.id, id6, "link_id"); + + /* We destroy link, and reattach with auto-allocated prio. */ + bpf_link__destroy(link); + + link = bpf_program__attach_tc(skel2->progs.tc_handler_eg, loopback, 0); + if (!ASSERT_OK_PTR(link, "link_attach")) + goto cleanup; + + memset(&link_info, 0, sizeof(link_info)); + err = bpf_obj_get_info_by_fd(bpf_link__fd(link), &link_info, &link_info_len); + if (!ASSERT_OK(err, "link_info")) + goto cleanup_link; + + /* Sanity check that egress BPF link looks as expected and got prio 2. */ + ASSERT_EQ(link_info.type, BPF_LINK_TYPE_TC, "link_type"); + ASSERT_EQ(link_info.prog_id, id4, "link_prog_id"); + ASSERT_EQ(link_info.tc.ifindex, loopback, "link_ifindex"); + ASSERT_EQ(link_info.tc.attach_type, BPF_NET_EGRESS, "link_attach_type"); + ASSERT_EQ(link_info.tc.priority, 2, "link_priority"); + ASSERT_NEQ(link_info.id, id6, "link_id"); + id7 = link_info.id; + + /* Sanity check query API on what progs we have attached. */ + prog_cnt = 0; + err = bpf_prog_query(loopback, BPF_NET_EGRESS, 0, &attach_flags, + NULL, &prog_cnt); + if (!ASSERT_OK(err, "prog_query")) + goto cleanup_link; + + ASSERT_EQ(prog_cnt, 2, "prog_cnt"); + + memset(progs, 0, sizeof(progs)); + prog_cnt = ARRAY_SIZE(progs); + err = bpf_prog_query(loopback, BPF_NET_EGRESS, 0, &attach_flags, + progs, &prog_cnt); + if (!ASSERT_OK(err, "prog_query")) + goto cleanup_link; + + ASSERT_EQ(prog_cnt, 2, "prog_cnt"); + ASSERT_EQ(progs[0].prog_id, id4, "prog[0]_id"); + ASSERT_EQ(progs[0].link_id, id6, "prog[0]_link"); + ASSERT_EQ(progs[0].prio, 1, "prog[0]_prio"); + ASSERT_EQ(progs[1].prog_id, id4, "prog[1]_id"); + ASSERT_EQ(progs[1].link_id, id7, "prog[1]_link"); + ASSERT_EQ(progs[1].prio, 2, "prog[1]_prio"); + ASSERT_EQ(progs[2].prog_id, 0, "prog[2]_id"); + ASSERT_EQ(progs[2].link_id, 0, "prog[2]_link"); + ASSERT_EQ(progs[2].prio, 0, "prog[2]_prio"); + + memset(progs, 0, sizeof(progs)); + prog_cnt = ARRAY_SIZE(progs); + err = bpf_prog_query(loopback, BPF_NET_INGRESS, 0, &attach_flags, + progs, &prog_cnt); + if (!ASSERT_OK(err, "prog_query")) + goto cleanup_link; + + ASSERT_EQ(prog_cnt, 1, "prog_cnt"); + ASSERT_EQ(progs[0].prog_id, id3, "prog[0]_id"); + ASSERT_EQ(progs[0].link_id, id5, "prog[0]_link"); + ASSERT_EQ(progs[0].prio, 1, "prog[0]_prio"); + ASSERT_EQ(progs[1].prog_id, 0, "prog[1]_id"); + ASSERT_EQ(progs[1].link_id, 0, "prog[1]_link"); + ASSERT_EQ(progs[1].prio, 0, "prog[1]_prio"); + +cleanup_link: + bpf_link__destroy(link); +cleanup: + test_tc_link__destroy(skel1); + test_tc_link__destroy(skel2); +} + +void serial_test_tc_link_opts(void) +{ + DECLARE_LIBBPF_OPTS(bpf_prog_attach_opts, opta); + DECLARE_LIBBPF_OPTS(bpf_prog_detach_opts, optd); + __u32 prog_fd1, prog_fd2, id1, id2; + struct bpf_prog_info prog_info; + struct test_tc_link *skel; + __u32 prog_info_len = sizeof(prog_info); + __u32 prog_cnt, attach_flags = 0; + int err, prio; + struct { + u32 prog_id; + u32 link_id; + u32 prio; + } __packed progs[4]; + + skel = test_tc_link__open_and_load(); + if (!ASSERT_OK_PTR(skel, "skel_load")) + goto cleanup; + prog_fd1 = bpf_program__fd(skel->progs.tc_handler_in); + prog_fd2 = bpf_program__fd(skel->progs.tc_handler_eg); + + memset(&prog_info, 0, sizeof(prog_info)); + err = 
bpf_obj_get_info_by_fd(prog_fd1, &prog_info, &prog_info_len); + if (!ASSERT_OK(err, "fd_info1")) + goto cleanup; + id1 = prog_info.id; + + memset(&prog_info, 0, sizeof(prog_info)); + err = bpf_obj_get_info_by_fd(prog_fd2, &prog_info, &prog_info_len); + if (!ASSERT_OK(err, "fd_info2")) + goto cleanup; + id2 = prog_info.id; + + ASSERT_NEQ(id1, id2, "prog_ids_1_2"); + + /* Sanity check query API that nothing is attached. */ + prog_cnt = 0; + err = bpf_prog_query(loopback, BPF_NET_INGRESS, 0, &attach_flags, + NULL, &prog_cnt); + ASSERT_EQ(prog_cnt, 0, "prog_cnt"); + ASSERT_EQ(err, -ENOENT, "prog_query"); + + prog_cnt = 0; + err = bpf_prog_query(loopback, BPF_NET_EGRESS, 0, &attach_flags, + NULL, &prog_cnt); + ASSERT_EQ(prog_cnt, 0, "prog_cnt"); + ASSERT_EQ(err, -ENOENT, "prog_query"); + + /* Sanity check that attaching with given prio works. */ + opta.flags = 0; + opta.attach_priority = prio = 1; + err = bpf_prog_attach_opts(prog_fd1, loopback, BPF_NET_INGRESS, &opta); + if (!ASSERT_EQ(err, opta.attach_priority, "prog_attach")) + goto cleanup; + + prog_cnt = 0; + err = bpf_prog_query(loopback, BPF_NET_INGRESS, 0, &attach_flags, + NULL, &prog_cnt); + if (!ASSERT_OK(err, "prog_query")) + goto cleanup_detach; + + ASSERT_EQ(prog_cnt, 1, "prog_cnt"); + + memset(progs, 0, sizeof(progs)); + prog_cnt = ARRAY_SIZE(progs); + err = bpf_prog_query(loopback, BPF_NET_INGRESS, 0, &attach_flags, + progs, &prog_cnt); + if (!ASSERT_OK(err, "prog_query")) + goto cleanup_detach; + + ASSERT_EQ(prog_cnt, 1, "prog_cnt"); + ASSERT_EQ(progs[0].prog_id, id1, "prog[0]_id"); + ASSERT_EQ(progs[0].link_id, 0, "prog[0]_link"); + ASSERT_EQ(progs[0].prio, 1, "prog[0]_prio"); + ASSERT_EQ(progs[1].prog_id, 0, "prog[1]_id"); + ASSERT_EQ(progs[1].link_id, 0, "prog[1]_link"); + ASSERT_EQ(progs[1].prio, 0, "prog[1]_prio"); + + /* We cannot override unless we add replace flag. */ + opta.flags = 0; + opta.attach_priority = 1; + err = bpf_prog_attach_opts(prog_fd2, loopback, BPF_NET_INGRESS, &opta); + if (!ASSERT_ERR(err, "prog_attach_fail")) + goto cleanup_detach; + + opta.flags = BPF_F_REPLACE; + opta.attach_priority = 1; + err = bpf_prog_attach_opts(prog_fd2, loopback, BPF_NET_INGRESS, &opta); + if (!ASSERT_EQ(err, opta.attach_priority, "prog_replace")) + goto cleanup_detach; + + memset(progs, 0, sizeof(progs)); + prog_cnt = ARRAY_SIZE(progs); + err = bpf_prog_query(loopback, BPF_NET_INGRESS, 0, &attach_flags, + progs, &prog_cnt); + if (!ASSERT_OK(err, "prog_query")) + goto cleanup_detach; + + ASSERT_EQ(prog_cnt, 1, "prog_cnt"); + ASSERT_EQ(progs[0].prog_id, id2, "prog[0]_id"); + ASSERT_EQ(progs[0].link_id, 0, "prog[0]_link"); + ASSERT_EQ(progs[0].prio, 1, "prog[0]_prio"); + ASSERT_EQ(progs[1].prog_id, 0, "prog[1]_id"); + ASSERT_EQ(progs[1].link_id, 0, "prog[1]_link"); + ASSERT_EQ(progs[1].prio, 0, "prog[1]_prio"); + + /* Check auto-assignment for priority. 
*/ + opta.flags = 0; + opta.attach_priority = 0; + err = bpf_prog_attach_opts(prog_fd1, loopback, BPF_NET_INGRESS, &opta); + if (!ASSERT_EQ(err, 2, "prog_replace")) + goto cleanup_detach; + + memset(progs, 0, sizeof(progs)); + prog_cnt = ARRAY_SIZE(progs); + err = bpf_prog_query(loopback, BPF_NET_INGRESS, 0, &attach_flags, + progs, &prog_cnt); + if (!ASSERT_OK(err, "prog_query")) + goto cleanup_detach2; + + ASSERT_EQ(prog_cnt, 2, "prog_cnt"); + ASSERT_EQ(progs[0].prog_id, id2, "prog[0]_id"); + ASSERT_EQ(progs[0].link_id, 0, "prog[0]_link"); + ASSERT_EQ(progs[0].prio, 1, "prog[0]_prio"); + ASSERT_EQ(progs[1].prog_id, id1, "prog[1]_id"); + ASSERT_EQ(progs[1].link_id, 0, "prog[1]_link"); + ASSERT_EQ(progs[1].prio, 2, "prog[1]_prio"); + ASSERT_EQ(progs[2].prog_id, 0, "prog[2]_id"); + ASSERT_EQ(progs[2].link_id, 0, "prog[2]_link"); + ASSERT_EQ(progs[2].prio, 0, "prog[2]_prio"); + + /* Remove the 1st program, so the 2nd becomes 1st in line. */ + prio = 2; + optd.attach_priority = 1; + err = bpf_prog_detach_opts(0, loopback, BPF_NET_INGRESS, &optd); + if (!ASSERT_OK(err, "prog_detach")) + goto cleanup_detach; + + memset(progs, 0, sizeof(progs)); + prog_cnt = ARRAY_SIZE(progs); + err = bpf_prog_query(loopback, BPF_NET_INGRESS, 0, &attach_flags, + progs, &prog_cnt); + if (!ASSERT_OK(err, "prog_query")) + goto cleanup_detach; + + ASSERT_EQ(prog_cnt, 1, "prog_cnt"); + ASSERT_EQ(progs[0].prog_id, id1, "prog[0]_id"); + ASSERT_EQ(progs[0].link_id, 0, "prog[0]_link"); + ASSERT_EQ(progs[0].prio, 2, "prog[0]_prio"); + ASSERT_EQ(progs[1].prog_id, 0, "prog[1]_id"); + ASSERT_EQ(progs[1].link_id, 0, "prog[1]_link"); + ASSERT_EQ(progs[1].prio, 0, "prog[1]_prio"); + + /* Add back higher prio program, so 1st becomes 2nd in line. + * Replace also works if nothing was attached at the given prio. + */ + opta.flags = BPF_F_REPLACE; + opta.attach_priority = 1; + err = bpf_prog_attach_opts(prog_fd2, loopback, BPF_NET_INGRESS, &opta); + if (!ASSERT_EQ(err, opta.attach_priority, "prog_replace")) + goto cleanup_detach; + + prio = 1; + memset(progs, 0, sizeof(progs)); + prog_cnt = ARRAY_SIZE(progs); + err = bpf_prog_query(loopback, BPF_NET_INGRESS, 0, &attach_flags, + progs, &prog_cnt); + if (!ASSERT_OK(err, "prog_query")) + goto cleanup_detach2; + + ASSERT_EQ(prog_cnt, 2, "prog_cnt"); + ASSERT_EQ(progs[0].prog_id, id2, "prog[0]_id"); + ASSERT_EQ(progs[0].link_id, 0, "prog[0]_link"); + ASSERT_EQ(progs[0].prio, 1, "prog[0]_prio"); + ASSERT_EQ(progs[1].prog_id, id1, "prog[1]_id"); + ASSERT_EQ(progs[1].link_id, 0, "prog[1]_link"); + ASSERT_EQ(progs[1].prio, 2, "prog[1]_prio"); + ASSERT_EQ(progs[2].prog_id, 0, "prog[2]_id"); + ASSERT_EQ(progs[2].link_id, 0, "prog[2]_link"); + ASSERT_EQ(progs[2].prio, 0, "prog[2]_prio"); + + optd.attach_priority = 2; + err = bpf_prog_detach_opts(0, loopback, BPF_NET_INGRESS, &optd); + ASSERT_OK(err, "prog_detach"); + + optd.attach_priority = 1; + err = bpf_prog_detach_opts(0, loopback, BPF_NET_INGRESS, &optd); + ASSERT_OK(err, "prog_detach"); + + /* Expected to be empty again. 
*/ + prog_cnt = 0; + err = bpf_prog_query(loopback, BPF_NET_INGRESS, 0, &attach_flags, + NULL, &prog_cnt); + ASSERT_EQ(prog_cnt, 0, "prog_cnt"); + ASSERT_EQ(err, -ENOENT, "prog_query"); + goto cleanup; + +cleanup_detach: + optd.attach_priority = prio; + err = bpf_prog_detach_opts(0, loopback, BPF_NET_INGRESS, &optd); + if (!ASSERT_OK(err, "prog_detach")) + goto cleanup; +cleanup: + test_tc_link__destroy(skel); + return; +cleanup_detach2: + optd.attach_priority = 2; + err = bpf_prog_detach_opts(0, loopback, BPF_NET_INGRESS, &optd); + ASSERT_OK(err, "prog_detach"); + goto cleanup_detach; +} + +void serial_test_tc_link_mix(void) +{ + DECLARE_LIBBPF_OPTS(bpf_prog_attach_opts, opta); + DECLARE_LIBBPF_OPTS(bpf_prog_detach_opts, optd); + __u32 prog_fd1, prog_fd2, id1, id2, id3; + struct test_tc_link *skel; + struct bpf_link *link; + struct bpf_prog_info prog_info; + struct bpf_link_info link_info; + __u32 link_info_len = sizeof(link_info); + __u32 prog_info_len = sizeof(prog_info); + __u32 prog_cnt, attach_flags = 0; + int err; + struct { + u32 prog_id; + u32 link_id; + u32 prio; + } __packed progs[4]; + + skel = test_tc_link__open_and_load(); + if (!ASSERT_OK_PTR(skel, "skel_load")) + goto cleanup; + prog_fd1 = bpf_program__fd(skel->progs.tc_handler_in); + prog_fd2 = bpf_program__fd(skel->progs.tc_handler_eg); + + memset(&prog_info, 0, sizeof(prog_info)); + err = bpf_obj_get_info_by_fd(prog_fd1, &prog_info, &prog_info_len); + if (!ASSERT_OK(err, "fd_info1")) + goto cleanup; + id1 = prog_info.id; + + memset(&prog_info, 0, sizeof(prog_info)); + err = bpf_obj_get_info_by_fd(prog_fd2, &prog_info, &prog_info_len); + if (!ASSERT_OK(err, "fd_info2")) + goto cleanup; + id2 = prog_info.id; + + ASSERT_NEQ(id1, id2, "prog_ids_1_2"); + + /* Sanity check that attaching with given prio works. */ + opta.flags = 0; + opta.attach_priority = 42; + err = bpf_prog_attach_opts(prog_fd1, loopback, BPF_NET_EGRESS, &opta); + if (!ASSERT_EQ(err, opta.attach_priority, "prog_attach")) + goto cleanup; + + prog_cnt = 0; + err = bpf_prog_query(loopback, BPF_NET_EGRESS, 0, &attach_flags, + NULL, &prog_cnt); + if (!ASSERT_OK(err, "prog_query")) + goto cleanup_detach; + + ASSERT_EQ(prog_cnt, 1, "prog_cnt"); + + memset(progs, 0, sizeof(progs)); + prog_cnt = ARRAY_SIZE(progs); + err = bpf_prog_query(loopback, BPF_NET_EGRESS, 0, &attach_flags, + progs, &prog_cnt); + if (!ASSERT_OK(err, "prog_query")) + goto cleanup_detach; + + ASSERT_EQ(prog_cnt, 1, "prog_cnt"); + ASSERT_EQ(progs[0].prog_id, id1, "prog[0]_id"); + ASSERT_EQ(progs[0].link_id, 0, "prog[0]_link"); + ASSERT_EQ(progs[0].prio, 42, "prog[0]_prio"); + ASSERT_EQ(progs[1].prog_id, 0, "prog[1]_id"); + ASSERT_EQ(progs[1].link_id, 0, "prog[1]_link"); + ASSERT_EQ(progs[1].prio, 0, "prog[1]_prio"); + + /* Sanity check that attaching link with same prio will fail. */ + link = bpf_program__attach_tc(skel->progs.tc_handler_eg, loopback, 42); + if (!ASSERT_ERR_PTR(link, "link_attach_should_fail")) { + bpf_link__destroy(link); + goto cleanup; + } + + /* Different prio on unused slot works of course. 
*/
+ link = bpf_program__attach_tc(skel->progs.tc_handler_eg, loopback, 0);
+ if (!ASSERT_OK_PTR(link, "link_attach"))
+ goto cleanup;
+ skel->links.tc_handler_eg = link;
+
+ memset(&link_info, 0, sizeof(link_info));
+ err = bpf_obj_get_info_by_fd(bpf_link__fd(link), &link_info, &link_info_len);
+ if (!ASSERT_OK(err, "link_info"))
+ goto cleanup;
+
+ ASSERT_EQ(link_info.prog_id, id2, "link_prog_id");
+ id3 = link_info.id;
+
+ memset(progs, 0, sizeof(progs));
+ prog_cnt = ARRAY_SIZE(progs);
+ err = bpf_prog_query(loopback, BPF_NET_EGRESS, 0, &attach_flags,
+ progs, &prog_cnt);
+ if (!ASSERT_OK(err, "prog_query"))
+ goto cleanup_detach;
+
+ ASSERT_EQ(prog_cnt, 2, "prog_cnt");
+ ASSERT_EQ(progs[0].prog_id, id2, "prog[0]_id");
+ ASSERT_EQ(progs[0].link_id, id3, "prog[0]_link");
+ ASSERT_EQ(progs[0].prio, 1, "prog[0]_prio");
+ ASSERT_EQ(progs[1].prog_id, id1, "prog[1]_id");
+ ASSERT_EQ(progs[1].link_id, 0, "prog[1]_link");
+ ASSERT_EQ(progs[1].prio, 42, "prog[1]_prio");
+ ASSERT_EQ(progs[2].prog_id, 0, "prog[2]_id");
+ ASSERT_EQ(progs[2].link_id, 0, "prog[2]_link");
+ ASSERT_EQ(progs[2].prio, 0, "prog[2]_prio");
+
+ /* Sanity check that attaching non-link with same prio as link will fail. */
+ opta.flags = BPF_F_REPLACE;
+ opta.attach_priority = 1;
+ err = bpf_prog_attach_opts(prog_fd1, loopback, BPF_NET_EGRESS, &opta);
+ if (!ASSERT_ERR(err, "prog_attach_should_fail"))
+ goto cleanup_detach;
+
+ opta.flags = 0;
+ opta.attach_priority = 1;
+ err = bpf_prog_attach_opts(prog_fd1, loopback, BPF_NET_EGRESS, &opta);
+ if (!ASSERT_ERR(err, "prog_attach_should_fail"))
+ goto cleanup_detach;
+
+cleanup_detach:
+ optd.attach_priority = 42;
+ err = bpf_prog_detach_opts(0, loopback, BPF_NET_EGRESS, &optd);
+ if (!ASSERT_OK(err, "prog_detach"))
+ goto cleanup;
+cleanup:
+ test_tc_link__destroy(skel);
+}
+
+void serial_test_tc_link_run(void)
+{
+ struct test_tc_link *skel;
+ struct bpf_link *link;
+
+ skel = test_tc_link__open_and_load();
+ if (!ASSERT_OK_PTR(skel, "skel_load"))
+ goto cleanup;
+
+ link = bpf_program__attach_tc(skel->progs.tc_handler_eg, loopback, 0);
+ if (!ASSERT_OK_PTR(link, "link_attach"))
+ goto cleanup;
+ skel->links.tc_handler_eg = link;
+
+ link = bpf_program__attach_tc(skel->progs.tc_handler_in, loopback, 0);
+ if (!ASSERT_OK_PTR(link, "link_attach"))
+ goto cleanup;
+ skel->links.tc_handler_in = link;
+
+ CHECK_FAIL(system(ping_cmd));
+
+ ASSERT_EQ(skel->bss->run, 3, "run32_value");
+cleanup:
+ test_tc_link__destroy(skel);
+}
diff --git a/tools/testing/selftests/bpf/progs/test_tc_link.c b/tools/testing/selftests/bpf/progs/test_tc_link.c
new file mode 100644
index 00000000000000..80639b829d744a
--- /dev/null
+++ b/tools/testing/selftests/bpf/progs/test_tc_link.c
@@ -0,0 +1,30 @@
+// SPDX-License-Identifier: GPL-2.0
+/* Copyright (c) 2022 Isovalent */
+#include <linux/bpf.h>
+#include <bpf/bpf_helpers.h>
+
+char LICENSE[] SEC("license") = "GPL";
+
+__u32 run;
+
+SEC("tc/ingress")
+int tc_handler_in(struct __sk_buff *skb)
+{
+#ifdef ENABLE_ATOMICS_TESTS
+ __sync_fetch_and_or(&run, 1);
+#else
+ run |= 1;
+#endif
+ return 0;
+}
+
+SEC("tc/egress")
+int tc_handler_eg(struct __sk_buff *skb)
+{
+#ifdef ENABLE_ATOMICS_TESTS
+ __sync_fetch_and_or(&run, 2);
+#else
+ run |= 2;
+#endif
+ return 0;
+}