tools/bpf: bpftool: improve output format for bpftool net

This is a follow-up patch to commit f6f3bac08f
("tools/bpf: bpftool: add net support").
Some improvements are made to the bpftool net output.
Specifically, the plain output is more concise so that
each attachment fits nicely on a single line.
Compared to the previous output, the prog tag is removed
since it can easily be obtained from the program id.
Similar to xdp attachments, the device name is added
to tc attachments.
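
The tag, if needed, can still be looked up from the program id
(a minimal illustration; the id and the printed values below are
made up):
  $ bpftool prog show id 23
  23: sched_cls  tag 7cc47bbf07148bfe  gpl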

BPF programs attached through the tc shared block
mechanism are supported as well:
  $ ip link add dev v1 type veth peer name v2
  $ tc qdisc add dev v1 ingress_block 10 egress_block 20 clsact
  $ tc qdisc add dev v2 ingress_block 10 egress_block 20 clsact
  $ tc filter add block 10 protocol ip prio 25 bpf obj bpf_shared.o sec ingress flowid 1:1
  $ tc filter add block 20 protocol ip prio 30 bpf obj bpf_cyclic.o sec classifier flowid 1:1
  $ bpftool net
  xdp:

  tc:
  v2(7) clsact/ingress bpf_shared.o:[ingress] id 23
  v2(7) clsact/egress bpf_cyclic.o:[classifier] id 24
  v1(8) clsact/ingress bpf_shared.o:[ingress] id 23
  v1(8) clsact/egress bpf_cyclic.o:[classifier] id 24
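
The same attachments can be cross-checked with iproute2 (assuming
a tc binary recent enough to understand shared blocks), e.g. by
listing the filters of a block directly:
  $ tc filter show block 10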

The documentation and "bpftool net help" are updated
to make it clear that the current implementation only
supports xdp and tc attachments. For programs
attached to cgroups, "bpftool cgroup" can be used
to dump attachments. For other program types, e.g.
sk_{filter,skb,msg,reuseport} and lwt/seg6, the
iproute2 tools should be used.
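
As a quick illustration of the cgroup case (the cgroup path and the
listing below are illustrative only):
  $ bpftool cgroup show /sys/fs/cgroup/unified/foo
  ID       AttachType      AttachFlags     Name
  112      ingress                         my_filter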

The new output:
  $ bpftool net
  xdp:
  eth0(2) driver id 198

  tc:
  eth0(2) clsact/ingress fbflow_icmp id 335 act [{icmp_action id 336}]
  eth0(2) clsact/egress fbflow_egress id 334
  $ bpftool -jp net
  [{
        "xdp": [{
                "devname": "eth0",
                "ifindex": 2,
                "mode": "driver",
                "id": 198
            }
        ],
        "tc": [{
                "devname": "eth0",
                "ifindex": 2,
                "kind": "clsact/ingress",
                "name": "fbflow_icmp",
                "id": 335,
                "act": [{
                        "name": "icmp_action",
                        "id": 336
                    }
                ]
            },{
                "devname": "eth0",
                "ifindex": 2,
                "kind": "clsact/egress",
                "name": "fbflow_egress",
                "id": 334
            }
        ]
    }
  ]
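
Since -j/-p emit regular JSON, the output can be post-processed with
standard tooling; for example, assuming jq is available, the tc
program ids from the dump above can be extracted with:
  $ bpftool -j net | jq '.[0].tc[].id'
  335
  334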

Signed-off-by: Yonghong Song <yhs@fb.com>
Signed-off-by: Daniel Borkmann <daniel@iogearbox.net>
Yonghong Song, 2018-09-17 16:13:00 -07:00, committed by Daniel Borkmann
commit 7900efc192, parent 70e88c758a
5 changed files with 160 additions and 129 deletions

tools/bpf/bpftool/Documentation/bpftool-net.rst

@@ -26,9 +26,20 @@ NET COMMANDS
DESCRIPTION
===========
**bpftool net { show | list } [ dev name ]**
List all networking device driver and tc attachment in the system.
List bpf program attachments in the kernel networking subsystem.
Output will start with all xdp program attachment, followed by
Currently, only device driver xdp attachments and tc filter
classification/action attachments are implemented, i.e., for
program types **BPF_PROG_TYPE_SCHED_CLS**,
**BPF_PROG_TYPE_SCHED_ACT** and **BPF_PROG_TYPE_XDP**.
For programs attached to a particular cgroup, e.g.,
**BPF_PROG_TYPE_CGROUP_SKB**, **BPF_PROG_TYPE_CGROUP_SOCK**,
**BPF_PROG_TYPE_SOCK_OPS** and **BPF_PROG_TYPE_CGROUP_SOCK_ADDR**,
users can use **bpftool cgroup** to dump cgroup attachments.
For sk_{filter, skb, msg, reuseport} and lwt/seg6
bpf programs, users should consult other tools, e.g., iproute2.
The current output will start with all xdp program attachments, followed by
all tc class/qdisc bpf program attachments. Both xdp programs and
tc programs are ordered based on ifindex number. If multiple bpf
programs attached to the same networking device through **tc filter**,
@@ -61,21 +72,15 @@ EXAMPLES
::
xdp [
ifindex 2 devname eth0 prog_id 198
]
tc_filters [
ifindex 2 kind qdisc_htb name prefix_matcher.o:[cls_prefix_matcher_htb]
prog_id 111727 tag d08fe3b4319bc2fd act []
ifindex 2 kind qdisc_clsact_ingress name fbflow_icmp
prog_id 130246 tag 3f265c7f26db62c9 act []
ifindex 2 kind qdisc_clsact_egress name prefix_matcher.o:[cls_prefix_matcher_clsact]
prog_id 111726 tag 99a197826974c876
ifindex 2 kind qdisc_clsact_egress name cls_fg_dscp
prog_id 108619 tag dc4630674fd72dcc act []
ifindex 2 kind qdisc_clsact_egress name fbflow_egress
prog_id 130245 tag 72d2d830d6888d2c
]
xdp:
eth0(2) driver id 198
tc:
eth0(2) htb name prefix_matcher.o:[cls_prefix_matcher_htb] id 111727 act []
eth0(2) clsact/ingress fbflow_icmp id 130246 act []
eth0(2) clsact/egress prefix_matcher.o:[cls_prefix_matcher_clsact] id 111726
eth0(2) clsact/egress cls_fg_dscp id 108619 act []
eth0(2) clsact/egress fbflow_egress id 130245
|
| **# bpftool -jp net**
@@ -84,44 +89,45 @@ EXAMPLES
[{
"xdp": [{
"ifindex": 2,
"devname": "eth0",
"prog_id": 198
"ifindex": 2,
"mode": "driver",
"id": 198
}
],
"tc_filters": [{
"tc": [{
"devname": "eth0",
"ifindex": 2,
"kind": "qdisc_htb",
"kind": "htb",
"name": "prefix_matcher.o:[cls_prefix_matcher_htb]",
"prog_id": 111727,
"tag": "d08fe3b4319bc2fd",
"id": 111727,
"act": []
},{
"devname": "eth0",
"ifindex": 2,
"kind": "qdisc_clsact_ingress",
"kind": "clsact/ingress",
"name": "fbflow_icmp",
"prog_id": 130246,
"tag": "3f265c7f26db62c9",
"id": 130246,
"act": []
},{
"devname": "eth0",
"ifindex": 2,
"kind": "qdisc_clsact_egress",
"kind": "clsact/egress",
"name": "prefix_matcher.o:[cls_prefix_matcher_clsact]",
"prog_id": 111726,
"tag": "99a197826974c876"
"id": 111726,
},{
"devname": "eth0",
"ifindex": 2,
"kind": "qdisc_clsact_egress",
"kind": "clsact/egress",
"name": "cls_fg_dscp",
"prog_id": 108619,
"tag": "dc4630674fd72dcc",
"id": 108619,
"act": []
},{
"devname": "eth0",
"ifindex": 2,
"kind": "qdisc_clsact_egress",
"kind": "clsact/egress",
"name": "fbflow_egress",
"prog_id": 130245,
"tag": "72d2d830d6888d2c"
"id": 130245,
}
]
}

tools/bpf/bpftool/main.h

@@ -171,5 +171,6 @@ struct nlattr;
struct ifinfomsg;
struct tcmsg;
int do_xdp_dump(struct ifinfomsg *ifinfo, struct nlattr **tb);
int do_filter_dump(struct tcmsg *ifinfo, struct nlattr **tb, const char *kind);
int do_filter_dump(struct tcmsg *ifinfo, struct nlattr **tb, const char *kind,
const char *devname, int ifindex);
#endif

tools/bpf/bpftool/net.c

@@ -2,6 +2,7 @@
// Copyright (C) 2018 Facebook
#define _GNU_SOURCE
#include <errno.h>
#include <stdlib.h>
#include <string.h>
#include <unistd.h>
@@ -17,8 +18,13 @@
#include "main.h"
#include "netlink_dumper.h"
struct ip_devname_ifindex {
char devname[64];
int ifindex;
};
struct bpf_netdev_t {
int *ifindex_array;
struct ip_devname_ifindex *devices;
int used_len;
int array_len;
int filter_idx;
@@ -36,6 +42,12 @@ struct bpf_tcinfo_t {
bool is_qdisc;
};
struct bpf_filter_t {
const char *kind;
const char *devname;
int ifindex;
};
static int dump_link_nlmsg(void *cookie, void *msg, struct nlattr **tb)
{
struct bpf_netdev_t *netinfo = cookie;
@@ -45,11 +57,20 @@ static int dump_link_nlmsg(void *cookie, void *msg, struct nlattr **tb)
return 0;
if (netinfo->used_len == netinfo->array_len) {
netinfo->ifindex_array = realloc(netinfo->ifindex_array,
(netinfo->array_len + 16) * sizeof(int));
netinfo->devices = realloc(netinfo->devices,
(netinfo->array_len + 16) *
sizeof(struct ip_devname_ifindex));
if (!netinfo->devices)
return -ENOMEM;
netinfo->array_len += 16;
}
netinfo->ifindex_array[netinfo->used_len++] = ifinfo->ifi_index;
netinfo->devices[netinfo->used_len].ifindex = ifinfo->ifi_index;
snprintf(netinfo->devices[netinfo->used_len].devname,
sizeof(netinfo->devices[netinfo->used_len].devname),
"%s",
tb[IFLA_IFNAME] ? nla_getattr_str(tb[IFLA_IFNAME]) : "");
netinfo->used_len++;
return do_xdp_dump(ifinfo, tb);
}
@@ -71,13 +92,15 @@ static int dump_class_qdisc_nlmsg(void *cookie, void *msg, struct nlattr **tb)
if (tcinfo->used_len == tcinfo->array_len) {
tcinfo->handle_array = realloc(tcinfo->handle_array,
(tcinfo->array_len + 16) * sizeof(struct tc_kind_handle));
if (!tcinfo->handle_array)
return -ENOMEM;
tcinfo->array_len += 16;
}
tcinfo->handle_array[tcinfo->used_len].handle = info->tcm_handle;
snprintf(tcinfo->handle_array[tcinfo->used_len].kind,
sizeof(tcinfo->handle_array[tcinfo->used_len].kind),
"%s_%s",
tcinfo->is_qdisc ? "qdisc" : "class",
"%s",
tb[TCA_KIND] ? nla_getattr_str(tb[TCA_KIND]) : "unknown");
tcinfo->used_len++;
@@ -86,60 +109,71 @@ static int dump_class_qdisc_nlmsg(void *cookie, void *msg, struct nlattr **tb)
static int dump_filter_nlmsg(void *cookie, void *msg, struct nlattr **tb)
{
const char *kind = cookie;
const struct bpf_filter_t *filter_info = cookie;
return do_filter_dump((struct tcmsg *)msg, tb, kind);
return do_filter_dump((struct tcmsg *)msg, tb, filter_info->kind,
filter_info->devname, filter_info->ifindex);
}
static int show_dev_tc_bpf(int sock, unsigned int nl_pid, int ifindex)
static int show_dev_tc_bpf(int sock, unsigned int nl_pid,
struct ip_devname_ifindex *dev)
{
struct bpf_filter_t filter_info;
struct bpf_tcinfo_t tcinfo;
int i, handle, ret;
int i, handle, ret = 0;
tcinfo.handle_array = NULL;
tcinfo.used_len = 0;
tcinfo.array_len = 0;
tcinfo.is_qdisc = false;
ret = nl_get_class(sock, nl_pid, ifindex, dump_class_qdisc_nlmsg,
ret = nl_get_class(sock, nl_pid, dev->ifindex, dump_class_qdisc_nlmsg,
&tcinfo);
if (ret)
return ret;
goto out;
tcinfo.is_qdisc = true;
ret = nl_get_qdisc(sock, nl_pid, ifindex, dump_class_qdisc_nlmsg,
ret = nl_get_qdisc(sock, nl_pid, dev->ifindex, dump_class_qdisc_nlmsg,
&tcinfo);
if (ret)
return ret;
goto out;
filter_info.devname = dev->devname;
filter_info.ifindex = dev->ifindex;
for (i = 0; i < tcinfo.used_len; i++) {
ret = nl_get_filter(sock, nl_pid, ifindex,
filter_info.kind = tcinfo.handle_array[i].kind;
ret = nl_get_filter(sock, nl_pid, dev->ifindex,
tcinfo.handle_array[i].handle,
dump_filter_nlmsg,
tcinfo.handle_array[i].kind);
&filter_info);
if (ret)
return ret;
goto out;
}
/* root, ingress and egress handle */
handle = TC_H_ROOT;
ret = nl_get_filter(sock, nl_pid, ifindex, handle, dump_filter_nlmsg,
"root");
filter_info.kind = "root";
ret = nl_get_filter(sock, nl_pid, dev->ifindex, handle,
dump_filter_nlmsg, &filter_info);
if (ret)
return ret;
goto out;
handle = TC_H_MAKE(TC_H_CLSACT, TC_H_MIN_INGRESS);
ret = nl_get_filter(sock, nl_pid, ifindex, handle, dump_filter_nlmsg,
"qdisc_clsact_ingress");
filter_info.kind = "clsact/ingress";
ret = nl_get_filter(sock, nl_pid, dev->ifindex, handle,
dump_filter_nlmsg, &filter_info);
if (ret)
return ret;
goto out;
handle = TC_H_MAKE(TC_H_CLSACT, TC_H_MIN_EGRESS);
ret = nl_get_filter(sock, nl_pid, ifindex, handle, dump_filter_nlmsg,
"qdisc_clsact_egress");
filter_info.kind = "clsact/egress";
ret = nl_get_filter(sock, nl_pid, dev->ifindex, handle,
dump_filter_nlmsg, &filter_info);
if (ret)
return ret;
goto out;
out:
free(tcinfo.handle_array);
return 0;
}
@@ -168,7 +202,7 @@ static int do_show(int argc, char **argv)
return -1;
}
dev_array.ifindex_array = NULL;
dev_array.devices = NULL;
dev_array.used_len = 0;
dev_array.array_len = 0;
dev_array.filter_idx = filter_idx;
@@ -176,15 +210,15 @@ static int do_show(int argc, char **argv)
if (json_output)
jsonw_start_array(json_wtr);
NET_START_OBJECT;
NET_START_ARRAY("xdp", "\n");
NET_START_ARRAY("xdp", "%s:\n");
ret = nl_get_link(sock, nl_pid, dump_link_nlmsg, &dev_array);
NET_END_ARRAY("\n");
if (!ret) {
NET_START_ARRAY("tc_filters", "\n");
NET_START_ARRAY("tc", "%s:\n");
for (i = 0; i < dev_array.used_len; i++) {
ret = show_dev_tc_bpf(sock, nl_pid,
dev_array.ifindex_array[i]);
&dev_array.devices[i]);
if (ret)
break;
}
@@ -200,7 +234,7 @@ static int do_show(int argc, char **argv)
libbpf_strerror(ret, err_buf, sizeof(err_buf));
fprintf(stderr, "Error: %s\n", err_buf);
}
free(dev_array.ifindex_array);
free(dev_array.devices);
close(sock);
return ret;
}
@@ -214,7 +248,12 @@ static int do_help(int argc, char **argv)
fprintf(stderr,
"Usage: %s %s { show | list } [dev <devname>]\n"
" %s %s help\n",
" %s %s help\n"
"Note: Only xdp and tc attachments are supported now.\n"
" For progs attached to cgroups, use \"bpftool cgroup\"\n"
" to dump program attachments. For program types\n"
" sk_{filter,skb,msg,reuseport} and lwt/seg6, please\n"
" consult iproute2.\n",
bin_name, argv[-2], bin_name, argv[-2]);
return 0;

tools/bpf/bpftool/netlink_dumper.c

@@ -12,12 +12,18 @@
#include "netlink_dumper.h"
static void xdp_dump_prog_id(struct nlattr **tb, int attr,
const char *type)
const char *mode,
bool new_json_object)
{
if (!tb[attr])
return;
NET_DUMP_UINT(type, nla_getattr_u32(tb[attr]))
if (new_json_object)
NET_START_OBJECT
NET_DUMP_STR("mode", " %s", mode);
NET_DUMP_UINT("id", " id %u", nla_getattr_u32(tb[attr]))
if (new_json_object)
NET_END_OBJECT
}
static int do_xdp_dump_one(struct nlattr *attr, unsigned int ifindex,
@@ -37,18 +43,26 @@ static int do_xdp_dump_one(struct nlattr *attr, unsigned int ifindex,
return 0;
NET_START_OBJECT;
NET_DUMP_UINT("ifindex", ifindex);
if (name)
NET_DUMP_STR("devname", name);
if (tb[IFLA_XDP_PROG_ID])
NET_DUMP_UINT("prog_id", nla_getattr_u32(tb[IFLA_XDP_PROG_ID]));
NET_DUMP_STR("devname", "%s", name);
NET_DUMP_UINT("ifindex", "(%d)", ifindex);
if (mode == XDP_ATTACHED_MULTI) {
xdp_dump_prog_id(tb, IFLA_XDP_SKB_PROG_ID, "generic_prog_id");
xdp_dump_prog_id(tb, IFLA_XDP_DRV_PROG_ID, "drv_prog_id");
xdp_dump_prog_id(tb, IFLA_XDP_HW_PROG_ID, "offload_prog_id");
if (json_output) {
jsonw_name(json_wtr, "multi_attachments");
jsonw_start_array(json_wtr);
}
xdp_dump_prog_id(tb, IFLA_XDP_SKB_PROG_ID, "generic", true);
xdp_dump_prog_id(tb, IFLA_XDP_DRV_PROG_ID, "driver", true);
xdp_dump_prog_id(tb, IFLA_XDP_HW_PROG_ID, "offload", true);
if (json_output)
jsonw_end_array(json_wtr);
} else if (mode == XDP_ATTACHED_DRV) {
xdp_dump_prog_id(tb, IFLA_XDP_PROG_ID, "driver", false);
} else if (mode == XDP_ATTACHED_SKB) {
xdp_dump_prog_id(tb, IFLA_XDP_PROG_ID, "generic", false);
} else if (mode == XDP_ATTACHED_HW) {
xdp_dump_prog_id(tb, IFLA_XDP_PROG_ID, "offload", false);
}
NET_END_OBJECT_FINAL;
@@ -64,26 +78,9 @@ int do_xdp_dump(struct ifinfomsg *ifinfo, struct nlattr **tb)
nla_getattr_str(tb[IFLA_IFNAME]));
}
static char *hexstring_n2a(const unsigned char *str, int len,
char *buf, int blen)
{
char *ptr = buf;
int i;
for (i = 0; i < len; i++) {
if (blen < 3)
break;
sprintf(ptr, "%02x", str[i]);
ptr += 2;
blen -= 2;
}
return buf;
}
static int do_bpf_dump_one_act(struct nlattr *attr)
{
struct nlattr *tb[TCA_ACT_BPF_MAX + 1];
char buf[256];
if (nla_parse_nested(tb, TCA_ACT_BPF_MAX, attr, NULL) < 0)
return -LIBBPF_ERRNO__NLPARSE;
@@ -93,13 +90,11 @@ static int do_bpf_dump_one_act(struct nlattr *attr)
NET_START_OBJECT_NESTED2;
if (tb[TCA_ACT_BPF_NAME])
NET_DUMP_STR("name", nla_getattr_str(tb[TCA_ACT_BPF_NAME]));
NET_DUMP_STR("name", "%s",
nla_getattr_str(tb[TCA_ACT_BPF_NAME]));
if (tb[TCA_ACT_BPF_ID])
NET_DUMP_UINT("bpf_id", nla_getattr_u32(tb[TCA_ACT_BPF_ID]));
if (tb[TCA_ACT_BPF_TAG])
NET_DUMP_STR("tag", hexstring_n2a(nla_data(tb[TCA_ACT_BPF_TAG]),
nla_len(tb[TCA_ACT_BPF_TAG]),
buf, sizeof(buf)));
NET_DUMP_UINT("id", " id %u",
nla_getattr_u32(tb[TCA_ACT_BPF_ID]));
NET_END_OBJECT_NESTED;
return 0;
}
@@ -128,13 +123,13 @@ static int do_bpf_act_dump(struct nlattr *attr)
if (nla_parse_nested(tb, TCA_ACT_MAX_PRIO, attr, NULL) < 0)
return -LIBBPF_ERRNO__NLPARSE;
NET_START_ARRAY("act", "");
NET_START_ARRAY("act", " %s [");
for (act = 0; act <= TCA_ACT_MAX_PRIO; act++) {
ret = do_dump_one_act(tb[act]);
if (ret)
break;
}
NET_END_ARRAY(" ");
NET_END_ARRAY("] ");
return ret;
}
@@ -142,20 +137,15 @@ static int do_bpf_act_dump(struct nlattr *attr)
static int do_bpf_filter_dump(struct nlattr *attr)
{
struct nlattr *tb[TCA_BPF_MAX + 1];
char buf[256];
int ret;
if (nla_parse_nested(tb, TCA_BPF_MAX, attr, NULL) < 0)
return -LIBBPF_ERRNO__NLPARSE;
if (tb[TCA_BPF_NAME])
NET_DUMP_STR("name", nla_getattr_str(tb[TCA_BPF_NAME]));
NET_DUMP_STR("name", " %s", nla_getattr_str(tb[TCA_BPF_NAME]));
if (tb[TCA_BPF_ID])
NET_DUMP_UINT("prog_id", nla_getattr_u32(tb[TCA_BPF_ID]));
if (tb[TCA_BPF_TAG])
NET_DUMP_STR("tag", hexstring_n2a(nla_data(tb[TCA_BPF_TAG]),
nla_len(tb[TCA_BPF_TAG]),
buf, sizeof(buf)));
NET_DUMP_UINT("id", " id %u", nla_getattr_u32(tb[TCA_BPF_ID]));
if (tb[TCA_BPF_ACT]) {
ret = do_bpf_act_dump(tb[TCA_BPF_ACT]);
if (ret)
@@ -165,14 +155,17 @@ static int do_bpf_filter_dump(struct nlattr *attr)
return 0;
}
int do_filter_dump(struct tcmsg *info, struct nlattr **tb, const char *kind)
int do_filter_dump(struct tcmsg *info, struct nlattr **tb, const char *kind,
const char *devname, int ifindex)
{
int ret = 0;
if (tb[TCA_OPTIONS] && strcmp(nla_data(tb[TCA_KIND]), "bpf") == 0) {
NET_START_OBJECT;
NET_DUMP_UINT("ifindex", info->tcm_ifindex);
NET_DUMP_STR("kind", kind);
if (devname[0] != '\0')
NET_DUMP_STR("devname", "%s", devname);
NET_DUMP_UINT("ifindex", "(%u)", ifindex);
NET_DUMP_STR("kind", " %s", kind);
ret = do_bpf_filter_dump(tb[TCA_OPTIONS]);
NET_END_OBJECT_FINAL;
}

tools/bpf/bpftool/netlink_dumper.h

@@ -50,13 +50,13 @@
fprintf(stderr, "\n"); \
}
#define NET_START_ARRAY(name, newline) \
#define NET_START_ARRAY(name, fmt_str) \
{ \
if (json_output) { \
jsonw_name(json_wtr, name); \
jsonw_start_array(json_wtr); \
} else { \
fprintf(stderr, "%s [%s", name, newline);\
fprintf(stderr, fmt_str, name); \
} \
}
@@ -65,31 +65,23 @@
if (json_output) \
jsonw_end_array(json_wtr); \
else \
fprintf(stderr, "]%s", endstr); \
fprintf(stderr, "%s", endstr); \
}
#define NET_DUMP_UINT(name, val) \
#define NET_DUMP_UINT(name, fmt_str, val) \
{ \
if (json_output) \
jsonw_uint_field(json_wtr, name, val); \
else \
fprintf(stderr, "%s %d ", name, val); \
fprintf(stderr, fmt_str, val); \
}
#define NET_DUMP_LLUINT(name, val) \
{ \
if (json_output) \
jsonw_lluint_field(json_wtr, name, val);\
else \
fprintf(stderr, "%s %lld ", name, val); \
}
#define NET_DUMP_STR(name, str) \
#define NET_DUMP_STR(name, fmt_str, str) \
{ \
if (json_output) \
jsonw_string_field(json_wtr, name, str);\
else \
fprintf(stderr, "%s %s ", name, str); \
fprintf(stderr, fmt_str, str); \
}
#define NET_DUMP_STR_ONLY(str) \