cxgb4: add TC-MATCHALL IPv6 support

Matching IPv6 traffic requires allocating its own individual slots in
the TCAM. So, fetch additional slots to insert the IPv6 rules. Also,
fetch the cumulative stats across all the slots occupied by the
matchall rule.

Signed-off-by: Rahul Lakkireddy <rahul.lakkireddy@chelsio.com>
Signed-off-by: David S. Miller <davem@davemloft.net>
Authored by Rahul Lakkireddy on 2020-08-04 00:00:08 +05:30; committed by David S. Miller
parent af9fdd2bf8
commit 59b328cf56
3 changed files with 82 additions and 26 deletions
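
As a quick orientation before the diff: the change gives each matchall rule one LE-TCAM filter slot per address family (index 0 for IPv4, index 1 for IPv6) and reports the sum of all slots' counters back to tc. The snippet below is a minimal, self-contained sketch of that per-family-slot and cumulative-stats pattern only; names such as filter_slot, query_hw_counters and FILTER_TYPE_MAX are hypothetical stand-ins, not cxgb4 driver APIs.

/* Standalone sketch (not driver code): one slot per address family,
 * stats summed over all slots owned by the rule. All identifiers here
 * are illustrative only.
 */
#include <stdint.h>
#include <stdio.h>

#define FILTER_TYPE_MAX 2 /* mirrors CXGB4_FILTER_TYPE_MAX: IPv4 + IPv6 */

struct filter_slot {
	uint32_t tid;     /* hardware filter index for this family */
	uint64_t packets; /* per-slot hit counters */
	uint64_t bytes;
};

/* Stand-in for reading one slot's hardware counters. */
static void query_hw_counters(const struct filter_slot *slot,
			      uint64_t *packets, uint64_t *bytes)
{
	*packets = slot->packets;
	*bytes = slot->bytes;
}

int main(void)
{
	/* One slot per family, as the matchall ingress entry now holds
	 * tid[] and fs[] arrays instead of single fields.
	 */
	struct filter_slot slots[FILTER_TYPE_MAX] = {
		{ .tid = 10, .packets = 100, .bytes = 6400 }, /* IPv4 */
		{ .tid = 11, .packets = 40,  .bytes = 5120 }, /* IPv6 */
	};
	uint64_t tmp_packets, tmp_bytes, packets = 0, bytes = 0;
	int i;

	/* Cumulative stats: sum the counters of every slot the rule owns. */
	for (i = 0; i < FILTER_TYPE_MAX; i++) {
		query_hw_counters(&slots[i], &tmp_packets, &tmp_bytes);
		packets += tmp_packets;
		bytes += tmp_bytes;
	}

	printf("matchall rule: %llu packets, %llu bytes\n",
	       (unsigned long long)packets, (unsigned long long)bytes);
	return 0;
}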

drivers/net/ethernet/chelsio/cxgb4/cxgb4.h

@@ -1438,6 +1438,8 @@ enum {
 	NAT_MODE_ALL		/* NAT on entire 4-tuple */
 };

+#define CXGB4_FILTER_TYPE_MAX	2
+
 /* Host shadow copy of ingress filter entry. This is in host native format
  * and doesn't match the ordering or bit order, etc. of the hardware of the
  * firmware command. The use of bit-field structure elements is purely to

drivers/net/ethernet/chelsio/cxgb4/cxgb4_tc_matchall.c

@@ -231,8 +231,26 @@ static void cxgb4_matchall_mirror_free(struct net_device *dev)
 	tc_port_matchall->ingress.viid_mirror = 0;
 }

-static int cxgb4_matchall_alloc_filter(struct net_device *dev,
-				       struct tc_cls_matchall_offload *cls)
+static int cxgb4_matchall_del_filter(struct net_device *dev, u8 filter_type)
+{
+	struct cxgb4_tc_port_matchall *tc_port_matchall;
+	struct port_info *pi = netdev2pinfo(dev);
+	struct adapter *adap = netdev2adap(dev);
+	int ret;
+
+	tc_port_matchall = &adap->tc_matchall->port_matchall[pi->port_id];
+	ret = cxgb4_del_filter(dev, tc_port_matchall->ingress.tid[filter_type],
+			       &tc_port_matchall->ingress.fs[filter_type]);
+	if (ret)
+		return ret;
+
+	tc_port_matchall->ingress.tid[filter_type] = 0;
+	return 0;
+}
+
+static int cxgb4_matchall_add_filter(struct net_device *dev,
+				     struct tc_cls_matchall_offload *cls,
+				     u8 filter_type)
 {
 	struct netlink_ext_ack *extack = cls->common.extack;
 	struct cxgb4_tc_port_matchall *tc_port_matchall;
@@ -244,28 +262,24 @@ static int cxgb4_matchall_alloc_filter(struct net_device *dev,
 	/* Get a free filter entry TID, where we can insert this new
 	 * rule. Only insert rule if its prio doesn't conflict with
 	 * existing rules.
-	 *
-	 * 1 slot is enough to create a wildcard matchall VIID rule.
 	 */
-	fidx = cxgb4_get_free_ftid(dev, PF_INET, false, cls->common.prio);
+	fidx = cxgb4_get_free_ftid(dev, filter_type ? PF_INET6 : PF_INET,
+				   false, cls->common.prio);
 	if (fidx < 0) {
 		NL_SET_ERR_MSG_MOD(extack,
 				   "No free LETCAM index available");
 		return -ENOMEM;
 	}

-	ret = cxgb4_matchall_mirror_alloc(dev, cls);
-	if (ret)
-		return ret;
-
 	tc_port_matchall = &adap->tc_matchall->port_matchall[pi->port_id];
-	fs = &tc_port_matchall->ingress.fs;
+	fs = &tc_port_matchall->ingress.fs[filter_type];
 	memset(fs, 0, sizeof(*fs));

 	if (fidx < adap->tids.nhpftids)
 		fs->prio = 1;
 	fs->tc_prio = cls->common.prio;
 	fs->tc_cookie = cls->cookie;
+	fs->type = filter_type;
 	fs->hitcnts = 1;

 	fs->val.pfvf_vld = 1;
@@ -276,13 +290,39 @@ static int cxgb4_matchall_alloc_filter(struct net_device *dev,
 	ret = cxgb4_set_filter(dev, fidx, fs);
 	if (ret)
-		goto out_free;
+		return ret;
+
+	tc_port_matchall->ingress.tid[filter_type] = fidx;
+	return 0;
+}
+
+static int cxgb4_matchall_alloc_filter(struct net_device *dev,
+				       struct tc_cls_matchall_offload *cls)
+{
+	struct cxgb4_tc_port_matchall *tc_port_matchall;
+	struct port_info *pi = netdev2pinfo(dev);
+	struct adapter *adap = netdev2adap(dev);
+	int ret, i;
+
+	tc_port_matchall = &adap->tc_matchall->port_matchall[pi->port_id];
+	ret = cxgb4_matchall_mirror_alloc(dev, cls);
+	if (ret)
+		return ret;
+
+	for (i = 0; i < CXGB4_FILTER_TYPE_MAX; i++) {
+		ret = cxgb4_matchall_add_filter(dev, cls, i);
+		if (ret)
+			goto out_free;
+	}

-	tc_port_matchall->ingress.tid = fidx;
 	tc_port_matchall->ingress.state = CXGB4_MATCHALL_STATE_ENABLED;
 	return 0;

 out_free:
+	while (i-- > 0)
+		cxgb4_matchall_del_filter(dev, i);
+
 	cxgb4_matchall_mirror_free(dev);
 	return ret;
 }
@@ -293,20 +333,21 @@ static int cxgb4_matchall_free_filter(struct net_device *dev)
 	struct port_info *pi = netdev2pinfo(dev);
 	struct adapter *adap = netdev2adap(dev);
 	int ret;
+	u8 i;

 	tc_port_matchall = &adap->tc_matchall->port_matchall[pi->port_id];
-	ret = cxgb4_del_filter(dev, tc_port_matchall->ingress.tid,
-			       &tc_port_matchall->ingress.fs);
-	if (ret)
-		return ret;
+	for (i = 0; i < CXGB4_FILTER_TYPE_MAX; i++) {
+		ret = cxgb4_matchall_del_filter(dev, i);
+		if (ret)
+			return ret;
+	}

 	cxgb4_matchall_mirror_free(dev);

 	tc_port_matchall->ingress.packets = 0;
 	tc_port_matchall->ingress.bytes = 0;
 	tc_port_matchall->ingress.last_used = 0;
-	tc_port_matchall->ingress.tid = 0;
 	tc_port_matchall->ingress.state = CXGB4_MATCHALL_STATE_DISABLED;
 	return 0;
 }
@@ -362,8 +403,12 @@ int cxgb4_tc_matchall_destroy(struct net_device *dev,
 	tc_port_matchall = &adap->tc_matchall->port_matchall[pi->port_id];
 	if (ingress) {
+		/* All the filter types of this matchall rule save the
+		 * same cookie. So, checking for the first one is
+		 * enough.
+		 */
 		if (cls_matchall->cookie !=
-		    tc_port_matchall->ingress.fs.tc_cookie)
+		    tc_port_matchall->ingress.fs[0].tc_cookie)
 			return -ENOENT;

 		return cxgb4_matchall_free_filter(dev);
@@ -379,21 +424,29 @@ int cxgb4_tc_matchall_destroy(struct net_device *dev,
 int cxgb4_tc_matchall_stats(struct net_device *dev,
 			    struct tc_cls_matchall_offload *cls_matchall)
 {
+	u64 tmp_packets, tmp_bytes, packets = 0, bytes = 0;
 	struct cxgb4_tc_port_matchall *tc_port_matchall;
+	struct cxgb4_matchall_ingress_entry *ingress;
 	struct port_info *pi = netdev2pinfo(dev);
 	struct adapter *adap = netdev2adap(dev);
-	u64 packets, bytes;
 	int ret;
+	u8 i;

 	tc_port_matchall = &adap->tc_matchall->port_matchall[pi->port_id];
 	if (tc_port_matchall->ingress.state == CXGB4_MATCHALL_STATE_DISABLED)
 		return -ENOENT;

-	ret = cxgb4_get_filter_counters(dev, tc_port_matchall->ingress.tid,
-					&packets, &bytes,
-					tc_port_matchall->ingress.fs.hash);
-	if (ret)
-		return ret;
+	ingress = &tc_port_matchall->ingress;
+	for (i = 0; i < CXGB4_FILTER_TYPE_MAX; i++) {
+		ret = cxgb4_get_filter_counters(dev, ingress->tid[i],
+						&tmp_packets, &tmp_bytes,
+						ingress->fs[i].hash);
+		if (ret)
+			return ret;
+
+		packets += tmp_packets;
+		bytes += tmp_bytes;
+	}

 	if (tc_port_matchall->ingress.packets != packets) {
 		flow_stats_update(&cls_matchall->stats,

drivers/net/ethernet/chelsio/cxgb4/cxgb4_tc_matchall.h

@@ -19,8 +19,9 @@ struct cxgb4_matchall_egress_entry {
 struct cxgb4_matchall_ingress_entry {
 	enum cxgb4_matchall_state state; /* Current MATCHALL offload state */
-	u32 tid; /* Index to hardware filter entry */
-	struct ch_filter_specification fs; /* Filter entry */
+	u32 tid[CXGB4_FILTER_TYPE_MAX]; /* Index to hardware filter entries */
+	/* Filter entries */
+	struct ch_filter_specification fs[CXGB4_FILTER_TYPE_MAX];
 	u16 viid_mirror; /* Identifier for allocated Mirror VI */
 	u64 bytes; /* # of bytes hitting the filter */
 	u64 packets; /* # of packets hitting the filter */