mirror of https://gitee.com/openkylin/linux.git
Merge branch 'net-sched-allow-qdiscs-to-share-filter-block-instances'
Jiri Pirko says:

====================
net: sched: allow qdiscs to share filter block instances

Currently the filters added to qdiscs are independent. So for example if
you have 2 netdevices and you create ingress qdisc on both and you want
to add identical filter rules to both, you need to add them twice. This
patchset makes this easier and mainly saves resources allowing to share
all filters within a qdisc - I call it a "filter block". Also this helps
to save resources when we do offload to hw, for example to an expensive
TCAM.

So back to the example. First, we create 2 qdiscs. Both will share
block number 22. "22" is just an identification:

$ tc qdisc add dev ens7 ingress_block 22 ingress
                        ^^^^^^^^^^^^^^^^
$ tc qdisc add dev ens8 ingress_block 22 ingress
                        ^^^^^^^^^^^^^^^^

If we don't specify "block" command line option, no shared block would
be created:

$ tc qdisc add dev ens9 ingress

Now if we list the qdiscs, we will see the block index in the output:

$ tc qdisc
qdisc ingress ffff: dev ens7 parent ffff:fff1 ingress_block 22
qdisc ingress ffff: dev ens8 parent ffff:fff1 ingress_block 22
qdisc ingress ffff: dev ens9 parent ffff:fff1

To make it more visual, the situation looks like this:

   ens7 ingress qdisc                 ens8 ingress qdisc
          |                                  |
          |                                  |
          +---------->  block 22  <----------+

Unlimited number of qdiscs may share the same block.

Note that this patchset introduces block sharing support also for the
clsact qdisc:

$ tc qdisc add dev ens10 ingress_block 23 egress_block 24 clsact
$ tc qdisc show dev ens10
qdisc clsact ffff: dev ens10 parent ffff:fff1 ingress_block 23 egress_block 24

We can add filter using the block index:

$ tc filter add block 22 protocol ip pref 25 flower dst_ip 192.168.0.0/16 action drop

Note we cannot use the qdisc for filter manipulations of shared blocks:

$ tc filter add dev ens8 ingress protocol ip pref 1 flower dst_ip 192.168.100.2 action drop
Error: This filter block is shared. Please use the block index to manipulate the filters.

We will see the same output if we list filters for ingress qdisc of
ens7 and ens8, also for the block 22:

$ tc filter show block 22
filter block 22 protocol ip pref 25 flower chain 0
filter block 22 protocol ip pref 25 flower chain 0 handle 0x1
...

$ tc filter show dev ens7 ingress
filter block 22 protocol ip pref 25 flower chain 0
filter block 22 protocol ip pref 25 flower chain 0 handle 0x1
...

$ tc filter show dev ens8 ingress
filter block 22 protocol ip pref 25 flower chain 0
filter block 22 protocol ip pref 25 flower chain 0 handle 0x1
...

---
v10->v11:
- patch 2:
  - fixed error path when register_pernet_subsys fails, pointed out by Cong
- patch 9:
  - rebased on top of the current net-next

v9->v10:
- patch 7:
  - fixed ifindex magic in the patch description
- userspace patches:
  - added manpages and patch descriptions

v8->v9:
- patch "net: sched: add rt netlink message type for block get" was
  removed; userspace checks filter existence using qdisc dump

v7->v8:
- patch 7:
  - added comment to ifindex block magic
- patch 9:
  - new patch
- patch 10:
  - base this on the patch that introduces qdisc-generic block index
    attributes parsing/dumping
- patch 13:
  - rebased on top of current net-next

v6->v7:
- patch 1:
  - unsquashed shared block patch that was previously squashed by mistake
  - fixed error path in block create - freeing chain 0
- patch 2:
  - new patch - split from the previous one as it got accidentally
    squashed in the rebasing process in the past
  - converted to idr extended
  - removed auto-generating of block indexes. Callers have to explicitly
    tell that the block is shared by passing non-zero block index
  - fixed error path in block get ext - freeing chain 0
- patch 7:
  - changed extack message for block index handle as suggested by DaveA
  - added extack message when block index does not exist
  - the block ifindex magic is in define and changed to 0xffffffff
    as suggested by Jamal
- patch 8:
  - new patch implementing RTM_GETBLOCK in order to query if the block
    with some index exists
- patch 9:
  - adjust to the core changes and check block index attributes for being 0

v5->v6:
- added patch 6 that introduces block handle

v4->v5:
- patch 5:
  - add tracking of binding of devs that are unable to offload and
    check that before block cbs call

v3->v4:
- patch 1:
  - rebased on top of the current net-next
  - added some extack strings
- patch 3:
  - rebased on top of the current net-next
- patch 5:
  - propagate netdev_ops->ndo_setup_tc error up to tcf_block_offload_bind caller
- patch 7:
  - rebased on top of the current net-next

v2->v3:
- removed original patch 1, removing tp->q cls_bpf dependency.
  Fixed by Jakub in the meantime.
- patch 1:
  - rebased on top of the current net-next
- patch 5:
  - new patch
- patch 8:
  - removed "p_" prefix from block index function args
- patch 10:
  - add tc offload feature handling
====================

Acked-by: David Ahern <dsahern@gmail.com>
Signed-off-by: David S. Miller <davem@davemloft.net>
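As the examples above show, shared blocks are addressed by index rather than by device. A minimal sketch of the corresponding netlink request header, per the uapi hunk further below (sketch only; socket setup and attribute assembly are omitted):

    #include <sys/socket.h>
    #include <linux/rtnetlink.h>

    /* Filter operation on shared block 22: the magic ifindex tells the
     * kernel that tcm_parent carries a block index instead of a qdisc
     * parent ("#define tcm_block_index tcm_parent" in the uapi header).
     */
    struct tcmsg tcm = {
            .tcm_family = AF_UNSPEC,
            .tcm_ifindex = TCM_IFINDEX_MAGIC_BLOCK, /* 0xFFFFFFFFU */
            .tcm_block_index = 22,                  /* alias of tcm_parent */
    };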
This commit is contained in: commit ca46abd6f8
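For driver authors, the pattern the mlxsw changes below follow can be summarized in a short sketch. This is a minimal illustration only, assuming a hypothetical driver (the "my_*" names are placeholders); the tcf_block_cb_* helpers are the ones this series adds to cls_api:

    /* Bind a port to a (possibly shared) filter block: register the block
     * callback only once per block and refcount it, so that all netdevs
     * sharing the block also share one HW ACL context.
     */
    static int my_port_block_bind(struct my_port *port, struct tcf_block *block)
    {
            struct tcf_block_cb *block_cb;
            struct my_acl_block *acl;       /* hypothetical driver-private state */

            block_cb = tcf_block_cb_lookup(block, my_setup_tc_block_cb,
                                           port->driver);
            if (!block_cb) {
                    acl = my_acl_block_create(port->driver); /* hypothetical */
                    if (!acl)
                            return -ENOMEM;
                    block_cb = __tcf_block_cb_register(block, my_setup_tc_block_cb,
                                                       port->driver, acl);
                    if (IS_ERR(block_cb)) {
                            my_acl_block_destroy(acl);       /* hypothetical */
                            return PTR_ERR(block_cb);
                    }
            } else {
                    acl = tcf_block_cb_priv(block_cb);
            }
            tcf_block_cb_incref(block_cb);
            /* ... bind the port to "acl" in HW here ... */
            return 0;
    }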
@@ -1747,72 +1747,186 @@ static int mlxsw_sp_setup_tc_cls_matchall(struct mlxsw_sp_port *mlxsw_sp_port,
 }
 
 static int
-mlxsw_sp_setup_tc_cls_flower(struct mlxsw_sp_port *mlxsw_sp_port,
-                             struct tc_cls_flower_offload *f,
-                             bool ingress)
+mlxsw_sp_setup_tc_cls_flower(struct mlxsw_sp_acl_block *acl_block,
+                             struct tc_cls_flower_offload *f)
 {
+        struct mlxsw_sp *mlxsw_sp = mlxsw_sp_acl_block_mlxsw_sp(acl_block);
+
         switch (f->command) {
         case TC_CLSFLOWER_REPLACE:
-                return mlxsw_sp_flower_replace(mlxsw_sp_port, ingress, f);
+                return mlxsw_sp_flower_replace(mlxsw_sp, acl_block, f);
         case TC_CLSFLOWER_DESTROY:
-                mlxsw_sp_flower_destroy(mlxsw_sp_port, ingress, f);
+                mlxsw_sp_flower_destroy(mlxsw_sp, acl_block, f);
                 return 0;
         case TC_CLSFLOWER_STATS:
-                return mlxsw_sp_flower_stats(mlxsw_sp_port, ingress, f);
+                return mlxsw_sp_flower_stats(mlxsw_sp, acl_block, f);
         default:
                 return -EOPNOTSUPP;
         }
 }
 
-static int mlxsw_sp_setup_tc_block_cb(enum tc_setup_type type, void *type_data,
-                                      void *cb_priv, bool ingress)
+static int mlxsw_sp_setup_tc_block_cb_matchall(enum tc_setup_type type,
+                                               void *type_data,
+                                               void *cb_priv, bool ingress)
 {
         struct mlxsw_sp_port *mlxsw_sp_port = cb_priv;
 
-        if (!tc_can_offload(mlxsw_sp_port->dev))
-                return -EOPNOTSUPP;
-
         switch (type) {
         case TC_SETUP_CLSMATCHALL:
+                if (!tc_can_offload(mlxsw_sp_port->dev))
+                        return -EOPNOTSUPP;
+
                 return mlxsw_sp_setup_tc_cls_matchall(mlxsw_sp_port, type_data,
                                                       ingress);
         case TC_SETUP_CLSFLOWER:
-                return mlxsw_sp_setup_tc_cls_flower(mlxsw_sp_port, type_data,
-                                                    ingress);
+                return 0;
         default:
                 return -EOPNOTSUPP;
         }
 }
 
-static int mlxsw_sp_setup_tc_block_cb_ig(enum tc_setup_type type,
-                                         void *type_data, void *cb_priv)
+static int mlxsw_sp_setup_tc_block_cb_matchall_ig(enum tc_setup_type type,
+                                                  void *type_data,
+                                                  void *cb_priv)
 {
-        return mlxsw_sp_setup_tc_block_cb(type, type_data, cb_priv, true);
+        return mlxsw_sp_setup_tc_block_cb_matchall(type, type_data,
+                                                   cb_priv, true);
 }
 
-static int mlxsw_sp_setup_tc_block_cb_eg(enum tc_setup_type type,
-                                         void *type_data, void *cb_priv)
+static int mlxsw_sp_setup_tc_block_cb_matchall_eg(enum tc_setup_type type,
+                                                  void *type_data,
+                                                  void *cb_priv)
 {
-        return mlxsw_sp_setup_tc_block_cb(type, type_data, cb_priv, false);
+        return mlxsw_sp_setup_tc_block_cb_matchall(type, type_data,
+                                                   cb_priv, false);
 }
 
+static int mlxsw_sp_setup_tc_block_cb_flower(enum tc_setup_type type,
+                                             void *type_data, void *cb_priv)
+{
+        struct mlxsw_sp_acl_block *acl_block = cb_priv;
+
+        switch (type) {
+        case TC_SETUP_CLSMATCHALL:
+                return 0;
+        case TC_SETUP_CLSFLOWER:
+                if (mlxsw_sp_acl_block_disabled(acl_block))
+                        return -EOPNOTSUPP;
+
+                return mlxsw_sp_setup_tc_cls_flower(acl_block, type_data);
+        default:
+                return -EOPNOTSUPP;
+        }
+}
+
+static int
+mlxsw_sp_setup_tc_block_flower_bind(struct mlxsw_sp_port *mlxsw_sp_port,
+                                    struct tcf_block *block, bool ingress)
+{
+        struct mlxsw_sp *mlxsw_sp = mlxsw_sp_port->mlxsw_sp;
+        struct mlxsw_sp_acl_block *acl_block;
+        struct tcf_block_cb *block_cb;
+        int err;
+
+        block_cb = tcf_block_cb_lookup(block, mlxsw_sp_setup_tc_block_cb_flower,
+                                       mlxsw_sp);
+        if (!block_cb) {
+                acl_block = mlxsw_sp_acl_block_create(mlxsw_sp, block->net);
+                if (!acl_block)
+                        return -ENOMEM;
+                block_cb = __tcf_block_cb_register(block,
+                                                   mlxsw_sp_setup_tc_block_cb_flower,
+                                                   mlxsw_sp, acl_block);
+                if (IS_ERR(block_cb)) {
+                        err = PTR_ERR(block_cb);
+                        goto err_cb_register;
+                }
+        } else {
+                acl_block = tcf_block_cb_priv(block_cb);
+        }
+        tcf_block_cb_incref(block_cb);
+        err = mlxsw_sp_acl_block_bind(mlxsw_sp, acl_block,
+                                      mlxsw_sp_port, ingress);
+        if (err)
+                goto err_block_bind;
+
+        if (ingress)
+                mlxsw_sp_port->ing_acl_block = acl_block;
+        else
+                mlxsw_sp_port->eg_acl_block = acl_block;
+
+        return 0;
+
+err_block_bind:
+        if (!tcf_block_cb_decref(block_cb)) {
+                __tcf_block_cb_unregister(block_cb);
+err_cb_register:
+                mlxsw_sp_acl_block_destroy(acl_block);
+        }
+        return err;
+}
+
+static void
+mlxsw_sp_setup_tc_block_flower_unbind(struct mlxsw_sp_port *mlxsw_sp_port,
+                                      struct tcf_block *block, bool ingress)
+{
+        struct mlxsw_sp *mlxsw_sp = mlxsw_sp_port->mlxsw_sp;
+        struct mlxsw_sp_acl_block *acl_block;
+        struct tcf_block_cb *block_cb;
+        int err;
+
+        block_cb = tcf_block_cb_lookup(block, mlxsw_sp_setup_tc_block_cb_flower,
+                                       mlxsw_sp);
+        if (!block_cb)
+                return;
+
+        if (ingress)
+                mlxsw_sp_port->ing_acl_block = NULL;
+        else
+                mlxsw_sp_port->eg_acl_block = NULL;
+
+        acl_block = tcf_block_cb_priv(block_cb);
+        err = mlxsw_sp_acl_block_unbind(mlxsw_sp, acl_block,
+                                        mlxsw_sp_port, ingress);
+        if (!err && !tcf_block_cb_decref(block_cb)) {
+                __tcf_block_cb_unregister(block_cb);
+                mlxsw_sp_acl_block_destroy(acl_block);
+        }
+}
+
 static int mlxsw_sp_setup_tc_block(struct mlxsw_sp_port *mlxsw_sp_port,
                                    struct tc_block_offload *f)
 {
         tc_setup_cb_t *cb;
+        bool ingress;
+        int err;
 
-        if (f->binder_type == TCF_BLOCK_BINDER_TYPE_CLSACT_INGRESS)
-                cb = mlxsw_sp_setup_tc_block_cb_ig;
-        else if (f->binder_type == TCF_BLOCK_BINDER_TYPE_CLSACT_EGRESS)
-                cb = mlxsw_sp_setup_tc_block_cb_eg;
-        else
+        if (f->binder_type == TCF_BLOCK_BINDER_TYPE_CLSACT_INGRESS) {
+                cb = mlxsw_sp_setup_tc_block_cb_matchall_ig;
+                ingress = true;
+        } else if (f->binder_type == TCF_BLOCK_BINDER_TYPE_CLSACT_EGRESS) {
+                cb = mlxsw_sp_setup_tc_block_cb_matchall_eg;
+                ingress = false;
+        } else {
                 return -EOPNOTSUPP;
+        }
 
         switch (f->command) {
         case TC_BLOCK_BIND:
-                return tcf_block_cb_register(f->block, cb, mlxsw_sp_port,
-                                             mlxsw_sp_port);
+                err = tcf_block_cb_register(f->block, cb, mlxsw_sp_port,
+                                            mlxsw_sp_port);
+                if (err)
+                        return err;
+                err = mlxsw_sp_setup_tc_block_flower_bind(mlxsw_sp_port,
+                                                          f->block, ingress);
+                if (err) {
+                        tcf_block_cb_unregister(f->block, cb, mlxsw_sp_port);
+                        return err;
+                }
+                return 0;
         case TC_BLOCK_UNBIND:
+                mlxsw_sp_setup_tc_block_flower_unbind(mlxsw_sp_port,
+                                                      f->block, ingress);
                 tcf_block_cb_unregister(f->block, cb, mlxsw_sp_port);
                 return 0;
         default:
@@ -1842,10 +1956,18 @@ static int mlxsw_sp_feature_hw_tc(struct net_device *dev, bool enable)
 {
         struct mlxsw_sp_port *mlxsw_sp_port = netdev_priv(dev);
 
-        if (!enable && (mlxsw_sp_port->acl_rule_count ||
-                        !list_empty(&mlxsw_sp_port->mall_tc_list))) {
-                netdev_err(dev, "Active offloaded tc filters, can't turn hw_tc_offload off\n");
-                return -EINVAL;
+        if (!enable) {
+                if (mlxsw_sp_acl_block_rule_count(mlxsw_sp_port->ing_acl_block) ||
+                    mlxsw_sp_acl_block_rule_count(mlxsw_sp_port->eg_acl_block) ||
+                    !list_empty(&mlxsw_sp_port->mall_tc_list)) {
+                        netdev_err(dev, "Active offloaded tc filters, can't turn hw_tc_offload off\n");
+                        return -EINVAL;
+                }
+                mlxsw_sp_acl_block_disable_inc(mlxsw_sp_port->ing_acl_block);
+                mlxsw_sp_acl_block_disable_inc(mlxsw_sp_port->eg_acl_block);
+        } else {
+                mlxsw_sp_acl_block_disable_dec(mlxsw_sp_port->ing_acl_block);
+                mlxsw_sp_acl_block_disable_dec(mlxsw_sp_port->eg_acl_block);
         }
         return 0;
 }
@@ -260,6 +260,8 @@ struct mlxsw_sp_port {
         struct list_head vlans_list;
         struct mlxsw_sp_qdisc *root_qdisc;
-        unsigned acl_rule_count;
+        struct mlxsw_sp_acl_block *ing_acl_block;
+        struct mlxsw_sp_acl_block *eg_acl_block;
 };
 
 static inline bool
@@ -468,8 +470,11 @@ struct mlxsw_sp_acl_profile_ops {
                            void *priv, void *ruleset_priv);
         void (*ruleset_del)(struct mlxsw_sp *mlxsw_sp, void *ruleset_priv);
         int (*ruleset_bind)(struct mlxsw_sp *mlxsw_sp, void *ruleset_priv,
-                            struct net_device *dev, bool ingress);
-        void (*ruleset_unbind)(struct mlxsw_sp *mlxsw_sp, void *ruleset_priv);
+                            struct mlxsw_sp_port *mlxsw_sp_port,
+                            bool ingress);
+        void (*ruleset_unbind)(struct mlxsw_sp *mlxsw_sp, void *ruleset_priv,
+                               struct mlxsw_sp_port *mlxsw_sp_port,
+                               bool ingress);
         u16 (*ruleset_group_id)(void *ruleset_priv);
         size_t rule_priv_size;
         int (*rule_add)(struct mlxsw_sp *mlxsw_sp,
@@ -489,17 +494,34 @@ struct mlxsw_sp_acl_ops {
                             enum mlxsw_sp_acl_profile profile);
 };
 
+struct mlxsw_sp_acl_block;
 struct mlxsw_sp_acl_ruleset;
 
 /* spectrum_acl.c */
 struct mlxsw_afk *mlxsw_sp_acl_afk(struct mlxsw_sp_acl *acl);
+struct mlxsw_sp *mlxsw_sp_acl_block_mlxsw_sp(struct mlxsw_sp_acl_block *block);
+unsigned int mlxsw_sp_acl_block_rule_count(struct mlxsw_sp_acl_block *block);
+void mlxsw_sp_acl_block_disable_inc(struct mlxsw_sp_acl_block *block);
+void mlxsw_sp_acl_block_disable_dec(struct mlxsw_sp_acl_block *block);
+bool mlxsw_sp_acl_block_disabled(struct mlxsw_sp_acl_block *block);
+struct mlxsw_sp_acl_block *mlxsw_sp_acl_block_create(struct mlxsw_sp *mlxsw_sp,
+                                                     struct net *net);
+void mlxsw_sp_acl_block_destroy(struct mlxsw_sp_acl_block *block);
+int mlxsw_sp_acl_block_bind(struct mlxsw_sp *mlxsw_sp,
+                            struct mlxsw_sp_acl_block *block,
+                            struct mlxsw_sp_port *mlxsw_sp_port,
+                            bool ingress);
+int mlxsw_sp_acl_block_unbind(struct mlxsw_sp *mlxsw_sp,
+                              struct mlxsw_sp_acl_block *block,
+                              struct mlxsw_sp_port *mlxsw_sp_port,
+                              bool ingress);
 struct mlxsw_sp_acl_ruleset *
-mlxsw_sp_acl_ruleset_lookup(struct mlxsw_sp *mlxsw_sp, struct net_device *dev,
-                            bool ingress, u32 chain_index,
+mlxsw_sp_acl_ruleset_lookup(struct mlxsw_sp *mlxsw_sp,
+                            struct mlxsw_sp_acl_block *block, u32 chain_index,
                             enum mlxsw_sp_acl_profile profile);
 struct mlxsw_sp_acl_ruleset *
-mlxsw_sp_acl_ruleset_get(struct mlxsw_sp *mlxsw_sp, struct net_device *dev,
-                         bool ingress, u32 chain_index,
+mlxsw_sp_acl_ruleset_get(struct mlxsw_sp *mlxsw_sp,
+                         struct mlxsw_sp_acl_block *block, u32 chain_index,
                          enum mlxsw_sp_acl_profile profile);
 void mlxsw_sp_acl_ruleset_put(struct mlxsw_sp *mlxsw_sp,
                               struct mlxsw_sp_acl_ruleset *ruleset);
@@ -566,11 +588,14 @@ void mlxsw_sp_acl_fini(struct mlxsw_sp *mlxsw_sp);
 extern const struct mlxsw_sp_acl_ops mlxsw_sp_acl_tcam_ops;
 
 /* spectrum_flower.c */
-int mlxsw_sp_flower_replace(struct mlxsw_sp_port *mlxsw_sp_port, bool ingress,
+int mlxsw_sp_flower_replace(struct mlxsw_sp *mlxsw_sp,
+                            struct mlxsw_sp_acl_block *block,
                             struct tc_cls_flower_offload *f);
-void mlxsw_sp_flower_destroy(struct mlxsw_sp_port *mlxsw_sp_port, bool ingress,
+void mlxsw_sp_flower_destroy(struct mlxsw_sp *mlxsw_sp,
+                             struct mlxsw_sp_acl_block *block,
                              struct tc_cls_flower_offload *f);
-int mlxsw_sp_flower_stats(struct mlxsw_sp_port *mlxsw_sp_port, bool ingress,
+int mlxsw_sp_flower_stats(struct mlxsw_sp *mlxsw_sp,
+                          struct mlxsw_sp_acl_block *block,
                           struct tc_cls_flower_offload *f);
 
 /* spectrum_qdisc.c */
@@ -39,6 +39,7 @@
 #include <linux/string.h>
 #include <linux/rhashtable.h>
 #include <linux/netdevice.h>
+#include <net/net_namespace.h>
 #include <net/tc_act/tc_vlan.h>
 
 #include "reg.h"
@@ -70,9 +71,23 @@ struct mlxsw_afk *mlxsw_sp_acl_afk(struct mlxsw_sp_acl *acl)
         return acl->afk;
 }
 
-struct mlxsw_sp_acl_ruleset_ht_key {
-        struct net_device *dev; /* dev this ruleset is bound to */
+struct mlxsw_sp_acl_block_binding {
+        struct list_head list;
+        struct net_device *dev;
+        struct mlxsw_sp_port *mlxsw_sp_port;
         bool ingress;
+};
+
+struct mlxsw_sp_acl_block {
+        struct list_head binding_list;
+        struct mlxsw_sp_acl_ruleset *ruleset_zero;
+        struct mlxsw_sp *mlxsw_sp;
+        unsigned int rule_count;
+        unsigned int disable_count;
+};
+
+struct mlxsw_sp_acl_ruleset_ht_key {
+        struct mlxsw_sp_acl_block *block;
         u32 chain_index;
         const struct mlxsw_sp_acl_profile_ops *ops;
 };
@@ -118,8 +133,185 @@ struct mlxsw_sp_fid *mlxsw_sp_acl_dummy_fid(struct mlxsw_sp *mlxsw_sp)
         return mlxsw_sp->acl->dummy_fid;
 }
 
+struct mlxsw_sp *mlxsw_sp_acl_block_mlxsw_sp(struct mlxsw_sp_acl_block *block)
+{
+        return block->mlxsw_sp;
+}
+
+unsigned int mlxsw_sp_acl_block_rule_count(struct mlxsw_sp_acl_block *block)
+{
+        return block ? block->rule_count : 0;
+}
+
+void mlxsw_sp_acl_block_disable_inc(struct mlxsw_sp_acl_block *block)
+{
+        if (block)
+                block->disable_count++;
+}
+
+void mlxsw_sp_acl_block_disable_dec(struct mlxsw_sp_acl_block *block)
+{
+        if (block)
+                block->disable_count--;
+}
+
+bool mlxsw_sp_acl_block_disabled(struct mlxsw_sp_acl_block *block)
+{
+        return block->disable_count;
+}
+
+static int
+mlxsw_sp_acl_ruleset_bind(struct mlxsw_sp *mlxsw_sp,
+                          struct mlxsw_sp_acl_block *block,
+                          struct mlxsw_sp_acl_block_binding *binding)
+{
+        struct mlxsw_sp_acl_ruleset *ruleset = block->ruleset_zero;
+        const struct mlxsw_sp_acl_profile_ops *ops = ruleset->ht_key.ops;
+
+        return ops->ruleset_bind(mlxsw_sp, ruleset->priv,
+                                 binding->mlxsw_sp_port, binding->ingress);
+}
+
+static void
+mlxsw_sp_acl_ruleset_unbind(struct mlxsw_sp *mlxsw_sp,
+                            struct mlxsw_sp_acl_block *block,
+                            struct mlxsw_sp_acl_block_binding *binding)
+{
+        struct mlxsw_sp_acl_ruleset *ruleset = block->ruleset_zero;
+        const struct mlxsw_sp_acl_profile_ops *ops = ruleset->ht_key.ops;
+
+        ops->ruleset_unbind(mlxsw_sp, ruleset->priv,
+                            binding->mlxsw_sp_port, binding->ingress);
+}
+
+static bool mlxsw_sp_acl_ruleset_block_bound(struct mlxsw_sp_acl_block *block)
+{
+        return block->ruleset_zero;
+}
+
+static int
+mlxsw_sp_acl_ruleset_block_bind(struct mlxsw_sp *mlxsw_sp,
+                                struct mlxsw_sp_acl_ruleset *ruleset,
+                                struct mlxsw_sp_acl_block *block)
+{
+        struct mlxsw_sp_acl_block_binding *binding;
+        int err;
+
+        block->ruleset_zero = ruleset;
+        list_for_each_entry(binding, &block->binding_list, list) {
+                err = mlxsw_sp_acl_ruleset_bind(mlxsw_sp, block, binding);
+                if (err)
+                        goto rollback;
+        }
+        return 0;
+
+rollback:
+        list_for_each_entry_continue_reverse(binding, &block->binding_list,
+                                             list)
+                mlxsw_sp_acl_ruleset_unbind(mlxsw_sp, block, binding);
+        block->ruleset_zero = NULL;
+
+        return err;
+}
+
+static void
+mlxsw_sp_acl_ruleset_block_unbind(struct mlxsw_sp *mlxsw_sp,
+                                  struct mlxsw_sp_acl_ruleset *ruleset,
+                                  struct mlxsw_sp_acl_block *block)
+{
+        struct mlxsw_sp_acl_block_binding *binding;
+
+        list_for_each_entry(binding, &block->binding_list, list)
+                mlxsw_sp_acl_ruleset_unbind(mlxsw_sp, block, binding);
+        block->ruleset_zero = NULL;
+}
+
+struct mlxsw_sp_acl_block *mlxsw_sp_acl_block_create(struct mlxsw_sp *mlxsw_sp,
+                                                     struct net *net)
+{
+        struct mlxsw_sp_acl_block *block;
+
+        block = kzalloc(sizeof(*block), GFP_KERNEL);
+        if (!block)
+                return NULL;
+        INIT_LIST_HEAD(&block->binding_list);
+        block->mlxsw_sp = mlxsw_sp;
+        return block;
+}
+
+void mlxsw_sp_acl_block_destroy(struct mlxsw_sp_acl_block *block)
+{
+        WARN_ON(!list_empty(&block->binding_list));
+        kfree(block);
+}
+
+static struct mlxsw_sp_acl_block_binding *
+mlxsw_sp_acl_block_lookup(struct mlxsw_sp_acl_block *block,
+                          struct mlxsw_sp_port *mlxsw_sp_port, bool ingress)
+{
+        struct mlxsw_sp_acl_block_binding *binding;
+
+        list_for_each_entry(binding, &block->binding_list, list)
+                if (binding->mlxsw_sp_port == mlxsw_sp_port &&
+                    binding->ingress == ingress)
+                        return binding;
+        return NULL;
+}
+
+int mlxsw_sp_acl_block_bind(struct mlxsw_sp *mlxsw_sp,
+                            struct mlxsw_sp_acl_block *block,
+                            struct mlxsw_sp_port *mlxsw_sp_port,
+                            bool ingress)
+{
+        struct mlxsw_sp_acl_block_binding *binding;
+        int err;
+
+        if (WARN_ON(mlxsw_sp_acl_block_lookup(block, mlxsw_sp_port, ingress)))
+                return -EEXIST;
+
+        binding = kzalloc(sizeof(*binding), GFP_KERNEL);
+        if (!binding)
+                return -ENOMEM;
+        binding->mlxsw_sp_port = mlxsw_sp_port;
+        binding->ingress = ingress;
+
+        if (mlxsw_sp_acl_ruleset_block_bound(block)) {
+                err = mlxsw_sp_acl_ruleset_bind(mlxsw_sp, block, binding);
+                if (err)
+                        goto err_ruleset_bind;
+        }
+
+        list_add(&binding->list, &block->binding_list);
+        return 0;
+
+err_ruleset_bind:
+        kfree(binding);
+        return err;
+}
+
+int mlxsw_sp_acl_block_unbind(struct mlxsw_sp *mlxsw_sp,
+                              struct mlxsw_sp_acl_block *block,
+                              struct mlxsw_sp_port *mlxsw_sp_port,
+                              bool ingress)
+{
+        struct mlxsw_sp_acl_block_binding *binding;
+
+        binding = mlxsw_sp_acl_block_lookup(block, mlxsw_sp_port, ingress);
+        if (!binding)
+                return -ENOENT;
+
+        list_del(&binding->list);
+
+        if (mlxsw_sp_acl_ruleset_block_bound(block))
+                mlxsw_sp_acl_ruleset_unbind(mlxsw_sp, block, binding);
+
+        kfree(binding);
+        return 0;
+}
+
 static struct mlxsw_sp_acl_ruleset *
 mlxsw_sp_acl_ruleset_create(struct mlxsw_sp *mlxsw_sp,
+                            struct mlxsw_sp_acl_block *block, u32 chain_index,
                             const struct mlxsw_sp_acl_profile_ops *ops)
 {
         struct mlxsw_sp_acl *acl = mlxsw_sp->acl;
@@ -132,6 +324,8 @@ mlxsw_sp_acl_ruleset_create(struct mlxsw_sp *mlxsw_sp,
         if (!ruleset)
                 return ERR_PTR(-ENOMEM);
         ruleset->ref_count = 1;
+        ruleset->ht_key.block = block;
+        ruleset->ht_key.chain_index = chain_index;
         ruleset->ht_key.ops = ops;
 
         err = rhashtable_init(&ruleset->rule_ht, &mlxsw_sp_acl_rule_ht_params);
@@ -142,8 +336,28 @@ mlxsw_sp_acl_ruleset_create(struct mlxsw_sp *mlxsw_sp,
         if (err)
                 goto err_ops_ruleset_add;
 
+        err = rhashtable_insert_fast(&acl->ruleset_ht, &ruleset->ht_node,
+                                     mlxsw_sp_acl_ruleset_ht_params);
+        if (err)
+                goto err_ht_insert;
+
+        if (!chain_index) {
+                /* We only need ruleset with chain index 0, the implicit one,
+                 * to be directly bound to device. The rest of the rulesets
+                 * are bound by "Goto action set".
+                 */
+                err = mlxsw_sp_acl_ruleset_block_bind(mlxsw_sp, ruleset, block);
+                if (err)
+                        goto err_ruleset_bind;
+        }
+
         return ruleset;
 
+err_ruleset_bind:
+        rhashtable_remove_fast(&acl->ruleset_ht, &ruleset->ht_node,
+                               mlxsw_sp_acl_ruleset_ht_params);
+err_ht_insert:
         ops->ruleset_del(mlxsw_sp, ruleset->priv);
 err_ops_ruleset_add:
         rhashtable_destroy(&ruleset->rule_ht);
 err_rhashtable_init:
@@ -155,57 +369,19 @@ static void mlxsw_sp_acl_ruleset_destroy(struct mlxsw_sp *mlxsw_sp,
                                          struct mlxsw_sp_acl_ruleset *ruleset)
 {
         const struct mlxsw_sp_acl_profile_ops *ops = ruleset->ht_key.ops;
+        struct mlxsw_sp_acl_block *block = ruleset->ht_key.block;
+        u32 chain_index = ruleset->ht_key.chain_index;
+        struct mlxsw_sp_acl *acl = mlxsw_sp->acl;
 
+        if (!chain_index)
+                mlxsw_sp_acl_ruleset_block_unbind(mlxsw_sp, ruleset, block);
+        rhashtable_remove_fast(&acl->ruleset_ht, &ruleset->ht_node,
+                               mlxsw_sp_acl_ruleset_ht_params);
         ops->ruleset_del(mlxsw_sp, ruleset->priv);
         rhashtable_destroy(&ruleset->rule_ht);
         kfree(ruleset);
 }
 
-static int mlxsw_sp_acl_ruleset_bind(struct mlxsw_sp *mlxsw_sp,
-                                     struct mlxsw_sp_acl_ruleset *ruleset,
-                                     struct net_device *dev, bool ingress,
-                                     u32 chain_index)
-{
-        const struct mlxsw_sp_acl_profile_ops *ops = ruleset->ht_key.ops;
-        struct mlxsw_sp_acl *acl = mlxsw_sp->acl;
-        int err;
-
-        ruleset->ht_key.dev = dev;
-        ruleset->ht_key.ingress = ingress;
-        ruleset->ht_key.chain_index = chain_index;
-        err = rhashtable_insert_fast(&acl->ruleset_ht, &ruleset->ht_node,
-                                     mlxsw_sp_acl_ruleset_ht_params);
-        if (err)
-                return err;
-        if (!ruleset->ht_key.chain_index) {
-                /* We only need ruleset with chain index 0, the implicit one,
-                 * to be directly bound to device. The rest of the rulesets
-                 * are bound by "Goto action set".
-                 */
-                err = ops->ruleset_bind(mlxsw_sp, ruleset->priv, dev, ingress);
-                if (err)
-                        goto err_ops_ruleset_bind;
-        }
-        return 0;
-
-err_ops_ruleset_bind:
-        rhashtable_remove_fast(&acl->ruleset_ht, &ruleset->ht_node,
-                               mlxsw_sp_acl_ruleset_ht_params);
-        return err;
-}
-
-static void mlxsw_sp_acl_ruleset_unbind(struct mlxsw_sp *mlxsw_sp,
-                                        struct mlxsw_sp_acl_ruleset *ruleset)
-{
-        const struct mlxsw_sp_acl_profile_ops *ops = ruleset->ht_key.ops;
-        struct mlxsw_sp_acl *acl = mlxsw_sp->acl;
-
-        if (!ruleset->ht_key.chain_index)
-                ops->ruleset_unbind(mlxsw_sp, ruleset->priv);
-        rhashtable_remove_fast(&acl->ruleset_ht, &ruleset->ht_node,
-                               mlxsw_sp_acl_ruleset_ht_params);
-}
-
 static void mlxsw_sp_acl_ruleset_ref_inc(struct mlxsw_sp_acl_ruleset *ruleset)
 {
         ruleset->ref_count++;
@@ -216,20 +392,18 @@ static void mlxsw_sp_acl_ruleset_ref_dec(struct mlxsw_sp *mlxsw_sp,
 {
         if (--ruleset->ref_count)
                 return;
-        mlxsw_sp_acl_ruleset_unbind(mlxsw_sp, ruleset);
         mlxsw_sp_acl_ruleset_destroy(mlxsw_sp, ruleset);
 }
 
 static struct mlxsw_sp_acl_ruleset *
-__mlxsw_sp_acl_ruleset_lookup(struct mlxsw_sp_acl *acl, struct net_device *dev,
-                              bool ingress, u32 chain_index,
+__mlxsw_sp_acl_ruleset_lookup(struct mlxsw_sp_acl *acl,
+                              struct mlxsw_sp_acl_block *block, u32 chain_index,
                               const struct mlxsw_sp_acl_profile_ops *ops)
 {
         struct mlxsw_sp_acl_ruleset_ht_key ht_key;
 
         memset(&ht_key, 0, sizeof(ht_key));
-        ht_key.dev = dev;
-        ht_key.ingress = ingress;
+        ht_key.block = block;
         ht_key.chain_index = chain_index;
         ht_key.ops = ops;
         return rhashtable_lookup_fast(&acl->ruleset_ht, &ht_key,
@@ -237,8 +411,8 @@ __mlxsw_sp_acl_ruleset_lookup(struct mlxsw_sp_acl *acl, struct net_device *dev,
 }
 
 struct mlxsw_sp_acl_ruleset *
-mlxsw_sp_acl_ruleset_lookup(struct mlxsw_sp *mlxsw_sp, struct net_device *dev,
-                            bool ingress, u32 chain_index,
+mlxsw_sp_acl_ruleset_lookup(struct mlxsw_sp *mlxsw_sp,
+                            struct mlxsw_sp_acl_block *block, u32 chain_index,
                             enum mlxsw_sp_acl_profile profile)
 {
         const struct mlxsw_sp_acl_profile_ops *ops;
@@ -248,45 +422,31 @@ mlxsw_sp_acl_ruleset_lookup(struct mlxsw_sp *mlxsw_sp, struct net_device *dev,
         ops = acl->ops->profile_ops(mlxsw_sp, profile);
         if (!ops)
                 return ERR_PTR(-EINVAL);
-        ruleset = __mlxsw_sp_acl_ruleset_lookup(acl, dev, ingress,
-                                                chain_index, ops);
+        ruleset = __mlxsw_sp_acl_ruleset_lookup(acl, block, chain_index, ops);
         if (!ruleset)
                 return ERR_PTR(-ENOENT);
         return ruleset;
 }
 
 struct mlxsw_sp_acl_ruleset *
-mlxsw_sp_acl_ruleset_get(struct mlxsw_sp *mlxsw_sp, struct net_device *dev,
-                         bool ingress, u32 chain_index,
+mlxsw_sp_acl_ruleset_get(struct mlxsw_sp *mlxsw_sp,
+                         struct mlxsw_sp_acl_block *block, u32 chain_index,
                          enum mlxsw_sp_acl_profile profile)
 {
         const struct mlxsw_sp_acl_profile_ops *ops;
         struct mlxsw_sp_acl *acl = mlxsw_sp->acl;
         struct mlxsw_sp_acl_ruleset *ruleset;
-        int err;
 
         ops = acl->ops->profile_ops(mlxsw_sp, profile);
         if (!ops)
                 return ERR_PTR(-EINVAL);
 
-        ruleset = __mlxsw_sp_acl_ruleset_lookup(acl, dev, ingress,
-                                                chain_index, ops);
+        ruleset = __mlxsw_sp_acl_ruleset_lookup(acl, block, chain_index, ops);
         if (ruleset) {
                 mlxsw_sp_acl_ruleset_ref_inc(ruleset);
                 return ruleset;
         }
-        ruleset = mlxsw_sp_acl_ruleset_create(mlxsw_sp, ops);
-        if (IS_ERR(ruleset))
-                return ruleset;
-        err = mlxsw_sp_acl_ruleset_bind(mlxsw_sp, ruleset, dev,
-                                        ingress, chain_index);
-        if (err)
-                goto err_ruleset_bind;
-        return ruleset;
-
-err_ruleset_bind:
-        mlxsw_sp_acl_ruleset_destroy(mlxsw_sp, ruleset);
-        return ERR_PTR(err);
+        return mlxsw_sp_acl_ruleset_create(mlxsw_sp, block, chain_index, ops);
 }
 
 void mlxsw_sp_acl_ruleset_put(struct mlxsw_sp *mlxsw_sp,
@@ -535,6 +695,7 @@ int mlxsw_sp_acl_rule_add(struct mlxsw_sp *mlxsw_sp,
                 goto err_rhashtable_insert;
 
         list_add_tail(&rule->list, &mlxsw_sp->acl->rules);
+        ruleset->ht_key.block->rule_count++;
         return 0;
 
 err_rhashtable_insert:
@@ -548,6 +709,7 @@ void mlxsw_sp_acl_rule_del(struct mlxsw_sp *mlxsw_sp,
         struct mlxsw_sp_acl_ruleset *ruleset = rule->ruleset;
         const struct mlxsw_sp_acl_profile_ops *ops = ruleset->ht_key.ops;
 
+        ruleset->ht_key.block->rule_count--;
         list_del(&rule->list);
         rhashtable_remove_fast(&ruleset->rule_ht, &rule->ht_node,
                                mlxsw_sp_acl_rule_ht_params);
@@ -154,10 +154,6 @@ struct mlxsw_sp_acl_tcam_group {
         struct list_head region_list;
         unsigned int region_count;
         struct rhashtable chunk_ht;
-        struct {
-                u16 local_port;
-                bool ingress;
-        } bound;
         struct mlxsw_sp_acl_tcam_group_ops *ops;
         const struct mlxsw_sp_acl_tcam_pattern *patterns;
         unsigned int patterns_count;
@@ -262,35 +258,29 @@ static void mlxsw_sp_acl_tcam_group_del(struct mlxsw_sp *mlxsw_sp,
 static int
 mlxsw_sp_acl_tcam_group_bind(struct mlxsw_sp *mlxsw_sp,
                              struct mlxsw_sp_acl_tcam_group *group,
-                             struct net_device *dev, bool ingress)
+                             struct mlxsw_sp_port *mlxsw_sp_port,
+                             bool ingress)
 {
-        struct mlxsw_sp_port *mlxsw_sp_port;
         char ppbt_pl[MLXSW_REG_PPBT_LEN];
 
-        if (!mlxsw_sp_port_dev_check(dev))
-                return -EINVAL;
-
-        mlxsw_sp_port = netdev_priv(dev);
-        group->bound.local_port = mlxsw_sp_port->local_port;
-        group->bound.ingress = ingress;
-        mlxsw_reg_ppbt_pack(ppbt_pl,
-                            group->bound.ingress ? MLXSW_REG_PXBT_E_IACL :
-                                                   MLXSW_REG_PXBT_E_EACL,
-                            MLXSW_REG_PXBT_OP_BIND, group->bound.local_port,
+        mlxsw_reg_ppbt_pack(ppbt_pl, ingress ? MLXSW_REG_PXBT_E_IACL :
+                                               MLXSW_REG_PXBT_E_EACL,
+                            MLXSW_REG_PXBT_OP_BIND, mlxsw_sp_port->local_port,
                             group->id);
         return mlxsw_reg_write(mlxsw_sp->core, MLXSW_REG(ppbt), ppbt_pl);
 }
 
 static void
 mlxsw_sp_acl_tcam_group_unbind(struct mlxsw_sp *mlxsw_sp,
-                               struct mlxsw_sp_acl_tcam_group *group)
+                               struct mlxsw_sp_acl_tcam_group *group,
+                               struct mlxsw_sp_port *mlxsw_sp_port,
+                               bool ingress)
 {
         char ppbt_pl[MLXSW_REG_PPBT_LEN];
 
-        mlxsw_reg_ppbt_pack(ppbt_pl,
-                            group->bound.ingress ? MLXSW_REG_PXBT_E_IACL :
-                                                   MLXSW_REG_PXBT_E_EACL,
-                            MLXSW_REG_PXBT_OP_UNBIND, group->bound.local_port,
+        mlxsw_reg_ppbt_pack(ppbt_pl, ingress ? MLXSW_REG_PXBT_E_IACL :
+                                               MLXSW_REG_PXBT_E_EACL,
+                            MLXSW_REG_PXBT_OP_UNBIND, mlxsw_sp_port->local_port,
                             group->id);
         mlxsw_reg_write(mlxsw_sp->core, MLXSW_REG(ppbt), ppbt_pl);
 }
@@ -1056,21 +1046,25 @@ mlxsw_sp_acl_tcam_flower_ruleset_del(struct mlxsw_sp *mlxsw_sp,
 static int
 mlxsw_sp_acl_tcam_flower_ruleset_bind(struct mlxsw_sp *mlxsw_sp,
                                       void *ruleset_priv,
-                                      struct net_device *dev, bool ingress)
+                                      struct mlxsw_sp_port *mlxsw_sp_port,
+                                      bool ingress)
 {
         struct mlxsw_sp_acl_tcam_flower_ruleset *ruleset = ruleset_priv;
 
         return mlxsw_sp_acl_tcam_group_bind(mlxsw_sp, &ruleset->group,
-                                            dev, ingress);
+                                            mlxsw_sp_port, ingress);
 }
 
 static void
 mlxsw_sp_acl_tcam_flower_ruleset_unbind(struct mlxsw_sp *mlxsw_sp,
-                                        void *ruleset_priv)
+                                        void *ruleset_priv,
+                                        struct mlxsw_sp_port *mlxsw_sp_port,
+                                        bool ingress)
 {
         struct mlxsw_sp_acl_tcam_flower_ruleset *ruleset = ruleset_priv;
 
-        mlxsw_sp_acl_tcam_group_unbind(mlxsw_sp, &ruleset->group);
+        mlxsw_sp_acl_tcam_group_unbind(mlxsw_sp, &ruleset->group,
+                                       mlxsw_sp_port, ingress);
 }
 
 static u16
@@ -35,6 +35,7 @@
 #include <linux/kernel.h>
 #include <linux/errno.h>
 #include <linux/netdevice.h>
+#include <net/net_namespace.h>
 #include <net/flow_dissector.h>
 #include <net/pkt_cls.h>
 #include <net/tc_act/tc_gact.h>
@@ -45,7 +46,7 @@
 #include "core_acl_flex_keys.h"
 
 static int mlxsw_sp_flower_parse_actions(struct mlxsw_sp *mlxsw_sp,
-                                         struct net_device *dev, bool ingress,
+                                         struct mlxsw_sp_acl_block *block,
                                          struct mlxsw_sp_acl_rule_info *rulei,
                                          struct tcf_exts *exts)
 {
@@ -80,8 +81,7 @@ static int mlxsw_sp_flower_parse_actions(struct mlxsw_sp *mlxsw_sp,
                         struct mlxsw_sp_acl_ruleset *ruleset;
                         u16 group_id;
 
-                        ruleset = mlxsw_sp_acl_ruleset_lookup(mlxsw_sp, dev,
-                                                              ingress,
+                        ruleset = mlxsw_sp_acl_ruleset_lookup(mlxsw_sp, block,
                                                               chain_index,
                                                               MLXSW_SP_ACL_PROFILE_FLOWER);
                         if (IS_ERR(ruleset))
@@ -104,9 +104,6 @@ static int mlxsw_sp_flower_parse_actions(struct mlxsw_sp *mlxsw_sp,
                                 return err;
 
                         out_dev = tcf_mirred_dev(a);
-                        if (out_dev == dev)
-                                out_dev = NULL;
-
                         err = mlxsw_sp_acl_rulei_act_fwd(mlxsw_sp, rulei,
                                                          out_dev);
                         if (err)
@@ -265,7 +262,7 @@ static int mlxsw_sp_flower_parse_ip(struct mlxsw_sp *mlxsw_sp,
 }
 
 static int mlxsw_sp_flower_parse(struct mlxsw_sp *mlxsw_sp,
-                                 struct net_device *dev, bool ingress,
+                                 struct mlxsw_sp_acl_block *block,
                                  struct mlxsw_sp_acl_rule_info *rulei,
                                  struct tc_cls_flower_offload *f)
 {
@@ -383,21 +380,19 @@ static int mlxsw_sp_flower_parse(struct mlxsw_sp *mlxsw_sp,
         if (err)
                 return err;
 
-        return mlxsw_sp_flower_parse_actions(mlxsw_sp, dev, ingress,
-                                             rulei, f->exts);
+        return mlxsw_sp_flower_parse_actions(mlxsw_sp, block, rulei, f->exts);
 }
 
-int mlxsw_sp_flower_replace(struct mlxsw_sp_port *mlxsw_sp_port, bool ingress,
+int mlxsw_sp_flower_replace(struct mlxsw_sp *mlxsw_sp,
+                            struct mlxsw_sp_acl_block *block,
                             struct tc_cls_flower_offload *f)
 {
-        struct mlxsw_sp *mlxsw_sp = mlxsw_sp_port->mlxsw_sp;
-        struct net_device *dev = mlxsw_sp_port->dev;
         struct mlxsw_sp_acl_rule_info *rulei;
         struct mlxsw_sp_acl_ruleset *ruleset;
         struct mlxsw_sp_acl_rule *rule;
         int err;
 
-        ruleset = mlxsw_sp_acl_ruleset_get(mlxsw_sp, dev, ingress,
+        ruleset = mlxsw_sp_acl_ruleset_get(mlxsw_sp, block,
                                            f->common.chain_index,
                                            MLXSW_SP_ACL_PROFILE_FLOWER);
         if (IS_ERR(ruleset))
@@ -410,7 +405,7 @@ int mlxsw_sp_flower_replace(struct mlxsw_sp_port *mlxsw_sp_port, bool ingress,
         }
 
         rulei = mlxsw_sp_acl_rule_rulei(rule);
-        err = mlxsw_sp_flower_parse(mlxsw_sp, dev, ingress, rulei, f);
+        err = mlxsw_sp_flower_parse(mlxsw_sp, block, rulei, f);
         if (err)
                 goto err_flower_parse;
 
@@ -423,7 +418,6 @@ int mlxsw_sp_flower_replace(struct mlxsw_sp_port *mlxsw_sp_port, bool ingress,
                 goto err_rule_add;
 
         mlxsw_sp_acl_ruleset_put(mlxsw_sp, ruleset);
-        mlxsw_sp_port->acl_rule_count++;
         return 0;
 
 err_rule_add:
@@ -435,15 +429,15 @@ int mlxsw_sp_flower_replace(struct mlxsw_sp_port *mlxsw_sp_port, bool ingress,
         return err;
 }
 
-void mlxsw_sp_flower_destroy(struct mlxsw_sp_port *mlxsw_sp_port, bool ingress,
+void mlxsw_sp_flower_destroy(struct mlxsw_sp *mlxsw_sp,
+                             struct mlxsw_sp_acl_block *block,
                              struct tc_cls_flower_offload *f)
 {
-        struct mlxsw_sp *mlxsw_sp = mlxsw_sp_port->mlxsw_sp;
         struct mlxsw_sp_acl_ruleset *ruleset;
         struct mlxsw_sp_acl_rule *rule;
 
-        ruleset = mlxsw_sp_acl_ruleset_get(mlxsw_sp, mlxsw_sp_port->dev,
-                                           ingress, f->common.chain_index,
+        ruleset = mlxsw_sp_acl_ruleset_get(mlxsw_sp, block,
+                                           f->common.chain_index,
                                            MLXSW_SP_ACL_PROFILE_FLOWER);
         if (IS_ERR(ruleset))
                 return;
@@ -455,13 +449,12 @@ void mlxsw_sp_flower_destroy(struct mlxsw_sp_port *mlxsw_sp_port, bool ingress,
         }
 
         mlxsw_sp_acl_ruleset_put(mlxsw_sp, ruleset);
-        mlxsw_sp_port->acl_rule_count--;
 }
 
-int mlxsw_sp_flower_stats(struct mlxsw_sp_port *mlxsw_sp_port, bool ingress,
+int mlxsw_sp_flower_stats(struct mlxsw_sp *mlxsw_sp,
+                          struct mlxsw_sp_acl_block *block,
                           struct tc_cls_flower_offload *f)
 {
-        struct mlxsw_sp *mlxsw_sp = mlxsw_sp_port->mlxsw_sp;
         struct mlxsw_sp_acl_ruleset *ruleset;
         struct mlxsw_sp_acl_rule *rule;
         u64 packets;
@@ -469,8 +462,8 @@ int mlxsw_sp_flower_stats(struct mlxsw_sp_port *mlxsw_sp_port, bool ingress,
         u64 bytes;
         int err;
 
-        ruleset = mlxsw_sp_acl_ruleset_get(mlxsw_sp, mlxsw_sp_port->dev,
-                                           ingress, f->common.chain_index,
+        ruleset = mlxsw_sp_acl_ruleset_get(mlxsw_sp, block,
+                                           f->common.chain_index,
                                            MLXSW_SP_ACL_PROFILE_FLOWER);
         if (WARN_ON(IS_ERR(ruleset)))
                 return -EINVAL;
@@ -29,6 +29,7 @@ struct tcf_block_ext_info {
         enum tcf_block_binder_type binder_type;
         tcf_chain_head_change_t *chain_head_change;
         void *chain_head_change_priv;
+        u32 block_index;
 };
 
 struct tcf_block_cb;
@@ -38,6 +39,7 @@ bool tcf_queue_work(struct work_struct *work);
 struct tcf_chain *tcf_chain_get(struct tcf_block *block, u32 chain_index,
                                 bool create);
 void tcf_chain_put(struct tcf_chain *chain);
+void tcf_block_netif_keep_dst(struct tcf_block *block);
 int tcf_block_get(struct tcf_block **p_block,
                   struct tcf_proto __rcu **p_filter_chain, struct Qdisc *q,
                   struct netlink_ext_ack *extack);
@@ -48,8 +50,14 @@ void tcf_block_put(struct tcf_block *block);
 void tcf_block_put_ext(struct tcf_block *block, struct Qdisc *q,
                        struct tcf_block_ext_info *ei);
 
+static inline bool tcf_block_shared(struct tcf_block *block)
+{
+        return block->index;
+}
+
 static inline struct Qdisc *tcf_block_q(struct tcf_block *block)
 {
+        WARN_ON(tcf_block_shared(block));
         return block->q;
 }
 
@@ -204,6 +204,13 @@ struct Qdisc_ops {
         int                     (*dump)(struct Qdisc *, struct sk_buff *);
         int                     (*dump_stats)(struct Qdisc *, struct gnet_dump *);
 
+        void                    (*ingress_block_set)(struct Qdisc *sch,
+                                                     u32 block_index);
+        void                    (*egress_block_set)(struct Qdisc *sch,
+                                                    u32 block_index);
+        u32                     (*ingress_block_get)(struct Qdisc *sch);
+        u32                     (*egress_block_get)(struct Qdisc *sch);
+
         struct module           *owner;
 };
 
||||
|
@ -255,8 +262,6 @@ struct tcf_proto {
|
|||
|
||||
/* All the rest */
|
||||
u32 prio;
|
||||
u32 classid;
|
||||
struct Qdisc *q;
|
||||
void *data;
|
||||
const struct tcf_proto_ops *ops;
|
||||
struct tcf_chain *chain;
|
||||
|
@@ -275,8 +280,7 @@ typedef void tcf_chain_head_change_t(struct tcf_proto *tp_head, void *priv);
 
 struct tcf_chain {
         struct tcf_proto __rcu *filter_chain;
-        tcf_chain_head_change_t *chain_head_change;
-        void *chain_head_change_priv;
+        struct list_head filter_chain_list;
         struct list_head list;
         struct tcf_block *block;
         u32 index; /* chain index */
@@ -285,11 +289,33 @@ struct tcf_chain {
 
 struct tcf_block {
         struct list_head chain_list;
+        u32 index; /* block index for shared blocks */
+        unsigned int refcnt;
         struct net *net;
         struct Qdisc *q;
         struct list_head cb_list;
+        struct list_head owner_list;
+        bool keep_dst;
+        unsigned int offloadcnt; /* Number of oddloaded filters */
+        unsigned int nooffloaddevcnt; /* Number of devs unable to do offload */
 };
 
+static inline void tcf_block_offload_inc(struct tcf_block *block, u32 *flags)
+{
+        if (*flags & TCA_CLS_FLAGS_IN_HW)
+                return;
+        *flags |= TCA_CLS_FLAGS_IN_HW;
+        block->offloadcnt++;
+}
+
+static inline void tcf_block_offload_dec(struct tcf_block *block, u32 *flags)
+{
+        if (!(*flags & TCA_CLS_FLAGS_IN_HW))
+                return;
+        *flags &= ~TCA_CLS_FLAGS_IN_HW;
+        block->offloadcnt--;
+}
+
 static inline void qdisc_cb_private_validate(const struct sk_buff *skb, int sz)
 {
         struct qdisc_skb_cb *qcb;
@@ -541,9 +541,19 @@ struct tcmsg {
         int                     tcm_ifindex;
         __u32                   tcm_handle;
         __u32                   tcm_parent;
+/* tcm_block_index is used instead of tcm_parent
+ * in case tcm_ifindex == TCM_IFINDEX_MAGIC_BLOCK
+ */
+#define tcm_block_index tcm_parent
         __u32                   tcm_info;
 };
 
+/* For manipulation of filters in shared block, tcm_ifindex is set to
+ * TCM_IFINDEX_MAGIC_BLOCK, and tcm_parent is aliased to tcm_block_index
+ * which is the block index.
+ */
+#define TCM_IFINDEX_MAGIC_BLOCK (0xFFFFFFFFU)
+
 enum {
         TCA_UNSPEC,
         TCA_KIND,
@@ -558,6 +568,8 @@ enum {
         TCA_DUMP_INVISIBLE,
         TCA_CHAIN,
         TCA_HW_OFFLOAD,
+        TCA_INGRESS_BLOCK,
+        TCA_EGRESS_BLOCK,
         __TCA_MAX
 };
 
@@ -24,6 +24,7 @@
 #include <linux/init.h>
 #include <linux/kmod.h>
 #include <linux/slab.h>
+#include <linux/idr.h>
 #include <net/net_namespace.h>
 #include <net/sock.h>
 #include <net/netlink.h>
@@ -121,8 +122,7 @@ static inline u32 tcf_auto_prio(struct tcf_proto *tp)
 }
 
 static struct tcf_proto *tcf_proto_create(const char *kind, u32 protocol,
-                                          u32 prio, u32 parent, struct Qdisc *q,
-                                          struct tcf_chain *chain)
+                                          u32 prio, struct tcf_chain *chain)
 {
         struct tcf_proto *tp;
         int err;
@@ -156,8 +156,6 @@ static struct tcf_proto *tcf_proto_create(const char *kind, u32 protocol,
         tp->classify = tp->ops->classify;
         tp->protocol = protocol;
         tp->prio = prio;
-        tp->classid = parent;
-        tp->q = q;
         tp->chain = chain;
 
         err = tp->ops->init(tp);
@@ -179,6 +177,12 @@ static void tcf_proto_destroy(struct tcf_proto *tp)
         kfree_rcu(tp, rcu);
 }
 
+struct tcf_filter_chain_list_item {
+        struct list_head list;
+        tcf_chain_head_change_t *chain_head_change;
+        void *chain_head_change_priv;
+};
+
 static struct tcf_chain *tcf_chain_create(struct tcf_block *block,
                                           u32 chain_index)
 {
@@ -187,6 +191,7 @@ static struct tcf_chain *tcf_chain_create(struct tcf_block *block,
         chain = kzalloc(sizeof(*chain), GFP_KERNEL);
         if (!chain)
                 return NULL;
+        INIT_LIST_HEAD(&chain->filter_chain_list);
         list_add_tail(&chain->list, &block->chain_list);
         chain->block = block;
         chain->index = chain_index;
@@ -194,12 +199,19 @@ static struct tcf_chain *tcf_chain_create(struct tcf_block *block,
         return chain;
 }
 
+static void tcf_chain_head_change_item(struct tcf_filter_chain_list_item *item,
+                                       struct tcf_proto *tp_head)
+{
+        if (item->chain_head_change)
+                item->chain_head_change(tp_head, item->chain_head_change_priv);
+}
+
 static void tcf_chain_head_change(struct tcf_chain *chain,
                                   struct tcf_proto *tp_head)
 {
-        if (chain->chain_head_change)
-                chain->chain_head_change(tp_head,
-                                         chain->chain_head_change_priv);
+        struct tcf_filter_chain_list_item *item;
+
+        list_for_each_entry(item, &chain->filter_chain_list, list)
+                tcf_chain_head_change_item(item, tp_head);
 }
 
 static void tcf_chain_flush(struct tcf_chain *chain)
@@ -253,47 +265,149 @@ void tcf_chain_put(struct tcf_chain *chain)
 }
 EXPORT_SYMBOL(tcf_chain_put);
 
-static void tcf_block_offload_cmd(struct tcf_block *block, struct Qdisc *q,
-                                  struct tcf_block_ext_info *ei,
-                                  enum tc_block_command command)
+static bool tcf_block_offload_in_use(struct tcf_block *block)
+{
+        return block->offloadcnt;
+}
+
+static int tcf_block_offload_cmd(struct tcf_block *block,
+                                 struct net_device *dev,
+                                 struct tcf_block_ext_info *ei,
+                                 enum tc_block_command command)
 {
-        struct net_device *dev = q->dev_queue->dev;
         struct tc_block_offload bo = {};
 
-        if (!dev->netdev_ops->ndo_setup_tc)
-                return;
         bo.command = command;
         bo.binder_type = ei->binder_type;
         bo.block = block;
-        dev->netdev_ops->ndo_setup_tc(dev, TC_SETUP_BLOCK, &bo);
+        return dev->netdev_ops->ndo_setup_tc(dev, TC_SETUP_BLOCK, &bo);
 }
 
-static void tcf_block_offload_bind(struct tcf_block *block, struct Qdisc *q,
-                                   struct tcf_block_ext_info *ei)
+static int tcf_block_offload_bind(struct tcf_block *block, struct Qdisc *q,
+                                  struct tcf_block_ext_info *ei)
 {
-        tcf_block_offload_cmd(block, q, ei, TC_BLOCK_BIND);
+        struct net_device *dev = q->dev_queue->dev;
+        int err;
+
+        if (!dev->netdev_ops->ndo_setup_tc)
+                goto no_offload_dev_inc;
+
+        /* If tc offload feature is disabled and the block we try to bind
+         * to already has some offloaded filters, forbid to bind.
+         */
+        if (!tc_can_offload(dev) && tcf_block_offload_in_use(block))
+                return -EOPNOTSUPP;
+
+        err = tcf_block_offload_cmd(block, dev, ei, TC_BLOCK_BIND);
+        if (err == -EOPNOTSUPP)
+                goto no_offload_dev_inc;
+        return err;
+
+no_offload_dev_inc:
+        if (tcf_block_offload_in_use(block))
+                return -EOPNOTSUPP;
+        block->nooffloaddevcnt++;
+        return 0;
 }
 
 static void tcf_block_offload_unbind(struct tcf_block *block, struct Qdisc *q,
                                      struct tcf_block_ext_info *ei)
 {
-        tcf_block_offload_cmd(block, q, ei, TC_BLOCK_UNBIND);
+        struct net_device *dev = q->dev_queue->dev;
+        int err;
+
+        if (!dev->netdev_ops->ndo_setup_tc)
+                goto no_offload_dev_dec;
+        err = tcf_block_offload_cmd(block, dev, ei, TC_BLOCK_UNBIND);
+        if (err == -EOPNOTSUPP)
+                goto no_offload_dev_dec;
+        return;
+
+no_offload_dev_dec:
+        WARN_ON(block->nooffloaddevcnt-- == 0);
 }
 
-int tcf_block_get_ext(struct tcf_block **p_block, struct Qdisc *q,
-                      struct tcf_block_ext_info *ei,
-                      struct netlink_ext_ack *extack)
+static int
+tcf_chain_head_change_cb_add(struct tcf_chain *chain,
+                             struct tcf_block_ext_info *ei,
+                             struct netlink_ext_ack *extack)
 {
-        struct tcf_block *block = kzalloc(sizeof(*block), GFP_KERNEL);
+        struct tcf_filter_chain_list_item *item;
+
+        item = kmalloc(sizeof(*item), GFP_KERNEL);
+        if (!item) {
+                NL_SET_ERR_MSG(extack, "Memory allocation for head change callback item failed");
+                return -ENOMEM;
+        }
+        item->chain_head_change = ei->chain_head_change;
+        item->chain_head_change_priv = ei->chain_head_change_priv;
+        if (chain->filter_chain)
+                tcf_chain_head_change_item(item, chain->filter_chain);
+        list_add(&item->list, &chain->filter_chain_list);
+        return 0;
+}
+
+static void
+tcf_chain_head_change_cb_del(struct tcf_chain *chain,
+                             struct tcf_block_ext_info *ei)
+{
+        struct tcf_filter_chain_list_item *item;
+
+        list_for_each_entry(item, &chain->filter_chain_list, list) {
+                if ((!ei->chain_head_change && !ei->chain_head_change_priv) ||
+                    (item->chain_head_change == ei->chain_head_change &&
+                     item->chain_head_change_priv == ei->chain_head_change_priv)) {
+                        tcf_chain_head_change_item(item, NULL);
+                        list_del(&item->list);
+                        kfree(item);
+                        return;
+                }
+        }
+        WARN_ON(1);
+}
+
+struct tcf_net {
+        struct idr idr;
+};
+
+static unsigned int tcf_net_id;
+
+static int tcf_block_insert(struct tcf_block *block, struct net *net,
+                            u32 block_index, struct netlink_ext_ack *extack)
+{
+        struct tcf_net *tn = net_generic(net, tcf_net_id);
+        int err;
+
+        err = idr_alloc_ext(&tn->idr, block, NULL, block_index,
+                            block_index + 1, GFP_KERNEL);
+        if (err)
+                return err;
+        block->index = block_index;
+        return 0;
+}
+
+static void tcf_block_remove(struct tcf_block *block, struct net *net)
+{
+        struct tcf_net *tn = net_generic(net, tcf_net_id);
+
+        idr_remove_ext(&tn->idr, block->index);
+}
+
+static struct tcf_block *tcf_block_create(struct net *net, struct Qdisc *q,
+                                          struct netlink_ext_ack *extack)
+{
+        struct tcf_block *block;
         struct tcf_chain *chain;
         int err;
 
+        block = kzalloc(sizeof(*block), GFP_KERNEL);
         if (!block) {
                 NL_SET_ERR_MSG(extack, "Memory allocation for block failed");
-                return -ENOMEM;
+                return ERR_PTR(-ENOMEM);
         }
         INIT_LIST_HEAD(&block->chain_list);
         INIT_LIST_HEAD(&block->cb_list);
+        INIT_LIST_HEAD(&block->owner_list);
 
         /* Create chain 0 by default, it has to be always present. */
         chain = tcf_chain_create(block, 0);
@ -302,17 +416,149 @@ int tcf_block_get_ext(struct tcf_block **p_block, struct Qdisc *q,
|
|||
err = -ENOMEM;
|
||||
goto err_chain_create;
|
||||
}
|
||||
WARN_ON(!ei->chain_head_change);
|
||||
chain->chain_head_change = ei->chain_head_change;
|
||||
chain->chain_head_change_priv = ei->chain_head_change_priv;
|
||||
block->net = qdisc_net(q);
|
||||
block->refcnt = 1;
|
||||
block->net = net;
|
||||
block->q = q;
|
||||
tcf_block_offload_bind(block, q, ei);
|
||||
*p_block = block;
|
||||
return 0;
|
||||
return block;
|
||||
|
||||
err_chain_create:
|
||||
kfree(block);
|
||||
return ERR_PTR(err);
|
||||
}
|
||||
|
||||
static struct tcf_block *tcf_block_lookup(struct net *net, u32 block_index)
{
	struct tcf_net *tn = net_generic(net, tcf_net_id);

	return idr_find_ext(&tn->idr, block_index);
}

static struct tcf_chain *tcf_block_chain_zero(struct tcf_block *block)
{
	return list_first_entry(&block->chain_list, struct tcf_chain, list);
}

struct tcf_block_owner_item {
	struct list_head list;
	struct Qdisc *q;
	enum tcf_block_binder_type binder_type;
};

static void
tcf_block_owner_netif_keep_dst(struct tcf_block *block,
			       struct Qdisc *q,
			       enum tcf_block_binder_type binder_type)
{
	if (block->keep_dst &&
	    binder_type != TCF_BLOCK_BINDER_TYPE_CLSACT_INGRESS &&
	    binder_type != TCF_BLOCK_BINDER_TYPE_CLSACT_EGRESS)
		netif_keep_dst(qdisc_dev(q));
}

void tcf_block_netif_keep_dst(struct tcf_block *block)
{
	struct tcf_block_owner_item *item;

	block->keep_dst = true;
	list_for_each_entry(item, &block->owner_list, list)
		tcf_block_owner_netif_keep_dst(block, item->q,
					       item->binder_type);
}
EXPORT_SYMBOL(tcf_block_netif_keep_dst);

static int tcf_block_owner_add(struct tcf_block *block,
			       struct Qdisc *q,
			       enum tcf_block_binder_type binder_type)
{
	struct tcf_block_owner_item *item;

	item = kmalloc(sizeof(*item), GFP_KERNEL);
	if (!item)
		return -ENOMEM;
	item->q = q;
	item->binder_type = binder_type;
	list_add(&item->list, &block->owner_list);
	return 0;
}

static void tcf_block_owner_del(struct tcf_block *block,
				struct Qdisc *q,
				enum tcf_block_binder_type binder_type)
{
	struct tcf_block_owner_item *item;

	list_for_each_entry(item, &block->owner_list, list) {
		if (item->q == q && item->binder_type == binder_type) {
			list_del(&item->list);
			kfree(item);
			return;
		}
	}
	WARN_ON(1);
}

int tcf_block_get_ext(struct tcf_block **p_block, struct Qdisc *q,
		      struct tcf_block_ext_info *ei,
		      struct netlink_ext_ack *extack)
{
	struct net *net = qdisc_net(q);
	struct tcf_block *block = NULL;
	bool created = false;
	int err;

	if (ei->block_index) {
		/* block_index not 0 means the shared block is requested */
		block = tcf_block_lookup(net, ei->block_index);
		if (block)
			block->refcnt++;
	}

	if (!block) {
		block = tcf_block_create(net, q, extack);
		if (IS_ERR(block))
			return PTR_ERR(block);
		created = true;
		if (ei->block_index) {
			err = tcf_block_insert(block, net,
					       ei->block_index, extack);
			if (err)
				goto err_block_insert;
		}
	}

	err = tcf_block_owner_add(block, q, ei->binder_type);
	if (err)
		goto err_block_owner_add;

	tcf_block_owner_netif_keep_dst(block, q, ei->binder_type);

	err = tcf_chain_head_change_cb_add(tcf_block_chain_zero(block),
					   ei, extack);
	if (err)
		goto err_chain_head_change_cb_add;

	err = tcf_block_offload_bind(block, q, ei);
	if (err)
		goto err_block_offload_bind;

	*p_block = block;
	return 0;

err_block_offload_bind:
	tcf_chain_head_change_cb_del(tcf_block_chain_zero(block), ei);
err_chain_head_change_cb_add:
	tcf_block_owner_del(block, q, ei->binder_type);
err_block_owner_add:
	if (created) {
		if (tcf_block_shared(block))
			tcf_block_remove(block, net);
err_block_insert:
		kfree(tcf_block_chain_zero(block));
		kfree(block);
	} else {
		block->refcnt--;
	}
	return err;
}
EXPORT_SYMBOL(tcf_block_get_ext);
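[Editor's note] tcf_block_get_ext() above is the qdisc-side entry point to the shared-block infrastructure. A minimal sketch of a caller, modelled on the sch_ingress changes further down in this diff; the example_* names and the private-data layout are illustrative, not part of the patchset:

/* Sketch: a qdisc binding to a possibly-shared block.  The
 * tcf_block_ext_info lives in the qdisc private data; its block_index
 * is pre-filled by the ->ingress_block_set() op before init runs
 * (see qdisc_block_indexes_set() later in this diff).
 */
static int example_init(struct Qdisc *sch, struct nlattr *opt,
			struct netlink_ext_ack *extack)
{
	struct example_sched_data *q = qdisc_priv(sch);

	q->block_info.binder_type = TCF_BLOCK_BINDER_TYPE_CLSACT_INGRESS;
	q->block_info.chain_head_change = clsact_chain_head_change;
	q->block_info.chain_head_change_priv = &q->miniqp;

	/* Reuses an existing shared block (bumping block->refcnt) when
	 * block_info.block_index names one, otherwise creates a block.
	 */
	return tcf_block_get_ext(&q->block, sch, &q->block_info, extack);
}

static void example_destroy(struct Qdisc *sch)
{
	struct example_sched_data *q = qdisc_priv(sch);

	/* The block itself is freed only when the last sharer is gone. */
	tcf_block_put_ext(q->block, sch, &q->block_info);
}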
@@ -346,26 +592,35 @@ void tcf_block_put_ext(struct tcf_block *block, struct Qdisc *q,
 {
 	struct tcf_chain *chain, *tmp;
 
-	/* Hold a refcnt for all chains, so that they don't disappear
-	 * while we are iterating.
-	 */
 	if (!block)
 		return;
-	list_for_each_entry(chain, &block->chain_list, list)
-		tcf_chain_hold(chain);
+	tcf_chain_head_change_cb_del(tcf_block_chain_zero(block), ei);
+	tcf_block_owner_del(block, q, ei->binder_type);
 
-	list_for_each_entry(chain, &block->chain_list, list)
-		tcf_chain_flush(chain);
+	if (--block->refcnt == 0) {
+		if (tcf_block_shared(block))
+			tcf_block_remove(block, block->net);
+
+		/* Hold a refcnt for all chains, so that they don't disappear
+		 * while we are iterating.
+		 */
+		list_for_each_entry(chain, &block->chain_list, list)
+			tcf_chain_hold(chain);
+
+		list_for_each_entry(chain, &block->chain_list, list)
+			tcf_chain_flush(chain);
+	}
 
 	tcf_block_offload_unbind(block, q, ei);
 
-	/* At this point, all the chains should have refcnt >= 1. */
-	list_for_each_entry_safe(chain, tmp, &block->chain_list, list)
-		tcf_chain_put(chain);
+	if (block->refcnt == 0) {
+		/* At this point, all the chains should have refcnt >= 1. */
+		list_for_each_entry_safe(chain, tmp, &block->chain_list, list)
+			tcf_chain_put(chain);
 
-	/* Finally, put chain 0 and allow block to be freed. */
-	chain = list_first_entry(&block->chain_list, struct tcf_chain, list);
-	tcf_chain_put(chain);
+		/* Finally, put chain 0 and allow block to be freed. */
+		tcf_chain_put(tcf_block_chain_zero(block));
+	}
 }
 EXPORT_SYMBOL(tcf_block_put_ext);
@@ -423,9 +678,16 @@ struct tcf_block_cb *__tcf_block_cb_register(struct tcf_block *block,
 {
 	struct tcf_block_cb *block_cb;
 
+	/* At this point, playback of previous block cb calls is not supported,
+	 * so forbid to register to block which already has some offloaded
+	 * filters present.
+	 */
+	if (tcf_block_offload_in_use(block))
+		return ERR_PTR(-EOPNOTSUPP);
+
 	block_cb = kzalloc(sizeof(*block_cb), GFP_KERNEL);
 	if (!block_cb)
-		return NULL;
+		return ERR_PTR(-ENOMEM);
 	block_cb->cb = cb;
 	block_cb->cb_ident = cb_ident;
 	block_cb->cb_priv = cb_priv;
@@ -441,7 +703,7 @@ int tcf_block_cb_register(struct tcf_block *block,
 	struct tcf_block_cb *block_cb;
 
 	block_cb = __tcf_block_cb_register(block, cb, cb_ident, cb_priv);
-	return block_cb ? 0 : -ENOMEM;
+	return IS_ERR(block_cb) ? PTR_ERR(block_cb) : 0;
 }
 EXPORT_SYMBOL(tcf_block_cb_register);
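[Editor's note] Returning ERR_PTR()/errno here changes the contract for drivers that bind to blocks from their ndo_setup_tc(TC_SETUP_BLOCK) handler. A hedged sketch of that driver side, modelled on how NIC drivers of this era consume the API; the example_* names and the driver private pointer are illustrative:

static int example_setup_tc_block(struct example_port *port,
				  struct tc_block_offload *f)
{
	if (f->binder_type != TCF_BLOCK_BINDER_TYPE_CLSACT_INGRESS)
		return -EOPNOTSUPP;

	switch (f->command) {
	case TC_BLOCK_BIND:
		/* Can now fail with -EOPNOTSUPP when the block already
		 * carries offloaded filters that cannot be replayed.
		 */
		return tcf_block_cb_register(f->block,
					     example_setup_tc_block_cb,
					     port, port);
	case TC_BLOCK_UNBIND:
		tcf_block_cb_unregister(f->block,
					example_setup_tc_block_cb, port);
		return 0;
	default:
		return -EOPNOTSUPP;
	}
}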
@@ -471,6 +733,10 @@ static int tcf_block_cb_call(struct tcf_block *block, enum tc_setup_type type,
 	int ok_count = 0;
 	int err;
 
+	/* Make sure all netdevs sharing this block are offload-capable. */
+	if (block->nooffloaddevcnt && err_stop)
+		return -EOPNOTSUPP;
+
 	list_for_each_entry(block_cb, &block->cb_list, list) {
 		err = block_cb->cb(type, type_data, block_cb->cb_priv);
 		if (err) {
@@ -524,8 +790,9 @@ int tcf_classify(struct sk_buff *skb, const struct tcf_proto *tp,
 #ifdef CONFIG_NET_CLS_ACT
 reset:
 	if (unlikely(limit++ >= max_reclassify_loop)) {
-		net_notice_ratelimited("%s: reclassify loop, rule prio %u, protocol %02x\n",
-				       tp->q->ops->id, tp->prio & 0xffff,
+		net_notice_ratelimited("%u: reclassify loop, rule prio %u, protocol %02x\n",
+				       tp->chain->block->index,
+				       tp->prio & 0xffff,
 				       ntohs(tp->protocol));
 		return TC_ACT_SHOT;
 	}
@@ -598,8 +865,9 @@ static struct tcf_proto *tcf_chain_tp_find(struct tcf_chain *chain,
 }
 
 static int tcf_fill_node(struct net *net, struct sk_buff *skb,
-			 struct tcf_proto *tp, struct Qdisc *q, u32 parent,
-			 void *fh, u32 portid, u32 seq, u16 flags, int event)
+			 struct tcf_proto *tp, struct tcf_block *block,
+			 struct Qdisc *q, u32 parent, void *fh,
+			 u32 portid, u32 seq, u16 flags, int event)
 {
 	struct tcmsg *tcm;
 	struct nlmsghdr *nlh;
@@ -612,8 +880,13 @@ static int tcf_fill_node(struct net *net, struct sk_buff *skb,
 	tcm->tcm_family = AF_UNSPEC;
 	tcm->tcm__pad1 = 0;
 	tcm->tcm__pad2 = 0;
-	tcm->tcm_ifindex = qdisc_dev(q)->ifindex;
-	tcm->tcm_parent = parent;
+	if (q) {
+		tcm->tcm_ifindex = qdisc_dev(q)->ifindex;
+		tcm->tcm_parent = parent;
+	} else {
+		tcm->tcm_ifindex = TCM_IFINDEX_MAGIC_BLOCK;
+		tcm->tcm_block_index = block->index;
+	}
 	tcm->tcm_info = TC_H_MAKE(tp->prio, tp->protocol);
 	if (nla_put_string(skb, TCA_KIND, tp->ops->kind))
 		goto nla_put_failure;
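[Editor's note] The else-branch above encodes the convention this series adds for filters that are not tied to a single qdisc: the message carries the reserved ifindex TCM_IFINDEX_MAGIC_BLOCK, and the shared block index travels in tcm_block_index, which the series defines in the uapi as an alias of tcm_parent. A sketch of how a userspace request for "tc filter show block 22" would be filled under this convention (field values are illustrative):

	struct tcmsg t = {
		.tcm_family = AF_UNSPEC,
		.tcm_ifindex = TCM_IFINDEX_MAGIC_BLOCK,	/* no real netdev */
		.tcm_block_index = 22,		/* uapi alias of tcm_parent */
	};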
@@ -636,8 +909,8 @@ static int tcf_fill_node(struct net *net, struct sk_buff *skb,
 
 static int tfilter_notify(struct net *net, struct sk_buff *oskb,
 			  struct nlmsghdr *n, struct tcf_proto *tp,
-			  struct Qdisc *q, u32 parent,
-			  void *fh, int event, bool unicast)
+			  struct tcf_block *block, struct Qdisc *q,
+			  u32 parent, void *fh, int event, bool unicast)
 {
 	struct sk_buff *skb;
 	u32 portid = oskb ? NETLINK_CB(oskb).portid : 0;
@@ -646,8 +919,8 @@ static int tfilter_notify(struct net *net, struct sk_buff *oskb,
 	if (!skb)
 		return -ENOBUFS;
 
-	if (tcf_fill_node(net, skb, tp, q, parent, fh, portid, n->nlmsg_seq,
-			  n->nlmsg_flags, event) <= 0) {
+	if (tcf_fill_node(net, skb, tp, block, q, parent, fh, portid,
+			  n->nlmsg_seq, n->nlmsg_flags, event) <= 0) {
 		kfree_skb(skb);
 		return -EINVAL;
 	}
@@ -661,8 +934,8 @@ static int tfilter_notify(struct net *net, struct sk_buff *oskb,
 
 static int tfilter_del_notify(struct net *net, struct sk_buff *oskb,
 			      struct nlmsghdr *n, struct tcf_proto *tp,
-			      struct Qdisc *q, u32 parent,
-			      void *fh, bool unicast, bool *last)
+			      struct tcf_block *block, struct Qdisc *q,
+			      u32 parent, void *fh, bool unicast, bool *last)
 {
 	struct sk_buff *skb;
 	u32 portid = oskb ? NETLINK_CB(oskb).portid : 0;
@@ -672,8 +945,8 @@ static int tfilter_del_notify(struct net *net, struct sk_buff *oskb,
 	if (!skb)
 		return -ENOBUFS;
 
-	if (tcf_fill_node(net, skb, tp, q, parent, fh, portid, n->nlmsg_seq,
-			  n->nlmsg_flags, RTM_DELTFILTER) <= 0) {
+	if (tcf_fill_node(net, skb, tp, block, q, parent, fh, portid,
+			  n->nlmsg_seq, n->nlmsg_flags, RTM_DELTFILTER) <= 0) {
 		kfree_skb(skb);
 		return -EINVAL;
 	}
@@ -692,15 +965,16 @@ static int tfilter_del_notify(struct net *net, struct sk_buff *oskb,
 }
 
 static void tfilter_notify_chain(struct net *net, struct sk_buff *oskb,
-				 struct Qdisc *q, u32 parent,
-				 struct nlmsghdr *n,
+				 struct tcf_block *block, struct Qdisc *q,
+				 u32 parent, struct nlmsghdr *n,
 				 struct tcf_chain *chain, int event)
 {
 	struct tcf_proto *tp;
 
 	for (tp = rtnl_dereference(chain->filter_chain);
 	     tp; tp = rtnl_dereference(tp->next))
-		tfilter_notify(net, oskb, n, tp, q, parent, 0, event, false);
+		tfilter_notify(net, oskb, n, tp, block,
+			       q, parent, 0, event, false);
 }
 
 /* Add/change/delete/get a filter node */
@@ -716,13 +990,11 @@ static int tc_ctl_tfilter(struct sk_buff *skb, struct nlmsghdr *n,
 	bool prio_allocate;
 	u32 parent;
 	u32 chain_index;
-	struct net_device *dev;
-	struct Qdisc *q;
+	struct Qdisc *q = NULL;
 	struct tcf_chain_info chain_info;
 	struct tcf_chain *chain = NULL;
 	struct tcf_block *block;
 	struct tcf_proto *tp;
-	const struct Qdisc_class_ops *cops;
 	unsigned long cl;
 	void *fh;
 	int err;
@@ -769,41 +1041,58 @@ static int tc_ctl_tfilter(struct sk_buff *skb, struct nlmsghdr *n,
 
 	/* Find head of filter chain. */
 
-	/* Find link */
-	dev = __dev_get_by_index(net, t->tcm_ifindex);
-	if (dev == NULL)
-		return -ENODEV;
-
-	/* Find qdisc */
-	if (!parent) {
-		q = dev->qdisc;
-		parent = q->handle;
+	if (t->tcm_ifindex == TCM_IFINDEX_MAGIC_BLOCK) {
+		block = tcf_block_lookup(net, t->tcm_block_index);
+		if (!block) {
+			NL_SET_ERR_MSG(extack, "Block of given index was not found");
+			err = -EINVAL;
+			goto errout;
+		}
 	} else {
-		q = qdisc_lookup(dev, TC_H_MAJ(t->tcm_parent));
-		if (q == NULL)
+		const struct Qdisc_class_ops *cops;
+		struct net_device *dev;
+
+		/* Find link */
+		dev = __dev_get_by_index(net, t->tcm_ifindex);
+		if (!dev)
+			return -ENODEV;
+
+		/* Find qdisc */
+		if (!parent) {
+			q = dev->qdisc;
+			parent = q->handle;
+		} else {
+			q = qdisc_lookup(dev, TC_H_MAJ(t->tcm_parent));
+			if (!q)
+				return -EINVAL;
+		}
+
+		/* Is it classful? */
+		cops = q->ops->cl_ops;
+		if (!cops)
 			return -EINVAL;
-	}
-
-	/* Is it classful? */
-	cops = q->ops->cl_ops;
-	if (!cops)
-		return -EINVAL;
 
-	if (!cops->tcf_block)
-		return -EOPNOTSUPP;
+		if (!cops->tcf_block)
+			return -EOPNOTSUPP;
+
+		/* Do we search for filter, attached to class? */
+		if (TC_H_MIN(parent)) {
+			cl = cops->find(q, parent);
+			if (cl == 0)
+				return -ENOENT;
+		}
 
-	/* Do we search for filter, attached to class? */
-	if (TC_H_MIN(parent)) {
-		cl = cops->find(q, parent);
-		if (cl == 0)
-			return -ENOENT;
-	}
-
-	/* And the last stroke */
-	block = cops->tcf_block(q, cl, extack);
-	if (!block) {
-		err = -EINVAL;
-		goto errout;
+		/* And the last stroke */
+		block = cops->tcf_block(q, cl, extack);
+		if (!block) {
+			err = -EINVAL;
+			goto errout;
+		}
+		if (tcf_block_shared(block)) {
+			NL_SET_ERR_MSG(extack, "This filter block is shared. Please use the block index to manipulate the filters");
+			err = -EOPNOTSUPP;
+			goto errout;
+		}
 	}
 
 	chain_index = tca[TCA_CHAIN] ? nla_get_u32(tca[TCA_CHAIN]) : 0;
@@ -819,7 +1108,7 @@ static int tc_ctl_tfilter(struct sk_buff *skb, struct nlmsghdr *n,
 	}
 
 	if (n->nlmsg_type == RTM_DELTFILTER && prio == 0) {
-		tfilter_notify_chain(net, skb, q, parent, n,
+		tfilter_notify_chain(net, skb, block, q, parent, n,
 				     chain, RTM_DELTFILTER);
 		tcf_chain_flush(chain);
 		err = 0;
@@ -851,7 +1140,7 @@ static int tc_ctl_tfilter(struct sk_buff *skb, struct nlmsghdr *n,
 			prio = tcf_auto_prio(tcf_chain_tp_prev(&chain_info));
 
 		tp = tcf_proto_create(nla_data(tca[TCA_KIND]),
-				      protocol, prio, parent, q, chain);
+				      protocol, prio, chain);
 		if (IS_ERR(tp)) {
 			err = PTR_ERR(tp);
 			goto errout;
@@ -867,7 +1156,7 @@ static int tc_ctl_tfilter(struct sk_buff *skb, struct nlmsghdr *n,
 	if (!fh) {
 		if (n->nlmsg_type == RTM_DELTFILTER && t->tcm_handle == 0) {
 			tcf_chain_tp_remove(chain, &chain_info, tp);
-			tfilter_notify(net, skb, n, tp, q, parent, fh,
+			tfilter_notify(net, skb, n, tp, block, q, parent, fh,
 				       RTM_DELTFILTER, false);
 			tcf_proto_destroy(tp);
 			err = 0;
@@ -892,8 +1181,8 @@ static int tc_ctl_tfilter(struct sk_buff *skb, struct nlmsghdr *n,
 		}
 		break;
 	case RTM_DELTFILTER:
-		err = tfilter_del_notify(net, skb, n, tp, q, parent,
-					 fh, false, &last);
+		err = tfilter_del_notify(net, skb, n, tp, block,
+					 q, parent, fh, false, &last);
 		if (err)
 			goto errout;
 		if (last) {
@@ -902,8 +1191,8 @@ static int tc_ctl_tfilter(struct sk_buff *skb, struct nlmsghdr *n,
 		}
 		goto errout;
 	case RTM_GETTFILTER:
-		err = tfilter_notify(net, skb, n, tp, q, parent, fh,
-				     RTM_NEWTFILTER, true);
+		err = tfilter_notify(net, skb, n, tp, block, q, parent,
+				     fh, RTM_NEWTFILTER, true);
 		goto errout;
 	default:
 		err = -EINVAL;
@@ -916,7 +1205,7 @@ static int tc_ctl_tfilter(struct sk_buff *skb, struct nlmsghdr *n,
 	if (err == 0) {
 		if (tp_created)
 			tcf_chain_tp_insert(chain, &chain_info, tp);
-		tfilter_notify(net, skb, n, tp, q, parent, fh,
+		tfilter_notify(net, skb, n, tp, block, q, parent, fh,
 			       RTM_NEWTFILTER, false);
 	} else {
 		if (tp_created)
@@ -936,6 +1225,7 @@ struct tcf_dump_args {
 	struct tcf_walker w;
 	struct sk_buff *skb;
 	struct netlink_callback *cb;
+	struct tcf_block *block;
 	struct Qdisc *q;
 	u32 parent;
 };
@@ -945,7 +1235,7 @@ static int tcf_node_dump(struct tcf_proto *tp, void *n, struct tcf_walker *arg)
 	struct tcf_dump_args *a = (void *)arg;
 	struct net *net = sock_net(a->skb->sk);
 
-	return tcf_fill_node(net, a->skb, tp, a->q, a->parent,
+	return tcf_fill_node(net, a->skb, tp, a->block, a->q, a->parent,
 			     n, NETLINK_CB(a->cb->skb).portid,
 			     a->cb->nlh->nlmsg_seq, NLM_F_MULTI,
 			     RTM_NEWTFILTER);
@@ -956,6 +1246,7 @@ static bool tcf_chain_dump(struct tcf_chain *chain, struct Qdisc *q, u32 parent,
 			   long index_start, long *p_index)
 {
 	struct net *net = sock_net(skb->sk);
+	struct tcf_block *block = chain->block;
 	struct tcmsg *tcm = nlmsg_data(cb->nlh);
 	struct tcf_dump_args arg;
 	struct tcf_proto *tp;
@@ -974,7 +1265,7 @@ static bool tcf_chain_dump(struct tcf_chain *chain, struct Qdisc *q, u32 parent,
 			memset(&cb->args[1], 0,
 			       sizeof(cb->args) - sizeof(cb->args[0]));
 		if (cb->args[1] == 0) {
-			if (tcf_fill_node(net, skb, tp, q, parent, 0,
+			if (tcf_fill_node(net, skb, tp, block, q, parent, 0,
 					  NETLINK_CB(cb->skb).portid,
 					  cb->nlh->nlmsg_seq, NLM_F_MULTI,
 					  RTM_NEWTFILTER) <= 0)
@@ -987,6 +1278,7 @@ static bool tcf_chain_dump(struct tcf_chain *chain, struct Qdisc *q, u32 parent,
 		arg.w.fn = tcf_node_dump;
 		arg.skb = skb;
 		arg.cb = cb;
+		arg.block = block;
 		arg.q = q;
 		arg.parent = parent;
 		arg.w.stop = 0;
@@ -1005,13 +1297,10 @@ static int tc_dump_tfilter(struct sk_buff *skb, struct netlink_callback *cb)
 {
 	struct net *net = sock_net(skb->sk);
 	struct nlattr *tca[TCA_MAX + 1];
-	struct net_device *dev;
-	struct Qdisc *q;
+	struct Qdisc *q = NULL;
 	struct tcf_block *block;
 	struct tcf_chain *chain;
 	struct tcmsg *tcm = nlmsg_data(cb->nlh);
-	unsigned long cl = 0;
-	const struct Qdisc_class_ops *cops;
 	long index_start;
 	long index;
 	u32 parent;
@@ -1024,32 +1313,44 @@ static int tc_dump_tfilter(struct sk_buff *skb, struct netlink_callback *cb)
 	if (err)
 		return err;
 
-	dev = __dev_get_by_index(net, tcm->tcm_ifindex);
-	if (!dev)
-		return skb->len;
-
-	parent = tcm->tcm_parent;
-	if (!parent) {
-		q = dev->qdisc;
-		parent = q->handle;
-	} else {
-		q = qdisc_lookup(dev, TC_H_MAJ(tcm->tcm_parent));
-	}
-	if (!q)
-		goto out;
-	cops = q->ops->cl_ops;
-	if (!cops)
-		goto out;
-	if (!cops->tcf_block)
-		goto out;
-	if (TC_H_MIN(tcm->tcm_parent)) {
-		cl = cops->find(q, tcm->tcm_parent);
-		if (cl == 0)
+	if (tcm->tcm_ifindex == TCM_IFINDEX_MAGIC_BLOCK) {
+		block = tcf_block_lookup(net, tcm->tcm_block_index);
+		if (!block)
 			goto out;
+	} else {
+		const struct Qdisc_class_ops *cops;
+		struct net_device *dev;
+		unsigned long cl = 0;
+
+		dev = __dev_get_by_index(net, tcm->tcm_ifindex);
+		if (!dev)
+			return skb->len;
+
+		parent = tcm->tcm_parent;
+		if (!parent) {
+			q = dev->qdisc;
+			parent = q->handle;
+		} else {
+			q = qdisc_lookup(dev, TC_H_MAJ(tcm->tcm_parent));
+		}
+		if (!q)
+			goto out;
+		cops = q->ops->cl_ops;
+		if (!cops)
+			goto out;
+		if (!cops->tcf_block)
+			goto out;
+		if (TC_H_MIN(tcm->tcm_parent)) {
+			cl = cops->find(q, tcm->tcm_parent);
+			if (cl == 0)
+				goto out;
+		}
+		block = cops->tcf_block(q, cl, NULL);
+		if (!block)
+			goto out;
+		if (tcf_block_shared(block))
+			q = NULL;
 	}
-	block = cops->tcf_block(q, cl, NULL);
-	if (!block)
-		goto out;
 
 	index_start = cb->args[0];
 	index = 0;
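[Editor's note] Note the q = NULL assignment for shared blocks at the end of the else-branch: it steers the tcf_fill_node() hunk above into its TCM_IFINDEX_MAGIC_BLOCK path, so dumped filters are reported against the block index rather than against any one of the qdiscs sharing it.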
@@ -1252,18 +1553,50 @@ int tc_setup_cb_call(struct tcf_block *block, struct tcf_exts *exts,
 }
 EXPORT_SYMBOL(tc_setup_cb_call);
 
+static __net_init int tcf_net_init(struct net *net)
+{
+	struct tcf_net *tn = net_generic(net, tcf_net_id);
+
+	idr_init(&tn->idr);
+	return 0;
+}
+
+static void __net_exit tcf_net_exit(struct net *net)
+{
+	struct tcf_net *tn = net_generic(net, tcf_net_id);
+
+	idr_destroy(&tn->idr);
+}
+
+static struct pernet_operations tcf_net_ops = {
+	.init = tcf_net_init,
+	.exit = tcf_net_exit,
+	.id = &tcf_net_id,
+	.size = sizeof(struct tcf_net),
+};
+
 static int __init tc_filter_init(void)
 {
+	int err;
+
 	tc_filter_wq = alloc_ordered_workqueue("tc_filter_workqueue", 0);
 	if (!tc_filter_wq)
 		return -ENOMEM;
 
+	err = register_pernet_subsys(&tcf_net_ops);
+	if (err)
+		goto err_register_pernet_subsys;
+
 	rtnl_register(PF_UNSPEC, RTM_NEWTFILTER, tc_ctl_tfilter, NULL, 0);
 	rtnl_register(PF_UNSPEC, RTM_DELTFILTER, tc_ctl_tfilter, NULL, 0);
 	rtnl_register(PF_UNSPEC, RTM_GETTFILTER, tc_ctl_tfilter,
 		      tc_dump_tfilter, 0);
 
 	return 0;
+
+err_register_pernet_subsys:
+	destroy_workqueue(tc_filter_wq);
+	return err;
 }
 
 subsys_initcall(tc_filter_init);
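[Editor's note] The classifier hunks below replace open-coded TCA_CLS_FLAGS_IN_HW updates with tcf_block_offload_inc()/tcf_block_offload_dec(). Those helpers live in a header outside this excerpt; the field name offloadcnt and the exact definitions below are a hedged reconstruction from the rest of the series, which pairs the IN_HW flag with the per-block counter that tcf_block_offload_in_use() and the nooffloaddevcnt check above rely on:

static inline void tcf_block_offload_inc(struct tcf_block *block, u32 *flags)
{
	if (*flags & TCA_CLS_FLAGS_IN_HW)
		return;
	/* The flag guards the counter, so a filter is counted once. */
	*flags |= TCA_CLS_FLAGS_IN_HW;
	block->offloadcnt++;
}

static inline void tcf_block_offload_dec(struct tcf_block *block, u32 *flags)
{
	if (!(*flags & TCA_CLS_FLAGS_IN_HW))
		return;
	*flags &= ~TCA_CLS_FLAGS_IN_HW;
	block->offloadcnt--;
}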
--- a/net/sched/cls_bpf.c
+++ b/net/sched/cls_bpf.c
@@ -167,13 +167,16 @@ static int cls_bpf_offload_cmd(struct tcf_proto *tp, struct cls_bpf_prog *prog,
 	cls_bpf.exts_integrated = obj->exts_integrated;
 	cls_bpf.gen_flags = obj->gen_flags;
 
+	if (oldprog)
+		tcf_block_offload_dec(block, &oldprog->gen_flags);
+
 	err = tc_setup_cb_call(block, NULL, TC_SETUP_CLSBPF, &cls_bpf, skip_sw);
 	if (prog) {
 		if (err < 0) {
 			cls_bpf_offload_cmd(tp, oldprog, prog);
 			return err;
 		} else if (err > 0) {
-			prog->gen_flags |= TCA_CLS_FLAGS_IN_HW;
+			tcf_block_offload_inc(block, &prog->gen_flags);
 		}
 	}
 
@@ -392,8 +395,8 @@ static int cls_bpf_prog_from_efd(struct nlattr **tb, struct cls_bpf_prog *prog,
 	prog->bpf_name = name;
 	prog->filter = fp;
 
-	if (fp->dst_needed && !(tp->q->flags & TCQ_F_INGRESS))
-		netif_keep_dst(qdisc_dev(tp->q));
+	if (fp->dst_needed)
+		tcf_block_netif_keep_dst(tp->chain->block);
 
 	return 0;
 }
--- a/net/sched/cls_flow.c
+++ b/net/sched/cls_flow.c
@@ -526,7 +526,7 @@ static int flow_change(struct net *net, struct sk_buff *in_skb,
 
 		timer_setup(&fnew->perturb_timer, flow_perturbation, TIMER_DEFERRABLE);
 
-		netif_keep_dst(qdisc_dev(tp->q));
+		tcf_block_netif_keep_dst(tp->chain->block);
 
 		if (tb[TCA_FLOW_KEYS]) {
 			fnew->keymask = keymask;
--- a/net/sched/cls_flower.c
+++ b/net/sched/cls_flower.c
@@ -229,6 +229,7 @@ static void fl_hw_destroy_filter(struct tcf_proto *tp, struct cls_fl_filter *f)
 
 	tc_setup_cb_call(block, &f->exts, TC_SETUP_CLSFLOWER,
 			 &cls_flower, false);
+	tcf_block_offload_dec(block, &f->flags);
 }
 
 static int fl_hw_replace_filter(struct tcf_proto *tp,
@@ -256,7 +257,7 @@ static int fl_hw_replace_filter(struct tcf_proto *tp,
 		fl_hw_destroy_filter(tp, f);
 		return err;
 	} else if (err > 0) {
-		f->flags |= TCA_CLS_FLAGS_IN_HW;
+		tcf_block_offload_inc(block, &f->flags);
 	}
 
 	if (skip_sw && !(f->flags & TCA_CLS_FLAGS_IN_HW))
--- a/net/sched/cls_matchall.c
+++ b/net/sched/cls_matchall.c
@@ -81,6 +81,7 @@ static void mall_destroy_hw_filter(struct tcf_proto *tp,
 	cls_mall.cookie = cookie;
 
 	tc_setup_cb_call(block, NULL, TC_SETUP_CLSMATCHALL, &cls_mall, false);
+	tcf_block_offload_dec(block, &head->flags);
 }
 
 static int mall_replace_hw_filter(struct tcf_proto *tp,
@@ -103,7 +104,7 @@ static int mall_replace_hw_filter(struct tcf_proto *tp,
 		mall_destroy_hw_filter(tp, head, cookie);
 		return err;
 	} else if (err > 0) {
-		head->flags |= TCA_CLS_FLAGS_IN_HW;
+		tcf_block_offload_inc(block, &head->flags);
 	}
 
 	if (skip_sw && !(head->flags & TCA_CLS_FLAGS_IN_HW))
--- a/net/sched/cls_route.c
+++ b/net/sched/cls_route.c
@@ -527,7 +527,7 @@ static int route4_change(struct net *net, struct sk_buff *in_skb,
 		if (f->handle < f1->handle)
 			break;
 
-	netif_keep_dst(qdisc_dev(tp->q));
+	tcf_block_netif_keep_dst(tp->chain->block);
 	rcu_assign_pointer(f->next, f1);
 	rcu_assign_pointer(*fp, f);
 
--- a/net/sched/cls_u32.c
+++ b/net/sched/cls_u32.c
@@ -529,16 +529,17 @@ static int u32_replace_hw_hnode(struct tcf_proto *tp, struct tc_u_hnode *h,
 	return 0;
 }
 
-static void u32_remove_hw_knode(struct tcf_proto *tp, u32 handle)
+static void u32_remove_hw_knode(struct tcf_proto *tp, struct tc_u_knode *n)
 {
 	struct tcf_block *block = tp->chain->block;
 	struct tc_cls_u32_offload cls_u32 = {};
 
 	tc_cls_common_offload_init(&cls_u32.common, tp);
 	cls_u32.command = TC_CLSU32_DELETE_KNODE;
-	cls_u32.knode.handle = handle;
+	cls_u32.knode.handle = n->handle;
 
 	tc_setup_cb_call(block, NULL, TC_SETUP_CLSU32, &cls_u32, false);
+	tcf_block_offload_dec(block, &n->flags);
 }
 
 static int u32_replace_hw_knode(struct tcf_proto *tp, struct tc_u_knode *n,
@@ -567,10 +568,10 @@ static int u32_replace_hw_knode(struct tcf_proto *tp, struct tc_u_knode *n,
 
 	err = tc_setup_cb_call(block, NULL, TC_SETUP_CLSU32, &cls_u32, skip_sw);
 	if (err < 0) {
-		u32_remove_hw_knode(tp, n->handle);
+		u32_remove_hw_knode(tp, n);
 		return err;
 	} else if (err > 0) {
-		n->flags |= TCA_CLS_FLAGS_IN_HW;
+		tcf_block_offload_inc(block, &n->flags);
 	}
 
 	if (skip_sw && !(n->flags & TCA_CLS_FLAGS_IN_HW))
@@ -589,7 +590,7 @@ static void u32_clear_hnode(struct tcf_proto *tp, struct tc_u_hnode *ht)
 			RCU_INIT_POINTER(ht->ht[h],
 					 rtnl_dereference(n->next));
 			tcf_unbind_filter(tp, &n->res);
-			u32_remove_hw_knode(tp, n->handle);
+			u32_remove_hw_knode(tp, n);
 			idr_remove_ext(&ht->handle_idr, n->handle);
 			if (tcf_exts_get_net(&n->exts))
 				call_rcu(&n->rcu, u32_delete_key_freepf_rcu);
@@ -682,7 +683,7 @@ static int u32_delete(struct tcf_proto *tp, void *arg, bool *last)
 		goto out;
 
 	if (TC_U32_KEY(ht->handle)) {
-		u32_remove_hw_knode(tp, ht->handle);
+		u32_remove_hw_knode(tp, (struct tc_u_knode *)ht);
 		ret = u32_delete_key(tp, (struct tc_u_knode *)ht);
 		goto out;
 	}
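[Editor's note] The (struct tc_u_knode *)ht cast in the u32_delete() hunk mirrors the pre-existing u32_delete_key() call on the next line: when TC_U32_KEY(ht->handle) is non-zero the handle actually names a knode, so passing the pointer through lets u32_remove_hw_knode() reach the flags field it now needs for the offload-count decrement.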
--- a/net/sched/sch_api.c
+++ b/net/sched/sch_api.c
@@ -791,6 +791,7 @@ static int tc_fill_qdisc(struct sk_buff *skb, struct Qdisc *q, u32 clid,
 	unsigned char *b = skb_tail_pointer(skb);
 	struct gnet_dump d;
 	struct qdisc_size_table *stab;
+	u32 block_index;
 	__u32 qlen;
 
 	cond_resched();
@@ -807,6 +808,18 @@ static int tc_fill_qdisc(struct sk_buff *skb, struct Qdisc *q, u32 clid,
 	tcm->tcm_info = refcount_read(&q->refcnt);
 	if (nla_put_string(skb, TCA_KIND, q->ops->id))
 		goto nla_put_failure;
+	if (q->ops->ingress_block_get) {
+		block_index = q->ops->ingress_block_get(q);
+		if (block_index &&
+		    nla_put_u32(skb, TCA_INGRESS_BLOCK, block_index))
+			goto nla_put_failure;
+	}
+	if (q->ops->egress_block_get) {
+		block_index = q->ops->egress_block_get(q);
+		if (block_index &&
+		    nla_put_u32(skb, TCA_EGRESS_BLOCK, block_index))
+			goto nla_put_failure;
+	}
 	if (q->ops->dump && q->ops->dump(q, skb) < 0)
 		goto nla_put_failure;
 	if (nla_put_u8(skb, TCA_HW_OFFLOAD, !!(q->flags & TCQ_F_OFFLOADED)))
@@ -994,6 +1007,40 @@ static int qdisc_graft(struct net_device *dev, struct Qdisc *parent,
 	return err;
 }
 
+static int qdisc_block_indexes_set(struct Qdisc *sch, struct nlattr **tca,
+				   struct netlink_ext_ack *extack)
+{
+	u32 block_index;
+
+	if (tca[TCA_INGRESS_BLOCK]) {
+		block_index = nla_get_u32(tca[TCA_INGRESS_BLOCK]);
+
+		if (!block_index) {
+			NL_SET_ERR_MSG(extack, "Ingress block index cannot be 0");
+			return -EINVAL;
+		}
+		if (!sch->ops->ingress_block_set) {
+			NL_SET_ERR_MSG(extack, "Ingress block sharing is not supported");
+			return -EOPNOTSUPP;
+		}
+		sch->ops->ingress_block_set(sch, block_index);
+	}
+	if (tca[TCA_EGRESS_BLOCK]) {
+		block_index = nla_get_u32(tca[TCA_EGRESS_BLOCK]);
+
+		if (!block_index) {
+			NL_SET_ERR_MSG(extack, "Egress block index cannot be 0");
+			return -EINVAL;
+		}
+		if (!sch->ops->egress_block_set) {
+			NL_SET_ERR_MSG(extack, "Egress block sharing is not supported");
+			return -EOPNOTSUPP;
+		}
+		sch->ops->egress_block_set(sch, block_index);
+	}
+	return 0;
+}
+
 /* lockdep annotation is needed for ingress; egress gets it only for name */
 static struct lock_class_key qdisc_tx_lock;
 static struct lock_class_key qdisc_rx_lock;
@@ -1088,6 +1135,10 @@ static struct Qdisc *qdisc_create(struct net_device *dev,
 			netdev_info(dev, "Caught tx_queue_len zero misconfig\n");
 		}
 
+		err = qdisc_block_indexes_set(sch, tca, extack);
+		if (err)
+			goto err_out3;
+
 		if (ops->init) {
 			err = ops->init(sch, tca[TCA_OPTIONS], extack);
 			if (err != 0)
@@ -1169,6 +1220,10 @@ static int qdisc_change(struct Qdisc *sch, struct nlattr **tca,
 		NL_SET_ERR_MSG(extack, "Change operation not supported by specified qdisc");
 		return -EINVAL;
 	}
+	if (tca[TCA_INGRESS_BLOCK] || tca[TCA_EGRESS_BLOCK]) {
+		NL_SET_ERR_MSG(extack, "Change of blocks is not supported");
+		return -EOPNOTSUPP;
+	}
 	err = sch->ops->change(sch, tca[TCA_OPTIONS], extack);
 	if (err)
 		return err;
@@ -1894,6 +1949,11 @@ static int tc_ctl_tclass(struct sk_buff *skb, struct nlmsghdr *n,
 		}
 	}
 
+	if (tca[TCA_INGRESS_BLOCK] || tca[TCA_EGRESS_BLOCK]) {
+		NL_SET_ERR_MSG(extack, "Shared blocks are not supported for classes");
+		return -EOPNOTSUPP;
+	}
+
 	new_cl = cl;
 	err = -EOPNOTSUPP;
 	if (cops->change)
--- a/net/sched/sch_ingress.c
+++ b/net/sched/sch_ingress.c
@@ -61,6 +61,20 @@ static void clsact_chain_head_change(struct tcf_proto *tp_head, void *priv)
 	struct mini_Qdisc_pair *miniqp = priv;
 
 	mini_qdisc_pair_swap(miniqp, tp_head);
 };
 
+static void ingress_ingress_block_set(struct Qdisc *sch, u32 block_index)
+{
+	struct ingress_sched_data *q = qdisc_priv(sch);
+
+	q->block_info.block_index = block_index;
+}
+
+static u32 ingress_ingress_block_get(struct Qdisc *sch)
+{
+	struct ingress_sched_data *q = qdisc_priv(sch);
+
+	return q->block_info.block_index;
+}
+
 static int ingress_init(struct Qdisc *sch, struct nlattr *opt,
@@ -113,14 +127,16 @@ static const struct Qdisc_class_ops ingress_class_ops = {
 };
 
 static struct Qdisc_ops ingress_qdisc_ops __read_mostly = {
-	.cl_ops		=	&ingress_class_ops,
-	.id		=	"ingress",
-	.priv_size	=	sizeof(struct ingress_sched_data),
-	.static_flags	=	TCQ_F_CPUSTATS,
-	.init		=	ingress_init,
-	.destroy	=	ingress_destroy,
-	.dump		=	ingress_dump,
-	.owner		=	THIS_MODULE,
+	.cl_ops			=	&ingress_class_ops,
+	.id			=	"ingress",
+	.priv_size		=	sizeof(struct ingress_sched_data),
+	.static_flags		=	TCQ_F_CPUSTATS,
+	.init			=	ingress_init,
+	.destroy		=	ingress_destroy,
+	.dump			=	ingress_dump,
+	.ingress_block_set	=	ingress_ingress_block_set,
+	.ingress_block_get	=	ingress_ingress_block_get,
+	.owner			=	THIS_MODULE,
 };
 
 struct clsact_sched_data {
@@ -164,6 +180,34 @@ static struct tcf_block *clsact_tcf_block(struct Qdisc *sch, unsigned long cl,
 	}
 }
 
+static void clsact_ingress_block_set(struct Qdisc *sch, u32 block_index)
+{
+	struct clsact_sched_data *q = qdisc_priv(sch);
+
+	q->ingress_block_info.block_index = block_index;
+}
+
+static void clsact_egress_block_set(struct Qdisc *sch, u32 block_index)
+{
+	struct clsact_sched_data *q = qdisc_priv(sch);
+
+	q->egress_block_info.block_index = block_index;
+}
+
+static u32 clsact_ingress_block_get(struct Qdisc *sch)
+{
+	struct clsact_sched_data *q = qdisc_priv(sch);
+
+	return q->ingress_block_info.block_index;
+}
+
+static u32 clsact_egress_block_get(struct Qdisc *sch)
+{
+	struct clsact_sched_data *q = qdisc_priv(sch);
+
+	return q->egress_block_info.block_index;
+}
+
 static int clsact_init(struct Qdisc *sch, struct nlattr *opt,
 		       struct netlink_ext_ack *extack)
 {
@@ -215,14 +259,18 @@ static const struct Qdisc_class_ops clsact_class_ops = {
 };
 
 static struct Qdisc_ops clsact_qdisc_ops __read_mostly = {
-	.cl_ops		=	&clsact_class_ops,
-	.id		=	"clsact",
-	.priv_size	=	sizeof(struct clsact_sched_data),
-	.static_flags	=	TCQ_F_CPUSTATS,
-	.init		=	clsact_init,
-	.destroy	=	clsact_destroy,
-	.dump		=	ingress_dump,
-	.owner		=	THIS_MODULE,
+	.cl_ops			=	&clsact_class_ops,
+	.id			=	"clsact",
+	.priv_size		=	sizeof(struct clsact_sched_data),
+	.static_flags		=	TCQ_F_CPUSTATS,
+	.init			=	clsact_init,
+	.destroy		=	clsact_destroy,
+	.dump			=	ingress_dump,
+	.ingress_block_set	=	clsact_ingress_block_set,
+	.egress_block_set	=	clsact_egress_block_set,
+	.ingress_block_get	=	clsact_ingress_block_get,
+	.egress_block_get	=	clsact_egress_block_get,
+	.owner			=	THIS_MODULE,
 };
 
 static int __init ingress_module_init(void)