/*
 * include/net/switchdev.h - Switch device API
 * Copyright (c) 2014-2015 Jiri Pirko <jiri@resnulli.us>
 * Copyright (c) 2014-2015 Scott Feldman <sfeldma@gmail.com>
 *
 * This program is free software; you can redistribute it and/or modify
 * it under the terms of the GNU General Public License as published by
 * the Free Software Foundation; either version 2 of the License, or
 * (at your option) any later version.
 */
#ifndef _LINUX_SWITCHDEV_H_
#define _LINUX_SWITCHDEV_H_

#include <linux/netdevice.h>
#include <linux/notifier.h>
#include <linux/list.h>
#include <net/ip_fib.h>

#define SWITCHDEV_F_NO_RECURSE		BIT(0)
#define SWITCHDEV_F_SKIP_EOPNOTSUPP	BIT(1)
#define SWITCHDEV_F_DEFER		BIT(2)

struct switchdev_trans_item {
	struct list_head list;
	void *data;
	void (*destructor)(const void *data);
};

struct switchdev_trans {
	struct list_head item_list;
	bool ph_prepare;
};

static inline bool switchdev_trans_ph_prepare(struct switchdev_trans *trans)
{
	return trans && trans->ph_prepare;
}

static inline bool switchdev_trans_ph_commit(struct switchdev_trans *trans)
{
	return trans && !trans->ph_prepare;
}
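
/*
 * Attribute and object writes follow a two-phase prepare/commit transaction:
 * in the prepare phase a driver only validates the request and reserves what
 * it needs, and may refuse it; in the commit phase it applies the change and
 * must not fail.  If any port fails the prepare phase, the whole transaction
 * is aborted before anything is committed.  Below is an illustrative sketch
 * (not taken from a real driver) of an attr_set op, where foo_stp_resource_ok()
 * and foo_hw_set_stp_state() stand in for hypothetical hardware accessors:
 *
 *	static int foo_port_attr_set(struct net_device *dev,
 *				     const struct switchdev_attr *attr,
 *				     struct switchdev_trans *trans)
 *	{
 *		if (attr->id != SWITCHDEV_ATTR_ID_PORT_STP_STATE)
 *			return -EOPNOTSUPP;
 *
 *		if (switchdev_trans_ph_prepare(trans))
 *			return foo_stp_resource_ok(dev) ? 0 : -ENOSPC;
 *
 *		return foo_hw_set_stp_state(dev, attr->u.stp_state);
 *	}
 */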

enum switchdev_attr_id {
	SWITCHDEV_ATTR_ID_UNDEFINED,
	SWITCHDEV_ATTR_ID_PORT_PARENT_ID,
	SWITCHDEV_ATTR_ID_PORT_STP_STATE,
	SWITCHDEV_ATTR_ID_PORT_BRIDGE_FLAGS,
	SWITCHDEV_ATTR_ID_BRIDGE_AGEING_TIME,
	SWITCHDEV_ATTR_ID_BRIDGE_VLAN_FILTERING,
};

struct switchdev_attr {
	struct net_device *orig_dev;
	enum switchdev_attr_id id;
	u32 flags;
	void *complete_priv;
	void (*complete)(struct net_device *dev, int err, void *priv);
	union {
		struct netdev_phys_item_id ppid;	/* PORT_PARENT_ID */
		u8 stp_state;				/* PORT_STP_STATE */
		unsigned long brport_flags;		/* PORT_BRIDGE_FLAGS */
		clock_t ageing_time;			/* BRIDGE_AGEING_TIME */
		bool vlan_filtering;			/* BRIDGE_VLAN_FILTERING */
	} u;
};
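
/*
 * A caller describes what it wants changed by filling a struct switchdev_attr
 * and passing it to switchdev_port_attr_set() (declared further down).  An
 * illustrative sketch of putting a bridge port into forwarding state, with
 * "port_dev" standing in for the caller's port net_device:
 *
 *	struct switchdev_attr attr = {
 *		.orig_dev = port_dev,
 *		.id = SWITCHDEV_ATTR_ID_PORT_STP_STATE,
 *		.flags = SWITCHDEV_F_DEFER,
 *		.u.stp_state = BR_STATE_FORWARDING,
 *	};
 *	int err = switchdev_port_attr_set(port_dev, &attr);
 *
 * SWITCHDEV_F_DEFER asks for the set to be executed later from deferred
 * context (see switchdev_deferred_process()), which is useful when the
 * caller cannot take rtnl_lock itself.
 */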

enum switchdev_obj_id {
	SWITCHDEV_OBJ_ID_UNDEFINED,
	SWITCHDEV_OBJ_ID_PORT_VLAN,
	SWITCHDEV_OBJ_ID_IPV4_FIB,
	SWITCHDEV_OBJ_ID_PORT_FDB,
	SWITCHDEV_OBJ_ID_PORT_MDB,
};

struct switchdev_obj {
	struct net_device *orig_dev;
	enum switchdev_obj_id id;
	u32 flags;
	void *complete_priv;
	void (*complete)(struct net_device *dev, int err, void *priv);
};

/* SWITCHDEV_OBJ_ID_PORT_VLAN */
struct switchdev_obj_port_vlan {
	struct switchdev_obj obj;
	u16 flags;
	u16 vid_begin;
	u16 vid_end;
};

#define SWITCHDEV_OBJ_PORT_VLAN(obj) \
	container_of(obj, struct switchdev_obj_port_vlan, obj)
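
/*
 * Callers embed the generic struct switchdev_obj in the type-specific
 * structure and pass the embedded member; the driver converts it back with
 * the SWITCHDEV_OBJ_PORT_VLAN() helper above.  An illustrative sketch of
 * adding VLAN 100 as untagged PVID on a hypothetical "port_dev":
 *
 *	struct switchdev_obj_port_vlan v = {
 *		.obj.orig_dev = port_dev,
 *		.obj.id = SWITCHDEV_OBJ_ID_PORT_VLAN,
 *		.flags = BRIDGE_VLAN_INFO_PVID | BRIDGE_VLAN_INFO_UNTAGGED,
 *		.vid_begin = 100,
 *		.vid_end = 100,
 *	};
 *	int err = switchdev_port_obj_add(port_dev, &v.obj);
 *
 * In the driver's .switchdev_port_obj_add op, SWITCHDEV_OBJ_PORT_VLAN(obj)
 * recovers the struct switchdev_obj_port_vlan from the const struct
 * switchdev_obj pointer it is given.
 */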

/* SWITCHDEV_OBJ_ID_IPV4_FIB */
struct switchdev_obj_ipv4_fib {
	struct switchdev_obj obj;
	u32 dst;
	int dst_len;
	struct fib_info *fi;
	u8 tos;
	u8 type;
	u32 nlflags;
	u32 tb_id;
};

#define SWITCHDEV_OBJ_IPV4_FIB(obj) \
	container_of(obj, struct switchdev_obj_ipv4_fib, obj)

/* SWITCHDEV_OBJ_ID_PORT_FDB */
struct switchdev_obj_port_fdb {
	struct switchdev_obj obj;
	unsigned char addr[ETH_ALEN];
	u16 vid;
	u16 ndm_state;
};

#define SWITCHDEV_OBJ_PORT_FDB(obj) \
	container_of(obj, struct switchdev_obj_port_fdb, obj)

/* SWITCHDEV_OBJ_ID_PORT_MDB */
struct switchdev_obj_port_mdb {
	struct switchdev_obj obj;
	unsigned char addr[ETH_ALEN];
	u16 vid;
};

#define SWITCHDEV_OBJ_PORT_MDB(obj) \
	container_of(obj, struct switchdev_obj_port_mdb, obj)

void switchdev_trans_item_enqueue(struct switchdev_trans *trans,
				  void *data, void (*destructor)(void const *),
				  struct switchdev_trans_item *tritem);
void *switchdev_trans_item_dequeue(struct switchdev_trans *trans);
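
/*
 * Memory that an operation needs is allocated in the prepare phase, queued
 * on the transaction, and dequeued again in the commit phase, so that the
 * commit cannot fail on allocation.  A sketch under the assumption of a
 * hypothetical driver structure that embeds the list item:
 *
 *	struct foo_vlan_ctx {
 *		struct switchdev_trans_item tritem;
 *		u16 vid;
 *	};
 *
 *	if (switchdev_trans_ph_prepare(trans)) {
 *		struct foo_vlan_ctx *ctx = kzalloc(sizeof(*ctx), GFP_KERNEL);
 *
 *		if (!ctx)
 *			return -ENOMEM;
 *		ctx->vid = vlan->vid_begin;
 *		switchdev_trans_item_enqueue(trans, ctx, kfree, &ctx->tritem);
 *		return 0;
 *	}
 *
 *	struct foo_vlan_ctx *ctx = switchdev_trans_item_dequeue(trans);
 *
 * Items still queued when a transaction is aborted are released through the
 * destructor given to switchdev_trans_item_enqueue(), kfree() here.
 */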

typedef int switchdev_obj_dump_cb_t(struct switchdev_obj *obj);
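
/*
 * The dump op is iterator style: the core hands the driver a partially
 * filled object and a callback, and the driver invokes the callback once per
 * entry it knows about.  A sketch of the driver side of an FDB dump, where
 * struct foo_port and its fdb_list are hypothetical driver state:
 *
 *	static int foo_port_fdb_dump(struct net_device *dev,
 *				     struct switchdev_obj *obj,
 *				     switchdev_obj_dump_cb_t *cb)
 *	{
 *		struct switchdev_obj_port_fdb *fdb = SWITCHDEV_OBJ_PORT_FDB(obj);
 *		struct foo_port *port = netdev_priv(dev);
 *		struct foo_fdb_entry *entry;
 *		int err = 0;
 *
 *		list_for_each_entry(entry, &port->fdb_list, list) {
 *			ether_addr_copy(fdb->addr, entry->mac);
 *			fdb->vid = entry->vid;
 *			fdb->ndm_state = NUD_REACHABLE;
 *			err = cb(obj);
 *			if (err)
 *				break;
 *		}
 *		return err;
 *	}
 */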

/**
 * struct switchdev_ops - switchdev operations
 *
 * @switchdev_port_attr_get: Get a port attribute (see switchdev_attr).
 *
 * @switchdev_port_attr_set: Set a port attribute (see switchdev_attr).
 *
 * @switchdev_port_obj_add: Add an object to port (see switchdev_obj_*).
 *
 * @switchdev_port_obj_del: Delete an object from port (see switchdev_obj_*).
 *
 * @switchdev_port_obj_dump: Dump port objects (see switchdev_obj_*).
 */
struct switchdev_ops {
	int	(*switchdev_port_attr_get)(struct net_device *dev,
					   struct switchdev_attr *attr);
	int	(*switchdev_port_attr_set)(struct net_device *dev,
					   const struct switchdev_attr *attr,
					   struct switchdev_trans *trans);
	int	(*switchdev_port_obj_add)(struct net_device *dev,
					  const struct switchdev_obj *obj,
					  struct switchdev_trans *trans);
	int	(*switchdev_port_obj_del)(struct net_device *dev,
					  const struct switchdev_obj *obj);
	int	(*switchdev_port_obj_dump)(struct net_device *dev,
					   struct switchdev_obj *obj,
					   switchdev_obj_dump_cb_t *cb);
};
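
/*
 * A port driver exposes these callbacks through a static ops structure that
 * it attaches to each of its port netdevs during port setup, before
 * register_netdev().  A sketch, with the foo_port_* implementations assumed:
 *
 *	static const struct switchdev_ops foo_port_switchdev_ops = {
 *		.switchdev_port_attr_get	= foo_port_attr_get,
 *		.switchdev_port_attr_set	= foo_port_attr_set,
 *		.switchdev_port_obj_add		= foo_port_obj_add,
 *		.switchdev_port_obj_del		= foo_port_obj_del,
 *		.switchdev_port_obj_dump	= foo_port_obj_dump,
 *	};
 *
 *	dev->switchdev_ops = &foo_port_switchdev_ops;
 *
 * The switchdev_ops pointer in struct net_device only exists when
 * CONFIG_NET_SWITCHDEV is enabled.
 */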

enum switchdev_notifier_type {
	SWITCHDEV_FDB_ADD = 1,
	SWITCHDEV_FDB_DEL,
};

struct switchdev_notifier_info {
	struct net_device *dev;
};

struct switchdev_notifier_fdb_info {
	struct switchdev_notifier_info info; /* must be first */
	const unsigned char *addr;
	u16 vid;
};

static inline struct net_device *
switchdev_notifier_info_to_dev(const struct switchdev_notifier_info *info)
{
	return info->dev;
}
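
/*
 * Drivers report addresses learned or aged out by the hardware through the
 * switchdev notifier chain; listeners such as the bridge then update their
 * software FDB.  A sketch of the driver side, run under rtnl_lock(), with
 * "mac" and "vid" standing in for values read from the hardware:
 *
 *	struct switchdev_notifier_fdb_info info = {
 *		.addr = mac,
 *		.vid = vid,
 *	};
 *
 *	call_switchdev_notifiers(SWITCHDEV_FDB_ADD, port_dev, &info.info);
 *
 * A listener receives a struct switchdev_notifier_info pointer and uses
 * switchdev_notifier_info_to_dev() plus container_of() to get back the port
 * device and the FDB details, which is why the embedded info member must
 * stay first.
 */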

#ifdef CONFIG_NET_SWITCHDEV

void switchdev_deferred_process(void);
int switchdev_port_attr_get(struct net_device *dev,
			    struct switchdev_attr *attr);
int switchdev_port_attr_set(struct net_device *dev,
			    const struct switchdev_attr *attr);
int switchdev_port_obj_add(struct net_device *dev,
			   const struct switchdev_obj *obj);
int switchdev_port_obj_del(struct net_device *dev,
			   const struct switchdev_obj *obj);
int switchdev_port_obj_dump(struct net_device *dev, struct switchdev_obj *obj,
			    switchdev_obj_dump_cb_t *cb);
int register_switchdev_notifier(struct notifier_block *nb);
int unregister_switchdev_notifier(struct notifier_block *nb);
int call_switchdev_notifiers(unsigned long val, struct net_device *dev,
			     struct switchdev_notifier_info *info);
int switchdev_port_bridge_getlink(struct sk_buff *skb, u32 pid, u32 seq,
				  struct net_device *dev, u32 filter_mask,
				  int nlflags);
int switchdev_port_bridge_setlink(struct net_device *dev,
				  struct nlmsghdr *nlh, u16 flags);
int switchdev_port_bridge_dellink(struct net_device *dev,
				  struct nlmsghdr *nlh, u16 flags);
int switchdev_fib_ipv4_add(u32 dst, int dst_len, struct fib_info *fi,
			   u8 tos, u8 type, u32 nlflags, u32 tb_id);
int switchdev_fib_ipv4_del(u32 dst, int dst_len, struct fib_info *fi,
			   u8 tos, u8 type, u32 tb_id);
void switchdev_fib_ipv4_abort(struct fib_info *fi);
int switchdev_port_fdb_add(struct ndmsg *ndm, struct nlattr *tb[],
			   struct net_device *dev, const unsigned char *addr,
			   u16 vid, u16 nlm_flags);
int switchdev_port_fdb_del(struct ndmsg *ndm, struct nlattr *tb[],
			   struct net_device *dev, const unsigned char *addr,
			   u16 vid);
int switchdev_port_fdb_dump(struct sk_buff *skb, struct netlink_callback *cb,
			    struct net_device *dev,
			    struct net_device *filter_dev, int idx);
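
/*
 * The bridge and FDB helpers above share their prototypes with the matching
 * ndo_* callbacks, so a switchdev driver can usually plug them straight into
 * its net_device_ops rather than open-coding the netlink handling.  A sketch
 * of the relevant part of a hypothetical driver's ops (other callbacks
 * omitted):
 *
 *	static const struct net_device_ops foo_port_netdev_ops = {
 *		.ndo_bridge_setlink	= switchdev_port_bridge_setlink,
 *		.ndo_bridge_getlink	= switchdev_port_bridge_getlink,
 *		.ndo_bridge_dellink	= switchdev_port_bridge_dellink,
 *		.ndo_fdb_add		= switchdev_port_fdb_add,
 *		.ndo_fdb_del		= switchdev_port_fdb_del,
 *		.ndo_fdb_dump		= switchdev_port_fdb_dump,
 *	};
 */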

void switchdev_port_fwd_mark_set(struct net_device *dev,
				 struct net_device *group_dev,
				 bool joining);

bool switchdev_port_same_parent_id(struct net_device *a,
				   struct net_device *b);
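
/*
 * switchdev_port_same_parent_id() compares the PORT_PARENT_ID attribute of
 * two netdevs, i.e. whether they are ports of the same physical switch, and
 * switchdev_port_fwd_mark_set() recomputes a port's offload forwarding mark
 * when it joins or leaves a bridge group so that packets already forwarded
 * in hardware are not forwarded again in software.  A hedged sketch of a
 * check a driver might make before offloading forwarding between two ports:
 *
 *	if (!switchdev_port_same_parent_id(dev, other_dev))
 *		return -EOPNOTSUPP;
 */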

#else

static inline void switchdev_deferred_process(void)
{
}

static inline int switchdev_port_attr_get(struct net_device *dev,
					   struct switchdev_attr *attr)
{
	return -EOPNOTSUPP;
}

static inline int switchdev_port_attr_set(struct net_device *dev,
					   const struct switchdev_attr *attr)
{
	return -EOPNOTSUPP;
}

static inline int switchdev_port_obj_add(struct net_device *dev,
					  const struct switchdev_obj *obj)
{
	return -EOPNOTSUPP;
}

static inline int switchdev_port_obj_del(struct net_device *dev,
					  const struct switchdev_obj *obj)
{
	return -EOPNOTSUPP;
}

static inline int switchdev_port_obj_dump(struct net_device *dev,
					   const struct switchdev_obj *obj,
					   switchdev_obj_dump_cb_t *cb)
{
	return -EOPNOTSUPP;
}

static inline int register_switchdev_notifier(struct notifier_block *nb)
{
	return 0;
}

static inline int unregister_switchdev_notifier(struct notifier_block *nb)
{
	return 0;
}

static inline int call_switchdev_notifiers(unsigned long val,
					   struct net_device *dev,
					   struct switchdev_notifier_info *info)
{
	return NOTIFY_DONE;
}

static inline int switchdev_port_bridge_getlink(struct sk_buff *skb, u32 pid,
						u32 seq, struct net_device *dev,
						u32 filter_mask, int nlflags)
{
	return -EOPNOTSUPP;
}

static inline int switchdev_port_bridge_setlink(struct net_device *dev,
						 struct nlmsghdr *nlh,
						 u16 flags)
{
	return -EOPNOTSUPP;
}

static inline int switchdev_port_bridge_dellink(struct net_device *dev,
						 struct nlmsghdr *nlh,
						 u16 flags)
{
	return -EOPNOTSUPP;
}

static inline int switchdev_fib_ipv4_add(u32 dst, int dst_len,
					 struct fib_info *fi,
					 u8 tos, u8 type,
					 u32 nlflags, u32 tb_id)
{
	return 0;
}

static inline int switchdev_fib_ipv4_del(u32 dst, int dst_len,
					 struct fib_info *fi,
					 u8 tos, u8 type, u32 tb_id)
{
	return 0;
}

static inline void switchdev_fib_ipv4_abort(struct fib_info *fi)
{
}

static inline int switchdev_port_fdb_add(struct ndmsg *ndm, struct nlattr *tb[],
					  struct net_device *dev,
					  const unsigned char *addr,
					  u16 vid, u16 nlm_flags)
{
	return -EOPNOTSUPP;
}

static inline int switchdev_port_fdb_del(struct ndmsg *ndm, struct nlattr *tb[],
					  struct net_device *dev,
					  const unsigned char *addr, u16 vid)
{
	return -EOPNOTSUPP;
}

static inline int switchdev_port_fdb_dump(struct sk_buff *skb,
					   struct netlink_callback *cb,
					   struct net_device *dev,
					   struct net_device *filter_dev,
					   int idx)
{
	return idx;
}

static inline void switchdev_port_fwd_mark_set(struct net_device *dev,
					       struct net_device *group_dev,
					       bool joining)
{
}

static inline bool switchdev_port_same_parent_id(struct net_device *a,
						 struct net_device *b)
{
	return false;
}

#endif

#endif /* _LINUX_SWITCHDEV_H_ */