IB/mlx5: Register DEVX with mlx5_core to get async events
Register DEVX with mlx5_core to get async events. This will enable dispatching the applicable events to its consumers in downstream patches.

Signed-off-by: Yishai Hadas <yishaih@mellanox.com>
Signed-off-by: Leon Romanovsky <leonro@mellanox.com>
Reviewed-by: Jason Gunthorpe <jgg@mellanox.com>
Signed-off-by: Jason Gunthorpe <jgg@mellanox.com>
parent 2afc5e1b9c
commit e337dd53ce
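The patch below only registers a stub notifier with mlx5_core's EQ notifier chain; actual dispatch to DEVX consumers is left to downstream patches. As a rough, illustrative sketch of where that goes (not part of this commit: the container_of() recovery of the table mirrors the registration in the patch, while keying event_xa by event type and the devx_dispatch_event() helper are assumptions), the stub could later resolve subscribers and notify them:

/*
 * Illustrative sketch only, not part of this patch: a possible shape for
 * devx_event_notifier() once downstream patches add real dispatch logic.
 */
static int devx_event_notifier(struct notifier_block *nb,
			       unsigned long event_type, void *data)
{
	/* MLX5_NB_INIT() embeds devx_nb in the event table, so the table
	 * can be recovered from the notifier_block pointer. */
	struct mlx5_devx_event_table *table =
		container_of(nb, struct mlx5_devx_event_table, devx_nb.nb);
	void *subscribers;

	/* Assumption: event_xa is keyed by event type and holds a
	 * per-event subscriber list populated by later patches. */
	subscribers = xa_load(&table->event_xa, event_type);
	if (!subscribers)
		return NOTIFY_DONE;

	/* devx_dispatch_event() is a hypothetical helper that would fan
	 * the hardware event ("data") out to the registered consumers. */
	/* devx_dispatch_event(subscribers, data); */

	return NOTIFY_OK;
}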
@@ -1663,6 +1663,36 @@ static int devx_umem_cleanup(struct ib_uobject *uobject,
 	return 0;
 }
 
+static int devx_event_notifier(struct notifier_block *nb,
+			       unsigned long event_type, void *data)
+{
+	return NOTIFY_DONE;
+}
+
+void mlx5_ib_devx_init_event_table(struct mlx5_ib_dev *dev)
+{
+	struct mlx5_devx_event_table *table = &dev->devx_event_table;
+
+	xa_init(&table->event_xa);
+	mutex_init(&table->event_xa_lock);
+	MLX5_NB_INIT(&table->devx_nb, devx_event_notifier, NOTIFY_ANY);
+	mlx5_eq_notifier_register(dev->mdev, &table->devx_nb);
+}
+
+void mlx5_ib_devx_cleanup_event_table(struct mlx5_ib_dev *dev)
+{
+	struct mlx5_devx_event_table *table = &dev->devx_event_table;
+	void *entry;
+	unsigned long id;
+
+	mlx5_eq_notifier_unregister(dev->mdev, &table->devx_nb);
+
+	xa_for_each(&table->event_xa, id, entry)
+		kfree(entry);
+
+	xa_destroy(&table->event_xa);
+}
+
 static ssize_t devx_async_cmd_event_read(struct file *filp, char __user *buf,
 					 size_t count, loff_t *pos)
 {
@@ -6630,15 +6630,19 @@ static int mlx5_ib_stage_devx_init(struct mlx5_ib_dev *dev)
 	int uid;
 
 	uid = mlx5_ib_devx_create(dev, false);
-	if (uid > 0)
+	if (uid > 0) {
 		dev->devx_whitelist_uid = uid;
+		mlx5_ib_devx_init_event_table(dev);
+	}
 
 	return 0;
 }
 static void mlx5_ib_stage_devx_cleanup(struct mlx5_ib_dev *dev)
 {
-	if (dev->devx_whitelist_uid)
+	if (dev->devx_whitelist_uid) {
+		mlx5_ib_devx_cleanup_event_table(dev);
 		mlx5_ib_devx_destroy(dev, dev->devx_whitelist_uid);
+	}
 }
 
 void __mlx5_ib_remove(struct mlx5_ib_dev *dev,
@@ -936,6 +936,13 @@ struct mlx5_ib_pf_eq {
 	mempool_t *pool;
 };
 
+struct mlx5_devx_event_table {
+	struct mlx5_nb devx_nb;
+	/* serialize updating the event_xa */
+	struct mutex event_xa_lock;
+	struct xarray event_xa;
+};
+
 struct mlx5_ib_dev {
 	struct ib_device	ib_dev;
 	struct mlx5_core_dev	*mdev;
@@ -985,6 +992,7 @@ struct mlx5_ib_dev {
 	u16			devx_whitelist_uid;
 	struct mlx5_srq_table	srq_table;
 	struct mlx5_async_ctx	async_ctx;
+	struct mlx5_devx_event_table devx_event_table;
 };
 
 static inline struct mlx5_ib_cq *to_mibcq(struct mlx5_core_cq *mcq)
@@ -1324,6 +1332,8 @@ void mlx5_ib_put_native_port_mdev(struct mlx5_ib_dev *dev,
 #if IS_ENABLED(CONFIG_INFINIBAND_USER_ACCESS)
 int mlx5_ib_devx_create(struct mlx5_ib_dev *dev, bool is_user);
 void mlx5_ib_devx_destroy(struct mlx5_ib_dev *dev, u16 uid);
+void mlx5_ib_devx_init_event_table(struct mlx5_ib_dev *dev);
+void mlx5_ib_devx_cleanup_event_table(struct mlx5_ib_dev *dev);
 const struct uverbs_object_tree_def *mlx5_ib_get_devx_tree(void);
 extern const struct uapi_definition mlx5_ib_devx_defs[];
 extern const struct uapi_definition mlx5_ib_flow_defs[];
@@ -1341,6 +1351,8 @@ static inline int
 mlx5_ib_devx_create(struct mlx5_ib_dev *dev,
 		    bool is_user) { return -EOPNOTSUPP; }
 static inline void mlx5_ib_devx_destroy(struct mlx5_ib_dev *dev, u16 uid) {}
+static inline void mlx5_ib_devx_init_event_table(struct mlx5_ib_dev *dev) {}
+static inline void mlx5_ib_devx_cleanup_event_table(struct mlx5_ib_dev *dev) {}
 static inline bool mlx5_ib_devx_is_flow_dest(void *obj, int *dest_id,
 					     int *dest_type)
 {