RDMA merge window pull request
Merge tag 'for-linus' of git://git.kernel.org/pub/scm/linux/kernel/git/rdma/rdma

Pull rdma updates from Jason Gunthorpe:
 "This is significantly bug fixes and general cleanups. The noteworthy
  new features are fairly small:

   - XRC support for HNS and improves RQ operations

   - Bug fixes and updates for hns, mlx5, bnxt_re, hfi1, i40iw, rxe, siw
     and qib

   - Quite a few general cleanups on spelling, error handling, static
     checker detections, etc

   - Increase the number of device ports supported beyond 255. High port
     count software switches now exist

   - Several bug fixes for rtrs

   - mlx5 Device Memory support for host controlled atomics

   - Report SRQ tables through to rdma-tool"

* tag 'for-linus' of git://git.kernel.org/pub/scm/linux/kernel/git/rdma/rdma: (145 commits)
  IB/qib: Remove redundant assignment to ret
  RDMA/nldev: Add copy-on-fork attribute to get sys command
  RDMA/bnxt_re: Fix a double free in bnxt_qplib_alloc_res
  RDMA/siw: Fix a use after free in siw_alloc_mr
  IB/hfi1: Remove redundant variable rcd
  RDMA/nldev: Add QP numbers to SRQ information
  RDMA/nldev: Return SRQ information
  RDMA/restrack: Add support to get resource tracking for SRQ
  RDMA/nldev: Return context information
  RDMA/core: Add CM to restrack after successful attachment to a device
  RDMA/cma: Skip device which doesn't support CM
  RDMA/rxe: Fix a bug in rxe_fill_ip_info()
  RDMA/mlx5: Expose private query port
  RDMA/mlx4: Remove an unused variable
  RDMA/mlx5: Fix type assignment for ICM DM
  IB/mlx5: Set right RoCE l3 type and roce version while deleting GID
  RDMA/i40iw: Fix error unwinding when i40iw_hmc_sd_one fails
  RDMA/cxgb4: add missing qpid increment
  IB/ipoib: Remove unnecessary struct declaration
  RDMA/bnxt_re: Get rid of custom module reference counting
  ...
commit f34b2cf178
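Most of the diff below is the mechanical side of the "more than 255 ports" item
above: RDMA port numbers are widened from u8 to u32 throughout the core. A
standalone C sketch (illustration only, not kernel code) of why the old type
was the hard limit:

	#include <stdint.h>
	#include <stdio.h>

	int main(void)
	{
		uint8_t old_port = 255;   /* u8: the largest representable port */
		uint32_t new_port = 255;  /* u32: room for large software switches */

		old_port++;               /* wraps to 0; port 256 cannot exist */
		new_port++;               /* 256, as intended */
		printf("u8 wraps to %u, u32 holds %u\n", old_port, new_port);
		return 0;
	}
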
@@ -34,6 +34,9 @@ Description:	Multipath policy specifies which path should be selected on each IO
 		min-inflight (1):
 		    select path with minimum inflights.
 
+		min-latency (2):
+		    select path with minimum latency.
+
 What:		/sys/class/rtrs-client/<session-name>/paths/
 Date:		Feb 2020
 KernelVersion:	5.7
@@ -95,6 +98,15 @@ KernelVersion:	5.7
 Contact:	Jack Wang <jinpu.wang@cloud.ionos.com> Danil Kipnis <danil.kipnis@cloud.ionos.com>
 Description:	RO, Contains the destination address of the path
 
+What:		/sys/class/rtrs-client/<session-name>/paths/<src@dst>/cur_latency
+Date:		Feb 2020
+KernelVersion:	5.7
+Contact:	Jack Wang <jinpu.wang@cloud.ionos.com> Danil Kipnis <danil.kipnis@cloud.ionos.com>
+Description:	RO, Contains the latency time calculated by the heart-beat messages.
+		Whenever the client sends heart-beat message, it checks the time gap
+		between sending the heart-beat message and receiving the ACK.
+		This value can be changed regularly.
+
 What:		/sys/class/rtrs-client/<session-name>/paths/<src@dst>/stats/reset_all
 Date:		Feb 2020
 KernelVersion:	5.7

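The cur_latency attribute added above is an ordinary read-only sysfs file, so
it can be sampled from userspace like any other attribute. A minimal sketch
(the SESSION and SRC@DST path components are placeholders to substitute from a
real rtrs-client session):

	#include <fcntl.h>
	#include <stdio.h>
	#include <unistd.h>

	int main(void)
	{
		/* Placeholder names - list /sys/class/rtrs-client/ for real ones. */
		const char *path =
			"/sys/class/rtrs-client/SESSION/paths/SRC@DST/cur_latency";
		char buf[64];
		ssize_t n;
		int fd = open(path, O_RDONLY);

		if (fd < 0)
			return 1;
		n = read(fd, buf, sizeof(buf) - 1);
		close(fd);
		if (n <= 0)
			return 1;
		buf[n] = '\0';
		printf("current path latency: %s", buf);
		return 0;
	}
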
@@ -1,7 +1,7 @@
 Hisilicon RoCE DT description
 
 Hisilicon RoCE engine is a part of network subsystem.
-It works depending on other part of network wubsytem, such as, gmac and
+It works depending on other part of network subsystem, such as gmac and
 dsa fabric.
 
 Additional properties are described here:

@@ -8226,7 +8226,6 @@ F:	drivers/crypto/hisilicon/zip/
 
 HISILICON ROCE DRIVER
 M:	Lijun Ou <oulijun@huawei.com>
-M:	Wei Hu(Xavier) <huwei87@hisilicon.com>
 M:	Weihang Li <liweihang@huawei.com>
 L:	linux-rdma@vger.kernel.org
 S:	Maintained
@@ -15826,8 +15825,8 @@ T:	git git://git.kernel.org/pub/scm/linux/kernel/git/jes/linux.git rtl8xxxu-deve
 F:	drivers/net/wireless/realtek/rtl8xxxu/
 
 RTRS TRANSPORT DRIVERS
-M:	Danil Kipnis <danil.kipnis@cloud.ionos.com>
-M:	Jack Wang <jinpu.wang@cloud.ionos.com>
+M:	Md. Haris Iqbal <haris.iqbal@ionos.com>
+M:	Jack Wang <jinpu.wang@ionos.com>
 L:	linux-rdma@vger.kernel.org
 S:	Maintained
 F:	drivers/infiniband/ulp/rtrs/

@@ -121,7 +121,7 @@ struct ib_gid_table {
 	u32 default_gid_indices;
 };
 
-static void dispatch_gid_change_event(struct ib_device *ib_dev, u8 port)
+static void dispatch_gid_change_event(struct ib_device *ib_dev, u32 port)
 {
 	struct ib_event event;
 
@@ -197,7 +197,7 @@ int ib_cache_gid_parse_type_str(const char *buf)
 }
 EXPORT_SYMBOL(ib_cache_gid_parse_type_str);
 
-static struct ib_gid_table *rdma_gid_table(struct ib_device *device, u8 port)
+static struct ib_gid_table *rdma_gid_table(struct ib_device *device, u32 port)
 {
 	return device->port_data[port].cache.gid;
 }
@@ -237,10 +237,10 @@ static void put_gid_ndev(struct rcu_head *head)
 static void free_gid_entry_locked(struct ib_gid_table_entry *entry)
 {
 	struct ib_device *device = entry->attr.device;
-	u8 port_num = entry->attr.port_num;
+	u32 port_num = entry->attr.port_num;
 	struct ib_gid_table *table = rdma_gid_table(device, port_num);
 
-	dev_dbg(&device->dev, "%s port=%d index=%d gid %pI6\n", __func__,
+	dev_dbg(&device->dev, "%s port=%u index=%d gid %pI6\n", __func__,
 		port_num, entry->attr.index, entry->attr.gid.raw);
 
 	write_lock_irq(&table->rwlock);
@@ -282,7 +282,7 @@ static void free_gid_work(struct work_struct *work)
 	struct ib_gid_table_entry *entry =
 		container_of(work, struct ib_gid_table_entry, del_work);
 	struct ib_device *device = entry->attr.device;
-	u8 port_num = entry->attr.port_num;
+	u32 port_num = entry->attr.port_num;
 	struct ib_gid_table *table = rdma_gid_table(device, port_num);
 
 	mutex_lock(&table->lock);
@@ -379,7 +379,7 @@ static int add_roce_gid(struct ib_gid_table_entry *entry)
  * @ix: GID entry index to delete
  *
  */
-static void del_gid(struct ib_device *ib_dev, u8 port,
+static void del_gid(struct ib_device *ib_dev, u32 port,
 		    struct ib_gid_table *table, int ix)
 {
 	struct roce_gid_ndev_storage *ndev_storage;
@@ -387,7 +387,7 @@ static void del_gid(struct ib_device *ib_dev, u8 port,
 
 	lockdep_assert_held(&table->lock);
 
-	dev_dbg(&ib_dev->dev, "%s port=%d index=%d gid %pI6\n", __func__, port,
+	dev_dbg(&ib_dev->dev, "%s port=%u index=%d gid %pI6\n", __func__, port,
 		ix, table->data_vec[ix]->attr.gid.raw);
 
 	write_lock_irq(&table->rwlock);
@@ -543,7 +543,7 @@ static void make_default_gid(struct net_device *dev, union ib_gid *gid)
 	addrconf_ifid_eui48(&gid->raw[8], dev);
 }
 
-static int __ib_cache_gid_add(struct ib_device *ib_dev, u8 port,
+static int __ib_cache_gid_add(struct ib_device *ib_dev, u32 port,
 			      union ib_gid *gid, struct ib_gid_attr *attr,
 			      unsigned long mask, bool default_gid)
 {
@@ -587,7 +587,7 @@ static int __ib_cache_gid_add(struct ib_device *ib_dev, u8 port,
 	return ret;
 }
 
-int ib_cache_gid_add(struct ib_device *ib_dev, u8 port,
+int ib_cache_gid_add(struct ib_device *ib_dev, u32 port,
 		     union ib_gid *gid, struct ib_gid_attr *attr)
 {
 	unsigned long mask = GID_ATTR_FIND_MASK_GID |
@@ -598,7 +598,7 @@ int ib_cache_gid_add(struct ib_device *ib_dev, u8 port,
 }
 
 static int
-_ib_cache_gid_del(struct ib_device *ib_dev, u8 port,
+_ib_cache_gid_del(struct ib_device *ib_dev, u32 port,
 		  union ib_gid *gid, struct ib_gid_attr *attr,
 		  unsigned long mask, bool default_gid)
 {
@@ -627,7 +627,7 @@ _ib_cache_gid_del(struct ib_device *ib_dev, u8 port,
 	return ret;
 }
 
-int ib_cache_gid_del(struct ib_device *ib_dev, u8 port,
+int ib_cache_gid_del(struct ib_device *ib_dev, u32 port,
 		     union ib_gid *gid, struct ib_gid_attr *attr)
 {
 	unsigned long mask = GID_ATTR_FIND_MASK_GID |
@@ -638,7 +638,7 @@ int ib_cache_gid_del(struct ib_device *ib_dev, u8 port,
 	return _ib_cache_gid_del(ib_dev, port, gid, attr, mask, false);
 }
 
-int ib_cache_gid_del_all_netdev_gids(struct ib_device *ib_dev, u8 port,
+int ib_cache_gid_del_all_netdev_gids(struct ib_device *ib_dev, u32 port,
 				     struct net_device *ndev)
 {
 	struct ib_gid_table *table;
@@ -683,7 +683,7 @@ const struct ib_gid_attr *
 rdma_find_gid_by_port(struct ib_device *ib_dev,
 		      const union ib_gid *gid,
 		      enum ib_gid_type gid_type,
-		      u8 port, struct net_device *ndev)
+		      u32 port, struct net_device *ndev)
 {
 	int local_index;
 	struct ib_gid_table *table;
@@ -734,7 +734,7 @@ EXPORT_SYMBOL(rdma_find_gid_by_port);
  *
  */
 const struct ib_gid_attr *rdma_find_gid_by_filter(
-	struct ib_device *ib_dev, const union ib_gid *gid, u8 port,
+	struct ib_device *ib_dev, const union ib_gid *gid, u32 port,
 	bool (*filter)(const union ib_gid *gid, const struct ib_gid_attr *,
 		       void *),
 	void *context)
@@ -818,7 +818,7 @@ static void release_gid_table(struct ib_device *device,
 	kfree(table);
 }
 
-static void cleanup_gid_table_port(struct ib_device *ib_dev, u8 port,
+static void cleanup_gid_table_port(struct ib_device *ib_dev, u32 port,
 				   struct ib_gid_table *table)
 {
 	int i;
@@ -834,7 +834,7 @@ static void cleanup_gid_table_port(struct ib_device *ib_dev, u8 port,
 	mutex_unlock(&table->lock);
 }
 
-void ib_cache_gid_set_default_gid(struct ib_device *ib_dev, u8 port,
+void ib_cache_gid_set_default_gid(struct ib_device *ib_dev, u32 port,
 				  struct net_device *ndev,
 				  unsigned long gid_type_mask,
 				  enum ib_cache_gid_default_mode mode)
@@ -867,7 +867,7 @@ void ib_cache_gid_set_default_gid(struct ib_device *ib_dev, u8 port,
 	}
 }
 
-static void gid_table_reserve_default(struct ib_device *ib_dev, u8 port,
+static void gid_table_reserve_default(struct ib_device *ib_dev, u32 port,
 				      struct ib_gid_table *table)
 {
 	unsigned int i;
@@ -884,7 +884,7 @@ static void gid_table_reserve_default(struct ib_device *ib_dev, u8 port,
 
 static void gid_table_release_one(struct ib_device *ib_dev)
 {
-	unsigned int p;
+	u32 p;
 
 	rdma_for_each_port (ib_dev, p) {
 		release_gid_table(ib_dev, ib_dev->port_data[p].cache.gid);
@@ -895,7 +895,7 @@ static void gid_table_release_one(struct ib_device *ib_dev)
 static int _gid_table_setup_one(struct ib_device *ib_dev)
 {
 	struct ib_gid_table *table;
-	unsigned int rdma_port;
+	u32 rdma_port;
 
 	rdma_for_each_port (ib_dev, rdma_port) {
 		table = alloc_gid_table(
@@ -915,7 +915,7 @@ static int _gid_table_setup_one(struct ib_device *ib_dev)
 
 static void gid_table_cleanup_one(struct ib_device *ib_dev)
 {
-	unsigned int p;
+	u32 p;
 
 	rdma_for_each_port (ib_dev, p)
 		cleanup_gid_table_port(ib_dev, p,
@@ -950,7 +950,7 @@ static int gid_table_setup_one(struct ib_device *ib_dev)
  * Returns 0 on success or appropriate error code.
  *
  */
-int rdma_query_gid(struct ib_device *device, u8 port_num,
+int rdma_query_gid(struct ib_device *device, u32 port_num,
 		   int index, union ib_gid *gid)
 {
 	struct ib_gid_table *table;
@@ -1014,7 +1014,7 @@ const struct ib_gid_attr *rdma_find_gid(struct ib_device *device,
 	unsigned long mask = GID_ATTR_FIND_MASK_GID |
 			     GID_ATTR_FIND_MASK_GID_TYPE;
 	struct ib_gid_attr gid_attr_val = {.ndev = ndev, .gid_type = gid_type};
-	unsigned int p;
+	u32 p;
 
 	if (ndev)
 		mask |= GID_ATTR_FIND_MASK_NETDEV;
@@ -1043,7 +1043,7 @@ const struct ib_gid_attr *rdma_find_gid(struct ib_device *device,
 EXPORT_SYMBOL(rdma_find_gid);
 
 int ib_get_cached_pkey(struct ib_device *device,
-		       u8 port_num,
+		       u32 port_num,
 		       int index,
 		       u16 *pkey)
 {
@@ -1069,9 +1069,8 @@ int ib_get_cached_pkey(struct ib_device *device,
 }
 EXPORT_SYMBOL(ib_get_cached_pkey);
 
-int ib_get_cached_subnet_prefix(struct ib_device *device,
-				u8 port_num,
-				u64 *sn_pfx)
+int ib_get_cached_subnet_prefix(struct ib_device *device, u32 port_num,
+				u64 *sn_pfx)
 {
 	unsigned long flags;
 
@@ -1086,10 +1085,8 @@ int ib_get_cached_subnet_prefix(struct ib_device *device,
 }
 EXPORT_SYMBOL(ib_get_cached_subnet_prefix);
 
-int ib_find_cached_pkey(struct ib_device *device,
-			u8 port_num,
-			u16 pkey,
-			u16 *index)
+int ib_find_cached_pkey(struct ib_device *device, u32 port_num,
+			u16 pkey, u16 *index)
 {
 	struct ib_pkey_cache *cache;
 	unsigned long flags;
@@ -1116,8 +1113,9 @@ int ib_find_cached_pkey(struct ib_device *device,
 			*index = i;
 			ret = 0;
 			break;
-		} else
+		} else {
 			partial_ix = i;
+		}
 	}
 
 	if (ret && partial_ix >= 0) {
@@ -1132,10 +1130,8 @@ int ib_find_cached_pkey(struct ib_device *device,
 }
 EXPORT_SYMBOL(ib_find_cached_pkey);
 
-int ib_find_exact_cached_pkey(struct ib_device *device,
-			      u8 port_num,
-			      u16 pkey,
-			      u16 *index)
+int ib_find_exact_cached_pkey(struct ib_device *device, u32 port_num,
			      u16 pkey, u16 *index)
 {
 	struct ib_pkey_cache *cache;
 	unsigned long flags;
@@ -1169,9 +1165,7 @@ int ib_find_exact_cached_pkey(struct ib_device *device,
 }
 EXPORT_SYMBOL(ib_find_exact_cached_pkey);
 
-int ib_get_cached_lmc(struct ib_device *device,
-		      u8 port_num,
-		      u8 *lmc)
+int ib_get_cached_lmc(struct ib_device *device, u32 port_num, u8 *lmc)
 {
 	unsigned long flags;
 	int ret = 0;
@@ -1187,8 +1181,7 @@ int ib_get_cached_lmc(struct ib_device *device,
 }
 EXPORT_SYMBOL(ib_get_cached_lmc);
 
-int ib_get_cached_port_state(struct ib_device *device,
-			     u8 port_num,
+int ib_get_cached_port_state(struct ib_device *device, u32 port_num,
 			     enum ib_port_state *port_state)
 {
 	unsigned long flags;
@@ -1222,7 +1215,7 @@ EXPORT_SYMBOL(ib_get_cached_port_state);
  * code.
  */
 const struct ib_gid_attr *
-rdma_get_gid_attr(struct ib_device *device, u8 port_num, int index)
+rdma_get_gid_attr(struct ib_device *device, u32 port_num, int index)
 {
 	const struct ib_gid_attr *attr = ERR_PTR(-ENODATA);
 	struct ib_gid_table *table;
@@ -1263,7 +1256,7 @@ ssize_t rdma_query_gid_table(struct ib_device *device,
 	const struct ib_gid_attr *gid_attr;
 	ssize_t num_entries = 0, ret;
 	struct ib_gid_table *table;
-	unsigned int port_num, i;
+	u32 port_num, i;
 	struct net_device *ndev;
 	unsigned long flags;
 
@@ -1361,7 +1354,7 @@ struct net_device *rdma_read_gid_attr_ndev_rcu(const struct ib_gid_attr *attr)
 		container_of(attr, struct ib_gid_table_entry, attr);
 	struct ib_device *device = entry->attr.device;
 	struct net_device *ndev = ERR_PTR(-EINVAL);
-	u8 port_num = entry->attr.port_num;
+	u32 port_num = entry->attr.port_num;
 	struct ib_gid_table *table;
 	unsigned long flags;
 	bool valid;
@@ -1441,7 +1434,7 @@ int rdma_read_gid_l2_fields(const struct ib_gid_attr *attr,
 EXPORT_SYMBOL(rdma_read_gid_l2_fields);
 
 static int config_non_roce_gid_cache(struct ib_device *device,
-				     u8 port, int gid_tbl_len)
+				     u32 port, int gid_tbl_len)
 {
 	struct ib_gid_attr gid_attr = {};
 	struct ib_gid_table *table;
@@ -1472,7 +1465,7 @@ static int config_non_roce_gid_cache(struct ib_device *device,
 }
 
 static int
-ib_cache_update(struct ib_device *device, u8 port, bool enforce_security)
+ib_cache_update(struct ib_device *device, u32 port, bool enforce_security)
 {
 	struct ib_port_attr *tprops = NULL;
 	struct ib_pkey_cache *pkey_cache = NULL, *old_pkey_cache;
@@ -1621,7 +1614,7 @@ EXPORT_SYMBOL(ib_dispatch_event);
 
 int ib_cache_setup_one(struct ib_device *device)
 {
-	unsigned int p;
+	u32 p;
 	int err;
 
 	rwlock_init(&device->cache_lock);
@@ -1641,7 +1634,7 @@ int ib_cache_setup_one(struct ib_device *device)
 
 void ib_cache_release_one(struct ib_device *device)
 {
-	unsigned int p;
+	u32 p;
 
 	/*
 	 * The release function frees all the cache elements.

@@ -202,7 +202,7 @@ static struct attribute *cm_counter_default_attrs[] = {
 struct cm_port {
 	struct cm_device *cm_dev;
 	struct ib_mad_agent *mad_agent;
-	u8 port_num;
+	u32 port_num;
 	struct list_head cm_priv_prim_list;
 	struct list_head cm_priv_altr_list;
 	struct cm_counter_group counter_group[CM_COUNTER_GROUPS];
@@ -255,7 +255,8 @@ struct cm_id_private {
 	struct completion comp;
 	refcount_t refcount;
 	/* Number of clients sharing this ib_cm_id. Only valid for listeners.
-	 * Protected by the cm.lock spinlock. */
+	 * Protected by the cm.lock spinlock.
+	 */
 	int listen_sharecount;
 	struct rcu_head rcu;
 
@@ -420,8 +421,7 @@ static int cm_alloc_response_msg(struct cm_port *port,
 	return 0;
 }
 
-static void * cm_copy_private_data(const void *private_data,
-				   u8 private_data_len)
+static void *cm_copy_private_data(const void *private_data, u8 private_data_len)
 {
 	void *data;
 
@@ -680,8 +680,8 @@ static struct cm_id_private *cm_insert_listen(struct cm_id_private *cm_id_priv,
 	return cm_id_priv;
 }
 
-static struct cm_id_private * cm_find_listen(struct ib_device *device,
-					     __be64 service_id)
+static struct cm_id_private *cm_find_listen(struct ib_device *device,
+					    __be64 service_id)
 {
 	struct rb_node *node = cm.listen_service_table.rb_node;
 	struct cm_id_private *cm_id_priv;
@@ -708,8 +708,8 @@ static struct cm_id_private * cm_find_listen(struct ib_device *device,
 	return NULL;
 }
 
-static struct cm_timewait_info * cm_insert_remote_id(struct cm_timewait_info
-						     *timewait_info)
+static struct cm_timewait_info *
+cm_insert_remote_id(struct cm_timewait_info *timewait_info)
 {
 	struct rb_node **link = &cm.remote_id_table.rb_node;
 	struct rb_node *parent = NULL;
@@ -767,8 +767,8 @@ static struct cm_id_private *cm_find_remote_id(__be64 remote_ca_guid,
 	return res;
 }
 
-static struct cm_timewait_info * cm_insert_remote_qpn(struct cm_timewait_info
-						      *timewait_info)
+static struct cm_timewait_info *
+cm_insert_remote_qpn(struct cm_timewait_info *timewait_info)
 {
 	struct rb_node **link = &cm.remote_qp_table.rb_node;
 	struct rb_node *parent = NULL;
@@ -797,8 +797,8 @@ static struct cm_timewait_info * cm_insert_remote_qpn(struct cm_timewait_info
 	return NULL;
 }
 
-static struct cm_id_private * cm_insert_remote_sidr(struct cm_id_private
-						    *cm_id_priv)
+static struct cm_id_private *
+cm_insert_remote_sidr(struct cm_id_private *cm_id_priv)
 {
 	struct rb_node **link = &cm.remote_sidr_table.rb_node;
 	struct rb_node *parent = NULL;
@@ -897,7 +897,7 @@ struct ib_cm_id *ib_create_cm_id(struct ib_device *device,
 }
 EXPORT_SYMBOL(ib_create_cm_id);
 
-static struct cm_work * cm_dequeue_work(struct cm_id_private *cm_id_priv)
+static struct cm_work *cm_dequeue_work(struct cm_id_private *cm_id_priv)
 {
 	struct cm_work *work;
 
@@ -986,7 +986,7 @@ static void cm_remove_remote(struct cm_id_private *cm_id_priv)
 	}
 }
 
-static struct cm_timewait_info * cm_create_timewait_info(__be32 local_id)
+static struct cm_timewait_info *cm_create_timewait_info(__be32 local_id)
 {
 	struct cm_timewait_info *timewait_info;
 
@@ -1631,7 +1631,7 @@ static bool cm_req_has_alt_path(struct cm_req_msg *req_msg)
 						req_msg))));
 }
 
-static void cm_path_set_rec_type(struct ib_device *ib_device, u8 port_num,
+static void cm_path_set_rec_type(struct ib_device *ib_device, u32 port_num,
 				 struct sa_path_rec *path, union ib_gid *gid)
 {
 	if (ib_is_opa_gid(gid) && rdma_cap_opa_ah(ib_device, port_num))
@@ -1750,7 +1750,7 @@ static void cm_format_paths_from_req(struct cm_req_msg *req_msg,
 static u16 cm_get_bth_pkey(struct cm_work *work)
 {
 	struct ib_device *ib_dev = work->port->cm_dev->ib_device;
-	u8 port_num = work->port->port_num;
+	u32 port_num = work->port->port_num;
 	u16 pkey_index = work->mad_recv_wc->wc->pkey_index;
 	u16 pkey;
 	int ret;
@@ -1778,7 +1778,7 @@ static void cm_opa_to_ib_sgid(struct cm_work *work,
 			      struct sa_path_rec *path)
 {
 	struct ib_device *dev = work->port->cm_dev->ib_device;
-	u8 port_num = work->port->port_num;
+	u32 port_num = work->port->port_num;
 
 	if (rdma_cap_opa_ah(dev, port_num) &&
 	    (ib_is_opa_gid(&path->sgid))) {
@@ -1977,8 +1977,8 @@ unlock:	spin_unlock_irq(&cm_id_priv->lock);
 free:	cm_free_msg(msg);
 }
 
-static struct cm_id_private * cm_match_req(struct cm_work *work,
-					   struct cm_id_private *cm_id_priv)
+static struct cm_id_private *cm_match_req(struct cm_work *work,
+					  struct cm_id_private *cm_id_priv)
 {
 	struct cm_id_private *listen_cm_id_priv, *cur_cm_id_priv;
 	struct cm_timewait_info *timewait_info;
@@ -2138,20 +2138,17 @@ static int cm_req_handler(struct cm_work *work)
 		goto destroy;
 	}
 
-	cm_process_routed_req(req_msg, work->mad_recv_wc->wc);
-
 	memset(&work->path[0], 0, sizeof(work->path[0]));
 	if (cm_req_has_alt_path(req_msg))
 		memset(&work->path[1], 0, sizeof(work->path[1]));
 	grh = rdma_ah_read_grh(&cm_id_priv->av.ah_attr);
 	gid_attr = grh->sgid_attr;
 
-	if (gid_attr &&
-	    rdma_protocol_roce(work->port->cm_dev->ib_device,
-			       work->port->port_num)) {
+	if (cm_id_priv->av.ah_attr.type == RDMA_AH_ATTR_TYPE_ROCE) {
 		work->path[0].rec_type =
 			sa_conv_gid_to_pathrec_type(gid_attr->gid_type);
 	} else {
+		cm_process_routed_req(req_msg, work->mad_recv_wc->wc);
 		cm_path_set_rec_type(
 			work->port->cm_dev->ib_device, work->port->port_num,
 			&work->path[0],
@@ -2993,7 +2990,7 @@ static void cm_format_rej_event(struct cm_work *work)
 		IBA_GET_MEM_PTR(CM_REJ_PRIVATE_DATA, rej_msg);
 }
 
-static struct cm_id_private * cm_acquire_rejected_id(struct cm_rej_msg *rej_msg)
+static struct cm_id_private *cm_acquire_rejected_id(struct cm_rej_msg *rej_msg)
 {
 	struct cm_id_private *cm_id_priv;
 	__be32 remote_id;
@@ -3098,7 +3095,7 @@ int ib_send_cm_mra(struct ib_cm_id *cm_id,
 	cm_id_priv = container_of(cm_id, struct cm_id_private, id);
 
 	spin_lock_irqsave(&cm_id_priv->lock, flags);
-	switch(cm_id_priv->id.state) {
+	switch (cm_id_priv->id.state) {
 	case IB_CM_REQ_RCVD:
 		cm_state = IB_CM_MRA_REQ_SENT;
 		lap_state = cm_id->lap_state;
@@ -3155,7 +3152,7 @@ error2:	spin_unlock_irqrestore(&cm_id_priv->lock, flags);
 }
 EXPORT_SYMBOL(ib_send_cm_mra);
 
-static struct cm_id_private * cm_acquire_mraed_id(struct cm_mra_msg *mra_msg)
+static struct cm_id_private *cm_acquire_mraed_id(struct cm_mra_msg *mra_msg)
 {
 	switch (IBA_GET(CM_MRA_MESSAGE_MRAED, mra_msg)) {
 	case CM_MSG_RESPONSE_REQ:
@@ -3917,8 +3914,7 @@ static int cm_establish(struct ib_cm_id *cm_id)
 
 	cm_id_priv = container_of(cm_id, struct cm_id_private, id);
 	spin_lock_irqsave(&cm_id_priv->lock, flags);
-	switch (cm_id->state)
-	{
+	switch (cm_id->state) {
 	case IB_CM_REP_SENT:
 	case IB_CM_MRA_REP_RCVD:
 		cm_id->state = IB_CM_ESTABLISHED;
@@ -4334,7 +4330,7 @@ static int cm_add_one(struct ib_device *ib_device)
 	unsigned long flags;
 	int ret;
 	int count = 0;
-	unsigned int i;
+	u32 i;
 
 	cm_dev = kzalloc(struct_size(cm_dev, port, ib_device->phys_port_cnt),
 			 GFP_KERNEL);
@@ -4432,7 +4428,7 @@ static void cm_remove_one(struct ib_device *ib_device, void *client_data)
 		.clr_port_cap_mask = IB_PORT_CM_SUP
 	};
 	unsigned long flags;
-	unsigned int i;
+	u32 i;
 
 	write_lock_irqsave(&cm.device_lock, flags);
 	list_del(&cm_dev->list);

@@ -22,7 +22,7 @@
 static inline enum ib_qp_type cm_req_get_qp_type(struct cm_req_msg *req_msg)
 {
 	u8 transport_type = IBA_GET(CM_REQ_TRANSPORT_SERVICE_TYPE, req_msg);
-	switch(transport_type) {
+	switch (transport_type) {
 	case 0: return IB_QPT_RC;
 	case 1: return IB_QPT_UC;
 	case 3:
@@ -37,7 +37,7 @@ static inline enum ib_qp_type cm_req_get_qp_type(struct cm_req_msg *req_msg)
 static inline void cm_req_set_qp_type(struct cm_req_msg *req_msg,
 				      enum ib_qp_type qp_type)
 {
-	switch(qp_type) {
+	switch (qp_type) {
 	case IB_QPT_UC:
 		IBA_SET(CM_REQ_TRANSPORT_SERVICE_TYPE, req_msg, 1);
 		break;

@@ -43,7 +43,6 @@ MODULE_DESCRIPTION("Generic RDMA CM Agent");
 MODULE_LICENSE("Dual BSD/GPL");
 
 #define CMA_CM_RESPONSE_TIMEOUT 20
-#define CMA_QUERY_CLASSPORT_INFO_TIMEOUT 3000
 #define CMA_MAX_CM_RETRIES 15
 #define CMA_CM_MRA_SETTING (IB_CM_MRA_FLAG_DELAY | 24)
 #define CMA_IBOE_PACKET_LIFETIME 18
@@ -219,14 +218,6 @@ struct rdma_bind_list {
 	unsigned short port;
 };
 
-struct class_port_info_context {
-	struct ib_class_port_info *class_port_info;
-	struct ib_device *device;
-	struct completion done;
-	struct ib_sa_query *sa_query;
-	u8 port_num;
-};
-
 static int cma_ps_alloc(struct net *net, enum rdma_ucm_port_space ps,
 			struct rdma_bind_list *bind_list, int snum)
 {
@@ -287,7 +278,7 @@ struct cma_device *cma_enum_devices_by_ibdev(cma_device_filter filter,
 }
 
 int cma_get_default_gid_type(struct cma_device *cma_dev,
-			     unsigned int port)
+			     u32 port)
 {
 	if (!rdma_is_port_valid(cma_dev->device, port))
 		return -EINVAL;
@@ -296,7 +287,7 @@ int cma_get_default_gid_type(struct cma_device *cma_dev,
 }
 
 int cma_set_default_gid_type(struct cma_device *cma_dev,
-			     unsigned int port,
+			     u32 port,
 			     enum ib_gid_type default_gid_type)
 {
 	unsigned long supported_gids;
@@ -319,7 +310,7 @@ int cma_set_default_gid_type(struct cma_device *cma_dev,
 	return 0;
 }
 
-int cma_get_default_roce_tos(struct cma_device *cma_dev, unsigned int port)
+int cma_get_default_roce_tos(struct cma_device *cma_dev, u32 port)
 {
 	if (!rdma_is_port_valid(cma_dev->device, port))
 		return -EINVAL;
@@ -327,7 +318,7 @@ int cma_get_default_roce_tos(struct cma_device *cma_dev, unsigned int port)
 	return cma_dev->default_roce_tos[port - rdma_start_port(cma_dev->device)];
 }
 
-int cma_set_default_roce_tos(struct cma_device *cma_dev, unsigned int port,
+int cma_set_default_roce_tos(struct cma_device *cma_dev, u32 port,
 			     u8 default_roce_tos)
 {
 	if (!rdma_is_port_valid(cma_dev->device, port))
@@ -463,7 +454,6 @@ static void _cma_attach_to_dev(struct rdma_id_private *id_priv,
 	id_priv->id.route.addr.dev_addr.transport =
 		rdma_node_get_transport(cma_dev->device->node_type);
 	list_add_tail(&id_priv->list, &cma_dev->id_list);
-	rdma_restrack_add(&id_priv->res);
 
 	trace_cm_id_attach(id_priv, cma_dev->device);
 }
@@ -562,7 +552,7 @@ static int cma_translate_addr(struct sockaddr *addr, struct rdma_dev_addr *dev_a
 }
 
 static const struct ib_gid_attr *
-cma_validate_port(struct ib_device *device, u8 port,
+cma_validate_port(struct ib_device *device, u32 port,
 		  enum ib_gid_type gid_type,
 		  union ib_gid *gid,
 		  struct rdma_id_private *id_priv)
@@ -620,7 +610,7 @@ static int cma_acquire_dev_by_src_ip(struct rdma_id_private *id_priv)
 	struct cma_device *cma_dev;
 	enum ib_gid_type gid_type;
 	int ret = -ENODEV;
-	unsigned int port;
+	u32 port;
 
 	if (dev_addr->dev_type != ARPHRD_INFINIBAND &&
 	    id_priv->id.ps == RDMA_PS_IPOIB)
@@ -700,6 +690,7 @@ static int cma_ib_acquire_dev(struct rdma_id_private *id_priv,
 	mutex_lock(&lock);
 	cma_attach_to_dev(id_priv, listen_id_priv->cma_dev);
 	mutex_unlock(&lock);
+	rdma_restrack_add(&id_priv->res);
 	return 0;
 }
 
@@ -711,8 +702,8 @@ static int cma_iw_acquire_dev(struct rdma_id_private *id_priv,
 	struct cma_device *cma_dev;
 	enum ib_gid_type gid_type;
 	int ret = -ENODEV;
-	unsigned int port;
 	union ib_gid gid;
+	u32 port;
 
 	if (dev_addr->dev_type != ARPHRD_INFINIBAND &&
 	    id_priv->id.ps == RDMA_PS_IPOIB)
@@ -754,8 +745,10 @@ static int cma_iw_acquire_dev(struct rdma_id_private *id_priv,
 	}
 
 out:
-	if (!ret)
+	if (!ret) {
 		cma_attach_to_dev(id_priv, cma_dev);
+		rdma_restrack_add(&id_priv->res);
+	}
 
 	mutex_unlock(&lock);
 	return ret;
@@ -816,6 +809,7 @@ static int cma_resolve_ib_dev(struct rdma_id_private *id_priv)
 
 found:
 	cma_attach_to_dev(id_priv, cma_dev);
+	rdma_restrack_add(&id_priv->res);
 	mutex_unlock(&lock);
 	addr = (struct sockaddr_ib *)cma_src_addr(id_priv);
 	memcpy(&addr->sib_addr, &sgid, sizeof(sgid));
@@ -852,6 +846,7 @@ __rdma_create_id(struct net *net, rdma_cm_event_handler event_handler,
 	id_priv->id.qp_type = qp_type;
 	id_priv->tos_set = false;
 	id_priv->timeout_set = false;
+	id_priv->min_rnr_timer_set = false;
 	id_priv->gid_type = IB_GID_TYPE_IB;
 	spin_lock_init(&id_priv->lock);
 	mutex_init(&id_priv->qp_mutex);
@@ -1135,12 +1130,16 @@ int rdma_init_qp_attr(struct rdma_cm_id *id, struct ib_qp_attr *qp_attr,
 					 qp_attr_mask);
 		qp_attr->port_num = id_priv->id.port_num;
 		*qp_attr_mask |= IB_QP_PORT;
-	} else
+	} else {
 		ret = -ENOSYS;
+	}
 
 	if ((*qp_attr_mask & IB_QP_TIMEOUT) && id_priv->timeout_set)
 		qp_attr->timeout = id_priv->timeout;
 
+	if ((*qp_attr_mask & IB_QP_MIN_RNR_TIMER) && id_priv->min_rnr_timer_set)
+		qp_attr->min_rnr_timer = id_priv->min_rnr_timer;
+
 	return ret;
 }
 EXPORT_SYMBOL(rdma_init_qp_attr);
@@ -1581,7 +1580,7 @@ static bool cma_match_private_data(struct rdma_id_private *id_priv,
 static bool cma_protocol_roce(const struct rdma_cm_id *id)
 {
 	struct ib_device *device = id->device;
-	const int port_num = id->port_num ?: rdma_start_port(device);
+	const u32 port_num = id->port_num ?: rdma_start_port(device);
 
 	return rdma_protocol_roce(device, port_num);
 }
@@ -2474,6 +2473,7 @@ static int cma_iw_listen(struct rdma_id_private *id_priv, int backlog)
 
 	id->tos = id_priv->tos;
 	id->tos_set = id_priv->tos_set;
+	id->afonly = id_priv->afonly;
 	id_priv->cm_id.iw = id;
 
 	memcpy(&id_priv->cm_id.iw->local_addr, cma_src_addr(id_priv),
@@ -2529,6 +2529,7 @@ static int cma_listen_on_dev(struct rdma_id_private *id_priv,
 	       rdma_addr_size(cma_src_addr(id_priv)));
 
 	_cma_attach_to_dev(dev_id_priv, cma_dev);
+	rdma_restrack_add(&dev_id_priv->res);
 	cma_id_get(id_priv);
 	dev_id_priv->internal_id = 1;
 	dev_id_priv->afonly = id_priv->afonly;
@@ -2615,6 +2616,43 @@ int rdma_set_ack_timeout(struct rdma_cm_id *id, u8 timeout)
 }
 EXPORT_SYMBOL(rdma_set_ack_timeout);
 
+/**
+ * rdma_set_min_rnr_timer() - Set the minimum RNR Retry timer of the
+ *			      QP associated with a connection identifier.
+ * @id: Communication identifier to associated with service type.
+ * @min_rnr_timer: 5-bit value encoded as Table 45: "Encoding for RNR NAK
+ *		   Timer Field" in the IBTA specification.
+ *
+ * This function should be called before rdma_connect() on active
+ * side, and on passive side before rdma_accept(). The timer value
+ * will be associated with the local QP. When it receives a send it is
+ * not read to handle, typically if the receive queue is empty, an RNR
+ * Retry NAK is returned to the requester with the min_rnr_timer
+ * encoded. The requester will then wait at least the time specified
+ * in the NAK before retrying. The default is zero, which translates
+ * to a minimum RNR Timer value of 655 ms.
+ *
+ * Return: 0 for success
+ */
+int rdma_set_min_rnr_timer(struct rdma_cm_id *id, u8 min_rnr_timer)
+{
+	struct rdma_id_private *id_priv;
+
+	/* It is a five-bit value */
+	if (min_rnr_timer & 0xe0)
+		return -EINVAL;
+
+	if (WARN_ON(id->qp_type != IB_QPT_RC && id->qp_type != IB_QPT_XRC_TGT))
+		return -EINVAL;
+
+	id_priv = container_of(id, struct rdma_id_private, id);
+	id_priv->min_rnr_timer = min_rnr_timer;
+	id_priv->min_rnr_timer_set = true;
+
+	return 0;
+}
+EXPORT_SYMBOL(rdma_set_min_rnr_timer);
+
 static void cma_query_handler(int status, struct sa_path_rec *path_rec,
 			      void *context)
 {
@@ -3169,6 +3207,7 @@ static int cma_bind_loopback(struct rdma_id_private *id_priv)
 	ib_addr_set_pkey(&id_priv->id.route.addr.dev_addr, pkey);
 	id_priv->id.port_num = p;
 	cma_attach_to_dev(id_priv, cma_dev);
+	rdma_restrack_add(&id_priv->res);
 	cma_set_loopback(cma_src_addr(id_priv));
 out:
 	mutex_unlock(&lock);
@@ -3201,6 +3240,7 @@ static void addr_handler(int status, struct sockaddr *src_addr,
 		if (status)
 			pr_debug_ratelimited("RDMA CM: ADDR_ERROR: failed to acquire device. status %d\n",
 					     status);
+		rdma_restrack_add(&id_priv->res);
 	} else if (status) {
 		pr_debug_ratelimited("RDMA CM: ADDR_ERROR: failed to resolve IP. status %d\n", status);
 	}
@@ -3812,6 +3852,8 @@ int rdma_bind_addr(struct rdma_cm_id *id, struct sockaddr *addr)
 	if (ret)
 		goto err2;
 
+	if (!cma_any_addr(addr))
+		rdma_restrack_add(&id_priv->res);
 	return 0;
 err2:
 	if (id_priv->cma_dev)
@@ -4124,10 +4166,11 @@ int rdma_connect_locked(struct rdma_cm_id *id,
 			ret = cma_resolve_ib_udp(id_priv, conn_param);
 		else
 			ret = cma_connect_ib(id_priv, conn_param);
-	} else if (rdma_cap_iw_cm(id->device, id->port_num))
+	} else if (rdma_cap_iw_cm(id->device, id->port_num)) {
 		ret = cma_connect_iw(id_priv, conn_param);
-	else
+	} else {
 		ret = -ENOSYS;
+	}
 	if (ret)
 		goto err_state;
 	return 0;
@@ -4234,9 +4277,9 @@ static int cma_accept_iw(struct rdma_id_private *id_priv,
 	iw_param.ird = conn_param->responder_resources;
 	iw_param.private_data = conn_param->private_data;
 	iw_param.private_data_len = conn_param->private_data_len;
-	if (id_priv->id.qp) {
+	if (id_priv->id.qp)
 		iw_param.qpn = id_priv->qp_num;
-	} else
+	else
 		iw_param.qpn = conn_param->qp_num;
 
 	return iw_cm_accept(id_priv->cm_id.iw, &iw_param);
@@ -4319,11 +4362,11 @@ int rdma_accept(struct rdma_cm_id *id, struct rdma_conn_param *conn_param)
 		else
 			ret = cma_rep_recv(id_priv);
 		}
-	} else if (rdma_cap_iw_cm(id->device, id->port_num))
+	} else if (rdma_cap_iw_cm(id->device, id->port_num)) {
 		ret = cma_accept_iw(id_priv, conn_param);
-	else
+	} else {
 		ret = -ENOSYS;
-
+	}
 	if (ret)
 		goto reject;
 
@@ -4409,8 +4452,9 @@ int rdma_reject(struct rdma_cm_id *id, const void *private_data,
 	} else if (rdma_cap_iw_cm(id->device, id->port_num)) {
 		ret = iw_cm_reject(id_priv->cm_id.iw,
 				   private_data, private_data_len);
-	} else
+	} else {
 		ret = -ENOSYS;
+	}
 
 	return ret;
 }
@@ -4864,14 +4908,28 @@ static void cma_process_remove(struct cma_device *cma_dev)
 		wait_for_completion(&cma_dev->comp);
 }
 
+static bool cma_supported(struct ib_device *device)
+{
+	u32 i;
+
+	rdma_for_each_port(device, i) {
+		if (rdma_cap_ib_cm(device, i) || rdma_cap_iw_cm(device, i))
+			return true;
+	}
+	return false;
+}
+
 static int cma_add_one(struct ib_device *device)
 {
	struct rdma_id_private *to_destroy;
 	struct cma_device *cma_dev;
 	struct rdma_id_private *id_priv;
-	unsigned int i;
 	unsigned long supported_gids = 0;
 	int ret;
+	u32 i;
+
+	if (!cma_supported(device))
+		return -EOPNOTSUPP;
 
 	cma_dev = kmalloc(sizeof(*cma_dev), GFP_KERNEL);
 	if (!cma_dev)

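The kernel-doc for the new rdma_set_min_rnr_timer() above says it must be
called before rdma_connect() on the active side (or rdma_accept() on the
passive side). A minimal sketch of how a kernel ULP might use it; the function
name example_connect is hypothetical, the id/conn_param setup is assumed to
exist elsewhere, and 14 is just an example 5-bit encoding from IBTA Table 45:

	#include <rdma/rdma_cm.h>

	static int example_connect(struct rdma_cm_id *id,
				   struct rdma_conn_param *param)
	{
		int ret;

		/* Must precede rdma_connect(); only valid for RC/XRC_TGT QPs. */
		ret = rdma_set_min_rnr_timer(id, 14);
		if (ret)
			return ret;	/* -EINVAL: bad encoding or wrong QP type */

		return rdma_connect(id, param);
	}
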
@@ -43,7 +43,7 @@ struct cma_device;
 struct cma_dev_group;
 
 struct cma_dev_port_group {
-	unsigned int port_num;
+	u32 port_num;
 	struct cma_dev_group *cma_dev_group;
 	struct config_group group;
 };
@@ -200,10 +200,10 @@ static const struct config_item_type cma_port_group_type = {
 static int make_cma_ports(struct cma_dev_group *cma_dev_group,
 			  struct cma_device *cma_dev)
 {
-	struct ib_device *ibdev;
-	unsigned int i;
-	unsigned int ports_num;
 	struct cma_dev_port_group *ports;
+	struct ib_device *ibdev;
+	u32 ports_num;
+	u32 i;
 
 	ibdev = cma_get_ib_dev(cma_dev);
 

@@ -86,9 +86,11 @@ struct rdma_id_private {
 	u8 tos;
 	u8 tos_set:1;
 	u8 timeout_set:1;
+	u8 min_rnr_timer_set:1;
 	u8 reuseaddr;
 	u8 afonly;
 	u8 timeout;
+	u8 min_rnr_timer;
 	enum ib_gid_type gid_type;
 
 	/*
@@ -117,11 +119,11 @@ void cma_dev_put(struct cma_device *dev);
 typedef bool (*cma_device_filter)(struct ib_device *, void *);
 struct cma_device *cma_enum_devices_by_ibdev(cma_device_filter filter,
 					     void *cookie);
-int cma_get_default_gid_type(struct cma_device *dev, unsigned int port);
-int cma_set_default_gid_type(struct cma_device *dev, unsigned int port,
+int cma_get_default_gid_type(struct cma_device *dev, u32 port);
+int cma_set_default_gid_type(struct cma_device *dev, u32 port,
 			     enum ib_gid_type default_gid_type);
-int cma_get_default_roce_tos(struct cma_device *dev, unsigned int port);
-int cma_set_default_roce_tos(struct cma_device *dev, unsigned int port,
+int cma_get_default_roce_tos(struct cma_device *dev, u32 port);
+int cma_set_default_roce_tos(struct cma_device *dev, u32 port,
 			     u8 default_roce_tos);
 struct ib_device *cma_get_ib_dev(struct cma_device *dev);
 

@@ -83,14 +83,14 @@ void ib_device_unregister_sysfs(struct ib_device *device);
 int ib_device_rename(struct ib_device *ibdev, const char *name);
 int ib_device_set_dim(struct ib_device *ibdev, u8 use_dim);
 
-typedef void (*roce_netdev_callback)(struct ib_device *device, u8 port,
+typedef void (*roce_netdev_callback)(struct ib_device *device, u32 port,
 				     struct net_device *idev, void *cookie);
 
-typedef bool (*roce_netdev_filter)(struct ib_device *device, u8 port,
+typedef bool (*roce_netdev_filter)(struct ib_device *device, u32 port,
 				   struct net_device *idev, void *cookie);
 
 struct net_device *ib_device_get_netdev(struct ib_device *ib_dev,
-					unsigned int port);
+					u32 port);
 
 void ib_enum_roce_netdev(struct ib_device *ib_dev,
 			 roce_netdev_filter filter,
@@ -113,7 +113,7 @@ int ib_enum_all_devs(nldev_callback nldev_cb, struct sk_buff *skb,
 struct ib_client_nl_info {
 	struct sk_buff *nl_msg;
 	struct device *cdev;
-	unsigned int port;
+	u32 port;
 	u64 abi;
 };
 int ib_get_client_nl_info(struct ib_device *ibdev, const char *client_name,
@@ -128,24 +128,24 @@ int ib_cache_gid_parse_type_str(const char *buf);
 
 const char *ib_cache_gid_type_str(enum ib_gid_type gid_type);
 
-void ib_cache_gid_set_default_gid(struct ib_device *ib_dev, u8 port,
+void ib_cache_gid_set_default_gid(struct ib_device *ib_dev, u32 port,
 				  struct net_device *ndev,
 				  unsigned long gid_type_mask,
 				  enum ib_cache_gid_default_mode mode);
 
-int ib_cache_gid_add(struct ib_device *ib_dev, u8 port,
+int ib_cache_gid_add(struct ib_device *ib_dev, u32 port,
 		     union ib_gid *gid, struct ib_gid_attr *attr);
 
-int ib_cache_gid_del(struct ib_device *ib_dev, u8 port,
+int ib_cache_gid_del(struct ib_device *ib_dev, u32 port,
 		     union ib_gid *gid, struct ib_gid_attr *attr);
 
-int ib_cache_gid_del_all_netdev_gids(struct ib_device *ib_dev, u8 port,
+int ib_cache_gid_del_all_netdev_gids(struct ib_device *ib_dev, u32 port,
 				     struct net_device *ndev);
 
 int roce_gid_mgmt_init(void);
 void roce_gid_mgmt_cleanup(void);
 
-unsigned long roce_gid_type_mask_support(struct ib_device *ib_dev, u8 port);
+unsigned long roce_gid_type_mask_support(struct ib_device *ib_dev, u32 port);
 
 int ib_cache_setup_one(struct ib_device *device);
 void ib_cache_cleanup_one(struct ib_device *device);
@@ -215,14 +215,14 @@ int ib_nl_handle_ip_res_resp(struct sk_buff *skb,
 			     struct netlink_ext_ack *extack);
 
 int ib_get_cached_subnet_prefix(struct ib_device *device,
-				u8 port_num,
-				u64 *sn_pfx);
+				u32 port_num,
+				u64 *sn_pfx);
 
 #ifdef CONFIG_SECURITY_INFINIBAND
 void ib_security_release_port_pkey_list(struct ib_device *device);
 
 void ib_security_cache_change(struct ib_device *device,
-			      u8 port_num,
+			      u32 port_num,
 			      u64 subnet_prefix);
 
 int ib_security_modify_qp(struct ib_qp *qp,
@@ -247,7 +247,7 @@ static inline void ib_security_release_port_pkey_list(struct ib_device *device)
 }
 
 static inline void ib_security_cache_change(struct ib_device *device,
-					    u8 port_num,
+					    u32 port_num,
 					    u64 subnet_prefix)
 {
 }
@@ -381,7 +381,7 @@ int ib_setup_port_attrs(struct ib_core_device *coredev);
 
 int rdma_compatdev_set(u8 enable);
 
-int ib_port_register_module_stat(struct ib_device *device, u8 port_num,
+int ib_port_register_module_stat(struct ib_device *device, u32 port_num,
 				 struct kobject *kobj, struct kobj_type *ktype,
 				 const char *name);
 void ib_port_unregister_module_stat(struct kobject *kobj);

@@ -14,10 +14,12 @@ static int __counter_set_mode(struct rdma_port_counter *port_counter,
 			      enum rdma_nl_counter_mode new_mode,
 			      enum rdma_nl_counter_mask new_mask)
 {
-	if (new_mode == RDMA_COUNTER_MODE_AUTO && port_counter->num_counters)
-		if (new_mask & ~ALL_AUTO_MODE_MASKS ||
-		    port_counter->mode.mode != RDMA_COUNTER_MODE_NONE)
+	if (new_mode == RDMA_COUNTER_MODE_AUTO) {
+		if (new_mask & (~ALL_AUTO_MODE_MASKS))
 			return -EINVAL;
+		if (port_counter->num_counters)
+			return -EBUSY;
+	}
 
 	port_counter->mode.mode = new_mode;
 	port_counter->mode.mask = new_mask;
@@ -32,14 +34,17 @@ static int __counter_set_mode(struct rdma_port_counter *port_counter,
  * @mask: Mask to configure
  * @extack: Message to the user
  *
- * Return 0 on success.
+ * Return 0 on success. If counter mode wasn't changed then it is considered
+ * as success as well.
+ * Return -EBUSY when changing to auto mode while there are bounded counters.
+ *
  */
-int rdma_counter_set_auto_mode(struct ib_device *dev, u8 port,
+int rdma_counter_set_auto_mode(struct ib_device *dev, u32 port,
 			       enum rdma_nl_counter_mask mask,
 			       struct netlink_ext_ack *extack)
 {
-	enum rdma_nl_counter_mode mode = RDMA_COUNTER_MODE_AUTO;
 	struct rdma_port_counter *port_counter;
+	enum rdma_nl_counter_mode mode;
 	int ret;
 
 	port_counter = &dev->port_data[port].port_counter;
@@ -47,25 +52,26 @@ int rdma_counter_set_auto_mode(struct ib_device *dev, u32 port,
 		return -EOPNOTSUPP;
 
 	mutex_lock(&port_counter->lock);
-	if (mask) {
-		ret = __counter_set_mode(port_counter, mode, mask);
-		if (ret)
-			NL_SET_ERR_MSG(
-				extack,
-				"Turning on auto mode is not allowed when there is bound QP");
+	if (mask)
+		mode = RDMA_COUNTER_MODE_AUTO;
+	else
+		mode = (port_counter->num_counters) ? RDMA_COUNTER_MODE_MANUAL :
+						      RDMA_COUNTER_MODE_NONE;
+
+	if (port_counter->mode.mode == mode &&
+	    port_counter->mode.mask == mask) {
+		ret = 0;
 		goto out;
 	}
 
-	if (port_counter->mode.mode != RDMA_COUNTER_MODE_AUTO) {
-		ret = -EINVAL;
-		goto out;
-	}
+	ret = __counter_set_mode(port_counter, mode, mask);
 
-	mode = (port_counter->num_counters) ? RDMA_COUNTER_MODE_MANUAL :
-					      RDMA_COUNTER_MODE_NONE;
-	ret = __counter_set_mode(port_counter, mode, 0);
 out:
 	mutex_unlock(&port_counter->lock);
+	if (ret == -EBUSY)
+		NL_SET_ERR_MSG(
+			extack,
+			"Modifying auto mode is not allowed when there is a bound QP");
 	return ret;
 }
 
@@ -100,7 +106,7 @@ static int __rdma_counter_bind_qp(struct rdma_counter *counter,
 	return ret;
 }
 
-static struct rdma_counter *alloc_and_bind(struct ib_device *dev, u8 port,
+static struct rdma_counter *alloc_and_bind(struct ib_device *dev, u32 port,
 					   struct ib_qp *qp,
 					   enum rdma_nl_counter_mode mode)
 {
@@ -238,7 +244,7 @@ static void counter_history_stat_update(struct rdma_counter *counter)
 * Return: The counter (with ref-count increased) if found
 */
 static struct rdma_counter *rdma_get_counter_auto_mode(struct ib_qp *qp,
-						       u8 port)
+						       u32 port)
 {
 	struct rdma_port_counter *port_counter;
 	struct rdma_counter *counter = NULL;
@@ -282,7 +288,7 @@ static void counter_release(struct kref *kref)
 * rdma_counter_bind_qp_auto - Check and bind the QP to a counter base on
 *   the auto-mode rule
 */
-int rdma_counter_bind_qp_auto(struct ib_qp *qp, u8 port)
+int rdma_counter_bind_qp_auto(struct ib_qp *qp, u32 port)
 {
 	struct rdma_port_counter *port_counter;
 	struct ib_device *dev = qp->device;
@@ -352,7 +358,7 @@ int rdma_counter_query_stats(struct rdma_counter *counter)
 }
 
 static u64 get_running_counters_hwstat_sum(struct ib_device *dev,
-					   u8 port, u32 index)
+					   u32 port, u32 index)
 {
 	struct rdma_restrack_entry *res;
 	struct rdma_restrack_root *rt;
@@ -388,7 +394,7 @@ static u64 get_running_counters_hwstat_sum(struct ib_device *dev,
 * rdma_counter_get_hwstat_value() - Get the sum value of all counters on a
 *   specific port, including the running ones and history data
 */
-u64 rdma_counter_get_hwstat_value(struct ib_device *dev, u8 port, u32 index)
+u64 rdma_counter_get_hwstat_value(struct ib_device *dev, u32 port, u32 index)
 {
 	struct rdma_port_counter *port_counter;
 	u64 sum;
@@ -443,7 +449,7 @@ static struct rdma_counter *rdma_get_counter_by_id(struct ib_device *dev,
 /*
 * rdma_counter_bind_qpn() - Bind QP @qp_num to counter @counter_id
 */
-int rdma_counter_bind_qpn(struct ib_device *dev, u8 port,
+int rdma_counter_bind_qpn(struct ib_device *dev, u32 port,
 			  u32 qp_num, u32 counter_id)
 {
 	struct rdma_port_counter *port_counter;
@@ -493,7 +499,7 @@ int rdma_counter_bind_qpn(struct ib_device *dev, u32 port,
 * rdma_counter_bind_qpn_alloc() - Alloc a counter and bind QP @qp_num to it
 *   The id of new counter is returned in @counter_id
 */
-int rdma_counter_bind_qpn_alloc(struct ib_device *dev, u8 port,
+int rdma_counter_bind_qpn_alloc(struct ib_device *dev, u32 port,
 				u32 qp_num, u32 *counter_id)
 {
 	struct rdma_port_counter *port_counter;
@@ -540,7 +546,7 @@ int rdma_counter_bind_qpn_alloc(struct ib_device *dev, u32 port,
 /*
 * rdma_counter_unbind_qpn() - Unbind QP @qp_num from a counter
 */
-int rdma_counter_unbind_qpn(struct ib_device *dev, u8 port,
+int rdma_counter_unbind_qpn(struct ib_device *dev, u32 port,
 			    u32 qp_num, u32 counter_id)
 {
 	struct rdma_port_counter *port_counter;
@@ -573,7 +579,7 @@ int rdma_counter_unbind_qpn(struct ib_device *dev, u32 port,
 	return ret;
 }
 
-int rdma_counter_get_mode(struct ib_device *dev, u8 port,
+int rdma_counter_get_mode(struct ib_device *dev, u32 port,
 			  enum rdma_nl_counter_mode *mode,
 			  enum rdma_nl_counter_mask *mask)
 {

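The reworked rdma_counter_set_auto_mode() above centralizes the target-mode
choice: the requested mask alone decides between auto mode and fallback, a
no-op transition counts as success, and -EBUSY only surfaces from the actual
transition. Restated outside the kernel for clarity (a sketch with invented
names, not the kernel code):

	enum mode { MODE_NONE, MODE_MANUAL, MODE_AUTO };

	/* Sketch: a nonzero mask requests auto mode; otherwise the mode
	 * falls back to manual if counters are still bound, else none.
	 */
	static enum mode pick_mode(unsigned int mask, int num_counters)
	{
		if (mask)
			return MODE_AUTO;
		return num_counters ? MODE_MANUAL : MODE_NONE;
	}
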
@@ -779,7 +779,7 @@ static void remove_client_context(struct ib_device *device,
 static int alloc_port_data(struct ib_device *device)
 {
 	struct ib_port_data_rcu *pdata_rcu;
-	unsigned int port;
+	u32 port;
 
 	if (device->port_data)
 		return 0;
@@ -788,6 +788,10 @@ static int alloc_port_data(struct ib_device *device)
 	if (WARN_ON(!device->phys_port_cnt))
 		return -EINVAL;
 
+	/* Reserve U32_MAX so the logic to go over all the ports is sane */
+	if (WARN_ON(device->phys_port_cnt == U32_MAX))
+		return -EINVAL;
+
 	/*
 	 * device->port_data is indexed directly by the port number to make
 	 * access to this data as efficient as possible.
@@ -819,7 +823,7 @@ static int alloc_port_data(struct ib_device *device)
 	return 0;
 }
 
-static int verify_immutable(const struct ib_device *dev, u8 port)
+static int verify_immutable(const struct ib_device *dev, u32 port)
 {
 	return WARN_ON(!rdma_cap_ib_mad(dev, port) &&
 			rdma_max_mad_size(dev, port) != 0);
@@ -827,7 +831,7 @@ static int verify_immutable(const struct ib_device *dev, u8 port)
 
 static int setup_port_data(struct ib_device *device)
 {
-	unsigned int port;
+	u32 port;
 	int ret;
 
 	ret = alloc_port_data(device);
@@ -2005,7 +2009,7 @@ void ib_dispatch_event_clients(struct ib_event *event)
 }
 
 static int iw_query_port(struct ib_device *device,
-			 u8 port_num,
+			 u32 port_num,
 			 struct ib_port_attr *port_attr)
 {
 	struct in_device *inetdev;
@@ -2044,7 +2048,7 @@ static int iw_query_port(struct ib_device *device,
 }
 
 static int __ib_query_port(struct ib_device *device,
-			   u8 port_num,
+			   u32 port_num,
 			   struct ib_port_attr *port_attr)
 {
 	union ib_gid gid = {};
@@ -2078,7 +2082,7 @@ static int __ib_query_port(struct ib_device *device,
 * @port_attr pointer.
 */
 int ib_query_port(struct ib_device *device,
-		  u8 port_num,
+		  u32 port_num,
 		  struct ib_port_attr *port_attr)
 {
 	if (!rdma_is_port_valid(device, port_num))
@@ -2130,7 +2134,7 @@ static void add_ndev_hash(struct ib_port_data *pdata)
 * NETDEV_UNREGISTER event.
 */
 int ib_device_set_netdev(struct ib_device *ib_dev, struct net_device *ndev,
-			 unsigned int port)
+			 u32 port)
 {
 	struct net_device *old_ndev;
 	struct ib_port_data *pdata;
@@ -2173,7 +2177,7 @@ EXPORT_SYMBOL(ib_device_set_netdev);
 static void free_netdevs(struct ib_device *ib_dev)
 {
 	unsigned long flags;
-	unsigned int port;
+	u32 port;
 
 	if (!ib_dev->port_data)
 		return;
@@ -2204,7 +2208,7 @@ static void free_netdevs(struct ib_device *ib_dev)
 }
 
 struct net_device *ib_device_get_netdev(struct ib_device *ib_dev,
-					unsigned int port)
+					u32 port)
 {
 	struct ib_port_data *pdata;
 	struct net_device *res;
@@ -2291,7 +2295,7 @@ void ib_enum_roce_netdev(struct ib_device *ib_dev,
 			 roce_netdev_callback cb,
 			 void *cookie)
 {
-	unsigned int port;
+	u32 port;
 
 	rdma_for_each_port (ib_dev, port)
 		if (rdma_protocol_roce(ib_dev, port)) {
@@ -2369,7 +2373,7 @@ int ib_enum_all_devs(nldev_callback nldev_cb, struct sk_buff *skb,
 * ib_query_pkey() fetches the specified P_Key table entry.
 */
 int ib_query_pkey(struct ib_device *device,
-		  u8 port_num, u16 index, u16 *pkey)
+		  u32 port_num, u16 index, u16 *pkey)
 {
 	if (!rdma_is_port_valid(device, port_num))
 		return -EINVAL;
@@ -2414,7 +2418,7 @@ EXPORT_SYMBOL(ib_modify_device);
 * @port_modify_mask and @port_modify structure.
 */
 int ib_modify_port(struct ib_device *device,
-		   u8 port_num, int port_modify_mask,
+		   u32 port_num, int port_modify_mask,
 		   struct ib_port_modify *port_modify)
 {
 	int rc;
@@ -2446,10 +2450,10 @@ EXPORT_SYMBOL(ib_modify_port);
 * parameter may be NULL.
 */
 int ib_find_gid(struct ib_device *device, union ib_gid *gid,
-		u8 *port_num, u16 *index)
+		u32 *port_num, u16 *index)
 {
 	union ib_gid tmp_gid;
-	unsigned int port;
+	u32 port;
 	int ret, i;
 
 	rdma_for_each_port (device, port) {
@@ -2483,7 +2487,7 @@ EXPORT_SYMBOL(ib_find_gid);
 * @index: The index into the PKey table where the PKey was found.
 */
 int ib_find_pkey(struct ib_device *device,
-		 u8 port_num, u16 pkey, u16 *index)
+		 u32 port_num, u16 pkey, u16 *index)
 {
 	int ret, i;
 	u16 tmp_pkey;
@@ -2526,7 +2530,7 @@ EXPORT_SYMBOL(ib_find_pkey);
 *
 */
 struct net_device *ib_get_net_dev_by_params(struct ib_device *dev,
-					    u8 port,
+					    u32 port,
 					    u16 pkey,
 					    const union ib_gid *gid,
 					    const struct sockaddr *addr)
@@ -2696,7 +2700,6 @@ void ib_set_device_ops(struct ib_device *dev, const struct ib_device_ops *ops)
 	SET_DEVICE_OP(dev_ops, reg_dm_mr);
 	SET_DEVICE_OP(dev_ops, reg_user_mr);
 	SET_DEVICE_OP(dev_ops, reg_user_mr_dmabuf);
-	SET_DEVICE_OP(dev_ops, req_ncomp_notif);
 	SET_DEVICE_OP(dev_ops, req_notify_cq);
 	SET_DEVICE_OP(dev_ops, rereg_user_mr);
 	SET_DEVICE_OP(dev_ops, resize_cq);

@@ -528,7 +528,8 @@ int iwpm_add_mapping_cb(struct sk_buff *skb, struct netlink_callback *cb)
 }
 
 /* netlink attribute policy for the response to add and query mapping request
- * and response with remote address info */
+ * and response with remote address info
+ */
 static const struct nla_policy resp_query_policy[IWPM_NLA_RQUERY_MAPPING_MAX] = {
 	[IWPM_NLA_RQUERY_MAPPING_SEQ]     = { .type = NLA_U32 },
 	[IWPM_NLA_RQUERY_LOCAL_ADDR]      = {

@@ -61,7 +61,7 @@ static void create_mad_addr_info(struct ib_mad_send_wr_private *mad_send_wr,
 {
 	u16 pkey;
 	struct ib_device *dev = qp_info->port_priv->device;
-	u8 pnum = qp_info->port_priv->port_num;
+	u32 pnum = qp_info->port_priv->port_num;
 	struct ib_ud_wr *wr = &mad_send_wr->send_wr;
 	struct rdma_ah_attr attr = {};
@@ -118,7 +118,7 @@ static void ib_mad_send_done(struct ib_cq *cq, struct ib_wc *wc);
  * Assumes ib_mad_port_list_lock is being held
  */
 static inline struct ib_mad_port_private *
-__ib_get_mad_port(struct ib_device *device, int port_num)
+__ib_get_mad_port(struct ib_device *device, u32 port_num)
 {
 	struct ib_mad_port_private *entry;
 
@@ -134,7 +134,7 @@ __ib_get_mad_port(struct ib_device *device, int port_num)
  * for a device/port
  */
 static inline struct ib_mad_port_private *
-ib_get_mad_port(struct ib_device *device, int port_num)
+ib_get_mad_port(struct ib_device *device, u32 port_num)
 {
 	struct ib_mad_port_private *entry;
 	unsigned long flags;
@@ -155,8 +155,7 @@ static inline u8 convert_mgmt_class(u8 mgmt_class)
 
 static int get_spl_qp_index(enum ib_qp_type qp_type)
 {
-	switch (qp_type)
-	{
+	switch (qp_type) {
 	case IB_QPT_SMI:
 		return 0;
 	case IB_QPT_GSI:
@@ -222,7 +221,7 @@ EXPORT_SYMBOL(ib_response_mad);
 * Context: Process context.
 */
 struct ib_mad_agent *ib_register_mad_agent(struct ib_device *device,
-					   u8 port_num,
+					   u32 port_num,
 					   enum ib_qp_type qp_type,
 					   struct ib_mad_reg_req *mad_reg_req,
 					   u8 rmpp_version,
@@ -549,7 +548,7 @@ static void dequeue_mad(struct ib_mad_list_head *mad_list)
 }
 
 static void build_smp_wc(struct ib_qp *qp, struct ib_cqe *cqe, u16 slid,
-			 u16 pkey_index, u8 port_num, struct ib_wc *wc)
+			 u16 pkey_index, u32 port_num, struct ib_wc *wc)
 {
 	memset(wc, 0, sizeof *wc);
 	wc->wr_cqe = cqe;
@@ -608,7 +607,7 @@ static int handle_outgoing_dr_smp(struct ib_mad_agent_private *mad_agent_priv,
 	struct ib_mad_port_private *port_priv;
 	struct ib_mad_agent_private *recv_mad_agent = NULL;
 	struct ib_device *device = mad_agent_priv->agent.device;
-	u8 port_num;
+	u32 port_num;
 	struct ib_wc mad_wc;
 	struct ib_ud_wr *send_wr = &mad_send_wr->send_wr;
 	size_t mad_size = port_mad_size(mad_agent_priv->qp_info->port_priv);
@@ -707,8 +706,7 @@ static int handle_outgoing_dr_smp(struct ib_mad_agent_private *mad_agent_priv,
 				     (const struct ib_mad *)smp,
 				     (struct ib_mad *)mad_priv->mad, &mad_size,
 				     &out_mad_pkey_index);
-	switch (ret)
-	{
+	switch (ret) {
 	case IB_MAD_RESULT_SUCCESS | IB_MAD_RESULT_REPLY:
 		if (ib_response_mad((const struct ib_mad_hdr *)mad_priv->mad) &&
 		    mad_agent_priv->agent.recv_handler) {
@@ -807,7 +805,7 @@ static int alloc_send_rmpp_list(struct ib_mad_send_wr_private *send_wr,
 
 	/* Allocate data segments. */
 	for (left = send_buf->data_len + pad; left > 0; left -= seg_size) {
-		seg = kmalloc(sizeof (*seg) + seg_size, gfp_mask);
+		seg = kmalloc(sizeof(*seg) + seg_size, gfp_mask);
 		if (!seg) {
 			free_send_rmpp_list(send_wr);
 			return -ENOMEM;
@@ -837,12 +835,11 @@ int ib_mad_kernel_rmpp_agent(const struct ib_mad_agent *agent)
 }
 EXPORT_SYMBOL(ib_mad_kernel_rmpp_agent);
 
-struct ib_mad_send_buf * ib_create_send_mad(struct ib_mad_agent *mad_agent,
-					    u32 remote_qpn, u16 pkey_index,
-					    int rmpp_active,
-					    int hdr_len, int data_len,
-					    gfp_t gfp_mask,
-					    u8 base_version)
+struct ib_mad_send_buf *ib_create_send_mad(struct ib_mad_agent *mad_agent,
+					   u32 remote_qpn, u16 pkey_index,
+					   int rmpp_active, int hdr_len,
+					   int data_len, gfp_t gfp_mask,
+					   u8 base_version)
 {
 	struct ib_mad_agent_private *mad_agent_priv;
 	struct ib_mad_send_wr_private *mad_send_wr;
@@ -1275,11 +1272,9 @@ static void remove_methods_mad_agent(struct ib_mad_mgmt_method_table *method,
 	int i;
 
 	/* Remove any methods for this mad agent */
-	for (i = 0; i < IB_MGMT_MAX_METHODS; i++) {
-		if (method->agent[i] == agent) {
+	for (i = 0; i < IB_MGMT_MAX_METHODS; i++)
+		if (method->agent[i] == agent)
 			method->agent[i] = NULL;
-		}
-	}
 }
 
 static int add_nonoui_reg_req(struct ib_mad_reg_req *mad_reg_req,
@@ -1454,9 +1449,8 @@ static void remove_mad_reg_req(struct ib_mad_agent_private *agent_priv)
 	 * Was MAD registration request supplied
 	 * with original registration ?
 	 */
-	if (!agent_priv->reg_req) {
+	if (!agent_priv->reg_req)
 		goto out;
-	}
 
 	port_priv = agent_priv->qp_info->port_priv;
 	mgmt_class = convert_mgmt_class(agent_priv->reg_req->mgmt_class);
@@ -1613,7 +1607,7 @@ find_mad_agent(struct ib_mad_port_private *port_priv,
 
 	if (mad_agent && !mad_agent->agent.recv_handler) {
 		dev_notice(&port_priv->device->dev,
-			   "No receive handler for client %p on port %d\n",
+			   "No receive handler for client %p on port %u\n",
 			   &mad_agent->agent, port_priv->port_num);
 		deref_mad_agent(mad_agent);
 		mad_agent = NULL;
@@ -1677,15 +1671,16 @@ static inline int rcv_has_same_class(const struct ib_mad_send_wr_private *wr,
 		rwc->recv_buf.mad->mad_hdr.mgmt_class;
 }
 
-static inline int rcv_has_same_gid(const struct ib_mad_agent_private *mad_agent_priv,
-				   const struct ib_mad_send_wr_private *wr,
-				   const struct ib_mad_recv_wc *rwc )
+static inline int
+rcv_has_same_gid(const struct ib_mad_agent_private *mad_agent_priv,
+		 const struct ib_mad_send_wr_private *wr,
+		 const struct ib_mad_recv_wc *rwc)
 {
 	struct rdma_ah_attr attr;
 	u8 send_resp, rcv_resp;
 	union ib_gid sgid;
 	struct ib_device *device = mad_agent_priv->agent.device;
-	u8 port_num = mad_agent_priv->agent.port_num;
+	u32 port_num = mad_agent_priv->agent.port_num;
 	u8 lmc;
 	bool has_grh;
 
@@ -1834,7 +1829,8 @@ static void ib_mad_complete_recv(struct ib_mad_agent_private *mad_agent_priv,
 			deref_mad_agent(mad_agent_priv);
 		} else {
 			/* not user rmpp, revert to normal behavior and
-			 * drop the mad */
+			 * drop the mad
+			 */
 			ib_free_recv_mad(mad_recv_wc);
 			deref_mad_agent(mad_agent_priv);
 			return;
@@ -1860,14 +1856,12 @@ static void ib_mad_complete_recv(struct ib_mad_agent_private *mad_agent_priv,
 						   mad_recv_wc);
 		deref_mad_agent(mad_agent_priv);
 	}
-
-	return;
 }
 
 static enum smi_action handle_ib_smi(const struct ib_mad_port_private *port_priv,
 				     const struct ib_mad_qp_info *qp_info,
 				     const struct ib_wc *wc,
-				     int port_num,
+				     u32 port_num,
 				     struct ib_mad_private *recv,
 				     struct ib_mad_private *response)
 {
@@ -1954,7 +1948,7 @@ static enum smi_action
 handle_opa_smi(struct ib_mad_port_private *port_priv,
 	       struct ib_mad_qp_info *qp_info,
 	       struct ib_wc *wc,
-	       int port_num,
+	       u32 port_num,
 	       struct ib_mad_private *recv,
 	       struct ib_mad_private *response)
 {
@@ -2010,7 +2004,7 @@ static enum smi_action
 handle_smi(struct ib_mad_port_private *port_priv,
 	   struct ib_mad_qp_info *qp_info,
 	   struct ib_wc *wc,
-	   int port_num,
+	   u32 port_num,
 	   struct ib_mad_private *recv,
 	   struct ib_mad_private *response,
 	   bool opa)
@@ -2034,7 +2028,7 @@ static void ib_mad_recv_done(struct ib_cq *cq, struct ib_wc *wc)
 	struct ib_mad_private_header *mad_priv_hdr;
 	struct ib_mad_private *recv, *response = NULL;
 	struct ib_mad_agent_private *mad_agent;
-	int port_num;
+	u32 port_num;
 	int ret = IB_MAD_RESULT_SUCCESS;
 	size_t mad_size;
 	u16 resp_mad_pkey_index = 0;
@@ -2202,9 +2196,10 @@ static void wait_for_response(struct ib_mad_send_wr_private *mad_send_wr)
 				       temp_mad_send_wr->timeout))
 				break;
 		}
-	}
-	else
+	} else {
 		list_item = &mad_agent_priv->wait_list;
+	}
 
 	list_add(&mad_send_wr->agent_list, list_item);
 
 	/* Reschedule a work item if we have a shorter timeout */
@@ -2258,7 +2253,7 @@ void ib_mad_complete_send_wr(struct ib_mad_send_wr_private *mad_send_wr,
 	adjust_timeout(mad_agent_priv);
 	spin_unlock_irqrestore(&mad_agent_priv->lock, flags);
 
-	if (mad_send_wr->status != IB_WC_SUCCESS )
+	if (mad_send_wr->status != IB_WC_SUCCESS)
 		mad_send_wc->status = mad_send_wr->status;
 	if (ret == IB_RMPP_RESULT_INTERNAL)
 		ib_rmpp_send_handler(mad_send_wc);
@@ -2947,7 +2942,7 @@ static void destroy_mad_qp(struct ib_mad_qp_info *qp_info)
  * Create the QP, PD, MR, and CQ if needed
  */
 static int ib_mad_port_open(struct ib_device *device,
-			    int port_num)
+			    u32 port_num)
 {
 	int ret, cq_size;
 	struct ib_mad_port_private *port_priv;
@@ -3002,7 +2997,7 @@ static int ib_mad_port_open(struct ib_device *device,
 	if (ret)
 		goto error7;
 
-	snprintf(name, sizeof name, "ib_mad%d", port_num);
+	snprintf(name, sizeof(name), "ib_mad%u", port_num);
 	port_priv->wq = alloc_ordered_workqueue(name, WQ_MEM_RECLAIM);
 	if (!port_priv->wq) {
 		ret = -ENOMEM;
@@ -3048,7 +3043,7 @@ static int ib_mad_port_open(struct ib_device *device,
  * If there are no classes using the port, free the port
  * resources (CQ, MR, PD, QP) and remove the port's info structure
  */
-static int ib_mad_port_close(struct ib_device *device, int port_num)
+static int ib_mad_port_close(struct ib_device *device, u32 port_num)
 {
 	struct ib_mad_port_private *port_priv;
 	unsigned long flags;
@@ -3057,7 +3052,7 @@ static int ib_mad_port_close(struct ib_device *device, int port_num)
 	port_priv = __ib_get_mad_port(device, port_num);
 	if (port_priv == NULL) {
 		spin_unlock_irqrestore(&ib_mad_port_list_lock, flags);
-		dev_err(&device->dev, "Port %d not found\n", port_num);
+		dev_err(&device->dev, "Port %u not found\n", port_num);
 		return -ENODEV;
 	}
 	list_del_init(&port_priv->port_list);

@@ -382,8 +382,8 @@ static inline int get_seg_num(struct ib_mad_recv_buf *seg)
 	return be32_to_cpu(rmpp_mad->rmpp_hdr.seg_num);
 }
 
-static inline struct ib_mad_recv_buf * get_next_seg(struct list_head *rmpp_list,
-						    struct ib_mad_recv_buf *seg)
+static inline struct ib_mad_recv_buf *get_next_seg(struct list_head *rmpp_list,
+						   struct ib_mad_recv_buf *seg)
 {
 	if (seg->list.next == rmpp_list)
 		return NULL;
@@ -396,8 +396,8 @@ static inline int window_size(struct ib_mad_agent_private *agent)
 	return max(agent->qp_info->recv_queue.max_active >> 3, 1);
 }
 
-static struct ib_mad_recv_buf * find_seg_location(struct list_head *rmpp_list,
-						  int seg_num)
+static struct ib_mad_recv_buf *find_seg_location(struct list_head *rmpp_list,
+						 int seg_num)
 {
 	struct ib_mad_recv_buf *seg_buf;
 	int cur_seg_num;
@@ -449,7 +449,7 @@ static inline int get_mad_len(struct mad_rmpp_recv *rmpp_recv)
 	return hdr_size + rmpp_recv->seg_num * data_size - pad;
 }
 
-static struct ib_mad_recv_wc * complete_rmpp(struct mad_rmpp_recv *rmpp_recv)
+static struct ib_mad_recv_wc *complete_rmpp(struct mad_rmpp_recv *rmpp_recv)
 {
 	struct ib_mad_recv_wc *rmpp_wc;
 

@@ -63,7 +63,7 @@ struct mcast_port {
 	struct rb_root		table;
 	atomic_t		refcount;
 	struct completion	comp;
-	u8			port_num;
+	u32			port_num;
 };
 
 struct mcast_device {
@@ -605,7 +605,7 @@ static struct mcast_group *acquire_group(struct mcast_port *port,
 */
 struct ib_sa_multicast *
 ib_sa_join_multicast(struct ib_sa_client *client,
-		     struct ib_device *device, u8 port_num,
+		     struct ib_device *device, u32 port_num,
 		     struct ib_sa_mcmember_rec *rec,
 		     ib_sa_comp_mask comp_mask, gfp_t gfp_mask,
 		     int (*callback)(int status,
@@ -690,7 +690,7 @@ void ib_sa_free_multicast(struct ib_sa_multicast *multicast)
 }
 EXPORT_SYMBOL(ib_sa_free_multicast);
 
-int ib_sa_get_mcmember_rec(struct ib_device *device, u8 port_num,
+int ib_sa_get_mcmember_rec(struct ib_device *device, u32 port_num,
 			   union ib_gid *mgid, struct ib_sa_mcmember_rec *rec)
 {
 	struct mcast_device *dev;
@@ -732,7 +732,7 @@ EXPORT_SYMBOL(ib_sa_get_mcmember_rec);
 * success or appropriate error code.
 *
 */
-int ib_init_ah_from_mcmember(struct ib_device *device, u8 port_num,
+int ib_init_ah_from_mcmember(struct ib_device *device, u32 port_num,
 			     struct ib_sa_mcmember_rec *rec,
 			     struct net_device *ndev,
 			     enum ib_gid_type gid_type,

@@ -92,7 +92,9 @@ static const struct nla_policy nldev_policy[RDMA_NLDEV_ATTR_MAX] = {
 	[RDMA_NLDEV_ATTR_RES_CQE]		= { .type = NLA_U32 },
 	[RDMA_NLDEV_ATTR_RES_CQN]		= { .type = NLA_U32 },
 	[RDMA_NLDEV_ATTR_RES_CQ_ENTRY]		= { .type = NLA_NESTED },
+	[RDMA_NLDEV_ATTR_RES_CTX]		= { .type = NLA_NESTED },
+	[RDMA_NLDEV_ATTR_RES_CTXN]		= { .type = NLA_U32 },
 	[RDMA_NLDEV_ATTR_RES_CTX_ENTRY]		= { .type = NLA_NESTED },
 	[RDMA_NLDEV_ATTR_RES_DST_ADDR]		= {
 			.len = sizeof(struct __kernel_sockaddr_storage) },
 	[RDMA_NLDEV_ATTR_RES_IOVA]		= { .type = NLA_U64 },
@@ -130,6 +132,11 @@ static const struct nla_policy nldev_policy[RDMA_NLDEV_ATTR_MAX] = {
 	[RDMA_NLDEV_ATTR_RES_TYPE]		= { .type = NLA_U8 },
 	[RDMA_NLDEV_ATTR_RES_UNSAFE_GLOBAL_RKEY]= { .type = NLA_U32 },
 	[RDMA_NLDEV_ATTR_RES_USECNT]		= { .type = NLA_U64 },
+	[RDMA_NLDEV_ATTR_RES_SRQ]		= { .type = NLA_NESTED },
+	[RDMA_NLDEV_ATTR_RES_SRQN]		= { .type = NLA_U32 },
+	[RDMA_NLDEV_ATTR_RES_SRQ_ENTRY]		= { .type = NLA_NESTED },
+	[RDMA_NLDEV_ATTR_MIN_RANGE]		= { .type = NLA_U32 },
+	[RDMA_NLDEV_ATTR_MAX_RANGE]		= { .type = NLA_U32 },
 	[RDMA_NLDEV_ATTR_SM_LID]		= { .type = NLA_U32 },
 	[RDMA_NLDEV_ATTR_SUBNET_PREFIX]		= { .type = NLA_U64 },
 	[RDMA_NLDEV_ATTR_STAT_AUTO_MODE_MASK]	= { .type = NLA_U32 },
@@ -146,6 +153,7 @@ static const struct nla_policy nldev_policy[RDMA_NLDEV_ATTR_MAX] = {
 	[RDMA_NLDEV_ATTR_UVERBS_DRIVER_ID]	= { .type = NLA_U32 },
 	[RDMA_NLDEV_NET_NS_FD]			= { .type = NLA_U32 },
 	[RDMA_NLDEV_SYS_ATTR_NETNS_MODE]	= { .type = NLA_U8 },
+	[RDMA_NLDEV_SYS_ATTR_COPY_ON_FORK]	= { .type = NLA_U8 },
 };
 
 static int put_driver_name_print_type(struct sk_buff *msg, const char *name,
@@ -242,7 +250,7 @@ static int fill_dev_info(struct sk_buff *msg, struct ib_device *device)
 {
 	char fw[IB_FW_VERSION_NAME_MAX];
 	int ret = 0;
-	u8 port;
+	u32 port;
 
 	if (fill_nldev_handle(msg, device))
 		return -EMSGSIZE;
@@ -385,6 +393,7 @@ static int fill_res_info(struct sk_buff *msg, struct ib_device *device)
 		[RDMA_RESTRACK_CM_ID] = "cm_id",
 		[RDMA_RESTRACK_MR] = "mr",
 		[RDMA_RESTRACK_CTX] = "ctx",
+		[RDMA_RESTRACK_SRQ] = "srq",
 	};
 
 	struct nlattr *table_attr;
@@ -703,6 +712,135 @@ static int fill_res_pd_entry(struct sk_buff *msg, bool has_cap_net_admin,
 err:	return -EMSGSIZE;
 }
 
+static int fill_res_ctx_entry(struct sk_buff *msg, bool has_cap_net_admin,
+			      struct rdma_restrack_entry *res, uint32_t port)
+{
+	struct ib_ucontext *ctx = container_of(res, struct ib_ucontext, res);
+
+	if (rdma_is_kernel_res(res))
+		return 0;
+
+	if (nla_put_u32(msg, RDMA_NLDEV_ATTR_RES_CTXN, ctx->res.id))
+		return -EMSGSIZE;
+
+	return fill_res_name_pid(msg, res);
+}
+
+static int fill_res_range_qp_entry(struct sk_buff *msg, uint32_t min_range,
+				   uint32_t max_range)
+{
+	struct nlattr *entry_attr;
+
+	if (!min_range)
+		return 0;
+
+	entry_attr = nla_nest_start(msg, RDMA_NLDEV_ATTR_RES_QP_ENTRY);
+	if (!entry_attr)
+		return -EMSGSIZE;
+
+	if (min_range == max_range) {
+		if (nla_put_u32(msg, RDMA_NLDEV_ATTR_RES_LQPN, min_range))
+			goto err;
+	} else {
+		if (nla_put_u32(msg, RDMA_NLDEV_ATTR_MIN_RANGE, min_range))
+			goto err;
+		if (nla_put_u32(msg, RDMA_NLDEV_ATTR_MAX_RANGE, max_range))
+			goto err;
+	}
+	nla_nest_end(msg, entry_attr);
+	return 0;
+
+err:
+	nla_nest_cancel(msg, entry_attr);
+	return -EMSGSIZE;
+}
+
+static int fill_res_srq_qps(struct sk_buff *msg, struct ib_srq *srq)
+{
+	uint32_t min_range = 0, prev = 0;
+	struct rdma_restrack_entry *res;
+	struct rdma_restrack_root *rt;
+	struct nlattr *table_attr;
+	struct ib_qp *qp = NULL;
+	unsigned long id = 0;
+
+	table_attr = nla_nest_start(msg, RDMA_NLDEV_ATTR_RES_QP);
+	if (!table_attr)
+		return -EMSGSIZE;
+
+	rt = &srq->device->res[RDMA_RESTRACK_QP];
+	xa_lock(&rt->xa);
+	xa_for_each(&rt->xa, id, res) {
+		if (!rdma_restrack_get(res))
+			continue;
+
+		qp = container_of(res, struct ib_qp, res);
+		if (!qp->srq || (qp->srq->res.id != srq->res.id)) {
+			rdma_restrack_put(res);
+			continue;
+		}
+
+		if (qp->qp_num < prev)
+			/* qp_num should be ascending */
+			goto err_loop;
+
+		if (min_range == 0) {
+			min_range = qp->qp_num;
+		} else if (qp->qp_num > (prev + 1)) {
+			if (fill_res_range_qp_entry(msg, min_range, prev))
+				goto err_loop;
+
+			min_range = qp->qp_num;
+		}
+		prev = qp->qp_num;
+		rdma_restrack_put(res);
+	}
+
+	xa_unlock(&rt->xa);
+
+	if (fill_res_range_qp_entry(msg, min_range, prev))
+		goto err;
+
+	nla_nest_end(msg, table_attr);
+	return 0;
+
+err_loop:
+	rdma_restrack_put(res);
+	xa_unlock(&rt->xa);
+err:
+	nla_nest_cancel(msg, table_attr);
+	return -EMSGSIZE;
+}
+
+static int fill_res_srq_entry(struct sk_buff *msg, bool has_cap_net_admin,
+			      struct rdma_restrack_entry *res, uint32_t port)
+{
+	struct ib_srq *srq = container_of(res, struct ib_srq, res);
+
+	if (nla_put_u32(msg, RDMA_NLDEV_ATTR_RES_SRQN, srq->res.id))
+		goto err;
+
+	if (nla_put_u8(msg, RDMA_NLDEV_ATTR_RES_TYPE, srq->srq_type))
+		goto err;
+
+	if (nla_put_u32(msg, RDMA_NLDEV_ATTR_RES_PDN, srq->pd->res.id))
+		goto err;
+
+	if (ib_srq_has_cq(srq->srq_type)) {
+		if (nla_put_u32(msg, RDMA_NLDEV_ATTR_RES_CQN,
+				srq->ext.cq->res.id))
+			goto err;
+	}
+
+	if (fill_res_srq_qps(msg, srq))
+		goto err;
+
+	return fill_res_name_pid(msg, res);
+
+err:
+	return -EMSGSIZE;
+}
+
 static int fill_stat_counter_mode(struct sk_buff *msg,
 				  struct rdma_counter *counter)
 {
@@ -1236,6 +1374,19 @@ static const struct nldev_fill_res_entry fill_entries[RDMA_RESTRACK_MAX] = {
 		.entry = RDMA_NLDEV_ATTR_STAT_COUNTER_ENTRY,
 		.id = RDMA_NLDEV_ATTR_STAT_COUNTER_ID,
 	},
+	[RDMA_RESTRACK_CTX] = {
+		.nldev_attr = RDMA_NLDEV_ATTR_RES_CTX,
+		.flags = NLDEV_PER_DEV,
+		.entry = RDMA_NLDEV_ATTR_RES_CTX_ENTRY,
+		.id = RDMA_NLDEV_ATTR_RES_CTXN,
+	},
+	[RDMA_RESTRACK_SRQ] = {
+		.nldev_attr = RDMA_NLDEV_ATTR_RES_SRQ,
+		.flags = NLDEV_PER_DEV,
+		.entry = RDMA_NLDEV_ATTR_RES_SRQ_ENTRY,
+		.id = RDMA_NLDEV_ATTR_RES_SRQN,
+	},
+
 };
 
 static int res_get_common_doit(struct sk_buff *skb, struct nlmsghdr *nlh,
@@ -1476,6 +1627,8 @@ RES_GET_FUNCS(pd, RDMA_RESTRACK_PD);
 RES_GET_FUNCS(mr, RDMA_RESTRACK_MR);
 RES_GET_FUNCS(mr_raw, RDMA_RESTRACK_MR);
 RES_GET_FUNCS(counter, RDMA_RESTRACK_COUNTER);
+RES_GET_FUNCS(ctx, RDMA_RESTRACK_CTX);
+RES_GET_FUNCS(srq, RDMA_RESTRACK_SRQ);
 
 static LIST_HEAD(link_ops);
 static DECLARE_RWSEM(link_ops_rwsem);
@@ -1697,6 +1850,19 @@ static int nldev_sys_get_doit(struct sk_buff *skb, struct nlmsghdr *nlh,
 		nlmsg_free(msg);
 		return err;
 	}
+
+	/*
+	 * Copy-on-fork is supported.
+	 * See commits:
+	 * 70e806e4e645 ("mm: Do early cow for pinned pages during fork() for ptes")
+	 * 4eae4efa2c29 ("hugetlb: do early cow when page pinned on src mm")
+	 * for more details. Don't backport this without them.
+	 *
+	 * Return value ignored on purpose, assume copy-on-fork is not
+	 * supported in case of failure.
+	 */
+	nla_put_u8(msg, RDMA_NLDEV_SYS_ATTR_COPY_ON_FORK, 1);
+
 	nlmsg_end(msg, nlh);
 	return rdma_nl_unicast(sock_net(skb->sk), msg, NETLINK_CB(skb).portid);
 }
@@ -2139,6 +2305,14 @@ static const struct rdma_nl_cbs nldev_cb_table[RDMA_NLDEV_NUM_OPS] = {
 		.doit = nldev_res_get_pd_doit,
 		.dump = nldev_res_get_pd_dumpit,
 	},
+	[RDMA_NLDEV_CMD_RES_CTX_GET] = {
+		.doit = nldev_res_get_ctx_doit,
+		.dump = nldev_res_get_ctx_dumpit,
+	},
+	[RDMA_NLDEV_CMD_RES_SRQ_GET] = {
+		.doit = nldev_res_get_srq_doit,
+		.dump = nldev_res_get_srq_dumpit,
+	},
 	[RDMA_NLDEV_CMD_SYS_GET] = {
 		.doit = nldev_sys_get_doit,
 	},

@@ -40,11 +40,11 @@
 #include "smi.h"
 
 enum smi_action opa_smi_handle_dr_smp_recv(struct opa_smp *smp, bool is_switch,
-					   int port_num, int phys_port_cnt);
+					   u32 port_num, int phys_port_cnt);
 int opa_smi_get_fwd_port(struct opa_smp *smp);
 extern enum smi_forward_action opa_smi_check_forward_dr_smp(struct opa_smp *smp);
 extern enum smi_action opa_smi_handle_dr_smp_send(struct opa_smp *smp,
-						  bool is_switch, int port_num);
+						  bool is_switch, u32 port_num);
 
 /*
 * Return IB_SMI_HANDLE if the SMP should be handled by the local SMA/SM

@@ -112,7 +112,7 @@ static void assert_uverbs_usecnt(struct ib_uobject *uobj,
 * however the type's allocat_commit function cannot have been called and the
 * uobject cannot be on the uobjects_lists
 *
- * For RDMA_REMOVE_DESTROY the caller shold be holding a kref (eg via
+ * For RDMA_REMOVE_DESTROY the caller should be holding a kref (eg via
 * rdma_lookup_get_uobject) and the object is left in a state where the caller
 * needs to call rdma_lookup_put_uobject.
 *
@@ -916,7 +916,7 @@ static int __uverbs_cleanup_ufile(struct ib_uverbs_file *ufile,
 }
 
 /*
- * Destroy the uncontext and every uobject associated with it.
+ * Destroy the ucontext and every uobject associated with it.
 *
 * This is internally locked and can be called in parallel from multiple
 * contexts.

@@ -47,6 +47,7 @@ static const char *type2str(enum rdma_restrack_type type)
 		[RDMA_RESTRACK_MR] = "MR",
 		[RDMA_RESTRACK_CTX] = "CTX",
 		[RDMA_RESTRACK_COUNTER] = "COUNTER",
+		[RDMA_RESTRACK_SRQ] = "SRQ",
 	};
 
 	return names[type];
@@ -141,6 +142,8 @@ static struct ib_device *res_to_dev(struct rdma_restrack_entry *res)
 		return container_of(res, struct ib_ucontext, res)->device;
 	case RDMA_RESTRACK_COUNTER:
 		return container_of(res, struct rdma_counter, res)->device;
+	case RDMA_RESTRACK_SRQ:
+		return container_of(res, struct ib_srq, res)->device;
 	default:
 		WARN_ONCE(true, "Wrong resource tracking type %u\n", res->type);
 		return NULL;

@@ -70,7 +70,7 @@ struct netdev_event_work {
 };
 
 static const struct {
-	bool (*is_supported)(const struct ib_device *device, u8 port_num);
+	bool (*is_supported)(const struct ib_device *device, u32 port_num);
 	enum ib_gid_type gid_type;
 } PORT_CAP_TO_GID_TYPE[] = {
 	{rdma_protocol_roce_eth_encap, IB_GID_TYPE_ROCE},
@@ -79,7 +79,7 @@ static const struct {
 
 #define CAP_TO_GID_TABLE_SIZE	ARRAY_SIZE(PORT_CAP_TO_GID_TYPE)
 
-unsigned long roce_gid_type_mask_support(struct ib_device *ib_dev, u8 port)
+unsigned long roce_gid_type_mask_support(struct ib_device *ib_dev, u32 port)
 {
 	int i;
 	unsigned int ret_flags = 0;
@@ -96,7 +96,7 @@ unsigned long roce_gid_type_mask_support(struct ib_device *ib_dev, u8 port)
 EXPORT_SYMBOL(roce_gid_type_mask_support);
 
 static void update_gid(enum gid_op_type gid_op, struct ib_device *ib_dev,
-		       u8 port, union ib_gid *gid,
+		       u32 port, union ib_gid *gid,
 		       struct ib_gid_attr *gid_attr)
 {
 	int i;
@@ -144,7 +144,7 @@ static enum bonding_slave_state is_eth_active_slave_of_bonding_rcu(struct net_de
 #define REQUIRED_BOND_STATES (BONDING_SLAVE_STATE_ACTIVE |	\
 			      BONDING_SLAVE_STATE_NA)
 static bool
-is_eth_port_of_netdev_filter(struct ib_device *ib_dev, u8 port,
+is_eth_port_of_netdev_filter(struct ib_device *ib_dev, u32 port,
 			     struct net_device *rdma_ndev, void *cookie)
 {
 	struct net_device *real_dev;
@@ -168,7 +168,7 @@ is_eth_port_of_netdev_filter(struct ib_device *ib_dev, u8 port,
 }
 
 static bool
-is_eth_port_inactive_slave_filter(struct ib_device *ib_dev, u8 port,
+is_eth_port_inactive_slave_filter(struct ib_device *ib_dev, u32 port,
 				  struct net_device *rdma_ndev, void *cookie)
 {
 	struct net_device *master_dev;
@@ -197,7 +197,7 @@ is_eth_port_inactive_slave_filter(struct ib_device *ib_dev, u8 port,
 * considered for deriving default RoCE GID, returns false otherwise.
 */
 static bool
-is_ndev_for_default_gid_filter(struct ib_device *ib_dev, u8 port,
+is_ndev_for_default_gid_filter(struct ib_device *ib_dev, u32 port,
 			       struct net_device *rdma_ndev, void *cookie)
 {
 	struct net_device *cookie_ndev = cookie;
@@ -223,13 +223,13 @@ is_ndev_for_default_gid_filter(struct ib_device *ib_dev, u8 port,
 	return res;
 }
 
-static bool pass_all_filter(struct ib_device *ib_dev, u8 port,
+static bool pass_all_filter(struct ib_device *ib_dev, u32 port,
 			    struct net_device *rdma_ndev, void *cookie)
 {
 	return true;
 }
 
-static bool upper_device_filter(struct ib_device *ib_dev, u8 port,
+static bool upper_device_filter(struct ib_device *ib_dev, u32 port,
 				struct net_device *rdma_ndev, void *cookie)
 {
 	bool res;
@@ -260,7 +260,7 @@ static bool upper_device_filter(struct ib_device *ib_dev, u8 port,
 * not have been established as slave device yet.
 */
 static bool
-is_upper_ndev_bond_master_filter(struct ib_device *ib_dev, u8 port,
+is_upper_ndev_bond_master_filter(struct ib_device *ib_dev, u32 port,
 				 struct net_device *rdma_ndev,
 				 void *cookie)
 {
@@ -280,7 +280,7 @@ is_upper_ndev_bond_master_filter(struct ib_device *ib_dev, u8 port,
 
 static void update_gid_ip(enum gid_op_type gid_op,
 			  struct ib_device *ib_dev,
-			  u8 port, struct net_device *ndev,
+			  u32 port, struct net_device *ndev,
 			  struct sockaddr *addr)
 {
 	union ib_gid gid;
@@ -294,7 +294,7 @@ static void update_gid_ip(enum gid_op_type gid_op,
 }
 
 static void bond_delete_netdev_default_gids(struct ib_device *ib_dev,
-					    u8 port,
+					    u32 port,
 					    struct net_device *rdma_ndev,
 					    struct net_device *event_ndev)
 {
@@ -328,7 +328,7 @@ static void bond_delete_netdev_default_gids(struct ib_device *ib_dev,
 }
 
 static void enum_netdev_ipv4_ips(struct ib_device *ib_dev,
-				 u8 port, struct net_device *ndev)
+				 u32 port, struct net_device *ndev)
 {
 	const struct in_ifaddr *ifa;
 	struct in_device *in_dev;
@@ -372,7 +372,7 @@ static void enum_netdev_ipv4_ips(struct ib_device *ib_dev,
 }
 
 static void enum_netdev_ipv6_ips(struct ib_device *ib_dev,
-				 u8 port, struct net_device *ndev)
+				 u32 port, struct net_device *ndev)
 {
 	struct inet6_ifaddr *ifp;
 	struct inet6_dev *in6_dev;
@@ -417,7 +417,7 @@ static void enum_netdev_ipv6_ips(struct ib_device *ib_dev,
 	}
 }
 
-static void _add_netdev_ips(struct ib_device *ib_dev, u8 port,
+static void _add_netdev_ips(struct ib_device *ib_dev, u32 port,
 			    struct net_device *ndev)
 {
 	enum_netdev_ipv4_ips(ib_dev, port, ndev);
@@ -425,13 +425,13 @@ static void _add_netdev_ips(struct ib_device *ib_dev, u8 port,
 		enum_netdev_ipv6_ips(ib_dev, port, ndev);
 }
 
-static void add_netdev_ips(struct ib_device *ib_dev, u8 port,
+static void add_netdev_ips(struct ib_device *ib_dev, u32 port,
 			   struct net_device *rdma_ndev, void *cookie)
 {
 	_add_netdev_ips(ib_dev, port, cookie);
 }
 
-static void del_netdev_ips(struct ib_device *ib_dev, u8 port,
+static void del_netdev_ips(struct ib_device *ib_dev, u32 port,
 			   struct net_device *rdma_ndev, void *cookie)
 {
 	ib_cache_gid_del_all_netdev_gids(ib_dev, port, cookie);
@@ -446,7 +446,7 @@ static void del_netdev_ips(struct ib_device *ib_dev, u8 port,
 *
 * del_default_gids() deletes the default GIDs of the event/cookie netdevice.
 */
-static void del_default_gids(struct ib_device *ib_dev, u8 port,
+static void del_default_gids(struct ib_device *ib_dev, u32 port,
 			     struct net_device *rdma_ndev, void *cookie)
 {
 	struct net_device *cookie_ndev = cookie;
@@ -458,7 +458,7 @@ static void del_default_gids(struct ib_device *ib_dev, u8 port,
 					     IB_CACHE_GID_DEFAULT_MODE_DELETE);
 }
 
-static void add_default_gids(struct ib_device *ib_dev, u8 port,
+static void add_default_gids(struct ib_device *ib_dev, u32 port,
 			     struct net_device *rdma_ndev, void *cookie)
 {
 	struct net_device *event_ndev = cookie;
@@ -470,7 +470,7 @@ static void add_default_gids(struct ib_device *ib_dev, u8 port,
 }
 
 static void enum_all_gids_of_dev_cb(struct ib_device *ib_dev,
-				    u8 port,
+				    u32 port,
 				    struct net_device *rdma_ndev,
 				    void *cookie)
 {
@@ -515,7 +515,7 @@ void rdma_roce_rescan_device(struct ib_device *ib_dev)
 EXPORT_SYMBOL(rdma_roce_rescan_device);
 
 static void callback_for_addr_gid_device_scan(struct ib_device *device,
-					      u8 port,
+					      u32 port,
 					      struct net_device *rdma_ndev,
 					      void *cookie)
 {
@@ -547,10 +547,10 @@ static int netdev_upper_walk(struct net_device *upper,
 	return 0;
 }
 
-static void handle_netdev_upper(struct ib_device *ib_dev, u8 port,
+static void handle_netdev_upper(struct ib_device *ib_dev, u32 port,
 				void *cookie,
 				void (*handle_netdev)(struct ib_device *ib_dev,
-						      u8 port,
+						      u32 port,
 						      struct net_device *ndev))
 {
 	struct net_device *ndev = cookie;
@@ -574,25 +574,25 @@ static void handle_netdev_upper(struct ib_device *ib_dev, u8 port,
 	}
 }
 
-static void _roce_del_all_netdev_gids(struct ib_device *ib_dev, u8 port,
+static void _roce_del_all_netdev_gids(struct ib_device *ib_dev, u32 port,
 				      struct net_device *event_ndev)
 {
 	ib_cache_gid_del_all_netdev_gids(ib_dev, port, event_ndev);
 }
 
-static void del_netdev_upper_ips(struct ib_device *ib_dev, u8 port,
+static void del_netdev_upper_ips(struct ib_device *ib_dev, u32 port,
 				 struct net_device *rdma_ndev, void *cookie)
 {
 	handle_netdev_upper(ib_dev, port, cookie, _roce_del_all_netdev_gids);
 }
 
-static void add_netdev_upper_ips(struct ib_device *ib_dev, u8 port,
+static void add_netdev_upper_ips(struct ib_device *ib_dev, u32 port,
 				 struct net_device *rdma_ndev, void *cookie)
 {
 	handle_netdev_upper(ib_dev, port, cookie, _add_netdev_ips);
 }
 
-static void del_netdev_default_ips_join(struct ib_device *ib_dev, u8 port,
+static void del_netdev_default_ips_join(struct ib_device *ib_dev, u32 port,
 					struct net_device *rdma_ndev,
 					void *cookie)
 {

@@ -25,7 +25,7 @@ MODULE_PARM_DESC(force_mr, "Force usage of MRs for RDMA READ/WRITE operations");
 * registration is also enabled if registering memory might yield better
 * performance than using multiple SGE entries, see rdma_rw_io_needs_mr()
 */
-static inline bool rdma_rw_can_use_mr(struct ib_device *dev, u8 port_num)
+static inline bool rdma_rw_can_use_mr(struct ib_device *dev, u32 port_num)
 {
 	if (rdma_protocol_iwarp(dev, port_num))
 		return true;
@@ -42,7 +42,7 @@ static inline bool rdma_rw_can_use_mr(struct ib_device *dev, u8 port_num)
 * optimization otherwise. Additionally we have a debug option to force usage
 * of MRs to help testing this code path.
 */
-static inline bool rdma_rw_io_needs_mr(struct ib_device *dev, u8 port_num,
+static inline bool rdma_rw_io_needs_mr(struct ib_device *dev, u32 port_num,
 		enum dma_data_direction dir, int dma_nents)
 {
 	if (dir == DMA_FROM_DEVICE) {
@@ -87,7 +87,7 @@ static inline int rdma_rw_inv_key(struct rdma_rw_reg_ctx *reg)
 }
 
 /* Caller must have zero-initialized *reg. */
-static int rdma_rw_init_one_mr(struct ib_qp *qp, u8 port_num,
+static int rdma_rw_init_one_mr(struct ib_qp *qp, u32 port_num,
 		struct rdma_rw_reg_ctx *reg, struct scatterlist *sg,
 		u32 sg_cnt, u32 offset)
 {
@@ -121,7 +121,7 @@ static int rdma_rw_init_one_mr(struct ib_qp *qp, u8 port_num,
 }
 
 static int rdma_rw_init_mr_wrs(struct rdma_rw_ctx *ctx, struct ib_qp *qp,
-		u8 port_num, struct scatterlist *sg, u32 sg_cnt, u32 offset,
+		u32 port_num, struct scatterlist *sg, u32 sg_cnt, u32 offset,
 		u64 remote_addr, u32 rkey, enum dma_data_direction dir)
 {
 	struct rdma_rw_reg_ctx *prev = NULL;
@@ -308,7 +308,7 @@ static int rdma_rw_map_sg(struct ib_device *dev, struct scatterlist *sg,
 * Returns the number of WQEs that will be needed on the workqueue if
 * successful, or a negative error code.
 */
-int rdma_rw_ctx_init(struct rdma_rw_ctx *ctx, struct ib_qp *qp, u8 port_num,
+int rdma_rw_ctx_init(struct rdma_rw_ctx *ctx, struct ib_qp *qp, u32 port_num,
 		struct scatterlist *sg, u32 sg_cnt, u32 sg_offset,
 		u64 remote_addr, u32 rkey, enum dma_data_direction dir)
 {
@@ -377,7 +377,7 @@ EXPORT_SYMBOL(rdma_rw_ctx_init);
 * successful, or a negative error code.
 */
 int rdma_rw_ctx_signature_init(struct rdma_rw_ctx *ctx, struct ib_qp *qp,
-		u8 port_num, struct scatterlist *sg, u32 sg_cnt,
+		u32 port_num, struct scatterlist *sg, u32 sg_cnt,
 		struct scatterlist *prot_sg, u32 prot_sg_cnt,
 		struct ib_sig_attrs *sig_attrs,
 		u64 remote_addr, u32 rkey, enum dma_data_direction dir)
@@ -505,7 +505,7 @@ static void rdma_rw_update_lkey(struct rdma_rw_reg_ctx *reg, bool need_inval)
 * completion notification.
 */
 struct ib_send_wr *rdma_rw_ctx_wrs(struct rdma_rw_ctx *ctx, struct ib_qp *qp,
-		u8 port_num, struct ib_cqe *cqe, struct ib_send_wr *chain_wr)
+		u32 port_num, struct ib_cqe *cqe, struct ib_send_wr *chain_wr)
 {
 	struct ib_send_wr *first_wr, *last_wr;
 	int i;
@@ -562,7 +562,7 @@ EXPORT_SYMBOL(rdma_rw_ctx_wrs);
 * is not set @cqe must be set so that the caller gets a completion
 * notification.
 */
-int rdma_rw_ctx_post(struct rdma_rw_ctx *ctx, struct ib_qp *qp, u8 port_num,
+int rdma_rw_ctx_post(struct rdma_rw_ctx *ctx, struct ib_qp *qp, u32 port_num,
 		struct ib_cqe *cqe, struct ib_send_wr *chain_wr)
 {
 	struct ib_send_wr *first_wr;
@@ -581,8 +581,9 @@ EXPORT_SYMBOL(rdma_rw_ctx_post);
 * @sg_cnt:	number of entries in @sg
 * @dir:	%DMA_TO_DEVICE for RDMA WRITE, %DMA_FROM_DEVICE for RDMA READ
 */
-void rdma_rw_ctx_destroy(struct rdma_rw_ctx *ctx, struct ib_qp *qp, u8 port_num,
-		struct scatterlist *sg, u32 sg_cnt, enum dma_data_direction dir)
+void rdma_rw_ctx_destroy(struct rdma_rw_ctx *ctx, struct ib_qp *qp,
+			 u32 port_num, struct scatterlist *sg, u32 sg_cnt,
+			 enum dma_data_direction dir)
 {
 	int i;
 
@@ -620,7 +621,7 @@ EXPORT_SYMBOL(rdma_rw_ctx_destroy);
 * @dir:	%DMA_TO_DEVICE for RDMA WRITE, %DMA_FROM_DEVICE for RDMA READ
 */
 void rdma_rw_ctx_destroy_signature(struct rdma_rw_ctx *ctx, struct ib_qp *qp,
-		u8 port_num, struct scatterlist *sg, u32 sg_cnt,
+		u32 port_num, struct scatterlist *sg, u32 sg_cnt,
 		struct scatterlist *prot_sg, u32 prot_sg_cnt,
 		enum dma_data_direction dir)
 {
@@ -647,7 +648,7 @@ EXPORT_SYMBOL(rdma_rw_ctx_destroy_signature);
 * compute max_rdma_ctxts and the size of the transport's Send and
 * Send Completion Queues.
 */
-unsigned int rdma_rw_mr_factor(struct ib_device *device, u8 port_num,
+unsigned int rdma_rw_mr_factor(struct ib_device *device, u32 port_num,
 			       unsigned int maxpages)
 {
 	unsigned int mr_pages;

@@ -49,7 +49,7 @@ static inline void ib_sa_client_put(struct ib_sa_client *client)
 }
 
 int ib_sa_mcmember_rec_query(struct ib_sa_client *client,
-			     struct ib_device *device, u8 port_num, u8 method,
+			     struct ib_device *device, u32 port_num, u8 method,
 			     struct ib_sa_mcmember_rec *rec,
 			     ib_sa_comp_mask comp_mask,
 			     unsigned long timeout_ms, gfp_t gfp_mask,

@@ -95,7 +95,7 @@ struct ib_sa_port {
 	struct delayed_work ib_cpi_work;
 	spinlock_t classport_lock; /* protects class port info set */
 	spinlock_t ah_lock;
-	u8 port_num;
+	u32 port_num;
 };
 
 struct ib_sa_device {
@@ -1194,7 +1194,7 @@ void ib_sa_cancel_query(int id, struct ib_sa_query *query)
 }
 EXPORT_SYMBOL(ib_sa_cancel_query);
 
-static u8 get_src_path_mask(struct ib_device *device, u8 port_num)
+static u8 get_src_path_mask(struct ib_device *device, u32 port_num)
 {
 	struct ib_sa_device *sa_dev;
 	struct ib_sa_port *port;
@@ -1213,7 +1213,7 @@ static u8 get_src_path_mask(struct ib_device *device, u8 port_num)
 	return src_path_mask;
 }
 
-static int init_ah_attr_grh_fields(struct ib_device *device, u8 port_num,
+static int init_ah_attr_grh_fields(struct ib_device *device, u32 port_num,
 				   struct sa_path_rec *rec,
 				   struct rdma_ah_attr *ah_attr,
 				   const struct ib_gid_attr *gid_attr)
@@ -1251,7 +1251,7 @@ static int init_ah_attr_grh_fields(struct ib_device *device, u8 port_num,
 * User must invoke rdma_destroy_ah_attr() to release reference to SGID
 * attributes which are initialized using ib_init_ah_attr_from_path().
 */
-int ib_init_ah_attr_from_path(struct ib_device *device, u8 port_num,
+int ib_init_ah_attr_from_path(struct ib_device *device, u32 port_num,
 			      struct sa_path_rec *rec,
 			      struct rdma_ah_attr *ah_attr,
 			      const struct ib_gid_attr *gid_attr)
@@ -1409,7 +1409,7 @@ EXPORT_SYMBOL(ib_sa_pack_path);
 
 static bool ib_sa_opa_pathrecord_support(struct ib_sa_client *client,
 					 struct ib_sa_device *sa_dev,
-					 u8 port_num)
+					 u32 port_num)
 {
 	struct ib_sa_port *port;
 	unsigned long flags;
@@ -1444,7 +1444,7 @@ enum opa_pr_supported {
 */
 static int opa_pr_query_possible(struct ib_sa_client *client,
 				 struct ib_sa_device *sa_dev,
-				 struct ib_device *device, u8 port_num,
+				 struct ib_device *device, u32 port_num,
 				 struct sa_path_rec *rec)
 {
 	struct ib_port_attr port_attr;
@@ -1533,7 +1533,7 @@ static void ib_sa_path_rec_release(struct ib_sa_query *sa_query)
 * the query.
 */
 int ib_sa_path_rec_get(struct ib_sa_client *client,
-		       struct ib_device *device, u8 port_num,
+		       struct ib_device *device, u32 port_num,
 		       struct sa_path_rec *rec,
 		       ib_sa_comp_mask comp_mask,
 		       unsigned long timeout_ms, gfp_t gfp_mask,
@@ -1688,7 +1688,7 @@ static void ib_sa_service_rec_release(struct ib_sa_query *sa_query)
 * the query.
 */
 int ib_sa_service_rec_query(struct ib_sa_client *client,
-			    struct ib_device *device, u8 port_num, u8 method,
+			    struct ib_device *device, u32 port_num, u8 method,
 			    struct ib_sa_service_rec *rec,
 			    ib_sa_comp_mask comp_mask,
 			    unsigned long timeout_ms, gfp_t gfp_mask,
@@ -1784,7 +1784,7 @@ static void ib_sa_mcmember_rec_release(struct ib_sa_query *sa_query)
 }
 
 int ib_sa_mcmember_rec_query(struct ib_sa_client *client,
-			     struct ib_device *device, u8 port_num,
+			     struct ib_device *device, u32 port_num,
 			     u8 method,
 			     struct ib_sa_mcmember_rec *rec,
 			     ib_sa_comp_mask comp_mask,
@@ -1876,7 +1876,7 @@ static void ib_sa_guidinfo_rec_release(struct ib_sa_query *sa_query)
 }
 
 int ib_sa_guid_info_rec_query(struct ib_sa_client *client,
-			      struct ib_device *device, u8 port_num,
+			      struct ib_device *device, u32 port_num,
 			      struct ib_sa_guidinfo_rec *rec,
 			      ib_sa_comp_mask comp_mask, u8 method,
 			      unsigned long timeout_ms, gfp_t gfp_mask,
@@ -2265,7 +2265,7 @@ static void ib_sa_event(struct ib_event_handler *handler,
 	unsigned long flags;
 	struct ib_sa_device *sa_dev =
		container_of(handler, typeof(*sa_dev), event_handler);
-	u8 port_num = event->element.port_num - sa_dev->start_port;
+	u32 port_num = event->element.port_num - sa_dev->start_port;
 	struct ib_sa_port *port = &sa_dev->port[port_num];
 
 	if (!rdma_cap_ib_sa(handler->device, port->port_num))

@@ -193,7 +193,7 @@ static void qp_to_error(struct ib_qp_security *sec)
 
 static inline void check_pkey_qps(struct pkey_index_qp_list *pkey,
 				  struct ib_device *device,
-				  u8 port_num,
+				  u32 port_num,
 				  u64 subnet_prefix)
 {
 	struct ib_port_pkey *pp, *tmp_pp;
@@ -245,7 +245,7 @@ static int port_pkey_list_insert(struct ib_port_pkey *pp)
 	struct pkey_index_qp_list *tmp_pkey;
 	struct pkey_index_qp_list *pkey;
 	struct ib_device *dev;
-	u8 port_num = pp->port_num;
+	u32 port_num = pp->port_num;
 	int ret = 0;
 
 	if (pp->state != IB_PORT_PKEY_VALID)
@@ -538,7 +538,7 @@ void ib_destroy_qp_security_end(struct ib_qp_security *sec)
 }
 
 void ib_security_cache_change(struct ib_device *device,
-			      u8 port_num,
+			      u32 port_num,
 			      u64 subnet_prefix)
 {
 	struct pkey_index_qp_list *pkey;
@@ -649,7 +649,7 @@ int ib_security_modify_qp(struct ib_qp *qp,
 }
 
 static int ib_security_pkey_access(struct ib_device *dev,
-				   u8 port_num,
+				   u32 port_num,
 				   u16 pkey_index,
 				   void *sec)
 {

@@ -41,7 +41,7 @@
 #include "smi.h"
 #include "opa_smi.h"
 
-static enum smi_action __smi_handle_dr_smp_send(bool is_switch, int port_num,
+static enum smi_action __smi_handle_dr_smp_send(bool is_switch, u32 port_num,
						u8 *hop_ptr, u8 hop_cnt,
						const u8 *initial_path,
						const u8 *return_path,
@@ -127,7 +127,7 @@ static enum smi_action __smi_handle_dr_smp_send(bool is_switch, int port_num,
 * Return IB_SMI_DISCARD if the SMP should be discarded
 */
 enum smi_action smi_handle_dr_smp_send(struct ib_smp *smp,
-				       bool is_switch, int port_num)
+				       bool is_switch, u32 port_num)
 {
 	return __smi_handle_dr_smp_send(is_switch, port_num,
					&smp->hop_ptr, smp->hop_cnt,
@@ -139,7 +139,7 @@ enum smi_action smi_handle_dr_smp_send(struct ib_smp *smp,
 }
 
 enum smi_action opa_smi_handle_dr_smp_send(struct opa_smp *smp,
-				       bool is_switch, int port_num)
+				       bool is_switch, u32 port_num)
 {
 	return __smi_handle_dr_smp_send(is_switch, port_num,
					&smp->hop_ptr, smp->hop_cnt,
@@ -152,7 +152,7 @@ enum smi_action opa_smi_handle_dr_smp_send(struct opa_smp *smp,
					OPA_LID_PERMISSIVE);
 }
 
-static enum smi_action __smi_handle_dr_smp_recv(bool is_switch, int port_num,
+static enum smi_action __smi_handle_dr_smp_recv(bool is_switch, u32 port_num,
						int phys_port_cnt,
						u8 *hop_ptr, u8 hop_cnt,
						const u8 *initial_path,
@@ -238,7 +238,7 @@ static enum smi_action __smi_handle_dr_smp_recv(bool is_switch, int port_num,
 * Return IB_SMI_DISCARD if the SMP should be dropped
 */
 enum smi_action smi_handle_dr_smp_recv(struct ib_smp *smp, bool is_switch,
-				       int port_num, int phys_port_cnt)
+				       u32 port_num, int phys_port_cnt)
 {
 	return __smi_handle_dr_smp_recv(is_switch, port_num, phys_port_cnt,
					&smp->hop_ptr, smp->hop_cnt,
@@ -254,7 +254,7 @@ enum smi_action smi_handle_dr_smp_recv(struct ib_smp *smp, bool is_switch,
 * Return IB_SMI_DISCARD if the SMP should be dropped
 */
 enum smi_action opa_smi_handle_dr_smp_recv(struct opa_smp *smp, bool is_switch,
-					   int port_num, int phys_port_cnt)
+					   u32 port_num, int phys_port_cnt)
 {
 	return __smi_handle_dr_smp_recv(is_switch, port_num, phys_port_cnt,
					&smp->hop_ptr, smp->hop_cnt,

@@ -52,11 +52,11 @@ enum smi_forward_action {
 };
 
 enum smi_action smi_handle_dr_smp_recv(struct ib_smp *smp, bool is_switch,
-				       int port_num, int phys_port_cnt);
+				       u32 port_num, int phys_port_cnt);
 int smi_get_fwd_port(struct ib_smp *smp);
 extern enum smi_forward_action smi_check_forward_dr_smp(struct ib_smp *smp);
 extern enum smi_action smi_handle_dr_smp_send(struct ib_smp *smp,
-					      bool is_switch, int port_num);
+					      bool is_switch, u32 port_num);
 
 /*
 * Return IB_SMI_HANDLE if the SMP should be handled by the local SMA/SM

@@ -62,7 +62,7 @@ struct ib_port {
 	const struct attribute_group *pma_table;
 	struct attribute_group *hw_stats_ag;
 	struct rdma_hw_stats *hw_stats;
-	u8 port_num;
+	u32 port_num;
 };
 
 struct port_attribute {
@@ -94,7 +94,7 @@ struct hw_stats_attribute {
			 const char *buf,
			 size_t count);
 	int index;
-	u8 port_num;
+	u32 port_num;
 };
 
 static ssize_t port_attr_show(struct kobject *kobj,
@@ -297,7 +297,7 @@ static ssize_t rate_show(struct ib_port *p, struct port_attribute *unused,
 
 static const char *phys_state_to_str(enum ib_port_phys_state phys_state)
 {
-	static const char * phys_state_str[] = {
+	static const char *phys_state_str[] = {
		"<unknown>",
		"Sleep",
		"Polling",
@@ -470,14 +470,14 @@ static ssize_t show_port_pkey(struct ib_port *p, struct port_attribute *attr,
 struct port_table_attribute port_pma_attr_##_name = {			\
	.attr  = __ATTR(_name, S_IRUGO, show_pma_counter, NULL),	\
	.index = (_offset) | ((_width) << 16) | ((_counter) << 24),	\
-	.attr_id = IB_PMA_PORT_COUNTERS ,				\
+	.attr_id = IB_PMA_PORT_COUNTERS,				\
 }
 
 #define PORT_PMA_ATTR_EXT(_name, _width, _offset)			\
 struct port_table_attribute port_pma_attr_ext_##_name = {		\
	.attr  = __ATTR(_name, S_IRUGO, show_pma_counter, NULL),	\
	.index = (_offset) | ((_width) << 16),				\
-	.attr_id = IB_PMA_PORT_COUNTERS_EXT ,				\
+	.attr_id = IB_PMA_PORT_COUNTERS_EXT,				\
 }
 
 /*
@@ -812,7 +812,7 @@ static const struct attribute_group *get_counter_table(struct ib_device *dev,
 }
 
 static int update_hw_stats(struct ib_device *dev, struct rdma_hw_stats *stats,
-			   u8 port_num, int index)
+			   u32 port_num, int index)
 {
 	int ret;
 
@@ -938,7 +938,7 @@ static void free_hsag(struct kobject *kobj, struct attribute_group *attr_group)
 	kfree(attr_group);
 }
 
-static struct attribute *alloc_hsa(int index, u8 port_num, const char *name)
+static struct attribute *alloc_hsa(int index, u32 port_num, const char *name)
 {
 	struct hw_stats_attribute *hsa;
 
@@ -956,7 +956,7 @@ static struct attribute *alloc_hsa(int index, u8 port_num, const char *name)
 	return &hsa->attr;
 }
 
-static struct attribute *alloc_hsa_lifespan(char *name, u8 port_num)
+static struct attribute *alloc_hsa_lifespan(char *name, u32 port_num)
 {
 	struct hw_stats_attribute *hsa;
 
@@ -975,7 +975,7 @@ static struct attribute *alloc_hsa_lifespan(char *name, u8 port_num)
 }
 
 static void setup_hw_stats(struct ib_device *device, struct ib_port *port,
-			   u8 port_num)
+			   u32 port_num)
 {
 	struct attribute_group *hsag;
 	struct rdma_hw_stats *stats;
@@ -1049,7 +1049,6 @@ static void setup_hw_stats(struct ib_device *device, struct ib_port *port,
 	kfree(hsag);
 err_free_stats:
 	kfree(stats);
-	return;
 }
 
 static int add_port(struct ib_core_device *coredev, int port_num)
@@ -1075,9 +1074,8 @@ static int add_port(struct ib_core_device *coredev, int port_num)
 	ret = kobject_init_and_add(&p->kobj, &port_type,
				   coredev->ports_kobj,
				   "%d", port_num);
-	if (ret) {
+	if (ret)
		goto err_put;
-	}
 
 	p->gid_attr_group = kzalloc(sizeof(*p->gid_attr_group), GFP_KERNEL);
 	if (!p->gid_attr_group) {
@@ -1088,9 +1086,8 @@ static int add_port(struct ib_core_device *coredev, int port_num)
 	p->gid_attr_group->port = p;
 	ret = kobject_init_and_add(&p->gid_attr_group->kobj, &gid_attr_type,
				   &p->kobj, "gid_attrs");
-	if (ret) {
+	if (ret)
		goto err_put_gid_attrs;
-	}
 
 	if (device->ops.process_mad && is_full_dev) {
		p->pma_table = get_counter_table(device, port_num);
@@ -1383,7 +1380,7 @@ void ib_free_port_attrs(struct ib_core_device *coredev)
 int ib_setup_port_attrs(struct ib_core_device *coredev)
 {
 	struct ib_device *device = rdma_device_to_ibdev(&coredev->dev);
-	unsigned int port;
+	u32 port;
 	int ret;
 
 	coredev->ports_kobj = kobject_create_and_add("ports",
@@ -1437,7 +1434,7 @@ void ib_device_unregister_sysfs(struct ib_device *device)
 * @ktype: pointer to the ktype for this kobject.
 * @name: the name of the kobject
 */
-int ib_port_register_module_stat(struct ib_device *device, u8 port_num,
+int ib_port_register_module_stat(struct ib_device *device, u32 port_num,
				 struct kobject *kobj, struct kobj_type *ktype,
				 const char *name)
 {

@@ -231,7 +231,7 @@ static void ucma_copy_conn_event(struct rdma_ucm_conn_param *dst,
		memcpy(dst->private_data, src->private_data,
		       src->private_data_len);
 	dst->private_data_len = src->private_data_len;
-	dst->responder_resources =src->responder_resources;
+	dst->responder_resources = src->responder_resources;
 	dst->initiator_depth = src->initiator_depth;
 	dst->flow_control = src->flow_control;
 	dst->retry_count = src->retry_count;
@@ -1034,7 +1034,7 @@ static void ucma_copy_conn_param(struct rdma_cm_id *id,
 {
 	dst->private_data = src->private_data;
 	dst->private_data_len = src->private_data_len;
-	dst->responder_resources =src->responder_resources;
+	dst->responder_resources = src->responder_resources;
 	dst->initiator_depth = src->initiator_depth;
 	dst->flow_control = src->flow_control;
 	dst->retry_count = src->retry_count;
@@ -1708,8 +1708,8 @@ static ssize_t ucma_write(struct file *filp, const char __user *buf,
 	ssize_t ret;
 
 	if (!ib_safe_file_access(filp)) {
-		pr_err_once("ucma_write: process %d (%s) changed security contexts after opening file descriptor, this is not allowed.\n",
-			    task_tgid_vnr(current), current->comm);
+		pr_err_once("%s: process %d (%s) changed security contexts after opening file descriptor, this is not allowed.\n",
+			    __func__, task_tgid_vnr(current), current->comm);
		return -EACCES;
	}
 

@@ -100,10 +100,6 @@ unsigned long ib_umem_find_best_pgsz(struct ib_umem *umem,
	 */
 	pgsz_bitmap &= GENMASK(BITS_PER_LONG - 1, PAGE_SHIFT);
 
-	/* At minimum, drivers must support PAGE_SIZE or smaller */
-	if (WARN_ON(!(pgsz_bitmap & GENMASK(PAGE_SHIFT, 0))))
-		return 0;
-
 	umem->iova = va = virt;
 	/* The best result is the smallest page size that results in the minimum
	 * number of required pages. Compute the largest page size that could
@@ -309,8 +305,8 @@ int ib_umem_copy_from(void *dst, struct ib_umem *umem, size_t offset,
 	int ret;
 
 	if (offset > umem->length || length > umem->length - offset) {
-		pr_err("ib_umem_copy_from not in range. offset: %zd umem length: %zd end: %zd\n",
-		       offset, umem->length, end);
+		pr_err("%s not in range. offset: %zd umem length: %zd end: %zd\n",
+		       __func__, offset, umem->length, end);
		return -EINVAL;
	}
 

@@ -168,6 +168,10 @@ void ib_umem_dmabuf_release(struct ib_umem_dmabuf *umem_dmabuf)
 {
 	struct dma_buf *dmabuf = umem_dmabuf->attach->dmabuf;
 
+	dma_resv_lock(dmabuf->resv, NULL);
+	ib_umem_dmabuf_unmap_pages(umem_dmabuf);
+	dma_resv_unlock(dmabuf->resv);
+
 	dma_buf_detach(dmabuf, umem_dmabuf->attach);
 	dma_buf_put(dmabuf);
 	kfree(umem_dmabuf);

@@ -101,7 +101,7 @@ struct ib_umad_port {
 	struct ib_device      *ib_dev;
 	struct ib_umad_device *umad_dev;
 	int                    dev_num;
-	u8                     port_num;
+	u32                    port_num;
 };
 
 struct ib_umad_device {
@@ -165,8 +165,8 @@ static void ib_umad_dev_put(struct ib_umad_device *dev)
 
 static int hdr_size(struct ib_umad_file *file)
 {
-	return file->use_pkey_index ? sizeof (struct ib_user_mad_hdr) :
-				      sizeof (struct ib_user_mad_hdr_old);
+	return file->use_pkey_index ? sizeof(struct ib_user_mad_hdr) :
+				      sizeof(struct ib_user_mad_hdr_old);
 }
 
 /* caller must hold file->mutex */
@@ -688,8 +688,7 @@ static int ib_umad_reg_agent(struct ib_umad_file *file, void __user *arg,
 	mutex_lock(&file->mutex);
 
 	if (!file->port->ib_dev) {
-		dev_notice(&file->port->dev,
-			   "ib_umad_reg_agent: invalid device\n");
+		dev_notice(&file->port->dev, "%s: invalid device\n", __func__);
		ret = -EPIPE;
		goto out;
	}
@@ -701,7 +700,7 @@ static int ib_umad_reg_agent(struct ib_umad_file *file, void __user *arg,
 
 	if (ureq.qpn != 0 && ureq.qpn != 1) {
		dev_notice(&file->port->dev,
-			   "ib_umad_reg_agent: invalid QPN %d specified\n",
+			   "%s: invalid QPN %d specified\n", __func__,
			   ureq.qpn);
		ret = -EINVAL;
		goto out;
@@ -711,9 +710,9 @@ static int ib_umad_reg_agent(struct ib_umad_file *file, void __user *arg,
		if (!__get_agent(file, agent_id))
			goto found;
 
-	dev_notice(&file->port->dev,
-		   "ib_umad_reg_agent: Max Agents (%u) reached\n",
+	dev_notice(&file->port->dev, "%s: Max Agents (%u) reached\n", __func__,
		   IB_UMAD_MAX_AGENTS);
+
 	ret = -ENOMEM;
 	goto out;
 
@@ -790,8 +789,7 @@ static int ib_umad_reg_agent2(struct ib_umad_file *file, void __user *arg)
 	mutex_lock(&file->mutex);
 
 	if (!file->port->ib_dev) {
-		dev_notice(&file->port->dev,
-			   "ib_umad_reg_agent2: invalid device\n");
+		dev_notice(&file->port->dev, "%s: invalid device\n", __func__);
		ret = -EPIPE;
		goto out;
	}
@@ -802,17 +800,16 @@ static int ib_umad_reg_agent2(struct ib_umad_file *file, void __user *arg)
	}
 
 	if (ureq.qpn != 0 && ureq.qpn != 1) {
-		dev_notice(&file->port->dev,
-			   "ib_umad_reg_agent2: invalid QPN %d specified\n",
-			   ureq.qpn);
+		dev_notice(&file->port->dev, "%s: invalid QPN %d specified\n",
+			   __func__, ureq.qpn);
		ret = -EINVAL;
		goto out;
	}
 
 	if (ureq.flags & ~IB_USER_MAD_REG_FLAGS_CAP) {
		dev_notice(&file->port->dev,
-			   "ib_umad_reg_agent2 failed: invalid registration flags specified 0x%x; supported 0x%x\n",
-			   ureq.flags, IB_USER_MAD_REG_FLAGS_CAP);
+			   "%s failed: invalid registration flags specified 0x%x; supported 0x%x\n",
+			   __func__, ureq.flags, IB_USER_MAD_REG_FLAGS_CAP);
		ret = -EINVAL;
 
		if (put_user((u32)IB_USER_MAD_REG_FLAGS_CAP,
@@ -827,8 +824,7 @@ static int ib_umad_reg_agent2(struct ib_umad_file *file, void __user *arg)
		if (!__get_agent(file, agent_id))
			goto found;
 
-	dev_notice(&file->port->dev,
-		   "ib_umad_reg_agent2: Max Agents (%u) reached\n",
+	dev_notice(&file->port->dev, "%s: Max Agents (%u) reached\n", __func__,
		   IB_UMAD_MAX_AGENTS);
 	ret = -ENOMEM;
 	goto out;
@@ -840,7 +836,7 @@ static int ib_umad_reg_agent2(struct ib_umad_file *file, void __user *arg)
 	req.mgmt_class_version = ureq.mgmt_class_version;
 	if (ureq.oui & 0xff000000) {
		dev_notice(&file->port->dev,
-			   "ib_umad_reg_agent2 failed: oui invalid 0x%08x\n",
+			   "%s failed: oui invalid 0x%08x\n", __func__,
			   ureq.oui);
		ret = -EINVAL;
		goto out;
@@ -1145,7 +1141,7 @@ static const struct file_operations umad_sm_fops = {
 
 static struct ib_umad_port *get_port(struct ib_device *ibdev,
				     struct ib_umad_device *umad_dev,
-				     unsigned int port)
+				     u32 port)
 {
 	if (!umad_dev)
		return ERR_PTR(-EOPNOTSUPP);

@@ -364,7 +364,7 @@ static void copy_query_dev_fields(struct ib_ucontext *ucontext,
 	resp->max_srq_sge = attr->max_srq_sge;
 	resp->max_pkeys = attr->max_pkeys;
 	resp->local_ca_ack_delay = attr->local_ca_ack_delay;
-	resp->phys_port_cnt = ib_dev->phys_port_cnt;
+	resp->phys_port_cnt = min_t(u32, ib_dev->phys_port_cnt, U8_MAX);
 }
 
 static int ib_uverbs_query_device(struct uverbs_attr_bundle *attrs)
 
@@ -2002,12 +2002,13 @@ static int ib_uverbs_destroy_qp(struct uverbs_attr_bundle *attrs)
 
 static void *alloc_wr(size_t wr_size, __u32 num_sge)
 {
-	if (num_sge >= (U32_MAX - ALIGN(wr_size, sizeof (struct ib_sge))) /
-		       sizeof (struct ib_sge))
+	if (num_sge >= (U32_MAX - ALIGN(wr_size, sizeof(struct ib_sge))) /
+		       sizeof(struct ib_sge))
 		return NULL;
 
-	return kmalloc(ALIGN(wr_size, sizeof (struct ib_sge)) +
-		       num_sge * sizeof (struct ib_sge), GFP_KERNEL);
+	return kmalloc(ALIGN(wr_size, sizeof(struct ib_sge)) +
+		       num_sge * sizeof(struct ib_sge),
+		       GFP_KERNEL);
 }
 
 static int ib_uverbs_post_send(struct uverbs_attr_bundle *attrs)
 
@@ -2216,7 +2217,7 @@ ib_uverbs_unmarshall_recv(struct uverbs_req_iter *iter, u32 wr_count,
 	const struct ib_sge __user *sgls;
 	const void __user *wqes;
 
-	if (wqe_size < sizeof (struct ib_uverbs_recv_wr))
+	if (wqe_size < sizeof(struct ib_uverbs_recv_wr))
 		return ERR_PTR(-EINVAL);
 
 	wqes = uverbs_request_next_ptr(iter, wqe_size * wr_count);
 
@@ -2249,14 +2250,14 @@ ib_uverbs_unmarshall_recv(struct uverbs_req_iter *iter, u32 wr_count,
 		}
 
 		if (user_wr->num_sge >=
-		    (U32_MAX - ALIGN(sizeof *next, sizeof (struct ib_sge))) /
-		    sizeof (struct ib_sge)) {
+		    (U32_MAX - ALIGN(sizeof(*next), sizeof(struct ib_sge))) /
+		    sizeof(struct ib_sge)) {
 			ret = -EINVAL;
 			goto err;
 		}
 
-		next = kmalloc(ALIGN(sizeof *next, sizeof (struct ib_sge)) +
-			       user_wr->num_sge * sizeof (struct ib_sge),
+		next = kmalloc(ALIGN(sizeof(*next), sizeof(struct ib_sge)) +
			       user_wr->num_sge * sizeof(struct ib_sge),
 			       GFP_KERNEL);
 		if (!next) {
 			ret = -ENOMEM;
 
@@ -2274,8 +2275,8 @@ ib_uverbs_unmarshall_recv(struct uverbs_req_iter *iter, u32 wr_count,
 		next->num_sge    = user_wr->num_sge;
 
 		if (next->num_sge) {
-			next->sg_list = (void *) next +
-				ALIGN(sizeof *next, sizeof (struct ib_sge));
+			next->sg_list = (void *)next +
+				ALIGN(sizeof(*next), sizeof(struct ib_sge));
 			if (copy_from_user(next->sg_list, sgls + sg_ind,
 					   next->num_sge *
 						   sizeof(struct ib_sge))) {
 
@@ -752,9 +752,10 @@ int uverbs_output_written(const struct uverbs_attr_bundle *bundle, size_t idx)
 	return uverbs_set_output(bundle, attr);
 }
 
-int _uverbs_get_const(s64 *to, const struct uverbs_attr_bundle *attrs_bundle,
-		      size_t idx, s64 lower_bound, u64 upper_bound,
-		      s64 *def_val)
+int _uverbs_get_const_signed(s64 *to,
+			     const struct uverbs_attr_bundle *attrs_bundle,
+			     size_t idx, s64 lower_bound, u64 upper_bound,
+			     s64 *def_val)
 {
 	const struct uverbs_attr *attr;
 
@@ -773,7 +774,30 @@ int _uverbs_get_const(s64 *to, const struct uverbs_attr_bundle *attrs_bundle,
 
 	return 0;
 }
-EXPORT_SYMBOL(_uverbs_get_const);
+EXPORT_SYMBOL(_uverbs_get_const_signed);
+
+int _uverbs_get_const_unsigned(u64 *to,
+			       const struct uverbs_attr_bundle *attrs_bundle,
+			       size_t idx, u64 upper_bound, u64 *def_val)
+{
+	const struct uverbs_attr *attr;
+
+	attr = uverbs_attr_get(attrs_bundle, idx);
+	if (IS_ERR(attr)) {
+		if ((PTR_ERR(attr) != -ENOENT) || !def_val)
+			return PTR_ERR(attr);
+
+		*to = *def_val;
+	} else {
+		*to = attr->ptr_attr.data;
+	}
+
+	if (*to > upper_bound)
+		return -EINVAL;
+
+	return 0;
+}
+EXPORT_SYMBOL(_uverbs_get_const_unsigned);
 
 int uverbs_copy_to_struct_or_zero(const struct uverbs_attr_bundle *bundle,
 				  size_t idx, const void *from, size_t size)
 
@@ -96,10 +96,10 @@ static const char * const wc_statuses[] = {
 	[IB_WC_LOC_EEC_OP_ERR] = "local EE context operation error",
 	[IB_WC_LOC_PROT_ERR] = "local protection error",
 	[IB_WC_WR_FLUSH_ERR] = "WR flushed",
-	[IB_WC_MW_BIND_ERR] = "memory management operation error",
+	[IB_WC_MW_BIND_ERR] = "memory bind operation error",
 	[IB_WC_BAD_RESP_ERR] = "bad response error",
 	[IB_WC_LOC_ACCESS_ERR] = "local access error",
-	[IB_WC_REM_INV_REQ_ERR] = "invalid request error",
+	[IB_WC_REM_INV_REQ_ERR] = "remote invalid request error",
 	[IB_WC_REM_ACCESS_ERR] = "remote access error",
 	[IB_WC_REM_OP_ERR] = "remote operation error",
 	[IB_WC_RETRY_EXC_ERR] = "transport retry counter exceeded",
 
@@ -227,7 +227,8 @@ rdma_node_get_transport(unsigned int node_type)
 }
 EXPORT_SYMBOL(rdma_node_get_transport);
 
-enum rdma_link_layer rdma_port_get_link_layer(struct ib_device *device, u8 port_num)
+enum rdma_link_layer rdma_port_get_link_layer(struct ib_device *device,
+					      u32 port_num)
 {
 	enum rdma_transport_type lt;
 	if (device->ops.get_link_layer)
 
@@ -341,7 +342,8 @@ int ib_dealloc_pd_user(struct ib_pd *pd, struct ib_udata *udata)
 	}
 
 	/* uverbs manipulates usecnt with proper locking, while the kabi
-	   requires the caller to guarantee we can't race here. */
+	 * requires the caller to guarantee we can't race here.
+	 */
 	WARN_ON(atomic_read(&pd->usecnt));
 
 	ret = pd->device->ops.dealloc_pd(pd, udata);
 
@@ -658,7 +660,7 @@ int ib_get_rdma_header_version(const union rdma_network_hdr *hdr)
 EXPORT_SYMBOL(ib_get_rdma_header_version);
 
 static enum rdma_network_type ib_get_net_type_by_grh(struct ib_device *device,
-						     u8 port_num,
+						     u32 port_num,
 						     const struct ib_grh *grh)
 {
 	int grh_version;
 
@@ -701,7 +703,7 @@ static bool find_gid_index(const union ib_gid *gid,
 }
 
 static const struct ib_gid_attr *
-get_sgid_attr_from_eth(struct ib_device *device, u8 port_num,
+get_sgid_attr_from_eth(struct ib_device *device, u32 port_num,
 		       u16 vlan_id, const union ib_gid *sgid,
 		       enum ib_gid_type gid_type)
 {
 
@@ -788,7 +790,7 @@ static int ib_resolve_unicast_gid_dmac(struct ib_device *device,
  * On success the caller is responsible to call rdma_destroy_ah_attr on the
  * attr.
  */
-int ib_init_ah_attr_from_wc(struct ib_device *device, u8 port_num,
+int ib_init_ah_attr_from_wc(struct ib_device *device, u32 port_num,
 			    const struct ib_wc *wc, const struct ib_grh *grh,
 			    struct rdma_ah_attr *ah_attr)
 {
 
@@ -919,7 +921,7 @@ void rdma_destroy_ah_attr(struct rdma_ah_attr *ah_attr)
 EXPORT_SYMBOL(rdma_destroy_ah_attr);
 
 struct ib_ah *ib_create_ah_from_wc(struct ib_pd *pd, const struct ib_wc *wc,
-				   const struct ib_grh *grh, u8 port_num)
+				   const struct ib_grh *grh, u32 port_num)
 {
 	struct rdma_ah_attr ah_attr;
 	struct ib_ah *ah;
 
@@ -1037,8 +1039,12 @@ struct ib_srq *ib_create_srq_user(struct ib_pd *pd,
 	}
 	atomic_inc(&pd->usecnt);
 
+	rdma_restrack_new(&srq->res, RDMA_RESTRACK_SRQ);
+	rdma_restrack_parent_name(&srq->res, &pd->res);
+
 	ret = pd->device->ops.create_srq(srq, srq_init_attr, udata);
 	if (ret) {
+		rdma_restrack_put(&srq->res);
 		atomic_dec(&srq->pd->usecnt);
 		if (srq->srq_type == IB_SRQT_XRC)
 			atomic_dec(&srq->ext.xrc.xrcd->usecnt);
 
@@ -1048,6 +1054,8 @@ struct ib_srq *ib_create_srq_user(struct ib_pd *pd,
 		return ERR_PTR(ret);
 	}
 
+	rdma_restrack_add(&srq->res);
+
 	return srq;
 }
 EXPORT_SYMBOL(ib_create_srq_user);
 
@@ -1086,6 +1094,7 @@ int ib_destroy_srq_user(struct ib_srq *srq, struct ib_udata *udata)
 		atomic_dec(&srq->ext.xrc.xrcd->usecnt);
 	if (ib_srq_has_cq(srq->srq_type))
 		atomic_dec(&srq->ext.cq->usecnt);
+	rdma_restrack_del(&srq->res);
 	kfree(srq);
 
 	return ret;
 
@@ -1673,7 +1682,7 @@ static bool is_qp_type_connected(const struct ib_qp *qp)
 static int _ib_modify_qp(struct ib_qp *qp, struct ib_qp_attr *attr,
 			 int attr_mask, struct ib_udata *udata)
 {
-	u8 port = attr_mask & IB_QP_PORT ? attr->port_num : qp->port;
+	u32 port = attr_mask & IB_QP_PORT ? attr->port_num : qp->port;
 	const struct ib_gid_attr *old_sgid_attr_av;
 	const struct ib_gid_attr *old_sgid_attr_alt_av;
 	int ret;
 
@@ -1801,7 +1810,7 @@ int ib_modify_qp_with_udata(struct ib_qp *ib_qp, struct ib_qp_attr *attr,
 }
 EXPORT_SYMBOL(ib_modify_qp_with_udata);
 
-int ib_get_eth_speed(struct ib_device *dev, u8 port_num, u16 *speed, u8 *width)
+int ib_get_eth_speed(struct ib_device *dev, u32 port_num, u16 *speed, u8 *width)
 {
 	int rc;
 	u32 netdev_speed;
 
@@ -2467,7 +2476,7 @@ int ib_check_mr_status(struct ib_mr *mr, u32 check_mask,
 }
 EXPORT_SYMBOL(ib_check_mr_status);
 
-int ib_set_vf_link_state(struct ib_device *device, int vf, u8 port,
+int ib_set_vf_link_state(struct ib_device *device, int vf, u32 port,
 			 int state)
 {
 	if (!device->ops.set_vf_link_state)
 
@@ -2477,7 +2486,7 @@ int ib_set_vf_link_state(struct ib_device *device, int vf, u8 port,
 }
 EXPORT_SYMBOL(ib_set_vf_link_state);
 
-int ib_get_vf_config(struct ib_device *device, int vf, u8 port,
+int ib_get_vf_config(struct ib_device *device, int vf, u32 port,
 		     struct ifla_vf_info *info)
 {
 	if (!device->ops.get_vf_config)
 
@@ -2487,7 +2496,7 @@ int ib_get_vf_config(struct ib_device *device, int vf, u8 port,
 }
 EXPORT_SYMBOL(ib_get_vf_config);
 
-int ib_get_vf_stats(struct ib_device *device, int vf, u8 port,
+int ib_get_vf_stats(struct ib_device *device, int vf, u32 port,
 		    struct ifla_vf_stats *stats)
 {
 	if (!device->ops.get_vf_stats)
 
@@ -2497,7 +2506,7 @@ int ib_get_vf_stats(struct ib_device *device, int vf, u8 port,
 }
 EXPORT_SYMBOL(ib_get_vf_stats);
 
-int ib_set_vf_guid(struct ib_device *device, int vf, u8 port, u64 guid,
+int ib_set_vf_guid(struct ib_device *device, int vf, u32 port, u64 guid,
 		   int type)
 {
 	if (!device->ops.set_vf_guid)
 
@@ -2507,7 +2516,7 @@ int ib_set_vf_guid(struct ib_device *device, int vf, u8 port, u64 guid,
 }
 EXPORT_SYMBOL(ib_set_vf_guid);
 
-int ib_get_vf_guid(struct ib_device *device, int vf, u8 port,
+int ib_get_vf_guid(struct ib_device *device, int vf, u32 port,
 		   struct ifla_vf_guid *node_guid,
 		   struct ifla_vf_guid *port_guid)
 {
 
@@ -2849,7 +2858,7 @@ void ib_drain_qp(struct ib_qp *qp)
 }
 EXPORT_SYMBOL(ib_drain_qp);
 
-struct net_device *rdma_alloc_netdev(struct ib_device *device, u8 port_num,
+struct net_device *rdma_alloc_netdev(struct ib_device *device, u32 port_num,
 				     enum rdma_netdev_t type, const char *name,
 				     unsigned char name_assign_type,
 				     void (*setup)(struct net_device *))
 
@@ -2875,7 +2884,7 @@ struct net_device *rdma_alloc_netdev(struct ib_device *device, u8 port_num,
 }
 EXPORT_SYMBOL(rdma_alloc_netdev);
 
-int rdma_init_netdev(struct ib_device *device, u8 port_num,
+int rdma_init_netdev(struct ib_device *device, u32 port_num,
 		     enum rdma_netdev_t type, const char *name,
 		     unsigned char name_assign_type,
 		     void (*setup)(struct net_device *),
 
@@ -2,9 +2,7 @@
 config INFINIBAND_BNXT_RE
 	tristate "Broadcom Netxtreme HCA support"
 	depends on 64BIT
-	depends on ETHERNET && NETDEVICES && PCI && INET && DCB
-	select NET_VENDOR_BROADCOM
-	select BNXT
+	depends on INET && DCB && BNXT
 	help
 	  This driver supports Broadcom NetXtreme-E 10/25/40/50 gigabit
 	  RoCE HCAs. To compile this driver as a module, choose M here:
 
@@ -138,6 +138,7 @@ struct bnxt_re_dev {
 #define BNXT_RE_FLAG_QOS_WORK_REG 5
 #define BNXT_RE_FLAG_RESOURCES_ALLOCATED 7
 #define BNXT_RE_FLAG_RESOURCES_INITIALIZED 8
+#define BNXT_RE_FLAG_ERR_DEVICE_DETACHED 17
 #define BNXT_RE_FLAG_ISSUE_ROCE_STATS 29
 	struct net_device *netdev;
 	unsigned int version, major, minor;
 
@@ -114,7 +114,7 @@ static const char * const bnxt_re_stat_name[] = {
 
 int bnxt_re_ib_get_hw_stats(struct ib_device *ibdev,
 			    struct rdma_hw_stats *stats,
-			    u8 port, int index)
+			    u32 port, int index)
 {
 	struct bnxt_re_dev *rdev = to_bnxt_re_dev(ibdev, ibdev);
 	struct ctx_hw_stats *bnxt_re_stats = rdev->qplib_ctx.stats.dma;
 
@@ -235,7 +235,7 @@ int bnxt_re_ib_get_hw_stats(struct ib_device *ibdev,
 }
 
 struct rdma_hw_stats *bnxt_re_ib_alloc_hw_stats(struct ib_device *ibdev,
-						u8 port_num)
+						u32 port_num)
 {
 	BUILD_BUG_ON(ARRAY_SIZE(bnxt_re_stat_name) != BNXT_RE_NUM_COUNTERS);
 	/* We support only per port stats */
 
@@ -97,8 +97,8 @@ enum bnxt_re_hw_stats {
 };
 
 struct rdma_hw_stats *bnxt_re_ib_alloc_hw_stats(struct ib_device *ibdev,
-						u8 port_num);
+						u32 port_num);
 int bnxt_re_ib_get_hw_stats(struct ib_device *ibdev,
 			    struct rdma_hw_stats *stats,
-			    u8 port, int index);
+			    u32 port, int index);
 #endif /* __BNXT_RE_HW_STATS_H__ */
 
@@ -189,7 +189,7 @@ int bnxt_re_query_device(struct ib_device *ibdev,
 }
 
 /* Port */
-int bnxt_re_query_port(struct ib_device *ibdev, u8 port_num,
+int bnxt_re_query_port(struct ib_device *ibdev, u32 port_num,
 		       struct ib_port_attr *port_attr)
 {
 	struct bnxt_re_dev *rdev = to_bnxt_re_dev(ibdev, ibdev);
 
@@ -229,7 +229,7 @@ int bnxt_re_query_port(struct ib_device *ibdev, u8 port_num,
 	return 0;
 }
 
-int bnxt_re_get_port_immutable(struct ib_device *ibdev, u8 port_num,
+int bnxt_re_get_port_immutable(struct ib_device *ibdev, u32 port_num,
 			       struct ib_port_immutable *immutable)
 {
 	struct ib_port_attr port_attr;
 
@@ -254,7 +254,7 @@ void bnxt_re_query_fw_str(struct ib_device *ibdev, char *str)
 		 rdev->dev_attr.fw_ver[2], rdev->dev_attr.fw_ver[3]);
 }
 
-int bnxt_re_query_pkey(struct ib_device *ibdev, u8 port_num,
+int bnxt_re_query_pkey(struct ib_device *ibdev, u32 port_num,
 		       u16 index, u16 *pkey)
 {
 	struct bnxt_re_dev *rdev = to_bnxt_re_dev(ibdev, ibdev);
 
@@ -266,7 +266,7 @@ int bnxt_re_query_pkey(struct ib_device *ibdev, u8 port_num,
 				   &rdev->qplib_res.pkey_tbl, index, pkey);
 }
 
-int bnxt_re_query_gid(struct ib_device *ibdev, u8 port_num,
+int bnxt_re_query_gid(struct ib_device *ibdev, u32 port_num,
 		      int index, union ib_gid *gid)
 {
 	struct bnxt_re_dev *rdev = to_bnxt_re_dev(ibdev, ibdev);
 
@@ -374,7 +374,7 @@ int bnxt_re_add_gid(const struct ib_gid_attr *attr, void **context)
 }
 
 enum rdma_link_layer bnxt_re_get_link_layer(struct ib_device *ibdev,
-					    u8 port_num)
+					    u32 port_num)
 {
 	return IB_LINK_LAYER_ETHERNET;
 }
 
@@ -149,19 +149,19 @@ static inline u16 bnxt_re_get_rwqe_size(int nsge)
 int bnxt_re_query_device(struct ib_device *ibdev,
 			 struct ib_device_attr *ib_attr,
 			 struct ib_udata *udata);
-int bnxt_re_query_port(struct ib_device *ibdev, u8 port_num,
+int bnxt_re_query_port(struct ib_device *ibdev, u32 port_num,
 		       struct ib_port_attr *port_attr);
-int bnxt_re_get_port_immutable(struct ib_device *ibdev, u8 port_num,
+int bnxt_re_get_port_immutable(struct ib_device *ibdev, u32 port_num,
 			       struct ib_port_immutable *immutable);
 void bnxt_re_query_fw_str(struct ib_device *ibdev, char *str);
-int bnxt_re_query_pkey(struct ib_device *ibdev, u8 port_num,
+int bnxt_re_query_pkey(struct ib_device *ibdev, u32 port_num,
 		       u16 index, u16 *pkey);
 int bnxt_re_del_gid(const struct ib_gid_attr *attr, void **context);
 int bnxt_re_add_gid(const struct ib_gid_attr *attr, void **context);
-int bnxt_re_query_gid(struct ib_device *ibdev, u8 port_num,
+int bnxt_re_query_gid(struct ib_device *ibdev, u32 port_num,
 		      int index, union ib_gid *gid);
 enum rdma_link_layer bnxt_re_get_link_layer(struct ib_device *ibdev,
-					    u8 port_num);
+					    u32 port_num);
 int bnxt_re_alloc_pd(struct ib_pd *pd, struct ib_udata *udata);
 int bnxt_re_dealloc_pd(struct ib_pd *pd, struct ib_udata *udata);
 int bnxt_re_create_ah(struct ib_ah *ah, struct rdma_ah_init_attr *init_attr,
 
@@ -81,6 +81,7 @@ static struct workqueue_struct *bnxt_re_wq;
 static void bnxt_re_remove_device(struct bnxt_re_dev *rdev);
 static void bnxt_re_dealloc_driver(struct ib_device *ib_dev);
 static void bnxt_re_stop_irq(void *handle);
+static void bnxt_re_dev_stop(struct bnxt_re_dev *rdev);
 
 static void bnxt_re_set_drv_mode(struct bnxt_re_dev *rdev, u8 mode)
 {
 
@@ -221,6 +222,37 @@ static void bnxt_re_set_resource_limits(struct bnxt_re_dev *rdev)
 /* for handling bnxt_en callbacks later */
 static void bnxt_re_stop(void *p)
 {
+	struct bnxt_re_dev *rdev = p;
+	struct bnxt *bp;
+
+	if (!rdev)
+		return;
+	ASSERT_RTNL();
+
+	/* L2 driver invokes this callback during device error/crash or device
+	 * reset. Current RoCE driver doesn't recover the device in case of
+	 * error. Handle the error by dispatching fatal events to all qps
+	 * ie. by calling bnxt_re_dev_stop and release the MSIx vectors as
+	 * L2 driver want to modify the MSIx table.
+	 */
+	bp = netdev_priv(rdev->netdev);
+
+	ibdev_info(&rdev->ibdev, "Handle device stop call from L2 driver");
+	/* Check the current device state from L2 structure and move the
+	 * device to detached state if FW_FATAL_COND is set.
+	 * This prevents more commands to HW during clean-up,
+	 * in case the device is already in error.
+	 */
+	if (test_bit(BNXT_STATE_FW_FATAL_COND, &bp->state))
+		set_bit(ERR_DEVICE_DETACHED, &rdev->rcfw.cmdq.flags);
+
+	bnxt_re_dev_stop(rdev);
+	bnxt_re_stop_irq(rdev);
+	/* Move the device states to detached and avoid sending any more
+	 * commands to HW
+	 */
+	set_bit(BNXT_RE_FLAG_ERR_DEVICE_DETACHED, &rdev->flags);
+	set_bit(ERR_DEVICE_DETACHED, &rdev->rcfw.cmdq.flags);
 }
 
 static void bnxt_re_start(void *p)
 
@@ -234,6 +266,8 @@ static void bnxt_re_sriov_config(void *p, int num_vfs)
 	if (!rdev)
 		return;
 
+	if (test_bit(BNXT_RE_FLAG_ERR_DEVICE_DETACHED, &rdev->flags))
+		return;
 	rdev->num_vfs = num_vfs;
 	if (!bnxt_qplib_is_chip_gen_p5(rdev->chip_ctx)) {
 		bnxt_re_set_resource_limits(rdev);
 
@@ -427,6 +461,9 @@ static int bnxt_re_net_ring_free(struct bnxt_re_dev *rdev,
 	if (!en_dev)
 		return rc;
 
+	if (test_bit(BNXT_RE_FLAG_ERR_DEVICE_DETACHED, &rdev->flags))
+		return 0;
+
 	memset(&fw_msg, 0, sizeof(fw_msg));
 
 	bnxt_re_init_hwrm_hdr(rdev, (void *)&req, HWRM_RING_FREE, -1, -1);
 
@@ -489,6 +526,9 @@ static int bnxt_re_net_stats_ctx_free(struct bnxt_re_dev *rdev,
 	if (!en_dev)
 		return rc;
 
+	if (test_bit(BNXT_RE_FLAG_ERR_DEVICE_DETACHED, &rdev->flags))
+		return 0;
+
 	memset(&fw_msg, 0, sizeof(fw_msg));
 
 	bnxt_re_init_hwrm_hdr(rdev, (void *)&req, HWRM_STAT_CTX_FREE, -1, -1);
 
@@ -561,24 +601,12 @@ static struct bnxt_re_dev *bnxt_re_from_netdev(struct net_device *netdev)
 	return container_of(ibdev, struct bnxt_re_dev, ibdev);
 }
 
-static void bnxt_re_dev_unprobe(struct net_device *netdev,
-				struct bnxt_en_dev *en_dev)
-{
-	dev_put(netdev);
-	module_put(en_dev->pdev->driver->driver.owner);
-}
-
 static struct bnxt_en_dev *bnxt_re_dev_probe(struct net_device *netdev)
 {
 	struct bnxt *bp = netdev_priv(netdev);
 	struct bnxt_en_dev *en_dev;
 	struct pci_dev *pdev;
 
-	/* Call bnxt_en's RoCE probe via indirect API */
-	if (!bp->ulp_probe)
-		return ERR_PTR(-EINVAL);
-
-	en_dev = bp->ulp_probe(netdev);
+	en_dev = bnxt_ulp_probe(netdev);
 	if (IS_ERR(en_dev))
 		return en_dev;
 
@@ -593,10 +621,6 @@ static struct bnxt_en_dev *bnxt_re_dev_probe(struct net_device *netdev)
 		return ERR_PTR(-ENODEV);
 	}
 
-	/* Bump net device reference count */
-	if (!try_module_get(pdev->driver->driver.owner))
-		return ERR_PTR(-ENODEV);
-
-	dev_hold(netdev);
 
 	return en_dev;
 
@@ -1523,13 +1547,12 @@ static int bnxt_re_dev_init(struct bnxt_re_dev *rdev, u8 wqe_mode)
 
 static void bnxt_re_dev_unreg(struct bnxt_re_dev *rdev)
 {
-	struct bnxt_en_dev *en_dev = rdev->en_dev;
 	struct net_device *netdev = rdev->netdev;
 
 	bnxt_re_dev_remove(rdev);
 
-	if (netdev)
-		bnxt_re_dev_unprobe(netdev, en_dev);
+	dev_put(netdev);
 }
 
 static int bnxt_re_dev_reg(struct bnxt_re_dev **rdev, struct net_device *netdev)
 
@@ -1551,7 +1574,7 @@ static int bnxt_re_dev_reg(struct bnxt_re_dev **rdev, struct net_device *netdev)
 	*rdev = bnxt_re_dev_add(netdev, en_dev);
 	if (!*rdev) {
 		rc = -ENOMEM;
-		bnxt_re_dev_unprobe(netdev, en_dev);
+		dev_put(netdev);
 		goto exit;
 	}
 exit:
 
@@ -2784,6 +2784,7 @@ static int bnxt_qplib_cq_process_terminal(struct bnxt_qplib_cq *cq,
 		dev_err(&cq->hwq.pdev->dev,
 			"FP: CQ Processed terminal reported rq_cons_idx 0x%x exceeds max 0x%x\n",
 			cqe_cons, rq->max_wqe);
+		rc = -EINVAL;
 		goto done;
 	}
 
@@ -212,6 +212,10 @@ int bnxt_qplib_rcfw_send_message(struct bnxt_qplib_rcfw *rcfw,
 	u8 opcode, retry_cnt = 0xFF;
 	int rc = 0;
 
+	/* Prevent posting if f/w is not in a state to process */
+	if (test_bit(ERR_DEVICE_DETACHED, &rcfw->cmdq.flags))
+		return 0;
+
 	do {
 		opcode = req->opcode;
 		rc = __send_message(rcfw, req, resp, sb, is_block);
 
@@ -138,6 +138,8 @@ struct bnxt_qplib_qp_node {
 #define FIRMWARE_INITIALIZED_FLAG (0)
 #define FIRMWARE_FIRST_FLAG (31)
 #define FIRMWARE_TIMED_OUT (3)
+#define ERR_DEVICE_DETACHED (4)
 
 struct bnxt_qplib_cmdq_mbox {
 	struct bnxt_qplib_reg_desc reg;
 	void __iomem *prod;
 
@@ -854,6 +854,7 @@ static int bnxt_qplib_alloc_dpi_tbl(struct bnxt_qplib_res *res,
 
 unmap_io:
 	pci_iounmap(res->pdev, dpit->dbr_bar_reg_iomem);
+	dpit->dbr_bar_reg_iomem = NULL;
 	return -ENOMEM;
 }
 
@@ -145,7 +145,7 @@ static void connect_reply_upcall(struct c4iw_ep *ep, int status);
 static int sched(struct c4iw_dev *dev, struct sk_buff *skb);
 
 static LIST_HEAD(timeout_list);
-static spinlock_t timeout_lock;
+static DEFINE_SPINLOCK(timeout_lock);
 
 static void deref_cm_id(struct c4iw_ep_common *epc)
 {
 
@@ -4452,7 +4452,6 @@ c4iw_handler_func c4iw_handlers[NUM_CPL_CMDS] = {
 
 int __init c4iw_cm_init(void)
 {
-	spin_lock_init(&timeout_lock);
 	skb_queue_head_init(&rxq);
 
 	workq = alloc_ordered_workqueue("iw_cxgb4", WQ_MEM_RECLAIM);
 
@@ -341,11 +341,6 @@ static inline struct c4iw_dev *to_c4iw_dev(struct ib_device *ibdev)
 	return container_of(ibdev, struct c4iw_dev, ibdev);
 }
 
-static inline struct c4iw_dev *rdev_to_c4iw_dev(struct c4iw_rdev *rdev)
-{
-	return container_of(rdev, struct c4iw_dev, rdev);
-}
-
 static inline struct c4iw_cq *get_chp(struct c4iw_dev *rhp, u32 cqid)
 {
 	return xa_load(&rhp->cqs, cqid);
 
@@ -659,12 +654,6 @@ static inline u32 c4iw_ib_to_tpt_access(int a)
 	       FW_RI_MEM_ACCESS_LOCAL_READ;
 }
 
-static inline u32 c4iw_ib_to_tpt_bind_access(int acc)
-{
-	return (acc & IB_ACCESS_REMOTE_WRITE ? FW_RI_MEM_ACCESS_REM_WRITE : 0) |
-	       (acc & IB_ACCESS_REMOTE_READ ? FW_RI_MEM_ACCESS_REM_READ : 0);
-}
-
 enum c4iw_mmid_state {
 	C4IW_STAG_STATE_VALID,
 	C4IW_STAG_STATE_INVALID
 
@@ -237,12 +237,12 @@ static int c4iw_allocate_pd(struct ib_pd *pd, struct ib_udata *udata)
 	return 0;
 }
 
-static int c4iw_query_gid(struct ib_device *ibdev, u8 port, int index,
+static int c4iw_query_gid(struct ib_device *ibdev, u32 port, int index,
 			  union ib_gid *gid)
 {
 	struct c4iw_dev *dev;
 
-	pr_debug("ibdev %p, port %d, index %d, gid %p\n",
+	pr_debug("ibdev %p, port %u, index %d, gid %p\n",
 		 ibdev, port, index, gid);
 	if (!port)
 		return -EINVAL;
 
@@ -295,7 +295,7 @@ static int c4iw_query_device(struct ib_device *ibdev, struct ib_device_attr *pro
 	return 0;
 }
 
-static int c4iw_query_port(struct ib_device *ibdev, u8 port,
+static int c4iw_query_port(struct ib_device *ibdev, u32 port,
 			   struct ib_port_attr *props)
 {
 	int ret = 0;
 
@@ -378,7 +378,7 @@ static const char * const names[] = {
 };
 
 static struct rdma_hw_stats *c4iw_alloc_stats(struct ib_device *ibdev,
-					      u8 port_num)
+					      u32 port_num)
 {
 	BUILD_BUG_ON(ARRAY_SIZE(names) != NR_COUNTERS);
 
@@ -391,7 +391,7 @@ static struct rdma_hw_stats *c4iw_alloc_stats(struct ib_device *ibdev,
 
 static int c4iw_get_mib(struct ib_device *ibdev,
 			struct rdma_hw_stats *stats,
-			u8 port, int index)
+			u32 port, int index)
 {
 	struct tp_tcp_stats v4, v6;
 	struct c4iw_dev *c4iw_dev = to_c4iw_dev(ibdev);
 
@@ -420,7 +420,7 @@ static const struct attribute_group c4iw_attr_group = {
 	.attrs = c4iw_class_attributes,
 };
 
-static int c4iw_port_immutable(struct ib_device *ibdev, u8 port_num,
+static int c4iw_port_immutable(struct ib_device *ibdev, u32 port_num,
 			       struct ib_port_immutable *immutable)
 {
 	struct ib_port_attr attr;
 
@@ -216,7 +216,7 @@ u32 c4iw_get_qpid(struct c4iw_rdev *rdev, struct c4iw_dev_ucontext *uctx)
 			goto out;
 		entry->qid = qid;
 		list_add_tail(&entry->entry, &uctx->cqids);
-		for (i = qid; i & rdev->qpmask; i++) {
+		for (i = qid + 1; i & rdev->qpmask; i++) {
 			entry = kmalloc(sizeof(*entry), GFP_KERNEL);
 			if (!entry)
 				goto out;
 
@@ -487,11 +487,6 @@ static inline int t4_rq_empty(struct t4_wq *wq)
 	return wq->rq.in_use == 0;
 }
 
-static inline int t4_rq_full(struct t4_wq *wq)
-{
-	return wq->rq.in_use == (wq->rq.size - 1);
-}
-
 static inline u32 t4_rq_avail(struct t4_wq *wq)
 {
 	return wq->rq.size - 1 - wq->rq.in_use;
 
@@ -534,11 +529,6 @@ static inline int t4_sq_empty(struct t4_wq *wq)
 	return wq->sq.in_use == 0;
 }
 
-static inline int t4_sq_full(struct t4_wq *wq)
-{
-	return wq->sq.in_use == (wq->sq.size - 1);
-}
-
 static inline u32 t4_sq_avail(struct t4_wq *wq)
 {
 	return wq->sq.size - 1 - wq->sq.in_use;
 
@@ -679,11 +669,6 @@ static inline void t4_enable_wq_db(struct t4_wq *wq)
 	wq->rq.queue[wq->rq.size].status.db_off = 0;
 }
 
-static inline int t4_wq_db_enabled(struct t4_wq *wq)
-{
-	return !wq->rq.queue[wq->rq.size].status.db_off;
-}
-
 enum t4_cq_flags {
 	CQ_ARMED = 1,
 };
 
@@ -817,19 +802,6 @@ static inline int t4_next_hw_cqe(struct t4_cq *cq, struct t4_cqe **cqe)
 	return ret;
 }
 
-static inline struct t4_cqe *t4_next_sw_cqe(struct t4_cq *cq)
-{
-	if (cq->sw_in_use == cq->size) {
-		pr_warn("%s cxgb4 sw cq overflow cqid %u\n",
-			__func__, cq->cqid);
-		cq->error = 1;
-		return NULL;
-	}
-	if (cq->sw_in_use)
-		return &cq->sw_queue[cq->sw_cidx];
-	return NULL;
-}
-
 static inline int t4_next_cqe(struct t4_cq *cq, struct t4_cqe **cqe)
 {
 	int ret = 0;
 
@@ -843,11 +815,6 @@ static inline int t4_next_cqe(struct t4_cq *cq, struct t4_cqe **cqe)
 	return ret;
 }
 
-static inline int t4_cq_in_error(struct t4_cq *cq)
-{
-	return *cq->qp_errp;
-}
-
 static inline void t4_set_cq_in_error(struct t4_cq *cq)
 {
 	*cq->qp_errp = 1;
 
@@ -120,14 +120,14 @@ struct efa_ah {
 int efa_query_device(struct ib_device *ibdev,
 		     struct ib_device_attr *props,
 		     struct ib_udata *udata);
-int efa_query_port(struct ib_device *ibdev, u8 port,
+int efa_query_port(struct ib_device *ibdev, u32 port,
 		   struct ib_port_attr *props);
 int efa_query_qp(struct ib_qp *ibqp, struct ib_qp_attr *qp_attr,
 		 int qp_attr_mask,
 		 struct ib_qp_init_attr *qp_init_attr);
-int efa_query_gid(struct ib_device *ibdev, u8 port, int index,
+int efa_query_gid(struct ib_device *ibdev, u32 port, int index,
 		  union ib_gid *gid);
-int efa_query_pkey(struct ib_device *ibdev, u8 port, u16 index,
+int efa_query_pkey(struct ib_device *ibdev, u32 port, u16 index,
 		   u16 *pkey);
 int efa_alloc_pd(struct ib_pd *ibpd, struct ib_udata *udata);
 int efa_dealloc_pd(struct ib_pd *ibpd, struct ib_udata *udata);
 
@@ -142,7 +142,7 @@ struct ib_mr *efa_reg_mr(struct ib_pd *ibpd, u64 start, u64 length,
 			 u64 virt_addr, int access_flags,
 			 struct ib_udata *udata);
 int efa_dereg_mr(struct ib_mr *ibmr, struct ib_udata *udata);
-int efa_get_port_immutable(struct ib_device *ibdev, u8 port_num,
+int efa_get_port_immutable(struct ib_device *ibdev, u32 port_num,
 			   struct ib_port_immutable *immutable);
 int efa_alloc_ucontext(struct ib_ucontext *ibucontext, struct ib_udata *udata);
 void efa_dealloc_ucontext(struct ib_ucontext *ibucontext);
 
@@ -156,9 +156,9 @@ int efa_destroy_ah(struct ib_ah *ibah, u32 flags);
 int efa_modify_qp(struct ib_qp *ibqp, struct ib_qp_attr *qp_attr,
 		  int qp_attr_mask, struct ib_udata *udata);
 enum rdma_link_layer efa_port_link_layer(struct ib_device *ibdev,
-					 u8 port_num);
-struct rdma_hw_stats *efa_alloc_hw_stats(struct ib_device *ibdev, u8 port_num);
+					 u32 port_num);
+struct rdma_hw_stats *efa_alloc_hw_stats(struct ib_device *ibdev, u32 port_num);
 int efa_get_hw_stats(struct ib_device *ibdev, struct rdma_hw_stats *stats,
-		     u8 port_num, int index);
+		     u32 port_num, int index);
 
 #endif /* _EFA_H_ */
 
@@ -1,6 +1,6 @@
 // SPDX-License-Identifier: GPL-2.0 OR BSD-2-Clause
 /*
- * Copyright 2018-2020 Amazon.com, Inc. or its affiliates. All rights reserved.
+ * Copyright 2018-2021 Amazon.com, Inc. or its affiliates. All rights reserved.
 */
 
 #include <linux/module.h>
 
@@ -209,11 +209,11 @@ static void efa_set_host_info(struct efa_dev *dev)
 	if (!hinf)
 		return;
 
-	strlcpy(hinf->os_dist_str, utsname()->release,
-		min(sizeof(hinf->os_dist_str), sizeof(utsname()->release)));
+	strscpy(hinf->os_dist_str, utsname()->release,
+		sizeof(hinf->os_dist_str));
 	hinf->os_type = EFA_ADMIN_OS_LINUX;
-	strlcpy(hinf->kernel_ver_str, utsname()->version,
-		min(sizeof(hinf->kernel_ver_str), sizeof(utsname()->version)));
+	strscpy(hinf->kernel_ver_str, utsname()->version,
+		sizeof(hinf->kernel_ver_str));
 	hinf->kernel_ver = LINUX_VERSION_CODE;
 	EFA_SET(&hinf->driver_ver, EFA_ADMIN_HOST_INFO_DRIVER_MAJOR, 0);
 	EFA_SET(&hinf->driver_ver, EFA_ADMIN_HOST_INFO_DRIVER_MINOR, 0);
 
@@ -247,7 +247,7 @@ int efa_query_device(struct ib_device *ibdev,
 	return 0;
 }
 
-int efa_query_port(struct ib_device *ibdev, u8 port,
+int efa_query_port(struct ib_device *ibdev, u32 port,
 		   struct ib_port_attr *props)
 {
 	struct efa_dev *dev = to_edev(ibdev);
 
@@ -319,7 +319,7 @@ int efa_query_qp(struct ib_qp *ibqp, struct ib_qp_attr *qp_attr,
 	return 0;
 }
 
-int efa_query_gid(struct ib_device *ibdev, u8 port, int index,
+int efa_query_gid(struct ib_device *ibdev, u32 port, int index,
 		  union ib_gid *gid)
 {
 	struct efa_dev *dev = to_edev(ibdev);
 
@@ -329,7 +329,7 @@ int efa_query_gid(struct ib_device *ibdev, u8 port, int index,
 	return 0;
 }
 
-int efa_query_pkey(struct ib_device *ibdev, u8 port, u16 index,
+int efa_query_pkey(struct ib_device *ibdev, u32 port, u16 index,
 		   u16 *pkey)
 {
 	if (index > 0)
 
@@ -1619,7 +1619,7 @@ int efa_dereg_mr(struct ib_mr *ibmr, struct ib_udata *udata)
 	return 0;
 }
 
-int efa_get_port_immutable(struct ib_device *ibdev, u8 port_num,
+int efa_get_port_immutable(struct ib_device *ibdev, u32 port_num,
 			   struct ib_port_immutable *immutable)
 {
 	struct ib_port_attr attr;
 
@@ -1904,7 +1904,7 @@ int efa_destroy_ah(struct ib_ah *ibah, u32 flags)
 	return 0;
 }
 
-struct rdma_hw_stats *efa_alloc_hw_stats(struct ib_device *ibdev, u8 port_num)
+struct rdma_hw_stats *efa_alloc_hw_stats(struct ib_device *ibdev, u32 port_num)
 {
 	return rdma_alloc_hw_stats_struct(efa_stats_names,
 					  ARRAY_SIZE(efa_stats_names),
 
@@ -1912,7 +1912,7 @@ struct rdma_hw_stats *efa_alloc_hw_stats(struct ib_device *ibdev, u8 port_num)
 }
 
 int efa_get_hw_stats(struct ib_device *ibdev, struct rdma_hw_stats *stats,
-		     u8 port_num, int index)
+		     u32 port_num, int index)
 {
 	struct efa_com_get_stats_params params = {};
 	union efa_com_get_stats_result result;
 
@@ -1981,7 +1981,7 @@ int efa_get_hw_stats(struct ib_device *ibdev, struct rdma_hw_stats *stats,
 }
 
 enum rdma_link_layer efa_port_link_layer(struct ib_device *ibdev,
-					 u8 port_num)
+					 u32 port_num)
 {
 	return IB_LINK_LAYER_UNSPECIFIED;
 }
 
|
@ -962,7 +962,6 @@ void hfi1_put_irq_affinity(struct hfi1_devdata *dd,
|
|||
struct hfi1_msix_entry *msix)
|
||||
{
|
||||
struct cpu_mask_set *set = NULL;
|
||||
struct hfi1_ctxtdata *rcd;
|
||||
struct hfi1_affinity_node *entry;
|
||||
|
||||
mutex_lock(&node_affinity.lock);
|
||||
|
@ -976,14 +975,15 @@ void hfi1_put_irq_affinity(struct hfi1_devdata *dd,
|
|||
case IRQ_GENERAL:
|
||||
/* Don't do accounting for general contexts */
|
||||
break;
|
||||
case IRQ_RCVCTXT:
|
||||
rcd = (struct hfi1_ctxtdata *)msix->arg;
|
||||
case IRQ_RCVCTXT: {
|
||||
struct hfi1_ctxtdata *rcd = msix->arg;
|
||||
|
||||
/* Don't do accounting for control contexts */
|
||||
if (rcd->ctxt != HFI1_CTRL_CTXT)
|
||||
set = &entry->rcv_intr;
|
||||
break;
|
||||
}
|
||||
case IRQ_NETDEVCTXT:
|
||||
rcd = (struct hfi1_ctxtdata *)msix->arg;
|
||||
set = &entry->def_intr;
|
||||
break;
|
||||
default:
|
||||
|
|
|
@ -1322,7 +1322,7 @@ CNTR_ELEM(#name, \
|
|||
access_ibp_##cntr)
|
||||
|
||||
/**
|
||||
* hfi_addr_from_offset - return addr for readq/writeq
|
||||
* hfi1_addr_from_offset - return addr for readq/writeq
|
||||
* @dd: the dd device
|
||||
* @offset: the offset of the CSR within bar0
|
||||
*
|
||||
|
@ -8316,7 +8316,7 @@ static void is_interrupt(struct hfi1_devdata *dd, unsigned int source)
|
|||
}
|
||||
|
||||
/**
|
||||
* gerneral_interrupt() - General interrupt handler
|
||||
* general_interrupt - General interrupt handler
|
||||
* @irq: MSIx IRQ vector
|
||||
* @data: hfi1 devdata
|
||||
*
|
||||
|
@ -15243,8 +15243,8 @@ int hfi1_init_dd(struct hfi1_devdata *dd)
|
|||
(dd->revision >> CCE_REVISION_SW_SHIFT)
|
||||
& CCE_REVISION_SW_MASK);
|
||||
|
||||
/* alloc netdev data */
|
||||
ret = hfi1_netdev_alloc(dd);
|
||||
/* alloc VNIC/AIP rx data */
|
||||
ret = hfi1_alloc_rx(dd);
|
||||
if (ret)
|
||||
goto bail_cleanup;
|
||||
|
||||
|
@ -15348,7 +15348,7 @@ int hfi1_init_dd(struct hfi1_devdata *dd)
|
|||
hfi1_comp_vectors_clean_up(dd);
|
||||
msix_clean_up_interrupts(dd);
|
||||
bail_cleanup:
|
||||
hfi1_netdev_free(dd);
|
||||
hfi1_free_rx(dd);
|
||||
hfi1_pcie_ddcleanup(dd);
|
||||
bail_free:
|
||||
hfi1_free_devdata(dd);
|
||||
|
|
|
@ -822,11 +822,6 @@ int acquire_lcb_access(struct hfi1_devdata *dd, int sleep_ok);
|
|||
int release_lcb_access(struct hfi1_devdata *dd, int sleep_ok);
|
||||
#define LCB_START DC_LCB_CSRS
|
||||
#define LCB_END DC_8051_CSRS /* next block is 8051 */
|
||||
static inline int is_lcb_offset(u32 offset)
|
||||
{
|
||||
return (offset >= LCB_START && offset < LCB_END);
|
||||
}
|
||||
|
||||
extern uint num_vls;
|
||||
|
||||
extern uint disable_integrity;
|
||||
|
|
|
@ -1026,7 +1026,7 @@ static bool __set_armed_to_active(struct hfi1_packet *packet)
|
|||
}
|
||||
|
||||
/**
|
||||
* armed to active - the fast path for armed to active
|
||||
* set_armed_to_active - the fast path for armed to active
|
||||
* @packet: the packet structure
|
||||
*
|
||||
* Return true if packet processing needs to bail.
|
||||
|
|
|
@ -49,7 +49,7 @@
|
|||
#include "trace.h"
|
||||
|
||||
/**
|
||||
* exp_tid_group_init - initialize exp_tid_set
|
||||
* hfi1_exp_tid_set_init - initialize exp_tid_set
|
||||
* @set: the set
|
||||
*/
|
||||
static void hfi1_exp_tid_set_init(struct exp_tid_set *set)
|
||||
|
@ -70,7 +70,7 @@ void hfi1_exp_tid_group_init(struct hfi1_ctxtdata *rcd)
|
|||
}
|
||||
|
||||
/**
|
||||
* alloc_ctxt_rcv_groups - initialize expected receive groups
|
||||
* hfi1_alloc_ctxt_rcv_groups - initialize expected receive groups
|
||||
* @rcd: the context to add the groupings to
|
||||
*/
|
||||
int hfi1_alloc_ctxt_rcv_groups(struct hfi1_ctxtdata *rcd)
|
||||
|
@ -100,7 +100,7 @@ int hfi1_alloc_ctxt_rcv_groups(struct hfi1_ctxtdata *rcd)
|
|||
}
|
||||
|
||||
/**
|
||||
* free_ctxt_rcv_groups - free expected receive groups
|
||||
* hfi1_free_ctxt_rcv_groups - free expected receive groups
|
||||
* @rcd: the context to free
|
||||
*
|
||||
* The routine dismantles the expect receive linked
|
||||
|
|
|
@ -1916,6 +1916,7 @@ int parse_platform_config(struct hfi1_devdata *dd)
|
|||
dd_dev_err(dd, "%s: Failed CRC check at offset %ld\n",
|
||||
__func__, (ptr -
|
||||
(u32 *)dd->platform_config.data));
|
||||
ret = -EINVAL;
|
||||
goto bail;
|
||||
}
|
||||
/* Jump the CRC DWORD */
|
||||
|
|
|
@ -69,7 +69,6 @@
|
|||
#include <rdma/ib_hdrs.h>
|
||||
#include <rdma/opa_addr.h>
|
||||
#include <linux/rhashtable.h>
|
||||
#include <linux/netdevice.h>
|
||||
#include <rdma/rdma_vt.h>
|
||||
|
||||
#include "chip_registers.h"
|
||||
|
@ -717,12 +716,6 @@ static inline void incr_cntr64(u64 *cntr)
|
|||
(*cntr)++;
|
||||
}
|
||||
|
||||
static inline void incr_cntr32(u32 *cntr)
|
||||
{
|
||||
if (*cntr < (u32)-1LL)
|
||||
(*cntr)++;
|
||||
}
|
||||
|
||||
#define MAX_NAME_SIZE 64
|
||||
struct hfi1_msix_entry {
|
||||
enum irq_type type;
|
||||
|
@ -864,7 +857,7 @@ struct hfi1_pportdata {
|
|||
u8 rx_pol_inv;
|
||||
|
||||
u8 hw_pidx; /* physical port index */
|
||||
u8 port; /* IB port number and index into dd->pports - 1 */
|
||||
u32 port; /* IB port number and index into dd->pports - 1 */
|
||||
/* type of neighbor node */
|
||||
u8 neighbor_type;
|
||||
u8 neighbor_normal;
|
||||
|
@ -1066,6 +1059,7 @@ struct sdma_vl_map;
|
|||
#define SERIAL_MAX 16 /* length of the serial number */
|
||||
|
||||
typedef int (*send_routine)(struct rvt_qp *, struct hfi1_pkt_state *, u64);
|
||||
struct hfi1_netdev_rx;
|
||||
struct hfi1_devdata {
|
||||
struct hfi1_ibdev verbs_dev; /* must be first */
|
||||
/* pointers to related structs for this device */
|
||||
|
@ -1408,7 +1402,7 @@ struct hfi1_devdata {
|
|||
/* Lock to protect IRQ SRC register access */
|
||||
spinlock_t irq_src_lock;
|
||||
int vnic_num_vports;
|
||||
struct net_device *dummy_netdev;
|
||||
struct hfi1_netdev_rx *netdev_rx;
|
||||
struct hfi1_affinity_node *affinity_entry;
|
||||
|
||||
/* Keeps track of IPoIB RSM rule users */
|
||||
|
@ -1480,7 +1474,7 @@ int hfi1_create_ctxtdata(struct hfi1_pportdata *ppd, int numa,
|
|||
struct hfi1_ctxtdata **rcd);
|
||||
void hfi1_free_ctxt(struct hfi1_ctxtdata *rcd);
|
||||
void hfi1_init_pportdata(struct pci_dev *pdev, struct hfi1_pportdata *ppd,
|
||||
struct hfi1_devdata *dd, u8 hw_pidx, u8 port);
|
||||
struct hfi1_devdata *dd, u8 hw_pidx, u32 port);
|
||||
void hfi1_free_ctxtdata(struct hfi1_devdata *dd, struct hfi1_ctxtdata *rcd);
|
||||
int hfi1_rcd_put(struct hfi1_ctxtdata *rcd);
|
||||
int hfi1_rcd_get(struct hfi1_ctxtdata *rcd);
|
||||
|
@ -1976,10 +1970,10 @@ static inline struct hfi1_ibdev *dev_from_rdi(struct rvt_dev_info *rdi)
|
|||
return container_of(rdi, struct hfi1_ibdev, rdi);
|
||||
}
|
||||
|
||||
static inline struct hfi1_ibport *to_iport(struct ib_device *ibdev, u8 port)
|
||||
static inline struct hfi1_ibport *to_iport(struct ib_device *ibdev, u32 port)
|
||||
{
|
||||
struct hfi1_devdata *dd = dd_from_ibdev(ibdev);
|
||||
unsigned pidx = port - 1; /* IB number port from 1, hdw from 0 */
|
||||
u32 pidx = port - 1; /* IB number port from 1, hdw from 0 */
|
||||
|
||||
WARN_ON(pidx >= dd->num_pports);
|
||||
return &dd->pport[pidx].ibport_data;
|
||||
|
@ -2198,7 +2192,7 @@ extern const struct attribute_group ib_hfi1_attr_group;
|
|||
int hfi1_device_create(struct hfi1_devdata *dd);
|
||||
void hfi1_device_remove(struct hfi1_devdata *dd);
|
||||
|
||||
int hfi1_create_port_files(struct ib_device *ibdev, u8 port_num,
|
||||
int hfi1_create_port_files(struct ib_device *ibdev, u32 port_num,
|
||||
struct kobject *kobj);
|
||||
int hfi1_verbs_register_sysfs(struct hfi1_devdata *dd);
|
||||
void hfi1_verbs_unregister_sysfs(struct hfi1_devdata *dd);
|
||||
|
|
|
@ -627,7 +627,7 @@ static enum hrtimer_restart cca_timer_fn(struct hrtimer *t)
|
|||
* Common code for initializing the physical port structure.
|
||||
*/
|
||||
void hfi1_init_pportdata(struct pci_dev *pdev, struct hfi1_pportdata *ppd,
|
||||
struct hfi1_devdata *dd, u8 hw_pidx, u8 port)
|
||||
struct hfi1_devdata *dd, u8 hw_pidx, u32 port)
|
||||
{
|
||||
int i;
|
||||
uint default_pkey_idx;
|
||||
|
@ -1775,7 +1775,7 @@ static void remove_one(struct pci_dev *pdev)
|
|||
hfi1_unregister_ib_device(dd);
|
||||
|
||||
/* free netdev data */
|
||||
hfi1_netdev_free(dd);
|
||||
hfi1_free_rx(dd);
|
||||
|
||||
/*
|
||||
* Disable the IB link, disable interrupts on the device,
|
||||
|
@ -1860,7 +1860,8 @@ int hfi1_create_rcvhdrq(struct hfi1_devdata *dd, struct hfi1_ctxtdata *rcd)
|
|||
}
|
||||
|
||||
/**
|
||||
* allocate eager buffers, both kernel and user contexts.
|
||||
* hfi1_setup_eagerbufs - llocate eager buffers, both kernel and user
|
||||
* contexts.
|
||||
* @rcd: the context we are setting up.
|
||||
*
|
||||
* Allocate the eager TID buffers and program them into hip.
|
||||
|
|
|
@ -321,7 +321,7 @@ static inline void iowait_drain_wakeup(struct iowait *wait)
|
|||
/**
|
||||
* iowait_get_txhead() - get packet off of iowait list
|
||||
*
|
||||
* @wait iowait_work struture
|
||||
* @wait: iowait_work structure
|
||||
*/
|
||||
static inline struct sdma_txreq *iowait_get_txhead(struct iowait_work *wait)
|
||||
{
|
||||
|
|
|
@ -52,8 +52,9 @@ union hfi1_ipoib_flow {
|
|||
* @producer_lock: producer sync lock
|
||||
* @consumer_lock: consumer sync lock
|
||||
*/
|
||||
struct ipoib_txreq;
|
||||
struct hfi1_ipoib_circ_buf {
|
||||
void **items;
|
||||
struct ipoib_txreq **items;
|
||||
unsigned long head;
|
||||
unsigned long tail;
|
||||
unsigned long max_items;
|
||||
|
@ -125,10 +126,10 @@ hfi1_ipoib_priv(const struct net_device *dev)
|
|||
return &((struct hfi1_ipoib_rdma_netdev *)netdev_priv(dev))->dev_priv;
|
||||
}
|
||||
|
||||
int hfi1_ipoib_send_dma(struct net_device *dev,
|
||||
struct sk_buff *skb,
|
||||
struct ib_ah *address,
|
||||
u32 dqpn);
|
||||
int hfi1_ipoib_send(struct net_device *dev,
|
||||
struct sk_buff *skb,
|
||||
struct ib_ah *address,
|
||||
u32 dqpn);
|
||||
|
||||
int hfi1_ipoib_txreq_init(struct hfi1_ipoib_dev_priv *priv);
|
||||
void hfi1_ipoib_txreq_deinit(struct hfi1_ipoib_dev_priv *priv);
|
||||
|
@ -143,8 +144,10 @@ struct sk_buff *hfi1_ipoib_prepare_skb(struct hfi1_netdev_rxq *rxq,
|
|||
int size, void *data);
|
||||
|
||||
int hfi1_ipoib_rn_get_params(struct ib_device *device,
|
||||
u8 port_num,
|
||||
u32 port_num,
|
||||
enum rdma_netdev_t type,
|
||||
struct rdma_netdev_alloc_params *params);
|
||||
|
||||
void hfi1_ipoib_tx_timeout(struct net_device *dev, unsigned int q);
|
||||
|
||||
#endif /* _IPOIB_H */
|
||||
|
|
|
@ -101,14 +101,6 @@ static const struct net_device_ops hfi1_ipoib_netdev_ops = {
|
|||
.ndo_get_stats64 = dev_get_tstats64,
|
||||
};
|
||||
|
||||
static int hfi1_ipoib_send(struct net_device *dev,
|
||||
struct sk_buff *skb,
|
||||
struct ib_ah *address,
|
||||
u32 dqpn)
|
||||
{
|
||||
return hfi1_ipoib_send_dma(dev, skb, address, dqpn);
|
||||
}
|
||||
|
||||
static int hfi1_ipoib_mcast_attach(struct net_device *dev,
|
||||
struct ib_device *device,
|
||||
union ib_gid *mgid,
|
||||
|
@ -194,7 +186,7 @@ static void hfi1_ipoib_set_id(struct net_device *dev, int id)
|
|||
}
|
||||
|
||||
static int hfi1_ipoib_setup_rn(struct ib_device *device,
|
||||
u8 port_num,
|
||||
u32 port_num,
|
||||
struct net_device *netdev,
|
||||
void *param)
|
||||
{
|
||||
|
@ -204,6 +196,7 @@ static int hfi1_ipoib_setup_rn(struct ib_device *device,
|
|||
int rc;
|
||||
|
||||
rn->send = hfi1_ipoib_send;
|
||||
rn->tx_timeout = hfi1_ipoib_tx_timeout;
|
||||
rn->attach_mcast = hfi1_ipoib_mcast_attach;
|
||||
rn->detach_mcast = hfi1_ipoib_mcast_detach;
|
||||
rn->set_id = hfi1_ipoib_set_id;
|
||||
|
@ -243,7 +236,7 @@ static int hfi1_ipoib_setup_rn(struct ib_device *device,
|
|||
}
|
||||
|
||||
int hfi1_ipoib_rn_get_params(struct ib_device *device,
|
||||
u8 port_num,
|
||||
u32 port_num,
|
||||
enum rdma_netdev_t type,
|
||||
struct rdma_netdev_alloc_params *params)
|
||||
{
|
||||
|
|
|
@ -15,6 +15,7 @@
|
|||
#include "verbs.h"
|
||||
#include "trace_ibhdrs.h"
|
||||
#include "ipoib.h"
|
||||
#include "trace_tx.h"
|
||||
|
||||
/* Add a convenience helper */
|
||||
#define CIRC_ADD(val, add, size) (((val) + (add)) & ((size) - 1))
|
||||
|
@ -63,12 +64,14 @@ static u64 hfi1_ipoib_used(struct hfi1_ipoib_txq *txq)
|
|||
|
||||
static void hfi1_ipoib_stop_txq(struct hfi1_ipoib_txq *txq)
|
||||
{
|
||||
trace_hfi1_txq_stop(txq);
|
||||
if (atomic_inc_return(&txq->stops) == 1)
|
||||
netif_stop_subqueue(txq->priv->netdev, txq->q_idx);
|
||||
}
|
||||
|
||||
static void hfi1_ipoib_wake_txq(struct hfi1_ipoib_txq *txq)
|
||||
{
|
||||
trace_hfi1_txq_wake(txq);
|
||||
if (atomic_dec_and_test(&txq->stops))
|
||||
netif_wake_subqueue(txq->priv->netdev, txq->q_idx);
|
||||
}
|
||||
|
@ -89,8 +92,10 @@ static void hfi1_ipoib_check_queue_depth(struct hfi1_ipoib_txq *txq)
|
|||
{
|
||||
++txq->sent_txreqs;
|
||||
if (hfi1_ipoib_used(txq) >= hfi1_ipoib_ring_hwat(txq) &&
|
||||
!atomic_xchg(&txq->ring_full, 1))
|
||||
!atomic_xchg(&txq->ring_full, 1)) {
|
||||
trace_hfi1_txq_full(txq);
|
||||
hfi1_ipoib_stop_txq(txq);
|
||||
}
|
||||
}
|
||||
|
||||
static void hfi1_ipoib_check_queue_stopped(struct hfi1_ipoib_txq *txq)
|
||||
|
@ -112,8 +117,10 @@ static void hfi1_ipoib_check_queue_stopped(struct hfi1_ipoib_txq *txq)
|
|||
* to protect against ring overflow.
|
||||
*/
|
||||
if (hfi1_ipoib_used(txq) < hfi1_ipoib_ring_lwat(txq) &&
|
||||
atomic_xchg(&txq->ring_full, 0))
|
||||
atomic_xchg(&txq->ring_full, 0)) {
|
||||
trace_hfi1_txq_xmit_unstopped(txq);
|
||||
hfi1_ipoib_wake_txq(txq);
|
||||
}
|
||||
}
|
||||
|
||||
static void hfi1_ipoib_free_tx(struct ipoib_txreq *tx, int budget)
|
||||
|
@ -202,7 +209,7 @@ static void hfi1_ipoib_add_tx(struct ipoib_txreq *tx)
|
|||
|
||||
/* Finish storing txreq before incrementing head. */
|
||||
smp_store_release(&tx_ring->head, CIRC_ADD(head, 1, max_tx));
|
||||
napi_schedule(tx->txq->napi);
|
||||
napi_schedule_irqoff(tx->txq->napi);
|
||||
} else {
|
||||
struct hfi1_ipoib_txq *txq = tx->txq;
|
||||
struct hfi1_ipoib_dev_priv *priv = tx->priv;
|
||||
|
@ -405,6 +412,7 @@ static struct ipoib_txreq *hfi1_ipoib_send_dma_common(struct net_device *dev,
|
|||
sdma_select_engine_sc(priv->dd,
|
||||
txp->flow.tx_queue,
|
||||
txp->flow.sc5);
|
||||
trace_hfi1_flow_switch(txp->txq);
|
||||
}
|
||||
|
||||
return tx;
|
||||
|
@ -525,6 +533,7 @@ static int hfi1_ipoib_send_dma_list(struct net_device *dev,
|
|||
if (txq->flow.as_int != txp->flow.as_int) {
|
||||
int ret;
|
||||
|
||||
trace_hfi1_flow_flush(txq);
|
||||
ret = hfi1_ipoib_flush_tx_list(dev, txq);
|
||||
if (unlikely(ret)) {
|
||||
if (ret == -EBUSY)
|
||||
|
@ -572,10 +581,10 @@ static u8 hfi1_ipoib_calc_entropy(struct sk_buff *skb)
|
|||
return (u8)skb_get_queue_mapping(skb);
|
||||
}
|
||||
|
||||
int hfi1_ipoib_send_dma(struct net_device *dev,
|
||||
struct sk_buff *skb,
|
||||
struct ib_ah *address,
|
||||
u32 dqpn)
|
||||
int hfi1_ipoib_send(struct net_device *dev,
|
||||
struct sk_buff *skb,
|
||||
struct ib_ah *address,
|
||||
u32 dqpn)
|
||||
{
|
||||
struct hfi1_ipoib_dev_priv *priv = hfi1_ipoib_priv(dev);
	struct ipoib_txparms txp;

@@ -635,8 +644,10 @@ static int hfi1_ipoib_sdma_sleep(struct sdma_engine *sde,
 			/* came from non-list submit */
 			list_add_tail(&txreq->list, &txq->tx_list);
 		if (list_empty(&txq->wait.list)) {
-			if (!atomic_xchg(&txq->no_desc, 1))
+			if (!atomic_xchg(&txq->no_desc, 1)) {
+				trace_hfi1_txq_queued(txq);
 				hfi1_ipoib_stop_txq(txq);
+			}
 			iowait_queue(pkts_sent, wait->iow, &sde->dmawait);
 		}
@@ -659,6 +670,7 @@ static void hfi1_ipoib_sdma_wakeup(struct iowait *wait, int reason)
 	struct hfi1_ipoib_txq *txq =
 		container_of(wait, struct hfi1_ipoib_txq, wait);

+	trace_hfi1_txq_wakeup(txq);
 	if (likely(txq->priv->netdev->reg_state == NETREG_REGISTERED))
 		iowait_schedule(wait, system_highpri_wq, WORK_CPU_UNBOUND);
 }
@@ -702,14 +714,14 @@ int hfi1_ipoib_txreq_init(struct hfi1_ipoib_dev_priv *priv)

 	priv->tx_napis = kcalloc_node(dev->num_tx_queues,
 				      sizeof(struct napi_struct),
-				      GFP_ATOMIC,
+				      GFP_KERNEL,
 				      priv->dd->node);
 	if (!priv->tx_napis)
 		goto free_txreq_cache;

 	priv->txqs = kcalloc_node(dev->num_tx_queues,
 				  sizeof(struct hfi1_ipoib_txq),
-				  GFP_ATOMIC,
+				  GFP_KERNEL,
 				  priv->dd->node);
 	if (!priv->txqs)
 		goto free_tx_napis;
@@ -741,9 +753,9 @@ int hfi1_ipoib_txreq_init(struct hfi1_ipoib_dev_priv *priv)
 			       priv->dd->node);

 		txq->tx_ring.items =
-			vzalloc_node(array_size(tx_ring_size,
-						sizeof(struct ipoib_txreq)),
-				     priv->dd->node);
+			kcalloc_node(tx_ring_size,
+				     sizeof(struct ipoib_txreq *),
+				     GFP_KERNEL, priv->dd->node);
 		if (!txq->tx_ring.items)
 			goto free_txqs;
@@ -764,7 +776,7 @@ int hfi1_ipoib_txreq_init(struct hfi1_ipoib_dev_priv *priv)
 		struct hfi1_ipoib_txq *txq = &priv->txqs[i];

 		netif_napi_del(txq->napi);
-		vfree(txq->tx_ring.items);
+		kfree(txq->tx_ring.items);
 	}

 	kfree(priv->txqs);
@@ -817,7 +829,7 @@ void hfi1_ipoib_txreq_deinit(struct hfi1_ipoib_dev_priv *priv)
 		hfi1_ipoib_drain_tx_list(txq);
 		netif_napi_del(txq->napi);
 		(void)hfi1_ipoib_drain_tx_ring(txq, txq->tx_ring.max_items);
-		vfree(txq->tx_ring.items);
+		kfree(txq->tx_ring.items);
 	}

 	kfree(priv->txqs);
@@ -854,3 +866,32 @@ void hfi1_ipoib_napi_tx_disable(struct net_device *dev)
 		(void)hfi1_ipoib_drain_tx_ring(txq, txq->tx_ring.max_items);
 	}
 }
+
+void hfi1_ipoib_tx_timeout(struct net_device *dev, unsigned int q)
+{
+	struct hfi1_ipoib_dev_priv *priv = hfi1_ipoib_priv(dev);
+	struct hfi1_ipoib_txq *txq = &priv->txqs[q];
+	u64 completed = atomic64_read(&txq->complete_txreqs);
+
+	dd_dev_info(priv->dd, "timeout txq %llx q %u stopped %u stops %d no_desc %d ring_full %d\n",
+		    (unsigned long long)txq, q,
+		    __netif_subqueue_stopped(dev, txq->q_idx),
+		    atomic_read(&txq->stops),
+		    atomic_read(&txq->no_desc),
+		    atomic_read(&txq->ring_full));
+	dd_dev_info(priv->dd, "sde %llx engine %u\n",
+		    (unsigned long long)txq->sde,
+		    txq->sde ? txq->sde->this_idx : 0);
+	dd_dev_info(priv->dd, "flow %x\n", txq->flow.as_int);
+	dd_dev_info(priv->dd, "sent %llu completed %llu used %llu\n",
+		    txq->sent_txreqs, completed, hfi1_ipoib_used(txq));
+	dd_dev_info(priv->dd, "tx_queue_len %u max_items %lu\n",
+		    dev->tx_queue_len, txq->tx_ring.max_items);
+	dd_dev_info(priv->dd, "head %lu tail %lu\n",
+		    txq->tx_ring.head, txq->tx_ring.tail);
+	dd_dev_info(priv->dd, "wait queued %u\n",
+		    !list_empty(&txq->wait.list));
+	dd_dev_info(priv->dd, "tx_list empty %u\n",
+		    list_empty(&txq->tx_list));
+}
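Aside: two things change in the hfi1_ipoib_txreq_init() hunks above. The allocations drop GFP_ATOMIC for GFP_KERNEL, presumably safe because this init path runs in sleepable process context, and tx_ring.items becomes an array of txreq pointers instead of a flat array of txreq structs, so vzalloc/vfree give way to kcalloc/kfree. A minimal userspace sketch of that layout change (names here are illustrative, not from the driver):

    #include <stdlib.h>

    struct txreq { char payload[256]; };

    /* Before: one contiguous zeroed block of ring_size structs. */
    static struct txreq *ring_of_structs(size_t ring_size)
    {
            return calloc(ring_size, sizeof(struct txreq));
    }

    /* After: a zeroed table of ring_size pointers; each slot is
     * populated later, so the up-front allocation is far smaller. */
    static struct txreq **ring_of_pointers(size_t ring_size)
    {
            return calloc(ring_size, sizeof(struct txreq *));
    }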
@@ -108,7 +108,7 @@ static u16 hfi1_lookup_pkey_value(struct hfi1_ibport *ibp, int pkey_idx)
 	return 0;
 }

-void hfi1_event_pkey_change(struct hfi1_devdata *dd, u8 port)
+void hfi1_event_pkey_change(struct hfi1_devdata *dd, u32 port)
 {
 	struct ib_event event;

@@ -297,7 +297,7 @@ static struct ib_ah *hfi1_create_qp0_ah(struct hfi1_ibport *ibp, u32 dlid)
 	struct rvt_qp *qp0;
 	struct hfi1_pportdata *ppd = ppd_from_ibp(ibp);
 	struct hfi1_devdata *dd = dd_from_ppd(ppd);
-	u8 port_num = ppd->port;
+	u32 port_num = ppd->port;

 	memset(&attr, 0, sizeof(attr));
 	attr.type = rdma_ah_find_type(&dd->verbs_dev.rdi.ibdev, port_num);
@@ -515,7 +515,7 @@ static void bad_mkey(struct hfi1_ibport *ibp, struct ib_mad_hdr *mad,
 /*
  * Send a Port Capability Mask Changed trap (ch. 14.3.11).
  */
-void hfi1_cap_mask_chg(struct rvt_dev_info *rdi, u8 port_num)
+void hfi1_cap_mask_chg(struct rvt_dev_info *rdi, u32 port_num)
 {
 	struct trap_node *trap;
 	struct hfi1_ibdev *verbs_dev = dev_from_rdi(rdi);
@@ -581,7 +581,7 @@ void hfi1_node_desc_chg(struct hfi1_ibport *ibp)

 static int __subn_get_opa_nodedesc(struct opa_smp *smp, u32 am,
 				   u8 *data, struct ib_device *ibdev,
-				   u8 port, u32 *resp_len, u32 max_len)
+				   u32 port, u32 *resp_len, u32 max_len)
 {
 	struct opa_node_description *nd;

@@ -601,12 +601,12 @@ static int __subn_get_opa_nodedesc(struct opa_smp *smp, u32 am,
 }

 static int __subn_get_opa_nodeinfo(struct opa_smp *smp, u32 am, u8 *data,
-				   struct ib_device *ibdev, u8 port,
+				   struct ib_device *ibdev, u32 port,
 				   u32 *resp_len, u32 max_len)
 {
 	struct opa_node_info *ni;
 	struct hfi1_devdata *dd = dd_from_ibdev(ibdev);
-	unsigned pidx = port - 1; /* IB number port from 1, hw from 0 */
+	u32 pidx = port - 1; /* IB number port from 1, hw from 0 */

 	ni = (struct opa_node_info *)data;

@@ -641,11 +641,11 @@ static int __subn_get_opa_nodeinfo(struct opa_smp *smp, u32 am, u8 *data,
 }

 static int subn_get_nodeinfo(struct ib_smp *smp, struct ib_device *ibdev,
-			     u8 port)
+			     u32 port)
 {
 	struct ib_node_info *nip = (struct ib_node_info *)&smp->data;
 	struct hfi1_devdata *dd = dd_from_ibdev(ibdev);
-	unsigned pidx = port - 1; /* IB number port from 1, hw from 0 */
+	u32 pidx = port - 1; /* IB number port from 1, hw from 0 */

 	/* GUID 0 is illegal */
 	if (smp->attr_mod || pidx >= dd->num_pports ||
@@ -794,7 +794,7 @@ void read_ltp_rtt(struct hfi1_devdata *dd)
 }

 static int __subn_get_opa_portinfo(struct opa_smp *smp, u32 am, u8 *data,
-				   struct ib_device *ibdev, u8 port,
+				   struct ib_device *ibdev, u32 port,
 				   u32 *resp_len, u32 max_len)
 {
 	int i;
@@ -1009,7 +1009,7 @@ static int __subn_get_opa_portinfo(struct opa_smp *smp, u32 am, u8 *data,
 * @port: the IB port number
 * @pkeys: the pkey table is placed here
 */
-static int get_pkeys(struct hfi1_devdata *dd, u8 port, u16 *pkeys)
+static int get_pkeys(struct hfi1_devdata *dd, u32 port, u16 *pkeys)
 {
 	struct hfi1_pportdata *ppd = dd->pport + port - 1;

@@ -1019,7 +1019,7 @@ static int get_pkeys(struct hfi1_devdata *dd, u8 port, u16 *pkeys)
 }

 static int __subn_get_opa_pkeytable(struct opa_smp *smp, u32 am, u8 *data,
-				    struct ib_device *ibdev, u8 port,
+				    struct ib_device *ibdev, u32 port,
 				    u32 *resp_len, u32 max_len)
 {
 	struct hfi1_devdata *dd = dd_from_ibdev(ibdev);
@@ -1349,7 +1349,7 @@ static int set_port_states(struct hfi1_pportdata *ppd, struct opa_smp *smp,
 *
 */
 static int __subn_set_opa_portinfo(struct opa_smp *smp, u32 am, u8 *data,
-				   struct ib_device *ibdev, u8 port,
+				   struct ib_device *ibdev, u32 port,
 				   u32 *resp_len, u32 max_len, int local_mad)
 {
 	struct opa_port_info *pi = (struct opa_port_info *)data;
@@ -1667,7 +1667,7 @@ static int __subn_set_opa_portinfo(struct opa_smp *smp, u32 am, u8 *data,
 * @port: the IB port number
 * @pkeys: the PKEY table
 */
-static int set_pkeys(struct hfi1_devdata *dd, u8 port, u16 *pkeys)
+static int set_pkeys(struct hfi1_devdata *dd, u32 port, u16 *pkeys)
 {
 	struct hfi1_pportdata *ppd;
 	int i;
@@ -1718,7 +1718,7 @@ static int set_pkeys(struct hfi1_devdata *dd, u8 port, u16 *pkeys)
 }

 static int __subn_set_opa_pkeytable(struct opa_smp *smp, u32 am, u8 *data,
-				    struct ib_device *ibdev, u8 port,
+				    struct ib_device *ibdev, u32 port,
 				    u32 *resp_len, u32 max_len)
 {
 	struct hfi1_devdata *dd = dd_from_ibdev(ibdev);
@@ -1732,7 +1732,7 @@ static int __subn_set_opa_pkeytable(struct opa_smp *smp, u32 am, u8 *data,
 	u32 size = 0;

 	if (n_blocks_sent == 0) {
-		pr_warn("OPA Get PKey AM Invalid : P = %d; B = 0x%x; N = 0x%x\n",
+		pr_warn("OPA Get PKey AM Invalid : P = %u; B = 0x%x; N = 0x%x\n",
 			port, start_block, n_blocks_sent);
 		smp->status |= IB_SMP_INVALID_FIELD;
 		return reply((struct ib_mad_hdr *)smp);
@@ -1825,7 +1825,7 @@ static int get_sc2vlt_tables(struct hfi1_devdata *dd, void *data)
 }

 static int __subn_get_opa_sl_to_sc(struct opa_smp *smp, u32 am, u8 *data,
-				   struct ib_device *ibdev, u8 port,
+				   struct ib_device *ibdev, u32 port,
 				   u32 *resp_len, u32 max_len)
 {
 	struct hfi1_ibport *ibp = to_iport(ibdev, port);
@@ -1848,7 +1848,7 @@ static int __subn_get_opa_sl_to_sc(struct opa_smp *smp, u32 am, u8 *data,
 }

 static int __subn_set_opa_sl_to_sc(struct opa_smp *smp, u32 am, u8 *data,
-				   struct ib_device *ibdev, u8 port,
+				   struct ib_device *ibdev, u32 port,
 				   u32 *resp_len, u32 max_len)
 {
 	struct hfi1_ibport *ibp = to_iport(ibdev, port);
@@ -1877,7 +1877,7 @@ static int __subn_set_opa_sl_to_sc(struct opa_smp *smp, u32 am, u8 *data,
 }

 static int __subn_get_opa_sc_to_sl(struct opa_smp *smp, u32 am, u8 *data,
-				   struct ib_device *ibdev, u8 port,
+				   struct ib_device *ibdev, u32 port,
 				   u32 *resp_len, u32 max_len)
 {
 	struct hfi1_ibport *ibp = to_iport(ibdev, port);
@@ -1900,7 +1900,7 @@ static int __subn_get_opa_sc_to_sl(struct opa_smp *smp, u32 am, u8 *data,
 }

 static int __subn_set_opa_sc_to_sl(struct opa_smp *smp, u32 am, u8 *data,
-				   struct ib_device *ibdev, u8 port,
+				   struct ib_device *ibdev, u32 port,
 				   u32 *resp_len, u32 max_len)
 {
 	struct hfi1_ibport *ibp = to_iport(ibdev, port);
@@ -1921,7 +1921,7 @@ static int __subn_set_opa_sc_to_sl(struct opa_smp *smp, u32 am, u8 *data,
 }

 static int __subn_get_opa_sc_to_vlt(struct opa_smp *smp, u32 am, u8 *data,
-				    struct ib_device *ibdev, u8 port,
+				    struct ib_device *ibdev, u32 port,
 				    u32 *resp_len, u32 max_len)
 {
 	u32 n_blocks = OPA_AM_NBLK(am);
@@ -1943,7 +1943,7 @@ static int __subn_get_opa_sc_to_vlt(struct opa_smp *smp, u32 am, u8 *data,
 }

 static int __subn_set_opa_sc_to_vlt(struct opa_smp *smp, u32 am, u8 *data,
-				    struct ib_device *ibdev, u8 port,
+				    struct ib_device *ibdev, u32 port,
 				    u32 *resp_len, u32 max_len)
 {
 	u32 n_blocks = OPA_AM_NBLK(am);
@@ -1985,7 +1985,7 @@ static int __subn_set_opa_sc_to_vlt(struct opa_smp *smp, u32 am, u8 *data,
 }

 static int __subn_get_opa_sc_to_vlnt(struct opa_smp *smp, u32 am, u8 *data,
-				     struct ib_device *ibdev, u8 port,
+				     struct ib_device *ibdev, u32 port,
 				     u32 *resp_len, u32 max_len)
 {
 	u32 n_blocks = OPA_AM_NPORT(am);
@@ -2010,7 +2010,7 @@ static int __subn_get_opa_sc_to_vlnt(struct opa_smp *smp, u32 am, u8 *data,
 }

 static int __subn_set_opa_sc_to_vlnt(struct opa_smp *smp, u32 am, u8 *data,
-				     struct ib_device *ibdev, u8 port,
+				     struct ib_device *ibdev, u32 port,
 				     u32 *resp_len, u32 max_len)
 {
 	u32 n_blocks = OPA_AM_NPORT(am);
@@ -2042,7 +2042,7 @@ static int __subn_set_opa_sc_to_vlnt(struct opa_smp *smp, u32 am, u8 *data,
 }

 static int __subn_get_opa_psi(struct opa_smp *smp, u32 am, u8 *data,
-			      struct ib_device *ibdev, u8 port,
+			      struct ib_device *ibdev, u32 port,
 			      u32 *resp_len, u32 max_len)
 {
 	u32 nports = OPA_AM_NPORT(am);
@@ -2084,7 +2084,7 @@ static int __subn_get_opa_psi(struct opa_smp *smp, u32 am, u8 *data,
 }

 static int __subn_set_opa_psi(struct opa_smp *smp, u32 am, u8 *data,
-			      struct ib_device *ibdev, u8 port,
+			      struct ib_device *ibdev, u32 port,
 			      u32 *resp_len, u32 max_len, int local_mad)
 {
 	u32 nports = OPA_AM_NPORT(am);
@@ -2132,7 +2132,7 @@ static int __subn_set_opa_psi(struct opa_smp *smp, u32 am, u8 *data,
 }

 static int __subn_get_opa_cable_info(struct opa_smp *smp, u32 am, u8 *data,
-				     struct ib_device *ibdev, u8 port,
+				     struct ib_device *ibdev, u32 port,
 				     u32 *resp_len, u32 max_len)
 {
 	struct hfi1_devdata *dd = dd_from_ibdev(ibdev);
@@ -2184,7 +2184,7 @@ static int __subn_get_opa_cable_info(struct opa_smp *smp, u32 am, u8 *data,
 }

 static int __subn_get_opa_bct(struct opa_smp *smp, u32 am, u8 *data,
-			      struct ib_device *ibdev, u8 port, u32 *resp_len,
+			      struct ib_device *ibdev, u32 port, u32 *resp_len,
 			      u32 max_len)
 {
 	u32 num_ports = OPA_AM_NPORT(am);
@@ -2208,7 +2208,7 @@ static int __subn_get_opa_bct(struct opa_smp *smp, u32 am, u8 *data,
 }

 static int __subn_set_opa_bct(struct opa_smp *smp, u32 am, u8 *data,
-			      struct ib_device *ibdev, u8 port, u32 *resp_len,
+			      struct ib_device *ibdev, u32 port, u32 *resp_len,
 			      u32 max_len)
 {
 	u32 num_ports = OPA_AM_NPORT(am);
@@ -2232,7 +2232,7 @@ static int __subn_set_opa_bct(struct opa_smp *smp, u32 am, u8 *data,
 }

 static int __subn_get_opa_vl_arb(struct opa_smp *smp, u32 am, u8 *data,
-				 struct ib_device *ibdev, u8 port,
+				 struct ib_device *ibdev, u32 port,
 				 u32 *resp_len, u32 max_len)
 {
 	struct hfi1_pportdata *ppd = ppd_from_ibp(to_iport(ibdev, port));
@@ -2274,7 +2274,7 @@ static int __subn_get_opa_vl_arb(struct opa_smp *smp, u32 am, u8 *data,
 }

 static int __subn_set_opa_vl_arb(struct opa_smp *smp, u32 am, u8 *data,
-				 struct ib_device *ibdev, u8 port,
+				 struct ib_device *ibdev, u32 port,
 				 u32 *resp_len, u32 max_len)
 {
 	struct hfi1_pportdata *ppd = ppd_from_ibp(to_iport(ibdev, port));
@@ -2722,7 +2722,7 @@ u64 get_xmit_wait_counters(struct hfi1_pportdata *ppd,

 static int pma_get_opa_portstatus(struct opa_pma_mad *pmp,
 				  struct ib_device *ibdev,
-				  u8 port, u32 *resp_len)
+				  u32 port, u32 *resp_len)
 {
 	struct opa_port_status_req *req =
 		(struct opa_port_status_req *)pmp->data;
@@ -2732,7 +2732,7 @@ static int pma_get_opa_portstatus(struct opa_pma_mad *pmp,
 	unsigned long vl;
 	size_t response_data_size;
 	u32 nports = be32_to_cpu(pmp->mad_hdr.attr_mod) >> 24;
-	u8 port_num = req->port_num;
+	u32 port_num = req->port_num;
 	u8 num_vls = hweight64(vl_select_mask);
 	struct _vls_pctrs *vlinfo;
 	struct hfi1_ibport *ibp = to_iport(ibdev, port);
@@ -2888,7 +2888,7 @@ static int pma_get_opa_portstatus(struct opa_pma_mad *pmp,
 	return reply((struct ib_mad_hdr *)pmp);
 }

-static u64 get_error_counter_summary(struct ib_device *ibdev, u8 port,
+static u64 get_error_counter_summary(struct ib_device *ibdev, u32 port,
 				     u8 res_lli, u8 res_ler)
 {
 	struct hfi1_devdata *dd = dd_from_ibdev(ibdev);
@@ -2973,7 +2973,7 @@ static void pma_get_opa_port_dctrs(struct ib_device *ibdev,

 static int pma_get_opa_datacounters(struct opa_pma_mad *pmp,
 				    struct ib_device *ibdev,
-				    u8 port, u32 *resp_len)
+				    u32 port, u32 *resp_len)
 {
 	struct opa_port_data_counters_msg *req =
 		(struct opa_port_data_counters_msg *)pmp->data;
@@ -2987,7 +2987,7 @@ static int pma_get_opa_datacounters(struct opa_pma_mad *pmp,
 	u8 lq, num_vls;
 	u8 res_lli, res_ler;
 	u64 port_mask;
-	u8 port_num;
+	u32 port_num;
 	unsigned long vl;
 	unsigned long vl_select_mask;
 	int vfi;
@@ -3123,7 +3123,7 @@ static int pma_get_opa_datacounters(struct opa_pma_mad *pmp,
 }

 static int pma_get_ib_portcounters_ext(struct ib_pma_mad *pmp,
-				       struct ib_device *ibdev, u8 port)
+				       struct ib_device *ibdev, u32 port)
 {
 	struct ib_pma_portcounters_ext *p = (struct ib_pma_portcounters_ext *)
 		pmp->data;
@@ -3151,7 +3151,7 @@ static int pma_get_ib_portcounters_ext(struct ib_pma_mad *pmp,
 }

 static void pma_get_opa_port_ectrs(struct ib_device *ibdev,
-				   struct _port_ectrs *rsp, u8 port)
+				   struct _port_ectrs *rsp, u32 port)
 {
 	u64 tmp, tmp2;
 	struct hfi1_devdata *dd = dd_from_ibdev(ibdev);
@@ -3194,11 +3194,11 @@ static void pma_get_opa_port_ectrs(struct ib_device *ibdev,

 static int pma_get_opa_porterrors(struct opa_pma_mad *pmp,
 				  struct ib_device *ibdev,
-				  u8 port, u32 *resp_len)
+				  u32 port, u32 *resp_len)
 {
 	size_t response_data_size;
 	struct _port_ectrs *rsp;
-	u8 port_num;
+	u32 port_num;
 	struct opa_port_error_counters64_msg *req;
 	struct hfi1_devdata *dd = dd_from_ibdev(ibdev);
 	u32 num_ports;
@@ -3283,7 +3283,7 @@ static int pma_get_opa_porterrors(struct opa_pma_mad *pmp,
 }

 static int pma_get_ib_portcounters(struct ib_pma_mad *pmp,
-				   struct ib_device *ibdev, u8 port)
+				   struct ib_device *ibdev, u32 port)
 {
 	struct ib_pma_portcounters *p = (struct ib_pma_portcounters *)
 		pmp->data;
@@ -3369,7 +3369,7 @@ static int pma_get_ib_portcounters(struct ib_pma_mad *pmp,

 static int pma_get_opa_errorinfo(struct opa_pma_mad *pmp,
 				 struct ib_device *ibdev,
-				 u8 port, u32 *resp_len)
+				 u32 port, u32 *resp_len)
 {
 	size_t response_data_size;
 	struct _port_ei *rsp;
@@ -3377,7 +3377,7 @@ static int pma_get_opa_errorinfo(struct opa_pma_mad *pmp,
 	struct hfi1_devdata *dd = dd_from_ibdev(ibdev);
 	u64 port_mask;
 	u32 num_ports;
-	u8 port_num;
+	u32 port_num;
 	u8 num_pslm;
 	u64 reg;

@@ -3468,7 +3468,7 @@ static int pma_get_opa_errorinfo(struct opa_pma_mad *pmp,

 static int pma_set_opa_portstatus(struct opa_pma_mad *pmp,
 				  struct ib_device *ibdev,
-				  u8 port, u32 *resp_len)
+				  u32 port, u32 *resp_len)
 {
 	struct opa_clear_port_status *req =
 		(struct opa_clear_port_status *)pmp->data;
@@ -3620,14 +3620,14 @@ static int pma_set_opa_portstatus(struct opa_pma_mad *pmp,

 static int pma_set_opa_errorinfo(struct opa_pma_mad *pmp,
 				 struct ib_device *ibdev,
-				 u8 port, u32 *resp_len)
+				 u32 port, u32 *resp_len)
 {
 	struct _port_ei *rsp;
 	struct opa_port_error_info_msg *req;
 	struct hfi1_devdata *dd = dd_from_ibdev(ibdev);
 	u64 port_mask;
 	u32 num_ports;
-	u8 port_num;
+	u32 port_num;
 	u8 num_pslm;
 	u32 error_info_select;

@@ -3702,7 +3702,7 @@ struct opa_congestion_info_attr {
 } __packed;

 static int __subn_get_opa_cong_info(struct opa_smp *smp, u32 am, u8 *data,
-				    struct ib_device *ibdev, u8 port,
+				    struct ib_device *ibdev, u32 port,
 				    u32 *resp_len, u32 max_len)
 {
 	struct opa_congestion_info_attr *p =
@@ -3727,7 +3727,7 @@ static int __subn_get_opa_cong_info(struct opa_smp *smp, u32 am, u8 *data,

 static int __subn_get_opa_cong_setting(struct opa_smp *smp, u32 am,
 				       u8 *data, struct ib_device *ibdev,
-				       u8 port, u32 *resp_len, u32 max_len)
+				       u32 port, u32 *resp_len, u32 max_len)
 {
 	int i;
 	struct opa_congestion_setting_attr *p =
@@ -3819,7 +3819,7 @@ static void apply_cc_state(struct hfi1_pportdata *ppd)
 }

 static int __subn_set_opa_cong_setting(struct opa_smp *smp, u32 am, u8 *data,
-				       struct ib_device *ibdev, u8 port,
+				       struct ib_device *ibdev, u32 port,
 				       u32 *resp_len, u32 max_len)
 {
 	struct opa_congestion_setting_attr *p =
@@ -3860,7 +3860,7 @@ static int __subn_set_opa_cong_setting(struct opa_smp *smp, u32 am, u8 *data,

 static int __subn_get_opa_hfi1_cong_log(struct opa_smp *smp, u32 am,
 					u8 *data, struct ib_device *ibdev,
-					u8 port, u32 *resp_len, u32 max_len)
+					u32 port, u32 *resp_len, u32 max_len)
 {
 	struct hfi1_ibport *ibp = to_iport(ibdev, port);
 	struct hfi1_pportdata *ppd = ppd_from_ibp(ibp);
@@ -3925,7 +3925,7 @@ static int __subn_get_opa_hfi1_cong_log(struct opa_smp *smp, u32 am,
 }

 static int __subn_get_opa_cc_table(struct opa_smp *smp, u32 am, u8 *data,
-				   struct ib_device *ibdev, u8 port,
+				   struct ib_device *ibdev, u32 port,
 				   u32 *resp_len, u32 max_len)
 {
 	struct ib_cc_table_attr *cc_table_attr =
@@ -3977,7 +3977,7 @@ static int __subn_get_opa_cc_table(struct opa_smp *smp, u32 am, u8 *data,
 }

 static int __subn_set_opa_cc_table(struct opa_smp *smp, u32 am, u8 *data,
-				   struct ib_device *ibdev, u8 port,
+				   struct ib_device *ibdev, u32 port,
 				   u32 *resp_len, u32 max_len)
 {
 	struct ib_cc_table_attr *p = (struct ib_cc_table_attr *)data;
@@ -4036,7 +4036,7 @@ struct opa_led_info {
 #define OPA_LED_MASK	BIT(OPA_LED_SHIFT)

 static int __subn_get_opa_led_info(struct opa_smp *smp, u32 am, u8 *data,
-				   struct ib_device *ibdev, u8 port,
+				   struct ib_device *ibdev, u32 port,
 				   u32 *resp_len, u32 max_len)
 {
 	struct hfi1_devdata *dd = dd_from_ibdev(ibdev);
@@ -4066,7 +4066,7 @@ static int __subn_get_opa_led_info(struct opa_smp *smp, u32 am, u8 *data,
 }

 static int __subn_set_opa_led_info(struct opa_smp *smp, u32 am, u8 *data,
-				   struct ib_device *ibdev, u8 port,
+				   struct ib_device *ibdev, u32 port,
 				   u32 *resp_len, u32 max_len)
 {
 	struct hfi1_devdata *dd = dd_from_ibdev(ibdev);
@@ -4089,7 +4089,7 @@ static int __subn_set_opa_led_info(struct opa_smp *smp, u32 am, u8 *data,
 }

 static int subn_get_opa_sma(__be16 attr_id, struct opa_smp *smp, u32 am,
-			    u8 *data, struct ib_device *ibdev, u8 port,
+			    u8 *data, struct ib_device *ibdev, u32 port,
 			    u32 *resp_len, u32 max_len)
 {
 	int ret;
@@ -4179,7 +4179,7 @@ static int subn_get_opa_sma(__be16 attr_id, struct opa_smp *smp, u32 am,
 }

 static int subn_set_opa_sma(__be16 attr_id, struct opa_smp *smp, u32 am,
-			    u8 *data, struct ib_device *ibdev, u8 port,
+			    u8 *data, struct ib_device *ibdev, u32 port,
 			    u32 *resp_len, u32 max_len, int local_mad)
 {
 	int ret;
@@ -4254,7 +4254,7 @@ static inline void set_aggr_error(struct opa_aggregate *ag)
 }

 static int subn_get_opa_aggregate(struct opa_smp *smp,
-				  struct ib_device *ibdev, u8 port,
+				  struct ib_device *ibdev, u32 port,
 				  u32 *resp_len)
 {
 	int i;
@@ -4303,7 +4303,7 @@ static int subn_get_opa_aggregate(struct opa_smp *smp,
 }

 static int subn_set_opa_aggregate(struct opa_smp *smp,
-				  struct ib_device *ibdev, u8 port,
+				  struct ib_device *ibdev, u32 port,
 				  u32 *resp_len, int local_mad)
 {
 	int i;
@@ -4509,7 +4509,7 @@ static int hfi1_pkey_validation_pma(struct hfi1_ibport *ibp,
 }

 static int process_subn_opa(struct ib_device *ibdev, int mad_flags,
-			    u8 port, const struct opa_mad *in_mad,
+			    u32 port, const struct opa_mad *in_mad,
 			    struct opa_mad *out_mad,
 			    u32 *resp_len, int local_mad)
 {
@@ -4614,7 +4614,7 @@ static int process_subn_opa(struct ib_device *ibdev, int mad_flags,
 }

 static int process_subn(struct ib_device *ibdev, int mad_flags,
-			u8 port, const struct ib_mad *in_mad,
+			u32 port, const struct ib_mad *in_mad,
 			struct ib_mad *out_mad)
 {
 	struct ib_smp *smp = (struct ib_smp *)out_mad;
@@ -4672,7 +4672,7 @@ static int process_subn(struct ib_device *ibdev, int mad_flags,
 	return ret;
 }

-static int process_perf(struct ib_device *ibdev, u8 port,
+static int process_perf(struct ib_device *ibdev, u32 port,
 			const struct ib_mad *in_mad,
 			struct ib_mad *out_mad)
 {
@@ -4734,7 +4734,7 @@ static int process_perf(struct ib_device *ibdev, u8 port,
 	return ret;
 }

-static int process_perf_opa(struct ib_device *ibdev, u8 port,
+static int process_perf_opa(struct ib_device *ibdev, u32 port,
 			    const struct opa_mad *in_mad,
 			    struct opa_mad *out_mad, u32 *resp_len)
 {
@@ -4816,7 +4816,7 @@ static int process_perf_opa(struct ib_device *ibdev, u8 port,
 }

 static int hfi1_process_opa_mad(struct ib_device *ibdev, int mad_flags,
-				u8 port, const struct ib_wc *in_wc,
+				u32 port, const struct ib_wc *in_wc,
 				const struct ib_grh *in_grh,
 				const struct opa_mad *in_mad,
 				struct opa_mad *out_mad, size_t *out_mad_size,
@@ -4869,7 +4869,7 @@ static int hfi1_process_opa_mad(struct ib_device *ibdev, int mad_flags,
 	return ret;
 }

-static int hfi1_process_ib_mad(struct ib_device *ibdev, int mad_flags, u8 port,
+static int hfi1_process_ib_mad(struct ib_device *ibdev, int mad_flags, u32 port,
 			       const struct ib_wc *in_wc,
 			       const struct ib_grh *in_grh,
 			       const struct ib_mad *in_mad,
@@ -4914,7 +4914,7 @@ static int hfi1_process_ib_mad(struct ib_device *ibdev, int mad_flags, u8 port,
 *
 * This is called by the ib_mad module.
 */
-int hfi1_process_mad(struct ib_device *ibdev, int mad_flags, u8 port,
+int hfi1_process_mad(struct ib_device *ibdev, int mad_flags, u32 port,
 		     const struct ib_wc *in_wc, const struct ib_grh *in_grh,
 		     const struct ib_mad *in_mad, struct ib_mad *out_mad,
 		     size_t *out_mad_size, u16 *out_mad_pkey_index)
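Aside: every hunk in the block above is the same mechanical conversion of the MAD port parameter from u8 to u32 (with matching %d -> %u format fixes), since a u8 silently truncates port numbers above 255. A self-contained illustration of the failure mode being removed (illustrative code, not from the kernel):

    #include <stdint.h>
    #include <stdio.h>

    int main(void)
    {
            uint32_t port = 300;              /* plausible on a large software switch */
            uint8_t narrow = (uint8_t)port;   /* old u8 parameter: wraps to 44 */
            uint32_t wide = port;             /* new u32 parameter: preserved */

            printf("u8 sees port %u, u32 sees port %u\n", narrow, wide);
            return 0;
    }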
@@ -436,7 +436,7 @@ struct sc2vlnt {
 	 COUNTER_MASK(1, 3) | \
 	 COUNTER_MASK(1, 4))

-void hfi1_event_pkey_change(struct hfi1_devdata *dd, u8 port);
+void hfi1_event_pkey_change(struct hfi1_devdata *dd, u32 port);
 void hfi1_handle_trap_timer(struct timer_list *t);
 u16 tx_link_width(u16 link_width);
 u64 get_xmit_wait_counters(struct hfi1_pportdata *ppd, u16 link_width,
@@ -89,7 +89,7 @@ int hfi1_mmu_rb_register(void *ops_arg,
 	struct mmu_rb_handler *h;
 	int ret;

-	h = kmalloc(sizeof(*h), GFP_KERNEL);
+	h = kzalloc(sizeof(*h), GFP_KERNEL);
 	if (!h)
 		return -ENOMEM;
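Aside: the one-line mmu_rb change swaps kmalloc() for kzalloc() so every field of the handler starts out zeroed rather than indeterminate. The userspace analogue (illustrative):

    #include <stdlib.h>

    struct handler { void *root; int count; };

    struct handler *alloc_uninit(void)  /* like kmalloc: garbage contents */
    {
            return malloc(sizeof(struct handler));
    }

    struct handler *alloc_zeroed(void)  /* like kzalloc: all fields zero/NULL */
    {
            return calloc(1, sizeof(struct handler));
    }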
@@ -206,7 +206,7 @@ int msix_request_rcd_irq(struct hfi1_ctxtdata *rcd)
 }

 /**
- * msix_request_rcd_irq() - Helper function for RCVAVAIL IRQs
+ * msix_netdev_request_rcd_irq - Helper function for RCVAVAIL IRQs
  * for netdev context
  * @rcd: valid netdev contexti
  */
@@ -221,7 +221,7 @@ int msix_netdev_request_rcd_irq(struct hfi1_ctxtdata *rcd)
 }

 /**
- * msix_request_smda_ira() - Helper for getting SDMA IRQ resources
+ * msix_request_sdma_irq - Helper for getting SDMA IRQ resources
  * @sde: valid sdma engine
  *
  */
@@ -243,7 +243,7 @@ int msix_request_sdma_irq(struct sdma_engine *sde)
 }

 /**
- * msix_request_general_irq(void) - Helper for getting general IRQ
+ * msix_request_general_irq - Helper for getting general IRQ
  * resources
  * @dd: valid device data
  */
@@ -269,7 +269,7 @@ int msix_request_general_irq(struct hfi1_devdata *dd)
 }

 /**
- * enable_sdma_src() - Helper to enable SDMA IRQ srcs
+ * enable_sdma_srcs - Helper to enable SDMA IRQ srcs
  * @dd: valid devdata structure
  * @i: index of SDMA engine
  */
@@ -349,7 +349,7 @@ void msix_free_irq(struct hfi1_devdata *dd, u8 msix_intr)
 }

 /**
- * hfi1_clean_up_msix_interrupts() - Free all MSIx IRQ resources
+ * msix_clean_up_interrupts - Free all MSIx IRQ resources
  * @dd: valid device data data structure
  *
  * Free the MSIx and associated PCI resources, if they have been allocated.
@@ -372,7 +372,7 @@ void msix_clean_up_interrupts(struct hfi1_devdata *dd)
 }

 /**
- * msix_netdev_syncrhonize_irq() - netdev IRQ synchronize
+ * msix_netdev_synchronize_irq - netdev IRQ synchronize
  * @dd: valid devdata
  */
 void msix_netdev_synchronize_irq(struct hfi1_devdata *dd)
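Aside: the msix.c hunks are pure kernel-doc repairs; each comment is renamed to the function it actually documents and the stray "()"/"(void)" decorations are dropped, matching the "name - description" form that scripts/kernel-doc parses. A minimal example of the expected shape (hypothetical function):

    struct widget { int state; };

    /**
     * frob_widget - reset a widget to its power-on state
     * @w: widget to reset
     */
    static void frob_widget(struct widget *w)
    {
            w->state = 0;
    }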
@@ -14,15 +14,14 @@

 /**
  * struct hfi1_netdev_rxq - Receive Queue for HFI
- * dummy netdev. Both IPoIB and VNIC netdevices will be working on
- * top of this device.
+ * Both IPoIB and VNIC netdevices will be working on the rx abstraction.
  * @napi: napi object
- * @priv: ptr to netdev_priv
+ * @rx: ptr to netdev_rx
  * @rcd: ptr to receive context data
  */
 struct hfi1_netdev_rxq {
 	struct napi_struct napi;
-	struct hfi1_netdev_priv *priv;
+	struct hfi1_netdev_rx *rx;
 	struct hfi1_ctxtdata *rcd;
 };

@@ -36,7 +35,8 @@ struct hfi1_netdev_rxq {
 #define NUM_NETDEV_MAP_ENTRIES HFI1_MAX_NETDEV_CTXTS

 /**
- * struct hfi1_netdev_priv: data required to setup and run HFI netdev.
+ * struct hfi1_netdev_rx: data required to setup and run HFI netdev.
+ * @rx_napi: the dummy netdevice to support "polling" the receive contexts
  * @dd: hfi1_devdata
  * @rxq: pointer to dummy netdev receive queues.
  * @num_rx_q: number of receive queues
@@ -48,7 +48,8 @@ struct hfi1_netdev_rxq {
  * @netdevs: atomic counter of netdevs using dummy netdev.
  *           When 0 receive queues will be freed.
  */
-struct hfi1_netdev_priv {
+struct hfi1_netdev_rx {
+	struct net_device rx_napi;
 	struct hfi1_devdata *dd;
 	struct hfi1_netdev_rxq *rxq;
 	int num_rx_q;
@@ -60,42 +61,28 @@ struct hfi1_netdev_priv {
 	atomic_t netdevs;
 };

-static inline
-struct hfi1_netdev_priv *hfi1_netdev_priv(struct net_device *dev)
-{
-	return (struct hfi1_netdev_priv *)&dev[1];
-}
-
 static inline
 int hfi1_netdev_ctxt_count(struct hfi1_devdata *dd)
 {
-	struct hfi1_netdev_priv *priv = hfi1_netdev_priv(dd->dummy_netdev);
-
-	return priv->num_rx_q;
+	return dd->netdev_rx->num_rx_q;
 }

 static inline
 struct hfi1_ctxtdata *hfi1_netdev_get_ctxt(struct hfi1_devdata *dd, int ctxt)
 {
-	struct hfi1_netdev_priv *priv = hfi1_netdev_priv(dd->dummy_netdev);
-
-	return priv->rxq[ctxt].rcd;
+	return dd->netdev_rx->rxq[ctxt].rcd;
 }

 static inline
 int hfi1_netdev_get_free_rmt_idx(struct hfi1_devdata *dd)
 {
-	struct hfi1_netdev_priv *priv = hfi1_netdev_priv(dd->dummy_netdev);
-
-	return priv->rmt_start;
+	return dd->netdev_rx->rmt_start;
 }

 static inline
 void hfi1_netdev_set_free_rmt_idx(struct hfi1_devdata *dd, int rmt_idx)
 {
-	struct hfi1_netdev_priv *priv = hfi1_netdev_priv(dd->dummy_netdev);
-
-	priv->rmt_start = rmt_idx;
+	dd->netdev_rx->rmt_start = rmt_idx;
 }

 u32 hfi1_num_netdev_contexts(struct hfi1_devdata *dd, u32 available_contexts,
@@ -105,8 +92,8 @@ void hfi1_netdev_enable_queues(struct hfi1_devdata *dd);
 void hfi1_netdev_disable_queues(struct hfi1_devdata *dd);
 int hfi1_netdev_rx_init(struct hfi1_devdata *dd);
 int hfi1_netdev_rx_destroy(struct hfi1_devdata *dd);
-int hfi1_netdev_alloc(struct hfi1_devdata *dd);
-void hfi1_netdev_free(struct hfi1_devdata *dd);
+int hfi1_alloc_rx(struct hfi1_devdata *dd);
+void hfi1_free_rx(struct hfi1_devdata *dd);
 int hfi1_netdev_add_data(struct hfi1_devdata *dd, int id, void *data);
 void *hfi1_netdev_remove_data(struct hfi1_devdata *dd, int id);
 void *hfi1_netdev_get_data(struct hfi1_devdata *dd, int id);
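Aside: this header rework retires the trick of hiding private state behind a dummy net_device allocation (recovered with &dev[1] pointer arithmetic) in favor of a plainly allocated hfi1_netdev_rx that embeds the dummy netdev as a member. A compressed sketch of the before/after layout (illustrative types):

    struct net_device { int placeholder; };

    /* Before: priv lives immediately past the netdev allocation. */
    struct old_priv { int num_rx_q; };

    static struct old_priv *old_get_priv(struct net_device *dev)
    {
            return (struct old_priv *)&dev[1];
    }

    /* After: the dummy netdev is just one field, reachable without
     * pointer games, and the owning struct hangs off the devdata. */
    struct new_rx {
            struct net_device rx_napi;
            int num_rx_q;
    };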
@@ -17,11 +17,11 @@
 #include <linux/etherdevice.h>
 #include <rdma/ib_verbs.h>

-static int hfi1_netdev_setup_ctxt(struct hfi1_netdev_priv *priv,
+static int hfi1_netdev_setup_ctxt(struct hfi1_netdev_rx *rx,
 				  struct hfi1_ctxtdata *uctxt)
 {
 	unsigned int rcvctrl_ops;
-	struct hfi1_devdata *dd = priv->dd;
+	struct hfi1_devdata *dd = rx->dd;
 	int ret;

 	uctxt->rhf_rcv_function_map = netdev_rhf_rcv_functions;
@@ -118,11 +118,11 @@ static void hfi1_netdev_deallocate_ctxt(struct hfi1_devdata *dd,
 	hfi1_free_ctxt(uctxt);
 }

-static int hfi1_netdev_allot_ctxt(struct hfi1_netdev_priv *priv,
+static int hfi1_netdev_allot_ctxt(struct hfi1_netdev_rx *rx,
 				  struct hfi1_ctxtdata **ctxt)
 {
 	int rc;
-	struct hfi1_devdata *dd = priv->dd;
+	struct hfi1_devdata *dd = rx->dd;

 	rc = hfi1_netdev_allocate_ctxt(dd, ctxt);
 	if (rc) {
@@ -130,7 +130,7 @@ static int hfi1_netdev_allot_ctxt(struct hfi1_netdev_priv *priv,
 		return rc;
 	}

-	rc = hfi1_netdev_setup_ctxt(priv, *ctxt);
+	rc = hfi1_netdev_setup_ctxt(rx, *ctxt);
 	if (rc) {
 		dd_dev_err(dd, "netdev ctxt setup failed %d\n", rc);
 		hfi1_netdev_deallocate_ctxt(dd, *ctxt);
@@ -183,31 +183,31 @@ u32 hfi1_num_netdev_contexts(struct hfi1_devdata *dd, u32 available_contexts,
 		    (u32)HFI1_MAX_NETDEV_CTXTS);
 }

-static int hfi1_netdev_rxq_init(struct net_device *dev)
+static int hfi1_netdev_rxq_init(struct hfi1_netdev_rx *rx)
 {
 	int i;
 	int rc;
-	struct hfi1_netdev_priv *priv = hfi1_netdev_priv(dev);
-	struct hfi1_devdata *dd = priv->dd;
+	struct hfi1_devdata *dd = rx->dd;
+	struct net_device *dev = &rx->rx_napi;

-	priv->num_rx_q = dd->num_netdev_contexts;
-	priv->rxq = kcalloc_node(priv->num_rx_q, sizeof(struct hfi1_netdev_rxq),
-				 GFP_KERNEL, dd->node);
+	rx->num_rx_q = dd->num_netdev_contexts;
+	rx->rxq = kcalloc_node(rx->num_rx_q, sizeof(*rx->rxq),
+			       GFP_KERNEL, dd->node);

-	if (!priv->rxq) {
+	if (!rx->rxq) {
 		dd_dev_err(dd, "Unable to allocate netdev queue data\n");
 		return (-ENOMEM);
 	}

-	for (i = 0; i < priv->num_rx_q; i++) {
-		struct hfi1_netdev_rxq *rxq = &priv->rxq[i];
+	for (i = 0; i < rx->num_rx_q; i++) {
+		struct hfi1_netdev_rxq *rxq = &rx->rxq[i];

-		rc = hfi1_netdev_allot_ctxt(priv, &rxq->rcd);
+		rc = hfi1_netdev_allot_ctxt(rx, &rxq->rcd);
 		if (rc)
 			goto bail_context_irq_failure;

 		hfi1_rcd_get(rxq->rcd);
-		rxq->priv = priv;
+		rxq->rx = rx;
 		rxq->rcd->napi = &rxq->napi;
 		dd_dev_info(dd, "Setting rcv queue %d napi to context %d\n",
 			    i, rxq->rcd->ctxt);
@@ -227,7 +227,7 @@ static int hfi1_netdev_rxq_init(struct net_device *dev)
 bail_context_irq_failure:
 	dd_dev_err(dd, "Unable to allot receive context\n");
 	for (; i >= 0; i--) {
-		struct hfi1_netdev_rxq *rxq = &priv->rxq[i];
+		struct hfi1_netdev_rxq *rxq = &rx->rxq[i];

 		if (rxq->rcd) {
 			hfi1_netdev_deallocate_ctxt(dd, rxq->rcd);
@@ -235,20 +235,19 @@ static int hfi1_netdev_rxq_init(struct net_device *dev)
 			rxq->rcd = NULL;
 		}
 	}
-	kfree(priv->rxq);
-	priv->rxq = NULL;
+	kfree(rx->rxq);
+	rx->rxq = NULL;

 	return rc;
 }

-static void hfi1_netdev_rxq_deinit(struct net_device *dev)
+static void hfi1_netdev_rxq_deinit(struct hfi1_netdev_rx *rx)
 {
 	int i;
-	struct hfi1_netdev_priv *priv = hfi1_netdev_priv(dev);
-	struct hfi1_devdata *dd = priv->dd;
+	struct hfi1_devdata *dd = rx->dd;

-	for (i = 0; i < priv->num_rx_q; i++) {
-		struct hfi1_netdev_rxq *rxq = &priv->rxq[i];
+	for (i = 0; i < rx->num_rx_q; i++) {
+		struct hfi1_netdev_rxq *rxq = &rx->rxq[i];

 		netif_napi_del(&rxq->napi);
 		hfi1_netdev_deallocate_ctxt(dd, rxq->rcd);
@@ -256,41 +255,41 @@ static void hfi1_netdev_rxq_deinit(struct net_device *dev)
 		rxq->rcd = NULL;
 	}

-	kfree(priv->rxq);
-	priv->rxq = NULL;
-	priv->num_rx_q = 0;
+	kfree(rx->rxq);
+	rx->rxq = NULL;
+	rx->num_rx_q = 0;
 }

-static void enable_queues(struct hfi1_netdev_priv *priv)
+static void enable_queues(struct hfi1_netdev_rx *rx)
 {
 	int i;

-	for (i = 0; i < priv->num_rx_q; i++) {
-		struct hfi1_netdev_rxq *rxq = &priv->rxq[i];
+	for (i = 0; i < rx->num_rx_q; i++) {
+		struct hfi1_netdev_rxq *rxq = &rx->rxq[i];

-		dd_dev_info(priv->dd, "enabling queue %d on context %d\n", i,
+		dd_dev_info(rx->dd, "enabling queue %d on context %d\n", i,
 			    rxq->rcd->ctxt);
 		napi_enable(&rxq->napi);
-		hfi1_rcvctrl(priv->dd,
+		hfi1_rcvctrl(rx->dd,
 			     HFI1_RCVCTRL_CTXT_ENB | HFI1_RCVCTRL_INTRAVAIL_ENB,
 			     rxq->rcd);
 	}
 }

-static void disable_queues(struct hfi1_netdev_priv *priv)
+static void disable_queues(struct hfi1_netdev_rx *rx)
 {
 	int i;

-	msix_netdev_synchronize_irq(priv->dd);
+	msix_netdev_synchronize_irq(rx->dd);

-	for (i = 0; i < priv->num_rx_q; i++) {
-		struct hfi1_netdev_rxq *rxq = &priv->rxq[i];
+	for (i = 0; i < rx->num_rx_q; i++) {
+		struct hfi1_netdev_rxq *rxq = &rx->rxq[i];

-		dd_dev_info(priv->dd, "disabling queue %d on context %d\n", i,
+		dd_dev_info(rx->dd, "disabling queue %d on context %d\n", i,
 			    rxq->rcd->ctxt);

 		/* wait for napi if it was scheduled */
-		hfi1_rcvctrl(priv->dd,
+		hfi1_rcvctrl(rx->dd,
 			     HFI1_RCVCTRL_CTXT_DIS | HFI1_RCVCTRL_INTRAVAIL_DIS,
 			     rxq->rcd);
 		napi_synchronize(&rxq->napi);
@@ -307,15 +306,14 @@ static void disable_queues(struct hfi1_netdev_priv *priv)
 */
 int hfi1_netdev_rx_init(struct hfi1_devdata *dd)
 {
-	struct hfi1_netdev_priv *priv = hfi1_netdev_priv(dd->dummy_netdev);
+	struct hfi1_netdev_rx *rx = dd->netdev_rx;
 	int res;

-	if (atomic_fetch_inc(&priv->netdevs))
+	if (atomic_fetch_inc(&rx->netdevs))
 		return 0;

 	mutex_lock(&hfi1_mutex);
-	init_dummy_netdev(dd->dummy_netdev);
-	res = hfi1_netdev_rxq_init(dd->dummy_netdev);
+	res = hfi1_netdev_rxq_init(rx);
 	mutex_unlock(&hfi1_mutex);
 	return res;
 }
@@ -328,12 +326,12 @@ int hfi1_netdev_rx_init(struct hfi1_devdata *dd)
 */
 int hfi1_netdev_rx_destroy(struct hfi1_devdata *dd)
 {
-	struct hfi1_netdev_priv *priv = hfi1_netdev_priv(dd->dummy_netdev);
+	struct hfi1_netdev_rx *rx = dd->netdev_rx;

 	/* destroy the RX queues only if it is the last netdev going away */
-	if (atomic_fetch_add_unless(&priv->netdevs, -1, 0) == 1) {
+	if (atomic_fetch_add_unless(&rx->netdevs, -1, 0) == 1) {
 		mutex_lock(&hfi1_mutex);
-		hfi1_netdev_rxq_deinit(dd->dummy_netdev);
+		hfi1_netdev_rxq_deinit(rx);
 		mutex_unlock(&hfi1_mutex);
 	}

@@ -341,39 +339,43 @@ int hfi1_netdev_rx_destroy(struct hfi1_devdata *dd)
 }

 /**
- * hfi1_netdev_alloc - Allocates netdev and private data. It is required
- *		       because RMT index and MSI-X interrupt can be set only
- *		       during driver initialization.
- *
+ * hfi1_alloc_rx - Allocates the rx support structure
  * @dd: hfi1 dev data
+ *
+ * Allocate the rx structure to support gathering the receive
+ * resources and the dummy netdev.
+ *
+ * Updates dd struct pointer upon success.
+ *
+ * Return: 0 (success) -error on failure
+ *
  */
-int hfi1_netdev_alloc(struct hfi1_devdata *dd)
+int hfi1_alloc_rx(struct hfi1_devdata *dd)
 {
-	struct hfi1_netdev_priv *priv;
-	const int netdev_size = sizeof(*dd->dummy_netdev) +
-		sizeof(struct hfi1_netdev_priv);
+	struct hfi1_netdev_rx *rx;

-	dd_dev_info(dd, "allocating netdev size %d\n", netdev_size);
-	dd->dummy_netdev = kcalloc_node(1, netdev_size, GFP_KERNEL, dd->node);
+	dd_dev_info(dd, "allocating rx size %ld\n", sizeof(*rx));
+	rx = kzalloc_node(sizeof(*rx), GFP_KERNEL, dd->node);

-	if (!dd->dummy_netdev)
+	if (!rx)
 		return -ENOMEM;
+	rx->dd = dd;
+	init_dummy_netdev(&rx->rx_napi);

-	priv = hfi1_netdev_priv(dd->dummy_netdev);
-	priv->dd = dd;
-	xa_init(&priv->dev_tbl);
-	atomic_set(&priv->enabled, 0);
-	atomic_set(&priv->netdevs, 0);
+	xa_init(&rx->dev_tbl);
+	atomic_set(&rx->enabled, 0);
+	atomic_set(&rx->netdevs, 0);
+	dd->netdev_rx = rx;

 	return 0;
 }

-void hfi1_netdev_free(struct hfi1_devdata *dd)
+void hfi1_free_rx(struct hfi1_devdata *dd)
 {
-	if (dd->dummy_netdev) {
-		dd_dev_info(dd, "hfi1 netdev freed\n");
-		kfree(dd->dummy_netdev);
-		dd->dummy_netdev = NULL;
+	if (dd->netdev_rx) {
+		dd_dev_info(dd, "hfi1 rx freed\n");
+		kfree(dd->netdev_rx);
+		dd->netdev_rx = NULL;
 	}
 }

@@ -388,33 +390,33 @@ void hfi1_netdev_free(struct hfi1_devdata *dd)
 */
 void hfi1_netdev_enable_queues(struct hfi1_devdata *dd)
 {
-	struct hfi1_netdev_priv *priv;
+	struct hfi1_netdev_rx *rx;

-	if (!dd->dummy_netdev)
+	if (!dd->netdev_rx)
 		return;

-	priv = hfi1_netdev_priv(dd->dummy_netdev);
-	if (atomic_fetch_inc(&priv->enabled))
+	rx = dd->netdev_rx;
+	if (atomic_fetch_inc(&rx->enabled))
 		return;

 	mutex_lock(&hfi1_mutex);
-	enable_queues(priv);
+	enable_queues(rx);
 	mutex_unlock(&hfi1_mutex);
 }

 void hfi1_netdev_disable_queues(struct hfi1_devdata *dd)
 {
-	struct hfi1_netdev_priv *priv;
+	struct hfi1_netdev_rx *rx;

-	if (!dd->dummy_netdev)
+	if (!dd->netdev_rx)
 		return;

-	priv = hfi1_netdev_priv(dd->dummy_netdev);
-	if (atomic_dec_if_positive(&priv->enabled))
+	rx = dd->netdev_rx;
+	if (atomic_dec_if_positive(&rx->enabled))
 		return;

 	mutex_lock(&hfi1_mutex);
-	disable_queues(priv);
+	disable_queues(rx);
 	mutex_unlock(&hfi1_mutex);
 }

@@ -430,9 +432,9 @@ void hfi1_netdev_disable_queues(struct hfi1_devdata *dd)
 */
 int hfi1_netdev_add_data(struct hfi1_devdata *dd, int id, void *data)
 {
-	struct hfi1_netdev_priv *priv = hfi1_netdev_priv(dd->dummy_netdev);
+	struct hfi1_netdev_rx *rx = dd->netdev_rx;

-	return xa_insert(&priv->dev_tbl, id, data, GFP_NOWAIT);
+	return xa_insert(&rx->dev_tbl, id, data, GFP_NOWAIT);
 }

 /**
@@ -444,9 +446,9 @@ int hfi1_netdev_add_data(struct hfi1_devdata *dd, int id, void *data)
 */
 void *hfi1_netdev_remove_data(struct hfi1_devdata *dd, int id)
 {
-	struct hfi1_netdev_priv *priv = hfi1_netdev_priv(dd->dummy_netdev);
+	struct hfi1_netdev_rx *rx = dd->netdev_rx;

-	return xa_erase(&priv->dev_tbl, id);
+	return xa_erase(&rx->dev_tbl, id);
 }

 /**
@@ -457,24 +459,24 @@ void *hfi1_netdev_remove_data(struct hfi1_devdata *dd, int id)
 */
 void *hfi1_netdev_get_data(struct hfi1_devdata *dd, int id)
 {
-	struct hfi1_netdev_priv *priv = hfi1_netdev_priv(dd->dummy_netdev);
+	struct hfi1_netdev_rx *rx = dd->netdev_rx;

-	return xa_load(&priv->dev_tbl, id);
+	return xa_load(&rx->dev_tbl, id);
 }

 /**
- * hfi1_netdev_get_first_dat - Gets first entry with greater or equal id.
+ * hfi1_netdev_get_first_data - Gets first entry with greater or equal id.
  *
  * @dd: hfi1 dev data
  * @start_id: requested integer id up to INT_MAX
  */
 void *hfi1_netdev_get_first_data(struct hfi1_devdata *dd, int *start_id)
 {
-	struct hfi1_netdev_priv *priv = hfi1_netdev_priv(dd->dummy_netdev);
+	struct hfi1_netdev_rx *rx = dd->netdev_rx;
 	unsigned long index = *start_id;
 	void *ret;

-	ret = xa_find(&priv->dev_tbl, &index, UINT_MAX, XA_PRESENT);
+	ret = xa_find(&rx->dev_tbl, &index, UINT_MAX, XA_PRESENT);
 	*start_id = (int)index;
 	return ret;
 }
@@ -1285,7 +1285,7 @@ int sdma_map_init(struct hfi1_devdata *dd, u8 port, u8 num_vls, u8 *vl_engines)
 }

 /**
- * sdma_clean()  Clean up allocated memory
+ * sdma_clean - Clean up allocated memory
  * @dd:          struct hfi1_devdata
  * @num_engines: num sdma engines
  *
@@ -907,24 +907,6 @@ static inline unsigned sdma_progress(struct sdma_engine *sde, unsigned seq,
 	return 0;
 }

-/**
- * sdma_iowait_schedule() - initialize wait structure
- * @sde: sdma_engine to schedule
- * @wait: wait struct to schedule
- *
- * This function initializes the iowait
- * structure embedded in the QP or PQ.
- *
- */
-static inline void sdma_iowait_schedule(
-	struct sdma_engine *sde,
-	struct iowait *wait)
-{
-	struct hfi1_pportdata *ppd = sde->dd->pport;
-
-	iowait_schedule(wait, ppd->hfi1_wq, sde->cpu);
-}
-
 /* for use by interrupt handling */
 void sdma_engine_error(struct sdma_engine *sde, u64 status);
 void sdma_engine_interrupt(struct sdma_engine *sde, u64 status);
@ -649,7 +649,7 @@ const struct attribute_group ib_hfi1_attr_group = {
|
|||
.attrs = hfi1_attributes,
|
||||
};
|
||||
|
||||
int hfi1_create_port_files(struct ib_device *ibdev, u8 port_num,
|
||||
int hfi1_create_port_files(struct ib_device *ibdev, u32 port_num,
|
||||
struct kobject *kobj)
|
||||
{
|
||||
struct hfi1_pportdata *ppd;
|
||||
|
|
|
@@ -53,6 +53,8 @@
 #include "hfi.h"
 #include "mad.h"
 #include "sdma.h"
+#include "ipoib.h"
+#include "user_sdma.h"

 const char *parse_sdma_flags(struct trace_seq *p, u64 desc0, u64 desc1);

@@ -653,6 +655,80 @@ TRACE_EVENT(hfi1_sdma_user_completion,
 		      __entry->code)
 );

+TRACE_EVENT(hfi1_usdma_defer,
+	    TP_PROTO(struct hfi1_user_sdma_pkt_q *pq,
+		     struct sdma_engine *sde,
+		     struct iowait *wait),
+	    TP_ARGS(pq, sde, wait),
+	    TP_STRUCT__entry(DD_DEV_ENTRY(pq->dd)
+			     __field(struct hfi1_user_sdma_pkt_q *, pq)
+			     __field(struct sdma_engine *, sde)
+			     __field(struct iowait *, wait)
+			     __field(int, engine)
+			     __field(int, empty)
+			     ),
+	    TP_fast_assign(DD_DEV_ASSIGN(pq->dd);
+			   __entry->pq = pq;
+			   __entry->sde = sde;
+			   __entry->wait = wait;
+			   __entry->engine = sde->this_idx;
+			   __entry->empty = list_empty(&__entry->wait->list);
+			   ),
+	    TP_printk("[%s] pq %llx sde %llx wait %llx engine %d empty %d",
+		      __get_str(dev),
+		      (unsigned long long)__entry->pq,
+		      (unsigned long long)__entry->sde,
+		      (unsigned long long)__entry->wait,
+		      __entry->engine,
+		      __entry->empty
+		      )
+);
+
+TRACE_EVENT(hfi1_usdma_activate,
+	    TP_PROTO(struct hfi1_user_sdma_pkt_q *pq,
+		     struct iowait *wait,
+		     int reason),
+	    TP_ARGS(pq, wait, reason),
+	    TP_STRUCT__entry(DD_DEV_ENTRY(pq->dd)
+			     __field(struct hfi1_user_sdma_pkt_q *, pq)
+			     __field(struct iowait *, wait)
+			     __field(int, reason)
+			     ),
+	    TP_fast_assign(DD_DEV_ASSIGN(pq->dd);
+			   __entry->pq = pq;
+			   __entry->wait = wait;
+			   __entry->reason = reason;
+			   ),
+	    TP_printk("[%s] pq %llx wait %llx reason %d",
+		      __get_str(dev),
+		      (unsigned long long)__entry->pq,
+		      (unsigned long long)__entry->wait,
+		      __entry->reason
+		      )
+);
+
+TRACE_EVENT(hfi1_usdma_we,
+	    TP_PROTO(struct hfi1_user_sdma_pkt_q *pq,
+		     int we_ret),
+	    TP_ARGS(pq, we_ret),
+	    TP_STRUCT__entry(DD_DEV_ENTRY(pq->dd)
+			     __field(struct hfi1_user_sdma_pkt_q *, pq)
+			     __field(int, state)
+			     __field(int, we_ret)
+			     ),
+	    TP_fast_assign(DD_DEV_ASSIGN(pq->dd);
+			   __entry->pq = pq;
+			   __entry->state = pq->state;
+			   __entry->we_ret = we_ret;
+			   ),
+	    TP_printk("[%s] pq %llx state %d we_ret %d",
+		      __get_str(dev),
+		      (unsigned long long)__entry->pq,
+		      __entry->state,
+		      __entry->we_ret
+		      )
+);
+
 const char *print_u32_array(struct trace_seq *, u32 *, int);
 #define __print_u32_hex(arr, len) print_u32_array(p, arr, len)

@@ -858,6 +934,109 @@ DEFINE_EVENT(
 	TP_ARGS(qp, flag)
 );

+DECLARE_EVENT_CLASS(/* AIP  */
+	hfi1_ipoib_txq_template,
+	TP_PROTO(struct hfi1_ipoib_txq *txq),
+	TP_ARGS(txq),
+	TP_STRUCT__entry(/* entry */
+		DD_DEV_ENTRY(txq->priv->dd)
+		__field(struct hfi1_ipoib_txq *, txq)
+		__field(struct sdma_engine *, sde)
+		__field(ulong, head)
+		__field(ulong, tail)
+		__field(uint, used)
+		__field(uint, flow)
+		__field(int, stops)
+		__field(int, no_desc)
+		__field(u8, idx)
+		__field(u8, stopped)
+	),
+	TP_fast_assign(/* assign */
+		DD_DEV_ASSIGN(txq->priv->dd)
+		__entry->txq = txq;
+		__entry->sde = txq->sde;
+		__entry->head = txq->tx_ring.head;
+		__entry->tail = txq->tx_ring.tail;
+		__entry->idx = txq->q_idx;
+		__entry->used =
+			txq->sent_txreqs -
+			atomic64_read(&txq->complete_txreqs);
+		__entry->flow = txq->flow.as_int;
+		__entry->stops = atomic_read(&txq->stops);
+		__entry->no_desc = atomic_read(&txq->no_desc);
+		__entry->stopped =
+			__netif_subqueue_stopped(txq->priv->netdev, txq->q_idx);
+	),
+	TP_printk(/* print */
+		"[%s] txq %llx idx %u sde %llx head %lx tail %lx flow %x used %u stops %d no_desc %d stopped %u",
+		__get_str(dev),
+		(unsigned long long)__entry->txq,
+		__entry->idx,
+		(unsigned long long)__entry->sde,
+		__entry->head,
+		__entry->tail,
+		__entry->flow,
+		__entry->used,
+		__entry->stops,
+		__entry->no_desc,
+		__entry->stopped
+	)
+);
+
+DEFINE_EVENT(/* queue stop */
+	hfi1_ipoib_txq_template, hfi1_txq_stop,
+	TP_PROTO(struct hfi1_ipoib_txq *txq),
+	TP_ARGS(txq)
+);
+
+DEFINE_EVENT(/* queue wake */
+	hfi1_ipoib_txq_template, hfi1_txq_wake,
+	TP_PROTO(struct hfi1_ipoib_txq *txq),
+	TP_ARGS(txq)
+);
+
+DEFINE_EVENT(/* flow flush */
+	hfi1_ipoib_txq_template, hfi1_flow_flush,
+	TP_PROTO(struct hfi1_ipoib_txq *txq),
+	TP_ARGS(txq)
+);
+
+DEFINE_EVENT(/* flow switch */
+	hfi1_ipoib_txq_template, hfi1_flow_switch,
+	TP_PROTO(struct hfi1_ipoib_txq *txq),
+	TP_ARGS(txq)
+);
+
+DEFINE_EVENT(/* wakeup */
+	hfi1_ipoib_txq_template, hfi1_txq_wakeup,
+	TP_PROTO(struct hfi1_ipoib_txq *txq),
+	TP_ARGS(txq)
+);
+
+DEFINE_EVENT(/* full */
+	hfi1_ipoib_txq_template, hfi1_txq_full,
+	TP_PROTO(struct hfi1_ipoib_txq *txq),
+	TP_ARGS(txq)
+);
+
+DEFINE_EVENT(/* queued */
+	hfi1_ipoib_txq_template, hfi1_txq_queued,
+	TP_PROTO(struct hfi1_ipoib_txq *txq),
+	TP_ARGS(txq)
+);
+
+DEFINE_EVENT(/* xmit_stopped */
+	hfi1_ipoib_txq_template, hfi1_txq_xmit_stopped,
+	TP_PROTO(struct hfi1_ipoib_txq *txq),
+	TP_ARGS(txq)
+);
+
+DEFINE_EVENT(/* xmit_unstopped */
+	hfi1_ipoib_txq_template, hfi1_txq_xmit_unstopped,
+	TP_PROTO(struct hfi1_ipoib_txq *txq),
+	TP_ARGS(txq)
+);
+
 #endif /* __HFI1_TRACE_TX_H */

 #undef TRACE_INCLUDE_PATH
@@ -133,6 +133,7 @@ static int defer_packet_queue(
 		container_of(wait->iow, struct hfi1_user_sdma_pkt_q, busy);

 	write_seqlock(&sde->waitlock);
+	trace_hfi1_usdma_defer(pq, sde, &pq->busy);
 	if (sdma_progress(sde, seq, txreq))
 		goto eagain;
 	/*
@@ -157,7 +158,8 @@ static void activate_packet_queue(struct iowait *wait, int reason)
 {
 	struct hfi1_user_sdma_pkt_q *pq =
 		container_of(wait, struct hfi1_user_sdma_pkt_q, busy);
-	pq->busy.lock = NULL;
+
+	trace_hfi1_usdma_activate(pq, wait, reason);
 	xchg(&pq->state, SDMA_PKT_Q_ACTIVE);
 	wake_up(&wait->wait_dma);
 };
@@ -599,13 +601,17 @@ int hfi1_user_sdma_process_request(struct hfi1_filedata *fd,
 	while (req->seqsubmitted != req->info.npkts) {
 		ret = user_sdma_send_pkts(req, pcount);
 		if (ret < 0) {
+			int we_ret;
+
 			if (ret != -EBUSY)
 				goto free_req;
-			if (wait_event_interruptible_timeout(
+			we_ret = wait_event_interruptible_timeout(
 				pq->busy.wait_dma,
 				pq->state == SDMA_PKT_Q_ACTIVE,
 				msecs_to_jiffies(
-					SDMA_IOWAIT_TIMEOUT)) <= 0)
+					SDMA_IOWAIT_TIMEOUT));
+			trace_hfi1_usdma_we(pq, we_ret);
+			if (we_ret <= 0)
 				flush_pq_iowait(pq);
 		}
 	}
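Aside: the hfi1_user_sdma_process_request() hunk stops consuming the wait's return value inside the if() and instead parks it in we_ret, purely so it can be traced before the timeout/interrupt decision is made. The control-flow change in miniature (stub functions stand in for the kernel primitives):

    static int wait_for_ready(void) { return 1; }  /* >0: woke in time */
    static void report(int v) { (void)v; }         /* stands in for the trace */
    static void flush_queue(void) { }

    static int wait_then_maybe_flush(void)
    {
            int we_ret = wait_for_ready();

            report(we_ret);        /* now observable before the decision */
            if (we_ret <= 0)       /* timeout (0) or interrupted (<0) */
                    flush_queue();
            return we_ret;
    }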
@@ -53,6 +53,7 @@
 #include "common.h"
 #include "iowait.h"
 #include "user_exp_rcv.h"
+#include "mmu_rb.h"

 /* The maximum number of Data io vectors per message/request */
 #define MAX_VECTORS_PER_REQ 8
@@ -1407,7 +1407,7 @@ static inline u16 opa_width_to_ib(u16 in)
 	}
 }

-static int query_port(struct rvt_dev_info *rdi, u8 port_num,
+static int query_port(struct rvt_dev_info *rdi, u32 port_num,
 		      struct ib_port_attr *props)
 {
 	struct hfi1_ibdev *verbs_dev = dev_from_rdi(rdi);
@@ -1485,7 +1485,7 @@ static int modify_device(struct ib_device *device,
 	return ret;
 }

-static int shut_down_port(struct rvt_dev_info *rdi, u8 port_num)
+static int shut_down_port(struct rvt_dev_info *rdi, u32 port_num)
 {
 	struct hfi1_ibdev *verbs_dev = dev_from_rdi(rdi);
 	struct hfi1_devdata *dd = dd_from_dev(verbs_dev);
@@ -1694,7 +1694,7 @@ static int init_cntr_names(const char *names_in,
 }

 static struct rdma_hw_stats *alloc_hw_stats(struct ib_device *ibdev,
-					    u8 port_num)
+					    u32 port_num)
 {
 	int i, err;

@@ -1758,7 +1758,7 @@ static u64 hfi1_sps_ints(void)
 }

 static int get_hw_stats(struct ib_device *ibdev, struct rdma_hw_stats *stats,
-			u8 port, int index)
+			u32 port, int index)
 {
 	u64 *values;
 	int count;
@@ -325,10 +325,10 @@ static inline struct rvt_qp *iowait_to_qp(struct iowait *s_iowait)
 */
 void hfi1_bad_pkey(struct hfi1_ibport *ibp, u32 key, u32 sl,
 		   u32 qp1, u32 qp2, u32 lid1, u32 lid2);
-void hfi1_cap_mask_chg(struct rvt_dev_info *rdi, u8 port_num);
+void hfi1_cap_mask_chg(struct rvt_dev_info *rdi, u32 port_num);
 void hfi1_sys_guid_chg(struct hfi1_ibport *ibp);
 void hfi1_node_desc_chg(struct hfi1_ibport *ibp);
-int hfi1_process_mad(struct ib_device *ibdev, int mad_flags, u8 port,
+int hfi1_process_mad(struct ib_device *ibdev, int mad_flags, u32 port,
 		     const struct ib_wc *in_wc, const struct ib_grh *in_grh,
 		     const struct ib_mad *in_mad, struct ib_mad *out_mad,
 		     size_t *out_mad_size, u16 *out_mad_pkey_index);
@@ -99,11 +99,6 @@ static inline struct verbs_txreq *get_txreq(struct hfi1_ibdev *dev,
 	return tx;
 }

-static inline struct sdma_txreq *get_sdma_txreq(struct verbs_txreq *tx)
-{
-	return &tx->txreq;
-}
-
 static inline struct verbs_txreq *get_waiting_verbs_txreq(struct iowait_work *w)
 {
 	struct sdma_txreq *stx;
@@ -156,7 +156,7 @@ bool hfi1_vnic_sdma_write_avail(struct hfi1_vnic_vport_info *vinfo,

 /* vnic rdma netdev operations */
 struct net_device *hfi1_vnic_alloc_rn(struct ib_device *device,
-				      u8 port_num,
+				      u32 port_num,
 				      enum rdma_netdev_t type,
 				      const char *name,
 				      unsigned char name_assign_type,
@@ -593,7 +593,7 @@ static void hfi1_vnic_free_rn(struct net_device *netdev)
 }

 struct net_device *hfi1_vnic_alloc_rn(struct ib_device *device,
-				      u8 port_num,
+				      u32 port_num,
 				      enum rdma_netdev_t type,
 				      const char *name,
 				      unsigned char name_assign_type,
@@ -304,6 +304,9 @@ int hns_roce_get_umem_bufs(struct hns_roce_dev *hr_dev, dma_addr_t *bufs,

 void hns_roce_cleanup_bitmap(struct hns_roce_dev *hr_dev)
 {
+	if (hr_dev->caps.flags & HNS_ROCE_CAP_FLAG_XRC)
+		hns_roce_cleanup_xrcd_table(hr_dev);
+
 	if (hr_dev->caps.flags & HNS_ROCE_CAP_FLAG_SRQ)
 		hns_roce_cleanup_srq_table(hr_dev);
 	hns_roce_cleanup_qp_table(hr_dev);
@ -38,22 +38,14 @@
|
|||
|
||||
#define CMD_POLL_TOKEN 0xffff
|
||||
#define CMD_MAX_NUM 32
|
||||
#define CMD_TOKEN_MASK 0x1f
|
||||
|
||||
static int hns_roce_cmd_mbox_post_hw(struct hns_roce_dev *hr_dev, u64 in_param,
|
||||
u64 out_param, u32 in_modifier,
|
||||
u8 op_modifier, u16 op, u16 token,
|
||||
int event)
|
||||
{
|
||||
struct hns_roce_cmdq *cmd = &hr_dev->cmd;
|
||||
int ret;
|
||||
|
||||
mutex_lock(&cmd->hcr_mutex);
|
||||
ret = hr_dev->hw->post_mbox(hr_dev, in_param, out_param, in_modifier,
|
||||
op_modifier, op, token, event);
|
||||
mutex_unlock(&cmd->hcr_mutex);
|
||||
|
||||
return ret;
|
||||
return hr_dev->hw->post_mbox(hr_dev, in_param, out_param, in_modifier,
|
||||
op_modifier, op, token, event);
|
||||
}
|
||||
|
||||
/* this should be called with "poll_sem" */
|
||||
|
@ -62,18 +54,19 @@ static int __hns_roce_cmd_mbox_poll(struct hns_roce_dev *hr_dev, u64 in_param,
|
|||
u8 op_modifier, u16 op,
|
||||
unsigned int timeout)
|
||||
{
|
||||
struct device *dev = hr_dev->dev;
|
||||
int ret;
|
||||
|
||||
ret = hns_roce_cmd_mbox_post_hw(hr_dev, in_param, out_param,
|
||||
in_modifier, op_modifier, op,
|
||||
CMD_POLL_TOKEN, 0);
|
||||
if (ret) {
|
||||
dev_err(dev, "[cmd_poll]hns_roce_cmd_mbox_post_hw failed\n");
|
||||
dev_err_ratelimited(hr_dev->dev,
|
||||
"failed to post mailbox %x in poll mode, ret = %d.\n",
|
||||
op, ret);
|
||||
return ret;
|
||||
}
|
||||
|
||||
return hr_dev->hw->chk_mbox(hr_dev, timeout);
|
||||
return hr_dev->hw->poll_mbox_done(hr_dev, timeout);
|
||||
}
|
||||
|
||||
static int hns_roce_cmd_mbox_poll(struct hns_roce_dev *hr_dev, u64 in_param,
|
||||
|
@@ -96,15 +89,18 @@ void hns_roce_cmd_event(struct hns_roce_dev *hr_dev, u16 token, u8 status,
 	struct hns_roce_cmd_context *context =
 		&hr_dev->cmd.context[token % hr_dev->cmd.max_cmds];
 
-	if (token != context->token)
+	if (unlikely(token != context->token)) {
+		dev_err_ratelimited(hr_dev->dev,
+				    "[cmd] invalid ae token %x,context token is %x!\n",
+				    token, context->token);
 		return;
+	}
 
 	context->result = (status == HNS_ROCE_CMD_SUCCESS) ? 0 : (-EIO);
 	context->out_param = out_param;
 	complete(&context->done);
 }
 
 /* this should be called with "use_events" */
 static int __hns_roce_cmd_mbox_wait(struct hns_roce_dev *hr_dev, u64 in_param,
 				    u64 out_param, unsigned long in_modifier,
 				    u8 op_modifier, u16 op,
@@ -116,44 +112,44 @@ static int __hns_roce_cmd_mbox_wait(struct hns_roce_dev *hr_dev, u64 in_param,
 	int ret;
 
 	spin_lock(&cmd->context_lock);
-	WARN_ON(cmd->free_head < 0);
-	context = &cmd->context[cmd->free_head];
-	context->token += cmd->token_mask + 1;
-	cmd->free_head = context->next;
+
+	do {
+		context = &cmd->context[cmd->free_head];
+		cmd->free_head = context->next;
+	} while (context->busy);
+
+	context->busy = 1;
+	context->token += cmd->max_cmds;
+
 	spin_unlock(&cmd->context_lock);
 
-	init_completion(&context->done);
+	reinit_completion(&context->done);
 
 	ret = hns_roce_cmd_mbox_post_hw(hr_dev, in_param, out_param,
 					in_modifier, op_modifier, op,
 					context->token, 1);
-	if (ret)
+	if (ret) {
+		dev_err_ratelimited(dev,
+				    "failed to post mailbox %x in event mode, ret = %d.\n",
+				    op, ret);
 		goto out;
+	}
 
-	/*
-	 * It is timeout when wait_for_completion_timeout return 0
-	 * The return value is the time limit set in advance
-	 * how many seconds showing
-	 */
 	if (!wait_for_completion_timeout(&context->done,
 					 msecs_to_jiffies(timeout))) {
-		dev_err(dev, "[cmd]wait_for_completion_timeout timeout\n");
+		dev_err_ratelimited(dev, "[cmd] token %x mailbox %x timeout.\n",
+				    context->token, op);
 		ret = -EBUSY;
 		goto out;
 	}
 
 	ret = context->result;
-	if (ret) {
-		dev_err(dev, "[cmd]event mod cmd process error!err=%d\n", ret);
-		goto out;
-	}
+	if (ret)
+		dev_err_ratelimited(dev, "[cmd] token %x mailbox %x error %d\n",
+				    context->token, op, ret);
 
 out:
-	spin_lock(&cmd->context_lock);
-	context->next = cmd->free_head;
-	cmd->free_head = context - cmd->context;
-	spin_unlock(&cmd->context_lock);
-
+	context->busy = 0;
 	return ret;
 }
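A user-space model of the context pool introduced above may help: the command contexts form a ring through their next fields, allocation walks free_head past busy slots, and the token is advanced by the pool size so a stale completion for the slot's previous owner cannot match. This is a sketch under that reading (all names invented); in the driver the walk runs under context_lock and event_sem bounds the number of concurrent users, which is what guarantees the loop finds a free slot.

#include <stdio.h>

#define MAX_CMDS 32

struct ctx {
	int next;
	int busy;
	unsigned int token;
};

static struct ctx pool[MAX_CMDS];
static int free_head;

static struct ctx *ctx_get(void)
{
	struct ctx *c;

	/* the driver holds context_lock around this walk */
	do {
		c = &pool[free_head];
		free_head = c->next;
	} while (c->busy);
	c->busy = 1;
	c->token += MAX_CMDS;	/* stale events see a mismatched token */
	return c;
}

static void ctx_put(struct ctx *c)
{
	c->busy = 0;	/* the ring itself is never relinked */
}

int main(void)
{
	int i;

	for (i = 0; i < MAX_CMDS; i++) {
		pool[i].token = i;
		pool[i].next = i + 1;
	}
	pool[MAX_CMDS - 1].next = 0;	/* ring wraps to slot 0, not -1 */

	struct ctx *c = ctx_get();
	printf("got slot %ld, token %u\n", (long)(c - pool), c->token);
	ctx_put(c);
	return 0;
}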
@@ -175,44 +171,28 @@ int hns_roce_cmd_mbox(struct hns_roce_dev *hr_dev, u64 in_param, u64 out_param,
 		      unsigned long in_modifier, u8 op_modifier, u16 op,
 		      unsigned int timeout)
 {
-	int ret;
+	bool is_busy;
 
-	if (hr_dev->hw->rst_prc_mbox) {
-		ret = hr_dev->hw->rst_prc_mbox(hr_dev);
-		if (ret == CMD_RST_PRC_SUCCESS)
-			return 0;
-		else if (ret == CMD_RST_PRC_EBUSY)
-			return -EBUSY;
-	}
+	if (hr_dev->hw->chk_mbox_avail)
+		if (!hr_dev->hw->chk_mbox_avail(hr_dev, &is_busy))
+			return is_busy ? -EBUSY : 0;
 
 	if (hr_dev->cmd.use_events)
-		ret = hns_roce_cmd_mbox_wait(hr_dev, in_param, out_param,
-					     in_modifier, op_modifier, op,
-					     timeout);
+		return hns_roce_cmd_mbox_wait(hr_dev, in_param, out_param,
+					      in_modifier, op_modifier, op,
+					      timeout);
 	else
-		ret = hns_roce_cmd_mbox_poll(hr_dev, in_param, out_param,
-					     in_modifier, op_modifier, op,
-					     timeout);
-
-	if (ret == CMD_RST_PRC_EBUSY)
-		return -EBUSY;
-
-	if (ret && (hr_dev->hw->rst_prc_mbox &&
-		    hr_dev->hw->rst_prc_mbox(hr_dev) == CMD_RST_PRC_SUCCESS))
-		return 0;
-
-	return ret;
+		return hns_roce_cmd_mbox_poll(hr_dev, in_param, out_param,
+					      in_modifier, op_modifier, op,
+					      timeout);
 }
 
 int hns_roce_cmd_init(struct hns_roce_dev *hr_dev)
 {
-	struct device *dev = hr_dev->dev;
-
-	mutex_init(&hr_dev->cmd.hcr_mutex);
 	sema_init(&hr_dev->cmd.poll_sem, 1);
 	hr_dev->cmd.use_events = 0;
 	hr_dev->cmd.max_cmds = CMD_MAX_NUM;
-	hr_dev->cmd.pool = dma_pool_create("hns_roce_cmd", dev,
+	hr_dev->cmd.pool = dma_pool_create("hns_roce_cmd", hr_dev->dev,
 					   HNS_ROCE_MAILBOX_SIZE,
 					   HNS_ROCE_MAILBOX_SIZE, 0);
 	if (!hr_dev->cmd.pool)
@@ -239,16 +219,16 @@ int hns_roce_cmd_use_events(struct hns_roce_dev *hr_dev)
 	for (i = 0; i < hr_cmd->max_cmds; ++i) {
 		hr_cmd->context[i].token = i;
 		hr_cmd->context[i].next = i + 1;
+		init_completion(&hr_cmd->context[i].done);
 	}
 
-	hr_cmd->context[hr_cmd->max_cmds - 1].next = -1;
+	hr_cmd->context[hr_cmd->max_cmds - 1].next = 0;
 	hr_cmd->free_head = 0;
 
 	sema_init(&hr_cmd->event_sem, hr_cmd->max_cmds);
 	spin_lock_init(&hr_cmd->context_lock);
 
-	hr_cmd->token_mask = CMD_TOKEN_MASK;
 	hr_cmd->use_events = 1;
 
 	down(&hr_cmd->poll_sem);
 
 	return 0;
 }
@@ -259,6 +239,8 @@ void hns_roce_cmd_use_polling(struct hns_roce_dev *hr_dev)
 
 	kfree(hr_cmd->context);
 	hr_cmd->use_events = 0;
+
+	up(&hr_cmd->poll_sem);
 }
 
 struct hns_roce_cmd_mailbox *
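Read together, the two hunks above pair up: hns_roce_cmd_use_events() takes poll_sem once so poll-mode submitters stay blocked while event mode owns the mailbox, and hns_roce_cmd_use_polling() now hands it back. A minimal sketch of that handoff, with a POSIX semaphore standing in for the kernel's (illustrative only):

#include <semaphore.h>
#include <stdio.h>

static sem_t poll_sem;
static int use_events;

static void cmd_use_events(void)
{
	use_events = 1;
	sem_wait(&poll_sem);	/* fence out poll-mode users */
}

static void cmd_use_polling(void)
{
	use_events = 0;
	sem_post(&poll_sem);	/* poll-mode users may run again */
}

int main(void)
{
	sem_init(&poll_sem, 0, 1);
	cmd_use_events();
	printf("event mode: %d\n", use_events);
	cmd_use_polling();
	printf("event mode: %d\n", use_events);
	return 0;
}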
@@ -48,7 +48,8 @@
 #define roce_set_field(origin, mask, shift, val) \
 	do { \
 		(origin) &= ~cpu_to_le32(mask); \
-		(origin) |= cpu_to_le32(((u32)(val) << (u32)(shift)) & (mask)); \
+		(origin) |= \
+			cpu_to_le32(((u32)(val) << (u32)(shift)) & (mask)); \
 	} while (0)
 
 #define roce_set_bit(origin, shift, val) \
@@ -59,9 +60,9 @@
 #define _hr_reg_enable(ptr, field_type, field_h, field_l) \
 	({ \
 		const field_type *_ptr = ptr; \
-		*((__le32 *)_ptr + (field_h) / 32) |= \
-			cpu_to_le32(BIT((field_l) % 32)) + \
-			BUILD_BUG_ON_ZERO((field_h) != (field_l)); \
+		*((__le32 *)_ptr + (field_h) / 32) |= cpu_to_le32( \
+			BIT((field_l) % 32) + \
+			BUILD_BUG_ON_ZERO((field_h) != (field_l))); \
 	})
 
 #define hr_reg_enable(ptr, field) _hr_reg_enable(ptr, field)
@@ -69,11 +70,9 @@
 #define _hr_reg_clear(ptr, field_type, field_h, field_l) \
 	({ \
 		const field_type *_ptr = ptr; \
+		BUILD_BUG_ON(((field_h) / 32) != ((field_l) / 32)); \
 		*((__le32 *)_ptr + (field_h) / 32) &= \
-			cpu_to_le32( \
-				~GENMASK((field_h) % 32, (field_l) % 32)) + \
-			BUILD_BUG_ON_ZERO(((field_h) / 32) != \
-					  ((field_l) / 32)); \
+			~cpu_to_le32(GENMASK((field_h) % 32, (field_l) % 32)); \
 	})
 
 #define hr_reg_clear(ptr, field) _hr_reg_clear(ptr, field)
@@ -87,6 +86,16 @@
 
 #define hr_reg_write(ptr, field, val) _hr_reg_write(ptr, field, val)
 
+#define _hr_reg_read(ptr, field_type, field_h, field_l) \
+	({ \
+		const field_type *_ptr = ptr; \
+		BUILD_BUG_ON(((field_h) / 32) != ((field_l) / 32)); \
+		FIELD_GET(GENMASK((field_h) % 32, (field_l) % 32), \
+			  le32_to_cpu(*((__le32 *)_ptr + (field_h) / 32))); \
+	})
+
+#define hr_reg_read(ptr, field) _hr_reg_read(ptr, field)
+
 #define ROCEE_GLB_CFG_ROCEE_DB_SQ_MODE_S 3
 #define ROCEE_GLB_CFG_ROCEE_DB_OTH_MODE_S 4
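The new _hr_reg_read() completes the hr_reg_* family: a context is treated as an array of little-endian dwords, a field is named by absolute (high, low) bit positions, and GENMASK/FIELD_GET extract it from the word both bits share. A rough user-space rendering of the same arithmetic (le32_to_cpu omitted; helper names are mine, not the driver's):

#include <stdint.h>
#include <stdio.h>

/* open-coded GENMASK for 32-bit words */
#define GENMASK32(h, l) (((~0u) >> (31 - (h))) & ((~0u) << (l)))

static uint32_t reg_read(const uint32_t *words, int h, int l)
{
	uint32_t w = words[h / 32];	/* word holding the field; h and l
					 * must fall in the same word, as the
					 * macro's BUILD_BUG_ON enforces */
	return (w & GENMASK32(h % 32, l % 32)) >> (l % 32);
}

int main(void)
{
	uint32_t ctx[2] = { 0x00ab0000, 0 };

	/* a field occupying bits 23..16 of word 0 */
	printf("0x%x\n", reg_read(ctx, 23, 16));	/* prints 0xab */
	return 0;
}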
@@ -225,7 +225,7 @@ static int alloc_cq_db(struct hns_roce_dev *hr_dev, struct hns_roce_cq *hr_cq,
 		       struct ib_udata *udata, unsigned long addr,
 		       struct hns_roce_ib_create_cq_resp *resp)
 {
-	bool has_db = hr_dev->caps.flags & HNS_ROCE_CAP_FLAG_RECORD_DB;
+	bool has_db = hr_dev->caps.flags & HNS_ROCE_CAP_FLAG_CQ_RECORD_DB;
 	struct hns_roce_ucontext *uctx;
 	int err;
 
|
@ -250,8 +250,8 @@ static int alloc_cq_db(struct hns_roce_dev *hr_dev, struct hns_roce_cq *hr_cq,
|
|||
*hr_cq->set_ci_db = 0;
|
||||
hr_cq->flags |= HNS_ROCE_CQ_FLAG_RECORD_DB;
|
||||
}
|
||||
hr_cq->cq_db_l = hr_dev->reg_base + hr_dev->odb_offset +
|
||||
DB_REG_OFFSET * hr_dev->priv_uar.index;
|
||||
hr_cq->db_reg = hr_dev->reg_base + hr_dev->odb_offset +
|
||||
DB_REG_OFFSET * hr_dev->priv_uar.index;
|
||||
}
|
||||
|
||||
return 0;
|
||||
|
@ -276,6 +276,57 @@ static void free_cq_db(struct hns_roce_dev *hr_dev, struct hns_roce_cq *hr_cq,
|
|||
}
|
||||
}
|
||||
|
||||
static int verify_cq_create_attr(struct hns_roce_dev *hr_dev,
|
||||
const struct ib_cq_init_attr *attr)
|
||||
{
|
||||
struct ib_device *ibdev = &hr_dev->ib_dev;
|
||||
|
||||
if (!attr->cqe || attr->cqe > hr_dev->caps.max_cqes) {
|
||||
ibdev_err(ibdev, "failed to check CQ count %u, max = %u.\n",
|
||||
attr->cqe, hr_dev->caps.max_cqes);
|
||||
return -EINVAL;
|
||||
}
|
||||
|
||||
if (attr->comp_vector >= hr_dev->caps.num_comp_vectors) {
|
||||
ibdev_err(ibdev, "failed to check CQ vector = %u, max = %d.\n",
|
||||
attr->comp_vector, hr_dev->caps.num_comp_vectors);
|
||||
return -EINVAL;
|
||||
}
|
||||
|
||||
return 0;
|
||||
}
|
||||
|
||||
static int get_cq_ucmd(struct hns_roce_cq *hr_cq, struct ib_udata *udata,
|
||||
struct hns_roce_ib_create_cq *ucmd)
|
||||
{
|
||||
struct ib_device *ibdev = hr_cq->ib_cq.device;
|
||||
int ret;
|
||||
|
||||
ret = ib_copy_from_udata(ucmd, udata, min(udata->inlen, sizeof(*ucmd)));
|
||||
if (ret) {
|
||||
ibdev_err(ibdev, "failed to copy CQ udata, ret = %d.\n", ret);
|
||||
return ret;
|
||||
}
|
||||
|
||||
return 0;
|
||||
}
|
||||
|
||||
static void set_cq_param(struct hns_roce_cq *hr_cq, u32 cq_entries, int vector,
|
||||
struct hns_roce_ib_create_cq *ucmd)
|
||||
{
|
||||
struct hns_roce_dev *hr_dev = to_hr_dev(hr_cq->ib_cq.device);
|
||||
|
||||
cq_entries = max(cq_entries, hr_dev->caps.min_cqes);
|
||||
cq_entries = roundup_pow_of_two(cq_entries);
|
||||
hr_cq->ib_cq.cqe = cq_entries - 1; /* used as cqe index */
|
||||
hr_cq->cq_depth = cq_entries;
|
||||
hr_cq->vector = vector;
|
||||
|
||||
spin_lock_init(&hr_cq->lock);
|
||||
INIT_LIST_HEAD(&hr_cq->sq_list);
|
||||
INIT_LIST_HEAD(&hr_cq->rq_list);
|
||||
}
|
||||
|
||||
static void set_cqe_size(struct hns_roce_cq *hr_cq, struct ib_udata *udata,
|
||||
struct hns_roce_ib_create_cq *ucmd)
|
||||
{
|
||||
|
@@ -299,44 +350,23 @@ int hns_roce_create_cq(struct ib_cq *ib_cq, const struct ib_cq_init_attr *attr,
 	struct hns_roce_cq *hr_cq = to_hr_cq(ib_cq);
 	struct ib_device *ibdev = &hr_dev->ib_dev;
 	struct hns_roce_ib_create_cq ucmd = {};
-	int vector = attr->comp_vector;
-	u32 cq_entries = attr->cqe;
 	int ret;
 
 	if (attr->flags)
 		return -EOPNOTSUPP;
 
-	if (cq_entries < 1 || cq_entries > hr_dev->caps.max_cqes) {
-		ibdev_err(ibdev, "failed to check CQ count %u, max = %u.\n",
-			  cq_entries, hr_dev->caps.max_cqes);
-		return -EINVAL;
-	}
-
-	if (vector >= hr_dev->caps.num_comp_vectors) {
-		ibdev_err(ibdev, "failed to check CQ vector = %d, max = %d.\n",
-			  vector, hr_dev->caps.num_comp_vectors);
-		return -EINVAL;
-	}
-
-	cq_entries = max(cq_entries, hr_dev->caps.min_cqes);
-	cq_entries = roundup_pow_of_two(cq_entries);
-	hr_cq->ib_cq.cqe = cq_entries - 1; /* used as cqe index */
-	hr_cq->cq_depth = cq_entries;
-	hr_cq->vector = vector;
-	spin_lock_init(&hr_cq->lock);
-	INIT_LIST_HEAD(&hr_cq->sq_list);
-	INIT_LIST_HEAD(&hr_cq->rq_list);
+	ret = verify_cq_create_attr(hr_dev, attr);
+	if (ret)
+		return ret;
 
 	if (udata) {
-		ret = ib_copy_from_udata(&ucmd, udata,
-					 min(udata->inlen, sizeof(ucmd)));
-		if (ret) {
-			ibdev_err(ibdev, "failed to copy CQ udata, ret = %d.\n",
-				  ret);
+		ret = get_cq_ucmd(hr_cq, udata, &ucmd);
+		if (ret)
 			return ret;
-		}
 	}
 
+	set_cq_param(hr_cq, attr->cqe, attr->comp_vector, &ucmd);
+
 	set_cqe_size(hr_cq, udata, &ucmd);
 
 	ret = alloc_cq_buf(hr_dev, hr_cq, udata, ucmd.buf_addr);
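The depth math that moved into set_cq_param() is worth spelling out: the requested CQE count is raised to the device minimum, rounded up to a power of two, and ib_cq.cqe keeps depth - 1 so it doubles as a CQE index mask. A standalone sketch (min_cqes = 64 is an assumed example value, not a queried capability):

#include <stdio.h>

static unsigned int roundup_pow_of_two32(unsigned int n)
{
	unsigned int p = 1;

	while (p < n)
		p <<= 1;
	return p;
}

int main(void)
{
	unsigned int requested = 100, min_cqes = 64;
	unsigned int depth = requested < min_cqes ? min_cqes : requested;

	depth = roundup_pow_of_two32(depth);
	printf("depth = %u, ib_cq.cqe = %u\n", depth, depth - 1); /* 128, 127 */
	return 0;
}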
@@ -137,6 +137,7 @@ enum {
 	SERV_TYPE_UC,
 	SERV_TYPE_RD,
 	SERV_TYPE_UD,
+	SERV_TYPE_XRC = 5,
 };
 
 enum hns_roce_qp_state {
@@ -168,6 +169,8 @@ enum hns_roce_event {
 	HNS_ROCE_EVENT_TYPE_DB_OVERFLOW = 0x12,
 	HNS_ROCE_EVENT_TYPE_MB = 0x13,
 	HNS_ROCE_EVENT_TYPE_FLR = 0x15,
+	HNS_ROCE_EVENT_TYPE_XRCD_VIOLATION = 0x16,
+	HNS_ROCE_EVENT_TYPE_INVALID_XRCETH = 0x17,
 };
 
 #define HNS_ROCE_CAP_FLAGS_EX_SHIFT 12
@@ -176,9 +179,10 @@ enum {
 	HNS_ROCE_CAP_FLAG_REREG_MR = BIT(0),
 	HNS_ROCE_CAP_FLAG_ROCE_V1_V2 = BIT(1),
 	HNS_ROCE_CAP_FLAG_RQ_INLINE = BIT(2),
-	HNS_ROCE_CAP_FLAG_RECORD_DB = BIT(3),
-	HNS_ROCE_CAP_FLAG_SQ_RECORD_DB = BIT(4),
+	HNS_ROCE_CAP_FLAG_CQ_RECORD_DB = BIT(3),
+	HNS_ROCE_CAP_FLAG_QP_RECORD_DB = BIT(4),
 	HNS_ROCE_CAP_FLAG_SRQ = BIT(5),
+	HNS_ROCE_CAP_FLAG_XRC = BIT(6),
 	HNS_ROCE_CAP_FLAG_MW = BIT(7),
 	HNS_ROCE_CAP_FLAG_FRMR = BIT(8),
 	HNS_ROCE_CAP_FLAG_QP_FLOW_CTRL = BIT(9),
@@ -214,12 +218,6 @@ enum {
 	HNS_ROCE_RST_DIRECT_RETURN = 0,
 };
 
-enum {
-	CMD_RST_PRC_OTHERS,
-	CMD_RST_PRC_SUCCESS,
-	CMD_RST_PRC_EBUSY,
-};
-
 #define HNS_ROCE_CMD_SUCCESS 1
 
 /* The minimum page size is 4K for hardware */
@@ -244,6 +242,11 @@ struct hns_roce_pd {
 	unsigned long pdn;
 };
 
+struct hns_roce_xrcd {
+	struct ib_xrcd ibxrcd;
+	u32 xrcdn;
+};
+
 struct hns_roce_bitmap {
 	/* Bitmap Traversal last a bit which is 1 */
 	unsigned long last;
@@ -363,7 +366,7 @@ struct hns_roce_wq {
 	int wqe_shift; /* WQE size */
 	u32 head;
 	u32 tail;
-	void __iomem *db_reg_l;
+	void __iomem *db_reg;
 };
 
 struct hns_roce_sge {
@@ -437,7 +440,7 @@ struct hns_roce_cq {
 	u32 cq_depth;
 	u32 cons_index;
 	u32 *set_ci_db;
-	void __iomem *cq_db_l;
+	void __iomem *db_reg;
 	u16 *tptr_addr;
 	int arm_sn;
 	int cqe_size;
@@ -467,7 +470,8 @@ struct hns_roce_srq {
 	u32 rsv_sge;
 	int wqe_shift;
 	u32 cqn;
-	void __iomem *db_reg_l;
+	u32 xrcdn;
+	void __iomem *db_reg;
 
 	atomic_t refcount;
 	struct completion free;
@@ -546,6 +550,7 @@ struct hns_roce_cmd_context {
 	int next;
 	u64 out_param;
 	u16 token;
+	u16 busy;
 };
 
 struct hns_roce_cmdq {
@@ -561,11 +566,6 @@ struct hns_roce_cmdq {
 	spinlock_t context_lock;
 	int free_head;
 	struct hns_roce_cmd_context *context;
-	/*
-	 * Result of get integer part
-	 * which max_comds compute according a power of 2
-	 */
-	u16 token_mask;
 	/*
 	 * Process whether use event mode, init default non-zero
 	 * After the event queue of cmd event ready,
@@ -640,6 +640,8 @@ struct hns_roce_qp {
 			       enum hns_roce_event event_type);
 	unsigned long qpn;
 
+	u32 xrcdn;
+
 	atomic_t refcount;
 	struct completion free;
 
@@ -695,7 +697,7 @@ struct hns_roce_aeqe {
 
 struct hns_roce_eq {
 	struct hns_roce_dev *hr_dev;
-	void __iomem *doorbell;
+	void __iomem *db_reg;
 
 	int type_flag; /* Aeq:1 ceq:0 */
 	int eqn;
@@ -723,6 +725,13 @@ struct hns_roce_eq_table {
 	void __iomem **eqc_base; /* only for hw v1 */
 };
 
+enum cong_type {
+	CONG_TYPE_DCQCN,
+	CONG_TYPE_LDCP,
+	CONG_TYPE_HC3,
+	CONG_TYPE_DIP,
+};
+
 struct hns_roce_caps {
 	u64 fw_ver;
 	u8 num_ports;
@@ -759,13 +768,14 @@ struct hns_roce_caps {
 	int num_other_vectors;
 	u32 num_mtpts;
 	u32 num_mtt_segs;
-	u32 num_cqe_segs;
 	u32 num_srqwqe_segs;
 	u32 num_idx_segs;
 	int reserved_mrws;
 	int reserved_uars;
 	int num_pds;
 	int reserved_pds;
+	u32 num_xrcds;
+	u32 reserved_xrcds;
 	u32 mtt_entry_sz;
 	u32 cqe_sz;
 	u32 page_size_cap;
@@ -794,6 +804,9 @@ struct hns_roce_caps {
 	u32 cqc_bt_num;
 	u32 cqc_timer_bt_num;
 	u32 mpt_bt_num;
+	u32 eqc_bt_num;
+	u32 smac_bt_num;
+	u32 sgid_bt_num;
 	u32 sccc_bt_num;
 	u32 gmv_bt_num;
 	u32 qpc_ba_pg_sz;
@@ -851,6 +864,7 @@ struct hns_roce_caps {
 	u16 default_aeq_period;
 	u16 default_aeq_arm_st;
 	u16 default_ceq_arm_st;
+	enum cong_type cong_type;
 };
 
 struct hns_roce_dfx_hw {
@@ -874,9 +888,10 @@ struct hns_roce_hw {
 	int (*post_mbox)(struct hns_roce_dev *hr_dev, u64 in_param,
 			 u64 out_param, u32 in_modifier, u8 op_modifier, u16 op,
 			 u16 token, int event);
-	int (*chk_mbox)(struct hns_roce_dev *hr_dev, unsigned int timeout);
-	int (*rst_prc_mbox)(struct hns_roce_dev *hr_dev);
-	int (*set_gid)(struct hns_roce_dev *hr_dev, u8 port, int gid_index,
+	int (*poll_mbox_done)(struct hns_roce_dev *hr_dev,
+			      unsigned int timeout);
+	bool (*chk_mbox_avail)(struct hns_roce_dev *hr_dev, bool *is_busy);
+	int (*set_gid)(struct hns_roce_dev *hr_dev, u32 port, int gid_index,
 		       const union ib_gid *gid, const struct ib_gid_attr *attr);
 	int (*set_mac)(struct hns_roce_dev *hr_dev, u8 phy_port, u8 *addr);
 	void (*set_mtu)(struct hns_roce_dev *hr_dev, u8 phy_port,
@@ -897,33 +912,17 @@ struct hns_roce_hw {
 	int (*clear_hem)(struct hns_roce_dev *hr_dev,
 			 struct hns_roce_hem_table *table, int obj,
 			 int step_idx);
-	int (*query_qp)(struct ib_qp *ibqp, struct ib_qp_attr *qp_attr,
-			int qp_attr_mask, struct ib_qp_init_attr *qp_init_attr);
 	int (*modify_qp)(struct ib_qp *ibqp, const struct ib_qp_attr *attr,
 			 int attr_mask, enum ib_qp_state cur_state,
 			 enum ib_qp_state new_state);
-	int (*destroy_qp)(struct ib_qp *ibqp, struct ib_udata *udata);
 	int (*qp_flow_control_init)(struct hns_roce_dev *hr_dev,
 				    struct hns_roce_qp *hr_qp);
-	int (*post_send)(struct ib_qp *ibqp, const struct ib_send_wr *wr,
-			 const struct ib_send_wr **bad_wr);
-	int (*post_recv)(struct ib_qp *qp, const struct ib_recv_wr *recv_wr,
-			 const struct ib_recv_wr **bad_recv_wr);
-	int (*req_notify_cq)(struct ib_cq *ibcq, enum ib_cq_notify_flags flags);
-	int (*poll_cq)(struct ib_cq *ibcq, int num_entries, struct ib_wc *wc);
 	int (*dereg_mr)(struct hns_roce_dev *hr_dev, struct hns_roce_mr *mr,
 			struct ib_udata *udata);
 	int (*destroy_cq)(struct ib_cq *ibcq, struct ib_udata *udata);
-	int (*modify_cq)(struct ib_cq *cq, u16 cq_count, u16 cq_period);
 	int (*init_eq)(struct hns_roce_dev *hr_dev);
 	void (*cleanup_eq)(struct hns_roce_dev *hr_dev);
 	int (*write_srqc)(struct hns_roce_srq *srq, void *mb_buf);
-	int (*modify_srq)(struct ib_srq *ibsrq, struct ib_srq_attr *srq_attr,
-			  enum ib_srq_attr_mask srq_attr_mask,
-			  struct ib_udata *udata);
-	int (*query_srq)(struct ib_srq *ibsrq, struct ib_srq_attr *attr);
-	int (*post_srq_recv)(struct ib_srq *ibsrq, const struct ib_recv_wr *wr,
-			     const struct ib_recv_wr **bad_wr);
 	const struct ib_device_ops *hns_roce_dev_ops;
 	const struct ib_device_ops *hns_roce_dev_srq_ops;
 };
@@ -945,6 +944,8 @@ struct hns_roce_dev {
 	enum hns_roce_device_state state;
 	struct list_head qp_list; /* list of all qps on this dev */
 	spinlock_t qp_list_lock; /* protect qp_list */
+	struct list_head dip_list; /* list of all dest ips on this dev */
+	spinlock_t dip_list_lock; /* protect dip_list */
 
 	struct list_head pgdir_list;
 	struct mutex pgdir_mutex;
@@ -963,6 +964,7 @@ struct hns_roce_dev {
 
 	struct hns_roce_cmdq cmd;
 	struct hns_roce_bitmap pd_bitmap;
+	struct hns_roce_bitmap xrcd_bitmap;
 	struct hns_roce_uar_table uar_table;
 	struct hns_roce_mr_table mr_table;
 	struct hns_roce_cq_table cq_table;
@@ -986,6 +988,9 @@ struct hns_roce_dev {
 	void *priv;
 	struct workqueue_struct *irq_workq;
 	const struct hns_roce_dfx_hw *dfx;
+	u32 func_num;
+	u32 is_vf;
+	u32 cong_algo_tmpl_id;
 };
 
 static inline struct hns_roce_dev *to_hr_dev(struct ib_device *ib_dev)
@@ -1004,6 +1009,11 @@ static inline struct hns_roce_pd *to_hr_pd(struct ib_pd *ibpd)
 	return container_of(ibpd, struct hns_roce_pd, ibpd);
 }
 
+static inline struct hns_roce_xrcd *to_hr_xrcd(struct ib_xrcd *ibxrcd)
+{
+	return container_of(ibxrcd, struct hns_roce_xrcd, ibxrcd);
+}
+
 static inline struct hns_roce_ah *to_hr_ah(struct ib_ah *ibah)
 {
 	return container_of(ibah, struct hns_roce_ah, ibah);
@@ -1136,6 +1146,7 @@ int hns_roce_init_mr_table(struct hns_roce_dev *hr_dev);
 void hns_roce_init_cq_table(struct hns_roce_dev *hr_dev);
 int hns_roce_init_qp_table(struct hns_roce_dev *hr_dev);
 int hns_roce_init_srq_table(struct hns_roce_dev *hr_dev);
+int hns_roce_init_xrcd_table(struct hns_roce_dev *hr_dev);
 
 void hns_roce_cleanup_pd_table(struct hns_roce_dev *hr_dev);
 void hns_roce_cleanup_mr_table(struct hns_roce_dev *hr_dev);
@@ -1143,6 +1154,7 @@ void hns_roce_cleanup_eq_table(struct hns_roce_dev *hr_dev);
 void hns_roce_cleanup_cq_table(struct hns_roce_dev *hr_dev);
 void hns_roce_cleanup_qp_table(struct hns_roce_dev *hr_dev);
 void hns_roce_cleanup_srq_table(struct hns_roce_dev *hr_dev);
+void hns_roce_cleanup_xrcd_table(struct hns_roce_dev *hr_dev);
 
 int hns_roce_bitmap_alloc(struct hns_roce_bitmap *bitmap, unsigned long *obj);
 void hns_roce_bitmap_free(struct hns_roce_bitmap *bitmap, unsigned long obj,
@@ -1207,6 +1219,9 @@ int hns_roce_modify_srq(struct ib_srq *ibsrq, struct ib_srq_attr *srq_attr,
 			struct ib_udata *udata);
 int hns_roce_destroy_srq(struct ib_srq *ibsrq, struct ib_udata *udata);
 
+int hns_roce_alloc_xrcd(struct ib_xrcd *ib_xrcd, struct ib_udata *udata);
+int hns_roce_dealloc_xrcd(struct ib_xrcd *ib_xrcd, struct ib_udata *udata);
+
 struct ib_qp *hns_roce_create_qp(struct ib_pd *ib_pd,
 				 struct ib_qp_init_attr *init_attr,
 				 struct ib_udata *udata);
@@ -1246,7 +1261,7 @@ void hns_roce_cq_completion(struct hns_roce_dev *hr_dev, u32 cqn);
 void hns_roce_cq_event(struct hns_roce_dev *hr_dev, u32 cqn, int event_type);
 void hns_roce_qp_event(struct hns_roce_dev *hr_dev, u32 qpn, int event_type);
 void hns_roce_srq_event(struct hns_roce_dev *hr_dev, u32 srqn, int event_type);
-u8 hns_get_gid_index(struct hns_roce_dev *hr_dev, u8 port, int gid_index);
+u8 hns_get_gid_index(struct hns_roce_dev *hr_dev, u32 port, int gid_index);
 void hns_roce_handle_device_err(struct hns_roce_dev *hr_dev);
 int hns_roce_init(struct hns_roce_dev *hr_dev);
 void hns_roce_exit(struct hns_roce_dev *hr_dev);
@@ -54,7 +54,7 @@
  * GID[0][0], GID[1][0],.....GID[N - 1][0],
  * And so on
  */
-u8 hns_get_gid_index(struct hns_roce_dev *hr_dev, u8 port, int gid_index)
+u8 hns_get_gid_index(struct hns_roce_dev *hr_dev, u32 port, int gid_index)
 {
 	return gid_index * hr_dev->caps.num_ports + port;
 }
@@ -345,7 +345,7 @@ static int hns_roce_v1_post_send(struct ib_qp *ibqp,
 		doorbell[0] = sq_db.u32_4;
 		doorbell[1] = sq_db.u32_8;
 
-		hns_roce_write64_k(doorbell, qp->sq.db_reg_l);
+		hns_roce_write64_k(doorbell, qp->sq.db_reg);
 	}
 
 	spin_unlock_irqrestore(&qp->sq.lock, flags);
@@ -440,7 +440,7 @@ static int hns_roce_v1_post_recv(struct ib_qp *ibqp,
 			doorbell[0] = rq_db.u32_4;
 			doorbell[1] = rq_db.u32_8;
 
-			hns_roce_write64_k(doorbell, hr_qp->rq.db_reg_l);
+			hns_roce_write64_k(doorbell, hr_qp->rq.db_reg);
 		}
 	}
 	spin_unlock_irqrestore(&hr_qp->rq.lock, flags);
@@ -538,7 +538,7 @@ static void hns_roce_set_sdb_ext(struct hns_roce_dev *hr_dev, u32 ext_sdb_alept,
 	/*
 	 * 44 = 32 + 12, When evaluating addr to hardware, shift 12 because of
 	 * using 4K page, and shift more 32 because of
-	 * caculating the high 32 bit value evaluated to hardware.
+	 * calculating the high 32 bit value evaluated to hardware.
 	 */
 	roce_set_field(tmp, ROCEE_EXT_DB_SQ_H_EXT_DB_SQ_BA_H_M,
 		       ROCEE_EXT_DB_SQ_H_EXT_DB_SQ_BA_H_S, sdb_dma_addr >> 44);
@@ -711,7 +711,7 @@ static int hns_roce_v1_rsv_lp_qp(struct hns_roce_dev *hr_dev)
 	int i, j;
 	u8 queue_en[HNS_ROCE_V1_RESV_QP] = { 0 };
 	u8 phy_port;
-	u8 port = 0;
+	u32 port = 0;
 	u8 sl;
 
 	/* Reserved cq for loop qp */
@@ -1189,7 +1189,7 @@ static int hns_roce_raq_init(struct hns_roce_dev *hr_dev)
 	/*
 	 * 44 = 32 + 12, When evaluating addr to hardware, shift 12 because of
 	 * using 4K page, and shift more 32 because of
-	 * caculating the high 32 bit value evaluated to hardware.
+	 * calculating the high 32 bit value evaluated to hardware.
 	 */
 	roce_set_field(tmp, ROCEE_EXT_RAQ_H_EXT_RAQ_BA_H_M,
 		       ROCEE_EXT_RAQ_H_EXT_RAQ_BA_H_S,
@@ -1382,7 +1382,6 @@ static int hns_roce_free_mr_init(struct hns_roce_dev *hr_dev)
 	ret = hns_roce_v1_rsv_lp_qp(hr_dev);
 	if (ret) {
 		dev_err(dev, "Reserved loop qp failed(%d)!\n", ret);
-		flush_workqueue(free_mr->free_mr_wq);
 		destroy_workqueue(free_mr->free_mr_wq);
 	}
 
|
@ -1394,7 +1393,6 @@ static void hns_roce_free_mr_free(struct hns_roce_dev *hr_dev)
|
|||
struct hns_roce_v1_priv *priv = hr_dev->priv;
|
||||
struct hns_roce_free_mr *free_mr = &priv->free_mr;
|
||||
|
||||
flush_workqueue(free_mr->free_mr_wq);
|
||||
destroy_workqueue(free_mr->free_mr_wq);
|
||||
|
||||
hns_roce_v1_release_lp_qp(hr_dev);
|
||||
|
@@ -1676,7 +1674,7 @@ static int hns_roce_v1_chk_mbox(struct hns_roce_dev *hr_dev,
 	return 0;
 }
 
-static int hns_roce_v1_set_gid(struct hns_roce_dev *hr_dev, u8 port,
+static int hns_roce_v1_set_gid(struct hns_roce_dev *hr_dev, u32 port,
 			       int gid_index, const union ib_gid *gid,
 			       const struct ib_gid_attr *attr)
 {
@@ -1939,7 +1937,7 @@ static void hns_roce_v1_cq_set_ci(struct hns_roce_cq *hr_cq, u32 cons_index)
 	roce_set_field(doorbell[1], ROCEE_DB_OTHERS_H_ROCEE_DB_OTH_INP_H_M,
 		       ROCEE_DB_OTHERS_H_ROCEE_DB_OTH_INP_H_S, hr_cq->cqn);
 
-	hns_roce_write64_k(doorbell, hr_cq->cq_db_l);
+	hns_roce_write64_k(doorbell, hr_cq->db_reg);
 }
 
 static void __hns_roce_v1_cq_clean(struct hns_roce_cq *hr_cq, u32 qpn,
|
@ -2041,7 +2039,7 @@ static void hns_roce_v1_write_cqc(struct hns_roce_dev *hr_dev,
|
|||
/**
|
||||
* 44 = 32 + 12, When evaluating addr to hardware, shift 12 because of
|
||||
* using 4K page, and shift more 32 because of
|
||||
* caculating the high 32 bit value evaluated to hardware.
|
||||
* calculating the high 32 bit value evaluated to hardware.
|
||||
*/
|
||||
roce_set_field(cq_context->cqc_byte_20,
|
||||
CQ_CONTEXT_CQC_BYTE_20_CQE_TPTR_ADDR_H_M,
|
||||
|
@@ -2092,7 +2090,7 @@ static int hns_roce_v1_req_notify_cq(struct ib_cq *ibcq,
 		       ROCEE_DB_OTHERS_H_ROCEE_DB_OTH_INP_H_S,
 		       hr_cq->cqn | notification_flag);
 
-	hns_roce_write64_k(doorbell, hr_cq->cq_db_l);
+	hns_roce_write64_k(doorbell, hr_cq->db_reg);
 
 	return 0;
 }
@@ -2673,8 +2671,8 @@ static int hns_roce_v1_m_qp(struct ib_qp *ibqp, const struct ib_qp_attr *attr,
 	int ret = -EINVAL;
 	u64 sq_ba = 0;
 	u64 rq_ba = 0;
-	int port;
-	u8 port_num;
+	u32 port;
+	u32 port_num;
 	u8 *dmac;
 	u8 *smac;
 
@@ -3217,12 +3215,12 @@ static int hns_roce_v1_m_qp(struct ib_qp *ibqp, const struct ib_qp_attr *attr,
 		roce_set_bit(doorbell[1], RQ_DOORBELL_U32_8_HW_SYNC_S, 1);
 
 		if (ibqp->uobject) {
-			hr_qp->rq.db_reg_l = hr_dev->reg_base +
+			hr_qp->rq.db_reg = hr_dev->reg_base +
 					     hr_dev->odb_offset +
 					     DB_REG_OFFSET * hr_dev->priv_uar.index;
 		}
 
-		hns_roce_write64_k(doorbell, hr_qp->rq.db_reg_l);
+		hns_roce_write64_k(doorbell, hr_qp->rq.db_reg);
 	}
 
 	hr_qp->state = new_state;
@@ -3449,8 +3447,7 @@ static int hns_roce_v1_q_qp(struct ib_qp *ibqp, struct ib_qp_attr *qp_attr,
 		((roce_get_bit(context->qpc_bytes_4,
 			       QP_CONTEXT_QPC_BYTE_4_ATOMIC_OPERATION_ENABLE_S)) << 3);
 
-	if (hr_qp->ibqp.qp_type == IB_QPT_RC ||
-	    hr_qp->ibqp.qp_type == IB_QPT_UC) {
+	if (hr_qp->ibqp.qp_type == IB_QPT_RC) {
 		struct ib_global_route *grh =
 			rdma_ah_retrieve_grh(&qp_attr->ah_attr);
 
@@ -3604,7 +3601,7 @@ static int hns_roce_v1_destroy_cq(struct ib_cq *ibcq, struct ib_udata *udata)
 static void set_eq_cons_index_v1(struct hns_roce_eq *eq, u32 req_not)
 {
 	roce_raw_write((eq->cons_index & HNS_ROCE_V1_CONS_IDX_M) |
-		       (req_not << eq->log_entries), eq->doorbell);
+		       (req_not << eq->log_entries), eq->db_reg);
 }
 
 static void hns_roce_v1_wq_catas_err_handle(struct hns_roce_dev *hr_dev,
@@ -4170,7 +4167,7 @@ static int hns_roce_v1_create_eq(struct hns_roce_dev *hr_dev,
 	 * Configure eq extended address 45~49 bit.
 	 * 44 = 32 + 12, When evaluating addr to hardware, shift 12 because of
 	 * using 4K page, and shift more 32 because of
-	 * caculating the high 32 bit value evaluated to hardware.
+	 * calculating the high 32 bit value evaluated to hardware.
 	 */
 	roce_set_field(tmp1, ROCEE_CAEP_AEQE_CUR_IDX_CAEP_AEQ_BT_H_M,
 		       ROCEE_CAEP_AEQE_CUR_IDX_CAEP_AEQ_BT_H_S,
@@ -4234,9 +4231,9 @@ static int hns_roce_v1_init_eq_table(struct hns_roce_dev *hr_dev)
 					       ROCEE_CAEP_CEQC_SHIFT_0_REG +
 					       CEQ_REG_OFFSET * i;
 			eq->type_flag = HNS_ROCE_CEQ;
-			eq->doorbell = hr_dev->reg_base +
-				       ROCEE_CAEP_CEQC_CONS_IDX_0_REG +
-				       CEQ_REG_OFFSET * i;
+			eq->db_reg = hr_dev->reg_base +
+				     ROCEE_CAEP_CEQC_CONS_IDX_0_REG +
+				     CEQ_REG_OFFSET * i;
 			eq->entries = hr_dev->caps.ceqe_depth;
 			eq->log_entries = ilog2(eq->entries);
 			eq->eqe_size = HNS_ROCE_CEQE_SIZE;
@@ -4245,8 +4242,8 @@ static int hns_roce_v1_init_eq_table(struct hns_roce_dev *hr_dev)
 			eq_table->eqc_base[i] = hr_dev->reg_base +
 						ROCEE_CAEP_AEQC_AEQE_SHIFT_REG;
 			eq->type_flag = HNS_ROCE_AEQ;
-			eq->doorbell = hr_dev->reg_base +
-				       ROCEE_CAEP_AEQE_CONS_IDX_REG;
+			eq->db_reg = hr_dev->reg_base +
+				     ROCEE_CAEP_AEQE_CONS_IDX_REG;
 			eq->entries = hr_dev->caps.aeqe_depth;
 			eq->log_entries = ilog2(eq->entries);
 			eq->eqe_size = HNS_ROCE_AEQE_SIZE;
@@ -4349,7 +4346,7 @@ static const struct hns_roce_hw hns_roce_hw_v1 = {
 	.hw_init = hns_roce_v1_init,
 	.hw_exit = hns_roce_v1_exit,
 	.post_mbox = hns_roce_v1_post_mbox,
-	.chk_mbox = hns_roce_v1_chk_mbox,
+	.poll_mbox_done = hns_roce_v1_chk_mbox,
 	.set_gid = hns_roce_v1_set_gid,
 	.set_mac = hns_roce_v1_set_mac,
 	.set_mtu = hns_roce_v1_set_mtu,
@@ -4357,12 +4354,6 @@ static const struct hns_roce_hw hns_roce_hw_v1 = {
 	.write_cqc = hns_roce_v1_write_cqc,
 	.clear_hem = hns_roce_v1_clear_hem,
 	.modify_qp = hns_roce_v1_modify_qp,
-	.query_qp = hns_roce_v1_query_qp,
-	.destroy_qp = hns_roce_v1_destroy_qp,
-	.post_send = hns_roce_v1_post_send,
-	.post_recv = hns_roce_v1_post_recv,
-	.req_notify_cq = hns_roce_v1_req_notify_cq,
-	.poll_cq = hns_roce_v1_poll_cq,
 	.dereg_mr = hns_roce_v1_dereg_mr,
 	.destroy_cq = hns_roce_v1_destroy_cq,
 	.init_eq = hns_roce_v1_init_eq_table,
File diff suppressed because it is too large
@@ -40,13 +40,11 @@
 #define HNS_ROCE_VF_SRQC_BT_NUM 64
 #define HNS_ROCE_VF_CQC_BT_NUM 64
 #define HNS_ROCE_VF_MPT_BT_NUM 64
-#define HNS_ROCE_VF_EQC_NUM 64
 #define HNS_ROCE_VF_SMAC_NUM 32
-#define HNS_ROCE_VF_SGID_NUM 32
 #define HNS_ROCE_VF_SL_NUM 8
 #define HNS_ROCE_VF_GMV_BT_NUM 256
 
-#define HNS_ROCE_V2_MAX_QP_NUM 0x100000
+#define HNS_ROCE_V2_MAX_QP_NUM 0x1000
 #define HNS_ROCE_V2_MAX_QPC_TIMER_NUM 0x200
 #define HNS_ROCE_V2_MAX_WQE_NUM 0x8000
 #define HNS_ROCE_V2_MAX_SRQ 0x100000
@@ -61,6 +59,7 @@
 #define HNS_ROCE_V2_MAX_SQ_SGE_NUM 64
 #define HNS_ROCE_V2_MAX_EXTEND_SGE_NUM 0x200000
 #define HNS_ROCE_V2_MAX_SQ_INLINE 0x20
+#define HNS_ROCE_V2_MAX_SQ_INL_EXT 0x400
 #define HNS_ROCE_V2_MAX_RC_INL_INN_SZ 32
 #define HNS_ROCE_V2_UAR_NUM 256
 #define HNS_ROCE_V2_PHY_UAR_NUM 1
@@ -74,6 +73,8 @@
 #define HNS_ROCE_V2_MAX_SRQWQE_SEGS 0x1000000
 #define HNS_ROCE_V2_MAX_IDX_SEGS 0x1000000
 #define HNS_ROCE_V2_MAX_PD_NUM 0x1000000
+#define HNS_ROCE_V2_MAX_XRCD_NUM 0x1000000
+#define HNS_ROCE_V2_RSV_XRCD_NUM 0
 #define HNS_ROCE_V2_MAX_QP_INIT_RDMA 128
 #define HNS_ROCE_V2_MAX_QP_DEST_RDMA 128
 #define HNS_ROCE_V2_MAX_SQ_DESC_SZ 64
@@ -121,7 +122,7 @@
 
 #define HNS_ROCE_BA_PG_SZ_SUPPORTED_256K 6
 #define HNS_ROCE_BA_PG_SZ_SUPPORTED_16K 2
-#define HNS_ROCE_V2_GID_INDEX_NUM 256
+#define HNS_ROCE_V2_GID_INDEX_NUM 16
 
 #define HNS_ROCE_V2_TABLE_CHUNK_SIZE (1 << 18)
 
@@ -143,6 +144,8 @@
 
 #define HNS_ROCE_CMQ_SCC_CLR_DONE_CNT 5
 
+#define HNS_ROCE_CONG_SIZE 64
+
 #define check_whether_last_step(hop_num, step_idx) \
 	((step_idx == 0 && hop_num == HNS_ROCE_HOP_NUM_0) || \
 	 (step_idx == 1 && hop_num == 1) || \
@@ -195,11 +198,11 @@ enum {
 };
 
 enum {
-	HNS_ROCE_V2_SQ_DB = 0x0,
-	HNS_ROCE_V2_RQ_DB = 0x1,
-	HNS_ROCE_V2_SRQ_DB = 0x2,
-	HNS_ROCE_V2_CQ_DB_PTR = 0x3,
-	HNS_ROCE_V2_CQ_DB_NTR = 0x4,
+	HNS_ROCE_V2_SQ_DB,
+	HNS_ROCE_V2_RQ_DB,
+	HNS_ROCE_V2_SRQ_DB,
+	HNS_ROCE_V2_CQ_DB,
+	HNS_ROCE_V2_CQ_DB_NOTIFY
 };
 
 enum {
@@ -233,6 +236,7 @@ enum hns_roce_opcode_type {
 	HNS_ROCE_OPC_CFG_EXT_LLM = 0x8403,
 	HNS_ROCE_OPC_CFG_TMOUT_LLM = 0x8404,
 	HNS_ROCE_OPC_QUERY_PF_TIMER_RES = 0x8406,
+	HNS_ROCE_OPC_QUERY_FUNC_INFO = 0x8407,
 	HNS_ROCE_OPC_QUERY_PF_CAPS_NUM = 0x8408,
 	HNS_ROCE_OPC_CFG_ENTRY_SIZE = 0x8409,
 	HNS_ROCE_OPC_CFG_SGID_TB = 0x8500,
@@ -244,6 +248,7 @@ enum hns_roce_opcode_type {
 	HNS_ROCE_OPC_CLR_SCCC = 0x8509,
 	HNS_ROCE_OPC_QUERY_SCCC = 0x850a,
 	HNS_ROCE_OPC_RESET_SCCC = 0x850b,
+	HNS_ROCE_OPC_QUERY_VF_RES = 0x850e,
 	HNS_ROCE_OPC_CFG_GMV_TBL = 0x850f,
 	HNS_ROCE_OPC_CFG_GMV_BT = 0x8510,
 	HNS_SWITCH_PARAMETER_CFG = 0x1033,
@@ -255,10 +260,20 @@ enum {
 };
 
 enum hns_roce_cmd_return_status {
-	CMD_EXEC_SUCCESS = 0,
-	CMD_NO_AUTH = 1,
-	CMD_NOT_EXEC = 2,
-	CMD_QUEUE_FULL = 3,
+	CMD_EXEC_SUCCESS,
+	CMD_NO_AUTH,
+	CMD_NOT_EXIST,
+	CMD_CRQ_FULL,
+	CMD_NEXT_ERR,
+	CMD_NOT_EXEC,
+	CMD_PARA_ERR,
+	CMD_RESULT_ERR,
+	CMD_TIMEOUT,
+	CMD_HILINK_ERR,
+	CMD_INFO_ILLEGAL,
+	CMD_INVALID,
+	CMD_ROH_CHECK_FAIL,
+	CMD_OTHER_ERR = 0xff
 };
 
 enum hns_roce_sgid_type {
@@ -399,7 +414,8 @@ struct hns_roce_srq_context {
 #define SRQC_CONSUMER_IDX SRQC_FIELD_LOC(127, 112)
 #define SRQC_WQE_BT_BA_L SRQC_FIELD_LOC(159, 128)
 #define SRQC_WQE_BT_BA_H SRQC_FIELD_LOC(188, 160)
-#define SRQC_RSV2 SRQC_FIELD_LOC(191, 189)
+#define SRQC_RSV2 SRQC_FIELD_LOC(190, 189)
+#define SRQC_SRQ_TYPE SRQC_FIELD_LOC(191, 191)
 #define SRQC_PD SRQC_FIELD_LOC(215, 192)
 #define SRQC_RQWS SRQC_FIELD_LOC(219, 216)
 #define SRQC_RSV3 SRQC_FIELD_LOC(223, 220)
@@ -572,6 +588,10 @@ struct hns_roce_v2_qp_context {
 	struct hns_roce_v2_qp_context_ex ext;
 };
 
+#define QPC_FIELD_LOC(h, l) FIELD_LOC(struct hns_roce_v2_qp_context, h, l)
+
+#define QPC_CONG_ALGO_TMPL_ID QPC_FIELD_LOC(455, 448)
+
 #define V2_QPC_BYTE_4_TST_S 0
 #define V2_QPC_BYTE_4_TST_M GENMASK(2, 0)
 
@@ -663,9 +683,6 @@ struct hns_roce_v2_qp_context {
 #define V2_QPC_BYTE_56_LP_PKTN_INI_S 28
 #define V2_QPC_BYTE_56_LP_PKTN_INI_M GENMASK(31, 28)
 
-#define V2_QPC_BYTE_60_TEMPID_S 0
-#define V2_QPC_BYTE_60_TEMPID_M GENMASK(7, 0)
-
 #define V2_QPC_BYTE_60_SCC_TOKEN_S 8
 #define V2_QPC_BYTE_60_SCC_TOKEN_M GENMASK(26, 8)
 
@@ -698,6 +715,8 @@ struct hns_roce_v2_qp_context {
 #define V2_QPC_BYTE_80_RX_CQN_S 0
 #define V2_QPC_BYTE_80_RX_CQN_M GENMASK(23, 0)
 
+#define V2_QPC_BYTE_80_XRC_QP_TYPE_S 24
+
 #define V2_QPC_BYTE_80_MIN_RNR_TIME_S 27
 #define V2_QPC_BYTE_80_MIN_RNR_TIME_M GENMASK(31, 27)
 
@@ -940,6 +959,10 @@ struct hns_roce_v2_qp_context {
 
 #define QPCEX_FIELD_LOC(h, l) FIELD_LOC(struct hns_roce_v2_qp_context_ex, h, l)
 
+#define QPCEX_CONG_ALG_SEL QPCEX_FIELD_LOC(0, 0)
+#define QPCEX_CONG_ALG_SUB_SEL QPCEX_FIELD_LOC(1, 1)
+#define QPCEX_DIP_CTX_IDX_VLD QPCEX_FIELD_LOC(2, 2)
+#define QPCEX_DIP_CTX_IDX QPCEX_FIELD_LOC(22, 3)
 #define QPCEX_STASH QPCEX_FIELD_LOC(82, 82)
 
 #define V2_QP_RWE_S 1 /* rdma write enable */
@@ -1130,33 +1153,27 @@ struct hns_roce_v2_mpt_entry {
 #define V2_MPT_BYTE_64_PBL_BUF_PG_SZ_S 28
 #define V2_MPT_BYTE_64_PBL_BUF_PG_SZ_M GENMASK(31, 28)
 
-#define V2_DB_BYTE_4_TAG_S 0
-#define V2_DB_BYTE_4_TAG_M GENMASK(23, 0)
+#define V2_DB_TAG_S 0
+#define V2_DB_TAG_M GENMASK(23, 0)
 
-#define V2_DB_BYTE_4_CMD_S 24
-#define V2_DB_BYTE_4_CMD_M GENMASK(27, 24)
+#define V2_DB_CMD_S 24
+#define V2_DB_CMD_M GENMASK(27, 24)
 
 #define V2_DB_FLAG_S 31
 
-#define V2_DB_PARAMETER_IDX_S 0
-#define V2_DB_PARAMETER_IDX_M GENMASK(15, 0)
+#define V2_DB_PRODUCER_IDX_S 0
+#define V2_DB_PRODUCER_IDX_M GENMASK(15, 0)
 
-#define V2_DB_PARAMETER_SL_S 16
-#define V2_DB_PARAMETER_SL_M GENMASK(18, 16)
+#define V2_DB_SL_S 16
+#define V2_DB_SL_M GENMASK(18, 16)
 
-#define V2_CQ_DB_BYTE_4_TAG_S 0
-#define V2_CQ_DB_BYTE_4_TAG_M GENMASK(23, 0)
+#define V2_CQ_DB_CONS_IDX_S 0
+#define V2_CQ_DB_CONS_IDX_M GENMASK(23, 0)
 
-#define V2_CQ_DB_BYTE_4_CMD_S 24
-#define V2_CQ_DB_BYTE_4_CMD_M GENMASK(27, 24)
+#define V2_CQ_DB_NOTIFY_TYPE_S 24
 
-#define V2_CQ_DB_PARAMETER_CONS_IDX_S 0
-#define V2_CQ_DB_PARAMETER_CONS_IDX_M GENMASK(23, 0)
-
-#define V2_CQ_DB_PARAMETER_CMD_SN_S 25
-#define V2_CQ_DB_PARAMETER_CMD_SN_M GENMASK(26, 25)
-
-#define V2_CQ_DB_PARAMETER_NOTIFY_S 24
+#define V2_CQ_DB_CMD_SN_S 25
+#define V2_CQ_DB_CMD_SN_M GENMASK(26, 25)
 
 struct hns_roce_v2_ud_send_wqe {
 	__le32 byte_4;
@@ -1359,194 +1376,44 @@ struct hns_roce_cfg_llm_b {
 #define CFG_LLM_TAIL_PTR_S 0
 #define CFG_LLM_TAIL_PTR_M GENMASK(11, 0)
 
-struct hns_roce_cfg_global_param {
-	__le32 time_cfg_udp_port;
-	__le32 rsv[5];
-};
+/* Fields of HNS_ROCE_OPC_CFG_GLOBAL_PARAM */
+#define CFG_GLOBAL_PARAM_1US_CYCLES CMQ_REQ_FIELD_LOC(9, 0)
+#define CFG_GLOBAL_PARAM_UDP_PORT CMQ_REQ_FIELD_LOC(31, 16)
 
-#define CFG_GLOBAL_PARAM_DATA_0_ROCEE_TIME_1US_CFG_S 0
-#define CFG_GLOBAL_PARAM_DATA_0_ROCEE_TIME_1US_CFG_M GENMASK(9, 0)
+/*
+ * Fields of HNS_ROCE_OPC_QUERY_PF_RES, HNS_ROCE_OPC_QUERY_VF_RES
+ * and HNS_ROCE_OPC_ALLOC_VF_RES
+ */
+#define FUNC_RES_A_VF_ID CMQ_REQ_FIELD_LOC(7, 0)
+#define FUNC_RES_A_QPC_BT_IDX CMQ_REQ_FIELD_LOC(42, 32)
+#define FUNC_RES_A_QPC_BT_NUM CMQ_REQ_FIELD_LOC(59, 48)
+#define FUNC_RES_A_SRQC_BT_IDX CMQ_REQ_FIELD_LOC(72, 64)
+#define FUNC_RES_A_SRQC_BT_NUM CMQ_REQ_FIELD_LOC(89, 80)
+#define FUNC_RES_A_CQC_BT_IDX CMQ_REQ_FIELD_LOC(104, 96)
+#define FUNC_RES_A_CQC_BT_NUM CMQ_REQ_FIELD_LOC(121, 112)
+#define FUNC_RES_A_MPT_BT_IDX CMQ_REQ_FIELD_LOC(136, 128)
+#define FUNC_RES_A_MPT_BT_NUM CMQ_REQ_FIELD_LOC(153, 144)
+#define FUNC_RES_A_EQC_BT_IDX CMQ_REQ_FIELD_LOC(168, 160)
+#define FUNC_RES_A_EQC_BT_NUM CMQ_REQ_FIELD_LOC(185, 176)
+#define FUNC_RES_B_SMAC_IDX CMQ_REQ_FIELD_LOC(39, 32)
+#define FUNC_RES_B_SMAC_NUM CMQ_REQ_FIELD_LOC(48, 40)
+#define FUNC_RES_B_SGID_IDX CMQ_REQ_FIELD_LOC(71, 64)
+#define FUNC_RES_B_SGID_NUM CMQ_REQ_FIELD_LOC(80, 72)
+#define FUNC_RES_B_QID_IDX CMQ_REQ_FIELD_LOC(105, 96)
+#define FUNC_RES_B_QID_NUM CMQ_REQ_FIELD_LOC(122, 112)
+#define FUNC_RES_V_QID_NUM CMQ_REQ_FIELD_LOC(115, 112)
 
-#define CFG_GLOBAL_PARAM_DATA_0_ROCEE_UDP_PORT_S 16
-#define CFG_GLOBAL_PARAM_DATA_0_ROCEE_UDP_PORT_M GENMASK(31, 16)
+#define FUNC_RES_B_SCCC_BT_IDX CMQ_REQ_FIELD_LOC(136, 128)
+#define FUNC_RES_B_SCCC_BT_NUM CMQ_REQ_FIELD_LOC(145, 137)
+#define FUNC_RES_B_GMV_BT_IDX CMQ_REQ_FIELD_LOC(167, 160)
+#define FUNC_RES_B_GMV_BT_NUM CMQ_REQ_FIELD_LOC(176, 168)
+#define FUNC_RES_V_GMV_BT_NUM CMQ_REQ_FIELD_LOC(184, 176)
 
-struct hns_roce_pf_res_a {
-	__le32 rsv;
-	__le32 qpc_bt_idx_num;
-	__le32 srqc_bt_idx_num;
-	__le32 cqc_bt_idx_num;
-	__le32 mpt_bt_idx_num;
-	__le32 eqc_bt_idx_num;
-};
-
-#define PF_RES_DATA_1_PF_QPC_BT_IDX_S 0
-#define PF_RES_DATA_1_PF_QPC_BT_IDX_M GENMASK(10, 0)
-
-#define PF_RES_DATA_1_PF_QPC_BT_NUM_S 16
-#define PF_RES_DATA_1_PF_QPC_BT_NUM_M GENMASK(27, 16)
-
-#define PF_RES_DATA_2_PF_SRQC_BT_IDX_S 0
-#define PF_RES_DATA_2_PF_SRQC_BT_IDX_M GENMASK(8, 0)
-
-#define PF_RES_DATA_2_PF_SRQC_BT_NUM_S 16
-#define PF_RES_DATA_2_PF_SRQC_BT_NUM_M GENMASK(25, 16)
-
-#define PF_RES_DATA_3_PF_CQC_BT_IDX_S 0
-#define PF_RES_DATA_3_PF_CQC_BT_IDX_M GENMASK(8, 0)
-
-#define PF_RES_DATA_3_PF_CQC_BT_NUM_S 16
-#define PF_RES_DATA_3_PF_CQC_BT_NUM_M GENMASK(25, 16)
-
-#define PF_RES_DATA_4_PF_MPT_BT_IDX_S 0
-#define PF_RES_DATA_4_PF_MPT_BT_IDX_M GENMASK(8, 0)
-
-#define PF_RES_DATA_4_PF_MPT_BT_NUM_S 16
-#define PF_RES_DATA_4_PF_MPT_BT_NUM_M GENMASK(25, 16)
-
-#define PF_RES_DATA_5_PF_EQC_BT_IDX_S 0
-#define PF_RES_DATA_5_PF_EQC_BT_IDX_M GENMASK(8, 0)
-
-#define PF_RES_DATA_5_PF_EQC_BT_NUM_S 16
-#define PF_RES_DATA_5_PF_EQC_BT_NUM_M GENMASK(25, 16)
-
-struct hns_roce_pf_res_b {
-	__le32 rsv0;
-	__le32 smac_idx_num;
-	__le32 sgid_idx_num;
-	__le32 qid_idx_sl_num;
-	__le32 sccc_bt_idx_num;
-	__le32 gmv_idx_num;
-};
-
-#define PF_RES_DATA_1_PF_SMAC_IDX_S 0
-#define PF_RES_DATA_1_PF_SMAC_IDX_M GENMASK(7, 0)
-
-#define PF_RES_DATA_1_PF_SMAC_NUM_S 8
-#define PF_RES_DATA_1_PF_SMAC_NUM_M GENMASK(16, 8)
-
-#define PF_RES_DATA_2_PF_SGID_IDX_S 0
-#define PF_RES_DATA_2_PF_SGID_IDX_M GENMASK(7, 0)
-
-#define PF_RES_DATA_2_PF_SGID_NUM_S 8
-#define PF_RES_DATA_2_PF_SGID_NUM_M GENMASK(16, 8)
-
-#define PF_RES_DATA_3_PF_QID_IDX_S 0
-#define PF_RES_DATA_3_PF_QID_IDX_M GENMASK(9, 0)
-
-#define PF_RES_DATA_3_PF_SL_NUM_S 16
-#define PF_RES_DATA_3_PF_SL_NUM_M GENMASK(26, 16)
-
-#define PF_RES_DATA_4_PF_SCCC_BT_IDX_S 0
-#define PF_RES_DATA_4_PF_SCCC_BT_IDX_M GENMASK(8, 0)
-
-#define PF_RES_DATA_4_PF_SCCC_BT_NUM_S 9
-#define PF_RES_DATA_4_PF_SCCC_BT_NUM_M GENMASK(17, 9)
-
-#define PF_RES_DATA_5_PF_GMV_BT_IDX_S 0
-#define PF_RES_DATA_5_PF_GMV_BT_IDX_M GENMASK(7, 0)
-
-#define PF_RES_DATA_5_PF_GMV_BT_NUM_S 8
-#define PF_RES_DATA_5_PF_GMV_BT_NUM_M GENMASK(16, 8)
-
-struct hns_roce_pf_timer_res_a {
-	__le32 rsv0;
-	__le32 qpc_timer_bt_idx_num;
-	__le32 cqc_timer_bt_idx_num;
-	__le32 rsv[3];
-};
-
-#define PF_RES_DATA_1_PF_QPC_TIMER_BT_IDX_S 0
-#define PF_RES_DATA_1_PF_QPC_TIMER_BT_IDX_M GENMASK(11, 0)
-
-#define PF_RES_DATA_1_PF_QPC_TIMER_BT_NUM_S 16
-#define PF_RES_DATA_1_PF_QPC_TIMER_BT_NUM_M GENMASK(28, 16)
-
-#define PF_RES_DATA_2_PF_CQC_TIMER_BT_IDX_S 0
-#define PF_RES_DATA_2_PF_CQC_TIMER_BT_IDX_M GENMASK(10, 0)
-
-#define PF_RES_DATA_2_PF_CQC_TIMER_BT_NUM_S 16
-#define PF_RES_DATA_2_PF_CQC_TIMER_BT_NUM_M GENMASK(27, 16)
-
-struct hns_roce_vf_res_a {
-	__le32 vf_id;
-	__le32 vf_qpc_bt_idx_num;
-	__le32 vf_srqc_bt_idx_num;
-	__le32 vf_cqc_bt_idx_num;
-	__le32 vf_mpt_bt_idx_num;
-	__le32 vf_eqc_bt_idx_num;
-};
-
-#define VF_RES_A_DATA_1_VF_QPC_BT_IDX_S 0
-#define VF_RES_A_DATA_1_VF_QPC_BT_IDX_M GENMASK(10, 0)
-
-#define VF_RES_A_DATA_1_VF_QPC_BT_NUM_S 16
-#define VF_RES_A_DATA_1_VF_QPC_BT_NUM_M GENMASK(27, 16)
-
-#define VF_RES_A_DATA_2_VF_SRQC_BT_IDX_S 0
-#define VF_RES_A_DATA_2_VF_SRQC_BT_IDX_M GENMASK(8, 0)
-
-#define VF_RES_A_DATA_2_VF_SRQC_BT_NUM_S 16
-#define VF_RES_A_DATA_2_VF_SRQC_BT_NUM_M GENMASK(25, 16)
-
-#define VF_RES_A_DATA_3_VF_CQC_BT_IDX_S 0
-#define VF_RES_A_DATA_3_VF_CQC_BT_IDX_M GENMASK(8, 0)
-
-#define VF_RES_A_DATA_3_VF_CQC_BT_NUM_S 16
-#define VF_RES_A_DATA_3_VF_CQC_BT_NUM_M GENMASK(25, 16)
-
-#define VF_RES_A_DATA_4_VF_MPT_BT_IDX_S 0
-#define VF_RES_A_DATA_4_VF_MPT_BT_IDX_M GENMASK(8, 0)
-
-#define VF_RES_A_DATA_4_VF_MPT_BT_NUM_S 16
-#define VF_RES_A_DATA_4_VF_MPT_BT_NUM_M GENMASK(25, 16)
-
-#define VF_RES_A_DATA_5_VF_EQC_IDX_S 0
-#define VF_RES_A_DATA_5_VF_EQC_IDX_M GENMASK(8, 0)
-
-#define VF_RES_A_DATA_5_VF_EQC_NUM_S 16
-#define VF_RES_A_DATA_5_VF_EQC_NUM_M GENMASK(25, 16)
-
-struct hns_roce_vf_res_b {
-	__le32 rsv0;
-	__le32 vf_smac_idx_num;
-	__le32 vf_sgid_idx_num;
-	__le32 vf_qid_idx_sl_num;
-	__le32 vf_sccc_idx_num;
-	__le32 vf_gmv_idx_num;
-};
-
-#define VF_RES_B_DATA_0_VF_ID_S 0
-#define VF_RES_B_DATA_0_VF_ID_M GENMASK(7, 0)
-
-#define VF_RES_B_DATA_1_VF_SMAC_IDX_S 0
-#define VF_RES_B_DATA_1_VF_SMAC_IDX_M GENMASK(7, 0)
-
-#define VF_RES_B_DATA_1_VF_SMAC_NUM_S 8
-#define VF_RES_B_DATA_1_VF_SMAC_NUM_M GENMASK(16, 8)
-
-#define VF_RES_B_DATA_2_VF_SGID_IDX_S 0
-#define VF_RES_B_DATA_2_VF_SGID_IDX_M GENMASK(7, 0)
-
-#define VF_RES_B_DATA_2_VF_SGID_NUM_S 8
-#define VF_RES_B_DATA_2_VF_SGID_NUM_M GENMASK(16, 8)
-
-#define VF_RES_B_DATA_3_VF_QID_IDX_S 0
-#define VF_RES_B_DATA_3_VF_QID_IDX_M GENMASK(9, 0)
-
-#define VF_RES_B_DATA_3_VF_SL_NUM_S 16
-#define VF_RES_B_DATA_3_VF_SL_NUM_M GENMASK(19, 16)
-
-#define VF_RES_B_DATA_4_VF_SCCC_BT_IDX_S 0
-#define VF_RES_B_DATA_4_VF_SCCC_BT_IDX_M GENMASK(8, 0)
-
-#define VF_RES_B_DATA_4_VF_SCCC_BT_NUM_S 9
-#define VF_RES_B_DATA_4_VF_SCCC_BT_NUM_M GENMASK(17, 9)
-
-#define VF_RES_B_DATA_5_VF_GMV_BT_IDX_S 0
-#define VF_RES_B_DATA_5_VF_GMV_BT_IDX_M GENMASK(7, 0)
-
-#define VF_RES_B_DATA_5_VF_GMV_BT_NUM_S 16
-#define VF_RES_B_DATA_5_VF_GMV_BT_NUM_M GENMASK(24, 16)
+/* Fields of HNS_ROCE_OPC_QUERY_PF_TIMER_RES */
+#define PF_TIMER_RES_QPC_ITEM_IDX CMQ_REQ_FIELD_LOC(43, 32)
+#define PF_TIMER_RES_QPC_ITEM_NUM CMQ_REQ_FIELD_LOC(60, 48)
+#define PF_TIMER_RES_CQC_ITEM_IDX CMQ_REQ_FIELD_LOC(74, 64)
+#define PF_TIMER_RES_CQC_ITEM_NUM CMQ_REQ_FIELD_LOC(91, 80)
 
 struct hns_roce_vf_switch {
 	__le32 rocee_sel;
@@ -1578,59 +1445,43 @@ struct hns_roce_mbox_status {
 	__le32 rsv[5];
 };
 
-struct hns_roce_cfg_bt_attr {
-	__le32 vf_qpc_cfg;
-	__le32 vf_srqc_cfg;
-	__le32 vf_cqc_cfg;
-	__le32 vf_mpt_cfg;
-	__le32 vf_sccc_cfg;
-	__le32 rsv;
-};
+#define HNS_ROCE_V2_GO_BIT_TIMEOUT_MSECS 10000
 
-#define CFG_BT_ATTR_DATA_0_VF_QPC_BA_PGSZ_S 0
-#define CFG_BT_ATTR_DATA_0_VF_QPC_BA_PGSZ_M GENMASK(3, 0)
+#define MB_ST_HW_RUN_M BIT(31)
+#define MB_ST_COMPLETE_M GENMASK(7, 0)
 
-#define CFG_BT_ATTR_DATA_0_VF_QPC_BUF_PGSZ_S 4
-#define CFG_BT_ATTR_DATA_0_VF_QPC_BUF_PGSZ_M GENMASK(7, 4)
+#define MB_ST_COMPLETE_SUCC 1
 
-#define CFG_BT_ATTR_DATA_0_VF_QPC_HOPNUM_S 8
-#define CFG_BT_ATTR_DATA_0_VF_QPC_HOPNUM_M GENMASK(9, 8)
+/* Fields of HNS_ROCE_OPC_CFG_BT_ATTR */
+#define CFG_BT_ATTR_QPC_BA_PGSZ CMQ_REQ_FIELD_LOC(3, 0)
+#define CFG_BT_ATTR_QPC_BUF_PGSZ CMQ_REQ_FIELD_LOC(7, 4)
+#define CFG_BT_ATTR_QPC_HOPNUM CMQ_REQ_FIELD_LOC(9, 8)
+#define CFG_BT_ATTR_SRQC_BA_PGSZ CMQ_REQ_FIELD_LOC(35, 32)
+#define CFG_BT_ATTR_SRQC_BUF_PGSZ CMQ_REQ_FIELD_LOC(39, 36)
+#define CFG_BT_ATTR_SRQC_HOPNUM CMQ_REQ_FIELD_LOC(41, 40)
+#define CFG_BT_ATTR_CQC_BA_PGSZ CMQ_REQ_FIELD_LOC(67, 64)
+#define CFG_BT_ATTR_CQC_BUF_PGSZ CMQ_REQ_FIELD_LOC(71, 68)
+#define CFG_BT_ATTR_CQC_HOPNUM CMQ_REQ_FIELD_LOC(73, 72)
+#define CFG_BT_ATTR_MPT_BA_PGSZ CMQ_REQ_FIELD_LOC(99, 96)
+#define CFG_BT_ATTR_MPT_BUF_PGSZ CMQ_REQ_FIELD_LOC(103, 100)
+#define CFG_BT_ATTR_MPT_HOPNUM CMQ_REQ_FIELD_LOC(105, 104)
+#define CFG_BT_ATTR_SCCC_BA_PGSZ CMQ_REQ_FIELD_LOC(131, 128)
+#define CFG_BT_ATTR_SCCC_BUF_PGSZ CMQ_REQ_FIELD_LOC(135, 132)
+#define CFG_BT_ATTR_SCCC_HOPNUM CMQ_REQ_FIELD_LOC(137, 136)
 
-#define CFG_BT_ATTR_DATA_1_VF_SRQC_BA_PGSZ_S 0
-#define CFG_BT_ATTR_DATA_1_VF_SRQC_BA_PGSZ_M GENMASK(3, 0)
-
-#define CFG_BT_ATTR_DATA_1_VF_SRQC_BUF_PGSZ_S 4
-#define CFG_BT_ATTR_DATA_1_VF_SRQC_BUF_PGSZ_M GENMASK(7, 4)
-
-#define CFG_BT_ATTR_DATA_1_VF_SRQC_HOPNUM_S 8
-#define CFG_BT_ATTR_DATA_1_VF_SRQC_HOPNUM_M GENMASK(9, 8)
-
-#define CFG_BT_ATTR_DATA_2_VF_CQC_BA_PGSZ_S 0
-#define CFG_BT_ATTR_DATA_2_VF_CQC_BA_PGSZ_M GENMASK(3, 0)
-
-#define CFG_BT_ATTR_DATA_2_VF_CQC_BUF_PGSZ_S 4
-#define CFG_BT_ATTR_DATA_2_VF_CQC_BUF_PGSZ_M GENMASK(7, 4)
-
-#define CFG_BT_ATTR_DATA_2_VF_CQC_HOPNUM_S 8
-#define CFG_BT_ATTR_DATA_2_VF_CQC_HOPNUM_M GENMASK(9, 8)
-
-#define CFG_BT_ATTR_DATA_3_VF_MPT_BA_PGSZ_S 0
-#define CFG_BT_ATTR_DATA_3_VF_MPT_BA_PGSZ_M GENMASK(3, 0)
-
-#define CFG_BT_ATTR_DATA_3_VF_MPT_BUF_PGSZ_S 4
-#define CFG_BT_ATTR_DATA_3_VF_MPT_BUF_PGSZ_M GENMASK(7, 4)
-
-#define CFG_BT_ATTR_DATA_3_VF_MPT_HOPNUM_S 8
-#define CFG_BT_ATTR_DATA_3_VF_MPT_HOPNUM_M GENMASK(9, 8)
-
-#define CFG_BT_ATTR_DATA_4_VF_SCCC_BA_PGSZ_S 0
-#define CFG_BT_ATTR_DATA_4_VF_SCCC_BA_PGSZ_M GENMASK(3, 0)
-
-#define CFG_BT_ATTR_DATA_4_VF_SCCC_BUF_PGSZ_S 4
-#define CFG_BT_ATTR_DATA_4_VF_SCCC_BUF_PGSZ_M GENMASK(7, 4)
-
-#define CFG_BT_ATTR_DATA_4_VF_SCCC_HOPNUM_S 8
-#define CFG_BT_ATTR_DATA_4_VF_SCCC_HOPNUM_M GENMASK(9, 8)
+/* Fields of HNS_ROCE_OPC_CFG_ENTRY_SIZE */
+#define CFG_HEM_ENTRY_SIZE_TYPE CMQ_REQ_FIELD_LOC(31, 0)
+enum {
+	HNS_ROCE_CFG_QPC_SIZE = BIT(0),
+	HNS_ROCE_CFG_SCCC_SIZE = BIT(1),
+};
+
+#define CFG_HEM_ENTRY_SIZE_VALUE CMQ_REQ_FIELD_LOC(191, 160)
+
+/* Fields of HNS_ROCE_OPC_CFG_GMV_BT */
+#define CFG_GMV_BT_BA_L CMQ_REQ_FIELD_LOC(31, 0)
+#define CFG_GMV_BT_BA_H CMQ_REQ_FIELD_LOC(51, 32)
+#define CFG_GMV_BT_IDX CMQ_REQ_FIELD_LOC(95, 64)
 
 struct hns_roce_cfg_sgid_tb {
 	__le32 table_idx_rsv;
@@ -1641,17 +1492,6 @@ struct hns_roce_cfg_sgid_tb {
 	__le32 vf_sgid_type_rsv;
 };
 
-enum {
-	HNS_ROCE_CFG_QPC_SIZE = BIT(0),
-	HNS_ROCE_CFG_SCCC_SIZE = BIT(1),
-};
-
-struct hns_roce_cfg_entry_size {
-	__le32 type;
-	__le32 rsv[4];
-	__le32 size;
-};
-
 #define CFG_SGID_TB_TABLE_IDX_S 0
 #define CFG_SGID_TB_TABLE_IDX_M GENMASK(7, 0)
 
@@ -1670,16 +1510,6 @@ struct hns_roce_cfg_smac_tb {
 #define CFG_SMAC_TB_VF_SMAC_H_S 0
 #define CFG_SMAC_TB_VF_SMAC_H_M GENMASK(15, 0)
 
-struct hns_roce_cfg_gmv_bt {
-	__le32 gmv_ba_l;
-	__le32 gmv_ba_h;
-	__le32 gmv_bt_idx;
-	__le32 rsv[3];
-};
-
-#define CFG_GMV_BA_H_S 0
-#define CFG_GMV_BA_H_M GENMASK(19, 0)
-
 struct hns_roce_cfg_gmv_tb_a {
 	__le32 vf_sgid_l;
 	__le32 vf_sgid_ml;
@@ -1805,6 +1635,14 @@ struct hns_roce_query_pf_caps_d {
 #define V2_QUERY_PF_CAPS_D_SQWQE_HOP_NUM_S 24
 #define V2_QUERY_PF_CAPS_D_SQWQE_HOP_NUM_M GENMASK(25, 24)
 
+#define V2_QUERY_PF_CAPS_D_CONG_TYPE_S 26
+#define V2_QUERY_PF_CAPS_D_CONG_TYPE_M GENMASK(29, 26)
+
+struct hns_roce_congestion_algorithm {
+	u8 alg_sel;
+	u8 alg_sub_sel;
+	u8 dip_vld;
+};
+
 #define V2_QUERY_PF_CAPS_D_CEQ_DEPTH_S 0
 #define V2_QUERY_PF_CAPS_D_CEQ_DEPTH_M GENMASK(21, 0)
 
@@ -1859,19 +1697,28 @@ struct hns_roce_query_pf_caps_e {
 #define V2_QUERY_PF_CAPS_E_RSV_LKEYS_S 0
 #define V2_QUERY_PF_CAPS_E_RSV_LKEYS_M GENMASK(19, 0)
 
+struct hns_roce_cmq_req {
+	__le32 data[6];
+};
+
+#define CMQ_REQ_FIELD_LOC(h, l) FIELD_LOC(struct hns_roce_cmq_req, h, l)
+
 struct hns_roce_cmq_desc {
 	__le16 opcode;
 	__le16 flag;
 	__le16 retval;
 	__le16 rsv;
-	__le32 data[6];
+	union {
+		__le32 data[6];
+		struct {
+			__le32 own_func_num;
+			__le32 own_mac_id;
+			__le32 rsv[4];
+		} func_info;
+	};
 };
 
-#define HNS_ROCE_V2_GO_BIT_TIMEOUT_MSECS 10000
-
-#define HNS_ROCE_HW_RUN_BIT_SHIFT 31
-#define HNS_ROCE_HW_MB_STATUS_MASK 0xFF
-
 struct hns_roce_v2_cmq_ring {
 	dma_addr_t desc_dma_addr;
 	struct hns_roce_cmq_desc *desc;
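One way to read the hns_roce_cmq_desc change above: the six payload dwords remain addressable as raw data[] while gaining a typed view for the new QUERY_FUNC_INFO reply, through an anonymous union. A compile-and-run sketch of the same overlay (field names copied from the hunk; the usage is invented for illustration):

#include <stdint.h>
#include <stdio.h>
#include <string.h>

struct cmq_desc {
	uint16_t opcode;
	uint16_t flag;
	uint16_t retval;
	uint16_t rsv;
	union {			/* C11 anonymous union, as in the hunk */
		uint32_t data[6];
		struct {
			uint32_t own_func_num;
			uint32_t own_mac_id;
			uint32_t rsv[4];
		} func_info;
	};
};

int main(void)
{
	struct cmq_desc d;

	memset(&d, 0, sizeof(d));
	d.data[0] = 8;		/* firmware fills the raw words... */
	printf("func num: %u\n",
	       d.func_info.own_func_num);	/* ...driver reads the view */
	return 0;
}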
@@ -1932,6 +1779,12 @@ struct hns_roce_eq_context {
 	__le32 rsv[5];
 };
 
+struct hns_roce_dip {
+	u8 dgid[GID_LEN_V2];
+	u8 dip_idx;
+	struct list_head node; /* all dips are on a list */
+};
+
 #define HNS_ROCE_AEQ_DEFAULT_BURST_NUM 0x0
 #define HNS_ROCE_AEQ_DEFAULT_INTERVAL 0x0
 #define HNS_ROCE_CEQ_DEFAULT_BURST_NUM 0x0
@@ -1966,8 +1819,7 @@ struct hns_roce_eq_context {
 #define HNS_ROCE_V2_ASYNC_EQE_NUM 0x1000
 
 #define HNS_ROCE_V2_VF_INT_ST_AEQ_OVERFLOW_S 0
-#define HNS_ROCE_V2_VF_INT_ST_BUS_ERR_S 1
-#define HNS_ROCE_V2_VF_INT_ST_OTHER_ERR_S 2
+#define HNS_ROCE_V2_VF_INT_ST_RAS_INT_S 1
 
 #define HNS_ROCE_EQ_DB_CMD_AEQ 0x0
 #define HNS_ROCE_EQ_DB_CMD_AEQ_ARMED 0x1
|
|||
#define HNS_ROCE_INT_NAME_LEN 32
|
||||
#define HNS_ROCE_V2_EQN_M GENMASK(23, 0)
|
||||
|
||||
#define HNS_ROCE_V2_CONS_IDX_M GENMASK(23, 0)
|
||||
|
||||
#define HNS_ROCE_V2_VF_ABN_INT_EN_S 0
|
||||
#define HNS_ROCE_V2_VF_ABN_INT_EN_M GENMASK(0, 0)
|
||||
#define HNS_ROCE_V2_VF_ABN_INT_ST_M GENMASK(2, 0)
|
||||
#define HNS_ROCE_V2_VF_ABN_INT_CFG_M GENMASK(2, 0)
|
||||
#define HNS_ROCE_V2_VF_EVENT_INT_EN_M GENMASK(0, 0)
|
||||
|
||||
/* WORD0 */
|
||||
#define HNS_ROCE_EQC_EQ_ST_S 0
|
||||
#define HNS_ROCE_EQC_EQ_ST_M GENMASK(1, 0)
|
||||
#define EQC_FIELD_LOC(h, l) FIELD_LOC(struct hns_roce_eq_context, h, l)
|
||||
|
||||
#define HNS_ROCE_EQC_HOP_NUM_S 2
|
||||
#define HNS_ROCE_EQC_HOP_NUM_M GENMASK(3, 2)
|
||||
|
||||
#define HNS_ROCE_EQC_OVER_IGNORE_S 4
|
||||
#define HNS_ROCE_EQC_OVER_IGNORE_M GENMASK(4, 4)
|
||||
|
||||
#define HNS_ROCE_EQC_COALESCE_S 5
|
||||
#define HNS_ROCE_EQC_COALESCE_M GENMASK(5, 5)
|
||||
|
||||
#define HNS_ROCE_EQC_ARM_ST_S 6
|
||||
#define HNS_ROCE_EQC_ARM_ST_M GENMASK(7, 6)
|
||||
|
||||
#define HNS_ROCE_EQC_EQN_S 8
|
||||
#define HNS_ROCE_EQC_EQN_M GENMASK(15, 8)
|
||||
|
||||
#define HNS_ROCE_EQC_EQE_CNT_S 16
|
||||
#define HNS_ROCE_EQC_EQE_CNT_M GENMASK(31, 16)
|
||||
|
||||
/* WORD1 */
|
||||
#define HNS_ROCE_EQC_BA_PG_SZ_S 0
|
||||
#define HNS_ROCE_EQC_BA_PG_SZ_M GENMASK(3, 0)
|
||||
|
||||
#define HNS_ROCE_EQC_BUF_PG_SZ_S 4
|
||||
#define HNS_ROCE_EQC_BUF_PG_SZ_M GENMASK(7, 4)
|
||||
|
||||
#define HNS_ROCE_EQC_PROD_INDX_S 8
|
||||
#define HNS_ROCE_EQC_PROD_INDX_M GENMASK(31, 8)
|
||||
|
||||
/* WORD2 */
|
||||
#define HNS_ROCE_EQC_MAX_CNT_S 0
|
||||
#define HNS_ROCE_EQC_MAX_CNT_M GENMASK(15, 0)
|
||||
|
||||
#define HNS_ROCE_EQC_PERIOD_S 16
|
||||
#define HNS_ROCE_EQC_PERIOD_M GENMASK(31, 16)
|
||||
|
||||
/* WORD3 */
|
||||
#define HNS_ROCE_EQC_REPORT_TIMER_S 0
|
||||
#define HNS_ROCE_EQC_REPORT_TIMER_M GENMASK(31, 0)
|
||||
|
||||
/* WORD4 */
|
||||
#define HNS_ROCE_EQC_EQE_BA_L_S 0
|
||||
#define HNS_ROCE_EQC_EQE_BA_L_M GENMASK(31, 0)
|
||||
|
||||
/* WORD5 */
|
||||
#define HNS_ROCE_EQC_EQE_BA_H_S 0
|
||||
#define HNS_ROCE_EQC_EQE_BA_H_M GENMASK(28, 0)
|
||||
|
||||
/* WORD6 */
|
||||
#define HNS_ROCE_EQC_SHIFT_S 0
|
||||
#define HNS_ROCE_EQC_SHIFT_M GENMASK(7, 0)
|
||||
|
||||
#define HNS_ROCE_EQC_MSI_INDX_S 8
|
||||
#define HNS_ROCE_EQC_MSI_INDX_M GENMASK(15, 8)
|
||||
|
||||
#define HNS_ROCE_EQC_CUR_EQE_BA_L_S 16
|
||||
#define HNS_ROCE_EQC_CUR_EQE_BA_L_M GENMASK(31, 16)
|
||||
|
||||
/* WORD7 */
|
||||
#define HNS_ROCE_EQC_CUR_EQE_BA_M_S 0
|
||||
#define HNS_ROCE_EQC_CUR_EQE_BA_M_M GENMASK(31, 0)
|
||||
|
||||
/* WORD8 */
|
||||
#define HNS_ROCE_EQC_CUR_EQE_BA_H_S 0
|
||||
#define HNS_ROCE_EQC_CUR_EQE_BA_H_M GENMASK(3, 0)
|
||||
|
||||
#define HNS_ROCE_EQC_CONS_INDX_S 8
|
||||
#define HNS_ROCE_EQC_CONS_INDX_M GENMASK(31, 8)
|
||||
|
||||
/* WORD9 */
|
||||
#define HNS_ROCE_EQC_NXT_EQE_BA_L_S 0
|
||||
#define HNS_ROCE_EQC_NXT_EQE_BA_L_M GENMASK(31, 0)
|
||||
|
||||
/* WORD10 */
|
||||
#define HNS_ROCE_EQC_NXT_EQE_BA_H_S 0
|
||||
#define HNS_ROCE_EQC_NXT_EQE_BA_H_M GENMASK(19, 0)
|
||||
|
||||
#define HNS_ROCE_EQC_EQE_SIZE_S 20
|
||||
#define HNS_ROCE_EQC_EQE_SIZE_M GENMASK(21, 20)
|
||||
#define EQC_EQ_ST EQC_FIELD_LOC(1, 0)
|
||||
#define EQC_EQE_HOP_NUM EQC_FIELD_LOC(3, 2)
|
||||
#define EQC_OVER_IGNORE EQC_FIELD_LOC(4, 4)
|
||||
#define EQC_COALESCE EQC_FIELD_LOC(5, 5)
|
||||
#define EQC_ARM_ST EQC_FIELD_LOC(7, 6)
|
||||
#define EQC_EQN EQC_FIELD_LOC(15, 8)
|
||||
#define EQC_EQE_CNT EQC_FIELD_LOC(31, 16)
|
||||
#define EQC_EQE_BA_PG_SZ EQC_FIELD_LOC(35, 32)
|
||||
#define EQC_EQE_BUF_PG_SZ EQC_FIELD_LOC(39, 36)
|
||||
#define EQC_EQ_PROD_INDX EQC_FIELD_LOC(63, 40)
|
||||
#define EQC_EQ_MAX_CNT EQC_FIELD_LOC(79, 64)
|
||||
#define EQC_EQ_PERIOD EQC_FIELD_LOC(95, 80)
|
||||
#define EQC_EQE_REPORT_TIMER EQC_FIELD_LOC(127, 96)
|
||||
#define EQC_EQE_BA_L EQC_FIELD_LOC(159, 128)
|
||||
#define EQC_EQE_BA_H EQC_FIELD_LOC(188, 160)
|
||||
#define EQC_SHIFT EQC_FIELD_LOC(199, 192)
|
||||
#define EQC_MSI_INDX EQC_FIELD_LOC(207, 200)
|
||||
#define EQC_CUR_EQE_BA_L EQC_FIELD_LOC(223, 208)
|
||||
#define EQC_CUR_EQE_BA_M EQC_FIELD_LOC(255, 224)
|
||||
#define EQC_CUR_EQE_BA_H EQC_FIELD_LOC(259, 256)
|
||||
#define EQC_EQ_CONS_INDX EQC_FIELD_LOC(287, 264)
|
||||
#define EQC_NEX_EQE_BA_L EQC_FIELD_LOC(319, 288)
|
||||
#define EQC_NEX_EQE_BA_H EQC_FIELD_LOC(339, 320)
|
||||
#define EQC_EQE_SIZE EQC_FIELD_LOC(341, 340)
|
||||
|
||||
#define HNS_ROCE_V2_CEQE_COMP_CQN_S 0
|
||||
#define HNS_ROCE_V2_CEQE_COMP_CQN_M GENMASK(23, 0)
|
||||
|
@ -2082,14 +1876,14 @@ struct hns_roce_eq_context {
|
|||
#define HNS_ROCE_V2_AEQE_SUB_TYPE_S 8
|
||||
#define HNS_ROCE_V2_AEQE_SUB_TYPE_M GENMASK(15, 8)
|
||||
|
||||
#define HNS_ROCE_V2_EQ_DB_CMD_S 16
|
||||
#define HNS_ROCE_V2_EQ_DB_CMD_M GENMASK(17, 16)
|
||||
#define V2_EQ_DB_TAG_S 0
|
||||
#define V2_EQ_DB_TAG_M GENMASK(7, 0)
|
||||
|
||||
#define HNS_ROCE_V2_EQ_DB_TAG_S 0
|
||||
#define HNS_ROCE_V2_EQ_DB_TAG_M GENMASK(7, 0)
|
||||
#define V2_EQ_DB_CMD_S 16
|
||||
#define V2_EQ_DB_CMD_M GENMASK(17, 16)
|
||||
|
||||
#define HNS_ROCE_V2_EQ_DB_PARA_S 0
|
||||
#define HNS_ROCE_V2_EQ_DB_PARA_M GENMASK(23, 0)
|
||||
#define V2_EQ_DB_CONS_IDX_S 0
|
||||
#define V2_EQ_DB_CONS_IDX_M GENMASK(23, 0)
|
||||
|
||||
#define HNS_ROCE_V2_AEQE_EVENT_QUEUE_NUM_S 0
|
||||
#define HNS_ROCE_V2_AEQE_EVENT_QUEUE_NUM_M GENMASK(23, 0)
|
||||
|
|
|
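The header changes above swap per-field *_S/*_M shift-and-mask pairs for FIELD_LOC(high, low) spans that give each field's absolute bit range within the structure. A minimal user-space sketch of the underlying shift/mask idiom, with GENMASK reimplemented locally and a made-up field name (not the driver's API):

#include <stdint.h>
#include <stdio.h>

/* Stand-in for the kernel's GENMASK(): bits [h:l] set, 32-bit wide. */
#define GENMASK(h, l) (((~0u) >> (31 - (h))) & ((~0u) << (l)))

/* Old style: a shift macro (_S) plus a mask macro (_M) per field. */
#define DEMO_CONG_TYPE_S 26
#define DEMO_CONG_TYPE_M GENMASK(29, 26)

int main(void)
{
        uint32_t word = 0;

        /* Encode value 0x5 into bits [29:26] of a descriptor word. */
        word |= (0x5u << DEMO_CONG_TYPE_S) & DEMO_CONG_TYPE_M;

        /* Decode it back the way roce_get_field()-style helpers do. */
        printf("field = 0x%x\n",
               (word & DEMO_CONG_TYPE_M) >> DEMO_CONG_TYPE_S);
        return 0;
}
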
@@ -42,7 +42,7 @@
 #include "hns_roce_device.h"
 #include "hns_roce_hem.h"
 
-static int hns_roce_set_mac(struct hns_roce_dev *hr_dev, u8 port, u8 *addr)
+static int hns_roce_set_mac(struct hns_roce_dev *hr_dev, u32 port, u8 *addr)
 {
         u8 phy_port;
         u32 i;

@@ -63,7 +63,7 @@ static int hns_roce_set_mac(struct hns_roce_dev *hr_dev, u8 port, u8 *addr)
 static int hns_roce_add_gid(const struct ib_gid_attr *attr, void **context)
 {
         struct hns_roce_dev *hr_dev = to_hr_dev(attr->device);
-        u8 port = attr->port_num - 1;
+        u32 port = attr->port_num - 1;
         int ret;
 
         if (port >= hr_dev->caps.num_ports)

@@ -77,7 +77,7 @@ static int hns_roce_add_gid(const struct ib_gid_attr *attr, void **context)
 static int hns_roce_del_gid(const struct ib_gid_attr *attr, void **context)
 {
         struct hns_roce_dev *hr_dev = to_hr_dev(attr->device);
-        u8 port = attr->port_num - 1;
+        u32 port = attr->port_num - 1;
         int ret;
 
         if (port >= hr_dev->caps.num_ports)

@@ -88,7 +88,7 @@ static int hns_roce_del_gid(const struct ib_gid_attr *attr, void **context)
         return ret;
 }
 
-static int handle_en_event(struct hns_roce_dev *hr_dev, u8 port,
+static int handle_en_event(struct hns_roce_dev *hr_dev, u32 port,
                            unsigned long event)
 {
         struct device *dev = hr_dev->dev;

@@ -128,7 +128,7 @@ static int hns_roce_netdev_event(struct notifier_block *self,
         struct hns_roce_ib_iboe *iboe = NULL;
         struct hns_roce_dev *hr_dev = NULL;
         int ret;
-        u8 port;
+        u32 port;
 
         hr_dev = container_of(self, struct hns_roce_dev, iboe.nb);
         iboe = &hr_dev->iboe;

@@ -207,10 +207,13 @@ static int hns_roce_query_device(struct ib_device *ib_dev,
                 props->max_fast_reg_page_list_len = HNS_ROCE_FRMR_MAX_PA;
         }
 
+        if (hr_dev->caps.flags & HNS_ROCE_CAP_FLAG_XRC)
+                props->device_cap_flags |= IB_DEVICE_XRC;
+
         return 0;
 }
 
-static int hns_roce_query_port(struct ib_device *ib_dev, u8 port_num,
+static int hns_roce_query_port(struct ib_device *ib_dev, u32 port_num,
                                struct ib_port_attr *props)
 {
         struct hns_roce_dev *hr_dev = to_hr_dev(ib_dev);

@@ -218,7 +221,7 @@ static int hns_roce_query_port(struct ib_device *ib_dev, u8 port_num,
         struct net_device *net_dev;
         unsigned long flags;
         enum ib_mtu mtu;
-        u8 port;
+        u32 port;
 
         port = port_num - 1;

@@ -258,12 +261,12 @@ static int hns_roce_query_port(struct ib_device *ib_dev, u8 port_num,
 }
 
 static enum rdma_link_layer hns_roce_get_link_layer(struct ib_device *device,
-                                                    u8 port_num)
+                                                    u32 port_num)
 {
         return IB_LINK_LAYER_ETHERNET;
 }
 
-static int hns_roce_query_pkey(struct ib_device *ib_dev, u8 port, u16 index,
+static int hns_roce_query_pkey(struct ib_device *ib_dev, u32 port, u16 index,
                                u16 *pkey)
 {
         *pkey = PKEY_ID;

@@ -300,12 +303,14 @@ static int hns_roce_alloc_ucontext(struct ib_ucontext *uctx,
                 return -EAGAIN;
 
         resp.qp_tab_size = hr_dev->caps.num_qps;
+        resp.srq_tab_size = hr_dev->caps.num_srqs;
 
         ret = hns_roce_uar_alloc(hr_dev, &context->uar);
         if (ret)
                 goto error_fail_uar_alloc;
 
-        if (hr_dev->caps.flags & HNS_ROCE_CAP_FLAG_RECORD_DB) {
+        if (hr_dev->caps.flags & HNS_ROCE_CAP_FLAG_CQ_RECORD_DB ||
+            hr_dev->caps.flags & HNS_ROCE_CAP_FLAG_QP_RECORD_DB) {
                 INIT_LIST_HEAD(&context->page_list);
                 mutex_init(&context->page_mutex);
         }

@@ -365,7 +370,7 @@ static int hns_roce_mmap(struct ib_ucontext *context,
         }
 }
 
-static int hns_roce_port_immutable(struct ib_device *ib_dev, u8 port_num,
+static int hns_roce_port_immutable(struct ib_device *ib_dev, u32 port_num,
                                    struct ib_port_immutable *immutable)
 {
         struct ib_port_attr attr;

@@ -390,6 +395,19 @@ static void hns_roce_disassociate_ucontext(struct ib_ucontext *ibcontext)
 {
 }
 
+static void hns_roce_get_fw_ver(struct ib_device *device, char *str)
+{
+        u64 fw_ver = to_hr_dev(device)->caps.fw_ver;
+        unsigned int major, minor, sub_minor;
+
+        major = upper_32_bits(fw_ver);
+        minor = high_16_bits(lower_32_bits(fw_ver));
+        sub_minor = low_16_bits(fw_ver);
+
+        snprintf(str, IB_FW_VERSION_NAME_MAX, "%u.%u.%04u", major, minor,
+                 sub_minor);
+}
+
 static void hns_roce_unregister_device(struct hns_roce_dev *hr_dev)
 {
         struct hns_roce_ib_iboe *iboe = &hr_dev->iboe;

@@ -405,6 +423,7 @@ static const struct ib_device_ops hns_roce_dev_ops = {
         .uverbs_abi_ver = 1,
         .uverbs_no_driver_id_binding = 1,
 
+        .get_dev_fw_str = hns_roce_get_fw_ver,
         .add_gid = hns_roce_add_gid,
         .alloc_pd = hns_roce_alloc_pd,
         .alloc_ucontext = hns_roce_alloc_ucontext,

@@ -461,6 +480,13 @@ static const struct ib_device_ops hns_roce_dev_srq_ops = {
         INIT_RDMA_OBJ_SIZE(ib_srq, hns_roce_srq, ibsrq),
 };
 
+static const struct ib_device_ops hns_roce_dev_xrcd_ops = {
+        .alloc_xrcd = hns_roce_alloc_xrcd,
+        .dealloc_xrcd = hns_roce_dealloc_xrcd,
+
+        INIT_RDMA_OBJ_SIZE(ib_xrcd, hns_roce_xrcd, ibxrcd),
+};
+
 static int hns_roce_register_device(struct hns_roce_dev *hr_dev)
 {
         int ret;

@@ -484,20 +510,20 @@ static int hns_roce_register_device(struct hns_roce_dev *hr_dev)
         if (hr_dev->caps.flags & HNS_ROCE_CAP_FLAG_REREG_MR)
                 ib_set_device_ops(ib_dev, &hns_roce_dev_mr_ops);
 
-        /* MW */
         if (hr_dev->caps.flags & HNS_ROCE_CAP_FLAG_MW)
                 ib_set_device_ops(ib_dev, &hns_roce_dev_mw_ops);
 
-        /* FRMR */
         if (hr_dev->caps.flags & HNS_ROCE_CAP_FLAG_FRMR)
                 ib_set_device_ops(ib_dev, &hns_roce_dev_frmr_ops);
 
-        /* SRQ */
         if (hr_dev->caps.flags & HNS_ROCE_CAP_FLAG_SRQ) {
                 ib_set_device_ops(ib_dev, &hns_roce_dev_srq_ops);
                 ib_set_device_ops(ib_dev, hr_dev->hw->hns_roce_dev_srq_ops);
         }
 
+        if (hr_dev->caps.flags & HNS_ROCE_CAP_FLAG_XRC)
+                ib_set_device_ops(ib_dev, &hns_roce_dev_xrcd_ops);
+
         ib_set_device_ops(ib_dev, hr_dev->hw->hns_roce_dev_ops);
         ib_set_device_ops(ib_dev, &hns_roce_dev_ops);
         for (i = 0; i < hr_dev->caps.num_ports; i++) {

@@ -704,7 +730,8 @@ static int hns_roce_setup_hca(struct hns_roce_dev *hr_dev)
         spin_lock_init(&hr_dev->sm_lock);
         spin_lock_init(&hr_dev->bt_cmd_lock);
 
-        if (hr_dev->caps.flags & HNS_ROCE_CAP_FLAG_RECORD_DB) {
+        if (hr_dev->caps.flags & HNS_ROCE_CAP_FLAG_CQ_RECORD_DB ||
+            hr_dev->caps.flags & HNS_ROCE_CAP_FLAG_QP_RECORD_DB) {
                 INIT_LIST_HEAD(&hr_dev->pgdir_list);
                 mutex_init(&hr_dev->pgdir_mutex);
         }

@@ -727,10 +754,19 @@ static int hns_roce_setup_hca(struct hns_roce_dev *hr_dev)
                 goto err_uar_alloc_free;
         }
 
+        if (hr_dev->caps.flags & HNS_ROCE_CAP_FLAG_XRC) {
+                ret = hns_roce_init_xrcd_table(hr_dev);
+                if (ret) {
+                        dev_err(dev, "failed to init xrcd table, ret = %d.\n",
+                                ret);
+                        goto err_pd_table_free;
+                }
+        }
+
         ret = hns_roce_init_mr_table(hr_dev);
         if (ret) {
                 dev_err(dev, "Failed to init memory region table.\n");
-                goto err_pd_table_free;
+                goto err_xrcd_table_free;
         }
 
         hns_roce_init_cq_table(hr_dev);

@@ -759,6 +795,10 @@ static int hns_roce_setup_hca(struct hns_roce_dev *hr_dev)
         hns_roce_cleanup_cq_table(hr_dev);
         hns_roce_cleanup_mr_table(hr_dev);
 
+err_xrcd_table_free:
+        if (hr_dev->caps.flags & HNS_ROCE_CAP_FLAG_XRC)
+                hns_roce_cleanup_xrcd_table(hr_dev);
+
 err_pd_table_free:
         hns_roce_cleanup_pd_table(hr_dev);

@@ -886,6 +926,8 @@ int hns_roce_init(struct hns_roce_dev *hr_dev)
 
         INIT_LIST_HEAD(&hr_dev->qp_list);
         spin_lock_init(&hr_dev->qp_list_lock);
+        INIT_LIST_HEAD(&hr_dev->dip_list);
+        spin_lock_init(&hr_dev->dip_list_lock);
 
         ret = hns_roce_register_device(hr_dev);
         if (ret)

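The recurring u8 port -> u32 port conversions in this file follow the core change raising the supported port count past 255: a u8 silently truncates any port number above that. A trivial standalone illustration (values arbitrary):

#include <stdint.h>
#include <stdio.h>

int main(void)
{
        uint32_t port_num = 300;   /* legal once a device exposes >255 ports */
        uint8_t narrow = port_num; /* truncates: 300 % 256 == 44 */

        printf("u8: %u, u32: %u\n", narrow, port_num);
        return 0;
}
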
@@ -137,3 +137,62 @@ void hns_roce_cleanup_uar_table(struct hns_roce_dev *hr_dev)
 {
         hns_roce_bitmap_cleanup(&hr_dev->uar_table.bitmap);
 }
+
+static int hns_roce_xrcd_alloc(struct hns_roce_dev *hr_dev, u32 *xrcdn)
+{
+        unsigned long obj;
+        int ret;
+
+        ret = hns_roce_bitmap_alloc(&hr_dev->xrcd_bitmap, &obj);
+        if (ret)
+                return ret;
+
+        *xrcdn = obj;
+
+        return 0;
+}
+
+static void hns_roce_xrcd_free(struct hns_roce_dev *hr_dev,
+                               u32 xrcdn)
+{
+        hns_roce_bitmap_free(&hr_dev->xrcd_bitmap, xrcdn, BITMAP_NO_RR);
+}
+
+int hns_roce_init_xrcd_table(struct hns_roce_dev *hr_dev)
+{
+        return hns_roce_bitmap_init(&hr_dev->xrcd_bitmap,
+                                    hr_dev->caps.num_xrcds,
+                                    hr_dev->caps.num_xrcds - 1,
+                                    hr_dev->caps.reserved_xrcds, 0);
+}
+
+void hns_roce_cleanup_xrcd_table(struct hns_roce_dev *hr_dev)
+{
+        hns_roce_bitmap_cleanup(&hr_dev->xrcd_bitmap);
+}
+
+int hns_roce_alloc_xrcd(struct ib_xrcd *ib_xrcd, struct ib_udata *udata)
+{
+        struct hns_roce_dev *hr_dev = to_hr_dev(ib_xrcd->device);
+        struct hns_roce_xrcd *xrcd = to_hr_xrcd(ib_xrcd);
+        int ret;
+
+        if (!(hr_dev->caps.flags & HNS_ROCE_CAP_FLAG_XRC))
+                return -EINVAL;
+
+        ret = hns_roce_xrcd_alloc(hr_dev, &xrcd->xrcdn);
+        if (ret) {
+                dev_err(hr_dev->dev, "failed to alloc xrcdn, ret = %d.\n", ret);
+                return ret;
+        }
+
+        return 0;
+}
+
+int hns_roce_dealloc_xrcd(struct ib_xrcd *ib_xrcd, struct ib_udata *udata)
+{
+        hns_roce_xrcd_free(to_hr_dev(ib_xrcd->device),
+                           to_hr_xrcd(ib_xrcd)->xrcdn);
+
+        return 0;
+}

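The XRCD allocation added above sits on the driver's hns_roce_bitmap_* helpers: IDs come from a bitmap, with a reserved range at the bottom that is never handed out. A self-contained toy model of that pattern (names and sizes invented for illustration, not the driver's API):

#include <assert.h>
#include <stdint.h>

#define MAX_IDS 64

struct id_bitmap {
        uint64_t map;      /* bit i set => id i is in use */
        unsigned reserved; /* ids below this are never allocated */
};

static int id_alloc(struct id_bitmap *b, uint32_t *id)
{
        for (unsigned i = b->reserved; i < MAX_IDS; i++) {
                if (!(b->map & (1ULL << i))) {
                        b->map |= 1ULL << i;
                        *id = i;
                        return 0;
                }
        }
        return -1; /* pool exhausted */
}

static void id_free(struct id_bitmap *b, uint32_t id)
{
        b->map &= ~(1ULL << id);
}

int main(void)
{
        struct id_bitmap b = { .map = 0, .reserved = 2 };
        uint32_t id;

        assert(id_alloc(&b, &id) == 0 && id == 2); /* lowest non-reserved id */
        id_free(&b, id);
        assert(id_alloc(&b, &id) == 0 && id == 2); /* freed ids are reusable */
        return 0;
}
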
@@ -98,7 +98,9 @@ void hns_roce_qp_event(struct hns_roce_dev *hr_dev, u32 qpn, int event_type)
         if (hr_dev->hw_rev != HNS_ROCE_HW_VER1 &&
             (event_type == HNS_ROCE_EVENT_TYPE_WQ_CATAS_ERROR ||
              event_type == HNS_ROCE_EVENT_TYPE_INV_REQ_LOCAL_WQ_ERROR ||
-             event_type == HNS_ROCE_EVENT_TYPE_LOCAL_WQ_ACCESS_ERROR)) {
+             event_type == HNS_ROCE_EVENT_TYPE_LOCAL_WQ_ACCESS_ERROR ||
+             event_type == HNS_ROCE_EVENT_TYPE_XRCD_VIOLATION ||
+             event_type == HNS_ROCE_EVENT_TYPE_INVALID_XRCETH)) {
                 qp->state = IB_QPS_ERR;
                 if (!test_and_set_bit(HNS_ROCE_FLUSH_FLAG, &qp->flush_flag))
                         init_flush_work(hr_dev, qp);

@@ -142,6 +144,8 @@ static void hns_roce_ib_qp_event(struct hns_roce_qp *hr_qp,
                 event.event = IB_EVENT_QP_REQ_ERR;
                 break;
         case HNS_ROCE_EVENT_TYPE_LOCAL_WQ_ACCESS_ERROR:
+        case HNS_ROCE_EVENT_TYPE_XRCD_VIOLATION:
+        case HNS_ROCE_EVENT_TYPE_INVALID_XRCETH:
                 event.event = IB_EVENT_QP_ACCESS_ERR;
                 break;
         default:

@@ -366,8 +370,13 @@ void hns_roce_qp_remove(struct hns_roce_dev *hr_dev, struct hns_roce_qp *hr_qp)
         unsigned long flags;
 
         list_del(&hr_qp->node);
-        list_del(&hr_qp->sq_node);
-        list_del(&hr_qp->rq_node);
+
+        if (hr_qp->ibqp.qp_type != IB_QPT_XRC_TGT)
+                list_del(&hr_qp->sq_node);
+
+        if (hr_qp->ibqp.qp_type != IB_QPT_XRC_INI &&
+            hr_qp->ibqp.qp_type != IB_QPT_XRC_TGT)
+                list_del(&hr_qp->rq_node);
 
         xa_lock_irqsave(xa, flags);
         __xa_erase(xa, hr_qp->qpn & (hr_dev->caps.num_qps - 1));

@@ -478,7 +487,9 @@ static int set_rq_size(struct hns_roce_dev *hr_dev, struct ib_qp_cap *cap,
                    hr_qp->rq.max_gs);
 
         hr_qp->rq.wqe_cnt = cnt;
-        if (hr_dev->caps.flags & HNS_ROCE_CAP_FLAG_RQ_INLINE)
+        if (hr_dev->caps.flags & HNS_ROCE_CAP_FLAG_RQ_INLINE &&
+            hr_qp->ibqp.qp_type != IB_QPT_UD &&
+            hr_qp->ibqp.qp_type != IB_QPT_GSI)
                 hr_qp->rq_inl_buf.wqe_cnt = cnt;
         else
                 hr_qp->rq_inl_buf.wqe_cnt = 0;

@@ -776,7 +787,7 @@ static inline bool user_qp_has_sdb(struct hns_roce_dev *hr_dev,
                                    struct hns_roce_ib_create_qp_resp *resp,
                                    struct hns_roce_ib_create_qp *ucmd)
 {
-        return ((hr_dev->caps.flags & HNS_ROCE_CAP_FLAG_SQ_RECORD_DB) &&
+        return ((hr_dev->caps.flags & HNS_ROCE_CAP_FLAG_QP_RECORD_DB) &&
                 udata->outlen >= offsetofend(typeof(*resp), cap_flags) &&
                 hns_roce_qp_has_sq(init_attr) &&
                 udata->inlen >= offsetofend(typeof(*ucmd), sdb_addr));

@@ -787,7 +798,7 @@ static inline bool user_qp_has_rdb(struct hns_roce_dev *hr_dev,
                                    struct ib_udata *udata,
                                    struct hns_roce_ib_create_qp_resp *resp)
 {
-        return ((hr_dev->caps.flags & HNS_ROCE_CAP_FLAG_RECORD_DB) &&
+        return ((hr_dev->caps.flags & HNS_ROCE_CAP_FLAG_QP_RECORD_DB) &&
                 udata->outlen >= offsetofend(typeof(*resp), cap_flags) &&
                 hns_roce_qp_has_rq(init_attr));
 }

@@ -795,7 +806,7 @@ static inline bool user_qp_has_rdb(struct hns_roce_dev *hr_dev,
 static inline bool kernel_qp_has_rdb(struct hns_roce_dev *hr_dev,
                                      struct ib_qp_init_attr *init_attr)
 {
-        return ((hr_dev->caps.flags & HNS_ROCE_CAP_FLAG_RECORD_DB) &&
+        return ((hr_dev->caps.flags & HNS_ROCE_CAP_FLAG_QP_RECORD_DB) &&
                 hns_roce_qp_has_rq(init_attr));
 }

@@ -840,11 +851,16 @@ static int alloc_qp_db(struct hns_roce_dev *hr_dev, struct hns_roce_qp *hr_qp,
                         resp->cap_flags |= HNS_ROCE_QP_CAP_RQ_RECORD_DB;
                 }
         } else {
-                /* QP doorbell register address */
-                hr_qp->sq.db_reg_l = hr_dev->reg_base + hr_dev->sdb_offset +
-                                     DB_REG_OFFSET * hr_dev->priv_uar.index;
-                hr_qp->rq.db_reg_l = hr_dev->reg_base + hr_dev->odb_offset +
-                                     DB_REG_OFFSET * hr_dev->priv_uar.index;
+                if (hr_dev->pci_dev->revision >= PCI_REVISION_ID_HIP09)
+                        hr_qp->sq.db_reg = hr_dev->mem_base +
+                                           HNS_ROCE_DWQE_SIZE * hr_qp->qpn;
+                else
+                        hr_qp->sq.db_reg =
+                                hr_dev->reg_base + hr_dev->sdb_offset +
+                                DB_REG_OFFSET * hr_dev->priv_uar.index;
+
+                hr_qp->rq.db_reg = hr_dev->reg_base + hr_dev->odb_offset +
+                                   DB_REG_OFFSET * hr_dev->priv_uar.index;
 
                 if (kernel_qp_has_rdb(hr_dev, init_attr)) {
                         ret = hns_roce_alloc_db(hr_dev, &hr_qp->rdb, 0);

@@ -1011,36 +1027,36 @@ static int hns_roce_create_qp_common(struct hns_roce_dev *hr_dev,
                 }
         }
 
-        ret = alloc_qp_db(hr_dev, hr_qp, init_attr, udata, &ucmd, &resp);
-        if (ret) {
-                ibdev_err(ibdev, "failed to alloc QP doorbell, ret = %d.\n",
-                          ret);
-                goto err_wrid;
-        }
-
         ret = alloc_qp_buf(hr_dev, hr_qp, init_attr, udata, ucmd.buf_addr);
         if (ret) {
                 ibdev_err(ibdev, "failed to alloc QP buffer, ret = %d.\n", ret);
-                goto err_db;
+                goto err_buf;
         }
 
         ret = alloc_qpn(hr_dev, hr_qp);
         if (ret) {
                 ibdev_err(ibdev, "failed to alloc QPN, ret = %d.\n", ret);
-                goto err_buf;
+                goto err_qpn;
         }
 
+        ret = alloc_qp_db(hr_dev, hr_qp, init_attr, udata, &ucmd, &resp);
+        if (ret) {
+                ibdev_err(ibdev, "failed to alloc QP doorbell, ret = %d.\n",
+                          ret);
+                goto err_db;
+        }
+
         ret = alloc_qpc(hr_dev, hr_qp);
         if (ret) {
                 ibdev_err(ibdev, "failed to alloc QP context, ret = %d.\n",
                           ret);
-                goto err_qpn;
+                goto err_qpc;
         }
 
         ret = hns_roce_qp_store(hr_dev, hr_qp, init_attr);
         if (ret) {
                 ibdev_err(ibdev, "failed to store QP, ret = %d.\n", ret);
-                goto err_qpc;
+                goto err_store;
         }
 
         if (udata) {

@@ -1055,7 +1071,7 @@ static int hns_roce_create_qp_common(struct hns_roce_dev *hr_dev,
         if (hr_dev->caps.flags & HNS_ROCE_CAP_FLAG_QP_FLOW_CTRL) {
                 ret = hr_dev->hw->qp_flow_control_init(hr_dev, hr_qp);
                 if (ret)
-                        goto err_store;
+                        goto err_flow_ctrl;
         }
 
         hr_qp->ibqp.qp_num = hr_qp->qpn;

@@ -1065,17 +1081,17 @@ static int hns_roce_create_qp_common(struct hns_roce_dev *hr_dev,
 
         return 0;
 
-err_store:
+err_flow_ctrl:
         hns_roce_qp_remove(hr_dev, hr_qp);
-err_qpc:
+err_store:
         free_qpc(hr_dev, hr_qp);
-err_qpn:
-        free_qpn(hr_dev, hr_qp);
-err_buf:
-        free_qp_buf(hr_dev, hr_qp);
-err_db:
+err_qpc:
         free_qp_db(hr_dev, hr_qp, udata);
-err_wrid:
+err_db:
+        free_qpn(hr_dev, hr_qp);
+err_qpn:
+        free_qp_buf(hr_dev, hr_qp);
+err_buf:
         free_kernel_wrid(hr_qp);
         return ret;
 }

@@ -1100,11 +1116,16 @@ static int check_qp_type(struct hns_roce_dev *hr_dev, enum ib_qp_type type,
                          bool is_user)
 {
         switch (type) {
+        case IB_QPT_XRC_INI:
+        case IB_QPT_XRC_TGT:
+                if (!(hr_dev->caps.flags & HNS_ROCE_CAP_FLAG_XRC))
+                        goto out;
+                break;
         case IB_QPT_UD:
                 if (hr_dev->pci_dev->revision <= PCI_REVISION_ID_HIP08 &&
                     is_user)
                         goto out;
-                break;
+                fallthrough;
         case IB_QPT_RC:
         case IB_QPT_GSI:
                 break;

@@ -1124,8 +1145,8 @@ struct ib_qp *hns_roce_create_qp(struct ib_pd *pd,
                                  struct ib_qp_init_attr *init_attr,
                                  struct ib_udata *udata)
 {
-        struct hns_roce_dev *hr_dev = to_hr_dev(pd->device);
-        struct ib_device *ibdev = &hr_dev->ib_dev;
+        struct ib_device *ibdev = pd ? pd->device : init_attr->xrcd->device;
+        struct hns_roce_dev *hr_dev = to_hr_dev(ibdev);
         struct hns_roce_qp *hr_qp;
         int ret;

@@ -1137,6 +1158,15 @@ struct ib_qp *hns_roce_create_qp(struct ib_pd *pd,
         if (!hr_qp)
                 return ERR_PTR(-ENOMEM);
 
+        if (init_attr->qp_type == IB_QPT_XRC_INI)
+                init_attr->recv_cq = NULL;
+
+        if (init_attr->qp_type == IB_QPT_XRC_TGT) {
+                hr_qp->xrcdn = to_hr_xrcd(init_attr->xrcd)->xrcdn;
+                init_attr->recv_cq = NULL;
+                init_attr->send_cq = NULL;
+        }
+
         if (init_attr->qp_type == IB_QPT_GSI) {
                 hr_qp->port = init_attr->port_num - 1;
                 hr_qp->phy_port = hr_dev->iboe.phy_port[hr_qp->port];

@@ -1156,20 +1186,18 @@ struct ib_qp *hns_roce_create_qp(struct ib_pd *pd,
 
 int to_hr_qp_type(int qp_type)
 {
-        int transport_type;
-
-        if (qp_type == IB_QPT_RC)
-                transport_type = SERV_TYPE_RC;
-        else if (qp_type == IB_QPT_UC)
-                transport_type = SERV_TYPE_UC;
-        else if (qp_type == IB_QPT_UD)
-                transport_type = SERV_TYPE_UD;
-        else if (qp_type == IB_QPT_GSI)
-                transport_type = SERV_TYPE_UD;
-        else
-                transport_type = -1;
-
-        return transport_type;
+        switch (qp_type) {
+        case IB_QPT_RC:
+                return SERV_TYPE_RC;
+        case IB_QPT_UD:
+        case IB_QPT_GSI:
+                return SERV_TYPE_UD;
+        case IB_QPT_XRC_INI:
+        case IB_QPT_XRC_TGT:
+                return SERV_TYPE_XRC;
+        default:
+                return -1;
+        }
 }
 
 static int check_mtu_validate(struct hns_roce_dev *hr_dev,

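The relabeled error path above restores the kernel's usual goto-unwind invariant: the label taken on failure of step N frees exactly what steps 1..N-1 allocated, in reverse order. A generic sketch of the idiom (plain C, names illustrative rather than the driver's):

#include <stdlib.h>

struct obj {
        void *buf, *db, *ctx;
};

static int obj_create(struct obj *o)
{
        o->buf = malloc(64); /* step 1, cf. alloc_qp_buf() */
        if (!o->buf)
                goto err_buf;

        o->db = malloc(64); /* step 2, cf. alloc_qp_db() */
        if (!o->db)
                goto err_db;

        o->ctx = malloc(64); /* step 3, cf. alloc_qpc() */
        if (!o->ctx)
                goto err_ctx;

        return 0; /* success: the object keeps all three buffers */

err_ctx: /* step 3 failed: undo step 2, then fall through to undo step 1 */
        free(o->db);
err_db:
        free(o->buf);
err_buf:
        return -1;
}

int main(void)
{
        struct obj o;

        if (obj_create(&o))
                return 1;
        free(o.ctx);
        free(o.db);
        free(o.buf);
        return 0;
}
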
@@ -314,6 +314,9 @@ static void set_srq_ext_param(struct hns_roce_srq *srq,
 {
         srq->cqn = ib_srq_has_cq(init_attr->srq_type) ?
                    to_hr_cq(init_attr->ext.cq)->cqn : 0;
+
+        srq->xrcdn = (init_attr->srq_type == IB_SRQT_XRC) ?
+                     to_hr_xrcd(init_attr->ext.xrc.xrcd)->xrcdn : 0;
 }
 
 static int set_srq_param(struct hns_roce_srq *srq,

@@ -412,7 +415,7 @@ int hns_roce_create_srq(struct ib_srq *ib_srq,
                 }
         }
 
-        srq->db_reg_l = hr_dev->reg_base + SRQ_DB_REG;
+        srq->db_reg = hr_dev->reg_base + SRQ_DB_REG;
         srq->event = hns_roce_ib_srq_event;
         atomic_set(&srq->refcount, 1);
         init_completion(&srq->free);

@@ -504,15 +504,6 @@ static inline void i40iw_free_resource(struct i40iw_device *iwdev,
         spin_unlock_irqrestore(&iwdev->resource_lock, flags);
 }
 
-/**
- * to_iwhdl - Get the handler from the device pointer
- * @iwdev: device pointer
- **/
-static inline struct i40iw_handler *to_iwhdl(struct i40iw_device *iw_dev)
-{
-        return container_of(iw_dev, struct i40iw_handler, device);
-}
-
 struct i40iw_handler *i40iw_find_netdev(struct net_device *netdev);
 
 /**

@@ -905,7 +905,7 @@ static int i40iw_send_mpa_reject(struct i40iw_cm_node *cm_node,
 }
 
 /**
- * recv_mpa - process an IETF MPA frame
+ * i40iw_parse_mpa - process an IETF MPA frame
  * @cm_node: connection's node
  * @buffer: Data pointer
  * @type: to return accept or reject

@@ -4360,7 +4360,7 @@ void i40iw_cm_teardown_connections(struct i40iw_device *iwdev, u32 *ipaddr,
 }
 
 /**
- * i40iw_ifdown_notify - process an ifdown on an interface
+ * i40iw_if_notify - process an ifdown on an interface
  * @iwdev: device pointer
  * @netdev: network interface device structure
  * @ipaddr: Pointer to IPv4 or IPv6 address

@@ -285,7 +285,7 @@ static enum i40iw_status_code i40iw_hmc_finish_add_sd_reg(struct i40iw_sc_dev *d
 }
 
 /**
- * i40iw_create_iw_hmc_obj - allocate backing store for hmc objects
+ * i40iw_sc_create_hmc_obj - allocate backing store for hmc objects
  * @dev: pointer to the device structure
  * @info: pointer to i40iw_hmc_iw_create_obj_info struct
  *

@@ -434,7 +434,7 @@ static enum i40iw_status_code i40iw_finish_del_sd_reg(struct i40iw_sc_dev *dev,
 }
 
 /**
- * i40iw_del_iw_hmc_obj - remove pe hmc objects
+ * i40iw_sc_del_hmc_obj - remove pe hmc objects
  * @dev: pointer to the device structure
  * @info: pointer to i40iw_hmc_del_obj_info struct
  * @reset: true if called before reset

@@ -78,7 +78,7 @@ static struct i40e_client i40iw_client;
 static char i40iw_client_name[I40E_CLIENT_STR_LENGTH] = "i40iw";
 
 static LIST_HEAD(i40iw_handlers);
-static spinlock_t i40iw_handler_lock;
+static DEFINE_SPINLOCK(i40iw_handler_lock);
 
 static enum i40iw_status_code i40iw_virtchnl_send(struct i40iw_sc_dev *dev,
                                                   u32 vf_id, u8 *msg, u16 len);

@@ -251,7 +251,7 @@ static void i40iw_destroy_cqp(struct i40iw_device *iwdev, bool free_hwcqp)
 }
 
 /**
- * i40iw_disable_irqs - disable device interrupts
+ * i40iw_disable_irq - disable device interrupts
  * @dev: hardware control device structure
  * @msix_vec: msix vector to disable irq
  * @dev_id: parameter to pass to free_irq (used during irq setup)

@@ -2043,7 +2043,6 @@ static int __init i40iw_init_module(void)
         i40iw_client.ops = &i40e_ops;
         memcpy(i40iw_client.name, i40iw_client_name, I40E_CLIENT_STR_LENGTH);
         i40iw_client.type = I40E_CLIENT_IWARP;
-        spin_lock_init(&i40iw_handler_lock);
         ret = i40e_register_client(&i40iw_client);
         i40iw_register_notifiers();

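The DEFINE_SPINLOCK() conversion above removes an ordering hazard: a statically defined lock is valid from the moment the module is loaded, whereas spin_lock_init() must have run before the first spin_lock(). A minimal module-style sketch of the two forms (hypothetical names, not from the i40iw driver):

#include <linux/module.h>
#include <linux/spinlock.h>

/* Compile-time initialization: usable before any init code runs. */
static DEFINE_SPINLOCK(demo_static_lock);

/* Runtime initialization: must not be locked before demo_init(). */
static spinlock_t demo_runtime_lock;

static int __init demo_init(void)
{
        spin_lock_init(&demo_runtime_lock);

        spin_lock(&demo_static_lock); /* safe: initialized at build time */
        spin_unlock(&demo_static_lock);
        return 0;
}

static void __exit demo_exit(void)
{
}

module_init(demo_init);
module_exit(demo_exit);
MODULE_LICENSE("GPL");
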
@@ -50,17 +50,6 @@ static inline void set_64bit_val(u64 *wqe_words, u32 byte_index, u64 value)
         wqe_words[byte_index >> 3] = value;
 }
 
-/**
- * set_32bit_val - set 32 value to hw wqe
- * @wqe_words: wqe addr to write
- * @byte_index: index in wqe
- * @value: value to write
- **/
-static inline void set_32bit_val(u32 *wqe_words, u32 byte_index, u32 value)
-{
-        wqe_words[byte_index >> 2] = value;
-}
-
 /**
  * get_64bit_val - read 64 bit value from wqe
  * @wqe_words: wqe addr

@@ -72,17 +61,6 @@ static inline void get_64bit_val(u64 *wqe_words, u32 byte_index, u64 *value)
         *value = wqe_words[byte_index >> 3];
 }
 
-/**
- * get_32bit_val - read 32 bit value from wqe
- * @wqe_words: wqe addr
- * @byte_index: index to reaad from
- * @value: return 32 bit value
- **/
-static inline void get_32bit_val(u32 *wqe_words, u32 byte_index, u32 *value)
-{
-        *value = wqe_words[byte_index >> 2];
-}
-
 struct i40iw_dma_mem {
         void *va;
         dma_addr_t pa;

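The deleted 32-bit helpers were unused, but the idiom they share with the surviving 64-bit ones is worth noting: a byte offset into a WQE is turned into an array index by shifting (>> 2 for 32-bit words, >> 3 for 64-bit). A standalone illustration:

#include <assert.h>
#include <stdint.h>

static inline void set_32bit_val(uint32_t *wqe_words, uint32_t byte_index,
                                 uint32_t value)
{
        wqe_words[byte_index >> 2] = value; /* byte 12 -> word index 3 */
}

int main(void)
{
        uint32_t wqe[8] = { 0 };

        set_32bit_val(wqe, 12, 0xabcd);
        assert(wqe[3] == 0xabcd);
        return 0;
}
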
Some files were not shown because too many files have changed in this diff.