IB/core: Introduce and use rdma_gid_table()

There are several places where a gid table is accessed.
Add a tiny helper function, rdma_gid_table(), to avoid code
duplication at such places.

Signed-off-by: Parav Pandit <parav@mellanox.com>
Signed-off-by: Leon Romanovsky <leonro@mellanox.com>
Signed-off-by: Jason Gunthorpe <jgg@mellanox.com>
This commit is contained in:
Parav Pandit 2018-05-22 20:33:46 +03:00 committed by Jason Gunthorpe
parent 25e62655c7
commit 724631a9c6
1 changed file with 15 additions and 10 deletions

View File

@@ -159,6 +159,11 @@ int ib_cache_gid_parse_type_str(const char *buf)
 }
 EXPORT_SYMBOL(ib_cache_gid_parse_type_str);
/*
 * Return the GID table of a given port of the device. Ports are
 * 1-based (starting at rdma_start_port()), while the cache array is
 * 0-based, hence the index adjustment.
 */
static struct ib_gid_table *rdma_gid_table(struct ib_device *device, u8 port)
{
	int idx = port - rdma_start_port(device);

	return device->cache.ports[idx].gid;
}
static void del_roce_gid(struct ib_device *device, u8 port_num,
			 struct ib_gid_table *table, int ix)
{
@@ -376,7 +381,7 @@ static int __ib_cache_gid_add(struct ib_device *ib_dev, u8 port,
	if (rdma_is_zero_gid(gid))
		return -EINVAL;

-	table = ib_dev->cache.ports[port - rdma_start_port(ib_dev)].gid;
+	table = rdma_gid_table(ib_dev, port);

	mutex_lock(&table->lock);
@@ -443,7 +448,7 @@ _ib_cache_gid_del(struct ib_device *ib_dev, u8 port,
	int ret = 0;
	int ix;

-	table = ib_dev->cache.ports[port - rdma_start_port(ib_dev)].gid;
+	table = rdma_gid_table(ib_dev, port);

	mutex_lock(&table->lock);
@@ -482,7 +487,7 @@ int ib_cache_gid_del_all_netdev_gids(struct ib_device *ib_dev, u8 port,
	int ix;
	bool deleted = false;

-	table = ib_dev->cache.ports[port - rdma_start_port(ib_dev)].gid;
+	table = rdma_gid_table(ib_dev, port);

	mutex_lock(&table->lock);
@@ -506,7 +511,7 @@ static int __ib_cache_gid_get(struct ib_device *ib_dev, u8 port, int index,
{
	struct ib_gid_table *table;

-	table = ib_dev->cache.ports[port - rdma_start_port(ib_dev)].gid;
+	table = rdma_gid_table(ib_dev, port);

	if (index < 0 || index >= table->sz)
		return -EINVAL;
@@ -599,7 +604,7 @@ int ib_find_cached_gid_by_port(struct ib_device *ib_dev,
	if (!rdma_is_port_valid(ib_dev, port))
		return -ENOENT;

-	table = ib_dev->cache.ports[port - rdma_start_port(ib_dev)].gid;
+	table = rdma_gid_table(ib_dev, port);

	if (ndev)
		mask |= GID_ATTR_FIND_MASK_NETDEV;
@@ -657,7 +662,7 @@ static int ib_cache_gid_find_by_filter(struct ib_device *ib_dev,
	    !rdma_protocol_roce(ib_dev, port))
		return -EPROTONOSUPPORT;

-	table = ib_dev->cache.ports[port - rdma_start_port(ib_dev)].gid;
+	table = rdma_gid_table(ib_dev, port);

	read_lock_irqsave(&table->rwlock, flags);
	for (i = 0; i < table->sz; i++) {
@@ -756,7 +761,7 @@ void ib_cache_gid_set_default_gid(struct ib_device *ib_dev, u8 port,
	unsigned int gid_type;
	unsigned long mask;

-	table = ib_dev->cache.ports[port - rdma_start_port(ib_dev)].gid;
+	table = rdma_gid_table(ib_dev, port);

	mask = GID_ATTR_FIND_MASK_GID_TYPE |
	       GID_ATTR_FIND_MASK_DEFAULT |
@@ -877,7 +882,7 @@ int ib_get_cached_gid(struct ib_device *device,
	if (!rdma_is_port_valid(device, port_num))
		return -EINVAL;

-	table = device->cache.ports[port_num - rdma_start_port(device)].gid;
+	table = rdma_gid_table(device, port_num);

	read_lock_irqsave(&table->rwlock, flags);
	res = __ib_cache_gid_get(device, port_num, index, gid, gid_attr);
	read_unlock_irqrestore(&table->rwlock, flags);
@@ -1095,7 +1100,7 @@ static int config_non_roce_gid_cache(struct ib_device *device,
	gid_attr.device = device;
	gid_attr.port_num = port;

-	table = device->cache.ports[port - rdma_start_port(device)].gid;
+	table = rdma_gid_table(device, port);

	mutex_lock(&table->lock);
	for (i = 0; i < gid_tbl_len; ++i) {
@@ -1128,7 +1133,7 @@ static void ib_cache_update(struct ib_device *device,
	if (!rdma_is_port_valid(device, port))
		return;

-	table = device->cache.ports[port - rdma_start_port(device)].gid;
+	table = rdma_gid_table(device, port);

	tprops = kmalloc(sizeof *tprops, GFP_KERNEL);
	if (!tprops)