mlx4_core: Add module parameter for number of MTTs per segment
The current MTT allocator uses kmalloc() to allocate a buffer for its buddy allocator, and thus is limited in the number of MTT segments that it can control. As a result, the size of memory that can be registered is limited too. This patch uses a module parameter to control the number of MTT entries that each segment represents, allowing more memory to be registered with the same number of segments.

Signed-off-by: Eli Cohen <eli@mellanox.co.il>
Signed-off-by: Roland Dreier <rolandd@cisco.com>
This commit is contained in:
parent
210af919c9
commit
ab6bf42e23
|
@@ -100,6 +100,10 @@ module_param_named(use_prio, use_prio, bool, 0444);
|
||||||
MODULE_PARM_DESC(use_prio, "Enable steering by VLAN priority on ETH ports "
|
MODULE_PARM_DESC(use_prio, "Enable steering by VLAN priority on ETH ports "
|
||||||
"(0/1, default 0)");
|
"(0/1, default 0)");
|
||||||
|
|
||||||
|
static int log_mtts_per_seg = ilog2(MLX4_MTT_ENTRY_PER_SEG);
|
||||||
|
module_param_named(log_mtts_per_seg, log_mtts_per_seg, int, 0444);
|
||||||
|
MODULE_PARM_DESC(log_mtts_per_seg, "Log2 number of MTT entries per segment (1-5)");
|
||||||
|
|
||||||
int mlx4_check_port_params(struct mlx4_dev *dev,
|
int mlx4_check_port_params(struct mlx4_dev *dev,
|
||||||
enum mlx4_port_type *port_type)
|
enum mlx4_port_type *port_type)
|
||||||
{
|
{
|
||||||
|
@@ -203,12 +207,13 @@ static int mlx4_dev_cap(struct mlx4_dev *dev, struct mlx4_dev_cap *dev_cap)
|
||||||
dev->caps.max_cqes = dev_cap->max_cq_sz - 1;
|
dev->caps.max_cqes = dev_cap->max_cq_sz - 1;
|
||||||
dev->caps.reserved_cqs = dev_cap->reserved_cqs;
|
dev->caps.reserved_cqs = dev_cap->reserved_cqs;
|
||||||
dev->caps.reserved_eqs = dev_cap->reserved_eqs;
|
dev->caps.reserved_eqs = dev_cap->reserved_eqs;
|
||||||
|
dev->caps.mtts_per_seg = 1 << log_mtts_per_seg;
|
||||||
dev->caps.reserved_mtts = DIV_ROUND_UP(dev_cap->reserved_mtts,
|
dev->caps.reserved_mtts = DIV_ROUND_UP(dev_cap->reserved_mtts,
|
||||||
MLX4_MTT_ENTRY_PER_SEG);
|
dev->caps.mtts_per_seg);
|
||||||
dev->caps.reserved_mrws = dev_cap->reserved_mrws;
|
dev->caps.reserved_mrws = dev_cap->reserved_mrws;
|
||||||
dev->caps.reserved_uars = dev_cap->reserved_uars;
|
dev->caps.reserved_uars = dev_cap->reserved_uars;
|
||||||
dev->caps.reserved_pds = dev_cap->reserved_pds;
|
dev->caps.reserved_pds = dev_cap->reserved_pds;
|
||||||
dev->caps.mtt_entry_sz = MLX4_MTT_ENTRY_PER_SEG * dev_cap->mtt_entry_sz;
|
dev->caps.mtt_entry_sz = dev->caps.mtts_per_seg * dev_cap->mtt_entry_sz;
|
||||||
dev->caps.max_msg_sz = dev_cap->max_msg_sz;
|
dev->caps.max_msg_sz = dev_cap->max_msg_sz;
|
||||||
dev->caps.page_size_cap = ~(u32) (dev_cap->min_page_sz - 1);
|
dev->caps.page_size_cap = ~(u32) (dev_cap->min_page_sz - 1);
|
||||||
dev->caps.flags = dev_cap->flags;
|
dev->caps.flags = dev_cap->flags;
|
||||||
|
@@ -1304,6 +1309,11 @@ static int __init mlx4_verify_params(void)
|
||||||
return -1;
|
return -1;
|
||||||
}
|
}
|
||||||
|
|
||||||
|
if ((log_mtts_per_seg < 1) || (log_mtts_per_seg > 5)) {
|
||||||
|
printk(KERN_WARNING "mlx4_core: bad log_mtts_per_seg: %d\n", log_mtts_per_seg);
|
||||||
|
return -1;
|
||||||
|
}
|
||||||
|
|
||||||
return 0;
|
return 0;
|
||||||
}
|
}
|
||||||
|
|
||||||
|
|
|
@@ -209,7 +209,7 @@ int mlx4_mtt_init(struct mlx4_dev *dev, int npages, int page_shift,
|
||||||
} else
|
} else
|
||||||
mtt->page_shift = page_shift;
|
mtt->page_shift = page_shift;
|
||||||
|
|
||||||
for (mtt->order = 0, i = MLX4_MTT_ENTRY_PER_SEG; i < npages; i <<= 1)
|
for (mtt->order = 0, i = dev->caps.mtts_per_seg; i < npages; i <<= 1)
|
||||||
++mtt->order;
|
++mtt->order;
|
||||||
|
|
||||||
mtt->first_seg = mlx4_alloc_mtt_range(dev, mtt->order);
|
mtt->first_seg = mlx4_alloc_mtt_range(dev, mtt->order);
|
||||||
|
@@ -350,7 +350,7 @@ int mlx4_mr_enable(struct mlx4_dev *dev, struct mlx4_mr *mr)
|
||||||
mpt_entry->pd_flags |= cpu_to_be32(MLX4_MPT_PD_FLAG_FAST_REG |
|
mpt_entry->pd_flags |= cpu_to_be32(MLX4_MPT_PD_FLAG_FAST_REG |
|
||||||
MLX4_MPT_PD_FLAG_RAE);
|
MLX4_MPT_PD_FLAG_RAE);
|
||||||
mpt_entry->mtt_sz = cpu_to_be32((1 << mr->mtt.order) *
|
mpt_entry->mtt_sz = cpu_to_be32((1 << mr->mtt.order) *
|
||||||
MLX4_MTT_ENTRY_PER_SEG);
|
dev->caps.mtts_per_seg);
|
||||||
} else {
|
} else {
|
||||||
mpt_entry->flags |= cpu_to_be32(MLX4_MPT_FLAG_SW_OWNS);
|
mpt_entry->flags |= cpu_to_be32(MLX4_MPT_FLAG_SW_OWNS);
|
||||||
}
|
}
|
||||||
|
@@ -391,7 +391,7 @@ static int mlx4_write_mtt_chunk(struct mlx4_dev *dev, struct mlx4_mtt *mtt,
|
||||||
(start_index + npages - 1) / (PAGE_SIZE / sizeof (u64)))
|
(start_index + npages - 1) / (PAGE_SIZE / sizeof (u64)))
|
||||||
return -EINVAL;
|
return -EINVAL;
|
||||||
|
|
||||||
if (start_index & (MLX4_MTT_ENTRY_PER_SEG - 1))
|
if (start_index & (dev->caps.mtts_per_seg - 1))
|
||||||
return -EINVAL;
|
return -EINVAL;
|
||||||
|
|
||||||
mtts = mlx4_table_find(&priv->mr_table.mtt_table, mtt->first_seg +
|
mtts = mlx4_table_find(&priv->mr_table.mtt_table, mtt->first_seg +
|
||||||
|
|
|
@@ -98,7 +98,7 @@ u64 mlx4_make_profile(struct mlx4_dev *dev,
|
||||||
profile[MLX4_RES_EQ].size = dev_cap->eqc_entry_sz;
|
profile[MLX4_RES_EQ].size = dev_cap->eqc_entry_sz;
|
||||||
profile[MLX4_RES_DMPT].size = dev_cap->dmpt_entry_sz;
|
profile[MLX4_RES_DMPT].size = dev_cap->dmpt_entry_sz;
|
||||||
profile[MLX4_RES_CMPT].size = dev_cap->cmpt_entry_sz;
|
profile[MLX4_RES_CMPT].size = dev_cap->cmpt_entry_sz;
|
||||||
profile[MLX4_RES_MTT].size = MLX4_MTT_ENTRY_PER_SEG * dev_cap->mtt_entry_sz;
|
profile[MLX4_RES_MTT].size = dev->caps.mtts_per_seg * dev_cap->mtt_entry_sz;
|
||||||
profile[MLX4_RES_MCG].size = MLX4_MGM_ENTRY_SIZE;
|
profile[MLX4_RES_MCG].size = MLX4_MGM_ENTRY_SIZE;
|
||||||
|
|
||||||
profile[MLX4_RES_QP].num = request->num_qp;
|
profile[MLX4_RES_QP].num = request->num_qp;
|
||||||
|
|
|
@@ -210,6 +210,7 @@ struct mlx4_caps {
|
||||||
int num_comp_vectors;
|
int num_comp_vectors;
|
||||||
int num_mpts;
|
int num_mpts;
|
||||||
int num_mtt_segs;
|
int num_mtt_segs;
|
||||||
|
int mtts_per_seg;
|
||||||
int fmr_reserved_mtts;
|
int fmr_reserved_mtts;
|
||||||
int reserved_mtts;
|
int reserved_mtts;
|
||||||
int reserved_mrws;
|
int reserved_mrws;
|
||||||
|
|
Loading…
Reference in New Issue