IB: Replace ib_umem page_size by page_shift

The page size is held by struct ib_umem in the page_size field.

It is better to store it as an exponent, because a page size is by nature
always a power of two and is used as a factor, a divisor, or an argument to ilog2().

Converting page_size to page_shift makes the code portable and avoids the
following error when compiling on ARM:

  ERROR: "__aeabi_uldivmod" [drivers/infiniband/core/ib_core.ko] undefined!
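
To illustrate the portability point, a minimal sketch (not part of the
patch; the helper name is hypothetical): dividing a 64-bit length by a page
size held in a variable makes the compiler on 32-bit ARM emit a call to the
libgcc helper __aeabi_uldivmod, which the kernel does not provide, while
shifting by the stored exponent compiles to a single instruction:

  /* Hypothetical helper, for illustration only */
  static inline u64 umem_num_pages(u64 length, const struct ib_umem *umem)
  {
  	/* return length / umem->page_size;   -- emits __aeabi_uldivmod */
  	return length >> umem->page_shift;   /* plain shift, no libgcc call */
  }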

CC: Selvin Xavier <selvin.xavier@broadcom.com>
CC: Steve Wise <swise@chelsio.com>
CC: Lijun Ou <oulijun@huawei.com>
CC: Shiraz Saleem <shiraz.saleem@intel.com>
CC: Adit Ranadive <aditr@vmware.com>
CC: Dennis Dalessandro <dennis.dalessandro@intel.com>
CC: Ram Amrani <Ram.Amrani@Cavium.com>
Signed-off-by: Artemy Kovalyov <artemyko@mellanox.com>
Signed-off-by: Leon Romanovsky <leon@kernel.org>
Acked-by: Ram Amrani <Ram.Amrani@cavium.com>
Acked-by: Shiraz Saleem <shiraz.saleem@intel.com>
Acked-by: Selvin Xavier <selvin.xavier@broadcom.com>
Acked-by: Adit Ranadive <aditr@vmware.com>
Signed-off-by: Doug Ledford <dledford@redhat.com>
Author: Artemy Kovalyov, 2017-04-05 09:23:50 +03:00 (committed by Doug Ledford)
commit 3e7e1193e2, parent 8d2216be28
23 changed files with 67 additions and 77 deletions

@@ -115,11 +115,11 @@ struct ib_umem *ib_umem_get(struct ib_ucontext *context, unsigned long addr,
 	if (!umem)
 		return ERR_PTR(-ENOMEM);
 
-	umem->context   = context;
-	umem->length    = size;
-	umem->address   = addr;
-	umem->page_size = PAGE_SIZE;
-	umem->pid       = get_task_pid(current, PIDTYPE_PID);
+	umem->context    = context;
+	umem->length     = size;
+	umem->address    = addr;
+	umem->page_shift = PAGE_SHIFT;
+	umem->pid        = get_task_pid(current, PIDTYPE_PID);
 	/*
 	 * We ask for writable memory if any of the following
 	 * access flags are set. "Local write" and "remote write"
@@ -315,7 +315,6 @@ EXPORT_SYMBOL(ib_umem_release);
 int ib_umem_page_count(struct ib_umem *umem)
 {
-	int shift;
 	int i;
 	int n;
 	struct scatterlist *sg;
@@ -323,11 +322,9 @@ int ib_umem_page_count(struct ib_umem *umem)
 	if (umem->odp_data)
 		return ib_umem_num_pages(umem);
 
-	shift = ilog2(umem->page_size);
-
 	n = 0;
 	for_each_sg(umem->sg_head.sgl, sg, umem->nmap, i)
-		n += sg_dma_len(sg) >> shift;
+		n += sg_dma_len(sg) >> umem->page_shift;
 
 	return n;
 }

@@ -254,11 +254,11 @@ struct ib_umem *ib_alloc_odp_umem(struct ib_ucontext *context,
 	if (!umem)
 		return ERR_PTR(-ENOMEM);
 
-	umem->context   = context;
-	umem->length    = size;
-	umem->address   = addr;
-	umem->page_size = PAGE_SIZE;
-	umem->writable  = 1;
+	umem->context    = context;
+	umem->length     = size;
+	umem->address    = addr;
+	umem->page_shift = PAGE_SHIFT;
+	umem->writable   = 1;
 
 	odp_data = kzalloc(sizeof(*odp_data), GFP_KERNEL);
 	if (!odp_data) {
@@ -707,7 +707,7 @@ void ib_umem_odp_unmap_dma_pages(struct ib_umem *umem, u64 virt,
 	 * invalidations, so we must make sure we free each page only
 	 * once. */
 	mutex_lock(&umem->odp_data->umem_mutex);
-	for (addr = virt; addr < bound; addr += (u64)umem->page_size) {
+	for (addr = virt; addr < bound; addr += BIT(umem->page_shift)) {
 		idx = (addr - ib_umem_start(umem)) / PAGE_SIZE;
 		if (umem->odp_data->page_list[idx]) {
 			struct page *page = umem->odp_data->page_list[idx];

@@ -3016,7 +3016,7 @@ struct ib_mr *bnxt_re_reg_user_mr(struct ib_pd *ib_pd, u64 start, u64 length,
 	struct bnxt_re_mr *mr;
 	struct ib_umem *umem;
 	u64 *pbl_tbl, *pbl_tbl_orig;
-	int i, umem_pgs, pages, page_shift, rc;
+	int i, umem_pgs, pages, rc;
 	struct scatterlist *sg;
 	int entry;
@@ -3062,22 +3062,22 @@ struct ib_mr *bnxt_re_reg_user_mr(struct ib_pd *ib_pd, u64 start, u64 length,
 	}
 	pbl_tbl_orig = pbl_tbl;
 
-	page_shift = ilog2(umem->page_size);
 	if (umem->hugetlb) {
 		dev_err(rdev_to_dev(rdev), "umem hugetlb not supported!");
 		rc = -EFAULT;
 		goto fail;
 	}
 
-	if (umem->page_size != PAGE_SIZE) {
-		dev_err(rdev_to_dev(rdev), "umem page size unsupported!");
+	if (umem->page_shift != PAGE_SHIFT) {
+		dev_err(rdev_to_dev(rdev), "umem page shift unsupported!");
 		rc = -EFAULT;
 		goto fail;
 	}
 
 	/* Map umem buf ptrs to the PBL */
 	for_each_sg(umem->sg_head.sgl, sg, umem->nmap, entry) {
-		pages = sg_dma_len(sg) >> page_shift;
+		pages = sg_dma_len(sg) >> umem->page_shift;
 		for (i = 0; i < pages; i++, pbl_tbl++)
-			*pbl_tbl = sg_dma_address(sg) + (i << page_shift);
+			*pbl_tbl = sg_dma_address(sg) + (i << umem->page_shift);
 	}
 	rc = bnxt_qplib_reg_mr(&rdev->qplib_res, &mr->qplib_mr, pbl_tbl_orig,
 			       umem_pgs, false);

@@ -581,7 +581,7 @@ static struct ib_mr *iwch_reg_user_mr(struct ib_pd *pd, u64 start, u64 length,
 		return ERR_PTR(err);
 	}
 
-	shift = ffs(mhp->umem->page_size) - 1;
+	shift = mhp->umem->page_shift;
 
 	n = mhp->umem->nmap;
 
@@ -601,7 +601,7 @@ static struct ib_mr *iwch_reg_user_mr(struct ib_pd *pd, u64 start, u64 length,
 		len = sg_dma_len(sg) >> shift;
 		for (k = 0; k < len; ++k) {
 			pages[i++] = cpu_to_be64(sg_dma_address(sg) +
-						 mhp->umem->page_size * k);
+						 (k << shift));
 			if (i == PAGE_SIZE / sizeof *pages) {
 				err = iwch_write_pbl(mhp, pages, i, n);
 				if (err)

@@ -515,7 +515,7 @@ struct ib_mr *c4iw_reg_user_mr(struct ib_pd *pd, u64 start, u64 length,
 		return ERR_PTR(err);
 	}
 
-	shift = ffs(mhp->umem->page_size) - 1;
+	shift = mhp->umem->page_shift;
 
 	n = mhp->umem->nmap;
 	err = alloc_pbl(mhp, n);
@@ -534,7 +534,7 @@ struct ib_mr *c4iw_reg_user_mr(struct ib_pd *pd, u64 start, u64 length,
 		len = sg_dma_len(sg) >> shift;
 		for (k = 0; k < len; ++k) {
 			pages[i++] = cpu_to_be64(sg_dma_address(sg) +
-						 mhp->umem->page_size * k);
+						 (k << shift));
 			if (i == PAGE_SIZE / sizeof *pages) {
 				err = write_pbl(&mhp->rhp->rdev,
 						pages,

@@ -219,8 +219,7 @@ static int hns_roce_ib_get_cq_umem(struct hns_roce_dev *hr_dev,
 		return PTR_ERR(*umem);
 
 	ret = hns_roce_mtt_init(hr_dev, ib_umem_page_count(*umem),
-				ilog2((unsigned int)(*umem)->page_size),
-				&buf->hr_mtt);
+				(*umem)->page_shift, &buf->hr_mtt);
 	if (ret)
 		goto err_buf;

@@ -504,7 +504,8 @@ int hns_roce_ib_umem_write_mtt(struct hns_roce_dev *hr_dev,
 	for_each_sg(umem->sg_head.sgl, sg, umem->nmap, entry) {
 		len = sg_dma_len(sg) >> mtt->page_shift;
 		for (k = 0; k < len; ++k) {
-			pages[i++] = sg_dma_address(sg) + umem->page_size * k;
+			pages[i++] = sg_dma_address(sg) +
+				     (k << umem->page_shift);
 			if (i == PAGE_SIZE / sizeof(u64)) {
 				ret = hns_roce_write_mtt(hr_dev, mtt, n, i,
 							 pages);
@@ -564,9 +565,9 @@ struct ib_mr *hns_roce_reg_user_mr(struct ib_pd *pd, u64 start, u64 length,
 	}
 
 	n = ib_umem_page_count(mr->umem);
-	if (mr->umem->page_size != HNS_ROCE_HEM_PAGE_SIZE) {
-		dev_err(dev, "Just support 4K page size but is 0x%x now!\n",
-			mr->umem->page_size);
+	if (mr->umem->page_shift != HNS_ROCE_HEM_PAGE_SHIFT) {
+		dev_err(dev, "Just support 4K page size but is 0x%lx now!\n",
+			BIT(mr->umem->page_shift));
 		ret = -EINVAL;
 		goto err_umem;
 	}

@@ -437,8 +437,7 @@ static int hns_roce_create_qp_common(struct hns_roce_dev *hr_dev,
 		}
 
 		ret = hns_roce_mtt_init(hr_dev, ib_umem_page_count(hr_qp->umem),
-					ilog2((unsigned int)hr_qp->umem->page_size),
-					&hr_qp->mtt);
+					hr_qp->umem->page_shift, &hr_qp->mtt);
 		if (ret) {
 			dev_err(dev, "hns_roce_mtt_init error for create qp\n");
 			goto err_buf;

@@ -1345,7 +1345,7 @@ static void i40iw_copy_user_pgaddrs(struct i40iw_mr *iwmr,
 {
 	struct ib_umem *region = iwmr->region;
 	struct i40iw_pbl *iwpbl = &iwmr->iwpbl;
-	int chunk_pages, entry, pg_shift, i;
+	int chunk_pages, entry, i;
 	struct i40iw_pble_alloc *palloc = &iwpbl->pble_alloc;
 	struct i40iw_pble_info *pinfo;
 	struct scatterlist *sg;
@@ -1354,14 +1354,14 @@ static void i40iw_copy_user_pgaddrs(struct i40iw_mr *iwmr,
 	pinfo = (level == I40IW_LEVEL_1) ? NULL : palloc->level2.leaf;
 
-	pg_shift = ffs(region->page_size) - 1;
 	for_each_sg(region->sg_head.sgl, sg, region->nmap, entry) {
-		chunk_pages = sg_dma_len(sg) >> pg_shift;
+		chunk_pages = sg_dma_len(sg) >> region->page_shift;
 		if ((iwmr->type == IW_MEMREG_TYPE_QP) &&
 		    !iwpbl->qp_mr.sq_page)
 			iwpbl->qp_mr.sq_page = sg_page(sg);
 		for (i = 0; i < chunk_pages; i++) {
-			pg_addr = sg_dma_address(sg) + region->page_size * i;
+			pg_addr = sg_dma_address(sg) +
+				  (i << region->page_shift);
 			if ((entry + i) == 0)
 				*pbl = cpu_to_le64(pg_addr & iwmr->page_msk);
@@ -1847,7 +1847,7 @@ static struct ib_mr *i40iw_reg_user_mr(struct ib_pd *pd,
 	iwmr->ibmr.device = pd->device;
 	ucontext = to_ucontext(pd->uobject->context);
-	iwmr->page_size = region->page_size;
+	iwmr->page_size = PAGE_SIZE;
 	iwmr->page_msk = PAGE_MASK;
 
 	if (region->hugetlb && (req.reg_type == IW_MEMREG_TYPE_MEM))

@@ -147,7 +147,7 @@ static int mlx4_ib_get_cq_umem(struct mlx4_ib_dev *dev, struct ib_ucontext *cont
 		return PTR_ERR(*umem);
 
 	err = mlx4_mtt_init(dev->dev, ib_umem_page_count(*umem),
-			    ilog2((*umem)->page_size), &buf->mtt);
+			    (*umem)->page_shift, &buf->mtt);
 	if (err)
 		goto err_buf;

@@ -107,7 +107,7 @@ int mlx4_ib_umem_write_mtt(struct mlx4_ib_dev *dev, struct mlx4_mtt *mtt,
 		len = sg_dma_len(sg) >> mtt->page_shift;
 		for (k = 0; k < len; ++k) {
 			pages[i++] = sg_dma_address(sg) +
-				     umem->page_size * k;
+				     (k << umem->page_shift);
 			/*
 			 * Be friendly to mlx4_write_mtt() and
 			 * pass it chunks of appropriate size.
@@ -155,7 +155,7 @@ struct ib_mr *mlx4_ib_reg_user_mr(struct ib_pd *pd, u64 start, u64 length,
 	}
 
 	n = ib_umem_page_count(mr->umem);
-	shift = ilog2(mr->umem->page_size);
+	shift = mr->umem->page_shift;
 
 	err = mlx4_mr_alloc(dev->dev, to_mpd(pd)->pdn, virt_addr, length,
 			    convert_access(access_flags), n, shift, &mr->mmr);
@@ -239,7 +239,7 @@ int mlx4_ib_rereg_user_mr(struct ib_mr *mr, int flags,
 			goto release_mpt_entry;
 		}
 		n = ib_umem_page_count(mmr->umem);
-		shift = ilog2(mmr->umem->page_size);
+		shift = mmr->umem->page_shift;
 
 		err = mlx4_mr_rereg_mem_write(dev->dev, &mmr->mmr,
 					      virt_addr, length, n, shift,

@@ -745,7 +745,7 @@ static int create_qp_common(struct mlx4_ib_dev *dev, struct ib_pd *pd,
 		}
 
 		err = mlx4_mtt_init(dev->dev, ib_umem_page_count(qp->umem),
-				    ilog2(qp->umem->page_size), &qp->mtt);
+				    qp->umem->page_shift, &qp->mtt);
 		if (err)
 			goto err_buf;

@@ -122,7 +122,7 @@ struct ib_srq *mlx4_ib_create_srq(struct ib_pd *pd,
 		}
 
 		err = mlx4_mtt_init(dev->dev, ib_umem_page_count(srq->umem),
-				    ilog2(srq->umem->page_size), &srq->mtt);
+				    srq->umem->page_shift, &srq->mtt);
 		if (err)
 			goto err_buf;

@@ -59,7 +59,7 @@ void mlx5_ib_cont_pages(struct ib_umem *umem, u64 addr,
 	u64 pfn;
 	struct scatterlist *sg;
 	int entry;
-	unsigned long page_shift = ilog2(umem->page_size);
+	unsigned long page_shift = umem->page_shift;
 
 	/* With ODP we must always match OS page size. */
 	if (umem->odp_data) {
@@ -156,7 +156,7 @@ void __mlx5_ib_populate_pas(struct mlx5_ib_dev *dev, struct ib_umem *umem,
 			    int page_shift, size_t offset, size_t num_pages,
 			    __be64 *pas, int access_flags)
 {
-	unsigned long umem_page_shift = ilog2(umem->page_size);
+	unsigned long umem_page_shift = umem->page_shift;
 	int shift = page_shift - umem_page_shift;
 	int mask = (1 << shift) - 1;
 	int i, k, idx;

@@ -206,7 +206,7 @@ void mlx5_ib_invalidate_range(struct ib_umem *umem, unsigned long start,
 	 * but they will write 0s as well, so no difference in the end result.
 	 */
-	for (addr = start; addr < end; addr += (u64)umem->page_size) {
+	for (addr = start; addr < end; addr += BIT(umem->page_shift)) {
 		idx = (addr - ib_umem_start(umem)) / PAGE_SIZE;
 		/*
 		 * Strive to write the MTTs in chunks, but avoid overwriting

@@ -937,7 +937,7 @@ static struct ib_mr *mthca_reg_user_mr(struct ib_pd *pd, u64 start, u64 length,
 		goto err;
 	}
 
-	shift = ffs(mr->umem->page_size) - 1;
+	shift = mr->umem->page_shift;
 	n = mr->umem->nmap;
 
 	mr->mtt = mthca_alloc_mtt(dev, n);
@@ -959,8 +959,7 @@ static struct ib_mr *mthca_reg_user_mr(struct ib_pd *pd, u64 start, u64 length,
 	for_each_sg(mr->umem->sg_head.sgl, sg, mr->umem->nmap, entry) {
 		len = sg_dma_len(sg) >> shift;
 		for (k = 0; k < len; ++k) {
-			pages[i++] = sg_dma_address(sg) +
-				     mr->umem->page_size * k;
+			pages[i++] = sg_dma_address(sg) + (k << shift);
 			/*
 			 * Be friendly to write_mtt and pass it chunks
 			 * of appropriate size.

@@ -2165,9 +2165,9 @@ static struct ib_mr *nes_reg_user_mr(struct ib_pd *pd, u64 start, u64 length,
 	}
 
 	nes_debug(NES_DBG_MR, "User base = 0x%lX, Virt base = 0x%lX, length = %u,"
-			" offset = %u, page size = %u.\n",
+			" offset = %u, page size = %lu.\n",
 			(unsigned long int)start, (unsigned long int)virt, (u32)length,
-			ib_umem_offset(region), region->page_size);
+			ib_umem_offset(region), BIT(region->page_shift));
 
 	skip_pages = ((u32)ib_umem_offset(region)) >> 12;

@@ -914,21 +914,18 @@ static void build_user_pbes(struct ocrdma_dev *dev, struct ocrdma_mr *mr,
 	pbe = (struct ocrdma_pbe *)pbl_tbl->va;
 	pbe_cnt = 0;
 
-	shift = ilog2(umem->page_size);
+	shift = umem->page_shift;
 
 	for_each_sg(umem->sg_head.sgl, sg, umem->nmap, entry) {
 		pages = sg_dma_len(sg) >> shift;
 		for (pg_cnt = 0; pg_cnt < pages; pg_cnt++) {
 			/* store the page address in pbe */
 			pbe->pa_lo =
-			    cpu_to_le32(sg_dma_address
-					(sg) +
-					(umem->page_size * pg_cnt));
+			    cpu_to_le32(sg_dma_address(sg) +
+					(pg_cnt << shift));
 			pbe->pa_hi =
-			    cpu_to_le32(upper_32_bits
-					((sg_dma_address
-					  (sg) +
-					  umem->page_size * pg_cnt)));
+			    cpu_to_le32(upper_32_bits(sg_dma_address(sg) +
+						      (pg_cnt << shift)));
 			pbe_cnt += 1;
 			total_num_pbes += 1;
 			pbe++;
@@ -978,7 +975,7 @@ struct ib_mr *ocrdma_reg_user_mr(struct ib_pd *ibpd, u64 start, u64 len,
 	if (status)
 		goto umem_err;
 
-	mr->hwmr.pbe_size = mr->umem->page_size;
+	mr->hwmr.pbe_size = BIT(mr->umem->page_shift);
 	mr->hwmr.fbo = ib_umem_offset(mr->umem);
 	mr->hwmr.va = usr_addr;
 	mr->hwmr.len = len;

@@ -681,16 +681,16 @@ static void qedr_populate_pbls(struct qedr_dev *dev, struct ib_umem *umem,
 	pbe_cnt = 0;
 
-	shift = ilog2(umem->page_size);
+	shift = umem->page_shift;
 
 	for_each_sg(umem->sg_head.sgl, sg, umem->nmap, entry) {
 		pages = sg_dma_len(sg) >> shift;
 		for (pg_cnt = 0; pg_cnt < pages; pg_cnt++) {
 			/* store the page address in pbe */
 			pbe->lo = cpu_to_le32(sg_dma_address(sg) +
-					      umem->page_size * pg_cnt);
+					      (pg_cnt << shift));
 			addr = upper_32_bits(sg_dma_address(sg) +
-					     umem->page_size * pg_cnt);
+					     (pg_cnt << shift));
 			pbe->hi = cpu_to_le32(addr);
 			pbe_cnt++;
 			total_num_pbes++;
@@ -2190,7 +2190,7 @@ struct ib_mr *qedr_reg_user_mr(struct ib_pd *ibpd, u64 start, u64 len,
 	mr->hw_mr.pbl_ptr = mr->info.pbl_table[0].pa;
 	mr->hw_mr.pbl_two_level = mr->info.pbl_info.two_layered;
 	mr->hw_mr.pbl_page_size_log = ilog2(mr->info.pbl_info.pbl_size);
-	mr->hw_mr.page_size_log = ilog2(mr->umem->page_size);
+	mr->hw_mr.page_size_log = mr->umem->page_shift;
 	mr->hw_mr.fbo = ib_umem_offset(mr->umem);
 	mr->hw_mr.length = len;
 	mr->hw_mr.vaddr = usr_addr;

@@ -194,7 +194,7 @@ int pvrdma_page_dir_insert_umem(struct pvrdma_page_dir *pdir,
 		len = sg_dma_len(sg) >> PAGE_SHIFT;
 		for (j = 0; j < len; j++) {
 			dma_addr_t addr = sg_dma_address(sg) +
-					  umem->page_size * j;
+					  (j << umem->page_shift);
 			ret = pvrdma_page_dir_insert_dma(pdir, i, addr);
 			if (ret)

@@ -408,8 +408,7 @@ struct ib_mr *rvt_reg_user_mr(struct ib_pd *pd, u64 start, u64 length,
 	mr->mr.access_flags = mr_access_flags;
 	mr->umem = umem;
 
-	if (is_power_of_2(umem->page_size))
-		mr->mr.page_shift = ilog2(umem->page_size);
+	mr->mr.page_shift = umem->page_shift;
 	m = 0;
 	n = 0;
 	for_each_sg(umem->sg_head.sgl, sg, umem->nmap, entry) {
@@ -421,8 +420,9 @@ struct ib_mr *rvt_reg_user_mr(struct ib_pd *pd, u64 start, u64 length,
 			goto bail_inval;
 		}
 		mr->mr.map[m]->segs[n].vaddr = vaddr;
-		mr->mr.map[m]->segs[n].length = umem->page_size;
-		trace_rvt_mr_user_seg(&mr->mr, m, n, vaddr, umem->page_size);
+		mr->mr.map[m]->segs[n].length = BIT(umem->page_shift);
+		trace_rvt_mr_user_seg(&mr->mr, m, n, vaddr,
+				      BIT(umem->page_shift));
 		n++;
 		if (n == RVT_SEGSZ) {
 			m++;

@@ -191,10 +191,8 @@ int rxe_mem_init_user(struct rxe_dev *rxe, struct rxe_pd *pd, u64 start,
 		goto err1;
 	}
 
-	WARN_ON_ONCE(!is_power_of_2(umem->page_size));
-
-	mem->page_shift = ilog2(umem->page_size);
-	mem->page_mask = umem->page_size - 1;
+	mem->page_shift = umem->page_shift;
+	mem->page_mask = BIT(umem->page_shift) - 1;
 
 	num_buf = 0;
 	map = mem->map;
@@ -210,7 +208,7 @@ int rxe_mem_init_user(struct rxe_dev *rxe, struct rxe_pd *pd, u64 start,
 		}
 
 		buf->addr = (uintptr_t)vaddr;
-		buf->size = umem->page_size;
+		buf->size = BIT(umem->page_shift);
 		num_buf++;
 		buf++;

@@ -44,7 +44,7 @@ struct ib_umem {
 	struct ib_ucontext     *context;
 	size_t			length;
 	unsigned long		address;
-	int			page_size;
+	int			page_shift;
 	int			writable;
 	int			hugetlb;
 	struct work_struct	work;
@@ -60,7 +60,7 @@ struct ib_umem {
 /* Returns the offset of the umem start relative to the first page. */
 static inline int ib_umem_offset(struct ib_umem *umem)
 {
-	return umem->address & ((unsigned long)umem->page_size - 1);
+	return umem->address & (BIT(umem->page_shift) - 1);
 }
 
 /* Returns the first page of an ODP umem. */
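
As a usage sketch under the post-patch API (variable names are illustrative,
not from the patch), a consumer derives byte quantities from the shift:

  unsigned long page_size = BIT(umem->page_shift); /* bytes per umem page */
  int npages = ib_umem_page_count(umem);           /* pages covered by the umem */
  int offset = ib_umem_offset(umem);               /* offset within first page */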