author     Jason Gunthorpe <jgg@nvidia.com>    2020-09-04 19:41:56 -0300
committer  Jason Gunthorpe <jgg@nvidia.com>    2020-09-11 10:24:54 -0300
commit     81655d3c4a95f05495f3213acbbc5423fc4dbdfb
tree       79904021eb611bea54d82fb01153139aca60bdb8
parent     87aebd3f8c7490b845f5ec3ce9c8658f17e0f96d
RDMA/mlx4: Use ib_umem_num_dma_blocks()
For the calls that go through mlx4_ib_umem_calc_optimal_mtt_size(), compute the
value with ib_umem_num_dma_blocks() inside the function itself; the count the
callers were pre-computing is just a weird static default.
All other places just need the PAGE_SIZE-based count, so switch them to
ib_umem_num_dma_blocks() directly.
As these are the last call sites, remove ib_umem_page_count().
Link: https://lore.kernel.org/r/15-v2-270386b7e60b+28f4-umem_1_jgg@nvidia.com
Signed-off-by: Jason Gunthorpe <jgg@nvidia.com>
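
[Editor's note] As an aid for reading the diff below, here is a small standalone C sketch (illustration only, not kernel code) of the two counting schemes involved: the removed ib_umem_page_count() summed sg_dma_len() >> PAGE_SHIFT over the mapped scatterlist, while ib_umem_num_dma_blocks(umem, PAGE_SIZE) counts PAGE_SIZE blocks across the aligned IOVA range. The ALIGN-based formula is a simplified model of the inline helper in include/rdma/ib_umem.h, and the sample scatterlist values are made up.

/* Standalone model of the old vs. new block counting, for illustration only. */
#include <stdio.h>
#include <stddef.h>

#define PAGE_SIZE  4096UL
#define PAGE_SHIFT 12

/* Round x up/down to a multiple of a (a must be a power of two). */
#define ALIGN_UP(x, a)   (((x) + (a) - 1) & ~((a) - 1))
#define ALIGN_DOWN(x, a) ((x) & ~((a) - 1))

/* New scheme: blocks covering the pgsz-aligned IOVA range of the umem. */
static size_t num_dma_blocks(unsigned long iova, size_t length,
                             unsigned long pgsz)
{
        return (ALIGN_UP(iova + length, pgsz) - ALIGN_DOWN(iova, pgsz)) / pgsz;
}

/* Old scheme: sum of sg_dma_len() >> PAGE_SHIFT over the mapped SG list. */
static size_t page_count(const size_t *sg_dma_len, int nmap)
{
        size_t n = 0;

        for (int i = 0; i < nmap; i++)
                n += sg_dma_len[i] >> PAGE_SHIFT;
        return n;
}

int main(void)
{
        /* A 3-page buffer mapped as one contiguous DMA segment (made-up data). */
        size_t sg[] = { 3 * PAGE_SIZE };

        printf("old scheme:            %zu blocks\n", page_count(sg, 1));
        printf("new scheme, aligned:   %zu blocks\n",
               num_dma_blocks(0x1000, 3 * PAGE_SIZE, PAGE_SIZE));
        /* With an unaligned IOVA the new helper rounds the range outward. */
        printf("new scheme, unaligned: %zu blocks\n",
               num_dma_blocks(0x1800, 3 * PAGE_SIZE, PAGE_SIZE));
        return 0;
}

For the page-aligned case both schemes report 3 blocks; the unaligned example shows how the new helper sizes the MTT table by the aligned IOVA span rather than by summing segment lengths.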
 drivers/infiniband/core/umem.c   | 12
 drivers/infiniband/hw/mlx4/cq.c  |  1
 drivers/infiniband/hw/mlx4/mr.c  |  5
 drivers/infiniband/hw/mlx4/qp.c  |  2
 drivers/infiniband/hw/mlx4/srq.c |  5
 include/rdma/ib_umem.h           |  2
 6 files changed, 6 insertions(+), 21 deletions(-)
diff --git a/drivers/infiniband/core/umem.c b/drivers/infiniband/core/umem.c
index b57dbb14de83..c1ab6a4f2bc3 100644
--- a/drivers/infiniband/core/umem.c
+++ b/drivers/infiniband/core/umem.c
@@ -350,18 +350,6 @@ void ib_umem_release(struct ib_umem *umem)
 }
 EXPORT_SYMBOL(ib_umem_release);
 
-int ib_umem_page_count(struct ib_umem *umem)
-{
-        int i, n = 0;
-        struct scatterlist *sg;
-
-        for_each_sg(umem->sg_head.sgl, sg, umem->nmap, i)
-                n += sg_dma_len(sg) >> PAGE_SHIFT;
-
-        return n;
-}
-EXPORT_SYMBOL(ib_umem_page_count);
-
 /*
  * Copy from the given ib_umem's pages to the given buffer.
  *
diff --git a/drivers/infiniband/hw/mlx4/cq.c b/drivers/infiniband/hw/mlx4/cq.c
index ee50dd823a8e..e9b5a4d57fb1 100644
--- a/drivers/infiniband/hw/mlx4/cq.c
+++ b/drivers/infiniband/hw/mlx4/cq.c
@@ -149,7 +149,6 @@ static int mlx4_ib_get_cq_umem(struct mlx4_ib_dev *dev, struct ib_udata *udata,
         if (IS_ERR(*umem))
                 return PTR_ERR(*umem);
 
-        n = ib_umem_page_count(*umem);
         shift = mlx4_ib_umem_calc_optimal_mtt_size(*umem, 0, &n);
         err = mlx4_mtt_init(dev->dev, n, shift, &buf->mtt);
 
diff --git a/drivers/infiniband/hw/mlx4/mr.c b/drivers/infiniband/hw/mlx4/mr.c
index 1d5ef0de12c9..bfb779b5eeb3 100644
--- a/drivers/infiniband/hw/mlx4/mr.c
+++ b/drivers/infiniband/hw/mlx4/mr.c
@@ -271,6 +271,8 @@ int mlx4_ib_umem_calc_optimal_mtt_size(struct ib_umem *umem, u64 start_va,
         u64 total_len = 0;
         int i;
 
+        *num_of_mtts = ib_umem_num_dma_blocks(umem, PAGE_SIZE);
+
         for_each_sg(umem->sg_head.sgl, sg, umem->nmap, i) {
                 /*
                  * Initialization - save the first chunk start as the
@@ -421,7 +423,6 @@ struct ib_mr *mlx4_ib_reg_user_mr(struct ib_pd *pd, u64 start, u64 length,
                 goto err_free;
         }
 
-        n = ib_umem_page_count(mr->umem);
         shift = mlx4_ib_umem_calc_optimal_mtt_size(mr->umem, start, &n);
 
         err = mlx4_mr_alloc(dev->dev, to_mpd(pd)->pdn, virt_addr, length,
@@ -511,7 +512,7 @@ int mlx4_ib_rereg_user_mr(struct ib_mr *mr, int flags,
                         mmr->umem = NULL;
                         goto release_mpt_entry;
                 }
-                n = ib_umem_page_count(mmr->umem);
+                n = ib_umem_num_dma_blocks(mmr->umem, PAGE_SIZE);
                 shift = PAGE_SHIFT;
 
                 err = mlx4_mr_rereg_mem_write(dev->dev, &mmr->mmr,
diff --git a/drivers/infiniband/hw/mlx4/qp.c b/drivers/infiniband/hw/mlx4/qp.c
index b7a0c3f97713..1c94a2c4463a 100644
--- a/drivers/infiniband/hw/mlx4/qp.c
+++ b/drivers/infiniband/hw/mlx4/qp.c
@@ -922,7 +922,6 @@ static int create_rq(struct ib_pd *pd, struct ib_qp_init_attr *init_attr,
                         goto err;
                 }
 
-                n = ib_umem_page_count(qp->umem);
                 shift = mlx4_ib_umem_calc_optimal_mtt_size(qp->umem, 0, &n);
                 err = mlx4_mtt_init(dev->dev, n, shift, &qp->mtt);
 
@@ -1117,7 +1116,6 @@ static int create_qp_common(struct ib_pd *pd, struct ib_qp_init_attr *init_attr,
                         goto err;
                 }
 
-                n = ib_umem_page_count(qp->umem);
                 shift = mlx4_ib_umem_calc_optimal_mtt_size(qp->umem, 0, &n);
                 err = mlx4_mtt_init(dev->dev, n, shift, &qp->mtt);
 
diff --git a/drivers/infiniband/hw/mlx4/srq.c b/drivers/infiniband/hw/mlx4/srq.c
index 2651b68a1c04..bf618529e734 100644
--- a/drivers/infiniband/hw/mlx4/srq.c
+++ b/drivers/infiniband/hw/mlx4/srq.c
@@ -115,8 +115,9 @@ int mlx4_ib_create_srq(struct ib_srq *ib_srq,
                 if (IS_ERR(srq->umem))
                         return PTR_ERR(srq->umem);
 
-                err = mlx4_mtt_init(dev->dev, ib_umem_page_count(srq->umem),
-                                    PAGE_SHIFT, &srq->mtt);
+                err = mlx4_mtt_init(
+                        dev->dev, ib_umem_num_dma_blocks(srq->umem, PAGE_SIZE),
+                        PAGE_SHIFT, &srq->mtt);
                 if (err)
                         goto err_buf;
 
diff --git a/include/rdma/ib_umem.h b/include/rdma/ib_umem.h
index cf001d4e0a4f..70597508c765 100644
--- a/include/rdma/ib_umem.h
+++ b/include/rdma/ib_umem.h
@@ -74,7 +74,6 @@ static inline void __rdma_umem_block_iter_start(struct ib_block_iter *biter,
 struct ib_umem *ib_umem_get(struct ib_device *device, unsigned long addr,
                             size_t size, int access);
 void ib_umem_release(struct ib_umem *umem);
-int ib_umem_page_count(struct ib_umem *umem);
 int ib_umem_copy_from(void *dst, struct ib_umem *umem, size_t offset,
                       size_t length);
 unsigned long ib_umem_find_best_pgsz(struct ib_umem *umem,
@@ -92,7 +91,6 @@ static inline struct ib_umem *ib_umem_get(struct ib_device *device,
         return ERR_PTR(-EINVAL);
 }
 static inline void ib_umem_release(struct ib_umem *umem) { }
-static inline int ib_umem_page_count(struct ib_umem *umem) { return 0; }
 static inline int ib_umem_copy_from(void *dst, struct ib_umem *umem,
                                     size_t offset, size_t length) {
         return -EINVAL;