author		Artemy Kovalyov <artemyko@mellanox.com>	2017-01-18 16:58:07 +0200
committer	Doug Ledford <dledford@redhat.com>	2017-02-14 11:41:17 -0500
commit		d07d1d70ce1ad1c525f51f459ce36ca49ec2bf48 (patch)
tree		09873cb0c3319aa49d59d1f44e2007e9bbd96356 /include/rdma/ib_umem_odp.h
parent		25bf14d6f5898a59325f3ecabda7695565776594 (diff)
IB/umem: Update on demand page (ODP) support
Currently, an ODP MR can only explicitly register a virtual address range of limited length. This change allows an MR to cover the entire process virtual address space, dynamically adding/removing translation entries to the device MTT.

Add the following changes to support implicit MR:

* Allow a umem to be zero-sized, to back an implicit MR.
* Add a new function, ib_alloc_odp_umem(), to add virtual memory regions to an implicit MR dynamically on demand.
* Add a new function, rbt_ib_umem_lookup(), to find dynamically added virtual memory regions.
* Expose rbt_ib_umem_for_each_in_range() to other modules and make it safe.

Signed-off-by: Artemy Kovalyov <artemyko@mellanox.com>
Signed-off-by: Leon Romanovsky <leon@kernel.org>
Signed-off-by: Doug Ledford <dledford@redhat.com>
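As a rough illustration of how the two new entry points are meant to work together, the sketch below shows a hypothetical driver fault-path helper. Only ib_alloc_odp_umem(), rbt_ib_umem_lookup() and the ERR_PTR(-EINVAL) stub come from this patch; the helper name, the rb_root argument and the odp->umem back-pointer are assumptions made for illustration.

#include <rdma/ib_umem_odp.h>

/*
 * Hypothetical helper for an implicit-MR page fault (not part of this
 * patch): reuse a region that already intersects the faulting range,
 * otherwise back the range with a freshly allocated ODP umem.
 */
static struct ib_umem *implicit_mr_get_umem(struct ib_ucontext *context,
					    struct rb_root *umem_tree,
					    u64 fault_addr, size_t length)
{
	struct ib_umem_odp *odp;

	/* Look up a dynamically added region covering the fault. */
	odp = rbt_ib_umem_lookup(umem_tree, fault_addr, length);
	if (odp)
		return odp->umem;	/* assumed back-pointer to the ib_umem */

	/* None found: add a new virtual memory region on demand. */
	return ib_alloc_odp_umem(context, fault_addr, length);
}

A caller would check the result with IS_ERR(); when CONFIG_INFINIBAND_ON_DEMAND_PAGING is disabled, the inline stub added in this patch returns ERR_PTR(-EINVAL).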
Diffstat (limited to 'include/rdma/ib_umem_odp.h')
-rw-r--r--	include/rdma/ib_umem_odp.h	21
1 file changed, 17 insertions, 4 deletions
diff --git a/include/rdma/ib_umem_odp.h b/include/rdma/ib_umem_odp.h
index 3da0b167041b..542cd8b3414c 100644
--- a/include/rdma/ib_umem_odp.h
+++ b/include/rdma/ib_umem_odp.h
@@ -79,11 +79,15 @@ struct ib_umem_odp {
 	struct completion	notifier_completion;
 	int			dying;
+	struct work_struct	work;
 };
 #ifdef CONFIG_INFINIBAND_ON_DEMAND_PAGING
 int ib_umem_odp_get(struct ib_ucontext *context, struct ib_umem *umem);
+struct ib_umem *ib_alloc_odp_umem(struct ib_ucontext *context,
+				  unsigned long addr,
+				  size_t size);
 void ib_umem_odp_release(struct ib_umem *umem);
@@ -117,10 +121,12 @@ typedef int (*umem_call_back)(struct ib_umem *item, u64 start, u64 end,
 int rbt_ib_umem_for_each_in_range(struct rb_root *root, u64 start, u64 end,
 				  umem_call_back cb, void *cookie);
-struct umem_odp_node *rbt_ib_umem_iter_first(struct rb_root *root,
-					     u64 start, u64 last);
-struct umem_odp_node *rbt_ib_umem_iter_next(struct umem_odp_node *node,
-					    u64 start, u64 last);
+/*
+ * Find first region intersecting with address range.
+ * Return NULL if not found
+ */
+struct ib_umem_odp *rbt_ib_umem_lookup(struct rb_root *root,
+				       u64 addr, u64 length);
 static inline int ib_umem_mmu_notifier_retry(struct ib_umem *item,
 					     unsigned long mmu_seq)
@@ -153,6 +159,13 @@ static inline int ib_umem_odp_get(struct ib_ucontext *context,
 	return -EINVAL;
 }
+static inline struct ib_umem *ib_alloc_odp_umem(struct ib_ucontext *context,
+						unsigned long addr,
+						size_t size)
+{
+	return ERR_PTR(-EINVAL);
+}
+
 static inline void ib_umem_odp_release(struct ib_umem *umem) {}
 #endif /* CONFIG_INFINIBAND_ON_DEMAND_PAGING */