/*
 * Copyright (c) 2014 Mellanox Technologies. All rights reserved.
 *
 * This software is available to you under a choice of one of two
 * licenses.  You may choose to be licensed under the terms of the GNU
 * General Public License (GPL) Version 2, available from the file
 * COPYING in the main directory of this source tree, or the
 * OpenIB.org BSD license below:
 *
 *     Redistribution and use in source and binary forms, with or
 *     without modification, are permitted provided that the following
 *     conditions are met:
 *
 *      - Redistributions of source code must retain the above
 *        copyright notice, this list of conditions and the following
 *        disclaimer.
 *
 *      - Redistributions in binary form must reproduce the above
 *        copyright notice, this list of conditions and the following
 *        disclaimer in the documentation and/or other materials
 *        provided with the distribution.
 *
 * THE SOFTWARE IS PROVIDED "AS IS", WITHOUT WARRANTY OF ANY KIND,
 * EXPRESS OR IMPLIED, INCLUDING BUT NOT LIMITED TO THE WARRANTIES OF
 * MERCHANTABILITY, FITNESS FOR A PARTICULAR PURPOSE AND
 * NONINFRINGEMENT. IN NO EVENT SHALL THE AUTHORS OR COPYRIGHT HOLDERS
 * BE LIABLE FOR ANY CLAIM, DAMAGES OR OTHER LIABILITY, WHETHER IN AN
 * ACTION OF CONTRACT, TORT OR OTHERWISE, ARISING FROM, OUT OF OR IN
 * CONNECTION WITH THE SOFTWARE OR THE USE OR OTHER DEALINGS IN THE
 * SOFTWARE.
 */

#ifndef IB_UMEM_ODP_H
#define IB_UMEM_ODP_H

#include <rdma/ib_umem.h>
#include <rdma/ib_verbs.h>
#include <linux/interval_tree.h>

struct umem_odp_node {
	u64 __subtree_last;
	struct rb_node rb;
};

struct ib_umem_odp {
	struct ib_umem umem;
	struct ib_ucontext_per_mm *per_mm;

	/*
	 * An array of the pages included in the on-demand paging umem.
	 * Indices of pages that are currently not mapped into the device will
	 * contain NULL.
	 */
	struct page **page_list;
	/*
	 * An array of the same size as page_list, with DMA addresses mapped
	 * for the pages in page_list. The lower two bits designate
	 * access permissions. See ODP_READ_ALLOWED_BIT and
	 * ODP_WRITE_ALLOWED_BIT.
	 */
	dma_addr_t *dma_list;
	/*
	 * The umem_mutex protects the page_list and dma_list fields of an ODP
	 * umem, allowing only a single thread to map/unmap pages. The mutex
	 * also protects access to the mmu notifier counters.
	 */
	struct mutex umem_mutex;
	void *private; /* for the HW driver to use. */

	int notifiers_seq;
	int notifiers_count;

	/* Tree tracking */
	struct umem_odp_node interval_tree;

	struct completion notifier_completion;
	int dying;
	struct work_struct work;
};
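
/*
 * Illustrative sketch (not part of this header): page_list and dma_list are
 * parallel arrays, so a single index yields both a CPU page and its device
 * mapping. odp_page_at() is a hypothetical helper; callers would need to
 * hold umem_mutex.
 */
#if 0
static struct page *odp_page_at(struct ib_umem_odp *umem_odp, size_t idx)
{
	/* A NULL entry means the page is not currently mapped for the device. */
	return umem_odp->page_list[idx];
}
#endif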

static inline struct ib_umem_odp *to_ib_umem_odp(struct ib_umem *umem)
{
	return container_of(umem, struct ib_umem_odp, umem);
}
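
/*
 * Usage sketch (hypothetical variable names): driver code holding a plain
 * struct ib_umem pointer can recover the enclosing ODP umem before touching
 * ODP-only state such as dma_list.
 */
#if 0
	struct ib_umem_odp *odp = to_ib_umem_odp(mr->umem); /* mr: hypothetical */
#endif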

/*
 * The lower 2 bits of the DMA address signal the R/W permissions for
 * the entry. To upgrade the permissions, provide the appropriate
 * bitmask to the map_dma_pages function.
 *
 * Be aware that upgrading a mapped address might result in change of
 * the DMA address for the page.
 */
#define ODP_READ_ALLOWED_BIT  (1<<0ULL)
#define ODP_WRITE_ALLOWED_BIT (1<<1ULL)

#define ODP_DMA_ADDR_MASK (~(ODP_READ_ALLOWED_BIT | ODP_WRITE_ALLOWED_BIT))
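
/*
 * Decoding sketch (hypothetical helper, not part of this header): mask off
 * the permission bits to recover the usable bus address, then test the low
 * bits for access rights.
 */
#if 0
static bool odp_entry_writable(dma_addr_t entry, dma_addr_t *addr)
{
	*addr = entry & ODP_DMA_ADDR_MASK;	/* strip the permission bits */
	return entry & ODP_WRITE_ALLOWED_BIT;
}
#endif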

#ifdef CONFIG_INFINIBAND_ON_DEMAND_PAGING

struct ib_ucontext_per_mm {
	struct ib_ucontext *context;
	struct mm_struct *mm;
	struct pid *tgid;
	bool active;

	struct rb_root_cached umem_tree;
	/* Protects umem_tree */
	struct rw_semaphore umem_rwsem;

	struct mmu_notifier mn;
	unsigned int odp_mrs_count;

	struct list_head ucontext_list;
	struct rcu_head rcu;
};
int ib_umem_odp_get(struct ib_umem_odp *umem_odp, int access);
struct ib_umem_odp *ib_alloc_odp_umem(struct ib_umem_odp *root_umem,
				      unsigned long addr, size_t size);
void ib_umem_odp_release(struct ib_umem_odp *umem_odp);

int ib_umem_odp_map_dma_pages(struct ib_umem_odp *umem_odp, u64 start_offset,
			      u64 bcnt, u64 access_mask,
			      unsigned long current_seq);

void ib_umem_odp_unmap_dma_pages(struct ib_umem_odp *umem_odp, u64 start_offset,
				 u64 bound);

typedef int (*umem_call_back)(struct ib_umem_odp *item, u64 start, u64 end,
			      void *cookie);
/*
 * Call the callback on each ib_umem_odp in the range. Returns the logical OR
 * of the callbacks' return values.
 */
int rbt_ib_umem_for_each_in_range(struct rb_root_cached *root,
				  u64 start, u64 end,
				  umem_call_back cb,
				  bool blockable, void *cookie);
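
/*
 * Callback sketch (hypothetical function, not part of this header): an
 * invalidation pass might unmap the intersected range of every umem the
 * walk visits, its return value OR-ed into the walk's overall result.
 */
#if 0
static int invalidate_range_cb(struct ib_umem_odp *item, u64 start, u64 end,
			       void *cookie)
{
	ib_umem_odp_unmap_dma_pages(item, start, end);
	return 0;
}
#endif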

/*
 * Find the first region intersecting with the address range.
 * Returns NULL if not found.
 */
struct ib_umem_odp *rbt_ib_umem_lookup(struct rb_root_cached *root,
				       u64 addr, u64 length);
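
/*
 * Lookup sketch (per_mm, addr and length are assumed locals): the interval
 * tree must be searched under the owning per_mm's umem_rwsem; the read side
 * is sufficient for a lookup.
 */
#if 0
	down_read(&per_mm->umem_rwsem);
	umem_odp = rbt_ib_umem_lookup(&per_mm->umem_tree, addr, length);
	up_read(&per_mm->umem_rwsem);
#endif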

static inline int ib_umem_mmu_notifier_retry(struct ib_umem_odp *umem_odp,
					     unsigned long mmu_seq)
{
	/*
	 * This code is strongly based on the KVM mmu_notifier_retry() code.
	 * It must be called with the relevant locks taken (umem_odp->umem_mutex
	 * and the per_mm->umem_rwsem semaphore locked for read).
	 */

	if (unlikely(umem_odp->notifiers_count))
		return 1;
	if (umem_odp->notifiers_seq != mmu_seq)
		return 1;
	return 0;
}
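
/*
 * Fault-handling sketch modelled on the KVM seq/retry pattern (hypothetical
 * driver code; fault_addr, fault_len, current_seq and ret are assumed
 * locals): sample the notifier sequence, map the pages, then retry if an
 * invalidation raced with the mapping.
 */
#if 0
again:
	current_seq = READ_ONCE(umem_odp->notifiers_seq);
	/* Order the sequence read before the mapping work that follows. */
	smp_rmb();
	ret = ib_umem_odp_map_dma_pages(umem_odp, fault_addr, fault_len,
					ODP_READ_ALLOWED_BIT, current_seq);
	if (ret < 0)
		return ret;
	mutex_lock(&umem_odp->umem_mutex);
	if (ib_umem_mmu_notifier_retry(umem_odp, current_seq)) {
		/* An invalidation ran while we mapped; start over. */
		mutex_unlock(&umem_odp->umem_mutex);
		goto again;
	}
	/* ... update device page tables while still holding umem_mutex ... */
	mutex_unlock(&umem_odp->umem_mutex);
#endif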

#else /* CONFIG_INFINIBAND_ON_DEMAND_PAGING */

static inline int ib_umem_odp_get(struct ib_umem_odp *umem_odp, int access)
{
	return -EINVAL;
}

static inline void ib_umem_odp_release(struct ib_umem_odp *umem_odp) {}

#endif /* CONFIG_INFINIBAND_ON_DEMAND_PAGING */

#endif /* IB_UMEM_ODP_H */