/*
 * Copyright (c) 2007, 2020 Oracle and/or its affiliates.
 *
 * This software is available to you under a choice of one of two
 * licenses. You may choose to be licensed under the terms of the GNU
 * General Public License (GPL) Version 2, available from the file
 * COPYING in the main directory of this source tree, or the
 * OpenIB.org BSD license below:
 *
 * Redistribution and use in source and binary forms, with or
 * without modification, are permitted provided that the following
 * conditions are met:
 *
 * - Redistributions of source code must retain the above
 *   copyright notice, this list of conditions and the following
 *   disclaimer.
 *
 * - Redistributions in binary form must reproduce the above
 *   copyright notice, this list of conditions and the following
 *   disclaimer in the documentation and/or other materials
 *   provided with the distribution.
 *
 * THE SOFTWARE IS PROVIDED "AS IS", WITHOUT WARRANTY OF ANY KIND,
 * EXPRESS OR IMPLIED, INCLUDING BUT NOT LIMITED TO THE WARRANTIES OF
 * MERCHANTABILITY, FITNESS FOR A PARTICULAR PURPOSE AND
 * NONINFRINGEMENT. IN NO EVENT SHALL THE AUTHORS OR COPYRIGHT HOLDERS
 * BE LIABLE FOR ANY CLAIM, DAMAGES OR OTHER LIABILITY, WHETHER IN AN
 * ACTION OF CONTRACT, TORT OR OTHERWISE, ARISING FROM, OUT OF OR IN
 * CONNECTION WITH THE SOFTWARE OR THE USE OR OTHER DEALINGS IN THE
 * SOFTWARE.
 *
 */
#include <linux/pagemap.h>
#include <linux/slab.h>
#include <linux/rbtree.h>
#include <linux/dma-mapping.h> /* for DMA_*_DEVICE */

#include "rds.h"

/*
 * XXX
 *  - build with sparse
 *  - should we detect duplicate keys on a socket? hmm.
 *  - an rdma is an mlock, apply rlimit?
 */

/*
 * get the number of pages by looking at the page indices that the start and
 * end addresses fall in.
 *
 * Returns 0 if the vec is invalid. It is invalid if the number of bytes
 * causes the address to wrap or overflows an unsigned int. This comes
 * from being stored in the 'length' member of 'struct scatterlist'.
 */
static unsigned int rds_pages_in_vec(struct rds_iovec *vec)
{
	if ((vec->addr + vec->bytes <= vec->addr) ||
	    (vec->bytes > (u64)UINT_MAX))
		return 0;

	return ((vec->addr + vec->bytes + PAGE_SIZE - 1) >> PAGE_SHIFT) -
		(vec->addr >> PAGE_SHIFT);
}

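/*
 * Look up the MR with the given R_Key in the socket's rbtree. If @insert
 * is non-NULL and no entry matches, link it into the tree and take a
 * reference on it. Returns the existing MR on a key collision, else NULL.
 */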
static struct rds_mr *rds_mr_tree_walk(struct rb_root *root, u64 key,
				       struct rds_mr *insert)
{
	struct rb_node **p = &root->rb_node;
	struct rb_node *parent = NULL;
	struct rds_mr *mr;

	while (*p) {
		parent = *p;
		mr = rb_entry(parent, struct rds_mr, r_rb_node);

		if (key < mr->r_key)
			p = &(*p)->rb_left;
		else if (key > mr->r_key)
			p = &(*p)->rb_right;
		else
			return mr;
	}

	if (insert) {
		rb_link_node(&insert->r_rb_node, parent, p);
		rb_insert_color(&insert->r_rb_node, root);
		kref_get(&insert->r_kref);
	}
	return NULL;
}

/*
 * Destroy the transport-specific part of a MR.
 */
static void rds_destroy_mr(struct rds_mr *mr)
{
	struct rds_sock *rs = mr->r_sock;
	void *trans_private = NULL;
	unsigned long flags;

	rdsdebug("RDS: destroy mr key is %x refcnt %u\n",
		 mr->r_key, kref_read(&mr->r_kref));

	spin_lock_irqsave(&rs->rs_rdma_lock, flags);
	if (!RB_EMPTY_NODE(&mr->r_rb_node))
		rb_erase(&mr->r_rb_node, &rs->rs_rdma_keys);
	trans_private = mr->r_trans_private;
	mr->r_trans_private = NULL;
	spin_unlock_irqrestore(&rs->rs_rdma_lock, flags);

	if (trans_private)
		mr->r_trans->free_mr(trans_private, mr->r_invalidate);
}

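/*
 * kref release callback: tear down the transport-private MR state and
 * free the rds_mr itself.
 */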
void __rds_put_mr_final(struct kref *kref)
{
	struct rds_mr *mr = container_of(kref, struct rds_mr, r_kref);

	rds_destroy_mr(mr);
	kfree(mr);
}

/*
 * By the time this is called we can't have any more ioctls called on
 * the socket so we don't need to worry about racing with others.
 */
void rds_rdma_drop_keys(struct rds_sock *rs)
{
	struct rds_mr *mr;
	struct rb_node *node;
	unsigned long flags;

	/* Release any MRs associated with this socket */
	spin_lock_irqsave(&rs->rs_rdma_lock, flags);
	while ((node = rb_first(&rs->rs_rdma_keys))) {
		mr = rb_entry(node, struct rds_mr, r_rb_node);
		if (mr->r_trans == rs->rs_transport)
			mr->r_invalidate = 0;
		rb_erase(&mr->r_rb_node, &rs->rs_rdma_keys);
		RB_CLEAR_NODE(&mr->r_rb_node);
		spin_unlock_irqrestore(&rs->rs_rdma_lock, flags);
		kref_put(&mr->r_kref, __rds_put_mr_final);
		spin_lock_irqsave(&rs->rs_rdma_lock, flags);
	}
	spin_unlock_irqrestore(&rs->rs_rdma_lock, flags);

	if (rs->rs_transport && rs->rs_transport->flush_mrs)
		rs->rs_transport->flush_mrs();
}

/*
 * Helper function to pin user pages.
 */
static int rds_pin_pages(unsigned long user_addr, unsigned int nr_pages,
			 struct page **pages, int write)
{
	unsigned int gup_flags = FOLL_LONGTERM;
	int ret;

	if (write)
		gup_flags |= FOLL_WRITE;

	ret = pin_user_pages_fast(user_addr, nr_pages, gup_flags, pages);
	if (ret >= 0 && ret < nr_pages) {
		unpin_user_pages(pages, ret);
		ret = -EFAULT;
	}

	return ret;
}

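/*
 * Register a memory region for RDMA: pin the user pages described by
 * args->vec (or fall back to on-demand paging when pinning returns
 * -EOPNOTSUPP), hand the scatterlist to the transport's get_mr(), insert
 * the new MR into the socket's rbtree and return the <R_Key, offset>
 * cookie to the caller.
 */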
static int __rds_rdma_map(struct rds_sock *rs, struct rds_get_mr_args *args,
			  u64 *cookie_ret, struct rds_mr **mr_ret,
			  struct rds_conn_path *cp)
{
	struct rds_mr *mr = NULL, *found;
	struct scatterlist *sg = NULL;
	unsigned int nr_pages;
	struct page **pages = NULL;
	void *trans_private;
	unsigned long flags;
	rds_rdma_cookie_t cookie;
	unsigned int nents = 0;
	int need_odp = 0;
	long i;
	int ret;

	if (ipv6_addr_any(&rs->rs_bound_addr) || !rs->rs_transport) {
		ret = -ENOTCONN; /* XXX not a great errno */
		goto out;
	}

	if (!rs->rs_transport->get_mr) {
		ret = -EOPNOTSUPP;
		goto out;
	}

	/* If the combination of the addr and size requested for this memory
	 * region causes an integer overflow, return error.
	 */
	if (((args->vec.addr + args->vec.bytes) < args->vec.addr) ||
	    PAGE_ALIGN(args->vec.addr + args->vec.bytes) <
	    (args->vec.addr + args->vec.bytes)) {
		ret = -EINVAL;
		goto out;
	}

	if (!can_do_mlock()) {
		ret = -EPERM;
		goto out;
	}

	nr_pages = rds_pages_in_vec(&args->vec);
	if (nr_pages == 0) {
		ret = -EINVAL;
		goto out;
	}

	/* Restrict the size of mr irrespective of underlying transport
	 * To account for unaligned mr regions, subtract one from nr_pages
	 */
	if ((nr_pages - 1) > (RDS_MAX_MSG_SIZE >> PAGE_SHIFT)) {
		ret = -EMSGSIZE;
		goto out;
	}

	rdsdebug("RDS: get_mr addr %llx len %llu nr_pages %u\n",
		 args->vec.addr, args->vec.bytes, nr_pages);

	/* XXX clamp nr_pages to limit the size of this alloc? */
	pages = kcalloc(nr_pages, sizeof(struct page *), GFP_KERNEL);
	if (!pages) {
		ret = -ENOMEM;
		goto out;
	}

	mr = kzalloc(sizeof(struct rds_mr), GFP_KERNEL);
	if (!mr) {
		ret = -ENOMEM;
		goto out;
	}

	kref_init(&mr->r_kref);
	RB_CLEAR_NODE(&mr->r_rb_node);
	mr->r_trans = rs->rs_transport;
	mr->r_sock = rs;

	if (args->flags & RDS_RDMA_USE_ONCE)
		mr->r_use_once = 1;
	if (args->flags & RDS_RDMA_INVALIDATE)
		mr->r_invalidate = 1;
	if (args->flags & RDS_RDMA_READWRITE)
		mr->r_write = 1;

	/*
	 * Pin the pages that make up the user buffer and transfer the page
	 * pointers to the mr's sg array. We check to see if we've mapped
	 * the whole region after transferring the partial page references
	 * to the sg array so that we can have one page ref cleanup path.
	 *
	 * For now we have no flag that tells us whether the mapping is
	 * r/o or r/w. We need to assume r/w, or we'll do a lot of RDMA to
	 * the zero page.
	 */
	ret = rds_pin_pages(args->vec.addr, nr_pages, pages, 1);
	if (ret == -EOPNOTSUPP) {
		need_odp = 1;
	} else if (ret <= 0) {
		goto out;
	} else {
		nents = ret;
		sg = kmalloc_array(nents, sizeof(*sg), GFP_KERNEL);
		if (!sg) {
			ret = -ENOMEM;
			goto out;
		}
		WARN_ON(!nents);
		sg_init_table(sg, nents);

		/* Stick all pages into the scatterlist */
		for (i = 0 ; i < nents; i++)
			sg_set_page(&sg[i], pages[i], PAGE_SIZE, 0);

		rdsdebug("RDS: trans_private nents is %u\n", nents);
	}
	/* Obtain a transport specific MR. If this succeeds, the
	 * s/g list is now owned by the MR.
	 * Note that dma_map() implies that pending writes are
	 * flushed to RAM, so no dma_sync is needed here. */
	trans_private = rs->rs_transport->get_mr(
		sg, nents, rs, &mr->r_key, cp ? cp->cp_conn : NULL,
		args->vec.addr, args->vec.bytes,
		need_odp ? ODP_ZEROBASED : ODP_NOT_NEEDED);

	if (IS_ERR(trans_private)) {
		/* In ODP case, we don't GUP pages, so don't need
		 * to release anything.
		 */
		if (!need_odp) {
			unpin_user_pages(pages, nr_pages);
			kfree(sg);
		}
		ret = PTR_ERR(trans_private);
		goto out;
	}

	mr->r_trans_private = trans_private;

	rdsdebug("RDS: get_mr put_user key is %x cookie_addr %p\n",
		 mr->r_key, (void *)(unsigned long) args->cookie_addr);

	/* The user may pass us an unaligned address, but we can only
	 * map page aligned regions. So we keep the offset, and build
	 * a 64bit cookie containing <R_Key, offset> and pass that
	 * around. */
	if (need_odp)
		cookie = rds_rdma_make_cookie(mr->r_key, 0);
	else
		cookie = rds_rdma_make_cookie(mr->r_key,
					      args->vec.addr & ~PAGE_MASK);
	if (cookie_ret)
		*cookie_ret = cookie;

	if (args->cookie_addr &&
	    put_user(cookie, (u64 __user *)(unsigned long)args->cookie_addr)) {
		if (!need_odp) {
			unpin_user_pages(pages, nr_pages);
			kfree(sg);
		}
		ret = -EFAULT;
		goto out;
	}

	/* Inserting the new MR into the rbtree bumps its
	 * reference count. */
	spin_lock_irqsave(&rs->rs_rdma_lock, flags);
	found = rds_mr_tree_walk(&rs->rs_rdma_keys, mr->r_key, mr);
	spin_unlock_irqrestore(&rs->rs_rdma_lock, flags);

	BUG_ON(found && found != mr);

	rdsdebug("RDS: get_mr key is %x\n", mr->r_key);
	if (mr_ret) {
		kref_get(&mr->r_kref);
		*mr_ret = mr;
	}

	ret = 0;
out:
	kfree(pages);
	if (mr)
		kref_put(&mr->r_kref, __rds_put_mr_final);
	return ret;
}

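/*
 * Handle the RDS_GET_MR socket option: validate and copy the
 * rds_get_mr_args from the user and map the described region.
 */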
int rds_get_mr(struct rds_sock *rs, sockptr_t optval, int optlen)
{
	struct rds_get_mr_args args;

	if (optlen != sizeof(struct rds_get_mr_args))
		return -EINVAL;

	if (copy_from_sockptr(&args, optval, sizeof(struct rds_get_mr_args)))
		return -EFAULT;

	return __rds_rdma_map(rs, &args, NULL, NULL, NULL);
}

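/*
 * Handle RDS_GET_MR_FOR_DEST. The arguments also name a destination, but
 * only the vec, cookie_addr and flags are used here, so for now this maps
 * the region exactly like rds_get_mr().
 */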
int rds_get_mr_for_dest(struct rds_sock *rs, sockptr_t optval, int optlen)
{
	struct rds_get_mr_for_dest_args args;
	struct rds_get_mr_args new_args;

	if (optlen != sizeof(struct rds_get_mr_for_dest_args))
		return -EINVAL;

	if (copy_from_sockptr(&args, optval,
			      sizeof(struct rds_get_mr_for_dest_args)))
		return -EFAULT;

	/*
	 * Initially, just behave like get_mr().
	 * TODO: Implement get_mr as wrapper around this
	 *	 and deprecate it.
	 */
	new_args.vec = args.vec;
	new_args.cookie_addr = args.cookie_addr;
	new_args.flags = args.flags;

	return __rds_rdma_map(rs, &new_args, NULL, NULL, NULL);
}

/*
 * Free the MR indicated by the given R_Key
 */
int rds_free_mr(struct rds_sock *rs, sockptr_t optval, int optlen)
{
	struct rds_free_mr_args args;
	struct rds_mr *mr;
	unsigned long flags;

	if (optlen != sizeof(struct rds_free_mr_args))
		return -EINVAL;

	if (copy_from_sockptr(&args, optval, sizeof(struct rds_free_mr_args)))
		return -EFAULT;

	/* Special case - a null cookie means flush all unused MRs */
	if (args.cookie == 0) {
		if (!rs->rs_transport || !rs->rs_transport->flush_mrs)
			return -EINVAL;
		rs->rs_transport->flush_mrs();
		return 0;
	}

	/* Look up the MR given its R_key and remove it from the rbtree
	 * so nobody else finds it.
	 * This should also prevent races with rds_rdma_unuse.
	 */
	spin_lock_irqsave(&rs->rs_rdma_lock, flags);
	mr = rds_mr_tree_walk(&rs->rs_rdma_keys, rds_rdma_cookie_key(args.cookie), NULL);
	if (mr) {
		rb_erase(&mr->r_rb_node, &rs->rs_rdma_keys);
		RB_CLEAR_NODE(&mr->r_rb_node);
		if (args.flags & RDS_RDMA_INVALIDATE)
			mr->r_invalidate = 1;
	}
	spin_unlock_irqrestore(&rs->rs_rdma_lock, flags);

	if (!mr)
		return -EINVAL;

	kref_put(&mr->r_kref, __rds_put_mr_final);
	return 0;
}

/*
 * This is called when we receive an extension header that
 * tells us this MR was used. It allows us to implement
 * use_once semantics
 */
void rds_rdma_unuse(struct rds_sock *rs, u32 r_key, int force)
{
	struct rds_mr *mr;
	unsigned long flags;
	int zot_me = 0;

	spin_lock_irqsave(&rs->rs_rdma_lock, flags);
	mr = rds_mr_tree_walk(&rs->rs_rdma_keys, r_key, NULL);
	if (!mr) {
		pr_debug("rds: trying to unuse MR with unknown r_key %u!\n",
			 r_key);
		spin_unlock_irqrestore(&rs->rs_rdma_lock, flags);
		return;
	}

	/* Get a reference so that the MR won't go away before calling
	 * sync_mr() below.
	 */
	kref_get(&mr->r_kref);

	/* If it is going to be freed, remove it from the tree now so
	 * that no other thread can find it and free it.
	 */
	if (mr->r_use_once || force) {
		rb_erase(&mr->r_rb_node, &rs->rs_rdma_keys);
		RB_CLEAR_NODE(&mr->r_rb_node);
		zot_me = 1;
	}
	spin_unlock_irqrestore(&rs->rs_rdma_lock, flags);

	/* May have to issue a dma_sync on this memory region.
	 * Note we could avoid this if the operation was a RDMA READ,
	 * but at this point we can't tell. */
	if (mr->r_trans->sync_mr)
		mr->r_trans->sync_mr(mr->r_trans_private, DMA_FROM_DEVICE);

	/* Release the reference held above. */
	kref_put(&mr->r_kref, __rds_put_mr_final);

	/* If the MR was marked as invalidate, this will
	 * trigger an async flush. */
	if (zot_me)
		kref_put(&mr->r_kref, __rds_put_mr_final);
}

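/*
 * Release the resources attached to a previously built RDMA op: drop the
 * ODP MR reference, or unpin (and possibly dirty) the pinned pages, then
 * free the notifier.
 */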
void rds_rdma_free_op(struct rm_rdma_op *ro)
{
	unsigned int i;

	if (ro->op_odp_mr) {
		kref_put(&ro->op_odp_mr->r_kref, __rds_put_mr_final);
	} else {
		for (i = 0; i < ro->op_nents; i++) {
			struct page *page = sg_page(&ro->op_sg[i]);

			/* Mark page dirty if it was possibly modified, which
			 * is the case for a RDMA_READ which copies from remote
			 * to local memory
			 */
			unpin_user_pages_dirty_lock(&page, 1, !ro->op_write);
		}
	}

	kfree(ro->op_notifier);
	ro->op_notifier = NULL;
	ro->op_active = 0;
	ro->op_odp_mr = NULL;
}

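/*
 * Release the single page pinned for an atomic op and free its notifier.
 */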
void rds_atomic_free_op(struct rm_atomic_op *ao)
{
	struct page *page = sg_page(ao->op_sg);

	/* Mark page dirty if it was possibly modified, which
	 * is the case for a RDMA_READ which copies from remote
	 * to local memory */
	unpin_user_pages_dirty_lock(&page, 1, true);

	kfree(ao->op_notifier);
	ao->op_notifier = NULL;
	ao->op_active = 0;
}


/*
 * Count the number of pages needed to describe an incoming iovec array.
 */
static int rds_rdma_pages(struct rds_iovec iov[], int nr_iovecs)
{
	int tot_pages = 0;
	unsigned int nr_pages;
	unsigned int i;

	/* figure out the number of pages in the vector */
	for (i = 0; i < nr_iovecs; i++) {
		nr_pages = rds_pages_in_vec(&iov[i]);
		if (nr_pages == 0)
			return -EINVAL;

		tot_pages += nr_pages;

		/*
		 * nr_pages for one entry is limited to (UINT_MAX>>PAGE_SHIFT)+1,
		 * so tot_pages cannot overflow without first going negative.
		 */
		if (tot_pages < 0)
			return -EINVAL;
	}

	return tot_pages;
}

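/*
 * Copy the iovec array for an RDMA request from userspace into @iov and
 * return the scatterlist space (in bytes) needed to describe it, or a
 * negative errno.
 */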
int rds_rdma_extra_size(struct rds_rdma_args *args,
			struct rds_iov_vector *iov)
{
	struct rds_iovec *vec;
	struct rds_iovec __user *local_vec;
	int tot_pages = 0;
	unsigned int nr_pages;
	unsigned int i;

	local_vec = (struct rds_iovec __user *)(unsigned long) args->local_vec_addr;

	if (args->nr_local == 0)
		return -EINVAL;

	iov->iov = kcalloc(args->nr_local,
			   sizeof(struct rds_iovec),
			   GFP_KERNEL);
	if (!iov->iov)
		return -ENOMEM;

	vec = &iov->iov[0];

	if (copy_from_user(vec, local_vec, args->nr_local *
			   sizeof(struct rds_iovec)))
		return -EFAULT;
	iov->len = args->nr_local;

	/* figure out the number of pages in the vector */
	for (i = 0; i < args->nr_local; i++, vec++) {

		nr_pages = rds_pages_in_vec(vec);
		if (nr_pages == 0)
			return -EINVAL;

		tot_pages += nr_pages;

		/*
		 * nr_pages for one entry is limited to (UINT_MAX>>PAGE_SHIFT)+1,
		 * so tot_pages cannot overflow without first going negative.
		 */
		if (tot_pages < 0)
			return -EINVAL;
	}

	return tot_pages * sizeof(struct scatterlist);
}

/*
 * The application asks for a RDMA transfer.
 * Extract all arguments and set up the rdma_op
 */
int rds_cmsg_rdma_args(struct rds_sock *rs, struct rds_message *rm,
		       struct cmsghdr *cmsg,
		       struct rds_iov_vector *vec)
{
	struct rds_rdma_args *args;
	struct rm_rdma_op *op = &rm->rdma;
	int nr_pages;
	unsigned int nr_bytes;
	struct page **pages = NULL;
	struct rds_iovec *iovs;
	unsigned int i, j;
	int ret = 0;
	bool odp_supported = true;

	if (cmsg->cmsg_len < CMSG_LEN(sizeof(struct rds_rdma_args))
	    || rm->rdma.op_active)
		return -EINVAL;

	args = CMSG_DATA(cmsg);

	if (ipv6_addr_any(&rs->rs_bound_addr)) {
		ret = -ENOTCONN; /* XXX not a great errno */
		goto out_ret;
	}

	if (args->nr_local > UIO_MAXIOV) {
		ret = -EMSGSIZE;
		goto out_ret;
	}

	if (vec->len != args->nr_local) {
		ret = -EINVAL;
		goto out_ret;
	}
	/* odp-mr is not supported for multiple requests within one message */
	if (args->nr_local != 1)
		odp_supported = false;

	iovs = vec->iov;

	nr_pages = rds_rdma_pages(iovs, args->nr_local);
	if (nr_pages < 0) {
		ret = -EINVAL;
		goto out_ret;
	}

	pages = kcalloc(nr_pages, sizeof(struct page *), GFP_KERNEL);
	if (!pages) {
		ret = -ENOMEM;
		goto out_ret;
	}

	op->op_write = !!(args->flags & RDS_RDMA_READWRITE);
	op->op_fence = !!(args->flags & RDS_RDMA_FENCE);
	op->op_notify = !!(args->flags & RDS_RDMA_NOTIFY_ME);
	op->op_silent = !!(args->flags & RDS_RDMA_SILENT);
	op->op_active = 1;
	op->op_recverr = rs->rs_recverr;
	op->op_odp_mr = NULL;

	WARN_ON(!nr_pages);
	op->op_sg = rds_message_alloc_sgs(rm, nr_pages);
	if (IS_ERR(op->op_sg)) {
		ret = PTR_ERR(op->op_sg);
		goto out_pages;
	}

	if (op->op_notify || op->op_recverr) {
		/* We allocate an uninitialized notifier here, because
		 * we don't want to do that in the completion handler. We
		 * would have to use GFP_ATOMIC there, and don't want to deal
		 * with failed allocations.
		 */
		op->op_notifier = kmalloc(sizeof(struct rds_notifier), GFP_KERNEL);
		if (!op->op_notifier) {
			ret = -ENOMEM;
			goto out_pages;
		}
		op->op_notifier->n_user_token = args->user_token;
		op->op_notifier->n_status = RDS_RDMA_SUCCESS;
	}

	/* The cookie contains the R_Key of the remote memory region, and
	 * optionally an offset into it. This is how we implement RDMA into
	 * unaligned memory.
	 * When setting up the RDMA, we need to add that offset to the
	 * destination address (which is really an offset into the MR)
	 * FIXME: We may want to move this into ib_rdma.c
	 */
	op->op_rkey = rds_rdma_cookie_key(args->cookie);
	op->op_remote_addr = args->remote_vec.addr + rds_rdma_cookie_offset(args->cookie);

	nr_bytes = 0;

	rdsdebug("RDS: rdma prepare nr_local %llu rva %llx rkey %x\n",
		 (unsigned long long)args->nr_local,
		 (unsigned long long)args->remote_vec.addr,
		 op->op_rkey);

	for (i = 0; i < args->nr_local; i++) {
		struct rds_iovec *iov = &iovs[i];
		/* don't need to check, rds_rdma_pages() verified nr will be +nonzero */
		unsigned int nr = rds_pages_in_vec(iov);

		rs->rs_user_addr = iov->addr;
		rs->rs_user_bytes = iov->bytes;

		/* If it's a WRITE operation, we want to pin the pages for reading.
		 * If it's a READ operation, we need to pin the pages for writing.
		 */
		ret = rds_pin_pages(iov->addr, nr, pages, !op->op_write);
		if ((!odp_supported && ret <= 0) ||
		    (odp_supported && ret <= 0 && ret != -EOPNOTSUPP))
			goto out_pages;

		if (ret == -EOPNOTSUPP) {
			struct rds_mr *local_odp_mr;

			if (!rs->rs_transport->get_mr) {
				ret = -EOPNOTSUPP;
				goto out_pages;
			}
			local_odp_mr =
				kzalloc(sizeof(*local_odp_mr), GFP_KERNEL);
			if (!local_odp_mr) {
				ret = -ENOMEM;
				goto out_pages;
			}
			RB_CLEAR_NODE(&local_odp_mr->r_rb_node);
			kref_init(&local_odp_mr->r_kref);
			local_odp_mr->r_trans = rs->rs_transport;
			local_odp_mr->r_sock = rs;
			local_odp_mr->r_trans_private =
				rs->rs_transport->get_mr(
					NULL, 0, rs, &local_odp_mr->r_key, NULL,
					iov->addr, iov->bytes, ODP_VIRTUAL);
			if (IS_ERR(local_odp_mr->r_trans_private)) {
				ret = IS_ERR(local_odp_mr->r_trans_private);
				rdsdebug("get_mr ret %d %p\"", ret,
					 local_odp_mr->r_trans_private);
				kfree(local_odp_mr);
				ret = -EOPNOTSUPP;
				goto out_pages;
			}
			rdsdebug("Need odp; local_odp_mr %p trans_private %p\n",
				 local_odp_mr, local_odp_mr->r_trans_private);
			op->op_odp_mr = local_odp_mr;
			op->op_odp_addr = iov->addr;
		}

		rdsdebug("RDS: nr_bytes %u nr %u iov->bytes %llu iov->addr %llx\n",
			 nr_bytes, nr, iov->bytes, iov->addr);

		nr_bytes += iov->bytes;

		for (j = 0; j < nr; j++) {
			unsigned int offset = iov->addr & ~PAGE_MASK;
			struct scatterlist *sg;

			sg = &op->op_sg[op->op_nents + j];
			sg_set_page(sg, pages[j],
				    min_t(unsigned int, iov->bytes, PAGE_SIZE - offset),
				    offset);

			sg_dma_len(sg) = sg->length;
			rdsdebug("RDS: sg->offset %x sg->len %x iov->addr %llx iov->bytes %llu\n",
				 sg->offset, sg->length, iov->addr, iov->bytes);

			iov->addr += sg->length;
			iov->bytes -= sg->length;
		}

		op->op_nents += nr;
	}

	if (nr_bytes > args->remote_vec.bytes) {
		rdsdebug("RDS nr_bytes %u remote_bytes %u do not match\n",
			 nr_bytes,
			 (unsigned int) args->remote_vec.bytes);
		ret = -EINVAL;
		goto out_pages;
	}
	op->op_bytes = nr_bytes;
	ret = 0;

out_pages:
	kfree(pages);
out_ret:
	if (ret)
		rds_rdma_free_op(op);
	else
		rds_stats_inc(s_send_rdma);

	return ret;
}

/*
 * The application wants us to pass an RDMA destination (aka MR)
 * to the remote
 */
int rds_cmsg_rdma_dest(struct rds_sock *rs, struct rds_message *rm,
		       struct cmsghdr *cmsg)
{
	unsigned long flags;
	struct rds_mr *mr;
	u32 r_key;
	int err = 0;

	if (cmsg->cmsg_len < CMSG_LEN(sizeof(rds_rdma_cookie_t)) ||
	    rm->m_rdma_cookie != 0)
		return -EINVAL;

	memcpy(&rm->m_rdma_cookie, CMSG_DATA(cmsg), sizeof(rm->m_rdma_cookie));

	/* We are reusing a previously mapped MR here. Most likely, the
	 * application has written to the buffer, so we need to explicitly
	 * flush those writes to RAM. Otherwise the HCA may not see them
	 * when doing a DMA from that buffer.
	 */
	r_key = rds_rdma_cookie_key(rm->m_rdma_cookie);

	spin_lock_irqsave(&rs->rs_rdma_lock, flags);
	mr = rds_mr_tree_walk(&rs->rs_rdma_keys, r_key, NULL);
	if (!mr)
		err = -EINVAL;	/* invalid r_key */
	else
		kref_get(&mr->r_kref);
	spin_unlock_irqrestore(&rs->rs_rdma_lock, flags);

	if (mr) {
		mr->r_trans->sync_mr(mr->r_trans_private,
				     DMA_TO_DEVICE);
		rm->rdma.op_rdma_mr = mr;
	}
	return err;
}

/*
 * The application passes us an address range it wants to enable RDMA
 * to/from. We map the area, and save the <R_Key,offset> pair
 * in rm->m_rdma_cookie. This causes it to be sent along to the peer
 * in an extension header.
 */
int rds_cmsg_rdma_map(struct rds_sock *rs, struct rds_message *rm,
		      struct cmsghdr *cmsg)
{
	if (cmsg->cmsg_len < CMSG_LEN(sizeof(struct rds_get_mr_args)) ||
	    rm->m_rdma_cookie != 0)
		return -EINVAL;

	return __rds_rdma_map(rs, CMSG_DATA(cmsg), &rm->m_rdma_cookie,
			      &rm->rdma.op_rdma_mr, rm->m_conn_path);
}

/*
 * Fill in rds_message for an atomic request.
 */
int rds_cmsg_atomic(struct rds_sock *rs, struct rds_message *rm,
		    struct cmsghdr *cmsg)
{
	struct page *page = NULL;
	struct rds_atomic_args *args;
	int ret = 0;

	if (cmsg->cmsg_len < CMSG_LEN(sizeof(struct rds_atomic_args))
	    || rm->atomic.op_active)
		return -EINVAL;

	args = CMSG_DATA(cmsg);

	/* Nonmasked & masked cmsg ops converted to masked hw ops */
	switch (cmsg->cmsg_type) {
	case RDS_CMSG_ATOMIC_FADD:
		rm->atomic.op_type = RDS_ATOMIC_TYPE_FADD;
		rm->atomic.op_m_fadd.add = args->fadd.add;
		rm->atomic.op_m_fadd.nocarry_mask = 0;
		break;
	case RDS_CMSG_MASKED_ATOMIC_FADD:
		rm->atomic.op_type = RDS_ATOMIC_TYPE_FADD;
		rm->atomic.op_m_fadd.add = args->m_fadd.add;
		rm->atomic.op_m_fadd.nocarry_mask = args->m_fadd.nocarry_mask;
		break;
	case RDS_CMSG_ATOMIC_CSWP:
		rm->atomic.op_type = RDS_ATOMIC_TYPE_CSWP;
		rm->atomic.op_m_cswp.compare = args->cswp.compare;
		rm->atomic.op_m_cswp.swap = args->cswp.swap;
		rm->atomic.op_m_cswp.compare_mask = ~0;
		rm->atomic.op_m_cswp.swap_mask = ~0;
		break;
	case RDS_CMSG_MASKED_ATOMIC_CSWP:
		rm->atomic.op_type = RDS_ATOMIC_TYPE_CSWP;
		rm->atomic.op_m_cswp.compare = args->m_cswp.compare;
		rm->atomic.op_m_cswp.swap = args->m_cswp.swap;
		rm->atomic.op_m_cswp.compare_mask = args->m_cswp.compare_mask;
		rm->atomic.op_m_cswp.swap_mask = args->m_cswp.swap_mask;
		break;
	default:
		BUG(); /* should never happen */
	}

	rm->atomic.op_notify = !!(args->flags & RDS_RDMA_NOTIFY_ME);
	rm->atomic.op_silent = !!(args->flags & RDS_RDMA_SILENT);
	rm->atomic.op_active = 1;
	rm->atomic.op_recverr = rs->rs_recverr;
	rm->atomic.op_sg = rds_message_alloc_sgs(rm, 1);
	if (IS_ERR(rm->atomic.op_sg)) {
		ret = PTR_ERR(rm->atomic.op_sg);
		goto err;
	}

	/* verify 8 byte-aligned */
	if (args->local_addr & 0x7) {
		ret = -EFAULT;
		goto err;
	}

	ret = rds_pin_pages(args->local_addr, 1, &page, 1);
	if (ret != 1)
		goto err;
	ret = 0;

	sg_set_page(rm->atomic.op_sg, page, 8, offset_in_page(args->local_addr));

	if (rm->atomic.op_notify || rm->atomic.op_recverr) {
		/* We allocate an uninitialized notifier here, because
		 * we don't want to do that in the completion handler. We
		 * would have to use GFP_ATOMIC there, and don't want to deal
		 * with failed allocations.
		 */
		rm->atomic.op_notifier = kmalloc(sizeof(*rm->atomic.op_notifier), GFP_KERNEL);
		if (!rm->atomic.op_notifier) {
			ret = -ENOMEM;
			goto err;
		}

		rm->atomic.op_notifier->n_user_token = args->user_token;
		rm->atomic.op_notifier->n_status = RDS_RDMA_SUCCESS;
	}

	rm->atomic.op_rkey = rds_rdma_cookie_key(args->cookie);
	rm->atomic.op_remote_addr = args->remote_addr + rds_rdma_cookie_offset(args->cookie);

	return ret;
err:
	if (page)
		unpin_user_page(page);
	rm->atomic.op_active = 0;
	kfree(rm->atomic.op_notifier);

	return ret;
}