/*
 * (C) 2001 Clemson University and The University of Chicago
 *
 * See COPYING in top-level directory.
 */
#include "protocol.h"
#include "orangefs-kernel.h"
#include "orangefs-bufmap.h"

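/*
 * Slot allocator for the shared-memory buffers: a small bitmap
 * allocator.  ->c is the number of free slots while a map is
 * installed; ->c == -1 means no map is installed.  mark_killed()
 * subtracts ->count + 1, so ->c climbs back to -1 only once every
 * outstanding slot has been put() back.  ->map is the in-use bitmap,
 * and ->q.lock protects the whole structure.
 */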
struct slot_map {
	int c;
	wait_queue_head_t q;
	int count;
	unsigned long *map;
};

static struct slot_map rw_map = {
	.c = -1,
	.q = __WAIT_QUEUE_HEAD_INITIALIZER(rw_map.q)
};
static struct slot_map readdir_map = {
	.c = -1,
	.q = __WAIT_QUEUE_HEAD_INITIALIZER(readdir_map.q)
};

static void install(struct slot_map *m, int count, unsigned long *map)
{
	spin_lock(&m->q.lock);
	m->c = m->count = count;
	m->map = map;
	wake_up_all_locked(&m->q);
	spin_unlock(&m->q.lock);
}

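/*
 * Begin shutdown of a slot map: subtracting ->count + 1 drives ->c
 * negative, so get() sees no free slots, and each subsequent put()
 * moves ->c back toward -1 as outstanding slots drain.
 */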
static void mark_killed(struct slot_map *m)
{
	spin_lock(&m->q.lock);
	m->c -= m->count + 1;
	spin_unlock(&m->q.lock);
}

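/*
 * Wait, uninterruptibly, until ->c returns to -1, i.e. until every
 * slot handed out before mark_killed() has been put() back, then
 * forget the bitmap.
 */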
static void run_down(struct slot_map *m)
{
	DEFINE_WAIT(wait);

	spin_lock(&m->q.lock);
	if (m->c != -1) {
		for (;;) {
			if (likely(list_empty(&wait.task_list)))
				__add_wait_queue_entry_tail(&m->q, &wait);
			set_current_state(TASK_UNINTERRUPTIBLE);

			if (m->c == -1)
				break;

			spin_unlock(&m->q.lock);
			schedule();
			spin_lock(&m->q.lock);
		}
		__remove_wait_queue(&m->q, &wait);
		__set_current_state(TASK_RUNNING);
	}
	m->map = NULL;
	spin_unlock(&m->q.lock);
}

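/*
 * Release a slot.  A 0 -> 1 transition of ->c wakes one exclusive
 * waiter in wait_for_free(); a transition to -1 means the last
 * outstanding slot came back after mark_killed() and wakes run_down().
 */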
static void put(struct slot_map *m, int slot)
{
	int v;

	spin_lock(&m->q.lock);
	__clear_bit(slot, m->map);
	v = ++m->c;
	if (unlikely(v == 1))	/* no free slots -> one free slot */
		wake_up_locked(&m->q);
	else if (unlikely(v == -1))	/* finished dying */
		wake_up_all_locked(&m->q);
	spin_unlock(&m->q.lock);
}

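/*
 * Sleep, with ->q.lock held on entry and exit, until a slot frees up
 * or the total budget of slot_timeout_secs runs out.  While no map is
 * installed (->c < 0), each individual sleep is capped at
 * ORANGEFS_BUFMAP_WAIT_TIMEOUT_SECS; if that capped sleep expires
 * with the map still missing, we give up early rather than resuming
 * the full budget.  Returns 0 on success, -EINTR on signal,
 * -ETIMEDOUT otherwise.
 */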
static int wait_for_free(struct slot_map *m)
{
	long left = slot_timeout_secs * HZ;
	DEFINE_WAIT(wait);

	do {
		long n = left, t;

		if (likely(list_empty(&wait.task_list)))
			__add_wait_queue_entry_tail_exclusive(&m->q, &wait);
		set_current_state(TASK_INTERRUPTIBLE);

		if (m->c > 0)
			break;

		if (m->c < 0) {
			/*
			 * we are waiting for the map to be installed;
			 * it had better show up soon, or we give up.
			 */
			if (n > ORANGEFS_BUFMAP_WAIT_TIMEOUT_SECS * HZ)
				n = ORANGEFS_BUFMAP_WAIT_TIMEOUT_SECS * HZ;
		}
		spin_unlock(&m->q.lock);
		t = schedule_timeout(n);
		spin_lock(&m->q.lock);
		if (unlikely(!t) && n != left && m->c < 0)
			left = t;
		else
			left = t + (left - n);
		if (unlikely(signal_pending(current)))
			left = -EINTR;
	} while (left > 0);

	if (!list_empty(&wait.task_list))
		list_del(&wait.task_list);
	else if (left <= 0 && waitqueue_active(&m->q))
		__wake_up_locked_key(&m->q, TASK_INTERRUPTIBLE, NULL);
	__set_current_state(TASK_RUNNING);

	if (likely(left > 0))
		return 0;

	return left < 0 ? -EINTR : -ETIMEDOUT;
}

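/*
 * Claim a free slot: returns the slot index, with the corresponding
 * bit set in ->map and ->c decremented, or -EINTR/-ETIMEDOUT from
 * wait_for_free().
 */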
static int get(struct slot_map *m)
{
	int res = 0;

	spin_lock(&m->q.lock);
	if (unlikely(m->c <= 0))
		res = wait_for_free(m);
	if (likely(!res)) {
		m->c--;
		res = find_first_zero_bit(m->map, m->count);
		__set_bit(res, m->map);
	}
	spin_unlock(&m->q.lock);
	return res;
}

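/*
 * The shared buffer area handed in by the client-core is total_size
 * bytes long and is carved into desc_count buffers of desc_size bytes
 * each (desc_shift == ilog2(desc_size)).  page_array holds every
 * pinned page; each descriptor's page_array points at the slice of
 * those pages backing that buffer.
 */
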
/* used to describe mapped buffers */
struct orangefs_bufmap_desc {
	void *uaddr;			/* user space address pointer */
	struct page **page_array;	/* array of mapped pages */
	int array_count;		/* size of above arrays */
	struct list_head list_link;
};

static struct orangefs_bufmap {
	int desc_size;
	int desc_shift;
	int desc_count;
	int total_size;
	int page_count;

	struct page **page_array;
	struct orangefs_bufmap_desc *desc_array;

	/* bitmap to track usage of buffer descriptors */
	unsigned long *buffer_index_array;

	/* bitmap to track usage of buffer descriptors for readdir */
#define N DIV_ROUND_UP(ORANGEFS_READDIR_DEFAULT_DESC_COUNT, BITS_PER_LONG)
	unsigned long readdir_index_array[N];
#undef N
} *__orangefs_bufmap;

static DEFINE_SPINLOCK(orangefs_bufmap_lock);

static void
orangefs_bufmap_unmap(struct orangefs_bufmap *bufmap)
{
	int i;

	for (i = 0; i < bufmap->page_count; i++)
		put_page(bufmap->page_array[i]);
}

static void
orangefs_bufmap_free(struct orangefs_bufmap *bufmap)
{
	kfree(bufmap->page_array);
	kfree(bufmap->desc_array);
	kfree(bufmap->buffer_index_array);
	kfree(bufmap);
}

/*
 * XXX: Can the size and shift change while the caller gives up the
 * XXX: lock between calling this and doing something useful?
 */

int orangefs_bufmap_size_query(void)
{
	struct orangefs_bufmap *bufmap;
	int size = 0;

	spin_lock(&orangefs_bufmap_lock);
	bufmap = __orangefs_bufmap;
	if (bufmap)
		size = bufmap->desc_size;
	spin_unlock(&orangefs_bufmap_lock);
	return size;
}

int orangefs_bufmap_shift_query(void)
{
	struct orangefs_bufmap *bufmap;
	int shift = 0;

	spin_lock(&orangefs_bufmap_lock);
	bufmap = __orangefs_bufmap;
	if (bufmap)
		shift = bufmap->desc_shift;
	spin_unlock(&orangefs_bufmap_lock);
	return shift;
}

/*
 * orangefs_get_bufmap_init
 *
 * If bufmap_init is 1, then the shared memory system, including the
 * buffer_index_array, is available. Otherwise, it is not.
 *
 * returns the value of bufmap_init
 */
int orangefs_get_bufmap_init(void)
{
	return __orangefs_bufmap ? 1 : 0;
}
229
230
static struct orangefs_bufmap *
orangefs_bufmap_alloc(struct ORANGEFS_dev_map_desc *user_desc)
{
	struct orangefs_bufmap *bufmap;

	bufmap = kzalloc(sizeof(*bufmap), GFP_KERNEL);
	if (!bufmap)
		goto out;

	bufmap->total_size = user_desc->total_size;
	bufmap->desc_count = user_desc->count;
	bufmap->desc_size = user_desc->size;
	bufmap->desc_shift = ilog2(bufmap->desc_size);

	/* one bit per descriptor */
	bufmap->buffer_index_array =
		kcalloc(BITS_TO_LONGS(bufmap->desc_count),
			sizeof(unsigned long), GFP_KERNEL);
	if (!bufmap->buffer_index_array) {
		gossip_err("orangefs: could not allocate %d buffer indices\n",
			   bufmap->desc_count);
		goto out_free_bufmap;
	}

	bufmap->desc_array =
		kcalloc(bufmap->desc_count, sizeof(struct orangefs_bufmap_desc),
			GFP_KERNEL);
	if (!bufmap->desc_array) {
		gossip_err("orangefs: could not allocate %d descriptors\n",
			   bufmap->desc_count);
		goto out_free_index_array;
	}

	bufmap->page_count = bufmap->total_size / PAGE_SIZE;

	/* allocate storage to track our page mappings */
	bufmap->page_array =
		kcalloc(bufmap->page_count, sizeof(struct page *), GFP_KERNEL);
	if (!bufmap->page_array)
		goto out_free_desc_array;

	return bufmap;

out_free_desc_array:
	kfree(bufmap->desc_array);
out_free_index_array:
	kfree(bufmap->buffer_index_array);
out_free_bufmap:
	kfree(bufmap);
out:
	return NULL;
}

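/*
 * Pin the client-core's buffer area with get_user_pages_fast() and
 * carve the pinned pages into desc_count descriptors of
 * pages_per_desc pages each.  Returns -ENOMEM if fewer pages than
 * expected could be pinned.
 */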
static int
orangefs_bufmap_map(struct orangefs_bufmap *bufmap,
		    struct ORANGEFS_dev_map_desc *user_desc)
{
	int pages_per_desc = bufmap->desc_size / PAGE_SIZE;
	int offset = 0, ret, i;

	/* map the pages */
	ret = get_user_pages_fast((unsigned long)user_desc->ptr,
				  bufmap->page_count, 1, bufmap->page_array);

	if (ret < 0)
		return ret;

	if (ret != bufmap->page_count) {
		gossip_err("orangefs error: asked for %d pages, only got %d.\n",
			   bufmap->page_count, ret);

		for (i = 0; i < ret; i++) {
			SetPageError(bufmap->page_array[i]);
			put_page(bufmap->page_array[i]);
		}
		return -ENOMEM;
	}

	/*
	 * ideally we want to get kernel space pointers for each page, but
	 * we can't kmap that many pages at once if highmem is being used.
	 * so instead, we just kmap/kunmap the page address each time the
	 * kaddr is needed.
	 */
	for (i = 0; i < bufmap->page_count; i++)
		flush_dcache_page(bufmap->page_array[i]);

	/* build a list of available descriptors */
	for (offset = 0, i = 0; i < bufmap->desc_count; i++) {
		bufmap->desc_array[i].page_array = &bufmap->page_array[offset];
		bufmap->desc_array[i].array_count = pages_per_desc;
		bufmap->desc_array[i].uaddr =
			(user_desc->ptr + (i * pages_per_desc * PAGE_SIZE));
		offset += pages_per_desc;
	}

	return 0;
}

/*
 * orangefs_bufmap_initialize()
 *
 * initializes the mapped buffer interface
 *
 * returns 0 on success, -errno on failure
 */
int orangefs_bufmap_initialize(struct ORANGEFS_dev_map_desc *user_desc)
{
	struct orangefs_bufmap *bufmap;
	int ret = -EINVAL;

	gossip_debug(GOSSIP_BUFMAP_DEBUG,
		     "orangefs_bufmap_initialize: called (ptr ("
		     "%p) sz (%d) cnt(%d).\n",
		     user_desc->ptr,
		     user_desc->size,
		     user_desc->count);

	if (user_desc->total_size < 0 ||
	    user_desc->size < 0 ||
	    user_desc->count < 0)
		goto out;

	/*
	 * sanity check alignment and size of buffer that caller wants to
	 * work with
	 */
	if (PAGE_ALIGN((unsigned long)user_desc->ptr) !=
	    (unsigned long)user_desc->ptr) {
		gossip_err("orangefs error: memory alignment (front). %p\n",
			   user_desc->ptr);
		goto out;
	}

	if (PAGE_ALIGN(((unsigned long)user_desc->ptr + user_desc->total_size))
	    != (unsigned long)(user_desc->ptr + user_desc->total_size)) {
		gossip_err("orangefs error: memory alignment (back).(%p + %d)\n",
			   user_desc->ptr,
			   user_desc->total_size);
		goto out;
	}

	if (user_desc->total_size != (user_desc->size * user_desc->count)) {
		gossip_err("orangefs error: user provided an oddly sized buffer: (%d, %d, %d)\n",
			   user_desc->total_size,
			   user_desc->size,
			   user_desc->count);
		goto out;
	}

	if ((user_desc->size % PAGE_SIZE) != 0) {
		gossip_err("orangefs error: bufmap size not page size divisible (%d).\n",
			   user_desc->size);
		goto out;
	}

	ret = -ENOMEM;
	bufmap = orangefs_bufmap_alloc(user_desc);
	if (!bufmap)
		goto out;

	ret = orangefs_bufmap_map(bufmap, user_desc);
	if (ret)
		goto out_free_bufmap;

	spin_lock(&orangefs_bufmap_lock);
	if (__orangefs_bufmap) {
		spin_unlock(&orangefs_bufmap_lock);
		gossip_err("orangefs: error: bufmap already initialized.\n");
		ret = -EINVAL;
		goto out_unmap_bufmap;
	}
	__orangefs_bufmap = bufmap;
	install(&rw_map,
		bufmap->desc_count,
		bufmap->buffer_index_array);
	install(&readdir_map,
		ORANGEFS_READDIR_DEFAULT_DESC_COUNT,
		bufmap->readdir_index_array);
	spin_unlock(&orangefs_bufmap_lock);

	gossip_debug(GOSSIP_BUFMAP_DEBUG,
		     "orangefs_bufmap_initialize: exiting normally\n");
	return 0;

out_unmap_bufmap:
	orangefs_bufmap_unmap(bufmap);
out_free_bufmap:
	orangefs_bufmap_free(bufmap);
out:
	return ret;
}

/*
 * orangefs_bufmap_finalize()
 *
 * shuts down the mapped buffer interface and releases any resources
 * associated with it
 *
 * no return value
 */
void orangefs_bufmap_finalize(void)
{
	struct orangefs_bufmap *bufmap = __orangefs_bufmap;

	if (!bufmap)
		return;
	gossip_debug(GOSSIP_BUFMAP_DEBUG, "orangefs_bufmap_finalize: called\n");
	mark_killed(&rw_map);
	mark_killed(&readdir_map);
	gossip_debug(GOSSIP_BUFMAP_DEBUG,
		     "orangefs_bufmap_finalize: exiting normally\n");
}

void orangefs_bufmap_run_down(void)
{
	struct orangefs_bufmap *bufmap = __orangefs_bufmap;

	if (!bufmap)
		return;
	run_down(&rw_map);
	run_down(&readdir_map);
	spin_lock(&orangefs_bufmap_lock);
	__orangefs_bufmap = NULL;
	spin_unlock(&orangefs_bufmap_lock);
	orangefs_bufmap_unmap(bufmap);
	orangefs_bufmap_free(bufmap);
}

/*
 * orangefs_bufmap_get()
 *
 * gets a free mapped buffer descriptor, will sleep until one becomes
 * available if necessary
 *
 * returns slot on success, -errno on failure
 */
int orangefs_bufmap_get(void)
{
	return get(&rw_map);
}

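/*
 * Typical caller pattern, as a sketch (error handling and the
 * client-core round trip elided):
 *
 *	int slot = orangefs_bufmap_get();
 *	if (slot < 0)
 *		return slot;
 *	...fill the slot, hand its index to the client-core...
 *	orangefs_bufmap_put(slot);
 */
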
/*
 * orangefs_bufmap_put()
 *
 * returns a mapped buffer descriptor to the collection
 *
 * no return value
 */
void orangefs_bufmap_put(int buffer_index)
{
	put(&rw_map, buffer_index);
}

/*
 * orangefs_readdir_index_get()
 *
 * gets a free descriptor, will sleep until one becomes
 * available if necessary.
 * Although the readdir buffers are not mapped into kernel space
 * we could do that at a later point in time. Regardless, these
 * indices are used by the client-core.
 *
 * returns slot on success, -errno on failure
 */
int orangefs_readdir_index_get(void)
{
	return get(&readdir_map);
}

void orangefs_readdir_index_put(int buffer_index)
{
	put(&readdir_map, buffer_index);
}

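/*
 * A note on the two copy helpers below: "size" is trusted to fit
 * within the chosen descriptor (at most desc_size bytes); the loops
 * walk the descriptor's page_array one page at a time and do not
 * bounds-check against array_count.
 */
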
/*
 * we've been handed an iovec, we need to copy it to
 * the shared memory descriptor at "buffer_index".
 */
int orangefs_bufmap_copy_from_iovec(struct iov_iter *iter,
				    int buffer_index,
				    size_t size)
{
	struct orangefs_bufmap_desc *to;
	int i;

	gossip_debug(GOSSIP_BUFMAP_DEBUG,
		     "%s: buffer_index:%d: size:%zu:\n",
		     __func__, buffer_index, size);

	to = &__orangefs_bufmap->desc_array[buffer_index];
	for (i = 0; size; i++) {
		struct page *page = to->page_array[i];
		size_t n = size;

		if (n > PAGE_SIZE)
			n = PAGE_SIZE;
		if (copy_page_from_iter(page, 0, n, iter) != n)
			return -EFAULT;
		size -= n;
	}
	return 0;
}

/*
 * we've been handed an iovec, we need to fill it from
 * the shared memory descriptor at "buffer_index".
 */
int orangefs_bufmap_copy_to_iovec(struct iov_iter *iter,
				  int buffer_index,
				  size_t size)
{
	struct orangefs_bufmap_desc *from;
	int i;

	from = &__orangefs_bufmap->desc_array[buffer_index];
	gossip_debug(GOSSIP_BUFMAP_DEBUG,
		     "%s: buffer_index:%d: size:%zu:\n",
		     __func__, buffer_index, size);

	for (i = 0; size; i++) {
		struct page *page = from->page_array[i];
		size_t n = size;

		if (n > PAGE_SIZE)
			n = PAGE_SIZE;
		n = copy_page_to_iter(page, 0, n, iter);
		if (!n)
			return -EFAULT;
		size -= n;
	}
	return 0;
}