// SPDX-License-Identifier: GPL-2.0
/*
 * (C) 2001 Clemson University and The University of Chicago
 *
 * See COPYING in top-level directory.
 */
#include "protocol.h"
#include "orangefs-kernel.h"
#include "orangefs-bufmap.h"

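/*
 * A slot_map doles out indices into a bitmap of buffer slots.  The
 * counter c is the number of free slots while a map is installed and
 * -1 while none is.  mark_killed() subtracts count + 1, pushing c
 * below -1 so no new slots are handed out; each put() then increments
 * c, and c arriving back at -1 means every outstanding slot has been
 * returned ("finished dying").
 */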
struct slot_map {
	int c;
	wait_queue_head_t q;
	int count;
	unsigned long *map;
};

static struct slot_map rw_map = {
	.c = -1,
	.q = __WAIT_QUEUE_HEAD_INITIALIZER(rw_map.q)
};
static struct slot_map readdir_map = {
	.c = -1,
	.q = __WAIT_QUEUE_HEAD_INITIALIZER(readdir_map.q)
};

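/*
 * Attach a bitmap to a slot_map and wake every waiter; called under
 * orangefs_bufmap_lock when a new bufmap is installed.
 */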
static void install(struct slot_map *m, int count, unsigned long *map)
{
	spin_lock(&m->q.lock);
	m->c = m->count = count;
	m->map = map;
	wake_up_all_locked(&m->q);
	spin_unlock(&m->q.lock);
}

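/*
 * Begin teardown: push c below -1 so get() can no longer succeed,
 * while put() can still return outstanding slots.
 */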
static void mark_killed(struct slot_map *m)
{
	spin_lock(&m->q.lock);
	m->c -= m->count + 1;
	spin_unlock(&m->q.lock);
}

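/*
 * Wait (uninterruptibly) for c to climb back to -1, i.e. for the last
 * outstanding slot to be put back, then detach the bitmap.
 */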
static void run_down(struct slot_map *m)
{
	DEFINE_WAIT(wait);

	spin_lock(&m->q.lock);
	if (m->c != -1) {
		for (;;) {
			if (likely(list_empty(&wait.entry)))
				__add_wait_queue_entry_tail(&m->q, &wait);
			set_current_state(TASK_UNINTERRUPTIBLE);

			if (m->c == -1)
				break;

			spin_unlock(&m->q.lock);
			schedule();
			spin_lock(&m->q.lock);
		}
		__remove_wait_queue(&m->q, &wait);
		__set_current_state(TASK_RUNNING);
	}
	m->map = NULL;
	spin_unlock(&m->q.lock);
}

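/*
 * Return a slot.  A positive result wakes one waiter in get(); a
 * result of exactly -1 means a dying map has drained, so wake
 * everyone stuck in run_down().
 */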
static void put(struct slot_map *m, int slot)
{
	int v;

	spin_lock(&m->q.lock);
	__clear_bit(slot, m->map);
	v = ++m->c;
	if (v > 0)
		wake_up_locked(&m->q);
	if (unlikely(v == -1))	/* finished dying */
		wake_up_all_locked(&m->q);
	spin_unlock(&m->q.lock);
}

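/*
 * Wait for a free slot, called and returning with m->q.lock held (the
 * lock is dropped across each sleep).  The overall budget is
 * slot_timeout_secs, but while no map is installed (c < 0) each
 * individual sleep is capped at ORANGEFS_BUFMAP_WAIT_TIMEOUT_SECS so
 * that a missing client-core is noticed promptly.  Returns 0 once a
 * slot is free, -EINTR on a signal, or -ETIMEDOUT.
 */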
static int wait_for_free(struct slot_map *m)
{
	long left = slot_timeout_secs * HZ;
	DEFINE_WAIT(wait);

	do {
		long n = left, t;

		if (likely(list_empty(&wait.entry)))
			__add_wait_queue_entry_tail_exclusive(&m->q, &wait);
		set_current_state(TASK_INTERRUPTIBLE);

		if (m->c > 0)
			break;

		if (m->c < 0) {
			/* we are waiting for the map to be installed */
			/* it had better be there soon, or we go away */
			if (n > ORANGEFS_BUFMAP_WAIT_TIMEOUT_SECS * HZ)
				n = ORANGEFS_BUFMAP_WAIT_TIMEOUT_SECS * HZ;
		}
		spin_unlock(&m->q.lock);
		t = schedule_timeout(n);
		spin_lock(&m->q.lock);
		if (unlikely(!t) && n != left && m->c < 0)
			left = t;
		else
			left = t + (left - n);
		if (signal_pending(current))
			left = -EINTR;
	} while (left > 0);

	if (!list_empty(&wait.entry))
		list_del(&wait.entry);
	else if (left <= 0 && waitqueue_active(&m->q))
		__wake_up_locked_key(&m->q, TASK_INTERRUPTIBLE, NULL);
	__set_current_state(TASK_RUNNING);

	if (likely(left > 0))
		return 0;

	return left < 0 ? -EINTR : -ETIMEDOUT;
}

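/*
 * Claim a free slot: wait if none is free, then take the first clear
 * bit in the bitmap and return its index.
 */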
static int get(struct slot_map *m)
{
	int res = 0;

	spin_lock(&m->q.lock);
	if (unlikely(m->c <= 0))
		res = wait_for_free(m);
	if (likely(!res)) {
		m->c--;
		res = find_first_zero_bit(m->map, m->count);
		__set_bit(res, m->map);
	}
	spin_unlock(&m->q.lock);
	return res;
}

/* used to describe mapped buffers */
struct orangefs_bufmap_desc {
	void __user *uaddr;		/* user space address pointer */
	struct page **page_array;	/* array of mapped pages */
	int array_count;		/* size of above arrays */
	struct list_head list_link;
};

static struct orangefs_bufmap {
	int desc_size;
	int desc_shift;
	int desc_count;
	int total_size;
	int page_count;

	struct page **page_array;
	struct orangefs_bufmap_desc *desc_array;

	/* array to track usage of buffer descriptors */
	unsigned long *buffer_index_array;

	/* array to track usage of buffer descriptors for readdir */
#define N DIV_ROUND_UP(ORANGEFS_READDIR_DEFAULT_DESC_COUNT, BITS_PER_LONG)
	unsigned long readdir_index_array[N];
#undef N
} *__orangefs_bufmap;

static DEFINE_SPINLOCK(orangefs_bufmap_lock);

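/* Drop the page references taken by get_user_pages_fast(). */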
static void
orangefs_bufmap_unmap(struct orangefs_bufmap *bufmap)
{
	int i;

	for (i = 0; i < bufmap->page_count; i++)
		put_page(bufmap->page_array[i]);
}

static void
orangefs_bufmap_free(struct orangefs_bufmap *bufmap)
{
	kfree(bufmap->page_array);
	kfree(bufmap->desc_array);
	kfree(bufmap->buffer_index_array);
	kfree(bufmap);
}

/*
 * XXX: Can the size and shift change while the caller gives up the
 * XXX: lock between calling this and doing something useful?
 */

int orangefs_bufmap_size_query(void)
{
	struct orangefs_bufmap *bufmap;
	int size = 0;

	spin_lock(&orangefs_bufmap_lock);
	bufmap = __orangefs_bufmap;
	if (bufmap)
		size = bufmap->desc_size;
	spin_unlock(&orangefs_bufmap_lock);
	return size;
}

int orangefs_bufmap_shift_query(void)
{
	struct orangefs_bufmap *bufmap;
	int shift = 0;

	spin_lock(&orangefs_bufmap_lock);
	bufmap = __orangefs_bufmap;
	if (bufmap)
		shift = bufmap->desc_shift;
	spin_unlock(&orangefs_bufmap_lock);
	return shift;
}

static DECLARE_WAIT_QUEUE_HEAD(bufmap_waitq);
static DECLARE_WAIT_QUEUE_HEAD(readdir_waitq);

static struct orangefs_bufmap *
orangefs_bufmap_alloc(struct ORANGEFS_dev_map_desc *user_desc)
{
	struct orangefs_bufmap *bufmap;

	bufmap = kzalloc(sizeof(*bufmap), GFP_KERNEL);
	if (!bufmap)
		goto out;

	bufmap->total_size = user_desc->total_size;
	bufmap->desc_count = user_desc->count;
	bufmap->desc_size = user_desc->size;
	bufmap->desc_shift = ilog2(bufmap->desc_size);

	bufmap->buffer_index_array =
		kzalloc(DIV_ROUND_UP(bufmap->desc_count, BITS_PER_LONG), GFP_KERNEL);
	if (!bufmap->buffer_index_array)
		goto out_free_bufmap;

	bufmap->desc_array =
		kcalloc(bufmap->desc_count, sizeof(struct orangefs_bufmap_desc),
			GFP_KERNEL);
	if (!bufmap->desc_array)
		goto out_free_index_array;

	bufmap->page_count = bufmap->total_size / PAGE_SIZE;

	/* allocate storage to track our page mappings */
	bufmap->page_array =
		kcalloc(bufmap->page_count, sizeof(struct page *), GFP_KERNEL);
	if (!bufmap->page_array)
		goto out_free_desc_array;

	return bufmap;

out_free_desc_array:
	kfree(bufmap->desc_array);
out_free_index_array:
	kfree(bufmap->buffer_index_array);
out_free_bufmap:
	kfree(bufmap);
out:
	return NULL;
}

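/*
 * Pin the client-core's buffer in memory with get_user_pages_fast()
 * and carve the pinned pages into desc_count descriptors of
 * pages_per_desc pages each; the pages stay pinned until
 * orangefs_bufmap_unmap().
 */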
static int
orangefs_bufmap_map(struct orangefs_bufmap *bufmap,
		struct ORANGEFS_dev_map_desc *user_desc)
{
	int pages_per_desc = bufmap->desc_size / PAGE_SIZE;
	int offset = 0, ret, i;

	/* map the pages */
	ret = get_user_pages_fast((unsigned long)user_desc->ptr,
			bufmap->page_count, FOLL_WRITE, bufmap->page_array);

	if (ret < 0)
		return ret;

	if (ret != bufmap->page_count) {
		gossip_err("orangefs error: asked for %d pages, only got %d.\n",
			   bufmap->page_count, ret);

		for (i = 0; i < ret; i++) {
			SetPageError(bufmap->page_array[i]);
			put_page(bufmap->page_array[i]);
		}
		return -ENOMEM;
	}

	/*
	 * ideally we want to get kernel space pointers for each page, but
	 * we can't kmap that many pages at once if highmem is being used.
	 * so instead, we just kmap/kunmap the page address each time the
	 * kaddr is needed.
	 */
	for (i = 0; i < bufmap->page_count; i++)
		flush_dcache_page(bufmap->page_array[i]);

	/* build a list of available descriptors */
	for (offset = 0, i = 0; i < bufmap->desc_count; i++) {
		bufmap->desc_array[i].page_array = &bufmap->page_array[offset];
		bufmap->desc_array[i].array_count = pages_per_desc;
		bufmap->desc_array[i].uaddr =
		    (user_desc->ptr + (i * pages_per_desc * PAGE_SIZE));
		offset += pages_per_desc;
	}

	return 0;
}

/*
 * orangefs_bufmap_initialize()
 *
 * initializes the mapped buffer interface
 *
 * returns 0 on success, -errno on failure
 */
int orangefs_bufmap_initialize(struct ORANGEFS_dev_map_desc *user_desc)
{
	struct orangefs_bufmap *bufmap;
	int ret = -EINVAL;

	gossip_debug(GOSSIP_BUFMAP_DEBUG,
		     "orangefs_bufmap_initialize: called (ptr (%p) sz (%d) cnt(%d).\n",
		     user_desc->ptr,
		     user_desc->size,
		     user_desc->count);

	if (user_desc->total_size < 0 ||
	    user_desc->size < 0 ||
	    user_desc->count < 0)
		goto out;

	/*
	 * sanity check alignment and size of buffer that caller wants to
	 * work with
	 */
	if (PAGE_ALIGN((unsigned long)user_desc->ptr) !=
	    (unsigned long)user_desc->ptr) {
		gossip_err("orangefs error: memory alignment (front). %p\n",
			   user_desc->ptr);
		goto out;
	}

	if (PAGE_ALIGN(((unsigned long)user_desc->ptr + user_desc->total_size))
	    != (unsigned long)(user_desc->ptr + user_desc->total_size)) {
		gossip_err("orangefs error: memory alignment (back).(%p + %d)\n",
			   user_desc->ptr,
			   user_desc->total_size);
		goto out;
	}

	if (user_desc->total_size != (user_desc->size * user_desc->count)) {
		gossip_err("orangefs error: user provided an oddly sized buffer: (%d, %d, %d)\n",
			   user_desc->total_size,
			   user_desc->size,
			   user_desc->count);
		goto out;
	}

	if ((user_desc->size % PAGE_SIZE) != 0) {
		gossip_err("orangefs error: bufmap size not page size divisible (%d).\n",
			   user_desc->size);
		goto out;
	}

	ret = -ENOMEM;
	bufmap = orangefs_bufmap_alloc(user_desc);
	if (!bufmap)
		goto out;

	ret = orangefs_bufmap_map(bufmap, user_desc);
	if (ret)
		goto out_free_bufmap;

	spin_lock(&orangefs_bufmap_lock);
	if (__orangefs_bufmap) {
		spin_unlock(&orangefs_bufmap_lock);
		gossip_err("orangefs: error: bufmap already initialized.\n");
		ret = -EINVAL;
		goto out_unmap_bufmap;
	}
	__orangefs_bufmap = bufmap;
	install(&rw_map,
		bufmap->desc_count,
		bufmap->buffer_index_array);
	install(&readdir_map,
		ORANGEFS_READDIR_DEFAULT_DESC_COUNT,
		bufmap->readdir_index_array);
	spin_unlock(&orangefs_bufmap_lock);

	gossip_debug(GOSSIP_BUFMAP_DEBUG,
		     "orangefs_bufmap_initialize: exiting normally\n");
	return 0;

out_unmap_bufmap:
	orangefs_bufmap_unmap(bufmap);
out_free_bufmap:
	orangefs_bufmap_free(bufmap);
out:
	return ret;
}

/*
 * orangefs_bufmap_finalize()
 *
 * shuts down the mapped buffer interface and releases any resources
 * associated with it
 *
 * no return value
 */
void orangefs_bufmap_finalize(void)
{
	struct orangefs_bufmap *bufmap = __orangefs_bufmap;

	if (!bufmap)
		return;
	gossip_debug(GOSSIP_BUFMAP_DEBUG, "orangefs_bufmap_finalize: called\n");
	mark_killed(&rw_map);
	mark_killed(&readdir_map);
	gossip_debug(GOSSIP_BUFMAP_DEBUG,
		     "orangefs_bufmap_finalize: exiting normally\n");
}

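/*
 * Second half of teardown: after orangefs_bufmap_finalize() has marked
 * both maps killed, wait for outstanding slots to drain, then unpin
 * and free the bufmap.
 */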
void orangefs_bufmap_run_down(void)
{
	struct orangefs_bufmap *bufmap = __orangefs_bufmap;

	if (!bufmap)
		return;
	run_down(&rw_map);
	run_down(&readdir_map);
	spin_lock(&orangefs_bufmap_lock);
	__orangefs_bufmap = NULL;
	spin_unlock(&orangefs_bufmap_lock);
	orangefs_bufmap_unmap(bufmap);
	orangefs_bufmap_free(bufmap);
}

/*
 * orangefs_bufmap_get()
 *
 * gets a free mapped buffer descriptor, will sleep until one becomes
 * available if necessary
 *
 * returns slot on success, -errno on failure
 */
int orangefs_bufmap_get(void)
{
	return get(&rw_map);
}

/*
 * orangefs_bufmap_put()
 *
 * returns a mapped buffer descriptor to the collection
 *
 * no return value
 */
void orangefs_bufmap_put(int buffer_index)
{
	put(&rw_map, buffer_index);
}

/*
 * orangefs_readdir_index_get()
 *
 * gets a free descriptor, will sleep until one becomes
 * available if necessary.
 * Although the readdir buffers are not mapped into kernel space
 * we could do that at a later point of time. Regardless, these
 * indices are used by the client-core.
 *
 * returns slot on success, -errno on failure
 */
int orangefs_readdir_index_get(void)
{
	return get(&readdir_map);
}

void orangefs_readdir_index_put(int buffer_index)
{
	put(&readdir_map, buffer_index);
}

/*
 * we've been handed an iovec, we need to copy it to
 * the shared memory descriptor at "buffer_index".
 */
int orangefs_bufmap_copy_from_iovec(struct iov_iter *iter,
				    int buffer_index,
				    size_t size)
{
	struct orangefs_bufmap_desc *to;
	int i;

	gossip_debug(GOSSIP_BUFMAP_DEBUG,
		     "%s: buffer_index:%d: size:%zu:\n",
		     __func__, buffer_index, size);

	to = &__orangefs_bufmap->desc_array[buffer_index];
	for (i = 0; size; i++) {
		struct page *page = to->page_array[i];
		size_t n = size;

		if (n > PAGE_SIZE)
			n = PAGE_SIZE;
		if (copy_page_from_iter(page, 0, n, iter) != n)
			return -EFAULT;
		size -= n;
	}
	return 0;
}

/*
 * we've been handed an iovec, we need to fill it from
 * the shared memory descriptor at "buffer_index".
 */
int orangefs_bufmap_copy_to_iovec(struct iov_iter *iter,
				  int buffer_index,
				  size_t size)
{
	struct orangefs_bufmap_desc *from;
	int i;

	from = &__orangefs_bufmap->desc_array[buffer_index];
	gossip_debug(GOSSIP_BUFMAP_DEBUG,
		     "%s: buffer_index:%d: size:%zu:\n",
		     __func__, buffer_index, size);

	for (i = 0; size; i++) {
		struct page *page = from->page_array[i];
		size_t n = size;

		if (n > PAGE_SIZE)
			n = PAGE_SIZE;
		n = copy_page_to_iter(page, 0, n, iter);
		if (!n)
			return -EFAULT;
		size -= n;
	}
	return 0;
}

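/*
 * Copy one full page from the shared buffer at (buffer_index,
 * slot_index) into the kernel page at page_to.
 */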
void orangefs_bufmap_page_fill(void *page_to,
			       int buffer_index,
			       int slot_index)
{
	struct orangefs_bufmap_desc *from;
	void *page_from;

	from = &__orangefs_bufmap->desc_array[buffer_index];
	page_from = kmap_atomic(from->page_array[slot_index]);
	memcpy(page_to, page_from, PAGE_SIZE);
	kunmap_atomic(page_from);
}