// SPDX-License-Identifier: GPL-2.0
/*
 * (C) 2001 Clemson University and The University of Chicago
 * Copyright 2018 Omnibond Systems, L.L.C.
 *
 * See COPYING in top-level directory.
 */

/*
 * Linux VFS inode operations.
 */

#include <linux/bvec.h>
#include "protocol.h"
#include "orangefs-kernel.h"
#include "orangefs-bufmap.h"

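/*
 * Buffered writes are tracked with a struct orangefs_write_range attached
 * to the page through page_private().  It records the byte range (pos, len)
 * that is actually dirty and the fsuid/fsgid of the writer, so that
 * writeback can be performed with the credentials of the task that dirtied
 * the page.
 */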
static int orangefs_writepage_locked(struct page *page,
		struct writeback_control *wbc)
{
	struct inode *inode = page->mapping->host;
	struct orangefs_write_range *wr = NULL;
	struct iov_iter iter;
	struct bio_vec bv;
	size_t len, wlen;
	ssize_t ret;
	loff_t off;

	set_page_writeback(page);

	len = i_size_read(inode);
	if (PagePrivate(page)) {
		wr = (struct orangefs_write_range *)page_private(page);
		WARN_ON(wr->pos >= len);
		off = wr->pos;
		if (off + wr->len > len)
			wlen = len - off;
		else
			wlen = wr->len;
	} else {
		WARN_ON(1);
		off = page_offset(page);
		if (off + PAGE_SIZE > len)
			wlen = len - off;
		else
			wlen = PAGE_SIZE;
	}
	/* Should've been handled in orangefs_invalidatepage. */
	WARN_ON(off == len || off + wlen > len);

	bv.bv_page = page;
	bv.bv_len = wlen;
	bv.bv_offset = off % PAGE_SIZE;
	WARN_ON(wlen == 0);
	iov_iter_bvec(&iter, WRITE, &bv, 1, wlen);

	ret = wait_for_direct_io(ORANGEFS_IO_WRITE, inode, &off, &iter, wlen,
	    len, wr, NULL, NULL);
	if (ret < 0) {
		SetPageError(page);
		mapping_set_error(page->mapping, ret);
	} else {
		ret = 0;
	}
	kfree(detach_page_private(page));
	return ret;
}

static int orangefs_writepage(struct page *page, struct writeback_control *wbc)
{
	int ret;
	ret = orangefs_writepage_locked(page, wbc);
	unlock_page(page);
	end_page_writeback(page);
	return ret;
}

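/*
 * State carried across write_cache_pages() callbacks: dirty pages whose
 * write ranges are contiguous and belong to the same fsuid/fsgid are
 * collected here and sent to the server as a single I/O.
 */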
struct orangefs_writepages {
	loff_t off;
	size_t len;
	kuid_t uid;
	kgid_t gid;
	int maxpages;
	int npages;
	struct page **pages;
	struct bio_vec *bv;
};

static int orangefs_writepages_work(struct orangefs_writepages *ow,
		struct writeback_control *wbc)
{
	struct inode *inode = ow->pages[0]->mapping->host;
	struct orangefs_write_range *wrp, wr;
	struct iov_iter iter;
	ssize_t ret;
	size_t len;
	loff_t off;
	int i;

	len = i_size_read(inode);

	for (i = 0; i < ow->npages; i++) {
		set_page_writeback(ow->pages[i]);
		ow->bv[i].bv_page = ow->pages[i];
		ow->bv[i].bv_len = min(page_offset(ow->pages[i]) + PAGE_SIZE,
		    ow->off + ow->len) -
		    max(ow->off, page_offset(ow->pages[i]));
		if (i == 0)
			ow->bv[i].bv_offset = ow->off -
			    page_offset(ow->pages[i]);
		else
			ow->bv[i].bv_offset = 0;
	}
	iov_iter_bvec(&iter, WRITE, ow->bv, ow->npages, ow->len);

	WARN_ON(ow->off >= len);
	if (ow->off + ow->len > len)
		ow->len = len - ow->off;

	off = ow->off;
	wr.uid = ow->uid;
	wr.gid = ow->gid;
	ret = wait_for_direct_io(ORANGEFS_IO_WRITE, inode, &off, &iter, ow->len,
	    0, &wr, NULL, NULL);
	if (ret < 0) {
		for (i = 0; i < ow->npages; i++) {
			SetPageError(ow->pages[i]);
			mapping_set_error(ow->pages[i]->mapping, ret);
			if (PagePrivate(ow->pages[i])) {
				wrp = (struct orangefs_write_range *)
				    page_private(ow->pages[i]);
				ClearPagePrivate(ow->pages[i]);
				put_page(ow->pages[i]);
				kfree(wrp);
			}
			end_page_writeback(ow->pages[i]);
			unlock_page(ow->pages[i]);
		}
	} else {
		ret = 0;
		for (i = 0; i < ow->npages; i++) {
			if (PagePrivate(ow->pages[i])) {
				wrp = (struct orangefs_write_range *)
				    page_private(ow->pages[i]);
				ClearPagePrivate(ow->pages[i]);
				put_page(ow->pages[i]);
				kfree(wrp);
			}
			end_page_writeback(ow->pages[i]);
			unlock_page(ow->pages[i]);
		}
	}
	return ret;
}

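/*
 * Callback for write_cache_pages().  A page starts a new batch when none is
 * pending, or joins the current batch while its write range is contiguous
 * with it and carries the same fsuid/fsgid; otherwise the batch is flushed
 * with orangefs_writepages_work() and the non-matching page is written out
 * on its own with orangefs_writepage_locked().
 */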
static int orangefs_writepages_callback(struct page *page,
		struct writeback_control *wbc, void *data)
{
	struct orangefs_writepages *ow = data;
	struct orangefs_write_range *wr;
	int ret;

	if (!PagePrivate(page)) {
		unlock_page(page);
		/* It's not private so there's nothing to write, right? */
		printk("writepages_callback not private!\n");
		BUG();
		return 0;
	}
	wr = (struct orangefs_write_range *)page_private(page);

	ret = -1;
	if (ow->npages == 0) {
		ow->off = wr->pos;
		ow->len = wr->len;
		ow->uid = wr->uid;
		ow->gid = wr->gid;
		ow->pages[ow->npages++] = page;
		ret = 0;
		goto done;
	}
	if (!uid_eq(ow->uid, wr->uid) || !gid_eq(ow->gid, wr->gid)) {
		orangefs_writepages_work(ow, wbc);
		ow->npages = 0;
		ret = -1;
		goto done;
	}
	if (ow->off + ow->len == wr->pos) {
		ow->len += wr->len;
		ow->pages[ow->npages++] = page;
		ret = 0;
		goto done;
	}
done:
	if (ret == -1) {
		if (ow->npages) {
			orangefs_writepages_work(ow, wbc);
			ow->npages = 0;
		}
		ret = orangefs_writepage_locked(page, wbc);
		mapping_set_error(page->mapping, ret);
		unlock_page(page);
		end_page_writeback(page);
	} else {
		if (ow->npages == ow->maxpages) {
			orangefs_writepages_work(ow, wbc);
			ow->npages = 0;
		}
	}
	return ret;
}

static int orangefs_writepages(struct address_space *mapping,
		struct writeback_control *wbc)
{
	struct orangefs_writepages *ow;
	struct blk_plug plug;
	int ret;
	ow = kzalloc(sizeof(struct orangefs_writepages), GFP_KERNEL);
	if (!ow)
		return -ENOMEM;
	ow->maxpages = orangefs_bufmap_size_query()/PAGE_SIZE;
	ow->pages = kcalloc(ow->maxpages, sizeof(struct page *), GFP_KERNEL);
	if (!ow->pages) {
		kfree(ow);
		return -ENOMEM;
	}
	ow->bv = kcalloc(ow->maxpages, sizeof(struct bio_vec), GFP_KERNEL);
	if (!ow->bv) {
		kfree(ow->pages);
		kfree(ow);
		return -ENOMEM;
	}
	blk_start_plug(&plug);
	ret = write_cache_pages(mapping, wbc, orangefs_writepages_callback, ow);
	if (ow->npages)
		ret = orangefs_writepages_work(ow, wbc);
	blk_finish_plug(&plug);
	kfree(ow->pages);
	kfree(ow->bv);
	kfree(ow);
	return ret;
}

static int orangefs_launder_page(struct page *);

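/*
 * A single read from the server may return more than one page of data; the
 * surplus is left in the shared-memory buffer slot, so after the requested
 * page is filled, following pages that are not yet in the page cache are
 * populated directly from that slot.
 */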
static int orangefs_readpage(struct file *file, struct page *page)
{
	struct inode *inode = page->mapping->host;
	struct iov_iter iter;
	struct bio_vec bv;
	ssize_t ret;
	loff_t off; /* offset into this page */
	pgoff_t index; /* which page */
	struct page *next_page;
	char *kaddr;
	loff_t read_size;
	int buffer_index = -1; /* orangefs shared memory slot */
	int slot_index; /* index into slot */
	int remaining;

	/*
	 * Get up to this many bytes from Orangefs at a time and try
	 * to fill them into the page cache at once.  Tests with dd made
	 * this seem like a reasonable static number; if there were
	 * interest, perhaps it could be made settable through sysfs...
	 */
	read_size = 524288;

	if (PageDirty(page))
		orangefs_launder_page(page);

	off = page_offset(page);
	index = off >> PAGE_SHIFT;
	bv.bv_page = page;
	bv.bv_len = PAGE_SIZE;
	bv.bv_offset = 0;
	iov_iter_bvec(&iter, READ, &bv, 1, PAGE_SIZE);

	ret = wait_for_direct_io(ORANGEFS_IO_READ, inode, &off, &iter,
	    read_size, inode->i_size, NULL, &buffer_index, file);
	remaining = ret;
	/* this will only zero remaining unread portions of the page data */
	iov_iter_zero(~0U, &iter);
	/* takes care of potential aliasing */
	flush_dcache_page(page);
	if (ret < 0) {
		SetPageError(page);
		unlock_page(page);
		goto out;
	} else {
		SetPageUptodate(page);
		if (PageError(page))
			ClearPageError(page);
		ret = 0;
	}
	/* unlock the page after the ->readpage() routine completes */
	unlock_page(page);

	if (remaining > PAGE_SIZE) {
		slot_index = 0;
		while ((remaining - PAGE_SIZE) >= PAGE_SIZE) {
			remaining -= PAGE_SIZE;
			/*
			 * It is an optimization to try and fill more than one
			 * page... by now we've already gotten the single
			 * page we were after, if stuff doesn't seem to
			 * be going our way at this point just return
			 * and hope for the best.
			 *
			 * If we look for pages and they're already there is
			 * one reason to give up, and if they're not there
			 * and we can't create them is another reason.
			 */

			index++;
			slot_index++;
			next_page = find_get_page(inode->i_mapping, index);
			if (next_page) {
				gossip_debug(GOSSIP_FILE_DEBUG,
					"%s: found next page, quitting\n",
					__func__);
				put_page(next_page);
				goto out;
			}
			next_page = find_or_create_page(inode->i_mapping,
				index,
				GFP_KERNEL);
			/*
			 * I've never hit this, leave it as a printk for
			 * now so it will be obvious.
			 */
			if (!next_page) {
				printk("%s: can't create next page, quitting\n",
					__func__);
				goto out;
			}
			kaddr = kmap_atomic(next_page);
			orangefs_bufmap_page_fill(kaddr,
				buffer_index,
				slot_index);
			kunmap_atomic(kaddr);
			SetPageUptodate(next_page);
			unlock_page(next_page);
			put_page(next_page);
		}
	}

out:
	if (buffer_index != -1)
		orangefs_bufmap_put(buffer_index);
	return ret;
}

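/*
 * write_begin does not read the page in; it only records (or extends) the
 * dirty byte range in the page-private orangefs_write_range.  If an existing
 * range cannot be extended (different writer or non-adjacent offset), the
 * page is laundered first.  Short copies into non-uptodate pages are zeroed
 * in orangefs_write_end.
 */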
static int orangefs_write_begin(struct file *file,
		struct address_space *mapping,
		loff_t pos, unsigned len, unsigned flags, struct page **pagep,
		void **fsdata)
{
	struct orangefs_write_range *wr;
	struct page *page;
	pgoff_t index;
	int ret;

	index = pos >> PAGE_SHIFT;

	page = grab_cache_page_write_begin(mapping, index, flags);
	if (!page)
		return -ENOMEM;

	*pagep = page;

	if (PageDirty(page) && !PagePrivate(page)) {
		/*
		 * Should be impossible.  If it happens, launder the page
		 * since we don't know what's dirty.  This will WARN in
		 * orangefs_writepage_locked.
		 */
		ret = orangefs_launder_page(page);
		if (ret)
			return ret;
	}
	if (PagePrivate(page)) {
		struct orangefs_write_range *wr;
		wr = (struct orangefs_write_range *)page_private(page);
		if (wr->pos + wr->len == pos &&
		    uid_eq(wr->uid, current_fsuid()) &&
		    gid_eq(wr->gid, current_fsgid())) {
			wr->len += len;
			goto okay;
		} else {
			ret = orangefs_launder_page(page);
			if (ret)
				return ret;
		}
	}

	wr = kmalloc(sizeof *wr, GFP_KERNEL);
	if (!wr)
		return -ENOMEM;

	wr->pos = pos;
	wr->len = len;
	wr->uid = current_fsuid();
	wr->gid = current_fsgid();
	attach_page_private(page, wr);
okay:
	return 0;
}

static int orangefs_write_end(struct file *file, struct address_space *mapping,
    loff_t pos, unsigned len, unsigned copied, struct page *page, void *fsdata)
{
	struct inode *inode = page->mapping->host;
	loff_t last_pos = pos + copied;

	/*
	 * No need to use i_size_read() here, the i_size
	 * cannot change under us because we hold the i_mutex.
	 */
	if (last_pos > inode->i_size)
		i_size_write(inode, last_pos);

	/* zero the stale part of the page if we did a short copy */
	if (!PageUptodate(page)) {
		unsigned from = pos & (PAGE_SIZE - 1);
		if (copied < len) {
			zero_user(page, from + copied, len - copied);
		}
		/* Set fully written pages uptodate. */
		if (pos == page_offset(page) &&
		    (len == PAGE_SIZE || pos + len == inode->i_size)) {
			zero_user_segment(page, from + copied, PAGE_SIZE);
			SetPageUptodate(page);
		}
	}

	set_page_dirty(page);
	unlock_page(page);
	put_page(page);

	mark_inode_dirty_sync(file_inode(file));
	return copied;
}

static void orangefs_invalidatepage(struct page *page,
				 unsigned int offset,
				 unsigned int length)
{
	struct orangefs_write_range *wr;
	wr = (struct orangefs_write_range *)page_private(page);

	if (offset == 0 && length == PAGE_SIZE) {
		kfree(detach_page_private(page));
		return;
	/* write range entirely within invalidate range (or equal) */
	} else if (page_offset(page) + offset <= wr->pos &&
	    wr->pos + wr->len <= page_offset(page) + offset + length) {
		kfree(detach_page_private(page));
		/* XXX is this right? only caller in fs */
		cancel_dirty_page(page);
		return;
	/* invalidate range chops off end of write range */
	} else if (wr->pos < page_offset(page) + offset &&
	    wr->pos + wr->len <= page_offset(page) + offset + length &&
	    page_offset(page) + offset < wr->pos + wr->len) {
		size_t x;
		x = wr->pos + wr->len - (page_offset(page) + offset);
		WARN_ON(x > wr->len);
		wr->len -= x;
		wr->uid = current_fsuid();
		wr->gid = current_fsgid();
	/* invalidate range chops off beginning of write range */
	} else if (page_offset(page) + offset <= wr->pos &&
	    page_offset(page) + offset + length < wr->pos + wr->len &&
	    wr->pos < page_offset(page) + offset + length) {
		size_t x;
		x = page_offset(page) + offset + length - wr->pos;
		WARN_ON(x > wr->len);
		wr->pos += x;
		wr->len -= x;
		wr->uid = current_fsuid();
		wr->gid = current_fsgid();
	/* invalidate range entirely within write range (punch hole) */
	} else if (wr->pos < page_offset(page) + offset &&
	    page_offset(page) + offset + length < wr->pos + wr->len) {
		/* XXX what do we do here... should not WARN_ON */
		WARN_ON(1);
		/* punch hole */
		/*
		 * should we just ignore this and write it out anyway?
		 * it hardly makes sense
		 */
		return;
	/* non-overlapping ranges */
	} else {
		/* WARN if they do overlap */
		if (!((page_offset(page) + offset + length <= wr->pos) ^
		    (wr->pos + wr->len <= page_offset(page) + offset))) {
			WARN_ON(1);
			printk("invalidate range offset %llu length %u\n",
			    page_offset(page) + offset, length);
			printk("write range offset %llu length %zu\n",
			    wr->pos, wr->len);
		}
		return;
	}

	/*
	 * Above there are returns where wr is freed or where we WARN.
	 * Thus the following runs if wr was modified above.
	 */

	orangefs_launder_page(page);
}

static int orangefs_releasepage(struct page *page, gfp_t foo)
{
	return !PagePrivate(page);
}

static void orangefs_freepage(struct page *page)
{
	kfree(detach_page_private(page));
}

static int orangefs_launder_page(struct page *page)
{
	int r = 0;
	struct writeback_control wbc = {
		.sync_mode = WB_SYNC_ALL,
		.nr_to_write = 0,
	};
	wait_on_page_writeback(page);
	if (clear_page_dirty_for_io(page)) {
		r = orangefs_writepage_locked(page, &wbc);
		end_page_writeback(page);
	}
	return r;
}

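/*
 * Direct I/O loops over the iterator and hands the server one bufmap-sized
 * chunk at a time through wait_for_direct_io(); a short transfer ends the
 * loop and the number of bytes completed so far is returned.
 */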
static ssize_t orangefs_direct_IO(struct kiocb *iocb,
		struct iov_iter *iter)
{
	/*
	 * Comment from original do_readv_writev:
	 * Common entry point for read/write/readv/writev
	 * This function will dispatch it to either the direct I/O
	 * or buffered I/O path depending on the mount options and/or
	 * augmented/extended metadata attached to the file.
	 * Note: File extended attributes override any mount options.
	 */
	struct file *file = iocb->ki_filp;
	loff_t pos = iocb->ki_pos;
	enum ORANGEFS_io_type type = iov_iter_rw(iter) == WRITE ?
	    ORANGEFS_IO_WRITE : ORANGEFS_IO_READ;
	loff_t *offset = &pos;
	struct inode *inode = file->f_mapping->host;
	struct orangefs_inode_s *orangefs_inode = ORANGEFS_I(inode);
	struct orangefs_khandle *handle = &orangefs_inode->refn.khandle;
	size_t count = iov_iter_count(iter);
	ssize_t total_count = 0;
	ssize_t ret = -EINVAL;
	int i = 0;

	gossip_debug(GOSSIP_FILE_DEBUG,
		     "%s-BEGIN(%pU): count(%d) after estimate_max_iovecs.\n",
		     __func__,
		     handle,
		     (int)count);

	if (type == ORANGEFS_IO_WRITE) {
		gossip_debug(GOSSIP_FILE_DEBUG,
			     "%s(%pU): proceeding with offset : %llu, "
			     "size %d\n",
			     __func__,
			     handle,
			     llu(*offset),
			     (int)count);
	}

	if (count == 0) {
		ret = 0;
		goto out;
	}

	while (iov_iter_count(iter)) {
		size_t each_count = iov_iter_count(iter);
		size_t amt_complete;
		i++;

		/* how much to transfer in this loop iteration */
		if (each_count > orangefs_bufmap_size_query())
			each_count = orangefs_bufmap_size_query();

		gossip_debug(GOSSIP_FILE_DEBUG,
			     "%s(%pU): size of each_count(%d)\n",
			     __func__,
			     handle,
			     (int)each_count);
		gossip_debug(GOSSIP_FILE_DEBUG,
			     "%s(%pU): BEFORE wait_for_io: offset is %d\n",
			     __func__,
			     handle,
			     (int)*offset);

		ret = wait_for_direct_io(type, inode, offset, iter,
				each_count, 0, NULL, NULL, file);
		gossip_debug(GOSSIP_FILE_DEBUG,
			     "%s(%pU): return from wait_for_io:%d\n",
			     __func__,
			     handle,
			     (int)ret);

		if (ret < 0)
			goto out;

		*offset += ret;
		total_count += ret;
		amt_complete = ret;

		gossip_debug(GOSSIP_FILE_DEBUG,
			     "%s(%pU): AFTER wait_for_io: offset is %d\n",
			     __func__,
			     handle,
			     (int)*offset);

		/*
		 * if we got a short I/O operation,
		 * fall out and return what we got so far
		 */
		if (amt_complete < each_count)
			break;
	} /*end while */

out:
	if (total_count > 0)
		ret = total_count;
	if (ret > 0) {
		if (type == ORANGEFS_IO_READ) {
			file_accessed(file);
		} else {
			file_update_time(file);
			if (*offset > i_size_read(inode))
				i_size_write(inode, *offset);
		}
	}

	gossip_debug(GOSSIP_FILE_DEBUG,
		     "%s(%pU): Value(%d) returned.\n",
		     __func__,
		     handle,
		     (int)ret);

	return ret;
}

/** ORANGEFS2 implementation of address space operations */
static const struct address_space_operations orangefs_address_operations = {
	.writepage = orangefs_writepage,
	.readpage = orangefs_readpage,
	.writepages = orangefs_writepages,
	.set_page_dirty = __set_page_dirty_nobuffers,
	.write_begin = orangefs_write_begin,
	.write_end = orangefs_write_end,
	.invalidatepage = orangefs_invalidatepage,
	.releasepage = orangefs_releasepage,
	.freepage = orangefs_freepage,
	.launder_page = orangefs_launder_page,
	.direct_IO = orangefs_direct_IO,
};

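/*
 * A write fault makes the whole page writable, so the page-private write
 * range is widened to cover the full page: an existing range is reused when
 * the faulting task matches its fsuid/fsgid, and the page is laundered when
 * it does not.
 */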
vm_fault_t orangefs_page_mkwrite(struct vm_fault *vmf)
{
	struct page *page = vmf->page;
	struct inode *inode = file_inode(vmf->vma->vm_file);
	struct orangefs_inode_s *orangefs_inode = ORANGEFS_I(inode);
	unsigned long *bitlock = &orangefs_inode->bitlock;
	vm_fault_t ret;
	struct orangefs_write_range *wr;

	sb_start_pagefault(inode->i_sb);

	if (wait_on_bit(bitlock, 1, TASK_KILLABLE)) {
		ret = VM_FAULT_RETRY;
		goto out;
	}

	lock_page(page);
	if (PageDirty(page) && !PagePrivate(page)) {
		/*
		 * Should be impossible.  If it happens, launder the page
		 * since we don't know what's dirty.  This will WARN in
		 * orangefs_writepage_locked.
		 */
		if (orangefs_launder_page(page)) {
			ret = VM_FAULT_LOCKED|VM_FAULT_RETRY;
			goto out;
		}
	}
	if (PagePrivate(page)) {
		wr = (struct orangefs_write_range *)page_private(page);
		if (uid_eq(wr->uid, current_fsuid()) &&
		    gid_eq(wr->gid, current_fsgid())) {
			wr->pos = page_offset(page);
			wr->len = PAGE_SIZE;
			goto okay;
		} else {
			if (orangefs_launder_page(page)) {
				ret = VM_FAULT_LOCKED|VM_FAULT_RETRY;
				goto out;
			}
		}
	}
	wr = kmalloc(sizeof *wr, GFP_KERNEL);
	if (!wr) {
		ret = VM_FAULT_LOCKED|VM_FAULT_RETRY;
		goto out;
	}
	wr->pos = page_offset(page);
	wr->len = PAGE_SIZE;
	wr->uid = current_fsuid();
	wr->gid = current_fsgid();
	attach_page_private(page, wr);
okay:

	file_update_time(vmf->vma->vm_file);
	if (page->mapping != inode->i_mapping) {
		unlock_page(page);
		ret = VM_FAULT_LOCKED|VM_FAULT_NOPAGE;
		goto out;
	}

	/*
	 * We mark the page dirty already here so that when freeze is in
	 * progress, we are guaranteed that writeback during freezing will
	 * see the dirty page and writeprotect it again.
	 */
	set_page_dirty(page);
	wait_for_stable_page(page);
	ret = VM_FAULT_LOCKED;
out:
	sb_end_pagefault(inode->i_sb);
	return ret;
}

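/*
 * Truncate is done pagecache-first: i_size and the page cache are updated
 * locally before the ORANGEFS_VFS_OP_TRUNCATE upcall is sent to the server.
 */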
static int orangefs_setattr_size(struct inode *inode, struct iattr *iattr)
{
	struct orangefs_inode_s *orangefs_inode = ORANGEFS_I(inode);
	struct orangefs_kernel_op_s *new_op;
	loff_t orig_size;
	int ret = -EINVAL;

	gossip_debug(GOSSIP_INODE_DEBUG,
		     "%s: %pU: Handle is %pU | fs_id %d | size is %llu\n",
		     __func__,
		     get_khandle_from_ino(inode),
		     &orangefs_inode->refn.khandle,
		     orangefs_inode->refn.fs_id,
		     iattr->ia_size);

	/* Ensure that we have an up-to-date size, so we know if it changed. */
	ret = orangefs_inode_getattr(inode, ORANGEFS_GETATTR_SIZE);
	if (ret == -ESTALE)
		ret = -EIO;
	if (ret) {
		gossip_err("%s: orangefs_inode_getattr failed, ret:%d:.\n",
		    __func__, ret);
		return ret;
	}
	orig_size = i_size_read(inode);

	/* This is truncate_setsize in a different order. */
	truncate_pagecache(inode, iattr->ia_size);
	i_size_write(inode, iattr->ia_size);
	if (iattr->ia_size > orig_size)
		pagecache_isize_extended(inode, orig_size, iattr->ia_size);

	new_op = op_alloc(ORANGEFS_VFS_OP_TRUNCATE);
	if (!new_op)
		return -ENOMEM;

	new_op->upcall.req.truncate.refn = orangefs_inode->refn;
	new_op->upcall.req.truncate.size = (__s64) iattr->ia_size;

	ret = service_operation(new_op,
		__func__,
		get_interruptible_flag(inode));

	/*
	 * the truncate has no downcall members to retrieve, but
	 * the status value tells us if it went through ok or not
	 */
	gossip_debug(GOSSIP_INODE_DEBUG, "%s: ret:%d:\n", __func__, ret);

	op_release(new_op);

	if (ret != 0)
		return ret;

	if (orig_size != i_size_read(inode))
		iattr->ia_valid |= ATTR_CTIME | ATTR_MTIME;

	return ret;
}

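/*
 * Attribute changes are not pushed to the server immediately: the valid bits
 * are accumulated in ORANGEFS_I(inode)->attr_valid together with the
 * fsuid/fsgid that made the change, to be written back later.  If a
 * different user already has attributes pending, the inode is flushed with
 * write_inode_now() before the new change is recorded.
 */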
int __orangefs_setattr(struct inode *inode, struct iattr *iattr)
{
	int ret;

	if (iattr->ia_valid & ATTR_MODE) {
		if (iattr->ia_mode & (S_ISVTX)) {
			if (is_root_handle(inode)) {
				/*
				 * allow sticky bit to be set on root (since
				 * it shows up that way by default anyhow),
				 * but don't show it to the server
				 */
				iattr->ia_mode -= S_ISVTX;
			} else {
				gossip_debug(GOSSIP_UTILS_DEBUG,
					"User attempted to set sticky bit on non-root directory; returning EINVAL.\n");
				ret = -EINVAL;
				goto out;
			}
		}
		if (iattr->ia_mode & (S_ISUID)) {
			gossip_debug(GOSSIP_UTILS_DEBUG,
				"Attempting to set setuid bit (not supported); returning EINVAL.\n");
			ret = -EINVAL;
			goto out;
		}
	}

	if (iattr->ia_valid & ATTR_SIZE) {
		ret = orangefs_setattr_size(inode, iattr);
		if (ret)
			goto out;
	}

again:
	spin_lock(&inode->i_lock);
	if (ORANGEFS_I(inode)->attr_valid) {
		if (uid_eq(ORANGEFS_I(inode)->attr_uid, current_fsuid()) &&
		    gid_eq(ORANGEFS_I(inode)->attr_gid, current_fsgid())) {
			ORANGEFS_I(inode)->attr_valid = iattr->ia_valid;
		} else {
			spin_unlock(&inode->i_lock);
			write_inode_now(inode, 1);
			goto again;
		}
	} else {
		ORANGEFS_I(inode)->attr_valid = iattr->ia_valid;
		ORANGEFS_I(inode)->attr_uid = current_fsuid();
		ORANGEFS_I(inode)->attr_gid = current_fsgid();
	}
	setattr_copy(inode, iattr);
	spin_unlock(&inode->i_lock);
	mark_inode_dirty(inode);

	if (iattr->ia_valid & ATTR_MODE)
		/* change mod on a file that has ACLs */
		ret = posix_acl_chmod(inode, inode->i_mode);

	ret = 0;
out:
	return ret;
}

/*
 * Change attributes of an object referenced by dentry.
 */
int orangefs_setattr(struct dentry *dentry, struct iattr *iattr)
{
	int ret;
	gossip_debug(GOSSIP_INODE_DEBUG, "__orangefs_setattr: called on %pd\n",
	    dentry);
	ret = setattr_prepare(dentry, iattr);
	if (ret)
		goto out;
	ret = __orangefs_setattr(d_inode(dentry), iattr);
	sync_inode_metadata(d_inode(dentry), 1);
out:
	gossip_debug(GOSSIP_INODE_DEBUG, "orangefs_setattr: returning %d\n",
	    ret);
	return ret;
}

/*
 * Obtain attributes of an object given a dentry
 */
int orangefs_getattr(const struct path *path, struct kstat *stat,
		     u32 request_mask, unsigned int flags)
{
	int ret;
	struct inode *inode = path->dentry->d_inode;

	gossip_debug(GOSSIP_INODE_DEBUG,
		     "orangefs_getattr: called on %pd mask %u\n",
		     path->dentry, request_mask);

	ret = orangefs_inode_getattr(inode,
	    request_mask & STATX_SIZE ? ORANGEFS_GETATTR_SIZE : 0);
	if (ret == 0) {
		generic_fillattr(inode, stat);

		/* override block size reported to stat */
		if (!(request_mask & STATX_SIZE))
			stat->result_mask &= ~STATX_SIZE;

		stat->attributes_mask = STATX_ATTR_IMMUTABLE |
		    STATX_ATTR_APPEND;
		if (inode->i_flags & S_IMMUTABLE)
			stat->attributes |= STATX_ATTR_IMMUTABLE;
		if (inode->i_flags & S_APPEND)
			stat->attributes |= STATX_ATTR_APPEND;
	}
	return ret;
}

int orangefs_permission(struct inode *inode, int mask)
{
	int ret;

	if (mask & MAY_NOT_BLOCK)
		return -ECHILD;

	gossip_debug(GOSSIP_INODE_DEBUG, "%s: refreshing\n", __func__);

	/* Make sure the permission (and other common attrs) are up to date. */
	ret = orangefs_inode_getattr(inode, 0);
	if (ret < 0)
		return ret;

	return generic_permission(inode, mask);
}

int orangefs_update_time(struct inode *inode, struct timespec64 *time, int flags)
{
	struct iattr iattr;
	gossip_debug(GOSSIP_INODE_DEBUG, "orangefs_update_time: %pU\n",
	    get_khandle_from_ino(inode));
	generic_update_time(inode, time, flags);
	memset(&iattr, 0, sizeof iattr);
	if (flags & S_ATIME)
		iattr.ia_valid |= ATTR_ATIME;
	if (flags & S_CTIME)
		iattr.ia_valid |= ATTR_CTIME;
	if (flags & S_MTIME)
		iattr.ia_valid |= ATTR_MTIME;
	return __orangefs_setattr(inode, &iattr);
}

/* ORANGEFS2 implementation of VFS inode operations for files */
static const struct inode_operations orangefs_file_inode_operations = {
	.get_acl = orangefs_get_acl,
	.set_acl = orangefs_set_acl,
	.setattr = orangefs_setattr,
	.getattr = orangefs_getattr,
	.listxattr = orangefs_listxattr,
	.permission = orangefs_permission,
	.update_time = orangefs_update_time,
};

static int orangefs_init_iops(struct inode *inode)
{
	inode->i_mapping->a_ops = &orangefs_address_operations;

	switch (inode->i_mode & S_IFMT) {
	case S_IFREG:
		inode->i_op = &orangefs_file_inode_operations;
		inode->i_fop = &orangefs_file_operations;
		break;
	case S_IFLNK:
		inode->i_op = &orangefs_symlink_inode_operations;
		break;
	case S_IFDIR:
		inode->i_op = &orangefs_dir_inode_operations;
		inode->i_fop = &orangefs_dir_operations;
		break;
	default:
		gossip_debug(GOSSIP_INODE_DEBUG,
			     "%s: unsupported mode\n",
			     __func__);
		return -EINVAL;
	}

	return 0;
}

/*
 * Given an ORANGEFS object identifier (fsid, handle), convert it into
 * an ino_t that will be used as the hash index under which the handle
 * is searched for in the VFS inode hash table.
 */
static inline ino_t orangefs_handle_hash(struct orangefs_object_kref *ref)
{
	if (!ref)
		return 0;
	return orangefs_khandle_to_ino(&(ref->khandle));
}

/*
 * Called to set up an inode from iget5_locked.
 */
static int orangefs_set_inode(struct inode *inode, void *data)
{
	struct orangefs_object_kref *ref = (struct orangefs_object_kref *) data;
	ORANGEFS_I(inode)->refn.fs_id = ref->fs_id;
	ORANGEFS_I(inode)->refn.khandle = ref->khandle;
	ORANGEFS_I(inode)->attr_valid = 0;
	hash_init(ORANGEFS_I(inode)->xattr_cache);
	ORANGEFS_I(inode)->mapping_time = jiffies - 1;
	ORANGEFS_I(inode)->bitlock = 0;
	return 0;
}

/*
 * Called to determine if handles match.
 */
static int orangefs_test_inode(struct inode *inode, void *data)
{
	struct orangefs_object_kref *ref = (struct orangefs_object_kref *) data;
	struct orangefs_inode_s *orangefs_inode = NULL;

	orangefs_inode = ORANGEFS_I(inode);
	/* test handles and fs_ids... */
	return (!ORANGEFS_khandle_cmp(&(orangefs_inode->refn.khandle),
	    &(ref->khandle)) &&
	    orangefs_inode->refn.fs_id == ref->fs_id);
}

/*
 * Front-end to lookup the inode-cache maintained by the VFS using the ORANGEFS
 * file handle.
 *
 * @sb: the file system super block instance.
 * @ref: The ORANGEFS object for which we are trying to locate an inode.
 */
struct inode *orangefs_iget(struct super_block *sb,
		struct orangefs_object_kref *ref)
{
	struct inode *inode = NULL;
	unsigned long hash;
	int error;

	hash = orangefs_handle_hash(ref);
	inode = iget5_locked(sb,
			hash,
			orangefs_test_inode,
			orangefs_set_inode,
			ref);

	if (!inode)
		return ERR_PTR(-ENOMEM);

	if (!(inode->i_state & I_NEW))
		return inode;

	error = orangefs_inode_getattr(inode, ORANGEFS_GETATTR_NEW);
	if (error) {
		iget_failed(inode);
		return ERR_PTR(error);
	}

	inode->i_ino = hash;	/* needed for stat etc */
	orangefs_init_iops(inode);
	unlock_new_inode(inode);

	gossip_debug(GOSSIP_INODE_DEBUG,
		     "iget handle %pU, fsid %d hash %ld i_ino %lu\n",
		     &ref->khandle,
		     ref->fs_id,
		     hash,
		     inode->i_ino);

	return inode;
}

/*
 * Allocate an inode for a newly created file and insert it into the inode hash.
 */
struct inode *orangefs_new_inode(struct super_block *sb, struct inode *dir,
		int mode, dev_t dev, struct orangefs_object_kref *ref)
{
	unsigned long hash = orangefs_handle_hash(ref);
	struct inode *inode;
	int error;

	gossip_debug(GOSSIP_INODE_DEBUG,
		     "%s:(sb is %p | MAJOR(dev)=%u | MINOR(dev)=%u mode=%o)\n",
		     __func__,
		     sb,
		     MAJOR(dev),
		     MINOR(dev),
		     mode);

	inode = new_inode(sb);
	if (!inode)
		return ERR_PTR(-ENOMEM);

	orangefs_set_inode(inode, ref);
	inode->i_ino = hash;	/* needed for stat etc */

	error = orangefs_inode_getattr(inode, ORANGEFS_GETATTR_NEW);
	if (error)
		goto out_iput;

	orangefs_init_iops(inode);
	inode->i_rdev = dev;

	error = insert_inode_locked4(inode, hash, orangefs_test_inode, ref);
	if (error < 0)
		goto out_iput;

	gossip_debug(GOSSIP_INODE_DEBUG,
		     "Initializing ACL's for inode %pU\n",
		     get_khandle_from_ino(inode));
	orangefs_init_acl(inode, dir);
	return inode;

out_iput:
	iput(inode);
	return ERR_PTR(error);
}