// SPDX-License-Identifier: GPL-2.0-or-later
/* Network filesystem high-level read support.
 *
 * Copyright (C) 2021 Red Hat, Inc. All Rights Reserved.
 * Written by David Howells (dhowells@redhat.com)
 */

#include <linux/module.h>
#include <linux/export.h>
#include <linux/fs.h>
#include <linux/mm.h>
#include <linux/pagemap.h>
#include <linux/slab.h>
#include <linux/uio.h>
#include <linux/sched/mm.h>
#include <linux/task_io_accounting_ops.h>
#include <linux/netfs.h>
#include "internal.h"
#define CREATE_TRACE_POINTS
#include <trace/events/netfs.h>

MODULE_DESCRIPTION("Network fs support");
MODULE_AUTHOR("Red Hat, Inc.");
MODULE_LICENSE("GPL");

unsigned netfs_debug;
module_param_named(debug, netfs_debug, uint, S_IWUSR | S_IRUGO);
MODULE_PARM_DESC(netfs_debug, "Netfs support debugging mask");

static void netfs_rreq_work(struct work_struct *);
static void __netfs_put_subrequest(struct netfs_read_subrequest *, bool);

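/*
 * Drop a ref on a read subrequest, freeing it if the refcount reaches zero.
 */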
static void netfs_put_subrequest(struct netfs_read_subrequest *subreq,
				 bool was_async)
{
	if (refcount_dec_and_test(&subreq->usage))
		__netfs_put_subrequest(subreq, was_async);
}

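/*
 * Allocate and partially initialise a read request.  The caller holds the
 * sole ref on return.
 */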
static struct netfs_read_request *netfs_alloc_read_request(
	const struct netfs_read_request_ops *ops, void *netfs_priv,
	struct file *file)
{
	static atomic_t debug_ids;
	struct netfs_read_request *rreq;

	rreq = kzalloc(sizeof(struct netfs_read_request), GFP_KERNEL);
	if (rreq) {
		rreq->netfs_ops = ops;
		rreq->netfs_priv = netfs_priv;
		rreq->inode = file_inode(file);
		rreq->i_size = i_size_read(rreq->inode);
		rreq->debug_id = atomic_inc_return(&debug_ids);
		INIT_LIST_HEAD(&rreq->subrequests);
		INIT_WORK(&rreq->work, netfs_rreq_work);
		refcount_set(&rreq->usage, 1);
		__set_bit(NETFS_RREQ_IN_PROGRESS, &rreq->flags);
		ops->init_rreq(rreq, file);
		netfs_stat(&netfs_n_rh_rreq);
	}

	return rreq;
}

static void netfs_get_read_request(struct netfs_read_request *rreq)
{
	refcount_inc(&rreq->usage);
}

static void netfs_rreq_clear_subreqs(struct netfs_read_request *rreq,
				     bool was_async)
{
	struct netfs_read_subrequest *subreq;

	while (!list_empty(&rreq->subrequests)) {
		subreq = list_first_entry(&rreq->subrequests,
					  struct netfs_read_subrequest, rreq_link);
		list_del(&subreq->rreq_link);
		netfs_put_subrequest(subreq, was_async);
	}
}

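/*
 * Free a read request: discard any remaining subrequests, give the netfs a
 * chance to clean up its private data and end any cache operation.
 */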
static void netfs_free_read_request(struct work_struct *work)
{
	struct netfs_read_request *rreq =
		container_of(work, struct netfs_read_request, work);
	netfs_rreq_clear_subreqs(rreq, false);
	if (rreq->netfs_priv)
		rreq->netfs_ops->cleanup(rreq->mapping, rreq->netfs_priv);
	trace_netfs_rreq(rreq, netfs_rreq_trace_free);
	if (rreq->cache_resources.ops)
		rreq->cache_resources.ops->end_operation(&rreq->cache_resources);
	kfree(rreq);
	netfs_stat_d(&netfs_n_rh_rreq);
}

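/*
 * Drop a ref on a read request.  If the request terminated asynchronously,
 * freeing is deferred to a worker thread as we may be in a context, such as
 * softirq, in which we can't sleep.
 */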
static void netfs_put_read_request(struct netfs_read_request *rreq, bool was_async)
{
	if (refcount_dec_and_test(&rreq->usage)) {
		if (was_async) {
			rreq->work.func = netfs_free_read_request;
			if (!queue_work(system_unbound_wq, &rreq->work))
				BUG();
		} else {
			netfs_free_read_request(&rreq->work);
		}
	}
}

/*
 * Allocate and partially initialise an I/O subrequest structure.
 */
static struct netfs_read_subrequest *netfs_alloc_subrequest(
	struct netfs_read_request *rreq)
{
	struct netfs_read_subrequest *subreq;

	subreq = kzalloc(sizeof(struct netfs_read_subrequest), GFP_KERNEL);
	if (subreq) {
		INIT_LIST_HEAD(&subreq->rreq_link);
		refcount_set(&subreq->usage, 2);
		subreq->rreq = rreq;
		netfs_get_read_request(rreq);
		netfs_stat(&netfs_n_rh_sreq);
	}

	return subreq;
}

static void netfs_get_read_subrequest(struct netfs_read_subrequest *subreq)
{
	refcount_inc(&subreq->usage);
}

static void __netfs_put_subrequest(struct netfs_read_subrequest *subreq,
				   bool was_async)
{
	struct netfs_read_request *rreq = subreq->rreq;

	trace_netfs_sreq(subreq, netfs_sreq_trace_free);
	kfree(subreq);
	netfs_stat_d(&netfs_n_rh_sreq);
	netfs_put_read_request(rreq, was_async);
}

/*
 * Clear the unread part of an I/O subrequest.
 */
static void netfs_clear_unread(struct netfs_read_subrequest *subreq)
{
	struct iov_iter iter;

	iov_iter_xarray(&iter, WRITE, &subreq->rreq->mapping->i_pages,
			subreq->start + subreq->transferred,
			subreq->len - subreq->transferred);
	iov_iter_zero(iov_iter_count(&iter), &iter);
}

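/*
 * Handle the completion of a read from the cache by passing it to the common
 * subrequest termination handler.
 */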
static void netfs_cache_read_terminated(void *priv, ssize_t transferred_or_error,
					bool was_async)
{
	struct netfs_read_subrequest *subreq = priv;

	netfs_subreq_terminated(subreq, transferred_or_error, was_async);
}

/*
 * Issue a read against the cache.
 * - Eats the caller's ref on subreq.
 */
static void netfs_read_from_cache(struct netfs_read_request *rreq,
				  struct netfs_read_subrequest *subreq,
				  bool seek_data)
{
	struct netfs_cache_resources *cres = &rreq->cache_resources;
	struct iov_iter iter;

	netfs_stat(&netfs_n_rh_read);
	iov_iter_xarray(&iter, READ, &rreq->mapping->i_pages,
			subreq->start + subreq->transferred,
			subreq->len - subreq->transferred);

	cres->ops->read(cres, subreq->start, &iter, seek_data,
			netfs_cache_read_terminated, subreq);
}

/*
 * Fill a subrequest region with zeroes.
 */
static void netfs_fill_with_zeroes(struct netfs_read_request *rreq,
				   struct netfs_read_subrequest *subreq)
{
	netfs_stat(&netfs_n_rh_zero);
	__set_bit(NETFS_SREQ_CLEAR_TAIL, &subreq->flags);
	netfs_subreq_terminated(subreq, 0, false);
}

/*
 * Ask the netfs to issue a read request to the server for us.
 *
 * The netfs is expected to read from subreq->start + subreq->transferred to
 * subreq->start + subreq->len - 1.  It may not backtrack and write data into
 * the buffer prior to the transferred point as it might clobber dirty data
 * obtained from the cache.
 *
 * Alternatively, the netfs is allowed to indicate one of two things:
 *
 * - NETFS_SREQ_SHORT_READ: A short read - it will get called again to try and
 *   make progress.
 *
 * - NETFS_SREQ_CLEAR_TAIL: A short read - the rest of the buffer will be
 *   cleared.
 */
static void netfs_read_from_server(struct netfs_read_request *rreq,
				   struct netfs_read_subrequest *subreq)
{
	netfs_stat(&netfs_n_rh_download);
	rreq->netfs_ops->issue_op(subreq);
}

/*
 * Release those waiting.
 */
static void netfs_rreq_completed(struct netfs_read_request *rreq, bool was_async)
{
	trace_netfs_rreq(rreq, netfs_rreq_trace_done);
	netfs_rreq_clear_subreqs(rreq, was_async);
	netfs_put_read_request(rreq, was_async);
}

/*
 * Deal with the completion of writing the data to the cache.  We have to clear
 * the PG_fscache bits on the pages involved and release the caller's ref.
 *
 * May be called in softirq mode and we inherit a ref from the caller.
 */
static void netfs_rreq_unmark_after_write(struct netfs_read_request *rreq,
					  bool was_async)
{
	struct netfs_read_subrequest *subreq;
	struct page *page;
	pgoff_t unlocked = 0;
	bool have_unlocked = false;

	rcu_read_lock();

	list_for_each_entry(subreq, &rreq->subrequests, rreq_link) {
		XA_STATE(xas, &rreq->mapping->i_pages, subreq->start / PAGE_SIZE);

		xas_for_each(&xas, page, (subreq->start + subreq->len - 1) / PAGE_SIZE) {
			/* We might have multiple writes from the same huge
			 * page, but we mustn't unlock a page more than once.
			 */
			if (have_unlocked && page->index <= unlocked)
				continue;
			unlocked = page->index;
			end_page_fscache(page);
			have_unlocked = true;
		}
	}

	rcu_read_unlock();
	netfs_rreq_completed(rreq, was_async);
}

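/*
 * Handle termination of a write to the cache.  If this was the last
 * outstanding write, clear the PG_fscache marks on the pages involved.
 */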
static void netfs_rreq_copy_terminated(void *priv, ssize_t transferred_or_error,
				       bool was_async)
{
	struct netfs_read_subrequest *subreq = priv;
	struct netfs_read_request *rreq = subreq->rreq;

	if (IS_ERR_VALUE(transferred_or_error)) {
		netfs_stat(&netfs_n_rh_write_failed);
		trace_netfs_failure(rreq, subreq, transferred_or_error,
				    netfs_fail_copy_to_cache);
	} else {
		netfs_stat(&netfs_n_rh_write_done);
	}

	trace_netfs_sreq(subreq, netfs_sreq_trace_write_term);

	/* If we decrement nr_wr_ops to 0, the ref belongs to us. */
	if (atomic_dec_and_test(&rreq->nr_wr_ops))
		netfs_rreq_unmark_after_write(rreq, was_async);

	netfs_put_subrequest(subreq, was_async);
}

/*
 * Perform any outstanding writes to the cache.  We inherit a ref from the
 * caller.
 */
static void netfs_rreq_do_write_to_cache(struct netfs_read_request *rreq)
{
	struct netfs_cache_resources *cres = &rreq->cache_resources;
	struct netfs_read_subrequest *subreq, *next, *p;
	struct iov_iter iter;
	int ret;

	trace_netfs_rreq(rreq, netfs_rreq_trace_write);

	/* We don't want terminating writes trying to wake us up whilst we're
	 * still going through the list.
	 */
	atomic_inc(&rreq->nr_wr_ops);

	list_for_each_entry_safe(subreq, p, &rreq->subrequests, rreq_link) {
		if (!test_bit(NETFS_SREQ_WRITE_TO_CACHE, &subreq->flags)) {
			list_del_init(&subreq->rreq_link);
			netfs_put_subrequest(subreq, false);
		}
	}

	list_for_each_entry(subreq, &rreq->subrequests, rreq_link) {
		/* Amalgamate adjacent writes */
		while (!list_is_last(&subreq->rreq_link, &rreq->subrequests)) {
			next = list_next_entry(subreq, rreq_link);
			if (next->start != subreq->start + subreq->len)
				break;
			subreq->len += next->len;
			list_del_init(&next->rreq_link);
			netfs_put_subrequest(next, false);
		}

		ret = cres->ops->prepare_write(cres, &subreq->start, &subreq->len,
					       rreq->i_size);
		if (ret < 0) {
			trace_netfs_failure(rreq, subreq, ret, netfs_fail_prepare_write);
			trace_netfs_sreq(subreq, netfs_sreq_trace_write_skip);
			continue;
		}

		iov_iter_xarray(&iter, WRITE, &rreq->mapping->i_pages,
				subreq->start, subreq->len);

		atomic_inc(&rreq->nr_wr_ops);
		netfs_stat(&netfs_n_rh_write);
		netfs_get_read_subrequest(subreq);
		trace_netfs_sreq(subreq, netfs_sreq_trace_write);
		cres->ops->write(cres, subreq->start, &iter,
				 netfs_rreq_copy_terminated, subreq);
	}

	/* If we decrement nr_wr_ops to 0, the usage ref belongs to us. */
	if (atomic_dec_and_test(&rreq->nr_wr_ops))
		netfs_rreq_unmark_after_write(rreq, false);
}

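/*
 * Work item by which writing to the cache is deferred to a worker thread.
 */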
static void netfs_rreq_write_to_cache_work(struct work_struct *work)
{
	struct netfs_read_request *rreq =
		container_of(work, struct netfs_read_request, work);

	netfs_rreq_do_write_to_cache(rreq);
}

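/*
 * Begin writing the downloaded data to the cache, punting to a worker thread
 * if we were called in a context in which we can't sleep.
 */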
static void netfs_rreq_write_to_cache(struct netfs_read_request *rreq,
				      bool was_async)
{
	if (was_async) {
		rreq->work.func = netfs_rreq_write_to_cache_work;
		if (!queue_work(system_unbound_wq, &rreq->work))
			BUG();
	} else {
		netfs_rreq_do_write_to_cache(rreq);
	}
}

/*
 * Unlock the pages in a read operation.  We need to set PG_fscache on any
 * pages we're going to write back before we unlock them.
 */
static void netfs_rreq_unlock(struct netfs_read_request *rreq)
{
	struct netfs_read_subrequest *subreq;
	struct page *page;
	unsigned int iopos, account = 0;
	pgoff_t start_page = rreq->start / PAGE_SIZE;
	pgoff_t last_page = ((rreq->start + rreq->len) / PAGE_SIZE) - 1;
	bool subreq_failed = false;
	int i;

	XA_STATE(xas, &rreq->mapping->i_pages, start_page);

	if (test_bit(NETFS_RREQ_FAILED, &rreq->flags)) {
		__clear_bit(NETFS_RREQ_WRITE_TO_CACHE, &rreq->flags);
		list_for_each_entry(subreq, &rreq->subrequests, rreq_link) {
			__clear_bit(NETFS_SREQ_WRITE_TO_CACHE, &subreq->flags);
		}
	}

	/* Walk through the pagecache and the I/O request lists simultaneously.
	 * We may have a mixture of cached and uncached sections and we only
	 * really want to write out the uncached sections.  This is slightly
	 * complicated by the possibility that we might have huge pages with a
	 * mixture inside.
	 */
	subreq = list_first_entry(&rreq->subrequests,
				  struct netfs_read_subrequest, rreq_link);
	iopos = 0;
	subreq_failed = (subreq->error < 0);

	trace_netfs_rreq(rreq, netfs_rreq_trace_unlock);

	rcu_read_lock();
	xas_for_each(&xas, page, last_page) {
		unsigned int pgpos = (page->index - start_page) * PAGE_SIZE;
		unsigned int pgend = pgpos + thp_size(page);
		bool pg_failed = false;

		for (;;) {
			if (!subreq) {
				pg_failed = true;
				break;
			}
			if (test_bit(NETFS_SREQ_WRITE_TO_CACHE, &subreq->flags))
				set_page_fscache(page);
			pg_failed |= subreq_failed;
			if (pgend < iopos + subreq->len)
				break;

			account += subreq->transferred;
			iopos += subreq->len;
			if (!list_is_last(&subreq->rreq_link, &rreq->subrequests)) {
				subreq = list_next_entry(subreq, rreq_link);
				subreq_failed = (subreq->error < 0);
			} else {
				subreq = NULL;
				subreq_failed = false;
			}
			if (pgend == iopos)
				break;
		}

		if (!pg_failed) {
			for (i = 0; i < thp_nr_pages(page); i++)
				flush_dcache_page(page);
			SetPageUptodate(page);
		}

		if (!test_bit(NETFS_RREQ_DONT_UNLOCK_PAGES, &rreq->flags)) {
			if (page->index == rreq->no_unlock_page &&
			    test_bit(NETFS_RREQ_NO_UNLOCK_PAGE, &rreq->flags))
				_debug("no unlock");
			else
				unlock_page(page);
		}
	}
	rcu_read_unlock();

	task_io_account_read(account);
	if (rreq->netfs_ops->done)
		rreq->netfs_ops->done(rreq);
}

/*
 * Handle a short read.
 */
static void netfs_rreq_short_read(struct netfs_read_request *rreq,
				  struct netfs_read_subrequest *subreq)
{
	__clear_bit(NETFS_SREQ_SHORT_READ, &subreq->flags);
	__set_bit(NETFS_SREQ_SEEK_DATA_READ, &subreq->flags);

	netfs_stat(&netfs_n_rh_short_read);
	trace_netfs_sreq(subreq, netfs_sreq_trace_resubmit_short);

	netfs_get_read_subrequest(subreq);
	atomic_inc(&rreq->nr_rd_ops);
	if (subreq->source == NETFS_READ_FROM_CACHE)
		netfs_read_from_cache(rreq, subreq, true);
	else
		netfs_read_from_server(rreq, subreq);
}

/*
 * Resubmit any short or failed operations.  Returns true if we got the rreq
 * ref back.
 */
static bool netfs_rreq_perform_resubmissions(struct netfs_read_request *rreq)
{
	struct netfs_read_subrequest *subreq;

	WARN_ON(in_interrupt());

	trace_netfs_rreq(rreq, netfs_rreq_trace_resubmit);

	/* We don't want terminating submissions trying to wake us up whilst
	 * we're still going through the list.
	 */
	atomic_inc(&rreq->nr_rd_ops);

	__clear_bit(NETFS_RREQ_INCOMPLETE_IO, &rreq->flags);
	list_for_each_entry(subreq, &rreq->subrequests, rreq_link) {
		if (subreq->error) {
			if (subreq->source != NETFS_READ_FROM_CACHE)
				break;
			subreq->source = NETFS_DOWNLOAD_FROM_SERVER;
			subreq->error = 0;
			netfs_stat(&netfs_n_rh_download_instead);
			trace_netfs_sreq(subreq, netfs_sreq_trace_download_instead);
			netfs_get_read_subrequest(subreq);
			atomic_inc(&rreq->nr_rd_ops);
			netfs_read_from_server(rreq, subreq);
		} else if (test_bit(NETFS_SREQ_SHORT_READ, &subreq->flags)) {
			netfs_rreq_short_read(rreq, subreq);
		}
	}

	/* If we decrement nr_rd_ops to 0, the usage ref belongs to us. */
	if (atomic_dec_and_test(&rreq->nr_rd_ops))
		return true;

	wake_up_var(&rreq->nr_rd_ops);
	return false;
}

/*
 * Check to see if the data read is still valid.
 */
static void netfs_rreq_is_still_valid(struct netfs_read_request *rreq)
{
	struct netfs_read_subrequest *subreq;

	if (!rreq->netfs_ops->is_still_valid ||
	    rreq->netfs_ops->is_still_valid(rreq))
		return;

	list_for_each_entry(subreq, &rreq->subrequests, rreq_link) {
		if (subreq->source == NETFS_READ_FROM_CACHE) {
			subreq->error = -ESTALE;
			__set_bit(NETFS_RREQ_INCOMPLETE_IO, &rreq->flags);
		}
	}
}

/*
 * Assess the state of a read request and decide what to do next.
 *
 * Note that we could be in an ordinary kernel thread, on a workqueue or in
 * softirq context at this point.  We inherit a ref from the caller.
 */
static void netfs_rreq_assess(struct netfs_read_request *rreq, bool was_async)
{
	trace_netfs_rreq(rreq, netfs_rreq_trace_assess);

again:
	netfs_rreq_is_still_valid(rreq);

	if (!test_bit(NETFS_RREQ_FAILED, &rreq->flags) &&
	    test_bit(NETFS_RREQ_INCOMPLETE_IO, &rreq->flags)) {
		if (netfs_rreq_perform_resubmissions(rreq))
			goto again;
		return;
	}

	netfs_rreq_unlock(rreq);

	clear_bit_unlock(NETFS_RREQ_IN_PROGRESS, &rreq->flags);
	wake_up_bit(&rreq->flags, NETFS_RREQ_IN_PROGRESS);

	if (test_bit(NETFS_RREQ_WRITE_TO_CACHE, &rreq->flags))
		return netfs_rreq_write_to_cache(rreq, was_async);

	netfs_rreq_completed(rreq, was_async);
}

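/*
 * Work item by which the assessment of a read request is deferred to a worker
 * thread.
 */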
static void netfs_rreq_work(struct work_struct *work)
{
	struct netfs_read_request *rreq =
		container_of(work, struct netfs_read_request, work);
	netfs_rreq_assess(rreq, false);
}

/*
 * Handle the completion of all outstanding I/O operations on a read request.
 * We inherit a ref from the caller.
 */
static void netfs_rreq_terminated(struct netfs_read_request *rreq,
				  bool was_async)
{
	if (test_bit(NETFS_RREQ_INCOMPLETE_IO, &rreq->flags) &&
	    was_async) {
		if (!queue_work(system_unbound_wq, &rreq->work))
			BUG();
	} else {
		netfs_rreq_assess(rreq, was_async);
	}
}

/**
 * netfs_subreq_terminated - Note the termination of an I/O operation.
 * @subreq: The I/O request that has terminated.
 * @transferred_or_error: The amount of data transferred or an error code.
 * @was_async: The termination was asynchronous
 *
 * This tells the read helper that a contributory I/O operation has terminated,
 * one way or another, and that it should integrate the results.
 *
 * The caller indicates in @transferred_or_error the outcome of the operation,
 * supplying a positive value to indicate the number of bytes transferred, 0 to
 * indicate a failure to transfer anything that should be retried, or a
 * negative error code.  The helper will look after reissuing I/O operations as
 * appropriate and writing downloaded data to the cache.
 *
 * If @was_async is true, the caller might be running in softirq or interrupt
 * context and we can't sleep.
 */
void netfs_subreq_terminated(struct netfs_read_subrequest *subreq,
			     ssize_t transferred_or_error,
			     bool was_async)
{
	struct netfs_read_request *rreq = subreq->rreq;
	int u;

	_enter("[%u]{%llx,%lx},%zd",
	       subreq->debug_index, subreq->start, subreq->flags,
	       transferred_or_error);

	switch (subreq->source) {
	case NETFS_READ_FROM_CACHE:
		netfs_stat(&netfs_n_rh_read_done);
		break;
	case NETFS_DOWNLOAD_FROM_SERVER:
		netfs_stat(&netfs_n_rh_download_done);
		break;
	default:
		break;
	}

	if (IS_ERR_VALUE(transferred_or_error)) {
		subreq->error = transferred_or_error;
		trace_netfs_failure(rreq, subreq, transferred_or_error,
				    netfs_fail_read);
		goto failed;
	}

	if (WARN(transferred_or_error > subreq->len - subreq->transferred,
		 "Subreq overread: R%x[%x] %zd > %zu - %zu",
		 rreq->debug_id, subreq->debug_index,
		 transferred_or_error, subreq->len, subreq->transferred))
		transferred_or_error = subreq->len - subreq->transferred;

	subreq->error = 0;
	subreq->transferred += transferred_or_error;
	if (subreq->transferred < subreq->len)
		goto incomplete;

complete:
	__clear_bit(NETFS_SREQ_NO_PROGRESS, &subreq->flags);
	if (test_bit(NETFS_SREQ_WRITE_TO_CACHE, &subreq->flags))
		set_bit(NETFS_RREQ_WRITE_TO_CACHE, &rreq->flags);

out:
	trace_netfs_sreq(subreq, netfs_sreq_trace_terminated);

	/* If we decrement nr_rd_ops to 0, the ref belongs to us. */
	u = atomic_dec_return(&rreq->nr_rd_ops);
	if (u == 0)
		netfs_rreq_terminated(rreq, was_async);
	else if (u == 1)
		wake_up_var(&rreq->nr_rd_ops);

	netfs_put_subrequest(subreq, was_async);
	return;

incomplete:
	if (test_bit(NETFS_SREQ_CLEAR_TAIL, &subreq->flags)) {
		netfs_clear_unread(subreq);
		subreq->transferred = subreq->len;
		goto complete;
	}

	if (transferred_or_error == 0) {
		if (__test_and_set_bit(NETFS_SREQ_NO_PROGRESS, &subreq->flags)) {
			subreq->error = -ENODATA;
			goto failed;
		}
	} else {
		__clear_bit(NETFS_SREQ_NO_PROGRESS, &subreq->flags);
	}

	__set_bit(NETFS_SREQ_SHORT_READ, &subreq->flags);
	set_bit(NETFS_RREQ_INCOMPLETE_IO, &rreq->flags);
	goto out;

failed:
	if (subreq->source == NETFS_READ_FROM_CACHE) {
		netfs_stat(&netfs_n_rh_read_failed);
		set_bit(NETFS_RREQ_INCOMPLETE_IO, &rreq->flags);
	} else {
		netfs_stat(&netfs_n_rh_download_failed);
		set_bit(NETFS_RREQ_FAILED, &rreq->flags);
		rreq->error = subreq->error;
	}
	goto out;
}
EXPORT_SYMBOL(netfs_subreq_terminated);

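/*
 * Decide how a subrequest should be sourced: ask the cache if one is
 * attached; otherwise download from the server, or just zero-fill if the
 * subrequest lies entirely beyond the EOF.
 */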
static enum netfs_read_source netfs_cache_prepare_read(struct netfs_read_subrequest *subreq,
						       loff_t i_size)
{
	struct netfs_read_request *rreq = subreq->rreq;
	struct netfs_cache_resources *cres = &rreq->cache_resources;

	if (cres->ops)
		return cres->ops->prepare_read(subreq, i_size);
	if (subreq->start >= rreq->i_size)
		return NETFS_FILL_WITH_ZEROES;
	return NETFS_DOWNLOAD_FROM_SERVER;
}

/*
 * Work out what sort of subrequest the next one will be.
 */
static enum netfs_read_source
netfs_rreq_prepare_read(struct netfs_read_request *rreq,
			struct netfs_read_subrequest *subreq)
{
	enum netfs_read_source source;

	_enter("%llx-%llx,%llx", subreq->start, subreq->start + subreq->len, rreq->i_size);

	source = netfs_cache_prepare_read(subreq, rreq->i_size);
	if (source == NETFS_INVALID_READ)
		goto out;

	if (source == NETFS_DOWNLOAD_FROM_SERVER) {
		/* Call out to the netfs to let it shrink the request to fit
		 * its own I/O sizes and boundaries.  If it shrinks it here, it
		 * will be called again to make simultaneous calls; if it wants
		 * to make serial calls, it can indicate a short read and then
		 * we will call it again.
		 */
		if (subreq->len > rreq->i_size - subreq->start)
			subreq->len = rreq->i_size - subreq->start;

		if (rreq->netfs_ops->clamp_length &&
		    !rreq->netfs_ops->clamp_length(subreq)) {
			source = NETFS_INVALID_READ;
			goto out;
		}
	}

	if (WARN_ON(subreq->len == 0))
		source = NETFS_INVALID_READ;

out:
	subreq->source = source;
	trace_netfs_sreq(subreq, netfs_sreq_trace_prepare);
	return source;
}

/*
 * Slice off a piece of a read request and submit an I/O request for it.
 */
static bool netfs_rreq_submit_slice(struct netfs_read_request *rreq,
				    unsigned int *_debug_index)
{
	struct netfs_read_subrequest *subreq;
	enum netfs_read_source source;

	subreq = netfs_alloc_subrequest(rreq);
	if (!subreq)
		return false;

	subreq->debug_index = (*_debug_index)++;
	subreq->start = rreq->start + rreq->submitted;
	subreq->len = rreq->len - rreq->submitted;

	_debug("slice %llx,%zx,%zx", subreq->start, subreq->len, rreq->submitted);
	list_add_tail(&subreq->rreq_link, &rreq->subrequests);

	/* Call out to the cache to find out what it can do with the remaining
	 * subset.  It tells us in subreq->flags what it decided should be done
	 * and adjusts subreq->len down if the subset crosses a cache boundary.
	 *
	 * Then, when we hand off the subset, it can choose to take a subset of
	 * that (the starts must coincide), in which case, we go around the
	 * loop again and ask it to download the next piece.
	 */
	source = netfs_rreq_prepare_read(rreq, subreq);
	if (source == NETFS_INVALID_READ)
		goto subreq_failed;

	atomic_inc(&rreq->nr_rd_ops);

	rreq->submitted += subreq->len;

	trace_netfs_sreq(subreq, netfs_sreq_trace_submit);
	switch (source) {
	case NETFS_FILL_WITH_ZEROES:
		netfs_fill_with_zeroes(rreq, subreq);
		break;
	case NETFS_DOWNLOAD_FROM_SERVER:
		netfs_read_from_server(rreq, subreq);
		break;
	case NETFS_READ_FROM_CACHE:
		netfs_read_from_cache(rreq, subreq, false);
		break;
	default:
		BUG();
	}

	return true;

subreq_failed:
	rreq->error = subreq->error;
	netfs_put_subrequest(subreq, false);
	return false;
}

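/*
 * Give the cache, if one is attached, a chance to adjust the proposed
 * readahead window.
 */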
static void netfs_cache_expand_readahead(struct netfs_read_request *rreq,
					 loff_t *_start, size_t *_len, loff_t i_size)
{
	struct netfs_cache_resources *cres = &rreq->cache_resources;

	if (cres->ops && cres->ops->expand_readahead)
		cres->ops->expand_readahead(cres, _start, _len, i_size);
}

static void netfs_rreq_expand(struct netfs_read_request *rreq,
			      struct readahead_control *ractl)
{
	/* Give the cache a chance to change the request parameters.  The
	 * resultant request must contain the original region.
	 */
	netfs_cache_expand_readahead(rreq, &rreq->start, &rreq->len, rreq->i_size);

	/* Give the netfs a chance to change the request parameters.  The
	 * resultant request must contain the original region.
	 */
	if (rreq->netfs_ops->expand_readahead)
		rreq->netfs_ops->expand_readahead(rreq);

	/* Expand the request if the cache wants it to start earlier.  Note
	 * that the expansion may get further extended if the VM wishes to
	 * insert THPs and the preferred start and/or end wind up in the middle
	 * of THPs.
	 *
	 * If this is the case, however, the THP size should be an integer
	 * multiple of the cache granule size, so we get a whole number of
	 * granules to deal with.
	 */
	if (rreq->start != readahead_pos(ractl) ||
	    rreq->len != readahead_length(ractl)) {
		readahead_expand(ractl, rreq->start, rreq->len);
		rreq->start = readahead_pos(ractl);
		rreq->len = readahead_length(ractl);

		trace_netfs_read(rreq, readahead_pos(ractl), readahead_length(ractl),
				 netfs_read_trace_expanded);
	}
}

/**
 * netfs_readahead - Helper to manage a read request
 * @ractl: The description of the readahead request
 * @ops: The network filesystem's operations for the helper to use
 * @netfs_priv: Private netfs data to be retained in the request
 *
 * Fulfil a readahead request by drawing data from the cache if possible, or
 * the netfs if not.  Space beyond the EOF is zero-filled.  Multiple I/O
 * requests from different sources will get munged together.  If necessary, the
 * readahead window can be expanded in either direction to a more convenient
 * alignment for RPC efficiency or to make storage in the cache feasible.
 *
 * The calling netfs must provide a table of operations, only one of which,
 * issue_op, is mandatory.  It may also be passed a private token, which will
 * be retained in rreq->netfs_priv and will be cleaned up by ops->cleanup().
 *
 * This is usable whether or not caching is enabled.
 */
void netfs_readahead(struct readahead_control *ractl,
		     const struct netfs_read_request_ops *ops,
		     void *netfs_priv)
{
	struct netfs_read_request *rreq;
	struct page *page;
	unsigned int debug_index = 0;
	int ret;

	_enter("%lx,%x", readahead_index(ractl), readahead_count(ractl));

	if (readahead_count(ractl) == 0)
		goto cleanup;

	rreq = netfs_alloc_read_request(ops, netfs_priv, ractl->file);
	if (!rreq)
		goto cleanup;
	rreq->mapping = ractl->mapping;
	rreq->start = readahead_pos(ractl);
	rreq->len = readahead_length(ractl);

	if (ops->begin_cache_operation) {
		ret = ops->begin_cache_operation(rreq);
		if (ret == -ENOMEM || ret == -EINTR || ret == -ERESTARTSYS)
			goto cleanup_free;
	}

	netfs_stat(&netfs_n_rh_readahead);
	trace_netfs_read(rreq, readahead_pos(ractl), readahead_length(ractl),
			 netfs_read_trace_readahead);

	netfs_rreq_expand(rreq, ractl);

	atomic_set(&rreq->nr_rd_ops, 1);
	do {
		if (!netfs_rreq_submit_slice(rreq, &debug_index))
			break;

	} while (rreq->submitted < rreq->len);

	/* Drop the refs on the pages here rather than in the cache or
	 * filesystem.  The locks will be dropped in netfs_rreq_unlock().
	 */
	while ((page = readahead_page(ractl)))
		put_page(page);

	/* If we decrement nr_rd_ops to 0, the ref belongs to us. */
	if (atomic_dec_and_test(&rreq->nr_rd_ops))
		netfs_rreq_assess(rreq, false);
	return;

cleanup_free:
	netfs_put_read_request(rreq, false);
	return;
cleanup:
	if (netfs_priv)
		ops->cleanup(ractl->mapping, netfs_priv);
	return;
}
EXPORT_SYMBOL(netfs_readahead);
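
/*
 * For illustration, a minimal sketch of how a network filesystem might call
 * the helper above.  myfs_init_rreq(), myfs_issue_op() and myfs_cleanup() are
 * hypothetical functions that the filesystem would supply; of the hooks used
 * in this file, only issue_op is documented as mandatory:
 *
 *	static const struct netfs_read_request_ops myfs_req_ops = {
 *		.init_rreq	= myfs_init_rreq,
 *		.issue_op	= myfs_issue_op,
 *		.cleanup	= myfs_cleanup,
 *	};
 *
 *	static void myfs_readahead(struct readahead_control *ractl)
 *	{
 *		netfs_readahead(ractl, &myfs_req_ops, NULL);
 *	}
 */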

/**
 * netfs_readpage - Helper to manage a readpage request
 * @file: The file to read from
 * @page: The page to read
 * @ops: The network filesystem's operations for the helper to use
 * @netfs_priv: Private netfs data to be retained in the request
 *
 * Fulfil a readpage request by drawing data from the cache if possible, or the
 * netfs if not.  Space beyond the EOF is zero-filled.  Multiple I/O requests
 * from different sources will get munged together.
 *
 * The calling netfs must provide a table of operations, only one of which,
 * issue_op, is mandatory.  It may also be passed a private token, which will
 * be retained in rreq->netfs_priv and will be cleaned up by ops->cleanup().
 *
 * This is usable whether or not caching is enabled.
 */
int netfs_readpage(struct file *file,
		   struct page *page,
		   const struct netfs_read_request_ops *ops,
		   void *netfs_priv)
{
	struct netfs_read_request *rreq;
	unsigned int debug_index = 0;
	int ret;

	_enter("%lx", page_index(page));

	rreq = netfs_alloc_read_request(ops, netfs_priv, file);
	if (!rreq) {
		if (netfs_priv)
			ops->cleanup(page_file_mapping(page), netfs_priv);
		unlock_page(page);
		return -ENOMEM;
	}
	rreq->mapping = page_file_mapping(page);
	rreq->start = page_file_offset(page);
	rreq->len = thp_size(page);

	if (ops->begin_cache_operation) {
		ret = ops->begin_cache_operation(rreq);
		if (ret == -ENOMEM || ret == -EINTR || ret == -ERESTARTSYS) {
			unlock_page(page);
			goto out;
		}
	}

	netfs_stat(&netfs_n_rh_readpage);
	trace_netfs_read(rreq, rreq->start, rreq->len, netfs_read_trace_readpage);

	netfs_get_read_request(rreq);

	atomic_set(&rreq->nr_rd_ops, 1);
	do {
		if (!netfs_rreq_submit_slice(rreq, &debug_index))
			break;

	} while (rreq->submitted < rreq->len);

	/* Keep nr_rd_ops incremented so that the ref always belongs to us, and
	 * the service code isn't punted off to a random thread pool to
	 * process.
	 */
	do {
		wait_var_event(&rreq->nr_rd_ops, atomic_read(&rreq->nr_rd_ops) == 1);
		netfs_rreq_assess(rreq, false);
	} while (test_bit(NETFS_RREQ_IN_PROGRESS, &rreq->flags));

	ret = rreq->error;
	if (ret == 0 && rreq->submitted < rreq->len) {
		trace_netfs_failure(rreq, NULL, ret, netfs_fail_short_readpage);
		ret = -EIO;
	}
out:
	netfs_put_read_request(rreq, false);
	return ret;
}
EXPORT_SYMBOL(netfs_readpage);

/**
 * netfs_skip_page_read - prep a page for writing without reading first
 * @page: page being prepared
 * @pos: starting position for the write
 * @len: length of write
 *
 * In some cases, write_begin doesn't need to read at all:
 * - full page write
 * - write that lies in a page that is completely beyond EOF
 * - write that covers the page from start to EOF or beyond it
 *
 * If any of these criteria are met, then zero out the unwritten parts
 * of the page and return true.  Otherwise, return false.
 */
static bool netfs_skip_page_read(struct page *page, loff_t pos, size_t len)
{
	struct inode *inode = page->mapping->host;
	loff_t i_size = i_size_read(inode);
	size_t offset = offset_in_thp(page, pos);

	/* Full page write */
	if (offset == 0 && len >= thp_size(page))
		return true;

	/* pos beyond last page in the file */
	if (pos - offset >= i_size)
		goto zero_out;

	/* Write that covers from the start of the page to EOF or beyond */
	if (offset == 0 && (pos + len) >= i_size)
		goto zero_out;

	return false;
zero_out:
	zero_user_segments(page, 0, offset, offset + len, thp_size(page));
	return true;
}

/**
 * netfs_write_begin - Helper to prepare for writing
 * @file: The file to read from
 * @mapping: The mapping to read from
 * @pos: File position at which the write will begin
 * @len: The length of the write (may extend beyond the end of the page chosen)
 * @flags: AOP_* flags
 * @_page: Where to put the resultant page
 * @_fsdata: Place for the netfs to store a cookie
 * @ops: The network filesystem's operations for the helper to use
 * @netfs_priv: Private netfs data to be retained in the request
 *
 * Pre-read data for a write-begin request by drawing data from the cache if
 * possible, or the netfs if not.  Space beyond the EOF is zero-filled.
 * Multiple I/O requests from different sources will get munged together.  If
 * necessary, the readahead window can be expanded in either direction to a
 * more convenient alignment for RPC efficiency or to make storage in the cache
 * feasible.
 *
 * The calling netfs must provide a table of operations, only one of which,
 * issue_op, is mandatory.
 *
 * The check_write_begin() operation can be provided to check for and flush
 * conflicting writes once the page is grabbed and locked.  It is passed a
 * pointer to the fsdata cookie that gets returned to the VM to be passed to
 * write_end.  It is permitted to sleep.  It should return 0 if the request
 * should go ahead; unlock the page and return -EAGAIN to cause the page to be
 * regot; or return an error.
 *
 * This is usable whether or not caching is enabled.
 */
int netfs_write_begin(struct file *file, struct address_space *mapping,
		      loff_t pos, unsigned int len, unsigned int flags,
		      struct page **_page, void **_fsdata,
		      const struct netfs_read_request_ops *ops,
		      void *netfs_priv)
{
	struct netfs_read_request *rreq;
	struct page *page, *xpage;
	struct inode *inode = file_inode(file);
	unsigned int debug_index = 0;
	pgoff_t index = pos >> PAGE_SHIFT;
	int ret;

	DEFINE_READAHEAD(ractl, file, NULL, mapping, index);

retry:
	page = grab_cache_page_write_begin(mapping, index, flags);
	if (!page)
		return -ENOMEM;

	if (ops->check_write_begin) {
		/* Allow the netfs (eg. ceph) to flush conflicts. */
		ret = ops->check_write_begin(file, pos, len, page, _fsdata);
		if (ret < 0) {
			trace_netfs_failure(NULL, NULL, ret, netfs_fail_check_write_begin);
			if (ret == -EAGAIN)
				goto retry;
			goto error;
		}
	}

	if (PageUptodate(page))
		goto have_page;

	/* If the page is beyond the EOF, we want to clear it - unless it's
	 * within the cache granule containing the EOF, in which case we need
	 * to preload the granule.
	 */
	if (!ops->is_cache_enabled(inode) &&
	    netfs_skip_page_read(page, pos, len)) {
		netfs_stat(&netfs_n_rh_write_zskip);
		goto have_page_no_wait;
	}

	ret = -ENOMEM;
	rreq = netfs_alloc_read_request(ops, netfs_priv, file);
	if (!rreq)
		goto error;
	rreq->mapping = page->mapping;
	rreq->start = page_offset(page);
	rreq->len = thp_size(page);
	rreq->no_unlock_page = page->index;
	__set_bit(NETFS_RREQ_NO_UNLOCK_PAGE, &rreq->flags);
	netfs_priv = NULL;

	if (ops->begin_cache_operation) {
		ret = ops->begin_cache_operation(rreq);
		if (ret == -ENOMEM || ret == -EINTR || ret == -ERESTARTSYS)
			goto error_put;
	}

	netfs_stat(&netfs_n_rh_write_begin);
	trace_netfs_read(rreq, pos, len, netfs_read_trace_write_begin);

	/* Expand the request to meet caching requirements and download
	 * preferences.
	 */
	ractl._nr_pages = thp_nr_pages(page);
	netfs_rreq_expand(rreq, &ractl);
	netfs_get_read_request(rreq);

	/* We hold the page locks, so we can drop the references */
	while ((xpage = readahead_page(&ractl)))
		if (xpage != page)
			put_page(xpage);

	atomic_set(&rreq->nr_rd_ops, 1);
	do {
		if (!netfs_rreq_submit_slice(rreq, &debug_index))
			break;

	} while (rreq->submitted < rreq->len);

	/* Keep nr_rd_ops incremented so that the ref always belongs to us, and
	 * the service code isn't punted off to a random thread pool to
	 * process.
	 */
	for (;;) {
		wait_var_event(&rreq->nr_rd_ops, atomic_read(&rreq->nr_rd_ops) == 1);
		netfs_rreq_assess(rreq, false);
		if (!test_bit(NETFS_RREQ_IN_PROGRESS, &rreq->flags))
			break;
		cond_resched();
	}

	ret = rreq->error;
	if (ret == 0 && rreq->submitted < rreq->len) {
		trace_netfs_failure(rreq, NULL, ret, netfs_fail_short_write_begin);
		ret = -EIO;
	}
	netfs_put_read_request(rreq, false);
	if (ret < 0)
		goto error;

have_page:
	ret = wait_on_page_fscache_killable(page);
	if (ret < 0)
		goto error;
have_page_no_wait:
	if (netfs_priv)
		ops->cleanup(mapping, netfs_priv);
	*_page = page;
	_leave(" = 0");
	return 0;

error_put:
	netfs_put_read_request(rreq, false);
error:
	unlock_page(page);
	put_page(page);
	if (netfs_priv)
		ops->cleanup(mapping, netfs_priv);
1205 _leave(" = %d", ret);
1206 return ret;
1207}
1208EXPORT_SYMBOL(netfs_write_begin);