// SPDX-License-Identifier: GPL-2.0-or-later
/* Network filesystem high-level read support.
 *
 * Copyright (C) 2021 Red Hat, Inc. All Rights Reserved.
 * Written by David Howells (dhowells@redhat.com)
 */

#include <linux/module.h>
#include <linux/export.h>
#include <linux/fs.h>
#include <linux/mm.h>
#include <linux/pagemap.h>
#include <linux/slab.h>
#include <linux/uio.h>
#include <linux/sched/mm.h>
#include <linux/task_io_accounting_ops.h>
#include <linux/netfs.h>
#include "internal.h"
#define CREATE_TRACE_POINTS
#include <trace/events/netfs.h>

MODULE_DESCRIPTION("Network fs support");
MODULE_AUTHOR("Red Hat, Inc.");
MODULE_LICENSE("GPL");

unsigned netfs_debug;
module_param_named(debug, netfs_debug, uint, S_IWUSR | S_IRUGO);
MODULE_PARM_DESC(netfs_debug, "Netfs support debugging mask");

static void netfs_rreq_work(struct work_struct *);
static void __netfs_put_subrequest(struct netfs_read_subrequest *, bool);

static void netfs_put_subrequest(struct netfs_read_subrequest *subreq,
                                 bool was_async)
{
        if (refcount_dec_and_test(&subreq->usage))
                __netfs_put_subrequest(subreq, was_async);
}

static struct netfs_read_request *netfs_alloc_read_request(
        const struct netfs_read_request_ops *ops, void *netfs_priv,
        struct file *file)
{
        static atomic_t debug_ids;
        struct netfs_read_request *rreq;

        rreq = kzalloc(sizeof(struct netfs_read_request), GFP_KERNEL);
        if (rreq) {
                rreq->netfs_ops = ops;
                rreq->netfs_priv = netfs_priv;
                rreq->inode = file_inode(file);
                rreq->i_size = i_size_read(rreq->inode);
                rreq->debug_id = atomic_inc_return(&debug_ids);
                INIT_LIST_HEAD(&rreq->subrequests);
                INIT_WORK(&rreq->work, netfs_rreq_work);
                refcount_set(&rreq->usage, 1);
                __set_bit(NETFS_RREQ_IN_PROGRESS, &rreq->flags);
                ops->init_rreq(rreq, file);
                netfs_stat(&netfs_n_rh_rreq);
        }

        return rreq;
}

static void netfs_get_read_request(struct netfs_read_request *rreq)
{
        refcount_inc(&rreq->usage);
}

static void netfs_rreq_clear_subreqs(struct netfs_read_request *rreq,
                                     bool was_async)
{
        struct netfs_read_subrequest *subreq;

        while (!list_empty(&rreq->subrequests)) {
                subreq = list_first_entry(&rreq->subrequests,
                                          struct netfs_read_subrequest, rreq_link);
                list_del(&subreq->rreq_link);
                netfs_put_subrequest(subreq, was_async);
        }
}

static void netfs_free_read_request(struct work_struct *work)
{
        struct netfs_read_request *rreq =
                container_of(work, struct netfs_read_request, work);
        netfs_rreq_clear_subreqs(rreq, false);
        if (rreq->netfs_priv)
                rreq->netfs_ops->cleanup(rreq->mapping, rreq->netfs_priv);
        trace_netfs_rreq(rreq, netfs_rreq_trace_free);
        if (rreq->cache_resources.ops)
                rreq->cache_resources.ops->end_operation(&rreq->cache_resources);
        kfree(rreq);
        netfs_stat_d(&netfs_n_rh_rreq);
}

static void netfs_put_read_request(struct netfs_read_request *rreq, bool was_async)
{
        if (refcount_dec_and_test(&rreq->usage)) {
                if (was_async) {
                        rreq->work.func = netfs_free_read_request;
                        if (!queue_work(system_unbound_wq, &rreq->work))
                                BUG();
                } else {
                        netfs_free_read_request(&rreq->work);
                }
        }
}
/*
 * Allocate and partially initialise an I/O subrequest structure.
 */
static struct netfs_read_subrequest *netfs_alloc_subrequest(
        struct netfs_read_request *rreq)
{
        struct netfs_read_subrequest *subreq;

        subreq = kzalloc(sizeof(struct netfs_read_subrequest), GFP_KERNEL);
        if (subreq) {
                INIT_LIST_HEAD(&subreq->rreq_link);
                refcount_set(&subreq->usage, 2);
                subreq->rreq = rreq;
                netfs_get_read_request(rreq);
                netfs_stat(&netfs_n_rh_sreq);
        }

        return subreq;
}

static void netfs_get_read_subrequest(struct netfs_read_subrequest *subreq)
{
        refcount_inc(&subreq->usage);
}

static void __netfs_put_subrequest(struct netfs_read_subrequest *subreq,
                                   bool was_async)
{
        struct netfs_read_request *rreq = subreq->rreq;

        trace_netfs_sreq(subreq, netfs_sreq_trace_free);
        kfree(subreq);
        netfs_stat_d(&netfs_n_rh_sreq);
        netfs_put_read_request(rreq, was_async);
}

/*
 * Clear the unread part of an I/O request.
 */
static void netfs_clear_unread(struct netfs_read_subrequest *subreq)
{
        struct iov_iter iter;

        iov_iter_xarray(&iter, READ, &subreq->rreq->mapping->i_pages,
                        subreq->start + subreq->transferred,
                        subreq->len - subreq->transferred);
        iov_iter_zero(iov_iter_count(&iter), &iter);
}

static void netfs_cache_read_terminated(void *priv, ssize_t transferred_or_error,
                                        bool was_async)
{
        struct netfs_read_subrequest *subreq = priv;

        netfs_subreq_terminated(subreq, transferred_or_error, was_async);
}

/*
 * Issue a read against the cache.
 * - Eats the caller's ref on subreq.
 */
static void netfs_read_from_cache(struct netfs_read_request *rreq,
                                  struct netfs_read_subrequest *subreq,
                                  bool seek_data)
{
        struct netfs_cache_resources *cres = &rreq->cache_resources;
        struct iov_iter iter;

        netfs_stat(&netfs_n_rh_read);
        iov_iter_xarray(&iter, READ, &rreq->mapping->i_pages,
                        subreq->start + subreq->transferred,
                        subreq->len - subreq->transferred);

        cres->ops->read(cres, subreq->start, &iter, seek_data,
                        netfs_cache_read_terminated, subreq);
}

/*
 * Fill a subrequest region with zeroes.
 */
static void netfs_fill_with_zeroes(struct netfs_read_request *rreq,
                                   struct netfs_read_subrequest *subreq)
{
        netfs_stat(&netfs_n_rh_zero);
        __set_bit(NETFS_SREQ_CLEAR_TAIL, &subreq->flags);
        netfs_subreq_terminated(subreq, 0, false);
}

/*
 * Ask the netfs to issue a read request to the server for us.
 *
 * The netfs is expected to read from subreq->start + subreq->transferred to
 * subreq->start + subreq->len - 1.  It may not backtrack and write data into
 * the buffer prior to the transferred point as it might clobber dirty data
 * obtained from the cache.
 *
 * Alternatively, the netfs is allowed to indicate one of two things:
 *
 * - NETFS_SREQ_SHORT_READ: A short read - it will get called again to try and
 *   make progress.
 *
 * - NETFS_SREQ_CLEAR_TAIL: A short read - the rest of the buffer will be
 *   cleared.
 */
static void netfs_read_from_server(struct netfs_read_request *rreq,
                                   struct netfs_read_subrequest *subreq)
{
        netfs_stat(&netfs_n_rh_download);
        rreq->netfs_ops->issue_op(subreq);
}
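
/*
 * By way of illustration, a hedged sketch (not part of this file or of any
 * real filesystem): a netfs's ->issue_op() typically starts an asynchronous
 * RPC covering just the untransferred part of the window and later reports
 * the outcome with netfs_subreq_terminated().  All "myfs" names below are
 * hypothetical.
 *
 *	static void myfs_read_done(struct myfs_call *call)
 *	{
 *		struct netfs_read_subrequest *subreq = call->subreq;
 *
 *		// Bytes copied into the pages, or a negative errno; true
 *		// because this runs from an async completion context.
 *		netfs_subreq_terminated(subreq,
 *					call->error ?: call->nr_bytes, true);
 *	}
 *
 *	static void myfs_issue_op(struct netfs_read_subrequest *subreq)
 *	{
 *		// Honour the contract above: start at start + transferred
 *		// and never write before the transferred point.
 *		myfs_begin_read(subreq->rreq->inode,
 *				subreq->start + subreq->transferred,
 *				subreq->len - subreq->transferred,
 *				myfs_read_done, subreq);
 *	}
 */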

/*
 * Release those waiting.
 */
static void netfs_rreq_completed(struct netfs_read_request *rreq, bool was_async)
{
        trace_netfs_rreq(rreq, netfs_rreq_trace_done);
        netfs_rreq_clear_subreqs(rreq, was_async);
        netfs_put_read_request(rreq, was_async);
}

/*
 * Deal with the completion of writing the data to the cache.  We have to clear
 * the PG_fscache bits on the folios involved and release the caller's ref.
 *
 * May be called in softirq mode and we inherit a ref from the caller.
 */
static void netfs_rreq_unmark_after_write(struct netfs_read_request *rreq,
                                          bool was_async)
{
        struct netfs_read_subrequest *subreq;
        struct folio *folio;
        pgoff_t unlocked = 0;
        bool have_unlocked = false;

        rcu_read_lock();

        list_for_each_entry(subreq, &rreq->subrequests, rreq_link) {
                XA_STATE(xas, &rreq->mapping->i_pages, subreq->start / PAGE_SIZE);

                xas_for_each(&xas, folio, (subreq->start + subreq->len - 1) / PAGE_SIZE) {
                        /* We might have multiple writes from the same huge
                         * folio, but we mustn't unlock a folio more than once.
                         */
                        if (have_unlocked && folio_index(folio) <= unlocked)
                                continue;
                        unlocked = folio_index(folio);
                        folio_end_fscache(folio);
                        have_unlocked = true;
                }
        }

        rcu_read_unlock();
        netfs_rreq_completed(rreq, was_async);
}

static void netfs_rreq_copy_terminated(void *priv, ssize_t transferred_or_error,
                                       bool was_async)
{
        struct netfs_read_subrequest *subreq = priv;
        struct netfs_read_request *rreq = subreq->rreq;

        if (IS_ERR_VALUE(transferred_or_error)) {
                netfs_stat(&netfs_n_rh_write_failed);
                trace_netfs_failure(rreq, subreq, transferred_or_error,
                                    netfs_fail_copy_to_cache);
        } else {
                netfs_stat(&netfs_n_rh_write_done);
        }

        trace_netfs_sreq(subreq, netfs_sreq_trace_write_term);

        /* If we decrement nr_wr_ops to 0, the ref belongs to us. */
        if (atomic_dec_and_test(&rreq->nr_wr_ops))
                netfs_rreq_unmark_after_write(rreq, was_async);

        netfs_put_subrequest(subreq, was_async);
}

/*
 * Perform any outstanding writes to the cache.  We inherit a ref from the
 * caller.
 */
static void netfs_rreq_do_write_to_cache(struct netfs_read_request *rreq)
{
        struct netfs_cache_resources *cres = &rreq->cache_resources;
        struct netfs_read_subrequest *subreq, *next, *p;
        struct iov_iter iter;
        int ret;

        trace_netfs_rreq(rreq, netfs_rreq_trace_write);

        /* We don't want terminating writes trying to wake us up whilst we're
         * still going through the list.
         */
        atomic_inc(&rreq->nr_wr_ops);

        list_for_each_entry_safe(subreq, p, &rreq->subrequests, rreq_link) {
                if (!test_bit(NETFS_SREQ_WRITE_TO_CACHE, &subreq->flags)) {
                        list_del_init(&subreq->rreq_link);
                        netfs_put_subrequest(subreq, false);
                }
        }

        list_for_each_entry(subreq, &rreq->subrequests, rreq_link) {
                /* Amalgamate adjacent writes */
                while (!list_is_last(&subreq->rreq_link, &rreq->subrequests)) {
                        next = list_next_entry(subreq, rreq_link);
                        if (next->start != subreq->start + subreq->len)
                                break;
                        subreq->len += next->len;
                        list_del_init(&next->rreq_link);
                        netfs_put_subrequest(next, false);
                }

                ret = cres->ops->prepare_write(cres, &subreq->start, &subreq->len,
                                               rreq->i_size);
                if (ret < 0) {
                        trace_netfs_failure(rreq, subreq, ret, netfs_fail_prepare_write);
                        trace_netfs_sreq(subreq, netfs_sreq_trace_write_skip);
                        continue;
                }

                iov_iter_xarray(&iter, WRITE, &rreq->mapping->i_pages,
                                subreq->start, subreq->len);

                atomic_inc(&rreq->nr_wr_ops);
                netfs_stat(&netfs_n_rh_write);
                netfs_get_read_subrequest(subreq);
                trace_netfs_sreq(subreq, netfs_sreq_trace_write);
                cres->ops->write(cres, subreq->start, &iter,
                                 netfs_rreq_copy_terminated, subreq);
        }

        /* If we decrement nr_wr_ops to 0, the usage ref belongs to us. */
        if (atomic_dec_and_test(&rreq->nr_wr_ops))
                netfs_rreq_unmark_after_write(rreq, false);
}

static void netfs_rreq_write_to_cache_work(struct work_struct *work)
{
        struct netfs_read_request *rreq =
                container_of(work, struct netfs_read_request, work);

        netfs_rreq_do_write_to_cache(rreq);
}

static void netfs_rreq_write_to_cache(struct netfs_read_request *rreq)
{
        rreq->work.func = netfs_rreq_write_to_cache_work;
        if (!queue_work(system_unbound_wq, &rreq->work))
                BUG();
}

/*
 * Unlock the folios in a read operation.  We need to set PG_fscache on any
 * folios we're going to write back before we unlock them.
 */
static void netfs_rreq_unlock(struct netfs_read_request *rreq)
{
        struct netfs_read_subrequest *subreq;
        struct folio *folio;
        unsigned int iopos, account = 0;
        pgoff_t start_page = rreq->start / PAGE_SIZE;
        pgoff_t last_page = ((rreq->start + rreq->len) / PAGE_SIZE) - 1;
        bool subreq_failed = false;

        XA_STATE(xas, &rreq->mapping->i_pages, start_page);

        if (test_bit(NETFS_RREQ_FAILED, &rreq->flags)) {
                __clear_bit(NETFS_RREQ_WRITE_TO_CACHE, &rreq->flags);
                list_for_each_entry(subreq, &rreq->subrequests, rreq_link) {
                        __clear_bit(NETFS_SREQ_WRITE_TO_CACHE, &subreq->flags);
                }
        }

        /* Walk through the pagecache and the I/O request lists simultaneously.
         * We may have a mixture of cached and uncached sections and we only
         * really want to write out the uncached sections.  This is slightly
         * complicated by the possibility that we might have huge pages with a
         * mixture inside.
         */
        subreq = list_first_entry(&rreq->subrequests,
                                  struct netfs_read_subrequest, rreq_link);
        iopos = 0;
        subreq_failed = (subreq->error < 0);

        trace_netfs_rreq(rreq, netfs_rreq_trace_unlock);

        rcu_read_lock();
        xas_for_each(&xas, folio, last_page) {
                unsigned int pgpos = (folio_index(folio) - start_page) * PAGE_SIZE;
                unsigned int pgend = pgpos + folio_size(folio);
                bool pg_failed = false;

                for (;;) {
                        if (!subreq) {
                                pg_failed = true;
                                break;
                        }
                        if (test_bit(NETFS_SREQ_WRITE_TO_CACHE, &subreq->flags))
                                folio_start_fscache(folio);
                        pg_failed |= subreq_failed;
                        if (pgend < iopos + subreq->len)
                                break;

                        account += subreq->transferred;
                        iopos += subreq->len;
                        if (!list_is_last(&subreq->rreq_link, &rreq->subrequests)) {
                                subreq = list_next_entry(subreq, rreq_link);
                                subreq_failed = (subreq->error < 0);
                        } else {
                                subreq = NULL;
                                subreq_failed = false;
                        }
                        if (pgend == iopos)
                                break;
                }

                if (!pg_failed) {
                        flush_dcache_folio(folio);
                        folio_mark_uptodate(folio);
                }

                if (!test_bit(NETFS_RREQ_DONT_UNLOCK_FOLIOS, &rreq->flags)) {
                        if (folio_index(folio) == rreq->no_unlock_folio &&
                            test_bit(NETFS_RREQ_NO_UNLOCK_FOLIO, &rreq->flags))
                                _debug("no unlock");
                        else
                                folio_unlock(folio);
                }
        }
        rcu_read_unlock();

        task_io_account_read(account);
        if (rreq->netfs_ops->done)
                rreq->netfs_ops->done(rreq);
}

/*
 * Handle a short read.
 */
static void netfs_rreq_short_read(struct netfs_read_request *rreq,
                                  struct netfs_read_subrequest *subreq)
{
        __clear_bit(NETFS_SREQ_SHORT_READ, &subreq->flags);
        __set_bit(NETFS_SREQ_SEEK_DATA_READ, &subreq->flags);

        netfs_stat(&netfs_n_rh_short_read);
        trace_netfs_sreq(subreq, netfs_sreq_trace_resubmit_short);

        netfs_get_read_subrequest(subreq);
        atomic_inc(&rreq->nr_rd_ops);
        if (subreq->source == NETFS_READ_FROM_CACHE)
                netfs_read_from_cache(rreq, subreq, true);
        else
                netfs_read_from_server(rreq, subreq);
}

/*
 * Resubmit any short or failed operations.  Returns true if we got the rreq
 * ref back.
 */
static bool netfs_rreq_perform_resubmissions(struct netfs_read_request *rreq)
{
        struct netfs_read_subrequest *subreq;

        WARN_ON(in_interrupt());

        trace_netfs_rreq(rreq, netfs_rreq_trace_resubmit);

        /* We don't want terminating submissions trying to wake us up whilst
         * we're still going through the list.
         */
        atomic_inc(&rreq->nr_rd_ops);

        __clear_bit(NETFS_RREQ_INCOMPLETE_IO, &rreq->flags);
        list_for_each_entry(subreq, &rreq->subrequests, rreq_link) {
                if (subreq->error) {
                        if (subreq->source != NETFS_READ_FROM_CACHE)
                                break;
                        subreq->source = NETFS_DOWNLOAD_FROM_SERVER;
                        subreq->error = 0;
                        netfs_stat(&netfs_n_rh_download_instead);
                        trace_netfs_sreq(subreq, netfs_sreq_trace_download_instead);
                        netfs_get_read_subrequest(subreq);
                        atomic_inc(&rreq->nr_rd_ops);
                        netfs_read_from_server(rreq, subreq);
                } else if (test_bit(NETFS_SREQ_SHORT_READ, &subreq->flags)) {
                        netfs_rreq_short_read(rreq, subreq);
                }
        }

        /* If we decrement nr_rd_ops to 0, the usage ref belongs to us. */
        if (atomic_dec_and_test(&rreq->nr_rd_ops))
                return true;

        wake_up_var(&rreq->nr_rd_ops);
        return false;
}

/*
 * Check to see if the data read is still valid.
 */
static void netfs_rreq_is_still_valid(struct netfs_read_request *rreq)
{
        struct netfs_read_subrequest *subreq;

        if (!rreq->netfs_ops->is_still_valid ||
            rreq->netfs_ops->is_still_valid(rreq))
                return;

        list_for_each_entry(subreq, &rreq->subrequests, rreq_link) {
                if (subreq->source == NETFS_READ_FROM_CACHE) {
                        subreq->error = -ESTALE;
                        __set_bit(NETFS_RREQ_INCOMPLETE_IO, &rreq->flags);
                }
        }
}

/*
 * Assess the state of a read request and decide what to do next.
 *
 * Note that we could be in an ordinary kernel thread, on a workqueue or in
 * softirq context at this point.  We inherit a ref from the caller.
 */
static void netfs_rreq_assess(struct netfs_read_request *rreq, bool was_async)
{
        trace_netfs_rreq(rreq, netfs_rreq_trace_assess);

again:
        netfs_rreq_is_still_valid(rreq);

        if (!test_bit(NETFS_RREQ_FAILED, &rreq->flags) &&
            test_bit(NETFS_RREQ_INCOMPLETE_IO, &rreq->flags)) {
                if (netfs_rreq_perform_resubmissions(rreq))
                        goto again;
                return;
        }

        netfs_rreq_unlock(rreq);

        clear_bit_unlock(NETFS_RREQ_IN_PROGRESS, &rreq->flags);
        wake_up_bit(&rreq->flags, NETFS_RREQ_IN_PROGRESS);

        if (test_bit(NETFS_RREQ_WRITE_TO_CACHE, &rreq->flags))
                return netfs_rreq_write_to_cache(rreq);

        netfs_rreq_completed(rreq, was_async);
}

static void netfs_rreq_work(struct work_struct *work)
{
        struct netfs_read_request *rreq =
                container_of(work, struct netfs_read_request, work);
        netfs_rreq_assess(rreq, false);
}

/*
 * Handle the completion of all outstanding I/O operations on a read request.
 * We inherit a ref from the caller.
 */
static void netfs_rreq_terminated(struct netfs_read_request *rreq,
                                  bool was_async)
{
        if (test_bit(NETFS_RREQ_INCOMPLETE_IO, &rreq->flags) &&
            was_async) {
                if (!queue_work(system_unbound_wq, &rreq->work))
                        BUG();
        } else {
                netfs_rreq_assess(rreq, was_async);
        }
}

/**
 * netfs_subreq_terminated - Note the termination of an I/O operation.
 * @subreq: The I/O request that has terminated.
 * @transferred_or_error: The amount of data transferred or an error code.
 * @was_async: The termination was asynchronous
 *
 * This tells the read helper that a contributory I/O operation has terminated,
 * one way or another, and that it should integrate the results.
 *
 * The caller indicates in @transferred_or_error the outcome of the operation,
 * supplying a positive value to indicate the number of bytes transferred, 0 to
 * indicate a failure to transfer anything that should be retried or a negative
 * error code.  The helper will look after reissuing I/O operations as
 * appropriate and writing downloaded data to the cache.
 *
 * If @was_async is true, the caller might be running in softirq or interrupt
 * context and we can't sleep.
 */
void netfs_subreq_terminated(struct netfs_read_subrequest *subreq,
                             ssize_t transferred_or_error,
                             bool was_async)
{
        struct netfs_read_request *rreq = subreq->rreq;
        int u;

        _enter("[%u]{%llx,%lx},%zd",
               subreq->debug_index, subreq->start, subreq->flags,
               transferred_or_error);

        switch (subreq->source) {
        case NETFS_READ_FROM_CACHE:
                netfs_stat(&netfs_n_rh_read_done);
                break;
        case NETFS_DOWNLOAD_FROM_SERVER:
                netfs_stat(&netfs_n_rh_download_done);
                break;
        default:
                break;
        }

        if (IS_ERR_VALUE(transferred_or_error)) {
                subreq->error = transferred_or_error;
                trace_netfs_failure(rreq, subreq, transferred_or_error,
                                    netfs_fail_read);
                goto failed;
        }

        if (WARN(transferred_or_error > subreq->len - subreq->transferred,
                 "Subreq overread: R%x[%x] %zd > %zu - %zu",
                 rreq->debug_id, subreq->debug_index,
                 transferred_or_error, subreq->len, subreq->transferred))
                transferred_or_error = subreq->len - subreq->transferred;

        subreq->error = 0;
        subreq->transferred += transferred_or_error;
        if (subreq->transferred < subreq->len)
                goto incomplete;

complete:
        __clear_bit(NETFS_SREQ_NO_PROGRESS, &subreq->flags);
        if (test_bit(NETFS_SREQ_WRITE_TO_CACHE, &subreq->flags))
                set_bit(NETFS_RREQ_WRITE_TO_CACHE, &rreq->flags);

out:
        trace_netfs_sreq(subreq, netfs_sreq_trace_terminated);

        /* If we decrement nr_rd_ops to 0, the ref belongs to us. */
        u = atomic_dec_return(&rreq->nr_rd_ops);
        if (u == 0)
                netfs_rreq_terminated(rreq, was_async);
        else if (u == 1)
                wake_up_var(&rreq->nr_rd_ops);

        netfs_put_subrequest(subreq, was_async);
        return;

incomplete:
        if (test_bit(NETFS_SREQ_CLEAR_TAIL, &subreq->flags)) {
                netfs_clear_unread(subreq);
                subreq->transferred = subreq->len;
                goto complete;
        }

        if (transferred_or_error == 0) {
                if (__test_and_set_bit(NETFS_SREQ_NO_PROGRESS, &subreq->flags)) {
                        subreq->error = -ENODATA;
                        goto failed;
                }
        } else {
                __clear_bit(NETFS_SREQ_NO_PROGRESS, &subreq->flags);
        }

        __set_bit(NETFS_SREQ_SHORT_READ, &subreq->flags);
        set_bit(NETFS_RREQ_INCOMPLETE_IO, &rreq->flags);
        goto out;

failed:
        if (subreq->source == NETFS_READ_FROM_CACHE) {
                netfs_stat(&netfs_n_rh_read_failed);
                set_bit(NETFS_RREQ_INCOMPLETE_IO, &rreq->flags);
        } else {
                netfs_stat(&netfs_n_rh_download_failed);
                set_bit(NETFS_RREQ_FAILED, &rreq->flags);
                rreq->error = subreq->error;
        }
        goto out;
}
EXPORT_SYMBOL(netfs_subreq_terminated);
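
/*
 * Usage sketch (hypothetical "myfs" helpers; myfs_do_rpc() is assumed to
 * return bytes read or a negative errno): a netfs that performs its reads
 * synchronously from a worker thread would report results like this.  Note
 * the convention documented above: a positive byte count for progress, 0 for
 * a retryable empty read, a negative errno for failure.
 *
 *	struct myfs_read_work {
 *		struct work_struct work;
 *		struct netfs_read_subrequest *subreq;
 *	};
 *
 *	static void myfs_read_worker(struct work_struct *work)
 *	{
 *		struct myfs_read_work *w =
 *			container_of(work, struct myfs_read_work, work);
 *		ssize_t ret = myfs_do_rpc(w->subreq);
 *
 *		// Process context: was_async is false, so the helper may
 *		// resubmit or write to the cache directly from here.
 *		netfs_subreq_terminated(w->subreq, ret, false);
 *		kfree(w);
 *	}
 */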

static enum netfs_read_source netfs_cache_prepare_read(struct netfs_read_subrequest *subreq,
                                                       loff_t i_size)
{
        struct netfs_read_request *rreq = subreq->rreq;
        struct netfs_cache_resources *cres = &rreq->cache_resources;

        if (cres->ops)
                return cres->ops->prepare_read(subreq, i_size);
        if (subreq->start >= rreq->i_size)
                return NETFS_FILL_WITH_ZEROES;
        return NETFS_DOWNLOAD_FROM_SERVER;
}

/*
 * Work out what sort of subrequest the next one will be.
 */
static enum netfs_read_source
netfs_rreq_prepare_read(struct netfs_read_request *rreq,
                        struct netfs_read_subrequest *subreq)
{
        enum netfs_read_source source;

        _enter("%llx-%llx,%llx", subreq->start, subreq->start + subreq->len, rreq->i_size);

        source = netfs_cache_prepare_read(subreq, rreq->i_size);
        if (source == NETFS_INVALID_READ)
                goto out;

        if (source == NETFS_DOWNLOAD_FROM_SERVER) {
                /* Call out to the netfs to let it shrink the request to fit
                 * its own I/O sizes and boundaries.  If it shrinks it here, it
                 * will be called again to make simultaneous calls; if it wants
                 * to make serial calls, it can indicate a short read and then
                 * we will call it again.
                 */
                if (subreq->len > rreq->i_size - subreq->start)
                        subreq->len = rreq->i_size - subreq->start;

                if (rreq->netfs_ops->clamp_length &&
                    !rreq->netfs_ops->clamp_length(subreq)) {
                        source = NETFS_INVALID_READ;
                        goto out;
                }
        }

        if (WARN_ON(subreq->len == 0))
                source = NETFS_INVALID_READ;

out:
        subreq->source = source;
        trace_netfs_sreq(subreq, netfs_sreq_trace_prepare);
        return source;
}
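
/*
 * Illustrative sketch of the optional ->clamp_length() hook (the names and
 * the MYFS_RPC_SIZE constant are assumptions, not part of this API; the
 * constant is assumed to be a power of two for round_up()): the netfs may
 * shrink subreq->len so that no subrequest crosses its preferred RPC
 * boundary; returning false marks the slice NETFS_INVALID_READ.
 *
 *	static bool myfs_clamp_length(struct netfs_read_subrequest *subreq)
 *	{
 *		unsigned long long boundary =
 *			round_up(subreq->start + 1, MYFS_RPC_SIZE);
 *
 *		// Trim to the next boundary; the helper will come back for
 *		// the remainder on the next trip around its slicing loop.
 *		subreq->len = min_t(unsigned long long, subreq->len,
 *				    boundary - subreq->start);
 *		return true;
 *	}
 */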

/*
 * Slice off a piece of a read request and submit an I/O request for it.
 */
static bool netfs_rreq_submit_slice(struct netfs_read_request *rreq,
                                    unsigned int *_debug_index)
{
        struct netfs_read_subrequest *subreq;
        enum netfs_read_source source;

        subreq = netfs_alloc_subrequest(rreq);
        if (!subreq)
                return false;

        subreq->debug_index = (*_debug_index)++;
        subreq->start = rreq->start + rreq->submitted;
        subreq->len = rreq->len - rreq->submitted;

        _debug("slice %llx,%zx,%zx", subreq->start, subreq->len, rreq->submitted);
        list_add_tail(&subreq->rreq_link, &rreq->subrequests);

        /* Call out to the cache to find out what it can do with the remaining
         * subset.  It tells us in subreq->flags what it decided should be done
         * and adjusts subreq->len down if the subset crosses a cache boundary.
         *
         * Then when we hand the subset, it can choose to take a subset of that
         * (the starts must coincide), in which case, we go around the loop
         * again and ask it to download the next piece.
         */
        source = netfs_rreq_prepare_read(rreq, subreq);
        if (source == NETFS_INVALID_READ)
                goto subreq_failed;

        atomic_inc(&rreq->nr_rd_ops);

        rreq->submitted += subreq->len;

        trace_netfs_sreq(subreq, netfs_sreq_trace_submit);
        switch (source) {
        case NETFS_FILL_WITH_ZEROES:
                netfs_fill_with_zeroes(rreq, subreq);
                break;
        case NETFS_DOWNLOAD_FROM_SERVER:
                netfs_read_from_server(rreq, subreq);
                break;
        case NETFS_READ_FROM_CACHE:
                netfs_read_from_cache(rreq, subreq, false);
                break;
        default:
                BUG();
        }

        return true;

subreq_failed:
        rreq->error = subreq->error;
        netfs_put_subrequest(subreq, false);
        return false;
}

static void netfs_cache_expand_readahead(struct netfs_read_request *rreq,
                                         loff_t *_start, size_t *_len, loff_t i_size)
{
        struct netfs_cache_resources *cres = &rreq->cache_resources;

        if (cres->ops && cres->ops->expand_readahead)
                cres->ops->expand_readahead(cres, _start, _len, i_size);
}

static void netfs_rreq_expand(struct netfs_read_request *rreq,
                              struct readahead_control *ractl)
{
        /* Give the cache a chance to change the request parameters.  The
         * resultant request must contain the original region.
         */
        netfs_cache_expand_readahead(rreq, &rreq->start, &rreq->len, rreq->i_size);

        /* Give the netfs a chance to change the request parameters.  The
         * resultant request must contain the original region.
         */
        if (rreq->netfs_ops->expand_readahead)
                rreq->netfs_ops->expand_readahead(rreq);

        /* Expand the request if the cache wants it to start earlier.  Note
         * that the expansion may get further extended if the VM wishes to
         * insert THPs and the preferred start and/or end wind up in the middle
         * of THPs.
         *
         * If this is the case, however, the THP size should be an integer
         * multiple of the cache granule size, so we get a whole number of
         * granules to deal with.
         */
        if (rreq->start != readahead_pos(ractl) ||
            rreq->len != readahead_length(ractl)) {
                readahead_expand(ractl, rreq->start, rreq->len);
                rreq->start = readahead_pos(ractl);
                rreq->len = readahead_length(ractl);

                trace_netfs_read(rreq, readahead_pos(ractl), readahead_length(ractl),
                                 netfs_read_trace_expanded);
        }
}

/**
 * netfs_readahead - Helper to manage a read request
 * @ractl: The description of the readahead request
 * @ops: The network filesystem's operations for the helper to use
 * @netfs_priv: Private netfs data to be retained in the request
 *
 * Fulfil a readahead request by drawing data from the cache if possible, or
 * the netfs if not.  Space beyond the EOF is zero-filled.  Multiple I/O
 * requests from different sources will get munged together.  If necessary, the
 * readahead window can be expanded in either direction to a more convenient
 * alignment for RPC efficiency or to make storage in the cache feasible.
 *
 * The calling netfs must provide a table of operations, only one of which,
 * issue_op, is mandatory.  It may also be passed a private token, which will
 * be retained in rreq->netfs_priv and will be cleaned up by ops->cleanup().
 *
 * This is usable whether or not caching is enabled.
 */
void netfs_readahead(struct readahead_control *ractl,
                     const struct netfs_read_request_ops *ops,
                     void *netfs_priv)
{
        struct netfs_read_request *rreq;
        unsigned int debug_index = 0;
        int ret;

        _enter("%lx,%x", readahead_index(ractl), readahead_count(ractl));

        if (readahead_count(ractl) == 0)
                goto cleanup;

        rreq = netfs_alloc_read_request(ops, netfs_priv, ractl->file);
        if (!rreq)
                goto cleanup;
        rreq->mapping = ractl->mapping;
        rreq->start = readahead_pos(ractl);
        rreq->len = readahead_length(ractl);

        if (ops->begin_cache_operation) {
                ret = ops->begin_cache_operation(rreq);
                if (ret == -ENOMEM || ret == -EINTR || ret == -ERESTARTSYS)
                        goto cleanup_free;
        }

        netfs_stat(&netfs_n_rh_readahead);
        trace_netfs_read(rreq, readahead_pos(ractl), readahead_length(ractl),
                         netfs_read_trace_readahead);

        netfs_rreq_expand(rreq, ractl);

        atomic_set(&rreq->nr_rd_ops, 1);
        do {
                if (!netfs_rreq_submit_slice(rreq, &debug_index))
                        break;

        } while (rreq->submitted < rreq->len);

        /* Drop the refs on the folios here rather than in the cache or
         * filesystem.  The locks will be dropped in netfs_rreq_unlock().
         */
        while (readahead_folio(ractl))
                ;

        /* If we decrement nr_rd_ops to 0, the ref belongs to us. */
        if (atomic_dec_and_test(&rreq->nr_rd_ops))
                netfs_rreq_assess(rreq, false);
        return;

cleanup_free:
        netfs_put_read_request(rreq, false);
        return;
cleanup:
        if (netfs_priv)
                ops->cleanup(ractl->mapping, netfs_priv);
        return;
}
EXPORT_SYMBOL(netfs_readahead);
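
/*
 * Minimal wiring sketch (hypothetical "myfs" filesystem): the ops table a
 * caller might pass in.  Note that, in addition to issue_op, this version of
 * the code also calls init_rreq() unconditionally from
 * netfs_alloc_read_request(), so that hook must be supplied too.
 *
 *	static const struct netfs_read_request_ops myfs_req_ops = {
 *		.init_rreq		= myfs_init_rreq,
 *		.is_cache_enabled	= myfs_is_cache_enabled,
 *		.issue_op		= myfs_issue_op,
 *		.cleanup		= myfs_priv_cleanup,
 *	};
 *
 *	static void myfs_readahead(struct readahead_control *ractl)
 *	{
 *		netfs_readahead(ractl, &myfs_req_ops, NULL);
 *	}
 */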

/**
 * netfs_readpage - Helper to manage a readpage request
 * @file: The file to read from
 * @folio: The folio to read
 * @ops: The network filesystem's operations for the helper to use
 * @netfs_priv: Private netfs data to be retained in the request
 *
 * Fulfil a readpage request by drawing data from the cache if possible, or the
 * netfs if not.  Space beyond the EOF is zero-filled.  Multiple I/O requests
 * from different sources will get munged together.
 *
 * The calling netfs must provide a table of operations, only one of which,
 * issue_op, is mandatory.  It may also be passed a private token, which will
 * be retained in rreq->netfs_priv and will be cleaned up by ops->cleanup().
 *
 * This is usable whether or not caching is enabled.
 */
int netfs_readpage(struct file *file,
                   struct folio *folio,
                   const struct netfs_read_request_ops *ops,
                   void *netfs_priv)
{
        struct netfs_read_request *rreq;
        unsigned int debug_index = 0;
        int ret;

        _enter("%lx", folio_index(folio));

        rreq = netfs_alloc_read_request(ops, netfs_priv, file);
        if (!rreq) {
                if (netfs_priv)
                        ops->cleanup(folio_file_mapping(folio), netfs_priv);
                folio_unlock(folio);
                return -ENOMEM;
        }
        rreq->mapping = folio_file_mapping(folio);
        rreq->start = folio_file_pos(folio);
        rreq->len = folio_size(folio);

        if (ops->begin_cache_operation) {
                ret = ops->begin_cache_operation(rreq);
                if (ret == -ENOMEM || ret == -EINTR || ret == -ERESTARTSYS) {
                        folio_unlock(folio);
                        goto out;
                }
        }

        netfs_stat(&netfs_n_rh_readpage);
        trace_netfs_read(rreq, rreq->start, rreq->len, netfs_read_trace_readpage);

        netfs_get_read_request(rreq);

        atomic_set(&rreq->nr_rd_ops, 1);
        do {
                if (!netfs_rreq_submit_slice(rreq, &debug_index))
                        break;

        } while (rreq->submitted < rreq->len);

        /* Keep nr_rd_ops incremented so that the ref always belongs to us, and
         * the service code isn't punted off to a random thread pool to
         * process.
         */
        do {
                wait_var_event(&rreq->nr_rd_ops, atomic_read(&rreq->nr_rd_ops) == 1);
                netfs_rreq_assess(rreq, false);
        } while (test_bit(NETFS_RREQ_IN_PROGRESS, &rreq->flags));

        ret = rreq->error;
        if (ret == 0 && rreq->submitted < rreq->len) {
                trace_netfs_failure(rreq, NULL, ret, netfs_fail_short_readpage);
                ret = -EIO;
        }
out:
        netfs_put_read_request(rreq, false);
        return ret;
}
EXPORT_SYMBOL(netfs_readpage);
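
/*
 * A hedged caller sketch: an address_space ->readpage() that defers to this
 * helper.  page_folio() converts the legacy page argument; myfs_req_ops is
 * the hypothetical table shown after netfs_readahead() above.
 *
 *	static int myfs_readpage(struct file *file, struct page *page)
 *	{
 *		struct folio *folio = page_folio(page);
 *
 *		return netfs_readpage(file, folio, &myfs_req_ops, NULL);
 *	}
 */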

/*
 * Prepare a folio for writing without reading first
 * @folio: The folio being prepared
 * @pos: starting position for the write
 * @len: length of write
 *
 * In some cases, write_begin doesn't need to read at all:
 * - full folio write
 * - write that lies in a folio that is completely beyond EOF
 * - write that covers the folio from start to EOF or beyond it
 *
 * If any of these criteria are met, then zero out the unwritten parts
 * of the folio and return true.  Otherwise, return false.
 */
static bool netfs_skip_folio_read(struct folio *folio, loff_t pos, size_t len)
{
        struct inode *inode = folio_inode(folio);
        loff_t i_size = i_size_read(inode);
        size_t offset = offset_in_folio(folio, pos);

        /* Full folio write */
        if (offset == 0 && len >= folio_size(folio))
                return true;

        /* pos beyond last folio in the file */
        if (pos - offset >= i_size)
                goto zero_out;

        /* Write that covers from the start of the folio to EOF or beyond */
        if (offset == 0 && (pos + len) >= i_size)
                goto zero_out;

        return false;
zero_out:
        zero_user_segments(&folio->page, 0, offset, offset + len, folio_size(folio));
        return true;
}

/**
 * netfs_write_begin - Helper to prepare for writing
 * @file: The file to read from
 * @mapping: The mapping to read from
 * @pos: File position at which the write will begin
 * @len: The length of the write (may extend beyond the end of the folio chosen)
 * @aop_flags: AOP_* flags
 * @_folio: Where to put the resultant folio
 * @_fsdata: Place for the netfs to store a cookie
 * @ops: The network filesystem's operations for the helper to use
 * @netfs_priv: Private netfs data to be retained in the request
 *
 * Pre-read data for a write-begin request by drawing data from the cache if
 * possible, or the netfs if not.  Space beyond the EOF is zero-filled.
 * Multiple I/O requests from different sources will get munged together.  If
 * necessary, the readahead window can be expanded in either direction to a
 * more convenient alignment for RPC efficiency or to make storage in the cache
 * feasible.
 *
 * The calling netfs must provide a table of operations, only one of which,
 * issue_op, is mandatory.
 *
 * The check_write_begin() operation can be provided to check for and flush
 * conflicting writes once the folio is grabbed and locked.  It is passed a
 * pointer to the fsdata cookie that gets returned to the VM to be passed to
 * write_end.  It is permitted to sleep.  It should return 0 if the request
 * should go ahead; unlock the folio and return -EAGAIN to cause the folio to
 * be re-obtained; or return an error.
 *
 * This is usable whether or not caching is enabled.
 */
int netfs_write_begin(struct file *file, struct address_space *mapping,
                      loff_t pos, unsigned int len, unsigned int aop_flags,
                      struct folio **_folio, void **_fsdata,
                      const struct netfs_read_request_ops *ops,
                      void *netfs_priv)
{
        struct netfs_read_request *rreq;
        struct folio *folio;
        struct inode *inode = file_inode(file);
        unsigned int debug_index = 0, fgp_flags;
        pgoff_t index = pos >> PAGE_SHIFT;
        int ret;

        DEFINE_READAHEAD(ractl, file, NULL, mapping, index);

retry:
        fgp_flags = FGP_LOCK | FGP_WRITE | FGP_CREAT | FGP_STABLE;
        if (aop_flags & AOP_FLAG_NOFS)
                fgp_flags |= FGP_NOFS;
        folio = __filemap_get_folio(mapping, index, fgp_flags,
                                    mapping_gfp_mask(mapping));
        if (!folio)
                return -ENOMEM;

        if (ops->check_write_begin) {
                /* Allow the netfs (eg. ceph) to flush conflicts. */
                ret = ops->check_write_begin(file, pos, len, folio, _fsdata);
                if (ret < 0) {
                        trace_netfs_failure(NULL, NULL, ret, netfs_fail_check_write_begin);
                        if (ret == -EAGAIN)
                                goto retry;
                        goto error;
                }
        }

        if (folio_test_uptodate(folio))
                goto have_folio;

        /* If the page is beyond the EOF, we want to clear it - unless it's
         * within the cache granule containing the EOF, in which case we need
         * to preload the granule.
         */
        if (!ops->is_cache_enabled(inode) &&
            netfs_skip_folio_read(folio, pos, len)) {
                netfs_stat(&netfs_n_rh_write_zskip);
                goto have_folio_no_wait;
        }

        ret = -ENOMEM;
        rreq = netfs_alloc_read_request(ops, netfs_priv, file);
        if (!rreq)
                goto error;
        rreq->mapping = folio_file_mapping(folio);
        rreq->start = folio_file_pos(folio);
        rreq->len = folio_size(folio);
        rreq->no_unlock_folio = folio_index(folio);
        __set_bit(NETFS_RREQ_NO_UNLOCK_FOLIO, &rreq->flags);
        netfs_priv = NULL;

        if (ops->begin_cache_operation) {
                ret = ops->begin_cache_operation(rreq);
                if (ret == -ENOMEM || ret == -EINTR || ret == -ERESTARTSYS)
                        goto error_put;
        }

        netfs_stat(&netfs_n_rh_write_begin);
        trace_netfs_read(rreq, pos, len, netfs_read_trace_write_begin);

        /* Expand the request to meet caching requirements and download
         * preferences.
         */
        ractl._nr_pages = folio_nr_pages(folio);
        netfs_rreq_expand(rreq, &ractl);
        netfs_get_read_request(rreq);

        /* We hold the folio locks, so we can drop the references */
        folio_get(folio);
        while (readahead_folio(&ractl))
                ;

        atomic_set(&rreq->nr_rd_ops, 1);
        do {
                if (!netfs_rreq_submit_slice(rreq, &debug_index))
                        break;

        } while (rreq->submitted < rreq->len);

        /* Keep nr_rd_ops incremented so that the ref always belongs to us, and
         * the service code isn't punted off to a random thread pool to
         * process.
         */
        for (;;) {
                wait_var_event(&rreq->nr_rd_ops, atomic_read(&rreq->nr_rd_ops) == 1);
                netfs_rreq_assess(rreq, false);
                if (!test_bit(NETFS_RREQ_IN_PROGRESS, &rreq->flags))
                        break;
                cond_resched();
        }

        ret = rreq->error;
        if (ret == 0 && rreq->submitted < rreq->len) {
                trace_netfs_failure(rreq, NULL, ret, netfs_fail_short_write_begin);
                ret = -EIO;
        }
        netfs_put_read_request(rreq, false);
        if (ret < 0)
                goto error;

have_folio:
        ret = folio_wait_fscache_killable(folio);
        if (ret < 0)
                goto error;
have_folio_no_wait:
        if (netfs_priv)
                ops->cleanup(mapping, netfs_priv);
        *_folio = folio;
        _leave(" = 0");
        return 0;

error_put:
        netfs_put_read_request(rreq, false);
error:
        folio_unlock(folio);
        folio_put(folio);
        if (netfs_priv)
                ops->cleanup(mapping, netfs_priv);
        _leave(" = %d", ret);
        return ret;
}
EXPORT_SYMBOL(netfs_write_begin);
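
/*
 * A hedged caller sketch (hypothetical "myfs" again; note that this path
 * calls ops->is_cache_enabled() unconditionally, so that hook is required
 * here): an address_space ->write_begin() that unwraps the folio for the VM.
 *
 *	static int myfs_write_begin(struct file *file,
 *				    struct address_space *mapping,
 *				    loff_t pos, unsigned int len,
 *				    unsigned int flags,
 *				    struct page **pagep, void **fsdata)
 *	{
 *		struct folio *folio;
 *		int ret;
 *
 *		ret = netfs_write_begin(file, mapping, pos, len, flags,
 *					&folio, fsdata, &myfs_req_ops, NULL);
 *		if (ret == 0)
 *			*pagep = &folio->page;
 *		return ret;
 *	}
 */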