Linus Torvalds | 1da177e | 2005-04-16 15:20:36 -0700 | [diff] [blame] | 1 | /* |
David Howells | 08e0e7c | 2007-04-26 15:55:03 -0700 | [diff] [blame] | 2 | * Copyright (c) 2002, 2007 Red Hat, Inc. All rights reserved. |
Linus Torvalds | 1da177e | 2005-04-16 15:20:36 -0700 | [diff] [blame] | 3 | * |
| 4 | * This software may be freely redistributed under the terms of the |
| 5 | * GNU General Public License. |
| 6 | * |
| 7 | * You should have received a copy of the GNU General Public License |
| 8 | * along with this program; if not, write to the Free Software |
| 9 | * Foundation, Inc., 675 Mass Ave, Cambridge, MA 02139, USA. |
| 10 | * |
David Woodhouse | 44d1b98 | 2008-06-05 22:46:18 -0700 | [diff] [blame] | 11 | * Authors: David Woodhouse <dwmw2@infradead.org> |
Linus Torvalds | 1da177e | 2005-04-16 15:20:36 -0700 | [diff] [blame] | 12 | * David Howells <dhowells@redhat.com> |
| 13 | * |
| 14 | */ |
| 15 | |
| 16 | #include <linux/kernel.h> |
| 17 | #include <linux/module.h> |
| 18 | #include <linux/init.h> |
David Howells | 08e0e7c | 2007-04-26 15:55:03 -0700 | [diff] [blame] | 19 | #include <linux/circ_buf.h> |
Alexey Dobriyan | e8edc6e | 2007-05-21 01:22:52 +0400 | [diff] [blame] | 20 | #include <linux/sched.h> |
Linus Torvalds | 1da177e | 2005-04-16 15:20:36 -0700 | [diff] [blame] | 21 | #include "internal.h" |
David Howells | 08e0e7c | 2007-04-26 15:55:03 -0700 | [diff] [blame] | 22 | |
David Howells | c435ee3 | 2017-11-02 15:27:49 +0000 | [diff] [blame] | 23 | /* |
David Howells | 6e0e99d | 2021-09-02 16:43:10 +0100 | [diff] [blame] | 24 | * Handle invalidation of an mmap'd file. We invalidate all the PTEs referring |
| 25 | * to the pages in this file's pagecache, forcing the kernel to go through |
| 26 | * ->fault() or ->page_mkwrite() - at which point we can handle invalidation |
| 27 | * more fully. |
| 28 | */ |
| 29 | void afs_invalidate_mmap_work(struct work_struct *work) |
| 30 | { |
| 31 | struct afs_vnode *vnode = container_of(work, struct afs_vnode, cb_work); |
| 32 | |
| 33 | unmap_mapping_pages(vnode->vfs_inode.i_mapping, 0, 0, false); |
| 34 | } |
| 35 | |
| 36 | void afs_server_init_callback_work(struct work_struct *work) |
| 37 | { |
| 38 | struct afs_server *server = container_of(work, struct afs_server, initcb_work); |
| 39 | struct afs_vnode *vnode; |
| 40 | struct afs_cell *cell = server->cell; |
| 41 | |
| 42 | down_read(&cell->fs_open_mmaps_lock); |
| 43 | |
| 44 | list_for_each_entry(vnode, &cell->fs_open_mmaps, cb_mmap_link) { |
| 45 | if (vnode->cb_server == server) { |
| 46 | clear_bit(AFS_VNODE_CB_PROMISED, &vnode->flags); |
| 47 | queue_work(system_unbound_wq, &vnode->cb_work); |
| 48 | } |
| 49 | } |
| 50 | |
| 51 | up_read(&cell->fs_open_mmaps_lock); |
| 52 | } |
| 53 | |
| 54 | /* |
David Howells | 3c4c407 | 2020-05-27 15:51:30 +0100 | [diff] [blame] | 55 | * Allow the fileserver to request callback state (re-)initialisation. |
| 56 | * Unfortunately, UUIDs are not guaranteed unique. |
Linus Torvalds | 1da177e | 2005-04-16 15:20:36 -0700 | [diff] [blame] | 57 | */ |
David Howells | 08e0e7c | 2007-04-26 15:55:03 -0700 | [diff] [blame] | 58 | void afs_init_callback_state(struct afs_server *server) |
Linus Torvalds | 1da177e | 2005-04-16 15:20:36 -0700 | [diff] [blame] | 59 | { |
David Howells | 3c4c407 | 2020-05-27 15:51:30 +0100 | [diff] [blame] | 60 | rcu_read_lock(); |
| 61 | do { |
| 62 | server->cb_s_break++; |
David Howells | 4fe6a94 | 2021-09-02 21:51:01 +0100 | [diff] [blame] | 63 | atomic_inc(&server->cell->fs_s_break); |
David Howells | 6e0e99d | 2021-09-02 16:43:10 +0100 | [diff] [blame] | 64 | if (!list_empty(&server->cell->fs_open_mmaps)) |
| 65 | queue_work(system_unbound_wq, &server->initcb_work); |
| 66 | |
| 67 | } while ((server = rcu_dereference(server->uuid_next))); |
David Howells | 3c4c407 | 2020-05-27 15:51:30 +0100 | [diff] [blame] | 68 | rcu_read_unlock(); |
David Howells | 08e0e7c | 2007-04-26 15:55:03 -0700 | [diff] [blame] | 69 | } |
| 70 | |
| 71 | /* |
| 72 | * actually break a callback |
| 73 | */ |
David Howells | 051d252 | 2019-06-20 18:12:16 +0100 | [diff] [blame] | 74 | void __afs_break_callback(struct afs_vnode *vnode, enum afs_cb_break_reason reason) |
David Howells | 08e0e7c | 2007-04-26 15:55:03 -0700 | [diff] [blame] | 75 | { |
| 76 | _enter(""); |
| 77 | |
David Howells | 5a81327 | 2018-04-06 14:17:26 +0100 | [diff] [blame] | 78 | clear_bit(AFS_VNODE_NEW_CONTENT, &vnode->flags); |
David Howells | c435ee3 | 2017-11-02 15:27:49 +0000 | [diff] [blame] | 79 | if (test_and_clear_bit(AFS_VNODE_CB_PROMISED, &vnode->flags)) { |
| 80 | vnode->cb_break++; |
David Howells | 4fe6a94 | 2021-09-02 21:51:01 +0100 | [diff] [blame] | 81 | vnode->cb_v_break = vnode->volume->cb_v_break; |
David Howells | c435ee3 | 2017-11-02 15:27:49 +0000 | [diff] [blame] | 82 | afs_clear_permits(vnode); |
| 83 | |
David Howells | c7226e4 | 2019-05-10 23:03:31 +0100 | [diff] [blame] | 84 | if (vnode->lock_state == AFS_VNODE_LOCK_WAITING_FOR_CB) |
David Howells | e8d6c55 | 2007-07-15 23:40:12 -0700 | [diff] [blame] | 85 | afs_lock_may_be_available(vnode); |
David Howells | 051d252 | 2019-06-20 18:12:16 +0100 | [diff] [blame] | 86 | |
David Howells | 6e0e99d | 2021-09-02 16:43:10 +0100 | [diff] [blame] | 87 | if (reason != afs_cb_break_for_deleted && |
| 88 | vnode->status.type == AFS_FTYPE_FILE && |
| 89 | atomic_read(&vnode->cb_nr_mmap)) |
| 90 | queue_work(system_unbound_wq, &vnode->cb_work); |
| 91 | |
David Howells | 051d252 | 2019-06-20 18:12:16 +0100 | [diff] [blame] | 92 | trace_afs_cb_break(&vnode->fid, vnode->cb_break, reason, true); |
| 93 | } else { |
| 94 | trace_afs_cb_break(&vnode->fid, vnode->cb_break, reason, false); |
David Howells | 08e0e7c | 2007-04-26 15:55:03 -0700 | [diff] [blame] | 95 | } |
David Howells | 30062bd | 2018-10-20 00:57:58 +0100 | [diff] [blame] | 96 | } |
David Howells | c435ee3 | 2017-11-02 15:27:49 +0000 | [diff] [blame] | 97 | |
/*
 * Break a vnode's callback promise, taking the write side of the vnode's
 * callback seqlock around __afs_break_callback() so that seqlock readers see
 * a consistent callback state.
 */
void afs_break_callback(struct afs_vnode *vnode, enum afs_cb_break_reason reason)
{
	write_seqlock(&vnode->cb_lock);
	__afs_break_callback(vnode, reason);
	write_sequnlock(&vnode->cb_lock);
}
| 104 | |
/*
 * Look up a volume by volume ID under RCU conditions.
 *
 * Returns the volume found in cell->volumes, or NULL if no volume has the
 * given ID.  NOTE(review): no reference appears to be taken here - the caller
 * must stay inside the RCU read-side section while using the result; confirm
 * against callers.
 */
static struct afs_volume *afs_lookup_volume_rcu(struct afs_cell *cell,
						afs_volid_t vid)
{
	struct afs_volume *volume = NULL;
	struct rb_node *p;
	int seq = 0;

	do {
		/* Unfortunately, rbtree walking doesn't give reliable results
		 * under just the RCU read lock, so we have to check for
		 * changes.
		 */
		read_seqbegin_or_lock(&cell->volume_lock, &seq);

		p = rcu_dereference_raw(cell->volumes.rb_node);
		while (p) {
			volume = rb_entry(p, struct afs_volume, cell_node);

			/* NOTE(review): smaller keys descend LEFT here, the
			 * reverse of the usual rbtree convention - this must
			 * mirror the insertion order used for cell->volumes
			 * elsewhere; confirm against the insert path.
			 */
			if (volume->vid < vid)
				p = rcu_dereference_raw(p->rb_left);
			else if (volume->vid > vid)
				p = rcu_dereference_raw(p->rb_right);
			else
				break;
			/* Not a match; clear so a failed walk returns NULL. */
			volume = NULL;
		}

	} while (need_seqretry(&cell->volume_lock, seq));

	done_seqretry(&cell->volume_lock, seq);
	return volume;
}
| 140 | |
| 141 | /* |
David Howells | 08e0e7c | 2007-04-26 15:55:03 -0700 | [diff] [blame] | 142 | * allow the fileserver to explicitly break one callback |
| 143 | * - happens when |
| 144 | * - the backing file is changed |
| 145 | * - a lock is released |
| 146 | */ |
David Howells | 2032596 | 2020-04-30 01:03:49 +0100 | [diff] [blame] | 147 | static void afs_break_one_callback(struct afs_volume *volume, |
| 148 | struct afs_fid *fid) |
David Howells | 08e0e7c | 2007-04-26 15:55:03 -0700 | [diff] [blame] | 149 | { |
David Howells | 2032596 | 2020-04-30 01:03:49 +0100 | [diff] [blame] | 150 | struct super_block *sb; |
David Howells | 08e0e7c | 2007-04-26 15:55:03 -0700 | [diff] [blame] | 151 | struct afs_vnode *vnode; |
David Howells | c435ee3 | 2017-11-02 15:27:49 +0000 | [diff] [blame] | 152 | struct inode *inode; |
David Howells | 08e0e7c | 2007-04-26 15:55:03 -0700 | [diff] [blame] | 153 | |
David Howells | 2032596 | 2020-04-30 01:03:49 +0100 | [diff] [blame] | 154 | if (fid->vnode == 0 && fid->unique == 0) { |
| 155 | /* The callback break applies to an entire volume. */ |
| 156 | write_lock(&volume->cb_v_break_lock); |
| 157 | volume->cb_v_break++; |
| 158 | trace_afs_cb_break(fid, volume->cb_v_break, |
| 159 | afs_cb_break_for_volume_callback, false); |
| 160 | write_unlock(&volume->cb_v_break_lock); |
| 161 | return; |
| 162 | } |
David Howells | 68251f0 | 2018-05-12 22:31:33 +0100 | [diff] [blame] | 163 | |
David Howells | 2032596 | 2020-04-30 01:03:49 +0100 | [diff] [blame] | 164 | /* See if we can find a matching inode - even an I_NEW inode needs to |
| 165 | * be marked as it can have its callback broken before we finish |
| 166 | * setting up the local inode. |
| 167 | */ |
| 168 | sb = rcu_dereference(volume->sb); |
| 169 | if (!sb) |
| 170 | return; |
| 171 | |
| 172 | inode = find_inode_rcu(sb, fid->vnode, afs_ilookup5_test_by_fid, fid); |
| 173 | if (inode) { |
| 174 | vnode = AFS_FS_I(inode); |
| 175 | afs_break_callback(vnode, afs_cb_break_for_callback); |
| 176 | } else { |
| 177 | trace_afs_cb_miss(fid, afs_cb_break_for_callback); |
David Howells | 08e0e7c | 2007-04-26 15:55:03 -0700 | [diff] [blame] | 178 | } |
David Howells | 8230fd8 | 2020-03-27 15:02:44 +0000 | [diff] [blame] | 179 | } |
David Howells | 08e0e7c | 2007-04-26 15:55:03 -0700 | [diff] [blame] | 180 | |
David Howells | 8230fd8 | 2020-03-27 15:02:44 +0000 | [diff] [blame] | 181 | static void afs_break_some_callbacks(struct afs_server *server, |
| 182 | struct afs_callback_break *cbb, |
| 183 | size_t *_count) |
| 184 | { |
| 185 | struct afs_callback_break *residue = cbb; |
David Howells | 2032596 | 2020-04-30 01:03:49 +0100 | [diff] [blame] | 186 | struct afs_volume *volume; |
David Howells | 8230fd8 | 2020-03-27 15:02:44 +0000 | [diff] [blame] | 187 | afs_volid_t vid = cbb->fid.vid; |
| 188 | size_t i; |
| 189 | |
David Howells | 2032596 | 2020-04-30 01:03:49 +0100 | [diff] [blame] | 190 | volume = afs_lookup_volume_rcu(server->cell, vid); |
David Howells | 8230fd8 | 2020-03-27 15:02:44 +0000 | [diff] [blame] | 191 | |
| 192 | /* TODO: Find all matching volumes if we couldn't match the server and |
| 193 | * break them anyway. |
| 194 | */ |
| 195 | |
| 196 | for (i = *_count; i > 0; cbb++, i--) { |
| 197 | if (cbb->fid.vid == vid) { |
| 198 | _debug("- Fid { vl=%08llx n=%llu u=%u }", |
| 199 | cbb->fid.vid, |
| 200 | cbb->fid.vnode, |
| 201 | cbb->fid.unique); |
| 202 | --*_count; |
David Howells | 2032596 | 2020-04-30 01:03:49 +0100 | [diff] [blame] | 203 | if (volume) |
| 204 | afs_break_one_callback(volume, &cbb->fid); |
David Howells | 8230fd8 | 2020-03-27 15:02:44 +0000 | [diff] [blame] | 205 | } else { |
| 206 | *residue++ = *cbb; |
| 207 | } |
| 208 | } |
David Howells | ec26815 | 2007-04-26 15:49:28 -0700 | [diff] [blame] | 209 | } |
Linus Torvalds | 1da177e | 2005-04-16 15:20:36 -0700 | [diff] [blame] | 210 | |
Linus Torvalds | 1da177e | 2005-04-16 15:20:36 -0700 | [diff] [blame] | 211 | /* |
| 212 | * allow the fileserver to break callback promises |
| 213 | */ |
David Howells | 08e0e7c | 2007-04-26 15:55:03 -0700 | [diff] [blame] | 214 | void afs_break_callbacks(struct afs_server *server, size_t count, |
David Howells | 5cf9dd5 | 2018-04-09 21:12:31 +0100 | [diff] [blame] | 215 | struct afs_callback_break *callbacks) |
Linus Torvalds | 1da177e | 2005-04-16 15:20:36 -0700 | [diff] [blame] | 216 | { |
David Howells | 08e0e7c | 2007-04-26 15:55:03 -0700 | [diff] [blame] | 217 | _enter("%p,%zu,", server, count); |
| 218 | |
| 219 | ASSERT(server != NULL); |
Linus Torvalds | 1da177e | 2005-04-16 15:20:36 -0700 | [diff] [blame] | 220 | |
David Howells | 8230fd8 | 2020-03-27 15:02:44 +0000 | [diff] [blame] | 221 | rcu_read_lock(); |
David Howells | 68251f0 | 2018-05-12 22:31:33 +0100 | [diff] [blame] | 222 | |
David Howells | 8230fd8 | 2020-03-27 15:02:44 +0000 | [diff] [blame] | 223 | while (count > 0) |
| 224 | afs_break_some_callbacks(server, callbacks, &count); |
Linus Torvalds | 1da177e | 2005-04-16 15:20:36 -0700 | [diff] [blame] | 225 | |
David Howells | 8230fd8 | 2020-03-27 15:02:44 +0000 | [diff] [blame] | 226 | rcu_read_unlock(); |
David Howells | 08e0e7c | 2007-04-26 15:55:03 -0700 | [diff] [blame] | 227 | return; |
David Howells | ec26815 | 2007-04-26 15:49:28 -0700 | [diff] [blame] | 228 | } |