Linus Torvalds | 1da177e | 2005-04-16 15:20:36 -0700 | [diff] [blame] | 1 | /* |
David Howells | 08e0e7c | 2007-04-26 15:55:03 -0700 | [diff] [blame] | 2 | * Copyright (c) 2002, 2007 Red Hat, Inc. All rights reserved. |
Linus Torvalds | 1da177e | 2005-04-16 15:20:36 -0700 | [diff] [blame] | 3 | * |
| 4 | * This software may be freely redistributed under the terms of the |
| 5 | * GNU General Public License. |
| 6 | * |
| 7 | * You should have received a copy of the GNU General Public License |
| 8 | * along with this program; if not, write to the Free Software |
| 9 | * Foundation, Inc., 675 Mass Ave, Cambridge, MA 02139, USA. |
| 10 | * |
David Woodhouse | 44d1b98 | 2008-06-05 22:46:18 -0700 | [diff] [blame] | 11 | * Authors: David Woodhouse <dwmw2@infradead.org> |
Linus Torvalds | 1da177e | 2005-04-16 15:20:36 -0700 | [diff] [blame] | 12 | * David Howells <dhowells@redhat.com> |
| 13 | * |
| 14 | */ |
| 15 | |
| 16 | #include <linux/kernel.h> |
| 17 | #include <linux/module.h> |
| 18 | #include <linux/init.h> |
David Howells | 08e0e7c | 2007-04-26 15:55:03 -0700 | [diff] [blame] | 19 | #include <linux/circ_buf.h> |
Alexey Dobriyan | e8edc6e | 2007-05-21 01:22:52 +0400 | [diff] [blame] | 20 | #include <linux/sched.h> |
Linus Torvalds | 1da177e | 2005-04-16 15:20:36 -0700 | [diff] [blame] | 21 | #include "internal.h" |
David Howells | 08e0e7c | 2007-04-26 15:55:03 -0700 | [diff] [blame] | 22 | |
David Howells | c435ee3 | 2017-11-02 15:27:49 +0000 | [diff] [blame] | 23 | /* |
David Howells | 3c4c407 | 2020-05-27 15:51:30 +0100 | [diff] [blame] | 24 | * Allow the fileserver to request callback state (re-)initialisation. |
| 25 | * Unfortunately, UUIDs are not guaranteed unique. |
Linus Torvalds | 1da177e | 2005-04-16 15:20:36 -0700 | [diff] [blame] | 26 | */ |
David Howells | 08e0e7c | 2007-04-26 15:55:03 -0700 | [diff] [blame] | 27 | void afs_init_callback_state(struct afs_server *server) |
Linus Torvalds | 1da177e | 2005-04-16 15:20:36 -0700 | [diff] [blame] | 28 | { |
David Howells | 3c4c407 | 2020-05-27 15:51:30 +0100 | [diff] [blame] | 29 | rcu_read_lock(); |
| 30 | do { |
| 31 | server->cb_s_break++; |
| 32 | server = rcu_dereference(server->uuid_next); |
| 33 | } while (0); |
| 34 | rcu_read_unlock(); |
David Howells | 08e0e7c | 2007-04-26 15:55:03 -0700 | [diff] [blame] | 35 | } |
| 36 | |
| 37 | /* |
| 38 | * actually break a callback |
| 39 | */ |
David Howells | 051d252 | 2019-06-20 18:12:16 +0100 | [diff] [blame] | 40 | void __afs_break_callback(struct afs_vnode *vnode, enum afs_cb_break_reason reason) |
David Howells | 08e0e7c | 2007-04-26 15:55:03 -0700 | [diff] [blame] | 41 | { |
| 42 | _enter(""); |
| 43 | |
David Howells | 5a81327 | 2018-04-06 14:17:26 +0100 | [diff] [blame] | 44 | clear_bit(AFS_VNODE_NEW_CONTENT, &vnode->flags); |
David Howells | c435ee3 | 2017-11-02 15:27:49 +0000 | [diff] [blame] | 45 | if (test_and_clear_bit(AFS_VNODE_CB_PROMISED, &vnode->flags)) { |
| 46 | vnode->cb_break++; |
| 47 | afs_clear_permits(vnode); |
| 48 | |
David Howells | c7226e4 | 2019-05-10 23:03:31 +0100 | [diff] [blame] | 49 | if (vnode->lock_state == AFS_VNODE_LOCK_WAITING_FOR_CB) |
David Howells | e8d6c55 | 2007-07-15 23:40:12 -0700 | [diff] [blame] | 50 | afs_lock_may_be_available(vnode); |
David Howells | 051d252 | 2019-06-20 18:12:16 +0100 | [diff] [blame] | 51 | |
| 52 | trace_afs_cb_break(&vnode->fid, vnode->cb_break, reason, true); |
| 53 | } else { |
| 54 | trace_afs_cb_break(&vnode->fid, vnode->cb_break, reason, false); |
David Howells | 08e0e7c | 2007-04-26 15:55:03 -0700 | [diff] [blame] | 55 | } |
David Howells | 30062bd | 2018-10-20 00:57:58 +0100 | [diff] [blame] | 56 | } |
David Howells | c435ee3 | 2017-11-02 15:27:49 +0000 | [diff] [blame] | 57 | |
/*
 * Break a callback promise on a vnode, taking the vnode's cb_lock write side
 * so that the break is serialised against readers of the callback state.
 */
void afs_break_callback(struct afs_vnode *vnode, enum afs_cb_break_reason reason)
{
	write_seqlock(&vnode->cb_lock);
	__afs_break_callback(vnode, reason);
	write_sequnlock(&vnode->cb_lock);
}
| 64 | |
| 65 | /* |
David Howells | 2032596 | 2020-04-30 01:03:49 +0100 | [diff] [blame] | 66 | * Look up a volume by volume ID under RCU conditions. |
David Howells | 8230fd8 | 2020-03-27 15:02:44 +0000 | [diff] [blame] | 67 | */ |
David Howells | 2032596 | 2020-04-30 01:03:49 +0100 | [diff] [blame] | 68 | static struct afs_volume *afs_lookup_volume_rcu(struct afs_cell *cell, |
| 69 | afs_volid_t vid) |
David Howells | 8230fd8 | 2020-03-27 15:02:44 +0000 | [diff] [blame] | 70 | { |
David Howells | 2032596 | 2020-04-30 01:03:49 +0100 | [diff] [blame] | 71 | struct afs_volume *volume = NULL; |
David Howells | 8230fd8 | 2020-03-27 15:02:44 +0000 | [diff] [blame] | 72 | struct rb_node *p; |
| 73 | int seq = 0; |
| 74 | |
| 75 | do { |
| 76 | /* Unfortunately, rbtree walking doesn't give reliable results |
| 77 | * under just the RCU read lock, so we have to check for |
| 78 | * changes. |
| 79 | */ |
David Howells | 2032596 | 2020-04-30 01:03:49 +0100 | [diff] [blame] | 80 | read_seqbegin_or_lock(&cell->volume_lock, &seq); |
David Howells | 8230fd8 | 2020-03-27 15:02:44 +0000 | [diff] [blame] | 81 | |
David Howells | 2032596 | 2020-04-30 01:03:49 +0100 | [diff] [blame] | 82 | p = rcu_dereference_raw(cell->volumes.rb_node); |
David Howells | 8230fd8 | 2020-03-27 15:02:44 +0000 | [diff] [blame] | 83 | while (p) { |
David Howells | 2032596 | 2020-04-30 01:03:49 +0100 | [diff] [blame] | 84 | volume = rb_entry(p, struct afs_volume, cell_node); |
David Howells | 8230fd8 | 2020-03-27 15:02:44 +0000 | [diff] [blame] | 85 | |
David Howells | 2032596 | 2020-04-30 01:03:49 +0100 | [diff] [blame] | 86 | if (volume->vid < vid) |
David Howells | 8230fd8 | 2020-03-27 15:02:44 +0000 | [diff] [blame] | 87 | p = rcu_dereference_raw(p->rb_left); |
David Howells | 2032596 | 2020-04-30 01:03:49 +0100 | [diff] [blame] | 88 | else if (volume->vid > vid) |
David Howells | 8230fd8 | 2020-03-27 15:02:44 +0000 | [diff] [blame] | 89 | p = rcu_dereference_raw(p->rb_right); |
| 90 | else |
| 91 | break; |
David Howells | 2032596 | 2020-04-30 01:03:49 +0100 | [diff] [blame] | 92 | volume = NULL; |
David Howells | 8230fd8 | 2020-03-27 15:02:44 +0000 | [diff] [blame] | 93 | } |
| 94 | |
David Howells | 2032596 | 2020-04-30 01:03:49 +0100 | [diff] [blame] | 95 | } while (need_seqretry(&cell->volume_lock, seq)); |
David Howells | 8230fd8 | 2020-03-27 15:02:44 +0000 | [diff] [blame] | 96 | |
David Howells | 2032596 | 2020-04-30 01:03:49 +0100 | [diff] [blame] | 97 | done_seqretry(&cell->volume_lock, seq); |
| 98 | return volume; |
David Howells | 8230fd8 | 2020-03-27 15:02:44 +0000 | [diff] [blame] | 99 | } |
| 100 | |
| 101 | /* |
David Howells | 08e0e7c | 2007-04-26 15:55:03 -0700 | [diff] [blame] | 102 | * allow the fileserver to explicitly break one callback |
| 103 | * - happens when |
| 104 | * - the backing file is changed |
| 105 | * - a lock is released |
| 106 | */ |
static void afs_break_one_callback(struct afs_volume *volume,
				   struct afs_fid *fid)
{
	struct super_block *sb;
	struct afs_vnode *vnode;
	struct inode *inode;

	/* A fid with a zero vnode number and uniquifier addresses the volume
	 * as a whole rather than a single file.
	 */
	if (fid->vnode == 0 && fid->unique == 0) {
		/* The callback break applies to an entire volume. */
		write_lock(&volume->cb_v_break_lock);
		volume->cb_v_break++;
		trace_afs_cb_break(fid, volume->cb_v_break,
				   afs_cb_break_for_volume_callback, false);
		write_unlock(&volume->cb_v_break_lock);
		return;
	}

	/* See if we can find a matching inode - even an I_NEW inode needs to
	 * be marked as it can have its callback broken before we finish
	 * setting up the local inode.
	 */
	sb = rcu_dereference(volume->sb);	/* caller holds the RCU read lock */
	if (!sb)
		return;		/* Volume not currently mounted; nothing to break */

	inode = find_inode_rcu(sb, fid->vnode, afs_ilookup5_test_by_fid, fid);
	if (inode) {
		vnode = AFS_FS_I(inode);
		afs_break_callback(vnode, afs_cb_break_for_callback);
	} else {
		/* No local inode for this fid - record the miss for tracing */
		trace_afs_cb_miss(fid, afs_cb_break_for_callback);
	}
}
David Howells | 08e0e7c | 2007-04-26 15:55:03 -0700 | [diff] [blame] | 140 | |
/*
 * Process all the callback breaks in the array that belong to the same volume
 * as the first entry, compacting the entries for other volumes towards the
 * front of the array and reducing *_count accordingly so that the caller can
 * make further passes for the remaining volumes.
 */
static void afs_break_some_callbacks(struct afs_server *server,
				     struct afs_callback_break *cbb,
				     size_t *_count)
{
	struct afs_callback_break *residue = cbb;	/* write cursor for deferred entries */
	struct afs_volume *volume;
	afs_volid_t vid = cbb->fid.vid;			/* volume handled on this pass */
	size_t i;

	volume = afs_lookup_volume_rcu(server->cell, vid);

	/* TODO: Find all matching volumes if we couldn't match the server and
	 * break them anyway.
	 */

	for (i = *_count; i > 0; cbb++, i--) {
		if (cbb->fid.vid == vid) {
			_debug("- Fid { vl=%08llx n=%llu u=%u }",
			       cbb->fid.vid,
			       cbb->fid.vnode,
			       cbb->fid.unique);
			--*_count;
			/* Skip the break if the volume isn't known locally */
			if (volume)
				afs_break_one_callback(volume, &cbb->fid);
		} else {
			/* Different volume: keep for a later pass.  residue
			 * never overtakes cbb, so this copy is safe in-place.
			 */
			*residue++ = *cbb;
		}
	}
}
Linus Torvalds | 1da177e | 2005-04-16 15:20:36 -0700 | [diff] [blame] | 170 | |
Linus Torvalds | 1da177e | 2005-04-16 15:20:36 -0700 | [diff] [blame] | 171 | /* |
| 172 | * allow the fileserver to break callback promises |
| 173 | */ |
David Howells | 08e0e7c | 2007-04-26 15:55:03 -0700 | [diff] [blame] | 174 | void afs_break_callbacks(struct afs_server *server, size_t count, |
David Howells | 5cf9dd5 | 2018-04-09 21:12:31 +0100 | [diff] [blame] | 175 | struct afs_callback_break *callbacks) |
Linus Torvalds | 1da177e | 2005-04-16 15:20:36 -0700 | [diff] [blame] | 176 | { |
David Howells | 08e0e7c | 2007-04-26 15:55:03 -0700 | [diff] [blame] | 177 | _enter("%p,%zu,", server, count); |
| 178 | |
| 179 | ASSERT(server != NULL); |
Linus Torvalds | 1da177e | 2005-04-16 15:20:36 -0700 | [diff] [blame] | 180 | |
David Howells | 8230fd8 | 2020-03-27 15:02:44 +0000 | [diff] [blame] | 181 | rcu_read_lock(); |
David Howells | 68251f0 | 2018-05-12 22:31:33 +0100 | [diff] [blame] | 182 | |
David Howells | 8230fd8 | 2020-03-27 15:02:44 +0000 | [diff] [blame] | 183 | while (count > 0) |
| 184 | afs_break_some_callbacks(server, callbacks, &count); |
Linus Torvalds | 1da177e | 2005-04-16 15:20:36 -0700 | [diff] [blame] | 185 | |
David Howells | 8230fd8 | 2020-03-27 15:02:44 +0000 | [diff] [blame] | 186 | rcu_read_unlock(); |
David Howells | 08e0e7c | 2007-04-26 15:55:03 -0700 | [diff] [blame] | 187 | return; |
David Howells | ec26815 | 2007-04-26 15:49:28 -0700 | [diff] [blame] | 188 | } |