// SPDX-License-Identifier: GPL-2.0
/* Multipath TCP
 *
 * Copyright (c) 2019, Intel Corporation.
 */
#define pr_fmt(fmt) "MPTCP: " fmt

#include <linux/kernel.h>
#include <net/tcp.h>
#include <net/mptcp.h>
#include "protocol.h"

#include "mib.h"

/* path manager command handlers */

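/* Queue an ADD_ADDR (or its echo, when @echo is true) so that the option
 * writer can carry it on an upcoming packet via mptcp_pm_add_addr_signal().
 * The caller must hold the PM lock; only one signal of each kind can be
 * pending at a time.
 */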
int mptcp_pm_announce_addr(struct mptcp_sock *msk,
			   const struct mptcp_addr_info *addr,
			   bool echo)
{
	u8 add_addr = READ_ONCE(msk->pm.addr_signal);

	pr_debug("msk=%p, local_id=%d, echo=%d", msk, addr->id, echo);

	lockdep_assert_held(&msk->pm.lock);

	if (add_addr &
	    (echo ? BIT(MPTCP_ADD_ADDR_ECHO) : BIT(MPTCP_ADD_ADDR_SIGNAL))) {
		pr_warn("addr_signal error, add_addr=%d, echo=%d", add_addr, echo);
		return -EINVAL;
	}

	if (echo) {
		msk->pm.remote = *addr;
		add_addr |= BIT(MPTCP_ADD_ADDR_ECHO);
	} else {
		msk->pm.local = *addr;
		add_addr |= BIT(MPTCP_ADD_ADDR_SIGNAL);
	}
	WRITE_ONCE(msk->pm.addr_signal, add_addr);
	return 0;
}

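/* Queue the given RM_ADDR list for transmission and ask the in-kernel PM
 * to send an ack carrying it. Fails if another address signal is still
 * pending.
 */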
int mptcp_pm_remove_addr(struct mptcp_sock *msk, const struct mptcp_rm_list *rm_list)
{
	u8 rm_addr = READ_ONCE(msk->pm.addr_signal);

	pr_debug("msk=%p, rm_list_nr=%d", msk, rm_list->nr);

	if (rm_addr) {
		pr_warn("addr_signal error, rm_addr=%d", rm_addr);
		return -EINVAL;
	}

	msk->pm.rm_list_tx = *rm_list;
	rm_addr |= BIT(MPTCP_RM_ADDR_SIGNAL);
	WRITE_ONCE(msk->pm.addr_signal, rm_addr);
	mptcp_pm_nl_addr_send_ack(msk);
	return 0;
}

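/* Ask the in-kernel path manager, under the PM lock, to tear down the
 * subflows matching the ids in @rm_list.
 */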
int mptcp_pm_remove_subflow(struct mptcp_sock *msk, const struct mptcp_rm_list *rm_list)
{
	pr_debug("msk=%p, rm_list_nr=%d", msk, rm_list->nr);

	spin_lock_bh(&msk->pm.lock);
	mptcp_pm_nl_rm_subflow_received(msk, rm_list);
	spin_unlock_bh(&msk->pm.lock);
	return 0;
}

/* path manager event handlers */

void mptcp_pm_new_connection(struct mptcp_sock *msk, const struct sock *ssk, int server_side)
{
	struct mptcp_pm_data *pm = &msk->pm;

	pr_debug("msk=%p, token=%u side=%d", msk, msk->token, server_side);

	WRITE_ONCE(pm->server_side, server_side);
	mptcp_event(MPTCP_EVENT_CREATED, msk, ssk, GFP_ATOMIC);
}

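/* Decide whether an incoming MP_JOIN may create a new subflow: accept it
 * only while the PM still accepts subflows and the configured limit has
 * not been reached; filling the last slot clears pm->accept_subflow.
 */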
bool mptcp_pm_allow_new_subflow(struct mptcp_sock *msk)
{
	struct mptcp_pm_data *pm = &msk->pm;
	unsigned int subflows_max;
	int ret = 0;

	subflows_max = mptcp_pm_get_subflows_max(msk);

	pr_debug("msk=%p subflows=%d max=%d allow=%d", msk, pm->subflows,
		 subflows_max, READ_ONCE(pm->accept_subflow));

	/* try to avoid acquiring the lock below */
	if (!READ_ONCE(pm->accept_subflow))
		return false;

	spin_lock_bh(&pm->lock);
	if (READ_ONCE(pm->accept_subflow)) {
		ret = pm->subflows < subflows_max;
		if (ret && ++pm->subflows == subflows_max)
			WRITE_ONCE(pm->accept_subflow, false);
	}
	spin_unlock_bh(&pm->lock);

	return ret;
}

/* return true if the new status bit is currently cleared, that is, this event
 * can be served, possibly by an already scheduled work
 */
static bool mptcp_pm_schedule_work(struct mptcp_sock *msk,
				   enum mptcp_pm_status new_status)
{
	pr_debug("msk=%p status=%x new=%lx", msk, msk->pm.status,
		 BIT(new_status));
	if (msk->pm.status & BIT(new_status))
		return false;

	msk->pm.status |= BIT(new_status);
	mptcp_schedule_work((struct sock *)msk);
	return true;
}

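/* Called when the MPTCP connection becomes fully established: schedule the
 * PM worker (at most once) and emit the MPTCP_EVENT_ESTABLISHED netlink
 * event for the first invocation only.
 */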
void mptcp_pm_fully_established(struct mptcp_sock *msk, const struct sock *ssk, gfp_t gfp)
{
	struct mptcp_pm_data *pm = &msk->pm;
	bool announce = false;

	pr_debug("msk=%p", msk);

	spin_lock_bh(&pm->lock);

	/* mptcp_pm_fully_established() can be invoked by multiple
	 * racing paths - accept() and check_fully_established():
	 * be sure to serve this event only once.
	 */
	if (READ_ONCE(pm->work_pending) &&
	    !(msk->pm.status & BIT(MPTCP_PM_ALREADY_ESTABLISHED)))
		mptcp_pm_schedule_work(msk, MPTCP_PM_ESTABLISHED);

	if ((msk->pm.status & BIT(MPTCP_PM_ALREADY_ESTABLISHED)) == 0)
		announce = true;

	msk->pm.status |= BIT(MPTCP_PM_ALREADY_ESTABLISHED);
	spin_unlock_bh(&pm->lock);

	if (announce)
		mptcp_event(MPTCP_EVENT_ESTABLISHED, msk, ssk, gfp);
}

void mptcp_pm_connection_closed(struct mptcp_sock *msk)
{
	pr_debug("msk=%p", msk);
}

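/* Notify the PM that a subflow reached the established state: if work is
 * still pending, let the worker run again.
 */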
void mptcp_pm_subflow_established(struct mptcp_sock *msk)
{
	struct mptcp_pm_data *pm = &msk->pm;

	pr_debug("msk=%p", msk);

	if (!READ_ONCE(pm->work_pending))
		return;

	spin_lock_bh(&pm->lock);

	if (READ_ONCE(pm->work_pending))
		mptcp_pm_schedule_work(msk, MPTCP_PM_SUBFLOW_ESTABLISHED);

	spin_unlock_bh(&pm->lock);
}

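/* Called while a subflow is being closed: drop it from the subflow
 * accounting when it was (or tried to become) a join subflow, and give
 * the PM a chance to establish the next candidate.
 */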
void mptcp_pm_subflow_check_next(struct mptcp_sock *msk, const struct sock *ssk,
				 const struct mptcp_subflow_context *subflow)
{
	struct mptcp_pm_data *pm = &msk->pm;
	bool update_subflows;

	update_subflows = (ssk->sk_state == TCP_CLOSE) &&
			  (subflow->request_join || subflow->mp_join);
	if (!READ_ONCE(pm->work_pending) && !update_subflows)
		return;

	spin_lock_bh(&pm->lock);
	if (update_subflows)
		pm->subflows--;

	/* Even if this subflow is not really established, tell the PM to try
	 * to pick the next ones, if possible.
	 */
	if (mptcp_pm_nl_check_work_pending(msk))
		mptcp_pm_schedule_work(msk, MPTCP_PM_SUBFLOW_ESTABLISHED);

	spin_unlock_bh(&pm->lock);
}

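/* Handle an incoming ADD_ADDR: emit the netlink announcement event and
 * either echo the address right away, when new addresses are not being
 * accepted, or hand the remote address to the PM worker.
 */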
void mptcp_pm_add_addr_received(struct mptcp_sock *msk,
				const struct mptcp_addr_info *addr)
{
	struct mptcp_pm_data *pm = &msk->pm;

	pr_debug("msk=%p remote_id=%d accept=%d", msk, addr->id,
		 READ_ONCE(pm->accept_addr));

	mptcp_event_addr_announced(msk, addr);

	spin_lock_bh(&pm->lock);

	if (!READ_ONCE(pm->accept_addr)) {
		mptcp_pm_announce_addr(msk, addr, true);
		mptcp_pm_add_addr_send_ack(msk);
	} else if (mptcp_pm_schedule_work(msk, MPTCP_PM_ADD_ADDR_RECEIVED)) {
		pm->remote = *addr;
	}

	spin_unlock_bh(&pm->lock);
}

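/* Handle the echo of a previously announced address: when it matches an
 * entry in the announce list and work is pending, let the worker make
 * further progress.
 */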
void mptcp_pm_add_addr_echoed(struct mptcp_sock *msk,
			      struct mptcp_addr_info *addr)
{
	struct mptcp_pm_data *pm = &msk->pm;

	pr_debug("msk=%p", msk);

	spin_lock_bh(&pm->lock);

	if (mptcp_lookup_anno_list_by_saddr(msk, addr) && READ_ONCE(pm->work_pending))
		mptcp_pm_schedule_work(msk, MPTCP_PM_SUBFLOW_ESTABLISHED);

	spin_unlock_bh(&pm->lock);
}

void mptcp_pm_add_addr_send_ack(struct mptcp_sock *msk)
{
	if (!mptcp_pm_should_add_signal(msk))
		return;

	mptcp_pm_schedule_work(msk, MPTCP_PM_ADD_ADDR_SEND_ACK);
}

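/* Handle an incoming RM_ADDR: emit one netlink event per removed id and
 * pass the whole list to the PM worker.
 */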
void mptcp_pm_rm_addr_received(struct mptcp_sock *msk,
			       const struct mptcp_rm_list *rm_list)
{
	struct mptcp_pm_data *pm = &msk->pm;
	u8 i;

	pr_debug("msk=%p remote_ids_nr=%d", msk, rm_list->nr);

	for (i = 0; i < rm_list->nr; i++)
		mptcp_event_addr_removed(msk, rm_list->ids[i]);

	spin_lock_bh(&pm->lock);
	mptcp_pm_schedule_work(msk, MPTCP_PM_RM_ADDR_RECEIVED);
	pm->rm_list_rx = *rm_list;
	spin_unlock_bh(&pm->lock);
}

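/* Handle an incoming MP_PRIO: update the subflow backup flag and notify
 * userspace of the priority change.
 */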
void mptcp_pm_mp_prio_received(struct sock *sk, u8 bkup)
{
	struct mptcp_subflow_context *subflow = mptcp_subflow_ctx(sk);

	pr_debug("subflow->backup=%d, bkup=%d\n", subflow->backup, bkup);
	subflow->backup = bkup;

	mptcp_event(MPTCP_EVENT_SUB_PRIORITY, mptcp_sk(subflow->conn), sk, GFP_ATOMIC);
}

void mptcp_pm_mp_fail_received(struct sock *sk, u64 fail_seq)
{
	pr_debug("fail_seq=%llu", fail_seq);
}

/* path manager helpers */

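/* Fill @addr with the pending ADD_ADDR (or its echo) when it fits in the
 * remaining option space; on a pure TCP ack the other suboptions may be
 * dropped to make room. Returns true when an address has been consumed
 * from the PM state.
 */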
bool mptcp_pm_add_addr_signal(struct mptcp_sock *msk, struct sk_buff *skb,
			      unsigned int opt_size, unsigned int remaining,
			      struct mptcp_addr_info *addr, bool *echo,
			      bool *port, bool *drop_other_suboptions)
{
	int ret = false;
	u8 add_addr;
	u8 family;

	spin_lock_bh(&msk->pm.lock);

	/* double check after the lock is acquired */
	if (!mptcp_pm_should_add_signal(msk))
		goto out_unlock;

	/* always drop every other option for a pure ack ADD_ADDR; this is a
	 * plain dup-ack from TCP perspective. The other MPTCP-relevant info,
	 * if any, will be carried by the 'original' TCP ack
	 */
	if (skb && skb_is_tcp_pure_ack(skb)) {
		remaining += opt_size;
		*drop_other_suboptions = true;
	}

	*echo = mptcp_pm_should_add_signal_echo(msk);
	*port = !!(*echo ? msk->pm.remote.port : msk->pm.local.port);

	family = *echo ? msk->pm.remote.family : msk->pm.local.family;
	if (remaining < mptcp_add_addr_len(family, *echo, *port))
		goto out_unlock;

	if (*echo) {
		*addr = msk->pm.remote;
		add_addr = msk->pm.addr_signal & ~BIT(MPTCP_ADD_ADDR_ECHO);
	} else {
		*addr = msk->pm.local;
		add_addr = msk->pm.addr_signal & ~BIT(MPTCP_ADD_ADDR_SIGNAL);
	}
	WRITE_ONCE(msk->pm.addr_signal, add_addr);
	ret = true;

out_unlock:
	spin_unlock_bh(&msk->pm.lock);
	return ret;
}

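/* Fill @rm_list with the pending RM_ADDR list when it fits in the
 * remaining option space. The RM_ADDR signal is cleared once the list is
 * copied out (or when there is nothing to send) and left pending when the
 * available space is too small.
 */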
bool mptcp_pm_rm_addr_signal(struct mptcp_sock *msk, unsigned int remaining,
			     struct mptcp_rm_list *rm_list)
{
	int ret = false, len;
	u8 rm_addr;

	spin_lock_bh(&msk->pm.lock);

	/* double check after the lock is acquired */
	if (!mptcp_pm_should_rm_signal(msk))
		goto out_unlock;

	rm_addr = msk->pm.addr_signal & ~BIT(MPTCP_RM_ADDR_SIGNAL);
	len = mptcp_rm_addr_len(&msk->pm.rm_list_tx);
	if (len < 0) {
		WRITE_ONCE(msk->pm.addr_signal, rm_addr);
		goto out_unlock;
	}
	if (remaining < len)
		goto out_unlock;

	*rm_list = msk->pm.rm_list_tx;
	WRITE_ONCE(msk->pm.addr_signal, rm_addr);
	ret = true;

out_unlock:
	spin_unlock_bh(&msk->pm.lock);
	return ret;
}

int mptcp_pm_get_local_id(struct mptcp_sock *msk, struct sock_common *skc)
{
	return mptcp_pm_nl_get_local_id(msk, skc);
}

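/* Track retransmission periods with no forward progress on this subflow:
 * while rcv_tstamp does not move, bump a stale counter and let the
 * in-kernel PM mark the subflow as stale; reset the counter and
 * re-activate the subflow once progress resumes.
 */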
void mptcp_pm_subflow_chk_stale(const struct mptcp_sock *msk, struct sock *ssk)
{
	struct mptcp_subflow_context *subflow = mptcp_subflow_ctx(ssk);
	u32 rcv_tstamp = READ_ONCE(tcp_sk(ssk)->rcv_tstamp);

	/* keep track of rtx periods with no progress */
	if (!subflow->stale_count) {
		subflow->stale_rcv_tstamp = rcv_tstamp;
		subflow->stale_count++;
	} else if (subflow->stale_rcv_tstamp == rcv_tstamp) {
		if (subflow->stale_count < U8_MAX)
			subflow->stale_count++;
		mptcp_pm_nl_subflow_chk_stale(msk, ssk);
	} else {
		subflow->stale_count = 0;
		mptcp_subflow_set_active(subflow);
	}
}

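/* Reset the PM status, counters and pending signals to their initial
 * values; also used at socket creation time via mptcp_pm_data_init().
 */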
void mptcp_pm_data_reset(struct mptcp_sock *msk)
{
	msk->pm.add_addr_signaled = 0;
	msk->pm.add_addr_accepted = 0;
	msk->pm.local_addr_used = 0;
	msk->pm.subflows = 0;
	msk->pm.rm_list_tx.nr = 0;
	msk->pm.rm_list_rx.nr = 0;
	WRITE_ONCE(msk->pm.work_pending, false);
	WRITE_ONCE(msk->pm.addr_signal, 0);
	WRITE_ONCE(msk->pm.accept_addr, false);
	WRITE_ONCE(msk->pm.accept_subflow, false);
	WRITE_ONCE(msk->pm.remote_deny_join_id0, false);
	msk->pm.status = 0;
	bitmap_fill(msk->pm.id_avail_bitmap, MPTCP_PM_MAX_ADDR_ID + 1);

	mptcp_pm_nl_data_init(msk);
}

void mptcp_pm_data_init(struct mptcp_sock *msk)
{
	spin_lock_init(&msk->pm.lock);
	INIT_LIST_HEAD(&msk->pm.anno_list);
	mptcp_pm_data_reset(msk);
}

void __init mptcp_pm_init(void)
{
	mptcp_pm_nl_init();
}