/* ar-skbuff.c: socket buffer destruction handling
 *
 * Copyright (C) 2007 Red Hat, Inc. All Rights Reserved.
 * Written by David Howells (dhowells@redhat.com)
 *
 * This program is free software; you can redistribute it and/or
 * modify it under the terms of the GNU General Public License
 * as published by the Free Software Foundation; either version
 * 2 of the License, or (at your option) any later version.
 */

#define pr_fmt(fmt) KBUILD_MODNAME ": " fmt

#include <linux/module.h>
#include <linux/net.h>
#include <linux/skbuff.h>
#include <net/sock.h>
#include <net/af_rxrpc.h>
#include "ar-internal.h"

/*
 * set up for the ACK at the end of the receive phase when we discard the final
 * receive phase data packet
 * - called with softirqs disabled
 */
static void rxrpc_request_final_ACK(struct rxrpc_call *call)
{
	/* the call may be aborted before we have a chance to ACK it */
	write_lock(&call->state_lock);

	switch (call->state) {
	case RXRPC_CALL_CLIENT_RECV_REPLY:
		call->state = RXRPC_CALL_CLIENT_FINAL_ACK;
		_debug("request final ACK");

		/* get an extra ref on the call for the final-ACK generator to
		 * release
		 */
		rxrpc_get_call(call);
		set_bit(RXRPC_CALL_EV_ACK_FINAL, &call->events);

		/* try_to_del_timer_sync() returns -1 if the timer handler is
		 * currently running, in which case that handler will queue the
		 * call itself; otherwise we must queue it here
		 */
		if (try_to_del_timer_sync(&call->ack_timer) >= 0)
			rxrpc_queue_call(call);
		break;

	case RXRPC_CALL_SERVER_RECV_REQUEST:
		call->state = RXRPC_CALL_SERVER_ACK_REQUEST;
		/* fall through */
	default:
		break;
	}

	write_unlock(&call->state_lock);
}

/*
 * drop the bottom ACK off of the call ACK window and advance the window
 */
static void rxrpc_hard_ACK_data(struct rxrpc_call *call,
				struct rxrpc_skb_priv *sp)
{
	int loop;
	u32 seq;

	spin_lock_bh(&call->lock);

	_debug("hard ACK #%u", sp->hdr.seq);

	/* Shift the soft-ACK window down by one bit, pulling each word's top
	 * bit in from the bottom of the word above; ackr_window[] carries a
	 * spare word at the top so that the shift can read one word beyond
	 * the window proper.
	 */
	for (loop = 0; loop < RXRPC_ACKR_WINDOW_ASZ; loop++) {
		call->ackr_window[loop] >>= 1;
		call->ackr_window[loop] |=
			call->ackr_window[loop + 1] << (BITS_PER_LONG - 1);
	}

	seq = sp->hdr.seq;
	ASSERTCMP(seq, ==, call->rx_data_eaten + 1);
	call->rx_data_eaten = seq;

	if (call->ackr_win_top < UINT_MAX)
		call->ackr_win_top++;

	ASSERTIFCMP(call->state <= RXRPC_CALL_COMPLETE,
		    call->rx_data_post, >=, call->rx_data_recv);
	ASSERTIFCMP(call->state <= RXRPC_CALL_COMPLETE,
		    call->rx_data_recv, >=, call->rx_data_eaten);

	if (sp->hdr.flags & RXRPC_LAST_PACKET) {
		rxrpc_request_final_ACK(call);
	} else if (atomic_dec_and_test(&call->ackr_not_idle) &&
		   test_and_clear_bit(RXRPC_CALL_TX_SOFT_ACK, &call->flags)) {
		/* We previously soft-ACK'd some received packets that have now
		 * been consumed, so send a hard-ACK if no more packets are
		 * immediately forthcoming to allow the transmitter to free up
		 * its Tx bufferage.
		 */
		_debug("send Rx idle ACK");
		__rxrpc_propose_ACK(call, RXRPC_ACK_IDLE, sp->hdr.serial,
				    false);
	}

	spin_unlock_bh(&call->lock);
}
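
/* Worked illustration of the window shift in rxrpc_hard_ACK_data() above
 * (example values only, assuming BITS_PER_LONG == 64 and a two-word window):
 *
 *	before:	ackr_window[0] = 0x0000000000000005	(bits 0 and 2 set)
 *		ackr_window[1] = 0x0000000000000001	(bit 64 of the window)
 *
 *	after:	ackr_window[0] == 0x8000000000000002	(bits 1 and 63 set)
 *		ackr_window[1] == 0x0000000000000000
 *
 * Bit 0 (the just-consumed bottom of the window) falls off and every other
 * tracking bit moves down one place, bit 64 crossing into word 0.
 */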

/**
 * rxrpc_kernel_data_consumed - Record consumption of data message
 * @call: The call to which the message pertains.
 * @skb: Message holding data
 *
 * Record the consumption of a data message and generate an ACK if appropriate.
 * The call state is shifted if this was the final packet.  The caller must be
 * in process context with no spinlocks held.
 *
 * TODO: Actually generate the ACK here rather than punting this to the
 * workqueue.
 */
void rxrpc_kernel_data_consumed(struct rxrpc_call *call, struct sk_buff *skb)
{
	struct rxrpc_skb_priv *sp = rxrpc_skb(skb);

	_enter("%d,%p{%u}", call->debug_id, skb, sp->hdr.seq);

	ASSERTCMP(sp->call, ==, call);
	ASSERTCMP(sp->hdr.type, ==, RXRPC_PACKET_TYPE_DATA);

	/* TODO: Fix the sequence number tracking */
	ASSERTCMP(sp->hdr.seq, >=, call->rx_data_recv);
	ASSERTCMP(sp->hdr.seq, <=, call->rx_data_recv + 1);
	ASSERTCMP(sp->hdr.seq, >, call->rx_data_eaten);

	call->rx_data_recv = sp->hdr.seq;
	rxrpc_hard_ACK_data(call, sp);
}
EXPORT_SYMBOL(rxrpc_kernel_data_consumed);
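
/* Illustrative use of the above (hypothetical caller code, not part of this
 * file): a kernel service that obtained @call from rxrpc_kernel_begin_call()
 * and has finished with the payload of a received data packet might do:
 *
 *	service_process_payload(skb->data, skb->len);	// service's own code
 *	rxrpc_kernel_data_consumed(call, skb);		// record it; maybe ACK
 *	rxrpc_kernel_free_skb(skb);			// defined below
 *
 * service_process_payload() is a made-up name; the two rxrpc calls are the
 * real exported API.
 */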

/*
 * Destroy a packet that has an RxRPC control buffer
 */
void rxrpc_packet_destructor(struct sk_buff *skb)
{
	struct rxrpc_skb_priv *sp = rxrpc_skb(skb);
	struct rxrpc_call *call = sp->call;

	_enter("%p{%p}", skb, call);

	if (call) {
		if (atomic_dec_return(&call->skb_count) < 0)
			BUG();
		rxrpc_put_call(call);
		sp->call = NULL;
	}

	if (skb->sk)
		sock_rfree(skb);
	_leave("");
}

/**
 * rxrpc_kernel_free_skb - Free an RxRPC socket buffer
 * @skb: The socket buffer to be freed
 *
 * Let RxRPC free its own socket buffer, permitting it to maintain debug
 * accounting.
 */
void rxrpc_kernel_free_skb(struct sk_buff *skb)
{
	rxrpc_free_skb(skb);
}
EXPORT_SYMBOL(rxrpc_kernel_free_skb);

/*
 * Note the existence of a new-to-us socket buffer (allocated or dequeued).
 */
void rxrpc_new_skb(struct sk_buff *skb)
{
	const void *here = __builtin_return_address(0);
	int n = atomic_inc_return(&rxrpc_n_skbs);

	trace_rxrpc_skb(skb, 0, atomic_read(&skb->users), n, here);
}

/*
 * Note the re-emergence of a socket buffer from a queue or buffer.
 */
void rxrpc_see_skb(struct sk_buff *skb)
{
	const void *here = __builtin_return_address(0);

	if (skb) {
		/* seeing an skb doesn't change the count - just log it */
		int n = atomic_read(&rxrpc_n_skbs);

		trace_rxrpc_skb(skb, 1, atomic_read(&skb->users), n, here);
	}
}

/*
 * Note the addition of a ref on a socket buffer.
 */
void rxrpc_get_skb(struct sk_buff *skb)
{
	const void *here = __builtin_return_address(0);
	int n = atomic_inc_return(&rxrpc_n_skbs);

	trace_rxrpc_skb(skb, 2, atomic_read(&skb->users), n, here);
	skb_get(skb);
}

/*
 * Note the destruction of a socket buffer.
 */
void rxrpc_free_skb(struct sk_buff *skb)
{
	const void *here = __builtin_return_address(0);

	if (skb) {
		int n;

		CHECK_SLAB_OKAY(&skb->users);
		n = atomic_dec_return(&rxrpc_n_skbs);
		trace_rxrpc_skb(skb, 3, atomic_read(&skb->users), n, here);
		kfree_skb(skb);
	}
}

/*
 * Clear a queue of socket buffers.
 */
void rxrpc_purge_queue(struct sk_buff_head *list)
{
	const void *here = __builtin_return_address(0);
	struct sk_buff *skb;

	while ((skb = skb_dequeue(list)) != NULL) {
		int n = atomic_dec_return(&rxrpc_n_skbs);

		trace_rxrpc_skb(skb, 4, atomic_read(&skb->users), n, here);
		kfree_skb(skb);
	}
}
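
/* Illustrative use of rxrpc_purge_queue() (hypothetical; the queue field
 * name is an assumption, not taken from this file): when a call is torn
 * down, any packets still queued can be discarded in one go while keeping
 * the rxrpc_n_skbs accounting straight:
 *
 *	rxrpc_purge_queue(&call->rx_queue);
 */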