blob: 8ee8b2d4a3ebd36aab92e25703d4edc0576840c1 [file] [log] [blame]
David Howells17926a72007-04-26 15:48:28 -07001/* RxRPC packet transmission
2 *
3 * Copyright (C) 2007 Red Hat, Inc. All Rights Reserved.
4 * Written by David Howells (dhowells@redhat.com)
5 *
6 * This program is free software; you can redistribute it and/or
7 * modify it under the terms of the GNU General Public License
8 * as published by the Free Software Foundation; either version
9 * 2 of the License, or (at your option) any later version.
10 */
11
Joe Perches9b6d5392016-06-02 12:08:52 -070012#define pr_fmt(fmt) KBUILD_MODNAME ": " fmt
13
David Howells17926a72007-04-26 15:48:28 -070014#include <linux/net.h>
Tejun Heo5a0e3ad2010-03-24 17:04:11 +090015#include <linux/gfp.h>
David Howells17926a72007-04-26 15:48:28 -070016#include <linux/skbuff.h>
Paul Gortmakerbc3b2d72011-07-15 11:47:34 -040017#include <linux/export.h>
David Howells17926a72007-04-26 15:48:28 -070018#include <net/sock.h>
19#include <net/af_rxrpc.h>
20#include "ar-internal.h"
21
/*
 * On-the-wire layout of an outgoing ACK packet: wire header, ACK body, the
 * soft-ACK bitmap (up to 255 entries, one byte per packet), padding to a
 * 4-byte boundary, then the trailing ACK info.  Field order must match the
 * rx protocol wire format exactly - do not reorder.
 */
struct rxrpc_ack_buffer {
	struct rxrpc_wire_header whdr;
	struct rxrpc_ackpacket ack;
	u8 acks[255];		/* soft-ACK/NACK byte per packet after firstPacket */
	u8 pad[3];		/* pad so ackinfo can be 4-byte aligned on the wire */
	struct rxrpc_ackinfo ackinfo;
};
29
/*
 * On-the-wire layout of an outgoing ABORT packet: wire header followed by
 * the 32-bit network-order abort code.
 */
struct rxrpc_abort_buffer {
	struct rxrpc_wire_header whdr;
	__be32 abort_code;
};
34
/*
 * Fill out an ACK packet.
 *
 * Fills in the ACK body, the soft-ACK/NACK bitmap covering (hard_ack, top]
 * and the trailing ACK info in *pkt.  The consumed hard-ACK point and the
 * top of the Rx window are also reported through *_hard_ack and *_top so
 * the caller can update its bookkeeping after a successful transmission.
 *
 * Returns the number of bytes of ACK payload following pkt->ack (bitmap
 * plus three zero pad bytes) for use in sizing the send iovec.
 *
 * NOTE(review): caller appears to be expected to hold call->lock so that
 * the ackr_* fields are stable - confirm against rxrpc_send_ack_packet().
 */
static size_t rxrpc_fill_out_ack(struct rxrpc_connection *conn,
				 struct rxrpc_call *call,
				 struct rxrpc_ack_buffer *pkt,
				 rxrpc_seq_t *_hard_ack,
				 rxrpc_seq_t *_top,
				 u8 reason)
{
	rxrpc_serial_t serial;
	rxrpc_seq_t hard_ack, top, seq;
	int ix;
	u32 mtu, jmax;
	u8 *ackp = pkt->acks;

	/* Barrier against rxrpc_input_data(). */
	serial = call->ackr_serial;
	hard_ack = READ_ONCE(call->rx_hard_ack);
	top = smp_load_acquire(&call->rx_top);
	*_hard_ack = hard_ack;
	*_top = top;

	pkt->ack.bufferSpace = htons(8);
	pkt->ack.maxSkew = htons(call->ackr_skew);
	pkt->ack.firstPacket = htonl(hard_ack + 1);
	pkt->ack.previousPacket = htonl(call->ackr_prev_seq);
	pkt->ack.serial = htonl(serial);
	pkt->ack.reason = reason;
	pkt->ack.nAcks = top - hard_ack;

	/* A PING ACK solicits an immediate response so it can be used for
	 * RTT measurement.
	 */
	if (reason == RXRPC_ACK_PING)
		pkt->whdr.flags |= RXRPC_REQUEST_ACK;

	/* Build one soft-ACK/NACK byte for each packet in the open window
	 * (hard_ack, top]: ACK if the Rx slot is occupied, NACK otherwise.
	 */
	if (after(top, hard_ack)) {
		seq = hard_ack + 1;
		do {
			ix = seq & RXRPC_RXTX_BUFF_MASK;
			if (call->rxtx_buffer[ix])
				*ackp++ = RXRPC_ACK_TYPE_ACK;
			else
				*ackp++ = RXRPC_ACK_TYPE_NACK;
			seq++;
		} while (before_eq(seq, top));
	}

	/* Advertise our receive parameters: usable MTU (interface MTU less
	 * header overhead), receive window and jumbo-packet tolerance.
	 */
	mtu = conn->params.peer->if_mtu;
	mtu -= conn->params.peer->hdrsize;
	jmax = (call->nr_jumbo_bad > 3) ? 1 : rxrpc_rx_jumbo_max;
	pkt->ackinfo.rxMTU = htonl(rxrpc_rx_mtu);
	pkt->ackinfo.maxMTU = htonl(mtu);
	pkt->ackinfo.rwind = htonl(call->rx_winsize);
	pkt->ackinfo.jumbo_max = htonl(jmax);

	/* Three zero pad bytes after the bitmap, included in the count so
	 * the trailing ackinfo lands on a 4-byte boundary on the wire.
	 */
	*ackp++ = 0;
	*ackp++ = 0;
	*ackp++ = 0;
	return top - hard_ack + 3;
}
94
/*
 * Send an ACK call packet.
 *
 * If @ping is true an RXRPC_ACK_PING is sent unconditionally and the call
 * is marked as having a ping in flight for RTT measurement; otherwise the
 * pending ackr_reason is consumed and sent (returning 0 without sending if
 * no ACK is currently proposed).
 *
 * Returns 0 on success or if there was nothing to send, -ECONNRESET if the
 * call no longer has a connection, -ENOMEM on allocation failure, or the
 * error from kernel_sendmsg().
 */
int rxrpc_send_ack_packet(struct rxrpc_call *call, bool ping)
{
	struct rxrpc_connection *conn = NULL;
	struct rxrpc_ack_buffer *pkt;
	struct msghdr msg;
	struct kvec iov[2];
	rxrpc_serial_t serial;
	rxrpc_seq_t hard_ack, top;
	size_t len, n;
	int ret;
	u8 reason;

	/* Pin the connection whilst we use it; it may be detached from the
	 * call concurrently, hence the lock and the _maybe get.
	 */
	spin_lock_bh(&call->lock);
	if (call->conn)
		conn = rxrpc_get_connection_maybe(call->conn);
	spin_unlock_bh(&call->lock);
	if (!conn)
		return -ECONNRESET;

	pkt = kzalloc(sizeof(*pkt), GFP_KERNEL);
	if (!pkt) {
		rxrpc_put_connection(conn);
		return -ENOMEM;
	}

	msg.msg_name = &call->peer->srx.transport;
	msg.msg_namelen = call->peer->srx.transport_len;
	msg.msg_control = NULL;
	msg.msg_controllen = 0;
	msg.msg_flags = 0;

	pkt->whdr.epoch = htonl(conn->proto.epoch);
	pkt->whdr.cid = htonl(call->cid);
	pkt->whdr.callNumber = htonl(call->call_id);
	pkt->whdr.seq = 0;
	pkt->whdr.type = RXRPC_PACKET_TYPE_ACK;
	pkt->whdr.flags = RXRPC_SLOW_START_OK | conn->out_clientflag;
	pkt->whdr.userStatus = 0;
	pkt->whdr.securityIndex = call->security_ix;
	pkt->whdr._rsvd = 0;
	pkt->whdr.serviceId = htons(call->service_id);

	/* Pick the ACK reason and fill out the packet under the call lock
	 * so the ackr_* state and Rx window are sampled consistently.  A
	 * non-ping send consumes the proposed reason; if none is pending
	 * there is nothing to do.
	 */
	spin_lock_bh(&call->lock);
	if (ping) {
		reason = RXRPC_ACK_PING;
	} else {
		reason = call->ackr_reason;
		if (!call->ackr_reason) {
			spin_unlock_bh(&call->lock);
			ret = 0;
			goto out;
		}
		call->ackr_reason = 0;
	}
	n = rxrpc_fill_out_ack(conn, call, pkt, &hard_ack, &top, reason);

	spin_unlock_bh(&call->lock);

	/* iov[0] covers header + ACK body + bitmap/padding (n bytes);
	 * iov[1] appends the trailing ACK info, skipping the unused tail
	 * of the 255-byte bitmap buffer.
	 */
	iov[0].iov_base = pkt;
	iov[0].iov_len = sizeof(pkt->whdr) + sizeof(pkt->ack) + n;
	iov[1].iov_base = &pkt->ackinfo;
	iov[1].iov_len = sizeof(pkt->ackinfo);
	len = iov[0].iov_len + iov[1].iov_len;

	serial = atomic_inc_return(&conn->serial);
	pkt->whdr.serial = htonl(serial);
	trace_rxrpc_tx_ack(call, serial,
			   ntohl(pkt->ack.firstPacket),
			   ntohl(pkt->ack.serial),
			   pkt->ack.reason, pkt->ack.nAcks);

	if (ping) {
		call->ping_serial = serial;
		smp_wmb();
		/* We need to stick a time in before we send the packet in case
		 * the reply gets back before kernel_sendmsg() completes - but
		 * asking UDP to send the packet can take a relatively long
		 * time, so we update the time after, on the assumption that
		 * the packet transmission is more likely to happen towards the
		 * end of the kernel_sendmsg() call.
		 */
		call->ping_time = ktime_get_real();
		set_bit(RXRPC_CALL_PINGING, &call->flags);
		trace_rxrpc_rtt_tx(call, rxrpc_rtt_tx_ping, serial);
	}

	ret = kernel_sendmsg(conn->params.local->socket, &msg, iov, 2, len);
	if (ping)
		call->ping_time = ktime_get_real();

	if (call->state < RXRPC_CALL_COMPLETE) {
		if (ret < 0) {
			/* The send failed: cancel any in-flight ping and
			 * re-propose the consumed ACK so it gets retried.
			 */
			if (ping)
				clear_bit(RXRPC_CALL_PINGING, &call->flags);
			rxrpc_propose_ACK(call, pkt->ack.reason,
					  ntohs(pkt->ack.maxSkew),
					  ntohl(pkt->ack.serial),
					  true, true,
					  rxrpc_propose_ack_retry_tx);
		} else {
			/* Record what the peer has now been told so that
			 * redundant ACKs can be suppressed later.
			 */
			spin_lock_bh(&call->lock);
			if (after(hard_ack, call->ackr_consumed))
				call->ackr_consumed = hard_ack;
			if (after(top, call->ackr_seen))
				call->ackr_seen = top;
			spin_unlock_bh(&call->lock);
		}
	}

out:
	rxrpc_put_connection(conn);
	kfree(pkt);
	return ret;
}
212
David Howells5873c082014-02-07 18:58:44 +0000213/*
David Howells26cb02a2016-10-06 08:11:49 +0100214 * Send an ABORT call packet.
215 */
216int rxrpc_send_abort_packet(struct rxrpc_call *call)
217{
218 struct rxrpc_connection *conn = NULL;
219 struct rxrpc_abort_buffer pkt;
220 struct msghdr msg;
221 struct kvec iov[1];
222 rxrpc_serial_t serial;
223 int ret;
224
225 spin_lock_bh(&call->lock);
226 if (call->conn)
227 conn = rxrpc_get_connection_maybe(call->conn);
228 spin_unlock_bh(&call->lock);
229 if (!conn)
230 return -ECONNRESET;
231
232 msg.msg_name = &call->peer->srx.transport;
233 msg.msg_namelen = call->peer->srx.transport_len;
234 msg.msg_control = NULL;
235 msg.msg_controllen = 0;
236 msg.msg_flags = 0;
237
238 pkt.whdr.epoch = htonl(conn->proto.epoch);
239 pkt.whdr.cid = htonl(call->cid);
240 pkt.whdr.callNumber = htonl(call->call_id);
241 pkt.whdr.seq = 0;
242 pkt.whdr.type = RXRPC_PACKET_TYPE_ABORT;
243 pkt.whdr.flags = conn->out_clientflag;
244 pkt.whdr.userStatus = 0;
245 pkt.whdr.securityIndex = call->security_ix;
246 pkt.whdr._rsvd = 0;
247 pkt.whdr.serviceId = htons(call->service_id);
248 pkt.abort_code = htonl(call->abort_code);
249
250 iov[0].iov_base = &pkt;
251 iov[0].iov_len = sizeof(pkt);
252
253 serial = atomic_inc_return(&conn->serial);
254 pkt.whdr.serial = htonl(serial);
255
256 ret = kernel_sendmsg(conn->params.local->socket,
257 &msg, iov, 1, sizeof(pkt));
258
259 rxrpc_put_connection(conn);
260 return ret;
261}
262
263/*
David Howells17926a72007-04-26 15:48:28 -0700264 * send a packet through the transport endpoint
265 */
David Howellsa1767072016-09-29 22:37:15 +0100266int rxrpc_send_data_packet(struct rxrpc_call *call, struct sk_buff *skb,
267 bool retrans)
David Howells17926a72007-04-26 15:48:28 -0700268{
David Howells5a924b82016-09-22 00:29:31 +0100269 struct rxrpc_connection *conn = call->conn;
270 struct rxrpc_wire_header whdr;
271 struct rxrpc_skb_priv *sp = rxrpc_skb(skb);
David Howells17926a72007-04-26 15:48:28 -0700272 struct msghdr msg;
David Howells5a924b82016-09-22 00:29:31 +0100273 struct kvec iov[2];
274 rxrpc_serial_t serial;
275 size_t len;
David Howellsa1767072016-09-29 22:37:15 +0100276 bool lost = false;
David Howells17926a72007-04-26 15:48:28 -0700277 int ret, opt;
278
279 _enter(",{%d}", skb->len);
280
David Howells5a924b82016-09-22 00:29:31 +0100281 /* Each transmission of a Tx packet needs a new serial number */
282 serial = atomic_inc_return(&conn->serial);
David Howells17926a72007-04-26 15:48:28 -0700283
David Howells5a924b82016-09-22 00:29:31 +0100284 whdr.epoch = htonl(conn->proto.epoch);
285 whdr.cid = htonl(call->cid);
286 whdr.callNumber = htonl(call->call_id);
287 whdr.seq = htonl(sp->hdr.seq);
288 whdr.serial = htonl(serial);
289 whdr.type = RXRPC_PACKET_TYPE_DATA;
290 whdr.flags = sp->hdr.flags;
291 whdr.userStatus = 0;
292 whdr.securityIndex = call->security_ix;
293 whdr._rsvd = htons(sp->hdr._rsvd);
294 whdr.serviceId = htons(call->service_id);
295
David Howells4e255722017-06-05 14:30:49 +0100296 if (test_bit(RXRPC_CONN_PROBING_FOR_UPGRADE, &conn->flags) &&
297 sp->hdr.seq == 1)
298 whdr.userStatus = RXRPC_USERSTATUS_SERVICE_UPGRADE;
299
David Howells5a924b82016-09-22 00:29:31 +0100300 iov[0].iov_base = &whdr;
301 iov[0].iov_len = sizeof(whdr);
302 iov[1].iov_base = skb->head;
303 iov[1].iov_len = skb->len;
304 len = iov[0].iov_len + iov[1].iov_len;
305
306 msg.msg_name = &call->peer->srx.transport;
307 msg.msg_namelen = call->peer->srx.transport_len;
David Howells17926a72007-04-26 15:48:28 -0700308 msg.msg_control = NULL;
309 msg.msg_controllen = 0;
310 msg.msg_flags = 0;
311
David Howells57494342016-09-24 18:05:27 +0100312 /* If our RTT cache needs working on, request an ACK. Also request
313 * ACKs if a DATA packet appears to have been lost.
314 */
David Howellsbf7d6202016-10-06 08:11:51 +0100315 if (!(sp->hdr.flags & RXRPC_LAST_PACKET) &&
316 (retrans ||
317 call->cong_mode == RXRPC_CALL_SLOW_START ||
318 (call->peer->rtt_usage < 3 && sp->hdr.seq & 1) ||
319 ktime_before(ktime_add_ms(call->peer->rtt_last_req, 1000),
320 ktime_get_real())))
David Howells0d4b1032016-09-22 00:29:31 +0100321 whdr.flags |= RXRPC_REQUEST_ACK;
322
David Howells8a681c362016-09-17 10:49:15 +0100323 if (IS_ENABLED(CONFIG_AF_RXRPC_INJECT_LOSS)) {
324 static int lose;
325 if ((lose++ & 7) == 7) {
David Howellsa1767072016-09-29 22:37:15 +0100326 ret = 0;
327 lost = true;
328 goto done;
David Howells8a681c362016-09-17 10:49:15 +0100329 }
330 }
331
David Howells5a924b82016-09-22 00:29:31 +0100332 _proto("Tx DATA %%%u { #%u }", serial, sp->hdr.seq);
333
David Howells17926a72007-04-26 15:48:28 -0700334 /* send the packet with the don't fragment bit set if we currently
335 * think it's small enough */
David Howells5a924b82016-09-22 00:29:31 +0100336 if (iov[1].iov_len >= call->peer->maxdata)
337 goto send_fragmentable;
David Howells17926a72007-04-26 15:48:28 -0700338
David Howells5a924b82016-09-22 00:29:31 +0100339 down_read(&conn->params.local->defrag_sem);
340 /* send the packet by UDP
341 * - returns -EMSGSIZE if UDP would have to fragment the packet
342 * to go out of the interface
343 * - in which case, we'll have processed the ICMP error
344 * message and update the peer record
345 */
346 ret = kernel_sendmsg(conn->params.local->socket, &msg, iov, 2, len);
David Howells17926a72007-04-26 15:48:28 -0700347
David Howells5a924b82016-09-22 00:29:31 +0100348 up_read(&conn->params.local->defrag_sem);
349 if (ret == -EMSGSIZE)
350 goto send_fragmentable;
351
352done:
David Howellsa1767072016-09-29 22:37:15 +0100353 trace_rxrpc_tx_data(call, sp->hdr.seq, serial, whdr.flags,
354 retrans, lost);
David Howells50235c42016-09-22 00:29:31 +0100355 if (ret >= 0) {
David Howells0d4b1032016-09-22 00:29:31 +0100356 ktime_t now = ktime_get_real();
357 skb->tstamp = now;
David Howells50235c42016-09-22 00:29:31 +0100358 smp_wmb();
David Howells5a924b82016-09-22 00:29:31 +0100359 sp->hdr.serial = serial;
David Howells0d4b1032016-09-22 00:29:31 +0100360 if (whdr.flags & RXRPC_REQUEST_ACK) {
361 call->peer->rtt_last_req = now;
David Howells50235c42016-09-22 00:29:31 +0100362 trace_rxrpc_rtt_tx(call, rxrpc_rtt_tx_data, serial);
David Howells0d4b1032016-09-22 00:29:31 +0100363 }
David Howells17926a72007-04-26 15:48:28 -0700364 }
David Howells5a924b82016-09-22 00:29:31 +0100365 _leave(" = %d [%u]", ret, call->peer->maxdata);
366 return ret;
David Howells17926a72007-04-26 15:48:28 -0700367
368send_fragmentable:
369 /* attempt to send this message with fragmentation enabled */
370 _debug("send fragment");
371
David Howells985a5c82016-06-17 11:53:37 +0100372 down_write(&conn->params.local->defrag_sem);
David Howells17926a72007-04-26 15:48:28 -0700373
David Howells985a5c82016-06-17 11:53:37 +0100374 switch (conn->params.local->srx.transport.family) {
375 case AF_INET:
376 opt = IP_PMTUDISC_DONT;
377 ret = kernel_setsockopt(conn->params.local->socket,
378 SOL_IP, IP_MTU_DISCOVER,
379 (char *)&opt, sizeof(opt));
380 if (ret == 0) {
David Howells5a924b82016-09-22 00:29:31 +0100381 ret = kernel_sendmsg(conn->params.local->socket, &msg,
382 iov, 2, len);
David Howells985a5c82016-06-17 11:53:37 +0100383
384 opt = IP_PMTUDISC_DO;
385 kernel_setsockopt(conn->params.local->socket, SOL_IP,
386 IP_MTU_DISCOVER,
387 (char *)&opt, sizeof(opt));
388 }
389 break;
David Howells75b54cb2016-09-13 08:49:05 +0100390
David Howellsd1912742016-09-17 07:26:01 +0100391#ifdef CONFIG_AF_RXRPC_IPV6
David Howells75b54cb2016-09-13 08:49:05 +0100392 case AF_INET6:
393 opt = IPV6_PMTUDISC_DONT;
394 ret = kernel_setsockopt(conn->params.local->socket,
395 SOL_IPV6, IPV6_MTU_DISCOVER,
396 (char *)&opt, sizeof(opt));
397 if (ret == 0) {
398 ret = kernel_sendmsg(conn->params.local->socket, &msg,
399 iov, 1, iov[0].iov_len);
400
401 opt = IPV6_PMTUDISC_DO;
402 kernel_setsockopt(conn->params.local->socket,
403 SOL_IPV6, IPV6_MTU_DISCOVER,
404 (char *)&opt, sizeof(opt));
405 }
406 break;
David Howellsd1912742016-09-17 07:26:01 +0100407#endif
David Howells17926a72007-04-26 15:48:28 -0700408 }
409
David Howells985a5c82016-06-17 11:53:37 +0100410 up_write(&conn->params.local->defrag_sem);
David Howells5a924b82016-09-22 00:29:31 +0100411 goto done;
David Howells17926a72007-04-26 15:48:28 -0700412}
David Howells248f2192016-09-08 11:10:12 +0100413
414/*
415 * reject packets through the local endpoint
416 */
417void rxrpc_reject_packets(struct rxrpc_local *local)
418{
David Howells1c2bc7b2016-09-13 08:49:05 +0100419 struct sockaddr_rxrpc srx;
David Howells248f2192016-09-08 11:10:12 +0100420 struct rxrpc_skb_priv *sp;
421 struct rxrpc_wire_header whdr;
422 struct sk_buff *skb;
423 struct msghdr msg;
424 struct kvec iov[2];
425 size_t size;
426 __be32 code;
427
428 _enter("%d", local->debug_id);
429
430 iov[0].iov_base = &whdr;
431 iov[0].iov_len = sizeof(whdr);
432 iov[1].iov_base = &code;
433 iov[1].iov_len = sizeof(code);
434 size = sizeof(whdr) + sizeof(code);
435
David Howells1c2bc7b2016-09-13 08:49:05 +0100436 msg.msg_name = &srx.transport;
David Howells248f2192016-09-08 11:10:12 +0100437 msg.msg_control = NULL;
438 msg.msg_controllen = 0;
439 msg.msg_flags = 0;
440
David Howells248f2192016-09-08 11:10:12 +0100441 memset(&whdr, 0, sizeof(whdr));
442 whdr.type = RXRPC_PACKET_TYPE_ABORT;
443
444 while ((skb = skb_dequeue(&local->reject_queue))) {
David Howells71f3ca42016-09-17 10:49:14 +0100445 rxrpc_see_skb(skb, rxrpc_skb_rx_seen);
David Howells248f2192016-09-08 11:10:12 +0100446 sp = rxrpc_skb(skb);
David Howells1c2bc7b2016-09-13 08:49:05 +0100447
David Howells7b674e32017-08-29 10:18:37 +0100448 if (rxrpc_extract_addr_from_skb(local, &srx, skb) == 0) {
David Howells1c2bc7b2016-09-13 08:49:05 +0100449 msg.msg_namelen = srx.transport_len;
450
David Howells248f2192016-09-08 11:10:12 +0100451 code = htonl(skb->priority);
452
453 whdr.epoch = htonl(sp->hdr.epoch);
454 whdr.cid = htonl(sp->hdr.cid);
455 whdr.callNumber = htonl(sp->hdr.callNumber);
456 whdr.serviceId = htons(sp->hdr.serviceId);
457 whdr.flags = sp->hdr.flags;
458 whdr.flags ^= RXRPC_CLIENT_INITIATED;
459 whdr.flags &= RXRPC_CLIENT_INITIATED;
460
461 kernel_sendmsg(local->socket, &msg, iov, 2, size);
David Howells248f2192016-09-08 11:10:12 +0100462 }
463
David Howells71f3ca42016-09-17 10:49:14 +0100464 rxrpc_free_skb(skb, rxrpc_skb_rx_freed);
David Howells248f2192016-09-08 11:10:12 +0100465 }
466
467 _leave("");
468}