/*
 * Copyright (c) 2006, 2017 Oracle and/or its affiliates. All rights reserved.
 *
 * This software is available to you under a choice of one of two
 * licenses.  You may choose to be licensed under the terms of the GNU
 * General Public License (GPL) Version 2, available from the file
 * COPYING in the main directory of this source tree, or the
 * OpenIB.org BSD license below:
 *
 *     Redistribution and use in source and binary forms, with or
 *     without modification, are permitted provided that the following
 *     conditions are met:
 *
 *      - Redistributions of source code must retain the above
 *        copyright notice, this list of conditions and the following
 *        disclaimer.
 *
 *      - Redistributions in binary form must reproduce the above
 *        copyright notice, this list of conditions and the following
 *        disclaimer in the documentation and/or other materials
 *        provided with the distribution.
 *
 * THE SOFTWARE IS PROVIDED "AS IS", WITHOUT WARRANTY OF ANY KIND,
 * EXPRESS OR IMPLIED, INCLUDING BUT NOT LIMITED TO THE WARRANTIES OF
 * MERCHANTABILITY, FITNESS FOR A PARTICULAR PURPOSE AND
 * NONINFRINGEMENT. IN NO EVENT SHALL THE AUTHORS OR COPYRIGHT HOLDERS
 * BE LIABLE FOR ANY CLAIM, DAMAGES OR OTHER LIABILITY, WHETHER IN AN
 * ACTION OF CONTRACT, TORT OR OTHERWISE, ARISING FROM, OUT OF OR IN
 * CONNECTION WITH THE SOFTWARE OR THE USE OR OTHER DEALINGS IN THE
 * SOFTWARE.
 *
 */
#include <linux/kernel.h>
#include <linux/slab.h>
#include <linux/pci.h>
#include <linux/dma-mapping.h>
#include <rdma/rdma_cm.h>

#include "rds_single_path.h"
#include "rds.h"
#include "ib.h"

static struct kmem_cache *rds_ib_incoming_slab;
static struct kmem_cache *rds_ib_frag_slab;
static atomic_t rds_ib_allocation = ATOMIC_INIT(0);

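/*
 * Set up the static parts of every receive work request in the ring.
 * Each WR carries two SGEs: one for this entry's slot in the pre-mapped
 * rds_header array and one for the RDS_FRAG_SIZE data fragment that the
 * refill path attaches later.
 */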
void rds_ib_recv_init_ring(struct rds_ib_connection *ic)
{
	struct rds_ib_recv_work *recv;
	u32 i;

	for (i = 0, recv = ic->i_recvs; i < ic->i_recv_ring.w_nr; i++, recv++) {
		struct ib_sge *sge;

		recv->r_ibinc = NULL;
		recv->r_frag = NULL;

		recv->r_wr.next = NULL;
		recv->r_wr.wr_id = i;
		recv->r_wr.sg_list = recv->r_sge;
		recv->r_wr.num_sge = RDS_IB_RECV_SGE;

		sge = &recv->r_sge[0];
		sge->addr = ic->i_recv_hdrs_dma + (i * sizeof(struct rds_header));
		sge->length = sizeof(struct rds_header);
		sge->lkey = ic->i_pd->local_dma_lkey;

		sge = &recv->r_sge[1];
		sge->addr = 0;
		sge->length = RDS_FRAG_SIZE;
		sge->lkey = ic->i_pd->local_dma_lkey;
	}
}

/*
 * The entire 'from' list, including the from element itself, is put on
 * to the tail of the 'to' list.
 */
static void list_splice_entire_tail(struct list_head *from,
				    struct list_head *to)
{
	struct list_head *from_last = from->prev;

	list_splice_tail(from_last, to);
	list_add_tail(from_last, to);
}

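/*
 * Pull whatever the percpu put path has published on cache->xfer over to
 * cache->ready, where rds_ib_recv_cache_get() consumes it.  The xchg()
 * keeps this safe against concurrent rds_ib_recv_cache_put() callers.
 */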
static void rds_ib_cache_xfer_to_ready(struct rds_ib_refill_cache *cache)
{
	struct list_head *tmp;

	tmp = xchg(&cache->xfer, NULL);
	if (tmp) {
		if (cache->ready)
			list_splice_entire_tail(tmp, cache->ready);
		else
			cache->ready = tmp;
	}
}

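/*
 * Allocate and initialize the per-cpu heads of one refill cache.  The gfp
 * flags are passed straight through, so the connection setup path decides
 * whether the allocation may sleep.
 */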
static int rds_ib_recv_alloc_cache(struct rds_ib_refill_cache *cache, gfp_t gfp)
{
	struct rds_ib_cache_head *head;
	int cpu;

	cache->percpu = alloc_percpu_gfp(struct rds_ib_cache_head, gfp);
	if (!cache->percpu)
		return -ENOMEM;

	for_each_possible_cpu(cpu) {
		head = per_cpu_ptr(cache->percpu, cpu);
		head->first = NULL;
		head->count = 0;
	}
	cache->xfer = NULL;
	cache->ready = NULL;

	return 0;
}

int rds_ib_recv_alloc_caches(struct rds_ib_connection *ic, gfp_t gfp)
{
	int ret;

	ret = rds_ib_recv_alloc_cache(&ic->i_cache_incs, gfp);
	if (!ret) {
		ret = rds_ib_recv_alloc_cache(&ic->i_cache_frags, gfp);
		if (ret)
			free_percpu(ic->i_cache_incs.percpu);
	}

	return ret;
}

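/*
 * Gather everything still sitting on the per-cpu lists and on the ready
 * list onto the caller's list.  Only used from the teardown path below,
 * when no other users of the cache remain.
 */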
static void rds_ib_cache_splice_all_lists(struct rds_ib_refill_cache *cache,
					  struct list_head *caller_list)
{
	struct rds_ib_cache_head *head;
	int cpu;

	for_each_possible_cpu(cpu) {
		head = per_cpu_ptr(cache->percpu, cpu);
		if (head->first) {
			list_splice_entire_tail(head->first, caller_list);
			head->first = NULL;
		}
	}

	if (cache->ready) {
		list_splice_entire_tail(cache->ready, caller_list);
		cache->ready = NULL;
	}
}

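/*
 * Tear down both refill caches for a connection: move anything pending on
 * the xfer lists to ready, splice every list onto a local one, and return
 * the cached incs and frags to their slabs.
 */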
void rds_ib_recv_free_caches(struct rds_ib_connection *ic)
{
	struct rds_ib_incoming *inc;
	struct rds_ib_incoming *inc_tmp;
	struct rds_page_frag *frag;
	struct rds_page_frag *frag_tmp;
	LIST_HEAD(list);

	rds_ib_cache_xfer_to_ready(&ic->i_cache_incs);
	rds_ib_cache_splice_all_lists(&ic->i_cache_incs, &list);
	free_percpu(ic->i_cache_incs.percpu);

	list_for_each_entry_safe(inc, inc_tmp, &list, ii_cache_entry) {
		list_del(&inc->ii_cache_entry);
		WARN_ON(!list_empty(&inc->ii_frags));
		kmem_cache_free(rds_ib_incoming_slab, inc);
	}

	rds_ib_cache_xfer_to_ready(&ic->i_cache_frags);
	rds_ib_cache_splice_all_lists(&ic->i_cache_frags, &list);
	free_percpu(ic->i_cache_frags.percpu);

	list_for_each_entry_safe(frag, frag_tmp, &list, f_cache_entry) {
		list_del(&frag->f_cache_entry);
		WARN_ON(!list_empty(&frag->f_item));
		kmem_cache_free(rds_ib_frag_slab, frag);
	}
}

/* fwd decl */
static void rds_ib_recv_cache_put(struct list_head *new_item,
				  struct rds_ib_refill_cache *cache);
static struct list_head *rds_ib_recv_cache_get(struct rds_ib_refill_cache *cache);


/* Recycle frag and attached recv buffer f_sg */
static void rds_ib_frag_free(struct rds_ib_connection *ic,
			     struct rds_page_frag *frag)
{
	rdsdebug("frag %p page %p\n", frag, sg_page(&frag->f_sg));

	rds_ib_recv_cache_put(&frag->f_cache_entry, &ic->i_cache_frags);
	atomic_add(RDS_FRAG_SIZE / SZ_1K, &ic->i_cache_allocs);
	rds_ib_stats_add(s_ib_recv_added_to_cache, RDS_FRAG_SIZE);
}

/* Recycle inc after freeing attached frags */
void rds_ib_inc_free(struct rds_incoming *inc)
{
	struct rds_ib_incoming *ibinc;
	struct rds_page_frag *frag;
	struct rds_page_frag *pos;
	struct rds_ib_connection *ic = inc->i_conn->c_transport_data;

	ibinc = container_of(inc, struct rds_ib_incoming, ii_inc);

	/* Free attached frags */
	list_for_each_entry_safe(frag, pos, &ibinc->ii_frags, f_item) {
		list_del_init(&frag->f_item);
		rds_ib_frag_free(ic, frag);
	}
	BUG_ON(!list_empty(&ibinc->ii_frags));

	rdsdebug("freeing ibinc %p inc %p\n", ibinc, inc);
	rds_ib_recv_cache_put(&ibinc->ii_cache_entry, &ic->i_cache_incs);
}

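/*
 * Release whatever a single ring entry still holds: drop the reference on
 * a partially received inc, and unmap and recycle the data fragment.
 */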
static void rds_ib_recv_clear_one(struct rds_ib_connection *ic,
				  struct rds_ib_recv_work *recv)
{
	if (recv->r_ibinc) {
		rds_inc_put(&recv->r_ibinc->ii_inc);
		recv->r_ibinc = NULL;
	}
	if (recv->r_frag) {
		ib_dma_unmap_sg(ic->i_cm_id->device, &recv->r_frag->f_sg, 1, DMA_FROM_DEVICE);
		rds_ib_frag_free(ic, recv->r_frag);
		recv->r_frag = NULL;
	}
}

void rds_ib_recv_clear_ring(struct rds_ib_connection *ic)
{
	u32 i;

	for (i = 0; i < ic->i_recv_ring.w_nr; i++)
		rds_ib_recv_clear_one(ic, &ic->i_recvs[i]);
}

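/*
 * Get an inc for a new message, preferably from the connection's refill
 * cache.  Falling back to the slab is bounded by
 * rds_ib_sysctl_max_recv_allocation so receive memory cannot grow without
 * limit.
 */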
static struct rds_ib_incoming *rds_ib_refill_one_inc(struct rds_ib_connection *ic,
						     gfp_t slab_mask)
{
	struct rds_ib_incoming *ibinc;
	struct list_head *cache_item;
	int avail_allocs;

	cache_item = rds_ib_recv_cache_get(&ic->i_cache_incs);
	if (cache_item) {
		ibinc = container_of(cache_item, struct rds_ib_incoming, ii_cache_entry);
	} else {
		avail_allocs = atomic_add_unless(&rds_ib_allocation,
						 1, rds_ib_sysctl_max_recv_allocation);
		if (!avail_allocs) {
			rds_ib_stats_inc(s_ib_rx_alloc_limit);
			return NULL;
		}
		ibinc = kmem_cache_alloc(rds_ib_incoming_slab, slab_mask);
		if (!ibinc) {
			atomic_dec(&rds_ib_allocation);
			return NULL;
		}
		rds_ib_stats_inc(s_ib_rx_total_incs);
	}
	INIT_LIST_HEAD(&ibinc->ii_frags);
	rds_inc_init(&ibinc->ii_inc, ic->conn, &ic->conn->c_faddr);

	return ibinc;
}

static struct rds_page_frag *rds_ib_refill_one_frag(struct rds_ib_connection *ic,
						    gfp_t slab_mask, gfp_t page_mask)
{
	struct rds_page_frag *frag;
	struct list_head *cache_item;
	int ret;

	cache_item = rds_ib_recv_cache_get(&ic->i_cache_frags);
	if (cache_item) {
		frag = container_of(cache_item, struct rds_page_frag, f_cache_entry);
		atomic_sub(RDS_FRAG_SIZE / SZ_1K, &ic->i_cache_allocs);
		rds_ib_stats_add(s_ib_recv_removed_from_cache, RDS_FRAG_SIZE);
	} else {
		frag = kmem_cache_alloc(rds_ib_frag_slab, slab_mask);
		if (!frag)
			return NULL;

		sg_init_table(&frag->f_sg, 1);
		ret = rds_page_remainder_alloc(&frag->f_sg,
					       RDS_FRAG_SIZE, page_mask);
		if (ret) {
			kmem_cache_free(rds_ib_frag_slab, frag);
			return NULL;
		}
		rds_ib_stats_inc(s_ib_rx_total_frags);
	}

	INIT_LIST_HEAD(&frag->f_item);

	return frag;
}

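/*
 * Prepare one ring entry for posting: make sure it has an inc (either
 * carried over from a message still being reassembled, or freshly
 * refilled), attach and DMA-map a data fragment, and point the two SGEs
 * at the header slot and the fragment.  GFP_NOWAIT is used unless the
 * caller's gfp mask allows direct reclaim.
 */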
static int rds_ib_recv_refill_one(struct rds_connection *conn,
				  struct rds_ib_recv_work *recv, gfp_t gfp)
{
	struct rds_ib_connection *ic = conn->c_transport_data;
	struct ib_sge *sge;
	int ret = -ENOMEM;
	gfp_t slab_mask = GFP_NOWAIT;
	gfp_t page_mask = GFP_NOWAIT;

	if (gfp & __GFP_DIRECT_RECLAIM) {
		slab_mask = GFP_KERNEL;
		page_mask = GFP_HIGHUSER;
	}

	if (!ic->i_cache_incs.ready)
		rds_ib_cache_xfer_to_ready(&ic->i_cache_incs);
	if (!ic->i_cache_frags.ready)
		rds_ib_cache_xfer_to_ready(&ic->i_cache_frags);

	/*
	 * ibinc was taken from recv if recv contained the start of a message.
	 * recvs that were continuations will still have this allocated.
	 */
	if (!recv->r_ibinc) {
		recv->r_ibinc = rds_ib_refill_one_inc(ic, slab_mask);
		if (!recv->r_ibinc)
			goto out;
	}

	WARN_ON(recv->r_frag); /* leak! */
	recv->r_frag = rds_ib_refill_one_frag(ic, slab_mask, page_mask);
	if (!recv->r_frag)
		goto out;

	ret = ib_dma_map_sg(ic->i_cm_id->device, &recv->r_frag->f_sg,
			    1, DMA_FROM_DEVICE);
	WARN_ON(ret != 1);

	sge = &recv->r_sge[0];
	sge->addr = ic->i_recv_hdrs_dma + (recv - ic->i_recvs) * sizeof(struct rds_header);
	sge->length = sizeof(struct rds_header);

	sge = &recv->r_sge[1];
	sge->addr = sg_dma_address(&recv->r_frag->f_sg);
	sge->length = sg_dma_len(&recv->r_frag->f_sg);

	ret = 0;
out:
	return ret;
}

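/*
 * Only one caller at a time refills the ring; the RDS_RECV_REFILL bit in
 * conn->c_flags acts as the lock.  See the comment in release_refill() for
 * why waiters are woken without wait_on_bit()/wake_up_bit().
 */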
static int acquire_refill(struct rds_connection *conn)
{
	return test_and_set_bit(RDS_RECV_REFILL, &conn->c_flags) == 0;
}

static void release_refill(struct rds_connection *conn)
{
	clear_bit(RDS_RECV_REFILL, &conn->c_flags);

	/* We don't use wait_on_bit()/wake_up_bit() because our waking is in a
	 * hot path and finding waiters is very rare.  We don't want to walk
	 * the system-wide hashed waitqueue buckets in the fast path only to
	 * almost never find waiters.
	 */
	if (waitqueue_active(&conn->c_waitq))
		wake_up_all(&conn->c_waitq);
}

/*
 * This tries to allocate and post unused work requests after making sure that
 * they have all the allocations they need to queue received fragments into
 * sockets.
 */
void rds_ib_recv_refill(struct rds_connection *conn, int prefill, gfp_t gfp)
{
	struct rds_ib_connection *ic = conn->c_transport_data;
	struct rds_ib_recv_work *recv;
	unsigned int posted = 0;
	int ret = 0;
	bool can_wait = !!(gfp & __GFP_DIRECT_RECLAIM);
	u32 pos;

	/* the goal here is to just make sure that someone, somewhere
	 * is posting buffers.  If we can't get the refill lock,
	 * let them do their thing
	 */
	if (!acquire_refill(conn))
		return;

	while ((prefill || rds_conn_up(conn)) &&
	       rds_ib_ring_alloc(&ic->i_recv_ring, 1, &pos)) {
		if (pos >= ic->i_recv_ring.w_nr) {
			printk(KERN_NOTICE "Argh - ring alloc returned pos=%u\n",
			       pos);
			break;
		}

		recv = &ic->i_recvs[pos];
		ret = rds_ib_recv_refill_one(conn, recv, gfp);
		if (ret) {
			break;
		}

		rdsdebug("recv %p ibinc %p page %p addr %lu\n", recv,
			 recv->r_ibinc, sg_page(&recv->r_frag->f_sg),
			 (long)sg_dma_address(&recv->r_frag->f_sg));

		/* XXX when can this fail? */
		ret = ib_post_recv(ic->i_cm_id->qp, &recv->r_wr, NULL);
		if (ret) {
			rds_ib_conn_error(conn, "recv post on "
			       "%pI6c returned %d, disconnecting and "
			       "reconnecting\n", &conn->c_faddr,
			       ret);
			break;
		}

		posted++;
	}

	/* We're doing flow control - update the window. */
	if (ic->i_flowctl && posted)
		rds_ib_advertise_credits(conn, posted);

	if (ret)
		rds_ib_ring_unalloc(&ic->i_recv_ring, 1);

	release_refill(conn);

	/* if we're called from the softirq handler, we'll be GFP_NOWAIT.
	 * in this case the ring being low is going to lead to more interrupts
	 * and we can safely let the softirq code take care of it unless the
	 * ring is completely empty.
	 *
	 * if we're called from krdsd, we'll be GFP_KERNEL.  In this case
	 * we might have raced with the softirq code while we had the refill
	 * lock held.  Use rds_ib_ring_low() instead of ring_empty to decide
	 * if we should requeue.
	 */
	if (rds_conn_up(conn) &&
	    ((can_wait && rds_ib_ring_low(&ic->i_recv_ring)) ||
	    rds_ib_ring_empty(&ic->i_recv_ring))) {
		queue_delayed_work(rds_wq, &conn->c_recv_w, 1);
	}
}

/*
 * We want to recycle several types of recv allocations, like incs and frags.
 * To use this, the *_free() function passes in the ptr to a list_head within
 * the recyclee, as well as the cache to put it on.
 *
 * First, we put the memory on a percpu list. When this reaches a certain size,
 * we move it to an intermediate non-percpu list in a lockless manner, with some
 * xchg/cmpxchg wizardry.
 *
 * N.B. Instead of a list_head as the anchor, we use a single pointer, which can
 * be NULL and xchg'd. The list is actually empty when the pointer is NULL, and
 * list_empty() will return true even when one element is actually present.
 */
static void rds_ib_recv_cache_put(struct list_head *new_item,
				  struct rds_ib_refill_cache *cache)
{
	unsigned long flags;
	struct list_head *old, *chpfirst;

	local_irq_save(flags);

	chpfirst = __this_cpu_read(cache->percpu->first);
	if (!chpfirst)
		INIT_LIST_HEAD(new_item);
	else /* put on front */
		list_add_tail(new_item, chpfirst);

	__this_cpu_write(cache->percpu->first, new_item);
	__this_cpu_inc(cache->percpu->count);

	if (__this_cpu_read(cache->percpu->count) < RDS_IB_RECYCLE_BATCH_COUNT)
		goto end;

	/*
	 * Return our per-cpu first list to the cache's xfer by atomically
	 * grabbing the current xfer list, appending it to our per-cpu list,
	 * and then atomically returning that entire list back to the
	 * cache's xfer list as long as it's still empty.
	 */
	do {
		old = xchg(&cache->xfer, NULL);
		if (old)
			list_splice_entire_tail(old, chpfirst);
		old = cmpxchg(&cache->xfer, NULL, chpfirst);
	} while (old);


	__this_cpu_write(cache->percpu->first, NULL);
	__this_cpu_write(cache->percpu->count, 0);
end:
	local_irq_restore(flags);
}

static struct list_head *rds_ib_recv_cache_get(struct rds_ib_refill_cache *cache)
{
	struct list_head *head = cache->ready;

	if (head) {
		if (!list_empty(head)) {
			cache->ready = head->next;
			list_del_init(head);
		} else
			cache->ready = NULL;
	}

	return head;
}

int rds_ib_inc_copy_to_user(struct rds_incoming *inc, struct iov_iter *to)
{
	struct rds_ib_incoming *ibinc;
	struct rds_page_frag *frag;
	unsigned long to_copy;
	unsigned long frag_off = 0;
	int copied = 0;
	int ret;
	u32 len;

	ibinc = container_of(inc, struct rds_ib_incoming, ii_inc);
	frag = list_entry(ibinc->ii_frags.next, struct rds_page_frag, f_item);
	len = be32_to_cpu(inc->i_hdr.h_len);

	while (iov_iter_count(to) && copied < len) {
		if (frag_off == RDS_FRAG_SIZE) {
			frag = list_entry(frag->f_item.next,
					  struct rds_page_frag, f_item);
			frag_off = 0;
		}
		to_copy = min_t(unsigned long, iov_iter_count(to),
				RDS_FRAG_SIZE - frag_off);
		to_copy = min_t(unsigned long, to_copy, len - copied);

		/* XXX needs + offset for multiple recvs per page */
		rds_stats_add(s_copy_to_user, to_copy);
		ret = copy_page_to_iter(sg_page(&frag->f_sg),
					frag->f_sg.offset + frag_off,
					to_copy,
					to);
		if (ret != to_copy)
			return -EFAULT;

		frag_off += to_copy;
		copied += to_copy;
	}

	return copied;
}

/* ic starts out kzalloc()ed */
void rds_ib_recv_init_ack(struct rds_ib_connection *ic)
{
	struct ib_send_wr *wr = &ic->i_ack_wr;
	struct ib_sge *sge = &ic->i_ack_sge;

	sge->addr = ic->i_ack_dma;
	sge->length = sizeof(struct rds_header);
	sge->lkey = ic->i_pd->local_dma_lkey;

	wr->sg_list = sge;
	wr->num_sge = 1;
	wr->opcode = IB_WR_SEND;
	wr->wr_id = RDS_IB_ACK_WR_ID;
	wr->send_flags = IB_SEND_SIGNALED | IB_SEND_SOLICITED;
}

/*
 * You'd think that with reliable IB connections you wouldn't need to ack
 * messages that have been received.  The problem is that IB hardware generates
 * an ack message before it has DMAed the message into memory.  This creates a
 * potential message loss if the HCA is disabled for any reason between when it
 * sends the ack and before the message is DMAed and processed.  This is only a
 * potential issue if another HCA is available for fail-over.
 *
 * When the remote host receives our ack they'll free the sent message from
 * their send queue.  To decrease the latency of this we always send an ack
 * immediately after we've received messages.
 *
 * For simplicity, we only have one ack in flight at a time.  This puts
 * pressure on senders to have deep enough send queues to absorb the latency of
 * a single ack frame being in flight.  This might not be good enough.
 *
 * This is implemented by having a long-lived send_wr and sge which point to a
 * statically allocated ack frame.  This ack wr does not fall under the ring
 * accounting that the tx and rx wrs do.  The QP attribute specifically makes
 * room for it beyond the ring size.  Send completion notices its special
 * wr_id and avoids working with the ring in that case.
 */
#ifndef KERNEL_HAS_ATOMIC64
void rds_ib_set_ack(struct rds_ib_connection *ic, u64 seq, int ack_required)
{
	unsigned long flags;

	spin_lock_irqsave(&ic->i_ack_lock, flags);
	ic->i_ack_next = seq;
	if (ack_required)
		set_bit(IB_ACK_REQUESTED, &ic->i_ack_flags);
	spin_unlock_irqrestore(&ic->i_ack_lock, flags);
}

static u64 rds_ib_get_ack(struct rds_ib_connection *ic)
{
	unsigned long flags;
	u64 seq;

	clear_bit(IB_ACK_REQUESTED, &ic->i_ack_flags);

	spin_lock_irqsave(&ic->i_ack_lock, flags);
	seq = ic->i_ack_next;
	spin_unlock_irqrestore(&ic->i_ack_lock, flags);

	return seq;
}
#else
void rds_ib_set_ack(struct rds_ib_connection *ic, u64 seq, int ack_required)
{
	atomic64_set(&ic->i_ack_next, seq);
	if (ack_required) {
		smp_mb__before_atomic();
		set_bit(IB_ACK_REQUESTED, &ic->i_ack_flags);
	}
}

static u64 rds_ib_get_ack(struct rds_ib_connection *ic)
{
	clear_bit(IB_ACK_REQUESTED, &ic->i_ack_flags);
	smp_mb__after_atomic();

	return atomic64_read(&ic->i_ack_next);
}
#endif


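/*
 * Post the single, statically allocated ACK WR carrying the latest
 * acknowledged sequence number and any credits we are advertising.  On
 * failure the IB_ACK_IN_FLIGHT/IB_ACK_REQUESTED bits are reset so the ACK
 * will be retried, and the connection is torn down.
 */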
static void rds_ib_send_ack(struct rds_ib_connection *ic, unsigned int adv_credits)
{
	struct rds_header *hdr = ic->i_ack;
	u64 seq;
	int ret;

	seq = rds_ib_get_ack(ic);

	rdsdebug("send_ack: ic %p ack %llu\n", ic, (unsigned long long) seq);
	rds_message_populate_header(hdr, 0, 0, 0);
	hdr->h_ack = cpu_to_be64(seq);
	hdr->h_credit = adv_credits;
	rds_message_make_checksum(hdr);
	ic->i_ack_queued = jiffies;

	ret = ib_post_send(ic->i_cm_id->qp, &ic->i_ack_wr, NULL);
	if (unlikely(ret)) {
		/* Failed to send. Release the WR, and
		 * force another ACK.
		 */
		clear_bit(IB_ACK_IN_FLIGHT, &ic->i_ack_flags);
		set_bit(IB_ACK_REQUESTED, &ic->i_ack_flags);

		rds_ib_stats_inc(s_ib_ack_send_failure);

		rds_ib_conn_error(ic->conn, "sending ack failed\n");
	} else
		rds_ib_stats_inc(s_ib_ack_sent);
}

/*
 * There are 3 ways of getting acknowledgements to the peer:
 *  1. We call rds_ib_attempt_ack from the recv completion handler
 *     to send an ACK-only frame.
 *     However, there can be only one such frame in the send queue
 *     at any time, so we may have to postpone it.
 *  2. When another (data) packet is transmitted while there's
 *     an ACK in the queue, we piggyback the ACK sequence number
 *     on the data packet.
 *  3. If the ACK WR is done sending, we get called from the
 *     send queue completion handler, and check whether there's
 *     another ACK pending (postponed because the WR was on the
 *     queue). If so, we transmit it.
 *
 * We maintain 2 variables:
 *  - i_ack_flags, which keeps track of whether the ACK WR
 *    is currently in the send queue or not (IB_ACK_IN_FLIGHT)
 *  - i_ack_next, which is the last sequence number we received
 *
 * Potentially, send queue and receive queue handlers can run concurrently.
 * It would be nice to not have to use a spinlock to synchronize things,
 * but the one problem that rules this out is that 64bit updates are
 * not atomic on all platforms. Things would be a lot simpler if
 * we had atomic64 or maybe cmpxchg64 everywhere.
 *
 * Reconnecting complicates this picture just slightly. When we
 * reconnect, we may be seeing duplicate packets. The peer
 * is retransmitting them, because it hasn't seen an ACK for
 * them. It is important that we ACK these.
 *
 * ACK mitigation adds a header flag "ACK_REQUIRED"; any packet with
 * this flag set *MUST* be acknowledged immediately.
 */

/*
 * When we get here, we're called from the recv queue handler.
 * Check whether we ought to transmit an ACK.
 */
void rds_ib_attempt_ack(struct rds_ib_connection *ic)
{
	unsigned int adv_credits;

	if (!test_bit(IB_ACK_REQUESTED, &ic->i_ack_flags))
		return;

	if (test_and_set_bit(IB_ACK_IN_FLIGHT, &ic->i_ack_flags)) {
		rds_ib_stats_inc(s_ib_ack_send_delayed);
		return;
	}

	/* Can we get a send credit? */
	if (!rds_ib_send_grab_credits(ic, 1, &adv_credits, 0, RDS_MAX_ADV_CREDIT)) {
		rds_ib_stats_inc(s_ib_tx_throttle);
		clear_bit(IB_ACK_IN_FLIGHT, &ic->i_ack_flags);
		return;
	}

	clear_bit(IB_ACK_REQUESTED, &ic->i_ack_flags);
	rds_ib_send_ack(ic, adv_credits);
}

/*
 * We get here from the send completion handler, when the
 * adapter tells us the ACK frame was sent.
 */
void rds_ib_ack_send_complete(struct rds_ib_connection *ic)
{
	clear_bit(IB_ACK_IN_FLIGHT, &ic->i_ack_flags);
	rds_ib_attempt_ack(ic);
}

/*
 * This is called by the regular xmit code when it wants to piggyback
 * an ACK on an outgoing frame.
 */
u64 rds_ib_piggyb_ack(struct rds_ib_connection *ic)
{
	if (test_and_clear_bit(IB_ACK_REQUESTED, &ic->i_ack_flags))
		rds_ib_stats_inc(s_ib_ack_send_piggybacked);
	return rds_ib_get_ack(ic);
}

/*
 * It's kind of lame that we're copying from the posted receive pages into
 * long-lived bitmaps. We could have posted the bitmaps and rdma written into
 * them. But receiving new congestion bitmaps should be a *rare* event, so
 * hopefully we won't need to invest that complexity in making it more
 * efficient. By copying we can share a simpler core with TCP which has to
 * copy.
 */
static void rds_ib_cong_recv(struct rds_connection *conn,
			     struct rds_ib_incoming *ibinc)
{
	struct rds_cong_map *map;
	unsigned int map_off;
	unsigned int map_page;
	struct rds_page_frag *frag;
	unsigned long frag_off;
	unsigned long to_copy;
	unsigned long copied;
	__le64 uncongested = 0;
	void *addr;

	/* catch completely corrupt packets */
	if (be32_to_cpu(ibinc->ii_inc.i_hdr.h_len) != RDS_CONG_MAP_BYTES)
		return;

	map = conn->c_fcong;
	map_page = 0;
	map_off = 0;

	frag = list_entry(ibinc->ii_frags.next, struct rds_page_frag, f_item);
	frag_off = 0;

	copied = 0;

	while (copied < RDS_CONG_MAP_BYTES) {
		__le64 *src, *dst;
		unsigned int k;

		to_copy = min(RDS_FRAG_SIZE - frag_off, PAGE_SIZE - map_off);
		BUG_ON(to_copy & 7); /* Must be 64bit aligned. */

		addr = kmap_atomic(sg_page(&frag->f_sg));

		src = addr + frag->f_sg.offset + frag_off;
		dst = (void *)map->m_page_addrs[map_page] + map_off;
		for (k = 0; k < to_copy; k += 8) {
			/* Record ports that became uncongested, ie
			 * bits that changed from 0 to 1. */
			uncongested |= ~(*src) & *dst;
			*dst++ = *src++;
		}
		kunmap_atomic(addr);

		copied += to_copy;

		map_off += to_copy;
		if (map_off == PAGE_SIZE) {
			map_off = 0;
			map_page++;
		}

		frag_off += to_copy;
		if (frag_off == RDS_FRAG_SIZE) {
			frag = list_entry(frag->f_item.next,
					  struct rds_page_frag, f_item);
			frag_off = 0;
		}
	}

	/* the congestion map is in little endian order */
	rds_cong_map_updated(map, le64_to_cpu(uncongested));
}

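/*
 * Handle one completed receive: validate the header, apply the piggybacked
 * ACK and credit updates, then either treat the fragment as part of a
 * message being reassembled on ic->i_ibinc or, once the last fragment
 * arrives, hand the inc to rds_recv_incoming() (or to the congestion-map
 * path for RDS_FLAG_CONG_BITMAP messages).
 */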
static void rds_ib_process_recv(struct rds_connection *conn,
				struct rds_ib_recv_work *recv, u32 data_len,
				struct rds_ib_ack_state *state)
{
	struct rds_ib_connection *ic = conn->c_transport_data;
	struct rds_ib_incoming *ibinc = ic->i_ibinc;
	struct rds_header *ihdr, *hdr;

	/* XXX shut down the connection if port 0,0 are seen? */

	rdsdebug("ic %p ibinc %p recv %p byte len %u\n", ic, ibinc, recv,
		 data_len);

	if (data_len < sizeof(struct rds_header)) {
		rds_ib_conn_error(conn, "incoming message "
		       "from %pI6c didn't include a "
		       "header, disconnecting and "
		       "reconnecting\n",
		       &conn->c_faddr);
		return;
	}
	data_len -= sizeof(struct rds_header);

	ihdr = &ic->i_recv_hdrs[recv - ic->i_recvs];

	/* Validate the checksum. */
	if (!rds_message_verify_checksum(ihdr)) {
		rds_ib_conn_error(conn, "incoming message "
		       "from %pI6c has corrupted header - "
		       "forcing a reconnect\n",
		       &conn->c_faddr);
		rds_stats_inc(s_recv_drop_bad_checksum);
		return;
	}

	/* Process the ACK sequence which comes with every packet */
	state->ack_recv = be64_to_cpu(ihdr->h_ack);
	state->ack_recv_valid = 1;

	/* Process the credits update if there was one */
	if (ihdr->h_credit)
		rds_ib_send_add_credits(conn, ihdr->h_credit);

	if (ihdr->h_sport == 0 && ihdr->h_dport == 0 && data_len == 0) {
		/* This is an ACK-only packet. The reason it gets
		 * special treatment here is that historically, ACKs
		 * were rather special beasts.
		 */
		rds_ib_stats_inc(s_ib_ack_received);

		/*
		 * Usually the frags make their way on to incs and are then freed as
		 * the inc is freed.  We don't go that route, so we have to drop the
		 * page ref ourselves.  We can't just leave the page on the recv
		 * because that confuses the dma mapping of pages and each recv's use
		 * of a partial page.
		 *
		 * FIXME: Fold this into the code path below.
		 */
		rds_ib_frag_free(ic, recv->r_frag);
		recv->r_frag = NULL;
		return;
	}

	/*
	 * If we don't already have an inc on the connection then this
	 * fragment has a header and starts a message.. copy its header
	 * into the inc and save the inc so we can hang upcoming fragments
	 * off its list.
	 */
	if (!ibinc) {
		ibinc = recv->r_ibinc;
		recv->r_ibinc = NULL;
		ic->i_ibinc = ibinc;

		hdr = &ibinc->ii_inc.i_hdr;
		ibinc->ii_inc.i_rx_lat_trace[RDS_MSG_RX_HDR] =
				local_clock();
		memcpy(hdr, ihdr, sizeof(*hdr));
		ic->i_recv_data_rem = be32_to_cpu(hdr->h_len);
		ibinc->ii_inc.i_rx_lat_trace[RDS_MSG_RX_START] =
				local_clock();

		rdsdebug("ic %p ibinc %p rem %u flag 0x%x\n", ic, ibinc,
			 ic->i_recv_data_rem, hdr->h_flags);
	} else {
		hdr = &ibinc->ii_inc.i_hdr;
		/* We can't just use memcmp here; fragments of a
		 * single message may carry different ACKs */
		if (hdr->h_sequence != ihdr->h_sequence ||
		    hdr->h_len != ihdr->h_len ||
		    hdr->h_sport != ihdr->h_sport ||
		    hdr->h_dport != ihdr->h_dport) {
			rds_ib_conn_error(conn,
				"fragment header mismatch; forcing reconnect\n");
			return;
		}
	}

	list_add_tail(&recv->r_frag->f_item, &ibinc->ii_frags);
	recv->r_frag = NULL;

	if (ic->i_recv_data_rem > RDS_FRAG_SIZE)
		ic->i_recv_data_rem -= RDS_FRAG_SIZE;
	else {
		ic->i_recv_data_rem = 0;
		ic->i_ibinc = NULL;

		if (ibinc->ii_inc.i_hdr.h_flags == RDS_FLAG_CONG_BITMAP) {
			rds_ib_cong_recv(conn, ibinc);
		} else {
			rds_recv_incoming(conn, &conn->c_faddr, &conn->c_laddr,
					  &ibinc->ii_inc, GFP_ATOMIC);
			state->ack_next = be64_to_cpu(hdr->h_sequence);
			state->ack_next_valid = 1;
		}

		/* Evaluate the ACK_REQUIRED flag *after* we received
		 * the complete frame, and after bumping the next_rx
		 * sequence. */
		if (hdr->h_flags & RDS_FLAG_ACK_REQUIRED) {
			rds_stats_inc(s_recv_ack_required);
			state->ack_required = 1;
		}

		rds_inc_put(&ibinc->ii_inc);
	}
}

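/*
 * Called from the completion-queue path for every receive completion.
 * Successful completions are fed to rds_ib_process_recv(); errored ones
 * tear down the connection unless it is already shutting down.  Either
 * way the ring slot is unmapped, any unconsumed fragment is recycled, and
 * the ring is refilled when it is running low.
 */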
void rds_ib_recv_cqe_handler(struct rds_ib_connection *ic,
			     struct ib_wc *wc,
			     struct rds_ib_ack_state *state)
{
	struct rds_connection *conn = ic->conn;
	struct rds_ib_recv_work *recv;

	rdsdebug("wc wr_id 0x%llx status %u (%s) byte_len %u imm_data %u\n",
		 (unsigned long long)wc->wr_id, wc->status,
		 ib_wc_status_msg(wc->status), wc->byte_len,
		 be32_to_cpu(wc->ex.imm_data));

	rds_ib_stats_inc(s_ib_rx_cq_event);
	recv = &ic->i_recvs[rds_ib_ring_oldest(&ic->i_recv_ring)];
	ib_dma_unmap_sg(ic->i_cm_id->device, &recv->r_frag->f_sg, 1,
			DMA_FROM_DEVICE);

	/* Also process recvs in connecting state because it is possible
	 * to get a recv completion _before_ the rdmacm ESTABLISHED
	 * event is processed.
	 */
	if (wc->status == IB_WC_SUCCESS) {
		rds_ib_process_recv(conn, recv, wc->byte_len, state);
	} else {
		/* We expect errors as the qp is drained during shutdown */
		if (rds_conn_up(conn) || rds_conn_connecting(conn))
			rds_ib_conn_error(conn, "recv completion on <%pI6c,%pI6c, %d> had status %u (%s), disconnecting and reconnecting\n",
					  &conn->c_laddr, &conn->c_faddr,
					  conn->c_tos, wc->status,
					  ib_wc_status_msg(wc->status));
	}

	/* rds_ib_process_recv() doesn't always consume the frag, and
	 * we might not have called it at all if the wc didn't indicate
	 * success. We already unmapped the frag's pages, though, and
	 * the following rds_ib_ring_free() call tells the refill path
	 * that it will not find an allocated frag here. Make sure we
	 * keep that promise by freeing a frag that's still on the ring.
	 */
	if (recv->r_frag) {
		rds_ib_frag_free(ic, recv->r_frag);
		recv->r_frag = NULL;
	}
	rds_ib_ring_free(&ic->i_recv_ring, 1);

	/* If we ever end up with a really empty receive ring, we're
	 * in deep trouble, as the sender will definitely see RNR
	 * timeouts. */
	if (rds_ib_ring_empty(&ic->i_recv_ring))
		rds_ib_stats_inc(s_ib_rx_ring_empty);

	if (rds_ib_ring_low(&ic->i_recv_ring)) {
		rds_ib_recv_refill(conn, 0, GFP_NOWAIT);
		rds_ib_stats_inc(s_ib_rx_refill_from_cq);
	}
}

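/*
 * Receive-path entry point run from the worker thread: send any pending
 * ACK and top the receive ring back up with GFP_KERNEL allocations.
 */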
int rds_ib_recv_path(struct rds_conn_path *cp)
{
	struct rds_connection *conn = cp->cp_conn;
	struct rds_ib_connection *ic = conn->c_transport_data;

	rdsdebug("conn %p\n", conn);
	if (rds_conn_up(conn)) {
		rds_ib_attempt_ack(ic);
		rds_ib_recv_refill(conn, 0, GFP_KERNEL);
		rds_ib_stats_inc(s_ib_rx_refill_from_thread);
	}

	return 0;
}

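/*
 * Module init/exit for the receive path: create the inc and frag slabs and
 * cap total receive allocations at roughly 30% of RAM (expressed in
 * fragments) via rds_ib_sysctl_max_recv_allocation.
 */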
int rds_ib_recv_init(void)
{
	struct sysinfo si;
	int ret = -ENOMEM;

	/* Default to 30% of all available RAM for recv memory */
	si_meminfo(&si);
	rds_ib_sysctl_max_recv_allocation = si.totalram / 3 * PAGE_SIZE / RDS_FRAG_SIZE;

	rds_ib_incoming_slab = kmem_cache_create("rds_ib_incoming",
					sizeof(struct rds_ib_incoming),
					0, SLAB_HWCACHE_ALIGN, NULL);
	if (!rds_ib_incoming_slab)
		goto out;

	rds_ib_frag_slab = kmem_cache_create("rds_ib_frag",
					sizeof(struct rds_page_frag),
					0, SLAB_HWCACHE_ALIGN, NULL);
	if (!rds_ib_frag_slab) {
		kmem_cache_destroy(rds_ib_incoming_slab);
		rds_ib_incoming_slab = NULL;
	} else
		ret = 0;
out:
	return ret;
}

void rds_ib_recv_exit(void)
{
	kmem_cache_destroy(rds_ib_incoming_slab);
	kmem_cache_destroy(rds_ib_frag_slab);
}