/*
 * Copyright (c) 2006, 2019 Oracle and/or its affiliates. All rights reserved.
 *
 * This software is available to you under a choice of one of two
 * licenses. You may choose to be licensed under the terms of the GNU
 * General Public License (GPL) Version 2, available from the file
 * COPYING in the main directory of this source tree, or the
 * OpenIB.org BSD license below:
 *
 *     Redistribution and use in source and binary forms, with or
 *     without modification, are permitted provided that the following
 *     conditions are met:
 *
 *      - Redistributions of source code must retain the above
 *        copyright notice, this list of conditions and the following
 *        disclaimer.
 *
 *      - Redistributions in binary form must reproduce the above
 *        copyright notice, this list of conditions and the following
 *        disclaimer in the documentation and/or other materials
 *        provided with the distribution.
 *
 * THE SOFTWARE IS PROVIDED "AS IS", WITHOUT WARRANTY OF ANY KIND,
 * EXPRESS OR IMPLIED, INCLUDING BUT NOT LIMITED TO THE WARRANTIES OF
 * MERCHANTABILITY, FITNESS FOR A PARTICULAR PURPOSE AND
 * NONINFRINGEMENT. IN NO EVENT SHALL THE AUTHORS OR COPYRIGHT HOLDERS
 * BE LIABLE FOR ANY CLAIM, DAMAGES OR OTHER LIABILITY, WHETHER IN AN
 * ACTION OF CONTRACT, TORT OR OTHERWISE, ARISING FROM, OUT OF OR IN
 * CONNECTION WITH THE SOFTWARE OR THE USE OR OTHER DEALINGS IN THE
 * SOFTWARE.
 *
 */
#include <linux/kernel.h>
#include <linux/slab.h>
#include <linux/pci.h>
#include <linux/dma-mapping.h>
#include <rdma/rdma_cm.h>

#include "rds_single_path.h"
#include "rds.h"
#include "ib.h"

static struct kmem_cache *rds_ib_incoming_slab;
static struct kmem_cache *rds_ib_frag_slab;
static atomic_t rds_ib_allocation = ATOMIC_INIT(0);

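/* Set up every recv work entry in the ring: each work request carries two
 * SGEs, the first pointing at the connection's preallocated, DMA-mapped
 * header slot for that ring index, the second reserved for the data
 * fragment that the refill path attaches later.
 */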
void rds_ib_recv_init_ring(struct rds_ib_connection *ic)
{
	struct rds_ib_recv_work *recv;
	u32 i;

	for (i = 0, recv = ic->i_recvs; i < ic->i_recv_ring.w_nr; i++, recv++) {
		struct ib_sge *sge;

		recv->r_ibinc = NULL;
		recv->r_frag = NULL;

		recv->r_wr.next = NULL;
		recv->r_wr.wr_id = i;
		recv->r_wr.sg_list = recv->r_sge;
		recv->r_wr.num_sge = RDS_IB_RECV_SGE;

		sge = &recv->r_sge[0];
		sge->addr = ic->i_recv_hdrs_dma[i];
		sge->length = sizeof(struct rds_header);
		sge->lkey = ic->i_pd->local_dma_lkey;

		sge = &recv->r_sge[1];
		sge->addr = 0;
		sge->length = RDS_FRAG_SIZE;
		sge->lkey = ic->i_pd->local_dma_lkey;
	}
}

/*
 * The entire 'from' list, including the from element itself, is put on
 * to the tail of the 'to' list.
 */
static void list_splice_entire_tail(struct list_head *from,
				    struct list_head *to)
{
	struct list_head *from_last = from->prev;

	list_splice_tail(from_last, to);
	list_add_tail(from_last, to);
}

static void rds_ib_cache_xfer_to_ready(struct rds_ib_refill_cache *cache)
{
	struct list_head *tmp;

	tmp = xchg(&cache->xfer, NULL);
	if (tmp) {
		if (cache->ready)
			list_splice_entire_tail(tmp, cache->ready);
		else
			cache->ready = tmp;
	}
}

static int rds_ib_recv_alloc_cache(struct rds_ib_refill_cache *cache, gfp_t gfp)
{
	struct rds_ib_cache_head *head;
	int cpu;

	cache->percpu = alloc_percpu_gfp(struct rds_ib_cache_head, gfp);
	if (!cache->percpu)
		return -ENOMEM;

	for_each_possible_cpu(cpu) {
		head = per_cpu_ptr(cache->percpu, cpu);
		head->first = NULL;
		head->count = 0;
	}
	cache->xfer = NULL;
	cache->ready = NULL;

	return 0;
}

int rds_ib_recv_alloc_caches(struct rds_ib_connection *ic, gfp_t gfp)
{
	int ret;

	ret = rds_ib_recv_alloc_cache(&ic->i_cache_incs, gfp);
	if (!ret) {
		ret = rds_ib_recv_alloc_cache(&ic->i_cache_frags, gfp);
		if (ret)
			free_percpu(ic->i_cache_incs.percpu);
	}

	return ret;
}

static void rds_ib_cache_splice_all_lists(struct rds_ib_refill_cache *cache,
					  struct list_head *caller_list)
{
	struct rds_ib_cache_head *head;
	int cpu;

	for_each_possible_cpu(cpu) {
		head = per_cpu_ptr(cache->percpu, cpu);
		if (head->first) {
			list_splice_entire_tail(head->first, caller_list);
			head->first = NULL;
		}
	}

	if (cache->ready) {
		list_splice_entire_tail(cache->ready, caller_list);
		cache->ready = NULL;
	}
}

void rds_ib_recv_free_caches(struct rds_ib_connection *ic)
{
	struct rds_ib_incoming *inc;
	struct rds_ib_incoming *inc_tmp;
	struct rds_page_frag *frag;
	struct rds_page_frag *frag_tmp;
	LIST_HEAD(list);

	rds_ib_cache_xfer_to_ready(&ic->i_cache_incs);
	rds_ib_cache_splice_all_lists(&ic->i_cache_incs, &list);
	free_percpu(ic->i_cache_incs.percpu);

	list_for_each_entry_safe(inc, inc_tmp, &list, ii_cache_entry) {
		list_del(&inc->ii_cache_entry);
		WARN_ON(!list_empty(&inc->ii_frags));
		kmem_cache_free(rds_ib_incoming_slab, inc);
		atomic_dec(&rds_ib_allocation);
	}

	rds_ib_cache_xfer_to_ready(&ic->i_cache_frags);
	rds_ib_cache_splice_all_lists(&ic->i_cache_frags, &list);
	free_percpu(ic->i_cache_frags.percpu);

	list_for_each_entry_safe(frag, frag_tmp, &list, f_cache_entry) {
		list_del(&frag->f_cache_entry);
		WARN_ON(!list_empty(&frag->f_item));
		kmem_cache_free(rds_ib_frag_slab, frag);
	}
}

/* fwd decl */
static void rds_ib_recv_cache_put(struct list_head *new_item,
				  struct rds_ib_refill_cache *cache);
static struct list_head *rds_ib_recv_cache_get(struct rds_ib_refill_cache *cache);

/* Recycle frag and attached recv buffer f_sg */
static void rds_ib_frag_free(struct rds_ib_connection *ic,
			     struct rds_page_frag *frag)
{
	rdsdebug("frag %p page %p\n", frag, sg_page(&frag->f_sg));

	rds_ib_recv_cache_put(&frag->f_cache_entry, &ic->i_cache_frags);
	atomic_add(RDS_FRAG_SIZE / SZ_1K, &ic->i_cache_allocs);
	rds_ib_stats_add(s_ib_recv_added_to_cache, RDS_FRAG_SIZE);
}

/* Recycle inc after freeing attached frags */
void rds_ib_inc_free(struct rds_incoming *inc)
{
	struct rds_ib_incoming *ibinc;
	struct rds_page_frag *frag;
	struct rds_page_frag *pos;
	struct rds_ib_connection *ic = inc->i_conn->c_transport_data;

	ibinc = container_of(inc, struct rds_ib_incoming, ii_inc);

	/* Free attached frags */
	list_for_each_entry_safe(frag, pos, &ibinc->ii_frags, f_item) {
		list_del_init(&frag->f_item);
		rds_ib_frag_free(ic, frag);
	}
	BUG_ON(!list_empty(&ibinc->ii_frags));

	rdsdebug("freeing ibinc %p inc %p\n", ibinc, inc);
	rds_ib_recv_cache_put(&ibinc->ii_cache_entry, &ic->i_cache_incs);
}

static void rds_ib_recv_clear_one(struct rds_ib_connection *ic,
				  struct rds_ib_recv_work *recv)
{
	if (recv->r_ibinc) {
		rds_inc_put(&recv->r_ibinc->ii_inc);
		recv->r_ibinc = NULL;
	}
	if (recv->r_frag) {
		ib_dma_unmap_sg(ic->i_cm_id->device, &recv->r_frag->f_sg, 1, DMA_FROM_DEVICE);
		rds_ib_frag_free(ic, recv->r_frag);
		recv->r_frag = NULL;
	}
}

void rds_ib_recv_clear_ring(struct rds_ib_connection *ic)
{
	u32 i;

	for (i = 0; i < ic->i_recv_ring.w_nr; i++)
		rds_ib_recv_clear_one(ic, &ic->i_recvs[i]);
}

static struct rds_ib_incoming *rds_ib_refill_one_inc(struct rds_ib_connection *ic,
						     gfp_t slab_mask)
{
	struct rds_ib_incoming *ibinc;
	struct list_head *cache_item;
	int avail_allocs;

	cache_item = rds_ib_recv_cache_get(&ic->i_cache_incs);
	if (cache_item) {
		ibinc = container_of(cache_item, struct rds_ib_incoming, ii_cache_entry);
	} else {
		avail_allocs = atomic_add_unless(&rds_ib_allocation,
						 1, rds_ib_sysctl_max_recv_allocation);
		if (!avail_allocs) {
			rds_ib_stats_inc(s_ib_rx_alloc_limit);
			return NULL;
		}
		ibinc = kmem_cache_alloc(rds_ib_incoming_slab, slab_mask);
		if (!ibinc) {
			atomic_dec(&rds_ib_allocation);
			return NULL;
		}
		rds_ib_stats_inc(s_ib_rx_total_incs);
	}
	INIT_LIST_HEAD(&ibinc->ii_frags);
	rds_inc_init(&ibinc->ii_inc, ic->conn, &ic->conn->c_faddr);

	return ibinc;
}

static struct rds_page_frag *rds_ib_refill_one_frag(struct rds_ib_connection *ic,
						    gfp_t slab_mask, gfp_t page_mask)
{
	struct rds_page_frag *frag;
	struct list_head *cache_item;
	int ret;

	cache_item = rds_ib_recv_cache_get(&ic->i_cache_frags);
	if (cache_item) {
		frag = container_of(cache_item, struct rds_page_frag, f_cache_entry);
		atomic_sub(RDS_FRAG_SIZE / SZ_1K, &ic->i_cache_allocs);
		rds_ib_stats_add(s_ib_recv_added_to_cache, RDS_FRAG_SIZE);
	} else {
		frag = kmem_cache_alloc(rds_ib_frag_slab, slab_mask);
		if (!frag)
			return NULL;

		sg_init_table(&frag->f_sg, 1);
		ret = rds_page_remainder_alloc(&frag->f_sg,
					       RDS_FRAG_SIZE, page_mask);
		if (ret) {
			kmem_cache_free(rds_ib_frag_slab, frag);
			return NULL;
		}
		rds_ib_stats_inc(s_ib_rx_total_frags);
	}

	INIT_LIST_HEAD(&frag->f_item);

	return frag;
}

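/* Prepare one recv work entry for posting: attach an inc (unless the entry
 * already carries one from a message whose fragments span several recvs),
 * allocate and DMA-map a page fragment, and point the two SGEs at the
 * header slot and the fragment respectively.
 */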
static int rds_ib_recv_refill_one(struct rds_connection *conn,
				  struct rds_ib_recv_work *recv, gfp_t gfp)
{
	struct rds_ib_connection *ic = conn->c_transport_data;
	struct ib_sge *sge;
	int ret = -ENOMEM;
	gfp_t slab_mask = GFP_NOWAIT;
	gfp_t page_mask = GFP_NOWAIT;

	if (gfp & __GFP_DIRECT_RECLAIM) {
		slab_mask = GFP_KERNEL;
		page_mask = GFP_HIGHUSER;
	}

	if (!ic->i_cache_incs.ready)
		rds_ib_cache_xfer_to_ready(&ic->i_cache_incs);
	if (!ic->i_cache_frags.ready)
		rds_ib_cache_xfer_to_ready(&ic->i_cache_frags);

	/*
	 * ibinc was taken from recv if recv contained the start of a message.
	 * recvs that were continuations will still have this allocated.
	 */
	if (!recv->r_ibinc) {
		recv->r_ibinc = rds_ib_refill_one_inc(ic, slab_mask);
		if (!recv->r_ibinc)
			goto out;
	}

	WARN_ON(recv->r_frag); /* leak! */
	recv->r_frag = rds_ib_refill_one_frag(ic, slab_mask, page_mask);
	if (!recv->r_frag)
		goto out;

	ret = ib_dma_map_sg(ic->i_cm_id->device, &recv->r_frag->f_sg,
			    1, DMA_FROM_DEVICE);
	WARN_ON(ret != 1);

	sge = &recv->r_sge[0];
	sge->addr = ic->i_recv_hdrs_dma[recv - ic->i_recvs];
	sge->length = sizeof(struct rds_header);

	sge = &recv->r_sge[1];
	sge->addr = sg_dma_address(&recv->r_frag->f_sg);
	sge->length = sg_dma_len(&recv->r_frag->f_sg);

	ret = 0;
out:
	return ret;
}

static int acquire_refill(struct rds_connection *conn)
{
	return test_and_set_bit(RDS_RECV_REFILL, &conn->c_flags) == 0;
}

static void release_refill(struct rds_connection *conn)
{
	clear_bit(RDS_RECV_REFILL, &conn->c_flags);

	/* We don't use wait_on_bit()/wake_up_bit() because our waking is in a
	 * hot path and finding waiters is very rare. We don't want to walk
	 * the system-wide hashed waitqueue buckets in the fast path only to
	 * almost never find waiters.
	 */
	if (waitqueue_active(&conn->c_waitq))
		wake_up_all(&conn->c_waitq);
}

/*
 * This tries to allocate and post unused work requests after making sure that
 * they have all the allocations they need to queue received fragments into
 * sockets.
 */
void rds_ib_recv_refill(struct rds_connection *conn, int prefill, gfp_t gfp)
{
	struct rds_ib_connection *ic = conn->c_transport_data;
	struct rds_ib_recv_work *recv;
	unsigned int posted = 0;
	int ret = 0;
	bool can_wait = !!(gfp & __GFP_DIRECT_RECLAIM);
	bool must_wake = false;
	u32 pos;

	/* the goal here is to just make sure that someone, somewhere
	 * is posting buffers. If we can't get the refill lock,
	 * let them do their thing
	 */
	if (!acquire_refill(conn))
		return;

	while ((prefill || rds_conn_up(conn)) &&
	       rds_ib_ring_alloc(&ic->i_recv_ring, 1, &pos)) {
		if (pos >= ic->i_recv_ring.w_nr) {
			printk(KERN_NOTICE "Argh - ring alloc returned pos=%u\n",
			       pos);
			break;
		}

		recv = &ic->i_recvs[pos];
		ret = rds_ib_recv_refill_one(conn, recv, gfp);
		if (ret) {
			must_wake = true;
			break;
		}

		rdsdebug("recv %p ibinc %p page %p addr %lu\n", recv,
			 recv->r_ibinc, sg_page(&recv->r_frag->f_sg),
			 (long)sg_dma_address(&recv->r_frag->f_sg));

		/* XXX when can this fail? */
		ret = ib_post_recv(ic->i_cm_id->qp, &recv->r_wr, NULL);
		if (ret) {
			rds_ib_conn_error(conn, "recv post on "
			       "%pI6c returned %d, disconnecting and "
			       "reconnecting\n", &conn->c_faddr,
			       ret);
			break;
		}

		posted++;

		if ((posted > 128 && need_resched()) || posted > 8192) {
			must_wake = true;
			break;
		}
	}

	/* We're doing flow control - update the window. */
	if (ic->i_flowctl && posted)
		rds_ib_advertise_credits(conn, posted);

	if (ret)
		rds_ib_ring_unalloc(&ic->i_recv_ring, 1);

	release_refill(conn);

	/* if we're called from the softirq handler, we'll be GFP_NOWAIT.
	 * in this case the ring being low is going to lead to more interrupts
	 * and we can safely let the softirq code take care of it unless the
	 * ring is completely empty.
	 *
	 * if we're called from krdsd, we'll be GFP_KERNEL. In this case
	 * we might have raced with the softirq code while we had the refill
	 * lock held. Use rds_ib_ring_low() instead of ring_empty to decide
	 * if we should requeue.
	 */
	if (rds_conn_up(conn) &&
	    (must_wake ||
	    (can_wait && rds_ib_ring_low(&ic->i_recv_ring)) ||
	    rds_ib_ring_empty(&ic->i_recv_ring))) {
		queue_delayed_work(rds_wq, &conn->c_recv_w, 1);
	}
	if (can_wait)
		cond_resched();
}

/*
 * We want to recycle several types of recv allocations, like incs and frags.
 * To use this, the *_free() function passes in the ptr to a list_head within
 * the recyclee, as well as the cache to put it on.
 *
 * First, we put the memory on a percpu list. When this reaches a certain size,
 * we move it to an intermediate non-percpu list in a lockless manner, with some
 * xchg/cmpxchg wizardry.
 *
 * N.B. Instead of a list_head as the anchor, we use a single pointer, which can
 * be NULL and xchg'd. The list is actually empty when the pointer is NULL, and
 * list_empty() will return true when one element is actually present.
 */
static void rds_ib_recv_cache_put(struct list_head *new_item,
				  struct rds_ib_refill_cache *cache)
{
	unsigned long flags;
	struct list_head *old, *chpfirst;

	local_irq_save(flags);

	chpfirst = __this_cpu_read(cache->percpu->first);
	if (!chpfirst)
		INIT_LIST_HEAD(new_item);
	else /* put on front */
		list_add_tail(new_item, chpfirst);

	__this_cpu_write(cache->percpu->first, new_item);
	__this_cpu_inc(cache->percpu->count);

	if (__this_cpu_read(cache->percpu->count) < RDS_IB_RECYCLE_BATCH_COUNT)
		goto end;

	/*
	 * Return our per-cpu first list to the cache's xfer by atomically
	 * grabbing the current xfer list, appending it to our per-cpu list,
	 * and then atomically returning that entire list back to the
	 * cache's xfer list as long as it's still empty.
	 */
	do {
		old = xchg(&cache->xfer, NULL);
		if (old)
			list_splice_entire_tail(old, chpfirst);
		old = cmpxchg(&cache->xfer, NULL, chpfirst);
	} while (old);

	__this_cpu_write(cache->percpu->first, NULL);
	__this_cpu_write(cache->percpu->count, 0);
end:
	local_irq_restore(flags);
}

static struct list_head *rds_ib_recv_cache_get(struct rds_ib_refill_cache *cache)
{
	struct list_head *head = cache->ready;

	if (head) {
		if (!list_empty(head)) {
			cache->ready = head->next;
			list_del_init(head);
		} else
			cache->ready = NULL;
	}

	return head;
}

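/* Copy a received message out of its chain of page fragments into the
 * caller's iov_iter, walking to the next fragment each time RDS_FRAG_SIZE
 * bytes have been consumed.
 */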
int rds_ib_inc_copy_to_user(struct rds_incoming *inc, struct iov_iter *to)
{
	struct rds_ib_incoming *ibinc;
	struct rds_page_frag *frag;
	unsigned long to_copy;
	unsigned long frag_off = 0;
	int copied = 0;
	int ret;
	u32 len;

	ibinc = container_of(inc, struct rds_ib_incoming, ii_inc);
	frag = list_entry(ibinc->ii_frags.next, struct rds_page_frag, f_item);
	len = be32_to_cpu(inc->i_hdr.h_len);

	while (iov_iter_count(to) && copied < len) {
		if (frag_off == RDS_FRAG_SIZE) {
			frag = list_entry(frag->f_item.next,
					  struct rds_page_frag, f_item);
			frag_off = 0;
		}
		to_copy = min_t(unsigned long, iov_iter_count(to),
				RDS_FRAG_SIZE - frag_off);
		to_copy = min_t(unsigned long, to_copy, len - copied);

		/* XXX needs + offset for multiple recvs per page */
		rds_stats_add(s_copy_to_user, to_copy);
		ret = copy_page_to_iter(sg_page(&frag->f_sg),
					frag->f_sg.offset + frag_off,
					to_copy,
					to);
		if (ret != to_copy)
			return -EFAULT;

		frag_off += to_copy;
		copied += to_copy;
	}

	return copied;
}

/* ic starts out kzalloc()ed */
void rds_ib_recv_init_ack(struct rds_ib_connection *ic)
{
	struct ib_send_wr *wr = &ic->i_ack_wr;
	struct ib_sge *sge = &ic->i_ack_sge;

	sge->addr = ic->i_ack_dma;
	sge->length = sizeof(struct rds_header);
	sge->lkey = ic->i_pd->local_dma_lkey;

	wr->sg_list = sge;
	wr->num_sge = 1;
	wr->opcode = IB_WR_SEND;
	wr->wr_id = RDS_IB_ACK_WR_ID;
	wr->send_flags = IB_SEND_SIGNALED | IB_SEND_SOLICITED;
}

/*
 * You'd think that with reliable IB connections you wouldn't need to ack
 * messages that have been received. The problem is that IB hardware generates
 * an ack message before it has DMAed the message into memory. This creates a
 * potential message loss if the HCA is disabled for any reason between when it
 * sends the ack and before the message is DMAed and processed. This is only a
 * potential issue if another HCA is available for fail-over.
 *
 * When the remote host receives our ack they'll free the sent message from
 * their send queue. To decrease the latency of this we always send an ack
 * immediately after we've received messages.
 *
 * For simplicity, we only have one ack in flight at a time. This puts
 * pressure on senders to have deep enough send queues to absorb the latency of
 * a single ack frame being in flight. This might not be good enough.
 *
 * This is implemented by having a long-lived send_wr and sge which point to a
 * statically allocated ack frame. This ack wr does not fall under the ring
 * accounting that the tx and rx wrs do. The QP attribute specifically makes
 * room for it beyond the ring size. Send completion notices its special
 * wr_id and avoids working with the ring in that case.
 */
#ifndef KERNEL_HAS_ATOMIC64
void rds_ib_set_ack(struct rds_ib_connection *ic, u64 seq, int ack_required)
{
	unsigned long flags;

	spin_lock_irqsave(&ic->i_ack_lock, flags);
	ic->i_ack_next = seq;
	if (ack_required)
		set_bit(IB_ACK_REQUESTED, &ic->i_ack_flags);
	spin_unlock_irqrestore(&ic->i_ack_lock, flags);
}

static u64 rds_ib_get_ack(struct rds_ib_connection *ic)
{
	unsigned long flags;
	u64 seq;

	clear_bit(IB_ACK_REQUESTED, &ic->i_ack_flags);

	spin_lock_irqsave(&ic->i_ack_lock, flags);
	seq = ic->i_ack_next;
	spin_unlock_irqrestore(&ic->i_ack_lock, flags);

	return seq;
}
#else
void rds_ib_set_ack(struct rds_ib_connection *ic, u64 seq, int ack_required)
{
	atomic64_set(&ic->i_ack_next, seq);
	if (ack_required) {
		smp_mb__before_atomic();
		set_bit(IB_ACK_REQUESTED, &ic->i_ack_flags);
	}
}

static u64 rds_ib_get_ack(struct rds_ib_connection *ic)
{
	clear_bit(IB_ACK_REQUESTED, &ic->i_ack_flags);
	smp_mb__after_atomic();

	return atomic64_read(&ic->i_ack_next);
}
#endif

static void rds_ib_send_ack(struct rds_ib_connection *ic, unsigned int adv_credits)
{
	struct rds_header *hdr = ic->i_ack;
	u64 seq;
	int ret;

	seq = rds_ib_get_ack(ic);

	rdsdebug("send_ack: ic %p ack %llu\n", ic, (unsigned long long) seq);
	rds_message_populate_header(hdr, 0, 0, 0);
	hdr->h_ack = cpu_to_be64(seq);
	hdr->h_credit = adv_credits;
	rds_message_make_checksum(hdr);
	ic->i_ack_queued = jiffies;

	ret = ib_post_send(ic->i_cm_id->qp, &ic->i_ack_wr, NULL);
	if (unlikely(ret)) {
		/* Failed to send. Release the WR, and
		 * force another ACK.
		 */
		clear_bit(IB_ACK_IN_FLIGHT, &ic->i_ack_flags);
		set_bit(IB_ACK_REQUESTED, &ic->i_ack_flags);

		rds_ib_stats_inc(s_ib_ack_send_failure);

		rds_ib_conn_error(ic->conn, "sending ack failed\n");
	} else
		rds_ib_stats_inc(s_ib_ack_sent);
}

/*
 * There are 3 ways of getting acknowledgements to the peer:
 *  1.	We call rds_ib_attempt_ack from the recv completion handler
 *	to send an ACK-only frame.
 *	However, there can be only one such frame in the send queue
 *	at any time, so we may have to postpone it.
 *  2.	When another (data) packet is transmitted while there's
 *	an ACK in the queue, we piggyback the ACK sequence number
 *	on the data packet.
 *  3.	If the ACK WR is done sending, we get called from the
 *	send queue completion handler, and check whether there's
 *	another ACK pending (postponed because the WR was on the
 *	queue). If so, we transmit it.
 *
 * We maintain 2 variables:
 *  - i_ack_flags, which keeps track of whether the ACK WR
 *    is currently in the send queue or not (IB_ACK_IN_FLIGHT)
 *  - i_ack_next, which is the last sequence number we received
 *
 * Potentially, send queue and receive queue handlers can run concurrently.
 * It would be nice to not have to use a spinlock to synchronize things,
 * but the one problem that rules this out is that 64bit updates are
 * not atomic on all platforms. Things would be a lot simpler if
 * we had atomic64 or maybe cmpxchg64 everywhere.
 *
 * Reconnecting complicates this picture just slightly. When we
 * reconnect, we may be seeing duplicate packets. The peer
 * is retransmitting them, because it hasn't seen an ACK for
 * them. It is important that we ACK these.
 *
 * ACK mitigation adds a header flag "ACK_REQUIRED"; any packet with
 * this flag set *MUST* be acknowledged immediately.
 */

/*
 * When we get here, we're called from the recv queue handler.
 * Check whether we ought to transmit an ACK.
 */
void rds_ib_attempt_ack(struct rds_ib_connection *ic)
{
	unsigned int adv_credits;

	if (!test_bit(IB_ACK_REQUESTED, &ic->i_ack_flags))
		return;

	if (test_and_set_bit(IB_ACK_IN_FLIGHT, &ic->i_ack_flags)) {
		rds_ib_stats_inc(s_ib_ack_send_delayed);
		return;
	}

	/* Can we get a send credit? */
	if (!rds_ib_send_grab_credits(ic, 1, &adv_credits, 0, RDS_MAX_ADV_CREDIT)) {
		rds_ib_stats_inc(s_ib_tx_throttle);
		clear_bit(IB_ACK_IN_FLIGHT, &ic->i_ack_flags);
		return;
	}

	clear_bit(IB_ACK_REQUESTED, &ic->i_ack_flags);
	rds_ib_send_ack(ic, adv_credits);
}

/*
 * We get here from the send completion handler, when the
 * adapter tells us the ACK frame was sent.
 */
void rds_ib_ack_send_complete(struct rds_ib_connection *ic)
{
	clear_bit(IB_ACK_IN_FLIGHT, &ic->i_ack_flags);
	rds_ib_attempt_ack(ic);
}

/*
 * This is called by the regular xmit code when it wants to piggyback
 * an ACK on an outgoing frame.
 */
u64 rds_ib_piggyb_ack(struct rds_ib_connection *ic)
{
	if (test_and_clear_bit(IB_ACK_REQUESTED, &ic->i_ack_flags))
		rds_ib_stats_inc(s_ib_ack_send_piggybacked);
	return rds_ib_get_ack(ic);
}

/*
 * It's kind of lame that we're copying from the posted receive pages into
 * long-lived bitmaps. We could have posted the bitmaps and rdma written into
 * them. But receiving new congestion bitmaps should be a *rare* event, so
 * hopefully we won't need to invest that complexity in making it more
 * efficient. By copying we can share a simpler core with TCP which has to
 * copy.
 */
static void rds_ib_cong_recv(struct rds_connection *conn,
			     struct rds_ib_incoming *ibinc)
{
	struct rds_cong_map *map;
	unsigned int map_off;
	unsigned int map_page;
	struct rds_page_frag *frag;
	unsigned long frag_off;
	unsigned long to_copy;
	unsigned long copied;
	__le64 uncongested = 0;
	void *addr;

	/* catch completely corrupt packets */
	if (be32_to_cpu(ibinc->ii_inc.i_hdr.h_len) != RDS_CONG_MAP_BYTES)
		return;

	map = conn->c_fcong;
	map_page = 0;
	map_off = 0;

	frag = list_entry(ibinc->ii_frags.next, struct rds_page_frag, f_item);
	frag_off = 0;

	copied = 0;

	while (copied < RDS_CONG_MAP_BYTES) {
		__le64 *src, *dst;
		unsigned int k;

		to_copy = min(RDS_FRAG_SIZE - frag_off, PAGE_SIZE - map_off);
		BUG_ON(to_copy & 7); /* Must be 64bit aligned. */

		addr = kmap_atomic(sg_page(&frag->f_sg));

		src = addr + frag->f_sg.offset + frag_off;
		dst = (void *)map->m_page_addrs[map_page] + map_off;
		for (k = 0; k < to_copy; k += 8) {
			/* Record ports that became uncongested, ie
			 * bits that changed from 0 to 1. */
			uncongested |= ~(*src) & *dst;
			*dst++ = *src++;
		}
		kunmap_atomic(addr);

		copied += to_copy;

		map_off += to_copy;
		if (map_off == PAGE_SIZE) {
			map_off = 0;
			map_page++;
		}

		frag_off += to_copy;
		if (frag_off == RDS_FRAG_SIZE) {
			frag = list_entry(frag->f_item.next,
					  struct rds_page_frag, f_item);
			frag_off = 0;
		}
	}

	/* the congestion map is in little endian order */
	rds_cong_map_updated(map, le64_to_cpu(uncongested));
}

static void rds_ib_process_recv(struct rds_connection *conn,
				struct rds_ib_recv_work *recv, u32 data_len,
				struct rds_ib_ack_state *state)
{
	struct rds_ib_connection *ic = conn->c_transport_data;
	struct rds_ib_incoming *ibinc = ic->i_ibinc;
	struct rds_header *ihdr, *hdr;

	/* XXX shut down the connection if port 0,0 are seen? */

	rdsdebug("ic %p ibinc %p recv %p byte len %u\n", ic, ibinc, recv,
		 data_len);

	if (data_len < sizeof(struct rds_header)) {
		rds_ib_conn_error(conn, "incoming message "
		       "from %pI6c didn't include a "
		       "header, disconnecting and "
		       "reconnecting\n",
		       &conn->c_faddr);
		return;
	}
	data_len -= sizeof(struct rds_header);

	ihdr = ic->i_recv_hdrs[recv - ic->i_recvs];

	/* Validate the checksum. */
	if (!rds_message_verify_checksum(ihdr)) {
		rds_ib_conn_error(conn, "incoming message "
		       "from %pI6c has corrupted header - "
		       "forcing a reconnect\n",
		       &conn->c_faddr);
		rds_stats_inc(s_recv_drop_bad_checksum);
		return;
	}

	/* Process the ACK sequence which comes with every packet */
	state->ack_recv = be64_to_cpu(ihdr->h_ack);
	state->ack_recv_valid = 1;

	/* Process the credits update if there was one */
	if (ihdr->h_credit)
		rds_ib_send_add_credits(conn, ihdr->h_credit);

	if (ihdr->h_sport == 0 && ihdr->h_dport == 0 && data_len == 0) {
		/* This is an ACK-only packet. The fact that it gets
		 * special treatment here is that historically, ACKs
		 * were rather special beasts.
		 */
		rds_ib_stats_inc(s_ib_ack_received);

		/*
		 * Usually the frags make their way on to incs and are then freed as
		 * the inc is freed. We don't go that route, so we have to drop the
		 * page ref ourselves. We can't just leave the page on the recv
		 * because that confuses the dma mapping of pages and each recv's use
		 * of a partial page.
		 *
		 * FIXME: Fold this into the code path below.
		 */
		rds_ib_frag_free(ic, recv->r_frag);
		recv->r_frag = NULL;
		return;
	}

	/*
	 * If we don't already have an inc on the connection then this
	 * fragment has a header and starts a message. Copy its header
	 * into the inc and save the inc so we can hang upcoming fragments
	 * off its list.
	 */
	if (!ibinc) {
		ibinc = recv->r_ibinc;
		recv->r_ibinc = NULL;
		ic->i_ibinc = ibinc;

		hdr = &ibinc->ii_inc.i_hdr;
		ibinc->ii_inc.i_rx_lat_trace[RDS_MSG_RX_HDR] =
				local_clock();
		memcpy(hdr, ihdr, sizeof(*hdr));
		ic->i_recv_data_rem = be32_to_cpu(hdr->h_len);
		ibinc->ii_inc.i_rx_lat_trace[RDS_MSG_RX_START] =
				local_clock();

		rdsdebug("ic %p ibinc %p rem %u flag 0x%x\n", ic, ibinc,
			 ic->i_recv_data_rem, hdr->h_flags);
	} else {
		hdr = &ibinc->ii_inc.i_hdr;
		/* We can't just use memcmp here; fragments of a
		 * single message may carry different ACKs */
		if (hdr->h_sequence != ihdr->h_sequence ||
		    hdr->h_len != ihdr->h_len ||
		    hdr->h_sport != ihdr->h_sport ||
		    hdr->h_dport != ihdr->h_dport) {
			rds_ib_conn_error(conn,
				"fragment header mismatch; forcing reconnect\n");
			return;
		}
	}

	list_add_tail(&recv->r_frag->f_item, &ibinc->ii_frags);
	recv->r_frag = NULL;

	if (ic->i_recv_data_rem > RDS_FRAG_SIZE)
		ic->i_recv_data_rem -= RDS_FRAG_SIZE;
	else {
		ic->i_recv_data_rem = 0;
		ic->i_ibinc = NULL;

		if (ibinc->ii_inc.i_hdr.h_flags == RDS_FLAG_CONG_BITMAP) {
			rds_ib_cong_recv(conn, ibinc);
		} else {
			rds_recv_incoming(conn, &conn->c_faddr, &conn->c_laddr,
					  &ibinc->ii_inc, GFP_ATOMIC);
			state->ack_next = be64_to_cpu(hdr->h_sequence);
			state->ack_next_valid = 1;
		}

		/* Evaluate the ACK_REQUIRED flag *after* we received
		 * the complete frame, and after bumping the next_rx
		 * sequence. */
		if (hdr->h_flags & RDS_FLAG_ACK_REQUIRED) {
			rds_stats_inc(s_recv_ack_required);
			state->ack_required = 1;
		}

		rds_inc_put(&ibinc->ii_inc);
	}
}

void rds_ib_recv_cqe_handler(struct rds_ib_connection *ic,
			     struct ib_wc *wc,
			     struct rds_ib_ack_state *state)
{
	struct rds_connection *conn = ic->conn;
	struct rds_ib_recv_work *recv;

	rdsdebug("wc wr_id 0x%llx status %u (%s) byte_len %u imm_data %u\n",
		 (unsigned long long)wc->wr_id, wc->status,
		 ib_wc_status_msg(wc->status), wc->byte_len,
		 be32_to_cpu(wc->ex.imm_data));

	rds_ib_stats_inc(s_ib_rx_cq_event);
	recv = &ic->i_recvs[rds_ib_ring_oldest(&ic->i_recv_ring)];
	ib_dma_unmap_sg(ic->i_cm_id->device, &recv->r_frag->f_sg, 1,
			DMA_FROM_DEVICE);

	/* Also process recvs in connecting state because it is possible
	 * to get a recv completion _before_ the rdmacm ESTABLISHED
	 * event is processed.
	 */
	if (wc->status == IB_WC_SUCCESS) {
		rds_ib_process_recv(conn, recv, wc->byte_len, state);
	} else {
		/* We expect errors as the qp is drained during shutdown */
		if (rds_conn_up(conn) || rds_conn_connecting(conn))
			rds_ib_conn_error(conn, "recv completion on <%pI6c,%pI6c, %d> had status %u (%s), vendor err 0x%x, disconnecting and reconnecting\n",
					  &conn->c_laddr, &conn->c_faddr,
					  conn->c_tos, wc->status,
					  ib_wc_status_msg(wc->status),
					  wc->vendor_err);
	}

	/* rds_ib_process_recv() doesn't always consume the frag, and
	 * we might not have called it at all if the wc didn't indicate
	 * success. We already unmapped the frag's pages, though, and
	 * the following rds_ib_ring_free() call tells the refill path
	 * that it will not find an allocated frag here. Make sure we
	 * keep that promise by freeing a frag that's still on the ring.
	 */
	if (recv->r_frag) {
		rds_ib_frag_free(ic, recv->r_frag);
		recv->r_frag = NULL;
	}
	rds_ib_ring_free(&ic->i_recv_ring, 1);

	/* If we ever end up with a really empty receive ring, we're
	 * in deep trouble, as the sender will definitely see RNR
	 * timeouts. */
	if (rds_ib_ring_empty(&ic->i_recv_ring))
		rds_ib_stats_inc(s_ib_rx_ring_empty);

	if (rds_ib_ring_low(&ic->i_recv_ring)) {
		rds_ib_recv_refill(conn, 0, GFP_NOWAIT);
		rds_ib_stats_inc(s_ib_rx_refill_from_cq);
	}
}

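/* Worker-thread entry point: send any pending ACK and refill the recv ring
 * with GFP_KERNEL allocations, which may sleep (unlike the softirq path).
 */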
int rds_ib_recv_path(struct rds_conn_path *cp)
{
	struct rds_connection *conn = cp->cp_conn;
	struct rds_ib_connection *ic = conn->c_transport_data;

	rdsdebug("conn %p\n", conn);
	if (rds_conn_up(conn)) {
		rds_ib_attempt_ack(ic);
		rds_ib_recv_refill(conn, 0, GFP_KERNEL);
		rds_ib_stats_inc(s_ib_rx_refill_from_thread);
	}

	return 0;
}

int rds_ib_recv_init(void)
{
	struct sysinfo si;
	int ret = -ENOMEM;

	/* Default to 30% of all available RAM for recv memory */
	si_meminfo(&si);
	rds_ib_sysctl_max_recv_allocation = si.totalram / 3 * PAGE_SIZE / RDS_FRAG_SIZE;

	rds_ib_incoming_slab =
		kmem_cache_create_usercopy("rds_ib_incoming",
					   sizeof(struct rds_ib_incoming),
					   0, SLAB_HWCACHE_ALIGN,
					   offsetof(struct rds_ib_incoming,
						    ii_inc.i_usercopy),
					   sizeof(struct rds_inc_usercopy),
					   NULL);
	if (!rds_ib_incoming_slab)
		goto out;

	rds_ib_frag_slab = kmem_cache_create("rds_ib_frag",
					     sizeof(struct rds_page_frag),
					     0, SLAB_HWCACHE_ALIGN, NULL);
	if (!rds_ib_frag_slab) {
		kmem_cache_destroy(rds_ib_incoming_slab);
		rds_ib_incoming_slab = NULL;
	} else
		ret = 0;
out:
	return ret;
}

void rds_ib_recv_exit(void)
{
	WARN_ON(atomic_read(&rds_ib_allocation));

	kmem_cache_destroy(rds_ib_incoming_slab);
	kmem_cache_destroy(rds_ib_frag_slab);
}