/*
 * Copyright(c) 2015 - 2018 Intel Corporation.
 *
 * This file is provided under a dual BSD/GPLv2 license.  When using or
 * redistributing this file, you may do so under either license.
 *
 * GPL LICENSE SUMMARY
 *
 * This program is free software; you can redistribute it and/or modify
 * it under the terms of version 2 of the GNU General Public License as
 * published by the Free Software Foundation.
 *
 * This program is distributed in the hope that it will be useful, but
 * WITHOUT ANY WARRANTY; without even the implied warranty of
 * MERCHANTABILITY or FITNESS FOR A PARTICULAR PURPOSE.  See the GNU
 * General Public License for more details.
 *
 * BSD LICENSE
 *
 * Redistribution and use in source and binary forms, with or without
 * modification, are permitted provided that the following conditions
 * are met:
 *
 *  - Redistributions of source code must retain the above copyright
 *    notice, this list of conditions and the following disclaimer.
 *  - Redistributions in binary form must reproduce the above copyright
 *    notice, this list of conditions and the following disclaimer in
 *    the documentation and/or other materials provided with the
 *    distribution.
 *  - Neither the name of Intel Corporation nor the names of its
 *    contributors may be used to endorse or promote products derived
 *    from this software without specific prior written permission.
 *
 * THIS SOFTWARE IS PROVIDED BY THE COPYRIGHT HOLDERS AND CONTRIBUTORS
 * "AS IS" AND ANY EXPRESS OR IMPLIED WARRANTIES, INCLUDING, BUT NOT
 * LIMITED TO, THE IMPLIED WARRANTIES OF MERCHANTABILITY AND FITNESS FOR
 * A PARTICULAR PURPOSE ARE DISCLAIMED. IN NO EVENT SHALL THE COPYRIGHT
 * OWNER OR CONTRIBUTORS BE LIABLE FOR ANY DIRECT, INDIRECT, INCIDENTAL,
 * SPECIAL, EXEMPLARY, OR CONSEQUENTIAL DAMAGES (INCLUDING, BUT NOT
 * LIMITED TO, PROCUREMENT OF SUBSTITUTE GOODS OR SERVICES; LOSS OF USE,
 * DATA, OR PROFITS; OR BUSINESS INTERRUPTION) HOWEVER CAUSED AND ON ANY
 * THEORY OF LIABILITY, WHETHER IN CONTRACT, STRICT LIABILITY, OR TORT
 * (INCLUDING NEGLIGENCE OR OTHERWISE) ARISING IN ANY WAY OUT OF THE USE
 * OF THIS SOFTWARE, EVEN IF ADVISED OF THE POSSIBILITY OF SUCH DAMAGE.
 *
 */

#include <linux/spinlock.h>

#include "hfi.h"
#include "mad.h"
#include "qp.h"
#include "verbs_txreq.h"
#include "trace.h"

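/*
 * Return non-zero if @gid matches the given interface id and its subnet
 * prefix is either @gid_prefix or the default GID prefix.
 */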
static int gid_ok(union ib_gid *gid, __be64 gid_prefix, __be64 id)
{
        return (gid->global.interface_id == id &&
                (gid->global.subnet_prefix == gid_prefix ||
                 gid->global.subnet_prefix == IB_DEFAULT_GID_PREFIX));
}

/*
 * Validate an incoming packet header against the QP's primary or
 * alternate path attributes.  Return 1 if the header is inconsistent
 * with the QP state, 0 otherwise.
 *
 * This should be called with the QP r_lock held.
 *
 * The s_lock will be acquired around the hfi1_migrate_qp() call.
 */
int hfi1_ruc_check_hdr(struct hfi1_ibport *ibp, struct hfi1_packet *packet)
{
        __be64 guid;
        unsigned long flags;
        struct rvt_qp *qp = packet->qp;
        u8 sc5 = ibp->sl_to_sc[rdma_ah_get_sl(&qp->remote_ah_attr)];
        u32 dlid = packet->dlid;
        u32 slid = packet->slid;
        u32 sl = packet->sl;
        bool migrated = packet->migrated;
        u16 pkey = packet->pkey;

        if (qp->s_mig_state == IB_MIG_ARMED && migrated) {
                if (!packet->grh) {
                        if ((rdma_ah_get_ah_flags(&qp->alt_ah_attr) &
                             IB_AH_GRH) &&
                            (packet->etype != RHF_RCV_TYPE_BYPASS))
                                return 1;
                } else {
                        const struct ib_global_route *grh;

                        if (!(rdma_ah_get_ah_flags(&qp->alt_ah_attr) &
                              IB_AH_GRH))
                                return 1;
                        grh = rdma_ah_read_grh(&qp->alt_ah_attr);
                        guid = get_sguid(ibp, grh->sgid_index);
                        if (!gid_ok(&packet->grh->dgid, ibp->rvp.gid_prefix,
                                    guid))
                                return 1;
                        if (!gid_ok(
                                &packet->grh->sgid,
                                grh->dgid.global.subnet_prefix,
                                grh->dgid.global.interface_id))
                                return 1;
                }
                if (unlikely(rcv_pkey_check(ppd_from_ibp(ibp), pkey,
                                            sc5, slid))) {
                        hfi1_bad_pkey(ibp, pkey, sl, 0, qp->ibqp.qp_num,
                                      slid, dlid);
                        return 1;
                }
                /* Validate the SLID. See Ch. 9.6.1.5 and 17.2.8 */
                if (slid != rdma_ah_get_dlid(&qp->alt_ah_attr) ||
                    ppd_from_ibp(ibp)->port !=
                    rdma_ah_get_port_num(&qp->alt_ah_attr))
                        return 1;
                spin_lock_irqsave(&qp->s_lock, flags);
                hfi1_migrate_qp(qp);
                spin_unlock_irqrestore(&qp->s_lock, flags);
        } else {
                if (!packet->grh) {
                        if ((rdma_ah_get_ah_flags(&qp->remote_ah_attr) &
                             IB_AH_GRH) &&
                            (packet->etype != RHF_RCV_TYPE_BYPASS))
                                return 1;
                } else {
                        const struct ib_global_route *grh;

                        if (!(rdma_ah_get_ah_flags(&qp->remote_ah_attr) &
                              IB_AH_GRH))
                                return 1;
                        grh = rdma_ah_read_grh(&qp->remote_ah_attr);
                        guid = get_sguid(ibp, grh->sgid_index);
                        if (!gid_ok(&packet->grh->dgid, ibp->rvp.gid_prefix,
                                    guid))
                                return 1;
                        if (!gid_ok(
                                &packet->grh->sgid,
                                grh->dgid.global.subnet_prefix,
                                grh->dgid.global.interface_id))
                                return 1;
                }
                if (unlikely(rcv_pkey_check(ppd_from_ibp(ibp), pkey,
                                            sc5, slid))) {
                        hfi1_bad_pkey(ibp, pkey, sl, 0, qp->ibqp.qp_num,
                                      slid, dlid);
                        return 1;
                }
                /* Validate the SLID. See Ch. 9.6.1.5 */
                if ((slid != rdma_ah_get_dlid(&qp->remote_ah_attr)) ||
                    ppd_from_ibp(ibp)->port != qp->port_num)
                        return 1;
                if (qp->s_mig_state == IB_MIG_REARM && !migrated)
                        qp->s_mig_state = IB_MIG_ARMED;
        }

        return 0;
}

/**
 * hfi1_make_grh - construct a GRH header
 * @ibp: a pointer to the IB port
 * @hdr: a pointer to the GRH header being constructed
 * @grh: the global route address to send to
 * @hwords: the size of the header after the GRH, in 32 bit words
 * @nwords: the number of 32 bit words of data being sent
 *
 * Return the size of the GRH in 32 bit words.
 */
u32 hfi1_make_grh(struct hfi1_ibport *ibp, struct ib_grh *hdr,
                  const struct ib_global_route *grh, u32 hwords, u32 nwords)
{
        hdr->version_tclass_flow =
                cpu_to_be32((IB_GRH_VERSION << IB_GRH_VERSION_SHIFT) |
                            (grh->traffic_class << IB_GRH_TCLASS_SHIFT) |
                            (grh->flow_label << IB_GRH_FLOW_SHIFT));
        hdr->paylen = cpu_to_be16((hwords + nwords) << 2);
        /* next_hdr is defined by C8-7 in ch. 8.4.1 */
        hdr->next_hdr = IB_GRH_NEXT_HDR;
        hdr->hop_limit = grh->hop_limit;
        /* The SGID is 32-bit aligned. */
        hdr->sgid.global.subnet_prefix = ibp->rvp.gid_prefix;
        hdr->sgid.global.interface_id =
                grh->sgid_index < HFI1_GUIDS_PER_PORT ?
                get_sguid(ibp, grh->sgid_index) :
                get_sguid(ibp, HFI1_PORT_GUID_INDEX);
        hdr->dgid = grh->dgid;

        /* GRH header size in 32-bit words. */
        return sizeof(struct ib_grh) / sizeof(u32);
}

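/*
 * Dword offset of bth[2] (the PSN word) within the SDMA header; used by
 * build_ahg() below as the target offset for AHG PSN edits.
 */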
#define BTH2_OFFSET (offsetof(struct hfi1_sdma_header, \
                              hdr.ibh.u.oth.bth[2]) / 4)

/**
 * build_ahg - create ahg in s_ahg
 * @qp: a pointer to QP
 * @npsn: the next PSN for the request/response
 *
 * This routine handles the AHG by allocating an ahg entry and causing the
 * header of the first middle packet to be copied into it.
 *
 * Subsequent middles reuse the copied entry, editing only the
 * PSN with 1 or 2 edits.
 */
static inline void build_ahg(struct rvt_qp *qp, u32 npsn)
{
        struct hfi1_qp_priv *priv = qp->priv;

        if (unlikely(qp->s_flags & HFI1_S_AHG_CLEAR))
                clear_ahg(qp);
        if (!(qp->s_flags & HFI1_S_AHG_VALID)) {
                /* first middle that needs copy */
                if (qp->s_ahgidx < 0)
                        qp->s_ahgidx = sdma_ahg_alloc(priv->s_sde);
                if (qp->s_ahgidx >= 0) {
                        qp->s_ahgpsn = npsn;
                        priv->s_ahg->tx_flags |= SDMA_TXREQ_F_AHG_COPY;
                        /* save to protect a change in another thread */
                        priv->s_ahg->ahgidx = qp->s_ahgidx;
                        qp->s_flags |= HFI1_S_AHG_VALID;
                }
        } else {
                /* subsequent middle after valid */
                if (qp->s_ahgidx >= 0) {
                        priv->s_ahg->tx_flags |= SDMA_TXREQ_F_USE_AHG;
                        priv->s_ahg->ahgidx = qp->s_ahgidx;
                        priv->s_ahg->ahgcount++;
                        priv->s_ahg->ahgdesc[0] =
                                sdma_build_ahg_descriptor(
                                        (__force u16)cpu_to_be16((u16)npsn),
                                        BTH2_OFFSET,
                                        16,
                                        16);
                        if ((npsn & 0xffff0000) !=
                            (qp->s_ahgpsn & 0xffff0000)) {
                                priv->s_ahg->ahgcount++;
                                priv->s_ahg->ahgdesc[1] =
                                        sdma_build_ahg_descriptor(
                                                (__force u16)cpu_to_be16(
                                                (u16)(npsn >> 16)),
                                                BTH2_OFFSET,
                                                0,
                                                16);
                        }
                }
        }
}

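/*
 * Write the three BTH words supplied by the RC/UC builders into the
 * outgoing header in network byte order.
 */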
static inline void hfi1_make_ruc_bth(struct rvt_qp *qp,
                                     struct ib_other_headers *ohdr,
                                     u32 bth0, u32 bth1, u32 bth2)
{
        ohdr->bth[0] = cpu_to_be32(bth0);
        ohdr->bth[1] = cpu_to_be32(bth1);
        ohdr->bth[2] = cpu_to_be32(bth2);
}

/**
 * hfi1_make_ruc_header_16B - build a 16B header
 * @qp: the queue pair
 * @ohdr: a pointer to the destination header memory
 * @bth0: bth0 passed in from the RC/UC builder
 * @bth1: bth1 passed in from the RC/UC builder
 * @bth2: bth2 passed in from the RC/UC builder
 * @middle: non-zero indicates that ahg "could" be used
 * @ps: the current packet state
 *
 * This routine may disarm ahg under these situations:
 * - packet needs a GRH
 * - BECN needed
 * - migration state not IB_MIG_MIGRATED
 */
static inline void hfi1_make_ruc_header_16B(struct rvt_qp *qp,
                                            struct ib_other_headers *ohdr,
                                            u32 bth0, u32 bth1, u32 bth2,
                                            int middle,
                                            struct hfi1_pkt_state *ps)
{
        struct hfi1_qp_priv *priv = qp->priv;
        struct hfi1_ibport *ibp = ps->ibp;
        struct hfi1_pportdata *ppd = ppd_from_ibp(ibp);
        u32 slid;
        u16 pkey = hfi1_get_pkey(ibp, qp->s_pkey_index);
        u8 l4 = OPA_16B_L4_IB_LOCAL;
        u8 extra_bytes = hfi1_get_16b_padding(
                                (ps->s_txreq->hdr_dwords << 2),
                                ps->s_txreq->s_cur_size);
        u32 nwords = SIZE_OF_CRC + ((ps->s_txreq->s_cur_size +
                                 extra_bytes + SIZE_OF_LT) >> 2);
        bool becn = false;

        if (unlikely(rdma_ah_get_ah_flags(&qp->remote_ah_attr) & IB_AH_GRH) &&
            hfi1_check_mcast(rdma_ah_get_dlid(&qp->remote_ah_attr))) {
                struct ib_grh *grh;
                struct ib_global_route *grd =
                        rdma_ah_retrieve_grh(&qp->remote_ah_attr);
                /*
                 * Ensure OPA GIDs are transformed to IB gids
                 * before creating the GRH.
                 */
                if (grd->sgid_index == OPA_GID_INDEX)
                        grd->sgid_index = 0;
                grh = &ps->s_txreq->phdr.hdr.opah.u.l.grh;
                l4 = OPA_16B_L4_IB_GLOBAL;
                ps->s_txreq->hdr_dwords +=
                        hfi1_make_grh(ibp, grh, grd,
                                      ps->s_txreq->hdr_dwords - LRH_16B_DWORDS,
                                      nwords);
                middle = 0;
        }

        if (qp->s_mig_state == IB_MIG_MIGRATED)
                bth1 |= OPA_BTH_MIG_REQ;
        else
                middle = 0;

        if (qp->s_flags & RVT_S_ECN) {
                qp->s_flags &= ~RVT_S_ECN;
                /* we recently received a FECN, so return a BECN */
                becn = true;
                middle = 0;
        }
        if (middle)
                build_ahg(qp, bth2);
        else
                qp->s_flags &= ~HFI1_S_AHG_VALID;

        bth0 |= pkey;
        bth0 |= extra_bytes << 20;
        hfi1_make_ruc_bth(qp, ohdr, bth0, bth1, bth2);

        if (!ppd->lid)
                slid = be32_to_cpu(OPA_LID_PERMISSIVE);
        else
                slid = ppd->lid |
                        (rdma_ah_get_path_bits(&qp->remote_ah_attr) &
                        ((1 << ppd->lmc) - 1));

        hfi1_make_16b_hdr(&ps->s_txreq->phdr.hdr.opah,
                          slid,
                          opa_get_lid(rdma_ah_get_dlid(&qp->remote_ah_attr),
                                      16B),
                          (ps->s_txreq->hdr_dwords + nwords) >> 1,
                          pkey, becn, 0, l4, priv->s_sc);
}

/**
 * hfi1_make_ruc_header_9B - build a 9B header
 * @qp: the queue pair
 * @ohdr: a pointer to the destination header memory
 * @bth0: bth0 passed in from the RC/UC builder
 * @bth1: bth1 passed in from the RC/UC builder
 * @bth2: bth2 passed in from the RC/UC builder
 * @middle: non-zero indicates that ahg "could" be used
 * @ps: the current packet state
 *
 * This routine may disarm ahg under these situations:
 * - packet needs a GRH
 * - BECN needed
 * - migration state not IB_MIG_MIGRATED
 */
static inline void hfi1_make_ruc_header_9B(struct rvt_qp *qp,
                                           struct ib_other_headers *ohdr,
                                           u32 bth0, u32 bth1, u32 bth2,
                                           int middle,
                                           struct hfi1_pkt_state *ps)
{
        struct hfi1_qp_priv *priv = qp->priv;
        struct hfi1_ibport *ibp = ps->ibp;
        u16 pkey = hfi1_get_pkey(ibp, qp->s_pkey_index);
        u16 lrh0 = HFI1_LRH_BTH;
        /* pad bytes needed to bring the payload to a dword boundary */
        u8 extra_bytes = -ps->s_txreq->s_cur_size & 3;
        u32 nwords = SIZE_OF_CRC + ((ps->s_txreq->s_cur_size +
                                     extra_bytes) >> 2);

        if (unlikely(rdma_ah_get_ah_flags(&qp->remote_ah_attr) & IB_AH_GRH)) {
                struct ib_grh *grh = &ps->s_txreq->phdr.hdr.ibh.u.l.grh;

                lrh0 = HFI1_LRH_GRH;
                ps->s_txreq->hdr_dwords +=
                        hfi1_make_grh(ibp, grh,
                                      rdma_ah_read_grh(&qp->remote_ah_attr),
                                      ps->s_txreq->hdr_dwords - LRH_9B_DWORDS,
                                      nwords);
                middle = 0;
        }
        lrh0 |= (priv->s_sc & 0xf) << 12 |
                (rdma_ah_get_sl(&qp->remote_ah_attr) & 0xf) << 4;

        if (qp->s_mig_state == IB_MIG_MIGRATED)
                bth0 |= IB_BTH_MIG_REQ;
        else
                middle = 0;

        if (qp->s_flags & RVT_S_ECN) {
                qp->s_flags &= ~RVT_S_ECN;
                /* we recently received a FECN, so return a BECN */
                bth1 |= (IB_BECN_MASK << IB_BECN_SHIFT);
                middle = 0;
        }
        if (middle)
                build_ahg(qp, bth2);
        else
                qp->s_flags &= ~HFI1_S_AHG_VALID;

        bth0 |= pkey;
        bth0 |= extra_bytes << 20;
        hfi1_make_ruc_bth(qp, ohdr, bth0, bth1, bth2);
        hfi1_make_ib_hdr(&ps->s_txreq->phdr.hdr.ibh,
                         lrh0,
                         ps->s_txreq->hdr_dwords + nwords,
                         opa_get_lid(rdma_ah_get_dlid(&qp->remote_ah_attr), 9B),
                         ppd_from_ibp(ibp)->lid |
                                rdma_ah_get_path_bits(&qp->remote_ah_attr));
}

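/*
 * Signature shared by the 9B and 16B header builders; hfi1_make_ruc_header()
 * dispatches through a table of these based on the QP's hdr_type.
 */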
typedef void (*hfi1_make_ruc_hdr)(struct rvt_qp *qp,
                                  struct ib_other_headers *ohdr,
                                  u32 bth0, u32 bth1, u32 bth2, int middle,
                                  struct hfi1_pkt_state *ps);

/* We support only two types - 9B and 16B for now */
static const hfi1_make_ruc_hdr hfi1_ruc_header_tbl[2] = {
        [HFI1_PKT_TYPE_9B] = &hfi1_make_ruc_header_9B,
        [HFI1_PKT_TYPE_16B] = &hfi1_make_ruc_header_16B
};

void hfi1_make_ruc_header(struct rvt_qp *qp, struct ib_other_headers *ohdr,
                          u32 bth0, u32 bth1, u32 bth2, int middle,
                          struct hfi1_pkt_state *ps)
{
        struct hfi1_qp_priv *priv = qp->priv;

        /*
         * Reset s_ahg/AHG fields.
         *
         * This ensures that the ahgentry/ahgcount
         * are at a non-AHG default to keep
         * build_verbs_tx_desc() from using
         * a stale ahgidx.
         *
         * build_ahg() will modify as appropriate
         * to use the AHG feature.
         */
        priv->s_ahg->tx_flags = 0;
        priv->s_ahg->ahgcount = 0;
        priv->s_ahg->ahgidx = 0;

        /* Make the appropriate header */
        hfi1_ruc_header_tbl[priv->hdr_type](qp, ohdr, bth0, bth1, bth2, middle,
                                            ps);
}

/* when sending, force a reschedule every one of these periods */
#define SEND_RESCHED_TIMEOUT (5 * HZ)  /* 5s in jiffies */

/**
 * hfi1_schedule_send_yield - test for a yield required for QP
 * send engine
 * @qp: a pointer to QP
 * @ps: a pointer to a structure with commonly looked up values for
 *      the send engine's progress
 * @tid: true if it is the tid leg
 *
 * This routine checks if the time slice for the QP has expired
 * for RC QPs; if so, an additional work entry is queued. At this
 * point, other QPs have an opportunity to be scheduled. It
 * returns true if a yield is required, otherwise false
 * is returned.
 */
bool hfi1_schedule_send_yield(struct rvt_qp *qp, struct hfi1_pkt_state *ps,
                              bool tid)
{
        ps->pkts_sent = true;

        if (unlikely(time_after(jiffies, ps->timeout))) {
                if (!ps->in_thread ||
                    workqueue_congested(ps->cpu, ps->ppd->hfi1_wq)) {
                        spin_lock_irqsave(&qp->s_lock, ps->flags);
                        if (!tid) {
                                qp->s_flags &= ~RVT_S_BUSY;
                                hfi1_schedule_send(qp);
                        } else {
                                struct hfi1_qp_priv *priv = qp->priv;

                                if (priv->s_flags &
                                    HFI1_S_TID_BUSY_SET) {
                                        qp->s_flags &= ~RVT_S_BUSY;
                                        priv->s_flags &=
                                                ~(HFI1_S_TID_BUSY_SET |
                                                  RVT_S_BUSY);
                                } else {
                                        priv->s_flags &= ~RVT_S_BUSY;
                                }
                                hfi1_schedule_tid_send(qp);
                        }

                        spin_unlock_irqrestore(&qp->s_lock, ps->flags);
                        this_cpu_inc(*ps->ppd->dd->send_schedule);
                        trace_hfi1_rc_expired_time_slice(qp, true);
                        return true;
                }

                cond_resched();
                this_cpu_inc(*ps->ppd->dd->send_schedule);
                ps->timeout = jiffies + ps->timeout_int;
        }

        trace_hfi1_rc_expired_time_slice(qp, false);
        return false;
}

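/* Entry point used by rdmavt to run the send engine inline (not in a workqueue thread). */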
void hfi1_do_send_from_rvt(struct rvt_qp *qp)
{
        hfi1_do_send(qp, false);
}

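/* Workqueue handler: recover the QP from the iowait work item and run the send engine. */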
void _hfi1_do_send(struct work_struct *work)
{
        struct iowait_work *w = container_of(work, struct iowait_work, iowork);
        struct rvt_qp *qp = iowait_to_qp(w->iow);

        hfi1_do_send(qp, true);
}

/**
 * hfi1_do_send - perform a send on a QP
 * @qp: a pointer to the QP
 * @in_thread: true if in a workqueue thread
 *
 * Process entries in the send work queue until credit or queue is
 * exhausted.  Only allow one CPU to send a packet per QP.
 * Otherwise, two threads could send packets out of order.
 */
void hfi1_do_send(struct rvt_qp *qp, bool in_thread)
{
        struct hfi1_pkt_state ps;
        struct hfi1_qp_priv *priv = qp->priv;
        int (*make_req)(struct rvt_qp *qp, struct hfi1_pkt_state *ps);

        ps.dev = to_idev(qp->ibqp.device);
        ps.ibp = to_iport(qp->ibqp.device, qp->port_num);
        ps.ppd = ppd_from_ibp(ps.ibp);
        ps.in_thread = in_thread;
        ps.wait = iowait_get_ib_work(&priv->s_iowait);

        trace_hfi1_rc_do_send(qp, in_thread);

        switch (qp->ibqp.qp_type) {
        case IB_QPT_RC:
                if (!loopback && ((rdma_ah_get_dlid(&qp->remote_ah_attr) &
                                   ~((1 << ps.ppd->lmc) - 1)) ==
                                  ps.ppd->lid)) {
                        rvt_ruc_loopback(qp);
                        return;
                }
                make_req = hfi1_make_rc_req;
                ps.timeout_int = qp->timeout_jiffies;
                break;
        case IB_QPT_UC:
                if (!loopback && ((rdma_ah_get_dlid(&qp->remote_ah_attr) &
                                   ~((1 << ps.ppd->lmc) - 1)) ==
                                  ps.ppd->lid)) {
                        rvt_ruc_loopback(qp);
                        return;
                }
                make_req = hfi1_make_uc_req;
                ps.timeout_int = SEND_RESCHED_TIMEOUT;
                break;
        default:
                make_req = hfi1_make_ud_req;
                ps.timeout_int = SEND_RESCHED_TIMEOUT;
        }

        spin_lock_irqsave(&qp->s_lock, ps.flags);

        /* Return if we are already busy processing a work request. */
        if (!hfi1_send_ok(qp)) {
                if (qp->s_flags & HFI1_S_ANY_WAIT_IO)
                        iowait_set_flag(&priv->s_iowait, IOWAIT_PENDING_IB);
                spin_unlock_irqrestore(&qp->s_lock, ps.flags);
                return;
        }

        qp->s_flags |= RVT_S_BUSY;

        ps.timeout_int = ps.timeout_int / 8;
        ps.timeout = jiffies + ps.timeout_int;
        ps.cpu = priv->s_sde ? priv->s_sde->cpu :
                        cpumask_first(cpumask_of_node(ps.ppd->dd->node));
        ps.pkts_sent = false;

        /* ensure a pre-built packet is handled */
        ps.s_txreq = get_waiting_verbs_txreq(ps.wait);
        do {
                /* Check for a constructed packet to be sent. */
                if (ps.s_txreq) {
                        if (priv->s_flags & HFI1_S_TID_BUSY_SET)
                                qp->s_flags |= RVT_S_BUSY;
                        spin_unlock_irqrestore(&qp->s_lock, ps.flags);
                        /*
                         * If the packet cannot be sent now, return and
                         * the send engine will be woken up later.
                         */
                        if (hfi1_verbs_send(qp, &ps))
                                return;

                        /* allow other tasks to run */
                        if (hfi1_schedule_send_yield(qp, &ps, false))
                                return;

                        spin_lock_irqsave(&qp->s_lock, ps.flags);
                }
        } while (make_req(qp, &ps));
        iowait_starve_clear(ps.pkts_sent, &priv->s_iowait);
        spin_unlock_irqrestore(&qp->s_lock, ps.flags);
Mike Marciniszyn77241052015-07-30 15:17:43 -0400617}