Mike Marciniszyn | 7724105 | 2015-07-30 15:17:43 -0400 | [diff] [blame] | 1 | /* |
Mike Marciniszyn | 2e2ba09 | 2018-06-04 11:44:02 -0700 | [diff] [blame] | 2 | * Copyright(c) 2015 - 2018 Intel Corporation. |
Mike Marciniszyn | 7724105 | 2015-07-30 15:17:43 -0400 | [diff] [blame] | 3 | * |
| 4 | * This file is provided under a dual BSD/GPLv2 license. When using or |
| 5 | * redistributing this file, you may do so under either license. |
| 6 | * |
| 7 | * GPL LICENSE SUMMARY |
| 8 | * |
Mike Marciniszyn | 7724105 | 2015-07-30 15:17:43 -0400 | [diff] [blame] | 9 | * This program is free software; you can redistribute it and/or modify |
| 10 | * it under the terms of version 2 of the GNU General Public License as |
| 11 | * published by the Free Software Foundation. |
| 12 | * |
| 13 | * This program is distributed in the hope that it will be useful, but |
| 14 | * WITHOUT ANY WARRANTY; without even the implied warranty of |
| 15 | * MERCHANTABILITY or FITNESS FOR A PARTICULAR PURPOSE. See the GNU |
| 16 | * General Public License for more details. |
| 17 | * |
| 18 | * BSD LICENSE |
| 19 | * |
Mike Marciniszyn | 7724105 | 2015-07-30 15:17:43 -0400 | [diff] [blame] | 20 | * Redistribution and use in source and binary forms, with or without |
| 21 | * modification, are permitted provided that the following conditions |
| 22 | * are met: |
| 23 | * |
| 24 | * - Redistributions of source code must retain the above copyright |
| 25 | * notice, this list of conditions and the following disclaimer. |
| 26 | * - Redistributions in binary form must reproduce the above copyright |
| 27 | * notice, this list of conditions and the following disclaimer in |
| 28 | * the documentation and/or other materials provided with the |
| 29 | * distribution. |
| 30 | * - Neither the name of Intel Corporation nor the names of its |
| 31 | * contributors may be used to endorse or promote products derived |
| 32 | * from this software without specific prior written permission. |
| 33 | * |
| 34 | * THIS SOFTWARE IS PROVIDED BY THE COPYRIGHT HOLDERS AND CONTRIBUTORS |
| 35 | * "AS IS" AND ANY EXPRESS OR IMPLIED WARRANTIES, INCLUDING, BUT NOT |
| 36 | * LIMITED TO, THE IMPLIED WARRANTIES OF MERCHANTABILITY AND FITNESS FOR |
| 37 | * A PARTICULAR PURPOSE ARE DISCLAIMED. IN NO EVENT SHALL THE COPYRIGHT |
| 38 | * OWNER OR CONTRIBUTORS BE LIABLE FOR ANY DIRECT, INDIRECT, INCIDENTAL, |
| 39 | * SPECIAL, EXEMPLARY, OR CONSEQUENTIAL DAMAGES (INCLUDING, BUT NOT |
| 40 | * LIMITED TO, PROCUREMENT OF SUBSTITUTE GOODS OR SERVICES; LOSS OF USE, |
| 41 | * DATA, OR PROFITS; OR BUSINESS INTERRUPTION) HOWEVER CAUSED AND ON ANY |
| 42 | * THEORY OF LIABILITY, WHETHER IN CONTRACT, STRICT LIABILITY, OR TORT |
| 43 | * (INCLUDING NEGLIGENCE OR OTHERWISE) ARISING IN ANY WAY OUT OF THE USE |
| 44 | * OF THIS SOFTWARE, EVEN IF ADVISED OF THE POSSIBILITY OF SUCH DAMAGE. |
| 45 | * |
| 46 | */ |
| 47 | |
| 48 | #include <linux/spinlock.h> |
| 49 | |
| 50 | #include "hfi.h" |
| 51 | #include "mad.h" |
| 52 | #include "qp.h" |
Mike Marciniszyn | 45842ab | 2016-02-14 12:44:34 -0800 | [diff] [blame] | 53 | #include "verbs_txreq.h" |
Dennis Dalessandro | bb5df5f | 2016-02-14 12:44:43 -0800 | [diff] [blame] | 54 | #include "trace.h" |
Mike Marciniszyn | 7724105 | 2015-07-30 15:17:43 -0400 | [diff] [blame] | 55 | |
Mike Marciniszyn | 7724105 | 2015-07-30 15:17:43 -0400 | [diff] [blame] | 56 | static int gid_ok(union ib_gid *gid, __be64 gid_prefix, __be64 id) |
| 57 | { |
| 58 | return (gid->global.interface_id == id && |
| 59 | (gid->global.subnet_prefix == gid_prefix || |
| 60 | gid->global.subnet_prefix == IB_DEFAULT_GID_PREFIX)); |
| 61 | } |
| 62 | |
/*
 * hfi1_ruc_check_hdr - validate an incoming packet's addressing against a QP
 * @ibp: the IB port the packet arrived on
 * @packet: the decoded receive packet (carries slid/dlid/sl/pkey/grh/etype)
 *
 * The packet is checked against the QP's alternate path when a path
 * migration is armed and the packet requests migration; otherwise it is
 * checked against the primary path.  A successful alternate-path match
 * completes the migration via hfi1_migrate_qp().
 *
 * Return: 1 if the packet must be dropped, 0 if it is acceptable.
 *
 * This should be called with the QP r_lock held.
 *
 * The s_lock will be acquired around the hfi1_migrate_qp() call.
 */
int hfi1_ruc_check_hdr(struct hfi1_ibport *ibp, struct hfi1_packet *packet)
{
	__be64 guid;
	unsigned long flags;
	struct rvt_qp *qp = packet->qp;
	/* SC the packet is expected on, derived from the QP's primary SL */
	u8 sc5 = ibp->sl_to_sc[rdma_ah_get_sl(&qp->remote_ah_attr)];
	u32 dlid = packet->dlid;
	u32 slid = packet->slid;
	u32 sl = packet->sl;
	bool migrated = packet->migrated;
	u16 pkey = packet->pkey;

	if (qp->s_mig_state == IB_MIG_ARMED && migrated) {
		/* Migration requested: validate against the alternate path */
		if (!packet->grh) {
			/*
			 * No GRH in the packet: reject if the alternate path
			 * requires one (bypass packets are exempt from the
			 * GRH requirement).
			 */
			if ((rdma_ah_get_ah_flags(&qp->alt_ah_attr) &
			     IB_AH_GRH) &&
			    (packet->etype != RHF_RCV_TYPE_BYPASS))
				return 1;
		} else {
			const struct ib_global_route *grh;

			/* GRH present but the alternate path expects none */
			if (!(rdma_ah_get_ah_flags(&qp->alt_ah_attr) &
			      IB_AH_GRH))
				return 1;
			grh = rdma_ah_read_grh(&qp->alt_ah_attr);
			guid = get_sguid(ibp, grh->sgid_index);
			/* packet DGID must match this port's GID */
			if (!gid_ok(&packet->grh->dgid, ibp->rvp.gid_prefix,
				    guid))
				return 1;
			/* packet SGID must match the path's destination GID */
			if (!gid_ok(
				&packet->grh->sgid,
				grh->dgid.global.subnet_prefix,
				grh->dgid.global.interface_id))
				return 1;
		}
		if (unlikely(rcv_pkey_check(ppd_from_ibp(ibp), pkey,
					    sc5, slid))) {
			hfi1_bad_pkey(ibp, pkey, sl, 0, qp->ibqp.qp_num,
				      slid, dlid);
			return 1;
		}
		/* Validate the SLID. See Ch. 9.6.1.5 and 17.2.8 */
		if (slid != rdma_ah_get_dlid(&qp->alt_ah_attr) ||
		    ppd_from_ibp(ibp)->port !=
		    rdma_ah_get_port_num(&qp->alt_ah_attr))
			return 1;
		/* alternate path checks out - complete the migration */
		spin_lock_irqsave(&qp->s_lock, flags);
		hfi1_migrate_qp(qp);
		spin_unlock_irqrestore(&qp->s_lock, flags);
	} else {
		/* Validate against the primary path (same checks as above) */
		if (!packet->grh) {
			if ((rdma_ah_get_ah_flags(&qp->remote_ah_attr) &
			     IB_AH_GRH) &&
			    (packet->etype != RHF_RCV_TYPE_BYPASS))
				return 1;
		} else {
			const struct ib_global_route *grh;

			if (!(rdma_ah_get_ah_flags(&qp->remote_ah_attr) &
			      IB_AH_GRH))
				return 1;
			grh = rdma_ah_read_grh(&qp->remote_ah_attr);
			guid = get_sguid(ibp, grh->sgid_index);
			if (!gid_ok(&packet->grh->dgid, ibp->rvp.gid_prefix,
				    guid))
				return 1;
			if (!gid_ok(
				&packet->grh->sgid,
				grh->dgid.global.subnet_prefix,
				grh->dgid.global.interface_id))
				return 1;
		}
		if (unlikely(rcv_pkey_check(ppd_from_ibp(ibp), pkey,
					    sc5, slid))) {
			hfi1_bad_pkey(ibp, pkey, sl, 0, qp->ibqp.qp_num,
				      slid, dlid);
			return 1;
		}
		/* Validate the SLID. See Ch. 9.6.1.5 */
		if ((slid != rdma_ah_get_dlid(&qp->remote_ah_attr)) ||
		    ppd_from_ibp(ibp)->port != qp->port_num)
			return 1;
		/* a matching non-migrated packet re-arms a REARM'd QP */
		if (qp->s_mig_state == IB_MIG_REARM && !migrated)
			qp->s_mig_state = IB_MIG_ARMED;
	}

	return 0;
}
| 157 | |
/**
 * hfi1_make_grh - construct a GRH header
 * @ibp: a pointer to the IB port
 * @hdr: a pointer to the GRH header being constructed
 * @grh: the global route address to send to
 * @hwords: size of header after grh being sent in dwords
 * @nwords: the number of 32 bit words of data being sent
 *
 * Return the size of the header in 32 bit words.
 */
u32 hfi1_make_grh(struct hfi1_ibport *ibp, struct ib_grh *hdr,
		  const struct ib_global_route *grh, u32 hwords, u32 nwords)
{
	hdr->version_tclass_flow =
		cpu_to_be32((IB_GRH_VERSION << IB_GRH_VERSION_SHIFT) |
			    (grh->traffic_class << IB_GRH_TCLASS_SHIFT) |
			    (grh->flow_label << IB_GRH_FLOW_SHIFT));
	/* paylen is in bytes: header dwords after the GRH plus data dwords */
	hdr->paylen = cpu_to_be16((hwords + nwords) << 2);
	/* next_hdr is defined by C8-7 in ch. 8.4.1 */
	hdr->next_hdr = IB_GRH_NEXT_HDR;
	hdr->hop_limit = grh->hop_limit;
	/* The SGID is 32-bit aligned. */
	hdr->sgid.global.subnet_prefix = ibp->rvp.gid_prefix;
	/* fall back to the port GUID when the SGID index is out of range */
	hdr->sgid.global.interface_id =
		grh->sgid_index < HFI1_GUIDS_PER_PORT ?
		get_sguid(ibp, grh->sgid_index) :
		get_sguid(ibp, HFI1_PORT_GUID_INDEX);
	hdr->dgid = grh->dgid;

	/* GRH header size in 32-bit words. */
	return sizeof(struct ib_grh) / sizeof(u32);
}
| 190 | |
/* dword offset of BTH2 (the PSN word) within the SDMA header */
#define BTH2_OFFSET (offsetof(struct hfi1_sdma_header, \
			      hdr.ibh.u.oth.bth[2]) / 4)

/**
 * build_ahg - create ahg in s_ahg
 * @qp: a pointer to QP
 * @npsn: the next PSN for the request/response
 *
 * This routine handles the AHG by allocating an ahg entry and causing the
 * copy of the first middle.
 *
 * Subsequent middles use the copied entry, editing the
 * PSN with 1 or 2 edits.
 */
static inline void build_ahg(struct rvt_qp *qp, u32 npsn)
{
	struct hfi1_qp_priv *priv = qp->priv;

	if (unlikely(qp->s_flags & HFI1_S_AHG_CLEAR))
		clear_ahg(qp);
	if (!(qp->s_flags & HFI1_S_AHG_VALID)) {
		/* first middle that needs copy */
		if (qp->s_ahgidx < 0)
			qp->s_ahgidx = sdma_ahg_alloc(priv->s_sde);
		/* if no AHG entry could be allocated, simply do without */
		if (qp->s_ahgidx >= 0) {
			qp->s_ahgpsn = npsn;
			priv->s_ahg->tx_flags |= SDMA_TXREQ_F_AHG_COPY;
			/* save to protect a change in another thread */
			priv->s_ahg->ahgidx = qp->s_ahgidx;
			qp->s_flags |= HFI1_S_AHG_VALID;
		}
	} else {
		/* subsequent middle after valid */
		if (qp->s_ahgidx >= 0) {
			priv->s_ahg->tx_flags |= SDMA_TXREQ_F_USE_AHG;
			priv->s_ahg->ahgidx = qp->s_ahgidx;
			priv->s_ahg->ahgcount++;
			/* edit the low 16 PSN bits in BTH2 */
			priv->s_ahg->ahgdesc[0] =
				sdma_build_ahg_descriptor(
					(__force u16)cpu_to_be16((u16)npsn),
					BTH2_OFFSET,
					16,
					16);
			if ((npsn & 0xffff0000) !=
			    (qp->s_ahgpsn & 0xffff0000)) {
				/* upper half changed too: add a second edit */
				priv->s_ahg->ahgcount++;
				priv->s_ahg->ahgdesc[1] =
					sdma_build_ahg_descriptor(
						(__force u16)cpu_to_be16(
						(u16)(npsn >> 16)),
						BTH2_OFFSET,
						0,
						16);
			}
		}
	}
}
| 248 | |
Don Hiatt | 5b6cabb | 2017-08-04 13:54:41 -0700 | [diff] [blame] | 249 | static inline void hfi1_make_ruc_bth(struct rvt_qp *qp, |
| 250 | struct ib_other_headers *ohdr, |
| 251 | u32 bth0, u32 bth1, u32 bth2) |
| 252 | { |
Don Hiatt | 5b6cabb | 2017-08-04 13:54:41 -0700 | [diff] [blame] | 253 | ohdr->bth[0] = cpu_to_be32(bth0); |
| 254 | ohdr->bth[1] = cpu_to_be32(bth1); |
| 255 | ohdr->bth[2] = cpu_to_be32(bth2); |
| 256 | } |
| 257 | |
/**
 * hfi1_make_ruc_header_16B - build a 16B header
 * @qp: the queue pair
 * @ohdr: a pointer to the destination header memory
 * @bth0: bth0 passed in from the RC/UC builder
 * @bth1: bth1 passed in from the RC/UC builder
 * @bth2: bth2 passed in from the RC/UC builder
 * @middle: non zero implies indicates ahg "could" be used
 * @ps: the current packet state
 *
 * This routine may disarm ahg under these situations:
 * - packet needs a GRH
 * - BECN needed
 * - migration state not IB_MIG_MIGRATED
 */
static inline void hfi1_make_ruc_header_16B(struct rvt_qp *qp,
					    struct ib_other_headers *ohdr,
					    u32 bth0, u32 bth1, u32 bth2,
					    int middle,
					    struct hfi1_pkt_state *ps)
{
	struct hfi1_qp_priv *priv = qp->priv;
	struct hfi1_ibport *ibp = ps->ibp;
	struct hfi1_pportdata *ppd = ppd_from_ibp(ibp);
	u32 slid;
	u16 pkey = hfi1_get_pkey(ibp, qp->s_pkey_index);
	u8 l4 = OPA_16B_L4_IB_LOCAL;
	/* pad bytes needed to bring the 16B packet to its alignment */
	u8 extra_bytes = hfi1_get_16b_padding(
				(ps->s_txreq->hdr_dwords << 2),
				ps->s_txreq->s_cur_size);
	/* payload dwords including padding, LT and CRC */
	u32 nwords = SIZE_OF_CRC + ((ps->s_txreq->s_cur_size +
				 extra_bytes + SIZE_OF_LT) >> 2);
	bool becn = false;

	/* GRH-flagged multicast destination: build a GRH, use global L4 */
	if (unlikely(rdma_ah_get_ah_flags(&qp->remote_ah_attr) & IB_AH_GRH) &&
	    hfi1_check_mcast(rdma_ah_get_dlid(&qp->remote_ah_attr))) {
		struct ib_grh *grh;
		struct ib_global_route *grd =
			rdma_ah_retrieve_grh(&qp->remote_ah_attr);
		/*
		 * Ensure OPA GIDs are transformed to IB gids
		 * before creating the GRH.
		 */
		if (grd->sgid_index == OPA_GID_INDEX)
			grd->sgid_index = 0;
		grh = &ps->s_txreq->phdr.hdr.opah.u.l.grh;
		l4 = OPA_16B_L4_IB_GLOBAL;
		ps->s_txreq->hdr_dwords +=
			hfi1_make_grh(ibp, grh, grd,
				      ps->s_txreq->hdr_dwords - LRH_16B_DWORDS,
				      nwords);
		/* a GRH disqualifies the packet from AHG */
		middle = 0;
	}

	if (qp->s_mig_state == IB_MIG_MIGRATED)
		bth1 |= OPA_BTH_MIG_REQ;
	else
		middle = 0;

	if (qp->s_flags & RVT_S_ECN) {
		qp->s_flags &= ~RVT_S_ECN;
		/* we recently received a FECN, so return a BECN */
		becn = true;
		middle = 0;
	}
	/* either arm AHG for this header or invalidate any prior AHG state */
	if (middle)
		build_ahg(qp, bth2);
	else
		qp->s_flags &= ~HFI1_S_AHG_VALID;

	bth0 |= pkey;
	bth0 |= extra_bytes << 20;
	hfi1_make_ruc_bth(qp, ohdr, bth0, bth1, bth2);

	/* no local lid assigned yet: send with the permissive LID */
	if (!ppd->lid)
		slid = be32_to_cpu(OPA_LID_PERMISSIVE);
	else
		slid = ppd->lid |
			(rdma_ah_get_path_bits(&qp->remote_ah_attr) &
			((1 << ppd->lmc) - 1));

	hfi1_make_16b_hdr(&ps->s_txreq->phdr.hdr.opah,
			  slid,
			  opa_get_lid(rdma_ah_get_dlid(&qp->remote_ah_attr),
				      16B),
			  (ps->s_txreq->hdr_dwords + nwords) >> 1,
			  pkey, becn, 0, l4, priv->s_sc);
}
| 346 | |
/**
 * hfi1_make_ruc_header_9B - build a 9B header
 * @qp: the queue pair
 * @ohdr: a pointer to the destination header memory
 * @bth0: bth0 passed in from the RC/UC builder
 * @bth1: bth1 passed in from the RC/UC builder
 * @bth2: bth2 passed in from the RC/UC builder
 * @middle: non zero implies indicates ahg "could" be used
 * @ps: the current packet state
 *
 * This routine may disarm ahg under these situations:
 * - packet needs a GRH
 * - BECN needed
 * - migration state not IB_MIG_MIGRATED
 */
static inline void hfi1_make_ruc_header_9B(struct rvt_qp *qp,
					   struct ib_other_headers *ohdr,
					   u32 bth0, u32 bth1, u32 bth2,
					   int middle,
					   struct hfi1_pkt_state *ps)
{
	struct hfi1_qp_priv *priv = qp->priv;
	struct hfi1_ibport *ibp = ps->ibp;
	u16 pkey = hfi1_get_pkey(ibp, qp->s_pkey_index);
	u16 lrh0 = HFI1_LRH_BTH;
	/* bytes needed to pad the payload to a 4-byte boundary */
	u8 extra_bytes = -ps->s_txreq->s_cur_size & 3;
	/* payload dwords including padding and CRC */
	u32 nwords = SIZE_OF_CRC + ((ps->s_txreq->s_cur_size +
				 extra_bytes) >> 2);

	if (unlikely(rdma_ah_get_ah_flags(&qp->remote_ah_attr) & IB_AH_GRH)) {
		struct ib_grh *grh = &ps->s_txreq->phdr.hdr.ibh.u.l.grh;

		lrh0 = HFI1_LRH_GRH;
		ps->s_txreq->hdr_dwords +=
			hfi1_make_grh(ibp, grh,
				      rdma_ah_read_grh(&qp->remote_ah_attr),
				      ps->s_txreq->hdr_dwords - LRH_9B_DWORDS,
				      nwords);
		/* a GRH disqualifies the packet from AHG */
		middle = 0;
	}
	/* fold the SC and SL into the LRH word */
	lrh0 |= (priv->s_sc & 0xf) << 12 |
		(rdma_ah_get_sl(&qp->remote_ah_attr) & 0xf) << 4;

	if (qp->s_mig_state == IB_MIG_MIGRATED)
		bth0 |= IB_BTH_MIG_REQ;
	else
		middle = 0;

	if (qp->s_flags & RVT_S_ECN) {
		qp->s_flags &= ~RVT_S_ECN;
		/* we recently received a FECN, so return a BECN */
		bth1 |= (IB_BECN_MASK << IB_BECN_SHIFT);
		middle = 0;
	}
	/* either arm AHG for this header or invalidate any prior AHG state */
	if (middle)
		build_ahg(qp, bth2);
	else
		qp->s_flags &= ~HFI1_S_AHG_VALID;

	bth0 |= pkey;
	bth0 |= extra_bytes << 20;
	hfi1_make_ruc_bth(qp, ohdr, bth0, bth1, bth2);
	hfi1_make_ib_hdr(&ps->s_txreq->phdr.hdr.ibh,
			 lrh0,
			 ps->s_txreq->hdr_dwords + nwords,
			 opa_get_lid(rdma_ah_get_dlid(&qp->remote_ah_attr), 9B),
			 ppd_from_ibp(ibp)->lid |
			 rdma_ah_get_path_bits(&qp->remote_ah_attr));
}
| 416 | |
/* common signature of the 9B and 16B RUC header builders */
typedef void (*hfi1_make_ruc_hdr)(struct rvt_qp *qp,
				  struct ib_other_headers *ohdr,
				  u32 bth0, u32 bth1, u32 bth2, int middle,
				  struct hfi1_pkt_state *ps);

/* We support only two types - 9B and 16B for now */
static const hfi1_make_ruc_hdr hfi1_ruc_header_tbl[2] = {
	[HFI1_PKT_TYPE_9B] = &hfi1_make_ruc_header_9B,
	[HFI1_PKT_TYPE_16B] = &hfi1_make_ruc_header_16B
};
| 427 | |
/**
 * hfi1_make_ruc_header - build the RUC header for an RC/UC packet
 * @qp: the queue pair
 * @ohdr: a pointer to the destination header memory
 * @bth0: bth0 passed in from the RC/UC builder
 * @bth1: bth1 passed in from the RC/UC builder
 * @bth2: bth2 passed in from the RC/UC builder
 * @middle: non zero indicates ahg "could" be used
 * @ps: the current packet state
 *
 * Resets the per-QP AHG state and then dispatches to the 9B or 16B
 * header builder according to the QP's header type.
 */
void hfi1_make_ruc_header(struct rvt_qp *qp, struct ib_other_headers *ohdr,
			  u32 bth0, u32 bth1, u32 bth2, int middle,
			  struct hfi1_pkt_state *ps)
{
	struct hfi1_qp_priv *priv = qp->priv;

	/*
	 * reset s_ahg/AHG fields
	 *
	 * This insures that the ahgentry/ahgcount
	 * are at a non-AHG default to protect
	 * build_verbs_tx_desc() from using
	 * an include ahgidx.
	 *
	 * build_ahg() will modify as appropriate
	 * to use the AHG feature.
	 */
	priv->s_ahg->tx_flags = 0;
	priv->s_ahg->ahgcount = 0;
	priv->s_ahg->ahgidx = 0;

	/* Make the appropriate header */
	hfi1_ruc_header_tbl[priv->hdr_type](qp, ohdr, bth0, bth1, bth2, middle,
					    ps);
}
| 453 | |
/* when sending, force a reschedule every one of these periods */
#define SEND_RESCHED_TIMEOUT (5 * HZ)  /* 5s in jiffies */

/**
 * hfi1_schedule_send_yield - test for a yield required for QP
 * send engine
 * @qp: a pointer to QP
 * @ps: a pointer to a structure with commonly lookup values for
 *      the send engine progress
 * @tid: true if it is the tid leg
 *
 * This routine checks if the time slice for the QP has expired
 * for RC QPs, if so an additional work entry is queued. At this
 * point, other QPs have an opportunity to be scheduled. It
 * returns true if a yield is required, otherwise, false
 * is returned.
 */
bool hfi1_schedule_send_yield(struct rvt_qp *qp, struct hfi1_pkt_state *ps,
			      bool tid)
{
	ps->pkts_sent = true;

	if (unlikely(time_after(jiffies, ps->timeout))) {
		/*
		 * Not running in a workqueue thread, or the workqueue is
		 * congested: drop BUSY and requeue so other QPs can run.
		 */
		if (!ps->in_thread ||
		    workqueue_congested(ps->cpu, ps->ppd->hfi1_wq)) {
			spin_lock_irqsave(&qp->s_lock, ps->flags);
			if (!tid) {
				qp->s_flags &= ~RVT_S_BUSY;
				hfi1_schedule_send(qp);
			} else {
				struct hfi1_qp_priv *priv = qp->priv;

				/*
				 * The TID leg tracks its own busy flags;
				 * clear whichever set applies before
				 * rescheduling the TID send engine.
				 */
				if (priv->s_flags &
				    HFI1_S_TID_BUSY_SET) {
					qp->s_flags &= ~RVT_S_BUSY;
					priv->s_flags &=
						~(HFI1_S_TID_BUSY_SET |
						  RVT_S_BUSY);
				} else {
					priv->s_flags &= ~RVT_S_BUSY;
				}
				hfi1_schedule_tid_send(qp);
			}

			spin_unlock_irqrestore(&qp->s_lock, ps->flags);
			this_cpu_inc(*ps->ppd->dd->send_schedule);
			trace_hfi1_rc_expired_time_slice(qp, true);
			return true;
		}

		/* in an uncongested thread: just yield the CPU in place */
		cond_resched();
		this_cpu_inc(*ps->ppd->dd->send_schedule);
		ps->timeout = jiffies + ps->timeout_int;
	}

	trace_hfi1_rc_expired_time_slice(qp, false);
	return false;
}
| 512 | |
/*
 * Run the send engine with in_thread = false (i.e. not from a workqueue
 * thread).  NOTE(review): name suggests this is the entry point handed to
 * rdmavt -- confirm against the rvt driver registration.
 */
void hfi1_do_send_from_rvt(struct rvt_qp *qp)
{
	hfi1_do_send(qp, false);
}
| 517 | |
/*
 * Workqueue callback: recover the QP from the embedded iowait work item
 * and run the send engine with in_thread = true.
 */
void _hfi1_do_send(struct work_struct *work)
{
	struct iowait_work *w = container_of(work, struct iowait_work, iowork);
	struct rvt_qp *qp = iowait_to_qp(w->iow);

	hfi1_do_send(qp, true);
}
| 525 | |
Mike Marciniszyn | 7724105 | 2015-07-30 15:17:43 -0400 | [diff] [blame] | 526 | /** |
| 527 | * hfi1_do_send - perform a send on a QP |
Kaike Wan | ea752bc | 2019-04-11 07:15:49 -0700 | [diff] [blame] | 528 | * @qp: a pointer to the QP |
Mike Marciniszyn | b6eac93 | 2017-04-09 10:16:35 -0700 | [diff] [blame] | 529 | * @in_thread: true if in a workqueue thread |
Mike Marciniszyn | 7724105 | 2015-07-30 15:17:43 -0400 | [diff] [blame] | 530 | * |
| 531 | * Process entries in the send work queue until credit or queue is |
Dennis Dalessandro | ca00c62 | 2016-09-25 07:42:08 -0700 | [diff] [blame] | 532 | * exhausted. Only allow one CPU to send a packet per QP. |
Mike Marciniszyn | 7724105 | 2015-07-30 15:17:43 -0400 | [diff] [blame] | 533 | * Otherwise, two threads could send packets out of order. |
| 534 | */ |
Mike Marciniszyn | b6eac93 | 2017-04-09 10:16:35 -0700 | [diff] [blame] | 535 | void hfi1_do_send(struct rvt_qp *qp, bool in_thread) |
Mike Marciniszyn | 7724105 | 2015-07-30 15:17:43 -0400 | [diff] [blame] | 536 | { |
Dennis Dalessandro | d46e514 | 2015-11-11 00:34:37 -0500 | [diff] [blame] | 537 | struct hfi1_pkt_state ps; |
Vennila Megavannan | 23cd471 | 2016-02-03 14:34:23 -0800 | [diff] [blame] | 538 | struct hfi1_qp_priv *priv = qp->priv; |
Dennis Dalessandro | bb5df5f | 2016-02-14 12:44:43 -0800 | [diff] [blame] | 539 | int (*make_req)(struct rvt_qp *qp, struct hfi1_pkt_state *ps); |
Mike Marciniszyn | 7724105 | 2015-07-30 15:17:43 -0400 | [diff] [blame] | 540 | |
Dennis Dalessandro | d46e514 | 2015-11-11 00:34:37 -0500 | [diff] [blame] | 541 | ps.dev = to_idev(qp->ibqp.device); |
| 542 | ps.ibp = to_iport(qp->ibqp.device, qp->port_num); |
| 543 | ps.ppd = ppd_from_ibp(ps.ibp); |
Mike Marciniszyn | dd1ed10 | 2017-05-04 05:14:10 -0700 | [diff] [blame] | 544 | ps.in_thread = in_thread; |
Dennis Dalessandro | 5da0fc9 | 2018-09-28 07:17:09 -0700 | [diff] [blame] | 545 | ps.wait = iowait_get_ib_work(&priv->s_iowait); |
Mike Marciniszyn | dd1ed10 | 2017-05-04 05:14:10 -0700 | [diff] [blame] | 546 | |
| 547 | trace_hfi1_rc_do_send(qp, in_thread); |
Dennis Dalessandro | d46e514 | 2015-11-11 00:34:37 -0500 | [diff] [blame] | 548 | |
Vennila Megavannan | 23cd471 | 2016-02-03 14:34:23 -0800 | [diff] [blame] | 549 | switch (qp->ibqp.qp_type) { |
| 550 | case IB_QPT_RC: |
Dasaratharaman Chandramouli | d8966fc | 2017-04-29 14:41:28 -0400 | [diff] [blame] | 551 | if (!loopback && ((rdma_ah_get_dlid(&qp->remote_ah_attr) & |
| 552 | ~((1 << ps.ppd->lmc) - 1)) == |
| 553 | ps.ppd->lid)) { |
Venkata Sandeep Dhanalakota | 1570346 | 2018-09-26 10:44:52 -0700 | [diff] [blame] | 554 | rvt_ruc_loopback(qp); |
Vennila Megavannan | 23cd471 | 2016-02-03 14:34:23 -0800 | [diff] [blame] | 555 | return; |
| 556 | } |
Mike Marciniszyn | 7724105 | 2015-07-30 15:17:43 -0400 | [diff] [blame] | 557 | make_req = hfi1_make_rc_req; |
Mike Marciniszyn | dd1ed10 | 2017-05-04 05:14:10 -0700 | [diff] [blame] | 558 | ps.timeout_int = qp->timeout_jiffies; |
Vennila Megavannan | 23cd471 | 2016-02-03 14:34:23 -0800 | [diff] [blame] | 559 | break; |
| 560 | case IB_QPT_UC: |
Dasaratharaman Chandramouli | d8966fc | 2017-04-29 14:41:28 -0400 | [diff] [blame] | 561 | if (!loopback && ((rdma_ah_get_dlid(&qp->remote_ah_attr) & |
| 562 | ~((1 << ps.ppd->lmc) - 1)) == |
| 563 | ps.ppd->lid)) { |
Venkata Sandeep Dhanalakota | 1570346 | 2018-09-26 10:44:52 -0700 | [diff] [blame] | 564 | rvt_ruc_loopback(qp); |
Vennila Megavannan | 23cd471 | 2016-02-03 14:34:23 -0800 | [diff] [blame] | 565 | return; |
| 566 | } |
Mike Marciniszyn | 7724105 | 2015-07-30 15:17:43 -0400 | [diff] [blame] | 567 | make_req = hfi1_make_uc_req; |
Mike Marciniszyn | dd1ed10 | 2017-05-04 05:14:10 -0700 | [diff] [blame] | 568 | ps.timeout_int = SEND_RESCHED_TIMEOUT; |
Vennila Megavannan | 23cd471 | 2016-02-03 14:34:23 -0800 | [diff] [blame] | 569 | break; |
| 570 | default: |
Mike Marciniszyn | 7724105 | 2015-07-30 15:17:43 -0400 | [diff] [blame] | 571 | make_req = hfi1_make_ud_req; |
Mike Marciniszyn | dd1ed10 | 2017-05-04 05:14:10 -0700 | [diff] [blame] | 572 | ps.timeout_int = SEND_RESCHED_TIMEOUT; |
Vennila Megavannan | 23cd471 | 2016-02-03 14:34:23 -0800 | [diff] [blame] | 573 | } |
Mike Marciniszyn | 7724105 | 2015-07-30 15:17:43 -0400 | [diff] [blame] | 574 | |
Mike Marciniszyn | 747f4d7 | 2016-04-12 10:46:10 -0700 | [diff] [blame] | 575 | spin_lock_irqsave(&qp->s_lock, ps.flags); |
Mike Marciniszyn | 7724105 | 2015-07-30 15:17:43 -0400 | [diff] [blame] | 576 | |
| 577 | /* Return if we are already busy processing a work request. */ |
| 578 | if (!hfi1_send_ok(qp)) { |
Dennis Dalessandro | 5da0fc9 | 2018-09-28 07:17:09 -0700 | [diff] [blame] | 579 | if (qp->s_flags & HFI1_S_ANY_WAIT_IO) |
| 580 | iowait_set_flag(&priv->s_iowait, IOWAIT_PENDING_IB); |
Mike Marciniszyn | 747f4d7 | 2016-04-12 10:46:10 -0700 | [diff] [blame] | 581 | spin_unlock_irqrestore(&qp->s_lock, ps.flags); |
Mike Marciniszyn | 7724105 | 2015-07-30 15:17:43 -0400 | [diff] [blame] | 582 | return; |
| 583 | } |
| 584 | |
Dennis Dalessandro | 54d10c1 | 2016-01-19 14:43:01 -0800 | [diff] [blame] | 585 | qp->s_flags |= RVT_S_BUSY; |
Mike Marciniszyn | 7724105 | 2015-07-30 15:17:43 -0400 | [diff] [blame] | 586 | |
Mike Marciniszyn | dd1ed10 | 2017-05-04 05:14:10 -0700 | [diff] [blame] | 587 | ps.timeout_int = ps.timeout_int / 8; |
| 588 | ps.timeout = jiffies + ps.timeout_int; |
| 589 | ps.cpu = priv->s_sde ? priv->s_sde->cpu : |
Vennila Megavannan | 23cd471 | 2016-02-03 14:34:23 -0800 | [diff] [blame] | 590 | cpumask_first(cpumask_of_node(ps.ppd->dd->node)); |
Kaike Wan | bcad291 | 2017-07-24 07:45:37 -0700 | [diff] [blame] | 591 | ps.pkts_sent = false; |
Mike Marciniszyn | dd1ed10 | 2017-05-04 05:14:10 -0700 | [diff] [blame] | 592 | |
Mike Marciniszyn | 711e104 | 2016-02-14 12:45:18 -0800 | [diff] [blame] | 593 | /* insure a pre-built packet is handled */ |
Dennis Dalessandro | 5da0fc9 | 2018-09-28 07:17:09 -0700 | [diff] [blame] | 594 | ps.s_txreq = get_waiting_verbs_txreq(ps.wait); |
Mike Marciniszyn | 7724105 | 2015-07-30 15:17:43 -0400 | [diff] [blame] | 595 | do { |
| 596 | /* Check for a constructed packet to be sent. */ |
Mitko Haralanov | 9636258 | 2018-02-01 10:46:07 -0800 | [diff] [blame] | 597 | if (ps.s_txreq) { |
Kaike Wan | 572f0c3 | 2019-01-23 21:51:27 -0800 | [diff] [blame] | 598 | if (priv->s_flags & HFI1_S_TID_BUSY_SET) |
| 599 | qp->s_flags |= RVT_S_BUSY; |
Mike Marciniszyn | 747f4d7 | 2016-04-12 10:46:10 -0700 | [diff] [blame] | 600 | spin_unlock_irqrestore(&qp->s_lock, ps.flags); |
Mike Marciniszyn | 7724105 | 2015-07-30 15:17:43 -0400 | [diff] [blame] | 601 | /* |
| 602 | * If the packet cannot be sent now, return and |
Dennis Dalessandro | ca00c62 | 2016-09-25 07:42:08 -0700 | [diff] [blame] | 603 | * the send engine will be woken up later. |
Mike Marciniszyn | 7724105 | 2015-07-30 15:17:43 -0400 | [diff] [blame] | 604 | */ |
Dennis Dalessandro | d46e514 | 2015-11-11 00:34:37 -0500 | [diff] [blame] | 605 | if (hfi1_verbs_send(qp, &ps)) |
Mike Marciniszyn | 46a80d6 | 2016-02-14 12:10:04 -0800 | [diff] [blame] | 606 | return; |
Dennis Dalessandro | 5da0fc9 | 2018-09-28 07:17:09 -0700 | [diff] [blame] | 607 | |
Mike Marciniszyn | 46a80d6 | 2016-02-14 12:10:04 -0800 | [diff] [blame] | 608 | /* allow other tasks to run */ |
Kaike Wan | 572f0c3 | 2019-01-23 21:51:27 -0800 | [diff] [blame] | 609 | if (hfi1_schedule_send_yield(qp, &ps, false)) |
Mike Marciniszyn | dd1ed10 | 2017-05-04 05:14:10 -0700 | [diff] [blame] | 610 | return; |
| 611 | |
Mike Marciniszyn | 747f4d7 | 2016-04-12 10:46:10 -0700 | [diff] [blame] | 612 | spin_lock_irqsave(&qp->s_lock, ps.flags); |
Dean Luick | b421922 | 2015-10-26 10:28:35 -0400 | [diff] [blame] | 613 | } |
Dennis Dalessandro | bb5df5f | 2016-02-14 12:44:43 -0800 | [diff] [blame] | 614 | } while (make_req(qp, &ps)); |
Kaike Wan | bcad291 | 2017-07-24 07:45:37 -0700 | [diff] [blame] | 615 | iowait_starve_clear(ps.pkts_sent, &priv->s_iowait); |
Mike Marciniszyn | 747f4d7 | 2016-04-12 10:46:10 -0700 | [diff] [blame] | 616 | spin_unlock_irqrestore(&qp->s_lock, ps.flags); |
Mike Marciniszyn | 7724105 | 2015-07-30 15:17:43 -0400 | [diff] [blame] | 617 | } |