/*
 * Copyright(c) 2015, 2016 Intel Corporation.
 *
 * This file is provided under a dual BSD/GPLv2 license. When using or
 * redistributing this file, you may do so under either license.
 *
 * GPL LICENSE SUMMARY
 *
 * This program is free software; you can redistribute it and/or modify
 * it under the terms of version 2 of the GNU General Public License as
 * published by the Free Software Foundation.
 *
 * This program is distributed in the hope that it will be useful, but
 * WITHOUT ANY WARRANTY; without even the implied warranty of
 * MERCHANTABILITY or FITNESS FOR A PARTICULAR PURPOSE. See the GNU
 * General Public License for more details.
 *
 * BSD LICENSE
 *
 * Redistribution and use in source and binary forms, with or without
 * modification, are permitted provided that the following conditions
 * are met:
 *
 *  - Redistributions of source code must retain the above copyright
 *    notice, this list of conditions and the following disclaimer.
 *  - Redistributions in binary form must reproduce the above copyright
 *    notice, this list of conditions and the following disclaimer in
 *    the documentation and/or other materials provided with the
 *    distribution.
 *  - Neither the name of Intel Corporation nor the names of its
 *    contributors may be used to endorse or promote products derived
 *    from this software without specific prior written permission.
 *
 * THIS SOFTWARE IS PROVIDED BY THE COPYRIGHT HOLDERS AND CONTRIBUTORS
 * "AS IS" AND ANY EXPRESS OR IMPLIED WARRANTIES, INCLUDING, BUT NOT
 * LIMITED TO, THE IMPLIED WARRANTIES OF MERCHANTABILITY AND FITNESS FOR
 * A PARTICULAR PURPOSE ARE DISCLAIMED. IN NO EVENT SHALL THE COPYRIGHT
 * OWNER OR CONTRIBUTORS BE LIABLE FOR ANY DIRECT, INDIRECT, INCIDENTAL,
 * SPECIAL, EXEMPLARY, OR CONSEQUENTIAL DAMAGES (INCLUDING, BUT NOT
 * LIMITED TO, PROCUREMENT OF SUBSTITUTE GOODS OR SERVICES; LOSS OF USE,
 * DATA, OR PROFITS; OR BUSINESS INTERRUPTION) HOWEVER CAUSED AND ON ANY
 * THEORY OF LIABILITY, WHETHER IN CONTRACT, STRICT LIABILITY, OR TORT
 * (INCLUDING NEGLIGENCE OR OTHERWISE) ARISING IN ANY WAY OUT OF THE USE
 * OF THIS SOFTWARE, EVEN IF ADVISED OF THE POSSIBILITY OF SUCH DAMAGE.
 *
 */

#include <linux/spinlock.h>

#include "hfi.h"
#include "mad.h"
#include "qp.h"
#include "verbs_txreq.h"
#include "trace.h"

/*
 * Convert the AETH RNR timeout code into the number of microseconds.
 */
const u32 ib_hfi1_rnr_table[32] = {
	655360,	/* 00: 655.36 ms */
	10,	/* 01:    .01 ms */
	20,	/* 02:    .02 ms */
	30,	/* 03:    .03 ms */
	40,	/* 04:    .04 ms */
	60,	/* 05:    .06 ms */
	80,	/* 06:    .08 ms */
	120,	/* 07:    .12 ms */
	160,	/* 08:    .16 ms */
	240,	/* 09:    .24 ms */
	320,	/* 0A:    .32 ms */
	480,	/* 0B:    .48 ms */
	640,	/* 0C:    .64 ms */
	960,	/* 0D:    .96 ms */
	1280,	/* 0E:   1.28 ms */
	1920,	/* 0F:   1.92 ms */
	2560,	/* 10:   2.56 ms */
	3840,	/* 11:   3.84 ms */
	5120,	/* 12:   5.12 ms */
	7680,	/* 13:   7.68 ms */
	10240,	/* 14:  10.24 ms */
	15360,	/* 15:  15.36 ms */
	20480,	/* 16:  20.48 ms */
	30720,	/* 17:  30.72 ms */
	40960,	/* 18:  40.96 ms */
	61440,	/* 19:  61.44 ms */
	81920,	/* 1A:  81.92 ms */
	122880,	/* 1B: 122.88 ms */
	163840,	/* 1C: 163.84 ms */
	245760,	/* 1D: 245.76 ms */
	327680,	/* 1E: 327.68 ms */
	491520	/* 1F: 491.52 ms */
};
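
/*
 * Illustrative use (a sketch, not additional driver code): the table is
 * indexed by the 5-bit RNR timer code, such as qp->r_min_rnr_timer, and
 * yields the wait time in microseconds. This mirrors the rnr_nak handling
 * in ruc_loopback() below.
 *
 *	u32 usecs = ib_hfi1_rnr_table[qp->r_min_rnr_timer & 0x1f];
 */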

/*
 * Validate a RWQE and fill in the SGE state.
 * Return 1 if OK.
 */
static int init_sge(struct rvt_qp *qp, struct rvt_rwqe *wqe)
{
	int i, j, ret;
	struct ib_wc wc;
	struct rvt_lkey_table *rkt;
	struct rvt_pd *pd;
	struct rvt_sge_state *ss;

	rkt = &to_idev(qp->ibqp.device)->rdi.lkey_table;
	pd = ibpd_to_rvtpd(qp->ibqp.srq ? qp->ibqp.srq->pd : qp->ibqp.pd);
	ss = &qp->r_sge;
	ss->sg_list = qp->r_sg_list;
	qp->r_len = 0;
	for (i = j = 0; i < wqe->num_sge; i++) {
		if (wqe->sg_list[i].length == 0)
			continue;
		/* Check LKEY */
		if (!rvt_lkey_ok(rkt, pd, j ? &ss->sg_list[j - 1] : &ss->sge,
				 &wqe->sg_list[i], IB_ACCESS_LOCAL_WRITE))
			goto bad_lkey;
		qp->r_len += wqe->sg_list[i].length;
		j++;
	}
	ss->num_sge = j;
	ss->total_len = qp->r_len;
	ret = 1;
	goto bail;

bad_lkey:
	while (j) {
		struct rvt_sge *sge = --j ? &ss->sg_list[j - 1] : &ss->sge;

		rvt_put_mr(sge->mr);
	}
	ss->num_sge = 0;
	memset(&wc, 0, sizeof(wc));
	wc.wr_id = wqe->wr_id;
	wc.status = IB_WC_LOC_PROT_ERR;
	wc.opcode = IB_WC_RECV;
	wc.qp = &qp->ibqp;
	/* Signal solicited completion event. */
	rvt_cq_enter(ibcq_to_rvtcq(qp->ibqp.recv_cq), &wc, 1);
	ret = 0;
bail:
	return ret;
}

/**
 * hfi1_rvt_get_rwqe - copy the next RWQE into the QP's RWQE
 * @qp: the QP
 * @wr_id_only: update qp->r_wr_id only, not qp->r_sge
 *
 * Return -1 if there is a local error, 0 if no RWQE is available,
 * otherwise return 1.
 *
 * Can be called from interrupt level.
 */
int hfi1_rvt_get_rwqe(struct rvt_qp *qp, int wr_id_only)
{
	unsigned long flags;
	struct rvt_rq *rq;
	struct rvt_rwq *wq;
	struct rvt_srq *srq;
	struct rvt_rwqe *wqe;
	void (*handler)(struct ib_event *, void *);
	u32 tail;
	int ret;

	if (qp->ibqp.srq) {
		srq = ibsrq_to_rvtsrq(qp->ibqp.srq);
		handler = srq->ibsrq.event_handler;
		rq = &srq->rq;
	} else {
		srq = NULL;
		handler = NULL;
		rq = &qp->r_rq;
	}

	spin_lock_irqsave(&rq->lock, flags);
	if (!(ib_rvt_state_ops[qp->state] & RVT_PROCESS_RECV_OK)) {
		ret = 0;
		goto unlock;
	}

	wq = rq->wq;
	tail = wq->tail;
	/* Validate tail before using it since it is user writable. */
	if (tail >= rq->size)
		tail = 0;
	if (unlikely(tail == wq->head)) {
		ret = 0;
		goto unlock;
	}
	/* Make sure entry is read after head index is read. */
	smp_rmb();
	wqe = rvt_get_rwqe_ptr(rq, tail);
	/*
	 * Even though we update the tail index in memory, the verbs
	 * consumer is not supposed to post more entries until a
	 * completion is generated.
	 */
	if (++tail >= rq->size)
		tail = 0;
	wq->tail = tail;
	if (!wr_id_only && !init_sge(qp, wqe)) {
		ret = -1;
		goto unlock;
	}
	qp->r_wr_id = wqe->wr_id;

	ret = 1;
	set_bit(RVT_R_WRID_VALID, &qp->r_aflags);
	if (handler) {
		u32 n;

		/*
		 * Validate head pointer value and compute
		 * the number of remaining WQEs.
		 */
		n = wq->head;
		if (n >= rq->size)
			n = 0;
		if (n < tail)
			n += rq->size - tail;
		else
			n -= tail;
		if (n < srq->limit) {
			struct ib_event ev;

			srq->limit = 0;
			spin_unlock_irqrestore(&rq->lock, flags);
			ev.device = qp->ibqp.device;
			ev.element.srq = qp->ibqp.srq;
			ev.event = IB_EVENT_SRQ_LIMIT_REACHED;
			handler(&ev, srq->ibsrq.srq_context);
			goto bail;
		}
	}
unlock:
	spin_unlock_irqrestore(&rq->lock, flags);
bail:
	return ret;
}
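
/*
 * Illustrative caller pattern (a sketch; compare the IB_WR_SEND handling
 * in ruc_loopback() below): a negative return is a local error, zero means
 * no receive WQE is posted (so the requester should get an RNR NAK), and
 * one means qp->r_wr_id and qp->r_sge are ready to receive into.
 *
 *	ret = hfi1_rvt_get_rwqe(qp, 0);
 *	if (ret < 0)
 *		goto op_err;
 *	if (!ret)
 *		goto rnr_nak;
 */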

static __be64 get_sguid(struct hfi1_ibport *ibp, unsigned index)
{
	if (!index) {
		struct hfi1_pportdata *ppd = ppd_from_ibp(ibp);

		return cpu_to_be64(ppd->guid);
	}
	return ibp->guids[index - 1];
}

static int gid_ok(union ib_gid *gid, __be64 gid_prefix, __be64 id)
{
	return (gid->global.interface_id == id &&
		(gid->global.subnet_prefix == gid_prefix ||
		 gid->global.subnet_prefix == IB_DEFAULT_GID_PREFIX));
}

/*
 * This should be called with the QP r_lock held.
 *
 * The s_lock will be acquired around the hfi1_migrate_qp() call.
 */
int hfi1_ruc_check_hdr(struct hfi1_ibport *ibp, struct ib_header *hdr,
		       int has_grh, struct rvt_qp *qp, u32 bth0)
{
	__be64 guid;
	unsigned long flags;
	u8 sc5 = ibp->sl_to_sc[qp->remote_ah_attr.sl];

	if (qp->s_mig_state == IB_MIG_ARMED && (bth0 & IB_BTH_MIG_REQ)) {
		if (!has_grh) {
			if (qp->alt_ah_attr.ah_flags & IB_AH_GRH)
				goto err;
		} else {
			if (!(qp->alt_ah_attr.ah_flags & IB_AH_GRH))
				goto err;
			guid = get_sguid(ibp, qp->alt_ah_attr.grh.sgid_index);
			if (!gid_ok(&hdr->u.l.grh.dgid, ibp->rvp.gid_prefix,
				    guid))
				goto err;
			if (!gid_ok(
				&hdr->u.l.grh.sgid,
				qp->alt_ah_attr.grh.dgid.global.subnet_prefix,
				qp->alt_ah_attr.grh.dgid.global.interface_id))
				goto err;
		}
		if (unlikely(rcv_pkey_check(ppd_from_ibp(ibp), (u16)bth0,
					    sc5, be16_to_cpu(hdr->lrh[3])))) {
			hfi1_bad_pqkey(ibp, OPA_TRAP_BAD_P_KEY,
				       (u16)bth0,
				       (be16_to_cpu(hdr->lrh[0]) >> 4) & 0xF,
				       0, qp->ibqp.qp_num,
				       be16_to_cpu(hdr->lrh[3]),
				       be16_to_cpu(hdr->lrh[1]));
			goto err;
		}
		/* Validate the SLID. See Ch. 9.6.1.5 and 17.2.8 */
		if (be16_to_cpu(hdr->lrh[3]) != qp->alt_ah_attr.dlid ||
		    ppd_from_ibp(ibp)->port != qp->alt_ah_attr.port_num)
			goto err;
		spin_lock_irqsave(&qp->s_lock, flags);
		hfi1_migrate_qp(qp);
		spin_unlock_irqrestore(&qp->s_lock, flags);
	} else {
		if (!has_grh) {
			if (qp->remote_ah_attr.ah_flags & IB_AH_GRH)
				goto err;
		} else {
			if (!(qp->remote_ah_attr.ah_flags & IB_AH_GRH))
				goto err;
			guid = get_sguid(ibp,
					 qp->remote_ah_attr.grh.sgid_index);
			if (!gid_ok(&hdr->u.l.grh.dgid, ibp->rvp.gid_prefix,
				    guid))
				goto err;
			if (!gid_ok(
				&hdr->u.l.grh.sgid,
				qp->remote_ah_attr.grh.dgid.global.subnet_prefix,
				qp->remote_ah_attr.grh.dgid.global.interface_id))
				goto err;
		}
		if (unlikely(rcv_pkey_check(ppd_from_ibp(ibp), (u16)bth0,
					    sc5, be16_to_cpu(hdr->lrh[3])))) {
			hfi1_bad_pqkey(ibp, OPA_TRAP_BAD_P_KEY,
				       (u16)bth0,
				       (be16_to_cpu(hdr->lrh[0]) >> 4) & 0xF,
				       0, qp->ibqp.qp_num,
				       be16_to_cpu(hdr->lrh[3]),
				       be16_to_cpu(hdr->lrh[1]));
			goto err;
		}
		/* Validate the SLID. See Ch. 9.6.1.5 */
		if (be16_to_cpu(hdr->lrh[3]) != qp->remote_ah_attr.dlid ||
		    ppd_from_ibp(ibp)->port != qp->port_num)
			goto err;
		if (qp->s_mig_state == IB_MIG_REARM &&
		    !(bth0 & IB_BTH_MIG_REQ))
			qp->s_mig_state = IB_MIG_ARMED;
	}

	return 0;

err:
	return 1;
}
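
/*
 * Illustrative call pattern (a sketch of how a receive-side caller might
 * use the check above; not lifted from rc.c/uc.c): the caller holds
 * qp->r_lock and silently drops the packet when the header check fails,
 * since the pkey trap and counter handling has already happened here.
 *
 *	if (hfi1_ruc_check_hdr(ibp, hdr, has_grh, qp, bth0))
 *		return;
 */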

/**
 * ruc_loopback - handle UC and RC loopback requests
 * @sqp: the sending QP
 *
 * This is called from hfi1_do_send() to
 * forward a WQE addressed to the same HFI.
 * Note that although we are single threaded due to the send engine, we
 * still have to protect against post_send(). We don't have to worry
 * about receive interrupts since this is a connected protocol and all
 * packets will pass through here.
 */
static void ruc_loopback(struct rvt_qp *sqp)
{
	struct hfi1_ibport *ibp = to_iport(sqp->ibqp.device, sqp->port_num);
	struct rvt_qp *qp;
	struct rvt_swqe *wqe;
	struct rvt_sge *sge;
	unsigned long flags;
	struct ib_wc wc;
	u64 sdata;
	atomic64_t *maddr;
	enum ib_wc_status send_status;
	int release;
	int ret;
	int copy_last = 0;
	u32 to;
	int local_ops = 0;

	rcu_read_lock();

	/*
	 * Note that we check the responder QP state after
	 * checking the requester's state.
	 */
	qp = rvt_lookup_qpn(ib_to_rvt(sqp->ibqp.device), &ibp->rvp,
			    sqp->remote_qpn);

	spin_lock_irqsave(&sqp->s_lock, flags);

	/* Return if we are already busy processing a work request. */
	if ((sqp->s_flags & (RVT_S_BUSY | RVT_S_ANY_WAIT)) ||
	    !(ib_rvt_state_ops[sqp->state] & RVT_PROCESS_OR_FLUSH_SEND))
		goto unlock;

	sqp->s_flags |= RVT_S_BUSY;

again:
	smp_read_barrier_depends(); /* see post_one_send() */
	if (sqp->s_last == ACCESS_ONCE(sqp->s_head))
		goto clr_busy;
	wqe = rvt_get_swqe_ptr(sqp, sqp->s_last);

	/* Return if it is not OK to start a new work request. */
	if (!(ib_rvt_state_ops[sqp->state] & RVT_PROCESS_NEXT_SEND_OK)) {
		if (!(ib_rvt_state_ops[sqp->state] & RVT_FLUSH_SEND))
			goto clr_busy;
		/* We are in the error state, flush the work request. */
		send_status = IB_WC_WR_FLUSH_ERR;
		goto flush_send;
	}

	/*
	 * We can rely on the entry not changing without the s_lock
	 * being held until we update s_last.
	 * We increment s_cur to indicate s_last is in progress.
	 */
	if (sqp->s_last == sqp->s_cur) {
		if (++sqp->s_cur >= sqp->s_size)
			sqp->s_cur = 0;
	}
	spin_unlock_irqrestore(&sqp->s_lock, flags);

	if (!qp || !(ib_rvt_state_ops[qp->state] & RVT_PROCESS_RECV_OK) ||
	    qp->ibqp.qp_type != sqp->ibqp.qp_type) {
		ibp->rvp.n_pkt_drops++;
		/*
		 * For RC, the requester would timeout and retry so
		 * shortcut the timeouts and just signal too many retries.
		 */
		if (sqp->ibqp.qp_type == IB_QPT_RC)
			send_status = IB_WC_RETRY_EXC_ERR;
		else
			send_status = IB_WC_SUCCESS;
		goto serr;
	}

	memset(&wc, 0, sizeof(wc));
	send_status = IB_WC_SUCCESS;

	release = 1;
	sqp->s_sge.sge = wqe->sg_list[0];
	sqp->s_sge.sg_list = wqe->sg_list + 1;
	sqp->s_sge.num_sge = wqe->wr.num_sge;
	sqp->s_len = wqe->length;
	switch (wqe->wr.opcode) {
	case IB_WR_REG_MR:
		goto send_comp;

	case IB_WR_LOCAL_INV:
		if (!(wqe->wr.send_flags & RVT_SEND_COMPLETION_ONLY)) {
			if (rvt_invalidate_rkey(sqp,
						wqe->wr.ex.invalidate_rkey))
				send_status = IB_WC_LOC_PROT_ERR;
			local_ops = 1;
		}
		goto send_comp;

	case IB_WR_SEND_WITH_INV:
		if (!rvt_invalidate_rkey(qp, wqe->wr.ex.invalidate_rkey)) {
			wc.wc_flags = IB_WC_WITH_INVALIDATE;
			wc.ex.invalidate_rkey = wqe->wr.ex.invalidate_rkey;
		}
		goto send;

	case IB_WR_SEND_WITH_IMM:
		wc.wc_flags = IB_WC_WITH_IMM;
		wc.ex.imm_data = wqe->wr.ex.imm_data;
		/* FALLTHROUGH */
	case IB_WR_SEND:
send:
		ret = hfi1_rvt_get_rwqe(qp, 0);
		if (ret < 0)
			goto op_err;
		if (!ret)
			goto rnr_nak;
		break;

	case IB_WR_RDMA_WRITE_WITH_IMM:
		if (unlikely(!(qp->qp_access_flags & IB_ACCESS_REMOTE_WRITE)))
			goto inv_err;
		wc.wc_flags = IB_WC_WITH_IMM;
		wc.ex.imm_data = wqe->wr.ex.imm_data;
		ret = hfi1_rvt_get_rwqe(qp, 1);
		if (ret < 0)
			goto op_err;
		if (!ret)
			goto rnr_nak;
		/* skip copy_last set and qp_access_flags recheck */
		goto do_write;
	case IB_WR_RDMA_WRITE:
		copy_last = ibpd_to_rvtpd(qp->ibqp.pd)->user;
		if (unlikely(!(qp->qp_access_flags & IB_ACCESS_REMOTE_WRITE)))
			goto inv_err;
do_write:
		if (wqe->length == 0)
			break;
		if (unlikely(!rvt_rkey_ok(qp, &qp->r_sge.sge, wqe->length,
					  wqe->rdma_wr.remote_addr,
					  wqe->rdma_wr.rkey,
					  IB_ACCESS_REMOTE_WRITE)))
			goto acc_err;
		qp->r_sge.sg_list = NULL;
		qp->r_sge.num_sge = 1;
		qp->r_sge.total_len = wqe->length;
		break;

	case IB_WR_RDMA_READ:
		if (unlikely(!(qp->qp_access_flags & IB_ACCESS_REMOTE_READ)))
			goto inv_err;
		if (unlikely(!rvt_rkey_ok(qp, &sqp->s_sge.sge, wqe->length,
					  wqe->rdma_wr.remote_addr,
					  wqe->rdma_wr.rkey,
					  IB_ACCESS_REMOTE_READ)))
			goto acc_err;
		release = 0;
		sqp->s_sge.sg_list = NULL;
		sqp->s_sge.num_sge = 1;
		qp->r_sge.sge = wqe->sg_list[0];
		qp->r_sge.sg_list = wqe->sg_list + 1;
		qp->r_sge.num_sge = wqe->wr.num_sge;
		qp->r_sge.total_len = wqe->length;
		break;

	case IB_WR_ATOMIC_CMP_AND_SWP:
	case IB_WR_ATOMIC_FETCH_AND_ADD:
		if (unlikely(!(qp->qp_access_flags & IB_ACCESS_REMOTE_ATOMIC)))
			goto inv_err;
		if (unlikely(!rvt_rkey_ok(qp, &qp->r_sge.sge, sizeof(u64),
					  wqe->atomic_wr.remote_addr,
					  wqe->atomic_wr.rkey,
					  IB_ACCESS_REMOTE_ATOMIC)))
			goto acc_err;
		/* Perform atomic OP and save result. */
		maddr = (atomic64_t *)qp->r_sge.sge.vaddr;
		sdata = wqe->atomic_wr.compare_add;
		*(u64 *)sqp->s_sge.sge.vaddr =
			(wqe->wr.opcode == IB_WR_ATOMIC_FETCH_AND_ADD) ?
			(u64)atomic64_add_return(sdata, maddr) - sdata :
			(u64)cmpxchg((u64 *)qp->r_sge.sge.vaddr,
				     sdata, wqe->atomic_wr.swap);
		rvt_put_mr(qp->r_sge.sge.mr);
		qp->r_sge.num_sge = 0;
		goto send_comp;

	default:
		send_status = IB_WC_LOC_QP_OP_ERR;
		goto serr;
	}

	sge = &sqp->s_sge.sge;
	while (sqp->s_len) {
		u32 len = sqp->s_len;

		if (len > sge->length)
			len = sge->length;
		if (len > sge->sge_length)
			len = sge->sge_length;
		WARN_ON_ONCE(len == 0);
		hfi1_copy_sge(&qp->r_sge, sge->vaddr, len, release, copy_last);
		sge->vaddr += len;
		sge->length -= len;
		sge->sge_length -= len;
		if (sge->sge_length == 0) {
			if (!release)
				rvt_put_mr(sge->mr);
			if (--sqp->s_sge.num_sge)
				*sge = *sqp->s_sge.sg_list++;
		} else if (sge->length == 0 && sge->mr->lkey) {
			if (++sge->n >= RVT_SEGSZ) {
				if (++sge->m >= sge->mr->mapsz)
					break;
				sge->n = 0;
			}
			sge->vaddr =
				sge->mr->map[sge->m]->segs[sge->n].vaddr;
			sge->length =
				sge->mr->map[sge->m]->segs[sge->n].length;
		}
		sqp->s_len -= len;
	}
	if (release)
		rvt_put_ss(&qp->r_sge);

	if (!test_and_clear_bit(RVT_R_WRID_VALID, &qp->r_aflags))
		goto send_comp;

	if (wqe->wr.opcode == IB_WR_RDMA_WRITE_WITH_IMM)
		wc.opcode = IB_WC_RECV_RDMA_WITH_IMM;
	else
		wc.opcode = IB_WC_RECV;
	wc.wr_id = qp->r_wr_id;
	wc.status = IB_WC_SUCCESS;
	wc.byte_len = wqe->length;
	wc.qp = &qp->ibqp;
	wc.src_qp = qp->remote_qpn;
	wc.slid = qp->remote_ah_attr.dlid;
	wc.sl = qp->remote_ah_attr.sl;
	wc.port_num = 1;
	/* Signal completion event if the solicited bit is set. */
	rvt_cq_enter(ibcq_to_rvtcq(qp->ibqp.recv_cq), &wc,
		     wqe->wr.send_flags & IB_SEND_SOLICITED);

send_comp:
	spin_lock_irqsave(&sqp->s_lock, flags);
	ibp->rvp.n_loop_pkts++;
flush_send:
	sqp->s_rnr_retry = sqp->s_rnr_retry_cnt;
	hfi1_send_complete(sqp, wqe, send_status);
	if (local_ops) {
		atomic_dec(&sqp->local_ops_pending);
		local_ops = 0;
	}
	goto again;

rnr_nak:
	/* Handle RNR NAK */
	if (qp->ibqp.qp_type == IB_QPT_UC)
		goto send_comp;
	ibp->rvp.n_rnr_naks++;
	/*
	 * Note: we don't need the s_lock held since the BUSY flag
	 * makes this single threaded.
	 */
	if (sqp->s_rnr_retry == 0) {
		send_status = IB_WC_RNR_RETRY_EXC_ERR;
		goto serr;
	}
	if (sqp->s_rnr_retry_cnt < 7)
		sqp->s_rnr_retry--;
	spin_lock_irqsave(&sqp->s_lock, flags);
	if (!(ib_rvt_state_ops[sqp->state] & RVT_PROCESS_RECV_OK))
		goto clr_busy;
	to = ib_hfi1_rnr_table[qp->r_min_rnr_timer];
	hfi1_add_rnr_timer(sqp, to);
	goto clr_busy;

op_err:
	send_status = IB_WC_REM_OP_ERR;
	wc.status = IB_WC_LOC_QP_OP_ERR;
	goto err;

inv_err:
	send_status = IB_WC_REM_INV_REQ_ERR;
	wc.status = IB_WC_LOC_QP_OP_ERR;
	goto err;

acc_err:
	send_status = IB_WC_REM_ACCESS_ERR;
	wc.status = IB_WC_LOC_PROT_ERR;
err:
	/* responder goes to error state */
	hfi1_rc_error(qp, wc.status);

serr:
	spin_lock_irqsave(&sqp->s_lock, flags);
	hfi1_send_complete(sqp, wqe, send_status);
	if (sqp->ibqp.qp_type == IB_QPT_RC) {
		int lastwqe = rvt_error_qp(sqp, IB_WC_WR_FLUSH_ERR);

		sqp->s_flags &= ~RVT_S_BUSY;
		spin_unlock_irqrestore(&sqp->s_lock, flags);
		if (lastwqe) {
			struct ib_event ev;

			ev.device = sqp->ibqp.device;
			ev.element.qp = &sqp->ibqp;
			ev.event = IB_EVENT_QP_LAST_WQE_REACHED;
			sqp->ibqp.event_handler(&ev, sqp->ibqp.qp_context);
		}
		goto done;
	}
clr_busy:
	sqp->s_flags &= ~RVT_S_BUSY;
unlock:
	spin_unlock_irqrestore(&sqp->s_lock, flags);
done:
	rcu_read_unlock();
}

/**
 * hfi1_make_grh - construct a GRH header
 * @ibp: a pointer to the IB port
 * @hdr: a pointer to the GRH header being constructed
 * @grh: the global route address to send to
 * @hwords: the number of 32 bit words of header being sent
 * @nwords: the number of 32 bit words of data being sent
 *
 * Return the size of the header in 32 bit words.
 */
u32 hfi1_make_grh(struct hfi1_ibport *ibp, struct ib_grh *hdr,
		  struct ib_global_route *grh, u32 hwords, u32 nwords)
{
	hdr->version_tclass_flow =
		cpu_to_be32((IB_GRH_VERSION << IB_GRH_VERSION_SHIFT) |
			    (grh->traffic_class << IB_GRH_TCLASS_SHIFT) |
			    (grh->flow_label << IB_GRH_FLOW_SHIFT));
	hdr->paylen = cpu_to_be16((hwords - 2 + nwords + SIZE_OF_CRC) << 2);
	/* next_hdr is defined by C8-7 in ch. 8.4.1 */
	hdr->next_hdr = IB_GRH_NEXT_HDR;
	hdr->hop_limit = grh->hop_limit;
	/* The SGID is 32-bit aligned. */
	hdr->sgid.global.subnet_prefix = ibp->rvp.gid_prefix;
	hdr->sgid.global.interface_id =
		grh->sgid_index && grh->sgid_index < ARRAY_SIZE(ibp->guids) ?
		ibp->guids[grh->sgid_index - 1] :
		cpu_to_be64(ppd_from_ibp(ibp)->guid);
	hdr->dgid = grh->dgid;

	/* GRH header size in 32-bit words. */
	return sizeof(struct ib_grh) / sizeof(u32);
}
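
/*
 * Illustrative caller (a sketch mirroring the GRH branch of
 * hfi1_make_ruc_header() below): the returned word count is added to the
 * running header word total so the LRH packet length stays consistent.
 *
 *	qp->s_hdrwords += hfi1_make_grh(ibp, &ps->s_txreq->phdr.hdr.u.l.grh,
 *					&qp->remote_ah_attr.grh,
 *					qp->s_hdrwords, nwords);
 */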

#define BTH2_OFFSET (offsetof(struct hfi1_sdma_header, hdr.u.oth.bth[2]) / 4)

/**
 * build_ahg - create ahg in s_ahg
 * @qp: a pointer to QP
 * @npsn: the next PSN for the request/response
 *
 * This routine handles the AHG by allocating an ahg entry and causing the
 * copy of the first middle.
 *
 * Subsequent middles use the copied entry, editing the
 * PSN with 1 or 2 edits.
 */
static inline void build_ahg(struct rvt_qp *qp, u32 npsn)
{
	struct hfi1_qp_priv *priv = qp->priv;

	if (unlikely(qp->s_flags & RVT_S_AHG_CLEAR))
		clear_ahg(qp);
	if (!(qp->s_flags & RVT_S_AHG_VALID)) {
		/* first middle that needs copy */
		if (qp->s_ahgidx < 0)
			qp->s_ahgidx = sdma_ahg_alloc(priv->s_sde);
		if (qp->s_ahgidx >= 0) {
			qp->s_ahgpsn = npsn;
			priv->s_ahg->tx_flags |= SDMA_TXREQ_F_AHG_COPY;
			/* save to protect a change in another thread */
			priv->s_ahg->ahgidx = qp->s_ahgidx;
			qp->s_flags |= RVT_S_AHG_VALID;
		}
	} else {
		/* subsequent middle after valid */
		if (qp->s_ahgidx >= 0) {
			priv->s_ahg->tx_flags |= SDMA_TXREQ_F_USE_AHG;
			priv->s_ahg->ahgidx = qp->s_ahgidx;
			priv->s_ahg->ahgcount++;
			priv->s_ahg->ahgdesc[0] =
				sdma_build_ahg_descriptor(
					(__force u16)cpu_to_be16((u16)npsn),
					BTH2_OFFSET,
					16,
					16);
			if ((npsn & 0xffff0000) !=
			    (qp->s_ahgpsn & 0xffff0000)) {
				priv->s_ahg->ahgcount++;
				priv->s_ahg->ahgdesc[1] =
					sdma_build_ahg_descriptor(
						(__force u16)cpu_to_be16(
						(u16)(npsn >> 16)),
						BTH2_OFFSET,
						0,
						16);
			}
		}
	}
}

void hfi1_make_ruc_header(struct rvt_qp *qp, struct ib_other_headers *ohdr,
			  u32 bth0, u32 bth2, int middle,
			  struct hfi1_pkt_state *ps)
{
	struct hfi1_qp_priv *priv = qp->priv;
	struct hfi1_ibport *ibp = ps->ibp;
	u16 lrh0;
	u32 nwords;
	u32 extra_bytes;
	u32 bth1;

	/* Construct the header. */
	extra_bytes = -qp->s_cur_size & 3;
	nwords = (qp->s_cur_size + extra_bytes) >> 2;
	lrh0 = HFI1_LRH_BTH;
	if (unlikely(qp->remote_ah_attr.ah_flags & IB_AH_GRH)) {
		qp->s_hdrwords += hfi1_make_grh(ibp,
						&ps->s_txreq->phdr.hdr.u.l.grh,
						&qp->remote_ah_attr.grh,
						qp->s_hdrwords, nwords);
		lrh0 = HFI1_LRH_GRH;
		middle = 0;
	}
	lrh0 |= (priv->s_sc & 0xf) << 12 | (qp->remote_ah_attr.sl & 0xf) << 4;
	/*
	 * reset s_ahg/AHG fields
	 *
	 * This ensures that the ahgentry/ahgcount
	 * are at a non-AHG default to protect
	 * build_verbs_tx_desc() from using
	 * a stale ahgidx.
	 *
	 * build_ahg() will modify as appropriate
	 * to use the AHG feature.
	 */
	priv->s_ahg->tx_flags = 0;
	priv->s_ahg->ahgcount = 0;
	priv->s_ahg->ahgidx = 0;
	if (qp->s_mig_state == IB_MIG_MIGRATED)
		bth0 |= IB_BTH_MIG_REQ;
	else
		middle = 0;
	if (middle)
		build_ahg(qp, bth2);
	else
		qp->s_flags &= ~RVT_S_AHG_VALID;
	ps->s_txreq->phdr.hdr.lrh[0] = cpu_to_be16(lrh0);
	ps->s_txreq->phdr.hdr.lrh[1] = cpu_to_be16(qp->remote_ah_attr.dlid);
	ps->s_txreq->phdr.hdr.lrh[2] =
		cpu_to_be16(qp->s_hdrwords + nwords + SIZE_OF_CRC);
	ps->s_txreq->phdr.hdr.lrh[3] = cpu_to_be16(ppd_from_ibp(ibp)->lid |
				       qp->remote_ah_attr.src_path_bits);
	bth0 |= hfi1_get_pkey(ibp, qp->s_pkey_index);
	bth0 |= extra_bytes << 20;
	ohdr->bth[0] = cpu_to_be32(bth0);
	bth1 = qp->remote_qpn;
	if (qp->s_flags & RVT_S_ECN) {
		qp->s_flags &= ~RVT_S_ECN;
		/* we recently received a FECN, so return a BECN */
		bth1 |= (HFI1_BECN_MASK << HFI1_BECN_SHIFT);
	}
	ohdr->bth[1] = cpu_to_be32(bth1);
	ohdr->bth[2] = cpu_to_be32(bth2);
}

/* when sending, force a reschedule every one of these periods */
#define SEND_RESCHED_TIMEOUT (5 * HZ)  /* 5s in jiffies */

void _hfi1_do_send(struct work_struct *work)
{
	struct iowait *wait = container_of(work, struct iowait, iowork);
	struct rvt_qp *qp = iowait_to_qp(wait);

	hfi1_do_send(qp);
}

/**
 * hfi1_do_send - perform a send on a QP
 * @qp: a pointer to the QP
 *
 * Process entries in the send work queue until credit or queue is
 * exhausted. Only allow one CPU to send a packet per QP.
 * Otherwise, two threads could send packets out of order.
 */
void hfi1_do_send(struct rvt_qp *qp)
{
	struct hfi1_pkt_state ps;
	struct hfi1_qp_priv *priv = qp->priv;
	int (*make_req)(struct rvt_qp *qp, struct hfi1_pkt_state *ps);
	unsigned long timeout;
	unsigned long timeout_int;
	int cpu;

	ps.dev = to_idev(qp->ibqp.device);
	ps.ibp = to_iport(qp->ibqp.device, qp->port_num);
	ps.ppd = ppd_from_ibp(ps.ibp);

	switch (qp->ibqp.qp_type) {
	case IB_QPT_RC:
		if (!loopback &&
		    ((qp->remote_ah_attr.dlid &
		      ~((1 << ps.ppd->lmc) - 1)) == ps.ppd->lid)) {
			ruc_loopback(qp);
			return;
		}
		make_req = hfi1_make_rc_req;
		timeout_int = (qp->timeout_jiffies);
		break;
	case IB_QPT_UC:
		if (!loopback &&
		    ((qp->remote_ah_attr.dlid &
		      ~((1 << ps.ppd->lmc) - 1)) == ps.ppd->lid)) {
			ruc_loopback(qp);
			return;
		}
		make_req = hfi1_make_uc_req;
		timeout_int = SEND_RESCHED_TIMEOUT;
		break;
	default:
		make_req = hfi1_make_ud_req;
		timeout_int = SEND_RESCHED_TIMEOUT;
	}

	spin_lock_irqsave(&qp->s_lock, ps.flags);

	/* Return if we are already busy processing a work request. */
	if (!hfi1_send_ok(qp)) {
		spin_unlock_irqrestore(&qp->s_lock, ps.flags);
		return;
	}

	qp->s_flags |= RVT_S_BUSY;

	timeout = jiffies + (timeout_int) / 8;
	cpu = priv->s_sde ? priv->s_sde->cpu :
			    cpumask_first(cpumask_of_node(ps.ppd->dd->node));
	/* ensure a pre-built packet is handled */
	ps.s_txreq = get_waiting_verbs_txreq(qp);
	do {
		/* Check for a constructed packet to be sent. */
		if (qp->s_hdrwords != 0) {
			spin_unlock_irqrestore(&qp->s_lock, ps.flags);
			/*
			 * If the packet cannot be sent now, return and
			 * the send engine will be woken up later.
			 */
			if (hfi1_verbs_send(qp, &ps))
				return;
			/* Record that s_ahg is empty. */
			qp->s_hdrwords = 0;
			/* allow other tasks to run */
			if (unlikely(time_after(jiffies, timeout))) {
				if (workqueue_congested(cpu,
							ps.ppd->hfi1_wq)) {
					spin_lock_irqsave(&qp->s_lock,
							  ps.flags);
					qp->s_flags &= ~RVT_S_BUSY;
					hfi1_schedule_send(qp);
					spin_unlock_irqrestore(&qp->s_lock,
							       ps.flags);
					this_cpu_inc(
						*ps.ppd->dd->send_schedule);
					return;
				}
				if (!irqs_disabled()) {
					cond_resched();
					this_cpu_inc(
						*ps.ppd->dd->send_schedule);
				}
				timeout = jiffies + (timeout_int) / 8;
			}
			spin_lock_irqsave(&qp->s_lock, ps.flags);
		}
	} while (make_req(qp, &ps));

	spin_unlock_irqrestore(&qp->s_lock, ps.flags);
}

/*
 * This should be called with s_lock held.
 */
void hfi1_send_complete(struct rvt_qp *qp, struct rvt_swqe *wqe,
			enum ib_wc_status status)
{
	u32 old_last, last;
	unsigned i;

	if (!(ib_rvt_state_ops[qp->state] & RVT_PROCESS_OR_FLUSH_SEND))
		return;

	last = qp->s_last;
	old_last = last;
	if (++last >= qp->s_size)
		last = 0;
	qp->s_last = last;
	/* See post_send() */
	barrier();
	for (i = 0; i < wqe->wr.num_sge; i++) {
		struct rvt_sge *sge = &wqe->sg_list[i];

		rvt_put_mr(sge->mr);
	}
	if (qp->ibqp.qp_type == IB_QPT_UD ||
	    qp->ibqp.qp_type == IB_QPT_SMI ||
	    qp->ibqp.qp_type == IB_QPT_GSI)
		atomic_dec(&ibah_to_rvtah(wqe->ud_wr.ah)->refcount);

	/* See ch. 11.2.4.1 and 10.7.3.1 */
	if (!(qp->s_flags & RVT_S_SIGNAL_REQ_WR) ||
	    (wqe->wr.send_flags & IB_SEND_SIGNALED) ||
	    status != IB_WC_SUCCESS) {
		struct ib_wc wc;

		memset(&wc, 0, sizeof(wc));
		wc.wr_id = wqe->wr.wr_id;
		wc.status = status;
		wc.opcode = ib_hfi1_wc_opcode[wqe->wr.opcode];
		wc.qp = &qp->ibqp;
		if (status == IB_WC_SUCCESS)
			wc.byte_len = wqe->length;
		rvt_cq_enter(ibcq_to_rvtcq(qp->ibqp.send_cq), &wc,
			     status != IB_WC_SUCCESS);
	}

	if (qp->s_acked == old_last)
		qp->s_acked = last;
	if (qp->s_cur == old_last)
		qp->s_cur = last;
	if (qp->s_tail == old_last)
		qp->s_tail = last;
	if (qp->state == IB_QPS_SQD && last == qp->s_cur)
		qp->s_draining = 0;
}