/*
 * Copyright(c) 2015 - 2017 Intel Corporation.
 *
 * This file is provided under a dual BSD/GPLv2 license. When using or
 * redistributing this file, you may do so under either license.
 *
 * GPL LICENSE SUMMARY
 *
 * This program is free software; you can redistribute it and/or modify
 * it under the terms of version 2 of the GNU General Public License as
 * published by the Free Software Foundation.
 *
 * This program is distributed in the hope that it will be useful, but
 * WITHOUT ANY WARRANTY; without even the implied warranty of
 * MERCHANTABILITY or FITNESS FOR A PARTICULAR PURPOSE. See the GNU
 * General Public License for more details.
 *
 * BSD LICENSE
 *
 * Redistribution and use in source and binary forms, with or without
 * modification, are permitted provided that the following conditions
 * are met:
 *
 *  - Redistributions of source code must retain the above copyright
 *    notice, this list of conditions and the following disclaimer.
 *  - Redistributions in binary form must reproduce the above copyright
 *    notice, this list of conditions and the following disclaimer in
 *    the documentation and/or other materials provided with the
 *    distribution.
 *  - Neither the name of Intel Corporation nor the names of its
 *    contributors may be used to endorse or promote products derived
 *    from this software without specific prior written permission.
 *
 * THIS SOFTWARE IS PROVIDED BY THE COPYRIGHT HOLDERS AND CONTRIBUTORS
 * "AS IS" AND ANY EXPRESS OR IMPLIED WARRANTIES, INCLUDING, BUT NOT
 * LIMITED TO, THE IMPLIED WARRANTIES OF MERCHANTABILITY AND FITNESS FOR
 * A PARTICULAR PURPOSE ARE DISCLAIMED. IN NO EVENT SHALL THE COPYRIGHT
 * OWNER OR CONTRIBUTORS BE LIABLE FOR ANY DIRECT, INDIRECT, INCIDENTAL,
 * SPECIAL, EXEMPLARY, OR CONSEQUENTIAL DAMAGES (INCLUDING, BUT NOT
 * LIMITED TO, PROCUREMENT OF SUBSTITUTE GOODS OR SERVICES; LOSS OF USE,
 * DATA, OR PROFITS; OR BUSINESS INTERRUPTION) HOWEVER CAUSED AND ON ANY
 * THEORY OF LIABILITY, WHETHER IN CONTRACT, STRICT LIABILITY, OR TORT
 * (INCLUDING NEGLIGENCE OR OTHERWISE) ARISING IN ANY WAY OUT OF THE USE
 * OF THIS SOFTWARE, EVEN IF ADVISED OF THE POSSIBILITY OF SUCH DAMAGE.
 *
 */

#include <linux/err.h>
#include <linux/vmalloc.h>
#include <linux/hash.h>
#include <linux/module.h>
#include <linux/seq_file.h>
#include <rdma/rdma_vt.h>
#include <rdma/rdmavt_qp.h>
#include <rdma/ib_verbs.h>

#include "hfi.h"
#include "qp.h"
#include "trace.h"
#include "verbs_txreq.h"

unsigned int hfi1_qp_table_size = 256;
module_param_named(qp_table_size, hfi1_qp_table_size, uint, S_IRUGO);
MODULE_PARM_DESC(qp_table_size, "QP table size");

static void flush_tx_list(struct rvt_qp *qp);
static int iowait_sleep(
	struct sdma_engine *sde,
	struct iowait *wait,
	struct sdma_txreq *stx,
	unsigned int seq,
	bool pkts_sent);
static void iowait_wakeup(struct iowait *wait, int reason);
static void iowait_sdma_drained(struct iowait *wait);
static void qp_pio_drain(struct rvt_qp *qp);

const struct rvt_operation_params hfi1_post_parms[RVT_OPERATION_MAX] = {
[IB_WR_RDMA_WRITE] = {
	.length = sizeof(struct ib_rdma_wr),
	.qpt_support = BIT(IB_QPT_UC) | BIT(IB_QPT_RC),
},

[IB_WR_RDMA_READ] = {
	.length = sizeof(struct ib_rdma_wr),
	.qpt_support = BIT(IB_QPT_RC),
	.flags = RVT_OPERATION_ATOMIC,
},

[IB_WR_ATOMIC_CMP_AND_SWP] = {
	.length = sizeof(struct ib_atomic_wr),
	.qpt_support = BIT(IB_QPT_RC),
	.flags = RVT_OPERATION_ATOMIC | RVT_OPERATION_ATOMIC_SGE,
},

[IB_WR_ATOMIC_FETCH_AND_ADD] = {
	.length = sizeof(struct ib_atomic_wr),
	.qpt_support = BIT(IB_QPT_RC),
	.flags = RVT_OPERATION_ATOMIC | RVT_OPERATION_ATOMIC_SGE,
},

[IB_WR_RDMA_WRITE_WITH_IMM] = {
	.length = sizeof(struct ib_rdma_wr),
	.qpt_support = BIT(IB_QPT_UC) | BIT(IB_QPT_RC),
},

[IB_WR_SEND] = {
	.length = sizeof(struct ib_send_wr),
	.qpt_support = BIT(IB_QPT_UD) | BIT(IB_QPT_SMI) | BIT(IB_QPT_GSI) |
		       BIT(IB_QPT_UC) | BIT(IB_QPT_RC),
},

[IB_WR_SEND_WITH_IMM] = {
	.length = sizeof(struct ib_send_wr),
	.qpt_support = BIT(IB_QPT_UD) | BIT(IB_QPT_SMI) | BIT(IB_QPT_GSI) |
		       BIT(IB_QPT_UC) | BIT(IB_QPT_RC),
},

[IB_WR_REG_MR] = {
	.length = sizeof(struct ib_reg_wr),
	.qpt_support = BIT(IB_QPT_UC) | BIT(IB_QPT_RC),
	.flags = RVT_OPERATION_LOCAL,
},

[IB_WR_LOCAL_INV] = {
	.length = sizeof(struct ib_send_wr),
	.qpt_support = BIT(IB_QPT_UC) | BIT(IB_QPT_RC),
	.flags = RVT_OPERATION_LOCAL,
},

[IB_WR_SEND_WITH_INV] = {
	.length = sizeof(struct ib_send_wr),
	.qpt_support = BIT(IB_QPT_RC),
},

};
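
/*
 * hfi1_post_parms[] is indexed by IB work request opcode.  rdmavt
 * consults it on the post-send path to validate each WR before it is
 * queued: the WR must be at least .length bytes, the posting QP's
 * type must be set in .qpt_support, and the .flags bits gate atomic
 * and local operations.  A sketch of the type check as the rdmavt
 * core might apply it (illustrative, not the verbatim rdmavt code):
 *
 *	if (!(hfi1_post_parms[wr->opcode].qpt_support &
 *	      BIT(qp->ibqp.qp_type)))
 *		return -EINVAL;
 */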

static void flush_tx_list(struct rvt_qp *qp)
{
	struct hfi1_qp_priv *priv = qp->priv;

	while (!list_empty(&priv->s_iowait.tx_head)) {
		struct sdma_txreq *tx;

		tx = list_first_entry(
			&priv->s_iowait.tx_head,
			struct sdma_txreq,
			list);
		list_del_init(&tx->list);
		hfi1_put_txreq(
			container_of(tx, struct verbs_txreq, txreq));
	}
}

static void flush_iowait(struct rvt_qp *qp)
{
	struct hfi1_qp_priv *priv = qp->priv;
	unsigned long flags;
	seqlock_t *lock = priv->s_iowait.lock;

	if (!lock)
		return;
	write_seqlock_irqsave(lock, flags);
	if (!list_empty(&priv->s_iowait.list)) {
		list_del_init(&priv->s_iowait.list);
		priv->s_iowait.lock = NULL;
		rvt_put_qp(qp);
	}
	write_sequnlock_irqrestore(lock, flags);
}

static inline int opa_mtu_enum_to_int(int mtu)
{
	switch (mtu) {
	case OPA_MTU_8192:  return 8192;
	case OPA_MTU_10240: return 10240;
	default:            return -1;
	}
}

/*
 * This function is what we would push to the core layer if we wanted to be a
 * "first class citizen".  Instead we hide this here and rely on Verbs ULPs
 * to blindly pass the MTU enum value from the PathRecord to us.
 */
static inline int verbs_mtu_enum_to_int(struct ib_device *dev, enum ib_mtu mtu)
{
	int val;

	/* Constraining 10KB packets to 8KB packets */
	if (mtu == (enum ib_mtu)OPA_MTU_10240)
		mtu = OPA_MTU_8192;
	val = opa_mtu_enum_to_int((int)mtu);
	if (val > 0)
		return val;
	return ib_mtu_enum_to_int(mtu);
}

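/*
 * For example, a PathRecord carrying OPA_MTU_10240 is clamped to
 * OPA_MTU_8192 by verbs_mtu_enum_to_int() above and yields 8192
 * bytes, while a standard value such as IB_MTU_4096 misses both OPA
 * cases and falls through to ib_mtu_enum_to_int(), yielding 4096.
 */
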
int hfi1_check_modify_qp(struct rvt_qp *qp, struct ib_qp_attr *attr,
			 int attr_mask, struct ib_udata *udata)
{
	struct ib_qp *ibqp = &qp->ibqp;
	struct hfi1_ibdev *dev = to_idev(ibqp->device);
	struct hfi1_devdata *dd = dd_from_dev(dev);
	u8 sc;

	if (attr_mask & IB_QP_AV) {
		sc = ah_to_sc(ibqp->device, &attr->ah_attr);
		if (sc == 0xf)
			return -EINVAL;

		if (!qp_to_sdma_engine(qp, sc) &&
		    dd->flags & HFI1_HAS_SEND_DMA)
			return -EINVAL;

		if (!qp_to_send_context(qp, sc))
			return -EINVAL;
	}

	if (attr_mask & IB_QP_ALT_PATH) {
		sc = ah_to_sc(ibqp->device, &attr->alt_ah_attr);
		if (sc == 0xf)
			return -EINVAL;

		if (!qp_to_sdma_engine(qp, sc) &&
		    dd->flags & HFI1_HAS_SEND_DMA)
			return -EINVAL;

		if (!qp_to_send_context(qp, sc))
			return -EINVAL;
	}

	return 0;
}

/*
 * qp_set_16b - Set the hdr_type based on whether the slid or the
 * dlid in the connection is extended.  Only applicable for RC and UC
 * QPs.  UD QPs determine this on the fly from the ah in the wqe.
 */
static inline void qp_set_16b(struct rvt_qp *qp)
{
	struct hfi1_pportdata *ppd;
	struct hfi1_ibport *ibp;
	struct hfi1_qp_priv *priv = qp->priv;

	/* Update ah_attr to account for extended LIDs */
	hfi1_update_ah_attr(qp->ibqp.device, &qp->remote_ah_attr);

	/* Create 32 bit LIDs */
	hfi1_make_opa_lid(&qp->remote_ah_attr);

	if (!(rdma_ah_get_ah_flags(&qp->remote_ah_attr) & IB_AH_GRH))
		return;

	ibp = to_iport(qp->ibqp.device, qp->port_num);
	ppd = ppd_from_ibp(ibp);
	priv->hdr_type = hfi1_get_hdr_type(ppd->lid, &qp->remote_ah_attr);
}

void hfi1_modify_qp(struct rvt_qp *qp, struct ib_qp_attr *attr,
		    int attr_mask, struct ib_udata *udata)
{
	struct ib_qp *ibqp = &qp->ibqp;
	struct hfi1_qp_priv *priv = qp->priv;

	if (attr_mask & IB_QP_AV) {
		priv->s_sc = ah_to_sc(ibqp->device, &qp->remote_ah_attr);
		priv->s_sde = qp_to_sdma_engine(qp, priv->s_sc);
		priv->s_sendcontext = qp_to_send_context(qp, priv->s_sc);
		qp_set_16b(qp);
	}

	if (attr_mask & IB_QP_PATH_MIG_STATE &&
	    attr->path_mig_state == IB_MIG_MIGRATED &&
	    qp->s_mig_state == IB_MIG_ARMED) {
		qp->s_flags |= RVT_S_AHG_CLEAR;
		priv->s_sc = ah_to_sc(ibqp->device, &qp->remote_ah_attr);
		priv->s_sde = qp_to_sdma_engine(qp, priv->s_sc);
		priv->s_sendcontext = qp_to_send_context(qp, priv->s_sc);
		qp_set_16b(qp);
	}
}

/**
 * hfi1_check_send_wqe - validate wqe
 * @qp: the qp
 * @wqe: the built wqe
 *
 * Validate the wqe.  This is called prior to inserting the wqe into
 * the ring, but after the wqe has been set up.
 *
 * Returns -EINVAL on failure, 0 on success to defer the wqe to
 * scheduled progress, or 1 (wqe->length <= piothreshold) to ask the
 * caller to progress the small send directly.
 */
int hfi1_check_send_wqe(struct rvt_qp *qp,
			struct rvt_swqe *wqe)
{
	struct hfi1_ibport *ibp = to_iport(qp->ibqp.device, qp->port_num);
	struct rvt_ah *ah;

	switch (qp->ibqp.qp_type) {
	case IB_QPT_RC:
	case IB_QPT_UC:
		if (wqe->length > 0x80000000U)
			return -EINVAL;
		break;
	case IB_QPT_SMI:
		ah = ibah_to_rvtah(wqe->ud_wr.ah);
		if (wqe->length > (1 << ah->log_pmtu))
			return -EINVAL;
		break;
	case IB_QPT_GSI:
	case IB_QPT_UD:
		ah = ibah_to_rvtah(wqe->ud_wr.ah);
		if (wqe->length > (1 << ah->log_pmtu))
			return -EINVAL;
		if (ibp->sl_to_sc[rdma_ah_get_sl(&ah->attr)] == 0xf)
			return -EINVAL;
	default:
		break;
	}
	return wqe->length <= piothreshold;
}

/**
 * _hfi1_schedule_send - schedule progress
 * @qp: the QP
 *
 * This schedules qp progress w/o regard to the s_flags.
 *
 * It is only used in the post send, which doesn't hold
 * the s_lock.
 */
void _hfi1_schedule_send(struct rvt_qp *qp)
{
	struct hfi1_qp_priv *priv = qp->priv;
	struct hfi1_ibport *ibp =
		to_iport(qp->ibqp.device, qp->port_num);
	struct hfi1_pportdata *ppd = ppd_from_ibp(ibp);
	struct hfi1_devdata *dd = dd_from_ibdev(qp->ibqp.device);

	iowait_schedule(&priv->s_iowait, ppd->hfi1_wq,
			priv->s_sde ?
			priv->s_sde->cpu :
			cpumask_first(cpumask_of_node(dd->node)));
}

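/*
 * Note on _hfi1_schedule_send() above: the work item is queued on the
 * CPU serving the QP's SDMA engine once one has been selected, and
 * otherwise on the first CPU of the device's NUMA node, which keeps
 * send-side progress close to the hardware context doing the work.
 */
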
static void qp_pio_drain(struct rvt_qp *qp)
{
	struct hfi1_ibdev *dev;
	struct hfi1_qp_priv *priv = qp->priv;

	if (!priv->s_sendcontext)
		return;
	dev = to_idev(qp->ibqp.device);
	while (iowait_pio_pending(&priv->s_iowait)) {
		write_seqlock_irq(&dev->iowait_lock);
		hfi1_sc_wantpiobuf_intr(priv->s_sendcontext, 1);
		write_sequnlock_irq(&dev->iowait_lock);
		iowait_pio_drain(&priv->s_iowait);
		write_seqlock_irq(&dev->iowait_lock);
		hfi1_sc_wantpiobuf_intr(priv->s_sendcontext, 0);
		write_sequnlock_irq(&dev->iowait_lock);
	}
}

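/*
 * Each pass of the loop in qp_pio_drain() arms the send context's
 * buffer-available (credit return) interrupt so that returning PIO
 * credits wake the waiter, blocks in iowait_pio_drain() until the
 * outstanding PIO count drops, then disarms the interrupt before
 * rechecking.
 */
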
/**
 * hfi1_schedule_send - schedule progress
 * @qp: the QP
 *
 * This schedules qp progress and caller should hold
 * the s_lock.
 */
void hfi1_schedule_send(struct rvt_qp *qp)
{
	lockdep_assert_held(&qp->s_lock);
	if (hfi1_send_ok(qp))
		_hfi1_schedule_send(qp);
}

void hfi1_qp_wakeup(struct rvt_qp *qp, u32 flag)
{
	unsigned long flags;

	spin_lock_irqsave(&qp->s_lock, flags);
	if (qp->s_flags & flag) {
		qp->s_flags &= ~flag;
		trace_hfi1_qpwakeup(qp, flag);
		hfi1_schedule_send(qp);
	}
	spin_unlock_irqrestore(&qp->s_lock, flags);
	/* Notify hfi1_destroy_qp() if it is waiting. */
	rvt_put_qp(qp);
}

static int iowait_sleep(
	struct sdma_engine *sde,
	struct iowait *wait,
	struct sdma_txreq *stx,
	unsigned int seq,
	bool pkts_sent)
{
	struct verbs_txreq *tx = container_of(stx, struct verbs_txreq, txreq);
	struct rvt_qp *qp;
	struct hfi1_qp_priv *priv;
	unsigned long flags;
	int ret = 0;
	struct hfi1_ibdev *dev;

	qp = tx->qp;
	priv = qp->priv;

	spin_lock_irqsave(&qp->s_lock, flags);
	if (ib_rvt_state_ops[qp->state] & RVT_PROCESS_RECV_OK) {
		/*
		 * If we couldn't queue the DMA request, save the info
		 * and try again later rather than destroying the
		 * buffer and undoing the side effects of the copy.
		 */
		/* Make a common routine? */
		dev = &sde->dd->verbs_dev;
		list_add_tail(&stx->list, &wait->tx_head);
		write_seqlock(&dev->iowait_lock);
		if (sdma_progress(sde, seq, stx))
			goto eagain;
		if (list_empty(&priv->s_iowait.list)) {
			struct hfi1_ibport *ibp =
				to_iport(qp->ibqp.device, qp->port_num);

			ibp->rvp.n_dmawait++;
			qp->s_flags |= RVT_S_WAIT_DMA_DESC;
			iowait_queue(pkts_sent, &priv->s_iowait,
				     &sde->dmawait);
			priv->s_iowait.lock = &dev->iowait_lock;
			trace_hfi1_qpsleep(qp, RVT_S_WAIT_DMA_DESC);
			rvt_get_qp(qp);
		}
		write_sequnlock(&dev->iowait_lock);
		qp->s_flags &= ~RVT_S_BUSY;
		spin_unlock_irqrestore(&qp->s_lock, flags);
		ret = -EBUSY;
	} else {
		spin_unlock_irqrestore(&qp->s_lock, flags);
		hfi1_put_txreq(tx);
	}
	return ret;
eagain:
	write_sequnlock(&dev->iowait_lock);
	spin_unlock_irqrestore(&qp->s_lock, flags);
	list_del_init(&stx->list);
	return -EAGAIN;
}

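/*
 * Return contract of iowait_sleep() with the SDMA engine: -EBUSY
 * means the txreq was queued on the iowait list and the QP parked
 * until descriptors free up, so the submission must not be retried;
 * -EAGAIN means the engine made progress while the locks were being
 * taken, so the descriptor submission should simply be retried.
 */
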
static void iowait_wakeup(struct iowait *wait, int reason)
{
	struct rvt_qp *qp = iowait_to_qp(wait);

	WARN_ON(reason != SDMA_AVAIL_REASON);
	hfi1_qp_wakeup(qp, RVT_S_WAIT_DMA_DESC);
}

static void iowait_sdma_drained(struct iowait *wait)
{
	struct rvt_qp *qp = iowait_to_qp(wait);
	unsigned long flags;

	/*
	 * This happens when the send engine notes
	 * a QP in the error state and cannot
	 * do the flush work until that QP's
	 * sdma work has finished.
	 */
	spin_lock_irqsave(&qp->s_lock, flags);
	if (qp->s_flags & RVT_S_WAIT_DMA) {
		qp->s_flags &= ~RVT_S_WAIT_DMA;
		hfi1_schedule_send(qp);
	}
	spin_unlock_irqrestore(&qp->s_lock, flags);
}

/**
 * qp_to_sdma_engine - map a qp to a send engine
 * @qp: the QP
 * @sc5: the 5 bit sc
 *
 * Return:
 * A send engine for the qp or NULL for SMI type qp.
 */
struct sdma_engine *qp_to_sdma_engine(struct rvt_qp *qp, u8 sc5)
{
	struct hfi1_devdata *dd = dd_from_ibdev(qp->ibqp.device);
	struct sdma_engine *sde;

	if (!(dd->flags & HFI1_HAS_SEND_DMA))
		return NULL;
	switch (qp->ibqp.qp_type) {
	case IB_QPT_SMI:
		return NULL;
	default:
		break;
	}
	sde = sdma_select_engine_sc(dd, qp->ibqp.qp_num >> dd->qos_shift, sc5);
	return sde;
}

/*
 * qp_to_send_context - map a qp to a send context
 * @qp: the QP
 * @sc5: the 5 bit sc
 *
 * Return:
 * A send context for the qp
 */
struct send_context *qp_to_send_context(struct rvt_qp *qp, u8 sc5)
{
	struct hfi1_devdata *dd = dd_from_ibdev(qp->ibqp.device);

	switch (qp->ibqp.qp_type) {
	case IB_QPT_SMI:
		/* SMA packets to VL15 */
		return dd->vld[15].sc;
	default:
		break;
	}

	return pio_select_send_context_sc(dd, qp->ibqp.qp_num >> dd->qos_shift,
					  sc5);
}

static const char * const qp_type_str[] = {
	"SMI", "GSI", "RC", "UC", "UD",
};

static int qp_idle(struct rvt_qp *qp)
{
	return
		qp->s_last == qp->s_acked &&
		qp->s_acked == qp->s_cur &&
		qp->s_cur == qp->s_tail &&
		qp->s_tail == qp->s_head;
}

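/*
 * The five send-queue indices always satisfy (circularly)
 * s_last <= s_acked <= s_cur <= s_tail <= s_head: new WQEs are posted
 * at s_head, the send engine works from s_cur and s_tail, and ACK
 * processing advances s_acked and s_last.  A QP is idle only when all
 * five coincide, i.e. nothing is queued, in flight, or awaiting
 * acknowledgment.
 */
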
/**
 * qp_iter_print - print the qp information to seq_file
 * @s: the seq_file to emit the qp information on
 * @iter: the iterator for the qp hash list
 */
void qp_iter_print(struct seq_file *s, struct rvt_qp_iter *iter)
{
	struct rvt_swqe *wqe;
	struct rvt_qp *qp = iter->qp;
	struct hfi1_qp_priv *priv = qp->priv;
	struct sdma_engine *sde;
	struct send_context *send_context;
	struct rvt_ack_entry *e = NULL;

	sde = qp_to_sdma_engine(qp, priv->s_sc);
	wqe = rvt_get_swqe_ptr(qp, qp->s_last);
	send_context = qp_to_send_context(qp, priv->s_sc);
	if (qp->s_ack_queue)
		e = &qp->s_ack_queue[qp->s_tail_ack_queue];
	seq_printf(s,
		   "N %d %s QP %x R %u %s %u %u %u f=%x %u %u %u %u %u %u SPSN %x %x %x %x %x RPSN %x S(%u %u %u %u %u %u %u) R(%u %u %u) RQP %x LID %x SL %u MTU %u %u %u %u %u SDE %p,%u SC %p,%u SCQ %u %u PID %d OS %x %x E %x %x %x\n",
		   iter->n,
		   qp_idle(qp) ? "I" : "B",
		   qp->ibqp.qp_num,
		   atomic_read(&qp->refcount),
		   qp_type_str[qp->ibqp.qp_type],
		   qp->state,
		   wqe ? wqe->wr.opcode : 0,
		   qp->s_hdrwords,
		   qp->s_flags,
		   iowait_sdma_pending(&priv->s_iowait),
		   iowait_pio_pending(&priv->s_iowait),
		   !list_empty(&priv->s_iowait.list),
		   qp->timeout,
		   wqe ? wqe->ssn : 0,
		   qp->s_lsn,
		   qp->s_last_psn,
		   qp->s_psn, qp->s_next_psn,
		   qp->s_sending_psn, qp->s_sending_hpsn,
		   qp->r_psn,
		   qp->s_last, qp->s_acked, qp->s_cur,
		   qp->s_tail, qp->s_head, qp->s_size,
		   qp->s_avail,
		   /* ack_queue ring pointers, size */
		   qp->s_tail_ack_queue, qp->r_head_ack_queue,
		   rvt_max_atomic(&to_idev(qp->ibqp.device)->rdi),
		   /* remote QP info */
		   qp->remote_qpn,
		   rdma_ah_get_dlid(&qp->remote_ah_attr),
		   rdma_ah_get_sl(&qp->remote_ah_attr),
		   qp->pmtu,
		   qp->s_retry,
		   qp->s_retry_cnt,
		   qp->s_rnr_retry_cnt,
		   qp->s_rnr_retry,
		   sde,
		   sde ? sde->this_idx : 0,
		   send_context,
		   send_context ? send_context->sw_index : 0,
		   ibcq_to_rvtcq(qp->ibqp.send_cq)->queue->head,
		   ibcq_to_rvtcq(qp->ibqp.send_cq)->queue->tail,
		   qp->pid,
		   qp->s_state,
		   qp->s_ack_state,
		   /* ack queue information */
		   e ? e->opcode : 0,
		   e ? e->psn : 0,
		   e ? e->lpsn : 0);
}

void *qp_priv_alloc(struct rvt_dev_info *rdi, struct rvt_qp *qp)
{
	struct hfi1_qp_priv *priv;

	priv = kzalloc_node(sizeof(*priv), GFP_KERNEL, rdi->dparms.node);
	if (!priv)
		return ERR_PTR(-ENOMEM);

	priv->owner = qp;

	priv->s_ahg = kzalloc_node(sizeof(*priv->s_ahg), GFP_KERNEL,
				   rdi->dparms.node);
	if (!priv->s_ahg) {
		kfree(priv);
		return ERR_PTR(-ENOMEM);
	}
	iowait_init(
		&priv->s_iowait,
		1,
		_hfi1_do_send,
		iowait_sleep,
		iowait_wakeup,
		iowait_sdma_drained);
	return priv;
}

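/*
 * The iowait_init() call above registers _hfi1_do_send as the
 * workqueue function that drives send progress (with a tx_limit of
 * 1), and hooks iowait_sleep/iowait_wakeup/iowait_sdma_drained as the
 * callbacks the SDMA engine uses to park, resume, and flush this QP.
 */
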
void qp_priv_free(struct rvt_dev_info *rdi, struct rvt_qp *qp)
{
	struct hfi1_qp_priv *priv = qp->priv;

	kfree(priv->s_ahg);
	kfree(priv);
}

unsigned free_all_qps(struct rvt_dev_info *rdi)
{
	struct hfi1_ibdev *verbs_dev = container_of(rdi,
						    struct hfi1_ibdev,
						    rdi);
	struct hfi1_devdata *dd = container_of(verbs_dev,
					       struct hfi1_devdata,
					       verbs_dev);
	int n;
	unsigned qp_inuse = 0;

	for (n = 0; n < dd->num_pports; n++) {
		struct hfi1_ibport *ibp = &dd->pport[n].ibport_data;

		rcu_read_lock();
		if (rcu_dereference(ibp->rvp.qp[0]))
			qp_inuse++;
		if (rcu_dereference(ibp->rvp.qp[1]))
			qp_inuse++;
		rcu_read_unlock();
	}

	return qp_inuse;
}

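/*
 * Only the two special QPs (QP0/SMI and QP1/GSI) are counted above:
 * they live in per-port pointers rather than in the rdmavt QP hash
 * table, so the rdmavt core cannot see them when it sweeps that table
 * for in-use QPs at unregister time.
 */
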
void flush_qp_waiters(struct rvt_qp *qp)
{
	lockdep_assert_held(&qp->s_lock);
	flush_iowait(qp);
}

void stop_send_queue(struct rvt_qp *qp)
{
	struct hfi1_qp_priv *priv = qp->priv;

	cancel_work_sync(&priv->s_iowait.iowork);
}

void quiesce_qp(struct rvt_qp *qp)
{
	struct hfi1_qp_priv *priv = qp->priv;

	iowait_sdma_drain(&priv->s_iowait);
	qp_pio_drain(qp);
	flush_tx_list(qp);
}

void notify_qp_reset(struct rvt_qp *qp)
{
	qp->r_adefered = 0;
	clear_ahg(qp);
}

/*
 * Switch to alternate path.
 * The QP s_lock should be held and interrupts disabled.
 */
void hfi1_migrate_qp(struct rvt_qp *qp)
{
	struct hfi1_qp_priv *priv = qp->priv;
	struct ib_event ev;

	qp->s_mig_state = IB_MIG_MIGRATED;
	qp->remote_ah_attr = qp->alt_ah_attr;
	qp->port_num = rdma_ah_get_port_num(&qp->alt_ah_attr);
	qp->s_pkey_index = qp->s_alt_pkey_index;
	qp->s_flags |= RVT_S_AHG_CLEAR;
	priv->s_sc = ah_to_sc(qp->ibqp.device, &qp->remote_ah_attr);
	priv->s_sde = qp_to_sdma_engine(qp, priv->s_sc);
	qp_set_16b(qp);

	ev.device = qp->ibqp.device;
	ev.element.qp = &qp->ibqp;
	ev.event = IB_EVENT_PATH_MIG;
	qp->ibqp.event_handler(&ev, qp->ibqp.qp_context);
}

int mtu_to_path_mtu(u32 mtu)
{
	return mtu_to_enum(mtu, OPA_MTU_8192);
}

u32 mtu_from_qp(struct rvt_dev_info *rdi, struct rvt_qp *qp, u32 pmtu)
{
	u32 mtu;
	struct hfi1_ibdev *verbs_dev = container_of(rdi,
						    struct hfi1_ibdev,
						    rdi);
	struct hfi1_devdata *dd = container_of(verbs_dev,
					       struct hfi1_devdata,
					       verbs_dev);
	struct hfi1_ibport *ibp;
	u8 sc, vl;

	ibp = &dd->pport[qp->port_num - 1].ibport_data;
	sc = ibp->sl_to_sc[rdma_ah_get_sl(&qp->remote_ah_attr)];
	vl = sc_to_vlt(dd, sc);

	mtu = verbs_mtu_enum_to_int(qp->ibqp.device, pmtu);
	if (vl < PER_VL_SEND_CONTEXTS)
		mtu = min_t(u32, mtu, dd->vld[vl].mtu);
	return mtu;
}

int get_pmtu_from_attr(struct rvt_dev_info *rdi, struct rvt_qp *qp,
		       struct ib_qp_attr *attr)
{
	int mtu, pidx = qp->port_num - 1;
	struct hfi1_ibdev *verbs_dev = container_of(rdi,
						    struct hfi1_ibdev,
						    rdi);
	struct hfi1_devdata *dd = container_of(verbs_dev,
					       struct hfi1_devdata,
					       verbs_dev);

	mtu = verbs_mtu_enum_to_int(qp->ibqp.device, attr->path_mtu);
	if (mtu == -1)
		return -1; /* values less than 0 are error */

	if (mtu > dd->pport[pidx].ibmtu)
		return mtu_to_enum(dd->pport[pidx].ibmtu, IB_MTU_2048);
	else
		return attr->path_mtu;
}

void notify_error_qp(struct rvt_qp *qp)
{
	struct hfi1_qp_priv *priv = qp->priv;
	seqlock_t *lock = priv->s_iowait.lock;

	if (lock) {
		write_seqlock(lock);
		if (!list_empty(&priv->s_iowait.list) &&
		    !(qp->s_flags & RVT_S_BUSY)) {
			qp->s_flags &= ~RVT_S_ANY_WAIT_IO;
			list_del_init(&priv->s_iowait.list);
			priv->s_iowait.lock = NULL;
			rvt_put_qp(qp);
		}
		write_sequnlock(lock);
	}

	if (!(qp->s_flags & RVT_S_BUSY)) {
		qp->s_hdrwords = 0;
		if (qp->s_rdma_mr) {
			rvt_put_mr(qp->s_rdma_mr);
			qp->s_rdma_mr = NULL;
		}
		flush_tx_list(qp);
	}
}

/**
 * hfi1_qp_iter_cb - callback for iterator
 * @qp: the qp
 * @v: the sl in low bits of v
 *
 * This is called from the iterator callback to work
 * on an individual qp.
 */
static void hfi1_qp_iter_cb(struct rvt_qp *qp, u64 v)
{
	int lastwqe;
	struct ib_event ev;
	struct hfi1_ibport *ibp =
		to_iport(qp->ibqp.device, qp->port_num);
	struct hfi1_pportdata *ppd = ppd_from_ibp(ibp);
	u8 sl = (u8)v;

	if (qp->port_num != ppd->port ||
	    (qp->ibqp.qp_type != IB_QPT_UC &&
	     qp->ibqp.qp_type != IB_QPT_RC) ||
	    rdma_ah_get_sl(&qp->remote_ah_attr) != sl ||
	    !(ib_rvt_state_ops[qp->state] & RVT_POST_SEND_OK))
		return;

	spin_lock_irq(&qp->r_lock);
	spin_lock(&qp->s_hlock);
	spin_lock(&qp->s_lock);
	lastwqe = rvt_error_qp(qp, IB_WC_WR_FLUSH_ERR);
	spin_unlock(&qp->s_lock);
	spin_unlock(&qp->s_hlock);
	spin_unlock_irq(&qp->r_lock);
	if (lastwqe) {
		ev.device = qp->ibqp.device;
		ev.element.qp = &qp->ibqp;
		ev.event = IB_EVENT_QP_LAST_WQE_REACHED;
		qp->ibqp.event_handler(&ev, qp->ibqp.qp_context);
	}
}

/**
 * hfi1_error_port_qps - put a port's RC/UC qps into error state
 * @ibp: the ibport.
 * @sl: the service level.
 *
 * This function places all RC/UC qps with a given service level into error
 * state.  It is generally called to force upper layer apps to abandon stale
 * qps after an sl->sc mapping change.
 */
void hfi1_error_port_qps(struct hfi1_ibport *ibp, u8 sl)
{
	struct hfi1_pportdata *ppd = ppd_from_ibp(ibp);
	struct hfi1_ibdev *dev = &ppd->dd->verbs_dev;

	rvt_qp_iter(&dev->rdi, sl, hfi1_qp_iter_cb);
}
Kaike Wan0ec79e82016-02-14 12:10:20 -0800855}