// SPDX-License-Identifier: (GPL-2.0 OR BSD-3-Clause)
/*
 * Copyright(c) 2020 Intel Corporation.
 *
 */

/*
 * This file contains HFI1 support for IPOIB SDMA functionality
 */

#include <linux/log2.h>
#include <linux/circ_buf.h>

#include "sdma.h"
#include "verbs.h"
#include "trace_ibhdrs.h"
#include "ipoib.h"
#include "trace_tx.h"

/* Convenience helpers for circular buffer index arithmetic */
#define CIRC_ADD(val, add, size) (((val) + (add)) & ((size) - 1))
#define CIRC_NEXT(val, size) CIRC_ADD(val, 1, size)
#define CIRC_PREV(val, size) CIRC_ADD(val, -1, size)

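/*
 * These macros assume size is a power of two, which holds here because
 * hfi1_ipoib_txreq_init() sizes the ring with roundup_pow_of_two(): the
 * (size - 1) mask then implements cheap modular arithmetic, e.g. with
 * size == 8, CIRC_ADD(7, 1, 8) == 0 and CIRC_PREV(0, 8) == 7.
 */
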
/**
 * struct ipoib_txreq - IPOIB transmit descriptor
 * @txreq: sdma transmit request
 * @sdma_hdr: 9b ib headers
 * @sdma_status: status returned by sdma engine
 * @priv: ipoib netdev private data
 * @txq: txq on which skb was output
 * @skb: skb to send
 */
struct ipoib_txreq {
        struct sdma_txreq txreq;
        struct hfi1_sdma_header sdma_hdr;
        int sdma_status;
        struct hfi1_ipoib_dev_priv *priv;
        struct hfi1_ipoib_txq *txq;
        struct sk_buff *skb;
};

struct ipoib_txparms {
        struct hfi1_devdata *dd;
        struct rdma_ah_attr *ah_attr;
        struct hfi1_ibport *ibp;
        struct hfi1_ipoib_txq *txq;
        union hfi1_ipoib_flow flow;
        u32 dqpn;
        u8 hdr_dwords;
        u8 entropy;
};

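/*
 * sent_txreqs is only advanced by the send path and complete_txreqs only
 * by the completion path, so their difference is the number of txreqs
 * still in flight; unsigned subtraction keeps the count correct even
 * after the counters wrap.
 */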
static u64 hfi1_ipoib_txreqs(const u64 sent, const u64 completed)
{
        return sent - completed;
}

static u64 hfi1_ipoib_used(struct hfi1_ipoib_txq *txq)
{
        return hfi1_ipoib_txreqs(txq->sent_txreqs,
                                 atomic64_read(&txq->complete_txreqs));
}

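/*
 * A txq can be stopped for more than one reason at once (ring full and
 * out of sdma descriptors); the stops count nests those reasons so the
 * subqueue is stopped on the first and only woken on the last.
 */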
static void hfi1_ipoib_stop_txq(struct hfi1_ipoib_txq *txq)
{
        trace_hfi1_txq_stop(txq);
        if (atomic_inc_return(&txq->stops) == 1)
                netif_stop_subqueue(txq->priv->netdev, txq->q_idx);
}

static void hfi1_ipoib_wake_txq(struct hfi1_ipoib_txq *txq)
{
        trace_hfi1_txq_wake(txq);
        if (atomic_dec_and_test(&txq->stops))
                netif_wake_subqueue(txq->priv->netdev, txq->q_idx);
}

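/*
 * High and low watermarks give the queue hysteresis: it is stopped once
 * usage reaches the ring capacity minus one (capped by tx_queue_len) and
 * is not restarted until usage drains below half of that cap.
 */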
static uint hfi1_ipoib_ring_hwat(struct hfi1_ipoib_txq *txq)
{
        return min_t(uint, txq->priv->netdev->tx_queue_len,
                     txq->tx_ring.max_items - 1);
}

static uint hfi1_ipoib_ring_lwat(struct hfi1_ipoib_txq *txq)
{
        return min_t(uint, txq->priv->netdev->tx_queue_len,
                     txq->tx_ring.max_items) >> 1;
}

static void hfi1_ipoib_check_queue_depth(struct hfi1_ipoib_txq *txq)
{
        ++txq->sent_txreqs;
        if (hfi1_ipoib_used(txq) >= hfi1_ipoib_ring_hwat(txq) &&
            !atomic_xchg(&txq->ring_full, 1)) {
                trace_hfi1_txq_full(txq);
                hfi1_ipoib_stop_txq(txq);
        }
}

static void hfi1_ipoib_check_queue_stopped(struct hfi1_ipoib_txq *txq)
{
        struct net_device *dev = txq->priv->netdev;

        /* If shutting down just return as queue state is irrelevant */
        if (unlikely(dev->reg_state != NETREG_REGISTERED))
                return;

        /*
         * When the queue has been drained to less than half full it will be
         * restarted.
         * The size of the txreq ring is fixed at initialization.
         * The tx queue len can be adjusted upward while the interface is
         * running.
         * The tx queue len can be large enough to overflow the txreq_ring.
         * Use the minimum of the current tx_queue_len or the ring's max txreqs
         * to protect against ring overflow.
         */
        if (hfi1_ipoib_used(txq) < hfi1_ipoib_ring_lwat(txq) &&
            atomic_xchg(&txq->ring_full, 0)) {
                trace_hfi1_txq_xmit_unstopped(txq);
                hfi1_ipoib_wake_txq(txq);
        }
}

static void hfi1_ipoib_free_tx(struct ipoib_txreq *tx, int budget)
{
        struct hfi1_ipoib_dev_priv *priv = tx->priv;

        if (likely(!tx->sdma_status)) {
                dev_sw_netstats_tx_add(priv->netdev, 1, tx->skb->len);
        } else {
                ++priv->netdev->stats.tx_errors;
                dd_dev_warn(priv->dd,
                            "%s: Status = 0x%x pbc 0x%llx txq = %d sde = %d\n",
                            __func__, tx->sdma_status,
                            le64_to_cpu(tx->sdma_hdr.pbc), tx->txq->q_idx,
                            tx->txq->sde->this_idx);
        }

        napi_consume_skb(tx->skb, budget);
        sdma_txclean(priv->dd, &tx->txreq);
        kmem_cache_free(priv->txreq_cache, tx);
}

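/*
 * Consumer side of the tx ring.  The head index is read with
 * smp_load_acquire() and the new tail published with smp_store_release(),
 * pairing with the producer in hfi1_ipoib_add_tx() so slot contents are
 * visible before they are consumed and freed slots are visible before
 * they are reused.
 */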
static int hfi1_ipoib_drain_tx_ring(struct hfi1_ipoib_txq *txq, int budget)
{
        struct hfi1_ipoib_circ_buf *tx_ring = &txq->tx_ring;
        unsigned long head;
        unsigned long tail;
        unsigned int max_tx;
        int work_done;
        int tx_count;

        spin_lock_bh(&tx_ring->consumer_lock);

        /* Read index before reading contents at that index. */
        head = smp_load_acquire(&tx_ring->head);
        tail = tx_ring->tail;
        max_tx = tx_ring->max_items;

        work_done = min_t(int, CIRC_CNT(head, tail, max_tx), budget);

        for (tx_count = work_done; tx_count; tx_count--) {
                hfi1_ipoib_free_tx(tx_ring->items[tail], budget);
                tail = CIRC_NEXT(tail, max_tx);
        }

        atomic64_add(work_done, &txq->complete_txreqs);

        /* Finished freeing tx items so store the tail value. */
        smp_store_release(&tx_ring->tail, tail);

        spin_unlock_bh(&tx_ring->consumer_lock);

        hfi1_ipoib_check_queue_stopped(txq);

        return work_done;
}

static int hfi1_ipoib_process_tx_ring(struct napi_struct *napi, int budget)
{
        struct hfi1_ipoib_dev_priv *priv = hfi1_ipoib_priv(napi->dev);
        struct hfi1_ipoib_txq *txq = &priv->txqs[napi - priv->tx_napis];

        int work_done = hfi1_ipoib_drain_tx_ring(txq, budget);

        if (work_done < budget)
                napi_complete_done(napi, work_done);

        return work_done;
}

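/*
 * Producer side of the tx ring, called from the sdma completion callback.
 * Slots are only reclaimed by the napi poll loop; if completions ever
 * outrun napi and the ring fills, the txreq is freed inline instead.
 */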
static void hfi1_ipoib_add_tx(struct ipoib_txreq *tx)
{
        struct hfi1_ipoib_circ_buf *tx_ring = &tx->txq->tx_ring;
        unsigned long head;
        unsigned long tail;
        size_t max_tx;

        spin_lock(&tx_ring->producer_lock);

        head = tx_ring->head;
        tail = READ_ONCE(tx_ring->tail);
        max_tx = tx_ring->max_items;

        if (likely(CIRC_SPACE(head, tail, max_tx))) {
                tx_ring->items[head] = tx;

                /* Finish storing txreq before incrementing head. */
                smp_store_release(&tx_ring->head, CIRC_ADD(head, 1, max_tx));
                napi_schedule(tx->txq->napi);
        } else {
                struct hfi1_ipoib_txq *txq = tx->txq;
                struct hfi1_ipoib_dev_priv *priv = tx->priv;

                /* Ring was full */
                hfi1_ipoib_free_tx(tx, 0);
                atomic64_inc(&txq->complete_txreqs);
                dd_dev_dbg(priv->dd, "txq %d full.\n", txq->q_idx);
        }

        spin_unlock(&tx_ring->producer_lock);
}

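/* sdma engine completion callback; the actual freeing is deferred to napi */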
static void hfi1_ipoib_sdma_complete(struct sdma_txreq *txreq, int status)
{
        struct ipoib_txreq *tx = container_of(txreq, struct ipoib_txreq, txreq);

        tx->sdma_status = status;

        hfi1_ipoib_add_tx(tx);
}

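/*
 * Map the skb onto sdma descriptors: one kvaddr descriptor for the linear
 * area (when present) and one page descriptor per paged fragment.
 */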
static int hfi1_ipoib_build_ulp_payload(struct ipoib_txreq *tx,
                                        struct ipoib_txparms *txp)
{
        struct hfi1_devdata *dd = txp->dd;
        struct sdma_txreq *txreq = &tx->txreq;
        struct sk_buff *skb = tx->skb;
        int ret = 0;
        int i;

        if (skb_headlen(skb)) {
                ret = sdma_txadd_kvaddr(dd, txreq, skb->data, skb_headlen(skb));
                if (unlikely(ret))
                        return ret;
        }

        for (i = 0; i < skb_shinfo(skb)->nr_frags; i++) {
                const skb_frag_t *frag = &skb_shinfo(skb)->frags[i];

                ret = sdma_txadd_page(dd,
                                      txreq,
                                      skb_frag_page(frag),
                                      frag->bv_offset,
                                      skb_frag_size(frag));
                if (unlikely(ret))
                        break;
        }

        return ret;
}

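/*
 * A request is pbc + 9B headers + payload.  The pbc and headers were
 * prebuilt in tx->sdma_hdr, so they are added as a single kvaddr
 * descriptor ahead of the ulp payload descriptors.
 */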
static int hfi1_ipoib_build_tx_desc(struct ipoib_txreq *tx,
                                    struct ipoib_txparms *txp)
{
        struct hfi1_devdata *dd = txp->dd;
        struct sdma_txreq *txreq = &tx->txreq;
        struct hfi1_sdma_header *sdma_hdr = &tx->sdma_hdr;
        u16 pkt_bytes =
                sizeof(sdma_hdr->pbc) + (txp->hdr_dwords << 2) + tx->skb->len;
        int ret;

        ret = sdma_txinit(txreq, 0, pkt_bytes, hfi1_ipoib_sdma_complete);
        if (unlikely(ret))
                return ret;

        /* add pbc + headers */
        ret = sdma_txadd_kvaddr(dd,
                                txreq,
                                sdma_hdr,
                                sizeof(sdma_hdr->pbc) + (txp->hdr_dwords << 2));
        if (unlikely(ret))
                return ret;

        /* add the ulp payload */
        return hfi1_ipoib_build_ulp_payload(tx, txp);
}

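/*
 * Build the 9B IB headers (LRH [+ GRH] + BTH + DETH) and the pbc in
 * tx->sdma_hdr.  The source qpn comes from bytes 1-3 of the netdev
 * hardware address and the low bits of sent_txreqs serve as the psn.
 */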
static void hfi1_ipoib_build_ib_tx_headers(struct ipoib_txreq *tx,
                                           struct ipoib_txparms *txp)
{
        struct hfi1_ipoib_dev_priv *priv = tx->priv;
        struct hfi1_sdma_header *sdma_hdr = &tx->sdma_hdr;
        struct sk_buff *skb = tx->skb;
        struct hfi1_pportdata *ppd = ppd_from_ibp(txp->ibp);
        struct rdma_ah_attr *ah_attr = txp->ah_attr;
        struct ib_other_headers *ohdr;
        struct ib_grh *grh;
        u16 dwords;
        u16 slid;
        u16 dlid;
        u16 lrh0;
        u32 bth0;
        u32 sqpn = (u32)(priv->netdev->dev_addr[1] << 16 |
                         priv->netdev->dev_addr[2] << 8 |
                         priv->netdev->dev_addr[3]);
        u16 payload_dwords;
        u8 pad_cnt;

        pad_cnt = -skb->len & 3;

        /* Includes ICRC */
        payload_dwords = ((skb->len + pad_cnt) >> 2) + SIZE_OF_CRC;

        /* header size in dwords LRH+BTH+DETH = (8+12+8)/4. */
        txp->hdr_dwords = 7;

        if (rdma_ah_get_ah_flags(ah_attr) & IB_AH_GRH) {
                grh = &sdma_hdr->hdr.ibh.u.l.grh;
                txp->hdr_dwords +=
                        hfi1_make_grh(txp->ibp,
                                      grh,
                                      rdma_ah_read_grh(ah_attr),
                                      txp->hdr_dwords - LRH_9B_DWORDS,
                                      payload_dwords);
                lrh0 = HFI1_LRH_GRH;
                ohdr = &sdma_hdr->hdr.ibh.u.l.oth;
        } else {
                lrh0 = HFI1_LRH_BTH;
                ohdr = &sdma_hdr->hdr.ibh.u.oth;
        }

        lrh0 |= (rdma_ah_get_sl(ah_attr) & 0xf) << 4;
        lrh0 |= (txp->flow.sc5 & 0xf) << 12;

        dlid = opa_get_lid(rdma_ah_get_dlid(ah_attr), 9B);
        if (dlid == be16_to_cpu(IB_LID_PERMISSIVE)) {
                slid = be16_to_cpu(IB_LID_PERMISSIVE);
        } else {
                u16 lid = (u16)ppd->lid;

                if (lid) {
                        lid |= rdma_ah_get_path_bits(ah_attr) &
                                ((1 << ppd->lmc) - 1);
                        slid = lid;
                } else {
                        slid = be16_to_cpu(IB_LID_PERMISSIVE);
                }
        }

        /* Includes ICRC */
        dwords = txp->hdr_dwords + payload_dwords;

        /* Build the lrh */
        sdma_hdr->hdr.hdr_type = HFI1_PKT_TYPE_9B;
        hfi1_make_ib_hdr(&sdma_hdr->hdr.ibh, lrh0, dwords, dlid, slid);

        /* Build the bth */
        bth0 = (IB_OPCODE_UD_SEND_ONLY << 24) | (pad_cnt << 20) | priv->pkey;

        ohdr->bth[0] = cpu_to_be32(bth0);
        ohdr->bth[1] = cpu_to_be32(txp->dqpn);
        ohdr->bth[2] = cpu_to_be32(mask_psn((u32)txp->txq->sent_txreqs));

        /* Build the deth */
        ohdr->u.ud.deth[0] = cpu_to_be32(priv->qkey);
        ohdr->u.ud.deth[1] = cpu_to_be32((txp->entropy <<
                                          HFI1_IPOIB_ENTROPY_SHIFT) | sqpn);

        /* Construct the pbc. */
        sdma_hdr->pbc =
                cpu_to_le64(create_pbc(ppd,
                                       ib_is_sc5(txp->flow.sc5) <<
                                       PBC_DC_INFO_SHIFT,
                                       0,
                                       sc_to_vlt(priv->dd, txp->flow.sc5),
                                       dwords - SIZE_OF_CRC +
                                       (sizeof(sdma_hdr->pbc) >> 2)));
}

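/*
 * Allocate and build a txreq for one skb.  When the txq's {tx_queue, sc5}
 * flow changes, a new sdma engine is selected, presumably so each flow
 * stays on a single engine.
 */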
static struct ipoib_txreq *hfi1_ipoib_send_dma_common(struct net_device *dev,
                                                      struct sk_buff *skb,
                                                      struct ipoib_txparms *txp)
{
        struct hfi1_ipoib_dev_priv *priv = hfi1_ipoib_priv(dev);
        struct ipoib_txreq *tx;
        int ret;

        tx = kmem_cache_alloc_node(priv->txreq_cache,
                                   GFP_ATOMIC,
                                   priv->dd->node);
        if (unlikely(!tx))
                return ERR_PTR(-ENOMEM);

        /* so that we can test if the sdma descriptors are there */
        tx->txreq.num_desc = 0;
        tx->priv = priv;
        tx->txq = txp->txq;
        tx->skb = skb;
        INIT_LIST_HEAD(&tx->txreq.list);

        hfi1_ipoib_build_ib_tx_headers(tx, txp);

        ret = hfi1_ipoib_build_tx_desc(tx, txp);
        if (likely(!ret)) {
                if (txp->txq->flow.as_int != txp->flow.as_int) {
                        txp->txq->flow.tx_queue = txp->flow.tx_queue;
                        txp->txq->flow.sc5 = txp->flow.sc5;
                        txp->txq->sde =
                                sdma_select_engine_sc(priv->dd,
                                                      txp->flow.tx_queue,
                                                      txp->flow.sc5);
                        trace_hfi1_flow_switch(txp->txq);
                }

                return tx;
        }

        sdma_txclean(priv->dd, &tx->txreq);
        kmem_cache_free(priv->txreq_cache, tx);

        return ERR_PTR(ret);
}

static int hfi1_ipoib_submit_tx_list(struct net_device *dev,
                                     struct hfi1_ipoib_txq *txq)
{
        int ret;
        u16 count_out;

        ret = sdma_send_txlist(txq->sde,
                               iowait_get_ib_work(&txq->wait),
                               &txq->tx_list,
                               &count_out);
        if (likely(!ret) || ret == -EBUSY || ret == -ECOMM)
                return ret;

        dd_dev_warn(txq->priv->dd, "cannot send skb tx list, err %d.\n", ret);

        return ret;
}

static int hfi1_ipoib_flush_tx_list(struct net_device *dev,
                                    struct hfi1_ipoib_txq *txq)
{
        int ret = 0;

        if (!list_empty(&txq->tx_list)) {
                /* Flush the current list */
                ret = hfi1_ipoib_submit_tx_list(dev, txq);

                if (unlikely(ret))
                        if (ret != -EBUSY)
                                ++dev->stats.tx_carrier_errors;
        }

        return ret;
}

static int hfi1_ipoib_submit_tx(struct hfi1_ipoib_txq *txq,
                                struct ipoib_txreq *tx)
{
        int ret;

        ret = sdma_send_txreq(txq->sde,
                              iowait_get_ib_work(&txq->wait),
                              &tx->txreq,
                              txq->pkts_sent);
        if (likely(!ret)) {
                txq->pkts_sent = true;
                iowait_starve_clear(txq->pkts_sent, &txq->wait);
        }

        return ret;
}

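/*
 * Send one skb as a single txreq.  -EBUSY and -ECOMM from the submit path
 * are not failures here: the txreq has been queued to be retried once the
 * engine can make progress, so the packet still counts as sent.
 */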
static int hfi1_ipoib_send_dma_single(struct net_device *dev,
                                      struct sk_buff *skb,
                                      struct ipoib_txparms *txp)
{
        struct hfi1_ipoib_dev_priv *priv = hfi1_ipoib_priv(dev);
        struct hfi1_ipoib_txq *txq = txp->txq;
        struct ipoib_txreq *tx;
        int ret;

        tx = hfi1_ipoib_send_dma_common(dev, skb, txp);
        if (IS_ERR(tx)) {
                int ret = PTR_ERR(tx);

                dev_kfree_skb_any(skb);

                if (ret == -ENOMEM)
                        ++dev->stats.tx_errors;
                else
                        ++dev->stats.tx_carrier_errors;

                return NETDEV_TX_OK;
        }

        ret = hfi1_ipoib_submit_tx(txq, tx);
        if (likely(!ret)) {
tx_ok:
                trace_sdma_output_ibhdr(tx->priv->dd,
                                        &tx->sdma_hdr.hdr,
                                        ib_is_sc5(txp->flow.sc5));
                hfi1_ipoib_check_queue_depth(txq);
                return NETDEV_TX_OK;
        }

        txq->pkts_sent = false;

        if (ret == -EBUSY || ret == -ECOMM)
                goto tx_ok;

        sdma_txclean(priv->dd, &tx->txreq);
        dev_kfree_skb_any(skb);
        kmem_cache_free(priv->txreq_cache, tx);
        ++dev->stats.tx_carrier_errors;

        return NETDEV_TX_OK;
}

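/*
 * Queue the skb on the txq tx_list for a batched submit.  If the flow has
 * changed, the pending list is flushed first so packets for the old flow
 * are not reordered across the engine switch.
 */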
static int hfi1_ipoib_send_dma_list(struct net_device *dev,
                                    struct sk_buff *skb,
                                    struct ipoib_txparms *txp)
{
        struct hfi1_ipoib_txq *txq = txp->txq;
        struct ipoib_txreq *tx;

        /* Has the flow changed? */
        if (txq->flow.as_int != txp->flow.as_int) {
                int ret;

                trace_hfi1_flow_flush(txq);
                ret = hfi1_ipoib_flush_tx_list(dev, txq);
                if (unlikely(ret)) {
                        if (ret == -EBUSY)
                                ++dev->stats.tx_dropped;
                        dev_kfree_skb_any(skb);
                        return NETDEV_TX_OK;
                }
        }
        tx = hfi1_ipoib_send_dma_common(dev, skb, txp);
        if (IS_ERR(tx)) {
                int ret = PTR_ERR(tx);

                dev_kfree_skb_any(skb);

                if (ret == -ENOMEM)
                        ++dev->stats.tx_errors;
                else
                        ++dev->stats.tx_carrier_errors;

                return NETDEV_TX_OK;
        }

        list_add_tail(&tx->txreq.list, &txq->tx_list);

        hfi1_ipoib_check_queue_depth(txq);

        trace_sdma_output_ibhdr(tx->priv->dd,
                                &tx->sdma_hdr.hdr,
                                ib_is_sc5(txp->flow.sc5));

        if (!netdev_xmit_more())
                (void)hfi1_ipoib_flush_tx_list(dev, txq);

        return NETDEV_TX_OK;
}

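/*
 * Derive an entropy byte from the transport header (for TCP/UDP the first
 * four bytes are the port numbers), falling back to the queue mapping.
 */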
static u8 hfi1_ipoib_calc_entropy(struct sk_buff *skb)
{
        if (skb_transport_header_was_set(skb)) {
                u8 *hdr = (u8 *)skb_transport_header(skb);

                return (hdr[0] ^ hdr[1] ^ hdr[2] ^ hdr[3]);
        }

        return (u8)skb_get_queue_mapping(skb);
}

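/*
 * hfi1_ipoib_send_dma - IPOIB transmit entry point
 *
 * Oversized skbs are dropped up front.  When more packets are coming
 * (netdev_xmit_more()) or the txq already has a pending tx_list, the skb
 * joins the batched list path; otherwise it is submitted on its own.
 */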
int hfi1_ipoib_send_dma(struct net_device *dev,
                        struct sk_buff *skb,
                        struct ib_ah *address,
                        u32 dqpn)
{
        struct hfi1_ipoib_dev_priv *priv = hfi1_ipoib_priv(dev);
        struct ipoib_txparms txp;
        struct rdma_netdev *rn = netdev_priv(dev);

        if (unlikely(skb->len > rn->mtu + HFI1_IPOIB_ENCAP_LEN)) {
                dd_dev_warn(priv->dd, "packet len %d (> %d) too long to send, dropping\n",
                            skb->len,
                            rn->mtu + HFI1_IPOIB_ENCAP_LEN);
                ++dev->stats.tx_dropped;
                ++dev->stats.tx_errors;
                dev_kfree_skb_any(skb);
                return NETDEV_TX_OK;
        }

        txp.dd = priv->dd;
        txp.ah_attr = &ibah_to_rvtah(address)->attr;
        txp.ibp = to_iport(priv->device, priv->port_num);
        txp.txq = &priv->txqs[skb_get_queue_mapping(skb)];
        txp.dqpn = dqpn;
        txp.flow.sc5 = txp.ibp->sl_to_sc[rdma_ah_get_sl(txp.ah_attr)];
        txp.flow.tx_queue = (u8)skb_get_queue_mapping(skb);
        txp.entropy = hfi1_ipoib_calc_entropy(skb);

        if (netdev_xmit_more() || !list_empty(&txp.txq->tx_list))
                return hfi1_ipoib_send_dma_list(dev, skb, &txp);

        return hfi1_ipoib_send_dma_single(dev, skb, &txp);
}

/*
 * hfi1_ipoib_sdma_sleep - ipoib sdma sleep function
 *
 * This function gets called from sdma_send_txreq() when there are not enough
 * sdma descriptors available to send the packet.  It adds the Tx queue's wait
 * structure to the sdma engine's dmawait list so the queue is woken up when
 * descriptors become available.
 *
 * Return: -EAGAIN if the engine made progress and the request should be
 * retried, -EBUSY once the txreq has been queued behind the engine, or
 * -EINVAL if the netdev is no longer registered.
 */
static int hfi1_ipoib_sdma_sleep(struct sdma_engine *sde,
                                 struct iowait_work *wait,
                                 struct sdma_txreq *txreq,
                                 uint seq,
                                 bool pkts_sent)
{
        struct hfi1_ipoib_txq *txq =
                container_of(wait->iow, struct hfi1_ipoib_txq, wait);

        write_seqlock(&sde->waitlock);

        if (likely(txq->priv->netdev->reg_state == NETREG_REGISTERED)) {
                if (sdma_progress(sde, seq, txreq)) {
                        write_sequnlock(&sde->waitlock);
                        return -EAGAIN;
                }

                if (list_empty(&txreq->list))
                        /* came from non-list submit */
                        list_add_tail(&txreq->list, &txq->tx_list);
                if (list_empty(&txq->wait.list)) {
                        if (!atomic_xchg(&txq->no_desc, 1)) {
                                trace_hfi1_txq_queued(txq);
                                hfi1_ipoib_stop_txq(txq);
                        }
                        iowait_queue(pkts_sent, wait->iow, &sde->dmawait);
                }

                write_sequnlock(&sde->waitlock);
                return -EBUSY;
        }

        write_sequnlock(&sde->waitlock);
        return -EINVAL;
}

/*
 * hfi1_ipoib_sdma_wakeup - ipoib sdma wakeup function
 *
 * This function gets called when SDMA descriptors become available and the
 * Tx queue's wait structure was previously added to the sdma engine's
 * dmawait list.
 */
static void hfi1_ipoib_sdma_wakeup(struct iowait *wait, int reason)
{
        struct hfi1_ipoib_txq *txq =
                container_of(wait, struct hfi1_ipoib_txq, wait);

        trace_hfi1_txq_wakeup(txq);
        if (likely(txq->priv->netdev->reg_state == NETREG_REGISTERED))
                iowait_schedule(wait, system_highpri_wq, WORK_CPU_UNBOUND);
}

static void hfi1_ipoib_flush_txq(struct work_struct *work)
{
        struct iowait_work *ioww =
                container_of(work, struct iowait_work, iowork);
        struct iowait *wait = iowait_ioww_to_iow(ioww);
        struct hfi1_ipoib_txq *txq =
                container_of(wait, struct hfi1_ipoib_txq, wait);
        struct net_device *dev = txq->priv->netdev;

        if (likely(dev->reg_state == NETREG_REGISTERED) &&
            likely(!hfi1_ipoib_flush_tx_list(dev, txq)))
                if (atomic_xchg(&txq->no_desc, 0))
                        hfi1_ipoib_wake_txq(txq);
}

int hfi1_ipoib_txreq_init(struct hfi1_ipoib_dev_priv *priv)
{
        struct net_device *dev = priv->netdev;
        char buf[HFI1_IPOIB_TXREQ_NAME_LEN];
        unsigned long tx_ring_size;
        int i;

        /*
         * Ring holds 1 less than tx_ring_size
         * Round up to next power of 2 in order to hold at least tx_queue_len
         */
        tx_ring_size = roundup_pow_of_two((unsigned long)dev->tx_queue_len + 1);

        snprintf(buf, sizeof(buf), "hfi1_%u_ipoib_txreq_cache", priv->dd->unit);
        priv->txreq_cache = kmem_cache_create(buf,
                                              sizeof(struct ipoib_txreq),
                                              0,
                                              0,
                                              NULL);
        if (!priv->txreq_cache)
                return -ENOMEM;

        priv->tx_napis = kcalloc_node(dev->num_tx_queues,
                                      sizeof(struct napi_struct),
                                      GFP_KERNEL,
                                      priv->dd->node);
        if (!priv->tx_napis)
                goto free_txreq_cache;

        priv->txqs = kcalloc_node(dev->num_tx_queues,
                                  sizeof(struct hfi1_ipoib_txq),
                                  GFP_KERNEL,
                                  priv->dd->node);
        if (!priv->txqs)
                goto free_tx_napis;

        for (i = 0; i < dev->num_tx_queues; i++) {
                struct hfi1_ipoib_txq *txq = &priv->txqs[i];

                iowait_init(&txq->wait,
                            0,
                            hfi1_ipoib_flush_txq,
                            NULL,
                            hfi1_ipoib_sdma_sleep,
                            hfi1_ipoib_sdma_wakeup,
                            NULL,
                            NULL);
                txq->priv = priv;
                txq->sde = NULL;
                INIT_LIST_HEAD(&txq->tx_list);
                atomic64_set(&txq->complete_txreqs, 0);
                atomic_set(&txq->stops, 0);
                atomic_set(&txq->ring_full, 0);
                atomic_set(&txq->no_desc, 0);
                txq->q_idx = i;
                txq->flow.tx_queue = 0xff;
                txq->flow.sc5 = 0xff;
                txq->pkts_sent = false;

                netdev_queue_numa_node_write(netdev_get_tx_queue(dev, i),
                                             priv->dd->node);

                txq->tx_ring.items =
                        kcalloc_node(tx_ring_size,
                                     sizeof(struct ipoib_txreq *),
                                     GFP_KERNEL, priv->dd->node);
                if (!txq->tx_ring.items)
                        goto free_txqs;

                spin_lock_init(&txq->tx_ring.producer_lock);
                spin_lock_init(&txq->tx_ring.consumer_lock);
                txq->tx_ring.max_items = tx_ring_size;

                txq->napi = &priv->tx_napis[i];
                netif_tx_napi_add(dev, txq->napi,
                                  hfi1_ipoib_process_tx_ring,
                                  NAPI_POLL_WEIGHT);
        }

        return 0;

free_txqs:
        for (i--; i >= 0; i--) {
                struct hfi1_ipoib_txq *txq = &priv->txqs[i];

                netif_napi_del(txq->napi);
                kfree(txq->tx_ring.items);
        }

        kfree(priv->txqs);
        priv->txqs = NULL;

free_tx_napis:
        kfree(priv->tx_napis);
        priv->tx_napis = NULL;

free_txreq_cache:
        kmem_cache_destroy(priv->txreq_cache);
        priv->txreq_cache = NULL;
        return -ENOMEM;
}

static void hfi1_ipoib_drain_tx_list(struct hfi1_ipoib_txq *txq)
{
        struct sdma_txreq *txreq;
        struct sdma_txreq *txreq_tmp;
        atomic64_t *complete_txreqs = &txq->complete_txreqs;

        list_for_each_entry_safe(txreq, txreq_tmp, &txq->tx_list, list) {
                struct ipoib_txreq *tx =
                        container_of(txreq, struct ipoib_txreq, txreq);

                list_del(&txreq->list);
                sdma_txclean(txq->priv->dd, &tx->txreq);
                dev_kfree_skb_any(tx->skb);
                kmem_cache_free(txq->priv->txreq_cache, tx);
                atomic64_inc(complete_txreqs);
        }

        if (hfi1_ipoib_used(txq))
                dd_dev_warn(txq->priv->dd,
                            "txq %d not empty found %llu requests\n",
                            txq->q_idx,
                            hfi1_ipoib_txreqs(txq->sent_txreqs,
                                              atomic64_read(complete_txreqs)));
}

void hfi1_ipoib_txreq_deinit(struct hfi1_ipoib_dev_priv *priv)
{
        int i;

        for (i = 0; i < priv->netdev->num_tx_queues; i++) {
                struct hfi1_ipoib_txq *txq = &priv->txqs[i];

                iowait_cancel_work(&txq->wait);
                iowait_sdma_drain(&txq->wait);
                hfi1_ipoib_drain_tx_list(txq);
                netif_napi_del(txq->napi);
                (void)hfi1_ipoib_drain_tx_ring(txq, txq->tx_ring.max_items);
                kfree(txq->tx_ring.items);
        }

        kfree(priv->txqs);
        priv->txqs = NULL;

        kfree(priv->tx_napis);
        priv->tx_napis = NULL;

        kmem_cache_destroy(priv->txreq_cache);
        priv->txreq_cache = NULL;
}

void hfi1_ipoib_napi_tx_enable(struct net_device *dev)
{
        struct hfi1_ipoib_dev_priv *priv = hfi1_ipoib_priv(dev);
        int i;

        for (i = 0; i < dev->num_tx_queues; i++) {
                struct hfi1_ipoib_txq *txq = &priv->txqs[i];

                napi_enable(txq->napi);
        }
}

void hfi1_ipoib_napi_tx_disable(struct net_device *dev)
{
        struct hfi1_ipoib_dev_priv *priv = hfi1_ipoib_priv(dev);
        int i;

        for (i = 0; i < dev->num_tx_queues; i++) {
                struct hfi1_ipoib_txq *txq = &priv->txqs[i];

                napi_disable(txq->napi);
                (void)hfi1_ipoib_drain_tx_ring(txq, txq->tx_ring.max_items);
        }
}

void hfi1_ipoib_tx_timeout(struct net_device *dev, unsigned int q)
{
        struct hfi1_ipoib_dev_priv *priv = hfi1_ipoib_priv(dev);
        struct hfi1_ipoib_txq *txq = &priv->txqs[q];
        u64 completed = atomic64_read(&txq->complete_txreqs);

        dd_dev_info(priv->dd, "timeout txq %llx q %u stopped %u stops %d no_desc %d ring_full %d\n",
                    (unsigned long long)txq, q,
                    __netif_subqueue_stopped(dev, txq->q_idx),
                    atomic_read(&txq->stops),
                    atomic_read(&txq->no_desc),
                    atomic_read(&txq->ring_full));
        dd_dev_info(priv->dd, "sde %llx engine %u\n",
                    (unsigned long long)txq->sde,
                    txq->sde ? txq->sde->this_idx : 0);
        dd_dev_info(priv->dd, "flow %x\n", txq->flow.as_int);
        dd_dev_info(priv->dd, "sent %llu completed %llu used %llu\n",
                    txq->sent_txreqs, completed, hfi1_ipoib_used(txq));
        dd_dev_info(priv->dd, "tx_queue_len %u max_items %lu\n",
                    dev->tx_queue_len, txq->tx_ring.max_items);
        dd_dev_info(priv->dd, "head %lu tail %lu\n",
                    txq->tx_ring.head, txq->tx_ring.tail);
        dd_dev_info(priv->dd, "wait queued %u\n",
                    !list_empty(&txq->wait.list));
        dd_dev_info(priv->dd, "tx_list empty %u\n",
                    list_empty(&txq->tx_list));
}