// SPDX-License-Identifier: (GPL-2.0 OR BSD-3-Clause)
/*
 * Copyright(c) 2020 Intel Corporation.
 *
 */

/*
 * This file contains HFI1 support for IPOIB SDMA functionality
 */

#include <linux/log2.h>
#include <linux/circ_buf.h>

#include "sdma.h"
#include "verbs.h"
#include "trace_ibhdrs.h"
#include "ipoib.h"
#include "trace_tx.h"

/*
 * Convenience helpers for circular buffer index math. They assume the ring
 * size is a power of two (hfi1_ipoib_txreq_init() rounds it up), so masking
 * with (size - 1) wraps the index.
 */
#define CIRC_ADD(val, add, size) (((val) + (add)) & ((size) - 1))
#define CIRC_NEXT(val, size) CIRC_ADD(val, 1, size)
#define CIRC_PREV(val, size) CIRC_ADD(val, -1, size)

/**
 * struct ipoib_txreq - IPOIB transmit descriptor
 * @txreq: sdma transmit request
 * @sdma_hdr: 9b ib headers
 * @sdma_status: status returned by sdma engine
 * @complete: non-zero implies complete
 * @priv: ipoib netdev private data
 * @txq: txq on which skb was output
 * @skb: skb to send
 */
struct ipoib_txreq {
	struct sdma_txreq txreq;
	struct hfi1_sdma_header sdma_hdr;
	int sdma_status;
	int complete;
	struct hfi1_ipoib_dev_priv *priv;
	struct hfi1_ipoib_txq *txq;
	struct sk_buff *skb;
};

struct ipoib_txparms {
	struct hfi1_devdata *dd;
	struct rdma_ah_attr *ah_attr;
	struct hfi1_ibport *ibp;
	struct hfi1_ipoib_txq *txq;
	union hfi1_ipoib_flow flow;
	u32 dqpn;
	u8 hdr_dwords;
	u8 entropy;
};

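/*
 * Return the address of the idx'th slot in the tx ring. Slots are opaque
 * byte ranges of 2^shift bytes each, sized at init to hold a struct
 * ipoib_txreq, so the shift turns a slot index into a byte offset.
 */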
static struct ipoib_txreq *
hfi1_txreq_from_idx(struct hfi1_ipoib_circ_buf *r, u32 idx)
{
	return (struct ipoib_txreq *)(r->items + (idx << r->shift));
}

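/* Number of outstanding txreqs: those sent but not yet completed */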
static u32 hfi1_ipoib_txreqs(const u64 sent, const u64 completed)
{
	return sent - completed;
}

static u64 hfi1_ipoib_used(struct hfi1_ipoib_txq *txq)
{
	return hfi1_ipoib_txreqs(txq->sent_txreqs,
				 atomic64_read(&txq->complete_txreqs));
}

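/*
 * txq->stops counts the reasons the subqueue is currently stopped (ring
 * full and/or out of sdma descriptors). The netif subqueue is stopped when
 * the first reason is raised and woken only when the last reason clears.
 */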
static void hfi1_ipoib_stop_txq(struct hfi1_ipoib_txq *txq)
{
	trace_hfi1_txq_stop(txq);
	if (atomic_inc_return(&txq->stops) == 1)
		netif_stop_subqueue(txq->priv->netdev, txq->q_idx);
}

static void hfi1_ipoib_wake_txq(struct hfi1_ipoib_txq *txq)
{
	trace_hfi1_txq_wake(txq);
	if (atomic_dec_and_test(&txq->stops))
		netif_wake_subqueue(txq->priv->netdev, txq->q_idx);
}

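/*
 * High/low watermarks on outstanding txreqs: the queue is stopped when
 * usage reaches the high watermark and restarted once it drains below the
 * low watermark (half of the smaller of tx_queue_len and the ring size).
 */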
static uint hfi1_ipoib_ring_hwat(struct hfi1_ipoib_txq *txq)
{
	return min_t(uint, txq->priv->netdev->tx_queue_len,
		     txq->tx_ring.max_items - 1);
}

static uint hfi1_ipoib_ring_lwat(struct hfi1_ipoib_txq *txq)
{
	return min_t(uint, txq->priv->netdev->tx_queue_len,
		     txq->tx_ring.max_items) >> 1;
}

static void hfi1_ipoib_check_queue_depth(struct hfi1_ipoib_txq *txq)
{
	++txq->sent_txreqs;
	if (hfi1_ipoib_used(txq) >= hfi1_ipoib_ring_hwat(txq) &&
	    !atomic_xchg(&txq->ring_full, 1)) {
		trace_hfi1_txq_full(txq);
		hfi1_ipoib_stop_txq(txq);
	}
}

static void hfi1_ipoib_check_queue_stopped(struct hfi1_ipoib_txq *txq)
{
	struct net_device *dev = txq->priv->netdev;

	/* If shutting down just return as queue state is irrelevant */
	if (unlikely(dev->reg_state != NETREG_REGISTERED))
		return;

	/*
	 * When the queue has been drained to less than half full it will be
	 * restarted.
	 * The size of the txreq ring is fixed at initialization.
	 * The tx queue len can be adjusted upward while the interface is
	 * running.
	 * The tx queue len can be large enough to overflow the txreq_ring.
	 * Use the minimum of the current tx_queue_len or the rings max txreqs
	 * to protect against ring overflow.
	 */
	if (hfi1_ipoib_used(txq) < hfi1_ipoib_ring_lwat(txq) &&
	    atomic_xchg(&txq->ring_full, 0)) {
		trace_hfi1_txq_xmit_unstopped(txq);
		hfi1_ipoib_wake_txq(txq);
	}
}

static void hfi1_ipoib_free_tx(struct ipoib_txreq *tx, int budget)
{
	struct hfi1_ipoib_dev_priv *priv = tx->priv;

	if (likely(!tx->sdma_status)) {
		dev_sw_netstats_tx_add(priv->netdev, 1, tx->skb->len);
	} else {
		++priv->netdev->stats.tx_errors;
		dd_dev_warn(priv->dd,
			    "%s: Status = 0x%x pbc 0x%llx txq = %d sde = %d\n",
			    __func__, tx->sdma_status,
			    le64_to_cpu(tx->sdma_hdr.pbc), tx->txq->q_idx,
			    tx->txq->sde->this_idx);
	}

	napi_consume_skb(tx->skb, budget);
	tx->skb = NULL;
	sdma_txclean(priv->dd, &tx->txreq);
}

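/*
 * Free any skbs still sitting in the tx ring and reset the ring state and
 * the sent/complete counters. Callers are expected to have quiesced the
 * queue (napi disabled, sdma drained) before invoking this.
 */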
static void hfi1_ipoib_drain_tx_ring(struct hfi1_ipoib_txq *txq)
{
	struct hfi1_ipoib_circ_buf *tx_ring = &txq->tx_ring;
	int i;
	struct ipoib_txreq *tx;

	for (i = 0; i < tx_ring->max_items; i++) {
		tx = hfi1_txreq_from_idx(tx_ring, i);
		tx->complete = 0;
		dev_kfree_skb_any(tx->skb);
		tx->skb = NULL;
		sdma_txclean(txq->priv->dd, &tx->txreq);
	}
	tx_ring->head = 0;
	tx_ring->tail = 0;
	atomic64_set(&txq->complete_txreqs, 0);
	txq->sent_txreqs = 0;
}

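/*
 * Napi poll (consumer side of the ring): walk forward from head freeing
 * completed txreqs. The acquire read of tx->complete pairs with the
 * store_release in hfi1_ipoib_sdma_complete() so the sdma status and skb
 * are visible before the slot is reclaimed.
 */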
static int hfi1_ipoib_poll_tx_ring(struct napi_struct *napi, int budget)
{
	struct hfi1_ipoib_dev_priv *priv = hfi1_ipoib_priv(napi->dev);
	struct hfi1_ipoib_txq *txq = &priv->txqs[napi - priv->tx_napis];
	struct hfi1_ipoib_circ_buf *tx_ring = &txq->tx_ring;
	u32 head = tx_ring->head;
	u32 max_tx = tx_ring->max_items;
	int work_done;
	struct ipoib_txreq *tx = hfi1_txreq_from_idx(tx_ring, head);

	trace_hfi1_txq_poll(txq);
	for (work_done = 0; work_done < budget; work_done++) {
		/* See hfi1_ipoib_sdma_complete() */
		if (!smp_load_acquire(&tx->complete))
			break;
		tx->complete = 0;
		hfi1_ipoib_free_tx(tx, budget);
		head = CIRC_NEXT(head, max_tx);
		tx = hfi1_txreq_from_idx(tx_ring, head);
	}
	atomic64_add(work_done, &txq->complete_txreqs);

	/* Finished freeing tx items so store the head value. */
	smp_store_release(&tx_ring->head, head);

	hfi1_ipoib_check_queue_stopped(txq);

	if (work_done < budget)
		napi_complete_done(napi, work_done);

	return work_done;
}

static void hfi1_ipoib_sdma_complete(struct sdma_txreq *txreq, int status)
{
	struct ipoib_txreq *tx = container_of(txreq, struct ipoib_txreq, txreq);

	trace_hfi1_txq_complete(tx->txq);
	tx->sdma_status = status;
	/* see hfi1_ipoib_poll_tx_ring */
	smp_store_release(&tx->complete, 1);
	napi_schedule_irqoff(tx->txq->napi);
}

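/*
 * Add the skb linear data and each page fragment to the sdma descriptor
 * list for this txreq.
 */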
static int hfi1_ipoib_build_ulp_payload(struct ipoib_txreq *tx,
					struct ipoib_txparms *txp)
{
	struct hfi1_devdata *dd = txp->dd;
	struct sdma_txreq *txreq = &tx->txreq;
	struct sk_buff *skb = tx->skb;
	int ret = 0;
	int i;

	if (skb_headlen(skb)) {
		ret = sdma_txadd_kvaddr(dd, txreq, skb->data, skb_headlen(skb));
		if (unlikely(ret))
			return ret;
	}

	for (i = 0; i < skb_shinfo(skb)->nr_frags; i++) {
		const skb_frag_t *frag = &skb_shinfo(skb)->frags[i];

		ret = sdma_txadd_page(dd,
				      txreq,
				      skb_frag_page(frag),
				      frag->bv_offset,
				      skb_frag_size(frag));
		if (unlikely(ret))
			break;
	}

	return ret;
}

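/*
 * Initialize the sdma txreq for the whole packet (PBC + IB headers +
 * payload), then add the PBC/header buffer and the ULP payload descriptors.
 */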
static int hfi1_ipoib_build_tx_desc(struct ipoib_txreq *tx,
				    struct ipoib_txparms *txp)
{
	struct hfi1_devdata *dd = txp->dd;
	struct sdma_txreq *txreq = &tx->txreq;
	struct hfi1_sdma_header *sdma_hdr = &tx->sdma_hdr;
	u16 pkt_bytes =
		sizeof(sdma_hdr->pbc) + (txp->hdr_dwords << 2) + tx->skb->len;
	int ret;

	ret = sdma_txinit(txreq, 0, pkt_bytes, hfi1_ipoib_sdma_complete);
	if (unlikely(ret))
		return ret;

	/* add pbc + headers */
	ret = sdma_txadd_kvaddr(dd,
				txreq,
				sdma_hdr,
				sizeof(sdma_hdr->pbc) + (txp->hdr_dwords << 2));
	if (unlikely(ret))
		return ret;

	/* add the ulp payload */
	return hfi1_ipoib_build_ulp_payload(tx, txp);
}

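/*
 * Build the 9B LRH/BTH/DETH (plus a GRH when the AH carries one) and the
 * PBC in tx->sdma_hdr. The source QPN comes from bytes 1-3 of the netdev
 * hardware address, the PSN from the low bits of sent_txreqs, and the
 * entropy value is placed in the DETH above the source QPN.
 */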
static void hfi1_ipoib_build_ib_tx_headers(struct ipoib_txreq *tx,
					   struct ipoib_txparms *txp)
{
	struct hfi1_ipoib_dev_priv *priv = tx->priv;
	struct hfi1_sdma_header *sdma_hdr = &tx->sdma_hdr;
	struct sk_buff *skb = tx->skb;
	struct hfi1_pportdata *ppd = ppd_from_ibp(txp->ibp);
	struct rdma_ah_attr *ah_attr = txp->ah_attr;
	struct ib_other_headers *ohdr;
	struct ib_grh *grh;
	u16 dwords;
	u16 slid;
	u16 dlid;
	u16 lrh0;
	u32 bth0;
	u32 sqpn = (u32)(priv->netdev->dev_addr[1] << 16 |
			 priv->netdev->dev_addr[2] << 8 |
			 priv->netdev->dev_addr[3]);
	u16 payload_dwords;
	u8 pad_cnt;

	pad_cnt = -skb->len & 3;

	/* Includes ICRC */
	payload_dwords = ((skb->len + pad_cnt) >> 2) + SIZE_OF_CRC;

	/* header size in dwords LRH+BTH+DETH = (8+12+8)/4. */
	txp->hdr_dwords = 7;

	if (rdma_ah_get_ah_flags(ah_attr) & IB_AH_GRH) {
		grh = &sdma_hdr->hdr.ibh.u.l.grh;
		txp->hdr_dwords +=
			hfi1_make_grh(txp->ibp,
				      grh,
				      rdma_ah_read_grh(ah_attr),
				      txp->hdr_dwords - LRH_9B_DWORDS,
				      payload_dwords);
		lrh0 = HFI1_LRH_GRH;
		ohdr = &sdma_hdr->hdr.ibh.u.l.oth;
	} else {
		lrh0 = HFI1_LRH_BTH;
		ohdr = &sdma_hdr->hdr.ibh.u.oth;
	}

	lrh0 |= (rdma_ah_get_sl(ah_attr) & 0xf) << 4;
	lrh0 |= (txp->flow.sc5 & 0xf) << 12;

	dlid = opa_get_lid(rdma_ah_get_dlid(ah_attr), 9B);
	if (dlid == be16_to_cpu(IB_LID_PERMISSIVE)) {
		slid = be16_to_cpu(IB_LID_PERMISSIVE);
	} else {
		u16 lid = (u16)ppd->lid;

		if (lid) {
			lid |= rdma_ah_get_path_bits(ah_attr) &
				((1 << ppd->lmc) - 1);
			slid = lid;
		} else {
			slid = be16_to_cpu(IB_LID_PERMISSIVE);
		}
	}

	/* Includes ICRC */
	dwords = txp->hdr_dwords + payload_dwords;

	/* Build the lrh */
	sdma_hdr->hdr.hdr_type = HFI1_PKT_TYPE_9B;
	hfi1_make_ib_hdr(&sdma_hdr->hdr.ibh, lrh0, dwords, dlid, slid);

	/* Build the bth */
	bth0 = (IB_OPCODE_UD_SEND_ONLY << 24) | (pad_cnt << 20) | priv->pkey;

	ohdr->bth[0] = cpu_to_be32(bth0);
	ohdr->bth[1] = cpu_to_be32(txp->dqpn);
	ohdr->bth[2] = cpu_to_be32(mask_psn((u32)txp->txq->sent_txreqs));

	/* Build the deth */
	ohdr->u.ud.deth[0] = cpu_to_be32(priv->qkey);
	ohdr->u.ud.deth[1] = cpu_to_be32((txp->entropy <<
					  HFI1_IPOIB_ENTROPY_SHIFT) | sqpn);

	/* Construct the pbc. */
	sdma_hdr->pbc =
		cpu_to_le64(create_pbc(ppd,
				       ib_is_sc5(txp->flow.sc5) <<
							PBC_DC_INFO_SHIFT,
				       0,
				       sc_to_vlt(priv->dd, txp->flow.sc5),
				       dwords - SIZE_OF_CRC +
						(sizeof(sdma_hdr->pbc) >> 2)));
}

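/*
 * Claim the txreq slot at the ring tail, build its headers and sdma
 * descriptors, and re-select the sdma engine if the flow (tx queue, sc5)
 * has changed since the last packet on this txq.
 */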
static struct ipoib_txreq *hfi1_ipoib_send_dma_common(struct net_device *dev,
						      struct sk_buff *skb,
						      struct ipoib_txparms *txp)
{
	struct hfi1_ipoib_dev_priv *priv = hfi1_ipoib_priv(dev);
	struct hfi1_ipoib_txq *txq = txp->txq;
	struct ipoib_txreq *tx;
	struct hfi1_ipoib_circ_buf *tx_ring;
	u32 tail;
	int ret;

	if (unlikely(hfi1_ipoib_used(txq) >= hfi1_ipoib_ring_hwat(txq)))
		/* This shouldn't happen with a stopped queue */
		return ERR_PTR(-ENOMEM);
	tx_ring = &txq->tx_ring;
	tail = tx_ring->tail;
	tx = hfi1_txreq_from_idx(tx_ring, tx_ring->tail);
	trace_hfi1_txq_alloc_tx(txq);

	/* so that we can test if the sdma descriptors are there */
	tx->txreq.num_desc = 0;
	tx->priv = priv;
	tx->txq = txq;
	tx->skb = skb;
	INIT_LIST_HEAD(&tx->txreq.list);

	hfi1_ipoib_build_ib_tx_headers(tx, txp);

	ret = hfi1_ipoib_build_tx_desc(tx, txp);
	if (likely(!ret)) {
		if (txq->flow.as_int != txp->flow.as_int) {
			txq->flow.tx_queue = txp->flow.tx_queue;
			txq->flow.sc5 = txp->flow.sc5;
			txq->sde =
				sdma_select_engine_sc(priv->dd,
						      txp->flow.tx_queue,
						      txp->flow.sc5);
			trace_hfi1_flow_switch(txq);
		}

		return tx;
	}

	sdma_txclean(priv->dd, &tx->txreq);

	return ERR_PTR(ret);
}

static int hfi1_ipoib_submit_tx_list(struct net_device *dev,
				     struct hfi1_ipoib_txq *txq)
{
	int ret;
	u16 count_out;

	ret = sdma_send_txlist(txq->sde,
			       iowait_get_ib_work(&txq->wait),
			       &txq->tx_list,
			       &count_out);
	if (likely(!ret) || ret == -EBUSY || ret == -ECOMM)
		return ret;

	dd_dev_warn(txq->priv->dd, "cannot send skb tx list, err %d.\n", ret);

	return ret;
}

static int hfi1_ipoib_flush_tx_list(struct net_device *dev,
				    struct hfi1_ipoib_txq *txq)
{
	int ret = 0;

	if (!list_empty(&txq->tx_list)) {
		/* Flush the current list */
		ret = hfi1_ipoib_submit_tx_list(dev, txq);

		if (unlikely(ret))
			if (ret != -EBUSY)
				++dev->stats.tx_carrier_errors;
	}

	return ret;
}

static int hfi1_ipoib_submit_tx(struct hfi1_ipoib_txq *txq,
				struct ipoib_txreq *tx)
{
	int ret;

	ret = sdma_send_txreq(txq->sde,
			      iowait_get_ib_work(&txq->wait),
			      &tx->txreq,
			      txq->pkts_sent);
	if (likely(!ret)) {
		txq->pkts_sent = true;
		iowait_starve_clear(txq->pkts_sent, &txq->wait);
	}

	return ret;
}

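/*
 * Single-packet submit path. On -EBUSY/-ECOMM the descriptor remains queued
 * inside the sdma layer, so the packet is still accounted as sent; any other
 * error marks the slot complete and lets the napi poll reclaim it.
 */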
static int hfi1_ipoib_send_dma_single(struct net_device *dev,
				      struct sk_buff *skb,
				      struct ipoib_txparms *txp)
{
	struct hfi1_ipoib_txq *txq = txp->txq;
	struct hfi1_ipoib_circ_buf *tx_ring;
	struct ipoib_txreq *tx;
	int ret;

	tx = hfi1_ipoib_send_dma_common(dev, skb, txp);
	if (IS_ERR(tx)) {
		int ret = PTR_ERR(tx);

		dev_kfree_skb_any(skb);

		if (ret == -ENOMEM)
			++dev->stats.tx_errors;
		else
			++dev->stats.tx_carrier_errors;

		return NETDEV_TX_OK;
	}

	tx_ring = &txq->tx_ring;
	/* consume tx */
	smp_store_release(&tx_ring->tail, CIRC_NEXT(tx_ring->tail, tx_ring->max_items));
	ret = hfi1_ipoib_submit_tx(txq, tx);
	if (likely(!ret)) {
tx_ok:
		trace_sdma_output_ibhdr(tx->priv->dd,
					&tx->sdma_hdr.hdr,
					ib_is_sc5(txp->flow.sc5));
		hfi1_ipoib_check_queue_depth(txq);
		return NETDEV_TX_OK;
	}

	txq->pkts_sent = false;

	if (ret == -EBUSY || ret == -ECOMM)
		goto tx_ok;

	/* mark complete and kick napi tx */
	smp_store_release(&tx->complete, 1);
	napi_schedule(tx->txq->napi);

	++dev->stats.tx_carrier_errors;

	return NETDEV_TX_OK;
}

static int hfi1_ipoib_send_dma_list(struct net_device *dev,
				    struct sk_buff *skb,
				    struct ipoib_txparms *txp)
{
	struct hfi1_ipoib_txq *txq = txp->txq;
	struct hfi1_ipoib_circ_buf *tx_ring;
	struct ipoib_txreq *tx;

	/* Has the flow changed? */
	if (txq->flow.as_int != txp->flow.as_int) {
		int ret;

		trace_hfi1_flow_flush(txq);
		ret = hfi1_ipoib_flush_tx_list(dev, txq);
		if (unlikely(ret)) {
			if (ret == -EBUSY)
				++dev->stats.tx_dropped;
			dev_kfree_skb_any(skb);
			return NETDEV_TX_OK;
		}
	}
	tx = hfi1_ipoib_send_dma_common(dev, skb, txp);
	if (IS_ERR(tx)) {
		int ret = PTR_ERR(tx);

		dev_kfree_skb_any(skb);

		if (ret == -ENOMEM)
			++dev->stats.tx_errors;
		else
			++dev->stats.tx_carrier_errors;

		return NETDEV_TX_OK;
	}

	tx_ring = &txq->tx_ring;
	/* consume tx */
	smp_store_release(&tx_ring->tail, CIRC_NEXT(tx_ring->tail, tx_ring->max_items));
	list_add_tail(&tx->txreq.list, &txq->tx_list);

	hfi1_ipoib_check_queue_depth(txq);

	trace_sdma_output_ibhdr(tx->priv->dd,
				&tx->sdma_hdr.hdr,
				ib_is_sc5(txp->flow.sc5));

	if (!netdev_xmit_more())
		(void)hfi1_ipoib_flush_tx_list(dev, txq);

	return NETDEV_TX_OK;
}

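/*
 * Derive a small entropy value from the first four transport-header bytes
 * when present (otherwise from the queue mapping); it is placed in the DETH
 * above the source QPN, see hfi1_ipoib_build_ib_tx_headers().
 */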
static u8 hfi1_ipoib_calc_entropy(struct sk_buff *skb)
{
	if (skb_transport_header_was_set(skb)) {
		u8 *hdr = (u8 *)skb_transport_header(skb);

		return (hdr[0] ^ hdr[1] ^ hdr[2] ^ hdr[3]);
	}

	return (u8)skb_get_queue_mapping(skb);
}

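/*
 * Transmit entry point for the ipoib rdma_netdev. Drops oversized skbs,
 * fills in the per-packet tx parameters, and submits either through the
 * tx_list path (when more packets are pending) or directly as a single
 * request.
 */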
int hfi1_ipoib_send(struct net_device *dev,
		    struct sk_buff *skb,
		    struct ib_ah *address,
		    u32 dqpn)
{
	struct hfi1_ipoib_dev_priv *priv = hfi1_ipoib_priv(dev);
	struct ipoib_txparms txp;
	struct rdma_netdev *rn = netdev_priv(dev);

	if (unlikely(skb->len > rn->mtu + HFI1_IPOIB_ENCAP_LEN)) {
		dd_dev_warn(priv->dd, "packet len %d (> %d) too long to send, dropping\n",
			    skb->len,
			    rn->mtu + HFI1_IPOIB_ENCAP_LEN);
		++dev->stats.tx_dropped;
		++dev->stats.tx_errors;
		dev_kfree_skb_any(skb);
		return NETDEV_TX_OK;
	}

	txp.dd = priv->dd;
	txp.ah_attr = &ibah_to_rvtah(address)->attr;
	txp.ibp = to_iport(priv->device, priv->port_num);
	txp.txq = &priv->txqs[skb_get_queue_mapping(skb)];
	txp.dqpn = dqpn;
	txp.flow.sc5 = txp.ibp->sl_to_sc[rdma_ah_get_sl(txp.ah_attr)];
	txp.flow.tx_queue = (u8)skb_get_queue_mapping(skb);
	txp.entropy = hfi1_ipoib_calc_entropy(skb);

	if (netdev_xmit_more() || !list_empty(&txp.txq->tx_list))
		return hfi1_ipoib_send_dma_list(dev, skb, &txp);

	return hfi1_ipoib_send_dma_single(dev, skb, &txp);
}

/*
 * hfi1_ipoib_sdma_sleep - ipoib sdma sleep function
 *
 * This function gets called from sdma_send_txreq() when there are not enough
 * sdma descriptors available to send the packet. It adds the Tx queue's wait
 * structure to the sdma engine's dmawait list so that it is woken up when
 * descriptors become available.
 */
static int hfi1_ipoib_sdma_sleep(struct sdma_engine *sde,
				 struct iowait_work *wait,
				 struct sdma_txreq *txreq,
				 uint seq,
				 bool pkts_sent)
{
	struct hfi1_ipoib_txq *txq =
		container_of(wait->iow, struct hfi1_ipoib_txq, wait);

	write_seqlock(&sde->waitlock);

	if (likely(txq->priv->netdev->reg_state == NETREG_REGISTERED)) {
		if (sdma_progress(sde, seq, txreq)) {
			write_sequnlock(&sde->waitlock);
			return -EAGAIN;
		}

		if (list_empty(&txreq->list))
			/* came from non-list submit */
			list_add_tail(&txreq->list, &txq->tx_list);
		if (list_empty(&txq->wait.list)) {
			struct hfi1_ibport *ibp = &sde->ppd->ibport_data;

			if (!atomic_xchg(&txq->no_desc, 1)) {
				trace_hfi1_txq_queued(txq);
				hfi1_ipoib_stop_txq(txq);
			}
			ibp->rvp.n_dmawait++;
			iowait_queue(pkts_sent, wait->iow, &sde->dmawait);
		}

		write_sequnlock(&sde->waitlock);
		return -EBUSY;
	}

	write_sequnlock(&sde->waitlock);
	return -EINVAL;
}

/*
 * hfi1_ipoib_sdma_wakeup - ipoib sdma wakeup function
 *
 * This function gets called when SDMA descriptors become available and the
 * Tx queue's wait structure was previously added to the sdma engine's
 * dmawait list.
 */
static void hfi1_ipoib_sdma_wakeup(struct iowait *wait, int reason)
{
	struct hfi1_ipoib_txq *txq =
		container_of(wait, struct hfi1_ipoib_txq, wait);

	trace_hfi1_txq_wakeup(txq);
	if (likely(txq->priv->netdev->reg_state == NETREG_REGISTERED))
		iowait_schedule(wait, system_highpri_wq, WORK_CPU_UNBOUND);
}

static void hfi1_ipoib_flush_txq(struct work_struct *work)
{
	struct iowait_work *ioww =
		container_of(work, struct iowait_work, iowork);
	struct iowait *wait = iowait_ioww_to_iow(ioww);
	struct hfi1_ipoib_txq *txq =
		container_of(wait, struct hfi1_ipoib_txq, wait);
	struct net_device *dev = txq->priv->netdev;

	if (likely(dev->reg_state == NETREG_REGISTERED) &&
	    likely(!hfi1_ipoib_flush_tx_list(dev, txq)))
		if (atomic_xchg(&txq->no_desc, 0))
			hfi1_ipoib_wake_txq(txq);
}

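/*
 * Allocate per-queue napi contexts and tx rings. Each ring holds
 * tx_ring_size slots of tx_item_size bytes (both rounded up to powers of
 * two) so index and address arithmetic reduce to shifts and masks.
 */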
int hfi1_ipoib_txreq_init(struct hfi1_ipoib_dev_priv *priv)
{
	struct net_device *dev = priv->netdev;
	u32 tx_ring_size, tx_item_size;
	int i;

	priv->tx_napis = kcalloc_node(dev->num_tx_queues,
				      sizeof(struct napi_struct),
				      GFP_KERNEL,
				      priv->dd->node);
	if (!priv->tx_napis)
		return -ENOMEM;

	/*
	 * Ring holds 1 less than tx_ring_size
	 * Round up to next power of 2 in order to hold at least tx_queue_len
	 */
	tx_ring_size = roundup_pow_of_two(dev->tx_queue_len + 1);
	tx_item_size = roundup_pow_of_two(sizeof(struct ipoib_txreq));

	priv->txqs = kcalloc_node(dev->num_tx_queues,
				  sizeof(struct hfi1_ipoib_txq),
				  GFP_KERNEL,
				  priv->dd->node);
	if (!priv->txqs)
		goto free_tx_napis;

	for (i = 0; i < dev->num_tx_queues; i++) {
		struct hfi1_ipoib_txq *txq = &priv->txqs[i];

		iowait_init(&txq->wait,
			    0,
			    hfi1_ipoib_flush_txq,
			    NULL,
			    hfi1_ipoib_sdma_sleep,
			    hfi1_ipoib_sdma_wakeup,
			    NULL,
			    NULL);
		txq->priv = priv;
		txq->sde = NULL;
		INIT_LIST_HEAD(&txq->tx_list);
		atomic64_set(&txq->complete_txreqs, 0);
		atomic_set(&txq->stops, 0);
		atomic_set(&txq->ring_full, 0);
		atomic_set(&txq->no_desc, 0);
		txq->q_idx = i;
		txq->flow.tx_queue = 0xff;
		txq->flow.sc5 = 0xff;
		txq->pkts_sent = false;

		netdev_queue_numa_node_write(netdev_get_tx_queue(dev, i),
					     priv->dd->node);

		txq->tx_ring.items =
			kcalloc_node(tx_ring_size, tx_item_size,
				     GFP_KERNEL, priv->dd->node);
		if (!txq->tx_ring.items)
			goto free_txqs;

		txq->tx_ring.max_items = tx_ring_size;
		/* shift converts a slot index into a byte offset (slot stride) */
		txq->tx_ring.shift = ilog2(tx_item_size);

		txq->napi = &priv->tx_napis[i];
		netif_tx_napi_add(dev, txq->napi,
				  hfi1_ipoib_poll_tx_ring,
				  NAPI_POLL_WEIGHT);
	}

	return 0;

free_txqs:
	for (i--; i >= 0; i--) {
		struct hfi1_ipoib_txq *txq = &priv->txqs[i];

		netif_napi_del(txq->napi);
		kfree(txq->tx_ring.items);
	}

	kfree(priv->txqs);
	priv->txqs = NULL;

free_tx_napis:
	kfree(priv->tx_napis);
	priv->tx_napis = NULL;
	return -ENOMEM;
}

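/*
 * Release txreqs still sitting on the sw tx_list (built but never handed
 * to the sdma engine) and account them as complete.
 */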
static void hfi1_ipoib_drain_tx_list(struct hfi1_ipoib_txq *txq)
{
	struct sdma_txreq *txreq;
	struct sdma_txreq *txreq_tmp;
	atomic64_t *complete_txreqs = &txq->complete_txreqs;

	list_for_each_entry_safe(txreq, txreq_tmp, &txq->tx_list, list) {
		struct ipoib_txreq *tx =
			container_of(txreq, struct ipoib_txreq, txreq);

		list_del(&txreq->list);
		sdma_txclean(txq->priv->dd, &tx->txreq);
		dev_kfree_skb_any(tx->skb);
		tx->skb = NULL;
		atomic64_inc(complete_txreqs);
	}

	if (hfi1_ipoib_used(txq))
		dd_dev_warn(txq->priv->dd,
			    "txq %d not empty found %u requests\n",
			    txq->q_idx,
			    hfi1_ipoib_txreqs(txq->sent_txreqs,
					      atomic64_read(complete_txreqs)));
}

void hfi1_ipoib_txreq_deinit(struct hfi1_ipoib_dev_priv *priv)
{
	int i;

	for (i = 0; i < priv->netdev->num_tx_queues; i++) {
		struct hfi1_ipoib_txq *txq = &priv->txqs[i];

		iowait_cancel_work(&txq->wait);
		iowait_sdma_drain(&txq->wait);
		hfi1_ipoib_drain_tx_list(txq);
		netif_napi_del(txq->napi);
		hfi1_ipoib_drain_tx_ring(txq);
		kfree(txq->tx_ring.items);
	}

	kfree(priv->txqs);
	priv->txqs = NULL;

	kfree(priv->tx_napis);
	priv->tx_napis = NULL;
}

void hfi1_ipoib_napi_tx_enable(struct net_device *dev)
{
	struct hfi1_ipoib_dev_priv *priv = hfi1_ipoib_priv(dev);
	int i;

	for (i = 0; i < dev->num_tx_queues; i++) {
		struct hfi1_ipoib_txq *txq = &priv->txqs[i];

		napi_enable(txq->napi);
	}
}

void hfi1_ipoib_napi_tx_disable(struct net_device *dev)
{
	struct hfi1_ipoib_dev_priv *priv = hfi1_ipoib_priv(dev);
	int i;

	for (i = 0; i < dev->num_tx_queues; i++) {
		struct hfi1_ipoib_txq *txq = &priv->txqs[i];

		napi_disable(txq->napi);
		hfi1_ipoib_drain_tx_ring(txq);
	}
}

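/* Netdev tx timeout handler: log the state of the stalled tx queue */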
void hfi1_ipoib_tx_timeout(struct net_device *dev, unsigned int q)
{
	struct hfi1_ipoib_dev_priv *priv = hfi1_ipoib_priv(dev);
	struct hfi1_ipoib_txq *txq = &priv->txqs[q];
	u64 completed = atomic64_read(&txq->complete_txreqs);

	dd_dev_info(priv->dd, "timeout txq %llx q %u stopped %u stops %d no_desc %d ring_full %d\n",
		    (unsigned long long)txq, q,
		    __netif_subqueue_stopped(dev, txq->q_idx),
		    atomic_read(&txq->stops),
		    atomic_read(&txq->no_desc),
		    atomic_read(&txq->ring_full));
	dd_dev_info(priv->dd, "sde %llx engine %u\n",
		    (unsigned long long)txq->sde,
		    txq->sde ? txq->sde->this_idx : 0);
	dd_dev_info(priv->dd, "flow %x\n", txq->flow.as_int);
	dd_dev_info(priv->dd, "sent %llu completed %llu used %llu\n",
		    txq->sent_txreqs, completed, hfi1_ipoib_used(txq));
	dd_dev_info(priv->dd, "tx_queue_len %u max_items %u\n",
		    dev->tx_queue_len, txq->tx_ring.max_items);
	dd_dev_info(priv->dd, "head %u tail %u\n",
		    txq->tx_ring.head, txq->tx_ring.tail);
	dd_dev_info(priv->dd, "wait queued %u\n",
		    !list_empty(&txq->wait.list));
	dd_dev_info(priv->dd, "tx_list empty %u\n",
		    list_empty(&txq->tx_list));
}