#ifndef _HFI1_SDMA_H
#define _HFI1_SDMA_H
/*
 * Copyright(c) 2015, 2016 Intel Corporation.
 *
 * This file is provided under a dual BSD/GPLv2 license. When using or
 * redistributing this file, you may do so under either license.
 *
 * GPL LICENSE SUMMARY
 *
 * This program is free software; you can redistribute it and/or modify
 * it under the terms of version 2 of the GNU General Public License as
 * published by the Free Software Foundation.
 *
 * This program is distributed in the hope that it will be useful, but
 * WITHOUT ANY WARRANTY; without even the implied warranty of
 * MERCHANTABILITY or FITNESS FOR A PARTICULAR PURPOSE. See the GNU
 * General Public License for more details.
 *
 * BSD LICENSE
 *
 * Redistribution and use in source and binary forms, with or without
 * modification, are permitted provided that the following conditions
 * are met:
 *
 *  - Redistributions of source code must retain the above copyright
 *    notice, this list of conditions and the following disclaimer.
 *  - Redistributions in binary form must reproduce the above copyright
 *    notice, this list of conditions and the following disclaimer in
 *    the documentation and/or other materials provided with the
 *    distribution.
 *  - Neither the name of Intel Corporation nor the names of its
 *    contributors may be used to endorse or promote products derived
 *    from this software without specific prior written permission.
 *
 * THIS SOFTWARE IS PROVIDED BY THE COPYRIGHT HOLDERS AND CONTRIBUTORS
 * "AS IS" AND ANY EXPRESS OR IMPLIED WARRANTIES, INCLUDING, BUT NOT
 * LIMITED TO, THE IMPLIED WARRANTIES OF MERCHANTABILITY AND FITNESS FOR
 * A PARTICULAR PURPOSE ARE DISCLAIMED. IN NO EVENT SHALL THE COPYRIGHT
 * OWNER OR CONTRIBUTORS BE LIABLE FOR ANY DIRECT, INDIRECT, INCIDENTAL,
 * SPECIAL, EXEMPLARY, OR CONSEQUENTIAL DAMAGES (INCLUDING, BUT NOT
 * LIMITED TO, PROCUREMENT OF SUBSTITUTE GOODS OR SERVICES; LOSS OF USE,
 * DATA, OR PROFITS; OR BUSINESS INTERRUPTION) HOWEVER CAUSED AND ON ANY
 * THEORY OF LIABILITY, WHETHER IN CONTRACT, STRICT LIABILITY, OR TORT
 * (INCLUDING NEGLIGENCE OR OTHERWISE) ARISING IN ANY WAY OUT OF THE USE
 * OF THIS SOFTWARE, EVEN IF ADVISED OF THE POSSIBILITY OF SUCH DAMAGE.
 *
 */

#include <linux/types.h>
#include <linux/list.h>
#include <asm/byteorder.h>
#include <linux/workqueue.h>
#include <linux/rculist.h>

#include "hfi.h"
#include "verbs.h"
#include "sdma_txreq.h"

/* Hardware limit */
#define MAX_DESC 64
/* Hardware limit for SDMA packet size */
#define MAX_SDMA_PKT_SIZE ((16 * 1024) - 1)

#define SDMA_TXREQ_S_OK 0
#define SDMA_TXREQ_S_SENDERROR 1
#define SDMA_TXREQ_S_ABORTED 2
#define SDMA_TXREQ_S_SHUTDOWN 3

/* flags bits */
#define SDMA_TXREQ_F_URGENT 0x0001
#define SDMA_TXREQ_F_AHG_COPY 0x0002
#define SDMA_TXREQ_F_USE_AHG 0x0004

#define SDMA_MAP_NONE 0
#define SDMA_MAP_SINGLE 1
#define SDMA_MAP_PAGE 2

#define SDMA_AHG_VALUE_MASK 0xffff
#define SDMA_AHG_VALUE_SHIFT 0
#define SDMA_AHG_INDEX_MASK 0xf
#define SDMA_AHG_INDEX_SHIFT 16
#define SDMA_AHG_FIELD_LEN_MASK 0xf
#define SDMA_AHG_FIELD_LEN_SHIFT 20
#define SDMA_AHG_FIELD_START_MASK 0x1f
#define SDMA_AHG_FIELD_START_SHIFT 24
#define SDMA_AHG_UPDATE_ENABLE_MASK 0x1
#define SDMA_AHG_UPDATE_ENABLE_SHIFT 31

/* AHG modes */

/*
 * Be aware the ordering and values
 * for SDMA_AHG_APPLY_UPDATE[123]
 * are assumed in generating a skip
 * count in submit_tx() in sdma.c
 */
#define SDMA_AHG_NO_AHG 0
#define SDMA_AHG_COPY 1
#define SDMA_AHG_APPLY_UPDATE1 2
#define SDMA_AHG_APPLY_UPDATE2 3
#define SDMA_AHG_APPLY_UPDATE3 4

/*
 * Bits defined in the send DMA descriptor.
 */
#define SDMA_DESC0_FIRST_DESC_FLAG BIT_ULL(63)
#define SDMA_DESC0_LAST_DESC_FLAG BIT_ULL(62)
#define SDMA_DESC0_BYTE_COUNT_SHIFT 48
#define SDMA_DESC0_BYTE_COUNT_WIDTH 14
#define SDMA_DESC0_BYTE_COUNT_MASK \
	((1ULL << SDMA_DESC0_BYTE_COUNT_WIDTH) - 1)
#define SDMA_DESC0_BYTE_COUNT_SMASK \
	(SDMA_DESC0_BYTE_COUNT_MASK << SDMA_DESC0_BYTE_COUNT_SHIFT)
#define SDMA_DESC0_PHY_ADDR_SHIFT 0
#define SDMA_DESC0_PHY_ADDR_WIDTH 48
#define SDMA_DESC0_PHY_ADDR_MASK \
	((1ULL << SDMA_DESC0_PHY_ADDR_WIDTH) - 1)
#define SDMA_DESC0_PHY_ADDR_SMASK \
	(SDMA_DESC0_PHY_ADDR_MASK << SDMA_DESC0_PHY_ADDR_SHIFT)

#define SDMA_DESC1_HEADER_UPDATE1_SHIFT 32
#define SDMA_DESC1_HEADER_UPDATE1_WIDTH 32
#define SDMA_DESC1_HEADER_UPDATE1_MASK \
	((1ULL << SDMA_DESC1_HEADER_UPDATE1_WIDTH) - 1)
#define SDMA_DESC1_HEADER_UPDATE1_SMASK \
	(SDMA_DESC1_HEADER_UPDATE1_MASK << SDMA_DESC1_HEADER_UPDATE1_SHIFT)
#define SDMA_DESC1_HEADER_MODE_SHIFT 13
#define SDMA_DESC1_HEADER_MODE_WIDTH 3
#define SDMA_DESC1_HEADER_MODE_MASK \
	((1ULL << SDMA_DESC1_HEADER_MODE_WIDTH) - 1)
#define SDMA_DESC1_HEADER_MODE_SMASK \
	(SDMA_DESC1_HEADER_MODE_MASK << SDMA_DESC1_HEADER_MODE_SHIFT)
#define SDMA_DESC1_HEADER_INDEX_SHIFT 8
#define SDMA_DESC1_HEADER_INDEX_WIDTH 5
#define SDMA_DESC1_HEADER_INDEX_MASK \
	((1ULL << SDMA_DESC1_HEADER_INDEX_WIDTH) - 1)
#define SDMA_DESC1_HEADER_INDEX_SMASK \
	(SDMA_DESC1_HEADER_INDEX_MASK << SDMA_DESC1_HEADER_INDEX_SHIFT)
#define SDMA_DESC1_HEADER_DWS_SHIFT 4
#define SDMA_DESC1_HEADER_DWS_WIDTH 4
#define SDMA_DESC1_HEADER_DWS_MASK \
	((1ULL << SDMA_DESC1_HEADER_DWS_WIDTH) - 1)
#define SDMA_DESC1_HEADER_DWS_SMASK \
	(SDMA_DESC1_HEADER_DWS_MASK << SDMA_DESC1_HEADER_DWS_SHIFT)
#define SDMA_DESC1_GENERATION_SHIFT 2
#define SDMA_DESC1_GENERATION_WIDTH 2
#define SDMA_DESC1_GENERATION_MASK \
	((1ULL << SDMA_DESC1_GENERATION_WIDTH) - 1)
#define SDMA_DESC1_GENERATION_SMASK \
	(SDMA_DESC1_GENERATION_MASK << SDMA_DESC1_GENERATION_SHIFT)
#define SDMA_DESC1_INT_REQ_FLAG BIT_ULL(1)
#define SDMA_DESC1_HEAD_TO_HOST_FLAG BIT_ULL(0)

enum sdma_states {
	sdma_state_s00_hw_down,
	sdma_state_s10_hw_start_up_halt_wait,
	sdma_state_s15_hw_start_up_clean_wait,
	sdma_state_s20_idle,
	sdma_state_s30_sw_clean_up_wait,
	sdma_state_s40_hw_clean_up_wait,
	sdma_state_s50_hw_halt_wait,
	sdma_state_s60_idle_halt_wait,
	sdma_state_s80_hw_freeze,
	sdma_state_s82_freeze_sw_clean,
	sdma_state_s99_running,
};

enum sdma_events {
	sdma_event_e00_go_hw_down,
	sdma_event_e10_go_hw_start,
	sdma_event_e15_hw_halt_done,
	sdma_event_e25_hw_clean_up_done,
	sdma_event_e30_go_running,
	sdma_event_e40_sw_cleaned,
	sdma_event_e50_hw_cleaned,
	sdma_event_e60_hw_halted,
	sdma_event_e70_go_idle,
	sdma_event_e80_hw_freeze,
	sdma_event_e81_hw_frozen,
	sdma_event_e82_hw_unfreeze,
	sdma_event_e85_link_down,
	sdma_event_e90_sw_halted,
};

struct sdma_set_state_action {
	unsigned op_enable:1;
	unsigned op_intenable:1;
	unsigned op_halt:1;
	unsigned op_cleanup:1;
	unsigned go_s99_running_tofalse:1;
	unsigned go_s99_running_totrue:1;
};

struct sdma_state {
	struct kref kref;
	struct completion comp;
	enum sdma_states current_state;
	unsigned current_op;
	unsigned go_s99_running;
	/* debugging/development */
	enum sdma_states previous_state;
	unsigned previous_op;
	enum sdma_events last_event;
};

/**
 * DOC: sdma exported routines
 *
 * These sdma routines fit into three categories:
 * - The SDMA API for building and submitting packets
 *   to the ring
 *
 * - Initialization and tear down routines to build up
 *   and tear down SDMA
 *
 * - ISR entrances to handle interrupts, state changes
 *   and errors
 */

/**
 * DOC: sdma PSM/verbs API
 *
 * The sdma API is designed to be used by both PSM
 * and verbs to supply packets to the SDMA ring.
 *
 * The usage of the API is as follows:
 *
 * Embed a struct iowait in the QP or
 * PQ. The iowait should be initialized with a
 * call to iowait_init().
 *
 * The user of the API should create an allocation method
 * for their version of the txreq. slabs, pre-allocated lists,
 * and dma pools can be used. Once the user's overload of
 * the sdma_txreq has been allocated, the sdma_txreq member
 * must be initialized with sdma_txinit() or sdma_txinit_ahg().
 *
 * The txreq must be declared with the sdma_txreq first.
 *
 * The tx request, once initialized, is manipulated with calls to
 * sdma_txadd_daddr(), sdma_txadd_page(), or sdma_txadd_kvaddr()
 * for each disjoint memory location. It is the user's responsibility
 * to understand the packet boundaries and page boundaries to do the
 * appropriate number of sdma_txadd_* calls. The user
 * must be prepared to deal with failures from these routines due to
 * either memory allocation or dma_mapping failures.
 *
 * The mapping specifics for each memory location are recorded
 * in the tx. Memory locations added with sdma_txadd_page()
 * and sdma_txadd_kvaddr() are automatically mapped when added
 * to the tx and unmapped as part of the progress processing in the
 * SDMA interrupt handling.
 *
 * sdma_txadd_daddr() is used to add a dma_addr_t memory to the
 * tx. An example of a use case would be a pre-allocated
 * set of headers allocated via dma_pool_alloc() or
 * dma_alloc_coherent(). For these memory locations, it
 * is the responsibility of the user to handle that unmapping.
 * (This would usually be at an unload or job termination.)
 *
 * The routine sdma_send_txreq() is used to submit
 * a tx to the ring after the appropriate number of
 * sdma_txadd_* have been done.
 *
 * If it is desired to send a burst of sdma_txreqs, sdma_send_txlist()
 * can be used to submit a list of packets.
 *
 * The user is free to use the link overhead in the struct sdma_txreq as
 * long as the tx isn't in flight.
 *
 * The extreme degenerate case of the number of descriptors
 * exceeding the ring size is automatically handled as
 * memory locations are added. An overflow of the descriptor
 * array that is part of the sdma_txreq is also automatically
 * handled.
 *
 */
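
/*
 * A minimal sketch of the flow described above. This is illustrative
 * only: the verbs_txreq wrapper, the qp/iowait names, and the length
 * variables are assumptions made for the example, and error unwinding
 * is elided.
 *
 *	struct verbs_txreq {
 *		struct sdma_txreq txreq;	(must be the first member)
 *		... caller private state ...
 *	};
 *
 *	ret = sdma_txinit(&vtx->txreq, 0, pbclen + hdrlen + datalen, cb);
 *	if (!ret)
 *		ret = sdma_txadd_kvaddr(dd, &vtx->txreq, hdr, hdrlen);
 *	if (!ret)
 *		ret = sdma_txadd_page(dd, &vtx->txreq, page, offset, datalen);
 *	if (!ret)
 *		ret = sdma_send_txreq(sde, &qp->s_iowait, &vtx->txreq);
 */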

/**
 * DOC: Infrastructure calls
 *
 * sdma_init() is used to initialize data structures and
 * CSRs for the desired number of SDMA engines.
 *
 * sdma_start() is used to kick the SDMA engines initialized
 * with sdma_init(). Interrupts must be enabled at this
 * point since aspects of the state machine are interrupt
 * driven.
 *
 * sdma_engine_error() and sdma_engine_interrupt() are
 * entrances for interrupts.
 *
 * sdma_map_init() is for the management of the mapping
 * table when the number of vls is changed.
 *
 */
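
/*
 * Bring-up/tear-down ordering sketch (illustrative only; IRQ setup and
 * error handling are elided):
 *
 *	if (sdma_init(dd, port))
 *		goto bail;
 *	... request and enable SDMA interrupts ...
 *	sdma_start(dd);
 *	...
 *	sdma_exit(dd);		(at unload)
 */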

/*
 * struct hw_sdma_desc - raw 128 bit SDMA descriptor
 *
 * This is the raw descriptor in the SDMA ring
 */
struct hw_sdma_desc {
	/* private: don't use directly */
	__le64 qw[2];
};

/**
 * struct sdma_engine - Data pertaining to each SDMA engine.
 * @dd: a back-pointer to the device data
 * @ppd: per port back-pointer
 * @imask: mask for irq manipulation
 * @idle_mask: mask for determining if an interrupt is due to sdma_idle
 *
 * This structure has the state for each sdma_engine.
 *
 * Accessing non-public fields is not supported
 * since the private members are subject to change.
 */
struct sdma_engine {
	/* read mostly */
	struct hfi1_devdata *dd;
	struct hfi1_pportdata *ppd;
	/* private: */
	void __iomem *tail_csr;
	u64 imask;			/* clear interrupt mask */
	u64 idle_mask;
	u64 progress_mask;
	u64 int_mask;
	/* private: */
	volatile __le64 *head_dma;	/* DMA'ed by chip */
	/* private: */
	dma_addr_t head_phys;
	/* private: */
	struct hw_sdma_desc *descq;
	/* private: */
	unsigned descq_full_count;
	struct sdma_txreq **tx_ring;
	/* private: */
	dma_addr_t descq_phys;
	/* private */
	u32 sdma_mask;
	/* private */
	struct sdma_state state;
	/* private */
	int cpu;
	/* private: */
	u8 sdma_shift;
	/* private: */
	u8 this_idx;			/* zero relative engine */
	/* protect changes to senddmactrl shadow */
	spinlock_t senddmactrl_lock;
	/* private: */
	u64 p_senddmactrl;		/* shadow per-engine SendDmaCtrl */

	/* read/write using tail_lock */
	spinlock_t tail_lock ____cacheline_aligned_in_smp;
#ifdef CONFIG_HFI1_DEBUG_SDMA_ORDER
	/* private: */
	u64 tail_sn;
#endif
	/* private: */
	u32 descq_tail;
	/* private: */
	unsigned long ahg_bits;
	/* private: */
	u16 desc_avail;
	/* private: */
	u16 tx_tail;
	/* private: */
	u16 descq_cnt;

	/* read/write using head_lock */
	/* private: */
	seqlock_t head_lock ____cacheline_aligned_in_smp;
#ifdef CONFIG_HFI1_DEBUG_SDMA_ORDER
	/* private: */
	u64 head_sn;
#endif
	/* private: */
	u32 descq_head;
	/* private: */
	u16 tx_head;
	/* private: */
	u64 last_status;
	/* private */
	u64 err_cnt;
	/* private */
	u64 sdma_int_cnt;
	u64 idle_int_cnt;
	u64 progress_int_cnt;

	/* private: */
	struct list_head dmawait;

	/* CONFIG SDMA for now, just blindly duplicate */
	/* private: */
	struct tasklet_struct sdma_hw_clean_up_task
		____cacheline_aligned_in_smp;

	/* private: */
	struct tasklet_struct sdma_sw_clean_up_task
		____cacheline_aligned_in_smp;
	/* private: */
	struct work_struct err_halt_worker;
	/* private */
	struct timer_list err_progress_check_timer;
	u32 progress_check_head;
	/* private: */
	struct work_struct flush_worker;
	/* protect flush list */
	spinlock_t flushlist_lock;
	/* private: */
	struct list_head flushlist;
	struct cpumask cpu_mask;
	struct kobject kobj;
};

int sdma_init(struct hfi1_devdata *dd, u8 port);
void sdma_start(struct hfi1_devdata *dd);
void sdma_exit(struct hfi1_devdata *dd);
void sdma_all_running(struct hfi1_devdata *dd);
void sdma_all_idle(struct hfi1_devdata *dd);
void sdma_freeze_notify(struct hfi1_devdata *dd, int go_idle);
void sdma_freeze(struct hfi1_devdata *dd);
void sdma_unfreeze(struct hfi1_devdata *dd);
void sdma_wait(struct hfi1_devdata *dd);

/**
 * sdma_empty() - idle engine test
 * @engine: sdma engine
 *
 * Currently used by verbs as a latency optimization.
 *
 * Return:
 * 1 - empty, 0 - non-empty
 */
static inline int sdma_empty(struct sdma_engine *sde)
{
	return sde->descq_tail == sde->descq_head;
}

static inline u16 sdma_descq_freecnt(struct sdma_engine *sde)
{
	return sde->descq_cnt -
		(sde->descq_tail -
		 ACCESS_ONCE(sde->descq_head)) - 1;
}

static inline u16 sdma_descq_inprocess(struct sdma_engine *sde)
{
	return sde->descq_cnt - sdma_descq_freecnt(sde);
}

/*
 * Either head_lock or tail lock required to see
 * a steady state.
 */
static inline int __sdma_running(struct sdma_engine *engine)
{
	return engine->state.current_state == sdma_state_s99_running;
}

/**
 * sdma_running() - state suitability test
 * @engine: sdma engine
 *
 * sdma_running probes the internal state to determine if it is suitable
 * for submitting packets.
 *
 * Return:
 * 1 - ok to submit, 0 - not ok to submit
 *
 */
static inline int sdma_running(struct sdma_engine *engine)
{
	unsigned long flags;
	int ret;

	spin_lock_irqsave(&engine->tail_lock, flags);
	ret = __sdma_running(engine);
	spin_unlock_irqrestore(&engine->tail_lock, flags);
	return ret;
}

void _sdma_txreq_ahgadd(
	struct sdma_txreq *tx,
	u8 num_ahg,
	u8 ahg_entry,
	u32 *ahg,
	u8 ahg_hlen);

/**
 * sdma_txinit_ahg() - initialize an sdma_txreq struct with AHG
 * @tx: tx request to initialize
 * @flags: flags to key last descriptor additions
 * @tlen: total packet length (pbc + headers + data)
 * @ahg_entry: ahg entry to use (0 - 31)
 * @num_ahg: ahg descriptor for first descriptor (0 - 9)
 * @ahg: array of AHG descriptors (up to 9 entries)
 * @ahg_hlen: number of bytes from ASIC entry to use
 * @cb: callback
 *
 * The allocation of the sdma_txreq and its enclosing structure is user
 * dependent. This routine must be called to initialize the user independent
 * fields.
 *
 * The currently supported flags are SDMA_TXREQ_F_URGENT,
 * SDMA_TXREQ_F_AHG_COPY, and SDMA_TXREQ_F_USE_AHG.
 *
 * SDMA_TXREQ_F_URGENT is used for latency sensitive situations where the
 * completion is desired as soon as possible.
 *
 * SDMA_TXREQ_F_AHG_COPY causes the header in the first descriptor to be
 * copied to the chip entry. SDMA_TXREQ_F_USE_AHG causes the code to add
 * the AHG descriptors into the first 1 to 3 descriptors.
 *
 * Completions of submitted requests can be gotten on selected
 * txreqs by giving a completion routine callback to sdma_txinit() or
 * sdma_txinit_ahg(). The environment in which the callback runs
 * can be from an ISR, a tasklet, or a thread, so no sleeping
 * kernel routines can be used. Aspects of the sdma ring may
 * be locked so care should be taken with locking.
 *
 * The callback pointer can be NULL to avoid any callback for the packet
 * being submitted. The callback will be provided this tx, a status, and a flag.
 *
 * The status will be one of SDMA_TXREQ_S_OK, SDMA_TXREQ_S_SENDERROR,
 * SDMA_TXREQ_S_ABORTED, or SDMA_TXREQ_S_SHUTDOWN.
 *
 * The flag, if the iowait had been used, indicates the iowait
 * sdma_busy count has reached zero.
 *
 * The user data portion of tlen should be precise. The sdma_txadd_*
 * entrances will pad with a descriptor referencing 1 - 3 bytes when the
 * number of bytes specified in tlen has been supplied to the sdma_txreq.
 *
 * ahg_hlen is used to determine the number of on-chip entry bytes to
 * use as the header. This is for cases where the stored header is
 * larger than the header to be used in a packet. This is typical
 * for verbs where an RDMA_WRITE_FIRST is larger than the packet in
 * an RDMA_WRITE_MIDDLE.
 *
 */
static inline int sdma_txinit_ahg(
	struct sdma_txreq *tx,
	u16 flags,
	u16 tlen,
	u8 ahg_entry,
	u8 num_ahg,
	u32 *ahg,
	u8 ahg_hlen,
	void (*cb)(struct sdma_txreq *, int))
{
	if (tlen == 0)
		return -ENODATA;
	if (tlen > MAX_SDMA_PKT_SIZE)
		return -EMSGSIZE;
	tx->desc_limit = ARRAY_SIZE(tx->descs);
	tx->descp = &tx->descs[0];
	INIT_LIST_HEAD(&tx->list);
	tx->num_desc = 0;
	tx->flags = flags;
	tx->complete = cb;
	tx->coalesce_buf = NULL;
	tx->wait = NULL;
	tx->packet_len = tlen;
	tx->tlen = tx->packet_len;
	tx->descs[0].qw[0] = SDMA_DESC0_FIRST_DESC_FLAG;
	tx->descs[0].qw[1] = 0;
	if (flags & SDMA_TXREQ_F_AHG_COPY)
		tx->descs[0].qw[1] |=
			(((u64)ahg_entry & SDMA_DESC1_HEADER_INDEX_MASK)
				<< SDMA_DESC1_HEADER_INDEX_SHIFT) |
			(((u64)SDMA_AHG_COPY & SDMA_DESC1_HEADER_MODE_MASK)
				<< SDMA_DESC1_HEADER_MODE_SHIFT);
	else if (flags & SDMA_TXREQ_F_USE_AHG && num_ahg)
		_sdma_txreq_ahgadd(tx, num_ahg, ahg_entry, ahg, ahg_hlen);
	return 0;
}
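
/*
 * AHG usage sketch (illustrative only; the ahg[] contents, the header
 * length, and the field being updated depend on the caller's header
 * layout):
 *
 *	int idx = sdma_ahg_alloc(sde);
 *	u32 ahg[1];
 *
 *	if (idx < 0)
 *		return idx;
 *	ahg[0] = sdma_build_ahg_descriptor(new_val, dword_idx, start_bit, nbits);
 *	ret = sdma_txinit_ahg(&vtx->txreq, SDMA_TXREQ_F_USE_AHG, tlen,
 *			      idx, 1, ahg, hdr_len, cb);
 */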

/**
 * sdma_txinit() - initialize an sdma_txreq struct (no AHG)
 * @tx: tx request to initialize
 * @flags: flags to key last descriptor additions
 * @tlen: total packet length (pbc + headers + data)
 * @cb: callback pointer
 *
 * The allocation of the sdma_txreq and its enclosing structure is user
 * dependent. This routine must be called to initialize the user
 * independent fields.
 *
 * The currently supported flag is SDMA_TXREQ_F_URGENT.
 *
 * SDMA_TXREQ_F_URGENT is used for latency sensitive situations where the
 * completion is desired as soon as possible.
 *
 * Completions of submitted requests can be gotten on selected
 * txreqs by giving a completion routine callback to sdma_txinit() or
 * sdma_txinit_ahg(). The environment in which the callback runs
 * can be from an ISR, a tasklet, or a thread, so no sleeping
 * kernel routines can be used. The head size of the sdma ring may
 * be locked so care should be taken with locking.
 *
 * The callback pointer can be NULL to avoid any callback for the packet
 * being submitted.
 *
 * The callback, if non-NULL, will be provided this tx and a status. The
 * status will be one of SDMA_TXREQ_S_OK, SDMA_TXREQ_S_SENDERROR,
 * SDMA_TXREQ_S_ABORTED, or SDMA_TXREQ_S_SHUTDOWN.
 *
 */
static inline int sdma_txinit(
	struct sdma_txreq *tx,
	u16 flags,
	u16 tlen,
	void (*cb)(struct sdma_txreq *, int))
{
	return sdma_txinit_ahg(tx, flags, tlen, 0, 0, NULL, 0, cb);
}

/* helpers - don't use */
static inline int sdma_mapping_type(struct sdma_desc *d)
{
	return (d->qw[1] & SDMA_DESC1_GENERATION_SMASK)
		>> SDMA_DESC1_GENERATION_SHIFT;
}

static inline size_t sdma_mapping_len(struct sdma_desc *d)
{
	return (d->qw[0] & SDMA_DESC0_BYTE_COUNT_SMASK)
		>> SDMA_DESC0_BYTE_COUNT_SHIFT;
}

static inline dma_addr_t sdma_mapping_addr(struct sdma_desc *d)
{
	return (d->qw[0] & SDMA_DESC0_PHY_ADDR_SMASK)
		>> SDMA_DESC0_PHY_ADDR_SHIFT;
}

static inline void make_tx_sdma_desc(
	struct sdma_txreq *tx,
	int type,
	dma_addr_t addr,
	size_t len)
{
	struct sdma_desc *desc = &tx->descp[tx->num_desc];

	if (!tx->num_desc) {
		/* qw[0] zero; qw[1] first, ahg mode already in from init */
		desc->qw[1] |= ((u64)type & SDMA_DESC1_GENERATION_MASK)
			<< SDMA_DESC1_GENERATION_SHIFT;
	} else {
		desc->qw[0] = 0;
		desc->qw[1] = ((u64)type & SDMA_DESC1_GENERATION_MASK)
			<< SDMA_DESC1_GENERATION_SHIFT;
	}
	desc->qw[0] |= (((u64)addr & SDMA_DESC0_PHY_ADDR_MASK)
			<< SDMA_DESC0_PHY_ADDR_SHIFT) |
		       (((u64)len & SDMA_DESC0_BYTE_COUNT_MASK)
			<< SDMA_DESC0_BYTE_COUNT_SHIFT);
}

/* helper to extend txreq */
int ext_coal_sdma_tx_descs(struct hfi1_devdata *dd, struct sdma_txreq *tx,
			   int type, void *kvaddr, struct page *page,
			   unsigned long offset, u16 len);
int _pad_sdma_tx_descs(struct hfi1_devdata *, struct sdma_txreq *);
void sdma_txclean(struct hfi1_devdata *, struct sdma_txreq *);

/* helpers used by public routines */
static inline void _sdma_close_tx(struct hfi1_devdata *dd,
				  struct sdma_txreq *tx)
{
	tx->descp[tx->num_desc].qw[0] |=
		SDMA_DESC0_LAST_DESC_FLAG;
	tx->descp[tx->num_desc].qw[1] |=
		dd->default_desc1;
	if (tx->flags & SDMA_TXREQ_F_URGENT)
		tx->descp[tx->num_desc].qw[1] |=
			(SDMA_DESC1_HEAD_TO_HOST_FLAG |
			 SDMA_DESC1_INT_REQ_FLAG);
}

static inline int _sdma_txadd_daddr(
	struct hfi1_devdata *dd,
	int type,
	struct sdma_txreq *tx,
	dma_addr_t addr,
	u16 len)
{
	int rval = 0;

	make_tx_sdma_desc(
		tx,
		type,
		addr, len);
	WARN_ON(len > tx->tlen);
	tx->tlen -= len;
	/* special cases for last */
	if (!tx->tlen) {
		if (tx->packet_len & (sizeof(u32) - 1)) {
			rval = _pad_sdma_tx_descs(dd, tx);
			if (rval)
				return rval;
		} else {
			_sdma_close_tx(dd, tx);
		}
	}
	tx->num_desc++;
	return rval;
}

/**
 * sdma_txadd_page() - add a page to the sdma_txreq
 * @dd: the device to use for mapping
 * @tx: tx request to which the page is added
 * @page: page to map
 * @offset: offset within the page
 * @len: length in bytes
 *
 * This is used to add a page/offset/length descriptor.
 *
 * The mapping/unmapping of the page/offset/len is automatically handled.
 *
 * Return:
 * 0 - success, -ENOSPC - mapping fail, -ENOMEM - couldn't
 * extend/coalesce descriptor array
 */
static inline int sdma_txadd_page(
	struct hfi1_devdata *dd,
	struct sdma_txreq *tx,
	struct page *page,
	unsigned long offset,
	u16 len)
{
	dma_addr_t addr;
	int rval;

	if ((unlikely(tx->num_desc == tx->desc_limit))) {
		rval = ext_coal_sdma_tx_descs(dd, tx, SDMA_MAP_PAGE,
					      NULL, page, offset, len);
		if (rval <= 0)
			return rval;
	}

	addr = dma_map_page(
		       &dd->pcidev->dev,
		       page,
		       offset,
		       len,
		       DMA_TO_DEVICE);

	if (unlikely(dma_mapping_error(&dd->pcidev->dev, addr))) {
		sdma_txclean(dd, tx);
		return -ENOSPC;
	}

	return _sdma_txadd_daddr(
			dd, SDMA_MAP_PAGE, tx, addr, len);
}

/**
 * sdma_txadd_daddr() - add a dma address to the sdma_txreq
 * @dd: the device to use for mapping
 * @tx: sdma_txreq to which the page is added
 * @addr: dma address mapped by caller
 * @len: length in bytes
 *
 * This is used to add a descriptor for memory that is already dma mapped.
 *
 * In this case, there is no unmapping as part of the progress processing for
 * this memory location.
 *
 * Return:
 * 0 - success, -ENOMEM - couldn't extend descriptor array
 */

static inline int sdma_txadd_daddr(
	struct hfi1_devdata *dd,
	struct sdma_txreq *tx,
	dma_addr_t addr,
	u16 len)
{
	int rval;

	if ((unlikely(tx->num_desc == tx->desc_limit))) {
		rval = ext_coal_sdma_tx_descs(dd, tx, SDMA_MAP_NONE,
					      NULL, NULL, 0, 0);
		if (rval <= 0)
			return rval;
	}

	return _sdma_txadd_daddr(dd, SDMA_MAP_NONE, tx, addr, len);
}

/**
 * sdma_txadd_kvaddr() - add a kernel virtual address to sdma_txreq
 * @dd: the device to use for mapping
 * @tx: sdma_txreq to which the page is added
 * @kvaddr: the kernel virtual address
 * @len: length in bytes
 *
 * This is used to add a descriptor referenced by the indicated kvaddr and
 * len.
 *
 * The mapping/unmapping of the kvaddr and len is automatically handled.
 *
 * Return:
 * 0 - success, -ENOSPC - mapping fail, -ENOMEM - couldn't extend/coalesce
 * descriptor array
 */
static inline int sdma_txadd_kvaddr(
	struct hfi1_devdata *dd,
	struct sdma_txreq *tx,
	void *kvaddr,
	u16 len)
{
	dma_addr_t addr;
	int rval;

	if ((unlikely(tx->num_desc == tx->desc_limit))) {
		rval = ext_coal_sdma_tx_descs(dd, tx, SDMA_MAP_SINGLE,
					      kvaddr, NULL, 0, len);
		if (rval <= 0)
			return rval;
	}

	addr = dma_map_single(
		       &dd->pcidev->dev,
		       kvaddr,
		       len,
		       DMA_TO_DEVICE);

	if (unlikely(dma_mapping_error(&dd->pcidev->dev, addr))) {
		sdma_txclean(dd, tx);
		return -ENOSPC;
	}

	return _sdma_txadd_daddr(
			dd, SDMA_MAP_SINGLE, tx, addr, len);
}

struct iowait;

int sdma_send_txreq(struct sdma_engine *sde,
		    struct iowait *wait,
		    struct sdma_txreq *tx);
int sdma_send_txlist(struct sdma_engine *sde,
		     struct iowait *wait,
		     struct list_head *tx_list,
		     u32 *count);
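
/*
 * Burst submission sketch (illustrative only; tx1/tx2 are previously
 * initialized and filled sdma_txreqs, and the iowait and error handling
 * are the caller's):
 *
 *	u32 count = 0;
 *	LIST_HEAD(txlist);
 *
 *	list_add_tail(&tx1->list, &txlist);
 *	list_add_tail(&tx2->list, &txlist);
 *	ret = sdma_send_txlist(sde, &qp->s_iowait, &txlist, &count);
 */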

int sdma_ahg_alloc(struct sdma_engine *sde);
void sdma_ahg_free(struct sdma_engine *sde, int ahg_index);

/**
 * sdma_build_ahg_descriptor() - build ahg descriptor
 * @data: field value to apply
 * @dwindex: dword index of the header field
 * @startbit: starting bit of the field
 * @bits: field length in bits
 *
 * Build and return a 32 bit descriptor.
 */
static inline u32 sdma_build_ahg_descriptor(
	u16 data,
	u8 dwindex,
	u8 startbit,
	u8 bits)
{
	return (u32)(1UL << SDMA_AHG_UPDATE_ENABLE_SHIFT |
		     ((startbit & SDMA_AHG_FIELD_START_MASK) <<
		      SDMA_AHG_FIELD_START_SHIFT) |
		     ((bits & SDMA_AHG_FIELD_LEN_MASK) <<
		      SDMA_AHG_FIELD_LEN_SHIFT) |
		     ((dwindex & SDMA_AHG_INDEX_MASK) <<
		      SDMA_AHG_INDEX_SHIFT) |
		     ((data & SDMA_AHG_VALUE_MASK) <<
		      SDMA_AHG_VALUE_SHIFT));
}
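
/*
 * Example with illustrative values: replace an 8 bit field that starts
 * at bit 16 of header dword 5 with the value 0x3f:
 *
 *	u32 ahg0 = sdma_build_ahg_descriptor(0x3f, 5, 16, 8);
 */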

/**
 * sdma_progress - use seq number to detect head progress
 * @sde: sdma_engine to check
 * @seq: base seq count
 * @tx: txreq for which we need to check descriptor availability
 *
 * This is used in the appropriate spot in the sleep routine
 * to check for potential ring progress. This routine gets the
 * seqcount before queuing the iowait structure for progress.
 *
 * If the seqcount indicates that progress needs to be checked,
 * re-submission is detected by checking whether the descriptor
 * queue has enough descriptors for the txreq.
 */
static inline unsigned sdma_progress(struct sdma_engine *sde, unsigned seq,
				     struct sdma_txreq *tx)
{
	if (read_seqretry(&sde->head_lock, seq)) {
		sde->desc_avail = sdma_descq_freecnt(sde);
		if (tx->num_desc > sde->desc_avail)
			return 0;
		return 1;
	}
	return 0;
}
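
/*
 * Typical shape of a caller's sleep/retry path using the above
 * (illustrative only; queuing and locking of the iowait are the
 * caller's responsibility):
 *
 *	unsigned seq = read_seqbegin(&sde->head_lock);
 *	... decide the ring is too full and the tx must wait ...
 *	if (sdma_progress(sde, seq, tx))
 *		goto retry;	(the head advanced; retry the submit)
 *	... queue the iowait and block ...
 */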

/**
 * sdma_iowait_schedule() - schedule the iowait for progress
 * @sde: sdma_engine to schedule
 * @wait: wait struct to schedule
 *
 * This function schedules the iowait
 * structure embedded in the QP or PQ on the port's workqueue.
 *
 */
static inline void sdma_iowait_schedule(
	struct sdma_engine *sde,
	struct iowait *wait)
{
	struct hfi1_pportdata *ppd = sde->dd->pport;

	iowait_schedule(wait, ppd->hfi1_wq, sde->cpu);
}

/* for use by interrupt handling */
void sdma_engine_error(struct sdma_engine *sde, u64 status);
void sdma_engine_interrupt(struct sdma_engine *sde, u64 status);

/*
 *
 * The diagram below details the relationship of the mapping structures
 *
 * Since the mapping now allows for non-uniform engines per vl, the
 * number of engines for a vl is either the vl_engines[vl] or
 * a computation based on num_sdma/num_vls:
 *
 * For example:
 * nactual = vl_engines ? vl_engines[vl] : num_sdma/num_vls
 *
 * n = roundup to next highest power of 2 using nactual
 *
 * In the case where num_sdma/num_vls doesn't divide
 * evenly, the extras are added from the last vl downward.
 *
 * For the case where n > nactual, the engines are assigned
 * in a round robin fashion wrapping back to the first engine
 * for a particular vl.
 *
 * dd->sdma_map
 *    |                       sdma_map_elem[0]
 *    |                      +--------------------+
 *    v                      |        mask        |
 *    sdma_vl_map            |--------------------|
 * +--------------------------+ | sde[0] -> eng 1 |
 * | list (RCU) |               |--------------------|
 * |--------------------------| ->| sde[1] -> eng 2 |
 * | mask |                 --/   |--------------------|
 * |--------------------------| -/  | * |
 * | actual_vls (max 8) |    -/     |--------------------|
 * |--------------------------| --/ | sde[n] -> eng n |
 * | vls (max 8) |            -/    +--------------------+
 * |--------------------------| --/
 * | map[0] |-/
 * |--------------------------|     +--------------------+
 * | map[1] |---                    |        mask        |
 * |--------------------------| \---- |--------------------|
 * | * |                         \-- | sde[0] -> eng 1+n |
 * | * |                          \---- |--------------------|
 * | * |                              \->| sde[1] -> eng 2+n |
 * |--------------------------|          |--------------------|
 * | map[vls - 1] |-                     | * |
 * +--------------------------+ \-       |--------------------|
 *                               \-      | sde[m] -> eng m+n |
 *                                \      +--------------------+
 *                                 \-
 *                                  \
 *                                   \- +--------------------+
 *                                    \- |        mask        |
 *                                     \ |--------------------|
 *                                      \- | sde[0] -> eng 1+m+n|
 *                                       \- |--------------------|
 *                                         >| sde[1] -> eng 2+m+n|
 *                                          |--------------------|
 *                                          | * |
 *                                          |--------------------|
 *                                          | sde[o] -> eng o+m+n|
 *                                          +--------------------+
 *
 */

/**
 * struct sdma_map_elem - mapping for a vl
 * @mask - selector mask
 * @sde - array of engines for this vl
 *
 * The mask is used to "mod" the selector
 * to produce index into the trailing
 * array of sdes.
 */
struct sdma_map_elem {
	u32 mask;
	struct sdma_engine *sde[0];
};

/**
 * struct sdma_vl_map - mapping for a vl
 * @engine_to_vl - map of an engine to a vl
 * @list - rcu head for free callback
 * @mask - vl mask to "mod" the vl to produce an index to map array
 * @actual_vls - number of vls
 * @vls - number of vls rounded to next power of 2
 * @map - array of sdma_map_elem entries
 *
 * This is the parent mapping structure. The trailing
 * members of the struct point to sdma_map_elem entries, which
 * in turn point to an array of sde's for that vl.
 */
struct sdma_vl_map {
	s8 engine_to_vl[TXE_NUM_SDMA_ENGINES];
	struct rcu_head list;
	u32 mask;
	u8 actual_vls;
	u8 vls;
	struct sdma_map_elem *map[0];
};
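
/*
 * Lookup sketch, per the diagram above (illustrative only; rcu_read_lock()
 * and bounds checks are elided):
 *
 *	struct sdma_vl_map *m = rcu_dereference(dd->sdma_map);
 *	struct sdma_map_elem *e = m->map[vl & m->mask];
 *	struct sdma_engine *sde = e->sde[selector & e->mask];
 */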

int sdma_map_init(
	struct hfi1_devdata *dd,
	u8 port,
	u8 num_vls,
	u8 *vl_engines);

/* slow path */
void _sdma_engine_progress_schedule(struct sdma_engine *sde);

/**
 * sdma_engine_progress_schedule() - schedule progress on engine
 * @sde: sdma_engine to schedule progress
 *
 * This is the fast path.
 *
 */
static inline void sdma_engine_progress_schedule(
	struct sdma_engine *sde)
{
	if (!sde || sdma_descq_inprocess(sde) < (sde->descq_cnt / 8))
		return;
	_sdma_engine_progress_schedule(sde);
}

struct sdma_engine *sdma_select_engine_sc(
	struct hfi1_devdata *dd,
	u32 selector,
	u8 sc5);

struct sdma_engine *sdma_select_engine_vl(
	struct hfi1_devdata *dd,
	u32 selector,
	u8 vl);

struct sdma_engine *sdma_select_user_engine(struct hfi1_devdata *dd,
					    u32 selector, u8 vl);
ssize_t sdma_get_cpu_to_sde_map(struct sdma_engine *sde, char *buf);
ssize_t sdma_set_cpu_to_sde_map(struct sdma_engine *sde, const char *buf,
				size_t count);
int sdma_engine_get_vl(struct sdma_engine *sde);
void sdma_seqfile_dump_sde(struct seq_file *s, struct sdma_engine *);

#ifdef CONFIG_SDMA_VERBOSITY
void sdma_dumpstate(struct sdma_engine *);
#endif
static inline char *slashstrip(char *s)
{
	char *r = s;

	while (*s)
		if (*s++ == '/')
			r = s;
	return r;
}

u16 sdma_get_descq_cnt(void);

extern uint mod_num_sdma;

void sdma_update_lmc(struct hfi1_devdata *dd, u64 mask, u32 lid);

#endif