/*
 * Copyright (c) 2005-2011 Atheros Communications Inc.
 * Copyright (c) 2011-2013 Qualcomm Atheros, Inc.
 *
 * Permission to use, copy, modify, and/or distribute this software for any
 * purpose with or without fee is hereby granted, provided that the above
 * copyright notice and this permission notice appear in all copies.
 *
 * THE SOFTWARE IS PROVIDED "AS IS" AND THE AUTHOR DISCLAIMS ALL WARRANTIES
 * WITH REGARD TO THIS SOFTWARE INCLUDING ALL IMPLIED WARRANTIES OF
 * MERCHANTABILITY AND FITNESS. IN NO EVENT SHALL THE AUTHOR BE LIABLE FOR
 * ANY SPECIAL, DIRECT, INDIRECT, OR CONSEQUENTIAL DAMAGES OR ANY DAMAGES
 * WHATSOEVER RESULTING FROM LOSS OF USE, DATA OR PROFITS, WHETHER IN AN
 * ACTION OF CONTRACT, NEGLIGENCE OR OTHER TORTIOUS ACTION, ARISING OUT OF
 * OR IN CONNECTION WITH THE USE OR PERFORMANCE OF THIS SOFTWARE.
 */
17
18#ifndef _CE_H_
19#define _CE_H_
20
21#include "hif.h"
22
23
/* Maximum number of Copy Engine's supported */
#define CE_COUNT_MAX 8
/* Source-ring depth for the HTT host->target message CE
 * (NOTE(review): usage inferred from the name; confirm against pci.c) */
#define CE_HTT_H2T_MSG_SRC_NENTRIES 2048

/* Descriptor rings must be aligned to this boundary */
#define CE_DESC_RING_ALIGN	8
/* Upper bound on fragments held by one struct ce_sendlist */
#define CE_SENDLIST_ITEMS_MAX	12
/* Internal send flag: descriptor is part of a multi-fragment gather */
#define CE_SEND_FLAG_GATHER	0x00010000

/*
 * Copy Engine support: low-level Target-side Copy Engine API.
 * This is a hardware access layer used by code that understands
 * how to use copy engines.
 */

struct ce_state;

/* Bits of the 16-bit flags word in struct ce_desc */
#define CE_DESC_FLAGS_GATHER		(1 << 0)
#define CE_DESC_FLAGS_BYTE_SWAP	(1 << 1)
#define CE_DESC_FLAGS_META_DATA_MASK	0xFFFC
#define CE_DESC_FLAGS_META_DATA_LSB	3
46
/*
 * Copy Engine DMA descriptor as it lives in device-visible memory:
 * little-endian fields, so the layout must not be changed.
 */
struct ce_desc {
	__le32 addr;	/* CE-space address of the buffer */
	__le16 nbytes;	/* transfer length in bytes */
	__le16 flags;	/* %CE_DESC_FLAGS_ */
};
52
/* Copy Engine Ring internal state */
struct ce_ring_state {
	/* Number of entries in this ring; must be power of 2 */
	unsigned int nentries;
	/* mask for modulo-nentries index arithmetic
	 * (see CE_RING_DELTA / CE_RING_IDX_INCR) */
	unsigned int nentries_mask;

	/*
	 * For dest ring, this is the next index to be processed
	 * by software after it was/is received into.
	 *
	 * For src ring, this is the last descriptor that was sent
	 * and completion processed by software.
	 *
	 * Regardless of src or dest ring, this is an invariant
	 * (modulo ring size):
	 *	write index >= read index >= sw_index
	 */
	unsigned int sw_index;
	/* cached copy */
	unsigned int write_index;
	/*
	 * For src ring, this is the next index not yet processed by HW.
	 * This is a cached copy of the real HW index (read index), used
	 * for avoiding reading the HW index register more often than
	 * necessary.
	 * This extends the invariant:
	 *	write index >= read index >= hw_index >= sw_index
	 *
	 * For dest ring, this is currently unused.
	 */
	/* cached copy */
	unsigned int hw_index;

	/* Start of DMA-coherent area reserved for descriptors */
	/* Host address space */
	void *base_addr_owner_space_unaligned;
	/* CE address space */
	u32 base_addr_ce_space_unaligned;

	/*
	 * Actual start of descriptors.
	 * Aligned to descriptor-size boundary.
	 * Points into reserved DMA-coherent area, above.
	 */
	/* Host address space */
	void *base_addr_owner_space;

	/* CE address space */
	u32 base_addr_ce_space;
	/*
	 * Start of shadow copy of descriptors, within regular memory.
	 * Aligned to descriptor-size boundary.
	 */
	void *shadow_base_unaligned;
	struct ce_desc *shadow_base;

	/* One caller-supplied context pointer per ring entry */
	void **per_transfer_context;
};
111
/* Copy Engine internal state */
struct ce_state {
	struct ath10k *ar;	/* owning device */
	unsigned int id;	/* copy engine number */

	/* CE_ATTR_* values (see struct ce_attr) */
	unsigned int attr_flags;

	/* per-CE register base; presumably from ath10k_ce_base_address()
	 * — confirm in ce.c */
	u32 ctrl_addr;

	/* Invoked on send completion; registered via
	 * ath10k_ce_send_cb_register() */
	void (*send_cb) (struct ce_state *ce_state,
			 void *per_transfer_send_context,
			 u32 buffer,
			 unsigned int nbytes,
			 unsigned int transfer_id);
	/* Invoked on receive completion; registered via
	 * ath10k_ce_recv_cb_register() */
	void (*recv_cb) (struct ce_state *ce_state,
			 void *per_transfer_recv_context,
			 u32 buffer,
			 unsigned int nbytes,
			 unsigned int transfer_id,
			 unsigned int flags);

	/* max source send size; also min destination buffer size
	 * (mirrors ce_attr.src_sz_max) */
	unsigned int src_sz_max;
	struct ce_ring_state *src_ring;
	struct ce_ring_state *dest_ring;
};
137
/* One fragment of a gather send (see struct ce_sendlist) */
struct ce_sendlist_item {
	/* e.g. buffer or desc list */
	dma_addr_t data;
	union {
		/* simple buffer */
		unsigned int nbytes;
		/* Rx descriptor list */
		unsigned int ndesc;
	} u;
	/* externally-specified flags; OR-ed with internal flags */
	u32 flags;
};
150
/* Fixed-capacity list of fragments for ath10k_ce_sendlist_send() */
struct ce_sendlist {
	unsigned int num_items;	/* number of valid entries in item[] */
	struct ce_sendlist_item item[CE_SENDLIST_ITEMS_MAX];
};
155
/* Copy Engine settable attributes */
struct ce_attr;

/*==================Send====================*/

/* ath10k_ce_send flags */
#define CE_SEND_FLAG_BYTE_SWAP 1

/*
 * Queue a source buffer to be sent to an anonymous destination buffer.
 * ce - which copy engine to use
 * buffer - address of buffer
 * nbytes - number of bytes to send
 * transfer_id - arbitrary ID; reflected to destination
 * flags - CE_SEND_FLAG_* values
 * Returns 0 on success; otherwise an error status.
 *
 * Note: If no flags are specified, use CE's default data swap mode.
 *
 * Implementation note: pushes 1 buffer to Source ring
 */
int ath10k_ce_send(struct ce_state *ce_state,
		   void *per_transfer_send_context,
		   u32 buffer,
		   unsigned int nbytes,
		   /* 14 bits */
		   unsigned int transfer_id,
		   unsigned int flags);

/*
 * Register a callback to be invoked on send completion.
 * disable_interrupts - presumably suppresses per-completion interrupts;
 * confirm against the implementation in ce.c.
 */
void ath10k_ce_send_cb_register(struct ce_state *ce_state,
				void (*send_cb) (struct ce_state *ce_state,
						 void *transfer_context,
						 u32 buffer,
						 unsigned int nbytes,
						 unsigned int transfer_id),
				int disable_interrupts);

/* Append a simple buffer (address/length) to a sendlist. */
void ath10k_ce_sendlist_buf_add(struct ce_sendlist *sendlist,
				u32 buffer,
				unsigned int nbytes,
				/* OR-ed with internal flags */
				u32 flags);

/*
 * Queue a "sendlist" of buffers to be sent using gather to a single
 * anonymous destination buffer
 * ce - which copy engine to use
 * sendlist - list of simple buffers to send using gather
 * transfer_id - arbitrary ID; reflected to destination
 * Returns 0 on success; otherwise an error status.
 *
 * Implementation note: Pushes multiple buffers with Gather to Source ring.
 */
int ath10k_ce_sendlist_send(struct ce_state *ce_state,
			    void *per_transfer_send_context,
			    struct ce_sendlist *sendlist,
			    /* 14 bits */
			    unsigned int transfer_id);
215
/*==================Recv=======================*/

/*
 * Make a buffer available to receive. The buffer must be at least of a
 * minimal size appropriate for this copy engine (src_sz_max attribute).
 * ce - which copy engine to use
 * per_transfer_recv_context - context passed back to caller's recv_cb
 * buffer - address of buffer in CE space
 * Returns 0 on success; otherwise an error status.
 *
 * Implementation note: Pushes a buffer to Dest ring.
 */
int ath10k_ce_recv_buf_enqueue(struct ce_state *ce_state,
			       void *per_transfer_recv_context,
			       u32 buffer);

/* Register a callback to be invoked on receive completion. */
void ath10k_ce_recv_cb_register(struct ce_state *ce_state,
				void (*recv_cb) (struct ce_state *ce_state,
						 void *transfer_context,
						 u32 buffer,
						 unsigned int nbytes,
						 unsigned int transfer_id,
						 unsigned int flags));

/* recv flags */
/* Data is byte-swapped */
#define CE_RECV_FLAG_SWAPPED 1

/*
 * Supply data for the next completed unprocessed receive descriptor.
 * Pops buffer from Dest ring.
 */
int ath10k_ce_completed_recv_next(struct ce_state *ce_state,
				  void **per_transfer_contextp,
				  u32 *bufferp,
				  unsigned int *nbytesp,
				  unsigned int *transfer_idp,
				  unsigned int *flagsp);
/*
 * Supply data for the next completed unprocessed send descriptor.
 * Pops 1 completed send buffer from Source ring.
 */
int ath10k_ce_completed_send_next(struct ce_state *ce_state,
				  void **per_transfer_contextp,
				  u32 *bufferp,
				  unsigned int *nbytesp,
				  unsigned int *transfer_idp);
263
/*==================CE Engine Initialization=======================*/

/* Initialize an instance of a CE */
struct ce_state *ath10k_ce_init(struct ath10k *ar,
				unsigned int ce_id,
				const struct ce_attr *attr);

/*==================CE Engine Shutdown=======================*/
/*
 * Support clean shutdown by allowing the caller to revoke
 * receive buffers. Target DMA must be stopped before using
 * this API.
 */
int ath10k_ce_revoke_recv_next(struct ce_state *ce_state,
			       void **per_transfer_contextp,
			       u32 *bufferp);

/*
 * Support clean shutdown by allowing the caller to cancel
 * pending sends. Target DMA must be stopped before using
 * this API.
 */
int ath10k_ce_cancel_send_next(struct ce_state *ce_state,
			       void **per_transfer_contextp,
			       u32 *bufferp,
			       unsigned int *nbytesp,
			       unsigned int *transfer_idp);

/* Tear down a CE previously set up with ath10k_ce_init() */
void ath10k_ce_deinit(struct ce_state *ce_state);

/*==================CE Interrupt Handlers====================*/
/* Service every copy engine with pending work */
void ath10k_ce_per_engine_service_any(struct ath10k *ar);
/* Service the single copy engine identified by ce_id */
void ath10k_ce_per_engine_service(struct ath10k *ar, unsigned int ce_id);
/* Mask CE interrupt sources on all engines */
void ath10k_ce_disable_interrupts(struct ath10k *ar);
/* ce_attr.flags values */
/* Use NonSnooping PCIe accesses? */
#define CE_ATTR_NO_SNOOP 1

/* Byte swap data words */
#define CE_ATTR_BYTE_SWAP_DATA 2

/* Swizzle descriptors? */
#define CE_ATTR_SWIZZLE_DESCRIPTORS 4

/* no interrupt on copy completion */
#define CE_ATTR_DIS_INTR 8
311
/* Attributes of an instance of a Copy Engine */
struct ce_attr {
	/* CE_ATTR_* values */
	unsigned int flags;

	/* #entries in source ring - Must be a power of 2 */
	unsigned int src_nentries;

	/*
	 * Max source send size for this CE.
	 * This is also the minimum size of a destination buffer.
	 */
	unsigned int src_sz_max;

	/* #entries in destination ring - Must be a power of 2 */
	unsigned int dest_nentries;
};
329
/*
 * When using sendlist_send to transfer multiple buffer fragments, the
 * transfer context of each fragment, except last one, will be filled
 * with CE_SENDLIST_ITEM_CTXT. ce_completed_send will return success for
 * each fragment done with send and the transfer context would be
 * CE_SENDLIST_ITEM_CTXT. Upper layer could use this to identify the
 * status of a send completion.
 */
#define CE_SENDLIST_ITEM_CTXT	((void *)0xcecebeef)
339
/*
 * Per-CE register offsets (relative to an engine's base address) and
 * bit-field accessors. Values mirror the hardware layout; do not alter.
 */

/* Source/destination ring base address and size registers */
#define SR_BA_ADDRESS		0x0000
#define SR_SIZE_ADDRESS		0x0004
#define DR_BA_ADDRESS		0x0008
#define DR_SIZE_ADDRESS		0x000c
#define CE_CMD_ADDRESS		0x0018

#define CE_CTRL1_DST_RING_BYTE_SWAP_EN_MSB	17
#define CE_CTRL1_DST_RING_BYTE_SWAP_EN_LSB	17
#define CE_CTRL1_DST_RING_BYTE_SWAP_EN_MASK	0x00020000
#define CE_CTRL1_DST_RING_BYTE_SWAP_EN_SET(x) \
	(((0 | (x)) << CE_CTRL1_DST_RING_BYTE_SWAP_EN_LSB) & \
	CE_CTRL1_DST_RING_BYTE_SWAP_EN_MASK)

#define CE_CTRL1_SRC_RING_BYTE_SWAP_EN_MSB	16
#define CE_CTRL1_SRC_RING_BYTE_SWAP_EN_LSB	16
#define CE_CTRL1_SRC_RING_BYTE_SWAP_EN_MASK	0x00010000
#define CE_CTRL1_SRC_RING_BYTE_SWAP_EN_GET(x) \
	(((x) & CE_CTRL1_SRC_RING_BYTE_SWAP_EN_MASK) >> \
	CE_CTRL1_SRC_RING_BYTE_SWAP_EN_LSB)
#define CE_CTRL1_SRC_RING_BYTE_SWAP_EN_SET(x) \
	(((0 | (x)) << CE_CTRL1_SRC_RING_BYTE_SWAP_EN_LSB) & \
	CE_CTRL1_SRC_RING_BYTE_SWAP_EN_MASK)

/* DMAX: maximum transfer length field of the CTRL1 register */
#define CE_CTRL1_DMAX_LENGTH_MSB		15
#define CE_CTRL1_DMAX_LENGTH_LSB		0
#define CE_CTRL1_DMAX_LENGTH_MASK		0x0000ffff
#define CE_CTRL1_DMAX_LENGTH_GET(x) \
	(((x) & CE_CTRL1_DMAX_LENGTH_MASK) >> CE_CTRL1_DMAX_LENGTH_LSB)
#define CE_CTRL1_DMAX_LENGTH_SET(x) \
	(((0 | (x)) << CE_CTRL1_DMAX_LENGTH_LSB) & CE_CTRL1_DMAX_LENGTH_MASK)

#define CE_CTRL1_ADDRESS		0x0010
#define CE_CTRL1_HW_MASK		0x0007ffff
#define CE_CTRL1_SW_MASK		0x0007ffff
#define CE_CTRL1_HW_WRITE_MASK		0x00000000
#define CE_CTRL1_SW_WRITE_MASK		0x0007ffff
#define CE_CTRL1_RSTMASK		0xffffffff
#define CE_CTRL1_RESET			0x00000080

#define CE_CMD_HALT_STATUS_MSB		3
#define CE_CMD_HALT_STATUS_LSB		3
#define CE_CMD_HALT_STATUS_MASK	0x00000008
#define CE_CMD_HALT_STATUS_GET(x) \
	(((x) & CE_CMD_HALT_STATUS_MASK) >> CE_CMD_HALT_STATUS_LSB)
#define CE_CMD_HALT_STATUS_SET(x) \
	(((0 | (x)) << CE_CMD_HALT_STATUS_LSB) & CE_CMD_HALT_STATUS_MASK)
#define CE_CMD_HALT_STATUS_RESET	0
#define CE_CMD_HALT_MSB		0
#define CE_CMD_HALT_MASK		0x00000001

/* Host interrupt enable register */
#define HOST_IE_COPY_COMPLETE_MSB	0
#define HOST_IE_COPY_COMPLETE_LSB	0
#define HOST_IE_COPY_COMPLETE_MASK	0x00000001
#define HOST_IE_COPY_COMPLETE_GET(x) \
	(((x) & HOST_IE_COPY_COMPLETE_MASK) >> HOST_IE_COPY_COMPLETE_LSB)
#define HOST_IE_COPY_COMPLETE_SET(x) \
	(((0 | (x)) << HOST_IE_COPY_COMPLETE_LSB) & HOST_IE_COPY_COMPLETE_MASK)
#define HOST_IE_COPY_COMPLETE_RESET	0
#define HOST_IE_ADDRESS		0x002c

/* Host interrupt status register bits */
#define HOST_IS_DST_RING_LOW_WATERMARK_MASK	0x00000010
#define HOST_IS_DST_RING_HIGH_WATERMARK_MASK	0x00000008
#define HOST_IS_SRC_RING_LOW_WATERMARK_MASK	0x00000004
#define HOST_IS_SRC_RING_HIGH_WATERMARK_MASK	0x00000002
#define HOST_IS_COPY_COMPLETE_MASK		0x00000001
#define HOST_IS_ADDRESS			0x0030

/* Misc interrupt enable/status registers (error conditions) */
#define MISC_IE_ADDRESS		0x0034

#define MISC_IS_AXI_ERR_MASK		0x00000400

#define MISC_IS_DST_ADDR_ERR_MASK	0x00000200
#define MISC_IS_SRC_LEN_ERR_MASK	0x00000100
#define MISC_IS_DST_MAX_LEN_VIO_MASK	0x00000080
#define MISC_IS_DST_RING_OVERFLOW_MASK	0x00000040
#define MISC_IS_SRC_RING_OVERFLOW_MASK	0x00000020

#define MISC_IS_ADDRESS		0x0038

/* Ring write-index registers (host writes) */
#define SR_WR_INDEX_ADDRESS		0x003c

#define DST_WR_INDEX_ADDRESS		0x0040

/* Current src/dest ring read indices (hardware-owned) */
#define CURRENT_SRRI_ADDRESS		0x0044

#define CURRENT_DRRI_ADDRESS		0x0048

#define SRC_WATERMARK_LOW_MSB		31
#define SRC_WATERMARK_LOW_LSB		16
#define SRC_WATERMARK_LOW_MASK		0xffff0000
#define SRC_WATERMARK_LOW_GET(x) \
	(((x) & SRC_WATERMARK_LOW_MASK) >> SRC_WATERMARK_LOW_LSB)
#define SRC_WATERMARK_LOW_SET(x) \
	(((0 | (x)) << SRC_WATERMARK_LOW_LSB) & SRC_WATERMARK_LOW_MASK)
#define SRC_WATERMARK_LOW_RESET	0
#define SRC_WATERMARK_HIGH_MSB		15
#define SRC_WATERMARK_HIGH_LSB		0
#define SRC_WATERMARK_HIGH_MASK	0x0000ffff
#define SRC_WATERMARK_HIGH_GET(x) \
	(((x) & SRC_WATERMARK_HIGH_MASK) >> SRC_WATERMARK_HIGH_LSB)
#define SRC_WATERMARK_HIGH_SET(x) \
	(((0 | (x)) << SRC_WATERMARK_HIGH_LSB) & SRC_WATERMARK_HIGH_MASK)
#define SRC_WATERMARK_HIGH_RESET	0
#define SRC_WATERMARK_ADDRESS		0x004c

#define DST_WATERMARK_LOW_LSB		16
#define DST_WATERMARK_LOW_MASK		0xffff0000
#define DST_WATERMARK_LOW_SET(x) \
	(((0 | (x)) << DST_WATERMARK_LOW_LSB) & DST_WATERMARK_LOW_MASK)
#define DST_WATERMARK_LOW_RESET	0
#define DST_WATERMARK_HIGH_MSB		15
#define DST_WATERMARK_HIGH_LSB		0
#define DST_WATERMARK_HIGH_MASK	0x0000ffff
#define DST_WATERMARK_HIGH_GET(x) \
	(((x) & DST_WATERMARK_HIGH_MASK) >> DST_WATERMARK_HIGH_LSB)
#define DST_WATERMARK_HIGH_SET(x) \
	(((0 | (x)) << DST_WATERMARK_HIGH_LSB) & DST_WATERMARK_HIGH_MASK)
#define DST_WATERMARK_HIGH_RESET	0
#define DST_WATERMARK_ADDRESS		0x0050
459
460
461static inline u32 ath10k_ce_base_address(unsigned int ce_id)
462{
463 return CE0_BASE_ADDRESS + (CE1_BASE_ADDRESS - CE0_BASE_ADDRESS) * ce_id;
464}
465
/* All ring watermark bits of the HOST_IS register, combined */
#define CE_WATERMARK_MASK (HOST_IS_SRC_RING_LOW_WATERMARK_MASK  | \
			   HOST_IS_SRC_RING_HIGH_WATERMARK_MASK | \
			   HOST_IS_DST_RING_LOW_WATERMARK_MASK  | \
			   HOST_IS_DST_RING_HIGH_WATERMARK_MASK)

/* All error bits of the MISC_IS register, combined */
#define CE_ERROR_MASK	(MISC_IS_AXI_ERR_MASK           | \
			 MISC_IS_DST_ADDR_ERR_MASK      | \
			 MISC_IS_SRC_LEN_ERR_MASK       | \
			 MISC_IS_DST_MAX_LEN_VIO_MASK   | \
			 MISC_IS_DST_RING_OVERFLOW_MASK | \
			 MISC_IS_SRC_RING_OVERFLOW_MASK)

/* Address of descriptor idx within a src/dest ring based at baddr */
#define CE_SRC_RING_TO_DESC(baddr, idx) \
	(&(((struct ce_desc *)baddr)[idx]))

#define CE_DEST_RING_TO_DESC(baddr, idx) \
	(&(((struct ce_desc *)baddr)[idx]))

/* Ring arithmetic (modulus number of entries in ring, which is a pwr of 2). */
#define CE_RING_DELTA(nentries_mask, fromidx, toidx) \
	(((int)(toidx)-(int)(fromidx)) & (nentries_mask))

#define CE_RING_IDX_INCR(nentries_mask, idx) (((idx) + 1) & (nentries_mask))

/* Per-CE bits of the wrapper interrupt summary register */
#define CE_WRAPPER_INTERRUPT_SUMMARY_HOST_MSI_LSB		8
#define CE_WRAPPER_INTERRUPT_SUMMARY_HOST_MSI_MASK	0x0000ff00
#define CE_WRAPPER_INTERRUPT_SUMMARY_HOST_MSI_GET(x) \
	(((x) & CE_WRAPPER_INTERRUPT_SUMMARY_HOST_MSI_MASK) >> \
		CE_WRAPPER_INTERRUPT_SUMMARY_HOST_MSI_LSB)
#define CE_WRAPPER_INTERRUPT_SUMMARY_ADDRESS			0x0000

/* Read the per-CE interrupt summary bitmap from the CE wrapper block */
#define CE_INTERRUPT_SUMMARY(ar) \
	CE_WRAPPER_INTERRUPT_SUMMARY_HOST_MSI_GET( \
		ath10k_pci_read32((ar), CE_WRAPPER_BASE_ADDRESS + \
		CE_WRAPPER_INTERRUPT_SUMMARY_ADDRESS))
501
502#endif /* _CE_H_ */