/******************************************************************************
 * This software may be used and distributed according to the terms of
 * the GNU General Public License (GPL), incorporated herein by reference.
 * Drivers based on or derived from this code fall under the GPL and must
 * retain the authorship, copyright and license notice. This file is not
 * a complete program and may only be used when the entire operating
 * system is licensed under the GPL.
 * See the file COPYING in this distribution for more information.
 *
 * vxge-config.h: Driver for Exar Corp's X3100 Series 10GbE PCIe I/O
 *                Virtualized Server Adapter.
 * Copyright(c) 2002-2010 Exar Corp.
 ******************************************************************************/
#ifndef VXGE_CONFIG_H
#define VXGE_CONFIG_H
#include <linux/list.h>
#include <linux/slab.h>

#ifndef VXGE_CACHE_LINE_SIZE
#define VXGE_CACHE_LINE_SIZE 128
#endif

#ifndef VXGE_ALIGN
#define VXGE_ALIGN(adrs, size) \
	(((size) - (((u64)adrs) & ((size)-1))) & ((size)-1))
#endif
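
/*
 * Example (illustrative, not part of the driver): VXGE_ALIGN yields the
 * number of pad bytes needed to bring an address up to a power-of-two
 * boundary:
 *
 *	VXGE_ALIGN(0x1004, 16) == 12	(0x1004 + 12 == 0x1010)
 *	VXGE_ALIGN(0x1000, 16) == 0	(already aligned)
 */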

#define VXGE_HW_MIN_MTU				68
#define VXGE_HW_MAX_MTU				9600
#define VXGE_HW_DEFAULT_MTU			1500

#ifdef VXGE_DEBUG_ASSERT
/**
 * vxge_assert
 * @test: C-condition to check
 *
 * This macro implements a traditional assert. Assertions are enabled by
 * default; they can be disabled by leaving VXGE_DEBUG_ASSERT undefined at
 * compile time.
 */
#define vxge_assert(test) BUG_ON(!(test))
#else
#define vxge_assert(test)
#endif /* end of VXGE_DEBUG_ASSERT */
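
/*
 * Example (illustrative): guard an invariant in the data path.
 *
 *	vxge_assert(ring != NULL);
 *
 * With VXGE_DEBUG_ASSERT defined this expands to BUG_ON(!(ring != NULL));
 * without it, the statement compiles away entirely.
 */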

/**
 * enum vxge_debug_level
 * @VXGE_NONE: debug disabled
 * @VXGE_ERR: all errors are logged
 * @VXGE_TRACE: all errors plus all kinds of verbose tracing printouts
 *		are logged. Very noisy.
 *
 * This enumeration is used to switch between debug levels at runtime,
 * provided the DEBUG macro was defined at compilation. If DEBUG is not
 * defined, the debug code is compiled out.
 */
enum vxge_debug_level {
	VXGE_NONE	= 0,
	VXGE_TRACE	= 1,
	VXGE_ERR	= 2
};

#define NULL_VPID					0xFFFFFFFF
#ifdef CONFIG_VXGE_DEBUG_TRACE_ALL
#define VXGE_DEBUG_MODULE_MASK		0xffffffff
#define VXGE_DEBUG_TRACE_MASK		0xffffffff
#define VXGE_DEBUG_ERR_MASK		0xffffffff
#define VXGE_DEBUG_MASK			0x000001ff
#else
#define VXGE_DEBUG_MODULE_MASK		0x20000000
#define VXGE_DEBUG_TRACE_MASK		0x20000000
#define VXGE_DEBUG_ERR_MASK		0x20000000
#define VXGE_DEBUG_MASK			0x00000001
#endif

/*
 * @VXGE_COMPONENT_LL: do debug for vxge link layer module
 * @VXGE_COMPONENT_ALL: activate debug for all modules with no exceptions
 *
 * This enumeration is used to distinguish modules or libraries at
 * compilation and at runtime. The Makefile must declare the
 * VXGE_DEBUG_MODULE_MASK macro and set it to a proper value.
 */
#define	VXGE_COMPONENT_LL				0x20000000
#define	VXGE_COMPONENT_ALL				0xffffffff

#define VXGE_HW_BASE_INF	100
#define VXGE_HW_BASE_ERR	200
#define VXGE_HW_BASE_BADCFG	300

enum vxge_hw_status {
	VXGE_HW_OK				= 0,
	VXGE_HW_FAIL				= 1,
	VXGE_HW_PENDING				= 2,
	VXGE_HW_COMPLETIONS_REMAIN		= 3,

	VXGE_HW_INF_NO_MORE_COMPLETED_DESCRIPTORS = VXGE_HW_BASE_INF + 1,
	VXGE_HW_INF_OUT_OF_DESCRIPTORS		= VXGE_HW_BASE_INF + 2,

	VXGE_HW_ERR_INVALID_HANDLE		= VXGE_HW_BASE_ERR + 1,
	VXGE_HW_ERR_OUT_OF_MEMORY		= VXGE_HW_BASE_ERR + 2,
	VXGE_HW_ERR_VPATH_NOT_AVAILABLE		= VXGE_HW_BASE_ERR + 3,
	VXGE_HW_ERR_VPATH_NOT_OPEN		= VXGE_HW_BASE_ERR + 4,
	VXGE_HW_ERR_WRONG_IRQ			= VXGE_HW_BASE_ERR + 5,
	VXGE_HW_ERR_SWAPPER_CTRL		= VXGE_HW_BASE_ERR + 6,
	VXGE_HW_ERR_INVALID_MTU_SIZE		= VXGE_HW_BASE_ERR + 7,
	VXGE_HW_ERR_INVALID_INDEX		= VXGE_HW_BASE_ERR + 8,
	VXGE_HW_ERR_INVALID_TYPE		= VXGE_HW_BASE_ERR + 9,
	VXGE_HW_ERR_INVALID_OFFSET		= VXGE_HW_BASE_ERR + 10,
	VXGE_HW_ERR_INVALID_DEVICE		= VXGE_HW_BASE_ERR + 11,
	VXGE_HW_ERR_VERSION_CONFLICT		= VXGE_HW_BASE_ERR + 12,
	VXGE_HW_ERR_INVALID_PCI_INFO		= VXGE_HW_BASE_ERR + 13,
	VXGE_HW_ERR_INVALID_TCODE		= VXGE_HW_BASE_ERR + 14,
	VXGE_HW_ERR_INVALID_BLOCK_SIZE		= VXGE_HW_BASE_ERR + 15,
	VXGE_HW_ERR_INVALID_STATE		= VXGE_HW_BASE_ERR + 16,
	VXGE_HW_ERR_PRIVILAGED_OPEARATION	= VXGE_HW_BASE_ERR + 17,
	VXGE_HW_ERR_INVALID_PORT		= VXGE_HW_BASE_ERR + 18,
	VXGE_HW_ERR_FIFO			= VXGE_HW_BASE_ERR + 19,
	VXGE_HW_ERR_VPATH			= VXGE_HW_BASE_ERR + 20,
	VXGE_HW_ERR_CRITICAL			= VXGE_HW_BASE_ERR + 21,
	VXGE_HW_ERR_SLOT_FREEZE			= VXGE_HW_BASE_ERR + 22,

	VXGE_HW_BADCFG_RING_INDICATE_MAX_PKTS	= VXGE_HW_BASE_BADCFG + 1,
	VXGE_HW_BADCFG_FIFO_BLOCKS		= VXGE_HW_BASE_BADCFG + 2,
	VXGE_HW_BADCFG_VPATH_MTU		= VXGE_HW_BASE_BADCFG + 3,
	VXGE_HW_BADCFG_VPATH_RPA_STRIP_VLAN_TAG	= VXGE_HW_BASE_BADCFG + 4,
	VXGE_HW_BADCFG_VPATH_MIN_BANDWIDTH	= VXGE_HW_BASE_BADCFG + 5,
	VXGE_HW_BADCFG_INTR_MODE		= VXGE_HW_BASE_BADCFG + 6,
	VXGE_HW_BADCFG_RTS_MAC_EN		= VXGE_HW_BASE_BADCFG + 7,

	VXGE_HW_EOF_TRACE_BUF			= -1
};

/**
 * enum vxge_hw_device_link_state - Link state enumeration.
 * @VXGE_HW_LINK_NONE: Invalid link state.
 * @VXGE_HW_LINK_DOWN: Link is down.
 * @VXGE_HW_LINK_UP: Link is up.
 *
 */
enum vxge_hw_device_link_state {
	VXGE_HW_LINK_NONE,
	VXGE_HW_LINK_DOWN,
	VXGE_HW_LINK_UP
};

/**
 * struct vxge_hw_device_date - Date Format
 * @day: Day
 * @month: Month
 * @year: Year
 * @date: Date in string format
 *
 * Structure for returning date
 */

#define VXGE_HW_FW_STRLEN	32
struct vxge_hw_device_date {
	u32	day;
	u32	month;
	u32	year;
	char	date[VXGE_HW_FW_STRLEN];
};

struct vxge_hw_device_version {
	u32	major;
	u32	minor;
	u32	build;
	char	version[VXGE_HW_FW_STRLEN];
};

/**
 * struct vxge_hw_fifo_config - Configuration of fifo.
 * @enable: Is this fifo to be commissioned
 * @fifo_blocks: Number of TxDL (that is, lists of Tx descriptors)
 *		blocks per queue.
 * @max_frags: Max number of Tx buffers per TxDL (that is, per single
 *		transmit operation).
 *		No more than 256 transmit buffers can be specified.
 * @memblock_size: Fifo descriptors are allocated in blocks of @memblock_size
 *		bytes. Setting @memblock_size to page size ensures
 *		by-page allocation of descriptors. 128K bytes is the
 *		maximum supported block size.
 * @alignment_size: per Tx fragment DMA-able memory used to align transmit data
 *		(e.g., to align on a cache line).
 * @intr: Boolean. Use 1 to generate interrupt for each completed TxDL.
 *		Use 0 otherwise.
 * @no_snoop_bits: If non-zero, specifies no-snoop PCI operation,
 *		which generally improves latency of the host bridge operation
 *		(see PCI specification). For valid values please refer
 *		to struct vxge_hw_fifo_config{} in the driver sources.
 *
 * Configuration of each Titan fifo.
 * Note: Valid (min, max) range for each attribute is specified in the body of
 * the struct vxge_hw_fifo_config{} structure.
 */
struct vxge_hw_fifo_config {
	u32				enable;
#define VXGE_HW_FIFO_ENABLE				1
#define VXGE_HW_FIFO_DISABLE				0

	u32				fifo_blocks;
#define VXGE_HW_MIN_FIFO_BLOCKS				2
#define VXGE_HW_MAX_FIFO_BLOCKS				128

	u32				max_frags;
#define VXGE_HW_MIN_FIFO_FRAGS				1
#define VXGE_HW_MAX_FIFO_FRAGS				256

	u32				memblock_size;
#define VXGE_HW_MIN_FIFO_MEMBLOCK_SIZE			VXGE_HW_BLOCK_SIZE
#define VXGE_HW_MAX_FIFO_MEMBLOCK_SIZE			131072
#define VXGE_HW_DEF_FIFO_MEMBLOCK_SIZE			8096

	u32				alignment_size;
#define VXGE_HW_MIN_FIFO_ALIGNMENT_SIZE			0
#define VXGE_HW_MAX_FIFO_ALIGNMENT_SIZE			65536
#define VXGE_HW_DEF_FIFO_ALIGNMENT_SIZE			VXGE_CACHE_LINE_SIZE

	u32				intr;
#define VXGE_HW_FIFO_QUEUE_INTR_ENABLE			1
#define VXGE_HW_FIFO_QUEUE_INTR_DISABLE			0
#define VXGE_HW_FIFO_QUEUE_INTR_DEFAULT			0

	u32				no_snoop_bits;
#define VXGE_HW_FIFO_NO_SNOOP_DISABLED			0
#define VXGE_HW_FIFO_NO_SNOOP_TXD			1
#define VXGE_HW_FIFO_NO_SNOOP_FRM			2
#define VXGE_HW_FIFO_NO_SNOOP_ALL			3
#define VXGE_HW_FIFO_NO_SNOOP_DEFAULT			0

};
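
/*
 * Example (illustrative sketch, not taken from the driver): a fifo
 * commissioned with the documented defaults; every value sits inside the
 * (min, max) ranges defined above.
 *
 *	struct vxge_hw_fifo_config fifo_cfg = {
 *		.enable		= VXGE_HW_FIFO_ENABLE,
 *		.fifo_blocks	= VXGE_HW_MIN_FIFO_BLOCKS,
 *		.max_frags	= VXGE_HW_MAX_FIFO_FRAGS,
 *		.memblock_size	= VXGE_HW_DEF_FIFO_MEMBLOCK_SIZE,
 *		.alignment_size	= VXGE_HW_DEF_FIFO_ALIGNMENT_SIZE,
 *		.intr		= VXGE_HW_FIFO_QUEUE_INTR_DEFAULT,
 *		.no_snoop_bits	= VXGE_HW_FIFO_NO_SNOOP_DEFAULT,
 *	};
 */
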
/**
 * struct vxge_hw_ring_config - Ring configurations.
 * @enable: Is this ring to be commissioned
 * @ring_blocks: Number of RxD blocks in the ring
 * @buffer_mode: Receive buffer mode (1, 2, 3, or 5); for details please refer
 *		to Titan User Guide.
 * @scatter_mode: Titan supports two receive scatter modes: A and B.
 *		For details please refer to Titan User Guide.
 * @rx_timer_val: The number of 32ns periods that would be counted between two
 *		timer interrupts.
 * @greedy_return: If Set, forces the device to return absolutely all RxD
 *		that are consumed and still on board when a timer interrupt
 *		triggers. If Clear, then if the device has already returned
 *		RxD before the current timer interrupt triggered and after
 *		the previous timer interrupt triggered, the device is not
 *		forced to return the rest of the consumed RxD that it has
 *		on board, which accounts for a byte count less than the one
 *		programmed into the PRC_CFG6.RXD_CRXDT field.
 * @rx_timer_ci: TBD
 * @backoff_interval_us: Time (in microseconds) after which Titan
 *		tries to download RxDs posted by the host.
 *		Note that the "backoff" does not happen if the host posts
 *		receive descriptors in a timely fashion.
 *
 * Ring configuration.
 */
struct vxge_hw_ring_config {
	u32	enable;
#define VXGE_HW_RING_ENABLE					1
#define VXGE_HW_RING_DISABLE					0
#define VXGE_HW_RING_DEFAULT					1

	u32	ring_blocks;
#define VXGE_HW_MIN_RING_BLOCKS					1
#define VXGE_HW_MAX_RING_BLOCKS					128
#define VXGE_HW_DEF_RING_BLOCKS					2

	u32	buffer_mode;
#define VXGE_HW_RING_RXD_BUFFER_MODE_1				1
#define VXGE_HW_RING_RXD_BUFFER_MODE_3				3
#define VXGE_HW_RING_RXD_BUFFER_MODE_5				5
#define VXGE_HW_RING_RXD_BUFFER_MODE_DEFAULT			1

	u32	scatter_mode;
#define VXGE_HW_RING_SCATTER_MODE_A				0
#define VXGE_HW_RING_SCATTER_MODE_B				1
#define VXGE_HW_RING_SCATTER_MODE_C				2
#define VXGE_HW_RING_SCATTER_MODE_USE_FLASH_DEFAULT		0xffffffff

	u64	rxds_limit;
#define VXGE_HW_DEF_RING_RXDS_LIMIT				44
};
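
/*
 * Example (illustrative sketch, not taken from the driver): a ring using
 * 1-buffer mode and scatter mode A, with the default RxD return limit.
 *
 *	struct vxge_hw_ring_config ring_cfg = {
 *		.enable		= VXGE_HW_RING_ENABLE,
 *		.ring_blocks	= VXGE_HW_DEF_RING_BLOCKS,
 *		.buffer_mode	= VXGE_HW_RING_RXD_BUFFER_MODE_1,
 *		.scatter_mode	= VXGE_HW_RING_SCATTER_MODE_A,
 *		.rxds_limit	= VXGE_HW_DEF_RING_RXDS_LIMIT,
 *	};
 */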

/**
 * struct vxge_hw_vp_config - Configuration of virtual path
 * @vp_id: Virtual Path Id
 * @min_bandwidth: Minimum Guaranteed bandwidth
 * @ring: See struct vxge_hw_ring_config{}.
 * @fifo: See struct vxge_hw_fifo_config{}.
 * @tti: Configuration of interrupt associated with Transmit.
 *		See struct vxge_hw_tim_intr_config{}.
 * @rti: Configuration of interrupt associated with Receive.
 *		See struct vxge_hw_tim_intr_config{}.
 * @mtu: mtu size used on this port.
 * @rpa_strip_vlan_tag: Strip VLAN Tag enable/disable. Instructs the device to
 *		remove the VLAN tag from all received tagged frames that are not
 *		replicated at the internal L2 switch.
 *		0 - Do not strip the VLAN tag.
 *		1 - Strip the VLAN tag. Regardless of this setting, VLAN tags
 *		are always placed into the RxDMA descriptor.
 *
 * This structure is used by the driver to pass the configuration parameters
 * used to configure a Virtual Path.
 */
struct vxge_hw_vp_config {
	u32			vp_id;

#define	VXGE_HW_VPATH_PRIORITY_MIN			0
#define	VXGE_HW_VPATH_PRIORITY_MAX			16
#define	VXGE_HW_VPATH_PRIORITY_DEFAULT			0

	u32			min_bandwidth;
#define	VXGE_HW_VPATH_BANDWIDTH_MIN			0
#define	VXGE_HW_VPATH_BANDWIDTH_MAX			100
#define	VXGE_HW_VPATH_BANDWIDTH_DEFAULT			0

	struct vxge_hw_ring_config	ring;
	struct vxge_hw_fifo_config	fifo;
	struct vxge_hw_tim_intr_config	tti;
	struct vxge_hw_tim_intr_config	rti;

	u32			mtu;
#define VXGE_HW_VPATH_MIN_INITIAL_MTU			VXGE_HW_MIN_MTU
#define VXGE_HW_VPATH_MAX_INITIAL_MTU			VXGE_HW_MAX_MTU
#define VXGE_HW_VPATH_USE_FLASH_DEFAULT_INITIAL_MTU	0xffffffff

	u32			rpa_strip_vlan_tag;
#define VXGE_HW_VPATH_RPA_STRIP_VLAN_TAG_ENABLE			1
#define VXGE_HW_VPATH_RPA_STRIP_VLAN_TAG_DISABLE		0
#define VXGE_HW_VPATH_RPA_STRIP_VLAN_TAG_USE_FLASH_DEFAULT	0xffffffff

};
/**
 * struct vxge_hw_device_config - Device configuration.
 * @dma_blockpool_initial: Initial size of DMA Pool
 * @dma_blockpool_max: Maximum blocks in DMA pool
 * @intr_mode: Line, or MSI-X interrupt.
 *
 * @rth_en: Enable Receive Traffic Hashing (RTH) using IT (Indirection Table).
 * @rth_it_type: RTH IT table programming type
 * @rts_mac_en: Enable Receive Traffic Steering using MAC destination address
 * @vp_config: Configuration for virtual paths
 * @device_poll_millis: Specify the interval (in milliseconds)
 *		to wait for register reads
 *
 * Titan configuration.
 * Contains per-device configuration parameters, including:
 * - stats sampling interval, etc.
 *
 * In addition, struct vxge_hw_device_config{} includes "subordinate"
 * configurations, including:
 * - fifos and rings;
 * - MAC (done at firmware level).
 *
 * See Titan User Guide for more details.
 * Note: Valid (min, max) range for each attribute is specified in the body of
 * the struct vxge_hw_device_config{} structure. Please refer to the
 * corresponding include file.
 * See also: struct vxge_hw_tim_intr_config{}.
 */
struct vxge_hw_device_config {
	u32	dma_blockpool_initial;
	u32	dma_blockpool_max;
#define VXGE_HW_MIN_DMA_BLOCK_POOL_SIZE			0
#define VXGE_HW_INITIAL_DMA_BLOCK_POOL_SIZE		0
#define VXGE_HW_INCR_DMA_BLOCK_POOL_SIZE		4
#define VXGE_HW_MAX_DMA_BLOCK_POOL_SIZE			4096

#define	VXGE_HW_MAX_PAYLOAD_SIZE_512			2

	u32	intr_mode;
#define VXGE_HW_INTR_MODE_IRQLINE			0
#define VXGE_HW_INTR_MODE_MSIX				1
#define VXGE_HW_INTR_MODE_MSIX_ONE_SHOT			2

#define VXGE_HW_INTR_MODE_DEF				0

	u32	rth_en;
#define VXGE_HW_RTH_DISABLE				0
#define VXGE_HW_RTH_ENABLE				1
#define VXGE_HW_RTH_DEFAULT				0

	u32	rth_it_type;
#define VXGE_HW_RTH_IT_TYPE_SOLO_IT			0
#define VXGE_HW_RTH_IT_TYPE_MULTI_IT			1
#define VXGE_HW_RTH_IT_TYPE_DEFAULT			0

	u32	rts_mac_en;
#define VXGE_HW_RTS_MAC_DISABLE				0
#define VXGE_HW_RTS_MAC_ENABLE				1
#define VXGE_HW_RTS_MAC_DEFAULT				0

	struct vxge_hw_vp_config vp_config[VXGE_HW_MAX_VIRTUAL_PATHS];

	u32	device_poll_millis;
#define VXGE_HW_MIN_DEVICE_POLL_MILLIS			1
#define VXGE_HW_MAX_DEVICE_POLL_MILLIS			100000
#define VXGE_HW_DEF_DEVICE_POLL_MILLIS			1000

};
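
/*
 * Example (illustrative sketch, not taken from the driver): a minimal
 * device configuration using MSI-X with RTH enabled, leaving every vpath
 * at the flash-default MTU.
 *
 *	struct vxge_hw_device_config dev_cfg = {
 *		.intr_mode		= VXGE_HW_INTR_MODE_MSIX,
 *		.rth_en			= VXGE_HW_RTH_ENABLE,
 *		.rth_it_type		= VXGE_HW_RTH_IT_TYPE_MULTI_IT,
 *		.device_poll_millis	= VXGE_HW_DEF_DEVICE_POLL_MILLIS,
 *	};
 *	int i;
 *
 *	for (i = 0; i < VXGE_HW_MAX_VIRTUAL_PATHS; i++)
 *		dev_cfg.vp_config[i].mtu =
 *			VXGE_HW_VPATH_USE_FLASH_DEFAULT_INITIAL_MTU;
 */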

/**
 * function vxge_uld_link_up_f - Link-Up callback provided by driver.
 * @devh: HW device handle.
 * Link-up notification callback provided by the driver.
 * This is one of the per-driver callbacks, see struct vxge_hw_uld_cbs{}.
 *
 * See also: struct vxge_hw_uld_cbs{}, vxge_uld_link_down_f{},
 * vxge_hw_driver_initialize().
 */

/**
 * function vxge_uld_link_down_f - Link-Down callback provided by
 * driver.
 * @devh: HW device handle.
 *
 * Link-Down notification callback provided by the driver.
 * This is one of the per-driver callbacks, see struct vxge_hw_uld_cbs{}.
 *
 * See also: struct vxge_hw_uld_cbs{}, vxge_uld_link_up_f{},
 * vxge_hw_driver_initialize().
 */

/**
 * function vxge_uld_crit_err_f - Critical Error notification callback.
 * @devh: HW device handle.
 * @type: Enumerated hw error, e.g.: double ECC.
 * @serr_data: Titan status.
 * @ext_data: Extended data. The contents depends on the @type.
 *
 * Critical-error notification callback provided by the driver
 * (typically registered at HW device initialization time).
 * This is one of the per-driver callbacks, see struct vxge_hw_uld_cbs{}.
 *
 * See also: struct vxge_hw_uld_cbs{}, enum vxge_hw_event{},
 * vxge_hw_driver_initialize().
 */

/**
 * struct vxge_hw_uld_cbs - driver "slow-path" callbacks.
 * @link_up: See vxge_uld_link_up_f{}.
 * @link_down: See vxge_uld_link_down_f{}.
 * @crit_err: See vxge_uld_crit_err_f{}.
 *
 * Driver slow-path (per-driver) callbacks.
 * Implemented by driver and provided to HW via
 * vxge_hw_driver_initialize().
 * Note that these callbacks are not mandatory: HW will not invoke
 * a callback if NULL is specified.
 *
 * See also: vxge_hw_driver_initialize().
 */
struct vxge_hw_uld_cbs {

	void (*link_up)(struct __vxge_hw_device *devh);
	void (*link_down)(struct __vxge_hw_device *devh);
	void (*crit_err)(struct __vxge_hw_device *devh,
			enum vxge_hw_event type, u64 ext_data);
};
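
/*
 * Example (illustrative; vxge_link_up, vxge_link_down and vxge_crit_err
 * are hypothetical driver functions): hand the slow-path callbacks to the
 * HW layer. Any member left NULL is simply never invoked.
 *
 *	static const struct vxge_hw_uld_cbs my_uld_cbs = {
 *		.link_up	= vxge_link_up,
 *		.link_down	= vxge_link_down,
 *		.crit_err	= vxge_crit_err,
 *	};
 */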

/*
 * struct __vxge_hw_blockpool_entry - Block private data structure
 * @item: List header used to link.
 * @length: Length of the block
 * @memblock: Virtual address of the block
 * @dma_addr: DMA Address of the block.
 * @dma_handle: DMA handle of the block.
 * @acc_handle: DMA acc handle
 *
 * Each block is allocated with a header so that the blocks can be linked
 * into a list.
 */
struct __vxge_hw_blockpool_entry {
	struct list_head	item;
	u32			length;
	void			*memblock;
	dma_addr_t		dma_addr;
	struct pci_dev		*dma_handle;
	struct pci_dev		*acc_handle;
};

/*
 * struct __vxge_hw_blockpool - Block Pool
 * @hldev: HW device
 * @block_size: size of each block.
 * @pool_size: Number of blocks in the pool
 * @pool_max: Maximum number of blocks above which to free additional blocks
 * @req_out: Number of block requests outstanding with the OS
 * @free_block_list: List of free blocks
 * @free_entry_list: List of free blockpool entries
 *
 * The block pool contains the preallocated DMA blocks.
 */
struct __vxge_hw_blockpool {
	struct __vxge_hw_device	*hldev;
	u32			block_size;
	u32			pool_size;
	u32			pool_max;
	u32			req_out;
	struct list_head	free_block_list;
	struct list_head	free_entry_list;
};

/*
 * enum __vxge_hw_channel_type - Enumerated channel types.
 * @VXGE_HW_CHANNEL_TYPE_UNKNOWN: Unknown channel.
 * @VXGE_HW_CHANNEL_TYPE_FIFO: fifo.
 * @VXGE_HW_CHANNEL_TYPE_RING: ring.
 * @VXGE_HW_CHANNEL_TYPE_MAX: Maximum number of HW-supported
 * (and recognized) channel types. Currently: 2.
 *
 * Enumerated channel types. Currently there are only two link-layer
 * channels - Titan fifo and Titan ring. In the future the list will grow.
 */
enum __vxge_hw_channel_type {
	VXGE_HW_CHANNEL_TYPE_UNKNOWN	= 0,
	VXGE_HW_CHANNEL_TYPE_FIFO	= 1,
	VXGE_HW_CHANNEL_TYPE_RING	= 2,
	VXGE_HW_CHANNEL_TYPE_MAX	= 3
};

/*
 * struct __vxge_hw_channel
 * @item: List item; used to maintain a list of open channels.
 * @type: Channel type. See enum __vxge_hw_channel_type{}.
 * @devh: Device handle. HW device object that contains _this_ channel.
 * @vph: Virtual path handle. Virtual Path Object that contains _this_ channel.
 * @length: Channel length. Currently allocated number of descriptors.
 *	The channel length "grows" when more descriptors get allocated.
 *	See _hw_mempool_grow.
 * @reserve_arr: Reserve array. Contains descriptors that can be reserved
 *	by driver for the subsequent send or receive operation.
 *	See vxge_hw_fifo_txdl_reserve(),
 *	vxge_hw_ring_rxd_reserve().
 * @reserve_ptr: Current pointer in the reserve array
 * @reserve_top: Reserve top gives the maximum number of dtrs available in
 *	the reserve array.
 * @work_arr: Work array. Contains descriptors posted to the channel.
 *	Note that at any point in time @work_arr contains 3 types of
 *	descriptors:
 *	1) posted but not yet consumed by Titan device;
 *	2) consumed but not yet completed;
 *	3) completed but not yet freed
 *	(via vxge_hw_fifo_txdl_free() or vxge_hw_ring_rxd_free())
 * @post_index: Post index. At any point in time points to the
 *	position in the channel that will contain the next
 *	to-be-posted descriptor.
 * @compl_index: Completion index. At any point in time points to the
 *	position in the channel that will contain the next
 *	to-be-completed descriptor.
 * @free_arr: Free array. Contains completed descriptors that were freed
 *	(i.e., handed over back to HW) by driver.
 *	See vxge_hw_fifo_txdl_free(), vxge_hw_ring_rxd_free().
 * @free_ptr: current pointer in free array
 * @per_dtr_space: Per-descriptor space (in bytes) that channel user can utilize
 *	to store per-operation control information.
 * @stats: Pointer to common statistics
 * @userdata: Per-channel opaque (void *) user-defined context, which may be
 *	driver object, ULP connection, etc.
 *	Once channel is open, @userdata is passed back to user via
 *	vxge_hw_channel_callback_f.
 *
 * HW channel object.
 *
 * See also: enum __vxge_hw_channel_type{}, enum vxge_hw_channel_flag
 */
struct __vxge_hw_channel {
	struct list_head		item;
	enum __vxge_hw_channel_type	type;
	struct __vxge_hw_device		*devh;
	struct __vxge_hw_vpath_handle	*vph;
	u32				length;
	u32				vp_id;
	void				**reserve_arr;
	u32				reserve_ptr;
	u32				reserve_top;
	void				**work_arr;
	u32				post_index ____cacheline_aligned;
	u32				compl_index ____cacheline_aligned;
	void				**free_arr;
	u32				free_ptr;
	void				**orig_arr;
	u32				per_dtr_space;
	void				*userdata;
	struct vxge_hw_common_reg	__iomem *common_reg;
	u32				first_vp_id;
	struct vxge_hw_vpath_stats_sw_common_info *stats;

} ____cacheline_aligned;

/*
 * struct __vxge_hw_virtualpath - Virtual Path
 *
 * @vp_id: Virtual path id
 * @vp_open: This flag specifies if vxge_hw_vp_open is called from LL Driver
 * @hldev: Hal device
 * @vp_config: Virtual Path Config
 * @vp_reg: VPATH Register map address in BAR0
 * @vpmgmt_reg: VPATH_MGMT register map address
 * @max_mtu: Max mtu that can be supported
 * @vsport_number: vsport attached to this vpath
 * @max_kdfc_db: Maximum kernel mode doorbells
 * @max_nofl_db: Maximum non offload doorbells
 * @tx_intr_num: Interrupt Number associated with the TX
 * @ringh: Ring Queue
 * @fifoh: FIFO Queue
 * @vpath_handles: Virtual Path handles list
 * @stats_block: Memory for DMAing stats
 * @stats: Vpath statistics
 *
 * Virtual path structure to encapsulate the data related to a virtual path.
 * Virtual paths are allocated by the HW upon getting configuration from the
 * driver and inserted into the list of virtual paths.
 */
struct __vxge_hw_virtualpath {
	u32				vp_id;

	u32				vp_open;
#define VXGE_HW_VP_NOT_OPEN	0
#define	VXGE_HW_VP_OPEN		1

	struct __vxge_hw_device		*hldev;
	struct vxge_hw_vp_config	*vp_config;
	struct vxge_hw_vpath_reg	__iomem *vp_reg;
	struct vxge_hw_vpmgmt_reg	__iomem *vpmgmt_reg;
	struct __vxge_hw_non_offload_db_wrapper __iomem *nofl_db;

	u32				max_mtu;
	u32				vsport_number;
	u32				max_kdfc_db;
	u32				max_nofl_db;

	struct __vxge_hw_ring *____cacheline_aligned ringh;
	struct __vxge_hw_fifo *____cacheline_aligned fifoh;
	struct list_head		vpath_handles;
	struct __vxge_hw_blockpool_entry	*stats_block;
	struct vxge_hw_vpath_stats_hw_info	*hw_stats;
	struct vxge_hw_vpath_stats_hw_info	*hw_stats_sav;
	struct vxge_hw_vpath_stats_sw_info	*sw_stats;
};

/*
 * struct __vxge_hw_vpath_handle - List item to store callback information
 * @item: List head to keep the item in linked list
 * @vpath: Virtual path to which this item belongs
 *
 * This structure is used to store the callback information.
 */
struct __vxge_hw_vpath_handle {
	struct list_head		item;
	struct __vxge_hw_virtualpath	*vpath;
};

/*
 * struct __vxge_hw_device
 *
 * HW device object.
 */
/**
 * struct __vxge_hw_device - Hal device object
 * @magic: Magic Number
 * @device_id: PCI Device Id of the adapter
 * @major_revision: PCI Device major revision
 * @minor_revision: PCI Device minor revision
 * @bar0: BAR0 virtual address.
 * @pdev: Physical device handle
 * @config: Configuration passed by the LL driver at initialization
 * @link_state: Link state
 *
 * HW device object. Represents Titan adapter
 */
struct __vxge_hw_device {
	u32				magic;
#define VXGE_HW_DEVICE_MAGIC		0x12345678
#define VXGE_HW_DEVICE_DEAD		0xDEADDEAD
	u16				device_id;
	u8				major_revision;
	u8				minor_revision;
	void __iomem			*bar0;
	struct pci_dev			*pdev;
	struct net_device		*ndev;
	struct vxge_hw_device_config	config;
	enum vxge_hw_device_link_state	link_state;

	struct vxge_hw_uld_cbs		uld_callbacks;

	u32				host_type;
	u32				func_id;
	u32				access_rights;
#define VXGE_HW_DEVICE_ACCESS_RIGHT_VPATH      0x1
#define VXGE_HW_DEVICE_ACCESS_RIGHT_SRPCIM     0x2
#define VXGE_HW_DEVICE_ACCESS_RIGHT_MRPCIM     0x4
	struct vxge_hw_legacy_reg	__iomem *legacy_reg;
	struct vxge_hw_toc_reg		__iomem *toc_reg;
	struct vxge_hw_common_reg	__iomem *common_reg;
	struct vxge_hw_mrpcim_reg	__iomem *mrpcim_reg;
	struct vxge_hw_srpcim_reg	__iomem *srpcim_reg \
					[VXGE_HW_TITAN_SRPCIM_REG_SPACES];
	struct vxge_hw_vpmgmt_reg	__iomem *vpmgmt_reg \
					[VXGE_HW_TITAN_VPMGMT_REG_SPACES];
	struct vxge_hw_vpath_reg	__iomem *vpath_reg \
					[VXGE_HW_TITAN_VPATH_REG_SPACES];
	u8				__iomem *kdfc;
	u8				__iomem *usdc;
	struct __vxge_hw_virtualpath	virtual_paths \
					[VXGE_HW_MAX_VIRTUAL_PATHS];
	u64				vpath_assignments;
	u64				vpaths_deployed;
	u32				first_vp_id;
	u64				tim_int_mask0[4];
	u32				tim_int_mask1[4];

	struct __vxge_hw_blockpool	block_pool;
	struct vxge_hw_device_stats	stats;
	u32				debug_module_mask;
	u32				debug_level;
	u32				level_err;
	u32				level_trace;
};

#define VXGE_HW_INFO_LEN	64
/**
 * struct vxge_hw_device_hw_info - Device information
 * @host_type: Host Type
 * @function_mode: Function mode
 * @func_id: Function Id
 * @vpath_mask: vpath bit mask
 * @fw_version: Firmware version
 * @fw_date: Firmware Date
 * @flash_version: Flash version
 * @flash_date: Flash Date
 * @mac_addrs: Mac addresses for each vpath
 * @mac_addr_masks: Mac address masks for each vpath
 *
 * Returns the vpath mask that has the bits set for each vpath allocated
 * for the driver and the first mac address for each vpath
 */
struct vxge_hw_device_hw_info {
	u32		host_type;
#define VXGE_HW_NO_MR_NO_SR_NORMAL_FUNCTION			0
#define VXGE_HW_MR_NO_SR_VH0_BASE_FUNCTION			1
#define VXGE_HW_NO_MR_SR_VH0_FUNCTION0				2
#define VXGE_HW_NO_MR_SR_VH0_VIRTUAL_FUNCTION			3
#define VXGE_HW_MR_SR_VH0_INVALID_CONFIG			4
#define VXGE_HW_SR_VH_FUNCTION0					5
#define VXGE_HW_SR_VH_VIRTUAL_FUNCTION				6
#define VXGE_HW_VH_NORMAL_FUNCTION				7
	u64		function_mode;
#define VXGE_HW_FUNCTION_MODE_SINGLE_FUNCTION			0
#define VXGE_HW_FUNCTION_MODE_MULTI_FUNCTION			1
#define VXGE_HW_FUNCTION_MODE_SRIOV				2
#define VXGE_HW_FUNCTION_MODE_MRIOV				3
#define VXGE_HW_FUNCTION_MODE_MRIOV_8				4
#define VXGE_HW_FUNCTION_MODE_MULTI_FUNCTION_17			5
#define VXGE_HW_FUNCTION_MODE_SRIOV_8				6
#define VXGE_HW_FUNCTION_MODE_SRIOV_4				7
#define VXGE_HW_FUNCTION_MODE_MULTI_FUNCTION_2			8
#define VXGE_HW_FUNCTION_MODE_MULTI_FUNCTION_4			9
#define VXGE_HW_FUNCTION_MODE_MRIOV_4				10

	u32		func_id;
	u64		vpath_mask;
	struct vxge_hw_device_version fw_version;
	struct vxge_hw_device_date    fw_date;
	struct vxge_hw_device_version flash_version;
	struct vxge_hw_device_date    flash_date;
	u8		serial_number[VXGE_HW_INFO_LEN];
	u8		part_number[VXGE_HW_INFO_LEN];
	u8		product_desc[VXGE_HW_INFO_LEN];
	u8		(mac_addrs)[VXGE_HW_MAX_VIRTUAL_PATHS][ETH_ALEN];
	u8		(mac_addr_masks)[VXGE_HW_MAX_VIRTUAL_PATHS][ETH_ALEN];
};

/**
 * struct vxge_hw_device_attr - Device memory spaces.
 * @bar0: BAR0 virtual address.
 * @pdev: PCI device object.
 * @uld_callbacks: Driver slow-path callbacks.
 *
 * Device memory spaces. Includes configuration, BAR0 etc. per device
 * mapped memories. Also, includes a pointer to OS-specific PCI device object.
 */
struct vxge_hw_device_attr {
	void __iomem		*bar0;
	struct pci_dev		*pdev;
	struct vxge_hw_uld_cbs	uld_callbacks;
};

#define VXGE_HW_DEVICE_LINK_STATE_SET(hldev, ls)	(hldev->link_state = ls)

#define VXGE_HW_DEVICE_TIM_INT_MASK_SET(m0, m1, i) {	\
	if (i < 16) {					\
		m0[0] |= vxge_vBIT(0x8, (i*4), 4);	\
		m0[1] |= vxge_vBIT(0x4, (i*4), 4);	\
	}						\
	else {						\
		m1[0] = 0x80000000;			\
		m1[1] = 0x40000000;			\
	}						\
}

#define VXGE_HW_DEVICE_TIM_INT_MASK_RESET(m0, m1, i) {	\
	if (i < 16) {					\
		m0[0] &= ~vxge_vBIT(0x8, (i*4), 4);	\
		m0[1] &= ~vxge_vBIT(0x4, (i*4), 4);	\
	}						\
	else {						\
		m1[0] = 0;				\
		m1[1] = 0;				\
	}						\
}
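
/*
 * Example (illustrative): enable, then disable, the timer-interrupt bits
 * for a virtual path. For i < 16 the per-vpath nibbles in the 64-bit mask0
 * words are updated; for i >= 16 the fixed bits in the 32-bit mask1 words
 * are used instead.
 *
 *	VXGE_HW_DEVICE_TIM_INT_MASK_SET(hldev->tim_int_mask0,
 *					hldev->tim_int_mask1, vp_id);
 *	...
 *	VXGE_HW_DEVICE_TIM_INT_MASK_RESET(hldev->tim_int_mask0,
 *					  hldev->tim_int_mask1, vp_id);
 */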

#define VXGE_HW_DEVICE_STATS_PIO_READ(loc, offset) {		\
	status = vxge_hw_mrpcim_stats_access(hldev,		\
				VXGE_HW_STATS_OP_READ,		\
				loc,				\
				offset,				\
				&val64);			\
								\
	if (status != VXGE_HW_OK)				\
		return status;					\
}

#define VXGE_HW_VPATH_STATS_PIO_READ(offset) {			\
	status = __vxge_hw_vpath_stats_access(vpath,		\
			VXGE_HW_STATS_OP_READ,			\
			offset,					\
			&val64);				\
	if (status != VXGE_HW_OK)				\
		return status;					\
}

/*
 * struct __vxge_hw_ring - Ring channel.
 * @channel: Channel "base" of this ring, the common part of all HW
 *	channels.
 * @mempool: Memory pool, the pool from which descriptors get allocated.
 *	(See vxge_hw_mm.h).
 * @config: Ring configuration, part of device configuration
 *	(see struct vxge_hw_device_config{}).
 * @ring_length: Length of the ring
 * @buffer_mode: 1, 3, or 5. The value specifies a receive buffer mode,
 *	as per Titan User Guide.
 * @rxd_size: RxD sizes for 1-, 3- or 5- buffer modes. As per Titan spec,
 *	a 1-buffer mode descriptor is 32 bytes long, etc.
 * @rxd_priv_size: Per RxD size reserved (by HW) for driver to keep
 *	per-descriptor data (e.g., DMA handle for Solaris)
 * @per_rxd_space: Per rxd space requested by driver
 * @rxds_per_block: Number of descriptors per hardware-defined RxD
 *	block. Depends on the (1-, 3-, 5-) buffer mode.
 * @rxdblock_priv_size: Reserved at the end of each RxD block. HW internal
 *	usage. Not to be confused with @rxd_priv_size.
 * @cmpl_cnt: Completion counter. Is reset to zero upon entering the ISR.
 * @callback: Channel completion callback. HW invokes the callback when there
 *	are new completions on that channel. In many implementations
 *	the @callback executes in the hw interrupt context.
 * @rxd_init: Channel's descriptor-initialize callback.
 *	See vxge_hw_ring_rxd_init_f{}.
 *	If not NULL, HW invokes the callback when opening
 *	the ring.
 * @rxd_term: Channel's descriptor-terminate callback. If not NULL,
 *	HW invokes the callback when closing the corresponding channel.
 *	See also vxge_hw_channel_rxd_term_f{}.
 * @stats: Statistics for ring
 *
 * Ring channel.
 *
 * Note: The structure is cache line aligned to better utilize
 * CPU cache performance.
 */
struct __vxge_hw_ring {
	struct __vxge_hw_channel	channel;
	struct vxge_hw_mempool		*mempool;
	struct vxge_hw_vpath_reg	__iomem	*vp_reg;
	struct vxge_hw_common_reg	__iomem	*common_reg;
	u32				ring_length;
	u32				buffer_mode;
	u32				rxd_size;
	u32				rxd_priv_size;
	u32				per_rxd_space;
	u32				rxds_per_block;
	u32				rxdblock_priv_size;
	u32				cmpl_cnt;
	u32				vp_id;
	u32				doorbell_cnt;
	u32				total_db_cnt;
	u64				rxds_limit;

	enum vxge_hw_status (*callback)(
			struct __vxge_hw_ring *ringh,
			void *rxdh,
			u8 t_code,
			void *userdata);

	enum vxge_hw_status (*rxd_init)(
			void *rxdh,
			void *userdata);

	void (*rxd_term)(
			void *rxdh,
			enum vxge_hw_rxd_state state,
			void *userdata);

	struct vxge_hw_vpath_stats_sw_ring_info *stats	____cacheline_aligned;
	struct vxge_hw_ring_config		*config;
} ____cacheline_aligned;

/**
 * enum vxge_hw_txdl_state - Descriptor (TXDL) state.
 * @VXGE_HW_TXDL_STATE_NONE: Invalid state.
 * @VXGE_HW_TXDL_STATE_AVAIL: Descriptor is available for reservation.
 * @VXGE_HW_TXDL_STATE_POSTED: Descriptor is posted for processing by the
 * device.
 * @VXGE_HW_TXDL_STATE_FREED: Descriptor is free and can be reused for
 * filling-in and posting later.
 *
 * Titan/HW descriptor states.
 *
 */
enum vxge_hw_txdl_state {
	VXGE_HW_TXDL_STATE_NONE		= 0,
	VXGE_HW_TXDL_STATE_AVAIL	= 1,
	VXGE_HW_TXDL_STATE_POSTED	= 2,
	VXGE_HW_TXDL_STATE_FREED	= 3
};
/*
 * struct __vxge_hw_fifo - Fifo.
 * @channel: Channel "base" of this fifo, the common part of all HW
 *	channels.
 * @mempool: Memory pool, from which descriptors get allocated.
 * @config: Fifo configuration, part of device configuration
 *	(see struct vxge_hw_device_config{}).
 * @interrupt_type: Interrupt type to be used
 * @no_snoop_bits: See struct vxge_hw_fifo_config{}.
 * @txdl_per_memblock: Number of TxDLs (TxD lists) per memblock.
 *	For more details on TxDLs please refer to the Titan UG.
 * @txdl_size: Configured TxDL size (i.e., number of TxDs in a list), plus
 *	per-TxDL HW private space (struct __vxge_hw_fifo_txdl_priv).
 * @priv_size: Per-Tx descriptor space reserved for driver
 *	usage.
 * @per_txdl_space: Per txdl private space for the driver
 * @callback: Fifo completion callback. HW invokes the callback when there
 *	are new completions on that fifo. In many implementations
 *	the @callback executes in the hw interrupt context.
 * @txdl_term: Fifo's descriptor-terminate callback. If not NULL,
 *	HW invokes the callback when closing the corresponding fifo.
 *	See also vxge_hw_fifo_txdl_term_f{}.
 * @stats: Statistics of this fifo
 *
 * Fifo channel.
 * Note: The structure is cache line aligned.
 */
struct __vxge_hw_fifo {
	struct __vxge_hw_channel		channel;
	struct vxge_hw_mempool			*mempool;
	struct vxge_hw_fifo_config		*config;
	struct vxge_hw_vpath_reg		__iomem *vp_reg;
	struct __vxge_hw_non_offload_db_wrapper	__iomem *nofl_db;
	u64					interrupt_type;
	u32					no_snoop_bits;
	u32					txdl_per_memblock;
	u32					txdl_size;
	u32					priv_size;
	u32					per_txdl_space;
	u32					vp_id;
	u32					tx_intr_num;

	enum vxge_hw_status (*callback)(
			struct __vxge_hw_fifo *fifo_handle,
			void *txdlh,
			enum vxge_hw_fifo_tcode t_code,
			void *userdata,
			struct sk_buff ***skb_ptr,
			int nr_skb,
			int *more);

	void (*txdl_term)(
			void *txdlh,
			enum vxge_hw_txdl_state state,
			void *userdata);

	struct vxge_hw_vpath_stats_sw_fifo_info *stats ____cacheline_aligned;
} ____cacheline_aligned;

/*
 * struct __vxge_hw_fifo_txdl_priv - Transmit descriptor HW-private data.
 * @dma_addr: DMA (mapped) address of _this_ descriptor.
 * @dma_handle: DMA handle used to map the descriptor onto device.
 * @dma_offset: Descriptor's offset in the memory block. HW allocates
 *	descriptors in memory blocks (see struct vxge_hw_fifo_config{}).
 *	Each memblock is a contiguous block of DMA-able memory.
 * @frags: Total number of fragments (that is, contiguous data buffers)
 *	carried by this TxDL.
 * @align_vaddr_start: Aligned virtual address start
 * @align_vaddr: Virtual address of the per-TxDL area in memory used for
 *	alignment. Used to place one or more mis-aligned fragments.
 * @align_dma_addr: DMA address translated from the @align_vaddr.
 * @align_dma_handle: DMA handle that corresponds to @align_dma_addr.
 * @align_dma_acch: DMA access handle that corresponds to @align_dma_addr.
 * @align_dma_offset: The current offset into the @align_vaddr area.
 *	Grows while filling the descriptor, gets reset.
 * @align_used_frags: Number of fragments used.
 * @alloc_frags: Total number of fragments allocated.
 * @unused: TODO
 * @next_txdl_priv: (TODO).
 * @first_txdp: (TODO).
 * @linked_txdl_priv: Pointer to any linked TxDL for creating contiguous
 *	TxDL list.
 * @txdlh: Corresponding txdlh to this TxDL.
 * @memblock: Pointer to the TxDL memory block or memory page
 *	on the next send operation.
 * @dma_object: DMA address and handle of the memory block that contains
 *	the descriptor. This member is used only in the "checked"
 *	version of the HW (to enforce certain assertions);
 *	otherwise it gets compiled out.
 * @allocated: True if the descriptor is reserved, 0 otherwise. Internal usage.
 *
 * Per-transmit descriptor HW-private data. HW uses the space to keep DMA
 * information associated with the descriptor. Note that driver can ask HW
 * to allocate additional per-descriptor space for its own (driver-specific)
 * purposes.
 *
 * See also: struct vxge_hw_ring_rxd_priv{}.
 */
struct __vxge_hw_fifo_txdl_priv {
	dma_addr_t		dma_addr;
	struct pci_dev		*dma_handle;
	ptrdiff_t		dma_offset;
	u32			frags;
	u8			*align_vaddr_start;
	u8			*align_vaddr;
	dma_addr_t		align_dma_addr;
	struct pci_dev		*align_dma_handle;
	struct pci_dev		*align_dma_acch;
	ptrdiff_t		align_dma_offset;
	u32			align_used_frags;
	u32			alloc_frags;
	u32			unused;
	struct __vxge_hw_fifo_txdl_priv	*next_txdl_priv;
	struct vxge_hw_fifo_txd		*first_txdp;
	void			*memblock;
};

/*
 * struct __vxge_hw_non_offload_db_wrapper - Non-offload Doorbell Wrapper
 * @control_0:	Bits 0 to 7 - Doorbell type.
 *		Bits 8 to 31 - Reserved.
 *		Bits 32 to 39 - The highest TxD in this TxDL.
 *		Bits 40 to 47 - Reserved.
 *		Bits 48 to 55 - Reserved.
 *		Bits 56 to 63 - No snoop flags.
 * @txdl_ptr:	The starting location of the TxDL in host memory.
 *
 * Created by the host and written to the adapter via PIO to a Kernel Doorbell
 * FIFO. All non-offload doorbell wrapper fields must be written by the host as
 * part of a doorbell write. Consumed by the adapter but is not written by the
 * adapter.
 */
struct __vxge_hw_non_offload_db_wrapper {
	u64		control_0;
#define	VXGE_HW_NODBW_GET_TYPE(ctrl0)			vxge_bVALn(ctrl0, 0, 8)
#define VXGE_HW_NODBW_TYPE(val)				vxge_vBIT(val, 0, 8)
#define	VXGE_HW_NODBW_TYPE_NODBW			0

#define	VXGE_HW_NODBW_GET_LAST_TXD_NUMBER(ctrl0)	vxge_bVALn(ctrl0, 32, 8)
#define VXGE_HW_NODBW_LAST_TXD_NUMBER(val)		vxge_vBIT(val, 32, 8)

#define	VXGE_HW_NODBW_GET_NO_SNOOP(ctrl0)		vxge_bVALn(ctrl0, 56, 8)
#define VXGE_HW_NODBW_LIST_NO_SNOOP(val)		vxge_vBIT(val, 56, 8)
#define	VXGE_HW_NODBW_LIST_NO_SNOOP_TXD_READ_TXD0_WRITE		0x2
#define	VXGE_HW_NODBW_LIST_NO_SNOOP_TX_FRAME_DATA_READ		0x1

	u64		txdl_ptr;
};
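
/*
 * Example (illustrative): compose a non-offload doorbell for a TxDL whose
 * highest TxD index is 2, with both no-snoop hints set, and point it at
 * the list's DMA address.
 *
 *	db->control_0 = VXGE_HW_NODBW_TYPE(VXGE_HW_NODBW_TYPE_NODBW) |
 *		VXGE_HW_NODBW_LAST_TXD_NUMBER(2) |
 *		VXGE_HW_NODBW_LIST_NO_SNOOP(
 *			VXGE_HW_NODBW_LIST_NO_SNOOP_TXD_READ_TXD0_WRITE |
 *			VXGE_HW_NODBW_LIST_NO_SNOOP_TX_FRAME_DATA_READ);
 *	db->txdl_ptr = txdl_dma_addr;
 */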

/*
 * TX Descriptor
 */

/**
 * struct vxge_hw_fifo_txd - Transmit Descriptor
 * @control_0:	Bits 0 to 6 - Reserved.
 *		Bit 7 - List Ownership. This field should be initialized
 *		to '1' by the driver before the transmit list pointer is
 *		written to the adapter. This field will be set to '0' by the
 *		adapter once it has completed transmitting the frame or frames
 *		in the list. Note - This field is only valid in TxD0.
 *		Additionally, for multi-list sequences, the driver should not
 *		release any buffers until the ownership of the last list in the
 *		multi-list sequence has been returned to the host.
 *		Bits 8 to 11 - Reserved
 *		Bits 12 to 15 - Transfer_Code. This field is only valid in
 *		TxD0. It is used to describe the status of the transmit data
 *		buffer transfer. This field is always overwritten by the
 *		adapter, so this field may be initialized to any value.
 *		Bits 16 to 17 - Host steering. This field allows the host to
 *		override the selection of the physical transmit port.
 *		Attention:
 *		Normal sounds as if learned from the switch rather than from
 *		the aggregation algorithms.
 *		00: Normal. Use Destination/MAC Address
 *		lookup to determine the transmit port.
 *		01: Send on physical Port1.
 *		10: Send on physical Port0.
 *		11: Send on both ports.
 *		Bits 18 to 21 - Reserved
 *		Bits 22 to 23 - Gather_Code. This field is set by the host and
 *		is used to describe how individual buffers comprise a frame.
 *		10: First descriptor of a frame.
 *		00: Middle of a multi-descriptor frame.
 *		01: Last descriptor of a frame.
 *		11: First and last descriptor of a frame (the entire frame
 *		resides in a single buffer).
 *		For multi-descriptor frames, the only valid gather code sequence
 *		is {10, [00], 01}. In other words, the descriptors must be
 *		placed in the list in the correct order.
 *		Bits 24 to 27 - Reserved
 *		Bits 28 to 29 - LSO_Frm_Encap. LSO Frame Encapsulation
 *		definition. Only valid in TxD0. This field allows the host to
 *		indicate the Ethernet encapsulation of an outbound LSO packet.
 *		00 - classic mode (best guess)
 *		01 - LLC
 *		10 - SNAP
 *		11 - DIX
 *		If "classic mode" is selected, the adapter will attempt to
 *		decode the frame's Ethernet encapsulation by examining the L/T
 *		field as follows:
 *		<= 0x05DC LLC/SNAP encoding; must examine DSAP/SSAP to determine
 *		if packet is IPv4 or IPv6.
 *		0x8870 Jumbo-SNAP encoding.
 *		0x0800 IPv4 DIX encoding
 *		0x86DD IPv6 DIX encoding
 *		others illegal encapsulation
 *		Bit 30 - LSO_Flag. Large Send Offload (LSO) flag.
 *		Set to 1 to perform segmentation offload for TCP/UDP.
 *		This field is valid only in TxD0.
 *		Bits 31 to 33 - Reserved.
 *		Bits 34 to 47 - LSO_MSS. TCP/UDP LSO Maximum Segment Size.
 *		This field is meaningful only when LSO_Control is non-zero.
 *		When LSO_Control is set to TCP_LSO, the single (possibly large)
 *		TCP segment described by this TxDL will be sent as a series of
 *		TCP segments each of which contains no more than LSO_MSS
 *		payload bytes.
 *		When LSO_Control is set to UDP_LSO, the single (possibly large)
 *		UDP datagram described by this TxDL will be sent as a series of
 *		UDP datagrams each of which contains no more than LSO_MSS
 *		payload bytes.
 *		All outgoing frames from this TxDL will have LSO_MSS bytes of UDP
 *		or TCP payload, with the exception of the last, which will have
 *		<= LSO_MSS bytes of payload.
 *		Bits 48 to 63 - Buffer_Size. Number of valid bytes in the
 *		buffer to be read by the adapter. This field is written by the
 *		host. A value of 0 is illegal.
 *		Bits 32 to 63 - This value is written by the adapter upon
 *		completion of a UDP or TCP LSO operation and indicates the number
 *		of UDP or TCP payload bytes that were transmitted. 0x0000 will be
 *		returned for any non-LSO operation.
 * @control_1:	Bits 0 to 4 - Reserved.
 *		Bit 5 - Tx_CKO_IPv4 Set to a '1' to enable IPv4 header checksum
 *		offload. This field is only valid in the first TxD of a frame.
 *		Bit 6 - Tx_CKO_TCP Set to a '1' to enable TCP checksum offload.
 *		This field is only valid in the first TxD of a frame (the TxD's
 *		gather code must be 10 or 11). The driver should only set this
 *		bit if it can guarantee that TCP is present.
 *		Bit 7 - Tx_CKO_UDP Set to a '1' to enable UDP checksum offload.
 *		This field is only valid in the first TxD of a frame (the TxD's
 *		gather code must be 10 or 11). The driver should only set this
 *		bit if it can guarantee that UDP is present.
 *		Bits 8 to 14 - Reserved.
 *		Bit 15 - Tx_VLAN_Enable VLAN tag insertion flag. Set to a '1' to
 *		instruct the adapter to insert the VLAN tag specified by the
 *		Tx_VLAN_Tag field. This field is only valid in the first TxD of
 *		a frame.
 *		Bits 16 to 31 - Tx_VLAN_Tag. Variable portion of the VLAN tag
 *		to be inserted into the frame by the adapter (the first two bytes
 *		of a VLAN tag are always 0x8100). This field is only valid if the
 *		Tx_VLAN_Enable field is set to '1'.
 *		Bits 32 to 33 - Reserved.
 *		Bits 34 to 39 - Tx_Int_Number. Indicates which Tx interrupt
 *		number the frame is associated with. This field is written by
 *		the host. It is only valid in the first TxD of a frame.
 *		Bits 40 to 42 - Reserved.
 *		Bit 43 - Set to 1 to exclude the frame from bandwidth metering
 *		functions. This field is valid only in the first TxD
 *		of a frame.
 *		Bits 44 to 45 - Reserved.
 *		Bit 46 - Tx_Int_Per_List Set to a '1' to instruct the adapter to
 *		generate an interrupt as soon as all of the frames in the list
 *		have been transmitted. In order to have per-frame interrupts,
 *		the driver should place a maximum of one frame per list. This
 *		field is only valid in the first TxD of a frame.
 *		Bit 47 - Tx_Int_Utilization Set to a '1' to instruct the adapter
 *		to count the frame toward the utilization interrupt specified in
 *		the Tx_Int_Number field. This field is only valid in the first
 *		TxD of a frame.
 *		Bits 48 to 63 - Reserved.
 * @buffer_pointer: Buffer start address.
 * @host_control: Host_Control. Opaque 64bit data stored by driver inside the
 *		Titan descriptor prior to posting the latter on the fifo
 *		via vxge_hw_fifo_txdl_post(). The %host_control is returned as
 *		is to the driver with each completed descriptor.
 *
 * Transmit descriptor (TxD). A fifo descriptor contains a configured number
 * (list) of TxDs. For more details please refer to Titan User Guide,
 * Section 5.4.2 "Transmit Descriptor (TxD) Format".
 */
struct vxge_hw_fifo_txd {
	u64 control_0;
#define VXGE_HW_FIFO_TXD_LIST_OWN_ADAPTER		vxge_mBIT(7)

#define VXGE_HW_FIFO_TXD_T_CODE_GET(ctrl0)		vxge_bVALn(ctrl0, 12, 4)
#define VXGE_HW_FIFO_TXD_T_CODE(val)			vxge_vBIT(val, 12, 4)
#define VXGE_HW_FIFO_TXD_T_CODE_UNUSED		VXGE_HW_FIFO_T_CODE_UNUSED


#define VXGE_HW_FIFO_TXD_GATHER_CODE(val)		vxge_vBIT(val, 22, 2)
#define VXGE_HW_FIFO_TXD_GATHER_CODE_FIRST	VXGE_HW_FIFO_GATHER_CODE_FIRST
#define VXGE_HW_FIFO_TXD_GATHER_CODE_LAST	VXGE_HW_FIFO_GATHER_CODE_LAST


#define VXGE_HW_FIFO_TXD_LSO_EN				vxge_mBIT(30)

#define VXGE_HW_FIFO_TXD_LSO_MSS(val)			vxge_vBIT(val, 34, 14)

#define VXGE_HW_FIFO_TXD_BUFFER_SIZE(val)		vxge_vBIT(val, 48, 16)

	u64 control_1;
#define VXGE_HW_FIFO_TXD_TX_CKO_IPV4_EN			vxge_mBIT(5)
#define VXGE_HW_FIFO_TXD_TX_CKO_TCP_EN			vxge_mBIT(6)
#define VXGE_HW_FIFO_TXD_TX_CKO_UDP_EN			vxge_mBIT(7)
#define VXGE_HW_FIFO_TXD_VLAN_ENABLE			vxge_mBIT(15)

#define VXGE_HW_FIFO_TXD_VLAN_TAG(val)			vxge_vBIT(val, 16, 16)

#define VXGE_HW_FIFO_TXD_INT_NUMBER(val)		vxge_vBIT(val, 34, 6)

#define VXGE_HW_FIFO_TXD_INT_TYPE_PER_LIST		vxge_mBIT(46)
#define VXGE_HW_FIFO_TXD_INT_TYPE_UTILZ			vxge_mBIT(47)

	u64 buffer_pointer;

	u64 host_control;
};
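
/*
 * Example (illustrative sketch): fill a single-buffer frame descriptor.
 * The whole frame lives in one buffer, so the gather code is
 * first-and-last (10 | 01 == 11); ownership is handed to the adapter only
 * after everything else is set up.
 *
 *	txdp->buffer_pointer = (u64)dma_addr;
 *	txdp->control_0 =
 *		VXGE_HW_FIFO_TXD_GATHER_CODE(
 *			VXGE_HW_FIFO_TXD_GATHER_CODE_FIRST |
 *			VXGE_HW_FIFO_TXD_GATHER_CODE_LAST) |
 *		VXGE_HW_FIFO_TXD_BUFFER_SIZE(frame_len);
 *	txdp->control_1 = VXGE_HW_FIFO_TXD_INT_TYPE_PER_LIST;
 *	txdp->control_0 |= VXGE_HW_FIFO_TXD_LIST_OWN_ADAPTER;
 */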

/**
 * struct vxge_hw_ring_rxd_1 - One buffer mode RxD for ring
 * @host_control: This field is exclusively for host use and is "readonly"
 *		from the adapter's perspective.
 * @control_0:	Bits 0 to 6 - RTH_Bucket get
 *		Bit 7 - Own Descriptor ownership bit. This bit is set to 1
 *		by the host, and is set to 0 by the adapter.
 *		0 - Host owns RxD and buffer.
 *		1 - The adapter owns RxD and buffer.
 *		Bit 8 - Fast_Path_Eligible When set, indicates that the
 *		received frame meets all of the criteria for fast path
 *		processing. The required criteria are as follows:
 *		!SYN &
 *		(Transfer_Code == "Transfer OK") &
 *		(!Is_IP_Fragment) &
 *		((Is_IPv4 & computed_L3_checksum == 0xFFFF) |
 *		(Is_IPv6)) &
 *		((Is_TCP & computed_L4_checksum == 0xFFFF) |
 *		(Is_UDP & (computed_L4_checksum == 0xFFFF |
 *		computed_L4_checksum == 0x0000)))
 *		(same meaning for all RxD buffer modes)
 *		Bit 9 - L3 Checksum Correct
 *		Bit 10 - L4 Checksum Correct
 *		Bit 11 - Reserved
 *		Bits 12 to 15 - This field is written by the adapter. It is
 *		used to report the status of the frame transfer to the host.
 *		0x0 - Transfer OK
 *		0x4 - RDA Failure During Transfer
 *		0x5 - Unparseable Packet, such as unknown IPv6 header.
 *		0x6 - Frame integrity error (FCS or ECC).
 *		0x7 - Buffer Size Error. The provided buffer(s) were not
 *		appropriately sized and data loss occurred.
 *		0x8 - Internal ECC Error. RxD corrupted.
 *		0x9 - IPv4 Checksum error
 *		0xA - TCP/UDP Checksum error
 *		0xF - Unknown Error or Multiple Error. Indicates an
 *		unknown problem or that more than one of transfer codes is set.
 *		Bit 16 - SYN The adapter sets this field to indicate that
 *		the incoming frame contained a TCP segment with its SYN bit
 *		set and its ACK bit NOT set. (same meaning for all RxD buffer
 *		modes)
 *		Bit 17 - Is ICMP
 *		Bit 18 - RTH_SPDM_HIT Set to 1 if there was a match in the
 *		Socket Pair Direct Match Table and the frame was steered based
 *		on SPDM.
 *		Bit 19 - RTH_IT_HIT Set to 1 if there was a match in the
 *		Indirection Table and the frame was steered based on hash
 *		indirection.
 *		Bits 20 to 23 - RTH_HASH_TYPE Indicates the function (hash
 *		type) that was used to calculate the hash.
 *		Bit 24 - IS_VLAN Set to '1' if the frame was/is VLAN
 *		tagged.
 *		Bits 25 to 26 - ETHER_ENCAP Reflects the Ethernet encapsulation
 *		of the received frame.
 *		0x0 - Ethernet DIX
 *		0x1 - LLC
 *		0x2 - SNAP (includes Jumbo-SNAP)
 *		0x3 - IPX
 *		Bit 27 - IS_IPV4 Set to '1' if the frame contains an IPv4 packet.
 *		Bit 28 - IS_IPV6 Set to '1' if the frame contains an IPv6 packet.
 *		Bit 29 - IS_IP_FRAG Set to '1' if the frame contains a fragmented
 *		IP packet.
 *		Bit 30 - IS_TCP Set to '1' if the frame contains a TCP segment.
 *		Bit 31 - IS_UDP Set to '1' if the frame contains a UDP message.
 *		Bits 32 to 47 - L3_Checksum[0:15] The IPv4 checksum value that
 *		arrived with the frame. If the resulting computed IPv4 header
 *		checksum for the frame did not produce the expected 0xFFFF value,
 *		then the transfer code would be set to 0x9.
 *		Bits 48 to 63 - L4_Checksum[0:15] The TCP/UDP checksum value that
 *		arrived with the frame. If the resulting computed TCP/UDP
 *		checksum for the frame did not produce the expected 0xFFFF value,
 *		then the transfer code would be set to 0xA.
 * @control_1:	Bits 0 to 1 - Reserved
 *		Bits 2 to 15 - Buffer0_Size. This field is set by the host and
 *		eventually overwritten by the adapter. The host writes the
 *		available buffer size in bytes when it passes the descriptor to
 *		the adapter. When a frame is delivered to the host, the adapter
 *		populates this field with the number of bytes written into the
 *		buffer. The largest supported buffer is 16,383 bytes.
 *		Bits 16 to 47 - RTH Hash Value 32-bit RTH hash value. Only valid
 *		if RTH_HASH_TYPE (Control_0, bits 20:23) is nonzero.
 *		Bits 48 to 63 - VLAN_Tag[0:15] The contents of the variable
 *		portion of the VLAN tag, if one was detected by the adapter.
 *		This field is populated even if VLAN-tag stripping is enabled.
 * @buffer0_ptr: Pointer to buffer. This field is populated by the driver.
 *
 * One buffer mode RxD for ring structure
 */
struct vxge_hw_ring_rxd_1 {
	u64 host_control;
	u64 control_0;
#define VXGE_HW_RING_RXD_RTH_BUCKET_GET(ctrl0)		vxge_bVALn(ctrl0, 0, 7)

#define VXGE_HW_RING_RXD_LIST_OWN_ADAPTER		vxge_mBIT(7)

#define VXGE_HW_RING_RXD_FAST_PATH_ELIGIBLE_GET(ctrl0)	vxge_bVALn(ctrl0, 8, 1)

#define VXGE_HW_RING_RXD_L3_CKSUM_CORRECT_GET(ctrl0)	vxge_bVALn(ctrl0, 9, 1)

#define VXGE_HW_RING_RXD_L4_CKSUM_CORRECT_GET(ctrl0)	vxge_bVALn(ctrl0, 10, 1)

#define VXGE_HW_RING_RXD_T_CODE_GET(ctrl0)		vxge_bVALn(ctrl0, 12, 4)
#define VXGE_HW_RING_RXD_T_CODE(val)			vxge_vBIT(val, 12, 4)

#define VXGE_HW_RING_RXD_T_CODE_UNUSED		VXGE_HW_RING_T_CODE_UNUSED

#define VXGE_HW_RING_RXD_SYN_GET(ctrl0)		vxge_bVALn(ctrl0, 16, 1)

#define VXGE_HW_RING_RXD_IS_ICMP_GET(ctrl0)		vxge_bVALn(ctrl0, 17, 1)

#define VXGE_HW_RING_RXD_RTH_SPDM_HIT_GET(ctrl0)	vxge_bVALn(ctrl0, 18, 1)

#define VXGE_HW_RING_RXD_RTH_IT_HIT_GET(ctrl0)		vxge_bVALn(ctrl0, 19, 1)

#define VXGE_HW_RING_RXD_RTH_HASH_TYPE_GET(ctrl0)	vxge_bVALn(ctrl0, 20, 4)

#define VXGE_HW_RING_RXD_IS_VLAN_GET(ctrl0)		vxge_bVALn(ctrl0, 24, 1)

#define VXGE_HW_RING_RXD_ETHER_ENCAP_GET(ctrl0)		vxge_bVALn(ctrl0, 25, 2)

#define VXGE_HW_RING_RXD_FRAME_PROTO_GET(ctrl0)		vxge_bVALn(ctrl0, 27, 5)

#define VXGE_HW_RING_RXD_L3_CKSUM_GET(ctrl0)	vxge_bVALn(ctrl0, 32, 16)

#define VXGE_HW_RING_RXD_L4_CKSUM_GET(ctrl0)	vxge_bVALn(ctrl0, 48, 16)

	u64 control_1;

#define VXGE_HW_RING_RXD_1_BUFFER0_SIZE_GET(ctrl1)	vxge_bVALn(ctrl1, 2, 14)
#define VXGE_HW_RING_RXD_1_BUFFER0_SIZE(val)	vxge_vBIT(val, 2, 14)
#define VXGE_HW_RING_RXD_1_BUFFER0_SIZE_MASK		vxge_vBIT(0x3FFF, 2, 14)

#define VXGE_HW_RING_RXD_1_RTH_HASH_VAL_GET(ctrl1)	vxge_bVALn(ctrl1, 16, 32)

#define VXGE_HW_RING_RXD_VLAN_TAG_GET(ctrl1)	vxge_bVALn(ctrl1, 48, 16)

	u64 buffer0_ptr;
};

enum vxge_hw_rth_algoritms {
	RTH_ALG_JENKINS	= 0,
	RTH_ALG_MS_RSS	= 1,
	RTH_ALG_CRC32C	= 2
};

/**
 * struct vxge_hw_rth_hash_types - RTH hash types.
 * @hash_type_tcpipv4_en: Enables RTH field type HashTypeTcpIPv4
 * @hash_type_ipv4_en: Enables RTH field type HashTypeIPv4
 * @hash_type_tcpipv6_en: Enables RTH field type HashTypeTcpIPv6
 * @hash_type_ipv6_en: Enables RTH field type HashTypeIPv6
 * @hash_type_tcpipv6ex_en: Enables RTH field type HashTypeTcpIPv6Ex
 * @hash_type_ipv6ex_en: Enables RTH field type HashTypeIPv6Ex
 *
 * Used to pass RTH hash types to rts_rts_set.
 *
 * See also: vxge_hw_vpath_rts_rth_set(), vxge_hw_vpath_rts_rth_get().
 */
struct vxge_hw_rth_hash_types {
	u8 hash_type_tcpipv4_en:1,
	   hash_type_ipv4_en:1,
	   hash_type_tcpipv6_en:1,
	   hash_type_ipv6_en:1,
	   hash_type_tcpipv6ex_en:1,
	   hash_type_ipv6ex_en:1;
};
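
/*
 * Example (illustrative): enable hashing on TCP/IPv4 and TCP/IPv6 flows
 * only, leaving the plain-IP and extension-header types disabled.
 *
 *	struct vxge_hw_rth_hash_types hash_types = {
 *		.hash_type_tcpipv4_en = 1,
 *		.hash_type_tcpipv6_en = 1,
 *	};
 */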

void vxge_hw_device_debug_set(
	struct __vxge_hw_device *devh,
	enum vxge_debug_level level,
	u32 mask);

u32
vxge_hw_device_error_level_get(struct __vxge_hw_device *devh);

u32
vxge_hw_device_trace_level_get(struct __vxge_hw_device *devh);

/**
 * vxge_hw_ring_rxd_size_get - Get the size of ring descriptor.
 * @buf_mode: Buffer mode (1, 3 or 5)
 *
 * This function returns the size of RxD for the given buffer mode
 */
static inline u32 vxge_hw_ring_rxd_size_get(u32 buf_mode)
{
	return sizeof(struct vxge_hw_ring_rxd_1);
}

/**
 * vxge_hw_ring_rxds_per_block_get - Get the number of rxds per block.
 * @buf_mode: Buffer mode (1 buffer mode only)
 *
 * This function returns the number of RxDs per RxD block for the given
 * buffer mode
 */
static inline u32 vxge_hw_ring_rxds_per_block_get(u32 buf_mode)
{
	return (u32)((VXGE_HW_BLOCK_SIZE-16) /
		sizeof(struct vxge_hw_ring_rxd_1));
}
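
/*
 * Worked example (assuming the usual 4096-byte VXGE_HW_BLOCK_SIZE): a
 * 1-buffer RxD is 32 bytes, and 16 bytes at the end of each block are
 * reserved for HW use, so (4096 - 16) / 32 = 127 descriptors fit per block.
 */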

/**
 * vxge_hw_ring_rxd_1b_set - Prepare 1-buffer-mode descriptor.
 * @rxdh: Descriptor handle.
 * @dma_pointer: DMA address of a single receive buffer this descriptor
 * should carry. Note that by the time vxge_hw_ring_rxd_1b_set is called,
 * the receive buffer should already be mapped to the device.
 * @size: Size of the receive @dma_pointer buffer.
 *
 * Prepare 1-buffer-mode Rx descriptor for posting
 * (via vxge_hw_ring_rxd_post()).
 *
 * This inline helper-function returns no value and always succeeds.
 *
 */
static inline
void vxge_hw_ring_rxd_1b_set(
	void *rxdh,
	dma_addr_t dma_pointer,
	u32 size)
{
	struct vxge_hw_ring_rxd_1 *rxdp = (struct vxge_hw_ring_rxd_1 *)rxdh;
	rxdp->buffer0_ptr = dma_pointer;
	rxdp->control_1	&= ~VXGE_HW_RING_RXD_1_BUFFER0_SIZE_MASK;
	rxdp->control_1	|= VXGE_HW_RING_RXD_1_BUFFER0_SIZE(size);
}
1473
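/*
 * Illustrative only: a minimal sketch of an rxd_init callback that maps a
 * freshly allocated buffer and prepares the descriptor with the helper
 * above. The skb allocation, buf_size, and the struct my_ring context are
 * assumptions of this sketch, not part of this API.
 *
 *	enum vxge_hw_status my_rxd_init(void *rxdh, void *userdata)
 *	{
 *		struct my_ring *ring = userdata;
 *		struct sk_buff *skb;
 *		dma_addr_t dma;
 *
 *		skb = netdev_alloc_skb(ring->ndev, ring->buf_size);
 *		if (!skb)
 *			return VXGE_HW_FAIL;
 *
 *		dma = pci_map_single(ring->pdev, skb->data, ring->buf_size,
 *				     PCI_DMA_FROMDEVICE);
 *		vxge_hw_ring_rxd_1b_set(rxdh, dma, ring->buf_size);
 *		return VXGE_HW_OK;
 *	}
 */
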
/**
 * vxge_hw_ring_rxd_1b_get - Get data from the completed 1-buf
 * descriptor.
 * @ring_handle: Ring handle.
 * @rxdh: Descriptor handle.
 * @pkt_length: Length (in bytes) of the data in the buffer carried by
 * the descriptor. Returned by HW.
 *
 * Retrieve protocol data from the completed 1-buffer-mode Rx descriptor.
 * This inline helper-function uses the completed descriptor to populate
 * the "out" parameters. The function always succeeds.
 */
static inline
void vxge_hw_ring_rxd_1b_get(
	struct __vxge_hw_ring *ring_handle,
	void *rxdh,
	u32 *pkt_length)
{
	struct vxge_hw_ring_rxd_1 *rxdp = (struct vxge_hw_ring_rxd_1 *)rxdh;

	*pkt_length =
		(u32)VXGE_HW_RING_RXD_1_BUFFER0_SIZE_GET(rxdp->control_1);
}

/**
 * vxge_hw_ring_rxd_1b_info_get - Get extended information associated with
 * a completed receive descriptor for 1b mode.
 * @ring_handle: Ring handle.
 * @rxdh: Descriptor handle.
 * @rxd_info: Descriptor information
 *
 * Retrieve extended information associated with a completed receive
 * descriptor.
 */
static inline
void vxge_hw_ring_rxd_1b_info_get(
	struct __vxge_hw_ring *ring_handle,
	void *rxdh,
	struct vxge_hw_ring_rxd_info *rxd_info)
{
	struct vxge_hw_ring_rxd_1 *rxdp = (struct vxge_hw_ring_rxd_1 *)rxdh;

	rxd_info->syn_flag =
		(u32)VXGE_HW_RING_RXD_SYN_GET(rxdp->control_0);
	rxd_info->is_icmp =
		(u32)VXGE_HW_RING_RXD_IS_ICMP_GET(rxdp->control_0);
	rxd_info->fast_path_eligible =
		(u32)VXGE_HW_RING_RXD_FAST_PATH_ELIGIBLE_GET(rxdp->control_0);
	rxd_info->l3_cksum_valid =
		(u32)VXGE_HW_RING_RXD_L3_CKSUM_CORRECT_GET(rxdp->control_0);
	rxd_info->l3_cksum =
		(u32)VXGE_HW_RING_RXD_L3_CKSUM_GET(rxdp->control_0);
	rxd_info->l4_cksum_valid =
		(u32)VXGE_HW_RING_RXD_L4_CKSUM_CORRECT_GET(rxdp->control_0);
	rxd_info->l4_cksum =
		(u32)VXGE_HW_RING_RXD_L4_CKSUM_GET(rxdp->control_0);
	rxd_info->frame =
		(u32)VXGE_HW_RING_RXD_ETHER_ENCAP_GET(rxdp->control_0);
	rxd_info->proto =
		(u32)VXGE_HW_RING_RXD_FRAME_PROTO_GET(rxdp->control_0);
	rxd_info->is_vlan =
		(u32)VXGE_HW_RING_RXD_IS_VLAN_GET(rxdp->control_0);
	rxd_info->vlan =
		(u32)VXGE_HW_RING_RXD_VLAN_TAG_GET(rxdp->control_1);
	rxd_info->rth_bucket =
		(u32)VXGE_HW_RING_RXD_RTH_BUCKET_GET(rxdp->control_0);
	rxd_info->rth_it_hit =
		(u32)VXGE_HW_RING_RXD_RTH_IT_HIT_GET(rxdp->control_0);
	rxd_info->rth_spdm_hit =
		(u32)VXGE_HW_RING_RXD_RTH_SPDM_HIT_GET(rxdp->control_0);
	rxd_info->rth_hash_type =
		(u32)VXGE_HW_RING_RXD_RTH_HASH_TYPE_GET(rxdp->control_0);
	rxd_info->rth_value =
		(u32)VXGE_HW_RING_RXD_1_RTH_HASH_VAL_GET(rxdp->control_1);
}

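/*
 * Illustrative only: a minimal sketch of a ring completion callback that
 * drains completed descriptors with the two helpers above. It assumes the
 * usual vxge_hw_ring_rxd_next_completed() iterator; deliver_to_stack() and
 * the refill step are placeholders for the driver's own receive path.
 *
 *	enum vxge_hw_status my_ring_callback(struct __vxge_hw_ring *ringh,
 *					     void *rxdh, u8 t_code,
 *					     void *userdata)
 *	{
 *		do {
 *			u32 pkt_length;
 *			struct vxge_hw_ring_rxd_info info;
 *
 *			vxge_hw_ring_rxd_1b_get(ringh, rxdh, &pkt_length);
 *			vxge_hw_ring_rxd_1b_info_get(ringh, rxdh, &info);
 *			deliver_to_stack(userdata, rxdh, pkt_length, &info);
 *			... refill and re-post rxdh ...
 *		} while (vxge_hw_ring_rxd_next_completed(ringh, &rxdh,
 *						&t_code) == VXGE_HW_OK);
 *
 *		return VXGE_HW_OK;
 *	}
 */
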
/**
 * vxge_hw_ring_rxd_private_get - Get driver private per-descriptor data
 * of a 1-buffer or 3-buffer mode ring.
 * @rxdh: Descriptor handle.
 *
 * Returns: private driver info associated with the descriptor.
 * The driver requests per-descriptor space via struct vxge_hw_ring_attr.
 */
static inline void *vxge_hw_ring_rxd_private_get(void *rxdh)
{
	struct vxge_hw_ring_rxd_1 *rxdp = (struct vxge_hw_ring_rxd_1 *)rxdh;
	return (void *)(size_t)rxdp->host_control;
}

/**
 * vxge_hw_fifo_txdl_cksum_set_bits - Offload checksum.
 * @txdlh: Descriptor handle.
 * @cksum_bits: Specifies which checksums are to be offloaded: IPv4,
 * and/or TCP and/or UDP.
 *
 * Ask Titan to calculate IPv4 & transport checksums for _this_ transmit
 * descriptor.
 * This API is part of the preparation of the transmit descriptor for posting
 * (via vxge_hw_fifo_txdl_post()). The related "preparation" APIs include
 * vxge_hw_fifo_txdl_mss_set(), vxge_hw_fifo_txdl_buffer_set_aligned(),
 * and vxge_hw_fifo_txdl_buffer_set().
 * All these APIs fill in the fields of the fifo descriptor,
 * in accordance with the Titan specification.
 */
static inline void vxge_hw_fifo_txdl_cksum_set_bits(void *txdlh, u64 cksum_bits)
{
	struct vxge_hw_fifo_txd *txdp = (struct vxge_hw_fifo_txd *)txdlh;
	txdp->control_1 |= cksum_bits;
}

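/*
 * Illustrative only: requesting IPv4 header and TCP checksum offload for
 * one descriptor. The VXGE_HW_FIFO_TXD_TX_CKO_*_EN bit names are assumed
 * from the TxD control word definitions elsewhere in this header.
 *
 *	vxge_hw_fifo_txdl_cksum_set_bits(txdlh,
 *			VXGE_HW_FIFO_TXD_TX_CKO_IPV4_EN |
 *			VXGE_HW_FIFO_TXD_TX_CKO_TCP_EN);
 */
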
/**
 * vxge_hw_fifo_txdl_mss_set - Set MSS.
 * @txdlh: Descriptor handle.
 * @mss: MSS size for _this_ TCP connection. Passed by TCP stack down to the
 * driver, which in turn inserts the MSS into the @txdlh.
 *
 * This API is part of the preparation of the transmit descriptor for posting
 * (via vxge_hw_fifo_txdl_post()). The related "preparation" APIs include
 * vxge_hw_fifo_txdl_buffer_set(), vxge_hw_fifo_txdl_buffer_set_aligned(),
 * and vxge_hw_fifo_txdl_cksum_set_bits().
 * All these APIs fill in the fields of the fifo descriptor,
 * in accordance with the Titan specification.
 */
static inline void vxge_hw_fifo_txdl_mss_set(void *txdlh, int mss)
{
	struct vxge_hw_fifo_txd *txdp = (struct vxge_hw_fifo_txd *)txdlh;

	txdp->control_0 |= VXGE_HW_FIFO_TXD_LSO_EN;
	txdp->control_0 |= VXGE_HW_FIFO_TXD_LSO_MSS(mss);
}

/**
 * vxge_hw_fifo_txdl_vlan_set - Set VLAN tag.
 * @txdlh: Descriptor handle.
 * @vlan_tag: 16-bit VLAN tag.
 *
 * Insert a VLAN tag into the specified transmit descriptor.
 * The actual insertion of the tag into the outgoing frame is done by the
 * hardware.
 */
static inline void vxge_hw_fifo_txdl_vlan_set(void *txdlh, u16 vlan_tag)
{
	struct vxge_hw_fifo_txd *txdp = (struct vxge_hw_fifo_txd *)txdlh;

	txdp->control_1 |= VXGE_HW_FIFO_TXD_VLAN_ENABLE;
	txdp->control_1 |= VXGE_HW_FIFO_TXD_VLAN_TAG(vlan_tag);
}

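/*
 * Illustrative only: the usual TxDL preparation sequence in a transmit
 * path, combining the helpers above before the descriptor is posted via
 * vxge_hw_fifo_txdl_post(). The reserve and buffer-attach steps are
 * assumed from the fifo API described in this header.
 *
 *	... reserve a TxDL and attach the DMA-mapped buffers ...
 *	if (skb_is_gso(skb))
 *		vxge_hw_fifo_txdl_mss_set(txdlh, skb_shinfo(skb)->gso_size);
 *	if (vlan_tag)
 *		vxge_hw_fifo_txdl_vlan_set(txdlh, vlan_tag);
 *	vxge_hw_fifo_txdl_cksum_set_bits(txdlh,
 *			VXGE_HW_FIFO_TXD_TX_CKO_IPV4_EN |
 *			VXGE_HW_FIFO_TXD_TX_CKO_TCP_EN);
 *	vxge_hw_fifo_txdl_post(fifo_handle, txdlh);
 */
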
/**
 * vxge_hw_fifo_txdl_private_get - Retrieve per-descriptor private data.
 * @txdlh: Descriptor handle.
 *
 * Retrieve per-descriptor private data.
 * Note that the driver requests per-descriptor space via
 * struct vxge_hw_fifo_attr passed to
 * vxge_hw_vpath_open().
 *
 * Returns: private driver data associated with the descriptor.
 */
static inline void *vxge_hw_fifo_txdl_private_get(void *txdlh)
{
	struct vxge_hw_fifo_txd *txdp = (struct vxge_hw_fifo_txd *)txdlh;

	return (void *)(size_t)txdp->host_control;
}

/**
 * struct vxge_hw_ring_attr - Ring open "template".
 * @callback: Ring completion callback. HW invokes the callback when there
 * are new completions on that ring. In many implementations
 * the @callback executes in the hw interrupt context.
 * @rxd_init: Ring's descriptor-initialize callback.
 * See vxge_hw_ring_rxd_init_f{}.
 * If not NULL, HW invokes the callback when opening
 * the ring.
 * @rxd_term: Ring's descriptor-terminate callback. If not NULL,
 * HW invokes the callback when closing the corresponding ring.
 * See also vxge_hw_ring_rxd_term_f{}.
 * @userdata: User-defined "context" of _that_ ring. Passed back to the
 * user as one of the @callback, @rxd_init, and @rxd_term arguments.
 * @per_rxd_space: If specified (i.e., greater than zero): extra space
 * reserved by HW per each receive descriptor.
 * Can be used to store, and retrieve on completion,
 * information specific to the driver.
 *
 * Ring open "template". The user fills the structure with ring
 * attributes and passes it to vxge_hw_vpath_open(); see the sketch after
 * struct vxge_hw_vpath_attr below.
 */
struct vxge_hw_ring_attr {
	enum vxge_hw_status (*callback)(
			struct __vxge_hw_ring *ringh,
			void *rxdh,
			u8 t_code,
			void *userdata);

	enum vxge_hw_status (*rxd_init)(
			void *rxdh,
			void *userdata);

	void (*rxd_term)(
			void *rxdh,
			enum vxge_hw_rxd_state state,
			void *userdata);

	void *userdata;
	u32 per_rxd_space;
};

/**
 * function vxge_hw_fifo_callback_f - FIFO callback.
 * @vpath_handle: Virtual path whose fifo contains one or more completed
 * descriptors.
 * @txdlh: First completed descriptor.
 * @txdl_priv: Pointer to the per-txdl space allocated.
 * @t_code: Transfer code, as per Titan User Guide.
 * Returned by HW.
 * @host_control: Opaque 64bit data stored by driver inside the Titan
 * descriptor prior to posting the latter on the fifo
 * via vxge_hw_fifo_txdl_post(). The @host_control is returned
 * as is to the driver with each completed descriptor.
 * @userdata: Opaque per-fifo data specified at fifo open
 * time, via vxge_hw_vpath_open().
 *
 * Fifo completion callback (type declaration). A single per-fifo
 * callback is specified at fifo open time, via
 * vxge_hw_vpath_open(). Typically gets called as part of the processing
 * of the Interrupt Service Routine.
 *
 * The fifo callback gets called by HW if, and only if, there is at least
 * one new completion on a given fifo. Upon processing the first @txdlh
 * the driver is _supposed_ to continue consuming completions using:
 * - vxge_hw_fifo_txdl_next_completed()
 *
 * Note that failure to process new completions in a timely fashion
 * leads to the VXGE_HW_INF_OUT_OF_DESCRIPTORS condition.
 *
 * A non-zero @t_code means failure to process the transmit descriptor.
 *
 * In the "transmit" case the failure could happen, for instance, when the
 * link is down, in which case Titan completes the descriptor because it
 * is not able to send the data out.
 *
 * For details please refer to the Titan User Guide.
 *
 * See also: vxge_hw_fifo_txdl_next_completed(), vxge_hw_fifo_txdl_term_f{}.
 */
/**
 * function vxge_hw_fifo_txdl_term_f - Terminate descriptor callback.
 * @txdlh: First completed descriptor.
 * @txdl_priv: Pointer to the per-txdl space allocated.
 * @state: One of the enum vxge_hw_txdl_state{} enumerated states.
 * @userdata: Per-fifo user data (a.k.a. context) specified at
 * fifo open time, via vxge_hw_vpath_open().
 *
 * Terminate descriptor callback. Unless NULL is specified in the
 * struct vxge_hw_fifo_attr{} passed to vxge_hw_vpath_open(),
 * HW invokes the callback as part of closing the fifo, prior to
 * de-allocating the ring and associated data structures
 * (including descriptors).
 * The driver should utilize the callback to (for instance) unmap
 * and free DMA data buffers associated with the posted (state =
 * VXGE_HW_TXDL_STATE_POSTED) descriptors,
 * as well as to perform other relevant cleanup.
 *
 * See also: struct vxge_hw_fifo_attr{}
 */
/**
 * struct vxge_hw_fifo_attr - Fifo open "template".
 * @callback: Fifo completion callback. HW invokes the callback when there
 * are new completions on that fifo. In many implementations
 * the @callback executes in the hw interrupt context.
 * @txdl_term: Fifo's descriptor-terminate callback. If not NULL,
 * HW invokes the callback when closing the corresponding fifo.
 * See also vxge_hw_fifo_txdl_term_f{}.
 * @userdata: User-defined "context" of _that_ fifo. Passed back to the
 * user as one of the @callback, and @txdl_term arguments.
 * @per_txdl_space: If specified (i.e., greater than zero): extra space
 * reserved by HW per each transmit descriptor. Can be used to
 * store, and retrieve on completion, information specific
 * to the driver.
 *
 * Fifo open "template". User fills the structure with fifo
 * attributes and passes it to vxge_hw_vpath_open().
 */
struct vxge_hw_fifo_attr {

	enum vxge_hw_status (*callback)(
			struct __vxge_hw_fifo *fifo_handle,
			void *txdlh,
			enum vxge_hw_fifo_tcode t_code,
			void *userdata,
			struct sk_buff ***skb_ptr,
			int nr_skb, int *more);

	void (*txdl_term)(
			void *txdlh,
			enum vxge_hw_txdl_state state,
			void *userdata);

	void *userdata;
	u32 per_txdl_space;
};

/**
 * struct vxge_hw_vpath_attr - Attributes of virtual path
 * @vp_id: Identifier of Virtual Path
 * @ring_attr: Attributes of ring for non-offload receive
 * @fifo_attr: Attributes of fifo for non-offload transmit
 *
 * Attributes of a virtual path. This structure is passed as a parameter
 * to the vxge_hw_vpath_open() routine to set the attributes of ring and fifo.
 */
struct vxge_hw_vpath_attr {
	u32 vp_id;
	struct vxge_hw_ring_attr ring_attr;
	struct vxge_hw_fifo_attr fifo_attr;
};

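/*
 * Illustrative only: a minimal sketch of opening a virtual path with both
 * a ring and a fifo "template". my_ring_callback, my_rxd_init,
 * my_fifo_callback and the context/private structures are placeholders
 * assumed by this sketch; vxge_hw_vpath_open() is declared later in this
 * header.
 *
 *	struct vxge_hw_vpath_attr attr = { .vp_id = vp_id };
 *	struct __vxge_hw_vpath_handle *vph;
 *	enum vxge_hw_status status;
 *
 *	attr.ring_attr.callback      = my_ring_callback;
 *	attr.ring_attr.rxd_init      = my_rxd_init;
 *	attr.ring_attr.userdata      = my_ring_ctx;
 *	attr.ring_attr.per_rxd_space = sizeof(struct my_rxd_priv);
 *
 *	attr.fifo_attr.callback       = my_fifo_callback;
 *	attr.fifo_attr.userdata       = my_fifo_ctx;
 *	attr.fifo_attr.per_txdl_space = sizeof(struct my_txdl_priv);
 *
 *	status = vxge_hw_vpath_open(devh, &attr, &vph);
 *	if (status != VXGE_HW_OK)
 *		... bail out ...
 */
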
enum vxge_hw_status __devinit vxge_hw_device_hw_info_get(
	void __iomem *bar0,
	struct vxge_hw_device_hw_info *hw_info);

enum vxge_hw_status __devinit vxge_hw_device_config_default_get(
	struct vxge_hw_device_config *device_config);

/**
 * vxge_hw_device_link_state_get - Get link state.
 * @devh: HW device handle.
 *
 * Get link state.
 * Returns: link state.
 */
static inline
enum vxge_hw_device_link_state vxge_hw_device_link_state_get(
	struct __vxge_hw_device *devh)
{
	return devh->link_state;
}

void vxge_hw_device_terminate(struct __vxge_hw_device *devh);

const u8 *
vxge_hw_device_serial_number_get(struct __vxge_hw_device *devh);

u16 vxge_hw_device_link_width_get(struct __vxge_hw_device *devh);

const u8 *
vxge_hw_device_product_name_get(struct __vxge_hw_device *devh);

enum vxge_hw_status __devinit vxge_hw_device_initialize(
	struct __vxge_hw_device **devh,
	struct vxge_hw_device_attr *attr,
	struct vxge_hw_device_config *device_config);

enum vxge_hw_status vxge_hw_device_getpause_data(
	struct __vxge_hw_device *devh,
	u32 port,
	u32 *tx,
	u32 *rx);

enum vxge_hw_status vxge_hw_device_setpause_data(
	struct __vxge_hw_device *devh,
	u32 port,
	u32 tx,
	u32 rx);

static inline void *vxge_os_dma_malloc(struct pci_dev *pdev,
				       unsigned long size,
				       struct pci_dev **p_dmah,
				       struct pci_dev **p_dma_acch)
{
	gfp_t flags;
	void *vaddr;
	unsigned long misaligned = 0;
	int realloc_flag = 0;
	*p_dma_acch = *p_dmah = NULL;

	if (in_interrupt())
		flags = GFP_ATOMIC | GFP_DMA;
	else
		flags = GFP_KERNEL | GFP_DMA;
realloc:
	vaddr = kmalloc(size, flags);
	if (vaddr == NULL)
		return vaddr;
	misaligned = (unsigned long)VXGE_ALIGN((unsigned long)vaddr,
				VXGE_CACHE_LINE_SIZE);
	if (realloc_flag)
		goto out;

	if (misaligned) {
		/* misaligned, free current one and try allocating
		 * size + VXGE_CACHE_LINE_SIZE memory
		 */
		kfree(vaddr);
		size += VXGE_CACHE_LINE_SIZE;
		realloc_flag = 1;
		goto realloc;
	}
out:
	/* Stash the alignment offset in *p_dma_acch so the free path can
	 * recover the pointer kmalloc() actually returned, then hand the
	 * caller the cache-line-aligned address.
	 */
	*(unsigned long *)p_dma_acch = misaligned;
	vaddr = (void *)((u8 *)vaddr + misaligned);
	return vaddr;
}

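/*
 * Illustrative only: the matching free path would undo the alignment
 * adjustment recorded above before handing the pointer back to kfree().
 * The name vxge_os_dma_free and its exact signature are assumptions of
 * this sketch.
 *
 *	static inline void vxge_os_dma_free(struct pci_dev *pdev,
 *					    const void *vaddr,
 *					    struct pci_dev **p_dma_acch)
 *	{
 *		unsigned long misaligned = *(unsigned long *)p_dma_acch;
 *
 *		kfree((void *)((u8 *)vaddr - misaligned));
 *	}
 */
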
/*
 * __vxge_hw_mempool_item_priv - returns a pointer to the item's private space
 */
static inline void *
__vxge_hw_mempool_item_priv(
	struct vxge_hw_mempool *mempool,
	u32 memblock_idx,
	void *item,
	u32 *memblock_item_idx)
{
	ptrdiff_t offset;
	void *memblock = mempool->memblocks_arr[memblock_idx];

	offset = (u32)((u8 *)item - (u8 *)memblock);
	vxge_assert(offset >= 0 && (u32)offset < mempool->memblock_size);

	(*memblock_item_idx) = (u32)offset / mempool->item_size;
	vxge_assert((*memblock_item_idx) < mempool->items_per_memblock);

	return (u8 *)mempool->memblocks_priv_arr[memblock_idx] +
		(*memblock_item_idx) * mempool->items_priv_size;
}

/*
 * __vxge_hw_fifo_txdl_priv - Return pointer to the per-TxDL private data
 * maintained for the fifo.
 * @fifo: Fifo
 * @txdp: Pointer to a TxD
 */
static inline struct __vxge_hw_fifo_txdl_priv *
__vxge_hw_fifo_txdl_priv(
	struct __vxge_hw_fifo *fifo,
	struct vxge_hw_fifo_txd *txdp)
{
	return (struct __vxge_hw_fifo_txdl_priv *)
			(((char *)((ulong)txdp->host_control)) +
				fifo->per_txdl_space);
}

enum vxge_hw_status vxge_hw_vpath_open(
	struct __vxge_hw_device *devh,
	struct vxge_hw_vpath_attr *attr,
	struct __vxge_hw_vpath_handle **vpath_handle);

enum vxge_hw_status vxge_hw_vpath_close(
	struct __vxge_hw_vpath_handle *vpath_handle);

enum vxge_hw_status
vxge_hw_vpath_reset(
	struct __vxge_hw_vpath_handle *vpath_handle);

enum vxge_hw_status
vxge_hw_vpath_recover_from_reset(
	struct __vxge_hw_vpath_handle *vpath_handle);

void
vxge_hw_vpath_enable(struct __vxge_hw_vpath_handle *vp);

enum vxge_hw_status
vxge_hw_vpath_check_leak(struct __vxge_hw_ring *ringh);

enum vxge_hw_status vxge_hw_vpath_mtu_set(
	struct __vxge_hw_vpath_handle *vpath_handle,
	u32 new_mtu);

void
vxge_hw_vpath_rx_doorbell_init(struct __vxge_hw_vpath_handle *vp);

#ifndef readq
/* 64-bit MMIO fallbacks for platforms without native readq/writeq */
static inline u64 readq(void __iomem *addr)
{
	u64 ret = 0;
	ret = readl(addr + 4);
	ret <<= 32;
	ret |= readl(addr);

	return ret;
}
#endif

#ifndef writeq
static inline void writeq(u64 val, void __iomem *addr)
{
	writel((u32) (val), addr);
	writel((u32) (val >> 32), (addr + 4));
}
#endif

static inline void __vxge_hw_pio_mem_write32_upper(u32 val, void __iomem *addr)
{
	writel(val, addr + 4);
}

static inline void __vxge_hw_pio_mem_write32_lower(u32 val, void __iomem *addr)
{
	writel(val, addr);
}

enum vxge_hw_status
vxge_hw_device_flick_link_led(struct __vxge_hw_device *devh, u64 on_off);

enum vxge_hw_status
vxge_hw_vpath_strip_fcs_check(struct __vxge_hw_device *hldev, u64 vpath_mask);

/**
 * vxge_debug_ll
 * @level: level of debug verbosity.
 * @mask: mask for the debug
 * @buf: Circular buffer for tracing
 * @fmt: printf like format string
 *
 * Provides logging facilities. Can be customized on a per-module
 * basis and/or with debug levels. Input parameters, except
 * module and level, are the same as posix printf. This function
 * may be compiled out if the DEBUG macro was never defined.
 * See also: enum vxge_debug_level{}.
 */
#if (VXGE_COMPONENT_LL & VXGE_DEBUG_MODULE_MASK)
#define vxge_debug_ll(level, mask, fmt, ...) do {			       \
	if ((level >= VXGE_ERR && VXGE_COMPONENT_LL & VXGE_DEBUG_ERR_MASK) ||  \
	    (level >= VXGE_TRACE && VXGE_COMPONENT_LL & VXGE_DEBUG_TRACE_MASK))\
		if ((mask & VXGE_DEBUG_MASK) == mask)			       \
			printk(fmt "\n", __VA_ARGS__);			       \
} while (0)
#else
#define vxge_debug_ll(level, mask, fmt, ...)
#endif
2014
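/*
 * Illustrative only: a typical call. Because of the
 * printk(fmt "\n", __VA_ARGS__) expansion above, the macro appends a
 * newline and requires at least one variadic argument. A @mask of 0x1
 * passes the (mask & VXGE_DEBUG_MASK) == mask filter with either debug
 * configuration.
 *
 *	vxge_debug_ll(VXGE_ERR, 0x1, "%s: vpath %d open failed",
 *		      __func__, vp_id);
 */
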
enum vxge_hw_status vxge_hw_vpath_rts_rth_itable_set(
	struct __vxge_hw_vpath_handle **vpath_handles,
	u32 vpath_count,
	u8 *mtable,
	u8 *itable,
	u32 itable_size);

enum vxge_hw_status vxge_hw_vpath_rts_rth_set(
	struct __vxge_hw_vpath_handle *vpath_handle,
	enum vxge_hw_rth_algoritms algorithm,
	struct vxge_hw_rth_hash_types *hash_type,
	u16 bucket_size);

enum vxge_hw_status
__vxge_hw_device_is_privilaged(u32 host_type, u32 func_id);

#define VXGE_HW_MIN_SUCCESSIVE_IDLE_COUNT 5
#define VXGE_HW_MAX_POLLING_COUNT 100

int vxge_hw_vpath_wait_receive_idle(struct __vxge_hw_device *hldev, u32 vp_id);

void vxge_hw_device_wait_receive_idle(struct __vxge_hw_device *hldev);
#endif