blob: 8dc2e7752e4cd1977bc4c8f846110b937c4ecba5 [file] [log] [blame]
Krishna Gudipatia36c61f2010-09-15 11:50:55 -07001/*
2 * Copyright (c) 2005-2010 Brocade Communications Systems, Inc.
3 * All rights reserved
4 * www.brocade.com
5 *
6 * Linux driver for Brocade Fibre Channel Host Bus Adapter.
7 *
8 * This program is free software; you can redistribute it and/or modify it
9 * under the terms of the GNU General Public License (GPL) Version 2 as
10 * published by the Free Software Foundation
11 *
12 * This program is distributed in the hope that it will be useful, but
13 * WITHOUT ANY WARRANTY; without even the implied warranty of
14 * MERCHANTABILITY or FITNESS FOR A PARTICULAR PURPOSE. See the GNU
15 * General Public License for more details.
16 */
17
Maggie Zhangf16a1752010-12-09 19:12:32 -080018#include "bfad_drv.h"
Krishna Gudipatia36c61f2010-09-15 11:50:55 -070019#include "bfa_plog.h"
20#include "bfa_cs.h"
21#include "bfa_modules.h"
Krishna Gudipatia36c61f2010-09-15 11:50:55 -070022
23BFA_TRC_FILE(HAL, FCXP);
24BFA_MODULE(fcxp);
25BFA_MODULE(sgpg);
26BFA_MODULE(lps);
27BFA_MODULE(fcport);
28BFA_MODULE(rport);
29BFA_MODULE(uf);
30
Jing Huang5fbe25c2010-10-18 17:17:23 -070031/*
Krishna Gudipatia36c61f2010-09-15 11:50:55 -070032 * LPS related definitions
33 */
34#define BFA_LPS_MIN_LPORTS (1)
35#define BFA_LPS_MAX_LPORTS (256)
36
37/*
38 * Maximum Vports supported per physical port or vf.
39 */
40#define BFA_LPS_MAX_VPORTS_SUPP_CB 255
41#define BFA_LPS_MAX_VPORTS_SUPP_CT 190
42
Krishna Gudipatia36c61f2010-09-15 11:50:55 -070043
Jing Huang5fbe25c2010-10-18 17:17:23 -070044/*
Krishna Gudipatia36c61f2010-09-15 11:50:55 -070045 * FC PORT related definitions
46 */
47/*
48 * The port is considered disabled if corresponding physical port or IOC are
49 * disabled explicitly
50 */
51#define BFA_PORT_IS_DISABLED(bfa) \
52 ((bfa_fcport_is_disabled(bfa) == BFA_TRUE) || \
53 (bfa_ioc_is_disabled(&bfa->ioc) == BFA_TRUE))
54
Jing Huang5fbe25c2010-10-18 17:17:23 -070055/*
Krishna Gudipatia36c61f2010-09-15 11:50:55 -070056 * BFA port state machine events
57 */
/* Events fed into the FC port state machine (bfa_fcport_sm_*). */
enum bfa_fcport_sm_event {
	BFA_FCPORT_SM_START = 1, /* start port state machine */
	BFA_FCPORT_SM_STOP = 2, /* stop port state machine */
	BFA_FCPORT_SM_ENABLE = 3, /* enable port */
	BFA_FCPORT_SM_DISABLE = 4, /* disable port state machine */
	BFA_FCPORT_SM_FWRSP = 5, /* firmware enable/disable rsp */
	BFA_FCPORT_SM_LINKUP = 6, /* firmware linkup event */
	BFA_FCPORT_SM_LINKDOWN = 7, /* firmware linkdown event */
	BFA_FCPORT_SM_QRESUME = 8, /* CQ space available */
	BFA_FCPORT_SM_HWFAIL = 9, /* IOC h/w failure */
};
69
Jing Huang5fbe25c2010-10-18 17:17:23 -070070/*
Krishna Gudipatia36c61f2010-09-15 11:50:55 -070071 * BFA port link notification state machine events
72 */
73
/* Events fed into the link-notification state machine (bfa_fcport_ln_sm_*). */
enum bfa_fcport_ln_sm_event {
	BFA_FCPORT_LN_SM_LINKUP = 1, /* linkup event */
	BFA_FCPORT_LN_SM_LINKDOWN = 2, /* linkdown event */
	BFA_FCPORT_LN_SM_NOTIFICATION = 3 /* done notification */
};
79
Jing Huang5fbe25c2010-10-18 17:17:23 -070080/*
Krishna Gudipatia36c61f2010-09-15 11:50:55 -070081 * RPORT related definitions
82 */
/*
 * Deliver the rport-offline notification: direct call when bfa->fcs is
 * set (presumably FCS context -- confirm), otherwise deferred through
 * the hcb completion-callback queue.
 */
#define bfa_rport_offline_cb(__rp) do {					\
	if ((__rp)->bfa->fcs)						\
		bfa_cb_rport_offline((__rp)->rport_drv);		\
	else {								\
		bfa_cb_queue((__rp)->bfa, &(__rp)->hcb_qe,		\
				__bfa_cb_rport_offline, (__rp));	\
	}								\
} while (0)
91
/*
 * Deliver the rport-online notification; same dispatch rule as
 * bfa_rport_offline_cb() above.
 */
#define bfa_rport_online_cb(__rp) do {					\
	if ((__rp)->bfa->fcs)						\
		bfa_cb_rport_online((__rp)->rport_drv);		\
	else {								\
		bfa_cb_queue((__rp)->bfa, &(__rp)->hcb_qe,		\
				__bfa_cb_rport_online, (__rp));	\
	}								\
} while (0)
100
Jing Huang5fbe25c2010-10-18 17:17:23 -0700101/*
Krishna Gudipatia36c61f2010-09-15 11:50:55 -0700102 * forward declarations FCXP related functions
103 */
104static void __bfa_fcxp_send_cbfn(void *cbarg, bfa_boolean_t complete);
105static void hal_fcxp_rx_plog(struct bfa_s *bfa, struct bfa_fcxp_s *fcxp,
106 struct bfi_fcxp_send_rsp_s *fcxp_rsp);
107static void hal_fcxp_tx_plog(struct bfa_s *bfa, u32 reqlen,
108 struct bfa_fcxp_s *fcxp, struct fchs_s *fchs);
109static void bfa_fcxp_qresume(void *cbarg);
110static void bfa_fcxp_queue(struct bfa_fcxp_s *fcxp,
111 struct bfi_fcxp_send_req_s *send_req);
112
Jing Huang5fbe25c2010-10-18 17:17:23 -0700113/*
Krishna Gudipatia36c61f2010-09-15 11:50:55 -0700114 * forward declarations for LPS functions
115 */
116static void bfa_lps_meminfo(struct bfa_iocfc_cfg_s *cfg, u32 *ndm_len,
117 u32 *dm_len);
118static void bfa_lps_attach(struct bfa_s *bfa, void *bfad,
119 struct bfa_iocfc_cfg_s *cfg,
120 struct bfa_meminfo_s *meminfo,
121 struct bfa_pcidev_s *pcidev);
122static void bfa_lps_detach(struct bfa_s *bfa);
123static void bfa_lps_start(struct bfa_s *bfa);
124static void bfa_lps_stop(struct bfa_s *bfa);
125static void bfa_lps_iocdisable(struct bfa_s *bfa);
126static void bfa_lps_login_rsp(struct bfa_s *bfa,
127 struct bfi_lps_login_rsp_s *rsp);
128static void bfa_lps_logout_rsp(struct bfa_s *bfa,
129 struct bfi_lps_logout_rsp_s *rsp);
130static void bfa_lps_reqq_resume(void *lps_arg);
131static void bfa_lps_free(struct bfa_lps_s *lps);
132static void bfa_lps_send_login(struct bfa_lps_s *lps);
133static void bfa_lps_send_logout(struct bfa_lps_s *lps);
134static void bfa_lps_login_comp(struct bfa_lps_s *lps);
135static void bfa_lps_logout_comp(struct bfa_lps_s *lps);
136static void bfa_lps_cvl_event(struct bfa_lps_s *lps);
137
Jing Huang5fbe25c2010-10-18 17:17:23 -0700138/*
Krishna Gudipatia36c61f2010-09-15 11:50:55 -0700139 * forward declaration for LPS state machine
140 */
141static void bfa_lps_sm_init(struct bfa_lps_s *lps, enum bfa_lps_event event);
142static void bfa_lps_sm_login(struct bfa_lps_s *lps, enum bfa_lps_event event);
143static void bfa_lps_sm_loginwait(struct bfa_lps_s *lps, enum bfa_lps_event
144 event);
145static void bfa_lps_sm_online(struct bfa_lps_s *lps, enum bfa_lps_event event);
146static void bfa_lps_sm_logout(struct bfa_lps_s *lps, enum bfa_lps_event event);
147static void bfa_lps_sm_logowait(struct bfa_lps_s *lps, enum bfa_lps_event
148 event);
149
Jing Huang5fbe25c2010-10-18 17:17:23 -0700150/*
Krishna Gudipatia36c61f2010-09-15 11:50:55 -0700151 * forward declaration for FC Port functions
152 */
153static bfa_boolean_t bfa_fcport_send_enable(struct bfa_fcport_s *fcport);
154static bfa_boolean_t bfa_fcport_send_disable(struct bfa_fcport_s *fcport);
155static void bfa_fcport_update_linkinfo(struct bfa_fcport_s *fcport);
156static void bfa_fcport_reset_linkinfo(struct bfa_fcport_s *fcport);
157static void bfa_fcport_set_wwns(struct bfa_fcport_s *fcport);
158static void __bfa_cb_fcport_event(void *cbarg, bfa_boolean_t complete);
159static void bfa_fcport_scn(struct bfa_fcport_s *fcport,
160 enum bfa_port_linkstate event, bfa_boolean_t trunk);
161static void bfa_fcport_queue_cb(struct bfa_fcport_ln_s *ln,
162 enum bfa_port_linkstate event);
163static void __bfa_cb_fcport_stats_clr(void *cbarg, bfa_boolean_t complete);
164static void bfa_fcport_stats_get_timeout(void *cbarg);
165static void bfa_fcport_stats_clr_timeout(void *cbarg);
166static void bfa_trunk_iocdisable(struct bfa_s *bfa);
167
Jing Huang5fbe25c2010-10-18 17:17:23 -0700168/*
Krishna Gudipatia36c61f2010-09-15 11:50:55 -0700169 * forward declaration for FC PORT state machine
170 */
171static void bfa_fcport_sm_uninit(struct bfa_fcport_s *fcport,
172 enum bfa_fcport_sm_event event);
173static void bfa_fcport_sm_enabling_qwait(struct bfa_fcport_s *fcport,
174 enum bfa_fcport_sm_event event);
175static void bfa_fcport_sm_enabling(struct bfa_fcport_s *fcport,
176 enum bfa_fcport_sm_event event);
177static void bfa_fcport_sm_linkdown(struct bfa_fcport_s *fcport,
178 enum bfa_fcport_sm_event event);
179static void bfa_fcport_sm_linkup(struct bfa_fcport_s *fcport,
180 enum bfa_fcport_sm_event event);
181static void bfa_fcport_sm_disabling(struct bfa_fcport_s *fcport,
182 enum bfa_fcport_sm_event event);
183static void bfa_fcport_sm_disabling_qwait(struct bfa_fcport_s *fcport,
184 enum bfa_fcport_sm_event event);
185static void bfa_fcport_sm_toggling_qwait(struct bfa_fcport_s *fcport,
186 enum bfa_fcport_sm_event event);
187static void bfa_fcport_sm_disabled(struct bfa_fcport_s *fcport,
188 enum bfa_fcport_sm_event event);
189static void bfa_fcport_sm_stopped(struct bfa_fcport_s *fcport,
190 enum bfa_fcport_sm_event event);
191static void bfa_fcport_sm_iocdown(struct bfa_fcport_s *fcport,
192 enum bfa_fcport_sm_event event);
193static void bfa_fcport_sm_iocfail(struct bfa_fcport_s *fcport,
194 enum bfa_fcport_sm_event event);
195
196static void bfa_fcport_ln_sm_dn(struct bfa_fcport_ln_s *ln,
197 enum bfa_fcport_ln_sm_event event);
198static void bfa_fcport_ln_sm_dn_nf(struct bfa_fcport_ln_s *ln,
199 enum bfa_fcport_ln_sm_event event);
200static void bfa_fcport_ln_sm_dn_up_nf(struct bfa_fcport_ln_s *ln,
201 enum bfa_fcport_ln_sm_event event);
202static void bfa_fcport_ln_sm_up(struct bfa_fcport_ln_s *ln,
203 enum bfa_fcport_ln_sm_event event);
204static void bfa_fcport_ln_sm_up_nf(struct bfa_fcport_ln_s *ln,
205 enum bfa_fcport_ln_sm_event event);
206static void bfa_fcport_ln_sm_up_dn_nf(struct bfa_fcport_ln_s *ln,
207 enum bfa_fcport_ln_sm_event event);
208static void bfa_fcport_ln_sm_up_dn_up_nf(struct bfa_fcport_ln_s *ln,
209 enum bfa_fcport_ln_sm_event event);
210
/*
 * Maps each fcport state-machine handler to the externally reported
 * bfa_port state. Note both iocdown and iocfail report ST_IOCDOWN.
 */
static struct bfa_sm_table_s hal_port_sm_table[] = {
	{BFA_SM(bfa_fcport_sm_uninit), BFA_PORT_ST_UNINIT},
	{BFA_SM(bfa_fcport_sm_enabling_qwait), BFA_PORT_ST_ENABLING_QWAIT},
	{BFA_SM(bfa_fcport_sm_enabling), BFA_PORT_ST_ENABLING},
	{BFA_SM(bfa_fcport_sm_linkdown), BFA_PORT_ST_LINKDOWN},
	{BFA_SM(bfa_fcport_sm_linkup), BFA_PORT_ST_LINKUP},
	{BFA_SM(bfa_fcport_sm_disabling_qwait), BFA_PORT_ST_DISABLING_QWAIT},
	{BFA_SM(bfa_fcport_sm_toggling_qwait), BFA_PORT_ST_TOGGLING_QWAIT},
	{BFA_SM(bfa_fcport_sm_disabling), BFA_PORT_ST_DISABLING},
	{BFA_SM(bfa_fcport_sm_disabled), BFA_PORT_ST_DISABLED},
	{BFA_SM(bfa_fcport_sm_stopped), BFA_PORT_ST_STOPPED},
	{BFA_SM(bfa_fcport_sm_iocdown), BFA_PORT_ST_IOCDOWN},
	{BFA_SM(bfa_fcport_sm_iocfail), BFA_PORT_ST_IOCDOWN},
};
225
226
Jing Huang5fbe25c2010-10-18 17:17:23 -0700227/*
Krishna Gudipatia36c61f2010-09-15 11:50:55 -0700228 * forward declaration for RPORT related functions
229 */
230static struct bfa_rport_s *bfa_rport_alloc(struct bfa_rport_mod_s *rp_mod);
231static void bfa_rport_free(struct bfa_rport_s *rport);
232static bfa_boolean_t bfa_rport_send_fwcreate(struct bfa_rport_s *rp);
233static bfa_boolean_t bfa_rport_send_fwdelete(struct bfa_rport_s *rp);
234static bfa_boolean_t bfa_rport_send_fwspeed(struct bfa_rport_s *rp);
235static void __bfa_cb_rport_online(void *cbarg,
236 bfa_boolean_t complete);
237static void __bfa_cb_rport_offline(void *cbarg,
238 bfa_boolean_t complete);
239
Jing Huang5fbe25c2010-10-18 17:17:23 -0700240/*
Krishna Gudipatia36c61f2010-09-15 11:50:55 -0700241 * forward declaration for RPORT state machine
242 */
243static void bfa_rport_sm_uninit(struct bfa_rport_s *rp,
244 enum bfa_rport_event event);
245static void bfa_rport_sm_created(struct bfa_rport_s *rp,
246 enum bfa_rport_event event);
247static void bfa_rport_sm_fwcreate(struct bfa_rport_s *rp,
248 enum bfa_rport_event event);
249static void bfa_rport_sm_online(struct bfa_rport_s *rp,
250 enum bfa_rport_event event);
251static void bfa_rport_sm_fwdelete(struct bfa_rport_s *rp,
252 enum bfa_rport_event event);
253static void bfa_rport_sm_offline(struct bfa_rport_s *rp,
254 enum bfa_rport_event event);
255static void bfa_rport_sm_deleting(struct bfa_rport_s *rp,
256 enum bfa_rport_event event);
257static void bfa_rport_sm_offline_pending(struct bfa_rport_s *rp,
258 enum bfa_rport_event event);
259static void bfa_rport_sm_delete_pending(struct bfa_rport_s *rp,
260 enum bfa_rport_event event);
261static void bfa_rport_sm_iocdisable(struct bfa_rport_s *rp,
262 enum bfa_rport_event event);
263static void bfa_rport_sm_fwcreate_qfull(struct bfa_rport_s *rp,
264 enum bfa_rport_event event);
265static void bfa_rport_sm_fwdelete_qfull(struct bfa_rport_s *rp,
266 enum bfa_rport_event event);
267static void bfa_rport_sm_deleting_qfull(struct bfa_rport_s *rp,
268 enum bfa_rport_event event);
269
Jing Huang5fbe25c2010-10-18 17:17:23 -0700270/*
Krishna Gudipatia36c61f2010-09-15 11:50:55 -0700271 * PLOG related definitions
272 */
/*
 * Validate a port-log record before it is copied into the ring.
 * Returns 0 if the record is acceptable, 1 if it must be rejected.
 */
static int
plkd_validate_logrec(struct bfa_plog_rec_s *pl_rec)
{
	/* Only integer-array and string record types are supported. */
	if ((pl_rec->log_type != BFA_PL_LOG_TYPE_INT) &&
		(pl_rec->log_type != BFA_PL_LOG_TYPE_STRING))
		return 1;

	/*
	 * NOTE(review): this guard only fires for non-INT (i.e. STRING)
	 * records; an INT record with log_num_ints > BFA_PL_INT_LOG_SZ
	 * passes through.  "!=" looks like it may have been intended as
	 * "==" -- confirm.  In practice bfa_plog_intarr() clamps
	 * num_ints itself, so no overflow occurs on that path.
	 */
	if ((pl_rec->log_type != BFA_PL_LOG_TYPE_INT) &&
		(pl_rec->log_num_ints > BFA_PL_INT_LOG_SZ))
		return 1;

	return 0;
}
286
Maggie Zhangf16a1752010-12-09 19:12:32 -0800287static u64
288bfa_get_log_time(void)
289{
290 u64 system_time = 0;
291 struct timeval tv;
292 do_gettimeofday(&tv);
293
294 /* We are interested in seconds only. */
295 system_time = tv.tv_sec;
296 return system_time;
297}
298
/*
 * Append a validated record to the circular port log.  When the ring
 * wraps onto itself the oldest entry is dropped by advancing head.
 */
static void
bfa_plog_add(struct bfa_plog_s *plog, struct bfa_plog_rec_s *pl_rec)
{
	u16 tail;
	struct bfa_plog_rec_s *pl_recp;

	if (plog->plog_enabled == 0)
		return;

	/* Malformed records are asserted on and silently dropped. */
	if (plkd_validate_logrec(pl_rec)) {
		bfa_assert(0);
		return;
	}

	tail = plog->tail;

	pl_recp = &(plog->plog_recs[tail]);

	memcpy(pl_recp, pl_rec, sizeof(struct bfa_plog_rec_s));

	/* The timestamp is filled in here, not by the caller. */
	pl_recp->tv = bfa_get_log_time();
	BFA_PL_LOG_REC_INCR(plog->tail);

	/* Ring is full: overwrite (drop) the oldest record. */
	if (plog->head == plog->tail)
		BFA_PL_LOG_REC_INCR(plog->head);
}
325
326void
327bfa_plog_init(struct bfa_plog_s *plog)
328{
Jing Huang6a18b162010-10-18 17:08:54 -0700329 memset((char *)plog, 0, sizeof(struct bfa_plog_s));
Krishna Gudipatia36c61f2010-09-15 11:50:55 -0700330
Jing Huang6a18b162010-10-18 17:08:54 -0700331 memcpy(plog->plog_sig, BFA_PL_SIG_STR, BFA_PL_SIG_LEN);
Krishna Gudipatia36c61f2010-09-15 11:50:55 -0700332 plog->head = plog->tail = 0;
333 plog->plog_enabled = 1;
334}
335
/*
 * Log a string record.  The string is truncated to
 * BFA_PL_STRING_LOG_SZ - 1 characters and always NUL terminated.
 */
void
bfa_plog_str(struct bfa_plog_s *plog, enum bfa_plog_mid mid,
		enum bfa_plog_eid event,
		u16 misc, char *log_str)
{
	struct bfa_plog_rec_s lp;

	if (plog->plog_enabled) {
		memset(&lp, 0, sizeof(struct bfa_plog_rec_s));
		lp.mid = mid;
		lp.eid = event;
		lp.log_type = BFA_PL_LOG_TYPE_STRING;
		lp.misc = misc;
		/* Explicit terminator: strncpy alone does not guarantee one. */
		strncpy(lp.log_entry.string_log, log_str,
			BFA_PL_STRING_LOG_SZ - 1);
		lp.log_entry.string_log[BFA_PL_STRING_LOG_SZ - 1] = '\0';
		bfa_plog_add(plog, &lp);
	}
}
355
/*
 * Log an integer-array record.  num_ints is clamped to the record's
 * capacity (BFA_PL_INT_LOG_SZ); excess values are silently dropped.
 */
void
bfa_plog_intarr(struct bfa_plog_s *plog, enum bfa_plog_mid mid,
		enum bfa_plog_eid event,
		u16 misc, u32 *intarr, u32 num_ints)
{
	struct bfa_plog_rec_s lp;
	u32 i;

	if (num_ints > BFA_PL_INT_LOG_SZ)
		num_ints = BFA_PL_INT_LOG_SZ;

	if (plog->plog_enabled) {
		memset(&lp, 0, sizeof(struct bfa_plog_rec_s));
		lp.mid = mid;
		lp.eid = event;
		lp.log_type = BFA_PL_LOG_TYPE_INT;
		lp.misc = misc;

		for (i = 0; i < num_ints; i++)
			lp.log_entry.int_log[i] = intarr[i];

		lp.log_num_ints = (u8) num_ints;

		bfa_plog_add(plog, &lp);
	}
}
382
383void
384bfa_plog_fchdr(struct bfa_plog_s *plog, enum bfa_plog_mid mid,
385 enum bfa_plog_eid event,
386 u16 misc, struct fchs_s *fchdr)
387{
388 struct bfa_plog_rec_s lp;
389 u32 *tmp_int = (u32 *) fchdr;
390 u32 ints[BFA_PL_INT_LOG_SZ];
391
392 if (plog->plog_enabled) {
Jing Huang6a18b162010-10-18 17:08:54 -0700393 memset(&lp, 0, sizeof(struct bfa_plog_rec_s));
Krishna Gudipatia36c61f2010-09-15 11:50:55 -0700394
395 ints[0] = tmp_int[0];
396 ints[1] = tmp_int[1];
397 ints[2] = tmp_int[4];
398
399 bfa_plog_intarr(plog, mid, event, misc, ints, 3);
400 }
401}
402
403void
404bfa_plog_fchdr_and_pl(struct bfa_plog_s *plog, enum bfa_plog_mid mid,
405 enum bfa_plog_eid event, u16 misc, struct fchs_s *fchdr,
406 u32 pld_w0)
407{
408 struct bfa_plog_rec_s lp;
409 u32 *tmp_int = (u32 *) fchdr;
410 u32 ints[BFA_PL_INT_LOG_SZ];
411
412 if (plog->plog_enabled) {
Jing Huang6a18b162010-10-18 17:08:54 -0700413 memset(&lp, 0, sizeof(struct bfa_plog_rec_s));
Krishna Gudipatia36c61f2010-09-15 11:50:55 -0700414
415 ints[0] = tmp_int[0];
416 ints[1] = tmp_int[1];
417 ints[2] = tmp_int[4];
418 ints[3] = pld_w0;
419
420 bfa_plog_intarr(plog, mid, event, misc, ints, 4);
421 }
422}
423
Krishna Gudipatia36c61f2010-09-15 11:50:55 -0700424
Jing Huang5fbe25c2010-10-18 17:17:23 -0700425/*
Krishna Gudipatia36c61f2010-09-15 11:50:55 -0700426 * fcxp_pvt BFA FCXP private functions
427 */
428
/*
 * Carve the request and response payload buffer pools for all FCXPs
 * out of the DMA-able memory described by @mi, and advance the
 * meminfo cursors past the claimed region.
 */
static void
claim_fcxp_req_rsp_mem(struct bfa_fcxp_mod_s *mod, struct bfa_meminfo_s *mi)
{
	u8	*dm_kva = NULL;
	u64	dm_pa;
	u32	buf_pool_sz;

	dm_kva = bfa_meminfo_dma_virt(mi);
	dm_pa = bfa_meminfo_dma_phys(mi);

	buf_pool_sz = mod->req_pld_sz * mod->num_fcxps;

	/*
	 * Initialize the fcxp req payload list
	 */
	mod->req_pld_list_kva = dm_kva;
	mod->req_pld_list_pa = dm_pa;
	dm_kva += buf_pool_sz;
	dm_pa += buf_pool_sz;
	memset(mod->req_pld_list_kva, 0, buf_pool_sz);

	/*
	 * Initialize the fcxp rsp payload list
	 */
	buf_pool_sz = mod->rsp_pld_sz * mod->num_fcxps;
	mod->rsp_pld_list_kva = dm_kva;
	mod->rsp_pld_list_pa = dm_pa;
	dm_kva += buf_pool_sz;
	dm_pa += buf_pool_sz;
	memset(mod->rsp_pld_list_kva, 0, buf_pool_sz);

	/* Hand the advanced cursors back to the meminfo descriptor. */
	bfa_meminfo_dma_virt(mi) = dm_kva;
	bfa_meminfo_dma_phys(mi) = dm_pa;
}
463
464static void
465claim_fcxps_mem(struct bfa_fcxp_mod_s *mod, struct bfa_meminfo_s *mi)
466{
467 u16 i;
468 struct bfa_fcxp_s *fcxp;
469
470 fcxp = (struct bfa_fcxp_s *) bfa_meminfo_kva(mi);
Jing Huang6a18b162010-10-18 17:08:54 -0700471 memset(fcxp, 0, sizeof(struct bfa_fcxp_s) * mod->num_fcxps);
Krishna Gudipatia36c61f2010-09-15 11:50:55 -0700472
473 INIT_LIST_HEAD(&mod->fcxp_free_q);
474 INIT_LIST_HEAD(&mod->fcxp_active_q);
475
476 mod->fcxp_list = fcxp;
477
478 for (i = 0; i < mod->num_fcxps; i++) {
479 fcxp->fcxp_mod = mod;
480 fcxp->fcxp_tag = i;
481
482 list_add_tail(&fcxp->qe, &mod->fcxp_free_q);
483 bfa_reqq_winit(&fcxp->reqq_wqe, bfa_fcxp_qresume, fcxp);
484 fcxp->reqq_waiting = BFA_FALSE;
485
486 fcxp = fcxp + 1;
487 }
488
489 bfa_meminfo_kva(mi) = (void *)fcxp;
490}
491
492static void
493bfa_fcxp_meminfo(struct bfa_iocfc_cfg_s *cfg, u32 *ndm_len,
494 u32 *dm_len)
495{
496 u16 num_fcxp_reqs = cfg->fwcfg.num_fcxp_reqs;
497
498 if (num_fcxp_reqs == 0)
499 return;
500
501 /*
502 * Account for req/rsp payload
503 */
504 *dm_len += BFA_FCXP_MAX_IBUF_SZ * num_fcxp_reqs;
505 if (cfg->drvcfg.min_cfg)
506 *dm_len += BFA_FCXP_MAX_IBUF_SZ * num_fcxp_reqs;
507 else
508 *dm_len += BFA_FCXP_MAX_LBUF_SZ * num_fcxp_reqs;
509
510 /*
511 * Account for fcxp structs
512 */
513 *ndm_len += sizeof(struct bfa_fcxp_s) * num_fcxp_reqs;
514}
515
/*
 * Module attach: initialize the FCXP module state and claim its
 * memory from the meminfo descriptor.
 */
static void
bfa_fcxp_attach(struct bfa_s *bfa, void *bfad, struct bfa_iocfc_cfg_s *cfg,
		struct bfa_meminfo_s *meminfo, struct bfa_pcidev_s *pcidev)
{
	struct bfa_fcxp_mod_s *mod = BFA_FCXP_MOD(bfa);

	memset(mod, 0, sizeof(struct bfa_fcxp_mod_s));
	mod->bfa = bfa;
	mod->num_fcxps = cfg->fwcfg.num_fcxp_reqs;

	/*
	 * Initialize FCXP request and response payload sizes.
	 */
	mod->req_pld_sz = mod->rsp_pld_sz = BFA_FCXP_MAX_IBUF_SZ;
	if (!cfg->drvcfg.min_cfg)
		mod->rsp_pld_sz = BFA_FCXP_MAX_LBUF_SZ;

	INIT_LIST_HEAD(&mod->wait_q);

	claim_fcxp_req_rsp_mem(mod, meminfo);
	claim_fcxps_mem(mod, meminfo);
}
538
/* Module detach hook: intentionally empty, presumably kept to satisfy
 * the BFA_MODULE interface -- nothing to tear down here. */
static void
bfa_fcxp_detach(struct bfa_s *bfa)
{
}
543
/* Module start hook: intentionally empty (BFA_MODULE interface). */
static void
bfa_fcxp_start(struct bfa_s *bfa)
{
}
548
/* Module stop hook: intentionally empty (BFA_MODULE interface). */
static void
bfa_fcxp_stop(struct bfa_s *bfa)
{
}
553
/*
 * IOC-disable hook: fail all active FCXPs with IOC_FAILURE.  FCXPs
 * with no caller context are completed and freed inline; the rest are
 * completed through the deferred callback queue.
 */
static void
bfa_fcxp_iocdisable(struct bfa_s *bfa)
{
	struct bfa_fcxp_mod_s *mod = BFA_FCXP_MOD(bfa);
	struct bfa_fcxp_s *fcxp;
	struct list_head *qe, *qen;

	/* Safe iteration: bfa_fcxp_free() removes entries as we go. */
	list_for_each_safe(qe, qen, &mod->fcxp_active_q) {
		fcxp = (struct bfa_fcxp_s *) qe;
		if (fcxp->caller == NULL) {
			fcxp->send_cbfn(fcxp->caller, fcxp, fcxp->send_cbarg,
					BFA_STATUS_IOC_FAILURE, 0, 0, NULL);
			bfa_fcxp_free(fcxp);
		} else {
			fcxp->rsp_status = BFA_STATUS_IOC_FAILURE;
			bfa_cb_queue(bfa, &fcxp->hcb_qe,
					__bfa_fcxp_send_cbfn, fcxp);
		}
	}
}
574
575static struct bfa_fcxp_s *
576bfa_fcxp_get(struct bfa_fcxp_mod_s *fm)
577{
578 struct bfa_fcxp_s *fcxp;
579
580 bfa_q_deq(&fm->fcxp_free_q, &fcxp);
581
582 if (fcxp)
583 list_add_tail(&fcxp->qe, &fm->fcxp_active_q);
584
585 return fcxp;
586}
587
/*
 * Initialize one direction (request or response) of an FCXP's SG
 * setup.  With n_sgles == 0 the FCXP's internal buffer is used;
 * otherwise the caller-supplied SG address/length callbacks are
 * recorded.  Only BFI_SGE_INLINE SGEs are supported -- more asserts.
 */
static void
bfa_fcxp_init_reqrsp(struct bfa_fcxp_s *fcxp,
	       struct bfa_s *bfa,
	       u8 *use_ibuf,
	       u32 *nr_sgles,
	       bfa_fcxp_get_sgaddr_t *r_sga_cbfn,
	       bfa_fcxp_get_sglen_t *r_sglen_cbfn,
	       struct list_head *r_sgpg_q,
	       int n_sgles,
	       bfa_fcxp_get_sgaddr_t sga_cbfn,
	       bfa_fcxp_get_sglen_t sglen_cbfn)
{

	bfa_assert(bfa != NULL);

	bfa_trc(bfa, fcxp->fcxp_tag);

	if (n_sgles == 0) {
		*use_ibuf = 1;
	} else {
		/*
		 * NOTE(review): dereferencing a function pointer yields the
		 * function designator, so these are equivalent to checking
		 * sga_cbfn/sglen_cbfn themselves against NULL.
		 */
		bfa_assert(*sga_cbfn != NULL);
		bfa_assert(*sglen_cbfn != NULL);

		*use_ibuf = 0;
		*r_sga_cbfn = sga_cbfn;
		*r_sglen_cbfn = sglen_cbfn;

		*nr_sgles = n_sgles;

		/*
		 * alloc required sgpgs
		 */
		if (n_sgles > BFI_SGE_INLINE)
			bfa_assert(0);
	}

}
625
/*
 * Initialize a freshly allocated FCXP: record the caller context and
 * set up the request and response SG descriptors via
 * bfa_fcxp_init_reqrsp().
 */
static void
bfa_fcxp_init(struct bfa_fcxp_s *fcxp,
	       void *caller, struct bfa_s *bfa, int nreq_sgles,
	       int nrsp_sgles, bfa_fcxp_get_sgaddr_t req_sga_cbfn,
	       bfa_fcxp_get_sglen_t req_sglen_cbfn,
	       bfa_fcxp_get_sgaddr_t rsp_sga_cbfn,
	       bfa_fcxp_get_sglen_t rsp_sglen_cbfn)
{

	bfa_assert(bfa != NULL);

	bfa_trc(bfa, fcxp->fcxp_tag);

	fcxp->caller = caller;

	/* Request-direction SG setup. */
	bfa_fcxp_init_reqrsp(fcxp, bfa,
		&fcxp->use_ireqbuf, &fcxp->nreq_sgles, &fcxp->req_sga_cbfn,
		&fcxp->req_sglen_cbfn, &fcxp->req_sgpg_q,
		nreq_sgles, req_sga_cbfn, req_sglen_cbfn);

	/* Response-direction SG setup. */
	bfa_fcxp_init_reqrsp(fcxp, bfa,
		&fcxp->use_irspbuf, &fcxp->nrsp_sgles, &fcxp->rsp_sga_cbfn,
		&fcxp->rsp_sglen_cbfn, &fcxp->rsp_sgpg_q,
		nrsp_sgles, rsp_sga_cbfn, rsp_sglen_cbfn);

}
652
653static void
654bfa_fcxp_put(struct bfa_fcxp_s *fcxp)
655{
656 struct bfa_fcxp_mod_s *mod = fcxp->fcxp_mod;
657 struct bfa_fcxp_wqe_s *wqe;
658
659 bfa_q_deq(&mod->wait_q, &wqe);
660 if (wqe) {
661 bfa_trc(mod->bfa, fcxp->fcxp_tag);
662
663 bfa_fcxp_init(fcxp, wqe->caller, wqe->bfa, wqe->nreq_sgles,
664 wqe->nrsp_sgles, wqe->req_sga_cbfn,
665 wqe->req_sglen_cbfn, wqe->rsp_sga_cbfn,
666 wqe->rsp_sglen_cbfn);
667
668 wqe->alloc_cbfn(wqe->alloc_cbarg, fcxp);
669 return;
670 }
671
672 bfa_assert(bfa_q_is_on_q(&mod->fcxp_active_q, fcxp));
673 list_del(&fcxp->qe);
674 list_add_tail(&fcxp->qe, &mod->fcxp_free_q);
675}
676
/*
 * No-op completion handler installed for discarded FCXPs, so a late
 * firmware response lands somewhere harmless.
 */
static void
bfa_fcxp_null_comp(void *bfad_fcxp, struct bfa_fcxp_s *fcxp, void *cbarg,
		   bfa_status_t req_status, u32 rsp_len,
		   u32 resid_len, struct fchs_s *rsp_fchs)
{
	/* discarded fcxp completion */
}
684
685static void
686__bfa_fcxp_send_cbfn(void *cbarg, bfa_boolean_t complete)
687{
688 struct bfa_fcxp_s *fcxp = cbarg;
689
690 if (complete) {
691 fcxp->send_cbfn(fcxp->caller, fcxp, fcxp->send_cbarg,
692 fcxp->rsp_status, fcxp->rsp_len,
693 fcxp->residue_len, &fcxp->rsp_fchs);
694 } else {
695 bfa_fcxp_free(fcxp);
696 }
697}
698
/*
 * Firmware send-response handler.  Byte-swaps the response fields,
 * looks up the FCXP by tag and completes it: inline (then frees) when
 * there is no caller context, otherwise via the deferred callback
 * queue so the caller can own the FCXP until its callback runs.
 */
static void
hal_fcxp_send_comp(struct bfa_s *bfa, struct bfi_fcxp_send_rsp_s *fcxp_rsp)
{
	struct bfa_fcxp_mod_s *mod = BFA_FCXP_MOD(bfa);
	struct bfa_fcxp_s *fcxp;
	u16 fcxp_tag = be16_to_cpu(fcxp_rsp->fcxp_tag);

	bfa_trc(bfa, fcxp_tag);

	fcxp_rsp->rsp_len = be32_to_cpu(fcxp_rsp->rsp_len);

	/*
	 * @todo f/w should not set residue to non-0 when everything
	 * is received.
	 */
	if (fcxp_rsp->req_status == BFA_STATUS_OK)
		fcxp_rsp->residue_len = 0;
	else
		fcxp_rsp->residue_len = be32_to_cpu(fcxp_rsp->residue_len);

	fcxp = BFA_FCXP_FROM_TAG(mod, fcxp_tag);

	/*
	 * NOTE(review): this assert makes the "else" branch of the
	 * send_cbfn NULL check below unreachable in debug builds;
	 * the runtime check is belt-and-braces for release builds.
	 */
	bfa_assert(fcxp->send_cbfn != NULL);

	hal_fcxp_rx_plog(mod->bfa, fcxp, fcxp_rsp);

	if (fcxp->send_cbfn != NULL) {
		bfa_trc(mod->bfa, (NULL == fcxp->caller));
		if (fcxp->caller == NULL) {
			fcxp->send_cbfn(fcxp->caller, fcxp, fcxp->send_cbarg,
					fcxp_rsp->req_status, fcxp_rsp->rsp_len,
					fcxp_rsp->residue_len, &fcxp_rsp->fchs);
			/*
			 * fcxp automatically freed on return from the callback
			 */
			bfa_fcxp_free(fcxp);
		} else {
			/* Stash the response; completion runs later. */
			fcxp->rsp_status = fcxp_rsp->req_status;
			fcxp->rsp_len = fcxp_rsp->rsp_len;
			fcxp->residue_len = fcxp_rsp->residue_len;
			fcxp->rsp_fchs = fcxp_rsp->fchs;

			bfa_cb_queue(bfa, &fcxp->hcb_qe,
					__bfa_fcxp_send_cbfn, fcxp);
		}
	} else {
		bfa_trc(bfa, (NULL == fcxp->send_cbfn));
	}
}
748
/*
 * Fill a two-entry inline SGE pair for a single local buffer: the
 * first SGE carries the data address/length, the second is the
 * zero-address PGDLEN terminator.  Both are converted to big-endian.
 */
static void
hal_fcxp_set_local_sges(struct bfi_sge_s *sge, u32 reqlen, u64 req_pa)
{
	union bfi_addr_u      sga_zero = { {0} };

	sge->sg_len = reqlen;
	sge->flags = BFI_SGE_DATA_LAST;
	bfa_dma_addr_set(sge[0].sga, req_pa);
	bfa_sge_to_be(sge);
	sge++;

	sge->sga = sga_zero;
	sge->sg_len = reqlen;
	sge->flags = BFI_SGE_PGDLEN;
	bfa_sge_to_be(sge);
}
765
766static void
767hal_fcxp_tx_plog(struct bfa_s *bfa, u32 reqlen, struct bfa_fcxp_s *fcxp,
768 struct fchs_s *fchs)
769{
770 /*
771 * TODO: TX ox_id
772 */
773 if (reqlen > 0) {
774 if (fcxp->use_ireqbuf) {
775 u32 pld_w0 =
776 *((u32 *) BFA_FCXP_REQ_PLD(fcxp));
777
778 bfa_plog_fchdr_and_pl(bfa->plog, BFA_PL_MID_HAL_FCXP,
779 BFA_PL_EID_TX,
780 reqlen + sizeof(struct fchs_s), fchs,
781 pld_w0);
782 } else {
783 bfa_plog_fchdr(bfa->plog, BFA_PL_MID_HAL_FCXP,
784 BFA_PL_EID_TX,
785 reqlen + sizeof(struct fchs_s),
786 fchs);
787 }
788 } else {
789 bfa_plog_fchdr(bfa->plog, BFA_PL_MID_HAL_FCXP, BFA_PL_EID_TX,
790 reqlen + sizeof(struct fchs_s), fchs);
791 }
792}
793
794static void
795hal_fcxp_rx_plog(struct bfa_s *bfa, struct bfa_fcxp_s *fcxp,
796 struct bfi_fcxp_send_rsp_s *fcxp_rsp)
797{
798 if (fcxp_rsp->rsp_len > 0) {
799 if (fcxp->use_irspbuf) {
800 u32 pld_w0 =
801 *((u32 *) BFA_FCXP_RSP_PLD(fcxp));
802
803 bfa_plog_fchdr_and_pl(bfa->plog, BFA_PL_MID_HAL_FCXP,
804 BFA_PL_EID_RX,
805 (u16) fcxp_rsp->rsp_len,
806 &fcxp_rsp->fchs, pld_w0);
807 } else {
808 bfa_plog_fchdr(bfa->plog, BFA_PL_MID_HAL_FCXP,
809 BFA_PL_EID_RX,
810 (u16) fcxp_rsp->rsp_len,
811 &fcxp_rsp->fchs);
812 }
813 } else {
814 bfa_plog_fchdr(bfa->plog, BFA_PL_MID_HAL_FCXP, BFA_PL_EID_RX,
815 (u16) fcxp_rsp->rsp_len, &fcxp_rsp->fchs);
816 }
817}
818
/*
 * Handler to resume sending fcxp when space is available in cpe queue.
 */
/*
 * Request-queue resume handler: retry queuing the FCXP send request
 * once CQ space becomes available again.
 */
static void
bfa_fcxp_qresume(void *cbarg)
{
	struct bfa_fcxp_s *fcxp = cbarg;
	struct bfa_s *bfa = fcxp->fcxp_mod->bfa;
	struct bfi_fcxp_send_req_s *send_req;

	fcxp->reqq_waiting = BFA_FALSE;
	send_req = bfa_reqq_next(bfa, BFA_REQQ_FCXP);
	bfa_fcxp_queue(fcxp, send_req);
}
833
/*
 * Queue fcxp send request to firmware.
 */
/*
 * Build the BFI send request for an FCXP and post it on the firmware
 * request queue: message header, rport/frame-size fields, request and
 * response SGE setup, then produce on BFA_REQQ_FCXP.
 */
static void
bfa_fcxp_queue(struct bfa_fcxp_s *fcxp, struct bfi_fcxp_send_req_s *send_req)
{
	struct bfa_s			*bfa = fcxp->fcxp_mod->bfa;
	struct bfa_fcxp_req_info_s	*reqi = &fcxp->req_info;
	struct bfa_fcxp_rsp_info_s	*rspi = &fcxp->rsp_info;
	struct bfa_rport_s		*rport = reqi->bfa_rport;

	bfi_h2i_set(send_req->mh, BFI_MC_FCXP, BFI_FCXP_H2I_SEND_REQ,
		    bfa_lpuid(bfa));

	send_req->fcxp_tag = cpu_to_be16(fcxp->fcxp_tag);
	if (rport) {
		send_req->rport_fw_hndl = rport->fw_handle;
		send_req->max_frmsz = cpu_to_be16(rport->rport_info.max_frmsz);
		/* Fall back to the FC maximum when the rport reports 0. */
		if (send_req->max_frmsz == 0)
			send_req->max_frmsz = cpu_to_be16(FC_MAX_PDUSZ);
	} else {
		send_req->rport_fw_hndl = 0;
		send_req->max_frmsz = cpu_to_be16(FC_MAX_PDUSZ);
	}

	send_req->vf_id = cpu_to_be16(reqi->vf_id);
	send_req->lp_tag = reqi->lp_tag;
	send_req->class = reqi->class;
	send_req->rsp_timeout = rspi->rsp_timeout;
	send_req->cts = reqi->cts;
	send_req->fchs = reqi->fchs;

	send_req->req_len = cpu_to_be32(reqi->req_tot_len);
	send_req->rsp_maxlen = cpu_to_be32(rspi->rsp_maxlen);

	/*
	 * setup req sgles
	 */
	if (fcxp->use_ireqbuf == 1) {
		hal_fcxp_set_local_sges(send_req->req_sge, reqi->req_tot_len,
					BFA_FCXP_REQ_PLD_PA(fcxp));
	} else {
		if (fcxp->nreq_sgles > 0) {
			/* Only a single external request SGE is supported. */
			bfa_assert(fcxp->nreq_sgles == 1);
			hal_fcxp_set_local_sges(send_req->req_sge,
						reqi->req_tot_len,
						fcxp->req_sga_cbfn(fcxp->caller,
								   0));
		} else {
			bfa_assert(reqi->req_tot_len == 0);
			/*
			 * NOTE(review): zero-length request path writes
			 * rsp_sge, not req_sge -- looks like a copy/paste
			 * slip, but it is harmless only if the firmware
			 * ignores req SGEs when req_len is 0.  Confirm
			 * before changing.
			 */
			hal_fcxp_set_local_sges(send_req->rsp_sge, 0, 0);
		}
	}

	/*
	 * setup rsp sgles
	 */
	if (fcxp->use_irspbuf == 1) {
		bfa_assert(rspi->rsp_maxlen <= BFA_FCXP_MAX_LBUF_SZ);

		hal_fcxp_set_local_sges(send_req->rsp_sge, rspi->rsp_maxlen,
					BFA_FCXP_RSP_PLD_PA(fcxp));

	} else {
		if (fcxp->nrsp_sgles > 0) {
			/* Only a single external response SGE is supported. */
			bfa_assert(fcxp->nrsp_sgles == 1);
			hal_fcxp_set_local_sges(send_req->rsp_sge,
						rspi->rsp_maxlen,
						fcxp->rsp_sga_cbfn(fcxp->caller,
								   0));
		} else {
			bfa_assert(rspi->rsp_maxlen == 0);
			hal_fcxp_set_local_sges(send_req->rsp_sge, 0, 0);
		}
	}

	hal_fcxp_tx_plog(bfa, reqi->req_tot_len, fcxp, &reqi->fchs);

	bfa_reqq_produce(bfa, BFA_REQQ_FCXP);

	bfa_trc(bfa, bfa_reqq_pi(bfa, BFA_REQQ_FCXP));
	bfa_trc(bfa, bfa_reqq_ci(bfa, BFA_REQQ_FCXP));
}
917
Jing Huang5fbe25c2010-10-18 17:17:23 -0700918/*
Krishna Gudipatia36c61f2010-09-15 11:50:55 -0700919 * Allocate an FCXP instance to send a response or to send a request
920 * that has a response. Request/response buffers are allocated by caller.
921 *
922 * @param[in] bfa BFA bfa instance
923 * @param[in] nreq_sgles Number of SG elements required for request
924 * buffer. 0, if fcxp internal buffers are used.
925 * Use bfa_fcxp_get_reqbuf() to get the
926 * internal req buffer.
927 * @param[in] req_sgles SG elements describing request buffer. Will be
928 * copied in by BFA and hence can be freed on
929 * return from this function.
930 * @param[in] get_req_sga function ptr to be called to get a request SG
931 * Address (given the sge index).
932 * @param[in] get_req_sglen function ptr to be called to get a request SG
933 * len (given the sge index).
934 * @param[in] get_rsp_sga function ptr to be called to get a response SG
935 * Address (given the sge index).
936 * @param[in] get_rsp_sglen function ptr to be called to get a response SG
937 * len (given the sge index).
938 *
939 * @return FCXP instance. NULL on failure.
940 */
941struct bfa_fcxp_s *
942bfa_fcxp_alloc(void *caller, struct bfa_s *bfa, int nreq_sgles,
943 int nrsp_sgles, bfa_fcxp_get_sgaddr_t req_sga_cbfn,
944 bfa_fcxp_get_sglen_t req_sglen_cbfn,
945 bfa_fcxp_get_sgaddr_t rsp_sga_cbfn,
946 bfa_fcxp_get_sglen_t rsp_sglen_cbfn)
947{
948 struct bfa_fcxp_s *fcxp = NULL;
949
950 bfa_assert(bfa != NULL);
951
952 fcxp = bfa_fcxp_get(BFA_FCXP_MOD(bfa));
953 if (fcxp == NULL)
954 return NULL;
955
956 bfa_trc(bfa, fcxp->fcxp_tag);
957
958 bfa_fcxp_init(fcxp, caller, bfa, nreq_sgles, nrsp_sgles, req_sga_cbfn,
959 req_sglen_cbfn, rsp_sga_cbfn, rsp_sglen_cbfn);
960
961 return fcxp;
962}
963
Jing Huang5fbe25c2010-10-18 17:17:23 -0700964/*
Krishna Gudipatia36c61f2010-09-15 11:50:55 -0700965 * Get the internal request buffer pointer
966 *
967 * @param[in] fcxp BFA fcxp pointer
968 *
969 * @return pointer to the internal request buffer
970 */
971void *
972bfa_fcxp_get_reqbuf(struct bfa_fcxp_s *fcxp)
973{
974 struct bfa_fcxp_mod_s *mod = fcxp->fcxp_mod;
975 void *reqbuf;
976
977 bfa_assert(fcxp->use_ireqbuf == 1);
978 reqbuf = ((u8 *)mod->req_pld_list_kva) +
979 fcxp->fcxp_tag * mod->req_pld_sz;
980 return reqbuf;
981}
982
983u32
984bfa_fcxp_get_reqbufsz(struct bfa_fcxp_s *fcxp)
985{
986 struct bfa_fcxp_mod_s *mod = fcxp->fcxp_mod;
987
988 return mod->req_pld_sz;
989}
990
Jing Huang5fbe25c2010-10-18 17:17:23 -0700991/*
Krishna Gudipatia36c61f2010-09-15 11:50:55 -0700992 * Get the internal response buffer pointer
993 *
994 * @param[in] fcxp BFA fcxp pointer
995 *
 * @return		pointer to the internal response buffer
997 */
998void *
999bfa_fcxp_get_rspbuf(struct bfa_fcxp_s *fcxp)
1000{
1001 struct bfa_fcxp_mod_s *mod = fcxp->fcxp_mod;
1002 void *rspbuf;
1003
1004 bfa_assert(fcxp->use_irspbuf == 1);
1005
1006 rspbuf = ((u8 *)mod->rsp_pld_list_kva) +
1007 fcxp->fcxp_tag * mod->rsp_pld_sz;
1008 return rspbuf;
1009}
1010
Jing Huang5fbe25c2010-10-18 17:17:23 -07001011/*
Maggie Zhangda99dcc2010-12-09 19:13:20 -08001012 * Free the BFA FCXP
Krishna Gudipatia36c61f2010-09-15 11:50:55 -07001013 *
1014 * @param[in] fcxp BFA fcxp pointer
1015 *
1016 * @return void
1017 */
1018void
1019bfa_fcxp_free(struct bfa_fcxp_s *fcxp)
1020{
1021 struct bfa_fcxp_mod_s *mod = fcxp->fcxp_mod;
1022
1023 bfa_assert(fcxp != NULL);
1024 bfa_trc(mod->bfa, fcxp->fcxp_tag);
1025 bfa_fcxp_put(fcxp);
1026}
1027
Jing Huang5fbe25c2010-10-18 17:17:23 -07001028/*
Krishna Gudipatia36c61f2010-09-15 11:50:55 -07001029 * Send a FCXP request
1030 *
1031 * @param[in] fcxp BFA fcxp pointer
1032 * @param[in] rport BFA rport pointer. Could be left NULL for WKA rports
1033 * @param[in] vf_id virtual Fabric ID
1034 * @param[in] lp_tag lport tag
 * @param[in]	cts		use Continuous sequence
1036 * @param[in] cos fc Class of Service
1037 * @param[in] reqlen request length, does not include FCHS length
1038 * @param[in] fchs fc Header Pointer. The header content will be copied
1039 * in by BFA.
1040 *
1041 * @param[in] cbfn call back function to be called on receiving
1042 * the response
1043 * @param[in] cbarg arg for cbfn
1044 * @param[in] rsp_timeout
1045 * response timeout
1046 *
 * @return		void
1048 */
void
bfa_fcxp_send(struct bfa_fcxp_s *fcxp, struct bfa_rport_s *rport,
	      u16 vf_id, u8 lp_tag, bfa_boolean_t cts, enum fc_cos cos,
	      u32 reqlen, struct fchs_s *fchs, bfa_cb_fcxp_send_t cbfn,
	      void *cbarg, u32 rsp_maxlen, u8 rsp_timeout)
{
	struct bfa_s *bfa = fcxp->fcxp_mod->bfa;
	struct bfa_fcxp_req_info_s *reqi = &fcxp->req_info;
	struct bfa_fcxp_rsp_info_s *rspi = &fcxp->rsp_info;
	struct bfi_fcxp_send_req_s *send_req;

	bfa_trc(bfa, fcxp->fcxp_tag);

	/*
	 * setup request/response info
	 */
	reqi->bfa_rport = rport;
	reqi->vf_id = vf_id;
	reqi->lp_tag = lp_tag;
	reqi->class = cos;
	rspi->rsp_timeout = rsp_timeout;
	reqi->cts = cts;
	reqi->fchs = *fchs;	/* header is copied; caller may free fchs */
	reqi->req_tot_len = reqlen;
	rspi->rsp_maxlen = rsp_maxlen;
	/* Substitute a no-op completion when the caller passed none */
	fcxp->send_cbfn = cbfn ? cbfn : bfa_fcxp_null_comp;
	fcxp->send_cbarg = cbarg;

	/*
	 * If no room in CPE queue, wait for space in request queue
	 */
	send_req = bfa_reqq_next(bfa, BFA_REQQ_FCXP);
	if (!send_req) {
		/* Queue full: mark waiting and retry when space frees up */
		bfa_trc(bfa, fcxp->fcxp_tag);
		fcxp->reqq_waiting = BFA_TRUE;
		bfa_reqq_wait(bfa, BFA_REQQ_FCXP, &fcxp->reqq_wqe);
		return;
	}

	/* Build and post the firmware send request */
	bfa_fcxp_queue(fcxp, send_req);
}
1090
Jing Huang5fbe25c2010-10-18 17:17:23 -07001091/*
Krishna Gudipatia36c61f2010-09-15 11:50:55 -07001092 * Abort a BFA FCXP
1093 *
1094 * @param[in] fcxp BFA fcxp pointer
1095 *
1096 * @return void
1097 */
bfa_status_t
bfa_fcxp_abort(struct bfa_fcxp_s *fcxp)
{
	/* Abort is not implemented -- trace and assert if ever invoked */
	bfa_trc(fcxp->fcxp_mod->bfa, fcxp->fcxp_tag);
	bfa_assert(0);
	return BFA_STATUS_OK;
}
1105
/*
 * Queue a wait element to be called back (alloc_cbfn) when an fcxp
 * becomes available; the allocation parameters are saved on the wqe.
 */
void
bfa_fcxp_alloc_wait(struct bfa_s *bfa, struct bfa_fcxp_wqe_s *wqe,
		   bfa_fcxp_alloc_cbfn_t alloc_cbfn, void *alloc_cbarg,
		   void *caller, int nreq_sgles,
		   int nrsp_sgles, bfa_fcxp_get_sgaddr_t req_sga_cbfn,
		   bfa_fcxp_get_sglen_t req_sglen_cbfn,
		   bfa_fcxp_get_sgaddr_t rsp_sga_cbfn,
		   bfa_fcxp_get_sglen_t rsp_sglen_cbfn)
{
	struct bfa_fcxp_mod_s *mod = BFA_FCXP_MOD(bfa);

	/* Callers should only wait when the free pool is exhausted */
	bfa_assert(list_empty(&mod->fcxp_free_q));

	/* Save allocation parameters for the deferred allocation */
	wqe->alloc_cbfn = alloc_cbfn;
	wqe->alloc_cbarg = alloc_cbarg;
	wqe->caller = caller;
	wqe->bfa = bfa;
	wqe->nreq_sgles = nreq_sgles;
	wqe->nrsp_sgles = nrsp_sgles;
	wqe->req_sga_cbfn = req_sga_cbfn;
	wqe->req_sglen_cbfn = req_sglen_cbfn;
	wqe->rsp_sga_cbfn = rsp_sga_cbfn;
	wqe->rsp_sglen_cbfn = rsp_sglen_cbfn;

	list_add_tail(&wqe->qe, &mod->wait_q);
}
1132
/*
 * Cancel a pending fcxp allocation wait queued by bfa_fcxp_alloc_wait().
 */
void
bfa_fcxp_walloc_cancel(struct bfa_s *bfa, struct bfa_fcxp_wqe_s *wqe)
{
	struct bfa_fcxp_mod_s *mod = BFA_FCXP_MOD(bfa);

	/* The wait element must still be linked on the module wait queue */
	bfa_assert(bfa_q_is_on_q(&mod->wait_q, wqe));
	list_del(&wqe->qe);
}
1141
1142void
1143bfa_fcxp_discard(struct bfa_fcxp_s *fcxp)
1144{
Jing Huang5fbe25c2010-10-18 17:17:23 -07001145 /*
Krishna Gudipatia36c61f2010-09-15 11:50:55 -07001146 * If waiting for room in request queue, cancel reqq wait
1147 * and free fcxp.
1148 */
1149 if (fcxp->reqq_waiting) {
1150 fcxp->reqq_waiting = BFA_FALSE;
1151 bfa_reqq_wcancel(&fcxp->reqq_wqe);
1152 bfa_fcxp_free(fcxp);
1153 return;
1154 }
1155
1156 fcxp->send_cbfn = bfa_fcxp_null_comp;
1157}
1158
Krishna Gudipatia36c61f2010-09-15 11:50:55 -07001159void
1160bfa_fcxp_isr(struct bfa_s *bfa, struct bfi_msg_s *msg)
1161{
1162 switch (msg->mhdr.msg_id) {
1163 case BFI_FCXP_I2H_SEND_RSP:
1164 hal_fcxp_send_comp(bfa, (struct bfi_fcxp_send_rsp_s *) msg);
1165 break;
1166
1167 default:
1168 bfa_trc(bfa, msg->mhdr.msg_id);
1169 bfa_assert(0);
1170 }
1171}
1172
1173u32
1174bfa_fcxp_get_maxrsp(struct bfa_s *bfa)
1175{
1176 struct bfa_fcxp_mod_s *mod = BFA_FCXP_MOD(bfa);
1177
1178 return mod->rsp_pld_sz;
1179}
1180
1181
Jing Huang5fbe25c2010-10-18 17:17:23 -07001182/*
Krishna Gudipatia36c61f2010-09-15 11:50:55 -07001183 * BFA LPS state machine functions
1184 */
1185
Jing Huang5fbe25c2010-10-18 17:17:23 -07001186/*
Krishna Gudipatia36c61f2010-09-15 11:50:55 -07001187 * Init state -- no login
1188 */
static void
bfa_lps_sm_init(struct bfa_lps_s *lps, enum bfa_lps_event event)
{
	bfa_trc(lps->bfa, lps->lp_tag);
	bfa_trc(lps->bfa, event);

	switch (event) {
	case BFA_LPS_SM_LOGIN:
		/*
		 * If the request queue is full, park in loginwait until
		 * space frees up; otherwise send the login right away.
		 */
		if (bfa_reqq_full(lps->bfa, lps->reqq)) {
			bfa_sm_set_state(lps, bfa_lps_sm_loginwait);
			bfa_reqq_wait(lps->bfa, lps->reqq, &lps->wqe);
		} else {
			bfa_sm_set_state(lps, bfa_lps_sm_login);
			bfa_lps_send_login(lps);
		}

		/* fdisc set => vport (FDISC); clear => base port (FLOGI) */
		if (lps->fdisc)
			bfa_plog_str(lps->bfa->plog, BFA_PL_MID_LPS,
				BFA_PL_EID_LOGIN, 0, "FDISC Request");
		else
			bfa_plog_str(lps->bfa->plog, BFA_PL_MID_LPS,
				BFA_PL_EID_LOGIN, 0, "FLOGI Request");
		break;

	case BFA_LPS_SM_LOGOUT:
		/* Not logged in -- complete the logout immediately */
		bfa_lps_logout_comp(lps);
		break;

	case BFA_LPS_SM_DELETE:
		/* Return the lps to the free pool */
		bfa_lps_free(lps);
		break;

	case BFA_LPS_SM_RX_CVL:
	case BFA_LPS_SM_OFFLINE:
		/* Harmless while not logged in */
		break;

	case BFA_LPS_SM_FWRSP:
		/*
		 * Could happen when fabric detects loopback and discards
		 * the lps request. Fw will eventually sent out the timeout
		 * Just ignore
		 */
		break;

	default:
		bfa_sm_fault(lps->bfa, event);
	}
}
1237
Jing Huang5fbe25c2010-10-18 17:17:23 -07001238/*
Krishna Gudipatia36c61f2010-09-15 11:50:55 -07001239 * login is in progress -- awaiting response from firmware
1240 */
static void
bfa_lps_sm_login(struct bfa_lps_s *lps, enum bfa_lps_event event)
{
	bfa_trc(lps->bfa, lps->lp_tag);
	bfa_trc(lps->bfa, event);

	switch (event) {
	case BFA_LPS_SM_FWRSP:
		/* lps->status was filled in by the firmware response */
		if (lps->status == BFA_STATUS_OK) {
			bfa_sm_set_state(lps, bfa_lps_sm_online);
			if (lps->fdisc)
				bfa_plog_str(lps->bfa->plog, BFA_PL_MID_LPS,
					BFA_PL_EID_LOGIN, 0, "FDISC Accept");
			else
				bfa_plog_str(lps->bfa->plog, BFA_PL_MID_LPS,
					BFA_PL_EID_LOGIN, 0, "FLOGI Accept");
		} else {
			/* Login failed -- fall back to init */
			bfa_sm_set_state(lps, bfa_lps_sm_init);
			if (lps->fdisc)
				bfa_plog_str(lps->bfa->plog, BFA_PL_MID_LPS,
					BFA_PL_EID_LOGIN, 0,
					"FDISC Fail (RJT or timeout)");
			else
				bfa_plog_str(lps->bfa->plog, BFA_PL_MID_LPS,
					BFA_PL_EID_LOGIN, 0,
					"FLOGI Fail (RJT or timeout)");
		}
		/* Notify the caller in either case */
		bfa_lps_login_comp(lps);
		break;

	case BFA_LPS_SM_OFFLINE:
		/* Port went offline while login was outstanding */
		bfa_sm_set_state(lps, bfa_lps_sm_init);
		break;

	default:
		bfa_sm_fault(lps->bfa, event);
	}
}
1279
Jing Huang5fbe25c2010-10-18 17:17:23 -07001280/*
Krishna Gudipatia36c61f2010-09-15 11:50:55 -07001281 * login pending - awaiting space in request queue
1282 */
static void
bfa_lps_sm_loginwait(struct bfa_lps_s *lps, enum bfa_lps_event event)
{
	bfa_trc(lps->bfa, lps->lp_tag);
	bfa_trc(lps->bfa, event);

	switch (event) {
	case BFA_LPS_SM_RESUME:
		/* Queue space available; the login state sends the request */
		bfa_sm_set_state(lps, bfa_lps_sm_login);
		break;

	case BFA_LPS_SM_OFFLINE:
		/* Cancel the queued wait element before going back to init */
		bfa_sm_set_state(lps, bfa_lps_sm_init);
		bfa_reqq_wcancel(&lps->wqe);
		break;

	case BFA_LPS_SM_RX_CVL:
		/*
		 * Login was not even sent out; so when getting out
		 * of this state, it will appear like a login retry
		 * after Clear virtual link
		 */
		break;

	default:
		bfa_sm_fault(lps->bfa, event);
	}
}
1311
Jing Huang5fbe25c2010-10-18 17:17:23 -07001312/*
Krishna Gudipatia36c61f2010-09-15 11:50:55 -07001313 * login complete
1314 */
static void
bfa_lps_sm_online(struct bfa_lps_s *lps, enum bfa_lps_event event)
{
	bfa_trc(lps->bfa, lps->lp_tag);
	bfa_trc(lps->bfa, event);

	switch (event) {
	case BFA_LPS_SM_LOGOUT:
		/*
		 * Same queue-full pattern as login: wait for request-queue
		 * space or send the logout immediately.
		 */
		if (bfa_reqq_full(lps->bfa, lps->reqq)) {
			bfa_sm_set_state(lps, bfa_lps_sm_logowait);
			bfa_reqq_wait(lps->bfa, lps->reqq, &lps->wqe);
		} else {
			bfa_sm_set_state(lps, bfa_lps_sm_logout);
			bfa_lps_send_logout(lps);
		}
		bfa_plog_str(lps->bfa->plog, BFA_PL_MID_LPS,
			BFA_PL_EID_LOGO, 0, "Logout");
		break;

	case BFA_LPS_SM_RX_CVL:
		bfa_sm_set_state(lps, bfa_lps_sm_init);

		/* Let the vport module know about this event */
		bfa_lps_cvl_event(lps);
		bfa_plog_str(lps->bfa->plog, BFA_PL_MID_LPS,
			BFA_PL_EID_FIP_FCF_CVL, 0, "FCF Clear Virt. Link Rx");
		break;

	case BFA_LPS_SM_OFFLINE:
	case BFA_LPS_SM_DELETE:
		/* Drop straight back to init; no firmware interaction */
		bfa_sm_set_state(lps, bfa_lps_sm_init);
		break;

	default:
		bfa_sm_fault(lps->bfa, event);
	}
}
1352
Jing Huang5fbe25c2010-10-18 17:17:23 -07001353/*
Krishna Gudipatia36c61f2010-09-15 11:50:55 -07001354 * logout in progress - awaiting firmware response
1355 */
static void
bfa_lps_sm_logout(struct bfa_lps_s *lps, enum bfa_lps_event event)
{
	bfa_trc(lps->bfa, lps->lp_tag);
	bfa_trc(lps->bfa, event);

	switch (event) {
	case BFA_LPS_SM_FWRSP:
		/* Logout acknowledged by firmware; notify the caller */
		bfa_sm_set_state(lps, bfa_lps_sm_init);
		bfa_lps_logout_comp(lps);
		break;

	case BFA_LPS_SM_OFFLINE:
		/* Port offline supersedes the pending logout */
		bfa_sm_set_state(lps, bfa_lps_sm_init);
		break;

	default:
		bfa_sm_fault(lps->bfa, event);
	}
}
1376
Jing Huang5fbe25c2010-10-18 17:17:23 -07001377/*
Krishna Gudipatia36c61f2010-09-15 11:50:55 -07001378 * logout pending -- awaiting space in request queue
1379 */
static void
bfa_lps_sm_logowait(struct bfa_lps_s *lps, enum bfa_lps_event event)
{
	bfa_trc(lps->bfa, lps->lp_tag);
	bfa_trc(lps->bfa, event);

	switch (event) {
	case BFA_LPS_SM_RESUME:
		/* Queue space available; send the deferred logout */
		bfa_sm_set_state(lps, bfa_lps_sm_logout);
		bfa_lps_send_logout(lps);
		break;

	case BFA_LPS_SM_OFFLINE:
		/* Cancel the queued wait element before going back to init */
		bfa_sm_set_state(lps, bfa_lps_sm_init);
		bfa_reqq_wcancel(&lps->wqe);
		break;

	default:
		bfa_sm_fault(lps->bfa, event);
	}
}
1401
1402
1403
Jing Huang5fbe25c2010-10-18 17:17:23 -07001404/*
Krishna Gudipatia36c61f2010-09-15 11:50:55 -07001405 * lps_pvt BFA LPS private functions
1406 */
1407
Jing Huang5fbe25c2010-10-18 17:17:23 -07001408/*
Krishna Gudipatia36c61f2010-09-15 11:50:55 -07001409 * return memory requirement
1410 */
1411static void
1412bfa_lps_meminfo(struct bfa_iocfc_cfg_s *cfg, u32 *ndm_len,
1413 u32 *dm_len)
1414{
1415 if (cfg->drvcfg.min_cfg)
1416 *ndm_len += sizeof(struct bfa_lps_s) * BFA_LPS_MIN_LPORTS;
1417 else
1418 *ndm_len += sizeof(struct bfa_lps_s) * BFA_LPS_MAX_LPORTS;
1419}
1420
Jing Huang5fbe25c2010-10-18 17:17:23 -07001421/*
Krishna Gudipatia36c61f2010-09-15 11:50:55 -07001422 * bfa module attach at initialization time
1423 */
1424static void
1425bfa_lps_attach(struct bfa_s *bfa, void *bfad, struct bfa_iocfc_cfg_s *cfg,
1426 struct bfa_meminfo_s *meminfo, struct bfa_pcidev_s *pcidev)
1427{
1428 struct bfa_lps_mod_s *mod = BFA_LPS_MOD(bfa);
1429 struct bfa_lps_s *lps;
1430 int i;
1431
Jing Huang6a18b162010-10-18 17:08:54 -07001432 memset(mod, 0, sizeof(struct bfa_lps_mod_s));
Krishna Gudipatia36c61f2010-09-15 11:50:55 -07001433 mod->num_lps = BFA_LPS_MAX_LPORTS;
1434 if (cfg->drvcfg.min_cfg)
1435 mod->num_lps = BFA_LPS_MIN_LPORTS;
1436 else
1437 mod->num_lps = BFA_LPS_MAX_LPORTS;
1438 mod->lps_arr = lps = (struct bfa_lps_s *) bfa_meminfo_kva(meminfo);
1439
1440 bfa_meminfo_kva(meminfo) += mod->num_lps * sizeof(struct bfa_lps_s);
1441
1442 INIT_LIST_HEAD(&mod->lps_free_q);
1443 INIT_LIST_HEAD(&mod->lps_active_q);
1444
1445 for (i = 0; i < mod->num_lps; i++, lps++) {
1446 lps->bfa = bfa;
1447 lps->lp_tag = (u8) i;
1448 lps->reqq = BFA_REQQ_LPS;
1449 bfa_reqq_winit(&lps->wqe, bfa_lps_reqq_resume, lps);
1450 list_add_tail(&lps->qe, &mod->lps_free_q);
1451 }
1452}
1453
static void
bfa_lps_detach(struct bfa_s *bfa)
{
	/* Intentionally empty: LPS has no per-detach cleanup */
}
1458
static void
bfa_lps_start(struct bfa_s *bfa)
{
	/* Intentionally empty: LPS needs no work at start time */
}
1463
static void
bfa_lps_stop(struct bfa_s *bfa)
{
	/* Intentionally empty: LPS needs no work at stop time */
}
1468
Jing Huang5fbe25c2010-10-18 17:17:23 -07001469/*
Krishna Gudipatia36c61f2010-09-15 11:50:55 -07001470 * IOC in disabled state -- consider all lps offline
1471 */
static void
bfa_lps_iocdisable(struct bfa_s *bfa)
{
	struct bfa_lps_mod_s *mod = BFA_LPS_MOD(bfa);
	struct bfa_lps_s *lps;
	struct list_head *qe, *qen;

	/*
	 * Safe iteration: the OFFLINE event can trigger bfa_lps_free(),
	 * which moves the lps off the active list while we walk it.
	 */
	list_for_each_safe(qe, qen, &mod->lps_active_q) {
		lps = (struct bfa_lps_s *) qe;
		bfa_sm_send_event(lps, BFA_LPS_SM_OFFLINE);
	}
}
1484
Jing Huang5fbe25c2010-10-18 17:17:23 -07001485/*
Krishna Gudipatia36c61f2010-09-15 11:50:55 -07001486 * Firmware login response
1487 */
static void
bfa_lps_login_rsp(struct bfa_s *bfa, struct bfi_lps_login_rsp_s *rsp)
{
	struct bfa_lps_mod_s *mod = BFA_LPS_MOD(bfa);
	struct bfa_lps_s *lps;

	/* lp_tag comes from firmware -- validate before indexing */
	bfa_assert(rsp->lp_tag < mod->num_lps);
	lps = BFA_LPS_FROM_TAG(mod, rsp->lp_tag);

	lps->status = rsp->status;
	switch (rsp->status) {
	case BFA_STATUS_OK:
		/* Login accepted: cache the fabric-assigned parameters */
		lps->fport = rsp->f_port;
		lps->npiv_en = rsp->npiv_en;
		lps->lp_pid = rsp->lp_pid;
		/* bb_credit arrives big-endian from firmware */
		lps->pr_bbcred = be16_to_cpu(rsp->bb_credit);
		lps->pr_pwwn = rsp->port_name;
		lps->pr_nwwn = rsp->node_name;
		lps->auth_req = rsp->auth_req;
		lps->lp_mac = rsp->lp_mac;
		lps->brcd_switch = rsp->brcd_switch;
		lps->fcf_mac = rsp->fcf_mac;

		break;

	case BFA_STATUS_FABRIC_RJT:
		/* LS_RJT: record reason and explanation codes */
		lps->lsrjt_rsn = rsp->lsrjt_rsn;
		lps->lsrjt_expl = rsp->lsrjt_expl;

		break;

	case BFA_STATUS_EPROTOCOL:
		lps->ext_status = rsp->ext_status;

		break;

	default:
		/* Nothing to do with other status */
		break;
	}

	/* Drive the state machine regardless of the login outcome */
	bfa_sm_send_event(lps, BFA_LPS_SM_FWRSP);
}
1531
Jing Huang5fbe25c2010-10-18 17:17:23 -07001532/*
Krishna Gudipatia36c61f2010-09-15 11:50:55 -07001533 * Firmware logout response
1534 */
static void
bfa_lps_logout_rsp(struct bfa_s *bfa, struct bfi_lps_logout_rsp_s *rsp)
{
	struct bfa_lps_mod_s *mod = BFA_LPS_MOD(bfa);
	struct bfa_lps_s *lps;

	/* lp_tag comes from firmware -- validate before indexing */
	bfa_assert(rsp->lp_tag < mod->num_lps);
	lps = BFA_LPS_FROM_TAG(mod, rsp->lp_tag);

	bfa_sm_send_event(lps, BFA_LPS_SM_FWRSP);
}
1546
Jing Huang5fbe25c2010-10-18 17:17:23 -07001547/*
Krishna Gudipatia36c61f2010-09-15 11:50:55 -07001548 * Firmware received a Clear virtual link request (for FCoE)
1549 */
1550static void
1551bfa_lps_rx_cvl_event(struct bfa_s *bfa, struct bfi_lps_cvl_event_s *cvl)
1552{
1553 struct bfa_lps_mod_s *mod = BFA_LPS_MOD(bfa);
1554 struct bfa_lps_s *lps;
1555
1556 lps = BFA_LPS_FROM_TAG(mod, cvl->lp_tag);
1557
1558 bfa_sm_send_event(lps, BFA_LPS_SM_RX_CVL);
1559}
1560
Jing Huang5fbe25c2010-10-18 17:17:23 -07001561/*
Krishna Gudipatia36c61f2010-09-15 11:50:55 -07001562 * Space is available in request queue, resume queueing request to firmware.
1563 */
static void
bfa_lps_reqq_resume(void *lps_arg)
{
	struct bfa_lps_s *lps = lps_arg;

	/* Request queue has space again; resume the waiting login/logout */
	bfa_sm_send_event(lps, BFA_LPS_SM_RESUME);
}
1571
Jing Huang5fbe25c2010-10-18 17:17:23 -07001572/*
Krishna Gudipatia36c61f2010-09-15 11:50:55 -07001573 * lps is freed -- triggered by vport delete
1574 */
static void
bfa_lps_free(struct bfa_lps_s *lps)
{
	struct bfa_lps_mod_s *mod = BFA_LPS_MOD(lps->bfa);

	/* Forget the assigned pid and move from active to free list */
	lps->lp_pid = 0;
	list_del(&lps->qe);
	list_add_tail(&lps->qe, &mod->lps_free_q);
}
1584
Jing Huang5fbe25c2010-10-18 17:17:23 -07001585/*
Krishna Gudipatia36c61f2010-09-15 11:50:55 -07001586 * send login request to firmware
1587 */
static void
bfa_lps_send_login(struct bfa_lps_s *lps)
{
	struct bfi_lps_login_req_s *m;

	/* Callers guarantee queue space (checked via bfa_reqq_full) */
	m = bfa_reqq_next(lps->bfa, lps->reqq);
	bfa_assert(m);

	bfi_h2i_set(m->mh, BFI_MC_LPS, BFI_LPS_H2I_LOGIN_REQ,
		bfa_lpuid(lps->bfa));

	/* Populate the firmware message from the saved login parameters */
	m->lp_tag = lps->lp_tag;
	m->alpa = lps->alpa;
	/* Firmware expects pdu_size in big-endian byte order */
	m->pdu_size = cpu_to_be16(lps->pdusz);
	m->pwwn = lps->pwwn;
	m->nwwn = lps->nwwn;
	m->fdisc = lps->fdisc;
	m->auth_en = lps->auth_en;

	bfa_reqq_produce(lps->bfa, lps->reqq);
}
1609
Jing Huang5fbe25c2010-10-18 17:17:23 -07001610/*
Krishna Gudipatia36c61f2010-09-15 11:50:55 -07001611 * send logout request to firmware
1612 */
static void
bfa_lps_send_logout(struct bfa_lps_s *lps)
{
	struct bfi_lps_logout_req_s *m;

	/* Callers guarantee queue space (checked via bfa_reqq_full) */
	m = bfa_reqq_next(lps->bfa, lps->reqq);
	bfa_assert(m);

	bfi_h2i_set(m->mh, BFI_MC_LPS, BFI_LPS_H2I_LOGOUT_REQ,
		bfa_lpuid(lps->bfa));

	m->lp_tag = lps->lp_tag;
	m->port_name = lps->pwwn;
	bfa_reqq_produce(lps->bfa, lps->reqq);
}
1628
Jing Huang5fbe25c2010-10-18 17:17:23 -07001629/*
Krishna Gudipatia36c61f2010-09-15 11:50:55 -07001630 * Indirect login completion handler for non-fcs
1631 */
1632static void
1633bfa_lps_login_comp_cb(void *arg, bfa_boolean_t complete)
1634{
1635 struct bfa_lps_s *lps = arg;
1636
1637 if (!complete)
1638 return;
1639
1640 if (lps->fdisc)
1641 bfa_cb_lps_fdisc_comp(lps->bfa->bfad, lps->uarg, lps->status);
1642 else
1643 bfa_cb_lps_flogi_comp(lps->bfa->bfad, lps->uarg, lps->status);
1644}
1645
Jing Huang5fbe25c2010-10-18 17:17:23 -07001646/*
Krishna Gudipatia36c61f2010-09-15 11:50:55 -07001647 * Login completion handler -- direct call for fcs, queue for others
1648 */
1649static void
1650bfa_lps_login_comp(struct bfa_lps_s *lps)
1651{
1652 if (!lps->bfa->fcs) {
1653 bfa_cb_queue(lps->bfa, &lps->hcb_qe, bfa_lps_login_comp_cb,
1654 lps);
1655 return;
1656 }
1657
1658 if (lps->fdisc)
1659 bfa_cb_lps_fdisc_comp(lps->bfa->bfad, lps->uarg, lps->status);
1660 else
1661 bfa_cb_lps_flogi_comp(lps->bfa->bfad, lps->uarg, lps->status);
1662}
1663
Jing Huang5fbe25c2010-10-18 17:17:23 -07001664/*
Krishna Gudipatia36c61f2010-09-15 11:50:55 -07001665 * Indirect logout completion handler for non-fcs
1666 */
1667static void
1668bfa_lps_logout_comp_cb(void *arg, bfa_boolean_t complete)
1669{
1670 struct bfa_lps_s *lps = arg;
1671
1672 if (!complete)
1673 return;
1674
1675 if (lps->fdisc)
1676 bfa_cb_lps_fdisclogo_comp(lps->bfa->bfad, lps->uarg);
1677}
1678
Jing Huang5fbe25c2010-10-18 17:17:23 -07001679/*
Krishna Gudipatia36c61f2010-09-15 11:50:55 -07001680 * Logout completion handler -- direct call for fcs, queue for others
1681 */
1682static void
1683bfa_lps_logout_comp(struct bfa_lps_s *lps)
1684{
1685 if (!lps->bfa->fcs) {
1686 bfa_cb_queue(lps->bfa, &lps->hcb_qe, bfa_lps_logout_comp_cb,
1687 lps);
1688 return;
1689 }
1690 if (lps->fdisc)
1691 bfa_cb_lps_fdisclogo_comp(lps->bfa->bfad, lps->uarg);
1692}
1693
Jing Huang5fbe25c2010-10-18 17:17:23 -07001694/*
Krishna Gudipatia36c61f2010-09-15 11:50:55 -07001695 * Clear virtual link completion handler for non-fcs
1696 */
1697static void
1698bfa_lps_cvl_event_cb(void *arg, bfa_boolean_t complete)
1699{
1700 struct bfa_lps_s *lps = arg;
1701
1702 if (!complete)
1703 return;
1704
1705 /* Clear virtual link to base port will result in link down */
1706 if (lps->fdisc)
1707 bfa_cb_lps_cvl_event(lps->bfa->bfad, lps->uarg);
1708}
1709
Jing Huang5fbe25c2010-10-18 17:17:23 -07001710/*
Krishna Gudipatia36c61f2010-09-15 11:50:55 -07001711 * Received Clear virtual link event --direct call for fcs,
1712 * queue for others
1713 */
1714static void
1715bfa_lps_cvl_event(struct bfa_lps_s *lps)
1716{
1717 if (!lps->bfa->fcs) {
1718 bfa_cb_queue(lps->bfa, &lps->hcb_qe, bfa_lps_cvl_event_cb,
1719 lps);
1720 return;
1721 }
1722
1723 /* Clear virtual link to base port will result in link down */
1724 if (lps->fdisc)
1725 bfa_cb_lps_cvl_event(lps->bfa->bfad, lps->uarg);
1726}
1727
1728
1729
Jing Huang5fbe25c2010-10-18 17:17:23 -07001730/*
Krishna Gudipatia36c61f2010-09-15 11:50:55 -07001731 * lps_public BFA LPS public functions
1732 */
1733
1734u32
1735bfa_lps_get_max_vport(struct bfa_s *bfa)
1736{
1737 if (bfa_ioc_devid(&bfa->ioc) == BFA_PCI_DEVICE_ID_CT)
1738 return BFA_LPS_MAX_VPORTS_SUPP_CT;
1739 else
1740 return BFA_LPS_MAX_VPORTS_SUPP_CB;
1741}
1742
Jing Huang5fbe25c2010-10-18 17:17:23 -07001743/*
 * Allocate a lport service tag.
1745 */
1746struct bfa_lps_s *
1747bfa_lps_alloc(struct bfa_s *bfa)
1748{
1749 struct bfa_lps_mod_s *mod = BFA_LPS_MOD(bfa);
1750 struct bfa_lps_s *lps = NULL;
1751
1752 bfa_q_deq(&mod->lps_free_q, &lps);
1753
1754 if (lps == NULL)
1755 return NULL;
1756
1757 list_add_tail(&lps->qe, &mod->lps_active_q);
1758
1759 bfa_sm_set_state(lps, bfa_lps_sm_init);
1760 return lps;
1761}
1762
Jing Huang5fbe25c2010-10-18 17:17:23 -07001763/*
Krishna Gudipatia36c61f2010-09-15 11:50:55 -07001764 * Free lport service tag. This can be called anytime after an alloc.
1765 * No need to wait for any pending login/logout completions.
1766 */
void
bfa_lps_delete(struct bfa_lps_s *lps)
{
	/* The state machine returns the lps to the free pool */
	bfa_sm_send_event(lps, BFA_LPS_SM_DELETE);
}
1772
Jing Huang5fbe25c2010-10-18 17:17:23 -07001773/*
Krishna Gudipatia36c61f2010-09-15 11:50:55 -07001774 * Initiate a lport login.
1775 */
1776void
1777bfa_lps_flogi(struct bfa_lps_s *lps, void *uarg, u8 alpa, u16 pdusz,
1778 wwn_t pwwn, wwn_t nwwn, bfa_boolean_t auth_en)
1779{
1780 lps->uarg = uarg;
1781 lps->alpa = alpa;
1782 lps->pdusz = pdusz;
1783 lps->pwwn = pwwn;
1784 lps->nwwn = nwwn;
1785 lps->fdisc = BFA_FALSE;
1786 lps->auth_en = auth_en;
1787 bfa_sm_send_event(lps, BFA_LPS_SM_LOGIN);
1788}
1789
Jing Huang5fbe25c2010-10-18 17:17:23 -07001790/*
Krishna Gudipatia36c61f2010-09-15 11:50:55 -07001791 * Initiate a lport fdisc login.
1792 */
1793void
1794bfa_lps_fdisc(struct bfa_lps_s *lps, void *uarg, u16 pdusz, wwn_t pwwn,
1795 wwn_t nwwn)
1796{
1797 lps->uarg = uarg;
1798 lps->alpa = 0;
1799 lps->pdusz = pdusz;
1800 lps->pwwn = pwwn;
1801 lps->nwwn = nwwn;
1802 lps->fdisc = BFA_TRUE;
1803 lps->auth_en = BFA_FALSE;
1804 bfa_sm_send_event(lps, BFA_LPS_SM_LOGIN);
1805}
1806
Krishna Gudipatia36c61f2010-09-15 11:50:55 -07001807
Jing Huang5fbe25c2010-10-18 17:17:23 -07001808/*
 * Initiate a lport FDISC logout.
1810 */
void
bfa_lps_fdisclogo(struct bfa_lps_s *lps)
{
	/* Kick off a logout for a previously FDISC-ed virtual lport */
	bfa_sm_send_event(lps, BFA_LPS_SM_LOGOUT);
}
1816
Krishna Gudipatia36c61f2010-09-15 11:50:55 -07001817
Jing Huang5fbe25c2010-10-18 17:17:23 -07001818/*
Krishna Gudipatia36c61f2010-09-15 11:50:55 -07001819 * Return lport services tag given the pid
1820 */
1821u8
1822bfa_lps_get_tag_from_pid(struct bfa_s *bfa, u32 pid)
1823{
1824 struct bfa_lps_mod_s *mod = BFA_LPS_MOD(bfa);
1825 struct bfa_lps_s *lps;
1826 int i;
1827
1828 for (i = 0, lps = mod->lps_arr; i < mod->num_lps; i++, lps++) {
1829 if (lps->lp_pid == pid)
1830 return lps->lp_tag;
1831 }
1832
1833 /* Return base port tag anyway */
1834 return 0;
1835}
1836
Krishna Gudipatia36c61f2010-09-15 11:50:55 -07001837
Jing Huang5fbe25c2010-10-18 17:17:23 -07001838/*
Krishna Gudipatia36c61f2010-09-15 11:50:55 -07001839 * return port id assigned to the base lport
1840 */
u32
bfa_lps_get_base_pid(struct bfa_s *bfa)
{
	struct bfa_lps_mod_s *mod = BFA_LPS_MOD(bfa);

	/* Tag 0 is the base (physical) lport */
	return BFA_LPS_FROM_TAG(mod, 0)->lp_pid;
}
1848
Jing Huang5fbe25c2010-10-18 17:17:23 -07001849/*
Krishna Gudipatia36c61f2010-09-15 11:50:55 -07001850 * LPS firmware message class handler.
1851 */
void
bfa_lps_isr(struct bfa_s *bfa, struct bfi_msg_s *m)
{
	union bfi_lps_i2h_msg_u msg;

	bfa_trc(bfa, m->mhdr.msg_id);
	msg.msg = m;

	/* Dispatch on the firmware message id */
	switch (m->mhdr.msg_id) {
	case BFI_LPS_H2I_LOGIN_RSP:
		bfa_lps_login_rsp(bfa, msg.login_rsp);
		break;

	case BFI_LPS_H2I_LOGOUT_RSP:
		bfa_lps_logout_rsp(bfa, msg.logout_rsp);
		break;

	case BFI_LPS_H2I_CVL_EVENT:
		bfa_lps_rx_cvl_event(bfa, msg.cvl_event);
		break;

	default:
		/* Unknown message id -- trace it and assert */
		bfa_trc(bfa, m->mhdr.msg_id);
		bfa_assert(0);
	}
}
1878
Jing Huang5fbe25c2010-10-18 17:17:23 -07001879/*
Krishna Gudipatia36c61f2010-09-15 11:50:55 -07001880 * FC PORT state machine functions
1881 */
static void
bfa_fcport_sm_uninit(struct bfa_fcport_s *fcport,
			enum bfa_fcport_sm_event event)
{
	bfa_trc(fcport->bfa, event);

	switch (event) {
	case BFA_FCPORT_SM_START:
		/*
		 * Start event after IOC is configured and BFA is started.
		 */
		if (bfa_fcport_send_enable(fcport)) {
			/* Enable request was queued to firmware */
			bfa_trc(fcport->bfa, BFA_TRUE);
			bfa_sm_set_state(fcport, bfa_fcport_sm_enabling);
		} else {
			/* No request-queue space; wait for it */
			bfa_trc(fcport->bfa, BFA_FALSE);
			bfa_sm_set_state(fcport,
					bfa_fcport_sm_enabling_qwait);
		}
		break;

	case BFA_FCPORT_SM_ENABLE:
		/*
		 * Port is persistently configured to be in enabled state. Do
		 * not change state. Port enabling is done when START event is
		 * received.
		 */
		break;

	case BFA_FCPORT_SM_DISABLE:
		/*
		 * If a port is persistently configured to be disabled, the
		 * first event will a port disable request.
		 */
		bfa_sm_set_state(fcport, bfa_fcport_sm_disabled);
		break;

	case BFA_FCPORT_SM_HWFAIL:
		/* IOC failed before the port was ever started */
		bfa_sm_set_state(fcport, bfa_fcport_sm_iocdown);
		break;

	default:
		bfa_sm_fault(fcport->bfa, event);
	}
}
1927
static void
bfa_fcport_sm_enabling_qwait(struct bfa_fcport_s *fcport,
			enum bfa_fcport_sm_event event)
{
	char pwwn_buf[BFA_STRING_32];
	struct bfad_s *bfad = (struct bfad_s *)fcport->bfa->bfad;
	bfa_trc(fcport->bfa, event);

	switch (event) {
	case BFA_FCPORT_SM_QRESUME:
		/* Queue space available; send the deferred enable */
		bfa_sm_set_state(fcport, bfa_fcport_sm_enabling);
		bfa_fcport_send_enable(fcport);
		break;

	case BFA_FCPORT_SM_STOP:
		/* Cancel the queue wait before stopping */
		bfa_reqq_wcancel(&fcport->reqq_wait);
		bfa_sm_set_state(fcport, bfa_fcport_sm_stopped);
		break;

	case BFA_FCPORT_SM_ENABLE:
		/*
		 * Already enable is in progress.
		 */
		break;

	case BFA_FCPORT_SM_DISABLE:
		/*
		 * Just send disable request to firmware when room becomes
		 * available in request queue.
		 */
		/*
		 * NOTE(review): the code below cancels the queue wait and
		 * moves straight to "disabled" without sending a disable
		 * request -- the comment above looks stale; confirm.
		 */
		bfa_sm_set_state(fcport, bfa_fcport_sm_disabled);
		bfa_reqq_wcancel(&fcport->reqq_wait);
		bfa_plog_str(fcport->bfa->plog, BFA_PL_MID_HAL,
				BFA_PL_EID_PORT_DISABLE, 0, "Port Disable");
		wwn2str(pwwn_buf, fcport->pwwn);
		BFA_LOG(KERN_INFO, bfad, bfa_log_level,
			"Base port disabled: WWN = %s\n", pwwn_buf);
		break;

	case BFA_FCPORT_SM_LINKUP:
	case BFA_FCPORT_SM_LINKDOWN:
		/*
		 * Possible to get link events when doing back-to-back
		 * enable/disables.
		 */
		break;

	case BFA_FCPORT_SM_HWFAIL:
		/* Cancel the queue wait before marking IOC down */
		bfa_reqq_wcancel(&fcport->reqq_wait);
		bfa_sm_set_state(fcport, bfa_fcport_sm_iocdown);
		break;

	default:
		bfa_sm_fault(fcport->bfa, event);
	}
}
1984
static void
bfa_fcport_sm_enabling(struct bfa_fcport_s *fcport,
			enum bfa_fcport_sm_event event)
{
	char pwwn_buf[BFA_STRING_32];
	struct bfad_s *bfad = (struct bfad_s *)fcport->bfa->bfad;
	bfa_trc(fcport->bfa, event);

	switch (event) {
	case BFA_FCPORT_SM_FWRSP:
	case BFA_FCPORT_SM_LINKDOWN:
		/* Enable acknowledged (or link still down): await linkup */
		bfa_sm_set_state(fcport, bfa_fcport_sm_linkdown);
		break;

	case BFA_FCPORT_SM_LINKUP:
		/* Cache speed/topology etc. from the link-up event */
		bfa_fcport_update_linkinfo(fcport);
		bfa_sm_set_state(fcport, bfa_fcport_sm_linkup);

		bfa_assert(fcport->event_cbfn);
		bfa_fcport_scn(fcport, BFA_PORT_LINKUP, BFA_FALSE);
		break;

	case BFA_FCPORT_SM_ENABLE:
		/*
		 * Already being enabled.
		 */
		break;

	case BFA_FCPORT_SM_DISABLE:
		/* Send disable now, or wait for request-queue space */
		if (bfa_fcport_send_disable(fcport))
			bfa_sm_set_state(fcport, bfa_fcport_sm_disabling);
		else
			bfa_sm_set_state(fcport,
					bfa_fcport_sm_disabling_qwait);

		bfa_plog_str(fcport->bfa->plog, BFA_PL_MID_HAL,
				BFA_PL_EID_PORT_DISABLE, 0, "Port Disable");
		wwn2str(pwwn_buf, fcport->pwwn);
		BFA_LOG(KERN_INFO, bfad, bfa_log_level,
			"Base port disabled: WWN = %s\n", pwwn_buf);
		break;

	case BFA_FCPORT_SM_STOP:
		bfa_sm_set_state(fcport, bfa_fcport_sm_stopped);
		break;

	case BFA_FCPORT_SM_HWFAIL:
		bfa_sm_set_state(fcport, bfa_fcport_sm_iocdown);
		break;

	default:
		bfa_sm_fault(fcport->bfa, event);
	}
}
2039
/*
 * Port is enabled and the link is down; waiting for a link-up event
 * from firmware.
 */
static void
bfa_fcport_sm_linkdown(struct bfa_fcport_s *fcport,
		enum bfa_fcport_sm_event event)
{
	struct bfi_fcport_event_s *pevent = fcport->event_arg.i2hmsg.event;
	char pwwn_buf[BFA_STRING_32];
	struct bfad_s *bfad = (struct bfad_s *)fcport->bfa->bfad;

	bfa_trc(fcport->bfa, event);

	switch (event) {
	case BFA_FCPORT_SM_LINKUP:
		bfa_fcport_update_linkinfo(fcport);
		bfa_sm_set_state(fcport, bfa_fcport_sm_linkup);
		bfa_assert(fcport->event_cbfn);
		bfa_plog_str(fcport->bfa->plog, BFA_PL_MID_HAL,
			BFA_PL_EID_PORT_ST_CHANGE, 0, "Port Linkup");
		/* Non-FC mode (i.e. FCoE): record the FIP FCF discovery
		 * outcome carried in the link-up event. */
		if (!bfa_ioc_get_fcmode(&fcport->bfa->ioc)) {

			bfa_trc(fcport->bfa,
				pevent->link_state.vc_fcf.fcf.fipenabled);
			bfa_trc(fcport->bfa,
				pevent->link_state.vc_fcf.fcf.fipfailed);

			if (pevent->link_state.vc_fcf.fcf.fipfailed)
				bfa_plog_str(fcport->bfa->plog, BFA_PL_MID_HAL,
					BFA_PL_EID_FIP_FCF_DISC, 0,
					"FIP FCF Discovery Failed");
			else
				bfa_plog_str(fcport->bfa->plog, BFA_PL_MID_HAL,
					BFA_PL_EID_FIP_FCF_DISC, 0,
					"FIP FCF Discovered");
		}

		bfa_fcport_scn(fcport, BFA_PORT_LINKUP, BFA_FALSE);
		wwn2str(pwwn_buf, fcport->pwwn);
		BFA_LOG(KERN_INFO, bfad, bfa_log_level,
			"Base port online: WWN = %s\n", pwwn_buf);
		break;

	case BFA_FCPORT_SM_LINKDOWN:
		/*
		 * Possible to get link down event.
		 */
		break;

	case BFA_FCPORT_SM_ENABLE:
		/*
		 * Already enabled.
		 */
		break;

	case BFA_FCPORT_SM_DISABLE:
		/* Queue disable; qwait state if the request queue is full. */
		if (bfa_fcport_send_disable(fcport))
			bfa_sm_set_state(fcport, bfa_fcport_sm_disabling);
		else
			bfa_sm_set_state(fcport,
				bfa_fcport_sm_disabling_qwait);

		bfa_plog_str(fcport->bfa->plog, BFA_PL_MID_HAL,
			BFA_PL_EID_PORT_DISABLE, 0, "Port Disable");
		wwn2str(pwwn_buf, fcport->pwwn);
		BFA_LOG(KERN_INFO, bfad, bfa_log_level,
			"Base port disabled: WWN = %s\n", pwwn_buf);
		break;

	case BFA_FCPORT_SM_STOP:
		bfa_sm_set_state(fcport, bfa_fcport_sm_stopped);
		break;

	case BFA_FCPORT_SM_HWFAIL:
		bfa_sm_set_state(fcport, bfa_fcport_sm_iocdown);
		break;

	default:
		bfa_sm_fault(fcport->bfa, event);
	}
}
2118
/*
 * Port is enabled and the link is up.  Any transition out of this state
 * resets the cached link info and (except for STOP) sends a link-down
 * SCN to upper layers before logging the reason.
 */
static void
bfa_fcport_sm_linkup(struct bfa_fcport_s *fcport,
		enum bfa_fcport_sm_event event)
{
	char pwwn_buf[BFA_STRING_32];
	struct bfad_s *bfad = (struct bfad_s *)fcport->bfa->bfad;

	bfa_trc(fcport->bfa, event);

	switch (event) {
	case BFA_FCPORT_SM_ENABLE:
		/*
		 * Already enabled.
		 */
		break;

	case BFA_FCPORT_SM_DISABLE:
		/* Queue disable; qwait state if the request queue is full. */
		if (bfa_fcport_send_disable(fcport))
			bfa_sm_set_state(fcport, bfa_fcport_sm_disabling);
		else
			bfa_sm_set_state(fcport,
				bfa_fcport_sm_disabling_qwait);

		bfa_fcport_reset_linkinfo(fcport);
		bfa_fcport_scn(fcport, BFA_PORT_LINKDOWN, BFA_FALSE);
		bfa_plog_str(fcport->bfa->plog, BFA_PL_MID_HAL,
			BFA_PL_EID_PORT_DISABLE, 0, "Port Disable");
		wwn2str(pwwn_buf, fcport->pwwn);
		BFA_LOG(KERN_INFO, bfad, bfa_log_level,
			"Base port offline: WWN = %s\n", pwwn_buf);
		BFA_LOG(KERN_INFO, bfad, bfa_log_level,
			"Base port disabled: WWN = %s\n", pwwn_buf);
		break;

	case BFA_FCPORT_SM_LINKDOWN:
		bfa_sm_set_state(fcport, bfa_fcport_sm_linkdown);
		bfa_fcport_reset_linkinfo(fcport);
		bfa_fcport_scn(fcport, BFA_PORT_LINKDOWN, BFA_FALSE);
		bfa_plog_str(fcport->bfa->plog, BFA_PL_MID_HAL,
			BFA_PL_EID_PORT_ST_CHANGE, 0, "Port Linkdown");
		wwn2str(pwwn_buf, fcport->pwwn);
		/* An unexpected link loss (port/IOC not disabled) is an
		 * error; an administrative disable is informational. */
		if (BFA_PORT_IS_DISABLED(fcport->bfa))
			BFA_LOG(KERN_INFO, bfad, bfa_log_level,
				"Base port offline: WWN = %s\n", pwwn_buf);
		else
			BFA_LOG(KERN_ERR, bfad, bfa_log_level,
				"Base port (WWN = %s) "
				"lost fabric connectivity\n", pwwn_buf);
		break;

	case BFA_FCPORT_SM_STOP:
		bfa_sm_set_state(fcport, bfa_fcport_sm_stopped);
		bfa_fcport_reset_linkinfo(fcport);
		wwn2str(pwwn_buf, fcport->pwwn);
		if (BFA_PORT_IS_DISABLED(fcport->bfa))
			BFA_LOG(KERN_INFO, bfad, bfa_log_level,
				"Base port offline: WWN = %s\n", pwwn_buf);
		else
			BFA_LOG(KERN_ERR, bfad, bfa_log_level,
				"Base port (WWN = %s) "
				"lost fabric connectivity\n", pwwn_buf);
		break;

	case BFA_FCPORT_SM_HWFAIL:
		bfa_sm_set_state(fcport, bfa_fcport_sm_iocdown);
		bfa_fcport_reset_linkinfo(fcport);
		bfa_fcport_scn(fcport, BFA_PORT_LINKDOWN, BFA_FALSE);
		wwn2str(pwwn_buf, fcport->pwwn);
		if (BFA_PORT_IS_DISABLED(fcport->bfa))
			BFA_LOG(KERN_INFO, bfad, bfa_log_level,
				"Base port offline: WWN = %s\n", pwwn_buf);
		else
			BFA_LOG(KERN_ERR, bfad, bfa_log_level,
				"Base port (WWN = %s) "
				"lost fabric connectivity\n", pwwn_buf);
		break;

	default:
		bfa_sm_fault(fcport->bfa, event);
	}
}
2200
/*
 * A disable was requested but the firmware request queue was full;
 * waiting for queue space (QRESUME) to actually send the disable.
 */
static void
bfa_fcport_sm_disabling_qwait(struct bfa_fcport_s *fcport,
		enum bfa_fcport_sm_event event)
{
	bfa_trc(fcport->bfa, event);

	switch (event) {
	case BFA_FCPORT_SM_QRESUME:
		/* Queue space available: send the pending disable now. */
		bfa_sm_set_state(fcport, bfa_fcport_sm_disabling);
		bfa_fcport_send_disable(fcport);
		break;

	case BFA_FCPORT_SM_STOP:
		/* Stopping: drop the queued wait element as well. */
		bfa_sm_set_state(fcport, bfa_fcport_sm_stopped);
		bfa_reqq_wcancel(&fcport->reqq_wait);
		break;

	case BFA_FCPORT_SM_ENABLE:
		/* Enable while a disable is still pending: remember both;
		 * toggling_qwait sends disable-then-enable on resume. */
		bfa_sm_set_state(fcport, bfa_fcport_sm_toggling_qwait);
		break;

	case BFA_FCPORT_SM_DISABLE:
		/*
		 * Already being disabled.
		 */
		break;

	case BFA_FCPORT_SM_LINKUP:
	case BFA_FCPORT_SM_LINKDOWN:
		/*
		 * Possible to get link events when doing back-to-back
		 * enable/disables.
		 */
		break;

	case BFA_FCPORT_SM_HWFAIL:
		bfa_sm_set_state(fcport, bfa_fcport_sm_iocfail);
		bfa_reqq_wcancel(&fcport->reqq_wait);
		break;

	default:
		bfa_sm_fault(fcport->bfa, event);
	}
}
2245
/*
 * Disable followed by enable were both requested while the request
 * queue was full; on resume the disable is sent first, then the enable.
 */
static void
bfa_fcport_sm_toggling_qwait(struct bfa_fcport_s *fcport,
		enum bfa_fcport_sm_event event)
{
	bfa_trc(fcport->bfa, event);

	switch (event) {
	case BFA_FCPORT_SM_QRESUME:
		/* Send the pending disable, then the enable; if the enable
		 * cannot be queued, wait again in enabling_qwait. */
		bfa_sm_set_state(fcport, bfa_fcport_sm_disabling);
		bfa_fcport_send_disable(fcport);
		if (bfa_fcport_send_enable(fcport))
			bfa_sm_set_state(fcport, bfa_fcport_sm_enabling);
		else
			bfa_sm_set_state(fcport,
				bfa_fcport_sm_enabling_qwait);
		break;

	case BFA_FCPORT_SM_STOP:
		bfa_sm_set_state(fcport, bfa_fcport_sm_stopped);
		bfa_reqq_wcancel(&fcport->reqq_wait);
		break;

	case BFA_FCPORT_SM_ENABLE:
		/* Enable is already pending. */
		break;

	case BFA_FCPORT_SM_DISABLE:
		/* Cancel the pending enable: back to plain disable-wait. */
		bfa_sm_set_state(fcport, bfa_fcport_sm_disabling_qwait);
		break;

	case BFA_FCPORT_SM_LINKUP:
	case BFA_FCPORT_SM_LINKDOWN:
		/*
		 * Possible to get link events when doing back-to-back
		 * enable/disables.
		 */
		break;

	case BFA_FCPORT_SM_HWFAIL:
		bfa_sm_set_state(fcport, bfa_fcport_sm_iocfail);
		bfa_reqq_wcancel(&fcport->reqq_wait);
		break;

	default:
		bfa_sm_fault(fcport->bfa, event);
	}
}
2292
/*
 * Port disable request is outstanding with firmware; waiting for the
 * firmware response.
 */
static void
bfa_fcport_sm_disabling(struct bfa_fcport_s *fcport,
		enum bfa_fcport_sm_event event)
{
	char pwwn_buf[BFA_STRING_32];
	struct bfad_s *bfad = (struct bfad_s *)fcport->bfa->bfad;
	bfa_trc(fcport->bfa, event);

	switch (event) {
	case BFA_FCPORT_SM_FWRSP:
		/* Disable acknowledged by firmware. */
		bfa_sm_set_state(fcport, bfa_fcport_sm_disabled);
		break;

	case BFA_FCPORT_SM_DISABLE:
		/*
		 * Already being disabled.
		 */
		break;

	case BFA_FCPORT_SM_ENABLE:
		/* Re-enable requested before disable completed: queue an
		 * enable; qwait state if the request queue is full. */
		if (bfa_fcport_send_enable(fcport))
			bfa_sm_set_state(fcport, bfa_fcport_sm_enabling);
		else
			bfa_sm_set_state(fcport,
				bfa_fcport_sm_enabling_qwait);

		bfa_plog_str(fcport->bfa->plog, BFA_PL_MID_HAL,
			BFA_PL_EID_PORT_ENABLE, 0, "Port Enable");
		wwn2str(pwwn_buf, fcport->pwwn);
		BFA_LOG(KERN_INFO, bfad, bfa_log_level,
			"Base port enabled: WWN = %s\n", pwwn_buf);
		break;

	case BFA_FCPORT_SM_STOP:
		bfa_sm_set_state(fcport, bfa_fcport_sm_stopped);
		break;

	case BFA_FCPORT_SM_LINKUP:
	case BFA_FCPORT_SM_LINKDOWN:
		/*
		 * Possible to get link events when doing back-to-back
		 * enable/disables.
		 */
		break;

	case BFA_FCPORT_SM_HWFAIL:
		bfa_sm_set_state(fcport, bfa_fcport_sm_iocfail);
		break;

	default:
		bfa_sm_fault(fcport->bfa, event);
	}
}
2346
/*
 * Port is administratively disabled; firmware has acknowledged the
 * disable.  Only an ENABLE (or STOP/HWFAIL) moves it out of here.
 */
static void
bfa_fcport_sm_disabled(struct bfa_fcport_s *fcport,
		enum bfa_fcport_sm_event event)
{
	char pwwn_buf[BFA_STRING_32];
	struct bfad_s *bfad = (struct bfad_s *)fcport->bfa->bfad;
	bfa_trc(fcport->bfa, event);

	switch (event) {
	case BFA_FCPORT_SM_START:
		/*
		 * Ignore start event for a port that is disabled.
		 */
		break;

	case BFA_FCPORT_SM_STOP:
		bfa_sm_set_state(fcport, bfa_fcport_sm_stopped);
		break;

	case BFA_FCPORT_SM_ENABLE:
		/* Queue enable; qwait state if the request queue is full. */
		if (bfa_fcport_send_enable(fcport))
			bfa_sm_set_state(fcport, bfa_fcport_sm_enabling);
		else
			bfa_sm_set_state(fcport,
				bfa_fcport_sm_enabling_qwait);

		bfa_plog_str(fcport->bfa->plog, BFA_PL_MID_HAL,
			BFA_PL_EID_PORT_ENABLE, 0, "Port Enable");
		wwn2str(pwwn_buf, fcport->pwwn);
		BFA_LOG(KERN_INFO, bfad, bfa_log_level,
			"Base port enabled: WWN = %s\n", pwwn_buf);
		break;

	case BFA_FCPORT_SM_DISABLE:
		/*
		 * Already disabled.
		 */
		break;

	case BFA_FCPORT_SM_HWFAIL:
		bfa_sm_set_state(fcport, bfa_fcport_sm_iocfail);
		break;

	default:
		bfa_sm_fault(fcport->bfa, event);
	}
}
2394
/*
 * Port has been stopped.  Only a START re-enables it; every other
 * event is intentionally ignored (no fault) since the port is quiesced.
 */
static void
bfa_fcport_sm_stopped(struct bfa_fcport_s *fcport,
		enum bfa_fcport_sm_event event)
{
	bfa_trc(fcport->bfa, event);

	switch (event) {
	case BFA_FCPORT_SM_START:
		/* Queue enable; qwait state if the request queue is full. */
		if (bfa_fcport_send_enable(fcport))
			bfa_sm_set_state(fcport, bfa_fcport_sm_enabling);
		else
			bfa_sm_set_state(fcport,
				bfa_fcport_sm_enabling_qwait);
		break;

	default:
		/*
		 * Ignore all other events.
		 */
		;
	}
}
2417
Jing Huang5fbe25c2010-10-18 17:17:23 -07002418/*
Krishna Gudipatia36c61f2010-09-15 11:50:55 -07002419 * Port is enabled. IOC is down/failed.
2420 */
2421static void
2422bfa_fcport_sm_iocdown(struct bfa_fcport_s *fcport,
2423 enum bfa_fcport_sm_event event)
2424{
2425 bfa_trc(fcport->bfa, event);
2426
2427 switch (event) {
2428 case BFA_FCPORT_SM_START:
2429 if (bfa_fcport_send_enable(fcport))
2430 bfa_sm_set_state(fcport, bfa_fcport_sm_enabling);
2431 else
2432 bfa_sm_set_state(fcport,
2433 bfa_fcport_sm_enabling_qwait);
2434 break;
2435
2436 default:
Jing Huang5fbe25c2010-10-18 17:17:23 -07002437 /*
Krishna Gudipatia36c61f2010-09-15 11:50:55 -07002438 * Ignore all events.
2439 */
2440 ;
2441 }
2442}
2443
Jing Huang5fbe25c2010-10-18 17:17:23 -07002444/*
Krishna Gudipatia36c61f2010-09-15 11:50:55 -07002445 * Port is disabled. IOC is down/failed.
2446 */
2447static void
2448bfa_fcport_sm_iocfail(struct bfa_fcport_s *fcport,
2449 enum bfa_fcport_sm_event event)
2450{
2451 bfa_trc(fcport->bfa, event);
2452
2453 switch (event) {
2454 case BFA_FCPORT_SM_START:
2455 bfa_sm_set_state(fcport, bfa_fcport_sm_disabled);
2456 break;
2457
2458 case BFA_FCPORT_SM_ENABLE:
2459 bfa_sm_set_state(fcport, bfa_fcport_sm_iocdown);
2460 break;
2461
2462 default:
Jing Huang5fbe25c2010-10-18 17:17:23 -07002463 /*
Krishna Gudipatia36c61f2010-09-15 11:50:55 -07002464 * Ignore all events.
2465 */
2466 ;
2467 }
2468}
2469
Jing Huang5fbe25c2010-10-18 17:17:23 -07002470/*
Krishna Gudipatia36c61f2010-09-15 11:50:55 -07002471 * Link state is down
2472 */
2473static void
2474bfa_fcport_ln_sm_dn(struct bfa_fcport_ln_s *ln,
2475 enum bfa_fcport_ln_sm_event event)
2476{
2477 bfa_trc(ln->fcport->bfa, event);
2478
2479 switch (event) {
2480 case BFA_FCPORT_LN_SM_LINKUP:
2481 bfa_sm_set_state(ln, bfa_fcport_ln_sm_up_nf);
2482 bfa_fcport_queue_cb(ln, BFA_PORT_LINKUP);
2483 break;
2484
2485 default:
2486 bfa_sm_fault(ln->fcport->bfa, event);
2487 }
2488}
2489
Jing Huang5fbe25c2010-10-18 17:17:23 -07002490/*
Krishna Gudipatia36c61f2010-09-15 11:50:55 -07002491 * Link state is waiting for down notification
2492 */
2493static void
2494bfa_fcport_ln_sm_dn_nf(struct bfa_fcport_ln_s *ln,
2495 enum bfa_fcport_ln_sm_event event)
2496{
2497 bfa_trc(ln->fcport->bfa, event);
2498
2499 switch (event) {
2500 case BFA_FCPORT_LN_SM_LINKUP:
2501 bfa_sm_set_state(ln, bfa_fcport_ln_sm_dn_up_nf);
2502 break;
2503
2504 case BFA_FCPORT_LN_SM_NOTIFICATION:
2505 bfa_sm_set_state(ln, bfa_fcport_ln_sm_dn);
2506 break;
2507
2508 default:
2509 bfa_sm_fault(ln->fcport->bfa, event);
2510 }
2511}
2512
Jing Huang5fbe25c2010-10-18 17:17:23 -07002513/*
Krishna Gudipatia36c61f2010-09-15 11:50:55 -07002514 * Link state is waiting for down notification and there is a pending up
2515 */
2516static void
2517bfa_fcport_ln_sm_dn_up_nf(struct bfa_fcport_ln_s *ln,
2518 enum bfa_fcport_ln_sm_event event)
2519{
2520 bfa_trc(ln->fcport->bfa, event);
2521
2522 switch (event) {
2523 case BFA_FCPORT_LN_SM_LINKDOWN:
2524 bfa_sm_set_state(ln, bfa_fcport_ln_sm_dn_nf);
2525 break;
2526
2527 case BFA_FCPORT_LN_SM_NOTIFICATION:
2528 bfa_sm_set_state(ln, bfa_fcport_ln_sm_up_nf);
2529 bfa_fcport_queue_cb(ln, BFA_PORT_LINKUP);
2530 break;
2531
2532 default:
2533 bfa_sm_fault(ln->fcport->bfa, event);
2534 }
2535}
2536
Jing Huang5fbe25c2010-10-18 17:17:23 -07002537/*
Krishna Gudipatia36c61f2010-09-15 11:50:55 -07002538 * Link state is up
2539 */
2540static void
2541bfa_fcport_ln_sm_up(struct bfa_fcport_ln_s *ln,
2542 enum bfa_fcport_ln_sm_event event)
2543{
2544 bfa_trc(ln->fcport->bfa, event);
2545
2546 switch (event) {
2547 case BFA_FCPORT_LN_SM_LINKDOWN:
2548 bfa_sm_set_state(ln, bfa_fcport_ln_sm_dn_nf);
2549 bfa_fcport_queue_cb(ln, BFA_PORT_LINKDOWN);
2550 break;
2551
2552 default:
2553 bfa_sm_fault(ln->fcport->bfa, event);
2554 }
2555}
2556
Jing Huang5fbe25c2010-10-18 17:17:23 -07002557/*
Krishna Gudipatia36c61f2010-09-15 11:50:55 -07002558 * Link state is waiting for up notification
2559 */
2560static void
2561bfa_fcport_ln_sm_up_nf(struct bfa_fcport_ln_s *ln,
2562 enum bfa_fcport_ln_sm_event event)
2563{
2564 bfa_trc(ln->fcport->bfa, event);
2565
2566 switch (event) {
2567 case BFA_FCPORT_LN_SM_LINKDOWN:
2568 bfa_sm_set_state(ln, bfa_fcport_ln_sm_up_dn_nf);
2569 break;
2570
2571 case BFA_FCPORT_LN_SM_NOTIFICATION:
2572 bfa_sm_set_state(ln, bfa_fcport_ln_sm_up);
2573 break;
2574
2575 default:
2576 bfa_sm_fault(ln->fcport->bfa, event);
2577 }
2578}
2579
Jing Huang5fbe25c2010-10-18 17:17:23 -07002580/*
Krishna Gudipatia36c61f2010-09-15 11:50:55 -07002581 * Link state is waiting for up notification and there is a pending down
2582 */
2583static void
2584bfa_fcport_ln_sm_up_dn_nf(struct bfa_fcport_ln_s *ln,
2585 enum bfa_fcport_ln_sm_event event)
2586{
2587 bfa_trc(ln->fcport->bfa, event);
2588
2589 switch (event) {
2590 case BFA_FCPORT_LN_SM_LINKUP:
2591 bfa_sm_set_state(ln, bfa_fcport_ln_sm_up_dn_up_nf);
2592 break;
2593
2594 case BFA_FCPORT_LN_SM_NOTIFICATION:
2595 bfa_sm_set_state(ln, bfa_fcport_ln_sm_dn_nf);
2596 bfa_fcport_queue_cb(ln, BFA_PORT_LINKDOWN);
2597 break;
2598
2599 default:
2600 bfa_sm_fault(ln->fcport->bfa, event);
2601 }
2602}
2603
Jing Huang5fbe25c2010-10-18 17:17:23 -07002604/*
Krishna Gudipatia36c61f2010-09-15 11:50:55 -07002605 * Link state is waiting for up notification and there are pending down and up
2606 */
2607static void
2608bfa_fcport_ln_sm_up_dn_up_nf(struct bfa_fcport_ln_s *ln,
2609 enum bfa_fcport_ln_sm_event event)
2610{
2611 bfa_trc(ln->fcport->bfa, event);
2612
2613 switch (event) {
2614 case BFA_FCPORT_LN_SM_LINKDOWN:
2615 bfa_sm_set_state(ln, bfa_fcport_ln_sm_up_dn_nf);
2616 break;
2617
2618 case BFA_FCPORT_LN_SM_NOTIFICATION:
2619 bfa_sm_set_state(ln, bfa_fcport_ln_sm_dn_up_nf);
2620 bfa_fcport_queue_cb(ln, BFA_PORT_LINKDOWN);
2621 break;
2622
2623 default:
2624 bfa_sm_fault(ln->fcport->bfa, event);
2625 }
2626}
2627
Krishna Gudipatia36c61f2010-09-15 11:50:55 -07002628static void
2629__bfa_cb_fcport_event(void *cbarg, bfa_boolean_t complete)
2630{
2631 struct bfa_fcport_ln_s *ln = cbarg;
2632
2633 if (complete)
2634 ln->fcport->event_cbfn(ln->fcport->event_cbarg, ln->ln_event);
2635 else
2636 bfa_sm_send_event(ln, BFA_FCPORT_LN_SM_NOTIFICATION);
2637}
2638
Jing Huang5fbe25c2010-10-18 17:17:23 -07002639/*
Krishna Gudipatia36c61f2010-09-15 11:50:55 -07002640 * Send SCN notification to upper layers.
2641 * trunk - false if caller is fcport to ignore fcport event in trunked mode
2642 */
2643static void
2644bfa_fcport_scn(struct bfa_fcport_s *fcport, enum bfa_port_linkstate event,
2645 bfa_boolean_t trunk)
2646{
2647 if (fcport->cfg.trunked && !trunk)
2648 return;
2649
2650 switch (event) {
2651 case BFA_PORT_LINKUP:
2652 bfa_sm_send_event(&fcport->ln, BFA_FCPORT_LN_SM_LINKUP);
2653 break;
2654 case BFA_PORT_LINKDOWN:
2655 bfa_sm_send_event(&fcport->ln, BFA_FCPORT_LN_SM_LINKDOWN);
2656 break;
2657 default:
2658 bfa_assert(0);
2659 }
2660}
2661
2662static void
2663bfa_fcport_queue_cb(struct bfa_fcport_ln_s *ln, enum bfa_port_linkstate event)
2664{
2665 struct bfa_fcport_s *fcport = ln->fcport;
2666
2667 if (fcport->bfa->fcs) {
2668 fcport->event_cbfn(fcport->event_cbarg, event);
2669 bfa_sm_send_event(ln, BFA_FCPORT_LN_SM_NOTIFICATION);
2670 } else {
2671 ln->ln_event = event;
2672 bfa_cb_queue(fcport->bfa, &ln->ln_qe,
2673 __bfa_cb_fcport_event, ln);
2674 }
2675}
2676
/* DMA area for firmware port statistics, rounded up to a cache line. */
#define FCPORT_STATS_DMA_SZ (BFA_ROUNDUP(sizeof(union bfa_fcport_stats_u), \
			BFA_CACHELINE_SZ))

/*
 * Module meminfo hook: report the DMA memory this module needs.
 * ndm_len (non-DMA) is untouched; only the stats DMA area is claimed.
 * cfg and ndm_len are part of the module-API signature even though
 * they are unused here.
 */
static void
bfa_fcport_meminfo(struct bfa_iocfc_cfg_s *cfg, u32 *ndm_len,
		u32 *dm_len)
{
	*dm_len += FCPORT_STATS_DMA_SZ;
}
2686
/*
 * Request-queue resume callback: the reqq has space again, so kick the
 * port state machine to send whatever request was waiting.
 */
static void
bfa_fcport_qresume(void *cbarg)
{
	struct bfa_fcport_s *fcport = cbarg;

	bfa_sm_send_event(fcport, BFA_FCPORT_SM_QRESUME);
}
2694
/*
 * Carve the port-stats DMA area out of the meminfo pool and advance the
 * pool's virtual/physical cursors past it.  Both cursors must advance
 * by the same amount to stay in sync.
 */
static void
bfa_fcport_mem_claim(struct bfa_fcport_s *fcport, struct bfa_meminfo_s *meminfo)
{
	u8	*dm_kva;
	u64	dm_pa;

	dm_kva = bfa_meminfo_dma_virt(meminfo);
	dm_pa = bfa_meminfo_dma_phys(meminfo);

	/* Stats buffer lives at the current cursor position. */
	fcport->stats_kva = dm_kva;
	fcport->stats_pa = dm_pa;
	fcport->stats = (union bfa_fcport_stats_u *) dm_kva;

	dm_kva += FCPORT_STATS_DMA_SZ;
	dm_pa += FCPORT_STATS_DMA_SZ;

	/* Write the advanced cursors back into the pool. */
	bfa_meminfo_dma_virt(meminfo) = dm_kva;
	bfa_meminfo_dma_phys(meminfo) = dm_pa;
}
2714
Jing Huang5fbe25c2010-10-18 17:17:23 -07002715/*
Krishna Gudipatia36c61f2010-09-15 11:50:55 -07002716 * Memory initialization.
2717 */
2718static void
2719bfa_fcport_attach(struct bfa_s *bfa, void *bfad, struct bfa_iocfc_cfg_s *cfg,
2720 struct bfa_meminfo_s *meminfo, struct bfa_pcidev_s *pcidev)
2721{
2722 struct bfa_fcport_s *fcport = BFA_FCPORT_MOD(bfa);
2723 struct bfa_port_cfg_s *port_cfg = &fcport->cfg;
2724 struct bfa_fcport_ln_s *ln = &fcport->ln;
Maggie Zhangf16a1752010-12-09 19:12:32 -08002725 struct timeval tv;
Krishna Gudipatia36c61f2010-09-15 11:50:55 -07002726
Jing Huang6a18b162010-10-18 17:08:54 -07002727 memset(fcport, 0, sizeof(struct bfa_fcport_s));
Krishna Gudipatia36c61f2010-09-15 11:50:55 -07002728 fcport->bfa = bfa;
2729 ln->fcport = fcport;
2730
2731 bfa_fcport_mem_claim(fcport, meminfo);
2732
2733 bfa_sm_set_state(fcport, bfa_fcport_sm_uninit);
2734 bfa_sm_set_state(ln, bfa_fcport_ln_sm_dn);
2735
Jing Huang5fbe25c2010-10-18 17:17:23 -07002736 /*
Krishna Gudipatia36c61f2010-09-15 11:50:55 -07002737 * initialize time stamp for stats reset
2738 */
Maggie Zhangf16a1752010-12-09 19:12:32 -08002739 do_gettimeofday(&tv);
Krishna Gudipatia36c61f2010-09-15 11:50:55 -07002740 fcport->stats_reset_time = tv.tv_sec;
2741
Jing Huang5fbe25c2010-10-18 17:17:23 -07002742 /*
Krishna Gudipatia36c61f2010-09-15 11:50:55 -07002743 * initialize and set default configuration
2744 */
2745 port_cfg->topology = BFA_PORT_TOPOLOGY_P2P;
2746 port_cfg->speed = BFA_PORT_SPEED_AUTO;
2747 port_cfg->trunked = BFA_FALSE;
2748 port_cfg->maxfrsize = 0;
2749
2750 port_cfg->trl_def_speed = BFA_PORT_SPEED_1GBPS;
2751
2752 bfa_reqq_winit(&fcport->reqq_wait, bfa_fcport_qresume, fcport);
2753}
2754
/*
 * Module detach hook: nothing to release here (all fcport memory is
 * owned by the shared meminfo pool).
 */
static void
bfa_fcport_detach(struct bfa_s *bfa)
{
}
2759
Jing Huang5fbe25c2010-10-18 17:17:23 -07002760/*
Krishna Gudipatia36c61f2010-09-15 11:50:55 -07002761 * Called when IOC is ready.
2762 */
2763static void
2764bfa_fcport_start(struct bfa_s *bfa)
2765{
2766 bfa_sm_send_event(BFA_FCPORT_MOD(bfa), BFA_FCPORT_SM_START);
2767}
2768
Jing Huang5fbe25c2010-10-18 17:17:23 -07002769/*
Krishna Gudipatia36c61f2010-09-15 11:50:55 -07002770 * Called before IOC is stopped.
2771 */
2772static void
2773bfa_fcport_stop(struct bfa_s *bfa)
2774{
2775 bfa_sm_send_event(BFA_FCPORT_MOD(bfa), BFA_FCPORT_SM_STOP);
2776 bfa_trunk_iocdisable(bfa);
2777}
2778
Jing Huang5fbe25c2010-10-18 17:17:23 -07002779/*
Krishna Gudipatia36c61f2010-09-15 11:50:55 -07002780 * Called when IOC failure is detected.
2781 */
2782static void
2783bfa_fcport_iocdisable(struct bfa_s *bfa)
2784{
2785 struct bfa_fcport_s *fcport = BFA_FCPORT_MOD(bfa);
2786
2787 bfa_sm_send_event(fcport, BFA_FCPORT_SM_HWFAIL);
2788 bfa_trunk_iocdisable(bfa);
2789}
2790
/*
 * Cache link attributes (speed, topology, QoS, FCoE VLAN) from the
 * firmware link-state event saved in fcport->event_arg.
 */
static void
bfa_fcport_update_linkinfo(struct bfa_fcport_s *fcport)
{
	struct bfi_fcport_event_s *pevent = fcport->event_arg.i2hmsg.event;
	struct bfa_fcport_trunk_s *trunk = &fcport->trunk;

	fcport->speed = pevent->link_state.speed;
	fcport->topology = pevent->link_state.topology;

	/* NOTE(review): myalpa is only cleared for loop topology here;
	 * presumably it is filled in elsewhere - confirm. */
	if (fcport->topology == BFA_PORT_TOPOLOGY_LOOP)
		fcport->myalpa = 0;

	/* QoS Details */
	fcport->qos_attr = pevent->link_state.qos_attr;
	fcport->qos_vc_attr = pevent->link_state.vc_fcf.qos_vc_attr;

	/*
	 * update trunk state if applicable
	 */
	if (!fcport->cfg.trunked)
		trunk->attr.state = BFA_TRUNK_DISABLED;

	/* update FCoE specific: VLAN arrives big-endian from firmware. */
	fcport->fcoe_vlan = be16_to_cpu(pevent->link_state.vc_fcf.fcf.vlan);

	bfa_trc(fcport->bfa, fcport->speed);
	bfa_trc(fcport->bfa, fcport->topology);
}
2819
/*
 * Invalidate the cached link attributes; used whenever the link goes
 * down or the port leaves the linkup state.
 */
static void
bfa_fcport_reset_linkinfo(struct bfa_fcport_s *fcport)
{
	fcport->speed = BFA_PORT_SPEED_UNKNOWN;
	fcport->topology = BFA_PORT_TOPOLOGY_NONE;
}
2826
Jing Huang5fbe25c2010-10-18 17:17:23 -07002827/*
Krishna Gudipatia36c61f2010-09-15 11:50:55 -07002828 * Send port enable message to firmware.
2829 */
2830static bfa_boolean_t
2831bfa_fcport_send_enable(struct bfa_fcport_s *fcport)
2832{
2833 struct bfi_fcport_enable_req_s *m;
2834
Jing Huang5fbe25c2010-10-18 17:17:23 -07002835 /*
Krishna Gudipatia36c61f2010-09-15 11:50:55 -07002836 * Increment message tag before queue check, so that responses to old
2837 * requests are discarded.
2838 */
2839 fcport->msgtag++;
2840
Jing Huang5fbe25c2010-10-18 17:17:23 -07002841 /*
Krishna Gudipatia36c61f2010-09-15 11:50:55 -07002842 * check for room in queue to send request now
2843 */
2844 m = bfa_reqq_next(fcport->bfa, BFA_REQQ_PORT);
2845 if (!m) {
2846 bfa_reqq_wait(fcport->bfa, BFA_REQQ_PORT,
2847 &fcport->reqq_wait);
2848 return BFA_FALSE;
2849 }
2850
2851 bfi_h2i_set(m->mh, BFI_MC_FCPORT, BFI_FCPORT_H2I_ENABLE_REQ,
2852 bfa_lpuid(fcport->bfa));
2853 m->nwwn = fcport->nwwn;
2854 m->pwwn = fcport->pwwn;
2855 m->port_cfg = fcport->cfg;
2856 m->msgtag = fcport->msgtag;
Jing Huangba816ea2010-10-18 17:10:50 -07002857 m->port_cfg.maxfrsize = cpu_to_be16(fcport->cfg.maxfrsize);
Krishna Gudipatia36c61f2010-09-15 11:50:55 -07002858 bfa_dma_be_addr_set(m->stats_dma_addr, fcport->stats_pa);
2859 bfa_trc(fcport->bfa, m->stats_dma_addr.a32.addr_lo);
2860 bfa_trc(fcport->bfa, m->stats_dma_addr.a32.addr_hi);
2861
Jing Huang5fbe25c2010-10-18 17:17:23 -07002862 /*
Krishna Gudipatia36c61f2010-09-15 11:50:55 -07002863 * queue I/O message to firmware
2864 */
2865 bfa_reqq_produce(fcport->bfa, BFA_REQQ_PORT);
2866 return BFA_TRUE;
2867}
2868
Jing Huang5fbe25c2010-10-18 17:17:23 -07002869/*
Krishna Gudipatia36c61f2010-09-15 11:50:55 -07002870 * Send port disable message to firmware.
2871 */
2872static bfa_boolean_t
2873bfa_fcport_send_disable(struct bfa_fcport_s *fcport)
2874{
2875 struct bfi_fcport_req_s *m;
2876
Jing Huang5fbe25c2010-10-18 17:17:23 -07002877 /*
Krishna Gudipatia36c61f2010-09-15 11:50:55 -07002878 * Increment message tag before queue check, so that responses to old
2879 * requests are discarded.
2880 */
2881 fcport->msgtag++;
2882
Jing Huang5fbe25c2010-10-18 17:17:23 -07002883 /*
Krishna Gudipatia36c61f2010-09-15 11:50:55 -07002884 * check for room in queue to send request now
2885 */
2886 m = bfa_reqq_next(fcport->bfa, BFA_REQQ_PORT);
2887 if (!m) {
2888 bfa_reqq_wait(fcport->bfa, BFA_REQQ_PORT,
2889 &fcport->reqq_wait);
2890 return BFA_FALSE;
2891 }
2892
2893 bfi_h2i_set(m->mh, BFI_MC_FCPORT, BFI_FCPORT_H2I_DISABLE_REQ,
2894 bfa_lpuid(fcport->bfa));
2895 m->msgtag = fcport->msgtag;
2896
Jing Huang5fbe25c2010-10-18 17:17:23 -07002897 /*
Krishna Gudipatia36c61f2010-09-15 11:50:55 -07002898 * queue I/O message to firmware
2899 */
2900 bfa_reqq_produce(fcport->bfa, BFA_REQQ_PORT);
2901
2902 return BFA_TRUE;
2903}
2904
/*
 * Copy the port/node WWNs from the IOC attribute block into the fcport.
 */
static void
bfa_fcport_set_wwns(struct bfa_fcport_s *fcport)
{
	fcport->pwwn = fcport->bfa->ioc.attr->pwwn;
	fcport->nwwn = fcport->bfa->ioc.attr->nwwn;

	bfa_trc(fcport->bfa, fcport->pwwn);
	bfa_trc(fcport->bfa, fcport->nwwn);
}
2914
/*
 * Push the configured TX BB-credit to firmware.  Best-effort: if the
 * request queue is full the update is simply dropped (traced only) -
 * unlike enable/disable, no wait element is armed.
 */
static void
bfa_fcport_send_txcredit(void *port_cbarg)
{

	struct bfa_fcport_s *fcport = port_cbarg;
	struct bfi_fcport_set_svc_params_req_s *m;

	/*
	 * check for room in queue to send request now
	 */
	m = bfa_reqq_next(fcport->bfa, BFA_REQQ_PORT);
	if (!m) {
		bfa_trc(fcport->bfa, fcport->cfg.tx_bbcredit);
		return;
	}

	bfi_h2i_set(m->mh, BFI_MC_FCPORT, BFI_FCPORT_H2I_SET_SVC_PARAMS_REQ,
			bfa_lpuid(fcport->bfa));
	/* Credit goes to firmware big-endian. */
	m->tx_bbcredit = cpu_to_be16((u16)fcport->cfg.tx_bbcredit);

	/*
	 * queue I/O message to firmware
	 */
	bfa_reqq_produce(fcport->bfa, BFA_REQQ_PORT);
}
2940
2941static void
2942bfa_fcport_qos_stats_swap(struct bfa_qos_stats_s *d,
2943 struct bfa_qos_stats_s *s)
2944{
2945 u32 *dip = (u32 *) d;
Maggie50444a32010-11-29 18:26:32 -08002946 __be32 *sip = (__be32 *) s;
Krishna Gudipatia36c61f2010-09-15 11:50:55 -07002947 int i;
2948
2949 /* Now swap the 32 bit fields */
2950 for (i = 0; i < (sizeof(struct bfa_qos_stats_s)/sizeof(u32)); ++i)
Jing Huangba816ea2010-10-18 17:10:50 -07002951 dip[i] = be32_to_cpu(sip[i]);
Krishna Gudipatia36c61f2010-09-15 11:50:55 -07002952}
2953
2954static void
2955bfa_fcport_fcoe_stats_swap(struct bfa_fcoe_stats_s *d,
2956 struct bfa_fcoe_stats_s *s)
2957{
2958 u32 *dip = (u32 *) d;
Maggie50444a32010-11-29 18:26:32 -08002959 __be32 *sip = (__be32 *) s;
Krishna Gudipatia36c61f2010-09-15 11:50:55 -07002960 int i;
2961
2962 for (i = 0; i < ((sizeof(struct bfa_fcoe_stats_s))/sizeof(u32));
2963 i = i + 2) {
Maggie Zhangf16a1752010-12-09 19:12:32 -08002964#ifdef __BIG_ENDIAN
Jing Huangba816ea2010-10-18 17:10:50 -07002965 dip[i] = be32_to_cpu(sip[i]);
2966 dip[i + 1] = be32_to_cpu(sip[i + 1]);
Krishna Gudipatia36c61f2010-09-15 11:50:55 -07002967#else
Jing Huangba816ea2010-10-18 17:10:50 -07002968 dip[i] = be32_to_cpu(sip[i + 1]);
2969 dip[i + 1] = be32_to_cpu(sip[i]);
Krishna Gudipatia36c61f2010-09-15 11:50:55 -07002970#endif
2971 }
2972}
2973
/*
 * Completion callback for a stats-get request.  On completion, swap the
 * raw firmware stats into the caller's buffer (FC QoS or FCoE layout,
 * depending on IOC mode) and invoke the caller's callback with the
 * final status.  On cancellation, just clear the busy flag.
 */
static void
__bfa_cb_fcport_stats_get(void *cbarg, bfa_boolean_t complete)
{
	struct bfa_fcport_s *fcport = cbarg;

	if (complete) {
		if (fcport->stats_status == BFA_STATUS_OK) {
			struct timeval tv;

			/* Swap FC QoS or FCoE stats */
			if (bfa_ioc_get_fcmode(&fcport->bfa->ioc)) {
				bfa_fcport_qos_stats_swap(
					&fcport->stats_ret->fcqos,
					&fcport->stats->fcqos);
			} else {
				bfa_fcport_fcoe_stats_swap(
					&fcport->stats_ret->fcoe,
					&fcport->stats->fcoe);

				/* Report seconds since the last stats reset. */
				do_gettimeofday(&tv);
				fcport->stats_ret->fcoe.secs_reset =
					tv.tv_sec - fcport->stats_reset_time;
			}
		}
		fcport->stats_cbfn(fcport->stats_cbarg, fcport->stats_status);
	} else {
		/* Cancelled: release the stats machinery for reuse. */
		fcport->stats_busy = BFA_FALSE;
		fcport->stats_status = BFA_STATUS_OK;
	}
}
3004
/*
 * Timer callback: the stats-get request timed out.  Cancel any pending
 * reqq wait and complete the request back to the caller with
 * BFA_STATUS_ETIMER via the callback queue.
 */
static void
bfa_fcport_stats_get_timeout(void *cbarg)
{
	struct bfa_fcport_s *fcport = (struct bfa_fcport_s *) cbarg;

	bfa_trc(fcport->bfa, fcport->stats_qfull);

	if (fcport->stats_qfull) {
		bfa_reqq_wcancel(&fcport->stats_reqq_wait);
		fcport->stats_qfull = BFA_FALSE;
	}

	fcport->stats_status = BFA_STATUS_ETIMER;
	bfa_cb_queue(fcport->bfa, &fcport->hcb_qe, __bfa_cb_fcport_stats_get,
		fcport);
}
3021
/*
 * Build and post a stats-get request to firmware. If the request queue
 * is full, park on the queue-resume wait list; this same function is
 * registered as the resume callback, so it retries automatically.
 */
static void
bfa_fcport_send_stats_get(void *cbarg)
{
	struct bfa_fcport_s *fcport = (struct bfa_fcport_s *) cbarg;
	struct bfi_fcport_req_s *msg;

	msg = bfa_reqq_next(fcport->bfa, BFA_REQQ_PORT);

	if (!msg) {
		/* no free request element; wait for queue space */
		fcport->stats_qfull = BFA_TRUE;
		bfa_reqq_winit(&fcport->stats_reqq_wait,
				bfa_fcport_send_stats_get, fcport);
		bfa_reqq_wait(fcport->bfa, BFA_REQQ_PORT,
				&fcport->stats_reqq_wait);
		return;
	}
	fcport->stats_qfull = BFA_FALSE;

	memset(msg, 0, sizeof(struct bfi_fcport_req_s));
	bfi_h2i_set(msg->mh, BFI_MC_FCPORT, BFI_FCPORT_H2I_STATS_GET_REQ,
			bfa_lpuid(fcport->bfa));
	bfa_reqq_produce(fcport->bfa, BFA_REQQ_PORT);
}
3045
/*
 * Completion callback for a port stats-clear request, scheduled on the
 * hcb queue by the ISR or the timeout handler.
 *
 * @cbarg    : fcport whose statistics were cleared
 * @complete : BFA_TRUE when invoked as the real completion; BFA_FALSE
 *             when the queued callback is being flushed/canceled
 */
static void
__bfa_cb_fcport_stats_clr(void *cbarg, bfa_boolean_t complete)
{
	struct bfa_fcport_s *fcport = cbarg;

	if (complete) {
		struct timeval tv;

		/*
		 * re-initialize time stamp for stats reset
		 */
		do_gettimeofday(&tv);
		fcport->stats_reset_time = tv.tv_sec;

		fcport->stats_cbfn(fcport->stats_cbarg, fcport->stats_status);
	} else {
		/* flushed: drop busy flag so a new request can be issued */
		fcport->stats_busy = BFA_FALSE;
		fcport->stats_status = BFA_STATUS_OK;
	}
}
3066
/*
 * Timer callback: firmware did not answer a stats-clear request within
 * BFA_FCPORT_STATS_TOV. Cancel any pending request-queue wait and
 * complete the caller's callback with BFA_STATUS_ETIMER.
 */
static void
bfa_fcport_stats_clr_timeout(void *cbarg)
{
	struct bfa_fcport_s *fcport = (struct bfa_fcport_s *) cbarg;

	bfa_trc(fcport->bfa, fcport->stats_qfull);

	if (fcport->stats_qfull) {
		/* request never reached firmware; stop waiting for space */
		bfa_reqq_wcancel(&fcport->stats_reqq_wait);
		fcport->stats_qfull = BFA_FALSE;
	}

	fcport->stats_status = BFA_STATUS_ETIMER;
	bfa_cb_queue(fcport->bfa, &fcport->hcb_qe,
			__bfa_cb_fcport_stats_clr, fcport);
}
3083
/*
 * Build and post a stats-clear request to firmware. If the request
 * queue is full, park on the queue-resume wait list; this function is
 * registered as the resume callback, so it retries automatically.
 */
static void
bfa_fcport_send_stats_clear(void *cbarg)
{
	struct bfa_fcport_s *fcport = (struct bfa_fcport_s *) cbarg;
	struct bfi_fcport_req_s *msg;

	msg = bfa_reqq_next(fcport->bfa, BFA_REQQ_PORT);

	if (!msg) {
		/* no free request element; wait for queue space */
		fcport->stats_qfull = BFA_TRUE;
		bfa_reqq_winit(&fcport->stats_reqq_wait,
				bfa_fcport_send_stats_clear, fcport);
		bfa_reqq_wait(fcport->bfa, BFA_REQQ_PORT,
				&fcport->stats_reqq_wait);
		return;
	}
	fcport->stats_qfull = BFA_FALSE;

	memset(msg, 0, sizeof(struct bfi_fcport_req_s));
	bfi_h2i_set(msg->mh, BFI_MC_FCPORT, BFI_FCPORT_H2I_STATS_CLEAR_REQ,
			bfa_lpuid(fcport->bfa));
	bfa_reqq_produce(fcport->bfa, BFA_REQQ_PORT);
}
3107
/*
 * Handle trunk SCN (state change notification) event from firmware.
 * Updates the cached trunk attributes, records per-link state, logs
 * which links are up, and notifies upper layers on a state change.
 */
static void
bfa_trunk_scn(struct bfa_fcport_s *fcport, struct bfi_fcport_trunk_scn_s *scn)
{
	struct bfa_fcport_trunk_s *trunk = &fcport->trunk;
	struct bfi_fcport_trunk_link_s *tlink;
	struct bfa_trunk_link_attr_s *lattr;
	enum bfa_trunk_state state_prev;
	int i;
	int link_bm = 0;	/* bitmap of trunk links reported UP */

	bfa_trc(fcport->bfa, fcport->cfg.trunked);
	bfa_assert(scn->trunk_state == BFA_TRUNK_ONLINE ||
		scn->trunk_state == BFA_TRUNK_OFFLINE);

	bfa_trc(fcport->bfa, trunk->attr.state);
	bfa_trc(fcport->bfa, scn->trunk_state);
	bfa_trc(fcport->bfa, scn->trunk_speed);

	/*
	 * Save off new state for trunk attribute query
	 */
	state_prev = trunk->attr.state;
	if (fcport->cfg.trunked && (trunk->attr.state != BFA_TRUNK_DISABLED))
		trunk->attr.state = scn->trunk_state;
	trunk->attr.speed = scn->trunk_speed;
	for (i = 0; i < BFA_TRUNK_MAX_PORTS; i++) {
		lattr = &trunk->attr.link_attr[i];
		tlink = &scn->tlink[i];

		lattr->link_state = tlink->state;
		lattr->trunk_wwn = tlink->trunk_wwn;
		lattr->fctl = tlink->fctl;
		lattr->speed = tlink->speed;
		/* deskew arrives in wire (big-endian) order */
		lattr->deskew = be32_to_cpu(tlink->deskew);

		if (tlink->state == BFA_TRUNK_LINK_STATE_UP) {
			fcport->speed = tlink->speed;
			fcport->topology = BFA_PORT_TOPOLOGY_P2P;
			link_bm |= 1 << i;
		}

		bfa_trc(fcport->bfa, lattr->link_state);
		bfa_trc(fcport->bfa, lattr->trunk_wwn);
		bfa_trc(fcport->bfa, lattr->fctl);
		bfa_trc(fcport->bfa, lattr->speed);
		bfa_trc(fcport->bfa, lattr->deskew);
	}

	/* log which of the two trunk links came up */
	switch (link_bm) {
	case 3:
		bfa_plog_str(fcport->bfa->plog, BFA_PL_MID_HAL,
			BFA_PL_EID_TRUNK_SCN, 0, "Trunk up(0,1)");
		break;
	case 2:
		bfa_plog_str(fcport->bfa->plog, BFA_PL_MID_HAL,
			BFA_PL_EID_TRUNK_SCN, 0, "Trunk up(-,1)");
		break;
	case 1:
		bfa_plog_str(fcport->bfa->plog, BFA_PL_MID_HAL,
			BFA_PL_EID_TRUNK_SCN, 0, "Trunk up(0,-)");
		break;
	default:
		bfa_plog_str(fcport->bfa->plog, BFA_PL_MID_HAL,
			BFA_PL_EID_TRUNK_SCN, 0, "Trunk down");
	}

	/*
	 * Notify upper layers if trunk state changed.
	 */
	if ((state_prev != trunk->attr.state) ||
		(scn->trunk_state == BFA_TRUNK_OFFLINE)) {
		bfa_fcport_scn(fcport, (scn->trunk_state == BFA_TRUNK_ONLINE) ?
			BFA_PORT_LINKUP : BFA_PORT_LINKDOWN, BFA_TRUE);
	}
}
3186
/*
 * IOC went down while trunking may be configured: force the cached
 * trunk attributes to the offline defaults and notify upper layers.
 */
static void
bfa_trunk_iocdisable(struct bfa_s *bfa)
{
	struct bfa_fcport_s *fcport = BFA_FCPORT_MOD(bfa);
	int i = 0;

	/*
	 * In trunked mode, notify upper layers that link is down
	 */
	if (fcport->cfg.trunked) {
		if (fcport->trunk.attr.state == BFA_TRUNK_ONLINE)
			bfa_fcport_scn(fcport, BFA_PORT_LINKDOWN, BFA_TRUE);

		/* reset every per-link attribute to its default */
		fcport->trunk.attr.state = BFA_TRUNK_OFFLINE;
		fcport->trunk.attr.speed = BFA_PORT_SPEED_UNKNOWN;
		for (i = 0; i < BFA_TRUNK_MAX_PORTS; i++) {
			fcport->trunk.attr.link_attr[i].trunk_wwn = 0;
			fcport->trunk.attr.link_attr[i].fctl =
				BFA_TRUNK_LINK_FCTL_NORMAL;
			fcport->trunk.attr.link_attr[i].link_state =
				BFA_TRUNK_LINK_STATE_DN_LINKDN;
			fcport->trunk.attr.link_attr[i].speed =
				BFA_PORT_SPEED_UNKNOWN;
			fcport->trunk.attr.link_attr[i].deskew = 0;
		}
	}
}
3214
/*
 * Called to initialize port attributes from IOC hardware data
 * (WWNs, max frame size, receive BB credits, supported speed).
 */
void
bfa_fcport_init(struct bfa_s *bfa)
{
	struct bfa_fcport_s *fcport = BFA_FCPORT_MOD(bfa);

	/*
	 * Initialize port attributes from IOC hardware data.
	 */
	bfa_fcport_set_wwns(fcport);
	if (fcport->cfg.maxfrsize == 0)
		fcport->cfg.maxfrsize = bfa_ioc_maxfrsize(&bfa->ioc);
	fcport->cfg.rx_bbcredit = bfa_ioc_rx_bbcredit(&bfa->ioc);
	fcport->speed_sup = bfa_ioc_speed_sup(&bfa->ioc);

	/* the IOC must have supplied non-zero values by this point */
	bfa_assert(fcport->cfg.maxfrsize);
	bfa_assert(fcport->cfg.rx_bbcredit);
	bfa_assert(fcport->speed_sup);
}
3236
/*
 * Firmware message handler: dispatches fcport I2H messages to the port
 * state machine or completes pending stats requests.
 */
void
bfa_fcport_isr(struct bfa_s *bfa, struct bfi_msg_s *msg)
{
	struct bfa_fcport_s *fcport = BFA_FCPORT_MOD(bfa);
	union bfi_fcport_i2h_msg_u i2hmsg;

	i2hmsg.msg = msg;
	/* stash the raw message for state-machine handlers to inspect */
	fcport->event_arg.i2hmsg = i2hmsg;

	bfa_trc(bfa, msg->mhdr.msg_id);
	bfa_trc(bfa, bfa_sm_to_state(hal_port_sm_table, fcport->sm));

	switch (msg->mhdr.msg_id) {
	case BFI_FCPORT_I2H_ENABLE_RSP:
		/* ignore stale responses from superseded requests */
		if (fcport->msgtag == i2hmsg.penable_rsp->msgtag)
			bfa_sm_send_event(fcport, BFA_FCPORT_SM_FWRSP);
		break;

	case BFI_FCPORT_I2H_DISABLE_RSP:
		if (fcport->msgtag == i2hmsg.penable_rsp->msgtag)
			bfa_sm_send_event(fcport, BFA_FCPORT_SM_FWRSP);
		break;

	case BFI_FCPORT_I2H_EVENT:
		if (i2hmsg.event->link_state.linkstate == BFA_PORT_LINKUP)
			bfa_sm_send_event(fcport, BFA_FCPORT_SM_LINKUP);
		else
			bfa_sm_send_event(fcport, BFA_FCPORT_SM_LINKDOWN);
		break;

	case BFI_FCPORT_I2H_TRUNK_SCN:
		bfa_trunk_scn(fcport, i2hmsg.trunk_scn);
		break;

	case BFI_FCPORT_I2H_STATS_GET_RSP:
		/*
		 * check for timer pop before processing the rsp
		 */
		if (fcport->stats_busy == BFA_FALSE ||
		    fcport->stats_status == BFA_STATUS_ETIMER)
			break;

		bfa_timer_stop(&fcport->timer);
		fcport->stats_status = i2hmsg.pstatsget_rsp->status;
		bfa_cb_queue(fcport->bfa, &fcport->hcb_qe,
			     __bfa_cb_fcport_stats_get, fcport);
		break;

	case BFI_FCPORT_I2H_STATS_CLEAR_RSP:
		/*
		 * check for timer pop before processing the rsp
		 */
		if (fcport->stats_busy == BFA_FALSE ||
		    fcport->stats_status == BFA_STATUS_ETIMER)
			break;

		bfa_timer_stop(&fcport->timer);
		fcport->stats_status = BFA_STATUS_OK;
		bfa_cb_queue(fcport->bfa, &fcport->hcb_qe,
			     __bfa_cb_fcport_stats_clr, fcport);
		break;

	case BFI_FCPORT_I2H_ENABLE_AEN:
		bfa_sm_send_event(fcport, BFA_FCPORT_SM_ENABLE);
		break;

	case BFI_FCPORT_I2H_DISABLE_AEN:
		bfa_sm_send_event(fcport, BFA_FCPORT_SM_DISABLE);
		break;

	default:
		/* unexpected message id from firmware */
		bfa_assert(0);
		break;
	}
}
3315
Jing Huang5fbe25c2010-10-18 17:17:23 -07003316/*
Krishna Gudipatia36c61f2010-09-15 11:50:55 -07003317 * Registered callback for port events.
3318 */
3319void
3320bfa_fcport_event_register(struct bfa_s *bfa,
3321 void (*cbfn) (void *cbarg,
3322 enum bfa_port_linkstate event),
3323 void *cbarg)
3324{
3325 struct bfa_fcport_s *fcport = BFA_FCPORT_MOD(bfa);
3326
3327 fcport->event_cbfn = cbfn;
3328 fcport->event_cbarg = cbarg;
3329}
3330
/*
 * Request port enable. Rejected while the IOC is disabled or a
 * diagnostic operation is in progress; otherwise kicks the port
 * state machine with an ENABLE event.
 */
bfa_status_t
bfa_fcport_enable(struct bfa_s *bfa)
{
	struct bfa_fcport_s *fcport = BFA_FCPORT_MOD(bfa);

	if (bfa_ioc_is_disabled(&bfa->ioc))
		return BFA_STATUS_IOC_DISABLED;

	if (fcport->diag_busy)
		return BFA_STATUS_DIAG_BUSY;

	bfa_sm_send_event(BFA_FCPORT_MOD(bfa), BFA_FCPORT_SM_ENABLE);
	return BFA_STATUS_OK;
}
3345
/*
 * Request port disable. Rejected while the IOC is disabled; otherwise
 * kicks the port state machine with a DISABLE event.
 */
bfa_status_t
bfa_fcport_disable(struct bfa_s *bfa)
{

	if (bfa_ioc_is_disabled(&bfa->ioc))
		return BFA_STATUS_IOC_DISABLED;

	bfa_sm_send_event(BFA_FCPORT_MOD(bfa), BFA_FCPORT_SM_DISABLE);
	return BFA_STATUS_OK;
}
3356
/*
 * Configure port speed.
 * Rejected when trunking is enabled (trunk controls speed) or when a
 * fixed speed above the hardware-supported maximum is requested.
 */
bfa_status_t
bfa_fcport_cfg_speed(struct bfa_s *bfa, enum bfa_port_speed speed)
{
	struct bfa_fcport_s *fcport = BFA_FCPORT_MOD(bfa);

	bfa_trc(bfa, speed);

	if (fcport->cfg.trunked == BFA_TRUE)
		return BFA_STATUS_TRUNK_ENABLED;
	if ((speed != BFA_PORT_SPEED_AUTO) && (speed > fcport->speed_sup)) {
		bfa_trc(bfa, fcport->speed_sup);
		return BFA_STATUS_UNSUPP_SPEED;
	}

	fcport->cfg.speed = speed;

	return BFA_STATUS_OK;
}
3378
Jing Huang5fbe25c2010-10-18 17:17:23 -07003379/*
Krishna Gudipatia36c61f2010-09-15 11:50:55 -07003380 * Get current speed.
3381 */
3382enum bfa_port_speed
3383bfa_fcport_get_speed(struct bfa_s *bfa)
3384{
3385 struct bfa_fcport_s *fcport = BFA_FCPORT_MOD(bfa);
3386
3387 return fcport->speed;
3388}
3389
Jing Huang5fbe25c2010-10-18 17:17:23 -07003390/*
Krishna Gudipatia36c61f2010-09-15 11:50:55 -07003391 * Configure port topology.
3392 */
3393bfa_status_t
3394bfa_fcport_cfg_topology(struct bfa_s *bfa, enum bfa_port_topology topology)
3395{
3396 struct bfa_fcport_s *fcport = BFA_FCPORT_MOD(bfa);
3397
3398 bfa_trc(bfa, topology);
3399 bfa_trc(bfa, fcport->cfg.topology);
3400
3401 switch (topology) {
3402 case BFA_PORT_TOPOLOGY_P2P:
3403 case BFA_PORT_TOPOLOGY_LOOP:
3404 case BFA_PORT_TOPOLOGY_AUTO:
3405 break;
3406
3407 default:
3408 return BFA_STATUS_EINVAL;
3409 }
3410
3411 fcport->cfg.topology = topology;
3412 return BFA_STATUS_OK;
3413}
3414
Jing Huang5fbe25c2010-10-18 17:17:23 -07003415/*
Krishna Gudipatia36c61f2010-09-15 11:50:55 -07003416 * Get current topology.
3417 */
3418enum bfa_port_topology
3419bfa_fcport_get_topology(struct bfa_s *bfa)
3420{
3421 struct bfa_fcport_s *fcport = BFA_FCPORT_MOD(bfa);
3422
3423 return fcport->topology;
3424}
3425
/*
 * Enable hard (fixed) ALPA assignment and record the requested ALPA
 * in the port configuration. Always succeeds.
 */
bfa_status_t
bfa_fcport_cfg_hardalpa(struct bfa_s *bfa, u8 alpa)
{
	struct bfa_fcport_s *fcport = BFA_FCPORT_MOD(bfa);

	bfa_trc(bfa, alpa);
	bfa_trc(bfa, fcport->cfg.cfg_hardalpa);
	bfa_trc(bfa, fcport->cfg.hardalpa);

	fcport->cfg.cfg_hardalpa = BFA_TRUE;
	fcport->cfg.hardalpa = alpa;

	return BFA_STATUS_OK;
}
3440
/*
 * Disable hard ALPA assignment; the previously configured ALPA value
 * itself is left in place. Always succeeds.
 */
bfa_status_t
bfa_fcport_clr_hardalpa(struct bfa_s *bfa)
{
	struct bfa_fcport_s *fcport = BFA_FCPORT_MOD(bfa);

	bfa_trc(bfa, fcport->cfg.cfg_hardalpa);
	bfa_trc(bfa, fcport->cfg.hardalpa);

	fcport->cfg.cfg_hardalpa = BFA_FALSE;
	return BFA_STATUS_OK;
}
3452
3453bfa_boolean_t
3454bfa_fcport_get_hardalpa(struct bfa_s *bfa, u8 *alpa)
3455{
3456 struct bfa_fcport_s *fcport = BFA_FCPORT_MOD(bfa);
3457
3458 *alpa = fcport->cfg.hardalpa;
3459 return fcport->cfg.cfg_hardalpa;
3460}
3461
3462u8
3463bfa_fcport_get_myalpa(struct bfa_s *bfa)
3464{
3465 struct bfa_fcport_s *fcport = BFA_FCPORT_MOD(bfa);
3466
3467 return fcport->myalpa;
3468}
3469
/*
 * Configure the port's maximum receive frame size.
 * The size must lie within [FC_MIN_PDUSZ, FC_MAX_PDUSZ] and be either
 * a power of two or exactly FC_MAX_PDUSZ (2112, which is not a power
 * of two); otherwise BFA_STATUS_INVLD_DFSZ is returned.
 */
bfa_status_t
bfa_fcport_cfg_maxfrsize(struct bfa_s *bfa, u16 maxfrsize)
{
	struct bfa_fcport_s *fcport = BFA_FCPORT_MOD(bfa);

	bfa_trc(bfa, maxfrsize);
	bfa_trc(bfa, fcport->cfg.maxfrsize);

	/* with in range */
	if ((maxfrsize > FC_MAX_PDUSZ) || (maxfrsize < FC_MIN_PDUSZ))
		return BFA_STATUS_INVLD_DFSZ;

	/* power of 2, if not the max frame size of 2112 */
	if ((maxfrsize != FC_MAX_PDUSZ) && (maxfrsize & (maxfrsize - 1)))
		return BFA_STATUS_INVLD_DFSZ;

	fcport->cfg.maxfrsize = maxfrsize;
	return BFA_STATUS_OK;
}
3489
3490u16
3491bfa_fcport_get_maxfrsize(struct bfa_s *bfa)
3492{
3493 struct bfa_fcport_s *fcport = BFA_FCPORT_MOD(bfa);
3494
3495 return fcport->cfg.maxfrsize;
3496}
3497
3498u8
3499bfa_fcport_get_rx_bbcredit(struct bfa_s *bfa)
3500{
3501 struct bfa_fcport_s *fcport = BFA_FCPORT_MOD(bfa);
3502
3503 return fcport->cfg.rx_bbcredit;
3504}
3505
/*
 * Set the transmit buffer-to-buffer credit count and push the new
 * value to firmware.
 *
 * NOTE(review): tx_bbcredit is narrowed from u16 to u8 here — values
 * above 255 are silently truncated; presumably callers never exceed
 * the u8 range of the config field. TODO confirm against callers.
 */
void
bfa_fcport_set_tx_bbcredit(struct bfa_s *bfa, u16 tx_bbcredit)
{
	struct bfa_fcport_s *fcport = BFA_FCPORT_MOD(bfa);

	fcport->cfg.tx_bbcredit = (u8)tx_bbcredit;
	bfa_fcport_send_txcredit(fcport);
}
3514
Jing Huang5fbe25c2010-10-18 17:17:23 -07003515/*
Krishna Gudipatia36c61f2010-09-15 11:50:55 -07003516 * Get port attributes.
3517 */
3518
3519wwn_t
3520bfa_fcport_get_wwn(struct bfa_s *bfa, bfa_boolean_t node)
3521{
3522 struct bfa_fcport_s *fcport = BFA_FCPORT_MOD(bfa);
3523 if (node)
3524 return fcport->nwwn;
3525 else
3526 return fcport->pwwn;
3527}
3528
/*
 * Fill *attr with a snapshot of the port's configuration and
 * operational attributes (WWNs, speed, topology, beacon, state).
 */
void
bfa_fcport_get_attr(struct bfa_s *bfa, struct bfa_port_attr_s *attr)
{
	struct bfa_fcport_s *fcport = BFA_FCPORT_MOD(bfa);

	memset(attr, 0, sizeof(struct bfa_port_attr_s));

	attr->nwwn = fcport->nwwn;
	attr->pwwn = fcport->pwwn;

	/* factory WWNs come from the IOC's manufacturing block */
	attr->factorypwwn = bfa->ioc.attr->mfg_pwwn;
	attr->factorynwwn = bfa->ioc.attr->mfg_nwwn;

	memcpy(&attr->pport_cfg, &fcport->cfg,
		sizeof(struct bfa_port_cfg_s));
	/* speed attributes */
	attr->pport_cfg.speed = fcport->cfg.speed;
	attr->speed_supported = fcport->speed_sup;
	attr->speed = fcport->speed;
	attr->cos_supported = FC_CLASS_3;

	/* topology attributes */
	attr->pport_cfg.topology = fcport->cfg.topology;
	attr->topology = fcport->topology;
	attr->pport_cfg.trunked = fcport->cfg.trunked;

	/* beacon attributes */
	attr->beacon = fcport->beacon;
	attr->link_e2e_beacon = fcport->link_e2e_beacon;
	attr->plog_enabled = (bfa_boolean_t)fcport->bfa->plog->plog_enabled;
	attr->io_profile = bfa_fcpim_get_io_profile(fcport->bfa);

	attr->pport_cfg.path_tov = bfa_fcpim_path_tov_get(bfa);
	attr->pport_cfg.q_depth = bfa_fcpim_qdepth_get(bfa);
	/* IOC problems override the state-machine-derived port state */
	attr->port_state = bfa_sm_to_state(hal_port_sm_table, fcport->sm);
	if (bfa_ioc_is_disabled(&fcport->bfa->ioc))
		attr->port_state = BFA_PORT_ST_IOCDIS;
	else if (bfa_ioc_fw_mismatch(&fcport->bfa->ioc))
		attr->port_state = BFA_PORT_ST_FWMISMATCH;

	/* FCoE vlan */
	attr->fcoe_vlan = fcport->fcoe_vlan;
}
3572
3573#define BFA_FCPORT_STATS_TOV 1000
3574
/*
 * Fetch port statistics (FCQoS or FCoE).
 * Asynchronous: @cbfn is invoked with @cbarg when firmware responds or
 * the request times out. Only one stats operation may be outstanding;
 * a second request while busy returns BFA_STATUS_DEVBUSY.
 */
bfa_status_t
bfa_fcport_get_stats(struct bfa_s *bfa, union bfa_fcport_stats_u *stats,
		     bfa_cb_port_t cbfn, void *cbarg)
{
	struct bfa_fcport_s *fcport = BFA_FCPORT_MOD(bfa);

	if (fcport->stats_busy) {
		bfa_trc(bfa, fcport->stats_busy);
		return BFA_STATUS_DEVBUSY;
	}

	fcport->stats_busy = BFA_TRUE;
	fcport->stats_ret = stats;
	fcport->stats_cbfn = cbfn;
	fcport->stats_cbarg = cbarg;

	bfa_fcport_send_stats_get(fcport);

	/* guard the firmware round-trip with a timeout */
	bfa_timer_start(bfa, &fcport->timer, bfa_fcport_stats_get_timeout,
			fcport, BFA_FCPORT_STATS_TOV);
	return BFA_STATUS_OK;
}
3600
/*
 * Reset port statistics (FCQoS or FCoE).
 * Asynchronous: @cbfn is invoked with @cbarg on completion or timeout.
 * Only one stats operation may be outstanding; a second request while
 * busy returns BFA_STATUS_DEVBUSY.
 */
bfa_status_t
bfa_fcport_clear_stats(struct bfa_s *bfa, bfa_cb_port_t cbfn, void *cbarg)
{
	struct bfa_fcport_s *fcport = BFA_FCPORT_MOD(bfa);

	if (fcport->stats_busy) {
		bfa_trc(bfa, fcport->stats_busy);
		return BFA_STATUS_DEVBUSY;
	}

	fcport->stats_busy = BFA_TRUE;
	fcport->stats_cbfn = cbfn;
	fcport->stats_cbarg = cbarg;

	bfa_fcport_send_stats_clear(fcport);

	/* guard the firmware round-trip with a timeout */
	bfa_timer_start(bfa, &fcport->timer, bfa_fcport_stats_clr_timeout,
			fcport, BFA_FCPORT_STATS_TOV);
	return BFA_STATUS_OK;
}
3624
Krishna Gudipatia36c61f2010-09-15 11:50:55 -07003625
/*
 * Return whether the port state machine is in the DISABLED state.
 * (The previous comment here, "Fetch port attributes.", was stale.)
 */
bfa_boolean_t
bfa_fcport_is_disabled(struct bfa_s *bfa)
{
	struct bfa_fcport_s *fcport = BFA_FCPORT_MOD(bfa);

	return bfa_sm_to_state(hal_port_sm_table, fcport->sm) ==
		BFA_PORT_ST_DISABLED;

}
3638
3639bfa_boolean_t
3640bfa_fcport_is_ratelim(struct bfa_s *bfa)
3641{
3642 struct bfa_fcport_s *fcport = BFA_FCPORT_MOD(bfa);
3643
3644 return fcport->cfg.ratelimit ? BFA_TRUE : BFA_FALSE;
3645
3646}
3647
/*
 * Get default minimum ratelim speed configured for the port.
 */
enum bfa_port_speed
bfa_fcport_get_ratelim_speed(struct bfa_s *bfa)
{
	struct bfa_fcport_s *fcport = BFA_FCPORT_MOD(bfa);

	bfa_trc(bfa, fcport->cfg.trl_def_speed);
	return fcport->cfg.trl_def_speed;

}
Krishna Gudipatia36c61f2010-09-15 11:50:55 -07003660
/*
 * Return whether the port link is up: in non-trunked mode the state
 * machine must be in linkup; in trunked mode the trunk must be online.
 */
bfa_boolean_t
bfa_fcport_is_linkup(struct bfa_s *bfa)
{
	struct bfa_fcport_s *fcport = BFA_FCPORT_MOD(bfa);

	return (!fcport->cfg.trunked &&
		 bfa_sm_cmp_state(fcport, bfa_fcport_sm_linkup)) ||
		(fcport->cfg.trunked &&
		 fcport->trunk.attr.state == BFA_TRUNK_ONLINE);
}
3671
3672bfa_boolean_t
3673bfa_fcport_is_qos_enabled(struct bfa_s *bfa)
3674{
3675 struct bfa_fcport_s *fcport = BFA_FCPORT_MOD(bfa);
3676
3677 return fcport->cfg.qos_enabled;
3678}
3679
Jing Huang5fbe25c2010-10-18 17:17:23 -07003680/*
Krishna Gudipatia36c61f2010-09-15 11:50:55 -07003681 * Rport State machine functions
3682 */
/*
 * Beginning state: rport object not yet created. Only the CREATE
 * event is expected; anything else is a state-machine fault.
 */
static void
bfa_rport_sm_uninit(struct bfa_rport_s *rp, enum bfa_rport_event event)
{
	bfa_trc(rp->bfa, rp->rport_tag);
	bfa_trc(rp->bfa, event);

	switch (event) {
	case BFA_RPORT_SM_CREATE:
		bfa_stats(rp, sm_un_cr);
		bfa_sm_set_state(rp, bfa_rport_sm_created);
		break;

	default:
		bfa_stats(rp, sm_un_unexp);
		bfa_sm_fault(rp->bfa, event);
	}
}
3703
/*
 * Created state: rport exists in the driver but not yet in firmware.
 * ONLINE triggers a firmware-create request; DELETE frees the rport;
 * HWFAIL parks the rport until the IOC recovers.
 */
static void
bfa_rport_sm_created(struct bfa_rport_s *rp, enum bfa_rport_event event)
{
	bfa_trc(rp->bfa, rp->rport_tag);
	bfa_trc(rp->bfa, event);

	switch (event) {
	case BFA_RPORT_SM_ONLINE:
		bfa_stats(rp, sm_cr_on);
		/* qfull variant is entered when the request queue is full */
		if (bfa_rport_send_fwcreate(rp))
			bfa_sm_set_state(rp, bfa_rport_sm_fwcreate);
		else
			bfa_sm_set_state(rp, bfa_rport_sm_fwcreate_qfull);
		break;

	case BFA_RPORT_SM_DELETE:
		bfa_stats(rp, sm_cr_del);
		bfa_sm_set_state(rp, bfa_rport_sm_uninit);
		bfa_rport_free(rp);
		break;

	case BFA_RPORT_SM_HWFAIL:
		bfa_stats(rp, sm_cr_hwf);
		bfa_sm_set_state(rp, bfa_rport_sm_iocdisable);
		break;

	default:
		bfa_stats(rp, sm_cr_unexp);
		bfa_sm_fault(rp->bfa, event);
	}
}
3735
/*
 * Waiting for rport create response from firmware. DELETE and OFFLINE
 * cannot be acted on until the response arrives, so they move to the
 * corresponding "pending" states.
 */
static void
bfa_rport_sm_fwcreate(struct bfa_rport_s *rp, enum bfa_rport_event event)
{
	bfa_trc(rp->bfa, rp->rport_tag);
	bfa_trc(rp->bfa, event);

	switch (event) {
	case BFA_RPORT_SM_FWRSP:
		bfa_stats(rp, sm_fwc_rsp);
		bfa_sm_set_state(rp, bfa_rport_sm_online);
		bfa_rport_online_cb(rp);
		break;

	case BFA_RPORT_SM_DELETE:
		bfa_stats(rp, sm_fwc_del);
		bfa_sm_set_state(rp, bfa_rport_sm_delete_pending);
		break;

	case BFA_RPORT_SM_OFFLINE:
		bfa_stats(rp, sm_fwc_off);
		bfa_sm_set_state(rp, bfa_rport_sm_offline_pending);
		break;

	case BFA_RPORT_SM_HWFAIL:
		bfa_stats(rp, sm_fwc_hwf);
		bfa_sm_set_state(rp, bfa_rport_sm_iocdisable);
		break;

	default:
		bfa_stats(rp, sm_fwc_unexp);
		bfa_sm_fault(rp->bfa, event);
	}
}
3772
/*
 * Request queue is full, awaiting queue resume to send create request.
 * Every exit path other than QRESUME must cancel the queue wait.
 */
static void
bfa_rport_sm_fwcreate_qfull(struct bfa_rport_s *rp, enum bfa_rport_event event)
{
	bfa_trc(rp->bfa, rp->rport_tag);
	bfa_trc(rp->bfa, event);

	switch (event) {
	case BFA_RPORT_SM_QRESUME:
		/* queue space available again; retry the create */
		bfa_sm_set_state(rp, bfa_rport_sm_fwcreate);
		bfa_rport_send_fwcreate(rp);
		break;

	case BFA_RPORT_SM_DELETE:
		bfa_stats(rp, sm_fwc_del);
		bfa_sm_set_state(rp, bfa_rport_sm_uninit);
		bfa_reqq_wcancel(&rp->reqq_wait);
		bfa_rport_free(rp);
		break;

	case BFA_RPORT_SM_OFFLINE:
		bfa_stats(rp, sm_fwc_off);
		bfa_sm_set_state(rp, bfa_rport_sm_offline);
		bfa_reqq_wcancel(&rp->reqq_wait);
		bfa_rport_offline_cb(rp);
		break;

	case BFA_RPORT_SM_HWFAIL:
		bfa_stats(rp, sm_fwc_hwf);
		bfa_sm_set_state(rp, bfa_rport_sm_iocdisable);
		bfa_reqq_wcancel(&rp->reqq_wait);
		break;

	default:
		bfa_stats(rp, sm_fwc_unexp);
		bfa_sm_fault(rp->bfa, event);
	}
}
3813
/*
 * Online state - normal parking state. Handles offline/delete (both
 * require a firmware delete first), speed changes, and QoS state
 * change notifications from firmware.
 */
static void
bfa_rport_sm_online(struct bfa_rport_s *rp, enum bfa_rport_event event)
{
	struct bfi_rport_qos_scn_s *qos_scn;

	bfa_trc(rp->bfa, rp->rport_tag);
	bfa_trc(rp->bfa, event);

	switch (event) {
	case BFA_RPORT_SM_OFFLINE:
		bfa_stats(rp, sm_on_off);
		if (bfa_rport_send_fwdelete(rp))
			bfa_sm_set_state(rp, bfa_rport_sm_fwdelete);
		else
			bfa_sm_set_state(rp, bfa_rport_sm_fwdelete_qfull);
		break;

	case BFA_RPORT_SM_DELETE:
		bfa_stats(rp, sm_on_del);
		if (bfa_rport_send_fwdelete(rp))
			bfa_sm_set_state(rp, bfa_rport_sm_deleting);
		else
			bfa_sm_set_state(rp, bfa_rport_sm_deleting_qfull);
		break;

	case BFA_RPORT_SM_HWFAIL:
		bfa_stats(rp, sm_on_hwf);
		bfa_sm_set_state(rp, bfa_rport_sm_iocdisable);
		break;

	case BFA_RPORT_SM_SET_SPEED:
		bfa_rport_send_fwspeed(rp);
		break;

	case BFA_RPORT_SM_QOS_SCN:
		qos_scn = (struct bfi_rport_qos_scn_s *) rp->event_arg.fw_msg;
		rp->qos_attr = qos_scn->new_qos_attr;
		bfa_trc(rp->bfa, qos_scn->old_qos_attr.qos_flow_id);
		bfa_trc(rp->bfa, qos_scn->new_qos_attr.qos_flow_id);
		bfa_trc(rp->bfa, qos_scn->old_qos_attr.qos_priority);
		bfa_trc(rp->bfa, qos_scn->new_qos_attr.qos_priority);

		/* flow ids arrive in wire (big-endian) order */
		qos_scn->old_qos_attr.qos_flow_id =
			be32_to_cpu(qos_scn->old_qos_attr.qos_flow_id);
		qos_scn->new_qos_attr.qos_flow_id =
			be32_to_cpu(qos_scn->new_qos_attr.qos_flow_id);

		/* report only the attributes that actually changed */
		if (qos_scn->old_qos_attr.qos_flow_id !=
			qos_scn->new_qos_attr.qos_flow_id)
			bfa_cb_rport_qos_scn_flowid(rp->rport_drv,
						    qos_scn->old_qos_attr,
						    qos_scn->new_qos_attr);
		if (qos_scn->old_qos_attr.qos_priority !=
			qos_scn->new_qos_attr.qos_priority)
			bfa_cb_rport_qos_scn_prio(rp->rport_drv,
						  qos_scn->old_qos_attr,
						  qos_scn->new_qos_attr);
		break;

	default:
		bfa_stats(rp, sm_on_unexp);
		bfa_sm_fault(rp->bfa, event);
	}
}
3881
/*
 * Firmware rport is being deleted - awaiting f/w response.
 * A DELETE during this window upgrades the operation from
 * offline-only to full delete.
 */
static void
bfa_rport_sm_fwdelete(struct bfa_rport_s *rp, enum bfa_rport_event event)
{
	bfa_trc(rp->bfa, rp->rport_tag);
	bfa_trc(rp->bfa, event);

	switch (event) {
	case BFA_RPORT_SM_FWRSP:
		bfa_stats(rp, sm_fwd_rsp);
		bfa_sm_set_state(rp, bfa_rport_sm_offline);
		bfa_rport_offline_cb(rp);
		break;

	case BFA_RPORT_SM_DELETE:
		bfa_stats(rp, sm_fwd_del);
		bfa_sm_set_state(rp, bfa_rport_sm_deleting);
		break;

	case BFA_RPORT_SM_HWFAIL:
		bfa_stats(rp, sm_fwd_hwf);
		bfa_sm_set_state(rp, bfa_rport_sm_iocdisable);
		bfa_rport_offline_cb(rp);
		break;

	default:
		bfa_stats(rp, sm_fwd_unexp);
		bfa_sm_fault(rp->bfa, event);
	}
}
3914
/*
 * Firmware delete is pending because the request queue was full;
 * waiting for queue resume to actually send the delete request.
 */
static void
bfa_rport_sm_fwdelete_qfull(struct bfa_rport_s *rp, enum bfa_rport_event event)
{
	bfa_trc(rp->bfa, rp->rport_tag);
	bfa_trc(rp->bfa, event);

	switch (event) {
	case BFA_RPORT_SM_QRESUME:
		/* queue space available again; retry the delete */
		bfa_sm_set_state(rp, bfa_rport_sm_fwdelete);
		bfa_rport_send_fwdelete(rp);
		break;

	case BFA_RPORT_SM_DELETE:
		bfa_stats(rp, sm_fwd_del);
		bfa_sm_set_state(rp, bfa_rport_sm_deleting_qfull);
		break;

	case BFA_RPORT_SM_HWFAIL:
		bfa_stats(rp, sm_fwd_hwf);
		bfa_sm_set_state(rp, bfa_rport_sm_iocdisable);
		bfa_reqq_wcancel(&rp->reqq_wait);
		bfa_rport_offline_cb(rp);
		break;

	default:
		bfa_stats(rp, sm_fwd_unexp);
		bfa_sm_fault(rp->bfa, event);
	}
}
3944
/*
 * Offline state: rport exists in the driver, no firmware instance.
 * ONLINE re-creates the firmware rport; DELETE frees the object.
 */
static void
bfa_rport_sm_offline(struct bfa_rport_s *rp, enum bfa_rport_event event)
{
	bfa_trc(rp->bfa, rp->rport_tag);
	bfa_trc(rp->bfa, event);

	switch (event) {
	case BFA_RPORT_SM_DELETE:
		bfa_stats(rp, sm_off_del);
		bfa_sm_set_state(rp, bfa_rport_sm_uninit);
		bfa_rport_free(rp);
		break;

	case BFA_RPORT_SM_ONLINE:
		bfa_stats(rp, sm_off_on);
		if (bfa_rport_send_fwcreate(rp))
			bfa_sm_set_state(rp, bfa_rport_sm_fwcreate);
		else
			bfa_sm_set_state(rp, bfa_rport_sm_fwcreate_qfull);
		break;

	case BFA_RPORT_SM_HWFAIL:
		bfa_stats(rp, sm_off_hwf);
		bfa_sm_set_state(rp, bfa_rport_sm_iocdisable);
		break;

	default:
		bfa_stats(rp, sm_off_unexp);
		bfa_sm_fault(rp->bfa, event);
	}
}
3979
/*
 * Rport is deleted, waiting for firmware response to delete.
 * The object is freed on response or on hardware failure.
 */
static void
bfa_rport_sm_deleting(struct bfa_rport_s *rp, enum bfa_rport_event event)
{
	bfa_trc(rp->bfa, rp->rport_tag);
	bfa_trc(rp->bfa, event);

	switch (event) {
	case BFA_RPORT_SM_FWRSP:
		bfa_stats(rp, sm_del_fwrsp);
		bfa_sm_set_state(rp, bfa_rport_sm_uninit);
		bfa_rport_free(rp);
		break;

	case BFA_RPORT_SM_HWFAIL:
		bfa_stats(rp, sm_del_hwf);
		bfa_sm_set_state(rp, bfa_rport_sm_uninit);
		bfa_rport_free(rp);
		break;

	default:
		bfa_sm_fault(rp->bfa, event);
	}
}
4006
/*
 * Delete is pending because the request queue was full; waiting for
 * queue resume to send the firmware delete request.
 */
static void
bfa_rport_sm_deleting_qfull(struct bfa_rport_s *rp, enum bfa_rport_event event)
{
	bfa_trc(rp->bfa, rp->rport_tag);
	bfa_trc(rp->bfa, event);

	switch (event) {
	case BFA_RPORT_SM_QRESUME:
		/* queue space available again; send the delete */
		bfa_stats(rp, sm_del_fwrsp);
		bfa_sm_set_state(rp, bfa_rport_sm_deleting);
		bfa_rport_send_fwdelete(rp);
		break;

	case BFA_RPORT_SM_HWFAIL:
		bfa_stats(rp, sm_del_hwf);
		bfa_sm_set_state(rp, bfa_rport_sm_uninit);
		bfa_reqq_wcancel(&rp->reqq_wait);
		bfa_rport_free(rp);
		break;

	default:
		bfa_sm_fault(rp->bfa, event);
	}
}
4031
/*
 * Waiting for rport create response from firmware. A delete is pending.
 * Once the create response arrives, the delete is issued immediately.
 */
static void
bfa_rport_sm_delete_pending(struct bfa_rport_s *rp,
				enum bfa_rport_event event)
{
	bfa_trc(rp->bfa, rp->rport_tag);
	bfa_trc(rp->bfa, event);

	switch (event) {
	case BFA_RPORT_SM_FWRSP:
		bfa_stats(rp, sm_delp_fwrsp);
		if (bfa_rport_send_fwdelete(rp))
			bfa_sm_set_state(rp, bfa_rport_sm_deleting);
		else
			bfa_sm_set_state(rp, bfa_rport_sm_deleting_qfull);
		break;

	case BFA_RPORT_SM_HWFAIL:
		bfa_stats(rp, sm_delp_hwf);
		bfa_sm_set_state(rp, bfa_rport_sm_uninit);
		bfa_rport_free(rp);
		break;

	default:
		bfa_stats(rp, sm_delp_unexp);
		bfa_sm_fault(rp->bfa, event);
	}
}
4062
/*
 * Waiting for rport create response from firmware. Rport offline is pending.
 * Once the create response arrives, a firmware delete is issued; a
 * DELETE during the wait upgrades the pending operation to delete.
 */
static void
bfa_rport_sm_offline_pending(struct bfa_rport_s *rp,
				enum bfa_rport_event event)
{
	bfa_trc(rp->bfa, rp->rport_tag);
	bfa_trc(rp->bfa, event);

	switch (event) {
	case BFA_RPORT_SM_FWRSP:
		bfa_stats(rp, sm_offp_fwrsp);
		if (bfa_rport_send_fwdelete(rp))
			bfa_sm_set_state(rp, bfa_rport_sm_fwdelete);
		else
			bfa_sm_set_state(rp, bfa_rport_sm_fwdelete_qfull);
		break;

	case BFA_RPORT_SM_DELETE:
		bfa_stats(rp, sm_offp_del);
		bfa_sm_set_state(rp, bfa_rport_sm_delete_pending);
		break;

	case BFA_RPORT_SM_HWFAIL:
		bfa_stats(rp, sm_offp_hwf);
		bfa_sm_set_state(rp, bfa_rport_sm_iocdisable);
		break;

	default:
		bfa_stats(rp, sm_offp_unexp);
		bfa_sm_fault(rp->bfa, event);
	}
}
4097
Jing Huang5fbe25c2010-10-18 17:17:23 -07004098/*
Krishna Gudipatia36c61f2010-09-15 11:50:55 -07004099 * IOC h/w failed.
4100 */
static void
bfa_rport_sm_iocdisable(struct bfa_rport_s *rp, enum bfa_rport_event event)
{
	bfa_trc(rp->bfa, rp->rport_tag);
	bfa_trc(rp->bfa, event);

	switch (event) {
	case BFA_RPORT_SM_OFFLINE:
		/* Complete the offline notification to the driver. */
		bfa_stats(rp, sm_iocd_off);
		bfa_rport_offline_cb(rp);
		break;

	case BFA_RPORT_SM_DELETE:
		/* No firmware to talk to: free the rport directly. */
		bfa_stats(rp, sm_iocd_del);
		bfa_sm_set_state(rp, bfa_rport_sm_uninit);
		bfa_rport_free(rp);
		break;

	case BFA_RPORT_SM_ONLINE:
		/*
		 * IOC recovered: re-issue the fw create, falling back to
		 * the qfull wait state if the request queue is full.
		 */
		bfa_stats(rp, sm_iocd_on);
		if (bfa_rport_send_fwcreate(rp))
			bfa_sm_set_state(rp, bfa_rport_sm_fwcreate);
		else
			bfa_sm_set_state(rp, bfa_rport_sm_fwcreate_qfull);
		break;

	case BFA_RPORT_SM_HWFAIL:
		/* Already IOC-disabled; nothing more to do. */
		break;

	default:
		bfa_stats(rp, sm_iocd_unexp);
		bfa_sm_fault(rp->bfa, event);
	}
}
4135
4136
4137
Jing Huang5fbe25c2010-10-18 17:17:23 -07004138/*
Krishna Gudipatia36c61f2010-09-15 11:50:55 -07004139 * bfa_rport_private BFA rport private functions
4140 */
4141
4142static void
4143__bfa_cb_rport_online(void *cbarg, bfa_boolean_t complete)
4144{
4145 struct bfa_rport_s *rp = cbarg;
4146
4147 if (complete)
4148 bfa_cb_rport_online(rp->rport_drv);
4149}
4150
4151static void
4152__bfa_cb_rport_offline(void *cbarg, bfa_boolean_t complete)
4153{
4154 struct bfa_rport_s *rp = cbarg;
4155
4156 if (complete)
4157 bfa_cb_rport_offline(rp->rport_drv);
4158}
4159
4160static void
4161bfa_rport_qresume(void *cbarg)
4162{
4163 struct bfa_rport_s *rp = cbarg;
4164
4165 bfa_sm_send_event(rp, BFA_RPORT_SM_QRESUME);
4166}
4167
4168static void
4169bfa_rport_meminfo(struct bfa_iocfc_cfg_s *cfg, u32 *km_len,
4170 u32 *dm_len)
4171{
4172 if (cfg->fwcfg.num_rports < BFA_RPORT_MIN)
4173 cfg->fwcfg.num_rports = BFA_RPORT_MIN;
4174
4175 *km_len += cfg->fwcfg.num_rports * sizeof(struct bfa_rport_s);
4176}
4177
/*
 * Module attach: carve the rport array out of the meminfo kernel-memory
 * pool, initialize each rport, and build the free list.
 */
static void
bfa_rport_attach(struct bfa_s *bfa, void *bfad, struct bfa_iocfc_cfg_s *cfg,
		struct bfa_meminfo_s *meminfo, struct bfa_pcidev_s *pcidev)
{
	struct bfa_rport_mod_s *mod = BFA_RPORT_MOD(bfa);
	struct bfa_rport_s *rp;
	u16 i;

	INIT_LIST_HEAD(&mod->rp_free_q);
	INIT_LIST_HEAD(&mod->rp_active_q);

	rp = (struct bfa_rport_s *) bfa_meminfo_kva(meminfo);
	mod->rps_list = rp;
	mod->num_rports = cfg->fwcfg.num_rports;

	/* rport count must be a non-zero power of two */
	bfa_assert(mod->num_rports &&
		   !(mod->num_rports & (mod->num_rports - 1)));

	for (i = 0; i < mod->num_rports; i++, rp++) {
		memset(rp, 0, sizeof(struct bfa_rport_s));
		rp->bfa = bfa;
		rp->rport_tag = i;
		bfa_sm_set_state(rp, bfa_rport_sm_uninit);

		/*
		 * rport tag 0 is reserved and never handed out, so it is
		 * not placed on the free list
		 */
		if (i)
			list_add_tail(&rp->qe, &mod->rp_free_q);

		bfa_reqq_winit(&rp->reqq_wait, bfa_rport_qresume, rp);
	}

	/*
	 * consume memory: rp now points one past the array
	 */
	bfa_meminfo_kva(meminfo) = (u8 *) rp;
}
4216
/*
 * Module detach entry point -- no per-module teardown required; rport
 * memory lives in the shared meminfo pool.
 */
static void
bfa_rport_detach(struct bfa_s *bfa)
{
}
4221
/*
 * Module start entry point -- nothing to do for the rport module.
 */
static void
bfa_rport_start(struct bfa_s *bfa)
{
}
4226
/*
 * Module stop entry point -- nothing to do for the rport module.
 */
static void
bfa_rport_stop(struct bfa_s *bfa)
{
}
4231
4232static void
4233bfa_rport_iocdisable(struct bfa_s *bfa)
4234{
4235 struct bfa_rport_mod_s *mod = BFA_RPORT_MOD(bfa);
4236 struct bfa_rport_s *rport;
4237 struct list_head *qe, *qen;
4238
4239 list_for_each_safe(qe, qen, &mod->rp_active_q) {
4240 rport = (struct bfa_rport_s *) qe;
4241 bfa_sm_send_event(rport, BFA_RPORT_SM_HWFAIL);
4242 }
4243}
4244
4245static struct bfa_rport_s *
4246bfa_rport_alloc(struct bfa_rport_mod_s *mod)
4247{
4248 struct bfa_rport_s *rport;
4249
4250 bfa_q_deq(&mod->rp_free_q, &rport);
4251 if (rport)
4252 list_add_tail(&rport->qe, &mod->rp_active_q);
4253
4254 return rport;
4255}
4256
4257static void
4258bfa_rport_free(struct bfa_rport_s *rport)
4259{
4260 struct bfa_rport_mod_s *mod = BFA_RPORT_MOD(rport->bfa);
4261
4262 bfa_assert(bfa_q_is_on_q(&mod->rp_active_q, rport));
4263 list_del(&rport->qe);
4264 list_add_tail(&rport->qe, &mod->rp_free_q);
4265}
4266
/*
 * Build and queue an rport CREATE request to firmware.  Returns BFA_TRUE
 * when the message was queued; otherwise registers a request-queue wait
 * callback (resumed via bfa_rport_qresume) and returns BFA_FALSE.
 */
static bfa_boolean_t
bfa_rport_send_fwcreate(struct bfa_rport_s *rp)
{
	struct bfi_rport_create_req_s *m;

	/*
	 * check for room in queue to send request now
	 */
	m = bfa_reqq_next(rp->bfa, BFA_REQQ_RPORT);
	if (!m) {
		bfa_reqq_wait(rp->bfa, BFA_REQQ_RPORT, &rp->reqq_wait);
		return BFA_FALSE;
	}

	bfi_h2i_set(m->mh, BFI_MC_RPORT, BFI_RPORT_H2I_CREATE_REQ,
			bfa_lpuid(rp->bfa));
	m->bfa_handle = rp->rport_tag;
	/* frame size goes out big-endian; the rest are copied as-is */
	m->max_frmsz = cpu_to_be16(rp->rport_info.max_frmsz);
	m->pid = rp->rport_info.pid;
	m->lp_tag = rp->rport_info.lp_tag;
	m->local_pid = rp->rport_info.local_pid;
	m->fc_class = rp->rport_info.fc_class;
	m->vf_en = rp->rport_info.vf_en;
	m->vf_id = rp->rport_info.vf_id;
	m->cisc = rp->rport_info.cisc;

	/*
	 * queue I/O message to firmware
	 */
	bfa_reqq_produce(rp->bfa, BFA_REQQ_RPORT);
	return BFA_TRUE;
}
4299
/*
 * Build and queue an rport DELETE request to firmware.  Returns BFA_TRUE
 * when the message was queued; otherwise registers a request-queue wait
 * callback and returns BFA_FALSE.
 */
static bfa_boolean_t
bfa_rport_send_fwdelete(struct bfa_rport_s *rp)
{
	struct bfi_rport_delete_req_s *m;

	/*
	 * check for room in queue to send request now
	 */
	m = bfa_reqq_next(rp->bfa, BFA_REQQ_RPORT);
	if (!m) {
		bfa_reqq_wait(rp->bfa, BFA_REQQ_RPORT, &rp->reqq_wait);
		return BFA_FALSE;
	}

	bfi_h2i_set(m->mh, BFI_MC_RPORT, BFI_RPORT_H2I_DELETE_REQ,
			bfa_lpuid(rp->bfa));
	m->fw_handle = rp->fw_handle;

	/*
	 * queue I/O message to firmware
	 */
	bfa_reqq_produce(rp->bfa, BFA_REQQ_RPORT);
	return BFA_TRUE;
}
4324
/*
 * Build and queue an rport SET_SPEED request to firmware.  Unlike
 * create/delete, a full request queue is not retried -- the request is
 * simply dropped (traced) and BFA_FALSE returned.
 */
static bfa_boolean_t
bfa_rport_send_fwspeed(struct bfa_rport_s *rp)
{
	struct bfa_rport_speed_req_s *m;

	/*
	 * check for room in queue to send request now
	 */
	m = bfa_reqq_next(rp->bfa, BFA_REQQ_RPORT);
	if (!m) {
		bfa_trc(rp->bfa, rp->rport_info.speed);
		return BFA_FALSE;
	}

	bfi_h2i_set(m->mh, BFI_MC_RPORT, BFI_RPORT_H2I_SET_SPEED_REQ,
			bfa_lpuid(rp->bfa));
	m->fw_handle = rp->fw_handle;
	m->speed = (u8)rp->rport_info.speed;

	/*
	 * queue I/O message to firmware
	 */
	bfa_reqq_produce(rp->bfa, BFA_REQQ_RPORT);
	return BFA_TRUE;
}
4350
4351
4352
Jing Huang5fbe25c2010-10-18 17:17:23 -07004353/*
Krishna Gudipatia36c61f2010-09-15 11:50:55 -07004354 * bfa_rport_public
4355 */
4356
Jing Huang5fbe25c2010-10-18 17:17:23 -07004357/*
Krishna Gudipatia36c61f2010-09-15 11:50:55 -07004358 * Rport interrupt processing.
4359 */
void
bfa_rport_isr(struct bfa_s *bfa, struct bfi_msg_s *m)
{
	union bfi_rport_i2h_msg_u msg;
	struct bfa_rport_s *rp;

	bfa_trc(bfa, m->mhdr.msg_id);

	msg.msg = m;

	switch (m->mhdr.msg_id) {
	case BFI_RPORT_I2H_CREATE_RSP:
		/*
		 * Create response: record the firmware handle and QoS
		 * attributes before advancing the state machine.
		 */
		rp = BFA_RPORT_FROM_TAG(bfa, msg.create_rsp->bfa_handle);
		rp->fw_handle = msg.create_rsp->fw_handle;
		rp->qos_attr = msg.create_rsp->qos_attr;
		bfa_assert(msg.create_rsp->status == BFA_STATUS_OK);
		bfa_sm_send_event(rp, BFA_RPORT_SM_FWRSP);
		break;

	case BFI_RPORT_I2H_DELETE_RSP:
		rp = BFA_RPORT_FROM_TAG(bfa, msg.delete_rsp->bfa_handle);
		bfa_assert(msg.delete_rsp->status == BFA_STATUS_OK);
		bfa_sm_send_event(rp, BFA_RPORT_SM_FWRSP);
		break;

	case BFI_RPORT_I2H_QOS_SCN:
		/* QoS state-change notification: hand the raw event along. */
		rp = BFA_RPORT_FROM_TAG(bfa, msg.qos_scn_evt->bfa_handle);
		rp->event_arg.fw_msg = msg.qos_scn_evt;
		bfa_sm_send_event(rp, BFA_RPORT_SM_QOS_SCN);
		break;

	default:
		bfa_trc(bfa, m->mhdr.msg_id);
		bfa_assert(0);
	}
}
4396
4397
4398
Jing Huang5fbe25c2010-10-18 17:17:23 -07004399/*
Krishna Gudipatia36c61f2010-09-15 11:50:55 -07004400 * bfa_rport_api
4401 */
4402
4403struct bfa_rport_s *
4404bfa_rport_create(struct bfa_s *bfa, void *rport_drv)
4405{
4406 struct bfa_rport_s *rp;
4407
4408 rp = bfa_rport_alloc(BFA_RPORT_MOD(bfa));
4409
4410 if (rp == NULL)
4411 return NULL;
4412
4413 rp->bfa = bfa;
4414 rp->rport_drv = rport_drv;
Maggie Zhangf7f738122010-12-09 19:08:43 -08004415 memset(&rp->stats, 0, sizeof(rp->stats));
Krishna Gudipatia36c61f2010-09-15 11:50:55 -07004416
4417 bfa_assert(bfa_sm_cmp_state(rp, bfa_rport_sm_uninit));
4418 bfa_sm_send_event(rp, BFA_RPORT_SM_CREATE);
4419
4420 return rp;
4421}
4422
/*
 * Bring an rport online with the supplied login parameters and kick the
 * state machine (which issues the firmware create).
 */
void
bfa_rport_online(struct bfa_rport_s *rport, struct bfa_rport_info_s *rport_info)
{
	/*
	 * NOTE(review): this assert contradicts the zero-frame-size
	 * fallback just below -- debug builds would trip here for exactly
	 * the JBODs the fallback exists to handle.  Confirm whether the
	 * assert should be dropped.
	 */
	bfa_assert(rport_info->max_frmsz != 0);

	/*
	 * Some JBODs are seen to be not setting PDU size correctly in PLOGI
	 * responses. Default to minimum size.
	 */
	if (rport_info->max_frmsz == 0) {
		bfa_trc(rport->bfa, rport->rport_tag);
		rport_info->max_frmsz = FC_MIN_PDUSZ;
	}

	rport->rport_info = *rport_info;
	bfa_sm_send_event(rport, BFA_RPORT_SM_ONLINE);
}
4440
/*
 * Set a new operating speed for the rport and notify its state machine.
 * Speed must be an explicit value; auto-negotiation is not accepted here.
 */
void
bfa_rport_speed(struct bfa_rport_s *rport, enum bfa_port_speed speed)
{
	bfa_assert(speed != 0);
	bfa_assert(speed != BFA_PORT_SPEED_AUTO);

	rport->rport_info.speed = speed;
	bfa_sm_send_event(rport, BFA_RPORT_SM_SET_SPEED);
}
4450
Krishna Gudipatia36c61f2010-09-15 11:50:55 -07004451
Jing Huang5fbe25c2010-10-18 17:17:23 -07004452/*
Krishna Gudipatia36c61f2010-09-15 11:50:55 -07004453 * SGPG related functions
4454 */
4455
/*
 * Compute and return memory needed by the SGPG module.
 */
4459static void
4460bfa_sgpg_meminfo(struct bfa_iocfc_cfg_s *cfg, u32 *km_len,
4461 u32 *dm_len)
4462{
4463 if (cfg->drvcfg.num_sgpgs < BFA_SGPG_MIN)
4464 cfg->drvcfg.num_sgpgs = BFA_SGPG_MIN;
4465
4466 *km_len += (cfg->drvcfg.num_sgpgs + 1) * sizeof(struct bfa_sgpg_s);
4467 *dm_len += (cfg->drvcfg.num_sgpgs + 1) * sizeof(struct bfi_sgpg_s);
4468}
4469
4470
/*
 * Module attach: carve aligned kernel and DMA-able memory for the SG page
 * arrays, pair each host-side bfa_sgpg_s with its DMA bfi_sgpg_s, and
 * build the free queue.
 */
static void
bfa_sgpg_attach(struct bfa_s *bfa, void *bfad, struct bfa_iocfc_cfg_s *cfg,
		struct bfa_meminfo_s *minfo, struct bfa_pcidev_s *pcidev)
{
	struct bfa_sgpg_mod_s *mod = BFA_SGPG_MOD(bfa);
	int i;
	struct bfa_sgpg_s *hsgpg;
	struct bfi_sgpg_s *sgpg;
	u64 align_len;

	/* view the physical address both as a u64 and as a bfi address */
	union {
		u64 pa;
		union bfi_addr_u addr;
	} sgpg_pa, sgpg_pa_tmp;

	INIT_LIST_HEAD(&mod->sgpg_q);
	INIT_LIST_HEAD(&mod->sgpg_wait_q);

	bfa_trc(bfa, cfg->drvcfg.num_sgpgs);

	mod->num_sgpgs = cfg->drvcfg.num_sgpgs;
	mod->sgpg_arr_pa = bfa_meminfo_dma_phys(minfo);
	/*
	 * Round the DMA base up to the SGPG alignment boundary and apply
	 * the same offset to the kernel-virtual and DMA-virtual views so
	 * all three stay in sync.
	 */
	align_len = (BFA_SGPG_ROUNDUP(mod->sgpg_arr_pa) - mod->sgpg_arr_pa);
	mod->sgpg_arr_pa += align_len;
	mod->hsgpg_arr = (struct bfa_sgpg_s *) (bfa_meminfo_kva(minfo) +
						align_len);
	mod->sgpg_arr = (struct bfi_sgpg_s *) (bfa_meminfo_dma_virt(minfo) +
						align_len);

	hsgpg = mod->hsgpg_arr;
	sgpg = mod->sgpg_arr;
	sgpg_pa.pa = mod->sgpg_arr_pa;
	mod->free_sgpgs = mod->num_sgpgs;

	/* base must be naturally aligned to the SG page size */
	bfa_assert(!(sgpg_pa.pa & (sizeof(struct bfi_sgpg_s) - 1)));

	for (i = 0; i < mod->num_sgpgs; i++) {
		memset(hsgpg, 0, sizeof(*hsgpg));
		memset(sgpg, 0, sizeof(*sgpg));

		/* link host struct to its DMA page and record its address */
		hsgpg->sgpg = sgpg;
		sgpg_pa_tmp.pa = bfa_sgaddr_le(sgpg_pa.pa);
		hsgpg->sgpg_pa = sgpg_pa_tmp.addr;
		list_add_tail(&hsgpg->qe, &mod->sgpg_q);

		hsgpg++;
		sgpg++;
		sgpg_pa.pa += sizeof(struct bfi_sgpg_s);
	}

	/* consume memory: record the post-array positions */
	bfa_meminfo_kva(minfo) = (u8 *) hsgpg;
	bfa_meminfo_dma_virt(minfo) = (u8 *) sgpg;
	bfa_meminfo_dma_phys(minfo) = sgpg_pa.pa;
}
4525
/*
 * Module detach entry point -- no per-module teardown required.
 */
static void
bfa_sgpg_detach(struct bfa_s *bfa)
{
}
4530
/*
 * Module start entry point -- nothing to do for the SGPG module.
 */
static void
bfa_sgpg_start(struct bfa_s *bfa)
{
}
4535
/*
 * Module stop entry point -- nothing to do for the SGPG module.
 */
static void
bfa_sgpg_stop(struct bfa_s *bfa)
{
}
4540
/*
 * IOC failure notification -- SGPG state needs no cleanup on IOC failure.
 */
static void
bfa_sgpg_iocdisable(struct bfa_s *bfa)
{
}
4545
Krishna Gudipatia36c61f2010-09-15 11:50:55 -07004546bfa_status_t
4547bfa_sgpg_malloc(struct bfa_s *bfa, struct list_head *sgpg_q, int nsgpgs)
4548{
4549 struct bfa_sgpg_mod_s *mod = BFA_SGPG_MOD(bfa);
4550 struct bfa_sgpg_s *hsgpg;
4551 int i;
4552
4553 bfa_trc_fp(bfa, nsgpgs);
4554
4555 if (mod->free_sgpgs < nsgpgs)
4556 return BFA_STATUS_ENOMEM;
4557
4558 for (i = 0; i < nsgpgs; i++) {
4559 bfa_q_deq(&mod->sgpg_q, &hsgpg);
4560 bfa_assert(hsgpg);
4561 list_add_tail(&hsgpg->qe, sgpg_q);
4562 }
4563
4564 mod->free_sgpgs -= nsgpgs;
4565 return BFA_STATUS_OK;
4566}
4567
/*
 * Free nsgpg SG pages back to the module, then hand out pages to waiters
 * in FIFO order for as long as any remain.  A waiter's callback fires
 * only once its full request has been satisfied.
 */
void
bfa_sgpg_mfree(struct bfa_s *bfa, struct list_head *sgpg_q, int nsgpg)
{
	struct bfa_sgpg_mod_s *mod = BFA_SGPG_MOD(bfa);
	struct bfa_sgpg_wqe_s *wqe;

	bfa_trc_fp(bfa, nsgpg);

	mod->free_sgpgs += nsgpg;
	bfa_assert(mod->free_sgpgs <= mod->num_sgpgs);

	/* return the whole caller list to the free queue in one splice */
	list_splice_tail_init(sgpg_q, &mod->sgpg_q);

	if (list_empty(&mod->sgpg_wait_q))
		return;

	/*
	 * satisfy as many waiting requests as possible
	 */
	do {
		wqe = bfa_q_first(&mod->sgpg_wait_q);
		/* give the waiter everything it needs, or all we have */
		if (mod->free_sgpgs < wqe->nsgpg)
			nsgpg = mod->free_sgpgs;
		else
			nsgpg = wqe->nsgpg;
		bfa_sgpg_malloc(bfa, &wqe->sgpg_q, nsgpg);
		wqe->nsgpg -= nsgpg;
		if (wqe->nsgpg == 0) {
			/* request fully satisfied: dequeue and notify */
			list_del(&wqe->qe);
			wqe->cbfn(wqe->cbarg);
		}
	} while (mod->free_sgpgs && !list_empty(&mod->sgpg_wait_q));
}
4601
/*
 * Queue a wait for nsgpg SG pages.  Callers use this only when an
 * immediate bfa_sgpg_malloc() cannot succeed (nsgpg > free count).  Any
 * currently-free pages are granted to this waiter right away; the
 * remainder is delivered later by bfa_sgpg_mfree().
 */
void
bfa_sgpg_wait(struct bfa_s *bfa, struct bfa_sgpg_wqe_s *wqe, int nsgpg)
{
	struct bfa_sgpg_mod_s *mod = BFA_SGPG_MOD(bfa);

	bfa_assert(nsgpg > 0);
	bfa_assert(nsgpg > mod->free_sgpgs);

	wqe->nsgpg_total = wqe->nsgpg = nsgpg;

	/*
	 * allocate any left to this one first
	 */
	if (mod->free_sgpgs) {
		/*
		 * no one else is waiting for SGPG -- free pages could not
		 * exist alongside an earlier waiter
		 */
		bfa_assert(list_empty(&mod->sgpg_wait_q));
		list_splice_tail_init(&mod->sgpg_q, &wqe->sgpg_q);
		wqe->nsgpg -= mod->free_sgpgs;
		mod->free_sgpgs = 0;
	}

	list_add_tail(&wqe->qe, &mod->sgpg_wait_q);
}
4627
4628void
4629bfa_sgpg_wcancel(struct bfa_s *bfa, struct bfa_sgpg_wqe_s *wqe)
4630{
4631 struct bfa_sgpg_mod_s *mod = BFA_SGPG_MOD(bfa);
4632
4633 bfa_assert(bfa_q_is_on_q(&mod->sgpg_wait_q, wqe));
4634 list_del(&wqe->qe);
4635
4636 if (wqe->nsgpg_total != wqe->nsgpg)
4637 bfa_sgpg_mfree(bfa, &wqe->sgpg_q,
4638 wqe->nsgpg_total - wqe->nsgpg);
4639}
4640
4641void
4642bfa_sgpg_winit(struct bfa_sgpg_wqe_s *wqe, void (*cbfn) (void *cbarg),
4643 void *cbarg)
4644{
4645 INIT_LIST_HEAD(&wqe->sgpg_q);
4646 wqe->cbfn = cbfn;
4647 wqe->cbarg = cbarg;
4648}
4649
Jing Huang5fbe25c2010-10-18 17:17:23 -07004650/*
Krishna Gudipatia36c61f2010-09-15 11:50:55 -07004651 * UF related functions
4652 */
4653/*
4654 *****************************************************************************
4655 * Internal functions
4656 *****************************************************************************
4657 */
4658static void
4659__bfa_cb_uf_recv(void *cbarg, bfa_boolean_t complete)
4660{
4661 struct bfa_uf_s *uf = cbarg;
4662 struct bfa_uf_mod_s *ufm = BFA_UF_MOD(uf->bfa);
4663
4664 if (complete)
4665 ufm->ufrecv(ufm->cbarg, uf);
4666}
4667
/*
 * Claim DMA-able memory for the UF receive buffers and zero it.  Records
 * both the kernel-virtual and physical base addresses in the module.
 */
static void
claim_uf_pbs(struct bfa_uf_mod_s *ufm, struct bfa_meminfo_s *mi)
{
	u32 uf_pb_tot_sz;

	ufm->uf_pbs_kva = (struct bfa_uf_buf_s *) bfa_meminfo_dma_virt(mi);
	ufm->uf_pbs_pa = bfa_meminfo_dma_phys(mi);
	/* total size rounded up to the DMA alignment boundary */
	uf_pb_tot_sz = BFA_ROUNDUP((sizeof(struct bfa_uf_buf_s) * ufm->num_ufs),
							BFA_DMA_ALIGN_SZ);

	/* advance the pool pointers past the claimed region */
	bfa_meminfo_dma_virt(mi) += uf_pb_tot_sz;
	bfa_meminfo_dma_phys(mi) += uf_pb_tot_sz;

	memset((void *)ufm->uf_pbs_kva, 0, uf_pb_tot_sz);
}
4683
4684static void
4685claim_uf_post_msgs(struct bfa_uf_mod_s *ufm, struct bfa_meminfo_s *mi)
4686{
4687 struct bfi_uf_buf_post_s *uf_bp_msg;
4688 struct bfi_sge_s *sge;
4689 union bfi_addr_u sga_zero = { {0} };
4690 u16 i;
4691 u16 buf_len;
4692
4693 ufm->uf_buf_posts = (struct bfi_uf_buf_post_s *) bfa_meminfo_kva(mi);
4694 uf_bp_msg = ufm->uf_buf_posts;
4695
4696 for (i = 0, uf_bp_msg = ufm->uf_buf_posts; i < ufm->num_ufs;
4697 i++, uf_bp_msg++) {
Jing Huang6a18b162010-10-18 17:08:54 -07004698 memset(uf_bp_msg, 0, sizeof(struct bfi_uf_buf_post_s));
Krishna Gudipatia36c61f2010-09-15 11:50:55 -07004699
4700 uf_bp_msg->buf_tag = i;
4701 buf_len = sizeof(struct bfa_uf_buf_s);
Jing Huangba816ea2010-10-18 17:10:50 -07004702 uf_bp_msg->buf_len = cpu_to_be16(buf_len);
Krishna Gudipatia36c61f2010-09-15 11:50:55 -07004703 bfi_h2i_set(uf_bp_msg->mh, BFI_MC_UF, BFI_UF_H2I_BUF_POST,
4704 bfa_lpuid(ufm->bfa));
4705
4706 sge = uf_bp_msg->sge;
4707 sge[0].sg_len = buf_len;
4708 sge[0].flags = BFI_SGE_DATA_LAST;
4709 bfa_dma_addr_set(sge[0].sga, ufm_pbs_pa(ufm, i));
4710 bfa_sge_to_be(sge);
4711
4712 sge[1].sg_len = buf_len;
4713 sge[1].flags = BFI_SGE_PGDLEN;
4714 sge[1].sga = sga_zero;
4715 bfa_sge_to_be(&sge[1]);
4716 }
4717
Jing Huang5fbe25c2010-10-18 17:17:23 -07004718 /*
Krishna Gudipatia36c61f2010-09-15 11:50:55 -07004719 * advance pointer beyond consumed memory
4720 */
4721 bfa_meminfo_kva(mi) = (u8 *) uf_bp_msg;
4722}
4723
/*
 * Claim kernel memory for the UF descriptor array, initialize each UF
 * with its buffer address, and queue all of them on the free list.
 * Requires claim_uf_pbs() to have run first (reads uf_pbs_kva/pa).
 */
static void
claim_ufs(struct bfa_uf_mod_s *ufm, struct bfa_meminfo_s *mi)
{
	u16 i;
	struct bfa_uf_s *uf;

	/*
	 * Claim block of memory for UF list
	 */
	ufm->uf_list = (struct bfa_uf_s *) bfa_meminfo_kva(mi);

	/*
	 * Initialize UFs and queue it in UF free queue
	 */
	for (i = 0, uf = ufm->uf_list; i < ufm->num_ufs; i++, uf++) {
		memset(uf, 0, sizeof(struct bfa_uf_s));
		uf->bfa = ufm->bfa;
		uf->uf_tag = i;
		uf->pb_len = sizeof(struct bfa_uf_buf_s);
		/* pair the descriptor with its DMA receive buffer */
		uf->buf_kva = (void *)&ufm->uf_pbs_kva[i];
		uf->buf_pa = ufm_pbs_pa(ufm, i);
		list_add_tail(&uf->qe, &ufm->uf_free_q);
	}

	/*
	 * advance memory pointer
	 */
	bfa_meminfo_kva(mi) = (u8 *) uf;
}
4753
/*
 * Carve all UF module memory from the meminfo pool.  Order matters:
 * claim_uf_pbs() must run first so claim_ufs() and claim_uf_post_msgs()
 * can record the buffer addresses it establishes.
 */
static void
uf_mem_claim(struct bfa_uf_mod_s *ufm, struct bfa_meminfo_s *mi)
{
	claim_uf_pbs(ufm, mi);
	claim_ufs(ufm, mi);
	claim_uf_post_msgs(ufm, mi);
}
4761
4762static void
4763bfa_uf_meminfo(struct bfa_iocfc_cfg_s *cfg, u32 *ndm_len, u32 *dm_len)
4764{
4765 u32 num_ufs = cfg->fwcfg.num_uf_bufs;
4766
4767 /*
4768 * dma-able memory for UF posted bufs
4769 */
4770 *dm_len += BFA_ROUNDUP((sizeof(struct bfa_uf_buf_s) * num_ufs),
4771 BFA_DMA_ALIGN_SZ);
4772
4773 /*
4774 * kernel Virtual memory for UFs and UF buf post msg copies
4775 */
4776 *ndm_len += sizeof(struct bfa_uf_s) * num_ufs;
4777 *ndm_len += sizeof(struct bfi_uf_buf_post_s) * num_ufs;
4778}
4779
/*
 * Module attach: reset UF module state, then carve buffer, descriptor,
 * and message memory out of the meminfo pool.
 */
static void
bfa_uf_attach(struct bfa_s *bfa, void *bfad, struct bfa_iocfc_cfg_s *cfg,
		struct bfa_meminfo_s *meminfo, struct bfa_pcidev_s *pcidev)
{
	struct bfa_uf_mod_s *ufm = BFA_UF_MOD(bfa);

	memset(ufm, 0, sizeof(struct bfa_uf_mod_s));
	ufm->bfa = bfa;
	ufm->num_ufs = cfg->fwcfg.num_uf_bufs;
	INIT_LIST_HEAD(&ufm->uf_free_q);
	INIT_LIST_HEAD(&ufm->uf_posted_q);

	uf_mem_claim(ufm, meminfo);
}
4794
/*
 * Module detach entry point -- no per-module teardown required.
 */
static void
bfa_uf_detach(struct bfa_s *bfa)
{
}
4799
4800static struct bfa_uf_s *
4801bfa_uf_get(struct bfa_uf_mod_s *uf_mod)
4802{
4803 struct bfa_uf_s *uf;
4804
4805 bfa_q_deq(&uf_mod->uf_free_q, &uf);
4806 return uf;
4807}
4808
/*
 * Return a UF to the free list.
 */
static void
bfa_uf_put(struct bfa_uf_mod_s *uf_mod, struct bfa_uf_s *uf)
{
	list_add_tail(&uf->qe, &uf_mod->uf_free_q);
}
4814
/*
 * Post a single UF receive buffer to firmware by copying its pre-built
 * buf-post template into the request queue, then move the UF onto the
 * posted list.  Returns BFA_STATUS_FAILED (without queueing a wait) when
 * the request queue is full; the caller keeps ownership of the UF.
 */
static bfa_status_t
bfa_uf_post(struct bfa_uf_mod_s *ufm, struct bfa_uf_s *uf)
{
	struct bfi_uf_buf_post_s *uf_post_msg;

	uf_post_msg = bfa_reqq_next(ufm->bfa, BFA_REQQ_FCXP);
	if (!uf_post_msg)
		return BFA_STATUS_FAILED;

	memcpy(uf_post_msg, &ufm->uf_buf_posts[uf->uf_tag],
		      sizeof(struct bfi_uf_buf_post_s));
	bfa_reqq_produce(ufm->bfa, BFA_REQQ_FCXP);

	bfa_trc(ufm->bfa, uf->uf_tag);

	list_add_tail(&uf->qe, &ufm->uf_posted_q);
	return BFA_STATUS_OK;
}
4833
4834static void
4835bfa_uf_post_all(struct bfa_uf_mod_s *uf_mod)
4836{
4837 struct bfa_uf_s *uf;
4838
4839 while ((uf = bfa_uf_get(uf_mod)) != NULL) {
4840 if (bfa_uf_post(uf_mod, uf) != BFA_STATUS_OK)
4841 break;
4842 }
4843}
4844
4845static void
4846uf_recv(struct bfa_s *bfa, struct bfi_uf_frm_rcvd_s *m)
4847{
4848 struct bfa_uf_mod_s *ufm = BFA_UF_MOD(bfa);
4849 u16 uf_tag = m->buf_tag;
4850 struct bfa_uf_buf_s *uf_buf = &ufm->uf_pbs_kva[uf_tag];
4851 struct bfa_uf_s *uf = &ufm->uf_list[uf_tag];
4852 u8 *buf = &uf_buf->d[0];
4853 struct fchs_s *fchs;
4854
Jing Huangba816ea2010-10-18 17:10:50 -07004855 m->frm_len = be16_to_cpu(m->frm_len);
4856 m->xfr_len = be16_to_cpu(m->xfr_len);
Krishna Gudipatia36c61f2010-09-15 11:50:55 -07004857
4858 fchs = (struct fchs_s *)uf_buf;
4859
4860 list_del(&uf->qe); /* dequeue from posted queue */
4861
4862 uf->data_ptr = buf;
4863 uf->data_len = m->xfr_len;
4864
4865 bfa_assert(uf->data_len >= sizeof(struct fchs_s));
4866
4867 if (uf->data_len == sizeof(struct fchs_s)) {
4868 bfa_plog_fchdr(bfa->plog, BFA_PL_MID_HAL_UF, BFA_PL_EID_RX,
4869 uf->data_len, (struct fchs_s *)buf);
4870 } else {
4871 u32 pld_w0 = *((u32 *) (buf + sizeof(struct fchs_s)));
4872 bfa_plog_fchdr_and_pl(bfa->plog, BFA_PL_MID_HAL_UF,
4873 BFA_PL_EID_RX, uf->data_len,
4874 (struct fchs_s *)buf, pld_w0);
4875 }
4876
4877 if (bfa->fcs)
4878 __bfa_cb_uf_recv(uf, BFA_TRUE);
4879 else
4880 bfa_cb_queue(bfa, &uf->hcb_qe, __bfa_cb_uf_recv, uf);
4881}
4882
/*
 * Module stop entry point -- nothing to do for the UF module.
 */
static void
bfa_uf_stop(struct bfa_s *bfa)
{
}
4887
4888static void
4889bfa_uf_iocdisable(struct bfa_s *bfa)
4890{
4891 struct bfa_uf_mod_s *ufm = BFA_UF_MOD(bfa);
4892 struct bfa_uf_s *uf;
4893 struct list_head *qe, *qen;
4894
4895 list_for_each_safe(qe, qen, &ufm->uf_posted_q) {
4896 uf = (struct bfa_uf_s *) qe;
4897 list_del(&uf->qe);
4898 bfa_uf_put(ufm, uf);
4899 }
4900}
4901
/*
 * Module start: post all free UF receive buffers to firmware.
 */
static void
bfa_uf_start(struct bfa_s *bfa)
{
	bfa_uf_post_all(BFA_UF_MOD(bfa));
}
4907
/*
 * Register handler for all unsolicited receive frames.
 *
 * @param[in]	bfa		BFA instance
 * @param[in]	ufrecv	receive handler function
 * @param[in]	cbarg	receive handler arg
 */
4915void
4916bfa_uf_recv_register(struct bfa_s *bfa, bfa_cb_uf_recv_t ufrecv, void *cbarg)
4917{
4918 struct bfa_uf_mod_s *ufm = BFA_UF_MOD(bfa);
4919
4920 ufm->ufrecv = ufrecv;
4921 ufm->cbarg = cbarg;
4922}
4923
Jing Huang5fbe25c2010-10-18 17:17:23 -07004924/*
Krishna Gudipatia36c61f2010-09-15 11:50:55 -07004925 * Free an unsolicited frame back to BFA.
4926 *
4927 * @param[in] uf unsolicited frame to be freed
4928 *
4929 * @return None
4930 */
4931void
4932bfa_uf_free(struct bfa_uf_s *uf)
4933{
4934 bfa_uf_put(BFA_UF_MOD(uf->bfa), uf);
4935 bfa_uf_post_all(BFA_UF_MOD(uf->bfa));
4936}
4937
4938
4939
Jing Huang5fbe25c2010-10-18 17:17:23 -07004940/*
Krishna Gudipatia36c61f2010-09-15 11:50:55 -07004941 * uf_pub BFA uf module public functions
4942 */
4943void
4944bfa_uf_isr(struct bfa_s *bfa, struct bfi_msg_s *msg)
4945{
4946 bfa_trc(bfa, msg->mhdr.msg_id);
4947
4948 switch (msg->mhdr.msg_id) {
4949 case BFI_UF_I2H_FRM_RCVD:
4950 uf_recv(bfa, (struct bfi_uf_frm_rcvd_s *) msg);
4951 break;
4952
4953 default:
4954 bfa_trc(bfa, msg->mhdr.msg_id);
4955 bfa_assert(0);
4956 }
4957}
4958
4959