blob: 21caaefce99fc9f143af955622615efce1434867 [file] [log] [blame]
Krishna Gudipatia36c61f2010-09-15 11:50:55 -07001/*
2 * Copyright (c) 2005-2010 Brocade Communications Systems, Inc.
3 * All rights reserved
4 * www.brocade.com
5 *
6 * Linux driver for Brocade Fibre Channel Host Bus Adapter.
7 *
8 * This program is free software; you can redistribute it and/or modify it
9 * under the terms of the GNU General Public License (GPL) Version 2 as
10 * published by the Free Software Foundation
11 *
12 * This program is distributed in the hope that it will be useful, but
13 * WITHOUT ANY WARRANTY; without even the implied warranty of
14 * MERCHANTABILITY or FITNESS FOR A PARTICULAR PURPOSE. See the GNU
15 * General Public License for more details.
16 */
17
Maggie Zhangf16a1752010-12-09 19:12:32 -080018#include "bfad_drv.h"
Krishna Gudipatia36c61f2010-09-15 11:50:55 -070019#include "bfa_plog.h"
20#include "bfa_cs.h"
21#include "bfa_modules.h"
Krishna Gudipatia36c61f2010-09-15 11:50:55 -070022
/* Per-file trace tag and module hooks for the sub-modules in this file. */
BFA_TRC_FILE(HAL, FCXP);
BFA_MODULE(fcdiag);
BFA_MODULE(fcxp);
BFA_MODULE(sgpg);
BFA_MODULE(lps);
BFA_MODULE(fcport);
BFA_MODULE(rport);
BFA_MODULE(uf);

/*
 * LPS related definitions
 */
#define BFA_LPS_MIN_LPORTS	(1)
#define BFA_LPS_MAX_LPORTS	(256)

/*
 * Maximum Vports supported per physical port or vf.
 */
#define BFA_LPS_MAX_VPORTS_SUPP_CB	255
#define BFA_LPS_MAX_VPORTS_SUPP_CT	190
Krishna Gudipatia36c61f2010-09-15 11:50:55 -070044
/*
 * FC PORT related definitions
 */
/*
 * The port is considered disabled if corresponding physical port or IOC are
 * disabled explicitly
 */
#define BFA_PORT_IS_DISABLED(bfa) \
	((bfa_fcport_is_disabled(bfa) == BFA_TRUE) || \
	(bfa_ioc_is_disabled(&bfa->ioc) == BFA_TRUE))

/*
 * BFA port state machine events
 */
enum bfa_fcport_sm_event {
	BFA_FCPORT_SM_START	= 1,	/*  start port state machine */
	BFA_FCPORT_SM_STOP	= 2,	/*  stop port state machine */
	BFA_FCPORT_SM_ENABLE	= 3,	/*  enable port */
	BFA_FCPORT_SM_DISABLE	= 4,	/*  disable port state machine */
	BFA_FCPORT_SM_FWRSP	= 5,	/*  firmware enable/disable rsp */
	BFA_FCPORT_SM_LINKUP	= 6,	/*  firmware linkup event */
	BFA_FCPORT_SM_LINKDOWN	= 7,	/*  firmware linkdown event */
	BFA_FCPORT_SM_QRESUME	= 8,	/*  CQ space available */
	BFA_FCPORT_SM_HWFAIL	= 9,	/*  IOC h/w failure */
};

/*
 * BFA port link notification state machine events
 */

enum bfa_fcport_ln_sm_event {
	BFA_FCPORT_LN_SM_LINKUP		= 1,	/*  linkup event */
	BFA_FCPORT_LN_SM_LINKDOWN	= 2,	/*  linkdown event */
	BFA_FCPORT_LN_SM_NOTIFICATION	= 3	/*  done notification */
};

/*
 * RPORT related definitions
 *
 * The offline/online callbacks run synchronously when the FCS layer is
 * attached ((__rp)->bfa->fcs non-zero); otherwise they are deferred
 * through the hcb callback queue.
 */
#define bfa_rport_offline_cb(__rp) do {					\
	if ((__rp)->bfa->fcs)						\
		bfa_cb_rport_offline((__rp)->rport_drv);		\
	else {								\
		bfa_cb_queue((__rp)->bfa, &(__rp)->hcb_qe,		\
				__bfa_cb_rport_offline, (__rp));	\
	}								\
} while (0)

#define bfa_rport_online_cb(__rp) do {					\
	if ((__rp)->bfa->fcs)						\
		bfa_cb_rport_online((__rp)->rport_drv);			\
	else {								\
		bfa_cb_queue((__rp)->bfa, &(__rp)->hcb_qe,		\
				__bfa_cb_rport_online, (__rp));		\
	}								\
} while (0)
101
/*
 * forward declarations FCXP related functions
 */
static void	__bfa_fcxp_send_cbfn(void *cbarg, bfa_boolean_t complete);
static void	hal_fcxp_rx_plog(struct bfa_s *bfa, struct bfa_fcxp_s *fcxp,
			struct bfi_fcxp_send_rsp_s *fcxp_rsp);
static void	hal_fcxp_tx_plog(struct bfa_s *bfa, u32 reqlen,
			struct bfa_fcxp_s *fcxp, struct fchs_s *fchs);
static void	bfa_fcxp_qresume(void *cbarg);
static void	bfa_fcxp_queue(struct bfa_fcxp_s *fcxp,
			struct bfi_fcxp_send_req_s *send_req);

/*
 * forward declarations for LPS functions
 */
static void bfa_lps_meminfo(struct bfa_iocfc_cfg_s *cfg,
		struct bfa_meminfo_s *minfo, struct bfa_s *bfa);
static void bfa_lps_attach(struct bfa_s *bfa, void *bfad,
		struct bfa_iocfc_cfg_s *cfg,
		struct bfa_pcidev_s *pcidev);
static void bfa_lps_detach(struct bfa_s *bfa);
static void bfa_lps_start(struct bfa_s *bfa);
static void bfa_lps_stop(struct bfa_s *bfa);
static void bfa_lps_iocdisable(struct bfa_s *bfa);
static void bfa_lps_login_rsp(struct bfa_s *bfa,
		struct bfi_lps_login_rsp_s *rsp);
static void bfa_lps_no_res(struct bfa_lps_s *first_lps, u8 count);
static void bfa_lps_logout_rsp(struct bfa_s *bfa,
		struct bfi_lps_logout_rsp_s *rsp);
static void bfa_lps_reqq_resume(void *lps_arg);
static void bfa_lps_free(struct bfa_lps_s *lps);
static void bfa_lps_send_login(struct bfa_lps_s *lps);
static void bfa_lps_send_logout(struct bfa_lps_s *lps);
static void bfa_lps_send_set_n2n_pid(struct bfa_lps_s *lps);
static void bfa_lps_login_comp(struct bfa_lps_s *lps);
static void bfa_lps_logout_comp(struct bfa_lps_s *lps);
static void bfa_lps_cvl_event(struct bfa_lps_s *lps);

/*
 * forward declaration for LPS state machine
 */
static void bfa_lps_sm_init(struct bfa_lps_s *lps, enum bfa_lps_event event);
static void bfa_lps_sm_login(struct bfa_lps_s *lps, enum bfa_lps_event event);
static void bfa_lps_sm_loginwait(struct bfa_lps_s *lps, enum bfa_lps_event
					event);
static void bfa_lps_sm_online(struct bfa_lps_s *lps, enum bfa_lps_event event);
static void bfa_lps_sm_online_n2n_pid_wait(struct bfa_lps_s *lps,
					enum bfa_lps_event event);
static void bfa_lps_sm_logout(struct bfa_lps_s *lps, enum bfa_lps_event event);
static void bfa_lps_sm_logowait(struct bfa_lps_s *lps, enum bfa_lps_event
					event);

/*
 * forward declaration for FC Port functions
 */
static bfa_boolean_t bfa_fcport_send_enable(struct bfa_fcport_s *fcport);
static bfa_boolean_t bfa_fcport_send_disable(struct bfa_fcport_s *fcport);
static void bfa_fcport_update_linkinfo(struct bfa_fcport_s *fcport);
static void bfa_fcport_reset_linkinfo(struct bfa_fcport_s *fcport);
static void bfa_fcport_set_wwns(struct bfa_fcport_s *fcport);
static void __bfa_cb_fcport_event(void *cbarg, bfa_boolean_t complete);
static void bfa_fcport_scn(struct bfa_fcport_s *fcport,
			enum bfa_port_linkstate event, bfa_boolean_t trunk);
static void bfa_fcport_queue_cb(struct bfa_fcport_ln_s *ln,
				enum bfa_port_linkstate event);
static void __bfa_cb_fcport_stats_clr(void *cbarg, bfa_boolean_t complete);
static void bfa_fcport_stats_get_timeout(void *cbarg);
static void bfa_fcport_stats_clr_timeout(void *cbarg);
static void bfa_trunk_iocdisable(struct bfa_s *bfa);

/*
 * forward declaration for FC PORT state machine
 */
static void     bfa_fcport_sm_uninit(struct bfa_fcport_s *fcport,
					enum bfa_fcport_sm_event event);
static void     bfa_fcport_sm_enabling_qwait(struct bfa_fcport_s *fcport,
					enum bfa_fcport_sm_event event);
static void     bfa_fcport_sm_enabling(struct bfa_fcport_s *fcport,
					enum bfa_fcport_sm_event event);
static void     bfa_fcport_sm_linkdown(struct bfa_fcport_s *fcport,
					enum bfa_fcport_sm_event event);
static void     bfa_fcport_sm_linkup(struct bfa_fcport_s *fcport,
					enum bfa_fcport_sm_event event);
static void     bfa_fcport_sm_disabling(struct bfa_fcport_s *fcport,
					enum bfa_fcport_sm_event event);
static void     bfa_fcport_sm_disabling_qwait(struct bfa_fcport_s *fcport,
					enum bfa_fcport_sm_event event);
static void     bfa_fcport_sm_toggling_qwait(struct bfa_fcport_s *fcport,
					enum bfa_fcport_sm_event event);
static void     bfa_fcport_sm_disabled(struct bfa_fcport_s *fcport,
					enum bfa_fcport_sm_event event);
static void     bfa_fcport_sm_stopped(struct bfa_fcport_s *fcport,
					enum bfa_fcport_sm_event event);
static void     bfa_fcport_sm_iocdown(struct bfa_fcport_s *fcport,
					enum bfa_fcport_sm_event event);
static void     bfa_fcport_sm_iocfail(struct bfa_fcport_s *fcport,
					enum bfa_fcport_sm_event event);

static void     bfa_fcport_ln_sm_dn(struct bfa_fcport_ln_s *ln,
					enum bfa_fcport_ln_sm_event event);
static void     bfa_fcport_ln_sm_dn_nf(struct bfa_fcport_ln_s *ln,
					enum bfa_fcport_ln_sm_event event);
static void     bfa_fcport_ln_sm_dn_up_nf(struct bfa_fcport_ln_s *ln,
					enum bfa_fcport_ln_sm_event event);
static void     bfa_fcport_ln_sm_up(struct bfa_fcport_ln_s *ln,
					enum bfa_fcport_ln_sm_event event);
static void     bfa_fcport_ln_sm_up_nf(struct bfa_fcport_ln_s *ln,
					enum bfa_fcport_ln_sm_event event);
static void     bfa_fcport_ln_sm_up_dn_nf(struct bfa_fcport_ln_s *ln,
					enum bfa_fcport_ln_sm_event event);
static void     bfa_fcport_ln_sm_up_dn_up_nf(struct bfa_fcport_ln_s *ln,
					enum bfa_fcport_ln_sm_event event);

/* Maps port SM state handlers to externally visible port states. */
static struct bfa_sm_table_s hal_port_sm_table[] = {
	{BFA_SM(bfa_fcport_sm_uninit), BFA_PORT_ST_UNINIT},
	{BFA_SM(bfa_fcport_sm_enabling_qwait), BFA_PORT_ST_ENABLING_QWAIT},
	{BFA_SM(bfa_fcport_sm_enabling), BFA_PORT_ST_ENABLING},
	{BFA_SM(bfa_fcport_sm_linkdown), BFA_PORT_ST_LINKDOWN},
	{BFA_SM(bfa_fcport_sm_linkup), BFA_PORT_ST_LINKUP},
	{BFA_SM(bfa_fcport_sm_disabling_qwait), BFA_PORT_ST_DISABLING_QWAIT},
	{BFA_SM(bfa_fcport_sm_toggling_qwait), BFA_PORT_ST_TOGGLING_QWAIT},
	{BFA_SM(bfa_fcport_sm_disabling), BFA_PORT_ST_DISABLING},
	{BFA_SM(bfa_fcport_sm_disabled), BFA_PORT_ST_DISABLED},
	{BFA_SM(bfa_fcport_sm_stopped), BFA_PORT_ST_STOPPED},
	{BFA_SM(bfa_fcport_sm_iocdown), BFA_PORT_ST_IOCDOWN},
	{BFA_SM(bfa_fcport_sm_iocfail), BFA_PORT_ST_IOCDOWN},
};


/*
 * forward declaration for RPORT related functions
 */
static struct bfa_rport_s *bfa_rport_alloc(struct bfa_rport_mod_s *rp_mod);
static void		bfa_rport_free(struct bfa_rport_s *rport);
static bfa_boolean_t	bfa_rport_send_fwcreate(struct bfa_rport_s *rp);
static bfa_boolean_t	bfa_rport_send_fwdelete(struct bfa_rport_s *rp);
static bfa_boolean_t	bfa_rport_send_fwspeed(struct bfa_rport_s *rp);
static void		__bfa_cb_rport_online(void *cbarg,
						bfa_boolean_t complete);
static void		__bfa_cb_rport_offline(void *cbarg,
						bfa_boolean_t complete);

/*
 * forward declaration for RPORT state machine
 */
static void     bfa_rport_sm_uninit(struct bfa_rport_s *rp,
					enum bfa_rport_event event);
static void     bfa_rport_sm_created(struct bfa_rport_s *rp,
					enum bfa_rport_event event);
static void     bfa_rport_sm_fwcreate(struct bfa_rport_s *rp,
					enum bfa_rport_event event);
static void     bfa_rport_sm_online(struct bfa_rport_s *rp,
					enum bfa_rport_event event);
static void     bfa_rport_sm_fwdelete(struct bfa_rport_s *rp,
					enum bfa_rport_event event);
static void     bfa_rport_sm_offline(struct bfa_rport_s *rp,
					enum bfa_rport_event event);
static void     bfa_rport_sm_deleting(struct bfa_rport_s *rp,
					enum bfa_rport_event event);
static void     bfa_rport_sm_offline_pending(struct bfa_rport_s *rp,
					enum bfa_rport_event event);
static void     bfa_rport_sm_delete_pending(struct bfa_rport_s *rp,
					enum bfa_rport_event event);
static void     bfa_rport_sm_iocdisable(struct bfa_rport_s *rp,
					enum bfa_rport_event event);
static void     bfa_rport_sm_fwcreate_qfull(struct bfa_rport_s *rp,
					enum bfa_rport_event event);
static void     bfa_rport_sm_fwdelete_qfull(struct bfa_rport_s *rp,
					enum bfa_rport_event event);
static void     bfa_rport_sm_deleting_qfull(struct bfa_rport_s *rp,
					enum bfa_rport_event event);
273
Jing Huang5fbe25c2010-10-18 17:17:23 -0700274/*
Krishna Gudipatia36c61f2010-09-15 11:50:55 -0700275 * PLOG related definitions
276 */
277static int
278plkd_validate_logrec(struct bfa_plog_rec_s *pl_rec)
279{
280 if ((pl_rec->log_type != BFA_PL_LOG_TYPE_INT) &&
281 (pl_rec->log_type != BFA_PL_LOG_TYPE_STRING))
282 return 1;
283
284 if ((pl_rec->log_type != BFA_PL_LOG_TYPE_INT) &&
285 (pl_rec->log_num_ints > BFA_PL_INT_LOG_SZ))
286 return 1;
287
288 return 0;
289}
290
Maggie Zhangf16a1752010-12-09 19:12:32 -0800291static u64
292bfa_get_log_time(void)
293{
294 u64 system_time = 0;
295 struct timeval tv;
296 do_gettimeofday(&tv);
297
298 /* We are interested in seconds only. */
299 system_time = tv.tv_sec;
300 return system_time;
301}
302
Krishna Gudipatia36c61f2010-09-15 11:50:55 -0700303static void
304bfa_plog_add(struct bfa_plog_s *plog, struct bfa_plog_rec_s *pl_rec)
305{
306 u16 tail;
307 struct bfa_plog_rec_s *pl_recp;
308
309 if (plog->plog_enabled == 0)
310 return;
311
312 if (plkd_validate_logrec(pl_rec)) {
Jing Huangd4b671c2010-12-26 21:46:35 -0800313 WARN_ON(1);
Krishna Gudipatia36c61f2010-09-15 11:50:55 -0700314 return;
315 }
316
317 tail = plog->tail;
318
319 pl_recp = &(plog->plog_recs[tail]);
320
Jing Huang6a18b162010-10-18 17:08:54 -0700321 memcpy(pl_recp, pl_rec, sizeof(struct bfa_plog_rec_s));
Krishna Gudipatia36c61f2010-09-15 11:50:55 -0700322
Maggie Zhangf16a1752010-12-09 19:12:32 -0800323 pl_recp->tv = bfa_get_log_time();
Krishna Gudipatia36c61f2010-09-15 11:50:55 -0700324 BFA_PL_LOG_REC_INCR(plog->tail);
325
326 if (plog->head == plog->tail)
327 BFA_PL_LOG_REC_INCR(plog->head);
328}
329
330void
331bfa_plog_init(struct bfa_plog_s *plog)
332{
Jing Huang6a18b162010-10-18 17:08:54 -0700333 memset((char *)plog, 0, sizeof(struct bfa_plog_s));
Krishna Gudipatia36c61f2010-09-15 11:50:55 -0700334
Jing Huang6a18b162010-10-18 17:08:54 -0700335 memcpy(plog->plog_sig, BFA_PL_SIG_STR, BFA_PL_SIG_LEN);
Krishna Gudipatia36c61f2010-09-15 11:50:55 -0700336 plog->head = plog->tail = 0;
337 plog->plog_enabled = 1;
338}
339
340void
341bfa_plog_str(struct bfa_plog_s *plog, enum bfa_plog_mid mid,
342 enum bfa_plog_eid event,
343 u16 misc, char *log_str)
344{
345 struct bfa_plog_rec_s lp;
346
347 if (plog->plog_enabled) {
Jing Huang6a18b162010-10-18 17:08:54 -0700348 memset(&lp, 0, sizeof(struct bfa_plog_rec_s));
Krishna Gudipatia36c61f2010-09-15 11:50:55 -0700349 lp.mid = mid;
350 lp.eid = event;
351 lp.log_type = BFA_PL_LOG_TYPE_STRING;
352 lp.misc = misc;
353 strncpy(lp.log_entry.string_log, log_str,
354 BFA_PL_STRING_LOG_SZ - 1);
355 lp.log_entry.string_log[BFA_PL_STRING_LOG_SZ - 1] = '\0';
356 bfa_plog_add(plog, &lp);
357 }
358}
359
360void
361bfa_plog_intarr(struct bfa_plog_s *plog, enum bfa_plog_mid mid,
362 enum bfa_plog_eid event,
363 u16 misc, u32 *intarr, u32 num_ints)
364{
365 struct bfa_plog_rec_s lp;
366 u32 i;
367
368 if (num_ints > BFA_PL_INT_LOG_SZ)
369 num_ints = BFA_PL_INT_LOG_SZ;
370
371 if (plog->plog_enabled) {
Jing Huang6a18b162010-10-18 17:08:54 -0700372 memset(&lp, 0, sizeof(struct bfa_plog_rec_s));
Krishna Gudipatia36c61f2010-09-15 11:50:55 -0700373 lp.mid = mid;
374 lp.eid = event;
375 lp.log_type = BFA_PL_LOG_TYPE_INT;
376 lp.misc = misc;
377
378 for (i = 0; i < num_ints; i++)
Jing Huang6a18b162010-10-18 17:08:54 -0700379 lp.log_entry.int_log[i] = intarr[i];
Krishna Gudipatia36c61f2010-09-15 11:50:55 -0700380
381 lp.log_num_ints = (u8) num_ints;
382
383 bfa_plog_add(plog, &lp);
384 }
385}
386
387void
388bfa_plog_fchdr(struct bfa_plog_s *plog, enum bfa_plog_mid mid,
389 enum bfa_plog_eid event,
390 u16 misc, struct fchs_s *fchdr)
391{
392 struct bfa_plog_rec_s lp;
393 u32 *tmp_int = (u32 *) fchdr;
394 u32 ints[BFA_PL_INT_LOG_SZ];
395
396 if (plog->plog_enabled) {
Jing Huang6a18b162010-10-18 17:08:54 -0700397 memset(&lp, 0, sizeof(struct bfa_plog_rec_s));
Krishna Gudipatia36c61f2010-09-15 11:50:55 -0700398
399 ints[0] = tmp_int[0];
400 ints[1] = tmp_int[1];
401 ints[2] = tmp_int[4];
402
403 bfa_plog_intarr(plog, mid, event, misc, ints, 3);
404 }
405}
406
407void
408bfa_plog_fchdr_and_pl(struct bfa_plog_s *plog, enum bfa_plog_mid mid,
409 enum bfa_plog_eid event, u16 misc, struct fchs_s *fchdr,
410 u32 pld_w0)
411{
412 struct bfa_plog_rec_s lp;
413 u32 *tmp_int = (u32 *) fchdr;
414 u32 ints[BFA_PL_INT_LOG_SZ];
415
416 if (plog->plog_enabled) {
Jing Huang6a18b162010-10-18 17:08:54 -0700417 memset(&lp, 0, sizeof(struct bfa_plog_rec_s));
Krishna Gudipatia36c61f2010-09-15 11:50:55 -0700418
419 ints[0] = tmp_int[0];
420 ints[1] = tmp_int[1];
421 ints[2] = tmp_int[4];
422 ints[3] = pld_w0;
423
424 bfa_plog_intarr(plog, mid, event, misc, ints, 4);
425 }
426}
427
Krishna Gudipatia36c61f2010-09-15 11:50:55 -0700428
Jing Huang5fbe25c2010-10-18 17:17:23 -0700429/*
Krishna Gudipatia36c61f2010-09-15 11:50:55 -0700430 * fcxp_pvt BFA FCXP private functions
431 */
432
433static void
Krishna Gudipati45070252011-06-24 20:24:29 -0700434claim_fcxps_mem(struct bfa_fcxp_mod_s *mod)
Krishna Gudipatia36c61f2010-09-15 11:50:55 -0700435{
436 u16 i;
437 struct bfa_fcxp_s *fcxp;
438
Krishna Gudipati45070252011-06-24 20:24:29 -0700439 fcxp = (struct bfa_fcxp_s *) bfa_mem_kva_curp(mod);
Jing Huang6a18b162010-10-18 17:08:54 -0700440 memset(fcxp, 0, sizeof(struct bfa_fcxp_s) * mod->num_fcxps);
Krishna Gudipatia36c61f2010-09-15 11:50:55 -0700441
442 INIT_LIST_HEAD(&mod->fcxp_free_q);
443 INIT_LIST_HEAD(&mod->fcxp_active_q);
Krishna Gudipati3fd45982011-06-24 20:24:08 -0700444 INIT_LIST_HEAD(&mod->fcxp_unused_q);
Krishna Gudipatia36c61f2010-09-15 11:50:55 -0700445
446 mod->fcxp_list = fcxp;
447
448 for (i = 0; i < mod->num_fcxps; i++) {
449 fcxp->fcxp_mod = mod;
450 fcxp->fcxp_tag = i;
451
452 list_add_tail(&fcxp->qe, &mod->fcxp_free_q);
453 bfa_reqq_winit(&fcxp->reqq_wqe, bfa_fcxp_qresume, fcxp);
454 fcxp->reqq_waiting = BFA_FALSE;
455
456 fcxp = fcxp + 1;
457 }
458
Krishna Gudipati45070252011-06-24 20:24:29 -0700459 bfa_mem_kva_curp(mod) = (void *)fcxp;
Krishna Gudipatia36c61f2010-09-15 11:50:55 -0700460}
461
/*
 * Compute the DMA and KVA memory this module needs for the configured
 * number of FCXP requests and register those needs in *minfo.
 */
static void
bfa_fcxp_meminfo(struct bfa_iocfc_cfg_s *cfg, struct bfa_meminfo_s *minfo,
		struct bfa_s *bfa)
{
	struct bfa_fcxp_mod_s *fcxp_mod = BFA_FCXP_MOD(bfa);
	struct bfa_mem_kva_s *fcxp_kva = BFA_MEM_FCXP_KVA(bfa);
	struct bfa_mem_dma_s *seg_ptr;
	u16	nsegs, idx, per_seg_fcxp;
	u16	num_fcxps = cfg->fwcfg.num_fcxp_reqs;
	u32	per_fcxp_sz;

	if (num_fcxps == 0)
		return;

	/*
	 * Per-FCXP DMA footprint: min-cfg uses two small inline buffers,
	 * full config uses one small request + one large response buffer.
	 */
	if (cfg->drvcfg.min_cfg)
		per_fcxp_sz = 2 * BFA_FCXP_MAX_IBUF_SZ;
	else
		per_fcxp_sz = BFA_FCXP_MAX_IBUF_SZ + BFA_FCXP_MAX_LBUF_SZ;

	/* dma memory */
	nsegs = BFI_MEM_DMA_NSEGS(num_fcxps, per_fcxp_sz);
	per_seg_fcxp = BFI_MEM_NREQS_SEG(per_fcxp_sz);

	/* Spread the FCXPs over the DMA segments; the last segment may
	 * hold fewer than per_seg_fcxp entries.
	 */
	bfa_mem_dma_seg_iter(fcxp_mod, seg_ptr, nsegs, idx) {
		if (num_fcxps >= per_seg_fcxp) {
			num_fcxps -= per_seg_fcxp;
			bfa_mem_dma_setup(minfo, seg_ptr,
				per_seg_fcxp * per_fcxp_sz);
		} else
			bfa_mem_dma_setup(minfo, seg_ptr,
				num_fcxps * per_fcxp_sz);
	}

	/* kva memory */
	bfa_mem_kva_setup(minfo, fcxp_kva,
		cfg->fwcfg.num_fcxp_reqs * sizeof(struct bfa_fcxp_s));
}
499
500static void
501bfa_fcxp_attach(struct bfa_s *bfa, void *bfad, struct bfa_iocfc_cfg_s *cfg,
Krishna Gudipati45070252011-06-24 20:24:29 -0700502 struct bfa_pcidev_s *pcidev)
Krishna Gudipatia36c61f2010-09-15 11:50:55 -0700503{
504 struct bfa_fcxp_mod_s *mod = BFA_FCXP_MOD(bfa);
505
Krishna Gudipatia36c61f2010-09-15 11:50:55 -0700506 mod->bfa = bfa;
507 mod->num_fcxps = cfg->fwcfg.num_fcxp_reqs;
508
Jing Huang5fbe25c2010-10-18 17:17:23 -0700509 /*
Krishna Gudipatia36c61f2010-09-15 11:50:55 -0700510 * Initialize FCXP request and response payload sizes.
511 */
512 mod->req_pld_sz = mod->rsp_pld_sz = BFA_FCXP_MAX_IBUF_SZ;
513 if (!cfg->drvcfg.min_cfg)
514 mod->rsp_pld_sz = BFA_FCXP_MAX_LBUF_SZ;
515
516 INIT_LIST_HEAD(&mod->wait_q);
517
Krishna Gudipati45070252011-06-24 20:24:29 -0700518 claim_fcxps_mem(mod);
Krishna Gudipatia36c61f2010-09-15 11:50:55 -0700519}
520
/* Module detach hook: nothing to release for FCXP. */
static void
bfa_fcxp_detach(struct bfa_s *bfa)
{
}

/* Module start hook: no action needed for FCXP. */
static void
bfa_fcxp_start(struct bfa_s *bfa)
{
}

/* Module stop hook: no action needed for FCXP. */
static void
bfa_fcxp_stop(struct bfa_s *bfa)
{
}
535
/*
 * IOC failure/disable handler: recycle unused FCXPs and complete every
 * active FCXP with BFA_STATUS_IOC_FAILURE — immediately for internal
 * (caller-less) requests, via the callback queue otherwise.
 */
static void
bfa_fcxp_iocdisable(struct bfa_s *bfa)
{
	struct bfa_fcxp_mod_s *mod = BFA_FCXP_MOD(bfa);
	struct bfa_fcxp_s *fcxp;
	struct list_head *qe, *qen;

	/* Enqueue unused fcxp resources to free_q */
	list_splice_tail_init(&mod->fcxp_unused_q, &mod->fcxp_free_q);

	/* _safe iteration: bfa_fcxp_free() unlinks entries as we walk. */
	list_for_each_safe(qe, qen, &mod->fcxp_active_q) {
		fcxp = (struct bfa_fcxp_s *) qe;
		if (fcxp->caller == NULL) {
			/* Internal request: complete and free in place. */
			fcxp->send_cbfn(fcxp->caller, fcxp, fcxp->send_cbarg,
					BFA_STATUS_IOC_FAILURE, 0, 0, NULL);
			bfa_fcxp_free(fcxp);
		} else {
			/* Driver-owned request: defer the completion. */
			fcxp->rsp_status = BFA_STATUS_IOC_FAILURE;
			bfa_cb_queue(bfa, &fcxp->hcb_qe,
					__bfa_fcxp_send_cbfn, fcxp);
		}
	}
}
559
560static struct bfa_fcxp_s *
561bfa_fcxp_get(struct bfa_fcxp_mod_s *fm)
562{
563 struct bfa_fcxp_s *fcxp;
564
565 bfa_q_deq(&fm->fcxp_free_q, &fcxp);
566
567 if (fcxp)
568 list_add_tail(&fcxp->qe, &fm->fcxp_active_q);
569
570 return fcxp;
571}
572
/*
 * Initialize one direction (request or response) of an FCXP.  With no
 * caller-supplied SGEs (n_sgles == 0) the inline buffer is used;
 * otherwise the SG address/length callbacks are recorded for later use.
 */
static void
bfa_fcxp_init_reqrsp(struct bfa_fcxp_s *fcxp,
	       struct bfa_s *bfa,
	       u8 *use_ibuf,
	       u32 *nr_sgles,
	       bfa_fcxp_get_sgaddr_t *r_sga_cbfn,
	       bfa_fcxp_get_sglen_t *r_sglen_cbfn,
	       struct list_head *r_sgpg_q,
	       int n_sgles,
	       bfa_fcxp_get_sgaddr_t sga_cbfn,
	       bfa_fcxp_get_sglen_t sglen_cbfn)
{

	WARN_ON(bfa == NULL);

	bfa_trc(bfa, fcxp->fcxp_tag);

	if (n_sgles == 0) {
		*use_ibuf = 1;
	} else {
		/*
		 * NOTE(review): sga_cbfn/sglen_cbfn are function-pointer
		 * values, so "*sga_cbfn" is the same pointer after decay;
		 * these checks look intended as NULL-callback guards.
		 */
		WARN_ON(*sga_cbfn == NULL);
		WARN_ON(*sglen_cbfn == NULL);

		*use_ibuf = 0;
		*r_sga_cbfn = sga_cbfn;
		*r_sglen_cbfn = sglen_cbfn;

		*nr_sgles = n_sgles;

		/*
		 * alloc required sgpgs
		 */
		if (n_sgles > BFI_SGE_INLINE)
			WARN_ON(1);
	}

}
610
/*
 * Initialize an allocated FCXP: record its owner and set up the
 * request and response scatter-gather descriptions.
 */
static void
bfa_fcxp_init(struct bfa_fcxp_s *fcxp,
	      void *caller, struct bfa_s *bfa, int nreq_sgles,
	      int nrsp_sgles, bfa_fcxp_get_sgaddr_t req_sga_cbfn,
	      bfa_fcxp_get_sglen_t req_sglen_cbfn,
	      bfa_fcxp_get_sgaddr_t rsp_sga_cbfn,
	      bfa_fcxp_get_sglen_t rsp_sglen_cbfn)
{

	WARN_ON(bfa == NULL);

	bfa_trc(bfa, fcxp->fcxp_tag);

	fcxp->caller = caller;

	/* request direction */
	bfa_fcxp_init_reqrsp(fcxp, bfa,
		&fcxp->use_ireqbuf, &fcxp->nreq_sgles, &fcxp->req_sga_cbfn,
		&fcxp->req_sglen_cbfn, &fcxp->req_sgpg_q,
		nreq_sgles, req_sga_cbfn, req_sglen_cbfn);

	/* response direction */
	bfa_fcxp_init_reqrsp(fcxp, bfa,
		&fcxp->use_irspbuf, &fcxp->nrsp_sgles, &fcxp->rsp_sga_cbfn,
		&fcxp->rsp_sglen_cbfn, &fcxp->rsp_sgpg_q,
		nrsp_sgles, rsp_sga_cbfn, rsp_sglen_cbfn);

}
637
638static void
639bfa_fcxp_put(struct bfa_fcxp_s *fcxp)
640{
641 struct bfa_fcxp_mod_s *mod = fcxp->fcxp_mod;
642 struct bfa_fcxp_wqe_s *wqe;
643
644 bfa_q_deq(&mod->wait_q, &wqe);
645 if (wqe) {
646 bfa_trc(mod->bfa, fcxp->fcxp_tag);
647
648 bfa_fcxp_init(fcxp, wqe->caller, wqe->bfa, wqe->nreq_sgles,
649 wqe->nrsp_sgles, wqe->req_sga_cbfn,
650 wqe->req_sglen_cbfn, wqe->rsp_sga_cbfn,
651 wqe->rsp_sglen_cbfn);
652
653 wqe->alloc_cbfn(wqe->alloc_cbarg, fcxp);
654 return;
655 }
656
Jing Huangd4b671c2010-12-26 21:46:35 -0800657 WARN_ON(!bfa_q_is_on_q(&mod->fcxp_active_q, fcxp));
Krishna Gudipatia36c61f2010-09-15 11:50:55 -0700658 list_del(&fcxp->qe);
659 list_add_tail(&fcxp->qe, &mod->fcxp_free_q);
660}
661
/*
 * No-op completion handler installed for FCXPs whose real completion
 * has been discarded (e.g. the requester went away).
 */
static void
bfa_fcxp_null_comp(void *bfad_fcxp, struct bfa_fcxp_s *fcxp, void *cbarg,
		   bfa_status_t req_status, u32 rsp_len,
		   u32 resid_len, struct fchs_s *rsp_fchs)
{
	/* discarded fcxp completion */
}
669
670static void
671__bfa_fcxp_send_cbfn(void *cbarg, bfa_boolean_t complete)
672{
673 struct bfa_fcxp_s *fcxp = cbarg;
674
675 if (complete) {
676 fcxp->send_cbfn(fcxp->caller, fcxp, fcxp->send_cbarg,
677 fcxp->rsp_status, fcxp->rsp_len,
678 fcxp->residue_len, &fcxp->rsp_fchs);
679 } else {
680 bfa_fcxp_free(fcxp);
681 }
682}
683
/*
 * Firmware send-response handler: byte-swap the response fields, locate
 * the owning FCXP by tag, log the RX, and complete the request —
 * synchronously for internal (caller-less) FCXPs, via the callback
 * queue otherwise.
 */
static void
hal_fcxp_send_comp(struct bfa_s *bfa, struct bfi_fcxp_send_rsp_s *fcxp_rsp)
{
	struct bfa_fcxp_mod_s	*mod = BFA_FCXP_MOD(bfa);
	struct bfa_fcxp_s	*fcxp;
	u16		fcxp_tag = be16_to_cpu(fcxp_rsp->fcxp_tag);

	bfa_trc(bfa, fcxp_tag);

	fcxp_rsp->rsp_len = be32_to_cpu(fcxp_rsp->rsp_len);

	/*
	 * @todo f/w should not set residue to non-0 when everything
	 * is received.
	 */
	if (fcxp_rsp->req_status == BFA_STATUS_OK)
		fcxp_rsp->residue_len = 0;
	else
		fcxp_rsp->residue_len = be32_to_cpu(fcxp_rsp->residue_len);

	fcxp = BFA_FCXP_FROM_TAG(mod, fcxp_tag);

	WARN_ON(fcxp->send_cbfn == NULL);

	hal_fcxp_rx_plog(mod->bfa, fcxp, fcxp_rsp);

	if (fcxp->send_cbfn != NULL) {
		bfa_trc(mod->bfa, (NULL == fcxp->caller));
		if (fcxp->caller == NULL) {
			/* Internal request: complete immediately. */
			fcxp->send_cbfn(fcxp->caller, fcxp, fcxp->send_cbarg,
					fcxp_rsp->req_status, fcxp_rsp->rsp_len,
					fcxp_rsp->residue_len, &fcxp_rsp->fchs);
			/*
			 * fcxp automatically freed on return from the callback
			 */
			bfa_fcxp_free(fcxp);
		} else {
			/* Stash results; completion runs off the cb queue. */
			fcxp->rsp_status = fcxp_rsp->req_status;
			fcxp->rsp_len = fcxp_rsp->rsp_len;
			fcxp->residue_len = fcxp_rsp->residue_len;
			fcxp->rsp_fchs = fcxp_rsp->fchs;

			bfa_cb_queue(bfa, &fcxp->hcb_qe,
					__bfa_fcxp_send_cbfn, fcxp);
		}
	} else {
		bfa_trc(bfa, (NULL == fcxp->send_cbfn));
	}
}
733
734static void
Krishna Gudipatia36c61f2010-09-15 11:50:55 -0700735hal_fcxp_tx_plog(struct bfa_s *bfa, u32 reqlen, struct bfa_fcxp_s *fcxp,
736 struct fchs_s *fchs)
737{
738 /*
739 * TODO: TX ox_id
740 */
741 if (reqlen > 0) {
742 if (fcxp->use_ireqbuf) {
743 u32 pld_w0 =
744 *((u32 *) BFA_FCXP_REQ_PLD(fcxp));
745
746 bfa_plog_fchdr_and_pl(bfa->plog, BFA_PL_MID_HAL_FCXP,
747 BFA_PL_EID_TX,
748 reqlen + sizeof(struct fchs_s), fchs,
749 pld_w0);
750 } else {
751 bfa_plog_fchdr(bfa->plog, BFA_PL_MID_HAL_FCXP,
752 BFA_PL_EID_TX,
753 reqlen + sizeof(struct fchs_s),
754 fchs);
755 }
756 } else {
757 bfa_plog_fchdr(bfa->plog, BFA_PL_MID_HAL_FCXP, BFA_PL_EID_TX,
758 reqlen + sizeof(struct fchs_s), fchs);
759 }
760}
761
762static void
763hal_fcxp_rx_plog(struct bfa_s *bfa, struct bfa_fcxp_s *fcxp,
764 struct bfi_fcxp_send_rsp_s *fcxp_rsp)
765{
766 if (fcxp_rsp->rsp_len > 0) {
767 if (fcxp->use_irspbuf) {
768 u32 pld_w0 =
769 *((u32 *) BFA_FCXP_RSP_PLD(fcxp));
770
771 bfa_plog_fchdr_and_pl(bfa->plog, BFA_PL_MID_HAL_FCXP,
772 BFA_PL_EID_RX,
773 (u16) fcxp_rsp->rsp_len,
774 &fcxp_rsp->fchs, pld_w0);
775 } else {
776 bfa_plog_fchdr(bfa->plog, BFA_PL_MID_HAL_FCXP,
777 BFA_PL_EID_RX,
778 (u16) fcxp_rsp->rsp_len,
779 &fcxp_rsp->fchs);
780 }
781 } else {
782 bfa_plog_fchdr(bfa->plog, BFA_PL_MID_HAL_FCXP, BFA_PL_EID_RX,
783 (u16) fcxp_rsp->rsp_len, &fcxp_rsp->fchs);
784 }
785}
786
Jing Huang5fbe25c2010-10-18 17:17:23 -0700787/*
Krishna Gudipatia36c61f2010-09-15 11:50:55 -0700788 * Handler to resume sending fcxp when space in available in cpe queue.
789 */
790static void
791bfa_fcxp_qresume(void *cbarg)
792{
793 struct bfa_fcxp_s *fcxp = cbarg;
794 struct bfa_s *bfa = fcxp->fcxp_mod->bfa;
795 struct bfi_fcxp_send_req_s *send_req;
796
797 fcxp->reqq_waiting = BFA_FALSE;
798 send_req = bfa_reqq_next(bfa, BFA_REQQ_FCXP);
799 bfa_fcxp_queue(fcxp, send_req);
800}
801
Jing Huang5fbe25c2010-10-18 17:17:23 -0700802/*
Krishna Gudipatia36c61f2010-09-15 11:50:55 -0700803 * Queue fcxp send request to foimrware.
804 */
805static void
806bfa_fcxp_queue(struct bfa_fcxp_s *fcxp, struct bfi_fcxp_send_req_s *send_req)
807{
808 struct bfa_s *bfa = fcxp->fcxp_mod->bfa;
809 struct bfa_fcxp_req_info_s *reqi = &fcxp->req_info;
810 struct bfa_fcxp_rsp_info_s *rspi = &fcxp->rsp_info;
811 struct bfa_rport_s *rport = reqi->bfa_rport;
812
813 bfi_h2i_set(send_req->mh, BFI_MC_FCXP, BFI_FCXP_H2I_SEND_REQ,
Krishna Gudipati3fd45982011-06-24 20:24:08 -0700814 bfa_fn_lpu(bfa));
Krishna Gudipatia36c61f2010-09-15 11:50:55 -0700815
Jing Huangba816ea2010-10-18 17:10:50 -0700816 send_req->fcxp_tag = cpu_to_be16(fcxp->fcxp_tag);
Krishna Gudipatia36c61f2010-09-15 11:50:55 -0700817 if (rport) {
818 send_req->rport_fw_hndl = rport->fw_handle;
Jing Huangba816ea2010-10-18 17:10:50 -0700819 send_req->max_frmsz = cpu_to_be16(rport->rport_info.max_frmsz);
Krishna Gudipatia36c61f2010-09-15 11:50:55 -0700820 if (send_req->max_frmsz == 0)
Jing Huangba816ea2010-10-18 17:10:50 -0700821 send_req->max_frmsz = cpu_to_be16(FC_MAX_PDUSZ);
Krishna Gudipatia36c61f2010-09-15 11:50:55 -0700822 } else {
823 send_req->rport_fw_hndl = 0;
Jing Huangba816ea2010-10-18 17:10:50 -0700824 send_req->max_frmsz = cpu_to_be16(FC_MAX_PDUSZ);
Krishna Gudipatia36c61f2010-09-15 11:50:55 -0700825 }
826
Jing Huangba816ea2010-10-18 17:10:50 -0700827 send_req->vf_id = cpu_to_be16(reqi->vf_id);
Krishna Gudipati3fd45982011-06-24 20:24:08 -0700828 send_req->lp_fwtag = bfa_lps_get_fwtag(bfa, reqi->lp_tag);
Krishna Gudipatia36c61f2010-09-15 11:50:55 -0700829 send_req->class = reqi->class;
830 send_req->rsp_timeout = rspi->rsp_timeout;
831 send_req->cts = reqi->cts;
832 send_req->fchs = reqi->fchs;
833
Jing Huangba816ea2010-10-18 17:10:50 -0700834 send_req->req_len = cpu_to_be32(reqi->req_tot_len);
835 send_req->rsp_maxlen = cpu_to_be32(rspi->rsp_maxlen);
Krishna Gudipatia36c61f2010-09-15 11:50:55 -0700836
837 /*
838 * setup req sgles
839 */
840 if (fcxp->use_ireqbuf == 1) {
Krishna Gudipati85ce9282011-06-13 15:39:36 -0700841 bfa_alen_set(&send_req->req_alen, reqi->req_tot_len,
Krishna Gudipatia36c61f2010-09-15 11:50:55 -0700842 BFA_FCXP_REQ_PLD_PA(fcxp));
843 } else {
844 if (fcxp->nreq_sgles > 0) {
Jing Huangd4b671c2010-12-26 21:46:35 -0800845 WARN_ON(fcxp->nreq_sgles != 1);
Krishna Gudipati85ce9282011-06-13 15:39:36 -0700846 bfa_alen_set(&send_req->req_alen, reqi->req_tot_len,
847 fcxp->req_sga_cbfn(fcxp->caller, 0));
Krishna Gudipatia36c61f2010-09-15 11:50:55 -0700848 } else {
Jing Huangd4b671c2010-12-26 21:46:35 -0800849 WARN_ON(reqi->req_tot_len != 0);
Krishna Gudipati85ce9282011-06-13 15:39:36 -0700850 bfa_alen_set(&send_req->rsp_alen, 0, 0);
Krishna Gudipatia36c61f2010-09-15 11:50:55 -0700851 }
852 }
853
854 /*
855 * setup rsp sgles
856 */
857 if (fcxp->use_irspbuf == 1) {
Jing Huangd4b671c2010-12-26 21:46:35 -0800858 WARN_ON(rspi->rsp_maxlen > BFA_FCXP_MAX_LBUF_SZ);
Krishna Gudipatia36c61f2010-09-15 11:50:55 -0700859
Krishna Gudipati85ce9282011-06-13 15:39:36 -0700860 bfa_alen_set(&send_req->rsp_alen, rspi->rsp_maxlen,
Krishna Gudipatia36c61f2010-09-15 11:50:55 -0700861 BFA_FCXP_RSP_PLD_PA(fcxp));
Krishna Gudipatia36c61f2010-09-15 11:50:55 -0700862 } else {
863 if (fcxp->nrsp_sgles > 0) {
Jing Huangd4b671c2010-12-26 21:46:35 -0800864 WARN_ON(fcxp->nrsp_sgles != 1);
Krishna Gudipati85ce9282011-06-13 15:39:36 -0700865 bfa_alen_set(&send_req->rsp_alen, rspi->rsp_maxlen,
866 fcxp->rsp_sga_cbfn(fcxp->caller, 0));
867
Krishna Gudipatia36c61f2010-09-15 11:50:55 -0700868 } else {
Jing Huangd4b671c2010-12-26 21:46:35 -0800869 WARN_ON(rspi->rsp_maxlen != 0);
Krishna Gudipati85ce9282011-06-13 15:39:36 -0700870 bfa_alen_set(&send_req->rsp_alen, 0, 0);
Krishna Gudipatia36c61f2010-09-15 11:50:55 -0700871 }
872 }
873
874 hal_fcxp_tx_plog(bfa, reqi->req_tot_len, fcxp, &reqi->fchs);
875
Krishna Gudipati3fd45982011-06-24 20:24:08 -0700876 bfa_reqq_produce(bfa, BFA_REQQ_FCXP, send_req->mh);
Krishna Gudipatia36c61f2010-09-15 11:50:55 -0700877
878 bfa_trc(bfa, bfa_reqq_pi(bfa, BFA_REQQ_FCXP));
879 bfa_trc(bfa, bfa_reqq_ci(bfa, BFA_REQQ_FCXP));
880}
881
Jing Huang5fbe25c2010-10-18 17:17:23 -0700882/*
Krishna Gudipatia36c61f2010-09-15 11:50:55 -0700883 * Allocate an FCXP instance to send a response or to send a request
884 * that has a response. Request/response buffers are allocated by caller.
885 *
886 * @param[in] bfa BFA bfa instance
887 * @param[in] nreq_sgles Number of SG elements required for request
888 * buffer. 0, if fcxp internal buffers are used.
889 * Use bfa_fcxp_get_reqbuf() to get the
890 * internal req buffer.
891 * @param[in] req_sgles SG elements describing request buffer. Will be
892 * copied in by BFA and hence can be freed on
893 * return from this function.
894 * @param[in] get_req_sga function ptr to be called to get a request SG
895 * Address (given the sge index).
896 * @param[in] get_req_sglen function ptr to be called to get a request SG
897 * len (given the sge index).
898 * @param[in] get_rsp_sga function ptr to be called to get a response SG
899 * Address (given the sge index).
900 * @param[in] get_rsp_sglen function ptr to be called to get a response SG
901 * len (given the sge index).
902 *
903 * @return FCXP instance. NULL on failure.
904 */
905struct bfa_fcxp_s *
906bfa_fcxp_alloc(void *caller, struct bfa_s *bfa, int nreq_sgles,
907 int nrsp_sgles, bfa_fcxp_get_sgaddr_t req_sga_cbfn,
908 bfa_fcxp_get_sglen_t req_sglen_cbfn,
909 bfa_fcxp_get_sgaddr_t rsp_sga_cbfn,
910 bfa_fcxp_get_sglen_t rsp_sglen_cbfn)
911{
912 struct bfa_fcxp_s *fcxp = NULL;
913
Jing Huangd4b671c2010-12-26 21:46:35 -0800914 WARN_ON(bfa == NULL);
Krishna Gudipatia36c61f2010-09-15 11:50:55 -0700915
916 fcxp = bfa_fcxp_get(BFA_FCXP_MOD(bfa));
917 if (fcxp == NULL)
918 return NULL;
919
920 bfa_trc(bfa, fcxp->fcxp_tag);
921
922 bfa_fcxp_init(fcxp, caller, bfa, nreq_sgles, nrsp_sgles, req_sga_cbfn,
923 req_sglen_cbfn, rsp_sga_cbfn, rsp_sglen_cbfn);
924
925 return fcxp;
926}
927
Jing Huang5fbe25c2010-10-18 17:17:23 -0700928/*
Krishna Gudipatia36c61f2010-09-15 11:50:55 -0700929 * Get the internal request buffer pointer
930 *
931 * @param[in] fcxp BFA fcxp pointer
932 *
933 * @return pointer to the internal request buffer
934 */
935void *
936bfa_fcxp_get_reqbuf(struct bfa_fcxp_s *fcxp)
937{
938 struct bfa_fcxp_mod_s *mod = fcxp->fcxp_mod;
939 void *reqbuf;
940
Jing Huangd4b671c2010-12-26 21:46:35 -0800941 WARN_ON(fcxp->use_ireqbuf != 1);
Krishna Gudipati45070252011-06-24 20:24:29 -0700942 reqbuf = bfa_mem_get_dmabuf_kva(mod, fcxp->fcxp_tag,
943 mod->req_pld_sz + mod->rsp_pld_sz);
Krishna Gudipatia36c61f2010-09-15 11:50:55 -0700944 return reqbuf;
945}
946
947u32
948bfa_fcxp_get_reqbufsz(struct bfa_fcxp_s *fcxp)
949{
950 struct bfa_fcxp_mod_s *mod = fcxp->fcxp_mod;
951
952 return mod->req_pld_sz;
953}
954
Jing Huang5fbe25c2010-10-18 17:17:23 -0700955/*
Krishna Gudipatia36c61f2010-09-15 11:50:55 -0700956 * Get the internal response buffer pointer
957 *
958 * @param[in] fcxp BFA fcxp pointer
959 *
960 * @return pointer to the internal request buffer
961 */
962void *
963bfa_fcxp_get_rspbuf(struct bfa_fcxp_s *fcxp)
964{
965 struct bfa_fcxp_mod_s *mod = fcxp->fcxp_mod;
Krishna Gudipati45070252011-06-24 20:24:29 -0700966 void *fcxp_buf;
Krishna Gudipatia36c61f2010-09-15 11:50:55 -0700967
Jing Huangd4b671c2010-12-26 21:46:35 -0800968 WARN_ON(fcxp->use_irspbuf != 1);
Krishna Gudipatia36c61f2010-09-15 11:50:55 -0700969
Krishna Gudipati45070252011-06-24 20:24:29 -0700970 fcxp_buf = bfa_mem_get_dmabuf_kva(mod, fcxp->fcxp_tag,
971 mod->req_pld_sz + mod->rsp_pld_sz);
972
973 /* fcxp_buf = req_buf + rsp_buf :- add req_buf_sz to get to rsp_buf */
974 return ((u8 *) fcxp_buf) + mod->req_pld_sz;
Krishna Gudipatia36c61f2010-09-15 11:50:55 -0700975}
976
Jing Huang5fbe25c2010-10-18 17:17:23 -0700977/*
Maggie Zhangda99dcc2010-12-09 19:13:20 -0800978 * Free the BFA FCXP
Krishna Gudipatia36c61f2010-09-15 11:50:55 -0700979 *
980 * @param[in] fcxp BFA fcxp pointer
981 *
982 * @return void
983 */
984void
985bfa_fcxp_free(struct bfa_fcxp_s *fcxp)
986{
987 struct bfa_fcxp_mod_s *mod = fcxp->fcxp_mod;
988
Jing Huangd4b671c2010-12-26 21:46:35 -0800989 WARN_ON(fcxp == NULL);
Krishna Gudipatia36c61f2010-09-15 11:50:55 -0700990 bfa_trc(mod->bfa, fcxp->fcxp_tag);
991 bfa_fcxp_put(fcxp);
992}
993
Jing Huang5fbe25c2010-10-18 17:17:23 -0700994/*
Krishna Gudipatia36c61f2010-09-15 11:50:55 -0700995 * Send a FCXP request
996 *
997 * @param[in] fcxp BFA fcxp pointer
998 * @param[in] rport BFA rport pointer. Could be left NULL for WKA rports
999 * @param[in] vf_id virtual Fabric ID
1000 * @param[in] lp_tag lport tag
Lucas De Marchi25985ed2011-03-30 22:57:33 -03001001 * @param[in] cts use Continuous sequence
Krishna Gudipatia36c61f2010-09-15 11:50:55 -07001002 * @param[in] cos fc Class of Service
1003 * @param[in] reqlen request length, does not include FCHS length
1004 * @param[in] fchs fc Header Pointer. The header content will be copied
1005 * in by BFA.
1006 *
1007 * @param[in] cbfn call back function to be called on receiving
1008 * the response
1009 * @param[in] cbarg arg for cbfn
1010 * @param[in] rsp_timeout
1011 * response timeout
1012 *
1013 * @return bfa_status_t
1014 */
1015void
1016bfa_fcxp_send(struct bfa_fcxp_s *fcxp, struct bfa_rport_s *rport,
1017 u16 vf_id, u8 lp_tag, bfa_boolean_t cts, enum fc_cos cos,
1018 u32 reqlen, struct fchs_s *fchs, bfa_cb_fcxp_send_t cbfn,
1019 void *cbarg, u32 rsp_maxlen, u8 rsp_timeout)
1020{
1021 struct bfa_s *bfa = fcxp->fcxp_mod->bfa;
1022 struct bfa_fcxp_req_info_s *reqi = &fcxp->req_info;
1023 struct bfa_fcxp_rsp_info_s *rspi = &fcxp->rsp_info;
1024 struct bfi_fcxp_send_req_s *send_req;
1025
1026 bfa_trc(bfa, fcxp->fcxp_tag);
1027
Jing Huang5fbe25c2010-10-18 17:17:23 -07001028 /*
Krishna Gudipatia36c61f2010-09-15 11:50:55 -07001029 * setup request/response info
1030 */
1031 reqi->bfa_rport = rport;
1032 reqi->vf_id = vf_id;
1033 reqi->lp_tag = lp_tag;
1034 reqi->class = cos;
1035 rspi->rsp_timeout = rsp_timeout;
1036 reqi->cts = cts;
1037 reqi->fchs = *fchs;
1038 reqi->req_tot_len = reqlen;
1039 rspi->rsp_maxlen = rsp_maxlen;
1040 fcxp->send_cbfn = cbfn ? cbfn : bfa_fcxp_null_comp;
1041 fcxp->send_cbarg = cbarg;
1042
Jing Huang5fbe25c2010-10-18 17:17:23 -07001043 /*
Krishna Gudipatia36c61f2010-09-15 11:50:55 -07001044 * If no room in CPE queue, wait for space in request queue
1045 */
1046 send_req = bfa_reqq_next(bfa, BFA_REQQ_FCXP);
1047 if (!send_req) {
1048 bfa_trc(bfa, fcxp->fcxp_tag);
1049 fcxp->reqq_waiting = BFA_TRUE;
1050 bfa_reqq_wait(bfa, BFA_REQQ_FCXP, &fcxp->reqq_wqe);
1051 return;
1052 }
1053
1054 bfa_fcxp_queue(fcxp, send_req);
1055}
1056
Jing Huang5fbe25c2010-10-18 17:17:23 -07001057/*
Krishna Gudipatia36c61f2010-09-15 11:50:55 -07001058 * Abort a BFA FCXP
1059 *
1060 * @param[in] fcxp BFA fcxp pointer
1061 *
1062 * @return void
1063 */
1064bfa_status_t
1065bfa_fcxp_abort(struct bfa_fcxp_s *fcxp)
1066{
1067 bfa_trc(fcxp->fcxp_mod->bfa, fcxp->fcxp_tag);
Jing Huangd4b671c2010-12-26 21:46:35 -08001068 WARN_ON(1);
Krishna Gudipatia36c61f2010-09-15 11:50:55 -07001069 return BFA_STATUS_OK;
1070}
1071
1072void
1073bfa_fcxp_alloc_wait(struct bfa_s *bfa, struct bfa_fcxp_wqe_s *wqe,
1074 bfa_fcxp_alloc_cbfn_t alloc_cbfn, void *alloc_cbarg,
1075 void *caller, int nreq_sgles,
1076 int nrsp_sgles, bfa_fcxp_get_sgaddr_t req_sga_cbfn,
1077 bfa_fcxp_get_sglen_t req_sglen_cbfn,
1078 bfa_fcxp_get_sgaddr_t rsp_sga_cbfn,
1079 bfa_fcxp_get_sglen_t rsp_sglen_cbfn)
1080{
1081 struct bfa_fcxp_mod_s *mod = BFA_FCXP_MOD(bfa);
1082
Jing Huangd4b671c2010-12-26 21:46:35 -08001083 WARN_ON(!list_empty(&mod->fcxp_free_q));
Krishna Gudipatia36c61f2010-09-15 11:50:55 -07001084
1085 wqe->alloc_cbfn = alloc_cbfn;
1086 wqe->alloc_cbarg = alloc_cbarg;
1087 wqe->caller = caller;
1088 wqe->bfa = bfa;
1089 wqe->nreq_sgles = nreq_sgles;
1090 wqe->nrsp_sgles = nrsp_sgles;
1091 wqe->req_sga_cbfn = req_sga_cbfn;
1092 wqe->req_sglen_cbfn = req_sglen_cbfn;
1093 wqe->rsp_sga_cbfn = rsp_sga_cbfn;
1094 wqe->rsp_sglen_cbfn = rsp_sglen_cbfn;
1095
1096 list_add_tail(&wqe->qe, &mod->wait_q);
1097}
1098
1099void
1100bfa_fcxp_walloc_cancel(struct bfa_s *bfa, struct bfa_fcxp_wqe_s *wqe)
1101{
1102 struct bfa_fcxp_mod_s *mod = BFA_FCXP_MOD(bfa);
1103
Jing Huangd4b671c2010-12-26 21:46:35 -08001104 WARN_ON(!bfa_q_is_on_q(&mod->wait_q, wqe));
Krishna Gudipatia36c61f2010-09-15 11:50:55 -07001105 list_del(&wqe->qe);
1106}
1107
1108void
1109bfa_fcxp_discard(struct bfa_fcxp_s *fcxp)
1110{
Jing Huang5fbe25c2010-10-18 17:17:23 -07001111 /*
Krishna Gudipatia36c61f2010-09-15 11:50:55 -07001112 * If waiting for room in request queue, cancel reqq wait
1113 * and free fcxp.
1114 */
1115 if (fcxp->reqq_waiting) {
1116 fcxp->reqq_waiting = BFA_FALSE;
1117 bfa_reqq_wcancel(&fcxp->reqq_wqe);
1118 bfa_fcxp_free(fcxp);
1119 return;
1120 }
1121
1122 fcxp->send_cbfn = bfa_fcxp_null_comp;
1123}
1124
Krishna Gudipatia36c61f2010-09-15 11:50:55 -07001125void
1126bfa_fcxp_isr(struct bfa_s *bfa, struct bfi_msg_s *msg)
1127{
1128 switch (msg->mhdr.msg_id) {
1129 case BFI_FCXP_I2H_SEND_RSP:
1130 hal_fcxp_send_comp(bfa, (struct bfi_fcxp_send_rsp_s *) msg);
1131 break;
1132
1133 default:
1134 bfa_trc(bfa, msg->mhdr.msg_id);
Jing Huangd4b671c2010-12-26 21:46:35 -08001135 WARN_ON(1);
Krishna Gudipatia36c61f2010-09-15 11:50:55 -07001136 }
1137}
1138
1139u32
1140bfa_fcxp_get_maxrsp(struct bfa_s *bfa)
1141{
1142 struct bfa_fcxp_mod_s *mod = BFA_FCXP_MOD(bfa);
1143
1144 return mod->rsp_pld_sz;
1145}
1146
Krishna Gudipati3fd45982011-06-24 20:24:08 -07001147void
1148bfa_fcxp_res_recfg(struct bfa_s *bfa, u16 num_fcxp_fw)
1149{
1150 struct bfa_fcxp_mod_s *mod = BFA_FCXP_MOD(bfa);
1151 struct list_head *qe;
1152 int i;
1153
1154 for (i = 0; i < (mod->num_fcxps - num_fcxp_fw); i++) {
1155 bfa_q_deq_tail(&mod->fcxp_free_q, &qe);
1156 list_add_tail(qe, &mod->fcxp_unused_q);
1157 }
1158}
Krishna Gudipatia36c61f2010-09-15 11:50:55 -07001159
Jing Huang5fbe25c2010-10-18 17:17:23 -07001160/*
Krishna Gudipatia36c61f2010-09-15 11:50:55 -07001161 * BFA LPS state machine functions
1162 */
1163
Jing Huang5fbe25c2010-10-18 17:17:23 -07001164/*
Krishna Gudipatia36c61f2010-09-15 11:50:55 -07001165 * Init state -- no login
1166 */
1167static void
1168bfa_lps_sm_init(struct bfa_lps_s *lps, enum bfa_lps_event event)
1169{
Krishna Gudipati3fd45982011-06-24 20:24:08 -07001170 bfa_trc(lps->bfa, lps->bfa_tag);
Krishna Gudipatia36c61f2010-09-15 11:50:55 -07001171 bfa_trc(lps->bfa, event);
1172
1173 switch (event) {
1174 case BFA_LPS_SM_LOGIN:
1175 if (bfa_reqq_full(lps->bfa, lps->reqq)) {
1176 bfa_sm_set_state(lps, bfa_lps_sm_loginwait);
1177 bfa_reqq_wait(lps->bfa, lps->reqq, &lps->wqe);
1178 } else {
1179 bfa_sm_set_state(lps, bfa_lps_sm_login);
1180 bfa_lps_send_login(lps);
1181 }
1182
1183 if (lps->fdisc)
1184 bfa_plog_str(lps->bfa->plog, BFA_PL_MID_LPS,
1185 BFA_PL_EID_LOGIN, 0, "FDISC Request");
1186 else
1187 bfa_plog_str(lps->bfa->plog, BFA_PL_MID_LPS,
1188 BFA_PL_EID_LOGIN, 0, "FLOGI Request");
1189 break;
1190
1191 case BFA_LPS_SM_LOGOUT:
1192 bfa_lps_logout_comp(lps);
1193 break;
1194
1195 case BFA_LPS_SM_DELETE:
1196 bfa_lps_free(lps);
1197 break;
1198
1199 case BFA_LPS_SM_RX_CVL:
1200 case BFA_LPS_SM_OFFLINE:
1201 break;
1202
1203 case BFA_LPS_SM_FWRSP:
1204 /*
1205 * Could happen when fabric detects loopback and discards
1206 * the lps request. Fw will eventually sent out the timeout
1207 * Just ignore
1208 */
1209 break;
1210
1211 default:
1212 bfa_sm_fault(lps->bfa, event);
1213 }
1214}
1215
Jing Huang5fbe25c2010-10-18 17:17:23 -07001216/*
Krishna Gudipatia36c61f2010-09-15 11:50:55 -07001217 * login is in progress -- awaiting response from firmware
1218 */
1219static void
1220bfa_lps_sm_login(struct bfa_lps_s *lps, enum bfa_lps_event event)
1221{
Krishna Gudipati3fd45982011-06-24 20:24:08 -07001222 bfa_trc(lps->bfa, lps->bfa_tag);
Krishna Gudipatia36c61f2010-09-15 11:50:55 -07001223 bfa_trc(lps->bfa, event);
1224
1225 switch (event) {
1226 case BFA_LPS_SM_FWRSP:
1227 if (lps->status == BFA_STATUS_OK) {
1228 bfa_sm_set_state(lps, bfa_lps_sm_online);
1229 if (lps->fdisc)
1230 bfa_plog_str(lps->bfa->plog, BFA_PL_MID_LPS,
1231 BFA_PL_EID_LOGIN, 0, "FDISC Accept");
1232 else
1233 bfa_plog_str(lps->bfa->plog, BFA_PL_MID_LPS,
1234 BFA_PL_EID_LOGIN, 0, "FLOGI Accept");
Krishna Gudipatib7044952010-12-13 16:17:42 -08001235 /* If N2N, send the assigned PID to FW */
1236 bfa_trc(lps->bfa, lps->fport);
1237 bfa_trc(lps->bfa, lps->lp_pid);
1238
1239 if (!lps->fport && lps->lp_pid)
1240 bfa_sm_send_event(lps, BFA_LPS_SM_SET_N2N_PID);
Krishna Gudipatia36c61f2010-09-15 11:50:55 -07001241 } else {
1242 bfa_sm_set_state(lps, bfa_lps_sm_init);
1243 if (lps->fdisc)
1244 bfa_plog_str(lps->bfa->plog, BFA_PL_MID_LPS,
1245 BFA_PL_EID_LOGIN, 0,
1246 "FDISC Fail (RJT or timeout)");
1247 else
1248 bfa_plog_str(lps->bfa->plog, BFA_PL_MID_LPS,
1249 BFA_PL_EID_LOGIN, 0,
1250 "FLOGI Fail (RJT or timeout)");
1251 }
1252 bfa_lps_login_comp(lps);
1253 break;
1254
1255 case BFA_LPS_SM_OFFLINE:
Krishna Gudipatibe540a92011-06-13 15:53:04 -07001256 case BFA_LPS_SM_DELETE:
Krishna Gudipatia36c61f2010-09-15 11:50:55 -07001257 bfa_sm_set_state(lps, bfa_lps_sm_init);
1258 break;
1259
Krishna Gudipatib7044952010-12-13 16:17:42 -08001260 case BFA_LPS_SM_SET_N2N_PID:
1261 bfa_trc(lps->bfa, lps->fport);
1262 bfa_trc(lps->bfa, lps->lp_pid);
1263 break;
1264
Krishna Gudipatia36c61f2010-09-15 11:50:55 -07001265 default:
1266 bfa_sm_fault(lps->bfa, event);
1267 }
1268}
1269
Jing Huang5fbe25c2010-10-18 17:17:23 -07001270/*
Krishna Gudipatia36c61f2010-09-15 11:50:55 -07001271 * login pending - awaiting space in request queue
1272 */
1273static void
1274bfa_lps_sm_loginwait(struct bfa_lps_s *lps, enum bfa_lps_event event)
1275{
Krishna Gudipati3fd45982011-06-24 20:24:08 -07001276 bfa_trc(lps->bfa, lps->bfa_tag);
Krishna Gudipatia36c61f2010-09-15 11:50:55 -07001277 bfa_trc(lps->bfa, event);
1278
1279 switch (event) {
1280 case BFA_LPS_SM_RESUME:
1281 bfa_sm_set_state(lps, bfa_lps_sm_login);
1282 break;
1283
1284 case BFA_LPS_SM_OFFLINE:
Krishna Gudipatibe540a92011-06-13 15:53:04 -07001285 case BFA_LPS_SM_DELETE:
Krishna Gudipatia36c61f2010-09-15 11:50:55 -07001286 bfa_sm_set_state(lps, bfa_lps_sm_init);
1287 bfa_reqq_wcancel(&lps->wqe);
1288 break;
1289
1290 case BFA_LPS_SM_RX_CVL:
1291 /*
1292 * Login was not even sent out; so when getting out
1293 * of this state, it will appear like a login retry
1294 * after Clear virtual link
1295 */
1296 break;
1297
1298 default:
1299 bfa_sm_fault(lps->bfa, event);
1300 }
1301}
1302
Jing Huang5fbe25c2010-10-18 17:17:23 -07001303/*
Krishna Gudipatia36c61f2010-09-15 11:50:55 -07001304 * login complete
1305 */
1306static void
1307bfa_lps_sm_online(struct bfa_lps_s *lps, enum bfa_lps_event event)
1308{
Krishna Gudipati3fd45982011-06-24 20:24:08 -07001309 bfa_trc(lps->bfa, lps->bfa_tag);
Krishna Gudipatia36c61f2010-09-15 11:50:55 -07001310 bfa_trc(lps->bfa, event);
1311
1312 switch (event) {
1313 case BFA_LPS_SM_LOGOUT:
1314 if (bfa_reqq_full(lps->bfa, lps->reqq)) {
1315 bfa_sm_set_state(lps, bfa_lps_sm_logowait);
1316 bfa_reqq_wait(lps->bfa, lps->reqq, &lps->wqe);
1317 } else {
1318 bfa_sm_set_state(lps, bfa_lps_sm_logout);
1319 bfa_lps_send_logout(lps);
1320 }
1321 bfa_plog_str(lps->bfa->plog, BFA_PL_MID_LPS,
1322 BFA_PL_EID_LOGO, 0, "Logout");
1323 break;
1324
1325 case BFA_LPS_SM_RX_CVL:
1326 bfa_sm_set_state(lps, bfa_lps_sm_init);
1327
1328 /* Let the vport module know about this event */
1329 bfa_lps_cvl_event(lps);
1330 bfa_plog_str(lps->bfa->plog, BFA_PL_MID_LPS,
1331 BFA_PL_EID_FIP_FCF_CVL, 0, "FCF Clear Virt. Link Rx");
1332 break;
1333
Krishna Gudipatib7044952010-12-13 16:17:42 -08001334 case BFA_LPS_SM_SET_N2N_PID:
1335 if (bfa_reqq_full(lps->bfa, lps->reqq)) {
1336 bfa_sm_set_state(lps, bfa_lps_sm_online_n2n_pid_wait);
1337 bfa_reqq_wait(lps->bfa, lps->reqq, &lps->wqe);
1338 } else
1339 bfa_lps_send_set_n2n_pid(lps);
1340 break;
1341
Krishna Gudipatia36c61f2010-09-15 11:50:55 -07001342 case BFA_LPS_SM_OFFLINE:
1343 case BFA_LPS_SM_DELETE:
1344 bfa_sm_set_state(lps, bfa_lps_sm_init);
1345 break;
1346
1347 default:
1348 bfa_sm_fault(lps->bfa, event);
1349 }
1350}
1351
Jing Huang8f4bfad2010-12-26 21:50:10 -08001352/*
Krishna Gudipatib7044952010-12-13 16:17:42 -08001353 * login complete
1354 */
1355static void
1356bfa_lps_sm_online_n2n_pid_wait(struct bfa_lps_s *lps, enum bfa_lps_event event)
1357{
Krishna Gudipati3fd45982011-06-24 20:24:08 -07001358 bfa_trc(lps->bfa, lps->bfa_tag);
Krishna Gudipatib7044952010-12-13 16:17:42 -08001359 bfa_trc(lps->bfa, event);
1360
1361 switch (event) {
1362 case BFA_LPS_SM_RESUME:
1363 bfa_sm_set_state(lps, bfa_lps_sm_online);
1364 bfa_lps_send_set_n2n_pid(lps);
1365 break;
1366
1367 case BFA_LPS_SM_LOGOUT:
1368 bfa_sm_set_state(lps, bfa_lps_sm_logowait);
1369 bfa_plog_str(lps->bfa->plog, BFA_PL_MID_LPS,
1370 BFA_PL_EID_LOGO, 0, "Logout");
1371 break;
1372
1373 case BFA_LPS_SM_RX_CVL:
1374 bfa_sm_set_state(lps, bfa_lps_sm_init);
1375 bfa_reqq_wcancel(&lps->wqe);
1376
1377 /* Let the vport module know about this event */
1378 bfa_lps_cvl_event(lps);
1379 bfa_plog_str(lps->bfa->plog, BFA_PL_MID_LPS,
1380 BFA_PL_EID_FIP_FCF_CVL, 0, "FCF Clear Virt. Link Rx");
1381 break;
1382
1383 case BFA_LPS_SM_OFFLINE:
1384 case BFA_LPS_SM_DELETE:
1385 bfa_sm_set_state(lps, bfa_lps_sm_init);
1386 bfa_reqq_wcancel(&lps->wqe);
1387 break;
1388
1389 default:
1390 bfa_sm_fault(lps->bfa, event);
1391 }
1392}
1393
Jing Huang5fbe25c2010-10-18 17:17:23 -07001394/*
Krishna Gudipatia36c61f2010-09-15 11:50:55 -07001395 * logout in progress - awaiting firmware response
1396 */
1397static void
1398bfa_lps_sm_logout(struct bfa_lps_s *lps, enum bfa_lps_event event)
1399{
Krishna Gudipati3fd45982011-06-24 20:24:08 -07001400 bfa_trc(lps->bfa, lps->bfa_tag);
Krishna Gudipatia36c61f2010-09-15 11:50:55 -07001401 bfa_trc(lps->bfa, event);
1402
1403 switch (event) {
1404 case BFA_LPS_SM_FWRSP:
1405 bfa_sm_set_state(lps, bfa_lps_sm_init);
1406 bfa_lps_logout_comp(lps);
1407 break;
1408
1409 case BFA_LPS_SM_OFFLINE:
Krishna Gudipatibe540a92011-06-13 15:53:04 -07001410 case BFA_LPS_SM_DELETE:
Krishna Gudipatia36c61f2010-09-15 11:50:55 -07001411 bfa_sm_set_state(lps, bfa_lps_sm_init);
1412 break;
1413
1414 default:
1415 bfa_sm_fault(lps->bfa, event);
1416 }
1417}
1418
Jing Huang5fbe25c2010-10-18 17:17:23 -07001419/*
Krishna Gudipatia36c61f2010-09-15 11:50:55 -07001420 * logout pending -- awaiting space in request queue
1421 */
1422static void
1423bfa_lps_sm_logowait(struct bfa_lps_s *lps, enum bfa_lps_event event)
1424{
Krishna Gudipati3fd45982011-06-24 20:24:08 -07001425 bfa_trc(lps->bfa, lps->bfa_tag);
Krishna Gudipatia36c61f2010-09-15 11:50:55 -07001426 bfa_trc(lps->bfa, event);
1427
1428 switch (event) {
1429 case BFA_LPS_SM_RESUME:
1430 bfa_sm_set_state(lps, bfa_lps_sm_logout);
1431 bfa_lps_send_logout(lps);
1432 break;
1433
1434 case BFA_LPS_SM_OFFLINE:
Krishna Gudipatibe540a92011-06-13 15:53:04 -07001435 case BFA_LPS_SM_DELETE:
Krishna Gudipatia36c61f2010-09-15 11:50:55 -07001436 bfa_sm_set_state(lps, bfa_lps_sm_init);
1437 bfa_reqq_wcancel(&lps->wqe);
1438 break;
1439
1440 default:
1441 bfa_sm_fault(lps->bfa, event);
1442 }
1443}
1444
1445
1446
Jing Huang5fbe25c2010-10-18 17:17:23 -07001447/*
Krishna Gudipatia36c61f2010-09-15 11:50:55 -07001448 * lps_pvt BFA LPS private functions
1449 */
1450
Jing Huang5fbe25c2010-10-18 17:17:23 -07001451/*
Krishna Gudipatia36c61f2010-09-15 11:50:55 -07001452 * return memory requirement
1453 */
1454static void
Krishna Gudipati45070252011-06-24 20:24:29 -07001455bfa_lps_meminfo(struct bfa_iocfc_cfg_s *cfg, struct bfa_meminfo_s *minfo,
1456 struct bfa_s *bfa)
Krishna Gudipatia36c61f2010-09-15 11:50:55 -07001457{
Krishna Gudipati45070252011-06-24 20:24:29 -07001458 struct bfa_mem_kva_s *lps_kva = BFA_MEM_LPS_KVA(bfa);
1459
Krishna Gudipatia36c61f2010-09-15 11:50:55 -07001460 if (cfg->drvcfg.min_cfg)
Krishna Gudipati45070252011-06-24 20:24:29 -07001461 bfa_mem_kva_setup(minfo, lps_kva,
1462 sizeof(struct bfa_lps_s) * BFA_LPS_MIN_LPORTS);
Krishna Gudipatia36c61f2010-09-15 11:50:55 -07001463 else
Krishna Gudipati45070252011-06-24 20:24:29 -07001464 bfa_mem_kva_setup(minfo, lps_kva,
1465 sizeof(struct bfa_lps_s) * BFA_LPS_MAX_LPORTS);
Krishna Gudipatia36c61f2010-09-15 11:50:55 -07001466}
1467
Jing Huang5fbe25c2010-10-18 17:17:23 -07001468/*
Krishna Gudipatia36c61f2010-09-15 11:50:55 -07001469 * bfa module attach at initialization time
1470 */
1471static void
1472bfa_lps_attach(struct bfa_s *bfa, void *bfad, struct bfa_iocfc_cfg_s *cfg,
Krishna Gudipati45070252011-06-24 20:24:29 -07001473 struct bfa_pcidev_s *pcidev)
Krishna Gudipatia36c61f2010-09-15 11:50:55 -07001474{
1475 struct bfa_lps_mod_s *mod = BFA_LPS_MOD(bfa);
1476 struct bfa_lps_s *lps;
1477 int i;
1478
Krishna Gudipatia36c61f2010-09-15 11:50:55 -07001479 mod->num_lps = BFA_LPS_MAX_LPORTS;
1480 if (cfg->drvcfg.min_cfg)
1481 mod->num_lps = BFA_LPS_MIN_LPORTS;
1482 else
1483 mod->num_lps = BFA_LPS_MAX_LPORTS;
Krishna Gudipati45070252011-06-24 20:24:29 -07001484 mod->lps_arr = lps = (struct bfa_lps_s *) bfa_mem_kva_curp(mod);
Krishna Gudipatia36c61f2010-09-15 11:50:55 -07001485
Krishna Gudipati45070252011-06-24 20:24:29 -07001486 bfa_mem_kva_curp(mod) += mod->num_lps * sizeof(struct bfa_lps_s);
Krishna Gudipatia36c61f2010-09-15 11:50:55 -07001487
1488 INIT_LIST_HEAD(&mod->lps_free_q);
1489 INIT_LIST_HEAD(&mod->lps_active_q);
Krishna Gudipati3fd45982011-06-24 20:24:08 -07001490 INIT_LIST_HEAD(&mod->lps_login_q);
Krishna Gudipatia36c61f2010-09-15 11:50:55 -07001491
1492 for (i = 0; i < mod->num_lps; i++, lps++) {
1493 lps->bfa = bfa;
Krishna Gudipati3fd45982011-06-24 20:24:08 -07001494 lps->bfa_tag = (u8) i;
Krishna Gudipatia36c61f2010-09-15 11:50:55 -07001495 lps->reqq = BFA_REQQ_LPS;
1496 bfa_reqq_winit(&lps->wqe, bfa_lps_reqq_resume, lps);
1497 list_add_tail(&lps->qe, &mod->lps_free_q);
1498 }
1499}
1500
static void
bfa_lps_detach(struct bfa_s *bfa)
{
	/* Nothing to tear down for the LPS module */
}
1505
static void
bfa_lps_start(struct bfa_s *bfa)
{
	/* No start-time work for the LPS module */
}
1510
static void
bfa_lps_stop(struct bfa_s *bfa)
{
	/* No stop-time work for the LPS module */
}
1515
Jing Huang5fbe25c2010-10-18 17:17:23 -07001516/*
Krishna Gudipatia36c61f2010-09-15 11:50:55 -07001517 * IOC in disabled state -- consider all lps offline
1518 */
1519static void
1520bfa_lps_iocdisable(struct bfa_s *bfa)
1521{
1522 struct bfa_lps_mod_s *mod = BFA_LPS_MOD(bfa);
1523 struct bfa_lps_s *lps;
1524 struct list_head *qe, *qen;
1525
1526 list_for_each_safe(qe, qen, &mod->lps_active_q) {
1527 lps = (struct bfa_lps_s *) qe;
1528 bfa_sm_send_event(lps, BFA_LPS_SM_OFFLINE);
1529 }
Krishna Gudipati3fd45982011-06-24 20:24:08 -07001530 list_for_each_safe(qe, qen, &mod->lps_login_q) {
1531 lps = (struct bfa_lps_s *) qe;
1532 bfa_sm_send_event(lps, BFA_LPS_SM_OFFLINE);
1533 }
1534 list_splice_tail_init(&mod->lps_login_q, &mod->lps_active_q);
Krishna Gudipatia36c61f2010-09-15 11:50:55 -07001535}
1536
Jing Huang5fbe25c2010-10-18 17:17:23 -07001537/*
Krishna Gudipatia36c61f2010-09-15 11:50:55 -07001538 * Firmware login response
1539 */
1540static void
1541bfa_lps_login_rsp(struct bfa_s *bfa, struct bfi_lps_login_rsp_s *rsp)
1542{
1543 struct bfa_lps_mod_s *mod = BFA_LPS_MOD(bfa);
1544 struct bfa_lps_s *lps;
1545
Krishna Gudipati3fd45982011-06-24 20:24:08 -07001546 WARN_ON(rsp->bfa_tag >= mod->num_lps);
1547 lps = BFA_LPS_FROM_TAG(mod, rsp->bfa_tag);
Krishna Gudipatia36c61f2010-09-15 11:50:55 -07001548
1549 lps->status = rsp->status;
1550 switch (rsp->status) {
1551 case BFA_STATUS_OK:
Krishna Gudipati3fd45982011-06-24 20:24:08 -07001552 lps->fw_tag = rsp->fw_tag;
Krishna Gudipatia36c61f2010-09-15 11:50:55 -07001553 lps->fport = rsp->f_port;
Krishna Gudipatib7044952010-12-13 16:17:42 -08001554 if (lps->fport)
1555 lps->lp_pid = rsp->lp_pid;
Krishna Gudipatia36c61f2010-09-15 11:50:55 -07001556 lps->npiv_en = rsp->npiv_en;
Jing Huangba816ea2010-10-18 17:10:50 -07001557 lps->pr_bbcred = be16_to_cpu(rsp->bb_credit);
Krishna Gudipatia36c61f2010-09-15 11:50:55 -07001558 lps->pr_pwwn = rsp->port_name;
1559 lps->pr_nwwn = rsp->node_name;
1560 lps->auth_req = rsp->auth_req;
1561 lps->lp_mac = rsp->lp_mac;
1562 lps->brcd_switch = rsp->brcd_switch;
1563 lps->fcf_mac = rsp->fcf_mac;
Krishna Gudipatibe540a92011-06-13 15:53:04 -07001564 lps->pr_bbscn = rsp->bb_scn;
Krishna Gudipatia36c61f2010-09-15 11:50:55 -07001565
1566 break;
1567
1568 case BFA_STATUS_FABRIC_RJT:
1569 lps->lsrjt_rsn = rsp->lsrjt_rsn;
1570 lps->lsrjt_expl = rsp->lsrjt_expl;
1571
1572 break;
1573
1574 case BFA_STATUS_EPROTOCOL:
1575 lps->ext_status = rsp->ext_status;
1576
1577 break;
1578
Krishna Gudipati3fd45982011-06-24 20:24:08 -07001579 case BFA_STATUS_VPORT_MAX:
1580 if (!rsp->ext_status)
1581 bfa_lps_no_res(lps, rsp->ext_status);
1582 break;
1583
Krishna Gudipatia36c61f2010-09-15 11:50:55 -07001584 default:
1585 /* Nothing to do with other status */
1586 break;
1587 }
1588
Krishna Gudipati3fd45982011-06-24 20:24:08 -07001589 list_del(&lps->qe);
1590 list_add_tail(&lps->qe, &mod->lps_active_q);
Krishna Gudipatia36c61f2010-09-15 11:50:55 -07001591 bfa_sm_send_event(lps, BFA_LPS_SM_FWRSP);
1592}
1593
Krishna Gudipati3fd45982011-06-24 20:24:08 -07001594static void
1595bfa_lps_no_res(struct bfa_lps_s *first_lps, u8 count)
1596{
1597 struct bfa_s *bfa = first_lps->bfa;
1598 struct bfa_lps_mod_s *mod = BFA_LPS_MOD(bfa);
1599 struct list_head *qe, *qe_next;
1600 struct bfa_lps_s *lps;
1601
1602 bfa_trc(bfa, count);
1603
1604 qe = bfa_q_next(first_lps);
1605
1606 while (count && qe) {
1607 qe_next = bfa_q_next(qe);
1608 lps = (struct bfa_lps_s *)qe;
1609 bfa_trc(bfa, lps->bfa_tag);
1610 lps->status = first_lps->status;
1611 list_del(&lps->qe);
1612 list_add_tail(&lps->qe, &mod->lps_active_q);
1613 bfa_sm_send_event(lps, BFA_LPS_SM_FWRSP);
1614 qe = qe_next;
1615 count--;
1616 }
1617}
1618
Jing Huang5fbe25c2010-10-18 17:17:23 -07001619/*
Krishna Gudipatia36c61f2010-09-15 11:50:55 -07001620 * Firmware logout response
1621 */
1622static void
1623bfa_lps_logout_rsp(struct bfa_s *bfa, struct bfi_lps_logout_rsp_s *rsp)
1624{
1625 struct bfa_lps_mod_s *mod = BFA_LPS_MOD(bfa);
1626 struct bfa_lps_s *lps;
1627
Krishna Gudipati3fd45982011-06-24 20:24:08 -07001628 WARN_ON(rsp->bfa_tag >= mod->num_lps);
1629 lps = BFA_LPS_FROM_TAG(mod, rsp->bfa_tag);
Krishna Gudipatia36c61f2010-09-15 11:50:55 -07001630
1631 bfa_sm_send_event(lps, BFA_LPS_SM_FWRSP);
1632}
1633
Jing Huang5fbe25c2010-10-18 17:17:23 -07001634/*
Krishna Gudipatia36c61f2010-09-15 11:50:55 -07001635 * Firmware received a Clear virtual link request (for FCoE)
1636 */
1637static void
1638bfa_lps_rx_cvl_event(struct bfa_s *bfa, struct bfi_lps_cvl_event_s *cvl)
1639{
1640 struct bfa_lps_mod_s *mod = BFA_LPS_MOD(bfa);
1641 struct bfa_lps_s *lps;
1642
Krishna Gudipati3fd45982011-06-24 20:24:08 -07001643 lps = BFA_LPS_FROM_TAG(mod, cvl->bfa_tag);
Krishna Gudipatia36c61f2010-09-15 11:50:55 -07001644
1645 bfa_sm_send_event(lps, BFA_LPS_SM_RX_CVL);
1646}
1647
Jing Huang5fbe25c2010-10-18 17:17:23 -07001648/*
Krishna Gudipatia36c61f2010-09-15 11:50:55 -07001649 * Space is available in request queue, resume queueing request to firmware.
1650 */
1651static void
1652bfa_lps_reqq_resume(void *lps_arg)
1653{
1654 struct bfa_lps_s *lps = lps_arg;
1655
1656 bfa_sm_send_event(lps, BFA_LPS_SM_RESUME);
1657}
1658
Jing Huang5fbe25c2010-10-18 17:17:23 -07001659/*
Krishna Gudipatia36c61f2010-09-15 11:50:55 -07001660 * lps is freed -- triggered by vport delete
1661 */
1662static void
1663bfa_lps_free(struct bfa_lps_s *lps)
1664{
1665 struct bfa_lps_mod_s *mod = BFA_LPS_MOD(lps->bfa);
1666
1667 lps->lp_pid = 0;
1668 list_del(&lps->qe);
1669 list_add_tail(&lps->qe, &mod->lps_free_q);
1670}
1671
Jing Huang5fbe25c2010-10-18 17:17:23 -07001672/*
Krishna Gudipatia36c61f2010-09-15 11:50:55 -07001673 * send login request to firmware
1674 */
1675static void
1676bfa_lps_send_login(struct bfa_lps_s *lps)
1677{
Krishna Gudipati3fd45982011-06-24 20:24:08 -07001678 struct bfa_lps_mod_s *mod = BFA_LPS_MOD(lps->bfa);
Krishna Gudipatia36c61f2010-09-15 11:50:55 -07001679 struct bfi_lps_login_req_s *m;
1680
1681 m = bfa_reqq_next(lps->bfa, lps->reqq);
Jing Huangd4b671c2010-12-26 21:46:35 -08001682 WARN_ON(!m);
Krishna Gudipatia36c61f2010-09-15 11:50:55 -07001683
1684 bfi_h2i_set(m->mh, BFI_MC_LPS, BFI_LPS_H2I_LOGIN_REQ,
Krishna Gudipati3fd45982011-06-24 20:24:08 -07001685 bfa_fn_lpu(lps->bfa));
Krishna Gudipatia36c61f2010-09-15 11:50:55 -07001686
Krishna Gudipati3fd45982011-06-24 20:24:08 -07001687 m->bfa_tag = lps->bfa_tag;
Krishna Gudipatia36c61f2010-09-15 11:50:55 -07001688 m->alpa = lps->alpa;
Jing Huangba816ea2010-10-18 17:10:50 -07001689 m->pdu_size = cpu_to_be16(lps->pdusz);
Krishna Gudipatia36c61f2010-09-15 11:50:55 -07001690 m->pwwn = lps->pwwn;
1691 m->nwwn = lps->nwwn;
1692 m->fdisc = lps->fdisc;
1693 m->auth_en = lps->auth_en;
Krishna Gudipatibe540a92011-06-13 15:53:04 -07001694 m->bb_scn = lps->bb_scn;
Krishna Gudipatia36c61f2010-09-15 11:50:55 -07001695
Krishna Gudipati3fd45982011-06-24 20:24:08 -07001696 bfa_reqq_produce(lps->bfa, lps->reqq, m->mh);
1697 list_del(&lps->qe);
1698 list_add_tail(&lps->qe, &mod->lps_login_q);
Krishna Gudipatia36c61f2010-09-15 11:50:55 -07001699}
1700
Jing Huang5fbe25c2010-10-18 17:17:23 -07001701/*
Krishna Gudipatia36c61f2010-09-15 11:50:55 -07001702 * send logout request to firmware
1703 */
1704static void
1705bfa_lps_send_logout(struct bfa_lps_s *lps)
1706{
1707 struct bfi_lps_logout_req_s *m;
1708
1709 m = bfa_reqq_next(lps->bfa, lps->reqq);
Jing Huangd4b671c2010-12-26 21:46:35 -08001710 WARN_ON(!m);
Krishna Gudipatia36c61f2010-09-15 11:50:55 -07001711
1712 bfi_h2i_set(m->mh, BFI_MC_LPS, BFI_LPS_H2I_LOGOUT_REQ,
Krishna Gudipati3fd45982011-06-24 20:24:08 -07001713 bfa_fn_lpu(lps->bfa));
Krishna Gudipatia36c61f2010-09-15 11:50:55 -07001714
Krishna Gudipati3fd45982011-06-24 20:24:08 -07001715 m->fw_tag = lps->fw_tag;
Krishna Gudipatia36c61f2010-09-15 11:50:55 -07001716 m->port_name = lps->pwwn;
Krishna Gudipati3fd45982011-06-24 20:24:08 -07001717 bfa_reqq_produce(lps->bfa, lps->reqq, m->mh);
Krishna Gudipatia36c61f2010-09-15 11:50:55 -07001718}
1719
/*
 * Send an N2N PID set request to the firmware.
 */
static void
bfa_lps_send_set_n2n_pid(struct bfa_lps_s *lps)
{
	struct bfi_lps_n2n_pid_req_s *m;

	/* caller (LPS state machine) guarantees request queue space */
	m = bfa_reqq_next(lps->bfa, lps->reqq);
	WARN_ON(!m);

	bfi_h2i_set(m->mh, BFI_MC_LPS, BFI_LPS_H2I_N2N_PID_REQ,
		bfa_fn_lpu(lps->bfa));

	m->fw_tag = lps->fw_tag;
	m->lp_pid = lps->lp_pid;	/* PID learned during N2N PLOGI */
	bfa_reqq_produce(lps->bfa, lps->reqq, m->mh);
}
1738
Jing Huang5fbe25c2010-10-18 17:17:23 -07001739/*
Krishna Gudipatia36c61f2010-09-15 11:50:55 -07001740 * Indirect login completion handler for non-fcs
1741 */
1742static void
1743bfa_lps_login_comp_cb(void *arg, bfa_boolean_t complete)
1744{
1745 struct bfa_lps_s *lps = arg;
1746
1747 if (!complete)
1748 return;
1749
1750 if (lps->fdisc)
1751 bfa_cb_lps_fdisc_comp(lps->bfa->bfad, lps->uarg, lps->status);
1752 else
1753 bfa_cb_lps_flogi_comp(lps->bfa->bfad, lps->uarg, lps->status);
1754}
1755
Jing Huang5fbe25c2010-10-18 17:17:23 -07001756/*
Krishna Gudipatia36c61f2010-09-15 11:50:55 -07001757 * Login completion handler -- direct call for fcs, queue for others
1758 */
1759static void
1760bfa_lps_login_comp(struct bfa_lps_s *lps)
1761{
1762 if (!lps->bfa->fcs) {
1763 bfa_cb_queue(lps->bfa, &lps->hcb_qe, bfa_lps_login_comp_cb,
1764 lps);
1765 return;
1766 }
1767
1768 if (lps->fdisc)
1769 bfa_cb_lps_fdisc_comp(lps->bfa->bfad, lps->uarg, lps->status);
1770 else
1771 bfa_cb_lps_flogi_comp(lps->bfa->bfad, lps->uarg, lps->status);
1772}
1773
Jing Huang5fbe25c2010-10-18 17:17:23 -07001774/*
Krishna Gudipatia36c61f2010-09-15 11:50:55 -07001775 * Indirect logout completion handler for non-fcs
1776 */
1777static void
1778bfa_lps_logout_comp_cb(void *arg, bfa_boolean_t complete)
1779{
1780 struct bfa_lps_s *lps = arg;
1781
1782 if (!complete)
1783 return;
1784
1785 if (lps->fdisc)
1786 bfa_cb_lps_fdisclogo_comp(lps->bfa->bfad, lps->uarg);
1787}
1788
Jing Huang5fbe25c2010-10-18 17:17:23 -07001789/*
Krishna Gudipatia36c61f2010-09-15 11:50:55 -07001790 * Logout completion handler -- direct call for fcs, queue for others
1791 */
1792static void
1793bfa_lps_logout_comp(struct bfa_lps_s *lps)
1794{
1795 if (!lps->bfa->fcs) {
1796 bfa_cb_queue(lps->bfa, &lps->hcb_qe, bfa_lps_logout_comp_cb,
1797 lps);
1798 return;
1799 }
1800 if (lps->fdisc)
1801 bfa_cb_lps_fdisclogo_comp(lps->bfa->bfad, lps->uarg);
1802}
1803
Jing Huang5fbe25c2010-10-18 17:17:23 -07001804/*
Krishna Gudipatia36c61f2010-09-15 11:50:55 -07001805 * Clear virtual link completion handler for non-fcs
1806 */
1807static void
1808bfa_lps_cvl_event_cb(void *arg, bfa_boolean_t complete)
1809{
1810 struct bfa_lps_s *lps = arg;
1811
1812 if (!complete)
1813 return;
1814
1815 /* Clear virtual link to base port will result in link down */
1816 if (lps->fdisc)
1817 bfa_cb_lps_cvl_event(lps->bfa->bfad, lps->uarg);
1818}
1819
Jing Huang5fbe25c2010-10-18 17:17:23 -07001820/*
Krishna Gudipatia36c61f2010-09-15 11:50:55 -07001821 * Received Clear virtual link event --direct call for fcs,
1822 * queue for others
1823 */
1824static void
1825bfa_lps_cvl_event(struct bfa_lps_s *lps)
1826{
1827 if (!lps->bfa->fcs) {
1828 bfa_cb_queue(lps->bfa, &lps->hcb_qe, bfa_lps_cvl_event_cb,
1829 lps);
1830 return;
1831 }
1832
1833 /* Clear virtual link to base port will result in link down */
1834 if (lps->fdisc)
1835 bfa_cb_lps_cvl_event(lps->bfa->bfad, lps->uarg);
1836}
1837
1838
1839
Jing Huang5fbe25c2010-10-18 17:17:23 -07001840/*
Krishna Gudipatia36c61f2010-09-15 11:50:55 -07001841 * lps_public BFA LPS public functions
1842 */
1843
1844u32
1845bfa_lps_get_max_vport(struct bfa_s *bfa)
1846{
1847 if (bfa_ioc_devid(&bfa->ioc) == BFA_PCI_DEVICE_ID_CT)
1848 return BFA_LPS_MAX_VPORTS_SUPP_CT;
1849 else
1850 return BFA_LPS_MAX_VPORTS_SUPP_CB;
1851}
1852
Jing Huang5fbe25c2010-10-18 17:17:23 -07001853/*
Krishna Gudipatia36c61f2010-09-15 11:50:55 -07001854 * Allocate a lport srvice tag.
1855 */
1856struct bfa_lps_s *
1857bfa_lps_alloc(struct bfa_s *bfa)
1858{
1859 struct bfa_lps_mod_s *mod = BFA_LPS_MOD(bfa);
1860 struct bfa_lps_s *lps = NULL;
1861
1862 bfa_q_deq(&mod->lps_free_q, &lps);
1863
1864 if (lps == NULL)
1865 return NULL;
1866
1867 list_add_tail(&lps->qe, &mod->lps_active_q);
1868
1869 bfa_sm_set_state(lps, bfa_lps_sm_init);
1870 return lps;
1871}
1872
/*
 * Free lport service tag. This can be called anytime after an alloc.
 * No need to wait for any pending login/logout completions.
 */
void
bfa_lps_delete(struct bfa_lps_s *lps)
{
	/* the state machine performs the cleanup for whatever state
	 * the session is currently in */
	bfa_sm_send_event(lps, BFA_LPS_SM_DELETE);
}
1882
Jing Huang5fbe25c2010-10-18 17:17:23 -07001883/*
Krishna Gudipatia36c61f2010-09-15 11:50:55 -07001884 * Initiate a lport login.
1885 */
1886void
1887bfa_lps_flogi(struct bfa_lps_s *lps, void *uarg, u8 alpa, u16 pdusz,
Krishna Gudipatibe540a92011-06-13 15:53:04 -07001888 wwn_t pwwn, wwn_t nwwn, bfa_boolean_t auth_en, uint8_t bb_scn)
Krishna Gudipatia36c61f2010-09-15 11:50:55 -07001889{
1890 lps->uarg = uarg;
1891 lps->alpa = alpa;
1892 lps->pdusz = pdusz;
1893 lps->pwwn = pwwn;
1894 lps->nwwn = nwwn;
1895 lps->fdisc = BFA_FALSE;
1896 lps->auth_en = auth_en;
Krishna Gudipatibe540a92011-06-13 15:53:04 -07001897 lps->bb_scn = bb_scn;
Krishna Gudipatia36c61f2010-09-15 11:50:55 -07001898 bfa_sm_send_event(lps, BFA_LPS_SM_LOGIN);
1899}
1900
Jing Huang5fbe25c2010-10-18 17:17:23 -07001901/*
Krishna Gudipatia36c61f2010-09-15 11:50:55 -07001902 * Initiate a lport fdisc login.
1903 */
1904void
1905bfa_lps_fdisc(struct bfa_lps_s *lps, void *uarg, u16 pdusz, wwn_t pwwn,
1906 wwn_t nwwn)
1907{
1908 lps->uarg = uarg;
1909 lps->alpa = 0;
1910 lps->pdusz = pdusz;
1911 lps->pwwn = pwwn;
1912 lps->nwwn = nwwn;
1913 lps->fdisc = BFA_TRUE;
1914 lps->auth_en = BFA_FALSE;
1915 bfa_sm_send_event(lps, BFA_LPS_SM_LOGIN);
1916}
1917
Krishna Gudipatia36c61f2010-09-15 11:50:55 -07001918
/*
 * Initiate a lport FDISC logout.
 */
void
bfa_lps_fdisclogo(struct bfa_lps_s *lps)
{
	/* logout sequencing is handled entirely by the state machine */
	bfa_sm_send_event(lps, BFA_LPS_SM_LOGOUT);
}
1927
Krishna Gudipati3fd45982011-06-24 20:24:08 -07001928u8
1929bfa_lps_get_fwtag(struct bfa_s *bfa, u8 lp_tag)
1930{
1931 struct bfa_lps_mod_s *mod = BFA_LPS_MOD(bfa);
1932
1933 return BFA_LPS_FROM_TAG(mod, lp_tag)->fw_tag;
1934}
Krishna Gudipatia36c61f2010-09-15 11:50:55 -07001935
Jing Huang5fbe25c2010-10-18 17:17:23 -07001936/*
Krishna Gudipatia36c61f2010-09-15 11:50:55 -07001937 * Return lport services tag given the pid
1938 */
1939u8
1940bfa_lps_get_tag_from_pid(struct bfa_s *bfa, u32 pid)
1941{
1942 struct bfa_lps_mod_s *mod = BFA_LPS_MOD(bfa);
1943 struct bfa_lps_s *lps;
1944 int i;
1945
1946 for (i = 0, lps = mod->lps_arr; i < mod->num_lps; i++, lps++) {
1947 if (lps->lp_pid == pid)
Krishna Gudipati3fd45982011-06-24 20:24:08 -07001948 return lps->bfa_tag;
Krishna Gudipatia36c61f2010-09-15 11:50:55 -07001949 }
1950
1951 /* Return base port tag anyway */
1952 return 0;
1953}
1954
Krishna Gudipatia36c61f2010-09-15 11:50:55 -07001955
Jing Huang5fbe25c2010-10-18 17:17:23 -07001956/*
Krishna Gudipatia36c61f2010-09-15 11:50:55 -07001957 * return port id assigned to the base lport
1958 */
1959u32
1960bfa_lps_get_base_pid(struct bfa_s *bfa)
1961{
1962 struct bfa_lps_mod_s *mod = BFA_LPS_MOD(bfa);
1963
1964 return BFA_LPS_FROM_TAG(mod, 0)->lp_pid;
1965}
1966
/*
 * Set PID in case of n2n (which is assigned during PLOGI)
 */
void
bfa_lps_set_n2n_pid(struct bfa_lps_s *lps, uint32_t n2n_pid)
{
	bfa_trc(lps->bfa, lps->bfa_tag);
	bfa_trc(lps->bfa, n2n_pid);

	lps->lp_pid = n2n_pid;
	/* the state machine decides when the new pid may be forwarded
	 * to firmware (see bfa_lps_send_set_n2n_pid) */
	bfa_sm_send_event(lps, BFA_LPS_SM_SET_N2N_PID);
}
1979
/*
 * LPS firmware message class handler: dispatch incoming I2H messages
 * to the matching response/event handler.
 */
void
bfa_lps_isr(struct bfa_s *bfa, struct bfi_msg_s *m)
{
	union bfi_lps_i2h_msg_u msg;

	bfa_trc(bfa, m->mhdr.msg_id);
	msg.msg = m;	/* view the generic message as an LPS message */

	switch (m->mhdr.msg_id) {
	case BFI_LPS_I2H_LOGIN_RSP:
		bfa_lps_login_rsp(bfa, msg.login_rsp);
		break;

	case BFI_LPS_I2H_LOGOUT_RSP:
		bfa_lps_logout_rsp(bfa, msg.logout_rsp);
		break;

	case BFI_LPS_I2H_CVL_EVENT:
		bfa_lps_rx_cvl_event(bfa, msg.cvl_event);
		break;

	default:
		/* unexpected message id from firmware */
		bfa_trc(bfa, m->mhdr.msg_id);
		WARN_ON(1);
	}
}
2009
Jing Huang5fbe25c2010-10-18 17:17:23 -07002010/*
Krishna Gudipatia36c61f2010-09-15 11:50:55 -07002011 * FC PORT state machine functions
2012 */
/*
 * Initial state: the port has not been started since IOC configuration.
 */
static void
bfa_fcport_sm_uninit(struct bfa_fcport_s *fcport,
			enum bfa_fcport_sm_event event)
{
	bfa_trc(fcport->bfa, event);

	switch (event) {
	case BFA_FCPORT_SM_START:
		/*
		 * Start event after IOC is configured and BFA is started.
		 */
		fcport->use_flash_cfg = BFA_TRUE;

		/* send enable now if request queue space is available,
		 * otherwise wait for queue space */
		if (bfa_fcport_send_enable(fcport)) {
			bfa_trc(fcport->bfa, BFA_TRUE);
			bfa_sm_set_state(fcport, bfa_fcport_sm_enabling);
		} else {
			bfa_trc(fcport->bfa, BFA_FALSE);
			bfa_sm_set_state(fcport,
					bfa_fcport_sm_enabling_qwait);
		}
		break;

	case BFA_FCPORT_SM_ENABLE:
		/*
		 * Port is persistently configured to be in enabled state. Do
		 * not change state. Port enabling is done when START event is
		 * received.
		 */
		break;

	case BFA_FCPORT_SM_DISABLE:
		/*
		 * If a port is persistently configured to be disabled, the
		 * first event will be a port disable request.
		 */
		bfa_sm_set_state(fcport, bfa_fcport_sm_disabled);
		break;

	case BFA_FCPORT_SM_HWFAIL:
		bfa_sm_set_state(fcport, bfa_fcport_sm_iocdown);
		break;

	default:
		bfa_sm_fault(fcport->bfa, event);
	}
}
2060
/*
 * Port enable requested; waiting for request queue space before the
 * enable message can be sent to firmware.
 */
static void
bfa_fcport_sm_enabling_qwait(struct bfa_fcport_s *fcport,
			enum bfa_fcport_sm_event event)
{
	char pwwn_buf[BFA_STRING_32];
	struct bfad_s *bfad = (struct bfad_s *)fcport->bfa->bfad;
	bfa_trc(fcport->bfa, event);

	switch (event) {
	case BFA_FCPORT_SM_QRESUME:
		/* queue space is available now -- send the pending enable */
		bfa_sm_set_state(fcport, bfa_fcport_sm_enabling);
		bfa_fcport_send_enable(fcport);
		break;

	case BFA_FCPORT_SM_STOP:
		bfa_reqq_wcancel(&fcport->reqq_wait);
		bfa_sm_set_state(fcport, bfa_fcport_sm_stopped);
		break;

	case BFA_FCPORT_SM_ENABLE:
		/*
		 * Already enable is in progress.
		 */
		break;

	case BFA_FCPORT_SM_DISABLE:
		/*
		 * Just send disable request to firmware when room becomes
		 * available in request queue.
		 */
		bfa_sm_set_state(fcport, bfa_fcport_sm_disabled);
		bfa_reqq_wcancel(&fcport->reqq_wait);
		bfa_plog_str(fcport->bfa->plog, BFA_PL_MID_HAL,
			BFA_PL_EID_PORT_DISABLE, 0, "Port Disable");
		wwn2str(pwwn_buf, fcport->pwwn);
		BFA_LOG(KERN_INFO, bfad, bfa_log_level,
			"Base port disabled: WWN = %s\n", pwwn_buf);
		break;

	case BFA_FCPORT_SM_LINKUP:
	case BFA_FCPORT_SM_LINKDOWN:
		/*
		 * Possible to get link events when doing back-to-back
		 * enable/disables.
		 */
		break;

	case BFA_FCPORT_SM_HWFAIL:
		bfa_reqq_wcancel(&fcport->reqq_wait);
		bfa_sm_set_state(fcport, bfa_fcport_sm_iocdown);
		break;

	default:
		bfa_sm_fault(fcport->bfa, event);
	}
}
2117
/*
 * Enable request has been sent to firmware; waiting for the response.
 */
static void
bfa_fcport_sm_enabling(struct bfa_fcport_s *fcport,
		enum bfa_fcport_sm_event event)
{
	char pwwn_buf[BFA_STRING_32];
	struct bfad_s *bfad = (struct bfad_s *)fcport->bfa->bfad;
	bfa_trc(fcport->bfa, event);

	switch (event) {
	case BFA_FCPORT_SM_FWRSP:
	case BFA_FCPORT_SM_LINKDOWN:
		bfa_sm_set_state(fcport, bfa_fcport_sm_linkdown);
		break;

	case BFA_FCPORT_SM_LINKUP:
		/* link already up when the response arrives */
		bfa_fcport_update_linkinfo(fcport);
		bfa_sm_set_state(fcport, bfa_fcport_sm_linkup);

		WARN_ON(!fcport->event_cbfn);
		bfa_fcport_scn(fcport, BFA_PORT_LINKUP, BFA_FALSE);
		break;

	case BFA_FCPORT_SM_ENABLE:
		/*
		 * Already being enabled.
		 */
		break;

	case BFA_FCPORT_SM_DISABLE:
		if (bfa_fcport_send_disable(fcport))
			bfa_sm_set_state(fcport, bfa_fcport_sm_disabling);
		else
			bfa_sm_set_state(fcport,
					bfa_fcport_sm_disabling_qwait);

		bfa_plog_str(fcport->bfa->plog, BFA_PL_MID_HAL,
			BFA_PL_EID_PORT_DISABLE, 0, "Port Disable");
		wwn2str(pwwn_buf, fcport->pwwn);
		BFA_LOG(KERN_INFO, bfad, bfa_log_level,
			"Base port disabled: WWN = %s\n", pwwn_buf);
		break;

	case BFA_FCPORT_SM_STOP:
		bfa_sm_set_state(fcport, bfa_fcport_sm_stopped);
		break;

	case BFA_FCPORT_SM_HWFAIL:
		bfa_sm_set_state(fcport, bfa_fcport_sm_iocdown);
		break;

	default:
		bfa_sm_fault(fcport->bfa, event);
	}
}
2172
/*
 * Port is enabled and the link is down.
 */
static void
bfa_fcport_sm_linkdown(struct bfa_fcport_s *fcport,
		enum bfa_fcport_sm_event event)
{
	struct bfi_fcport_event_s *pevent = fcport->event_arg.i2hmsg.event;
	char pwwn_buf[BFA_STRING_32];
	struct bfad_s *bfad = (struct bfad_s *)fcport->bfa->bfad;

	bfa_trc(fcport->bfa, event);

	switch (event) {
	case BFA_FCPORT_SM_LINKUP:
		bfa_fcport_update_linkinfo(fcport);
		bfa_sm_set_state(fcport, bfa_fcport_sm_linkup);
		WARN_ON(!fcport->event_cbfn);
		bfa_plog_str(fcport->bfa->plog, BFA_PL_MID_HAL,
			BFA_PL_EID_PORT_ST_CHANGE, 0, "Port Linkup");
		if (!bfa_ioc_get_fcmode(&fcport->bfa->ioc)) {

			/* non-FC mode: record the FIP FCF discovery result */
			bfa_trc(fcport->bfa,
				pevent->link_state.vc_fcf.fcf.fipenabled);
			bfa_trc(fcport->bfa,
				pevent->link_state.vc_fcf.fcf.fipfailed);

			if (pevent->link_state.vc_fcf.fcf.fipfailed)
				bfa_plog_str(fcport->bfa->plog, BFA_PL_MID_HAL,
					BFA_PL_EID_FIP_FCF_DISC, 0,
					"FIP FCF Discovery Failed");
			else
				bfa_plog_str(fcport->bfa->plog, BFA_PL_MID_HAL,
					BFA_PL_EID_FIP_FCF_DISC, 0,
					"FIP FCF Discovered");
		}

		bfa_fcport_scn(fcport, BFA_PORT_LINKUP, BFA_FALSE);
		wwn2str(pwwn_buf, fcport->pwwn);
		BFA_LOG(KERN_INFO, bfad, bfa_log_level,
			"Base port online: WWN = %s\n", pwwn_buf);
		break;

	case BFA_FCPORT_SM_LINKDOWN:
		/*
		 * Possible to get link down event.
		 */
		break;

	case BFA_FCPORT_SM_ENABLE:
		/*
		 * Already enabled.
		 */
		break;

	case BFA_FCPORT_SM_DISABLE:
		if (bfa_fcport_send_disable(fcport))
			bfa_sm_set_state(fcport, bfa_fcport_sm_disabling);
		else
			bfa_sm_set_state(fcport,
					bfa_fcport_sm_disabling_qwait);

		bfa_plog_str(fcport->bfa->plog, BFA_PL_MID_HAL,
			BFA_PL_EID_PORT_DISABLE, 0, "Port Disable");
		wwn2str(pwwn_buf, fcport->pwwn);
		BFA_LOG(KERN_INFO, bfad, bfa_log_level,
			"Base port disabled: WWN = %s\n", pwwn_buf);
		break;

	case BFA_FCPORT_SM_STOP:
		bfa_sm_set_state(fcport, bfa_fcport_sm_stopped);
		break;

	case BFA_FCPORT_SM_HWFAIL:
		bfa_sm_set_state(fcport, bfa_fcport_sm_iocdown);
		break;

	default:
		bfa_sm_fault(fcport->bfa, event);
	}
}
2251
/*
 * Port is enabled and the link is up.
 */
static void
bfa_fcport_sm_linkup(struct bfa_fcport_s *fcport,
		enum bfa_fcport_sm_event event)
{
	char pwwn_buf[BFA_STRING_32];
	struct bfad_s *bfad = (struct bfad_s *)fcport->bfa->bfad;

	bfa_trc(fcport->bfa, event);

	switch (event) {
	case BFA_FCPORT_SM_ENABLE:
		/*
		 * Already enabled.
		 */
		break;

	case BFA_FCPORT_SM_DISABLE:
		if (bfa_fcport_send_disable(fcport))
			bfa_sm_set_state(fcport, bfa_fcport_sm_disabling);
		else
			bfa_sm_set_state(fcport,
					bfa_fcport_sm_disabling_qwait);

		bfa_fcport_reset_linkinfo(fcport);
		bfa_fcport_scn(fcport, BFA_PORT_LINKDOWN, BFA_FALSE);
		bfa_plog_str(fcport->bfa->plog, BFA_PL_MID_HAL,
			BFA_PL_EID_PORT_DISABLE, 0, "Port Disable");
		wwn2str(pwwn_buf, fcport->pwwn);
		BFA_LOG(KERN_INFO, bfad, bfa_log_level,
			"Base port offline: WWN = %s\n", pwwn_buf);
		BFA_LOG(KERN_INFO, bfad, bfa_log_level,
			"Base port disabled: WWN = %s\n", pwwn_buf);
		break;

	case BFA_FCPORT_SM_LINKDOWN:
		bfa_sm_set_state(fcport, bfa_fcport_sm_linkdown);
		bfa_fcport_reset_linkinfo(fcport);
		bfa_fcport_scn(fcport, BFA_PORT_LINKDOWN, BFA_FALSE);
		bfa_plog_str(fcport->bfa->plog, BFA_PL_MID_HAL,
			BFA_PL_EID_PORT_ST_CHANGE, 0, "Port Linkdown");
		wwn2str(pwwn_buf, fcport->pwwn);
		/* a link drop while disabled is expected (INFO);
		 * otherwise report lost fabric connectivity (ERR) */
		if (BFA_PORT_IS_DISABLED(fcport->bfa))
			BFA_LOG(KERN_INFO, bfad, bfa_log_level,
				"Base port offline: WWN = %s\n", pwwn_buf);
		else
			BFA_LOG(KERN_ERR, bfad, bfa_log_level,
				"Base port (WWN = %s) "
				"lost fabric connectivity\n", pwwn_buf);
		break;

	case BFA_FCPORT_SM_STOP:
		bfa_sm_set_state(fcport, bfa_fcport_sm_stopped);
		bfa_fcport_reset_linkinfo(fcport);
		wwn2str(pwwn_buf, fcport->pwwn);
		if (BFA_PORT_IS_DISABLED(fcport->bfa))
			BFA_LOG(KERN_INFO, bfad, bfa_log_level,
				"Base port offline: WWN = %s\n", pwwn_buf);
		else
			BFA_LOG(KERN_ERR, bfad, bfa_log_level,
				"Base port (WWN = %s) "
				"lost fabric connectivity\n", pwwn_buf);
		break;

	case BFA_FCPORT_SM_HWFAIL:
		bfa_sm_set_state(fcport, bfa_fcport_sm_iocdown);
		bfa_fcport_reset_linkinfo(fcport);
		bfa_fcport_scn(fcport, BFA_PORT_LINKDOWN, BFA_FALSE);
		wwn2str(pwwn_buf, fcport->pwwn);
		if (BFA_PORT_IS_DISABLED(fcport->bfa))
			BFA_LOG(KERN_INFO, bfad, bfa_log_level,
				"Base port offline: WWN = %s\n", pwwn_buf);
		else
			BFA_LOG(KERN_ERR, bfad, bfa_log_level,
				"Base port (WWN = %s) "
				"lost fabric connectivity\n", pwwn_buf);
		break;

	default:
		bfa_sm_fault(fcport->bfa, event);
	}
}
2333
/*
 * Port disable requested; waiting for request queue space before the
 * disable message can be sent to firmware.
 */
static void
bfa_fcport_sm_disabling_qwait(struct bfa_fcport_s *fcport,
		enum bfa_fcport_sm_event event)
{
	bfa_trc(fcport->bfa, event);

	switch (event) {
	case BFA_FCPORT_SM_QRESUME:
		/* queue space is available now -- send the pending disable */
		bfa_sm_set_state(fcport, bfa_fcport_sm_disabling);
		bfa_fcport_send_disable(fcport);
		break;

	case BFA_FCPORT_SM_STOP:
		bfa_sm_set_state(fcport, bfa_fcport_sm_stopped);
		bfa_reqq_wcancel(&fcport->reqq_wait);
		break;

	case BFA_FCPORT_SM_ENABLE:
		/* re-enabled before the disable was ever sent */
		bfa_sm_set_state(fcport, bfa_fcport_sm_toggling_qwait);
		break;

	case BFA_FCPORT_SM_DISABLE:
		/*
		 * Already being disabled.
		 */
		break;

	case BFA_FCPORT_SM_LINKUP:
	case BFA_FCPORT_SM_LINKDOWN:
		/*
		 * Possible to get link events when doing back-to-back
		 * enable/disables.
		 */
		break;

	case BFA_FCPORT_SM_HWFAIL:
		bfa_sm_set_state(fcport, bfa_fcport_sm_iocfail);
		bfa_reqq_wcancel(&fcport->reqq_wait);
		break;

	default:
		bfa_sm_fault(fcport->bfa, event);
	}
}
2378
/*
 * Disable followed by enable arrived while still waiting for request
 * queue space: once space is available both messages are sent
 * back-to-back.
 */
static void
bfa_fcport_sm_toggling_qwait(struct bfa_fcport_s *fcport,
		enum bfa_fcport_sm_event event)
{
	bfa_trc(fcport->bfa, event);

	switch (event) {
	case BFA_FCPORT_SM_QRESUME:
		/* send the disable, then immediately the enable */
		bfa_sm_set_state(fcport, bfa_fcport_sm_disabling);
		bfa_fcport_send_disable(fcport);
		if (bfa_fcport_send_enable(fcport))
			bfa_sm_set_state(fcport, bfa_fcport_sm_enabling);
		else
			bfa_sm_set_state(fcport,
					bfa_fcport_sm_enabling_qwait);
		break;

	case BFA_FCPORT_SM_STOP:
		bfa_sm_set_state(fcport, bfa_fcport_sm_stopped);
		bfa_reqq_wcancel(&fcport->reqq_wait);
		break;

	case BFA_FCPORT_SM_ENABLE:
		break;

	case BFA_FCPORT_SM_DISABLE:
		/* a new disable cancels the pending enable */
		bfa_sm_set_state(fcport, bfa_fcport_sm_disabling_qwait);
		break;

	case BFA_FCPORT_SM_LINKUP:
	case BFA_FCPORT_SM_LINKDOWN:
		/*
		 * Possible to get link events when doing back-to-back
		 * enable/disables.
		 */
		break;

	case BFA_FCPORT_SM_HWFAIL:
		bfa_sm_set_state(fcport, bfa_fcport_sm_iocfail);
		bfa_reqq_wcancel(&fcport->reqq_wait);
		break;

	default:
		bfa_sm_fault(fcport->bfa, event);
	}
}
2425
/*
 * Disable request has been sent to firmware; waiting for the response.
 */
static void
bfa_fcport_sm_disabling(struct bfa_fcport_s *fcport,
		enum bfa_fcport_sm_event event)
{
	char pwwn_buf[BFA_STRING_32];
	struct bfad_s *bfad = (struct bfad_s *)fcport->bfa->bfad;
	bfa_trc(fcport->bfa, event);

	switch (event) {
	case BFA_FCPORT_SM_FWRSP:
		bfa_sm_set_state(fcport, bfa_fcport_sm_disabled);
		break;

	case BFA_FCPORT_SM_DISABLE:
		/*
		 * Already being disabled.
		 */
		break;

	case BFA_FCPORT_SM_ENABLE:
		if (bfa_fcport_send_enable(fcport))
			bfa_sm_set_state(fcport, bfa_fcport_sm_enabling);
		else
			bfa_sm_set_state(fcport,
					bfa_fcport_sm_enabling_qwait);

		bfa_plog_str(fcport->bfa->plog, BFA_PL_MID_HAL,
			BFA_PL_EID_PORT_ENABLE, 0, "Port Enable");
		wwn2str(pwwn_buf, fcport->pwwn);
		BFA_LOG(KERN_INFO, bfad, bfa_log_level,
			"Base port enabled: WWN = %s\n", pwwn_buf);
		break;

	case BFA_FCPORT_SM_STOP:
		bfa_sm_set_state(fcport, bfa_fcport_sm_stopped);
		break;

	case BFA_FCPORT_SM_LINKUP:
	case BFA_FCPORT_SM_LINKDOWN:
		/*
		 * Possible to get link events when doing back-to-back
		 * enable/disables.
		 */
		break;

	case BFA_FCPORT_SM_HWFAIL:
		bfa_sm_set_state(fcport, bfa_fcport_sm_iocfail);
		break;

	default:
		bfa_sm_fault(fcport->bfa, event);
	}
}
2479
/*
 * Port is administratively disabled.
 */
static void
bfa_fcport_sm_disabled(struct bfa_fcport_s *fcport,
		enum bfa_fcport_sm_event event)
{
	char pwwn_buf[BFA_STRING_32];
	struct bfad_s *bfad = (struct bfad_s *)fcport->bfa->bfad;
	bfa_trc(fcport->bfa, event);

	switch (event) {
	case BFA_FCPORT_SM_START:
		/*
		 * Ignore start event for a port that is disabled.
		 */
		break;

	case BFA_FCPORT_SM_STOP:
		bfa_sm_set_state(fcport, bfa_fcport_sm_stopped);
		break;

	case BFA_FCPORT_SM_ENABLE:
		if (bfa_fcport_send_enable(fcport))
			bfa_sm_set_state(fcport, bfa_fcport_sm_enabling);
		else
			bfa_sm_set_state(fcport,
					bfa_fcport_sm_enabling_qwait);

		bfa_plog_str(fcport->bfa->plog, BFA_PL_MID_HAL,
			BFA_PL_EID_PORT_ENABLE, 0, "Port Enable");
		wwn2str(pwwn_buf, fcport->pwwn);
		BFA_LOG(KERN_INFO, bfad, bfa_log_level,
			"Base port enabled: WWN = %s\n", pwwn_buf);
		break;

	case BFA_FCPORT_SM_DISABLE:
		/*
		 * Already disabled.
		 */
		break;

	case BFA_FCPORT_SM_HWFAIL:
		bfa_sm_set_state(fcport, bfa_fcport_sm_iocfail);
		break;

	default:
		bfa_sm_fault(fcport->bfa, event);
	}
}
2527
/*
 * Port is stopped; only a fresh START brings it back to life.
 */
static void
bfa_fcport_sm_stopped(struct bfa_fcport_s *fcport,
		enum bfa_fcport_sm_event event)
{
	bfa_trc(fcport->bfa, event);

	switch (event) {
	case BFA_FCPORT_SM_START:
		if (bfa_fcport_send_enable(fcport))
			bfa_sm_set_state(fcport, bfa_fcport_sm_enabling);
		else
			bfa_sm_set_state(fcport,
					bfa_fcport_sm_enabling_qwait);
		break;

	default:
		/*
		 * Ignore all other events.
		 */
		;
	}
}
2550
/*
 * Port is enabled. IOC is down/failed.
 */
static void
bfa_fcport_sm_iocdown(struct bfa_fcport_s *fcport,
		enum bfa_fcport_sm_event event)
{
	bfa_trc(fcport->bfa, event);

	switch (event) {
	case BFA_FCPORT_SM_START:
		/* START re-enables the port */
		if (bfa_fcport_send_enable(fcport))
			bfa_sm_set_state(fcport, bfa_fcport_sm_enabling);
		else
			bfa_sm_set_state(fcport,
					bfa_fcport_sm_enabling_qwait);
		break;

	default:
		/*
		 * Ignore all events.
		 */
		;
	}
}
2576
/*
 * Port is disabled. IOC is down/failed.
 */
static void
bfa_fcport_sm_iocfail(struct bfa_fcport_s *fcport,
		enum bfa_fcport_sm_event event)
{
	bfa_trc(fcport->bfa, event);

	switch (event) {
	case BFA_FCPORT_SM_START:
		/* port stays disabled across the restart */
		bfa_sm_set_state(fcport, bfa_fcport_sm_disabled);
		break;

	case BFA_FCPORT_SM_ENABLE:
		/* record the enable; it takes effect on the next START
		 * (handled by the iocdown state) */
		bfa_sm_set_state(fcport, bfa_fcport_sm_iocdown);
		break;

	default:
		/*
		 * Ignore all events.
		 */
		;
	}
}
2602
/*
 * Link notification state machine: link state is down.
 */
static void
bfa_fcport_ln_sm_dn(struct bfa_fcport_ln_s *ln,
		enum bfa_fcport_ln_sm_event event)
{
	bfa_trc(ln->fcport->bfa, event);

	switch (event) {
	case BFA_FCPORT_LN_SM_LINKUP:
		/* queue the up callback and wait for its notification */
		bfa_sm_set_state(ln, bfa_fcport_ln_sm_up_nf);
		bfa_fcport_queue_cb(ln, BFA_PORT_LINKUP);
		break;

	default:
		bfa_sm_fault(ln->fcport->bfa, event);
	}
}
2622
/*
 * Link state is down; waiting for the queued down notification to
 * complete.
 */
static void
bfa_fcport_ln_sm_dn_nf(struct bfa_fcport_ln_s *ln,
		enum bfa_fcport_ln_sm_event event)
{
	bfa_trc(ln->fcport->bfa, event);

	switch (event) {
	case BFA_FCPORT_LN_SM_LINKUP:
		/* remember the link-up until the notification finishes */
		bfa_sm_set_state(ln, bfa_fcport_ln_sm_dn_up_nf);
		break;

	case BFA_FCPORT_LN_SM_NOTIFICATION:
		bfa_sm_set_state(ln, bfa_fcport_ln_sm_dn);
		break;

	default:
		bfa_sm_fault(ln->fcport->bfa, event);
	}
}
2645
/*
 * Link state is waiting for down notification and there is a pending up
 */
static void
bfa_fcport_ln_sm_dn_up_nf(struct bfa_fcport_ln_s *ln,
		enum bfa_fcport_ln_sm_event event)
{
	bfa_trc(ln->fcport->bfa, event);

	switch (event) {
	case BFA_FCPORT_LN_SM_LINKDOWN:
		/* the pending up is cancelled by a new down */
		bfa_sm_set_state(ln, bfa_fcport_ln_sm_dn_nf);
		break;

	case BFA_FCPORT_LN_SM_NOTIFICATION:
		/* down delivered; now queue the pending up */
		bfa_sm_set_state(ln, bfa_fcport_ln_sm_up_nf);
		bfa_fcport_queue_cb(ln, BFA_PORT_LINKUP);
		break;

	default:
		bfa_sm_fault(ln->fcport->bfa, event);
	}
}
2669
/*
 * Link notification state machine: link state is up.
 */
static void
bfa_fcport_ln_sm_up(struct bfa_fcport_ln_s *ln,
		enum bfa_fcport_ln_sm_event event)
{
	bfa_trc(ln->fcport->bfa, event);

	switch (event) {
	case BFA_FCPORT_LN_SM_LINKDOWN:
		/* queue the down callback and wait for its notification */
		bfa_sm_set_state(ln, bfa_fcport_ln_sm_dn_nf);
		bfa_fcport_queue_cb(ln, BFA_PORT_LINKDOWN);
		break;

	default:
		bfa_sm_fault(ln->fcport->bfa, event);
	}
}
2689
/*
 * Link state is up; waiting for the queued up notification to complete.
 */
static void
bfa_fcport_ln_sm_up_nf(struct bfa_fcport_ln_s *ln,
		enum bfa_fcport_ln_sm_event event)
{
	bfa_trc(ln->fcport->bfa, event);

	switch (event) {
	case BFA_FCPORT_LN_SM_LINKDOWN:
		/* remember the link-down until the notification finishes */
		bfa_sm_set_state(ln, bfa_fcport_ln_sm_up_dn_nf);
		break;

	case BFA_FCPORT_LN_SM_NOTIFICATION:
		bfa_sm_set_state(ln, bfa_fcport_ln_sm_up);
		break;

	default:
		bfa_sm_fault(ln->fcport->bfa, event);
	}
}
2712
/*
 * Link state is waiting for up notification and there is a pending down
 */
static void
bfa_fcport_ln_sm_up_dn_nf(struct bfa_fcport_ln_s *ln,
		enum bfa_fcport_ln_sm_event event)
{
	bfa_trc(ln->fcport->bfa, event);

	switch (event) {
	case BFA_FCPORT_LN_SM_LINKUP:
		/* an up arrives behind the pending down */
		bfa_sm_set_state(ln, bfa_fcport_ln_sm_up_dn_up_nf);
		break;

	case BFA_FCPORT_LN_SM_NOTIFICATION:
		/* up delivered; now queue the pending down */
		bfa_sm_set_state(ln, bfa_fcport_ln_sm_dn_nf);
		bfa_fcport_queue_cb(ln, BFA_PORT_LINKDOWN);
		break;

	default:
		bfa_sm_fault(ln->fcport->bfa, event);
	}
}
2736
Jing Huang5fbe25c2010-10-18 17:17:23 -07002737/*
Krishna Gudipatia36c61f2010-09-15 11:50:55 -07002738 * Link state is waiting for up notification and there are pending down and up
2739 */
/*
 * Link-notify SM: up notification in flight with both a down and a
 * subsequent up pending behind it.
 */
static void
bfa_fcport_ln_sm_up_dn_up_nf(struct bfa_fcport_ln_s *ln,
			enum bfa_fcport_ln_sm_event event)
{
	bfa_trc(ln->fcport->bfa, event);

	switch (event) {
	case BFA_FCPORT_LN_SM_LINKDOWN:
		/* The trailing up is cancelled; only the down remains. */
		bfa_sm_set_state(ln, bfa_fcport_ln_sm_up_dn_nf);
		break;

	case BFA_FCPORT_LN_SM_NOTIFICATION:
		/* Up notify done; deliver the down, keep the up pending. */
		bfa_sm_set_state(ln, bfa_fcport_ln_sm_dn_up_nf);
		bfa_fcport_queue_cb(ln, BFA_PORT_LINKDOWN);
		break;

	default:
		bfa_sm_fault(ln->fcport->bfa, event);
	}
}
2760
/*
 * Deferred-callback trampoline for queued link-state events.
 * On completion, invoke the registered event callback with the saved
 * event; on cancellation, feed a NOTIFICATION back into the link SM so
 * it can advance past the pending state.
 */
static void
__bfa_cb_fcport_event(void *cbarg, bfa_boolean_t complete)
{
	struct bfa_fcport_ln_s *ln = cbarg;

	if (complete)
		ln->fcport->event_cbfn(ln->fcport->event_cbarg, ln->ln_event);
	else
		bfa_sm_send_event(ln, BFA_FCPORT_LN_SM_NOTIFICATION);
}
2771
Jing Huang5fbe25c2010-10-18 17:17:23 -07002772/*
Krishna Gudipatia36c61f2010-09-15 11:50:55 -07002773 * Send SCN notification to upper layers.
2774 * trunk - false if caller is fcport to ignore fcport event in trunked mode
2775 */
/*
 * Send SCN notification to upper layers.
 * trunk - false if caller is fcport, to ignore fcport events while the
 * port is configured for trunking (trunk SCNs drive the SM instead).
 */
static void
bfa_fcport_scn(struct bfa_fcport_s *fcport, enum bfa_port_linkstate event,
	bfa_boolean_t trunk)
{
	/* In trunked mode only trunk-originated events are forwarded. */
	if (fcport->cfg.trunked && !trunk)
		return;

	switch (event) {
	case BFA_PORT_LINKUP:
		bfa_sm_send_event(&fcport->ln, BFA_FCPORT_LN_SM_LINKUP);
		break;
	case BFA_PORT_LINKDOWN:
		bfa_sm_send_event(&fcport->ln, BFA_FCPORT_LN_SM_LINKDOWN);
		break;
	default:
		WARN_ON(1);
	}
}
2794
/*
 * Deliver a link-state event to the upper layer. When an FCS is
 * attached, call back synchronously and immediately complete the
 * notification; otherwise defer via the BFA callback queue (the SM is
 * advanced later by __bfa_cb_fcport_event).
 */
static void
bfa_fcport_queue_cb(struct bfa_fcport_ln_s *ln, enum bfa_port_linkstate event)
{
	struct bfa_fcport_s *fcport = ln->fcport;

	if (fcport->bfa->fcs) {
		fcport->event_cbfn(fcport->event_cbarg, event);
		bfa_sm_send_event(ln, BFA_FCPORT_LN_SM_NOTIFICATION);
	} else {
		ln->ln_event = event;
		bfa_cb_queue(fcport->bfa, &ln->ln_qe,
			__bfa_cb_fcport_event, ln);
	}
}
2809
/* DMA area size for port stats, rounded up to a cache-line multiple. */
#define FCPORT_STATS_DMA_SZ (BFA_ROUNDUP(sizeof(union bfa_fcport_stats_u), \
			BFA_CACHELINE_SZ))
2812
/*
 * Module meminfo hook: reserve the DMA area used for firmware port
 * statistics.
 */
static void
bfa_fcport_meminfo(struct bfa_iocfc_cfg_s *cfg, struct bfa_meminfo_s *minfo,
		struct bfa_s *bfa)
{
	struct bfa_mem_dma_s *fcport_dma = BFA_MEM_FCPORT_DMA(bfa);

	bfa_mem_dma_setup(minfo, fcport_dma, FCPORT_STATS_DMA_SZ);
}
2821
2822static void
2823bfa_fcport_qresume(void *cbarg)
2824{
2825 struct bfa_fcport_s *fcport = cbarg;
2826
2827 bfa_sm_send_event(fcport, BFA_FCPORT_SM_QRESUME);
2828}
2829
/*
 * Claim the previously reserved DMA area for port statistics and record
 * its kernel-virtual and physical addresses.
 */
static void
bfa_fcport_mem_claim(struct bfa_fcport_s *fcport)
{
	struct bfa_mem_dma_s *fcport_dma = &fcport->fcport_dma;

	fcport->stats_kva = bfa_mem_dma_virt(fcport_dma);
	fcport->stats_pa  = bfa_mem_dma_phys(fcport_dma);
	/* stats aliases the same virtual address as stats_kva, typed. */
	fcport->stats = (union bfa_fcport_stats_u *)
				bfa_mem_dma_virt(fcport_dma);
}
2840
Jing Huang5fbe25c2010-10-18 17:17:23 -07002841/*
Krishna Gudipatia36c61f2010-09-15 11:50:55 -07002842 * Memory initialization.
2843 */
/*
 * Module attach: claim stats DMA memory, reset both the port and
 * link-notify state machines, stamp the stats-reset time, and install
 * the default port configuration (P2P, auto speed, no trunking).
 * bfad/cfg/pcidev are part of the module-attach signature but unused.
 */
static void
bfa_fcport_attach(struct bfa_s *bfa, void *bfad, struct bfa_iocfc_cfg_s *cfg,
		struct bfa_pcidev_s *pcidev)
{
	struct bfa_fcport_s *fcport = BFA_FCPORT_MOD(bfa);
	struct bfa_port_cfg_s *port_cfg = &fcport->cfg;
	struct bfa_fcport_ln_s *ln = &fcport->ln;
	struct timeval tv;

	fcport->bfa = bfa;
	ln->fcport = fcport;

	bfa_fcport_mem_claim(fcport);

	bfa_sm_set_state(fcport, bfa_fcport_sm_uninit);
	bfa_sm_set_state(ln, bfa_fcport_ln_sm_dn);

	/*
	 * initialize time stamp for stats reset
	 */
	do_gettimeofday(&tv);
	fcport->stats_reset_time = tv.tv_sec;

	/*
	 * initialize and set default configuration
	 */
	port_cfg->topology = BFA_PORT_TOPOLOGY_P2P;
	port_cfg->speed = BFA_PORT_SPEED_AUTO;
	port_cfg->trunked = BFA_FALSE;
	/* 0 means "take the IOC maximum" — resolved in bfa_fcport_init(). */
	port_cfg->maxfrsize = 0;

	port_cfg->trl_def_speed = BFA_PORT_SPEED_1GBPS;

	bfa_reqq_winit(&fcport->reqq_wait, bfa_fcport_qresume, fcport);
}
2879
/*
 * Module detach hook: intentionally empty — fcport owns no resources
 * that need explicit teardown here.
 */
static void
bfa_fcport_detach(struct bfa_s *bfa)
{
}
2884
Jing Huang5fbe25c2010-10-18 17:17:23 -07002885/*
Krishna Gudipatia36c61f2010-09-15 11:50:55 -07002886 * Called when IOC is ready.
2887 */
/*
 * Called when IOC is ready: start the port state machine.
 */
static void
bfa_fcport_start(struct bfa_s *bfa)
{
	bfa_sm_send_event(BFA_FCPORT_MOD(bfa), BFA_FCPORT_SM_START);
}
2893
Jing Huang5fbe25c2010-10-18 17:17:23 -07002894/*
Krishna Gudipatia36c61f2010-09-15 11:50:55 -07002895 * Called before IOC is stopped.
2896 */
/*
 * Called before IOC is stopped: stop the port SM and take the trunk
 * attributes offline.
 */
static void
bfa_fcport_stop(struct bfa_s *bfa)
{
	bfa_sm_send_event(BFA_FCPORT_MOD(bfa), BFA_FCPORT_SM_STOP);
	bfa_trunk_iocdisable(bfa);
}
2903
Jing Huang5fbe25c2010-10-18 17:17:23 -07002904/*
Krishna Gudipatia36c61f2010-09-15 11:50:55 -07002905 * Called when IOC failure is detected.
2906 */
/*
 * Called when IOC failure is detected: fail the port SM and notify the
 * trunk layer that the IOC is gone.
 */
static void
bfa_fcport_iocdisable(struct bfa_s *bfa)
{
	struct bfa_fcport_s *fcport = BFA_FCPORT_MOD(bfa);

	bfa_sm_send_event(fcport, BFA_FCPORT_SM_HWFAIL);
	bfa_trunk_iocdisable(bfa);
}
2915
/*
 * Refresh cached link attributes (speed, topology, QoS, FCoE VLAN)
 * from the most recent firmware link-state event.
 */
static void
bfa_fcport_update_linkinfo(struct bfa_fcport_s *fcport)
{
	struct bfi_fcport_event_s *pevent = fcport->event_arg.i2hmsg.event;
	struct bfa_fcport_trunk_s *trunk = &fcport->trunk;

	fcport->speed = pevent->link_state.speed;
	fcport->topology = pevent->link_state.topology;

	/* myalpa is only meaningful in loop topology; clear it here. */
	if (fcport->topology == BFA_PORT_TOPOLOGY_LOOP)
		fcport->myalpa = 0;

	/* QoS Details */
	fcport->qos_attr = pevent->link_state.qos_attr;
	fcport->qos_vc_attr = pevent->link_state.vc_fcf.qos_vc_attr;

	/*
	 * update trunk state if applicable
	 */
	if (!fcport->cfg.trunked)
		trunk->attr.state = BFA_TRUNK_DISABLED;

	/* update FCoE specific */
	fcport->fcoe_vlan = be16_to_cpu(pevent->link_state.vc_fcf.fcf.vlan);

	bfa_trc(fcport->bfa, fcport->speed);
	bfa_trc(fcport->bfa, fcport->topology);
}
2944
/*
 * Clear cached link attributes when the link goes away.
 */
static void
bfa_fcport_reset_linkinfo(struct bfa_fcport_s *fcport)
{
	fcport->speed = BFA_PORT_SPEED_UNKNOWN;
	fcport->topology = BFA_PORT_TOPOLOGY_NONE;
	/* BB_SCN (buffer-to-buffer state change) is only valid while up. */
	fcport->bbsc_op_state = BFA_FALSE;
}
2952
Jing Huang5fbe25c2010-10-18 17:17:23 -07002953/*
Krishna Gudipatia36c61f2010-09-15 11:50:55 -07002954 * Send port enable message to firmware.
2955 */
/*
 * Send port enable message to firmware.
 * Returns BFA_TRUE if the request was queued, BFA_FALSE if the request
 * queue was full (in which case a queue-wait is armed and the SM will
 * be resumed via bfa_fcport_qresume).
 */
static bfa_boolean_t
bfa_fcport_send_enable(struct bfa_fcport_s *fcport)
{
	struct bfi_fcport_enable_req_s *m;

	/*
	 * Increment message tag before queue check, so that responses to old
	 * requests are discarded.
	 */
	fcport->msgtag++;

	/*
	 * check for room in queue to send request now
	 */
	m = bfa_reqq_next(fcport->bfa, BFA_REQQ_PORT);
	if (!m) {
		bfa_reqq_wait(fcport->bfa, BFA_REQQ_PORT,
							&fcport->reqq_wait);
		return BFA_FALSE;
	}

	bfi_h2i_set(m->mh, BFI_MC_FCPORT, BFI_FCPORT_H2I_ENABLE_REQ,
			bfa_fn_lpu(fcport->bfa));
	m->nwwn = fcport->nwwn;
	m->pwwn = fcport->pwwn;
	m->port_cfg = fcport->cfg;
	m->msgtag = fcport->msgtag;
	/* maxfrsize travels big-endian on the wire. */
	m->port_cfg.maxfrsize = cpu_to_be16(fcport->cfg.maxfrsize);
	m->use_flash_cfg = fcport->use_flash_cfg;
	bfa_dma_be_addr_set(m->stats_dma_addr, fcport->stats_pa);
	bfa_trc(fcport->bfa, m->stats_dma_addr.a32.addr_lo);
	bfa_trc(fcport->bfa, m->stats_dma_addr.a32.addr_hi);

	/*
	 * queue I/O message to firmware
	 */
	bfa_reqq_produce(fcport->bfa, BFA_REQQ_PORT, m->mh);
	return BFA_TRUE;
}
2995
Jing Huang5fbe25c2010-10-18 17:17:23 -07002996/*
Krishna Gudipatia36c61f2010-09-15 11:50:55 -07002997 * Send port disable message to firmware.
2998 */
/*
 * Send port disable message to firmware.
 * Returns BFA_TRUE if queued, BFA_FALSE if the request queue was full
 * (queue-wait armed for retry).
 */
static bfa_boolean_t
bfa_fcport_send_disable(struct bfa_fcport_s *fcport)
{
	struct bfi_fcport_req_s *m;

	/*
	 * Increment message tag before queue check, so that responses to old
	 * requests are discarded.
	 */
	fcport->msgtag++;

	/*
	 * check for room in queue to send request now
	 */
	m = bfa_reqq_next(fcport->bfa, BFA_REQQ_PORT);
	if (!m) {
		bfa_reqq_wait(fcport->bfa, BFA_REQQ_PORT,
							&fcport->reqq_wait);
		return BFA_FALSE;
	}

	bfi_h2i_set(m->mh, BFI_MC_FCPORT, BFI_FCPORT_H2I_DISABLE_REQ,
			bfa_fn_lpu(fcport->bfa));
	m->msgtag = fcport->msgtag;

	/*
	 * queue I/O message to firmware
	 */
	bfa_reqq_produce(fcport->bfa, BFA_REQQ_PORT, m->mh);

	return BFA_TRUE;
}
3031
3032static void
3033bfa_fcport_set_wwns(struct bfa_fcport_s *fcport)
3034{
Maggie Zhangf7f738122010-12-09 19:08:43 -08003035 fcport->pwwn = fcport->bfa->ioc.attr->pwwn;
3036 fcport->nwwn = fcport->bfa->ioc.attr->nwwn;
Krishna Gudipatia36c61f2010-09-15 11:50:55 -07003037
3038 bfa_trc(fcport->bfa, fcport->pwwn);
3039 bfa_trc(fcport->bfa, fcport->nwwn);
3040}
3041
/*
 * Send the configured TX BB-credit and BB_SCN values to firmware.
 * Best-effort: if the request queue is full, the update is dropped
 * (only traced) rather than retried.
 */
static void
bfa_fcport_send_txcredit(void *port_cbarg)
{

	struct bfa_fcport_s *fcport = port_cbarg;
	struct bfi_fcport_set_svc_params_req_s *m;

	/*
	 * check for room in queue to send request now
	 */
	m = bfa_reqq_next(fcport->bfa, BFA_REQQ_PORT);
	if (!m) {
		bfa_trc(fcport->bfa, fcport->cfg.tx_bbcredit);
		return;
	}

	bfi_h2i_set(m->mh, BFI_MC_FCPORT, BFI_FCPORT_H2I_SET_SVC_PARAMS_REQ,
			bfa_fn_lpu(fcport->bfa));
	m->tx_bbcredit = cpu_to_be16((u16)fcport->cfg.tx_bbcredit);
	m->bb_scn = fcport->cfg.bb_scn;

	/*
	 * queue I/O message to firmware
	 */
	bfa_reqq_produce(fcport->bfa, BFA_REQQ_PORT, m->mh);
}
3068
3069static void
3070bfa_fcport_qos_stats_swap(struct bfa_qos_stats_s *d,
3071 struct bfa_qos_stats_s *s)
3072{
3073 u32 *dip = (u32 *) d;
Maggie50444a32010-11-29 18:26:32 -08003074 __be32 *sip = (__be32 *) s;
Krishna Gudipatia36c61f2010-09-15 11:50:55 -07003075 int i;
3076
3077 /* Now swap the 32 bit fields */
3078 for (i = 0; i < (sizeof(struct bfa_qos_stats_s)/sizeof(u32)); ++i)
Jing Huangba816ea2010-10-18 17:10:50 -07003079 dip[i] = be32_to_cpu(sip[i]);
Krishna Gudipatia36c61f2010-09-15 11:50:55 -07003080}
3081
/*
 * Copy FCoE statistics from the firmware DMA buffer into host order.
 * The stats are 64-bit quantities laid out as 32-bit word pairs; on
 * little-endian hosts the two words of each pair must additionally be
 * exchanged, hence the stride-2 loop and the __BIG_ENDIAN split.
 */
static void
bfa_fcport_fcoe_stats_swap(struct bfa_fcoe_stats_s *d,
			struct bfa_fcoe_stats_s *s)
{
	u32	*dip = (u32 *) d;
	__be32	*sip = (__be32 *) s;
	int	i;

	for (i = 0; i < ((sizeof(struct bfa_fcoe_stats_s))/sizeof(u32));
	     i = i + 2) {
#ifdef __BIG_ENDIAN
		dip[i] = be32_to_cpu(sip[i]);
		dip[i + 1] = be32_to_cpu(sip[i + 1]);
#else
		/* swap word order within each 64-bit counter as well */
		dip[i] = be32_to_cpu(sip[i + 1]);
		dip[i + 1] = be32_to_cpu(sip[i]);
#endif
	}
}
3101
/*
 * Deferred completion for a stats-get request. On completion, byte-swap
 * the DMA'd stats into the caller's buffer (FC QoS vs FCoE layout) and
 * invoke the caller's callback; on cancellation, just clear the busy
 * state. Note: stats_busy is left set on the completion path — it is
 * the caller's callback chain that owns the next state transition.
 */
static void
__bfa_cb_fcport_stats_get(void *cbarg, bfa_boolean_t complete)
{
	struct bfa_fcport_s *fcport = cbarg;

	if (complete) {
		if (fcport->stats_status == BFA_STATUS_OK) {
			struct timeval tv;

			/* Swap FC QoS or FCoE stats */
			if (bfa_ioc_get_fcmode(&fcport->bfa->ioc)) {
				bfa_fcport_qos_stats_swap(
					&fcport->stats_ret->fcqos,
					&fcport->stats->fcqos);
			} else {
				bfa_fcport_fcoe_stats_swap(
					&fcport->stats_ret->fcoe,
					&fcport->stats->fcoe);

				/* seconds elapsed since the last reset */
				do_gettimeofday(&tv);
				fcport->stats_ret->fcoe.secs_reset =
					tv.tv_sec - fcport->stats_reset_time;
			}
		}
		fcport->stats_cbfn(fcport->stats_cbarg, fcport->stats_status);
	} else {
		fcport->stats_busy = BFA_FALSE;
		fcport->stats_status = BFA_STATUS_OK;
	}
}
3132
/*
 * Timer pop for a stats-get request: cancel any outstanding queue-wait,
 * mark the request timed out and complete it through the callback queue.
 */
static void
bfa_fcport_stats_get_timeout(void *cbarg)
{
	struct bfa_fcport_s *fcport = (struct bfa_fcport_s *) cbarg;

	bfa_trc(fcport->bfa, fcport->stats_qfull);

	if (fcport->stats_qfull) {
		bfa_reqq_wcancel(&fcport->stats_reqq_wait);
		fcport->stats_qfull = BFA_FALSE;
	}

	fcport->stats_status = BFA_STATUS_ETIMER;
	bfa_cb_queue(fcport->bfa, &fcport->hcb_qe, __bfa_cb_fcport_stats_get,
		fcport);
}
3149
/*
 * Queue a stats-get request to firmware; if the request queue is full,
 * arm a queue-wait that re-enters this function when space frees up.
 */
static void
bfa_fcport_send_stats_get(void *cbarg)
{
	struct bfa_fcport_s *fcport = (struct bfa_fcport_s *) cbarg;
	struct bfi_fcport_req_s *msg;

	msg = bfa_reqq_next(fcport->bfa, BFA_REQQ_PORT);

	if (!msg) {
		fcport->stats_qfull = BFA_TRUE;
		bfa_reqq_winit(&fcport->stats_reqq_wait,
				bfa_fcport_send_stats_get, fcport);
		bfa_reqq_wait(fcport->bfa, BFA_REQQ_PORT,
				&fcport->stats_reqq_wait);
		return;
	}
	fcport->stats_qfull = BFA_FALSE;

	memset(msg, 0, sizeof(struct bfi_fcport_req_s));
	bfi_h2i_set(msg->mh, BFI_MC_FCPORT, BFI_FCPORT_H2I_STATS_GET_REQ,
			bfa_fn_lpu(fcport->bfa));
	bfa_reqq_produce(fcport->bfa, BFA_REQQ_PORT, msg->mh);
}
3173
/*
 * Deferred completion for a stats-clear request. On completion, restamp
 * the stats-reset time and invoke the caller's callback; on
 * cancellation, just clear the busy state.
 */
static void
__bfa_cb_fcport_stats_clr(void *cbarg, bfa_boolean_t complete)
{
	struct bfa_fcport_s *fcport = cbarg;

	if (complete) {
		struct timeval tv;

		/*
		 * re-initialize time stamp for stats reset
		 */
		do_gettimeofday(&tv);
		fcport->stats_reset_time = tv.tv_sec;

		fcport->stats_cbfn(fcport->stats_cbarg, fcport->stats_status);
	} else {
		fcport->stats_busy = BFA_FALSE;
		fcport->stats_status = BFA_STATUS_OK;
	}
}
3194
/*
 * Timer pop for a stats-clear request: cancel any outstanding
 * queue-wait, mark the request timed out and complete it through the
 * callback queue.
 */
static void
bfa_fcport_stats_clr_timeout(void *cbarg)
{
	struct bfa_fcport_s *fcport = (struct bfa_fcport_s *) cbarg;

	bfa_trc(fcport->bfa, fcport->stats_qfull);

	if (fcport->stats_qfull) {
		bfa_reqq_wcancel(&fcport->stats_reqq_wait);
		fcport->stats_qfull = BFA_FALSE;
	}

	fcport->stats_status = BFA_STATUS_ETIMER;
	bfa_cb_queue(fcport->bfa, &fcport->hcb_qe,
			__bfa_cb_fcport_stats_clr, fcport);
}
3211
/*
 * Queue a stats-clear request to firmware; if the request queue is
 * full, arm a queue-wait that re-enters this function when space frees
 * up.
 */
static void
bfa_fcport_send_stats_clear(void *cbarg)
{
	struct bfa_fcport_s *fcport = (struct bfa_fcport_s *) cbarg;
	struct bfi_fcport_req_s *msg;

	msg = bfa_reqq_next(fcport->bfa, BFA_REQQ_PORT);

	if (!msg) {
		fcport->stats_qfull = BFA_TRUE;
		bfa_reqq_winit(&fcport->stats_reqq_wait,
				bfa_fcport_send_stats_clear, fcport);
		bfa_reqq_wait(fcport->bfa, BFA_REQQ_PORT,
						&fcport->stats_reqq_wait);
		return;
	}
	fcport->stats_qfull = BFA_FALSE;

	memset(msg, 0, sizeof(struct bfi_fcport_req_s));
	bfi_h2i_set(msg->mh, BFI_MC_FCPORT, BFI_FCPORT_H2I_STATS_CLEAR_REQ,
			bfa_fn_lpu(fcport->bfa));
	bfa_reqq_produce(fcport->bfa, BFA_REQQ_PORT, msg->mh);
}
3235
Jing Huang5fbe25c2010-10-18 17:17:23 -07003236/*
Krishna Gudipatia36c61f2010-09-15 11:50:55 -07003237 * Handle trunk SCN event from firmware.
3238 */
/*
 * Handle trunk SCN event from firmware: update cached trunk/link
 * attributes, log which member links are up, and notify upper layers
 * if the trunk state changed.
 */
static void
bfa_trunk_scn(struct bfa_fcport_s *fcport, struct bfi_fcport_trunk_scn_s *scn)
{
	struct bfa_fcport_trunk_s *trunk = &fcport->trunk;
	struct bfi_fcport_trunk_link_s *tlink;
	struct bfa_trunk_link_attr_s *lattr;
	enum bfa_trunk_state state_prev;
	int i;
	int link_bm = 0;	/* bitmap of trunk links reported up */

	bfa_trc(fcport->bfa, fcport->cfg.trunked);
	WARN_ON(scn->trunk_state != BFA_TRUNK_ONLINE &&
		   scn->trunk_state != BFA_TRUNK_OFFLINE);

	bfa_trc(fcport->bfa, trunk->attr.state);
	bfa_trc(fcport->bfa, scn->trunk_state);
	bfa_trc(fcport->bfa, scn->trunk_speed);

	/*
	 * Save off new state for trunk attribute query
	 */
	state_prev = trunk->attr.state;
	if (fcport->cfg.trunked && (trunk->attr.state != BFA_TRUNK_DISABLED))
		trunk->attr.state = scn->trunk_state;
	trunk->attr.speed = scn->trunk_speed;
	for (i = 0; i < BFA_TRUNK_MAX_PORTS; i++) {
		lattr = &trunk->attr.link_attr[i];
		tlink = &scn->tlink[i];

		lattr->link_state = tlink->state;
		lattr->trunk_wwn  = tlink->trunk_wwn;
		lattr->fctl	  = tlink->fctl;
		lattr->speed	  = tlink->speed;
		lattr->deskew	  = be32_to_cpu(tlink->deskew);

		if (tlink->state == BFA_TRUNK_LINK_STATE_UP) {
			/* adopt the speed/topology of any up member link */
			fcport->speed	 = tlink->speed;
			fcport->topology = BFA_PORT_TOPOLOGY_P2P;
			link_bm |= 1 << i;
		}

		bfa_trc(fcport->bfa, lattr->link_state);
		bfa_trc(fcport->bfa, lattr->trunk_wwn);
		bfa_trc(fcport->bfa, lattr->fctl);
		bfa_trc(fcport->bfa, lattr->speed);
		bfa_trc(fcport->bfa, lattr->deskew);
	}

	/* plog which of the two member links are up */
	switch (link_bm) {
	case 3:
		bfa_plog_str(fcport->bfa->plog, BFA_PL_MID_HAL,
			BFA_PL_EID_TRUNK_SCN, 0, "Trunk up(0,1)");
		break;
	case 2:
		bfa_plog_str(fcport->bfa->plog, BFA_PL_MID_HAL,
			BFA_PL_EID_TRUNK_SCN, 0, "Trunk up(-,1)");
		break;
	case 1:
		bfa_plog_str(fcport->bfa->plog, BFA_PL_MID_HAL,
			BFA_PL_EID_TRUNK_SCN, 0, "Trunk up(0,-)");
		break;
	default:
		bfa_plog_str(fcport->bfa->plog, BFA_PL_MID_HAL,
			BFA_PL_EID_TRUNK_SCN, 0, "Trunk down");
	}

	/*
	 * Notify upper layers if trunk state changed.
	 */
	if ((state_prev != trunk->attr.state) ||
		(scn->trunk_state == BFA_TRUNK_OFFLINE)) {
		bfa_fcport_scn(fcport, (scn->trunk_state == BFA_TRUNK_ONLINE) ?
			BFA_PORT_LINKUP : BFA_PORT_LINKDOWN, BFA_TRUE);
	}
}
3314
/*
 * IOC went away: if trunking is configured, report link-down to the
 * upper layers and reset all cached trunk/link attributes to their
 * offline defaults.
 */
static void
bfa_trunk_iocdisable(struct bfa_s *bfa)
{
	struct bfa_fcport_s *fcport = BFA_FCPORT_MOD(bfa);
	int i = 0;

	/*
	 * In trunked mode, notify upper layers that link is down
	 */
	if (fcport->cfg.trunked) {
		if (fcport->trunk.attr.state == BFA_TRUNK_ONLINE)
			bfa_fcport_scn(fcport, BFA_PORT_LINKDOWN, BFA_TRUE);

		fcport->trunk.attr.state = BFA_TRUNK_OFFLINE;
		fcport->trunk.attr.speed = BFA_PORT_SPEED_UNKNOWN;
		for (i = 0; i < BFA_TRUNK_MAX_PORTS; i++) {
			fcport->trunk.attr.link_attr[i].trunk_wwn = 0;
			fcport->trunk.attr.link_attr[i].fctl =
				BFA_TRUNK_LINK_FCTL_NORMAL;
			fcport->trunk.attr.link_attr[i].link_state =
				BFA_TRUNK_LINK_STATE_DN_LINKDN;
			fcport->trunk.attr.link_attr[i].speed =
				BFA_PORT_SPEED_UNKNOWN;
			fcport->trunk.attr.link_attr[i].deskew = 0;
		}
	}
}
3342
Jing Huang5fbe25c2010-10-18 17:17:23 -07003343/*
Krishna Gudipatia36c61f2010-09-15 11:50:55 -07003344 * Called to initialize port attributes
3345 */
/*
 * Called to initialize port attributes from IOC hardware data: WWNs,
 * max frame size (if not configured), RX BB-credit and supported speed.
 * Also latches the pre-boot (PBC) port-disable state.
 */
void
bfa_fcport_init(struct bfa_s *bfa)
{
	struct bfa_fcport_s *fcport = BFA_FCPORT_MOD(bfa);

	/*
	 * Initialize port attributes from IOC hardware data.
	 */
	bfa_fcport_set_wwns(fcport);
	if (fcport->cfg.maxfrsize == 0)
		fcport->cfg.maxfrsize = bfa_ioc_maxfrsize(&bfa->ioc);
	fcport->cfg.rx_bbcredit = bfa_ioc_rx_bbcredit(&bfa->ioc);
	fcport->speed_sup = bfa_ioc_speed_sup(&bfa->ioc);

	if (bfa_fcport_is_pbcdisabled(bfa))
		bfa->modules.port.pbc_disabled = BFA_TRUE;

	/* sanity: all three must be populated by now */
	WARN_ON(!fcport->cfg.maxfrsize);
	WARN_ON(!fcport->cfg.rx_bbcredit);
	WARN_ON(!fcport->speed_sup);
}
3367
Jing Huang5fbe25c2010-10-18 17:17:23 -07003368/*
Krishna Gudipatia36c61f2010-09-15 11:50:55 -07003369 * Firmware message handler.
3370 */
/*
 * Firmware message handler: dispatch fcport I2H messages (enable/
 * disable responses, link events, trunk SCNs, stats responses and
 * enable/disable AENs) into the port state machine and callback queue.
 */
void
bfa_fcport_isr(struct bfa_s *bfa, struct bfi_msg_s *msg)
{
	struct bfa_fcport_s *fcport = BFA_FCPORT_MOD(bfa);
	union bfi_fcport_i2h_msg_u i2hmsg;

	i2hmsg.msg = msg;
	fcport->event_arg.i2hmsg = i2hmsg;

	bfa_trc(bfa, msg->mhdr.msg_id);
	bfa_trc(bfa, bfa_sm_to_state(hal_port_sm_table, fcport->sm));

	switch (msg->mhdr.msg_id) {
	case BFI_FCPORT_I2H_ENABLE_RSP:
		/* stale responses (old msgtag) are silently dropped */
		if (fcport->msgtag == i2hmsg.penable_rsp->msgtag) {

			if (fcport->use_flash_cfg) {
				/* adopt the flash-stored port config;
				 * multi-byte fields arrive big-endian */
				fcport->cfg = i2hmsg.penable_rsp->port_cfg;
				fcport->cfg.maxfrsize =
					cpu_to_be16(fcport->cfg.maxfrsize);
				fcport->cfg.path_tov =
					cpu_to_be16(fcport->cfg.path_tov);
				fcport->cfg.q_depth =
					cpu_to_be16(fcport->cfg.q_depth);

				if (fcport->cfg.trunked)
					fcport->trunk.attr.state =
						BFA_TRUNK_OFFLINE;
				else
					fcport->trunk.attr.state =
						BFA_TRUNK_DISABLED;
				fcport->use_flash_cfg = BFA_FALSE;
			}

			bfa_sm_send_event(fcport, BFA_FCPORT_SM_FWRSP);
		}
		break;

	case BFI_FCPORT_I2H_DISABLE_RSP:
		if (fcport->msgtag == i2hmsg.penable_rsp->msgtag)
			bfa_sm_send_event(fcport, BFA_FCPORT_SM_FWRSP);
		break;

	case BFI_FCPORT_I2H_EVENT:
		if (i2hmsg.event->link_state.linkstate == BFA_PORT_LINKUP)
			bfa_sm_send_event(fcport, BFA_FCPORT_SM_LINKUP);
		else
			bfa_sm_send_event(fcport, BFA_FCPORT_SM_LINKDOWN);
		break;

	case BFI_FCPORT_I2H_TRUNK_SCN:
		bfa_trunk_scn(fcport, i2hmsg.trunk_scn);
		break;

	case BFI_FCPORT_I2H_STATS_GET_RSP:
		/*
		 * check for timer pop before processing the rsp
		 */
		if (fcport->stats_busy == BFA_FALSE ||
		    fcport->stats_status == BFA_STATUS_ETIMER)
			break;

		bfa_timer_stop(&fcport->timer);
		fcport->stats_status = i2hmsg.pstatsget_rsp->status;
		bfa_cb_queue(fcport->bfa, &fcport->hcb_qe,
				__bfa_cb_fcport_stats_get, fcport);
		break;

	case BFI_FCPORT_I2H_STATS_CLEAR_RSP:
		/*
		 * check for timer pop before processing the rsp
		 */
		if (fcport->stats_busy == BFA_FALSE ||
		    fcport->stats_status == BFA_STATUS_ETIMER)
			break;

		bfa_timer_stop(&fcport->timer);
		fcport->stats_status = BFA_STATUS_OK;
		bfa_cb_queue(fcport->bfa, &fcport->hcb_qe,
				__bfa_cb_fcport_stats_clr, fcport);
		break;

	case BFI_FCPORT_I2H_ENABLE_AEN:
		bfa_sm_send_event(fcport, BFA_FCPORT_SM_ENABLE);
		break;

	case BFI_FCPORT_I2H_DISABLE_AEN:
		bfa_sm_send_event(fcport, BFA_FCPORT_SM_DISABLE);
		break;

	default:
		WARN_ON(1);
		break;
	}
}
3466
Jing Huang5fbe25c2010-10-18 17:17:23 -07003467/*
Krishna Gudipatia36c61f2010-09-15 11:50:55 -07003468 * Registered callback for port events.
3469 */
3470void
3471bfa_fcport_event_register(struct bfa_s *bfa,
3472 void (*cbfn) (void *cbarg,
3473 enum bfa_port_linkstate event),
3474 void *cbarg)
3475{
3476 struct bfa_fcport_s *fcport = BFA_FCPORT_MOD(bfa);
3477
3478 fcport->event_cbfn = cbfn;
3479 fcport->event_cbarg = cbarg;
3480}
3481
/*
 * Request port enable. Rejected while the port is pre-boot disabled,
 * the IOC is disabled, or a diagnostic is running; otherwise feeds an
 * ENABLE event into the port state machine.
 */
bfa_status_t
bfa_fcport_enable(struct bfa_s *bfa)
{
	struct bfa_fcport_s *fcport = BFA_FCPORT_MOD(bfa);

	if (bfa_fcport_is_pbcdisabled(bfa))
		return BFA_STATUS_PBC;

	if (bfa_ioc_is_disabled(&bfa->ioc))
		return BFA_STATUS_IOC_DISABLED;

	if (fcport->diag_busy)
		return BFA_STATUS_DIAG_BUSY;

	bfa_sm_send_event(BFA_FCPORT_MOD(bfa), BFA_FCPORT_SM_ENABLE);
	return BFA_STATUS_OK;
}
3499
/*
 * Request port disable. Rejected while the port is pre-boot disabled
 * or the IOC is disabled; otherwise feeds a DISABLE event into the
 * port state machine.
 */
bfa_status_t
bfa_fcport_disable(struct bfa_s *bfa)
{
	if (bfa_fcport_is_pbcdisabled(bfa))
		return BFA_STATUS_PBC;

	if (bfa_ioc_is_disabled(&bfa->ioc))
		return BFA_STATUS_IOC_DISABLED;

	bfa_sm_send_event(BFA_FCPORT_MOD(bfa), BFA_FCPORT_SM_DISABLE);
	return BFA_STATUS_OK;
}
3512
/*
 * If the port is disabled by pre-boot configuration (PBC), return
 * BFA_STATUS_PBC; otherwise BFA_STATUS_OK.
 */
bfa_status_t
bfa_fcport_is_pbcdisabled(struct bfa_s *bfa)
{
	struct bfa_fcport_s *fcport = BFA_FCPORT_MOD(bfa);
	struct bfa_iocfc_s *iocfc = &bfa->iocfc;
	struct bfi_iocfc_cfgrsp_s *cfgrsp = iocfc->cfgrsp;

	if (cfgrsp->pbc_cfg.port_enabled == BFI_PBC_PORT_DISABLED) {
		bfa_trc(bfa, fcport->pwwn);
		return BFA_STATUS_PBC;
	}
	return BFA_STATUS_OK;
}
3527
Jing Huang5fbe25c2010-10-18 17:17:23 -07003528/*
Krishna Gudipatia36c61f2010-09-15 11:50:55 -07003529 * Configure port speed.
3530 */
/*
 * Configure port speed. Fails when trunking is enabled, when the speed
 * exceeds the hardware's supported maximum, or — on Mezz cards — when
 * the speed is not in the set valid for the adapter/ASIC type.
 */
bfa_status_t
bfa_fcport_cfg_speed(struct bfa_s *bfa, enum bfa_port_speed speed)
{
	struct bfa_fcport_s *fcport = BFA_FCPORT_MOD(bfa);

	bfa_trc(bfa, speed);

	if (fcport->cfg.trunked == BFA_TRUE)
		return BFA_STATUS_TRUNK_ENABLED;
	if ((speed != BFA_PORT_SPEED_AUTO) && (speed > fcport->speed_sup)) {
		bfa_trc(bfa, fcport->speed_sup);
		return BFA_STATUS_UNSUPP_SPEED;
	}

	/* For Mezz card, port speed entered needs to be checked */
	if (bfa_mfg_is_mezz(fcport->bfa->ioc.attr->card_type)) {
		if (bfa_ioc_get_type(&fcport->bfa->ioc) == BFA_IOC_TYPE_FC) {
			/* For CT2, 1G is not supported */
			if ((speed == BFA_PORT_SPEED_1GBPS) &&
			    (bfa_asic_id_ct2(bfa->ioc.pcidev.device_id)))
				return BFA_STATUS_UNSUPP_SPEED;

			/* Already checked for Auto Speed and Max Speed supp */
			if (!(speed == BFA_PORT_SPEED_1GBPS ||
			      speed == BFA_PORT_SPEED_2GBPS ||
			      speed == BFA_PORT_SPEED_4GBPS ||
			      speed == BFA_PORT_SPEED_8GBPS ||
			      speed == BFA_PORT_SPEED_16GBPS ||
			      speed == BFA_PORT_SPEED_AUTO))
				return BFA_STATUS_UNSUPP_SPEED;
		} else {
			/* non-FC (FCoE) Mezz ports run at 10G only */
			if (speed != BFA_PORT_SPEED_10GBPS)
				return BFA_STATUS_UNSUPP_SPEED;
		}
	}

	fcport->cfg.speed = speed;

	return BFA_STATUS_OK;
}
3571
Jing Huang5fbe25c2010-10-18 17:17:23 -07003572/*
Krishna Gudipatia36c61f2010-09-15 11:50:55 -07003573 * Get current speed.
3574 */
3575enum bfa_port_speed
3576bfa_fcport_get_speed(struct bfa_s *bfa)
3577{
3578 struct bfa_fcport_s *fcport = BFA_FCPORT_MOD(bfa);
3579
3580 return fcport->speed;
3581}
3582
Jing Huang5fbe25c2010-10-18 17:17:23 -07003583/*
Krishna Gudipatia36c61f2010-09-15 11:50:55 -07003584 * Configure port topology.
3585 */
3586bfa_status_t
3587bfa_fcport_cfg_topology(struct bfa_s *bfa, enum bfa_port_topology topology)
3588{
3589 struct bfa_fcport_s *fcport = BFA_FCPORT_MOD(bfa);
3590
3591 bfa_trc(bfa, topology);
3592 bfa_trc(bfa, fcport->cfg.topology);
3593
3594 switch (topology) {
3595 case BFA_PORT_TOPOLOGY_P2P:
3596 case BFA_PORT_TOPOLOGY_LOOP:
3597 case BFA_PORT_TOPOLOGY_AUTO:
3598 break;
3599
3600 default:
3601 return BFA_STATUS_EINVAL;
3602 }
3603
3604 fcport->cfg.topology = topology;
3605 return BFA_STATUS_OK;
3606}
3607
Jing Huang5fbe25c2010-10-18 17:17:23 -07003608/*
Krishna Gudipatia36c61f2010-09-15 11:50:55 -07003609 * Get current topology.
3610 */
3611enum bfa_port_topology
3612bfa_fcport_get_topology(struct bfa_s *bfa)
3613{
3614 struct bfa_fcport_s *fcport = BFA_FCPORT_MOD(bfa);
3615
3616 return fcport->topology;
3617}
3618
3619bfa_status_t
3620bfa_fcport_cfg_hardalpa(struct bfa_s *bfa, u8 alpa)
3621{
3622 struct bfa_fcport_s *fcport = BFA_FCPORT_MOD(bfa);
3623
3624 bfa_trc(bfa, alpa);
3625 bfa_trc(bfa, fcport->cfg.cfg_hardalpa);
3626 bfa_trc(bfa, fcport->cfg.hardalpa);
3627
3628 fcport->cfg.cfg_hardalpa = BFA_TRUE;
3629 fcport->cfg.hardalpa = alpa;
3630
3631 return BFA_STATUS_OK;
3632}
3633
3634bfa_status_t
3635bfa_fcport_clr_hardalpa(struct bfa_s *bfa)
3636{
3637 struct bfa_fcport_s *fcport = BFA_FCPORT_MOD(bfa);
3638
3639 bfa_trc(bfa, fcport->cfg.cfg_hardalpa);
3640 bfa_trc(bfa, fcport->cfg.hardalpa);
3641
3642 fcport->cfg.cfg_hardalpa = BFA_FALSE;
3643 return BFA_STATUS_OK;
3644}
3645
3646bfa_boolean_t
3647bfa_fcport_get_hardalpa(struct bfa_s *bfa, u8 *alpa)
3648{
3649 struct bfa_fcport_s *fcport = BFA_FCPORT_MOD(bfa);
3650
3651 *alpa = fcport->cfg.hardalpa;
3652 return fcport->cfg.cfg_hardalpa;
3653}
3654
3655u8
3656bfa_fcport_get_myalpa(struct bfa_s *bfa)
3657{
3658 struct bfa_fcport_s *fcport = BFA_FCPORT_MOD(bfa);
3659
3660 return fcport->myalpa;
3661}
3662
3663bfa_status_t
3664bfa_fcport_cfg_maxfrsize(struct bfa_s *bfa, u16 maxfrsize)
3665{
3666 struct bfa_fcport_s *fcport = BFA_FCPORT_MOD(bfa);
3667
3668 bfa_trc(bfa, maxfrsize);
3669 bfa_trc(bfa, fcport->cfg.maxfrsize);
3670
3671 /* with in range */
3672 if ((maxfrsize > FC_MAX_PDUSZ) || (maxfrsize < FC_MIN_PDUSZ))
3673 return BFA_STATUS_INVLD_DFSZ;
3674
3675 /* power of 2, if not the max frame size of 2112 */
3676 if ((maxfrsize != FC_MAX_PDUSZ) && (maxfrsize & (maxfrsize - 1)))
3677 return BFA_STATUS_INVLD_DFSZ;
3678
3679 fcport->cfg.maxfrsize = maxfrsize;
3680 return BFA_STATUS_OK;
3681}
3682
3683u16
3684bfa_fcport_get_maxfrsize(struct bfa_s *bfa)
3685{
3686 struct bfa_fcport_s *fcport = BFA_FCPORT_MOD(bfa);
3687
3688 return fcport->cfg.maxfrsize;
3689}
3690
3691u8
3692bfa_fcport_get_rx_bbcredit(struct bfa_s *bfa)
3693{
3694 struct bfa_fcport_s *fcport = BFA_FCPORT_MOD(bfa);
3695
3696 return fcport->cfg.rx_bbcredit;
3697}
3698
3699void
Krishna Gudipatibe540a92011-06-13 15:53:04 -07003700bfa_fcport_set_tx_bbcredit(struct bfa_s *bfa, u16 tx_bbcredit, u8 bb_scn)
Krishna Gudipatia36c61f2010-09-15 11:50:55 -07003701{
3702 struct bfa_fcport_s *fcport = BFA_FCPORT_MOD(bfa);
3703
3704 fcport->cfg.tx_bbcredit = (u8)tx_bbcredit;
Krishna Gudipatibe540a92011-06-13 15:53:04 -07003705 fcport->cfg.bb_scn = bb_scn;
3706 if (bb_scn)
3707 fcport->bbsc_op_state = BFA_TRUE;
Krishna Gudipatia36c61f2010-09-15 11:50:55 -07003708 bfa_fcport_send_txcredit(fcport);
3709}
3710
Jing Huang5fbe25c2010-10-18 17:17:23 -07003711/*
Krishna Gudipatia36c61f2010-09-15 11:50:55 -07003712 * Get port attributes.
3713 */
3714
3715wwn_t
3716bfa_fcport_get_wwn(struct bfa_s *bfa, bfa_boolean_t node)
3717{
3718 struct bfa_fcport_s *fcport = BFA_FCPORT_MOD(bfa);
3719 if (node)
3720 return fcport->nwwn;
3721 else
3722 return fcport->pwwn;
3723}
3724
/*
 * Fill @attr with a snapshot of the port's configuration and
 * operational attributes (WWNs, speed, topology, beacon state and
 * the current state-machine derived port state).
 */
void
bfa_fcport_get_attr(struct bfa_s *bfa, struct bfa_port_attr_s *attr)
{
	struct bfa_fcport_s *fcport = BFA_FCPORT_MOD(bfa);

	memset(attr, 0, sizeof(struct bfa_port_attr_s));

	attr->nwwn = fcport->nwwn;
	attr->pwwn = fcport->pwwn;

	/* factory-programmed WWNs come from the IOC attributes */
	attr->factorypwwn = bfa->ioc.attr->mfg_pwwn;
	attr->factorynwwn = bfa->ioc.attr->mfg_nwwn;

	/* copy the whole port config, then overlay live values below */
	memcpy(&attr->pport_cfg, &fcport->cfg,
		sizeof(struct bfa_port_cfg_s));
	/* speed attributes */
	attr->pport_cfg.speed = fcport->cfg.speed;
	attr->speed_supported = fcport->speed_sup;
	attr->speed = fcport->speed;
	attr->cos_supported = FC_CLASS_3;

	/* topology attributes */
	attr->pport_cfg.topology = fcport->cfg.topology;
	attr->topology = fcport->topology;
	attr->pport_cfg.trunked = fcport->cfg.trunked;

	/* beacon attributes */
	attr->beacon = fcport->beacon;
	attr->link_e2e_beacon = fcport->link_e2e_beacon;

	attr->pport_cfg.path_tov = bfa_fcpim_path_tov_get(bfa);
	attr->pport_cfg.q_depth = bfa_fcpim_qdepth_get(bfa);
	attr->port_state = bfa_sm_to_state(hal_port_sm_table, fcport->sm);
	attr->bbsc_op_status = fcport->bbsc_op_state;

	/*
	 * Override port_state for special conditions; PBC-disabled takes
	 * precedence over IOC disabled / fw mismatch / address acquisition.
	 */
	if (bfa_fcport_is_pbcdisabled(bfa))
		attr->port_state = BFA_PORT_ST_PREBOOT_DISABLED;
	else {
		if (bfa_ioc_is_disabled(&fcport->bfa->ioc))
			attr->port_state = BFA_PORT_ST_IOCDIS;
		else if (bfa_ioc_fw_mismatch(&fcport->bfa->ioc))
			attr->port_state = BFA_PORT_ST_FWMISMATCH;
		else if (bfa_ioc_is_acq_addr(&fcport->bfa->ioc))
			attr->port_state = BFA_PORT_ST_ACQ_ADDR;
	}

	/* FCoE vlan */
	attr->fcoe_vlan = fcport->fcoe_vlan;
}
3775
3776#define BFA_FCPORT_STATS_TOV 1000
3777
Jing Huang5fbe25c2010-10-18 17:17:23 -07003778/*
Krishna Gudipatia36c61f2010-09-15 11:50:55 -07003779 * Fetch port statistics (FCQoS or FCoE).
3780 */
3781bfa_status_t
3782bfa_fcport_get_stats(struct bfa_s *bfa, union bfa_fcport_stats_u *stats,
3783 bfa_cb_port_t cbfn, void *cbarg)
3784{
3785 struct bfa_fcport_s *fcport = BFA_FCPORT_MOD(bfa);
3786
3787 if (fcport->stats_busy) {
3788 bfa_trc(bfa, fcport->stats_busy);
3789 return BFA_STATUS_DEVBUSY;
3790 }
3791
3792 fcport->stats_busy = BFA_TRUE;
3793 fcport->stats_ret = stats;
3794 fcport->stats_cbfn = cbfn;
3795 fcport->stats_cbarg = cbarg;
3796
3797 bfa_fcport_send_stats_get(fcport);
3798
3799 bfa_timer_start(bfa, &fcport->timer, bfa_fcport_stats_get_timeout,
3800 fcport, BFA_FCPORT_STATS_TOV);
3801 return BFA_STATUS_OK;
3802}
3803
Jing Huang5fbe25c2010-10-18 17:17:23 -07003804/*
Krishna Gudipatia36c61f2010-09-15 11:50:55 -07003805 * Reset port statistics (FCQoS or FCoE).
3806 */
3807bfa_status_t
3808bfa_fcport_clear_stats(struct bfa_s *bfa, bfa_cb_port_t cbfn, void *cbarg)
3809{
3810 struct bfa_fcport_s *fcport = BFA_FCPORT_MOD(bfa);
3811
3812 if (fcport->stats_busy) {
3813 bfa_trc(bfa, fcport->stats_busy);
3814 return BFA_STATUS_DEVBUSY;
3815 }
3816
3817 fcport->stats_busy = BFA_TRUE;
3818 fcport->stats_cbfn = cbfn;
3819 fcport->stats_cbarg = cbarg;
3820
3821 bfa_fcport_send_stats_clear(fcport);
3822
3823 bfa_timer_start(bfa, &fcport->timer, bfa_fcport_stats_clr_timeout,
3824 fcport, BFA_FCPORT_STATS_TOV);
3825 return BFA_STATUS_OK;
3826}
3827
Krishna Gudipatia36c61f2010-09-15 11:50:55 -07003828
Jing Huang5fbe25c2010-10-18 17:17:23 -07003829/*
Krishna Gudipatia36c61f2010-09-15 11:50:55 -07003830 * Fetch port attributes.
3831 */
3832bfa_boolean_t
3833bfa_fcport_is_disabled(struct bfa_s *bfa)
3834{
3835 struct bfa_fcport_s *fcport = BFA_FCPORT_MOD(bfa);
3836
3837 return bfa_sm_to_state(hal_port_sm_table, fcport->sm) ==
3838 BFA_PORT_ST_DISABLED;
3839
3840}
3841
3842bfa_boolean_t
3843bfa_fcport_is_ratelim(struct bfa_s *bfa)
3844{
3845 struct bfa_fcport_s *fcport = BFA_FCPORT_MOD(bfa);
3846
3847 return fcport->cfg.ratelimit ? BFA_TRUE : BFA_FALSE;
3848
3849}
3850
Jing Huang5fbe25c2010-10-18 17:17:23 -07003851/*
Krishna Gudipatia7141342011-06-24 20:23:19 -07003852 * Enable/Disable FAA feature in port config
3853 */
3854void
3855bfa_fcport_cfg_faa(struct bfa_s *bfa, u8 state)
3856{
3857 struct bfa_fcport_s *fcport = BFA_FCPORT_MOD(bfa);
3858
3859 bfa_trc(bfa, state);
3860 fcport->cfg.faa_state = state;
3861}
3862
3863/*
Krishna Gudipatia36c61f2010-09-15 11:50:55 -07003864 * Get default minimum ratelim speed
3865 */
3866enum bfa_port_speed
3867bfa_fcport_get_ratelim_speed(struct bfa_s *bfa)
3868{
3869 struct bfa_fcport_s *fcport = BFA_FCPORT_MOD(bfa);
3870
3871 bfa_trc(bfa, fcport->cfg.trl_def_speed);
3872 return fcport->cfg.trl_def_speed;
3873
3874}
Krishna Gudipatia36c61f2010-09-15 11:50:55 -07003875
Krishna Gudipati3d7fc662011-06-24 20:28:17 -07003876void
3877bfa_fcport_beacon(void *dev, bfa_boolean_t beacon,
3878 bfa_boolean_t link_e2e_beacon)
3879{
3880 struct bfa_s *bfa = dev;
3881 struct bfa_fcport_s *fcport = BFA_FCPORT_MOD(bfa);
3882
3883 bfa_trc(bfa, beacon);
3884 bfa_trc(bfa, link_e2e_beacon);
3885 bfa_trc(bfa, fcport->beacon);
3886 bfa_trc(bfa, fcport->link_e2e_beacon);
3887
3888 fcport->beacon = beacon;
3889 fcport->link_e2e_beacon = link_e2e_beacon;
3890}
3891
Krishna Gudipatia36c61f2010-09-15 11:50:55 -07003892bfa_boolean_t
3893bfa_fcport_is_linkup(struct bfa_s *bfa)
3894{
3895 struct bfa_fcport_s *fcport = BFA_FCPORT_MOD(bfa);
3896
3897 return (!fcport->cfg.trunked &&
3898 bfa_sm_cmp_state(fcport, bfa_fcport_sm_linkup)) ||
3899 (fcport->cfg.trunked &&
3900 fcport->trunk.attr.state == BFA_TRUNK_ONLINE);
3901}
3902
3903bfa_boolean_t
3904bfa_fcport_is_qos_enabled(struct bfa_s *bfa)
3905{
3906 struct bfa_fcport_s *fcport = BFA_FCPORT_MOD(bfa);
3907
3908 return fcport->cfg.qos_enabled;
3909}
3910
Krishna Gudipatibe540a92011-06-13 15:53:04 -07003911bfa_boolean_t
3912bfa_fcport_is_trunk_enabled(struct bfa_s *bfa)
3913{
3914 struct bfa_fcport_s *fcport = BFA_FCPORT_MOD(bfa);
3915
3916 return fcport->cfg.trunked;
3917}
3918
Jing Huang5fbe25c2010-10-18 17:17:23 -07003919/*
Krishna Gudipatia36c61f2010-09-15 11:50:55 -07003920 * Rport State machine functions
3921 */
/*
 * Beginning state; only a create event is expected.  (Note: the old
 * comment said "online event" -- the code only handles SM_CREATE.)
 */
static void
bfa_rport_sm_uninit(struct bfa_rport_s *rp, enum bfa_rport_event event)
{
	bfa_trc(rp->bfa, rp->rport_tag);
	bfa_trc(rp->bfa, event);

	switch (event) {
	case BFA_RPORT_SM_CREATE:
		bfa_stats(rp, sm_un_cr);
		bfa_sm_set_state(rp, bfa_rport_sm_created);
		break;

	default:
		/* any other event is a state machine violation */
		bfa_stats(rp, sm_un_unexp);
		bfa_sm_fault(rp->bfa, event);
	}
}
3942
/*
 * Rport has been created; waiting to be brought online (which sends
 * the firmware create request), deleted, or hit by an IOC failure.
 */
static void
bfa_rport_sm_created(struct bfa_rport_s *rp, enum bfa_rport_event event)
{
	bfa_trc(rp->bfa, rp->rport_tag);
	bfa_trc(rp->bfa, event);

	switch (event) {
	case BFA_RPORT_SM_ONLINE:
		bfa_stats(rp, sm_cr_on);
		/* queue-full falls back to the qfull wait state */
		if (bfa_rport_send_fwcreate(rp))
			bfa_sm_set_state(rp, bfa_rport_sm_fwcreate);
		else
			bfa_sm_set_state(rp, bfa_rport_sm_fwcreate_qfull);
		break;

	case BFA_RPORT_SM_DELETE:
		bfa_stats(rp, sm_cr_del);
		bfa_sm_set_state(rp, bfa_rport_sm_uninit);
		bfa_rport_free(rp);
		break;

	case BFA_RPORT_SM_HWFAIL:
		bfa_stats(rp, sm_cr_hwf);
		bfa_sm_set_state(rp, bfa_rport_sm_iocdisable);
		break;

	default:
		bfa_stats(rp, sm_cr_unexp);
		bfa_sm_fault(rp->bfa, event);
	}
}
3974
/*
 * Waiting for rport create response from firmware.
 */
static void
bfa_rport_sm_fwcreate(struct bfa_rport_s *rp, enum bfa_rport_event event)
{
	bfa_trc(rp->bfa, rp->rport_tag);
	bfa_trc(rp->bfa, event);

	switch (event) {
	case BFA_RPORT_SM_FWRSP:
		bfa_stats(rp, sm_fwc_rsp);
		/* set state before the callback: it may re-enter the SM */
		bfa_sm_set_state(rp, bfa_rport_sm_online);
		bfa_rport_online_cb(rp);
		break;

	case BFA_RPORT_SM_DELETE:
		/* remember the delete until the create response arrives */
		bfa_stats(rp, sm_fwc_del);
		bfa_sm_set_state(rp, bfa_rport_sm_delete_pending);
		break;

	case BFA_RPORT_SM_OFFLINE:
		/* remember the offline until the create response arrives */
		bfa_stats(rp, sm_fwc_off);
		bfa_sm_set_state(rp, bfa_rport_sm_offline_pending);
		break;

	case BFA_RPORT_SM_HWFAIL:
		bfa_stats(rp, sm_fwc_hwf);
		bfa_sm_set_state(rp, bfa_rport_sm_iocdisable);
		break;

	default:
		bfa_stats(rp, sm_fwc_unexp);
		bfa_sm_fault(rp->bfa, event);
	}
}
4011
/*
 * Request queue is full, awaiting queue resume to send create request.
 */
static void
bfa_rport_sm_fwcreate_qfull(struct bfa_rport_s *rp, enum bfa_rport_event event)
{
	bfa_trc(rp->bfa, rp->rport_tag);
	bfa_trc(rp->bfa, event);

	switch (event) {
	case BFA_RPORT_SM_QRESUME:
		/* queue space available: retry the firmware create */
		bfa_sm_set_state(rp, bfa_rport_sm_fwcreate);
		bfa_rport_send_fwcreate(rp);
		break;

	case BFA_RPORT_SM_DELETE:
		/* create never reached firmware: cancel wait and free */
		bfa_stats(rp, sm_fwc_del);
		bfa_sm_set_state(rp, bfa_rport_sm_uninit);
		bfa_reqq_wcancel(&rp->reqq_wait);
		bfa_rport_free(rp);
		break;

	case BFA_RPORT_SM_OFFLINE:
		bfa_stats(rp, sm_fwc_off);
		bfa_sm_set_state(rp, bfa_rport_sm_offline);
		bfa_reqq_wcancel(&rp->reqq_wait);
		bfa_rport_offline_cb(rp);
		break;

	case BFA_RPORT_SM_HWFAIL:
		bfa_stats(rp, sm_fwc_hwf);
		bfa_sm_set_state(rp, bfa_rport_sm_iocdisable);
		bfa_reqq_wcancel(&rp->reqq_wait);
		break;

	default:
		bfa_stats(rp, sm_fwc_unexp);
		bfa_sm_fault(rp->bfa, event);
	}
}
4052
/*
 * Online state - normal parking state.
 */
static void
bfa_rport_sm_online(struct bfa_rport_s *rp, enum bfa_rport_event event)
{
	struct bfi_rport_qos_scn_s *qos_scn;

	bfa_trc(rp->bfa, rp->rport_tag);
	bfa_trc(rp->bfa, event);

	switch (event) {
	case BFA_RPORT_SM_OFFLINE:
		bfa_stats(rp, sm_on_off);
		if (bfa_rport_send_fwdelete(rp))
			bfa_sm_set_state(rp, bfa_rport_sm_fwdelete);
		else
			bfa_sm_set_state(rp, bfa_rport_sm_fwdelete_qfull);
		break;

	case BFA_RPORT_SM_DELETE:
		bfa_stats(rp, sm_on_del);
		if (bfa_rport_send_fwdelete(rp))
			bfa_sm_set_state(rp, bfa_rport_sm_deleting);
		else
			bfa_sm_set_state(rp, bfa_rport_sm_deleting_qfull);
		break;

	case BFA_RPORT_SM_HWFAIL:
		bfa_stats(rp, sm_on_hwf);
		bfa_sm_set_state(rp, bfa_rport_sm_iocdisable);
		break;

	case BFA_RPORT_SM_SET_SPEED:
		/* speed change does not leave the online state */
		bfa_rport_send_fwspeed(rp);
		break;

	case BFA_RPORT_SM_QOS_SCN:
		/* firmware QoS state-change notification */
		qos_scn = (struct bfi_rport_qos_scn_s *) rp->event_arg.fw_msg;
		rp->qos_attr = qos_scn->new_qos_attr;
		bfa_trc(rp->bfa, qos_scn->old_qos_attr.qos_flow_id);
		bfa_trc(rp->bfa, qos_scn->new_qos_attr.qos_flow_id);
		bfa_trc(rp->bfa, qos_scn->old_qos_attr.qos_priority);
		bfa_trc(rp->bfa, qos_scn->new_qos_attr.qos_priority);

		/* flow ids arrive big-endian; convert before comparing */
		qos_scn->old_qos_attr.qos_flow_id =
			be32_to_cpu(qos_scn->old_qos_attr.qos_flow_id);
		qos_scn->new_qos_attr.qos_flow_id =
			be32_to_cpu(qos_scn->new_qos_attr.qos_flow_id);

		/* notify the driver only about actual changes */
		if (qos_scn->old_qos_attr.qos_flow_id !=
			qos_scn->new_qos_attr.qos_flow_id)
			bfa_cb_rport_qos_scn_flowid(rp->rport_drv,
						    qos_scn->old_qos_attr,
						    qos_scn->new_qos_attr);
		if (qos_scn->old_qos_attr.qos_priority !=
			qos_scn->new_qos_attr.qos_priority)
			bfa_cb_rport_qos_scn_prio(rp->rport_drv,
						  qos_scn->old_qos_attr,
						  qos_scn->new_qos_attr);
		break;

	default:
		bfa_stats(rp, sm_on_unexp);
		bfa_sm_fault(rp->bfa, event);
	}
}
4120
/*
 * Firmware rport is being deleted - awaiting f/w response.
 */
static void
bfa_rport_sm_fwdelete(struct bfa_rport_s *rp, enum bfa_rport_event event)
{
	bfa_trc(rp->bfa, rp->rport_tag);
	bfa_trc(rp->bfa, event);

	switch (event) {
	case BFA_RPORT_SM_FWRSP:
		bfa_stats(rp, sm_fwd_rsp);
		/* set state before the callback: it may re-enter the SM */
		bfa_sm_set_state(rp, bfa_rport_sm_offline);
		bfa_rport_offline_cb(rp);
		break;

	case BFA_RPORT_SM_DELETE:
		/* upgrade the pending offline to a full delete */
		bfa_stats(rp, sm_fwd_del);
		bfa_sm_set_state(rp, bfa_rport_sm_deleting);
		break;

	case BFA_RPORT_SM_HWFAIL:
		bfa_stats(rp, sm_fwd_hwf);
		bfa_sm_set_state(rp, bfa_rport_sm_iocdisable);
		bfa_rport_offline_cb(rp);
		break;

	default:
		bfa_stats(rp, sm_fwd_unexp);
		bfa_sm_fault(rp->bfa, event);
	}
}
4153
/*
 * Firmware delete is pending but the request queue was full; waiting
 * for queue resume to send the delete request.
 */
static void
bfa_rport_sm_fwdelete_qfull(struct bfa_rport_s *rp, enum bfa_rport_event event)
{
	bfa_trc(rp->bfa, rp->rport_tag);
	bfa_trc(rp->bfa, event);

	switch (event) {
	case BFA_RPORT_SM_QRESUME:
		/* queue space available: retry the firmware delete */
		bfa_sm_set_state(rp, bfa_rport_sm_fwdelete);
		bfa_rport_send_fwdelete(rp);
		break;

	case BFA_RPORT_SM_DELETE:
		bfa_stats(rp, sm_fwd_del);
		bfa_sm_set_state(rp, bfa_rport_sm_deleting_qfull);
		break;

	case BFA_RPORT_SM_HWFAIL:
		bfa_stats(rp, sm_fwd_hwf);
		bfa_sm_set_state(rp, bfa_rport_sm_iocdisable);
		bfa_reqq_wcancel(&rp->reqq_wait);
		bfa_rport_offline_cb(rp);
		break;

	default:
		bfa_stats(rp, sm_fwd_unexp);
		bfa_sm_fault(rp->bfa, event);
	}
}
4183
/*
 * Offline state.
 */
static void
bfa_rport_sm_offline(struct bfa_rport_s *rp, enum bfa_rport_event event)
{
	bfa_trc(rp->bfa, rp->rport_tag);
	bfa_trc(rp->bfa, event);

	switch (event) {
	case BFA_RPORT_SM_DELETE:
		bfa_stats(rp, sm_off_del);
		bfa_sm_set_state(rp, bfa_rport_sm_uninit);
		bfa_rport_free(rp);
		break;

	case BFA_RPORT_SM_ONLINE:
		bfa_stats(rp, sm_off_on);
		/* queue-full falls back to the qfull wait state */
		if (bfa_rport_send_fwcreate(rp))
			bfa_sm_set_state(rp, bfa_rport_sm_fwcreate);
		else
			bfa_sm_set_state(rp, bfa_rport_sm_fwcreate_qfull);
		break;

	case BFA_RPORT_SM_HWFAIL:
		bfa_stats(rp, sm_off_hwf);
		bfa_sm_set_state(rp, bfa_rport_sm_iocdisable);
		break;

	default:
		bfa_stats(rp, sm_off_unexp);
		bfa_sm_fault(rp->bfa, event);
	}
}
4218
/*
 * Rport is deleted, waiting for firmware response to delete.
 */
static void
bfa_rport_sm_deleting(struct bfa_rport_s *rp, enum bfa_rport_event event)
{
	bfa_trc(rp->bfa, rp->rport_tag);
	bfa_trc(rp->bfa, event);

	switch (event) {
	case BFA_RPORT_SM_FWRSP:
		bfa_stats(rp, sm_del_fwrsp);
		bfa_sm_set_state(rp, bfa_rport_sm_uninit);
		bfa_rport_free(rp);
		break;

	case BFA_RPORT_SM_HWFAIL:
		/* no f/w response will come; free the rport directly */
		bfa_stats(rp, sm_del_hwf);
		bfa_sm_set_state(rp, bfa_rport_sm_uninit);
		bfa_rport_free(rp);
		break;

	default:
		bfa_sm_fault(rp->bfa, event);
	}
}
4245
/*
 * Rport delete is pending but the request queue was full; waiting for
 * queue resume to send the firmware delete.
 */
static void
bfa_rport_sm_deleting_qfull(struct bfa_rport_s *rp, enum bfa_rport_event event)
{
	bfa_trc(rp->bfa, rp->rport_tag);
	bfa_trc(rp->bfa, event);

	switch (event) {
	case BFA_RPORT_SM_QRESUME:
		/* queue space available: send the firmware delete */
		bfa_stats(rp, sm_del_fwrsp);
		bfa_sm_set_state(rp, bfa_rport_sm_deleting);
		bfa_rport_send_fwdelete(rp);
		break;

	case BFA_RPORT_SM_HWFAIL:
		/* cancel the queue wait and free the rport directly */
		bfa_stats(rp, sm_del_hwf);
		bfa_sm_set_state(rp, bfa_rport_sm_uninit);
		bfa_reqq_wcancel(&rp->reqq_wait);
		bfa_rport_free(rp);
		break;

	default:
		bfa_sm_fault(rp->bfa, event);
	}
}
4270
/*
 * Waiting for rport create response from firmware. A delete is pending.
 */
static void
bfa_rport_sm_delete_pending(struct bfa_rport_s *rp,
				enum bfa_rport_event event)
{
	bfa_trc(rp->bfa, rp->rport_tag);
	bfa_trc(rp->bfa, event);

	switch (event) {
	case BFA_RPORT_SM_FWRSP:
		/* create completed: now issue the deferred delete */
		bfa_stats(rp, sm_delp_fwrsp);
		if (bfa_rport_send_fwdelete(rp))
			bfa_sm_set_state(rp, bfa_rport_sm_deleting);
		else
			bfa_sm_set_state(rp, bfa_rport_sm_deleting_qfull);
		break;

	case BFA_RPORT_SM_HWFAIL:
		bfa_stats(rp, sm_delp_hwf);
		bfa_sm_set_state(rp, bfa_rport_sm_uninit);
		bfa_rport_free(rp);
		break;

	default:
		bfa_stats(rp, sm_delp_unexp);
		bfa_sm_fault(rp->bfa, event);
	}
}
4301
/*
 * Waiting for rport create response from firmware. Rport offline is pending.
 */
static void
bfa_rport_sm_offline_pending(struct bfa_rport_s *rp,
			     enum bfa_rport_event event)
{
	bfa_trc(rp->bfa, rp->rport_tag);
	bfa_trc(rp->bfa, event);

	switch (event) {
	case BFA_RPORT_SM_FWRSP:
		/* create completed: now issue the deferred offline/delete */
		bfa_stats(rp, sm_offp_fwrsp);
		if (bfa_rport_send_fwdelete(rp))
			bfa_sm_set_state(rp, bfa_rport_sm_fwdelete);
		else
			bfa_sm_set_state(rp, bfa_rport_sm_fwdelete_qfull);
		break;

	case BFA_RPORT_SM_DELETE:
		/* upgrade the pending offline to a full delete */
		bfa_stats(rp, sm_offp_del);
		bfa_sm_set_state(rp, bfa_rport_sm_delete_pending);
		break;

	case BFA_RPORT_SM_HWFAIL:
		bfa_stats(rp, sm_offp_hwf);
		bfa_sm_set_state(rp, bfa_rport_sm_iocdisable);
		break;

	default:
		bfa_stats(rp, sm_offp_unexp);
		bfa_sm_fault(rp->bfa, event);
	}
}
4336
/*
 * IOC h/w failed.
 */
static void
bfa_rport_sm_iocdisable(struct bfa_rport_s *rp, enum bfa_rport_event event)
{
	bfa_trc(rp->bfa, rp->rport_tag);
	bfa_trc(rp->bfa, event);

	switch (event) {
	case BFA_RPORT_SM_OFFLINE:
		/* stay in iocdisable; just notify the driver */
		bfa_stats(rp, sm_iocd_off);
		bfa_rport_offline_cb(rp);
		break;

	case BFA_RPORT_SM_DELETE:
		bfa_stats(rp, sm_iocd_del);
		bfa_sm_set_state(rp, bfa_rport_sm_uninit);
		bfa_rport_free(rp);
		break;

	case BFA_RPORT_SM_ONLINE:
		/* IOC recovered: re-create the firmware rport */
		bfa_stats(rp, sm_iocd_on);
		if (bfa_rport_send_fwcreate(rp))
			bfa_sm_set_state(rp, bfa_rport_sm_fwcreate);
		else
			bfa_sm_set_state(rp, bfa_rport_sm_fwcreate_qfull);
		break;

	case BFA_RPORT_SM_HWFAIL:
		/* already in the failed state; ignore repeats */
		break;

	default:
		bfa_stats(rp, sm_iocd_unexp);
		bfa_sm_fault(rp->bfa, event);
	}
}
4374
4375
4376
Jing Huang5fbe25c2010-10-18 17:17:23 -07004377/*
Krishna Gudipatia36c61f2010-09-15 11:50:55 -07004378 * bfa_rport_private BFA rport private functions
4379 */
4380
4381static void
4382__bfa_cb_rport_online(void *cbarg, bfa_boolean_t complete)
4383{
4384 struct bfa_rport_s *rp = cbarg;
4385
4386 if (complete)
4387 bfa_cb_rport_online(rp->rport_drv);
4388}
4389
4390static void
4391__bfa_cb_rport_offline(void *cbarg, bfa_boolean_t complete)
4392{
4393 struct bfa_rport_s *rp = cbarg;
4394
4395 if (complete)
4396 bfa_cb_rport_offline(rp->rport_drv);
4397}
4398
4399static void
4400bfa_rport_qresume(void *cbarg)
4401{
4402 struct bfa_rport_s *rp = cbarg;
4403
4404 bfa_sm_send_event(rp, BFA_RPORT_SM_QRESUME);
4405}
4406
/*
 * Report the KVA memory the rport module needs: one bfa_rport_s per
 * configured rport, with the count clamped up to BFA_RPORT_MIN.
 */
static void
bfa_rport_meminfo(struct bfa_iocfc_cfg_s *cfg, struct bfa_meminfo_s *minfo,
		struct bfa_s *bfa)
{
	struct bfa_mem_kva_s *rport_kva = BFA_MEM_RPORT_KVA(bfa);

	if (cfg->fwcfg.num_rports < BFA_RPORT_MIN)
		cfg->fwcfg.num_rports = BFA_RPORT_MIN;

	/* kva memory */
	bfa_mem_kva_setup(minfo, rport_kva,
		cfg->fwcfg.num_rports * sizeof(struct bfa_rport_s));
}
4420
/*
 * Module attach: carve the rport array out of the pre-reserved KVA
 * block, initialize every rport into the uninit state and build the
 * free list.
 */
static void
bfa_rport_attach(struct bfa_s *bfa, void *bfad, struct bfa_iocfc_cfg_s *cfg,
		struct bfa_pcidev_s *pcidev)
{
	struct bfa_rport_mod_s *mod = BFA_RPORT_MOD(bfa);
	struct bfa_rport_s *rp;
	u16 i;

	INIT_LIST_HEAD(&mod->rp_free_q);
	INIT_LIST_HEAD(&mod->rp_active_q);
	INIT_LIST_HEAD(&mod->rp_unused_q);

	rp = (struct bfa_rport_s *) bfa_mem_kva_curp(mod);
	mod->rps_list = rp;
	mod->num_rports = cfg->fwcfg.num_rports;

	/* the count must be a non-zero power of two */
	WARN_ON(!mod->num_rports ||
		(mod->num_rports & (mod->num_rports - 1)));

	for (i = 0; i < mod->num_rports; i++, rp++) {
		memset(rp, 0, sizeof(struct bfa_rport_s));
		rp->bfa = bfa;
		rp->rport_tag = i;
		bfa_sm_set_state(rp, bfa_rport_sm_uninit);

		/*
		 * rport tag 0 is kept off the free list (unused)
		 */
		if (i)
			list_add_tail(&rp->qe, &mod->rp_free_q);

		bfa_reqq_winit(&rp->reqq_wait, bfa_rport_qresume, rp);
	}

	/*
	 * consume memory: advance the KVA cursor past the array
	 */
	bfa_mem_kva_curp(mod) = (u8 *) rp;
}
4460
/*
 * Module detach hook -- intentionally empty; the rport module owns no
 * resources that need explicit teardown here.
 */
static void
bfa_rport_detach(struct bfa_s *bfa)
{
}
4465
/*
 * Module start hook -- intentionally empty; nothing to do at start.
 */
static void
bfa_rport_start(struct bfa_s *bfa)
{
}
4470
/*
 * Module stop hook -- intentionally empty; nothing to do at stop.
 */
static void
bfa_rport_stop(struct bfa_s *bfa)
{
}
4475
/*
 * IOC is going down: reclaim any rports parked on the unused list and
 * fail every active rport through its state machine.
 */
static void
bfa_rport_iocdisable(struct bfa_s *bfa)
{
	struct bfa_rport_mod_s *mod = BFA_RPORT_MOD(bfa);
	struct bfa_rport_s *rport;
	struct list_head *qe, *qen;

	/* Enqueue unused rport resources to free_q */
	list_splice_tail_init(&mod->rp_unused_q, &mod->rp_free_q);

	/* safe iteration: HWFAIL handling may unlink the entry */
	list_for_each_safe(qe, qen, &mod->rp_active_q) {
		rport = (struct bfa_rport_s *) qe;
		bfa_sm_send_event(rport, BFA_RPORT_SM_HWFAIL);
	}
}
4491
4492static struct bfa_rport_s *
4493bfa_rport_alloc(struct bfa_rport_mod_s *mod)
4494{
4495 struct bfa_rport_s *rport;
4496
4497 bfa_q_deq(&mod->rp_free_q, &rport);
4498 if (rport)
4499 list_add_tail(&rport->qe, &mod->rp_active_q);
4500
4501 return rport;
4502}
4503
4504static void
4505bfa_rport_free(struct bfa_rport_s *rport)
4506{
4507 struct bfa_rport_mod_s *mod = BFA_RPORT_MOD(rport->bfa);
4508
Jing Huangd4b671c2010-12-26 21:46:35 -08004509 WARN_ON(!bfa_q_is_on_q(&mod->rp_active_q, rport));
Krishna Gudipatia36c61f2010-09-15 11:50:55 -07004510 list_del(&rport->qe);
4511 list_add_tail(&rport->qe, &mod->rp_free_q);
4512}
4513
/*
 * Build and queue a firmware rport-create request.  Returns BFA_FALSE
 * (after registering a queue-resume wait) when the request queue is
 * full, BFA_TRUE once the message is queued.
 */
static bfa_boolean_t
bfa_rport_send_fwcreate(struct bfa_rport_s *rp)
{
	struct bfi_rport_create_req_s *m;

	/*
	 * check for room in queue to send request now
	 */
	m = bfa_reqq_next(rp->bfa, BFA_REQQ_RPORT);
	if (!m) {
		bfa_reqq_wait(rp->bfa, BFA_REQQ_RPORT, &rp->reqq_wait);
		return BFA_FALSE;
	}

	bfi_h2i_set(m->mh, BFI_MC_RPORT, BFI_RPORT_H2I_CREATE_REQ,
			bfa_fn_lpu(rp->bfa));
	m->bfa_handle = rp->rport_tag;
	/* wire format is big-endian */
	m->max_frmsz = cpu_to_be16(rp->rport_info.max_frmsz);
	m->pid = rp->rport_info.pid;
	m->lp_fwtag = bfa_lps_get_fwtag(rp->bfa, (u8)rp->rport_info.lp_tag);
	m->local_pid = rp->rport_info.local_pid;
	m->fc_class = rp->rport_info.fc_class;
	m->vf_en = rp->rport_info.vf_en;
	m->vf_id = rp->rport_info.vf_id;
	m->cisc = rp->rport_info.cisc;

	/*
	 * queue I/O message to firmware
	 */
	bfa_reqq_produce(rp->bfa, BFA_REQQ_RPORT, m->mh);
	return BFA_TRUE;
}
4546
/*
 * Build and queue a firmware rport-delete request.  Returns BFA_FALSE
 * (after registering a queue-resume wait) when the request queue is
 * full, BFA_TRUE once the message is queued.
 */
static bfa_boolean_t
bfa_rport_send_fwdelete(struct bfa_rport_s *rp)
{
	struct bfi_rport_delete_req_s *m;

	/*
	 * check for room in queue to send request now
	 */
	m = bfa_reqq_next(rp->bfa, BFA_REQQ_RPORT);
	if (!m) {
		bfa_reqq_wait(rp->bfa, BFA_REQQ_RPORT, &rp->reqq_wait);
		return BFA_FALSE;
	}

	bfi_h2i_set(m->mh, BFI_MC_RPORT, BFI_RPORT_H2I_DELETE_REQ,
			bfa_fn_lpu(rp->bfa));
	m->fw_handle = rp->fw_handle;

	/*
	 * queue I/O message to firmware
	 */
	bfa_reqq_produce(rp->bfa, BFA_REQQ_RPORT, m->mh);
	return BFA_TRUE;
}
4571
/*
 * Build and queue a firmware set-speed request.  Unlike create/delete,
 * a full request queue is not retried: the request is simply dropped
 * (traced) and BFA_FALSE returned.
 */
static bfa_boolean_t
bfa_rport_send_fwspeed(struct bfa_rport_s *rp)
{
	struct bfa_rport_speed_req_s *m;

	/*
	 * check for room in queue to send request now
	 */
	m = bfa_reqq_next(rp->bfa, BFA_REQQ_RPORT);
	if (!m) {
		bfa_trc(rp->bfa, rp->rport_info.speed);
		return BFA_FALSE;
	}

	bfi_h2i_set(m->mh, BFI_MC_RPORT, BFI_RPORT_H2I_SET_SPEED_REQ,
			bfa_fn_lpu(rp->bfa));
	m->fw_handle = rp->fw_handle;
	m->speed = (u8)rp->rport_info.speed;

	/*
	 * queue I/O message to firmware
	 */
	bfa_reqq_produce(rp->bfa, BFA_REQQ_RPORT, m->mh);
	return BFA_TRUE;
}
4597
4598
4599
Jing Huang5fbe25c2010-10-18 17:17:23 -07004600/*
Krishna Gudipatia36c61f2010-09-15 11:50:55 -07004601 * bfa_rport_public
4602 */
4603
/*
 * Rport interrupt processing: dispatch firmware-to-host rport messages
 * to the owning rport's state machine.
 */
void
bfa_rport_isr(struct bfa_s *bfa, struct bfi_msg_s *m)
{
	union bfi_rport_i2h_msg_u msg;
	struct bfa_rport_s *rp;

	bfa_trc(bfa, m->mhdr.msg_id);

	msg.msg = m;

	switch (m->mhdr.msg_id) {
	case BFI_RPORT_I2H_CREATE_RSP:
		rp = BFA_RPORT_FROM_TAG(bfa, msg.create_rsp->bfa_handle);
		/* latch the firmware handle before the SM transition */
		rp->fw_handle = msg.create_rsp->fw_handle;
		rp->qos_attr = msg.create_rsp->qos_attr;
		WARN_ON(msg.create_rsp->status != BFA_STATUS_OK);
		bfa_sm_send_event(rp, BFA_RPORT_SM_FWRSP);
		break;

	case BFI_RPORT_I2H_DELETE_RSP:
		rp = BFA_RPORT_FROM_TAG(bfa, msg.delete_rsp->bfa_handle);
		WARN_ON(msg.delete_rsp->status != BFA_STATUS_OK);
		bfa_sm_send_event(rp, BFA_RPORT_SM_FWRSP);
		break;

	case BFI_RPORT_I2H_QOS_SCN:
		rp = BFA_RPORT_FROM_TAG(bfa, msg.qos_scn_evt->bfa_handle);
		/* stash the raw message; the SM parses it in-place */
		rp->event_arg.fw_msg = msg.qos_scn_evt;
		bfa_sm_send_event(rp, BFA_RPORT_SM_QOS_SCN);
		break;

	default:
		bfa_trc(bfa, m->mhdr.msg_id);
		WARN_ON(1);
	}
}
4643
Krishna Gudipati3fd45982011-06-24 20:24:08 -07004644void
4645bfa_rport_res_recfg(struct bfa_s *bfa, u16 num_rport_fw)
4646{
4647 struct bfa_rport_mod_s *mod = BFA_RPORT_MOD(bfa);
4648 struct list_head *qe;
4649 int i;
Krishna Gudipatia36c61f2010-09-15 11:50:55 -07004650
Krishna Gudipati3fd45982011-06-24 20:24:08 -07004651 for (i = 0; i < (mod->num_rports - num_rport_fw); i++) {
4652 bfa_q_deq_tail(&mod->rp_free_q, &qe);
4653 list_add_tail(qe, &mod->rp_unused_q);
4654 }
4655}
Krishna Gudipatia36c61f2010-09-15 11:50:55 -07004656
Jing Huang5fbe25c2010-10-18 17:17:23 -07004657/*
Krishna Gudipatia36c61f2010-09-15 11:50:55 -07004658 * bfa_rport_api
4659 */
4660
4661struct bfa_rport_s *
4662bfa_rport_create(struct bfa_s *bfa, void *rport_drv)
4663{
4664 struct bfa_rport_s *rp;
4665
4666 rp = bfa_rport_alloc(BFA_RPORT_MOD(bfa));
4667
4668 if (rp == NULL)
4669 return NULL;
4670
4671 rp->bfa = bfa;
4672 rp->rport_drv = rport_drv;
Maggie Zhangf7f738122010-12-09 19:08:43 -08004673 memset(&rp->stats, 0, sizeof(rp->stats));
Krishna Gudipatia36c61f2010-09-15 11:50:55 -07004674
Jing Huangd4b671c2010-12-26 21:46:35 -08004675 WARN_ON(!bfa_sm_cmp_state(rp, bfa_rport_sm_uninit));
Krishna Gudipatia36c61f2010-09-15 11:50:55 -07004676 bfa_sm_send_event(rp, BFA_RPORT_SM_CREATE);
4677
4678 return rp;
4679}
4680
/*
 * Take an rport online: copy the caller-supplied rport parameters into the
 * rport and send the ONLINE event to its state machine.
 */
void
bfa_rport_online(struct bfa_rport_s *rport, struct bfa_rport_info_s *rport_info)
{
	WARN_ON(rport_info->max_frmsz == 0);

	/*
	 * Some JBODs are seen to be not setting PDU size correctly in PLOGI
	 * responses. Default to minimum size.
	 */
	if (rport_info->max_frmsz == 0) {
		bfa_trc(rport->bfa, rport->rport_tag);
		rport_info->max_frmsz = FC_MIN_PDUSZ;
	}

	/* Note: modifies the caller's rport_info in place, then copies it. */
	rport->rport_info = *rport_info;
	bfa_sm_send_event(rport, BFA_RPORT_SM_ONLINE);
}
4698
/*
 * Set the operating speed of an rport and notify its state machine.
 * Callers must pass a concrete speed (not 0 or AUTO).
 */
void
bfa_rport_speed(struct bfa_rport_s *rport, enum bfa_port_speed speed)
{
	WARN_ON(speed == 0);
	WARN_ON(speed == BFA_PORT_SPEED_AUTO);

	rport->rport_info.speed = speed;
	bfa_sm_send_event(rport, BFA_RPORT_SM_SET_SPEED);
}
4708
Krishna Gudipatia36c61f2010-09-15 11:50:55 -07004709
Jing Huang5fbe25c2010-10-18 17:17:23 -07004710/*
Krishna Gudipatia36c61f2010-09-15 11:50:55 -07004711 * SGPG related functions
4712 */
4713
/*
 * Compute and register memory needed by the SGPG (scatter-gather page)
 * module: DMA segments for the firmware SG pages plus kva for the host-side
 * bookkeeping structures. Clamps cfg->drvcfg.num_sgpgs into
 * [BFA_SGPG_MIN, BFA_SGPG_MAX] as a side effect.
 */
static void
bfa_sgpg_meminfo(struct bfa_iocfc_cfg_s *cfg, struct bfa_meminfo_s *minfo,
		struct bfa_s *bfa)
{
	struct bfa_sgpg_mod_s *sgpg_mod = BFA_SGPG_MOD(bfa);
	struct bfa_mem_kva_s *sgpg_kva = BFA_MEM_SGPG_KVA(bfa);
	struct bfa_mem_dma_s *seg_ptr;
	u16	nsegs, idx, per_seg_sgpg, num_sgpg;
	u32	sgpg_sz = sizeof(struct bfi_sgpg_s);

	/* clamp the configured SGPG count to the supported range */
	if (cfg->drvcfg.num_sgpgs < BFA_SGPG_MIN)
		cfg->drvcfg.num_sgpgs = BFA_SGPG_MIN;
	else if (cfg->drvcfg.num_sgpgs > BFA_SGPG_MAX)
		cfg->drvcfg.num_sgpgs = BFA_SGPG_MAX;

	num_sgpg = cfg->drvcfg.num_sgpgs;

	nsegs = BFI_MEM_DMA_NSEGS(num_sgpg, sgpg_sz);
	per_seg_sgpg = BFI_MEM_NREQS_SEG(sgpg_sz);

	/* distribute the SGPG DMA requirement across the segments */
	bfa_mem_dma_seg_iter(sgpg_mod, seg_ptr, nsegs, idx) {
		if (num_sgpg >= per_seg_sgpg) {
			num_sgpg -= per_seg_sgpg;
			bfa_mem_dma_setup(minfo, seg_ptr,
					per_seg_sgpg * sgpg_sz);
		} else
			bfa_mem_dma_setup(minfo, seg_ptr,
					num_sgpg * sgpg_sz);
	}

	/* kva memory */
	bfa_mem_kva_setup(minfo, sgpg_kva,
		cfg->drvcfg.num_sgpgs * sizeof(struct bfa_sgpg_s));
}
4751
Krishna Gudipatia36c61f2010-09-15 11:50:55 -07004752static void
4753bfa_sgpg_attach(struct bfa_s *bfa, void *bfad, struct bfa_iocfc_cfg_s *cfg,
Krishna Gudipati45070252011-06-24 20:24:29 -07004754 struct bfa_pcidev_s *pcidev)
Krishna Gudipatia36c61f2010-09-15 11:50:55 -07004755{
4756 struct bfa_sgpg_mod_s *mod = BFA_SGPG_MOD(bfa);
Krishna Gudipatia36c61f2010-09-15 11:50:55 -07004757 struct bfa_sgpg_s *hsgpg;
4758 struct bfi_sgpg_s *sgpg;
4759 u64 align_len;
Krishna Gudipati45070252011-06-24 20:24:29 -07004760 struct bfa_mem_dma_s *seg_ptr;
4761 u32 sgpg_sz = sizeof(struct bfi_sgpg_s);
4762 u16 i, idx, nsegs, per_seg_sgpg, num_sgpg;
Krishna Gudipatia36c61f2010-09-15 11:50:55 -07004763
4764 union {
4765 u64 pa;
4766 union bfi_addr_u addr;
4767 } sgpg_pa, sgpg_pa_tmp;
4768
4769 INIT_LIST_HEAD(&mod->sgpg_q);
4770 INIT_LIST_HEAD(&mod->sgpg_wait_q);
4771
4772 bfa_trc(bfa, cfg->drvcfg.num_sgpgs);
4773
Krishna Gudipati45070252011-06-24 20:24:29 -07004774 mod->free_sgpgs = mod->num_sgpgs = cfg->drvcfg.num_sgpgs;
Krishna Gudipatia36c61f2010-09-15 11:50:55 -07004775
Krishna Gudipati45070252011-06-24 20:24:29 -07004776 num_sgpg = cfg->drvcfg.num_sgpgs;
4777 nsegs = BFI_MEM_DMA_NSEGS(num_sgpg, sgpg_sz);
Krishna Gudipatia36c61f2010-09-15 11:50:55 -07004778
Krishna Gudipati45070252011-06-24 20:24:29 -07004779 /* dma/kva mem claim */
4780 hsgpg = (struct bfa_sgpg_s *) bfa_mem_kva_curp(mod);
Krishna Gudipatia36c61f2010-09-15 11:50:55 -07004781
Krishna Gudipati45070252011-06-24 20:24:29 -07004782 bfa_mem_dma_seg_iter(mod, seg_ptr, nsegs, idx) {
Krishna Gudipatia36c61f2010-09-15 11:50:55 -07004783
Krishna Gudipati45070252011-06-24 20:24:29 -07004784 if (!bfa_mem_dma_virt(seg_ptr))
4785 break;
Krishna Gudipatia36c61f2010-09-15 11:50:55 -07004786
Krishna Gudipati45070252011-06-24 20:24:29 -07004787 align_len = BFA_SGPG_ROUNDUP(bfa_mem_dma_phys(seg_ptr)) -
4788 bfa_mem_dma_phys(seg_ptr);
4789
4790 sgpg = (struct bfi_sgpg_s *)
4791 (((u8 *) bfa_mem_dma_virt(seg_ptr)) + align_len);
4792 sgpg_pa.pa = bfa_mem_dma_phys(seg_ptr) + align_len;
4793 WARN_ON(sgpg_pa.pa & (sgpg_sz - 1));
4794
4795 per_seg_sgpg = (seg_ptr->mem_len - (u32)align_len) / sgpg_sz;
4796
4797 for (i = 0; num_sgpg > 0 && i < per_seg_sgpg; i++, num_sgpg--) {
4798 memset(hsgpg, 0, sizeof(*hsgpg));
4799 memset(sgpg, 0, sizeof(*sgpg));
4800
4801 hsgpg->sgpg = sgpg;
4802 sgpg_pa_tmp.pa = bfa_sgaddr_le(sgpg_pa.pa);
4803 hsgpg->sgpg_pa = sgpg_pa_tmp.addr;
4804 list_add_tail(&hsgpg->qe, &mod->sgpg_q);
4805
4806 sgpg++;
4807 hsgpg++;
4808 sgpg_pa.pa += sgpg_sz;
4809 }
Krishna Gudipatia36c61f2010-09-15 11:50:55 -07004810 }
4811
Krishna Gudipati45070252011-06-24 20:24:29 -07004812 bfa_mem_kva_curp(mod) = (u8 *) hsgpg;
Krishna Gudipatia36c61f2010-09-15 11:50:55 -07004813}
4814
/* SGPG module detach hook — intentionally empty, nothing to tear down. */
static void
bfa_sgpg_detach(struct bfa_s *bfa)
{
}
4819
/* SGPG module start hook — intentionally empty. */
static void
bfa_sgpg_start(struct bfa_s *bfa)
{
}
4824
/* SGPG module stop hook — intentionally empty. */
static void
bfa_sgpg_stop(struct bfa_s *bfa)
{
}
4829
/* SGPG module IOC-disable hook — intentionally empty. */
static void
bfa_sgpg_iocdisable(struct bfa_s *bfa)
{
}
4834
Krishna Gudipatia36c61f2010-09-15 11:50:55 -07004835bfa_status_t
4836bfa_sgpg_malloc(struct bfa_s *bfa, struct list_head *sgpg_q, int nsgpgs)
4837{
4838 struct bfa_sgpg_mod_s *mod = BFA_SGPG_MOD(bfa);
4839 struct bfa_sgpg_s *hsgpg;
4840 int i;
4841
Krishna Gudipatia36c61f2010-09-15 11:50:55 -07004842 if (mod->free_sgpgs < nsgpgs)
4843 return BFA_STATUS_ENOMEM;
4844
4845 for (i = 0; i < nsgpgs; i++) {
4846 bfa_q_deq(&mod->sgpg_q, &hsgpg);
Jing Huangd4b671c2010-12-26 21:46:35 -08004847 WARN_ON(!hsgpg);
Krishna Gudipatia36c61f2010-09-15 11:50:55 -07004848 list_add_tail(&hsgpg->qe, sgpg_q);
4849 }
4850
4851 mod->free_sgpgs -= nsgpgs;
4852 return BFA_STATUS_OK;
4853}
4854
/*
 * Return nsgpg SG pages from sgpg_q to the module free list, then hand out
 * the newly freed pages to as many waiters as possible, invoking each
 * waiter's callback once its request is fully satisfied.
 */
void
bfa_sgpg_mfree(struct bfa_s *bfa, struct list_head *sgpg_q, int nsgpg)
{
	struct bfa_sgpg_mod_s *mod = BFA_SGPG_MOD(bfa);
	struct bfa_sgpg_wqe_s *wqe;

	mod->free_sgpgs += nsgpg;
	WARN_ON(mod->free_sgpgs > mod->num_sgpgs);

	list_splice_tail_init(sgpg_q, &mod->sgpg_q);

	if (list_empty(&mod->sgpg_wait_q))
		return;

	/*
	 * satisfy as many waiting requests as possible
	 */
	do {
		wqe = bfa_q_first(&mod->sgpg_wait_q);
		/* give the waiter everything it needs, or all we have */
		if (mod->free_sgpgs < wqe->nsgpg)
			nsgpg = mod->free_sgpgs;
		else
			nsgpg = wqe->nsgpg;
		bfa_sgpg_malloc(bfa, &wqe->sgpg_q, nsgpg);
		wqe->nsgpg -= nsgpg;
		if (wqe->nsgpg == 0) {
			/* request fully satisfied — dequeue and notify */
			list_del(&wqe->qe);
			wqe->cbfn(wqe->cbarg);
		}
	} while (mod->free_sgpgs && !list_empty(&mod->sgpg_wait_q));
}
4886
/*
 * Queue a wait element for nsgpg SG pages. Callers use this only when a
 * prior bfa_sgpg_malloc() could not satisfy the request; any pages that are
 * currently free are handed to this waiter immediately, and the remainder is
 * delivered later via bfa_sgpg_mfree().
 */
void
bfa_sgpg_wait(struct bfa_s *bfa, struct bfa_sgpg_wqe_s *wqe, int nsgpg)
{
	struct bfa_sgpg_mod_s *mod = BFA_SGPG_MOD(bfa);

	WARN_ON(nsgpg <= 0);
	/* waiting only makes sense when the free pool cannot cover nsgpg */
	WARN_ON(nsgpg <= mod->free_sgpgs);

	wqe->nsgpg_total = wqe->nsgpg = nsgpg;

	/*
	 * allocate any left to this one first
	 */
	if (mod->free_sgpgs) {
		/*
		 * no one else is waiting for SGPG
		 */
		WARN_ON(!list_empty(&mod->sgpg_wait_q));
		list_splice_tail_init(&mod->sgpg_q, &wqe->sgpg_q);
		wqe->nsgpg -= mod->free_sgpgs;
		mod->free_sgpgs = 0;
	}

	list_add_tail(&wqe->qe, &mod->sgpg_wait_q);
}
4912
/*
 * Cancel a pending SG page wait. Any pages already handed to the waiter
 * (partial allocation) are returned to the free pool.
 */
void
bfa_sgpg_wcancel(struct bfa_s *bfa, struct bfa_sgpg_wqe_s *wqe)
{
	struct bfa_sgpg_mod_s *mod = BFA_SGPG_MOD(bfa);

	WARN_ON(!bfa_q_is_on_q(&mod->sgpg_wait_q, wqe));
	list_del(&wqe->qe);

	if (wqe->nsgpg_total != wqe->nsgpg)
		bfa_sgpg_mfree(bfa, &wqe->sgpg_q,
				   wqe->nsgpg_total - wqe->nsgpg);
}
4925
/*
 * Initialize a SG page wait element with its completion callback; the
 * element is armed later by bfa_sgpg_wait().
 */
void
bfa_sgpg_winit(struct bfa_sgpg_wqe_s *wqe, void (*cbfn) (void *cbarg),
		   void *cbarg)
{
	INIT_LIST_HEAD(&wqe->sgpg_q);
	wqe->cbfn = cbfn;
	wqe->cbarg = cbarg;
}
4934
Jing Huang5fbe25c2010-10-18 17:17:23 -07004935/*
Krishna Gudipatia36c61f2010-09-15 11:50:55 -07004936 * UF related functions
4937 */
4938/*
4939 *****************************************************************************
4940 * Internal functions
4941 *****************************************************************************
4942 */
4943static void
4944__bfa_cb_uf_recv(void *cbarg, bfa_boolean_t complete)
4945{
4946 struct bfa_uf_s *uf = cbarg;
4947 struct bfa_uf_mod_s *ufm = BFA_UF_MOD(uf->bfa);
4948
4949 if (complete)
4950 ufm->ufrecv(ufm->cbarg, uf);
4951}
4952
4953static void
Krishna Gudipati45070252011-06-24 20:24:29 -07004954claim_uf_post_msgs(struct bfa_uf_mod_s *ufm)
Krishna Gudipatia36c61f2010-09-15 11:50:55 -07004955{
4956 struct bfi_uf_buf_post_s *uf_bp_msg;
Krishna Gudipatia36c61f2010-09-15 11:50:55 -07004957 u16 i;
4958 u16 buf_len;
4959
Krishna Gudipati45070252011-06-24 20:24:29 -07004960 ufm->uf_buf_posts = (struct bfi_uf_buf_post_s *) bfa_mem_kva_curp(ufm);
Krishna Gudipatia36c61f2010-09-15 11:50:55 -07004961 uf_bp_msg = ufm->uf_buf_posts;
4962
4963 for (i = 0, uf_bp_msg = ufm->uf_buf_posts; i < ufm->num_ufs;
4964 i++, uf_bp_msg++) {
Jing Huang6a18b162010-10-18 17:08:54 -07004965 memset(uf_bp_msg, 0, sizeof(struct bfi_uf_buf_post_s));
Krishna Gudipatia36c61f2010-09-15 11:50:55 -07004966
4967 uf_bp_msg->buf_tag = i;
4968 buf_len = sizeof(struct bfa_uf_buf_s);
Jing Huangba816ea2010-10-18 17:10:50 -07004969 uf_bp_msg->buf_len = cpu_to_be16(buf_len);
Krishna Gudipatia36c61f2010-09-15 11:50:55 -07004970 bfi_h2i_set(uf_bp_msg->mh, BFI_MC_UF, BFI_UF_H2I_BUF_POST,
Krishna Gudipati3fd45982011-06-24 20:24:08 -07004971 bfa_fn_lpu(ufm->bfa));
Krishna Gudipati85ce9282011-06-13 15:39:36 -07004972 bfa_alen_set(&uf_bp_msg->alen, buf_len, ufm_pbs_pa(ufm, i));
Krishna Gudipatia36c61f2010-09-15 11:50:55 -07004973 }
4974
Jing Huang5fbe25c2010-10-18 17:17:23 -07004975 /*
Krishna Gudipatia36c61f2010-09-15 11:50:55 -07004976 * advance pointer beyond consumed memory
4977 */
Krishna Gudipati45070252011-06-24 20:24:29 -07004978 bfa_mem_kva_curp(ufm) = (u8 *) uf_bp_msg;
Krishna Gudipatia36c61f2010-09-15 11:50:55 -07004979}
4980
/*
 * Claim kva memory for the host-side UF descriptors, bind each descriptor
 * to its per-UF DMA buffer, and queue all of them on the free list.
 */
static void
claim_ufs(struct bfa_uf_mod_s *ufm)
{
	u16 i;
	struct bfa_uf_s *uf;

	/*
	 * Claim block of memory for UF list
	 */
	ufm->uf_list = (struct bfa_uf_s *) bfa_mem_kva_curp(ufm);

	/*
	 * Initialize UFs and queue it in UF free queue
	 */
	for (i = 0, uf = ufm->uf_list; i < ufm->num_ufs; i++, uf++) {
		memset(uf, 0, sizeof(struct bfa_uf_s));
		uf->bfa = ufm->bfa;
		uf->uf_tag = i;
		uf->pb_len = BFA_PER_UF_DMA_SZ;
		uf->buf_kva = bfa_mem_get_dmabuf_kva(ufm, i, BFA_PER_UF_DMA_SZ);
		uf->buf_pa = ufm_pbs_pa(ufm, i);
		list_add_tail(&uf->qe, &ufm->uf_free_q);
	}

	/*
	 * advance memory pointer
	 */
	bfa_mem_kva_curp(ufm) = (u8 *) uf;
}
5010
/* Claim all UF module memory: descriptors first, then BUF_POST templates. */
static void
uf_mem_claim(struct bfa_uf_mod_s *ufm)
{
	claim_ufs(ufm);
	claim_uf_post_msgs(ufm);
}
5017
/*
 * Compute and register memory needed by the UF (unsolicited frame) module:
 * one DMA buffer per UF spread across segments, plus kva for the host
 * descriptors and pre-built BUF_POST messages.
 */
static void
bfa_uf_meminfo(struct bfa_iocfc_cfg_s *cfg, struct bfa_meminfo_s *minfo,
		struct bfa_s *bfa)
{
	struct bfa_uf_mod_s *ufm = BFA_UF_MOD(bfa);
	struct bfa_mem_kva_s *uf_kva = BFA_MEM_UF_KVA(bfa);
	u32	num_ufs = cfg->fwcfg.num_uf_bufs;
	struct bfa_mem_dma_s *seg_ptr;
	u16	nsegs, idx, per_seg_uf = 0;

	nsegs = BFI_MEM_DMA_NSEGS(num_ufs, BFA_PER_UF_DMA_SZ);
	per_seg_uf = BFI_MEM_NREQS_SEG(BFA_PER_UF_DMA_SZ);

	/* distribute the UF DMA requirement across the segments */
	bfa_mem_dma_seg_iter(ufm, seg_ptr, nsegs, idx) {
		if (num_ufs >= per_seg_uf) {
			num_ufs -= per_seg_uf;
			bfa_mem_dma_setup(minfo, seg_ptr,
				per_seg_uf * BFA_PER_UF_DMA_SZ);
		} else
			bfa_mem_dma_setup(minfo, seg_ptr,
				num_ufs * BFA_PER_UF_DMA_SZ);
	}

	/* kva memory */
	bfa_mem_kva_setup(minfo, uf_kva, cfg->fwcfg.num_uf_bufs *
		(sizeof(struct bfa_uf_s) + sizeof(struct bfi_uf_buf_post_s)));
}
5045
/*
 * Attach the UF module: initialize its queues and claim the memory
 * reserved by bfa_uf_meminfo().
 */
static void
bfa_uf_attach(struct bfa_s *bfa, void *bfad, struct bfa_iocfc_cfg_s *cfg,
		struct bfa_pcidev_s *pcidev)
{
	struct bfa_uf_mod_s *ufm = BFA_UF_MOD(bfa);

	ufm->bfa = bfa;
	ufm->num_ufs = cfg->fwcfg.num_uf_bufs;
	INIT_LIST_HEAD(&ufm->uf_free_q);
	INIT_LIST_HEAD(&ufm->uf_posted_q);
	INIT_LIST_HEAD(&ufm->uf_unused_q);

	uf_mem_claim(ufm);
}
5060
/* UF module detach hook — intentionally empty, nothing to tear down. */
static void
bfa_uf_detach(struct bfa_s *bfa)
{
}
5065
/* Dequeue one UF from the free list; returns NULL when the list is empty. */
static struct bfa_uf_s *
bfa_uf_get(struct bfa_uf_mod_s *uf_mod)
{
	struct bfa_uf_s *uf;

	bfa_q_deq(&uf_mod->uf_free_q, &uf);
	return uf;
}
5074
/* Return a UF to the free list. */
static void
bfa_uf_put(struct bfa_uf_mod_s *uf_mod, struct bfa_uf_s *uf)
{
	list_add_tail(&uf->qe, &uf_mod->uf_free_q);
}
5080
/*
 * Post one UF receive buffer to the firmware by copying its pre-built
 * BUF_POST template into the request queue. On success the UF moves to
 * the posted queue; fails with BFA_STATUS_FAILED when the request queue
 * is full (the UF is not queued anywhere in that case).
 */
static bfa_status_t
bfa_uf_post(struct bfa_uf_mod_s *ufm, struct bfa_uf_s *uf)
{
	struct bfi_uf_buf_post_s *uf_post_msg;

	uf_post_msg = bfa_reqq_next(ufm->bfa, BFA_REQQ_FCXP);
	if (!uf_post_msg)
		return BFA_STATUS_FAILED;

	/* template was pre-built in claim_uf_post_msgs() */
	memcpy(uf_post_msg, &ufm->uf_buf_posts[uf->uf_tag],
		      sizeof(struct bfi_uf_buf_post_s));
	bfa_reqq_produce(ufm->bfa, BFA_REQQ_FCXP, uf_post_msg->mh);

	bfa_trc(ufm->bfa, uf->uf_tag);

	list_add_tail(&uf->qe, &ufm->uf_posted_q);
	return BFA_STATUS_OK;
}
5099
5100static void
5101bfa_uf_post_all(struct bfa_uf_mod_s *uf_mod)
5102{
5103 struct bfa_uf_s *uf;
5104
5105 while ((uf = bfa_uf_get(uf_mod)) != NULL) {
5106 if (bfa_uf_post(uf_mod, uf) != BFA_STATUS_OK)
5107 break;
5108 }
5109}
5110
5111static void
5112uf_recv(struct bfa_s *bfa, struct bfi_uf_frm_rcvd_s *m)
5113{
5114 struct bfa_uf_mod_s *ufm = BFA_UF_MOD(bfa);
5115 u16 uf_tag = m->buf_tag;
Krishna Gudipatia36c61f2010-09-15 11:50:55 -07005116 struct bfa_uf_s *uf = &ufm->uf_list[uf_tag];
Krishna Gudipati45070252011-06-24 20:24:29 -07005117 struct bfa_uf_buf_s *uf_buf;
5118 uint8_t *buf;
Krishna Gudipatia36c61f2010-09-15 11:50:55 -07005119 struct fchs_s *fchs;
5120
Krishna Gudipati45070252011-06-24 20:24:29 -07005121 uf_buf = (struct bfa_uf_buf_s *)
5122 bfa_mem_get_dmabuf_kva(ufm, uf_tag, uf->pb_len);
5123 buf = &uf_buf->d[0];
5124
Jing Huangba816ea2010-10-18 17:10:50 -07005125 m->frm_len = be16_to_cpu(m->frm_len);
5126 m->xfr_len = be16_to_cpu(m->xfr_len);
Krishna Gudipatia36c61f2010-09-15 11:50:55 -07005127
5128 fchs = (struct fchs_s *)uf_buf;
5129
5130 list_del(&uf->qe); /* dequeue from posted queue */
5131
5132 uf->data_ptr = buf;
5133 uf->data_len = m->xfr_len;
5134
Jing Huangd4b671c2010-12-26 21:46:35 -08005135 WARN_ON(uf->data_len < sizeof(struct fchs_s));
Krishna Gudipatia36c61f2010-09-15 11:50:55 -07005136
5137 if (uf->data_len == sizeof(struct fchs_s)) {
5138 bfa_plog_fchdr(bfa->plog, BFA_PL_MID_HAL_UF, BFA_PL_EID_RX,
5139 uf->data_len, (struct fchs_s *)buf);
5140 } else {
5141 u32 pld_w0 = *((u32 *) (buf + sizeof(struct fchs_s)));
5142 bfa_plog_fchdr_and_pl(bfa->plog, BFA_PL_MID_HAL_UF,
5143 BFA_PL_EID_RX, uf->data_len,
5144 (struct fchs_s *)buf, pld_w0);
5145 }
5146
5147 if (bfa->fcs)
5148 __bfa_cb_uf_recv(uf, BFA_TRUE);
5149 else
5150 bfa_cb_queue(bfa, &uf->hcb_qe, __bfa_cb_uf_recv, uf);
5151}
5152
/* UF module stop hook — intentionally empty. */
static void
bfa_uf_stop(struct bfa_s *bfa)
{
}
5157
/*
 * IOC disable: reclaim all UFs. Unused UFs rejoin the free list, and every
 * UF still posted to the (now dead) firmware is pulled back as well.
 */
static void
bfa_uf_iocdisable(struct bfa_s *bfa)
{
	struct bfa_uf_mod_s *ufm = BFA_UF_MOD(bfa);
	struct bfa_uf_s *uf;
	struct list_head *qe, *qen;

	/* Enqueue unused uf resources to free_q */
	list_splice_tail_init(&ufm->uf_unused_q, &ufm->uf_free_q);

	list_for_each_safe(qe, qen, &ufm->uf_posted_q) {
		uf = (struct bfa_uf_s *) qe;
		list_del(&uf->qe);
		bfa_uf_put(ufm, uf);
	}
}
5174
/* UF module start: hand all free receive buffers to the firmware. */
static void
bfa_uf_start(struct bfa_s *bfa)
{
	bfa_uf_post_all(BFA_UF_MOD(bfa));
}
5180
/*
 * Register handler for all unsolicited receive frames.
 *
 * @param[in]		bfa		BFA instance
 * @param[in]		ufrecv	receive handler function
 * @param[in]		cbarg	receive handler arg
 */
void
bfa_uf_recv_register(struct bfa_s *bfa, bfa_cb_uf_recv_t ufrecv, void *cbarg)
{
	struct bfa_uf_mod_s *ufm = BFA_UF_MOD(bfa);

	ufm->ufrecv = ufrecv;
	ufm->cbarg = cbarg;
}
5196
/*
 * Free an unsolicited frame back to BFA.
 *
 * @param[in]		uf		unsolicited frame to be freed
 *
 * @return None
 */
void
bfa_uf_free(struct bfa_uf_s *uf)
{
	/* return the buffer and immediately re-post it to the firmware */
	bfa_uf_put(BFA_UF_MOD(uf->bfa), uf);
	bfa_uf_post_all(BFA_UF_MOD(uf->bfa));
}
5210
5211
5212
Jing Huang5fbe25c2010-10-18 17:17:23 -07005213/*
Krishna Gudipatia36c61f2010-09-15 11:50:55 -07005214 * uf_pub BFA uf module public functions
5215 */
/*
 * UF interrupt handler: dispatch firmware-to-host UF messages. Only
 * FRM_RCVD is expected; anything else traces and warns.
 */
void
bfa_uf_isr(struct bfa_s *bfa, struct bfi_msg_s *msg)
{
	bfa_trc(bfa, msg->mhdr.msg_id);

	switch (msg->mhdr.msg_id) {
	case BFI_UF_I2H_FRM_RCVD:
		uf_recv(bfa, (struct bfi_uf_frm_rcvd_s *) msg);
		break;

	default:
		bfa_trc(bfa, msg->mhdr.msg_id);
		WARN_ON(1);
	}
}
5231
/*
 * Reconcile driver UF resources with the count the firmware actually
 * supports: park any excess entries from the free queue on the unused queue.
 */
void
bfa_uf_res_recfg(struct bfa_s *bfa, u16 num_uf_fw)
{
	struct bfa_uf_mod_s	*mod = BFA_UF_MOD(bfa);
	struct list_head	*qe;
	int	i;

	for (i = 0; i < (mod->num_ufs - num_uf_fw); i++) {
		bfa_q_deq_tail(&mod->uf_free_q, &qe);
		list_add_tail(qe, &mod->uf_unused_q);
	}
}
Krishna Gudipati3d7fc662011-06-24 20:28:17 -07005244
5245/*
5246 * BFA fcdiag module
5247 */
5248#define BFA_DIAG_QTEST_TOV 1000 /* msec */
5249
5250/*
5251 * Set port status to busy
5252 */
5253static void
5254bfa_fcdiag_set_busy_status(struct bfa_fcdiag_s *fcdiag)
5255{
5256 struct bfa_fcport_s *fcport = BFA_FCPORT_MOD(fcdiag->bfa);
5257
5258 if (fcdiag->lb.lock)
5259 fcport->diag_busy = BFA_TRUE;
5260 else
5261 fcport->diag_busy = BFA_FALSE;
5262}
5263
/* fcdiag meminfo hook — empty; the common DIAG module owns the memory. */
static void
bfa_fcdiag_meminfo(struct bfa_iocfc_cfg_s *cfg, struct bfa_meminfo_s *meminfo,
		struct bfa_s *bfa)
{
}
5269
/* Attach the fcdiag module: just record the bfa instance and trace module. */
static void
bfa_fcdiag_attach(struct bfa_s *bfa, void *bfad, struct bfa_iocfc_cfg_s *cfg,
		struct bfa_pcidev_s *pcidev)
{
	struct bfa_fcdiag_s *fcdiag = BFA_FCDIAG_MOD(bfa);
	fcdiag->bfa             = bfa;
	fcdiag->trcmod  = bfa->trcmod;
	/* The common DIAG attach bfa_diag_attach() will do all memory claim */
}
5279
/*
 * IOC disable: if a loopback test is in flight, fail it back to the caller
 * with IOC_FAILURE, release the lock, and clear the port busy flag.
 */
static void
bfa_fcdiag_iocdisable(struct bfa_s *bfa)
{
	struct bfa_fcdiag_s *fcdiag = BFA_FCDIAG_MOD(bfa);
	bfa_trc(fcdiag, fcdiag->lb.lock);
	if (fcdiag->lb.lock) {
		fcdiag->lb.status = BFA_STATUS_IOC_FAILURE;
		fcdiag->lb.cbfn(fcdiag->lb.cbarg, fcdiag->lb.status);
		fcdiag->lb.lock = 0;
		bfa_fcdiag_set_busy_status(fcdiag);
	}
}
5292
/* fcdiag detach hook — intentionally empty. */
static void
bfa_fcdiag_detach(struct bfa_s *bfa)
{
}
5297
/* fcdiag start hook — intentionally empty. */
static void
bfa_fcdiag_start(struct bfa_s *bfa)
{
}
5302
/* fcdiag stop hook — intentionally empty. */
static void
bfa_fcdiag_stop(struct bfa_s *bfa)
{
}
5307
/*
 * Queue-test timer expiry: the firmware did not respond in time. Record an
 * ETIMER result (including how many iterations completed), notify the
 * caller, and release the queue-test lock.
 */
static void
bfa_fcdiag_queuetest_timeout(void *cbarg)
{
	struct bfa_fcdiag_s       *fcdiag = cbarg;
	struct bfa_diag_qtest_result_s *res = fcdiag->qtest.result;

	bfa_trc(fcdiag, fcdiag->qtest.all);
	bfa_trc(fcdiag, fcdiag->qtest.count);

	fcdiag->qtest.timer_active = 0;

	res->status = BFA_STATUS_ETIMER;
	/* iterations completed before the timeout */
	res->count  = QTEST_CNT_DEFAULT - fcdiag->qtest.count;
	if (fcdiag->qtest.all)
		res->queue  = fcdiag->qtest.all;

	bfa_trc(fcdiag, BFA_STATUS_ETIMER);
	fcdiag->qtest.status = BFA_STATUS_ETIMER;
	fcdiag->qtest.cbfn(fcdiag->qtest.cbarg, fcdiag->qtest.status);
	fcdiag->qtest.lock = 0;
}
5329
/*
 * Send one queue-test request, filled with the default test pattern, on the
 * queue currently under test. Fails with DEVBUSY if no request-queue
 * element is available.
 */
static bfa_status_t
bfa_fcdiag_queuetest_send(struct bfa_fcdiag_s *fcdiag)
{
	u32	i;
	struct bfi_diag_qtest_req_s *req;

	req = bfa_reqq_next(fcdiag->bfa, fcdiag->qtest.queue);
	if (!req)
		return BFA_STATUS_DEVBUSY;

	/* build host command */
	bfi_h2i_set(req->mh, BFI_MC_DIAG, BFI_DIAG_H2I_QTEST,
			bfa_fn_lpu(fcdiag->bfa));

	/* fill the payload with the known pattern the firmware echoes back */
	for (i = 0; i < BFI_LMSG_PL_WSZ; i++)
		req->data[i] = QTEST_PAT_DEFAULT;

	bfa_trc(fcdiag, fcdiag->qtest.queue);
	/* ring door bell */
	bfa_reqq_produce(fcdiag->bfa, fcdiag->qtest.queue, req->mh);
	return BFA_STATUS_OK;
}
5352
/*
 * Queue-test response handler: verify the echoed payload, then either send
 * the next iteration (or next queue, in all-queues mode) or finish the test
 * and report the result to the caller.
 */
static void
bfa_fcdiag_queuetest_comp(struct bfa_fcdiag_s *fcdiag,
			bfi_diag_qtest_rsp_t *rsp)
{
	struct bfa_diag_qtest_result_s *res = fcdiag->qtest.result;
	bfa_status_t status = BFA_STATUS_OK;
	int i;

	/* Check timer, should still be active */
	if (!fcdiag->qtest.timer_active) {
		bfa_trc(fcdiag, fcdiag->qtest.timer_active);
		return;
	}

	/* update count */
	fcdiag->qtest.count--;

	/* Check result: firmware echoes the bitwise-inverted pattern */
	for (i = 0; i < BFI_LMSG_PL_WSZ; i++) {
		if (rsp->data[i] != ~(QTEST_PAT_DEFAULT)) {
			res->status = BFA_STATUS_DATACORRUPTED;
			break;
		}
	}

	if (res->status == BFA_STATUS_OK) {
		if (fcdiag->qtest.count > 0) {
			/* more iterations on the current queue */
			status = bfa_fcdiag_queuetest_send(fcdiag);
			if (status == BFA_STATUS_OK)
				return;
			else
				res->status = status;
		} else if (fcdiag->qtest.all > 0 &&
			fcdiag->qtest.queue < (BFI_IOC_MAX_CQS - 1)) {
			/* all-queues mode: advance to the next queue */
			fcdiag->qtest.count = QTEST_CNT_DEFAULT;
			fcdiag->qtest.queue++;
			status = bfa_fcdiag_queuetest_send(fcdiag);
			if (status == BFA_STATUS_OK)
				return;
			else
				res->status = status;
		}
	}

	/* Stop timer when we comp all queue */
	if (fcdiag->qtest.timer_active) {
		bfa_timer_stop(&fcdiag->qtest.timer);
		fcdiag->qtest.timer_active = 0;
	}
	res->queue = fcdiag->qtest.queue;
	res->count = QTEST_CNT_DEFAULT - fcdiag->qtest.count;
	bfa_trc(fcdiag, res->count);
	bfa_trc(fcdiag, res->status);
	fcdiag->qtest.status = res->status;
	fcdiag->qtest.cbfn(fcdiag->qtest.cbarg, fcdiag->qtest.status);
	fcdiag->qtest.lock = 0;
}
5410
/*
 * Loopback response handler: copy the firmware frame counters (converting
 * from big-endian) into the caller's result, complete the callback, and
 * release the loopback lock / port busy status.
 */
static void
bfa_fcdiag_loopback_comp(struct bfa_fcdiag_s *fcdiag,
			struct bfi_diag_lb_rsp_s *rsp)
{
	struct bfa_diag_loopback_result_s *res = fcdiag->lb.result;

	res->numtxmfrm  = be32_to_cpu(rsp->res.numtxmfrm);
	res->numosffrm  = be32_to_cpu(rsp->res.numosffrm);
	res->numrcvfrm  = be32_to_cpu(rsp->res.numrcvfrm);
	res->badfrminf  = be32_to_cpu(rsp->res.badfrminf);
	res->badfrmnum  = be32_to_cpu(rsp->res.badfrmnum);
	res->status     = rsp->res.status;
	fcdiag->lb.status = rsp->res.status;
	bfa_trc(fcdiag, fcdiag->lb.status);
	fcdiag->lb.cbfn(fcdiag->lb.cbarg, fcdiag->lb.status);
	fcdiag->lb.lock = 0;
	bfa_fcdiag_set_busy_status(fcdiag);
}
5429
/*
 * Build and send a loopback request to the firmware on the DIAG request
 * queue. Fails with DEVBUSY if no request-queue element is available.
 */
static bfa_status_t
bfa_fcdiag_loopback_send(struct bfa_fcdiag_s *fcdiag,
			struct bfa_diag_loopback_s *loopback)
{
	struct bfi_diag_lb_req_s *lb_req;

	lb_req = bfa_reqq_next(fcdiag->bfa, BFA_REQQ_DIAG);
	if (!lb_req)
		return BFA_STATUS_DEVBUSY;

	/* build host command */
	bfi_h2i_set(lb_req->mh, BFI_MC_DIAG, BFI_DIAG_H2I_LOOPBACK,
		bfa_fn_lpu(fcdiag->bfa));

	lb_req->lb_mode = loopback->lb_mode;
	lb_req->speed = loopback->speed;
	lb_req->loopcnt = loopback->loopcnt;
	lb_req->pattern = loopback->pattern;

	/* ring door bell */
	bfa_reqq_produce(fcdiag->bfa, BFA_REQQ_DIAG, lb_req->mh);

	bfa_trc(fcdiag, loopback->lb_mode);
	bfa_trc(fcdiag, loopback->speed);
	bfa_trc(fcdiag, loopback->loopcnt);
	bfa_trc(fcdiag, loopback->pattern);
	return BFA_STATUS_OK;
}
5458
5459/*
5460 * cpe/rme intr handler
5461 */
/*
 * cpe/rme intr handler: dispatch DIAG firmware-to-host messages to the
 * loopback or queue-test completion handlers.
 */
void
bfa_fcdiag_intr(struct bfa_s *bfa, struct bfi_msg_s *msg)
{
	struct bfa_fcdiag_s *fcdiag = BFA_FCDIAG_MOD(bfa);

	switch (msg->mhdr.msg_id) {
	case BFI_DIAG_I2H_LOOPBACK:
		bfa_fcdiag_loopback_comp(fcdiag,
				(struct bfi_diag_lb_rsp_s *) msg);
		break;
	case BFI_DIAG_I2H_QTEST:
		bfa_fcdiag_queuetest_comp(fcdiag, (bfi_diag_qtest_rsp_t *)msg);
		break;
	default:
		bfa_trc(fcdiag, msg->mhdr.msg_id);
		WARN_ON(1);
	}
}
5480
5481/*
5482 * Loopback test
5483 *
5484 * @param[in] *bfa - bfa data struct
5485 * @param[in] opmode - port operation mode
5486 * @param[in] speed - port speed
5487 * @param[in] lpcnt - loop count
5488 * @param[in] pat - pattern to build packet
5489 * @param[in] *result - pt to bfa_diag_loopback_result_t data struct
5490 * @param[in] cbfn - callback function
5491 * @param[in] cbarg - callback functioin arg
5492 *
5493 * @param[out]
5494 */
5495bfa_status_t
5496bfa_fcdiag_loopback(struct bfa_s *bfa, enum bfa_port_opmode opmode,
5497 enum bfa_port_speed speed, u32 lpcnt, u32 pat,
5498 struct bfa_diag_loopback_result_s *result, bfa_cb_diag_t cbfn,
5499 void *cbarg)
5500{
5501 struct bfa_diag_loopback_s loopback;
5502 struct bfa_port_attr_s attr;
5503 bfa_status_t status;
5504 struct bfa_fcdiag_s *fcdiag = BFA_FCDIAG_MOD(bfa);
5505
5506 if (!bfa_iocfc_is_operational(bfa))
5507 return BFA_STATUS_IOC_NON_OP;
5508
5509 /* if port is PBC disabled, return error */
5510 if (bfa_fcport_is_pbcdisabled(bfa)) {
5511 bfa_trc(fcdiag, BFA_STATUS_PBC);
5512 return BFA_STATUS_PBC;
5513 }
5514
5515 if (bfa_fcport_is_disabled(bfa) == BFA_FALSE) {
5516 bfa_trc(fcdiag, opmode);
5517 return BFA_STATUS_PORT_NOT_DISABLED;
5518 }
5519
5520 /* Check if the speed is supported */
5521 bfa_fcport_get_attr(bfa, &attr);
5522 bfa_trc(fcdiag, attr.speed_supported);
5523 if (speed > attr.speed_supported)
5524 return BFA_STATUS_UNSUPP_SPEED;
5525
5526 /* For Mezz card, port speed entered needs to be checked */
5527 if (bfa_mfg_is_mezz(bfa->ioc.attr->card_type)) {
5528 if (bfa_ioc_get_type(&bfa->ioc) == BFA_IOC_TYPE_FC) {
5529 if ((speed == BFA_PORT_SPEED_1GBPS) &&
5530 (bfa_asic_id_ct2(bfa->ioc.pcidev.device_id)))
5531 return BFA_STATUS_UNSUPP_SPEED;
5532 if (!(speed == BFA_PORT_SPEED_1GBPS ||
5533 speed == BFA_PORT_SPEED_2GBPS ||
5534 speed == BFA_PORT_SPEED_4GBPS ||
5535 speed == BFA_PORT_SPEED_8GBPS ||
5536 speed == BFA_PORT_SPEED_16GBPS ||
5537 speed == BFA_PORT_SPEED_AUTO))
5538 return BFA_STATUS_UNSUPP_SPEED;
5539 } else {
5540 if (speed != BFA_PORT_SPEED_10GBPS)
5541 return BFA_STATUS_UNSUPP_SPEED;
5542 }
5543 }
5544
5545 /* check to see if there is another destructive diag cmd running */
5546 if (fcdiag->lb.lock) {
5547 bfa_trc(fcdiag, fcdiag->lb.lock);
5548 return BFA_STATUS_DEVBUSY;
5549 }
5550
5551 fcdiag->lb.lock = 1;
5552 loopback.lb_mode = opmode;
5553 loopback.speed = speed;
5554 loopback.loopcnt = lpcnt;
5555 loopback.pattern = pat;
5556 fcdiag->lb.result = result;
5557 fcdiag->lb.cbfn = cbfn;
5558 fcdiag->lb.cbarg = cbarg;
5559 memset(result, 0, sizeof(struct bfa_diag_loopback_result_s));
5560 bfa_fcdiag_set_busy_status(fcdiag);
5561
5562 /* Send msg to fw */
5563 status = bfa_fcdiag_loopback_send(fcdiag, &loopback);
5564 return status;
5565}
5566
/*
 * DIAG queue test command
 *
 * @param[in] *bfa            - bfa data struct
 * @param[in] force           - 1: don't do ioc op checking
 * @param[in] queue           - queue no. to test
 * @param[in] *result         - pt to bfa_diag_qtest_result_t data struct
 * @param[in] cbfn            - callback function
 * @param[in] *cbarg          - callback function arg
 *
 * @param[out]
 */
bfa_status_t
bfa_fcdiag_queuetest(struct bfa_s *bfa, u32 force, u32 queue,
		struct bfa_diag_qtest_result_s *result, bfa_cb_diag_t cbfn,
		void *cbarg)
{
	struct bfa_fcdiag_s *fcdiag = BFA_FCDIAG_MOD(bfa);
	bfa_status_t status;
	bfa_trc(fcdiag, force);
	bfa_trc(fcdiag, queue);

	if (!force && !bfa_iocfc_is_operational(bfa))
		return BFA_STATUS_IOC_NON_OP;

	/* check to see if there is another destructive diag cmd running */
	if (fcdiag->qtest.lock) {
		bfa_trc(fcdiag, fcdiag->qtest.lock);
		return BFA_STATUS_DEVBUSY;
	}

	/* Initialization */
	fcdiag->qtest.lock = 1;
	fcdiag->qtest.cbfn = cbfn;
	fcdiag->qtest.cbarg = cbarg;
	fcdiag->qtest.result = result;
	fcdiag->qtest.count = QTEST_CNT_DEFAULT;

	/* Init test results */
	fcdiag->qtest.result->status = BFA_STATUS_OK;
	fcdiag->qtest.result->count  = 0;

	/* send: a queue index >= BFI_IOC_MAX_CQS means "test all queues" */
	if (queue < BFI_IOC_MAX_CQS) {
		fcdiag->qtest.result->queue  = (u8)queue;
		fcdiag->qtest.queue = (u8)queue;
		fcdiag->qtest.all   = 0;
	} else {
		fcdiag->qtest.result->queue  = 0;
		fcdiag->qtest.queue = 0;
		fcdiag->qtest.all   = 1;
	}
	status = bfa_fcdiag_queuetest_send(fcdiag);

	/* Start a timer */
	if (status == BFA_STATUS_OK) {
		bfa_timer_start(bfa, &fcdiag->qtest.timer,
				bfa_fcdiag_queuetest_timeout, fcdiag,
				BFA_DIAG_QTEST_TOV);
		fcdiag->qtest.timer_active = 1;
	}
	return status;
}
5630
5631/*
5632 * DIAG PLB is running
5633 *
5634 * @param[in] *bfa - bfa data struct
5635 *
5636 * @param[out]
5637 */
5638bfa_status_t
5639bfa_fcdiag_lb_is_running(struct bfa_s *bfa)
5640{
5641 struct bfa_fcdiag_s *fcdiag = BFA_FCDIAG_MOD(bfa);
5642 return fcdiag->lb.lock ? BFA_STATUS_DIAG_BUSY : BFA_STATUS_OK;
5643}