blob: 506f1d326e5b58df9dfd6628d0c310f2e8912dc6 [file] [log] [blame]
Jing Huang7725ccf2009-09-23 17:46:15 -07001/*
Krishna Gudipatia36c61f2010-09-15 11:50:55 -07002 * Copyright (c) 2005-2010 Brocade Communications Systems, Inc.
Jing Huang7725ccf2009-09-23 17:46:15 -07003 * All rights reserved
4 * www.brocade.com
5 *
6 * Linux driver for Brocade Fibre Channel Host Bus Adapter.
7 *
8 * This program is free software; you can redistribute it and/or modify it
9 * under the terms of the GNU General Public License (GPL) Version 2 as
10 * published by the Free Software Foundation
11 *
12 * This program is distributed in the hope that it will be useful, but
13 * WITHOUT ANY WARRANTY; without even the implied warranty of
14 * MERCHANTABILITY or FITNESS FOR A PARTICULAR PURPOSE. See the GNU
15 * General Public License for more details.
16 */
17
Maggie Zhangf16a1752010-12-09 19:12:32 -080018#include "bfad_drv.h"
Krishna Gudipatia36c61f2010-09-15 11:50:55 -070019#include "bfa_modules.h"
Jing Huang7725ccf2009-09-23 17:46:15 -070020
21BFA_TRC_FILE(HAL, FCPIM);
22BFA_MODULE(fcpim);
23
Krishna Gudipatia36c61f2010-09-15 11:50:55 -070024
/*
 * Accumulate one statistics counter: add the __stats field of the
 * right-hand stats block (__r) into the left-hand block (__l).
 * Both arguments are pointers to itnim iostats structures.
 * Arguments are fully parenthesized so expressions such as (&s) can
 * be passed safely (macro-hygiene fix).
 */
#define bfa_fcpim_add_iostats(__l, __r, __stats)	\
	((__l)->__stats += (__r)->__stats)
27
28
Jing Huang5fbe25c2010-10-18 17:17:23 -070029/*
Krishna Gudipatia36c61f2010-09-15 11:50:55 -070030 * BFA ITNIM Related definitions
31 */
/* Folds a dying itnim's counters into the module "deleted itn" totals. */
static void bfa_itnim_update_del_itn_stats(struct bfa_itnim_s *itnim);

/*
 * Look up an itnim by tag. The tag is masked with (num_itnims - 1),
 * which assumes num_itnims is a power of two -- TODO confirm.
 */
#define BFA_ITNIM_FROM_TAG(_fcpim, _tag)                                \
	(((_fcpim)->itnim_arr + ((_tag) & ((_fcpim)->num_itnims - 1))))

/* Link an itnim onto its fcpim module's active itnim queue. */
#define bfa_fcpim_additn(__itnim)					\
	list_add_tail(&(__itnim)->qe, &(__itnim)->fcpim->itnim_q)

/*
 * Unlink an itnim from the module queue. The itnim must be on the
 * queue and must have no outstanding IOs (io_q, io_cleanup_q and
 * pending_q all empty); its statistics are folded into the
 * deleted-itnim totals first.
 */
#define bfa_fcpim_delitn(__itnim) do {					\
	bfa_assert(bfa_q_is_on_q(&(__itnim)->fcpim->itnim_q, __itnim)); \
	bfa_itnim_update_del_itn_stats(__itnim);      \
	list_del(&(__itnim)->qe);	\
	bfa_assert(list_empty(&(__itnim)->io_q));	\
	bfa_assert(list_empty(&(__itnim)->io_cleanup_q));	\
	bfa_assert(list_empty(&(__itnim)->pending_q));	\
} while (0)
47
/*
 * Deliver the itnim online/offline/sler notification. When FCS is
 * present ((__itnim)->bfa->fcs is set) the FCS callback is invoked
 * directly; otherwise the notification is deferred through the
 * callback queue (bfa_cb_queue).
 */
#define bfa_itnim_online_cb(__itnim) do {				\
	if ((__itnim)->bfa->fcs)					\
		bfa_cb_itnim_online((__itnim)->ditn);      \
	else {								\
		bfa_cb_queue((__itnim)->bfa, &(__itnim)->hcb_qe,	\
		__bfa_cb_itnim_online, (__itnim));      \
	}								\
} while (0)

/* Offline notification: same direct-vs-queued dispatch as above. */
#define bfa_itnim_offline_cb(__itnim) do {				\
	if ((__itnim)->bfa->fcs)					\
		bfa_cb_itnim_offline((__itnim)->ditn);      \
	else {								\
		bfa_cb_queue((__itnim)->bfa, &(__itnim)->hcb_qe,	\
		__bfa_cb_itnim_offline, (__itnim));      \
	}								\
} while (0)

/* Second-level error recovery notification: same dispatch pattern. */
#define bfa_itnim_sler_cb(__itnim) do {					\
	if ((__itnim)->bfa->fcs)					\
		bfa_cb_itnim_sler((__itnim)->ditn);      \
	else {								\
		bfa_cb_queue((__itnim)->bfa, &(__itnim)->hcb_qe,	\
		__bfa_cb_itnim_sler, (__itnim));      \
	}								\
} while (0)
74
Jing Huang5fbe25c2010-10-18 17:17:23 -070075/*
Krishna Gudipatia36c61f2010-09-15 11:50:55 -070076 * bfa_itnim_sm BFA itnim state machine
77 */
78
79
/* Events posted to the itnim state machine. */
enum bfa_itnim_event {
	BFA_ITNIM_SM_CREATE = 1,	/* itnim is created */
	BFA_ITNIM_SM_ONLINE = 2,	/* itnim is online */
	BFA_ITNIM_SM_OFFLINE = 3,	/* itnim is offline */
	BFA_ITNIM_SM_FWRSP = 4,		/* firmware response */
	BFA_ITNIM_SM_DELETE = 5,	/* deleting an existing itnim */
	BFA_ITNIM_SM_CLEANUP = 6,	/* IO cleanup completion */
	BFA_ITNIM_SM_SLER = 7,		/* second level error recovery */
	BFA_ITNIM_SM_HWFAIL = 8,	/* IOC h/w failure event */
	BFA_ITNIM_SM_QRESUME = 9,	/* queue space available */
};
91
Jing Huang5fbe25c2010-10-18 17:17:23 -070092/*
Krishna Gudipatia36c61f2010-09-15 11:50:55 -070093 * BFA IOIM related definitions
94 */
/* Move an IO from whatever queue it is on to the completion queue. */
#define bfa_ioim_move_to_comp_q(__ioim) do {				\
	list_del(&(__ioim)->qe);					\
	list_add_tail(&(__ioim)->qe, &(__ioim)->fcpim->ioim_comp_q);	\
} while (0)

/* Invoke the IO-profiling completion hook, if profiling is enabled. */
#define bfa_ioim_cb_profile_comp(__fcpim, __ioim) do {			\
	if ((__fcpim)->profile_comp)					\
		(__fcpim)->profile_comp(__ioim);			\
} while (0)

/* Invoke the IO-profiling start hook, if profiling is enabled. */
#define bfa_ioim_cb_profile_start(__fcpim, __ioim) do {			\
	if ((__fcpim)->profile_start)					\
		(__fcpim)->profile_start(__ioim);			\
} while (0)
Krishna Gudipatia36c61f2010-09-15 11:50:55 -0700110
Jing Huang5fbe25c2010-10-18 17:17:23 -0700111/*
Krishna Gudipatia36c61f2010-09-15 11:50:55 -0700112 * IO state machine events
113 */
/* Events posted to the per-IO (ioim) state machine. */
enum bfa_ioim_event {
	BFA_IOIM_SM_START = 1,		/* io start request from host */
	BFA_IOIM_SM_COMP_GOOD = 2,	/* io good comp, resource free */
	BFA_IOIM_SM_COMP = 3,		/* io comp, resource is free */
	BFA_IOIM_SM_COMP_UTAG = 4,	/* io comp, resource is free */
	BFA_IOIM_SM_DONE = 5,		/* io comp, resource not free */
	BFA_IOIM_SM_FREE = 6,		/* io resource is freed */
	BFA_IOIM_SM_ABORT = 7,		/* abort request from scsi stack */
	BFA_IOIM_SM_ABORT_COMP = 8,	/* abort from f/w */
	BFA_IOIM_SM_ABORT_DONE = 9,	/* abort completion from f/w */
	BFA_IOIM_SM_QRESUME = 10,	/* CQ space available to queue IO */
	BFA_IOIM_SM_SGALLOCED = 11,	/* SG page allocation successful */
	BFA_IOIM_SM_SQRETRY = 12,	/* sequence recovery retry */
	BFA_IOIM_SM_HCB = 13,		/* bfa callback complete */
	BFA_IOIM_SM_CLEANUP = 14,	/* IO cleanup from itnim */
	BFA_IOIM_SM_TMSTART = 15,	/* IO cleanup from tskim */
	BFA_IOIM_SM_TMDONE = 16,	/* IO cleanup from tskim */
	BFA_IOIM_SM_HWFAIL = 17,	/* IOC h/w failure event */
	BFA_IOIM_SM_IOTOV = 18,		/* ITN offline TOV */
};
134
135
Jing Huang5fbe25c2010-10-18 17:17:23 -0700136/*
Krishna Gudipatia36c61f2010-09-15 11:50:55 -0700137 * BFA TSKIM related definitions
138 */
139
Jing Huang5fbe25c2010-10-18 17:17:23 -0700140/*
Krishna Gudipatia36c61f2010-09-15 11:50:55 -0700141 * task management completion handling
142 */
/*
 * Queue a task-management completion callback and, when notification
 * is armed on the tskim, tell the owning itnim the TM is done.
 */
#define bfa_tskim_qcomp(__tskim, __cbfn) do {				\
	bfa_cb_queue((__tskim)->bfa, &(__tskim)->hcb_qe, __cbfn, (__tskim));\
	bfa_tskim_notify_comp(__tskim);      \
} while (0)

/* Notify the itnim of TM completion only if (__tskim)->notify is set. */
#define bfa_tskim_notify_comp(__tskim) do {				\
	if ((__tskim)->notify)						\
		bfa_itnim_tskdone((__tskim)->itnim);      \
} while (0)
152
153
/* Events posted to the task-management (tskim) state machine.
 * NOTE(review): value 4 is skipped in this numbering -- presumably a
 * retired event; confirm before reusing the value. */
enum bfa_tskim_event {
	BFA_TSKIM_SM_START = 1,		/* TM command start */
	BFA_TSKIM_SM_DONE = 2,		/* TM completion */
	BFA_TSKIM_SM_QRESUME = 3,	/* resume after qfull */
	BFA_TSKIM_SM_HWFAIL = 5,	/* IOC h/w failure event */
	BFA_TSKIM_SM_HCB = 6,		/* BFA callback completion */
	BFA_TSKIM_SM_IOS_DONE = 7,	/* IO and sub TM completions */
	BFA_TSKIM_SM_CLEANUP = 8,	/* TM cleanup on ITN offline */
	BFA_TSKIM_SM_CLEANUP_DONE = 9,	/* TM abort completion */
};
164
Jing Huang5fbe25c2010-10-18 17:17:23 -0700165/*
Krishna Gudipatia36c61f2010-09-15 11:50:55 -0700166 * forward declaration for BFA ITNIM functions
167 */
168static void bfa_itnim_iocdisable_cleanup(struct bfa_itnim_s *itnim);
169static bfa_boolean_t bfa_itnim_send_fwcreate(struct bfa_itnim_s *itnim);
170static bfa_boolean_t bfa_itnim_send_fwdelete(struct bfa_itnim_s *itnim);
171static void bfa_itnim_cleanp_comp(void *itnim_cbarg);
172static void bfa_itnim_cleanup(struct bfa_itnim_s *itnim);
173static void __bfa_cb_itnim_online(void *cbarg, bfa_boolean_t complete);
174static void __bfa_cb_itnim_offline(void *cbarg, bfa_boolean_t complete);
175static void __bfa_cb_itnim_sler(void *cbarg, bfa_boolean_t complete);
176static void bfa_itnim_iotov_online(struct bfa_itnim_s *itnim);
177static void bfa_itnim_iotov_cleanup(struct bfa_itnim_s *itnim);
178static void bfa_itnim_iotov(void *itnim_arg);
179static void bfa_itnim_iotov_start(struct bfa_itnim_s *itnim);
180static void bfa_itnim_iotov_stop(struct bfa_itnim_s *itnim);
181static void bfa_itnim_iotov_delete(struct bfa_itnim_s *itnim);
182
Jing Huang5fbe25c2010-10-18 17:17:23 -0700183/*
Krishna Gudipatia36c61f2010-09-15 11:50:55 -0700184 * forward declaration of ITNIM state machine
185 */
186static void bfa_itnim_sm_uninit(struct bfa_itnim_s *itnim,
187 enum bfa_itnim_event event);
188static void bfa_itnim_sm_created(struct bfa_itnim_s *itnim,
189 enum bfa_itnim_event event);
190static void bfa_itnim_sm_fwcreate(struct bfa_itnim_s *itnim,
191 enum bfa_itnim_event event);
192static void bfa_itnim_sm_delete_pending(struct bfa_itnim_s *itnim,
193 enum bfa_itnim_event event);
194static void bfa_itnim_sm_online(struct bfa_itnim_s *itnim,
195 enum bfa_itnim_event event);
196static void bfa_itnim_sm_sler(struct bfa_itnim_s *itnim,
197 enum bfa_itnim_event event);
198static void bfa_itnim_sm_cleanup_offline(struct bfa_itnim_s *itnim,
199 enum bfa_itnim_event event);
200static void bfa_itnim_sm_cleanup_delete(struct bfa_itnim_s *itnim,
201 enum bfa_itnim_event event);
202static void bfa_itnim_sm_fwdelete(struct bfa_itnim_s *itnim,
203 enum bfa_itnim_event event);
204static void bfa_itnim_sm_offline(struct bfa_itnim_s *itnim,
205 enum bfa_itnim_event event);
206static void bfa_itnim_sm_iocdisable(struct bfa_itnim_s *itnim,
207 enum bfa_itnim_event event);
208static void bfa_itnim_sm_deleting(struct bfa_itnim_s *itnim,
209 enum bfa_itnim_event event);
210static void bfa_itnim_sm_fwcreate_qfull(struct bfa_itnim_s *itnim,
211 enum bfa_itnim_event event);
212static void bfa_itnim_sm_fwdelete_qfull(struct bfa_itnim_s *itnim,
213 enum bfa_itnim_event event);
214static void bfa_itnim_sm_deleting_qfull(struct bfa_itnim_s *itnim,
215 enum bfa_itnim_event event);
216
Jing Huang5fbe25c2010-10-18 17:17:23 -0700217/*
Krishna Gudipatia36c61f2010-09-15 11:50:55 -0700218 * forward declaration for BFA IOIM functions
219 */
220static bfa_boolean_t bfa_ioim_send_ioreq(struct bfa_ioim_s *ioim);
Maggie Zhange3e7d3e2010-12-09 19:10:27 -0800221static bfa_boolean_t bfa_ioim_sgpg_alloc(struct bfa_ioim_s *ioim);
Krishna Gudipatia36c61f2010-09-15 11:50:55 -0700222static bfa_boolean_t bfa_ioim_send_abort(struct bfa_ioim_s *ioim);
223static void bfa_ioim_notify_cleanup(struct bfa_ioim_s *ioim);
224static void __bfa_cb_ioim_good_comp(void *cbarg, bfa_boolean_t complete);
225static void __bfa_cb_ioim_comp(void *cbarg, bfa_boolean_t complete);
226static void __bfa_cb_ioim_abort(void *cbarg, bfa_boolean_t complete);
227static void __bfa_cb_ioim_failed(void *cbarg, bfa_boolean_t complete);
228static void __bfa_cb_ioim_pathtov(void *cbarg, bfa_boolean_t complete);
229static bfa_boolean_t bfa_ioim_is_abortable(struct bfa_ioim_s *ioim);
230
231
Jing Huang5fbe25c2010-10-18 17:17:23 -0700232/*
Krishna Gudipatia36c61f2010-09-15 11:50:55 -0700233 * forward declaration of BFA IO state machine
234 */
235static void bfa_ioim_sm_uninit(struct bfa_ioim_s *ioim,
236 enum bfa_ioim_event event);
237static void bfa_ioim_sm_sgalloc(struct bfa_ioim_s *ioim,
238 enum bfa_ioim_event event);
239static void bfa_ioim_sm_active(struct bfa_ioim_s *ioim,
240 enum bfa_ioim_event event);
241static void bfa_ioim_sm_abort(struct bfa_ioim_s *ioim,
242 enum bfa_ioim_event event);
243static void bfa_ioim_sm_cleanup(struct bfa_ioim_s *ioim,
244 enum bfa_ioim_event event);
245static void bfa_ioim_sm_qfull(struct bfa_ioim_s *ioim,
246 enum bfa_ioim_event event);
247static void bfa_ioim_sm_abort_qfull(struct bfa_ioim_s *ioim,
248 enum bfa_ioim_event event);
249static void bfa_ioim_sm_cleanup_qfull(struct bfa_ioim_s *ioim,
250 enum bfa_ioim_event event);
251static void bfa_ioim_sm_hcb(struct bfa_ioim_s *ioim,
252 enum bfa_ioim_event event);
253static void bfa_ioim_sm_hcb_free(struct bfa_ioim_s *ioim,
254 enum bfa_ioim_event event);
255static void bfa_ioim_sm_resfree(struct bfa_ioim_s *ioim,
256 enum bfa_ioim_event event);
257static void bfa_ioim_sm_cmnd_retry(struct bfa_ioim_s *ioim,
258 enum bfa_ioim_event event);
259
Jing Huang5fbe25c2010-10-18 17:17:23 -0700260/*
Krishna Gudipatia36c61f2010-09-15 11:50:55 -0700261 * forward declaration for BFA TSKIM functions
262 */
263static void __bfa_cb_tskim_done(void *cbarg, bfa_boolean_t complete);
264static void __bfa_cb_tskim_failed(void *cbarg, bfa_boolean_t complete);
265static bfa_boolean_t bfa_tskim_match_scope(struct bfa_tskim_s *tskim,
Maggie Zhangf3148782010-12-09 19:11:39 -0800266 struct scsi_lun lun);
Krishna Gudipatia36c61f2010-09-15 11:50:55 -0700267static void bfa_tskim_gather_ios(struct bfa_tskim_s *tskim);
268static void bfa_tskim_cleanp_comp(void *tskim_cbarg);
269static void bfa_tskim_cleanup_ios(struct bfa_tskim_s *tskim);
270static bfa_boolean_t bfa_tskim_send(struct bfa_tskim_s *tskim);
271static bfa_boolean_t bfa_tskim_send_abort(struct bfa_tskim_s *tskim);
272static void bfa_tskim_iocdisable_ios(struct bfa_tskim_s *tskim);
273
274
Jing Huang5fbe25c2010-10-18 17:17:23 -0700275/*
Krishna Gudipatia36c61f2010-09-15 11:50:55 -0700276 * forward declaration of BFA TSKIM state machine
277 */
278static void bfa_tskim_sm_uninit(struct bfa_tskim_s *tskim,
279 enum bfa_tskim_event event);
280static void bfa_tskim_sm_active(struct bfa_tskim_s *tskim,
281 enum bfa_tskim_event event);
282static void bfa_tskim_sm_cleanup(struct bfa_tskim_s *tskim,
283 enum bfa_tskim_event event);
284static void bfa_tskim_sm_iocleanup(struct bfa_tskim_s *tskim,
285 enum bfa_tskim_event event);
286static void bfa_tskim_sm_qfull(struct bfa_tskim_s *tskim,
287 enum bfa_tskim_event event);
288static void bfa_tskim_sm_cleanup_qfull(struct bfa_tskim_s *tskim,
289 enum bfa_tskim_event event);
290static void bfa_tskim_sm_hcb(struct bfa_tskim_s *tskim,
291 enum bfa_tskim_event event);
292
Jing Huang5fbe25c2010-10-18 17:17:23 -0700293/*
Maggie Zhangdf0f1932010-12-09 19:07:46 -0800294 * BFA FCP Initiator Mode module
Jing Huang7725ccf2009-09-23 17:46:15 -0700295 */
296
Jing Huang5fbe25c2010-10-18 17:17:23 -0700297/*
Krishna Gudipatia36c61f2010-09-15 11:50:55 -0700298 * Compute and return memory needed by FCP(im) module.
Jing Huang7725ccf2009-09-23 17:46:15 -0700299 */
300static void
301bfa_fcpim_meminfo(struct bfa_iocfc_cfg_s *cfg, u32 *km_len,
302 u32 *dm_len)
303{
304 bfa_itnim_meminfo(cfg, km_len, dm_len);
305
Jing Huang5fbe25c2010-10-18 17:17:23 -0700306 /*
Jing Huang7725ccf2009-09-23 17:46:15 -0700307 * IO memory
308 */
309 if (cfg->fwcfg.num_ioim_reqs < BFA_IOIM_MIN)
310 cfg->fwcfg.num_ioim_reqs = BFA_IOIM_MIN;
311 else if (cfg->fwcfg.num_ioim_reqs > BFA_IOIM_MAX)
312 cfg->fwcfg.num_ioim_reqs = BFA_IOIM_MAX;
313
314 *km_len += cfg->fwcfg.num_ioim_reqs *
315 (sizeof(struct bfa_ioim_s) + sizeof(struct bfa_ioim_sp_s));
316
317 *dm_len += cfg->fwcfg.num_ioim_reqs * BFI_IOIM_SNSLEN;
318
Jing Huang5fbe25c2010-10-18 17:17:23 -0700319 /*
Jing Huang7725ccf2009-09-23 17:46:15 -0700320 * task management command memory
321 */
322 if (cfg->fwcfg.num_tskim_reqs < BFA_TSKIM_MIN)
323 cfg->fwcfg.num_tskim_reqs = BFA_TSKIM_MIN;
324 *km_len += cfg->fwcfg.num_tskim_reqs * sizeof(struct bfa_tskim_s);
325}
326
327
328static void
329bfa_fcpim_attach(struct bfa_s *bfa, void *bfad, struct bfa_iocfc_cfg_s *cfg,
Krishna Gudipatia36c61f2010-09-15 11:50:55 -0700330 struct bfa_meminfo_s *meminfo, struct bfa_pcidev_s *pcidev)
Jing Huang7725ccf2009-09-23 17:46:15 -0700331{
332 struct bfa_fcpim_mod_s *fcpim = BFA_FCPIM_MOD(bfa);
333
334 bfa_trc(bfa, cfg->drvcfg.path_tov);
335 bfa_trc(bfa, cfg->fwcfg.num_rports);
336 bfa_trc(bfa, cfg->fwcfg.num_ioim_reqs);
337 bfa_trc(bfa, cfg->fwcfg.num_tskim_reqs);
338
Krishna Gudipatia36c61f2010-09-15 11:50:55 -0700339 fcpim->bfa = bfa;
340 fcpim->num_itnims = cfg->fwcfg.num_rports;
Jing Huang7725ccf2009-09-23 17:46:15 -0700341 fcpim->num_ioim_reqs = cfg->fwcfg.num_ioim_reqs;
342 fcpim->num_tskim_reqs = cfg->fwcfg.num_tskim_reqs;
Krishna Gudipatia36c61f2010-09-15 11:50:55 -0700343 fcpim->path_tov = cfg->drvcfg.path_tov;
344 fcpim->delay_comp = cfg->drvcfg.delay_comp;
345 fcpim->profile_comp = NULL;
346 fcpim->profile_start = NULL;
Jing Huang7725ccf2009-09-23 17:46:15 -0700347
348 bfa_itnim_attach(fcpim, meminfo);
349 bfa_tskim_attach(fcpim, meminfo);
350 bfa_ioim_attach(fcpim, meminfo);
351}
352
/* Intentional no-op: satisfies the detach hook declared by
 * BFA_MODULE(fcpim); this module has nothing to tear down here. */
static void
bfa_fcpim_detach(struct bfa_s *bfa)
{
}
357
/* Intentional no-op: start hook required by BFA_MODULE(fcpim). */
static void
bfa_fcpim_start(struct bfa_s *bfa)
{
}
362
/* Intentional no-op: stop hook required by BFA_MODULE(fcpim). */
static void
bfa_fcpim_stop(struct bfa_s *bfa)
{
}
367
368static void
369bfa_fcpim_iocdisable(struct bfa_s *bfa)
370{
371 struct bfa_fcpim_mod_s *fcpim = BFA_FCPIM_MOD(bfa);
372 struct bfa_itnim_s *itnim;
Krishna Gudipatia36c61f2010-09-15 11:50:55 -0700373 struct list_head *qe, *qen;
Jing Huang7725ccf2009-09-23 17:46:15 -0700374
375 list_for_each_safe(qe, qen, &fcpim->itnim_q) {
376 itnim = (struct bfa_itnim_s *) qe;
377 bfa_itnim_iocdisable(itnim);
378 }
379}
380
/*
 * Add every counter of @rstats into @lstats. Used to fold per-itnim
 * statistics into port- and module-level totals.
 */
void
bfa_fcpim_add_stats(struct bfa_itnim_iostats_s *lstats,
		struct bfa_itnim_iostats_s *rstats)
{
	bfa_fcpim_add_iostats(lstats, rstats, total_ios);
	bfa_fcpim_add_iostats(lstats, rstats, qresumes);
	bfa_fcpim_add_iostats(lstats, rstats, no_iotags);
	bfa_fcpim_add_iostats(lstats, rstats, io_aborts);
	bfa_fcpim_add_iostats(lstats, rstats, no_tskims);
	bfa_fcpim_add_iostats(lstats, rstats, iocomp_ok);
	bfa_fcpim_add_iostats(lstats, rstats, iocomp_underrun);
	bfa_fcpim_add_iostats(lstats, rstats, iocomp_overrun);
	bfa_fcpim_add_iostats(lstats, rstats, iocomp_aborted);
	bfa_fcpim_add_iostats(lstats, rstats, iocomp_timedout);
	bfa_fcpim_add_iostats(lstats, rstats, iocom_nexus_abort);
	bfa_fcpim_add_iostats(lstats, rstats, iocom_proto_err);
	bfa_fcpim_add_iostats(lstats, rstats, iocom_dif_err);
	bfa_fcpim_add_iostats(lstats, rstats, iocom_sqer_needed);
	bfa_fcpim_add_iostats(lstats, rstats, iocom_res_free);
	bfa_fcpim_add_iostats(lstats, rstats, iocom_hostabrts);
	bfa_fcpim_add_iostats(lstats, rstats, iocom_utags);
	bfa_fcpim_add_iostats(lstats, rstats, io_cleanups);
	bfa_fcpim_add_iostats(lstats, rstats, io_tmaborts);
	bfa_fcpim_add_iostats(lstats, rstats, onlines);
	bfa_fcpim_add_iostats(lstats, rstats, offlines);
	bfa_fcpim_add_iostats(lstats, rstats, creates);
	bfa_fcpim_add_iostats(lstats, rstats, deletes);
	bfa_fcpim_add_iostats(lstats, rstats, create_comps);
	bfa_fcpim_add_iostats(lstats, rstats, delete_comps);
	bfa_fcpim_add_iostats(lstats, rstats, sler_events);
	bfa_fcpim_add_iostats(lstats, rstats, fw_create);
	bfa_fcpim_add_iostats(lstats, rstats, fw_delete);
	bfa_fcpim_add_iostats(lstats, rstats, ioc_disabled);
	bfa_fcpim_add_iostats(lstats, rstats, cleanup_comps);
	bfa_fcpim_add_iostats(lstats, rstats, tm_cmnds);
	bfa_fcpim_add_iostats(lstats, rstats, tm_fw_rsps);
	bfa_fcpim_add_iostats(lstats, rstats, tm_success);
	bfa_fcpim_add_iostats(lstats, rstats, tm_failures);
	bfa_fcpim_add_iostats(lstats, rstats, tm_io_comps);
	bfa_fcpim_add_iostats(lstats, rstats, tm_qresumes);
	bfa_fcpim_add_iostats(lstats, rstats, tm_iocdowns);
	bfa_fcpim_add_iostats(lstats, rstats, tm_cleanups);
	bfa_fcpim_add_iostats(lstats, rstats, tm_cleanup_comps);
	bfa_fcpim_add_iostats(lstats, rstats, io_comps);
	bfa_fcpim_add_iostats(lstats, rstats, input_reqs);
	bfa_fcpim_add_iostats(lstats, rstats, output_reqs);
	bfa_fcpim_add_iostats(lstats, rstats, rd_throughput);
	bfa_fcpim_add_iostats(lstats, rstats, wr_throughput);
}
430
/*
 * Set the path timeout. @path_tov is in the units returned by
 * bfa_fcpim_path_tov_get() (seconds); it is stored internally
 * multiplied by 1000 (milliseconds) and capped at
 * BFA_FCPIM_PATHTOV_MAX.
 */
void
bfa_fcpim_path_tov_set(struct bfa_s *bfa, u16 path_tov)
{
	struct bfa_fcpim_mod_s *fcpim = BFA_FCPIM_MOD(bfa);

	/* store in ms, then clamp to the supported maximum */
	fcpim->path_tov = path_tov * 1000;
	if (fcpim->path_tov > BFA_FCPIM_PATHTOV_MAX)
		fcpim->path_tov = BFA_FCPIM_PATHTOV_MAX;
}
440
441u16
442bfa_fcpim_path_tov_get(struct bfa_s *bfa)
443{
444 struct bfa_fcpim_mod_s *fcpim = BFA_FCPIM_MOD(bfa);
445
Jing Huangf8ceafd2009-09-25 12:29:54 -0700446 return fcpim->path_tov / 1000;
Jing Huang7725ccf2009-09-23 17:46:15 -0700447}
448
449bfa_status_t
Krishna Gudipatia36c61f2010-09-15 11:50:55 -0700450bfa_fcpim_port_iostats(struct bfa_s *bfa, struct bfa_itnim_iostats_s *stats,
451 u8 lp_tag)
452{
453 struct bfa_fcpim_mod_s *fcpim = BFA_FCPIM_MOD(bfa);
454 struct list_head *qe, *qen;
455 struct bfa_itnim_s *itnim;
456
457 /* accumulate IO stats from itnim */
Jing Huang6a18b162010-10-18 17:08:54 -0700458 memset(stats, 0, sizeof(struct bfa_itnim_iostats_s));
Krishna Gudipatia36c61f2010-09-15 11:50:55 -0700459 list_for_each_safe(qe, qen, &fcpim->itnim_q) {
460 itnim = (struct bfa_itnim_s *) qe;
461 if (itnim->rport->rport_info.lp_tag != lp_tag)
462 continue;
463 bfa_fcpim_add_stats(stats, &(itnim->stats));
464 }
465 return BFA_STATUS_OK;
466}
467bfa_status_t
468bfa_fcpim_get_modstats(struct bfa_s *bfa, struct bfa_itnim_iostats_s *modstats)
469{
470 struct bfa_fcpim_mod_s *fcpim = BFA_FCPIM_MOD(bfa);
471 struct list_head *qe, *qen;
472 struct bfa_itnim_s *itnim;
473
474 /* accumulate IO stats from itnim */
Jing Huang6a18b162010-10-18 17:08:54 -0700475 memset(modstats, 0, sizeof(struct bfa_itnim_iostats_s));
Krishna Gudipatia36c61f2010-09-15 11:50:55 -0700476 list_for_each_safe(qe, qen, &fcpim->itnim_q) {
477 itnim = (struct bfa_itnim_s *) qe;
478 bfa_fcpim_add_stats(modstats, &(itnim->stats));
479 }
480 return BFA_STATUS_OK;
481}
482
483bfa_status_t
484bfa_fcpim_get_del_itn_stats(struct bfa_s *bfa,
485 struct bfa_fcpim_del_itn_stats_s *modstats)
Jing Huang7725ccf2009-09-23 17:46:15 -0700486{
487 struct bfa_fcpim_mod_s *fcpim = BFA_FCPIM_MOD(bfa);
488
Krishna Gudipatia36c61f2010-09-15 11:50:55 -0700489 *modstats = fcpim->del_itn_stats;
Jing Huang7725ccf2009-09-23 17:46:15 -0700490
491 return BFA_STATUS_OK;
492}
493
Krishna Gudipatia36c61f2010-09-15 11:50:55 -0700494
495bfa_status_t
496bfa_fcpim_profile_on(struct bfa_s *bfa, u32 time)
497{
498 struct bfa_itnim_s *itnim;
499 struct bfa_fcpim_mod_s *fcpim = BFA_FCPIM_MOD(bfa);
500 struct list_head *qe, *qen;
501
502 /* accumulate IO stats from itnim */
503 list_for_each_safe(qe, qen, &fcpim->itnim_q) {
504 itnim = (struct bfa_itnim_s *) qe;
505 bfa_itnim_clear_stats(itnim);
506 }
507 fcpim->io_profile = BFA_TRUE;
508 fcpim->io_profile_start_time = time;
509 fcpim->profile_comp = bfa_ioim_profile_comp;
510 fcpim->profile_start = bfa_ioim_profile_start;
511
512 return BFA_STATUS_OK;
513}
514bfa_status_t
515bfa_fcpim_profile_off(struct bfa_s *bfa)
516{
517 struct bfa_fcpim_mod_s *fcpim = BFA_FCPIM_MOD(bfa);
518 fcpim->io_profile = BFA_FALSE;
519 fcpim->io_profile_start_time = 0;
520 fcpim->profile_comp = NULL;
521 fcpim->profile_start = NULL;
522 return BFA_STATUS_OK;
523}
524
525bfa_status_t
526bfa_fcpim_port_clear_iostats(struct bfa_s *bfa, u8 lp_tag)
527{
528 struct bfa_fcpim_mod_s *fcpim = BFA_FCPIM_MOD(bfa);
529 struct list_head *qe, *qen;
530 struct bfa_itnim_s *itnim;
531
532 /* clear IO stats from all active itnims */
533 list_for_each_safe(qe, qen, &fcpim->itnim_q) {
534 itnim = (struct bfa_itnim_s *) qe;
535 if (itnim->rport->rport_info.lp_tag != lp_tag)
536 continue;
537 bfa_itnim_clear_stats(itnim);
538 }
539 return BFA_STATUS_OK;
540
541}
542
Jing Huang7725ccf2009-09-23 17:46:15 -0700543bfa_status_t
544bfa_fcpim_clr_modstats(struct bfa_s *bfa)
545{
546 struct bfa_fcpim_mod_s *fcpim = BFA_FCPIM_MOD(bfa);
Krishna Gudipatia36c61f2010-09-15 11:50:55 -0700547 struct list_head *qe, *qen;
548 struct bfa_itnim_s *itnim;
Jing Huang7725ccf2009-09-23 17:46:15 -0700549
Krishna Gudipatia36c61f2010-09-15 11:50:55 -0700550 /* clear IO stats from all active itnims */
551 list_for_each_safe(qe, qen, &fcpim->itnim_q) {
552 itnim = (struct bfa_itnim_s *) qe;
553 bfa_itnim_clear_stats(itnim);
554 }
Jing Huang6a18b162010-10-18 17:08:54 -0700555 memset(&fcpim->del_itn_stats, 0,
Krishna Gudipatia36c61f2010-09-15 11:50:55 -0700556 sizeof(struct bfa_fcpim_del_itn_stats_s));
Jing Huang7725ccf2009-09-23 17:46:15 -0700557
558 return BFA_STATUS_OK;
559}
560
561void
562bfa_fcpim_qdepth_set(struct bfa_s *bfa, u16 q_depth)
563{
564 struct bfa_fcpim_mod_s *fcpim = BFA_FCPIM_MOD(bfa);
565
566 bfa_assert(q_depth <= BFA_IOCFC_QDEPTH_MAX);
567
568 fcpim->q_depth = q_depth;
569}
570
571u16
572bfa_fcpim_qdepth_get(struct bfa_s *bfa)
573{
574 struct bfa_fcpim_mod_s *fcpim = BFA_FCPIM_MOD(bfa);
575
Jing Huangf8ceafd2009-09-25 12:29:54 -0700576 return fcpim->q_depth;
Jing Huang7725ccf2009-09-23 17:46:15 -0700577}
578
void
bfa_fcpim_update_ioredirect(struct bfa_s *bfa)
{
	bfa_boolean_t ioredirect;

	/*
	 * IO redirection is turned off when QoS is enabled and vice versa
	 */
	ioredirect = bfa_fcport_is_qos_enabled(bfa) ? BFA_FALSE : BFA_TRUE;

	/*
	 * NOTE(review): the computed value is never applied -- nothing
	 * here stores it or calls bfa_fcpim_set_ioredirect(bfa,
	 * ioredirect). Looks like dead code or a missing call; confirm
	 * the intent before relying on this function.
	 */
}
589
590void
591bfa_fcpim_set_ioredirect(struct bfa_s *bfa, bfa_boolean_t state)
592{
593 struct bfa_fcpim_mod_s *fcpim = BFA_FCPIM_MOD(bfa);
594 fcpim->ioredirect = state;
595}
Krishna Gudipatia36c61f2010-09-15 11:50:55 -0700596
597
598
Jing Huang5fbe25c2010-10-18 17:17:23 -0700599/*
Krishna Gudipatia36c61f2010-09-15 11:50:55 -0700600 * BFA ITNIM module state machine functions
601 */
602
Jing Huang5fbe25c2010-10-18 17:17:23 -0700603/*
Krishna Gudipatia36c61f2010-09-15 11:50:55 -0700604 * Beginning/unallocated state - no events expected.
605 */
606static void
607bfa_itnim_sm_uninit(struct bfa_itnim_s *itnim, enum bfa_itnim_event event)
608{
609 bfa_trc(itnim->bfa, itnim->rport->rport_tag);
610 bfa_trc(itnim->bfa, event);
611
612 switch (event) {
613 case BFA_ITNIM_SM_CREATE:
614 bfa_sm_set_state(itnim, bfa_itnim_sm_created);
615 itnim->is_online = BFA_FALSE;
616 bfa_fcpim_additn(itnim);
617 break;
618
619 default:
620 bfa_sm_fault(itnim->bfa, event);
621 }
622}
623
/*
 * Beginning state, only online event expected.
 */
static void
bfa_itnim_sm_created(struct bfa_itnim_s *itnim, enum bfa_itnim_event event)
{
	bfa_trc(itnim->bfa, itnim->rport->rport_tag);
	bfa_trc(itnim->bfa, event);

	switch (event) {
	case BFA_ITNIM_SM_ONLINE:
		/* Push the create to f/w; park in _qfull if no CQ space. */
		if (bfa_itnim_send_fwcreate(itnim))
			bfa_sm_set_state(itnim, bfa_itnim_sm_fwcreate);
		else
			bfa_sm_set_state(itnim, bfa_itnim_sm_fwcreate_qfull);
		break;

	case BFA_ITNIM_SM_DELETE:
		/* Never went online: unlink and return to uninit. */
		bfa_sm_set_state(itnim, bfa_itnim_sm_uninit);
		bfa_fcpim_delitn(itnim);
		break;

	case BFA_ITNIM_SM_HWFAIL:
		bfa_sm_set_state(itnim, bfa_itnim_sm_iocdisable);
		break;

	default:
		bfa_sm_fault(itnim->bfa, event);
	}
}
654
/*
 * Waiting for itnim create response from firmware.
 */
static void
bfa_itnim_sm_fwcreate(struct bfa_itnim_s *itnim, enum bfa_itnim_event event)
{
	bfa_trc(itnim->bfa, itnim->rport->rport_tag);
	bfa_trc(itnim->bfa, event);

	switch (event) {
	case BFA_ITNIM_SM_FWRSP:
		/* f/w acked the create: the itnim is now online. */
		bfa_sm_set_state(itnim, bfa_itnim_sm_online);
		itnim->is_online = BFA_TRUE;
		bfa_itnim_iotov_online(itnim);
		bfa_itnim_online_cb(itnim);
		break;

	case BFA_ITNIM_SM_DELETE:
		/* Delete must wait for the outstanding f/w response. */
		bfa_sm_set_state(itnim, bfa_itnim_sm_delete_pending);
		break;

	case BFA_ITNIM_SM_OFFLINE:
		/* Undo the create with a f/w delete. */
		if (bfa_itnim_send_fwdelete(itnim))
			bfa_sm_set_state(itnim, bfa_itnim_sm_fwdelete);
		else
			bfa_sm_set_state(itnim, bfa_itnim_sm_fwdelete_qfull);
		break;

	case BFA_ITNIM_SM_HWFAIL:
		bfa_sm_set_state(itnim, bfa_itnim_sm_iocdisable);
		break;

	default:
		bfa_sm_fault(itnim->bfa, event);
	}
}
691
/*
 * f/w create could not be queued (request queue full); waiting for
 * queue space.
 */
static void
bfa_itnim_sm_fwcreate_qfull(struct bfa_itnim_s *itnim,
		enum bfa_itnim_event event)
{
	bfa_trc(itnim->bfa, itnim->rport->rport_tag);
	bfa_trc(itnim->bfa, event);

	switch (event) {
	case BFA_ITNIM_SM_QRESUME:
		/* Queue space is back: retry the f/w create. */
		bfa_sm_set_state(itnim, bfa_itnim_sm_fwcreate);
		bfa_itnim_send_fwcreate(itnim);
		break;

	case BFA_ITNIM_SM_DELETE:
		/* Nothing reached f/w yet: cancel the wait and free. */
		bfa_sm_set_state(itnim, bfa_itnim_sm_uninit);
		bfa_reqq_wcancel(&itnim->reqq_wait);
		bfa_fcpim_delitn(itnim);
		break;

	case BFA_ITNIM_SM_OFFLINE:
		bfa_sm_set_state(itnim, bfa_itnim_sm_offline);
		bfa_reqq_wcancel(&itnim->reqq_wait);
		bfa_itnim_offline_cb(itnim);
		break;

	case BFA_ITNIM_SM_HWFAIL:
		bfa_sm_set_state(itnim, bfa_itnim_sm_iocdisable);
		bfa_reqq_wcancel(&itnim->reqq_wait);
		break;

	default:
		bfa_sm_fault(itnim->bfa, event);
	}
}
726
/*
 * Waiting for itnim create response from firmware, a delete is pending.
 */
static void
bfa_itnim_sm_delete_pending(struct bfa_itnim_s *itnim,
		enum bfa_itnim_event event)
{
	bfa_trc(itnim->bfa, itnim->rport->rport_tag);
	bfa_trc(itnim->bfa, event);

	switch (event) {
	case BFA_ITNIM_SM_FWRSP:
		/* Create completed; immediately issue the f/w delete. */
		if (bfa_itnim_send_fwdelete(itnim))
			bfa_sm_set_state(itnim, bfa_itnim_sm_deleting);
		else
			bfa_sm_set_state(itnim, bfa_itnim_sm_deleting_qfull);
		break;

	case BFA_ITNIM_SM_HWFAIL:
		/* IOC is down: no f/w handshake needed, free right away. */
		bfa_sm_set_state(itnim, bfa_itnim_sm_uninit);
		bfa_fcpim_delitn(itnim);
		break;

	default:
		bfa_sm_fault(itnim->bfa, event);
	}
}
754
/*
 * Online state - normal parking state.
 */
static void
bfa_itnim_sm_online(struct bfa_itnim_s *itnim, enum bfa_itnim_event event)
{
	bfa_trc(itnim->bfa, itnim->rport->rport_tag);
	bfa_trc(itnim->bfa, event);

	switch (event) {
	case BFA_ITNIM_SM_OFFLINE:
		/* Start the IO TOV and drain active IOs before offline. */
		bfa_sm_set_state(itnim, bfa_itnim_sm_cleanup_offline);
		itnim->is_online = BFA_FALSE;
		bfa_itnim_iotov_start(itnim);
		bfa_itnim_cleanup(itnim);
		break;

	case BFA_ITNIM_SM_DELETE:
		/* Delete: drain IOs; no IO TOV since the itnim goes away. */
		bfa_sm_set_state(itnim, bfa_itnim_sm_cleanup_delete);
		itnim->is_online = BFA_FALSE;
		bfa_itnim_cleanup(itnim);
		break;

	case BFA_ITNIM_SM_SLER:
		/* Second-level error recovery needed. */
		bfa_sm_set_state(itnim, bfa_itnim_sm_sler);
		itnim->is_online = BFA_FALSE;
		bfa_itnim_iotov_start(itnim);
		bfa_itnim_sler_cb(itnim);
		break;

	case BFA_ITNIM_SM_HWFAIL:
		bfa_sm_set_state(itnim, bfa_itnim_sm_iocdisable);
		itnim->is_online = BFA_FALSE;
		bfa_itnim_iotov_start(itnim);
		bfa_itnim_iocdisable_cleanup(itnim);
		break;

	default:
		bfa_sm_fault(itnim->bfa, event);
	}
}
796
/*
 * Second level error recovery need.
 */
static void
bfa_itnim_sm_sler(struct bfa_itnim_s *itnim, enum bfa_itnim_event event)
{
	bfa_trc(itnim->bfa, itnim->rport->rport_tag);
	bfa_trc(itnim->bfa, event);

	switch (event) {
	case BFA_ITNIM_SM_OFFLINE:
		bfa_sm_set_state(itnim, bfa_itnim_sm_cleanup_offline);
		bfa_itnim_cleanup(itnim);
		break;

	case BFA_ITNIM_SM_DELETE:
		bfa_sm_set_state(itnim, bfa_itnim_sm_cleanup_delete);
		bfa_itnim_cleanup(itnim);
		bfa_itnim_iotov_delete(itnim);
		break;

	case BFA_ITNIM_SM_HWFAIL:
		bfa_sm_set_state(itnim, bfa_itnim_sm_iocdisable);
		bfa_itnim_iocdisable_cleanup(itnim);
		break;

	default:
		bfa_sm_fault(itnim->bfa, event);
	}
}
827
/*
 * Going offline. Waiting for active IO cleanup.
 */
static void
bfa_itnim_sm_cleanup_offline(struct bfa_itnim_s *itnim,
		enum bfa_itnim_event event)
{
	bfa_trc(itnim->bfa, itnim->rport->rport_tag);
	bfa_trc(itnim->bfa, event);

	switch (event) {
	case BFA_ITNIM_SM_CLEANUP:
		/* IOs drained: tear down the f/w itnim. */
		if (bfa_itnim_send_fwdelete(itnim))
			bfa_sm_set_state(itnim, bfa_itnim_sm_fwdelete);
		else
			bfa_sm_set_state(itnim, bfa_itnim_sm_fwdelete_qfull);
		break;

	case BFA_ITNIM_SM_DELETE:
		bfa_sm_set_state(itnim, bfa_itnim_sm_cleanup_delete);
		bfa_itnim_iotov_delete(itnim);
		break;

	case BFA_ITNIM_SM_HWFAIL:
		bfa_sm_set_state(itnim, bfa_itnim_sm_iocdisable);
		bfa_itnim_iocdisable_cleanup(itnim);
		bfa_itnim_offline_cb(itnim);
		break;

	case BFA_ITNIM_SM_SLER:
		/* Already cleaning up -- SLER is a no-op here. */
		break;

	default:
		bfa_sm_fault(itnim->bfa, event);
	}
}
864
/*
 * Deleting itnim. Waiting for active IO cleanup.
 */
static void
bfa_itnim_sm_cleanup_delete(struct bfa_itnim_s *itnim,
		enum bfa_itnim_event event)
{
	bfa_trc(itnim->bfa, itnim->rport->rport_tag);
	bfa_trc(itnim->bfa, event);

	switch (event) {
	case BFA_ITNIM_SM_CLEANUP:
		/* IOs drained: issue the f/w delete for this itnim. */
		if (bfa_itnim_send_fwdelete(itnim))
			bfa_sm_set_state(itnim, bfa_itnim_sm_deleting);
		else
			bfa_sm_set_state(itnim, bfa_itnim_sm_deleting_qfull);
		break;

	case BFA_ITNIM_SM_HWFAIL:
		bfa_sm_set_state(itnim, bfa_itnim_sm_iocdisable);
		bfa_itnim_iocdisable_cleanup(itnim);
		break;

	default:
		bfa_sm_fault(itnim->bfa, event);
	}
}
892
Jing Huang5fbe25c2010-10-18 17:17:23 -0700893/*
Krishna Gudipatia36c61f2010-09-15 11:50:55 -0700894 * Rport offline. Fimrware itnim is being deleted - awaiting f/w response.
895 */
896static void
897bfa_itnim_sm_fwdelete(struct bfa_itnim_s *itnim, enum bfa_itnim_event event)
898{
899 bfa_trc(itnim->bfa, itnim->rport->rport_tag);
900 bfa_trc(itnim->bfa, event);
901
902 switch (event) {
903 case BFA_ITNIM_SM_FWRSP:
904 bfa_sm_set_state(itnim, bfa_itnim_sm_offline);
905 bfa_itnim_offline_cb(itnim);
906 break;
907
908 case BFA_ITNIM_SM_DELETE:
909 bfa_sm_set_state(itnim, bfa_itnim_sm_deleting);
910 break;
911
912 case BFA_ITNIM_SM_HWFAIL:
913 bfa_sm_set_state(itnim, bfa_itnim_sm_iocdisable);
914 bfa_itnim_offline_cb(itnim);
915 break;
916
917 default:
918 bfa_sm_fault(itnim->bfa, event);
919 }
920}
921
922static void
923bfa_itnim_sm_fwdelete_qfull(struct bfa_itnim_s *itnim,
924 enum bfa_itnim_event event)
925{
926 bfa_trc(itnim->bfa, itnim->rport->rport_tag);
927 bfa_trc(itnim->bfa, event);
928
929 switch (event) {
930 case BFA_ITNIM_SM_QRESUME:
931 bfa_sm_set_state(itnim, bfa_itnim_sm_fwdelete);
932 bfa_itnim_send_fwdelete(itnim);
933 break;
934
935 case BFA_ITNIM_SM_DELETE:
936 bfa_sm_set_state(itnim, bfa_itnim_sm_deleting_qfull);
937 break;
938
939 case BFA_ITNIM_SM_HWFAIL:
940 bfa_sm_set_state(itnim, bfa_itnim_sm_iocdisable);
941 bfa_reqq_wcancel(&itnim->reqq_wait);
942 bfa_itnim_offline_cb(itnim);
943 break;
944
945 default:
946 bfa_sm_fault(itnim->bfa, event);
947 }
948}
949
Jing Huang5fbe25c2010-10-18 17:17:23 -0700950/*
Krishna Gudipatia36c61f2010-09-15 11:50:55 -0700951 * Offline state.
952 */
953static void
954bfa_itnim_sm_offline(struct bfa_itnim_s *itnim, enum bfa_itnim_event event)
955{
956 bfa_trc(itnim->bfa, itnim->rport->rport_tag);
957 bfa_trc(itnim->bfa, event);
958
959 switch (event) {
960 case BFA_ITNIM_SM_DELETE:
961 bfa_sm_set_state(itnim, bfa_itnim_sm_uninit);
962 bfa_itnim_iotov_delete(itnim);
963 bfa_fcpim_delitn(itnim);
964 break;
965
966 case BFA_ITNIM_SM_ONLINE:
967 if (bfa_itnim_send_fwcreate(itnim))
968 bfa_sm_set_state(itnim, bfa_itnim_sm_fwcreate);
969 else
970 bfa_sm_set_state(itnim, bfa_itnim_sm_fwcreate_qfull);
971 break;
972
973 case BFA_ITNIM_SM_HWFAIL:
974 bfa_sm_set_state(itnim, bfa_itnim_sm_iocdisable);
975 break;
976
977 default:
978 bfa_sm_fault(itnim->bfa, event);
979 }
980}
981
Jing Huang5fbe25c2010-10-18 17:17:23 -0700982/*
Krishna Gudipatia36c61f2010-09-15 11:50:55 -0700983 * IOC h/w failed state.
984 */
985static void
986bfa_itnim_sm_iocdisable(struct bfa_itnim_s *itnim,
987 enum bfa_itnim_event event)
988{
989 bfa_trc(itnim->bfa, itnim->rport->rport_tag);
990 bfa_trc(itnim->bfa, event);
991
992 switch (event) {
993 case BFA_ITNIM_SM_DELETE:
994 bfa_sm_set_state(itnim, bfa_itnim_sm_uninit);
995 bfa_itnim_iotov_delete(itnim);
996 bfa_fcpim_delitn(itnim);
997 break;
998
999 case BFA_ITNIM_SM_OFFLINE:
1000 bfa_itnim_offline_cb(itnim);
1001 break;
1002
1003 case BFA_ITNIM_SM_ONLINE:
1004 if (bfa_itnim_send_fwcreate(itnim))
1005 bfa_sm_set_state(itnim, bfa_itnim_sm_fwcreate);
1006 else
1007 bfa_sm_set_state(itnim, bfa_itnim_sm_fwcreate_qfull);
1008 break;
1009
1010 case BFA_ITNIM_SM_HWFAIL:
1011 break;
1012
1013 default:
1014 bfa_sm_fault(itnim->bfa, event);
1015 }
1016}
1017
Jing Huang5fbe25c2010-10-18 17:17:23 -07001018/*
Krishna Gudipatia36c61f2010-09-15 11:50:55 -07001019 * Itnim is deleted, waiting for firmware response to delete.
1020 */
1021static void
1022bfa_itnim_sm_deleting(struct bfa_itnim_s *itnim, enum bfa_itnim_event event)
1023{
1024 bfa_trc(itnim->bfa, itnim->rport->rport_tag);
1025 bfa_trc(itnim->bfa, event);
1026
1027 switch (event) {
1028 case BFA_ITNIM_SM_FWRSP:
1029 case BFA_ITNIM_SM_HWFAIL:
1030 bfa_sm_set_state(itnim, bfa_itnim_sm_uninit);
1031 bfa_fcpim_delitn(itnim);
1032 break;
1033
1034 default:
1035 bfa_sm_fault(itnim->bfa, event);
1036 }
1037}
1038
1039static void
1040bfa_itnim_sm_deleting_qfull(struct bfa_itnim_s *itnim,
1041 enum bfa_itnim_event event)
1042{
1043 bfa_trc(itnim->bfa, itnim->rport->rport_tag);
1044 bfa_trc(itnim->bfa, event);
1045
1046 switch (event) {
1047 case BFA_ITNIM_SM_QRESUME:
1048 bfa_sm_set_state(itnim, bfa_itnim_sm_deleting);
1049 bfa_itnim_send_fwdelete(itnim);
1050 break;
1051
1052 case BFA_ITNIM_SM_HWFAIL:
1053 bfa_sm_set_state(itnim, bfa_itnim_sm_uninit);
1054 bfa_reqq_wcancel(&itnim->reqq_wait);
1055 bfa_fcpim_delitn(itnim);
1056 break;
1057
1058 default:
1059 bfa_sm_fault(itnim->bfa, event);
1060 }
1061}
1062
Jing Huang5fbe25c2010-10-18 17:17:23 -07001063/*
Krishna Gudipatia36c61f2010-09-15 11:50:55 -07001064 * Initiate cleanup of all IOs on an IOC failure.
1065 */
1066static void
1067bfa_itnim_iocdisable_cleanup(struct bfa_itnim_s *itnim)
1068{
1069 struct bfa_tskim_s *tskim;
1070 struct bfa_ioim_s *ioim;
1071 struct list_head *qe, *qen;
1072
1073 list_for_each_safe(qe, qen, &itnim->tsk_q) {
1074 tskim = (struct bfa_tskim_s *) qe;
1075 bfa_tskim_iocdisable(tskim);
1076 }
1077
1078 list_for_each_safe(qe, qen, &itnim->io_q) {
1079 ioim = (struct bfa_ioim_s *) qe;
1080 bfa_ioim_iocdisable(ioim);
1081 }
1082
Jing Huang5fbe25c2010-10-18 17:17:23 -07001083 /*
Krishna Gudipatia36c61f2010-09-15 11:50:55 -07001084 * For IO request in pending queue, we pretend an early timeout.
1085 */
1086 list_for_each_safe(qe, qen, &itnim->pending_q) {
1087 ioim = (struct bfa_ioim_s *) qe;
1088 bfa_ioim_tov(ioim);
1089 }
1090
1091 list_for_each_safe(qe, qen, &itnim->io_cleanup_q) {
1092 ioim = (struct bfa_ioim_s *) qe;
1093 bfa_ioim_iocdisable(ioim);
1094 }
1095}
1096
Jing Huang5fbe25c2010-10-18 17:17:23 -07001097/*
Krishna Gudipatia36c61f2010-09-15 11:50:55 -07001098 * IO cleanup completion
1099 */
1100static void
1101bfa_itnim_cleanp_comp(void *itnim_cbarg)
1102{
1103 struct bfa_itnim_s *itnim = itnim_cbarg;
1104
1105 bfa_stats(itnim, cleanup_comps);
1106 bfa_sm_send_event(itnim, BFA_ITNIM_SM_CLEANUP);
1107}
1108
Jing Huang5fbe25c2010-10-18 17:17:23 -07001109/*
Krishna Gudipatia36c61f2010-09-15 11:50:55 -07001110 * Initiate cleanup of all IOs.
1111 */
1112static void
1113bfa_itnim_cleanup(struct bfa_itnim_s *itnim)
1114{
1115 struct bfa_ioim_s *ioim;
1116 struct bfa_tskim_s *tskim;
1117 struct list_head *qe, *qen;
1118
1119 bfa_wc_init(&itnim->wc, bfa_itnim_cleanp_comp, itnim);
1120
1121 list_for_each_safe(qe, qen, &itnim->io_q) {
1122 ioim = (struct bfa_ioim_s *) qe;
1123
Jing Huang5fbe25c2010-10-18 17:17:23 -07001124 /*
Krishna Gudipatia36c61f2010-09-15 11:50:55 -07001125 * Move IO to a cleanup queue from active queue so that a later
1126 * TM will not pickup this IO.
1127 */
1128 list_del(&ioim->qe);
1129 list_add_tail(&ioim->qe, &itnim->io_cleanup_q);
1130
1131 bfa_wc_up(&itnim->wc);
1132 bfa_ioim_cleanup(ioim);
1133 }
1134
1135 list_for_each_safe(qe, qen, &itnim->tsk_q) {
1136 tskim = (struct bfa_tskim_s *) qe;
1137 bfa_wc_up(&itnim->wc);
1138 bfa_tskim_cleanup(tskim);
1139 }
1140
1141 bfa_wc_wait(&itnim->wc);
1142}
1143
1144static void
1145__bfa_cb_itnim_online(void *cbarg, bfa_boolean_t complete)
1146{
1147 struct bfa_itnim_s *itnim = cbarg;
1148
1149 if (complete)
1150 bfa_cb_itnim_online(itnim->ditn);
1151}
1152
1153static void
1154__bfa_cb_itnim_offline(void *cbarg, bfa_boolean_t complete)
1155{
1156 struct bfa_itnim_s *itnim = cbarg;
1157
1158 if (complete)
1159 bfa_cb_itnim_offline(itnim->ditn);
1160}
1161
1162static void
1163__bfa_cb_itnim_sler(void *cbarg, bfa_boolean_t complete)
1164{
1165 struct bfa_itnim_s *itnim = cbarg;
1166
1167 if (complete)
1168 bfa_cb_itnim_sler(itnim->ditn);
1169}
1170
Jing Huang5fbe25c2010-10-18 17:17:23 -07001171/*
Krishna Gudipatia36c61f2010-09-15 11:50:55 -07001172 * Call to resume any I/O requests waiting for room in request queue.
1173 */
1174static void
1175bfa_itnim_qresume(void *cbarg)
1176{
1177 struct bfa_itnim_s *itnim = cbarg;
1178
1179 bfa_sm_send_event(itnim, BFA_ITNIM_SM_QRESUME);
1180}
1181
1182
1183
1184
Jing Huang5fbe25c2010-10-18 17:17:23 -07001185/*
Krishna Gudipatia36c61f2010-09-15 11:50:55 -07001186 * bfa_itnim_public
1187 */
1188
1189void
1190bfa_itnim_iodone(struct bfa_itnim_s *itnim)
1191{
1192 bfa_wc_down(&itnim->wc);
1193}
1194
1195void
1196bfa_itnim_tskdone(struct bfa_itnim_s *itnim)
1197{
1198 bfa_wc_down(&itnim->wc);
1199}
1200
1201void
1202bfa_itnim_meminfo(struct bfa_iocfc_cfg_s *cfg, u32 *km_len,
1203 u32 *dm_len)
1204{
Jing Huang5fbe25c2010-10-18 17:17:23 -07001205 /*
Krishna Gudipatia36c61f2010-09-15 11:50:55 -07001206 * ITN memory
1207 */
1208 *km_len += cfg->fwcfg.num_rports * sizeof(struct bfa_itnim_s);
1209}
1210
1211void
1212bfa_itnim_attach(struct bfa_fcpim_mod_s *fcpim, struct bfa_meminfo_s *minfo)
1213{
1214 struct bfa_s *bfa = fcpim->bfa;
1215 struct bfa_itnim_s *itnim;
1216 int i, j;
1217
1218 INIT_LIST_HEAD(&fcpim->itnim_q);
1219
1220 itnim = (struct bfa_itnim_s *) bfa_meminfo_kva(minfo);
1221 fcpim->itnim_arr = itnim;
1222
1223 for (i = 0; i < fcpim->num_itnims; i++, itnim++) {
Jing Huang6a18b162010-10-18 17:08:54 -07001224 memset(itnim, 0, sizeof(struct bfa_itnim_s));
Krishna Gudipatia36c61f2010-09-15 11:50:55 -07001225 itnim->bfa = bfa;
1226 itnim->fcpim = fcpim;
1227 itnim->reqq = BFA_REQQ_QOS_LO;
1228 itnim->rport = BFA_RPORT_FROM_TAG(bfa, i);
1229 itnim->iotov_active = BFA_FALSE;
1230 bfa_reqq_winit(&itnim->reqq_wait, bfa_itnim_qresume, itnim);
1231
1232 INIT_LIST_HEAD(&itnim->io_q);
1233 INIT_LIST_HEAD(&itnim->io_cleanup_q);
1234 INIT_LIST_HEAD(&itnim->pending_q);
1235 INIT_LIST_HEAD(&itnim->tsk_q);
1236 INIT_LIST_HEAD(&itnim->delay_comp_q);
1237 for (j = 0; j < BFA_IOBUCKET_MAX; j++)
1238 itnim->ioprofile.io_latency.min[j] = ~0;
1239 bfa_sm_set_state(itnim, bfa_itnim_sm_uninit);
1240 }
1241
1242 bfa_meminfo_kva(minfo) = (u8 *) itnim;
1243}
1244
1245void
1246bfa_itnim_iocdisable(struct bfa_itnim_s *itnim)
1247{
1248 bfa_stats(itnim, ioc_disabled);
1249 bfa_sm_send_event(itnim, BFA_ITNIM_SM_HWFAIL);
1250}
1251
1252static bfa_boolean_t
1253bfa_itnim_send_fwcreate(struct bfa_itnim_s *itnim)
1254{
1255 struct bfi_itnim_create_req_s *m;
1256
1257 itnim->msg_no++;
1258
Jing Huang5fbe25c2010-10-18 17:17:23 -07001259 /*
Krishna Gudipatia36c61f2010-09-15 11:50:55 -07001260 * check for room in queue to send request now
1261 */
1262 m = bfa_reqq_next(itnim->bfa, itnim->reqq);
1263 if (!m) {
1264 bfa_reqq_wait(itnim->bfa, itnim->reqq, &itnim->reqq_wait);
1265 return BFA_FALSE;
1266 }
1267
1268 bfi_h2i_set(m->mh, BFI_MC_ITNIM, BFI_ITNIM_H2I_CREATE_REQ,
1269 bfa_lpuid(itnim->bfa));
1270 m->fw_handle = itnim->rport->fw_handle;
1271 m->class = FC_CLASS_3;
1272 m->seq_rec = itnim->seq_rec;
1273 m->msg_no = itnim->msg_no;
1274 bfa_stats(itnim, fw_create);
1275
Jing Huang5fbe25c2010-10-18 17:17:23 -07001276 /*
Krishna Gudipatia36c61f2010-09-15 11:50:55 -07001277 * queue I/O message to firmware
1278 */
1279 bfa_reqq_produce(itnim->bfa, itnim->reqq);
1280 return BFA_TRUE;
1281}
1282
1283static bfa_boolean_t
1284bfa_itnim_send_fwdelete(struct bfa_itnim_s *itnim)
1285{
1286 struct bfi_itnim_delete_req_s *m;
1287
Jing Huang5fbe25c2010-10-18 17:17:23 -07001288 /*
Krishna Gudipatia36c61f2010-09-15 11:50:55 -07001289 * check for room in queue to send request now
1290 */
1291 m = bfa_reqq_next(itnim->bfa, itnim->reqq);
1292 if (!m) {
1293 bfa_reqq_wait(itnim->bfa, itnim->reqq, &itnim->reqq_wait);
1294 return BFA_FALSE;
1295 }
1296
1297 bfi_h2i_set(m->mh, BFI_MC_ITNIM, BFI_ITNIM_H2I_DELETE_REQ,
1298 bfa_lpuid(itnim->bfa));
1299 m->fw_handle = itnim->rport->fw_handle;
1300 bfa_stats(itnim, fw_delete);
1301
Jing Huang5fbe25c2010-10-18 17:17:23 -07001302 /*
Krishna Gudipatia36c61f2010-09-15 11:50:55 -07001303 * queue I/O message to firmware
1304 */
1305 bfa_reqq_produce(itnim->bfa, itnim->reqq);
1306 return BFA_TRUE;
1307}
1308
Jing Huang5fbe25c2010-10-18 17:17:23 -07001309/*
Krishna Gudipatia36c61f2010-09-15 11:50:55 -07001310 * Cleanup all pending failed inflight requests.
1311 */
1312static void
1313bfa_itnim_delayed_comp(struct bfa_itnim_s *itnim, bfa_boolean_t iotov)
1314{
1315 struct bfa_ioim_s *ioim;
1316 struct list_head *qe, *qen;
1317
1318 list_for_each_safe(qe, qen, &itnim->delay_comp_q) {
1319 ioim = (struct bfa_ioim_s *)qe;
1320 bfa_ioim_delayed_comp(ioim, iotov);
1321 }
1322}
1323
Jing Huang5fbe25c2010-10-18 17:17:23 -07001324/*
Krishna Gudipatia36c61f2010-09-15 11:50:55 -07001325 * Start all pending IO requests.
1326 */
1327static void
1328bfa_itnim_iotov_online(struct bfa_itnim_s *itnim)
1329{
1330 struct bfa_ioim_s *ioim;
1331
1332 bfa_itnim_iotov_stop(itnim);
1333
Jing Huang5fbe25c2010-10-18 17:17:23 -07001334 /*
Krishna Gudipatia36c61f2010-09-15 11:50:55 -07001335 * Abort all inflight IO requests in the queue
1336 */
1337 bfa_itnim_delayed_comp(itnim, BFA_FALSE);
1338
Jing Huang5fbe25c2010-10-18 17:17:23 -07001339 /*
Krishna Gudipatia36c61f2010-09-15 11:50:55 -07001340 * Start all pending IO requests.
1341 */
1342 while (!list_empty(&itnim->pending_q)) {
1343 bfa_q_deq(&itnim->pending_q, &ioim);
1344 list_add_tail(&ioim->qe, &itnim->io_q);
1345 bfa_ioim_start(ioim);
1346 }
1347}
1348
Jing Huang5fbe25c2010-10-18 17:17:23 -07001349/*
Krishna Gudipatia36c61f2010-09-15 11:50:55 -07001350 * Fail all pending IO requests
1351 */
1352static void
1353bfa_itnim_iotov_cleanup(struct bfa_itnim_s *itnim)
1354{
1355 struct bfa_ioim_s *ioim;
1356
Jing Huang5fbe25c2010-10-18 17:17:23 -07001357 /*
Krishna Gudipatia36c61f2010-09-15 11:50:55 -07001358 * Fail all inflight IO requests in the queue
1359 */
1360 bfa_itnim_delayed_comp(itnim, BFA_TRUE);
1361
Jing Huang5fbe25c2010-10-18 17:17:23 -07001362 /*
Krishna Gudipatia36c61f2010-09-15 11:50:55 -07001363 * Fail any pending IO requests.
1364 */
1365 while (!list_empty(&itnim->pending_q)) {
1366 bfa_q_deq(&itnim->pending_q, &ioim);
1367 list_add_tail(&ioim->qe, &ioim->fcpim->ioim_comp_q);
1368 bfa_ioim_tov(ioim);
1369 }
1370}
1371
Jing Huang5fbe25c2010-10-18 17:17:23 -07001372/*
Krishna Gudipatia36c61f2010-09-15 11:50:55 -07001373 * IO TOV timer callback. Fail any pending IO requests.
1374 */
1375static void
1376bfa_itnim_iotov(void *itnim_arg)
1377{
1378 struct bfa_itnim_s *itnim = itnim_arg;
1379
1380 itnim->iotov_active = BFA_FALSE;
1381
1382 bfa_cb_itnim_tov_begin(itnim->ditn);
1383 bfa_itnim_iotov_cleanup(itnim);
1384 bfa_cb_itnim_tov(itnim->ditn);
1385}
1386
Jing Huang5fbe25c2010-10-18 17:17:23 -07001387/*
Krishna Gudipatia36c61f2010-09-15 11:50:55 -07001388 * Start IO TOV timer for failing back pending IO requests in offline state.
1389 */
1390static void
1391bfa_itnim_iotov_start(struct bfa_itnim_s *itnim)
1392{
1393 if (itnim->fcpim->path_tov > 0) {
1394
1395 itnim->iotov_active = BFA_TRUE;
1396 bfa_assert(bfa_itnim_hold_io(itnim));
1397 bfa_timer_start(itnim->bfa, &itnim->timer,
1398 bfa_itnim_iotov, itnim, itnim->fcpim->path_tov);
1399 }
1400}
1401
Jing Huang5fbe25c2010-10-18 17:17:23 -07001402/*
Krishna Gudipatia36c61f2010-09-15 11:50:55 -07001403 * Stop IO TOV timer.
1404 */
1405static void
1406bfa_itnim_iotov_stop(struct bfa_itnim_s *itnim)
1407{
1408 if (itnim->iotov_active) {
1409 itnim->iotov_active = BFA_FALSE;
1410 bfa_timer_stop(&itnim->timer);
1411 }
1412}
1413
Jing Huang5fbe25c2010-10-18 17:17:23 -07001414/*
Krishna Gudipatia36c61f2010-09-15 11:50:55 -07001415 * Stop IO TOV timer.
1416 */
1417static void
1418bfa_itnim_iotov_delete(struct bfa_itnim_s *itnim)
1419{
1420 bfa_boolean_t pathtov_active = BFA_FALSE;
1421
1422 if (itnim->iotov_active)
1423 pathtov_active = BFA_TRUE;
1424
1425 bfa_itnim_iotov_stop(itnim);
1426 if (pathtov_active)
1427 bfa_cb_itnim_tov_begin(itnim->ditn);
1428 bfa_itnim_iotov_cleanup(itnim);
1429 if (pathtov_active)
1430 bfa_cb_itnim_tov(itnim->ditn);
1431}
1432
1433static void
1434bfa_itnim_update_del_itn_stats(struct bfa_itnim_s *itnim)
1435{
1436 struct bfa_fcpim_mod_s *fcpim = BFA_FCPIM_MOD(itnim->bfa);
1437 fcpim->del_itn_stats.del_itn_iocomp_aborted +=
1438 itnim->stats.iocomp_aborted;
1439 fcpim->del_itn_stats.del_itn_iocomp_timedout +=
1440 itnim->stats.iocomp_timedout;
1441 fcpim->del_itn_stats.del_itn_iocom_sqer_needed +=
1442 itnim->stats.iocom_sqer_needed;
1443 fcpim->del_itn_stats.del_itn_iocom_res_free +=
1444 itnim->stats.iocom_res_free;
1445 fcpim->del_itn_stats.del_itn_iocom_hostabrts +=
1446 itnim->stats.iocom_hostabrts;
1447 fcpim->del_itn_stats.del_itn_total_ios += itnim->stats.total_ios;
1448 fcpim->del_itn_stats.del_io_iocdowns += itnim->stats.io_iocdowns;
1449 fcpim->del_itn_stats.del_tm_iocdowns += itnim->stats.tm_iocdowns;
1450}
1451
1452
1453
Jing Huang5fbe25c2010-10-18 17:17:23 -07001454/*
Krishna Gudipatia36c61f2010-09-15 11:50:55 -07001455 * bfa_itnim_public
1456 */
1457
Jing Huang5fbe25c2010-10-18 17:17:23 -07001458/*
Krishna Gudipatia36c61f2010-09-15 11:50:55 -07001459 * Itnim interrupt processing.
1460 */
1461void
1462bfa_itnim_isr(struct bfa_s *bfa, struct bfi_msg_s *m)
1463{
1464 struct bfa_fcpim_mod_s *fcpim = BFA_FCPIM_MOD(bfa);
1465 union bfi_itnim_i2h_msg_u msg;
1466 struct bfa_itnim_s *itnim;
1467
1468 bfa_trc(bfa, m->mhdr.msg_id);
1469
1470 msg.msg = m;
1471
1472 switch (m->mhdr.msg_id) {
1473 case BFI_ITNIM_I2H_CREATE_RSP:
1474 itnim = BFA_ITNIM_FROM_TAG(fcpim,
1475 msg.create_rsp->bfa_handle);
1476 bfa_assert(msg.create_rsp->status == BFA_STATUS_OK);
1477 bfa_stats(itnim, create_comps);
1478 bfa_sm_send_event(itnim, BFA_ITNIM_SM_FWRSP);
1479 break;
1480
1481 case BFI_ITNIM_I2H_DELETE_RSP:
1482 itnim = BFA_ITNIM_FROM_TAG(fcpim,
1483 msg.delete_rsp->bfa_handle);
1484 bfa_assert(msg.delete_rsp->status == BFA_STATUS_OK);
1485 bfa_stats(itnim, delete_comps);
1486 bfa_sm_send_event(itnim, BFA_ITNIM_SM_FWRSP);
1487 break;
1488
1489 case BFI_ITNIM_I2H_SLER_EVENT:
1490 itnim = BFA_ITNIM_FROM_TAG(fcpim,
1491 msg.sler_event->bfa_handle);
1492 bfa_stats(itnim, sler_events);
1493 bfa_sm_send_event(itnim, BFA_ITNIM_SM_SLER);
1494 break;
1495
1496 default:
1497 bfa_trc(bfa, m->mhdr.msg_id);
1498 bfa_assert(0);
1499 }
1500}
1501
1502
1503
Jing Huang5fbe25c2010-10-18 17:17:23 -07001504/*
Krishna Gudipatia36c61f2010-09-15 11:50:55 -07001505 * bfa_itnim_api
1506 */
1507
1508struct bfa_itnim_s *
1509bfa_itnim_create(struct bfa_s *bfa, struct bfa_rport_s *rport, void *ditn)
1510{
1511 struct bfa_fcpim_mod_s *fcpim = BFA_FCPIM_MOD(bfa);
1512 struct bfa_itnim_s *itnim;
1513
1514 itnim = BFA_ITNIM_FROM_TAG(fcpim, rport->rport_tag);
1515 bfa_assert(itnim->rport == rport);
1516
1517 itnim->ditn = ditn;
1518
1519 bfa_stats(itnim, creates);
1520 bfa_sm_send_event(itnim, BFA_ITNIM_SM_CREATE);
1521
1522 return itnim;
1523}
1524
1525void
1526bfa_itnim_delete(struct bfa_itnim_s *itnim)
1527{
1528 bfa_stats(itnim, deletes);
1529 bfa_sm_send_event(itnim, BFA_ITNIM_SM_DELETE);
1530}
1531
1532void
1533bfa_itnim_online(struct bfa_itnim_s *itnim, bfa_boolean_t seq_rec)
1534{
1535 itnim->seq_rec = seq_rec;
1536 bfa_stats(itnim, onlines);
1537 bfa_sm_send_event(itnim, BFA_ITNIM_SM_ONLINE);
1538}
1539
1540void
1541bfa_itnim_offline(struct bfa_itnim_s *itnim)
1542{
1543 bfa_stats(itnim, offlines);
1544 bfa_sm_send_event(itnim, BFA_ITNIM_SM_OFFLINE);
1545}
1546
Jing Huang5fbe25c2010-10-18 17:17:23 -07001547/*
Krishna Gudipatia36c61f2010-09-15 11:50:55 -07001548 * Return true if itnim is considered offline for holding off IO request.
1549 * IO is not held if itnim is being deleted.
1550 */
1551bfa_boolean_t
1552bfa_itnim_hold_io(struct bfa_itnim_s *itnim)
1553{
1554 return itnim->fcpim->path_tov && itnim->iotov_active &&
1555 (bfa_sm_cmp_state(itnim, bfa_itnim_sm_fwcreate) ||
1556 bfa_sm_cmp_state(itnim, bfa_itnim_sm_sler) ||
1557 bfa_sm_cmp_state(itnim, bfa_itnim_sm_cleanup_offline) ||
1558 bfa_sm_cmp_state(itnim, bfa_itnim_sm_fwdelete) ||
1559 bfa_sm_cmp_state(itnim, bfa_itnim_sm_offline) ||
1560 bfa_sm_cmp_state(itnim, bfa_itnim_sm_iocdisable));
1561}
1562
Krishna Gudipatia36c61f2010-09-15 11:50:55 -07001563void
Krishna Gudipatia36c61f2010-09-15 11:50:55 -07001564bfa_itnim_clear_stats(struct bfa_itnim_s *itnim)
1565{
1566 int j;
Jing Huang6a18b162010-10-18 17:08:54 -07001567 memset(&itnim->stats, 0, sizeof(itnim->stats));
1568 memset(&itnim->ioprofile, 0, sizeof(itnim->ioprofile));
Krishna Gudipatia36c61f2010-09-15 11:50:55 -07001569 for (j = 0; j < BFA_IOBUCKET_MAX; j++)
1570 itnim->ioprofile.io_latency.min[j] = ~0;
1571}
1572
Jing Huang5fbe25c2010-10-18 17:17:23 -07001573/*
Krishna Gudipatia36c61f2010-09-15 11:50:55 -07001574 * BFA IO module state machine functions
1575 */
1576
/*
 * IO is not started (unallocated). On START the IO is dispatched to
 * firmware, queued for SG pages, held pending, or failed back with a
 * path-timeout, depending on itnim and resource state.
 */
static void
bfa_ioim_sm_uninit(struct bfa_ioim_s *ioim, enum bfa_ioim_event event)
{
	bfa_trc_fp(ioim->bfa, ioim->iotag);
	bfa_trc_fp(ioim->bfa, event);

	switch (event) {
	case BFA_IOIM_SM_START:
		if (!bfa_itnim_is_online(ioim->itnim)) {
			if (!bfa_itnim_hold_io(ioim->itnim)) {
				/*
				 * Itnim is offline and IO is not being held:
				 * fail the IO back immediately as a path
				 * timeout via the completion queue.
				 */
				bfa_sm_set_state(ioim, bfa_ioim_sm_hcb);
				list_del(&ioim->qe);
				list_add_tail(&ioim->qe,
					&ioim->fcpim->ioim_comp_q);
				bfa_cb_queue(ioim->bfa, &ioim->hcb_qe,
						__bfa_cb_ioim_pathtov, ioim);
			} else {
				/*
				 * Hold the IO on the itnim pending queue
				 * until the path recovers or IO TOV fires.
				 */
				list_del(&ioim->qe);
				list_add_tail(&ioim->qe,
					&ioim->itnim->pending_q);
			}
			break;
		}

		/* Large IOs need SG pages beyond the inline entries. */
		if (ioim->nsges > BFI_SGE_INLINE) {
			if (!bfa_ioim_sgpg_alloc(ioim)) {
				/* Wait for SG pages; note: return, not
				 * break - no further processing. */
				bfa_sm_set_state(ioim, bfa_ioim_sm_sgalloc);
				return;
			}
		}

		if (!bfa_ioim_send_ioreq(ioim)) {
			/* Request queue full; wait for queue resume. */
			bfa_sm_set_state(ioim, bfa_ioim_sm_qfull);
			break;
		}

		bfa_sm_set_state(ioim, bfa_ioim_sm_active);
		break;

	case BFA_IOIM_SM_IOTOV:
		/* Path timeout expired while the IO was held pending. */
		bfa_sm_set_state(ioim, bfa_ioim_sm_hcb);
		bfa_ioim_move_to_comp_q(ioim);
		bfa_cb_queue(ioim->bfa, &ioim->hcb_qe,
				__bfa_cb_ioim_pathtov, ioim);
		break;

	case BFA_IOIM_SM_ABORT:
		/*
		 * IO in pending queue can get abort requests. Complete abort
		 * requests immediately.
		 */
		bfa_sm_set_state(ioim, bfa_ioim_sm_hcb);
		bfa_assert(bfa_q_is_on_q(&ioim->itnim->pending_q, ioim));
		bfa_cb_queue(ioim->bfa, &ioim->hcb_qe,
				__bfa_cb_ioim_abort, ioim);
		break;

	default:
		bfa_sm_fault(ioim->bfa, event);
	}
}
1641
/*
 * IO is waiting for SG pages to be allocated before it can be sent to
 * firmware.
 */
static void
bfa_ioim_sm_sgalloc(struct bfa_ioim_s *ioim, enum bfa_ioim_event event)
{
	bfa_trc(ioim->bfa, ioim->iotag);
	bfa_trc(ioim->bfa, event);

	switch (event) {
	case BFA_IOIM_SM_SGALLOCED:
		/* SG pages available - try to dispatch to firmware. */
		if (!bfa_ioim_send_ioreq(ioim)) {
			bfa_sm_set_state(ioim, bfa_ioim_sm_qfull);
			break;
		}
		bfa_sm_set_state(ioim, bfa_ioim_sm_active);
		break;

	case BFA_IOIM_SM_CLEANUP:
		/* Cancel the SG wait and fail the IO; notify the itnim
		 * cleanup wait-counter. */
		bfa_sm_set_state(ioim, bfa_ioim_sm_hcb);
		bfa_sgpg_wcancel(ioim->bfa, &ioim->iosp->sgpg_wqe);
		bfa_cb_queue(ioim->bfa, &ioim->hcb_qe, __bfa_cb_ioim_failed,
			      ioim);
		bfa_ioim_notify_cleanup(ioim);
		break;

	case BFA_IOIM_SM_ABORT:
		/* Cancel the SG wait and complete as an abort. */
		bfa_sm_set_state(ioim, bfa_ioim_sm_hcb);
		bfa_sgpg_wcancel(ioim->bfa, &ioim->iosp->sgpg_wqe);
		bfa_ioim_move_to_comp_q(ioim);
		bfa_cb_queue(ioim->bfa, &ioim->hcb_qe, __bfa_cb_ioim_abort,
			      ioim);
		break;

	case BFA_IOIM_SM_HWFAIL:
		/* IOC failed - cancel the SG wait and fail the IO. */
		bfa_sm_set_state(ioim, bfa_ioim_sm_hcb);
		bfa_sgpg_wcancel(ioim->bfa, &ioim->iosp->sgpg_wqe);
		bfa_ioim_move_to_comp_q(ioim);
		bfa_cb_queue(ioim->bfa, &ioim->hcb_qe, __bfa_cb_ioim_failed,
			      ioim);
		break;

	default:
		bfa_sm_fault(ioim->bfa, event);
	}
}
1688
/*
 * IO is active: the request has been queued to firmware and a
 * completion is expected.
 */
static void
bfa_ioim_sm_active(struct bfa_ioim_s *ioim, enum bfa_ioim_event event)
{
	bfa_trc_fp(ioim->bfa, ioim->iotag);
	bfa_trc_fp(ioim->bfa, event);

	switch (event) {
	case BFA_IOIM_SM_COMP_GOOD:
		/* Fast-path good completion. */
		bfa_sm_set_state(ioim, bfa_ioim_sm_hcb);
		bfa_ioim_move_to_comp_q(ioim);
		bfa_cb_queue(ioim->bfa, &ioim->hcb_qe,
			      __bfa_cb_ioim_good_comp, ioim);
		break;

	case BFA_IOIM_SM_COMP:
		/* Completion with status/sense to convey. */
		bfa_sm_set_state(ioim, bfa_ioim_sm_hcb);
		bfa_ioim_move_to_comp_q(ioim);
		bfa_cb_queue(ioim->bfa, &ioim->hcb_qe, __bfa_cb_ioim_comp,
			      ioim);
		break;

	case BFA_IOIM_SM_DONE:
		/* Completed and the iotag can be freed after the callback
		 * (hcb_free variant of the hcb state). */
		bfa_sm_set_state(ioim, bfa_ioim_sm_hcb_free);
		bfa_ioim_move_to_comp_q(ioim);
		bfa_cb_queue(ioim->bfa, &ioim->hcb_qe, __bfa_cb_ioim_comp,
			      ioim);
		break;

	case BFA_IOIM_SM_ABORT:
		/* Host-requested (explicit) abort. */
		ioim->iosp->abort_explicit = BFA_TRUE;
		ioim->io_cbfn = __bfa_cb_ioim_abort;

		if (bfa_ioim_send_abort(ioim))
			bfa_sm_set_state(ioim, bfa_ioim_sm_abort);
		else {
			/* No queue space for the abort - wait for resume. */
			bfa_sm_set_state(ioim, bfa_ioim_sm_abort_qfull);
			bfa_stats(ioim->itnim, qwait);
			bfa_reqq_wait(ioim->bfa, ioim->reqq,
					&ioim->iosp->reqq_wait);
		}
		break;

	case BFA_IOIM_SM_CLEANUP:
		/* Implicit abort issued as part of itnim cleanup. */
		ioim->iosp->abort_explicit = BFA_FALSE;
		ioim->io_cbfn = __bfa_cb_ioim_failed;

		if (bfa_ioim_send_abort(ioim))
			bfa_sm_set_state(ioim, bfa_ioim_sm_cleanup);
		else {
			/* No queue space for the abort - wait for resume. */
			bfa_sm_set_state(ioim, bfa_ioim_sm_cleanup_qfull);
			bfa_stats(ioim->itnim, qwait);
			bfa_reqq_wait(ioim->bfa, ioim->reqq,
					&ioim->iosp->reqq_wait);
		}
		break;

	case BFA_IOIM_SM_HWFAIL:
		/* IOC failure - fail the IO back to the host. */
		bfa_sm_set_state(ioim, bfa_ioim_sm_hcb);
		bfa_ioim_move_to_comp_q(ioim);
		bfa_cb_queue(ioim->bfa, &ioim->hcb_qe, __bfa_cb_ioim_failed,
			      ioim);
		break;

	case BFA_IOIM_SM_SQRETRY:
		if (bfa_ioim_get_iotag(ioim) != BFA_TRUE) {
			/* max retry completed free IO */
			bfa_sm_set_state(ioim, bfa_ioim_sm_hcb_free);
			bfa_ioim_move_to_comp_q(ioim);
			bfa_cb_queue(ioim->bfa, &ioim->hcb_qe,
				__bfa_cb_ioim_failed, ioim);
			break;
		}
		/* waiting for IO tag resource free */
		bfa_sm_set_state(ioim, bfa_ioim_sm_cmnd_retry);
		break;

	default:
		bfa_sm_fault(ioim->bfa, event);
	}
}
1772
/*
 * IO is being retried with a new tag after a sequence-error retry
 * (SQRETRY); waiting for the old tag's resources to be freed.
 */
static void
bfa_ioim_sm_cmnd_retry(struct bfa_ioim_s *ioim, enum bfa_ioim_event event)
{
	bfa_trc_fp(ioim->bfa, ioim->iotag);
	bfa_trc_fp(ioim->bfa, event);

	switch (event) {
	case BFA_IOIM_SM_FREE:
		/* abts and rrq done. Now retry the IO with new tag */
		if (!bfa_ioim_send_ioreq(ioim)) {
			bfa_sm_set_state(ioim, bfa_ioim_sm_qfull);
			break;
		}
		bfa_sm_set_state(ioim, bfa_ioim_sm_active);
		break;

	case BFA_IOIM_SM_CLEANUP:
		/* Itnim cleanup overrides the retry: issue an implicit
		 * abort for the IO. */
		ioim->iosp->abort_explicit = BFA_FALSE;
		ioim->io_cbfn = __bfa_cb_ioim_failed;

		if (bfa_ioim_send_abort(ioim))
			bfa_sm_set_state(ioim, bfa_ioim_sm_cleanup);
		else {
			bfa_sm_set_state(ioim, bfa_ioim_sm_cleanup_qfull);
			bfa_stats(ioim->itnim, qwait);
			bfa_reqq_wait(ioim->bfa, ioim->reqq,
				&ioim->iosp->reqq_wait);
		}
		break;

	case BFA_IOIM_SM_HWFAIL:
		/* IOC failure - fail the IO back to the host. */
		bfa_sm_set_state(ioim, bfa_ioim_sm_hcb);
		bfa_ioim_move_to_comp_q(ioim);
		bfa_cb_queue(ioim->bfa, &ioim->hcb_qe,
			 __bfa_cb_ioim_failed, ioim);
		break;

	case BFA_IOIM_SM_ABORT:
		/* in this state IO abort is done.
		 * Waiting for IO tag resource free.
		 */
		bfa_sm_set_state(ioim, bfa_ioim_sm_hcb_free);
		bfa_cb_queue(ioim->bfa, &ioim->hcb_qe, __bfa_cb_ioim_abort,
			ioim);
		break;

	default:
		bfa_sm_fault(ioim->bfa, event);
	}
}
1826
/*
 * IO is being aborted (explicitly), waiting for the abort completion
 * from firmware.
 */
static void
bfa_ioim_sm_abort(struct bfa_ioim_s *ioim, enum bfa_ioim_event event)
{
	bfa_trc(ioim->bfa, ioim->iotag);
	bfa_trc(ioim->bfa, event);

	switch (event) {
	case BFA_IOIM_SM_COMP_GOOD:
	case BFA_IOIM_SM_COMP:
	case BFA_IOIM_SM_DONE:
	case BFA_IOIM_SM_FREE:
		/* IO completion racing with the abort is ignored; the
		 * abort response drives the state machine from here. */
		break;

	case BFA_IOIM_SM_ABORT_DONE:
		/* Abort completed; iotag is freed after the callback. */
		bfa_sm_set_state(ioim, bfa_ioim_sm_hcb_free);
		bfa_cb_queue(ioim->bfa, &ioim->hcb_qe, __bfa_cb_ioim_abort,
			      ioim);
		break;

	case BFA_IOIM_SM_ABORT_COMP:
		/* IO completed as aborted. */
		bfa_sm_set_state(ioim, bfa_ioim_sm_hcb);
		bfa_ioim_move_to_comp_q(ioim);
		bfa_cb_queue(ioim->bfa, &ioim->hcb_qe, __bfa_cb_ioim_abort,
			      ioim);
		break;

	case BFA_IOIM_SM_COMP_UTAG:
		/* Firmware completed with an unknown tag. */
		bfa_sm_set_state(ioim, bfa_ioim_sm_hcb);
		bfa_ioim_move_to_comp_q(ioim);
		bfa_cb_queue(ioim->bfa, &ioim->hcb_qe, __bfa_cb_ioim_abort,
			      ioim);
		break;

	case BFA_IOIM_SM_CLEANUP:
		/* Cleanup downgrades the explicit abort to an implicit one
		 * and re-issues it. */
		bfa_assert(ioim->iosp->abort_explicit == BFA_TRUE);
		ioim->iosp->abort_explicit = BFA_FALSE;

		if (bfa_ioim_send_abort(ioim))
			bfa_sm_set_state(ioim, bfa_ioim_sm_cleanup);
		else {
			bfa_sm_set_state(ioim, bfa_ioim_sm_cleanup_qfull);
			bfa_stats(ioim->itnim, qwait);
			bfa_reqq_wait(ioim->bfa, ioim->reqq,
				&ioim->iosp->reqq_wait);
		}
		break;

	case BFA_IOIM_SM_HWFAIL:
		/* IOC failure - fail the IO back to the host. */
		bfa_sm_set_state(ioim, bfa_ioim_sm_hcb);
		bfa_ioim_move_to_comp_q(ioim);
		bfa_cb_queue(ioim->bfa, &ioim->hcb_qe, __bfa_cb_ioim_failed,
			      ioim);
		break;

	default:
		bfa_sm_fault(ioim->bfa, event);
	}
}
1888
/*
 * IO is being cleaned up (implicit abort), waiting for completion from
 * firmware. The host callback used on completion is ioim->io_cbfn,
 * which the cleanup path set to __bfa_cb_ioim_failed.
 */
static void
bfa_ioim_sm_cleanup(struct bfa_ioim_s *ioim, enum bfa_ioim_event event)
{
	bfa_trc(ioim->bfa, ioim->iotag);
	bfa_trc(ioim->bfa, event);

	switch (event) {
	case BFA_IOIM_SM_COMP_GOOD:
	case BFA_IOIM_SM_COMP:
	case BFA_IOIM_SM_DONE:
	case BFA_IOIM_SM_FREE:
		/* Normal completions racing with cleanup are ignored; the
		 * abort response drives the state machine from here. */
		break;

	case BFA_IOIM_SM_ABORT:
		/*
		 * IO is already being aborted implicitly
		 */
		ioim->io_cbfn = __bfa_cb_ioim_abort;
		break;

	case BFA_IOIM_SM_ABORT_DONE:
		/* Abort completed; iotag is freed after the callback and
		 * the itnim cleanup wait-counter is notified. */
		bfa_sm_set_state(ioim, bfa_ioim_sm_hcb_free);
		bfa_cb_queue(ioim->bfa, &ioim->hcb_qe, ioim->io_cbfn, ioim);
		bfa_ioim_notify_cleanup(ioim);
		break;

	case BFA_IOIM_SM_ABORT_COMP:
		bfa_sm_set_state(ioim, bfa_ioim_sm_hcb);
		bfa_cb_queue(ioim->bfa, &ioim->hcb_qe, ioim->io_cbfn, ioim);
		bfa_ioim_notify_cleanup(ioim);
		break;

	case BFA_IOIM_SM_COMP_UTAG:
		/* Firmware completed with an unknown tag. */
		bfa_sm_set_state(ioim, bfa_ioim_sm_hcb);
		bfa_cb_queue(ioim->bfa, &ioim->hcb_qe, ioim->io_cbfn, ioim);
		bfa_ioim_notify_cleanup(ioim);
		break;

	case BFA_IOIM_SM_HWFAIL:
		/* IOC failure - fail the IO back to the host. */
		bfa_sm_set_state(ioim, bfa_ioim_sm_hcb);
		bfa_ioim_move_to_comp_q(ioim);
		bfa_cb_queue(ioim->bfa, &ioim->hcb_qe, __bfa_cb_ioim_failed,
			      ioim);
		break;

	case BFA_IOIM_SM_CLEANUP:
		/*
		 * IO can be in cleanup state already due to TM command.
		 * 2nd cleanup request comes from ITN offline event.
		 */
		break;

	default:
		bfa_sm_fault(ioim->bfa, event);
	}
}
1949
/*
 * IO is waiting for room in request CQ
 */
static void
bfa_ioim_sm_qfull(struct bfa_ioim_s *ioim, enum bfa_ioim_event event)
{
	bfa_trc(ioim->bfa, ioim->iotag);
	bfa_trc(ioim->bfa, event);

	switch (event) {
	case BFA_IOIM_SM_QRESUME:
		/* Queue space is available again; send the IO request. */
		bfa_sm_set_state(ioim, bfa_ioim_sm_active);
		bfa_ioim_send_ioreq(ioim);
		break;

	case BFA_IOIM_SM_ABORT:
		/*
		 * Nothing was sent to firmware yet; cancel the queue wait
		 * and complete the IO as aborted immediately.
		 */
		bfa_sm_set_state(ioim, bfa_ioim_sm_hcb);
		bfa_reqq_wcancel(&ioim->iosp->reqq_wait);
		bfa_ioim_move_to_comp_q(ioim);
		bfa_cb_queue(ioim->bfa, &ioim->hcb_qe, __bfa_cb_ioim_abort,
			      ioim);
		break;

	case BFA_IOIM_SM_CLEANUP:
		bfa_sm_set_state(ioim, bfa_ioim_sm_hcb);
		bfa_reqq_wcancel(&ioim->iosp->reqq_wait);
		bfa_cb_queue(ioim->bfa, &ioim->hcb_qe, __bfa_cb_ioim_failed,
			      ioim);
		bfa_ioim_notify_cleanup(ioim);
		break;

	case BFA_IOIM_SM_HWFAIL:
		bfa_sm_set_state(ioim, bfa_ioim_sm_hcb);
		bfa_reqq_wcancel(&ioim->iosp->reqq_wait);
		bfa_ioim_move_to_comp_q(ioim);
		bfa_cb_queue(ioim->bfa, &ioim->hcb_qe, __bfa_cb_ioim_failed,
			      ioim);
		break;

	default:
		bfa_sm_fault(ioim->bfa, event);
	}
}
1993
/*
 * Active IO is being aborted, waiting for room in request CQ.
 */
static void
bfa_ioim_sm_abort_qfull(struct bfa_ioim_s *ioim, enum bfa_ioim_event event)
{
	bfa_trc(ioim->bfa, ioim->iotag);
	bfa_trc(ioim->bfa, event);

	switch (event) {
	case BFA_IOIM_SM_QRESUME:
		/* Queue space available; send the abort to firmware. */
		bfa_sm_set_state(ioim, bfa_ioim_sm_abort);
		bfa_ioim_send_abort(ioim);
		break;

	case BFA_IOIM_SM_CLEANUP:
		/*
		 * Convert the pending explicit abort into an implicit
		 * cleanup; the message is issued when queue space resumes.
		 */
		bfa_assert(ioim->iosp->abort_explicit == BFA_TRUE);
		ioim->iosp->abort_explicit = BFA_FALSE;
		bfa_sm_set_state(ioim, bfa_ioim_sm_cleanup_qfull);
		break;

	case BFA_IOIM_SM_COMP_GOOD:
	case BFA_IOIM_SM_COMP:
		/* IO completed before the abort could be sent. */
		bfa_sm_set_state(ioim, bfa_ioim_sm_hcb);
		bfa_reqq_wcancel(&ioim->iosp->reqq_wait);
		bfa_ioim_move_to_comp_q(ioim);
		bfa_cb_queue(ioim->bfa, &ioim->hcb_qe, __bfa_cb_ioim_abort,
			      ioim);
		break;

	case BFA_IOIM_SM_DONE:
		bfa_sm_set_state(ioim, bfa_ioim_sm_hcb_free);
		bfa_reqq_wcancel(&ioim->iosp->reqq_wait);
		bfa_ioim_move_to_comp_q(ioim);
		bfa_cb_queue(ioim->bfa, &ioim->hcb_qe, __bfa_cb_ioim_abort,
			      ioim);
		break;

	case BFA_IOIM_SM_HWFAIL:
		bfa_sm_set_state(ioim, bfa_ioim_sm_hcb);
		bfa_reqq_wcancel(&ioim->iosp->reqq_wait);
		bfa_ioim_move_to_comp_q(ioim);
		bfa_cb_queue(ioim->bfa, &ioim->hcb_qe, __bfa_cb_ioim_failed,
			      ioim);
		break;

	default:
		bfa_sm_fault(ioim->bfa, event);
	}
}
2044
/*
 * Active IO is being cleaned up, waiting for room in request CQ.
 */
static void
bfa_ioim_sm_cleanup_qfull(struct bfa_ioim_s *ioim, enum bfa_ioim_event event)
{
	bfa_trc(ioim->bfa, ioim->iotag);
	bfa_trc(ioim->bfa, event);

	switch (event) {
	case BFA_IOIM_SM_QRESUME:
		/* Queue space available; send the cleanup request. */
		bfa_sm_set_state(ioim, bfa_ioim_sm_cleanup);
		bfa_ioim_send_abort(ioim);
		break;

	case BFA_IOIM_SM_ABORT:
		/*
		 * IO is already being cleaned up implicitly; just record
		 * the abort flavor of the completion callback.
		 */
		ioim->io_cbfn = __bfa_cb_ioim_abort;
		break;

	case BFA_IOIM_SM_COMP_GOOD:
	case BFA_IOIM_SM_COMP:
		bfa_sm_set_state(ioim, bfa_ioim_sm_hcb);
		bfa_reqq_wcancel(&ioim->iosp->reqq_wait);
		bfa_cb_queue(ioim->bfa, &ioim->hcb_qe, ioim->io_cbfn, ioim);
		bfa_ioim_notify_cleanup(ioim);
		break;

	case BFA_IOIM_SM_DONE:
		bfa_sm_set_state(ioim, bfa_ioim_sm_hcb_free);
		bfa_reqq_wcancel(&ioim->iosp->reqq_wait);
		bfa_cb_queue(ioim->bfa, &ioim->hcb_qe, ioim->io_cbfn, ioim);
		bfa_ioim_notify_cleanup(ioim);
		break;

	case BFA_IOIM_SM_HWFAIL:
		bfa_sm_set_state(ioim, bfa_ioim_sm_hcb);
		bfa_reqq_wcancel(&ioim->iosp->reqq_wait);
		bfa_ioim_move_to_comp_q(ioim);
		bfa_cb_queue(ioim->bfa, &ioim->hcb_qe, __bfa_cb_ioim_failed,
			      ioim);
		break;

	default:
		bfa_sm_fault(ioim->bfa, event);
	}
}
2094
/*
 * IO bfa callback is pending.
 */
static void
bfa_ioim_sm_hcb(struct bfa_ioim_s *ioim, enum bfa_ioim_event event)
{
	bfa_trc_fp(ioim->bfa, ioim->iotag);
	bfa_trc_fp(ioim->bfa, event);

	switch (event) {
	case BFA_IOIM_SM_HCB:
		/* Host callback delivered; the IO can be recycled. */
		bfa_sm_set_state(ioim, bfa_ioim_sm_uninit);
		bfa_ioim_free(ioim);
		break;

	case BFA_IOIM_SM_CLEANUP:
		bfa_ioim_notify_cleanup(ioim);
		break;

	case BFA_IOIM_SM_HWFAIL:
		/* Callback is already queued; nothing more to do. */
		break;

	default:
		bfa_sm_fault(ioim->bfa, event);
	}
}
2121
/*
 * IO bfa callback is pending. IO resource cannot be freed.
 */
static void
bfa_ioim_sm_hcb_free(struct bfa_ioim_s *ioim, enum bfa_ioim_event event)
{
	bfa_trc(ioim->bfa, ioim->iotag);
	bfa_trc(ioim->bfa, event);

	switch (event) {
	case BFA_IOIM_SM_HCB:
		/*
		 * Host callback done but the firmware resource-free has
		 * not arrived yet; park the IO on the resfree queue.
		 */
		bfa_sm_set_state(ioim, bfa_ioim_sm_resfree);
		list_del(&ioim->qe);
		list_add_tail(&ioim->qe, &ioim->fcpim->ioim_resfree_q);
		break;

	case BFA_IOIM_SM_FREE:
		/* Resource freed first; only the host callback remains. */
		bfa_sm_set_state(ioim, bfa_ioim_sm_hcb);
		break;

	case BFA_IOIM_SM_CLEANUP:
		bfa_ioim_notify_cleanup(ioim);
		break;

	case BFA_IOIM_SM_HWFAIL:
		bfa_sm_set_state(ioim, bfa_ioim_sm_hcb);
		break;

	default:
		bfa_sm_fault(ioim->bfa, event);
	}
}
2154
/*
 * IO is completed, waiting resource free from firmware.
 */
static void
bfa_ioim_sm_resfree(struct bfa_ioim_s *ioim, enum bfa_ioim_event event)
{
	bfa_trc(ioim->bfa, ioim->iotag);
	bfa_trc(ioim->bfa, event);

	switch (event) {
	case BFA_IOIM_SM_FREE:
		/* Firmware released the iotag; recycle the IO. */
		bfa_sm_set_state(ioim, bfa_ioim_sm_uninit);
		bfa_ioim_free(ioim);
		break;

	case BFA_IOIM_SM_CLEANUP:
		bfa_ioim_notify_cleanup(ioim);
		break;

	case BFA_IOIM_SM_HWFAIL:
		break;

	default:
		bfa_sm_fault(ioim->bfa, event);
	}
}
2181
2182
Krishna Gudipatia36c61f2010-09-15 11:50:55 -07002183static void
2184__bfa_cb_ioim_good_comp(void *cbarg, bfa_boolean_t complete)
2185{
2186 struct bfa_ioim_s *ioim = cbarg;
2187
2188 if (!complete) {
2189 bfa_sm_send_event(ioim, BFA_IOIM_SM_HCB);
2190 return;
2191 }
2192
2193 bfa_cb_ioim_good_comp(ioim->bfa->bfad, ioim->dio);
2194}
2195
2196static void
2197__bfa_cb_ioim_comp(void *cbarg, bfa_boolean_t complete)
2198{
2199 struct bfa_ioim_s *ioim = cbarg;
2200 struct bfi_ioim_rsp_s *m;
2201 u8 *snsinfo = NULL;
2202 u8 sns_len = 0;
2203 s32 residue = 0;
2204
2205 if (!complete) {
2206 bfa_sm_send_event(ioim, BFA_IOIM_SM_HCB);
2207 return;
2208 }
2209
2210 m = (struct bfi_ioim_rsp_s *) &ioim->iosp->comp_rspmsg;
2211 if (m->io_status == BFI_IOIM_STS_OK) {
Jing Huang5fbe25c2010-10-18 17:17:23 -07002212 /*
Krishna Gudipatia36c61f2010-09-15 11:50:55 -07002213 * setup sense information, if present
2214 */
2215 if ((m->scsi_status == SCSI_STATUS_CHECK_CONDITION) &&
2216 m->sns_len) {
2217 sns_len = m->sns_len;
2218 snsinfo = ioim->iosp->snsinfo;
2219 }
2220
Jing Huang5fbe25c2010-10-18 17:17:23 -07002221 /*
Krishna Gudipatia36c61f2010-09-15 11:50:55 -07002222 * setup residue value correctly for normal completions
2223 */
2224 if (m->resid_flags == FCP_RESID_UNDER) {
Jing Huangba816ea2010-10-18 17:10:50 -07002225 residue = be32_to_cpu(m->residue);
Krishna Gudipatia36c61f2010-09-15 11:50:55 -07002226 bfa_stats(ioim->itnim, iocomp_underrun);
2227 }
2228 if (m->resid_flags == FCP_RESID_OVER) {
Jing Huangba816ea2010-10-18 17:10:50 -07002229 residue = be32_to_cpu(m->residue);
Krishna Gudipatia36c61f2010-09-15 11:50:55 -07002230 residue = -residue;
2231 bfa_stats(ioim->itnim, iocomp_overrun);
2232 }
2233 }
2234
2235 bfa_cb_ioim_done(ioim->bfa->bfad, ioim->dio, m->io_status,
2236 m->scsi_status, sns_len, snsinfo, residue);
2237}
2238
2239static void
2240__bfa_cb_ioim_failed(void *cbarg, bfa_boolean_t complete)
2241{
2242 struct bfa_ioim_s *ioim = cbarg;
2243
2244 if (!complete) {
2245 bfa_sm_send_event(ioim, BFA_IOIM_SM_HCB);
2246 return;
2247 }
2248
2249 bfa_cb_ioim_done(ioim->bfa->bfad, ioim->dio, BFI_IOIM_STS_ABORTED,
2250 0, 0, NULL, 0);
2251}
2252
2253static void
2254__bfa_cb_ioim_pathtov(void *cbarg, bfa_boolean_t complete)
2255{
2256 struct bfa_ioim_s *ioim = cbarg;
2257
2258 bfa_stats(ioim->itnim, path_tov_expired);
2259 if (!complete) {
2260 bfa_sm_send_event(ioim, BFA_IOIM_SM_HCB);
2261 return;
2262 }
2263
2264 bfa_cb_ioim_done(ioim->bfa->bfad, ioim->dio, BFI_IOIM_STS_PATHTOV,
2265 0, 0, NULL, 0);
2266}
2267
2268static void
2269__bfa_cb_ioim_abort(void *cbarg, bfa_boolean_t complete)
2270{
2271 struct bfa_ioim_s *ioim = cbarg;
2272
2273 if (!complete) {
2274 bfa_sm_send_event(ioim, BFA_IOIM_SM_HCB);
2275 return;
2276 }
2277
2278 bfa_cb_ioim_abort(ioim->bfa->bfad, ioim->dio);
2279}
2280
2281static void
2282bfa_ioim_sgpg_alloced(void *cbarg)
2283{
2284 struct bfa_ioim_s *ioim = cbarg;
2285
2286 ioim->nsgpgs = BFA_SGPG_NPAGE(ioim->nsges);
2287 list_splice_tail_init(&ioim->iosp->sgpg_wqe.sgpg_q, &ioim->sgpg_q);
Maggie Zhange3e7d3e2010-12-09 19:10:27 -08002288 ioim->sgpg = bfa_q_first(&ioim->sgpg_q);
Krishna Gudipatia36c61f2010-09-15 11:50:55 -07002289 bfa_sm_send_event(ioim, BFA_IOIM_SM_SGALLOCED);
2290}
2291
/*
 * Send I/O request to firmware.
 * Returns BFA_FALSE (and queues a wait element) when the request CQ
 * has no room; BFA_TRUE once the message has been produced.
 */
static bfa_boolean_t
bfa_ioim_send_ioreq(struct bfa_ioim_s *ioim)
{
	struct bfa_itnim_s *itnim = ioim->itnim;
	struct bfi_ioim_req_s *m;
	static struct fcp_cmnd_s cmnd_z0 = {{{0}}};
	struct bfi_sge_s *sge, *sgpge;
	u32 pgdlen = 0;
	u32 fcp_dl;
	u64 addr;
	struct scatterlist *sg;
	struct bfa_sgpg_s *sgpg;
	struct scsi_cmnd *cmnd = (struct scsi_cmnd *) ioim->dio;
	u32 i, sge_id, pgcumsz;
	enum dma_data_direction dmadir;

	/*
	 * check for room in queue to send request now
	 */
	m = bfa_reqq_next(ioim->bfa, ioim->reqq);
	if (!m) {
		bfa_stats(ioim->itnim, qwait);
		bfa_reqq_wait(ioim->bfa, ioim->reqq,
			      &ioim->iosp->reqq_wait);
		return BFA_FALSE;
	}

	/*
	 * build i/o request message next
	 */
	m->io_tag = cpu_to_be16(ioim->iotag);
	m->rport_hdl = ioim->itnim->rport->fw_handle;
	m->io_timeout = 0;

	sge = &m->sges[0];
	sgpg = ioim->sgpg;
	sge_id = 0;
	sgpge = NULL;
	pgcumsz = 0;
	/*
	 * First SG entry goes inline in the message (big-endian); the
	 * remainder are filled into external SG pages (little-endian).
	 */
	scsi_for_each_sg(cmnd, sg, ioim->nsges, i) {
		if (i == 0) {
			/* build inline IO SG element */
			addr = bfa_sgaddr_le(sg_dma_address(sg));
			sge->sga = *(union bfi_addr_u *) &addr;
			pgdlen = sg_dma_len(sg);
			sge->sg_len = pgdlen;
			sge->flags = (ioim->nsges > BFI_SGE_INLINE) ?
					BFI_SGE_DATA_CPL : BFI_SGE_DATA_LAST;
			bfa_sge_to_be(sge);
			sge++;
		} else {
			if (sge_id == 0)
				sgpge = sgpg->sgpg->sges;

			addr = bfa_sgaddr_le(sg_dma_address(sg));
			sgpge->sga = *(union bfi_addr_u *) &addr;
			sgpge->sg_len = sg_dma_len(sg);
			pgcumsz += sgpge->sg_len;

			/* set flags */
			if (i < (ioim->nsges - 1) &&
			    sge_id < (BFI_SGPG_DATA_SGES - 1))
				sgpge->flags = BFI_SGE_DATA;
			else if (i < (ioim->nsges - 1))
				sgpge->flags = BFI_SGE_DATA_CPL;
			else
				sgpge->flags = BFI_SGE_DATA_LAST;

			bfa_sge_to_le(sgpge);

			sgpge++;
			if (i == (ioim->nsges - 1)) {
				/* terminate the SG page with a PGDLEN entry */
				sgpge->flags = BFI_SGE_PGDLEN;
				sgpge->sga.a32.addr_lo = 0;
				sgpge->sga.a32.addr_hi = 0;
				sgpge->sg_len = pgcumsz;
				bfa_sge_to_le(sgpge);
			} else if (++sge_id == BFI_SGPG_DATA_SGES) {
				/* page full: chain to the next SG page */
				sgpg = (struct bfa_sgpg_s *) bfa_q_next(sgpg);
				sgpge->flags = BFI_SGE_LINK;
				sgpge->sga = sgpg->sgpg_pa;
				sgpge->sg_len = pgcumsz;
				bfa_sge_to_le(sgpge);
				sge_id = 0;
				pgcumsz = 0;
			}
		}
	}

	/* second inline entry: either the SG page chain or a null entry */
	if (ioim->nsges > BFI_SGE_INLINE) {
		sge->sga = ioim->sgpg->sgpg_pa;
	} else {
		sge->sga.a32.addr_lo = 0;
		sge->sga.a32.addr_hi = 0;
	}
	sge->sg_len = pgdlen;
	sge->flags = BFI_SGE_PGDLEN;
	bfa_sge_to_be(sge);

	/*
	 * set up I/O command parameters
	 */
	m->cmnd = cmnd_z0;
	int_to_scsilun(cmnd->device->lun, &m->cmnd.lun);
	dmadir = cmnd->sc_data_direction;
	if (dmadir == DMA_TO_DEVICE)
		m->cmnd.iodir = FCP_IODIR_WRITE;
	else if (dmadir == DMA_FROM_DEVICE)
		m->cmnd.iodir = FCP_IODIR_READ;
	else
		m->cmnd.iodir = FCP_IODIR_NONE;

	m->cmnd.cdb = *(scsi_cdb_t *) cmnd->cmnd;
	fcp_dl = scsi_bufflen(cmnd);
	m->cmnd.fcp_dl = cpu_to_be32(fcp_dl);

	/*
	 * set up I/O message header
	 */
	switch (m->cmnd.iodir) {
	case FCP_IODIR_READ:
		bfi_h2i_set(m->mh, BFI_MC_IOIM_READ, 0, bfa_lpuid(ioim->bfa));
		bfa_stats(itnim, input_reqs);
		ioim->itnim->stats.rd_throughput += fcp_dl;
		break;
	case FCP_IODIR_WRITE:
		bfi_h2i_set(m->mh, BFI_MC_IOIM_WRITE, 0, bfa_lpuid(ioim->bfa));
		bfa_stats(itnim, output_reqs);
		ioim->itnim->stats.wr_throughput += fcp_dl;
		break;
	case FCP_IODIR_RW:
		bfa_stats(itnim, input_reqs);
		bfa_stats(itnim, output_reqs);
		/* fallthrough - RW uses the generic IO message class */
	default:
		bfi_h2i_set(m->mh, BFI_MC_IOIM_IO, 0, bfa_lpuid(ioim->bfa));
	}
	/* sequence-level recovery or unaligned lengths use the slow class */
	if (itnim->seq_rec ||
	    (scsi_bufflen(cmnd) & (sizeof(u32) - 1)))
		bfi_h2i_set(m->mh, BFI_MC_IOIM_IO, 0, bfa_lpuid(ioim->bfa));

	/*
	 * queue I/O message to firmware
	 */
	bfa_reqq_produce(ioim->bfa, ioim->reqq);
	return BFA_TRUE;
}
2441
Jing Huang5fbe25c2010-10-18 17:17:23 -07002442/*
Krishna Gudipatia36c61f2010-09-15 11:50:55 -07002443 * Setup any additional SG pages needed.Inline SG element is setup
2444 * at queuing time.
2445 */
2446static bfa_boolean_t
Maggie Zhange3e7d3e2010-12-09 19:10:27 -08002447bfa_ioim_sgpg_alloc(struct bfa_ioim_s *ioim)
Krishna Gudipatia36c61f2010-09-15 11:50:55 -07002448{
2449 u16 nsgpgs;
2450
2451 bfa_assert(ioim->nsges > BFI_SGE_INLINE);
2452
Jing Huang5fbe25c2010-10-18 17:17:23 -07002453 /*
Krishna Gudipatia36c61f2010-09-15 11:50:55 -07002454 * allocate SG pages needed
2455 */
2456 nsgpgs = BFA_SGPG_NPAGE(ioim->nsges);
2457 if (!nsgpgs)
2458 return BFA_TRUE;
2459
2460 if (bfa_sgpg_malloc(ioim->bfa, &ioim->sgpg_q, nsgpgs)
2461 != BFA_STATUS_OK) {
2462 bfa_sgpg_wait(ioim->bfa, &ioim->iosp->sgpg_wqe, nsgpgs);
2463 return BFA_FALSE;
2464 }
2465
2466 ioim->nsgpgs = nsgpgs;
Maggie Zhange3e7d3e2010-12-09 19:10:27 -08002467 ioim->sgpg = bfa_q_first(&ioim->sgpg_q);
Krishna Gudipatia36c61f2010-09-15 11:50:55 -07002468
2469 return BFA_TRUE;
2470}
2471
Jing Huang5fbe25c2010-10-18 17:17:23 -07002472/*
Krishna Gudipatia36c61f2010-09-15 11:50:55 -07002473 * Send I/O abort request to firmware.
2474 */
2475static bfa_boolean_t
2476bfa_ioim_send_abort(struct bfa_ioim_s *ioim)
2477{
2478 struct bfi_ioim_abort_req_s *m;
2479 enum bfi_ioim_h2i msgop;
2480
Jing Huang5fbe25c2010-10-18 17:17:23 -07002481 /*
Krishna Gudipatia36c61f2010-09-15 11:50:55 -07002482 * check for room in queue to send request now
2483 */
2484 m = bfa_reqq_next(ioim->bfa, ioim->reqq);
2485 if (!m)
2486 return BFA_FALSE;
2487
Jing Huang5fbe25c2010-10-18 17:17:23 -07002488 /*
Krishna Gudipatia36c61f2010-09-15 11:50:55 -07002489 * build i/o request message next
2490 */
2491 if (ioim->iosp->abort_explicit)
2492 msgop = BFI_IOIM_H2I_IOABORT_REQ;
2493 else
2494 msgop = BFI_IOIM_H2I_IOCLEANUP_REQ;
2495
2496 bfi_h2i_set(m->mh, BFI_MC_IOIM, msgop, bfa_lpuid(ioim->bfa));
Jing Huangba816ea2010-10-18 17:10:50 -07002497 m->io_tag = cpu_to_be16(ioim->iotag);
Krishna Gudipatia36c61f2010-09-15 11:50:55 -07002498 m->abort_tag = ++ioim->abort_tag;
2499
Jing Huang5fbe25c2010-10-18 17:17:23 -07002500 /*
Krishna Gudipatia36c61f2010-09-15 11:50:55 -07002501 * queue I/O message to firmware
2502 */
2503 bfa_reqq_produce(ioim->bfa, ioim->reqq);
2504 return BFA_TRUE;
2505}
2506
Jing Huang5fbe25c2010-10-18 17:17:23 -07002507/*
Krishna Gudipatia36c61f2010-09-15 11:50:55 -07002508 * Call to resume any I/O requests waiting for room in request queue.
2509 */
2510static void
2511bfa_ioim_qresume(void *cbarg)
2512{
2513 struct bfa_ioim_s *ioim = cbarg;
2514
2515 bfa_stats(ioim->itnim, qresumes);
2516 bfa_sm_send_event(ioim, BFA_IOIM_SM_QRESUME);
2517}
2518
2519
/*
 * Notify the cleanup initiator (itnim or TM command) that this IO has
 * finished cleaning up, and re-home the IO off the itnim queues.
 */
static void
bfa_ioim_notify_cleanup(struct bfa_ioim_s *ioim)
{
	/*
	 * Move IO from itnim queue to fcpim global queue since itnim will be
	 * freed.
	 */
	list_del(&ioim->qe);
	list_add_tail(&ioim->qe, &ioim->fcpim->ioim_comp_q);

	if (!ioim->iosp->tskim) {
		/*
		 * Itnim-driven cleanup: while iotov is running, hold the
		 * host callback and park the IO on the itnim's delayed
		 * completion queue instead.
		 */
		if (ioim->fcpim->delay_comp && ioim->itnim->iotov_active) {
			bfa_cb_dequeue(&ioim->hcb_qe);
			list_del(&ioim->qe);
			list_add_tail(&ioim->qe, &ioim->itnim->delay_comp_q);
		}
		bfa_itnim_iodone(ioim->itnim);
	} else
		/* TM-driven cleanup: count this IO down on the TM's wc. */
		bfa_wc_down(&ioim->iosp->tskim->wc);
}
2540
2541static bfa_boolean_t
2542bfa_ioim_is_abortable(struct bfa_ioim_s *ioim)
2543{
2544 if ((bfa_sm_cmp_state(ioim, bfa_ioim_sm_uninit) &&
2545 (!bfa_q_is_on_q(&ioim->itnim->pending_q, ioim))) ||
2546 (bfa_sm_cmp_state(ioim, bfa_ioim_sm_abort)) ||
2547 (bfa_sm_cmp_state(ioim, bfa_ioim_sm_abort_qfull)) ||
2548 (bfa_sm_cmp_state(ioim, bfa_ioim_sm_hcb)) ||
2549 (bfa_sm_cmp_state(ioim, bfa_ioim_sm_hcb_free)) ||
2550 (bfa_sm_cmp_state(ioim, bfa_ioim_sm_resfree)))
2551 return BFA_FALSE;
2552
2553 return BFA_TRUE;
2554}
2555
Jing Huang5fbe25c2010-10-18 17:17:23 -07002556/*
Krishna Gudipatia36c61f2010-09-15 11:50:55 -07002557 * or after the link comes back.
2558 */
2559void
2560bfa_ioim_delayed_comp(struct bfa_ioim_s *ioim, bfa_boolean_t iotov)
2561{
Jing Huang5fbe25c2010-10-18 17:17:23 -07002562 /*
Krishna Gudipatia36c61f2010-09-15 11:50:55 -07002563 * If path tov timer expired, failback with PATHTOV status - these
2564 * IO requests are not normally retried by IO stack.
2565 *
2566 * Otherwise device cameback online and fail it with normal failed
2567 * status so that IO stack retries these failed IO requests.
2568 */
2569 if (iotov)
2570 ioim->io_cbfn = __bfa_cb_ioim_pathtov;
2571 else {
2572 ioim->io_cbfn = __bfa_cb_ioim_failed;
2573 bfa_stats(ioim->itnim, iocom_nexus_abort);
2574 }
2575 bfa_cb_queue(ioim->bfa, &ioim->hcb_qe, ioim->io_cbfn, ioim);
2576
Jing Huang5fbe25c2010-10-18 17:17:23 -07002577 /*
Krishna Gudipatia36c61f2010-09-15 11:50:55 -07002578 * Move IO to fcpim global queue since itnim will be
2579 * freed.
2580 */
2581 list_del(&ioim->qe);
2582 list_add_tail(&ioim->qe, &ioim->fcpim->ioim_comp_q);
2583}
2584
2585
/*
 * Memory allocation and initialization.
 * Carves the IOIM array, the slow-path array and the per-IO DMA sense
 * buffers out of the module memory info, then threads all IOs onto the
 * free queue in the uninit state.
 */
void
bfa_ioim_attach(struct bfa_fcpim_mod_s *fcpim, struct bfa_meminfo_s *minfo)
{
	struct bfa_ioim_s *ioim;
	struct bfa_ioim_sp_s *iosp;
	u16 i;
	u8 *snsinfo;
	u32 snsbufsz;

	/*
	 * claim memory first
	 */
	ioim = (struct bfa_ioim_s *) bfa_meminfo_kva(minfo);
	fcpim->ioim_arr = ioim;
	bfa_meminfo_kva(minfo) = (u8 *) (ioim + fcpim->num_ioim_reqs);

	iosp = (struct bfa_ioim_sp_s *) bfa_meminfo_kva(minfo);
	fcpim->ioim_sp_arr = iosp;
	bfa_meminfo_kva(minfo) = (u8 *) (iosp + fcpim->num_ioim_reqs);

	/*
	 * Claim DMA memory for per IO sense data.
	 */
	snsbufsz = fcpim->num_ioim_reqs * BFI_IOIM_SNSLEN;
	fcpim->snsbase.pa = bfa_meminfo_dma_phys(minfo);
	bfa_meminfo_dma_phys(minfo) += snsbufsz;

	fcpim->snsbase.kva = bfa_meminfo_dma_virt(minfo);
	bfa_meminfo_dma_virt(minfo) += snsbufsz;
	snsinfo = fcpim->snsbase.kva;
	bfa_iocfc_set_snsbase(fcpim->bfa, fcpim->snsbase.pa);

	/*
	 * Initialize ioim free queues
	 */
	INIT_LIST_HEAD(&fcpim->ioim_free_q);
	INIT_LIST_HEAD(&fcpim->ioim_resfree_q);
	INIT_LIST_HEAD(&fcpim->ioim_comp_q);

	/* walk the ioim/iosp/sense arrays in lock step */
	for (i = 0; i < fcpim->num_ioim_reqs;
	     i++, ioim++, iosp++, snsinfo += BFI_IOIM_SNSLEN) {
		/*
		 * initialize IOIM
		 */
		memset(ioim, 0, sizeof(struct bfa_ioim_s));
		ioim->iotag = i;
		ioim->bfa = fcpim->bfa;
		ioim->fcpim = fcpim;
		ioim->iosp = iosp;
		iosp->snsinfo = snsinfo;
		INIT_LIST_HEAD(&ioim->sgpg_q);
		bfa_reqq_winit(&ioim->iosp->reqq_wait,
			       bfa_ioim_qresume, ioim);
		bfa_sgpg_winit(&ioim->iosp->sgpg_wqe,
			       bfa_ioim_sgpg_alloced, ioim);
		bfa_sm_set_state(ioim, bfa_ioim_sm_uninit);

		list_add_tail(&ioim->qe, &fcpim->ioim_free_q);
	}
}
2649
/*
 * ISR for IOIM response messages from firmware; maps the firmware
 * completion status onto a state machine event for the matching IO.
 */
void
bfa_ioim_isr(struct bfa_s *bfa, struct bfi_msg_s *m)
{
	struct bfa_fcpim_mod_s *fcpim = BFA_FCPIM_MOD(bfa);
	struct bfi_ioim_rsp_s *rsp = (struct bfi_ioim_rsp_s *) m;
	struct bfa_ioim_s *ioim;
	u16 iotag;
	enum bfa_ioim_event evt = BFA_IOIM_SM_COMP;

	iotag = be16_to_cpu(rsp->io_tag);

	ioim = BFA_IOIM_FROM_TAG(fcpim, iotag);
	bfa_assert(ioim->iotag == iotag);

	bfa_trc(ioim->bfa, ioim->iotag);
	bfa_trc(ioim->bfa, rsp->io_status);
	bfa_trc(ioim->bfa, rsp->reuse_io_tag);

	/* stash the response for __bfa_cb_ioim_comp to decode later */
	if (bfa_sm_cmp_state(ioim, bfa_ioim_sm_active))
		ioim->iosp->comp_rspmsg = *m;

	switch (rsp->io_status) {
	case BFI_IOIM_STS_OK:
		bfa_stats(ioim->itnim, iocomp_ok);
		if (rsp->reuse_io_tag == 0)
			evt = BFA_IOIM_SM_DONE;
		else
			evt = BFA_IOIM_SM_COMP;
		break;

	case BFI_IOIM_STS_TIMEDOUT:
		bfa_stats(ioim->itnim, iocomp_timedout);
		/* fallthrough - a timed-out IO is reported as aborted */
	case BFI_IOIM_STS_ABORTED:
		rsp->io_status = BFI_IOIM_STS_ABORTED;
		bfa_stats(ioim->itnim, iocomp_aborted);
		if (rsp->reuse_io_tag == 0)
			evt = BFA_IOIM_SM_DONE;
		else
			evt = BFA_IOIM_SM_COMP;
		break;

	case BFI_IOIM_STS_PROTO_ERR:
		bfa_stats(ioim->itnim, iocom_proto_err);
		bfa_assert(rsp->reuse_io_tag);
		evt = BFA_IOIM_SM_COMP;
		break;

	case BFI_IOIM_STS_SQER_NEEDED:
		bfa_stats(ioim->itnim, iocom_sqer_needed);
		bfa_assert(rsp->reuse_io_tag == 0);
		evt = BFA_IOIM_SM_SQRETRY;
		break;

	case BFI_IOIM_STS_RES_FREE:
		bfa_stats(ioim->itnim, iocom_res_free);
		evt = BFA_IOIM_SM_FREE;
		break;

	case BFI_IOIM_STS_HOST_ABORTED:
		bfa_stats(ioim->itnim, iocom_hostabrts);
		/* drop responses for stale (superseded) abort requests */
		if (rsp->abort_tag != ioim->abort_tag) {
			bfa_trc(ioim->bfa, rsp->abort_tag);
			bfa_trc(ioim->bfa, ioim->abort_tag);
			return;
		}

		if (rsp->reuse_io_tag)
			evt = BFA_IOIM_SM_ABORT_COMP;
		else
			evt = BFA_IOIM_SM_ABORT_DONE;
		break;

	case BFI_IOIM_STS_UTAG:
		bfa_stats(ioim->itnim, iocom_utags);
		evt = BFA_IOIM_SM_COMP_UTAG;
		break;

	default:
		bfa_assert(0);
	}

	bfa_sm_send_event(ioim, evt);
}
2733
2734void
2735bfa_ioim_good_comp_isr(struct bfa_s *bfa, struct bfi_msg_s *m)
2736{
2737 struct bfa_fcpim_mod_s *fcpim = BFA_FCPIM_MOD(bfa);
2738 struct bfi_ioim_rsp_s *rsp = (struct bfi_ioim_rsp_s *) m;
2739 struct bfa_ioim_s *ioim;
2740 u16 iotag;
2741
Jing Huangba816ea2010-10-18 17:10:50 -07002742 iotag = be16_to_cpu(rsp->io_tag);
Krishna Gudipatia36c61f2010-09-15 11:50:55 -07002743
2744 ioim = BFA_IOIM_FROM_TAG(fcpim, iotag);
2745 bfa_assert(ioim->iotag == iotag);
2746
2747 bfa_trc_fp(ioim->bfa, ioim->iotag);
2748 bfa_ioim_cb_profile_comp(fcpim, ioim);
2749
2750 bfa_sm_send_event(ioim, BFA_IOIM_SM_COMP_GOOD);
2751}
2752
/*
 * Record the IO start timestamp (jiffies) for latency profiling.
 */
void
bfa_ioim_profile_start(struct bfa_ioim_s *ioim)
{
	ioim->start_time = jiffies;
}
2758
2759void
2760bfa_ioim_profile_comp(struct bfa_ioim_s *ioim)
2761{
Maggie Zhangf3148782010-12-09 19:11:39 -08002762 struct scsi_cmnd *cmnd = (struct scsi_cmnd *) ioim->dio;
2763 u32 fcp_dl = scsi_bufflen(cmnd);
Krishna Gudipatia36c61f2010-09-15 11:50:55 -07002764 u32 index = bfa_ioim_get_index(fcp_dl);
Jing Huang6a18b162010-10-18 17:08:54 -07002765 u64 end_time = jiffies;
Krishna Gudipatia36c61f2010-09-15 11:50:55 -07002766 struct bfa_itnim_latency_s *io_lat =
2767 &(ioim->itnim->ioprofile.io_latency);
2768 u32 val = (u32)(end_time - ioim->start_time);
2769
2770 bfa_itnim_ioprofile_update(ioim->itnim, index);
2771
2772 io_lat->count[index]++;
2773 io_lat->min[index] = (io_lat->min[index] < val) ?
2774 io_lat->min[index] : val;
2775 io_lat->max[index] = (io_lat->max[index] > val) ?
2776 io_lat->max[index] : val;
2777 io_lat->avg[index] += val;
2778}
/*
 * Called by itnim to clean up IO while going offline.
 */
void
bfa_ioim_cleanup(struct bfa_ioim_s *ioim)
{
	bfa_trc(ioim->bfa, ioim->iotag);
	bfa_stats(ioim->itnim, io_cleanups);

	/* no TM command is driving this cleanup */
	ioim->iosp->tskim = NULL;
	bfa_sm_send_event(ioim, BFA_IOIM_SM_CLEANUP);
}
2791
/*
 * Clean up an IO on behalf of a task management command; the owning
 * tskim is recorded so its wait counter is decremented when the IO
 * cleanup completes (see bfa_ioim_notify_cleanup()).
 */
void
bfa_ioim_cleanup_tm(struct bfa_ioim_s *ioim, struct bfa_tskim_s *tskim)
{
	bfa_trc(ioim->bfa, ioim->iotag);
	bfa_stats(ioim->itnim, io_tmaborts);

	ioim->iosp->tskim = tskim;
	bfa_sm_send_event(ioim, BFA_IOIM_SM_CLEANUP);
}
2801
/*
 * IOC failure handling.
 */
void
bfa_ioim_iocdisable(struct bfa_ioim_s *ioim)
{
	bfa_trc(ioim->bfa, ioim->iotag);
	bfa_stats(ioim->itnim, io_iocdowns);
	bfa_sm_send_event(ioim, BFA_IOIM_SM_HWFAIL);
}
2812
/*
 * IO offline TOV popped. Fail the pending IO.
 */
void
bfa_ioim_tov(struct bfa_ioim_s *ioim)
{
	bfa_trc(ioim->bfa, ioim->iotag);
	bfa_sm_send_event(ioim, BFA_IOIM_SM_IOTOV);
}
2822
2823
Jing Huang5fbe25c2010-10-18 17:17:23 -07002824/*
Krishna Gudipatia36c61f2010-09-15 11:50:55 -07002825 * Allocate IOIM resource for initiator mode I/O request.
2826 */
2827struct bfa_ioim_s *
2828bfa_ioim_alloc(struct bfa_s *bfa, struct bfad_ioim_s *dio,
2829 struct bfa_itnim_s *itnim, u16 nsges)
2830{
2831 struct bfa_fcpim_mod_s *fcpim = BFA_FCPIM_MOD(bfa);
2832 struct bfa_ioim_s *ioim;
2833
Jing Huang5fbe25c2010-10-18 17:17:23 -07002834 /*
Krishna Gudipatia36c61f2010-09-15 11:50:55 -07002835 * alocate IOIM resource
2836 */
2837 bfa_q_deq(&fcpim->ioim_free_q, &ioim);
2838 if (!ioim) {
2839 bfa_stats(itnim, no_iotags);
2840 return NULL;
2841 }
2842
2843 ioim->dio = dio;
2844 ioim->itnim = itnim;
2845 ioim->nsges = nsges;
2846 ioim->nsgpgs = 0;
2847
2848 bfa_stats(itnim, total_ios);
2849 fcpim->ios_active++;
2850
2851 list_add_tail(&ioim->qe, &itnim->io_q);
2852 bfa_trc_fp(ioim->bfa, ioim->iotag);
2853
2854 return ioim;
2855}
2856
/*
 * Return a completed IO to the free pool, releasing any SG pages it
 * held. Only legal from the uninit state.
 */
void
bfa_ioim_free(struct bfa_ioim_s *ioim)
{
	struct bfa_fcpim_mod_s *fcpim = ioim->fcpim;

	bfa_trc_fp(ioim->bfa, ioim->iotag);
	bfa_assert_fp(bfa_sm_cmp_state(ioim, bfa_ioim_sm_uninit));

	/* only IOs that exceeded the inline SG count hold SG pages */
	bfa_assert_fp(list_empty(&ioim->sgpg_q) ||
		      (ioim->nsges > BFI_SGE_INLINE));

	if (ioim->nsgpgs > 0)
		bfa_sgpg_mfree(ioim->bfa, &ioim->sgpg_q, ioim->nsgpgs);

	bfa_stats(ioim->itnim, io_comps);
	fcpim->ios_active--;

	list_del(&ioim->qe);
	list_add_tail(&ioim->qe, &fcpim->ioim_free_q);
}
2877
/*
 * Kick off a newly allocated IO: pick its request queue and start the
 * state machine.
 */
void
bfa_ioim_start(struct bfa_ioim_s *ioim)
{
	bfa_trc_fp(ioim->bfa, ioim->iotag);

	bfa_ioim_cb_profile_start(ioim->fcpim, ioim);

	/*
	 * Obtain the queue over which this request has to be issued
	 */
	/*
	 * NOTE(review): BFA_FALSE (i.e. 0) is used as the queue id when
	 * IO redirection is enabled - presumably "queue 0"; confirm that
	 * this is intentional and not a missing redirect-queue lookup.
	 */
	ioim->reqq = bfa_fcpim_ioredirect_enabled(ioim->bfa) ?
			BFA_FALSE : bfa_itnim_get_reqq(ioim);

	bfa_sm_send_event(ioim, BFA_IOIM_SM_START);
}
2893
Jing Huang5fbe25c2010-10-18 17:17:23 -07002894/*
Krishna Gudipatia36c61f2010-09-15 11:50:55 -07002895 * Driver I/O abort request.
2896 */
2897bfa_status_t
2898bfa_ioim_abort(struct bfa_ioim_s *ioim)
2899{
2900
2901 bfa_trc(ioim->bfa, ioim->iotag);
2902
2903 if (!bfa_ioim_is_abortable(ioim))
2904 return BFA_STATUS_FAILED;
2905
2906 bfa_stats(ioim->itnim, io_aborts);
2907 bfa_sm_send_event(ioim, BFA_IOIM_SM_ABORT);
2908
2909 return BFA_STATUS_OK;
2910}
2911
2912
Jing Huang5fbe25c2010-10-18 17:17:23 -07002913/*
Krishna Gudipatia36c61f2010-09-15 11:50:55 -07002914 * BFA TSKIM state machine functions
2915 */
2916
/*
 * Task management command beginning state.
 */
static void
bfa_tskim_sm_uninit(struct bfa_tskim_s *tskim, enum bfa_tskim_event event)
{
	bfa_trc(tskim->bfa, event);

	switch (event) {
	case BFA_TSKIM_SM_START:
		bfa_sm_set_state(tskim, bfa_tskim_sm_active);
		bfa_tskim_gather_ios(tskim);

		/*
		 * If device is offline, do not send TM on wire. Just cleanup
		 * any pending IO requests and complete TM request.
		 */
		if (!bfa_itnim_is_online(tskim->itnim)) {
			bfa_sm_set_state(tskim, bfa_tskim_sm_iocleanup);
			tskim->tsk_status = BFI_TSKIM_STS_OK;
			bfa_tskim_cleanup_ios(tskim);
			return;
		}

		/* no request queue space: wait for a queue resume */
		if (!bfa_tskim_send(tskim)) {
			bfa_sm_set_state(tskim, bfa_tskim_sm_qfull);
			bfa_stats(tskim->itnim, tm_qwait);
			bfa_reqq_wait(tskim->bfa, tskim->itnim->reqq,
				      &tskim->reqq_wait);
		}
		break;

	default:
		bfa_sm_fault(tskim->bfa, event);
	}
}
2953
Jing Huang5fbe25c2010-10-18 17:17:23 -07002954/*
 * TM command is active, awaiting completion from firmware to
2957 * cleanup IO requests in TM scope.
2958 */
static void
bfa_tskim_sm_active(struct bfa_tskim_s *tskim, enum bfa_tskim_event event)
{
	bfa_trc(tskim->bfa, event);

	switch (event) {
	case BFA_TSKIM_SM_DONE:
		/* Firmware completed the TM: clean up the gathered I/Os */
		bfa_sm_set_state(tskim, bfa_tskim_sm_iocleanup);
		bfa_tskim_cleanup_ios(tskim);
		break;

	case BFA_TSKIM_SM_CLEANUP:
		/*
		 * ITN going offline while the TM is on the wire: abort the
		 * active TM; if the abort cannot be queued, wait for room.
		 */
		bfa_sm_set_state(tskim, bfa_tskim_sm_cleanup);
		if (!bfa_tskim_send_abort(tskim)) {
			bfa_sm_set_state(tskim, bfa_tskim_sm_cleanup_qfull);
			bfa_stats(tskim->itnim, tm_qwait);
			bfa_reqq_wait(tskim->bfa, tskim->itnim->reqq,
				&tskim->reqq_wait);
		}
		break;

	case BFA_TSKIM_SM_HWFAIL:
		/* IOC failure: fail scoped I/Os locally, report TM failed */
		bfa_sm_set_state(tskim, bfa_tskim_sm_hcb);
		bfa_tskim_iocdisable_ios(tskim);
		bfa_tskim_qcomp(tskim, __bfa_cb_tskim_failed);
		break;

	default:
		bfa_sm_fault(tskim->bfa, event);
	}
}
2990
Jing Huang5fbe25c2010-10-18 17:17:23 -07002991/*
Krishna Gudipatia36c61f2010-09-15 11:50:55 -07002992 * An active TM is being cleaned up since ITN is offline. Awaiting cleanup
2993 * completion event from firmware.
2994 */
static void
bfa_tskim_sm_cleanup(struct bfa_tskim_s *tskim, enum bfa_tskim_event event)
{
	bfa_trc(tskim->bfa, event);

	switch (event) {
	case BFA_TSKIM_SM_DONE:
		/*
		 * Ignore and wait for ABORT completion from firmware.
		 */
		break;

	case BFA_TSKIM_SM_CLEANUP_DONE:
		/* Abort acknowledged by firmware: clean up scoped I/Os */
		bfa_sm_set_state(tskim, bfa_tskim_sm_iocleanup);
		bfa_tskim_cleanup_ios(tskim);
		break;

	case BFA_TSKIM_SM_HWFAIL:
		/* IOC failure: fail scoped I/Os locally, report TM failed */
		bfa_sm_set_state(tskim, bfa_tskim_sm_hcb);
		bfa_tskim_iocdisable_ios(tskim);
		bfa_tskim_qcomp(tskim, __bfa_cb_tskim_failed);
		break;

	default:
		bfa_sm_fault(tskim->bfa, event);
	}
}
3022
static void
bfa_tskim_sm_iocleanup(struct bfa_tskim_s *tskim, enum bfa_tskim_event event)
{
	bfa_trc(tskim->bfa, event);

	switch (event) {
	case BFA_TSKIM_SM_IOS_DONE:
		/* All scoped I/Os cleaned up: schedule the done callback */
		bfa_sm_set_state(tskim, bfa_tskim_sm_hcb);
		bfa_tskim_qcomp(tskim, __bfa_cb_tskim_done);
		break;

	case BFA_TSKIM_SM_CLEANUP:
		/*
		 * Ignore, TM command completed on wire.
		 * Notify TM completion on IO cleanup completion.
		 */
		break;

	case BFA_TSKIM_SM_HWFAIL:
		/* IOC failure: fail scoped I/Os locally, report TM failed */
		bfa_sm_set_state(tskim, bfa_tskim_sm_hcb);
		bfa_tskim_iocdisable_ios(tskim);
		bfa_tskim_qcomp(tskim, __bfa_cb_tskim_failed);
		break;

	default:
		bfa_sm_fault(tskim->bfa, event);
	}
}
3051
Jing Huang5fbe25c2010-10-18 17:17:23 -07003052/*
Krishna Gudipatia36c61f2010-09-15 11:50:55 -07003053 * Task management command is waiting for room in request CQ
3054 */
static void
bfa_tskim_sm_qfull(struct bfa_tskim_s *tskim, enum bfa_tskim_event event)
{
	bfa_trc(tskim->bfa, event);

	switch (event) {
	case BFA_TSKIM_SM_QRESUME:
		/* Request queue space freed up: send the TM request now */
		bfa_sm_set_state(tskim, bfa_tskim_sm_active);
		bfa_tskim_send(tskim);
		break;

	case BFA_TSKIM_SM_CLEANUP:
		/*
		 * No need to send TM on wire since ITN is offline.
		 */
		bfa_sm_set_state(tskim, bfa_tskim_sm_iocleanup);
		bfa_reqq_wcancel(&tskim->reqq_wait);
		bfa_tskim_cleanup_ios(tskim);
		break;

	case BFA_TSKIM_SM_HWFAIL:
		/* IOC failure: cancel the queue wait and fail the TM */
		bfa_sm_set_state(tskim, bfa_tskim_sm_hcb);
		bfa_reqq_wcancel(&tskim->reqq_wait);
		bfa_tskim_iocdisable_ios(tskim);
		bfa_tskim_qcomp(tskim, __bfa_cb_tskim_failed);
		break;

	default:
		bfa_sm_fault(tskim->bfa, event);
	}
}
3086
Jing Huang5fbe25c2010-10-18 17:17:23 -07003087/*
Krishna Gudipatia36c61f2010-09-15 11:50:55 -07003088 * Task management command is active, awaiting for room in request CQ
3089 * to send clean up request.
3090 */
static void
bfa_tskim_sm_cleanup_qfull(struct bfa_tskim_s *tskim,
		enum bfa_tskim_event event)
{
	bfa_trc(tskim->bfa, event);

	switch (event) {
	case BFA_TSKIM_SM_DONE:
		bfa_reqq_wcancel(&tskim->reqq_wait);
		/*
		 * TM completed on the wire while the abort was waiting for
		 * queue space; the abort is still sent, same as QRESUME.
		 *
		 * Fall through !!!
		 */

	case BFA_TSKIM_SM_QRESUME:
		/* Queue space available: send the pending abort request */
		bfa_sm_set_state(tskim, bfa_tskim_sm_cleanup);
		bfa_tskim_send_abort(tskim);
		break;

	case BFA_TSKIM_SM_HWFAIL:
		/* IOC failure: cancel the queue wait and fail the TM */
		bfa_sm_set_state(tskim, bfa_tskim_sm_hcb);
		bfa_reqq_wcancel(&tskim->reqq_wait);
		bfa_tskim_iocdisable_ios(tskim);
		bfa_tskim_qcomp(tskim, __bfa_cb_tskim_failed);
		break;

	default:
		bfa_sm_fault(tskim->bfa, event);
	}
}
3121
Jing Huang5fbe25c2010-10-18 17:17:23 -07003122/*
Krishna Gudipatia36c61f2010-09-15 11:50:55 -07003123 * BFA callback is pending
3124 */
static void
bfa_tskim_sm_hcb(struct bfa_tskim_s *tskim, enum bfa_tskim_event event)
{
	bfa_trc(tskim->bfa, event);

	switch (event) {
	case BFA_TSKIM_SM_HCB:
		/* Callback processed: return the TM to the free pool */
		bfa_sm_set_state(tskim, bfa_tskim_sm_uninit);
		bfa_tskim_free(tskim);
		break;

	case BFA_TSKIM_SM_CLEANUP:
		/* Cleanup requested while callback pending: notify now */
		bfa_tskim_notify_comp(tskim);
		break;

	case BFA_TSKIM_SM_HWFAIL:
		/* Nothing more to do; completion is already scheduled */
		break;

	default:
		bfa_sm_fault(tskim->bfa, event);
	}
}
3147
3148
Krishna Gudipatia36c61f2010-09-15 11:50:55 -07003149static void
3150__bfa_cb_tskim_done(void *cbarg, bfa_boolean_t complete)
3151{
3152 struct bfa_tskim_s *tskim = cbarg;
3153
3154 if (!complete) {
3155 bfa_sm_send_event(tskim, BFA_TSKIM_SM_HCB);
3156 return;
3157 }
3158
3159 bfa_stats(tskim->itnim, tm_success);
3160 bfa_cb_tskim_done(tskim->bfa->bfad, tskim->dtsk, tskim->tsk_status);
3161}
3162
3163static void
3164__bfa_cb_tskim_failed(void *cbarg, bfa_boolean_t complete)
3165{
3166 struct bfa_tskim_s *tskim = cbarg;
3167
3168 if (!complete) {
3169 bfa_sm_send_event(tskim, BFA_TSKIM_SM_HCB);
3170 return;
3171 }
3172
3173 bfa_stats(tskim->itnim, tm_failures);
3174 bfa_cb_tskim_done(tskim->bfa->bfad, tskim->dtsk,
3175 BFI_TSKIM_STS_FAILED);
3176}
3177
3178static bfa_boolean_t
Maggie Zhangf3148782010-12-09 19:11:39 -08003179bfa_tskim_match_scope(struct bfa_tskim_s *tskim, struct scsi_lun lun)
Krishna Gudipatia36c61f2010-09-15 11:50:55 -07003180{
3181 switch (tskim->tm_cmnd) {
3182 case FCP_TM_TARGET_RESET:
3183 return BFA_TRUE;
3184
3185 case FCP_TM_ABORT_TASK_SET:
3186 case FCP_TM_CLEAR_TASK_SET:
3187 case FCP_TM_LUN_RESET:
3188 case FCP_TM_CLEAR_ACA:
Maggie Zhangf3148782010-12-09 19:11:39 -08003189 return (!memcmp(&tskim->lun, &lun, sizeof(lun)));
Krishna Gudipatia36c61f2010-09-15 11:50:55 -07003190
3191 default:
3192 bfa_assert(0);
3193 }
3194
3195 return BFA_FALSE;
3196}
3197
Jing Huang5fbe25c2010-10-18 17:17:23 -07003198/*
Krishna Gudipatia36c61f2010-09-15 11:50:55 -07003199 * Gather affected IO requests and task management commands.
3200 */
3201static void
3202bfa_tskim_gather_ios(struct bfa_tskim_s *tskim)
3203{
3204 struct bfa_itnim_s *itnim = tskim->itnim;
3205 struct bfa_ioim_s *ioim;
Maggie Zhangf3148782010-12-09 19:11:39 -08003206 struct list_head *qe, *qen;
3207 struct scsi_cmnd *cmnd;
3208 struct scsi_lun scsilun;
Krishna Gudipatia36c61f2010-09-15 11:50:55 -07003209
3210 INIT_LIST_HEAD(&tskim->io_q);
3211
Jing Huang5fbe25c2010-10-18 17:17:23 -07003212 /*
Krishna Gudipatia36c61f2010-09-15 11:50:55 -07003213 * Gather any active IO requests first.
3214 */
3215 list_for_each_safe(qe, qen, &itnim->io_q) {
3216 ioim = (struct bfa_ioim_s *) qe;
Maggie Zhangf3148782010-12-09 19:11:39 -08003217 cmnd = (struct scsi_cmnd *) ioim->dio;
3218 int_to_scsilun(cmnd->device->lun, &scsilun);
3219 if (bfa_tskim_match_scope(tskim, scsilun)) {
Krishna Gudipatia36c61f2010-09-15 11:50:55 -07003220 list_del(&ioim->qe);
3221 list_add_tail(&ioim->qe, &tskim->io_q);
3222 }
3223 }
3224
Jing Huang5fbe25c2010-10-18 17:17:23 -07003225 /*
Krishna Gudipatia36c61f2010-09-15 11:50:55 -07003226 * Failback any pending IO requests immediately.
3227 */
3228 list_for_each_safe(qe, qen, &itnim->pending_q) {
3229 ioim = (struct bfa_ioim_s *) qe;
Maggie Zhangf3148782010-12-09 19:11:39 -08003230 cmnd = (struct scsi_cmnd *) ioim->dio;
3231 int_to_scsilun(cmnd->device->lun, &scsilun);
3232 if (bfa_tskim_match_scope(tskim, scsilun)) {
Krishna Gudipatia36c61f2010-09-15 11:50:55 -07003233 list_del(&ioim->qe);
3234 list_add_tail(&ioim->qe, &ioim->fcpim->ioim_comp_q);
3235 bfa_ioim_tov(ioim);
3236 }
3237 }
3238}
3239
Jing Huang5fbe25c2010-10-18 17:17:23 -07003240/*
Krishna Gudipatia36c61f2010-09-15 11:50:55 -07003241 * IO cleanup completion
3242 */
3243static void
3244bfa_tskim_cleanp_comp(void *tskim_cbarg)
3245{
3246 struct bfa_tskim_s *tskim = tskim_cbarg;
3247
3248 bfa_stats(tskim->itnim, tm_io_comps);
3249 bfa_sm_send_event(tskim, BFA_TSKIM_SM_IOS_DONE);
3250}
3251
Jing Huang5fbe25c2010-10-18 17:17:23 -07003252/*
Krishna Gudipatia36c61f2010-09-15 11:50:55 -07003253 * Gather affected IO requests and task management commands.
3254 */
3255static void
3256bfa_tskim_cleanup_ios(struct bfa_tskim_s *tskim)
3257{
3258 struct bfa_ioim_s *ioim;
3259 struct list_head *qe, *qen;
3260
3261 bfa_wc_init(&tskim->wc, bfa_tskim_cleanp_comp, tskim);
3262
3263 list_for_each_safe(qe, qen, &tskim->io_q) {
3264 ioim = (struct bfa_ioim_s *) qe;
3265 bfa_wc_up(&tskim->wc);
3266 bfa_ioim_cleanup_tm(ioim, tskim);
3267 }
3268
3269 bfa_wc_wait(&tskim->wc);
3270}
3271
Jing Huang5fbe25c2010-10-18 17:17:23 -07003272/*
Krishna Gudipatia36c61f2010-09-15 11:50:55 -07003273 * Send task management request to firmware.
3274 */
static bfa_boolean_t
bfa_tskim_send(struct bfa_tskim_s *tskim)
{
	struct bfa_itnim_s *itnim = tskim->itnim;
	struct bfi_tskim_req_s *m;

	/*
	 * check for room in queue to send request now
	 */
	m = bfa_reqq_next(tskim->bfa, itnim->reqq);
	if (!m)
		return BFA_FALSE;	/* caller transitions to a qfull state */

	/*
	 * build i/o request message next
	 */
	bfi_h2i_set(m->mh, BFI_MC_TSKIM, BFI_TSKIM_H2I_TM_REQ,
			bfa_lpuid(tskim->bfa));

	/* wire format of the tag is big-endian */
	m->tsk_tag = cpu_to_be16(tskim->tsk_tag);
	m->itn_fhdl = tskim->itnim->rport->fw_handle;
	m->t_secs = tskim->tsecs;
	m->lun = tskim->lun;
	m->tm_flags = tskim->tm_cmnd;

	/*
	 * queue I/O message to firmware
	 */
	bfa_reqq_produce(tskim->bfa, itnim->reqq);
	return BFA_TRUE;
}
3306
Jing Huang5fbe25c2010-10-18 17:17:23 -07003307/*
Krishna Gudipatia36c61f2010-09-15 11:50:55 -07003308 * Send abort request to cleanup an active TM to firmware.
3309 */
static bfa_boolean_t
bfa_tskim_send_abort(struct bfa_tskim_s *tskim)
{
	struct bfa_itnim_s *itnim = tskim->itnim;
	struct bfi_tskim_abortreq_s *m;

	/*
	 * check for room in queue to send request now
	 */
	m = bfa_reqq_next(tskim->bfa, itnim->reqq);
	if (!m)
		return BFA_FALSE;	/* caller transitions to cleanup_qfull */

	/*
	 * build i/o request message next
	 */
	bfi_h2i_set(m->mh, BFI_MC_TSKIM, BFI_TSKIM_H2I_ABORT_REQ,
			bfa_lpuid(tskim->bfa));

	/* wire format of the tag is big-endian */
	m->tsk_tag = cpu_to_be16(tskim->tsk_tag);

	/*
	 * queue I/O message to firmware
	 */
	bfa_reqq_produce(tskim->bfa, itnim->reqq);
	return BFA_TRUE;
}
3337
Jing Huang5fbe25c2010-10-18 17:17:23 -07003338/*
Krishna Gudipatia36c61f2010-09-15 11:50:55 -07003339 * Call to resume task management cmnd waiting for room in request queue.
3340 */
3341static void
3342bfa_tskim_qresume(void *cbarg)
3343{
3344 struct bfa_tskim_s *tskim = cbarg;
3345
3346 bfa_stats(tskim->itnim, tm_qresumes);
3347 bfa_sm_send_event(tskim, BFA_TSKIM_SM_QRESUME);
3348}
3349
Jing Huang5fbe25c2010-10-18 17:17:23 -07003350/*
 * Cleanup IOs associated with a task management command on IOC failures.
3352 */
3353static void
3354bfa_tskim_iocdisable_ios(struct bfa_tskim_s *tskim)
3355{
3356 struct bfa_ioim_s *ioim;
3357 struct list_head *qe, *qen;
3358
3359 list_for_each_safe(qe, qen, &tskim->io_q) {
3360 ioim = (struct bfa_ioim_s *) qe;
3361 bfa_ioim_iocdisable(ioim);
3362 }
3363}
3364
3365
Jing Huang5fbe25c2010-10-18 17:17:23 -07003366/*
Krishna Gudipatia36c61f2010-09-15 11:50:55 -07003367 * Notification on completions from related ioim.
3368 */
void
bfa_tskim_iodone(struct bfa_tskim_s *tskim)
{
	/* One scoped I/O finished cleanup; see bfa_tskim_cleanup_ios() */
	bfa_wc_down(&tskim->wc);
}
3374
Jing Huang5fbe25c2010-10-18 17:17:23 -07003375/*
Krishna Gudipatia36c61f2010-09-15 11:50:55 -07003376 * Handle IOC h/w failure notification from itnim.
3377 */
3378void
3379bfa_tskim_iocdisable(struct bfa_tskim_s *tskim)
3380{
3381 tskim->notify = BFA_FALSE;
3382 bfa_stats(tskim->itnim, tm_iocdowns);
3383 bfa_sm_send_event(tskim, BFA_TSKIM_SM_HWFAIL);
3384}
3385
Jing Huang5fbe25c2010-10-18 17:17:23 -07003386/*
Krishna Gudipatia36c61f2010-09-15 11:50:55 -07003387 * Cleanup TM command and associated IOs as part of ITNIM offline.
3388 */
3389void
3390bfa_tskim_cleanup(struct bfa_tskim_s *tskim)
3391{
3392 tskim->notify = BFA_TRUE;
3393 bfa_stats(tskim->itnim, tm_cleanups);
3394 bfa_sm_send_event(tskim, BFA_TSKIM_SM_CLEANUP);
3395}
3396
Jing Huang5fbe25c2010-10-18 17:17:23 -07003397/*
Krishna Gudipatia36c61f2010-09-15 11:50:55 -07003398 * Memory allocation and initialization.
3399 */
3400void
3401bfa_tskim_attach(struct bfa_fcpim_mod_s *fcpim, struct bfa_meminfo_s *minfo)
3402{
3403 struct bfa_tskim_s *tskim;
3404 u16 i;
3405
3406 INIT_LIST_HEAD(&fcpim->tskim_free_q);
3407
3408 tskim = (struct bfa_tskim_s *) bfa_meminfo_kva(minfo);
3409 fcpim->tskim_arr = tskim;
3410
3411 for (i = 0; i < fcpim->num_tskim_reqs; i++, tskim++) {
3412 /*
3413 * initialize TSKIM
3414 */
Jing Huang6a18b162010-10-18 17:08:54 -07003415 memset(tskim, 0, sizeof(struct bfa_tskim_s));
Krishna Gudipatia36c61f2010-09-15 11:50:55 -07003416 tskim->tsk_tag = i;
3417 tskim->bfa = fcpim->bfa;
3418 tskim->fcpim = fcpim;
3419 tskim->notify = BFA_FALSE;
3420 bfa_reqq_winit(&tskim->reqq_wait, bfa_tskim_qresume,
3421 tskim);
3422 bfa_sm_set_state(tskim, bfa_tskim_sm_uninit);
3423
3424 list_add_tail(&tskim->qe, &fcpim->tskim_free_q);
3425 }
3426
3427 bfa_meminfo_kva(minfo) = (u8 *) tskim;
3428}
3429
void
bfa_tskim_isr(struct bfa_s *bfa, struct bfi_msg_s *m)
{
	struct bfa_fcpim_mod_s *fcpim = BFA_FCPIM_MOD(bfa);
	struct bfi_tskim_rsp_s *rsp = (struct bfi_tskim_rsp_s *) m;
	struct bfa_tskim_s *tskim;
	/* tag travels big-endian on the wire */
	u16 tsk_tag = be16_to_cpu(rsp->tsk_tag);

	tskim = BFA_TSKIM_FROM_TAG(fcpim, tsk_tag);
	bfa_assert(tskim->tsk_tag == tsk_tag);

	tskim->tsk_status = rsp->tsk_status;

	/*
	 * Firmware sends BFI_TSKIM_STS_ABORTED status for abort
	 * requests. All other statuses are for normal completions.
	 */
	if (rsp->tsk_status == BFI_TSKIM_STS_ABORTED) {
		bfa_stats(tskim->itnim, tm_cleanup_comps);
		bfa_sm_send_event(tskim, BFA_TSKIM_SM_CLEANUP_DONE);
	} else {
		bfa_stats(tskim->itnim, tm_fw_rsps);
		bfa_sm_send_event(tskim, BFA_TSKIM_SM_DONE);
	}
}
3455
3456
Krishna Gudipatia36c61f2010-09-15 11:50:55 -07003457struct bfa_tskim_s *
3458bfa_tskim_alloc(struct bfa_s *bfa, struct bfad_tskim_s *dtsk)
3459{
3460 struct bfa_fcpim_mod_s *fcpim = BFA_FCPIM_MOD(bfa);
3461 struct bfa_tskim_s *tskim;
3462
3463 bfa_q_deq(&fcpim->tskim_free_q, &tskim);
3464
3465 if (tskim)
3466 tskim->dtsk = dtsk;
3467
3468 return tskim;
3469}
3470
void
bfa_tskim_free(struct bfa_tskim_s *tskim)
{
	/* The TM must still be linked on its itnim's task queue. */
	bfa_assert(bfa_q_is_on_q_func(&tskim->itnim->tsk_q, &tskim->qe));
	list_del(&tskim->qe);
	list_add_tail(&tskim->qe, &tskim->fcpim->tskim_free_q);
}
3478
Jing Huang5fbe25c2010-10-18 17:17:23 -07003479/*
Krishna Gudipatia36c61f2010-09-15 11:50:55 -07003480 * Start a task management command.
3481 *
3482 * @param[in] tskim BFA task management command instance
3483 * @param[in] itnim i-t nexus for the task management command
3484 * @param[in] lun lun, if applicable
3485 * @param[in] tm_cmnd Task management command code.
3486 * @param[in] t_secs Timeout in seconds
3487 *
3488 * @return None.
3489 */
void
bfa_tskim_start(struct bfa_tskim_s *tskim, struct bfa_itnim_s *itnim,
			struct scsi_lun lun,
			enum fcp_tm_cmnd tm_cmnd, u8 tsecs)
{
	/* Record the parameters of this TM for the i-t nexus */
	tskim->itnim = itnim;
	tskim->lun = lun;
	tskim->tm_cmnd = tm_cmnd;
	tskim->tsecs = tsecs;
	/* cleared here; set only by bfa_tskim_cleanup() */
	tskim->notify = BFA_FALSE;
	bfa_stats(itnim, tm_cmnds);

	/* Track the TM on the itnim and kick off its state machine */
	list_add_tail(&tskim->qe, &itnim->tsk_q);
	bfa_sm_send_event(tskim, BFA_TSKIM_SM_START);
}