/*
 * Copyright (c) 2005-2010 Brocade Communications Systems, Inc.
 * All rights reserved
 * www.brocade.com
 *
 * Linux driver for Brocade Fibre Channel Host Bus Adapter.
 *
 * This program is free software; you can redistribute it and/or modify it
 * under the terms of the GNU General Public License (GPL) Version 2 as
 * published by the Free Software Foundation
 *
 * This program is distributed in the hope that it will be useful, but
 * WITHOUT ANY WARRANTY; without even the implied warranty of
 * MERCHANTABILITY or FITNESS FOR A PARTICULAR PURPOSE.  See the GNU
 * General Public License for more details.
 */

#include "bfad_drv.h"
#include "bfa_ioc.h"
#include "bfi_reg.h"
#include "bfa_defs.h"
#include "bfa_defs_svc.h"

BFA_TRC_FILE(CNA, IOC);

/*
 * IOC local definitions
 */
#define BFA_IOC_TOV		3000	/* msecs */
#define BFA_IOC_HWSEM_TOV	500	/* msecs */
#define BFA_IOC_HB_TOV		500	/* msecs */
#define BFA_IOC_TOV_RECOVER	BFA_IOC_HB_TOV
#define BFA_IOC_POLL_TOV	BFA_TIMER_FREQ

#define bfa_ioc_timer_start(__ioc)					\
	bfa_timer_begin((__ioc)->timer_mod, &(__ioc)->ioc_timer,	\
			bfa_ioc_timeout, (__ioc), BFA_IOC_TOV)
#define bfa_ioc_timer_stop(__ioc)	bfa_timer_stop(&(__ioc)->ioc_timer)

#define bfa_hb_timer_start(__ioc)					\
	bfa_timer_begin((__ioc)->timer_mod, &(__ioc)->hb_timer,	\
			bfa_ioc_hb_check, (__ioc), BFA_IOC_HB_TOV)
#define bfa_hb_timer_stop(__ioc)	bfa_timer_stop(&(__ioc)->hb_timer)

#define BFA_DBG_FWTRC_OFF(_fn)	(BFI_IOC_TRC_OFF + BFA_DBG_FWTRC_LEN * (_fn))

/*
 * Asic specific macros : see bfa_hw_cb.c and bfa_hw_ct.c for details.
 */

#define bfa_ioc_firmware_lock(__ioc)			\
			((__ioc)->ioc_hwif->ioc_firmware_lock(__ioc))
#define bfa_ioc_firmware_unlock(__ioc)			\
			((__ioc)->ioc_hwif->ioc_firmware_unlock(__ioc))
#define bfa_ioc_reg_init(__ioc) ((__ioc)->ioc_hwif->ioc_reg_init(__ioc))
#define bfa_ioc_map_port(__ioc) ((__ioc)->ioc_hwif->ioc_map_port(__ioc))
#define bfa_ioc_notify_fail(__ioc)		\
			((__ioc)->ioc_hwif->ioc_notify_fail(__ioc))
#define bfa_ioc_sync_start(__ioc)		\
			((__ioc)->ioc_hwif->ioc_sync_start(__ioc))
#define bfa_ioc_sync_join(__ioc)		\
			((__ioc)->ioc_hwif->ioc_sync_join(__ioc))
#define bfa_ioc_sync_leave(__ioc)		\
			((__ioc)->ioc_hwif->ioc_sync_leave(__ioc))
#define bfa_ioc_sync_ack(__ioc)			\
			((__ioc)->ioc_hwif->ioc_sync_ack(__ioc))
#define bfa_ioc_sync_complete(__ioc)		\
			((__ioc)->ioc_hwif->ioc_sync_complete(__ioc))

#define bfa_ioc_mbox_cmd_pending(__ioc)		\
			(!list_empty(&((__ioc)->mbox_mod.cmd_q)) || \
			readl((__ioc)->ioc_regs.hfn_mbox_cmd))

bfa_boolean_t bfa_auto_recover = BFA_TRUE;

/*
 * forward declarations
 */
static void bfa_ioc_hw_sem_get(struct bfa_ioc_s *ioc);
static void bfa_ioc_hwinit(struct bfa_ioc_s *ioc, bfa_boolean_t force);
static void bfa_ioc_timeout(void *ioc);
static void bfa_ioc_poll_fwinit(struct bfa_ioc_s *ioc);
static void bfa_ioc_send_enable(struct bfa_ioc_s *ioc);
static void bfa_ioc_send_disable(struct bfa_ioc_s *ioc);
static void bfa_ioc_send_getattr(struct bfa_ioc_s *ioc);
static void bfa_ioc_hb_monitor(struct bfa_ioc_s *ioc);
static void bfa_ioc_mbox_poll(struct bfa_ioc_s *ioc);
static void bfa_ioc_mbox_flush(struct bfa_ioc_s *ioc);
static void bfa_ioc_recover(struct bfa_ioc_s *ioc);
static void bfa_ioc_check_attr_wwns(struct bfa_ioc_s *ioc);
static void bfa_ioc_event_notify(struct bfa_ioc_s *ioc,
				enum bfa_ioc_event_e event);
static void bfa_ioc_disable_comp(struct bfa_ioc_s *ioc);
static void bfa_ioc_lpu_stop(struct bfa_ioc_s *ioc);
static void bfa_ioc_debug_save_ftrc(struct bfa_ioc_s *ioc);
static void bfa_ioc_fail_notify(struct bfa_ioc_s *ioc);
static void bfa_ioc_pf_fwmismatch(struct bfa_ioc_s *ioc);


/*
 * IOC state machine definitions/declarations
 */
enum ioc_event {
	IOC_E_RESET		= 1,	/* IOC reset request		*/
	IOC_E_ENABLE		= 2,	/* IOC enable request		*/
	IOC_E_DISABLE		= 3,	/* IOC disable request		*/
	IOC_E_DETACH		= 4,	/* driver detach cleanup	*/
	IOC_E_ENABLED		= 5,	/* f/w enabled			*/
	IOC_E_FWRSP_GETATTR	= 6,	/* IOC get attribute response	*/
	IOC_E_DISABLED		= 7,	/* f/w disabled			*/
	IOC_E_PFFAILED		= 8,	/* failure notice by iocpf sm	*/
	IOC_E_HBFAIL		= 9,	/* heartbeat failure		*/
	IOC_E_HWERROR		= 10,	/* hardware error interrupt	*/
	IOC_E_TIMEOUT		= 11,	/* timeout			*/
	IOC_E_HWFAILED		= 12,	/* PCI mapping failure notice	*/
	IOC_E_FWRSP_ACQ_ADDR	= 13,	/* Acquiring address		*/
};

bfa_fsm_state_decl(bfa_ioc, uninit, struct bfa_ioc_s, enum ioc_event);
bfa_fsm_state_decl(bfa_ioc, reset, struct bfa_ioc_s, enum ioc_event);
bfa_fsm_state_decl(bfa_ioc, enabling, struct bfa_ioc_s, enum ioc_event);
bfa_fsm_state_decl(bfa_ioc, getattr, struct bfa_ioc_s, enum ioc_event);
bfa_fsm_state_decl(bfa_ioc, op, struct bfa_ioc_s, enum ioc_event);
bfa_fsm_state_decl(bfa_ioc, fail_retry, struct bfa_ioc_s, enum ioc_event);
bfa_fsm_state_decl(bfa_ioc, fail, struct bfa_ioc_s, enum ioc_event);
bfa_fsm_state_decl(bfa_ioc, disabling, struct bfa_ioc_s, enum ioc_event);
bfa_fsm_state_decl(bfa_ioc, disabled, struct bfa_ioc_s, enum ioc_event);
bfa_fsm_state_decl(bfa_ioc, hwfail, struct bfa_ioc_s, enum ioc_event);
bfa_fsm_state_decl(bfa_ioc, acq_addr, struct bfa_ioc_s, enum ioc_event);

static struct bfa_sm_table_s ioc_sm_table[] = {
	{BFA_SM(bfa_ioc_sm_uninit), BFA_IOC_UNINIT},
	{BFA_SM(bfa_ioc_sm_reset), BFA_IOC_RESET},
	{BFA_SM(bfa_ioc_sm_enabling), BFA_IOC_ENABLING},
	{BFA_SM(bfa_ioc_sm_getattr), BFA_IOC_GETATTR},
	{BFA_SM(bfa_ioc_sm_op), BFA_IOC_OPERATIONAL},
	{BFA_SM(bfa_ioc_sm_fail_retry), BFA_IOC_INITFAIL},
	{BFA_SM(bfa_ioc_sm_fail), BFA_IOC_FAIL},
	{BFA_SM(bfa_ioc_sm_disabling), BFA_IOC_DISABLING},
	{BFA_SM(bfa_ioc_sm_disabled), BFA_IOC_DISABLED},
	{BFA_SM(bfa_ioc_sm_hwfail), BFA_IOC_HWFAIL},
	{BFA_SM(bfa_ioc_sm_acq_addr), BFA_IOC_ACQ_ADDR},
};

/*
 * IOCPF state machine definitions/declarations
 */

#define bfa_iocpf_timer_start(__ioc)					\
	bfa_timer_begin((__ioc)->timer_mod, &(__ioc)->ioc_timer,	\
			bfa_iocpf_timeout, (__ioc), BFA_IOC_TOV)
#define bfa_iocpf_timer_stop(__ioc)	bfa_timer_stop(&(__ioc)->ioc_timer)

#define bfa_iocpf_poll_timer_start(__ioc)				\
	bfa_timer_begin((__ioc)->timer_mod, &(__ioc)->ioc_timer,	\
			bfa_iocpf_poll_timeout, (__ioc), BFA_IOC_POLL_TOV)

#define bfa_sem_timer_start(__ioc)					\
	bfa_timer_begin((__ioc)->timer_mod, &(__ioc)->sem_timer,	\
			bfa_iocpf_sem_timeout, (__ioc), BFA_IOC_HWSEM_TOV)
#define bfa_sem_timer_stop(__ioc)	bfa_timer_stop(&(__ioc)->sem_timer)

/*
 * Forward declarations for the iocpf state machine
 */
static void bfa_iocpf_timeout(void *ioc_arg);
static void bfa_iocpf_sem_timeout(void *ioc_arg);
static void bfa_iocpf_poll_timeout(void *ioc_arg);

/*
 * IOCPF state machine events
 */
enum iocpf_event {
	IOCPF_E_ENABLE		= 1,	/* IOCPF enable request		*/
	IOCPF_E_DISABLE		= 2,	/* IOCPF disable request	*/
	IOCPF_E_STOP		= 3,	/* stop on driver detach	*/
	IOCPF_E_FWREADY		= 4,	/* f/w initialization done	*/
	IOCPF_E_FWRSP_ENABLE	= 5,	/* enable f/w response		*/
	IOCPF_E_FWRSP_DISABLE	= 6,	/* disable f/w response		*/
	IOCPF_E_FAIL		= 7,	/* failure notice by ioc sm	*/
	IOCPF_E_INITFAIL	= 8,	/* init fail notice by ioc sm	*/
	IOCPF_E_GETATTRFAIL	= 9,	/* init fail notice by ioc sm	*/
	IOCPF_E_SEMLOCKED	= 10,	/* h/w semaphore is locked	*/
	IOCPF_E_TIMEOUT		= 11,	/* f/w response timeout		*/
	IOCPF_E_SEM_ERROR	= 12,	/* h/w sem mapping error	*/
};

/*
 * IOCPF states
 */
enum bfa_iocpf_state {
	BFA_IOCPF_RESET		= 1,	/* IOC is in reset state */
	BFA_IOCPF_SEMWAIT	= 2,	/* Waiting for IOC h/w semaphore */
	BFA_IOCPF_HWINIT	= 3,	/* IOC h/w is being initialized */
	BFA_IOCPF_READY		= 4,	/* IOCPF is initialized */
	BFA_IOCPF_INITFAIL	= 5,	/* IOCPF failed */
	BFA_IOCPF_FAIL		= 6,	/* IOCPF failed */
	BFA_IOCPF_DISABLING	= 7,	/* IOCPF is being disabled */
	BFA_IOCPF_DISABLED	= 8,	/* IOCPF is disabled */
	BFA_IOCPF_FWMISMATCH	= 9,	/* IOC f/w different from drivers */
};

bfa_fsm_state_decl(bfa_iocpf, reset, struct bfa_iocpf_s, enum iocpf_event);
bfa_fsm_state_decl(bfa_iocpf, fwcheck, struct bfa_iocpf_s, enum iocpf_event);
bfa_fsm_state_decl(bfa_iocpf, mismatch, struct bfa_iocpf_s, enum iocpf_event);
bfa_fsm_state_decl(bfa_iocpf, semwait, struct bfa_iocpf_s, enum iocpf_event);
bfa_fsm_state_decl(bfa_iocpf, hwinit, struct bfa_iocpf_s, enum iocpf_event);
bfa_fsm_state_decl(bfa_iocpf, enabling, struct bfa_iocpf_s, enum iocpf_event);
bfa_fsm_state_decl(bfa_iocpf, ready, struct bfa_iocpf_s, enum iocpf_event);
bfa_fsm_state_decl(bfa_iocpf, initfail_sync, struct bfa_iocpf_s,
						enum iocpf_event);
bfa_fsm_state_decl(bfa_iocpf, initfail, struct bfa_iocpf_s, enum iocpf_event);
bfa_fsm_state_decl(bfa_iocpf, fail_sync, struct bfa_iocpf_s, enum iocpf_event);
bfa_fsm_state_decl(bfa_iocpf, fail, struct bfa_iocpf_s, enum iocpf_event);
bfa_fsm_state_decl(bfa_iocpf, disabling, struct bfa_iocpf_s, enum iocpf_event);
bfa_fsm_state_decl(bfa_iocpf, disabling_sync, struct bfa_iocpf_s,
						enum iocpf_event);
bfa_fsm_state_decl(bfa_iocpf, disabled, struct bfa_iocpf_s, enum iocpf_event);

static struct bfa_sm_table_s iocpf_sm_table[] = {
	{BFA_SM(bfa_iocpf_sm_reset), BFA_IOCPF_RESET},
	{BFA_SM(bfa_iocpf_sm_fwcheck), BFA_IOCPF_FWMISMATCH},
	{BFA_SM(bfa_iocpf_sm_mismatch), BFA_IOCPF_FWMISMATCH},
	{BFA_SM(bfa_iocpf_sm_semwait), BFA_IOCPF_SEMWAIT},
	{BFA_SM(bfa_iocpf_sm_hwinit), BFA_IOCPF_HWINIT},
	{BFA_SM(bfa_iocpf_sm_enabling), BFA_IOCPF_HWINIT},
	{BFA_SM(bfa_iocpf_sm_ready), BFA_IOCPF_READY},
	{BFA_SM(bfa_iocpf_sm_initfail_sync), BFA_IOCPF_INITFAIL},
	{BFA_SM(bfa_iocpf_sm_initfail), BFA_IOCPF_INITFAIL},
	{BFA_SM(bfa_iocpf_sm_fail_sync), BFA_IOCPF_FAIL},
	{BFA_SM(bfa_iocpf_sm_fail), BFA_IOCPF_FAIL},
	{BFA_SM(bfa_iocpf_sm_disabling), BFA_IOCPF_DISABLING},
	{BFA_SM(bfa_iocpf_sm_disabling_sync), BFA_IOCPF_DISABLING},
	{BFA_SM(bfa_iocpf_sm_disabled), BFA_IOCPF_DISABLED},
};

/*
 * IOC State Machine
 */

/*
 * Beginning state. IOC uninit state.
 */

static void
bfa_ioc_sm_uninit_entry(struct bfa_ioc_s *ioc)
{
}

/*
 * IOC is in uninit state.
 */
static void
bfa_ioc_sm_uninit(struct bfa_ioc_s *ioc, enum ioc_event event)
{
	bfa_trc(ioc, event);

	switch (event) {
	case IOC_E_RESET:
		bfa_fsm_set_state(ioc, bfa_ioc_sm_reset);
		break;

	default:
		bfa_sm_fault(ioc, event);
	}
}
/*
 * Reset entry actions -- initialize state machine
 */
static void
bfa_ioc_sm_reset_entry(struct bfa_ioc_s *ioc)
{
	bfa_fsm_set_state(&ioc->iocpf, bfa_iocpf_sm_reset);
}

/*
 * IOC is in reset state.
 */
static void
bfa_ioc_sm_reset(struct bfa_ioc_s *ioc, enum ioc_event event)
{
	bfa_trc(ioc, event);

	switch (event) {
	case IOC_E_ENABLE:
		bfa_fsm_set_state(ioc, bfa_ioc_sm_enabling);
		break;

	case IOC_E_DISABLE:
		bfa_ioc_disable_comp(ioc);
		break;

	case IOC_E_DETACH:
		bfa_fsm_set_state(ioc, bfa_ioc_sm_uninit);
		break;

	default:
		bfa_sm_fault(ioc, event);
	}
}

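/*
 * Send an enable request to the IOCPF state machine.
 */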
static void
bfa_ioc_sm_enabling_entry(struct bfa_ioc_s *ioc)
{
	bfa_fsm_send_event(&ioc->iocpf, IOCPF_E_ENABLE);
}

/*
 * Host IOC function is being enabled, awaiting response from firmware.
 * Semaphore is acquired.
 */
static void
bfa_ioc_sm_enabling(struct bfa_ioc_s *ioc, enum ioc_event event)
{
	bfa_trc(ioc, event);

	switch (event) {
	case IOC_E_ENABLED:
		bfa_fsm_set_state(ioc, bfa_ioc_sm_getattr);
		break;

	case IOC_E_PFFAILED:
		/* !!! fall through !!! */
	case IOC_E_HWERROR:
		ioc->cbfn->enable_cbfn(ioc->bfa, BFA_STATUS_IOC_FAILURE);
		bfa_fsm_set_state(ioc, bfa_ioc_sm_fail);
		if (event != IOC_E_PFFAILED)
			bfa_fsm_send_event(&ioc->iocpf, IOCPF_E_INITFAIL);
		break;

	case IOC_E_HWFAILED:
		ioc->cbfn->enable_cbfn(ioc->bfa, BFA_STATUS_IOC_FAILURE);
		bfa_fsm_set_state(ioc, bfa_ioc_sm_hwfail);
		break;

	case IOC_E_DISABLE:
		bfa_fsm_set_state(ioc, bfa_ioc_sm_disabling);
		break;

	case IOC_E_DETACH:
		bfa_fsm_set_state(ioc, bfa_ioc_sm_uninit);
		bfa_fsm_send_event(&ioc->iocpf, IOCPF_E_STOP);
		break;

	case IOC_E_ENABLE:
		break;

	default:
		bfa_sm_fault(ioc, event);
	}
}

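/*
 * Start the IOC timer and request IOC attributes from firmware.
 */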
static void
bfa_ioc_sm_getattr_entry(struct bfa_ioc_s *ioc)
{
	bfa_ioc_timer_start(ioc);
	bfa_ioc_send_getattr(ioc);
}

/*
 * IOC configuration in progress. Timer is active.
 */
static void
bfa_ioc_sm_getattr(struct bfa_ioc_s *ioc, enum ioc_event event)
{
	bfa_trc(ioc, event);

	switch (event) {
	case IOC_E_FWRSP_GETATTR:
		bfa_ioc_timer_stop(ioc);
		bfa_ioc_check_attr_wwns(ioc);
		bfa_ioc_hb_monitor(ioc);
		bfa_fsm_set_state(ioc, bfa_ioc_sm_op);
		break;

	case IOC_E_FWRSP_ACQ_ADDR:
		bfa_ioc_timer_stop(ioc);
		bfa_ioc_hb_monitor(ioc);
		bfa_fsm_set_state(ioc, bfa_ioc_sm_acq_addr);
		break;

	case IOC_E_PFFAILED:
	case IOC_E_HWERROR:
		bfa_ioc_timer_stop(ioc);
		/* !!! fall through !!! */
	case IOC_E_TIMEOUT:
		ioc->cbfn->enable_cbfn(ioc->bfa, BFA_STATUS_IOC_FAILURE);
		bfa_fsm_set_state(ioc, bfa_ioc_sm_fail);
		if (event != IOC_E_PFFAILED)
			bfa_fsm_send_event(&ioc->iocpf, IOCPF_E_GETATTRFAIL);
		break;

	case IOC_E_DISABLE:
		bfa_ioc_timer_stop(ioc);
		bfa_fsm_set_state(ioc, bfa_ioc_sm_disabling);
		break;

	case IOC_E_ENABLE:
		break;

	default:
		bfa_sm_fault(ioc, event);
	}
}

/*
 * Acquiring address from fabric (entry function)
 */
static void
bfa_ioc_sm_acq_addr_entry(struct bfa_ioc_s *ioc)
{
}

/*
 * Acquiring address from the fabric
 */
static void
bfa_ioc_sm_acq_addr(struct bfa_ioc_s *ioc, enum ioc_event event)
{
	bfa_trc(ioc, event);

	switch (event) {
	case IOC_E_FWRSP_GETATTR:
		bfa_ioc_check_attr_wwns(ioc);
		bfa_fsm_set_state(ioc, bfa_ioc_sm_op);
		break;

	case IOC_E_PFFAILED:
	case IOC_E_HWERROR:
		bfa_hb_timer_stop(ioc);
	case IOC_E_HBFAIL:
		ioc->cbfn->enable_cbfn(ioc->bfa, BFA_STATUS_IOC_FAILURE);
		bfa_fsm_set_state(ioc, bfa_ioc_sm_fail);
		if (event != IOC_E_PFFAILED)
			bfa_fsm_send_event(&ioc->iocpf, IOCPF_E_GETATTRFAIL);
		break;

	case IOC_E_DISABLE:
		bfa_hb_timer_stop(ioc);
		bfa_fsm_set_state(ioc, bfa_ioc_sm_disabling);
		break;

	case IOC_E_ENABLE:
		break;

	default:
		bfa_sm_fault(ioc, event);
	}
}

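/*
 * IOC is operational. Notify the enable completion and registered modules.
 */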
static void
bfa_ioc_sm_op_entry(struct bfa_ioc_s *ioc)
{
	struct bfad_s *bfad = (struct bfad_s *)ioc->bfa->bfad;

	ioc->cbfn->enable_cbfn(ioc->bfa, BFA_STATUS_OK);
	bfa_ioc_event_notify(ioc, BFA_IOC_E_ENABLED);
	BFA_LOG(KERN_INFO, bfad, bfa_log_level, "IOC enabled\n");
}

static void
bfa_ioc_sm_op(struct bfa_ioc_s *ioc, enum ioc_event event)
{
	bfa_trc(ioc, event);

	switch (event) {
	case IOC_E_ENABLE:
		break;

	case IOC_E_DISABLE:
		bfa_hb_timer_stop(ioc);
		bfa_fsm_set_state(ioc, bfa_ioc_sm_disabling);
		break;

	case IOC_E_PFFAILED:
	case IOC_E_HWERROR:
		bfa_hb_timer_stop(ioc);
		/* !!! fall through !!! */
	case IOC_E_HBFAIL:
		if (ioc->iocpf.auto_recover)
			bfa_fsm_set_state(ioc, bfa_ioc_sm_fail_retry);
		else
			bfa_fsm_set_state(ioc, bfa_ioc_sm_fail);

		bfa_ioc_fail_notify(ioc);

		if (event != IOC_E_PFFAILED)
			bfa_fsm_send_event(&ioc->iocpf, IOCPF_E_FAIL);
		break;

	default:
		bfa_sm_fault(ioc, event);
	}
}


static void
bfa_ioc_sm_disabling_entry(struct bfa_ioc_s *ioc)
{
	struct bfad_s *bfad = (struct bfad_s *)ioc->bfa->bfad;
	bfa_fsm_send_event(&ioc->iocpf, IOCPF_E_DISABLE);
	BFA_LOG(KERN_INFO, bfad, bfa_log_level, "IOC disabled\n");
}

/*
 * IOC is being disabled
 */
static void
bfa_ioc_sm_disabling(struct bfa_ioc_s *ioc, enum ioc_event event)
{
	bfa_trc(ioc, event);

	switch (event) {
	case IOC_E_DISABLED:
		bfa_fsm_set_state(ioc, bfa_ioc_sm_disabled);
		break;

	case IOC_E_HWERROR:
		/*
		 * No state change. Will move to disabled state
		 * after iocpf sm completes failure processing and
		 * moves to disabled state.
		 */
		bfa_fsm_send_event(&ioc->iocpf, IOCPF_E_FAIL);
		break;

	case IOC_E_HWFAILED:
		bfa_fsm_set_state(ioc, bfa_ioc_sm_hwfail);
		bfa_ioc_disable_comp(ioc);
		break;

	default:
		bfa_sm_fault(ioc, event);
	}
}

/*
 * IOC disable completion entry.
 */
static void
bfa_ioc_sm_disabled_entry(struct bfa_ioc_s *ioc)
{
	bfa_ioc_disable_comp(ioc);
}

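/*
 * IOC is disabled.
 */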
static void
bfa_ioc_sm_disabled(struct bfa_ioc_s *ioc, enum ioc_event event)
{
	bfa_trc(ioc, event);

	switch (event) {
	case IOC_E_ENABLE:
		bfa_fsm_set_state(ioc, bfa_ioc_sm_enabling);
		break;

	case IOC_E_DISABLE:
		ioc->cbfn->disable_cbfn(ioc->bfa);
		break;

	case IOC_E_DETACH:
		bfa_fsm_set_state(ioc, bfa_ioc_sm_uninit);
		bfa_fsm_send_event(&ioc->iocpf, IOCPF_E_STOP);
		break;

	default:
		bfa_sm_fault(ioc, event);
	}
}


static void
bfa_ioc_sm_fail_retry_entry(struct bfa_ioc_s *ioc)
{
	bfa_trc(ioc, 0);
}

/*
 * Hardware initialization retry.
 */
static void
bfa_ioc_sm_fail_retry(struct bfa_ioc_s *ioc, enum ioc_event event)
{
	bfa_trc(ioc, event);

	switch (event) {
	case IOC_E_ENABLED:
		bfa_fsm_set_state(ioc, bfa_ioc_sm_getattr);
		break;

	case IOC_E_PFFAILED:
	case IOC_E_HWERROR:
		/*
		 * Initialization retry failed.
		 */
		ioc->cbfn->enable_cbfn(ioc->bfa, BFA_STATUS_IOC_FAILURE);
		bfa_fsm_set_state(ioc, bfa_ioc_sm_fail);
		if (event != IOC_E_PFFAILED)
			bfa_fsm_send_event(&ioc->iocpf, IOCPF_E_INITFAIL);
		break;

	case IOC_E_HWFAILED:
		ioc->cbfn->enable_cbfn(ioc->bfa, BFA_STATUS_IOC_FAILURE);
		bfa_fsm_set_state(ioc, bfa_ioc_sm_hwfail);
		break;

	case IOC_E_ENABLE:
		break;

	case IOC_E_DISABLE:
		bfa_fsm_set_state(ioc, bfa_ioc_sm_disabling);
		break;

	case IOC_E_DETACH:
		bfa_fsm_set_state(ioc, bfa_ioc_sm_uninit);
		bfa_fsm_send_event(&ioc->iocpf, IOCPF_E_STOP);
		break;

	default:
		bfa_sm_fault(ioc, event);
	}
}


static void
bfa_ioc_sm_fail_entry(struct bfa_ioc_s *ioc)
{
	bfa_trc(ioc, 0);
}

/*
 * IOC failure.
 */
static void
bfa_ioc_sm_fail(struct bfa_ioc_s *ioc, enum ioc_event event)
{
	bfa_trc(ioc, event);

	switch (event) {

	case IOC_E_ENABLE:
		ioc->cbfn->enable_cbfn(ioc->bfa, BFA_STATUS_IOC_FAILURE);
		break;

	case IOC_E_DISABLE:
		bfa_fsm_set_state(ioc, bfa_ioc_sm_disabling);
		break;

	case IOC_E_DETACH:
		bfa_fsm_set_state(ioc, bfa_ioc_sm_uninit);
		bfa_fsm_send_event(&ioc->iocpf, IOCPF_E_STOP);
		break;

	case IOC_E_HWERROR:
		/*
		 * HB failure notification, ignore.
		 */
		break;
	default:
		bfa_sm_fault(ioc, event);
	}
}

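/*
 * IOC hardware failure state.
 */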
static void
bfa_ioc_sm_hwfail_entry(struct bfa_ioc_s *ioc)
{
	bfa_trc(ioc, 0);
}

static void
bfa_ioc_sm_hwfail(struct bfa_ioc_s *ioc, enum ioc_event event)
{
	bfa_trc(ioc, event);

	switch (event) {
	case IOC_E_ENABLE:
		ioc->cbfn->enable_cbfn(ioc->bfa, BFA_STATUS_IOC_FAILURE);
		break;

	case IOC_E_DISABLE:
		ioc->cbfn->disable_cbfn(ioc->bfa);
		break;

	case IOC_E_DETACH:
		bfa_fsm_set_state(ioc, bfa_ioc_sm_uninit);
		break;

	default:
		bfa_sm_fault(ioc, event);
	}
}

/*
 * IOCPF State Machine
 */

/*
 * Reset entry actions -- initialize state machine
 */
static void
bfa_iocpf_sm_reset_entry(struct bfa_iocpf_s *iocpf)
{
	iocpf->fw_mismatch_notified = BFA_FALSE;
	iocpf->auto_recover = bfa_auto_recover;
}

/*
 * Beginning state. IOC is in reset state.
 */
static void
bfa_iocpf_sm_reset(struct bfa_iocpf_s *iocpf, enum iocpf_event event)
{
	struct bfa_ioc_s *ioc = iocpf->ioc;

	bfa_trc(ioc, event);

	switch (event) {
	case IOCPF_E_ENABLE:
		bfa_fsm_set_state(iocpf, bfa_iocpf_sm_fwcheck);
		break;

	case IOCPF_E_STOP:
		break;

	default:
		bfa_sm_fault(ioc, event);
	}
}

/*
 * Semaphore should be acquired for version check.
 */
static void
bfa_iocpf_sm_fwcheck_entry(struct bfa_iocpf_s *iocpf)
{
	struct bfi_ioc_image_hdr_s	fwhdr;
	u32	fwstate = readl(iocpf->ioc->ioc_regs.ioc_fwstate);

	/* h/w sem init */
	if (fwstate == BFI_IOC_UNINIT)
		goto sem_get;

	bfa_ioc_fwver_get(iocpf->ioc, &fwhdr);

	if (swab32(fwhdr.exec) == BFI_FWBOOT_TYPE_NORMAL)
		goto sem_get;

	bfa_trc(iocpf->ioc, fwstate);
	bfa_trc(iocpf->ioc, fwhdr.exec);
	writel(BFI_IOC_UNINIT, iocpf->ioc->ioc_regs.ioc_fwstate);

	/*
	 * Try to lock and then unlock the semaphore.
	 */
	readl(iocpf->ioc->ioc_regs.ioc_sem_reg);
	writel(1, iocpf->ioc->ioc_regs.ioc_sem_reg);
sem_get:
	bfa_ioc_hw_sem_get(iocpf->ioc);
}

/*
 * Awaiting h/w semaphore to continue with version check.
 */
static void
bfa_iocpf_sm_fwcheck(struct bfa_iocpf_s *iocpf, enum iocpf_event event)
{
	struct bfa_ioc_s *ioc = iocpf->ioc;

	bfa_trc(ioc, event);

	switch (event) {
	case IOCPF_E_SEMLOCKED:
		if (bfa_ioc_firmware_lock(ioc)) {
			if (bfa_ioc_sync_start(ioc)) {
				bfa_ioc_sync_join(ioc);
				bfa_fsm_set_state(iocpf, bfa_iocpf_sm_hwinit);
			} else {
				bfa_ioc_firmware_unlock(ioc);
				writel(1, ioc->ioc_regs.ioc_sem_reg);
				bfa_sem_timer_start(ioc);
			}
		} else {
			writel(1, ioc->ioc_regs.ioc_sem_reg);
			bfa_fsm_set_state(iocpf, bfa_iocpf_sm_mismatch);
		}
		break;

	case IOCPF_E_SEM_ERROR:
		bfa_fsm_set_state(iocpf, bfa_iocpf_sm_fail);
		bfa_fsm_send_event(ioc, IOC_E_HWFAILED);
		break;

	case IOCPF_E_DISABLE:
		bfa_sem_timer_stop(ioc);
		bfa_fsm_set_state(iocpf, bfa_iocpf_sm_reset);
		bfa_fsm_send_event(ioc, IOC_E_DISABLED);
		break;

	case IOCPF_E_STOP:
		bfa_sem_timer_stop(ioc);
		bfa_fsm_set_state(iocpf, bfa_iocpf_sm_reset);
		break;

	default:
		bfa_sm_fault(ioc, event);
	}
}

/*
 * Notify enable completion callback.
 */
static void
bfa_iocpf_sm_mismatch_entry(struct bfa_iocpf_s *iocpf)
{
	/*
	 * Call only the first time sm enters fwmismatch state.
	 */
	if (iocpf->fw_mismatch_notified == BFA_FALSE)
		bfa_ioc_pf_fwmismatch(iocpf->ioc);

	iocpf->fw_mismatch_notified = BFA_TRUE;
	bfa_iocpf_timer_start(iocpf->ioc);
}

/*
 * Awaiting firmware version match.
 */
static void
bfa_iocpf_sm_mismatch(struct bfa_iocpf_s *iocpf, enum iocpf_event event)
{
	struct bfa_ioc_s *ioc = iocpf->ioc;

	bfa_trc(ioc, event);

	switch (event) {
	case IOCPF_E_TIMEOUT:
		bfa_fsm_set_state(iocpf, bfa_iocpf_sm_fwcheck);
		break;

	case IOCPF_E_DISABLE:
		bfa_iocpf_timer_stop(ioc);
		bfa_fsm_set_state(iocpf, bfa_iocpf_sm_reset);
		bfa_fsm_send_event(ioc, IOC_E_DISABLED);
		break;

	case IOCPF_E_STOP:
		bfa_iocpf_timer_stop(ioc);
		bfa_fsm_set_state(iocpf, bfa_iocpf_sm_reset);
		break;

	default:
		bfa_sm_fault(ioc, event);
	}
}

/*
 * Request for semaphore.
 */
static void
bfa_iocpf_sm_semwait_entry(struct bfa_iocpf_s *iocpf)
{
	bfa_ioc_hw_sem_get(iocpf->ioc);
}

/*
 * Awaiting semaphore for h/w initialization.
 */
869static void
870bfa_iocpf_sm_semwait(struct bfa_iocpf_s *iocpf, enum iocpf_event event)
871{
872 struct bfa_ioc_s *ioc = iocpf->ioc;
873
874 bfa_trc(ioc, event);
875
876 switch (event) {
877 case IOCPF_E_SEMLOCKED:
Krishna Gudipatif1d584d2010-12-13 16:17:11 -0800878 if (bfa_ioc_sync_complete(ioc)) {
879 bfa_ioc_sync_join(ioc);
880 bfa_fsm_set_state(iocpf, bfa_iocpf_sm_hwinit);
881 } else {
882 writel(1, ioc->ioc_regs.ioc_sem_reg);
883 bfa_sem_timer_start(ioc);
884 }
Krishna Gudipatia36c61f2010-09-15 11:50:55 -0700885 break;
886
Krishna Gudipati5a0adae2011-06-24 20:22:56 -0700887 case IOCPF_E_SEM_ERROR:
888 bfa_fsm_set_state(iocpf, bfa_iocpf_sm_fail);
889 bfa_fsm_send_event(ioc, IOC_E_HWFAILED);
890 break;
891
Krishna Gudipatia36c61f2010-09-15 11:50:55 -0700892 case IOCPF_E_DISABLE:
Maggie Zhangf7f738122010-12-09 19:08:43 -0800893 bfa_sem_timer_stop(ioc);
Krishna Gudipatif1d584d2010-12-13 16:17:11 -0800894 bfa_fsm_set_state(iocpf, bfa_iocpf_sm_disabling_sync);
Krishna Gudipatia36c61f2010-09-15 11:50:55 -0700895 break;
896
897 default:
898 bfa_sm_fault(ioc, event);
899 }
900}
901
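/*
 * Start h/w initialization; the h/w semaphore is held at this point.
 */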
static void
bfa_iocpf_sm_hwinit_entry(struct bfa_iocpf_s *iocpf)
{
	iocpf->poll_time = 0;
	bfa_ioc_hwinit(iocpf->ioc, BFA_FALSE);
}

/*
 * Hardware is being initialized. Interrupts are enabled.
 * Holding hardware semaphore lock.
 */
static void
bfa_iocpf_sm_hwinit(struct bfa_iocpf_s *iocpf, enum iocpf_event event)
{
	struct bfa_ioc_s *ioc = iocpf->ioc;

	bfa_trc(ioc, event);

	switch (event) {
	case IOCPF_E_FWREADY:
		bfa_fsm_set_state(iocpf, bfa_iocpf_sm_enabling);
		break;

	case IOCPF_E_TIMEOUT:
		writel(1, ioc->ioc_regs.ioc_sem_reg);
		bfa_fsm_send_event(ioc, IOC_E_PFFAILED);
		bfa_fsm_set_state(iocpf, bfa_iocpf_sm_initfail_sync);
		break;

	case IOCPF_E_DISABLE:
		bfa_iocpf_timer_stop(ioc);
		bfa_ioc_sync_leave(ioc);
		writel(1, ioc->ioc_regs.ioc_sem_reg);
		bfa_fsm_set_state(iocpf, bfa_iocpf_sm_disabled);
		break;

	default:
		bfa_sm_fault(ioc, event);
	}
}

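/*
 * Start the response timer and send the enable request to firmware.
 */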
static void
bfa_iocpf_sm_enabling_entry(struct bfa_iocpf_s *iocpf)
{
	bfa_iocpf_timer_start(iocpf->ioc);
	/*
	 * Enable Interrupts before sending fw IOC ENABLE cmd.
	 */
	iocpf->ioc->cbfn->reset_cbfn(iocpf->ioc->bfa);
	bfa_ioc_send_enable(iocpf->ioc);
}

/*
 * Host IOC function is being enabled, awaiting response from firmware.
 * Semaphore is acquired.
 */
static void
bfa_iocpf_sm_enabling(struct bfa_iocpf_s *iocpf, enum iocpf_event event)
{
	struct bfa_ioc_s *ioc = iocpf->ioc;

	bfa_trc(ioc, event);

	switch (event) {
	case IOCPF_E_FWRSP_ENABLE:
		bfa_iocpf_timer_stop(ioc);
		writel(1, ioc->ioc_regs.ioc_sem_reg);
		bfa_fsm_set_state(iocpf, bfa_iocpf_sm_ready);
		break;

	case IOCPF_E_INITFAIL:
		bfa_iocpf_timer_stop(ioc);
		/*
		 * !!! fall through !!!
		 */

	case IOCPF_E_TIMEOUT:
		writel(1, ioc->ioc_regs.ioc_sem_reg);
		if (event == IOCPF_E_TIMEOUT)
			bfa_fsm_send_event(ioc, IOC_E_PFFAILED);
		bfa_fsm_set_state(iocpf, bfa_iocpf_sm_initfail_sync);
		break;

	case IOCPF_E_DISABLE:
		bfa_iocpf_timer_stop(ioc);
		writel(1, ioc->ioc_regs.ioc_sem_reg);
		bfa_fsm_set_state(iocpf, bfa_iocpf_sm_disabling);
		break;

	default:
		bfa_sm_fault(ioc, event);
	}
}

static void
bfa_iocpf_sm_ready_entry(struct bfa_iocpf_s *iocpf)
{
	bfa_fsm_send_event(iocpf->ioc, IOC_E_ENABLED);
}

static void
bfa_iocpf_sm_ready(struct bfa_iocpf_s *iocpf, enum iocpf_event event)
{
	struct bfa_ioc_s *ioc = iocpf->ioc;

	bfa_trc(ioc, event);

	switch (event) {
	case IOCPF_E_DISABLE:
		bfa_fsm_set_state(iocpf, bfa_iocpf_sm_disabling);
		break;

	case IOCPF_E_GETATTRFAIL:
		bfa_fsm_set_state(iocpf, bfa_iocpf_sm_initfail_sync);
		break;

	case IOCPF_E_FAIL:
		bfa_fsm_set_state(iocpf, bfa_iocpf_sm_fail_sync);
		break;

	default:
		bfa_sm_fault(ioc, event);
	}
}

static void
bfa_iocpf_sm_disabling_entry(struct bfa_iocpf_s *iocpf)
{
	bfa_iocpf_timer_start(iocpf->ioc);
	bfa_ioc_send_disable(iocpf->ioc);
}

/*
 * IOC is being disabled
 */
static void
bfa_iocpf_sm_disabling(struct bfa_iocpf_s *iocpf, enum iocpf_event event)
{
	struct bfa_ioc_s *ioc = iocpf->ioc;

	bfa_trc(ioc, event);

	switch (event) {
	case IOCPF_E_FWRSP_DISABLE:
		bfa_iocpf_timer_stop(ioc);
		bfa_fsm_set_state(iocpf, bfa_iocpf_sm_disabling_sync);
		break;

	case IOCPF_E_FAIL:
		bfa_iocpf_timer_stop(ioc);
		/*
		 * !!! fall through !!!
		 */

	case IOCPF_E_TIMEOUT:
		writel(BFI_IOC_FAIL, ioc->ioc_regs.ioc_fwstate);
		bfa_fsm_set_state(iocpf, bfa_iocpf_sm_disabling_sync);
		break;

	case IOCPF_E_FWRSP_ENABLE:
		break;

	default:
		bfa_sm_fault(ioc, event);
	}
}

static void
bfa_iocpf_sm_disabling_sync_entry(struct bfa_iocpf_s *iocpf)
{
	bfa_ioc_hw_sem_get(iocpf->ioc);
}

/*
 * IOC hb ack request is being removed.
 */
static void
bfa_iocpf_sm_disabling_sync(struct bfa_iocpf_s *iocpf, enum iocpf_event event)
{
	struct bfa_ioc_s *ioc = iocpf->ioc;

	bfa_trc(ioc, event);

	switch (event) {
	case IOCPF_E_SEMLOCKED:
		bfa_ioc_sync_leave(ioc);
		writel(1, ioc->ioc_regs.ioc_sem_reg);
		bfa_fsm_set_state(iocpf, bfa_iocpf_sm_disabled);
		break;

	case IOCPF_E_SEM_ERROR:
		bfa_fsm_set_state(iocpf, bfa_iocpf_sm_fail);
		bfa_fsm_send_event(ioc, IOC_E_HWFAILED);
		break;

	case IOCPF_E_FAIL:
		break;

	default:
		bfa_sm_fault(ioc, event);
	}
}

/*
 * IOC disable completion entry.
 */
static void
bfa_iocpf_sm_disabled_entry(struct bfa_iocpf_s *iocpf)
{
	bfa_ioc_mbox_flush(iocpf->ioc);
	bfa_fsm_send_event(iocpf->ioc, IOC_E_DISABLED);
}

static void
bfa_iocpf_sm_disabled(struct bfa_iocpf_s *iocpf, enum iocpf_event event)
{
	struct bfa_ioc_s *ioc = iocpf->ioc;

	bfa_trc(ioc, event);

	switch (event) {
	case IOCPF_E_ENABLE:
		bfa_fsm_set_state(iocpf, bfa_iocpf_sm_semwait);
		break;

	case IOCPF_E_STOP:
		bfa_ioc_firmware_unlock(ioc);
		bfa_fsm_set_state(iocpf, bfa_iocpf_sm_reset);
		break;

	default:
		bfa_sm_fault(ioc, event);
	}
}

static void
bfa_iocpf_sm_initfail_sync_entry(struct bfa_iocpf_s *iocpf)
{
	bfa_ioc_debug_save_ftrc(iocpf->ioc);
	bfa_ioc_hw_sem_get(iocpf->ioc);
}

/*
 * Hardware initialization failed.
 */
static void
bfa_iocpf_sm_initfail_sync(struct bfa_iocpf_s *iocpf, enum iocpf_event event)
{
	struct bfa_ioc_s *ioc = iocpf->ioc;

	bfa_trc(ioc, event);

	switch (event) {
	case IOCPF_E_SEMLOCKED:
		bfa_ioc_notify_fail(ioc);
		bfa_ioc_sync_leave(ioc);
		writel(BFI_IOC_FAIL, ioc->ioc_regs.ioc_fwstate);
		writel(1, ioc->ioc_regs.ioc_sem_reg);
		bfa_fsm_set_state(iocpf, bfa_iocpf_sm_initfail);
		break;

	case IOCPF_E_SEM_ERROR:
		bfa_fsm_set_state(iocpf, bfa_iocpf_sm_fail);
		bfa_fsm_send_event(ioc, IOC_E_HWFAILED);
		break;

	case IOCPF_E_DISABLE:
		bfa_sem_timer_stop(ioc);
		bfa_fsm_set_state(iocpf, bfa_iocpf_sm_disabling_sync);
		break;

	case IOCPF_E_STOP:
		bfa_sem_timer_stop(ioc);
		bfa_ioc_firmware_unlock(ioc);
		bfa_fsm_set_state(iocpf, bfa_iocpf_sm_reset);
		break;

	case IOCPF_E_FAIL:
		break;

	default:
		bfa_sm_fault(ioc, event);
	}
}

static void
bfa_iocpf_sm_initfail_entry(struct bfa_iocpf_s *iocpf)
{
	bfa_trc(iocpf->ioc, 0);
}

/*
 * Hardware initialization failed.
 */
static void
bfa_iocpf_sm_initfail(struct bfa_iocpf_s *iocpf, enum iocpf_event event)
{
	struct bfa_ioc_s *ioc = iocpf->ioc;

	bfa_trc(ioc, event);

	switch (event) {
	case IOCPF_E_DISABLE:
		bfa_fsm_set_state(iocpf, bfa_iocpf_sm_disabled);
		break;

	case IOCPF_E_STOP:
		bfa_ioc_firmware_unlock(ioc);
		bfa_fsm_set_state(iocpf, bfa_iocpf_sm_reset);
		break;

	default:
		bfa_sm_fault(ioc, event);
	}
}

static void
bfa_iocpf_sm_fail_sync_entry(struct bfa_iocpf_s *iocpf)
{
	/*
	 * Mark IOC as failed in hardware and stop firmware.
	 */
	bfa_ioc_lpu_stop(iocpf->ioc);

	/*
	 * Flush any queued up mailbox requests.
	 */
	bfa_ioc_mbox_flush(iocpf->ioc);

	bfa_ioc_hw_sem_get(iocpf->ioc);
}

static void
bfa_iocpf_sm_fail_sync(struct bfa_iocpf_s *iocpf, enum iocpf_event event)
{
	struct bfa_ioc_s *ioc = iocpf->ioc;

	bfa_trc(ioc, event);

	switch (event) {
	case IOCPF_E_SEMLOCKED:
		bfa_ioc_sync_ack(ioc);
		bfa_ioc_notify_fail(ioc);
		if (!iocpf->auto_recover) {
			bfa_ioc_sync_leave(ioc);
			writel(BFI_IOC_FAIL, ioc->ioc_regs.ioc_fwstate);
			writel(1, ioc->ioc_regs.ioc_sem_reg);
			bfa_fsm_set_state(iocpf, bfa_iocpf_sm_fail);
		} else {
			if (bfa_ioc_sync_complete(ioc))
				bfa_fsm_set_state(iocpf, bfa_iocpf_sm_hwinit);
			else {
				writel(1, ioc->ioc_regs.ioc_sem_reg);
				bfa_fsm_set_state(iocpf, bfa_iocpf_sm_semwait);
			}
		}
		break;

	case IOCPF_E_SEM_ERROR:
		bfa_fsm_set_state(iocpf, bfa_iocpf_sm_fail);
		bfa_fsm_send_event(ioc, IOC_E_HWFAILED);
		break;

	case IOCPF_E_DISABLE:
		bfa_sem_timer_stop(ioc);
		bfa_fsm_set_state(iocpf, bfa_iocpf_sm_disabling_sync);
		break;

	case IOCPF_E_FAIL:
		break;

	default:
		bfa_sm_fault(ioc, event);
	}
}

static void
bfa_iocpf_sm_fail_entry(struct bfa_iocpf_s *iocpf)
{
	bfa_trc(iocpf->ioc, 0);
}

/*
 * IOC is in failed state.
 */
static void
bfa_iocpf_sm_fail(struct bfa_iocpf_s *iocpf, enum iocpf_event event)
{
	struct bfa_ioc_s *ioc = iocpf->ioc;

	bfa_trc(ioc, event);

	switch (event) {
	case IOCPF_E_DISABLE:
		bfa_fsm_set_state(iocpf, bfa_iocpf_sm_disabled);
		break;

	default:
		bfa_sm_fault(ioc, event);
	}
}

/*
 * BFA IOC private functions
 */

/*
 * Notify common modules registered for notification.
 */
static void
bfa_ioc_event_notify(struct bfa_ioc_s *ioc, enum bfa_ioc_event_e event)
{
	struct bfa_ioc_notify_s	*notify;
	struct list_head	*qe;

	list_for_each(qe, &ioc->notify_q) {
		notify = (struct bfa_ioc_notify_s *)qe;
		notify->cbfn(notify->cbarg, event);
	}
}

static void
bfa_ioc_disable_comp(struct bfa_ioc_s *ioc)
{
	ioc->cbfn->disable_cbfn(ioc->bfa);
	bfa_ioc_event_notify(ioc, BFA_IOC_E_DISABLED);
}

bfa_boolean_t
bfa_ioc_sem_get(void __iomem *sem_reg)
{
	u32 r32;
	int cnt = 0;
#define BFA_SEM_SPINCNT	3000

	r32 = readl(sem_reg);

	while ((r32 & 1) && (cnt < BFA_SEM_SPINCNT)) {
		cnt++;
		udelay(2);
		r32 = readl(sem_reg);
	}

	if (!(r32 & 1))
		return BFA_TRUE;

	return BFA_FALSE;
}

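/*
 * Attempt to acquire the h/w semaphore; if it is busy, start the
 * semaphore retry timer.
 */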
Jing Huang7725ccf2009-09-23 17:46:15 -07001351static void
1352bfa_ioc_hw_sem_get(struct bfa_ioc_s *ioc)
1353{
Krishna Gudipatia36c61f2010-09-15 11:50:55 -07001354 u32 r32;
Jing Huang7725ccf2009-09-23 17:46:15 -07001355
Jing Huang5fbe25c2010-10-18 17:17:23 -07001356 /*
Jing Huang7725ccf2009-09-23 17:46:15 -07001357 * First read to the semaphore register will return 0, subsequent reads
Krishna Gudipati0a20de42010-03-05 19:34:20 -08001358 * will return 1. Semaphore is released by writing 1 to the register
Jing Huang7725ccf2009-09-23 17:46:15 -07001359 */
Jing Huang53440262010-10-18 17:12:29 -07001360 r32 = readl(ioc->ioc_regs.ioc_sem_reg);
Krishna Gudipati5a0adae2011-06-24 20:22:56 -07001361 if (r32 == ~0) {
1362 WARN_ON(r32 == ~0);
1363 bfa_fsm_send_event(&ioc->iocpf, IOCPF_E_SEM_ERROR);
1364 return;
1365 }
Krishna Gudipati11189202011-06-13 15:50:35 -07001366 if (!(r32 & 1)) {
Krishna Gudipatia36c61f2010-09-15 11:50:55 -07001367 bfa_fsm_send_event(&ioc->iocpf, IOCPF_E_SEMLOCKED);
Jing Huang7725ccf2009-09-23 17:46:15 -07001368 return;
1369 }
1370
Krishna Gudipatia36c61f2010-09-15 11:50:55 -07001371 bfa_sem_timer_start(ioc);
Jing Huang7725ccf2009-09-23 17:46:15 -07001372}
1373
Jing Huang5fbe25c2010-10-18 17:17:23 -07001374/*
Jing Huang7725ccf2009-09-23 17:46:15 -07001375 * Initialize LPU local memory (aka secondary memory / SRAM)
1376 */
1377static void
1378bfa_ioc_lmem_init(struct bfa_ioc_s *ioc)
1379{
Krishna Gudipatia36c61f2010-09-15 11:50:55 -07001380 u32 pss_ctl;
1381 int i;
Jing Huang7725ccf2009-09-23 17:46:15 -07001382#define PSS_LMEM_INIT_TIME 10000
1383
Jing Huang53440262010-10-18 17:12:29 -07001384 pss_ctl = readl(ioc->ioc_regs.pss_ctl_reg);
Jing Huang7725ccf2009-09-23 17:46:15 -07001385 pss_ctl &= ~__PSS_LMEM_RESET;
1386 pss_ctl |= __PSS_LMEM_INIT_EN;
Krishna Gudipatia36c61f2010-09-15 11:50:55 -07001387
1388 /*
1389 * i2c workaround 12.5khz clock
1390 */
1391 pss_ctl |= __PSS_I2C_CLK_DIV(3UL);
Jing Huang53440262010-10-18 17:12:29 -07001392 writel(pss_ctl, ioc->ioc_regs.pss_ctl_reg);
Jing Huang7725ccf2009-09-23 17:46:15 -07001393
Jing Huang5fbe25c2010-10-18 17:17:23 -07001394 /*
Jing Huang7725ccf2009-09-23 17:46:15 -07001395 * wait for memory initialization to be complete
1396 */
1397 i = 0;
1398 do {
Jing Huang53440262010-10-18 17:12:29 -07001399 pss_ctl = readl(ioc->ioc_regs.pss_ctl_reg);
Jing Huang7725ccf2009-09-23 17:46:15 -07001400 i++;
1401 } while (!(pss_ctl & __PSS_LMEM_INIT_DONE) && (i < PSS_LMEM_INIT_TIME));
1402
Jing Huang5fbe25c2010-10-18 17:17:23 -07001403 /*
Jing Huang7725ccf2009-09-23 17:46:15 -07001404 * If memory initialization is not successful, IOC timeout will catch
1405 * such failures.
1406 */
Jing Huangd4b671c2010-12-26 21:46:35 -08001407 WARN_ON(!(pss_ctl & __PSS_LMEM_INIT_DONE));
Jing Huang7725ccf2009-09-23 17:46:15 -07001408 bfa_trc(ioc, pss_ctl);
1409
1410 pss_ctl &= ~(__PSS_LMEM_INIT_DONE | __PSS_LMEM_INIT_EN);
Jing Huang53440262010-10-18 17:12:29 -07001411 writel(pss_ctl, ioc->ioc_regs.pss_ctl_reg);
Jing Huang7725ccf2009-09-23 17:46:15 -07001412}
1413
1414static void
1415bfa_ioc_lpu_start(struct bfa_ioc_s *ioc)
1416{
Krishna Gudipatia36c61f2010-09-15 11:50:55 -07001417 u32 pss_ctl;
Jing Huang7725ccf2009-09-23 17:46:15 -07001418
Jing Huang5fbe25c2010-10-18 17:17:23 -07001419 /*
Jing Huang7725ccf2009-09-23 17:46:15 -07001420 * Take processor out of reset.
1421 */
Jing Huang53440262010-10-18 17:12:29 -07001422 pss_ctl = readl(ioc->ioc_regs.pss_ctl_reg);
Jing Huang7725ccf2009-09-23 17:46:15 -07001423 pss_ctl &= ~__PSS_LPU0_RESET;
1424
Jing Huang53440262010-10-18 17:12:29 -07001425 writel(pss_ctl, ioc->ioc_regs.pss_ctl_reg);
Jing Huang7725ccf2009-09-23 17:46:15 -07001426}
1427
1428static void
1429bfa_ioc_lpu_stop(struct bfa_ioc_s *ioc)
1430{
Krishna Gudipatia36c61f2010-09-15 11:50:55 -07001431 u32 pss_ctl;
Jing Huang7725ccf2009-09-23 17:46:15 -07001432
Jing Huang5fbe25c2010-10-18 17:17:23 -07001433 /*
Jing Huang7725ccf2009-09-23 17:46:15 -07001434 * Put processors in reset.
1435 */
Jing Huang53440262010-10-18 17:12:29 -07001436 pss_ctl = readl(ioc->ioc_regs.pss_ctl_reg);
Jing Huang7725ccf2009-09-23 17:46:15 -07001437 pss_ctl |= (__PSS_LPU0_RESET | __PSS_LPU1_RESET);
1438
Jing Huang53440262010-10-18 17:12:29 -07001439 writel(pss_ctl, ioc->ioc_regs.pss_ctl_reg);
Jing Huang7725ccf2009-09-23 17:46:15 -07001440}
1441
Jing Huang5fbe25c2010-10-18 17:17:23 -07001442/*
Jing Huang7725ccf2009-09-23 17:46:15 -07001443 * Fetch the firmware image header (version information) from IOC SMEM.
1444 */
Krishna Gudipati0a20de42010-03-05 19:34:20 -08001445void
Jing Huang7725ccf2009-09-23 17:46:15 -07001446bfa_ioc_fwver_get(struct bfa_ioc_s *ioc, struct bfi_ioc_image_hdr_s *fwhdr)
1447{
Krishna Gudipatia36c61f2010-09-15 11:50:55 -07001448 u32 pgnum, pgoff;
1449 u32 loff = 0;
1450 int i;
1451 u32 *fwsig = (u32 *) fwhdr;
Jing Huang7725ccf2009-09-23 17:46:15 -07001452
Maggie Zhangf7f738122010-12-09 19:08:43 -08001453 pgnum = PSS_SMEM_PGNUM(ioc->ioc_regs.smem_pg0, loff);
1454 pgoff = PSS_SMEM_PGOFF(loff);
Jing Huang53440262010-10-18 17:12:29 -07001455 writel(pgnum, ioc->ioc_regs.host_page_num_fn);
Jing Huang7725ccf2009-09-23 17:46:15 -07001456
1457 for (i = 0; i < (sizeof(struct bfi_ioc_image_hdr_s) / sizeof(u32));
1458 i++) {
Krishna Gudipatia36c61f2010-09-15 11:50:55 -07001459 fwsig[i] =
1460 bfa_mem_read(ioc->ioc_regs.smem_page_start, loff);
Jing Huang7725ccf2009-09-23 17:46:15 -07001461 loff += sizeof(u32);
1462 }
1463}
1464
Jing Huang5fbe25c2010-10-18 17:17:23 -07001465/*
Jing Huang7725ccf2009-09-23 17:46:15 -07001466 * Returns TRUE if the firmware header matches the driver's bundled image (md5 checksum compare).
1467 */
Krishna Gudipati0a20de42010-03-05 19:34:20 -08001468bfa_boolean_t
Jing Huang7725ccf2009-09-23 17:46:15 -07001469bfa_ioc_fwver_cmp(struct bfa_ioc_s *ioc, struct bfi_ioc_image_hdr_s *fwhdr)
1470{
1471 struct bfi_ioc_image_hdr_s *drv_fwhdr;
Krishna Gudipatia36c61f2010-09-15 11:50:55 -07001472 int i;
Jing Huang7725ccf2009-09-23 17:46:15 -07001473
Jing Huang293f82d2010-07-08 19:45:20 -07001474 drv_fwhdr = (struct bfi_ioc_image_hdr_s *)
Krishna Gudipati11189202011-06-13 15:50:35 -07001475 bfa_cb_image_get_chunk(bfa_ioc_asic_gen(ioc), 0);
Jing Huang7725ccf2009-09-23 17:46:15 -07001476
1477 for (i = 0; i < BFI_IOC_MD5SUM_SZ; i++) {
1478 if (fwhdr->md5sum[i] != drv_fwhdr->md5sum[i]) {
1479 bfa_trc(ioc, i);
1480 bfa_trc(ioc, fwhdr->md5sum[i]);
1481 bfa_trc(ioc, drv_fwhdr->md5sum[i]);
1482 return BFA_FALSE;
1483 }
1484 }
1485
1486 bfa_trc(ioc, fwhdr->md5sum[0]);
1487 return BFA_TRUE;
1488}
1489
Jing Huang5fbe25c2010-10-18 17:17:23 -07001490/*
Jing Huang7725ccf2009-09-23 17:46:15 -07001491 * Return true if current running version is valid. Firmware signature and
1492 * execution context (driver/bios) must match.
1493 */
Krishna Gudipatia36c61f2010-09-15 11:50:55 -07001494static bfa_boolean_t
1495bfa_ioc_fwver_valid(struct bfa_ioc_s *ioc, u32 boot_env)
Jing Huang7725ccf2009-09-23 17:46:15 -07001496{
1497 struct bfi_ioc_image_hdr_s fwhdr, *drv_fwhdr;
1498
Jing Huang7725ccf2009-09-23 17:46:15 -07001499 bfa_ioc_fwver_get(ioc, &fwhdr);
Jing Huang293f82d2010-07-08 19:45:20 -07001500 drv_fwhdr = (struct bfi_ioc_image_hdr_s *)
Krishna Gudipati11189202011-06-13 15:50:35 -07001501 bfa_cb_image_get_chunk(bfa_ioc_asic_gen(ioc), 0);
Jing Huang7725ccf2009-09-23 17:46:15 -07001502
1503 if (fwhdr.signature != drv_fwhdr->signature) {
1504 bfa_trc(ioc, fwhdr.signature);
1505 bfa_trc(ioc, drv_fwhdr->signature);
1506 return BFA_FALSE;
1507 }
1508
Krishna Gudipati11189202011-06-13 15:50:35 -07001509 if (swab32(fwhdr.bootenv) != boot_env) {
1510 bfa_trc(ioc, fwhdr.bootenv);
Krishna Gudipatia36c61f2010-09-15 11:50:55 -07001511 bfa_trc(ioc, boot_env);
Jing Huang7725ccf2009-09-23 17:46:15 -07001512 return BFA_FALSE;
1513 }
1514
1515 return bfa_ioc_fwver_cmp(ioc, &fwhdr);
1516}
1517
Jing Huang5fbe25c2010-10-18 17:17:23 -07001518/*
Jing Huang7725ccf2009-09-23 17:46:15 -07001519 * Conditionally flush any pending message from firmware at start.
1520 */
1521static void
1522bfa_ioc_msgflush(struct bfa_ioc_s *ioc)
1523{
Krishna Gudipatia36c61f2010-09-15 11:50:55 -07001524 u32 r32;
Jing Huang7725ccf2009-09-23 17:46:15 -07001525
Jing Huang53440262010-10-18 17:12:29 -07001526 r32 = readl(ioc->ioc_regs.lpu_mbox_cmd);
Jing Huang7725ccf2009-09-23 17:46:15 -07001527 if (r32)
Jing Huang53440262010-10-18 17:12:29 -07001528 writel(1, ioc->ioc_regs.lpu_mbox_cmd);
Jing Huang7725ccf2009-09-23 17:46:15 -07001529}
1530
Jing Huang7725ccf2009-09-23 17:46:15 -07001531static void
1532bfa_ioc_hwinit(struct bfa_ioc_s *ioc, bfa_boolean_t force)
1533{
1534 enum bfi_ioc_state ioc_fwstate;
Krishna Gudipatia36c61f2010-09-15 11:50:55 -07001535 bfa_boolean_t fwvalid;
1536 u32 boot_type;
1537 u32 boot_env;
Jing Huang7725ccf2009-09-23 17:46:15 -07001538
Jing Huang53440262010-10-18 17:12:29 -07001539 ioc_fwstate = readl(ioc->ioc_regs.ioc_fwstate);
Jing Huang7725ccf2009-09-23 17:46:15 -07001540
1541 if (force)
1542 ioc_fwstate = BFI_IOC_UNINIT;
1543
1544 bfa_trc(ioc, ioc_fwstate);
1545
Krishna Gudipati11189202011-06-13 15:50:35 -07001546 boot_type = BFI_FWBOOT_TYPE_NORMAL;
1547 boot_env = BFI_FWBOOT_ENV_OS;
Krishna Gudipatia36c61f2010-09-15 11:50:55 -07001548
Jing Huang5fbe25c2010-10-18 17:17:23 -07001549 /*
Jing Huang7725ccf2009-09-23 17:46:15 -07001550 * check if firmware is valid
1551 */
1552 fwvalid = (ioc_fwstate == BFI_IOC_UNINIT) ?
Krishna Gudipatia36c61f2010-09-15 11:50:55 -07001553 BFA_FALSE : bfa_ioc_fwver_valid(ioc, boot_env);
Jing Huang7725ccf2009-09-23 17:46:15 -07001554
1555 if (!fwvalid) {
Krishna Gudipatia36c61f2010-09-15 11:50:55 -07001556 bfa_ioc_boot(ioc, boot_type, boot_env);
Krishna Gudipati8b070b42011-06-13 15:52:40 -07001557 bfa_ioc_poll_fwinit(ioc);
Jing Huang7725ccf2009-09-23 17:46:15 -07001558 return;
1559 }
1560
Jing Huang5fbe25c2010-10-18 17:17:23 -07001561 /*
Jing Huang7725ccf2009-09-23 17:46:15 -07001562 * If hardware initialization is in progress (initialized by other IOC),
1563	 * just poll for firmware initialization to complete.
1564 */
1565 if (ioc_fwstate == BFI_IOC_INITING) {
Krishna Gudipati775c7742011-06-13 15:52:12 -07001566 bfa_ioc_poll_fwinit(ioc);
Jing Huang7725ccf2009-09-23 17:46:15 -07001567 return;
1568 }
1569
Jing Huang5fbe25c2010-10-18 17:17:23 -07001570 /*
Jing Huang7725ccf2009-09-23 17:46:15 -07001571	 * If the IOC function is disabled and the firmware version is the same,
1572	 * just re-enable the IOC.
Jing Huang07b28382010-07-08 19:59:24 -07001573	 *
1574	 * If running from option ROM, the IOC must not be in the operational
1575	 * state. With convergence, the IOC will be in the operational state
1576	 * when the 2nd driver is loaded.
Jing Huang7725ccf2009-09-23 17:46:15 -07001577 */
Jing Huang8f4bfad2010-12-26 21:50:10 -08001578 if (ioc_fwstate == BFI_IOC_DISABLED || ioc_fwstate == BFI_IOC_OP) {
Jing Huang7725ccf2009-09-23 17:46:15 -07001579
Jing Huang5fbe25c2010-10-18 17:17:23 -07001580 /*
Jing Huang7725ccf2009-09-23 17:46:15 -07001581 * When using MSI-X any pending firmware ready event should
1582 * be flushed. Otherwise MSI-X interrupts are not delivered.
1583 */
1584 bfa_ioc_msgflush(ioc);
Krishna Gudipatia36c61f2010-09-15 11:50:55 -07001585 bfa_fsm_send_event(&ioc->iocpf, IOCPF_E_FWREADY);
Jing Huang7725ccf2009-09-23 17:46:15 -07001586 return;
1587 }
1588
Jing Huang5fbe25c2010-10-18 17:17:23 -07001589 /*
Jing Huang7725ccf2009-09-23 17:46:15 -07001590 * Initialize the h/w for any other states.
1591 */
Krishna Gudipatia36c61f2010-09-15 11:50:55 -07001592 bfa_ioc_boot(ioc, boot_type, boot_env);
Krishna Gudipati8b070b42011-06-13 15:52:40 -07001593 bfa_ioc_poll_fwinit(ioc);
Jing Huang7725ccf2009-09-23 17:46:15 -07001594}
1595
1596static void
1597bfa_ioc_timeout(void *ioc_arg)
1598{
Krishna Gudipatia36c61f2010-09-15 11:50:55 -07001599 struct bfa_ioc_s *ioc = (struct bfa_ioc_s *) ioc_arg;
Jing Huang7725ccf2009-09-23 17:46:15 -07001600
1601 bfa_trc(ioc, 0);
1602 bfa_fsm_send_event(ioc, IOC_E_TIMEOUT);
1603}
1604
1605void
1606bfa_ioc_mbox_send(struct bfa_ioc_s *ioc, void *ioc_msg, int len)
1607{
Krishna Gudipatia36c61f2010-09-15 11:50:55 -07001608 u32 *msgp = (u32 *) ioc_msg;
1609 u32 i;
Jing Huang7725ccf2009-09-23 17:46:15 -07001610
1611 bfa_trc(ioc, msgp[0]);
1612 bfa_trc(ioc, len);
1613
Jing Huangd4b671c2010-12-26 21:46:35 -08001614 WARN_ON(len > BFI_IOC_MSGLEN_MAX);
Jing Huang7725ccf2009-09-23 17:46:15 -07001615
1616 /*
1617 * first write msg to mailbox registers
1618 */
1619 for (i = 0; i < len / sizeof(u32); i++)
Jing Huang53440262010-10-18 17:12:29 -07001620 writel(cpu_to_le32(msgp[i]),
1621 ioc->ioc_regs.hfn_mbox + i * sizeof(u32));
Jing Huang7725ccf2009-09-23 17:46:15 -07001622
1623 for (; i < BFI_IOC_MSGLEN_MAX / sizeof(u32); i++)
Jing Huang53440262010-10-18 17:12:29 -07001624 writel(0, ioc->ioc_regs.hfn_mbox + i * sizeof(u32));
Jing Huang7725ccf2009-09-23 17:46:15 -07001625
1626 /*
1627 * write 1 to mailbox CMD to trigger LPU event
1628 */
Jing Huang53440262010-10-18 17:12:29 -07001629 writel(1, ioc->ioc_regs.hfn_mbox_cmd);
1630 (void) readl(ioc->ioc_regs.hfn_mbox_cmd);
Jing Huang7725ccf2009-09-23 17:46:15 -07001631}
1632
1633static void
1634bfa_ioc_send_enable(struct bfa_ioc_s *ioc)
1635{
1636 struct bfi_ioc_ctrl_req_s enable_req;
Maggie Zhangf16a1752010-12-09 19:12:32 -08001637 struct timeval tv;
Jing Huang7725ccf2009-09-23 17:46:15 -07001638
1639 bfi_h2i_set(enable_req.mh, BFI_MC_IOC, BFI_IOC_H2I_ENABLE_REQ,
1640 bfa_ioc_portid(ioc));
Krishna Gudipatid37779f2011-06-13 15:42:10 -07001641 enable_req.clscode = cpu_to_be16(ioc->clscode);
Maggie Zhangf16a1752010-12-09 19:12:32 -08001642 do_gettimeofday(&tv);
Jing Huangba816ea2010-10-18 17:10:50 -07001643 enable_req.tv_sec = be32_to_cpu(tv.tv_sec);
Jing Huang7725ccf2009-09-23 17:46:15 -07001644 bfa_ioc_mbox_send(ioc, &enable_req, sizeof(struct bfi_ioc_ctrl_req_s));
1645}
1646
1647static void
1648bfa_ioc_send_disable(struct bfa_ioc_s *ioc)
1649{
1650 struct bfi_ioc_ctrl_req_s disable_req;
1651
1652 bfi_h2i_set(disable_req.mh, BFI_MC_IOC, BFI_IOC_H2I_DISABLE_REQ,
1653 bfa_ioc_portid(ioc));
1654 bfa_ioc_mbox_send(ioc, &disable_req, sizeof(struct bfi_ioc_ctrl_req_s));
1655}
1656
1657static void
1658bfa_ioc_send_getattr(struct bfa_ioc_s *ioc)
1659{
Krishna Gudipatia36c61f2010-09-15 11:50:55 -07001660 struct bfi_ioc_getattr_req_s attr_req;
Jing Huang7725ccf2009-09-23 17:46:15 -07001661
1662 bfi_h2i_set(attr_req.mh, BFI_MC_IOC, BFI_IOC_H2I_GETATTR_REQ,
1663 bfa_ioc_portid(ioc));
1664 bfa_dma_be_addr_set(attr_req.attr_addr, ioc->attr_dma.pa);
1665 bfa_ioc_mbox_send(ioc, &attr_req, sizeof(attr_req));
1666}
1667
1668static void
1669bfa_ioc_hb_check(void *cbarg)
1670{
Krishna Gudipati0a20de42010-03-05 19:34:20 -08001671 struct bfa_ioc_s *ioc = cbarg;
Krishna Gudipatia36c61f2010-09-15 11:50:55 -07001672 u32 hb_count;
Jing Huang7725ccf2009-09-23 17:46:15 -07001673
Jing Huang53440262010-10-18 17:12:29 -07001674 hb_count = readl(ioc->ioc_regs.heartbeat);
Jing Huang7725ccf2009-09-23 17:46:15 -07001675 if (ioc->hb_count == hb_count) {
Jing Huang7725ccf2009-09-23 17:46:15 -07001676 bfa_ioc_recover(ioc);
1677 return;
Krishna Gudipati0a20de42010-03-05 19:34:20 -08001678 } else {
1679 ioc->hb_count = hb_count;
Jing Huang7725ccf2009-09-23 17:46:15 -07001680 }
1681
1682 bfa_ioc_mbox_poll(ioc);
Krishna Gudipatia36c61f2010-09-15 11:50:55 -07001683 bfa_hb_timer_start(ioc);
Jing Huang7725ccf2009-09-23 17:46:15 -07001684}
1685
1686static void
1687bfa_ioc_hb_monitor(struct bfa_ioc_s *ioc)
1688{
Jing Huang53440262010-10-18 17:12:29 -07001689 ioc->hb_count = readl(ioc->ioc_regs.heartbeat);
Krishna Gudipatia36c61f2010-09-15 11:50:55 -07001690 bfa_hb_timer_start(ioc);
Jing Huang7725ccf2009-09-23 17:46:15 -07001691}
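
/*
 * Illustrative sketch (not part of the driver): the heartbeat check above
 * reduces to "did the firmware counter advance since the last sample?".
 * "last" is state a hypothetical caller keeps between polls.
 */
static bfa_boolean_t
example_hb_alive(struct bfa_ioc_s *ioc, u32 *last)
{
        u32 now = readl(ioc->ioc_regs.heartbeat);
        bfa_boolean_t alive = (now != *last) ? BFA_TRUE : BFA_FALSE;

        *last = now;
        return alive;
}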
1692
Jing Huang5fbe25c2010-10-18 17:17:23 -07001693/*
Krishna Gudipatia36c61f2010-09-15 11:50:55 -07001694 * Initiate a full firmware download.
Jing Huang7725ccf2009-09-23 17:46:15 -07001695 */
1696static void
1697bfa_ioc_download_fw(struct bfa_ioc_s *ioc, u32 boot_type,
Krishna Gudipatia36c61f2010-09-15 11:50:55 -07001698 u32 boot_env)
Jing Huang7725ccf2009-09-23 17:46:15 -07001699{
Krishna Gudipatia36c61f2010-09-15 11:50:55 -07001700 u32 *fwimg;
1701 u32 pgnum, pgoff;
1702 u32 loff = 0;
1703 u32 chunkno = 0;
1704 u32 i;
Krishna Gudipati11189202011-06-13 15:50:35 -07001705 u32 asicmode;
Jing Huang7725ccf2009-09-23 17:46:15 -07001706
Jing Huang5fbe25c2010-10-18 17:17:23 -07001707 /*
Jing Huang7725ccf2009-09-23 17:46:15 -07001708 * Initialize LMEM before downloading the firmware code
1709 */
1710 bfa_ioc_lmem_init(ioc);
1711
Krishna Gudipati11189202011-06-13 15:50:35 -07001712 bfa_trc(ioc, bfa_cb_image_get_size(bfa_ioc_asic_gen(ioc)));
1713 fwimg = bfa_cb_image_get_chunk(bfa_ioc_asic_gen(ioc), chunkno);
Jing Huang7725ccf2009-09-23 17:46:15 -07001714
Maggie Zhangf7f738122010-12-09 19:08:43 -08001715 pgnum = PSS_SMEM_PGNUM(ioc->ioc_regs.smem_pg0, loff);
1716 pgoff = PSS_SMEM_PGOFF(loff);
Jing Huang7725ccf2009-09-23 17:46:15 -07001717
Jing Huang53440262010-10-18 17:12:29 -07001718 writel(pgnum, ioc->ioc_regs.host_page_num_fn);
Jing Huang7725ccf2009-09-23 17:46:15 -07001719
Krishna Gudipati11189202011-06-13 15:50:35 -07001720 for (i = 0; i < bfa_cb_image_get_size(bfa_ioc_asic_gen(ioc)); i++) {
Jing Huang7725ccf2009-09-23 17:46:15 -07001721
Krishna Gudipati0a20de42010-03-05 19:34:20 -08001722 if (BFA_IOC_FLASH_CHUNK_NO(i) != chunkno) {
1723 chunkno = BFA_IOC_FLASH_CHUNK_NO(i);
Krishna Gudipati11189202011-06-13 15:50:35 -07001724 fwimg = bfa_cb_image_get_chunk(bfa_ioc_asic_gen(ioc),
Krishna Gudipati0a20de42010-03-05 19:34:20 -08001725 BFA_IOC_FLASH_CHUNK_ADDR(chunkno));
Jing Huang7725ccf2009-09-23 17:46:15 -07001726 }
1727
Jing Huang5fbe25c2010-10-18 17:17:23 -07001728 /*
Jing Huang7725ccf2009-09-23 17:46:15 -07001729 * write smem
1730 */
1731 bfa_mem_write(ioc->ioc_regs.smem_page_start, loff,
Krishna Gudipati0a20de42010-03-05 19:34:20 -08001732 fwimg[BFA_IOC_FLASH_OFFSET_IN_CHUNK(i)]);
Jing Huang7725ccf2009-09-23 17:46:15 -07001733
1734 loff += sizeof(u32);
1735
Jing Huang5fbe25c2010-10-18 17:17:23 -07001736 /*
Jing Huang7725ccf2009-09-23 17:46:15 -07001737 * handle page offset wrap around
1738 */
1739 loff = PSS_SMEM_PGOFF(loff);
1740 if (loff == 0) {
1741 pgnum++;
Jing Huang53440262010-10-18 17:12:29 -07001742 writel(pgnum, ioc->ioc_regs.host_page_num_fn);
Jing Huang7725ccf2009-09-23 17:46:15 -07001743 }
1744 }
1745
Maggie Zhangf7f738122010-12-09 19:08:43 -08001746 writel(PSS_SMEM_PGNUM(ioc->ioc_regs.smem_pg0, 0),
1747 ioc->ioc_regs.host_page_num_fn);
Krishna Gudipati13cc20c2010-03-05 19:37:29 -08001748
1749 /*
Krishna Gudipati11189202011-06-13 15:50:35 -07001750 * Set device mode, boot type and boot environment at the end.
1751 */
1752 asicmode = BFI_FWBOOT_DEVMODE(ioc->asic_gen, ioc->asic_mode,
1753 ioc->port0_mode, ioc->port1_mode);
1754 bfa_mem_write(ioc->ioc_regs.smem_page_start, BFI_FWBOOT_DEVMODE_OFF,
1755 swab32(asicmode));
1756 bfa_mem_write(ioc->ioc_regs.smem_page_start, BFI_FWBOOT_TYPE_OFF,
Jing Huang53440262010-10-18 17:12:29 -07001757 swab32(boot_type));
Krishna Gudipati11189202011-06-13 15:50:35 -07001758 bfa_mem_write(ioc->ioc_regs.smem_page_start, BFI_FWBOOT_ENV_OFF,
Jing Huang53440262010-10-18 17:12:29 -07001759 swab32(boot_env));
Jing Huang7725ccf2009-09-23 17:46:15 -07001760}
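
/*
 * Illustrative sketch (hypothetical, not part of the driver): the SMEM
 * paging arithmetic behind the download/read loops above. EX_SMEM_PG_SZ is
 * a made-up window size used only for illustration; the real driver derives
 * the page number and offset with the PSS_SMEM_PGNUM()/PSS_SMEM_PGOFF()
 * macros instead.
 */
#define EX_SMEM_PG_SZ   0x8000  /* hypothetical page window size */

static void
example_smem_addr(u32 pg0, u32 smem_off, u32 *pgnum, u32 *pgoff)
{
        *pgnum = pg0 + (smem_off / EX_SMEM_PG_SZ);      /* page select value */
        *pgoff = smem_off % EX_SMEM_PG_SZ;              /* offset inside the window */
}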
1761
Jing Huang7725ccf2009-09-23 17:46:15 -07001762
Jing Huang5fbe25c2010-10-18 17:17:23 -07001763/*
Jing Huang7725ccf2009-09-23 17:46:15 -07001764 * Update BFA configuration from firmware configuration.
1765 */
1766static void
1767bfa_ioc_getattr_reply(struct bfa_ioc_s *ioc)
1768{
Krishna Gudipatia36c61f2010-09-15 11:50:55 -07001769 struct bfi_ioc_attr_s *attr = ioc->attr;
Jing Huang7725ccf2009-09-23 17:46:15 -07001770
Jing Huangba816ea2010-10-18 17:10:50 -07001771 attr->adapter_prop = be32_to_cpu(attr->adapter_prop);
1772 attr->card_type = be32_to_cpu(attr->card_type);
1773 attr->maxfrsize = be16_to_cpu(attr->maxfrsize);
Krishna Gudipati5a0adae2011-06-24 20:22:56 -07001774 ioc->fcmode = (attr->port_mode == BFI_PORT_MODE_FC);
Jing Huang7725ccf2009-09-23 17:46:15 -07001775
1776 bfa_fsm_send_event(ioc, IOC_E_FWRSP_GETATTR);
1777}
1778
Jing Huang5fbe25c2010-10-18 17:17:23 -07001779/*
Jing Huang7725ccf2009-09-23 17:46:15 -07001780 * Attach time initialization of mbox logic.
1781 */
1782static void
1783bfa_ioc_mbox_attach(struct bfa_ioc_s *ioc)
1784{
Krishna Gudipatia36c61f2010-09-15 11:50:55 -07001785 struct bfa_ioc_mbox_mod_s *mod = &ioc->mbox_mod;
1786 int mc;
Jing Huang7725ccf2009-09-23 17:46:15 -07001787
1788 INIT_LIST_HEAD(&mod->cmd_q);
1789 for (mc = 0; mc < BFI_MC_MAX; mc++) {
1790 mod->mbhdlr[mc].cbfn = NULL;
1791 mod->mbhdlr[mc].cbarg = ioc->bfa;
1792 }
1793}
1794
Jing Huang5fbe25c2010-10-18 17:17:23 -07001795/*
Jing Huang7725ccf2009-09-23 17:46:15 -07001796 * Mbox poll timer -- restarts any pending mailbox requests.
1797 */
1798static void
1799bfa_ioc_mbox_poll(struct bfa_ioc_s *ioc)
1800{
Krishna Gudipatia36c61f2010-09-15 11:50:55 -07001801 struct bfa_ioc_mbox_mod_s *mod = &ioc->mbox_mod;
1802 struct bfa_mbox_cmd_s *cmd;
1803 u32 stat;
Jing Huang7725ccf2009-09-23 17:46:15 -07001804
Jing Huang5fbe25c2010-10-18 17:17:23 -07001805 /*
Jing Huang7725ccf2009-09-23 17:46:15 -07001806 * If no command pending, do nothing
1807 */
1808 if (list_empty(&mod->cmd_q))
1809 return;
1810
Jing Huang5fbe25c2010-10-18 17:17:23 -07001811 /*
Jing Huang7725ccf2009-09-23 17:46:15 -07001812 * If previous command is not yet fetched by firmware, do nothing
1813 */
Jing Huang53440262010-10-18 17:12:29 -07001814 stat = readl(ioc->ioc_regs.hfn_mbox_cmd);
Jing Huang7725ccf2009-09-23 17:46:15 -07001815 if (stat)
1816 return;
1817
Jing Huang5fbe25c2010-10-18 17:17:23 -07001818 /*
Jing Huang7725ccf2009-09-23 17:46:15 -07001819 * Enqueue command to firmware.
1820 */
1821 bfa_q_deq(&mod->cmd_q, &cmd);
1822 bfa_ioc_mbox_send(ioc, cmd->msg, sizeof(cmd->msg));
1823}
1824
Jing Huang5fbe25c2010-10-18 17:17:23 -07001825/*
Jing Huang7725ccf2009-09-23 17:46:15 -07001826 * Cleanup any pending requests.
1827 */
1828static void
Krishna Gudipati8b070b42011-06-13 15:52:40 -07001829bfa_ioc_mbox_flush(struct bfa_ioc_s *ioc)
Jing Huang7725ccf2009-09-23 17:46:15 -07001830{
Krishna Gudipatia36c61f2010-09-15 11:50:55 -07001831 struct bfa_ioc_mbox_mod_s *mod = &ioc->mbox_mod;
1832 struct bfa_mbox_cmd_s *cmd;
Jing Huang7725ccf2009-09-23 17:46:15 -07001833
1834 while (!list_empty(&mod->cmd_q))
1835 bfa_q_deq(&mod->cmd_q, &cmd);
1836}
1837
Jing Huang5fbe25c2010-10-18 17:17:23 -07001838/*
Krishna Gudipatia36c61f2010-09-15 11:50:55 -07001839 * Read data from SMEM to host through PCI memmap
1840 *
1841 * @param[in] ioc memory for IOC
1842 * @param[in] tbuf app memory to store data from smem
1843 * @param[in] soff smem offset
1844 * @param[in]	sz	read size in bytes
Jing Huang7725ccf2009-09-23 17:46:15 -07001845 */
Krishna Gudipatia36c61f2010-09-15 11:50:55 -07001846static bfa_status_t
1847bfa_ioc_smem_read(struct bfa_ioc_s *ioc, void *tbuf, u32 soff, u32 sz)
1848{
Maggie50444a32010-11-29 18:26:32 -08001849 u32 pgnum, loff;
1850 __be32 r32;
Krishna Gudipatia36c61f2010-09-15 11:50:55 -07001851 int i, len;
1852 u32 *buf = tbuf;
1853
Maggie Zhangf7f738122010-12-09 19:08:43 -08001854 pgnum = PSS_SMEM_PGNUM(ioc->ioc_regs.smem_pg0, soff);
1855 loff = PSS_SMEM_PGOFF(soff);
Krishna Gudipatia36c61f2010-09-15 11:50:55 -07001856 bfa_trc(ioc, pgnum);
1857 bfa_trc(ioc, loff);
1858 bfa_trc(ioc, sz);
1859
1860 /*
1861 * Hold semaphore to serialize pll init and fwtrc.
1862 */
1863 if (BFA_FALSE == bfa_ioc_sem_get(ioc->ioc_regs.ioc_init_sem_reg)) {
1864 bfa_trc(ioc, 0);
1865 return BFA_STATUS_FAILED;
1866 }
1867
Jing Huang53440262010-10-18 17:12:29 -07001868 writel(pgnum, ioc->ioc_regs.host_page_num_fn);
Krishna Gudipatia36c61f2010-09-15 11:50:55 -07001869
1870 len = sz/sizeof(u32);
1871 bfa_trc(ioc, len);
1872 for (i = 0; i < len; i++) {
1873 r32 = bfa_mem_read(ioc->ioc_regs.smem_page_start, loff);
Jing Huangba816ea2010-10-18 17:10:50 -07001874 buf[i] = be32_to_cpu(r32);
Krishna Gudipatia36c61f2010-09-15 11:50:55 -07001875 loff += sizeof(u32);
1876
Jing Huang5fbe25c2010-10-18 17:17:23 -07001877 /*
Krishna Gudipatia36c61f2010-09-15 11:50:55 -07001878 * handle page offset wrap around
1879 */
1880 loff = PSS_SMEM_PGOFF(loff);
1881 if (loff == 0) {
1882 pgnum++;
Jing Huang53440262010-10-18 17:12:29 -07001883 writel(pgnum, ioc->ioc_regs.host_page_num_fn);
Krishna Gudipatia36c61f2010-09-15 11:50:55 -07001884 }
1885 }
Maggie Zhangf7f738122010-12-09 19:08:43 -08001886 writel(PSS_SMEM_PGNUM(ioc->ioc_regs.smem_pg0, 0),
1887 ioc->ioc_regs.host_page_num_fn);
Krishna Gudipatia36c61f2010-09-15 11:50:55 -07001888 /*
1889 * release semaphore.
1890 */
Krishna Gudipati5a0adae2011-06-24 20:22:56 -07001891 readl(ioc->ioc_regs.ioc_init_sem_reg);
Maggie Zhangf7f738122010-12-09 19:08:43 -08001892 writel(1, ioc->ioc_regs.ioc_init_sem_reg);
Krishna Gudipatia36c61f2010-09-15 11:50:55 -07001893
1894 bfa_trc(ioc, pgnum);
1895 return BFA_STATUS_OK;
1896}
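
/*
 * Minimal caller sketch (hypothetical, not part of the driver): read the
 * firmware trace area into a caller-supplied buffer of at least
 * BFA_DBG_FWTRC_LEN bytes -- essentially what bfa_ioc_debug_fwtrc() further
 * below does. Error handling is left to the caller.
 */
static bfa_status_t
example_read_fwtrc(struct bfa_ioc_s *ioc, void *buf)
{
        u32 loff = BFA_DBG_FWTRC_OFF(bfa_ioc_portid(ioc));

        return bfa_ioc_smem_read(ioc, buf, loff, BFA_DBG_FWTRC_LEN);
}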
1897
Jing Huang5fbe25c2010-10-18 17:17:23 -07001898/*
Krishna Gudipatia36c61f2010-09-15 11:50:55 -07001899 * Clear SMEM data from host through PCI memmap
1900 *
1901 * @param[in] ioc memory for IOC
1902 * @param[in] soff smem offset
1903 * @param[in]	sz	clear size in bytes
1904 */
1905static bfa_status_t
1906bfa_ioc_smem_clr(struct bfa_ioc_s *ioc, u32 soff, u32 sz)
1907{
1908 int i, len;
1909 u32 pgnum, loff;
1910
Maggie Zhangf7f738122010-12-09 19:08:43 -08001911 pgnum = PSS_SMEM_PGNUM(ioc->ioc_regs.smem_pg0, soff);
1912 loff = PSS_SMEM_PGOFF(soff);
Krishna Gudipatia36c61f2010-09-15 11:50:55 -07001913 bfa_trc(ioc, pgnum);
1914 bfa_trc(ioc, loff);
1915 bfa_trc(ioc, sz);
1916
1917 /*
1918 * Hold semaphore to serialize pll init and fwtrc.
1919 */
1920 if (BFA_FALSE == bfa_ioc_sem_get(ioc->ioc_regs.ioc_init_sem_reg)) {
1921 bfa_trc(ioc, 0);
1922 return BFA_STATUS_FAILED;
1923 }
1924
Jing Huang53440262010-10-18 17:12:29 -07001925 writel(pgnum, ioc->ioc_regs.host_page_num_fn);
Krishna Gudipatia36c61f2010-09-15 11:50:55 -07001926
1927 len = sz/sizeof(u32); /* len in words */
1928 bfa_trc(ioc, len);
1929 for (i = 0; i < len; i++) {
1930 bfa_mem_write(ioc->ioc_regs.smem_page_start, loff, 0);
1931 loff += sizeof(u32);
1932
Jing Huang5fbe25c2010-10-18 17:17:23 -07001933 /*
Krishna Gudipatia36c61f2010-09-15 11:50:55 -07001934 * handle page offset wrap around
1935 */
1936 loff = PSS_SMEM_PGOFF(loff);
1937 if (loff == 0) {
1938 pgnum++;
Jing Huang53440262010-10-18 17:12:29 -07001939 writel(pgnum, ioc->ioc_regs.host_page_num_fn);
Krishna Gudipatia36c61f2010-09-15 11:50:55 -07001940 }
1941 }
Maggie Zhangf7f738122010-12-09 19:08:43 -08001942 writel(PSS_SMEM_PGNUM(ioc->ioc_regs.smem_pg0, 0),
1943 ioc->ioc_regs.host_page_num_fn);
Krishna Gudipatia36c61f2010-09-15 11:50:55 -07001944
1945 /*
1946 * release semaphore.
1947 */
Krishna Gudipati5a0adae2011-06-24 20:22:56 -07001948 readl(ioc->ioc_regs.ioc_init_sem_reg);
Maggie Zhangf7f738122010-12-09 19:08:43 -08001949 writel(1, ioc->ioc_regs.ioc_init_sem_reg);
Krishna Gudipatia36c61f2010-09-15 11:50:55 -07001950 bfa_trc(ioc, pgnum);
1951 return BFA_STATUS_OK;
1952}
1953
Krishna Gudipatia36c61f2010-09-15 11:50:55 -07001954static void
Krishna Gudipati4e78efe2010-12-13 16:16:09 -08001955bfa_ioc_fail_notify(struct bfa_ioc_s *ioc)
1956{
Krishna Gudipati4e78efe2010-12-13 16:16:09 -08001957 struct bfad_s *bfad = (struct bfad_s *)ioc->bfa->bfad;
1958
Jing Huang8f4bfad2010-12-26 21:50:10 -08001959 /*
Krishna Gudipati4e78efe2010-12-13 16:16:09 -08001960 * Notify driver and common modules registered for notification.
1961 */
1962 ioc->cbfn->hbfail_cbfn(ioc->bfa);
Krishna Gudipatid37779f2011-06-13 15:42:10 -07001963 bfa_ioc_event_notify(ioc, BFA_IOC_E_FAILED);
Krishna Gudipati4e78efe2010-12-13 16:16:09 -08001964
1965 bfa_ioc_debug_save_ftrc(ioc);
1966
1967 BFA_LOG(KERN_CRIT, bfad, bfa_log_level,
1968 "Heart Beat of IOC has failed\n");
1969
1970}
1971
1972static void
Krishna Gudipatia36c61f2010-09-15 11:50:55 -07001973bfa_ioc_pf_fwmismatch(struct bfa_ioc_s *ioc)
1974{
1975 struct bfad_s *bfad = (struct bfad_s *)ioc->bfa->bfad;
Jing Huang5fbe25c2010-10-18 17:17:23 -07001976 /*
Krishna Gudipatia36c61f2010-09-15 11:50:55 -07001977 * Provide enable completion callback.
1978 */
1979 ioc->cbfn->enable_cbfn(ioc->bfa, BFA_STATUS_IOC_FAILURE);
Jing Huang88166242010-12-09 17:11:53 -08001980 BFA_LOG(KERN_WARNING, bfad, bfa_log_level,
Krishna Gudipatia36c61f2010-09-15 11:50:55 -07001981 "Running firmware version is incompatible "
1982 "with the driver version\n");
1983}
1984
Krishna Gudipatia36c61f2010-09-15 11:50:55 -07001985bfa_status_t
1986bfa_ioc_pll_init(struct bfa_ioc_s *ioc)
1987{
1988
1989 /*
1990 * Hold semaphore so that nobody can access the chip during init.
1991 */
1992 bfa_ioc_sem_get(ioc->ioc_regs.ioc_init_sem_reg);
1993
1994 bfa_ioc_pll_init_asic(ioc);
1995
1996 ioc->pllinit = BFA_TRUE;
1997 /*
1998 * release semaphore.
1999 */
Krishna Gudipati5a0adae2011-06-24 20:22:56 -07002000 readl(ioc->ioc_regs.ioc_init_sem_reg);
Maggie Zhangf7f738122010-12-09 19:08:43 -08002001 writel(1, ioc->ioc_regs.ioc_init_sem_reg);
Krishna Gudipatia36c61f2010-09-15 11:50:55 -07002002
2003 return BFA_STATUS_OK;
2004}
Jing Huang7725ccf2009-09-23 17:46:15 -07002005
Jing Huang5fbe25c2010-10-18 17:17:23 -07002006/*
Jing Huang7725ccf2009-09-23 17:46:15 -07002007 * Interface used by the diag module to boot firmware with memory test
2008 * as the entry vector.
2009 */
2010void
Krishna Gudipatia36c61f2010-09-15 11:50:55 -07002011bfa_ioc_boot(struct bfa_ioc_s *ioc, u32 boot_type, u32 boot_env)
Jing Huang7725ccf2009-09-23 17:46:15 -07002012{
Jing Huang7725ccf2009-09-23 17:46:15 -07002013 bfa_ioc_stats(ioc, ioc_boots);
2014
2015 if (bfa_ioc_pll_init(ioc) != BFA_STATUS_OK)
2016 return;
2017
Jing Huang5fbe25c2010-10-18 17:17:23 -07002018 /*
Jing Huang7725ccf2009-09-23 17:46:15 -07002019 * Initialize IOC state of all functions on a chip reset.
2020 */
Krishna Gudipati11189202011-06-13 15:50:35 -07002021 if (boot_type == BFI_FWBOOT_TYPE_MEMTEST) {
2022 writel(BFI_IOC_MEMTEST, ioc->ioc_regs.ioc_fwstate);
2023 writel(BFI_IOC_MEMTEST, ioc->ioc_regs.alt_ioc_fwstate);
Jing Huang7725ccf2009-09-23 17:46:15 -07002024 } else {
Krishna Gudipati11189202011-06-13 15:50:35 -07002025 writel(BFI_IOC_INITING, ioc->ioc_regs.ioc_fwstate);
2026 writel(BFI_IOC_INITING, ioc->ioc_regs.alt_ioc_fwstate);
Jing Huang7725ccf2009-09-23 17:46:15 -07002027 }
2028
Jing Huang07b28382010-07-08 19:59:24 -07002029 bfa_ioc_msgflush(ioc);
Krishna Gudipatia36c61f2010-09-15 11:50:55 -07002030 bfa_ioc_download_fw(ioc, boot_type, boot_env);
Jing Huang7725ccf2009-09-23 17:46:15 -07002031 bfa_ioc_lpu_start(ioc);
2032}
2033
Jing Huang5fbe25c2010-10-18 17:17:23 -07002034/*
Jing Huang7725ccf2009-09-23 17:46:15 -07002035 * Enable/disable IOC failure auto recovery.
2036 */
2037void
2038bfa_ioc_auto_recover(bfa_boolean_t auto_recover)
2039{
Krishna Gudipati2f9b8852010-03-03 17:42:51 -08002040 bfa_auto_recover = auto_recover;
Jing Huang7725ccf2009-09-23 17:46:15 -07002041}
2042
2043
Krishna Gudipatia36c61f2010-09-15 11:50:55 -07002044
Jing Huang7725ccf2009-09-23 17:46:15 -07002045bfa_boolean_t
2046bfa_ioc_is_operational(struct bfa_ioc_s *ioc)
2047{
2048 return bfa_fsm_cmp_state(ioc, bfa_ioc_sm_op);
2049}
2050
Krishna Gudipatia36c61f2010-09-15 11:50:55 -07002051bfa_boolean_t
2052bfa_ioc_is_initialized(struct bfa_ioc_s *ioc)
2053{
Jing Huang53440262010-10-18 17:12:29 -07002054 u32 r32 = readl(ioc->ioc_regs.ioc_fwstate);
Krishna Gudipatia36c61f2010-09-15 11:50:55 -07002055
2056 return ((r32 != BFI_IOC_UNINIT) &&
2057 (r32 != BFI_IOC_INITING) &&
2058 (r32 != BFI_IOC_MEMTEST));
2059}
2060
Krishna Gudipati11189202011-06-13 15:50:35 -07002061bfa_boolean_t
Jing Huang7725ccf2009-09-23 17:46:15 -07002062bfa_ioc_msgget(struct bfa_ioc_s *ioc, void *mbmsg)
2063{
Maggie50444a32010-11-29 18:26:32 -08002064 __be32 *msgp = mbmsg;
Krishna Gudipatia36c61f2010-09-15 11:50:55 -07002065 u32 r32;
2066 int i;
Jing Huang7725ccf2009-09-23 17:46:15 -07002067
Krishna Gudipati11189202011-06-13 15:50:35 -07002068 r32 = readl(ioc->ioc_regs.lpu_mbox_cmd);
2069 if ((r32 & 1) == 0)
2070 return BFA_FALSE;
2071
Jing Huang5fbe25c2010-10-18 17:17:23 -07002072 /*
Jing Huang7725ccf2009-09-23 17:46:15 -07002073 * read the MBOX msg
2074 */
2075 for (i = 0; i < (sizeof(union bfi_ioc_i2h_msg_u) / sizeof(u32));
2076 i++) {
Jing Huang53440262010-10-18 17:12:29 -07002077 r32 = readl(ioc->ioc_regs.lpu_mbox +
Jing Huang7725ccf2009-09-23 17:46:15 -07002078 i * sizeof(u32));
Jing Huangba816ea2010-10-18 17:10:50 -07002079 msgp[i] = cpu_to_be32(r32);
Jing Huang7725ccf2009-09-23 17:46:15 -07002080 }
2081
Jing Huang5fbe25c2010-10-18 17:17:23 -07002082 /*
Jing Huang7725ccf2009-09-23 17:46:15 -07002083 * turn off mailbox interrupt by clearing mailbox status
2084 */
Jing Huang53440262010-10-18 17:12:29 -07002085 writel(1, ioc->ioc_regs.lpu_mbox_cmd);
2086 readl(ioc->ioc_regs.lpu_mbox_cmd);
Krishna Gudipati11189202011-06-13 15:50:35 -07002087
2088 return BFA_TRUE;
Jing Huang7725ccf2009-09-23 17:46:15 -07002089}
2090
2091void
2092bfa_ioc_isr(struct bfa_ioc_s *ioc, struct bfi_mbmsg_s *m)
2093{
Krishna Gudipatia36c61f2010-09-15 11:50:55 -07002094 union bfi_ioc_i2h_msg_u *msg;
2095 struct bfa_iocpf_s *iocpf = &ioc->iocpf;
Jing Huang7725ccf2009-09-23 17:46:15 -07002096
Krishna Gudipatia36c61f2010-09-15 11:50:55 -07002097 msg = (union bfi_ioc_i2h_msg_u *) m;
Jing Huang7725ccf2009-09-23 17:46:15 -07002098
2099 bfa_ioc_stats(ioc, ioc_isrs);
2100
2101 switch (msg->mh.msg_id) {
2102 case BFI_IOC_I2H_HBEAT:
2103 break;
2104
Jing Huang7725ccf2009-09-23 17:46:15 -07002105 case BFI_IOC_I2H_ENABLE_REPLY:
Krishna Gudipati1a4d8e12011-06-24 20:22:28 -07002106 ioc->port_mode = ioc->port_mode_cfg =
2107 (enum bfa_mode_s)msg->fw_event.port_mode;
2108 ioc->ad_cap_bm = msg->fw_event.cap_bm;
Krishna Gudipatia36c61f2010-09-15 11:50:55 -07002109 bfa_fsm_send_event(iocpf, IOCPF_E_FWRSP_ENABLE);
Jing Huang7725ccf2009-09-23 17:46:15 -07002110 break;
2111
2112 case BFI_IOC_I2H_DISABLE_REPLY:
Krishna Gudipatia36c61f2010-09-15 11:50:55 -07002113 bfa_fsm_send_event(iocpf, IOCPF_E_FWRSP_DISABLE);
Jing Huang7725ccf2009-09-23 17:46:15 -07002114 break;
2115
2116 case BFI_IOC_I2H_GETATTR_REPLY:
2117 bfa_ioc_getattr_reply(ioc);
2118 break;
2119
Krishna Gudipatia7141342011-06-24 20:23:19 -07002120 case BFI_IOC_I2H_ACQ_ADDR_REPLY:
2121 bfa_fsm_send_event(ioc, IOC_E_FWRSP_ACQ_ADDR);
2122 break;
2123
Jing Huang7725ccf2009-09-23 17:46:15 -07002124 default:
2125 bfa_trc(ioc, msg->mh.msg_id);
Jing Huangd4b671c2010-12-26 21:46:35 -08002126 WARN_ON(1);
Jing Huang7725ccf2009-09-23 17:46:15 -07002127 }
2128}
2129
Jing Huang5fbe25c2010-10-18 17:17:23 -07002130/*
Jing Huang7725ccf2009-09-23 17:46:15 -07002131 * IOC attach time initialization and setup.
2132 *
2133 * @param[in] ioc memory for IOC
2134 * @param[in] bfa driver instance structure
Jing Huang7725ccf2009-09-23 17:46:15 -07002135 */
2136void
2137bfa_ioc_attach(struct bfa_ioc_s *ioc, void *bfa, struct bfa_ioc_cbfn_s *cbfn,
Krishna Gudipatia36c61f2010-09-15 11:50:55 -07002138 struct bfa_timer_mod_s *timer_mod)
Jing Huang7725ccf2009-09-23 17:46:15 -07002139{
Krishna Gudipatia36c61f2010-09-15 11:50:55 -07002140 ioc->bfa = bfa;
2141 ioc->cbfn = cbfn;
2142 ioc->timer_mod = timer_mod;
2143 ioc->fcmode = BFA_FALSE;
2144 ioc->pllinit = BFA_FALSE;
Jing Huang7725ccf2009-09-23 17:46:15 -07002145 ioc->dbg_fwsave_once = BFA_TRUE;
Krishna Gudipatia36c61f2010-09-15 11:50:55 -07002146 ioc->iocpf.ioc = ioc;
Jing Huang7725ccf2009-09-23 17:46:15 -07002147
2148 bfa_ioc_mbox_attach(ioc);
Krishna Gudipatid37779f2011-06-13 15:42:10 -07002149 INIT_LIST_HEAD(&ioc->notify_q);
Jing Huang7725ccf2009-09-23 17:46:15 -07002150
Krishna Gudipatia36c61f2010-09-15 11:50:55 -07002151 bfa_fsm_set_state(ioc, bfa_ioc_sm_uninit);
2152 bfa_fsm_send_event(ioc, IOC_E_RESET);
Jing Huang7725ccf2009-09-23 17:46:15 -07002153}
2154
Jing Huang5fbe25c2010-10-18 17:17:23 -07002155/*
Jing Huang7725ccf2009-09-23 17:46:15 -07002156 * Driver detach time IOC cleanup.
2157 */
2158void
2159bfa_ioc_detach(struct bfa_ioc_s *ioc)
2160{
2161 bfa_fsm_send_event(ioc, IOC_E_DETACH);
Krishna Gudipati3350d982011-06-24 20:28:37 -07002162 INIT_LIST_HEAD(&ioc->notify_q);
Jing Huang7725ccf2009-09-23 17:46:15 -07002163}
2164
Jing Huang5fbe25c2010-10-18 17:17:23 -07002165/*
Jing Huang7725ccf2009-09-23 17:46:15 -07002166 * Setup IOC PCI properties.
2167 *
2168 * @param[in] pcidev PCI device information for this IOC
2169 */
2170void
2171bfa_ioc_pci_init(struct bfa_ioc_s *ioc, struct bfa_pcidev_s *pcidev,
Krishna Gudipatid37779f2011-06-13 15:42:10 -07002172 enum bfi_pcifn_class clscode)
Jing Huang7725ccf2009-09-23 17:46:15 -07002173{
Krishna Gudipatid37779f2011-06-13 15:42:10 -07002174 ioc->clscode = clscode;
Krishna Gudipatia36c61f2010-09-15 11:50:55 -07002175 ioc->pcidev = *pcidev;
Krishna Gudipati11189202011-06-13 15:50:35 -07002176
2177 /*
2178 * Initialize IOC and device personality
2179 */
2180 ioc->port0_mode = ioc->port1_mode = BFI_PORT_MODE_FC;
2181 ioc->asic_mode = BFI_ASIC_MODE_FC;
2182
2183 switch (pcidev->device_id) {
2184 case BFA_PCI_DEVICE_ID_FC_8G1P:
2185 case BFA_PCI_DEVICE_ID_FC_8G2P:
2186 ioc->asic_gen = BFI_ASIC_GEN_CB;
Krishna Gudipati1a4d8e12011-06-24 20:22:28 -07002187 ioc->fcmode = BFA_TRUE;
2188 ioc->port_mode = ioc->port_mode_cfg = BFA_MODE_HBA;
2189 ioc->ad_cap_bm = BFA_CM_HBA;
Krishna Gudipati11189202011-06-13 15:50:35 -07002190 break;
2191
2192 case BFA_PCI_DEVICE_ID_CT:
2193 ioc->asic_gen = BFI_ASIC_GEN_CT;
2194 ioc->port0_mode = ioc->port1_mode = BFI_PORT_MODE_ETH;
2195 ioc->asic_mode = BFI_ASIC_MODE_ETH;
Krishna Gudipati1a4d8e12011-06-24 20:22:28 -07002196 ioc->port_mode = ioc->port_mode_cfg = BFA_MODE_CNA;
2197 ioc->ad_cap_bm = BFA_CM_CNA;
Krishna Gudipati11189202011-06-13 15:50:35 -07002198 break;
2199
2200 case BFA_PCI_DEVICE_ID_CT_FC:
2201 ioc->asic_gen = BFI_ASIC_GEN_CT;
Krishna Gudipati1a4d8e12011-06-24 20:22:28 -07002202 ioc->fcmode = BFA_TRUE;
2203 ioc->port_mode = ioc->port_mode_cfg = BFA_MODE_HBA;
2204 ioc->ad_cap_bm = BFA_CM_HBA;
Krishna Gudipati11189202011-06-13 15:50:35 -07002205 break;
2206
2207 case BFA_PCI_DEVICE_ID_CT2:
2208 ioc->asic_gen = BFI_ASIC_GEN_CT2;
Krishna Gudipati1a4d8e12011-06-24 20:22:28 -07002209 if (clscode == BFI_PCIFN_CLASS_FC &&
2210 pcidev->ssid == BFA_PCI_CT2_SSID_FC) {
Krishna Gudipati11189202011-06-13 15:50:35 -07002211 ioc->asic_mode = BFI_ASIC_MODE_FC16;
Krishna Gudipati1a4d8e12011-06-24 20:22:28 -07002212 ioc->fcmode = BFA_TRUE;
2213 ioc->port_mode = ioc->port_mode_cfg = BFA_MODE_HBA;
2214 ioc->ad_cap_bm = BFA_CM_HBA;
2215 } else {
Krishna Gudipati11189202011-06-13 15:50:35 -07002216 ioc->port0_mode = ioc->port1_mode = BFI_PORT_MODE_ETH;
Krishna Gudipati1a4d8e12011-06-24 20:22:28 -07002217 ioc->asic_mode = BFI_ASIC_MODE_ETH;
2218 if (pcidev->ssid == BFA_PCI_CT2_SSID_FCoE) {
2219 ioc->port_mode =
2220 ioc->port_mode_cfg = BFA_MODE_CNA;
2221 ioc->ad_cap_bm = BFA_CM_CNA;
2222 } else {
2223 ioc->port_mode =
2224 ioc->port_mode_cfg = BFA_MODE_NIC;
2225 ioc->ad_cap_bm = BFA_CM_NIC;
2226 }
Krishna Gudipati11189202011-06-13 15:50:35 -07002227 }
2228 break;
2229
2230 default:
2231 WARN_ON(1);
2232 }
Jing Huang7725ccf2009-09-23 17:46:15 -07002233
Jing Huang5fbe25c2010-10-18 17:17:23 -07002234 /*
Krishna Gudipati0a20de42010-03-05 19:34:20 -08002235 * Set asic specific interfaces. See bfa_ioc_cb.c and bfa_ioc_ct.c
2236 */
Krishna Gudipati11189202011-06-13 15:50:35 -07002237 if (ioc->asic_gen == BFI_ASIC_GEN_CB)
Krishna Gudipati0a20de42010-03-05 19:34:20 -08002238 bfa_ioc_set_cb_hwif(ioc);
Krishna Gudipati11189202011-06-13 15:50:35 -07002239 else if (ioc->asic_gen == BFI_ASIC_GEN_CT)
2240 bfa_ioc_set_ct_hwif(ioc);
2241 else {
2242 WARN_ON(ioc->asic_gen != BFI_ASIC_GEN_CT2);
2243 bfa_ioc_set_ct2_hwif(ioc);
2244 bfa_ioc_ct2_poweron(ioc);
2245 }
Krishna Gudipati0a20de42010-03-05 19:34:20 -08002246
Jing Huang7725ccf2009-09-23 17:46:15 -07002247 bfa_ioc_map_port(ioc);
2248 bfa_ioc_reg_init(ioc);
2249}
2250
Jing Huang5fbe25c2010-10-18 17:17:23 -07002251/*
Jing Huang7725ccf2009-09-23 17:46:15 -07002252 * Initialize IOC dma memory
2253 *
2254 * @param[in] dm_kva kernel virtual address of IOC dma memory
2255 * @param[in] dm_pa physical address of IOC dma memory
2256 */
2257void
Krishna Gudipatia36c61f2010-09-15 11:50:55 -07002258bfa_ioc_mem_claim(struct bfa_ioc_s *ioc, u8 *dm_kva, u64 dm_pa)
Jing Huang7725ccf2009-09-23 17:46:15 -07002259{
Jing Huang5fbe25c2010-10-18 17:17:23 -07002260 /*
Jing Huang7725ccf2009-09-23 17:46:15 -07002261 * dma memory for firmware attribute
2262 */
2263 ioc->attr_dma.kva = dm_kva;
2264 ioc->attr_dma.pa = dm_pa;
Krishna Gudipatia36c61f2010-09-15 11:50:55 -07002265 ioc->attr = (struct bfi_ioc_attr_s *) dm_kva;
Jing Huang7725ccf2009-09-23 17:46:15 -07002266}
2267
Jing Huang7725ccf2009-09-23 17:46:15 -07002268void
2269bfa_ioc_enable(struct bfa_ioc_s *ioc)
2270{
2271 bfa_ioc_stats(ioc, ioc_enables);
2272 ioc->dbg_fwsave_once = BFA_TRUE;
2273
2274 bfa_fsm_send_event(ioc, IOC_E_ENABLE);
2275}
2276
2277void
2278bfa_ioc_disable(struct bfa_ioc_s *ioc)
2279{
2280 bfa_ioc_stats(ioc, ioc_disables);
2281 bfa_fsm_send_event(ioc, IOC_E_DISABLE);
2282}
2283
Jing Huang7725ccf2009-09-23 17:46:15 -07002284
Jing Huang5fbe25c2010-10-18 17:17:23 -07002285/*
Jing Huang7725ccf2009-09-23 17:46:15 -07002286 * Initialize memory for saving firmware trace. Driver must initialize
2287 * trace memory before calling bfa_ioc_enable().
2288 */
2289void
2290bfa_ioc_debug_memclaim(struct bfa_ioc_s *ioc, void *dbg_fwsave)
2291{
Krishna Gudipatia36c61f2010-09-15 11:50:55 -07002292 ioc->dbg_fwsave = dbg_fwsave;
Maggie Zhangf7f738122010-12-09 19:08:43 -08002293 ioc->dbg_fwsave_len = (ioc->iocpf.auto_recover) ? BFA_DBG_FWTRC_LEN : 0;
Jing Huang7725ccf2009-09-23 17:46:15 -07002294}
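
/*
 * Usage sketch (hypothetical probe path, not part of the driver): the
 * expected attach-time ordering of the setup calls defined above. All
 * arguments are placeholders the real driver obtains from its own
 * bfad/bfa setup code; BFI_PCIFN_CLASS_FC is used here only as an example.
 */
static void
example_ioc_bringup(struct bfa_ioc_s *ioc, void *bfa,
                    struct bfa_ioc_cbfn_s *cbfn,
                    struct bfa_timer_mod_s *timer_mod,
                    struct bfa_pcidev_s *pcidev,
                    u8 *dm_kva, u64 dm_pa, void *fwsave_buf)
{
        bfa_ioc_attach(ioc, bfa, cbfn, timer_mod);          /* reset IOC FSM */
        bfa_ioc_pci_init(ioc, pcidev, BFI_PCIFN_CLASS_FC);  /* hwif/personality */
        bfa_ioc_mem_claim(ioc, dm_kva, dm_pa);              /* attr DMA memory */
        bfa_ioc_debug_memclaim(ioc, fwsave_buf);            /* fw trace save area */
        bfa_ioc_enable(ioc);                                /* kick off enable FSM */
}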
2295
Jing Huang5fbe25c2010-10-18 17:17:23 -07002296/*
Jing Huang7725ccf2009-09-23 17:46:15 -07002297 * Register mailbox message handler functions
2298 *
2299 * @param[in] ioc IOC instance
2300 * @param[in] mcfuncs message class handler functions
2301 */
2302void
2303bfa_ioc_mbox_register(struct bfa_ioc_s *ioc, bfa_ioc_mbox_mcfunc_t *mcfuncs)
2304{
Krishna Gudipatia36c61f2010-09-15 11:50:55 -07002305 struct bfa_ioc_mbox_mod_s *mod = &ioc->mbox_mod;
2306 int mc;
Jing Huang7725ccf2009-09-23 17:46:15 -07002307
2308 for (mc = 0; mc < BFI_MC_MAX; mc++)
2309 mod->mbhdlr[mc].cbfn = mcfuncs[mc];
2310}
2311
Jing Huang5fbe25c2010-10-18 17:17:23 -07002312/*
Jing Huang7725ccf2009-09-23 17:46:15 -07002313 * Register mailbox message handler function, to be called by common modules
2314 */
2315void
2316bfa_ioc_mbox_regisr(struct bfa_ioc_s *ioc, enum bfi_mclass mc,
2317 bfa_ioc_mbox_mcfunc_t cbfn, void *cbarg)
2318{
Krishna Gudipatia36c61f2010-09-15 11:50:55 -07002319 struct bfa_ioc_mbox_mod_s *mod = &ioc->mbox_mod;
Jing Huang7725ccf2009-09-23 17:46:15 -07002320
Krishna Gudipatia36c61f2010-09-15 11:50:55 -07002321 mod->mbhdlr[mc].cbfn = cbfn;
2322 mod->mbhdlr[mc].cbarg = cbarg;
Jing Huang7725ccf2009-09-23 17:46:15 -07002323}
2324
Jing Huang5fbe25c2010-10-18 17:17:23 -07002325/*
Jing Huang7725ccf2009-09-23 17:46:15 -07002326 * Queue a mailbox command request to firmware. Waits if mailbox is busy.
2327 * It is the caller's responsibility to serialize requests.
2328 *
2329 * @param[in] ioc IOC instance
2330 * @param[in]	cmd	Mailbox command
2331 */
2332void
2333bfa_ioc_mbox_queue(struct bfa_ioc_s *ioc, struct bfa_mbox_cmd_s *cmd)
2334{
Krishna Gudipatia36c61f2010-09-15 11:50:55 -07002335 struct bfa_ioc_mbox_mod_s *mod = &ioc->mbox_mod;
2336 u32 stat;
Jing Huang7725ccf2009-09-23 17:46:15 -07002337
Jing Huang5fbe25c2010-10-18 17:17:23 -07002338 /*
Jing Huang7725ccf2009-09-23 17:46:15 -07002339 * If a previous command is pending, queue new command
2340 */
2341 if (!list_empty(&mod->cmd_q)) {
2342 list_add_tail(&cmd->qe, &mod->cmd_q);
2343 return;
2344 }
2345
Jing Huang5fbe25c2010-10-18 17:17:23 -07002346 /*
Jing Huang7725ccf2009-09-23 17:46:15 -07002347 * If mailbox is busy, queue command for poll timer
2348 */
Jing Huang53440262010-10-18 17:12:29 -07002349 stat = readl(ioc->ioc_regs.hfn_mbox_cmd);
Jing Huang7725ccf2009-09-23 17:46:15 -07002350 if (stat) {
2351 list_add_tail(&cmd->qe, &mod->cmd_q);
2352 return;
2353 }
2354
Jing Huang5fbe25c2010-10-18 17:17:23 -07002355 /*
Jing Huang7725ccf2009-09-23 17:46:15 -07002356 * mailbox is free -- queue command to firmware
2357 */
2358 bfa_ioc_mbox_send(ioc, cmd->msg, sizeof(cmd->msg));
2359}
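
/*
 * Usage sketch (hypothetical common module, not part of the driver):
 * register a receive handler for one message class and queue a request.
 * "my_isr" and "my_cbarg" are placeholders; a real caller first fills
 * cmd->msg with a bfi_* request built via bfi_h2i_set(), as done by
 * bfa_ioc_send_enable() earlier in this file.
 */
static void
example_mbox_use(struct bfa_ioc_s *ioc, enum bfi_mclass mc,
                 bfa_ioc_mbox_mcfunc_t my_isr, void *my_cbarg,
                 struct bfa_mbox_cmd_s *cmd)
{
        bfa_ioc_mbox_regisr(ioc, mc, my_isr, my_cbarg);     /* rx path */
        bfa_ioc_mbox_queue(ioc, cmd);                       /* tx path */
}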
2360
Jing Huang5fbe25c2010-10-18 17:17:23 -07002361/*
Jing Huang7725ccf2009-09-23 17:46:15 -07002362 * Handle mailbox interrupts
2363 */
2364void
2365bfa_ioc_mbox_isr(struct bfa_ioc_s *ioc)
2366{
Krishna Gudipatia36c61f2010-09-15 11:50:55 -07002367 struct bfa_ioc_mbox_mod_s *mod = &ioc->mbox_mod;
2368 struct bfi_mbmsg_s m;
2369 int mc;
Jing Huang7725ccf2009-09-23 17:46:15 -07002370
Krishna Gudipati8b070b42011-06-13 15:52:40 -07002371 if (bfa_ioc_msgget(ioc, &m)) {
2372 /*
2373 * Treat IOC message class as special.
2374 */
2375 mc = m.mh.msg_class;
2376 if (mc == BFI_MC_IOC) {
2377 bfa_ioc_isr(ioc, &m);
2378 return;
2379 }
Jing Huang7725ccf2009-09-23 17:46:15 -07002380
Krishna Gudipati8b070b42011-06-13 15:52:40 -07002381 if ((mc > BFI_MC_MAX) || (mod->mbhdlr[mc].cbfn == NULL))
2382 return;
2383
2384 mod->mbhdlr[mc].cbfn(mod->mbhdlr[mc].cbarg, &m);
Jing Huang7725ccf2009-09-23 17:46:15 -07002385 }
2386
Krishna Gudipati8b070b42011-06-13 15:52:40 -07002387 bfa_ioc_lpu_read_stat(ioc);
Jing Huang7725ccf2009-09-23 17:46:15 -07002388
Krishna Gudipati8b070b42011-06-13 15:52:40 -07002389 /*
2390 * Try to send pending mailbox commands
2391 */
2392 bfa_ioc_mbox_poll(ioc);
Jing Huang7725ccf2009-09-23 17:46:15 -07002393}
2394
2395void
2396bfa_ioc_error_isr(struct bfa_ioc_s *ioc)
2397{
Krishna Gudipati5a0adae2011-06-24 20:22:56 -07002398 bfa_ioc_stats(ioc, ioc_hbfails);
2399 ioc->stats.hb_count = ioc->hb_count;
Jing Huang7725ccf2009-09-23 17:46:15 -07002400 bfa_fsm_send_event(ioc, IOC_E_HWERROR);
2401}
2402
Jing Huang5fbe25c2010-10-18 17:17:23 -07002403/*
Jing Huang7725ccf2009-09-23 17:46:15 -07002404 * return true if IOC is disabled
2405 */
2406bfa_boolean_t
2407bfa_ioc_is_disabled(struct bfa_ioc_s *ioc)
2408{
Krishna Gudipatia36c61f2010-09-15 11:50:55 -07002409 return bfa_fsm_cmp_state(ioc, bfa_ioc_sm_disabling) ||
2410 bfa_fsm_cmp_state(ioc, bfa_ioc_sm_disabled);
Jing Huang7725ccf2009-09-23 17:46:15 -07002411}
2412
Jing Huang5fbe25c2010-10-18 17:17:23 -07002413/*
Krishna Gudipatia7141342011-06-24 20:23:19 -07002414 * Return TRUE if IOC is in acquiring address state
2415 */
2416bfa_boolean_t
2417bfa_ioc_is_acq_addr(struct bfa_ioc_s *ioc)
2418{
2419 return bfa_fsm_cmp_state(ioc, bfa_ioc_sm_acq_addr);
2420}
2421
2422/*
Jing Huang7725ccf2009-09-23 17:46:15 -07002423 * return true if IOC firmware is different.
2424 */
2425bfa_boolean_t
2426bfa_ioc_fw_mismatch(struct bfa_ioc_s *ioc)
2427{
Krishna Gudipatia36c61f2010-09-15 11:50:55 -07002428 return bfa_fsm_cmp_state(ioc, bfa_ioc_sm_reset) ||
2429 bfa_fsm_cmp_state(&ioc->iocpf, bfa_iocpf_sm_fwcheck) ||
2430 bfa_fsm_cmp_state(&ioc->iocpf, bfa_iocpf_sm_mismatch);
Jing Huang7725ccf2009-09-23 17:46:15 -07002431}
2432
2433#define bfa_ioc_state_disabled(__sm) \
2434 (((__sm) == BFI_IOC_UNINIT) || \
2435 ((__sm) == BFI_IOC_INITING) || \
2436 ((__sm) == BFI_IOC_HWINIT) || \
2437 ((__sm) == BFI_IOC_DISABLED) || \
Krishna Gudipati0a20de42010-03-05 19:34:20 -08002438 ((__sm) == BFI_IOC_FAIL) || \
Jing Huang7725ccf2009-09-23 17:46:15 -07002439 ((__sm) == BFI_IOC_CFG_DISABLED))
2440
Jing Huang5fbe25c2010-10-18 17:17:23 -07002441/*
Jing Huang7725ccf2009-09-23 17:46:15 -07002442 * Check if adapter is disabled -- both IOCs should be in a disabled
2443 * state.
2444 */
2445bfa_boolean_t
2446bfa_ioc_adapter_is_disabled(struct bfa_ioc_s *ioc)
2447{
Krishna Gudipatia36c61f2010-09-15 11:50:55 -07002448 u32 ioc_state;
Jing Huang7725ccf2009-09-23 17:46:15 -07002449
2450 if (!bfa_fsm_cmp_state(ioc, bfa_ioc_sm_disabled))
2451 return BFA_FALSE;
2452
Krishna Gudipati11189202011-06-13 15:50:35 -07002453 ioc_state = readl(ioc->ioc_regs.ioc_fwstate);
Jing Huang7725ccf2009-09-23 17:46:15 -07002454 if (!bfa_ioc_state_disabled(ioc_state))
2455 return BFA_FALSE;
2456
Krishna Gudipatia36c61f2010-09-15 11:50:55 -07002457 if (ioc->pcidev.device_id != BFA_PCI_DEVICE_ID_FC_8G1P) {
Krishna Gudipati11189202011-06-13 15:50:35 -07002458 ioc_state = readl(ioc->ioc_regs.alt_ioc_fwstate);
Krishna Gudipatia36c61f2010-09-15 11:50:55 -07002459 if (!bfa_ioc_state_disabled(ioc_state))
2460 return BFA_FALSE;
2461 }
Jing Huang7725ccf2009-09-23 17:46:15 -07002462
2463 return BFA_TRUE;
2464}
2465
Jing Huang8f4bfad2010-12-26 21:50:10 -08002466/*
Krishna Gudipatif1d584d2010-12-13 16:17:11 -08002467 * Reset IOC fwstate registers.
2468 */
2469void
2470bfa_ioc_reset_fwstate(struct bfa_ioc_s *ioc)
2471{
2472 writel(BFI_IOC_UNINIT, ioc->ioc_regs.ioc_fwstate);
2473 writel(BFI_IOC_UNINIT, ioc->ioc_regs.alt_ioc_fwstate);
2474}
2475
Jing Huang7725ccf2009-09-23 17:46:15 -07002476#define BFA_MFG_NAME "Brocade"
2477void
2478bfa_ioc_get_adapter_attr(struct bfa_ioc_s *ioc,
2479 struct bfa_adapter_attr_s *ad_attr)
2480{
Krishna Gudipatia36c61f2010-09-15 11:50:55 -07002481 struct bfi_ioc_attr_s *ioc_attr;
Jing Huang7725ccf2009-09-23 17:46:15 -07002482
2483 ioc_attr = ioc->attr;
Jing Huang7725ccf2009-09-23 17:46:15 -07002484
Krishna Gudipati0a4b1fc2010-03-05 19:37:57 -08002485 bfa_ioc_get_adapter_serial_num(ioc, ad_attr->serial_num);
2486 bfa_ioc_get_adapter_fw_ver(ioc, ad_attr->fw_ver);
2487 bfa_ioc_get_adapter_optrom_ver(ioc, ad_attr->optrom_ver);
2488 bfa_ioc_get_adapter_manufacturer(ioc, ad_attr->manufacturer);
Jing Huang6a18b162010-10-18 17:08:54 -07002489 memcpy(&ad_attr->vpd, &ioc_attr->vpd,
Jing Huang7725ccf2009-09-23 17:46:15 -07002490 sizeof(struct bfa_mfg_vpd_s));
2491
Krishna Gudipati0a4b1fc2010-03-05 19:37:57 -08002492 ad_attr->nports = bfa_ioc_get_nports(ioc);
2493 ad_attr->max_speed = bfa_ioc_speed_sup(ioc);
Jing Huang7725ccf2009-09-23 17:46:15 -07002494
Krishna Gudipati0a4b1fc2010-03-05 19:37:57 -08002495 bfa_ioc_get_adapter_model(ioc, ad_attr->model);
2496 /* For now, model descr uses same model string */
2497 bfa_ioc_get_adapter_model(ioc, ad_attr->model_descr);
Jing Huang7725ccf2009-09-23 17:46:15 -07002498
Jing Huanged969322010-07-08 19:45:56 -07002499 ad_attr->card_type = ioc_attr->card_type;
2500 ad_attr->is_mezz = bfa_mfg_is_mezz(ioc_attr->card_type);
2501
Jing Huang7725ccf2009-09-23 17:46:15 -07002502 if (BFI_ADAPTER_IS_SPECIAL(ioc_attr->adapter_prop))
2503 ad_attr->prototype = 1;
2504 else
2505 ad_attr->prototype = 0;
2506
Maggie Zhangf7f738122010-12-09 19:08:43 -08002507 ad_attr->pwwn = ioc->attr->pwwn;
Krishna Gudipatia36c61f2010-09-15 11:50:55 -07002508 ad_attr->mac = bfa_ioc_get_mac(ioc);
Jing Huang7725ccf2009-09-23 17:46:15 -07002509
2510 ad_attr->pcie_gen = ioc_attr->pcie_gen;
2511 ad_attr->pcie_lanes = ioc_attr->pcie_lanes;
2512 ad_attr->pcie_lanes_orig = ioc_attr->pcie_lanes_orig;
2513 ad_attr->asic_rev = ioc_attr->asic_rev;
Krishna Gudipati0a4b1fc2010-03-05 19:37:57 -08002514
2515 bfa_ioc_get_pci_chip_rev(ioc, ad_attr->hw_ver);
Jing Huang7725ccf2009-09-23 17:46:15 -07002516
Krishna Gudipati11189202011-06-13 15:50:35 -07002517 ad_attr->cna_capable = bfa_ioc_is_cna(ioc);
2518 ad_attr->trunk_capable = (ad_attr->nports > 1) &&
2519 !bfa_ioc_is_cna(ioc) && !ad_attr->is_mezz;
Jing Huang7725ccf2009-09-23 17:46:15 -07002520}
2521
Krishna Gudipati2993cc72010-03-05 19:36:47 -08002522enum bfa_ioc_type_e
2523bfa_ioc_get_type(struct bfa_ioc_s *ioc)
2524{
Krishna Gudipati11189202011-06-13 15:50:35 -07002525 if (ioc->clscode == BFI_PCIFN_CLASS_ETH)
Krishna Gudipati2993cc72010-03-05 19:36:47 -08002526 return BFA_IOC_TYPE_LL;
Krishna Gudipati11189202011-06-13 15:50:35 -07002527
2528 WARN_ON(ioc->clscode != BFI_PCIFN_CLASS_FC);
2529
Krishna Gudipati5a0adae2011-06-24 20:22:56 -07002530 return (ioc->attr->port_mode == BFI_PORT_MODE_FC)
Krishna Gudipati11189202011-06-13 15:50:35 -07002531 ? BFA_IOC_TYPE_FC : BFA_IOC_TYPE_FCoE;
Krishna Gudipati2993cc72010-03-05 19:36:47 -08002532}
2533
Jing Huang7725ccf2009-09-23 17:46:15 -07002534void
Krishna Gudipati0a4b1fc2010-03-05 19:37:57 -08002535bfa_ioc_get_adapter_serial_num(struct bfa_ioc_s *ioc, char *serial_num)
2536{
Jing Huang6a18b162010-10-18 17:08:54 -07002537 memset((void *)serial_num, 0, BFA_ADAPTER_SERIAL_NUM_LEN);
2538 memcpy((void *)serial_num,
Krishna Gudipati0a4b1fc2010-03-05 19:37:57 -08002539 (void *)ioc->attr->brcd_serialnum,
2540 BFA_ADAPTER_SERIAL_NUM_LEN);
2541}
2542
2543void
2544bfa_ioc_get_adapter_fw_ver(struct bfa_ioc_s *ioc, char *fw_ver)
2545{
Jing Huang6a18b162010-10-18 17:08:54 -07002546 memset((void *)fw_ver, 0, BFA_VERSION_LEN);
2547 memcpy(fw_ver, ioc->attr->fw_version, BFA_VERSION_LEN);
Krishna Gudipati0a4b1fc2010-03-05 19:37:57 -08002548}
2549
2550void
2551bfa_ioc_get_pci_chip_rev(struct bfa_ioc_s *ioc, char *chip_rev)
2552{
Jing Huangd4b671c2010-12-26 21:46:35 -08002553 WARN_ON(!chip_rev);
Krishna Gudipati0a4b1fc2010-03-05 19:37:57 -08002554
Jing Huang6a18b162010-10-18 17:08:54 -07002555 memset((void *)chip_rev, 0, BFA_IOC_CHIP_REV_LEN);
Krishna Gudipati0a4b1fc2010-03-05 19:37:57 -08002556
2557 chip_rev[0] = 'R';
2558 chip_rev[1] = 'e';
2559 chip_rev[2] = 'v';
2560 chip_rev[3] = '-';
2561 chip_rev[4] = ioc->attr->asic_rev;
2562 chip_rev[5] = '\0';
2563}
2564
2565void
2566bfa_ioc_get_adapter_optrom_ver(struct bfa_ioc_s *ioc, char *optrom_ver)
2567{
Jing Huang6a18b162010-10-18 17:08:54 -07002568 memset((void *)optrom_ver, 0, BFA_VERSION_LEN);
2569 memcpy(optrom_ver, ioc->attr->optrom_version,
Krishna Gudipatia36c61f2010-09-15 11:50:55 -07002570 BFA_VERSION_LEN);
Krishna Gudipati0a4b1fc2010-03-05 19:37:57 -08002571}
2572
2573void
2574bfa_ioc_get_adapter_manufacturer(struct bfa_ioc_s *ioc, char *manufacturer)
2575{
Jing Huang6a18b162010-10-18 17:08:54 -07002576 memset((void *)manufacturer, 0, BFA_ADAPTER_MFG_NAME_LEN);
2577 memcpy(manufacturer, BFA_MFG_NAME, BFA_ADAPTER_MFG_NAME_LEN);
Krishna Gudipati0a4b1fc2010-03-05 19:37:57 -08002578}
2579
2580void
2581bfa_ioc_get_adapter_model(struct bfa_ioc_s *ioc, char *model)
2582{
Krishna Gudipatia36c61f2010-09-15 11:50:55 -07002583 struct bfi_ioc_attr_s *ioc_attr;
Krishna Gudipati0a4b1fc2010-03-05 19:37:57 -08002584
Jing Huangd4b671c2010-12-26 21:46:35 -08002585 WARN_ON(!model);
Jing Huang6a18b162010-10-18 17:08:54 -07002586 memset((void *)model, 0, BFA_ADAPTER_MODEL_NAME_LEN);
Krishna Gudipati0a4b1fc2010-03-05 19:37:57 -08002587
2588 ioc_attr = ioc->attr;
2589
Krishna Gudipati10a07372011-06-24 20:23:38 -07002590 snprintf(model, BFA_ADAPTER_MODEL_NAME_LEN, "%s-%u",
Krishna Gudipati8b070b42011-06-13 15:52:40 -07002591 BFA_MFG_NAME, ioc_attr->card_type);
Krishna Gudipati0a4b1fc2010-03-05 19:37:57 -08002592}
2593
2594enum bfa_ioc_state
2595bfa_ioc_get_state(struct bfa_ioc_s *ioc)
2596{
Krishna Gudipatia36c61f2010-09-15 11:50:55 -07002597 enum bfa_iocpf_state iocpf_st;
2598 enum bfa_ioc_state ioc_st = bfa_sm_to_state(ioc_sm_table, ioc->fsm);
2599
2600 if (ioc_st == BFA_IOC_ENABLING ||
2601 ioc_st == BFA_IOC_FAIL || ioc_st == BFA_IOC_INITFAIL) {
2602
2603 iocpf_st = bfa_sm_to_state(iocpf_sm_table, ioc->iocpf.fsm);
2604
2605 switch (iocpf_st) {
2606 case BFA_IOCPF_SEMWAIT:
2607 ioc_st = BFA_IOC_SEMWAIT;
2608 break;
2609
2610 case BFA_IOCPF_HWINIT:
2611 ioc_st = BFA_IOC_HWINIT;
2612 break;
2613
2614 case BFA_IOCPF_FWMISMATCH:
2615 ioc_st = BFA_IOC_FWMISMATCH;
2616 break;
2617
2618 case BFA_IOCPF_FAIL:
2619 ioc_st = BFA_IOC_FAIL;
2620 break;
2621
2622 case BFA_IOCPF_INITFAIL:
2623 ioc_st = BFA_IOC_INITFAIL;
2624 break;
2625
2626 default:
2627 break;
2628 }
2629 }
2630
2631 return ioc_st;
Krishna Gudipati0a4b1fc2010-03-05 19:37:57 -08002632}
2633
2634void
Jing Huang7725ccf2009-09-23 17:46:15 -07002635bfa_ioc_get_attr(struct bfa_ioc_s *ioc, struct bfa_ioc_attr_s *ioc_attr)
2636{
Jing Huang6a18b162010-10-18 17:08:54 -07002637 memset((void *)ioc_attr, 0, sizeof(struct bfa_ioc_attr_s));
Jing Huang7725ccf2009-09-23 17:46:15 -07002638
Krishna Gudipati0a4b1fc2010-03-05 19:37:57 -08002639 ioc_attr->state = bfa_ioc_get_state(ioc);
Jing Huang7725ccf2009-09-23 17:46:15 -07002640 ioc_attr->port_id = ioc->port_id;
Krishna Gudipati1a4d8e12011-06-24 20:22:28 -07002641 ioc_attr->port_mode = ioc->port_mode;
2642 ioc_attr->port_mode_cfg = ioc->port_mode_cfg;
2643 ioc_attr->cap_bm = ioc->ad_cap_bm;
Jing Huang7725ccf2009-09-23 17:46:15 -07002644
Krishna Gudipati2993cc72010-03-05 19:36:47 -08002645 ioc_attr->ioc_type = bfa_ioc_get_type(ioc);
Jing Huang7725ccf2009-09-23 17:46:15 -07002646
2647 bfa_ioc_get_adapter_attr(ioc, &ioc_attr->adapter_attr);
2648
2649 ioc_attr->pci_attr.device_id = ioc->pcidev.device_id;
2650 ioc_attr->pci_attr.pcifn = ioc->pcidev.pci_func;
Krishna Gudipati0a4b1fc2010-03-05 19:37:57 -08002651 bfa_ioc_get_pci_chip_rev(ioc, ioc_attr->pci_attr.chip_rev);
Jing Huang7725ccf2009-09-23 17:46:15 -07002652}
2653
Jing Huang7725ccf2009-09-23 17:46:15 -07002654mac_t
2655bfa_ioc_get_mac(struct bfa_ioc_s *ioc)
2656{
Jing Huang15b64a82010-07-08 19:48:12 -07002657 /*
Krishna Gudipatia36c61f2010-09-15 11:50:55 -07002658 * Check the IOC type and return the appropriate MAC
Jing Huang15b64a82010-07-08 19:48:12 -07002659 */
2660 if (bfa_ioc_get_type(ioc) == BFA_IOC_TYPE_FCoE)
Krishna Gudipatia36c61f2010-09-15 11:50:55 -07002661 return ioc->attr->fcoe_mac;
Jing Huang15b64a82010-07-08 19:48:12 -07002662 else
2663 return ioc->attr->mac;
2664}
2665
Jing Huang15b64a82010-07-08 19:48:12 -07002666mac_t
2667bfa_ioc_get_mfg_mac(struct bfa_ioc_s *ioc)
2668{
Krishna Gudipatia36c61f2010-09-15 11:50:55 -07002669 mac_t m;
Jing Huang7725ccf2009-09-23 17:46:15 -07002670
Krishna Gudipatia36c61f2010-09-15 11:50:55 -07002671 m = ioc->attr->mfg_mac;
2672 if (bfa_mfg_is_old_wwn_mac_model(ioc->attr->card_type))
2673 m.mac[MAC_ADDRLEN - 1] += bfa_ioc_pcifn(ioc);
2674 else
2675 bfa_mfg_increment_wwn_mac(&(m.mac[MAC_ADDRLEN-3]),
2676 bfa_ioc_pcifn(ioc));
Jing Huang7725ccf2009-09-23 17:46:15 -07002677
Krishna Gudipatia36c61f2010-09-15 11:50:55 -07002678 return m;
Jing Huang7725ccf2009-09-23 17:46:15 -07002679}
2680
Jing Huang5fbe25c2010-10-18 17:17:23 -07002681/*
Jing Huang7725ccf2009-09-23 17:46:15 -07002682 * Retrieve saved firmware trace from a prior IOC failure.
2683 */
2684bfa_status_t
2685bfa_ioc_debug_fwsave(struct bfa_ioc_s *ioc, void *trcdata, int *trclen)
2686{
Krishna Gudipatia36c61f2010-09-15 11:50:55 -07002687 int tlen;
Jing Huang7725ccf2009-09-23 17:46:15 -07002688
2689 if (ioc->dbg_fwsave_len == 0)
2690 return BFA_STATUS_ENOFSAVE;
2691
2692 tlen = *trclen;
2693 if (tlen > ioc->dbg_fwsave_len)
2694 tlen = ioc->dbg_fwsave_len;
2695
Jing Huang6a18b162010-10-18 17:08:54 -07002696 memcpy(trcdata, ioc->dbg_fwsave, tlen);
Jing Huang7725ccf2009-09-23 17:46:15 -07002697 *trclen = tlen;
2698 return BFA_STATUS_OK;
2699}
2700
Krishna Gudipati738c9e62010-03-05 19:36:19 -08002701
Jing Huang5fbe25c2010-10-18 17:17:23 -07002702/*
Jing Huang7725ccf2009-09-23 17:46:15 -07002703 * Retrieve the current firmware trace directly from IOC SMEM.
2704 */
2705bfa_status_t
2706bfa_ioc_debug_fwtrc(struct bfa_ioc_s *ioc, void *trcdata, int *trclen)
2707{
Krishna Gudipatia36c61f2010-09-15 11:50:55 -07002708 u32 loff = BFA_DBG_FWTRC_OFF(bfa_ioc_portid(ioc));
2709 int tlen;
2710 bfa_status_t status;
Jing Huang7725ccf2009-09-23 17:46:15 -07002711
2712 bfa_trc(ioc, *trclen);
2713
Jing Huang7725ccf2009-09-23 17:46:15 -07002714 tlen = *trclen;
2715 if (tlen > BFA_DBG_FWTRC_LEN)
2716 tlen = BFA_DBG_FWTRC_LEN;
Jing Huang7725ccf2009-09-23 17:46:15 -07002717
Krishna Gudipatia36c61f2010-09-15 11:50:55 -07002718 status = bfa_ioc_smem_read(ioc, trcdata, loff, tlen);
2719 *trclen = tlen;
2720 return status;
2721}
Jing Huang7725ccf2009-09-23 17:46:15 -07002722
Krishna Gudipatia36c61f2010-09-15 11:50:55 -07002723static void
2724bfa_ioc_send_fwsync(struct bfa_ioc_s *ioc)
2725{
2726 struct bfa_mbox_cmd_s cmd;
2727 struct bfi_ioc_ctrl_req_s *req = (struct bfi_ioc_ctrl_req_s *) cmd.msg;
Jing Huang7725ccf2009-09-23 17:46:15 -07002728
Krishna Gudipatia36c61f2010-09-15 11:50:55 -07002729 bfi_h2i_set(req->mh, BFI_MC_IOC, BFI_IOC_H2I_DBG_SYNC,
2730 bfa_ioc_portid(ioc));
Krishna Gudipatid37779f2011-06-13 15:42:10 -07002731 req->clscode = cpu_to_be16(ioc->clscode);
Krishna Gudipatia36c61f2010-09-15 11:50:55 -07002732 bfa_ioc_mbox_queue(ioc, &cmd);
2733}
Krishna Gudipati0a20de42010-03-05 19:34:20 -08002734
Krishna Gudipatia36c61f2010-09-15 11:50:55 -07002735static void
2736bfa_ioc_fwsync(struct bfa_ioc_s *ioc)
2737{
2738 u32 fwsync_iter = 1000;
2739
2740 bfa_ioc_send_fwsync(ioc);
2741
Jing Huang5fbe25c2010-10-18 17:17:23 -07002742 /*
Krishna Gudipatia36c61f2010-09-15 11:50:55 -07002743 * After sending a fw sync mbox command wait for it to
2744 * take effect. We will not wait for a response because
2745 * 1. fw_sync mbox cmd doesn't have a response.
2746 * 2. Even if we implement that, interrupts might not
2747 * be enabled when we call this function.
2748 * So, just keep checking if any mbox cmd is pending, and
2749 * after waiting for a reasonable amount of time, go ahead.
2750 * It is possible that fw has crashed and the mbox command
2751 * is never acknowledged.
Krishna Gudipati0a20de42010-03-05 19:34:20 -08002752 */
Krishna Gudipatia36c61f2010-09-15 11:50:55 -07002753 while (bfa_ioc_mbox_cmd_pending(ioc) && fwsync_iter > 0)
2754 fwsync_iter--;
2755}
Krishna Gudipati0a20de42010-03-05 19:34:20 -08002756
Jing Huang5fbe25c2010-10-18 17:17:23 -07002757/*
Krishna Gudipatia36c61f2010-09-15 11:50:55 -07002758 * Dump firmware smem
2759 */
2760bfa_status_t
2761bfa_ioc_debug_fwcore(struct bfa_ioc_s *ioc, void *buf,
2762 u32 *offset, int *buflen)
2763{
2764 u32 loff;
2765 int dlen;
2766 bfa_status_t status;
2767 u32 smem_len = BFA_IOC_FW_SMEM_SIZE(ioc);
Jing Huang7725ccf2009-09-23 17:46:15 -07002768
Krishna Gudipatia36c61f2010-09-15 11:50:55 -07002769 if (*offset >= smem_len) {
2770 *offset = *buflen = 0;
2771 return BFA_STATUS_EINVAL;
2772 }
2773
2774 loff = *offset;
2775 dlen = *buflen;
2776
Jing Huang5fbe25c2010-10-18 17:17:23 -07002777 /*
Krishna Gudipatia36c61f2010-09-15 11:50:55 -07002778 * First smem read, sync smem before proceeding
2779 * No need to sync before reading every chunk.
2780 */
2781 if (loff == 0)
2782 bfa_ioc_fwsync(ioc);
2783
2784 if ((loff + dlen) >= smem_len)
2785 dlen = smem_len - loff;
2786
2787 status = bfa_ioc_smem_read(ioc, buf, loff, dlen);
2788
2789 if (status != BFA_STATUS_OK) {
2790 *offset = *buflen = 0;
2791 return status;
2792 }
2793
2794 *offset += dlen;
2795
2796 if (*offset >= smem_len)
2797 *offset = 0;
2798
2799 *buflen = dlen;
2800
2801 return status;
2802}
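
/*
 * Usage sketch (illustration only, not part of the driver): dump the
 * complete firmware smem by calling bfa_ioc_debug_fwcore() in a loop.
 * Each call reads at most one caller-sized chunk, advances *offset and
 * rewinds it to 0 once the end of smem is reached.  'chunk_sz', 'buf'
 * and the consuming step are assumptions of this sketch:
 *
 *	static bfa_status_t my_fwcore_dump(struct bfa_ioc_s *ioc,
 *					   void *buf, int chunk_sz)
 *	{
 *		u32 off = 0;
 *		int len;
 *		bfa_status_t status;
 *
 *		do {
 *			len = chunk_sz;
 *			status = bfa_ioc_debug_fwcore(ioc, buf, &off, &len);
 *			if (status != BFA_STATUS_OK)
 *				return status;
 *			// consume 'len' bytes of 'buf' here
 *		} while (off != 0);
 *
 *		return BFA_STATUS_OK;
 *	}
 */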
2803
Jing Huang5fbe25c2010-10-18 17:17:23 -07002804/*
Krishna Gudipatia36c61f2010-09-15 11:50:55 -07002805 * Firmware statistics
2806 */
2807bfa_status_t
2808bfa_ioc_fw_stats_get(struct bfa_ioc_s *ioc, void *stats)
2809{
2810 u32 loff = BFI_IOC_FWSTATS_OFF + \
2811 BFI_IOC_FWSTATS_SZ * (bfa_ioc_portid(ioc));
2812 int tlen;
2813 bfa_status_t status;
2814
2815 if (ioc->stats_busy) {
2816 bfa_trc(ioc, ioc->stats_busy);
2817 return BFA_STATUS_DEVBUSY;
2818 }
2819 ioc->stats_busy = BFA_TRUE;
2820
2821 tlen = sizeof(struct bfa_fw_stats_s);
2822 status = bfa_ioc_smem_read(ioc, stats, loff, tlen);
2823
2824 ioc->stats_busy = BFA_FALSE;
2825 return status;
2826}
2827
2828bfa_status_t
2829bfa_ioc_fw_stats_clear(struct bfa_ioc_s *ioc)
2830{
2831 u32 loff = BFI_IOC_FWSTATS_OFF + \
2832 BFI_IOC_FWSTATS_SZ * (bfa_ioc_portid(ioc));
2833 int tlen;
2834 bfa_status_t status;
2835
2836 if (ioc->stats_busy) {
2837 bfa_trc(ioc, ioc->stats_busy);
2838 return BFA_STATUS_DEVBUSY;
2839 }
2840 ioc->stats_busy = BFA_TRUE;
2841
2842 tlen = sizeof(struct bfa_fw_stats_s);
2843 status = bfa_ioc_smem_clr(ioc, loff, tlen);
2844
2845 ioc->stats_busy = BFA_FALSE;
2846 return status;
Jing Huang7725ccf2009-09-23 17:46:15 -07002847}
2848
Jing Huang5fbe25c2010-10-18 17:17:23 -07002849/*
Jing Huang7725ccf2009-09-23 17:46:15 -07002850 * Save firmware trace if configured.
2851 */
2852static void
Krishna Gudipati4e78efe2010-12-13 16:16:09 -08002853bfa_ioc_debug_save_ftrc(struct bfa_ioc_s *ioc)
Jing Huang7725ccf2009-09-23 17:46:15 -07002854{
Krishna Gudipatia36c61f2010-09-15 11:50:55 -07002855 int tlen;
Jing Huang7725ccf2009-09-23 17:46:15 -07002856
Krishna Gudipati4e78efe2010-12-13 16:16:09 -08002857 if (ioc->dbg_fwsave_once) {
2858 ioc->dbg_fwsave_once = BFA_FALSE;
2859 if (ioc->dbg_fwsave_len) {
2860 tlen = ioc->dbg_fwsave_len;
2861 bfa_ioc_debug_fwtrc(ioc, ioc->dbg_fwsave, &tlen);
2862 }
Jing Huang7725ccf2009-09-23 17:46:15 -07002863 }
2864}
2865
Jing Huang5fbe25c2010-10-18 17:17:23 -07002866/*
Jing Huang7725ccf2009-09-23 17:46:15 -07002867 * Firmware failure detected. Start recovery actions.
2868 */
2869static void
2870bfa_ioc_recover(struct bfa_ioc_s *ioc)
2871{
Jing Huang7725ccf2009-09-23 17:46:15 -07002872 bfa_ioc_stats(ioc, ioc_hbfails);
Krishna Gudipati5a0adae2011-06-24 20:22:56 -07002873 ioc->stats.hb_count = ioc->hb_count;
Jing Huang7725ccf2009-09-23 17:46:15 -07002874 bfa_fsm_send_event(ioc, IOC_E_HBFAIL);
2875}
2876
Jing Huang7725ccf2009-09-23 17:46:15 -07002877static void
Jing Huang07b28382010-07-08 19:59:24 -07002878bfa_ioc_check_attr_wwns(struct bfa_ioc_s *ioc)
Jing Huang7725ccf2009-09-23 17:46:15 -07002879{
Jing Huang07b28382010-07-08 19:59:24 -07002880 if (bfa_ioc_get_type(ioc) == BFA_IOC_TYPE_LL)
2881 return;
Jing Huang7725ccf2009-09-23 17:46:15 -07002882}
2883
Jing Huang5fbe25c2010-10-18 17:17:23 -07002884/*
Maggie Zhangdf0f1932010-12-09 19:07:46 -08002885 * BFA IOC PF private functions
Krishna Gudipatia36c61f2010-09-15 11:50:55 -07002886 */
Krishna Gudipatia36c61f2010-09-15 11:50:55 -07002887static void
2888bfa_iocpf_timeout(void *ioc_arg)
2889{
2890 struct bfa_ioc_s *ioc = (struct bfa_ioc_s *) ioc_arg;
2891
2892 bfa_trc(ioc, 0);
2893 bfa_fsm_send_event(&ioc->iocpf, IOCPF_E_TIMEOUT);
2894}
2895
2896static void
2897bfa_iocpf_sem_timeout(void *ioc_arg)
2898{
2899 struct bfa_ioc_s *ioc = (struct bfa_ioc_s *) ioc_arg;
2900
2901 bfa_ioc_hw_sem_get(ioc);
2902}
2903
Krishna Gudipati775c7742011-06-13 15:52:12 -07002904static void
2905bfa_ioc_poll_fwinit(struct bfa_ioc_s *ioc)
2906{
2907 u32 fwstate = readl(ioc->ioc_regs.ioc_fwstate);
2908
2909 bfa_trc(ioc, fwstate);
2910
2911 if (fwstate == BFI_IOC_DISABLED) {
2912 bfa_fsm_send_event(&ioc->iocpf, IOCPF_E_FWREADY);
2913 return;
2914 }
2915
2916	if (ioc->iocpf.poll_time >= BFA_IOC_TOV) {
2917		bfa_iocpf_timeout(ioc);
2918	} else {
2919 ioc->iocpf.poll_time += BFA_IOC_POLL_TOV;
2920 bfa_iocpf_poll_timer_start(ioc);
2921 }
2922}
2923
2924static void
2925bfa_iocpf_poll_timeout(void *ioc_arg)
2926{
2927 struct bfa_ioc_s *ioc = (struct bfa_ioc_s *) ioc_arg;
2928
2929 bfa_ioc_poll_fwinit(ioc);
2930}
2931
Jing Huang5fbe25c2010-10-18 17:17:23 -07002932/*
Krishna Gudipatia36c61f2010-09-15 11:50:55 -07002933 * bfa timer function
2934 */
2935void
Krishna Gudipatia36c61f2010-09-15 11:50:55 -07002936bfa_timer_beat(struct bfa_timer_mod_s *mod)
2937{
2938 struct list_head *qh = &mod->timer_q;
2939 struct list_head *qe, *qe_next;
2940 struct bfa_timer_s *elem;
2941 struct list_head timedout_q;
2942
2943 INIT_LIST_HEAD(&timedout_q);
2944
2945 qe = bfa_q_next(qh);
2946
2947 while (qe != qh) {
2948 qe_next = bfa_q_next(qe);
2949
2950 elem = (struct bfa_timer_s *) qe;
2951 if (elem->timeout <= BFA_TIMER_FREQ) {
2952 elem->timeout = 0;
2953 list_del(&elem->qe);
2954 list_add_tail(&elem->qe, &timedout_q);
2955 } else {
2956 elem->timeout -= BFA_TIMER_FREQ;
2957 }
2958
2959 qe = qe_next; /* go to next elem */
2960 }
2961
2962 /*
2963 * Pop all the timeout entries
2964 */
2965 while (!list_empty(&timedout_q)) {
2966 bfa_q_deq(&timedout_q, &elem);
2967 elem->timercb(elem->arg);
2968 }
2969}
2970
Jing Huang5fbe25c2010-10-18 17:17:23 -07002971/*
Krishna Gudipatia36c61f2010-09-15 11:50:55 -07002972 * Should be called with lock protection
2973 */
2974void
2975bfa_timer_begin(struct bfa_timer_mod_s *mod, struct bfa_timer_s *timer,
2976 void (*timercb) (void *), void *arg, unsigned int timeout)
2977{
2978
Jing Huangd4b671c2010-12-26 21:46:35 -08002979 WARN_ON(timercb == NULL);
2980 WARN_ON(bfa_q_is_on_q(&mod->timer_q, timer));
Krishna Gudipatia36c61f2010-09-15 11:50:55 -07002981
2982 timer->timeout = timeout;
2983 timer->timercb = timercb;
2984 timer->arg = arg;
2985
2986 list_add_tail(&timer->qe, &mod->timer_q);
2987}
2988
Jing Huang5fbe25c2010-10-18 17:17:23 -07002989/*
Krishna Gudipatia36c61f2010-09-15 11:50:55 -07002990 * Should be called with lock protection
2991 */
2992void
2993bfa_timer_stop(struct bfa_timer_s *timer)
2994{
Jing Huangd4b671c2010-12-26 21:46:35 -08002995 WARN_ON(list_empty(&timer->qe));
Krishna Gudipatia36c61f2010-09-15 11:50:55 -07002996
2997 list_del(&timer->qe);
2998}
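
/*
 * Usage sketch (illustrative only): bfa_timer_begin() arms a one-shot
 * timer; periodic behaviour is obtained by re-arming the timer from its
 * own callback, which is how the IOC heartbeat and poll timers in this
 * file work.  'struct my_mod_s' and its fields are hypothetical:
 *
 *	static void my_timeout(void *arg)
 *	{
 *		struct my_mod_s *mod = arg;
 *
 *		// periodic work goes here, under the same lock that
 *		// serializes bfa_timer_beat()
 *		bfa_timer_begin(mod->timer_mod, &mod->timer, my_timeout,
 *				mod, 2 * BFA_TIMER_FREQ);
 *	}
 *
 * bfa_timer_beat() is expected to run once every BFA_TIMER_FREQ msecs
 * (it subtracts BFA_TIMER_FREQ from every pending timeout), and a timer
 * must be stopped with bfa_timer_stop() before its memory goes away.
 */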
Krishna Gudipati1a4d8e12011-06-24 20:22:28 -07002999
3000/*
3001 * ASIC block related
3002 */
3003static void
3004bfa_ablk_config_swap(struct bfa_ablk_cfg_s *cfg)
3005{
3006 struct bfa_ablk_cfg_inst_s *cfg_inst;
3007 int i, j;
3008 u16 be16;
3009 u32 be32;
3010
3011 for (i = 0; i < BFA_ABLK_MAX; i++) {
3012 cfg_inst = &cfg->inst[i];
3013 for (j = 0; j < BFA_ABLK_MAX_PFS; j++) {
3014 be16 = cfg_inst->pf_cfg[j].pers;
3015 cfg_inst->pf_cfg[j].pers = be16_to_cpu(be16);
3016 be16 = cfg_inst->pf_cfg[j].num_qpairs;
3017 cfg_inst->pf_cfg[j].num_qpairs = be16_to_cpu(be16);
3018 be16 = cfg_inst->pf_cfg[j].num_vectors;
3019 cfg_inst->pf_cfg[j].num_vectors = be16_to_cpu(be16);
3020 be32 = cfg_inst->pf_cfg[j].bw;
3021			cfg_inst->pf_cfg[j].bw = be32_to_cpu(be32);
3022 }
3023 }
3024}
3025
3026static void
3027bfa_ablk_isr(void *cbarg, struct bfi_mbmsg_s *msg)
3028{
3029 struct bfa_ablk_s *ablk = (struct bfa_ablk_s *)cbarg;
3030 struct bfi_ablk_i2h_rsp_s *rsp = (struct bfi_ablk_i2h_rsp_s *)msg;
3031 bfa_ablk_cbfn_t cbfn;
3032
3033 WARN_ON(msg->mh.msg_class != BFI_MC_ABLK);
3034 bfa_trc(ablk->ioc, msg->mh.msg_id);
3035
3036 switch (msg->mh.msg_id) {
3037 case BFI_ABLK_I2H_QUERY:
3038 if (rsp->status == BFA_STATUS_OK) {
3039 memcpy(ablk->cfg, ablk->dma_addr.kva,
3040 sizeof(struct bfa_ablk_cfg_s));
3041 bfa_ablk_config_swap(ablk->cfg);
3042 ablk->cfg = NULL;
3043 }
3044 break;
3045
3046 case BFI_ABLK_I2H_ADPT_CONFIG:
3047 case BFI_ABLK_I2H_PORT_CONFIG:
3048 /* update config port mode */
3049 ablk->ioc->port_mode_cfg = rsp->port_mode;
3050
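		/* fall through */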
3051 case BFI_ABLK_I2H_PF_DELETE:
3052 case BFI_ABLK_I2H_PF_UPDATE:
3053 case BFI_ABLK_I2H_OPTROM_ENABLE:
3054 case BFI_ABLK_I2H_OPTROM_DISABLE:
3055 /* No-op */
3056 break;
3057
3058 case BFI_ABLK_I2H_PF_CREATE:
3059 *(ablk->pcifn) = rsp->pcifn;
3060 ablk->pcifn = NULL;
3061 break;
3062
3063 default:
3064 WARN_ON(1);
3065 }
3066
3067 ablk->busy = BFA_FALSE;
3068 if (ablk->cbfn) {
3069 cbfn = ablk->cbfn;
3070 ablk->cbfn = NULL;
3071 cbfn(ablk->cbarg, rsp->status);
3072 }
3073}
3074
3075static void
3076bfa_ablk_notify(void *cbarg, enum bfa_ioc_event_e event)
3077{
3078 struct bfa_ablk_s *ablk = (struct bfa_ablk_s *)cbarg;
3079
3080 bfa_trc(ablk->ioc, event);
3081
3082 switch (event) {
3083 case BFA_IOC_E_ENABLED:
3084 WARN_ON(ablk->busy != BFA_FALSE);
3085 break;
3086
3087 case BFA_IOC_E_DISABLED:
3088 case BFA_IOC_E_FAILED:
3089 /* Fail any pending requests */
3090 ablk->pcifn = NULL;
3091 if (ablk->busy) {
3092 if (ablk->cbfn)
3093 ablk->cbfn(ablk->cbarg, BFA_STATUS_FAILED);
3094 ablk->cbfn = NULL;
3095 ablk->busy = BFA_FALSE;
3096 }
3097 break;
3098
3099 default:
3100 WARN_ON(1);
3101 break;
3102 }
3103}
3104
3105u32
3106bfa_ablk_meminfo(void)
3107{
3108 return BFA_ROUNDUP(sizeof(struct bfa_ablk_cfg_s), BFA_DMA_ALIGN_SZ);
3109}
3110
3111void
3112bfa_ablk_memclaim(struct bfa_ablk_s *ablk, u8 *dma_kva, u64 dma_pa)
3113{
3114 ablk->dma_addr.kva = dma_kva;
3115 ablk->dma_addr.pa = dma_pa;
3116}
3117
3118void
3119bfa_ablk_attach(struct bfa_ablk_s *ablk, struct bfa_ioc_s *ioc)
3120{
3121 ablk->ioc = ioc;
3122
3123 bfa_ioc_mbox_regisr(ablk->ioc, BFI_MC_ABLK, bfa_ablk_isr, ablk);
Krishna Gudipati3350d982011-06-24 20:28:37 -07003124 bfa_q_qe_init(&ablk->ioc_notify);
Krishna Gudipati1a4d8e12011-06-24 20:22:28 -07003125 bfa_ioc_notify_init(&ablk->ioc_notify, bfa_ablk_notify, ablk);
3126 list_add_tail(&ablk->ioc_notify.qe, &ablk->ioc->notify_q);
3127}
3128
3129bfa_status_t
3130bfa_ablk_query(struct bfa_ablk_s *ablk, struct bfa_ablk_cfg_s *ablk_cfg,
3131 bfa_ablk_cbfn_t cbfn, void *cbarg)
3132{
3133 struct bfi_ablk_h2i_query_s *m;
3134
3135 WARN_ON(!ablk_cfg);
3136
3137 if (!bfa_ioc_is_operational(ablk->ioc)) {
3138 bfa_trc(ablk->ioc, BFA_STATUS_IOC_FAILURE);
3139 return BFA_STATUS_IOC_FAILURE;
3140 }
3141
3142 if (ablk->busy) {
3143 bfa_trc(ablk->ioc, BFA_STATUS_DEVBUSY);
3144 return BFA_STATUS_DEVBUSY;
3145 }
3146
3147 ablk->cfg = ablk_cfg;
3148 ablk->cbfn = cbfn;
3149 ablk->cbarg = cbarg;
3150 ablk->busy = BFA_TRUE;
3151
3152 m = (struct bfi_ablk_h2i_query_s *)ablk->mb.msg;
3153 bfi_h2i_set(m->mh, BFI_MC_ABLK, BFI_ABLK_H2I_QUERY,
3154 bfa_ioc_portid(ablk->ioc));
3155 bfa_dma_be_addr_set(m->addr, ablk->dma_addr.pa);
3156 bfa_ioc_mbox_queue(ablk->ioc, &ablk->mb);
3157
3158 return BFA_STATUS_OK;
3159}
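
/*
 * Caller-side sketch (illustrative only): every bfa_ablk_* request queues
 * a single mailbox command, marks the block busy and reports completion
 * through the bfa_ablk_cbfn_t callback invoked from bfa_ablk_isr().  A
 * hypothetical caller that wants synchronous behaviour can wrap it in a
 * completion; 'drv', its fields and my_ablk_done() are assumptions of
 * this sketch (the callback prototype is assumed to match
 * bfa_ablk_cbfn_t):
 *
 *	static void my_ablk_done(void *cbarg, enum bfa_status status)
 *	{
 *		struct my_drv_s *drv = cbarg;
 *
 *		drv->ablk_status = status;
 *		complete(&drv->ablk_comp);	// cfg now in host byte order
 *	}
 *
 *	...
 *	if (bfa_ablk_query(&drv->ablk, &drv->ablk_cfg,
 *			   my_ablk_done, drv) != BFA_STATUS_OK)
 *		return;			// IOC down or a request in flight
 *	wait_for_completion(&drv->ablk_comp);
 *
 * Only one request may be outstanding per bfa_ablk_s; a second call while
 * the block is busy fails with BFA_STATUS_DEVBUSY.
 */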
3160
3161bfa_status_t
3162bfa_ablk_pf_create(struct bfa_ablk_s *ablk, u16 *pcifn,
3163 u8 port, enum bfi_pcifn_class personality, int bw,
3164 bfa_ablk_cbfn_t cbfn, void *cbarg)
3165{
3166 struct bfi_ablk_h2i_pf_req_s *m;
3167
3168 if (!bfa_ioc_is_operational(ablk->ioc)) {
3169 bfa_trc(ablk->ioc, BFA_STATUS_IOC_FAILURE);
3170 return BFA_STATUS_IOC_FAILURE;
3171 }
3172
3173 if (ablk->busy) {
3174 bfa_trc(ablk->ioc, BFA_STATUS_DEVBUSY);
3175 return BFA_STATUS_DEVBUSY;
3176 }
3177
3178 ablk->pcifn = pcifn;
3179 ablk->cbfn = cbfn;
3180 ablk->cbarg = cbarg;
3181 ablk->busy = BFA_TRUE;
3182
3183 m = (struct bfi_ablk_h2i_pf_req_s *)ablk->mb.msg;
3184 bfi_h2i_set(m->mh, BFI_MC_ABLK, BFI_ABLK_H2I_PF_CREATE,
3185 bfa_ioc_portid(ablk->ioc));
3186 m->pers = cpu_to_be16((u16)personality);
3187 m->bw = cpu_to_be32(bw);
3188 m->port = port;
3189 bfa_ioc_mbox_queue(ablk->ioc, &ablk->mb);
3190
3191 return BFA_STATUS_OK;
3192}
3193
3194bfa_status_t
3195bfa_ablk_pf_delete(struct bfa_ablk_s *ablk, int pcifn,
3196 bfa_ablk_cbfn_t cbfn, void *cbarg)
3197{
3198 struct bfi_ablk_h2i_pf_req_s *m;
3199
3200 if (!bfa_ioc_is_operational(ablk->ioc)) {
3201 bfa_trc(ablk->ioc, BFA_STATUS_IOC_FAILURE);
3202 return BFA_STATUS_IOC_FAILURE;
3203 }
3204
3205 if (ablk->busy) {
3206 bfa_trc(ablk->ioc, BFA_STATUS_DEVBUSY);
3207 return BFA_STATUS_DEVBUSY;
3208 }
3209
3210 ablk->cbfn = cbfn;
3211 ablk->cbarg = cbarg;
3212 ablk->busy = BFA_TRUE;
3213
3214 m = (struct bfi_ablk_h2i_pf_req_s *)ablk->mb.msg;
3215 bfi_h2i_set(m->mh, BFI_MC_ABLK, BFI_ABLK_H2I_PF_DELETE,
3216 bfa_ioc_portid(ablk->ioc));
3217 m->pcifn = (u8)pcifn;
3218 bfa_ioc_mbox_queue(ablk->ioc, &ablk->mb);
3219
3220 return BFA_STATUS_OK;
3221}
3222
3223bfa_status_t
3224bfa_ablk_adapter_config(struct bfa_ablk_s *ablk, enum bfa_mode_s mode,
3225 int max_pf, int max_vf, bfa_ablk_cbfn_t cbfn, void *cbarg)
3226{
3227 struct bfi_ablk_h2i_cfg_req_s *m;
3228
3229 if (!bfa_ioc_is_operational(ablk->ioc)) {
3230 bfa_trc(ablk->ioc, BFA_STATUS_IOC_FAILURE);
3231 return BFA_STATUS_IOC_FAILURE;
3232 }
3233
3234 if (ablk->busy) {
3235 bfa_trc(ablk->ioc, BFA_STATUS_DEVBUSY);
3236 return BFA_STATUS_DEVBUSY;
3237 }
3238
3239 ablk->cbfn = cbfn;
3240 ablk->cbarg = cbarg;
3241 ablk->busy = BFA_TRUE;
3242
3243 m = (struct bfi_ablk_h2i_cfg_req_s *)ablk->mb.msg;
3244 bfi_h2i_set(m->mh, BFI_MC_ABLK, BFI_ABLK_H2I_ADPT_CONFIG,
3245 bfa_ioc_portid(ablk->ioc));
3246 m->mode = (u8)mode;
3247 m->max_pf = (u8)max_pf;
3248 m->max_vf = (u8)max_vf;
3249 bfa_ioc_mbox_queue(ablk->ioc, &ablk->mb);
3250
3251 return BFA_STATUS_OK;
3252}
3253
3254bfa_status_t
3255bfa_ablk_port_config(struct bfa_ablk_s *ablk, int port, enum bfa_mode_s mode,
3256 int max_pf, int max_vf, bfa_ablk_cbfn_t cbfn, void *cbarg)
3257{
3258 struct bfi_ablk_h2i_cfg_req_s *m;
3259
3260 if (!bfa_ioc_is_operational(ablk->ioc)) {
3261 bfa_trc(ablk->ioc, BFA_STATUS_IOC_FAILURE);
3262 return BFA_STATUS_IOC_FAILURE;
3263 }
3264
3265 if (ablk->busy) {
3266 bfa_trc(ablk->ioc, BFA_STATUS_DEVBUSY);
3267 return BFA_STATUS_DEVBUSY;
3268 }
3269
3270 ablk->cbfn = cbfn;
3271 ablk->cbarg = cbarg;
3272 ablk->busy = BFA_TRUE;
3273
3274 m = (struct bfi_ablk_h2i_cfg_req_s *)ablk->mb.msg;
3275 bfi_h2i_set(m->mh, BFI_MC_ABLK, BFI_ABLK_H2I_PORT_CONFIG,
3276 bfa_ioc_portid(ablk->ioc));
3277 m->port = (u8)port;
3278 m->mode = (u8)mode;
3279 m->max_pf = (u8)max_pf;
3280 m->max_vf = (u8)max_vf;
3281 bfa_ioc_mbox_queue(ablk->ioc, &ablk->mb);
3282
3283 return BFA_STATUS_OK;
3284}
3285
3286bfa_status_t
3287bfa_ablk_pf_update(struct bfa_ablk_s *ablk, int pcifn, int bw,
3288 bfa_ablk_cbfn_t cbfn, void *cbarg)
3289{
3290 struct bfi_ablk_h2i_pf_req_s *m;
3291
3292 if (!bfa_ioc_is_operational(ablk->ioc)) {
3293 bfa_trc(ablk->ioc, BFA_STATUS_IOC_FAILURE);
3294 return BFA_STATUS_IOC_FAILURE;
3295 }
3296
3297 if (ablk->busy) {
3298 bfa_trc(ablk->ioc, BFA_STATUS_DEVBUSY);
3299 return BFA_STATUS_DEVBUSY;
3300 }
3301
3302 ablk->cbfn = cbfn;
3303 ablk->cbarg = cbarg;
3304 ablk->busy = BFA_TRUE;
3305
3306 m = (struct bfi_ablk_h2i_pf_req_s *)ablk->mb.msg;
3307 bfi_h2i_set(m->mh, BFI_MC_ABLK, BFI_ABLK_H2I_PF_UPDATE,
3308 bfa_ioc_portid(ablk->ioc));
3309 m->pcifn = (u8)pcifn;
3310 m->bw = cpu_to_be32(bw);
3311 bfa_ioc_mbox_queue(ablk->ioc, &ablk->mb);
3312
3313 return BFA_STATUS_OK;
3314}
3315
3316bfa_status_t
3317bfa_ablk_optrom_en(struct bfa_ablk_s *ablk, bfa_ablk_cbfn_t cbfn, void *cbarg)
3318{
3319 struct bfi_ablk_h2i_optrom_s *m;
3320
3321 if (!bfa_ioc_is_operational(ablk->ioc)) {
3322 bfa_trc(ablk->ioc, BFA_STATUS_IOC_FAILURE);
3323 return BFA_STATUS_IOC_FAILURE;
3324 }
3325
3326 if (ablk->busy) {
3327 bfa_trc(ablk->ioc, BFA_STATUS_DEVBUSY);
3328 return BFA_STATUS_DEVBUSY;
3329 }
3330
3331 ablk->cbfn = cbfn;
3332 ablk->cbarg = cbarg;
3333 ablk->busy = BFA_TRUE;
3334
3335 m = (struct bfi_ablk_h2i_optrom_s *)ablk->mb.msg;
3336 bfi_h2i_set(m->mh, BFI_MC_ABLK, BFI_ABLK_H2I_OPTROM_ENABLE,
3337 bfa_ioc_portid(ablk->ioc));
3338 bfa_ioc_mbox_queue(ablk->ioc, &ablk->mb);
3339
3340 return BFA_STATUS_OK;
3341}
3342
3343bfa_status_t
3344bfa_ablk_optrom_dis(struct bfa_ablk_s *ablk, bfa_ablk_cbfn_t cbfn, void *cbarg)
3345{
3346 struct bfi_ablk_h2i_optrom_s *m;
3347
3348 if (!bfa_ioc_is_operational(ablk->ioc)) {
3349 bfa_trc(ablk->ioc, BFA_STATUS_IOC_FAILURE);
3350 return BFA_STATUS_IOC_FAILURE;
3351 }
3352
3353 if (ablk->busy) {
3354 bfa_trc(ablk->ioc, BFA_STATUS_DEVBUSY);
3355 return BFA_STATUS_DEVBUSY;
3356 }
3357
3358 ablk->cbfn = cbfn;
3359 ablk->cbarg = cbarg;
3360 ablk->busy = BFA_TRUE;
3361
3362 m = (struct bfi_ablk_h2i_optrom_s *)ablk->mb.msg;
3363 bfi_h2i_set(m->mh, BFI_MC_ABLK, BFI_ABLK_H2I_OPTROM_DISABLE,
3364 bfa_ioc_portid(ablk->ioc));
3365 bfa_ioc_mbox_queue(ablk->ioc, &ablk->mb);
3366
3367 return BFA_STATUS_OK;
3368}
Krishna Gudipati51e569a2011-06-24 20:26:25 -07003369
3370/*
3371 * SFP module specific
3372 */
3373
3374/* forward declarations */
3375static void bfa_sfp_getdata_send(struct bfa_sfp_s *sfp);
3376static void bfa_sfp_media_get(struct bfa_sfp_s *sfp);
3377static bfa_status_t bfa_sfp_speed_valid(struct bfa_sfp_s *sfp,
3378 enum bfa_port_speed portspeed);
3379
3380static void
3381bfa_cb_sfp_show(struct bfa_sfp_s *sfp)
3382{
3383 bfa_trc(sfp, sfp->lock);
3384 if (sfp->cbfn)
3385 sfp->cbfn(sfp->cbarg, sfp->status);
3386 sfp->lock = 0;
3387 sfp->cbfn = NULL;
3388}
3389
3390static void
3391bfa_cb_sfp_state_query(struct bfa_sfp_s *sfp)
3392{
3393 bfa_trc(sfp, sfp->portspeed);
3394 if (sfp->media) {
3395 bfa_sfp_media_get(sfp);
3396 if (sfp->state_query_cbfn)
3397 sfp->state_query_cbfn(sfp->state_query_cbarg,
3398 sfp->status);
3399 sfp->media = NULL;
3400 }
3401
3402 if (sfp->portspeed) {
3403 sfp->status = bfa_sfp_speed_valid(sfp, sfp->portspeed);
3404 if (sfp->state_query_cbfn)
3405 sfp->state_query_cbfn(sfp->state_query_cbarg,
3406 sfp->status);
3407 sfp->portspeed = BFA_PORT_SPEED_UNKNOWN;
3408 }
3409
3410 sfp->state_query_lock = 0;
3411 sfp->state_query_cbfn = NULL;
3412}
3413
3414/*
3415 * IOC event handler.
3416 */
3417static void
3418bfa_sfp_notify(void *sfp_arg, enum bfa_ioc_event_e event)
3419{
3420 struct bfa_sfp_s *sfp = sfp_arg;
3421
3422 bfa_trc(sfp, event);
3423 bfa_trc(sfp, sfp->lock);
3424 bfa_trc(sfp, sfp->state_query_lock);
3425
3426 switch (event) {
3427 case BFA_IOC_E_DISABLED:
3428 case BFA_IOC_E_FAILED:
3429 if (sfp->lock) {
3430 sfp->status = BFA_STATUS_IOC_FAILURE;
3431 bfa_cb_sfp_show(sfp);
3432 }
3433
3434 if (sfp->state_query_lock) {
3435 sfp->status = BFA_STATUS_IOC_FAILURE;
3436 bfa_cb_sfp_state_query(sfp);
3437 }
3438 break;
3439
3440 default:
3441 break;
3442 }
3443}
3444
3445/*
3446 * SFP get data send
3447 */
3448static void
3449bfa_sfp_getdata_send(struct bfa_sfp_s *sfp)
3450{
3451 struct bfi_sfp_req_s *req = (struct bfi_sfp_req_s *)sfp->mbcmd.msg;
3452
3453 bfa_trc(sfp, req->memtype);
3454
3455 /* build host command */
3456 bfi_h2i_set(req->mh, BFI_MC_SFP, BFI_SFP_H2I_SHOW,
3457 bfa_ioc_portid(sfp->ioc));
3458
3459 /* send mbox cmd */
3460 bfa_ioc_mbox_queue(sfp->ioc, &sfp->mbcmd);
3461}
3462
3463/*
3464 * SFP is valid, read sfp data
3465 */
3466static void
3467bfa_sfp_getdata(struct bfa_sfp_s *sfp, enum bfi_sfp_mem_e memtype)
3468{
3469 struct bfi_sfp_req_s *req = (struct bfi_sfp_req_s *)sfp->mbcmd.msg;
3470
3471 WARN_ON(sfp->lock != 0);
3472 bfa_trc(sfp, sfp->state);
3473
3474 sfp->lock = 1;
3475 sfp->memtype = memtype;
3476 req->memtype = memtype;
3477
3478 /* Setup SG list */
3479 bfa_alen_set(&req->alen, sizeof(struct sfp_mem_s), sfp->dbuf_pa);
3480
3481 bfa_sfp_getdata_send(sfp);
3482}
3483
3484/*
3485 * SFP show complete
3486 */
3487static void
3488bfa_sfp_show_comp(struct bfa_sfp_s *sfp, struct bfi_mbmsg_s *msg)
3489{
3490 struct bfi_sfp_rsp_s *rsp = (struct bfi_sfp_rsp_s *) msg;
3491
3492 if (!sfp->lock) {
3493 /*
3494 * receiving response after ioc failure
3495 */
3496 bfa_trc(sfp, sfp->lock);
3497 return;
3498 }
3499
3500 bfa_trc(sfp, rsp->status);
3501 if (rsp->status == BFA_STATUS_OK) {
3502 sfp->data_valid = 1;
3503 if (sfp->state == BFA_SFP_STATE_VALID)
3504 sfp->status = BFA_STATUS_OK;
3505 else if (sfp->state == BFA_SFP_STATE_UNSUPPORT)
3506 sfp->status = BFA_STATUS_SFP_UNSUPP;
3507 else
3508 bfa_trc(sfp, sfp->state);
3509 } else {
3510 sfp->data_valid = 0;
3511 sfp->status = rsp->status;
3512 /* sfpshow shouldn't change sfp state */
3513 }
3514
3515 bfa_trc(sfp, sfp->memtype);
3516 if (sfp->memtype == BFI_SFP_MEM_DIAGEXT) {
3517 bfa_trc(sfp, sfp->data_valid);
3518 if (sfp->data_valid) {
3519 u32 size = sizeof(struct sfp_mem_s);
3520 u8 *des = (u8 *) &(sfp->sfpmem->srlid_base);
3521 memcpy(des, sfp->dbuf_kva, size);
3522 }
3523 /*
3524 * Queue completion callback.
3525 */
3526 bfa_cb_sfp_show(sfp);
3527 } else
3528 sfp->lock = 0;
3529
3530 bfa_trc(sfp, sfp->state_query_lock);
3531 if (sfp->state_query_lock) {
3532 sfp->state = rsp->state;
3533 /* Complete callback */
3534 bfa_cb_sfp_state_query(sfp);
3535 }
3536}
3537
3538/*
3539 * SFP query fw sfp state
3540 */
3541static void
3542bfa_sfp_state_query(struct bfa_sfp_s *sfp)
3543{
3544 struct bfi_sfp_req_s *req = (struct bfi_sfp_req_s *)sfp->mbcmd.msg;
3545
3546 /* Should not be doing query if not in _INIT state */
3547 WARN_ON(sfp->state != BFA_SFP_STATE_INIT);
3548 WARN_ON(sfp->state_query_lock != 0);
3549 bfa_trc(sfp, sfp->state);
3550
3551 sfp->state_query_lock = 1;
3552 req->memtype = 0;
3553
3554 if (!sfp->lock)
3555 bfa_sfp_getdata(sfp, BFI_SFP_MEM_ALL);
3556}
3557
3558static void
3559bfa_sfp_media_get(struct bfa_sfp_s *sfp)
3560{
3561 enum bfa_defs_sfp_media_e *media = sfp->media;
3562
3563 *media = BFA_SFP_MEDIA_UNKNOWN;
3564
3565 if (sfp->state == BFA_SFP_STATE_UNSUPPORT)
3566 *media = BFA_SFP_MEDIA_UNSUPPORT;
3567 else if (sfp->state == BFA_SFP_STATE_VALID) {
3568 union sfp_xcvr_e10g_code_u e10g;
3569 struct sfp_mem_s *sfpmem = (struct sfp_mem_s *)sfp->dbuf_kva;
3570 u16 xmtr_tech = (sfpmem->srlid_base.xcvr[4] & 0x3) << 7 |
3571 (sfpmem->srlid_base.xcvr[5] >> 1);
3572
3573 e10g.b = sfpmem->srlid_base.xcvr[0];
3574 bfa_trc(sfp, e10g.b);
3575 bfa_trc(sfp, xmtr_tech);
3576 /* check fc transmitter tech */
3577 if ((xmtr_tech & SFP_XMTR_TECH_CU) ||
3578 (xmtr_tech & SFP_XMTR_TECH_CP) ||
3579 (xmtr_tech & SFP_XMTR_TECH_CA))
3580 *media = BFA_SFP_MEDIA_CU;
3581 else if ((xmtr_tech & SFP_XMTR_TECH_EL_INTRA) ||
3582 (xmtr_tech & SFP_XMTR_TECH_EL_INTER))
3583 *media = BFA_SFP_MEDIA_EL;
3584 else if ((xmtr_tech & SFP_XMTR_TECH_LL) ||
3585 (xmtr_tech & SFP_XMTR_TECH_LC))
3586 *media = BFA_SFP_MEDIA_LW;
3587 else if ((xmtr_tech & SFP_XMTR_TECH_SL) ||
3588 (xmtr_tech & SFP_XMTR_TECH_SN) ||
3589 (xmtr_tech & SFP_XMTR_TECH_SA))
3590 *media = BFA_SFP_MEDIA_SW;
3591		/* Check 10G Ethernet Compliance code */
3592 else if (e10g.b & 0x10)
3593 *media = BFA_SFP_MEDIA_SW;
3594 else if (e10g.b & 0x60)
3595 *media = BFA_SFP_MEDIA_LW;
3596 else if (e10g.r.e10g_unall & 0x80)
3597 *media = BFA_SFP_MEDIA_UNKNOWN;
3598 else
3599 bfa_trc(sfp, 0);
3600 } else
3601 bfa_trc(sfp, sfp->state);
3602}
3603
3604static bfa_status_t
3605bfa_sfp_speed_valid(struct bfa_sfp_s *sfp, enum bfa_port_speed portspeed)
3606{
3607 struct sfp_mem_s *sfpmem = (struct sfp_mem_s *)sfp->dbuf_kva;
3608 struct sfp_xcvr_s *xcvr = (struct sfp_xcvr_s *) sfpmem->srlid_base.xcvr;
3609 union sfp_xcvr_fc3_code_u fc3 = xcvr->fc3;
3610 union sfp_xcvr_e10g_code_u e10g = xcvr->e10g;
3611
3612 if (portspeed == BFA_PORT_SPEED_10GBPS) {
3613 if (e10g.r.e10g_sr || e10g.r.e10g_lr)
3614 return BFA_STATUS_OK;
3615 else {
3616 bfa_trc(sfp, e10g.b);
3617 return BFA_STATUS_UNSUPP_SPEED;
3618 }
3619 }
3620 if (((portspeed & BFA_PORT_SPEED_16GBPS) && fc3.r.mb1600) ||
3621 ((portspeed & BFA_PORT_SPEED_8GBPS) && fc3.r.mb800) ||
3622 ((portspeed & BFA_PORT_SPEED_4GBPS) && fc3.r.mb400) ||
3623 ((portspeed & BFA_PORT_SPEED_2GBPS) && fc3.r.mb200) ||
3624 ((portspeed & BFA_PORT_SPEED_1GBPS) && fc3.r.mb100))
3625 return BFA_STATUS_OK;
3626 else {
3627 bfa_trc(sfp, portspeed);
3628 bfa_trc(sfp, fc3.b);
3629 bfa_trc(sfp, e10g.b);
3630 return BFA_STATUS_UNSUPP_SPEED;
3631 }
3632}
3633
3634/*
3635 * SFP hmbox handler
3636 */
3637void
3638bfa_sfp_intr(void *sfparg, struct bfi_mbmsg_s *msg)
3639{
3640 struct bfa_sfp_s *sfp = sfparg;
3641
3642 switch (msg->mh.msg_id) {
3643 case BFI_SFP_I2H_SHOW:
3644 bfa_sfp_show_comp(sfp, msg);
3645 break;
3646
3647 case BFI_SFP_I2H_SCN:
3648 bfa_trc(sfp, msg->mh.msg_id);
3649 break;
3650
3651 default:
3652 bfa_trc(sfp, msg->mh.msg_id);
3653 WARN_ON(1);
3654 }
3655}
3656
3657/*
3658 * Return DMA memory needed by sfp module.
3659 */
3660u32
3661bfa_sfp_meminfo(void)
3662{
3663 return BFA_ROUNDUP(sizeof(struct sfp_mem_s), BFA_DMA_ALIGN_SZ);
3664}
3665
3666/*
3667 * Attach virtual and physical memory for SFP.
3668 */
3669void
3670bfa_sfp_attach(struct bfa_sfp_s *sfp, struct bfa_ioc_s *ioc, void *dev,
3671 struct bfa_trc_mod_s *trcmod)
3672{
3673 sfp->dev = dev;
3674 sfp->ioc = ioc;
3675 sfp->trcmod = trcmod;
3676
3677 sfp->cbfn = NULL;
3678 sfp->cbarg = NULL;
3679 sfp->sfpmem = NULL;
3680 sfp->lock = 0;
3681 sfp->data_valid = 0;
3682 sfp->state = BFA_SFP_STATE_INIT;
3683 sfp->state_query_lock = 0;
3684 sfp->state_query_cbfn = NULL;
3685 sfp->state_query_cbarg = NULL;
3686 sfp->media = NULL;
3687 sfp->portspeed = BFA_PORT_SPEED_UNKNOWN;
3688 sfp->is_elb = BFA_FALSE;
3689
3690 bfa_ioc_mbox_regisr(sfp->ioc, BFI_MC_SFP, bfa_sfp_intr, sfp);
3691 bfa_q_qe_init(&sfp->ioc_notify);
3692 bfa_ioc_notify_init(&sfp->ioc_notify, bfa_sfp_notify, sfp);
3693 list_add_tail(&sfp->ioc_notify.qe, &sfp->ioc->notify_q);
3694}
3695
3696/*
3697 * Claim Memory for SFP
3698 */
3699void
3700bfa_sfp_memclaim(struct bfa_sfp_s *sfp, u8 *dm_kva, u64 dm_pa)
3701{
3702 sfp->dbuf_kva = dm_kva;
3703 sfp->dbuf_pa = dm_pa;
3704 memset(sfp->dbuf_kva, 0, sizeof(struct sfp_mem_s));
3705
3706 dm_kva += BFA_ROUNDUP(sizeof(struct sfp_mem_s), BFA_DMA_ALIGN_SZ);
3707 dm_pa += BFA_ROUNDUP(sizeof(struct sfp_mem_s), BFA_DMA_ALIGN_SZ);
3708}
3709
3710/*
3711 * Show SFP eeprom content
3712 *
3713 * @param[in] sfp - bfa sfp module
3714 *
3715 * @param[out] sfpmem - sfp eeprom data
3716 *
3717 */
3718bfa_status_t
3719bfa_sfp_show(struct bfa_sfp_s *sfp, struct sfp_mem_s *sfpmem,
3720 bfa_cb_sfp_t cbfn, void *cbarg)
3721{
3722
3723 if (!bfa_ioc_is_operational(sfp->ioc)) {
3724 bfa_trc(sfp, 0);
3725 return BFA_STATUS_IOC_NON_OP;
3726 }
3727
3728 if (sfp->lock) {
3729 bfa_trc(sfp, 0);
3730 return BFA_STATUS_DEVBUSY;
3731 }
3732
3733 sfp->cbfn = cbfn;
3734 sfp->cbarg = cbarg;
3735 sfp->sfpmem = sfpmem;
3736
3737 bfa_sfp_getdata(sfp, BFI_SFP_MEM_DIAGEXT);
3738 return BFA_STATUS_OK;
3739}
3740
3741/*
3742 * Return SFP Media type
3743 *
3744 * @param[in] sfp - bfa sfp module
3745 *
3746 * @param[out]	media - detected SFP media type
3747 *
3748 */
3749bfa_status_t
3750bfa_sfp_media(struct bfa_sfp_s *sfp, enum bfa_defs_sfp_media_e *media,
3751 bfa_cb_sfp_t cbfn, void *cbarg)
3752{
3753 if (!bfa_ioc_is_operational(sfp->ioc)) {
3754 bfa_trc(sfp, 0);
3755 return BFA_STATUS_IOC_NON_OP;
3756 }
3757
3758 sfp->media = media;
3759 if (sfp->state == BFA_SFP_STATE_INIT) {
3760 if (sfp->state_query_lock) {
3761 bfa_trc(sfp, 0);
3762 return BFA_STATUS_DEVBUSY;
3763 } else {
3764 sfp->state_query_cbfn = cbfn;
3765 sfp->state_query_cbarg = cbarg;
3766 bfa_sfp_state_query(sfp);
3767 return BFA_STATUS_SFP_NOT_READY;
3768 }
3769 }
3770
3771 bfa_sfp_media_get(sfp);
3772 return BFA_STATUS_OK;
3773}
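
/*
 * Caller-side sketch (illustrative only): when the SFP state is still
 * BFA_SFP_STATE_INIT, bfa_sfp_media() returns BFA_STATUS_SFP_NOT_READY
 * and delivers the answer later through the supplied callback, so a
 * hypothetical caller handles both the immediate and the deferred case.
 * 'drv', 'my_sfp_cb', 'use_media' and 'handle_error' are stand-ins:
 *
 *	status = bfa_sfp_media(sfp, &drv->media, my_sfp_cb, drv);
 *	if (status == BFA_STATUS_OK)
 *		use_media(drv->media);		// answered immediately
 *	else if (status == BFA_STATUS_SFP_NOT_READY)
 *		;	// my_sfp_cb() runs once the fw state query completes
 *	else
 *		handle_error(status);		// IOC down or query busy
 */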
3774
3775/*
3776 * Check if user set port speed is allowed by the SFP
3777 *
3778 * @param[in] sfp - bfa sfp module
3779 * @param[in] portspeed - port speed from user
3780 *
3781 */
3782bfa_status_t
3783bfa_sfp_speed(struct bfa_sfp_s *sfp, enum bfa_port_speed portspeed,
3784 bfa_cb_sfp_t cbfn, void *cbarg)
3785{
3786 WARN_ON(portspeed == BFA_PORT_SPEED_UNKNOWN);
3787
3788 if (!bfa_ioc_is_operational(sfp->ioc))
3789 return BFA_STATUS_IOC_NON_OP;
3790
3791 /* For Mezz card, all speed is allowed */
3792 if (bfa_mfg_is_mezz(sfp->ioc->attr->card_type))
3793 return BFA_STATUS_OK;
3794
3795 /* Check SFP state */
3796 sfp->portspeed = portspeed;
3797 if (sfp->state == BFA_SFP_STATE_INIT) {
3798 if (sfp->state_query_lock) {
3799 bfa_trc(sfp, 0);
3800 return BFA_STATUS_DEVBUSY;
3801 } else {
3802 sfp->state_query_cbfn = cbfn;
3803 sfp->state_query_cbarg = cbarg;
3804 bfa_sfp_state_query(sfp);
3805 return BFA_STATUS_SFP_NOT_READY;
3806 }
3807 }
3808
3809 if (sfp->state == BFA_SFP_STATE_REMOVED ||
3810 sfp->state == BFA_SFP_STATE_FAILED) {
3811 bfa_trc(sfp, sfp->state);
3812 return BFA_STATUS_NO_SFP_DEV;
3813 }
3814
3815 if (sfp->state == BFA_SFP_STATE_INSERTED) {
3816 bfa_trc(sfp, sfp->state);
3817 return BFA_STATUS_DEVBUSY; /* sfp is reading data */
3818 }
3819
3820 /* For eloopback, all speed is allowed */
3821 if (sfp->is_elb)
3822 return BFA_STATUS_OK;
3823
3824 return bfa_sfp_speed_valid(sfp, portspeed);
3825}
Krishna Gudipati5a54b1d2011-06-24 20:27:13 -07003826
3827/*
3828 * Flash module specific
3829 */
3830
3831/*
3832 * FLASH DMA buffer should be big enough to hold both MFG block and
3833 * asic block (64k) at the same time, and should also be 2k aligned to
3834 * keep a write segment from crossing a sector boundary.
3835 */
3836#define BFA_FLASH_SEG_SZ 2048
3837#define BFA_FLASH_DMA_BUF_SZ \
3838 BFA_ROUNDUP(0x010000 + sizeof(struct bfa_mfg_block_s), BFA_FLASH_SEG_SZ)
3839
3840static void
3841bfa_flash_cb(struct bfa_flash_s *flash)
3842{
3843 flash->op_busy = 0;
3844 if (flash->cbfn)
3845 flash->cbfn(flash->cbarg, flash->status);
3846}
3847
3848static void
3849bfa_flash_notify(void *cbarg, enum bfa_ioc_event_e event)
3850{
3851 struct bfa_flash_s *flash = cbarg;
3852
3853 bfa_trc(flash, event);
3854 switch (event) {
3855 case BFA_IOC_E_DISABLED:
3856 case BFA_IOC_E_FAILED:
3857 if (flash->op_busy) {
3858 flash->status = BFA_STATUS_IOC_FAILURE;
3859 flash->cbfn(flash->cbarg, flash->status);
3860 flash->op_busy = 0;
3861 }
3862 break;
3863
3864 default:
3865 break;
3866 }
3867}
3868
3869/*
3870 * Send flash attribute query request.
3871 *
3872 * @param[in] cbarg - callback argument
3873 */
3874static void
3875bfa_flash_query_send(void *cbarg)
3876{
3877 struct bfa_flash_s *flash = cbarg;
3878 struct bfi_flash_query_req_s *msg =
3879 (struct bfi_flash_query_req_s *) flash->mb.msg;
3880
3881 bfi_h2i_set(msg->mh, BFI_MC_FLASH, BFI_FLASH_H2I_QUERY_REQ,
3882 bfa_ioc_portid(flash->ioc));
3883 bfa_alen_set(&msg->alen, sizeof(struct bfa_flash_attr_s),
3884 flash->dbuf_pa);
3885 bfa_ioc_mbox_queue(flash->ioc, &flash->mb);
3886}
3887
3888/*
3889 * Send flash write request.
3890 *
3891 * @param[in] cbarg - callback argument
3892 */
3893static void
3894bfa_flash_write_send(struct bfa_flash_s *flash)
3895{
3896 struct bfi_flash_write_req_s *msg =
3897 (struct bfi_flash_write_req_s *) flash->mb.msg;
3898 u32 len;
3899
3900 msg->type = be32_to_cpu(flash->type);
3901 msg->instance = flash->instance;
3902 msg->offset = be32_to_cpu(flash->addr_off + flash->offset);
3903 len = (flash->residue < BFA_FLASH_DMA_BUF_SZ) ?
3904 flash->residue : BFA_FLASH_DMA_BUF_SZ;
3905 msg->length = be32_to_cpu(len);
3906
3907 /* indicate if it's the last msg of the whole write operation */
3908 msg->last = (len == flash->residue) ? 1 : 0;
3909
3910 bfi_h2i_set(msg->mh, BFI_MC_FLASH, BFI_FLASH_H2I_WRITE_REQ,
3911 bfa_ioc_portid(flash->ioc));
3912 bfa_alen_set(&msg->alen, len, flash->dbuf_pa);
3913 memcpy(flash->dbuf_kva, flash->ubuf + flash->offset, len);
3914 bfa_ioc_mbox_queue(flash->ioc, &flash->mb);
3915
3916 flash->residue -= len;
3917 flash->offset += len;
3918}
3919
3920/*
3921 * Send flash read request.
3922 *
3923 * @param[in] cbarg - callback argument
3924 */
3925static void
3926bfa_flash_read_send(void *cbarg)
3927{
3928 struct bfa_flash_s *flash = cbarg;
3929 struct bfi_flash_read_req_s *msg =
3930 (struct bfi_flash_read_req_s *) flash->mb.msg;
3931 u32 len;
3932
3933 msg->type = be32_to_cpu(flash->type);
3934 msg->instance = flash->instance;
3935 msg->offset = be32_to_cpu(flash->addr_off + flash->offset);
3936 len = (flash->residue < BFA_FLASH_DMA_BUF_SZ) ?
3937 flash->residue : BFA_FLASH_DMA_BUF_SZ;
3938 msg->length = be32_to_cpu(len);
3939 bfi_h2i_set(msg->mh, BFI_MC_FLASH, BFI_FLASH_H2I_READ_REQ,
3940 bfa_ioc_portid(flash->ioc));
3941 bfa_alen_set(&msg->alen, len, flash->dbuf_pa);
3942 bfa_ioc_mbox_queue(flash->ioc, &flash->mb);
3943}
3944
3945/*
3946 * Send flash erase request.
3947 *
3948 * @param[in] cbarg - callback argument
3949 */
3950static void
3951bfa_flash_erase_send(void *cbarg)
3952{
3953 struct bfa_flash_s *flash = cbarg;
3954 struct bfi_flash_erase_req_s *msg =
3955 (struct bfi_flash_erase_req_s *) flash->mb.msg;
3956
3957 msg->type = be32_to_cpu(flash->type);
3958 msg->instance = flash->instance;
3959 bfi_h2i_set(msg->mh, BFI_MC_FLASH, BFI_FLASH_H2I_ERASE_REQ,
3960 bfa_ioc_portid(flash->ioc));
3961 bfa_ioc_mbox_queue(flash->ioc, &flash->mb);
3962}
3963
3964/*
3965 * Process flash response messages upon receiving interrupts.
3966 *
3967 * @param[in] flasharg - flash structure
3968 * @param[in] msg - message structure
3969 */
3970static void
3971bfa_flash_intr(void *flasharg, struct bfi_mbmsg_s *msg)
3972{
3973 struct bfa_flash_s *flash = flasharg;
3974 u32 status;
3975
3976 union {
3977 struct bfi_flash_query_rsp_s *query;
3978 struct bfi_flash_erase_rsp_s *erase;
3979 struct bfi_flash_write_rsp_s *write;
3980 struct bfi_flash_read_rsp_s *read;
3981 struct bfi_mbmsg_s *msg;
3982 } m;
3983
3984 m.msg = msg;
3985 bfa_trc(flash, msg->mh.msg_id);
3986
3987 if (!flash->op_busy && msg->mh.msg_id != BFI_FLASH_I2H_EVENT) {
3988 /* receiving response after ioc failure */
3989 bfa_trc(flash, 0x9999);
3990 return;
3991 }
3992
3993 switch (msg->mh.msg_id) {
3994 case BFI_FLASH_I2H_QUERY_RSP:
3995 status = be32_to_cpu(m.query->status);
3996 bfa_trc(flash, status);
3997 if (status == BFA_STATUS_OK) {
3998 u32 i;
3999 struct bfa_flash_attr_s *attr, *f;
4000
4001 attr = (struct bfa_flash_attr_s *) flash->ubuf;
4002 f = (struct bfa_flash_attr_s *) flash->dbuf_kva;
4003 attr->status = be32_to_cpu(f->status);
4004 attr->npart = be32_to_cpu(f->npart);
4005 bfa_trc(flash, attr->status);
4006 bfa_trc(flash, attr->npart);
4007 for (i = 0; i < attr->npart; i++) {
4008 attr->part[i].part_type =
4009 be32_to_cpu(f->part[i].part_type);
4010 attr->part[i].part_instance =
4011 be32_to_cpu(f->part[i].part_instance);
4012 attr->part[i].part_off =
4013 be32_to_cpu(f->part[i].part_off);
4014 attr->part[i].part_size =
4015 be32_to_cpu(f->part[i].part_size);
4016 attr->part[i].part_len =
4017 be32_to_cpu(f->part[i].part_len);
4018 attr->part[i].part_status =
4019 be32_to_cpu(f->part[i].part_status);
4020 }
4021 }
4022 flash->status = status;
4023 bfa_flash_cb(flash);
4024 break;
4025 case BFI_FLASH_I2H_ERASE_RSP:
4026 status = be32_to_cpu(m.erase->status);
4027 bfa_trc(flash, status);
4028 flash->status = status;
4029 bfa_flash_cb(flash);
4030 break;
4031 case BFI_FLASH_I2H_WRITE_RSP:
4032 status = be32_to_cpu(m.write->status);
4033 bfa_trc(flash, status);
4034 if (status != BFA_STATUS_OK || flash->residue == 0) {
4035 flash->status = status;
4036 bfa_flash_cb(flash);
4037 } else {
4038 bfa_trc(flash, flash->offset);
4039 bfa_flash_write_send(flash);
4040 }
4041 break;
4042 case BFI_FLASH_I2H_READ_RSP:
4043 status = be32_to_cpu(m.read->status);
4044 bfa_trc(flash, status);
4045 if (status != BFA_STATUS_OK) {
4046 flash->status = status;
4047 bfa_flash_cb(flash);
4048 } else {
4049 u32 len = be32_to_cpu(m.read->length);
4050 bfa_trc(flash, flash->offset);
4051 bfa_trc(flash, len);
4052 memcpy(flash->ubuf + flash->offset,
4053 flash->dbuf_kva, len);
4054 flash->residue -= len;
4055 flash->offset += len;
4056 if (flash->residue == 0) {
4057 flash->status = status;
4058 bfa_flash_cb(flash);
4059 } else
4060 bfa_flash_read_send(flash);
4061 }
4062 break;
4063 case BFI_FLASH_I2H_BOOT_VER_RSP:
4064 case BFI_FLASH_I2H_EVENT:
4065 bfa_trc(flash, msg->mh.msg_id);
4066 break;
4067
4068 default:
4069 WARN_ON(1);
4070 }
4071}
4072
4073/*
4074 * Flash memory info API.
4075 *
4076 * @param[in] mincfg - minimal cfg variable
4077 */
4078u32
4079bfa_flash_meminfo(bfa_boolean_t mincfg)
4080{
4081 /* min driver doesn't need flash */
4082 if (mincfg)
4083 return 0;
4084 return BFA_ROUNDUP(BFA_FLASH_DMA_BUF_SZ, BFA_DMA_ALIGN_SZ);
4085}
4086
4087/*
4088 * Flash attach API.
4089 *
4090 * @param[in] flash - flash structure
4091 * @param[in] ioc - ioc structure
4092 * @param[in] dev - device structure
4093 * @param[in] trcmod - trace module
4094 * @param[in] logmod - log module
4095 */
4096void
4097bfa_flash_attach(struct bfa_flash_s *flash, struct bfa_ioc_s *ioc, void *dev,
4098 struct bfa_trc_mod_s *trcmod, bfa_boolean_t mincfg)
4099{
4100 flash->ioc = ioc;
4101 flash->trcmod = trcmod;
4102 flash->cbfn = NULL;
4103 flash->cbarg = NULL;
4104 flash->op_busy = 0;
4105
4106 bfa_ioc_mbox_regisr(flash->ioc, BFI_MC_FLASH, bfa_flash_intr, flash);
4107 bfa_q_qe_init(&flash->ioc_notify);
4108 bfa_ioc_notify_init(&flash->ioc_notify, bfa_flash_notify, flash);
4109 list_add_tail(&flash->ioc_notify.qe, &flash->ioc->notify_q);
4110
4111 /* min driver doesn't need flash */
4112 if (mincfg) {
4113 flash->dbuf_kva = NULL;
4114 flash->dbuf_pa = 0;
4115 }
4116}
4117
4118/*
4119 * Claim memory for flash
4120 *
4121 * @param[in] flash - flash structure
4122 * @param[in] dm_kva - pointer to virtual memory address
4123 * @param[in] dm_pa - physical memory address
4124 * @param[in] mincfg - minimal cfg variable
4125 */
4126void
4127bfa_flash_memclaim(struct bfa_flash_s *flash, u8 *dm_kva, u64 dm_pa,
4128 bfa_boolean_t mincfg)
4129{
4130 if (mincfg)
4131 return;
4132
4133 flash->dbuf_kva = dm_kva;
4134 flash->dbuf_pa = dm_pa;
4135 memset(flash->dbuf_kva, 0, BFA_FLASH_DMA_BUF_SZ);
4136 dm_kva += BFA_ROUNDUP(BFA_FLASH_DMA_BUF_SZ, BFA_DMA_ALIGN_SZ);
4137 dm_pa += BFA_ROUNDUP(BFA_FLASH_DMA_BUF_SZ, BFA_DMA_ALIGN_SZ);
4138}
4139
4140/*
4141 * Get flash attribute.
4142 *
4143 * @param[in] flash - flash structure
4144 * @param[in] attr - flash attribute structure
4145 * @param[in] cbfn - callback function
4146 * @param[in] cbarg - callback argument
4147 *
4148 * Return status.
4149 */
4150bfa_status_t
4151bfa_flash_get_attr(struct bfa_flash_s *flash, struct bfa_flash_attr_s *attr,
4152 bfa_cb_flash_t cbfn, void *cbarg)
4153{
4154 bfa_trc(flash, BFI_FLASH_H2I_QUERY_REQ);
4155
4156 if (!bfa_ioc_is_operational(flash->ioc))
4157 return BFA_STATUS_IOC_NON_OP;
4158
4159 if (flash->op_busy) {
4160 bfa_trc(flash, flash->op_busy);
4161 return BFA_STATUS_DEVBUSY;
4162 }
4163
4164 flash->op_busy = 1;
4165 flash->cbfn = cbfn;
4166 flash->cbarg = cbarg;
4167 flash->ubuf = (u8 *) attr;
4168 bfa_flash_query_send(flash);
4169
4170 return BFA_STATUS_OK;
4171}
4172
4173/*
4174 * Erase flash partition.
4175 *
4176 * @param[in] flash - flash structure
4177 * @param[in] type - flash partition type
4178 * @param[in] instance - flash partition instance
4179 * @param[in] cbfn - callback function
4180 * @param[in] cbarg - callback argument
4181 *
4182 * Return status.
4183 */
4184bfa_status_t
4185bfa_flash_erase_part(struct bfa_flash_s *flash, enum bfa_flash_part_type type,
4186 u8 instance, bfa_cb_flash_t cbfn, void *cbarg)
4187{
4188 bfa_trc(flash, BFI_FLASH_H2I_ERASE_REQ);
4189 bfa_trc(flash, type);
4190 bfa_trc(flash, instance);
4191
4192 if (!bfa_ioc_is_operational(flash->ioc))
4193 return BFA_STATUS_IOC_NON_OP;
4194
4195 if (flash->op_busy) {
4196 bfa_trc(flash, flash->op_busy);
4197 return BFA_STATUS_DEVBUSY;
4198 }
4199
4200 flash->op_busy = 1;
4201 flash->cbfn = cbfn;
4202 flash->cbarg = cbarg;
4203 flash->type = type;
4204 flash->instance = instance;
4205
4206 bfa_flash_erase_send(flash);
4207 return BFA_STATUS_OK;
4208}
4209
4210/*
4211 * Update flash partition.
4212 *
4213 * @param[in] flash - flash structure
4214 * @param[in] type - flash partition type
4215 * @param[in] instance - flash partition instance
4216 * @param[in] buf - update data buffer
4217 * @param[in] len - data buffer length
4218 * @param[in] offset - offset relative to the partition starting address
4219 * @param[in] cbfn - callback function
4220 * @param[in] cbarg - callback argument
4221 *
4222 * Return status.
4223 */
4224bfa_status_t
4225bfa_flash_update_part(struct bfa_flash_s *flash, enum bfa_flash_part_type type,
4226 u8 instance, void *buf, u32 len, u32 offset,
4227 bfa_cb_flash_t cbfn, void *cbarg)
4228{
4229 bfa_trc(flash, BFI_FLASH_H2I_WRITE_REQ);
4230 bfa_trc(flash, type);
4231 bfa_trc(flash, instance);
4232 bfa_trc(flash, len);
4233 bfa_trc(flash, offset);
4234
4235 if (!bfa_ioc_is_operational(flash->ioc))
4236 return BFA_STATUS_IOC_NON_OP;
4237
4238 /*
4239	 * 'len' must be on a word (4-byte) boundary
4240	 * 'offset' must be on a sector (16kb) boundary
4241 */
4242 if (!len || (len & 0x03) || (offset & 0x00003FFF))
4243 return BFA_STATUS_FLASH_BAD_LEN;
4244
4245 if (type == BFA_FLASH_PART_MFG)
4246 return BFA_STATUS_EINVAL;
4247
4248 if (flash->op_busy) {
4249 bfa_trc(flash, flash->op_busy);
4250 return BFA_STATUS_DEVBUSY;
4251 }
4252
4253 flash->op_busy = 1;
4254 flash->cbfn = cbfn;
4255 flash->cbarg = cbarg;
4256 flash->type = type;
4257 flash->instance = instance;
4258 flash->residue = len;
4259 flash->offset = 0;
4260 flash->addr_off = offset;
4261 flash->ubuf = buf;
4262
4263 bfa_flash_write_send(flash);
4264 return BFA_STATUS_OK;
4265}
4266
4267/*
4268 * Read flash partition.
4269 *
4270 * @param[in] flash - flash structure
4271 * @param[in] type - flash partition type
4272 * @param[in] instance - flash partition instance
4273 * @param[in] buf - read data buffer
4274 * @param[in] len - data buffer length
4275 * @param[in] offset - offset relative to the partition starting address
4276 * @param[in] cbfn - callback function
4277 * @param[in] cbarg - callback argument
4278 *
4279 * Return status.
4280 */
4281bfa_status_t
4282bfa_flash_read_part(struct bfa_flash_s *flash, enum bfa_flash_part_type type,
4283 u8 instance, void *buf, u32 len, u32 offset,
4284 bfa_cb_flash_t cbfn, void *cbarg)
4285{
4286 bfa_trc(flash, BFI_FLASH_H2I_READ_REQ);
4287 bfa_trc(flash, type);
4288 bfa_trc(flash, instance);
4289 bfa_trc(flash, len);
4290 bfa_trc(flash, offset);
4291
4292 if (!bfa_ioc_is_operational(flash->ioc))
4293 return BFA_STATUS_IOC_NON_OP;
4294
4295 /*
4296	 * 'len' must be on a word (4-byte) boundary
4297	 * 'offset' must be on a sector (16kb) boundary
4298 */
4299 if (!len || (len & 0x03) || (offset & 0x00003FFF))
4300 return BFA_STATUS_FLASH_BAD_LEN;
4301
4302 if (flash->op_busy) {
4303 bfa_trc(flash, flash->op_busy);
4304 return BFA_STATUS_DEVBUSY;
4305 }
4306
4307 flash->op_busy = 1;
4308 flash->cbfn = cbfn;
4309 flash->cbarg = cbarg;
4310 flash->type = type;
4311 flash->instance = instance;
4312 flash->residue = len;
4313 flash->offset = 0;
4314 flash->addr_off = offset;
4315 flash->ubuf = buf;
4316 bfa_flash_read_send(flash);
4317
4318 return BFA_STATUS_OK;
4319}
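
/*
 * Usage sketch (illustrative only): flash reads, like writes, run fully
 * asynchronously and are chunked internally into BFA_FLASH_DMA_BUF_SZ
 * pieces, so the caller sees one callback when the last chunk arrives.
 * 'len' must be word (4-byte) aligned and 'offset' sector (16kb) aligned.
 * 'drv', 'my_flash_cb' and the partition parameters are assumptions of
 * this sketch; real values normally come from bfa_flash_get_attr():
 *
 *	status = bfa_flash_read_part(&drv->flash, type, instance,
 *				     drv->buf, len, offset,
 *				     my_flash_cb, drv);
 *	if (status != BFA_STATUS_OK)
 *		return status;	// bad len/offset, busy or IOC not ready
 *	// my_flash_cb(drv, final_status) runs after the last READ_RSP
 */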
Krishna Gudipati3d7fc662011-06-24 20:28:17 -07004320
4321/*
4322 * DIAG module specific
4323 */
4324
4325#define BFA_DIAG_MEMTEST_TOV 50000 /* memtest timeout in msec */
4326#define BFA_DIAG_FWPING_TOV 1000 /* msec */
4327
4328/* IOC event handler */
4329static void
4330bfa_diag_notify(void *diag_arg, enum bfa_ioc_event_e event)
4331{
4332 struct bfa_diag_s *diag = diag_arg;
4333
4334 bfa_trc(diag, event);
4335 bfa_trc(diag, diag->block);
4336 bfa_trc(diag, diag->fwping.lock);
4337 bfa_trc(diag, diag->tsensor.lock);
4338
4339 switch (event) {
4340 case BFA_IOC_E_DISABLED:
4341 case BFA_IOC_E_FAILED:
4342 if (diag->fwping.lock) {
4343 diag->fwping.status = BFA_STATUS_IOC_FAILURE;
4344 diag->fwping.cbfn(diag->fwping.cbarg,
4345 diag->fwping.status);
4346 diag->fwping.lock = 0;
4347 }
4348
4349 if (diag->tsensor.lock) {
4350 diag->tsensor.status = BFA_STATUS_IOC_FAILURE;
4351 diag->tsensor.cbfn(diag->tsensor.cbarg,
4352 diag->tsensor.status);
4353 diag->tsensor.lock = 0;
4354 }
4355
4356 if (diag->block) {
4357 if (diag->timer_active) {
4358 bfa_timer_stop(&diag->timer);
4359 diag->timer_active = 0;
4360 }
4361
4362 diag->status = BFA_STATUS_IOC_FAILURE;
4363 diag->cbfn(diag->cbarg, diag->status);
4364 diag->block = 0;
4365 }
4366 break;
4367
4368 default:
4369 break;
4370 }
4371}
4372
4373static void
4374bfa_diag_memtest_done(void *cbarg)
4375{
4376 struct bfa_diag_s *diag = cbarg;
4377 struct bfa_ioc_s *ioc = diag->ioc;
4378 struct bfa_diag_memtest_result *res = diag->result;
4379 u32 loff = BFI_BOOT_MEMTEST_RES_ADDR;
4380 u32 pgnum, pgoff, i;
4381
4382 pgnum = PSS_SMEM_PGNUM(ioc->ioc_regs.smem_pg0, loff);
4383 pgoff = PSS_SMEM_PGOFF(loff);
4384
4385 writel(pgnum, ioc->ioc_regs.host_page_num_fn);
4386
4387 for (i = 0; i < (sizeof(struct bfa_diag_memtest_result) /
4388 sizeof(u32)); i++) {
4389 /* read test result from smem */
4390 *((u32 *) res + i) =
4391 bfa_mem_read(ioc->ioc_regs.smem_page_start, loff);
4392 loff += sizeof(u32);
4393 }
4394
4395 /* Reset IOC fwstates to BFI_IOC_UNINIT */
4396 bfa_ioc_reset_fwstate(ioc);
4397
4398 res->status = swab32(res->status);
4399 bfa_trc(diag, res->status);
4400
4401 if (res->status == BFI_BOOT_MEMTEST_RES_SIG)
4402 diag->status = BFA_STATUS_OK;
4403 else {
4404 diag->status = BFA_STATUS_MEMTEST_FAILED;
4405 res->addr = swab32(res->addr);
4406 res->exp = swab32(res->exp);
4407 res->act = swab32(res->act);
4408 res->err_status = swab32(res->err_status);
4409 res->err_status1 = swab32(res->err_status1);
4410 res->err_addr = swab32(res->err_addr);
4411 bfa_trc(diag, res->addr);
4412 bfa_trc(diag, res->exp);
4413 bfa_trc(diag, res->act);
4414 bfa_trc(diag, res->err_status);
4415 bfa_trc(diag, res->err_status1);
4416 bfa_trc(diag, res->err_addr);
4417 }
4418 diag->timer_active = 0;
4419 diag->cbfn(diag->cbarg, diag->status);
4420 diag->block = 0;
4421}
4422
4423/*
4424 * Firmware ping
4425 */
4426
4427/*
4428 * Perform DMA test directly
4429 */
4430static void
4431diag_fwping_send(struct bfa_diag_s *diag)
4432{
4433 struct bfi_diag_fwping_req_s *fwping_req;
4434 u32 i;
4435
4436 bfa_trc(diag, diag->fwping.dbuf_pa);
4437
4438 /* fill DMA area with pattern */
4439 for (i = 0; i < (BFI_DIAG_DMA_BUF_SZ >> 2); i++)
4440 *((u32 *)diag->fwping.dbuf_kva + i) = diag->fwping.data;
4441
4442 /* Fill mbox msg */
4443 fwping_req = (struct bfi_diag_fwping_req_s *)diag->fwping.mbcmd.msg;
4444
4445 /* Setup SG list */
4446 bfa_alen_set(&fwping_req->alen, BFI_DIAG_DMA_BUF_SZ,
4447 diag->fwping.dbuf_pa);
4448 /* Set up dma count */
4449 fwping_req->count = cpu_to_be32(diag->fwping.count);
4450 /* Set up data pattern */
4451 fwping_req->data = diag->fwping.data;
4452
4453 /* build host command */
4454 bfi_h2i_set(fwping_req->mh, BFI_MC_DIAG, BFI_DIAG_H2I_FWPING,
4455 bfa_ioc_portid(diag->ioc));
4456
4457 /* send mbox cmd */
4458 bfa_ioc_mbox_queue(diag->ioc, &diag->fwping.mbcmd);
4459}
4460
4461static void
4462diag_fwping_comp(struct bfa_diag_s *diag,
4463 struct bfi_diag_fwping_rsp_s *diag_rsp)
4464{
4465 u32 rsp_data = diag_rsp->data;
4466 u8 rsp_dma_status = diag_rsp->dma_status;
4467
4468 bfa_trc(diag, rsp_data);
4469 bfa_trc(diag, rsp_dma_status);
4470
4471 if (rsp_dma_status == BFA_STATUS_OK) {
4472 u32 i, pat;
4473 pat = (diag->fwping.count & 0x1) ? ~(diag->fwping.data) :
4474 diag->fwping.data;
4475 /* Check mbox data */
4476 if (diag->fwping.data != rsp_data) {
4477 bfa_trc(diag, rsp_data);
4478 diag->fwping.result->dmastatus =
4479 BFA_STATUS_DATACORRUPTED;
4480 diag->fwping.status = BFA_STATUS_DATACORRUPTED;
4481 diag->fwping.cbfn(diag->fwping.cbarg,
4482 diag->fwping.status);
4483 diag->fwping.lock = 0;
4484 return;
4485 }
4486 /* Check dma pattern */
4487 for (i = 0; i < (BFI_DIAG_DMA_BUF_SZ >> 2); i++) {
4488 if (*((u32 *)diag->fwping.dbuf_kva + i) != pat) {
4489 bfa_trc(diag, i);
4490 bfa_trc(diag, pat);
4491 bfa_trc(diag,
4492 *((u32 *)diag->fwping.dbuf_kva + i));
4493 diag->fwping.result->dmastatus =
4494 BFA_STATUS_DATACORRUPTED;
4495 diag->fwping.status = BFA_STATUS_DATACORRUPTED;
4496 diag->fwping.cbfn(diag->fwping.cbarg,
4497 diag->fwping.status);
4498 diag->fwping.lock = 0;
4499 return;
4500 }
4501 }
4502 diag->fwping.result->dmastatus = BFA_STATUS_OK;
4503 diag->fwping.status = BFA_STATUS_OK;
4504 diag->fwping.cbfn(diag->fwping.cbarg, diag->fwping.status);
4505 diag->fwping.lock = 0;
4506 } else {
4507 diag->fwping.status = BFA_STATUS_HDMA_FAILED;
4508 diag->fwping.cbfn(diag->fwping.cbarg, diag->fwping.status);
4509 diag->fwping.lock = 0;
4510 }
4511}
4512
4513/*
4514 * Temperature Sensor
4515 */
4516
4517static void
4518diag_tempsensor_send(struct bfa_diag_s *diag)
4519{
4520 struct bfi_diag_ts_req_s *msg;
4521
4522 msg = (struct bfi_diag_ts_req_s *)diag->tsensor.mbcmd.msg;
4523 bfa_trc(diag, msg->temp);
4524 /* build host command */
4525 bfi_h2i_set(msg->mh, BFI_MC_DIAG, BFI_DIAG_H2I_TEMPSENSOR,
4526 bfa_ioc_portid(diag->ioc));
4527 /* send mbox cmd */
4528 bfa_ioc_mbox_queue(diag->ioc, &diag->tsensor.mbcmd);
4529}
4530
4531static void
4532diag_tempsensor_comp(struct bfa_diag_s *diag, bfi_diag_ts_rsp_t *rsp)
4533{
4534 if (!diag->tsensor.lock) {
4535 /* receiving response after ioc failure */
4536 bfa_trc(diag, diag->tsensor.lock);
4537 return;
4538 }
4539
4540 /*
4541	 * ASIC junction tempsensor is a register read operation,
4542	 * so it always returns OK
4543 */
4544 diag->tsensor.temp->temp = be16_to_cpu(rsp->temp);
4545 diag->tsensor.temp->ts_junc = rsp->ts_junc;
4546 diag->tsensor.temp->ts_brd = rsp->ts_brd;
4547 diag->tsensor.temp->status = BFA_STATUS_OK;
4548
4549 if (rsp->ts_brd) {
4550 if (rsp->status == BFA_STATUS_OK) {
4551 diag->tsensor.temp->brd_temp =
4552 be16_to_cpu(rsp->brd_temp);
4553 } else {
4554 bfa_trc(diag, rsp->status);
4555 diag->tsensor.temp->brd_temp = 0;
4556 diag->tsensor.temp->status = BFA_STATUS_DEVBUSY;
4557 }
4558 }
4559 bfa_trc(diag, rsp->ts_junc);
4560 bfa_trc(diag, rsp->temp);
4561 bfa_trc(diag, rsp->ts_brd);
4562 bfa_trc(diag, rsp->brd_temp);
4563 diag->tsensor.cbfn(diag->tsensor.cbarg, diag->tsensor.status);
4564 diag->tsensor.lock = 0;
4565}
4566
4567/*
4568 * LED Test command
4569 */
4570static void
4571diag_ledtest_send(struct bfa_diag_s *diag, struct bfa_diag_ledtest_s *ledtest)
4572{
4573 struct bfi_diag_ledtest_req_s *msg;
4574
4575 msg = (struct bfi_diag_ledtest_req_s *)diag->ledtest.mbcmd.msg;
4576 /* build host command */
4577 bfi_h2i_set(msg->mh, BFI_MC_DIAG, BFI_DIAG_H2I_LEDTEST,
4578 bfa_ioc_portid(diag->ioc));
4579
4580 /*
4581 * convert the freq from N blinks per 10 sec to
4582	 * crossbow ontime value. We do it here because division is needed.
4583 */
4584 if (ledtest->freq)
4585 ledtest->freq = 500 / ledtest->freq;
4586
4587 if (ledtest->freq == 0)
4588 ledtest->freq = 1;
4589
4590 bfa_trc(diag, ledtest->freq);
4591 /* mcpy(&ledtest_req->req, ledtest, sizeof(bfa_diag_ledtest_t)); */
4592 msg->cmd = (u8) ledtest->cmd;
4593 msg->color = (u8) ledtest->color;
4594 msg->portid = bfa_ioc_portid(diag->ioc);
4595 msg->led = ledtest->led;
4596 msg->freq = cpu_to_be16(ledtest->freq);
4597
4598 /* send mbox cmd */
4599 bfa_ioc_mbox_queue(diag->ioc, &diag->ledtest.mbcmd);
4600}
4601
4602static void
4603diag_ledtest_comp(struct bfa_diag_s *diag, struct bfi_diag_ledtest_rsp_s * msg)
4604{
4605 bfa_trc(diag, diag->ledtest.lock);
4606 diag->ledtest.lock = BFA_FALSE;
4607 /* no bfa_cb_queue is needed because driver is not waiting */
4608}
4609
4610/*
4611 * Port beaconing
4612 */
4613static void
4614diag_portbeacon_send(struct bfa_diag_s *diag, bfa_boolean_t beacon, u32 sec)
4615{
4616 struct bfi_diag_portbeacon_req_s *msg;
4617
4618 msg = (struct bfi_diag_portbeacon_req_s *)diag->beacon.mbcmd.msg;
4619 /* build host command */
4620 bfi_h2i_set(msg->mh, BFI_MC_DIAG, BFI_DIAG_H2I_PORTBEACON,
4621 bfa_ioc_portid(diag->ioc));
4622 msg->beacon = beacon;
4623 msg->period = cpu_to_be32(sec);
4624 /* send mbox cmd */
4625 bfa_ioc_mbox_queue(diag->ioc, &diag->beacon.mbcmd);
4626}
4627
4628static void
4629diag_portbeacon_comp(struct bfa_diag_s *diag)
4630{
4631 bfa_trc(diag, diag->beacon.state);
4632 diag->beacon.state = BFA_FALSE;
4633 if (diag->cbfn_beacon)
4634 diag->cbfn_beacon(diag->dev, BFA_FALSE, diag->beacon.link_e2e);
4635}
4636
4637/*
4638 * Diag hmbox handler
4639 */
4640void
4641bfa_diag_intr(void *diagarg, struct bfi_mbmsg_s *msg)
4642{
4643 struct bfa_diag_s *diag = diagarg;
4644
4645 switch (msg->mh.msg_id) {
4646 case BFI_DIAG_I2H_PORTBEACON:
4647 diag_portbeacon_comp(diag);
4648 break;
4649 case BFI_DIAG_I2H_FWPING:
4650 diag_fwping_comp(diag, (struct bfi_diag_fwping_rsp_s *) msg);
4651 break;
4652 case BFI_DIAG_I2H_TEMPSENSOR:
4653 diag_tempsensor_comp(diag, (bfi_diag_ts_rsp_t *) msg);
4654 break;
4655 case BFI_DIAG_I2H_LEDTEST:
4656 diag_ledtest_comp(diag, (struct bfi_diag_ledtest_rsp_s *) msg);
4657 break;
4658 default:
4659 bfa_trc(diag, msg->mh.msg_id);
4660 WARN_ON(1);
4661 }
4662}
4663
4664/*
4665 * Gen RAM Test
4666 *
4667 * @param[in] *diag - diag data struct
4668 * @param[in]	*memtest - mem test params input from upper layer
4669 * @param[in]	pattern - mem test pattern
4670 * @param[in]	*result - mem test result
4671 * @param[in]	cbfn - mem test callback function
4672 * @param[in]	cbarg - callback function arg
4673 *
4674 * @param[out]
4675 */
4676bfa_status_t
4677bfa_diag_memtest(struct bfa_diag_s *diag, struct bfa_diag_memtest_s *memtest,
4678 u32 pattern, struct bfa_diag_memtest_result *result,
4679 bfa_cb_diag_t cbfn, void *cbarg)
4680{
4681 bfa_trc(diag, pattern);
4682
4683 if (!bfa_ioc_adapter_is_disabled(diag->ioc))
4684 return BFA_STATUS_ADAPTER_ENABLED;
4685
4686 /* check to see if there is another destructive diag cmd running */
4687 if (diag->block) {
4688 bfa_trc(diag, diag->block);
4689 return BFA_STATUS_DEVBUSY;
4690 } else
4691 diag->block = 1;
4692
4693 diag->result = result;
4694 diag->cbfn = cbfn;
4695 diag->cbarg = cbarg;
4696
4697 /* download memtest code and take LPU0 out of reset */
4698 bfa_ioc_boot(diag->ioc, BFI_FWBOOT_TYPE_MEMTEST, BFI_FWBOOT_ENV_OS);
4699
4700 bfa_timer_begin(diag->ioc->timer_mod, &diag->timer,
4701 bfa_diag_memtest_done, diag, BFA_DIAG_MEMTEST_TOV);
4702 diag->timer_active = 1;
4703 return BFA_STATUS_OK;
4704}
4705
4706/*
4707 * DIAG firmware ping command
4708 *
4709 * @param[in] *diag - diag data struct
4710 * @param[in] cnt - dma loop count for testing PCIE
4711 * @param[in] data - data pattern to pass in fw
4712 * @param[in]	*result - pointer to the fwping result structure
4713 * @param[in]	cbfn - callback function
4714 * @param[in]	*cbarg - callback function arg
4715 *
4716 * @param[out]
4717 */
4718bfa_status_t
4719bfa_diag_fwping(struct bfa_diag_s *diag, u32 cnt, u32 data,
4720 struct bfa_diag_results_fwping *result, bfa_cb_diag_t cbfn,
4721 void *cbarg)
4722{
4723 bfa_trc(diag, cnt);
4724 bfa_trc(diag, data);
4725
4726 if (!bfa_ioc_is_operational(diag->ioc))
4727 return BFA_STATUS_IOC_NON_OP;
4728
4729 if (bfa_asic_id_ct2(bfa_ioc_devid((diag->ioc))) &&
4730 ((diag->ioc)->clscode == BFI_PCIFN_CLASS_ETH))
4731 return BFA_STATUS_CMD_NOTSUPP;
4732
4733 /* check to see if there is another destructive diag cmd running */
4734 if (diag->block || diag->fwping.lock) {
4735 bfa_trc(diag, diag->block);
4736 bfa_trc(diag, diag->fwping.lock);
4737 return BFA_STATUS_DEVBUSY;
4738 }
4739
4740 /* Initialization */
4741 diag->fwping.lock = 1;
4742 diag->fwping.cbfn = cbfn;
4743 diag->fwping.cbarg = cbarg;
4744 diag->fwping.result = result;
4745 diag->fwping.data = data;
4746 diag->fwping.count = cnt;
4747
4748 /* Init test results */
4749 diag->fwping.result->data = 0;
4750 diag->fwping.result->status = BFA_STATUS_OK;
4751
4752 /* kick off the first ping */
4753 diag_fwping_send(diag);
4754 return BFA_STATUS_OK;
4755}
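
/*
 * Usage sketch (illustrative only): exercise the host<->fw DMA path with
 * 64 loops of a known pattern.  The result structure is filled in and
 * the callback invoked from diag_fwping_comp(); 'drv', 'my_diag_cb' and
 * 'handle_error' are hypothetical caller-side names:
 *
 *	status = bfa_diag_fwping(&drv->diag, 64, 0xdeadbeef,
 *				 &drv->fwping_res, my_diag_cb, drv);
 *	if (status == BFA_STATUS_OK) {
 *		// wait for my_diag_cb(); fwping_res.dmastatus tells
 *		// whether the DMA pattern survived the round trip
 *	} else {
 *		// BFA_STATUS_CMD_NOTSUPP (CT2 Ethernet function),
 *		// BFA_STATUS_DEVBUSY or BFA_STATUS_IOC_NON_OP
 *		handle_error(status);
 *	}
 */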
4756
4757/*
4758 * Read Temperature Sensor
4759 *
4760 * @param[in] *diag - diag data struct
4761 * @param[in]	*result - pointer to the temperature sensor result structure
4762 * @param[in]	cbfn - callback function
4763 * @param[in]	*cbarg - callback function arg
4764 *
4765 * @param[out]
4766 */
4767bfa_status_t
4768bfa_diag_tsensor_query(struct bfa_diag_s *diag,
4769 struct bfa_diag_results_tempsensor_s *result,
4770 bfa_cb_diag_t cbfn, void *cbarg)
4771{
4772 /* check to see if there is a destructive diag cmd running */
4773 if (diag->block || diag->tsensor.lock) {
4774 bfa_trc(diag, diag->block);
4775 bfa_trc(diag, diag->tsensor.lock);
4776 return BFA_STATUS_DEVBUSY;
4777 }
4778
4779 if (!bfa_ioc_is_operational(diag->ioc))
4780 return BFA_STATUS_IOC_NON_OP;
4781
4782 /* Init diag mod params */
4783 diag->tsensor.lock = 1;
4784 diag->tsensor.temp = result;
4785 diag->tsensor.cbfn = cbfn;
4786 diag->tsensor.cbarg = cbarg;
4787
4788 /* Send msg to fw */
4789 diag_tempsensor_send(diag);
4790
4791 return BFA_STATUS_OK;
4792}
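/*
 * Illustrative caller sketch (hypothetical names): querying the temperature
 * sensor. Completion is asynchronous via the supplied callback, after the
 * firmware response is processed in bfa_diag_intr().
 *
 *	static void example_tsensor_cb(void *cbarg, bfa_status_t status)
 *	{
 *		// On BFA_STATUS_OK, the result struct passed below is filled in.
 *	}
 *
 *	static bfa_status_t example_read_temp(struct bfa_diag_s *diag)
 *	{
 *		static struct bfa_diag_results_tempsensor_s result;
 *
 *		return bfa_diag_tsensor_query(diag, &result,
 *					example_tsensor_cb, NULL);
 *	}
 */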
4793
4794/*
4795 * LED Test command
4796 *
4797 * @param[in] *diag - diag data struct
4798 * @param[in] *ledtest - pointer to ledtest data structure
4799 *
4800 * @param[out]
4801 */
4802bfa_status_t
4803bfa_diag_ledtest(struct bfa_diag_s *diag, struct bfa_diag_ledtest_s *ledtest)
4804{
4805 bfa_trc(diag, ledtest->cmd);
4806
4807 if (!bfa_ioc_is_operational(diag->ioc))
4808 return BFA_STATUS_IOC_NON_OP;
4809
4810 if (diag->beacon.state)
4811 return BFA_STATUS_BEACON_ON;
4812
4813 if (diag->ledtest.lock)
4814 return BFA_STATUS_LEDTEST_OP;
4815
4816 /* Send msg to fw */
4817 diag->ledtest.lock = BFA_TRUE;
4818 diag_ledtest_send(diag, ledtest);
4819
4820 return BFA_STATUS_OK;
4821}
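/*
 * Illustrative caller sketch (hypothetical wrapper): starting an LED test.
 * The caller fills in the bfa_diag_ledtest_s request (including its cmd
 * field) before the call; the concrete command values are not shown here.
 *
 *	static bfa_status_t example_ledtest(struct bfa_diag_s *diag,
 *					struct bfa_diag_ledtest_s *ledtest)
 *	{
 *		// Fails with BFA_STATUS_BEACON_ON if beaconing is active,
 *		// or BFA_STATUS_LEDTEST_OP if a test is already running.
 *		return bfa_diag_ledtest(diag, ledtest);
 *	}
 */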
4822
4823/*
4824 * Port beaconing command
4825 *
4826 * @param[in] *diag - diag data struct
4827 * @param[in] beacon - port beaconing 1:ON 0:OFF
4828 * @param[in] link_e2e_beacon - link beaconing 1:ON 0:OFF
4829 * @param[in] sec - beaconing duration in seconds
4830 *
4831 * @param[out]
4832 */
4833bfa_status_t
4834bfa_diag_beacon_port(struct bfa_diag_s *diag, bfa_boolean_t beacon,
4835 bfa_boolean_t link_e2e_beacon, uint32_t sec)
4836{
4837 bfa_trc(diag, beacon);
4838 bfa_trc(diag, link_e2e_beacon);
4839 bfa_trc(diag, sec);
4840
4841 if (!bfa_ioc_is_operational(diag->ioc))
4842 return BFA_STATUS_IOC_NON_OP;
4843
4844 if (diag->ledtest.lock)
4845 return BFA_STATUS_LEDTEST_OP;
4846
4847 if (diag->beacon.state && beacon) /* beacon already on */
4848 return BFA_STATUS_BEACON_ON;
4849
4850 diag->beacon.state = beacon;
4851 diag->beacon.link_e2e = link_e2e_beacon;
4852 if (diag->cbfn_beacon)
4853 diag->cbfn_beacon(diag->dev, beacon, link_e2e_beacon);
4854
4855 /* Send msg to fw */
4856 diag_portbeacon_send(diag, beacon, sec);
4857
4858 return BFA_STATUS_OK;
4859}
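/*
 * Illustrative caller sketch (hypothetical wrapper and duration): turning
 * port beaconing on for a fixed time, then issuing an explicit OFF request.
 *
 *	static void example_beacon(struct bfa_diag_s *diag)
 *	{
 *		// Beacon the port LED (no end-to-end link beacon) for 30s.
 *		bfa_diag_beacon_port(diag, BFA_TRUE, BFA_FALSE, 30);
 *
 *		// ... later, an explicit OFF request:
 *		bfa_diag_beacon_port(diag, BFA_FALSE, BFA_FALSE, 0);
 *	}
 */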
4860
4861/*
4862 * Return DMA memory needed by diag module.
4863 */
4864u32
4865bfa_diag_meminfo(void)
4866{
4867 return BFA_ROUNDUP(BFI_DIAG_DMA_BUF_SZ, BFA_DMA_ALIGN_SZ);
4868}
4869
4870/*
4871 * Attach virtual and physical memory for Diag.
4872 */
4873void
4874bfa_diag_attach(struct bfa_diag_s *diag, struct bfa_ioc_s *ioc, void *dev,
4875 bfa_cb_diag_beacon_t cbfn_beacon, struct bfa_trc_mod_s *trcmod)
4876{
4877 diag->dev = dev;
4878 diag->ioc = ioc;
4879 diag->trcmod = trcmod;
4880
4881 diag->block = 0;
4882 diag->cbfn = NULL;
4883 diag->cbarg = NULL;
4884 diag->result = NULL;
4885 diag->cbfn_beacon = cbfn_beacon;
4886
4887 bfa_ioc_mbox_regisr(diag->ioc, BFI_MC_DIAG, bfa_diag_intr, diag);
4888 bfa_q_qe_init(&diag->ioc_notify);
4889 bfa_ioc_notify_init(&diag->ioc_notify, bfa_diag_notify, diag);
4890 list_add_tail(&diag->ioc_notify.qe, &diag->ioc->notify_q);
4891}
4892
4893void
4894bfa_diag_memclaim(struct bfa_diag_s *diag, u8 *dm_kva, u64 dm_pa)
4895{
4896 diag->fwping.dbuf_kva = dm_kva;
4897 diag->fwping.dbuf_pa = dm_pa;
4898 memset(diag->fwping.dbuf_kva, 0, BFI_DIAG_DMA_BUF_SZ);
4899}
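/*
 * Illustrative setup sketch (hypothetical caller variables): a plausible
 * ordering is meminfo -> attach -> memclaim, with the DMA block sized by
 * bfa_diag_meminfo() and carved out of a larger driver allocation.
 *
 *	u32 sz = bfa_diag_meminfo();	// rounded-up BFI_DIAG_DMA_BUF_SZ
 *
 *	bfa_diag_attach(diag, ioc, dev, cbfn_beacon, trcmod);
 *	bfa_diag_memclaim(diag, dma_kva, dma_pa);	// sz bytes at kva/pa
 */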
Krishna Gudipati3350d982011-06-24 20:28:37 -07004900
4901/*
4902 * PHY module specific
4903 */
4904#define BFA_PHY_DMA_BUF_SZ 0x02000 /* 8k dma buffer */
4905#define BFA_PHY_LOCK_STATUS 0x018878 /* phy semaphore status reg */
4906
4907static void
4908bfa_phy_ntoh32(u32 *obuf, u32 *ibuf, int sz)
4909{
4910 int i, m = sz >> 2;
4911
4912 for (i = 0; i < m; i++)
4913 obuf[i] = be32_to_cpu(ibuf[i]);
4914}
4915
4916static bfa_boolean_t
4917bfa_phy_present(struct bfa_phy_s *phy)
4918{
4919 return (phy->ioc->attr->card_type == BFA_MFG_TYPE_LIGHTNING);
4920}
4921
4922static void
4923bfa_phy_notify(void *cbarg, enum bfa_ioc_event_e event)
4924{
4925 struct bfa_phy_s *phy = cbarg;
4926
4927 bfa_trc(phy, event);
4928
4929 switch (event) {
4930 case BFA_IOC_E_DISABLED:
4931 case BFA_IOC_E_FAILED:
4932 if (phy->op_busy) {
4933 phy->status = BFA_STATUS_IOC_FAILURE;
4934 phy->cbfn(phy->cbarg, phy->status);
4935 phy->op_busy = 0;
4936 }
4937 break;
4938
4939 default:
4940 break;
4941 }
4942}
4943
4944/*
4945 * Send phy attribute query request.
4946 *
4947 * @param[in] cbarg - callback argument
4948 */
4949static void
4950bfa_phy_query_send(void *cbarg)
4951{
4952 struct bfa_phy_s *phy = cbarg;
4953 struct bfi_phy_query_req_s *msg =
4954 (struct bfi_phy_query_req_s *) phy->mb.msg;
4955
4956 msg->instance = phy->instance;
4957 bfi_h2i_set(msg->mh, BFI_MC_PHY, BFI_PHY_H2I_QUERY_REQ,
4958 bfa_ioc_portid(phy->ioc));
4959 bfa_alen_set(&msg->alen, sizeof(struct bfa_phy_attr_s), phy->dbuf_pa);
4960 bfa_ioc_mbox_queue(phy->ioc, &phy->mb);
4961}
4962
4963/*
4964 * Send phy write request.
4965 *
4966 * @param[in] cbarg - callback argument
4967 */
4968static void
4969bfa_phy_write_send(void *cbarg)
4970{
4971 struct bfa_phy_s *phy = cbarg;
4972 struct bfi_phy_write_req_s *msg =
4973 (struct bfi_phy_write_req_s *) phy->mb.msg;
4974 u32 len;
4975 u16 *buf, *dbuf;
4976 int i, sz;
4977
4978 msg->instance = phy->instance;
4979 msg->offset = cpu_to_be32(phy->addr_off + phy->offset);
4980 len = (phy->residue < BFA_PHY_DMA_BUF_SZ) ?
4981 phy->residue : BFA_PHY_DMA_BUF_SZ;
4982 msg->length = cpu_to_be32(len);
4983
4984 /* indicate if it's the last msg of the whole write operation */
4985 msg->last = (len == phy->residue) ? 1 : 0;
4986
4987 bfi_h2i_set(msg->mh, BFI_MC_PHY, BFI_PHY_H2I_WRITE_REQ,
4988 bfa_ioc_portid(phy->ioc));
4989 bfa_alen_set(&msg->alen, len, phy->dbuf_pa);
4990
4991 buf = (u16 *) (phy->ubuf + phy->offset);
4992 dbuf = (u16 *)phy->dbuf_kva;
4993 sz = len >> 1;
4994 for (i = 0; i < sz; i++)
4995 buf[i] = cpu_to_be16(dbuf[i]);
4996
4997 bfa_ioc_mbox_queue(phy->ioc, &phy->mb);
4998
4999 phy->residue -= len;
5000 phy->offset += len;
5001}
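/*
 * Worked example of the chunking above (illustrative numbers): with
 * BFA_PHY_DMA_BUF_SZ = 0x2000, a 20 KB write (residue = 0x5000) goes out as
 * three requests of 0x2000, 0x2000 and 0x1000 bytes. Only the final request
 * has msg->last set (len == residue), and offset advances by the chunk
 * length after each send.
 */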
5002
5003/*
5004 * Send phy read request.
5005 *
5006 * @param[in] cbarg - callback argument
5007 */
5008static void
5009bfa_phy_read_send(void *cbarg)
5010{
5011 struct bfa_phy_s *phy = cbarg;
5012 struct bfi_phy_read_req_s *msg =
5013 (struct bfi_phy_read_req_s *) phy->mb.msg;
5014 u32 len;
5015
5016 msg->instance = phy->instance;
5017 msg->offset = cpu_to_be32(phy->addr_off + phy->offset);
5018 len = (phy->residue < BFA_PHY_DMA_BUF_SZ) ?
5019 phy->residue : BFA_PHY_DMA_BUF_SZ;
5020 msg->length = cpu_to_be32(len);
5021 bfi_h2i_set(msg->mh, BFI_MC_PHY, BFI_PHY_H2I_READ_REQ,
5022 bfa_ioc_portid(phy->ioc));
5023 bfa_alen_set(&msg->alen, len, phy->dbuf_pa);
5024 bfa_ioc_mbox_queue(phy->ioc, &phy->mb);
5025}
5026
5027/*
5028 * Send phy stats request.
5029 *
5030 * @param[in] cbarg - callback argument
5031 */
5032static void
5033bfa_phy_stats_send(void *cbarg)
5034{
5035 struct bfa_phy_s *phy = cbarg;
5036 struct bfi_phy_stats_req_s *msg =
5037 (struct bfi_phy_stats_req_s *) phy->mb.msg;
5038
5039 msg->instance = phy->instance;
5040 bfi_h2i_set(msg->mh, BFI_MC_PHY, BFI_PHY_H2I_STATS_REQ,
5041 bfa_ioc_portid(phy->ioc));
5042 bfa_alen_set(&msg->alen, sizeof(struct bfa_phy_stats_s), phy->dbuf_pa);
5043 bfa_ioc_mbox_queue(phy->ioc, &phy->mb);
5044}
5045
5046/*
5047 * PHY memory info API.
5048 *
5049 * @param[in] mincfg - minimal cfg variable
5050 */
5051u32
5052bfa_phy_meminfo(bfa_boolean_t mincfg)
5053{
5054 /* min driver doesn't need phy */
5055 if (mincfg)
5056 return 0;
5057
5058 return BFA_ROUNDUP(BFA_PHY_DMA_BUF_SZ, BFA_DMA_ALIGN_SZ);
5059}
5060
5061/*
5062 * PHY attach API.
5063 *
5064 * @param[in] phy - phy structure
5065 * @param[in] ioc - ioc structure
5066 * @param[in] dev - device structure
5067 * @param[in] trcmod - trace module
5068 * @param[in] mincfg - minimal cfg variable
5069 */
5070void
5071bfa_phy_attach(struct bfa_phy_s *phy, struct bfa_ioc_s *ioc, void *dev,
5072 struct bfa_trc_mod_s *trcmod, bfa_boolean_t mincfg)
5073{
5074 phy->ioc = ioc;
5075 phy->trcmod = trcmod;
5076 phy->cbfn = NULL;
5077 phy->cbarg = NULL;
5078 phy->op_busy = 0;
5079
5080 bfa_ioc_mbox_regisr(phy->ioc, BFI_MC_PHY, bfa_phy_intr, phy);
5081 bfa_q_qe_init(&phy->ioc_notify);
5082 bfa_ioc_notify_init(&phy->ioc_notify, bfa_phy_notify, phy);
5083 list_add_tail(&phy->ioc_notify.qe, &phy->ioc->notify_q);
5084
5085 /* min driver doesn't need phy */
5086 if (mincfg) {
5087 phy->dbuf_kva = NULL;
5088 phy->dbuf_pa = 0;
5089 }
5090}
5091
5092/*
5093 * Claim memory for phy
5094 *
5095 * @param[in] phy - phy structure
5096 * @param[in] dm_kva - pointer to virtual memory address
5097 * @param[in] dm_pa - physical memory address
5098 * @param[in] mincfg - minimal cfg variable
5099 */
5100void
5101bfa_phy_memclaim(struct bfa_phy_s *phy, u8 *dm_kva, u64 dm_pa,
5102 bfa_boolean_t mincfg)
5103{
5104 if (mincfg)
5105 return;
5106
5107 phy->dbuf_kva = dm_kva;
5108 phy->dbuf_pa = dm_pa;
5109 memset(phy->dbuf_kva, 0, BFA_PHY_DMA_BUF_SZ);
5110 dm_kva += BFA_ROUNDUP(BFA_PHY_DMA_BUF_SZ, BFA_DMA_ALIGN_SZ);
5111 dm_pa += BFA_ROUNDUP(BFA_PHY_DMA_BUF_SZ, BFA_DMA_ALIGN_SZ);
5112}
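/*
 * Illustrative setup sketch (hypothetical caller variables): as with the
 * diag module, the ordering is meminfo -> attach -> memclaim, and the DMA
 * buffer is skipped entirely for a min-cfg driver.
 *
 *	u32 sz = bfa_phy_meminfo(mincfg);	// 0 when mincfg is true
 *
 *	bfa_phy_attach(phy, ioc, dev, trcmod, mincfg);
 *	bfa_phy_memclaim(phy, dma_kva, dma_pa, mincfg);
 */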
5113
5114bfa_boolean_t
5115bfa_phy_busy(struct bfa_ioc_s *ioc)
5116{
5117 void __iomem *rb;
5118
5119 rb = bfa_ioc_bar0(ioc);
5120 return readl(rb + BFA_PHY_LOCK_STATUS);
5121}
5122
5123/*
5124 * Get phy attribute.
5125 *
5126 * @param[in] phy - phy structure
5127 * @param[in] attr - phy attribute structure
5128 * @param[in] cbfn - callback function
5129 * @param[in] cbarg - callback argument
5130 *
5131 * Return status.
5132 */
5133bfa_status_t
5134bfa_phy_get_attr(struct bfa_phy_s *phy, u8 instance,
5135 struct bfa_phy_attr_s *attr, bfa_cb_phy_t cbfn, void *cbarg)
5136{
5137 bfa_trc(phy, BFI_PHY_H2I_QUERY_REQ);
5138 bfa_trc(phy, instance);
5139
5140 if (!bfa_phy_present(phy))
5141 return BFA_STATUS_PHY_NOT_PRESENT;
5142
5143 if (!bfa_ioc_is_operational(phy->ioc))
5144 return BFA_STATUS_IOC_NON_OP;
5145
5146 if (phy->op_busy || bfa_phy_busy(phy->ioc)) {
5147 bfa_trc(phy, phy->op_busy);
5148 return BFA_STATUS_DEVBUSY;
5149 }
5150
5151 phy->op_busy = 1;
5152 phy->cbfn = cbfn;
5153 phy->cbarg = cbarg;
5154 phy->instance = instance;
5155 phy->ubuf = (uint8_t *) attr;
5156 bfa_phy_query_send(phy);
5157
5158 return BFA_STATUS_OK;
5159}
5160
5161/*
5162 * Get phy stats.
5163 *
5164 * @param[in] phy - phy structure
5165 * @param[in] instance - phy image instance
5166 * @param[in] stats - pointer to phy stats
5167 * @param[in] cbfn - callback function
5168 * @param[in] cbarg - callback argument
5169 *
5170 * Return status.
5171 */
5172bfa_status_t
5173bfa_phy_get_stats(struct bfa_phy_s *phy, u8 instance,
5174 struct bfa_phy_stats_s *stats,
5175 bfa_cb_phy_t cbfn, void *cbarg)
5176{
5177 bfa_trc(phy, BFI_PHY_H2I_STATS_REQ);
5178 bfa_trc(phy, instance);
5179
5180 if (!bfa_phy_present(phy))
5181 return BFA_STATUS_PHY_NOT_PRESENT;
5182
5183 if (!bfa_ioc_is_operational(phy->ioc))
5184 return BFA_STATUS_IOC_NON_OP;
5185
5186 if (phy->op_busy || bfa_phy_busy(phy->ioc)) {
5187 bfa_trc(phy, phy->op_busy);
5188 return BFA_STATUS_DEVBUSY;
5189 }
5190
5191 phy->op_busy = 1;
5192 phy->cbfn = cbfn;
5193 phy->cbarg = cbarg;
5194 phy->instance = instance;
5195 phy->ubuf = (u8 *) stats;
5196 bfa_phy_stats_send(phy);
5197
5198 return BFA_STATUS_OK;
5199}
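/*
 * Illustrative caller sketch (hypothetical names) for the two query paths
 * above: bfa_phy_get_attr() and bfa_phy_get_stats() follow the same pattern,
 * with the DMA response copied into the caller's buffer before the callback
 * runs.
 *
 *	static void example_phy_query_cb(void *cbarg, bfa_status_t status)
 *	{
 *		// cbarg points at the attr buffer passed in below.
 *	}
 *
 *	static bfa_status_t example_phy_query(struct bfa_phy_s *phy)
 *	{
 *		static struct bfa_phy_attr_s attr;
 *
 *		return bfa_phy_get_attr(phy, 0, &attr,
 *					example_phy_query_cb, &attr);
 *	}
 */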
5200
5201/*
5202 * Update phy image.
5203 *
5204 * @param[in] phy - phy structure
5205 * @param[in] instance - phy image instance
5206 * @param[in] buf - update data buffer
5207 * @param[in] len - data buffer length
5208 * @param[in] offset - offset relative to starting address
5209 * @param[in] cbfn - callback function
5210 * @param[in] cbarg - callback argument
5211 *
5212 * Return status.
5213 */
5214bfa_status_t
5215bfa_phy_update(struct bfa_phy_s *phy, u8 instance,
5216 void *buf, u32 len, u32 offset,
5217 bfa_cb_phy_t cbfn, void *cbarg)
5218{
5219 bfa_trc(phy, BFI_PHY_H2I_WRITE_REQ);
5220 bfa_trc(phy, instance);
5221 bfa_trc(phy, len);
5222 bfa_trc(phy, offset);
5223
5224 if (!bfa_phy_present(phy))
5225 return BFA_STATUS_PHY_NOT_PRESENT;
5226
5227 if (!bfa_ioc_is_operational(phy->ioc))
5228 return BFA_STATUS_IOC_NON_OP;
5229
5230 /* 'len' must be on a word (4-byte) boundary */
5231 if (!len || (len & 0x03))
5232 return BFA_STATUS_FAILED;
5233
5234 if (phy->op_busy || bfa_phy_busy(phy->ioc)) {
5235 bfa_trc(phy, phy->op_busy);
5236 return BFA_STATUS_DEVBUSY;
5237 }
5238
5239 phy->op_busy = 1;
5240 phy->cbfn = cbfn;
5241 phy->cbarg = cbarg;
5242 phy->instance = instance;
5243 phy->residue = len;
5244 phy->offset = 0;
5245 phy->addr_off = offset;
5246 phy->ubuf = buf;
5247
5248 bfa_phy_write_send(phy);
5249 return BFA_STATUS_OK;
5250}
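/*
 * Illustrative caller sketch (hypothetical names, instance 0): writing an
 * image fragment to the phy. 'len' must be a multiple of four, and the
 * buffer must stay valid until the callback runs, since it is consumed
 * chunk by chunk.
 *
 *	static void example_phy_update_cb(void *cbarg, bfa_status_t status)
 *	{
 *		// status is BFA_STATUS_OK once every chunk is acknowledged.
 *	}
 *
 *	static bfa_status_t example_phy_update(struct bfa_phy_s *phy,
 *					void *img, u32 img_len)
 *	{
 *		return bfa_phy_update(phy, 0, img, img_len, 0,
 *					example_phy_update_cb, NULL);
 *	}
 */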
5251
5252/*
5253 * Read phy image.
5254 *
5255 * @param[in] phy - phy structure
5256 * @param[in] instance - phy image instance
5257 * @param[in] buf - read data buffer
5258 * @param[in] len - data buffer length
5259 * @param[in] offset - offset relative to starting address
5260 * @param[in] cbfn - callback function
5261 * @param[in] cbarg - callback argument
5262 *
5263 * Return status.
5264 */
5265bfa_status_t
5266bfa_phy_read(struct bfa_phy_s *phy, u8 instance,
5267 void *buf, u32 len, u32 offset,
5268 bfa_cb_phy_t cbfn, void *cbarg)
5269{
5270 bfa_trc(phy, BFI_PHY_H2I_READ_REQ);
5271 bfa_trc(phy, instance);
5272 bfa_trc(phy, len);
5273 bfa_trc(phy, offset);
5274
5275 if (!bfa_phy_present(phy))
5276 return BFA_STATUS_PHY_NOT_PRESENT;
5277
5278 if (!bfa_ioc_is_operational(phy->ioc))
5279 return BFA_STATUS_IOC_NON_OP;
5280
5281 /* 'len' must be on a word (4-byte) boundary */
5282 if (!len || (len & 0x03))
5283 return BFA_STATUS_FAILED;
5284
5285 if (phy->op_busy || bfa_phy_busy(phy->ioc)) {
5286 bfa_trc(phy, phy->op_busy);
5287 return BFA_STATUS_DEVBUSY;
5288 }
5289
5290 phy->op_busy = 1;
5291 phy->cbfn = cbfn;
5292 phy->cbarg = cbarg;
5293 phy->instance = instance;
5294 phy->residue = len;
5295 phy->offset = 0;
5296 phy->addr_off = offset;
5297 phy->ubuf = buf;
5298 bfa_phy_read_send(phy);
5299
5300 return BFA_STATUS_OK;
5301}
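/*
 * Illustrative caller sketch (hypothetical names, instance 0): reading back
 * 'len' bytes from offset 0, e.g. to verify a previous update. The read
 * completes chunk by chunk in bfa_phy_intr() before the callback fires.
 *
 *	static void example_phy_read_cb(void *cbarg, bfa_status_t status)
 *	{
 *		// On BFA_STATUS_OK, the buffer holds the phy contents.
 *	}
 *
 *	static bfa_status_t example_phy_verify(struct bfa_phy_s *phy,
 *					void *buf, u32 len)
 *	{
 *		return bfa_phy_read(phy, 0, buf, len, 0,
 *					example_phy_read_cb, NULL);
 *	}
 */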
5302
5303/*
5304 * Process phy response messages upon receiving interrupts.
5305 *
5306 * @param[in] phyarg - phy structure
5307 * @param[in] msg - message structure
5308 */
5309void
5310bfa_phy_intr(void *phyarg, struct bfi_mbmsg_s *msg)
5311{
5312 struct bfa_phy_s *phy = phyarg;
5313 u32 status;
5314
5315 union {
5316 struct bfi_phy_query_rsp_s *query;
5317 struct bfi_phy_stats_rsp_s *stats;
5318 struct bfi_phy_write_rsp_s *write;
5319 struct bfi_phy_read_rsp_s *read;
5320 struct bfi_mbmsg_s *msg;
5321 } m;
5322
5323 m.msg = msg;
5324 bfa_trc(phy, msg->mh.msg_id);
5325
5326 if (!phy->op_busy) {
5327 /* receiving response after ioc failure */
5328 bfa_trc(phy, 0x9999);
5329 return;
5330 }
5331
5332 switch (msg->mh.msg_id) {
5333 case BFI_PHY_I2H_QUERY_RSP:
5334 status = be32_to_cpu(m.query->status);
5335 bfa_trc(phy, status);
5336
5337 if (status == BFA_STATUS_OK) {
5338 struct bfa_phy_attr_s *attr =
5339 (struct bfa_phy_attr_s *) phy->ubuf;
5340 bfa_phy_ntoh32((u32 *)attr, (u32 *)phy->dbuf_kva,
5341 sizeof(struct bfa_phy_attr_s));
5342 bfa_trc(phy, attr->status);
5343 bfa_trc(phy, attr->length);
5344 }
5345
5346 phy->status = status;
5347 phy->op_busy = 0;
5348 if (phy->cbfn)
5349 phy->cbfn(phy->cbarg, phy->status);
5350 break;
5351 case BFI_PHY_I2H_STATS_RSP:
5352 status = be32_to_cpu(m.stats->status);
5353 bfa_trc(phy, status);
5354
5355 if (status == BFA_STATUS_OK) {
5356 struct bfa_phy_stats_s *stats =
5357 (struct bfa_phy_stats_s *) phy->ubuf;
5358 bfa_phy_ntoh32((u32 *)stats, (u32 *)phy->dbuf_kva,
5359 sizeof(struct bfa_phy_stats_s));
5360 bfa_trc(phy, stats->status);
5361 }
5362
5363 phy->status = status;
5364 phy->op_busy = 0;
5365 if (phy->cbfn)
5366 phy->cbfn(phy->cbarg, phy->status);
5367 break;
5368 case BFI_PHY_I2H_WRITE_RSP:
5369 status = be32_to_cpu(m.write->status);
5370 bfa_trc(phy, status);
5371
5372 if (status != BFA_STATUS_OK || phy->residue == 0) {
5373 phy->status = status;
5374 phy->op_busy = 0;
5375 if (phy->cbfn)
5376 phy->cbfn(phy->cbarg, phy->status);
5377 } else {
5378 bfa_trc(phy, phy->offset);
5379 bfa_phy_write_send(phy);
5380 }
5381 break;
5382 case BFI_PHY_I2H_READ_RSP:
5383 status = be32_to_cpu(m.read->status);
5384 bfa_trc(phy, status);
5385
5386 if (status != BFA_STATUS_OK) {
5387 phy->status = status;
5388 phy->op_busy = 0;
5389 if (phy->cbfn)
5390 phy->cbfn(phy->cbarg, phy->status);
5391 } else {
5392 u32 len = be32_to_cpu(m.read->length);
5393 u16 *buf = (u16 *)(phy->ubuf + phy->offset);
5394 u16 *dbuf = (u16 *)phy->dbuf_kva;
5395 int i, sz = len >> 1;
5396
5397 bfa_trc(phy, phy->offset);
5398 bfa_trc(phy, len);
5399
5400 for (i = 0; i < sz; i++)
5401 buf[i] = be16_to_cpu(dbuf[i]);
5402
5403 phy->residue -= len;
5404 phy->offset += len;
5405
5406 if (phy->residue == 0) {
5407 phy->status = status;
5408 phy->op_busy = 0;
5409 if (phy->cbfn)
5410 phy->cbfn(phy->cbarg, phy->status);
5411 } else
5412 bfa_phy_read_send(phy);
5413 }
5414 break;
5415 default:
5416 WARN_ON(1);
5417 }
5418}