blob: 69b1a80e36877a178a60f320dd6f274ea3587fad [file] [log] [blame]
Thomas Gleixner6e9ef502019-05-29 16:57:52 -07001// SPDX-License-Identifier: GPL-2.0-only
Jitendra Bhivare942b7652017-03-24 14:11:48 +05302/*
Jitendra Bhivare0172dc62017-10-10 16:18:19 +05303 * Copyright 2017 Broadcom. All Rights Reserved.
Jitendra Bhivare942b7652017-03-24 14:11:48 +05304 * The term "Broadcom" refers to Broadcom Limited and/or its subsidiaries.
Jayamohan Kallickal6733b392009-09-05 07:36:35 +05305 *
Jayamohan Kallickal6733b392009-09-05 07:36:35 +05306 * Contact Information:
Jitendra Bhivare60f36e02016-08-19 15:20:24 +05307 * linux-drivers@broadcom.com
Jayamohan Kallickal6733b392009-09-05 07:36:35 +05308 */
9
John Soni Jose21771992012-04-03 23:41:49 -050010#include <scsi/iscsi_proto.h>
11
Jayamohan Kallickal4eea99d2013-09-28 15:35:48 -070012#include "be_main.h"
Jayamohan Kallickal6733b392009-09-05 07:36:35 +053013#include "be.h"
14#include "be_mgmt.h"
Jayamohan Kallickal6733b392009-09-05 07:36:35 +053015
/*
 * UE Status Low CSR.
 * Names for each bit position of the low Unrecoverable Error status
 * register; presumably indexed by bit number when decoding a UE —
 * consumer is elsewhere in this file (not visible in this chunk).
 * Trailing spaces in some entries are kept as-is for log formatting.
 */
static const char * const desc_ue_status_low[] = {
	"CEV",
	"CTX",
	"DBUF",
	"ERX",
	"Host",
	"MPU",
	"NDMA",
	"PTC ",
	"RDMA ",
	"RXF ",
	"RXIPS ",
	"RXULP0 ",
	"RXULP1 ",
	"RXULP2 ",
	"TIM ",
	"TPOST ",
	"TPRE ",
	"TXIPS ",
	"TXULP0 ",
	"TXULP1 ",
	"UC ",
	"WDMA ",
	"TXULP2 ",
	"HOST1 ",
	"P0_OB_LINK ",
	"P1_OB_LINK ",
	"HOST_GPIO ",
	"MBOX ",
	"AXGMAC0",
	"AXGMAC1",
	"JTAG",
	"MPU_INTPEND"
};
51
/*
 * UE Status High CSR.
 * Names for each bit position of the high Unrecoverable Error status
 * register (bits 32..63 of the combined UE status); "Unknown" entries
 * pad the table to a full 32 bits.
 */
static const char * const desc_ue_status_hi[] = {
	"LPCMEMHOST",
	"MGMT_MAC",
	"PCS0ONLINE",
	"MPU_IRAM",
	"PCS1ONLINE",
	"PCTL0",
	"PCTL1",
	"PMEM",
	"RR",
	"TXPB",
	"RXPP",
	"XAUI",
	"TXP",
	"ARM",
	"IPC",
	"HOST2",
	"HOST3",
	"HOST4",
	"HOST5",
	"HOST6",
	"HOST7",
	"HOST8",
	"HOST9",
	"NETC",
	"Unknown",
	"Unknown",
	"Unknown",
	"Unknown",
	"Unknown",
	"Unknown",
	"Unknown",
	"Unknown"
};
87
/**
 * alloc_mcc_wrb - allocate one MCC WRB and a driver tag tracking it
 * @phba: driver private structure
 * @ref_tag: out-parameter; receives the tag allocated for this command
 *
 * Takes the MCC lock, pulls the next free tag from the circular tag pool
 * and the next WRB slot from the MCC queue, and stamps the tag (plus the
 * WRB index) into the WRB so the completion path can find both again.
 *
 * Return: pointer to a zeroed WRB on success, NULL if the queue is full
 * or no tag is available.
 */
struct be_mcc_wrb *alloc_mcc_wrb(struct beiscsi_hba *phba,
				 unsigned int *ref_tag)
{
	struct be_queue_info *mccq = &phba->ctrl.mcc_obj.q;
	struct be_mcc_wrb *wrb = NULL;
	unsigned int tag;

	spin_lock(&phba->ctrl.mcc_lock);
	if (mccq->used == mccq->len) {
		beiscsi_log(phba, KERN_ERR, BEISCSI_LOG_INIT |
			    BEISCSI_LOG_CONFIG | BEISCSI_LOG_MBOX,
			    "BC_%d : MCC queue full: WRB used %u tag avail %u\n",
			    mccq->used, phba->ctrl.mcc_tag_available);
		goto alloc_failed;
	}

	if (!phba->ctrl.mcc_tag_available)
		goto alloc_failed;

	/* tag 0 is never handed out; seeing it here means pool corruption */
	tag = phba->ctrl.mcc_tag[phba->ctrl.mcc_alloc_index];
	if (!tag) {
		beiscsi_log(phba, KERN_ERR, BEISCSI_LOG_INIT |
			    BEISCSI_LOG_CONFIG | BEISCSI_LOG_MBOX,
			    "BC_%d : MCC tag 0 allocated: tag avail %u alloc index %u\n",
			    phba->ctrl.mcc_tag_available,
			    phba->ctrl.mcc_alloc_index);
		goto alloc_failed;
	}

	/* return this tag for further reference */
	*ref_tag = tag;
	/* claim the tag and reset its per-command tracking state */
	phba->ctrl.mcc_tag[phba->ctrl.mcc_alloc_index] = 0;
	phba->ctrl.mcc_tag_status[tag] = 0;
	phba->ctrl.ptag_state[tag].tag_state = 0;
	phba->ctrl.ptag_state[tag].cbfn = NULL;
	phba->ctrl.mcc_tag_available--;
	/* advance allocation index circularly over MAX_MCC_CMD slots */
	if (phba->ctrl.mcc_alloc_index == (MAX_MCC_CMD - 1))
		phba->ctrl.mcc_alloc_index = 0;
	else
		phba->ctrl.mcc_alloc_index++;

	/* take the WRB at the queue head and record tag + WRB index in it */
	wrb = queue_head_node(mccq);
	memset(wrb, 0, sizeof(*wrb));
	wrb->tag0 = tag;
	wrb->tag0 |= (mccq->head << MCC_Q_WRB_IDX_SHIFT) & MCC_Q_WRB_IDX_MASK;
	queue_head_inc(mccq);
	mccq->used++;

alloc_failed:
	spin_unlock(&phba->ctrl.mcc_lock);
	return wrb;
}
140
141void free_mcc_wrb(struct be_ctrl_info *ctrl, unsigned int tag)
142{
143 struct be_queue_info *mccq = &ctrl->mcc_obj.q;
144
Jitendra Bhivare96eb8d42016-08-19 15:19:59 +0530145 spin_lock(&ctrl->mcc_lock);
Jitendra Bhivare69fd6d72016-02-04 15:49:14 +0530146 tag = tag & MCC_Q_CMD_TAG_MASK;
147 ctrl->mcc_tag[ctrl->mcc_free_index] = tag;
148 if (ctrl->mcc_free_index == (MAX_MCC_CMD - 1))
149 ctrl->mcc_free_index = 0;
150 else
151 ctrl->mcc_free_index++;
152 ctrl->mcc_tag_available++;
Jitendra Bhivare090e2182016-02-04 15:49:17 +0530153 mccq->used--;
Jitendra Bhivare96eb8d42016-08-19 15:19:59 +0530154 spin_unlock(&ctrl->mcc_lock);
Jitendra Bhivare69fd6d72016-02-04 15:49:14 +0530155}
156
/*
 * __beiscsi_mcc_compl_status - Return the status of MCC completion
 * @phba: Driver private structure
 * @tag: Tag for the MBX Command
 * @wrb: the WRB used for the MBX Command
 * @mbx_cmd_mem: ptr to memory allocated for MBX Cmd
 *
 * Decodes the status/extd-status packed into mcc_tag_status[tag] by the
 * completion path and, on failure, logs the offending subsystem/opcode
 * taken from the request header (DMA buffer for non-embedded commands,
 * embedded WRB payload otherwise).
 *
 * return
 * Success: 0
 * Failure: Non-Zero (-EIO on error, -EAGAIN if the response buffer was
 * too small and the caller should retry with a larger one)
 */
int __beiscsi_mcc_compl_status(struct beiscsi_hba *phba,
			       unsigned int tag,
			       struct be_mcc_wrb **wrb,
			       struct be_dma_mem *mbx_cmd_mem)
{
	struct be_queue_info *mccq = &phba->ctrl.mcc_obj.q;
	uint16_t status = 0, addl_status = 0, wrb_num = 0;
	struct be_cmd_resp_hdr *mbx_resp_hdr;
	struct be_cmd_req_hdr *mbx_hdr;
	struct be_mcc_wrb *temp_wrb;
	uint32_t mcc_tag_status;
	int rc = 0;

	/* unpack what beiscsi_process_mcc_compl() stored for this tag */
	mcc_tag_status = phba->ctrl.mcc_tag_status[tag];
	status = (mcc_tag_status & CQE_STATUS_MASK);
	addl_status = ((mcc_tag_status & CQE_STATUS_ADDL_MASK) >>
			CQE_STATUS_ADDL_SHIFT);

	if (mbx_cmd_mem) {
		/* non-embedded command: request header is in the DMA buffer */
		mbx_hdr = (struct be_cmd_req_hdr *)mbx_cmd_mem->va;
	} else {
		/* embedded command: recover the WRB via the stored WRB index */
		wrb_num = (mcc_tag_status & CQE_STATUS_WRB_MASK) >>
			  CQE_STATUS_WRB_SHIFT;
		temp_wrb = (struct be_mcc_wrb *)queue_get_wrb(mccq, wrb_num);
		mbx_hdr = embedded_payload(temp_wrb);

		if (wrb)
			*wrb = temp_wrb;
	}

	if (status || addl_status) {
		beiscsi_log(phba, KERN_WARNING,
			    BEISCSI_LOG_INIT | BEISCSI_LOG_EH |
			    BEISCSI_LOG_CONFIG,
			    "BC_%d : MBX Cmd Failed for Subsys : %d Opcode : %d with Status : %d and Extd_Status : %d\n",
			    mbx_hdr->subsystem, mbx_hdr->opcode,
			    status, addl_status);
		rc = -EIO;
		if (status == MCC_STATUS_INSUFFICIENT_BUFFER) {
			/* response header overlays the request header here */
			mbx_resp_hdr = (struct be_cmd_resp_hdr *)mbx_hdr;
			beiscsi_log(phba, KERN_WARNING,
				    BEISCSI_LOG_INIT | BEISCSI_LOG_EH |
				    BEISCSI_LOG_CONFIG,
				    "BC_%d : Insufficient Buffer Error Resp_Len : %d Actual_Resp_Len : %d\n",
				    mbx_resp_hdr->response_length,
				    mbx_resp_hdr->actual_resp_len);
			rc = -EAGAIN;
		}
	}

	return rc;
}
220
/*
 * beiscsi_mccq_compl_wait()- Process completion in MCC CQ
 * @phba: Driver private structure
 * @tag: Tag for the MBX Command
 * @wrb: the WRB used for the MBX Command
 * @mbx_cmd_mem: ptr to memory allocated for MBX Cmd
 *
 * Waits for MBX completion with the passed TAG.
 *
 * return
 * Success: 0
 * Failure: Non-Zero
 **/
int beiscsi_mccq_compl_wait(struct beiscsi_hba *phba,
			    unsigned int tag,
			    struct be_mcc_wrb **wrb,
			    struct be_dma_mem *mbx_cmd_mem)
{
	int rc = 0;

	/* valid tags are 1..MAX_MCC_CMD; 0 is the pool-corruption sentinel */
	if (!tag || tag > MAX_MCC_CMD) {
		__beiscsi_log(phba, KERN_ERR,
			      "BC_%d : invalid tag %u\n", tag);
		return -EINVAL;
	}

	if (beiscsi_hba_in_error(phba)) {
		clear_bit(MCC_TAG_STATE_RUNNING,
			  &phba->ctrl.ptag_state[tag].tag_state);
		return -EIO;
	}

	/* wait for the mccq completion */
	rc = wait_event_interruptible_timeout(phba->ctrl.mcc_wait[tag],
					      phba->ctrl.mcc_tag_status[tag],
					      msecs_to_jiffies(
						BEISCSI_HOST_MBX_TIMEOUT));
	/**
	 * Return EIO if port is being disabled. Associated DMA memory, if any,
	 * is freed by the caller. When port goes offline, MCCQ is cleaned up
	 * so does WRB.
	 */
	if (!test_bit(BEISCSI_HBA_ONLINE, &phba->state)) {
		clear_bit(MCC_TAG_STATE_RUNNING,
			  &phba->ctrl.ptag_state[tag].tag_state);
		return -EIO;
	}

	/**
	 * If MBOX cmd timeout expired, tag and resource allocated
	 * for cmd is not freed until FW returns completion.
	 */
	if (rc <= 0) {
		/* rc == 0 is timeout; rc < 0 is -ERESTARTSYS (interrupted) —
		 * both are treated as "FW has not completed yet".
		 */
		struct be_dma_mem *tag_mem;

		/**
		 * PCI/DMA memory allocated and posted in non-embedded mode
		 * will have mbx_cmd_mem != NULL.
		 * Save virtual and bus addresses for the command so that it
		 * can be freed later.
		 **/
		tag_mem = &phba->ctrl.ptag_state[tag].tag_mem_state;
		if (mbx_cmd_mem) {
			tag_mem->size = mbx_cmd_mem->size;
			tag_mem->va = mbx_cmd_mem->va;
			tag_mem->dma = mbx_cmd_mem->dma;
		} else
			tag_mem->size = 0;

		/* first make tag_mem_state visible to all */
		wmb();
		set_bit(MCC_TAG_STATE_TIMEOUT,
			&phba->ctrl.ptag_state[tag].tag_state);

		beiscsi_log(phba, KERN_ERR,
			    BEISCSI_LOG_INIT | BEISCSI_LOG_EH |
			    BEISCSI_LOG_CONFIG,
			    "BC_%d : MBX Cmd Completion timed out\n");
		/* ownership of tag/WRB stays with the late-completion path */
		return -EBUSY;
	}

	rc = __beiscsi_mcc_compl_status(phba, tag, wrb, mbx_cmd_mem);

	free_mcc_wrb(&phba->ctrl, tag);
	return rc;
}
307
/*
 * beiscsi_process_mbox_compl()- Check the MBX completion status
 * @ctrl: Function specific MBX data structure
 * @compl: Completion status of MBX Command
 *
 * Check for the MBX completion status when BMBX method used
 *
 * return
 * Success: Zero
 * Failure: Non-Zero (-EBUSY if no completion was posted, otherwise the
 * raw FW completion status)
 **/
static int beiscsi_process_mbox_compl(struct be_ctrl_info *ctrl,
				      struct be_mcc_compl *compl)
{
	struct be_mcc_wrb *wrb = wrb_from_mbox(&ctrl->mbox_mem);
	struct beiscsi_hba *phba = pci_get_drvdata(ctrl->pdev);
	struct be_cmd_req_hdr *hdr = embedded_payload(wrb);
	u16 compl_status, extd_status;

	/**
	 * To check if valid bit is set, check the entire word as we don't know
	 * the endianness of the data (old entry is host endian while a new
	 * entry is little endian)
	 */
	if (!compl->flags) {
		beiscsi_log(phba, KERN_ERR,
			    BEISCSI_LOG_CONFIG | BEISCSI_LOG_MBOX,
			    "BC_%d : BMBX busy, no completion\n");
		return -EBUSY;
	}
	compl->flags = le32_to_cpu(compl->flags);
	WARN_ON((compl->flags & CQE_FLAGS_VALID_MASK) == 0);

	/**
	 * Just swap the status to host endian;
	 * mcc tag is opaquely copied from mcc_wrb.
	 */
	be_dws_le_to_cpu(compl, 4);
	compl_status = (compl->status >> CQE_STATUS_COMPL_SHIFT) &
		CQE_STATUS_COMPL_MASK;
	extd_status = (compl->status >> CQE_STATUS_EXTD_SHIFT) &
		CQE_STATUS_EXTD_MASK;
	/* Need to reset the entire word that houses the valid bit */
	compl->flags = 0;

	if (compl_status == MCC_STATUS_SUCCESS)
		return 0;

	beiscsi_log(phba, KERN_ERR, BEISCSI_LOG_CONFIG | BEISCSI_LOG_MBOX,
		    "BC_%d : error in cmd completion: Subsystem : %d Opcode : %d status(compl/extd)=%d/%d\n",
		    hdr->subsystem, hdr->opcode, compl_status, extd_status);
	/* non-zero FW status is returned directly as the error code */
	return compl_status;
}
361
/*
 * beiscsi_process_async_link - handle an async link-state event from FW
 * @phba: driver private structure
 * @compl: the async completion entry, reinterpreted as a link-state event
 *
 * Updates cached port speed and the LINK_UP state bit. On link-up,
 * kicks off pending boot-target discovery work; on link-down, fails all
 * active iSCSI sessions on this host.
 */
static void beiscsi_process_async_link(struct beiscsi_hba *phba,
				       struct be_mcc_compl *compl)
{
	struct be_async_event_link_state *evt;

	evt = (struct be_async_event_link_state *)compl;

	phba->port_speed = evt->port_speed;
	/**
	 * Check logical link status in ASYNC event.
	 * This has been newly introduced in SKH-R Firmware 10.0.338.45.
	 **/
	if (evt->port_link_status & BE_ASYNC_LINK_UP_MASK) {
		set_bit(BEISCSI_HBA_LINK_UP, &phba->state);
		if (test_bit(BEISCSI_HBA_BOOT_FOUND, &phba->state))
			beiscsi_start_boot_work(phba, BE_BOOT_INVALID_SHANDLE);
		__beiscsi_log(phba, KERN_ERR,
			      "BC_%d : Link Up on Port %d tag 0x%x\n",
			      evt->physical_port, evt->event_tag);
	} else {
		clear_bit(BEISCSI_HBA_LINK_UP, &phba->state);
		__beiscsi_log(phba, KERN_ERR,
			      "BC_%d : Link Down on Port %d tag 0x%x\n",
			      evt->physical_port, evt->event_tag);
		/* link lost: tear down every session on this host */
		iscsi_host_for_each_session(phba->shost,
					    beiscsi_session_fail);
	}
}
390
/*
 * Messages for the SLI MISCONFIGURED async event, indexed by the per-port
 * optic state byte reported by FW (see beiscsi_process_async_sli()).
 *
 * Declared const char * const: these point at string literals (modifying
 * them would be undefined behavior), matching the convention of the
 * desc_ue_status_low/hi tables above.
 */
static const char * const beiscsi_port_misconf_event_msg[] = {
	"Physical Link is functional.",
	"Optics faulted/incorrectly installed/not installed - Reseat optics, if issue not resolved, replace.",
	"Optics of two types installed - Remove one optic or install matching pair of optics.",
	"Incompatible optics - Replace with compatible optics for card to function.",
	"Unqualified optics - Replace with Avago optics for Warranty and Technical Support.",
	"Uncertified optics - Replace with Avago Certified optics to enable link operation."
};
399
/*
 * beiscsi_process_async_sli - handle an async SLI event from FW
 * @phba: driver private structure
 * @compl: the async completion entry, reinterpreted as an SLI event
 *
 * Only MISCONFIGURED physical-port events are handled: the per-port optic
 * state is cached and a human-readable message is logged when it changes.
 */
static void beiscsi_process_async_sli(struct beiscsi_hba *phba,
				      struct be_mcc_compl *compl)
{
	struct be_async_event_sli *async_sli;
	u8 evt_type, state, old_state, le;
	char *sev = KERN_WARNING;
	char *msg = NULL;

	evt_type = compl->flags >> ASYNC_TRAILER_EVENT_TYPE_SHIFT;
	evt_type &= ASYNC_TRAILER_EVENT_TYPE_MASK;

	/* processing only MISCONFIGURED physical port event */
	if (evt_type != ASYNC_SLI_EVENT_TYPE_MISCONFIGURED)
		return;

	async_sli = (struct be_async_event_sli *)compl;
	/* each physical port owns one byte in event_data1 (state) and
	 * event_data2 (link effect)
	 */
	state = async_sli->event_data1 >>
		 (phba->fw_config.phys_port * 8) & 0xff;
	le = async_sli->event_data2 >>
	     (phba->fw_config.phys_port * 8) & 0xff;

	old_state = phba->optic_state;
	phba->optic_state = state;

	if (state >= ARRAY_SIZE(beiscsi_port_misconf_event_msg)) {
		/* fw is reporting a state we don't know, log and return */
		__beiscsi_log(phba, KERN_ERR,
			      "BC_%d : Port %c: Unrecognized optic state 0x%x\n",
			      phba->port_name, async_sli->event_data1);
		return;
	}

	if (ASYNC_SLI_LINK_EFFECT_VALID(le)) {
		/* log link effect for unqualified-4, uncertified-5 optics */
		if (state > 3)
			msg = (ASYNC_SLI_LINK_EFFECT_STATE(le)) ?
				" Link is non-operational." :
				" Link is operational.";
		/* 1 - info */
		if (ASYNC_SLI_LINK_EFFECT_SEV(le) == 1)
			sev = KERN_INFO;
		/* 2 - error */
		if (ASYNC_SLI_LINK_EFFECT_SEV(le) == 2)
			sev = KERN_ERR;
	}

	/* only log transitions, not repeats of the same state */
	if (old_state != phba->optic_state)
		__beiscsi_log(phba, sev, "BC_%d : Port %c: %s%s\n",
			      phba->port_name,
			      beiscsi_port_misconf_event_msg[state],
			      !msg ? "" : msg);
}
452
/*
 * beiscsi_process_async_event - dispatch an async MCC completion entry
 * @phba: driver private structure
 * @compl: the async completion entry from the MCC CQ
 *
 * Routes the event by the code in the async trailer (link state, iSCSI,
 * SLI) and logs it; unhandled codes are logged at error severity.
 */
void beiscsi_process_async_event(struct beiscsi_hba *phba,
				 struct be_mcc_compl *compl)
{
	char *sev = KERN_INFO;
	u8 evt_code;

	/* interpret flags as an async trailer */
	evt_code = compl->flags >> ASYNC_TRAILER_EVENT_CODE_SHIFT;
	evt_code &= ASYNC_TRAILER_EVENT_CODE_MASK;
	switch (evt_code) {
	case ASYNC_EVENT_CODE_LINK_STATE:
		beiscsi_process_async_link(phba, compl);
		break;
	case ASYNC_EVENT_CODE_ISCSI:
		/* FW signalled boot-target info; schedule discovery work */
		if (test_bit(BEISCSI_HBA_BOOT_FOUND, &phba->state))
			beiscsi_start_boot_work(phba, BE_BOOT_INVALID_SHANDLE);
		sev = KERN_ERR;
		break;
	case ASYNC_EVENT_CODE_SLI:
		beiscsi_process_async_sli(phba, compl);
		break;
	default:
		/* event not registered */
		sev = KERN_ERR;
	}

	beiscsi_log(phba, sev, BEISCSI_LOG_CONFIG | BEISCSI_LOG_MBOX,
		    "BC_%d : ASYNC Event %x: status 0x%08x flags 0x%08x\n",
		    evt_code, compl->status, compl->flags);
}
483
/*
 * beiscsi_process_mcc_compl - consume one MCC CQ completion entry
 * @ctrl: function-specific MBX control structure
 * @compl: completion entry (CQE) from the MCC CQ
 *
 * Packs the CQE status into mcc_tag_status[tag] and then, depending on
 * the tag's state, wakes the synchronous waiter, invokes the async
 * callback, silently records the status (IGNORE), or frees resources of
 * a command whose waiter already timed out.
 *
 * return: 0 always — the CQE is always consumed.
 */
int beiscsi_process_mcc_compl(struct be_ctrl_info *ctrl,
			      struct be_mcc_compl *compl)
{
	struct beiscsi_hba *phba = pci_get_drvdata(ctrl->pdev);
	u16 compl_status, extd_status;
	struct be_dma_mem *tag_mem;
	unsigned int tag, wrb_idx;

	be_dws_le_to_cpu(compl, 4);
	tag = (compl->tag0 & MCC_Q_CMD_TAG_MASK);
	wrb_idx = (compl->tag0 & CQE_STATUS_WRB_MASK) >> CQE_STATUS_WRB_SHIFT;

	/* completion for a tag the driver never posted — drop it */
	if (!test_bit(MCC_TAG_STATE_RUNNING,
		      &ctrl->ptag_state[tag].tag_state)) {
		beiscsi_log(phba, KERN_ERR, BEISCSI_LOG_MBOX |
			    BEISCSI_LOG_INIT | BEISCSI_LOG_CONFIG,
			    "BC_%d : MBX cmd completed but not posted\n");
		return 0;
	}

	/* end MCC with this tag */
	clear_bit(MCC_TAG_STATE_RUNNING, &ctrl->ptag_state[tag].tag_state);

	if (test_bit(MCC_TAG_STATE_TIMEOUT, &ctrl->ptag_state[tag].tag_state)) {
		beiscsi_log(phba, KERN_WARNING,
			    BEISCSI_LOG_MBOX | BEISCSI_LOG_INIT |
			    BEISCSI_LOG_CONFIG,
			    "BC_%d : MBX Completion for timeout Command from FW\n");
		/**
		 * Check for the size before freeing resource.
		 * Only for non-embedded cmd, PCI resource is allocated.
		 **/
		tag_mem = &ctrl->ptag_state[tag].tag_mem_state;
		if (tag_mem->size) {
			dma_free_coherent(&ctrl->pdev->dev, tag_mem->size,
					  tag_mem->va, tag_mem->dma);
			tag_mem->size = 0;
		}
		free_mcc_wrb(ctrl, tag);
		return 0;
	}

	compl_status = (compl->status >> CQE_STATUS_COMPL_SHIFT) &
		       CQE_STATUS_COMPL_MASK;
	extd_status = (compl->status >> CQE_STATUS_EXTD_SHIFT) &
		      CQE_STATUS_EXTD_MASK;
	/* The ctrl.mcc_tag_status[tag] is filled with
	 * [31] = valid, [30:24] = Rsvd, [23:16] = wrb, [15:8] = extd_status,
	 * [7:0] = compl_status
	 */
	ctrl->mcc_tag_status[tag] = CQE_VALID_MASK;
	ctrl->mcc_tag_status[tag] |= (wrb_idx << CQE_STATUS_WRB_SHIFT);
	ctrl->mcc_tag_status[tag] |= (extd_status << CQE_STATUS_ADDL_SHIFT) &
				     CQE_STATUS_ADDL_MASK;
	ctrl->mcc_tag_status[tag] |= (compl_status & CQE_STATUS_MASK);

	if (test_bit(MCC_TAG_STATE_ASYNC, &ctrl->ptag_state[tag].tag_state)) {
		/* fire-and-forget command: run its callback, no waiter */
		if (ctrl->ptag_state[tag].cbfn)
			ctrl->ptag_state[tag].cbfn(phba, tag);
		else
			__beiscsi_log(phba, KERN_ERR,
				      "BC_%d : MBX ASYNC command with no callback\n");
		free_mcc_wrb(ctrl, tag);
		return 0;
	}

	if (test_bit(MCC_TAG_STATE_IGNORE, &ctrl->ptag_state[tag].tag_state)) {
		/* just check completion status and free wrb */
		__beiscsi_mcc_compl_status(phba, tag, NULL, NULL);
		free_mcc_wrb(ctrl, tag);
		return 0;
	}

	/* synchronous command: waiter in beiscsi_mccq_compl_wait() frees it */
	wake_up_interruptible(&ctrl->mcc_wait[tag]);
	return 0;
}
560
Jitendra Bhivare69fd6d72016-02-04 15:49:14 +0530561void be_mcc_notify(struct beiscsi_hba *phba, unsigned int tag)
562{
563 struct be_queue_info *mccq = &phba->ctrl.mcc_obj.q;
564 u32 val = 0;
565
566 set_bit(MCC_TAG_STATE_RUNNING, &phba->ctrl.ptag_state[tag].tag_state);
567 val |= mccq->id & DB_MCCQ_RING_ID_MASK;
568 val |= 1 << DB_MCCQ_NUM_POSTED_SHIFT;
569 /* make request available for DMA */
570 wmb();
571 iowrite32(val, phba->db_va + DB_MCCQ_OFFSET);
Jayamohan Kallickalbfead3b2009-10-23 11:52:33 +0530572}
573
/*
 * be_mbox_db_ready_poll()- Check ready status
 * @ctrl: Function specific MBX data structure
 *
 * Check for the ready status of FW to send BMBX
 * commands to adapter.
 *
 * return
 * Success: 0
 * Failure: Non-Zero
 **/
static int be_mbox_db_ready_poll(struct be_ctrl_info *ctrl)
{
	/* wait 30s for generic non-flash MBOX operation */
#define BEISCSI_MBX_RDY_BIT_TIMEOUT 30000
	void __iomem *db = ctrl->db + MPU_MAILBOX_DB_OFFSET;
	struct beiscsi_hba *phba = pci_get_drvdata(ctrl->pdev);
	unsigned long timeout;
	u32 ready;

	/*
	 * This BMBX busy wait path is used during init only.
	 * For the commands executed during init, 5s should suffice.
	 */
	timeout = jiffies + msecs_to_jiffies(BEISCSI_MBX_RDY_BIT_TIMEOUT);
	do {
		if (beiscsi_hba_in_error(phba))
			return -EIO;

		ready = ioread32(db);
		/* all-ones read likely means the device fell off the bus */
		if (ready == 0xffffffff)
			return -EIO;

		ready &= MPU_MAILBOX_DB_RDY_MASK;
		if (ready)
			return 0;

		if (time_after(jiffies, timeout))
			break;
		/* 1ms sleep is enough in most cases */
		schedule_timeout_uninterruptible(msecs_to_jiffies(1));
	} while (!ready);

	beiscsi_log(phba, KERN_ERR,
		    BEISCSI_LOG_CONFIG | BEISCSI_LOG_MBOX,
		    "BC_%d : FW Timed Out\n");
	set_bit(BEISCSI_HBA_FW_TIMEOUT, &phba->state);
	return -EBUSY;
}
623
/*
 * be_mbox_notify: Notify adapter of new BMBX command
 * @ctrl: Function specific MBX data structure
 *
 * Ring doorbell to inform adapter of a BMBX command
 * to process
 *
 * The BMBX DMA address is handed to hardware in two doorbell writes:
 * first the high 32 bits (HI flag set), then the low bits, waiting for
 * the RDY bit between each step.
 *
 * return
 * Success: 0
 * Failure: Non-Zero
 **/
static int be_mbox_notify(struct be_ctrl_info *ctrl)
{
	int status;
	u32 val = 0;
	void __iomem *db = ctrl->db + MPU_MAILBOX_DB_OFFSET;
	struct be_dma_mem *mbox_mem = &ctrl->mbox_mem;
	struct be_mcc_mailbox *mbox = mbox_mem->va;

	status = be_mbox_db_ready_poll(ctrl);
	if (status)
		return status;

	/* step 1: post upper 32 bits of the mailbox DMA address */
	val &= ~MPU_MAILBOX_DB_RDY_MASK;
	val |= MPU_MAILBOX_DB_HI_MASK;
	val |= (upper_32_bits(mbox_mem->dma) >> 2) << 2;
	iowrite32(val, db);

	status = be_mbox_db_ready_poll(ctrl);
	if (status)
		return status;

	/* step 2: post lower bits (address is 16-byte aligned, hence >> 4) */
	val = 0;
	val &= ~MPU_MAILBOX_DB_RDY_MASK;
	val &= ~MPU_MAILBOX_DB_HI_MASK;
	val |= (u32) (mbox_mem->dma >> 4) << 2;
	iowrite32(val, db);

	status = be_mbox_db_ready_poll(ctrl);
	if (status)
		return status;

	/* RDY is set; small delay before CQE read. */
	udelay(1);

	status = beiscsi_process_mbox_compl(ctrl, &mbox->compl);
	return status;
}
672
Jitendra Bhivarea39e9f72017-10-10 16:18:14 +0530673void be_wrb_hdr_prepare(struct be_mcc_wrb *wrb, u32 payload_len,
674 bool embedded, u8 sge_cnt)
Jayamohan Kallickal6733b392009-09-05 07:36:35 +0530675{
676 if (embedded)
Jitendra Bhivarefa1261c2016-12-13 15:56:01 +0530677 wrb->emb_sgecnt_special |= MCC_WRB_EMBEDDED_MASK;
Jayamohan Kallickal6733b392009-09-05 07:36:35 +0530678 else
Jitendra Bhivarefa1261c2016-12-13 15:56:01 +0530679 wrb->emb_sgecnt_special |= (sge_cnt & MCC_WRB_SGE_CNT_MASK) <<
680 MCC_WRB_SGE_CNT_SHIFT;
Jayamohan Kallickal6733b392009-09-05 07:36:35 +0530681 wrb->payload_length = payload_len;
682 be_dws_cpu_to_le(wrb, 8);
683}
684
685void be_cmd_hdr_prepare(struct be_cmd_req_hdr *req_hdr,
Jitendra Bhivarea39e9f72017-10-10 16:18:14 +0530686 u8 subsystem, u8 opcode, u32 cmd_len)
Jayamohan Kallickal6733b392009-09-05 07:36:35 +0530687{
688 req_hdr->opcode = opcode;
689 req_hdr->subsystem = subsystem;
690 req_hdr->request_length = cpu_to_le32(cmd_len - sizeof(*req_hdr));
John Soni Josee175def2012-10-20 04:45:40 +0530691 req_hdr->timeout = BEISCSI_FW_MBX_TIMEOUT;
Jayamohan Kallickal6733b392009-09-05 07:36:35 +0530692}
693
694static void be_cmd_page_addrs_prepare(struct phys_addr *pages, u32 max_pages,
695 struct be_dma_mem *mem)
696{
697 int i, buf_pages;
698 u64 dma = (u64) mem->dma;
699
700 buf_pages = min(PAGES_4K_SPANNED(mem->va, mem->size), max_pages);
701 for (i = 0; i < buf_pages; i++) {
702 pages[i].lo = cpu_to_le32(dma & 0xFFFFFFFF);
703 pages[i].hi = cpu_to_le32(upper_32_bits(dma));
704 dma += PAGE_SIZE_4K;
705 }
706}
707
708static u32 eq_delay_to_mult(u32 usec_delay)
709{
710#define MAX_INTR_RATE 651042
711 const u32 round = 10;
712 u32 multiplier;
713
714 if (usec_delay == 0)
715 multiplier = 0;
716 else {
717 u32 interrupt_rate = 1000000 / usec_delay;
718 if (interrupt_rate == 0)
719 multiplier = 1023;
720 else {
721 multiplier = (MAX_INTR_RATE - interrupt_rate) * round;
722 multiplier /= interrupt_rate;
723 multiplier = (multiplier + round / 2) / round;
724 multiplier = min(multiplier, (u32) 1023);
725 }
726 }
727 return multiplier;
728}
729
730struct be_mcc_wrb *wrb_from_mbox(struct be_dma_mem *mbox_mem)
731{
732 return &((struct be_mcc_mailbox *)(mbox_mem->va))->wrb;
733}
734
/**
 * beiscsi_cmd_eq_create - create an event queue via the bootstrap mailbox
 * @ctrl: function-specific MBX control structure
 * @eq: driver EQ object; its DMA ring must already be allocated
 * @eq_delay: desired interrupt delay in microseconds
 *
 * On success, records the HW-assigned EQ id and marks the queue created.
 *
 * return: 0 on success, non-zero mailbox status otherwise.
 */
int beiscsi_cmd_eq_create(struct be_ctrl_info *ctrl,
			  struct be_queue_info *eq, int eq_delay)
{
	struct be_mcc_wrb *wrb = wrb_from_mbox(&ctrl->mbox_mem);
	struct be_cmd_req_eq_create *req = embedded_payload(wrb);
	/* req and resp alias the same embedded buffer; resp is valid only
	 * after be_mbox_notify() completes successfully
	 */
	struct be_cmd_resp_eq_create *resp = embedded_payload(wrb);
	struct be_dma_mem *q_mem = &eq->dma_mem;
	int status;

	mutex_lock(&ctrl->mbox_lock);
	memset(wrb, 0, sizeof(*wrb));

	be_wrb_hdr_prepare(wrb, sizeof(*req), true, 0);

	be_cmd_hdr_prepare(&req->hdr, CMD_SUBSYSTEM_COMMON,
			   OPCODE_COMMON_EQ_CREATE, sizeof(*req));

	req->num_pages = cpu_to_le16(PAGES_4K_SPANNED(q_mem->va, q_mem->size));

	AMAP_SET_BITS(struct amap_eq_context, func, req->context,
		      PCI_FUNC(ctrl->pdev->devfn));
	AMAP_SET_BITS(struct amap_eq_context, valid, req->context, 1);
	AMAP_SET_BITS(struct amap_eq_context, size, req->context, 0);
	AMAP_SET_BITS(struct amap_eq_context, count, req->context,
		      __ilog2_u32(eq->len / 256));
	AMAP_SET_BITS(struct amap_eq_context, delaymult, req->context,
		      eq_delay_to_mult(eq_delay));
	be_dws_cpu_to_le(req->context, sizeof(req->context));

	be_cmd_page_addrs_prepare(req->pages, ARRAY_SIZE(req->pages), q_mem);

	status = be_mbox_notify(ctrl);
	if (!status) {
		eq->id = le16_to_cpu(resp->eq_id);
		eq->created = true;
	}
	mutex_unlock(&ctrl->mbox_lock);
	return status;
}
774
Jayamohan Kallickal6733b392009-09-05 07:36:35 +0530775int beiscsi_cmd_cq_create(struct be_ctrl_info *ctrl,
776 struct be_queue_info *cq, struct be_queue_info *eq,
777 bool sol_evts, bool no_delay, int coalesce_wm)
778{
779 struct be_mcc_wrb *wrb = wrb_from_mbox(&ctrl->mbox_mem);
780 struct be_cmd_req_cq_create *req = embedded_payload(wrb);
781 struct be_cmd_resp_cq_create *resp = embedded_payload(wrb);
John Soni Jose99bc5d52012-08-20 23:00:18 +0530782 struct beiscsi_hba *phba = pci_get_drvdata(ctrl->pdev);
Jayamohan Kallickal6733b392009-09-05 07:36:35 +0530783 struct be_dma_mem *q_mem = &cq->dma_mem;
784 void *ctxt = &req->context;
785 int status;
786
Jitendra Bhivarec03a50f2016-01-20 14:10:46 +0530787 mutex_lock(&ctrl->mbox_lock);
Jayamohan Kallickal6733b392009-09-05 07:36:35 +0530788 memset(wrb, 0, sizeof(*wrb));
789
790 be_wrb_hdr_prepare(wrb, sizeof(*req), true, 0);
791
792 be_cmd_hdr_prepare(&req->hdr, CMD_SUBSYSTEM_COMMON,
793 OPCODE_COMMON_CQ_CREATE, sizeof(*req));
Jayamohan Kallickal6733b392009-09-05 07:36:35 +0530794
795 req->num_pages = cpu_to_le16(PAGES_4K_SPANNED(q_mem->va, q_mem->size));
Jayamohan Kallickal2c9dfd32013-04-05 20:38:26 -0700796 if (is_chip_be2_be3r(phba)) {
John Soni Joseeaae5262012-10-20 04:43:44 +0530797 AMAP_SET_BITS(struct amap_cq_context, coalescwm,
798 ctxt, coalesce_wm);
799 AMAP_SET_BITS(struct amap_cq_context, nodelay, ctxt, no_delay);
800 AMAP_SET_BITS(struct amap_cq_context, count, ctxt,
801 __ilog2_u32(cq->len / 256));
802 AMAP_SET_BITS(struct amap_cq_context, valid, ctxt, 1);
803 AMAP_SET_BITS(struct amap_cq_context, solevent, ctxt, sol_evts);
804 AMAP_SET_BITS(struct amap_cq_context, eventable, ctxt, 1);
805 AMAP_SET_BITS(struct amap_cq_context, eqid, ctxt, eq->id);
806 AMAP_SET_BITS(struct amap_cq_context, armed, ctxt, 1);
807 AMAP_SET_BITS(struct amap_cq_context, func, ctxt,
808 PCI_FUNC(ctrl->pdev->devfn));
Jayamohan Kallickal2c9dfd32013-04-05 20:38:26 -0700809 } else {
810 req->hdr.version = MBX_CMD_VER2;
811 req->page_size = 1;
812 AMAP_SET_BITS(struct amap_cq_context_v2, coalescwm,
813 ctxt, coalesce_wm);
814 AMAP_SET_BITS(struct amap_cq_context_v2, nodelay,
815 ctxt, no_delay);
816 AMAP_SET_BITS(struct amap_cq_context_v2, count, ctxt,
817 __ilog2_u32(cq->len / 256));
818 AMAP_SET_BITS(struct amap_cq_context_v2, valid, ctxt, 1);
819 AMAP_SET_BITS(struct amap_cq_context_v2, eventable, ctxt, 1);
820 AMAP_SET_BITS(struct amap_cq_context_v2, eqid, ctxt, eq->id);
821 AMAP_SET_BITS(struct amap_cq_context_v2, armed, ctxt, 1);
John Soni Joseeaae5262012-10-20 04:43:44 +0530822 }
Jayamohan Kallickal6733b392009-09-05 07:36:35 +0530823
Jayamohan Kallickal6733b392009-09-05 07:36:35 +0530824 be_dws_cpu_to_le(ctxt, sizeof(req->context));
825
826 be_cmd_page_addrs_prepare(req->pages, ARRAY_SIZE(req->pages), q_mem);
827
828 status = be_mbox_notify(ctrl);
829 if (!status) {
830 cq->id = le16_to_cpu(resp->cq_id);
831 cq->created = true;
832 } else
John Soni Jose99bc5d52012-08-20 23:00:18 +0530833 beiscsi_log(phba, KERN_ERR, BEISCSI_LOG_INIT,
834 "BC_%d : In be_cmd_cq_create, status=ox%08x\n",
835 status);
836
Jitendra Bhivarec03a50f2016-01-20 14:10:46 +0530837 mutex_unlock(&ctrl->mbox_lock);
Jayamohan Kallickal6733b392009-09-05 07:36:35 +0530838
839 return status;
840}
841
842static u32 be_encoded_q_len(int q_len)
843{
844 u32 len_encoded = fls(q_len); /* log2(len) + 1 */
845 if (len_encoded == 16)
846 len_encoded = 0;
847 return len_encoded;
848}
Jayamohan Kallickalbfead3b2009-10-23 11:52:33 +0530849
/**
 * beiscsi_cmd_mccq_create()- Create the MCC queue for async FW commands
 * @phba: ptr to the dev priv structure
 * @mccq: MCC queue to create
 * @cq: completion queue servicing MCC completions
 *
 * Issues OPCODE_COMMON_MCC_CREATE_EXT over the bootstrap mailbox and
 * subscribes to LINK_STATE, ISCSI and SLI async events. On success the
 * firmware-assigned id is stored in mccq->id and mccq->created is set.
 *
 * return
 *	Success: 0
 *	Failure: Non-Zero Value
 **/
int beiscsi_cmd_mccq_create(struct beiscsi_hba *phba,
			struct be_queue_info *mccq,
			struct be_queue_info *cq)
{
	struct be_mcc_wrb *wrb;
	struct be_cmd_req_mcc_create_ext *req;
	struct be_dma_mem *q_mem = &mccq->dma_mem;
	struct be_ctrl_info *ctrl;
	void *ctxt;
	int status;

	/* bootstrap mailbox is single-entry; serialize all users */
	mutex_lock(&phba->ctrl.mbox_lock);
	ctrl = &phba->ctrl;
	wrb = wrb_from_mbox(&ctrl->mbox_mem);
	memset(wrb, 0, sizeof(*wrb));
	req = embedded_payload(wrb);
	ctxt = &req->context;

	be_wrb_hdr_prepare(wrb, sizeof(*req), true, 0);

	be_cmd_hdr_prepare(&req->hdr, CMD_SUBSYSTEM_COMMON,
			OPCODE_COMMON_MCC_CREATE_EXT, sizeof(*req));

	req->num_pages = PAGES_4K_SPANNED(q_mem->va, q_mem->size);
	/* async event classes this function wants delivered on the MCCQ */
	req->async_evt_bitmap = 1 << ASYNC_EVENT_CODE_LINK_STATE;
	req->async_evt_bitmap |= 1 << ASYNC_EVENT_CODE_ISCSI;
	req->async_evt_bitmap |= 1 << ASYNC_EVENT_CODE_SLI;

	AMAP_SET_BITS(struct amap_mcc_context, fid, ctxt,
		      PCI_FUNC(phba->pcidev->devfn));
	AMAP_SET_BITS(struct amap_mcc_context, valid, ctxt, 1);
	AMAP_SET_BITS(struct amap_mcc_context, ring_size, ctxt,
		      be_encoded_q_len(mccq->len));
	AMAP_SET_BITS(struct amap_mcc_context, cq_id, ctxt, cq->id);

	/* context built in CPU order; convert before handing to HW */
	be_dws_cpu_to_le(ctxt, sizeof(req->context));

	be_cmd_page_addrs_prepare(req->pages, ARRAY_SIZE(req->pages), q_mem);

	status = be_mbox_notify(ctrl);
	if (!status) {
		struct be_cmd_resp_mcc_create *resp = embedded_payload(wrb);
		mccq->id = le16_to_cpu(resp->id);
		mccq->created = true;
	}
	mutex_unlock(&phba->ctrl.mbox_lock);

	return status;
}
899
/**
 * beiscsi_cmd_q_destroy()- Destroy a queue of the given type
 * @ctrl: ptr to ctrl_info
 * @q: queue to destroy; may be NULL only for QTYPE_SGL (no queue id)
 * @queue_type: one of QTYPE_EQ/CQ/MCCQ/WRBQ/DPDUQ/SGL
 *
 * Maps the queue type to its subsystem/opcode pair and issues the
 * destroy command over the bootstrap mailbox. An unknown queue type
 * is a driver bug and triggers BUG().
 *
 * return
 *	Success: 0
 *	Failure: Non-Zero Value
 **/
int beiscsi_cmd_q_destroy(struct be_ctrl_info *ctrl, struct be_queue_info *q,
			  int queue_type)
{
	struct be_mcc_wrb *wrb = wrb_from_mbox(&ctrl->mbox_mem);
	struct be_cmd_req_q_destroy *req = embedded_payload(wrb);
	struct beiscsi_hba *phba = pci_get_drvdata(ctrl->pdev);
	u8 subsys = 0, opcode = 0;
	int status;

	beiscsi_log(phba, KERN_INFO, BEISCSI_LOG_INIT,
		    "BC_%d : In beiscsi_cmd_q_destroy "
		    "queue_type : %d\n", queue_type);

	mutex_lock(&ctrl->mbox_lock);
	memset(wrb, 0, sizeof(*wrb));
	be_wrb_hdr_prepare(wrb, sizeof(*req), true, 0);

	switch (queue_type) {
	case QTYPE_EQ:
		subsys = CMD_SUBSYSTEM_COMMON;
		opcode = OPCODE_COMMON_EQ_DESTROY;
		break;
	case QTYPE_CQ:
		subsys = CMD_SUBSYSTEM_COMMON;
		opcode = OPCODE_COMMON_CQ_DESTROY;
		break;
	case QTYPE_MCCQ:
		subsys = CMD_SUBSYSTEM_COMMON;
		opcode = OPCODE_COMMON_MCC_DESTROY;
		break;
	case QTYPE_WRBQ:
		subsys = CMD_SUBSYSTEM_ISCSI;
		opcode = OPCODE_COMMON_ISCSI_WRBQ_DESTROY;
		break;
	case QTYPE_DPDUQ:
		subsys = CMD_SUBSYSTEM_ISCSI;
		opcode = OPCODE_COMMON_ISCSI_DEFQ_DESTROY;
		break;
	case QTYPE_SGL:
		subsys = CMD_SUBSYSTEM_ISCSI;
		opcode = OPCODE_COMMON_ISCSI_CFG_REMOVE_SGL_PAGES;
		break;
	default:
		/* drop the lock before BUG() in case it is non-fatal */
		mutex_unlock(&ctrl->mbox_lock);
		BUG();
	}
	be_cmd_hdr_prepare(&req->hdr, subsys, opcode, sizeof(*req));
	/* SGL page removal is not addressed by a queue id */
	if (queue_type != QTYPE_SGL)
		req->id = cpu_to_le16(q->id);

	status = be_mbox_notify(ctrl);

	mutex_unlock(&ctrl->mbox_lock);
	return status;
}
955
/**
 * be_cmd_create_default_pdu_queue()- Create DEFQ for the adapter
 * @ctrl: ptr to ctrl_info
 * @cq: Completion Queue
 * @dq: Default Queue
 * @length: ring size
 * @entry_size: size of each entry in DEFQ
 * @is_header: Header or Data DEFQ
 * @ulp_num: Bind to which ULP
 *
 * Create HDR/Data DEFQ for the passed ULP. Unsol PDU are posted
 * on this queue by the FW. On success the queue id and doorbell
 * offset are also recorded in the driver's per-ULP ring bookkeeping.
 *
 * return
 *	Success: 0
 *	Failure: Non-Zero Value
 *
 **/
int be_cmd_create_default_pdu_queue(struct be_ctrl_info *ctrl,
				    struct be_queue_info *cq,
				    struct be_queue_info *dq, int length,
				    int entry_size, uint8_t is_header,
				    uint8_t ulp_num)
{
	struct be_mcc_wrb *wrb = wrb_from_mbox(&ctrl->mbox_mem);
	struct be_defq_create_req *req = embedded_payload(wrb);
	struct be_dma_mem *q_mem = &dq->dma_mem;
	struct beiscsi_hba *phba = pci_get_drvdata(ctrl->pdev);
	void *ctxt = &req->context;
	int status;

	mutex_lock(&ctrl->mbox_lock);
	memset(wrb, 0, sizeof(*wrb));

	be_wrb_hdr_prepare(wrb, sizeof(*req), true, 0);

	be_cmd_hdr_prepare(&req->hdr, CMD_SUBSYSTEM_ISCSI,
			   OPCODE_COMMON_ISCSI_DEFQ_CREATE, sizeof(*req));

	req->num_pages = PAGES_4K_SPANNED(q_mem->va, q_mem->size);
	/* dual-ULP-aware FW: bind the queue to the requested ULP */
	if (phba->fw_config.dual_ulp_aware) {
		req->ulp_num = ulp_num;
		req->dua_feature |= (1 << BEISCSI_DUAL_ULP_AWARE_BIT);
		req->dua_feature |= (1 << BEISCSI_BIND_Q_TO_ULP_BIT);
	}

	/* context layout differs between BE2/BE3-R and later chips */
	if (is_chip_be2_be3r(phba)) {
		AMAP_SET_BITS(struct amap_be_default_pdu_context,
			      rx_pdid, ctxt, 0);
		AMAP_SET_BITS(struct amap_be_default_pdu_context,
			      rx_pdid_valid, ctxt, 1);
		AMAP_SET_BITS(struct amap_be_default_pdu_context,
			      pci_func_id, ctxt, PCI_FUNC(ctrl->pdev->devfn));
		AMAP_SET_BITS(struct amap_be_default_pdu_context,
			      ring_size, ctxt,
			      be_encoded_q_len(length /
			      sizeof(struct phys_addr)));
		AMAP_SET_BITS(struct amap_be_default_pdu_context,
			      default_buffer_size, ctxt, entry_size);
		AMAP_SET_BITS(struct amap_be_default_pdu_context,
			      cq_id_recv, ctxt,	cq->id);
	} else {
		AMAP_SET_BITS(struct amap_default_pdu_context_ext,
			      rx_pdid, ctxt, 0);
		AMAP_SET_BITS(struct amap_default_pdu_context_ext,
			      rx_pdid_valid, ctxt, 1);
		AMAP_SET_BITS(struct amap_default_pdu_context_ext,
			      ring_size, ctxt,
			      be_encoded_q_len(length /
			      sizeof(struct phys_addr)));
		AMAP_SET_BITS(struct amap_default_pdu_context_ext,
			      default_buffer_size, ctxt, entry_size);
		AMAP_SET_BITS(struct amap_default_pdu_context_ext,
			      cq_id_recv, ctxt, cq->id);
	}

	/* context built in CPU order; convert before handing to HW */
	be_dws_cpu_to_le(ctxt, sizeof(req->context));

	be_cmd_page_addrs_prepare(req->pages, ARRAY_SIZE(req->pages), q_mem);

	status = be_mbox_notify(ctrl);
	if (!status) {
		struct be_ring *defq_ring;
		struct be_defq_create_resp *resp = embedded_payload(wrb);

		dq->id = le16_to_cpu(resp->id);
		dq->created = true;
		/* record the new ring in the matching per-ULP slot */
		if (is_header)
			defq_ring = &phba->phwi_ctrlr->default_pdu_hdr[ulp_num];
		else
			defq_ring = &phba->phwi_ctrlr->
				    default_pdu_data[ulp_num];

		defq_ring->id = dq->id;

		/* non-DUA FW always uses ULP0 and its fixed doorbell */
		if (!phba->fw_config.dual_ulp_aware) {
			defq_ring->ulp_num = BEISCSI_ULP0;
			defq_ring->doorbell_offset = DB_RXULP0_OFFSET;
		} else {
			defq_ring->ulp_num = resp->ulp_num;
			defq_ring->doorbell_offset = resp->doorbell_offset;
		}
	}
	mutex_unlock(&ctrl->mbox_lock);

	return status;
}
1063
/**
 * be_cmd_wrbq_create()- Create WRBQ
 * @ctrl: ptr to ctrl_info
 * @q_mem: memory details for the queue
 * @wrbq: queue info
 * @pwrb_context: ptr to wrb_context
 * @ulp_num: ULP on which the WRBQ is to be created
 *
 * Create WRBQ on the passed ULP_NUM. On success the returned CID and
 * doorbell offset are stored in @pwrb_context.
 *
 * return
 *	Success: 0
 *	Failure: Non-Zero Value
 **/
int be_cmd_wrbq_create(struct be_ctrl_info *ctrl,
		       struct be_dma_mem *q_mem,
		       struct be_queue_info *wrbq,
		       struct hwi_wrb_context *pwrb_context,
		       uint8_t ulp_num)
{
	struct be_mcc_wrb *wrb = wrb_from_mbox(&ctrl->mbox_mem);
	struct be_wrbq_create_req *req = embedded_payload(wrb);
	struct be_wrbq_create_resp *resp = embedded_payload(wrb);
	struct beiscsi_hba *phba = pci_get_drvdata(ctrl->pdev);
	int status;

	mutex_lock(&ctrl->mbox_lock);
	memset(wrb, 0, sizeof(*wrb));

	be_wrb_hdr_prepare(wrb, sizeof(*req), true, 0);

	be_cmd_hdr_prepare(&req->hdr, CMD_SUBSYSTEM_ISCSI,
		OPCODE_COMMON_ISCSI_WRBQ_CREATE, sizeof(*req));
	req->num_pages = PAGES_4K_SPANNED(q_mem->va, q_mem->size);

	/* dual-ULP-aware FW: bind the queue to the requested ULP */
	if (phba->fw_config.dual_ulp_aware) {
		req->ulp_num = ulp_num;
		req->dua_feature |= (1 << BEISCSI_DUAL_ULP_AWARE_BIT);
		req->dua_feature |= (1 << BEISCSI_BIND_Q_TO_ULP_BIT);
	}

	be_cmd_page_addrs_prepare(req->pages, ARRAY_SIZE(req->pages), q_mem);

	status = be_mbox_notify(ctrl);
	if (!status) {
		wrbq->id = le16_to_cpu(resp->cid);
		wrbq->created = true;

		pwrb_context->cid = wrbq->id;
		/* non-DUA FW always uses ULP0 and its fixed doorbell */
		if (!phba->fw_config.dual_ulp_aware) {
			pwrb_context->doorbell_offset = DB_TXULP0_OFFSET;
			pwrb_context->ulp_num = BEISCSI_ULP0;
		} else {
			pwrb_context->ulp_num = resp->ulp_num;
			pwrb_context->doorbell_offset = resp->doorbell_offset;
		}
	}
	mutex_unlock(&ctrl->mbox_lock);
	return status;
}
1121
/**
 * be_cmd_iscsi_post_template_hdr()- Post iSCSI template header buffers
 * @ctrl: ptr to ctrl_info
 * @q_mem: DMA memory holding the template header pages
 *
 * Issues OPCODE_COMMON_ADD_TEMPLATE_HEADER_BUFFERS over the bootstrap
 * mailbox for the iSCSI template header type.
 *
 * return
 *	Success: 0
 *	Failure: Non-Zero Value
 **/
int be_cmd_iscsi_post_template_hdr(struct be_ctrl_info *ctrl,
				    struct be_dma_mem *q_mem)
{
	struct be_mcc_wrb *wrb = wrb_from_mbox(&ctrl->mbox_mem);
	struct be_post_template_pages_req *req = embedded_payload(wrb);
	int status;

	mutex_lock(&ctrl->mbox_lock);

	memset(wrb, 0, sizeof(*wrb));
	be_wrb_hdr_prepare(wrb, sizeof(*req), true, 0);
	be_cmd_hdr_prepare(&req->hdr, CMD_SUBSYSTEM_COMMON,
			   OPCODE_COMMON_ADD_TEMPLATE_HEADER_BUFFERS,
			   sizeof(*req));

	req->num_pages = PAGES_4K_SPANNED(q_mem->va, q_mem->size);
	req->type = BEISCSI_TEMPLATE_HDR_TYPE_ISCSI;
	be_cmd_page_addrs_prepare(req->pages, ARRAY_SIZE(req->pages), q_mem);

	status = be_mbox_notify(ctrl);
	mutex_unlock(&ctrl->mbox_lock);
	return status;
}
1145
/**
 * be_cmd_iscsi_remove_template_hdr()- Remove iSCSI template header buffers
 * @ctrl: ptr to ctrl_info
 *
 * Counterpart of be_cmd_iscsi_post_template_hdr(); tells the FW to
 * release the iSCSI template header buffers.
 *
 * return
 *	Success: 0
 *	Failure: Non-Zero Value
 **/
int be_cmd_iscsi_remove_template_hdr(struct be_ctrl_info *ctrl)
{
	struct be_mcc_wrb *wrb = wrb_from_mbox(&ctrl->mbox_mem);
	struct be_remove_template_pages_req *req = embedded_payload(wrb);
	int status;

	mutex_lock(&ctrl->mbox_lock);

	memset(wrb, 0, sizeof(*wrb));
	be_wrb_hdr_prepare(wrb, sizeof(*req), true, 0);
	be_cmd_hdr_prepare(&req->hdr, CMD_SUBSYSTEM_COMMON,
			   OPCODE_COMMON_REMOVE_TEMPLATE_HEADER_BUFFERS,
			   sizeof(*req));

	req->type = BEISCSI_TEMPLATE_HDR_TYPE_ISCSI;

	status = be_mbox_notify(ctrl);
	mutex_unlock(&ctrl->mbox_lock);
	return status;
}
1166
/**
 * be_cmd_iscsi_post_sgl_pages()- Post SGL pages to FW in chunks
 * @ctrl: ptr to ctrl_info
 * @q_mem: DMA memory; q_mem->dma is advanced past each posted chunk
 * @page_offset: starting page offset within the SGL area
 * @num_pages: number of pages to post (0xff is special, see below)
 *
 * Posts the pages in chunks bounded by the number of page descriptors
 * a single request can carry. On any chunk failure the SGL config is
 * torn down via beiscsi_cmd_q_destroy(QTYPE_SGL).
 *
 * return
 *	Success: 0
 *	Failure: Non-Zero Value
 **/
int be_cmd_iscsi_post_sgl_pages(struct be_ctrl_info *ctrl,
				struct be_dma_mem *q_mem,
				u32 page_offset, u32 num_pages)
{
	struct be_mcc_wrb *wrb = wrb_from_mbox(&ctrl->mbox_mem);
	struct be_post_sgl_pages_req *req = embedded_payload(wrb);
	struct beiscsi_hba *phba = pci_get_drvdata(ctrl->pdev);
	int status;
	unsigned int curr_pages;
	u32 internal_page_offset = 0;
	u32 temp_num_pages = num_pages;

	/* 0xff looks like a FW sentinel: one page descriptor is prepared
	 * but num_pages is reported back to FW as 0xff - TODO confirm
	 */
	if (num_pages == 0xff)
		num_pages = 1;

	mutex_lock(&ctrl->mbox_lock);
	do {
		memset(wrb, 0, sizeof(*wrb));
		be_wrb_hdr_prepare(wrb, sizeof(*req), true, 0);
		be_cmd_hdr_prepare(&req->hdr, CMD_SUBSYSTEM_ISCSI,
				   OPCODE_COMMON_ISCSI_CFG_POST_SGL_PAGES,
				   sizeof(*req));
		/* max page descriptors one request can carry */
		curr_pages = BE_NUMBER_OF_FIELD(struct be_post_sgl_pages_req,
						pages);
		req->num_pages = min(num_pages, curr_pages);
		req->page_offset = page_offset;
		be_cmd_page_addrs_prepare(req->pages, req->num_pages, q_mem);
		/* advance past the pages consumed by this chunk */
		q_mem->dma = q_mem->dma + (req->num_pages * PAGE_SIZE);
		internal_page_offset += req->num_pages;
		page_offset += req->num_pages;
		num_pages -= req->num_pages;

		if (temp_num_pages == 0xff)
			req->num_pages = temp_num_pages;

		status = be_mbox_notify(ctrl);
		if (status) {
			beiscsi_log(phba, KERN_ERR, BEISCSI_LOG_INIT,
				    "BC_%d : FW CMD to map iscsi frags failed.\n");

			goto error;
		}
	} while (num_pages > 0);
error:
	mutex_unlock(&ctrl->mbox_lock);
	if (status != 0)
		beiscsi_cmd_q_destroy(ctrl, NULL, QTYPE_SGL);
	return status;
}
Jayamohan Kallickale5285862011-10-07 19:31:08 -05001216
/**
 * be_cmd_set_vlan()- Configure VLAN parameters on the adapter
 * @phba: device priv structure instance
 * @vlan_tag: TAG to be set
 *
 * Set the VLAN_TAG for the adapter or Disable VLAN on adapter.
 * The command is posted asynchronously on the MCC queue; the caller
 * waits for completion using the returned tag.
 *
 * returns
 *	TAG for the MBX Cmd, or 0 if the lock could not be taken or no
 *	MCC WRB was available
 * **/
int be_cmd_set_vlan(struct beiscsi_hba *phba,
		     uint16_t vlan_tag)
{
	unsigned int tag;
	struct be_mcc_wrb *wrb;
	struct be_cmd_set_vlan_req *req;
	struct be_ctrl_info *ctrl = &phba->ctrl;

	if (mutex_lock_interruptible(&ctrl->mbox_lock))
		return 0;
	wrb = alloc_mcc_wrb(phba, &tag);
	if (!wrb) {
		mutex_unlock(&ctrl->mbox_lock);
		return 0;
	}

	req = embedded_payload(wrb);
	/* NOTE(review): other commands in this file pass sizeof(*req)
	 * here; confirm whether sizeof(*wrb) is intentional for this cmd.
	 */
	be_wrb_hdr_prepare(wrb, sizeof(*wrb), true, 0);
	be_cmd_hdr_prepare(&req->hdr, CMD_SUBSYSTEM_ISCSI,
			   OPCODE_COMMON_ISCSI_NTWK_SET_VLAN,
			   sizeof(*req));

	req->interface_hndl = phba->interface_handle;
	req->vlan_priority = vlan_tag;

	be_mcc_notify(phba, tag);
	mutex_unlock(&ctrl->mbox_lock);

	return tag;
}
Jitendra Bhivare66940952016-08-19 15:20:14 +05301257
Jitendra Bhivare480195c2016-08-19 15:20:15 +05301258int beiscsi_check_supported_fw(struct be_ctrl_info *ctrl,
1259 struct beiscsi_hba *phba)
1260{
1261 struct be_dma_mem nonemb_cmd;
1262 struct be_mcc_wrb *wrb = wrb_from_mbox(&ctrl->mbox_mem);
1263 struct be_mgmt_controller_attributes *req;
1264 struct be_sge *sge = nonembedded_sgl(wrb);
1265 int status = 0;
1266
Christoph Hellwig26a4c992018-10-10 18:22:24 +02001267 nonemb_cmd.va = dma_alloc_coherent(&ctrl->pdev->dev,
Jitendra Bhivare480195c2016-08-19 15:20:15 +05301268 sizeof(struct be_mgmt_controller_attributes),
Christoph Hellwig26a4c992018-10-10 18:22:24 +02001269 &nonemb_cmd.dma, GFP_KERNEL);
Jitendra Bhivare480195c2016-08-19 15:20:15 +05301270 if (nonemb_cmd.va == NULL) {
1271 beiscsi_log(phba, KERN_ERR, BEISCSI_LOG_INIT,
Christoph Hellwig26a4c992018-10-10 18:22:24 +02001272 "BG_%d : dma_alloc_coherent failed in %s\n",
Jitendra Bhivare480195c2016-08-19 15:20:15 +05301273 __func__);
1274 return -ENOMEM;
1275 }
1276 nonemb_cmd.size = sizeof(struct be_mgmt_controller_attributes);
1277 req = nonemb_cmd.va;
1278 memset(req, 0, sizeof(*req));
1279 mutex_lock(&ctrl->mbox_lock);
1280 memset(wrb, 0, sizeof(*wrb));
1281 be_wrb_hdr_prepare(wrb, sizeof(*req), false, 1);
1282 be_cmd_hdr_prepare(&req->hdr, CMD_SUBSYSTEM_COMMON,
1283 OPCODE_COMMON_GET_CNTL_ATTRIBUTES, sizeof(*req));
1284 sge->pa_hi = cpu_to_le32(upper_32_bits(nonemb_cmd.dma));
1285 sge->pa_lo = cpu_to_le32(nonemb_cmd.dma & 0xFFFFFFFF);
1286 sge->len = cpu_to_le32(nonemb_cmd.size);
1287 status = be_mbox_notify(ctrl);
1288 if (!status) {
1289 struct be_mgmt_controller_attributes_resp *resp = nonemb_cmd.va;
1290
1291 beiscsi_log(phba, KERN_INFO, BEISCSI_LOG_INIT,
1292 "BG_%d : Firmware Version of CMD : %s\n"
1293 "Firmware Version is : %s\n"
1294 "Developer Build, not performing version check...\n",
1295 resp->params.hba_attribs
1296 .flashrom_version_string,
1297 resp->params.hba_attribs.
1298 firmware_version_string);
1299
1300 phba->fw_config.iscsi_features =
1301 resp->params.hba_attribs.iscsi_features;
1302 beiscsi_log(phba, KERN_INFO, BEISCSI_LOG_INIT,
1303 "BM_%d : phba->fw_config.iscsi_features = %d\n",
1304 phba->fw_config.iscsi_features);
1305 memcpy(phba->fw_ver_str, resp->params.hba_attribs.
1306 firmware_version_string, BEISCSI_VER_STRLEN);
1307 } else
1308 beiscsi_log(phba, KERN_ERR, BEISCSI_LOG_INIT,
1309 "BG_%d : Failed in beiscsi_check_supported_fw\n");
1310 mutex_unlock(&ctrl->mbox_lock);
1311 if (nonemb_cmd.va)
Christoph Hellwig26a4c992018-10-10 18:22:24 +02001312 dma_free_coherent(&ctrl->pdev->dev, nonemb_cmd.size,
Jitendra Bhivare480195c2016-08-19 15:20:15 +05301313 nonemb_cmd.va, nonemb_cmd.dma);
1314
1315 return status;
1316}
1317
/**
 * beiscsi_get_fw_config()- Get the FW config for the function
 * @ctrl: ptr to Ctrl Info
 * @phba: ptr to the dev priv structure
 *
 * Get the FW config and resources available for the function.
 * The resources are created based on the count received here.
 * All counts received from FW are range-checked before being cached
 * in phba->fw_config; any out-of-range value fails the whole call.
 *
 * return
 *	Success: 0
 *	Failure: Non-Zero Value
 **/
int beiscsi_get_fw_config(struct be_ctrl_info *ctrl,
			  struct beiscsi_hba *phba)
{
	struct be_mcc_wrb *wrb = wrb_from_mbox(&ctrl->mbox_mem);
	struct be_fw_cfg *pfw_cfg = embedded_payload(wrb);
	uint32_t cid_count, icd_count;
	int status = -EINVAL;
	uint8_t ulp_num = 0;

	mutex_lock(&ctrl->mbox_lock);
	memset(wrb, 0, sizeof(*wrb));
	be_wrb_hdr_prepare(wrb, sizeof(*pfw_cfg), true, 0);

	be_cmd_hdr_prepare(&pfw_cfg->hdr, CMD_SUBSYSTEM_COMMON,
			   OPCODE_COMMON_QUERY_FIRMWARE_CONFIG,
			   EMBED_MBX_MAX_PAYLOAD_SIZE);

	if (be_mbox_notify(ctrl)) {
		beiscsi_log(phba, KERN_ERR, BEISCSI_LOG_INIT,
			    "BG_%d : Failed in beiscsi_get_fw_config\n");
		goto fail_init;
	}

	/* FW response formats depend on port id */
	phba->fw_config.phys_port = pfw_cfg->phys_port;
	if (phba->fw_config.phys_port >= BEISCSI_PHYS_PORT_MAX) {
		beiscsi_log(phba, KERN_ERR, BEISCSI_LOG_INIT,
			    "BG_%d : invalid physical port id %d\n",
			    phba->fw_config.phys_port);
		goto fail_init;
	}

	/* populate and check FW config against min and max values */
	if (!is_chip_be2_be3r(phba)) {
		phba->fw_config.eqid_count = pfw_cfg->eqid_count;
		phba->fw_config.cqid_count = pfw_cfg->cqid_count;
		if (phba->fw_config.eqid_count == 0 ||
		    phba->fw_config.eqid_count > 2048) {
			beiscsi_log(phba, KERN_ERR, BEISCSI_LOG_INIT,
				    "BG_%d : invalid EQ count %d\n",
				    phba->fw_config.eqid_count);
			goto fail_init;
		}
		if (phba->fw_config.cqid_count == 0 ||
		    phba->fw_config.cqid_count > 4096) {
			beiscsi_log(phba, KERN_ERR, BEISCSI_LOG_INIT,
				    "BG_%d : invalid CQ count %d\n",
				    phba->fw_config.cqid_count);
			goto fail_init;
		}
		beiscsi_log(phba, KERN_INFO, BEISCSI_LOG_INIT,
			    "BG_%d : EQ_Count : %d CQ_Count : %d\n",
			    phba->fw_config.eqid_count,
			    phba->fw_config.cqid_count);
	}

	/**
	 * Check on which all ULP iSCSI Protocol is loaded.
	 * Set the Bit for those ULP. This set flag is used
	 * at all places in the code to check on which ULP
	 * iSCSi Protocol is loaded
	 **/
	for (ulp_num = 0; ulp_num < BEISCSI_ULP_COUNT; ulp_num++) {
		if (pfw_cfg->ulp[ulp_num].ulp_mode &
		    BEISCSI_ULP_ISCSI_INI_MODE) {
			set_bit(ulp_num, &phba->fw_config.ulp_supported);

			/* Get the CID, ICD and Chain count for each ULP */
			phba->fw_config.iscsi_cid_start[ulp_num] =
				pfw_cfg->ulp[ulp_num].sq_base;
			phba->fw_config.iscsi_cid_count[ulp_num] =
				pfw_cfg->ulp[ulp_num].sq_count;

			phba->fw_config.iscsi_icd_start[ulp_num] =
				pfw_cfg->ulp[ulp_num].icd_base;
			phba->fw_config.iscsi_icd_count[ulp_num] =
				pfw_cfg->ulp[ulp_num].icd_count;

			phba->fw_config.iscsi_chain_start[ulp_num] =
				pfw_cfg->chain_icd[ulp_num].chain_base;
			phba->fw_config.iscsi_chain_count[ulp_num] =
				pfw_cfg->chain_icd[ulp_num].chain_count;

			beiscsi_log(phba, KERN_INFO, BEISCSI_LOG_INIT,
				    "BG_%d : Function loaded on ULP : %d\n"
				    "\tiscsi_cid_count : %d\n"
				    "\tiscsi_cid_start : %d\n"
				    "\t iscsi_icd_count : %d\n"
				    "\t iscsi_icd_start : %d\n",
				    ulp_num,
				    phba->fw_config.
				    iscsi_cid_count[ulp_num],
				    phba->fw_config.
				    iscsi_cid_start[ulp_num],
				    phba->fw_config.
				    iscsi_icd_count[ulp_num],
				    phba->fw_config.
				    iscsi_icd_start[ulp_num]);
		}
	}

	if (phba->fw_config.ulp_supported == 0) {
		beiscsi_log(phba, KERN_ERR, BEISCSI_LOG_INIT,
			    "BG_%d : iSCSI initiator mode not set: ULP0 %x ULP1 %x\n",
			    pfw_cfg->ulp[BEISCSI_ULP0].ulp_mode,
			    pfw_cfg->ulp[BEISCSI_ULP1].ulp_mode);
		goto fail_init;
	}

	/**
	 * ICD is shared among ULPs. Use icd_count of any one loaded ULP
	 **/
	for (ulp_num = 0; ulp_num < BEISCSI_ULP_COUNT; ulp_num++)
		if (test_bit(ulp_num, &phba->fw_config.ulp_supported))
			break;
	icd_count = phba->fw_config.iscsi_icd_count[ulp_num];
	if (icd_count == 0 || icd_count > 65536) {
		beiscsi_log(phba, KERN_ERR, BEISCSI_LOG_INIT,
			    "BG_%d: invalid ICD count %d\n", icd_count);
		goto fail_init;
	}

	cid_count = BEISCSI_GET_CID_COUNT(phba, BEISCSI_ULP0) +
		    BEISCSI_GET_CID_COUNT(phba, BEISCSI_ULP1);
	if (cid_count == 0 || cid_count > 4096) {
		beiscsi_log(phba, KERN_ERR, BEISCSI_LOG_INIT,
			    "BG_%d: invalid CID count %d\n", cid_count);
		goto fail_init;
	}

	/**
	 * Check FW is dual ULP aware i.e. can handle either
	 * of the protocols.
	 */
	phba->fw_config.dual_ulp_aware = (pfw_cfg->function_mode &
					  BEISCSI_FUNC_DUA_MODE);

	beiscsi_log(phba, KERN_INFO, BEISCSI_LOG_INIT,
		    "BG_%d : DUA Mode : 0x%x\n",
		    phba->fw_config.dual_ulp_aware);

	/* all set, continue using this FW config */
	status = 0;
fail_init:
	mutex_unlock(&ctrl->mbox_lock);
	return status;
}
1477
/**
 * beiscsi_get_port_name()- Get port name for the function
 * @ctrl: ptr to Ctrl Info
 * @phba: ptr to the dev priv structure
 *
 * Get the alphanumeric character for port. The result is stored in
 * phba->port_name; '?' is used when the FW reports no name.
 *
 * return
 *	Success: 0
 *	Failure: Non-Zero Value
 **/
int beiscsi_get_port_name(struct be_ctrl_info *ctrl, struct beiscsi_hba *phba)
{
	int ret = 0;
	struct be_mcc_wrb *wrb;
	struct be_cmd_get_port_name *ioctl;

	mutex_lock(&ctrl->mbox_lock);
	wrb = wrb_from_mbox(&ctrl->mbox_mem);
	memset(wrb, 0, sizeof(*wrb));
	ioctl = embedded_payload(wrb);

	be_wrb_hdr_prepare(wrb, sizeof(*ioctl), true, 0);
	be_cmd_hdr_prepare(&ioctl->h.req_hdr, CMD_SUBSYSTEM_COMMON,
			   OPCODE_COMMON_GET_PORT_NAME,
			   EMBED_MBX_MAX_PAYLOAD_SIZE);
	ret = be_mbox_notify(ctrl);
	phba->port_name = 0;
	if (!ret) {
		/* one byte per physical port in the packed response */
		phba->port_name = ioctl->p.resp.port_names >>
				  (phba->fw_config.phys_port * 8) & 0xff;
	} else {
		beiscsi_log(phba, KERN_INFO, BEISCSI_LOG_INIT,
			    "BG_%d : GET_PORT_NAME ret 0x%x status 0x%x\n",
			    ret, ioctl->h.resp_hdr.status);
	}

	if (phba->port_name == 0)
		phba->port_name = '?';

	mutex_unlock(&ctrl->mbox_lock);
	return ret;
}
1518
/**
 * beiscsi_set_host_data()- Report the driver version string to FW
 * @phba: ptr to the dev priv structure
 *
 * Issues OPCODE_COMMON_SET_HOST_DATA with a "Linux iSCSI v<ver>"
 * string. Skipped (returns 0) on BE2/BE3-R chips. Older FW rejects
 * the command; that case is only logged.
 *
 * return
 *	Success: 0
 *	Failure: Non-Zero Value
 **/
int beiscsi_set_host_data(struct beiscsi_hba *phba)
{
	struct be_ctrl_info *ctrl = &phba->ctrl;
	struct be_cmd_set_host_data *ioctl;
	struct be_mcc_wrb *wrb;
	int ret = 0;

	if (is_chip_be2_be3r(phba))
		return ret;

	mutex_lock(&ctrl->mbox_lock);
	wrb = wrb_from_mbox(&ctrl->mbox_mem);
	memset(wrb, 0, sizeof(*wrb));
	ioctl = embedded_payload(wrb);

	be_wrb_hdr_prepare(wrb, sizeof(*ioctl), true, 0);
	be_cmd_hdr_prepare(&ioctl->h.req_hdr, CMD_SUBSYSTEM_COMMON,
			   OPCODE_COMMON_SET_HOST_DATA,
			   EMBED_MBX_MAX_PAYLOAD_SIZE);
	ioctl->param.req.param_id = BE_CMD_SET_HOST_PARAM_ID;
	ioctl->param.req.param_len =
		snprintf((char *)ioctl->param.req.param_data,
			 sizeof(ioctl->param.req.param_data),
			 "Linux iSCSI v%s", BUILD_STR);
	/* include the NUL and round the length up to a 4-byte multiple */
	ioctl->param.req.param_len = ALIGN(ioctl->param.req.param_len + 1, 4);
	if (ioctl->param.req.param_len > BE_CMD_MAX_DRV_VERSION)
		ioctl->param.req.param_len = BE_CMD_MAX_DRV_VERSION;
	ret = be_mbox_notify(ctrl);
	if (!ret) {
		beiscsi_log(phba, KERN_INFO, BEISCSI_LOG_INIT,
			    "BG_%d : HBA set host driver version\n");
	} else {
		/**
		 * Check "MCC_STATUS_INVALID_LENGTH" for SKH.
		 * Older FW versions return this error.
		 */
		if (ret == MCC_STATUS_ILLEGAL_REQUEST ||
		    ret == MCC_STATUS_INVALID_LENGTH)
			__beiscsi_log(phba, KERN_INFO,
				      "BG_%d : HBA failed to set host driver version\n");
	}

	mutex_unlock(&ctrl->mbox_lock);
	return ret;
}
1564
/**
 * beiscsi_set_uer_feature()- Negotiate UE recovery support with FW
 * @phba: ptr to the dev priv structure
 *
 * Issues OPCODE_COMMON_SET_FEATURES with BE_CMD_SET_FEATURE_UER. On
 * success caches ue2rp from the response and sets BEISCSI_HBA_UER_SUPP
 * in phba->state. Older FW rejects the command; that is only logged.
 *
 * return
 *	Success: 0
 *	Failure: Non-Zero Value
 **/
int beiscsi_set_uer_feature(struct beiscsi_hba *phba)
{
	struct be_ctrl_info *ctrl = &phba->ctrl;
	struct be_cmd_set_features *ioctl;
	struct be_mcc_wrb *wrb;
	int ret = 0;

	mutex_lock(&ctrl->mbox_lock);
	wrb = wrb_from_mbox(&ctrl->mbox_mem);
	memset(wrb, 0, sizeof(*wrb));
	ioctl = embedded_payload(wrb);

	be_wrb_hdr_prepare(wrb, sizeof(*ioctl), true, 0);
	be_cmd_hdr_prepare(&ioctl->h.req_hdr, CMD_SUBSYSTEM_COMMON,
			   OPCODE_COMMON_SET_FEATURES,
			   EMBED_MBX_MAX_PAYLOAD_SIZE);
	ioctl->feature = BE_CMD_SET_FEATURE_UER;
	ioctl->param_len = sizeof(ioctl->param.req);
	ioctl->param.req.uer = BE_CMD_UER_SUPP_BIT;
	ret = be_mbox_notify(ctrl);
	if (!ret) {
		phba->ue2rp = ioctl->param.resp.ue2rp;
		set_bit(BEISCSI_HBA_UER_SUPP, &phba->state);
		beiscsi_log(phba, KERN_INFO, BEISCSI_LOG_INIT,
			    "BG_%d : HBA error recovery supported\n");
	} else {
		/**
		 * Check "MCC_STATUS_INVALID_LENGTH" for SKH.
		 * Older FW versions return this error.
		 */
		if (ret == MCC_STATUS_ILLEGAL_REQUEST ||
		    ret == MCC_STATUS_INVALID_LENGTH)
			__beiscsi_log(phba, KERN_INFO,
				      "BG_%d : HBA error recovery not supported\n");
	}

	mutex_unlock(&ctrl->mbox_lock);
	return ret;
}
Jitendra Bhivare4d2ee1e2016-08-19 15:20:16 +05301604
1605static u32 beiscsi_get_post_stage(struct beiscsi_hba *phba)
1606{
1607 u32 sem;
1608
1609 if (is_chip_be2_be3r(phba))
1610 sem = ioread32(phba->csr_va + SLIPORT_SEMAPHORE_OFFSET_BEx);
1611 else
1612 pci_read_config_dword(phba->pcidev,
1613 SLIPORT_SEMAPHORE_OFFSET_SH, &sem);
1614 return sem;
1615}
1616
1617int beiscsi_check_fw_rdy(struct beiscsi_hba *phba)
1618{
1619 u32 loop, post, rdy = 0;
1620
1621 loop = 1000;
1622 while (loop--) {
1623 post = beiscsi_get_post_stage(phba);
1624 if (post & POST_ERROR_BIT)
1625 break;
1626 if ((post & POST_STAGE_MASK) == POST_STAGE_ARMFW_RDY) {
1627 rdy = 1;
1628 break;
1629 }
1630 msleep(60);
1631 }
1632
1633 if (!rdy) {
1634 __beiscsi_log(phba, KERN_ERR,
1635 "BC_%d : FW not ready 0x%x\n", post);
1636 }
1637
1638 return rdy;
1639}
1640
Jitendra Bhivare4ee1ec42016-08-19 15:20:20 +05301641int beiscsi_cmd_function_reset(struct beiscsi_hba *phba)
Jitendra Bhivare4d2ee1e2016-08-19 15:20:16 +05301642{
1643 struct be_ctrl_info *ctrl = &phba->ctrl;
1644 struct be_mcc_wrb *wrb = wrb_from_mbox(&ctrl->mbox_mem);
Jitendra Bhivarefa1261c2016-12-13 15:56:01 +05301645 struct be_post_sgl_pages_req *req;
Jitendra Bhivare4d2ee1e2016-08-19 15:20:16 +05301646 int status;
1647
1648 mutex_lock(&ctrl->mbox_lock);
1649
1650 req = embedded_payload(wrb);
1651 be_wrb_hdr_prepare(wrb, sizeof(*req), true, 0);
1652 be_cmd_hdr_prepare(&req->hdr, CMD_SUBSYSTEM_COMMON,
1653 OPCODE_COMMON_FUNCTION_RESET, sizeof(*req));
1654 status = be_mbox_notify(ctrl);
1655
1656 mutex_unlock(&ctrl->mbox_lock);
1657 return status;
1658}
1659
1660int beiscsi_cmd_special_wrb(struct be_ctrl_info *ctrl, u32 load)
1661{
1662 struct be_mcc_wrb *wrb = wrb_from_mbox(&ctrl->mbox_mem);
1663 struct beiscsi_hba *phba = pci_get_drvdata(ctrl->pdev);
1664 u8 *endian_check;
1665 int status;
1666
1667 mutex_lock(&ctrl->mbox_lock);
1668 memset(wrb, 0, sizeof(*wrb));
1669
1670 endian_check = (u8 *) wrb;
1671 if (load) {
1672 /* to start communicating */
1673 *endian_check++ = 0xFF;
1674 *endian_check++ = 0x12;
1675 *endian_check++ = 0x34;
1676 *endian_check++ = 0xFF;
1677 *endian_check++ = 0xFF;
1678 *endian_check++ = 0x56;
1679 *endian_check++ = 0x78;
1680 *endian_check++ = 0xFF;
1681 } else {
1682 /* to stop communicating */
1683 *endian_check++ = 0xFF;
1684 *endian_check++ = 0xAA;
1685 *endian_check++ = 0xBB;
1686 *endian_check++ = 0xFF;
1687 *endian_check++ = 0xFF;
1688 *endian_check++ = 0xCC;
1689 *endian_check++ = 0xDD;
1690 *endian_check = 0xFF;
1691 }
1692 be_dws_cpu_to_le(wrb, sizeof(*wrb));
1693
1694 status = be_mbox_notify(ctrl);
1695 if (status)
1696 beiscsi_log(phba, KERN_INFO, BEISCSI_LOG_INIT,
1697 "BC_%d : special WRB message failed\n");
1698 mutex_unlock(&ctrl->mbox_lock);
1699 return status;
1700}
1701
1702int beiscsi_init_sliport(struct beiscsi_hba *phba)
1703{
1704 int status;
1705
1706 /* check POST stage before talking to FW */
1707 status = beiscsi_check_fw_rdy(phba);
1708 if (!status)
1709 return -EIO;
1710
Jitendra Bhivared1d5ca82016-08-19 15:20:18 +05301711 /* clear all error states after checking FW rdy */
1712 phba->state &= ~BEISCSI_HBA_IN_ERR;
1713
1714 /* check again UER support */
1715 phba->state &= ~BEISCSI_HBA_UER_SUPP;
1716
Jitendra Bhivare4d2ee1e2016-08-19 15:20:16 +05301717 /*
1718 * SLI COMMON_FUNCTION_RESET completion is indicated by BMBX RDY bit.
1719 * It should clean up any stale info in FW for this fn.
1720 */
1721 status = beiscsi_cmd_function_reset(phba);
1722 if (status) {
1723 beiscsi_log(phba, KERN_ERR, BEISCSI_LOG_INIT,
1724 "BC_%d : SLI Function Reset failed\n");
1725 return status;
1726 }
1727
1728 /* indicate driver is loading */
1729 return beiscsi_cmd_special_wrb(&phba->ctrl, 1);
1730}
Jitendra Bhivaref79929d2016-08-19 15:20:17 +05301731
1732/**
1733 * beiscsi_cmd_iscsi_cleanup()- Inform FW to cleanup EP data structures.
1734 * @phba: pointer to dev priv structure
1735 * @ulp: ULP number.
1736 *
1737 * return
1738 * Success: 0
1739 * Failure: Non-Zero Value
1740 **/
1741int beiscsi_cmd_iscsi_cleanup(struct beiscsi_hba *phba, unsigned short ulp)
1742{
1743 struct be_ctrl_info *ctrl = &phba->ctrl;
1744 struct iscsi_cleanup_req_v1 *req_v1;
1745 struct iscsi_cleanup_req *req;
Jitendra Bhivared7401052016-12-13 15:55:59 +05301746 u16 hdr_ring_id, data_ring_id;
Jitendra Bhivaref79929d2016-08-19 15:20:17 +05301747 struct be_mcc_wrb *wrb;
1748 int status;
1749
1750 mutex_lock(&ctrl->mbox_lock);
1751 wrb = wrb_from_mbox(&ctrl->mbox_mem);
Jitendra Bhivaref79929d2016-08-19 15:20:17 +05301752
Jitendra Bhivared7401052016-12-13 15:55:59 +05301753 hdr_ring_id = HWI_GET_DEF_HDRQ_ID(phba, ulp);
1754 data_ring_id = HWI_GET_DEF_BUFQ_ID(phba, ulp);
Jitendra Bhivaref79929d2016-08-19 15:20:17 +05301755 if (is_chip_be2_be3r(phba)) {
Jitendra Bhivared7401052016-12-13 15:55:59 +05301756 req = embedded_payload(wrb);
1757 be_wrb_hdr_prepare(wrb, sizeof(*req), true, 0);
1758 be_cmd_hdr_prepare(&req->hdr, CMD_SUBSYSTEM_ISCSI,
1759 OPCODE_COMMON_ISCSI_CLEANUP, sizeof(*req));
Jitendra Bhivaref79929d2016-08-19 15:20:17 +05301760 req->chute = (1 << ulp);
Jitendra Bhivared7401052016-12-13 15:55:59 +05301761 /* BE2/BE3 FW creates 8-bit ring id */
1762 req->hdr_ring_id = hdr_ring_id;
1763 req->data_ring_id = data_ring_id;
Jitendra Bhivaref79929d2016-08-19 15:20:17 +05301764 } else {
Jitendra Bhivared7401052016-12-13 15:55:59 +05301765 req_v1 = embedded_payload(wrb);
1766 be_wrb_hdr_prepare(wrb, sizeof(*req_v1), true, 0);
1767 be_cmd_hdr_prepare(&req_v1->hdr, CMD_SUBSYSTEM_ISCSI,
1768 OPCODE_COMMON_ISCSI_CLEANUP,
1769 sizeof(*req_v1));
Jitendra Bhivaref79929d2016-08-19 15:20:17 +05301770 req_v1->hdr.version = 1;
Jitendra Bhivared7401052016-12-13 15:55:59 +05301771 req_v1->chute = (1 << ulp);
1772 req_v1->hdr_ring_id = cpu_to_le16(hdr_ring_id);
1773 req_v1->data_ring_id = cpu_to_le16(data_ring_id);
Jitendra Bhivaref79929d2016-08-19 15:20:17 +05301774 }
1775
1776 status = be_mbox_notify(ctrl);
1777 if (status)
1778 beiscsi_log(phba, KERN_WARNING, BEISCSI_LOG_INIT,
1779 "BG_%d : %s failed %d\n", __func__, ulp);
1780 mutex_unlock(&ctrl->mbox_lock);
1781 return status;
1782}
Jitendra Bhivared1d5ca82016-08-19 15:20:18 +05301783
1784/*
1785 * beiscsi_detect_ue()- Detect Unrecoverable Error on adapter
1786 * @phba: Driver priv structure
1787 *
1788 * Read registers linked to UE and check for the UE status
1789 **/
1790int beiscsi_detect_ue(struct beiscsi_hba *phba)
1791{
1792 uint32_t ue_mask_hi = 0, ue_mask_lo = 0;
1793 uint32_t ue_hi = 0, ue_lo = 0;
1794 uint8_t i = 0;
1795 int ret = 0;
1796
1797 pci_read_config_dword(phba->pcidev,
1798 PCICFG_UE_STATUS_LOW, &ue_lo);
1799 pci_read_config_dword(phba->pcidev,
1800 PCICFG_UE_STATUS_MASK_LOW,
1801 &ue_mask_lo);
1802 pci_read_config_dword(phba->pcidev,
1803 PCICFG_UE_STATUS_HIGH,
1804 &ue_hi);
1805 pci_read_config_dword(phba->pcidev,
1806 PCICFG_UE_STATUS_MASK_HI,
1807 &ue_mask_hi);
1808
1809 ue_lo = (ue_lo & ~ue_mask_lo);
1810 ue_hi = (ue_hi & ~ue_mask_hi);
1811
1812
1813 if (ue_lo || ue_hi) {
1814 set_bit(BEISCSI_HBA_IN_UE, &phba->state);
1815 __beiscsi_log(phba, KERN_ERR,
1816 "BC_%d : HBA error detected\n");
1817 ret = 1;
1818 }
1819
1820 if (ue_lo) {
1821 for (i = 0; ue_lo; ue_lo >>= 1, i++) {
1822 if (ue_lo & 1)
1823 __beiscsi_log(phba, KERN_ERR,
1824 "BC_%d : UE_LOW %s bit set\n",
1825 desc_ue_status_low[i]);
1826 }
1827 }
1828
1829 if (ue_hi) {
1830 for (i = 0; ue_hi; ue_hi >>= 1, i++) {
1831 if (ue_hi & 1)
1832 __beiscsi_log(phba, KERN_ERR,
1833 "BC_%d : UE_HIGH %s bit set\n",
1834 desc_ue_status_hi[i]);
1835 }
1836 }
1837 return ret;
1838}
1839
1840/*
1841 * beiscsi_detect_tpe()- Detect Transient Parity Error on adapter
1842 * @phba: Driver priv structure
1843 *
1844 * Read SLIPORT SEMAPHORE register to check for UER
1845 *
1846 **/
1847int beiscsi_detect_tpe(struct beiscsi_hba *phba)
1848{
1849 u32 post, status;
1850 int ret = 0;
1851
1852 post = beiscsi_get_post_stage(phba);
1853 status = post & POST_STAGE_MASK;
1854 if ((status & POST_ERR_RECOVERY_CODE_MASK) ==
1855 POST_STAGE_RECOVERABLE_ERR) {
1856 set_bit(BEISCSI_HBA_IN_TPE, &phba->state);
1857 __beiscsi_log(phba, KERN_INFO,
1858 "BC_%d : HBA error recoverable: 0x%x\n", post);
1859 ret = 1;
1860 } else {
1861 __beiscsi_log(phba, KERN_INFO,
1862 "BC_%d : HBA in UE: 0x%x\n", post);
1863 }
1864
1865 return ret;
1866}