/*
 * This file is provided under a dual BSD/GPLv2 license.  When using or
 * redistributing this file, you may do so under either license.
 *
 * GPL LICENSE SUMMARY
 *
 * Copyright(c) 2008 - 2011 Intel Corporation. All rights reserved.
 *
 * This program is free software; you can redistribute it and/or modify
 * it under the terms of version 2 of the GNU General Public License as
 * published by the Free Software Foundation.
 *
 * This program is distributed in the hope that it will be useful, but
 * WITHOUT ANY WARRANTY; without even the implied warranty of
 * MERCHANTABILITY or FITNESS FOR A PARTICULAR PURPOSE.  See the GNU
 * General Public License for more details.
 *
 * You should have received a copy of the GNU General Public License
 * along with this program; if not, write to the Free Software
 * Foundation, Inc., 51 Franklin St - Fifth Floor, Boston, MA 02110-1301 USA.
 * The full GNU General Public License is included in this distribution
 * in the file called LICENSE.GPL.
 *
 * BSD LICENSE
 *
 * Copyright(c) 2008 - 2011 Intel Corporation. All rights reserved.
 * All rights reserved.
 *
 * Redistribution and use in source and binary forms, with or without
 * modification, are permitted provided that the following conditions
 * are met:
 *
 *   * Redistributions of source code must retain the above copyright
 *     notice, this list of conditions and the following disclaimer.
 *   * Redistributions in binary form must reproduce the above copyright
 *     notice, this list of conditions and the following disclaimer in
 *     the documentation and/or other materials provided with the
 *     distribution.
 *   * Neither the name of Intel Corporation nor the names of its
 *     contributors may be used to endorse or promote products derived
 *     from this software without specific prior written permission.
 *
 * THIS SOFTWARE IS PROVIDED BY THE COPYRIGHT HOLDERS AND CONTRIBUTORS
 * "AS IS" AND ANY EXPRESS OR IMPLIED WARRANTIES, INCLUDING, BUT NOT
 * LIMITED TO, THE IMPLIED WARRANTIES OF MERCHANTABILITY AND FITNESS FOR
 * A PARTICULAR PURPOSE ARE DISCLAIMED. IN NO EVENT SHALL THE COPYRIGHT
 * OWNER OR CONTRIBUTORS BE LIABLE FOR ANY DIRECT, INDIRECT, INCIDENTAL,
 * SPECIAL, EXEMPLARY, OR CONSEQUENTIAL DAMAGES (INCLUDING, BUT NOT
 * LIMITED TO, PROCUREMENT OF SUBSTITUTE GOODS OR SERVICES; LOSS OF USE,
 * DATA, OR PROFITS; OR BUSINESS INTERRUPTION) HOWEVER CAUSED AND ON ANY
 * THEORY OF LIABILITY, WHETHER IN CONTRACT, STRICT LIABILITY, OR TORT
 * (INCLUDING NEGLIGENCE OR OTHERWISE) ARISING IN ANY WAY OUT OF THE USE
 * OF THIS SOFTWARE, EVEN IF ADVISED OF THE POSSIBILITY OF SUCH DAMAGE.
 */
#include <linux/device.h>
#include <scsi/sas.h>
#include "host.h"
#include "isci.h"
#include "port.h"
#include "probe_roms.h"
#include "remote_device.h"
#include "request.h"
#include "scu_completion_codes.h"
#include "scu_event_codes.h"
#include "registers.h"
#include "scu_remote_node_context.h"
#include "scu_task_context.h"
#include "scu_unsolicited_frame.h"

#define SCU_CONTEXT_RAM_INIT_STALL_TIME      200

/**
 * smu_dcc_get_max_ports() -
 *
 * This macro returns the maximum number of logical ports supported by the
 * hardware. The caller passes in the value read from the device context
 * capacity register and this macro will mash and shift the value appropriately.
 */
#define smu_dcc_get_max_ports(dcc_value) \
	(\
		(((dcc_value) & SMU_DEVICE_CONTEXT_CAPACITY_MAX_LP_MASK) \
		 >> SMU_DEVICE_CONTEXT_CAPACITY_MAX_LP_SHIFT) + 1 \
	)

/**
 * smu_dcc_get_max_task_context() -
 *
 * This macro returns the maximum number of task contexts supported by the
 * hardware. The caller passes in the value read from the device context
 * capacity register and this macro will mash and shift the value appropriately.
 */
#define smu_dcc_get_max_task_context(dcc_value)	\
	(\
		(((dcc_value) & SMU_DEVICE_CONTEXT_CAPACITY_MAX_TC_MASK) \
		 >> SMU_DEVICE_CONTEXT_CAPACITY_MAX_TC_SHIFT) + 1 \
	)

/**
 * smu_dcc_get_max_remote_node_context() -
 *
 * This macro returns the maximum number of remote node contexts supported by
 * the hardware. The caller passes in the value read from the device context
 * capacity register and this macro will mash and shift the value appropriately.
 */
#define smu_dcc_get_max_remote_node_context(dcc_value) \
	(\
		(((dcc_value) & SMU_DEVICE_CONTEXT_CAPACITY_MAX_RNC_MASK) \
		 >> SMU_DEVICE_CONTEXT_CAPACITY_MAX_RNC_SHIFT) + 1 \
	)
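
/*
 * Illustrative sketch (not part of the original driver): the three
 * accessors above follow the same mask/shift/+1 pattern, so a device
 * context capacity value decodes like this:
 *
 *	u32 dcc = readl(&scic->smu_registers->device_context_capacity);
 *	u32 max_ports = smu_dcc_get_max_ports(dcc);
 *	u32 max_tcs   = smu_dcc_get_max_task_context(dcc);
 *	u32 max_rncs  = smu_dcc_get_max_remote_node_context(dcc);
 *
 * The "+ 1" converts the hardware's zero-based maximum-index encoding
 * into a count.  The mask/shift names come from the macros above; the
 * readl() source shown here is an assumption for illustration only.
 */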

#define SCIC_SDS_CONTROLLER_PHY_START_TIMEOUT 100

/**
 * The number of milliseconds to wait while a given phy is consuming power
 * before allowing another set of phys to consume power. Ultimately, this will
 * be specified by OEM parameter.
 */
#define SCIC_SDS_CONTROLLER_POWER_CONTROL_INTERVAL 500

/**
 * NORMALIZE_PUT_POINTER() -
 *
 * This macro will normalize the completion queue put pointer so its value can
 * be used as an array index.
 */
#define NORMALIZE_PUT_POINTER(x) \
	((x) & SMU_COMPLETION_QUEUE_PUT_POINTER_MASK)


/**
 * NORMALIZE_EVENT_POINTER() -
 *
 * This macro will normalize the completion queue event entry so its value can
 * be used as an index.
 */
#define NORMALIZE_EVENT_POINTER(x) \
	(\
		((x) & SMU_COMPLETION_QUEUE_GET_EVENT_POINTER_MASK) \
		>> SMU_COMPLETION_QUEUE_GET_EVENT_POINTER_SHIFT	\
	)

/**
 * INCREMENT_COMPLETION_QUEUE_GET() -
 *
 * This macro will increment the controller's completion queue index value and
 * possibly toggle the cycle bit if the completion queue index wraps back to 0.
 */
#define INCREMENT_COMPLETION_QUEUE_GET(controller, index, cycle) \
	INCREMENT_QUEUE_GET(\
		(index), \
		(cycle), \
		(controller)->completion_queue_entries,	\
		SMU_CQGR_CYCLE_BIT \
	)

/**
 * INCREMENT_EVENT_QUEUE_GET() -
 *
 * This macro will increment the controller's event queue index value and
 * possibly toggle the event cycle bit if the event queue index wraps back to 0.
 */
#define INCREMENT_EVENT_QUEUE_GET(controller, index, cycle) \
	INCREMENT_QUEUE_GET(\
		(index), \
		(cycle), \
		(controller)->completion_event_entries,	\
		SMU_CQGR_EVENT_CYCLE_BIT \
	)


/**
 * NORMALIZE_GET_POINTER() -
 *
 * This macro will normalize the completion queue get pointer so its value can
 * be used as an index into an array.
 */
#define NORMALIZE_GET_POINTER(x) \
	((x) & SMU_COMPLETION_QUEUE_GET_POINTER_MASK)

/**
 * NORMALIZE_GET_POINTER_CYCLE_BIT() -
 *
 * This macro will normalize the completion queue cycle pointer so it matches
 * the completion queue cycle bit.
 */
#define NORMALIZE_GET_POINTER_CYCLE_BIT(x) \
	((SMU_CQGR_CYCLE_BIT & (x)) << (31 - SMU_COMPLETION_QUEUE_GET_CYCLE_BIT_SHIFT))

/**
 * COMPLETION_QUEUE_CYCLE_BIT() -
 *
 * This macro will return the cycle bit of the completion queue entry.
 */
#define COMPLETION_QUEUE_CYCLE_BIT(x) ((x) & 0x80000000)
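
/*
 * Illustrative note (not part of the original driver): the cycle bit is
 * how software tells a fresh completion entry from a stale one without a
 * separate valid flag.  Both the get pointer and each queue entry carry a
 * cycle bit; an entry is new exactly when the two bits agree, e.g.:
 *
 *	u32 get = scic->completion_queue_get;
 *	u32 idx = NORMALIZE_GET_POINTER(get);
 *	bool fresh = NORMALIZE_GET_POINTER_CYCLE_BIT(get) ==
 *		     COMPLETION_QUEUE_CYCLE_BIT(scic->completion_queue[idx]);
 *
 * Each time the get index wraps to 0 the expected cycle bit is toggled,
 * which is what INCREMENT_QUEUE_GET() does for both queues above.  This
 * is the same test scic_sds_controller_completion_queue_has_entries()
 * performs below.
 */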

static bool scic_sds_controller_completion_queue_has_entries(
	struct scic_sds_controller *scic)
{
	u32 get_value = scic->completion_queue_get;
	u32 get_index = get_value & SMU_COMPLETION_QUEUE_GET_POINTER_MASK;

	if (NORMALIZE_GET_POINTER_CYCLE_BIT(get_value) ==
	    COMPLETION_QUEUE_CYCLE_BIT(scic->completion_queue[get_index]))
		return true;

	return false;
}

static bool scic_sds_controller_isr(struct scic_sds_controller *scic)
{
	if (scic_sds_controller_completion_queue_has_entries(scic)) {
		return true;
	} else {
		/*
		 * We have a spurious interrupt; it could be that we have
		 * already emptied the completion queue from a previous
		 * interrupt.
		 */
		writel(SMU_ISR_COMPLETION, &scic->smu_registers->interrupt_status);

		/*
		 * There is a race in the hardware that could cause us not
		 * to be notified of an interrupt completion if we do not
		 * take this step.  We will mask then unmask the interrupts
		 * so that if another interrupt is pending after the clearing
		 * of the interrupt source we get the next interrupt message.
		 */
		writel(0xFF000000, &scic->smu_registers->interrupt_mask);
		writel(0, &scic->smu_registers->interrupt_mask);
	}

	return false;
}

irqreturn_t isci_msix_isr(int vec, void *data)
{
	struct isci_host *ihost = data;

	if (scic_sds_controller_isr(&ihost->sci))
		tasklet_schedule(&ihost->completion_tasklet);

	return IRQ_HANDLED;
}

static bool scic_sds_controller_error_isr(struct scic_sds_controller *scic)
{
	u32 interrupt_status;

	interrupt_status =
		readl(&scic->smu_registers->interrupt_status);
	interrupt_status &= (SMU_ISR_QUEUE_ERROR | SMU_ISR_QUEUE_SUSPEND);

	if (interrupt_status != 0) {
		/*
		 * There is an error interrupt pending, so let it through and
		 * handle it in the callback.
		 */
		return true;
	}

	/*
	 * There is a race in the hardware that could cause us not to be
	 * notified of an interrupt completion if we do not take this step.
	 * We will mask then unmask the error interrupts so if there was
	 * another interrupt pending we will be notified.
	 * Could we write the value of (SMU_ISR_QUEUE_ERROR | SMU_ISR_QUEUE_SUSPEND)?
	 */
	writel(0xff, &scic->smu_registers->interrupt_mask);
	writel(0, &scic->smu_registers->interrupt_mask);

	return false;
}

static void scic_sds_controller_task_completion(struct scic_sds_controller *scic,
						u32 completion_entry)
{
	u32 index;
	struct scic_sds_request *io_request;

	index = SCU_GET_COMPLETION_INDEX(completion_entry);
	io_request = scic->io_request_table[index];

	/* Make sure that we really want to process this IO request */
	if (io_request != NULL &&
	    io_request->io_tag != SCI_CONTROLLER_INVALID_IO_TAG &&
	    scic_sds_io_tag_get_sequence(io_request->io_tag) ==
	    scic->io_request_sequence[index]) {
		/*
		 * Yep, this is a valid io request; pass it along to the io
		 * request handler.
		 */
		scic_sds_io_request_tc_completion(io_request, completion_entry);
	}
}

static void scic_sds_controller_sdma_completion(struct scic_sds_controller *scic,
						u32 completion_entry)
{
	u32 index;
	struct scic_sds_request *io_request;
	struct scic_sds_remote_device *device;

	index = SCU_GET_COMPLETION_INDEX(completion_entry);

	switch (scu_get_command_request_type(completion_entry)) {
	case SCU_CONTEXT_COMMAND_REQUEST_TYPE_POST_TC:
	case SCU_CONTEXT_COMMAND_REQUEST_TYPE_DUMP_TC:
		io_request = scic->io_request_table[index];
		dev_warn(scic_to_dev(scic),
			 "%s: SCIC SDS Completion type SDMA %x for io request "
			 "%p\n",
			 __func__,
			 completion_entry,
			 io_request);
		/* @todo For a post TC operation we need to fail the IO
		 * request
		 */
		break;

	case SCU_CONTEXT_COMMAND_REQUEST_TYPE_DUMP_RNC:
	case SCU_CONTEXT_COMMAND_REQUEST_TYPE_OTHER_RNC:
	case SCU_CONTEXT_COMMAND_REQUEST_TYPE_POST_RNC:
		device = scic->device_table[index];
		dev_warn(scic_to_dev(scic),
			 "%s: SCIC SDS Completion type SDMA %x for remote "
			 "device %p\n",
			 __func__,
			 completion_entry,
			 device);
		/* @todo For a port RNC operation we need to fail the
		 * device
		 */
		break;

	default:
		dev_warn(scic_to_dev(scic),
			 "%s: SCIC SDS Completion unknown SDMA completion "
			 "type %x\n",
			 __func__,
			 completion_entry);
		break;
	}
}

static void scic_sds_controller_unsolicited_frame(struct scic_sds_controller *scic,
						  u32 completion_entry)
{
	u32 index;
	u32 frame_index;

	struct isci_host *ihost = scic_to_ihost(scic);
	struct scu_unsolicited_frame_header *frame_header;
	struct scic_sds_phy *phy;
	struct scic_sds_remote_device *device;

	enum sci_status result = SCI_FAILURE;

	frame_index = SCU_GET_FRAME_INDEX(completion_entry);

	frame_header = scic->uf_control.buffers.array[frame_index].header;
	scic->uf_control.buffers.array[frame_index].state = UNSOLICITED_FRAME_IN_USE;

	if (SCU_GET_FRAME_ERROR(completion_entry)) {
		/*
		 * @todo If the IAF frame or SIGNATURE FIS frame has an error,
		 * will this cause a problem?  We expect the phy initialization
		 * will fail if there is an error in the frame.
		 */
		scic_sds_controller_release_frame(scic, frame_index);
		return;
	}

	if (frame_header->is_address_frame) {
		index = SCU_GET_PROTOCOL_ENGINE_INDEX(completion_entry);
		phy = &ihost->phys[index].sci;
		result = scic_sds_phy_frame_handler(phy, frame_index);
	} else {

		index = SCU_GET_COMPLETION_INDEX(completion_entry);

		if (index == SCIC_SDS_REMOTE_NODE_CONTEXT_INVALID_INDEX) {
			/*
			 * This is a signature fis or a frame from a direct
			 * attached SATA device that has not yet been created.
			 * In either case forward the frame to the PE and let
			 * it take care of the frame data.
			 */
			index = SCU_GET_PROTOCOL_ENGINE_INDEX(completion_entry);
			phy = &ihost->phys[index].sci;
			result = scic_sds_phy_frame_handler(phy, frame_index);
		} else {
			if (index < scic->remote_node_entries)
				device = scic->device_table[index];
			else
				device = NULL;

			if (device != NULL)
				result = scic_sds_remote_device_frame_handler(device, frame_index);
			else
				scic_sds_controller_release_frame(scic, frame_index);
		}
	}

	if (result != SCI_SUCCESS) {
		/*
		 * @todo Is there any reason to report some additional error
		 * message when we get this failure notification?
		 */
	}
}

static void scic_sds_controller_event_completion(struct scic_sds_controller *scic,
						 u32 completion_entry)
{
	struct isci_host *ihost = scic_to_ihost(scic);
	struct scic_sds_request *io_request;
	struct scic_sds_remote_device *device;
	struct scic_sds_phy *phy;
	u32 index;

	index = SCU_GET_COMPLETION_INDEX(completion_entry);

	switch (scu_get_event_type(completion_entry)) {
	case SCU_EVENT_TYPE_SMU_COMMAND_ERROR:
		/* @todo The driver did something wrong and we need to fix the condition. */
		dev_err(scic_to_dev(scic),
			"%s: SCIC Controller 0x%p received SMU command error "
			"0x%x\n",
			__func__,
			scic,
			completion_entry);
		break;

	case SCU_EVENT_TYPE_SMU_PCQ_ERROR:
	case SCU_EVENT_TYPE_SMU_ERROR:
	case SCU_EVENT_TYPE_FATAL_MEMORY_ERROR:
		/*
		 * @todo This is a hardware failure and it's likely that we
		 * want to reset the controller.
		 */
		dev_err(scic_to_dev(scic),
			"%s: SCIC Controller 0x%p received fatal controller "
			"event 0x%x\n",
			__func__,
			scic,
			completion_entry);
		break;

	case SCU_EVENT_TYPE_TRANSPORT_ERROR:
		io_request = scic->io_request_table[index];
		scic_sds_io_request_event_handler(io_request, completion_entry);
		break;

	case SCU_EVENT_TYPE_PTX_SCHEDULE_EVENT:
		switch (scu_get_event_specifier(completion_entry)) {
		case SCU_EVENT_SPECIFIC_SMP_RESPONSE_NO_PE:
		case SCU_EVENT_SPECIFIC_TASK_TIMEOUT:
			io_request = scic->io_request_table[index];
			if (io_request != NULL)
				scic_sds_io_request_event_handler(io_request, completion_entry);
			else
				dev_warn(scic_to_dev(scic),
					 "%s: SCIC Controller 0x%p received "
					 "event 0x%x for io request object "
					 "that doesn't exist.\n",
					 __func__,
					 scic,
					 completion_entry);

			break;

		case SCU_EVENT_SPECIFIC_IT_NEXUS_TIMEOUT:
			device = scic->device_table[index];
			if (device != NULL)
				scic_sds_remote_device_event_handler(device, completion_entry);
			else
				dev_warn(scic_to_dev(scic),
					 "%s: SCIC Controller 0x%p received "
					 "event 0x%x for remote device object "
					 "that doesn't exist.\n",
					 __func__,
					 scic,
					 completion_entry);

			break;
		}
		break;

	case SCU_EVENT_TYPE_BROADCAST_CHANGE:
		/*
		 * direct the broadcast change event to the phy first and then let
		 * the phy redirect the broadcast change to the port object */
	case SCU_EVENT_TYPE_ERR_CNT_EVENT:
		/*
		 * direct error counter event to the phy object since that is where
		 * we get the event notification. This is a type 4 event. */
	case SCU_EVENT_TYPE_OSSP_EVENT:
		index = SCU_GET_PROTOCOL_ENGINE_INDEX(completion_entry);
		phy = &ihost->phys[index].sci;
		scic_sds_phy_event_handler(phy, completion_entry);
		break;

	case SCU_EVENT_TYPE_RNC_SUSPEND_TX:
	case SCU_EVENT_TYPE_RNC_SUSPEND_TX_RX:
	case SCU_EVENT_TYPE_RNC_OPS_MISC:
		if (index < scic->remote_node_entries) {
			device = scic->device_table[index];

			if (device != NULL)
				scic_sds_remote_device_event_handler(device, completion_entry);
		} else
			dev_err(scic_to_dev(scic),
				"%s: SCIC Controller 0x%p received event 0x%x "
				"for remote device object 0x%0x that doesn't "
				"exist.\n",
				__func__,
				scic,
				completion_entry,
				index);

		break;

	default:
		dev_warn(scic_to_dev(scic),
			 "%s: SCIC Controller received unknown event code %x\n",
			 __func__,
			 completion_entry);
		break;
	}
}

static void scic_sds_controller_process_completions(struct scic_sds_controller *scic)
{
	u32 completion_count = 0;
	u32 completion_entry;
	u32 get_index;
	u32 get_cycle;
	u32 event_index;
	u32 event_cycle;

	dev_dbg(scic_to_dev(scic),
		"%s: completion queue beginning get:0x%08x\n",
		__func__,
		scic->completion_queue_get);

	/* Get the component parts of the completion queue */
	get_index = NORMALIZE_GET_POINTER(scic->completion_queue_get);
	get_cycle = SMU_CQGR_CYCLE_BIT & scic->completion_queue_get;

	event_index = NORMALIZE_EVENT_POINTER(scic->completion_queue_get);
	event_cycle = SMU_CQGR_EVENT_CYCLE_BIT & scic->completion_queue_get;

	while (NORMALIZE_GET_POINTER_CYCLE_BIT(get_cycle) ==
	       COMPLETION_QUEUE_CYCLE_BIT(scic->completion_queue[get_index])) {
		completion_count++;

		completion_entry = scic->completion_queue[get_index];
		INCREMENT_COMPLETION_QUEUE_GET(scic, get_index, get_cycle);

		dev_dbg(scic_to_dev(scic),
			"%s: completion queue entry:0x%08x\n",
			__func__,
			completion_entry);

		switch (SCU_GET_COMPLETION_TYPE(completion_entry)) {
		case SCU_COMPLETION_TYPE_TASK:
			scic_sds_controller_task_completion(scic, completion_entry);
			break;

		case SCU_COMPLETION_TYPE_SDMA:
			scic_sds_controller_sdma_completion(scic, completion_entry);
			break;

		case SCU_COMPLETION_TYPE_UFI:
			scic_sds_controller_unsolicited_frame(scic, completion_entry);
			break;

		case SCU_COMPLETION_TYPE_EVENT:
			INCREMENT_EVENT_QUEUE_GET(scic, event_index, event_cycle);
			scic_sds_controller_event_completion(scic, completion_entry);
			break;

		case SCU_COMPLETION_TYPE_NOTIFY:
			/*
			 * Presently we do the same thing with a notify event
			 * that we do with the other event codes.
			 */
			INCREMENT_EVENT_QUEUE_GET(scic, event_index, event_cycle);
			scic_sds_controller_event_completion(scic, completion_entry);
			break;

		default:
			dev_warn(scic_to_dev(scic),
				 "%s: SCIC Controller received unknown "
				 "completion type %x\n",
				 __func__,
				 completion_entry);
			break;
		}
	}

	/* Update the get register if we completed one or more entries */
	if (completion_count > 0) {
		scic->completion_queue_get =
			SMU_CQGR_GEN_BIT(ENABLE) |
			SMU_CQGR_GEN_BIT(EVENT_ENABLE) |
			event_cycle |
			SMU_CQGR_GEN_VAL(EVENT_POINTER, event_index) |
			get_cycle |
			SMU_CQGR_GEN_VAL(POINTER, get_index);

		writel(scic->completion_queue_get,
		       &scic->smu_registers->completion_queue_get);
	}

	dev_dbg(scic_to_dev(scic),
		"%s: completion queue ending get:0x%08x\n",
		__func__,
		scic->completion_queue_get);
}

static void scic_sds_controller_error_handler(struct scic_sds_controller *scic)
{
	u32 interrupt_status;

	interrupt_status =
		readl(&scic->smu_registers->interrupt_status);

	if ((interrupt_status & SMU_ISR_QUEUE_SUSPEND) &&
	    scic_sds_controller_completion_queue_has_entries(scic)) {

		scic_sds_controller_process_completions(scic);
		writel(SMU_ISR_QUEUE_SUSPEND, &scic->smu_registers->interrupt_status);
	} else {
		dev_err(scic_to_dev(scic), "%s: status: %#x\n", __func__,
			interrupt_status);

		sci_change_state(&scic->sm, SCIC_FAILED);

		return;
	}

	/* If we don't process any completions I am not sure that we want to do this.
	 * We are in the middle of a hardware fault and should probably be reset.
	 */
	writel(0, &scic->smu_registers->interrupt_mask);
}

irqreturn_t isci_intx_isr(int vec, void *data)
{
	irqreturn_t ret = IRQ_NONE;
	struct isci_host *ihost = data;
	struct scic_sds_controller *scic = &ihost->sci;

	if (scic_sds_controller_isr(scic)) {
		writel(SMU_ISR_COMPLETION, &scic->smu_registers->interrupt_status);
		tasklet_schedule(&ihost->completion_tasklet);
		ret = IRQ_HANDLED;
	} else if (scic_sds_controller_error_isr(scic)) {
		spin_lock(&ihost->scic_lock);
		scic_sds_controller_error_handler(scic);
		spin_unlock(&ihost->scic_lock);
		ret = IRQ_HANDLED;
	}

	return ret;
}

irqreturn_t isci_error_isr(int vec, void *data)
{
	struct isci_host *ihost = data;

	if (scic_sds_controller_error_isr(&ihost->sci))
		scic_sds_controller_error_handler(&ihost->sci);

	return IRQ_HANDLED;
}

/**
 * isci_host_start_complete() - This function is called by the core library,
 *    through the ISCI Module, to indicate controller start status.
 * @isci_host: This parameter specifies the ISCI host object
 * @completion_status: This parameter specifies the completion status from the
 *    core library.
 *
 */
static void isci_host_start_complete(struct isci_host *ihost, enum sci_status completion_status)
{
	if (completion_status != SCI_SUCCESS)
		dev_info(&ihost->pdev->dev,
			 "controller start timed out, continuing...\n");
	isci_host_change_state(ihost, isci_ready);
	clear_bit(IHOST_START_PENDING, &ihost->flags);
	wake_up(&ihost->eventq);
}

int isci_host_scan_finished(struct Scsi_Host *shost, unsigned long time)
{
	struct isci_host *ihost = SHOST_TO_SAS_HA(shost)->lldd_ha;

	if (test_bit(IHOST_START_PENDING, &ihost->flags))
		return 0;

	/* todo: use sas_flush_discovery once it is upstream */
	scsi_flush_work(shost);

	scsi_flush_work(shost);

	dev_dbg(&ihost->pdev->dev,
		"%s: ihost->status = %d, time = %ld\n",
		__func__, isci_host_get_state(ihost), time);

	return 1;
}

/**
 * scic_controller_get_suggested_start_timeout() - This method returns the
 *    suggested scic_controller_start() timeout amount.  The user is free to
 *    use any timeout value, but this method provides the suggested minimum
 *    start timeout value.  The returned value is based upon empirical
 *    information determined as a result of interoperability testing.
 * @controller: the handle to the controller object for which to return the
 *    suggested start timeout.
 *
 * This method returns the number of milliseconds for the suggested start
 * operation timeout.
 */
static u32 scic_controller_get_suggested_start_timeout(
	struct scic_sds_controller *sc)
{
	/* Validate the user supplied parameters. */
	if (sc == NULL)
		return 0;

	/*
	 * The suggested minimum timeout value for a controller start operation:
	 *
	 *     Signature FIS Timeout
	 *   + Phy Start Timeout
	 *   + Number of Phy Spin Up Intervals
	 *   ---------------------------------
	 *   Number of milliseconds for the controller start operation.
	 *
	 * NOTE: The number of phy spin up intervals will be equivalent
	 *       to the number of phys divided by the number of phys allowed
	 *       per interval - 1 (once OEM parameters are supported).
	 *       Currently we assume only 1 phy per interval.
	 */

	return SCIC_SDS_SIGNATURE_FIS_TIMEOUT
		+ SCIC_SDS_CONTROLLER_PHY_START_TIMEOUT
		+ ((SCI_MAX_PHYS - 1) * SCIC_SDS_CONTROLLER_POWER_CONTROL_INTERVAL);
}
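
/*
 * Worked example (illustration only, assuming SCI_MAX_PHYS == 4 as on
 * this hardware): with the SCIC_SDS_CONTROLLER_PHY_START_TIMEOUT of
 * 100 ms and the SCIC_SDS_CONTROLLER_POWER_CONTROL_INTERVAL of 500 ms
 * defined above, the spin-up term contributes (4 - 1) * 500 = 1500 ms,
 * so the suggested timeout is SCIC_SDS_SIGNATURE_FIS_TIMEOUT + 100 +
 * 1500 milliseconds.  isci_host_scan_start() below feeds this value
 * straight into scic_controller_start().
 */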

static void scic_controller_enable_interrupts(
	struct scic_sds_controller *scic)
{
	BUG_ON(scic->smu_registers == NULL);
	writel(0, &scic->smu_registers->interrupt_mask);
}

void scic_controller_disable_interrupts(
	struct scic_sds_controller *scic)
{
	BUG_ON(scic->smu_registers == NULL);
	writel(0xffffffff, &scic->smu_registers->interrupt_mask);
}

static void scic_sds_controller_enable_port_task_scheduler(
	struct scic_sds_controller *scic)
{
	u32 port_task_scheduler_value;

	port_task_scheduler_value =
		readl(&scic->scu_registers->peg0.ptsg.control);
	port_task_scheduler_value |=
		(SCU_PTSGCR_GEN_BIT(ETM_ENABLE) |
		 SCU_PTSGCR_GEN_BIT(PTSG_ENABLE));
	writel(port_task_scheduler_value,
	       &scic->scu_registers->peg0.ptsg.control);
}

static void scic_sds_controller_assign_task_entries(struct scic_sds_controller *scic)
{
	u32 task_assignment;

	/*
	 * Assign all the TCs to function 0
	 * TODO: Do we actually need to read this register to write it back?
	 */

	task_assignment =
		readl(&scic->smu_registers->task_context_assignment[0]);

	task_assignment |= (SMU_TCA_GEN_VAL(STARTING, 0)) |
		(SMU_TCA_GEN_VAL(ENDING, scic->task_context_entries - 1)) |
		(SMU_TCA_GEN_BIT(RANGE_CHECK_ENABLE));

	writel(task_assignment,
	       &scic->smu_registers->task_context_assignment[0]);
}

static void scic_sds_controller_initialize_completion_queue(struct scic_sds_controller *scic)
{
	u32 index;
	u32 completion_queue_control_value;
	u32 completion_queue_get_value;
	u32 completion_queue_put_value;

	scic->completion_queue_get = 0;

	completion_queue_control_value =
		(SMU_CQC_QUEUE_LIMIT_SET(scic->completion_queue_entries - 1) |
		 SMU_CQC_EVENT_LIMIT_SET(scic->completion_event_entries - 1));

	writel(completion_queue_control_value,
	       &scic->smu_registers->completion_queue_control);


	/* Set the completion queue get pointer and enable the queue */
	completion_queue_get_value =
		((SMU_CQGR_GEN_VAL(POINTER, 0)) |
		 (SMU_CQGR_GEN_VAL(EVENT_POINTER, 0)) |
		 (SMU_CQGR_GEN_BIT(ENABLE)) |
		 (SMU_CQGR_GEN_BIT(EVENT_ENABLE)));

	writel(completion_queue_get_value,
	       &scic->smu_registers->completion_queue_get);

	/* Set the completion queue put pointer */
	completion_queue_put_value =
		((SMU_CQPR_GEN_VAL(POINTER, 0)) |
		 (SMU_CQPR_GEN_VAL(EVENT_POINTER, 0)));

	writel(completion_queue_put_value,
	       &scic->smu_registers->completion_queue_put);

	/* Initialize the cycle bit of the completion queue entries */
	for (index = 0; index < scic->completion_queue_entries; index++) {
		/*
		 * If get.cycle_bit != completion_queue.cycle_bit
		 * it's not a valid completion queue entry,
		 * so at system start all entries are invalid.
		 */
		scic->completion_queue[index] = 0x80000000;
	}
}

static void scic_sds_controller_initialize_unsolicited_frame_queue(struct scic_sds_controller *scic)
{
	u32 frame_queue_control_value;
	u32 frame_queue_get_value;
	u32 frame_queue_put_value;

	/* Write the queue size */
	frame_queue_control_value =
		SCU_UFQC_GEN_VAL(QUEUE_SIZE,
				 scic->uf_control.address_table.count);

	writel(frame_queue_control_value,
	       &scic->scu_registers->sdma.unsolicited_frame_queue_control);

	/* Setup the get pointer for the unsolicited frame queue */
	frame_queue_get_value =
		(SCU_UFQGP_GEN_VAL(POINTER, 0) |
		 SCU_UFQGP_GEN_BIT(ENABLE_BIT));

	writel(frame_queue_get_value,
	       &scic->scu_registers->sdma.unsolicited_frame_get_pointer);
	/* Setup the put pointer for the unsolicited frame queue */
	frame_queue_put_value = SCU_UFQPP_GEN_VAL(POINTER, 0);
	writel(frame_queue_put_value,
	       &scic->scu_registers->sdma.unsolicited_frame_put_pointer);
}

/**
 * This method will attempt to transition into the ready state for the
 *    controller and indicate that the controller start operation has completed
 *    if all criteria are met.
 * @scic: This parameter indicates the controller object for which
 *    to transition to ready.
 * @status: This parameter indicates the status value to be passed into the
 *    call to scic_cb_controller_start_complete().
 *
 * none.
 */
static void scic_sds_controller_transition_to_ready(
	struct scic_sds_controller *scic,
	enum sci_status status)
{
	struct isci_host *ihost = scic_to_ihost(scic);

	if (scic->sm.current_state_id == SCIC_STARTING) {
		/*
		 * We move into the ready state, because some of the phys/ports
		 * may be up and operational.
		 */
		sci_change_state(&scic->sm, SCIC_READY);

		isci_host_start_complete(ihost, status);
	}
}

static bool is_phy_starting(struct scic_sds_phy *sci_phy)
{
	enum scic_sds_phy_states state;

	state = sci_phy->sm.current_state_id;
	switch (state) {
	case SCI_PHY_STARTING:
	case SCI_PHY_SUB_INITIAL:
	case SCI_PHY_SUB_AWAIT_SAS_SPEED_EN:
	case SCI_PHY_SUB_AWAIT_IAF_UF:
	case SCI_PHY_SUB_AWAIT_SAS_POWER:
	case SCI_PHY_SUB_AWAIT_SATA_POWER:
	case SCI_PHY_SUB_AWAIT_SATA_PHY_EN:
	case SCI_PHY_SUB_AWAIT_SATA_SPEED_EN:
	case SCI_PHY_SUB_AWAIT_SIG_FIS_UF:
	case SCI_PHY_SUB_FINAL:
		return true;
	default:
		return false;
	}
}

/**
 * scic_sds_controller_start_next_phy - start phy
 * @scic: controller
 *
 * If all the phys have been started, then attempt to transition the
 * controller to the READY state and inform the user
 * (scic_cb_controller_start_complete()).
 */
static enum sci_status scic_sds_controller_start_next_phy(struct scic_sds_controller *scic)
{
	struct isci_host *ihost = scic_to_ihost(scic);
	struct scic_sds_oem_params *oem = &scic->oem_parameters.sds1;
	struct scic_sds_phy *sci_phy;
	enum sci_status status;

	status = SCI_SUCCESS;

	if (scic->phy_startup_timer_pending)
		return status;

	if (scic->next_phy_to_start >= SCI_MAX_PHYS) {
		bool is_controller_start_complete = true;
		u32 state;
		u8 index;

		for (index = 0; index < SCI_MAX_PHYS; index++) {
			sci_phy = &ihost->phys[index].sci;
			state = sci_phy->sm.current_state_id;

			if (!phy_get_non_dummy_port(sci_phy))
				continue;

			/* The controller start operation is complete iff:
			 * - all links have been given an opportunity to start
			 * - have no indication of a connected device
			 * - have an indication of a connected device and it has
			 *   finished the link training process.
			 */
			if ((sci_phy->is_in_link_training == false && state == SCI_PHY_INITIAL) ||
			    (sci_phy->is_in_link_training == false && state == SCI_PHY_STOPPED) ||
			    (sci_phy->is_in_link_training == true && is_phy_starting(sci_phy))) {
				is_controller_start_complete = false;
				break;
			}
		}

		/*
		 * The controller has successfully finished the start process.
		 * Inform the SCI Core user and transition to the READY state.
		 */
		if (is_controller_start_complete == true) {
			scic_sds_controller_transition_to_ready(scic, SCI_SUCCESS);
			sci_del_timer(&scic->phy_timer);
			scic->phy_startup_timer_pending = false;
		}
	} else {
		sci_phy = &ihost->phys[scic->next_phy_to_start].sci;

		if (oem->controller.mode_type == SCIC_PORT_MANUAL_CONFIGURATION_MODE) {
			if (phy_get_non_dummy_port(sci_phy) == NULL) {
				scic->next_phy_to_start++;

				/* Caution, recursion ahead, be forewarned.
				 *
				 * The PHY was never added to a PORT in MPC mode
				 * so start the next phy in sequence.  This phy
				 * will never go link up and will not draw power;
				 * the OEM parameters either configured the phy
				 * incorrectly for the PORT or it was never
				 * assigned to a PORT.
				 */
				return scic_sds_controller_start_next_phy(scic);
			}
		}

		status = scic_sds_phy_start(sci_phy);

		if (status == SCI_SUCCESS) {
			sci_mod_timer(&scic->phy_timer,
				      SCIC_SDS_CONTROLLER_PHY_START_TIMEOUT);
			scic->phy_startup_timer_pending = true;
		} else {
			dev_warn(scic_to_dev(scic),
				 "%s: Controller start operation failed "
				 "to start phy %d because of status "
				 "%d.\n",
				 __func__,
				 ihost->phys[scic->next_phy_to_start].sci.phy_index,
				 status);
		}

		scic->next_phy_to_start++;
	}

	return status;
}
1025
Edmund Nadolskibb3dbdf2011-05-19 20:26:02 -07001026static void phy_startup_timeout(unsigned long data)
Dan Williamscc9203b2011-05-08 17:34:44 -07001027{
Edmund Nadolskibb3dbdf2011-05-19 20:26:02 -07001028 struct sci_timer *tmr = (struct sci_timer *)data;
1029 struct scic_sds_controller *scic = container_of(tmr, typeof(*scic), phy_timer);
1030 struct isci_host *ihost = scic_to_ihost(scic);
1031 unsigned long flags;
Dan Williamscc9203b2011-05-08 17:34:44 -07001032 enum sci_status status;
1033
Edmund Nadolskibb3dbdf2011-05-19 20:26:02 -07001034 spin_lock_irqsave(&ihost->scic_lock, flags);
1035
1036 if (tmr->cancel)
1037 goto done;
1038
Dan Williamscc9203b2011-05-08 17:34:44 -07001039 scic->phy_startup_timer_pending = false;
Edmund Nadolskibb3dbdf2011-05-19 20:26:02 -07001040
1041 do {
Dan Williamscc9203b2011-05-08 17:34:44 -07001042 status = scic_sds_controller_start_next_phy(scic);
Edmund Nadolskibb3dbdf2011-05-19 20:26:02 -07001043 } while (status != SCI_SUCCESS);
1044
1045done:
1046 spin_unlock_irqrestore(&ihost->scic_lock, flags);
Dan Williamscc9203b2011-05-08 17:34:44 -07001047}

static enum sci_status scic_controller_start(struct scic_sds_controller *scic,
					     u32 timeout)
{
	struct isci_host *ihost = scic_to_ihost(scic);
	enum sci_status result;
	u16 index;

	if (scic->sm.current_state_id != SCIC_INITIALIZED) {
		dev_warn(scic_to_dev(scic),
			 "SCIC Controller start operation requested in "
			 "invalid state\n");
		return SCI_FAILURE_INVALID_STATE;
	}

	/* Build the TCi free pool */
	sci_pool_initialize(scic->tci_pool);
	for (index = 0; index < scic->task_context_entries; index++)
		sci_pool_put(scic->tci_pool, index);

	/* Build the RNi free pool */
	scic_sds_remote_node_table_initialize(
		&scic->available_remote_nodes,
		scic->remote_node_entries);

	/*
	 * Before anything else let's make sure we will not be
	 * interrupted by the hardware.
	 */
	scic_controller_disable_interrupts(scic);

	/* Enable the port task scheduler */
	scic_sds_controller_enable_port_task_scheduler(scic);

	/* Assign all the task entries to scic physical function */
	scic_sds_controller_assign_task_entries(scic);

	/* Now initialize the completion queue */
	scic_sds_controller_initialize_completion_queue(scic);

	/* Initialize the unsolicited frame queue for use */
	scic_sds_controller_initialize_unsolicited_frame_queue(scic);

	/* Start all of the ports on this controller */
	for (index = 0; index < scic->logical_port_entries; index++) {
		struct scic_sds_port *sci_port = &ihost->ports[index].sci;

		result = scic_sds_port_start(sci_port);
		if (result)
			return result;
	}

	scic_sds_controller_start_next_phy(scic);

	sci_mod_timer(&scic->timer, timeout);

	sci_change_state(&scic->sm, SCIC_STARTING);

	return SCI_SUCCESS;
}

void isci_host_scan_start(struct Scsi_Host *shost)
{
	struct isci_host *ihost = SHOST_TO_SAS_HA(shost)->lldd_ha;
	unsigned long tmo = scic_controller_get_suggested_start_timeout(&ihost->sci);

	set_bit(IHOST_START_PENDING, &ihost->flags);

	spin_lock_irq(&ihost->scic_lock);
	scic_controller_start(&ihost->sci, tmo);
	scic_controller_enable_interrupts(&ihost->sci);
	spin_unlock_irq(&ihost->scic_lock);
}

static void isci_host_stop_complete(struct isci_host *ihost, enum sci_status completion_status)
{
	isci_host_change_state(ihost, isci_stopped);
	scic_controller_disable_interrupts(&ihost->sci);
	clear_bit(IHOST_STOP_PENDING, &ihost->flags);
	wake_up(&ihost->eventq);
}

static void scic_sds_controller_completion_handler(struct scic_sds_controller *scic)
{
	/* Empty out the completion queue */
	if (scic_sds_controller_completion_queue_has_entries(scic))
		scic_sds_controller_process_completions(scic);

	/* Clear the interrupt and enable all interrupts again */
	writel(SMU_ISR_COMPLETION, &scic->smu_registers->interrupt_status);
	/* Could we write the value of SMU_ISR_COMPLETION? */
	writel(0xFF000000, &scic->smu_registers->interrupt_mask);
	writel(0, &scic->smu_registers->interrupt_mask);
}
1142
Dan Williams6f231dd2011-07-02 22:56:22 -07001143/**
1144 * isci_host_completion_routine() - This function is the delayed service
1145 * routine that calls the sci core library's completion handler. It's
1146 * scheduled as a tasklet from the interrupt service routine when interrupts
1147 * in use, or set as the timeout function in polled mode.
1148 * @data: This parameter specifies the ISCI host object
1149 *
1150 */
1151static void isci_host_completion_routine(unsigned long data)
1152{
1153 struct isci_host *isci_host = (struct isci_host *)data;
Jeff Skirvin11b00c12011-03-04 14:06:40 -08001154 struct list_head completed_request_list;
1155 struct list_head errored_request_list;
1156 struct list_head *current_position;
1157 struct list_head *next_position;
Dan Williams6f231dd2011-07-02 22:56:22 -07001158 struct isci_request *request;
1159 struct isci_request *next_request;
Jeff Skirvin11b00c12011-03-04 14:06:40 -08001160 struct sas_task *task;
Dan Williams6f231dd2011-07-02 22:56:22 -07001161
1162 INIT_LIST_HEAD(&completed_request_list);
Jeff Skirvin11b00c12011-03-04 14:06:40 -08001163 INIT_LIST_HEAD(&errored_request_list);
Dan Williams6f231dd2011-07-02 22:56:22 -07001164
1165 spin_lock_irq(&isci_host->scic_lock);
1166
Artur Wojcikcc3dbd02011-05-04 07:58:16 +00001167 scic_sds_controller_completion_handler(&isci_host->sci);
Dan Williamsc7ef4032011-02-18 09:25:05 -08001168
Dan Williams6f231dd2011-07-02 22:56:22 -07001169 /* Take the lists of completed I/Os from the host. */
Jeff Skirvin11b00c12011-03-04 14:06:40 -08001170
Dan Williams6f231dd2011-07-02 22:56:22 -07001171 list_splice_init(&isci_host->requests_to_complete,
1172 &completed_request_list);
1173
Jeff Skirvin11b00c12011-03-04 14:06:40 -08001174 /* Take the list of errored I/Os from the host. */
1175 list_splice_init(&isci_host->requests_to_errorback,
1176 &errored_request_list);
Dan Williams6f231dd2011-07-02 22:56:22 -07001177
1178 spin_unlock_irq(&isci_host->scic_lock);
1179
1180 /* Process any completions in the lists. */
1181 list_for_each_safe(current_position, next_position,
1182 &completed_request_list) {
1183
1184 request = list_entry(current_position, struct isci_request,
1185 completed_node);
1186 task = isci_request_access_task(request);
1187
1188 /* Normal notification (task_done) */
1189 dev_dbg(&isci_host->pdev->dev,
1190 "%s: Normal - request/task = %p/%p\n",
1191 __func__,
1192 request,
1193 task);
1194
Jeff Skirvin11b00c12011-03-04 14:06:40 -08001195 /* Return the task to libsas */
1196 if (task != NULL) {
Dan Williams6f231dd2011-07-02 22:56:22 -07001197
Jeff Skirvin11b00c12011-03-04 14:06:40 -08001198 task->lldd_task = NULL;
1199 if (!(task->task_state_flags & SAS_TASK_STATE_ABORTED)) {
1200
1201 /* If the task is already in the abort path,
1202 * the task_done callback cannot be called.
1203 */
1204 task->task_done(task);
1205 }
1206 }
Dan Williams6f231dd2011-07-02 22:56:22 -07001207 /* Free the request object. */
1208 isci_request_free(isci_host, request);
1209 }
Jeff Skirvin11b00c12011-03-04 14:06:40 -08001210 list_for_each_entry_safe(request, next_request, &errored_request_list,
Dan Williams6f231dd2011-07-02 22:56:22 -07001211 completed_node) {
1212
1213 task = isci_request_access_task(request);
1214
1215 /* Use sas_task_abort */
1216 dev_warn(&isci_host->pdev->dev,
1217 "%s: Error - request/task = %p/%p\n",
1218 __func__,
1219 request,
1220 task);
1221
Jeff Skirvin11b00c12011-03-04 14:06:40 -08001222 if (task != NULL) {
1223
1224 /* Put the task into the abort path if it's not there
1225 * already.
1226 */
1227 if (!(task->task_state_flags & SAS_TASK_STATE_ABORTED))
1228 sas_task_abort(task);
1229
1230 } else {
1231 /* This is a case where the request has completed with a
1232 * status such that it needed further target servicing,
1233 * but the sas_task reference has already been removed
1234 * from the request. Since it was errored, it was not
1235 * being aborted, so there is nothing to do except free
1236 * it.
1237 */
1238
1239 spin_lock_irq(&isci_host->scic_lock);
1240 /* Remove the request from the remote device's list
1241 * of pending requests.
1242 */
1243 list_del_init(&request->dev_node);
1244 spin_unlock_irq(&isci_host->scic_lock);
1245
1246 /* Free the request object. */
1247 isci_request_free(isci_host, request);
1248 }
Dan Williams6f231dd2011-07-02 22:56:22 -07001249 }
1250
1251}
1252
Dan Williamscc9203b2011-05-08 17:34:44 -07001253/**
1254 * scic_controller_stop() - This method will stop an individual controller
1255 * object.This method will invoke the associated user callback upon
1256 * completion. The completion callback is called when the following
1257 * conditions are met: -# the method return status is SCI_SUCCESS. -# the
1258 * controller has been quiesced. This method will ensure that all IO
1259 * requests are quiesced, phys are stopped, and all additional operation by
1260 * the hardware is halted.
1261 * @controller: the handle to the controller object to stop.
1262 * @timeout: This parameter specifies the number of milliseconds in which the
1263 * stop operation should complete.
1264 *
1265 * The controller must be in the STARTED or STOPPED state. Indicate if the
1266 * controller stop method succeeded or failed in some way. SCI_SUCCESS if the
1267 * stop operation successfully began. SCI_WARNING_ALREADY_IN_STATE if the
1268 * controller is already in the STOPPED state. SCI_FAILURE_INVALID_STATE if the
1269 * controller is not either in the STARTED or STOPPED states.
1270 */
1271static enum sci_status scic_controller_stop(struct scic_sds_controller *scic,
1272 u32 timeout)
1273{
Edmund Nadolskie3013702011-06-02 00:10:43 +00001274 if (scic->sm.current_state_id != SCIC_READY) {
Dan Williamscc9203b2011-05-08 17:34:44 -07001275 dev_warn(scic_to_dev(scic),
1276 "SCIC Controller stop operation requested in "
1277 "invalid state\n");
1278 return SCI_FAILURE_INVALID_STATE;
1279 }
1280
Edmund Nadolski6cb58532011-05-19 11:59:56 +00001281 sci_mod_timer(&scic->timer, timeout);
Edmund Nadolskie3013702011-06-02 00:10:43 +00001282 sci_change_state(&scic->sm, SCIC_STOPPING);
Dan Williamscc9203b2011-05-08 17:34:44 -07001283 return SCI_SUCCESS;
1284}
1285
1286/**
1287 * scic_controller_reset() - This method will reset the supplied core
1288 * controller regardless of the state of said controller. This operation is
1289 * considered destructive. In other words, all current operations are wiped
1290 * out. No IO completions for outstanding devices occur. Outstanding IO
1291 * requests are not aborted or completed at the actual remote device.
1292 * @controller: the handle to the controller object to reset.
1293 *
1294 * Indicate if the controller reset method succeeded or failed in some way.
1295 * SCI_SUCCESS if the reset operation successfully started. SCI_FATAL_ERROR if
1296 * the controller reset operation is unable to complete.
1297 */
1298static enum sci_status scic_controller_reset(struct scic_sds_controller *scic)
1299{
Edmund Nadolskie3013702011-06-02 00:10:43 +00001300 switch (scic->sm.current_state_id) {
1301 case SCIC_RESET:
1302 case SCIC_READY:
1303 case SCIC_STOPPED:
1304 case SCIC_FAILED:
Dan Williamscc9203b2011-05-08 17:34:44 -07001305 /*
1306 * The reset operation is not a graceful cleanup, just
1307 * perform the state transition.
1308 */
Edmund Nadolskie3013702011-06-02 00:10:43 +00001309 sci_change_state(&scic->sm, SCIC_RESETTING);
Dan Williamscc9203b2011-05-08 17:34:44 -07001310 return SCI_SUCCESS;
1311 default:
1312 dev_warn(scic_to_dev(scic),
1313 "SCIC Controller reset operation requested in "
1314 "invalid state\n");
1315 return SCI_FAILURE_INVALID_STATE;
1316 }
1317}
1318
Dan Williams0cf89d12011-02-18 09:25:07 -08001319void isci_host_deinit(struct isci_host *ihost)
Dan Williams6f231dd2011-07-02 22:56:22 -07001320{
1321 int i;
1322
Dan Williams0cf89d12011-02-18 09:25:07 -08001323 isci_host_change_state(ihost, isci_stopping);
Dan Williams6f231dd2011-07-02 22:56:22 -07001324 for (i = 0; i < SCI_MAX_PORTS; i++) {
Dan Williamse5313812011-05-07 10:11:43 -07001325 struct isci_port *iport = &ihost->ports[i];
Dan Williams0cf89d12011-02-18 09:25:07 -08001326 struct isci_remote_device *idev, *d;
1327
Dan Williamse5313812011-05-07 10:11:43 -07001328 list_for_each_entry_safe(idev, d, &iport->remote_dev_list, node) {
Dan Williams0cf89d12011-02-18 09:25:07 -08001329 isci_remote_device_change_state(idev, isci_stopping);
Dan Williams6ad31fe2011-03-04 12:10:29 -08001330 isci_remote_device_stop(ihost, idev);
Dan Williams6f231dd2011-07-02 22:56:22 -07001331 }
1332 }
1333
Dan Williams0cf89d12011-02-18 09:25:07 -08001334 set_bit(IHOST_STOP_PENDING, &ihost->flags);
Dan Williams7c40a802011-03-02 11:49:26 -08001335
1336 spin_lock_irq(&ihost->scic_lock);
Artur Wojcikcc3dbd02011-05-04 07:58:16 +00001337 scic_controller_stop(&ihost->sci, SCIC_CONTROLLER_STOP_TIMEOUT);
Dan Williams7c40a802011-03-02 11:49:26 -08001338 spin_unlock_irq(&ihost->scic_lock);
1339
Dan Williams0cf89d12011-02-18 09:25:07 -08001340 wait_for_stop(ihost);
Artur Wojcikcc3dbd02011-05-04 07:58:16 +00001341 scic_controller_reset(&ihost->sci);
Edmund Nadolski5553ba22011-05-19 11:59:10 +00001342
1343 /* Cancel any/all outstanding port timers */
1344 for (i = 0; i < ihost->sci.logical_port_entries; i++) {
1345 struct scic_sds_port *sci_port = &ihost->ports[i].sci;
1346 del_timer_sync(&sci_port->timer.timer);
1347 }
1348
Edmund Nadolskia628d472011-05-19 11:59:36 +00001349 /* Cancel any/all outstanding phy timers */
1350 for (i = 0; i < SCI_MAX_PHYS; i++) {
1351 struct scic_sds_phy *sci_phy = &ihost->phys[i].sci;
1352 del_timer_sync(&sci_phy->sata_timer.timer);
1353 }
1354
Edmund Nadolskiac0eeb42011-05-19 20:00:51 -07001355 del_timer_sync(&ihost->sci.port_agent.timer.timer);
1356
Edmund Nadolski04736612011-05-19 20:17:47 -07001357 del_timer_sync(&ihost->sci.power_control.timer.timer);
1358
Edmund Nadolski6cb58532011-05-19 11:59:56 +00001359 del_timer_sync(&ihost->sci.timer.timer);
1360
Edmund Nadolskibb3dbdf2011-05-19 20:26:02 -07001361 del_timer_sync(&ihost->sci.phy_timer.timer);
Dan Williams6f231dd2011-07-02 22:56:22 -07001362}

static void __iomem *scu_base(struct isci_host *isci_host)
{
	struct pci_dev *pdev = isci_host->pdev;
	int id = isci_host->id;

	return pcim_iomap_table(pdev)[SCI_SCU_BAR * 2] + SCI_SCU_BAR_SIZE * id;
}

static void __iomem *smu_base(struct isci_host *isci_host)
{
	struct pci_dev *pdev = isci_host->pdev;
	int id = isci_host->id;

	return pcim_iomap_table(pdev)[SCI_SMU_BAR * 2] + SCI_SMU_BAR_SIZE * id;
}

static void isci_user_parameters_get(
	struct isci_host *isci_host,
	union scic_user_parameters *scic_user_params)
{
	struct scic_sds_user_parameters *u = &scic_user_params->sds1;
	int i;

	for (i = 0; i < SCI_MAX_PHYS; i++) {
		struct sci_phy_user_params *u_phy = &u->phys[i];

		u_phy->max_speed_generation = phy_gen;

		/* we are not exporting these for now */
		u_phy->align_insertion_frequency = 0x7f;
		u_phy->in_connection_align_insertion_frequency = 0xff;
		u_phy->notify_enable_spin_up_insertion_frequency = 0x33;
	}

	u->stp_inactivity_timeout = stp_inactive_to;
	u->ssp_inactivity_timeout = ssp_inactive_to;
	u->stp_max_occupancy_timeout = stp_max_occ_to;
	u->ssp_max_occupancy_timeout = ssp_max_occ_to;
	u->no_outbound_task_timeout = no_outbound_task_to;
	u->max_number_concurrent_device_spin_up = max_concurr_spinup;
}

static void scic_sds_controller_initial_state_enter(struct sci_base_state_machine *sm)
{
	struct scic_sds_controller *scic = container_of(sm, typeof(*scic), sm);

	sci_change_state(&scic->sm, SCIC_RESET);
}

static inline void scic_sds_controller_starting_state_exit(struct sci_base_state_machine *sm)
{
	struct scic_sds_controller *scic = container_of(sm, typeof(*scic), sm);

	sci_del_timer(&scic->timer);
}

#define INTERRUPT_COALESCE_TIMEOUT_BASE_RANGE_LOWER_BOUND_NS 853
#define INTERRUPT_COALESCE_TIMEOUT_BASE_RANGE_UPPER_BOUND_NS 1280
#define INTERRUPT_COALESCE_TIMEOUT_MAX_US                    2700000
#define INTERRUPT_COALESCE_NUMBER_MAX                        256
#define INTERRUPT_COALESCE_TIMEOUT_ENCODE_MIN                7
#define INTERRUPT_COALESCE_TIMEOUT_ENCODE_MAX                28
1426
1427/**
1428 * scic_controller_set_interrupt_coalescence() - This method allows the user to
1429 * configure the interrupt coalescence.
1430 * @controller: This parameter represents the handle to the controller object
1431 * for which its interrupt coalesce register is overridden.
1432 * @coalesce_number: Used to control the number of entries in the Completion
1433 * Queue before an interrupt is generated. If the number of entries exceed
1434 * this number, an interrupt will be generated. The valid range of the input
1435 * is [0, 256]. A setting of 0 results in coalescing being disabled.
1436 * @coalesce_timeout: Timeout value in microseconds. The valid range of the
1437 * input is [0, 2700000] . A setting of 0 is allowed and results in no
1438 * interrupt coalescing timeout.
1439 *
1440 * Indicate if the user successfully set the interrupt coalesce parameters.
1441 * SCI_SUCCESS The user successfully updated the interrutp coalescence.
1442 * SCI_FAILURE_INVALID_PARAMETER_VALUE The user input value is out of range.
1443 */
1444static enum sci_status scic_controller_set_interrupt_coalescence(
1445 struct scic_sds_controller *scic_controller,
1446 u32 coalesce_number,
1447 u32 coalesce_timeout)
1448{
1449 u8 timeout_encode = 0;
1450 u32 min = 0;
1451 u32 max = 0;
1452
1453 /* Check if the input parameters fall in the range. */
1454 if (coalesce_number > INTERRUPT_COALESCE_NUMBER_MAX)
1455 return SCI_FAILURE_INVALID_PARAMETER_VALUE;
1456
1457 /*
1458 * Defined encoding for interrupt coalescing timeout:
1459 * Value Min Max Units
1460 * ----- --- --- -----
1461 * 0 - - Disabled
1462 * 1 13.3 20.0 ns
1463 * 2 26.7 40.0
1464 * 3 53.3 80.0
1465 * 4 106.7 160.0
1466 * 5 213.3 320.0
1467 * 6 426.7 640.0
1468 * 7 853.3 1280.0
1469 * 8 1.7 2.6 us
1470 * 9 3.4 5.1
1471 * 10 6.8 10.2
1472 * 11 13.7 20.5
1473 * 12 27.3 41.0
1474 * 13 54.6 81.9
1475 * 14 109.2 163.8
1476 * 15 218.5 327.7
1477 * 16 436.9 655.4
1478 * 17 873.8 1310.7
1479 * 18 1.7 2.6 ms
1480 * 19 3.5 5.2
1481 * 20 7.0 10.5
1482 * 21 14.0 21.0
1483 * 22 28.0 41.9
1484 * 23 55.9 83.9
1485 * 24 111.8 167.8
1486 * 25 223.7 335.5
1487 * 26 447.4 671.1
1488 * 27 894.8 1342.2
1489 * 28 1.8 2.7 s
1490 * Others Undefined */
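/*
 * Worked example against the table above, using the driver's integer
 * math: the 250 us default programmed in
 * scic_sds_controller_ready_state_enter() becomes 25000 ten-ns units.
 * The search starts at encode 7 with min = 853/10 = 85 and
 * max = 1280/10 = 128; both bounds double on each pass until
 * 21760 <= 25000 < 32768 at encode 15, i.e. the 218.5-327.7 us band
 * (the small offset from the table values comes from the integer
 * division of the base bounds).
 */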
1491
1492 /*
1493 * Use the table above to decide the encode of interrupt coalescing timeout
1494 * value for register writing. */
1495 if (coalesce_timeout == 0)
1496 timeout_encode = 0;
1497 else {
1498 /* make the timeout value in units of 10 ns. */
1499 coalesce_timeout = coalesce_timeout * 100;
1500 min = INTERRUPT_COALESCE_TIMEOUT_BASE_RANGE_LOWER_BOUND_NS / 10;
1501 max = INTERRUPT_COALESCE_TIMEOUT_BASE_RANGE_UPPER_BOUND_NS / 10;
1502
1503 /* get the encode of timeout for register writing. */
1504 for (timeout_encode = INTERRUPT_COALESCE_TIMEOUT_ENCODE_MIN;
1505 timeout_encode <= INTERRUPT_COALESCE_TIMEOUT_ENCODE_MAX;
1506 timeout_encode++) {
1507 if (min <= coalesce_timeout && max > coalesce_timeout)
1508 break;
1509 else if (coalesce_timeout >= max && coalesce_timeout < min * 2
1510 && coalesce_timeout <= INTERRUPT_COALESCE_TIMEOUT_MAX_US * 100) {
1511 if ((coalesce_timeout - max) < (2 * min - coalesce_timeout))
1512 break;
1513 else {
1514 timeout_encode++;
1515 break;
1516 }
1517 } else {
1518 max = max * 2;
1519 min = min * 2;
1520 }
1521 }
1522
1523 if (timeout_encode == INTERRUPT_COALESCE_TIMEOUT_ENCODE_MAX + 1)
1524 /* the value is out of range. */
1525 return SCI_FAILURE_INVALID_PARAMETER_VALUE;
1526 }
1527
1528 writel(SMU_ICC_GEN_VAL(NUMBER, coalesce_number) |
1529 SMU_ICC_GEN_VAL(TIMER, timeout_encode),
1530 &scic_controller->smu_registers->interrupt_coalesce_control);
1531
1532
1533 scic_controller->interrupt_coalesce_number = (u16)coalesce_number;
1534 scic_controller->interrupt_coalesce_timeout = coalesce_timeout / 100;
1535
1536 return SCI_SUCCESS;
1537}
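/*
 * A minimal usage sketch (hypothetical caller, for illustration only):
 * fire an interrupt once 32 completions are queued or after roughly
 * 500 us, whichever comes first.
 */
#if 0
static void example_tune_coalescence(struct scic_sds_controller *scic)
{
	if (scic_controller_set_interrupt_coalescence(scic, 32, 500) !=
	    SCI_SUCCESS)
		dev_warn(scic_to_dev(scic),
			 "coalescence parameters out of range\n");
}
#endif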
1538
1539
Dan Williams9269e0e2011-05-12 07:42:17 -07001540static void scic_sds_controller_ready_state_enter(struct sci_base_state_machine *sm)
Dan Williamscc9203b2011-05-08 17:34:44 -07001541{
Edmund Nadolskie3013702011-06-02 00:10:43 +00001542 struct scic_sds_controller *scic = container_of(sm, typeof(*scic), sm);
Dan Williamscc9203b2011-05-08 17:34:44 -07001543
1544 /* set the default interrupt coalescence number and timeout value. */
1545 scic_controller_set_interrupt_coalescence(scic, 0x10, 250);
1546}
1547
Dan Williams9269e0e2011-05-12 07:42:17 -07001548static void scic_sds_controller_ready_state_exit(struct sci_base_state_machine *sm)
Dan Williamscc9203b2011-05-08 17:34:44 -07001549{
Edmund Nadolskie3013702011-06-02 00:10:43 +00001550 struct scic_sds_controller *scic = container_of(sm, typeof(*scic), sm);
Dan Williamscc9203b2011-05-08 17:34:44 -07001551
1552 /* disable interrupt coalescence. */
1553 scic_controller_set_interrupt_coalescence(scic, 0, 0);
1554}
1555
1556static enum sci_status scic_sds_controller_stop_phys(struct scic_sds_controller *scic)
1557{
1558 u32 index;
1559 enum sci_status status;
1560 enum sci_status phy_status;
1561 struct isci_host *ihost = scic_to_ihost(scic);
1562
1563 status = SCI_SUCCESS;
1564
1565 for (index = 0; index < SCI_MAX_PHYS; index++) {
1566 phy_status = scic_sds_phy_stop(&ihost->phys[index].sci);
1567
1568 if (phy_status != SCI_SUCCESS &&
1569 phy_status != SCI_FAILURE_INVALID_STATE) {
1570 status = SCI_FAILURE;
1571
1572 dev_warn(scic_to_dev(scic),
1573 "%s: Controller stop operation failed to stop "
1574 "phy %d because of status %d.\n",
1575 __func__,
1576 ihost->phys[index].sci.phy_index, phy_status);
1577 }
1578 }
1579
1580 return status;
1581}
1582
1583static enum sci_status scic_sds_controller_stop_ports(struct scic_sds_controller *scic)
1584{
1585 u32 index;
1586 enum sci_status port_status;
1587 enum sci_status status = SCI_SUCCESS;
1588 struct isci_host *ihost = scic_to_ihost(scic);
1589
1590 for (index = 0; index < scic->logical_port_entries; index++) {
1591 struct scic_sds_port *sci_port = &ihost->ports[index].sci;
Dan Williamscc9203b2011-05-08 17:34:44 -07001592
Piotr Sawicki8bc80d32011-05-11 23:52:31 +00001593 port_status = scic_sds_port_stop(sci_port);
Dan Williamscc9203b2011-05-08 17:34:44 -07001594
1595 if ((port_status != SCI_SUCCESS) &&
1596 (port_status != SCI_FAILURE_INVALID_STATE)) {
1597 status = SCI_FAILURE;
1598
1599 dev_warn(scic_to_dev(scic),
1600 "%s: Controller stop operation failed to "
1601 "stop port %d because of status %d.\n",
1602 __func__,
1603 sci_port->logical_port_index,
1604 port_status);
1605 }
1606 }
1607
1608 return status;
1609}
1610
1611static enum sci_status scic_sds_controller_stop_devices(struct scic_sds_controller *scic)
1612{
1613 u32 index;
1614 enum sci_status status;
1615 enum sci_status device_status;
1616
1617 status = SCI_SUCCESS;
1618
1619 for (index = 0; index < scic->remote_node_entries; index++) {
1620 if (scic->device_table[index] != NULL) {
1621 /* @todo What timeout value do we want to provide to this request? */
1622 device_status = scic_remote_device_stop(scic->device_table[index], 0);
1623
1624 if ((device_status != SCI_SUCCESS) &&
1625 (device_status != SCI_FAILURE_INVALID_STATE)) {
1626 dev_warn(scic_to_dev(scic),
1627 "%s: Controller stop operation failed "
1628 "to stop device 0x%p because of "
1629 "status %d.\n",
1630 __func__,
1631 scic->device_table[index], device_status);
1632 }
1633 }
1634 }
1635
1636 return status;
1637}
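/*
 * Note for the three stop helpers above: SCI_FAILURE_INVALID_STATE is
 * tolerated, presumably because a phy, port or device that is already
 * stopped (or was never started) may refuse the request without that
 * invalidating the controller-wide stop; any other status is logged
 * and folded into a single SCI_FAILURE return.
 */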
1638
Dan Williams9269e0e2011-05-12 07:42:17 -07001639static void scic_sds_controller_stopping_state_enter(struct sci_base_state_machine *sm)
Dan Williamscc9203b2011-05-08 17:34:44 -07001640{
Edmund Nadolskie3013702011-06-02 00:10:43 +00001641 struct scic_sds_controller *scic = container_of(sm, typeof(*scic), sm);
Dan Williamscc9203b2011-05-08 17:34:44 -07001642
1643 /* Stop all of the components for this controller */
1644 scic_sds_controller_stop_phys(scic);
1645 scic_sds_controller_stop_ports(scic);
1646 scic_sds_controller_stop_devices(scic);
1647}
1648
Dan Williams9269e0e2011-05-12 07:42:17 -07001649static void scic_sds_controller_stopping_state_exit(struct sci_base_state_machine *sm)
Dan Williamscc9203b2011-05-08 17:34:44 -07001650{
Edmund Nadolskie3013702011-06-02 00:10:43 +00001651 struct scic_sds_controller *scic = container_of(sm, typeof(*scic), sm);
Dan Williamscc9203b2011-05-08 17:34:44 -07001652
Edmund Nadolski6cb58532011-05-19 11:59:56 +00001653 sci_del_timer(&scic->timer);
Dan Williamscc9203b2011-05-08 17:34:44 -07001654}
1655
1656
1657/**
1658 * scic_sds_controller_reset_hardware() -
1659 *
1660 * This method will reset the controller hardware.
1661 */
1662static void scic_sds_controller_reset_hardware(struct scic_sds_controller *scic)
1663{
1664 /* Disable interrupts so we don't take any spurious interrupts */
1665 scic_controller_disable_interrupts(scic);
1666
1667 /* Reset the SCU */
1668 writel(0xFFFFFFFF, &scic->smu_registers->soft_reset_control);
1669
1670 /* Delay for 1 ms before clearing the CQP and UFQPR. */
1671 udelay(1000);
1672
1673 /* The write to the CQGR clears the CQP */
1674 writel(0x00000000, &scic->smu_registers->completion_queue_get);
1675
1676 /* The write to the UFQGP clears the UFQPR */
1677 writel(0, &scic->scu_registers->sdma.unsolicited_frame_get_pointer);
1678}
1679
Dan Williams9269e0e2011-05-12 07:42:17 -07001680static void scic_sds_controller_resetting_state_enter(struct sci_base_state_machine *sm)
Dan Williamscc9203b2011-05-08 17:34:44 -07001681{
Edmund Nadolskie3013702011-06-02 00:10:43 +00001682 struct scic_sds_controller *scic = container_of(sm, typeof(*scic), sm);
Dan Williamscc9203b2011-05-08 17:34:44 -07001683
1684 scic_sds_controller_reset_hardware(scic);
Edmund Nadolskie3013702011-06-02 00:10:43 +00001685 sci_change_state(&scic->sm, SCIC_RESET);
Dan Williamscc9203b2011-05-08 17:34:44 -07001686}
1687
1688static const struct sci_base_state scic_sds_controller_state_table[] = {
Edmund Nadolskie3013702011-06-02 00:10:43 +00001689 [SCIC_INITIAL] = {
Dan Williamscc9203b2011-05-08 17:34:44 -07001690 .enter_state = scic_sds_controller_initial_state_enter,
1691 },
Edmund Nadolskie3013702011-06-02 00:10:43 +00001692 [SCIC_RESET] = {},
1693 [SCIC_INITIALIZING] = {},
1694 [SCIC_INITIALIZED] = {},
1695 [SCIC_STARTING] = {
Dan Williamscc9203b2011-05-08 17:34:44 -07001696 .exit_state = scic_sds_controller_starting_state_exit,
1697 },
Edmund Nadolskie3013702011-06-02 00:10:43 +00001698 [SCIC_READY] = {
Dan Williamscc9203b2011-05-08 17:34:44 -07001699 .enter_state = scic_sds_controller_ready_state_enter,
1700 .exit_state = scic_sds_controller_ready_state_exit,
1701 },
Edmund Nadolskie3013702011-06-02 00:10:43 +00001702 [SCIC_RESETTING] = {
Dan Williamscc9203b2011-05-08 17:34:44 -07001703 .enter_state = scic_sds_controller_resetting_state_enter,
1704 },
Edmund Nadolskie3013702011-06-02 00:10:43 +00001705 [SCIC_STOPPING] = {
Dan Williamscc9203b2011-05-08 17:34:44 -07001706 .enter_state = scic_sds_controller_stopping_state_enter,
1707 .exit_state = scic_sds_controller_stopping_state_exit,
1708 },
Edmund Nadolskie3013702011-06-02 00:10:43 +00001709 [SCIC_STOPPED] = {},
1710 [SCIC_FAILED] = {}
Dan Williamscc9203b2011-05-08 17:34:44 -07001711};
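/*
 * Roughly how the enter/exit hooks in the table above are driven.  This
 * is an illustrative sketch of what sci_change_state() does, not the
 * core implementation (the state machine field names are assumed here):
 */
#if 0
static void example_change_state(struct sci_base_state_machine *sm, u32 next)
{
	const struct sci_base_state *table = sm->state_table;

	if (table[sm->current_state_id].exit_state)
		table[sm->current_state_id].exit_state(sm); /* leave old state */

	sm->previous_state_id = sm->current_state_id;
	sm->current_state_id = next;

	if (table[next].enter_state)
		table[next].enter_state(sm);                /* enter new state */
}
#endif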
1712
1713static void scic_sds_controller_set_default_config_parameters(struct scic_sds_controller *scic)
1714{
1715 /* these defaults are overridden by the platform / firmware */
1716 struct isci_host *ihost = scic_to_ihost(scic);
1717 u16 index;
1718
1719 /* Default to APC mode. */
1720 scic->oem_parameters.sds1.controller.mode_type = SCIC_PORT_AUTOMATIC_CONFIGURATION_MODE;
1721
1722 /* Default to APC mode. */
1723 scic->oem_parameters.sds1.controller.max_concurrent_dev_spin_up = 1;
1724
1725 /* Default to no SSC operation. */
1726 scic->oem_parameters.sds1.controller.do_enable_ssc = false;
1727
1728 /* Initialize all of the port parameter information to narrow ports. */
1729 for (index = 0; index < SCI_MAX_PORTS; index++) {
1730 scic->oem_parameters.sds1.ports[index].phy_mask = 0;
1731 }
1732
1733 /* Initialize all of the phy parameter information. */
1734 for (index = 0; index < SCI_MAX_PHYS; index++) {
1735 /* Default to 6G (i.e. Gen 3) for now. */
1736 scic->user_parameters.sds1.phys[index].max_speed_generation = 3;
1737
1738 /* the frequencies cannot be 0 */
1739 scic->user_parameters.sds1.phys[index].align_insertion_frequency = 0x7f;
1740 scic->user_parameters.sds1.phys[index].in_connection_align_insertion_frequency = 0xff;
1741 scic->user_parameters.sds1.phys[index].notify_enable_spin_up_insertion_frequency = 0x33;
1742
1743 /*
1744 * Previous Vitesse-based expanders had an arbitration issue that
1745 * is worked around by making the upper 32 bits of the SAS address
1746 * a value greater than the Vitesse company identifier.
1747 * Hence, usage of 0x5FCFFFFF. */
1748 scic->oem_parameters.sds1.phys[index].sas_address.low = 0x1 + ihost->id;
1749 scic->oem_parameters.sds1.phys[index].sas_address.high = 0x5FCFFFFF;
1750 }
1751
1752 scic->user_parameters.sds1.stp_inactivity_timeout = 5;
1753 scic->user_parameters.sds1.ssp_inactivity_timeout = 5;
1754 scic->user_parameters.sds1.stp_max_occupancy_timeout = 5;
1755 scic->user_parameters.sds1.ssp_max_occupancy_timeout = 20;
1756 scic->user_parameters.sds1.no_outbound_task_timeout = 20;
1757}
1758
Edmund Nadolski6cb58532011-05-19 11:59:56 +00001759static void controller_timeout(unsigned long data)
1760{
1761 struct sci_timer *tmr = (struct sci_timer *)data;
1762 struct scic_sds_controller *scic = container_of(tmr, typeof(*scic), timer);
1763 struct isci_host *ihost = scic_to_ihost(scic);
Edmund Nadolskie3013702011-06-02 00:10:43 +00001764 struct sci_base_state_machine *sm = &scic->sm;
Edmund Nadolski6cb58532011-05-19 11:59:56 +00001765 unsigned long flags;
Dan Williamscc9203b2011-05-08 17:34:44 -07001766
Edmund Nadolski6cb58532011-05-19 11:59:56 +00001767 spin_lock_irqsave(&ihost->scic_lock, flags);
1768
1769 if (tmr->cancel)
1770 goto done;
1771
Edmund Nadolskie3013702011-06-02 00:10:43 +00001772 if (sm->current_state_id == SCIC_STARTING)
Edmund Nadolski6cb58532011-05-19 11:59:56 +00001773 scic_sds_controller_transition_to_ready(scic, SCI_FAILURE_TIMEOUT);
Edmund Nadolskie3013702011-06-02 00:10:43 +00001774 else if (sm->current_state_id == SCIC_STOPPING) {
1775 sci_change_state(sm, SCIC_FAILED);
Edmund Nadolski6cb58532011-05-19 11:59:56 +00001776 isci_host_stop_complete(ihost, SCI_FAILURE_TIMEOUT);
1777 } else /* @todo Now what do we want to do in this case? */
1778 dev_err(scic_to_dev(scic),
1779 "%s: Controller timer fired when controller was not "
1780 "in a state being timed.\n",
1781 __func__);
1782
1783done:
1784 spin_unlock_irqrestore(&ihost->scic_lock, flags);
1785}
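/*
 * All sci_timer callbacks in this file follow the pattern above: take
 * ihost->scic_lock, bail out early if ->cancel was set by a racing
 * sci_del_timer(), and only then inspect and act on the state machine.
 */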
Dan Williamscc9203b2011-05-08 17:34:44 -07001786
1787/**
1788 * scic_controller_construct() - This method will attempt to construct a
1789 * controller object utilizing the supplied parameter information.
1790 * @c: This parameter specifies the controller to be constructed.
1791 * @scu_base: mapped base address of the scu registers
1792 * @smu_base: mapped base address of the smu registers
1793 *
1794 * Indicate if the controller was successfully constructed or if it failed in
1795 * some way. SCI_SUCCESS This value is returned if the controller was
1796 * successfully constructed. SCI_WARNING_TIMER_CONFLICT This value is returned
1797 * if the interrupt coalescence timer may cause SAS compliance issues for SMP
1798 * Target mode response processing. SCI_FAILURE_UNSUPPORTED_CONTROLLER_TYPE
1799 * This value is returned if the controller does not support the supplied type.
1800 * SCI_FAILURE_UNSUPPORTED_INIT_DATA_VERSION This value is returned if the
1801 * controller does not support the supplied initialization data version.
1802 */
1803static enum sci_status scic_controller_construct(struct scic_sds_controller *scic,
1804 void __iomem *scu_base,
1805 void __iomem *smu_base)
1806{
1807 struct isci_host *ihost = scic_to_ihost(scic);
1808 u8 i;
1809
Edmund Nadolskie3013702011-06-02 00:10:43 +00001810 sci_base_state_machine_construct(&scic->sm,
Dan Williams9269e0e2011-05-12 07:42:17 -07001811 scic_sds_controller_state_table,
Edmund Nadolskie3013702011-06-02 00:10:43 +00001812 SCIC_INITIAL);
Dan Williamscc9203b2011-05-08 17:34:44 -07001813
Edmund Nadolskie3013702011-06-02 00:10:43 +00001814 sci_base_state_machine_start(&scic->sm);
Dan Williamscc9203b2011-05-08 17:34:44 -07001815
1816 scic->scu_registers = scu_base;
1817 scic->smu_registers = smu_base;
1818
1819 scic_sds_port_configuration_agent_construct(&scic->port_agent);
1820
1821 /* Construct the ports for this controller */
1822 for (i = 0; i < SCI_MAX_PORTS; i++)
1823 scic_sds_port_construct(&ihost->ports[i].sci, i, scic);
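	/* the loop leaves i == SCI_MAX_PORTS, so this constructs the dummy
	 * port in the extra ports[] slot -- the same slot the phys are
	 * attached to below until they are assigned a real port */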
1824 scic_sds_port_construct(&ihost->ports[i].sci, SCIC_SDS_DUMMY_PORT, scic);
1825
1826 /* Construct the phys for this controller */
1827 for (i = 0; i < SCI_MAX_PHYS; i++) {
1828 /* Add all the PHYs to the dummy port */
1829 scic_sds_phy_construct(&ihost->phys[i].sci,
1830 &ihost->ports[SCI_MAX_PORTS].sci, i);
1831 }
1832
1833 scic->invalid_phy_mask = 0;
1834
Edmund Nadolski6cb58532011-05-19 11:59:56 +00001835 sci_init_timer(&scic->timer, controller_timeout);
1836
Dan Williamscc9203b2011-05-08 17:34:44 -07001837 /* Set the default maximum values */
1838 scic->completion_event_entries = SCU_EVENT_COUNT;
1839 scic->completion_queue_entries = SCU_COMPLETION_QUEUE_COUNT;
1840 scic->remote_node_entries = SCI_MAX_REMOTE_DEVICES;
1841 scic->logical_port_entries = SCI_MAX_PORTS;
1842 scic->task_context_entries = SCU_IO_REQUEST_COUNT;
1843 scic->uf_control.buffers.count = SCU_UNSOLICITED_FRAME_COUNT;
1844 scic->uf_control.address_table.count = SCU_UNSOLICITED_FRAME_COUNT;
1845
1846 /* Initialize the User and OEM parameters to default values. */
1847 scic_sds_controller_set_default_config_parameters(scic);
1848
1849 return scic_controller_reset(scic);
1850}
1851
1852int scic_oem_parameters_validate(struct scic_sds_oem_params *oem)
1853{
1854 int i;
1855
1856 for (i = 0; i < SCI_MAX_PORTS; i++)
1857 if (oem->ports[i].phy_mask > SCIC_SDS_PARM_PHY_MASK_MAX)
1858 return -EINVAL;
1859
1860 for (i = 0; i < SCI_MAX_PHYS; i++)
1861 if (oem->phys[i].sas_address.high == 0 &&
1862 oem->phys[i].sas_address.low == 0)
1863 return -EINVAL;
1864
1865 if (oem->controller.mode_type == SCIC_PORT_AUTOMATIC_CONFIGURATION_MODE) {
1866 for (i = 0; i < SCI_MAX_PHYS; i++)
1867 if (oem->ports[i].phy_mask != 0)
1868 return -EINVAL;
1869 } else if (oem->controller.mode_type == SCIC_PORT_MANUAL_CONFIGURATION_MODE) {
1870 u8 phy_mask = 0;
1871
1872 for (i = 0; i < SCI_MAX_PHYS; i++)
1873 phy_mask |= oem->ports[i].phy_mask;
1874
1875 if (phy_mask == 0)
1876 return -EINVAL;
1877 } else
1878 return -EINVAL;
1879
1880 if (oem->controller.max_concurrent_dev_spin_up > MAX_CONCURRENT_DEVICE_SPIN_UP_COUNT)
1881 return -EINVAL;
1882
1883 return 0;
1884}
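/*
 * In short: automatic (APC) mode requires every ports[i].phy_mask to be
 * zero (the controller forms ports itself), manual mode requires the
 * masks to claim at least one phy, and in either mode every phy must
 * carry a non-zero SAS address and the spin-up limit must be in bounds.
 */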
1885
1886static enum sci_status scic_oem_parameters_set(struct scic_sds_controller *scic,
1887 union scic_oem_parameters *scic_parms)
1888{
Edmund Nadolskie3013702011-06-02 00:10:43 +00001889 u32 state = scic->sm.current_state_id;
Dan Williamscc9203b2011-05-08 17:34:44 -07001890
Edmund Nadolskie3013702011-06-02 00:10:43 +00001891 if (state == SCIC_RESET ||
1892 state == SCIC_INITIALIZING ||
1893 state == SCIC_INITIALIZED) {
Dan Williamscc9203b2011-05-08 17:34:44 -07001894
1895 if (scic_oem_parameters_validate(&scic_parms->sds1))
1896 return SCI_FAILURE_INVALID_PARAMETER_VALUE;
1897 scic->oem_parameters.sds1 = scic_parms->sds1;
1898
1899 return SCI_SUCCESS;
1900 }
1901
1902 return SCI_FAILURE_INVALID_STATE;
1903}
1904
1905void scic_oem_parameters_get(
1906 struct scic_sds_controller *scic,
1907 union scic_oem_parameters *scic_parms)
1908{
1909 memcpy(scic_parms, (&scic->oem_parameters), sizeof(*scic_parms));
1910}
1911
Edmund Nadolski04736612011-05-19 20:17:47 -07001912static void power_control_timeout(unsigned long data)
Dan Williamscc9203b2011-05-08 17:34:44 -07001913{
Edmund Nadolski04736612011-05-19 20:17:47 -07001914 struct sci_timer *tmr = (struct sci_timer *)data;
1915 struct scic_sds_controller *scic = container_of(tmr, typeof(*scic), power_control.timer);
1916 struct isci_host *ihost = scic_to_ihost(scic);
1917 struct scic_sds_phy *sci_phy;
1918 unsigned long flags;
1919 u8 i;
Dan Williamscc9203b2011-05-08 17:34:44 -07001920
Edmund Nadolski04736612011-05-19 20:17:47 -07001921 spin_lock_irqsave(&ihost->scic_lock, flags);
Dan Williamscc9203b2011-05-08 17:34:44 -07001922
Edmund Nadolski04736612011-05-19 20:17:47 -07001923 if (tmr->cancel)
1924 goto done;
Dan Williamscc9203b2011-05-08 17:34:44 -07001925
1926 scic->power_control.phys_granted_power = 0;
1927
1928 if (scic->power_control.phys_waiting == 0) {
1929 scic->power_control.timer_started = false;
Edmund Nadolski04736612011-05-19 20:17:47 -07001930 goto done;
Dan Williamscc9203b2011-05-08 17:34:44 -07001931 }
Edmund Nadolski04736612011-05-19 20:17:47 -07001932
1933 for (i = 0; i < SCI_MAX_PHYS; i++) {
1934
1935 if (scic->power_control.phys_waiting == 0)
1936 break;
1937
1938 sci_phy = scic->power_control.requesters[i];
1939 if (sci_phy == NULL)
1940 continue;
1941
1942 if (scic->power_control.phys_granted_power >=
1943 scic->oem_parameters.sds1.controller.max_concurrent_dev_spin_up)
1944 break;
1945
1946 scic->power_control.requesters[i] = NULL;
1947 scic->power_control.phys_waiting--;
1948 scic->power_control.phys_granted_power++;
1949 scic_sds_phy_consume_power_handler(sci_phy);
1950 }
1951
1952 /*
1953 * It doesn't matter if the power list is empty, we need to start the
1954 * timer in case another phy becomes ready.
1955 */
1956 sci_mod_timer(tmr, SCIC_SDS_CONTROLLER_POWER_CONTROL_INTERVAL);
1957 scic->power_control.timer_started = true;
1958
1959done:
1960 spin_unlock_irqrestore(&ihost->scic_lock, flags);
Dan Williamscc9203b2011-05-08 17:34:44 -07001961}
1962
1963/**
1964 * This method inserts the phy into the staggered spin-up control queue.
1965 * @scic:
1966 *
1967 *
1968 */
1969void scic_sds_controller_power_control_queue_insert(
1970 struct scic_sds_controller *scic,
1971 struct scic_sds_phy *sci_phy)
1972{
1973 BUG_ON(sci_phy == NULL);
1974
1975 if (scic->power_control.phys_granted_power <
1976 scic->oem_parameters.sds1.controller.max_concurrent_dev_spin_up) {
1977 scic->power_control.phys_granted_power++;
1978 scic_sds_phy_consume_power_handler(sci_phy);
1979
1980 /*
1981 * stop and start the power_control timer. When the timer fires, the
1982 * phys_granted_power count will be reset to 0
1983 */
Edmund Nadolski04736612011-05-19 20:17:47 -07001984 if (scic->power_control.timer_started)
1985 sci_del_timer(&scic->power_control.timer);
1986
1987 sci_mod_timer(&scic->power_control.timer,
1988 SCIC_SDS_CONTROLLER_POWER_CONTROL_INTERVAL);
1989 scic->power_control.timer_started = true;
1990
Dan Williamscc9203b2011-05-08 17:34:44 -07001991 } else {
1992 /* Add the phy in the waiting list */
1993 scic->power_control.requesters[sci_phy->phy_index] = sci_phy;
1994 scic->power_control.phys_waiting++;
1995 }
1996}
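/*
 * With the default max_concurrent_dev_spin_up of 1 (see
 * scic_sds_controller_set_default_config_parameters()), the first
 * requesting phy is granted power immediately and every later phy waits
 * in requesters[] for a subsequent power_control_timeout() tick, which
 * staggers drive spin-up current draw.
 */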
1997
1998/**
1999 * This method removes the phy from the staggered spin-up control queue.
2000 * @scic:
2001 *
2002 *
2003 */
2004void scic_sds_controller_power_control_queue_remove(
2005 struct scic_sds_controller *scic,
2006 struct scic_sds_phy *sci_phy)
2007{
2008 BUG_ON(sci_phy == NULL);
2009
2010 if (scic->power_control.requesters[sci_phy->phy_index] != NULL) {
2011 scic->power_control.phys_waiting--;
2012 }
2013
2014 scic->power_control.requesters[sci_phy->phy_index] = NULL;
2015}
2016
2017#define AFE_REGISTER_WRITE_DELAY 10
2018
2019/* Initialize the AFE for this phy index. We need to read the AFE setup from
2020 * the OEM parameters
2021 */
2022static void scic_sds_controller_afe_initialization(struct scic_sds_controller *scic)
2023{
2024 const struct scic_sds_oem_params *oem = &scic->oem_parameters.sds1;
2025 u32 afe_status;
2026 u32 phy_id;
2027
2028 /* Clear DFX Status registers */
2029 writel(0x0081000f, &scic->scu_registers->afe.afe_dfx_master_control0);
2030 udelay(AFE_REGISTER_WRITE_DELAY);
2031
2032 if (is_b0()) {
2033 /* PM Rx Equalization Save, PM SPhy Rx Acknowledgement
2034 * Timer, PM Stagger Timer */
2035 writel(0x0007BFFF, &scic->scu_registers->afe.afe_pmsn_master_control2);
2036 udelay(AFE_REGISTER_WRITE_DELAY);
2037 }
2038
2039 /* Configure bias currents to normal */
2040 if (is_a0())
2041 writel(0x00005500, &scic->scu_registers->afe.afe_bias_control);
2042 else if (is_a2())
2043 writel(0x00005A00, &scic->scu_registers->afe.afe_bias_control);
2044 else if (is_b0())
2045 writel(0x00005F00, &scic->scu_registers->afe.afe_bias_control);
2046
2047 udelay(AFE_REGISTER_WRITE_DELAY);
2048
2049 /* Enable PLL */
2050 if (is_b0())
2051 writel(0x80040A08, &scic->scu_registers->afe.afe_pll_control0);
2052 else
2053 writel(0x80040908, &scic->scu_registers->afe.afe_pll_control0);
2054
2055 udelay(AFE_REGISTER_WRITE_DELAY);
2056
2057 /* Wait for the PLL to lock */
2058 do {
2059 afe_status = readl(&scic->scu_registers->afe.afe_common_block_status);
2060 udelay(AFE_REGISTER_WRITE_DELAY);
2061 } while ((afe_status & 0x00001000) == 0);
2062
2063 if (is_a0() || is_a2()) {
2064 /* Shorten SAS SNW lock time (RxLock timer value from 76 us to 50 us) */
2065 writel(0x7bcc96ad, &scic->scu_registers->afe.afe_pmsn_master_control0);
2066 udelay(AFE_REGISTER_WRITE_DELAY);
2067 }
2068
2069 for (phy_id = 0; phy_id < SCI_MAX_PHYS; phy_id++) {
2070 const struct sci_phy_oem_params *oem_phy = &oem->phys[phy_id];
2071
2072 if (is_b0()) {
2073 /* Configure transmitter SSC parameters */
2074 writel(0x00030000, &scic->scu_registers->afe.scu_afe_xcvr[phy_id].afe_tx_ssc_control);
2075 udelay(AFE_REGISTER_WRITE_DELAY);
2076 } else {
2077 /*
2078 * All defaults, except the Receive Word Alignment/Comma Detect
2079 * Enable....(0xe800) */
2080 writel(0x00004512, &scic->scu_registers->afe.scu_afe_xcvr[phy_id].afe_xcvr_control0);
2081 udelay(AFE_REGISTER_WRITE_DELAY);
2082
2083 writel(0x0050100F, &scic->scu_registers->afe.scu_afe_xcvr[phy_id].afe_xcvr_control1);
2084 udelay(AFE_REGISTER_WRITE_DELAY);
2085 }
2086
2087 /*
2088 * Power up TX and RX out from power down (PWRDNTX and PWRDNRX)
2089 * & increase TX int & ext bias 20%....(0xe85c) */
2090 if (is_a0())
2091 writel(0x000003D4, &scic->scu_registers->afe.scu_afe_xcvr[phy_id].afe_channel_control);
2092 else if (is_a2())
2093 writel(0x000003F0, &scic->scu_registers->afe.scu_afe_xcvr[phy_id].afe_channel_control);
2094 else {
2095 /* Power down TX and RX (PWRDNTX and PWRDNRX) */
2096 writel(0x000003d7, &scic->scu_registers->afe.scu_afe_xcvr[phy_id].afe_channel_control);
2097 udelay(AFE_REGISTER_WRITE_DELAY);
2098
2099 /*
2100 * Power up TX and RX out from power down (PWRDNTX and PWRDNRX)
2101 * & increase TX int & ext bias 20%....(0xe85c) */
2102 writel(0x000003d4, &scic->scu_registers->afe.scu_afe_xcvr[phy_id].afe_channel_control);
2103 }
2104 udelay(AFE_REGISTER_WRITE_DELAY);
2105
2106 if (is_a0() || is_a2()) {
2107 /* Enable TX equalization (0xe824) */
2108 writel(0x00040000, &scic->scu_registers->afe.scu_afe_xcvr[phy_id].afe_tx_control);
2109 udelay(AFE_REGISTER_WRITE_DELAY);
2110 }
2111
2112 /*
2113 * RDPI=0x0(RX Power On), RXOOBDETPDNC=0x0, TPD=0x0(TX Power On),
2114 * RDD=0x0(RX Detect Enabled) ....(0xe800) */
2115 writel(0x00004100, &scic->scu_registers->afe.scu_afe_xcvr[phy_id].afe_xcvr_control0);
2116 udelay(AFE_REGISTER_WRITE_DELAY);
2117
2118 /* Leave DFE/FFE on */
2119 if (is_a0())
2120 writel(0x3F09983F, &scic->scu_registers->afe.scu_afe_xcvr[phy_id].afe_rx_ssc_control0);
2121 else if (is_a2())
2122 writel(0x3F11103F, &scic->scu_registers->afe.scu_afe_xcvr[phy_id].afe_rx_ssc_control0);
2123 else {
2124 writel(0x3F11103F, &scic->scu_registers->afe.scu_afe_xcvr[phy_id].afe_rx_ssc_control0);
2125 udelay(AFE_REGISTER_WRITE_DELAY);
2126 /* Enable TX equalization (0xe824) */
2127 writel(0x00040000, &scic->scu_registers->afe.scu_afe_xcvr[phy_id].afe_tx_control);
2128 }
2129 udelay(AFE_REGISTER_WRITE_DELAY);
2130
2131 writel(oem_phy->afe_tx_amp_control0,
2132 &scic->scu_registers->afe.scu_afe_xcvr[phy_id].afe_tx_amp_control0);
2133 udelay(AFE_REGISTER_WRITE_DELAY);
2134
2135 writel(oem_phy->afe_tx_amp_control1,
2136 &scic->scu_registers->afe.scu_afe_xcvr[phy_id].afe_tx_amp_control1);
2137 udelay(AFE_REGISTER_WRITE_DELAY);
2138
2139 writel(oem_phy->afe_tx_amp_control2,
2140 &scic->scu_registers->afe.scu_afe_xcvr[phy_id].afe_tx_amp_control2);
2141 udelay(AFE_REGISTER_WRITE_DELAY);
2142
2143 writel(oem_phy->afe_tx_amp_control3,
2144 &scic->scu_registers->afe.scu_afe_xcvr[phy_id].afe_tx_amp_control3);
2145 udelay(AFE_REGISTER_WRITE_DELAY);
2146 }
2147
2148 /* Transfer control to the PEs */
2149 writel(0x00010f00, &scic->scu_registers->afe.afe_dfx_master_control0);
2150 udelay(AFE_REGISTER_WRITE_DELAY);
2151}
2152
2153static enum sci_status scic_controller_set_mode(struct scic_sds_controller *scic,
2154 enum sci_controller_mode operating_mode)
2155{
2156 enum sci_status status = SCI_SUCCESS;
2157
Edmund Nadolskie3013702011-06-02 00:10:43 +00002158 if ((scic->sm.current_state_id == SCIC_INITIALIZING) ||
2159 (scic->sm.current_state_id == SCIC_INITIALIZED)) {
Dan Williamscc9203b2011-05-08 17:34:44 -07002160 switch (operating_mode) {
2161 case SCI_MODE_SPEED:
2162 scic->remote_node_entries = SCI_MAX_REMOTE_DEVICES;
2163 scic->task_context_entries = SCU_IO_REQUEST_COUNT;
2164 scic->uf_control.buffers.count =
2165 SCU_UNSOLICITED_FRAME_COUNT;
2166 scic->completion_event_entries = SCU_EVENT_COUNT;
2167 scic->completion_queue_entries =
2168 SCU_COMPLETION_QUEUE_COUNT;
2169 break;
2170
2171 case SCI_MODE_SIZE:
2172 scic->remote_node_entries = SCI_MIN_REMOTE_DEVICES;
2173 scic->task_context_entries = SCI_MIN_IO_REQUESTS;
2174 scic->uf_control.buffers.count =
2175 SCU_MIN_UNSOLICITED_FRAMES;
2176 scic->completion_event_entries = SCU_MIN_EVENTS;
2177 scic->completion_queue_entries =
2178 SCU_MIN_COMPLETION_QUEUE_ENTRIES;
2179 break;
2180
2181 default:
2182 status = SCI_FAILURE_INVALID_PARAMETER_VALUE;
2183 break;
2184 }
2185 } else
2186 status = SCI_FAILURE_INVALID_STATE;
2187
2188 return status;
2189}
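/*
 * SCI_MODE_SPEED sizes every queue at its hardware maximum while
 * SCI_MODE_SIZE trims everything to the minimum footprint;
 * scic_controller_initialize() below picks SCI_MODE_SPEED as its
 * "performance configuration" default.
 */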
2190
2191static void scic_sds_controller_initialize_power_control(struct scic_sds_controller *scic)
2192{
Edmund Nadolski04736612011-05-19 20:17:47 -07002193 sci_init_timer(&scic->power_control.timer, power_control_timeout);
Dan Williamscc9203b2011-05-08 17:34:44 -07002194
2195 memset(scic->power_control.requesters, 0,
2196 sizeof(scic->power_control.requesters));
2197
2198 scic->power_control.phys_waiting = 0;
2199 scic->power_control.phys_granted_power = 0;
2200}
2201
2202static enum sci_status scic_controller_initialize(struct scic_sds_controller *scic)
2203{
Edmund Nadolskie3013702011-06-02 00:10:43 +00002204 struct sci_base_state_machine *sm = &scic->sm;
Dan Williamscc9203b2011-05-08 17:34:44 -07002205 enum sci_status result = SCI_SUCCESS;
2206 struct isci_host *ihost = scic_to_ihost(scic);
2207 u32 index, state;
2208
Edmund Nadolskie3013702011-06-02 00:10:43 +00002209 if (scic->sm.current_state_id != SCIC_RESET) {
Dan Williamscc9203b2011-05-08 17:34:44 -07002210 dev_warn(scic_to_dev(scic),
2211 "SCIC Controller initialize operation requested "
2212 "in invalid state\n");
2213 return SCI_FAILURE_INVALID_STATE;
2214 }
2215
Edmund Nadolskie3013702011-06-02 00:10:43 +00002216 sci_change_state(sm, SCIC_INITIALIZING);
Dan Williamscc9203b2011-05-08 17:34:44 -07002217
Edmund Nadolskibb3dbdf2011-05-19 20:26:02 -07002218 sci_init_timer(&scic->phy_timer, phy_startup_timeout);
2219
2220 scic->next_phy_to_start = 0;
2221 scic->phy_startup_timer_pending = false;
Dan Williamscc9203b2011-05-08 17:34:44 -07002222
2223 scic_sds_controller_initialize_power_control(scic);
2224
2225 /*
2226 * There is nothing to do here for B0 since we do not have to
2227 * program the AFE registers.
2228 * @todo The AFE settings are supposed to be correct for the B0 but
2229 * presently they seem to be wrong. */
2230 scic_sds_controller_afe_initialization(scic);
2231
2232 if (result == SCI_SUCCESS) {
2233 u32 status;
2234 u32 terminate_loop;
2235
2236 /* Take the hardware out of reset */
2237 writel(0, &scic->smu_registers->soft_reset_control);
2238
2239 /*
2240 * @todo Provide a meaningful error code for hardware failure
2241 * result = SCI_FAILURE_CONTROLLER_HARDWARE; */
2242 result = SCI_FAILURE;
2243 terminate_loop = 100;
2244
2245 while (terminate_loop-- && (result != SCI_SUCCESS)) {
2246 /* Loop until the hardware reports success */
2247 udelay(SCU_CONTEXT_RAM_INIT_STALL_TIME);
2248 status = readl(&scic->smu_registers->control_status);
2249
2250 if ((status & SCU_RAM_INIT_COMPLETED) ==
2251 SCU_RAM_INIT_COMPLETED)
2252 result = SCI_SUCCESS;
2253 }
2254 }
2255
2256 if (result == SCI_SUCCESS) {
2257 u32 max_supported_ports;
2258 u32 max_supported_devices;
2259 u32 max_supported_io_requests;
2260 u32 device_context_capacity;
2261
2262 /*
2263 * Determine the actual device capacities that the
2264 * hardware will support */
2265 device_context_capacity =
2266 readl(&scic->smu_registers->device_context_capacity);
2267
2268
2269 max_supported_ports = smu_dcc_get_max_ports(device_context_capacity);
2270 max_supported_devices = smu_dcc_get_max_remote_node_context(device_context_capacity);
2271 max_supported_io_requests = smu_dcc_get_max_task_context(device_context_capacity);
2272
2273 /*
2274 * Make all PEs that are unassigned match up with the
2275 * logical ports
2276 */
2277 for (index = 0; index < max_supported_ports; index++) {
2278 struct scu_port_task_scheduler_group_registers __iomem
2279 *ptsg = &scic->scu_registers->peg0.ptsg;
2280
2281 writel(index, &ptsg->protocol_engine[index]);
2282 }
2283
2284 /* Record the smaller of the two capacity values */
2285 scic->logical_port_entries =
2286 min(max_supported_ports, scic->logical_port_entries);
2287
2288 scic->task_context_entries =
2289 min(max_supported_io_requests,
2290 scic->task_context_entries);
2291
2292 scic->remote_node_entries =
2293 min(max_supported_devices, scic->remote_node_entries);
2294
2295 /*
2296 * Now that we have the correct hardware reported minimum values
2297 * build the MDL for the controller. Default to a performance
2298 * configuration.
2299 */
2300 scic_controller_set_mode(scic, SCI_MODE_SPEED);
2301 }
2302
2303 /* Initialize hardware PCI Relaxed ordering in DMA engines */
2304 if (result == SCI_SUCCESS) {
2305 u32 dma_configuration;
2306
2307 /* Configure the payload DMA */
2308 dma_configuration =
2309 readl(&scic->scu_registers->sdma.pdma_configuration);
2310 dma_configuration |=
2311 SCU_PDMACR_GEN_BIT(PCI_RELAXED_ORDERING_ENABLE);
2312 writel(dma_configuration,
2313 &scic->scu_registers->sdma.pdma_configuration);
2314
2315 /* Configure the control DMA */
2316 dma_configuration =
2317 readl(&scic->scu_registers->sdma.cdma_configuration);
2318 dma_configuration |=
2319 SCU_CDMACR_GEN_BIT(PCI_RELAXED_ORDERING_ENABLE);
2320 writel(dma_configuration,
2321 &scic->scu_registers->sdma.cdma_configuration);
2322 }
2323
2324 /*
2325 * Initialize the PHYs before the PORTs because the PHY registers
2326 * are accessed during the port initialization.
2327 */
2328 if (result == SCI_SUCCESS) {
2329 /* Initialize the phys */
2330 for (index = 0;
2331 (result == SCI_SUCCESS) && (index < SCI_MAX_PHYS);
2332 index++) {
2333 result = scic_sds_phy_initialize(
2334 &ihost->phys[index].sci,
2335 &scic->scu_registers->peg0.pe[index].tl,
2336 &scic->scu_registers->peg0.pe[index].ll);
2337 }
2338 }
2339
2340 if (result == SCI_SUCCESS) {
2341 /* Initialize the logical ports */
2342 for (index = 0;
2343 (index < scic->logical_port_entries) &&
2344 (result == SCI_SUCCESS);
2345 index++) {
2346 result = scic_sds_port_initialize(
2347 &ihost->ports[index].sci,
2348 &scic->scu_registers->peg0.ptsg.port[index],
2349 &scic->scu_registers->peg0.ptsg.protocol_engine,
2350 &scic->scu_registers->peg0.viit[index]);
2351 }
2352 }
2353
2354 if (result == SCI_SUCCESS)
2355 result = scic_sds_port_configuration_agent_initialize(
2356 scic,
2357 &scic->port_agent);
2358
2359 /* Advance the controller state machine */
2360 if (result == SCI_SUCCESS)
Edmund Nadolskie3013702011-06-02 00:10:43 +00002361 state = SCIC_INITIALIZED;
Dan Williamscc9203b2011-05-08 17:34:44 -07002362 else
Edmund Nadolskie3013702011-06-02 00:10:43 +00002363 state = SCIC_FAILED;
2364 sci_change_state(sm, state);
Dan Williamscc9203b2011-05-08 17:34:44 -07002365
2366 return result;
2367}
2368
2369static enum sci_status scic_user_parameters_set(
2370 struct scic_sds_controller *scic,
2371 union scic_user_parameters *scic_parms)
2372{
Edmund Nadolskie3013702011-06-02 00:10:43 +00002373 u32 state = scic->sm.current_state_id;
Dan Williamscc9203b2011-05-08 17:34:44 -07002374
Edmund Nadolskie3013702011-06-02 00:10:43 +00002375 if (state == SCIC_RESET ||
2376 state == SCIC_INITIALIZING ||
2377 state == SCIC_INITIALIZED) {
Dan Williamscc9203b2011-05-08 17:34:44 -07002378 u16 index;
2379
2380 /*
2381 * Validate the user parameters. If they are not legal, then
2382 * return a failure.
2383 */
2384 for (index = 0; index < SCI_MAX_PHYS; index++) {
2385 struct sci_phy_user_params *user_phy;
2386
2387 user_phy = &scic_parms->sds1.phys[index];
2388
2389 if (!((user_phy->max_speed_generation <=
2390 SCIC_SDS_PARM_MAX_SPEED) &&
2391 (user_phy->max_speed_generation >
2392 SCIC_SDS_PARM_NO_SPEED)))
2393 return SCI_FAILURE_INVALID_PARAMETER_VALUE;
2394
2395 if ((user_phy->in_connection_align_insertion_frequency < 3) ||
2396     (user_phy->align_insertion_frequency == 0) ||
2397     (user_phy->notify_enable_spin_up_insertion_frequency == 0))
2398 return SCI_FAILURE_INVALID_PARAMETER_VALUE;
2406 }
2407
2408 if ((scic_parms->sds1.stp_inactivity_timeout == 0) ||
2409 (scic_parms->sds1.ssp_inactivity_timeout == 0) ||
2410 (scic_parms->sds1.stp_max_occupancy_timeout == 0) ||
2411 (scic_parms->sds1.ssp_max_occupancy_timeout == 0) ||
2412 (scic_parms->sds1.no_outbound_task_timeout == 0))
2413 return SCI_FAILURE_INVALID_PARAMETER_VALUE;
2414
2415 memcpy(&scic->user_parameters, scic_parms, sizeof(*scic_parms));
2416
2417 return SCI_SUCCESS;
2418 }
2419
2420 return SCI_FAILURE_INVALID_STATE;
2421}
2422
2423static int scic_controller_mem_init(struct scic_sds_controller *scic)
2424{
2425 struct device *dev = scic_to_dev(scic);
2426 dma_addr_t dma_handle;
2427 enum sci_status result;
2428
2429 scic->completion_queue = dmam_alloc_coherent(dev,
2430 scic->completion_queue_entries * sizeof(u32),
2431 &dma_handle, GFP_KERNEL);
2432 if (!scic->completion_queue)
2433 return -ENOMEM;
2434
2435 writel(lower_32_bits(dma_handle),
2436 &scic->smu_registers->completion_queue_lower);
2437 writel(upper_32_bits(dma_handle),
2438 &scic->smu_registers->completion_queue_upper);
2439
2440 scic->remote_node_context_table = dmam_alloc_coherent(dev,
2441 scic->remote_node_entries *
2442 sizeof(union scu_remote_node_context),
2443 &dma_handle, GFP_KERNEL);
2444 if (!scic->remote_node_context_table)
2445 return -ENOMEM;
2446
2447 writel(lower_32_bits(dma_handle),
2448 &scic->smu_registers->remote_node_context_lower);
2449 writel(upper_32_bits(dma_handle),
2450 &scic->smu_registers->remote_node_context_upper);
2451
2452 scic->task_context_table = dmam_alloc_coherent(dev,
2453 scic->task_context_entries *
2454 sizeof(struct scu_task_context),
2455 &dma_handle, GFP_KERNEL);
2456 if (!scic->task_context_table)
2457 return -ENOMEM;
2458
2459 writel(lower_32_bits(dma_handle),
2460 &scic->smu_registers->host_task_table_lower);
2461 writel(upper_32_bits(dma_handle),
2462 &scic->smu_registers->host_task_table_upper);
2463
2464 result = scic_sds_unsolicited_frame_control_construct(scic);
2465 if (result)
2466 return result;
2467
2468 /*
2469 * Inform the silicon as to the location of the UF headers and
2470 * address table.
2471 */
2472 writel(lower_32_bits(scic->uf_control.headers.physical_address),
2473 &scic->scu_registers->sdma.uf_header_base_address_lower);
2474 writel(upper_32_bits(scic->uf_control.headers.physical_address),
2475 &scic->scu_registers->sdma.uf_header_base_address_upper);
2476
2477 writel(lower_32_bits(scic->uf_control.address_table.physical_address),
2478 &scic->scu_registers->sdma.uf_address_table_lower);
2479 writel(upper_32_bits(scic->uf_control.address_table.physical_address),
2480 &scic->scu_registers->sdma.uf_address_table_upper);
2481
2482 return 0;
2483}
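/*
 * Everything above is allocated with managed helpers
 * (dmam_alloc_coherent), so the coherent DMA buffers are released
 * automatically when the device is unbound; the early -ENOMEM returns
 * need no unwind path.
 */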
2484
Dan Williams6f231dd2011-07-02 22:56:22 -07002485int isci_host_init(struct isci_host *isci_host)
2486{
Dan Williamsd9c37392011-03-03 17:59:32 -08002487 int err = 0, i;
Dan Williams6f231dd2011-07-02 22:56:22 -07002488 enum sci_status status;
Dan Williams4711ba12011-03-11 10:43:57 -08002489 union scic_oem_parameters oem;
Dan Williams6f231dd2011-07-02 22:56:22 -07002490 union scic_user_parameters scic_user_params;
Dan Williamsd044af12011-03-08 09:52:49 -08002491 struct isci_pci_info *pci_info = to_pci_info(isci_host->pdev);
Dan Williams6f231dd2011-07-02 22:56:22 -07002492
Dan Williams6f231dd2011-07-02 22:56:22 -07002493 spin_lock_init(&isci_host->state_lock);
2494 spin_lock_init(&isci_host->scic_lock);
2495 spin_lock_init(&isci_host->queue_lock);
Dan Williams0cf89d12011-02-18 09:25:07 -08002496 init_waitqueue_head(&isci_host->eventq);
Dan Williams6f231dd2011-07-02 22:56:22 -07002497
2498 isci_host_change_state(isci_host, isci_starting);
2499 isci_host->can_queue = ISCI_CAN_QUEUE_VAL;
2500
Artur Wojcikcc3dbd02011-05-04 07:58:16 +00002501 status = scic_controller_construct(&isci_host->sci, scu_base(isci_host),
Dan Williams6f231dd2011-07-02 22:56:22 -07002502 smu_base(isci_host));
2503
2504 if (status != SCI_SUCCESS) {
2505 dev_err(&isci_host->pdev->dev,
2506 "%s: scic_controller_construct failed - status = %x\n",
2507 __func__,
2508 status);
Dave Jiang858d4aa2011-02-22 01:27:03 -08002509 return -ENODEV;
Dan Williams6f231dd2011-07-02 22:56:22 -07002510 }
2511
2512 isci_host->sas_ha.dev = &isci_host->pdev->dev;
2513 isci_host->sas_ha.lldd_ha = isci_host;
2514
Dan Williamsd044af12011-03-08 09:52:49 -08002515 /*
2516 * grab initial values stored in the controller object for OEM and USER
2517 * parameters
2518 */
Dave Jiangb5f18a22011-03-16 14:57:23 -07002519 isci_user_parameters_get(isci_host, &scic_user_params);
Artur Wojcikcc3dbd02011-05-04 07:58:16 +00002520 status = scic_user_parameters_set(&isci_host->sci,
Dan Williamsd044af12011-03-08 09:52:49 -08002521 &scic_user_params);
2522 if (status != SCI_SUCCESS) {
2523 dev_warn(&isci_host->pdev->dev,
2524 "%s: scic_user_parameters_set failed\n",
2525 __func__);
2526 return -ENODEV;
2527 }
Dan Williams6f231dd2011-07-02 22:56:22 -07002528
Artur Wojcikcc3dbd02011-05-04 07:58:16 +00002529 scic_oem_parameters_get(&isci_host->sci, &oem);
Dan Williamsd044af12011-03-08 09:52:49 -08002530
2531 /* grab any OEM parameters specified in orom */
2532 if (pci_info->orom) {
Dan Williams4711ba12011-03-11 10:43:57 -08002533 status = isci_parse_oem_parameters(&oem,
Dan Williamsd044af12011-03-08 09:52:49 -08002534 pci_info->orom,
2535 isci_host->id);
Dan Williams6f231dd2011-07-02 22:56:22 -07002536 if (status != SCI_SUCCESS) {
2537 dev_warn(&isci_host->pdev->dev,
2538 "parsing firmware oem parameters failed\n");
Dave Jiang858d4aa2011-02-22 01:27:03 -08002539 return -EINVAL;
Dan Williams6f231dd2011-07-02 22:56:22 -07002540 }
Dan Williams4711ba12011-03-11 10:43:57 -08002541 }
2542
Artur Wojcikcc3dbd02011-05-04 07:58:16 +00002543 status = scic_oem_parameters_set(&isci_host->sci, &oem);
Dan Williams4711ba12011-03-11 10:43:57 -08002544 if (status != SCI_SUCCESS) {
2545 dev_warn(&isci_host->pdev->dev,
2546 "%s: scic_oem_parameters_set failed\n",
2547 __func__);
2548 return -ENODEV;
Dan Williams6f231dd2011-07-02 22:56:22 -07002549 }
2550
Dan Williams6f231dd2011-07-02 22:56:22 -07002551 tasklet_init(&isci_host->completion_tasklet,
Dan Williamsc7ef4032011-02-18 09:25:05 -08002552 isci_host_completion_routine, (unsigned long)isci_host);
Dan Williams6f231dd2011-07-02 22:56:22 -07002553
Dan Williams6f231dd2011-07-02 22:56:22 -07002554 INIT_LIST_HEAD(&isci_host->requests_to_complete);
Jeff Skirvin11b00c12011-03-04 14:06:40 -08002555 INIT_LIST_HEAD(&isci_host->requests_to_errorback);
Dan Williams6f231dd2011-07-02 22:56:22 -07002556
Dan Williams7c40a802011-03-02 11:49:26 -08002557 spin_lock_irq(&isci_host->scic_lock);
Artur Wojcikcc3dbd02011-05-04 07:58:16 +00002558 status = scic_controller_initialize(&isci_host->sci);
Dan Williams7c40a802011-03-02 11:49:26 -08002559 spin_unlock_irq(&isci_host->scic_lock);
2560 if (status != SCI_SUCCESS) {
2561 dev_warn(&isci_host->pdev->dev,
2562 "%s: scic_controller_initialize failed -"
2563 " status = 0x%x\n",
2564 __func__, status);
2565 return -ENODEV;
2566 }
2567
Artur Wojcikcc3dbd02011-05-04 07:58:16 +00002568 err = scic_controller_mem_init(&isci_host->sci);
Dan Williams6f231dd2011-07-02 22:56:22 -07002569 if (err)
Dave Jiang858d4aa2011-02-22 01:27:03 -08002570 return err;
Dan Williams6f231dd2011-07-02 22:56:22 -07002571
Dan Williams6f231dd2011-07-02 22:56:22 -07002572 isci_host->dma_pool = dmam_pool_create(DRV_NAME, &isci_host->pdev->dev,
Dan Williams67ea8382011-05-08 11:47:15 -07002573 sizeof(struct isci_request),
Dan Williams6f231dd2011-07-02 22:56:22 -07002574 SLAB_HWCACHE_ALIGN, 0);
2575
Dave Jiang858d4aa2011-02-22 01:27:03 -08002576 if (!isci_host->dma_pool)
2577 return -ENOMEM;
Dan Williams6f231dd2011-07-02 22:56:22 -07002578
Dan Williamsd9c37392011-03-03 17:59:32 -08002579 for (i = 0; i < SCI_MAX_PORTS; i++)
Dan Williamse5313812011-05-07 10:11:43 -07002580 isci_port_init(&isci_host->ports[i], isci_host, i);
Dan Williams6f231dd2011-07-02 22:56:22 -07002581
Dan Williamsd9c37392011-03-03 17:59:32 -08002582 for (i = 0; i < SCI_MAX_PHYS; i++)
2583 isci_phy_init(&isci_host->phys[i], isci_host, i);
2584
2585 for (i = 0; i < SCI_MAX_REMOTE_DEVICES; i++) {
Dan Williams57f20f42011-04-21 18:14:45 -07002586 struct isci_remote_device *idev = &isci_host->devices[i];
Dan Williamsd9c37392011-03-03 17:59:32 -08002587
2588 INIT_LIST_HEAD(&idev->reqs_in_process);
2589 INIT_LIST_HEAD(&idev->node);
2590 spin_lock_init(&idev->state_lock);
2591 }
Dan Williams6f231dd2011-07-02 22:56:22 -07002592
Dave Jiang858d4aa2011-02-22 01:27:03 -08002593 return 0;
Dan Williams6f231dd2011-07-02 22:56:22 -07002594}
Dan Williamscc9203b2011-05-08 17:34:44 -07002595
2596void scic_sds_controller_link_up(struct scic_sds_controller *scic,
2597 struct scic_sds_port *port, struct scic_sds_phy *phy)
2598{
Edmund Nadolskie3013702011-06-02 00:10:43 +00002599 switch (scic->sm.current_state_id) {
2600 case SCIC_STARTING:
Edmund Nadolskibb3dbdf2011-05-19 20:26:02 -07002601 sci_del_timer(&scic->phy_timer);
2602 scic->phy_startup_timer_pending = false;
Dan Williamscc9203b2011-05-08 17:34:44 -07002603 scic->port_agent.link_up_handler(scic, &scic->port_agent,
2604 port, phy);
2605 scic_sds_controller_start_next_phy(scic);
2606 break;
Edmund Nadolskie3013702011-06-02 00:10:43 +00002607 case SCIC_READY:
Dan Williamscc9203b2011-05-08 17:34:44 -07002608 scic->port_agent.link_up_handler(scic, &scic->port_agent,
2609 port, phy);
2610 break;
2611 default:
2612 dev_dbg(scic_to_dev(scic),
2613 "%s: SCIC Controller linkup event from phy %d in "
2614 "unexpected state %d\n", __func__, phy->phy_index,
Edmund Nadolskie3013702011-06-02 00:10:43 +00002615 scic->sm.current_state_id);
Dan Williamscc9203b2011-05-08 17:34:44 -07002616 }
2617}
2618
2619void scic_sds_controller_link_down(struct scic_sds_controller *scic,
2620 struct scic_sds_port *port, struct scic_sds_phy *phy)
2621{
Edmund Nadolskie3013702011-06-02 00:10:43 +00002622 switch (scic->sm.current_state_id) {
2623 case SCIC_STARTING:
2624 case SCIC_READY:
Dan Williamscc9203b2011-05-08 17:34:44 -07002625 scic->port_agent.link_down_handler(scic, &scic->port_agent,
2626 port, phy);
2627 break;
2628 default:
2629 dev_dbg(scic_to_dev(scic),
2630 "%s: SCIC Controller linkdown event from phy %d in "
2631 "unexpected state %d\n",
2632 __func__,
2633 phy->phy_index,
Edmund Nadolskie3013702011-06-02 00:10:43 +00002634 scic->sm.current_state_id);
Dan Williamscc9203b2011-05-08 17:34:44 -07002635 }
2636}
2637
2638/**
2639 * This is a helper method to determine if any remote devices on this
2640 * controller are still in the stopping state.
2641 *
2642 */
2643static bool scic_sds_controller_has_remote_devices_stopping(
2644 struct scic_sds_controller *controller)
2645{
2646 u32 index;
2647
2648 for (index = 0; index < controller->remote_node_entries; index++) {
2649 if ((controller->device_table[index] != NULL) &&
Edmund Nadolskie3013702011-06-02 00:10:43 +00002650 (controller->device_table[index]->sm.current_state_id == SCI_DEV_STOPPING))
Dan Williamscc9203b2011-05-08 17:34:44 -07002651 return true;
2652 }
2653
2654 return false;
2655}
2656
2657/**
2658 * This method is called by the remote device to inform the controller
2659 * object that the remote device has stopped.
2660 */
2661void scic_sds_controller_remote_device_stopped(struct scic_sds_controller *scic,
2662 struct scic_sds_remote_device *sci_dev)
2663{
Edmund Nadolskie3013702011-06-02 00:10:43 +00002664 if (scic->sm.current_state_id != SCIC_STOPPING) {
Dan Williamscc9203b2011-05-08 17:34:44 -07002665 dev_dbg(scic_to_dev(scic),
2666 "SCIC Controller 0x%p remote device stopped event "
2667 "from device 0x%p in unexpected state %d\n",
2668 scic, sci_dev,
Edmund Nadolskie3013702011-06-02 00:10:43 +00002669 scic->sm.current_state_id);
Dan Williamscc9203b2011-05-08 17:34:44 -07002670 return;
2671 }
2672
2673 if (!scic_sds_controller_has_remote_devices_stopping(scic)) {
Edmund Nadolskie3013702011-06-02 00:10:43 +00002674 sci_change_state(&scic->sm, SCIC_STOPPED);
Dan Williamscc9203b2011-05-08 17:34:44 -07002675 }
2676}
2677
2678/**
2679 * This method will write to the SCU PCP register the request value. The method
2680 * is used to suspend/resume ports, devices, and phys.
2681 * @scic:
2682 *
2683 *
2684 */
2685void scic_sds_controller_post_request(
2686 struct scic_sds_controller *scic,
2687 u32 request)
2688{
2689 dev_dbg(scic_to_dev(scic),
2690 "%s: SCIC Controller 0x%p post request 0x%08x\n",
2691 __func__,
2692 scic,
2693 request);
2694
2695 writel(request, &scic->smu_registers->post_context_port);
2696}
2697
2698/**
2699 * This method will copy the soft copy of the task context into the physical
2700 * memory accessible by the controller.
2701 * @scic: This parameter specifies the controller for which to copy
2702 * the task context.
2703 * @sci_req: This parameter specifies the request for which the task
2704 * context is being copied.
2705 *
2706 * After this call is made the SCIC_SDS_IO_REQUEST object will always point to
2707 * the physical memory version of the task context. Thus, all subsequent
2708 * updates to the task context are performed in the TC table (i.e. DMAable
2709 * memory). none
2710 */
2711void scic_sds_controller_copy_task_context(
2712 struct scic_sds_controller *scic,
2713 struct scic_sds_request *sci_req)
2714{
2715 struct scu_task_context *task_context_buffer;
2716
2717 task_context_buffer = scic_sds_controller_get_task_context_buffer(
2718 scic, sci_req->io_tag);
2719
2720 memcpy(task_context_buffer,
2721 sci_req->task_context_buffer,
2722 offsetof(struct scu_task_context, sgl_snapshot_ac));
2723
2724 /*
2725 * The soft copy of the TC has now been copied into the TC table
2726 * accessible by the silicon, so any further changes to the TC
2727 * (e.g. TC termination) occur in the appropriate location. */
2728 sci_req->task_context_buffer = task_context_buffer;
2729}
2730
2731/**
2732 * This method returns the task context buffer for the given io tag.
2733 * @scic:
2734 * @io_tag:
2735 *
2736 * struct scu_task_context*
2737 */
2738struct scu_task_context *scic_sds_controller_get_task_context_buffer(
2739 struct scic_sds_controller *scic,
2740 u16 io_tag
2741 ) {
2742 u16 task_index = scic_sds_io_tag_get_index(io_tag);
2743
2744 if (task_index < scic->task_context_entries) {
2745 return &scic->task_context_table[task_index];
2746 }
2747
2748 return NULL;
2749}
2750
2751struct scic_sds_request *scic_request_by_tag(struct scic_sds_controller *scic,
2752 u16 io_tag)
2753{
2754 u16 task_index;
2755 u16 task_sequence;
2756
2757 task_index = scic_sds_io_tag_get_index(io_tag);
2758
2759 if (task_index < scic->task_context_entries) {
2760 if (scic->io_request_table[task_index] != NULL) {
2761 task_sequence = scic_sds_io_tag_get_sequence(io_tag);
2762
2763 if (task_sequence == scic->io_request_sequence[task_index]) {
2764 return scic->io_request_table[task_index];
2765 }
2766 }
2767 }
2768
2769 return NULL;
2770}
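/*
 * The sequence check above is what defends against stale tags: once an
 * index slot is recycled for a newer request its entry in
 * io_request_sequence[] changes, so a lookup with the old tag returns
 * NULL rather than the wrong request.
 */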
2771
2772/**
2773 * This method allocates a remote node index and reserves the remote node
2774 * context space for use. It can fail if there are no more remote
2775 * node indexes available.
2776 * @scic: This is the controller object which contains the set of
2777 * free remote node ids
2778 * @sci_dev: This is the device object which is requesting a remote node
2779 * id
2780 * @node_id: This is the remote node id that is assigned to the device if one
2781 * is available
2782 *
2783 * enum sci_status SCI_FAILURE_INSUFFICIENT_RESOURCES if there are no
2784 * remote node indexes available.
2785 */
2786enum sci_status scic_sds_controller_allocate_remote_node_context(
2787 struct scic_sds_controller *scic,
2788 struct scic_sds_remote_device *sci_dev,
2789 u16 *node_id)
2790{
2791 u16 node_index;
2792 u32 remote_node_count = scic_sds_remote_device_node_count(sci_dev);
2793
2794 node_index = scic_sds_remote_node_table_allocate_remote_node(
2795 &scic->available_remote_nodes, remote_node_count
2796 );
2797
2798 if (node_index != SCIC_SDS_REMOTE_NODE_CONTEXT_INVALID_INDEX) {
2799 scic->device_table[node_index] = sci_dev;
2800
2801 *node_id = node_index;
2802
2803 return SCI_SUCCESS;
2804 }
2805
2806 return SCI_FAILURE_INSUFFICIENT_RESOURCES;
2807}
2808
2809/**
2810 * This method frees the remote node index back to the available pool. Once
2811 * this is done the remote node context buffer is no longer valid and can
2812 * not be used.
2813 * @scic:
2814 * @sci_dev:
2815 * @node_id:
2816 *
2817 */
2818void scic_sds_controller_free_remote_node_context(
2819 struct scic_sds_controller *scic,
2820 struct scic_sds_remote_device *sci_dev,
2821 u16 node_id)
2822{
2823 u32 remote_node_count = scic_sds_remote_device_node_count(sci_dev);
2824
2825 if (scic->device_table[node_id] == sci_dev) {
2826 scic->device_table[node_id] = NULL;
2827
2828 scic_sds_remote_node_table_release_remote_node_index(
2829 &scic->available_remote_nodes, remote_node_count, node_id
2830 );
2831 }
2832}
2833
2834/**
2835 * This method returns the union scu_remote_node_context for the specified remote
2836 * node id.
2837 * @scic:
2838 * @node_id:
2839 *
2840 * union scu_remote_node_context*
2841 */
2842union scu_remote_node_context *scic_sds_controller_get_remote_node_context_buffer(
2843 struct scic_sds_controller *scic,
2844 u16 node_id
2845 ) {
2846 if (
2847 (node_id < scic->remote_node_entries)
2848 && (scic->device_table[node_id] != NULL)
2849 ) {
2850 return &scic->remote_node_context_table[node_id];
2851 }
2852
2853 return NULL;
2854}
2855
2856/**
2857 *
2858 * @response_buffer: This is the buffer into which the D2H register FIS will be
2859 * constructed.
2860 * @frame_header: This is the frame header returned by the hardware.
2861 * @frame_buffer: This is the frame buffer returned by the hardware.
2862 *
2863 * This method will combine the frame header and frame buffer to create a SATA
2864 * D2H register FIS.
2865 */
2866void scic_sds_controller_copy_sata_response(
2867 void *response_buffer,
2868 void *frame_header,
2869 void *frame_buffer)
2870{
2871 memcpy(response_buffer, frame_header, sizeof(u32));
2872
2873 memcpy(response_buffer + sizeof(u32),
2874 frame_buffer,
2875 sizeof(struct dev_to_host_fis) - sizeof(u32));
2876}
2877
2878/**
2879 * This method releases the frame; once this is done the frame is available for
2880 * re-use by the hardware. The data contained in the frame header and frame
2881 * buffer is no longer valid. The UF queue get pointer is only updated if UF
2882 * control indicates this is appropriate.
2883 * @scic:
2884 * @frame_index:
2885 *
2886 */
2887void scic_sds_controller_release_frame(
2888 struct scic_sds_controller *scic,
2889 u32 frame_index)
2890{
2891 if (scic_sds_unsolicited_frame_control_release_frame(
2892 &scic->uf_control, frame_index) == true)
2893 writel(scic->uf_control.get,
2894 &scic->scu_registers->sdma.unsolicited_frame_get_pointer);
2895}
2896
2897/**
2898 * scic_controller_start_io() - This method is called by the SCI user to
2899 * send/start an IO request. If the method invocation is successful, then
2900 * the IO request has been queued to the hardware for processing.
2901 * @controller: the handle to the controller object for which to start an IO
2902 * request.
2903 * @remote_device: the handle to the remote device object for which to start an
2904 * IO request.
2905 * @io_request: the handle to the io request object to start.
2906 * @io_tag: This parameter specifies a previously allocated IO tag that the
2907 * user desires to be utilized for this request. This parameter is optional.
2908 * The user is allowed to supply SCI_CONTROLLER_INVALID_IO_TAG as the value
2909 * for this parameter.
2910 *
2911 * - IO tags are a protected resource. It is incumbent upon the SCI Core user
2912 * to ensure that each of the methods that may allocate or free available IO
2913 * tags are handled in a mutually exclusive manner. This method is one of said
2914 * methods requiring proper critical code section protection (e.g. semaphore,
2915 * spin-lock, etc.). - For SATA, the user is required to manage NCQ tags. As a
2916 * result, it is expected the user will have set the NCQ tag field in the host
2917 * to device register FIS prior to calling this method. There is also a
2918 * requirement for the user to call scic_stp_io_set_ncq_tag() prior to invoking
2919 * the scic_controller_start_io() method. scic_controller_allocate_tag() for
2920 * more information on allocating a tag. Indicate if the controller
2921 * successfully started the IO request. SCI_SUCCESS if the IO request was
2922 * successfully started. Determine the failure situations and return values.
2923 */
2924enum sci_status scic_controller_start_io(
2925 struct scic_sds_controller *scic,
2926 struct scic_sds_remote_device *rdev,
2927 struct scic_sds_request *req,
2928 u16 io_tag)
2929{
2930 enum sci_status status;
2931
Edmund Nadolskie3013702011-06-02 00:10:43 +00002932 if (scic->sm.current_state_id != SCIC_READY) {
Dan Williamscc9203b2011-05-08 17:34:44 -07002933 dev_warn(scic_to_dev(scic), "invalid state to start I/O");
2934 return SCI_FAILURE_INVALID_STATE;
2935 }
2936
2937 status = scic_sds_remote_device_start_io(scic, rdev, req);
2938 if (status != SCI_SUCCESS)
2939 return status;
2940
2941 scic->io_request_table[scic_sds_io_tag_get_index(req->io_tag)] = req;
2942 scic_sds_controller_post_request(scic, scic_sds_request_get_post_context(req));
2943 return SCI_SUCCESS;
2944}
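/*
 * Note that the posted tag comes from req->io_tag (recorded when the
 * request was constructed); the io_tag argument is accepted here for
 * the documented interface but is not consulted by this body.
 */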
2945
/**
 * scic_controller_terminate_request() - This method is called by the SCI
 *    Core user to terminate an ongoing (i.e. started) core IO request. This
 *    does not abort the IO request at the target, but rather removes the IO
 *    request from the host controller.
 * @scic: the handle to the controller object for which to terminate a
 *    request.
 * @rdev: the handle to the remote device object for which to terminate a
 *    request.
 * @req: the handle to the io or task management request object to terminate.
 *
 * Returns an indication of whether the controller successfully began the
 * terminate process for the IO request: SCI_SUCCESS if the terminate process
 * was successfully started for the request; otherwise, a status value
 * describing why termination could not be started.
 */
enum sci_status scic_controller_terminate_request(
	struct scic_sds_controller *scic,
	struct scic_sds_remote_device *rdev,
	struct scic_sds_request *req)
{
	enum sci_status status;

	if (scic->sm.current_state_id != SCIC_READY) {
		dev_warn(scic_to_dev(scic),
			 "invalid state to terminate request\n");
		return SCI_FAILURE_INVALID_STATE;
	}

	status = scic_sds_io_request_terminate(req);
	if (status != SCI_SUCCESS)
		return status;

	/*
	 * Utilize the original post context command and OR in the
	 * POST_TC_ABORT request sub-type.
	 */
	scic_sds_controller_post_request(scic,
		scic_sds_request_get_post_context(req) |
		SCU_CONTEXT_COMMAND_REQUEST_POST_TC_ABORT);
	return SCI_SUCCESS;
}

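/*
 * Illustrative sketch (not part of the driver): termination only removes the
 * request from the host controller; the user must still wait for the core to
 * report completion of the terminated request before reclaiming it. The
 * function name below is hypothetical.
 */
static inline enum sci_status example_begin_terminate(
	struct scic_sds_controller *scic,
	struct scic_sds_remote_device *rdev,
	struct scic_sds_request *req)
{
	/* On SCI_SUCCESS the abort has been posted, not yet completed. */
	return scic_controller_terminate_request(scic, rdev, req);
}
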
/**
 * scic_controller_complete_io() - This method will perform core specific
 *    completion operations for an IO request. After this method is invoked,
 *    the user should consider the IO request as invalid until it is properly
 *    reused (i.e. re-constructed).
 * @scic: The handle to the controller object for which to complete the IO
 *    request.
 * @rdev: The handle to the remote device object for which to complete the
 *    IO request.
 * @request: the handle to the io request object to complete.
 *
 * - IO tags are a protected resource. It is incumbent upon the SCI Core user
 *   to ensure that the methods that may allocate or free available IO tags
 *   are invoked in a mutually exclusive manner. This method is one of said
 *   methods requiring proper critical code section protection (e.g.
 *   semaphore, spin-lock, etc.).
 * - If the IO tag for a request was allocated by the SCI Core user, using
 *   the scic_controller_allocate_io_tag() method, then it is the
 *   responsibility of the caller to invoke the scic_controller_free_io_tag()
 *   method to free the tag (i.e. this method will not free the IO tag).
 *
 * Returns an indication of whether the controller successfully completed
 * the IO request: SCI_SUCCESS if the completion process was successful.
 */
enum sci_status scic_controller_complete_io(
	struct scic_sds_controller *scic,
	struct scic_sds_remote_device *rdev,
	struct scic_sds_request *request)
{
	enum sci_status status;
	u16 index;

	switch (scic->sm.current_state_id) {
	case SCIC_STOPPING:
		/* XXX: Implement this function */
		return SCI_FAILURE;
	case SCIC_READY:
		status = scic_sds_remote_device_complete_io(scic, rdev, request);
		if (status != SCI_SUCCESS)
			return status;

		index = scic_sds_io_tag_get_index(request->io_tag);
		scic->io_request_table[index] = NULL;
		return SCI_SUCCESS;
	default:
		dev_warn(scic_to_dev(scic), "invalid state to complete I/O\n");
		return SCI_FAILURE_INVALID_STATE;
	}
}

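/*
 * Illustrative sketch (not part of the driver): per the notes above, when
 * the user allocated the IO tag itself it must free that tag after
 * completion; the core will not. The function name and the caller-tracked
 * 'user_allocated_tag' below are hypothetical.
 */
static inline enum sci_status example_complete_io(
	struct scic_sds_controller *scic,
	struct scic_sds_remote_device *rdev,
	struct scic_sds_request *request,
	u16 user_allocated_tag)
{
	enum sci_status status;

	status = scic_controller_complete_io(scic, rdev, request);
	if (status == SCI_SUCCESS &&
	    user_allocated_tag != SCI_CONTROLLER_INVALID_IO_TAG)
		/* Caller-owned tags must be returned to the pool explicitly. */
		status = scic_controller_free_io_tag(scic, user_allocated_tag);

	return status;
}
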
enum sci_status scic_controller_continue_io(struct scic_sds_request *sci_req)
{
	struct scic_sds_controller *scic = sci_req->owning_controller;

	if (scic->sm.current_state_id != SCIC_READY) {
		dev_warn(scic_to_dev(scic), "invalid state to continue I/O\n");
		return SCI_FAILURE_INVALID_STATE;
	}

	scic->io_request_table[scic_sds_io_tag_get_index(sci_req->io_tag)] = sci_req;
	scic_sds_controller_post_request(scic, scic_sds_request_get_post_context(sci_req));
	return SCI_SUCCESS;
}

/**
 * scic_controller_start_task() - This method is called by the SCIC user to
 *    send/start a framework task management request.
 * @scic: the handle to the controller object for which to start the task
 *    management request.
 * @rdev: the handle to the remote device object for which to start the task
 *    management request.
 * @req: the handle to the task request object to start.
 * @task_tag: This parameter specifies a previously allocated IO tag that the
 *    user desires to be utilized for this request. Note that this is not the
 *    io_tag of the request being managed; it is to be utilized for the task
 *    request itself. This parameter is optional. The user is allowed to
 *    supply SCI_CONTROLLER_INVALID_IO_TAG as the value for this parameter.
 *
 * - IO tags are a protected resource. It is incumbent upon the SCI Core user
 *   to ensure that the methods that may allocate or free available IO tags
 *   are invoked in a mutually exclusive manner. This method is one of said
 *   methods requiring proper critical code section protection (e.g.
 *   semaphore, spin-lock, etc.).
 * - The user must synchronize this task with completion queue processing.
 *   If they are not synchronized, then it is possible for the io requests
 *   that are being managed by the task request to complete before the task
 *   request has been started.
 *
 * See scic_controller_allocate_io_tag() for more information on allocating
 * a tag. Returns an indication of whether the controller successfully
 * started the task request: SCI_TASK_SUCCESS if the task request was
 * successfully started; SCI_TASK_FAILURE_REQUIRES_SCSI_ABORT if there is/are
 * task(s) outstanding that require termination or completion before this
 * request can succeed.
 */
enum sci_task_status scic_controller_start_task(
	struct scic_sds_controller *scic,
	struct scic_sds_remote_device *rdev,
	struct scic_sds_request *req,
	u16 task_tag)
{
	enum sci_status status;

	if (scic->sm.current_state_id != SCIC_READY) {
		dev_warn(scic_to_dev(scic),
			 "%s: SCIC Controller starting task from invalid "
			 "state\n",
			 __func__);
		return SCI_TASK_FAILURE_INVALID_STATE;
	}

	status = scic_sds_remote_device_start_task(scic, rdev, req);
	switch (status) {
	case SCI_FAILURE_RESET_DEVICE_PARTIAL_SUCCESS:
		scic->io_request_table[scic_sds_io_tag_get_index(req->io_tag)] = req;

		/*
		 * We will let the framework know this task request started
		 * successfully, although the core is still working on
		 * starting the request (to post the TC when the RNC is
		 * resumed.)
		 */
		return SCI_SUCCESS;
	case SCI_SUCCESS:
		scic->io_request_table[scic_sds_io_tag_get_index(req->io_tag)] = req;

		scic_sds_controller_post_request(scic,
			scic_sds_request_get_post_context(req));
		break;
	default:
		break;
	}

	return status;
}

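/*
 * Illustrative sketch (not part of the driver): a task management request is
 * started much like an IO, but checked against the task-status return codes
 * documented above. The function name is hypothetical, and 'tmf_req' is
 * assumed to be an already-constructed task request.
 */
static inline bool example_start_task(struct scic_sds_controller *scic,
				      struct scic_sds_remote_device *rdev,
				      struct scic_sds_request *tmf_req)
{
	enum sci_task_status status;

	status = scic_controller_start_task(scic, rdev, tmf_req,
					    SCI_CONTROLLER_INVALID_IO_TAG);

	/* SCI_TASK_SUCCESS means the request was queued to the hardware. */
	return status == SCI_TASK_SUCCESS;
}
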
/**
 * scic_controller_allocate_io_tag() - This method will allocate a tag from
 *    the pool of free IO tags. Direct allocation of IO tags by the SCI Core
 *    user is optional. The scic_controller_start_io() method will allocate
 *    an IO tag if this method is not utilized and the tag is not supplied to
 *    the IO construct routine. Direct allocation of IO tags may provide
 *    additional performance improvements in environments capable of
 *    supporting this usage model. Additionally, direct allocation of IO tags
 *    also provides additional flexibility to the SCI Core user.
 *    Specifically, the user may retain IO tags across the lives of multiple
 *    IO requests.
 * @scic: the handle to the controller object for which to allocate the tag.
 *
 * IO tags are a protected resource. It is incumbent upon the SCI Core user
 * to ensure that the methods that may allocate or free available IO tags are
 * invoked in a mutually exclusive manner. This method is one of said methods
 * requiring proper critical code section protection (e.g. semaphore,
 * spin-lock, etc.). Returns an unsigned integer representing an available IO
 * tag: SCI_CONTROLLER_INVALID_IO_TAG is returned if there are no tags
 * currently available to be allocated; all other return values indicate a
 * legitimate tag.
 */
u16 scic_controller_allocate_io_tag(
	struct scic_sds_controller *scic)
{
	u16 task_context;
	u16 sequence_count;

	if (!sci_pool_empty(scic->tci_pool)) {
		sci_pool_get(scic->tci_pool, task_context);

		sequence_count = scic->io_request_sequence[task_context];

		return scic_sds_io_tag_construct(sequence_count, task_context);
	}

	return SCI_CONTROLLER_INVALID_IO_TAG;
}

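/*
 * Illustrative sketch (not part of the driver): direct tag allocation can
 * fail, so callers must check for the invalid-tag sentinel; a valid tag may
 * then be retained across multiple IO requests. The function name below is
 * hypothetical.
 */
static inline u16 example_reserve_io_tag(struct scic_sds_controller *scic)
{
	/* May fail when the pool of free task contexts is exhausted. */
	u16 io_tag = scic_controller_allocate_io_tag(scic);

	if (io_tag == SCI_CONTROLLER_INVALID_IO_TAG)
		return SCI_CONTROLLER_INVALID_IO_TAG;	/* caller may retry */

	/* The caller now owns the tag until scic_controller_free_io_tag(). */
	return io_tag;
}
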
/**
 * scic_controller_free_io_tag() - This method will free an IO tag to the
 *    pool of free IO tags. This method provides the SCI Core user more
 *    flexibility with regards to IO tags. The user may desire to keep an IO
 *    tag after an IO request has completed, because they plan on re-using
 *    the tag for a subsequent IO request. This method is only legal if the
 *    tag was allocated via scic_controller_allocate_io_tag().
 * @scic: This parameter specifies the handle to the controller object for
 *    which to free/return the tag.
 * @io_tag: This parameter represents the tag to be freed to the pool of
 *    available tags.
 *
 * - IO tags are a protected resource. It is incumbent upon the SCI Core user
 *   to ensure that the methods that may allocate or free available IO tags
 *   are invoked in a mutually exclusive manner. This method is one of said
 *   methods requiring proper critical code section protection (e.g.
 *   semaphore, spin-lock, etc.).
 * - If the IO tag for a request was allocated by the SCI Core user, using
 *   the scic_controller_allocate_io_tag() method, then it is the
 *   responsibility of the caller to invoke this method to free the tag.
 *
 * Returns an indication of whether the tag was successfully put back (freed)
 * to the pool of available tags: SCI_SUCCESS if the tag was successfully
 * placed into the pool of available IO tags; SCI_FAILURE_INVALID_IO_TAG if
 * the supplied tag is not a valid IO tag value.
 */
enum sci_status scic_controller_free_io_tag(
	struct scic_sds_controller *scic,
	u16 io_tag)
{
	u16 sequence;
	u16 index;

	BUG_ON(io_tag == SCI_CONTROLLER_INVALID_IO_TAG);

	sequence = scic_sds_io_tag_get_sequence(io_tag);
	index = scic_sds_io_tag_get_index(io_tag);

	if (!sci_pool_full(scic->tci_pool)) {
		if (sequence == scic->io_request_sequence[index]) {
			scic_sds_io_sequence_increment(
				scic->io_request_sequence[index]);

			sci_pool_put(scic->tci_pool, index);

			return SCI_SUCCESS;
		}
	}

	return SCI_FAILURE_INVALID_IO_TAG;
}

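/*
 * Illustrative sketch (not part of the driver): freeing a tag bumps the
 * sequence count for its task context, so a stale copy of an already-freed
 * tag is rejected the next time it is passed in. The function name below is
 * hypothetical.
 */
static inline void example_tag_round_trip(struct scic_sds_controller *scic)
{
	u16 io_tag = scic_controller_allocate_io_tag(scic);

	if (io_tag == SCI_CONTROLLER_INVALID_IO_TAG)
		return;

	/* The first free succeeds and retires this sequence of the tag... */
	WARN_ON(scic_controller_free_io_tag(scic, io_tag) != SCI_SUCCESS);

	/* ...so freeing the same, now-stale, tag again must fail. */
	WARN_ON(scic_controller_free_io_tag(scic, io_tag) !=
		SCI_FAILURE_INVALID_IO_TAG);
}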