/*
 * This file is provided under a dual BSD/GPLv2 license. When using or
 * redistributing this file, you may do so under either license.
 *
 * GPL LICENSE SUMMARY
 *
 * Copyright(c) 2008 - 2011 Intel Corporation. All rights reserved.
 *
 * This program is free software; you can redistribute it and/or modify
 * it under the terms of version 2 of the GNU General Public License as
 * published by the Free Software Foundation.
 *
 * This program is distributed in the hope that it will be useful, but
 * WITHOUT ANY WARRANTY; without even the implied warranty of
 * MERCHANTABILITY or FITNESS FOR A PARTICULAR PURPOSE. See the GNU
 * General Public License for more details.
 *
 * You should have received a copy of the GNU General Public License
 * along with this program; if not, write to the Free Software
 * Foundation, Inc., 51 Franklin St - Fifth Floor, Boston, MA 02110-1301 USA.
 * The full GNU General Public License is included in this distribution
 * in the file called LICENSE.GPL.
 *
 * BSD LICENSE
 *
 * Copyright(c) 2008 - 2011 Intel Corporation. All rights reserved.
 * All rights reserved.
 *
 * Redistribution and use in source and binary forms, with or without
 * modification, are permitted provided that the following conditions
 * are met:
 *
 *   * Redistributions of source code must retain the above copyright
 *     notice, this list of conditions and the following disclaimer.
 *   * Redistributions in binary form must reproduce the above copyright
 *     notice, this list of conditions and the following disclaimer in
 *     the documentation and/or other materials provided with the
 *     distribution.
 *   * Neither the name of Intel Corporation nor the names of its
 *     contributors may be used to endorse or promote products derived
 *     from this software without specific prior written permission.
 *
 * THIS SOFTWARE IS PROVIDED BY THE COPYRIGHT HOLDERS AND CONTRIBUTORS
 * "AS IS" AND ANY EXPRESS OR IMPLIED WARRANTIES, INCLUDING, BUT NOT
 * LIMITED TO, THE IMPLIED WARRANTIES OF MERCHANTABILITY AND FITNESS FOR
 * A PARTICULAR PURPOSE ARE DISCLAIMED. IN NO EVENT SHALL THE COPYRIGHT
 * OWNER OR CONTRIBUTORS BE LIABLE FOR ANY DIRECT, INDIRECT, INCIDENTAL,
 * SPECIAL, EXEMPLARY, OR CONSEQUENTIAL DAMAGES (INCLUDING, BUT NOT
 * LIMITED TO, PROCUREMENT OF SUBSTITUTE GOODS OR SERVICES; LOSS OF USE,
 * DATA, OR PROFITS; OR BUSINESS INTERRUPTION) HOWEVER CAUSED AND ON ANY
 * THEORY OF LIABILITY, WHETHER IN CONTRACT, STRICT LIABILITY, OR TORT
 * (INCLUDING NEGLIGENCE OR OTHERWISE) ARISING IN ANY WAY OUT OF THE USE
 * OF THIS SOFTWARE, EVEN IF ADVISED OF THE POSSIBILITY OF SUCH DAMAGE.
 */
#include <linux/device.h>
#include <scsi/sas.h>
#include "host.h"
#include "isci.h"
#include "port.h"
#include "probe_roms.h"
#include "remote_device.h"
#include "request.h"
#include "scu_completion_codes.h"
#include "scu_event_codes.h"
#include "registers.h"
#include "scu_remote_node_context.h"
#include "scu_task_context.h"
#include "scu_unsolicited_frame.h"
#include "timers.h"

#define SCU_CONTEXT_RAM_INIT_STALL_TIME 200

/**
 * smu_dcc_get_max_ports() -
 *
 * This macro returns the maximum number of logical ports supported by the
 * hardware. The caller passes in the value read from the device context
 * capacity register and this macro will mask and shift the value
 * appropriately.
 */
#define smu_dcc_get_max_ports(dcc_value) \
	(\
		(((dcc_value) & SMU_DEVICE_CONTEXT_CAPACITY_MAX_LP_MASK) \
		 >> SMU_DEVICE_CONTEXT_CAPACITY_MAX_LP_SHIFT) + 1 \
	)

/**
 * smu_dcc_get_max_task_context() -
 *
 * This macro returns the maximum number of task contexts supported by the
 * hardware. The caller passes in the value read from the device context
 * capacity register and this macro will mask and shift the value
 * appropriately.
 */
#define smu_dcc_get_max_task_context(dcc_value) \
	(\
		(((dcc_value) & SMU_DEVICE_CONTEXT_CAPACITY_MAX_TC_MASK) \
		 >> SMU_DEVICE_CONTEXT_CAPACITY_MAX_TC_SHIFT) + 1 \
	)

/**
 * smu_dcc_get_max_remote_node_context() -
 *
 * This macro returns the maximum number of remote node contexts supported by
 * the hardware. The caller passes in the value read from the device context
 * capacity register and this macro will mask and shift the value
 * appropriately.
 */
#define smu_dcc_get_max_remote_node_context(dcc_value) \
	(\
		(((dcc_value) & SMU_DEVICE_CONTEXT_CAPACITY_MAX_RNC_MASK) \
		 >> SMU_DEVICE_CONTEXT_CAPACITY_MAX_RNC_SHIFT) + 1 \
	)

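/*
 * Illustrative usage sketch (not part of the original driver): decoding the
 * raw device context capacity register with the macros above. The register
 * field name is assumed from the SMU register definitions and the local
 * variable names are hypothetical.
 *
 *	u32 dcc_value = readl(&scic->smu_registers->device_context_capacity);
 *	u32 max_ports = smu_dcc_get_max_ports(dcc_value);
 *	u32 max_tcs   = smu_dcc_get_max_task_context(dcc_value);
 *	u32 max_rncs  = smu_dcc_get_max_remote_node_context(dcc_value);
 *
 * Each capacity field is stored by the hardware as "count - 1", hence the
 * "+ 1" applied after the mask and shift.
 */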

#define SCIC_SDS_CONTROLLER_MIN_TIMER_COUNT 3
#define SCIC_SDS_CONTROLLER_MAX_TIMER_COUNT 3

/**
 * SCIC_SDS_CONTROLLER_PHY_START_TIMEOUT -
 *
 * The number of milliseconds to wait for a phy to start.
 */
#define SCIC_SDS_CONTROLLER_PHY_START_TIMEOUT 100

/**
 * SCIC_SDS_CONTROLLER_POWER_CONTROL_INTERVAL -
 *
 * The number of milliseconds to wait while a given phy is consuming power
 * before allowing another set of phys to consume power. Ultimately, this
 * will be specified by an OEM parameter.
 */
#define SCIC_SDS_CONTROLLER_POWER_CONTROL_INTERVAL 500

/**
 * NORMALIZE_PUT_POINTER() -
 *
 * This macro will normalize the completion queue put pointer so its value
 * can be used as an array index.
 */
#define NORMALIZE_PUT_POINTER(x) \
	((x) & SMU_COMPLETION_QUEUE_PUT_POINTER_MASK)


/**
 * NORMALIZE_EVENT_POINTER() -
 *
 * This macro will normalize the completion queue event entry so its value
 * can be used as an index.
 */
#define NORMALIZE_EVENT_POINTER(x) \
	(\
		((x) & SMU_COMPLETION_QUEUE_GET_EVENT_POINTER_MASK) \
		>> SMU_COMPLETION_QUEUE_GET_EVENT_POINTER_SHIFT \
	)

/**
 * INCREMENT_COMPLETION_QUEUE_GET() -
 *
 * This macro will increment the controller's completion queue index value
 * and possibly toggle the cycle bit if the completion queue index wraps
 * back to 0.
 */
#define INCREMENT_COMPLETION_QUEUE_GET(controller, index, cycle) \
	INCREMENT_QUEUE_GET(\
		(index), \
		(cycle), \
		(controller)->completion_queue_entries, \
		SMU_CQGR_CYCLE_BIT \
	)

/**
 * INCREMENT_EVENT_QUEUE_GET() -
 *
 * This macro will increment the controller's event queue index value and
 * possibly toggle the event cycle bit if the event queue index wraps back
 * to 0.
 */
#define INCREMENT_EVENT_QUEUE_GET(controller, index, cycle) \
	INCREMENT_QUEUE_GET(\
		(index), \
		(cycle), \
		(controller)->completion_event_entries, \
		SMU_CQGR_EVENT_CYCLE_BIT \
	)

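/*
 * Illustrative note (assumed semantics, not reproduced from this file):
 * INCREMENT_QUEUE_GET is defined elsewhere in the driver. Per the two macro
 * comments above, its effect is equivalent to the following pseudocode,
 * where entry_count and cycle_bit are placeholder parameter names:
 *
 *	index = index + 1;
 *	if (index == entry_count) {
 *		index = 0;
 *		cycle ^= cycle_bit;	// toggle on wrap
 *	}
 *
 * The toggled cycle bit is what lets the driver distinguish fresh entries
 * from stale ones after the ring wraps.
 */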

/**
 * NORMALIZE_GET_POINTER() -
 *
 * This macro will normalize the completion queue get pointer so its value
 * can be used as an index into an array.
 */
#define NORMALIZE_GET_POINTER(x) \
	((x) & SMU_COMPLETION_QUEUE_GET_POINTER_MASK)

/**
 * NORMALIZE_GET_POINTER_CYCLE_BIT() -
 *
 * This macro will normalize the completion queue cycle pointer so it
 * matches the completion queue cycle bit.
 */
#define NORMALIZE_GET_POINTER_CYCLE_BIT(x) \
	((SMU_CQGR_CYCLE_BIT & (x)) << (31 - SMU_COMPLETION_QUEUE_GET_CYCLE_BIT_SHIFT))

/**
 * COMPLETION_QUEUE_CYCLE_BIT() -
 *
 * This macro will return the cycle bit of the completion queue entry.
 */
#define COMPLETION_QUEUE_CYCLE_BIT(x) ((x) & 0x80000000)

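/*
 * How the cycle bit detects new entries (descriptive note): the hardware
 * stamps bit 31 of every completion entry with its current cycle value,
 * flipping that value each time the queue wraps. The driver's get pointer
 * carries its own copy of the cycle (SMU_CQGR_CYCLE_BIT), which
 * NORMALIZE_GET_POINTER_CYCLE_BIT shifts up to bit 31 for comparison.
 * While the two match, the entry at the get index is valid; once they
 * differ, the driver has caught up with the hardware.
 */
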
static bool scic_sds_controller_completion_queue_has_entries(
	struct scic_sds_controller *scic)
{
	u32 get_value = scic->completion_queue_get;
	u32 get_index = get_value & SMU_COMPLETION_QUEUE_GET_POINTER_MASK;

	if (NORMALIZE_GET_POINTER_CYCLE_BIT(get_value) ==
	    COMPLETION_QUEUE_CYCLE_BIT(scic->completion_queue[get_index]))
		return true;

	return false;
}

static bool scic_sds_controller_isr(struct scic_sds_controller *scic)
{
	if (scic_sds_controller_completion_queue_has_entries(scic)) {
		return true;
	} else {
		/*
		 * We have a spurious interrupt; it could be that we have
		 * already emptied the completion queue from a previous
		 * interrupt.
		 */
		writel(SMU_ISR_COMPLETION, &scic->smu_registers->interrupt_status);

		/*
		 * There is a race in the hardware that could cause us not to
		 * be notified of an interrupt completion if we do not take
		 * this step. We will mask then unmask the interrupts so that
		 * if another interrupt is pending after the interrupt source
		 * is cleared we get a new interrupt message.
		 */
		writel(0xFF000000, &scic->smu_registers->interrupt_mask);
		writel(0, &scic->smu_registers->interrupt_mask);
	}

	return false;
}

irqreturn_t isci_msix_isr(int vec, void *data)
{
	struct isci_host *ihost = data;

	if (scic_sds_controller_isr(&ihost->sci))
		tasklet_schedule(&ihost->completion_tasklet);

	return IRQ_HANDLED;
}

static bool scic_sds_controller_error_isr(struct scic_sds_controller *scic)
{
	u32 interrupt_status;

	interrupt_status =
		readl(&scic->smu_registers->interrupt_status);
	interrupt_status &= (SMU_ISR_QUEUE_ERROR | SMU_ISR_QUEUE_SUSPEND);

	if (interrupt_status != 0) {
		/*
		 * There is an error interrupt pending, so let it through and
		 * handle it in the callback.
		 */
		return true;
	}

	/*
	 * There is a race in the hardware that could cause us not to be
	 * notified of an interrupt completion if we do not take this step.
	 * We will mask then unmask the error interrupts so if there was
	 * another interrupt pending we will be notified.
	 * Could we write the value of (SMU_ISR_QUEUE_ERROR | SMU_ISR_QUEUE_SUSPEND)?
	 */
	writel(0xff, &scic->smu_registers->interrupt_mask);
	writel(0, &scic->smu_registers->interrupt_mask);

	return false;
}

static void scic_sds_controller_task_completion(struct scic_sds_controller *scic,
						u32 completion_entry)
{
	u32 index;
	struct scic_sds_request *io_request;

	index = SCU_GET_COMPLETION_INDEX(completion_entry);
	io_request = scic->io_request_table[index];

	/* Make sure that we really want to process this IO request */
	if (io_request != NULL &&
	    io_request->io_tag != SCI_CONTROLLER_INVALID_IO_TAG &&
	    scic_sds_io_tag_get_sequence(io_request->io_tag) ==
			scic->io_request_sequence[index]) {
		/*
		 * This is a valid io request; pass it along to the io
		 * request handler.
		 */
		scic_sds_io_request_tc_completion(io_request, completion_entry);
	}
}

static void scic_sds_controller_sdma_completion(struct scic_sds_controller *scic,
						u32 completion_entry)
{
	u32 index;
	struct scic_sds_request *io_request;
	struct scic_sds_remote_device *device;

	index = SCU_GET_COMPLETION_INDEX(completion_entry);

	switch (scu_get_command_request_type(completion_entry)) {
	case SCU_CONTEXT_COMMAND_REQUEST_TYPE_POST_TC:
	case SCU_CONTEXT_COMMAND_REQUEST_TYPE_DUMP_TC:
		io_request = scic->io_request_table[index];
		dev_warn(scic_to_dev(scic),
			 "%s: SCIC SDS Completion type SDMA %x for io request "
			 "%p\n",
			 __func__,
			 completion_entry,
			 io_request);
		/* @todo For a post TC operation we need to fail the IO
		 * request
		 */
		break;

	case SCU_CONTEXT_COMMAND_REQUEST_TYPE_DUMP_RNC:
	case SCU_CONTEXT_COMMAND_REQUEST_TYPE_OTHER_RNC:
	case SCU_CONTEXT_COMMAND_REQUEST_TYPE_POST_RNC:
		device = scic->device_table[index];
		dev_warn(scic_to_dev(scic),
			 "%s: SCIC SDS Completion type SDMA %x for remote "
			 "device %p\n",
			 __func__,
			 completion_entry,
			 device);
		/* @todo For a port RNC operation we need to fail the
		 * device
		 */
		break;

	default:
		dev_warn(scic_to_dev(scic),
			 "%s: SCIC SDS Completion unknown SDMA completion "
			 "type %x\n",
			 __func__,
			 completion_entry);
		break;
	}
}

static void scic_sds_controller_unsolicited_frame(struct scic_sds_controller *scic,
						  u32 completion_entry)
{
	u32 index;
	u32 frame_index;

	struct isci_host *ihost = scic_to_ihost(scic);
	struct scu_unsolicited_frame_header *frame_header;
	struct scic_sds_phy *phy;
	struct scic_sds_remote_device *device;

	enum sci_status result = SCI_FAILURE;

	frame_index = SCU_GET_FRAME_INDEX(completion_entry);

	frame_header = scic->uf_control.buffers.array[frame_index].header;
	scic->uf_control.buffers.array[frame_index].state = UNSOLICITED_FRAME_IN_USE;

	if (SCU_GET_FRAME_ERROR(completion_entry)) {
		/*
		 * @todo If the IAF frame or SIGNATURE FIS frame has an error
		 * will this cause a problem? We expect the phy initialization
		 * will fail if there is an error in the frame.
		 */
		scic_sds_controller_release_frame(scic, frame_index);
		return;
	}

	if (frame_header->is_address_frame) {
		index = SCU_GET_PROTOCOL_ENGINE_INDEX(completion_entry);
		phy = &ihost->phys[index].sci;
		result = scic_sds_phy_frame_handler(phy, frame_index);
	} else {

		index = SCU_GET_COMPLETION_INDEX(completion_entry);

		if (index == SCIC_SDS_REMOTE_NODE_CONTEXT_INVALID_INDEX) {
			/*
			 * This is a signature fis or a frame from a direct
			 * attached SATA device that has not yet been created.
			 * In either case forward the frame to the PE and let
			 * it take care of the frame data.
			 */
			index = SCU_GET_PROTOCOL_ENGINE_INDEX(completion_entry);
			phy = &ihost->phys[index].sci;
			result = scic_sds_phy_frame_handler(phy, frame_index);
		} else {
			if (index < scic->remote_node_entries)
				device = scic->device_table[index];
			else
				device = NULL;

			if (device != NULL)
				result = scic_sds_remote_device_frame_handler(device, frame_index);
			else
				scic_sds_controller_release_frame(scic, frame_index);
		}
	}

	if (result != SCI_SUCCESS) {
		/*
		 * @todo Is there any reason to report some additional error
		 * message when we get this failure notification?
		 */
	}
}

static void scic_sds_controller_event_completion(struct scic_sds_controller *scic,
						 u32 completion_entry)
{
	struct isci_host *ihost = scic_to_ihost(scic);
	struct scic_sds_request *io_request;
	struct scic_sds_remote_device *device;
	struct scic_sds_phy *phy;
	u32 index;

	index = SCU_GET_COMPLETION_INDEX(completion_entry);

	switch (scu_get_event_type(completion_entry)) {
	case SCU_EVENT_TYPE_SMU_COMMAND_ERROR:
		/* @todo The driver did something wrong and we need to fix the condition. */
		dev_err(scic_to_dev(scic),
			"%s: SCIC Controller 0x%p received SMU command error "
			"0x%x\n",
			__func__,
			scic,
			completion_entry);
		break;

	case SCU_EVENT_TYPE_SMU_PCQ_ERROR:
	case SCU_EVENT_TYPE_SMU_ERROR:
	case SCU_EVENT_TYPE_FATAL_MEMORY_ERROR:
		/*
		 * @todo This is a hardware failure and it's likely that we
		 * want to reset the controller.
		 */
		dev_err(scic_to_dev(scic),
			"%s: SCIC Controller 0x%p received fatal controller "
			"event 0x%x\n",
			__func__,
			scic,
			completion_entry);
		break;

	case SCU_EVENT_TYPE_TRANSPORT_ERROR:
		io_request = scic->io_request_table[index];
		scic_sds_io_request_event_handler(io_request, completion_entry);
		break;

	case SCU_EVENT_TYPE_PTX_SCHEDULE_EVENT:
		switch (scu_get_event_specifier(completion_entry)) {
		case SCU_EVENT_SPECIFIC_SMP_RESPONSE_NO_PE:
		case SCU_EVENT_SPECIFIC_TASK_TIMEOUT:
			io_request = scic->io_request_table[index];
			if (io_request != NULL)
				scic_sds_io_request_event_handler(io_request, completion_entry);
			else
				dev_warn(scic_to_dev(scic),
					 "%s: SCIC Controller 0x%p received "
					 "event 0x%x for io request object "
					 "that doesn't exist.\n",
					 __func__,
					 scic,
					 completion_entry);

			break;

		case SCU_EVENT_SPECIFIC_IT_NEXUS_TIMEOUT:
			device = scic->device_table[index];
			if (device != NULL)
				scic_sds_remote_device_event_handler(device, completion_entry);
			else
				dev_warn(scic_to_dev(scic),
					 "%s: SCIC Controller 0x%p received "
					 "event 0x%x for remote device object "
					 "that doesn't exist.\n",
					 __func__,
					 scic,
					 completion_entry);

			break;
		}
		break;

	case SCU_EVENT_TYPE_BROADCAST_CHANGE:
		/*
		 * Direct the broadcast change event to the phy first and then
		 * let the phy redirect the broadcast change to the port
		 * object.
		 */
	case SCU_EVENT_TYPE_ERR_CNT_EVENT:
		/*
		 * Direct the error counter event to the phy object since that
		 * is where we get the event notification. This is a type 4
		 * event.
		 */
	case SCU_EVENT_TYPE_OSSP_EVENT:
		index = SCU_GET_PROTOCOL_ENGINE_INDEX(completion_entry);
		phy = &ihost->phys[index].sci;
		scic_sds_phy_event_handler(phy, completion_entry);
		break;

	case SCU_EVENT_TYPE_RNC_SUSPEND_TX:
	case SCU_EVENT_TYPE_RNC_SUSPEND_TX_RX:
	case SCU_EVENT_TYPE_RNC_OPS_MISC:
		if (index < scic->remote_node_entries) {
			device = scic->device_table[index];

			if (device != NULL)
				scic_sds_remote_device_event_handler(device, completion_entry);
		} else
			dev_err(scic_to_dev(scic),
				"%s: SCIC Controller 0x%p received event 0x%x "
				"for remote device object 0x%0x that doesn't "
				"exist.\n",
				__func__,
				scic,
				completion_entry,
				index);

		break;

	default:
		dev_warn(scic_to_dev(scic),
			 "%s: SCIC Controller received unknown event code %x\n",
			 __func__,
			 completion_entry);
		break;
	}
}

static void scic_sds_controller_process_completions(struct scic_sds_controller *scic)
{
	u32 completion_count = 0;
	u32 completion_entry;
	u32 get_index;
	u32 get_cycle;
	u32 event_index;
	u32 event_cycle;

	dev_dbg(scic_to_dev(scic),
		"%s: completion queue beginning get:0x%08x\n",
		__func__,
		scic->completion_queue_get);

	/* Get the component parts of the completion queue */
	get_index = NORMALIZE_GET_POINTER(scic->completion_queue_get);
	get_cycle = SMU_CQGR_CYCLE_BIT & scic->completion_queue_get;

	event_index = NORMALIZE_EVENT_POINTER(scic->completion_queue_get);
	event_cycle = SMU_CQGR_EVENT_CYCLE_BIT & scic->completion_queue_get;

	while (NORMALIZE_GET_POINTER_CYCLE_BIT(get_cycle) ==
	       COMPLETION_QUEUE_CYCLE_BIT(scic->completion_queue[get_index])) {
		completion_count++;

		completion_entry = scic->completion_queue[get_index];
		INCREMENT_COMPLETION_QUEUE_GET(scic, get_index, get_cycle);

		dev_dbg(scic_to_dev(scic),
			"%s: completion queue entry:0x%08x\n",
			__func__,
			completion_entry);

		switch (SCU_GET_COMPLETION_TYPE(completion_entry)) {
		case SCU_COMPLETION_TYPE_TASK:
			scic_sds_controller_task_completion(scic, completion_entry);
			break;

		case SCU_COMPLETION_TYPE_SDMA:
			scic_sds_controller_sdma_completion(scic, completion_entry);
			break;

		case SCU_COMPLETION_TYPE_UFI:
			scic_sds_controller_unsolicited_frame(scic, completion_entry);
			break;

		case SCU_COMPLETION_TYPE_EVENT:
			INCREMENT_EVENT_QUEUE_GET(scic, event_index, event_cycle);
			scic_sds_controller_event_completion(scic, completion_entry);
			break;

		case SCU_COMPLETION_TYPE_NOTIFY:
			/*
			 * Presently we do the same thing with a notify event
			 * that we do with the other event codes.
			 */
			INCREMENT_EVENT_QUEUE_GET(scic, event_index, event_cycle);
			scic_sds_controller_event_completion(scic, completion_entry);
			break;

		default:
			dev_warn(scic_to_dev(scic),
				 "%s: SCIC Controller received unknown "
				 "completion type %x\n",
				 __func__,
				 completion_entry);
			break;
		}
	}

	/* Update the get register if we completed one or more entries */
	if (completion_count > 0) {
		scic->completion_queue_get =
			SMU_CQGR_GEN_BIT(ENABLE) |
			SMU_CQGR_GEN_BIT(EVENT_ENABLE) |
			event_cycle |
			SMU_CQGR_GEN_VAL(EVENT_POINTER, event_index) |
			get_cycle |
			SMU_CQGR_GEN_VAL(POINTER, get_index);

		writel(scic->completion_queue_get,
		       &scic->smu_registers->completion_queue_get);
	}

	dev_dbg(scic_to_dev(scic),
		"%s: completion queue ending get:0x%08x\n",
		__func__,
		scic->completion_queue_get);
}

static void scic_sds_controller_error_handler(struct scic_sds_controller *scic)
{
	u32 interrupt_status;

	interrupt_status =
		readl(&scic->smu_registers->interrupt_status);

	if ((interrupt_status & SMU_ISR_QUEUE_SUSPEND) &&
	    scic_sds_controller_completion_queue_has_entries(scic)) {

		scic_sds_controller_process_completions(scic);
		writel(SMU_ISR_QUEUE_SUSPEND, &scic->smu_registers->interrupt_status);
	} else {
		dev_err(scic_to_dev(scic), "%s: status: %#x\n", __func__,
			interrupt_status);

		sci_base_state_machine_change_state(&scic->state_machine,
						    SCI_BASE_CONTROLLER_STATE_FAILED);

		return;
	}

	/*
	 * If we don't process any completions I am not sure that we want to
	 * do this. We are in the middle of a hardware fault and should
	 * probably be reset.
	 */
	writel(0, &scic->smu_registers->interrupt_mask);
}

irqreturn_t isci_intx_isr(int vec, void *data)
{
	irqreturn_t ret = IRQ_NONE;
	struct isci_host *ihost = data;
	struct scic_sds_controller *scic = &ihost->sci;

	if (scic_sds_controller_isr(scic)) {
		writel(SMU_ISR_COMPLETION, &scic->smu_registers->interrupt_status);
		tasklet_schedule(&ihost->completion_tasklet);
		ret = IRQ_HANDLED;
	} else if (scic_sds_controller_error_isr(scic)) {
		spin_lock(&ihost->scic_lock);
		scic_sds_controller_error_handler(scic);
		spin_unlock(&ihost->scic_lock);
		ret = IRQ_HANDLED;
	}

	return ret;
}

irqreturn_t isci_error_isr(int vec, void *data)
{
	struct isci_host *ihost = data;

	if (scic_sds_controller_error_isr(&ihost->sci))
		scic_sds_controller_error_handler(&ihost->sci);

	return IRQ_HANDLED;
}

/**
 * isci_host_start_complete() - This function is called by the core library,
 *    through the ISCI Module, to indicate controller start status.
 * @isci_host: This parameter specifies the ISCI host object
 * @completion_status: This parameter specifies the completion status from the
 *    core library.
 *
 */
static void isci_host_start_complete(struct isci_host *ihost, enum sci_status completion_status)
{
	if (completion_status != SCI_SUCCESS)
		dev_info(&ihost->pdev->dev,
			 "controller start timed out, continuing...\n");
	isci_host_change_state(ihost, isci_ready);
	clear_bit(IHOST_START_PENDING, &ihost->flags);
	wake_up(&ihost->eventq);
}

int isci_host_scan_finished(struct Scsi_Host *shost, unsigned long time)
{
	struct isci_host *ihost = SHOST_TO_SAS_HA(shost)->lldd_ha;

	if (test_bit(IHOST_START_PENDING, &ihost->flags))
		return 0;

	/* todo: use sas_flush_discovery once it is upstream */
	scsi_flush_work(shost);

	scsi_flush_work(shost);

	dev_dbg(&ihost->pdev->dev,
		"%s: ihost->status = %d, time = %ld\n",
		__func__, isci_host_get_state(ihost), time);

	return 1;
}

/**
 * scic_controller_get_suggested_start_timeout() - This method returns the
 *    suggested scic_controller_start() timeout amount. The user is free to
 *    use any timeout value, but this method provides the suggested minimum
 *    start timeout value. The returned value is based upon empirical
 *    information determined as a result of interoperability testing.
 * @controller: the handle to the controller object for which to return the
 *    suggested start timeout.
 *
 * This method returns the number of milliseconds for the suggested start
 * operation timeout.
 */
static u32 scic_controller_get_suggested_start_timeout(
	struct scic_sds_controller *sc)
{
	/* Validate the user supplied parameters. */
	if (sc == NULL)
		return 0;

	/*
	 * The suggested minimum timeout value for a controller start
	 * operation:
	 *
	 *     Signature FIS Timeout
	 *   + Phy Start Timeout
	 *   + Number of Phy Spin Up Intervals
	 *   ---------------------------------
	 *   Number of milliseconds for the controller start operation.
	 *
	 * NOTE: The number of phy spin up intervals will be equivalent
	 * to the number of phys divided by the number of phys allowed
	 * per interval - 1 (once OEM parameters are supported).
	 * Currently we assume only 1 phy per interval.
	 */
	return SCIC_SDS_SIGNATURE_FIS_TIMEOUT
		+ SCIC_SDS_CONTROLLER_PHY_START_TIMEOUT
		+ ((SCI_MAX_PHYS - 1) * SCIC_SDS_CONTROLLER_POWER_CONTROL_INTERVAL);
}

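/*
 * Worked example (illustrative): with the constants defined in this file,
 * SCIC_SDS_CONTROLLER_PHY_START_TIMEOUT is 100 ms and
 * SCIC_SDS_CONTROLLER_POWER_CONTROL_INTERVAL is 500 ms, so for a 4-phy
 * controller (SCI_MAX_PHYS == 4) the suggested timeout works out to
 *
 *	SCIC_SDS_SIGNATURE_FIS_TIMEOUT + 100 + (4 - 1) * 500
 *	= SCIC_SDS_SIGNATURE_FIS_TIMEOUT + 1600 ms
 *
 * with the signature FIS timeout defined elsewhere in the driver.
 */
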
static void scic_controller_enable_interrupts(
	struct scic_sds_controller *scic)
{
	BUG_ON(scic->smu_registers == NULL);
	writel(0, &scic->smu_registers->interrupt_mask);
}

void scic_controller_disable_interrupts(
	struct scic_sds_controller *scic)
{
	BUG_ON(scic->smu_registers == NULL);
	writel(0xffffffff, &scic->smu_registers->interrupt_mask);
}

static void scic_sds_controller_enable_port_task_scheduler(
	struct scic_sds_controller *scic)
{
	u32 port_task_scheduler_value;

	port_task_scheduler_value =
		readl(&scic->scu_registers->peg0.ptsg.control);
	port_task_scheduler_value |=
		(SCU_PTSGCR_GEN_BIT(ETM_ENABLE) |
		 SCU_PTSGCR_GEN_BIT(PTSG_ENABLE));
	writel(port_task_scheduler_value,
	       &scic->scu_registers->peg0.ptsg.control);
}

static void scic_sds_controller_assign_task_entries(struct scic_sds_controller *scic)
{
	u32 task_assignment;

	/*
	 * Assign all the TCs to function 0
	 * TODO: Do we actually need to read this register to write it back?
	 */
	task_assignment =
		readl(&scic->smu_registers->task_context_assignment[0]);

	task_assignment |= (SMU_TCA_GEN_VAL(STARTING, 0)) |
		(SMU_TCA_GEN_VAL(ENDING, scic->task_context_entries - 1)) |
		(SMU_TCA_GEN_BIT(RANGE_CHECK_ENABLE));

	writel(task_assignment,
	       &scic->smu_registers->task_context_assignment[0]);
}

static void scic_sds_controller_initialize_completion_queue(struct scic_sds_controller *scic)
{
	u32 index;
	u32 completion_queue_control_value;
	u32 completion_queue_get_value;
	u32 completion_queue_put_value;

	scic->completion_queue_get = 0;

	completion_queue_control_value =
		(SMU_CQC_QUEUE_LIMIT_SET(scic->completion_queue_entries - 1) |
		 SMU_CQC_EVENT_LIMIT_SET(scic->completion_event_entries - 1));

	writel(completion_queue_control_value,
	       &scic->smu_registers->completion_queue_control);

	/* Set the completion queue get pointer and enable the queue */
	completion_queue_get_value =
		((SMU_CQGR_GEN_VAL(POINTER, 0)) |
		 (SMU_CQGR_GEN_VAL(EVENT_POINTER, 0)) |
		 (SMU_CQGR_GEN_BIT(ENABLE)) |
		 (SMU_CQGR_GEN_BIT(EVENT_ENABLE)));

	writel(completion_queue_get_value,
	       &scic->smu_registers->completion_queue_get);

	/* Set the completion queue put pointer */
	completion_queue_put_value =
		((SMU_CQPR_GEN_VAL(POINTER, 0)) |
		 (SMU_CQPR_GEN_VAL(EVENT_POINTER, 0)));

	writel(completion_queue_put_value,
	       &scic->smu_registers->completion_queue_put);

	/* Initialize the cycle bit of the completion queue entries */
	for (index = 0; index < scic->completion_queue_entries; index++) {
		/*
		 * If get.cycle_bit != completion_queue.cycle_bit it's not a
		 * valid completion queue entry, so at system start all
		 * entries are invalid.
		 */
		scic->completion_queue[index] = 0x80000000;
	}
}

static void scic_sds_controller_initialize_unsolicited_frame_queue(struct scic_sds_controller *scic)
{
	u32 frame_queue_control_value;
	u32 frame_queue_get_value;
	u32 frame_queue_put_value;

	/* Write the queue size */
	frame_queue_control_value =
		SCU_UFQC_GEN_VAL(QUEUE_SIZE,
				 scic->uf_control.address_table.count);

	writel(frame_queue_control_value,
	       &scic->scu_registers->sdma.unsolicited_frame_queue_control);

	/* Setup the get pointer for the unsolicited frame queue */
	frame_queue_get_value =
		(SCU_UFQGP_GEN_VAL(POINTER, 0) |
		 SCU_UFQGP_GEN_BIT(ENABLE_BIT));

	writel(frame_queue_get_value,
	       &scic->scu_registers->sdma.unsolicited_frame_get_pointer);

	/* Setup the put pointer for the unsolicited frame queue */
	frame_queue_put_value = SCU_UFQPP_GEN_VAL(POINTER, 0);
	writel(frame_queue_put_value,
	       &scic->scu_registers->sdma.unsolicited_frame_put_pointer);
}

/**
 * scic_sds_controller_transition_to_ready() - This method will attempt to
 *    transition into the ready state for the controller and indicate that
 *    the controller start operation has completed if all criteria are met.
 * @scic: This parameter indicates the controller object for which
 *    to transition to ready.
 * @status: This parameter indicates the status value to be passed into the
 *    call to scic_cb_controller_start_complete().
 *
 * none.
 */
static void scic_sds_controller_transition_to_ready(
	struct scic_sds_controller *scic,
	enum sci_status status)
{
	struct isci_host *ihost = scic_to_ihost(scic);

	if (scic->state_machine.current_state_id ==
	    SCI_BASE_CONTROLLER_STATE_STARTING) {
		/*
		 * We move into the ready state, because some of the
		 * phys/ports may be up and operational.
		 */
		sci_base_state_machine_change_state(&scic->state_machine,
						    SCI_BASE_CONTROLLER_STATE_READY);

		isci_host_start_complete(ihost, status);
	}
}

static void scic_sds_controller_phy_timer_stop(struct scic_sds_controller *scic)
{
	isci_timer_stop(scic->phy_startup_timer);

	scic->phy_startup_timer_pending = false;
}

static void scic_sds_controller_phy_timer_start(struct scic_sds_controller *scic)
{
	isci_timer_start(scic->phy_startup_timer,
			 SCIC_SDS_CONTROLLER_PHY_START_TIMEOUT);

	scic->phy_startup_timer_pending = true;
}

static bool is_phy_starting(struct scic_sds_phy *sci_phy)
{
	enum scic_sds_phy_states state;

	state = sci_phy->state_machine.current_state_id;
	switch (state) {
	case SCI_BASE_PHY_STATE_STARTING:
	case SCIC_SDS_PHY_STARTING_SUBSTATE_INITIAL:
	case SCIC_SDS_PHY_STARTING_SUBSTATE_AWAIT_SAS_SPEED_EN:
	case SCIC_SDS_PHY_STARTING_SUBSTATE_AWAIT_IAF_UF:
	case SCIC_SDS_PHY_STARTING_SUBSTATE_AWAIT_SAS_POWER:
	case SCIC_SDS_PHY_STARTING_SUBSTATE_AWAIT_SATA_POWER:
	case SCIC_SDS_PHY_STARTING_SUBSTATE_AWAIT_SATA_PHY_EN:
	case SCIC_SDS_PHY_STARTING_SUBSTATE_AWAIT_SATA_SPEED_EN:
	case SCIC_SDS_PHY_STARTING_SUBSTATE_AWAIT_SIG_FIS_UF:
	case SCIC_SDS_PHY_STARTING_SUBSTATE_FINAL:
		return true;
	default:
		return false;
	}
}

/**
 * scic_sds_controller_start_next_phy - start phy
 * @scic: controller
 *
 * If all the phys have been started, then attempt to transition the
 * controller to the READY state and inform the user
 * (scic_cb_controller_start_complete()).
 */
static enum sci_status scic_sds_controller_start_next_phy(struct scic_sds_controller *scic)
{
	struct isci_host *ihost = scic_to_ihost(scic);
	struct scic_sds_oem_params *oem = &scic->oem_parameters.sds1;
	struct scic_sds_phy *sci_phy;
	enum sci_status status;

	status = SCI_SUCCESS;

	if (scic->phy_startup_timer_pending)
		return status;

	if (scic->next_phy_to_start >= SCI_MAX_PHYS) {
		bool is_controller_start_complete = true;
		u32 state;
		u8 index;

		for (index = 0; index < SCI_MAX_PHYS; index++) {
			sci_phy = &ihost->phys[index].sci;
			state = sci_phy->state_machine.current_state_id;

			if (!phy_get_non_dummy_port(sci_phy))
				continue;

			/* The controller start operation is complete iff:
			 * - all links have been given an opportunity to start
			 * - have no indication of a connected device
			 * - have an indication of a connected device and it
			 *   has finished the link training process.
			 */
			if ((sci_phy->is_in_link_training == false &&
			     state == SCI_BASE_PHY_STATE_INITIAL) ||
			    (sci_phy->is_in_link_training == false &&
			     state == SCI_BASE_PHY_STATE_STOPPED) ||
			    (sci_phy->is_in_link_training == true &&
			     is_phy_starting(sci_phy))) {
				is_controller_start_complete = false;
				break;
			}
		}

		/*
		 * The controller has successfully finished the start process.
		 * Inform the SCI Core user and transition to the READY state.
		 */
		if (is_controller_start_complete == true) {
			scic_sds_controller_transition_to_ready(scic, SCI_SUCCESS);
			scic_sds_controller_phy_timer_stop(scic);
		}
	} else {
		sci_phy = &ihost->phys[scic->next_phy_to_start].sci;

		if (oem->controller.mode_type == SCIC_PORT_MANUAL_CONFIGURATION_MODE) {
			if (phy_get_non_dummy_port(sci_phy) == NULL) {
				scic->next_phy_to_start++;

				/* Caution: recursion ahead, be forewarned.
				 *
				 * The phy was never added to a port in MPC
				 * mode, so start the next phy in sequence.
				 * This phy will never go link up and will not
				 * draw power; the OEM parameters either
				 * configured the phy incorrectly for the port
				 * or it was never assigned to a port.
				 */
				return scic_sds_controller_start_next_phy(scic);
			}
		}

		status = scic_sds_phy_start(sci_phy);

		if (status == SCI_SUCCESS) {
			scic_sds_controller_phy_timer_start(scic);
		} else {
			dev_warn(scic_to_dev(scic),
				 "%s: Controller start operation failed "
				 "to start phy %d because of status "
				 "%d.\n",
				 __func__,
				 ihost->phys[scic->next_phy_to_start].sci.phy_index,
				 status);
		}

		scic->next_phy_to_start++;
	}

	return status;
}

static void scic_sds_controller_phy_startup_timeout_handler(void *_scic)
{
	struct scic_sds_controller *scic = _scic;
	enum sci_status status;

	scic->phy_startup_timer_pending = false;
	status = SCI_FAILURE;
	while (status != SCI_SUCCESS)
		status = scic_sds_controller_start_next_phy(scic);
}

static enum sci_status scic_controller_start(struct scic_sds_controller *scic,
					     u32 timeout)
{
	struct isci_host *ihost = scic_to_ihost(scic);
	enum sci_status result;
	u16 index;

	if (scic->state_machine.current_state_id !=
	    SCI_BASE_CONTROLLER_STATE_INITIALIZED) {
		dev_warn(scic_to_dev(scic),
			 "SCIC Controller start operation requested in "
			 "invalid state\n");
		return SCI_FAILURE_INVALID_STATE;
	}

	/* Build the TCi free pool */
	sci_pool_initialize(scic->tci_pool);
	for (index = 0; index < scic->task_context_entries; index++)
		sci_pool_put(scic->tci_pool, index);

	/* Build the RNi free pool */
	scic_sds_remote_node_table_initialize(
		&scic->available_remote_nodes,
		scic->remote_node_entries);

	/*
	 * Before anything else let's make sure we will not be
	 * interrupted by the hardware.
	 */
	scic_controller_disable_interrupts(scic);

	/* Enable the port task scheduler */
	scic_sds_controller_enable_port_task_scheduler(scic);

	/* Assign all the task entries to scic physical function */
	scic_sds_controller_assign_task_entries(scic);

	/* Now initialize the completion queue */
	scic_sds_controller_initialize_completion_queue(scic);

	/* Initialize the unsolicited frame queue for use */
	scic_sds_controller_initialize_unsolicited_frame_queue(scic);

	/* Start all of the ports on this controller */
	for (index = 0; index < scic->logical_port_entries; index++) {
		struct scic_sds_port *sci_port = &ihost->ports[index].sci;

		result = scic_sds_port_start(sci_port);
		if (result)
			return result;
	}

	scic_sds_controller_start_next_phy(scic);

	isci_timer_start(scic->timeout_timer, timeout);

	sci_base_state_machine_change_state(&scic->state_machine,
					    SCI_BASE_CONTROLLER_STATE_STARTING);

	return SCI_SUCCESS;
}

void isci_host_scan_start(struct Scsi_Host *shost)
{
	struct isci_host *ihost = SHOST_TO_SAS_HA(shost)->lldd_ha;
	unsigned long tmo = scic_controller_get_suggested_start_timeout(&ihost->sci);

	set_bit(IHOST_START_PENDING, &ihost->flags);

	spin_lock_irq(&ihost->scic_lock);
	scic_controller_start(&ihost->sci, tmo);
	scic_controller_enable_interrupts(&ihost->sci);
	spin_unlock_irq(&ihost->scic_lock);
}

static void isci_host_stop_complete(struct isci_host *ihost, enum sci_status completion_status)
{
	isci_host_change_state(ihost, isci_stopped);
	scic_controller_disable_interrupts(&ihost->sci);
	clear_bit(IHOST_STOP_PENDING, &ihost->flags);
	wake_up(&ihost->eventq);
}

static void scic_sds_controller_completion_handler(struct scic_sds_controller *scic)
{
	/* Empty out the completion queue */
	if (scic_sds_controller_completion_queue_has_entries(scic))
		scic_sds_controller_process_completions(scic);

	/* Clear the interrupt and enable all interrupts again */
	writel(SMU_ISR_COMPLETION, &scic->smu_registers->interrupt_status);
	/* Could we write the value of SMU_ISR_COMPLETION? */
	writel(0xFF000000, &scic->smu_registers->interrupt_mask);
	writel(0, &scic->smu_registers->interrupt_mask);
}

/**
 * isci_host_completion_routine() - This function is the delayed service
 *    routine that calls the sci core library's completion handler. It's
 *    scheduled as a tasklet from the interrupt service routine when
 *    interrupts are in use, or set as the timeout function in polled mode.
 * @data: This parameter specifies the ISCI host object
 *
 */
static void isci_host_completion_routine(unsigned long data)
{
	struct isci_host *isci_host = (struct isci_host *)data;
	struct list_head completed_request_list;
	struct list_head errored_request_list;
	struct list_head *current_position;
	struct list_head *next_position;
	struct isci_request *request;
	struct isci_request *next_request;
	struct sas_task *task;

	INIT_LIST_HEAD(&completed_request_list);
	INIT_LIST_HEAD(&errored_request_list);

	spin_lock_irq(&isci_host->scic_lock);

	scic_sds_controller_completion_handler(&isci_host->sci);

	/* Take the lists of completed I/Os from the host. */
	list_splice_init(&isci_host->requests_to_complete,
			 &completed_request_list);

	/* Take the list of errored I/Os from the host. */
	list_splice_init(&isci_host->requests_to_errorback,
			 &errored_request_list);

	spin_unlock_irq(&isci_host->scic_lock);

	/* Process any completions in the lists. */
	list_for_each_safe(current_position, next_position,
			   &completed_request_list) {

		request = list_entry(current_position, struct isci_request,
				     completed_node);
		task = isci_request_access_task(request);

		/* Normal notification (task_done) */
		dev_dbg(&isci_host->pdev->dev,
			"%s: Normal - request/task = %p/%p\n",
			__func__,
			request,
			task);

		/* Return the task to libsas */
		if (task != NULL) {

			task->lldd_task = NULL;
			if (!(task->task_state_flags & SAS_TASK_STATE_ABORTED)) {

				/* If the task is already in the abort path,
				 * the task_done callback cannot be called.
				 */
				task->task_done(task);
			}
		}
		/* Free the request object. */
		isci_request_free(isci_host, request);
	}
	list_for_each_entry_safe(request, next_request, &errored_request_list,
				 completed_node) {

		task = isci_request_access_task(request);

		/* Use sas_task_abort */
		dev_warn(&isci_host->pdev->dev,
			 "%s: Error - request/task = %p/%p\n",
			 __func__,
			 request,
			 task);

		if (task != NULL) {

			/* Put the task into the abort path if it's not there
			 * already.
			 */
			if (!(task->task_state_flags & SAS_TASK_STATE_ABORTED))
				sas_task_abort(task);

		} else {
			/* This is a case where the request has completed with
			 * a status such that it needed further target
			 * servicing, but the sas_task reference has already
			 * been removed from the request. Since it was
			 * errored, it was not being aborted, so there is
			 * nothing to do except free it.
			 */

			spin_lock_irq(&isci_host->scic_lock);
			/* Remove the request from the remote device's list
			 * of pending requests.
			 */
			list_del_init(&request->dev_node);
			spin_unlock_irq(&isci_host->scic_lock);

			/* Free the request object. */
			isci_request_free(isci_host, request);
		}
	}
}

/**
 * scic_controller_stop() - This method will stop an individual controller
 *    object. This method will invoke the associated user callback upon
 *    completion. The completion callback is called when the following
 *    conditions are met: (1) the method return status is SCI_SUCCESS, and
 *    (2) the controller has been quiesced. This method will ensure that all
 *    IO requests are quiesced, phys are stopped, and all additional
 *    operation by the hardware is halted.
 * @controller: the handle to the controller object to stop.
 * @timeout: This parameter specifies the number of milliseconds in which the
 *    stop operation should complete.
 *
 * The controller must be in the STARTED or STOPPED state. Indicate if the
 * controller stop method succeeded or failed in some way. SCI_SUCCESS if the
 * stop operation successfully began. SCI_WARNING_ALREADY_IN_STATE if the
 * controller is already in the STOPPED state. SCI_FAILURE_INVALID_STATE if
 * the controller is not either in the STARTED or STOPPED states.
 */
static enum sci_status scic_controller_stop(struct scic_sds_controller *scic,
					    u32 timeout)
{
	if (scic->state_machine.current_state_id !=
	    SCI_BASE_CONTROLLER_STATE_READY) {
		dev_warn(scic_to_dev(scic),
			 "SCIC Controller stop operation requested in "
			 "invalid state\n");
		return SCI_FAILURE_INVALID_STATE;
	}

	isci_timer_start(scic->timeout_timer, timeout);
	sci_base_state_machine_change_state(&scic->state_machine,
					    SCI_BASE_CONTROLLER_STATE_STOPPING);
	return SCI_SUCCESS;
}

/**
 * scic_controller_reset() - This method will reset the supplied core
 *    controller regardless of the state of said controller. This operation
 *    is considered destructive. In other words, all current operations are
 *    wiped out. No IO completions for outstanding devices occur. Outstanding
 *    IO requests are not aborted or completed at the actual remote device.
 * @controller: the handle to the controller object to reset.
 *
 * Indicate if the controller reset method succeeded or failed in some way.
 * SCI_SUCCESS if the reset operation successfully started. SCI_FATAL_ERROR
 * if the controller reset operation is unable to complete.
 */
static enum sci_status scic_controller_reset(struct scic_sds_controller *scic)
{
	switch (scic->state_machine.current_state_id) {
	case SCI_BASE_CONTROLLER_STATE_RESET:
	case SCI_BASE_CONTROLLER_STATE_READY:
	case SCI_BASE_CONTROLLER_STATE_STOPPED:
	case SCI_BASE_CONTROLLER_STATE_FAILED:
		/*
		 * The reset operation is not a graceful cleanup, just
		 * perform the state transition.
		 */
		sci_base_state_machine_change_state(&scic->state_machine,
						    SCI_BASE_CONTROLLER_STATE_RESETTING);
		return SCI_SUCCESS;
	default:
		dev_warn(scic_to_dev(scic),
			 "SCIC Controller reset operation requested in "
			 "invalid state\n");
		return SCI_FAILURE_INVALID_STATE;
	}
}

void isci_host_deinit(struct isci_host *ihost)
{
	int i;

	isci_host_change_state(ihost, isci_stopping);
	for (i = 0; i < SCI_MAX_PORTS; i++) {
		struct isci_port *iport = &ihost->ports[i];
		struct isci_remote_device *idev, *d;

		list_for_each_entry_safe(idev, d, &iport->remote_dev_list, node) {
			isci_remote_device_change_state(idev, isci_stopping);
			isci_remote_device_stop(ihost, idev);
		}
	}

	set_bit(IHOST_STOP_PENDING, &ihost->flags);

	spin_lock_irq(&ihost->scic_lock);
	scic_controller_stop(&ihost->sci, SCIC_CONTROLLER_STOP_TIMEOUT);
	spin_unlock_irq(&ihost->scic_lock);

	wait_for_stop(ihost);
	scic_controller_reset(&ihost->sci);

	/* Cancel any/all outstanding port timers */
	for (i = 0; i < ihost->sci.logical_port_entries; i++) {
		struct scic_sds_port *sci_port = &ihost->ports[i].sci;
		del_timer_sync(&sci_port->timer.timer);
	}

	/* Cancel any/all outstanding phy timers */
	for (i = 0; i < SCI_MAX_PHYS; i++) {
		struct scic_sds_phy *sci_phy = &ihost->phys[i].sci;
		del_timer_sync(&sci_phy->sata_timer.timer);
	}

	del_timer_sync(&ihost->sci.port_agent.timer.timer);

	del_timer_sync(&ihost->sci.power_control.timer.timer);

	isci_timer_list_destroy(ihost);
}

static void __iomem *scu_base(struct isci_host *isci_host)
{
	struct pci_dev *pdev = isci_host->pdev;
	int id = isci_host->id;

	return pcim_iomap_table(pdev)[SCI_SCU_BAR * 2] + SCI_SCU_BAR_SIZE * id;
}

static void __iomem *smu_base(struct isci_host *isci_host)
{
	struct pci_dev *pdev = isci_host->pdev;
	int id = isci_host->id;

	return pcim_iomap_table(pdev)[SCI_SMU_BAR * 2] + SCI_SMU_BAR_SIZE * id;
}

static void isci_user_parameters_get(
	struct isci_host *isci_host,
	union scic_user_parameters *scic_user_params)
{
	struct scic_sds_user_parameters *u = &scic_user_params->sds1;
	int i;

	for (i = 0; i < SCI_MAX_PHYS; i++) {
		struct sci_phy_user_params *u_phy = &u->phys[i];

		u_phy->max_speed_generation = phy_gen;

		/* we are not exporting these for now */
		u_phy->align_insertion_frequency = 0x7f;
		u_phy->in_connection_align_insertion_frequency = 0xff;
		u_phy->notify_enable_spin_up_insertion_frequency = 0x33;
	}

	u->stp_inactivity_timeout = stp_inactive_to;
	u->ssp_inactivity_timeout = ssp_inactive_to;
	u->stp_max_occupancy_timeout = stp_max_occ_to;
	u->ssp_max_occupancy_timeout = ssp_max_occ_to;
	u->no_outbound_task_timeout = no_outbound_task_to;
	u->max_number_concurrent_device_spin_up = max_concurr_spinup;
}

static void scic_sds_controller_initial_state_enter(struct sci_base_state_machine *sm)
{
	struct scic_sds_controller *scic = container_of(sm, typeof(*scic), state_machine);

	sci_base_state_machine_change_state(&scic->state_machine,
					    SCI_BASE_CONTROLLER_STATE_RESET);
}

static inline void scic_sds_controller_starting_state_exit(struct sci_base_state_machine *sm)
{
	struct scic_sds_controller *scic = container_of(sm, typeof(*scic), state_machine);

	isci_timer_stop(scic->timeout_timer);
}

#define INTERRUPT_COALESCE_TIMEOUT_BASE_RANGE_LOWER_BOUND_NS 853
#define INTERRUPT_COALESCE_TIMEOUT_BASE_RANGE_UPPER_BOUND_NS 1280
#define INTERRUPT_COALESCE_TIMEOUT_MAX_US 2700000
#define INTERRUPT_COALESCE_NUMBER_MAX 256
#define INTERRUPT_COALESCE_TIMEOUT_ENCODE_MIN 7
#define INTERRUPT_COALESCE_TIMEOUT_ENCODE_MAX 28

1446/**
1447 * scic_controller_set_interrupt_coalescence() - This method allows the user to
1448 * configure the interrupt coalescence.
1449 * @controller: This parameter represents the handle to the controller object
1450 * for which its interrupt coalesce register is overridden.
1451 * @coalesce_number: Used to control the number of entries in the Completion
1452 * Queue before an interrupt is generated. If the number of entries exceed
1453 * this number, an interrupt will be generated. The valid range of the input
1454 * is [0, 256]. A setting of 0 results in coalescing being disabled.
1455 * @coalesce_timeout: Timeout value in microseconds. The valid range of the
1456 * input is [0, 2700000] . A setting of 0 is allowed and results in no
1457 * interrupt coalescing timeout.
1458 *
1459 * Indicate if the user successfully set the interrupt coalesce parameters.
1460 * SCI_SUCCESS The user successfully updated the interrutp coalescence.
1461 * SCI_FAILURE_INVALID_PARAMETER_VALUE The user input value is out of range.
1462 */
static enum sci_status scic_controller_set_interrupt_coalescence(
	struct scic_sds_controller *scic_controller,
	u32 coalesce_number,
	u32 coalesce_timeout)
{
	u8 timeout_encode = 0;
	u32 min = 0;
	u32 max = 0;

	/* Check if the input parameters fall in the range. */
	if (coalesce_number > INTERRUPT_COALESCE_NUMBER_MAX)
		return SCI_FAILURE_INVALID_PARAMETER_VALUE;

	/*
	 *  Defined encoding for interrupt coalescing timeout:
	 *              Value   Min      Max     Units
	 *              -----   ---      ---     -----
	 *              0       -        -       Disabled
	 *              1       13.3     20.0    ns
	 *              2       26.7     40.0
	 *              3       53.3     80.0
	 *              4       106.7    160.0
	 *              5       213.3    320.0
	 *              6       426.7    640.0
	 *              7       853.3    1280.0
	 *              8       1.7      2.6     us
	 *              9       3.4      5.1
	 *              10      6.8      10.2
	 *              11      13.7     20.5
	 *              12      27.3     41.0
	 *              13      54.6     81.9
	 *              14      109.2    163.8
	 *              15      218.5    327.7
	 *              16      436.9    655.4
	 *              17      873.8    1310.7
	 *              18      1.7      2.6     ms
	 *              19      3.5      5.2
	 *              20      7.0      10.5
	 *              21      14.0     21.0
	 *              22      28.0     41.9
	 *              23      55.9     83.9
	 *              24      111.8    167.8
	 *              25      223.7    335.5
	 *              26      447.4    671.1
	 *              27      894.8    1342.2
	 *              28      1.8      2.7     s
	 *              Others Undefined */

	/*
	 * Use the table above to decide the encode of interrupt coalescing
	 * timeout value for register writing. */
	if (coalesce_timeout == 0)
		timeout_encode = 0;
	else {
		/* make the timeout value in unit of (10 ns). */
		coalesce_timeout = coalesce_timeout * 100;
		min = INTERRUPT_COALESCE_TIMEOUT_BASE_RANGE_LOWER_BOUND_NS / 10;
		max = INTERRUPT_COALESCE_TIMEOUT_BASE_RANGE_UPPER_BOUND_NS / 10;

		/* get the encode of timeout for register writing. */
		for (timeout_encode = INTERRUPT_COALESCE_TIMEOUT_ENCODE_MIN;
		     timeout_encode <= INTERRUPT_COALESCE_TIMEOUT_ENCODE_MAX;
		     timeout_encode++) {
			if (min <= coalesce_timeout && max > coalesce_timeout)
				break;
			else if (coalesce_timeout >= max && coalesce_timeout < min * 2
				 && coalesce_timeout <= INTERRUPT_COALESCE_TIMEOUT_MAX_US * 100) {
				if ((coalesce_timeout - max) < (2 * min - coalesce_timeout))
					break;
				else {
					timeout_encode++;
					break;
				}
			} else {
				max = max * 2;
				min = min * 2;
			}
		}

		if (timeout_encode == INTERRUPT_COALESCE_TIMEOUT_ENCODE_MAX + 1)
			/* the value is out of range. */
			return SCI_FAILURE_INVALID_PARAMETER_VALUE;
	}

	writel(SMU_ICC_GEN_VAL(NUMBER, coalesce_number) |
	       SMU_ICC_GEN_VAL(TIMER, timeout_encode),
	       &scic_controller->smu_registers->interrupt_coalesce_control);

	scic_controller->interrupt_coalesce_number = (u16)coalesce_number;
	scic_controller->interrupt_coalesce_timeout = coalesce_timeout / 100;

	return SCI_SUCCESS;
}
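
/*
 * Worked example for the loop above: a requested timeout of 250us is
 * first scaled to 25000 ten-nanosecond units.  Starting from min = 85
 * and max = 128 (encode 7, after integer division by 10), both bounds
 * double on each non-matching pass, so after eight doublings min = 21760
 * and max = 32768, which brackets 25000; the loop therefore stops at
 * timeout_encode = 15 (the 218.5us to 327.7us row of the table).
 */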

static void scic_sds_controller_ready_state_enter(struct sci_base_state_machine *sm)
{
	struct scic_sds_controller *scic = container_of(sm, typeof(*scic), state_machine);

	/* set the default interrupt coalescence number and timeout value. */
	scic_controller_set_interrupt_coalescence(scic, 0x10, 250);
}

static void scic_sds_controller_ready_state_exit(struct sci_base_state_machine *sm)
{
	struct scic_sds_controller *scic = container_of(sm, typeof(*scic), state_machine);

	/* disable interrupt coalescence. */
	scic_controller_set_interrupt_coalescence(scic, 0, 0);
}

static enum sci_status scic_sds_controller_stop_phys(struct scic_sds_controller *scic)
{
	u32 index;
	enum sci_status status;
	enum sci_status phy_status;
	struct isci_host *ihost = scic_to_ihost(scic);

	status = SCI_SUCCESS;

	for (index = 0; index < SCI_MAX_PHYS; index++) {
		phy_status = scic_sds_phy_stop(&ihost->phys[index].sci);

		if (phy_status != SCI_SUCCESS &&
		    phy_status != SCI_FAILURE_INVALID_STATE) {
			status = SCI_FAILURE;

			dev_warn(scic_to_dev(scic),
				 "%s: Controller stop operation failed to stop "
				 "phy %d because of status %d.\n",
				 __func__,
				 ihost->phys[index].sci.phy_index, phy_status);
		}
	}

	return status;
}

static enum sci_status scic_sds_controller_stop_ports(struct scic_sds_controller *scic)
{
	u32 index;
	enum sci_status port_status;
	enum sci_status status = SCI_SUCCESS;
	struct isci_host *ihost = scic_to_ihost(scic);

	for (index = 0; index < scic->logical_port_entries; index++) {
		struct scic_sds_port *sci_port = &ihost->ports[index].sci;

		port_status = scic_sds_port_stop(sci_port);

		if ((port_status != SCI_SUCCESS) &&
		    (port_status != SCI_FAILURE_INVALID_STATE)) {
			status = SCI_FAILURE;

			dev_warn(scic_to_dev(scic),
				 "%s: Controller stop operation failed to "
				 "stop port %d because of status %d.\n",
				 __func__,
				 sci_port->logical_port_index,
				 port_status);
		}
	}

	return status;
}

static enum sci_status scic_sds_controller_stop_devices(struct scic_sds_controller *scic)
{
	u32 index;
	enum sci_status status;
	enum sci_status device_status;

	status = SCI_SUCCESS;

	for (index = 0; index < scic->remote_node_entries; index++) {
		if (scic->device_table[index] != NULL) {
			/* @todo What timeout value do we want to provide to this request? */
			device_status = scic_remote_device_stop(scic->device_table[index], 0);

			if ((device_status != SCI_SUCCESS) &&
			    (device_status != SCI_FAILURE_INVALID_STATE)) {
				dev_warn(scic_to_dev(scic),
					 "%s: Controller stop operation failed "
					 "to stop device 0x%p because of "
					 "status %d.\n",
					 __func__,
					 scic->device_table[index], device_status);
			}
		}
	}

	return status;
}

static void scic_sds_controller_stopping_state_enter(struct sci_base_state_machine *sm)
{
	struct scic_sds_controller *scic = container_of(sm, typeof(*scic), state_machine);

	/* Stop all of the components for this controller */
	scic_sds_controller_stop_phys(scic);
	scic_sds_controller_stop_ports(scic);
	scic_sds_controller_stop_devices(scic);
}

static void scic_sds_controller_stopping_state_exit(struct sci_base_state_machine *sm)
{
	struct scic_sds_controller *scic = container_of(sm, typeof(*scic), state_machine);

	isci_timer_stop(scic->timeout_timer);
}


/**
 * scic_sds_controller_reset_hardware() -
 *
 * This method will reset the controller hardware.
 */
static void scic_sds_controller_reset_hardware(struct scic_sds_controller *scic)
{
	/* Disable interrupts so we don't take any spurious interrupts */
	scic_controller_disable_interrupts(scic);

	/* Reset the SCU */
	writel(0xFFFFFFFF, &scic->smu_registers->soft_reset_control);

	/* Delay for 1ms before clearing the CQP and UFQPR. */
	udelay(1000);

	/* The write to the CQGR clears the CQP */
	writel(0x00000000, &scic->smu_registers->completion_queue_get);

	/* The write to the UFQGP clears the UFQPR */
	writel(0, &scic->scu_registers->sdma.unsolicited_frame_get_pointer);
}

static void scic_sds_controller_resetting_state_enter(struct sci_base_state_machine *sm)
{
	struct scic_sds_controller *scic = container_of(sm, typeof(*scic), state_machine);

	scic_sds_controller_reset_hardware(scic);
	sci_base_state_machine_change_state(&scic->state_machine,
					    SCI_BASE_CONTROLLER_STATE_RESET);
}

static const struct sci_base_state scic_sds_controller_state_table[] = {
	[SCI_BASE_CONTROLLER_STATE_INITIAL] = {
		.enter_state = scic_sds_controller_initial_state_enter,
	},
	[SCI_BASE_CONTROLLER_STATE_RESET] = {},
	[SCI_BASE_CONTROLLER_STATE_INITIALIZING] = {},
	[SCI_BASE_CONTROLLER_STATE_INITIALIZED] = {},
	[SCI_BASE_CONTROLLER_STATE_STARTING] = {
		.exit_state = scic_sds_controller_starting_state_exit,
	},
	[SCI_BASE_CONTROLLER_STATE_READY] = {
		.enter_state = scic_sds_controller_ready_state_enter,
		.exit_state = scic_sds_controller_ready_state_exit,
	},
	[SCI_BASE_CONTROLLER_STATE_RESETTING] = {
		.enter_state = scic_sds_controller_resetting_state_enter,
	},
	[SCI_BASE_CONTROLLER_STATE_STOPPING] = {
		.enter_state = scic_sds_controller_stopping_state_enter,
		.exit_state = scic_sds_controller_stopping_state_exit,
	},
	[SCI_BASE_CONTROLLER_STATE_STOPPED] = {},
	[SCI_BASE_CONTROLLER_STATE_FAILED] = {}
};
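
/*
 * A minimal sketch (not compiled, and not part of the driver) of how a
 * table like the one above is typically consumed, assuming that
 * sci_base_state_machine_change_state() runs the optional exit handler of
 * the current state followed by the optional enter handler of the new one.
 * The field names used here are illustrative; the actual dispatch lives in
 * the state machine implementation, not in this file.
 */
#if 0
static void example_change_state(struct sci_base_state_machine *sm, u32 next)
{
	const struct sci_base_state *table = sm->state_table;

	/* leave the old state, then enter the new one */
	if (table[sm->current_state_id].exit_state)
		table[sm->current_state_id].exit_state(sm);
	sm->current_state_id = next;
	if (table[next].enter_state)
		table[next].enter_state(sm);
}
#endif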

static void scic_sds_controller_set_default_config_parameters(struct scic_sds_controller *scic)
{
	/* these defaults are overridden by the platform / firmware */
	struct isci_host *ihost = scic_to_ihost(scic);
	u16 index;

	/* Default to APC mode. */
	scic->oem_parameters.sds1.controller.mode_type = SCIC_PORT_AUTOMATIC_CONFIGURATION_MODE;

	/* Default to allowing a single device to spin up at a time. */
	scic->oem_parameters.sds1.controller.max_concurrent_dev_spin_up = 1;

	/* Default to no SSC operation. */
	scic->oem_parameters.sds1.controller.do_enable_ssc = false;

	/* Initialize all of the port parameter information to narrow ports. */
	for (index = 0; index < SCI_MAX_PORTS; index++) {
		scic->oem_parameters.sds1.ports[index].phy_mask = 0;
	}

	/* Initialize all of the phy parameter information. */
	for (index = 0; index < SCI_MAX_PHYS; index++) {
		/* Default to 6G (i.e. Gen 3) for now. */
		scic->user_parameters.sds1.phys[index].max_speed_generation = 3;

		/* the frequencies cannot be 0 */
		scic->user_parameters.sds1.phys[index].align_insertion_frequency = 0x7f;
		scic->user_parameters.sds1.phys[index].in_connection_align_insertion_frequency = 0xff;
		scic->user_parameters.sds1.phys[index].notify_enable_spin_up_insertion_frequency = 0x33;

		/*
		 * Previous Vitesse based expanders had an arbitration issue that
		 * is worked around by having the upper 32-bits of SAS address
		 * with a value greater than the Vitesse company identifier.
		 * Hence, usage of 0x5FCFFFFF. */
		scic->oem_parameters.sds1.phys[index].sas_address.low = 0x1 + ihost->id;
		scic->oem_parameters.sds1.phys[index].sas_address.high = 0x5FCFFFFF;
	}

	scic->user_parameters.sds1.stp_inactivity_timeout = 5;
	scic->user_parameters.sds1.ssp_inactivity_timeout = 5;
	scic->user_parameters.sds1.stp_max_occupancy_timeout = 5;
	scic->user_parameters.sds1.ssp_max_occupancy_timeout = 20;
	scic->user_parameters.sds1.no_outbound_task_timeout = 20;
}


/**
 * scic_controller_construct() - This method will attempt to construct a
 *    controller object utilizing the supplied parameter information.
 * @scic: This parameter specifies the controller to be constructed.
 * @scu_base: mapped base address of the scu registers
 * @smu_base: mapped base address of the smu registers
 *
 * Indicate if the controller was successfully constructed or if it failed in
 * some way. SCI_SUCCESS This value is returned if the controller was
 * successfully constructed. SCI_WARNING_TIMER_CONFLICT This value is returned
 * if the interrupt coalescence timer may cause SAS compliance issues for SMP
 * Target mode response processing. SCI_FAILURE_UNSUPPORTED_CONTROLLER_TYPE
 * This value is returned if the controller does not support the supplied type.
 * SCI_FAILURE_UNSUPPORTED_INIT_DATA_VERSION This value is returned if the
 * controller does not support the supplied initialization data version.
 */
static enum sci_status scic_controller_construct(struct scic_sds_controller *scic,
						 void __iomem *scu_base,
						 void __iomem *smu_base)
{
	struct isci_host *ihost = scic_to_ihost(scic);
	u8 i;

	sci_base_state_machine_construct(&scic->state_machine,
					 scic_sds_controller_state_table,
					 SCI_BASE_CONTROLLER_STATE_INITIAL);

	sci_base_state_machine_start(&scic->state_machine);

	scic->scu_registers = scu_base;
	scic->smu_registers = smu_base;

	scic_sds_port_configuration_agent_construct(&scic->port_agent);

	/* Construct the ports for this controller, plus the dummy port that
	 * lives one slot past SCI_MAX_PORTS ('i' carries the value
	 * SCI_MAX_PORTS out of the loop). */
	for (i = 0; i < SCI_MAX_PORTS; i++)
		scic_sds_port_construct(&ihost->ports[i].sci, i, scic);
	scic_sds_port_construct(&ihost->ports[i].sci, SCIC_SDS_DUMMY_PORT, scic);

	/* Construct the phys for this controller */
	for (i = 0; i < SCI_MAX_PHYS; i++) {
		/* Add all the PHYs to the dummy port */
		scic_sds_phy_construct(&ihost->phys[i].sci,
				       &ihost->ports[SCI_MAX_PORTS].sci, i);
	}

	scic->invalid_phy_mask = 0;

	/* Set the default maximum values */
	scic->completion_event_entries = SCU_EVENT_COUNT;
	scic->completion_queue_entries = SCU_COMPLETION_QUEUE_COUNT;
	scic->remote_node_entries = SCI_MAX_REMOTE_DEVICES;
	scic->logical_port_entries = SCI_MAX_PORTS;
	scic->task_context_entries = SCU_IO_REQUEST_COUNT;
	scic->uf_control.buffers.count = SCU_UNSOLICITED_FRAME_COUNT;
	scic->uf_control.address_table.count = SCU_UNSOLICITED_FRAME_COUNT;

	/* Initialize the User and OEM parameters to default values. */
	scic_sds_controller_set_default_config_parameters(scic);

	return scic_controller_reset(scic);
}

int scic_oem_parameters_validate(struct scic_sds_oem_params *oem)
{
	int i;

	for (i = 0; i < SCI_MAX_PORTS; i++)
		if (oem->ports[i].phy_mask > SCIC_SDS_PARM_PHY_MASK_MAX)
			return -EINVAL;

	for (i = 0; i < SCI_MAX_PHYS; i++)
		if (oem->phys[i].sas_address.high == 0 &&
		    oem->phys[i].sas_address.low == 0)
			return -EINVAL;

	if (oem->controller.mode_type == SCIC_PORT_AUTOMATIC_CONFIGURATION_MODE) {
		for (i = 0; i < SCI_MAX_PHYS; i++)
			if (oem->ports[i].phy_mask != 0)
				return -EINVAL;
	} else if (oem->controller.mode_type == SCIC_PORT_MANUAL_CONFIGURATION_MODE) {
		u8 phy_mask = 0;

		for (i = 0; i < SCI_MAX_PHYS; i++)
			phy_mask |= oem->ports[i].phy_mask;

		if (phy_mask == 0)
			return -EINVAL;
	} else
		return -EINVAL;

	if (oem->controller.max_concurrent_dev_spin_up > MAX_CONCURRENT_DEVICE_SPIN_UP_COUNT)
		return -EINVAL;

	return 0;
}

static enum sci_status scic_oem_parameters_set(struct scic_sds_controller *scic,
					       union scic_oem_parameters *scic_parms)
{
	u32 state = scic->state_machine.current_state_id;

	if (state == SCI_BASE_CONTROLLER_STATE_RESET ||
	    state == SCI_BASE_CONTROLLER_STATE_INITIALIZING ||
	    state == SCI_BASE_CONTROLLER_STATE_INITIALIZED) {

		if (scic_oem_parameters_validate(&scic_parms->sds1))
			return SCI_FAILURE_INVALID_PARAMETER_VALUE;
		scic->oem_parameters.sds1 = scic_parms->sds1;

		return SCI_SUCCESS;
	}

	return SCI_FAILURE_INVALID_STATE;
}

void scic_oem_parameters_get(
	struct scic_sds_controller *scic,
	union scic_oem_parameters *scic_parms)
{
	memcpy(scic_parms, (&scic->oem_parameters), sizeof(*scic_parms));
}

static void scic_sds_controller_timeout_handler(void *_scic)
{
	struct scic_sds_controller *scic = _scic;
	struct isci_host *ihost = scic_to_ihost(scic);
	struct sci_base_state_machine *sm = &scic->state_machine;

	if (sm->current_state_id == SCI_BASE_CONTROLLER_STATE_STARTING)
		scic_sds_controller_transition_to_ready(scic, SCI_FAILURE_TIMEOUT);
	else if (sm->current_state_id == SCI_BASE_CONTROLLER_STATE_STOPPING) {
		sci_base_state_machine_change_state(sm, SCI_BASE_CONTROLLER_STATE_FAILED);
		isci_host_stop_complete(ihost, SCI_FAILURE_TIMEOUT);
	} else /* @todo Now what do we want to do in this case? */
		dev_err(scic_to_dev(scic),
			"%s: Controller timer fired when controller was not "
			"in a state being timed.\n",
			__func__);
}

static enum sci_status scic_sds_controller_initialize_phy_startup(struct scic_sds_controller *scic)
{
	struct isci_host *ihost = scic_to_ihost(scic);

	scic->phy_startup_timer = isci_timer_create(ihost,
						    scic,
						    scic_sds_controller_phy_startup_timeout_handler);

	if (scic->phy_startup_timer == NULL)
		return SCI_FAILURE_INSUFFICIENT_RESOURCES;
	else {
		scic->next_phy_to_start = 0;
		scic->phy_startup_timer_pending = false;
	}

	return SCI_SUCCESS;
}

static void power_control_timeout(unsigned long data)
{
	struct sci_timer *tmr = (struct sci_timer *)data;
	struct scic_sds_controller *scic = container_of(tmr, typeof(*scic), power_control.timer);
	struct isci_host *ihost = scic_to_ihost(scic);
	struct scic_sds_phy *sci_phy;
	unsigned long flags;
	u8 i;

	spin_lock_irqsave(&ihost->scic_lock, flags);

	if (tmr->cancel)
		goto done;

	scic->power_control.phys_granted_power = 0;

	if (scic->power_control.phys_waiting == 0) {
		scic->power_control.timer_started = false;
		goto done;
	}

	for (i = 0; i < SCI_MAX_PHYS; i++) {

		if (scic->power_control.phys_waiting == 0)
			break;

		sci_phy = scic->power_control.requesters[i];
		if (sci_phy == NULL)
			continue;

		if (scic->power_control.phys_granted_power >=
		    scic->oem_parameters.sds1.controller.max_concurrent_dev_spin_up)
			break;

		scic->power_control.requesters[i] = NULL;
		scic->power_control.phys_waiting--;
		scic->power_control.phys_granted_power++;
		scic_sds_phy_consume_power_handler(sci_phy);
	}

	/*
	 * It doesn't matter if the power list is empty, we need to start the
	 * timer in case another phy becomes ready.
	 */
	sci_mod_timer(tmr, SCIC_SDS_CONTROLLER_POWER_CONTROL_INTERVAL);
	scic->power_control.timer_started = true;

done:
	spin_unlock_irqrestore(&ihost->scic_lock, flags);
}

/**
 * This method inserts the phy into the stagger spinup control queue.
 * @scic:
 *
 */
void scic_sds_controller_power_control_queue_insert(
	struct scic_sds_controller *scic,
	struct scic_sds_phy *sci_phy)
{
	BUG_ON(sci_phy == NULL);

	if (scic->power_control.phys_granted_power <
	    scic->oem_parameters.sds1.controller.max_concurrent_dev_spin_up) {
		scic->power_control.phys_granted_power++;
		scic_sds_phy_consume_power_handler(sci_phy);

		/*
		 * stop and start the power_control timer. When the timer fires,
		 * phys_granted_power will be reset to 0
		 */
		if (scic->power_control.timer_started)
			sci_del_timer(&scic->power_control.timer);

		sci_mod_timer(&scic->power_control.timer,
			      SCIC_SDS_CONTROLLER_POWER_CONTROL_INTERVAL);
		scic->power_control.timer_started = true;

	} else {
		/* Add the phy in the waiting list */
		scic->power_control.requesters[sci_phy->phy_index] = sci_phy;
		scic->power_control.phys_waiting++;
	}
}
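
/*
 * Illustrative flow (not compiled): how the staggered spin-up plumbing
 * above behaves from a phy's point of view.  Names are taken from this
 * file; the surrounding exclusion is assumed to be the host's scic_lock,
 * as in power_control_timeout().
 */
#if 0
	/* A phy that reaches its power-up point asks for power... */
	scic_sds_controller_power_control_queue_insert(scic, sci_phy);
	/*
	 * ...and is either granted immediately (counted against
	 * max_concurrent_dev_spin_up) or parked in
	 * power_control.requesters[] until power_control_timeout() fires,
	 * zeroes phys_granted_power, and grants the waiting phys.
	 */
#endif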

/**
 * This method removes the phy from the stagger spinup control queue.
 * @scic:
 *
 */
void scic_sds_controller_power_control_queue_remove(
	struct scic_sds_controller *scic,
	struct scic_sds_phy *sci_phy)
{
	BUG_ON(sci_phy == NULL);

	if (scic->power_control.requesters[sci_phy->phy_index] != NULL) {
		scic->power_control.phys_waiting--;
	}

	scic->power_control.requesters[sci_phy->phy_index] = NULL;
}

#define AFE_REGISTER_WRITE_DELAY 10

/* Initialize the AFE for this phy index. We need to read the AFE setup from
 * the OEM parameters
 */
static void scic_sds_controller_afe_initialization(struct scic_sds_controller *scic)
{
	const struct scic_sds_oem_params *oem = &scic->oem_parameters.sds1;
	u32 afe_status;
	u32 phy_id;

	/* Clear DFX Status registers */
	writel(0x0081000f, &scic->scu_registers->afe.afe_dfx_master_control0);
	udelay(AFE_REGISTER_WRITE_DELAY);

	if (is_b0()) {
		/* PM Rx Equalization Save, PM SPhy Rx Acknowledgement
		 * Timer, PM Stagger Timer */
		writel(0x0007BFFF, &scic->scu_registers->afe.afe_pmsn_master_control2);
		udelay(AFE_REGISTER_WRITE_DELAY);
	}

	/* Configure bias currents to normal */
	if (is_a0())
		writel(0x00005500, &scic->scu_registers->afe.afe_bias_control);
	else if (is_a2())
		writel(0x00005A00, &scic->scu_registers->afe.afe_bias_control);
	else if (is_b0())
		writel(0x00005F00, &scic->scu_registers->afe.afe_bias_control);

	udelay(AFE_REGISTER_WRITE_DELAY);

	/* Enable PLL */
	if (is_b0())
		writel(0x80040A08, &scic->scu_registers->afe.afe_pll_control0);
	else
		writel(0x80040908, &scic->scu_registers->afe.afe_pll_control0);

	udelay(AFE_REGISTER_WRITE_DELAY);

	/* Wait for the PLL to lock */
	do {
		afe_status = readl(&scic->scu_registers->afe.afe_common_block_status);
		udelay(AFE_REGISTER_WRITE_DELAY);
	} while ((afe_status & 0x00001000) == 0);

	if (is_a0() || is_a2()) {
		/* Shorten SAS SNW lock time (RxLock timer value from 76 us to 50 us) */
		writel(0x7bcc96ad, &scic->scu_registers->afe.afe_pmsn_master_control0);
		udelay(AFE_REGISTER_WRITE_DELAY);
	}

	for (phy_id = 0; phy_id < SCI_MAX_PHYS; phy_id++) {
		const struct sci_phy_oem_params *oem_phy = &oem->phys[phy_id];

		if (is_b0()) {
			/* Configure transmitter SSC parameters */
			writel(0x00030000, &scic->scu_registers->afe.scu_afe_xcvr[phy_id].afe_tx_ssc_control);
			udelay(AFE_REGISTER_WRITE_DELAY);
		} else {
			/*
			 * All defaults, except the Receive Word Alignment/Comma Detect
			 * Enable....(0xe800) */
			writel(0x00004512, &scic->scu_registers->afe.scu_afe_xcvr[phy_id].afe_xcvr_control0);
			udelay(AFE_REGISTER_WRITE_DELAY);

			writel(0x0050100F, &scic->scu_registers->afe.scu_afe_xcvr[phy_id].afe_xcvr_control1);
			udelay(AFE_REGISTER_WRITE_DELAY);
		}

		/*
		 * Power up TX and RX out from power down (PWRDNTX and PWRDNRX)
		 * & increase TX int & ext bias 20%....(0xe85c) */
		if (is_a0())
			writel(0x000003D4, &scic->scu_registers->afe.scu_afe_xcvr[phy_id].afe_channel_control);
		else if (is_a2())
			writel(0x000003F0, &scic->scu_registers->afe.scu_afe_xcvr[phy_id].afe_channel_control);
		else {
			/* Power down TX and RX (PWRDNTX and PWRDNRX) */
			writel(0x000003d7, &scic->scu_registers->afe.scu_afe_xcvr[phy_id].afe_channel_control);
			udelay(AFE_REGISTER_WRITE_DELAY);

			/*
			 * Power up TX and RX out from power down (PWRDNTX and PWRDNRX)
			 * & increase TX int & ext bias 20%....(0xe85c) */
			writel(0x000003d4, &scic->scu_registers->afe.scu_afe_xcvr[phy_id].afe_channel_control);
		}
		udelay(AFE_REGISTER_WRITE_DELAY);

		if (is_a0() || is_a2()) {
			/* Enable TX equalization (0xe824) */
			writel(0x00040000, &scic->scu_registers->afe.scu_afe_xcvr[phy_id].afe_tx_control);
			udelay(AFE_REGISTER_WRITE_DELAY);
		}

		/*
		 * RDPI=0x0(RX Power On), RXOOBDETPDNC=0x0, TPD=0x0(TX Power On),
		 * RDD=0x0(RX Detect Enabled) ....(0xe800) */
		writel(0x00004100, &scic->scu_registers->afe.scu_afe_xcvr[phy_id].afe_xcvr_control0);
		udelay(AFE_REGISTER_WRITE_DELAY);

		/* Leave DFE/FFE on */
		if (is_a0())
			writel(0x3F09983F, &scic->scu_registers->afe.scu_afe_xcvr[phy_id].afe_rx_ssc_control0);
		else if (is_a2())
			writel(0x3F11103F, &scic->scu_registers->afe.scu_afe_xcvr[phy_id].afe_rx_ssc_control0);
		else {
			writel(0x3F11103F, &scic->scu_registers->afe.scu_afe_xcvr[phy_id].afe_rx_ssc_control0);
			udelay(AFE_REGISTER_WRITE_DELAY);
			/* Enable TX equalization (0xe824) */
			writel(0x00040000, &scic->scu_registers->afe.scu_afe_xcvr[phy_id].afe_tx_control);
		}
		udelay(AFE_REGISTER_WRITE_DELAY);

		writel(oem_phy->afe_tx_amp_control0,
		       &scic->scu_registers->afe.scu_afe_xcvr[phy_id].afe_tx_amp_control0);
		udelay(AFE_REGISTER_WRITE_DELAY);

		writel(oem_phy->afe_tx_amp_control1,
		       &scic->scu_registers->afe.scu_afe_xcvr[phy_id].afe_tx_amp_control1);
		udelay(AFE_REGISTER_WRITE_DELAY);

		writel(oem_phy->afe_tx_amp_control2,
		       &scic->scu_registers->afe.scu_afe_xcvr[phy_id].afe_tx_amp_control2);
		udelay(AFE_REGISTER_WRITE_DELAY);

		writel(oem_phy->afe_tx_amp_control3,
		       &scic->scu_registers->afe.scu_afe_xcvr[phy_id].afe_tx_amp_control3);
		udelay(AFE_REGISTER_WRITE_DELAY);
	}

	/* Transfer control to the PEs */
	writel(0x00010f00, &scic->scu_registers->afe.afe_dfx_master_control0);
	udelay(AFE_REGISTER_WRITE_DELAY);
}

static enum sci_status scic_controller_set_mode(struct scic_sds_controller *scic,
						enum sci_controller_mode operating_mode)
{
	enum sci_status status = SCI_SUCCESS;

	if ((scic->state_machine.current_state_id ==
	     SCI_BASE_CONTROLLER_STATE_INITIALIZING) ||
	    (scic->state_machine.current_state_id ==
	     SCI_BASE_CONTROLLER_STATE_INITIALIZED)) {
		switch (operating_mode) {
		case SCI_MODE_SPEED:
			scic->remote_node_entries = SCI_MAX_REMOTE_DEVICES;
			scic->task_context_entries = SCU_IO_REQUEST_COUNT;
			scic->uf_control.buffers.count =
				SCU_UNSOLICITED_FRAME_COUNT;
			scic->completion_event_entries = SCU_EVENT_COUNT;
			scic->completion_queue_entries =
				SCU_COMPLETION_QUEUE_COUNT;
			break;

		case SCI_MODE_SIZE:
			scic->remote_node_entries = SCI_MIN_REMOTE_DEVICES;
			scic->task_context_entries = SCI_MIN_IO_REQUESTS;
			scic->uf_control.buffers.count =
				SCU_MIN_UNSOLICITED_FRAMES;
			scic->completion_event_entries = SCU_MIN_EVENTS;
			scic->completion_queue_entries =
				SCU_MIN_COMPLETION_QUEUE_ENTRIES;
			break;

		default:
			status = SCI_FAILURE_INVALID_PARAMETER_VALUE;
			break;
		}
	} else
		status = SCI_FAILURE_INVALID_STATE;

	return status;
}

static void scic_sds_controller_initialize_power_control(struct scic_sds_controller *scic)
{
	sci_init_timer(&scic->power_control.timer, power_control_timeout);

	memset(scic->power_control.requesters, 0,
	       sizeof(scic->power_control.requesters));

	scic->power_control.phys_waiting = 0;
	scic->power_control.phys_granted_power = 0;
}

static enum sci_status scic_controller_initialize(struct scic_sds_controller *scic)
{
	struct sci_base_state_machine *sm = &scic->state_machine;
	enum sci_status result = SCI_SUCCESS;
	struct isci_host *ihost = scic_to_ihost(scic);
	u32 index, state;

	if (scic->state_machine.current_state_id !=
	    SCI_BASE_CONTROLLER_STATE_RESET) {
		dev_warn(scic_to_dev(scic),
			 "SCIC Controller initialize operation requested "
			 "in invalid state\n");
		return SCI_FAILURE_INVALID_STATE;
	}

	sci_base_state_machine_change_state(sm, SCI_BASE_CONTROLLER_STATE_INITIALIZING);

	scic->timeout_timer = isci_timer_create(ihost, scic,
						scic_sds_controller_timeout_handler);

	scic_sds_controller_initialize_phy_startup(scic);

	scic_sds_controller_initialize_power_control(scic);

	/*
	 * There is nothing to do here for B0 since we do not have to
	 * program the AFE registers.
	 * @todo The AFE settings are supposed to be correct for the B0 but
	 *       presently they seem to be wrong. */
	scic_sds_controller_afe_initialization(scic);

	if (result == SCI_SUCCESS) {
		u32 status;
		u32 terminate_loop;

		/* Take the hardware out of reset */
		writel(0, &scic->smu_registers->soft_reset_control);

		/*
		 * @todo Provide meaningful error code for hardware failure
		 * result = SCI_FAILURE_CONTROLLER_HARDWARE; */
		result = SCI_FAILURE;
		terminate_loop = 100;

		while (terminate_loop-- && (result != SCI_SUCCESS)) {
			/* Loop until the hardware reports success */
			udelay(SCU_CONTEXT_RAM_INIT_STALL_TIME);
			status = readl(&scic->smu_registers->control_status);

			if ((status & SCU_RAM_INIT_COMPLETED) ==
			    SCU_RAM_INIT_COMPLETED)
				result = SCI_SUCCESS;
		}
	}

	if (result == SCI_SUCCESS) {
		u32 max_supported_ports;
		u32 max_supported_devices;
		u32 max_supported_io_requests;
		u32 device_context_capacity;

		/*
		 * Determine the actual device capacities that the
		 * hardware will support */
		device_context_capacity =
			readl(&scic->smu_registers->device_context_capacity);

		max_supported_ports = smu_dcc_get_max_ports(device_context_capacity);
		max_supported_devices = smu_dcc_get_max_remote_node_context(device_context_capacity);
		max_supported_io_requests = smu_dcc_get_max_task_context(device_context_capacity);

		/*
		 * Make all PEs that are unassigned match up with the
		 * logical ports
		 */
		for (index = 0; index < max_supported_ports; index++) {
			struct scu_port_task_scheduler_group_registers __iomem
				*ptsg = &scic->scu_registers->peg0.ptsg;

			writel(index, &ptsg->protocol_engine[index]);
		}

		/* Record the smaller of the two capacity values */
		scic->logical_port_entries =
			min(max_supported_ports, scic->logical_port_entries);

		scic->task_context_entries =
			min(max_supported_io_requests,
			    scic->task_context_entries);

		scic->remote_node_entries =
			min(max_supported_devices, scic->remote_node_entries);

		/*
		 * Now that we have the correct hardware reported minimum values
		 * build the MDL for the controller.  Default to a performance
		 * configuration.
		 */
		scic_controller_set_mode(scic, SCI_MODE_SPEED);
	}

	/* Initialize hardware PCI Relaxed ordering in DMA engines */
	if (result == SCI_SUCCESS) {
		u32 dma_configuration;

		/* Configure the payload DMA */
		dma_configuration =
			readl(&scic->scu_registers->sdma.pdma_configuration);
		dma_configuration |=
			SCU_PDMACR_GEN_BIT(PCI_RELAXED_ORDERING_ENABLE);
		writel(dma_configuration,
		       &scic->scu_registers->sdma.pdma_configuration);

		/* Configure the control DMA */
		dma_configuration =
			readl(&scic->scu_registers->sdma.cdma_configuration);
		dma_configuration |=
			SCU_CDMACR_GEN_BIT(PCI_RELAXED_ORDERING_ENABLE);
		writel(dma_configuration,
		       &scic->scu_registers->sdma.cdma_configuration);
	}

	/*
	 * Initialize the PHYs before the PORTs because the PHY registers
	 * are accessed during the port initialization.
	 */
	if (result == SCI_SUCCESS) {
		/* Initialize the phys */
		for (index = 0;
		     (result == SCI_SUCCESS) && (index < SCI_MAX_PHYS);
		     index++) {
			result = scic_sds_phy_initialize(
				&ihost->phys[index].sci,
				&scic->scu_registers->peg0.pe[index].tl,
				&scic->scu_registers->peg0.pe[index].ll);
		}
	}

	if (result == SCI_SUCCESS) {
		/* Initialize the logical ports */
		for (index = 0;
		     (index < scic->logical_port_entries) &&
		     (result == SCI_SUCCESS);
		     index++) {
			result = scic_sds_port_initialize(
				&ihost->ports[index].sci,
				&scic->scu_registers->peg0.ptsg.port[index],
				&scic->scu_registers->peg0.ptsg.protocol_engine,
				&scic->scu_registers->peg0.viit[index]);
		}
	}

	if (result == SCI_SUCCESS)
		result = scic_sds_port_configuration_agent_initialize(
			scic,
			&scic->port_agent);

	/* Advance the controller state machine */
	if (result == SCI_SUCCESS)
		state = SCI_BASE_CONTROLLER_STATE_INITIALIZED;
	else
		state = SCI_BASE_CONTROLLER_STATE_FAILED;
	sci_base_state_machine_change_state(sm, state);

	return result;
}

static enum sci_status scic_user_parameters_set(
	struct scic_sds_controller *scic,
	union scic_user_parameters *scic_parms)
{
	u32 state = scic->state_machine.current_state_id;

	if (state == SCI_BASE_CONTROLLER_STATE_RESET ||
	    state == SCI_BASE_CONTROLLER_STATE_INITIALIZING ||
	    state == SCI_BASE_CONTROLLER_STATE_INITIALIZED) {
		u16 index;

		/*
		 * Validate the user parameters.  If they are not legal, then
		 * return a failure.
		 */
		for (index = 0; index < SCI_MAX_PHYS; index++) {
			struct sci_phy_user_params *user_phy;

			user_phy = &scic_parms->sds1.phys[index];

			if (!((user_phy->max_speed_generation <=
			       SCIC_SDS_PARM_MAX_SPEED) &&
			      (user_phy->max_speed_generation >
			       SCIC_SDS_PARM_NO_SPEED)))
				return SCI_FAILURE_INVALID_PARAMETER_VALUE;

			if ((user_phy->in_connection_align_insertion_frequency < 3) ||
			    (user_phy->align_insertion_frequency == 0) ||
			    (user_phy->notify_enable_spin_up_insertion_frequency == 0))
				return SCI_FAILURE_INVALID_PARAMETER_VALUE;
		}

		if ((scic_parms->sds1.stp_inactivity_timeout == 0) ||
		    (scic_parms->sds1.ssp_inactivity_timeout == 0) ||
		    (scic_parms->sds1.stp_max_occupancy_timeout == 0) ||
		    (scic_parms->sds1.ssp_max_occupancy_timeout == 0) ||
		    (scic_parms->sds1.no_outbound_task_timeout == 0))
			return SCI_FAILURE_INVALID_PARAMETER_VALUE;

		memcpy(&scic->user_parameters, scic_parms, sizeof(*scic_parms));

		return SCI_SUCCESS;
	}

	return SCI_FAILURE_INVALID_STATE;
}

static int scic_controller_mem_init(struct scic_sds_controller *scic)
{
	struct device *dev = scic_to_dev(scic);
	dma_addr_t dma_handle;
	enum sci_status result;

	scic->completion_queue = dmam_alloc_coherent(dev,
			scic->completion_queue_entries * sizeof(u32),
			&dma_handle, GFP_KERNEL);
	if (!scic->completion_queue)
		return -ENOMEM;

	writel(lower_32_bits(dma_handle),
	       &scic->smu_registers->completion_queue_lower);
	writel(upper_32_bits(dma_handle),
	       &scic->smu_registers->completion_queue_upper);

	scic->remote_node_context_table = dmam_alloc_coherent(dev,
			scic->remote_node_entries *
				sizeof(union scu_remote_node_context),
			&dma_handle, GFP_KERNEL);
	if (!scic->remote_node_context_table)
		return -ENOMEM;

	writel(lower_32_bits(dma_handle),
	       &scic->smu_registers->remote_node_context_lower);
	writel(upper_32_bits(dma_handle),
	       &scic->smu_registers->remote_node_context_upper);

	scic->task_context_table = dmam_alloc_coherent(dev,
			scic->task_context_entries *
				sizeof(struct scu_task_context),
			&dma_handle, GFP_KERNEL);
	if (!scic->task_context_table)
		return -ENOMEM;

	writel(lower_32_bits(dma_handle),
	       &scic->smu_registers->host_task_table_lower);
	writel(upper_32_bits(dma_handle),
	       &scic->smu_registers->host_task_table_upper);

	result = scic_sds_unsolicited_frame_control_construct(scic);
	if (result)
		return result;

	/*
	 * Inform the silicon as to the location of the UF headers and
	 * address table.
	 */
	writel(lower_32_bits(scic->uf_control.headers.physical_address),
	       &scic->scu_registers->sdma.uf_header_base_address_lower);
	writel(upper_32_bits(scic->uf_control.headers.physical_address),
	       &scic->scu_registers->sdma.uf_header_base_address_upper);

	writel(lower_32_bits(scic->uf_control.address_table.physical_address),
	       &scic->scu_registers->sdma.uf_address_table_lower);
	writel(upper_32_bits(scic->uf_control.address_table.physical_address),
	       &scic->scu_registers->sdma.uf_address_table_upper);

	return 0;
}

int isci_host_init(struct isci_host *isci_host)
{
	int err = 0, i;
	enum sci_status status;
	union scic_oem_parameters oem;
	union scic_user_parameters scic_user_params;
	struct isci_pci_info *pci_info = to_pci_info(isci_host->pdev);

	isci_timer_list_construct(isci_host);

	spin_lock_init(&isci_host->state_lock);
	spin_lock_init(&isci_host->scic_lock);
	spin_lock_init(&isci_host->queue_lock);
	init_waitqueue_head(&isci_host->eventq);

	isci_host_change_state(isci_host, isci_starting);
	isci_host->can_queue = ISCI_CAN_QUEUE_VAL;

	status = scic_controller_construct(&isci_host->sci, scu_base(isci_host),
					   smu_base(isci_host));

	if (status != SCI_SUCCESS) {
		dev_err(&isci_host->pdev->dev,
			"%s: scic_controller_construct failed - status = %x\n",
			__func__,
			status);
		return -ENODEV;
	}

	isci_host->sas_ha.dev = &isci_host->pdev->dev;
	isci_host->sas_ha.lldd_ha = isci_host;

	/*
	 * grab initial values stored in the controller object for OEM and USER
	 * parameters
	 */
	isci_user_parameters_get(isci_host, &scic_user_params);
	status = scic_user_parameters_set(&isci_host->sci,
					  &scic_user_params);
	if (status != SCI_SUCCESS) {
		dev_warn(&isci_host->pdev->dev,
			 "%s: scic_user_parameters_set failed\n",
			 __func__);
		return -ENODEV;
	}

	scic_oem_parameters_get(&isci_host->sci, &oem);

	/* grab any OEM parameters specified in orom */
	if (pci_info->orom) {
		status = isci_parse_oem_parameters(&oem,
						   pci_info->orom,
						   isci_host->id);
		if (status != SCI_SUCCESS) {
			dev_warn(&isci_host->pdev->dev,
				 "parsing firmware oem parameters failed\n");
			return -EINVAL;
		}
	}

	status = scic_oem_parameters_set(&isci_host->sci, &oem);
	if (status != SCI_SUCCESS) {
		dev_warn(&isci_host->pdev->dev,
			 "%s: scic_oem_parameters_set failed\n",
			 __func__);
		return -ENODEV;
	}

	tasklet_init(&isci_host->completion_tasklet,
		     isci_host_completion_routine, (unsigned long)isci_host);

	INIT_LIST_HEAD(&isci_host->requests_to_complete);
	INIT_LIST_HEAD(&isci_host->requests_to_errorback);

	spin_lock_irq(&isci_host->scic_lock);
	status = scic_controller_initialize(&isci_host->sci);
	spin_unlock_irq(&isci_host->scic_lock);
	if (status != SCI_SUCCESS) {
		dev_warn(&isci_host->pdev->dev,
			 "%s: scic_controller_initialize failed -"
			 " status = 0x%x\n",
			 __func__, status);
		return -ENODEV;
	}

	err = scic_controller_mem_init(&isci_host->sci);
	if (err)
		return err;

	isci_host->dma_pool = dmam_pool_create(DRV_NAME, &isci_host->pdev->dev,
					       sizeof(struct isci_request),
					       SLAB_HWCACHE_ALIGN, 0);

	if (!isci_host->dma_pool)
		return -ENOMEM;

	for (i = 0; i < SCI_MAX_PORTS; i++)
		isci_port_init(&isci_host->ports[i], isci_host, i);

	for (i = 0; i < SCI_MAX_PHYS; i++)
		isci_phy_init(&isci_host->phys[i], isci_host, i);

	for (i = 0; i < SCI_MAX_REMOTE_DEVICES; i++) {
		struct isci_remote_device *idev = &isci_host->devices[i];

		INIT_LIST_HEAD(&idev->reqs_in_process);
		INIT_LIST_HEAD(&idev->node);
		spin_lock_init(&idev->state_lock);
	}

	return 0;
}

void scic_sds_controller_link_up(struct scic_sds_controller *scic,
				 struct scic_sds_port *port, struct scic_sds_phy *phy)
{
	switch (scic->state_machine.current_state_id) {
	case SCI_BASE_CONTROLLER_STATE_STARTING:
		scic_sds_controller_phy_timer_stop(scic);
		scic->port_agent.link_up_handler(scic, &scic->port_agent,
						 port, phy);
		scic_sds_controller_start_next_phy(scic);
		break;
	case SCI_BASE_CONTROLLER_STATE_READY:
		scic->port_agent.link_up_handler(scic, &scic->port_agent,
						 port, phy);
		break;
	default:
		dev_dbg(scic_to_dev(scic),
			"%s: SCIC Controller linkup event from phy %d in "
			"unexpected state %d\n", __func__, phy->phy_index,
			scic->state_machine.current_state_id);
	}
}

void scic_sds_controller_link_down(struct scic_sds_controller *scic,
				   struct scic_sds_port *port, struct scic_sds_phy *phy)
{
	switch (scic->state_machine.current_state_id) {
	case SCI_BASE_CONTROLLER_STATE_STARTING:
	case SCI_BASE_CONTROLLER_STATE_READY:
		scic->port_agent.link_down_handler(scic, &scic->port_agent,
						   port, phy);
		break;
	default:
		dev_dbg(scic_to_dev(scic),
			"%s: SCIC Controller linkdown event from phy %d in "
			"unexpected state %d\n",
			__func__,
			phy->phy_index,
			scic->state_machine.current_state_id);
	}
}

/**
 * This is a helper method to determine if any remote devices on this
 * controller are still in the stopping state.
 *
 */
static bool scic_sds_controller_has_remote_devices_stopping(
	struct scic_sds_controller *controller)
{
	u32 index;

	for (index = 0; index < controller->remote_node_entries; index++) {
		if ((controller->device_table[index] != NULL) &&
		    (controller->device_table[index]->state_machine.current_state_id
		     == SCI_BASE_REMOTE_DEVICE_STATE_STOPPING))
			return true;
	}

	return false;
}

/**
 * This method is called by the remote device to inform the controller
 * object that the remote device has stopped.
 */
void scic_sds_controller_remote_device_stopped(struct scic_sds_controller *scic,
					       struct scic_sds_remote_device *sci_dev)
{
	if (scic->state_machine.current_state_id !=
	    SCI_BASE_CONTROLLER_STATE_STOPPING) {
		dev_dbg(scic_to_dev(scic),
			"SCIC Controller 0x%p remote device stopped event "
			"from device 0x%p in unexpected state %d\n",
			scic, sci_dev,
			scic->state_machine.current_state_id);
		return;
	}

	if (!scic_sds_controller_has_remote_devices_stopping(scic)) {
		sci_base_state_machine_change_state(&scic->state_machine,
						    SCI_BASE_CONTROLLER_STATE_STOPPED);
	}
}

/**
 * This method will write the request value to the SCU PCP register.  It is
 * used to suspend/resume ports, devices, and phys.
 * @scic:
 *
 */
void scic_sds_controller_post_request(
	struct scic_sds_controller *scic,
	u32 request)
{
	dev_dbg(scic_to_dev(scic),
		"%s: SCIC Controller 0x%p post request 0x%08x\n",
		__func__,
		scic,
		request);

	writel(request, &scic->smu_registers->post_context_port);
}

/**
 * This method will copy the soft copy of the task context into the physical
 * memory accessible by the controller.
 * @scic: This parameter specifies the controller for which to copy
 *    the task context.
 * @sci_req: This parameter specifies the request for which the task
 *    context is being copied.
 *
 * After this call is made the SCIC_SDS_IO_REQUEST object will always point to
 * the physical memory version of the task context.  Thus, all subsequent
 * updates to the task context are performed in the TC table (i.e. DMAable
 * memory).
 */
void scic_sds_controller_copy_task_context(
	struct scic_sds_controller *scic,
	struct scic_sds_request *sci_req)
{
	struct scu_task_context *task_context_buffer;

	task_context_buffer = scic_sds_controller_get_task_context_buffer(
		scic, sci_req->io_tag);

	memcpy(task_context_buffer,
	       sci_req->task_context_buffer,
	       offsetof(struct scu_task_context, sgl_snapshot_ac));

	/*
	 * The soft copy of the TC has now been copied into the TC table
	 * accessible by the silicon, so any further changes to the TC
	 * (e.g. TC termination) occur in that location. */
	sci_req->task_context_buffer = task_context_buffer;
}

/**
 * This method returns the task context buffer for the given io tag.
 * @scic:
 * @io_tag:
 *
 * struct scu_task_context*
 */
struct scu_task_context *scic_sds_controller_get_task_context_buffer(
	struct scic_sds_controller *scic,
	u16 io_tag)
{
	u16 task_index = scic_sds_io_tag_get_index(io_tag);

	if (task_index < scic->task_context_entries) {
		return &scic->task_context_table[task_index];
	}

	return NULL;
}

struct scic_sds_request *scic_request_by_tag(struct scic_sds_controller *scic,
					     u16 io_tag)
{
	u16 task_index;
	u16 task_sequence;

	task_index = scic_sds_io_tag_get_index(io_tag);

	if (task_index < scic->task_context_entries) {
		if (scic->io_request_table[task_index] != NULL) {
			task_sequence = scic_sds_io_tag_get_sequence(io_tag);

			if (task_sequence == scic->io_request_sequence[task_index]) {
				return scic->io_request_table[task_index];
			}
		}
	}

	return NULL;
}
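
/*
 * A sketch (assumption, not driver code) of the tag layout implied by the
 * lookup above: an io_tag is assumed to pack a generation/sequence number
 * alongside a task-context index, so a recycled index with a stale sequence
 * fails the comparison in scic_request_by_tag().  The exact bit split lives
 * in the scic_sds_io_tag_* macros, not here; the 12-bit split below is
 * purely hypothetical.
 */
#if 0
#define EXAMPLE_TAG_INDEX_BITS 12	/* hypothetical split */

static u16 example_tag_index(u16 io_tag)
{
	return io_tag & ((1 << EXAMPLE_TAG_INDEX_BITS) - 1);
}

static u16 example_tag_sequence(u16 io_tag)
{
	return io_tag >> EXAMPLE_TAG_INDEX_BITS;
}
#endif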

/**
 * This method allocates a remote node index and reserves the remote node
 * context space for use.  This method can fail if there are no more remote
 * node indexes available.
 * @scic: This is the controller object which contains the set of
 *    free remote node ids
 * @sci_dev: This is the device object which is requesting a remote node
 *    id
 * @node_id: This is the remote node id that is assigned to the device if one
 *    is available
 *
 * enum sci_status SCI_FAILURE_INSUFFICIENT_RESOURCES if there are no remote
 * node indexes available.
 */
enum sci_status scic_sds_controller_allocate_remote_node_context(
	struct scic_sds_controller *scic,
	struct scic_sds_remote_device *sci_dev,
	u16 *node_id)
{
	u16 node_index;
	u32 remote_node_count = scic_sds_remote_device_node_count(sci_dev);

	node_index = scic_sds_remote_node_table_allocate_remote_node(
		&scic->available_remote_nodes, remote_node_count);

	if (node_index != SCIC_SDS_REMOTE_NODE_CONTEXT_INVALID_INDEX) {
		scic->device_table[node_index] = sci_dev;

		*node_id = node_index;

		return SCI_SUCCESS;
	}

	return SCI_FAILURE_INSUFFICIENT_RESOURCES;
}

/**
 * This method frees the remote node index back to the available pool.  Once
 * this is done the remote node context buffer is no longer valid and cannot
 * be used.
 * @scic:
 * @sci_dev:
 * @node_id:
 *
 */
void scic_sds_controller_free_remote_node_context(
	struct scic_sds_controller *scic,
	struct scic_sds_remote_device *sci_dev,
	u16 node_id)
{
	u32 remote_node_count = scic_sds_remote_device_node_count(sci_dev);

	if (scic->device_table[node_id] == sci_dev) {
		scic->device_table[node_id] = NULL;

		scic_sds_remote_node_table_release_remote_node_index(
			&scic->available_remote_nodes, remote_node_count, node_id);
	}
}

/**
 * This method returns the union scu_remote_node_context for the specified
 * remote node id.
 * @scic:
 * @node_id:
 *
 * union scu_remote_node_context*
 */
union scu_remote_node_context *scic_sds_controller_get_remote_node_context_buffer(
	struct scic_sds_controller *scic,
	u16 node_id)
{
	if ((node_id < scic->remote_node_entries) &&
	    (scic->device_table[node_id] != NULL)) {
		return &scic->remote_node_context_table[node_id];
	}

	return NULL;
}

/**
 *
 * @response_buffer: This is the buffer into which the D2H register FIS will
 *    be constructed.
 * @frame_header: This is the frame header returned by the hardware.
 * @frame_buffer: This is the frame buffer returned by the hardware.
 *
 * This method combines the frame header and frame buffer to create a SATA
 * D2H register FIS.
 */
void scic_sds_controller_copy_sata_response(
	void *response_buffer,
	void *frame_header,
	void *frame_buffer)
{
	memcpy(response_buffer, frame_header, sizeof(u32));

	memcpy(response_buffer + sizeof(u32),
	       frame_buffer,
	       sizeof(struct dev_to_host_fis) - sizeof(u32));
}
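
/*
 * For context (a property of the SATA protocol, not of this driver): a
 * device-to-host register FIS is five dwords, and the hardware delivers
 * its first dword in the unsolicited frame header with the remaining
 * dwords in the frame buffer, which is why the copy above is split at
 * sizeof(u32).
 */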

/**
 * This method releases the frame; once this is done the frame is available
 * for re-use by the hardware.  The data contained in the frame header and
 * frame buffer is no longer valid.  The UF queue get pointer is only updated
 * if UF control indicates this is appropriate.
 * @scic:
 * @frame_index:
 *
 */
void scic_sds_controller_release_frame(
	struct scic_sds_controller *scic,
	u32 frame_index)
{
	if (scic_sds_unsolicited_frame_control_release_frame(
		    &scic->uf_control, frame_index))
		writel(scic->uf_control.get,
		       &scic->scu_registers->sdma.unsolicited_frame_get_pointer);
}

/**
 * scic_controller_start_io() - This method is called by the SCI user to
 *    send/start an IO request.  If the method invocation is successful, then
 *    the IO request has been queued to the hardware for processing.
 * @controller: the handle to the controller object for which to start an IO
 *    request.
 * @remote_device: the handle to the remote device object for which to start
 *    an IO request.
 * @io_request: the handle to the io request object to start.
 * @io_tag: This parameter specifies a previously allocated IO tag that the
 *    user desires to be utilized for this request.  This parameter is
 *    optional.  The user is allowed to supply SCI_CONTROLLER_INVALID_IO_TAG
 *    as the value for this parameter.
 *
 * - IO tags are a protected resource.  It is incumbent upon the SCI Core user
 * to ensure that each of the methods that may allocate or free available IO
 * tags are handled in a mutually exclusive manner.  This method is one of
 * said methods requiring proper critical code section protection (e.g.
 * semaphore, spin-lock, etc.). - For SATA, the user is required to manage
 * NCQ tags.  As a result, it is expected the user will have set the NCQ tag
 * field in the host to device register FIS prior to calling this method.
 * There is also a requirement for the user to call scic_stp_io_set_ncq_tag()
 * prior to invoking the scic_controller_start_io() method.  See
 * scic_controller_allocate_tag() for more information on allocating a tag.
 * Indicate if the controller successfully started the IO request.
 * SCI_SUCCESS if the IO request was successfully started; other status
 * values indicate the failure mode.
 */
enum sci_status scic_controller_start_io(
	struct scic_sds_controller *scic,
	struct scic_sds_remote_device *rdev,
	struct scic_sds_request *req,
	u16 io_tag)
{
	enum sci_status status;

	if (scic->state_machine.current_state_id !=
	    SCI_BASE_CONTROLLER_STATE_READY) {
		dev_warn(scic_to_dev(scic), "invalid state to start I/O");
		return SCI_FAILURE_INVALID_STATE;
	}

	status = scic_sds_remote_device_start_io(scic, rdev, req);
	if (status != SCI_SUCCESS)
		return status;

	scic->io_request_table[scic_sds_io_tag_get_index(req->io_tag)] = req;
	scic_sds_controller_post_request(scic, scic_sds_request_get_post_context(req));
	return SCI_SUCCESS;
}
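
/*
 * A minimal usage sketch (not compiled) of the mutual exclusion the
 * kernel-doc above demands.  Assumptions: the caller already holds a
 * constructed request and remote device, and ihost->scic_lock is the
 * exclusion mechanism used for core calls, as elsewhere in this file.
 */
#if 0
static enum sci_status example_start_io(struct isci_host *ihost,
					struct scic_sds_remote_device *rdev,
					struct scic_sds_request *req)
{
	unsigned long flags;
	enum sci_status status;

	/* tag-related core entry points must be serialized by the caller */
	spin_lock_irqsave(&ihost->scic_lock, flags);
	status = scic_controller_start_io(&ihost->sci, rdev, req,
					  SCI_CONTROLLER_INVALID_IO_TAG);
	spin_unlock_irqrestore(&ihost->scic_lock, flags);

	return status;
}
#endif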

/**
 * scic_controller_terminate_request() - This method is called by the SCI
 *    Core user to terminate an ongoing (i.e. started) core IO request.  This
 *    does not abort the IO request at the target, but rather removes the IO
 *    request from the host controller.
 * @controller: the handle to the controller object for which to terminate a
 *    request.
 * @remote_device: the handle to the remote device object for which to
 *    terminate a request.
 * @request: the handle to the io or task management request object to
 *    terminate.
 *
 * Indicate if the controller successfully began the terminate process for
 * the IO request.  SCI_SUCCESS if the terminate process was successfully
 * started for the request; other status values indicate the failure mode.
 */
enum sci_status scic_controller_terminate_request(
	struct scic_sds_controller *scic,
	struct scic_sds_remote_device *rdev,
	struct scic_sds_request *req)
{
	enum sci_status status;

	if (scic->state_machine.current_state_id !=
	    SCI_BASE_CONTROLLER_STATE_READY) {
		dev_warn(scic_to_dev(scic),
			 "invalid state to terminate request\n");
		return SCI_FAILURE_INVALID_STATE;
	}

	status = scic_sds_io_request_terminate(req);
	if (status != SCI_SUCCESS)
		return status;

	/*
	 * Utilize the original post context command and or in the
	 * POST_TC_ABORT request sub-type.
	 */
	scic_sds_controller_post_request(scic,
					 scic_sds_request_get_post_context(req) |
					 SCU_CONTEXT_COMMAND_REQUEST_POST_TC_ABORT);
	return SCI_SUCCESS;
}
3025
3026/**
3027 * scic_controller_complete_io() - This method will perform core specific
3028 * completion operations for an IO request. After this method is invoked,
3029 * the user should consider the IO request as invalid until it is properly
3030 * reused (i.e. re-constructed).
3031 * @controller: The handle to the controller object for which to complete the
3032 * IO request.
3033 * @remote_device: The handle to the remote device object for which to complete
3034 * the IO request.
3035 * @io_request: the handle to the io request object to complete.
3036 *
3037 * - IO tags are a protected resource. It is incumbent upon the SCI Core user
3038 * to ensure that each of the methods that may allocate or free available IO
3039 * tags are handled in a mutually exclusive manner. This method is one of said
3040 * methods requiring proper critical code section protection (e.g. semaphore,
3041 * spin-lock, etc.). - If the IO tag for a request was allocated, by the SCI
3042 * Core user, using the scic_controller_allocate_io_tag() method, then it is
3043 * the responsibility of the caller to invoke the scic_controller_free_io_tag()
3044 * method to free the tag (i.e. this method will not free the IO tag). Indicate
3045 * if the controller successfully completed the IO request. SCI_SUCCESS if the
3046 * completion process was successful.
3047 */
enum sci_status scic_controller_complete_io(
	struct scic_sds_controller *scic,
	struct scic_sds_remote_device *rdev,
	struct scic_sds_request *request)
{
	enum sci_status status;
	u16 index;

	switch (scic->state_machine.current_state_id) {
	case SCI_BASE_CONTROLLER_STATE_STOPPING:
		/* XXX: Implement this function */
		return SCI_FAILURE;
	case SCI_BASE_CONTROLLER_STATE_READY:
		status = scic_sds_remote_device_complete_io(scic, rdev, request);
		if (status != SCI_SUCCESS)
			return status;

		index = scic_sds_io_tag_get_index(request->io_tag);
		scic->io_request_table[index] = NULL;
		return SCI_SUCCESS;
	default:
		dev_warn(scic_to_dev(scic), "invalid state to complete I/O\n");
		return SCI_FAILURE_INVALID_STATE;
	}
}
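
/*
 * Illustrative sketch (not part of the driver): pairing completion with tag
 * release when the caller allocated the tag itself.  The "my_" names are
 * hypothetical:
 *
 *	status = scic_controller_complete_io(my_scic, my_rdev, my_req);
 *	if (status == SCI_SUCCESS && my_caller_owns_tag)
 *		scic_controller_free_io_tag(my_scic, my_io_tag);
 */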

/*
 * Hand an already-constructed request back to the hardware: restore its
 * slot in the io_request_table and post its task context.
 */
enum sci_status scic_controller_continue_io(struct scic_sds_request *sci_req)
{
	struct scic_sds_controller *scic = sci_req->owning_controller;

	if (scic->state_machine.current_state_id !=
	    SCI_BASE_CONTROLLER_STATE_READY) {
		dev_warn(scic_to_dev(scic), "invalid state to continue I/O\n");
		return SCI_FAILURE_INVALID_STATE;
	}

	scic->io_request_table[scic_sds_io_tag_get_index(sci_req->io_tag)] = sci_req;
	scic_sds_controller_post_request(scic, scic_sds_request_get_post_context(sci_req));
	return SCI_SUCCESS;
}

/**
 * scic_controller_start_task() - This method is called by the SCIC user to
 *    send/start a framework task management request.
 * @scic: the handle to the controller object for which to start the task
 *    management request.
 * @rdev: the handle to the remote device object for which to start the task
 *    management request.
 * @req: the handle to the task request object to start.
 * @task_tag: This parameter specifies a previously allocated IO tag that the
 *    user desires to be utilized for this request.  Note that this is not
 *    the io_tag of the request being managed; it is to be utilized for the
 *    task request itself.  This parameter is optional.  The user is allowed
 *    to supply SCI_CONTROLLER_INVALID_IO_TAG as the value for this
 *    parameter.
 *
 * - IO tags are a protected resource.  It is incumbent upon the SCI Core
 *   user to ensure that each of the methods that may allocate or free
 *   available IO tags are invoked in a mutually exclusive manner.  This
 *   method is one of said methods requiring proper critical code section
 *   protection (e.g. semaphore, spin-lock, etc.).
 * - The user must synchronize this task with completion queue processing.
 *   If they are not synchronized, it is possible for the IO requests being
 *   managed by the task request to complete before the task request itself
 *   is started.
 *
 * See scic_controller_allocate_io_tag() for more information on allocating
 * a tag.
 *
 * Return: SCI_TASK_SUCCESS if the task request was successfully started.
 * SCI_TASK_FAILURE_REQUIRES_SCSI_ABORT is returned if there is/are task(s)
 * outstanding that require termination or completion before this request
 * can succeed.
 */
enum sci_task_status scic_controller_start_task(
	struct scic_sds_controller *scic,
	struct scic_sds_remote_device *rdev,
	struct scic_sds_request *req,
	u16 task_tag)
{
	enum sci_status status;

	if (scic->state_machine.current_state_id !=
	    SCI_BASE_CONTROLLER_STATE_READY) {
		dev_warn(scic_to_dev(scic),
			 "%s: SCIC Controller starting task from invalid "
			 "state\n",
			 __func__);
		return SCI_TASK_FAILURE_INVALID_STATE;
	}

	status = scic_sds_remote_device_start_task(scic, rdev, req);
	switch (status) {
	case SCI_FAILURE_RESET_DEVICE_PARTIAL_SUCCESS:
		scic->io_request_table[scic_sds_io_tag_get_index(req->io_tag)] = req;

		/*
		 * Let the framework know this task request started
		 * successfully, although the core is still working on
		 * starting the request (the TC will be posted when the
		 * RNC is resumed).
		 */
		return SCI_SUCCESS;
	case SCI_SUCCESS:
		scic->io_request_table[scic_sds_io_tag_get_index(req->io_tag)] = req;

		scic_sds_controller_post_request(scic,
			scic_sds_request_get_post_context(req));
		break;
	default:
		break;
	}

	return status;
}
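
/*
 * Illustrative sketch (not part of the driver): starting a task management
 * request without a pre-allocated tag.  The "my_" names are hypothetical:
 *
 *	enum sci_task_status tstatus;
 *
 *	tstatus = scic_controller_start_task(my_scic, my_rdev, my_tmf_req,
 *					     SCI_CONTROLLER_INVALID_IO_TAG);
 *	if (tstatus == SCI_TASK_FAILURE_REQUIRES_SCSI_ABORT)
 *		// outstanding I/O must be terminated or completed first
 */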

/**
 * scic_controller_allocate_io_tag() - This method will allocate a tag from
 *    the pool of free IO tags.  Direct allocation of IO tags by the SCI Core
 *    user is optional.  The scic_controller_start_io() method will allocate
 *    an IO tag if this method is not utilized and the tag is not supplied to
 *    the IO construct routine.  Direct allocation of IO tags may provide
 *    additional performance improvements in environments capable of
 *    supporting this usage model.  Additionally, direct allocation of IO
 *    tags provides additional flexibility to the SCI Core user.
 *    Specifically, the user may retain IO tags across the lives of multiple
 *    IO requests.
 * @scic: the handle to the controller object for which to allocate the tag.
 *
 * IO tags are a protected resource.  It is incumbent upon the SCI Core user
 * to ensure that each of the methods that may allocate or free available IO
 * tags are invoked in a mutually exclusive manner.  This method is one of
 * said methods requiring proper critical code section protection (e.g.
 * semaphore, spin-lock, etc.).
 *
 * Return: an unsigned integer representing an available IO tag.
 * SCI_CONTROLLER_INVALID_IO_TAG is returned if there are no currently
 * available tags to be allocated.  All other return values indicate a
 * legitimate tag.
 */
u16 scic_controller_allocate_io_tag(
	struct scic_sds_controller *scic)
{
	u16 task_context;
	u16 sequence_count;

	if (!sci_pool_empty(scic->tci_pool)) {
		sci_pool_get(scic->tci_pool, task_context);

		sequence_count = scic->io_request_sequence[task_context];

		return scic_sds_io_tag_construct(sequence_count, task_context);
	}

	return SCI_CONTROLLER_INVALID_IO_TAG;
}
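
/*
 * Illustrative sketch (not part of the driver): the allocate/free tag
 * lifecycle when the caller manages tags directly.  Each tag packs a
 * sequence count together with a task-context index via
 * scic_sds_io_tag_construct(); the "my_" names are hypothetical:
 *
 *	u16 my_io_tag = scic_controller_allocate_io_tag(my_scic);
 *
 *	if (my_io_tag == SCI_CONTROLLER_INVALID_IO_TAG)
 *		return -EBUSY;	// tci_pool exhausted
 *	// ... construct and start one or more requests using my_io_tag ...
 *	scic_controller_free_io_tag(my_scic, my_io_tag);
 */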

/**
 * scic_controller_free_io_tag() - This method will free an IO tag to the
 *    pool of free IO tags.  This method provides the SCI Core user more
 *    flexibility with regards to IO tags.  The user may desire to keep an IO
 *    tag after an IO request has completed, because they plan on re-using
 *    the tag for a subsequent IO request.  This method is only legal if the
 *    tag was allocated via scic_controller_allocate_io_tag().
 * @scic: This parameter specifies the handle to the controller object for
 *    which to free/return the tag.
 * @io_tag: This parameter represents the tag to be freed to the pool of
 *    available tags.
 *
 * - IO tags are a protected resource.  It is incumbent upon the SCI Core
 *   user to ensure that each of the methods that may allocate or free
 *   available IO tags are invoked in a mutually exclusive manner.  This
 *   method is one of said methods requiring proper critical code section
 *   protection (e.g. semaphore, spin-lock, etc.).
 * - If the IO tag for a request was allocated by the SCI Core user via the
 *   scic_controller_allocate_io_tag() method, then it is the responsibility
 *   of the caller to invoke this method to free the tag.
 *
 * Return: SCI_SUCCESS if the tag was successfully placed back into the pool
 * of available IO tags.  SCI_FAILURE_INVALID_IO_TAG is returned if the
 * supplied tag is not a valid IO tag value.
 */
enum sci_status scic_controller_free_io_tag(
	struct scic_sds_controller *scic,
	u16 io_tag)
{
	u16 sequence;
	u16 index;

	BUG_ON(io_tag == SCI_CONTROLLER_INVALID_IO_TAG);

	sequence = scic_sds_io_tag_get_sequence(io_tag);
	index = scic_sds_io_tag_get_index(io_tag);

	if (!sci_pool_full(scic->tci_pool)) {
		/*
		 * Only accept the tag if its sequence count matches the
		 * current count for this index; a stale sequence means the
		 * tag was already freed (and possibly re-allocated).
		 */
		if (sequence == scic->io_request_sequence[index]) {
			scic_sds_io_sequence_increment(
				scic->io_request_sequence[index]);

			sci_pool_put(scic->tci_pool, index);

			return SCI_SUCCESS;
		}
	}

	return SCI_FAILURE_INVALID_IO_TAG;
}
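
/*
 * Illustrative sketch (not part of the driver): the sequence check above
 * makes a stale or double free detectable, since freeing a tag bumps the
 * sequence count for its index.  The "my_" names are hypothetical:
 *
 *	u16 my_tag = scic_controller_allocate_io_tag(my_scic);
 *
 *	scic_controller_free_io_tag(my_scic, my_tag);	// SCI_SUCCESS
 *	scic_controller_free_io_tag(my_scic, my_tag);	// sequence is stale:
 *							// SCI_FAILURE_INVALID_IO_TAG
 */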