/*
| 2 | * Adaptec AAC series RAID controller driver |
| 3 | * (c) Copyright 2001 Red Hat Inc. <alan@redhat.com> |
| 4 | * |
| 5 | * based on the old aacraid driver that is.. |
| 6 | * Adaptec aacraid device driver for Linux. |
| 7 | * |
| 8 | * Copyright (c) 2000 Adaptec, Inc. (aacraid@adaptec.com) |
| 9 | * |
| 10 | * This program is free software; you can redistribute it and/or modify |
| 11 | * it under the terms of the GNU General Public License as published by |
| 12 | * the Free Software Foundation; either version 2, or (at your option) |
| 13 | * any later version. |
| 14 | * |
| 15 | * This program is distributed in the hope that it will be useful, |
| 16 | * but WITHOUT ANY WARRANTY; without even the implied warranty of |
| 17 | * MERCHANTABILITY or FITNESS FOR A PARTICULAR PURPOSE. See the |
| 18 | * GNU General Public License for more details. |
| 19 | * |
| 20 | * You should have received a copy of the GNU General Public License |
| 21 | * along with this program; see the file COPYING. If not, write to |
| 22 | * the Free Software Foundation, 675 Mass Ave, Cambridge, MA 02139, USA. |
| 23 | * |
| 24 | */ |
| 25 | |
| 26 | #include <linux/kernel.h> |
| 27 | #include <linux/init.h> |
| 28 | #include <linux/types.h> |
| 29 | #include <linux/sched.h> |
| 30 | #include <linux/pci.h> |
| 31 | #include <linux/spinlock.h> |
| 32 | #include <linux/slab.h> |
| 33 | #include <linux/completion.h> |
| 34 | #include <linux/blkdev.h> |
| 35 | #include <asm/semaphore.h> |
| 36 | #include <asm/uaccess.h> |
| 37 | |
| 38 | #include <scsi/scsi.h> |
| 39 | #include <scsi/scsi_cmnd.h> |
| 40 | #include <scsi/scsi_device.h> |
| 41 | #include <scsi/scsi_host.h> |
| 42 | |
| 43 | #include "aacraid.h" |
| 44 | |
/* values for inqd_pdt: Peripheral device type in plain English */
#define	INQD_PDT_DA	0x00	/* Direct-access (DISK) device */
#define	INQD_PDT_PROC	0x03	/* Processor device */
#define	INQD_PDT_CHNGR	0x08	/* Changer (jukebox, scsi2) */
#define	INQD_PDT_COMM	0x09	/* Communication device (scsi2) */
#define	INQD_PDT_NOLUN2 0x1f	/* Unknown Device (scsi2) */
#define	INQD_PDT_NOLUN	0x7f	/* Logical Unit Not Present */

#define	INQD_PDT_DMASK	0x1F	/* Peripheral Device Type Mask */
#define	INQD_PDT_QMASK	0xE0	/* Peripheral Device Qualifer Mask */

/* Largest request/response payload one FIB can carry (header excluded). */
#define MAX_FIB_DATA (sizeof(struct hw_fib) - sizeof(FIB_HEADER))

/* Upper bound on scatter/gather entries the driver builds per command. */
#define MAX_DRIVER_SG_SEGMENT_COUNT 17

/*
 *	Sense codes
 *	(written to byte 12 of the sense buffer by set_sense(); each entry
 *	pairs with the same-named ASENCODE_* qualifier below)
 */

#define SENCODE_NO_SENSE			0x00
#define SENCODE_END_OF_DATA			0x00
#define SENCODE_BECOMING_READY			0x04
#define SENCODE_INIT_CMD_REQUIRED		0x04
#define SENCODE_PARAM_LIST_LENGTH_ERROR		0x1A
#define SENCODE_INVALID_COMMAND			0x20
#define SENCODE_LBA_OUT_OF_RANGE		0x21
#define SENCODE_INVALID_CDB_FIELD		0x24
#define SENCODE_LUN_NOT_SUPPORTED		0x25
#define SENCODE_INVALID_PARAM_FIELD		0x26
#define SENCODE_PARAM_NOT_SUPPORTED		0x26
#define SENCODE_PARAM_VALUE_INVALID		0x26
#define SENCODE_RESET_OCCURRED			0x29
#define SENCODE_LUN_NOT_SELF_CONFIGURED_YET	0x3E
#define SENCODE_INQUIRY_DATA_CHANGED		0x3F
#define SENCODE_SAVING_PARAMS_NOT_SUPPORTED	0x39
#define SENCODE_DIAGNOSTIC_FAILURE		0x40
#define SENCODE_INTERNAL_TARGET_FAILURE		0x44
#define SENCODE_INVALID_MESSAGE_ERROR		0x49
#define SENCODE_LUN_FAILED_SELF_CONFIG		0x4c
#define SENCODE_OVERLAPPED_COMMAND		0x4E

/*
 *	Additional sense codes
 *	(written to byte 13 of the sense buffer by set_sense())
 */

#define ASENCODE_NO_SENSE			0x00
#define ASENCODE_END_OF_DATA			0x05
#define ASENCODE_BECOMING_READY			0x01
#define ASENCODE_INIT_CMD_REQUIRED		0x02
#define ASENCODE_PARAM_LIST_LENGTH_ERROR	0x00
#define ASENCODE_INVALID_COMMAND		0x00
#define ASENCODE_LBA_OUT_OF_RANGE		0x00
#define ASENCODE_INVALID_CDB_FIELD		0x00
#define ASENCODE_LUN_NOT_SUPPORTED		0x00
#define ASENCODE_INVALID_PARAM_FIELD		0x00
#define ASENCODE_PARAM_NOT_SUPPORTED		0x01
#define ASENCODE_PARAM_VALUE_INVALID		0x02
#define ASENCODE_RESET_OCCURRED			0x00
#define ASENCODE_LUN_NOT_SELF_CONFIGURED_YET	0x00
#define ASENCODE_INQUIRY_DATA_CHANGED		0x03
#define ASENCODE_SAVING_PARAMS_NOT_SUPPORTED	0x00
#define ASENCODE_DIAGNOSTIC_FAILURE		0x80
#define ASENCODE_INTERNAL_TARGET_FAILURE	0x00
#define ASENCODE_INVALID_MESSAGE_ERROR		0x00
#define ASENCODE_LUN_FAILED_SELF_CONFIG		0x00
#define ASENCODE_OVERLAPPED_COMMAND		0x00

/* Extract byte n of a 32-bit value (BYTE0 = least significant byte). */
#define BYTE0(x) (unsigned char)(x)
#define BYTE1(x) (unsigned char)((x) >> 8)
#define BYTE2(x) (unsigned char)((x) >> 16)
#define BYTE3(x) (unsigned char)((x) >> 24)
| 116 | |
| 117 | /*------------------------------------------------------------------------------ |
| 118 | * S T R U C T S / T Y P E D E F S |
| 119 | *----------------------------------------------------------------------------*/ |
/* SCSI inquiry data: the standard 36-byte INQUIRY response layout
 * (1+1+1+1+1+2+1+8+16+4 bytes) the driver synthesizes for containers. */
struct inquiry_data {
	u8 inqd_pdt;	/* Peripheral qualifier | Peripheral Device Type */
	u8 inqd_dtq;	/* RMB | Device Type Qualifier */
	u8 inqd_ver;	/* ISO version | ECMA version | ANSI-approved version */
	u8 inqd_rdf;	/* AENC | TrmIOP | Response data format */
	u8 inqd_len;	/* Additional length (n-4) */
	u8 inqd_pad1[2];/* Reserved - must be zero */
	u8 inqd_pad2;	/* RelAdr | WBus32 | WBus16 | Sync | Linked |Reserved| CmdQue | SftRe */
	u8 inqd_vid[8];	/* Vendor ID */
	u8 inqd_pid[16];/* Product ID */
	u8 inqd_prl[4];	/* Product Revision Level */
};
| 133 | |
| 134 | /* |
| 135 | * M O D U L E G L O B A L S |
| 136 | */ |
| 137 | |
/* Forward declarations for the scatter/gather builders and the SRB path. */
static unsigned long aac_build_sg(struct scsi_cmnd* scsicmd, struct sgmap* sgmap);
static unsigned long aac_build_sg64(struct scsi_cmnd* scsicmd, struct sgmap64* psg);
static int aac_send_srb_fib(struct scsi_cmnd* scsicmd);
#ifdef AAC_DETAILED_STATUS_INFO
static char *aac_get_status_string(u32 status);
#endif

/*
 *	Non dasd selection is handled entirely in aachba now
 */

/* Module parameters: -1 means "not set on the command line", in which
 * case the adapter-derived defaults apply (see aac_get_adapter_info and
 * aac_get_config_status, which test explicitly for 0 and 1). */
static int nondasd = -1;
static int dacmode = -1;

static int commit = -1;

module_param(nondasd, int, 0);
MODULE_PARM_DESC(nondasd, "Control scanning of hba for nondasd devices. 0=off, 1=on");
module_param(dacmode, int, 0);
MODULE_PARM_DESC(dacmode, "Control whether dma addressing is using 64 bit DAC. 0=off, 1=on");
module_param(commit, int, 0);
MODULE_PARM_DESC(commit, "Control whether a COMMIT_CONFIG is issued to the adapter for foreign arrays.\nThis is typically needed in systems that do not have a BIOS. 0=off, 1=on");
| 160 | |
/**
 *	aac_get_config_status	-	check the adapter configuration
 *	@dev: adapter to query
 *
 *	Query config status, and commit the configuration if needed.
 *	Returns the fib_send status (negative on failure), or -EINVAL
 *	when the adapter reports a state in which the configuration must
 *	not be committed.
 */
int aac_get_config_status(struct aac_dev *dev)
{
	int status = 0;
	struct fib * fibptr;

	if (!(fibptr = fib_alloc(dev)))
		return -ENOMEM;

	fib_init(fibptr);
	{
		struct aac_get_config_status *dinfo;
		dinfo = (struct aac_get_config_status *) fib_data(fibptr);

		dinfo->command = cpu_to_le32(VM_ContainerConfig);
		dinfo->type = cpu_to_le32(CT_GET_CONFIG_STATUS);
		/* Tell the firmware how much response payload we accept. */
		dinfo->count = cpu_to_le32(sizeof(((struct aac_get_config_status_resp *)NULL)->data));
	}

	/* Synchronous send: wait=1, sleep allowed=1, no completion callback. */
	status = fib_send(ContainerCommand,
			    fibptr,
			    sizeof (struct aac_get_config_status),
			    FsaNormal,
			    1, 1,
			    NULL, NULL);
	if (status < 0 ) {
		printk(KERN_WARNING "aac_get_config_status: SendFIB failed.\n");
	} else {
		struct aac_get_config_status_resp *reply
		  = (struct aac_get_config_status_resp *) fib_data(fibptr);
		dprintk((KERN_WARNING
		  "aac_get_config_status: response=%d status=%d action=%d\n",
		  le32_to_cpu(reply->response),
		  le32_to_cpu(reply->status),
		  le32_to_cpu(reply->data.action)));
		/* Any unexpected response/status/action vetoes the commit. */
		if ((le32_to_cpu(reply->response) != ST_OK) ||
		    (le32_to_cpu(reply->status) != CT_OK) ||
		    (le32_to_cpu(reply->data.action) > CFACT_PAUSE)) {
			printk(KERN_WARNING "aac_get_config_status: Will not issue the Commit Configuration\n");
			status = -EINVAL;
		}
	}
	fib_complete(fibptr);
	/* Send a CT_COMMIT_CONFIG to enable discovery of devices */
	if (status >= 0) {
		if (commit == 1) {
			/* Module parameter requested the commit; reuse the FIB. */
			struct aac_commit_config * dinfo;
			fib_init(fibptr);
			dinfo = (struct aac_commit_config *) fib_data(fibptr);

			dinfo->command = cpu_to_le32(VM_ContainerConfig);
			dinfo->type = cpu_to_le32(CT_COMMIT_CONFIG);

			status = fib_send(ContainerCommand,
				    fibptr,
				    sizeof (struct aac_commit_config),
				    FsaNormal,
				    1, 1,
				    NULL, NULL);
			fib_complete(fibptr);
		} else if (commit == 0) {
			printk(KERN_WARNING
			  "aac_get_config_status: Foreign device configurations are being ignored\n");
		}
	}
	fib_free(fibptr);
	return status;
}
| 234 | |
/**
 *	aac_get_containers	-	list containers
 *	@dev: adapter to probe
 *
 *	Make a list of all containers on this controller.  Allocates and
 *	fills dev->fsa_dev; returns 0/positive on success, negative errno
 *	on allocation or FIB-send failure.
 */
int aac_get_containers(struct aac_dev *dev)
{
	struct fsa_dev_info *fsa_dev_ptr;
	u32 index;
	int status = 0;
	struct fib * fibptr;
	unsigned instance;
	struct aac_get_container_count *dinfo;
	struct aac_get_container_count_resp *dresp;
	int maximum_num_containers = MAXIMUM_NUM_CONTAINERS;

	instance = dev->scsi_host_ptr->unique_id;

	if (!(fibptr = fib_alloc(dev)))
		return -ENOMEM;

	/* First ask the firmware how many containers it can report. */
	fib_init(fibptr);
	dinfo = (struct aac_get_container_count *) fib_data(fibptr);
	dinfo->command = cpu_to_le32(VM_ContainerConfig);
	dinfo->type = cpu_to_le32(CT_GET_CONTAINER_COUNT);

	status = fib_send(ContainerCommand,
		    fibptr,
		    sizeof (struct aac_get_container_count),
		    FsaNormal,
		    1, 1,
		    NULL, NULL);
	if (status >= 0) {
		dresp = (struct aac_get_container_count_resp *)fib_data(fibptr);
		maximum_num_containers = le32_to_cpu(dresp->ContainerSwitchEntries);
		fib_complete(fibptr);
	}

	/* Never size the table below the driver's compiled-in minimum. */
	if (maximum_num_containers < MAXIMUM_NUM_CONTAINERS)
		maximum_num_containers = MAXIMUM_NUM_CONTAINERS;

	fsa_dev_ptr = (struct fsa_dev_info *) kmalloc(
	  sizeof(*fsa_dev_ptr) * maximum_num_containers, GFP_KERNEL);
	if (!fsa_dev_ptr) {
		fib_free(fibptr);
		return -ENOMEM;
	}
	memset(fsa_dev_ptr, 0, sizeof(*fsa_dev_ptr) * maximum_num_containers);

	dev->fsa_dev = fsa_dev_ptr;
	dev->maximum_num_containers = maximum_num_containers;

	/* Query each container (VM_NameServe) and record the usable ones. */
	for (index = 0; index < dev->maximum_num_containers; index++) {
		struct aac_query_mount *dinfo;
		struct aac_mount *dresp;

		fsa_dev_ptr[index].devname[0] = '\0';

		fib_init(fibptr);
		dinfo = (struct aac_query_mount *) fib_data(fibptr);

		dinfo->command = cpu_to_le32(VM_NameServe);
		dinfo->count = cpu_to_le32(index);
		dinfo->type = cpu_to_le32(FT_FILESYS);

		status = fib_send(ContainerCommand,
		    fibptr,
		    sizeof (struct aac_query_mount),
		    FsaNormal,
		    1, 1,
		    NULL, NULL);
		if (status < 0 ) {
			printk(KERN_WARNING "aac_get_containers: SendFIB failed.\n");
			break;
		}
		dresp = (struct aac_mount *)fib_data(fibptr);

		dprintk ((KERN_DEBUG
		  "VM_NameServe cid=%d status=%d vol=%d state=%d cap=%u\n",
		  (int)index, (int)le32_to_cpu(dresp->status),
		  (int)le32_to_cpu(dresp->mnt[0].vol),
		  (int)le32_to_cpu(dresp->mnt[0].state),
		  (unsigned)le32_to_cpu(dresp->mnt[0].capacity)));
		/* Usable = query succeeded, real volume type, not hidden. */
		if ((le32_to_cpu(dresp->status) == ST_OK) &&
		    (le32_to_cpu(dresp->mnt[0].vol) != CT_NONE) &&
		    (le32_to_cpu(dresp->mnt[0].state) != FSCS_HIDDEN)) {
			fsa_dev_ptr[index].valid = 1;
			fsa_dev_ptr[index].type = le32_to_cpu(dresp->mnt[0].vol);
			fsa_dev_ptr[index].size = le32_to_cpu(dresp->mnt[0].capacity);
			if (le32_to_cpu(dresp->mnt[0].state) & FSCS_READONLY)
				fsa_dev_ptr[index].ro = 1;
		}
		fib_complete(fibptr);
		/*
		 * If there are no more containers, then stop asking.
		 */
		if ((index + 1) >= le32_to_cpu(dresp->count)){
			break;
		}
	}
	fib_free(fibptr);
	return status;
}
| 339 | |
| 340 | static void aac_io_done(struct scsi_cmnd * scsicmd) |
| 341 | { |
| 342 | unsigned long cpu_flags; |
| 343 | struct Scsi_Host *host = scsicmd->device->host; |
| 344 | spin_lock_irqsave(host->host_lock, cpu_flags); |
| 345 | scsicmd->scsi_done(scsicmd); |
| 346 | spin_unlock_irqrestore(host->host_lock, cpu_flags); |
| 347 | } |
| 348 | |
/* Completion handler for aac_get_container_name(): patches the container
 * name into the INQUIRY product-id field of the pending command, then
 * completes the command.  Always reports success to the midlayer. */
static void get_container_name_callback(void *context, struct fib * fibptr)
{
	struct aac_get_name_resp * get_name_reply;
	struct scsi_cmnd * scsicmd;

	scsicmd = (struct scsi_cmnd *) context;

	dprintk((KERN_DEBUG "get_container_name_callback[cpu %d]: t = %ld.\n", smp_processor_id(), jiffies));
	if (fibptr == NULL)
		BUG();

	get_name_reply = (struct aac_get_name_resp *) fib_data(fibptr);
	/* Failure is irrelevant, using default value instead */
	if ((le32_to_cpu(get_name_reply->status) == CT_OK)
	 && (get_name_reply->data[0] != '\0')) {
		int count;
		char * dp;
		char * sp = get_name_reply->data;
		/* Force termination, then skip any leading spaces. */
		sp[sizeof(((struct aac_get_name_resp *)NULL)->data)-1] = '\0';
		while (*sp == ' ')
			++sp;
		/* Overwrite the 16-byte inqd_pid field in the INQUIRY data
		 * already in the request buffer with the container name. */
		count = sizeof(((struct inquiry_data *)NULL)->inqd_pid);
		dp = ((struct inquiry_data *)scsicmd->request_buffer)->inqd_pid;
		/* Copy the name; once it runs out, pad the rest with spaces. */
		if (*sp) do {
			*dp++ = (*sp) ? *sp++ : ' ';
		} while (--count > 0);
	}
	scsicmd->result = DID_OK << 16 | COMMAND_COMPLETE << 8 | SAM_STAT_GOOD;

	fib_complete(fibptr);
	fib_free(fibptr);
	aac_io_done(scsicmd);
}
| 382 | |
| 383 | /** |
| 384 | * aac_get_container_name - get container name, none blocking. |
| 385 | */ |
| 386 | static int aac_get_container_name(struct scsi_cmnd * scsicmd, int cid) |
| 387 | { |
| 388 | int status; |
| 389 | struct aac_get_name *dinfo; |
| 390 | struct fib * cmd_fibcontext; |
| 391 | struct aac_dev * dev; |
| 392 | |
| 393 | dev = (struct aac_dev *)scsicmd->device->host->hostdata; |
| 394 | |
| 395 | if (!(cmd_fibcontext = fib_alloc(dev))) |
| 396 | return -ENOMEM; |
| 397 | |
| 398 | fib_init(cmd_fibcontext); |
| 399 | dinfo = (struct aac_get_name *) fib_data(cmd_fibcontext); |
| 400 | |
| 401 | dinfo->command = cpu_to_le32(VM_ContainerConfig); |
| 402 | dinfo->type = cpu_to_le32(CT_READ_NAME); |
| 403 | dinfo->cid = cpu_to_le32(cid); |
| 404 | dinfo->count = cpu_to_le32(sizeof(((struct aac_get_name_resp *)NULL)->data)); |
| 405 | |
| 406 | status = fib_send(ContainerCommand, |
| 407 | cmd_fibcontext, |
| 408 | sizeof (struct aac_get_name), |
| 409 | FsaNormal, |
| 410 | 0, 1, |
| 411 | (fib_callback) get_container_name_callback, |
| 412 | (void *) scsicmd); |
| 413 | |
| 414 | /* |
| 415 | * Check that the command queued to the controller |
| 416 | */ |
| 417 | if (status == -EINPROGRESS) |
| 418 | return 0; |
| 419 | |
| 420 | printk(KERN_WARNING "aac_get_container_name: fib_send failed with status: %d.\n", status); |
| 421 | fib_complete(cmd_fibcontext); |
| 422 | fib_free(cmd_fibcontext); |
| 423 | return -1; |
| 424 | } |
| 425 | |
| 426 | /** |
| 427 | * probe_container - query a logical volume |
| 428 | * @dev: device to query |
| 429 | * @cid: container identifier |
| 430 | * |
| 431 | * Queries the controller about the given volume. The volume information |
| 432 | * is updated in the struct fsa_dev_info structure rather than returned. |
| 433 | */ |
| 434 | |
| 435 | static int probe_container(struct aac_dev *dev, int cid) |
| 436 | { |
| 437 | struct fsa_dev_info *fsa_dev_ptr; |
| 438 | int status; |
| 439 | struct aac_query_mount *dinfo; |
| 440 | struct aac_mount *dresp; |
| 441 | struct fib * fibptr; |
| 442 | unsigned instance; |
| 443 | |
| 444 | fsa_dev_ptr = dev->fsa_dev; |
| 445 | instance = dev->scsi_host_ptr->unique_id; |
| 446 | |
| 447 | if (!(fibptr = fib_alloc(dev))) |
| 448 | return -ENOMEM; |
| 449 | |
| 450 | fib_init(fibptr); |
| 451 | |
| 452 | dinfo = (struct aac_query_mount *)fib_data(fibptr); |
| 453 | |
| 454 | dinfo->command = cpu_to_le32(VM_NameServe); |
| 455 | dinfo->count = cpu_to_le32(cid); |
| 456 | dinfo->type = cpu_to_le32(FT_FILESYS); |
| 457 | |
| 458 | status = fib_send(ContainerCommand, |
| 459 | fibptr, |
| 460 | sizeof(struct aac_query_mount), |
| 461 | FsaNormal, |
| 462 | 1, 1, |
| 463 | NULL, NULL); |
| 464 | if (status < 0) { |
| 465 | printk(KERN_WARNING "aacraid: probe_containers query failed.\n"); |
| 466 | goto error; |
| 467 | } |
| 468 | |
| 469 | dresp = (struct aac_mount *) fib_data(fibptr); |
| 470 | |
| 471 | if ((le32_to_cpu(dresp->status) == ST_OK) && |
| 472 | (le32_to_cpu(dresp->mnt[0].vol) != CT_NONE) && |
| 473 | (le32_to_cpu(dresp->mnt[0].state) != FSCS_HIDDEN)) { |
| 474 | fsa_dev_ptr[cid].valid = 1; |
| 475 | fsa_dev_ptr[cid].type = le32_to_cpu(dresp->mnt[0].vol); |
| 476 | fsa_dev_ptr[cid].size = le32_to_cpu(dresp->mnt[0].capacity); |
| 477 | if (le32_to_cpu(dresp->mnt[0].state) & FSCS_READONLY) |
| 478 | fsa_dev_ptr[cid].ro = 1; |
| 479 | } |
| 480 | |
| 481 | error: |
| 482 | fib_complete(fibptr); |
| 483 | fib_free(fibptr); |
| 484 | |
| 485 | return status; |
| 486 | } |
| 487 | |
/* Local structure holding the three string fields of a standard INQUIRY
 * response; fields are space-padded, not NUL-terminated (see inqstrcpy). */
struct scsi_inq {
	char vid[8];	/* Vendor ID */
	char pid[16];	/* Product ID */
	char prl[4];	/* Product Revision Level */
};
| 494 | |
/**
 *	inqstrcpy	-	string merge
 *	@a: string to copy from
 *	@b: buffer to copy into
 *
 *	Copy a string from one location to another without copying the
 *	terminating \0 (INQUIRY fields are fixed width and space padded,
 *	not NUL-terminated).  The source is const-qualified since callers
 *	pass string literals (e.g. "V1.0").
 */

static void inqstrcpy(const char *a, char *b)
{

	while(*a != (char)0)
		*b++ = *a++;
}
| 510 | |
/* Human-readable names for the container/volume types reported by the
 * firmware; indexed by the type value and used by setinqstr() to tag
 * the SCSI product-id string. */
static char *container_types[] = {
	"None",
	"Volume",
	"Mirror",
	"Stripe",
	"RAID5",
	"SSRW",
	"SSRO",
	"Morph",
	"Legacy",
	"RAID4",
	"RAID10",
	"RAID00",
	"V-MIRRORS",
	"PSEUDO R4",
	"RAID50",
	"Unknown"
};
| 529 | |
| 530 | |
| 531 | |
| 532 | /* Function: setinqstr |
| 533 | * |
| 534 | * Arguments: [1] pointer to void [1] int |
| 535 | * |
| 536 | * Purpose: Sets SCSI inquiry data strings for vendor, product |
| 537 | * and revision level. Allows strings to be set in platform dependant |
| 538 | * files instead of in OS dependant driver source. |
| 539 | */ |
| 540 | |
| 541 | static void setinqstr(int devtype, void *data, int tindex) |
| 542 | { |
| 543 | struct scsi_inq *str; |
| 544 | struct aac_driver_ident *mp; |
| 545 | |
| 546 | mp = aac_get_driver_ident(devtype); |
| 547 | |
| 548 | str = (struct scsi_inq *)(data); /* cast data to scsi inq block */ |
| 549 | |
| 550 | inqstrcpy (mp->vname, str->vid); |
| 551 | inqstrcpy (mp->model, str->pid); /* last six chars reserved for vol type */ |
| 552 | |
| 553 | if (tindex < (sizeof(container_types)/sizeof(char *))){ |
| 554 | char *findit = str->pid; |
| 555 | |
| 556 | for ( ; *findit != ' '; findit++); /* walk till we find a space */ |
| 557 | /* RAID is superfluous in the context of a RAID device */ |
| 558 | if (memcmp(findit-4, "RAID", 4) == 0) |
| 559 | *(findit -= 4) = ' '; |
| 560 | inqstrcpy (container_types[tindex], findit + 1); |
| 561 | } |
| 562 | inqstrcpy ("V1.0", str->prl); |
| 563 | } |
| 564 | |
| 565 | void set_sense(u8 *sense_buf, u8 sense_key, u8 sense_code, |
| 566 | u8 a_sense_code, u8 incorrect_length, |
| 567 | u8 bit_pointer, u16 field_pointer, |
| 568 | u32 residue) |
| 569 | { |
| 570 | sense_buf[0] = 0xF0; /* Sense data valid, err code 70h (current error) */ |
| 571 | sense_buf[1] = 0; /* Segment number, always zero */ |
| 572 | |
| 573 | if (incorrect_length) { |
| 574 | sense_buf[2] = sense_key | 0x20;/* Set ILI bit | sense key */ |
| 575 | sense_buf[3] = BYTE3(residue); |
| 576 | sense_buf[4] = BYTE2(residue); |
| 577 | sense_buf[5] = BYTE1(residue); |
| 578 | sense_buf[6] = BYTE0(residue); |
| 579 | } else |
| 580 | sense_buf[2] = sense_key; /* Sense key */ |
| 581 | |
| 582 | if (sense_key == ILLEGAL_REQUEST) |
| 583 | sense_buf[7] = 10; /* Additional sense length */ |
| 584 | else |
| 585 | sense_buf[7] = 6; /* Additional sense length */ |
| 586 | |
| 587 | sense_buf[12] = sense_code; /* Additional sense code */ |
| 588 | sense_buf[13] = a_sense_code; /* Additional sense code qualifier */ |
| 589 | if (sense_key == ILLEGAL_REQUEST) { |
| 590 | sense_buf[15] = 0; |
| 591 | |
| 592 | if (sense_code == SENCODE_INVALID_PARAM_FIELD) |
| 593 | sense_buf[15] = 0x80;/* Std sense key specific field */ |
| 594 | /* Illegal parameter is in the parameter block */ |
| 595 | |
| 596 | if (sense_code == SENCODE_INVALID_CDB_FIELD) |
| 597 | sense_buf[15] = 0xc0;/* Std sense key specific field */ |
| 598 | /* Illegal parameter is in the CDB block */ |
| 599 | sense_buf[15] |= bit_pointer; |
| 600 | sense_buf[16] = field_pointer >> 8; /* MSB */ |
| 601 | sense_buf[17] = field_pointer; /* LSB */ |
| 602 | } |
| 603 | } |
| 604 | |
/**
 *	aac_get_adapter_info	-	query and cache adapter properties
 *	@dev: adapter to query
 *
 *	Sends RequestAdapterInfo, caches the reply in dev->adapter_info,
 *	logs the kernel/monitor/bios revisions, and derives the
 *	nondasd_support, raid_scsi_mode and dac_support flags, honouring
 *	the "nondasd" and "dacmode" module parameters.  Returns the
 *	fib_send status, or -ENOMEM on allocation / DMA-mask failure.
 */
int aac_get_adapter_info(struct aac_dev* dev)
{
	struct fib* fibptr;
	struct aac_adapter_info* info;
	int rcode;
	u32 tmp;
	if (!(fibptr = fib_alloc(dev)))
		return -ENOMEM;

	fib_init(fibptr);
	info = (struct aac_adapter_info*) fib_data(fibptr);

	/* Clear the FIB data area before the request goes out. */
	memset(info,0,sizeof(struct aac_adapter_info));

	rcode = fib_send(RequestAdapterInfo,
			fibptr,
			sizeof(struct aac_adapter_info),
			FsaNormal,
			1, 1,
			NULL,
			NULL);

	/* NOTE(review): rcode is not checked before this copy; on a failed
	 * send the (zeroed) buffer is cached anyway — confirm intentional. */
	memcpy(&dev->adapter_info, info, sizeof(struct aac_adapter_info));

	/* Revisions are packed major(31:24).minor(23:16)-dash(7:0)[build]. */
	tmp = le32_to_cpu(dev->adapter_info.kernelrev);
	printk(KERN_INFO "%s%d: kernel %d.%d-%d[%d]\n",
			dev->name,
			dev->id,
			tmp>>24,
			(tmp>>16)&0xff,
			tmp&0xff,
			le32_to_cpu(dev->adapter_info.kernelbuild));
	tmp = le32_to_cpu(dev->adapter_info.monitorrev);
	printk(KERN_INFO "%s%d: monitor %d.%d-%d[%d]\n",
			dev->name, dev->id,
			tmp>>24,(tmp>>16)&0xff,tmp&0xff,
			le32_to_cpu(dev->adapter_info.monitorbuild));
	tmp = le32_to_cpu(dev->adapter_info.biosrev);
	printk(KERN_INFO "%s%d: bios %d.%d-%d[%d]\n",
			dev->name, dev->id,
			tmp>>24,(tmp>>16)&0xff,tmp&0xff,
			le32_to_cpu(dev->adapter_info.biosbuild));
	/* 0xBAD0 is apparently a "no serial" sentinel — skip printing it. */
	if (le32_to_cpu(dev->adapter_info.serial[0]) != 0xBAD0)
		printk(KERN_INFO "%s%d: serial %x\n",
			dev->name, dev->id,
			le32_to_cpu(dev->adapter_info.serial[0]));

	dev->nondasd_support = 0;
	dev->raid_scsi_mode = 0;
	if(dev->adapter_info.options & AAC_OPT_NONDASD){
		dev->nondasd_support = 1;
	}

	/*
	 * If the firmware supports ROMB RAID/SCSI mode and we are currently
	 * in RAID/SCSI mode, set the flag. For now if in this mode we will
	 * force nondasd support on. If we decide to allow the non-dasd flag
	 * additional changes changes will have to be made to support
	 * RAID/SCSI. the function aac_scsi_cmd in this module will have to be
	 * changed to support the new dev->raid_scsi_mode flag instead of
	 * leaching off of the dev->nondasd_support flag. Also in linit.c the
	 * function aac_detect will have to be modified where it sets up the
	 * max number of channels based on the aac->nondasd_support flag only.
	 */
	if ((dev->adapter_info.options & AAC_OPT_SCSI_MANAGED) &&
	    (dev->adapter_info.options & AAC_OPT_RAID_SCSI_MODE)) {
		dev->nondasd_support = 1;
		dev->raid_scsi_mode = 1;
	}
	if (dev->raid_scsi_mode != 0)
		printk(KERN_INFO "%s%d: ROMB RAID/SCSI mode enabled\n",
				dev->name, dev->id);

	/* Module parameter overrides the firmware-derived default. */
	if(nondasd != -1) {
		dev->nondasd_support = (nondasd!=0);
	}
	if(dev->nondasd_support != 0){
		printk(KERN_INFO "%s%d: Non-DASD support enabled.\n",dev->name, dev->id);
	}

	/* 64-bit DAC only when both the platform (dma_addr_t width) and
	 * the firmware (SGMAP_HOST64) support it. */
	dev->dac_support = 0;
	if( (sizeof(dma_addr_t) > 4) && (dev->adapter_info.options & AAC_OPT_SGMAP_HOST64)){
		printk(KERN_INFO "%s%d: 64bit support enabled.\n", dev->name, dev->id);
		dev->dac_support = 1;
	}

	/* Module parameter overrides the detected DAC mode. */
	if(dacmode != -1) {
		dev->dac_support = (dacmode!=0);
	}
	if(dev->dac_support != 0) {
		/* Try 64-bit masks first, fall back to 32-bit, else fail. */
		if (!pci_set_dma_mask(dev->pdev, 0xFFFFFFFFFFFFFFFFULL) &&
			!pci_set_consistent_dma_mask(dev->pdev, 0xFFFFFFFFFFFFFFFFULL)) {
			printk(KERN_INFO"%s%d: 64 Bit DAC enabled\n",
				dev->name, dev->id);
		} else if (!pci_set_dma_mask(dev->pdev, 0xFFFFFFFFULL) &&
			!pci_set_consistent_dma_mask(dev->pdev, 0xFFFFFFFFULL)) {
			printk(KERN_INFO"%s%d: DMA mask set failed, 64 Bit DAC disabled\n",
				dev->name, dev->id);
			dev->dac_support = 0;
		} else {
			printk(KERN_WARNING"%s%d: No suitable DMA available.\n",
				dev->name, dev->id);
			rcode = -ENOMEM;
		}
	}

	fib_complete(fibptr);
	fib_free(fibptr);

	return rcode;
}
| 716 | |
| 717 | |
/* Completion handler for aac_read(): unmaps the DMA buffers, translates
 * the adapter status into a SCSI result (with sense data on failure),
 * and completes the command. */
static void read_callback(void *context, struct fib * fibptr)
{
	struct aac_dev *dev;
	struct aac_read_reply *readreply;
	struct scsi_cmnd *scsicmd;
	u32 lba;
	u32 cid;

	scsicmd = (struct scsi_cmnd *) context;

	dev = (struct aac_dev *)scsicmd->device->host->hostdata;
	cid = ID_LUN_TO_CONTAINER(scsicmd->device->id, scsicmd->device->lun);

	/* Decoded as a 6-byte CDB, for the debug trace only (a 10-byte
	 * READ would log a different value — see aac_read's decoding). */
	lba = ((scsicmd->cmnd[1] & 0x1F) << 16) | (scsicmd->cmnd[2] << 8) | scsicmd->cmnd[3];
	dprintk((KERN_DEBUG "read_callback[cpu %d]: lba = %u, t = %ld.\n", smp_processor_id(), lba, jiffies));

	if (fibptr == NULL)
		BUG();

	/* Undo the DMA mapping established when the command was issued. */
	if(scsicmd->use_sg)
		pci_unmap_sg(dev->pdev,
			(struct scatterlist *)scsicmd->buffer,
			scsicmd->use_sg,
			scsicmd->sc_data_direction);
	else if(scsicmd->request_bufflen)
		pci_unmap_single(dev->pdev, scsicmd->SCp.dma_handle,
				 scsicmd->request_bufflen,
				 scsicmd->sc_data_direction);
	readreply = (struct aac_read_reply *)fib_data(fibptr);
	if (le32_to_cpu(readreply->status) == ST_OK)
		scsicmd->result = DID_OK << 16 | COMMAND_COMPLETE << 8 | SAM_STAT_GOOD;
	else {
		printk(KERN_WARNING "read_callback: read failed, status = %d\n",
				le32_to_cpu(readreply->status));
		/* Report CHECK CONDITION with hardware-error sense data. */
		scsicmd->result = DID_OK << 16 | COMMAND_COMPLETE << 8 | SAM_STAT_CHECK_CONDITION;
		set_sense((u8 *) &dev->fsa_dev[cid].sense_data,
				    HARDWARE_ERROR,
				    SENCODE_INTERNAL_TARGET_FAILURE,
				    ASENCODE_INTERNAL_TARGET_FAILURE, 0, 0,
				    0, 0);
		/* Clamp the copy so it cannot overrun the midlayer buffer. */
		memcpy(scsicmd->sense_buffer, &dev->fsa_dev[cid].sense_data,
		  (sizeof(dev->fsa_dev[cid].sense_data) > sizeof(scsicmd->sense_buffer))
		    ? sizeof(scsicmd->sense_buffer)
		    : sizeof(dev->fsa_dev[cid].sense_data));
	}
	fib_complete(fibptr);
	fib_free(fibptr);

	aac_io_done(scsicmd);
}
| 768 | |
| 769 | static void write_callback(void *context, struct fib * fibptr) |
| 770 | { |
| 771 | struct aac_dev *dev; |
| 772 | struct aac_write_reply *writereply; |
| 773 | struct scsi_cmnd *scsicmd; |
| 774 | u32 lba; |
| 775 | u32 cid; |
| 776 | |
| 777 | scsicmd = (struct scsi_cmnd *) context; |
| 778 | dev = (struct aac_dev *)scsicmd->device->host->hostdata; |
| 779 | cid = ID_LUN_TO_CONTAINER(scsicmd->device->id, scsicmd->device->lun); |
| 780 | |
| 781 | lba = ((scsicmd->cmnd[1] & 0x1F) << 16) | (scsicmd->cmnd[2] << 8) | scsicmd->cmnd[3]; |
| 782 | dprintk((KERN_DEBUG "write_callback[cpu %d]: lba = %u, t = %ld.\n", smp_processor_id(), lba, jiffies)); |
| 783 | if (fibptr == NULL) |
| 784 | BUG(); |
| 785 | |
| 786 | if(scsicmd->use_sg) |
| 787 | pci_unmap_sg(dev->pdev, |
| 788 | (struct scatterlist *)scsicmd->buffer, |
| 789 | scsicmd->use_sg, |
| 790 | scsicmd->sc_data_direction); |
| 791 | else if(scsicmd->request_bufflen) |
| 792 | pci_unmap_single(dev->pdev, scsicmd->SCp.dma_handle, |
| 793 | scsicmd->request_bufflen, |
| 794 | scsicmd->sc_data_direction); |
| 795 | |
| 796 | writereply = (struct aac_write_reply *) fib_data(fibptr); |
| 797 | if (le32_to_cpu(writereply->status) == ST_OK) |
| 798 | scsicmd->result = DID_OK << 16 | COMMAND_COMPLETE << 8 | SAM_STAT_GOOD; |
| 799 | else { |
| 800 | printk(KERN_WARNING "write_callback: write failed, status = %d\n", writereply->status); |
| 801 | scsicmd->result = DID_OK << 16 | COMMAND_COMPLETE << 8 | SAM_STAT_CHECK_CONDITION; |
| 802 | set_sense((u8 *) &dev->fsa_dev[cid].sense_data, |
| 803 | HARDWARE_ERROR, |
| 804 | SENCODE_INTERNAL_TARGET_FAILURE, |
| 805 | ASENCODE_INTERNAL_TARGET_FAILURE, 0, 0, |
| 806 | 0, 0); |
| 807 | memcpy(scsicmd->sense_buffer, &dev->fsa_dev[cid].sense_data, |
| 808 | sizeof(struct sense_data)); |
| 809 | } |
| 810 | |
| 811 | fib_complete(fibptr); |
| 812 | fib_free(fibptr); |
| 813 | aac_io_done(scsicmd); |
| 814 | } |
| 815 | |
| 816 | int aac_read(struct scsi_cmnd * scsicmd, int cid) |
| 817 | { |
| 818 | u32 lba; |
| 819 | u32 count; |
| 820 | int status; |
| 821 | |
| 822 | u16 fibsize; |
| 823 | struct aac_dev *dev; |
| 824 | struct fib * cmd_fibcontext; |
| 825 | |
| 826 | dev = (struct aac_dev *)scsicmd->device->host->hostdata; |
| 827 | /* |
| 828 | * Get block address and transfer length |
| 829 | */ |
| 830 | if (scsicmd->cmnd[0] == READ_6) /* 6 byte command */ |
| 831 | { |
| 832 | dprintk((KERN_DEBUG "aachba: received a read(6) command on id %d.\n", cid)); |
| 833 | |
| 834 | lba = ((scsicmd->cmnd[1] & 0x1F) << 16) | (scsicmd->cmnd[2] << 8) | scsicmd->cmnd[3]; |
| 835 | count = scsicmd->cmnd[4]; |
| 836 | |
| 837 | if (count == 0) |
| 838 | count = 256; |
| 839 | } else { |
| 840 | dprintk((KERN_DEBUG "aachba: received a read(10) command on id %d.\n", cid)); |
| 841 | |
| 842 | lba = (scsicmd->cmnd[2] << 24) | (scsicmd->cmnd[3] << 16) | (scsicmd->cmnd[4] << 8) | scsicmd->cmnd[5]; |
| 843 | count = (scsicmd->cmnd[7] << 8) | scsicmd->cmnd[8]; |
| 844 | } |
| 845 | dprintk((KERN_DEBUG "aac_read[cpu %d]: lba = %u, t = %ld.\n", smp_processor_id(), lba, jiffies)); |
| 846 | /* |
| 847 | * Alocate and initialize a Fib |
| 848 | */ |
| 849 | if (!(cmd_fibcontext = fib_alloc(dev))) { |
| 850 | return -1; |
| 851 | } |
| 852 | |
| 853 | fib_init(cmd_fibcontext); |
| 854 | |
| 855 | if(dev->dac_support == 1) { |
| 856 | struct aac_read64 *readcmd; |
| 857 | readcmd = (struct aac_read64 *) fib_data(cmd_fibcontext); |
| 858 | readcmd->command = cpu_to_le32(VM_CtHostRead64); |
| 859 | readcmd->cid = cpu_to_le16(cid); |
| 860 | readcmd->sector_count = cpu_to_le16(count); |
| 861 | readcmd->block = cpu_to_le32(lba); |
| 862 | readcmd->pad = 0; |
| 863 | readcmd->flags = 0; |
| 864 | |
| 865 | aac_build_sg64(scsicmd, &readcmd->sg); |
| 866 | fibsize = sizeof(struct aac_read64) + |
| 867 | ((le32_to_cpu(readcmd->sg.count) - 1) * |
| 868 | sizeof (struct sgentry64)); |
| 869 | BUG_ON (fibsize > (sizeof(struct hw_fib) - |
| 870 | sizeof(struct aac_fibhdr))); |
| 871 | /* |
| 872 | * Now send the Fib to the adapter |
| 873 | */ |
| 874 | status = fib_send(ContainerCommand64, |
| 875 | cmd_fibcontext, |
| 876 | fibsize, |
| 877 | FsaNormal, |
| 878 | 0, 1, |
| 879 | (fib_callback) read_callback, |
| 880 | (void *) scsicmd); |
| 881 | } else { |
| 882 | struct aac_read *readcmd; |
| 883 | readcmd = (struct aac_read *) fib_data(cmd_fibcontext); |
| 884 | readcmd->command = cpu_to_le32(VM_CtBlockRead); |
| 885 | readcmd->cid = cpu_to_le32(cid); |
| 886 | readcmd->block = cpu_to_le32(lba); |
| 887 | readcmd->count = cpu_to_le32(count * 512); |
| 888 | |
| 889 | if (count * 512 > (64 * 1024)) |
| 890 | BUG(); |
| 891 | |
| 892 | aac_build_sg(scsicmd, &readcmd->sg); |
| 893 | fibsize = sizeof(struct aac_read) + |
| 894 | ((le32_to_cpu(readcmd->sg.count) - 1) * |
| 895 | sizeof (struct sgentry)); |
| 896 | BUG_ON (fibsize > (sizeof(struct hw_fib) - |
| 897 | sizeof(struct aac_fibhdr))); |
| 898 | /* |
| 899 | * Now send the Fib to the adapter |
| 900 | */ |
| 901 | status = fib_send(ContainerCommand, |
| 902 | cmd_fibcontext, |
| 903 | fibsize, |
| 904 | FsaNormal, |
| 905 | 0, 1, |
| 906 | (fib_callback) read_callback, |
| 907 | (void *) scsicmd); |
| 908 | } |
| 909 | |
| 910 | |
| 911 | |
| 912 | /* |
| 913 | * Check that the command queued to the controller |
| 914 | */ |
| 915 | if (status == -EINPROGRESS) |
| 916 | return 0; |
| 917 | |
| 918 | printk(KERN_WARNING "aac_read: fib_send failed with status: %d.\n", status); |
| 919 | /* |
| 920 | * For some reason, the Fib didn't queue, return QUEUE_FULL |
| 921 | */ |
| 922 | scsicmd->result = DID_OK << 16 | COMMAND_COMPLETE << 8 | SAM_STAT_TASK_SET_FULL; |
| 923 | aac_io_done(scsicmd); |
| 924 | fib_complete(cmd_fibcontext); |
| 925 | fib_free(cmd_fibcontext); |
| 926 | return 0; |
| 927 | } |
| 928 | |
| 929 | static int aac_write(struct scsi_cmnd * scsicmd, int cid) |
| 930 | { |
| 931 | u32 lba; |
| 932 | u32 count; |
| 933 | int status; |
| 934 | u16 fibsize; |
| 935 | struct aac_dev *dev; |
| 936 | struct fib * cmd_fibcontext; |
| 937 | |
| 938 | dev = (struct aac_dev *)scsicmd->device->host->hostdata; |
| 939 | /* |
| 940 | * Get block address and transfer length |
| 941 | */ |
| 942 | if (scsicmd->cmnd[0] == WRITE_6) /* 6 byte command */ |
| 943 | { |
| 944 | lba = ((scsicmd->cmnd[1] & 0x1F) << 16) | (scsicmd->cmnd[2] << 8) | scsicmd->cmnd[3]; |
| 945 | count = scsicmd->cmnd[4]; |
| 946 | if (count == 0) |
| 947 | count = 256; |
| 948 | } else { |
| 949 | dprintk((KERN_DEBUG "aachba: received a write(10) command on id %d.\n", cid)); |
| 950 | lba = (scsicmd->cmnd[2] << 24) | (scsicmd->cmnd[3] << 16) | (scsicmd->cmnd[4] << 8) | scsicmd->cmnd[5]; |
| 951 | count = (scsicmd->cmnd[7] << 8) | scsicmd->cmnd[8]; |
| 952 | } |
| 953 | dprintk((KERN_DEBUG "aac_write[cpu %d]: lba = %u, t = %ld.\n", |
| 954 | smp_processor_id(), (unsigned long long)lba, jiffies)); |
| 955 | /* |
| 956 | * Allocate and initialize a Fib then setup a BlockWrite command |
| 957 | */ |
| 958 | if (!(cmd_fibcontext = fib_alloc(dev))) { |
| 959 | scsicmd->result = DID_ERROR << 16; |
| 960 | aac_io_done(scsicmd); |
| 961 | return 0; |
| 962 | } |
| 963 | fib_init(cmd_fibcontext); |
| 964 | |
| 965 | if(dev->dac_support == 1) { |
| 966 | struct aac_write64 *writecmd; |
| 967 | writecmd = (struct aac_write64 *) fib_data(cmd_fibcontext); |
| 968 | writecmd->command = cpu_to_le32(VM_CtHostWrite64); |
| 969 | writecmd->cid = cpu_to_le16(cid); |
| 970 | writecmd->sector_count = cpu_to_le16(count); |
| 971 | writecmd->block = cpu_to_le32(lba); |
| 972 | writecmd->pad = 0; |
| 973 | writecmd->flags = 0; |
| 974 | |
| 975 | aac_build_sg64(scsicmd, &writecmd->sg); |
| 976 | fibsize = sizeof(struct aac_write64) + |
| 977 | ((le32_to_cpu(writecmd->sg.count) - 1) * |
| 978 | sizeof (struct sgentry64)); |
| 979 | BUG_ON (fibsize > (sizeof(struct hw_fib) - |
| 980 | sizeof(struct aac_fibhdr))); |
| 981 | /* |
| 982 | * Now send the Fib to the adapter |
| 983 | */ |
| 984 | status = fib_send(ContainerCommand64, |
| 985 | cmd_fibcontext, |
| 986 | fibsize, |
| 987 | FsaNormal, |
| 988 | 0, 1, |
| 989 | (fib_callback) write_callback, |
| 990 | (void *) scsicmd); |
| 991 | } else { |
| 992 | struct aac_write *writecmd; |
| 993 | writecmd = (struct aac_write *) fib_data(cmd_fibcontext); |
| 994 | writecmd->command = cpu_to_le32(VM_CtBlockWrite); |
| 995 | writecmd->cid = cpu_to_le32(cid); |
| 996 | writecmd->block = cpu_to_le32(lba); |
| 997 | writecmd->count = cpu_to_le32(count * 512); |
| 998 | writecmd->sg.count = cpu_to_le32(1); |
| 999 | /* ->stable is not used - it did mean which type of write */ |
| 1000 | |
| 1001 | if (count * 512 > (64 * 1024)) { |
| 1002 | BUG(); |
| 1003 | } |
| 1004 | |
| 1005 | aac_build_sg(scsicmd, &writecmd->sg); |
| 1006 | fibsize = sizeof(struct aac_write) + |
| 1007 | ((le32_to_cpu(writecmd->sg.count) - 1) * |
| 1008 | sizeof (struct sgentry)); |
| 1009 | BUG_ON (fibsize > (sizeof(struct hw_fib) - |
| 1010 | sizeof(struct aac_fibhdr))); |
| 1011 | /* |
| 1012 | * Now send the Fib to the adapter |
| 1013 | */ |
| 1014 | status = fib_send(ContainerCommand, |
| 1015 | cmd_fibcontext, |
| 1016 | fibsize, |
| 1017 | FsaNormal, |
| 1018 | 0, 1, |
| 1019 | (fib_callback) write_callback, |
| 1020 | (void *) scsicmd); |
| 1021 | } |
| 1022 | |
| 1023 | /* |
| 1024 | * Check that the command queued to the controller |
| 1025 | */ |
| 1026 | if (status == -EINPROGRESS) |
| 1027 | { |
| 1028 | dprintk("write queued.\n"); |
| 1029 | return 0; |
| 1030 | } |
| 1031 | |
| 1032 | printk(KERN_WARNING "aac_write: fib_send failed with status: %d\n", status); |
| 1033 | /* |
| 1034 | * For some reason, the Fib didn't queue, return QUEUE_FULL |
| 1035 | */ |
| 1036 | scsicmd->result = DID_OK << 16 | COMMAND_COMPLETE << 8 | SAM_STAT_TASK_SET_FULL; |
| 1037 | aac_io_done(scsicmd); |
| 1038 | |
| 1039 | fib_complete(cmd_fibcontext); |
| 1040 | fib_free(cmd_fibcontext); |
| 1041 | return 0; |
| 1042 | } |
| 1043 | |
| 1044 | static void synchronize_callback(void *context, struct fib *fibptr) |
| 1045 | { |
| 1046 | struct aac_synchronize_reply *synchronizereply; |
| 1047 | struct scsi_cmnd *cmd; |
| 1048 | |
| 1049 | cmd = context; |
| 1050 | |
| 1051 | dprintk((KERN_DEBUG "synchronize_callback[cpu %d]: t = %ld.\n", |
| 1052 | smp_processor_id(), jiffies)); |
| 1053 | BUG_ON(fibptr == NULL); |
| 1054 | |
| 1055 | |
| 1056 | synchronizereply = fib_data(fibptr); |
| 1057 | if (le32_to_cpu(synchronizereply->status) == CT_OK) |
| 1058 | cmd->result = DID_OK << 16 | |
| 1059 | COMMAND_COMPLETE << 8 | SAM_STAT_GOOD; |
| 1060 | else { |
| 1061 | struct scsi_device *sdev = cmd->device; |
| 1062 | struct aac_dev *dev = (struct aac_dev *)sdev->host->hostdata; |
| 1063 | u32 cid = ID_LUN_TO_CONTAINER(sdev->id, sdev->lun); |
| 1064 | printk(KERN_WARNING |
| 1065 | "synchronize_callback: synchronize failed, status = %d\n", |
| 1066 | le32_to_cpu(synchronizereply->status)); |
| 1067 | cmd->result = DID_OK << 16 | |
| 1068 | COMMAND_COMPLETE << 8 | SAM_STAT_CHECK_CONDITION; |
| 1069 | set_sense((u8 *)&dev->fsa_dev[cid].sense_data, |
| 1070 | HARDWARE_ERROR, |
| 1071 | SENCODE_INTERNAL_TARGET_FAILURE, |
| 1072 | ASENCODE_INTERNAL_TARGET_FAILURE, 0, 0, |
| 1073 | 0, 0); |
| 1074 | memcpy(cmd->sense_buffer, &dev->fsa_dev[cid].sense_data, |
| 1075 | min(sizeof(dev->fsa_dev[cid].sense_data), |
| 1076 | sizeof(cmd->sense_buffer))); |
| 1077 | } |
| 1078 | |
| 1079 | fib_complete(fibptr); |
| 1080 | fib_free(fibptr); |
| 1081 | aac_io_done(cmd); |
| 1082 | } |
| 1083 | |
| 1084 | static int aac_synchronize(struct scsi_cmnd *scsicmd, int cid) |
| 1085 | { |
| 1086 | int status; |
| 1087 | struct fib *cmd_fibcontext; |
| 1088 | struct aac_synchronize *synchronizecmd; |
| 1089 | struct scsi_cmnd *cmd; |
| 1090 | struct scsi_device *sdev = scsicmd->device; |
| 1091 | int active = 0; |
| 1092 | unsigned long flags; |
| 1093 | |
| 1094 | /* |
| 1095 | * Wait for all commands to complete to this specific |
| 1096 | * target (block). |
| 1097 | */ |
| 1098 | spin_lock_irqsave(&sdev->list_lock, flags); |
| 1099 | list_for_each_entry(cmd, &sdev->cmd_list, list) |
| 1100 | if (cmd != scsicmd && cmd->serial_number != 0) { |
| 1101 | ++active; |
| 1102 | break; |
| 1103 | } |
| 1104 | |
| 1105 | spin_unlock_irqrestore(&sdev->list_lock, flags); |
| 1106 | |
| 1107 | /* |
| 1108 | * Yield the processor (requeue for later) |
| 1109 | */ |
| 1110 | if (active) |
| 1111 | return SCSI_MLQUEUE_DEVICE_BUSY; |
| 1112 | |
| 1113 | /* |
| 1114 | * Alocate and initialize a Fib |
| 1115 | */ |
| 1116 | if (!(cmd_fibcontext = |
| 1117 | fib_alloc((struct aac_dev *)scsicmd->device->host->hostdata))) |
| 1118 | return SCSI_MLQUEUE_HOST_BUSY; |
| 1119 | |
| 1120 | fib_init(cmd_fibcontext); |
| 1121 | |
| 1122 | synchronizecmd = fib_data(cmd_fibcontext); |
| 1123 | synchronizecmd->command = cpu_to_le32(VM_ContainerConfig); |
| 1124 | synchronizecmd->type = cpu_to_le32(CT_FLUSH_CACHE); |
| 1125 | synchronizecmd->cid = cpu_to_le32(cid); |
| 1126 | synchronizecmd->count = |
| 1127 | cpu_to_le32(sizeof(((struct aac_synchronize_reply *)NULL)->data)); |
| 1128 | |
| 1129 | /* |
| 1130 | * Now send the Fib to the adapter |
| 1131 | */ |
| 1132 | status = fib_send(ContainerCommand, |
| 1133 | cmd_fibcontext, |
| 1134 | sizeof(struct aac_synchronize), |
| 1135 | FsaNormal, |
| 1136 | 0, 1, |
| 1137 | (fib_callback)synchronize_callback, |
| 1138 | (void *)scsicmd); |
| 1139 | |
| 1140 | /* |
| 1141 | * Check that the command queued to the controller |
| 1142 | */ |
| 1143 | if (status == -EINPROGRESS) |
| 1144 | return 0; |
| 1145 | |
| 1146 | printk(KERN_WARNING |
| 1147 | "aac_synchronize: fib_send failed with status: %d.\n", status); |
| 1148 | fib_complete(cmd_fibcontext); |
| 1149 | fib_free(cmd_fibcontext); |
| 1150 | return SCSI_MLQUEUE_HOST_BUSY; |
| 1151 | } |
| 1152 | |
| 1153 | /** |
| 1154 | * aac_scsi_cmd() - Process SCSI command |
| 1155 | * @scsicmd: SCSI command block |
| 1156 | * |
| 1157 | * Emulate a SCSI command and queue the required request for the |
| 1158 | * aacraid firmware. |
| 1159 | */ |
| 1160 | |
int aac_scsi_cmd(struct scsi_cmnd * scsicmd)
{
	u32 cid = 0;
	struct Scsi_Host *host = scsicmd->device->host;
	struct aac_dev *dev = (struct aac_dev *)host->hostdata;
	struct fsa_dev_info *fsa_dev_ptr = dev->fsa_dev;
	int cardtype = dev->cardtype;
	int ret;

	/*
	 *	If the bus, id or lun is out of range, return fail
	 *	Test does not apply to ID 16, the pseudo id for the controller
	 *	itself.
	 */
	if (scsicmd->device->id != host->this_id) {
		if ((scsicmd->device->channel == 0) ){
			/* channel 0 carries containers: only one LUN each */
			if( (scsicmd->device->id >= dev->maximum_num_containers) || (scsicmd->device->lun != 0)){
				scsicmd->result = DID_NO_CONNECT << 16;
				scsicmd->scsi_done(scsicmd);
				return 0;
			}
			cid = ID_LUN_TO_CONTAINER(scsicmd->device->id, scsicmd->device->lun);

			/*
			 *	If the target container doesn't exist, it may have
			 *	been newly created
			 */
			if ((fsa_dev_ptr[cid].valid & 1) == 0) {
				switch (scsicmd->cmnd[0]) {
				case INQUIRY:
				case READ_CAPACITY:
				case TEST_UNIT_READY:
					/*
					 * probe_container() can sleep, so the
					 * host lock is dropped around the probe.
					 */
					spin_unlock_irq(host->host_lock);
					probe_container(dev, cid);
					spin_lock_irq(host->host_lock);
					if (fsa_dev_ptr[cid].valid == 0) {
						scsicmd->result = DID_NO_CONNECT << 16;
						scsicmd->scsi_done(scsicmd);
						return 0;
					}
					/* fallthrough */
				default:
					break;
				}
			}
			/*
			 *	If the target container still doesn't exist,
			 *	return failure
			 */
			if (fsa_dev_ptr[cid].valid == 0) {
				scsicmd->result = DID_BAD_TARGET << 16;
				scsicmd->scsi_done(scsicmd);
				return 0;
			}
		} else {  /* check for physical non-dasd devices */
			if(dev->nondasd_support == 1){
				/* pass the command through as a raw SRB */
				return aac_send_srb_fib(scsicmd);
			} else {
				scsicmd->result = DID_NO_CONNECT << 16;
				scsicmd->scsi_done(scsicmd);
				return 0;
			}
		}
	}
	/*
	 * else Command for the controller itself
	 */
	else if ((scsicmd->cmnd[0] != INQUIRY) &&	/* only INQUIRY & TUR cmnd supported for controller */
		(scsicmd->cmnd[0] != TEST_UNIT_READY))
	{
		dprintk((KERN_WARNING "Only INQUIRY & TUR command supported for controller, rcvd = 0x%x.\n", scsicmd->cmnd[0]));
		scsicmd->result = DID_OK << 16 | COMMAND_COMPLETE << 8 | SAM_STAT_CHECK_CONDITION;
		set_sense((u8 *) &dev->fsa_dev[cid].sense_data,
			    ILLEGAL_REQUEST,
			    SENCODE_INVALID_COMMAND,
			    ASENCODE_INVALID_COMMAND, 0, 0, 0, 0);
		/* bounded copy: never overrun the midlayer sense buffer */
		memcpy(scsicmd->sense_buffer, &dev->fsa_dev[cid].sense_data,
		  (sizeof(dev->fsa_dev[cid].sense_data) > sizeof(scsicmd->sense_buffer))
		    ? sizeof(scsicmd->sense_buffer)
		    : sizeof(dev->fsa_dev[cid].sense_data));
		scsicmd->scsi_done(scsicmd);
		return 0;
	}


	/* Handle commands here that don't really require going out to the adapter */
	switch (scsicmd->cmnd[0]) {
	case INQUIRY:
	{
		struct inquiry_data *inq_data_ptr;

		dprintk((KERN_DEBUG "INQUIRY command, ID: %d.\n", scsicmd->device->id));
		/* NOTE(review): writes straight into request_buffer — assumes
		 * the midlayer supplied a flat (non-sg) buffer here; confirm */
		inq_data_ptr = (struct inquiry_data *)scsicmd->request_buffer;
		memset(inq_data_ptr, 0, sizeof (struct inquiry_data));

		inq_data_ptr->inqd_ver = 2;	/* claim compliance to SCSI-2 */
		inq_data_ptr->inqd_dtq = 0x80;	/* set RMB bit to one indicating that the medium is removable */
		inq_data_ptr->inqd_rdf = 2;	/* A response data format value of two indicates that the data shall be in the format specified in SCSI-2 */
		inq_data_ptr->inqd_len = 31;
		/*Format for "pad2" is  RelAdr | WBus32 | WBus16 |  Sync  | Linked |Reserved| CmdQue | SftRe */
		inq_data_ptr->inqd_pad2= 0x32 ;	 /*WBus16|Sync|CmdQue */
		/*
		 *	Set the Vendor, Product, and Revision Level
		 *	see: <vendor>.c i.e. aac.c
		 */
		if (scsicmd->device->id == host->this_id) {
			/* pseudo-id: report the controller as a processor device */
			setinqstr(cardtype, (void *) (inq_data_ptr->inqd_vid), (sizeof(container_types)/sizeof(char *)));
			inq_data_ptr->inqd_pdt = INQD_PDT_PROC;	/* Processor device */
			scsicmd->result = DID_OK << 16 | COMMAND_COMPLETE << 8 | SAM_STAT_GOOD;
			scsicmd->scsi_done(scsicmd);
			return 0;
		}
		setinqstr(cardtype, (void *) (inq_data_ptr->inqd_vid), fsa_dev_ptr[cid].type);
		inq_data_ptr->inqd_pdt = INQD_PDT_DA;	/* Direct/random access device */
		/* asynchronous: fetches the container name for the product field */
		return aac_get_container_name(scsicmd, cid);
	}
	case READ_CAPACITY:
	{
		u32 capacity;
		char *cp;

		dprintk((KERN_DEBUG "READ CAPACITY command.\n"));
		/* last addressable LBA, clamped to 32 bits for READ CAPACITY(10) */
		if (fsa_dev_ptr[cid].size <= 0x100000000LL)
			capacity = fsa_dev_ptr[cid].size - 1;
		else
			capacity = (u32)-1;
		/* big-endian LBA followed by big-endian block length (512) */
		cp = scsicmd->request_buffer;
		cp[0] = (capacity >> 24) & 0xff;
		cp[1] = (capacity >> 16) & 0xff;
		cp[2] = (capacity >> 8) & 0xff;
		cp[3] = (capacity >> 0) & 0xff;
		cp[4] = 0;
		cp[5] = 0;
		cp[6] = 2;
		cp[7] = 0;

		scsicmd->result = DID_OK << 16 | COMMAND_COMPLETE << 8 | SAM_STAT_GOOD;
		scsicmd->scsi_done(scsicmd);

		return 0;
	}

	case MODE_SENSE:
	{
		char *mode_buf;

		dprintk((KERN_DEBUG "MODE SENSE command.\n"));
		/* minimal 4-byte MODE SENSE(6) header, no block descriptors */
		mode_buf = scsicmd->request_buffer;
		mode_buf[0] = 3;	/* Mode data length */
		mode_buf[1] = 0;	/* Medium type - default */
		mode_buf[2] = 0;	/* Device-specific param, bit 8: 0/1 = write enabled/protected */
		mode_buf[3] = 0;	/* Block descriptor length */

		scsicmd->result = DID_OK << 16 | COMMAND_COMPLETE << 8 | SAM_STAT_GOOD;
		scsicmd->scsi_done(scsicmd);

		return 0;
	}
	case MODE_SENSE_10:
	{
		char *mode_buf;

		dprintk((KERN_DEBUG "MODE SENSE 10 byte command.\n"));
		/* minimal 8-byte MODE SENSE(10) header, no block descriptors */
		mode_buf = scsicmd->request_buffer;
		mode_buf[0] = 0;	/* Mode data length (MSB) */
		mode_buf[1] = 6;	/* Mode data length (LSB) */
		mode_buf[2] = 0;	/* Medium type - default */
		mode_buf[3] = 0;	/* Device-specific param, bit 8: 0/1 = write enabled/protected */
		mode_buf[4] = 0;	/* reserved */
		mode_buf[5] = 0;	/* reserved */
		mode_buf[6] = 0;	/* Block descriptor length (MSB) */
		mode_buf[7] = 0;	/* Block descriptor length (LSB) */

		scsicmd->result = DID_OK << 16 | COMMAND_COMPLETE << 8 | SAM_STAT_GOOD;
		scsicmd->scsi_done(scsicmd);

		return 0;
	}
	case REQUEST_SENSE:
		dprintk((KERN_DEBUG "REQUEST SENSE command.\n"));
		/* return the stored sense data, then clear it (SCSI-2 semantics) */
		memcpy(scsicmd->sense_buffer, &dev->fsa_dev[cid].sense_data, sizeof (struct sense_data));
		memset(&dev->fsa_dev[cid].sense_data, 0, sizeof (struct sense_data));
		scsicmd->result = DID_OK << 16 | COMMAND_COMPLETE << 8 | SAM_STAT_GOOD;
		scsicmd->scsi_done(scsicmd);
		return 0;

	case ALLOW_MEDIUM_REMOVAL:
		dprintk((KERN_DEBUG "LOCK command.\n"));
		/* cmnd[4] nonzero = prevent removal: remember the lock state */
		if (scsicmd->cmnd[4])
			fsa_dev_ptr[cid].locked = 1;
		else
			fsa_dev_ptr[cid].locked = 0;

		scsicmd->result = DID_OK << 16 | COMMAND_COMPLETE << 8 | SAM_STAT_GOOD;
		scsicmd->scsi_done(scsicmd);
		return 0;
	/*
	 *	These commands are all No-Ops
	 */
	case TEST_UNIT_READY:
	case RESERVE:
	case RELEASE:
	case REZERO_UNIT:
	case REASSIGN_BLOCKS:
	case SEEK_10:
	case START_STOP:
		scsicmd->result = DID_OK << 16 | COMMAND_COMPLETE << 8 | SAM_STAT_GOOD;
		scsicmd->scsi_done(scsicmd);
		return 0;
	}

	/* Commands that go out to the adapter firmware */
	switch (scsicmd->cmnd[0])
	{
		case READ_6:
		case READ_10:
			/*
			 *	Hack to keep track of ordinal number of the device that
			 *	corresponds to a container. Needed to convert
			 *	containers to /dev/sd device names
			 */

			spin_unlock_irq(host->host_lock);
			if (scsicmd->request->rq_disk)
				memcpy(fsa_dev_ptr[cid].devname,
					scsicmd->request->rq_disk->disk_name,
					8);

			ret = aac_read(scsicmd, cid);
			spin_lock_irq(host->host_lock);
			return ret;

		case WRITE_6:
		case WRITE_10:
			/* aac_write() may sleep in fib handling; drop the lock */
			spin_unlock_irq(host->host_lock);
			ret = aac_write(scsicmd, cid);
			spin_lock_irq(host->host_lock);
			return ret;

		case SYNCHRONIZE_CACHE:
			/* Issue FIB to tell Firmware to flush its cache */
			return aac_synchronize(scsicmd, cid);

		default:
			/*
			 *	Unhandled commands
			 */
			printk(KERN_WARNING "Unhandled SCSI Command: 0x%x.\n", scsicmd->cmnd[0]);
			scsicmd->result = DID_OK << 16 | COMMAND_COMPLETE << 8 | SAM_STAT_CHECK_CONDITION;
			set_sense((u8 *) &dev->fsa_dev[cid].sense_data,
				ILLEGAL_REQUEST, SENCODE_INVALID_COMMAND,
				ASENCODE_INVALID_COMMAND, 0, 0, 0, 0);
			/* bounded copy: never overrun the midlayer sense buffer */
			memcpy(scsicmd->sense_buffer, &dev->fsa_dev[cid].sense_data,
			  (sizeof(dev->fsa_dev[cid].sense_data) > sizeof(scsicmd->sense_buffer))
			    ? sizeof(scsicmd->sense_buffer)
			    : sizeof(dev->fsa_dev[cid].sense_data));
			scsicmd->scsi_done(scsicmd);
			return 0;
	}
}
| 1419 | |
| 1420 | static int query_disk(struct aac_dev *dev, void __user *arg) |
| 1421 | { |
| 1422 | struct aac_query_disk qd; |
| 1423 | struct fsa_dev_info *fsa_dev_ptr; |
| 1424 | |
| 1425 | fsa_dev_ptr = dev->fsa_dev; |
| 1426 | if (copy_from_user(&qd, arg, sizeof (struct aac_query_disk))) |
| 1427 | return -EFAULT; |
| 1428 | if (qd.cnum == -1) |
| 1429 | qd.cnum = ID_LUN_TO_CONTAINER(qd.id, qd.lun); |
| 1430 | else if ((qd.bus == -1) && (qd.id == -1) && (qd.lun == -1)) |
| 1431 | { |
| 1432 | if (qd.cnum < 0 || qd.cnum >= dev->maximum_num_containers) |
| 1433 | return -EINVAL; |
| 1434 | qd.instance = dev->scsi_host_ptr->host_no; |
| 1435 | qd.bus = 0; |
| 1436 | qd.id = CONTAINER_TO_ID(qd.cnum); |
| 1437 | qd.lun = CONTAINER_TO_LUN(qd.cnum); |
| 1438 | } |
| 1439 | else return -EINVAL; |
| 1440 | |
| 1441 | qd.valid = fsa_dev_ptr[qd.cnum].valid; |
| 1442 | qd.locked = fsa_dev_ptr[qd.cnum].locked; |
| 1443 | qd.deleted = fsa_dev_ptr[qd.cnum].deleted; |
| 1444 | |
| 1445 | if (fsa_dev_ptr[qd.cnum].devname[0] == '\0') |
| 1446 | qd.unmapped = 1; |
| 1447 | else |
| 1448 | qd.unmapped = 0; |
| 1449 | |
| 1450 | strlcpy(qd.name, fsa_dev_ptr[qd.cnum].devname, |
| 1451 | min(sizeof(qd.name), sizeof(fsa_dev_ptr[qd.cnum].devname) + 1)); |
| 1452 | |
| 1453 | if (copy_to_user(arg, &qd, sizeof (struct aac_query_disk))) |
| 1454 | return -EFAULT; |
| 1455 | return 0; |
| 1456 | } |
| 1457 | |
| 1458 | static int force_delete_disk(struct aac_dev *dev, void __user *arg) |
| 1459 | { |
| 1460 | struct aac_delete_disk dd; |
| 1461 | struct fsa_dev_info *fsa_dev_ptr; |
| 1462 | |
| 1463 | fsa_dev_ptr = dev->fsa_dev; |
| 1464 | |
| 1465 | if (copy_from_user(&dd, arg, sizeof (struct aac_delete_disk))) |
| 1466 | return -EFAULT; |
| 1467 | |
| 1468 | if (dd.cnum >= dev->maximum_num_containers) |
| 1469 | return -EINVAL; |
| 1470 | /* |
| 1471 | * Mark this container as being deleted. |
| 1472 | */ |
| 1473 | fsa_dev_ptr[dd.cnum].deleted = 1; |
| 1474 | /* |
| 1475 | * Mark the container as no longer valid |
| 1476 | */ |
| 1477 | fsa_dev_ptr[dd.cnum].valid = 0; |
| 1478 | return 0; |
| 1479 | } |
| 1480 | |
| 1481 | static int delete_disk(struct aac_dev *dev, void __user *arg) |
| 1482 | { |
| 1483 | struct aac_delete_disk dd; |
| 1484 | struct fsa_dev_info *fsa_dev_ptr; |
| 1485 | |
| 1486 | fsa_dev_ptr = dev->fsa_dev; |
| 1487 | |
| 1488 | if (copy_from_user(&dd, arg, sizeof (struct aac_delete_disk))) |
| 1489 | return -EFAULT; |
| 1490 | |
| 1491 | if (dd.cnum >= dev->maximum_num_containers) |
| 1492 | return -EINVAL; |
| 1493 | /* |
| 1494 | * If the container is locked, it can not be deleted by the API. |
| 1495 | */ |
| 1496 | if (fsa_dev_ptr[dd.cnum].locked) |
| 1497 | return -EBUSY; |
| 1498 | else { |
| 1499 | /* |
| 1500 | * Mark the container as no longer being valid. |
| 1501 | */ |
| 1502 | fsa_dev_ptr[dd.cnum].valid = 0; |
| 1503 | fsa_dev_ptr[dd.cnum].devname[0] = '\0'; |
| 1504 | return 0; |
| 1505 | } |
| 1506 | } |
| 1507 | |
| 1508 | int aac_dev_ioctl(struct aac_dev *dev, int cmd, void __user *arg) |
| 1509 | { |
| 1510 | switch (cmd) { |
| 1511 | case FSACTL_QUERY_DISK: |
| 1512 | return query_disk(dev, arg); |
| 1513 | case FSACTL_DELETE_DISK: |
| 1514 | return delete_disk(dev, arg); |
| 1515 | case FSACTL_FORCE_DELETE_DISK: |
| 1516 | return force_delete_disk(dev, arg); |
| 1517 | case FSACTL_GET_CONTAINERS: |
| 1518 | return aac_get_containers(dev); |
| 1519 | default: |
| 1520 | return -ENOTTY; |
| 1521 | } |
| 1522 | } |
| 1523 | |
| 1524 | /** |
| 1525 | * |
| 1526 | * aac_srb_callback |
| 1527 | * @context: the context set in the fib - here it is scsi cmd |
| 1528 | * @fibptr: pointer to the fib |
| 1529 | * |
| 1530 | * Handles the completion of a scsi command to a non dasd device |
| 1531 | * |
| 1532 | */ |
| 1533 | |
| 1534 | static void aac_srb_callback(void *context, struct fib * fibptr) |
| 1535 | { |
| 1536 | struct aac_dev *dev; |
| 1537 | struct aac_srb_reply *srbreply; |
| 1538 | struct scsi_cmnd *scsicmd; |
| 1539 | |
| 1540 | scsicmd = (struct scsi_cmnd *) context; |
| 1541 | dev = (struct aac_dev *)scsicmd->device->host->hostdata; |
| 1542 | |
| 1543 | if (fibptr == NULL) |
| 1544 | BUG(); |
| 1545 | |
| 1546 | srbreply = (struct aac_srb_reply *) fib_data(fibptr); |
| 1547 | |
| 1548 | scsicmd->sense_buffer[0] = '\0'; /* Initialize sense valid flag to false */ |
| 1549 | /* |
| 1550 | * Calculate resid for sg |
| 1551 | */ |
| 1552 | |
| 1553 | scsicmd->resid = scsicmd->request_bufflen - |
| 1554 | le32_to_cpu(srbreply->data_xfer_length); |
| 1555 | |
| 1556 | if(scsicmd->use_sg) |
| 1557 | pci_unmap_sg(dev->pdev, |
| 1558 | (struct scatterlist *)scsicmd->buffer, |
| 1559 | scsicmd->use_sg, |
| 1560 | scsicmd->sc_data_direction); |
| 1561 | else if(scsicmd->request_bufflen) |
| 1562 | pci_unmap_single(dev->pdev, scsicmd->SCp.dma_handle, scsicmd->request_bufflen, |
| 1563 | scsicmd->sc_data_direction); |
| 1564 | |
| 1565 | /* |
| 1566 | * First check the fib status |
| 1567 | */ |
| 1568 | |
| 1569 | if (le32_to_cpu(srbreply->status) != ST_OK){ |
| 1570 | int len; |
| 1571 | printk(KERN_WARNING "aac_srb_callback: srb failed, status = %d\n", le32_to_cpu(srbreply->status)); |
| 1572 | len = (le32_to_cpu(srbreply->sense_data_size) > |
| 1573 | sizeof(scsicmd->sense_buffer)) ? |
| 1574 | sizeof(scsicmd->sense_buffer) : |
| 1575 | le32_to_cpu(srbreply->sense_data_size); |
| 1576 | scsicmd->result = DID_ERROR << 16 | COMMAND_COMPLETE << 8 | SAM_STAT_CHECK_CONDITION; |
| 1577 | memcpy(scsicmd->sense_buffer, srbreply->sense_data, len); |
| 1578 | } |
| 1579 | |
| 1580 | /* |
| 1581 | * Next check the srb status |
| 1582 | */ |
| 1583 | switch( (le32_to_cpu(srbreply->srb_status))&0x3f){ |
| 1584 | case SRB_STATUS_ERROR_RECOVERY: |
| 1585 | case SRB_STATUS_PENDING: |
| 1586 | case SRB_STATUS_SUCCESS: |
| 1587 | if(scsicmd->cmnd[0] == INQUIRY ){ |
| 1588 | u8 b; |
| 1589 | u8 b1; |
| 1590 | /* We can't expose disk devices because we can't tell whether they |
| 1591 | * are the raw container drives or stand alone drives. If they have |
| 1592 | * the removable bit set then we should expose them though. |
| 1593 | */ |
| 1594 | b = (*(u8*)scsicmd->buffer)&0x1f; |
| 1595 | b1 = ((u8*)scsicmd->buffer)[1]; |
| 1596 | if( b==TYPE_TAPE || b==TYPE_WORM || b==TYPE_ROM || b==TYPE_MOD|| b==TYPE_MEDIUM_CHANGER |
| 1597 | || (b==TYPE_DISK && (b1&0x80)) ){ |
| 1598 | scsicmd->result = DID_OK << 16 | COMMAND_COMPLETE << 8; |
| 1599 | /* |
| 1600 | * We will allow disk devices if in RAID/SCSI mode and |
| 1601 | * the channel is 2 |
| 1602 | */ |
| 1603 | } else if ((dev->raid_scsi_mode) && |
| 1604 | (scsicmd->device->channel == 2)) { |
| 1605 | scsicmd->result = DID_OK << 16 | |
| 1606 | COMMAND_COMPLETE << 8; |
| 1607 | } else { |
| 1608 | scsicmd->result = DID_NO_CONNECT << 16 | |
| 1609 | COMMAND_COMPLETE << 8; |
| 1610 | } |
| 1611 | } else { |
| 1612 | scsicmd->result = DID_OK << 16 | COMMAND_COMPLETE << 8; |
| 1613 | } |
| 1614 | break; |
| 1615 | case SRB_STATUS_DATA_OVERRUN: |
| 1616 | switch(scsicmd->cmnd[0]){ |
| 1617 | case READ_6: |
| 1618 | case WRITE_6: |
| 1619 | case READ_10: |
| 1620 | case WRITE_10: |
| 1621 | case READ_12: |
| 1622 | case WRITE_12: |
| 1623 | if(le32_to_cpu(srbreply->data_xfer_length) < scsicmd->underflow ) { |
| 1624 | printk(KERN_WARNING"aacraid: SCSI CMD underflow\n"); |
| 1625 | } else { |
| 1626 | printk(KERN_WARNING"aacraid: SCSI CMD Data Overrun\n"); |
| 1627 | } |
| 1628 | scsicmd->result = DID_ERROR << 16 | COMMAND_COMPLETE << 8; |
| 1629 | break; |
| 1630 | case INQUIRY: { |
| 1631 | u8 b; |
| 1632 | u8 b1; |
| 1633 | /* We can't expose disk devices because we can't tell whether they |
| 1634 | * are the raw container drives or stand alone drives |
| 1635 | */ |
| 1636 | b = (*(u8*)scsicmd->buffer)&0x0f; |
| 1637 | b1 = ((u8*)scsicmd->buffer)[1]; |
| 1638 | if( b==TYPE_TAPE || b==TYPE_WORM || b==TYPE_ROM || b==TYPE_MOD|| b==TYPE_MEDIUM_CHANGER |
| 1639 | || (b==TYPE_DISK && (b1&0x80)) ){ |
| 1640 | scsicmd->result = DID_OK << 16 | COMMAND_COMPLETE << 8; |
| 1641 | /* |
| 1642 | * We will allow disk devices if in RAID/SCSI mode and |
| 1643 | * the channel is 2 |
| 1644 | */ |
| 1645 | } else if ((dev->raid_scsi_mode) && |
| 1646 | (scsicmd->device->channel == 2)) { |
| 1647 | scsicmd->result = DID_OK << 16 | |
| 1648 | COMMAND_COMPLETE << 8; |
| 1649 | } else { |
| 1650 | scsicmd->result = DID_NO_CONNECT << 16 | |
| 1651 | COMMAND_COMPLETE << 8; |
| 1652 | } |
| 1653 | break; |
| 1654 | } |
| 1655 | default: |
| 1656 | scsicmd->result = DID_OK << 16 | COMMAND_COMPLETE << 8; |
| 1657 | break; |
| 1658 | } |
| 1659 | break; |
| 1660 | case SRB_STATUS_ABORTED: |
| 1661 | scsicmd->result = DID_ABORT << 16 | ABORT << 8; |
| 1662 | break; |
| 1663 | case SRB_STATUS_ABORT_FAILED: |
| 1664 | // Not sure about this one - but assuming the hba was trying to abort for some reason |
| 1665 | scsicmd->result = DID_ERROR << 16 | ABORT << 8; |
| 1666 | break; |
| 1667 | case SRB_STATUS_PARITY_ERROR: |
| 1668 | scsicmd->result = DID_PARITY << 16 | MSG_PARITY_ERROR << 8; |
| 1669 | break; |
| 1670 | case SRB_STATUS_NO_DEVICE: |
| 1671 | case SRB_STATUS_INVALID_PATH_ID: |
| 1672 | case SRB_STATUS_INVALID_TARGET_ID: |
| 1673 | case SRB_STATUS_INVALID_LUN: |
| 1674 | case SRB_STATUS_SELECTION_TIMEOUT: |
| 1675 | scsicmd->result = DID_NO_CONNECT << 16 | COMMAND_COMPLETE << 8; |
| 1676 | break; |
| 1677 | |
| 1678 | case SRB_STATUS_COMMAND_TIMEOUT: |
| 1679 | case SRB_STATUS_TIMEOUT: |
| 1680 | scsicmd->result = DID_TIME_OUT << 16 | COMMAND_COMPLETE << 8; |
| 1681 | break; |
| 1682 | |
| 1683 | case SRB_STATUS_BUSY: |
| 1684 | scsicmd->result = DID_NO_CONNECT << 16 | COMMAND_COMPLETE << 8; |
| 1685 | break; |
| 1686 | |
| 1687 | case SRB_STATUS_BUS_RESET: |
| 1688 | scsicmd->result = DID_RESET << 16 | COMMAND_COMPLETE << 8; |
| 1689 | break; |
| 1690 | |
| 1691 | case SRB_STATUS_MESSAGE_REJECTED: |
| 1692 | scsicmd->result = DID_ERROR << 16 | MESSAGE_REJECT << 8; |
| 1693 | break; |
| 1694 | case SRB_STATUS_REQUEST_FLUSHED: |
| 1695 | case SRB_STATUS_ERROR: |
| 1696 | case SRB_STATUS_INVALID_REQUEST: |
| 1697 | case SRB_STATUS_REQUEST_SENSE_FAILED: |
| 1698 | case SRB_STATUS_NO_HBA: |
| 1699 | case SRB_STATUS_UNEXPECTED_BUS_FREE: |
| 1700 | case SRB_STATUS_PHASE_SEQUENCE_FAILURE: |
| 1701 | case SRB_STATUS_BAD_SRB_BLOCK_LENGTH: |
| 1702 | case SRB_STATUS_DELAYED_RETRY: |
| 1703 | case SRB_STATUS_BAD_FUNCTION: |
| 1704 | case SRB_STATUS_NOT_STARTED: |
| 1705 | case SRB_STATUS_NOT_IN_USE: |
| 1706 | case SRB_STATUS_FORCE_ABORT: |
| 1707 | case SRB_STATUS_DOMAIN_VALIDATION_FAIL: |
| 1708 | default: |
| 1709 | #ifdef AAC_DETAILED_STATUS_INFO |
| 1710 | printk("aacraid: SRB ERROR(%u) %s scsi cmd 0x%x - scsi status 0x%x\n", |
| 1711 | le32_to_cpu(srbreply->srb_status) & 0x3F, |
| 1712 | aac_get_status_string( |
| 1713 | le32_to_cpu(srbreply->srb_status) & 0x3F), |
| 1714 | scsicmd->cmnd[0], |
| 1715 | le32_to_cpu(srbreply->scsi_status)); |
| 1716 | #endif |
| 1717 | scsicmd->result = DID_ERROR << 16 | COMMAND_COMPLETE << 8; |
| 1718 | break; |
| 1719 | } |
| 1720 | if (le32_to_cpu(srbreply->scsi_status) == 0x02 ){ // Check Condition |
| 1721 | int len; |
| 1722 | scsicmd->result |= SAM_STAT_CHECK_CONDITION; |
| 1723 | len = (le32_to_cpu(srbreply->sense_data_size) > |
| 1724 | sizeof(scsicmd->sense_buffer)) ? |
| 1725 | sizeof(scsicmd->sense_buffer) : |
| 1726 | le32_to_cpu(srbreply->sense_data_size); |
| 1727 | #ifdef AAC_DETAILED_STATUS_INFO |
| 1728 | dprintk((KERN_WARNING "aac_srb_callback: check condition, status = %d len=%d\n", |
| 1729 | le32_to_cpu(srbreply->status), len)); |
| 1730 | #endif |
| 1731 | memcpy(scsicmd->sense_buffer, srbreply->sense_data, len); |
| 1732 | |
| 1733 | } |
| 1734 | /* |
| 1735 | * OR in the scsi status (already shifted up a bit) |
| 1736 | */ |
| 1737 | scsicmd->result |= le32_to_cpu(srbreply->scsi_status); |
| 1738 | |
| 1739 | fib_complete(fibptr); |
| 1740 | fib_free(fibptr); |
| 1741 | aac_io_done(scsicmd); |
| 1742 | } |
| 1743 | |
| 1744 | /** |
| 1745 | * |
| 1746 | * aac_send_scb_fib |
| 1747 | * @scsicmd: the scsi command block |
| 1748 | * |
| 1749 | * This routine will form a FIB and fill in the aac_srb from the |
| 1750 | * scsicmd passed in. |
| 1751 | */ |
| 1752 | |
| 1753 | static int aac_send_srb_fib(struct scsi_cmnd* scsicmd) |
| 1754 | { |
| 1755 | struct fib* cmd_fibcontext; |
| 1756 | struct aac_dev* dev; |
| 1757 | int status; |
| 1758 | struct aac_srb *srbcmd; |
| 1759 | u16 fibsize; |
| 1760 | u32 flag; |
| 1761 | u32 timeout; |
| 1762 | |
| 1763 | if( scsicmd->device->id > 15 || scsicmd->device->lun > 7) { |
| 1764 | scsicmd->result = DID_NO_CONNECT << 16; |
| 1765 | scsicmd->scsi_done(scsicmd); |
| 1766 | return 0; |
| 1767 | } |
| 1768 | |
| 1769 | dev = (struct aac_dev *)scsicmd->device->host->hostdata; |
| 1770 | switch(scsicmd->sc_data_direction){ |
| 1771 | case DMA_TO_DEVICE: |
| 1772 | flag = SRB_DataOut; |
| 1773 | break; |
| 1774 | case DMA_BIDIRECTIONAL: |
| 1775 | flag = SRB_DataIn | SRB_DataOut; |
| 1776 | break; |
| 1777 | case DMA_FROM_DEVICE: |
| 1778 | flag = SRB_DataIn; |
| 1779 | break; |
| 1780 | case DMA_NONE: |
| 1781 | default: /* shuts up some versions of gcc */ |
| 1782 | flag = SRB_NoDataXfer; |
| 1783 | break; |
| 1784 | } |
| 1785 | |
| 1786 | |
| 1787 | /* |
| 1788 | * Allocate and initialize a Fib then setup a BlockWrite command |
| 1789 | */ |
| 1790 | if (!(cmd_fibcontext = fib_alloc(dev))) { |
| 1791 | return -1; |
| 1792 | } |
| 1793 | fib_init(cmd_fibcontext); |
| 1794 | |
| 1795 | srbcmd = (struct aac_srb*) fib_data(cmd_fibcontext); |
| 1796 | srbcmd->function = cpu_to_le32(SRBF_ExecuteScsi); |
| 1797 | srbcmd->channel = cpu_to_le32(aac_logical_to_phys(scsicmd->device->channel)); |
| 1798 | srbcmd->id = cpu_to_le32(scsicmd->device->id); |
| 1799 | srbcmd->lun = cpu_to_le32(scsicmd->device->lun); |
| 1800 | srbcmd->flags = cpu_to_le32(flag); |
| 1801 | timeout = (scsicmd->timeout-jiffies)/HZ; |
| 1802 | if(timeout == 0){ |
| 1803 | timeout = 1; |
| 1804 | } |
| 1805 | srbcmd->timeout = cpu_to_le32(timeout); // timeout in seconds |
| 1806 | srbcmd->retry_limit = 0; /* Obsolete parameter */ |
| 1807 | srbcmd->cdb_size = cpu_to_le32(scsicmd->cmd_len); |
| 1808 | |
| 1809 | if( dev->dac_support == 1 ) { |
| 1810 | aac_build_sg64(scsicmd, (struct sgmap64*) &srbcmd->sg); |
| 1811 | srbcmd->count = cpu_to_le32(scsicmd->request_bufflen); |
| 1812 | |
| 1813 | memset(srbcmd->cdb, 0, sizeof(srbcmd->cdb)); |
| 1814 | memcpy(srbcmd->cdb, scsicmd->cmnd, scsicmd->cmd_len); |
| 1815 | /* |
| 1816 | * Build Scatter/Gather list |
| 1817 | */ |
| 1818 | fibsize = sizeof (struct aac_srb) - sizeof (struct sgentry) + |
| 1819 | ((le32_to_cpu(srbcmd->sg.count) & 0xff) * |
| 1820 | sizeof (struct sgentry64)); |
| 1821 | BUG_ON (fibsize > (sizeof(struct hw_fib) - |
| 1822 | sizeof(struct aac_fibhdr))); |
| 1823 | |
| 1824 | /* |
| 1825 | * Now send the Fib to the adapter |
| 1826 | */ |
| 1827 | status = fib_send(ScsiPortCommand64, cmd_fibcontext, |
| 1828 | fibsize, FsaNormal, 0, 1, |
| 1829 | (fib_callback) aac_srb_callback, |
| 1830 | (void *) scsicmd); |
| 1831 | } else { |
| 1832 | aac_build_sg(scsicmd, (struct sgmap*)&srbcmd->sg); |
| 1833 | srbcmd->count = cpu_to_le32(scsicmd->request_bufflen); |
| 1834 | |
| 1835 | memset(srbcmd->cdb, 0, sizeof(srbcmd->cdb)); |
| 1836 | memcpy(srbcmd->cdb, scsicmd->cmnd, scsicmd->cmd_len); |
| 1837 | /* |
| 1838 | * Build Scatter/Gather list |
| 1839 | */ |
| 1840 | fibsize = sizeof (struct aac_srb) + |
| 1841 | (((le32_to_cpu(srbcmd->sg.count) & 0xff) - 1) * |
| 1842 | sizeof (struct sgentry)); |
| 1843 | BUG_ON (fibsize > (sizeof(struct hw_fib) - |
| 1844 | sizeof(struct aac_fibhdr))); |
| 1845 | |
| 1846 | /* |
| 1847 | * Now send the Fib to the adapter |
| 1848 | */ |
| 1849 | status = fib_send(ScsiPortCommand, cmd_fibcontext, fibsize, FsaNormal, 0, 1, |
| 1850 | (fib_callback) aac_srb_callback, (void *) scsicmd); |
| 1851 | } |
| 1852 | /* |
| 1853 | * Check that the command queued to the controller |
| 1854 | */ |
| 1855 | if (status == -EINPROGRESS){ |
| 1856 | return 0; |
| 1857 | } |
| 1858 | |
| 1859 | printk(KERN_WARNING "aac_srb: fib_send failed with status: %d\n", status); |
| 1860 | fib_complete(cmd_fibcontext); |
| 1861 | fib_free(cmd_fibcontext); |
| 1862 | |
| 1863 | return -1; |
| 1864 | } |
| 1865 | |
| 1866 | static unsigned long aac_build_sg(struct scsi_cmnd* scsicmd, struct sgmap* psg) |
| 1867 | { |
| 1868 | struct aac_dev *dev; |
| 1869 | unsigned long byte_count = 0; |
| 1870 | |
| 1871 | dev = (struct aac_dev *)scsicmd->device->host->hostdata; |
| 1872 | // Get rid of old data |
| 1873 | psg->count = 0; |
| 1874 | psg->sg[0].addr = 0; |
| 1875 | psg->sg[0].count = 0; |
| 1876 | if (scsicmd->use_sg) { |
| 1877 | struct scatterlist *sg; |
| 1878 | int i; |
| 1879 | int sg_count; |
| 1880 | sg = (struct scatterlist *) scsicmd->request_buffer; |
| 1881 | |
| 1882 | sg_count = pci_map_sg(dev->pdev, sg, scsicmd->use_sg, |
| 1883 | scsicmd->sc_data_direction); |
| 1884 | psg->count = cpu_to_le32(sg_count); |
| 1885 | |
| 1886 | byte_count = 0; |
| 1887 | |
| 1888 | for (i = 0; i < sg_count; i++) { |
| 1889 | psg->sg[i].addr = cpu_to_le32(sg_dma_address(sg)); |
| 1890 | psg->sg[i].count = cpu_to_le32(sg_dma_len(sg)); |
| 1891 | byte_count += sg_dma_len(sg); |
| 1892 | sg++; |
| 1893 | } |
| 1894 | /* hba wants the size to be exact */ |
| 1895 | if(byte_count > scsicmd->request_bufflen){ |
| 1896 | psg->sg[i-1].count -= (byte_count - scsicmd->request_bufflen); |
| 1897 | byte_count = scsicmd->request_bufflen; |
| 1898 | } |
| 1899 | /* Check for command underflow */ |
| 1900 | if(scsicmd->underflow && (byte_count < scsicmd->underflow)){ |
| 1901 | printk(KERN_WARNING"aacraid: cmd len %08lX cmd underflow %08X\n", |
| 1902 | byte_count, scsicmd->underflow); |
| 1903 | } |
| 1904 | } |
| 1905 | else if(scsicmd->request_bufflen) { |
| 1906 | dma_addr_t addr; |
| 1907 | addr = pci_map_single(dev->pdev, |
| 1908 | scsicmd->request_buffer, |
| 1909 | scsicmd->request_bufflen, |
| 1910 | scsicmd->sc_data_direction); |
| 1911 | psg->count = cpu_to_le32(1); |
| 1912 | psg->sg[0].addr = cpu_to_le32(addr); |
| 1913 | psg->sg[0].count = cpu_to_le32(scsicmd->request_bufflen); |
| 1914 | scsicmd->SCp.dma_handle = addr; |
| 1915 | byte_count = scsicmd->request_bufflen; |
| 1916 | } |
| 1917 | return byte_count; |
| 1918 | } |
| 1919 | |
| 1920 | |
| 1921 | static unsigned long aac_build_sg64(struct scsi_cmnd* scsicmd, struct sgmap64* psg) |
| 1922 | { |
| 1923 | struct aac_dev *dev; |
| 1924 | unsigned long byte_count = 0; |
| 1925 | u64 le_addr; |
| 1926 | |
| 1927 | dev = (struct aac_dev *)scsicmd->device->host->hostdata; |
| 1928 | // Get rid of old data |
| 1929 | psg->count = 0; |
| 1930 | psg->sg[0].addr[0] = 0; |
| 1931 | psg->sg[0].addr[1] = 0; |
| 1932 | psg->sg[0].count = 0; |
| 1933 | if (scsicmd->use_sg) { |
| 1934 | struct scatterlist *sg; |
| 1935 | int i; |
| 1936 | int sg_count; |
| 1937 | sg = (struct scatterlist *) scsicmd->request_buffer; |
| 1938 | |
| 1939 | sg_count = pci_map_sg(dev->pdev, sg, scsicmd->use_sg, |
| 1940 | scsicmd->sc_data_direction); |
| 1941 | psg->count = cpu_to_le32(sg_count); |
| 1942 | |
| 1943 | byte_count = 0; |
| 1944 | |
| 1945 | for (i = 0; i < sg_count; i++) { |
| 1946 | le_addr = cpu_to_le64(sg_dma_address(sg)); |
| 1947 | psg->sg[i].addr[1] = (u32)(le_addr>>32); |
| 1948 | psg->sg[i].addr[0] = (u32)(le_addr & 0xffffffff); |
| 1949 | psg->sg[i].count = cpu_to_le32(sg_dma_len(sg)); |
| 1950 | byte_count += sg_dma_len(sg); |
| 1951 | sg++; |
| 1952 | } |
| 1953 | /* hba wants the size to be exact */ |
| 1954 | if(byte_count > scsicmd->request_bufflen){ |
| 1955 | psg->sg[i-1].count -= (byte_count - scsicmd->request_bufflen); |
| 1956 | byte_count = scsicmd->request_bufflen; |
| 1957 | } |
| 1958 | /* Check for command underflow */ |
| 1959 | if(scsicmd->underflow && (byte_count < scsicmd->underflow)){ |
| 1960 | printk(KERN_WARNING"aacraid: cmd len %08lX cmd underflow %08X\n", |
| 1961 | byte_count, scsicmd->underflow); |
| 1962 | } |
| 1963 | } |
| 1964 | else if(scsicmd->request_bufflen) { |
| 1965 | dma_addr_t addr; |
| 1966 | addr = pci_map_single(dev->pdev, |
| 1967 | scsicmd->request_buffer, |
| 1968 | scsicmd->request_bufflen, |
| 1969 | scsicmd->sc_data_direction); |
| 1970 | psg->count = cpu_to_le32(1); |
| 1971 | le_addr = cpu_to_le64(addr); |
| 1972 | psg->sg[0].addr[1] = (u32)(le_addr>>32); |
| 1973 | psg->sg[0].addr[0] = (u32)(le_addr & 0xffffffff); |
| 1974 | psg->sg[0].count = cpu_to_le32(scsicmd->request_bufflen); |
| 1975 | scsicmd->SCp.dma_handle = addr; |
| 1976 | byte_count = scsicmd->request_bufflen; |
| 1977 | } |
| 1978 | return byte_count; |
| 1979 | } |
| 1980 | |
| 1981 | #ifdef AAC_DETAILED_STATUS_INFO |
| 1982 | |
/* Pairs an adapter SRB status code with a human readable description. */
struct aac_srb_status_info {
	u32	status;
	char	*str;
};
| 1987 | |
| 1988 | |
/*
 * Lookup table mapping SRB_STATUS_* codes to descriptive strings for
 * detailed error reporting; the 0xff entry is a catch-all sentinel.
 * Scanned linearly by aac_get_status_string().
 */
static struct aac_srb_status_info srb_status_info[] = {
	{ SRB_STATUS_PENDING,		"Pending Status"},
	{ SRB_STATUS_SUCCESS,		"Success"},
	{ SRB_STATUS_ABORTED,		"Aborted Command"},
	{ SRB_STATUS_ABORT_FAILED,	"Abort Failed"},
	{ SRB_STATUS_ERROR,		"Error Event"},
	{ SRB_STATUS_BUSY,		"Device Busy"},
	{ SRB_STATUS_INVALID_REQUEST,	"Invalid Request"},
	{ SRB_STATUS_INVALID_PATH_ID,	"Invalid Path ID"},
	{ SRB_STATUS_NO_DEVICE,		"No Device"},
	{ SRB_STATUS_TIMEOUT,		"Timeout"},
	{ SRB_STATUS_SELECTION_TIMEOUT,	"Selection Timeout"},
	{ SRB_STATUS_COMMAND_TIMEOUT,	"Command Timeout"},
	{ SRB_STATUS_MESSAGE_REJECTED,	"Message Rejected"},
	{ SRB_STATUS_BUS_RESET,		"Bus Reset"},
	{ SRB_STATUS_PARITY_ERROR,	"Parity Error"},
	{ SRB_STATUS_REQUEST_SENSE_FAILED,"Request Sense Failed"},
	{ SRB_STATUS_NO_HBA,		"No HBA"},
	{ SRB_STATUS_DATA_OVERRUN,	"Data Overrun/Data Underrun"},
	{ SRB_STATUS_UNEXPECTED_BUS_FREE,"Unexpected Bus Free"},
	{ SRB_STATUS_PHASE_SEQUENCE_FAILURE,"Phase Error"},
	{ SRB_STATUS_BAD_SRB_BLOCK_LENGTH,"Bad Srb Block Length"},
	{ SRB_STATUS_REQUEST_FLUSHED,	"Request Flushed"},
	{ SRB_STATUS_DELAYED_RETRY,	"Delayed Retry"},
	{ SRB_STATUS_INVALID_LUN,	"Invalid LUN"},
	{ SRB_STATUS_INVALID_TARGET_ID,	"Invalid TARGET ID"},
	{ SRB_STATUS_BAD_FUNCTION,	"Bad Function"},
	{ SRB_STATUS_ERROR_RECOVERY,	"Error Recovery"},
	{ SRB_STATUS_NOT_STARTED,	"Not Started"},
	{ SRB_STATUS_NOT_IN_USE,	"Not In Use"},
	{ SRB_STATUS_FORCE_ABORT,	"Force Abort"},
	{ SRB_STATUS_DOMAIN_VALIDATION_FAIL,"Domain Validation Failure"},
	{ 0xff,				"Unknown Error"}
};
| 2023 | |
| 2024 | char *aac_get_status_string(u32 status) |
| 2025 | { |
| 2026 | int i; |
| 2027 | |
| 2028 | for(i=0; i < (sizeof(srb_status_info)/sizeof(struct aac_srb_status_info)); i++ ){ |
| 2029 | if(srb_status_info[i].status == status){ |
| 2030 | return srb_status_info[i].str; |
| 2031 | } |
| 2032 | } |
| 2033 | |
| 2034 | return "Bad Status Code"; |
| 2035 | } |
| 2036 | |
| 2037 | #endif |