1/*
2 * linux/drivers/s390/crypto/z90main.c
3 *
4 * z90crypt 1.3.2
5 *
6 * Copyright (C) 2001, 2004 IBM Corporation
7 * Author(s): Robert Burroughs (burrough@us.ibm.com)
8 * Eric Rossman (edrossma@us.ibm.com)
9 *
10 * Hotplug & misc device support: Jochen Roehrig (roehrig@de.ibm.com)
11 *
12 * This program is free software; you can redistribute it and/or modify
13 * it under the terms of the GNU General Public License as published by
14 * the Free Software Foundation; either version 2, or (at your option)
15 * any later version.
16 *
17 * This program is distributed in the hope that it will be useful,
18 * but WITHOUT ANY WARRANTY; without even the implied warranty of
19 * MERCHANTABILITY or FITNESS FOR A PARTICULAR PURPOSE. See the
20 * GNU General Public License for more details.
21 *
22 * You should have received a copy of the GNU General Public License
23 * along with this program; if not, write to the Free Software
24 * Foundation, Inc., 675 Mass Ave, Cambridge, MA 02139, USA.
25 */
26
27#include <asm/uaccess.h> // copy_(from|to)_user
28#include <linux/compat.h>
29#include <linux/compiler.h>
30#include <linux/delay.h> // mdelay
31#include <linux/init.h>
32#include <linux/interrupt.h> // for tasklets
33#include <linux/ioctl32.h>
34#include <linux/miscdevice.h>
35#include <linux/module.h>
36#include <linux/moduleparam.h>
37#include <linux/kobject_uevent.h>
38#include <linux/proc_fs.h>
39#include <linux/syscalls.h>
40#include <linux/version.h>
41#include "z90crypt.h"
42#include "z90common.h"
43
44#define VERSION_Z90MAIN_C "$Revision: 1.62 $"
45
46static char z90main_version[] __initdata =
47 "z90main.o (" VERSION_Z90MAIN_C "/"
48 VERSION_Z90COMMON_H "/" VERSION_Z90CRYPT_H ")";
49
50extern char z90hardware_version[];
51
52/**
53 * Defaults that may be modified.
54 */
55
56/**
57 * You can specify a different minor at compile time.
58 */
59#ifndef Z90CRYPT_MINOR
60#define Z90CRYPT_MINOR MISC_DYNAMIC_MINOR
61#endif
62
63/**
64 * You can specify a different domain at compile time or on the insmod
65 * command line.
66 */
67#ifndef DOMAIN_INDEX
68#define DOMAIN_INDEX -1
69#endif
70
71/**
72 * This is the name under which the device is registered in /proc/modules.
73 */
74#define REG_NAME "z90crypt"
75
76/**
77 * Cleanup should run every CLEANUPTIME seconds and should clean up requests
78 * older than CLEANUPTIME seconds.
79 */
80#ifndef CLEANUPTIME
81#define CLEANUPTIME 15
82#endif
83
84/**
85 * Config should run every CONFIGTIME seconds
86 */
87#ifndef CONFIGTIME
88#define CONFIGTIME 30
89#endif
90
91/**
92 * The first execution of the config task should take place
93 * immediately after initialization
94 */
95#ifndef INITIAL_CONFIGTIME
96#define INITIAL_CONFIGTIME 1
97#endif
98
99/**
100 * Reader should run every READERTIME milliseconds
101 * With the 100Hz patch for s390, z90crypt can lock the system solid while
102 * under heavy load. We'll try to avoid that.
103 */
104#ifndef READERTIME
105#if HZ > 1000
106#define READERTIME 2
107#else
108#define READERTIME 10
109#endif
110#endif
111
112/**
113 * turn long device array index into device pointer
114 */
115#define LONG2DEVPTR(ndx) (z90crypt.device_p[(ndx)])
116
117/**
118 * turn short device array index into long device array index
119 */
120#define SHRT2LONG(ndx) (z90crypt.overall_device_x.device_index[(ndx)])
121
122/**
123 * turn short device array index into device pointer
124 */
125#define SHRT2DEVPTR(ndx) LONG2DEVPTR(SHRT2LONG(ndx))
126
127/**
128 * Status for a work-element
129 */
130#define STAT_DEFAULT 0x00 // request has not been processed
131
132#define STAT_ROUTED 0x80 // bit 7: requests get routed to specific device
133 // else, device is determined each write
134#define STAT_FAILED 0x40 // bit 6: this bit is set if the request failed
135 // before being sent to the hardware.
136#define STAT_WRITTEN 0x30 // bits 5-4: work to be done, not sent to device
137// 0x20 // UNUSED state
138#define STAT_READPEND 0x10 // bits 5-4: work done, we're returning data now
139#define STAT_NOWORK 0x00 // bits off: no work on any queue
140#define STAT_RDWRMASK 0x30 // mask for bits 5-4
141
142/**
143 * Macros to check the status RDWRMASK
144 */
145#define CHK_RDWRMASK(statbyte) ((statbyte) & STAT_RDWRMASK)
146#define SET_RDWRMASK(statbyte, newval) \
147 {(statbyte) &= ~STAT_RDWRMASK; (statbyte) |= newval;}
148
149/**
150 * Audit Trail. Progress of a Work element
151 * audit[0]: Unless noted otherwise, these bits are all set by the process
152 */
153#define FP_COPYFROM 0x80 // Caller's buffer has been copied to work element
154#define FP_BUFFREQ 0x40 // Low Level buffer requested
155#define FP_BUFFGOT 0x20 // Low Level buffer obtained
156#define FP_SENT 0x10 // Work element sent to a crypto device
157 // (may be set by process or by reader task)
158#define FP_PENDING 0x08 // Work element placed on pending queue
159 // (may be set by process or by reader task)
160#define FP_REQUEST 0x04 // Work element placed on request queue
161#define FP_ASLEEP 0x02 // Work element about to sleep
162#define FP_AWAKE 0x01 // Work element has been awakened
163
164/**
165 * audit[1]: These bits are set by the reader task and/or the cleanup task
166 */
167#define FP_NOTPENDING 0x80 // Work element removed from pending queue
168#define FP_AWAKENING 0x40 // Caller about to be awakened
169#define FP_TIMEDOUT 0x20 // Caller timed out
170#define FP_RESPSIZESET 0x10 // Response size copied to work element
171#define FP_RESPADDRCOPIED 0x08 // Response address copied to work element
172#define FP_RESPBUFFCOPIED 0x04 // Response buffer copied to work element
173#define FP_REMREQUEST 0x02 // Work element removed from request queue
174#define FP_SIGNALED 0x01 // Work element was awakened by a signal
175
176/**
177 * audit[2]: unused
178 */
179
180/**
181 * state of the file handle in private_data.status
182 */
183#define STAT_OPEN 0
184#define STAT_CLOSED 1
185
186/**
187 * PID() expands to the process ID of the current process
188 */
189#define PID() (current->pid)
190
191/**
192 * Selected Constants. The number of APs and the number of devices
193 */
194#ifndef Z90CRYPT_NUM_APS
195#define Z90CRYPT_NUM_APS 64
196#endif
197#ifndef Z90CRYPT_NUM_DEVS
198#define Z90CRYPT_NUM_DEVS Z90CRYPT_NUM_APS
199#endif
200
201/**
202 * Buffer size for receiving responses. The maximum Response Size
203 * is actually the maximum request size, since in an error condition
204 * the request itself may be returned unchanged.
205 */
206#define MAX_RESPONSE_SIZE 0x0000077C
207
208/**
209 * A count and status-byte mask
210 */
211struct status {
212 int st_count; // # of enabled devices
213 int disabled_count; // # of disabled devices
214 int user_disabled_count; // # of devices disabled via proc fs
215 unsigned char st_mask[Z90CRYPT_NUM_APS]; // current status mask
216};
217
218/**
219 * The array of device indexes is a mechanism for fast indexing into
220 * a long (and sparse) array. For instance, if APs 3, 9 and 47 are
221 * installed, z90CDeviceIndex[0] is 3, z90CDeviceIndex[1] is 9, and
222 * z90CDeviceIndex[2] is 47.
223 */
224struct device_x {
225 int device_index[Z90CRYPT_NUM_DEVS];
226};
227
228/**
229 * All devices are arranged in a single array: 64 APs
230 */
231struct device {
232 int dev_type; // PCICA, PCICC, PCIXCC_MCL2,
233 // PCIXCC_MCL3, CEX2C
234 enum devstat dev_stat; // current device status
235 int dev_self_x; // Index in array
236 int disabled; // Set when device is in error
237 int user_disabled; // Set when device is disabled by user
238 int dev_q_depth; // q depth
239 unsigned char * dev_resp_p; // Response buffer address
240 int dev_resp_l; // Response Buffer length
241 int dev_caller_count; // Number of callers
242 int dev_total_req_cnt; // # requests for device since load
243 struct list_head dev_caller_list; // List of callers
244};
245
246/**
247 * There's a struct status and a struct device_x for each device type.
248 */
249struct hdware_block {
250 struct status hdware_mask;
251 struct status type_mask[Z90CRYPT_NUM_TYPES];
252 struct device_x type_x_addr[Z90CRYPT_NUM_TYPES];
253 unsigned char device_type_array[Z90CRYPT_NUM_APS];
254};
255
256/**
257 * z90crypt is the topmost data structure in the hierarchy.
258 */
259struct z90crypt {
260 int max_count; // Nr of possible crypto devices
261 struct status mask;
262 int q_depth_array[Z90CRYPT_NUM_DEVS];
263 int dev_type_array[Z90CRYPT_NUM_DEVS];
264 struct device_x overall_device_x; // array device indexes
265 struct device * device_p[Z90CRYPT_NUM_DEVS];
266 int terminating;
267 int domain_established;// TRUE: domain has been found
268 int cdx; // Crypto Domain Index
269 int len; // Length of this data structure
270 struct hdware_block *hdware_info;
271};
272
273/**
274 * An array of these structures is pointed to from dev_caller
275 * The length of the array depends on the device type. For APs,
276 * there are 8.
277 *
278 * The caller buffer is allocated to the user at OPEN. At WRITE,
279 * it contains the request; at READ, the response. The function
280 * send_to_crypto_device converts the request to device-dependent
281 * form and uses the caller's OPEN-allocated buffer for the response.
282 *
283 * For the contents of caller_dev_dep_req (and caller_dev_dep_req_p,
284 * which points to it), see the discussion in z90hardware.c.
285 * Search for "extended request message block".
286 */
287struct caller {
288 int caller_buf_l; // length of original request
289 unsigned char * caller_buf_p; // Original request on WRITE
290 int caller_dev_dep_req_l; // len device dependent request
291 unsigned char * caller_dev_dep_req_p; // Device dependent form
292 unsigned char caller_id[8]; // caller-supplied message id
293 struct list_head caller_liste;
294 unsigned char caller_dev_dep_req[MAX_RESPONSE_SIZE];
295};
296
297/**
298 * Function prototypes from z90hardware.c
299 */
300enum hdstat query_online(int, int, int, int *, int *);
301enum devstat reset_device(int, int, int);
302enum devstat send_to_AP(int, int, int, unsigned char *);
303enum devstat receive_from_AP(int, int, int, unsigned char *, unsigned char *);
304int convert_request(unsigned char *, int, short, int, int, int *,
305 unsigned char *);
306int convert_response(unsigned char *, unsigned char *, int *, unsigned char *);
307
308/**
309 * Low level function prototypes
310 */
311static int create_z90crypt(int *);
312static int refresh_z90crypt(int *);
313static int find_crypto_devices(struct status *);
314static int create_crypto_device(int);
315static int destroy_crypto_device(int);
316static void destroy_z90crypt(void);
317static int refresh_index_array(struct status *, struct device_x *);
318static int probe_device_type(struct device *);
319static int probe_PCIXCC_type(struct device *);
320
321/**
322 * proc fs definitions
323 */
324static struct proc_dir_entry *z90crypt_entry;
325
326/**
327 * data structures
328 */
329
330/**
331 * work_element.opener points back to this structure
332 */
333struct priv_data {
334 pid_t opener_pid;
335 unsigned char status; // 0: open 1: closed
336};
337
338/**
339 * A work element is allocated for each request
340 */
341struct work_element {
342 struct priv_data *priv_data;
343 pid_t pid;
344 int devindex; // index of device processing this w_e
345 // (If request did not specify device,
346 // -1 until placed onto a queue)
347 int devtype;
348 struct list_head liste; // used for requestq and pendingq
349 char buffer[128]; // local copy of user request
350 int buff_size; // size of the buffer for the request
351 char resp_buff[RESPBUFFSIZE];
352 int resp_buff_size;
353 char __user * resp_addr; // address of response in user space
354 unsigned int funccode; // function code of request
355 wait_queue_head_t waitq;
356 unsigned long requestsent; // time at which the request was sent
357 atomic_t alarmrung; // wake-up signal
358 unsigned char caller_id[8]; // pid + counter, for this w_e
359 unsigned char status[1]; // bits to mark status of the request
360 unsigned char audit[3]; // record of work element's progress
361 unsigned char * requestptr; // address of request buffer
362 int retcode; // return code of request
363};
364
365/**
366 * High level function prototypes
367 */
368static int z90crypt_open(struct inode *, struct file *);
369static int z90crypt_release(struct inode *, struct file *);
370static ssize_t z90crypt_read(struct file *, char __user *, size_t, loff_t *);
371static ssize_t z90crypt_write(struct file *, const char __user *,
372 size_t, loff_t *);
373static long z90crypt_unlocked_ioctl(struct file *, unsigned int, unsigned long);
374static long z90crypt_compat_ioctl(struct file *, unsigned int, unsigned long);
375
376static void z90crypt_reader_task(unsigned long);
377static void z90crypt_schedule_reader_task(unsigned long);
378static void z90crypt_config_task(unsigned long);
379static void z90crypt_cleanup_task(unsigned long);
380
381static int z90crypt_status(char *, char **, off_t, int, int *, void *);
382static int z90crypt_status_write(struct file *, const char __user *,
383 unsigned long, void *);
384
385/**
386 * Storage allocated at initialization and used throughout the life of
387 * this insmod
388 */
389static int domain = DOMAIN_INDEX;
390static struct z90crypt z90crypt;
391static int quiesce_z90crypt;
392static spinlock_t queuespinlock;
393static struct list_head request_list;
394static int requestq_count;
395static struct list_head pending_list;
396static int pendingq_count;
397
398static struct tasklet_struct reader_tasklet;
399static struct timer_list reader_timer;
400static struct timer_list config_timer;
401static struct timer_list cleanup_timer;
402static atomic_t total_open;
403static atomic_t z90crypt_step;
404
405static struct file_operations z90crypt_fops = {
406 .owner = THIS_MODULE,
407 .read = z90crypt_read,
408 .write = z90crypt_write,
409 .unlocked_ioctl = z90crypt_unlocked_ioctl,
410#ifdef CONFIG_COMPAT
411 .compat_ioctl = z90crypt_compat_ioctl,
412#endif
413 .open = z90crypt_open,
414 .release = z90crypt_release
415};
416
417static struct miscdevice z90crypt_misc_device = {
418 .minor = Z90CRYPT_MINOR,
419 .name = DEV_NAME,
420 .fops = &z90crypt_fops,
421 .devfs_name = DEV_NAME
422};
423
424/**
425 * Documentation values.
426 */
427MODULE_AUTHOR("zSeries Linux Crypto Team: Robert H. Burroughs, Eric D. Rossman"
428 "and Jochen Roehrig");
429MODULE_DESCRIPTION("zSeries Linux Cryptographic Coprocessor device driver, "
430 "Copyright 2001, 2004 IBM Corporation");
431MODULE_LICENSE("GPL");
432module_param(domain, int, 0);
433MODULE_PARM_DESC(domain, "domain index for device");
434
435#ifdef CONFIG_COMPAT
436/**
437 * ioctl32 conversion routines
438 */
439struct ica_rsa_modexpo_32 { // For 32-bit callers
440 compat_uptr_t inputdata;
441 unsigned int inputdatalength;
442 compat_uptr_t outputdata;
443 unsigned int outputdatalength;
444 compat_uptr_t b_key;
445 compat_uptr_t n_modulus;
446};
447
448static long
449trans_modexpo32(struct file *filp, unsigned int cmd, unsigned long arg)
450{
451 struct ica_rsa_modexpo_32 __user *mex32u = compat_ptr(arg);
452 struct ica_rsa_modexpo_32 mex32k;
453 struct ica_rsa_modexpo __user *mex64;
454 long ret = 0;
455 unsigned int i;
456
457 if (!access_ok(VERIFY_WRITE, mex32u, sizeof(struct ica_rsa_modexpo_32)))
458 return -EFAULT;
459 mex64 = compat_alloc_user_space(sizeof(struct ica_rsa_modexpo));
460 if (!access_ok(VERIFY_WRITE, mex64, sizeof(struct ica_rsa_modexpo)))
461 return -EFAULT;
462 if (copy_from_user(&mex32k, mex32u, sizeof(struct ica_rsa_modexpo_32)))
463 return -EFAULT;
464 if (__put_user(compat_ptr(mex32k.inputdata), &mex64->inputdata) ||
465 __put_user(mex32k.inputdatalength, &mex64->inputdatalength) ||
466 __put_user(compat_ptr(mex32k.outputdata), &mex64->outputdata) ||
467 __put_user(mex32k.outputdatalength, &mex64->outputdatalength) ||
468 __put_user(compat_ptr(mex32k.b_key), &mex64->b_key) ||
469 __put_user(compat_ptr(mex32k.n_modulus), &mex64->n_modulus))
470 return -EFAULT;
471 ret = z90crypt_unlocked_ioctl(filp, cmd, (unsigned long)mex64);
472 if (!ret)
473 if (__get_user(i, &mex64->outputdatalength) ||
474 __put_user(i, &mex32u->outputdatalength))
475 ret = -EFAULT;
476 return ret;
477}
478
479struct ica_rsa_modexpo_crt_32 { // For 32-bit callers
480 compat_uptr_t inputdata;
481 unsigned int inputdatalength;
482 compat_uptr_t outputdata;
483 unsigned int outputdatalength;
484 compat_uptr_t bp_key;
485 compat_uptr_t bq_key;
486 compat_uptr_t np_prime;
487 compat_uptr_t nq_prime;
488 compat_uptr_t u_mult_inv;
489};
490
491static long
492trans_modexpo_crt32(struct file *filp, unsigned int cmd, unsigned long arg)
493{
494 struct ica_rsa_modexpo_crt_32 __user *crt32u = compat_ptr(arg);
495 struct ica_rsa_modexpo_crt_32 crt32k;
496 struct ica_rsa_modexpo_crt __user *crt64;
497 long ret = 0;
498 unsigned int i;
499
500 if (!access_ok(VERIFY_WRITE, crt32u,
501 sizeof(struct ica_rsa_modexpo_crt_32)))
502 return -EFAULT;
503 crt64 = compat_alloc_user_space(sizeof(struct ica_rsa_modexpo_crt));
504 if (!access_ok(VERIFY_WRITE, crt64, sizeof(struct ica_rsa_modexpo_crt)))
505 return -EFAULT;
506 if (copy_from_user(&crt32k, crt32u,
507 sizeof(struct ica_rsa_modexpo_crt_32)))
508 return -EFAULT;
509 if (__put_user(compat_ptr(crt32k.inputdata), &crt64->inputdata) ||
510 __put_user(crt32k.inputdatalength, &crt64->inputdatalength) ||
511 __put_user(compat_ptr(crt32k.outputdata), &crt64->outputdata) ||
512 __put_user(crt32k.outputdatalength, &crt64->outputdatalength) ||
513 __put_user(compat_ptr(crt32k.bp_key), &crt64->bp_key) ||
514 __put_user(compat_ptr(crt32k.bq_key), &crt64->bq_key) ||
515 __put_user(compat_ptr(crt32k.np_prime), &crt64->np_prime) ||
516 __put_user(compat_ptr(crt32k.nq_prime), &crt64->nq_prime) ||
517 __put_user(compat_ptr(crt32k.u_mult_inv), &crt64->u_mult_inv))
518 return -EFAULT;
519 ret = z90crypt_unlocked_ioctl(filp, cmd, (unsigned long)crt64);
520 if (!ret)
521 if (__get_user(i, &crt64->outputdatalength) ||
522 __put_user(i, &crt32u->outputdatalength))
523 ret = -EFAULT;
524 return ret;
525}
526
527static long
528z90crypt_compat_ioctl(struct file *filp, unsigned int cmd, unsigned long arg)
529{
530 switch (cmd) {
531 case ICAZ90STATUS:
532 case Z90QUIESCE:
533 case Z90STAT_TOTALCOUNT:
534 case Z90STAT_PCICACOUNT:
535 case Z90STAT_PCICCCOUNT:
536 case Z90STAT_PCIXCCCOUNT:
537 case Z90STAT_PCIXCCMCL2COUNT:
538 case Z90STAT_PCIXCCMCL3COUNT:
539 case Z90STAT_CEX2CCOUNT:
540 case Z90STAT_REQUESTQ_COUNT:
541 case Z90STAT_PENDINGQ_COUNT:
542 case Z90STAT_TOTALOPEN_COUNT:
543 case Z90STAT_DOMAIN_INDEX:
544 case Z90STAT_STATUS_MASK:
545 case Z90STAT_QDEPTH_MASK:
546 case Z90STAT_PERDEV_REQCNT:
547 return z90crypt_unlocked_ioctl(filp, cmd, arg);
548 case ICARSAMODEXPO:
549 return trans_modexpo32(filp, cmd, arg);
550 case ICARSACRT:
551 return trans_modexpo_crt32(filp, cmd, arg);
552 default:
553 return -ENOIOCTLCMD;
554 }
555}
556#endif
557
558/**
559 * The module initialization code.
560 */
561static int __init
562z90crypt_init_module(void)
563{
564 int result, nresult;
565 struct proc_dir_entry *entry;
566
567 PDEBUG("PID %d\n", PID());
568
569 if ((domain < -1) || (domain > 15)) {
570 PRINTKW("Invalid param: domain = %d. Not loading.\n", domain);
571 return -EINVAL;
572 }
573
574 /* Register as misc device with given minor (or get a dynamic one). */
575 result = misc_register(&z90crypt_misc_device);
576 if (result < 0) {
577 PRINTKW(KERN_ERR "misc_register (minor %d) failed with %d\n",
578 z90crypt_misc_device.minor, result);
579 return result;
580 }
581
582 PDEBUG("Registered " DEV_NAME " with result %d\n", result);
583
584 result = create_z90crypt(&domain);
585 if (result != 0) {
586 PRINTKW("create_z90crypt (domain index %d) failed with %d.\n",
587 domain, result);
588 result = -ENOMEM;
589 goto init_module_cleanup;
590 }
591
592 if (result == 0) {
593 PRINTKN("Version %d.%d.%d loaded, built on %s %s\n",
594 z90crypt_VERSION, z90crypt_RELEASE, z90crypt_VARIANT,
595 __DATE__, __TIME__);
596 PRINTKN("%s\n", z90main_version);
597 PRINTKN("%s\n", z90hardware_version);
598 PDEBUG("create_z90crypt (domain index %d) successful.\n",
599 domain);
600 } else
601 PRINTK("No devices at startup\n");
602
603 /* Initialize globals. */
604 spin_lock_init(&queuespinlock);
605
606 INIT_LIST_HEAD(&pending_list);
607 pendingq_count = 0;
608
609 INIT_LIST_HEAD(&request_list);
610 requestq_count = 0;
611
612 quiesce_z90crypt = 0;
613
614 atomic_set(&total_open, 0);
615 atomic_set(&z90crypt_step, 0);
616
617 /* Set up the cleanup task. */
618 init_timer(&cleanup_timer);
619 cleanup_timer.function = z90crypt_cleanup_task;
620 cleanup_timer.data = 0;
621 cleanup_timer.expires = jiffies + (CLEANUPTIME * HZ);
622 add_timer(&cleanup_timer);
623
624 /* Set up the proc file system */
625 entry = create_proc_entry("driver/z90crypt", 0644, 0);
626 if (entry) {
627 entry->nlink = 1;
628 entry->data = 0;
629 entry->read_proc = z90crypt_status;
630 entry->write_proc = z90crypt_status_write;
631 }
632 else
633 PRINTK("Couldn't create z90crypt proc entry\n");
634 z90crypt_entry = entry;
635
636 /* Set up the configuration task. */
637 init_timer(&config_timer);
638 config_timer.function = z90crypt_config_task;
639 config_timer.data = 0;
640 config_timer.expires = jiffies + (INITIAL_CONFIGTIME * HZ);
641 add_timer(&config_timer);
642
643 /* Set up the reader task */
644 tasklet_init(&reader_tasklet, z90crypt_reader_task, 0);
645 init_timer(&reader_timer);
646 reader_timer.function = z90crypt_schedule_reader_task;
647 reader_timer.data = 0;
648 reader_timer.expires = jiffies + (READERTIME * HZ / 1000);
649 add_timer(&reader_timer);
650
651 return 0; // success
652
653init_module_cleanup:
654 if ((nresult = misc_deregister(&z90crypt_misc_device)))
655 PRINTK("misc_deregister failed with %d.\n", nresult);
656 else
657 PDEBUG("misc_deregister successful.\n");
658
659 return result; // failure
660}
661
662/**
663 * The module termination code
664 */
665static void __exit
666z90crypt_cleanup_module(void)
667{
668 int nresult;
669
670 PDEBUG("PID %d\n", PID());
671
672 remove_proc_entry("driver/z90crypt", 0);
673
674 if ((nresult = misc_deregister(&z90crypt_misc_device)))
675 PRINTK("misc_deregister failed with %d.\n", nresult);
676 else
677 PDEBUG("misc_deregister successful.\n");
678
679 /* Remove the tasks */
680 tasklet_kill(&reader_tasklet);
681 del_timer(&reader_timer);
682 del_timer(&config_timer);
683 del_timer(&cleanup_timer);
684
685 destroy_z90crypt();
686
687 PRINTKN("Unloaded.\n");
688}
689
690/**
691 * Functions running under a process id
692 *
693 * The I/O functions:
694 * z90crypt_open
695 * z90crypt_release
696 * z90crypt_read
697 * z90crypt_write
698 * z90crypt_unlocked_ioctl
699 * z90crypt_status
700 * z90crypt_status_write
701 * disable_card
702 * enable_card
703 *
704 * Helper functions:
705 * z90crypt_rsa
706 * z90crypt_prepare
707 * z90crypt_send
708 * z90crypt_process_results
709 *
710 */
711static int
712z90crypt_open(struct inode *inode, struct file *filp)
713{
714 struct priv_data *private_data_p;
715
716 if (quiesce_z90crypt)
717 return -EQUIESCE;
718
719 private_data_p = kmalloc(sizeof(struct priv_data), GFP_KERNEL);
720 if (!private_data_p) {
721 PRINTK("Memory allocate failed\n");
722 return -ENOMEM;
723 }
724
725 memset((void *)private_data_p, 0, sizeof(struct priv_data));
726 private_data_p->status = STAT_OPEN;
727 private_data_p->opener_pid = PID();
728 filp->private_data = private_data_p;
729 atomic_inc(&total_open);
730
731 return 0;
732}
733
734static int
735z90crypt_release(struct inode *inode, struct file *filp)
736{
737 struct priv_data *private_data_p = filp->private_data;
738
739 PDEBUG("PID %d (filp %p)\n", PID(), filp);
740
741 private_data_p->status = STAT_CLOSED;
742 memset(private_data_p, 0, sizeof(struct priv_data));
743 kfree(private_data_p);
744 atomic_dec(&total_open);
745
746 return 0;
747}
748
749/*
750 * there are two read functions; compile options choose one:
751 * without USE_GET_RANDOM_BYTES
752 * => read() always returns -EPERM;
753 * otherwise
754 * => read() uses get_random_bytes() kernel function
755 */
756#ifndef USE_GET_RANDOM_BYTES
757/**
758 * z90crypt_read will not be supported beyond z90crypt 1.3.1
759 */
760static ssize_t
761z90crypt_read(struct file *filp, char __user *buf, size_t count, loff_t *f_pos)
762{
763 PDEBUG("filp %p (PID %d)\n", filp, PID());
764 return -EPERM;
765}
766#else // we want to use get_random_bytes
767/**
768 * read() just returns a string of random bytes. Since we have no way
769 * to generate these cryptographically, we just execute get_random_bytes
770 * for the length specified.
771 */
772#include <linux/random.h>
773static ssize_t
774z90crypt_read(struct file *filp, char __user *buf, size_t count, loff_t *f_pos)
775{
776 unsigned char *temp_buff;
777
778 PDEBUG("filp %p (PID %d)\n", filp, PID());
779
780 if (quiesce_z90crypt)
781 return -EQUIESCE;
782 if (count < 0) {
783 PRINTK("Requested random byte count negative: %ld\n", count);
784 return -EINVAL;
785 }
786 if (count > RESPBUFFSIZE) {
787 PDEBUG("count[%d] > RESPBUFFSIZE", count);
788 return -EINVAL;
789 }
790 if (count == 0)
791 return 0;
792 temp_buff = kmalloc(RESPBUFFSIZE, GFP_KERNEL);
793 if (!temp_buff) {
794 PRINTK("Memory allocate failed\n");
795 return -ENOMEM;
796 }
797 get_random_bytes(temp_buff, count);
798
799 if (copy_to_user(buf, temp_buff, count) != 0) {
800 kfree(temp_buff);
801 return -EFAULT;
802 }
803 kfree(temp_buff);
804 return count;
805}
806#endif
807
808/**
809 * Write is not allowed
810 */
811static ssize_t
812z90crypt_write(struct file *filp, const char __user *buf, size_t count, loff_t *f_pos)
813{
814 PDEBUG("filp %p (PID %d)\n", filp, PID());
815 return -EPERM;
816}
817
818/**
819 * New status functions
820 */
821static inline int
822get_status_totalcount(void)
823{
824 return z90crypt.hdware_info->hdware_mask.st_count;
825}
826
827static inline int
828get_status_PCICAcount(void)
829{
830 return z90crypt.hdware_info->type_mask[PCICA].st_count;
831}
832
833static inline int
834get_status_PCICCcount(void)
835{
836 return z90crypt.hdware_info->type_mask[PCICC].st_count;
837}
838
839static inline int
840get_status_PCIXCCcount(void)
841{
842 return z90crypt.hdware_info->type_mask[PCIXCC_MCL2].st_count +
843 z90crypt.hdware_info->type_mask[PCIXCC_MCL3].st_count;
844}
845
846static inline int
847get_status_PCIXCCMCL2count(void)
848{
849 return z90crypt.hdware_info->type_mask[PCIXCC_MCL2].st_count;
850}
851
852static inline int
853get_status_PCIXCCMCL3count(void)
854{
855 return z90crypt.hdware_info->type_mask[PCIXCC_MCL3].st_count;
856}
857
858static inline int
859get_status_CEX2Ccount(void)
860{
861 return z90crypt.hdware_info->type_mask[CEX2C].st_count;
862}
863
864static inline int
865get_status_requestq_count(void)
866{
867 return requestq_count;
868}
869
870static inline int
871get_status_pendingq_count(void)
872{
873 return pendingq_count;
874}
875
876static inline int
877get_status_totalopen_count(void)
878{
879 return atomic_read(&total_open);
880}
881
882static inline int
883get_status_domain_index(void)
884{
885 return z90crypt.cdx;
886}
887
888static inline unsigned char *
889get_status_status_mask(unsigned char status[Z90CRYPT_NUM_APS])
890{
891 int i, ix;
892
893 memcpy(status, z90crypt.hdware_info->device_type_array,
894 Z90CRYPT_NUM_APS);
895
896 for (i = 0; i < get_status_totalcount(); i++) {
897 ix = SHRT2LONG(i);
898 if (LONG2DEVPTR(ix)->user_disabled)
899 status[ix] = 0x0d;
900 }
901
902 return status;
903}
904
905static inline unsigned char *
906get_status_qdepth_mask(unsigned char qdepth[Z90CRYPT_NUM_APS])
907{
908 int i, ix;
909
910 memset(qdepth, 0, Z90CRYPT_NUM_APS);
911
912 for (i = 0; i < get_status_totalcount(); i++) {
913 ix = SHRT2LONG(i);
914 qdepth[ix] = LONG2DEVPTR(ix)->dev_caller_count;
915 }
916
917 return qdepth;
918}
919
920static inline unsigned int *
921get_status_perdevice_reqcnt(unsigned int reqcnt[Z90CRYPT_NUM_APS])
922{
923 int i, ix;
924
925 memset(reqcnt, 0, Z90CRYPT_NUM_APS * sizeof(int));
926
927 for (i = 0; i < get_status_totalcount(); i++) {
928 ix = SHRT2LONG(i);
929 reqcnt[ix] = LONG2DEVPTR(ix)->dev_total_req_cnt;
930 }
931
932 return reqcnt;
933}
934
935static inline void
936init_work_element(struct work_element *we_p,
937 struct priv_data *priv_data, pid_t pid)
938{
939 int step;
940
941 we_p->requestptr = (unsigned char *)we_p + sizeof(struct work_element);
942 /* Come up with a unique id for this caller. */
943 step = atomic_inc_return(&z90crypt_step);
944 memcpy(we_p->caller_id+0, (void *) &pid, sizeof(pid));
945 memcpy(we_p->caller_id+4, (void *) &step, sizeof(step));
946 we_p->pid = pid;
947 we_p->priv_data = priv_data;
948 we_p->status[0] = STAT_DEFAULT;
949 we_p->audit[0] = 0x00;
950 we_p->audit[1] = 0x00;
951 we_p->audit[2] = 0x00;
952 we_p->resp_buff_size = 0;
953 we_p->retcode = 0;
954 we_p->devindex = -1;
955 we_p->devtype = -1;
956 atomic_set(&we_p->alarmrung, 0);
957 init_waitqueue_head(&we_p->waitq);
958 INIT_LIST_HEAD(&(we_p->liste));
959}
960
961static inline int
962allocate_work_element(struct work_element **we_pp,
963 struct priv_data *priv_data_p, pid_t pid)
964{
965 struct work_element *we_p;
966
967 we_p = (struct work_element *) get_zeroed_page(GFP_KERNEL);
968 if (!we_p)
969 return -ENOMEM;
970 init_work_element(we_p, priv_data_p, pid);
971 *we_pp = we_p;
972 return 0;
973}
974
975static inline void
976remove_device(struct device *device_p)
977{
978 if (!device_p || (device_p->disabled != 0))
979 return;
980 device_p->disabled = 1;
981 z90crypt.hdware_info->type_mask[device_p->dev_type].disabled_count++;
982 z90crypt.hdware_info->hdware_mask.disabled_count++;
983}
984
985/**
986 * Bitlength limits for each card
987 *
988 * There are new MCLs which allow more bitlengths. See the table for details.
989 * The MCL must be applied and the newer bitlengths enabled for these to work.
990 *
991 * Card Type Old limit New limit
992 * PCICA ??-2048 same (the lower limit is less than 128 bit...)
993 * PCICC 512-1024 512-2048
994 * PCIXCC_MCL2 512-2048 ----- (applying any GA LIC will make an MCL3 card)
995 * PCIXCC_MCL3 ----- 128-2048
996 * CEX2C 512-2048 128-2048
997 *
998 * ext_bitlens (extended bitlengths) is a global, since you should not apply an
999 * MCL to just one card in a machine. We assume, at first, that all cards have
1000 * these capabilities.
1001 */
1002int ext_bitlens = 1; // This is global
1003#define PCIXCC_MIN_MOD_SIZE 16 // 128 bits
1004#define OLD_PCIXCC_MIN_MOD_SIZE 64 // 512 bits
1005#define PCICC_MIN_MOD_SIZE 64 // 512 bits
1006#define OLD_PCICC_MAX_MOD_SIZE 128 // 1024 bits
1007#define MAX_MOD_SIZE 256 // 2048 bits
1008
1009static inline int
1010select_device_type(int *dev_type_p, int bytelength)
1011{
1012 static int count = 0;
1013 int PCICA_avail, PCIXCC_MCL3_avail, CEX2C_avail, index_to_use;
1014 struct status *stat;
1015 if ((*dev_type_p != PCICC) && (*dev_type_p != PCICA) &&
1016 (*dev_type_p != PCIXCC_MCL2) && (*dev_type_p != PCIXCC_MCL3) &&
1017 (*dev_type_p != CEX2C) && (*dev_type_p != ANYDEV))
1018 return -1;
1019 if (*dev_type_p != ANYDEV) {
1020 stat = &z90crypt.hdware_info->type_mask[*dev_type_p];
1021 if (stat->st_count >
1022 (stat->disabled_count + stat->user_disabled_count))
1023 return 0;
1024 return -1;
1025 }
1026
1027 /* Assumption: PCICA, PCIXCC_MCL3, and CEX2C are all similar in speed */
1028 stat = &z90crypt.hdware_info->type_mask[PCICA];
1029 PCICA_avail = stat->st_count -
1030 (stat->disabled_count + stat->user_disabled_count);
1031 stat = &z90crypt.hdware_info->type_mask[PCIXCC_MCL3];
1032 PCIXCC_MCL3_avail = stat->st_count -
1033 (stat->disabled_count + stat->user_disabled_count);
1034 stat = &z90crypt.hdware_info->type_mask[CEX2C];
1035 CEX2C_avail = stat->st_count -
1036 (stat->disabled_count + stat->user_disabled_count);
1037 if (PCICA_avail || PCIXCC_MCL3_avail || CEX2C_avail) {
1038 /**
1039 * bitlength is a factor; PCICA is the most capable, even with
1040 * the new MCL for PCIXCC.
1041 */
1042 if ((bytelength < PCIXCC_MIN_MOD_SIZE) ||
1043 (!ext_bitlens && (bytelength < OLD_PCIXCC_MIN_MOD_SIZE))) {
1044 if (!PCICA_avail)
1045 return -1;
1046 else {
1047 *dev_type_p = PCICA;
1048 return 0;
1049 }
1050 }
1051
1052 index_to_use = count % (PCICA_avail + PCIXCC_MCL3_avail +
1053 CEX2C_avail);
1054 if (index_to_use < PCICA_avail)
1055 *dev_type_p = PCICA;
1056 else if (index_to_use < (PCICA_avail + PCIXCC_MCL3_avail))
1057 *dev_type_p = PCIXCC_MCL3;
1058 else
1059 *dev_type_p = CEX2C;
1060 count++;
1061 return 0;
1062 }
1063
1064 /* Less than OLD_PCIXCC_MIN_MOD_SIZE cannot go to a PCIXCC_MCL2 */
1065 if (bytelength < OLD_PCIXCC_MIN_MOD_SIZE)
1066 return -1;
1067 stat = &z90crypt.hdware_info->type_mask[PCIXCC_MCL2];
1068 if (stat->st_count >
1069 (stat->disabled_count + stat->user_disabled_count)) {
1070 *dev_type_p = PCIXCC_MCL2;
1071 return 0;
1072 }
1073
1074 /**
1075 * Less than PCICC_MIN_MOD_SIZE or more than OLD_PCICC_MAX_MOD_SIZE
1076 * (if we don't have the MCL applied and the newer bitlengths enabled)
1077 * cannot go to a PCICC
1078 */
1079 if ((bytelength < PCICC_MIN_MOD_SIZE) ||
1080 (!ext_bitlens && (bytelength > OLD_PCICC_MAX_MOD_SIZE))) {
1081 return -1;
1082 }
1083 stat = &z90crypt.hdware_info->type_mask[PCICC];
1084 if (stat->st_count >
1085 (stat->disabled_count + stat->user_disabled_count)) {
1086 *dev_type_p = PCICC;
1087 return 0;
1088 }
1089
1090 return -1;
1091}
1092
1093/**
1094 * Try the selected number, then the selected type (can be ANYDEV)
1095 */
1096static inline int
1097select_device(int *dev_type_p, int *device_nr_p, int bytelength)
1098{
1099 int i, indx, devTp, low_count, low_indx;
1100 struct device_x *index_p;
1101 struct device *dev_ptr;
1102
1103 PDEBUG("device type = %d, index = %d\n", *dev_type_p, *device_nr_p);
1104 if ((*device_nr_p >= 0) && (*device_nr_p < Z90CRYPT_NUM_DEVS)) {
1105 PDEBUG("trying index = %d\n", *device_nr_p);
1106 dev_ptr = z90crypt.device_p[*device_nr_p];
1107
1108 if (dev_ptr &&
1109 (dev_ptr->dev_stat != DEV_GONE) &&
1110 (dev_ptr->disabled == 0) &&
1111 (dev_ptr->user_disabled == 0)) {
1112 PDEBUG("selected by number, index = %d\n",
1113 *device_nr_p);
1114 *dev_type_p = dev_ptr->dev_type;
1115 return *device_nr_p;
1116 }
1117 }
1118 *device_nr_p = -1;
1119 PDEBUG("trying type = %d\n", *dev_type_p);
1120 devTp = *dev_type_p;
1121 if (select_device_type(&devTp, bytelength) == -1) {
1122 PDEBUG("failed to select by type\n");
1123 return -1;
1124 }
1125 PDEBUG("selected type = %d\n", devTp);
1126 index_p = &z90crypt.hdware_info->type_x_addr[devTp];
1127 low_count = 0x0000FFFF;
1128 low_indx = -1;
1129 for (i = 0; i < z90crypt.hdware_info->type_mask[devTp].st_count; i++) {
1130 indx = index_p->device_index[i];
1131 dev_ptr = z90crypt.device_p[indx];
1132 if (dev_ptr &&
1133 (dev_ptr->dev_stat != DEV_GONE) &&
1134 (dev_ptr->disabled == 0) &&
1135 (dev_ptr->user_disabled == 0) &&
1136 (devTp == dev_ptr->dev_type) &&
1137 (low_count > dev_ptr->dev_caller_count)) {
1138 low_count = dev_ptr->dev_caller_count;
1139 low_indx = indx;
1140 }
1141 }
1142 *device_nr_p = low_indx;
1143 return low_indx;
1144}
1145
1146static inline int
1147send_to_crypto_device(struct work_element *we_p)
1148{
1149 struct caller *caller_p;
1150 struct device *device_p;
1151 int dev_nr;
1152 int bytelen = ((struct ica_rsa_modexpo *)we_p->buffer)->inputdatalength;
1153
1154 if (!we_p->requestptr)
1155 return SEN_FATAL_ERROR;
1156 caller_p = (struct caller *)we_p->requestptr;
1157 dev_nr = we_p->devindex;
1158 if (select_device(&we_p->devtype, &dev_nr, bytelen) == -1) {
1159 if (z90crypt.hdware_info->hdware_mask.st_count != 0)
1160 return SEN_RETRY;
1161 else
1162 return SEN_NOT_AVAIL;
1163 }
1164 we_p->devindex = dev_nr;
1165 device_p = z90crypt.device_p[dev_nr];
1166 if (!device_p)
1167 return SEN_NOT_AVAIL;
1168 if (device_p->dev_type != we_p->devtype)
1169 return SEN_RETRY;
1170 if (device_p->dev_caller_count >= device_p->dev_q_depth)
1171 return SEN_QUEUE_FULL;
1172 PDEBUG("device number prior to send: %d\n", dev_nr);
1173 switch (send_to_AP(dev_nr, z90crypt.cdx,
1174 caller_p->caller_dev_dep_req_l,
1175 caller_p->caller_dev_dep_req_p)) {
1176 case DEV_SEN_EXCEPTION:
1177 PRINTKC("Exception during send to device %d\n", dev_nr);
1178 z90crypt.terminating = 1;
1179 return SEN_FATAL_ERROR;
1180 case DEV_GONE:
1181 PRINTK("Device %d not available\n", dev_nr);
1182 remove_device(device_p);
1183 return SEN_NOT_AVAIL;
1184 case DEV_EMPTY:
1185 return SEN_NOT_AVAIL;
1186 case DEV_NO_WORK:
1187 return SEN_FATAL_ERROR;
1188 case DEV_BAD_MESSAGE:
1189 return SEN_USER_ERROR;
1190 case DEV_QUEUE_FULL:
1191 return SEN_QUEUE_FULL;
1192 default:
1193 case DEV_ONLINE:
1194 break;
1195 }
1196 list_add_tail(&(caller_p->caller_liste), &(device_p->dev_caller_list));
1197 device_p->dev_caller_count++;
1198 return 0;
1199}
1200
1201/**
1202 * Send puts the user's work on one of two queues:
1203 * the pending queue if the send was successful
1204 * the request queue if the send failed because device full or busy
1205 */
1206static inline int
1207z90crypt_send(struct work_element *we_p, const char *buf)
1208{
1209 int rv;
1210
1211 PDEBUG("PID %d\n", PID());
1212
1213 if (CHK_RDWRMASK(we_p->status[0]) != STAT_NOWORK) {
1214 PDEBUG("PID %d tried to send more work but has outstanding "
1215 "work.\n", PID());
1216 return -EWORKPEND;
1217 }
1218 we_p->devindex = -1; // Reset device number
1219 spin_lock_irq(&queuespinlock);
1220 rv = send_to_crypto_device(we_p);
1221 switch (rv) {
1222 case 0:
1223 we_p->requestsent = jiffies;
1224 we_p->audit[0] |= FP_SENT;
1225 list_add_tail(&we_p->liste, &pending_list);
1226 ++pendingq_count;
1227 we_p->audit[0] |= FP_PENDING;
1228 break;
1229 case SEN_BUSY:
1230 case SEN_QUEUE_FULL:
1231 rv = 0;
1232 we_p->devindex = -1; // any device will do
1233 we_p->requestsent = jiffies;
1234 list_add_tail(&we_p->liste, &request_list);
1235 ++requestq_count;
1236 we_p->audit[0] |= FP_REQUEST;
1237 break;
1238 case SEN_RETRY:
1239 rv = -ERESTARTSYS;
1240 break;
1241 case SEN_NOT_AVAIL:
1242 PRINTK("*** No devices available.\n");
1243 rv = we_p->retcode = -ENODEV;
1244 we_p->status[0] |= STAT_FAILED;
1245 break;
1246 case REC_OPERAND_INV:
1247 case REC_OPERAND_SIZE:
1248 case REC_EVEN_MOD:
1249 case REC_INVALID_PAD:
1250 rv = we_p->retcode = -EINVAL;
1251 we_p->status[0] |= STAT_FAILED;
1252 break;
1253 default:
1254 we_p->retcode = rv;
1255 we_p->status[0] |= STAT_FAILED;
1256 break;
1257 }
1258 if (rv != -ERESTARTSYS)
1259 SET_RDWRMASK(we_p->status[0], STAT_WRITTEN);
1260 spin_unlock_irq(&queuespinlock);
1261 if (rv == 0)
1262 tasklet_schedule(&reader_tasklet);
1263 return rv;
1264}
1265
1266/**
1267 * process_results copies the results of the user's work back to user space.
1268 */
1269static inline int
1270z90crypt_process_results(struct work_element *we_p, char __user *buf)
1271{
1272 int rv;
1273
1274 PDEBUG("we_p %p (PID %d)\n", we_p, PID());
1275
1276 LONG2DEVPTR(we_p->devindex)->dev_total_req_cnt++;
1277 SET_RDWRMASK(we_p->status[0], STAT_READPEND);
1278
1279 rv = 0;
1280 if (!we_p->buffer) {
1281 PRINTK("we_p %p PID %d in STAT_READPEND: buffer NULL.\n",
1282 we_p, PID());
1283 rv = -ENOBUFF;
1284 }
1285
1286 if (!rv)
1287 if ((rv = copy_to_user(buf, we_p->buffer, we_p->buff_size))) {
1288 PDEBUG("copy_to_user failed: rv = %d\n", rv);
1289 rv = -EFAULT;
1290 }
1291
1292 if (!rv)
1293 rv = we_p->retcode;
1294 if (!rv)
1295 if (we_p->resp_buff_size
1296 && copy_to_user(we_p->resp_addr, we_p->resp_buff,
1297 we_p->resp_buff_size))
1298 rv = -EFAULT;
1299
1300 SET_RDWRMASK(we_p->status[0], STAT_NOWORK);
1301 return rv;
1302}
1303
1304static unsigned char NULL_psmid[8] =
1305{0x00, 0x00, 0x00, 0x00, 0x00, 0x00, 0x00, 0x00};
1306
1307/**
1308 * Used in device configuration functions
1309 */
1310#define MAX_RESET 90
1311
1312/**
1313 * This is used only for PCICC support
1314 */
1315static inline int
1316is_PKCS11_padded(unsigned char *buffer, int length)
1317{
1318 int i;
1319 if ((buffer[0] != 0x00) || (buffer[1] != 0x01))
1320 return 0;
1321 for (i = 2; i < length; i++)
1322 if (buffer[i] != 0xFF)
1323 break;
1324 if ((i < 10) || (i == length))
1325 return 0;
1326 if (buffer[i] != 0x00)
1327 return 0;
1328 return 1;
1329}
1330
1331/**
1332 * This is used only for PCICC support
1333 */
1334static inline int
1335is_PKCS12_padded(unsigned char *buffer, int length)
1336{
1337 int i;
1338 if ((buffer[0] != 0x00) || (buffer[1] != 0x02))
1339 return 0;
1340 for (i = 2; i < length; i++)
1341 if (buffer[i] == 0x00)
1342 break;
1343 if ((i < 10) || (i == length))
1344 return 0;
1345 if (buffer[i] != 0x00)
1346 return 0;
1347 return 1;
1348}
1349
1350/**
1351 * builds struct caller and converts message from generic format to
1352 * device-dependent format
1353 * func is ICARSAMODEXPO or ICARSACRT
1354 * function is PCI_FUNC_KEY_ENCRYPT or PCI_FUNC_KEY_DECRYPT
1355 */
1356static inline int
1357build_caller(struct work_element *we_p, short function)
1358{
1359 int rv;
1360 struct caller *caller_p = (struct caller *)we_p->requestptr;
1361
1362 if ((we_p->devtype != PCICC) && (we_p->devtype != PCICA) &&
1363 (we_p->devtype != PCIXCC_MCL2) && (we_p->devtype != PCIXCC_MCL3) &&
1364 (we_p->devtype != CEX2C))
1365 return SEN_NOT_AVAIL;
1366
1367 memcpy(caller_p->caller_id, we_p->caller_id,
1368 sizeof(caller_p->caller_id));
1369 caller_p->caller_dev_dep_req_p = caller_p->caller_dev_dep_req;
1370 caller_p->caller_dev_dep_req_l = MAX_RESPONSE_SIZE;
1371 caller_p->caller_buf_p = we_p->buffer;
1372 INIT_LIST_HEAD(&(caller_p->caller_liste));
1373
1374 rv = convert_request(we_p->buffer, we_p->funccode, function,
1375 z90crypt.cdx, we_p->devtype,
1376 &caller_p->caller_dev_dep_req_l,
1377 caller_p->caller_dev_dep_req_p);
1378 if (rv) {
1379 if (rv == SEN_NOT_AVAIL)
1380 PDEBUG("request can't be processed on hdwr avail\n");
1381 else
1382 PRINTK("Error from convert_request: %d\n", rv);
1383 }
1384 else
1385 memcpy(&(caller_p->caller_dev_dep_req_p[4]), we_p->caller_id,8);
1386 return rv;
1387}
1388
1389static inline void
1390unbuild_caller(struct device *device_p, struct caller *caller_p)
1391{
1392 if (!caller_p)
1393 return;
1394 if (caller_p->caller_liste.next && caller_p->caller_liste.prev)
1395 if (!list_empty(&caller_p->caller_liste)) {
1396 list_del_init(&caller_p->caller_liste);
1397 device_p->dev_caller_count--;
1398 }
1399 memset(caller_p->caller_id, 0, sizeof(caller_p->caller_id));
1400}
1401
1402static inline int
1403get_crypto_request_buffer(struct work_element *we_p)
1404{
1405 struct ica_rsa_modexpo *mex_p;
1406 struct ica_rsa_modexpo_crt *crt_p;
1407 unsigned char *temp_buffer;
1408 short function;
1409 int rv;
1410
1411 mex_p = (struct ica_rsa_modexpo *) we_p->buffer;
1412 crt_p = (struct ica_rsa_modexpo_crt *) we_p->buffer;
1413
1414 PDEBUG("device type input = %d\n", we_p->devtype);
1415
1416 if (z90crypt.terminating)
1417 return REC_NO_RESPONSE;
1418 if (memcmp(we_p->caller_id, NULL_psmid, 8) == 0) {
1419 PRINTK("psmid zeroes\n");
1420 return SEN_FATAL_ERROR;
1421 }
1422 if (!we_p->buffer) {
1423 PRINTK("buffer pointer NULL\n");
1424 return SEN_USER_ERROR;
1425 }
1426 if (!we_p->requestptr) {
1427 PRINTK("caller pointer NULL\n");
1428 return SEN_USER_ERROR;
1429 }
1430
1431 if ((we_p->devtype != PCICA) && (we_p->devtype != PCICC) &&
1432 (we_p->devtype != PCIXCC_MCL2) && (we_p->devtype != PCIXCC_MCL3) &&
1433 (we_p->devtype != CEX2C) && (we_p->devtype != ANYDEV)) {
1434 PRINTK("invalid device type\n");
1435 return SEN_USER_ERROR;
1436 }
1437
1438 if ((mex_p->inputdatalength < 1) ||
1439 (mex_p->inputdatalength > MAX_MOD_SIZE)) {
1440 PRINTK("inputdatalength[%d] is not valid\n",
1441 mex_p->inputdatalength);
1442 return SEN_USER_ERROR;
1443 }
1444
1445 if (mex_p->outputdatalength < mex_p->inputdatalength) {
1446 PRINTK("outputdatalength[%d] < inputdatalength[%d]\n",
1447 mex_p->outputdatalength, mex_p->inputdatalength);
1448 return SEN_USER_ERROR;
1449 }
1450
1451 if (!mex_p->inputdata || !mex_p->outputdata) {
1452 PRINTK("inputdata[%p] or outputdata[%p] is NULL\n",
1453 mex_p->outputdata, mex_p->inputdata);
1454 return SEN_USER_ERROR;
1455 }
1456
1457 /**
1458 * As long as outputdatalength is big enough, we can set the
1459 * outputdatalength equal to the inputdatalength, since that is the
1460 * number of bytes we will copy in any case
1461 */
1462 mex_p->outputdatalength = mex_p->inputdatalength;
1463
1464 rv = 0;
1465 switch (we_p->funccode) {
1466 case ICARSAMODEXPO:
1467 if (!mex_p->b_key || !mex_p->n_modulus)
1468 rv = SEN_USER_ERROR;
1469 break;
1470 case ICARSACRT:
1471 if (!IS_EVEN(crt_p->inputdatalength)) {
1472 PRINTK("inputdatalength[%d] is odd, CRT form\n",
1473 crt_p->inputdatalength);
1474 rv = SEN_USER_ERROR;
1475 break;
1476 }
1477 if (!crt_p->bp_key ||
1478 !crt_p->bq_key ||
1479 !crt_p->np_prime ||
1480 !crt_p->nq_prime ||
1481 !crt_p->u_mult_inv) {
1482 PRINTK("CRT form, bad data: %p/%p/%p/%p/%p\n",
1483 crt_p->bp_key, crt_p->bq_key,
1484 crt_p->np_prime, crt_p->nq_prime,
1485 crt_p->u_mult_inv);
1486 rv = SEN_USER_ERROR;
1487 }
1488 break;
1489 default:
1490 PRINTK("bad func = %d\n", we_p->funccode);
1491 rv = SEN_USER_ERROR;
1492 break;
1493 }
1494 if (rv != 0)
1495 return rv;
1496
1497 if (select_device_type(&we_p->devtype, mex_p->inputdatalength) < 0)
1498 return SEN_NOT_AVAIL;
1499
1500 temp_buffer = (unsigned char *)we_p + sizeof(struct work_element) +
1501 sizeof(struct caller);
1502 if (copy_from_user(temp_buffer, mex_p->inputdata,
1503 mex_p->inputdatalength) != 0)
1504 return SEN_RELEASED;
1505
1506 function = PCI_FUNC_KEY_ENCRYPT;
1507 switch (we_p->devtype) {
1508 /* PCICA does everything with a simple RSA mod-expo operation */
1509 case PCICA:
1510 function = PCI_FUNC_KEY_ENCRYPT;
1511 break;
1512 /**
1513 * PCIXCC_MCL2 does all Mod-Expo form with a simple RSA mod-expo
1514 * operation, and all CRT forms with a PKCS-1.2 format decrypt.
1515 * PCIXCC_MCL3 and CEX2C do all Mod-Expo and CRT forms with a simple RSA
1516 * mod-expo operation
1517 */
1518 case PCIXCC_MCL2:
1519 if (we_p->funccode == ICARSAMODEXPO)
1520 function = PCI_FUNC_KEY_ENCRYPT;
1521 else
1522 function = PCI_FUNC_KEY_DECRYPT;
1523 break;
1524 case PCIXCC_MCL3:
1525 case CEX2C:
1526 if (we_p->funccode == ICARSAMODEXPO)
1527 function = PCI_FUNC_KEY_ENCRYPT;
1528 else
1529 function = PCI_FUNC_KEY_DECRYPT;
1530 break;
1531 /**
1532 * PCICC does everything as a PKCS-1.2 format request
1533 */
1534 case PCICC:
1535 /* PCICC cannot handle input that is PKCS#1.1 padded */
1536 if (is_PKCS11_padded(temp_buffer, mex_p->inputdatalength)) {
1537 return SEN_NOT_AVAIL;
1538 }
1539 if (we_p->funccode == ICARSAMODEXPO) {
1540 if (is_PKCS12_padded(temp_buffer,
1541 mex_p->inputdatalength))
1542 function = PCI_FUNC_KEY_ENCRYPT;
1543 else
1544 function = PCI_FUNC_KEY_DECRYPT;
1545 } else
1546 /* all CRT forms are decrypts */
1547 function = PCI_FUNC_KEY_DECRYPT;
1548 break;
1549 }
1550 PDEBUG("function: %04x\n", function);
1551 rv = build_caller(we_p, function);
1552 PDEBUG("rv from build_caller = %d\n", rv);
1553 return rv;
1554}
1555
1556static inline int
1557z90crypt_prepare(struct work_element *we_p, unsigned int funccode,
1558 const char __user *buffer)
1559{
1560 int rv;
1561
1562 we_p->devindex = -1;
1563 if (funccode == ICARSAMODEXPO)
1564 we_p->buff_size = sizeof(struct ica_rsa_modexpo);
1565 else
1566 we_p->buff_size = sizeof(struct ica_rsa_modexpo_crt);
1567
1568 if (copy_from_user(we_p->buffer, buffer, we_p->buff_size))
1569 return -EFAULT;
1570
1571 we_p->audit[0] |= FP_COPYFROM;
1572 SET_RDWRMASK(we_p->status[0], STAT_WRITTEN);
1573 we_p->funccode = funccode;
1574 we_p->devtype = -1;
1575 we_p->audit[0] |= FP_BUFFREQ;
1576 rv = get_crypto_request_buffer(we_p);
1577 switch (rv) {
1578 case 0:
1579 we_p->audit[0] |= FP_BUFFGOT;
1580 break;
1581 case SEN_USER_ERROR:
1582 rv = -EINVAL;
1583 break;
1584 case SEN_QUEUE_FULL:
1585 rv = 0;
1586 break;
1587 case SEN_RELEASED:
1588 rv = -EFAULT;
1589 break;
1590 case REC_NO_RESPONSE:
1591 rv = -ENODEV;
1592 break;
1593 case SEN_NOT_AVAIL:
1594 case EGETBUFF:
1595 rv = -EGETBUFF;
1596 break;
1597 default:
1598 PRINTK("rv = %d\n", rv);
1599 rv = -EGETBUFF;
1600 break;
1601 }
1602 if (CHK_RDWRMASK(we_p->status[0]) == STAT_WRITTEN)
1603 SET_RDWRMASK(we_p->status[0], STAT_DEFAULT);
1604 return rv;
1605}
1606
1607static inline void
1608purge_work_element(struct work_element *we_p)
1609{
1610 struct list_head *lptr;
1611
1612 spin_lock_irq(&queuespinlock);
1613 list_for_each(lptr, &request_list) {
1614 if (lptr == &we_p->liste) {
1615 list_del_init(lptr);
1616 requestq_count--;
1617 break;
1618 }
1619 }
1620 list_for_each(lptr, &pending_list) {
1621 if (lptr == &we_p->liste) {
1622 list_del_init(lptr);
1623 pendingq_count--;
1624 break;
1625 }
1626 }
1627 spin_unlock_irq(&queuespinlock);
1628}
1629
1630/**
1631 * Build the request and send it.
1632 */
1633static inline int
1634z90crypt_rsa(struct priv_data *private_data_p, pid_t pid,
1635 unsigned int cmd, unsigned long arg)
1636{
1637 struct work_element *we_p;
1638 int rv;
1639
1640 if ((rv = allocate_work_element(&we_p, private_data_p, pid))) {
1641 PDEBUG("PID %d: allocate_work_element returned ENOMEM\n", pid);
1642 return rv;
1643 }
1644 if ((rv = z90crypt_prepare(we_p, cmd, (const char __user *)arg)))
1645 PDEBUG("PID %d: rv = %d from z90crypt_prepare\n", pid, rv);
1646 if (!rv)
1647 if ((rv = z90crypt_send(we_p, (const char *)arg)))
1648 PDEBUG("PID %d: rv %d from z90crypt_send.\n", pid, rv);
1649 if (!rv) {
1650 we_p->audit[0] |= FP_ASLEEP;
1651 wait_event(we_p->waitq, atomic_read(&we_p->alarmrung));
1652 we_p->audit[0] |= FP_AWAKE;
1653 rv = we_p->retcode;
1654 }
1655 if (!rv)
1656 rv = z90crypt_process_results(we_p, (char __user *)arg);
1657
1658 if ((we_p->status[0] & STAT_FAILED)) {
1659 switch (rv) {
1660 /**
1661 * EINVAL *after* receive is almost always a padding error or
1662 * length error issued by a coprocessor (not an accelerator).
1663 * We convert this return value to -EGETBUFF which should
1664 * trigger a fallback to software.
1665 */
1666 case -EINVAL:
1667 if (we_p->devtype != PCICA)
1668 rv = -EGETBUFF;
1669 break;
1670 case -ETIMEOUT:
1671 if (z90crypt.mask.st_count > 0)
1672 rv = -ERESTARTSYS; // retry with another
1673 else
1674 rv = -ENODEV; // no cards left
1675 /* fall through to clean up request queue */
1676 case -ERESTARTSYS:
1677 case -ERELEASED:
1678 switch (CHK_RDWRMASK(we_p->status[0])) {
1679 case STAT_WRITTEN:
1680 purge_work_element(we_p);
1681 break;
1682 case STAT_READPEND:
1683 case STAT_NOWORK:
1684 default:
1685 break;
1686 }
1687 break;
1688 default:
1689 we_p->status[0] ^= STAT_FAILED;
1690 break;
1691 }
1692 }
1693 free_page((long)we_p);
1694 return rv;
1695}
1696
1697/**
1698 * This function is a little long, but it's really just one large switch
1699 * statement.
1700 */
1701static long
1702z90crypt_unlocked_ioctl(struct file *filp, unsigned int cmd, unsigned long arg)
1703{
1704 struct priv_data *private_data_p = filp->private_data;
1705 unsigned char *status;
1706 unsigned char *qdepth;
1707 unsigned int *reqcnt;
1708 struct ica_z90_status *pstat;
1709 int ret, i, loopLim, tempstat;
1710 static int deprecated_msg_count1 = 0;
1711 static int deprecated_msg_count2 = 0;
1712
1713 PDEBUG("filp %p (PID %d), cmd 0x%08X\n", filp, PID(), cmd);
1714 PDEBUG("cmd 0x%08X: dir %s, size 0x%04X, type 0x%02X, nr 0x%02X\n",
1715 cmd,
1716 !_IOC_DIR(cmd) ? "NO"
1717 : ((_IOC_DIR(cmd) == (_IOC_READ|_IOC_WRITE)) ? "RW"
1718 : ((_IOC_DIR(cmd) == _IOC_READ) ? "RD"
1719 : "WR")),
1720 _IOC_SIZE(cmd), _IOC_TYPE(cmd), _IOC_NR(cmd));
1721
1722 if (_IOC_TYPE(cmd) != Z90_IOCTL_MAGIC) {
1723 PRINTK("cmd 0x%08X contains bad magic\n", cmd);
1724 return -ENOTTY;
1725 }
1726
1727 ret = 0;
1728 switch (cmd) {
1729 case ICARSAMODEXPO:
1730 case ICARSACRT:
1731 if (quiesce_z90crypt) {
1732 ret = -EQUIESCE;
1733 break;
1734 }
1735 ret = -ENODEV; // Default if no devices
1736 loopLim = z90crypt.hdware_info->hdware_mask.st_count -
1737 (z90crypt.hdware_info->hdware_mask.disabled_count +
1738 z90crypt.hdware_info->hdware_mask.user_disabled_count);
1739 for (i = 0; i < loopLim; i++) {
1740 ret = z90crypt_rsa(private_data_p, PID(), cmd, arg);
1741 if (ret != -ERESTARTSYS)
1742 break;
1743 }
1744 if (ret == -ERESTARTSYS)
1745 ret = -ENODEV;
1746 break;
1747
1748 case Z90STAT_TOTALCOUNT:
1749 tempstat = get_status_totalcount();
1750 if (copy_to_user((int __user *)arg, &tempstat,sizeof(int)) != 0)
1751 ret = -EFAULT;
1752 break;
1753
1754 case Z90STAT_PCICACOUNT:
1755 tempstat = get_status_PCICAcount();
1756 if (copy_to_user((int __user *)arg, &tempstat, sizeof(int)) != 0)
1757 ret = -EFAULT;
1758 break;
1759
1760 case Z90STAT_PCICCCOUNT:
1761 tempstat = get_status_PCICCcount();
1762 if (copy_to_user((int __user *)arg, &tempstat, sizeof(int)) != 0)
1763 ret = -EFAULT;
1764 break;
1765
1766 case Z90STAT_PCIXCCMCL2COUNT:
1767 tempstat = get_status_PCIXCCMCL2count();
1768 if (copy_to_user((int __user *)arg, &tempstat, sizeof(int)) != 0)
1769 ret = -EFAULT;
1770 break;
1771
1772 case Z90STAT_PCIXCCMCL3COUNT:
1773 tempstat = get_status_PCIXCCMCL3count();
1774 if (copy_to_user((int __user *)arg, &tempstat, sizeof(int)) != 0)
1775 ret = -EFAULT;
1776 break;
1777
1778 case Z90STAT_CEX2CCOUNT:
1779 tempstat = get_status_CEX2Ccount();
1780 if (copy_to_user((int __user *)arg, &tempstat, sizeof(int)) != 0)
1781 ret = -EFAULT;
1782 break;
1783
1784 case Z90STAT_REQUESTQ_COUNT:
1785 tempstat = get_status_requestq_count();
1786 if (copy_to_user((int __user *)arg, &tempstat, sizeof(int)) != 0)
1787 ret = -EFAULT;
1788 break;
1789
1790 case Z90STAT_PENDINGQ_COUNT:
1791 tempstat = get_status_pendingq_count();
1792 if (copy_to_user((int __user *)arg, &tempstat, sizeof(int)) != 0)
1793 ret = -EFAULT;
1794 break;
1795
1796 case Z90STAT_TOTALOPEN_COUNT:
1797 tempstat = get_status_totalopen_count();
1798 if (copy_to_user((int __user *)arg, &tempstat, sizeof(int)) != 0)
1799 ret = -EFAULT;
1800 break;
1801
1802 case Z90STAT_DOMAIN_INDEX:
1803 tempstat = get_status_domain_index();
1804 if (copy_to_user((int __user *)arg, &tempstat, sizeof(int)) != 0)
1805 ret = -EFAULT;
1806 break;
1807
1808 case Z90STAT_STATUS_MASK:
1809 status = kmalloc(Z90CRYPT_NUM_APS, GFP_KERNEL);
1810 if (!status) {
1811 PRINTK("kmalloc for status failed!\n");
1812 ret = -ENOMEM;
1813 break;
1814 }
1815 get_status_status_mask(status);
1816 if (copy_to_user((char __user *) arg, status, Z90CRYPT_NUM_APS)
1817 != 0)
1818 ret = -EFAULT;
1819 kfree(status);
1820 break;
1821
1822 case Z90STAT_QDEPTH_MASK:
1823 qdepth = kmalloc(Z90CRYPT_NUM_APS, GFP_KERNEL);
1824 if (!qdepth) {
1825 PRINTK("kmalloc for qdepth failed!\n");
1826 ret = -ENOMEM;
1827 break;
1828 }
1829 get_status_qdepth_mask(qdepth);
1830 if (copy_to_user((char __user *) arg, qdepth, Z90CRYPT_NUM_APS) != 0)
1831 ret = -EFAULT;
1832 kfree(qdepth);
1833 break;
1834
1835 case Z90STAT_PERDEV_REQCNT:
1836 reqcnt = kmalloc(sizeof(int) * Z90CRYPT_NUM_APS, GFP_KERNEL);
1837 if (!reqcnt) {
1838 PRINTK("kmalloc for reqcnt failed!\n");
1839 ret = -ENOMEM;
1840 break;
1841 }
1842 get_status_perdevice_reqcnt(reqcnt);
1843 if (copy_to_user((char __user *) arg, reqcnt,
1844 Z90CRYPT_NUM_APS * sizeof(int)) != 0)
1845 ret = -EFAULT;
1846 kfree(reqcnt);
1847 break;
1848
1849 /* THIS IS DEPRECATED. USE THE NEW STATUS CALLS */
1850 case ICAZ90STATUS:
1851 if (deprecated_msg_count1 < 20) {
1852 PRINTK("deprecated call to ioctl (ICAZ90STATUS)!\n");
1853 deprecated_msg_count1++;
1854 if (deprecated_msg_count1 == 20)
1855 PRINTK("No longer issuing messages related to "
1856 "deprecated call to ICAZ90STATUS.\n");
1857 }
1858
1859 pstat = kmalloc(sizeof(struct ica_z90_status), GFP_KERNEL);
1860 if (!pstat) {
1861 PRINTK("kmalloc for pstat failed!\n");
1862 ret = -ENOMEM;
1863 break;
1864 }
1865
1866 pstat->totalcount = get_status_totalcount();
1867 pstat->leedslitecount = get_status_PCICAcount();
1868 pstat->leeds2count = get_status_PCICCcount();
1869 pstat->requestqWaitCount = get_status_requestq_count();
1870 pstat->pendingqWaitCount = get_status_pendingq_count();
1871 pstat->totalOpenCount = get_status_totalopen_count();
1872 pstat->cryptoDomain = get_status_domain_index();
1873 get_status_status_mask(pstat->status);
1874 get_status_qdepth_mask(pstat->qdepth);
1875
1876 if (copy_to_user((struct ica_z90_status __user *) arg, pstat,
1877 sizeof(struct ica_z90_status)) != 0)
1878 ret = -EFAULT;
1879 kfree(pstat);
1880 break;
1881
1882 /* THIS IS DEPRECATED. USE THE NEW STATUS CALLS */
1883 case Z90STAT_PCIXCCCOUNT:
1884 if (deprecated_msg_count2 < 20) {
1885 PRINTK("deprecated ioctl (Z90STAT_PCIXCCCOUNT)!\n");
1886 deprecated_msg_count2++;
1887 if (deprecated_msg_count2 == 20)
1888 PRINTK("No longer issuing messages about depre"
1889 "cated ioctl Z90STAT_PCIXCCCOUNT.\n");
1890 }
1891
1892 tempstat = get_status_PCIXCCcount();
		if (copy_to_user((int __user *)arg, &tempstat, sizeof(int)) != 0)
1894 ret = -EFAULT;
1895 break;
1896
1897 case Z90QUIESCE:
1898 if (current->euid != 0) {
1899 PRINTK("QUIESCE fails: euid %d\n",
1900 current->euid);
1901 ret = -EACCES;
1902 } else {
1903 PRINTK("QUIESCE device from PID %d\n", PID());
1904 quiesce_z90crypt = 1;
1905 }
1906 break;
1907
1908 default:
1909 /* user passed an invalid IOCTL number */
1910 PDEBUG("cmd 0x%08X contains invalid ioctl code\n", cmd);
1911 ret = -ENOTTY;
1912 break;
1913 }
1914
1915 return ret;
1916}
1917
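/**
 * The sprint* helpers below format the /proc status output: sprintcl emits
 * one hex digit per byte, sprintrw emits one indented row of those digits in
 * 16-byte groups, sprinthx prints a title and splits a buffer into 64-byte
 * rows, and sprinthx4 prints an array of 32-bit words, eight per line.
 * Each returns the number of characters added, so callers can keep a
 * running offset into the output buffer.
 */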
1918static inline int
1919sprintcl(unsigned char *outaddr, unsigned char *addr, unsigned int len)
1920{
1921 int hl, i;
1922
1923 hl = 0;
1924 for (i = 0; i < len; i++)
1925 hl += sprintf(outaddr+hl, "%01x", (unsigned int) addr[i]);
1926 hl += sprintf(outaddr+hl, " ");
1927
1928 return hl;
1929}
1930
1931static inline int
1932sprintrw(unsigned char *outaddr, unsigned char *addr, unsigned int len)
1933{
1934 int hl, inl, c, cx;
1935
1936 hl = sprintf(outaddr, " ");
1937 inl = 0;
1938 for (c = 0; c < (len / 16); c++) {
1939 hl += sprintcl(outaddr+hl, addr+inl, 16);
1940 inl += 16;
1941 }
1942
1943 cx = len%16;
1944 if (cx) {
1945 hl += sprintcl(outaddr+hl, addr+inl, cx);
1946 inl += cx;
1947 }
1948
1949 hl += sprintf(outaddr+hl, "\n");
1950
1951 return hl;
1952}
1953
1954static inline int
1955sprinthx(unsigned char *title, unsigned char *outaddr,
1956 unsigned char *addr, unsigned int len)
1957{
1958 int hl, inl, r, rx;
1959
1960 hl = sprintf(outaddr, "\n%s\n", title);
1961 inl = 0;
1962 for (r = 0; r < (len / 64); r++) {
1963 hl += sprintrw(outaddr+hl, addr+inl, 64);
1964 inl += 64;
1965 }
1966 rx = len % 64;
1967 if (rx) {
1968 hl += sprintrw(outaddr+hl, addr+inl, rx);
1969 inl += rx;
1970 }
1971
1972 hl += sprintf(outaddr+hl, "\n");
1973
1974 return hl;
1975}
1976
1977static inline int
1978sprinthx4(unsigned char *title, unsigned char *outaddr,
1979 unsigned int *array, unsigned int len)
1980{
1981 int hl, r;
1982
1983 hl = sprintf(outaddr, "\n%s\n", title);
1984
1985 for (r = 0; r < len; r++) {
1986 if ((r % 8) == 0)
1987 hl += sprintf(outaddr+hl, " ");
1988 hl += sprintf(outaddr+hl, "%08X ", array[r]);
1989 if ((r % 8) == 7)
1990 hl += sprintf(outaddr+hl, "\n");
1991 }
1992
1993 hl += sprintf(outaddr+hl, "\n");
1994
1995 return hl;
1996}
1997
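/**
 * Read handler for the z90crypt /proc entry: reports the driver version,
 * crypto domain, per-type device counts, queue lengths and open handles,
 * then appends the online-device mask, queue-depth mask and per-device
 * request counts as hex dumps via the sprint* helpers above.
 */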
1998static int
1999z90crypt_status(char *resp_buff, char **start, off_t offset,
2000 int count, int *eof, void *data)
2001{
2002 unsigned char *workarea;
2003 int len;
2004
2005 /* resp_buff is a page. Use the right half for a work area */
2006 workarea = resp_buff+2000;
2007 len = 0;
2008 len += sprintf(resp_buff+len, "\nz90crypt version: %d.%d.%d\n",
2009 z90crypt_VERSION, z90crypt_RELEASE, z90crypt_VARIANT);
2010 len += sprintf(resp_buff+len, "Cryptographic domain: %d\n",
2011 get_status_domain_index());
2012 len += sprintf(resp_buff+len, "Total device count: %d\n",
2013 get_status_totalcount());
2014 len += sprintf(resp_buff+len, "PCICA count: %d\n",
2015 get_status_PCICAcount());
2016 len += sprintf(resp_buff+len, "PCICC count: %d\n",
2017 get_status_PCICCcount());
2018 len += sprintf(resp_buff+len, "PCIXCC MCL2 count: %d\n",
2019 get_status_PCIXCCMCL2count());
2020 len += sprintf(resp_buff+len, "PCIXCC MCL3 count: %d\n",
2021 get_status_PCIXCCMCL3count());
2022 len += sprintf(resp_buff+len, "CEX2C count: %d\n",
2023 get_status_CEX2Ccount());
2024 len += sprintf(resp_buff+len, "requestq count: %d\n",
2025 get_status_requestq_count());
2026 len += sprintf(resp_buff+len, "pendingq count: %d\n",
2027 get_status_pendingq_count());
2028 len += sprintf(resp_buff+len, "Total open handles: %d\n\n",
2029 get_status_totalopen_count());
2030 len += sprinthx(
2031 "Online devices: 1: PCICA, 2: PCICC, 3: PCIXCC (MCL2), "
2032 "4: PCIXCC (MCL3), 5: CEX2C",
2033 resp_buff+len,
2034 get_status_status_mask(workarea),
2035 Z90CRYPT_NUM_APS);
2036 len += sprinthx("Waiting work element counts",
2037 resp_buff+len,
2038 get_status_qdepth_mask(workarea),
2039 Z90CRYPT_NUM_APS);
2040 len += sprinthx4(
2041 "Per-device successfully completed request counts",
2042 resp_buff+len,
2043 get_status_perdevice_reqcnt((unsigned int *)workarea),
2044 Z90CRYPT_NUM_APS);
2045 *eof = 1;
2046 memset(workarea, 0, Z90CRYPT_NUM_APS * sizeof(unsigned int));
2047 return len;
2048}
2049
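/**
 * disable_card and enable_card flip a card's user_disabled flag and keep the
 * global and per-type user_disabled_count totals consistent; the /proc write
 * handler below uses them to honor 'd'/'e' requests.
 */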
2050static inline void
2051disable_card(int card_index)
2052{
2053 struct device *devp;
2054
2055 devp = LONG2DEVPTR(card_index);
2056 if (!devp || devp->user_disabled)
2057 return;
2058 devp->user_disabled = 1;
2059 z90crypt.hdware_info->hdware_mask.user_disabled_count++;
2060 if (devp->dev_type == -1)
2061 return;
2062 z90crypt.hdware_info->type_mask[devp->dev_type].user_disabled_count++;
2063}
2064
2065static inline void
2066enable_card(int card_index)
2067{
2068 struct device *devp;
2069
2070 devp = LONG2DEVPTR(card_index);
2071 if (!devp || !devp->user_disabled)
2072 return;
2073 devp->user_disabled = 0;
2074 z90crypt.hdware_info->hdware_mask.user_disabled_count--;
2075 if (devp->dev_type == -1)
2076 return;
2077 z90crypt.hdware_info->type_mask[devp->dev_type].user_disabled_count--;
2078}
2079
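/**
 * Write handler for the z90crypt /proc entry.  A sketch of its use: read the
 * status file, change a digit on the "Online devices" line to 'd' to disable
 * or 'e' to re-enable the card at that position, and write the result back
 * (the exact /proc path is established where the entry is registered,
 * outside this excerpt).  Parsing stops after 64 positions or at the end of
 * that line.
 */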
static int
z90crypt_status_write(struct file *file, const char __user *buffer,
		      unsigned long count, void *data)
{
	int j, eol;
	unsigned char *lbuf, *ptr;
	unsigned int local_count;

#define LBUFSIZE 1200
	if (count <= 0)
		return 0;

	lbuf = kmalloc(LBUFSIZE, GFP_KERNEL);
	if (!lbuf) {
		PRINTK("kmalloc failed!\n");
		return 0;
	}

	local_count = UMIN((unsigned int)count, LBUFSIZE-1);

	if (copy_from_user(lbuf, buffer, local_count) != 0) {
		kfree(lbuf);
		return -EFAULT;
	}

	lbuf[local_count] = '\0';

	ptr = strstr(lbuf, "Online devices");
	if (!ptr) {
		PRINTK("Unable to parse data (missing \"Online devices\")\n");
		kfree(lbuf);
		return count;
	}

	ptr = strstr(ptr, "\n");
	if (!ptr) {
		PRINTK("Unable to parse data (missing newline after \"Online devices\")\n");
		kfree(lbuf);
		return count;
	}
	ptr++;

	if (strstr(ptr, "Waiting work element counts") == NULL) {
		PRINTK("Unable to parse data (missing \"Waiting work element counts\")\n");
		kfree(lbuf);
		return count;
	}

	j = 0;
	eol = 0;
	while ((j < 64) && (*ptr != '\0')) {
		switch (*ptr) {
		case '\t':
		case ' ':
			break;
		case '\n':
		default:
			eol = 1;
			break;
		case '0':	// no device
		case '1':	// PCICA
		case '2':	// PCICC
		case '3':	// PCIXCC_MCL2
		case '4':	// PCIXCC_MCL3
		case '5':	// CEX2C
			j++;
			break;
		case 'd':
		case 'D':
			disable_card(j);
			j++;
			break;
		case 'e':
		case 'E':
			enable_card(j);
			j++;
			break;
		}
		if (eol)
			break;
		ptr++;
	}

	kfree(lbuf);
	return count;
}
2166
2167/**
2168 * Functions that run under a timer, with no process id
2169 *
2170 * The task functions:
2171 * z90crypt_reader_task
2172 * helper_send_work
2173 * helper_handle_work_element
2174 * helper_receive_rc
2175 * z90crypt_config_task
2176 * z90crypt_cleanup_task
2177 *
2178 * Helper functions:
2179 * z90crypt_schedule_reader_timer
2180 * z90crypt_schedule_reader_task
2181 * z90crypt_schedule_config_task
2182 * z90crypt_schedule_cleanup_task
2183 */
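/**
 * Dequeue one reply from the AP queue of device 'index'.  On success the
 * PSMID identifies the original caller, whose entry is removed from the
 * device's caller list and whose response is converted into 'buff'; the
 * REC_* return value tells the reader task whether to retry, ignore the
 * result, or treat the device as gone.
 */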
2184static inline int
2185receive_from_crypto_device(int index, unsigned char *psmid, int *buff_len_p,
2186 unsigned char *buff, unsigned char __user **dest_p_p)
2187{
2188 int dv, rv;
2189 struct device *dev_ptr;
2190 struct caller *caller_p;
2191 struct ica_rsa_modexpo *icaMsg_p;
2192 struct list_head *ptr, *tptr;
2193
2194 memcpy(psmid, NULL_psmid, sizeof(NULL_psmid));
2195
2196 if (z90crypt.terminating)
2197 return REC_FATAL_ERROR;
2198
2199 caller_p = 0;
2200 dev_ptr = z90crypt.device_p[index];
2201 rv = 0;
2202 do {
2203 if (!dev_ptr || dev_ptr->disabled) {
2204 rv = REC_NO_WORK; // a disabled device can't return work
2205 break;
2206 }
2207 if (dev_ptr->dev_self_x != index) {
2208 PRINTKC("Corrupt dev ptr\n");
2209 z90crypt.terminating = 1;
2210 rv = REC_FATAL_ERROR;
2211 break;
2212 }
2213 if (!dev_ptr->dev_resp_l || !dev_ptr->dev_resp_p) {
2214 dv = DEV_REC_EXCEPTION;
2215 PRINTK("dev_resp_l = %d, dev_resp_p = %p\n",
2216 dev_ptr->dev_resp_l, dev_ptr->dev_resp_p);
2217 } else {
2218 PDEBUG("Dequeue called for device %d\n", index);
2219 dv = receive_from_AP(index, z90crypt.cdx,
2220 dev_ptr->dev_resp_l,
2221 dev_ptr->dev_resp_p, psmid);
2222 }
2223 switch (dv) {
2224 case DEV_REC_EXCEPTION:
2225 rv = REC_FATAL_ERROR;
2226 z90crypt.terminating = 1;
2227 PRINTKC("Exception in receive from device %d\n",
2228 index);
2229 break;
2230 case DEV_ONLINE:
2231 rv = 0;
2232 break;
2233 case DEV_EMPTY:
2234 rv = REC_EMPTY;
2235 break;
2236 case DEV_NO_WORK:
2237 rv = REC_NO_WORK;
2238 break;
2239 case DEV_BAD_MESSAGE:
2240 case DEV_GONE:
2241 case REC_HARDWAR_ERR:
2242 default:
2243 rv = REC_NO_RESPONSE;
2244 break;
2245 }
2246 if (rv)
2247 break;
2248 if (dev_ptr->dev_caller_count <= 0) {
2249 rv = REC_USER_GONE;
2250 break;
2251 }
2252
2253 list_for_each_safe(ptr, tptr, &dev_ptr->dev_caller_list) {
2254 caller_p = list_entry(ptr, struct caller, caller_liste);
2255 if (!memcmp(caller_p->caller_id, psmid,
2256 sizeof(caller_p->caller_id))) {
2257 if (!list_empty(&caller_p->caller_liste)) {
2258 list_del_init(ptr);
2259 dev_ptr->dev_caller_count--;
2260 break;
2261 }
2262 }
2263 caller_p = 0;
2264 }
2265 if (!caller_p) {
2266 PRINTKW("Unable to locate PSMID %02X%02X%02X%02X%02X"
2267 "%02X%02X%02X in device list\n",
2268 psmid[0], psmid[1], psmid[2], psmid[3],
2269 psmid[4], psmid[5], psmid[6], psmid[7]);
2270 rv = REC_USER_GONE;
2271 break;
2272 }
2273
2274 PDEBUG("caller_p after successful receive: %p\n", caller_p);
2275 rv = convert_response(dev_ptr->dev_resp_p,
2276 caller_p->caller_buf_p, buff_len_p, buff);
2277 switch (rv) {
2278 case REC_USE_PCICA:
2279 break;
2280 case REC_OPERAND_INV:
2281 case REC_OPERAND_SIZE:
2282 case REC_EVEN_MOD:
2283 case REC_INVALID_PAD:
2284 PDEBUG("device %d: 'user error' %d\n", index, rv);
2285 break;
2286 case WRONG_DEVICE_TYPE:
2287 case REC_HARDWAR_ERR:
2288 case REC_BAD_MESSAGE:
2289 PRINTKW("device %d: hardware error %d\n", index, rv);
2290 rv = REC_NO_RESPONSE;
2291 break;
2292 default:
2293 PDEBUG("device %d: rv = %d\n", index, rv);
2294 break;
2295 }
2296 } while (0);
2297
2298 switch (rv) {
2299 case 0:
2300 PDEBUG("Successful receive from device %d\n", index);
2301 icaMsg_p = (struct ica_rsa_modexpo *)caller_p->caller_buf_p;
2302 *dest_p_p = icaMsg_p->outputdata;
2303 if (*buff_len_p == 0)
2304 PRINTK("Zero *buff_len_p\n");
2305 break;
2306 case REC_NO_RESPONSE:
2307 PRINTKW("Removing device %d from availability\n", index);
2308 remove_device(dev_ptr);
2309 break;
2310 }
2311
2312 if (caller_p)
2313 unbuild_caller(dev_ptr, caller_p);
2314
2315 return rv;
2316}
2317
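/**
 * Pop the oldest request from the request queue and try to send it to the
 * device at 'index'.  On success the element moves to the pending queue;
 * on failure the caller is woken with -EINVAL, -ERESTARTSYS or -ENODEV,
 * depending on the error and on how many devices remain.
 */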
2318static inline void
2319helper_send_work(int index)
2320{
2321 struct work_element *rq_p;
2322 int rv;
2323
2324 if (list_empty(&request_list))
2325 return;
2326 requestq_count--;
2327 rq_p = list_entry(request_list.next, struct work_element, liste);
2328 list_del_init(&rq_p->liste);
2329 rq_p->audit[1] |= FP_REMREQUEST;
2330 if (rq_p->devtype == SHRT2DEVPTR(index)->dev_type) {
2331 rq_p->devindex = SHRT2LONG(index);
2332 rv = send_to_crypto_device(rq_p);
2333 if (rv == 0) {
2334 rq_p->requestsent = jiffies;
2335 rq_p->audit[0] |= FP_SENT;
2336 list_add_tail(&rq_p->liste, &pending_list);
2337 ++pendingq_count;
2338 rq_p->audit[0] |= FP_PENDING;
2339 } else {
2340 switch (rv) {
2341 case REC_OPERAND_INV:
2342 case REC_OPERAND_SIZE:
2343 case REC_EVEN_MOD:
2344 case REC_INVALID_PAD:
2345 rq_p->retcode = -EINVAL;
2346 break;
2347 case SEN_NOT_AVAIL:
2348 case SEN_RETRY:
2349 case REC_NO_RESPONSE:
2350 default:
2351 if (z90crypt.mask.st_count > 1)
2352 rq_p->retcode =
2353 -ERESTARTSYS;
2354 else
2355 rq_p->retcode = -ENODEV;
2356 break;
2357 }
2358 rq_p->status[0] |= STAT_FAILED;
2359 rq_p->audit[1] |= FP_AWAKENING;
2360 atomic_set(&rq_p->alarmrung, 1);
2361 wake_up(&rq_p->waitq);
2362 }
2363 } else {
2364 if (z90crypt.mask.st_count > 1)
2365 rq_p->retcode = -ERESTARTSYS;
2366 else
2367 rq_p->retcode = -ENODEV;
2368 rq_p->status[0] |= STAT_FAILED;
2369 rq_p->audit[1] |= FP_AWAKENING;
2370 atomic_set(&rq_p->alarmrung, 1);
2371 wake_up(&rq_p->waitq);
2372 }
2373}
2374
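/**
 * Match a dequeued reply (by PSMID) against the pending queue, copy the
 * response data or an error code into the work element, and wake the
 * sleeping ioctl caller unless it has already failed with -ERELEASED.
 */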
2375static inline void
2376helper_handle_work_element(int index, unsigned char psmid[8], int rc,
2377 int buff_len, unsigned char *buff,
2378 unsigned char __user *resp_addr)
2379{
2380 struct work_element *pq_p;
2381 struct list_head *lptr, *tptr;
2382
2383 pq_p = 0;
2384 list_for_each_safe(lptr, tptr, &pending_list) {
2385 pq_p = list_entry(lptr, struct work_element, liste);
2386 if (!memcmp(pq_p->caller_id, psmid, sizeof(pq_p->caller_id))) {
2387 list_del_init(lptr);
2388 pendingq_count--;
2389 pq_p->audit[1] |= FP_NOTPENDING;
2390 break;
2391 }
2392 pq_p = 0;
2393 }
2394
2395 if (!pq_p) {
2396 PRINTK("device %d has work but no caller exists on pending Q\n",
2397 SHRT2LONG(index));
2398 return;
2399 }
2400
2401 switch (rc) {
2402 case 0:
2403 pq_p->resp_buff_size = buff_len;
2404 pq_p->audit[1] |= FP_RESPSIZESET;
2405 if (buff_len) {
2406 pq_p->resp_addr = resp_addr;
2407 pq_p->audit[1] |= FP_RESPADDRCOPIED;
2408 memcpy(pq_p->resp_buff, buff, buff_len);
2409 pq_p->audit[1] |= FP_RESPBUFFCOPIED;
2410 }
2411 break;
2412 case REC_OPERAND_INV:
2413 case REC_OPERAND_SIZE:
2414 case REC_EVEN_MOD:
2415 case REC_INVALID_PAD:
2416 PDEBUG("-EINVAL after application error %d\n", rc);
2417 pq_p->retcode = -EINVAL;
2418 pq_p->status[0] |= STAT_FAILED;
2419 break;
2420 case REC_USE_PCICA:
2421 pq_p->retcode = -ERESTARTSYS;
2422 pq_p->status[0] |= STAT_FAILED;
2423 break;
2424 case REC_NO_RESPONSE:
2425 default:
2426 if (z90crypt.mask.st_count > 1)
2427 pq_p->retcode = -ERESTARTSYS;
2428 else
2429 pq_p->retcode = -ENODEV;
2430 pq_p->status[0] |= STAT_FAILED;
2431 break;
2432 }
2433 if ((pq_p->status[0] != STAT_FAILED) || (pq_p->retcode != -ERELEASED)) {
2434 pq_p->audit[1] |= FP_AWAKENING;
2435 atomic_set(&pq_p->alarmrung, 1);
2436 wake_up(&pq_p->waitq);
2437 }
2438}
2439
2440/**
2441 * return TRUE if the work element should be removed from the queue
2442 */
2443static inline int
2444helper_receive_rc(int index, int *rc_p)
2445{
2446 switch (*rc_p) {
2447 case 0:
2448 case REC_OPERAND_INV:
2449 case REC_OPERAND_SIZE:
2450 case REC_EVEN_MOD:
2451 case REC_INVALID_PAD:
2452 case REC_USE_PCICA:
2453 break;
2454
2455 case REC_BUSY:
2456 case REC_NO_WORK:
2457 case REC_EMPTY:
2458 case REC_RETRY_DEV:
2459 case REC_FATAL_ERROR:
2460 return 0;
2461
2462 case REC_NO_RESPONSE:
2463 break;
2464
2465 default:
2466 PRINTK("rc %d, device %d converted to REC_NO_RESPONSE\n",
2467 *rc_p, SHRT2LONG(index));
2468 *rc_p = REC_NO_RESPONSE;
2469 break;
2470 }
2471 return 1;
2472}
2473
2474static inline void
2475z90crypt_schedule_reader_timer(void)
2476{
2477 if (timer_pending(&reader_timer))
2478 return;
2479 if (mod_timer(&reader_timer, jiffies+(READERTIME*HZ/1000)) != 0)
2480 PRINTK("Timer pending while modifying reader timer\n");
2481}
2482
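/**
 * Tasklet body: poll every online device round robin, handing each reply to
 * helper_handle_work_element and backfilling the freed queue slot via
 * helper_send_work.  If work is still queued once the loop drains, the
 * reader timer is rescheduled.
 */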
2483static void
2484z90crypt_reader_task(unsigned long ptr)
2485{
2486 int workavail, index, rc, buff_len;
2487 unsigned char psmid[8];
2488 unsigned char __user *resp_addr;
2489 static unsigned char buff[1024];
2490
2491 /**
2492 * we use workavail = 2 to ensure 2 passes with nothing dequeued before
2493 * exiting the loop. If (pendingq_count+requestq_count) == 0 after the
2494 * loop, there is no work remaining on the queues.
2495 */
2496 resp_addr = 0;
2497 workavail = 2;
2498 buff_len = 0;
2499 while (workavail) {
2500 workavail--;
2501 rc = 0;
2502 spin_lock_irq(&queuespinlock);
2503 memset(buff, 0x00, sizeof(buff));
2504
2505 /* Dequeue once from each device in round robin. */
2506 for (index = 0; index < z90crypt.mask.st_count; index++) {
2507 PDEBUG("About to receive.\n");
2508 rc = receive_from_crypto_device(SHRT2LONG(index),
2509 psmid,
2510 &buff_len,
2511 buff,
2512 &resp_addr);
2513 PDEBUG("Dequeued: rc = %d.\n", rc);
2514
2515 if (helper_receive_rc(index, &rc)) {
2516 if (rc != REC_NO_RESPONSE) {
2517 helper_send_work(index);
2518 workavail = 2;
2519 }
2520
2521 helper_handle_work_element(index, psmid, rc,
2522 buff_len, buff,
2523 resp_addr);
2524 }
2525
2526 if (rc == REC_FATAL_ERROR)
2527 PRINTKW("REC_FATAL_ERROR from device %d!\n",
2528 SHRT2LONG(index));
2529 }
2530 spin_unlock_irq(&queuespinlock);
2531 }
2532
2533 if (pendingq_count + requestq_count)
2534 z90crypt_schedule_reader_timer();
2535}
2536
2537static inline void
2538z90crypt_schedule_config_task(unsigned int expiration)
2539{
2540 if (timer_pending(&config_timer))
2541 return;
2542 if (mod_timer(&config_timer, jiffies+(expiration*HZ)) != 0)
2543 PRINTK("Timer pending while modifying config timer\n");
2544}
2545
2546static void
2547z90crypt_config_task(unsigned long ptr)
2548{
2549 int rc;
2550
2551 PDEBUG("jiffies %ld\n", jiffies);
2552
2553 if ((rc = refresh_z90crypt(&z90crypt.cdx)))
2554 PRINTK("Error %d detected in refresh_z90crypt.\n", rc);
2555 /* If return was fatal, don't bother reconfiguring */
2556 if ((rc != TSQ_FATAL_ERROR) && (rc != RSQ_FATAL_ERROR))
2557 z90crypt_schedule_config_task(CONFIGTIME);
2558}
2559
2560static inline void
2561z90crypt_schedule_cleanup_task(void)
2562{
2563 if (timer_pending(&cleanup_timer))
2564 return;
2565 if (mod_timer(&cleanup_timer, jiffies+(CLEANUPTIME*HZ)) != 0)
2566 PRINTK("Timer pending while modifying cleanup timer\n");
2567}
2568
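/**
 * With no devices left, fail every element on the pending and request
 * queues with -ENODEV and wake their callers.
 */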
2569static inline void
2570helper_drain_queues(void)
2571{
2572 struct work_element *pq_p;
2573 struct list_head *lptr, *tptr;
2574
2575 list_for_each_safe(lptr, tptr, &pending_list) {
2576 pq_p = list_entry(lptr, struct work_element, liste);
2577 pq_p->retcode = -ENODEV;
2578 pq_p->status[0] |= STAT_FAILED;
2579 unbuild_caller(LONG2DEVPTR(pq_p->devindex),
2580 (struct caller *)pq_p->requestptr);
2581 list_del_init(lptr);
2582 pendingq_count--;
2583 pq_p->audit[1] |= FP_NOTPENDING;
2584 pq_p->audit[1] |= FP_AWAKENING;
2585 atomic_set(&pq_p->alarmrung, 1);
2586 wake_up(&pq_p->waitq);
2587 }
2588
2589 list_for_each_safe(lptr, tptr, &request_list) {
2590 pq_p = list_entry(lptr, struct work_element, liste);
2591 pq_p->retcode = -ENODEV;
2592 pq_p->status[0] |= STAT_FAILED;
2593 list_del_init(lptr);
2594 requestq_count--;
2595 pq_p->audit[1] |= FP_REMREQUEST;
2596 pq_p->audit[1] |= FP_AWAKENING;
2597 atomic_set(&pq_p->alarmrung, 1);
2598 wake_up(&pq_p->waitq);
2599 }
2600}
2601
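/**
 * Fail any pending request older than CLEANUPTIME seconds with -ETIMEOUT and
 * wake its caller; if nothing at all is pending, apply the same timeout to
 * the request queue, since those elements would otherwise never move.
 */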
2602static inline void
2603helper_timeout_requests(void)
2604{
2605 struct work_element *pq_p;
2606 struct list_head *lptr, *tptr;
2607 long timelimit;
2608
2609 timelimit = jiffies - (CLEANUPTIME * HZ);
2610 /* The list is in strict chronological order */
2611 list_for_each_safe(lptr, tptr, &pending_list) {
2612 pq_p = list_entry(lptr, struct work_element, liste);
2613 if (pq_p->requestsent >= timelimit)
2614 break;
2615 PRINTKW("Purging(PQ) PSMID %02X%02X%02X%02X%02X%02X%02X%02X\n",
2616 ((struct caller *)pq_p->requestptr)->caller_id[0],
2617 ((struct caller *)pq_p->requestptr)->caller_id[1],
2618 ((struct caller *)pq_p->requestptr)->caller_id[2],
2619 ((struct caller *)pq_p->requestptr)->caller_id[3],
2620 ((struct caller *)pq_p->requestptr)->caller_id[4],
2621 ((struct caller *)pq_p->requestptr)->caller_id[5],
2622 ((struct caller *)pq_p->requestptr)->caller_id[6],
2623 ((struct caller *)pq_p->requestptr)->caller_id[7]);
2624 pq_p->retcode = -ETIMEOUT;
2625 pq_p->status[0] |= STAT_FAILED;
2626 /* get this off any caller queue it may be on */
2627 unbuild_caller(LONG2DEVPTR(pq_p->devindex),
2628 (struct caller *) pq_p->requestptr);
2629 list_del_init(lptr);
2630 pendingq_count--;
2631 pq_p->audit[1] |= FP_TIMEDOUT;
2632 pq_p->audit[1] |= FP_NOTPENDING;
2633 pq_p->audit[1] |= FP_AWAKENING;
2634 atomic_set(&pq_p->alarmrung, 1);
2635 wake_up(&pq_p->waitq);
2636 }
2637
2638 /**
2639 * If pending count is zero, items left on the request queue may
2640 * never be processed.
2641 */
2642 if (pendingq_count <= 0) {
2643 list_for_each_safe(lptr, tptr, &request_list) {
2644 pq_p = list_entry(lptr, struct work_element, liste);
2645 if (pq_p->requestsent >= timelimit)
2646 break;
2647 PRINTKW("Purging(RQ) PSMID %02X%02X%02X%02X%02X%02X%02X%02X\n",
2648 ((struct caller *)pq_p->requestptr)->caller_id[0],
2649 ((struct caller *)pq_p->requestptr)->caller_id[1],
2650 ((struct caller *)pq_p->requestptr)->caller_id[2],
2651 ((struct caller *)pq_p->requestptr)->caller_id[3],
2652 ((struct caller *)pq_p->requestptr)->caller_id[4],
2653 ((struct caller *)pq_p->requestptr)->caller_id[5],
2654 ((struct caller *)pq_p->requestptr)->caller_id[6],
2655 ((struct caller *)pq_p->requestptr)->caller_id[7]);
2656 pq_p->retcode = -ETIMEOUT;
2657 pq_p->status[0] |= STAT_FAILED;
2658 list_del_init(lptr);
2659 requestq_count--;
2660 pq_p->audit[1] |= FP_TIMEDOUT;
2661 pq_p->audit[1] |= FP_REMREQUEST;
2662 pq_p->audit[1] |= FP_AWAKENING;
2663 atomic_set(&pq_p->alarmrung, 1);
2664 wake_up(&pq_p->waitq);
2665 }
2666 }
2667}
2668
2669static void
2670z90crypt_cleanup_task(unsigned long ptr)
2671{
2672 PDEBUG("jiffies %ld\n", jiffies);
2673 spin_lock_irq(&queuespinlock);
2674 if (z90crypt.mask.st_count <= 0) // no devices!
2675 helper_drain_queues();
2676 else
2677 helper_timeout_requests();
2678 spin_unlock_irq(&queuespinlock);
2679 z90crypt_schedule_cleanup_task();
2680}
2681
2682static void
2683z90crypt_schedule_reader_task(unsigned long ptr)
2684{
2685 tasklet_schedule(&reader_tasklet);
2686}
2687
2688/**
2689 * Lowlevel Functions:
2690 *
2691 * create_z90crypt: creates and initializes basic data structures
2692 * refresh_z90crypt: re-initializes basic data structures
2693 * find_crypto_devices: returns a count and mask of hardware status
2694 * create_crypto_device: builds the descriptor for a device
2695 * destroy_crypto_device: unallocates the descriptor for a device
2696 * destroy_z90crypt: drains all work, unallocates structs
2697 */
2698
2699/**
2700 * build the z90crypt root structure using the given domain index
2701 */
2702static int
2703create_z90crypt(int *cdx_p)
2704{
2705 struct hdware_block *hdware_blk_p;
2706
2707 memset(&z90crypt, 0x00, sizeof(struct z90crypt));
2708 z90crypt.domain_established = 0;
2709 z90crypt.len = sizeof(struct z90crypt);
2710 z90crypt.max_count = Z90CRYPT_NUM_DEVS;
2711 z90crypt.cdx = *cdx_p;
2712
2713 hdware_blk_p = (struct hdware_block *)
2714 kmalloc(sizeof(struct hdware_block), GFP_ATOMIC);
2715 if (!hdware_blk_p) {
2716 PDEBUG("kmalloc for hardware block failed\n");
2717 return ENOMEM;
2718 }
2719 memset(hdware_blk_p, 0x00, sizeof(struct hdware_block));
2720 z90crypt.hdware_info = hdware_blk_p;
2721
2722 return 0;
2723}
2724
2725static inline int
2726helper_scan_devices(int cdx_array[16], int *cdx_p, int *correct_cdx_found)
2727{
2728 enum hdstat hd_stat;
2729 int q_depth, dev_type;
2730 int indx, chkdom, numdomains;
2731
2732 q_depth = dev_type = numdomains = 0;
2733 for (chkdom = 0; chkdom <= 15; cdx_array[chkdom++] = -1);
2734 for (indx = 0; indx < z90crypt.max_count; indx++) {
2735 hd_stat = HD_NOT_THERE;
2736 numdomains = 0;
2737 for (chkdom = 0; chkdom <= 15; chkdom++) {
2738 hd_stat = query_online(indx, chkdom, MAX_RESET,
2739 &q_depth, &dev_type);
2740 if (hd_stat == HD_TSQ_EXCEPTION) {
2741 z90crypt.terminating = 1;
2742 PRINTKC("exception taken!\n");
2743 break;
2744 }
2745 if (hd_stat == HD_ONLINE) {
2746 cdx_array[numdomains++] = chkdom;
2747 if (*cdx_p == chkdom) {
2748 *correct_cdx_found = 1;
2749 break;
2750 }
2751 }
2752 }
2753 if ((*correct_cdx_found == 1) || (numdomains != 0))
2754 break;
2755 if (z90crypt.terminating)
2756 break;
2757 }
2758 return numdomains;
2759}
2760
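/**
 * Determine the crypto domain to use.  Succeeds if the configured index is
 * found online, or if exactly one online domain exists and no index was
 * forced; otherwise reports no devices, an incorrect domain, or an
 * ambiguous set of candidate domains.
 */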
2761static inline int
2762probe_crypto_domain(int *cdx_p)
2763{
2764 int cdx_array[16];
2765 char cdx_array_text[53], temp[5];
2766 int correct_cdx_found, numdomains;
2767
2768 correct_cdx_found = 0;
2769 numdomains = helper_scan_devices(cdx_array, cdx_p, &correct_cdx_found);
2770
2771 if (z90crypt.terminating)
2772 return TSQ_FATAL_ERROR;
2773
2774 if (correct_cdx_found)
2775 return 0;
2776
2777 if (numdomains == 0) {
2778 PRINTKW("Unable to find crypto domain: No devices found\n");
2779 return Z90C_NO_DEVICES;
2780 }
2781
2782 if (numdomains == 1) {
2783 if (*cdx_p == -1) {
2784 *cdx_p = cdx_array[0];
2785 return 0;
2786 }
2787 PRINTKW("incorrect domain: specified = %d, found = %d\n",
2788 *cdx_p, cdx_array[0]);
2789 return Z90C_INCORRECT_DOMAIN;
2790 }
2791
2792 numdomains--;
2793 sprintf(cdx_array_text, "%d", cdx_array[numdomains]);
2794 while (numdomains) {
2795 numdomains--;
2796 sprintf(temp, ", %d", cdx_array[numdomains]);
2797 strcat(cdx_array_text, temp);
2798 }
2799
2800 PRINTKW("ambiguous domain detected: specified = %d, found array = %s\n",
2801 *cdx_p, cdx_array_text);
2802 return Z90C_AMBIGUOUS_DOMAIN;
2803}
2804
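/**
 * Periodic (re)configuration: establish the crypto domain if necessary,
 * rescan the hardware, and create or destroy per-device structures wherever
 * the new status mask differs from the recorded one, then rebuild the
 * overall and per-type index arrays.
 */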
2805static int
2806refresh_z90crypt(int *cdx_p)
2807{
2808 int i, j, indx, rv;
2809 static struct status local_mask;
2810 struct device *devPtr;
2811 unsigned char oldStat, newStat;
2812 int return_unchanged;
2813
2814 if (z90crypt.len != sizeof(z90crypt))
2815 return ENOTINIT;
2816 if (z90crypt.terminating)
2817 return TSQ_FATAL_ERROR;
2818 rv = 0;
2819 if (!z90crypt.hdware_info->hdware_mask.st_count &&
2820 !z90crypt.domain_established) {
2821 rv = probe_crypto_domain(cdx_p);
2822 if (z90crypt.terminating)
2823 return TSQ_FATAL_ERROR;
2824 if (rv == Z90C_NO_DEVICES)
2825 return 0; // try later
2826 if (rv)
2827 return rv;
2828 z90crypt.cdx = *cdx_p;
2829 z90crypt.domain_established = 1;
2830 }
2831 rv = find_crypto_devices(&local_mask);
2832 if (rv) {
2833 PRINTK("find crypto devices returned %d\n", rv);
2834 return rv;
2835 }
2836 if (!memcmp(&local_mask, &z90crypt.hdware_info->hdware_mask,
2837 sizeof(struct status))) {
2838 return_unchanged = 1;
2839 for (i = 0; i < Z90CRYPT_NUM_TYPES; i++) {
2840 /**
2841 * Check for disabled cards. If any device is marked
2842 * disabled, destroy it.
2843 */
2844 for (j = 0;
2845 j < z90crypt.hdware_info->type_mask[i].st_count;
2846 j++) {
2847 indx = z90crypt.hdware_info->type_x_addr[i].
2848 device_index[j];
2849 devPtr = z90crypt.device_p[indx];
2850 if (devPtr && devPtr->disabled) {
2851 local_mask.st_mask[indx] = HD_NOT_THERE;
2852 return_unchanged = 0;
2853 }
2854 }
2855 }
2856 if (return_unchanged == 1)
2857 return 0;
2858 }
2859
2860 spin_lock_irq(&queuespinlock);
2861 for (i = 0; i < z90crypt.max_count; i++) {
2862 oldStat = z90crypt.hdware_info->hdware_mask.st_mask[i];
2863 newStat = local_mask.st_mask[i];
2864 if ((oldStat == HD_ONLINE) && (newStat != HD_ONLINE))
2865 destroy_crypto_device(i);
2866 else if ((oldStat != HD_ONLINE) && (newStat == HD_ONLINE)) {
2867 rv = create_crypto_device(i);
2868 if (rv >= REC_FATAL_ERROR)
2869 return rv;
2870 if (rv != 0) {
2871 local_mask.st_mask[i] = HD_NOT_THERE;
2872 local_mask.st_count--;
2873 }
2874 }
2875 }
2876 memcpy(z90crypt.hdware_info->hdware_mask.st_mask, local_mask.st_mask,
2877 sizeof(local_mask.st_mask));
2878 z90crypt.hdware_info->hdware_mask.st_count = local_mask.st_count;
2879 z90crypt.hdware_info->hdware_mask.disabled_count =
2880 local_mask.disabled_count;
2881 refresh_index_array(&z90crypt.mask, &z90crypt.overall_device_x);
2882 for (i = 0; i < Z90CRYPT_NUM_TYPES; i++)
2883 refresh_index_array(&(z90crypt.hdware_info->type_mask[i]),
2884 &(z90crypt.hdware_info->type_x_addr[i]));
2885 spin_unlock_irq(&queuespinlock);
2886
2887 return rv;
2888}
2889
2890static int
2891find_crypto_devices(struct status *deviceMask)
2892{
2893 int i, q_depth, dev_type;
2894 enum hdstat hd_stat;
2895
2896 deviceMask->st_count = 0;
2897 deviceMask->disabled_count = 0;
2898 deviceMask->user_disabled_count = 0;
2899
2900 for (i = 0; i < z90crypt.max_count; i++) {
2901 hd_stat = query_online(i, z90crypt.cdx, MAX_RESET, &q_depth,
2902 &dev_type);
2903 if (hd_stat == HD_TSQ_EXCEPTION) {
2904 z90crypt.terminating = 1;
2905 PRINTKC("Exception during probe for crypto devices\n");
2906 return TSQ_FATAL_ERROR;
2907 }
2908 deviceMask->st_mask[i] = hd_stat;
2909 if (hd_stat == HD_ONLINE) {
2910 PDEBUG("Got an online crypto!: %d\n", i);
2911 PDEBUG("Got a queue depth of %d\n", q_depth);
2912 PDEBUG("Got a device type of %d\n", dev_type);
2913 if (q_depth <= 0)
2914 return TSQ_FATAL_ERROR;
2915 deviceMask->st_count++;
2916 z90crypt.q_depth_array[i] = q_depth;
2917 z90crypt.dev_type_array[i] = dev_type;
2918 }
2919 }
2920
2921 return 0;
2922}
2923
2924static int
2925refresh_index_array(struct status *status_str, struct device_x *index_array)
2926{
2927 int i, count;
2928 enum devstat stat;
2929
2930 i = -1;
2931 count = 0;
2932 do {
2933 stat = status_str->st_mask[++i];
2934 if (stat == DEV_ONLINE)
2935 index_array->device_index[count++] = i;
2936 } while ((i < Z90CRYPT_NUM_DEVS) && (count < status_str->st_count));
2937
2938 return count;
2939}
2940
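/**
 * Allocate (if needed) and reset the descriptor for one AP device, probing
 * its type when the hardware did not report it, and fold the device into
 * the overall and per-type online masks.
 */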
2941static int
2942create_crypto_device(int index)
2943{
2944 int rv, devstat, total_size;
2945 struct device *dev_ptr;
2946 struct status *type_str_p;
2947 int deviceType;
2948
2949 dev_ptr = z90crypt.device_p[index];
2950 if (!dev_ptr) {
2951 total_size = sizeof(struct device) +
2952 z90crypt.q_depth_array[index] * sizeof(int);
2953
2954 dev_ptr = (struct device *) kmalloc(total_size, GFP_ATOMIC);
2955 if (!dev_ptr) {
2956 PRINTK("kmalloc device %d failed\n", index);
2957 return ENOMEM;
2958 }
2959 memset(dev_ptr, 0, total_size);
2960 dev_ptr->dev_resp_p = kmalloc(MAX_RESPONSE_SIZE, GFP_ATOMIC);
2961 if (!dev_ptr->dev_resp_p) {
2962 kfree(dev_ptr);
2963 PRINTK("kmalloc device %d rec buffer failed\n", index);
2964 return ENOMEM;
2965 }
2966 dev_ptr->dev_resp_l = MAX_RESPONSE_SIZE;
2967 INIT_LIST_HEAD(&(dev_ptr->dev_caller_list));
2968 }
2969
2970 devstat = reset_device(index, z90crypt.cdx, MAX_RESET);
2971 if (devstat == DEV_RSQ_EXCEPTION) {
2972 PRINTK("exception during reset device %d\n", index);
2973 kfree(dev_ptr->dev_resp_p);
2974 kfree(dev_ptr);
2975 return RSQ_FATAL_ERROR;
2976 }
2977 if (devstat == DEV_ONLINE) {
2978 dev_ptr->dev_self_x = index;
2979 dev_ptr->dev_type = z90crypt.dev_type_array[index];
2980 if (dev_ptr->dev_type == NILDEV) {
2981 rv = probe_device_type(dev_ptr);
2982 if (rv) {
2983 PRINTK("rv = %d from probe_device_type %d\n",
2984 rv, index);
2985 kfree(dev_ptr->dev_resp_p);
2986 kfree(dev_ptr);
2987 return rv;
2988 }
2989 }
2990 if (dev_ptr->dev_type == PCIXCC_UNK) {
2991 rv = probe_PCIXCC_type(dev_ptr);
2992 if (rv) {
2993 PRINTK("rv = %d from probe_PCIXCC_type %d\n",
2994 rv, index);
2995 kfree(dev_ptr->dev_resp_p);
2996 kfree(dev_ptr);
2997 return rv;
2998 }
2999 }
3000 deviceType = dev_ptr->dev_type;
3001 z90crypt.dev_type_array[index] = deviceType;
3002 if (deviceType == PCICA)
3003 z90crypt.hdware_info->device_type_array[index] = 1;
3004 else if (deviceType == PCICC)
3005 z90crypt.hdware_info->device_type_array[index] = 2;
3006 else if (deviceType == PCIXCC_MCL2)
3007 z90crypt.hdware_info->device_type_array[index] = 3;
3008 else if (deviceType == PCIXCC_MCL3)
3009 z90crypt.hdware_info->device_type_array[index] = 4;
3010 else if (deviceType == CEX2C)
3011 z90crypt.hdware_info->device_type_array[index] = 5;
3012 else
3013 z90crypt.hdware_info->device_type_array[index] = -1;
3014 }
3015
3016 /**
3017 * 'q_depth' returned by the hardware is one less than
3018 * the actual depth
3019 */
3020 dev_ptr->dev_q_depth = z90crypt.q_depth_array[index];
3021 dev_ptr->dev_type = z90crypt.dev_type_array[index];
3022 dev_ptr->dev_stat = devstat;
3023 dev_ptr->disabled = 0;
3024 z90crypt.device_p[index] = dev_ptr;
3025
3026 if (devstat == DEV_ONLINE) {
3027 if (z90crypt.mask.st_mask[index] != DEV_ONLINE) {
3028 z90crypt.mask.st_mask[index] = DEV_ONLINE;
3029 z90crypt.mask.st_count++;
3030 }
3031 deviceType = dev_ptr->dev_type;
3032 type_str_p = &z90crypt.hdware_info->type_mask[deviceType];
3033 if (type_str_p->st_mask[index] != DEV_ONLINE) {
3034 type_str_p->st_mask[index] = DEV_ONLINE;
3035 type_str_p->st_count++;
3036 }
3037 }
3038
3039 return 0;
3040}
3041
3042static int
3043destroy_crypto_device(int index)
3044{
3045 struct device *dev_ptr;
3046 int t, disabledFlag;
3047
3048 dev_ptr = z90crypt.device_p[index];
3049
3050 /* remember device type; get rid of device struct */
3051 if (dev_ptr) {
3052 disabledFlag = dev_ptr->disabled;
3053 t = dev_ptr->dev_type;
		kfree(dev_ptr->dev_resp_p);
		kfree(dev_ptr);
3056 } else {
3057 disabledFlag = 0;
3058 t = -1;
3059 }
3060 z90crypt.device_p[index] = 0;
3061
3062 /* if the type is valid, remove the device from the type_mask */
3063 if ((t != -1) && z90crypt.hdware_info->type_mask[t].st_mask[index]) {
3064 z90crypt.hdware_info->type_mask[t].st_mask[index] = 0x00;
3065 z90crypt.hdware_info->type_mask[t].st_count--;
3066 if (disabledFlag == 1)
3067 z90crypt.hdware_info->type_mask[t].disabled_count--;
3068 }
3069 if (z90crypt.mask.st_mask[index] != DEV_GONE) {
3070 z90crypt.mask.st_mask[index] = DEV_GONE;
3071 z90crypt.mask.st_count--;
3072 }
3073 z90crypt.hdware_info->device_type_array[index] = 0;
3074
3075 return 0;
3076}
3077
3078static void
3079destroy_z90crypt(void)
3080{
3081 int i;

	for (i = 0; i < z90crypt.max_count; i++)
		if (z90crypt.device_p[i])
			destroy_crypto_device(i);
	kfree(z90crypt.hdware_info);
	memset((void *)&z90crypt, 0, sizeof(z90crypt));
3088}
3089
3090static unsigned char static_testmsg[384] = {
30910x00,0x00,0x00,0x00,0x01,0x02,0x03,0x04,0x05,0x06,0x07,0x08,0x00,0x06,0x00,0x00,
30920x00,0x00,0x00,0x00,0x00,0x00,0x00,0x00,0x00,0x00,0x00,0x00,0x00,0x00,0x00,0x58,
30930x00,0x00,0x00,0x00,0x00,0x00,0x00,0x00,0x00,0x00,0x00,0x00,0x01,0x00,0x43,0x43,
30940x41,0x2d,0x41,0x50,0x50,0x4c,0x20,0x20,0x20,0x01,0x01,0x01,0x00,0x00,0x00,0x00,
30950x50,0x4b,0x00,0x00,0x00,0x00,0x01,0x1c,0x00,0x00,0x00,0x00,0x00,0x00,0x00,0x00,
30960x00,0x00,0x00,0x00,0x00,0x00,0x05,0xb8,0x00,0x00,0x00,0x00,0x00,0x00,0x00,0x00,
30970x00,0x00,0x00,0x00,0x70,0x00,0x41,0x00,0x00,0x00,0x00,0x00,0x00,0x00,0x54,0x32,
30980x01,0x00,0xa0,0x00,0x00,0x00,0x00,0x00,0x00,0x00,0x00,0x00,0x00,0x00,0x00,0x00,
30990xb8,0x05,0x00,0x00,0x00,0x00,0x00,0x00,0x00,0x00,0x00,0x00,0x00,0x00,0x00,0x00,
31000x00,0x00,0x00,0x00,0x00,0x00,0x00,0x00,0x00,0x00,0x00,0x00,0x00,0x00,0x00,0x00,
31010x00,0x00,0x00,0x00,0x00,0x00,0x00,0x00,0x00,0x00,0x00,0x00,0x00,0x00,0x00,0x00,
31020x00,0x00,0x00,0x00,0x00,0x00,0x0a,0x00,0x00,0x00,0x00,0x00,0x00,0x00,0x00,0x00,
31030x00,0x00,0x00,0x00,0x00,0x00,0x00,0x00,0x00,0x00,0x08,0x00,0x49,0x43,0x53,0x46,
31040x20,0x20,0x20,0x20,0x50,0x4b,0x0a,0x00,0x50,0x4b,0x43,0x53,0x2d,0x31,0x2e,0x32,
31050x37,0x00,0x11,0x22,0x33,0x44,0x55,0x66,0x77,0x88,0x99,0x00,0x11,0x22,0x33,0x44,
31060x55,0x66,0x77,0x88,0x99,0x00,0x11,0x22,0x33,0x44,0x55,0x66,0x77,0x88,0x99,0x00,
31070x11,0x22,0x33,0x44,0x55,0x66,0x77,0x88,0x99,0x00,0x11,0x22,0x33,0x44,0x55,0x66,
31080x77,0x88,0x99,0x00,0x11,0x22,0x33,0x5d,0x00,0x5b,0x00,0x77,0x88,0x1e,0x00,0x00,
31090x57,0x00,0x00,0x00,0x00,0x04,0x00,0x00,0x4f,0x00,0x00,0x00,0x03,0x02,0x00,0x00,
31100x40,0x01,0x00,0x01,0xce,0x02,0x68,0x2d,0x5f,0xa9,0xde,0x0c,0xf6,0xd2,0x7b,0x58,
31110x4b,0xf9,0x28,0x68,0x3d,0xb4,0xf4,0xef,0x78,0xd5,0xbe,0x66,0x63,0x42,0xef,0xf8,
31120xfd,0xa4,0xf8,0xb0,0x8e,0x29,0xc2,0xc9,0x2e,0xd8,0x45,0xb8,0x53,0x8c,0x6f,0x4e,
31130x72,0x8f,0x6c,0x04,0x9c,0x88,0xfc,0x1e,0xc5,0x83,0x55,0x57,0xf7,0xdd,0xfd,0x4f,
31140x11,0x36,0x95,0x5d,0x00,0x00,0x00,0x00,0x00,0x00,0x00,0x00,0x00,0x00,0x00,0x00
3115};
3116
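/**
 * Distinguish PCICA from PCICC on older hardware: send the static test
 * message, poll for the reply for up to roughly 1.8 seconds, and classify
 * the device by the leading bytes of the response (0x00 0x86 means PCICC).
 */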
3117static int
3118probe_device_type(struct device *devPtr)
3119{
3120 int rv, dv, i, index, length;
3121 unsigned char psmid[8];
3122 static unsigned char loc_testmsg[sizeof(static_testmsg)];
3123
3124 index = devPtr->dev_self_x;
3125 rv = 0;
3126 do {
3127 memcpy(loc_testmsg, static_testmsg, sizeof(static_testmsg));
3128 length = sizeof(static_testmsg) - 24;
3129 /* the -24 allows for the header */
3130 dv = send_to_AP(index, z90crypt.cdx, length, loc_testmsg);
3131 if (dv) {
3132 PDEBUG("dv returned by send during probe: %d\n", dv);
3133 if (dv == DEV_SEN_EXCEPTION) {
3134 rv = SEN_FATAL_ERROR;
3135 PRINTKC("exception in send to AP %d\n", index);
3136 break;
3137 }
3138 PDEBUG("return value from send_to_AP: %d\n", rv);
3139 switch (dv) {
3140 case DEV_GONE:
3141 PDEBUG("dev %d not available\n", index);
3142 rv = SEN_NOT_AVAIL;
3143 break;
3144 case DEV_ONLINE:
3145 rv = 0;
3146 break;
3147 case DEV_EMPTY:
3148 rv = SEN_NOT_AVAIL;
3149 break;
3150 case DEV_NO_WORK:
3151 rv = SEN_FATAL_ERROR;
3152 break;
3153 case DEV_BAD_MESSAGE:
3154 rv = SEN_USER_ERROR;
3155 break;
3156 case DEV_QUEUE_FULL:
3157 rv = SEN_QUEUE_FULL;
3158 break;
3159 default:
3160 PRINTK("unknown dv=%d for dev %d\n", dv, index);
3161 rv = SEN_NOT_AVAIL;
3162 break;
3163 }
3164 }
3165
3166 if (rv)
3167 break;
3168
3169 for (i = 0; i < 6; i++) {
3170 mdelay(300);
3171 dv = receive_from_AP(index, z90crypt.cdx,
3172 devPtr->dev_resp_l,
3173 devPtr->dev_resp_p, psmid);
3174 PDEBUG("dv returned by DQ = %d\n", dv);
3175 if (dv == DEV_REC_EXCEPTION) {
3176 rv = REC_FATAL_ERROR;
3177 PRINTKC("exception in dequeue %d\n",
3178 index);
3179 break;
3180 }
3181 switch (dv) {
3182 case DEV_ONLINE:
3183 rv = 0;
3184 break;
3185 case DEV_EMPTY:
3186 rv = REC_EMPTY;
3187 break;
3188 case DEV_NO_WORK:
3189 rv = REC_NO_WORK;
3190 break;
3191 case DEV_BAD_MESSAGE:
3192 case DEV_GONE:
3193 default:
3194 rv = REC_NO_RESPONSE;
3195 break;
3196 }
3197 if ((rv != 0) && (rv != REC_NO_WORK))
3198 break;
3199 if (rv == 0)
3200 break;
3201 }
3202 if (rv)
3203 break;
3204 rv = (devPtr->dev_resp_p[0] == 0x00) &&
3205 (devPtr->dev_resp_p[1] == 0x86);
3206 if (rv)
3207 devPtr->dev_type = PCICC;
3208 else
3209 devPtr->dev_type = PCICA;
3210 rv = 0;
3211 } while (0);
3212 /* In a general error case, the card is not marked online */
3213 return rv;
3214}
3215
3216static unsigned char MCL3_testmsg[] = {
32170x00,0x00,0x00,0x00,0xEE,0xEE,0xEE,0xEE,0xEE,0xEE,0xEE,0xEE,
32180x00,0x06,0x00,0x00,0x00,0x00,0x00,0x00,0x00,0x00,0x00,0x00,0x00,0x00,0x00,0x00,
32190x00,0x00,0x00,0x58,0x00,0x00,0x00,0x00,0x00,0x00,0x00,0x00,0x00,0x00,0x00,0x00,
32200x43,0x41,0x00,0x00,0x00,0x00,0x00,0x00,0x00,0x00,0x00,0x00,0x00,0x00,0x00,0x00,
32210x00,0x00,0x00,0x00,0x50,0x4B,0x00,0x00,0x00,0x00,0x01,0xC4,0x00,0x00,0x00,0x00,
32220x00,0x00,0x00,0x00,0x00,0x00,0x00,0x00,0x00,0x00,0x07,0x24,0x00,0x00,0x00,0x00,
32230x00,0x00,0x00,0x00,0x00,0x00,0x00,0x00,0x00,0xDC,0x02,0x00,0x00,0x00,0x54,0x32,
32240x00,0x00,0x00,0x00,0x00,0x00,0x00,0xE8,0x00,0x00,0x00,0x00,0x00,0x00,0x07,0x24,
32250x00,0x00,0x00,0x00,0x00,0x00,0x00,0x00,0x00,0x00,0x00,0x00,0x00,0x00,0x00,0x00,
32260x00,0x00,0x00,0x00,0x00,0x00,0x00,0x00,0x00,0x00,0x00,0x00,0x00,0x00,0x00,0x00,
32270x00,0x00,0x00,0x00,0x00,0x00,0x00,0x00,0x00,0x00,0x00,0x00,0x00,0x00,0x00,0x00,
32280x00,0x00,0x00,0x00,0x00,0x00,0x00,0x00,0x00,0x00,0x00,0x00,0x00,0x00,0x00,0x00,
32290x00,0x00,0x00,0x00,0x00,0x00,0x00,0x00,0x00,0x00,0x00,0x00,0x00,0x00,0x00,0x00,
32300x00,0x00,0x00,0x00,0x00,0x00,0x00,0x00,0x00,0x00,0x00,0x00,0x00,0x00,0x00,0x00,
32310x00,0x00,0x00,0x00,0x00,0x00,0x00,0x00,0x00,0x00,0x00,0x00,0x00,0x00,0x00,0x00,
32320x00,0x00,0x00,0x00,0x00,0x00,0x00,0x00,0x00,0x00,0x00,0x00,0x00,0x00,0x00,0x00,
32330x00,0x00,0x00,0x00,0x00,0x00,0x00,0x00,0x00,0x00,0x00,0x00,0x00,0x00,0x00,0x00,
32340x00,0x00,0x00,0x04,0x00,0x00,0x00,0x00,0x00,0x00,0x00,0x00,0x00,0x00,0x00,0x00,
32350x00,0x00,0x00,0x00,0x00,0x00,0x00,0x00,0x00,0x00,0x00,0x00,0x00,0x00,0x00,0x00,
32360x00,0x00,0x00,0x00,0x00,0x00,0x00,0x00,0x00,0x00,0x00,0x00,0x00,0x00,0x00,0x00,
32370x00,0x00,0x00,0x00,0x50,0x4B,0x00,0x0A,0x4D,0x52,0x50,0x20,0x20,0x20,0x20,0x20,
32380x00,0x42,0x00,0x01,0x02,0x03,0x04,0x05,0x06,0x07,0x08,0x09,0x0A,0x0B,0x0C,0x0D,
32390x0E,0x0F,0x00,0x11,0x22,0x33,0x44,0x55,0x66,0x77,0x88,0x99,0xAA,0xBB,0xCC,0xDD,
32400xEE,0xFF,0xFF,0xEE,0xDD,0xCC,0xBB,0xAA,0x99,0x88,0x77,0x66,0x55,0x44,0x33,0x22,
32410x11,0x00,0x01,0x23,0x45,0x67,0x89,0xAB,0xCD,0xEF,0xFE,0xDC,0xBA,0x98,0x76,0x54,
32420x32,0x10,0x00,0x9A,0x00,0x98,0x00,0x00,0x1E,0x00,0x00,0x94,0x00,0x00,0x00,0x00,
32430x04,0x00,0x00,0x8C,0x00,0x00,0x00,0x40,0x02,0x00,0x00,0x40,0xBA,0xE8,0x23,0x3C,
32440x75,0xF3,0x91,0x61,0xD6,0x73,0x39,0xCF,0x7B,0x6D,0x8E,0x61,0x97,0x63,0x9E,0xD9,
32450x60,0x55,0xD6,0xC7,0xEF,0xF8,0x1E,0x63,0x95,0x17,0xCC,0x28,0x45,0x60,0x11,0xC5,
32460xC4,0x4E,0x66,0xC6,0xE6,0xC3,0xDE,0x8A,0x19,0x30,0xCF,0x0E,0xD7,0xAA,0xDB,0x01,
32470xD8,0x00,0xBB,0x8F,0x39,0x9F,0x64,0x28,0xF5,0x7A,0x77,0x49,0xCC,0x6B,0xA3,0x91,
32480x97,0x70,0xE7,0x60,0x1E,0x39,0xE1,0xE5,0x33,0xE1,0x15,0x63,0x69,0x08,0x80,0x4C,
32490x67,0xC4,0x41,0x8F,0x48,0xDF,0x26,0x98,0xF1,0xD5,0x8D,0x88,0xD9,0x6A,0xA4,0x96,
32500xC5,0x84,0xD9,0x30,0x49,0x67,0x7D,0x19,0xB1,0xB3,0x45,0x4D,0xB2,0x53,0x9A,0x47,
32510x3C,0x7C,0x55,0xBF,0xCC,0x85,0x00,0x36,0xF1,0x3D,0x93,0x53
3252};
3253
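/**
 * Distinguish the two PCIXCC microcode levels: send the MCL3 test message
 * and inspect the returned CPRBX; return/reason codes 8/33 indicate an MCL2
 * card, anything else is treated as MCL3.
 */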
3254static int
3255probe_PCIXCC_type(struct device *devPtr)
3256{
3257 int rv, dv, i, index, length;
3258 unsigned char psmid[8];
3259 static unsigned char loc_testmsg[548];
3260 struct CPRBX *cprbx_p;
3261
3262 index = devPtr->dev_self_x;
3263 rv = 0;
3264 do {
3265 memcpy(loc_testmsg, MCL3_testmsg, sizeof(MCL3_testmsg));
3266 length = sizeof(MCL3_testmsg) - 0x0C;
3267 dv = send_to_AP(index, z90crypt.cdx, length, loc_testmsg);
3268 if (dv) {
3269 PDEBUG("dv returned = %d\n", dv);
3270 if (dv == DEV_SEN_EXCEPTION) {
3271 rv = SEN_FATAL_ERROR;
3272 PRINTKC("exception in send to AP %d\n", index);
3273 break;
3274 }
3275 PDEBUG("return value from send_to_AP: %d\n", rv);
3276 switch (dv) {
3277 case DEV_GONE:
3278 PDEBUG("dev %d not available\n", index);
3279 rv = SEN_NOT_AVAIL;
3280 break;
3281 case DEV_ONLINE:
3282 rv = 0;
3283 break;
3284 case DEV_EMPTY:
3285 rv = SEN_NOT_AVAIL;
3286 break;
3287 case DEV_NO_WORK:
3288 rv = SEN_FATAL_ERROR;
3289 break;
3290 case DEV_BAD_MESSAGE:
3291 rv = SEN_USER_ERROR;
3292 break;
3293 case DEV_QUEUE_FULL:
3294 rv = SEN_QUEUE_FULL;
3295 break;
3296 default:
3297 PRINTK("unknown dv=%d for dev %d\n", dv, index);
3298 rv = SEN_NOT_AVAIL;
3299 break;
3300 }
3301 }
3302
3303 if (rv)
3304 break;
3305
3306 for (i = 0; i < 6; i++) {
3307 mdelay(300);
3308 dv = receive_from_AP(index, z90crypt.cdx,
3309 devPtr->dev_resp_l,
3310 devPtr->dev_resp_p, psmid);
3311 PDEBUG("dv returned by DQ = %d\n", dv);
3312 if (dv == DEV_REC_EXCEPTION) {
3313 rv = REC_FATAL_ERROR;
3314 PRINTKC("exception in dequeue %d\n",
3315 index);
3316 break;
3317 }
3318 switch (dv) {
3319 case DEV_ONLINE:
3320 rv = 0;
3321 break;
3322 case DEV_EMPTY:
3323 rv = REC_EMPTY;
3324 break;
3325 case DEV_NO_WORK:
3326 rv = REC_NO_WORK;
3327 break;
3328 case DEV_BAD_MESSAGE:
3329 case DEV_GONE:
3330 default:
3331 rv = REC_NO_RESPONSE;
3332 break;
3333 }
3334 if ((rv != 0) && (rv != REC_NO_WORK))
3335 break;
3336 if (rv == 0)
3337 break;
3338 }
3339 if (rv)
3340 break;
3341 cprbx_p = (struct CPRBX *) (devPtr->dev_resp_p + 48);
3342 if ((cprbx_p->ccp_rtcode == 8) && (cprbx_p->ccp_rscode == 33)) {
3343 devPtr->dev_type = PCIXCC_MCL2;
3344 PDEBUG("device %d is MCL2\n", index);
3345 } else {
3346 devPtr->dev_type = PCIXCC_MCL3;
3347 PDEBUG("device %d is MCL3\n", index);
3348 }
3349 } while (0);
3350 /* In a general error case, the card is not marked online */
3351 return rv;
3352}
3353
module_init(z90crypt_init_module);
3355module_exit(z90crypt_cleanup_module);