// SPDX-License-Identifier: GPL-2.0+
/*
 *  Copyright IBM Corp. 2001, 2018
 *  Author(s): Robert Burroughs
 *	       Eric Rossman (edrossma@us.ibm.com)
 *	       Cornelia Huck <cornelia.huck@de.ibm.com>
 *
 *  Hotplug & misc device support: Jochen Roehrig (roehrig@de.ibm.com)
 *  Major cleanup & driver split: Martin Schwidefsky <schwidefsky@de.ibm.com>
 *				  Ralph Wuerthner <rwuerthn@de.ibm.com>
 *  MSGTYPE restruct:		  Holger Dengler <hd@linux.vnet.ibm.com>
 *  Multiple device nodes: Harald Freudenberger <freude@linux.ibm.com>
 */

#include <linux/module.h>
#include <linux/init.h>
#include <linux/interrupt.h>
#include <linux/miscdevice.h>
#include <linux/fs.h>
#include <linux/compat.h>
#include <linux/slab.h>
#include <linux/atomic.h>
#include <linux/uaccess.h>
#include <linux/hw_random.h>
#include <linux/debugfs.h>
#include <linux/cdev.h>
#include <linux/ctype.h>
#include <asm/debug.h>

#define CREATE_TRACE_POINTS
#include <asm/trace/zcrypt.h>

#include "zcrypt_api.h"
#include "zcrypt_debug.h"

#include "zcrypt_msgtype6.h"
#include "zcrypt_msgtype50.h"
#include "zcrypt_ccamisc.h"
#include "zcrypt_ep11misc.h"

/*
 * Module description.
 */
MODULE_AUTHOR("IBM Corporation");
MODULE_DESCRIPTION("Cryptographic Coprocessor interface, " \
		   "Copyright IBM Corp. 2001, 2012");
MODULE_LICENSE("GPL");

/*
 * zcrypt tracepoint functions
 */
EXPORT_TRACEPOINT_SYMBOL(s390_zcrypt_req);
EXPORT_TRACEPOINT_SYMBOL(s390_zcrypt_rep);

static int zcrypt_hwrng_seed = 1;
module_param_named(hwrng_seed, zcrypt_hwrng_seed, int, 0440);
MODULE_PARM_DESC(hwrng_seed, "Turn on/off hwrng auto seed, default is 1 (on).");
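/*
 * Being a regular module_param_named() parameter, hwrng_seed can be set at
 * load time, e.g. "modprobe zcrypt hwrng_seed=0" or "zcrypt.hwrng_seed=0" on
 * the kernel command line when built in (assuming the usual "zcrypt" module
 * name); the 0440 mode exposes it read-only via sysfs.
 */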

DEFINE_SPINLOCK(zcrypt_list_lock);
LIST_HEAD(zcrypt_card_list);
int zcrypt_device_count;

static atomic_t zcrypt_open_count = ATOMIC_INIT(0);
static atomic_t zcrypt_rescan_count = ATOMIC_INIT(0);

atomic_t zcrypt_rescan_req = ATOMIC_INIT(0);
EXPORT_SYMBOL(zcrypt_rescan_req);

static LIST_HEAD(zcrypt_ops_list);

/* Zcrypt related debug feature stuff. */
debug_info_t *zcrypt_dbf_info;

/**
 * Process a rescan of the transport layer.
 *
 * Returns 1, if the rescan has been processed, otherwise 0.
 */
static inline int zcrypt_process_rescan(void)
{
	if (atomic_read(&zcrypt_rescan_req)) {
		atomic_set(&zcrypt_rescan_req, 0);
		atomic_inc(&zcrypt_rescan_count);
		ap_bus_force_rescan();
		ZCRYPT_DBF(DBF_INFO, "rescan count=%07d\n",
			   atomic_inc_return(&zcrypt_rescan_count));
		return 1;
	}
	return 0;
}

void zcrypt_msgtype_register(struct zcrypt_ops *zops)
{
	list_add_tail(&zops->list, &zcrypt_ops_list);
}

void zcrypt_msgtype_unregister(struct zcrypt_ops *zops)
{
	list_del_init(&zops->list);
}

struct zcrypt_ops *zcrypt_msgtype(unsigned char *name, int variant)
{
	struct zcrypt_ops *zops;

	list_for_each_entry(zops, &zcrypt_ops_list, list)
		if ((zops->variant == variant) &&
		    (!strncmp(zops->name, name, sizeof(zops->name))))
			return zops;
	return NULL;
}
EXPORT_SYMBOL(zcrypt_msgtype);
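/*
 * The msgtype modules (see zcrypt_msgtype50.h and zcrypt_msgtype6.h) register
 * their struct zcrypt_ops via zcrypt_msgtype_register() at init time; queue
 * drivers then resolve them by name and variant, roughly like this sketch
 * (constants as defined in those headers):
 *
 *	zq->ops = zcrypt_msgtype(MSGTYPE50_NAME, MSGTYPE50_VARIANT_DEFAULT);
 */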

/*
 * Multi device nodes extension functions.
 */

#ifdef CONFIG_ZCRYPT_MULTIDEVNODES

struct zcdn_device;

static struct class *zcrypt_class;
static dev_t zcrypt_devt;
static struct cdev zcrypt_cdev;

struct zcdn_device {
	struct device device;
	struct ap_perms perms;
};

#define to_zcdn_dev(x) container_of((x), struct zcdn_device, device)

#define ZCDN_MAX_NAME 32

static int zcdn_create(const char *name);
static int zcdn_destroy(const char *name);

/*
 * Find zcdn device by name.
 * Returns reference to the zcdn device which needs to be released
 * with put_device() after use.
 */
static inline struct zcdn_device *find_zcdndev_by_name(const char *name)
{
	struct device *dev = class_find_device_by_name(zcrypt_class, name);

	return dev ? to_zcdn_dev(dev) : NULL;
}

/*
 * Find zcdn device by devt value.
 * Returns reference to the zcdn device which needs to be released
 * with put_device() after use.
 */
static inline struct zcdn_device *find_zcdndev_by_devt(dev_t devt)
{
	struct device *dev = class_find_device_by_devt(zcrypt_class, devt);

	return dev ? to_zcdn_dev(dev) : NULL;
}

static ssize_t ioctlmask_show(struct device *dev,
			      struct device_attribute *attr,
			      char *buf)
{
	int i, rc;
	struct zcdn_device *zcdndev = to_zcdn_dev(dev);

	if (mutex_lock_interruptible(&ap_perms_mutex))
		return -ERESTARTSYS;

	buf[0] = '0';
	buf[1] = 'x';
	for (i = 0; i < sizeof(zcdndev->perms.ioctlm) / sizeof(long); i++)
		snprintf(buf + 2 + 2 * i * sizeof(long),
			 PAGE_SIZE - 2 - 2 * i * sizeof(long),
			 "%016lx", zcdndev->perms.ioctlm[i]);
	buf[2 + 2 * i * sizeof(long)] = '\n';
	buf[2 + 2 * i * sizeof(long) + 1] = '\0';
	rc = 2 + 2 * i * sizeof(long) + 1;

	mutex_unlock(&ap_perms_mutex);

	return rc;
}

static ssize_t ioctlmask_store(struct device *dev,
			       struct device_attribute *attr,
			       const char *buf, size_t count)
{
	int rc;
	struct zcdn_device *zcdndev = to_zcdn_dev(dev);

	rc = ap_parse_mask_str(buf, zcdndev->perms.ioctlm,
			       AP_IOCTLS, &ap_perms_mutex);
	if (rc)
		return rc;

	return count;
}

static DEVICE_ATTR_RW(ioctlmask);
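/*
 * The ioctlmask attribute above and the apmask/aqmask attributes below share
 * this format: a read returns "0x" followed by the bitmap as consecutive
 * 16-digit hex words (one per unsigned long), newline terminated; a write
 * accepts whatever ap_parse_mask_str() understands, i.e. an absolute "0x..."
 * mask and presumably the relative "+N"/"-N" bit notation implemented by the
 * AP bus code.
 */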

static ssize_t apmask_show(struct device *dev,
			   struct device_attribute *attr,
			   char *buf)
{
	int i, rc;
	struct zcdn_device *zcdndev = to_zcdn_dev(dev);

	if (mutex_lock_interruptible(&ap_perms_mutex))
		return -ERESTARTSYS;

	buf[0] = '0';
	buf[1] = 'x';
	for (i = 0; i < sizeof(zcdndev->perms.apm) / sizeof(long); i++)
		snprintf(buf + 2 + 2 * i * sizeof(long),
			 PAGE_SIZE - 2 - 2 * i * sizeof(long),
			 "%016lx", zcdndev->perms.apm[i]);
	buf[2 + 2 * i * sizeof(long)] = '\n';
	buf[2 + 2 * i * sizeof(long) + 1] = '\0';
	rc = 2 + 2 * i * sizeof(long) + 1;

	mutex_unlock(&ap_perms_mutex);

	return rc;
}

static ssize_t apmask_store(struct device *dev,
			    struct device_attribute *attr,
			    const char *buf, size_t count)
{
	int rc;
	struct zcdn_device *zcdndev = to_zcdn_dev(dev);

	rc = ap_parse_mask_str(buf, zcdndev->perms.apm,
			       AP_DEVICES, &ap_perms_mutex);
	if (rc)
		return rc;

	return count;
}

static DEVICE_ATTR_RW(apmask);

static ssize_t aqmask_show(struct device *dev,
			   struct device_attribute *attr,
			   char *buf)
{
	int i, rc;
	struct zcdn_device *zcdndev = to_zcdn_dev(dev);

	if (mutex_lock_interruptible(&ap_perms_mutex))
		return -ERESTARTSYS;

	buf[0] = '0';
	buf[1] = 'x';
	for (i = 0; i < sizeof(zcdndev->perms.aqm) / sizeof(long); i++)
		snprintf(buf + 2 + 2 * i * sizeof(long),
			 PAGE_SIZE - 2 - 2 * i * sizeof(long),
			 "%016lx", zcdndev->perms.aqm[i]);
	buf[2 + 2 * i * sizeof(long)] = '\n';
	buf[2 + 2 * i * sizeof(long) + 1] = '\0';
	rc = 2 + 2 * i * sizeof(long) + 1;

	mutex_unlock(&ap_perms_mutex);

	return rc;
}

static ssize_t aqmask_store(struct device *dev,
			    struct device_attribute *attr,
			    const char *buf, size_t count)
{
	int rc;
	struct zcdn_device *zcdndev = to_zcdn_dev(dev);

	rc = ap_parse_mask_str(buf, zcdndev->perms.aqm,
			       AP_DOMAINS, &ap_perms_mutex);
	if (rc)
		return rc;

	return count;
}

static DEVICE_ATTR_RW(aqmask);

static struct attribute *zcdn_dev_attrs[] = {
	&dev_attr_ioctlmask.attr,
	&dev_attr_apmask.attr,
	&dev_attr_aqmask.attr,
	NULL
};

static struct attribute_group zcdn_dev_attr_group = {
	.attrs = zcdn_dev_attrs
};

static const struct attribute_group *zcdn_dev_attr_groups[] = {
	&zcdn_dev_attr_group,
	NULL
};

static ssize_t zcdn_create_store(struct class *class,
				 struct class_attribute *attr,
				 const char *buf, size_t count)
{
	int rc;
	char name[ZCDN_MAX_NAME];

	strncpy(name, skip_spaces(buf), sizeof(name));
	name[sizeof(name) - 1] = '\0';

	rc = zcdn_create(strim(name));

	return rc ? rc : count;
}

static const struct class_attribute class_attr_zcdn_create =
	__ATTR(create, 0600, NULL, zcdn_create_store);

static ssize_t zcdn_destroy_store(struct class *class,
				  struct class_attribute *attr,
				  const char *buf, size_t count)
{
	int rc;
	char name[ZCDN_MAX_NAME];

	strncpy(name, skip_spaces(buf), sizeof(name));
	name[sizeof(name) - 1] = '\0';

	rc = zcdn_destroy(strim(name));

	return rc ? rc : count;
}

static const struct class_attribute class_attr_zcdn_destroy =
	__ATTR(destroy, 0600, NULL, zcdn_destroy_store);
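/*
 * Typical usage of the create/destroy class attributes (a sketch, assuming
 * the "zcrypt" class registered later in this file and
 * CONFIG_ZCRYPT_MULTIDEVNODES=y):
 *
 *	echo "my_zcdn"  > /sys/class/zcrypt/create
 *	echo 0x...      > /sys/class/zcrypt/my_zcdn/ioctlmask
 *	echo "my_zcdn"  > /sys/class/zcrypt/destroy
 *
 * The create step results in an additional /dev/my_zcdn node whose ap/aq/
 * ioctl masks restrict which cards, domains and ioctls are usable through it.
 */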

static void zcdn_device_release(struct device *dev)
{
	struct zcdn_device *zcdndev = to_zcdn_dev(dev);

	ZCRYPT_DBF(DBF_INFO, "releasing zcdn device %d:%d\n",
		   MAJOR(dev->devt), MINOR(dev->devt));

	kfree(zcdndev);
}

static int zcdn_create(const char *name)
{
	dev_t devt;
	int i, rc = 0;
	char nodename[ZCDN_MAX_NAME];
	struct zcdn_device *zcdndev;

	if (mutex_lock_interruptible(&ap_perms_mutex))
		return -ERESTARTSYS;

	/* check if device node with this name already exists */
	if (name[0]) {
		zcdndev = find_zcdndev_by_name(name);
		if (zcdndev) {
			put_device(&zcdndev->device);
			rc = -EEXIST;
			goto unlockout;
		}
	}

	/* find an unused minor number */
	for (i = 0; i < ZCRYPT_MAX_MINOR_NODES; i++) {
		devt = MKDEV(MAJOR(zcrypt_devt), MINOR(zcrypt_devt) + i);
		zcdndev = find_zcdndev_by_devt(devt);
		if (zcdndev)
			put_device(&zcdndev->device);
		else
			break;
	}
	if (i == ZCRYPT_MAX_MINOR_NODES) {
		rc = -ENOSPC;
		goto unlockout;
	}

	/* alloc and prepare a new zcdn device */
	zcdndev = kzalloc(sizeof(*zcdndev), GFP_KERNEL);
	if (!zcdndev) {
		rc = -ENOMEM;
		goto unlockout;
	}
	zcdndev->device.release = zcdn_device_release;
	zcdndev->device.class = zcrypt_class;
	zcdndev->device.devt = devt;
	zcdndev->device.groups = zcdn_dev_attr_groups;
	if (name[0])
		strncpy(nodename, name, sizeof(nodename));
	else
		snprintf(nodename, sizeof(nodename),
			 ZCRYPT_NAME "_%d", (int) MINOR(devt));
	nodename[sizeof(nodename)-1] = '\0';
	if (dev_set_name(&zcdndev->device, nodename)) {
		rc = -EINVAL;
		goto unlockout;
	}
	rc = device_register(&zcdndev->device);
	if (rc) {
		put_device(&zcdndev->device);
		goto unlockout;
	}

	ZCRYPT_DBF(DBF_INFO, "created zcdn device %d:%d\n",
		   MAJOR(devt), MINOR(devt));

unlockout:
	mutex_unlock(&ap_perms_mutex);
	return rc;
}

static int zcdn_destroy(const char *name)
{
	int rc = 0;
	struct zcdn_device *zcdndev;

	if (mutex_lock_interruptible(&ap_perms_mutex))
		return -ERESTARTSYS;

	/* try to find this zcdn device */
	zcdndev = find_zcdndev_by_name(name);
	if (!zcdndev) {
		rc = -ENOENT;
		goto unlockout;
	}

	/*
	 * The zcdn device is not hard destroyed. It is subject to
	 * reference counting and thus just needs to be unregistered.
	 */
	put_device(&zcdndev->device);
	device_unregister(&zcdndev->device);

unlockout:
	mutex_unlock(&ap_perms_mutex);
	return rc;
}

static void zcdn_destroy_all(void)
{
	int i;
	dev_t devt;
	struct zcdn_device *zcdndev;

	mutex_lock(&ap_perms_mutex);
	for (i = 0; i < ZCRYPT_MAX_MINOR_NODES; i++) {
		devt = MKDEV(MAJOR(zcrypt_devt), MINOR(zcrypt_devt) + i);
		zcdndev = find_zcdndev_by_devt(devt);
		if (zcdndev) {
			put_device(&zcdndev->device);
			device_unregister(&zcdndev->device);
		}
	}
	mutex_unlock(&ap_perms_mutex);
}

#endif

/**
 * zcrypt_read(): Not supported beyond zcrypt 1.3.1.
 *
 * This function is not supported beyond zcrypt 1.3.1.
 */
static ssize_t zcrypt_read(struct file *filp, char __user *buf,
			   size_t count, loff_t *f_pos)
{
	return -EPERM;
}

/**
 * zcrypt_write(): Not allowed.
 *
 * Write is not allowed.
 */
static ssize_t zcrypt_write(struct file *filp, const char __user *buf,
			    size_t count, loff_t *f_pos)
{
	return -EPERM;
}

/**
 * zcrypt_open(): Count number of users.
 *
 * Device open function to count number of users.
 */
static int zcrypt_open(struct inode *inode, struct file *filp)
{
	struct ap_perms *perms = &ap_perms;

#ifdef CONFIG_ZCRYPT_MULTIDEVNODES
	if (filp->f_inode->i_cdev == &zcrypt_cdev) {
		struct zcdn_device *zcdndev;

		if (mutex_lock_interruptible(&ap_perms_mutex))
			return -ERESTARTSYS;
		zcdndev = find_zcdndev_by_devt(filp->f_inode->i_rdev);
		/* find returns a reference, no get_device() needed */
		mutex_unlock(&ap_perms_mutex);
		if (zcdndev)
			perms = &zcdndev->perms;
	}
#endif
	filp->private_data = (void *) perms;

	atomic_inc(&zcrypt_open_count);
	return stream_open(inode, filp);
}

/**
 * zcrypt_release(): Count number of users.
 *
 * Device close function to count number of users.
 */
static int zcrypt_release(struct inode *inode, struct file *filp)
{
#ifdef CONFIG_ZCRYPT_MULTIDEVNODES
	if (filp->f_inode->i_cdev == &zcrypt_cdev) {
		struct zcdn_device *zcdndev;

		mutex_lock(&ap_perms_mutex);
		zcdndev = find_zcdndev_by_devt(filp->f_inode->i_rdev);
		mutex_unlock(&ap_perms_mutex);
		if (zcdndev) {
			/* 2 puts here: one for find, one for open */
			put_device(&zcdndev->device);
			put_device(&zcdndev->device);
		}
	}
#endif

	atomic_dec(&zcrypt_open_count);
	return 0;
}

static inline int zcrypt_check_ioctl(struct ap_perms *perms,
				     unsigned int cmd)
{
	int rc = -EPERM;
	int ioctlnr = (cmd & _IOC_NRMASK) >> _IOC_NRSHIFT;

	if (ioctlnr > 0 && ioctlnr < AP_IOCTLS) {
		if (test_bit_inv(ioctlnr, perms->ioctlm))
			rc = 0;
	}

	if (rc)
		ZCRYPT_DBF(DBF_WARN,
			   "ioctl check failed: ioctlnr=0x%04x rc=%d\n",
			   ioctlnr, rc);

	return rc;
}
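/*
 * Sketch of how the check above is used: the zcrypt ioctl numbers are encoded
 * with _IOC() nr values below AP_IOCTLS, so e.g. an ICARSAMODEXPO issued on a
 * zcdn node only passes if the corresponding bit (MSB first, hence
 * test_bit_inv) is set in that node's ioctlmask.
 */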

static inline bool zcrypt_check_card(struct ap_perms *perms, int card)
{
	return test_bit_inv(card, perms->apm) ? true : false;
}

static inline bool zcrypt_check_queue(struct ap_perms *perms, int queue)
{
	return test_bit_inv(queue, perms->aqm) ? true : false;
}

static inline struct zcrypt_queue *zcrypt_pick_queue(struct zcrypt_card *zc,
						     struct zcrypt_queue *zq,
						     struct module **pmod,
						     unsigned int weight)
{
	if (!zq || !try_module_get(zq->queue->ap_dev.drv->driver.owner))
		return NULL;
	zcrypt_queue_get(zq);
	get_device(&zq->queue->ap_dev.device);
	atomic_add(weight, &zc->load);
	atomic_add(weight, &zq->load);
	zq->request_count++;
	*pmod = zq->queue->ap_dev.drv->driver.owner;
	return zq;
}

static inline void zcrypt_drop_queue(struct zcrypt_card *zc,
				     struct zcrypt_queue *zq,
				     struct module *mod,
				     unsigned int weight)
{
	zq->request_count--;
	atomic_sub(weight, &zc->load);
	atomic_sub(weight, &zq->load);
	put_device(&zq->queue->ap_dev.device);
	zcrypt_queue_put(zq);
	module_put(mod);
}
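/*
 * zcrypt_pick_queue() and zcrypt_drop_queue() are used as a pair around each
 * request: pick pins the queue driver module, the AP device and the zcrypt
 * queue and adds the request weight to the card and queue load counters;
 * drop reverses all of that once the reply (or an error) has been processed.
 */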

static inline bool zcrypt_card_compare(struct zcrypt_card *zc,
				       struct zcrypt_card *pref_zc,
				       unsigned int weight,
				       unsigned int pref_weight)
{
	if (!pref_zc)
		return true;
	weight += atomic_read(&zc->load);
	pref_weight += atomic_read(&pref_zc->load);
	if (weight == pref_weight)
		return atomic64_read(&zc->card->total_request_count) <
			atomic64_read(&pref_zc->card->total_request_count);
	return weight < pref_weight;
}

static inline bool zcrypt_queue_compare(struct zcrypt_queue *zq,
					struct zcrypt_queue *pref_zq,
					unsigned int weight,
					unsigned int pref_weight)
{
	if (!pref_zq)
		return true;
	weight += atomic_read(&zq->load);
	pref_weight += atomic_read(&pref_zq->load);
	if (weight == pref_weight)
		return zq->queue->total_request_count <
			pref_zq->queue->total_request_count;
	return weight < pref_weight;
}
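/*
 * Together with the compare helpers above, the dispatch functions below pick
 * a card/queue in two nested passes: the request weight (derived from the
 * card's speed rating, plus an extra penalty when a tracked request is
 * retried on the very card/queue it was sent to before) is added to the
 * current load, the lower sum wins, and ties go to the device with the lower
 * total request count.
 */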

/*
 * zcrypt ioctls.
 */
static long zcrypt_rsa_modexpo(struct ap_perms *perms,
			       struct zcrypt_track *tr,
			       struct ica_rsa_modexpo *mex)
{
	struct zcrypt_card *zc, *pref_zc;
	struct zcrypt_queue *zq, *pref_zq;
	unsigned int wgt = 0, pref_wgt = 0;
	unsigned int func_code;
	int cpen, qpen, qid = 0, rc = -ENODEV;
	struct module *mod;

	trace_s390_zcrypt_req(mex, TP_ICARSAMODEXPO);

	if (mex->outputdatalength < mex->inputdatalength) {
		func_code = 0;
		rc = -EINVAL;
		goto out;
	}

	/*
	 * As long as outputdatalength is big enough, we can set the
	 * outputdatalength equal to the inputdatalength, since that is the
	 * number of bytes we will copy in any case
	 */
	mex->outputdatalength = mex->inputdatalength;

	rc = get_rsa_modex_fc(mex, &func_code);
	if (rc)
		goto out;

	pref_zc = NULL;
	pref_zq = NULL;
	spin_lock(&zcrypt_list_lock);
	for_each_zcrypt_card(zc) {
		/* Check for online accelerator and CCA cards */
		if (!zc->online || !(zc->card->functions & 0x18000000))
			continue;
		/* Check for size limits */
		if (zc->min_mod_size > mex->inputdatalength ||
		    zc->max_mod_size < mex->inputdatalength)
			continue;
		/* check if device node has admission for this card */
		if (!zcrypt_check_card(perms, zc->card->id))
			continue;
		/* get weight index of the card device */
		wgt = zc->speed_rating[func_code];
		/* penalty if this msg was previously sent via this card */
		cpen = (tr && tr->again_counter && tr->last_qid &&
			AP_QID_CARD(tr->last_qid) == zc->card->id) ?
			TRACK_AGAIN_CARD_WEIGHT_PENALTY : 0;
		if (!zcrypt_card_compare(zc, pref_zc, wgt + cpen, pref_wgt))
			continue;
		for_each_zcrypt_queue(zq, zc) {
			/* check if device is online and eligible */
			if (!zq->online || !zq->ops->rsa_modexpo)
				continue;
			/* check if device node has admission for this queue */
			if (!zcrypt_check_queue(perms,
						AP_QID_QUEUE(zq->queue->qid)))
				continue;
			/* penalty if the msg was previously sent at this qid */
			qpen = (tr && tr->again_counter && tr->last_qid &&
				tr->last_qid == zq->queue->qid) ?
				TRACK_AGAIN_QUEUE_WEIGHT_PENALTY : 0;
			if (!zcrypt_queue_compare(zq, pref_zq,
						  wgt + cpen + qpen, pref_wgt))
				continue;
			pref_zc = zc;
			pref_zq = zq;
			pref_wgt = wgt + cpen + qpen;
		}
	}
	pref_zq = zcrypt_pick_queue(pref_zc, pref_zq, &mod, wgt);
	spin_unlock(&zcrypt_list_lock);

	if (!pref_zq) {
		rc = -ENODEV;
		goto out;
	}

	qid = pref_zq->queue->qid;
	rc = pref_zq->ops->rsa_modexpo(pref_zq, mex);

	spin_lock(&zcrypt_list_lock);
	zcrypt_drop_queue(pref_zc, pref_zq, mod, wgt);
	spin_unlock(&zcrypt_list_lock);

out:
	if (tr) {
		tr->last_rc = rc;
		tr->last_qid = qid;
	}
	trace_s390_zcrypt_rep(mex, func_code, rc,
			      AP_QID_CARD(qid), AP_QID_QUEUE(qid));
	return rc;
}

static long zcrypt_rsa_crt(struct ap_perms *perms,
			   struct zcrypt_track *tr,
			   struct ica_rsa_modexpo_crt *crt)
{
	struct zcrypt_card *zc, *pref_zc;
	struct zcrypt_queue *zq, *pref_zq;
	unsigned int wgt = 0, pref_wgt = 0;
	unsigned int func_code;
	int cpen, qpen, qid = 0, rc = -ENODEV;
	struct module *mod;

	trace_s390_zcrypt_req(crt, TP_ICARSACRT);

	if (crt->outputdatalength < crt->inputdatalength) {
		func_code = 0;
		rc = -EINVAL;
		goto out;
	}

	/*
	 * As long as outputdatalength is big enough, we can set the
	 * outputdatalength equal to the inputdatalength, since that is the
	 * number of bytes we will copy in any case
	 */
	crt->outputdatalength = crt->inputdatalength;

	rc = get_rsa_crt_fc(crt, &func_code);
	if (rc)
		goto out;

	pref_zc = NULL;
	pref_zq = NULL;
	spin_lock(&zcrypt_list_lock);
	for_each_zcrypt_card(zc) {
		/* Check for online accelerator and CCA cards */
		if (!zc->online || !(zc->card->functions & 0x18000000))
			continue;
		/* Check for size limits */
		if (zc->min_mod_size > crt->inputdatalength ||
		    zc->max_mod_size < crt->inputdatalength)
			continue;
		/* check if device node has admission for this card */
		if (!zcrypt_check_card(perms, zc->card->id))
			continue;
		/* get weight index of the card device */
		wgt = zc->speed_rating[func_code];
		/* penalty if this msg was previously sent via this card */
		cpen = (tr && tr->again_counter && tr->last_qid &&
			AP_QID_CARD(tr->last_qid) == zc->card->id) ?
			TRACK_AGAIN_CARD_WEIGHT_PENALTY : 0;
		if (!zcrypt_card_compare(zc, pref_zc, wgt + cpen, pref_wgt))
			continue;
		for_each_zcrypt_queue(zq, zc) {
			/* check if device is online and eligible */
			if (!zq->online || !zq->ops->rsa_modexpo_crt)
				continue;
			/* check if device node has admission for this queue */
			if (!zcrypt_check_queue(perms,
						AP_QID_QUEUE(zq->queue->qid)))
				continue;
			/* penalty if the msg was previously sent at this qid */
			qpen = (tr && tr->again_counter && tr->last_qid &&
				tr->last_qid == zq->queue->qid) ?
				TRACK_AGAIN_QUEUE_WEIGHT_PENALTY : 0;
			if (!zcrypt_queue_compare(zq, pref_zq,
						  wgt + cpen + qpen, pref_wgt))
				continue;
			pref_zc = zc;
			pref_zq = zq;
			pref_wgt = wgt + cpen + qpen;
		}
	}
	pref_zq = zcrypt_pick_queue(pref_zc, pref_zq, &mod, wgt);
	spin_unlock(&zcrypt_list_lock);

	if (!pref_zq) {
		rc = -ENODEV;
		goto out;
	}

	qid = pref_zq->queue->qid;
	rc = pref_zq->ops->rsa_modexpo_crt(pref_zq, crt);

	spin_lock(&zcrypt_list_lock);
	zcrypt_drop_queue(pref_zc, pref_zq, mod, wgt);
	spin_unlock(&zcrypt_list_lock);

out:
	if (tr) {
		tr->last_rc = rc;
		tr->last_qid = qid;
	}
	trace_s390_zcrypt_rep(crt, func_code, rc,
			      AP_QID_CARD(qid), AP_QID_QUEUE(qid));
	return rc;
}

static long _zcrypt_send_cprb(bool userspace, struct ap_perms *perms,
			      struct zcrypt_track *tr,
			      struct ica_xcRB *xcRB)
{
	struct zcrypt_card *zc, *pref_zc;
	struct zcrypt_queue *zq, *pref_zq;
	struct ap_message ap_msg;
	unsigned int wgt = 0, pref_wgt = 0;
	unsigned int func_code;
	unsigned short *domain, tdom;
	int cpen, qpen, qid = 0, rc = -ENODEV;
	struct module *mod;

	trace_s390_zcrypt_req(xcRB, TB_ZSECSENDCPRB);

	xcRB->status = 0;
	ap_init_message(&ap_msg);
	rc = get_cprb_fc(userspace, xcRB, &ap_msg, &func_code, &domain);
	if (rc)
		goto out;

	/*
	 * If a valid target domain is set and this domain is NOT a usage
	 * domain but a control only domain, use the default domain as target.
	 */
	tdom = *domain;
	if (tdom < AP_DOMAINS &&
	    !ap_test_config_usage_domain(tdom) &&
	    ap_test_config_ctrl_domain(tdom) &&
	    ap_domain_index >= 0)
		tdom = ap_domain_index;

	pref_zc = NULL;
	pref_zq = NULL;
	spin_lock(&zcrypt_list_lock);
	for_each_zcrypt_card(zc) {
		/* Check for online CCA cards */
		if (!zc->online || !(zc->card->functions & 0x10000000))
			continue;
		/* Check for user selected CCA card */
		if (xcRB->user_defined != AUTOSELECT &&
		    xcRB->user_defined != zc->card->id)
			continue;
		/* check if device node has admission for this card */
		if (!zcrypt_check_card(perms, zc->card->id))
			continue;
		/* get weight index of the card device */
		wgt = speed_idx_cca(func_code) * zc->speed_rating[SECKEY];
		/* penalty if this msg was previously sent via this card */
		cpen = (tr && tr->again_counter && tr->last_qid &&
			AP_QID_CARD(tr->last_qid) == zc->card->id) ?
			TRACK_AGAIN_CARD_WEIGHT_PENALTY : 0;
		if (!zcrypt_card_compare(zc, pref_zc, wgt + cpen, pref_wgt))
			continue;
		for_each_zcrypt_queue(zq, zc) {
			/* check if device is online and eligible */
			if (!zq->online ||
			    !zq->ops->send_cprb ||
			    (tdom != AUTOSEL_DOM &&
			     tdom != AP_QID_QUEUE(zq->queue->qid)))
				continue;
			/* check if device node has admission for this queue */
			if (!zcrypt_check_queue(perms,
						AP_QID_QUEUE(zq->queue->qid)))
				continue;
			/* penalty if the msg was previously sent at this qid */
			qpen = (tr && tr->again_counter && tr->last_qid &&
				tr->last_qid == zq->queue->qid) ?
				TRACK_AGAIN_QUEUE_WEIGHT_PENALTY : 0;
			if (!zcrypt_queue_compare(zq, pref_zq,
						  wgt + cpen + qpen, pref_wgt))
				continue;
			pref_zc = zc;
			pref_zq = zq;
			pref_wgt = wgt + cpen + qpen;
		}
	}
	pref_zq = zcrypt_pick_queue(pref_zc, pref_zq, &mod, wgt);
	spin_unlock(&zcrypt_list_lock);

	if (!pref_zq) {
		rc = -ENODEV;
		goto out;
	}

	/* in case of auto select, provide the correct domain */
	qid = pref_zq->queue->qid;
	if (*domain == AUTOSEL_DOM)
		*domain = AP_QID_QUEUE(qid);

	rc = pref_zq->ops->send_cprb(userspace, pref_zq, xcRB, &ap_msg);

	spin_lock(&zcrypt_list_lock);
	zcrypt_drop_queue(pref_zc, pref_zq, mod, wgt);
	spin_unlock(&zcrypt_list_lock);

out:
	ap_release_message(&ap_msg);
	if (tr) {
		tr->last_rc = rc;
		tr->last_qid = qid;
	}
	trace_s390_zcrypt_rep(xcRB, func_code, rc,
			      AP_QID_CARD(qid), AP_QID_QUEUE(qid));
	return rc;
}

long zcrypt_send_cprb(struct ica_xcRB *xcRB)
{
	return _zcrypt_send_cprb(false, &ap_perms, NULL, xcRB);
}
EXPORT_SYMBOL(zcrypt_send_cprb);
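/*
 * zcrypt_send_cprb() is the in-kernel entry point for CCA CPRB requests (used
 * for instance by the CCA misc functions pulled in via zcrypt_ccamisc.h):
 * userspace=false makes the CPRB payload pointers be treated as kernel
 * addresses, the default ap_perms apply and no retry tracking is done.
 */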

static bool is_desired_ep11_card(unsigned int dev_id,
				 unsigned short target_num,
				 struct ep11_target_dev *targets)
{
	while (target_num-- > 0) {
		if (targets->ap_id == dev_id || targets->ap_id == AUTOSEL_AP)
			return true;
		targets++;
	}
	return false;
}

static bool is_desired_ep11_queue(unsigned int dev_qid,
				  unsigned short target_num,
				  struct ep11_target_dev *targets)
{
	int card = AP_QID_CARD(dev_qid), dom = AP_QID_QUEUE(dev_qid);

	while (target_num-- > 0) {
		if ((targets->ap_id == card || targets->ap_id == AUTOSEL_AP) &&
		    (targets->dom_id == dom || targets->dom_id == AUTOSEL_DOM))
			return true;
		targets++;
	}
	return false;
}

static long _zcrypt_send_ep11_cprb(bool userspace, struct ap_perms *perms,
				   struct zcrypt_track *tr,
				   struct ep11_urb *xcrb)
{
	struct zcrypt_card *zc, *pref_zc;
	struct zcrypt_queue *zq, *pref_zq;
	struct ep11_target_dev *targets;
	unsigned short target_num;
	unsigned int wgt = 0, pref_wgt = 0;
	unsigned int func_code;
	struct ap_message ap_msg;
	int cpen, qpen, qid = 0, rc = -ENODEV;
	struct module *mod;

	trace_s390_zcrypt_req(xcrb, TP_ZSENDEP11CPRB);

	ap_init_message(&ap_msg);

	target_num = (unsigned short) xcrb->targets_num;

	/* empty list indicates autoselect (all available targets) */
	targets = NULL;
	if (target_num != 0) {
		struct ep11_target_dev __user *uptr;

		targets = kcalloc(target_num, sizeof(*targets), GFP_KERNEL);
		if (!targets) {
			func_code = 0;
			rc = -ENOMEM;
			goto out;
		}

		uptr = (struct ep11_target_dev __force __user *) xcrb->targets;
		if (z_copy_from_user(userspace, targets, uptr,
				     target_num * sizeof(*targets))) {
			func_code = 0;
			rc = -EFAULT;
			goto out_free;
		}
	}

	rc = get_ep11cprb_fc(userspace, xcrb, &ap_msg, &func_code);
	if (rc)
		goto out_free;

	pref_zc = NULL;
	pref_zq = NULL;
	spin_lock(&zcrypt_list_lock);
	for_each_zcrypt_card(zc) {
		/* Check for online EP11 cards */
		if (!zc->online || !(zc->card->functions & 0x04000000))
			continue;
		/* Check for user selected EP11 card */
		if (targets &&
		    !is_desired_ep11_card(zc->card->id, target_num, targets))
			continue;
		/* check if device node has admission for this card */
		if (!zcrypt_check_card(perms, zc->card->id))
			continue;
		/* get weight index of the card device */
		wgt = speed_idx_ep11(func_code) * zc->speed_rating[SECKEY];
		/* penalty if this msg was previously sent via this card */
		cpen = (tr && tr->again_counter && tr->last_qid &&
			AP_QID_CARD(tr->last_qid) == zc->card->id) ?
			TRACK_AGAIN_CARD_WEIGHT_PENALTY : 0;
		if (!zcrypt_card_compare(zc, pref_zc, wgt + cpen, pref_wgt))
			continue;
		for_each_zcrypt_queue(zq, zc) {
			/* check if device is online and eligible */
			if (!zq->online ||
			    !zq->ops->send_ep11_cprb ||
			    (targets &&
			     !is_desired_ep11_queue(zq->queue->qid,
						    target_num, targets)))
				continue;
			/* check if device node has admission for this queue */
			if (!zcrypt_check_queue(perms,
						AP_QID_QUEUE(zq->queue->qid)))
				continue;
			/* penalty if the msg was previously sent at this qid */
			qpen = (tr && tr->again_counter && tr->last_qid &&
				tr->last_qid == zq->queue->qid) ?
				TRACK_AGAIN_QUEUE_WEIGHT_PENALTY : 0;
			if (!zcrypt_queue_compare(zq, pref_zq,
						  wgt + cpen + qpen, pref_wgt))
				continue;
			pref_zc = zc;
			pref_zq = zq;
			pref_wgt = wgt + cpen + qpen;
		}
	}
	pref_zq = zcrypt_pick_queue(pref_zc, pref_zq, &mod, wgt);
	spin_unlock(&zcrypt_list_lock);

	if (!pref_zq) {
		rc = -ENODEV;
		goto out_free;
	}

	qid = pref_zq->queue->qid;
	rc = pref_zq->ops->send_ep11_cprb(userspace, pref_zq, xcrb, &ap_msg);

	spin_lock(&zcrypt_list_lock);
	zcrypt_drop_queue(pref_zc, pref_zq, mod, wgt);
	spin_unlock(&zcrypt_list_lock);

out_free:
	kfree(targets);
out:
	ap_release_message(&ap_msg);
	if (tr) {
		tr->last_rc = rc;
		tr->last_qid = qid;
	}
	trace_s390_zcrypt_rep(xcrb, func_code, rc,
			      AP_QID_CARD(qid), AP_QID_QUEUE(qid));
	return rc;
}

long zcrypt_send_ep11_cprb(struct ep11_urb *xcrb)
{
	return _zcrypt_send_ep11_cprb(false, &ap_perms, NULL, xcrb);
}
EXPORT_SYMBOL(zcrypt_send_ep11_cprb);

static long zcrypt_rng(char *buffer)
{
	struct zcrypt_card *zc, *pref_zc;
	struct zcrypt_queue *zq, *pref_zq;
	unsigned int wgt = 0, pref_wgt = 0;
	unsigned int func_code;
	struct ap_message ap_msg;
	unsigned int domain;
	int qid = 0, rc = -ENODEV;
	struct module *mod;

	trace_s390_zcrypt_req(buffer, TP_HWRNGCPRB);

	ap_init_message(&ap_msg);
	rc = get_rng_fc(&ap_msg, &func_code, &domain);
	if (rc)
		goto out;

	pref_zc = NULL;
	pref_zq = NULL;
	spin_lock(&zcrypt_list_lock);
	for_each_zcrypt_card(zc) {
		/* Check for online CCA cards */
		if (!zc->online || !(zc->card->functions & 0x10000000))
			continue;
		/* get weight index of the card device */
		wgt = zc->speed_rating[func_code];
		if (!zcrypt_card_compare(zc, pref_zc, wgt, pref_wgt))
			continue;
		for_each_zcrypt_queue(zq, zc) {
			/* check if device is online and eligible */
			if (!zq->online || !zq->ops->rng)
				continue;
			if (!zcrypt_queue_compare(zq, pref_zq, wgt, pref_wgt))
				continue;
			pref_zc = zc;
			pref_zq = zq;
			pref_wgt = wgt;
		}
	}
	pref_zq = zcrypt_pick_queue(pref_zc, pref_zq, &mod, wgt);
	spin_unlock(&zcrypt_list_lock);

	if (!pref_zq) {
		rc = -ENODEV;
		goto out;
	}

	qid = pref_zq->queue->qid;
	rc = pref_zq->ops->rng(pref_zq, buffer, &ap_msg);

	spin_lock(&zcrypt_list_lock);
	zcrypt_drop_queue(pref_zc, pref_zq, mod, wgt);
	spin_unlock(&zcrypt_list_lock);

out:
	ap_release_message(&ap_msg);
	trace_s390_zcrypt_rep(buffer, func_code, rc,
			      AP_QID_CARD(qid), AP_QID_QUEUE(qid));
	return rc;
}

static void zcrypt_device_status_mask(struct zcrypt_device_status *devstatus)
{
	struct zcrypt_card *zc;
	struct zcrypt_queue *zq;
	struct zcrypt_device_status *stat;
	int card, queue;

	memset(devstatus, 0, MAX_ZDEV_ENTRIES
	       * sizeof(struct zcrypt_device_status));

	spin_lock(&zcrypt_list_lock);
	for_each_zcrypt_card(zc) {
		for_each_zcrypt_queue(zq, zc) {
			card = AP_QID_CARD(zq->queue->qid);
			if (card >= MAX_ZDEV_CARDIDS)
				continue;
			queue = AP_QID_QUEUE(zq->queue->qid);
			stat = &devstatus[card * AP_DOMAINS + queue];
			stat->hwtype = zc->card->ap_dev.device_type;
			stat->functions = zc->card->functions >> 26;
			stat->qid = zq->queue->qid;
			stat->online = zq->online ? 0x01 : 0x00;
		}
	}
	spin_unlock(&zcrypt_list_lock);
}

void zcrypt_device_status_mask_ext(struct zcrypt_device_status_ext *devstatus)
{
	struct zcrypt_card *zc;
	struct zcrypt_queue *zq;
	struct zcrypt_device_status_ext *stat;
	int card, queue;

	memset(devstatus, 0, MAX_ZDEV_ENTRIES_EXT
	       * sizeof(struct zcrypt_device_status_ext));

	spin_lock(&zcrypt_list_lock);
	for_each_zcrypt_card(zc) {
		for_each_zcrypt_queue(zq, zc) {
			card = AP_QID_CARD(zq->queue->qid);
			queue = AP_QID_QUEUE(zq->queue->qid);
			stat = &devstatus[card * AP_DOMAINS + queue];
			stat->hwtype = zc->card->ap_dev.device_type;
			stat->functions = zc->card->functions >> 26;
			stat->qid = zq->queue->qid;
			stat->online = zq->online ? 0x01 : 0x00;
		}
	}
	spin_unlock(&zcrypt_list_lock);
}
EXPORT_SYMBOL(zcrypt_device_status_mask_ext);
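/*
 * Callers of zcrypt_device_status_mask_ext() must pass an array of
 * MAX_ZDEV_ENTRIES_EXT entries; each (card, domain) pair is reported at index
 * card * AP_DOMAINS + domain, and entries for absent devices stay zeroed from
 * the initial memset.
 */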
1205
Harald Freudenberger4da57a22019-06-18 15:53:12 +02001206int zcrypt_device_status_ext(int card, int queue,
1207 struct zcrypt_device_status_ext *devstat)
1208{
1209 struct zcrypt_card *zc;
1210 struct zcrypt_queue *zq;
1211
1212 memset(devstat, 0, sizeof(*devstat));
1213
1214 spin_lock(&zcrypt_list_lock);
1215 for_each_zcrypt_card(zc) {
1216 for_each_zcrypt_queue(zq, zc) {
1217 if (card == AP_QID_CARD(zq->queue->qid) &&
1218 queue == AP_QID_QUEUE(zq->queue->qid)) {
1219 devstat->hwtype = zc->card->ap_dev.device_type;
1220 devstat->functions = zc->card->functions >> 26;
1221 devstat->qid = zq->queue->qid;
1222 devstat->online = zq->online ? 0x01 : 0x00;
1223 spin_unlock(&zcrypt_list_lock);
1224 return 0;
1225 }
1226 }
1227 }
1228 spin_unlock(&zcrypt_list_lock);
1229
1230 return -ENODEV;
1231}
1232EXPORT_SYMBOL(zcrypt_device_status_ext);
1233
Harald Freudenbergeraf4a7222018-04-09 16:18:37 +02001234static void zcrypt_status_mask(char status[], size_t max_adapters)
1235{
1236 struct zcrypt_card *zc;
1237 struct zcrypt_queue *zq;
1238 int card;
1239
1240 memset(status, 0, max_adapters);
1241 spin_lock(&zcrypt_list_lock);
1242 for_each_zcrypt_card(zc) {
1243 for_each_zcrypt_queue(zq, zc) {
1244 card = AP_QID_CARD(zq->queue->qid);
1245 if (AP_QID_QUEUE(zq->queue->qid) != ap_domain_index
1246 || card >= max_adapters)
Ingo Tuchscherere28d2af2016-08-25 11:16:03 +02001247 continue;
Harald Freudenbergeraf4a7222018-04-09 16:18:37 +02001248 status[card] = zc->online ? zc->user_space_type : 0x0d;
Ingo Tuchscherere28d2af2016-08-25 11:16:03 +02001249 }
1250 }
1251 spin_unlock(&zcrypt_list_lock);
Martin Schwidefsky2dbc2412006-09-20 15:58:27 +02001252}
1253
Harald Freudenbergeraf4a7222018-04-09 16:18:37 +02001254static void zcrypt_qdepth_mask(char qdepth[], size_t max_adapters)
Martin Schwidefsky2dbc2412006-09-20 15:58:27 +02001255{
Ingo Tuchscherere28d2af2016-08-25 11:16:03 +02001256 struct zcrypt_card *zc;
1257 struct zcrypt_queue *zq;
Harald Freudenbergeraf4a7222018-04-09 16:18:37 +02001258 int card;
Martin Schwidefsky2dbc2412006-09-20 15:58:27 +02001259
Harald Freudenbergeraf4a7222018-04-09 16:18:37 +02001260 memset(qdepth, 0, max_adapters);
Ingo Tuchscherere28d2af2016-08-25 11:16:03 +02001261 spin_lock(&zcrypt_list_lock);
Harald Freudenberger7fbe5c02017-01-16 09:43:29 +01001262 local_bh_disable();
Ingo Tuchscherere28d2af2016-08-25 11:16:03 +02001263 for_each_zcrypt_card(zc) {
1264 for_each_zcrypt_queue(zq, zc) {
Harald Freudenbergeraf4a7222018-04-09 16:18:37 +02001265 card = AP_QID_CARD(zq->queue->qid);
1266 if (AP_QID_QUEUE(zq->queue->qid) != ap_domain_index
1267 || card >= max_adapters)
Ingo Tuchscherere28d2af2016-08-25 11:16:03 +02001268 continue;
1269 spin_lock(&zq->queue->lock);
Harald Freudenbergeraf4a7222018-04-09 16:18:37 +02001270 qdepth[card] =
Ingo Tuchscherere28d2af2016-08-25 11:16:03 +02001271 zq->queue->pendingq_count +
1272 zq->queue->requestq_count;
1273 spin_unlock(&zq->queue->lock);
1274 }
Martin Schwidefsky2dbc2412006-09-20 15:58:27 +02001275 }
Harald Freudenberger7fbe5c02017-01-16 09:43:29 +01001276 local_bh_enable();
Ingo Tuchscherere28d2af2016-08-25 11:16:03 +02001277 spin_unlock(&zcrypt_list_lock);
Martin Schwidefsky2dbc2412006-09-20 15:58:27 +02001278}
1279
Harald Freudenbergerfcd98d42019-12-20 16:02:54 +01001280static void zcrypt_perdev_reqcnt(u32 reqcnt[], size_t max_adapters)
Martin Schwidefsky2dbc2412006-09-20 15:58:27 +02001281{
Ingo Tuchscherere28d2af2016-08-25 11:16:03 +02001282 struct zcrypt_card *zc;
1283 struct zcrypt_queue *zq;
Harald Freudenbergeraf4a7222018-04-09 16:18:37 +02001284 int card;
Harald Freudenbergerfcd98d42019-12-20 16:02:54 +01001285 u64 cnt;
Martin Schwidefsky2dbc2412006-09-20 15:58:27 +02001286
Harald Freudenbergeraf4a7222018-04-09 16:18:37 +02001287	memset(reqcnt, 0, sizeof(u32) * max_adapters);
Ingo Tuchscherere28d2af2016-08-25 11:16:03 +02001288 spin_lock(&zcrypt_list_lock);
Harald Freudenberger7fbe5c02017-01-16 09:43:29 +01001289 local_bh_disable();
Ingo Tuchscherere28d2af2016-08-25 11:16:03 +02001290 for_each_zcrypt_card(zc) {
1291 for_each_zcrypt_queue(zq, zc) {
Harald Freudenbergeraf4a7222018-04-09 16:18:37 +02001292 card = AP_QID_CARD(zq->queue->qid);
1293			if (AP_QID_QUEUE(zq->queue->qid) != ap_domain_index ||
1294			    card >= max_adapters)
Ingo Tuchscherere28d2af2016-08-25 11:16:03 +02001295 continue;
1296 spin_lock(&zq->queue->lock);
Harald Freudenbergerfcd98d42019-12-20 16:02:54 +01001297 cnt = zq->queue->total_request_count;
Ingo Tuchscherere28d2af2016-08-25 11:16:03 +02001298 spin_unlock(&zq->queue->lock);
Harald Freudenbergerfcd98d42019-12-20 16:02:54 +01001299 reqcnt[card] = (cnt < UINT_MAX) ? (u32) cnt : UINT_MAX;
Ingo Tuchscherere28d2af2016-08-25 11:16:03 +02001300 }
Martin Schwidefsky2dbc2412006-09-20 15:58:27 +02001301 }
Harald Freudenberger7fbe5c02017-01-16 09:43:29 +01001302 local_bh_enable();
Ingo Tuchscherere28d2af2016-08-25 11:16:03 +02001303 spin_unlock(&zcrypt_list_lock);
Martin Schwidefsky2dbc2412006-09-20 15:58:27 +02001304}
1305
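/* Sum of pending requests on all queues of the default domain. */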
1306static int zcrypt_pendingq_count(void)
1307{
Ingo Tuchscherere28d2af2016-08-25 11:16:03 +02001308 struct zcrypt_card *zc;
1309 struct zcrypt_queue *zq;
1310 int pendingq_count;
Martin Schwidefsky2dbc2412006-09-20 15:58:27 +02001311
Ingo Tuchscherere28d2af2016-08-25 11:16:03 +02001312 pendingq_count = 0;
1313 spin_lock(&zcrypt_list_lock);
Harald Freudenberger7fbe5c02017-01-16 09:43:29 +01001314 local_bh_disable();
Ingo Tuchscherere28d2af2016-08-25 11:16:03 +02001315 for_each_zcrypt_card(zc) {
1316 for_each_zcrypt_queue(zq, zc) {
1317 if (AP_QID_QUEUE(zq->queue->qid) != ap_domain_index)
1318 continue;
1319 spin_lock(&zq->queue->lock);
1320 pendingq_count += zq->queue->pendingq_count;
1321 spin_unlock(&zq->queue->lock);
1322 }
Martin Schwidefsky2dbc2412006-09-20 15:58:27 +02001323 }
Harald Freudenberger7fbe5c02017-01-16 09:43:29 +01001324 local_bh_enable();
Ingo Tuchscherere28d2af2016-08-25 11:16:03 +02001325 spin_unlock(&zcrypt_list_lock);
Martin Schwidefsky2dbc2412006-09-20 15:58:27 +02001326 return pendingq_count;
1327}
1328
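/* Sum of queued (not yet dispatched) requests on all queues of the default domain. */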
1329static int zcrypt_requestq_count(void)
1330{
Ingo Tuchscherere28d2af2016-08-25 11:16:03 +02001331 struct zcrypt_card *zc;
1332 struct zcrypt_queue *zq;
1333 int requestq_count;
Martin Schwidefsky2dbc2412006-09-20 15:58:27 +02001334
Ingo Tuchscherere28d2af2016-08-25 11:16:03 +02001335 requestq_count = 0;
1336 spin_lock(&zcrypt_list_lock);
Harald Freudenberger7fbe5c02017-01-16 09:43:29 +01001337 local_bh_disable();
Ingo Tuchscherere28d2af2016-08-25 11:16:03 +02001338 for_each_zcrypt_card(zc) {
1339 for_each_zcrypt_queue(zq, zc) {
1340 if (AP_QID_QUEUE(zq->queue->qid) != ap_domain_index)
1341 continue;
1342 spin_lock(&zq->queue->lock);
1343 requestq_count += zq->queue->requestq_count;
1344 spin_unlock(&zq->queue->lock);
1345 }
Martin Schwidefsky2dbc2412006-09-20 15:58:27 +02001346 }
Harald Freudenberger7fbe5c02017-01-16 09:43:29 +01001347 local_bh_enable();
Ingo Tuchscherere28d2af2016-08-25 11:16:03 +02001348 spin_unlock(&zcrypt_list_lock);
Martin Schwidefsky2dbc2412006-09-20 15:58:27 +02001349 return requestq_count;
1350}
1351
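/*
 * ICARSAMODEXPO ioctl worker: copy the RSA mod-expo request from user
 * space, run it through zcrypt_rsa_modexpo() with bounded -EAGAIN
 * retries (plus one more retry round after a successful rescan on
 * -ENODEV), then pass the output length back to the caller.
 */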
Harald Freudenberger7e202ac2020-05-20 16:07:19 +02001352static int icarsamodexpo_ioctl(struct ap_perms *perms, unsigned long arg)
1353{
1354 int rc;
Harald Freudenberger91ffc512020-07-02 11:10:11 +02001355 struct zcrypt_track tr;
Harald Freudenberger7e202ac2020-05-20 16:07:19 +02001356 struct ica_rsa_modexpo mex;
1357 struct ica_rsa_modexpo __user *umex = (void __user *) arg;
1358
Harald Freudenberger91ffc512020-07-02 11:10:11 +02001359 memset(&tr, 0, sizeof(tr));
Harald Freudenberger7e202ac2020-05-20 16:07:19 +02001360 if (copy_from_user(&mex, umex, sizeof(mex)))
1361 return -EFAULT;
1362 do {
Harald Freudenberger91ffc512020-07-02 11:10:11 +02001363 rc = zcrypt_rsa_modexpo(perms, &tr, &mex);
1364 if (rc == -EAGAIN)
1365 tr.again_counter++;
1366 } while (rc == -EAGAIN && tr.again_counter < TRACK_AGAIN_MAX);
Harald Freudenberger7e202ac2020-05-20 16:07:19 +02001367 /* on failure: retry once again after a requested rescan */
1368 if ((rc == -ENODEV) && (zcrypt_process_rescan()))
1369 do {
Harald Freudenberger91ffc512020-07-02 11:10:11 +02001370 rc = zcrypt_rsa_modexpo(perms, &tr, &mex);
1371 if (rc == -EAGAIN)
1372 tr.again_counter++;
1373 } while (rc == -EAGAIN && tr.again_counter < TRACK_AGAIN_MAX);
Harald Freudenberger7e202ac2020-05-20 16:07:19 +02001374 if (rc) {
1375 ZCRYPT_DBF(DBF_DEBUG, "ioctl ICARSAMODEXPO rc=%d\n", rc);
1376 return rc;
1377 }
1378 return put_user(mex.outputdatalength, &umex->outputdatalength);
1379}
1380
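/* ICARSACRT ioctl worker: same flow as above for RSA in CRT format. */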
1381static int icarsacrt_ioctl(struct ap_perms *perms, unsigned long arg)
1382{
1383 int rc;
Harald Freudenberger91ffc512020-07-02 11:10:11 +02001384 struct zcrypt_track tr;
Harald Freudenberger7e202ac2020-05-20 16:07:19 +02001385 struct ica_rsa_modexpo_crt crt;
1386 struct ica_rsa_modexpo_crt __user *ucrt = (void __user *) arg;
1387
Harald Freudenberger91ffc512020-07-02 11:10:11 +02001388 memset(&tr, 0, sizeof(tr));
Harald Freudenberger7e202ac2020-05-20 16:07:19 +02001389 if (copy_from_user(&crt, ucrt, sizeof(crt)))
1390 return -EFAULT;
1391 do {
Harald Freudenberger91ffc512020-07-02 11:10:11 +02001392 rc = zcrypt_rsa_crt(perms, &tr, &crt);
1393 if (rc == -EAGAIN)
1394 tr.again_counter++;
1395 } while (rc == -EAGAIN && tr.again_counter < TRACK_AGAIN_MAX);
Harald Freudenberger7e202ac2020-05-20 16:07:19 +02001396 /* on failure: retry once again after a requested rescan */
1397 if ((rc == -ENODEV) && (zcrypt_process_rescan()))
1398 do {
Harald Freudenberger91ffc512020-07-02 11:10:11 +02001399 rc = zcrypt_rsa_crt(perms, &tr, &crt);
1400 if (rc == -EAGAIN)
1401 tr.again_counter++;
1402 } while (rc == -EAGAIN && tr.again_counter < TRACK_AGAIN_MAX);
Harald Freudenberger7e202ac2020-05-20 16:07:19 +02001403 if (rc) {
1404 ZCRYPT_DBF(DBF_DEBUG, "ioctl ICARSACRT rc=%d\n", rc);
1405 return rc;
1406 }
1407 return put_user(crt.outputdatalength, &ucrt->outputdatalength);
1408}
1409
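/*
 * ZSECSENDCPRB ioctl worker: forward a CCA CPRB (struct ica_xcRB) with
 * the same retry/rescan handling, and copy the updated xcRB (status,
 * reply lengths) back to user space even if the request failed.
 */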
1410static int zsecsendcprb_ioctl(struct ap_perms *perms, unsigned long arg)
1411{
1412 int rc;
1413 struct ica_xcRB xcRB;
Harald Freudenberger91ffc512020-07-02 11:10:11 +02001414 struct zcrypt_track tr;
Harald Freudenberger7e202ac2020-05-20 16:07:19 +02001415 struct ica_xcRB __user *uxcRB = (void __user *) arg;
1416
Harald Freudenberger91ffc512020-07-02 11:10:11 +02001417 memset(&tr, 0, sizeof(tr));
Harald Freudenberger7e202ac2020-05-20 16:07:19 +02001418 if (copy_from_user(&xcRB, uxcRB, sizeof(xcRB)))
1419 return -EFAULT;
1420 do {
Harald Freudenberger91ffc512020-07-02 11:10:11 +02001421 rc = _zcrypt_send_cprb(true, perms, &tr, &xcRB);
1422 if (rc == -EAGAIN)
1423 tr.again_counter++;
1424 } while (rc == -EAGAIN && tr.again_counter < TRACK_AGAIN_MAX);
Harald Freudenberger7e202ac2020-05-20 16:07:19 +02001425 /* on failure: retry once again after a requested rescan */
1426 if ((rc == -ENODEV) && (zcrypt_process_rescan()))
1427 do {
Harald Freudenberger91ffc512020-07-02 11:10:11 +02001428 rc = _zcrypt_send_cprb(true, perms, &tr, &xcRB);
1429 if (rc == -EAGAIN)
1430 tr.again_counter++;
1431 } while (rc == -EAGAIN && tr.again_counter < TRACK_AGAIN_MAX);
Harald Freudenberger7e202ac2020-05-20 16:07:19 +02001432 if (rc)
1433 ZCRYPT_DBF(DBF_DEBUG, "ioctl ZSENDCPRB rc=%d status=0x%x\n",
1434 rc, xcRB.status);
1435 if (copy_to_user(uxcRB, &xcRB, sizeof(xcRB)))
1436 return -EFAULT;
1437 return rc;
1438}
1439
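/*
 * ZSENDEP11CPRB ioctl worker: same as above for EP11 requests
 * (struct ep11_urb).
 */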
1440static int zsendep11cprb_ioctl(struct ap_perms *perms, unsigned long arg)
1441{
1442 int rc;
1443 struct ep11_urb xcrb;
Harald Freudenberger91ffc512020-07-02 11:10:11 +02001444 struct zcrypt_track tr;
Harald Freudenberger7e202ac2020-05-20 16:07:19 +02001445 struct ep11_urb __user *uxcrb = (void __user *)arg;
1446
Harald Freudenberger91ffc512020-07-02 11:10:11 +02001447 memset(&tr, 0, sizeof(tr));
Harald Freudenberger7e202ac2020-05-20 16:07:19 +02001448 if (copy_from_user(&xcrb, uxcrb, sizeof(xcrb)))
1449 return -EFAULT;
1450 do {
Harald Freudenberger91ffc512020-07-02 11:10:11 +02001451 rc = _zcrypt_send_ep11_cprb(true, perms, &tr, &xcrb);
1452 if (rc == -EAGAIN)
1453 tr.again_counter++;
1454 } while (rc == -EAGAIN && tr.again_counter < TRACK_AGAIN_MAX);
Harald Freudenberger7e202ac2020-05-20 16:07:19 +02001455 /* on failure: retry once again after a requested rescan */
1456 if ((rc == -ENODEV) && (zcrypt_process_rescan()))
1457 do {
Harald Freudenberger91ffc512020-07-02 11:10:11 +02001458 rc = _zcrypt_send_ep11_cprb(true, perms, &tr, &xcrb);
1459 if (rc == -EAGAIN)
1460 tr.again_counter++;
1461 } while (rc == -EAGAIN && tr.again_counter < TRACK_AGAIN_MAX);
Harald Freudenberger7e202ac2020-05-20 16:07:19 +02001462 if (rc)
1463 ZCRYPT_DBF(DBF_DEBUG, "ioctl ZSENDEP11CPRB rc=%d\n", rc);
1464 if (copy_to_user(uxcrb, &xcrb, sizeof(xcrb)))
1465 return -EFAULT;
1466 return rc;
1467}
1468
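/*
 * Main ioctl dispatcher for the z90crypt misc device. The ap_perms
 * attached to the open file (filp->private_data) are checked first,
 * then the request is routed to the worker functions above or answered
 * directly for the status/counter ioctls.
 *
 * Illustrative only (not part of this driver): a minimal user space
 * sketch of the clear key RSA path could look like this, assuming the
 * uapi header and a properly filled PKCS#1 request:
 *
 *	#include <fcntl.h>
 *	#include <sys/ioctl.h>
 *	#include <asm/zcrypt.h>
 *
 *	int fd = open("/dev/z90crypt", O_RDWR);
 *	struct ica_rsa_modexpo mex;
 *	// fill mex.inputdata/inputdatalength, outputdata/outputdatalength,
 *	// b_key and n_modulus before issuing the request
 *	int rc = ioctl(fd, ICARSAMODEXPO, &mex);
 *	// on success mex.outputdatalength holds the result length
 *	close(fd);
 */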
Martin Schwidefsky2dbc2412006-09-20 15:58:27 +02001469static long zcrypt_unlocked_ioctl(struct file *filp, unsigned int cmd,
1470 unsigned long arg)
1471{
Harald Freudenberger00fab232018-09-17 16:18:41 +02001472 int rc;
1473 struct ap_perms *perms =
1474 (struct ap_perms *) filp->private_data;
1475
1476 rc = zcrypt_check_ioctl(perms, cmd);
1477 if (rc)
1478 return rc;
Martin Schwidefsky2dbc2412006-09-20 15:58:27 +02001479
1480 switch (cmd) {
Harald Freudenberger7e202ac2020-05-20 16:07:19 +02001481 case ICARSAMODEXPO:
1482 return icarsamodexpo_ioctl(perms, arg);
1483 case ICARSACRT:
1484 return icarsacrt_ioctl(perms, arg);
1485 case ZSECSENDCPRB:
1486 return zsecsendcprb_ioctl(perms, arg);
1487 case ZSENDEP11CPRB:
1488 return zsendep11cprb_ioctl(perms, arg);
Harald Freudenbergeraf4a7222018-04-09 16:18:37 +02001489 case ZCRYPT_DEVICE_STATUS: {
1490 struct zcrypt_device_status_ext *device_status;
1491 size_t total_size = MAX_ZDEV_ENTRIES_EXT
1492 * sizeof(struct zcrypt_device_status_ext);
Ingo Tuchschererb886a9d2016-08-25 11:19:58 +02001493
Harald Freudenbergeraf4a7222018-04-09 16:18:37 +02001494 device_status = kzalloc(total_size, GFP_KERNEL);
Ingo Tuchschererb886a9d2016-08-25 11:19:58 +02001495 if (!device_status)
1496 return -ENOMEM;
Harald Freudenbergeraf4a7222018-04-09 16:18:37 +02001497 zcrypt_device_status_mask_ext(device_status);
Ingo Tuchschererb886a9d2016-08-25 11:19:58 +02001498 if (copy_to_user((char __user *) arg, device_status,
Harald Freudenbergeraf4a7222018-04-09 16:18:37 +02001499 total_size))
1500 rc = -EFAULT;
Ingo Tuchschererb886a9d2016-08-25 11:19:58 +02001501 kfree(device_status);
Harald Freudenbergeraf4a7222018-04-09 16:18:37 +02001502 return rc;
Ingo Tuchschererb886a9d2016-08-25 11:19:58 +02001503 }
Harald Freudenbergeraf4a7222018-04-09 16:18:37 +02001504 case ZCRYPT_STATUS_MASK: {
Martin Schwidefsky2dbc2412006-09-20 15:58:27 +02001505 char status[AP_DEVICES];
Harald Freudenbergeraf4a7222018-04-09 16:18:37 +02001506
1507 zcrypt_status_mask(status, AP_DEVICES);
1508 if (copy_to_user((char __user *) arg, status, sizeof(status)))
Martin Schwidefsky2dbc2412006-09-20 15:58:27 +02001509 return -EFAULT;
1510 return 0;
1511 }
Harald Freudenbergeraf4a7222018-04-09 16:18:37 +02001512 case ZCRYPT_QDEPTH_MASK: {
Martin Schwidefsky2dbc2412006-09-20 15:58:27 +02001513 char qdepth[AP_DEVICES];
Harald Freudenbergeraf4a7222018-04-09 16:18:37 +02001514
1515 zcrypt_qdepth_mask(qdepth, AP_DEVICES);
1516 if (copy_to_user((char __user *) arg, qdepth, sizeof(qdepth)))
Martin Schwidefsky2dbc2412006-09-20 15:58:27 +02001517 return -EFAULT;
1518 return 0;
1519 }
Harald Freudenbergeraf4a7222018-04-09 16:18:37 +02001520 case ZCRYPT_PERDEV_REQCNT: {
Harald Freudenbergerfcd98d42019-12-20 16:02:54 +01001521 u32 *reqcnt;
Harald Freudenbergeraf4a7222018-04-09 16:18:37 +02001522
Harald Freudenbergerfcd98d42019-12-20 16:02:54 +01001523 reqcnt = kcalloc(AP_DEVICES, sizeof(u32), GFP_KERNEL);
Harald Freudenbergeraf4a7222018-04-09 16:18:37 +02001524 if (!reqcnt)
1525 return -ENOMEM;
1526 zcrypt_perdev_reqcnt(reqcnt, AP_DEVICES);
1527		if (copy_to_user((int __user *) arg, reqcnt,
1528				 sizeof(u32) * AP_DEVICES))
1528 rc = -EFAULT;
1529 kfree(reqcnt);
1530 return rc;
Martin Schwidefsky2dbc2412006-09-20 15:58:27 +02001531 }
1532 case Z90STAT_REQUESTQ_COUNT:
1533 return put_user(zcrypt_requestq_count(), (int __user *) arg);
1534 case Z90STAT_PENDINGQ_COUNT:
1535 return put_user(zcrypt_pendingq_count(), (int __user *) arg);
1536 case Z90STAT_TOTALOPEN_COUNT:
1537 return put_user(atomic_read(&zcrypt_open_count),
1538 (int __user *) arg);
1539 case Z90STAT_DOMAIN_INDEX:
1540 return put_user(ap_domain_index, (int __user *) arg);
Harald Freudenbergeraf4a7222018-04-09 16:18:37 +02001541 /*
1542 * Deprecated ioctls
1543 */
1544 case ZDEVICESTATUS: {
1545 /* the old ioctl supports only 64 adapters */
1546 struct zcrypt_device_status *device_status;
1547 size_t total_size = MAX_ZDEV_ENTRIES
1548 * sizeof(struct zcrypt_device_status);
1549
1550 device_status = kzalloc(total_size, GFP_KERNEL);
1551 if (!device_status)
1552 return -ENOMEM;
1553 zcrypt_device_status_mask(device_status);
1554 if (copy_to_user((char __user *) arg, device_status,
1555 total_size))
1556 rc = -EFAULT;
1557 kfree(device_status);
1558 return rc;
1559 }
1560 case Z90STAT_STATUS_MASK: {
1561 /* the old ioctl supports only 64 adapters */
1562 char status[MAX_ZDEV_CARDIDS];
1563
1564 zcrypt_status_mask(status, MAX_ZDEV_CARDIDS);
1565 if (copy_to_user((char __user *) arg, status, sizeof(status)))
1566 return -EFAULT;
1567 return 0;
1568 }
1569 case Z90STAT_QDEPTH_MASK: {
1570 /* the old ioctl supports only 64 adapters */
1571 char qdepth[MAX_ZDEV_CARDIDS];
1572
1573 zcrypt_qdepth_mask(qdepth, MAX_ZDEV_CARDIDS);
1574 if (copy_to_user((char __user *) arg, qdepth, sizeof(qdepth)))
1575 return -EFAULT;
1576 return 0;
1577 }
1578 case Z90STAT_PERDEV_REQCNT: {
1579 /* the old ioctl supports only 64 adapters */
Harald Freudenbergerfcd98d42019-12-20 16:02:54 +01001580 u32 reqcnt[MAX_ZDEV_CARDIDS];
Harald Freudenbergeraf4a7222018-04-09 16:18:37 +02001581
1582 zcrypt_perdev_reqcnt(reqcnt, MAX_ZDEV_CARDIDS);
1583 if (copy_to_user((int __user *) arg, reqcnt, sizeof(reqcnt)))
1584 return -EFAULT;
1585 return 0;
1586 }
Harald Freudenberger2a807862018-04-04 13:25:40 +02001587 /* unknown ioctl number */
Martin Schwidefsky2dbc2412006-09-20 15:58:27 +02001588 default:
Harald Freudenbergeraf4a7222018-04-09 16:18:37 +02001589 ZCRYPT_DBF(DBF_DEBUG, "unknown ioctl 0x%08x\n", cmd);
Martin Schwidefsky2dbc2412006-09-20 15:58:27 +02001590 return -ENOIOCTLCMD;
1591 }
1592}
1593
1594#ifdef CONFIG_COMPAT
Felix Beck1749a812008-04-17 07:46:28 +02001595/*
Martin Schwidefsky2dbc2412006-09-20 15:58:27 +02001596 * ioctl32 conversion routines
1597 */
1598struct compat_ica_rsa_modexpo {
1599 compat_uptr_t inputdata;
1600 unsigned int inputdatalength;
1601 compat_uptr_t outputdata;
1602 unsigned int outputdatalength;
1603 compat_uptr_t b_key;
1604 compat_uptr_t n_modulus;
1605};
1606
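/*
 * 32 bit compat handler for ICARSAMODEXPO: widen the compat_uptr_t
 * members of the user supplied struct into a native ica_rsa_modexpo
 * and reuse the regular retry/rescan flow.
 */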
Harald Freudenberger00fab232018-09-17 16:18:41 +02001607static long trans_modexpo32(struct ap_perms *perms, struct file *filp,
1608 unsigned int cmd, unsigned long arg)
Martin Schwidefsky2dbc2412006-09-20 15:58:27 +02001609{
1610 struct compat_ica_rsa_modexpo __user *umex32 = compat_ptr(arg);
1611 struct compat_ica_rsa_modexpo mex32;
1612 struct ica_rsa_modexpo mex64;
Harald Freudenberger91ffc512020-07-02 11:10:11 +02001613 struct zcrypt_track tr;
Martin Schwidefsky2dbc2412006-09-20 15:58:27 +02001614 long rc;
1615
Harald Freudenberger91ffc512020-07-02 11:10:11 +02001616 memset(&tr, 0, sizeof(tr));
Martin Schwidefsky2dbc2412006-09-20 15:58:27 +02001617 if (copy_from_user(&mex32, umex32, sizeof(mex32)))
1618 return -EFAULT;
1619 mex64.inputdata = compat_ptr(mex32.inputdata);
1620 mex64.inputdatalength = mex32.inputdatalength;
1621 mex64.outputdata = compat_ptr(mex32.outputdata);
1622 mex64.outputdatalength = mex32.outputdatalength;
1623 mex64.b_key = compat_ptr(mex32.b_key);
1624 mex64.n_modulus = compat_ptr(mex32.n_modulus);
1625 do {
Harald Freudenberger91ffc512020-07-02 11:10:11 +02001626 rc = zcrypt_rsa_modexpo(perms, &tr, &mex64);
1627 if (rc == -EAGAIN)
1628 tr.again_counter++;
1629 } while (rc == -EAGAIN && tr.again_counter < TRACK_AGAIN_MAX);
Holger Denglerdabecb22012-09-10 21:34:26 +02001630 /* on failure: retry once again after a requested rescan */
1631 if ((rc == -ENODEV) && (zcrypt_process_rescan()))
1632 do {
Harald Freudenberger91ffc512020-07-02 11:10:11 +02001633 rc = zcrypt_rsa_modexpo(perms, &tr, &mex64);
1634 if (rc == -EAGAIN)
1635 tr.again_counter++;
1636 } while (rc == -EAGAIN && tr.again_counter < TRACK_AGAIN_MAX);
Holger Denglerdabecb22012-09-10 21:34:26 +02001637 if (rc)
1638 return rc;
1639 return put_user(mex64.outputdatalength,
1640 &umex32->outputdatalength);
Martin Schwidefsky2dbc2412006-09-20 15:58:27 +02001641}
1642
1643struct compat_ica_rsa_modexpo_crt {
1644 compat_uptr_t inputdata;
1645 unsigned int inputdatalength;
1646 compat_uptr_t outputdata;
1647 unsigned int outputdatalength;
1648 compat_uptr_t bp_key;
1649 compat_uptr_t bq_key;
1650 compat_uptr_t np_prime;
1651 compat_uptr_t nq_prime;
1652 compat_uptr_t u_mult_inv;
1653};
1654
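/* 32 bit compat handler for ICARSACRT, analogous to trans_modexpo32(). */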
Harald Freudenberger00fab232018-09-17 16:18:41 +02001655static long trans_modexpo_crt32(struct ap_perms *perms, struct file *filp,
1656 unsigned int cmd, unsigned long arg)
Martin Schwidefsky2dbc2412006-09-20 15:58:27 +02001657{
1658 struct compat_ica_rsa_modexpo_crt __user *ucrt32 = compat_ptr(arg);
1659 struct compat_ica_rsa_modexpo_crt crt32;
1660 struct ica_rsa_modexpo_crt crt64;
Harald Freudenberger91ffc512020-07-02 11:10:11 +02001661 struct zcrypt_track tr;
Martin Schwidefsky2dbc2412006-09-20 15:58:27 +02001662 long rc;
1663
Harald Freudenberger91ffc512020-07-02 11:10:11 +02001664 memset(&tr, 0, sizeof(tr));
Martin Schwidefsky2dbc2412006-09-20 15:58:27 +02001665 if (copy_from_user(&crt32, ucrt32, sizeof(crt32)))
1666 return -EFAULT;
1667 crt64.inputdata = compat_ptr(crt32.inputdata);
1668 crt64.inputdatalength = crt32.inputdatalength;
Harald Freudenbergerac2b96f2018-08-17 12:36:01 +02001669 crt64.outputdata = compat_ptr(crt32.outputdata);
Martin Schwidefsky2dbc2412006-09-20 15:58:27 +02001670 crt64.outputdatalength = crt32.outputdatalength;
1671 crt64.bp_key = compat_ptr(crt32.bp_key);
1672 crt64.bq_key = compat_ptr(crt32.bq_key);
1673 crt64.np_prime = compat_ptr(crt32.np_prime);
1674 crt64.nq_prime = compat_ptr(crt32.nq_prime);
1675 crt64.u_mult_inv = compat_ptr(crt32.u_mult_inv);
1676 do {
Harald Freudenberger91ffc512020-07-02 11:10:11 +02001677 rc = zcrypt_rsa_crt(perms, &tr, &crt64);
1678 if (rc == -EAGAIN)
1679 tr.again_counter++;
1680 } while (rc == -EAGAIN && tr.again_counter < TRACK_AGAIN_MAX);
Holger Denglerdabecb22012-09-10 21:34:26 +02001681 /* on failure: retry once again after a requested rescan */
1682 if ((rc == -ENODEV) && (zcrypt_process_rescan()))
1683 do {
Harald Freudenberger91ffc512020-07-02 11:10:11 +02001684 rc = zcrypt_rsa_crt(perms, &tr, &crt64);
1685 if (rc == -EAGAIN)
1686 tr.again_counter++;
1687 } while (rc == -EAGAIN && tr.again_counter < TRACK_AGAIN_MAX);
Holger Denglerdabecb22012-09-10 21:34:26 +02001688 if (rc)
1689 return rc;
1690 return put_user(crt64.outputdatalength,
1691 &ucrt32->outputdatalength);
Martin Schwidefsky2dbc2412006-09-20 15:58:27 +02001692}
1693
Ralph Wuerthner54321142006-09-20 15:58:36 +02001694struct compat_ica_xcRB {
1695 unsigned short agent_ID;
1696 unsigned int user_defined;
1697 unsigned short request_ID;
1698 unsigned int request_control_blk_length;
Harald Freudenbergerac2b96f2018-08-17 12:36:01 +02001699 unsigned char padding1[16 - sizeof(compat_uptr_t)];
Ralph Wuerthner54321142006-09-20 15:58:36 +02001700 compat_uptr_t request_control_blk_addr;
1701 unsigned int request_data_length;
Harald Freudenbergerac2b96f2018-08-17 12:36:01 +02001702 char padding2[16 - sizeof(compat_uptr_t)];
Ralph Wuerthner54321142006-09-20 15:58:36 +02001703 compat_uptr_t request_data_address;
1704 unsigned int reply_control_blk_length;
Harald Freudenbergerac2b96f2018-08-17 12:36:01 +02001705 char padding3[16 - sizeof(compat_uptr_t)];
Ralph Wuerthner54321142006-09-20 15:58:36 +02001706 compat_uptr_t reply_control_blk_addr;
1707 unsigned int reply_data_length;
Harald Freudenbergerac2b96f2018-08-17 12:36:01 +02001708 char padding4[16 - sizeof(compat_uptr_t)];
Ralph Wuerthner54321142006-09-20 15:58:36 +02001709 compat_uptr_t reply_data_addr;
1710 unsigned short priority_window;
1711 unsigned int status;
Harald Freudenbergerac2b96f2018-08-17 12:36:01 +02001712} __packed;
Ralph Wuerthner54321142006-09-20 15:58:36 +02001713
Harald Freudenberger00fab232018-09-17 16:18:41 +02001714static long trans_xcRB32(struct ap_perms *perms, struct file *filp,
1715 unsigned int cmd, unsigned long arg)
Ralph Wuerthner54321142006-09-20 15:58:36 +02001716{
1717 struct compat_ica_xcRB __user *uxcRB32 = compat_ptr(arg);
1718 struct compat_ica_xcRB xcRB32;
Harald Freudenberger91ffc512020-07-02 11:10:11 +02001719 struct zcrypt_track tr;
Ralph Wuerthner54321142006-09-20 15:58:36 +02001720 struct ica_xcRB xcRB64;
1721 long rc;
1722
Harald Freudenberger91ffc512020-07-02 11:10:11 +02001723 memset(&tr, 0, sizeof(tr));
Ralph Wuerthner54321142006-09-20 15:58:36 +02001724 if (copy_from_user(&xcRB32, uxcRB32, sizeof(xcRB32)))
1725 return -EFAULT;
1726 xcRB64.agent_ID = xcRB32.agent_ID;
1727 xcRB64.user_defined = xcRB32.user_defined;
1728 xcRB64.request_ID = xcRB32.request_ID;
1729 xcRB64.request_control_blk_length =
1730 xcRB32.request_control_blk_length;
1731 xcRB64.request_control_blk_addr =
1732 compat_ptr(xcRB32.request_control_blk_addr);
1733 xcRB64.request_data_length =
1734 xcRB32.request_data_length;
1735 xcRB64.request_data_address =
1736 compat_ptr(xcRB32.request_data_address);
1737 xcRB64.reply_control_blk_length =
1738 xcRB32.reply_control_blk_length;
1739 xcRB64.reply_control_blk_addr =
1740 compat_ptr(xcRB32.reply_control_blk_addr);
1741 xcRB64.reply_data_length = xcRB32.reply_data_length;
1742 xcRB64.reply_data_addr =
1743 compat_ptr(xcRB32.reply_data_addr);
1744 xcRB64.priority_window = xcRB32.priority_window;
1745 xcRB64.status = xcRB32.status;
1746 do {
Harald Freudenberger91ffc512020-07-02 11:10:11 +02001747 rc = _zcrypt_send_cprb(true, perms, &tr, &xcRB64);
1748 if (rc == -EAGAIN)
1749 tr.again_counter++;
1750 } while (rc == -EAGAIN && tr.again_counter < TRACK_AGAIN_MAX);
Holger Denglerdabecb22012-09-10 21:34:26 +02001751 /* on failure: retry once again after a requested rescan */
1752 if ((rc == -ENODEV) && (zcrypt_process_rescan()))
1753 do {
Harald Freudenberger91ffc512020-07-02 11:10:11 +02001754 rc = _zcrypt_send_cprb(true, perms, &tr, &xcRB64);
1755 if (rc == -EAGAIN)
1756 tr.again_counter++;
1757 } while (rc == -EAGAIN && tr.again_counter < TRACK_AGAIN_MAX);
Ralph Wuerthner54321142006-09-20 15:58:36 +02001758 xcRB32.reply_control_blk_length = xcRB64.reply_control_blk_length;
1759 xcRB32.reply_data_length = xcRB64.reply_data_length;
1760 xcRB32.status = xcRB64.status;
1761 if (copy_to_user(uxcRB32, &xcRB32, sizeof(xcRB32)))
Harald Freudenbergerac2b96f2018-08-17 12:36:01 +02001762 return -EFAULT;
Ralph Wuerthner54321142006-09-20 15:58:36 +02001763 return rc;
1764}
1765
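/*
 * compat_ioctl entry point: only ioctls whose payload carries native
 * pointers (ICARSAMODEXPO, ICARSACRT, ZSECSENDCPRB) need translation;
 * everything else is layout compatible and falls through to
 * zcrypt_unlocked_ioctl().
 */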
Heiko Carstens2b67fc42007-02-05 21:16:47 +01001766static long zcrypt_compat_ioctl(struct file *filp, unsigned int cmd,
Martin Schwidefsky2dbc2412006-09-20 15:58:27 +02001767 unsigned long arg)
1768{
Harald Freudenberger00fab232018-09-17 16:18:41 +02001769 int rc;
1770 struct ap_perms *perms =
1771 (struct ap_perms *) filp->private_data;
1772
1773 rc = zcrypt_check_ioctl(perms, cmd);
1774 if (rc)
1775 return rc;
1776
Martin Schwidefsky2dbc2412006-09-20 15:58:27 +02001777 if (cmd == ICARSAMODEXPO)
Harald Freudenberger00fab232018-09-17 16:18:41 +02001778 return trans_modexpo32(perms, filp, cmd, arg);
Martin Schwidefsky2dbc2412006-09-20 15:58:27 +02001779 if (cmd == ICARSACRT)
Harald Freudenberger00fab232018-09-17 16:18:41 +02001780 return trans_modexpo_crt32(perms, filp, cmd, arg);
Ralph Wuerthner54321142006-09-20 15:58:36 +02001781 if (cmd == ZSECSENDCPRB)
Harald Freudenberger00fab232018-09-17 16:18:41 +02001782 return trans_xcRB32(perms, filp, cmd, arg);
Martin Schwidefsky2dbc2412006-09-20 15:58:27 +02001783 return zcrypt_unlocked_ioctl(filp, cmd, arg);
1784}
1785#endif
1786
Felix Beck1749a812008-04-17 07:46:28 +02001787/*
Martin Schwidefsky2dbc2412006-09-20 15:58:27 +02001788 * Misc device file operations.
1789 */
Arjan van de Vend54b1fd2007-02-12 00:55:34 -08001790static const struct file_operations zcrypt_fops = {
Martin Schwidefsky2dbc2412006-09-20 15:58:27 +02001791 .owner = THIS_MODULE,
1792 .read = zcrypt_read,
1793 .write = zcrypt_write,
1794 .unlocked_ioctl = zcrypt_unlocked_ioctl,
1795#ifdef CONFIG_COMPAT
1796 .compat_ioctl = zcrypt_compat_ioctl,
1797#endif
1798 .open = zcrypt_open,
Arnd Bergmann6038f372010-08-15 18:52:59 +02001799 .release = zcrypt_release,
1800 .llseek = no_llseek,
Martin Schwidefsky2dbc2412006-09-20 15:58:27 +02001801};
1802
Felix Beck1749a812008-04-17 07:46:28 +02001803/*
Martin Schwidefsky2dbc2412006-09-20 15:58:27 +02001804 * Misc device.
1805 */
1806static struct miscdevice zcrypt_misc_device = {
1807 .minor = MISC_DYNAMIC_MINOR,
1808 .name = "z90crypt",
1809 .fops = &zcrypt_fops,
1810};
1811
Ralph Wuerthner2f7c8bd2008-04-17 07:46:15 +02001812static int zcrypt_rng_device_count;
1813static u32 *zcrypt_rng_buffer;
1814static int zcrypt_rng_buffer_index;
1815static DEFINE_MUTEX(zcrypt_rng_mutex);
1816
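/*
 * hwrng read callback: refill the page sized buffer via zcrypt_rng()
 * whenever it runs empty and hand back one 32 bit word per call.
 */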
1817static int zcrypt_rng_data_read(struct hwrng *rng, u32 *data)
1818{
1819 int rc;
1820
Felix Beck1749a812008-04-17 07:46:28 +02001821 /*
Ralph Wuerthner2f7c8bd2008-04-17 07:46:15 +02001822 * We don't need locking here because the RNG API guarantees serialized
1823 * read method calls.
1824 */
1825 if (zcrypt_rng_buffer_index == 0) {
1826 rc = zcrypt_rng((char *) zcrypt_rng_buffer);
Holger Denglerdabecb22012-09-10 21:34:26 +02001827 /* on failure: retry once again after a requested rescan */
1828 if ((rc == -ENODEV) && (zcrypt_process_rescan()))
1829 rc = zcrypt_rng((char *) zcrypt_rng_buffer);
Ralph Wuerthner2f7c8bd2008-04-17 07:46:15 +02001830 if (rc < 0)
1831 return -EIO;
Harald Freudenbergerac2b96f2018-08-17 12:36:01 +02001832 zcrypt_rng_buffer_index = rc / sizeof(*data);
Ralph Wuerthner2f7c8bd2008-04-17 07:46:15 +02001833 }
1834 *data = zcrypt_rng_buffer[--zcrypt_rng_buffer_index];
Harald Freudenbergerac2b96f2018-08-17 12:36:01 +02001835 return sizeof(*data);
Ralph Wuerthner2f7c8bd2008-04-17 07:46:15 +02001836}
1837
1838static struct hwrng zcrypt_rng_dev = {
1839 .name = "zcrypt",
1840 .data_read = zcrypt_rng_data_read,
Ingo Tuchschererdb490cb2015-03-17 16:02:20 +01001841 .quality = 990,
Ralph Wuerthner2f7c8bd2008-04-17 07:46:15 +02001842};
1843
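/*
 * Register the zcrypt hwrng backend on first use; subsequent callers
 * only bump the reference count. The reported entropy quality is set
 * to zero if auto seeding was disabled via the hwrng_seed module
 * parameter.
 */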
Ingo Tuchscherere28d2af2016-08-25 11:16:03 +02001844int zcrypt_rng_device_add(void)
Ralph Wuerthner2f7c8bd2008-04-17 07:46:15 +02001845{
1846 int rc = 0;
1847
1848 mutex_lock(&zcrypt_rng_mutex);
1849 if (zcrypt_rng_device_count == 0) {
1850 zcrypt_rng_buffer = (u32 *) get_zeroed_page(GFP_KERNEL);
1851 if (!zcrypt_rng_buffer) {
1852 rc = -ENOMEM;
1853 goto out;
1854 }
1855 zcrypt_rng_buffer_index = 0;
Ingo Tuchschererdb490cb2015-03-17 16:02:20 +01001856 if (!zcrypt_hwrng_seed)
1857 zcrypt_rng_dev.quality = 0;
Ralph Wuerthner2f7c8bd2008-04-17 07:46:15 +02001858 rc = hwrng_register(&zcrypt_rng_dev);
1859 if (rc)
1860 goto out_free;
1861 zcrypt_rng_device_count = 1;
1862 } else
1863 zcrypt_rng_device_count++;
1864 mutex_unlock(&zcrypt_rng_mutex);
1865 return 0;
1866
1867out_free:
1868 free_page((unsigned long) zcrypt_rng_buffer);
1869out:
1870 mutex_unlock(&zcrypt_rng_mutex);
1871 return rc;
1872}
1873
Ingo Tuchscherere28d2af2016-08-25 11:16:03 +02001874void zcrypt_rng_device_remove(void)
Ralph Wuerthner2f7c8bd2008-04-17 07:46:15 +02001875{
1876 mutex_lock(&zcrypt_rng_mutex);
1877 zcrypt_rng_device_count--;
1878 if (zcrypt_rng_device_count == 0) {
1879 hwrng_unregister(&zcrypt_rng_dev);
1880 free_page((unsigned long) zcrypt_rng_buffer);
1881 }
1882 mutex_unlock(&zcrypt_rng_mutex);
1883}
1884
Holger Denglerdabecb22012-09-10 21:34:26 +02001885int __init zcrypt_debug_init(void)
1886{
Harald Freudenbergercccd85b2016-11-24 06:45:21 +01001887 zcrypt_dbf_info = debug_register("zcrypt", 1, 1,
1888 DBF_MAX_SPRINTF_ARGS * sizeof(long));
1889 debug_register_view(zcrypt_dbf_info, &debug_sprintf_view);
1890 debug_set_level(zcrypt_dbf_info, DBF_ERR);
Ingo Tuchscherere28d2af2016-08-25 11:16:03 +02001891
Holger Denglerdabecb22012-09-10 21:34:26 +02001892 return 0;
1893}
1894
1895void zcrypt_debug_exit(void)
1896{
Harald Freudenbergercccd85b2016-11-24 06:45:21 +01001897 debug_unregister(zcrypt_dbf_info);
Holger Denglerdabecb22012-09-10 21:34:26 +02001898}
1899
Harald Freudenberger00fab232018-09-17 16:18:41 +02001900#ifdef CONFIG_ZCRYPT_MULTIDEVNODES
1901
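/*
 * Set up the infrastructure for the additional zcrypt device nodes:
 * a 'zcrypt' device class, a char dev region with ZCRYPT_MAX_MINOR_NODES
 * minors, and the zcdn_create/zcdn_destroy class attributes used to
 * create and remove nodes via sysfs.
 */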
1902static int __init zcdn_init(void)
1903{
1904 int rc;
1905
1906 /* create a new class 'zcrypt' */
1907 zcrypt_class = class_create(THIS_MODULE, ZCRYPT_NAME);
1908 if (IS_ERR(zcrypt_class)) {
1909 rc = PTR_ERR(zcrypt_class);
1910 goto out_class_create_failed;
1911 }
1912 zcrypt_class->dev_release = zcdn_device_release;
1913
1914 /* alloc device minor range */
1915 rc = alloc_chrdev_region(&zcrypt_devt,
1916 0, ZCRYPT_MAX_MINOR_NODES,
1917 ZCRYPT_NAME);
1918 if (rc)
1919 goto out_alloc_chrdev_failed;
1920
1921 cdev_init(&zcrypt_cdev, &zcrypt_fops);
1922 zcrypt_cdev.owner = THIS_MODULE;
1923 rc = cdev_add(&zcrypt_cdev, zcrypt_devt, ZCRYPT_MAX_MINOR_NODES);
1924 if (rc)
1925 goto out_cdev_add_failed;
1926
1927 /* need some class specific sysfs attributes */
1928 rc = class_create_file(zcrypt_class, &class_attr_zcdn_create);
1929 if (rc)
1930 goto out_class_create_file_1_failed;
1931 rc = class_create_file(zcrypt_class, &class_attr_zcdn_destroy);
1932 if (rc)
1933 goto out_class_create_file_2_failed;
1934
1935 return 0;
1936
1937out_class_create_file_2_failed:
1938 class_remove_file(zcrypt_class, &class_attr_zcdn_create);
1939out_class_create_file_1_failed:
1940 cdev_del(&zcrypt_cdev);
1941out_cdev_add_failed:
1942 unregister_chrdev_region(zcrypt_devt, ZCRYPT_MAX_MINOR_NODES);
1943out_alloc_chrdev_failed:
1944 class_destroy(zcrypt_class);
1945out_class_create_failed:
1946 return rc;
1947}
1948
1949static void zcdn_exit(void)
1950{
1951 class_remove_file(zcrypt_class, &class_attr_zcdn_create);
1952 class_remove_file(zcrypt_class, &class_attr_zcdn_destroy);
1953 zcdn_destroy_all();
1954 cdev_del(&zcrypt_cdev);
1955 unregister_chrdev_region(zcrypt_devt, ZCRYPT_MAX_MINOR_NODES);
1956 class_destroy(zcrypt_class);
1957}
1958
1959#endif
1960
Martin Schwidefsky2dbc2412006-09-20 15:58:27 +02001961/**
Felix Beck1749a812008-04-17 07:46:28 +02001962 * zcrypt_api_init(): Module initialization.
1963 *
Martin Schwidefsky2dbc2412006-09-20 15:58:27 +02001964 * The module initialization code.
1965 */
1966int __init zcrypt_api_init(void)
1967{
1968 int rc;
1969
Holger Denglerdabecb22012-09-10 21:34:26 +02001970 rc = zcrypt_debug_init();
1971 if (rc)
1972 goto out;
1973
Harald Freudenberger00fab232018-09-17 16:18:41 +02001974#ifdef CONFIG_ZCRYPT_MULTIDEVNODES
1975 rc = zcdn_init();
1976 if (rc)
1977 goto out;
1978#endif
1979
Martin Schwidefsky2dbc2412006-09-20 15:58:27 +02001980 /* Register the request sprayer. */
1981 rc = misc_register(&zcrypt_misc_device);
Felix Beck1a89dd82008-07-14 09:59:27 +02001982 if (rc < 0)
Harald Freudenberger00fab232018-09-17 16:18:41 +02001983 goto out_misc_register_failed;
Martin Schwidefsky2dbc2412006-09-20 15:58:27 +02001984
Ingo Tuchschererfc1d3f02016-08-25 11:11:30 +02001985 zcrypt_msgtype6_init();
1986 zcrypt_msgtype50_init();
Harald Freudenberger00fab232018-09-17 16:18:41 +02001987
Martin Schwidefsky2dbc2412006-09-20 15:58:27 +02001988 return 0;
1989
Harald Freudenberger00fab232018-09-17 16:18:41 +02001990out_misc_register_failed:
1991#ifdef CONFIG_ZCRYPT_MULTIDEVNODES
1992 zcdn_exit();
1993#endif
1994 zcrypt_debug_exit();
Martin Schwidefsky2dbc2412006-09-20 15:58:27 +02001995out:
1996 return rc;
1997}
1998
1999/**
Felix Beck1749a812008-04-17 07:46:28 +02002000 * zcrypt_api_exit(): Module termination.
2001 *
Martin Schwidefsky2dbc2412006-09-20 15:58:27 +02002002 * The module termination code.
2003 */
Ingo Tuchschererfc1d3f02016-08-25 11:11:30 +02002004void __exit zcrypt_api_exit(void)
Martin Schwidefsky2dbc2412006-09-20 15:58:27 +02002005{
Harald Freudenberger00fab232018-09-17 16:18:41 +02002006#ifdef CONFIG_ZCRYPT_MULTIDEVNODES
2007 zcdn_exit();
2008#endif
Martin Schwidefsky2dbc2412006-09-20 15:58:27 +02002009 misc_deregister(&zcrypt_misc_device);
Ingo Tuchschererfc1d3f02016-08-25 11:11:30 +02002010 zcrypt_msgtype6_exit();
2011 zcrypt_msgtype50_exit();
Harald Freudenbergerefc598e2019-06-11 11:16:56 +02002012 zcrypt_ccamisc_exit();
Harald Freudenberger7384eb72019-08-30 16:07:08 +02002013 zcrypt_ep11misc_exit();
Harald Freudenbergercccd85b2016-11-24 06:45:21 +01002014 zcrypt_debug_exit();
Martin Schwidefsky2dbc2412006-09-20 15:58:27 +02002015}
2016
Martin Schwidefsky2dbc2412006-09-20 15:58:27 +02002017module_init(zcrypt_api_init);
2018module_exit(zcrypt_api_exit);