// SPDX-License-Identifier: GPL-2.0-only
/*
 * Routines supporting the Power 7+ Nest Accelerators driver
 *
 * Copyright (C) 2011-2012 International Business Machines Inc.
 *
 * Author: Kent Yoder <yoder1@us.ibm.com>
 */

#include <crypto/internal/aead.h>
#include <crypto/internal/hash.h>
#include <crypto/aes.h>
#include <crypto/sha.h>
#include <crypto/algapi.h>
#include <crypto/scatterwalk.h>
#include <linux/module.h>
#include <linux/moduleparam.h>
#include <linux/types.h>
#include <linux/mm.h>
#include <linux/scatterlist.h>
#include <linux/device.h>
#include <linux/of.h>
#include <asm/hvcall.h>
#include <asm/vio.h>

#include "nx_csbcpb.h"
#include "nx.h"


/**
 * nx_hcall_sync - make an H_COP_OP hcall for the passed in op structure
 *
 * @nx_ctx: the crypto context handle
 * @op: PFO operation struct to pass in
 * @may_sleep: flag indicating the request can sleep
 *
 * Make the hcall, retrying while the hardware is busy. If we cannot yield
 * the thread, limit the number of retries to 10 here.
 */
int nx_hcall_sync(struct nx_crypto_ctx *nx_ctx,
		  struct vio_pfo_op    *op,
		  u32                   may_sleep)
{
	int rc, retries = 10;
	struct vio_dev *viodev = nx_driver.viodev;

	atomic_inc(&(nx_ctx->stats->sync_ops));

	do {
		rc = vio_h_cop_sync(viodev, op);
	} while (rc == -EBUSY && !may_sleep && retries--);

	if (rc) {
		dev_dbg(&viodev->dev, "vio_h_cop_sync failed: rc: %d "
			"hcall rc: %ld\n", rc, op->hcall_err);
		atomic_inc(&(nx_ctx->stats->errors));
		atomic_set(&(nx_ctx->stats->last_error), op->hcall_err);
		atomic_set(&(nx_ctx->stats->last_error_pid), current->pid);
	}

	return rc;
}
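
/*
 * Illustrative call site (a sketch patterned on the nx-aes-* and nx-sha*
 * users of this helper; the descriptor-flag plumbing shown here is an
 * assumption drawn from those files, not something defined in this one):
 *
 *	rc = nx_hcall_sync(nx_ctx, &nx_ctx->op,
 *			   desc->flags & CRYPTO_TFM_REQ_MAY_SLEEP);
 */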
| 63 | |
| 64 | /** |
| 65 | * nx_build_sg_list - build an NX scatter list describing a single buffer |
| 66 | * |
| 67 | * @sg_head: pointer to the first scatter list element to build |
| 68 | * @start_addr: pointer to the linear buffer |
| 69 | * @len: length of the data at @start_addr |
| 70 | * @sgmax: the largest number of scatter list elements we're allowed to create |
| 71 | * |
| 72 | * This function will start writing nx_sg elements at @sg_head and keep |
| 73 | * writing them until all of the data from @start_addr is described or |
| 74 | * until sgmax elements have been written. Scatter list elements will be |
| 75 | * created such that none of the elements describes a buffer that crosses a 4K |
| 76 | * boundary. |
| 77 | */ |
| 78 | struct nx_sg *nx_build_sg_list(struct nx_sg *sg_head, |
| 79 | u8 *start_addr, |
Leonidas S. Barbosa | f129430 | 2014-10-28 15:50:45 -0200 | [diff] [blame] | 80 | unsigned int *len, |
Kent Yoder | ae0222b | 2012-05-14 10:59:38 +0000 | [diff] [blame] | 81 | u32 sgmax) |
| 82 | { |
| 83 | unsigned int sg_len = 0; |
| 84 | struct nx_sg *sg; |
| 85 | u64 sg_addr = (u64)start_addr; |
| 86 | u64 end_addr; |
| 87 | |
| 88 | /* determine the start and end for this address range - slightly |
| 89 | * different if this is in VMALLOC_REGION */ |
| 90 | if (is_vmalloc_addr(start_addr)) |
Michael Ellerman | 7187daf | 2012-07-25 21:19:48 +0000 | [diff] [blame] | 91 | sg_addr = page_to_phys(vmalloc_to_page(start_addr)) |
Kent Yoder | ae0222b | 2012-05-14 10:59:38 +0000 | [diff] [blame] | 92 | + offset_in_page(sg_addr); |
| 93 | else |
Michael Ellerman | 7187daf | 2012-07-25 21:19:48 +0000 | [diff] [blame] | 94 | sg_addr = __pa(sg_addr); |
Kent Yoder | ae0222b | 2012-05-14 10:59:38 +0000 | [diff] [blame] | 95 | |
Leonidas S. Barbosa | f129430 | 2014-10-28 15:50:45 -0200 | [diff] [blame] | 96 | end_addr = sg_addr + *len; |
Kent Yoder | ae0222b | 2012-05-14 10:59:38 +0000 | [diff] [blame] | 97 | |
	/* each iteration will write one struct nx_sg element and add the
	 * length of data described by that element to sg_len. Once @len bytes
	 * have been described (or @sgmax elements have been written), the
	 * loop ends. min_t is used to ensure @end_addr falls on the same page
	 * as sg_addr; if not, we need to create another nx_sg element for the
	 * data on the next page.
	 *
	 * Also when using vmalloc'ed data, every time that a system page
	 * boundary is crossed the physical address needs to be re-calculated.
	 */
	for (sg = sg_head; sg_len < *len; sg++) {
		u64 next_page;

		sg->addr = sg_addr;
		sg_addr = min_t(u64, NX_PAGE_NUM(sg_addr + NX_PAGE_SIZE),
				end_addr);

		next_page = (sg->addr & PAGE_MASK) + PAGE_SIZE;
		sg->len = min_t(u64, sg_addr, next_page) - sg->addr;
		sg_len += sg->len;

		if (sg_addr >= next_page &&
		    is_vmalloc_addr(start_addr + sg_len)) {
			sg_addr = page_to_phys(vmalloc_to_page(
						start_addr + sg_len));
			end_addr = sg_addr + *len - sg_len;
		}

		if ((sg - sg_head) == sgmax) {
			pr_err("nx: scatter/gather list overflow, pid: %d\n",
			       current->pid);
			sg++;
			break;
		}
	}
	*len = sg_len;

	/* return the moved sg_head pointer */
	return sg;
}
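
/*
 * Illustrative use (a sketch modeled on the nx-sha* callers; "out" and
 * "max_sg_len" are assumed locals, not defined in this file). Note that
 * @len is passed by reference and updated to the number of bytes the
 * list actually describes:
 *
 *	unsigned int len = SHA256_DIGEST_SIZE;
 *	struct nx_sg *out_sg;
 *
 *	out_sg = nx_build_sg_list(nx_ctx->out_sg, (u8 *)out, &len,
 *				  max_sg_len);
 *	nx_ctx->op.outlen = (out_sg - nx_ctx->out_sg) * sizeof(struct nx_sg);
 */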

/**
 * nx_walk_and_build - walk a linux scatterlist and build an nx scatterlist
 *
 * @nx_dst: pointer to the first nx_sg element to write
 * @sglen: max number of nx_sg entries we're allowed to write
 * @sg_src: pointer to the source linux scatterlist to walk
 * @start: number of bytes to fast-forward past at the beginning of @sg_src
 * @src_len: number of bytes to walk in @sg_src
 */
struct nx_sg *nx_walk_and_build(struct nx_sg       *nx_dst,
				unsigned int        sglen,
				struct scatterlist *sg_src,
				unsigned int        start,
				unsigned int       *src_len)
{
	struct scatter_walk walk;
	struct nx_sg *nx_sg = nx_dst;
	unsigned int n, offset = 0, len = *src_len;
	char *dst;

	/* we need to fast forward through @start bytes first */
	for (;;) {
		scatterwalk_start(&walk, sg_src);

		if (start < offset + sg_src->length)
			break;

		offset += sg_src->length;
		sg_src = sg_next(sg_src);
	}

	/* start - offset is the number of bytes to advance in the scatterlist
	 * element we're currently looking at */
	scatterwalk_advance(&walk, start - offset);

	while (len && (nx_sg - nx_dst) < sglen) {
		n = scatterwalk_clamp(&walk, len);
		if (!n) {
			/* In cases where we have a scatterlist chain,
			 * sg_next() handles it properly */
			scatterwalk_start(&walk, sg_next(walk.sg));
			n = scatterwalk_clamp(&walk, len);
		}
		dst = scatterwalk_map(&walk);

		nx_sg = nx_build_sg_list(nx_sg, dst, &n, sglen - (nx_sg - nx_dst));
		len -= n;

		scatterwalk_unmap(dst);
		scatterwalk_advance(&walk, n);
		scatterwalk_done(&walk, SCATTERWALK_FROM_SG, len);
	}
	/* update to_process */
	*src_len -= len;

	/* return the moved destination pointer */
	return nx_sg;
}

/**
 * trim_sg_list - trim an NX sg list so that it describes a bounded length
 * @sg: sg list head
 * @end: sg list end
 * @delta: the amount we need to crop in order to bound the list
 * @nbytes: length of data in the scatterlists; reduced by the amount of
 *          data that must be put back for later processing when cropping
 */
static long int trim_sg_list(struct nx_sg *sg,
			     struct nx_sg *end,
			     unsigned int delta,
			     unsigned int *nbytes)
{
	long int oplen;
	long int data_back;
	unsigned int is_delta = delta;

	while (delta && end > sg) {
		struct nx_sg *last = end - 1;

		if (last->len > delta) {
			last->len -= delta;
			delta = 0;
		} else {
			end--;
			delta -= last->len;
		}
	}

	/* There are cases where we need to crop the list in order to make it
	 * a block size multiple, but we also need to align the data. To do
	 * that, calculate how much data needs to be put back to be processed
	 * later. Note that (sg - end) below is negative; phyp interprets a
	 * negative length as "this parameter is a scatterlist" (see
	 * nx_build_sg_lists()).
	 */
	oplen = (sg - end) * sizeof(struct nx_sg);
	if (is_delta) {
		data_back = (abs(oplen) / AES_BLOCK_SIZE) * sg->len;
		data_back = *nbytes - (data_back & ~(AES_BLOCK_SIZE - 1));
		*nbytes -= data_back;
	}

	return oplen;
}

/**
 * nx_build_sg_lists - walk the input scatterlists and build arrays of NX
 *                     scatterlists based on them.
 *
 * @nx_ctx: NX crypto context for the lists we're building
 * @desc: the block cipher descriptor for the operation
 * @dst: destination scatterlist
 * @src: source scatterlist
 * @nbytes: length of data described in the scatterlists
 * @offset: number of bytes to fast-forward past at the beginning of
 *          scatterlists.
 * @iv: destination for the iv data, if the algorithm requires it
 *
 * This is common code shared by all the AES algorithms. It uses the block
 * cipher walk routines to traverse input and output scatterlists, building
 * corresponding NX scatterlists
 */
int nx_build_sg_lists(struct nx_crypto_ctx  *nx_ctx,
		      struct blkcipher_desc *desc,
		      struct scatterlist    *dst,
		      struct scatterlist    *src,
		      unsigned int          *nbytes,
		      unsigned int           offset,
		      u8                    *iv)
{
	unsigned int delta = 0;
	unsigned int total = *nbytes;
	struct nx_sg *nx_insg = nx_ctx->in_sg;
	struct nx_sg *nx_outsg = nx_ctx->out_sg;
	unsigned int max_sg_len;

	max_sg_len = min_t(u64, nx_ctx->ap->sglen,
			   nx_driver.of.max_sg_len/sizeof(struct nx_sg));
	max_sg_len = min_t(u64, max_sg_len,
			   nx_ctx->ap->databytelen/NX_PAGE_SIZE);

	if (iv)
		memcpy(iv, desc->info, AES_BLOCK_SIZE);

	*nbytes = min_t(u64, *nbytes, nx_ctx->ap->databytelen);

	nx_outsg = nx_walk_and_build(nx_outsg, max_sg_len, dst,
				     offset, nbytes);
	nx_insg = nx_walk_and_build(nx_insg, max_sg_len, src,
				    offset, nbytes);

	if (*nbytes < total)
		delta = *nbytes - (*nbytes & ~(AES_BLOCK_SIZE - 1));

	/* these lengths should be negative, which will indicate to phyp that
	 * the input and output parameters are scatterlists, not linear
	 * buffers */
	nx_ctx->op.inlen = trim_sg_list(nx_ctx->in_sg, nx_insg, delta, nbytes);
	nx_ctx->op.outlen = trim_sg_list(nx_ctx->out_sg, nx_outsg, delta, nbytes);

	return 0;
}
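
/*
 * Illustrative call (a sketch patterned on the nx-aes-cbc/ctr users of
 * this helper; "to_process", "processed" and the aes_cbc IV field are
 * assumptions taken from those call sites, not defined here):
 *
 *	rc = nx_build_sg_lists(nx_ctx, desc, dst, src, &to_process,
 *			       processed, csbcpb->cpb.aes_cbc.iv);
 *	if (rc)
 *		goto out;
 */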

/**
 * nx_ctx_init - initialize an nx_ctx's vio_pfo_op struct
 *
 * @nx_ctx: the nx context to initialize
 * @function: the function code for the op
 */
void nx_ctx_init(struct nx_crypto_ctx *nx_ctx, unsigned int function)
{
	spin_lock_init(&nx_ctx->lock);
	memset(nx_ctx->kmem, 0, nx_ctx->kmem_len);
	nx_ctx->csbcpb->csb.valid |= NX_CSB_VALID_BIT;

	nx_ctx->op.flags = function;
	nx_ctx->op.csbcpb = __pa(nx_ctx->csbcpb);
	nx_ctx->op.in = __pa(nx_ctx->in_sg);
	nx_ctx->op.out = __pa(nx_ctx->out_sg);

	if (nx_ctx->csbcpb_aead) {
		nx_ctx->csbcpb_aead->csb.valid |= NX_CSB_VALID_BIT;

		nx_ctx->op_aead.flags = function;
		nx_ctx->op_aead.csbcpb = __pa(nx_ctx->csbcpb_aead);
		nx_ctx->op_aead.in = __pa(nx_ctx->in_sg);
		nx_ctx->op_aead.out = __pa(nx_ctx->out_sg);
	}
}
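
/*
 * Illustrative use from a setkey path (a sketch; the HCOP_FC_AES
 * function-code name is an assumption drawn from the companion nx.h and
 * nx-aes-* sources):
 *
 *	nx_ctx_init(nx_ctx, HCOP_FC_AES);
 */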

static void nx_of_update_status(struct device   *dev,
				struct property *p,
				struct nx_of    *props)
{
	if (!strncmp(p->value, "okay", p->length)) {
		props->status = NX_WAITING;
		props->flags |= NX_OF_FLAG_STATUS_SET;
	} else {
		dev_info(dev, "%s: status '%s' is not 'okay'\n", __func__,
			 (char *)p->value);
	}
}

static void nx_of_update_sglen(struct device   *dev,
			       struct property *p,
			       struct nx_of    *props)
{
	if (p->length != sizeof(props->max_sg_len)) {
		dev_err(dev, "%s: unexpected format for "
			"ibm,max-sg-len property\n", __func__);
		dev_dbg(dev, "%s: ibm,max-sg-len is %d bytes "
			"long, expected %zd bytes\n", __func__,
			p->length, sizeof(props->max_sg_len));
		return;
	}

	props->max_sg_len = *(u32 *)p->value;
	props->flags |= NX_OF_FLAG_MAXSGLEN_SET;
}

static void nx_of_update_msc(struct device   *dev,
			     struct property *p,
			     struct nx_of    *props)
{
	struct msc_triplet *trip;
	struct max_sync_cop *msc;
	unsigned int bytes_so_far, i, lenp;

	msc = (struct max_sync_cop *)p->value;
	lenp = p->length;

	/* You can't tell if the data read in for this property is sane by its
	 * size alone. This is because there are sizes embedded in the data
	 * structure. The best we can do is check lengths as we parse and bail
	 * as soon as a length error is detected. */
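	/* For reference, the property is a packed sequence of variable-length
	 * records with this shape (a sketch of the definitions in nx.h; treat
	 * the declarations there as authoritative):
	 *
	 *	struct max_sync_cop {
	 *		u32 fc;
	 *		u32 mode;
	 *		u32 triplets;
	 *		struct msc_triplet trip[];	// 'triplets' entries
	 *	};
	 *
	 *	struct msc_triplet {
	 *		u32 keybitlen;
	 *		u32 databytelen;
	 *		u32 sglen;
	 *	};
	 */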
	bytes_so_far = 0;

	while ((bytes_so_far + sizeof(struct max_sync_cop)) <= lenp) {
		bytes_so_far += sizeof(struct max_sync_cop);

		trip = msc->trip;

		for (i = 0;
		     ((bytes_so_far + sizeof(struct msc_triplet)) <= lenp) &&
		     i < msc->triplets;
		     i++) {
			if (msc->fc >= NX_MAX_FC || msc->mode >= NX_MAX_MODE) {
				dev_err(dev, "unknown function code/mode "
					"combo: %d/%d (ignored)\n", msc->fc,
					msc->mode);
				goto next_loop;
			}

			if (!trip->sglen || trip->databytelen < NX_PAGE_SIZE) {
				dev_warn(dev, "bogus sglen/databytelen: "
					 "%u/%u (ignored)\n", trip->sglen,
					 trip->databytelen);
				goto next_loop;
			}

			switch (trip->keybitlen) {
			case 128:
			case 160:
				props->ap[msc->fc][msc->mode][0].databytelen =
					trip->databytelen;
				props->ap[msc->fc][msc->mode][0].sglen =
					trip->sglen;
				break;
			case 192:
				props->ap[msc->fc][msc->mode][1].databytelen =
					trip->databytelen;
				props->ap[msc->fc][msc->mode][1].sglen =
					trip->sglen;
				break;
			case 256:
				if (msc->fc == NX_FC_AES) {
					props->ap[msc->fc][msc->mode][2].
						databytelen = trip->databytelen;
					props->ap[msc->fc][msc->mode][2].sglen =
						trip->sglen;
				} else if (msc->fc == NX_FC_AES_HMAC ||
					   msc->fc == NX_FC_SHA) {
					props->ap[msc->fc][msc->mode][1].
						databytelen = trip->databytelen;
					props->ap[msc->fc][msc->mode][1].sglen =
						trip->sglen;
				} else {
					dev_warn(dev, "unknown function "
						 "code/key bit len combo"
						 ": (%u/256)\n", msc->fc);
				}
				break;
			case 512:
				props->ap[msc->fc][msc->mode][2].databytelen =
					trip->databytelen;
				props->ap[msc->fc][msc->mode][2].sglen =
					trip->sglen;
				break;
			default:
				dev_warn(dev, "unknown function code/key bit "
					 "len combo: (%u/%u)\n", msc->fc,
					 trip->keybitlen);
				break;
			}
next_loop:
			bytes_so_far += sizeof(struct msc_triplet);
			trip++;
		}

		msc = (struct max_sync_cop *)trip;
	}

	props->flags |= NX_OF_FLAG_MAXSYNCCOP_SET;
}

/**
 * nx_of_init - read openFirmware values from the device tree
 *
 * @dev: device handle
 * @props: pointer to struct to hold the properties values
 *
 * Called once at driver probe time, this function will read out the
 * openFirmware properties we use at runtime. If all the OF properties are
 * acceptable, when we exit this function props->flags will indicate that
 * we're ready to register our crypto algorithms.
 */
static void nx_of_init(struct device *dev, struct nx_of *props)
{
	struct device_node *base_node = dev->of_node;
	struct property *p;

	p = of_find_property(base_node, "status", NULL);
	if (!p)
		dev_info(dev, "%s: property 'status' not found\n", __func__);
	else
		nx_of_update_status(dev, p, props);

	p = of_find_property(base_node, "ibm,max-sg-len", NULL);
	if (!p)
		dev_info(dev, "%s: property 'ibm,max-sg-len' not found\n",
			 __func__);
	else
		nx_of_update_sglen(dev, p, props);

	p = of_find_property(base_node, "ibm,max-sync-cop", NULL);
	if (!p)
		dev_info(dev, "%s: property 'ibm,max-sync-cop' not found\n",
			 __func__);
	else
		nx_of_update_msc(dev, p, props);
}
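
/*
 * For reference, a hypothetical device tree node carrying the three
 * properties read above might look like this (the node name and all
 * values are illustrative only):
 *
 *	ibm,sym-encryption {
 *		status = "okay";
 *		ibm,max-sg-len = <0x1000>;
 *		ibm,max-sync-cop = <...>;	// fc/mode/triplet records
 *	};
 */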

static bool nx_check_prop(struct device *dev, u32 fc, u32 mode, int slot)
{
	struct alg_props *props = &nx_driver.of.ap[fc][mode][slot];

	if (!props->sglen || props->databytelen < NX_PAGE_SIZE) {
		if (dev)
			dev_warn(dev, "bogus sglen/databytelen for %u/%u/%u: "
				 "%u/%u (ignored)\n", fc, mode, slot,
				 props->sglen, props->databytelen);
		return false;
	}

	return true;
}

static bool nx_check_props(struct device *dev, u32 fc, u32 mode)
{
	int i;

	for (i = 0; i < 3; i++)
		if (!nx_check_prop(dev, fc, mode, i))
			return false;

	return true;
}

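/* The nx_register_* wrappers below return 0 (i.e. quietly skip the
 * algorithm) rather than an error when the device tree limits for a
 * function code/mode are bogus, so a single bad property doesn't keep
 * the remaining algorithms from registering.
 */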
static int nx_register_alg(struct crypto_alg *alg, u32 fc, u32 mode)
{
	return nx_check_props(&nx_driver.viodev->dev, fc, mode) ?
	       crypto_register_alg(alg) : 0;
}

static int nx_register_aead(struct aead_alg *alg, u32 fc, u32 mode)
{
	return nx_check_props(&nx_driver.viodev->dev, fc, mode) ?
	       crypto_register_aead(alg) : 0;
}

static int nx_register_shash(struct shash_alg *alg, u32 fc, u32 mode, int slot)
{
	return (slot >= 0 ? nx_check_prop(&nx_driver.viodev->dev,
					  fc, mode, slot) :
			    nx_check_props(&nx_driver.viodev->dev, fc, mode)) ?
	       crypto_register_shash(alg) : 0;
}

static void nx_unregister_alg(struct crypto_alg *alg, u32 fc, u32 mode)
{
	if (nx_check_props(NULL, fc, mode))
		crypto_unregister_alg(alg);
}

static void nx_unregister_aead(struct aead_alg *alg, u32 fc, u32 mode)
{
	if (nx_check_props(NULL, fc, mode))
		crypto_unregister_aead(alg);
}

static void nx_unregister_shash(struct shash_alg *alg, u32 fc, u32 mode,
				int slot)
{
	if (slot >= 0 ? nx_check_prop(NULL, fc, mode, slot) :
			nx_check_props(NULL, fc, mode))
		crypto_unregister_shash(alg);
}

/**
 * nx_register_algs - register algorithms with the crypto API
 *
 * Called from nx_probe()
 *
 * If all OF properties are in an acceptable state, the driver flags will
 * indicate that we're ready and we'll create our debugfs files and register
 * our crypto algorithms.
 */
static int nx_register_algs(void)
{
	int rc = -1;

	if (nx_driver.of.flags != NX_OF_FLAG_MASK_READY)
		goto out;

	memset(&nx_driver.stats, 0, sizeof(struct nx_stats));

	NX_DEBUGFS_INIT(&nx_driver);

	nx_driver.of.status = NX_OKAY;

	rc = nx_register_alg(&nx_ecb_aes_alg, NX_FC_AES, NX_MODE_AES_ECB);
	if (rc)
		goto out;

	rc = nx_register_alg(&nx_cbc_aes_alg, NX_FC_AES, NX_MODE_AES_CBC);
	if (rc)
		goto out_unreg_ecb;

	rc = nx_register_alg(&nx_ctr3686_aes_alg, NX_FC_AES, NX_MODE_AES_CTR);
	if (rc)
		goto out_unreg_cbc;

	rc = nx_register_aead(&nx_gcm_aes_alg, NX_FC_AES, NX_MODE_AES_GCM);
	if (rc)
		goto out_unreg_ctr3686;

	rc = nx_register_aead(&nx_gcm4106_aes_alg, NX_FC_AES, NX_MODE_AES_GCM);
	if (rc)
		goto out_unreg_gcm;

	rc = nx_register_aead(&nx_ccm_aes_alg, NX_FC_AES, NX_MODE_AES_CCM);
	if (rc)
		goto out_unreg_gcm4106;

	rc = nx_register_aead(&nx_ccm4309_aes_alg, NX_FC_AES, NX_MODE_AES_CCM);
	if (rc)
		goto out_unreg_ccm;

	rc = nx_register_shash(&nx_shash_sha256_alg, NX_FC_SHA, NX_MODE_SHA,
			       NX_PROPS_SHA256);
	if (rc)
		goto out_unreg_ccm4309;

	rc = nx_register_shash(&nx_shash_sha512_alg, NX_FC_SHA, NX_MODE_SHA,
			       NX_PROPS_SHA512);
	if (rc)
		goto out_unreg_s256;

	rc = nx_register_shash(&nx_shash_aes_xcbc_alg,
			       NX_FC_AES, NX_MODE_AES_XCBC_MAC, -1);
	if (rc)
		goto out_unreg_s512;

	goto out;

out_unreg_s512:
	nx_unregister_shash(&nx_shash_sha512_alg, NX_FC_SHA, NX_MODE_SHA,
			    NX_PROPS_SHA512);
out_unreg_s256:
	nx_unregister_shash(&nx_shash_sha256_alg, NX_FC_SHA, NX_MODE_SHA,
			    NX_PROPS_SHA256);
out_unreg_ccm4309:
	nx_unregister_aead(&nx_ccm4309_aes_alg, NX_FC_AES, NX_MODE_AES_CCM);
out_unreg_ccm:
	nx_unregister_aead(&nx_ccm_aes_alg, NX_FC_AES, NX_MODE_AES_CCM);
out_unreg_gcm4106:
	nx_unregister_aead(&nx_gcm4106_aes_alg, NX_FC_AES, NX_MODE_AES_GCM);
out_unreg_gcm:
	nx_unregister_aead(&nx_gcm_aes_alg, NX_FC_AES, NX_MODE_AES_GCM);
out_unreg_ctr3686:
	nx_unregister_alg(&nx_ctr3686_aes_alg, NX_FC_AES, NX_MODE_AES_CTR);
out_unreg_cbc:
	nx_unregister_alg(&nx_cbc_aes_alg, NX_FC_AES, NX_MODE_AES_CBC);
out_unreg_ecb:
	nx_unregister_alg(&nx_ecb_aes_alg, NX_FC_AES, NX_MODE_AES_ECB);
out:
	return rc;
}

/**
 * nx_crypto_ctx_init - create and initialize a crypto api context
 *
 * @nx_ctx: the crypto api context
 * @fc: function code for the context
 * @mode: the function code specific mode for this context
 */
static int nx_crypto_ctx_init(struct nx_crypto_ctx *nx_ctx, u32 fc, u32 mode)
{
	if (nx_driver.of.status != NX_OKAY) {
		pr_err("Attempt to initialize NX crypto context while device "
		       "is not available!\n");
		return -ENODEV;
	}

	/* we need an extra page for csbcpb_aead for these modes */
	if (mode == NX_MODE_AES_GCM || mode == NX_MODE_AES_CCM)
		nx_ctx->kmem_len = (5 * NX_PAGE_SIZE) +
				   sizeof(struct nx_csbcpb);
	else
		nx_ctx->kmem_len = (4 * NX_PAGE_SIZE) +
				   sizeof(struct nx_csbcpb);

	nx_ctx->kmem = kmalloc(nx_ctx->kmem_len, GFP_KERNEL);
	if (!nx_ctx->kmem)
		return -ENOMEM;

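	/* Layout of kmem after the round_up below (each slot between the
	 * pad and the trailing AEAD csbcpb is NX_PAGE_SIZE, i.e. 4K; the
	 * csbcpb_aead slot only exists for GCM/CCM contexts):
	 *
	 *	kmem
	 *	|--pad--|--csbcpb--|--in_sg--|--out_sg--|--csbcpb_aead--|
	 *		^ 4K aligned
	 */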
	/* the csbcpb and scatterlists must be 4K aligned pages */
	nx_ctx->csbcpb = (struct nx_csbcpb *)(round_up((u64)nx_ctx->kmem,
						       (u64)NX_PAGE_SIZE));
	nx_ctx->in_sg = (struct nx_sg *)((u8 *)nx_ctx->csbcpb + NX_PAGE_SIZE);
	nx_ctx->out_sg = (struct nx_sg *)((u8 *)nx_ctx->in_sg + NX_PAGE_SIZE);

	if (mode == NX_MODE_AES_GCM || mode == NX_MODE_AES_CCM)
		nx_ctx->csbcpb_aead =
			(struct nx_csbcpb *)((u8 *)nx_ctx->out_sg +
					     NX_PAGE_SIZE);

	/* give each context a pointer to global stats and their OF
	 * properties */
	nx_ctx->stats = &nx_driver.stats;
	memcpy(nx_ctx->props, nx_driver.of.ap[fc][mode],
	       sizeof(struct alg_props) * 3);

	return 0;
}

/* entry points from the crypto tfm initializers */
int nx_crypto_ctx_aes_ccm_init(struct crypto_aead *tfm)
{
	crypto_aead_set_reqsize(tfm, sizeof(struct nx_ccm_rctx));
	return nx_crypto_ctx_init(crypto_aead_ctx(tfm), NX_FC_AES,
				  NX_MODE_AES_CCM);
}

int nx_crypto_ctx_aes_gcm_init(struct crypto_aead *tfm)
{
	crypto_aead_set_reqsize(tfm, sizeof(struct nx_gcm_rctx));
	return nx_crypto_ctx_init(crypto_aead_ctx(tfm), NX_FC_AES,
				  NX_MODE_AES_GCM);
}

int nx_crypto_ctx_aes_ctr_init(struct crypto_tfm *tfm)
{
	return nx_crypto_ctx_init(crypto_tfm_ctx(tfm), NX_FC_AES,
				  NX_MODE_AES_CTR);
}

int nx_crypto_ctx_aes_cbc_init(struct crypto_tfm *tfm)
{
	return nx_crypto_ctx_init(crypto_tfm_ctx(tfm), NX_FC_AES,
				  NX_MODE_AES_CBC);
}

int nx_crypto_ctx_aes_ecb_init(struct crypto_tfm *tfm)
{
	return nx_crypto_ctx_init(crypto_tfm_ctx(tfm), NX_FC_AES,
				  NX_MODE_AES_ECB);
}

int nx_crypto_ctx_sha_init(struct crypto_tfm *tfm)
{
	return nx_crypto_ctx_init(crypto_tfm_ctx(tfm), NX_FC_SHA, NX_MODE_SHA);
}

int nx_crypto_ctx_aes_xcbc_init(struct crypto_tfm *tfm)
{
	return nx_crypto_ctx_init(crypto_tfm_ctx(tfm), NX_FC_AES,
				  NX_MODE_AES_XCBC_MAC);
}
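
/*
 * These hooks are wired into the algorithm definitions in the companion
 * per-algorithm files; the pattern is roughly this (a sketch, not a
 * verbatim copy of any one file):
 *
 *	.cra_init = nx_crypto_ctx_aes_cbc_init,
 *	.cra_exit = nx_crypto_ctx_exit,
 */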

/**
 * nx_crypto_ctx_exit - destroy a crypto api context
 *
 * @tfm: the crypto transform pointer for the context
 *
 * As crypto API contexts are destroyed, this exit hook is called to free the
 * memory associated with it.
 */
void nx_crypto_ctx_exit(struct crypto_tfm *tfm)
{
	struct nx_crypto_ctx *nx_ctx = crypto_tfm_ctx(tfm);

	kzfree(nx_ctx->kmem);
	nx_ctx->csbcpb = NULL;
	nx_ctx->csbcpb_aead = NULL;
	nx_ctx->in_sg = NULL;
	nx_ctx->out_sg = NULL;
}

void nx_crypto_ctx_aead_exit(struct crypto_aead *tfm)
{
	struct nx_crypto_ctx *nx_ctx = crypto_aead_ctx(tfm);

	kzfree(nx_ctx->kmem);
}

static int nx_probe(struct vio_dev *viodev, const struct vio_device_id *id)
{
	dev_dbg(&viodev->dev, "driver probed: %s resource id: 0x%x\n",
		viodev->name, viodev->resource_id);

	if (nx_driver.viodev) {
		dev_err(&viodev->dev, "%s: Attempt to register more than one "
			"instance of the hardware\n", __func__);
		return -EINVAL;
	}

	nx_driver.viodev = viodev;

	nx_of_init(&viodev->dev, &nx_driver.of);

	return nx_register_algs();
}

static int nx_remove(struct vio_dev *viodev)
{
	dev_dbg(&viodev->dev, "entering nx_remove for UA 0x%x\n",
		viodev->unit_address);

	if (nx_driver.of.status == NX_OKAY) {
		NX_DEBUGFS_FINI(&nx_driver);

		nx_unregister_shash(&nx_shash_aes_xcbc_alg,
				    NX_FC_AES, NX_MODE_AES_XCBC_MAC, -1);
		nx_unregister_shash(&nx_shash_sha512_alg,
				    NX_FC_SHA, NX_MODE_SHA, NX_PROPS_SHA512);
		nx_unregister_shash(&nx_shash_sha256_alg,
				    NX_FC_SHA, NX_MODE_SHA, NX_PROPS_SHA256);
		nx_unregister_aead(&nx_ccm4309_aes_alg,
				   NX_FC_AES, NX_MODE_AES_CCM);
		nx_unregister_aead(&nx_ccm_aes_alg, NX_FC_AES, NX_MODE_AES_CCM);
		nx_unregister_aead(&nx_gcm4106_aes_alg,
				   NX_FC_AES, NX_MODE_AES_GCM);
		nx_unregister_aead(&nx_gcm_aes_alg,
				   NX_FC_AES, NX_MODE_AES_GCM);
		nx_unregister_alg(&nx_ctr3686_aes_alg,
				  NX_FC_AES, NX_MODE_AES_CTR);
		nx_unregister_alg(&nx_cbc_aes_alg, NX_FC_AES, NX_MODE_AES_CBC);
		nx_unregister_alg(&nx_ecb_aes_alg, NX_FC_AES, NX_MODE_AES_ECB);
	}

	return 0;
}


/* module wide initialization/cleanup */
static int __init nx_init(void)
{
	return vio_register_driver(&nx_driver.viodriver);
}

static void __exit nx_fini(void)
{
	vio_unregister_driver(&nx_driver.viodriver);
}

static const struct vio_device_id nx_crypto_driver_ids[] = {
	{ "ibm,sym-encryption-v1", "ibm,sym-encryption" },
	{ "", "" }
};
MODULE_DEVICE_TABLE(vio, nx_crypto_driver_ids);

/* driver state structure */
struct nx_crypto_driver nx_driver = {
	.viodriver = {
		.id_table = nx_crypto_driver_ids,
		.probe = nx_probe,
		.remove = nx_remove,
		.name  = NX_NAME,
	},
};

module_init(nx_init);
module_exit(nx_fini);

MODULE_AUTHOR("Kent Yoder <yoder1@us.ibm.com>");
MODULE_DESCRIPTION(NX_STRING);
MODULE_LICENSE("GPL");
MODULE_VERSION(NX_VERSION);