// SPDX-License-Identifier: GPL-2.0
/*
 * CAAM/SEC 4.x QI transport/backend driver
 * Queue Interface backend functionality
 *
 * Copyright 2013-2016 Freescale Semiconductor, Inc.
 * Copyright 2016-2017 NXP
 */

#include <linux/cpumask.h>
#include <linux/kthread.h>
#include <soc/fsl/qman.h>

#include "regs.h"
#include "qi.h"
#include "desc.h"
#include "intern.h"
#include "desc_constr.h"

#define PREHDR_RSLS_SHIFT	31

/*
 * Use a reasonable backlog of frames (per CPU) as congestion threshold,
 * so that resources used by the in-flight buffers do not become a memory hog.
 */
#define MAX_RSP_FQ_BACKLOG_PER_CPU	256
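/*
 * Illustrative arithmetic (editorial note, not from the sources): with e.g.
 * 8 affine-portal CPUs, init_cgr() below programs a congestion threshold of
 * 8 * MAX_RSP_FQ_BACKLOG_PER_CPU = 2048 in-flight frames for the whole group.
 */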

#define CAAM_QI_ENQUEUE_RETRIES	10000

#define CAAM_NAPI_WEIGHT	63

/*
 * caam_napi - struct holding CAAM NAPI-related params
 * @irqtask: IRQ task for QI backend
 * @p: QMan portal
 */
struct caam_napi {
	struct napi_struct irqtask;
	struct qman_portal *p;
};

/*
 * caam_qi_pcpu_priv - percpu private data structure to maintain the list of
 *                     pending responses expected on each CPU.
 * @caam_napi: CAAM NAPI params
 * @net_dev: netdev used by NAPI
 * @rsp_fq: response FQ from CAAM
 */
struct caam_qi_pcpu_priv {
	struct caam_napi caam_napi;
	struct net_device net_dev;
	struct qman_fq *rsp_fq;
} ____cacheline_aligned;
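/*
 * Note (added for clarity): the embedded net_dev is not a real network
 * device; it exists only so that each CPU's NAPI context has a struct
 * net_device to hang off, as required by netif_napi_add() in caam_qi_init().
 */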

static DEFINE_PER_CPU(struct caam_qi_pcpu_priv, pcpu_qipriv);
static DEFINE_PER_CPU(int, last_cpu);

/*
 * caam_qi_priv - CAAM QI backend private params
 * @cgr: QMan congestion group
 * @qi_pdev: platform device for QI backend
 */
struct caam_qi_priv {
	struct qman_cgr cgr;
	struct platform_device *qi_pdev;
};

static struct caam_qi_priv qipriv ____cacheline_aligned;

/*
 * This is written by only one core - the one that initialized the CGR - and
 * read by multiple cores (all the others).
 */
bool caam_congested __read_mostly;
EXPORT_SYMBOL(caam_congested);

#ifdef CONFIG_DEBUG_FS
/*
 * This is a counter for the number of times the congestion group (where all
 * the request and response queues are) reached congestion. Incremented
 * each time the congestion callback is called with congested == true.
 */
static u64 times_congested;
#endif

/*
 * This is a cache of buffers, from which the users of the CAAM QI driver
 * can allocate short (CAAM_QI_MEMCACHE_SIZE) buffers. It's faster than
 * doing malloc on the hotpath.
 * NOTE: A more elegant solution would be to have some headroom in the frames
 *       being processed. This could be added by the dpaa-ethernet driver.
 *       This would pose a problem for userspace application processing which
 *       cannot know of this limitation. So for now, this will work.
 * NOTE: The memcache is SMP-safe. No need to handle spinlocks in here.
 */
static struct kmem_cache *qi_cache;
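/*
 * Illustrative pairing (hypothetical caller, not part of this file): an
 * extended descriptor is carved from the cache on the hot path and returned
 * from the completion callback:
 *
 *	edesc = qi_cache_alloc(GFP_ATOMIC);	// request path
 *	...
 *	qi_cache_free(edesc);			// inside drv_req->cbk()
 */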

int caam_qi_enqueue(struct device *qidev, struct caam_drv_req *req)
{
	struct qm_fd fd;
	dma_addr_t addr;
	int ret;
	int num_retries = 0;

	qm_fd_clear_fd(&fd);
	qm_fd_set_compound(&fd, qm_sg_entry_get_len(&req->fd_sgt[1]));

	addr = dma_map_single(qidev, req->fd_sgt, sizeof(req->fd_sgt),
			      DMA_BIDIRECTIONAL);
	if (dma_mapping_error(qidev, addr)) {
		dev_err(qidev, "DMA mapping error for QI enqueue request\n");
		return -EIO;
	}
	qm_fd_addr_set64(&fd, addr);

	do {
		ret = qman_enqueue(req->drv_ctx->req_fq, &fd);
		if (likely(!ret))
			return 0;

		if (ret != -EBUSY)
			break;
		num_retries++;
	} while (num_retries < CAAM_QI_ENQUEUE_RETRIES);

	dev_err(qidev, "qman_enqueue failed: %d\n", ret);

	return ret;
}
EXPORT_SYMBOL(caam_qi_enqueue);
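/*
 * Hedged usage sketch (assumed frontend caller; the names below are
 * hypothetical): the caller fills the compound frame list and sets the
 * completion callback before submitting:
 *
 *	req->drv_ctx = drv_ctx;
 *	req->cbk = my_done_cb;		// run from caam_rsp_fq_dqrr_cb()
 *	ret = caam_qi_enqueue(qidev, req);
 *	if (ret)			// e.g. -EBUSY after retries exhausted
 *		my_cleanup(req);
 */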

static void caam_fq_ern_cb(struct qman_portal *qm, struct qman_fq *fq,
			   const union qm_mr_entry *msg)
{
	const struct qm_fd *fd;
	struct caam_drv_req *drv_req;
	struct device *qidev = &(raw_cpu_ptr(&pcpu_qipriv)->net_dev.dev);

	fd = &msg->ern.fd;

	if (qm_fd_get_format(fd) != qm_fd_compound) {
		dev_err(qidev, "Non-compound FD from CAAM\n");
		return;
	}

	drv_req = (struct caam_drv_req *)phys_to_virt(qm_fd_addr_get64(fd));
	if (!drv_req) {
		dev_err(qidev,
			"Can't find original request for CAAM response\n");
		return;
	}

	dma_unmap_single(drv_req->drv_ctx->qidev, qm_fd_addr(fd),
			 sizeof(drv_req->fd_sgt), DMA_BIDIRECTIONAL);

	drv_req->cbk(drv_req, -EIO);
}

static struct qman_fq *create_caam_req_fq(struct device *qidev,
					  struct qman_fq *rsp_fq,
					  dma_addr_t hwdesc,
					  int fq_sched_flag)
{
	int ret;
	struct qman_fq *req_fq;
	struct qm_mcc_initfq opts;

	req_fq = kzalloc(sizeof(*req_fq), GFP_ATOMIC);
	if (!req_fq)
		return ERR_PTR(-ENOMEM);

	req_fq->cb.ern = caam_fq_ern_cb;
	req_fq->cb.fqs = NULL;

	ret = qman_create_fq(0, QMAN_FQ_FLAG_DYNAMIC_FQID |
				QMAN_FQ_FLAG_TO_DCPORTAL, req_fq);
	if (ret) {
		dev_err(qidev, "Failed to create session req FQ\n");
		goto create_req_fq_fail;
	}

	memset(&opts, 0, sizeof(opts));
	opts.we_mask = cpu_to_be16(QM_INITFQ_WE_FQCTRL | QM_INITFQ_WE_DESTWQ |
				   QM_INITFQ_WE_CONTEXTB |
				   QM_INITFQ_WE_CONTEXTA | QM_INITFQ_WE_CGID);
	opts.fqd.fq_ctrl = cpu_to_be16(QM_FQCTRL_CPCSTASH | QM_FQCTRL_CGE);
	qm_fqd_set_destwq(&opts.fqd, qm_channel_caam, 2);
	opts.fqd.context_b = cpu_to_be32(qman_fq_fqid(rsp_fq));
	qm_fqd_context_a_set64(&opts.fqd, hwdesc);
	opts.fqd.cgid = qipriv.cgr.cgrid;

	ret = qman_init_fq(req_fq, fq_sched_flag, &opts);
	if (ret) {
		dev_err(qidev, "Failed to init session req FQ\n");
		goto init_req_fq_fail;
	}

	dev_dbg(qidev, "Allocated request FQ %u for CPU %u\n", req_fq->fqid,
		smp_processor_id());
	return req_fq;

init_req_fq_fail:
	qman_destroy_fq(req_fq);
create_req_fq_fail:
	kfree(req_fq);
	return ERR_PTR(ret);
}

static int empty_retired_fq(struct device *qidev, struct qman_fq *fq)
{
	int ret;

	ret = qman_volatile_dequeue(fq, QMAN_VOLATILE_FLAG_WAIT_INT |
				    QMAN_VOLATILE_FLAG_FINISH,
				    QM_VDQCR_PRECEDENCE_VDQCR |
				    QM_VDQCR_NUMFRAMES_TILLEMPTY);
	if (ret) {
		dev_err(qidev, "Volatile dequeue fail for FQ: %u\n", fq->fqid);
		return ret;
	}

	do {
		struct qman_portal *p;

		p = qman_get_affine_portal(smp_processor_id());
		qman_p_poll_dqrr(p, 16);
	} while (fq->flags & QMAN_FQ_STATE_NE);

	return 0;
}

static int kill_fq(struct device *qidev, struct qman_fq *fq)
{
	u32 flags;
	int ret;

	ret = qman_retire_fq(fq, &flags);
	if (ret < 0) {
		dev_err(qidev, "qman_retire_fq failed: %d\n", ret);
		return ret;
	}

	if (!ret)
		goto empty_fq;

	/* Async FQ retirement condition */
	if (ret == 1) {
		/* Retry till FQ gets in retired state */
		do {
			msleep(20);
		} while (fq->state != qman_fq_state_retired);

		WARN_ON(fq->flags & QMAN_FQ_STATE_BLOCKOOS);
		WARN_ON(fq->flags & QMAN_FQ_STATE_ORL);
	}

empty_fq:
	if (fq->flags & QMAN_FQ_STATE_NE) {
		ret = empty_retired_fq(qidev, fq);
		if (ret) {
			dev_err(qidev, "empty_retired_fq fail for FQ: %u\n",
				fq->fqid);
			return ret;
		}
	}

	ret = qman_oos_fq(fq);
	if (ret)
		dev_err(qidev, "OOS of FQID: %u failed\n", fq->fqid);

	qman_destroy_fq(fq);
	kfree(fq);

	return ret;
}

static int empty_caam_fq(struct qman_fq *fq)
{
	int ret;
	struct qm_mcr_queryfq_np np;

	/* Wait till the old CAAM FQ gets empty */
	do {
		ret = qman_query_fq_np(fq, &np);
		if (ret)
			return ret;

		if (!qm_mcr_np_get(&np, frm_cnt))
			break;

		msleep(20);
	} while (1);

	/*
	 * Give extra time for pending jobs from this FQ in holding tanks
	 * to get processed
	 */
	msleep(20);
	return 0;
}

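/*
 * Swap in a new request FQ without dropping requests: create the replacement
 * FQ parked, point the driver context at it so new requests keep queuing,
 * drain and kill the old FQ, rewrite the pre-header + shared descriptor,
 * then schedule the new FQ.
 */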
int caam_drv_ctx_update(struct caam_drv_ctx *drv_ctx, u32 *sh_desc)
{
	int ret;
	u32 num_words;
	struct qman_fq *new_fq, *old_fq;
	struct device *qidev = drv_ctx->qidev;

	num_words = desc_len(sh_desc);
	if (num_words > MAX_SDLEN) {
		dev_err(qidev, "Invalid descriptor len: %d words\n", num_words);
		return -EINVAL;
	}

	/* Note down the old req FQ */
	old_fq = drv_ctx->req_fq;

	/* Create a new req FQ in parked state */
	new_fq = create_caam_req_fq(drv_ctx->qidev, drv_ctx->rsp_fq,
				    drv_ctx->context_a, 0);
	if (IS_ERR(new_fq)) {
		dev_err(qidev, "FQ allocation for shdesc update failed\n");
		return PTR_ERR(new_fq);
	}

	/* Hook up the new FQ to the context so that new requests keep queuing */
	drv_ctx->req_fq = new_fq;

	/* Empty and remove the old FQ */
	ret = empty_caam_fq(old_fq);
	if (ret) {
		dev_err(qidev, "Old CAAM FQ empty failed: %d\n", ret);

		/* We can revert to the old FQ */
		drv_ctx->req_fq = old_fq;

		if (kill_fq(qidev, new_fq))
			dev_warn(qidev, "New CAAM FQ kill failed\n");

		return ret;
	}

	/*
	 * Re-initialise the pre-header: set RSLS and SDLEN, then update the
	 * shared descriptor for the driver context.
	 */
	drv_ctx->prehdr[0] = cpu_to_caam32((1 << PREHDR_RSLS_SHIFT) |
					   num_words);
	memcpy(drv_ctx->sh_desc, sh_desc, desc_bytes(sh_desc));
	dma_sync_single_for_device(qidev, drv_ctx->context_a,
				   sizeof(drv_ctx->sh_desc) +
				   sizeof(drv_ctx->prehdr),
				   DMA_BIDIRECTIONAL);

	/* Put the new FQ in scheduled state */
	ret = qman_schedule_fq(new_fq);
	if (ret) {
		dev_err(qidev, "Fail to sched new CAAM FQ, ecode = %d\n", ret);

		/*
		 * We can kill the new FQ and revert to the old one. Since the
		 * descriptor is already modified, this is treated as success.
		 */
		drv_ctx->req_fq = old_fq;

		if (kill_fq(qidev, new_fq))
			dev_warn(qidev, "New CAAM FQ kill failed\n");
	} else if (kill_fq(qidev, old_fq)) {
		dev_warn(qidev, "Old CAAM FQ kill failed\n");
	}

	return 0;
}
EXPORT_SYMBOL(caam_drv_ctx_update);

struct caam_drv_ctx *caam_drv_ctx_init(struct device *qidev,
				       int *cpu,
				       u32 *sh_desc)
{
	size_t size;
	u32 num_words;
	dma_addr_t hwdesc;
	struct caam_drv_ctx *drv_ctx;
	const cpumask_t *cpus = qman_affine_cpus();

	num_words = desc_len(sh_desc);
	if (num_words > MAX_SDLEN) {
		dev_err(qidev, "Invalid descriptor len: %d words\n",
			num_words);
		return ERR_PTR(-EINVAL);
	}

	drv_ctx = kzalloc(sizeof(*drv_ctx), GFP_ATOMIC);
	if (!drv_ctx)
		return ERR_PTR(-ENOMEM);

	/*
	 * Initialise the pre-header - set RSLS and SDLEN - and the shared
	 * descriptor, then DMA-map them.
	 */
	drv_ctx->prehdr[0] = cpu_to_caam32((1 << PREHDR_RSLS_SHIFT) |
					   num_words);
	memcpy(drv_ctx->sh_desc, sh_desc, desc_bytes(sh_desc));
	size = sizeof(drv_ctx->prehdr) + sizeof(drv_ctx->sh_desc);
	hwdesc = dma_map_single(qidev, drv_ctx->prehdr, size,
				DMA_BIDIRECTIONAL);
	if (dma_mapping_error(qidev, hwdesc)) {
		dev_err(qidev, "DMA map error for preheader + shdesc\n");
		kfree(drv_ctx);
		return ERR_PTR(-ENOMEM);
	}
	drv_ctx->context_a = hwdesc;

	/* If the given CPU does not own the portal, choose another one that does */
	if (!cpumask_test_cpu(*cpu, cpus)) {
		int *pcpu = &get_cpu_var(last_cpu);

		*pcpu = cpumask_next(*pcpu, cpus);
		if (*pcpu >= nr_cpu_ids)
			*pcpu = cpumask_first(cpus);
		*cpu = *pcpu;

		put_cpu_var(last_cpu);
	}
	drv_ctx->cpu = *cpu;

	/* Find the response FQ hooked with this CPU */
	drv_ctx->rsp_fq = per_cpu(pcpu_qipriv.rsp_fq, drv_ctx->cpu);

	/* Attach request FQ */
	drv_ctx->req_fq = create_caam_req_fq(qidev, drv_ctx->rsp_fq, hwdesc,
					     QMAN_INITFQ_FLAG_SCHED);
	if (IS_ERR(drv_ctx->req_fq)) {
		dev_err(qidev, "create_caam_req_fq failed\n");
		dma_unmap_single(qidev, hwdesc, size, DMA_BIDIRECTIONAL);
		kfree(drv_ctx);
		return ERR_PTR(-ENOMEM);
	}

	drv_ctx->qidev = qidev;
	return drv_ctx;
}
EXPORT_SYMBOL(caam_drv_ctx_init);

void *qi_cache_alloc(gfp_t flags)
{
	return kmem_cache_alloc(qi_cache, flags);
}
EXPORT_SYMBOL(qi_cache_alloc);

void qi_cache_free(void *obj)
{
	kmem_cache_free(qi_cache, obj);
}
EXPORT_SYMBOL(qi_cache_free);

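/*
 * Standard NAPI contract: if fewer frames than the budget were cleaned,
 * polling is done, so complete NAPI and re-enable the DQRR interrupt source
 * (the counterpart of caam_qi_napi_schedule() below).
 */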
static int caam_qi_poll(struct napi_struct *napi, int budget)
{
	struct caam_napi *np = container_of(napi, struct caam_napi, irqtask);

	int cleaned = qman_p_poll_dqrr(np->p, budget);

	if (cleaned < budget) {
		napi_complete(napi);
		qman_p_irqsource_add(np->p, QM_PIRQ_DQRI);
	}

	return cleaned;
}

void caam_drv_ctx_rel(struct caam_drv_ctx *drv_ctx)
{
	if (IS_ERR_OR_NULL(drv_ctx))
		return;

	/* Remove request FQ */
	if (kill_fq(drv_ctx->qidev, drv_ctx->req_fq))
		dev_err(drv_ctx->qidev, "Crypto session req FQ kill failed\n");

	dma_unmap_single(drv_ctx->qidev, drv_ctx->context_a,
			 sizeof(drv_ctx->sh_desc) + sizeof(drv_ctx->prehdr),
			 DMA_BIDIRECTIONAL);
	kfree(drv_ctx);
}
EXPORT_SYMBOL(caam_drv_ctx_rel);
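/*
 * Hedged lifecycle sketch (assumed frontend usage; names are illustrative):
 *
 *	drv_ctx = caam_drv_ctx_init(qidev, &cpu, sh_desc);	// session setup
 *	...	// submit requests with caam_qi_enqueue()
 *	caam_drv_ctx_update(drv_ctx, new_sh_desc);	// e.g. on rekeying
 *	caam_drv_ctx_rel(drv_ctx);			// session teardown
 */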

void caam_qi_shutdown(struct device *qidev)
{
	int i;
	struct caam_qi_priv *priv = dev_get_drvdata(qidev);
	const cpumask_t *cpus = qman_affine_cpus();

	for_each_cpu(i, cpus) {
		struct napi_struct *irqtask;

		irqtask = &per_cpu_ptr(&pcpu_qipriv.caam_napi, i)->irqtask;
		napi_disable(irqtask);
		netif_napi_del(irqtask);

		if (kill_fq(qidev, per_cpu(pcpu_qipriv.rsp_fq, i)))
			dev_err(qidev, "Rsp FQ kill failed, cpu: %d\n", i);
	}

	qman_delete_cgr_safe(&priv->cgr);
	qman_release_cgrid(priv->cgr.cgrid);

	kmem_cache_destroy(qi_cache);

	platform_device_unregister(priv->qi_pdev);
}

static void cgr_cb(struct qman_portal *qm, struct qman_cgr *cgr, int congested)
{
	caam_congested = congested;

	if (congested) {
#ifdef CONFIG_DEBUG_FS
		times_congested++;
#endif
		pr_debug_ratelimited("CAAM entered congestion\n");
	} else {
		pr_debug_ratelimited("CAAM exited congestion\n");
	}
}

static int caam_qi_napi_schedule(struct qman_portal *p, struct caam_napi *np)
{
	/*
	 * In case of a threaded ISR, in_irq() does not return an appropriate
	 * value for RT kernels, so use in_serving_softirq() to distinguish
	 * between softirq and irq contexts.
	 */
	if (unlikely(in_irq() || !in_serving_softirq())) {
		/* Disable the QMan IRQ source and invoke NAPI */
		qman_p_irqsource_remove(p, QM_PIRQ_DQRI);
		np->p = p;
		napi_schedule(&np->irqtask);
		return 1;
	}
	return 0;
}

static enum qman_cb_dqrr_result caam_rsp_fq_dqrr_cb(struct qman_portal *p,
						    struct qman_fq *rsp_fq,
						    const struct qm_dqrr_entry *dqrr)
{
	struct caam_napi *caam_napi = raw_cpu_ptr(&pcpu_qipriv.caam_napi);
	struct caam_drv_req *drv_req;
	const struct qm_fd *fd;
	struct device *qidev = &(raw_cpu_ptr(&pcpu_qipriv)->net_dev.dev);
	u32 status;

	if (caam_qi_napi_schedule(p, caam_napi))
		return qman_cb_dqrr_stop;

	fd = &dqrr->fd;
	status = be32_to_cpu(fd->status);
	if (unlikely(status)) {
		u32 ssrc = status & JRSTA_SSRC_MASK;
		u8 err_id = status & JRSTA_CCBERR_ERRID_MASK;

		if (ssrc != JRSTA_SSRC_CCB_ERROR ||
		    err_id != JRSTA_CCBERR_ERRID_ICVCHK)
			dev_err(qidev, "Error: %#x in CAAM response FD\n",
				status);
	}

	if (unlikely(qm_fd_get_format(fd) != qm_fd_compound)) {
		dev_err(qidev, "Non-compound FD from CAAM\n");
		return qman_cb_dqrr_consume;
	}

	drv_req = (struct caam_drv_req *)phys_to_virt(qm_fd_addr_get64(fd));
	if (unlikely(!drv_req)) {
		dev_err(qidev,
			"Can't find original request for caam response\n");
		return qman_cb_dqrr_consume;
	}

	dma_unmap_single(drv_req->drv_ctx->qidev, qm_fd_addr(fd),
			 sizeof(drv_req->fd_sgt), DMA_BIDIRECTIONAL);

	drv_req->cbk(drv_req, status);
	return qman_cb_dqrr_consume;
}

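/*
 * Create one response FQ per affine-portal CPU: frames dequeue to that CPU's
 * channel with context/data stashing enabled, and the FQ is placed in the
 * common congestion group.
 */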
static int alloc_rsp_fq_cpu(struct device *qidev, unsigned int cpu)
{
	struct qm_mcc_initfq opts;
	struct qman_fq *fq;
	int ret;

	fq = kzalloc(sizeof(*fq), GFP_KERNEL | GFP_DMA);
	if (!fq)
		return -ENOMEM;

	fq->cb.dqrr = caam_rsp_fq_dqrr_cb;

	ret = qman_create_fq(0, QMAN_FQ_FLAG_NO_ENQUEUE |
			     QMAN_FQ_FLAG_DYNAMIC_FQID, fq);
	if (ret) {
		dev_err(qidev, "Rsp FQ create failed\n");
		kfree(fq);
		return -ENODEV;
	}

	memset(&opts, 0, sizeof(opts));
	opts.we_mask = cpu_to_be16(QM_INITFQ_WE_FQCTRL | QM_INITFQ_WE_DESTWQ |
				   QM_INITFQ_WE_CONTEXTB |
				   QM_INITFQ_WE_CONTEXTA | QM_INITFQ_WE_CGID);
	opts.fqd.fq_ctrl = cpu_to_be16(QM_FQCTRL_CTXASTASHING |
				       QM_FQCTRL_CPCSTASH | QM_FQCTRL_CGE);
	qm_fqd_set_destwq(&opts.fqd, qman_affine_channel(cpu), 3);
	opts.fqd.cgid = qipriv.cgr.cgrid;
	opts.fqd.context_a.stashing.exclusive = QM_STASHING_EXCL_CTX |
						QM_STASHING_EXCL_DATA;
	qm_fqd_set_stashing(&opts.fqd, 0, 1, 1);

	ret = qman_init_fq(fq, QMAN_INITFQ_FLAG_SCHED, &opts);
	if (ret) {
		dev_err(qidev, "Rsp FQ init failed\n");
		kfree(fq);
		return -ENODEV;
	}

	per_cpu(pcpu_qipriv.rsp_fq, cpu) = fq;

	dev_dbg(qidev, "Allocated response FQ %u for CPU %u", fq->fqid, cpu);
	return 0;
}

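/*
 * Set up the congestion group shared by all request and response FQs:
 * cgr_cb() flips caam_congested when the group's frame count crosses the
 * threshold computed below.
 */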
static int init_cgr(struct device *qidev)
{
	int ret;
	struct qm_mcc_initcgr opts;
	const u64 val = (u64)cpumask_weight(qman_affine_cpus()) *
			MAX_RSP_FQ_BACKLOG_PER_CPU;

	ret = qman_alloc_cgrid(&qipriv.cgr.cgrid);
	if (ret) {
		dev_err(qidev, "CGR alloc failed for rsp FQs: %d\n", ret);
		return ret;
	}

	qipriv.cgr.cb = cgr_cb;
	memset(&opts, 0, sizeof(opts));
	opts.we_mask = cpu_to_be16(QM_CGR_WE_CSCN_EN | QM_CGR_WE_CS_THRES |
				   QM_CGR_WE_MODE);
	opts.cgr.cscn_en = QM_CGR_EN;
	opts.cgr.mode = QMAN_CGR_MODE_FRAME;
	qm_cgr_cs_thres_set64(&opts.cgr.cs_thres, val, 1);

	ret = qman_create_cgr(&qipriv.cgr, QMAN_CGR_FLAG_USE_INIT, &opts);
	if (ret) {
		dev_err(qidev, "Error %d creating CAAM CGRID: %u\n", ret,
			qipriv.cgr.cgrid);
		return ret;
	}

	dev_dbg(qidev, "Congestion threshold set to %llu\n", val);
	return 0;
}

static int alloc_rsp_fqs(struct device *qidev)
{
	int ret, i;
	const cpumask_t *cpus = qman_affine_cpus();

	/* Now create response FQs */
	for_each_cpu(i, cpus) {
		ret = alloc_rsp_fq_cpu(qidev, i);
		if (ret) {
			dev_err(qidev, "CAAM rsp FQ alloc failed, cpu: %u", i);
			return ret;
		}
	}

	return 0;
}

static void free_rsp_fqs(void)
{
	int i;
	const cpumask_t *cpus = qman_affine_cpus();

	for_each_cpu(i, cpus)
		kfree(per_cpu(pcpu_qipriv.rsp_fq, i));
}

int caam_qi_init(struct platform_device *caam_pdev)
{
	int err, i;
	struct platform_device *qi_pdev;
	struct device *ctrldev = &caam_pdev->dev, *qidev;
	struct caam_drv_private *ctrlpriv;
	const cpumask_t *cpus = qman_affine_cpus();
	static struct platform_device_info qi_pdev_info = {
		.name = "caam_qi",
		.id = PLATFORM_DEVID_NONE
	};

	qi_pdev_info.parent = ctrldev;
	qi_pdev_info.dma_mask = dma_get_mask(ctrldev);
	qi_pdev = platform_device_register_full(&qi_pdev_info);
	if (IS_ERR(qi_pdev))
		return PTR_ERR(qi_pdev);
	set_dma_ops(&qi_pdev->dev, get_dma_ops(ctrldev));

	ctrlpriv = dev_get_drvdata(ctrldev);
	qidev = &qi_pdev->dev;

	qipriv.qi_pdev = qi_pdev;
	dev_set_drvdata(qidev, &qipriv);

	/* Initialize the congestion detection */
	err = init_cgr(qidev);
	if (err) {
		dev_err(qidev, "CGR initialization failed: %d\n", err);
		platform_device_unregister(qi_pdev);
		return err;
	}

	/* Initialise response FQs */
	err = alloc_rsp_fqs(qidev);
	if (err) {
		dev_err(qidev, "Can't allocate CAAM response FQs: %d\n", err);
		free_rsp_fqs();
		platform_device_unregister(qi_pdev);
		return err;
	}

	/* Enable the NAPI contexts on each core that has an affine portal */
	for_each_cpu(i, cpus) {
		struct caam_qi_pcpu_priv *priv = per_cpu_ptr(&pcpu_qipriv, i);
		struct caam_napi *caam_napi = &priv->caam_napi;
		struct napi_struct *irqtask = &caam_napi->irqtask;
		struct net_device *net_dev = &priv->net_dev;

		net_dev->dev = *qidev;
		INIT_LIST_HEAD(&net_dev->napi_list);

		netif_napi_add(net_dev, irqtask, caam_qi_poll,
			       CAAM_NAPI_WEIGHT);

		napi_enable(irqtask);
	}

	/* Hook up the QI device to the parent controlling caam device */
	ctrlpriv->qidev = qidev;

	qi_cache = kmem_cache_create("caamqicache", CAAM_QI_MEMCACHE_SIZE, 0,
				     SLAB_CACHE_DMA, NULL);
	if (!qi_cache) {
		dev_err(qidev, "Can't allocate CAAM cache\n");
		free_rsp_fqs();
		platform_device_unregister(qi_pdev);
		return -ENOMEM;
	}

#ifdef CONFIG_DEBUG_FS
	debugfs_create_file("qi_congested", 0444, ctrlpriv->ctl,
			    &times_congested, &caam_fops_u64_ro);
#endif
	dev_info(qidev, "Linux CAAM Queue I/F driver initialised\n");
	return 0;
}